hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c2d5eaaf1132afd1bc8c11e078a763b26d068d6 | 8,594 | py | Python | sktime/forecasting/online_learning/_prediction_weighted_ensembler.py | BINAYKUMAR943/sktime | f02f656de86da420b1c14e58dc60194261969eb3 | [
"BSD-3-Clause"
] | 2 | 2020-12-25T08:08:38.000Z | 2021-04-07T08:00:56.000Z | sktime/forecasting/online_learning/_prediction_weighted_ensembler.py | afzal442/sktime | 294429e7f2ac5824171bb61ad075e0af0055cb02 | [
"BSD-3-Clause"
] | 1 | 2021-05-15T16:24:02.000Z | 2021-05-16T05:25:31.000Z | sktime/forecasting/online_learning/_prediction_weighted_ensembler.py | afzal442/sktime | 294429e7f2ac5824171bb61ad075e0af0055cb02 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# !/usr/bin/env python3 -u
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""Implements online algorithms for prediction weighted ensembles."""
import numpy as np
from scipy.optimize import bisect
from scipy.optimize import nnls
class _PredictionWeightedEnsembler:
    """Wrapper class to handle ensemble algorithms that use multiple forecasters.

    This implements default methods for setting uniform weights, updating
    and prediction.

    Parameters
    ----------
    n_estimators : int
        number of estimators
    loss_func : function
        loss function which follows sklearn.metrics API, for updating weights
    """

    _tags = {
        "univariate-only": True,
        "requires-fh-in-fit": False,
        "handles-missing-data": False,
    }

    def __init__(self, n_estimators=10, loss_func=None):
        self.n_estimators = n_estimators
        # Start from a uniform distribution over the estimators.
        self.weights = np.ones(n_estimators) / n_estimators
        self.loss_func = loss_func
        super(_PredictionWeightedEnsembler, self).__init__()

    def _predict(self, y_pred):
        """Make predictions by taking weighted average of forecaster predictions.

        Parameters
        ----------
        y_pred : np.ndarray, shape=(estimator_axis, time_axis)
            array with predictions from the estimators; the estimator axis
            must come first so it contracts with ``self.weights`` in the dot
            product below

        Returns
        -------
        prediction : np.ndarray, shape=(time_axis,)
            array with our predictions
        """
        return np.dot(self.weights, y_pred)

    def _modify_weights(self, new_array):
        """Multiply pointwise the current weights with a new array of weights.

        Parameters
        ----------
        new_array : np.ndarray
            input array for pointwise multiplication
        """
        self.weights = self.weights * new_array
        # Renormalize so the weights remain a probability distribution.
        self.weights /= np.sum(self.weights)

    def _update(self, y_pred, y_true):
        """Update fitted parameters from new observations.

        Subclasses implement the actual weighting algorithm.

        Parameters
        ----------
        y_pred : np.ndarray, shape=(estimator_axis, time_axis)
            array with predictions from the estimators
        y_true : np.ndarray, shape=(time_axis,)
            array with actual values for the predicted quantity
        """
        raise NotImplementedError()

    def _uniform_weights(self, n_estimators):
        """Reset weights for n estimators to uniform weights.

        Parameters
        ----------
        n_estimators : int
            number of estimators
        """
        # Keep ``n_estimators`` in sync with the new count; ``self.n`` is
        # retained for backward compatibility with existing readers.
        self.n = n_estimators
        self.n_estimators = n_estimators
        self.weights = np.ones(n_estimators) / n_estimators
class HedgeExpertEnsemble(_PredictionWeightedEnsembler):
    """Use hedge-style ensemble algorithms.

    Wrapper for hedge-style ensemble algorithms with a forecasting horizon and
    normalizing constant.

    Parameters
    ----------
    n_estimators : int
        number of estimators
    T : int
        forecasting horizon (in terms of timesteps)
    a : float
        normalizing constant
    loss_func : function
        loss function which follows sklearn.metrics API, for updating weights
    """

    _tags = {
        "univariate-only": True,
        "requires-fh-in-fit": False,
        "handles-missing-data": False,
    }

    def __init__(self, n_estimators=10, T=10, a=1, loss_func=None):
        # ``loss_func`` is stored by the parent constructor; the redundant
        # re-assignment that used to follow has been removed.
        super().__init__(n_estimators=n_estimators, loss_func=loss_func)
        self.T = T
        self.a = a
        self._uniform_weights(n_estimators)
class NormalHedgeEnsemble(HedgeExpertEnsemble):
    """Parameter free hedging algorithm.

    Implementation of A Parameter-free Hedging Algorithm,
    Kamalika Chaudhuri, Yoav Freund, Daniel Hsu (2009) as a hedge-style
    algorithm.

    Parameters
    ----------
    n_estimators : int
        number of estimators
    T : int
        forecasting horizon (in terms of timesteps)
    a : float
        normalizing constant
    loss_func : function
        loss function which follows sklearn.metrics API, for updating weights
    """

    _tags = {
        "univariate-only": True,
        "requires-fh-in-fit": False,
        "handles-missing-data": False,
    }

    def __init__(self, n_estimators=10, a=1, loss_func=None):
        super().__init__(n_estimators=n_estimators, T=None, a=a, loss_func=loss_func)
        # Cumulative regret of each estimator relative to the ensemble.
        self.R = np.zeros(n_estimators)

    def update(self, y_pred, y_true, low_c=0.01):
        """Update forecaster weights.

        The weights are updated over the estimators by passing previous
        observations and updating based on Normal Hedge.

        Parameters
        ----------
        y_pred : np.ndarray, shape=(estimator_axis, time_axis)
            array with predictions from the estimators
        y_true : np.ndarray, shape=(time_axis,)
            array with actual values for the predicted quantity
        low_c : float
            lower bound of the root-finding window forwarded to
            ``_update_weights``
        """
        assert y_pred.shape[1] == len(y_true), "Time Dimension Matches"
        time_length = y_pred.shape[1]
        for i in range(time_length):
            # Per-estimator loss on the i-th observation.
            loss_vector = np.array(
                [
                    self.loss_func([prediction], [y_true[i]])
                    for prediction in y_pred[:, i]
                ]
            )
            average_loss = np.dot(self.weights, loss_vector)
            # Positive regret means that estimator beat the ensemble.
            instant_regret = average_loss - loss_vector
            self.R += instant_regret
            self._update_weights(low_c=low_c)

    def _update_weights(self, low_c=0.01):
        """Update forecaster weights.

        Update the weights on each of the estimators by performing a potential
        function update with a root-finding search. ``low_c`` is the lower
        bound of the window the root finding is performed over.

        Parameters
        ----------
        low_c : float
            lowest value that c can take
        """
        # Clip negative regrets to zero — vectorized equivalent of the old
        # element-wise ``0 if 0 > x else x`` map.
        R_plus = np.maximum(self.R, 0.0)
        # NOTE(review): if every regret is <= 0, ``normalizing_R`` is 0 and
        # the division below blows up — confirm callers guarantee at least
        # one positive regret before this is reached.
        normalizing_R = np.max(R_plus)
        R_plus /= normalizing_R
        high_c = (np.max(R_plus) ** 2) / 2

        def _pot(c):
            """Potential function whose root in ``c`` sets the weight scale.

            Parameters
            ----------
            c : float
                candidate scale parameter

            Returns
            -------
            potential : float
            """
            return np.mean(np.exp((R_plus ** 2) / (2 * c))) - np.e

        c_t = bisect(_pot, low_c, high_c)

        def _prob(r, c_t):
            """Unnormalized weight for an estimator with (clipped) regret ``r``.

            Parameters
            ----------
            r : float
                regret
            c_t : float
                current value for c

            Returns
            -------
            prob : float
                probability (before normalization)
            """
            return (r / c_t) * np.exp((r ** 2) / (2 * c_t))

        self.weights = np.array([_prob(r, c_t) for r in R_plus])
        self.weights /= np.sum(self.weights)
class NNLSEnsemble(_PredictionWeightedEnsembler):
    """Ensemble forecasts with non-negative least squares based weighting.

    Ensemble class that performs a non-negative least squares fit of the
    weights against the estimators. Keeps track of all observations seen so
    far and fits to the full history.

    Parameters
    ----------
    n_estimators : int
        number of estimators
    loss_func : function
        loss function which follows sklearn.metrics API, for updating weights
    """

    _tags = {
        "univariate-only": True,
        "requires-fh-in-fit": False,
        "handles-missing-data": False,
    }

    def __init__(self, n_estimators=10, loss_func=None):
        super().__init__(n_estimators=n_estimators, loss_func=loss_func)
        # Running history of every prediction/target pair seen so far.
        self.total_y_pred = np.empty((n_estimators, 0))
        self.total_y_true = np.empty(0)

    def update(self, y_pred, y_true):
        """Update the online ensemble with new data.

        Parameters
        ----------
        y_pred : np.ndarray, shape=(estimator_axis, time_axis)
            array with predictions from the estimators
        y_true : np.ndarray, shape=(time_axis,)
            array with actual values for the predicted quantity
        """
        self.total_y_pred = np.concatenate((self.total_y_pred, y_pred), axis=1)
        self.total_y_true = np.concatenate((self.total_y_true, y_true))
        # nnls solves min ||A w - b||_2 subject to w >= 0; the residual norm
        # it also returns is unused here.
        weights, _ = nnls(self.total_y_pred.T, self.total_y_true)
        self.weights = weights
| 30.692857 | 85 | 0.60647 |
import numpy as np
from scipy.optimize import bisect
from scipy.optimize import nnls
class _PredictionWeightedEnsembler:
    """Base wrapper for ensemble algorithms that combine multiple forecasters.

    Provides default behaviour for uniform weight initialisation, weighted
    prediction and multiplicative weight updates.

    Parameters
    ----------
    n_estimators : int
        number of estimators
    loss_func : function
        loss function following the sklearn.metrics API, used when updating
        weights
    """

    # sktime-style capability tags.
    _tags = {
        "univariate-only": True,
        "requires-fh-in-fit": False,
        "handles-missing-data": False,
    }
    def __init__(self, n_estimators=10, loss_func=None):
        self.n_estimators = n_estimators
        # Start from a uniform distribution over the estimators.
        self.weights = np.ones(n_estimators) / n_estimators
        self.loss_func = loss_func
        super(_PredictionWeightedEnsembler, self).__init__()
    def _predict(self, y_pred):
        """Return the weighted average of the estimators' predictions.

        ``y_pred`` must have the estimator axis first so it contracts with
        ``self.weights`` in the dot product.
        """
        prediction = np.dot(self.weights, y_pred)
        return prediction
    def _modify_weights(self, new_array):
        """Multiply the weights elementwise by ``new_array`` and renormalise."""
        self.weights = self.weights * new_array
        self.weights /= np.sum(self.weights)
    def _update(self, y_pred, y_true):
        """Update weights from observed data; implemented by subclasses."""
        raise NotImplementedError()
    def _uniform_weights(self, n_estimators):
        """Reset the weights over ``n_estimators`` estimators to uniform."""
        # NOTE(review): sets ``self.n`` but leaves ``self.n_estimators``
        # unchanged — confirm nothing relies on the stale attribute.
        self.n = n_estimators
        self.weights = np.ones(n_estimators) / n_estimators
class HedgeExpertEnsemble(_PredictionWeightedEnsembler):
    """Hedge-style ensemble with a forecasting horizon and normalising constant.

    Parameters
    ----------
    n_estimators : int
        number of estimators
    T : int
        forecasting horizon (in timesteps)
    a : float
        normalising constant
    loss_func : function
        loss function following the sklearn.metrics API
    """

    _tags = {
        "univariate-only": True,
        "requires-fh-in-fit": False,
        "handles-missing-data": False,
    }
    def __init__(self, n_estimators=10, T=10, a=1, loss_func=None):
        super().__init__(n_estimators=n_estimators, loss_func=loss_func)
        self.T = T
        self.a = a
        self._uniform_weights(n_estimators)
        # NOTE(review): redundant — already assigned by the parent constructor.
        self.loss_func = loss_func
class NormalHedgeEnsemble(HedgeExpertEnsemble):
    """Parameter-free hedging algorithm.

    Hedge-style implementation of "A Parameter-free Hedging Algorithm",
    Chaudhuri, Freund, Hsu (2009).

    Parameters
    ----------
    n_estimators : int
        number of estimators
    a : float
        normalising constant
    loss_func : function
        loss function following the sklearn.metrics API
    """

    _tags = {
        "univariate-only": True,
        "requires-fh-in-fit": False,
        "handles-missing-data": False,
    }
    def __init__(self, n_estimators=10, a=1, loss_func=None):
        super().__init__(n_estimators=n_estimators, T=None, a=a, loss_func=loss_func)
        # Cumulative regret of each estimator relative to the ensemble.
        self.R = np.zeros(n_estimators)
    def update(self, y_pred, y_true, low_c=0.01):
        """Accumulate per-timestep regrets and refresh the weights.

        ``y_pred`` has shape (estimator_axis, time_axis); ``y_true`` has
        shape (time_axis,).
        """
        assert y_pred.shape[1] == len(y_true), "Time Dimension Matches"
        time_length = y_pred.shape[1]
        for i in range(time_length):
            # Per-estimator loss on the i-th observation.
            loss_vector = np.array(
                [
                    self.loss_func([prediction], [y_true[i]])
                    for prediction in y_pred[:, i]
                ]
            )
            average_loss = np.dot(self.weights, loss_vector)
            # Positive regret means that estimator beat the ensemble.
            instant_regret = average_loss - loss_vector
            self.R += instant_regret
            self._update_weights(low_c=low_c)
    def _update_weights(self, low_c=0.01):
        """Recompute the weights via a potential-function root search over c."""
        # Clip negative regrets to zero.
        R_plus = np.array(list(map(lambda x: 0 if 0 > x else x, self.R)))
        # NOTE(review): if every regret is <= 0 this divides by zero — confirm
        # callers guarantee at least one positive regret.
        normalizing_R = np.max(R_plus)
        R_plus /= normalizing_R
        low_c = low_c
        high_c = (max(R_plus) ** 2) / 2
        def _pot(c):
            """Potential function whose root in ``c`` sets the weight scale."""
            return np.mean(np.exp((R_plus ** 2) / (2 * c))) - np.e
        c_t = bisect(_pot, low_c, high_c)
        def _prob(r, c_t):
            """Unnormalized weight for an estimator with clipped regret ``r``."""
            return (r / c_t) * np.exp((r ** 2) / (2 * c_t))
        self.weights = np.array([_prob(r, c_t) for r in R_plus])
        self.weights /= np.sum(self.weights)
class NNLSEnsemble(_PredictionWeightedEnsembler):
    """Ensemble that fits its weights by non-negative least squares (NNLS).

    Keeps every observation seen so far and refits the weights against the
    full history on each update.

    Parameters
    ----------
    n_estimators : int
        number of estimators
    loss_func : function
        loss function following the sklearn.metrics API
    """

    _tags = {
        "univariate-only": True,
        "requires-fh-in-fit": False,
        "handles-missing-data": False,
    }
    def __init__(self, n_estimators=10, loss_func=None):
        super().__init__(n_estimators=n_estimators, loss_func=loss_func)
        # Running history of every prediction/target pair seen so far.
        self.total_y_pred = np.empty((n_estimators, 0))
        self.total_y_true = np.empty(0)
    def update(self, y_pred, y_true):
        """Append new observations and refit the weights via NNLS."""
        self.total_y_pred = np.concatenate((self.total_y_pred, y_pred), axis=1)
        self.total_y_true = np.concatenate((self.total_y_true, y_true))
        # nnls solves min ||A w - b||_2 subject to w >= 0.
        weights, loss = nnls(self.total_y_pred.T, self.total_y_true)
        self.weights = weights
| true | true |
1c2d5f2c9df3da18ac55ae059c716cda6d52e5df | 1,184 | py | Python | IPython/core/ipapi.py | tinyclues/ipython | 71e32606b0242772b81c9be0d40751ba47d95f2c | [
"BSD-3-Clause-Clear"
] | 1 | 2016-05-26T10:57:18.000Z | 2016-05-26T10:57:18.000Z | IPython/core/ipapi.py | adgaudio/ipython | a924f50c0f7b84127391f1c396326258c2b303e2 | [
"BSD-3-Clause-Clear"
] | null | null | null | IPython/core/ipapi.py | adgaudio/ipython | a924f50c0f7b84127391f1c396326258c2b303e2 | [
"BSD-3-Clause-Clear"
] | null | null | null | # encoding: utf-8
"""
This module is *completely* deprecated and should no longer be used for
any purpose. Currently, we have a few parts of the core that have
not been componentized and thus, still rely on this module. When everything
has been made into a component, this module will be sent to deathrow.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2009 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def get():
    """Get the global InteractiveShell instance."""
    # Imported lazily to avoid a circular import at module load time.
    from IPython.core.interactiveshell import InteractiveShell

    shell = InteractiveShell.instance()
    return shell
| 39.466667 | 78 | 0.451014 |
def get():
    """Return the global InteractiveShell singleton instance."""
    from IPython.core.interactiveshell import InteractiveShell
    return InteractiveShell.instance()
| true | true |
1c2d5f30fd5127aa23a56b3d35a865db7e43e244 | 341 | py | Python | accounts/models.py | achuthvarghese/django_realtime_chat | 922bd1971c7c0102f007581967e6aecf40171e48 | [
"MIT"
] | null | null | null | accounts/models.py | achuthvarghese/django_realtime_chat | 922bd1971c7c0102f007581967e6aecf40171e48 | [
"MIT"
] | null | null | null | accounts/models.py | achuthvarghese/django_realtime_chat | 922bd1971c7c0102f007581967e6aecf40171e48 | [
"MIT"
] | 1 | 2021-10-02T06:52:09.000Z | 2021-10-02T06:52:09.000Z | import uuid
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.utils.translation import gettext_lazy as _
class User(AbstractUser):
    """Custom user model whose primary key is a random UUID."""

    # UUID primary key instead of Django's default auto-increment integer.
    id = models.UUIDField(
        _("ID"), primary_key=True, default=uuid.uuid4, editable=False, unique=True
    )

    class Meta:
        # Keep the table name used by django.contrib.auth's default model.
        db_table = "auth_user"
| 22.733333 | 82 | 0.72434 | import uuid
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.utils.translation import gettext_lazy as _
class User(AbstractUser):
    """Custom user model whose primary key is a random UUID."""

    # UUID primary key instead of Django's default auto-increment integer.
    id = models.UUIDField(
        _("ID"), primary_key=True, default=uuid.uuid4, editable=False, unique=True
    )

    class Meta:
        # Keep the table name used by django.contrib.auth's default model.
        db_table = "auth_user"
| true | true |
1c2d5fe47862d14ed04c6b7a8abb128338c70732 | 8,005 | py | Python | formulario.py | AtlasGold/Formulario-Abro | f7afb4c6b192c58c6862ef557b15b95cd3205832 | [
"MIT"
] | null | null | null | formulario.py | AtlasGold/Formulario-Abro | f7afb4c6b192c58c6862ef557b15b95cd3205832 | [
"MIT"
] | null | null | null | formulario.py | AtlasGold/Formulario-Abro | f7afb4c6b192c58c6862ef557b15b95cd3205832 | [
"MIT"
] | null | null | null | from logging import exception
from PIL import Image
from os import write
import streamlit as st
from streamlit.type_util import Key
import os
#from conexão.conexao import cnxn,cursor
#trocar o nome da pagina e o icone
st.set_page_config(page_title = "Abro - Odontologia Especializada",
page_icon=":smiley:")
#remover o botão de Menu
hide_st_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style>
"""
st.markdown(hide_st_style, unsafe_allow_html=True)
#conexão com o banco
#import mysql.connector
#cnxn = mysql.connector.connect(host=st.secrets["host"], user=st.secrets["user"], passwd= st.secrets["passwd"], db= st.secrets["db"])
#cursor = cnxn.cursor()
import mysql.connector
cnxn = mysql.connector.connect(host=os.environ['host'], user=os.environ['user'], passwd= os.environ['passwd'], db= os.environ['db'])
cursor = cnxn.cursor()
#enviar os dados
def inserir(Nome,Telefone,CPF):
    """Insert one patient registration row (name, phone, CPF) into ``cadastro``."""
    # Parameterized query: values are bound separately, not string-interpolated.
    cursor.execute('INSERT INTO cadastro(Nome,Telefone,CPF) VALUES (%s, %s, %s)',(Nome,Telefone,CPF))
    cnxn.commit()
def inserir_an(motivo,tratamento,medicamento,qmedicamentos,alergia,qalergias,anestesia,ultimo,canal,gengiva,fuma,sangra,dor,desmaio,gravida,procedimento,cpf,nome):
    """Insert one anamnesis (medical-history questionnaire) row into ``anamnese1``."""
    # Parameterized query; the 18 placeholders match the 18 columns listed.
    cursor.execute("INSERT INTO anamnese1(`Qual O Motivo Da Consulta`,`Tratamento ou Problema de Saude`,`Está Tomando Algum Medicamento`,`Quais Medicamentos`,`Tem alergia a algum medicamento`,`Apresenta Alergia a Quais Medicamentos`,`Teve Alguma Reação a Anestesia Local`,`Quando Foi o Seu Ultimo Tratamento Odontologico`,`Tratamento de Canal Protese Implante Perdeu um Dente`,`Sua Gengiva Sangra Com Frequência`,`Voce Fuma`,`Quando Você se Corta Sangra Muito`,`Dores de Dente Cabeça Face Ouvido Articulações`,`Teve Algum Desmaio Ataques Nervoso Epilepsia ou Convulsoes`,`Pode Estar Gravida`,`Procedimento Facial Botox Preenchimento Hialurônico PMA`,`CPF`,`Nome`) VALUES (%s, %s, %s,%s, %s, %s,%s, %s, %s,%s, %s, %s,%s, %s, %s,%s, %s, %s)",(motivo,tratamento,medicamento,qmedicamentos,alergia,qalergias,anestesia,ultimo,canal,gengiva,fuma,sangra,dor,desmaio,gravida,procedimento,cpf,nome))
    cnxn.commit()
def inserir_so(profissão,time,qtime,animal,qanimal,filho,nfilho,medo,sorriso,facebook,instagram,qinstagram,hobby,qhobby,ambiente,generom,programação, generof,cpf,nome):
    """Insert one social/lifestyle questionnaire row into ``sociais``."""
    # Parameterized query; the 20 placeholders match the 20 columns listed.
    cursor.execute("INSERT INTO sociais(`Qual sua profissão ?`,`Gosta de Futebol ?`,`Times que torce`,`Tem algum animal de estimação`,`Qual animal?`,`Tem filhos ?`,`Como se chamam ?`,`Tem medo de dentista ?`,`Esta Satisfeito Com Sua Estética Facil e de Sorriso ?`,`Tem Facebook`,`Tem Instagram ?`,`Qual instagram ?`,`Tem algum Hobby ?`,`Quais hobbies?`,`Gosta de música ambiente ?`,`Qual Gênero/Ritmo Gosta de Ouvir ?`,`Qual Tipo De Programa De Televisão Gosta De Assistir ?`,`Qual Gênero De Filme Gosta ?`,`CPF`,`Nome`) VALUES (%s, %s, %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)",(profissão,time,qtime,animal,qanimal,filho,nfilho,medo,sorriso,facebook,instagram,qinstagram,hobby,qhobby,ambiente,generom,programação, generof,cpf,nome))
    cnxn.commit()
#receber os dados
st.title("Cadastro")
input_nome = st.text_input("Digite Seu Nome Completo")
input_telefone = st.text_input("Telefone Para Contato")
input_CPF = st.text_input("CPF")
col1, col2, col3 = st.columns(3)
image = Image.open('abbro.png')
col2.image(image, use_column_width=True)
st.title("Anamnese")
input_01 = st.text_input("Qual O Motivo Da Consulta ? ")
input_02 = st.selectbox("Está Fazendo Algum Tratamento Médico ou Tem Algum Problema de Saúde ? ",["","Sim","Não"])
input_03 = st.selectbox("Está Tomando Algum Medicamento?",["","Sim","Não"])
if input_03 ==("Sim"):
input_003 =st.text_input("Quais Medicamentos ?")
input_04 = st.selectbox("Tem Alergia a Algum Medicamento ?",["","Sim","Não"])
if input_04 ==("Sim"):
input_004 = st.text_input("Apresenta Alergia a Quais Medicamentos ?")
input_05 = st.selectbox("Teve Alguma Reação a Anestesia Local ?",["","Sim","Não"])
input_06 = st.text_input("Quando Foi o Seu Ultimo Tratamento Odontológico ?")
input_006 = st.selectbox ("Já Realizou Tratamento de Canal ? Prótese ? Implante ? Perdeu Algum Dente ?",["","Sim","Não"])
input_07 = st.selectbox("Sua Gengiva Sangra Com Frequência ?",["","Sim","Não"])
input_08 = st.selectbox("Você Fuma ?",["","Sim","Não"])
input_09 = st.selectbox("Quando Você se Corta, Sangra Muito ?",["","Sim","Não"])
input_10 = st.selectbox("Sente Dores de Dente ? Cabeça ? Dores na Face ? Ouvido ou Articulações ?",["","Sim","Não"])
input_11 = st.selectbox("Teve Algum Desmaio, Ataques Nervoso, Epilepsia ou Convulsões ? ",["","Sim","Não"])
input_12 = st.selectbox("Pode Estar Gravida ?",["","Sim","Não"])
input_25 = st.selectbox("Já Realizou Algum Procedimento Estético Facial ? Botox? Preenchimento com Ac. Hialurônico ou PMA ?",["","Sim","Não"])
st.title("Sobre Você")
st.warning("Perguntas Opcionais")
input_13 = st.text_input("Qual Sua Profissão ?")
if input_13=="":
input_13=("Não Informou")
input_14 = st.selectbox("Gosta de Futebol ?",["","Sim","Não"])
if input_14 == ("Sim") :
input_014 = st.text_input("Para Quais Times Você Torce ?")
input_15 = st.selectbox("Tem Algum Animal De Estimação ?",["","Sim","Não"])
if input_15 == ("Sim"):
input_015 = st.text_input("Qual ?")
input_16 = st.selectbox("Tem Filhos ?",["","Sim","Não"])
if input_16 == ("Sim"):
input_0016 = st.text_input("Como se Chamam ?")
input_17 = st.selectbox("Tem Medo De Dentista ?",["","Sim","Não"])
if input_17=="":
input_17=("Não Informou")
input_18 = st.selectbox("Esta Satisfeito Com Sua Estética Facil e de Sorriso ? ",["","Sim","Não"])
if input_18=="":
input_18=("Não Informou")
input_19 = st.selectbox("Tem Facebook? ",["","Sim","Não"])
if input_19=="":
input_19=("Não Informou")
input_20 = st.selectbox("Tem Instagram ?",["","Sim","Não"])
if input_20 == ("Sim"):
input_020 = st.text_input("Qual?",key='chave')
input_21 = st.selectbox("Tem Algum Hobby ?",["","Sim","Não"])
if input_21 == ("Sim"):
input_021 = st.text_input("Quais ?")
input_22 = st.selectbox ("Gosta De Musica Ambiente ? ",["","Sim","Não"])
if input_22 == ("Sim"):
input_022 = st.text_input("Qual Gênero/Ritmo Gosta de Ouvir ?")
input_23 = st.text_input ("Qual Tipo De Programa De Televisão Gosta De Assistir ?")
if input_23=="":
input_23=("Não Informou")
input_24 = st.text_input ("Qual Gênero De Filme Gosta ?")
#tirar a obrigatoriedade de responder os dados pessoais
if input_24=="":
input_24=("")
if input_03 == 'Não':
input_003 = 'Não'
if input_04 == 'Não':
input_004 = 'Não'
if input_14 == 'Não':
input_014 = 'Nenhum'
if input_14 == "":
input_014=("Não Informou")
if input_15 == 'Não':
input_015 = 'Nenhum'
if input_15 == "":
input_015=("Não informou")
if input_16 == 'Não':
input_0016 = 'Não tenho'
if input_16 == "":
input_0016=("Não informou")
if input_20 == 'Não':
input_020 = 'Não tenho'
if input_20 == "":
input_020=("Não informou")
if input_21 == 'Não':
input_021 = 'Não tenho'
if input_21 == "":
input_021=("Não informou")
if input_22 == 'Não':
input_022 = 'Não gosto'
if input_22 == "":
input_022=("Não informou")
# Submit button: persists the three forms (registration, anamnesis, social).
try:
    if st.button("Enviar"):
        inserir(input_nome, input_telefone, input_CPF)
        inserir_an(input_01, input_02, input_03, input_003, input_04, input_004, input_05, input_06, input_006, input_07, input_08, input_09, input_10, input_11, input_12, input_25, input_CPF, input_nome)
        inserir_so(input_13, input_14, input_014, input_15, input_015, input_16, input_0016, input_17, input_18, input_19, input_20, input_020, input_21, input_021, input_22, input_022, input_23, input_24, input_CPF, input_nome)
        # The old ``if st.button == 0`` branch compared the function object to
        # 0 and could never be true; only the always-taken success path is kept.
        st.success('Tudo Certo !')
except Exception:
    # Most likely a NameError: a conditional question was left unanswered so
    # its input_0xx variable was never created; also covers database errors.
    # (Was a bare ``except:`` — narrowed so Ctrl-C/SystemExit still propagate.)
    st.error("Algumas Informações Importantes Estão Faltando")
| 41.910995 | 889 | 0.690319 | from logging import exception
from PIL import Image
from os import write
import streamlit as st
from streamlit.type_util import Key
import os
st.set_page_config(page_title = "Abro - Odontologia Especializada",
page_icon=":smiley:")
hide_st_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style>
"""
st.markdown(hide_st_style, unsafe_allow_html=True)
import mysql.connector
cnxn = mysql.connector.connect(host=os.environ['host'], user=os.environ['user'], passwd= os.environ['passwd'], db= os.environ['db'])
cursor = cnxn.cursor()
def inserir(Nome,Telefone,CPF):
    """Insert one patient registration row (name, phone, CPF) into ``cadastro``."""
    # Parameterized query: values are bound separately, not string-interpolated.
    cursor.execute('INSERT INTO cadastro(Nome,Telefone,CPF) VALUES (%s, %s, %s)',(Nome,Telefone,CPF))
    cnxn.commit()
def inserir_an(motivo,tratamento,medicamento,qmedicamentos,alergia,qalergias,anestesia,ultimo,canal,gengiva,fuma,sangra,dor,desmaio,gravida,procedimento,cpf,nome):
    """Insert one anamnesis (medical-history questionnaire) row into ``anamnese1``."""
    # Parameterized query; the 18 placeholders match the 18 columns listed.
    cursor.execute("INSERT INTO anamnese1(`Qual O Motivo Da Consulta`,`Tratamento ou Problema de Saude`,`Está Tomando Algum Medicamento`,`Quais Medicamentos`,`Tem alergia a algum medicamento`,`Apresenta Alergia a Quais Medicamentos`,`Teve Alguma Reação a Anestesia Local`,`Quando Foi o Seu Ultimo Tratamento Odontologico`,`Tratamento de Canal Protese Implante Perdeu um Dente`,`Sua Gengiva Sangra Com Frequência`,`Voce Fuma`,`Quando Você se Corta Sangra Muito`,`Dores de Dente Cabeça Face Ouvido Articulações`,`Teve Algum Desmaio Ataques Nervoso Epilepsia ou Convulsoes`,`Pode Estar Gravida`,`Procedimento Facial Botox Preenchimento Hialurônico PMA`,`CPF`,`Nome`) VALUES (%s, %s, %s,%s, %s, %s,%s, %s, %s,%s, %s, %s,%s, %s, %s,%s, %s, %s)",(motivo,tratamento,medicamento,qmedicamentos,alergia,qalergias,anestesia,ultimo,canal,gengiva,fuma,sangra,dor,desmaio,gravida,procedimento,cpf,nome))
    cnxn.commit()
def inserir_so(profissão,time,qtime,animal,qanimal,filho,nfilho,medo,sorriso,facebook,instagram,qinstagram,hobby,qhobby,ambiente,generom,programação, generof,cpf,nome):
    """Insert one social/lifestyle questionnaire row into ``sociais``."""
    # Parameterized query; the 20 placeholders match the 20 columns listed.
    cursor.execute("INSERT INTO sociais(`Qual sua profissão ?`,`Gosta de Futebol ?`,`Times que torce`,`Tem algum animal de estimação`,`Qual animal?`,`Tem filhos ?`,`Como se chamam ?`,`Tem medo de dentista ?`,`Esta Satisfeito Com Sua Estética Facil e de Sorriso ?`,`Tem Facebook`,`Tem Instagram ?`,`Qual instagram ?`,`Tem algum Hobby ?`,`Quais hobbies?`,`Gosta de música ambiente ?`,`Qual Gênero/Ritmo Gosta de Ouvir ?`,`Qual Tipo De Programa De Televisão Gosta De Assistir ?`,`Qual Gênero De Filme Gosta ?`,`CPF`,`Nome`) VALUES (%s, %s, %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)",(profissão,time,qtime,animal,qanimal,filho,nfilho,medo,sorriso,facebook,instagram,qinstagram,hobby,qhobby,ambiente,generom,programação, generof,cpf,nome))
    cnxn.commit()
st.title("Cadastro")
input_nome = st.text_input("Digite Seu Nome Completo")
input_telefone = st.text_input("Telefone Para Contato")
input_CPF = st.text_input("CPF")
col1, col2, col3 = st.columns(3)
image = Image.open('abbro.png')
col2.image(image, use_column_width=True)
st.title("Anamnese")
input_01 = st.text_input("Qual O Motivo Da Consulta ? ")
input_02 = st.selectbox("Está Fazendo Algum Tratamento Médico ou Tem Algum Problema de Saúde ? ",["","Sim","Não"])
input_03 = st.selectbox("Está Tomando Algum Medicamento?",["","Sim","Não"])
if input_03 ==("Sim"):
input_003 =st.text_input("Quais Medicamentos ?")
input_04 = st.selectbox("Tem Alergia a Algum Medicamento ?",["","Sim","Não"])
if input_04 ==("Sim"):
input_004 = st.text_input("Apresenta Alergia a Quais Medicamentos ?")
input_05 = st.selectbox("Teve Alguma Reação a Anestesia Local ?",["","Sim","Não"])
input_06 = st.text_input("Quando Foi o Seu Ultimo Tratamento Odontológico ?")
input_006 = st.selectbox ("Já Realizou Tratamento de Canal ? Prótese ? Implante ? Perdeu Algum Dente ?",["","Sim","Não"])
input_07 = st.selectbox("Sua Gengiva Sangra Com Frequência ?",["","Sim","Não"])
input_08 = st.selectbox("Você Fuma ?",["","Sim","Não"])
input_09 = st.selectbox("Quando Você se Corta, Sangra Muito ?",["","Sim","Não"])
input_10 = st.selectbox("Sente Dores de Dente ? Cabeça ? Dores na Face ? Ouvido ou Articulações ?",["","Sim","Não"])
input_11 = st.selectbox("Teve Algum Desmaio, Ataques Nervoso, Epilepsia ou Convulsões ? ",["","Sim","Não"])
input_12 = st.selectbox("Pode Estar Gravida ?",["","Sim","Não"])
input_25 = st.selectbox("Já Realizou Algum Procedimento Estético Facial ? Botox? Preenchimento com Ac. Hialurônico ou PMA ?",["","Sim","Não"])
st.title("Sobre Você")
st.warning("Perguntas Opcionais")
input_13 = st.text_input("Qual Sua Profissão ?")
if input_13=="":
input_13=("Não Informou")
input_14 = st.selectbox("Gosta de Futebol ?",["","Sim","Não"])
if input_14 == ("Sim") :
input_014 = st.text_input("Para Quais Times Você Torce ?")
input_15 = st.selectbox("Tem Algum Animal De Estimação ?",["","Sim","Não"])
if input_15 == ("Sim"):
input_015 = st.text_input("Qual ?")
input_16 = st.selectbox("Tem Filhos ?",["","Sim","Não"])
if input_16 == ("Sim"):
input_0016 = st.text_input("Como se Chamam ?")
input_17 = st.selectbox("Tem Medo De Dentista ?",["","Sim","Não"])
if input_17=="":
input_17=("Não Informou")
input_18 = st.selectbox("Esta Satisfeito Com Sua Estética Facil e de Sorriso ? ",["","Sim","Não"])
if input_18=="":
input_18=("Não Informou")
input_19 = st.selectbox("Tem Facebook? ",["","Sim","Não"])
if input_19=="":
input_19=("Não Informou")
input_20 = st.selectbox("Tem Instagram ?",["","Sim","Não"])
if input_20 == ("Sim"):
input_020 = st.text_input("Qual?",key='chave')
input_21 = st.selectbox("Tem Algum Hobby ?",["","Sim","Não"])
if input_21 == ("Sim"):
input_021 = st.text_input("Quais ?")
input_22 = st.selectbox ("Gosta De Musica Ambiente ? ",["","Sim","Não"])
if input_22 == ("Sim"):
input_022 = st.text_input("Qual Gênero/Ritmo Gosta de Ouvir ?")
input_23 = st.text_input ("Qual Tipo De Programa De Televisão Gosta De Assistir ?")
if input_23=="":
input_23=("Não Informou")
input_24 = st.text_input ("Qual Gênero De Filme Gosta ?")
if input_24=="":
input_24=("")
if input_03 == 'Não':
input_003 = 'Não'
if input_04 == 'Não':
input_004 = 'Não'
if input_14 == 'Não':
input_014 = 'Nenhum'
if input_14 == "":
input_014=("Não Informou")
if input_15 == 'Não':
input_015 = 'Nenhum'
if input_15 == "":
input_015=("Não informou")
if input_16 == 'Não':
input_0016 = 'Não tenho'
if input_16 == "":
input_0016=("Não informou")
if input_20 == 'Não':
input_020 = 'Não tenho'
if input_20 == "":
input_020=("Não informou")
if input_21 == 'Não':
input_021 = 'Não tenho'
if input_21 == "":
input_021=("Não informou")
if input_22 == 'Não':
input_022 = 'Não gosto'
if input_22 == "":
input_022=("Não informou")
# Submit button: persists the three forms (registration, anamnesis, social).
try:
    if st.button("Enviar"):
        inserir(input_nome, input_telefone, input_CPF)
        inserir_an(input_01, input_02, input_03, input_003, input_04, input_004, input_05, input_06, input_006, input_07, input_08, input_09, input_10, input_11, input_12, input_25, input_CPF, input_nome)
        inserir_so(input_13, input_14, input_014, input_15, input_015, input_16, input_0016, input_17, input_18, input_19, input_20, input_020, input_21, input_021, input_22, input_022, input_23, input_24, input_CPF, input_nome)
        # The old ``if st.button == 0`` branch compared the function object to
        # 0 and could never be true; only the always-taken success path is kept.
        st.success('Tudo Certo !')
except Exception:
    # Most likely a NameError: a conditional question was left unanswered so
    # its input_0xx variable was never created; also covers database errors.
    # (Was a bare ``except:`` — narrowed so Ctrl-C/SystemExit still propagate.)
    st.error("Algumas Informações Importantes Estão Faltando")
| true | true |
1c2d5ff1aa83ff5041a74b069d2d258e7e229a6d | 2,267 | py | Python | reid/insightface/model.py | amirassov/topcoder-facial-marathon | 37f6828a589717d0004dd84d51eb7bc6a1b310fd | [
"MIT"
] | 11 | 2019-08-19T11:49:11.000Z | 2021-05-21T06:00:08.000Z | reid/insightface/model.py | amirassov/topcoder-facial-marathon | 37f6828a589717d0004dd84d51eb7bc6a1b310fd | [
"MIT"
] | null | null | null | reid/insightface/model.py | amirassov/topcoder-facial-marathon | 37f6828a589717d0004dd84d51eb7bc6a1b310fd | [
"MIT"
] | 3 | 2019-10-09T09:20:56.000Z | 2020-08-28T02:39:01.000Z | import cv2
import numpy as np
import mxnet as mx
from sklearn.preprocessing import normalize
from reid.insightface.mtcnn import MtcnnDetector
from reid.insightface.utils import preprocess
def get_embedder(ctx, image_size, model_prefix: str, layer):
    """Load a checkpointed MXNet network truncated at ``layer`` and bind it
    for single-image inference on the given context."""
    symbol, args, auxs = mx.model.load_checkpoint(model_prefix, 0)
    # Truncate the graph at the requested internal layer's output.
    truncated = symbol.get_internals()[layer + '_output']
    module = mx.mod.Module(symbol=truncated, context=ctx, label_names=None)
    module.bind(data_shapes=[('data', (1, 3, image_size[0], image_size[1]))])
    module.set_params(args, auxs)
    return module
class ArcFaceModel:
    """Face-recognition pipeline: MTCNN detection/alignment + ArcFace embedding.

    Parameters
    ----------
    embedder_path : str
        MXNet checkpoint prefix of the ArcFace embedder.
    mtcnn_path : str
        folder containing the MTCNN detector models.
    image_size : tuple of int
        (height, width) of the aligned face crop fed to the embedder.
    """

    def __init__(self, embedder_path, mtcnn_path, image_size=(112, 112)):
        self.image_size = image_size
        self.ctx = mx.cpu()
        self.embedder = get_embedder(self.ctx, image_size, embedder_path, 'fc1')
        self.detector = MtcnnDetector(
            model_folder=mtcnn_path,
            ctx=self.ctx,
            accurate_landmark=True,
            threshold=[0.6, 0.7, 0.8]
        )

    def predict(self, image):
        """Return an L2-normalized embedding for the first detected face.

        Returns ``None`` when no face is found. (The bbox/landmark values
        from ``detect`` are not needed here.)
        """
        preprocessed_img, _bbox, _landmark = self.detect(image)
        if preprocessed_img is None:
            return None
        return self.embed(preprocessed_img)

    def align(self, image, bbox, landmark):
        """Warp the face to the canonical crop and convert it to CHW RGB."""
        # MTCNN flattens the 5 landmarks as (x1..x5, y1..y5); reshape to (5, 2).
        landmark = landmark.reshape((2, 5)).T
        preprocessed_img = preprocess(image, bbox, landmark, image_size=self.image_size)
        preprocessed_img = cv2.cvtColor(preprocessed_img, cv2.COLOR_BGR2RGB)
        # HWC -> CHW for the MXNet data layout.
        preprocessed_img = np.transpose(preprocessed_img, (2, 0, 1))
        return preprocessed_img, bbox, landmark

    def detect(self, image):
        """Detect faces; align and return the first one (or ``(None, None, None)``)."""
        bboxes, landmarks = self.detector.detect_face(image)
        if bboxes is None:
            return None, None, None
        # Columns 0-3 are the box coordinates; column 4 (the confidence
        # score) was previously split off but never used.
        return self.align(image, bboxes[0, :4], landmarks[0])

    def embed(self, image):
        """Run the embedder on one aligned CHW image and L2-normalize the result."""
        input_blob = np.expand_dims(image, axis=0)
        data = mx.nd.array(input_blob)
        db = mx.io.DataBatch(data=(data,))
        self.embedder.forward(db, is_train=False)
        embedding = self.embedder.get_outputs()[0].asnumpy()
        return normalize(embedding).flatten()
| 36.564516 | 88 | 0.664314 | import cv2
import numpy as np
import mxnet as mx
from sklearn.preprocessing import normalize
from reid.insightface.mtcnn import MtcnnDetector
from reid.insightface.utils import preprocess
def get_embedder(ctx, image_size, model_prefix: str, layer):
sym, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, 0)
all_layers = sym.get_internals()
sym = all_layers[layer + '_output']
model = mx.mod.Module(symbol=sym, context=ctx, label_names=None)
model.bind(data_shapes=[('data', (1, 3, image_size[0], image_size[1]))])
model.set_params(arg_params, aux_params)
return model
class ArcFaceModel:
def __init__(self, embedder_path, mtcnn_path, image_size=(112, 112)):
self.image_size = image_size
self.ctx = mx.cpu()
self.embedder = get_embedder(self.ctx, image_size, embedder_path, 'fc1')
self.detector = MtcnnDetector(
model_folder=mtcnn_path,
ctx=self.ctx,
accurate_landmark=True,
threshold=[0.6, 0.7, 0.8]
)
def predict(self, image):
embedding = None
preprocessed_img, bbox, landmark = self.detect(image)
if preprocessed_img is not None:
embedding = self.embed(preprocessed_img)
return embedding
def align(self, image, bbox, landmark):
landmark = landmark.reshape((2, 5)).T
preprocessed_img = preprocess(image, bbox, landmark, image_size=self.image_size)
preprocessed_img = cv2.cvtColor(preprocessed_img, cv2.COLOR_BGR2RGB)
preprocessed_img = np.transpose(preprocessed_img, (2, 0, 1))
return preprocessed_img, bbox, landmark
def detect(self, image):
bboxes, landmarks = self.detector.detect_face(image)
if bboxes is None:
return None, None, None
bboxes, scores = bboxes[:, :4], bboxes[:, 4]
return self.align(image, bboxes[0], landmarks[0])
def embed(self, image):
input_blob = np.expand_dims(image, axis=0)
data = mx.nd.array(input_blob)
db = mx.io.DataBatch(data=(data,))
self.embedder.forward(db, is_train=False)
embedding = self.embedder.get_outputs()[0].asnumpy()
embedding = normalize(embedding).flatten()
return embedding
| true | true |
1c2d60629e50286a1c1d72d99a70b5b0dfbf9049 | 9,690 | py | Python | fastai2/text/learner.py | moritzschwyzer/fastai2 | 3aa40a4e736ffac50b17359a399aef40ac11fcca | [
"Apache-2.0"
] | null | null | null | fastai2/text/learner.py | moritzschwyzer/fastai2 | 3aa40a4e736ffac50b17359a399aef40ac11fcca | [
"Apache-2.0"
] | null | null | null | fastai2/text/learner.py | moritzschwyzer/fastai2 | 3aa40a4e736ffac50b17359a399aef40ac11fcca | [
"Apache-2.0"
] | null | null | null | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/37_text.learner.ipynb (unless otherwise specified).
__all__ = ['match_embeds', 'load_ignore_keys', 'TextLearner', 'decode_spec_tokens', 'LMLearner',
'language_model_learner', 'text_classifier_learner']
# Cell
from ..basics import *
from .core import *
from .data import *
from .models.core import *
from .models.awdlstm import *
from ..callback.rnn import *
# Cell
def match_embeds(old_wgts, old_vocab, new_vocab):
    "Convert the embedding in `wgts` to go with a new vocabulary."
    wgts = old_wgts['0.encoder.weight']
    bias = old_wgts.get('1.decoder.bias', None)
    # Rows for tokens unknown to the old vocab get the mean embedding/bias.
    mean_wgt = wgts.mean(0)
    out_wgts = wgts.new_zeros((len(new_vocab), wgts.size(1)))
    if bias is not None:
        mean_bias = bias.mean(0)
        out_bias = bias.new_zeros((len(new_vocab),))
    # Build (or reuse) a token -> old-index lookup table.
    if hasattr(old_vocab, 'o2i'):
        lookup = old_vocab.o2i
    else:
        lookup = {token: pos for pos, token in enumerate(old_vocab)}
    for row, token in enumerate(new_vocab):
        pos = lookup.get(token, -1)
        if pos >= 0:
            out_wgts[row] = wgts[pos]
            if bias is not None: out_bias[row] = bias[pos]
        else:
            out_wgts[row] = mean_wgt
            if bias is not None: out_bias[row] = mean_bias
    # Write the remapped tensors back into the state dict (in place).
    old_wgts['0.encoder.weight'] = out_wgts
    if '0.encoder_dp.emb.weight' in old_wgts:
        old_wgts['0.encoder_dp.emb.weight'] = out_wgts.clone()
    old_wgts['1.decoder.weight'] = out_wgts.clone()
    if bias is not None:
        old_wgts['1.decoder.bias'] = out_bias
    return old_wgts
# Cell
def _get_text_vocab(dls):
    "Return the vocabulary of `dls` (first element when wrapped in an `L`)."
    vocab = dls.vocab
    return vocab[0] if isinstance(vocab, L) else vocab
# Cell
def load_ignore_keys(model, wgts):
    "Load `wgts` in `model` ignoring the names of the keys, just taking parameters in order"
    state = model.state_dict()
    # Pair the model's own parameters with the incoming ones purely by
    # position (dict insertion order), ignoring the key names on both sides.
    for target, source in zip(state.values(), wgts.values()):
        target.data = source.data.clone()
    return model.load_state_dict(state)
# Cell
@delegates(Learner.__init__)
class TextLearner(Learner):
    "Basic class for a `Learner` in NLP."
    # NOTE(review): the first two positional parameters are named (model, dls)
    # but the factory functions below call `TextLearner(dls, model, ...)`; both
    # values are forwarded positionally to `Learner.__init__`, so the *names*
    # here look swapped -- confirm against `Learner.__init__`'s signature.
    def __init__(self, model, dls, alpha=2., beta=1., moms=(0.8,0.7,0.8), **kwargs):
        super().__init__(model, dls, moms=moms, **kwargs)
        # Reset the RNN state each epoch and apply AR/TAR regularization.
        self.add_cbs([ModelReseter(), RNNRegularizer(alpha=alpha, beta=beta)])

    def save_encoder(self, file):
        "Save the encoder to `self.path/self.model_dir/file`"
        if rank_distrib(): return # don't save if slave proc
        encoder = get_model(self.model)[0]
        if hasattr(encoder, 'module'): encoder = encoder.module  # unwrap DataParallel
        torch.save(encoder.state_dict(), join_path_file(file,self.path/self.model_dir, ext='.pth'))

    def load_encoder(self, file, device=None):
        "Load the encoder `name` from the model directory."
        encoder = get_model(self.model)[0]
        if device is None: device = self.dls.device
        if hasattr(encoder, 'module'): encoder = encoder.module  # unwrap DataParallel
        distrib_barrier()  # sync distributed processes before reading the file
        encoder.load_state_dict(torch.load(join_path_file(file,self.path/self.model_dir, ext='.pth'), map_location=device))
        self.freeze()
        return self

    def load_pretrained(self, wgts_fname, vocab_fname, model=None):
        "Load a pretrained model and adapt it to the data vocabulary."
        old_vocab = Path(vocab_fname).load()
        new_vocab = _get_text_vocab(self.dls)
        wgts = torch.load(wgts_fname, map_location = lambda storage,loc: storage)
        if 'model' in wgts: wgts = wgts['model'] #Just in case the pretrained model was saved with an optimizer
        wgts = match_embeds(wgts, old_vocab, new_vocab)  # remap embeddings to the new vocab
        load_ignore_keys(self.model if model is None else model, wgts)
        self.freeze()
        return self
# Cell
def decode_spec_tokens(tokens):
    """Decode fastai special tokens (capitalization and repetition markers)
    in `tokens`, returning the list of plain tokens."""
    new_toks,rule,arg = [],None,None
    for t in tokens:
        if t in [TK_MAJ, TK_UP, TK_REP, TK_WREP]:
            rule = t  # the following token(s) are arguments of this marker
        elif rule is None:
            new_toks.append(t)  # ordinary token
        elif rule == TK_MAJ:
            new_toks.append(t[:1].upper() + t[1:].lower())  # capitalize first letter
            rule = None
        elif rule == TK_UP:
            new_toks.append(t.upper())  # uppercase the whole token
            rule = None
        elif arg is None:
            # First argument of TK_REP/TK_WREP: the repetition count.
            try: arg = int(t)
            except ValueError: rule = None  # malformed count: abandon the rule
        else:
            # Second argument: the char (TK_REP) or word (TK_WREP) to repeat.
            if rule == TK_REP: new_toks.append(t * arg)
            else: new_toks += [t] * arg
            # BUG FIX: reset the state here -- previously rule/arg survived
            # past the repetition, so every *subsequent* token was also
            # repeated with the stale count.
            rule,arg = None,None
    return new_toks
# Cell
class LMLearner(TextLearner):
    "Add functionality to `TextLearner` when dealing with a language model"
    @delegates(tokenize1)
    def predict(self, text, n_words=1, no_unk=True, temperature=1., min_p=None, rm_type_tfms=0, no_bar=False,
                decoder=decode_spec_tokens, **kwargs):
        "Return `text` and the `n_words` that come after"
        self.model.reset()  # clear the RNN hidden state before generating
        tokens = tokenize1(text, **kwargs)
        tfm = self.dls.train_ds.numericalize
        idxs = tfm(tokens).to(self.dls.device)
        if no_unk: unk_idx = self.dls.vocab.index(UNK)
        # Sample one token per iteration, feeding the growing sequence back in.
        for _ in (range(n_words) if no_bar else progress_bar(range(n_words), leave=False)):
            with self.no_bar(): preds,_ = self.get_preds(dl=[(idxs[None],)])
            res = preds[0][-1]  # scores over the vocab for the next position
            if no_unk: res[unk_idx] = 0.  # never sample the unknown token
            if min_p is not None:
                if (res >= min_p).float().sum() == 0:
                    warn(f"There is no item with probability >= {min_p}, try a lower value.")
                else: res[res < min_p] = 0.  # zero out low-probability tokens
            if temperature != 1.: res.pow_(1 / temperature)
            idx = torch.multinomial(res, 1).item()  # sample next token id
            idxs = torch.cat([idxs, idxs.new([idx])])
        # Drop padding / beginning-of-stream tokens before decoding.
        tokens = [tfm.vocab[i] for i in idxs if tfm.vocab[i] not in [BOS, PAD]]
        # NOTE(review): `tokenizer` is used as the join separator here --
        # confirm this attribute actually holds the separator string.
        sep = self.dls.train_ds.tokenizer
        return sep.join(decoder(tokens))

    # NOTE(review): the `concat_dim` argument is ignored; the literal 1 is
    # always forwarded to `Learner.get_preds`.
    @delegates(Learner.get_preds)
    def get_preds(self, concat_dim=1, **kwargs): return super().get_preds(concat_dim=1, **kwargs)
# Cell
from .models.core import _model_meta
# Cell
def _get_text_vocab(dls):
    # Duplicate of the helper defined earlier in this auto-generated module
    # (notebook export); returns the vocabulary of `dls`, unwrapping the
    # first element when it is stored in an `L`.
    vocab = dls.vocab
    if isinstance(vocab, L): vocab = vocab[0]
    return vocab
# Cell
@delegates(Learner.__init__)
def language_model_learner(dls, arch, config=None, drop_mult=1., pretrained=True, pretrained_fnames=None, **kwargs):
    "Create a `Learner` with a language model from `data` and `arch`."
    vocab = _get_text_vocab(dls)
    model = get_language_model(arch, len(vocab), config=config, drop_mult=drop_mult)
    meta = _model_meta[arch]
    learn = LMLearner(dls, model, loss_func=CrossEntropyLossFlat(), splitter=meta['split_lm'], **kwargs)
    #TODO: add backward
    #url = 'url_bwd' if data.backwards else 'url'
    if pretrained or pretrained_fnames:
        if pretrained_fnames is not None:
            # Explicit (weights, vocab) base names relative to the model dir.
            fnames = [learn.path/learn.model_dir/f'{fn}.{ext}' for fn,ext in zip(pretrained_fnames, ['pth', 'pkl'])]
        else:
            if 'url' not in meta:
                warn("There are no pretrained weights for that architecture yet!")
                return learn
            # Download the pretrained archive and locate the .pth/.pkl pair.
            model_path = untar_data(meta['url'] , c_key='model')
            fnames = [list(model_path.glob(f'*.{ext}'))[0] for ext in ['pth', 'pkl']]
        learn = learn.load_pretrained(*fnames)
    return learn
# Cell
@delegates(Learner.__init__)
def text_classifier_learner(dls, arch, seq_len=72, config=None, pretrained=True, drop_mult=0.5, n_out=None,
                            lin_ftrs=None, ps=None, max_len=72*20, **kwargs):
    "Create a `Learner` with a text classifier from `data` and `arch`."
    vocab = _get_text_vocab(dls)
    if n_out is None: n_out = get_c(dls)  # infer number of classes from the data
    assert n_out, "`n_out` is not defined, and could not be infered from data, set `dls.c` or pass `n_out`"
    model = get_text_classifier(arch, len(vocab), n_out, seq_len=seq_len, config=config,
                                drop_mult=drop_mult, lin_ftrs=lin_ftrs, ps=ps, max_len=max_len)
    meta = _model_meta[arch]
    learn = TextLearner(dls, model, splitter=meta['split_clas'], **kwargs)
    if pretrained:
        if 'url' not in meta:
            warn("There are no pretrained weights for that architecture yet!")
            return learn
        # Download pretrained weights/vocab; load them into the encoder only.
        model_path = untar_data(meta['url'], c_key='model')
        fnames = [list(model_path.glob(f'*.{ext}'))[0] for ext in ['pth', 'pkl']]
        learn = learn.load_pretrained(*fnames, model=learn.model[0])
    learn.freeze()
    return learn
return learn
# Cell
@typedispatch
def show_results(x: LMTensorText, y, samples, outs, ctxs=None, max_n=10, **kwargs):
    # Language-model variant: one table row per sample with input, target
    # and predicted text, capped at `max_n` rows.
    if ctxs is None: ctxs = get_empty_df(min(len(samples), max_n))
    for i,l in enumerate(['input', 'target']):
        ctxs = [b.show(ctx=c, label=l, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs,range(max_n))]
    ctxs = [b.show(ctx=c, label='pred', **kwargs) for b,c,_ in zip(outs.itemgot(0),ctxs,range(max_n))]
    display_df(pd.DataFrame(ctxs))
    return ctxs
# Cell
@typedispatch
def show_results(x: TensorText, y, samples, outs, ctxs=None, max_n=10, trunc_at=150, **kwargs):
    # Text-classification variant: truncate long input texts to `trunc_at`
    # characters, then delegate to the generic implementation.
    if ctxs is None: ctxs = get_empty_df(min(len(samples), max_n))
    samples = L((s[0].truncate(trunc_at),*s[1:]) for s in samples)
    ctxs = show_results[object](x, y, samples, outs, ctxs=ctxs, max_n=max_n, **kwargs)
    # NOTE(review): the generic implementation may already display the table;
    # this displays it (again) after truncation -- confirm intended.
    display_df(pd.DataFrame(ctxs))
    return ctxs
# Cell
@typedispatch
def plot_top_losses(x: TensorText, y:TensorCategory, samples, outs, raws, losses, trunc_at=150, **kwargs):
    # Build a table with input/target plus prediction, probability and loss.
    rows = get_empty_df(len(samples))
    samples = L((s[0].truncate(trunc_at),*s[1:]) for s in samples)  # shorten long texts
    for i,l in enumerate(['input', 'target']):
        rows = [b.show(ctx=c, label=l, **kwargs) for b,c in zip(samples.itemgot(i),rows)]
    # Extend each decoded prediction with (max probability, loss) columns.
    outs = L(o + (TitledFloat(r.max().item()), TitledFloat(l.item())) for o,r,l in zip(outs, raws, losses))
    for i,l in enumerate(['predicted', 'probability', 'loss']):
        rows = [b.show(ctx=c, label=l, **kwargs) for b,c in zip(outs.itemgot(i),rows)]
    display_df(pd.DataFrame(rows))
__all__ = ['match_embeds', 'load_ignore_keys', 'TextLearner', 'decode_spec_tokens', 'LMLearner',
'language_model_learner', 'text_classifier_learner']
from ..basics import *
from .core import *
from .data import *
from .models.core import *
from .models.awdlstm import *
from ..callback.rnn import *
def match_embeds(old_wgts, old_vocab, new_vocab):
bias, wgts = old_wgts.get('1.decoder.bias', None), old_wgts['0.encoder.weight']
wgts_m = wgts.mean(0)
new_wgts = wgts.new_zeros((len(new_vocab),wgts.size(1)))
if bias is not None:
bias_m = bias.mean(0)
new_bias = bias.new_zeros((len(new_vocab),))
old_o2i = old_vocab.o2i if hasattr(old_vocab, 'o2i') else {w:i for i,w in enumerate(old_vocab)}
for i,w in enumerate(new_vocab):
idx = old_o2i.get(w, -1)
new_wgts[i] = wgts[idx] if idx>=0 else wgts_m
if bias is not None: new_bias[i] = bias[idx] if idx>=0 else bias_m
old_wgts['0.encoder.weight'] = new_wgts
if '0.encoder_dp.emb.weight' in old_wgts: old_wgts['0.encoder_dp.emb.weight'] = new_wgts.clone()
old_wgts['1.decoder.weight'] = new_wgts.clone()
if bias is not None: old_wgts['1.decoder.bias'] = new_bias
return old_wgts
def _get_text_vocab(dls):
vocab = dls.vocab
if isinstance(vocab, L): vocab = vocab[0]
return vocab
def load_ignore_keys(model, wgts):
sd = model.state_dict()
for k1,k2 in zip(sd.keys(), wgts.keys()): sd[k1].data = wgts[k2].data.clone()
return model.load_state_dict(sd)
@delegates(Learner.__init__)
class TextLearner(Learner):
def __init__(self, model, dls, alpha=2., beta=1., moms=(0.8,0.7,0.8), **kwargs):
super().__init__(model, dls, moms=moms, **kwargs)
self.add_cbs([ModelReseter(), RNNRegularizer(alpha=alpha, beta=beta)])
def save_encoder(self, file):
if rank_distrib(): return
encoder = get_model(self.model)[0]
if hasattr(encoder, 'module'): encoder = encoder.module
torch.save(encoder.state_dict(), join_path_file(file,self.path/self.model_dir, ext='.pth'))
def load_encoder(self, file, device=None):
encoder = get_model(self.model)[0]
if device is None: device = self.dls.device
if hasattr(encoder, 'module'): encoder = encoder.module
distrib_barrier()
encoder.load_state_dict(torch.load(join_path_file(file,self.path/self.model_dir, ext='.pth'), map_location=device))
self.freeze()
return self
def load_pretrained(self, wgts_fname, vocab_fname, model=None):
old_vocab = Path(vocab_fname).load()
new_vocab = _get_text_vocab(self.dls)
wgts = torch.load(wgts_fname, map_location = lambda storage,loc: storage)
if 'model' in wgts: wgts = wgts['model'] #Just in case the pretrained model was saved with an optimizer
wgts = match_embeds(wgts, old_vocab, new_vocab)
load_ignore_keys(self.model if model is None else model, wgts)
self.freeze()
return self
# Cell
def decode_spec_tokens(tokens):
new_toks,rule,arg = [],None,None
for t in tokens:
if t in [TK_MAJ, TK_UP, TK_REP, TK_WREP]: rule = t
elif rule is None: new_toks.append(t)
elif rule == TK_MAJ:
new_toks.append(t[:1].upper() + t[1:].lower())
rule = None
elif rule == TK_UP:
new_toks.append(t.upper())
rule = None
elif arg is None:
try: arg = int(t)
except: rule = None
else:
if rule == TK_REP: new_toks.append(t * arg)
else: new_toks += [t] * arg
return new_toks
# Cell
class LMLearner(TextLearner):
@delegates(tokenize1)
def predict(self, text, n_words=1, no_unk=True, temperature=1., min_p=None, rm_type_tfms=0, no_bar=False,
decoder=decode_spec_tokens, **kwargs):
self.model.reset()
tokens = tokenize1(text, **kwargs)
tfm = self.dls.train_ds.numericalize
idxs = tfm(tokens).to(self.dls.device)
if no_unk: unk_idx = self.dls.vocab.index(UNK)
for _ in (range(n_words) if no_bar else progress_bar(range(n_words), leave=False)):
with self.no_bar(): preds,_ = self.get_preds(dl=[(idxs[None],)])
res = preds[0][-1]
if no_unk: res[unk_idx] = 0.
if min_p is not None:
if (res >= min_p).float().sum() == 0:
warn(f"There is no item with probability >= {min_p}, try a lower value.")
else: res[res < min_p] = 0.
if temperature != 1.: res.pow_(1 / temperature)
idx = torch.multinomial(res, 1).item()
idxs = torch.cat([idxs, idxs.new([idx])])
tokens = [tfm.vocab[i] for i in idxs if tfm.vocab[i] not in [BOS, PAD]]
sep = self.dls.train_ds.tokenizer
return sep.join(decoder(tokens))
@delegates(Learner.get_preds)
def get_preds(self, concat_dim=1, **kwargs): return super().get_preds(concat_dim=1, **kwargs)
# Cell
from .models.core import _model_meta
# Cell
def _get_text_vocab(dls):
vocab = dls.vocab
if isinstance(vocab, L): vocab = vocab[0]
return vocab
# Cell
@delegates(Learner.__init__)
def language_model_learner(dls, arch, config=None, drop_mult=1., pretrained=True, pretrained_fnames=None, **kwargs):
vocab = _get_text_vocab(dls)
model = get_language_model(arch, len(vocab), config=config, drop_mult=drop_mult)
meta = _model_meta[arch]
learn = LMLearner(dls, model, loss_func=CrossEntropyLossFlat(), splitter=meta['split_lm'], **kwargs)
#TODO: add backard
#url = 'url_bwd' if data.backwards else 'url'
if pretrained or pretrained_fnames:
if pretrained_fnames is not None:
fnames = [learn.path/learn.model_dir/f'{fn}.{ext}' for fn,ext in zip(pretrained_fnames, ['pth', 'pkl'])]
else:
if 'url' not in meta:
warn("There are no pretrained weights for that architecture yet!")
return learn
model_path = untar_data(meta['url'] , c_key='model')
fnames = [list(model_path.glob(f'*.{ext}'))[0] for ext in ['pth', 'pkl']]
learn = learn.load_pretrained(*fnames)
return learn
# Cell
@delegates(Learner.__init__)
def text_classifier_learner(dls, arch, seq_len=72, config=None, pretrained=True, drop_mult=0.5, n_out=None,
lin_ftrs=None, ps=None, max_len=72*20, **kwargs):
vocab = _get_text_vocab(dls)
if n_out is None: n_out = get_c(dls)
assert n_out, "`n_out` is not defined, and could not be infered from data, set `dls.c` or pass `n_out`"
model = get_text_classifier(arch, len(vocab), n_out, seq_len=seq_len, config=config,
drop_mult=drop_mult, lin_ftrs=lin_ftrs, ps=ps, max_len=max_len)
meta = _model_meta[arch]
learn = TextLearner(dls, model, splitter=meta['split_clas'], **kwargs)
if pretrained:
if 'url' not in meta:
warn("There are no pretrained weights for that architecture yet!")
return learn
model_path = untar_data(meta['url'], c_key='model')
fnames = [list(model_path.glob(f'*.{ext}'))[0] for ext in ['pth', 'pkl']]
learn = learn.load_pretrained(*fnames, model=learn.model[0])
learn.freeze()
return learn
# Cell
@typedispatch
def show_results(x: LMTensorText, y, samples, outs, ctxs=None, max_n=10, **kwargs):
if ctxs is None: ctxs = get_empty_df(min(len(samples), max_n))
for i,l in enumerate(['input', 'target']):
ctxs = [b.show(ctx=c, label=l, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs,range(max_n))]
ctxs = [b.show(ctx=c, label='pred', **kwargs) for b,c,_ in zip(outs.itemgot(0),ctxs,range(max_n))]
display_df(pd.DataFrame(ctxs))
return ctxs
# Cell
@typedispatch
def show_results(x: TensorText, y, samples, outs, ctxs=None, max_n=10, trunc_at=150, **kwargs):
if ctxs is None: ctxs = get_empty_df(min(len(samples), max_n))
samples = L((s[0].truncate(trunc_at),*s[1:]) for s in samples)
ctxs = show_results[object](x, y, samples, outs, ctxs=ctxs, max_n=max_n, **kwargs)
display_df(pd.DataFrame(ctxs))
return ctxs
# Cell
@typedispatch
def plot_top_losses(x: TensorText, y:TensorCategory, samples, outs, raws, losses, trunc_at=150, **kwargs):
rows = get_empty_df(len(samples))
samples = L((s[0].truncate(trunc_at),*s[1:]) for s in samples)
for i,l in enumerate(['input', 'target']):
rows = [b.show(ctx=c, label=l, **kwargs) for b,c in zip(samples.itemgot(i),rows)]
outs = L(o + (TitledFloat(r.max().item()), TitledFloat(l.item())) for o,r,l in zip(outs, raws, losses))
for i,l in enumerate(['predicted', 'probability', 'loss']):
rows = [b.show(ctx=c, label=l, **kwargs) for b,c in zip(outs.itemgot(i),rows)]
display_df(pd.DataFrame(rows)) | true | true |
1c2d6121c44378aca199f3bf7aee582ad8f74fb8 | 3,912 | py | Python | modules/finance/arrival_and_billing/code/list_arrival_and_billing.py | xuhuiliang-maybe/ace_office | 07fae18676a193206802e8fb9aa32a805b1da24c | [
"Apache-2.0"
] | 1 | 2018-11-27T08:08:07.000Z | 2018-11-27T08:08:07.000Z | modules/finance/arrival_and_billing/code/list_arrival_and_billing.py | xuhuiliang-maybe/ace_office | 07fae18676a193206802e8fb9aa32a805b1da24c | [
"Apache-2.0"
] | null | null | null | modules/finance/arrival_and_billing/code/list_arrival_and_billing.py | xuhuiliang-maybe/ace_office | 07fae18676a193206802e8fb9aa32a805b1da24c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
from django.contrib.auth.decorators import login_required
from django.contrib.auth.decorators import permission_required
from django.views.generic import ListView
from modules.finance.arrival_and_billing.models import *
from modules.share_module.formater import *
from modules.share_module.permissionMixin import class_view_decorator
from modules.share_module.utils import get_kwargs
from config.conf_core import PAGINATE
@class_view_decorator(login_required)
@class_view_decorator(permission_required('arrival_and_billing.browse_arrivalandbilling', raise_exception=True))
class ArrivalAndBillingListView(ListView):
    """Paginated list of ArrivalAndBilling records, optionally filtered by
    settlement month via the ``settlement_date`` GET parameter (``YYYY/MM/DD``)."""
    context_object_name = "arrival_and_billing_list"
    template_name = "arrival_and_billing_list.html"
    allow_empty = True
    paginate_by = PAGINATE

    def get_queryset(self):
        # Set the attribute first so get_context_data works even on failure.
        self.settlement_date = self.request.GET.get("settlement_date", "")  # settlement month
        try:
            settlement_date_year = ""
            settlement_date_month = ""
            if self.settlement_date:
                parsed = date_formater(self.settlement_date, "%Y/%m/%d")
                settlement_date_year = parsed.year
                settlement_date_month = parsed.month
            search_condition = {
                "settlement_date__year": settlement_date_year,
                "settlement_date__month": settlement_date_month,
            }
            # get_kwargs drops empty values, so no filter applies when the
            # parameter is absent.
            kwargs = get_kwargs(search_condition)
            return ArrivalAndBilling.objects.filter(**kwargs)
        except Exception:  # was a bare except; keep best-effort behavior
            traceback.print_exc()
            # BUG FIX: returning None from get_queryset breaks ListView
            # pagination; fall back to an empty queryset instead.
            return ArrivalAndBilling.objects.none()

    def get_context_data(self, **kwargs):
        context = super(ArrivalAndBillingListView, self).get_context_data(**kwargs)
        context["settlement_date"] = self.settlement_date
        return context
@class_view_decorator(login_required)
class CreditedDetailsListView(ListView):
    """Paginated list of CreditedDetails for one ArrivalAndBilling record,
    optionally filtered by credit month via the ``credited_date`` GET
    parameter (``YYYY/MM/DD``)."""
    context_object_name = "credited_details_list"
    template_name = "credited_details_list.html"
    allow_empty = True
    paginate_by = PAGINATE

    def get_queryset(self):
        # Set the attributes first so get_context_data works even on failure.
        self.arrivalandbilling = self.kwargs.get("arrivalandbilling", "")
        self.credited_date = self.request.GET.get("credited_date", "")  # credit date
        try:
            credited_date_year = ""
            credited_date_month = ""
            if self.credited_date:
                parsed = date_formater(self.credited_date, "%Y/%m/%d")
                credited_date_year = parsed.year
                credited_date_month = parsed.month
            search_condition = {
                "credited_date__year": credited_date_year,
                "credited_date__month": credited_date_month,
                "arrival": self.arrivalandbilling,
            }
            # get_kwargs drops empty values so absent filters are ignored.
            kwargs = get_kwargs(search_condition)
            return CreditedDetails.objects.filter(**kwargs)
        except Exception:  # was a bare except; keep best-effort behavior
            traceback.print_exc()
            # BUG FIX: returning None from get_queryset breaks ListView
            # pagination; fall back to an empty queryset instead.
            return CreditedDetails.objects.none()

    def get_context_data(self, **kwargs):
        context = super(CreditedDetailsListView, self).get_context_data(**kwargs)
        context["credited_date"] = self.credited_date
        context["arrivalandbilling"] = self.arrivalandbilling
        return context
@class_view_decorator(login_required)
class BillingDetailsListView(ListView):
    """Paginated list of BillingDetails for one ArrivalAndBilling record,
    optionally filtered by invoice month via the ``billing_date`` GET
    parameter (``YYYY/MM/DD``)."""
    context_object_name = "billing_details_list"
    template_name = "billing_details_list.html"
    allow_empty = True
    paginate_by = PAGINATE

    def get_queryset(self):
        # Set the attributes first so get_context_data works even on failure.
        self.arrivalandbilling = self.kwargs.get("arrivalandbilling", "")
        self.billing_date = self.request.GET.get("billing_date", "")  # invoice date
        try:
            billing_date_year = ""
            billing_date_month = ""
            if self.billing_date:
                parsed = date_formater(self.billing_date, "%Y/%m/%d")
                billing_date_year = parsed.year
                billing_date_month = parsed.month
            search_condition = {
                "billing_date__year": billing_date_year,
                "billing_date__month": billing_date_month,
                "billing": self.arrivalandbilling,
            }
            # get_kwargs drops empty values so absent filters are ignored.
            kwargs = get_kwargs(search_condition)
            return BillingDetails.objects.filter(**kwargs)
        except Exception:  # was a bare except; keep best-effort behavior
            traceback.print_exc()
            # BUG FIX: returning None from get_queryset breaks ListView
            # pagination; fall back to an empty queryset instead.
            return BillingDetails.objects.none()

    def get_context_data(self, **kwargs):
        context = super(BillingDetailsListView, self).get_context_data(**kwargs)
        context["billing_date"] = self.billing_date
        context["arrivalandbilling"] = self.arrivalandbilling
        return context
| 32.6 | 112 | 0.77684 |
from django.contrib.auth.decorators import login_required
from django.contrib.auth.decorators import permission_required
from django.views.generic import ListView
from modules.finance.arrival_and_billing.models import *
from modules.share_module.formater import *
from modules.share_module.permissionMixin import class_view_decorator
from modules.share_module.utils import get_kwargs
from config.conf_core import PAGINATE
@class_view_decorator(login_required)
@class_view_decorator(permission_required('arrival_and_billing.browse_arrivalandbilling', raise_exception=True))
class ArrivalAndBillingListView(ListView):
context_object_name = "arrival_and_billing_list"
template_name = "arrival_and_billing_list.html"
allow_empty = True
paginate_by = PAGINATE
def get_queryset(self):
try:
self.settlement_date = self.request.GET.get("settlement_date", "")
settlement_date_year = ""
settlement_date_month = ""
if self.settlement_date:
settlement_date_date = date_formater(self.settlement_date, "%Y/%m/%d")
settlement_date_year = settlement_date_date.year
settlement_date_month = settlement_date_date.month
search_condition = {
"settlement_date__year": settlement_date_year,
"settlement_date__month": settlement_date_month,
}
kwargs = get_kwargs(search_condition)
return ArrivalAndBilling.objects.filter(**kwargs)
except:
traceback.print_exc()
def get_context_data(self, **kwargs):
context = super(ArrivalAndBillingListView, self).get_context_data(**kwargs)
context["settlement_date"] = self.settlement_date
return context
@class_view_decorator(login_required)
class CreditedDetailsListView(ListView):
context_object_name = "credited_details_list"
template_name = "credited_details_list.html"
allow_empty = True
paginate_by = PAGINATE
def get_queryset(self):
try:
self.arrivalandbilling = self.kwargs.get("arrivalandbilling", "")
self.credited_date = self.request.GET.get("credited_date", "")
credited_date_year = ""
credited_date_month = ""
if self.credited_date:
credited_date = date_formater(self.credited_date, "%Y/%m/%d")
credited_date_year = credited_date.year
credited_date_month = credited_date.month
search_condition = {
"credited_date__year": credited_date_year,
"credited_date__month": credited_date_month,
"arrival": self.arrivalandbilling,
}
kwargs = get_kwargs(search_condition)
return CreditedDetails.objects.filter(**kwargs)
except:
traceback.print_exc()
def get_context_data(self, **kwargs):
context = super(CreditedDetailsListView, self).get_context_data(**kwargs)
context["credited_date"] = self.credited_date
context["arrivalandbilling"] = self.arrivalandbilling
return context
@class_view_decorator(login_required)
class BillingDetailsListView(ListView):
context_object_name = "billing_details_list"
template_name = "billing_details_list.html"
allow_empty = True
paginate_by = PAGINATE
def get_queryset(self):
try:
self.arrivalandbilling = self.kwargs.get("arrivalandbilling", "")
self.billing_date = self.request.GET.get("billing_date", "")
billing_date_year = ""
billing_date_month = ""
if self.billing_date:
billing_date = date_formater(self.billing_date, "%Y/%m/%d")
billing_date_year = billing_date.year
billing_date_month = billing_date.month
search_condition = {
"billing_date__year": billing_date_year,
"billing_date__month": billing_date_month,
"billing": self.arrivalandbilling,
}
kwargs = get_kwargs(search_condition)
return BillingDetails.objects.filter(**kwargs)
except:
traceback.print_exc()
def get_context_data(self, **kwargs):
context = super(BillingDetailsListView, self).get_context_data(**kwargs)
context["billing_date"] = self.billing_date
context["arrivalandbilling"] = self.arrivalandbilling
return context
| true | true |
1c2d61cbe1d6786da71786c3d16234d6959e6428 | 2,633 | py | Python | ppr-api/test_data/create_test_data.py | pwei1018/ppr | 1fdd2f1ad33217045404d7b872d9fad41a4c7da6 | [
"Apache-2.0"
] | null | null | null | ppr-api/test_data/create_test_data.py | pwei1018/ppr | 1fdd2f1ad33217045404d7b872d9fad41a4c7da6 | [
"Apache-2.0"
] | null | null | null | ppr-api/test_data/create_test_data.py | pwei1018/ppr | 1fdd2f1ad33217045404d7b872d9fad41a4c7da6 | [
"Apache-2.0"
] | null | null | null | # Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create all unit test data.
The scripts run in the following order:
1. ./test_reset.sql
2. ./create_first.sql
3. All the files in ./test_data sorted by name.
"""
import os
from sqlalchemy.sql import text
from ppr_api import create_app
from ppr_api.models import db
#from test_data.test_single import execute_file
def execute_script(session, file_name):
    """Run every ';'-terminated SQL statement in `file_name` against `session`.

    Blank lines and '--' comment lines are skipped.  Statements may span
    multiple lines; each is executed individually (errors are reported and
    the run continues) and the session is committed once at the end.
    """
    print('Executing SQL statements in file ' + file_name)
    with open(file_name, 'r') as sql_file:
        sql_command = ''
        for line in sql_file:
            stripped = line.strip()
            # Skip blank/whitespace-only lines and comments (even indented).
            if not stripped or stripped.startswith('--'):
                continue
            # BUG FIX: join continuation lines with a space -- the previous
            # direct concatenation could fuse the last token of one line with
            # the first token of the next.
            sql_command = stripped if not sql_command else sql_command + ' ' + stripped
            # A trailing ';' marks the end of a complete statement.
            if sql_command.endswith(';'):
                # BUG FIX: drop only the terminating ';' instead of replacing
                # every ';' (which could corrupt string literals).
                statement = sql_command[:-1]
                try:
                    session.execute(text(statement))
                except Exception as ex:
                    # Best-effort: report and continue with the next statement.
                    print(repr(ex))
                finally:
                    sql_command = ''
    session.commit()
# Seed the test database: reset, create base fixtures, then load every data
# file (in sorted order) from test_data/data_files.
app = create_app('testing')
with app.app_context():
    conn = db.engine.connect()
    options = dict(bind=conn, binds={})
    session = db.create_scoped_session(options=options)

    execute_script(session, 'test_data/test_reset.sql')
    execute_script(session, 'test_data/create_first.sql')
    # Sorted so the scripts run in a deterministic, name-driven order.
    filenames = os.listdir(os.path.join(os.getcwd(), 'test_data/data_files'))
    sorted_names = sorted(filenames)
    for filename in sorted_names:
        execute_script(session, os.path.join(os.getcwd(), ('test_data/data_files/' + filename)))
    conn.close()
| 34.644737 | 96 | 0.631599 |
import os
from sqlalchemy.sql import text
from ppr_api import create_app
from ppr_api.models import db
def execute_script(session, file_name):
print('Executing SQL statements in file ' + file_name)
with open(file_name, 'r') as sql_file:
sql_command = ''
for line in sql_file:
if not line.startswith('--') and line.strip('\n'):
sql_command += line.strip('\n')
if sql_command.endswith(';'):
sql_command = sql_command.replace(';', '')
try:
session.execute(text(sql_command))
except Exception as ex:
print(repr(ex))
finally:
sql_command = ''
session.commit()
sql_file.close()
app = create_app('testing')
with app.app_context():
conn = db.engine.connect()
options = dict(bind=conn, binds={})
session = db.create_scoped_session(options=options)
execute_script(session, 'test_data/test_reset.sql')
execute_script(session, 'test_data/create_first.sql')
filenames = os.listdir(os.path.join(os.getcwd(), 'test_data/data_files'))
sorted_names = sorted(filenames)
for filename in sorted_names:
execute_script(session, os.path.join(os.getcwd(), ('test_data/data_files/' + filename)))
conn.close()
| true | true |
1c2d62d5d2f34e2b1d7c27b8e207a5bd7ecb9ea2 | 1,030 | py | Python | isi_sdk_7_2/test/test_auth_access_access_item_relevant_ace.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 24 | 2018-06-22T14:13:23.000Z | 2022-03-23T01:21:26.000Z | isi_sdk_7_2/test/test_auth_access_access_item_relevant_ace.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 46 | 2018-04-30T13:28:22.000Z | 2022-03-21T21:11:07.000Z | isi_sdk_7_2/test/test_auth_access_access_item_relevant_ace.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 29 | 2018-06-19T00:14:04.000Z | 2022-02-08T17:51:19.000Z | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 2
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_7_2
from isi_sdk_7_2.models.auth_access_access_item_relevant_ace import AuthAccessAccessItemRelevantAce # noqa: E501
from isi_sdk_7_2.rest import ApiException
class TestAuthAccessAccessItemRelevantAce(unittest.TestCase):
    """AuthAccessAccessItemRelevantAce unit test stubs (swagger-codegen generated)."""
    def setUp(self):
        # No fixtures are required for these generated stubs.
        pass
    def tearDown(self):
        # Nothing to clean up.
        pass
    def testAuthAccessAccessItemRelevantAce(self):
        """Test AuthAccessAccessItemRelevantAce"""
        # FIXME: construct object with mandatory attributes with example values
        # model = isi_sdk_7_2.models.auth_access_access_item_relevant_ace.AuthAccessAccessItemRelevantAce()  # noqa: E501
        pass
if __name__ == '__main__':
    unittest.main()
| 25.121951 | 121 | 0.741748 |
from __future__ import absolute_import
import unittest
import isi_sdk_7_2
from isi_sdk_7_2.models.auth_access_access_item_relevant_ace import AuthAccessAccessItemRelevantAce
from isi_sdk_7_2.rest import ApiException
class TestAuthAccessAccessItemRelevantAce(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testAuthAccessAccessItemRelevantAce(self):
s
if __name__ == '__main__':
unittest.main()
| true | true |
1c2d6449897c82aec7ff4cc6d62161ab56b18cad | 2,977 | py | Python | run_mypy.py | paper42/meson | f67994476da4bdc5389c558989809df48a172c6e | [
"Apache-2.0"
] | 2 | 2021-12-06T21:07:39.000Z | 2021-12-06T21:20:39.000Z | run_mypy.py | eli-schwartz/meson | 40343fae9fb0acae6509753a5879bf4964276053 | [
"Apache-2.0"
] | 1 | 2021-09-03T18:10:25.000Z | 2021-09-03T18:10:25.000Z | run_mypy.py | eli-schwartz/meson | 40343fae9fb0acae6509753a5879bf4964276053 | [
"Apache-2.0"
] | 3 | 2021-01-08T08:54:47.000Z | 2021-03-16T11:55:20.000Z | #!/usr/bin/env python3
from pathlib import Path
import argparse
import os
import subprocess
import sys
import typing as T
from mesonbuild.mesonlib import version_compare
modules = [
# fully typed submodules
# 'mesonbuild/ast',
'mesonbuild/cmake',
'mesonbuild/compilers',
'mesonbuild/dependencies',
'mesonbuild/interpreter/primitives',
'mesonbuild/interpreterbase',
'mesonbuild/linkers',
'mesonbuild/scripts',
'mesonbuild/wrap',
# specific files
'mesonbuild/arglist.py',
'mesonbuild/backend/backends.py',
# 'mesonbuild/coredata.py',
'mesonbuild/envconfig.py',
'mesonbuild/interpreter/compiler.py',
'mesonbuild/interpreter/mesonmain.py',
'mesonbuild/interpreter/interpreterobjects.py',
'mesonbuild/interpreter/type_checking.py',
'mesonbuild/mcompile.py',
'mesonbuild/mdevenv.py',
'mesonbuild/mesonlib/platform.py',
'mesonbuild/mesonlib/universal.py',
'mesonbuild/minit.py',
'mesonbuild/minstall.py',
'mesonbuild/mintro.py',
'mesonbuild/mlog.py',
'mesonbuild/msubprojects.py',
'mesonbuild/modules/fs.py',
'mesonbuild/modules/i18n.py',
'mesonbuild/modules/java.py',
'mesonbuild/modules/keyval.py',
'mesonbuild/modules/qt.py',
'mesonbuild/modules/unstable_external_project.py',
'mesonbuild/modules/unstable_rust.py',
'mesonbuild/modules/windows.py',
'mesonbuild/mparser.py',
'mesonbuild/msetup.py',
'mesonbuild/mtest.py',
'mesonbuild/optinterpreter.py',
'mesonbuild/programs.py',
'run_custom_lint.py',
'run_mypy.py',
'run_project_tests.py',
'run_single_test.py',
'tools',
'docs/genrefman.py',
'docs/refman',
]
# Only the mesonlib backend for the current OS is importable, so only the
# matching file is added to the mypy target list.
if os.name == 'posix':
    modules.append('mesonbuild/mesonlib/posix.py')
elif os.name == 'nt':
    modules.append('mesonbuild/mesonlib/win32.py')
def check_mypy() -> None:
    """Exit with status 1 unless a sufficiently recent mypy is importable."""
    try:
        import mypy  # noqa: F401 -- imported only to probe availability
    except ImportError:
        print('Failed import mypy')
        sys.exit(1)
    from mypy.version import __version__ as mypy_version
    if version_compare(mypy_version, '>=0.812'):
        return
    print('mypy >=0.812 is required, older versions report spurious errors')
    sys.exit(1)
def main() -> int:
    """Run mypy over the curated module list and return mypy's exit code."""
    check_mypy()
    root = Path(__file__).absolute().parent
    args = []  # type: T.List[str]
    # The previous description ('Process some integers.') was a leftover from
    # the argparse documentation example; describe this tool instead.
    parser = argparse.ArgumentParser(description='Run mypy on the annotated meson modules.')
    parser.add_argument('-p', '--pretty', action='store_true', help='pretty print mypy errors')
    parser.add_argument('-C', '--clear', action='store_true', help='clear the terminal before running mypy')
    opts = parser.parse_args()
    if opts.pretty:
        args.append('--pretty')
    if opts.clear:
        # ANSI full-reset escape sequence: clears the terminal.
        print('\x1bc', end='', flush=True)
    print('Running mypy (this can take some time) ...')
    p = subprocess.run(
        [sys.executable, '-m', 'mypy'] + args + modules,
        cwd=root,
    )
    return p.returncode
if __name__ == '__main__':
    sys.exit(main())
| 27.82243 | 108 | 0.665771 |
from pathlib import Path
import argparse
import os
import subprocess
import sys
import typing as T
from mesonbuild.mesonlib import version_compare
modules = [
'mesonbuild/cmake',
'mesonbuild/compilers',
'mesonbuild/dependencies',
'mesonbuild/interpreter/primitives',
'mesonbuild/interpreterbase',
'mesonbuild/linkers',
'mesonbuild/scripts',
'mesonbuild/wrap',
'mesonbuild/arglist.py',
'mesonbuild/backend/backends.py',
'mesonbuild/envconfig.py',
'mesonbuild/interpreter/compiler.py',
'mesonbuild/interpreter/mesonmain.py',
'mesonbuild/interpreter/interpreterobjects.py',
'mesonbuild/interpreter/type_checking.py',
'mesonbuild/mcompile.py',
'mesonbuild/mdevenv.py',
'mesonbuild/mesonlib/platform.py',
'mesonbuild/mesonlib/universal.py',
'mesonbuild/minit.py',
'mesonbuild/minstall.py',
'mesonbuild/mintro.py',
'mesonbuild/mlog.py',
'mesonbuild/msubprojects.py',
'mesonbuild/modules/fs.py',
'mesonbuild/modules/i18n.py',
'mesonbuild/modules/java.py',
'mesonbuild/modules/keyval.py',
'mesonbuild/modules/qt.py',
'mesonbuild/modules/unstable_external_project.py',
'mesonbuild/modules/unstable_rust.py',
'mesonbuild/modules/windows.py',
'mesonbuild/mparser.py',
'mesonbuild/msetup.py',
'mesonbuild/mtest.py',
'mesonbuild/optinterpreter.py',
'mesonbuild/programs.py',
'run_custom_lint.py',
'run_mypy.py',
'run_project_tests.py',
'run_single_test.py',
'tools',
'docs/genrefman.py',
'docs/refman',
]
if os.name == 'posix':
modules.append('mesonbuild/mesonlib/posix.py')
elif os.name == 'nt':
modules.append('mesonbuild/mesonlib/win32.py')
def check_mypy() -> None:
try:
import mypy
except ImportError:
print('Failed import mypy')
sys.exit(1)
from mypy.version import __version__ as mypy_version
if not version_compare(mypy_version, '>=0.812'):
print('mypy >=0.812 is required, older versions report spurious errors')
sys.exit(1)
def main() -> int:
check_mypy()
root = Path(__file__).absolute().parent
args = []
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('-p', '--pretty', action='store_true', help='pretty print mypy errors')
parser.add_argument('-C', '--clear', action='store_true', help='clear the terminal before running mypy')
opts = parser.parse_args()
if opts.pretty:
args.append('--pretty')
if opts.clear:
print('\x1bc', end='', flush=True)
print('Running mypy (this can take some time) ...')
p = subprocess.run(
[sys.executable, '-m', 'mypy'] + args + modules,
cwd=root,
)
return p.returncode
if __name__ == '__main__':
sys.exit(main())
| true | true |
1c2d6457bac7b4ad1cd1f39f5199fcc8637c5ec7 | 3,679 | py | Python | modules/dials/precommitbx/nagger.py | jorgediazjr/dials-dev20191018 | 77d66c719b5746f37af51ad593e2941ed6fbba17 | [
"BSD-3-Clause"
] | null | null | null | modules/dials/precommitbx/nagger.py | jorgediazjr/dials-dev20191018 | 77d66c719b5746f37af51ad593e2941ed6fbba17 | [
"BSD-3-Clause"
] | null | null | null | modules/dials/precommitbx/nagger.py | jorgediazjr/dials-dev20191018 | 77d66c719b5746f37af51ad593e2941ed6fbba17 | [
"BSD-3-Clause"
] | 1 | 2020-02-04T15:39:06.000Z | 2020-02-04T15:39:06.000Z | from __future__ import absolute_import, division, print_function
import os
import sys
try:
import stat
except ImportError:
stat = None
def nag():
    """
    Check if pre-commits should be installed for this repository.
    If they are not and should be then annoy the developer.
    To be called in libtbx_refresh.py

    Side effects: may overwrite .git/hooks/pre-commit with an executable
    stub that blocks commits until `libtbx.precommit install` is run.
    """
    if os.name == "nt" or not stat:  # unsupported
        return
    # Determine the name of the calling module, and thus the internal module name
    # of the libtbx_refresh file. Use exception trick to pick up the current frame.
    try:
        raise Exception()
    except Exception:
        # tb_frame is this function's frame; f_back is the caller's frame.
        frame = sys.exc_info()[2].tb_frame.f_back
    # Extract the caller name
    caller = frame.f_globals["__name__"]
    if caller == "__main__":
        # well that is not very informative, is it.
        caller = os.path.abspath(
            frame.f_code.co_filename
        )  # Get the full path of the libtbx_refresh.py file.
        refresh_file, _ = os.path.splitext(caller)
        if not refresh_file.endswith("libtbx_refresh"):
            raise RuntimeError(
                "pre-commit nagging can only be done from within libtbx_refresh.py"
            )
        # the name of the parent directory of libtbx_refresh.py is the caller name
        caller = os.path.basename(os.path.dirname(refresh_file))
    else:
        if not caller.endswith(".libtbx_refresh"):
            raise RuntimeError(
                "pre-commit nagging can only be done from within libtbx_refresh.py"
            )
        # Strip the trailing ".libtbx_refresh" (15 characters) to obtain the
        # package name.
        caller = caller[:-15]
    try:
        import libtbx.load_env
    except Exception as e:
        print("error on importing libtbx environment for pre-commit nagging:", e)
        return
    try:
        path = libtbx.env.dist_path(caller)
    except Exception as e:
        print(
            "error on obtaining module path for %s for pre-commit nagging:" % caller, e
        )
        return
    if not os.path.isdir(os.path.join(path, ".git")):
        return  # not a developer installation
    precommit_python = abs(libtbx.env.build_path / "precommitbx" / "bin" / "python3")
    hookfile = os.path.join(path, ".git", "hooks", "pre-commit")
    # An existing, executable hook is acceptable in two cases; otherwise it
    # gets replaced with the nag stub below.
    if os.path.isfile(hookfile) and os.access(hookfile, os.X_OK):
        with open(hookfile, "r") as fh:
            precommit = fh.read()
        if "precommitbx" in precommit and os.path.exists(precommit_python):
            return  # libtbx.precommit hook is fine
        if "generated by pre-commit" in precommit and "libtbx" not in precommit:
            return  # genuine pre-commit hook is also fine
    try:
        with open(hookfile, "w") as fh:
            fh.write(
                """#!/bin/bash
echo
echo Please install the DIALS pre-commit hooks before committing into the DIALS
echo repository. These hooks run simple static code analysis to catch common
echo coding mistakes early and ensure a common code style.
echo
echo The command you need to run is:
echo "    libtbx.precommit install"
echo
if [ -z "$DIALS_WITHOUT_PRECOMMITS" ]; then
  echo If you want to continue without installing pre-commits then you can override
  echo this check by setting the environment variable DIALS_WITHOUT_PRECOMMITS
fi
echo You can find more information about contributing to DIALS at:
echo https://github.com/dials/dials/blob/master/CONTRIBUTING.md
echo
if [ -z "$DIALS_WITHOUT_PRECOMMITS" ]; then
  exit 1
fi
"""
            )
            # Mark the stub executable for user/group/other so git runs it.
            mode = os.fstat(fh.fileno()).st_mode
            mode |= stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
            os.fchmod(fh.fileno(), stat.S_IMODE(mode))
    except Exception as e:
        print("Could not generate pre-commit stub:", e)
| 35.718447 | 87 | 0.65371 | from __future__ import absolute_import, division, print_function
import os
import sys
try:
import stat
except ImportError:
stat = None
def nag():
if os.name == "nt" or not stat:
return
try:
raise Exception()
except Exception:
frame = sys.exc_info()[2].tb_frame.f_back
caller = frame.f_globals["__name__"]
if caller == "__main__":
caller = os.path.abspath(
frame.f_code.co_filename
)
refresh_file, _ = os.path.splitext(caller)
if not refresh_file.endswith("libtbx_refresh"):
raise RuntimeError(
"pre-commit nagging can only be done from within libtbx_refresh.py"
)
caller = os.path.basename(os.path.dirname(refresh_file))
else:
if not caller.endswith(".libtbx_refresh"):
raise RuntimeError(
"pre-commit nagging can only be done from within libtbx_refresh.py"
)
caller = caller[:-15]
try:
import libtbx.load_env
except Exception as e:
print("error on importing libtbx environment for pre-commit nagging:", e)
return
try:
path = libtbx.env.dist_path(caller)
except Exception as e:
print(
"error on obtaining module path for %s for pre-commit nagging:" % caller, e
)
return
if not os.path.isdir(os.path.join(path, ".git")):
return
precommit_python = abs(libtbx.env.build_path / "precommitbx" / "bin" / "python3")
hookfile = os.path.join(path, ".git", "hooks", "pre-commit")
if os.path.isfile(hookfile) and os.access(hookfile, os.X_OK):
with open(hookfile, "r") as fh:
precommit = fh.read()
if "precommitbx" in precommit and os.path.exists(precommit_python):
return
if "generated by pre-commit" in precommit and "libtbx" not in precommit:
return
try:
with open(hookfile, "w") as fh:
fh.write(
"""#!/bin/bash
echo
echo Please install the DIALS pre-commit hooks before committing into the DIALS
echo repository. These hooks run simple static code analysis to catch common
echo coding mistakes early and ensure a common code style.
echo
echo The command you need to run is:
echo " libtbx.precommit install"
echo
if [ -z "$DIALS_WITHOUT_PRECOMMITS" ]; then
echo If you want to continue without installing pre-commits then you can override
echo this check by setting the environment variable DIALS_WITHOUT_PRECOMMITS
fi
echo You can find more information about contributing to DIALS at:
echo https://github.com/dials/dials/blob/master/CONTRIBUTING.md
echo
if [ -z "$DIALS_WITHOUT_PRECOMMITS" ]; then
exit 1
fi
"""
)
mode = os.fstat(fh.fileno()).st_mode
mode |= stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
os.fchmod(fh.fileno(), stat.S_IMODE(mode))
except Exception as e:
print("Could not generate pre-commit stub:", e)
| true | true |
1c2d65b943ba706213e2cc73d096a3a4131c6d90 | 6,035 | py | Python | influxdump/bin/influxdump.py | gams/influxdump | a17690e72529ef6d5d378089a7d05f8ed2591877 | [
"Apache-2.0"
] | 4 | 2017-05-25T12:22:07.000Z | 2021-09-04T04:37:55.000Z | influxdump/bin/influxdump.py | gams/influxdump | a17690e72529ef6d5d378089a7d05f8ed2591877 | [
"Apache-2.0"
] | 16 | 2018-01-18T05:20:44.000Z | 2021-04-30T05:14:20.000Z | influxdump/bin/influxdump.py | gams/influxdump | a17690e72529ef6d5d378089a7d05f8ed2591877 | [
"Apache-2.0"
] | 2 | 2017-11-15T09:21:11.000Z | 2019-09-14T08:09:40.000Z | # -*- coding: utf-8 -*-
import argparse
import getpass
import json
import sys
from influxdump.data import dump_data, load_file, load_folder
from influxdump.db import get_client
from influxdump.exceptions import TypecastError
CHUNKSIZE = 50000
def get_args():
    """Parse command line options into a plain dict.

    Side effects: may prompt for a password (-W), may read a JSON cast
    definition file (--castfile), and exits with status 1 when action is
    'load' but neither --input nor --folder was given.
    """
    parser = argparse.ArgumentParser(description='influxDB data backup tool')
    parser.add_argument('-c', '--chunksize',
            help='query chunk size, default to {}'.format(CHUNKSIZE),
            type=int, default=CHUNKSIZE)
    parser.add_argument('-d', '--database', help='database', required=True,
            type=str)
    parser.add_argument('-e', '--end', default='', type=str,
            help="""
            Exclude all results after the specified timestamp (RFC3339 format).
            """)
    parser.add_argument('-F', '--folder', default=None,
            help="""
            destination folder for fragmented dump, if this flag is not used
            then dump on stdout
            """)
    parser.add_argument('-H', '--host', help='server host',
            default="localhost", type=str)
    parser.add_argument('-i', '--input', default=None,
            help="data/metadata input file, will force action to 'load'")
    parser.add_argument('-L', '--legacy', action="store_true",
            help='influxdb legacy client (<=0.8)')
    parser.add_argument('-m', '--measurements', help='measurement pattern')
    parser.add_argument('-n', '--dry-run', help='do not really do anything',
            action="store_true")
    parser.add_argument('-p', '--port', help='server port', default=8086,
            type=int)
    parser.add_argument('-r', '--retry', default=0, type=int,
            help="""
            Retry a dump query in case of problem, 0 to disable, defaults to 0
            """)
    parser.add_argument('-s', '--start', default='', type=str,
            help="""
            Include all points starting with the specified timestamp (RFC3339
            format).
            If used without --start, all data will be backed up starting from
            1970-01-01T00:00:00Z
            """)
    parser.add_argument('-t', '--typecast',
            help="""
            Enable casting field types based on file, meta or auto discovery
            if possible. When used with 'dump', will add casting infor in meta.
            When used with 'load', will try to find casting info. If casting is
            enabled but no casting info can be found, the program will exit.
            """, action="store_true")
    parser.add_argument('--castfile',
            help="""
            File containing casting definitions, will supersede any other type
            cast definition
            """, type=str, default='')
    parser.add_argument('-u', '--user', help='username', default='', type=str)
    parser.add_argument('-v', '--verbose', help='make the script verbose',
            action="store_true")
    parser.add_argument('-w', '--password', help='password', default='',
            type=str)
    parser.add_argument('-W', '--pwdprompt', help='password prompt',
            action="store_true")
    parser.add_argument('action', metavar="action", nargs="?", default='dump',
            help="""
            action, can be 'dump' or 'load', default to 'dump'. If action is
            'load', one input file (--input) or a folder with data to load has
            to be provided
            """, choices=["load", "dump"])
    args = parser.parse_args()
    # Interactive prompt (-W) takes precedence over -w on the command line.
    if args.pwdprompt is True:
        pwd = getpass.getpass()
    else:
        pwd = args.password
    # --end without --start implies backing up everything since the epoch.
    if args.end != "" and args.start == "":
        args.start = "1970-01-01T00:00:00Z"
    # Casting definitions loaded from --castfile supersede any other source.
    if args.castfile != '':
        with open(args.castfile, 'r') as fd:
            cast = json.load(fd)
    else:
        cast = {}
    if args.action == "load" \
            and args.input is None and args.folder is None:
        sys.stderr.write("Action is load, missing input file or folder\n\n")
        parser.print_help()
        sys.exit(1)
    return {
        "chunksize": args.chunksize,
        "db": args.database,
        "end": args.end,
        "folder": args.folder,
        "host": args.host,
        "input": args.input,
        "legacy": args.legacy,
        "measurements": args.measurements,
        "dryrun": args.dry_run,
        "port": args.port,
        "retry": args.retry,
        "start": args.start,
        "user": args.user,
        "verbose": args.verbose,
        "pwd": pwd,
        "action": args.action,
        "typecast": args.typecast,
        "cast": cast,
    }
def dump(args, client):
    """Dump measurements to stdout or to a folder, forwarding all options."""
    dump_data(
        client,
        args["measurements"],
        args["folder"],
        dryrun=args["dryrun"],
        chunk_size=args["chunksize"],
        start=args["start"],
        end=args["end"],
        retry=args["retry"],
        typecast=args["typecast"],
        cast=args["cast"],
        verbose=args["verbose"]
    )
def load(args, client):
    """Load data into influxdb from a single file (--input) or a dump folder."""
    shared = {
        "typecast": args["typecast"],
        "cast": args["cast"],
        "verbose": args["verbose"],
    }
    if args["input"] is None:
        # No explicit input file: replay an entire dump folder, optionally
        # filtered by the measurement pattern.
        load_folder(client, args["folder"], pattern=args["measurements"], **shared)
    else:
        load_file(client, args["input"], **shared)
def main():
    """Entry point: build the influxdb client from CLI options, then dispatch."""
    args = get_args()
    client = get_client(
        host=args["host"], port=args["port"],
        user=args["user"], pwd=args["pwd"],
        db=args["db"], legacy=args["legacy"],
    )
    # An explicit input file forces 'load' regardless of the action argument.
    wants_load = args["action"] == "load" or args["input"] is not None
    if wants_load:
        load(args, client)
    else:
        dump(args, client)
if __name__ == "__main__":
try:
main()
except TypecastError as e:
sys.stderr.write("""Error trying to guess field types for casting,
influxdb < 1.0 did not provide key types when queried.
""")
sys.exit(1)
| 32.798913 | 79 | 0.556089 |
import argparse
import getpass
import json
import sys
from influxdump.data import dump_data, load_file, load_folder
from influxdump.db import get_client
from influxdump.exceptions import TypecastError
CHUNKSIZE = 50000
def get_args():
parser = argparse.ArgumentParser(description='influxDB data backup tool')
parser.add_argument('-c', '--chunksize',
help='query chunk size, default to {}'.format(CHUNKSIZE),
type=int, default=CHUNKSIZE)
parser.add_argument('-d', '--database', help='database', required=True,
type=str)
parser.add_argument('-e', '--end', default='', type=str,
help="""
Exclude all results after the specified timestamp (RFC3339 format).
""")
parser.add_argument('-F', '--folder', default=None,
help="""
destination folder for fragmented dump, if this flag is not used
then dump on stdout
""")
parser.add_argument('-H', '--host', help='server host',
default="localhost", type=str)
parser.add_argument('-i', '--input', default=None,
help="data/metadata input file, will force action to 'load'")
parser.add_argument('-L', '--legacy', action="store_true",
help='influxdb legacy client (<=0.8)')
parser.add_argument('-m', '--measurements', help='measurement pattern')
parser.add_argument('-n', '--dry-run', help='do not really do anything',
action="store_true")
parser.add_argument('-p', '--port', help='server port', default=8086,
type=int)
parser.add_argument('-r', '--retry', default=0, type=int,
help="""
Retry a dump query in case of problem, 0 to disable, defaults to 0
""")
parser.add_argument('-s', '--start', default='', type=str,
help="""
Include all points starting with the specified timestamp (RFC3339
format).
If used without --start, all data will be backed up starting from
1970-01-01T00:00:00Z
""")
parser.add_argument('-t', '--typecast',
help="""
Enable casting field types based on file, meta or auto discovery
if possible. When used with 'dump', will add casting infor in meta.
When used with 'load', will try to find casting info. If casting is
enabled but no casting info can be found, the program will exit.
""", action="store_true")
parser.add_argument('--castfile',
help="""
File containing casting definitions, will supersede any other type
cast definition
""", type=str, default='')
parser.add_argument('-u', '--user', help='username', default='', type=str)
parser.add_argument('-v', '--verbose', help='make the script verbose',
action="store_true")
parser.add_argument('-w', '--password', help='password', default='',
type=str)
parser.add_argument('-W', '--pwdprompt', help='password prompt',
action="store_true")
parser.add_argument('action', metavar="action", nargs="?", default='dump',
help="""
action, can be 'dump' or 'load', default to 'dump'. If action is
'load', one input file (--input) or a folder with data to load has
to be provided
""", choices=["load", "dump"])
args = parser.parse_args()
if args.pwdprompt is True:
pwd = getpass.getpass()
else:
pwd = args.password
if args.end != "" and args.start == "":
args.start = "1970-01-01T00:00:00Z"
if args.castfile != '':
with open(args.castfile, 'r') as fd:
cast = json.load(fd)
else:
cast = {}
if args.action == "load" \
and args.input is None and args.folder is None:
sys.stderr.write("Action is load, missing input file or folder\n\n")
parser.print_help()
sys.exit(1)
return {
"chunksize": args.chunksize,
"db": args.database,
"end": args.end,
"folder": args.folder,
"host": args.host,
"input": args.input,
"legacy": args.legacy,
"measurements": args.measurements,
"dryrun": args.dry_run,
"port": args.port,
"retry": args.retry,
"start": args.start,
"user": args.user,
"verbose": args.verbose,
"pwd": pwd,
"action": args.action,
"typecast": args.typecast,
"cast": cast,
}
def dump(args, client):
dump_data(
client,
args["measurements"],
args["folder"],
dryrun=args["dryrun"],
chunk_size=args["chunksize"],
start=args["start"],
end=args["end"],
retry=args["retry"],
typecast=args["typecast"],
cast=args["cast"],
verbose=args["verbose"]
)
def load(args, client):
if args["input"] is not None:
load_file(
client,
args["input"],
typecast=args["typecast"],
cast=args["cast"],
verbose=args["verbose"]
)
else:
load_folder(
client,
args["folder"],
pattern=args["measurements"],
typecast=args["typecast"],
cast=args["cast"],
verbose=args["verbose"]
)
def main():
args = get_args()
client = get_client(
host=args["host"],
port=args["port"],
user=args["user"],
pwd=args["pwd"],
db=args["db"],
legacy=args["legacy"],
)
if args["action"] == "load" or args["input"] is not None:
load(args, client)
else:
dump(args, client)
if __name__ == "__main__":
try:
main()
except TypecastError as e:
sys.stderr.write("""Error trying to guess field types for casting,
influxdb < 1.0 did not provide key types when queried.
""")
sys.exit(1)
| true | true |
1c2d6640df87b3db1e71e52a5923727b2b0ee65f | 46,334 | py | Python | Messages.py | accept8605/OOT-Rando-with-working-Navi | 84616d081efdc6989acd534676d2400d2bd68f23 | [
"MIT"
] | null | null | null | Messages.py | accept8605/OOT-Rando-with-working-Navi | 84616d081efdc6989acd534676d2400d2bd68f23 | [
"MIT"
] | null | null | null | Messages.py | accept8605/OOT-Rando-with-working-Navi | 84616d081efdc6989acd534676d2400d2bd68f23 | [
"MIT"
] | null | null | null | # text details: https://wiki.cloudmodding.com/oot/Text_Format
import random
TABLE_START = 0xB849EC
TEXT_START = 0x92D000
TABLE_SIZE_LIMIT = 0x43A8
ENG_TEXT_SIZE_LIMIT = 0x38130
JPN_TEXT_SIZE_LIMIT = 0x3A150
# name of type, followed by number of additional bytes to read, follwed by a function that prints the code
CONTROL_CODES = {
0x00: ('pad', 0, lambda _: '<pad>' ),
0x01: ('line-break', 0, lambda _: '\n' ),
0x02: ('end', 0, lambda _: '' ),
0x04: ('box-break', 0, lambda _: '\n▼\n' ),
0x05: ('color', 1, lambda d: '<color ' + "{:02x}".format(d) + '>' ),
0x06: ('gap', 1, lambda d: '<' + str(d) + 'px gap>' ),
0x07: ('goto', 2, lambda d: '<goto ' + "{:04x}".format(d) + '>' ),
0x08: ('instant', 0, lambda _: '<allow instant text>' ),
0x09: ('un-instant', 0, lambda _: '<disallow instant text>' ),
0x0A: ('keep-open', 0, lambda _: '<keep open>' ),
0x0B: ('event', 0, lambda _: '<event>' ),
0x0C: ('box-break-delay', 1, lambda d: '\n▼<wait ' + str(d) + ' frames>\n' ),
0x0E: ('fade-out', 1, lambda d: '<fade after ' + str(d) + ' frames?>' ),
0x0F: ('name', 0, lambda _: '<name>' ),
0x10: ('ocarina', 0, lambda _: '<ocarina>' ),
0x12: ('sound', 2, lambda d: '<play SFX ' + "{:04x}".format(d) + '>' ),
0x13: ('icon', 1, lambda d: '<icon ' + "{:02x}".format(d) + '>' ),
0x14: ('speed', 1, lambda d: '<delay each character by ' + str(d) + ' frames>' ),
0x15: ('background', 3, lambda d: '<set background to ' + "{:06x}".format(d) + '>' ),
0x16: ('marathon', 0, lambda _: '<marathon time>' ),
0x17: ('race', 0, lambda _: '<race time>' ),
0x18: ('points', 0, lambda _: '<points>' ),
0x19: ('skulltula', 0, lambda _: '<skulltula count>' ),
0x1A: ('unskippable', 0, lambda _: '<text is unskippable>' ),
0x1B: ('two-choice', 0, lambda _: '<start two choice>' ),
0x1C: ('three-choice', 0, lambda _: '<start three choice>' ),
0x1D: ('fish', 0, lambda _: '<fish weight>' ),
0x1E: ('high-score', 1, lambda d: '<high-score ' + "{:02x}".format(d) + '>' ),
0x1F: ('time', 0, lambda _: '<current time>' ),
}
SPECIAL_CHARACTERS = {
0x96: 'é',
0x9F: '[A]',
0xA0: '[B]',
0xA1: '[C]',
0xA2: '[L]',
0xA3: '[R]',
0xA4: '[Z]',
0xA5: '[C Up]',
0xA6: '[C Down]',
0xA7: '[C Left]',
0xA8: '[C Right]',
0xA9: '[Triangle]',
0xAA: '[Control Stick]',
}
GOSSIP_STONE_MESSAGES = list( range(0x0401, 0x04FF) ) # ids of the actual hints
GOSSIP_STONE_MESSAGES += [0x2053, 0x2054] # shared initial stone messages
TEMPLE_HINTS_MESSAGES = [0x7057, 0x707A] # dungeon reward hints from the temple of time pedestal
LIGHT_ARROW_HINT = [0x70CC] # ganondorf's light arrow hint line
GS_TOKEN_MESSAGES = [0x00B4, 0x00B5] # Get Gold Skulltula Token messages
ERROR_MESSAGE = 0x0001  # id of the fallback "TEXT ID ERROR!" message (see ITEM_MESSAGES)
# messages for shorter item messages
# ids are in the space freed up by move_shop_item_messages()
ITEM_MESSAGES = {
0x0001: "\x08\x06\x30\x05\x41TEXT ID ERROR!\x05\x40",
0x9001: "\x08\x13\x2DYou borrowed a \x05\x41Pocket Egg\x05\x40!\x01A Pocket Cucco will hatch from\x01it overnight. Be sure to give it\x01back when you are done with it.",
0x0002: "\x08\x13\x2FYou returned the Pocket Cucco\x01and got \x05\x41Cojiro\x05\x40 in return!\x01Unlike other Cuccos, Cojiro\x01rarely crows.",
0x0003: "\x08\x13\x30You got an \x05\x41Odd Mushroom\x05\x40!\x01A fresh mushroom like this is\x01sure to spoil quickly! Take it to\x01the Kakariko Potion Shop, quickly!",
0x0004: "\x08\x13\x31You received an \x05\x41Odd Potion\x05\x40!\x01It may be useful for something...\x01Hurry to the Lost Woods!",
0x0005: "\x08\x13\x32You returned the Odd Potion \x01and got the \x05\x41Poacher's Saw\x05\x40!\x01The young punk guy must have\x01left this behind.",
0x0007: "\x08\x13\x48You got a \x01\x05\x41Deku Seeds Bullet Bag\x05\x40.\x01This bag can hold up to \x05\x4640\x05\x40\x01slingshot bullets.",
0x0008: "\x08\x13\x33You traded the Poacher's Saw \x01for a \x05\x41Broken Goron's Sword\x05\x40!\x01Visit Biggoron to get it repaired!",
0x0009: "\x08\x13\x34You checked in the Broken \x01Goron's Sword and received a \x01\x05\x41Prescription\x05\x40!\x01Go see King Zora!",
0x000A: "\x08\x13\x37The Biggoron's Sword...\x01You got a \x05\x41Claim Check \x05\x40for it!\x01You can't wait for the sword\x01to be completed!",
0x000B: "\x08\x13\x2EYou got a \x05\x41Pocket Cucco, \x05\x40one\x01of Anju's prized hens! It fits \x01in your pocket.",
0x000C: "\x08\x13\x3DYou handed in the Claim Check\x01and got the \x05\x41Biggoron's Sword\x05\x40!\x01This blade was forged by a \x01master smith and won't break!",
0x000D: "\x08\x13\x35You used the Prescription and\x01received an \x05\x41Eyeball Frog\x05\x40!\x01Be quick and deliver it to Lake \x01Hylia while it's cold!",
0x000E: "\x08\x13\x36You traded the Eyeball Frog \x01for the \x05\x41World's Finest Eye Drops\x05\x40!\x01Hurry! Take them to Biggoron\x01before they go bad!",
0x0010: "\x08\x13\x25You borrowed a \x05\x41Skull Mask\x05\x40.\x01You feel like a monster while you\x01wear this mask!",
0x0011: "\x08\x13\x26You borrowed a \x05\x41Spooky Mask\x05\x40.\x01You can scare many people\x01with this mask!",
0x0012: "\x08\x13\x24You borrowed a \x05\x41Keaton Mask\x05\x40.\x01You'll be a popular guy with\x01this mask on!",
0x0013: "\x08\x13\x27You borrowed a \x05\x41Bunny Hood\x05\x40.\x01The hood's long ears are so\x01cute!",
0x0014: "\x08\x13\x28You borrowed a \x05\x41Goron Mask\x05\x40.\x01It will make your head look\x01big, though.",
0x0015: "\x08\x13\x29You borrowed a \x05\x41Zora Mask\x05\x40.\x01With this mask, you can\x01become one of the Zoras!",
0x0016: "\x08\x13\x2AYou borrowed a \x05\x41Gerudo Mask\x05\x40.\x01This mask will make you look\x01like...a girl?",
0x0017: "\x08\x13\x2BYou borrowed a \x05\x41Mask of Truth\x05\x40.\x01Show it to many people!",
0x0030: "\x08\x13\x06You found the \x05\x41Fairy Slingshot\x05\x40!",
0x0031: "\x08\x13\x03You found the \x05\x41Fairy Bow\x05\x40!",
0x0032: "\x08\x13\x02You got \x05\x41Bombs\x05\x40!\x01If you see something\x01suspicious, bomb it!",
0x0033: "\x08\x13\x09You got \x05\x41Bombchus\x05\x40!",
0x0034: "\x08\x13\x01You got a \x05\x41Deku Nut\x05\x40!",
0x0035: "\x08\x13\x0EYou found the \x05\x41Boomerang\x05\x40!",
0x0036: "\x08\x13\x0AYou found the \x05\x41Hookshot\x05\x40!\x01It's a spring-loaded chain that\x01you can cast out to hook things.",
0x0037: "\x08\x13\x00You got a \x05\x41Deku Stick\x05\x40!",
0x0038: "\x08\x13\x11You found the \x05\x41Megaton Hammer\x05\x40!\x01It's so heavy, you need to\x01use two hands to swing it!",
0x0039: "\x08\x13\x0FYou found the \x05\x41Lens of Truth\x05\x40!\x01Mysterious things are hidden\x01everywhere!",
0x003A: "\x08\x13\x08You found the \x05\x41Ocarina of Time\x05\x40!\x01It glows with a mystical light...",
0x003C: "\x08\x13\x67You received the \x05\x41Fire\x01Medallion\x05\x40!\x01Darunia awakens as a Sage and\x01adds his power to yours!",
0x003D: "\x08\x13\x68You received the \x05\x43Water\x01Medallion\x05\x40!\x01Ruto awakens as a Sage and\x01adds her power to yours!",
0x003E: "\x08\x13\x66You received the \x05\x42Forest\x01Medallion\x05\x40!\x01Saria awakens as a Sage and\x01adds her power to yours!",
0x003F: "\x08\x13\x69You received the \x05\x46Spirit\x01Medallion\x05\x40!\x01Nabooru awakens as a Sage and\x01adds her power to yours!",
0x0040: "\x08\x13\x6BYou received the \x05\x44Light\x01Medallion\x05\x40!\x01Rauru the Sage adds his power\x01to yours!",
0x0041: "\x08\x13\x6AYou received the \x05\x45Shadow\x01Medallion\x05\x40!\x01Impa awakens as a Sage and\x01adds her power to yours!",
0x0042: "\x08\x13\x14You got an \x05\x41Empty Bottle\x05\x40!\x01You can put something in this\x01bottle.",
0x0043: "\x08\x13\x15You got a \x05\x41Red Potion\x05\x40!\x01It will restore your health",
0x0044: "\x08\x13\x16You got a \x05\x42Green Potion\x05\x40!\x01It will restore your magic.",
0x0045: "\x08\x13\x17You got a \x05\x43Blue Potion\x05\x40!\x01It will recover your health\x01and magic.",
0x0046: "\x08\x13\x18You caught a \x05\x41Fairy\x05\x40 in a bottle!\x01It will revive you\x01the moment you run out of life \x01energy.",
0x0047: "\x08\x13\x19You got a \x05\x41Fish\x05\x40!\x01It looks so fresh and\x01delicious!",
0x0048: "\x08\x13\x10You got a \x05\x41Magic Bean\x05\x40!\x01Find a suitable spot for a garden\x01and plant it.",
0x004A: "\x08\x13\x07You received the \x05\x41Fairy Ocarina\x05\x40!\x01This is a memento from Saria.",
0x004B: "\x08\x13\x3DYou got the \x05\x42Giant's Knife\x05\x40!\x01Hold it with both hands to\x01attack! It's so long, you\x01can't use it with a \x05\x44shield\x05\x40.",
0x004C: "\x08\x13\x3EYou got a \x05\x44Deku Shield\x05\x40!",
0x004D: "\x08\x13\x3FYou got a \x05\x44Hylian Shield\x05\x40!",
0x004E: "\x08\x13\x40You found the \x05\x44Mirror Shield\x05\x40!\x01The shield's polished surface can\x01reflect light or energy.",
0x004F: "\x08\x13\x0BYou found the \x05\x41Longshot\x05\x40!\x01It's an upgraded Hookshot.\x01It extends \x05\x41twice\x05\x40 as far!",
0x0050: "\x08\x13\x42You got a \x05\x41Goron Tunic\x05\x40!\x01Going to a hot place? No worry!",
0x0051: "\x08\x13\x43You got a \x05\x43Zora Tunic\x05\x40!\x01Wear it, and you won't drown\x01underwater.",
0x0052: "\x08You got a \x05\x42Magic Jar\x05\x40!\x01Your Magic Meter is filled!",
0x0053: "\x08\x13\x45You got the \x05\x41Iron Boots\x05\x40!\x01So heavy, you can't run.\x01So heavy, you can't float.",
0x0054: "\x08\x13\x46You got the \x05\x41Hover Boots\x05\x40!\x01With these mysterious boots\x01you can hover above the ground.",
0x0055: "\x08You got a \x05\x45Recovery Heart\x05\x40!\x01Your life energy is recovered!",
0x0056: "\x08\x13\x4BYou upgraded your quiver to a\x01\x05\x41Big Quiver\x05\x40!\x01Now you can carry more arrows-\x01\x05\x4640 \x05\x40in total!",
0x0057: "\x08\x13\x4BYou upgraded your quiver to\x01the \x05\x41Biggest Quiver\x05\x40!\x01Now you can carry even more \x01arrows, to a maximum of \x05\x4650\x05\x40!",
0x0058: "\x08\x13\x4DYou found a \x05\x41Bomb Bag\x05\x40!\x01You found \x05\x4120 Bombs\x05\x40 inside!",
0x0059: "\x08\x13\x4EYou got a \x05\x41Big Bomb Bag\x05\x40!\x01Now you can carry more \x01Bombs, up to a maximum of \x05\x4630\x05\x40!",
0x005A: "\x08\x13\x4FYou got the \x01\x05\x41Biggest Bomb Bag\x05\x40!\x01Now, you can carry up to \x01\x05\x4640\x05\x40 Bombs!",
0x005B: "\x08\x13\x51You found the \x05\x43Silver Gauntlets\x05\x40!\x01You feel the power to lift\x01big things with it!",
0x005C: "\x08\x13\x52You found the \x05\x43Golden Gauntlets\x05\x40!\x01You can feel even more power\x01coursing through your arms!",
0x005D: "\x08\x13\x1CYou put a \x05\x44Blue Fire\x05\x40\x01into the bottle!\x01This is a cool flame you can\x01use on red ice.",
0x005E: "\x08\x13\x56You got an \x05\x43Adult's Wallet\x05\x40!\x01Now you can hold\x01up to \x05\x46200\x05\x40 \x05\x46Rupees\x05\x40.",
0x005F: "\x08\x13\x57You got a \x05\x43Giant's Wallet\x05\x40!\x01Now you can hold\x01up to \x05\x46500\x05\x40 \x05\x46Rupees\x05\x40.",
0x0060: "\x08\x13\x77You found a \x05\x41Small Key\x05\x40!\x01This key will open a locked \x01door. You can use it only\x01in this dungeon.",
0x0066: "\x08\x13\x76You found the \x05\x41Dungeon Map\x05\x40!\x01It's the map to this dungeon.",
0x0067: "\x08\x13\x75You found the \x05\x41Compass\x05\x40!\x01Now you can see the locations\x01of many hidden things in the\x01dungeon!",
0x0068: "\x08\x13\x6FYou obtained the \x05\x41Stone of Agony\x05\x40!\x01If you equip a \x05\x44Rumble Pak\x05\x40, it\x01will react to nearby...secrets.",
0x0069: "\x08\x13\x23You received \x05\x41Zelda's Letter\x05\x40!\x01Wow! This letter has Princess\x01Zelda's autograph!",
0x006C: "\x08\x13\x49Your \x05\x41Deku Seeds Bullet Bag \x01\x05\x40has become bigger!\x01This bag can hold \x05\x4650\x05\x41 \x05\x40bullets!",
0x006F: "\x08You got a \x05\x42Green Rupee\x05\x40!\x01That's \x05\x42one Rupee\x05\x40!",
0x0070: "\x08\x13\x04You got the \x05\x41Fire Arrow\x05\x40!\x01If you hit your target,\x01it will catch fire.",
0x0071: "\x08\x13\x0CYou got the \x05\x43Ice Arrow\x05\x40!\x01If you hit your target,\x01it will freeze.",
0x0072: "\x08\x13\x12You got the \x05\x44Light Arrow\x05\x40!\x01The light of justice\x01will smite evil!",
0x0073: "\x08\x06\x28You have learned the\x01\x06\x2F\x05\x42Minuet of Forest\x05\x40!",
0x0074: "\x08\x06\x28You have learned the\x01\x06\x37\x05\x41Bolero of Fire\x05\x40!",
0x0075: "\x08\x06\x28You have learned the\x01\x06\x29\x05\x43Serenade of Water\x05\x40!",
0x0076: "\x08\x06\x28You have learned the\x01\x06\x2D\x05\x46Requiem of Spirit\x05\x40!",
0x0077: "\x08\x06\x28You have learned the\x01\x06\x28\x05\x45Nocturne of Shadow\x05\x40!",
0x0078: "\x08\x06\x28You have learned the\x01\x06\x32\x05\x44Prelude of Light\x05\x40!",
0x0079: "\x08\x13\x50You got the \x05\x41Goron's Bracelet\x05\x40!\x01Now you can pull up Bomb\x01Flowers.",
0x007A: "\x08\x13\x1DYou put a \x05\x41Bug \x05\x40in the bottle!\x01This kind of bug prefers to\x01live in small holes in the ground.",
0x007B: "\x08\x13\x70You obtained the \x05\x41Gerudo's \x01Membership Card\x05\x40!\x01You can get into the Gerudo's\x01training ground in their hideout.",
0x0080: "\x08\x13\x6CYou got the \x05\x42Kokiri's Emerald\x05\x40!\x01This is the Spiritual Stone of \x01the Forest, now entrusted to \x01you by the Great Deku Tree.",
0x0081: "\x08\x13\x6DYou obtained the \x05\x41Goron's Ruby\x05\x40!\x01This is the Spiritual Stone of \x01Fire passed down by the Gorons!",
0x0082: "\x08\x13\x6EYou obtained \x05\x43Zora's Sapphire\x05\x40!\x01This is the Spiritual Stone of\x01Water passed down by the\x01Zoras!",
0x0090: "\x08\x13\x00Now you can pick up \x01many \x05\x41Deku Sticks\x05\x40!\x01You can carry up to \x05\x4620\x05\x40 of them!",
0x0091: "\x08\x13\x00You can now pick up \x01even more \x05\x41Deku Sticks\x05\x40!\x01You can carry up to \x05\x4630\x05\x40 of them!",
0x0097: "\x08\x13\x20You caught a \x05\x41Poe \x05\x40in a bottle!\x01Something good might happen!",
0x0098: "\x08\x13\x1AYou got \x05\x41Lon Lon Milk\x05\x40!\x01This milk is very nutritious!\x01There are two drinks in it.",
0x0099: "\x08\x13\x1BYou found \x05\x41Ruto's Letter\x05\x40 in a\x01bottle! Show it to King Zora.",
0x9099: "\x08\x13\x1BYou found \x05\x41a letter in a bottle\x05\x40!\x01You remove the letter from the\x01bottle, freeing it for other uses.",
0x009A: "\x08\x13\x21You got a \x05\x41Weird Egg\x05\x40!\x01Feels like there's something\x01moving inside!",
0x00A4: "\x08\x13\x3BYou got the \x05\x42Kokiri Sword\x05\x40!\x01This is a hidden treasure of\x01the Kokiri.",
0x00A7: "\x08\x13\x01Now you can carry\x01many \x05\x41Deku Nuts\x05\x40!\x01You can hold up to \x05\x4630\x05\x40 nuts!",
0x00A8: "\x08\x13\x01You can now carry even\x01more \x05\x41Deku Nuts\x05\x40! You can carry\x01up to \x05\x4640\x05\x41 \x05\x40nuts!",
0x00AD: "\x08\x13\x05You got \x05\x41Din's Fire\x05\x40!\x01Its fireball engulfs everything!",
0x00AE: "\x08\x13\x0DYou got \x05\x42Farore's Wind\x05\x40!\x01This is warp magic you can use!",
0x00AF: "\x08\x13\x13You got \x05\x43Nayru's Love\x05\x40!\x01Cast this to create a powerful\x01protective barrier.",
0x00B4: "\x08You got a \x05\x41Gold Skulltula Token\x05\x40!\x01You've collected \x05\x41\x19\x05\x40 tokens in total.",
0x00B5: "\x08You destroyed a \x05\x41Gold Skulltula\x05\x40.\x01You got a token proving you \x01destroyed it!", #Unused
0x00C2: "\x08\x13\x73You got a \x05\x41Piece of Heart\x05\x40!\x01Collect four pieces total to get\x01another Heart Container.",
0x00C3: "\x08\x13\x73You got a \x05\x41Piece of Heart\x05\x40!\x01So far, you've collected two \x01pieces.",
0x00C4: "\x08\x13\x73You got a \x05\x41Piece of Heart\x05\x40!\x01Now you've collected three \x01pieces!",
0x00C5: "\x08\x13\x73You got a \x05\x41Piece of Heart\x05\x40!\x01You've completed another Heart\x01Container!",
0x00C6: "\x08\x13\x72You got a \x05\x41Heart Container\x05\x40!\x01Your maximum life energy is \x01increased by one heart.",
0x00C7: "\x08\x13\x74You got the \x05\x41Boss Key\x05\x40!\x01Now you can get inside the \x01chamber where the Boss lurks.",
0x9002: "\x08You are a \x05\x43FOOL\x05\x40!",
0x00CC: "\x08You got a \x05\x43Blue Rupee\x05\x40!\x01That's \x05\x43five Rupees\x05\x40!",
0x00CD: "\x08\x13\x53You got the \x05\x43Silver Scale\x05\x40!\x01You can dive deeper than you\x01could before.",
0x00CE: "\x08\x13\x54You got the \x05\x43Golden Scale\x05\x40!\x01Now you can dive much\x01deeper than you could before!",
0x00D1: "\x08\x06\x14You've learned \x05\x42Saria's Song\x05\x40!",
0x00D2: "\x08\x06\x11You've learned \x05\x41Epona's Song\x05\x40!",
0x00D3: "\x08\x06\x0BYou've learned the \x05\x46Sun's Song\x05\x40!",
0x00D4: "\x08\x06\x15You've learned \x05\x43Zelda's Lullaby\x05\x40!",
0x00D5: "\x08\x06\x05You've learned the \x05\x44Song of Time\x05\x40!",
0x00D6: "\x08You've learned the \x05\x45Song of Storms\x05\x40!",
0x00DC: "\x08\x13\x58You got \x05\x41Deku Seeds\x05\x40!\x01Use these as bullets\x01for your Slingshot.",
0x00DD: "\x08You mastered the secret sword\x01technique of the \x05\x41Spin Attack\x05\x40!",
0x00E4: "\x08You can now use \x05\x42Magic\x05\x40!",
0x00E5: "\x08Your \x05\x44defensive power\x05\x40 is enhanced!",
0x00E6: "\x08You got a \x05\x46bundle of arrows\x05\x40!",
0x00E8: "\x08Your magic power has been \x01enhanced! Now you have twice\x01as much \x05\x41Magic Power\x05\x40!",
0x00E9: "\x08Your defensive power has been \x01enhanced! Damage inflicted by \x01enemies will be \x05\x41reduced by half\x05\x40.",
0x00F0: "\x08You got a \x05\x41Red Rupee\x05\x40!\x01That's \x05\x41twenty Rupees\x05\x40!",
0x00F1: "\x08You got a \x05\x45Purple Rupee\x05\x40!\x01That's \x05\x45fifty Rupees\x05\x40!",
0x00F2: "\x08You got a \x05\x46Huge Rupee\x05\x40!\x01This Rupee is worth a whopping\x01\x05\x46two hundred Rupees\x05\x40!",
0x00F9: "\x08\x13\x1EYou put a \x05\x41Big Poe \x05\x40in a bottle!\x01Let's sell it at the \x05\x41Ghost Shop\x05\x40!\x01Something good might happen!",
}
KEYSANITY_MESSAGES = {
0x001C: "\x13\x74\x08You got the \x05\x41Boss Key\x05\x40\x01for the \x05\x41Fire Temple\x05\x40!\x09",
0x0006: "\x13\x74\x08You got the \x05\x41Boss Key\x05\x40\x01for the \x05\x42Forest Temple\x05\x40!\x09",
0x001D: "\x13\x74\x08You got the \x05\x41Boss Key\x05\x40\x01for the \x05\x43Water Temple\x05\x40!\x09",
0x001E: "\x13\x74\x08You got the \x05\x41Boss Key\x05\x40\x01for the \x05\x46Spirit Temple\x05\x40!\x09",
0x002A: "\x13\x74\x08You got the \x05\x41Boss Key\x05\x40\x01for the \x05\x45Shadow Temple\x05\x40!\x09",
0x0061: "\x13\x74\x08You got the \x05\x41Boss Key\x05\x40\x01for \x05\x41Ganon's Castle\x05\x40!\x09",
0x0062: "\x13\x75\x08You found the \x05\x41Compass\x05\x40\x01for the \x05\x42Deku Tree\x05\x40!\x09",
0x0063: "\x13\x75\x08You found the \x05\x41Compass\x05\x40\x01for \x05\x41Dodongo's Cavern\x05\x40!\x09",
0x0064: "\x13\x75\x08You found the \x05\x41Compass\x05\x40\x01for \x05\x43Jabu Jabu's Belly\x05\x40!\x09",
0x0065: "\x13\x75\x08You found the \x05\x41Compass\x05\x40\x01for the \x05\x42Forest Temple\x05\x40!\x09",
0x007C: "\x13\x75\x08You found the \x05\x41Compass\x05\x40\x01for the \x05\x41Fire Temple\x05\x40!\x09",
0x007D: "\x13\x75\x08You found the \x05\x41Compass\x05\x40\x01for the \x05\x43Water Temple\x05\x40!\x09",
0x007E: "\x13\x75\x08You found the \x05\x41Compass\x05\x40\x01for the \x05\x46Spirit Temple\x05\x40!\x09",
0x007F: "\x13\x75\x08You found the \x05\x41Compass\x05\x40\x01for the \x05\x45Shadow Temple\x05\x40!\x09",
0x0087: "\x13\x75\x08You found the \x05\x41Compass\x05\x40\x01for the \x05\x44Ice Cavern\x05\x40!\x09",
0x0088: "\x13\x76\x08You found the \x05\x41Dungeon Map\x05\x40\x01for the \x05\x42Deku Tree\x05\x40!\x09",
0x0089: "\x13\x76\x08You found the \x05\x41Dungeon Map\x05\x40\x01for \x05\x41Dodongo's Cavern\x05\x40!\x09",
0x008A: "\x13\x76\x08You found the \x05\x41Dungeon Map\x05\x40\x01for \x05\x43Jabu Jabu's Belly\x05\x40!\x09",
0x008B: "\x13\x76\x08You found the \x05\x41Dungeon Map\x05\x40\x01for the \x05\x42Forest Temple\x05\x40!\x09",
0x008C: "\x13\x76\x08You found the \x05\x41Dungeon Map\x05\x40\x01for the \x05\x41Fire Temple\x05\x40!\x09",
0x008E: "\x13\x76\x08You found the \x05\x41Dungeon Map\x05\x40\x01for the \x05\x43Water Temple\x05\x40!\x09",
0x008F: "\x13\x76\x08You found the \x05\x41Dungeon Map\x05\x40\x01for the \x05\x46Spirit Temple\x05\x40!\x09",
0x0092: "\x13\x76\x08You found the \x05\x41Dungeon Map\x05\x40\x01for the \x05\x44Ice Cavern\x05\x40!\x09",
0x0093: "\x13\x77\x08You found a \x05\x41Small Key\x05\x40\x01for the \x05\x42Forest Temple\x05\x40!\x09",
0x0094: "\x13\x77\x08You found a \x05\x41Small Key\x05\x40\x01for the \x05\x41Fire Temple\x05\x40!\x09",
0x0095: "\x13\x77\x08You found a \x05\x41Small Key\x05\x40\x01for the \x05\x43Water Temple\x05\x40!\x09",
0x009B: "\x13\x77\x08You found a \x05\x41Small Key\x05\x40\x01for the \x05\x45Bottom of the Well\x05\x40!\x09",
0x009F: "\x13\x77\x08You found a \x05\x41Small Key\x05\x40\x01for the \x05\x46Gerudo Training\x01Grounds\x05\x40!\x09",
0x00A0: "\x13\x77\x08You found a \x05\x41Small Key\x05\x40\x01for the \x05\x46Gerudo's Fortress\x05\x40!\x09",
0x00A1: "\x13\x77\x08You found a \x05\x41Small Key\x05\x40\x01for \x05\x41Ganon's Castle\x05\x40!\x09",
0x00A2: "\x13\x75\x08You found the \x05\x41Compass\x05\x40\x01for the \x05\x45Bottom of the Well\x05\x40!\x09",
0x00A3: "\x13\x76\x08You found the \x05\x41Dungeon Map\x05\x40\x01for the \x05\x45Shadow Temple\x05\x40!\x09",
0x00A5: "\x13\x76\x08You found the \x05\x41Dungeon Map\x05\x40\x01for the \x05\x45Bottom of the Well\x05\x40!\x09",
0x00A6: "\x13\x77\x08You found a \x05\x41Small Key\x05\x40\x01for the \x05\x46Spirit Temple\x05\x40!\x09",
0x00A9: "\x13\x77\x08You found a \x05\x41Small Key\x05\x40\x01for the \x05\x45Shadow Temple\x05\x40!\x09",
}
MISC_MESSAGES = {
0x507B: (bytearray(
b"\x08I tell you, I saw him!\x04" \
b"\x08I saw the ghostly figure of Damp\x96\x01" \
b"the gravekeeper sinking into\x01" \
b"his grave. It looked like he was\x01" \
b"holding some kind of \x05\x41treasure\x05\x40!\x02"
), None),
0x0422: ("They say that once \x05\x41Morpha's Curse\x05\x40\x01is lifted, striking \x05\x42this stone\x05\x40 can\x01shift the tides of \x05\x44Lake Hylia\x05\x40.\x02", 0x23),
}
# convert byte array to an integer
def bytes_to_int(bytes, signed=False):
    """Interpret a big-endian byte sequence (bytes, bytearray, or a list of
    ints) as an integer, optionally as two's-complement signed."""
    value = 0
    for byte in bytes:
        value = (value << 8) | byte
    # two's complement: a set high bit in the first byte makes it negative
    if signed and len(bytes) > 0 and (bytes[0] & 0x80):
        value -= 1 << (8 * len(bytes))
    return value
# convert int to an array of bytes of the given width
def int_to_bytes(num, width, signed=False):
    """Encode *num* as exactly *width* big-endian bytes."""
    return num.to_bytes(width, byteorder='big', signed=signed)
def display_code_list(codes):
    """Concatenate the display form (str()) of every code in *codes*.

    Uses str.join instead of repeated string concatenation, which is
    quadratic in the worst case.
    """
    return ''.join(str(code) for code in codes)
# holds a single character or control code of a string
class Text_Code():
    """One decoded element of a message: a printable character, a special
    character, or a control code together with its argument bytes."""

    def display(self):
        """Render this code for human-readable message dumps."""
        if self.code in CONTROL_CODES:
            return CONTROL_CODES[self.code][2](self.data)
        if self.code in SPECIAL_CHARACTERS:
            return SPECIAL_CHARACTERS[self.code]
        if self.code >= 0x7F:
            return '?'
        return chr(self.code)

    def get_python_string(self):
        """Render this code as \\xNN escapes suitable for pasting back into
        a Python string literal (plain printable characters stay literal)."""
        if self.code in CONTROL_CODES:
            arg_count = CONTROL_CODES[self.code][1]
            # opcode first, then its argument bytes from most to least significant
            parts = ['\\x%02X' % self.code]
            parts += ['\\x%02X' % ((self.data >> (8 * place)) & 0xFF)
                      for place in reversed(range(arg_count))]
            return ''.join(parts)
        if self.code in SPECIAL_CHARACTERS:
            return '\\x%02X' % self.code
        if self.code >= 0x7F:
            return '?'
        return chr(self.code)

    def size(self):
        """Number of bytes this code occupies when encoded in the rom."""
        if self.code in CONTROL_CODES:
            return 1 + CONTROL_CODES[self.code][1]
        return 1

    # writes the code to the given offset, and returns the offset of the next byte
    def write(self, rom, offset):
        """Encode this code into the rom's text region at *offset*."""
        rom.write_byte(TEXT_START + offset, self.code)
        arg_len = 0
        if self.code in CONTROL_CODES:
            arg_len = CONTROL_CODES[self.code][1]
            rom.write_bytes(TEXT_START + offset + 1, int_to_bytes(self.data, arg_len))
        return offset + 1 + arg_len

    def __init__(self, code, data):
        self.code = code
        self.data = data
        # control codes carry a descriptive type name; everything else is a character
        self.type = CONTROL_CODES[code][0] if code in CONTROL_CODES else 'character'

    __str__ = __repr__ = display
# holds a single message, and all its data
class Message():

    def display(self):
        """Return a one-line metadata summary followed by the decoded text."""
        meta_data = ["#" + str(self.index),
                "ID: 0x" + "{:04x}".format(self.id),
                "Offset: 0x" + "{:06x}".format(self.offset),
                "Length: 0x" + "{:04x}".format(self.unpadded_length) + "/0x" + "{:04x}".format(self.length),
                "Box Type: " + str(self.box_type),
                # NOTE(review): "Postion" typo is a runtime string, left unchanged
                "Postion: " + str(self.position)]
        return ', '.join(meta_data) + '\n' + self.text

    def get_python_string(self):
        """Return the whole message as a Python-source escape string."""
        ret = ''
        for code in self.text_codes:
            ret = ret + code.get_python_string()
        return ret

    # check if this is an unused message that just contains it's own id as text
    def is_id_message(self):
        # such messages are exactly four hex digit characters plus the end code
        if self.unpadded_length == 5:
            for i in range(4):
                code = self.text_codes[i].code
                # accept only the ASCII hex digits 0-9, A-F, a-f
                if not (code in range(ord('0'),ord('9')+1) or code in range(ord('A'),ord('F')+1) or code in range(ord('a'),ord('f')+1) ):
                    return False
            return True
        return False

    def parse_text(self):
        """Decode raw_text into Text_Code objects.

        Also records which special codes appear (goto, keep-open, event,
        fade, ocarina, two/three choice) and remembers the last
        ending-style code seen in self.ending.
        """
        self.text_codes = []

        index = 0
        while index < self.length:
            next_char = self.raw_text[index]
            data = 0
            index += 1
            if next_char in CONTROL_CODES:
                # control codes may carry extra argument bytes; consume them too
                extra_bytes = CONTROL_CODES[next_char][1]
                if extra_bytes > 0:
                    data = bytes_to_int(self.raw_text[index : index + extra_bytes])
                    index += extra_bytes
            text_code = Text_Code(next_char, data)
            self.text_codes.append(text_code)
            if next_char == 0x02: # message end code
                break
            if next_char == 0x07: # goto
                self.has_goto = True
                self.ending = text_code
            if next_char == 0x0A: # keep-open
                self.has_keep_open = True
                self.ending = text_code
            if next_char == 0x0B: # event
                self.has_event = True
                self.ending = text_code
            if next_char == 0x0E: # fade out
                self.has_fade = True
                self.ending = text_code
            if next_char == 0x10: # ocarina
                self.has_ocarina = True
                self.ending = text_code
            if next_char == 0x1B: # two choice
                self.has_two_choice = True
            if next_char == 0x1C: # three choice
                self.has_three_choice = True
        self.text = display_code_list(self.text_codes)
        # unpadded_length is the number of bytes actually consumed (end code included)
        self.unpadded_length = index

    def is_basic(self):
        """True when the message has no special ending or choice codes."""
        return not (self.has_goto or self.has_keep_open or self.has_event or self.has_fade or self.has_ocarina or self.has_two_choice or self.has_three_choice)

    # computes the number of bytes write() below would emit for this message
    # (must be kept in sync with write(); same filtering and padding rules)
    def size(self, replace_ending=False, ending=None, always_allow_skip=True, speed_up_text=True):
        size = 0

        ending_codes = [0x02, 0x07, 0x0A, 0x0B, 0x0E, 0x10]
        box_breaks = [0x04, 0x0C]
        slows_text = [0x08, 0x09, 0x14]

        # # speed the text
        if speed_up_text:
            size += 1

        # write the message
        for code in self.text_codes:
            # ignore ending codes if it's going to be replaced
            if replace_ending and code.code in ending_codes:
                pass
            # ignore the "make unskippable flag"
            elif always_allow_skip and code.code == 0x1A:
                pass
            # ignore anything that slows down text
            elif speed_up_text and code.code in slows_text:
                pass
            elif speed_up_text and code.code in box_breaks:
                # write() replaces the break with an un-delayed break + allow-instant pair
                size += 2
            else:
                size += code.size()

        if replace_ending:
            if ending:
                if speed_up_text and ending.code == 0x10: # ocarina
                    size += 1
                size += ending.size() # write special ending
            size += 1

        # messages are padded to a 4-byte boundary
        while size % 4 > 0:
            size += 1

        return size

    # writes a Message back into the rom, using the given index and offset to update the table
    # returns the offset of the next message
    def write(self, rom, index, offset, replace_ending=False, ending=None, always_allow_skip=True, speed_up_text=True, bank=0x07):

        # construct the table entry: id (2), opts (1), 0x00, bank (1), offset (3)
        id_bytes = int_to_bytes(self.id, 2)
        offset_bytes = int_to_bytes(offset, 3)
        entry = id_bytes + bytes([self.opts, 0x00, bank]) + offset_bytes
        # write it back
        entry_offset = TABLE_START + 8 * index
        rom.write_bytes(entry_offset, entry)

        ending_codes = [0x02, 0x07, 0x0A, 0x0B, 0x0E, 0x10]
        box_breaks = [0x04, 0x0C]
        slows_text = [0x08, 0x09, 0x14]

        # # speed the text
        if speed_up_text:
            offset = Text_Code(0x08, 0).write(rom, offset) # allow instant

        # write the message
        for code in self.text_codes:
            # ignore ending codes if it's going to be replaced
            if replace_ending and code.code in ending_codes:
                pass
            # ignore the "make unskippable flag"
            elif always_allow_skip and code.code == 0x1A:
                pass
            # ignore anything that slows down text
            elif speed_up_text and code.code in slows_text:
                pass
            elif speed_up_text and code.code in box_breaks:
                if self.id == 0x605A: #special case for twinrova text
                    offset = code.write(rom, offset)
                else:
                    offset = Text_Code(0x04, 0).write(rom, offset) # un-delayed break
                    offset = Text_Code(0x08, 0).write(rom, offset) # allow instant
            else:
                offset = code.write(rom, offset)

        if replace_ending:
            if ending:
                if speed_up_text and ending.code == 0x10: # ocarina
                    offset = Text_Code(0x09, 0).write(rom, offset) # disallow instant text
                offset = ending.write(rom, offset) # write special ending
            offset = Text_Code(0x02, 0).write(rom, offset) # write end code

        while offset % 4 > 0:
            offset = Text_Code(0x00, 0).write(rom, offset) # pad to 4 byte align

        return offset

    def __init__(self, raw_text, index, id, opts, offset, length):
        self.raw_text = raw_text      # encoded bytes (or list of ints) for this message
        self.index = index            # position in the messages list
        self.id = id                  # in-game text id
        self.opts = opts              # packed box type (high nibble) and position (low nibble)
        self.box_type = (self.opts & 0xF0) >> 4
        self.position = (self.opts & 0x0F)
        self.offset = offset          # offset of the text within the text blob
        self.length = length          # allotted length including padding

        # feature flags filled in by parse_text()
        self.has_goto = False
        self.has_keep_open = False
        self.has_event = False
        self.has_fade = False
        self.has_ocarina = False
        self.has_two_choice = False
        self.has_three_choice = False
        self.ending = None

        self.parse_text()

    # read a single message from rom
    @classmethod
    def from_rom(cls, rom, index):
        """Build a Message from table entry *index* of the rom's message table."""
        entry_offset = TABLE_START + 8 * index
        entry = rom.read_bytes(entry_offset, 8)
        # NOTE(review): "next" shadows the builtin; it is the following table entry,
        # whose offset determines this message's allotted length
        next = rom.read_bytes(entry_offset + 8, 8)

        id = bytes_to_int(entry[0:2])
        opts = entry[2]
        offset = bytes_to_int(entry[5:8])
        length = bytes_to_int(next[5:8]) - offset

        raw_text = rom.read_bytes(TEXT_START + offset, length)

        return cls(raw_text, index, id, opts, offset, length)

    @classmethod
    def from_string(cls, text, id=0, opts=0x00):
        """Build a Message from a UTF-8 string, appending the 0x02 end code."""
        bytes = list(text.encode('utf-8')) + [0x02]

        # NOTE(review): length is len(bytes) + 1, one past the data's end;
        # parse_text stops at the 0x02 end code first so the overshoot is
        # harmless — confirm before changing
        return cls(bytes, 0, id, opts, 0, len(bytes) + 1)

    @classmethod
    def from_bytearray(cls, bytearray, id=0, opts=0x00):
        """Build a Message from pre-encoded bytes, appending the 0x02 end code."""
        bytes = list(bytearray) + [0x02]

        return cls(bytes, 0, id, opts, 0, len(bytes) + 1)

    __str__ = __repr__ = display
# wrapper for updating the text of a message, given its message id
# if the id does not exist in the list, then it will add it
def update_message_by_id(messages, id, text, opts=None):
    """Update the message with the given id, or append a new one if absent.

    opts=None means "keep the existing opts" when updating. When a new
    message must be created there is nothing to keep, so fall back to the
    default box options (0x00) instead of forwarding None — Message.__init__
    computes self.opts & 0xF0, which would raise TypeError on None.
    """
    # get the message index
    index = next((m.index for m in messages if m.id == id), -1)
    # update if it was found
    if index >= 0:
        update_message_by_index(messages, index, text, opts)
    else:
        add_message(messages, text, id, 0x00 if opts is None else opts)
# Gets the message by its ID. Returns None if the index does not exist
def get_message_by_id(messages, id):
    """Return the first message whose id matches, or None if there is none.

    Returns the matched message directly instead of re-indexing the list
    through the message's stored .index attribute, so the result is correct
    even if an .index has gone stale relative to the list position.
    """
    return next((m for m in messages if m.id == id), None)
# wrapper for updating the text of a message, given its index in the list
def update_message_by_index(messages, index, text, opts=None):
    """Rebuild messages[index] from *text* (str or bytearray), keeping its
    id and list position; opts=None keeps the existing box options."""
    old = messages[index]
    if opts is None:
        opts = old.opts
    build = Message.from_bytearray if isinstance(text, bytearray) else Message.from_string
    replacement = build(text, old.id, opts)
    replacement.index = index
    messages[index] = replacement
# wrapper for adding a string message to a list of messages
def add_message(messages, text, id=0, opts=0x00):
    """Append a new message built from *text* (str or bytearray), keeping
    its .index in sync with its position in the list."""
    build = Message.from_bytearray if isinstance(text, bytearray) else Message.from_string
    message = build(text, id, opts)
    message.index = len(messages)
    messages.append(message)
# holds a row in the shop item table (which contains pointers to the description and purchase messages)
class Shop_Item():

    def display(self):
        """Return a readable two-line dump of this table row's fields."""
        meta_data = ["#" + str(self.index),
            "Item: 0x" + "{:04x}".format(self.get_item_id),
            "Price: " + str(self.price),
            "Amount: " + str(self.pieces),
            "Object: 0x" + "{:04x}".format(self.object),
            "Model: 0x" + "{:04x}".format(self.model),
            "Description: 0x" + "{:04x}".format(self.description_message),
            "Purchase: 0x" + "{:04x}".format(self.purchase_message),]
        func_data = [
            "func1: 0x" + "{:08x}".format(self.func1),
            "func2: 0x" + "{:08x}".format(self.func2),
            "func3: 0x" + "{:08x}".format(self.func3),
            "func4: 0x" + "{:08x}".format(self.func4),]
        return ', '.join(meta_data) + '\n' + ', '.join(func_data)

    # write the shop item back
    def write(self, rom, shop_table_address, index):
        """Serialize this row into its 0x20-byte slot at *index* of the table.

        Field order and widths mirror the slices used in __init__ below.
        """
        entry_offset = shop_table_address + 0x20 * index
        bytes = []
        bytes += int_to_bytes(self.object, 2)
        bytes += int_to_bytes(self.model, 2)
        bytes += int_to_bytes(self.func1, 4)
        bytes += int_to_bytes(self.price, 2)
        bytes += int_to_bytes(self.pieces, 2)
        bytes += int_to_bytes(self.description_message, 2)
        bytes += int_to_bytes(self.purchase_message, 2)
        bytes += [0x00, 0x00]  # 0x10-0x11: always-zero padding (see __init__)
        bytes += int_to_bytes(self.get_item_id, 2)
        bytes += int_to_bytes(self.func2, 4)
        bytes += int_to_bytes(self.func3, 4)
        bytes += int_to_bytes(self.func4, 4)
        rom.write_bytes(entry_offset, bytes)

    # read a single shop item row out of the table
    def __init__(self, rom, shop_table_address, index):
        """Parse the 0x20-byte row at *index* of the rom's shop item table."""
        entry_offset = shop_table_address + 0x20 * index
        entry = rom.read_bytes(entry_offset, 0x20)

        self.index = index
        self.object = bytes_to_int(entry[0x00:0x02])
        self.model = bytes_to_int(entry[0x02:0x04])
        self.func1 = bytes_to_int(entry[0x04:0x08])
        self.price = bytes_to_int(entry[0x08:0x0A])
        self.pieces = bytes_to_int(entry[0x0A:0x0C])
        self.description_message = bytes_to_int(entry[0x0C:0x0E])
        self.purchase_message = bytes_to_int(entry[0x0E:0x10])
        # 0x10-0x11 is always 0000 padded apparently
        self.get_item_id = bytes_to_int(entry[0x12:0x14])
        self.func2 = bytes_to_int(entry[0x14:0x18])
        self.func3 = bytes_to_int(entry[0x18:0x1C])
        self.func4 = bytes_to_int(entry[0x1C:0x20])

    __str__ = __repr__ = display
# Read all 100 entries of the shop item table into Shop_Item objects.
def read_shop_items(rom, shop_table_address):
    return [Shop_Item(rom, shop_table_address, slot) for slot in range(100)]
# Write every shop item entry back into the ROM table at its own slot.
def write_shop_items(rom, shop_table_address, shop_items):
    for item in shop_items:
        item.write(rom, shop_table_address, item.index)
# Unused shop item slots whose text ids are used elsewhere; their messages
# must not be moved or rewritten.
SHOP_ITEM_EXCEPTIONS = [0x0A, 0x0B, 0x11, 0x12, 0x13, 0x14, 0x29]

# Collect the set of every message id referenced by the non-exempt shop items.
def get_shop_message_id_set(shop_items):
    ids = set()
    for shop in shop_items:
        if shop.index in SHOP_ITEM_EXCEPTIONS:
            continue
        ids.update((shop.description_message, shop.purchase_message))
    return ids
# Drop the obviously-unused placeholder messages (those whose text is just
# their own id) to free space in the message index table, then renumber
# the survivors. Mutates the list in place.
def remove_unused_messages(messages):
    kept = [m for m in messages if not m.is_id_message()]
    for position, m in enumerate(kept):
        m.index = position
    messages[:] = kept
# Relocate the shop item messages living in the 0x00xx id range into the
# unused 0x80xx range, updating both the message objects and the shop item
# pointers that reference them.
def move_shop_item_messages(messages, shop_items):
    # an id is "in the item range" when its high byte is zero
    def is_in_item_range(id):
        return int_to_bytes(id, 2)[0] == 0x00

    # ids that need relocating
    moved_ids = {id for id in get_shop_message_id_set(shop_items) if is_in_item_range(id)}

    # retag every message carrying one of those ids
    # (normally one message per id, but tolerate duplicates regardless)
    for message in messages:
        if message.id in moved_ids:
            message.id |= 0x8000

    # point the shop items at the relocated ids
    for shop in shop_items:
        if is_in_item_range(shop.description_message):
            shop.description_message |= 0x8000
        if is_in_item_range(shop.purchase_message):
            shop.purchase_message |= 0x8000
# Rewrite an item text so it names the player instead of addressing "you".
# Only the first occurrence of "you" is substituted; common verbs are then
# shortened so the longer player names still fit the textboxes.
def make_player_message(text):
    # control-code sequence that renders the player's name in-game
    player_text = '\x05\x42\x0F\x05\x40'
    pronoun_mapping = {
        "You have ":    player_text + " ",
        "You are ":     player_text + " is ",
        "You've ":      player_text + " ",
        "Your ":        player_text + "'s ",
        "You ":         player_text + " ",

        "you have ":    player_text + " ",
        "you are ":     player_text + " is ",
        "you've ":      player_text + " ",
        "your ":        player_text + "'s ",
        "you ":         player_text + " ",
    }

    verb_mapping = {
        'obtained ': 'got ',
        'received ': 'got ',
        'learned ':  'got ',
        'borrowed ': 'got ',
        'found ':    'got ',
    }

    new_text = text

    # substitute the player's name for the first 'You'/'you' only
    first_you = text.lower().find('you')
    if first_you != -1:
        for pattern, replacement in pronoun_mapping.items():
            # only substitute when this pattern sits exactly at the first 'you'
            if text.find(pattern) == first_you:
                new_text = new_text.replace(pattern, replacement, 1)
                break

    # shorten verbs everywhere so the longer name still fits the textbox
    for pattern, replacement in verb_mapping.items():
        new_text = new_text.replace(pattern, replacement)

    return new_text
# Install the shortened item messages plus the keysanity/misc variants.
# Must run AFTER move_shop_item_messages() so the freed 00xx ids are usable.
def update_item_messages(messages, world):
    combined = {**ITEM_MESSAGES, **KEYSANITY_MESSAGES}
    # in multiworld games, address the player by name instead of "you"
    use_player_name = world.world_count > 1
    for id, text in combined.items():
        final_text = make_player_message(text) if use_player_name else text
        update_message_by_id(messages, id, final_text, 0x23)

    for id, (text, opt) in MISC_MESSAGES.items():
        update_message_by_id(messages, id, text, opt)
# run all keysanity related patching to add messages for dungeon specific items
# Relocates the shop item messages first so update_item_messages() can reuse
# the freed 00xx id range.
def add_item_messages(messages, shop_items, world):
    move_shop_item_messages(messages, shop_items)
    update_item_messages(messages, world)
# Read every message in the game into a list of Message objects, walking
# the message table until the 0xFFFF terminator entry.
def read_messages(rom):
    messages = []
    index = 0
    table_offset = TABLE_START
    while True:
        entry_id = bytes_to_int(rom.read_bytes(table_offset, 8)[0:2])
        if entry_id == 0xFFFF:
            # end-of-table marker
            break
        if entry_id == 0xFFFD:
            # dummy entry that only supplies an ending offset; skip it
            table_offset += 8
            continue
        messages.append(Message.from_rom(rom, index))
        index += 1
        table_offset += 8
    return messages
# write the messages back
# Repacks all message text into the ROM under an optional permutation
# (message at old_index gets the TEXT of the message at new_index, while
# keeping old_index's id), spilling into a second text bank when the first
# fills up, then rewrites the table terminator entries.
# Raises TypeError if the text or the id table overflows its size limit.
def repack_messages(rom, messages, permutation=None, always_allow_skip=True, speed_up_text=True):

    if permutation is None:
        permutation = range(len(messages))

    # repack messages
    offset = 0
    text_size_limit = ENG_TEXT_SIZE_LIMIT
    text_bank = 0x07

    for old_index, new_index in enumerate(permutation):
        old_message = messages[old_index]
        new_message = messages[new_index]
        # temporarily give the new text the old slot's id so it is written
        # under the old entry, then restore it afterwards
        remember_id = new_message.id
        new_message.id = old_message.id

        # check if there is space to write the message
        message_size = new_message.size(True, old_message.ending, always_allow_skip, speed_up_text)
        if message_size + offset > text_size_limit:
            # if there is no room then switch banks
            if text_bank == 0x07:
                text_size_limit = JPN_TEXT_SIZE_LIMIT
                text_bank = 0x08
                offset = 0

        # actually write the message
        offset = new_message.write(rom, old_index, offset, True, old_message.ending, always_allow_skip, speed_up_text, text_bank)

        new_message.id = remember_id

    # raise an exception if too much is written
    # we raise it at the end so that we know how much overflow there is
    if offset > text_size_limit:
        raise(TypeError("Message Text table is too large: 0x" + "{:x}".format(ENG_TEXT_SIZE_LIMIT + offset) + " written / 0x" + "{:x}".format(ENG_TEXT_SIZE_LIMIT + JPN_TEXT_SIZE_LIMIT) + " allowed."))

    # end the table
    # 0xFFFD entry records the final text offset; 0xFFFF entry terminates the table
    table_index = len(messages)
    entry = bytes([0xFF, 0xFD, 0x00, 0x00, 0x07]) + int_to_bytes(offset, 3)
    entry_offset = TABLE_START + 8 * table_index
    rom.write_bytes(entry_offset, entry)
    table_index += 1
    entry_offset = TABLE_START + 8 * table_index
    if 8 * (table_index + 1) > TABLE_SIZE_LIMIT:
        raise(TypeError("Message ID table is too large: 0x" + "{:x}".format(8 * (table_index + 1)) + " written / 0x" + "{:x}".format(TABLE_SIZE_LIMIT) + " allowed."))
    rom.write_bytes(entry_offset, [0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00])
# shuffles the messages in the game, making sure to keep various message types in their own group
# Messages with special behaviors (ocarina prompts, two/three-way choices) are
# only shuffled within their own group; hints and the error message can be
# exempted entirely. The shuffle is applied via repack_messages' permutation.
def shuffle_messages(rom, except_hints=True, always_allow_skip=True):

    messages = read_messages(rom)
    permutation = [i for i, _ in enumerate(messages)]

    # NOTE(review): shuffle_messages.shop_item_messages is a function attribute
    # assigned elsewhere — confirm it is set before this is called
    def is_exempt(m):
        hint_ids = (
            GOSSIP_STONE_MESSAGES + TEMPLE_HINTS_MESSAGES + LIGHT_ARROW_HINT +
            list(KEYSANITY_MESSAGES.keys()) + shuffle_messages.shop_item_messages
        )
        is_hint = (except_hints and m.id in hint_ids)
        is_error_message = (m.id == ERROR_MESSAGE)
        return (is_hint or is_error_message or m.is_id_message())

    # partition the shuffleable messages by their behavioral features
    have_goto          = list( filter(lambda m: not is_exempt(m) and m.has_goto, messages) )
    have_keep_open     = list( filter(lambda m: not is_exempt(m) and m.has_keep_open, messages) )
    have_event         = list( filter(lambda m: not is_exempt(m) and m.has_event, messages) )
    have_fade          = list( filter(lambda m: not is_exempt(m) and m.has_fade, messages) )
    have_ocarina       = list( filter(lambda m: not is_exempt(m) and m.has_ocarina, messages) )
    have_two_choice    = list( filter(lambda m: not is_exempt(m) and m.has_two_choice, messages) )
    have_three_choice  = list( filter(lambda m: not is_exempt(m) and m.has_three_choice, messages) )
    basic_messages     = list( filter(lambda m: not is_exempt(m) and m.is_basic(), messages) )

    # record a random within-group permutation into the global permutation
    def shuffle_group(group):
        group_permutation = [i for i, _ in enumerate(group)]
        random.shuffle(group_permutation)

        for index_from, index_to in enumerate(group_permutation):
            permutation[group[index_to].index] = group[index_from].index

    # need to use 'list' to force 'map' to actually run through
    list( map( shuffle_group, [
        have_goto + have_keep_open + have_event + have_fade + basic_messages,
        have_ocarina,
        have_two_choice,
        have_three_choice,
    ]))

    # write the messages back
    repack_messages(rom, messages, permutation, always_allow_skip, False)
| 52.892694 | 200 | 0.66053 |
import random
TABLE_START = 0xB849EC
TEXT_START = 0x92D000
TABLE_SIZE_LIMIT = 0x43A8
ENG_TEXT_SIZE_LIMIT = 0x38130
JPN_TEXT_SIZE_LIMIT = 0x3A150
CONTROL_CODES = {
0x00: ('pad', 0, lambda _: '<pad>' ),
0x01: ('line-break', 0, lambda _: '\n' ),
0x02: ('end', 0, lambda _: '' ),
0x04: ('box-break', 0, lambda _: '\n▼\n' ),
0x05: ('color', 1, lambda d: '<color ' + "{:02x}".format(d) + '>' ),
0x06: ('gap', 1, lambda d: '<' + str(d) + 'px gap>' ),
0x07: ('goto', 2, lambda d: '<goto ' + "{:04x}".format(d) + '>' ),
0x08: ('instant', 0, lambda _: '<allow instant text>' ),
0x09: ('un-instant', 0, lambda _: '<disallow instant text>' ),
0x0A: ('keep-open', 0, lambda _: '<keep open>' ),
0x0B: ('event', 0, lambda _: '<event>' ),
0x0C: ('box-break-delay', 1, lambda d: '\n▼<wait ' + str(d) + ' frames>\n' ),
0x0E: ('fade-out', 1, lambda d: '<fade after ' + str(d) + ' frames?>' ),
0x0F: ('name', 0, lambda _: '<name>' ),
0x10: ('ocarina', 0, lambda _: '<ocarina>' ),
0x12: ('sound', 2, lambda d: '<play SFX ' + "{:04x}".format(d) + '>' ),
0x13: ('icon', 1, lambda d: '<icon ' + "{:02x}".format(d) + '>' ),
0x14: ('speed', 1, lambda d: '<delay each character by ' + str(d) + ' frames>' ),
0x15: ('background', 3, lambda d: '<set background to ' + "{:06x}".format(d) + '>' ),
0x16: ('marathon', 0, lambda _: '<marathon time>' ),
0x17: ('race', 0, lambda _: '<race time>' ),
0x18: ('points', 0, lambda _: '<points>' ),
0x19: ('skulltula', 0, lambda _: '<skulltula count>' ),
0x1A: ('unskippable', 0, lambda _: '<text is unskippable>' ),
0x1B: ('two-choice', 0, lambda _: '<start two choice>' ),
0x1C: ('three-choice', 0, lambda _: '<start three choice>' ),
0x1D: ('fish', 0, lambda _: '<fish weight>' ),
0x1E: ('high-score', 1, lambda d: '<high-score ' + "{:02x}".format(d) + '>' ),
0x1F: ('time', 0, lambda _: '<current time>' ),
}
SPECIAL_CHARACTERS = {
0x96: 'é',
0x9F: '[A]',
0xA0: '[B]',
0xA1: '[C]',
0xA2: '[L]',
0xA3: '[R]',
0xA4: '[Z]',
0xA5: '[C Up]',
0xA6: '[C Down]',
0xA7: '[C Left]',
0xA8: '[C Right]',
0xA9: '[Triangle]',
0xAA: '[Control Stick]',
}
GOSSIP_STONE_MESSAGES = list( range(0x0401, 0x04FF) )
GOSSIP_STONE_MESSAGES += [0x2053, 0x2054]
TEMPLE_HINTS_MESSAGES = [0x7057, 0x707A]
LIGHT_ARROW_HINT = [0x70CC]
GS_TOKEN_MESSAGES = [0x00B4, 0x00B5] # Get Gold Skulltula Token messages
ERROR_MESSAGE = 0x0001
# messages for shorter item messages
# ids are in the space freed up by move_shop_item_messages()
ITEM_MESSAGES = {
0x0001: "\x08\x06\x30\x05\x41TEXT ID ERROR!\x05\x40",
0x9001: "\x08\x13\x2DYou borrowed a \x05\x41Pocket Egg\x05\x40!\x01A Pocket Cucco will hatch from\x01it overnight. Be sure to give it\x01back when you are done with it.",
0x0002: "\x08\x13\x2FYou returned the Pocket Cucco\x01and got \x05\x41Cojiro\x05\x40 in return!\x01Unlike other Cuccos, Cojiro\x01rarely crows.",
0x0003: "\x08\x13\x30You got an \x05\x41Odd Mushroom\x05\x40!\x01A fresh mushroom like this is\x01sure to spoil quickly! Take it to\x01the Kakariko Potion Shop, quickly!",
0x0004: "\x08\x13\x31You received an \x05\x41Odd Potion\x05\x40!\x01It may be useful for something...\x01Hurry to the Lost Woods!",
0x0005: "\x08\x13\x32You returned the Odd Potion \x01and got the \x05\x41Poacher's Saw\x05\x40!\x01The young punk guy must have\x01left this behind.",
0x0007: "\x08\x13\x48You got a \x01\x05\x41Deku Seeds Bullet Bag\x05\x40.\x01This bag can hold up to \x05\x4640\x05\x40\x01slingshot bullets.",
0x0008: "\x08\x13\x33You traded the Poacher's Saw \x01for a \x05\x41Broken Goron's Sword\x05\x40!\x01Visit Biggoron to get it repaired!",
0x0009: "\x08\x13\x34You checked in the Broken \x01Goron's Sword and received a \x01\x05\x41Prescription\x05\x40!\x01Go see King Zora!",
0x000A: "\x08\x13\x37The Biggoron's Sword...\x01You got a \x05\x41Claim Check \x05\x40for it!\x01You can't wait for the sword\x01to be completed!",
0x000B: "\x08\x13\x2EYou got a \x05\x41Pocket Cucco, \x05\x40one\x01of Anju's prized hens! It fits \x01in your pocket.",
0x000C: "\x08\x13\x3DYou handed in the Claim Check\x01and got the \x05\x41Biggoron's Sword\x05\x40!\x01This blade was forged by a \x01master smith and won't break!",
0x000D: "\x08\x13\x35You used the Prescription and\x01received an \x05\x41Eyeball Frog\x05\x40!\x01Be quick and deliver it to Lake \x01Hylia while it's cold!",
0x000E: "\x08\x13\x36You traded the Eyeball Frog \x01for the \x05\x41World's Finest Eye Drops\x05\x40!\x01Hurry! Take them to Biggoron\x01before they go bad!",
0x0010: "\x08\x13\x25You borrowed a \x05\x41Skull Mask\x05\x40.\x01You feel like a monster while you\x01wear this mask!",
0x0011: "\x08\x13\x26You borrowed a \x05\x41Spooky Mask\x05\x40.\x01You can scare many people\x01with this mask!",
0x0012: "\x08\x13\x24You borrowed a \x05\x41Keaton Mask\x05\x40.\x01You'll be a popular guy with\x01this mask on!",
0x0013: "\x08\x13\x27You borrowed a \x05\x41Bunny Hood\x05\x40.\x01The hood's long ears are so\x01cute!",
0x0014: "\x08\x13\x28You borrowed a \x05\x41Goron Mask\x05\x40.\x01It will make your head look\x01big, though.",
0x0015: "\x08\x13\x29You borrowed a \x05\x41Zora Mask\x05\x40.\x01With this mask, you can\x01become one of the Zoras!",
0x0016: "\x08\x13\x2AYou borrowed a \x05\x41Gerudo Mask\x05\x40.\x01This mask will make you look\x01like...a girl?",
0x0017: "\x08\x13\x2BYou borrowed a \x05\x41Mask of Truth\x05\x40.\x01Show it to many people!",
0x0030: "\x08\x13\x06You found the \x05\x41Fairy Slingshot\x05\x40!",
0x0031: "\x08\x13\x03You found the \x05\x41Fairy Bow\x05\x40!",
0x0032: "\x08\x13\x02You got \x05\x41Bombs\x05\x40!\x01If you see something\x01suspicious, bomb it!",
0x0033: "\x08\x13\x09You got \x05\x41Bombchus\x05\x40!",
0x0034: "\x08\x13\x01You got a \x05\x41Deku Nut\x05\x40!",
0x0035: "\x08\x13\x0EYou found the \x05\x41Boomerang\x05\x40!",
0x0036: "\x08\x13\x0AYou found the \x05\x41Hookshot\x05\x40!\x01It's a spring-loaded chain that\x01you can cast out to hook things.",
0x0037: "\x08\x13\x00You got a \x05\x41Deku Stick\x05\x40!",
0x0038: "\x08\x13\x11You found the \x05\x41Megaton Hammer\x05\x40!\x01It's so heavy, you need to\x01use two hands to swing it!",
0x0039: "\x08\x13\x0FYou found the \x05\x41Lens of Truth\x05\x40!\x01Mysterious things are hidden\x01everywhere!",
0x003A: "\x08\x13\x08You found the \x05\x41Ocarina of Time\x05\x40!\x01It glows with a mystical light...",
0x003C: "\x08\x13\x67You received the \x05\x41Fire\x01Medallion\x05\x40!\x01Darunia awakens as a Sage and\x01adds his power to yours!",
0x003D: "\x08\x13\x68You received the \x05\x43Water\x01Medallion\x05\x40!\x01Ruto awakens as a Sage and\x01adds her power to yours!",
0x003E: "\x08\x13\x66You received the \x05\x42Forest\x01Medallion\x05\x40!\x01Saria awakens as a Sage and\x01adds her power to yours!",
0x003F: "\x08\x13\x69You received the \x05\x46Spirit\x01Medallion\x05\x40!\x01Nabooru awakens as a Sage and\x01adds her power to yours!",
0x0040: "\x08\x13\x6BYou received the \x05\x44Light\x01Medallion\x05\x40!\x01Rauru the Sage adds his power\x01to yours!",
0x0041: "\x08\x13\x6AYou received the \x05\x45Shadow\x01Medallion\x05\x40!\x01Impa awakens as a Sage and\x01adds her power to yours!",
0x0042: "\x08\x13\x14You got an \x05\x41Empty Bottle\x05\x40!\x01You can put something in this\x01bottle.",
0x0043: "\x08\x13\x15You got a \x05\x41Red Potion\x05\x40!\x01It will restore your health",
0x0044: "\x08\x13\x16You got a \x05\x42Green Potion\x05\x40!\x01It will restore your magic.",
0x0045: "\x08\x13\x17You got a \x05\x43Blue Potion\x05\x40!\x01It will recover your health\x01and magic.",
0x0046: "\x08\x13\x18You caught a \x05\x41Fairy\x05\x40 in a bottle!\x01It will revive you\x01the moment you run out of life \x01energy.",
0x0047: "\x08\x13\x19You got a \x05\x41Fish\x05\x40!\x01It looks so fresh and\x01delicious!",
0x0048: "\x08\x13\x10You got a \x05\x41Magic Bean\x05\x40!\x01Find a suitable spot for a garden\x01and plant it.",
0x004A: "\x08\x13\x07You received the \x05\x41Fairy Ocarina\x05\x40!\x01This is a memento from Saria.",
0x004B: "\x08\x13\x3DYou got the \x05\x42Giant's Knife\x05\x40!\x01Hold it with both hands to\x01attack! It's so long, you\x01can't use it with a \x05\x44shield\x05\x40.",
0x004C: "\x08\x13\x3EYou got a \x05\x44Deku Shield\x05\x40!",
0x004D: "\x08\x13\x3FYou got a \x05\x44Hylian Shield\x05\x40!",
0x004E: "\x08\x13\x40You found the \x05\x44Mirror Shield\x05\x40!\x01The shield's polished surface can\x01reflect light or energy.",
0x004F: "\x08\x13\x0BYou found the \x05\x41Longshot\x05\x40!\x01It's an upgraded Hookshot.\x01It extends \x05\x41twice\x05\x40 as far!",
0x0050: "\x08\x13\x42You got a \x05\x41Goron Tunic\x05\x40!\x01Going to a hot place? No worry!",
0x0051: "\x08\x13\x43You got a \x05\x43Zora Tunic\x05\x40!\x01Wear it, and you won't drown\x01underwater.",
0x0052: "\x08You got a \x05\x42Magic Jar\x05\x40!\x01Your Magic Meter is filled!",
0x0053: "\x08\x13\x45You got the \x05\x41Iron Boots\x05\x40!\x01So heavy, you can't run.\x01So heavy, you can't float.",
0x0054: "\x08\x13\x46You got the \x05\x41Hover Boots\x05\x40!\x01With these mysterious boots\x01you can hover above the ground.",
0x0055: "\x08You got a \x05\x45Recovery Heart\x05\x40!\x01Your life energy is recovered!",
0x0056: "\x08\x13\x4BYou upgraded your quiver to a\x01\x05\x41Big Quiver\x05\x40!\x01Now you can carry more arrows-\x01\x05\x4640 \x05\x40in total!",
0x0057: "\x08\x13\x4BYou upgraded your quiver to\x01the \x05\x41Biggest Quiver\x05\x40!\x01Now you can carry even more \x01arrows, to a maximum of \x05\x4650\x05\x40!",
0x0058: "\x08\x13\x4DYou found a \x05\x41Bomb Bag\x05\x40!\x01You found \x05\x4120 Bombs\x05\x40 inside!",
0x0059: "\x08\x13\x4EYou got a \x05\x41Big Bomb Bag\x05\x40!\x01Now you can carry more \x01Bombs, up to a maximum of \x05\x4630\x05\x40!",
0x005A: "\x08\x13\x4FYou got the \x01\x05\x41Biggest Bomb Bag\x05\x40!\x01Now, you can carry up to \x01\x05\x4640\x05\x40 Bombs!",
0x005B: "\x08\x13\x51You found the \x05\x43Silver Gauntlets\x05\x40!\x01You feel the power to lift\x01big things with it!",
0x005C: "\x08\x13\x52You found the \x05\x43Golden Gauntlets\x05\x40!\x01You can feel even more power\x01coursing through your arms!",
0x005D: "\x08\x13\x1CYou put a \x05\x44Blue Fire\x05\x40\x01into the bottle!\x01This is a cool flame you can\x01use on red ice.",
0x005E: "\x08\x13\x56You got an \x05\x43Adult's Wallet\x05\x40!\x01Now you can hold\x01up to \x05\x46200\x05\x40 \x05\x46Rupees\x05\x40.",
0x005F: "\x08\x13\x57You got a \x05\x43Giant's Wallet\x05\x40!\x01Now you can hold\x01up to \x05\x46500\x05\x40 \x05\x46Rupees\x05\x40.",
0x0060: "\x08\x13\x77You found a \x05\x41Small Key\x05\x40!\x01This key will open a locked \x01door. You can use it only\x01in this dungeon.",
0x0066: "\x08\x13\x76You found the \x05\x41Dungeon Map\x05\x40!\x01It's the map to this dungeon.",
0x0067: "\x08\x13\x75You found the \x05\x41Compass\x05\x40!\x01Now you can see the locations\x01of many hidden things in the\x01dungeon!",
0x0068: "\x08\x13\x6FYou obtained the \x05\x41Stone of Agony\x05\x40!\x01If you equip a \x05\x44Rumble Pak\x05\x40, it\x01will react to nearby...secrets.",
0x0069: "\x08\x13\x23You received \x05\x41Zelda's Letter\x05\x40!\x01Wow! This letter has Princess\x01Zelda's autograph!",
0x006C: "\x08\x13\x49Your \x05\x41Deku Seeds Bullet Bag \x01\x05\x40has become bigger!\x01This bag can hold \x05\x4650\x05\x41 \x05\x40bullets!",
0x006F: "\x08You got a \x05\x42Green Rupee\x05\x40!\x01That's \x05\x42one Rupee\x05\x40!",
0x0070: "\x08\x13\x04You got the \x05\x41Fire Arrow\x05\x40!\x01If you hit your target,\x01it will catch fire.",
0x0071: "\x08\x13\x0CYou got the \x05\x43Ice Arrow\x05\x40!\x01If you hit your target,\x01it will freeze.",
0x0072: "\x08\x13\x12You got the \x05\x44Light Arrow\x05\x40!\x01The light of justice\x01will smite evil!",
0x0073: "\x08\x06\x28You have learned the\x01\x06\x2F\x05\x42Minuet of Forest\x05\x40!",
0x0074: "\x08\x06\x28You have learned the\x01\x06\x37\x05\x41Bolero of Fire\x05\x40!",
0x0075: "\x08\x06\x28You have learned the\x01\x06\x29\x05\x43Serenade of Water\x05\x40!",
0x0076: "\x08\x06\x28You have learned the\x01\x06\x2D\x05\x46Requiem of Spirit\x05\x40!",
0x0077: "\x08\x06\x28You have learned the\x01\x06\x28\x05\x45Nocturne of Shadow\x05\x40!",
0x0078: "\x08\x06\x28You have learned the\x01\x06\x32\x05\x44Prelude of Light\x05\x40!",
0x0079: "\x08\x13\x50You got the \x05\x41Goron's Bracelet\x05\x40!\x01Now you can pull up Bomb\x01Flowers.",
0x007A: "\x08\x13\x1DYou put a \x05\x41Bug \x05\x40in the bottle!\x01This kind of bug prefers to\x01live in small holes in the ground.",
0x007B: "\x08\x13\x70You obtained the \x05\x41Gerudo's \x01Membership Card\x05\x40!\x01You can get into the Gerudo's\x01training ground in their hideout.",
0x0080: "\x08\x13\x6CYou got the \x05\x42Kokiri's Emerald\x05\x40!\x01This is the Spiritual Stone of \x01the Forest, now entrusted to \x01you by the Great Deku Tree.",
0x0081: "\x08\x13\x6DYou obtained the \x05\x41Goron's Ruby\x05\x40!\x01This is the Spiritual Stone of \x01Fire passed down by the Gorons!",
0x0082: "\x08\x13\x6EYou obtained \x05\x43Zora's Sapphire\x05\x40!\x01This is the Spiritual Stone of\x01Water passed down by the\x01Zoras!",
0x0090: "\x08\x13\x00Now you can pick up \x01many \x05\x41Deku Sticks\x05\x40!\x01You can carry up to \x05\x4620\x05\x40 of them!",
0x0091: "\x08\x13\x00You can now pick up \x01even more \x05\x41Deku Sticks\x05\x40!\x01You can carry up to \x05\x4630\x05\x40 of them!",
0x0097: "\x08\x13\x20You caught a \x05\x41Poe \x05\x40in a bottle!\x01Something good might happen!",
0x0098: "\x08\x13\x1AYou got \x05\x41Lon Lon Milk\x05\x40!\x01This milk is very nutritious!\x01There are two drinks in it.",
0x0099: "\x08\x13\x1BYou found \x05\x41Ruto's Letter\x05\x40 in a\x01bottle! Show it to King Zora.",
0x9099: "\x08\x13\x1BYou found \x05\x41a letter in a bottle\x05\x40!\x01You remove the letter from the\x01bottle, freeing it for other uses.",
0x009A: "\x08\x13\x21You got a \x05\x41Weird Egg\x05\x40!\x01Feels like there's something\x01moving inside!",
0x00A4: "\x08\x13\x3BYou got the \x05\x42Kokiri Sword\x05\x40!\x01This is a hidden treasure of\x01the Kokiri.",
0x00A7: "\x08\x13\x01Now you can carry\x01many \x05\x41Deku Nuts\x05\x40!\x01You can hold up to \x05\x4630\x05\x40 nuts!",
0x00A8: "\x08\x13\x01You can now carry even\x01more \x05\x41Deku Nuts\x05\x40! You can carry\x01up to \x05\x4640\x05\x41 \x05\x40nuts!",
0x00AD: "\x08\x13\x05You got \x05\x41Din's Fire\x05\x40!\x01Its fireball engulfs everything!",
0x00AE: "\x08\x13\x0DYou got \x05\x42Farore's Wind\x05\x40!\x01This is warp magic you can use!",
0x00AF: "\x08\x13\x13You got \x05\x43Nayru's Love\x05\x40!\x01Cast this to create a powerful\x01protective barrier.",
0x00B4: "\x08You got a \x05\x41Gold Skulltula Token\x05\x40!\x01You've collected \x05\x41\x19\x05\x40 tokens in total.",
0x00B5: "\x08You destroyed a \x05\x41Gold Skulltula\x05\x40.\x01You got a token proving you \x01destroyed it!",
0x00C2: "\x08\x13\x73You got a \x05\x41Piece of Heart\x05\x40!\x01Collect four pieces total to get\x01another Heart Container.",
0x00C3: "\x08\x13\x73You got a \x05\x41Piece of Heart\x05\x40!\x01So far, you've collected two \x01pieces.",
0x00C4: "\x08\x13\x73You got a \x05\x41Piece of Heart\x05\x40!\x01Now you've collected three \x01pieces!",
0x00C5: "\x08\x13\x73You got a \x05\x41Piece of Heart\x05\x40!\x01You've completed another Heart\x01Container!",
0x00C6: "\x08\x13\x72You got a \x05\x41Heart Container\x05\x40!\x01Your maximum life energy is \x01increased by one heart.",
0x00C7: "\x08\x13\x74You got the \x05\x41Boss Key\x05\x40!\x01Now you can get inside the \x01chamber where the Boss lurks.",
0x9002: "\x08You are a \x05\x43FOOL\x05\x40!",
0x00CC: "\x08You got a \x05\x43Blue Rupee\x05\x40!\x01That's \x05\x43five Rupees\x05\x40!",
0x00CD: "\x08\x13\x53You got the \x05\x43Silver Scale\x05\x40!\x01You can dive deeper than you\x01could before.",
0x00CE: "\x08\x13\x54You got the \x05\x43Golden Scale\x05\x40!\x01Now you can dive much\x01deeper than you could before!",
0x00D1: "\x08\x06\x14You've learned \x05\x42Saria's Song\x05\x40!",
0x00D2: "\x08\x06\x11You've learned \x05\x41Epona's Song\x05\x40!",
0x00D3: "\x08\x06\x0BYou've learned the \x05\x46Sun's Song\x05\x40!",
0x00D4: "\x08\x06\x15You've learned \x05\x43Zelda's Lullaby\x05\x40!",
0x00D5: "\x08\x06\x05You've learned the \x05\x44Song of Time\x05\x40!",
0x00D6: "\x08You've learned the \x05\x45Song of Storms\x05\x40!",
0x00DC: "\x08\x13\x58You got \x05\x41Deku Seeds\x05\x40!\x01Use these as bullets\x01for your Slingshot.",
0x00DD: "\x08You mastered the secret sword\x01technique of the \x05\x41Spin Attack\x05\x40!",
0x00E4: "\x08You can now use \x05\x42Magic\x05\x40!",
0x00E5: "\x08Your \x05\x44defensive power\x05\x40 is enhanced!",
0x00E6: "\x08You got a \x05\x46bundle of arrows\x05\x40!",
0x00E8: "\x08Your magic power has been \x01enhanced! Now you have twice\x01as much \x05\x41Magic Power\x05\x40!",
0x00E9: "\x08Your defensive power has been \x01enhanced! Damage inflicted by \x01enemies will be \x05\x41reduced by half\x05\x40.",
0x00F0: "\x08You got a \x05\x41Red Rupee\x05\x40!\x01That's \x05\x41twenty Rupees\x05\x40!",
0x00F1: "\x08You got a \x05\x45Purple Rupee\x05\x40!\x01That's \x05\x45fifty Rupees\x05\x40!",
0x00F2: "\x08You got a \x05\x46Huge Rupee\x05\x40!\x01This Rupee is worth a whopping\x01\x05\x46two hundred Rupees\x05\x40!",
0x00F9: "\x08\x13\x1EYou put a \x05\x41Big Poe \x05\x40in a bottle!\x01Let's sell it at the \x05\x41Ghost Shop\x05\x40!\x01Something good might happen!",
}
KEYSANITY_MESSAGES = {
0x001C: "\x13\x74\x08You got the \x05\x41Boss Key\x05\x40\x01for the \x05\x41Fire Temple\x05\x40!\x09",
0x0006: "\x13\x74\x08You got the \x05\x41Boss Key\x05\x40\x01for the \x05\x42Forest Temple\x05\x40!\x09",
0x001D: "\x13\x74\x08You got the \x05\x41Boss Key\x05\x40\x01for the \x05\x43Water Temple\x05\x40!\x09",
0x001E: "\x13\x74\x08You got the \x05\x41Boss Key\x05\x40\x01for the \x05\x46Spirit Temple\x05\x40!\x09",
0x002A: "\x13\x74\x08You got the \x05\x41Boss Key\x05\x40\x01for the \x05\x45Shadow Temple\x05\x40!\x09",
0x0061: "\x13\x74\x08You got the \x05\x41Boss Key\x05\x40\x01for \x05\x41Ganon's Castle\x05\x40!\x09",
0x0062: "\x13\x75\x08You found the \x05\x41Compass\x05\x40\x01for the \x05\x42Deku Tree\x05\x40!\x09",
0x0063: "\x13\x75\x08You found the \x05\x41Compass\x05\x40\x01for \x05\x41Dodongo's Cavern\x05\x40!\x09",
0x0064: "\x13\x75\x08You found the \x05\x41Compass\x05\x40\x01for \x05\x43Jabu Jabu's Belly\x05\x40!\x09",
0x0065: "\x13\x75\x08You found the \x05\x41Compass\x05\x40\x01for the \x05\x42Forest Temple\x05\x40!\x09",
0x007C: "\x13\x75\x08You found the \x05\x41Compass\x05\x40\x01for the \x05\x41Fire Temple\x05\x40!\x09",
0x007D: "\x13\x75\x08You found the \x05\x41Compass\x05\x40\x01for the \x05\x43Water Temple\x05\x40!\x09",
0x007E: "\x13\x75\x08You found the \x05\x41Compass\x05\x40\x01for the \x05\x46Spirit Temple\x05\x40!\x09",
0x007F: "\x13\x75\x08You found the \x05\x41Compass\x05\x40\x01for the \x05\x45Shadow Temple\x05\x40!\x09",
0x0087: "\x13\x75\x08You found the \x05\x41Compass\x05\x40\x01for the \x05\x44Ice Cavern\x05\x40!\x09",
0x0088: "\x13\x76\x08You found the \x05\x41Dungeon Map\x05\x40\x01for the \x05\x42Deku Tree\x05\x40!\x09",
0x0089: "\x13\x76\x08You found the \x05\x41Dungeon Map\x05\x40\x01for \x05\x41Dodongo's Cavern\x05\x40!\x09",
0x008A: "\x13\x76\x08You found the \x05\x41Dungeon Map\x05\x40\x01for \x05\x43Jabu Jabu's Belly\x05\x40!\x09",
0x008B: "\x13\x76\x08You found the \x05\x41Dungeon Map\x05\x40\x01for the \x05\x42Forest Temple\x05\x40!\x09",
0x008C: "\x13\x76\x08You found the \x05\x41Dungeon Map\x05\x40\x01for the \x05\x41Fire Temple\x05\x40!\x09",
0x008E: "\x13\x76\x08You found the \x05\x41Dungeon Map\x05\x40\x01for the \x05\x43Water Temple\x05\x40!\x09",
0x008F: "\x13\x76\x08You found the \x05\x41Dungeon Map\x05\x40\x01for the \x05\x46Spirit Temple\x05\x40!\x09",
0x0092: "\x13\x76\x08You found the \x05\x41Dungeon Map\x05\x40\x01for the \x05\x44Ice Cavern\x05\x40!\x09",
0x0093: "\x13\x77\x08You found a \x05\x41Small Key\x05\x40\x01for the \x05\x42Forest Temple\x05\x40!\x09",
0x0094: "\x13\x77\x08You found a \x05\x41Small Key\x05\x40\x01for the \x05\x41Fire Temple\x05\x40!\x09",
0x0095: "\x13\x77\x08You found a \x05\x41Small Key\x05\x40\x01for the \x05\x43Water Temple\x05\x40!\x09",
0x009B: "\x13\x77\x08You found a \x05\x41Small Key\x05\x40\x01for the \x05\x45Bottom of the Well\x05\x40!\x09",
0x009F: "\x13\x77\x08You found a \x05\x41Small Key\x05\x40\x01for the \x05\x46Gerudo Training\x01Grounds\x05\x40!\x09",
0x00A0: "\x13\x77\x08You found a \x05\x41Small Key\x05\x40\x01for the \x05\x46Gerudo's Fortress\x05\x40!\x09",
0x00A1: "\x13\x77\x08You found a \x05\x41Small Key\x05\x40\x01for \x05\x41Ganon's Castle\x05\x40!\x09",
0x00A2: "\x13\x75\x08You found the \x05\x41Compass\x05\x40\x01for the \x05\x45Bottom of the Well\x05\x40!\x09",
0x00A3: "\x13\x76\x08You found the \x05\x41Dungeon Map\x05\x40\x01for the \x05\x45Shadow Temple\x05\x40!\x09",
0x00A5: "\x13\x76\x08You found the \x05\x41Dungeon Map\x05\x40\x01for the \x05\x45Bottom of the Well\x05\x40!\x09",
0x00A6: "\x13\x77\x08You found a \x05\x41Small Key\x05\x40\x01for the \x05\x46Spirit Temple\x05\x40!\x09",
0x00A9: "\x13\x77\x08You found a \x05\x41Small Key\x05\x40\x01for the \x05\x45Shadow Temple\x05\x40!\x09",
}
MISC_MESSAGES = {
0x507B: (bytearray(
b"\x08I tell you, I saw him!\x04" \
b"\x08I saw the ghostly figure of Damp\x96\x01" \
b"the gravekeeper sinking into\x01" \
b"his grave. It looked like he was\x01" \
b"holding some kind of \x05\x41treasure\x05\x40!\x02"
), None),
0x0422: ("They say that once \x05\x41Morpha's Curse\x05\x40\x01is lifted, striking \x05\x42this stone\x05\x40 can\x01shift the tides of \x05\x44Lake Hylia\x05\x40.\x02", 0x23),
}
# Interpret a big-endian byte sequence (bytes or list of ints) as an integer.
def bytes_to_int(bytes, signed=False):
    return int.from_bytes(bytes, 'big', signed=signed)
# Encode an integer as a big-endian byte string of the given width.
def int_to_bytes(num, width, signed=False):
    return num.to_bytes(width, byteorder='big', signed=signed)
# Concatenate the display form of every code in the list into one string.
def display_code_list(codes):
    # str.join avoids the quadratic cost of repeated += concatenation
    return "".join(str(code) for code in codes)
# holds a single character or control code of a string
class Text_Code():

    def display(self):
        # control codes have a dedicated formatter; special bytes map to
        # named glyphs; other high bytes are unprintable
        if self.code in CONTROL_CODES:
            return CONTROL_CODES[self.code][2](self.data)
        if self.code in SPECIAL_CHARACTERS:
            return SPECIAL_CHARACTERS[self.code]
        if self.code >= 0x7F:
            return '?'
        return chr(self.code)

    # render this code as a fragment of a python string literal
    def get_python_string(self):
        if self.code in CONTROL_CODES:
            # escape the code byte, then the payload bytes most-significant first
            pieces = ['\\x%02X' % self.code]
            width = CONTROL_CODES[self.code][1]
            for shift in range(width - 1, -1, -1):
                pieces.append('\\x%02X' % ((self.data >> (8 * shift)) & 0xFF))
            return ''.join(pieces)
        if self.code in SPECIAL_CHARACTERS:
            return '\\x%02X' % self.code
        if self.code >= 0x7F:
            return '?'
        return chr(self.code)

    # number of bytes this code occupies when encoded
    def size(self):
        if self.code in CONTROL_CODES:
            return 1 + CONTROL_CODES[self.code][1]
        return 1

    # writes the code to the given offset, and returns the offset of the next byte
    def write(self, rom, offset):
        rom.write_byte(TEXT_START + offset, self.code)

        width = CONTROL_CODES[self.code][1] if self.code in CONTROL_CODES else 0
        if self.code in CONTROL_CODES:
            rom.write_bytes(TEXT_START + offset + 1, int_to_bytes(self.data, width))

        return offset + 1 + width

    def __init__(self, code, data):
        self.code = code
        self.type = CONTROL_CODES[code][0] if code in CONTROL_CODES else 'character'
        self.data = data

    __str__ = __repr__ = display
# holds a single message, and all its data
class Message():
    """A single in-game message: table metadata plus its decoded text codes.

    ``raw_text`` is the byte payload from the ROM; ``parse_text`` decodes it
    into ``text_codes`` (a list of Text_Code) and sets the ``has_*`` flags
    describing which control codes the message contains.
    """

    def display(self):
        """Return a one-line metadata summary followed by the decoded text."""
        meta_data = ["#" + str(self.index),
                     "ID: 0x" + "{:04x}".format(self.id),
                     "Offset: 0x" + "{:06x}".format(self.offset),
                     "Length: 0x" + "{:04x}".format(self.unpadded_length) + "/0x" + "{:04x}".format(self.length),
                     "Box Type: " + str(self.box_type),
                     # typo fix: label previously read "Postion"
                     "Position: " + str(self.position)]
        return ', '.join(meta_data) + '\n' + self.text

    def get_python_string(self):
        """Return the message text escaped as a Python string-literal body."""
        # join instead of repeated concatenation (linear instead of quadratic)
        return ''.join(code.get_python_string() for code in self.text_codes)

    def is_id_message(self):
        """Check if this is an unused message that just contains its own id as text."""
        if self.unpadded_length != 5:
            return False
        # the four characters before the terminator must all be hex digits
        return all(chr(self.text_codes[i].code) in '0123456789ABCDEFabcdef'
                   for i in range(4))

    def parse_text(self):
        """Decode ``raw_text`` into ``text_codes`` and set the feature flags."""
        self.text_codes = []
        index = 0
        while index < self.length:
            next_char = self.raw_text[index]
            data = 0
            index += 1
            if next_char in CONTROL_CODES:
                extra_bytes = CONTROL_CODES[next_char][1]
                if extra_bytes > 0:
                    data = bytes_to_int(self.raw_text[index : index + extra_bytes])
                    index += extra_bytes
            text_code = Text_Code(next_char, data)
            self.text_codes.append(text_code)
            if next_char == 0x02:  # end of message
                break
            if next_char == 0x07:  # goto
                self.has_goto = True
                self.ending = text_code
            if next_char == 0x0A:  # keep textbox open
                self.has_keep_open = True
                self.ending = text_code
            if next_char == 0x0B:  # event trigger
                self.has_event = True
                self.ending = text_code
            if next_char == 0x0E:  # fade out
                self.has_fade = True
                self.ending = text_code
            if next_char == 0x10:  # ocarina input
                self.has_ocarina = True
                self.ending = text_code
            if next_char == 0x1B:  # two-choice prompt
                self.has_two_choice = True
            if next_char == 0x1C:  # three-choice prompt
                self.has_three_choice = True
        self.text = display_code_list(self.text_codes)
        self.unpadded_length = index

    def is_basic(self):
        """True when the message has no special ending or choice codes."""
        return not (self.has_goto or self.has_keep_open or self.has_event or
                    self.has_fade or self.has_ocarina or self.has_two_choice or
                    self.has_three_choice)

    def size(self, replace_ending=False, ending=None, always_allow_skip=True, speed_up_text=True):
        """Return the number of bytes ``write`` would emit for this message.

        This must mirror ``write`` exactly, including the padding to a 4-byte
        boundary.  (The ``if speed_up_text:`` guard below was corrupted to
        ``d_up_text:`` in the source; restored from the matching logic in
        ``write``.)
        """
        size = 0
        ending_codes = [0x02, 0x07, 0x0A, 0x0B, 0x0E, 0x10]
        box_breaks = [0x04, 0x0C]
        slows_text = [0x08, 0x09, 0x14]
        if speed_up_text:
            size += 1  # the leading "instant text" 0x08 code added by write()
        for code in self.text_codes:
            if replace_ending and code.code in ending_codes:
                pass
            # ignore the "make unskippable" flag
            elif always_allow_skip and code.code == 0x1A:
                pass
            # ignore anything that slows down text
            elif speed_up_text and code.code in slows_text:
                pass
            elif speed_up_text and code.code in box_breaks:
                size += 2  # box break is rewritten as break + instant-text
            else:
                size += code.size()
        if replace_ending:
            if ending:
                if speed_up_text and ending.code == 0x10:  # ocarina
                    size += 1
                size += ending.size()  # write special ending
            size += 1  # closing 0x02 terminator
        while size % 4 > 0:
            size += 1
        return size

    def write(self, rom, index, offset, replace_ending=False, ending=None, always_allow_skip=True, speed_up_text=True, bank=0x07):
        """Write the message into the ROM at table slot ``index`` and text
        ``offset``; return the offset of the next message."""
        # construct and write the 8-byte table entry
        id_bytes = int_to_bytes(self.id, 2)
        offset_bytes = int_to_bytes(offset, 3)
        entry = id_bytes + bytes([self.opts, 0x00, bank]) + offset_bytes
        entry_offset = TABLE_START + 8 * index
        rom.write_bytes(entry_offset, entry)
        ending_codes = [0x02, 0x07, 0x0A, 0x0B, 0x0E, 0x10]
        box_breaks = [0x04, 0x0C]
        slows_text = [0x08, 0x09, 0x14]
        # speed the text up
        if speed_up_text:
            offset = Text_Code(0x08, 0).write(rom, offset)  # allow instant text
        # write the message body
        for code in self.text_codes:
            # ignore ending codes if the ending is going to be replaced
            if replace_ending and code.code in ending_codes:
                pass
            # ignore the "make unskippable" flag
            elif always_allow_skip and code.code == 0x1A:
                pass
            # ignore anything that slows down text
            elif speed_up_text and code.code in slows_text:
                pass
            elif speed_up_text and code.code in box_breaks:
                # message 0x605A keeps its original break code -- presumably
                # timing-sensitive; TODO confirm why it is special-cased
                if self.id == 0x605A:
                    offset = code.write(rom, offset)
                else:
                    offset = Text_Code(0x04, 0).write(rom, offset)
                offset = Text_Code(0x08, 0).write(rom, offset)  # re-enable instant text
            else:
                offset = code.write(rom, offset)
        if replace_ending:
            if ending:
                if speed_up_text and ending.code == 0x10:
                    offset = Text_Code(0x09, 0).write(rom, offset)
                offset = ending.write(rom, offset)
            offset = Text_Code(0x02, 0).write(rom, offset)  # terminator
        # pad to a 4-byte boundary
        while offset % 4 > 0:
            offset = Text_Code(0x00, 0).write(rom, offset)
        return offset

    def __init__(self, raw_text, index, id, opts, offset, length):
        self.raw_text = raw_text
        self.index = index
        self.id = id
        self.opts = opts
        self.box_type = (self.opts & 0xF0) >> 4  # high nibble of opts
        self.position = (self.opts & 0x0F)       # low nibble of opts
        self.offset = offset
        self.length = length
        self.has_goto = False
        self.has_keep_open = False
        self.has_event = False
        self.has_fade = False
        self.has_ocarina = False
        self.has_two_choice = False
        self.has_three_choice = False
        self.ending = None
        self.parse_text()

    @classmethod
    def from_rom(cls, rom, index):
        """Read the message at table slot ``index`` out of the ROM."""
        entry_offset = TABLE_START + 8 * index
        entry = rom.read_bytes(entry_offset, 8)
        next_entry = rom.read_bytes(entry_offset + 8, 8)  # was `next` (shadowed builtin)
        id = bytes_to_int(entry[0:2])
        opts = entry[2]
        offset = bytes_to_int(entry[5:8])
        # the length is the distance to the next entry's text offset
        length = bytes_to_int(next_entry[5:8]) - offset
        raw_text = rom.read_bytes(TEXT_START + offset, length)
        return cls(raw_text, index, id, opts, offset, length)

    @classmethod
    def from_string(cls, text, id=0, opts=0x00):
        """Build a message from a plain string, appending the 0x02 terminator."""
        payload = list(text.encode('utf-8')) + [0x02]  # was `bytes` (shadowed builtin)
        return cls(payload, 0, id, opts, 0, len(payload) + 1)

    @classmethod
    def from_bytearray(cls, bytearray, id=0, opts=0x00):
        """Build a message from raw bytes, appending the 0x02 terminator."""
        # (parameter name shadows the builtin but is part of the interface)
        payload = list(bytearray) + [0x02]
        return cls(payload, 0, id, opts, 0, len(payload) + 1)

    __str__ = __repr__ = display
def update_message_by_id(messages, id, text, opts=None):
    """Replace the message with the given id, or append a new one if absent."""
    for message in messages:
        if message.id == id:
            update_message_by_index(messages, message.index, text, opts)
            return
    add_message(messages, text, id, opts)
def get_message_by_id(messages, id):
    """Return the first message whose id matches, or None if there is none.

    The previous implementation returned ``messages[m.index]``, which silently
    assumed every message's ``.index`` equals its position in the list;
    returning the matched message directly removes that assumption.
    """
    return next((message for message in messages if message.id == id), None)
def update_message_by_index(messages, index, text, opts=None):
    """Replace messages[index] with a new Message built from ``text``.

    The replacement keeps the old message's id, and its opts too unless an
    explicit ``opts`` is given.
    """
    existing = messages[index]
    if opts is None:
        opts = existing.opts
    if isinstance(text, bytearray):
        replacement = Message.from_bytearray(text, existing.id, opts)
    else:
        replacement = Message.from_string(text, existing.id, opts)
    replacement.index = index
    messages[index] = replacement
def add_message(messages, text, id=0, opts=0x00):
    """Append a new Message built from ``text`` and give it the next index."""
    if isinstance(text, bytearray):
        new_message = Message.from_bytearray(text, id, opts)
    else:
        new_message = Message.from_string(text, id, opts)
    new_message.index = len(messages)
    messages.append(new_message)
class Shop_Item():
    """One 0x20-byte entry of the in-ROM shop item table."""

    def display(self):
        """Return a two-line human-readable dump of the entry."""
        meta_data = ["#" + str(self.index),
            "Item: 0x" + "{:04x}".format(self.get_item_id),
            "Price: " + str(self.price),
            "Amount: " + str(self.pieces),
            "Object: 0x" + "{:04x}".format(self.object),
            "Model: 0x" + "{:04x}".format(self.model),
            "Description: 0x" + "{:04x}".format(self.description_message),
            "Purchase: 0x" + "{:04x}".format(self.purchase_message),]
        func_data = [
            "func1: 0x" + "{:08x}".format(self.func1),
            "func2: 0x" + "{:08x}".format(self.func2),
            "func3: 0x" + "{:08x}".format(self.func3),
            "func4: 0x" + "{:08x}".format(self.func4),]
        return ', '.join(meta_data) + '\n' + ', '.join(func_data)

    # write the shop item back out to its 0x20-byte table slot; field order
    # must mirror the offsets read in __init__
    def write(self, rom, shop_table_address, index):
        entry_offset = shop_table_address + 0x20 * index
        # (local name shadows the builtin `bytes`; left unchanged here)
        bytes = []
        bytes += int_to_bytes(self.object, 2)
        bytes += int_to_bytes(self.model, 2)
        bytes += int_to_bytes(self.func1, 4)
        bytes += int_to_bytes(self.price, 2)
        bytes += int_to_bytes(self.pieces, 2)
        bytes += int_to_bytes(self.description_message, 2)
        bytes += int_to_bytes(self.purchase_message, 2)
        bytes += [0x00, 0x00]  # two bytes at offset 0x10 not modeled here
        bytes += int_to_bytes(self.get_item_id, 2)
        bytes += int_to_bytes(self.func2, 4)
        bytes += int_to_bytes(self.func3, 4)
        bytes += int_to_bytes(self.func4, 4)
        rom.write_bytes(entry_offset, bytes)

    # read one shop item from the given table slot
    def __init__(self, rom, shop_table_address, index):
        entry_offset = shop_table_address + 0x20 * index
        entry = rom.read_bytes(entry_offset, 0x20)
        self.index = index
        self.object = bytes_to_int(entry[0x00:0x02])
        self.model = bytes_to_int(entry[0x02:0x04])
        self.func1 = bytes_to_int(entry[0x04:0x08])
        self.price = bytes_to_int(entry[0x08:0x0A])
        self.pieces = bytes_to_int(entry[0x0A:0x0C])
        self.description_message = bytes_to_int(entry[0x0C:0x0E])
        self.purchase_message = bytes_to_int(entry[0x0E:0x10])
        # bytes 0x10:0x12 are skipped (written back as zeros in write())
        self.get_item_id = bytes_to_int(entry[0x12:0x14])
        self.func2 = bytes_to_int(entry[0x14:0x18])
        self.func3 = bytes_to_int(entry[0x18:0x1C])
        self.func4 = bytes_to_int(entry[0x1C:0x20])

    __str__ = __repr__ = display
def read_shop_items(rom, shop_table_address, count=100):
    """Read ``count`` shop-item entries starting at ``shop_table_address``.

    ``count`` defaults to the previously hard-coded 100 entries, so existing
    callers are unaffected.  Uses a list comprehension instead of the original
    append loop.
    """
    return [Shop_Item(rom, shop_table_address, index) for index in range(count)]
def write_shop_items(rom, shop_table_address, shop_items):
    """Write every shop item back to its own slot in the shop table."""
    for shop_item in shop_items:
        shop_item.write(rom, shop_table_address, shop_item.index)
# Shop slot indices whose messages are excluded from get_shop_message_id_set
# (presumably slots whose text must stay untouched -- TODO confirm against the
# shop table layout).
SHOP_ITEM_EXCEPTIONS = [0x0A, 0x0B, 0x11, 0x12, 0x13, 0x14, 0x29]
def get_shop_message_id_set(shop_items):
    """Collect the description/purchase message ids of non-exempt shop slots."""
    ids = set()
    for shop in shop_items:
        if shop.index in SHOP_ITEM_EXCEPTIONS:
            continue
        ids.update((shop.description_message, shop.purchase_message))
    return ids
def remove_unused_messages(messages):
    """Drop placeholder id-only messages in place and renumber the survivors."""
    kept = [message for message in messages if not message.is_id_message()]
    messages[:] = kept
    for new_index, message in enumerate(messages):
        message.index = new_index
def move_shop_item_messages(messages, shop_items):
    """Move shop message ids out of the 0x00xx range by setting their high
    bit, updating both the messages and the shop entries that refer to them."""
    def in_item_range(message_id):
        # ids whose high byte is 0x00 collide with the item-message id range
        return int_to_bytes(message_id, 2)[0] == 0x00
    moved_ids = set(
        message_id for message_id in get_shop_message_id_set(shop_items)
        if in_item_range(message_id))
    for message in messages:
        if message.id in moved_ids:
            message.id |= 0x8000
    for shop in shop_items:
        if in_item_range(shop.description_message):
            shop.description_message |= 0x8000
        if in_item_range(shop.purchase_message):
            shop.purchase_message |= 0x8000
def make_player_message(text):
    """Rewrite second-person item text into third person using the in-game
    player-name control code (0x0F), e.g. "You found X" -> "<player> got X".
    """
    player_text = '\x05\x42\x0F\x05\x40'
    pronoun_mapping = {
        "You have ": player_text + " ",
        "You are ": player_text + " is ",
        "You've ": player_text + " ",
        "Your ": player_text + "'s ",
        "You ": player_text + " ",
        "you have ": player_text + " ",
        "you are ": player_text + " is ",
        "you've ": player_text + " ",
        "your ": player_text + "'s ",
        "you ": player_text + " ",
    }
    verb_mapping = {
        'obtained ': 'got ',
        'received ': 'got ',
        'learned ': 'got ',
        'borrowed ': 'got ',
        'found ': 'got ',
    }
    new_text = text
    # replace only the first pronoun, and only when a mapped phrase starts
    # exactly where the first (case-insensitive) "you" appears
    first_you = text.lower().find('you')
    if first_you != -1:
        matched = next(
            (phrase for phrase in pronoun_mapping
             if text.find(phrase) == first_you),
            None)
        if matched is not None:
            new_text = new_text.replace(matched, pronoun_mapping[matched], 1)
    # normalize acquisition verbs everywhere in the text
    for verb, replacement in verb_mapping.items():
        new_text = new_text.replace(verb, replacement)
    return new_text
def update_item_messages(messages, world):
    """Install the randomizer item texts (and misc overrides) into ``messages``.

    In multiworld games the item texts are rewritten into third person so the
    receiving player's name appears in them.
    """
    combined_messages = dict(ITEM_MESSAGES)
    combined_messages.update(KEYSANITY_MESSAGES)
    multiworld = world.world_count > 1
    for message_id, text in combined_messages.items():
        final_text = make_player_message(text) if multiworld else text
        update_message_by_id(messages, message_id, final_text, 0x23)
    for message_id, (text, opt) in MISC_MESSAGES.items():
        update_message_by_id(messages, message_id, text, opt)
def add_item_messages(messages, shop_items, world):
    # Relocate shop message ids out of the item-message range first so the new
    # item messages installed below cannot collide with them.
    move_shop_item_messages(messages, shop_items)
    update_item_messages(messages, world)
def read_messages(rom):
    """Read every message out of the ROM's message table."""
    messages = []
    entry_offset = TABLE_START
    message_count = 0
    while True:
        table_entry = rom.read_bytes(entry_offset, 8)
        message_id = bytes_to_int(table_entry[0:2])
        if message_id == 0xFFFF:
            # this marks the end of the table
            break
        if message_id == 0xFFFD:
            # placeholder entry that only provides an ending offset; skip it
            entry_offset += 8
            continue
        messages.append(Message.from_rom(rom, message_count))
        message_count += 1
        entry_offset += 8
    return messages
# write the messages back
def repack_messages(rom, messages, permutation=None, always_allow_skip=True, speed_up_text=True):
    """Write all messages back to the ROM, optionally permuted.

    ``permutation[old_index] = new_index`` means the text of message
    ``new_index`` is written into table slot ``old_index`` (it temporarily
    takes the old message's id and keeps the old message's ending).  Text is
    packed into bank 0x07 first and spills into bank 0x08 when full.
    """
    if permutation is None:
        permutation = range(len(messages))
    # repack messages
    offset = 0
    text_size_limit = ENG_TEXT_SIZE_LIMIT
    text_bank = 0x07
    for old_index, new_index in enumerate(permutation):
        old_message = messages[old_index]
        new_message = messages[new_index]
        remember_id = new_message.id
        new_message.id = old_message.id
        # check if there is space to write the message
        message_size = new_message.size(True, old_message.ending, always_allow_skip, speed_up_text)
        if message_size + offset > text_size_limit:
            # if there is no room then switch banks
            if text_bank == 0x07:
                text_size_limit = JPN_TEXT_SIZE_LIMIT
                text_bank = 0x08
                offset = 0
        # actually write the message
        offset = new_message.write(rom, old_index, offset, True, old_message.ending, always_allow_skip, speed_up_text, text_bank)
        new_message.id = remember_id
    # raise an exception if too much is written
    # we raise it at the end so that we know how much overflow there is
    if offset > text_size_limit:
        raise(TypeError("Message Text table is too large: 0x" + "{:x}".format(ENG_TEXT_SIZE_LIMIT + offset) + " written / 0x" + "{:x}".format(ENG_TEXT_SIZE_LIMIT + JPN_TEXT_SIZE_LIMIT) + " allowed."))
    # end the table: a 0xFFFD entry records the final text offset ...
    table_index = len(messages)
    entry = bytes([0xFF, 0xFD, 0x00, 0x00, 0x07]) + int_to_bytes(offset, 3)
    entry_offset = TABLE_START + 8 * table_index
    rom.write_bytes(entry_offset, entry)
    table_index += 1
    entry_offset = TABLE_START + 8 * table_index
    if 8 * (table_index + 1) > TABLE_SIZE_LIMIT:
        raise(TypeError("Message ID table is too large: 0x" + "{:x}".format(8 * (table_index + 1)) + " written / 0x" + "{:x}".format(TABLE_SIZE_LIMIT) + " allowed."))
    # ... followed by the 0xFFFF end-of-table marker
    rom.write_bytes(entry_offset, [0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00])
# shuffles the messages in the game, making sure to keep various message types in their own group
def shuffle_messages(rom, except_hints=True, always_allow_skip=True):
    """Shuffle the game's messages within compatible groups and write them back.

    NOTE(review): relies on the module-level hint-id constants and on
    ``shuffle_messages.shop_item_messages`` being assigned by the caller
    before this runs -- confirm against the calling code.  Also uses the
    ``random`` module imported elsewhere in this file.
    """
    messages = read_messages(rom)
    permutation = [i for i, _ in enumerate(messages)]

    def is_exempt(m):
        # hints, the error message, and id-placeholder messages stay in place
        hint_ids = (
            GOSSIP_STONE_MESSAGES + TEMPLE_HINTS_MESSAGES + LIGHT_ARROW_HINT +
            list(KEYSANITY_MESSAGES.keys()) + shuffle_messages.shop_item_messages
        )
        is_hint = (except_hints and m.id in hint_ids)
        is_error_message = (m.id == ERROR_MESSAGE)
        return (is_hint or is_error_message or m.is_id_message())

    # messages are only swapped within groups that share ending/choice behavior
    have_goto = list( filter(lambda m: not is_exempt(m) and m.has_goto, messages) )
    have_keep_open = list( filter(lambda m: not is_exempt(m) and m.has_keep_open, messages) )
    have_event = list( filter(lambda m: not is_exempt(m) and m.has_event, messages) )
    have_fade = list( filter(lambda m: not is_exempt(m) and m.has_fade, messages) )
    have_ocarina = list( filter(lambda m: not is_exempt(m) and m.has_ocarina, messages) )
    have_two_choice = list( filter(lambda m: not is_exempt(m) and m.has_two_choice, messages) )
    have_three_choice = list( filter(lambda m: not is_exempt(m) and m.has_three_choice, messages) )
    basic_messages = list( filter(lambda m: not is_exempt(m) and m.is_basic(), messages) )

    def shuffle_group(group):
        # permute positions inside the group only
        group_permutation = [i for i, _ in enumerate(group)]
        random.shuffle(group_permutation)
        for index_from, index_to in enumerate(group_permutation):
            permutation[group[index_to].index] = group[index_from].index

    # need to use 'list' to force 'map' to actually run through
    list( map( shuffle_group, [
        have_goto + have_keep_open + have_event + have_fade + basic_messages,
        have_ocarina,
        have_two_choice,
        have_three_choice,
    ]))

    # write the messages back (speed_up_text is disabled for shuffled text)
    repack_messages(rom, messages, permutation, always_allow_skip, False)
| true | true |
1c2d66ca6f72fce91804ef705bf0a12dad3f4c66 | 11,297 | py | Python | postprocess/contours_image_cdb_extract.py | cinemascienceworkflows/2021-06_E4S-Tutorial | bf97fb65bce73b4f59e2cd26e969c01370fea62c | [
"BSD-3-Clause"
] | null | null | null | postprocess/contours_image_cdb_extract.py | cinemascienceworkflows/2021-06_E4S-Tutorial | bf97fb65bce73b4f59e2cd26e969c01370fea62c | [
"BSD-3-Clause"
] | null | null | null | postprocess/contours_image_cdb_extract.py | cinemascienceworkflows/2021-06_E4S-Tutorial | bf97fb65bce73b4f59e2cd26e969c01370fea62c | [
"BSD-3-Clause"
] | null | null | null | # state file generated using paraview version 5.9.0
#### import the simple module from the paraview
from paraview.simple import *
import glob
import os
def create_cinema_csv(db, datadir):
    """Write the Cinema database index ``<db>/data.csv`` listing every
    RenderView image in ``db`` in sorted order.

    ``datadir`` is accepted for call-symmetry with paraview_set_up_extracts
    but is unused here.  Uses enumerate() instead of the original manual
    counter, and os.path helpers instead of string concatenation.
    """
    files = sorted(glob.glob(os.path.join(db, "RenderView*")))
    with open(os.path.join(db, "data.csv"), "w") as f:
        f.write("time,FILE\n")
        for cur_image, infile in enumerate(files):
            file_name = os.path.basename(infile)
            print(file_name)
            f.write(str(cur_image) + "," + file_name + "\n")
def paraview_set_up_extracts( db, datadir ):
    """Build the ParaView pipeline and PNG extractor for the jet dataset.

    Reads the ExodusII files ``nonIsoEdgeOpenJet.e.4.*`` from ``datadir`` and
    sets up the render view, contours, color maps and a static-camera PNG
    extractor.  ``db`` (the output database directory) is not used here; the
    caller passes it to SaveExtracts.  This function is a lightly edited
    ParaView Python trace (state file).
    """
    #### disable automatic camera reset on 'Show'
    paraview.simple._DisableFirstRenderCameraReset()
    # ----------------------------------------------------------------
    # setup views used in the visualization
    # ----------------------------------------------------------------
    # black background
    LoadPalette(paletteName='BlackBackground')
    # get the material library
    materialLibrary1 = GetMaterialLibrary()
    # Create a new 'Render View'
    renderView1 = CreateView('RenderView')
    renderView1.ViewSize = [1920, 1080]
    renderView1.AxesGrid = 'GridAxes3DActor'
    renderView1.CenterOfRotation = [-1.4435499906539917e-08, -2.2351741790771484e-08, 0.08731197472661734]
    renderView1.StereoType = 'Crystal Eyes'
    renderView1.CameraPosition = [-0.913270744661002, 0.0, 0.3921541663602877]
    renderView1.CameraFocalPoint = [-1.549582841626004e-17, 0.0, 0.1506849378347397]
    renderView1.CameraViewUp = [0.2556166324404753, 0.0, 0.9667782254580369]
    renderView1.CameraFocalDisk = 1.0
    renderView1.CameraParallelScale = 0.24449439988291152
    renderView1.BackEnd = 'OSPRay raycaster'
    renderView1.OSPRayMaterialLibrary = materialLibrary1
    SetActiveView(None)
    # ----------------------------------------------------------------
    # setup view layouts
    # ----------------------------------------------------------------
    # create new layout object 'Layout #1'
    layout1 = CreateLayout(name='Layout #1')
    layout1.AssignView(0, renderView1)
    layout1.SetSize(1920, 1080)
    # ----------------------------------------------------------------
    # restore active view
    SetActiveView(renderView1)
    # ----------------------------------------------------------------
    # ----------------------------------------------------------------
    # setup the data processing pipelines
    # ----------------------------------------------------------------
    # create a new 'ExodusIIReader' over the four partition files
    registration_name=(datadir + '/nonIsoEdgeOpenJet.e.4.*')
    filename_list=[(datadir + '/nonIsoEdgeOpenJet.e.4.0'), (datadir + '/nonIsoEdgeOpenJet.e.4.1'), (datadir + '/nonIsoEdgeOpenJet.e.4.2'), (datadir + '/nonIsoEdgeOpenJet.e.4.3'),]
    nonIsoEdgeOpenJete4 = ExodusIIReader(registrationName=registration_name, FileName=filename_list)
    nonIsoEdgeOpenJete4.PointVariables = ['enthalpy', 'pressure', 'temperature', 'velocity_']
    nonIsoEdgeOpenJete4.SideSetArrayStatus = []
    nonIsoEdgeOpenJete4.ElementBlocks = ['block_1']
    # create a new 'Contour'
    negativePressureContour = Contour(registrationName='NegativePressureContour', Input=nonIsoEdgeOpenJete4)
    negativePressureContour.ContourBy = ['POINTS', 'pressure']
    negativePressureContour.Isosurfaces = [-0.0001]
    negativePressureContour.PointMergeMethod = 'Uniform Binning'
    # create a new 'Contour'
    enthalpyContour = Contour(registrationName='EnthalpyContour', Input=nonIsoEdgeOpenJete4)
    enthalpyContour.ContourBy = ['POINTS', 'enthalpy']
    enthalpyContour.Isosurfaces = [10000.0]
    enthalpyContour.PointMergeMethod = 'Uniform Binning'
    # create a new 'Contour'
    positivePressureContour = Contour(registrationName='PositivePressureContour', Input=nonIsoEdgeOpenJete4)
    positivePressureContour.ContourBy = ['POINTS', 'pressure']
    positivePressureContour.Isosurfaces = [0.0001]
    positivePressureContour.PointMergeMethod = 'Uniform Binning'
    # ----------------------------------------------------------------
    # setup the visualization in view 'renderView1'
    # ----------------------------------------------------------------
    # show data from nonIsoEdgeOpenJete4
    nonIsoEdgeOpenJete4Display = Show(nonIsoEdgeOpenJete4, renderView1, 'UnstructuredGridRepresentation')
    # trace defaults for the display properties.
    nonIsoEdgeOpenJete4Display.Representation = 'Surface'
    nonIsoEdgeOpenJete4Display.ColorArrayName = ['POINTS', '']
    nonIsoEdgeOpenJete4Display.Opacity = 0.32
    nonIsoEdgeOpenJete4Display.SelectTCoordArray = 'None'
    nonIsoEdgeOpenJete4Display.SelectNormalArray = 'None'
    nonIsoEdgeOpenJete4Display.SelectTangentArray = 'None'
    nonIsoEdgeOpenJete4Display.OSPRayScaleArray = 'GlobalNodeId'
    nonIsoEdgeOpenJete4Display.OSPRayScaleFunction = 'PiecewiseFunction'
    nonIsoEdgeOpenJete4Display.SelectOrientationVectors = 'None'
    nonIsoEdgeOpenJete4Display.ScaleFactor = 0.030136987566947937
    nonIsoEdgeOpenJete4Display.SelectScaleArray = 'GlobalNodeId'
    nonIsoEdgeOpenJete4Display.GlyphType = 'Arrow'
    nonIsoEdgeOpenJete4Display.GlyphTableIndexArray = 'GlobalNodeId'
    nonIsoEdgeOpenJete4Display.GaussianRadius = 0.0015068493783473968
    nonIsoEdgeOpenJete4Display.SetScaleArray = ['POINTS', 'GlobalNodeId']
    nonIsoEdgeOpenJete4Display.ScaleTransferFunction = 'PiecewiseFunction'
    nonIsoEdgeOpenJete4Display.OpacityArray = ['POINTS', 'GlobalNodeId']
    nonIsoEdgeOpenJete4Display.OpacityTransferFunction = 'PiecewiseFunction'
    nonIsoEdgeOpenJete4Display.DataAxesGrid = 'GridAxesRepresentation'
    nonIsoEdgeOpenJete4Display.PolarAxes = 'PolarAxesRepresentation'
    nonIsoEdgeOpenJete4Display.ScalarOpacityUnitDistance = 0.015062372960425148
    nonIsoEdgeOpenJete4Display.OpacityArrayName = ['POINTS', 'GlobalNodeId']
    nonIsoEdgeOpenJete4Display.ExtractedBlockIndex = 2
    # init the 'PiecewiseFunction' selected for 'ScaleTransferFunction'
    nonIsoEdgeOpenJete4Display.ScaleTransferFunction.Points = [1.0, 0.0, 0.5, 0.0, 35532.0, 1.0, 0.5, 0.0]
    # init the 'PiecewiseFunction' selected for 'OpacityTransferFunction'
    nonIsoEdgeOpenJete4Display.OpacityTransferFunction.Points = [1.0, 0.0, 0.5, 0.0, 35532.0, 1.0, 0.5, 0.0]
    # show data from enthalpyContour
    enthalpyContourDisplay = Show(enthalpyContour, renderView1, 'GeometryRepresentation')
    # get color transfer function/color map for 'velocity_'
    velocity_LUT = GetColorTransferFunction('velocity_')
    velocity_LUT.RGBPoints = [-0.01593730623308355, 0.231373, 0.298039, 0.752941, 0.030675974411747547, 0.865003, 0.865003, 0.865003, 0.07728925505657865, 0.705882, 0.0156863, 0.14902]
    velocity_LUT.ScalarRangeInitialized = 1.0
    velocity_LUT.VectorComponent = 2
    velocity_LUT.VectorMode = 'Component'
    # trace defaults for the display properties.
    enthalpyContourDisplay.Representation = 'Surface'
    enthalpyContourDisplay.ColorArrayName = ['POINTS', 'velocity_']
    enthalpyContourDisplay.LookupTable = velocity_LUT
    enthalpyContourDisplay.SelectTCoordArray = 'None'
    enthalpyContourDisplay.SelectNormalArray = 'Normals'
    enthalpyContourDisplay.SelectTangentArray = 'None'
    enthalpyContourDisplay.OSPRayScaleArray = 'enthalpy'
    enthalpyContourDisplay.OSPRayScaleFunction = 'PiecewiseFunction'
    enthalpyContourDisplay.SelectOrientationVectors = 'None'
    enthalpyContourDisplay.ScaleFactor = 0.01933152247220278
    enthalpyContourDisplay.SelectScaleArray = 'enthalpy'
    enthalpyContourDisplay.GlyphType = 'Arrow'
    enthalpyContourDisplay.GlyphTableIndexArray = 'enthalpy'
    enthalpyContourDisplay.GaussianRadius = 0.000966576123610139
    enthalpyContourDisplay.SetScaleArray = ['POINTS', 'enthalpy']
    enthalpyContourDisplay.ScaleTransferFunction = 'PiecewiseFunction'
    enthalpyContourDisplay.OpacityArray = ['POINTS', 'enthalpy']
    enthalpyContourDisplay.OpacityTransferFunction = 'PiecewiseFunction'
    enthalpyContourDisplay.DataAxesGrid = 'GridAxesRepresentation'
    enthalpyContourDisplay.PolarAxes = 'PolarAxesRepresentation'
    # init the 'PiecewiseFunction' selected for 'ScaleTransferFunction'
    enthalpyContourDisplay.ScaleTransferFunction.Points = [48470.90823995246, 0.0, 0.5, 0.0, 48478.91015625, 1.0, 0.5, 0.0]
    # init the 'PiecewiseFunction' selected for 'OpacityTransferFunction'
    enthalpyContourDisplay.OpacityTransferFunction.Points = [48470.90823995246, 0.0, 0.5, 0.0, 48478.91015625, 1.0, 0.5, 0.0]
    # setup the color legend parameters for each legend in this view
    # get color legend/bar for velocity_LUT in view renderView1
    velocity_LUTColorBar = GetScalarBar(velocity_LUT, renderView1)
    velocity_LUTColorBar.WindowLocation = 'UpperLeftCorner'
    velocity_LUTColorBar.Position = [0.002070393374741201, 0.6534810126582279]
    velocity_LUTColorBar.Title = 'velocity_'
    velocity_LUTColorBar.ComponentTitle = 'Z'
    # set color bar visibility
    velocity_LUTColorBar.Visibility = 1
    # show color legend
    enthalpyContourDisplay.SetScalarBarVisibility(renderView1, True)
    # ----------------------------------------------------------------
    # setup color maps and opacity mapes used in the visualization
    # note: the Get..() functions create a new object, if needed
    # ----------------------------------------------------------------
    # get opacity transfer function/opacity map for 'velocity_'
    velocity_PWF = GetOpacityTransferFunction('velocity_')
    velocity_PWF.Points = [-0.01593730623308355, 0.0, 0.5, 0.0, 0.07728925505657865, 1.0, 0.5, 0.0]
    velocity_PWF.ScalarRangeInitialized = 1
    # ----------------------------------------------------------------
    # setup extractors
    # ----------------------------------------------------------------
    # create extractor
    pNG1 = CreateExtractor('PNG', renderView1, registrationName='PNG1')
    # trace defaults for the extractor.
    # init the 'PNG' selected for 'Writer'
    pNG1.Writer.FileName = 'RenderView1_%.6ts%cm.png'
    pNG1.Writer.ImageResolution = [1920, 1080]
    pNG1.Writer.TransparentBackground = 0
    pNG1.Writer.Format = 'PNG'
    pNG1.Writer.CameraMode = 'Static'
    # alternative camera sweep left disabled from the original trace:
    #pNG1.Writer.CameraMode = 'Phi-Theta'
    #pNG1.Writer.PhiResolution = 3
    #pNG1.Writer.ThetaResolution = 3
    # ----------------------------------------------------------------
    # restore active source
    SetActiveSource(pNG1)
    # ----------------------------------------------------------------
# ----------------------------------------------------------------
# -----------------------------------------------------------------------------------------------
#
# main
#
# -----------------------------------------------------------------------------------------------
if __name__ == '__main__':
    # The file header only imports glob/os and the paraview star-import, but
    # sys is used below -- import it explicitly here.
    import sys
    if len(sys.argv) == 3:
        database = sys.argv[1]
        infile = sys.argv[2]
    else:
        print("ERROR")
        # Previously execution continued with empty paths after printing the
        # error; abort instead so ParaView does not fail later with a
        # confusing message.
        sys.exit(1)
    # set up paraview pipeline and extracts for cdb
    paraview_set_up_extracts( database, infile )
    # generate extracts
    SaveExtracts(ExtractsOutputDirectory=database)
    # manually make csv file for cdb
    create_cinema_csv( database, infile )
| 47.070833 | 184 | 0.653713 |
iew*"))
cur_image = 0
with open(db + "/data.csv", "w") as f:
f.write("time,FILE\n")
for infile in files:
path, file_name = os.path.split(infile)
print(file_name)
f.write(str(cur_image) + "," + file_name + "\n")
cur_image = cur_image + 1
def paraview_set_up_extracts( db, datadir ):
materialLibrary1 = GetMaterialLibrary()
renderView1 = CreateView('RenderView')
renderView1.ViewSize = [1920, 1080]
renderView1.AxesGrid = 'GridAxes3DActor'
renderView1.CenterOfRotation = [-1.4435499906539917e-08, -2.2351741790771484e-08, 0.08731197472661734]
renderView1.StereoType = 'Crystal Eyes'
renderView1.CameraPosition = [-0.913270744661002, 0.0, 0.3921541663602877]
renderView1.CameraFocalPoint = [-1.549582841626004e-17, 0.0, 0.1506849378347397]
renderView1.CameraViewUp = [0.2556166324404753, 0.0, 0.9667782254580369]
renderView1.CameraFocalDisk = 1.0
renderView1.CameraParallelScale = 0.24449439988291152
renderView1.BackEnd = 'OSPRay raycaster'
renderView1.OSPRayMaterialLibrary = materialLibrary1
SetActiveView(None)
layout1 = CreateLayout(name='Layout #1')
layout1.AssignView(0, renderView1)
layout1.SetSize(1920, 1080)
SetActiveView(renderView1)
registration_name=(datadir + '/nonIsoEdgeOpenJet.e.4.*')
filename_list=[(datadir + '/nonIsoEdgeOpenJet.e.4.0'), (datadir + '/nonIsoEdgeOpenJet.e.4.1'), (datadir + '/nonIsoEdgeOpenJet.e.4.2'), (datadir + '/nonIsoEdgeOpenJet.e.4.3'),]
nonIsoEdgeOpenJete4 = ExodusIIReader(registrationName=registration_name, FileName=filename_list)
nonIsoEdgeOpenJete4.PointVariables = ['enthalpy', 'pressure', 'temperature', 'velocity_']
nonIsoEdgeOpenJete4.SideSetArrayStatus = []
nonIsoEdgeOpenJete4.ElementBlocks = ['block_1']
negativePressureContour = Contour(registrationName='NegativePressureContour', Input=nonIsoEdgeOpenJete4)
negativePressureContour.ContourBy = ['POINTS', 'pressure']
negativePressureContour.Isosurfaces = [-0.0001]
negativePressureContour.PointMergeMethod = 'Uniform Binning'
enthalpyContour = Contour(registrationName='EnthalpyContour', Input=nonIsoEdgeOpenJete4)
enthalpyContour.ContourBy = ['POINTS', 'enthalpy']
enthalpyContour.Isosurfaces = [10000.0]
enthalpyContour.PointMergeMethod = 'Uniform Binning'
positivePressureContour = Contour(registrationName='PositivePressureContour', Input=nonIsoEdgeOpenJete4)
positivePressureContour.ContourBy = ['POINTS', 'pressure']
positivePressureContour.Isosurfaces = [0.0001]
positivePressureContour.PointMergeMethod = 'Uniform Binning'
nonIsoEdgeOpenJete4Display = Show(nonIsoEdgeOpenJete4, renderView1, 'UnstructuredGridRepresentation')
nonIsoEdgeOpenJete4Display.Representation = 'Surface'
nonIsoEdgeOpenJete4Display.ColorArrayName = ['POINTS', '']
nonIsoEdgeOpenJete4Display.Opacity = 0.32
nonIsoEdgeOpenJete4Display.SelectTCoordArray = 'None'
nonIsoEdgeOpenJete4Display.SelectNormalArray = 'None'
nonIsoEdgeOpenJete4Display.SelectTangentArray = 'None'
nonIsoEdgeOpenJete4Display.OSPRayScaleArray = 'GlobalNodeId'
nonIsoEdgeOpenJete4Display.OSPRayScaleFunction = 'PiecewiseFunction'
nonIsoEdgeOpenJete4Display.SelectOrientationVectors = 'None'
nonIsoEdgeOpenJete4Display.ScaleFactor = 0.030136987566947937
nonIsoEdgeOpenJete4Display.SelectScaleArray = 'GlobalNodeId'
nonIsoEdgeOpenJete4Display.GlyphType = 'Arrow'
nonIsoEdgeOpenJete4Display.GlyphTableIndexArray = 'GlobalNodeId'
nonIsoEdgeOpenJete4Display.GaussianRadius = 0.0015068493783473968
nonIsoEdgeOpenJete4Display.SetScaleArray = ['POINTS', 'GlobalNodeId']
nonIsoEdgeOpenJete4Display.ScaleTransferFunction = 'PiecewiseFunction'
nonIsoEdgeOpenJete4Display.OpacityArray = ['POINTS', 'GlobalNodeId']
nonIsoEdgeOpenJete4Display.OpacityTransferFunction = 'PiecewiseFunction'
nonIsoEdgeOpenJete4Display.DataAxesGrid = 'GridAxesRepresentation'
nonIsoEdgeOpenJete4Display.PolarAxes = 'PolarAxesRepresentation'
nonIsoEdgeOpenJete4Display.ScalarOpacityUnitDistance = 0.015062372960425148
nonIsoEdgeOpenJete4Display.OpacityArrayName = ['POINTS', 'GlobalNodeId']
nonIsoEdgeOpenJete4Display.ExtractedBlockIndex = 2
nonIsoEdgeOpenJete4Display.ScaleTransferFunction.Points = [1.0, 0.0, 0.5, 0.0, 35532.0, 1.0, 0.5, 0.0]
nonIsoEdgeOpenJete4Display.OpacityTransferFunction.Points = [1.0, 0.0, 0.5, 0.0, 35532.0, 1.0, 0.5, 0.0]
enthalpyContourDisplay = Show(enthalpyContour, renderView1, 'GeometryRepresentation')
velocity_LUT = GetColorTransferFunction('velocity_')
velocity_LUT.RGBPoints = [-0.01593730623308355, 0.231373, 0.298039, 0.752941, 0.030675974411747547, 0.865003, 0.865003, 0.865003, 0.07728925505657865, 0.705882, 0.0156863, 0.14902]
velocity_LUT.ScalarRangeInitialized = 1.0
velocity_LUT.VectorComponent = 2
velocity_LUT.VectorMode = 'Component'
enthalpyContourDisplay.Representation = 'Surface'
enthalpyContourDisplay.ColorArrayName = ['POINTS', 'velocity_']
enthalpyContourDisplay.LookupTable = velocity_LUT
enthalpyContourDisplay.SelectTCoordArray = 'None'
enthalpyContourDisplay.SelectNormalArray = 'Normals'
enthalpyContourDisplay.SelectTangentArray = 'None'
enthalpyContourDisplay.OSPRayScaleArray = 'enthalpy'
enthalpyContourDisplay.OSPRayScaleFunction = 'PiecewiseFunction'
enthalpyContourDisplay.SelectOrientationVectors = 'None'
enthalpyContourDisplay.ScaleFactor = 0.01933152247220278
enthalpyContourDisplay.SelectScaleArray = 'enthalpy'
enthalpyContourDisplay.GlyphType = 'Arrow'
enthalpyContourDisplay.GlyphTableIndexArray = 'enthalpy'
enthalpyContourDisplay.GaussianRadius = 0.000966576123610139
enthalpyContourDisplay.SetScaleArray = ['POINTS', 'enthalpy']
enthalpyContourDisplay.ScaleTransferFunction = 'PiecewiseFunction'
enthalpyContourDisplay.OpacityArray = ['POINTS', 'enthalpy']
enthalpyContourDisplay.OpacityTransferFunction = 'PiecewiseFunction'
enthalpyContourDisplay.DataAxesGrid = 'GridAxesRepresentation'
enthalpyContourDisplay.PolarAxes = 'PolarAxesRepresentation'
enthalpyContourDisplay.ScaleTransferFunction.Points = [48470.90823995246, 0.0, 0.5, 0.0, 48478.91015625, 1.0, 0.5, 0.0]
enthalpyContourDisplay.OpacityTransferFunction.Points = [48470.90823995246, 0.0, 0.5, 0.0, 48478.91015625, 1.0, 0.5, 0.0]
velocity_LUTColorBar = GetScalarBar(velocity_LUT, renderView1)
velocity_LUTColorBar.WindowLocation = 'UpperLeftCorner'
velocity_LUTColorBar.Position = [0.002070393374741201, 0.6534810126582279]
velocity_LUTColorBar.Title = 'velocity_'
velocity_LUTColorBar.ComponentTitle = 'Z'
velocity_LUTColorBar.Visibility = 1
enthalpyContourDisplay.SetScalarBarVisibility(renderView1, True)
velocity_PWF = GetOpacityTransferFunction('velocity_')
velocity_PWF.Points = [-0.01593730623308355, 0.0, 0.5, 0.0, 0.07728925505657865, 1.0, 0.5, 0.0]
velocity_PWF.ScalarRangeInitialized = 1
pNG1 = CreateExtractor('PNG', renderView1, registrationName='PNG1')
pNG1.Writer.FileName = 'RenderView1_%.6ts%cm.png'
pNG1.Writer.ImageResolution = [1920, 1080]
pNG1.Writer.TransparentBackground = 0
pNG1.Writer.Format = 'PNG'
pNG1.Writer.CameraMode = 'Static'
SetActiveSource(pNG1)
if __name__ == '__main__':
infile = ""
database = ""
if len(sys.argv) == 3:
infile = sys.argv[2]
database = sys.argv[1]
else:
print("ERROR")
paraview_set_up_extracts( database, infile )
SaveExtracts(ExtractsOutputDirectory=database)
create_cinema_csv( database, infile )
| true | true |
1c2d68cb6473308dfc2af807e1b06014c9ca446c | 47,587 | py | Python | astropy/modeling/fitting.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | astropy/modeling/fitting.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | astropy/modeling/fitting.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module implements classes (called Fitters) which combine optimization
algorithms (typically from `scipy.optimize`) with statistic functions to perform
fitting. Fitters are implemented as callable classes. In addition to the data
to fit, the ``__call__`` method takes an instance of
`~astropy.modeling.core.FittableModel` as input, and returns a copy of the
model with its parameters determined by the optimizer.
Optimization algorithms, called "optimizers" are implemented in
`~astropy.modeling.optimizers` and statistic functions are in
`~astropy.modeling.statistic`. The goal is to provide an easy to extend
framework and allow users to easily create new fitters by combining statistics
with optimizers.
There are two exceptions to the above scheme.
`~astropy.modeling.fitting.LinearLSQFitter` uses Numpy's `~numpy.linalg.lstsq`
function. `~astropy.modeling.fitting.LevMarLSQFitter` uses
`~scipy.optimize.leastsq` which combines optimization and statistic in one
implementation.
"""
from __future__ import (absolute_import, unicode_literals, division,
print_function)
import abc
import inspect
import operator
import warnings
from functools import reduce, wraps
import numpy as np
from .utils import poly_map_domain, _combine_equivalency_dict
from ..units import Quantity
from ..utils.exceptions import AstropyUserWarning
from ..extern import six
from ..extern.six.moves import range
from .optimizers import (SLSQP, Simplex)
from .statistic import (leastsquare)
# Check pkg_resources exists (optional setuptools machinery; presumably used
# elsewhere to discover externally registered fitters via entry points --
# only the availability flag is set here).
try:
    from pkg_resources import iter_entry_points
    HAS_PKG = True
except ImportError:
    HAS_PKG = False
# Public API of this module.
__all__ = ['LinearLSQFitter', 'LevMarLSQFitter', 'FittingWithOutlierRemoval',
           'SLSQPLSQFitter', 'SimplexLSQFitter', 'JointFitter', 'Fitter']
# Statistic functions implemented in `astropy.modeling.statistic.py
STATISTICS = [leastsquare]
# Optimizers implemented in `astropy.modeling.optimizers.py
OPTIMIZERS = [Simplex, SLSQP]
# Shared default iteration-count / step / accuracy constants.
from .optimizers import (DEFAULT_MAXITER, DEFAULT_EPS, DEFAULT_ACC)
class ModelsError(Exception):
    """Root of the exception hierarchy used by the modeling fitters."""
class ModelLinearityError(ModelsError):
    """Raised when a non-linear model is handed to a linear fitter."""
class UnsupportedConstraintError(ModelsError, ValueError):
    """
    Raised when the chosen fitter cannot honor a constraint type
    declared on the model.
    """
class _FitterMeta(abc.ABCMeta):
"""
Currently just provides a registry for all Fitter classes.
"""
registry = set()
def __new__(mcls, name, bases, members):
cls = super(_FitterMeta, mcls).__new__(mcls, name, bases, members)
if not inspect.isabstract(cls) and not name.startswith('_'):
mcls.registry.add(cls)
return cls
def fitter_unit_support(func):
    """
    This is a decorator that can be used to add support for dealing with
    quantities to any __call__ method on a fitter which may not support
    quantities itself. This is done by temporarily removing units from all
    parameters then adding them back once the fitting has completed.
    """
    @wraps(func)
    def wrapper(self, model, x, y, z=None, **kwargs):
        equivalencies = kwargs.pop('equivalencies', None)

        data_has_units = (isinstance(x, Quantity) or
                          isinstance(y, Quantity) or
                          isinstance(z, Quantity))

        model_has_units = model._has_units

        if data_has_units or model_has_units:
            if model._supports_unit_fitting:

                # We now combine any instance-level input equivalencies with user
                # specified ones at call-time.
                input_units_equivalencies = _combine_equivalency_dict(
                    model.inputs, equivalencies, model.input_units_equivalencies)

                # If input_units is defined, we transform the input data into those
                # expected by the model. We hard-code the input names 'x', and 'y'
                # here since FittableModel instances have input names ('x',) or
                # ('x', 'y')
                if model.input_units is not None:
                    if isinstance(x, Quantity):
                        x = x.to(model.input_units['x'], equivalencies=input_units_equivalencies['x'])
                    if isinstance(y, Quantity) and z is not None:
                        y = y.to(model.input_units['y'], equivalencies=input_units_equivalencies['y'])

                # We now strip away the units from the parameters, taking care to
                # first convert any parameters to the units that correspond to the
                # input units (to make sure that initial guesses on the parameters)
                # are in the right unit system
                model = model.without_units_for_data(x=x, y=y, z=z)

                # We strip away the units from the input itself
                add_back_units = False

                if isinstance(x, Quantity):
                    add_back_units = True
                    xdata = x.value
                else:
                    xdata = np.asarray(x)

                if isinstance(y, Quantity):
                    add_back_units = True
                    ydata = y.value
                else:
                    ydata = np.asarray(y)

                if z is not None:
                    # BUGFIX: this previously tested ``isinstance(y, Quantity)``,
                    # which raised AttributeError for a plain-array ``z`` with a
                    # Quantity ``y`` and silently ignored units on ``z`` when
                    # ``y`` was unitless.
                    if isinstance(z, Quantity):
                        add_back_units = True
                        zdata = z.value
                    else:
                        zdata = np.asarray(z)

                # We run the fitting
                if z is None:
                    model_new = func(self, model, xdata, ydata, **kwargs)
                else:
                    model_new = func(self, model, xdata, ydata, zdata, **kwargs)

                # And finally we add back units to the parameters
                if add_back_units:
                    model_new = model_new.with_units_from_data(x=x, y=y, z=z)

                return model_new

            else:
                raise NotImplementedError("This model does not support being fit to data with units")

        else:
            # No units anywhere: call the wrapped fitter untouched.
            return func(self, model, x, y, z=z, **kwargs)

    return wrapper
@six.add_metaclass(_FitterMeta)
class Fitter(object):
    """
    Base class for all fitters.

    Parameters
    ----------
    optimizer : callable
        A callable implementing an optimization algorithm
    statistic : callable
        Statistic function
    """

    def __init__(self, optimizer, statistic):
        # Both pieces are mandatory; fail fast with a clear message.
        if optimizer is None:
            raise ValueError("Expected an optimizer.")
        if statistic is None:
            raise ValueError("Expected a statistic function.")

        if inspect.isclass(optimizer):
            # A callable class: instantiate it once up front.
            self._opt_method = optimizer()
        elif inspect.isfunction(optimizer):
            self._opt_method = optimizer
        else:
            raise ValueError("Expected optimizer to be a callable class or a function.")

        self._stat_method = statistic() if inspect.isclass(statistic) else statistic

    def objective_function(self, fps, *args):
        """
        Function to minimize.

        Parameters
        ----------
        fps : list
            parameters returned by the fitter
        args : list
            [model, [other_args], [input coordinates]]
            other_args may include weights or any other quantities specific for
            a statistic

        Notes
        -----
        The list of arguments (args) is set in the `__call__` method.
        Fitters may overwrite this method, e.g. when statistic functions
        require other arguments.
        """
        model, measured = args[0], args[-1]
        # Load the trial parameter vector into the model, then score it.
        _fitter_to_model_params(model, fps)
        return self._stat_method(measured, model, *args[1:-1])

    @abc.abstractmethod
    def __call__(self):
        """
        This method performs the actual fitting and modifies the parameter list
        of a model.

        Fitter subclasses should implement this method.
        """
        raise NotImplementedError("Subclasses should implement this method.")
# TODO: I have ongoing branch elsewhere that's refactoring this module so that
# all the fitter classes in here are Fitter subclasses. In the meantime we
# need to specify that _FitterMeta is its metaclass.
@six.add_metaclass(_FitterMeta)
class LinearLSQFitter(object):
    """
    A class performing a linear least square fitting.

    Uses `numpy.linalg.lstsq` to do the fitting.
    Given a model and data, fits the model to the data and changes the
    model's parameters. Keeps a dictionary of auxiliary fitting information.
    """

    supported_constraints = ['fixed']

    def __init__(self):
        # Auxiliary results from the most recent call to ``__call__``.
        self.fit_info = {'residuals': None,
                         'rank': None,
                         'singular_values': None,
                         'params': None
                         }

    @staticmethod
    def _deriv_with_constraints(model, param_indices, x=None, y=None):
        # Evaluate fit derivatives and keep only the entries belonging to the
        # parameters listed in ``param_indices`` (rows or columns depending on
        # the model's ``col_fit_deriv`` convention).
        if y is None:
            d = np.array(model.fit_deriv(x, *model.parameters))
        else:
            d = np.array(model.fit_deriv(x, y, *model.parameters))

        if model.col_fit_deriv:
            return d[param_indices]
        else:
            return d[..., param_indices]

    def _map_domain_window(self, model, x, y=None):
        """
        Maps domain into window for a polynomial model which has these
        attributes.
        """
        if y is None:
            if hasattr(model, 'domain') and model.domain is None:
                model.domain = [x.min(), x.max()]
            if hasattr(model, 'window') and model.window is None:
                model.window = [-1, 1]
            return poly_map_domain(x, model.domain, model.window)
        else:
            if hasattr(model, 'x_domain') and model.x_domain is None:
                model.x_domain = [x.min(), x.max()]
            if hasattr(model, 'y_domain') and model.y_domain is None:
                model.y_domain = [y.min(), y.max()]
            if hasattr(model, 'x_window') and model.x_window is None:
                model.x_window = [-1., 1.]
            if hasattr(model, 'y_window') and model.y_window is None:
                model.y_window = [-1., 1.]

            xnew = poly_map_domain(x, model.x_domain, model.x_window)
            ynew = poly_map_domain(y, model.y_domain, model.y_window)
            return xnew, ynew

    @fitter_unit_support
    def __call__(self, model, x, y, z=None, weights=None, rcond=None):
        """
        Fit data to this model.

        Parameters
        ----------
        model : `~astropy.modeling.FittableModel`
            model to fit to x, y, z
        x : array
            input coordinates
        y : array
            input coordinates
        z : array (optional)
            input coordinates
        weights : array (optional)
            weights
        rcond : float, optional
            Cut-off ratio for small singular values of ``a``.
            Singular values are set to zero if they are smaller than ``rcond``
            times the largest singular value of ``a``.
        equivalencies : list or None, optional and keyword-only argument
            List of *additional* equivalencies that should be applied in
            case x, y and/or z have units. Default is None.

        Returns
        -------
        model_copy : `~astropy.modeling.FittableModel`
            a copy of the input model with parameters set by the fitter
        """

        if not model.fittable:
            raise ValueError("Model must be a subclass of FittableModel")

        if not model.linear:
            raise ModelLinearityError('Model is not linear in parameters, '
                                      'linear fit methods should not be used.')

        _validate_constraints(self.supported_constraints, model)

        model_copy = model.copy()
        _, fitparam_indices = _model_to_fit_params(model_copy)

        if model_copy.n_inputs == 2 and z is None:
            raise ValueError("Expected x, y and z for a 2 dimensional model.")

        farg = _convert_input(x, y, z, n_models=len(model_copy),
                              model_set_axis=model_copy.model_set_axis)

        has_fixed = any(model_copy.fixed.values())

        if has_fixed:
            # The list of fixed params is the complement of those being fitted:
            fixparam_indices = [idx for idx in
                                range(len(model_copy.param_names))
                                if idx not in fitparam_indices]

            # Construct matrix of user-fixed parameters that can be dotted with
            # the corresponding fit_deriv() terms, to evaluate corrections to
            # the dependent variable in order to fit only the remaining terms:
            fixparams = np.asarray([getattr(model_copy,
                                            model_copy.param_names[idx]).value
                                    for idx in fixparam_indices])

        if len(farg) == 2:
            x, y = farg

            # map domain into window
            if hasattr(model_copy, 'domain'):
                x = self._map_domain_window(model_copy, x)
            if has_fixed:
                lhs = self._deriv_with_constraints(model_copy,
                                                   fitparam_indices,
                                                   x=x)
                fixderivs = self._deriv_with_constraints(model_copy,
                                                         fixparam_indices,
                                                         x=x)
            else:
                lhs = model_copy.fit_deriv(x, *model_copy.parameters)
            sum_of_implicit_terms = model_copy.sum_of_implicit_terms(x)
            rhs = y
        else:
            x, y, z = farg

            # map domain into window
            if hasattr(model_copy, 'x_domain'):
                x, y = self._map_domain_window(model_copy, x, y)

            if has_fixed:
                lhs = self._deriv_with_constraints(model_copy,
                                                   fitparam_indices, x=x, y=y)
                fixderivs = self._deriv_with_constraints(model_copy,
                                                         fixparam_indices, x=x, y=y)
            else:
                lhs = model_copy.fit_deriv(x, y, *model_copy.parameters)
            sum_of_implicit_terms = model_copy.sum_of_implicit_terms(x, y)

            if len(model_copy) > 1:
                if z.ndim > 2:
                    # Basically this code here is making the assumption that if
                    # z has 3 dimensions it represents multiple models where
                    # the value of z is one plane per model. It's then
                    # flattening each plane and transposing so that the model
                    # axis is *last*. That's fine, but this could be
                    # generalized for other dimensionalities of z.
                    # TODO: See above comment
                    rhs = np.array([i.flatten() for i in z]).T
                else:
                    rhs = z.T
            else:
                rhs = z.flatten()

        # If the derivative is defined along rows (as with non-linear models)
        if model_copy.col_fit_deriv:
            lhs = np.asarray(lhs).T

        # Subtract any terms fixed by the user from (a copy of) the RHS, in
        # order to fit the remaining terms correctly:
        if has_fixed:
            if model_copy.col_fit_deriv:
                fixderivs = np.asarray(fixderivs).T  # as for lhs above
            rhs = rhs - fixderivs.dot(fixparams)  # evaluate user-fixed terms

        # Subtract any terms implicit in the model from the RHS, which, like
        # user-fixed terms, affect the dependent variable but are not fitted:
        if sum_of_implicit_terms is not None:
            # If we have a model set, the extra axis must be added to
            # sum_of_implicit_terms as its innermost dimension, to match the
            # dimensionality of rhs after _convert_input "rolls" it as needed
            # by np.linalg.lstsq. The vector then gets broadcast to the right
            # number of sets (columns). This assumes all the models share the
            # same input co-ordinates, as is currently the case.
            if len(model_copy) > 1:
                sum_of_implicit_terms = sum_of_implicit_terms[..., np.newaxis]
            rhs = rhs - sum_of_implicit_terms

        if weights is not None:
            # BUGFIX: use the builtin ``float`` (== float64); the ``np.float``
            # alias was deprecated in NumPy 1.20 and removed in 1.24.
            weights = np.asarray(weights, dtype=float)
            if len(x) != len(weights):
                raise ValueError("x and weights should have the same length")
            if rhs.ndim == 2:
                lhs *= weights[:, np.newaxis]
                # Don't modify in-place in case rhs was the original dependent
                # variable array
                rhs = rhs * weights[:, np.newaxis]
            else:
                lhs *= weights[:, np.newaxis]
                rhs = rhs * weights

        if rcond is None:
            rcond = len(x) * np.finfo(x.dtype).eps

        # Column-scale the design matrix before solving for numerical
        # stability, then undo the scaling on the coefficients.
        scl = (lhs * lhs).sum(0)
        lacoef, resids, rank, sval = np.linalg.lstsq(lhs / scl, rhs, rcond)

        self.fit_info['residuals'] = resids
        self.fit_info['rank'] = rank
        self.fit_info['singular_values'] = sval

        lacoef = (lacoef.T / scl).T
        self.fit_info['params'] = lacoef

        # TODO: Only Polynomial models currently have an _order attribute;
        # maybe change this to read isinstance(model, PolynomialBase)
        if hasattr(model_copy, '_order') and rank != model_copy._order:
            warnings.warn("The fit may be poorly conditioned\n",
                          AstropyUserWarning)

        _fitter_to_model_params(model_copy, lacoef.flatten())
        return model_copy
class FittingWithOutlierRemoval(object):
    """
    This class combines an outlier removal technique with a fitting procedure.
    Basically, given a number of iterations ``niter``, outliers are removed
    and fitting is performed for each iteration.

    Parameters
    ----------
    fitter : An Astropy fitter
        An instance of any Astropy fitter, i.e., LinearLSQFitter,
        LevMarLSQFitter, SLSQPLSQFitter, SimplexLSQFitter, JointFitter.
    outlier_func : function
        A function for outlier removal.
    niter : int (optional)
        Number of iterations.
    outlier_kwargs : dict (optional)
        Keyword arguments for outlier_func.
    """

    def __init__(self, fitter, outlier_func, niter=3, **outlier_kwargs):
        self.fitter = fitter
        self.outlier_func = outlier_func
        self.niter = niter
        self.outlier_kwargs = outlier_kwargs

    def __str__(self):
        # BUGFIX: this previously read ``self.fitter__class__`` (missing
        # attribute-access dot), so str() raised AttributeError.
        return ("Fitter: {0}\nOutlier function: {1}\nNum. of iterations: {2}" +
                ("\nOutlier func. args.: {3}"))\
            .format(self.fitter.__class__.__name__,
                    self.outlier_func.__name__, self.niter,
                    self.outlier_kwargs)

    def __repr__(self):
        return ("{0}(fitter: {1}, outlier_func: {2}," +
                " niter: {3}, outlier_kwargs: {4})")\
            .format(self.__class__.__name__,
                    self.fitter.__class__.__name__,
                    self.outlier_func.__name__, self.niter,
                    self.outlier_kwargs)

    def __call__(self, model, x, y, z=None, weights=None, **kwargs):
        """
        Parameters
        ----------
        model : `~astropy.modeling.FittableModel`
            An analytic model which will be fit to the provided data.
            This also contains the initial guess for an optimization
            algorithm.
        x : array-like
            Input coordinates.
        y : array-like
            Data measurements (1D case) or input coordinates (2D case).
        z : array-like (optional)
            Data measurements (2D case).
        weights : array-like (optional)
            Weights to be passed to the fitter.
        kwargs : dict (optional)
            Keyword arguments to be passed to the fitter.

        Returns
        -------
        filtered_data : numpy.ma.core.MaskedArray
            Data used to perform the fitting after outlier removal.
        fitted_model : `~astropy.modeling.FittableModel`
            Fitted model after outlier removal.
        """
        # Initial fit on the full data set; subsequent iterations alternate
        # residual-based outlier rejection with refitting on the kept points.
        # NOTE(review): ``outlier_func`` is assumed to return a masked array
        # (its ``mask`` is used below) -- confirm against callers.
        fitted_model = self.fitter(model, x, y, z, weights=weights, **kwargs)
        if z is None:
            filtered_data = y
            for n in range(self.niter):
                filtered_data = self.outlier_func(filtered_data - fitted_model(x),
                                                  **self.outlier_kwargs)
                filtered_data += fitted_model(x)
                fitted_model = self.fitter(fitted_model,
                                           x[~filtered_data.mask],
                                           filtered_data.data[~filtered_data.mask],
                                           **kwargs)
        else:
            filtered_data = z
            for n in range(self.niter):
                filtered_data = self.outlier_func(filtered_data - fitted_model(x, y),
                                                  **self.outlier_kwargs)
                filtered_data += fitted_model(x, y)
                fitted_model = self.fitter(fitted_model,
                                           x[~filtered_data.mask],
                                           y[~filtered_data.mask],
                                           filtered_data.data[~filtered_data.mask],
                                           **kwargs)
        return filtered_data, fitted_model
@six.add_metaclass(_FitterMeta)
class LevMarLSQFitter(object):
    """
    Levenberg-Marquardt algorithm and least squares statistic.

    Attributes
    ----------
    fit_info : dict
        The `scipy.optimize.leastsq` result for the most recent fit (see
        notes).

    Notes
    -----
    The ``fit_info`` dictionary contains the values returned by
    `scipy.optimize.leastsq` for the most recent fit, including the values from
    the ``infodict`` dictionary it returns. See the `scipy.optimize.leastsq`
    documentation for details on the meaning of these values. Note that the
    ``x`` return value is *not* included (as it is instead the parameter values
    of the returned model).
    Additionally, one additional element of ``fit_info`` is computed whenever a
    model is fit, with the key 'param_cov'. The corresponding value is the
    covariance matrix of the parameters as a 2D numpy array. The order of the
    matrix elements matches the order of the parameters in the fitted model
    (i.e., the same order as ``model.param_names``).
    """

    supported_constraints = ['fixed', 'tied', 'bounds']
    """
    The constraint types supported by this fitter type.
    """

    def __init__(self):
        # Overwritten with fresh results on every call to ``__call__``.
        self.fit_info = {'nfev': None,
                         'fvec': None,
                         'fjac': None,
                         'ipvt': None,
                         'qtf': None,
                         'message': None,
                         'ierr': None,
                         'param_jac': None,
                         'param_cov': None}

        super(LevMarLSQFitter, self).__init__()

    def objective_function(self, fps, *args):
        """
        Function to minimize.

        Parameters
        ----------
        fps : list
            parameters returned by the fitter
        args : list
            [model, [weights], [input coordinates]]
        """

        model = args[0]
        weights = args[1]
        # Push the fitter's trial parameters onto the model before evaluating;
        # the (optionally weighted) flattened residual vector is returned.
        _fitter_to_model_params(model, fps)
        meas = args[-1]
        if weights is None:
            return np.ravel(model(*args[2: -1]) - meas)
        else:
            return np.ravel(weights * (model(*args[2: -1]) - meas))

    @fitter_unit_support
    def __call__(self, model, x, y, z=None, weights=None,
                 maxiter=DEFAULT_MAXITER, acc=DEFAULT_ACC,
                 epsilon=DEFAULT_EPS, estimate_jacobian=False):
        """
        Fit data to this model.

        Parameters
        ----------
        model : `~astropy.modeling.FittableModel`
            model to fit to x, y, z
        x : array
            input coordinates
        y : array
            input coordinates
        z : array (optional)
            input coordinates
        weights : array (optional)
            weights
        maxiter : int
            maximum number of iterations
        acc : float
            Relative error desired in the approximate solution
        epsilon : float
            A suitable step length for the forward-difference
            approximation of the Jacobian (if model.fjac=None). If
            epsfcn is less than the machine precision, it is
            assumed that the relative errors in the functions are
            of the order of the machine precision.
        estimate_jacobian : bool
            If False (default) and if the model has a fit_deriv method,
            it will be used. Otherwise the Jacobian will be estimated.
            If True, the Jacobian will be estimated in any case.
        equivalencies : list or None, optional and keyword-only argument
            List of *additional* equivalencies that should be applied in
            case x, y and/or z have units. Default is None.

        Returns
        -------
        model_copy : `~astropy.modeling.FittableModel`
            a copy of the input model with parameters set by the fitter
        """

        from scipy import optimize
        model_copy = _validate_model(model, self.supported_constraints)
        farg = (model_copy, weights, ) + _convert_input(x, y, z)

        # Use the analytic derivative unless the caller asked for (or the
        # model requires) a finite-difference estimate.
        if model_copy.fit_deriv is None or estimate_jacobian:
            dfunc = None
        else:
            dfunc = self._wrap_deriv
        init_values, _ = _model_to_fit_params(model_copy)
        fitparams, cov_x, dinfo, mess, ierr = optimize.leastsq(
            self.objective_function, init_values, args=farg, Dfun=dfunc,
            col_deriv=model_copy.col_fit_deriv, maxfev=maxiter, epsfcn=epsilon,
            xtol=acc, full_output=True)
        _fitter_to_model_params(model_copy, fitparams)
        self.fit_info.update(dinfo)
        self.fit_info['cov_x'] = cov_x
        self.fit_info['message'] = mess
        self.fit_info['ierr'] = ierr
        # leastsq returns ierr in {1, 2, 3, 4} on success.
        if ierr not in [1, 2, 3, 4]:
            warnings.warn("The fit may be unsuccessful; check "
                          "fit_info['message'] for more information.",
                          AstropyUserWarning)

        # now try to compute the true covariance matrix: scale leastsq's
        # fractional covariance by the residual variance (requires more data
        # points than free parameters).
        if (len(y) > len(init_values)) and cov_x is not None:
            sum_sqrs = np.sum(self.objective_function(fitparams, *farg)**2)
            dof = len(y) - len(init_values)
            self.fit_info['param_cov'] = cov_x * sum_sqrs / dof
        else:
            self.fit_info['param_cov'] = None

        return model_copy

    @staticmethod
    def _wrap_deriv(params, model, weights, x, y, z=None):
        """
        Wraps the method calculating the Jacobian of the function to account
        for model constraints.
        `scipy.optimize.leastsq` expects the function derivative to have the
        above signature (parlist, (argtuple)). In order to accommodate model
        constraints, instead of using p directly, we set the parameter list in
        this function.
        """

        if weights is None:
            weights = 1.0

        if any(model.fixed.values()) or any(model.tied.values()):
            # Evaluate derivatives at the model's *current* parameters (the
            # optimizer's trial values were already set on the model by
            # objective_function), then drop fixed/tied rows or columns.
            if z is None:
                full_deriv = np.ravel(weights) * np.array(model.fit_deriv(x, *model.parameters))
            else:
                full_deriv = (np.ravel(weights) * np.array(model.fit_deriv(x, y, *model.parameters)).T).T

            pars = [getattr(model, name) for name in model.param_names]
            fixed = [par.fixed for par in pars]
            tied = [par.tied for par in pars]
            tied = list(np.where([par.tied is not False for par in pars],
                                 True, tied))
            fix_and_tie = np.logical_or(fixed, tied)
            # ``ind`` marks the free parameters whose derivatives are kept.
            ind = np.logical_not(fix_and_tie)

            if not model.col_fit_deriv:
                full_deriv = np.asarray(full_deriv).T
                residues = np.asarray(full_deriv[np.nonzero(ind)]).T
            else:
                residues = full_deriv[np.nonzero(ind)]

            return [np.ravel(_) for _ in residues]
        else:
            # No constraints: forward derivatives for the trial ``params``.
            if z is None:
                return [np.ravel(_) for _ in np.ravel(weights) * np.array(model.fit_deriv(x, *params))]
            else:
                if not model.col_fit_deriv:
                    return [np.ravel(_) for _ in (np.ravel(weights) * np.array(model.fit_deriv(x, y, *params)).T).T]
                else:
                    return [np.ravel(_) for _ in (weights * np.array(model.fit_deriv(x, y, *params)))]
class SLSQPLSQFitter(Fitter):
    """
    SLSQP optimization algorithm and least squares statistic.

    Raises
    ------
    ModelLinearityError
        A linear model is passed to a nonlinear fitter
    """

    supported_constraints = SLSQP.supported_constraints

    def __init__(self):
        super(SLSQPLSQFitter, self).__init__(optimizer=SLSQP, statistic=leastsquare)
        # Populated by the optimizer on each call.
        self.fit_info = {}

    @fitter_unit_support
    def __call__(self, model, x, y, z=None, weights=None, **kwargs):
        """
        Fit data to this model.

        Parameters
        ----------
        model : `~astropy.modeling.FittableModel`
            model to fit to x, y, z
        x : array
            input coordinates
        y : array
            input coordinates
        z : array (optional)
            input coordinates
        weights : array (optional)
            weights
        kwargs : dict
            optional keyword arguments forwarded to the SLSQP optimizer or
            the statistic, e.g. ``verblevel`` (0 silent, 1 summary at end,
            2 summary each iteration), ``maxiter``, ``epsilon``
            (finite-difference step size) and ``acc`` (requested accuracy)
        equivalencies : list or None, optional and keyword-only argument
            List of *additional* equivalencies that should be applied in
            case x, y and/or z have units. Default is None.

        Returns
        -------
        model_copy : `~astropy.modeling.FittableModel`
            a copy of the input model with parameters set by the fitter
        """
        model_copy = _validate_model(model, self._opt_method.supported_constraints)
        fit_args = (model_copy, weights, ) + _convert_input(x, y, z)
        start_params, _ = _model_to_fit_params(model_copy)
        best_params, self.fit_info = self._opt_method(
            self.objective_function, start_params, fit_args, **kwargs)
        _fitter_to_model_params(model_copy, best_params)
        return model_copy
class SimplexLSQFitter(Fitter):
    """
    Simplex algorithm and least squares statistic.

    Raises
    ------
    ModelLinearityError
        A linear model is passed to a nonlinear fitter
    """

    supported_constraints = Simplex.supported_constraints

    def __init__(self):
        super(SimplexLSQFitter, self).__init__(optimizer=Simplex,
                                               statistic=leastsquare)
        # Populated by the optimizer on each call.
        self.fit_info = {}

    @fitter_unit_support
    def __call__(self, model, x, y, z=None, weights=None, **kwargs):
        """
        Fit data to this model.

        Parameters
        ----------
        model : `~astropy.modeling.FittableModel`
            model to fit to x, y, z
        x : array
            input coordinates
        y : array
            input coordinates
        z : array (optional)
            input coordinates
        weights : array (optional)
            weights
        kwargs : dict
            optional keyword arguments forwarded to the Simplex optimizer or
            the statistic, e.g. ``maxiter`` (maximum number of iterations)
            and ``acc`` (relative error in the approximate solution)
        equivalencies : list or None, optional and keyword-only argument
            List of *additional* equivalencies that should be applied in
            case x, y and/or z have units. Default is None.

        Returns
        -------
        model_copy : `~astropy.modeling.FittableModel`
            a copy of the input model with parameters set by the fitter
        """
        model_copy = _validate_model(model,
                                     self._opt_method.supported_constraints)
        fit_args = (model_copy, weights, ) + _convert_input(x, y, z)
        start_params, _ = _model_to_fit_params(model_copy)
        best_params, self.fit_info = self._opt_method(
            self.objective_function, start_params, fit_args, **kwargs)
        _fitter_to_model_params(model_copy, best_params)
        return model_copy
@six.add_metaclass(_FitterMeta)
class JointFitter(object):
    """
    Fit models which share a parameter.
    For example, fit two gaussians to two data sets but keep
    the FWHM the same.

    Parameters
    ----------
    models : list
        a list of model instances
    jointparameters : list
        a list of joint parameters
    initvals : list
        a list of initial values
    """

    def __init__(self, models, jointparameters, initvals):
        self.models = list(models)
        self.initvals = list(initvals)
        # NOTE(review): despite the docstring, ``jointparameters`` is used as
        # a mapping {model: [param_name, ...]} throughout -- confirm callers.
        self.jointparams = jointparameters
        self._verify_input()
        self.fitparams = self._model_to_fit_params()

        # a list of model.n_inputs
        self.modeldims = [m.n_inputs for m in self.models]
        # sum all model dimensions
        self.ndim = np.sum(self.modeldims)

    def _model_to_fit_params(self):
        """Build the flat fit-parameter list: the shared (joint) initial
        values first, then each model's non-joint parameters in order."""
        fparams = []
        fparams.extend(self.initvals)
        for model in self.models:
            params = [p.flatten() for p in model.parameters]
            joint_params = self.jointparams[model]
            param_metrics = model._param_metrics
            for param_name in joint_params:
                slice_ = param_metrics[param_name]['slice']
                del params[slice_]
            fparams.extend(params)
        return fparams

    def objective_function(self, fps, *args):
        """
        Function to minimize.

        Parameters
        ----------
        fps : list
            the fitted parameters - result of an one iteration of the
            fitting algorithm
        args : dict
            tuple of measured and input coordinates
            args is always passed as a tuple from optimize.leastsq
        """

        lstsqargs = list(args)
        fitted = []
        fitparams = list(fps)
        numjp = len(self.initvals)
        # make a separate list of the joint fitted parameters
        jointfitparams = fitparams[:numjp]
        del fitparams[:numjp]

        for model in self.models:
            joint_params = self.jointparams[model]
            # Consume this model's inputs plus its measured data from args.
            margs = lstsqargs[:model.n_inputs + 1]
            del lstsqargs[:model.n_inputs + 1]
            # separate each model separately fitted parameters
            numfp = len(model._parameters) - len(joint_params)
            mfparams = fitparams[:numfp]

            del fitparams[:numfp]
            # recreate the model parameters: joint names take the shared
            # values, the rest are filled from this model's own slice.
            mparams = []
            param_metrics = model._param_metrics
            for param_name in model.param_names:
                if param_name in joint_params:
                    index = joint_params.index(param_name)
                    # should do this with slices in case the
                    # parameter is not a number
                    mparams.extend([jointfitparams[index]])
                else:
                    slice_ = param_metrics[param_name]['slice']
                    plen = slice_.stop - slice_.start
                    mparams.extend(mfparams[:plen])
                    del mfparams[:plen]
            modelfit = model.evaluate(margs[:-1], *mparams)
            fitted.extend(modelfit - margs[-1])
        return np.ravel(fitted)

    def _verify_input(self):
        """Validate that there are >1 models, >=2 joint entries, and that
        every model's joint-parameter list matches len(initvals)."""
        if len(self.models) <= 1:
            raise TypeError("Expected >1 models, {} is given".format(
                len(self.models)))
        if len(self.jointparams.keys()) < 2:
            raise TypeError("At least two parameters are expected, "
                            "{} is given".format(len(self.jointparams.keys())))
        for j in self.jointparams.keys():
            if len(self.jointparams[j]) != len(self.initvals):
                raise TypeError("{} parameter(s) provided but {} expected".format(
                    len(self.jointparams[j]), len(self.initvals)))

    def __call__(self, *args):
        """
        Fit data to these models keeping some of the parameters common to the
        two models.
        """

        from scipy import optimize

        # Each model contributes n_inputs coordinate arrays plus one
        # measurement array, hence the x + 1 + y + 1 accumulation.
        if len(args) != reduce(lambda x, y: x + 1 + y + 1, self.modeldims):
            raise ValueError("Expected {} coordinates in args but {} provided"
                             .format(reduce(lambda x, y: x + 1 + y + 1,
                                            self.modeldims), len(args)))

        self.fitparams[:], _ = optimize.leastsq(self.objective_function,
                                                self.fitparams, args=args)

        fparams = self.fitparams[:]
        numjp = len(self.initvals)
        # make a separate list of the joint fitted parameters
        jointfitparams = fparams[:numjp]
        del fparams[:numjp]

        for model in self.models:
            # extract each model's fitted parameters
            joint_params = self.jointparams[model]
            numfp = len(model._parameters) - len(joint_params)
            mfparams = fparams[:numfp]

            del fparams[:numfp]
            # recreate the model parameters (mirror of objective_function)
            # and write them back onto the model in place.
            mparams = []
            param_metrics = model._param_metrics
            for param_name in model.param_names:
                if param_name in joint_params:
                    index = joint_params.index(param_name)
                    # should do this with slices in case the parameter
                    # is not a number
                    mparams.extend([jointfitparams[index]])
                else:
                    slice_ = param_metrics[param_name]['slice']
                    plen = slice_.stop - slice_.start
                    mparams.extend(mfparams[:plen])
                    del mfparams[:plen]
            model.parameters = np.array(mparams)
def _convert_input(x, y, z=None, n_models=1, model_set_axis=0):
"""Convert inputs to float arrays."""
x = np.asarray(x, dtype=np.float)
y = np.asarray(y, dtype=np.float)
if z is not None:
z = np.asarray(z, dtype=np.float)
# For compatibility with how the linear fitter code currently expects to
# work, shift the dependent variable's axes to the expected locations
if n_models > 1:
if z is None:
if y.shape[model_set_axis] != n_models:
raise ValueError(
"Number of data sets (y array is expected to equal "
"the number of parameter sets)")
# For a 1-D model the y coordinate's model-set-axis is expected to
# be last, so that its first dimension is the same length as the x
# coordinates. This is in line with the expectations of
# numpy.linalg.lstsq:
# http://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.lstsq.html
# That is, each model should be represented by a column. TODO:
# Obviously this is a detail of np.linalg.lstsq and should be
# handled specifically by any fitters that use it...
y = np.rollaxis(y, model_set_axis, y.ndim)
else:
# Shape of z excluding model_set_axis
z_shape = z.shape[:model_set_axis] + z.shape[model_set_axis + 1:]
if not (x.shape == y.shape == z_shape):
raise ValueError("x, y and z should have the same shape")
if z is None:
farg = (x, y)
else:
farg = (x, y, z)
return farg
# TODO: These utility functions are really particular to handling
# bounds/tied/fixed constraints for scipy.optimize optimizers that do not
# support them inherently; this needs to be reworked to be clear about this
# distinction (and the fact that these are not necessarily applicable to any
# arbitrary fitter--as evidenced for example by the fact that JointFitter has
# its own versions of these)
# TODO: Most of this code should be entirely rewritten; it should not be as
# inefficient as it is.
def _fitter_to_model_params(model, fps):
    """
    Constructs the full list of model parameters from the fitted and
    constrained parameters.

    Parameters
    ----------
    model : model instance
        The model whose ``parameters`` array is updated in place.
    fps : array-like
        Free (fitted) parameter values in the order produced by
        `_model_to_fit_params`.
    """

    _, fit_param_indices = _model_to_fit_params(model)

    has_tied = any(model.tied.values())
    has_fixed = any(model.fixed.values())
    has_bound = any(b != (None, None) for b in model.bounds.values())

    if not (has_tied or has_fixed or has_bound):
        # We can just assign directly
        model.parameters = fps
        return

    fit_param_indices = set(fit_param_indices)
    offset = 0
    param_metrics = model._param_metrics
    for idx, name in enumerate(model.param_names):
        # Skip fixed/tied parameters; they keep their current values for now.
        if idx not in fit_param_indices:
            continue

        slice_ = param_metrics[name]['slice']
        shape = param_metrics[name]['shape']
        # This is determining which range of fps (the fitted parameters) maps
        # to parameters of the model
        size = reduce(operator.mul, shape, 1)

        values = fps[offset:offset + size]

        # Check bounds constraints: clip the fitted values into the allowed
        # interval rather than rejecting them.
        if model.bounds[name] != (None, None):
            _min, _max = model.bounds[name]
            if _min is not None:
                values = np.fmax(values, _min)
            if _max is not None:
                values = np.fmin(values, _max)

        model.parameters[slice_] = values
        offset += size

    # This has to be done in a separate loop due to how tied parameters are
    # currently evaluated (the fitted parameters need to actually be *set* on
    # the model first, for use in evaluating the "tied" expression--it might be
    # better to change this at some point
    if has_tied:
        for idx, name in enumerate(model.param_names):
            if model.tied[name]:
                value = model.tied[name](model)
                slice_ = param_metrics[name]['slice']
                model.parameters[slice_] = value
def _model_to_fit_params(model):
"""
Convert a model instance's parameter array to an array that can be used
with a fitter that doesn't natively support fixed or tied parameters.
In particular, it removes fixed/tied parameters from the parameter
array.
These may be a subset of the model parameters, if some of them are held
constant or tied.
"""
fitparam_indices = list(range(len(model.param_names)))
if any(model.fixed.values()) or any(model.tied.values()):
params = list(model.parameters)
param_metrics = model._param_metrics
for idx, name in list(enumerate(model.param_names))[::-1]:
if model.fixed[name] or model.tied[name]:
slice_ = param_metrics[name]['slice']
del params[slice_]
del fitparam_indices[idx]
return (np.array(params), fitparam_indices)
else:
return (model.parameters, fitparam_indices)
def _validate_constraints(supported_constraints, model):
    """Make sure model constraints are supported by the current fitter.

    Parameters
    ----------
    supported_constraints : list of str
        Constraint kinds ('fixed', 'tied', 'bounds', 'eqcons', 'ineqcons')
        that the calling fitter can handle.
    model : fittable model instance
        Model whose constraint dictionaries/attributes are inspected.

    Raises
    ------
    UnsupportedConstraintError
        If the model uses any constraint kind the fitter does not support.
    """
    message = 'Optimizer cannot handle {0} constraints.'
    if (any(six.itervalues(model.fixed)) and
            'fixed' not in supported_constraints):
        raise UnsupportedConstraintError(
                message.format('fixed parameter'))
    if any(six.itervalues(model.tied)) and 'tied' not in supported_constraints:
        raise UnsupportedConstraintError(
                message.format('tied parameter'))
    # A bound of (None, None) means "unbounded", so only real bounds count.
    if (any(tuple(b) != (None, None) for b in six.itervalues(model.bounds)) and
            'bounds' not in supported_constraints):
        raise UnsupportedConstraintError(
                message.format('bound parameter'))
    if model.eqcons and 'eqcons' not in supported_constraints:
        raise UnsupportedConstraintError(message.format('equality'))
    if model.ineqcons and 'ineqcons' not in supported_constraints:
        raise UnsupportedConstraintError(message.format('inequality'))
def _validate_model(model, supported_constraints):
"""
Check that model and fitter are compatible and return a copy of the model.
"""
if not model.fittable:
raise ValueError("Model does not appear to be fittable.")
if model.linear:
warnings.warn('Model is linear in parameters; '
'consider using linear fitting methods.',
AstropyUserWarning)
elif len(model) != 1:
# for now only single data sets ca be fitted
raise ValueError("Non-linear fitters can only fit "
"one data set at a time.")
_validate_constraints(supported_constraints, model)
model_copy = model.copy()
return model_copy
def populate_entry_points(entry_points):
    """
    This injects entry points into the `astropy.modeling.fitting` namespace.
    This provides a means of inserting a fitting routine without requirement
    of it being merged into astropy's core.

    Each loaded entry point must be a class that extends
    `~astropy.modeling.Fitter`; anything else only produces a warning so a
    bad plugin cannot break fitting for everyone.

    Parameters
    ----------
    entry_points : a list of `~pkg_resources.EntryPoint`
        entry_points are objects which encapsulate
        importable objects and are defined on the
        installation of a package.

    Notes
    -----
    An explanation of entry points can be found `here <http://setuptools.readthedocs.io/en/latest/setuptools.html#dynamic-discovery-of-services-and-plugins>`
    """
    for entry_point in entry_points:
        name = entry_point.name
        try:
            entry_point = entry_point.load()
        except Exception as e:
            # This stops the fitting from choking if an entry_point produces an error.
            warnings.warn(AstropyUserWarning('{type} error occurred in entry '
                                             'point {name}.' .format(type=type(e).__name__, name=name)))
        else:
            if not inspect.isclass(entry_point):
                # Loaded object is not a class at all -- warn and skip.
                warnings.warn(AstropyUserWarning(
                    'Modeling entry point {0} expected to be a '
                    'Class.' .format(name)))
            else:
                if issubclass(entry_point, Fitter):
                    # Expose the plugin fitter as if it were defined here.
                    name = entry_point.__name__
                    globals()[name] = entry_point
                    __all__.append(name)
                else:
                    warnings.warn(AstropyUserWarning(
                        'Modeling entry point {0} expected to extend '
                        'astropy.modeling.Fitter' .format(name)))
# Discover third-party fitters registered under the 'astropy.modeling'
# entry-point group; guarded so fitting doesn't choke if pkg_resources
# doesn't exist.
if HAS_PKG:
    populate_entry_points(iter_entry_points(group='astropy.modeling', name=None))
| 37.293887 | 157 | 0.589195 |
from __future__ import (absolute_import, unicode_literals, division,
print_function)
import abc
import inspect
import operator
import warnings
from functools import reduce, wraps
import numpy as np
from .utils import poly_map_domain, _combine_equivalency_dict
from ..units import Quantity
from ..utils.exceptions import AstropyUserWarning
from ..extern import six
from ..extern.six.moves import range
from .optimizers import (SLSQP, Simplex)
from .statistic import (leastsquare)
try:
from pkg_resources import iter_entry_points
HAS_PKG = True
except ImportError:
HAS_PKG = False
__all__ = ['LinearLSQFitter', 'LevMarLSQFitter', 'FittingWithOutlierRemoval',
'SLSQPLSQFitter', 'SimplexLSQFitter', 'JointFitter', 'Fitter']
STATISTICS = [leastsquare]
OPTIMIZERS = [Simplex, SLSQP]
from .optimizers import (DEFAULT_MAXITER, DEFAULT_EPS, DEFAULT_ACC)
class ModelsError(Exception):
    """Base class for model exceptions."""


class ModelLinearityError(ModelsError):
    """Raised when a non-linear model is passed to a linear fitter."""


class UnsupportedConstraintError(ModelsError, ValueError):
    """Raised when a fitter does not support a type of constraint."""
class _FitterMeta(abc.ABCMeta):
registry = set()
def __new__(mcls, name, bases, members):
cls = super(_FitterMeta, mcls).__new__(mcls, name, bases, members)
if not inspect.isabstract(cls) and not name.startswith('_'):
mcls.registry.add(cls)
return cls
def fitter_unit_support(func):
    """
    Decorator for a fitter ``__call__`` that adds support for data and
    models with units.

    If either the data or the model carries units, the model is stripped of
    its units for the numerical fit, and (when any input carried units) the
    fitted model gets its units restored from the data afterwards.  If
    nothing has units, the wrapped fitter is called unchanged.
    """
    @wraps(func)
    def wrapper(self, model, x, y, z=None, **kwargs):
        equivalencies = kwargs.pop('equivalencies', None)
        data_has_units = (isinstance(x, Quantity) or
                          isinstance(y, Quantity) or
                          isinstance(z, Quantity))
        model_has_units = model._has_units
        if data_has_units or model_has_units:
            if model._supports_unit_fitting:
                # Convert the data to the model's declared input units before
                # stripping units for the numerical fit.
                input_units_equivalencies = _combine_equivalency_dict(
                    model.inputs, equivalencies, model.input_units_equivalencies)
                if model.input_units is not None:
                    if isinstance(x, Quantity):
                        x = x.to(model.input_units['x'], equivalencies=input_units_equivalencies['x'])
                    if isinstance(y, Quantity) and z is not None:
                        y = y.to(model.input_units['y'], equivalencies=input_units_equivalencies['y'])
                model = model.without_units_for_data(x=x, y=y, z=z)
                add_back_units = False
                if isinstance(x, Quantity):
                    add_back_units = True
                    xdata = x.value
                else:
                    xdata = np.asarray(x)
                if isinstance(y, Quantity):
                    add_back_units = True
                    ydata = y.value
                else:
                    ydata = np.asarray(y)
                if z is not None:
                    # BUGFIX: must test z here, not y.  Testing y meant that a
                    # Quantity z with a plain y was silently stripped of its
                    # units via np.asarray and add_back_units was never set.
                    if isinstance(z, Quantity):
                        add_back_units = True
                        zdata = z.value
                    else:
                        zdata = np.asarray(z)
                if z is None:
                    model_new = func(self, model, xdata, ydata, **kwargs)
                else:
                    model_new = func(self, model, xdata, ydata, zdata, **kwargs)
                if add_back_units:
                    model_new = model_new.with_units_from_data(x=x, y=y, z=z)
                return model_new
            else:
                raise NotImplementedError("This model does not support being fit to data with units")
        else:
            # Unitless fast path: delegate directly.
            return func(self, model, x, y, z=z, **kwargs)
    return wrapper
@six.add_metaclass(_FitterMeta)
class Fitter(object):
    """
    Base class for all fitters.

    Combines an optimization algorithm with a fit statistic; subclasses
    implement ``__call__`` to run the actual fit.
    """
    def __init__(self, optimizer, statistic):
        """Store the optimizer and statistic, instantiating classes on the fly."""
        if optimizer is None:
            raise ValueError("Expected an optimizer.")
        if statistic is None:
            raise ValueError("Expected a statistic function.")
        if inspect.isclass(optimizer):
            self._opt_method = optimizer()
        elif inspect.isfunction(optimizer):
            self._opt_method = optimizer
        else:
            raise ValueError("Expected optimizer to be a callable class or a function.")
        if inspect.isclass(statistic):
            self._stat_method = statistic()
        else:
            self._stat_method = statistic
    def objective_function(self, fps, *args):
        """
        Function to minimize.

        ``fps`` is the flat list of fitted parameters; ``args`` is
        ``(model, <coordinates>, measured_values)``.  The fitted values are
        pushed into the model before the statistic is evaluated.
        """
        model = args[0]
        meas = args[-1]
        _fitter_to_model_params(model, fps)
        res = self._stat_method(meas, model, *args[1:-1])
        return res
    @abc.abstractmethod
    def __call__(self):
        """Run the fit; must be provided by concrete subclasses."""
        raise NotImplementedError("Subclasses should implement this method.")
# all the fitter classes in here are Fitter subclasses. In the meantime we
# need to specify that _FitterMeta is its metaclass.
@six.add_metaclass(_FitterMeta)
class LinearLSQFitter(object):
    """
    A class performing a linear least square fitting.

    Uses `numpy.linalg.lstsq` to do the fitting, so the model must be linear
    in its parameters.  Solver diagnostics are kept in ``fit_info``.
    """

    supported_constraints = ['fixed']

    def __init__(self):
        self.fit_info = {'residuals': None,
                         'rank': None,
                         'singular_values': None,
                         'params': None
                         }

    @staticmethod
    def _deriv_with_constraints(model, param_indices, x=None, y=None):
        """Evaluate ``fit_deriv`` keeping only the derivative terms for the
        parameters listed in *param_indices* (i.e. dropping fixed ones)."""
        if y is None:
            d = np.array(model.fit_deriv(x, *model.parameters))
        else:
            d = np.array(model.fit_deriv(x, y, *model.parameters))
        if model.col_fit_deriv:
            return d[param_indices]
        else:
            return d[..., param_indices]

    def _map_domain_window(self, model, x, y=None):
        """
        Maps domain into window for a polynomial model which has these
        attributes, filling in defaults from the data range when unset.
        """
        if y is None:
            if hasattr(model, 'domain') and model.domain is None:
                model.domain = [x.min(), x.max()]
            if hasattr(model, 'window') and model.window is None:
                model.window = [-1, 1]
            return poly_map_domain(x, model.domain, model.window)
        else:
            if hasattr(model, 'x_domain') and model.x_domain is None:
                model.x_domain = [x.min(), x.max()]
            if hasattr(model, 'y_domain') and model.y_domain is None:
                model.y_domain = [y.min(), y.max()]
            if hasattr(model, 'x_window') and model.x_window is None:
                model.x_window = [-1., 1.]
            if hasattr(model, 'y_window') and model.y_window is None:
                model.y_window = [-1., 1.]
            xnew = poly_map_domain(x, model.x_domain, model.x_window)
            ynew = poly_map_domain(y, model.y_domain, model.y_window)
            return xnew, ynew

    @fitter_unit_support
    def __call__(self, model, x, y, z=None, weights=None, rcond=None):
        """
        Fit data to this model and return a fitted copy of it.

        Parameters
        ----------
        model : fittable, linear model
        x, y : array-like
            Input coordinates (and dependent values for 1D models).
        z : array-like, optional
            Dependent values for 2D models.
        weights : array-like, optional
            Per-point weights (same length as ``x``).
        rcond : float, optional
            Singular-value cutoff passed to `numpy.linalg.lstsq`.
        """
        if not model.fittable:
            raise ValueError("Model must be a subclass of FittableModel")
        if not model.linear:
            raise ModelLinearityError('Model is not linear in parameters, '
                                      'linear fit methods should not be used.')
        _validate_constraints(self.supported_constraints, model)
        model_copy = model.copy()
        _, fitparam_indices = _model_to_fit_params(model_copy)
        if model_copy.n_inputs == 2 and z is None:
            raise ValueError("Expected x, y and z for a 2 dimensional model.")
        farg = _convert_input(x, y, z, n_models=len(model_copy),
                              model_set_axis=model_copy.model_set_axis)
        has_fixed = any(model_copy.fixed.values())
        if has_fixed:
            # The list of fixed params is the complement of those being fitted:
            fixparam_indices = [idx for idx in
                                range(len(model_copy.param_names))
                                if idx not in fitparam_indices]
            # Construct matrix of user-fixed parameters that can be dotted with
            # the corresponding fit_deriv() terms, to evaluate corrections to
            # the dependent variable in order to fit only the remaining terms:
            fixparams = np.asarray([getattr(model_copy,
                                            model_copy.param_names[idx]).value
                                    for idx in fixparam_indices])
        if len(farg) == 2:
            x, y = farg
            # map domain into window
            if hasattr(model_copy, 'domain'):
                x = self._map_domain_window(model_copy, x)
            if has_fixed:
                lhs = self._deriv_with_constraints(model_copy,
                                                   fitparam_indices,
                                                   x=x)
                fixderivs = self._deriv_with_constraints(model_copy,
                                                         fixparam_indices,
                                                         x=x)
            else:
                lhs = model_copy.fit_deriv(x, *model_copy.parameters)
            sum_of_implicit_terms = model_copy.sum_of_implicit_terms(x)
            rhs = y
        else:
            x, y, z = farg
            # map domain into window
            if hasattr(model_copy, 'x_domain'):
                x, y = self._map_domain_window(model_copy, x, y)
            if has_fixed:
                lhs = self._deriv_with_constraints(model_copy,
                                                   fitparam_indices, x=x, y=y)
                fixderivs = self._deriv_with_constraints(model_copy,
                                                         fixparam_indices, x=x, y=y)
            else:
                lhs = model_copy.fit_deriv(x, y, *model_copy.parameters)
            sum_of_implicit_terms = model_copy.sum_of_implicit_terms(x, y)
            if len(model_copy) > 1:
                if z.ndim > 2:
                    # Basically this code here is making the assumption that if
                    # z has 3 dimensions it represents multiple models where
                    # the value of z is one plane per model. It's then
                    # generalized for other dimensionalities of z.
                    # TODO: See above comment
                    rhs = np.array([i.flatten() for i in z]).T
                else:
                    rhs = z.T
            else:
                rhs = z.flatten()
        # If the derivative is defined along rows (as with non-linear models)
        if model_copy.col_fit_deriv:
            lhs = np.asarray(lhs).T
        # Subtract any terms fixed by the user from (a copy of) the RHS, in
        # order to fit the remaining terms correctly:
        if has_fixed:
            if model_copy.col_fit_deriv:
                fixderivs = np.asarray(fixderivs).T  # as for lhs above
            rhs = rhs - fixderivs.dot(fixparams)  # evaluate user-fixed terms
        # Subtract any terms implicit in the model from the RHS, which, like
        # user-fixed terms, affect the dependent variable but are not fitted:
        if sum_of_implicit_terms is not None:
            # If we have a model set, the extra axis must be added to
            # sum_of_implicit_terms as its innermost dimension, to match the
            # dimensionality of rhs after _convert_input "rolls" it as needed
            # by np.linalg.lstsq. The vector then gets broadcast to the right
            # number of sets (columns). This assumes all the models share the
            # same input co-ordinates, as is currently the case.
            if len(model_copy) > 1:
                sum_of_implicit_terms = sum_of_implicit_terms[..., np.newaxis]
            rhs = rhs - sum_of_implicit_terms
        if weights is not None:
            # NOTE: np.float was removed in NumPy 1.24; the builtin float is
            # the equivalent (64-bit) dtype.
            weights = np.asarray(weights, dtype=float)
            if len(x) != len(weights):
                raise ValueError("x and weights should have the same length")
            if rhs.ndim == 2:
                lhs *= weights[:, np.newaxis]
                # Don't modify in-place in case rhs was the original dependent
                rhs = rhs * weights[:, np.newaxis]
            else:
                lhs *= weights[:, np.newaxis]
                rhs = rhs * weights
        if rcond is None:
            rcond = len(x) * np.finfo(x.dtype).eps
        scl = (lhs * lhs).sum(0)
        lacoef, resids, rank, sval = np.linalg.lstsq(lhs / scl, rhs, rcond)
        self.fit_info['residuals'] = resids
        self.fit_info['rank'] = rank
        self.fit_info['singular_values'] = sval
        lacoef = (lacoef.T / scl).T
        self.fit_info['params'] = lacoef
        if hasattr(model_copy, '_order') and rank != model_copy._order:
            warnings.warn("The fit may be poorly conditioned\n",
                          AstropyUserWarning)
        _fitter_to_model_params(model_copy, lacoef.flatten())
        return model_copy
class FittingWithOutlierRemoval(object):
    """
    Combine a fitter with iterative sigma-clipping style outlier removal.

    Parameters
    ----------
    fitter : callable
        A fitter instance; called as ``fitter(model, x, y[, z], ...)``.
    outlier_func : callable
        Function that masks outliers in the residuals (e.g. ``sigma_clip``)
        and returns a masked array.
    niter : int, optional
        Number of fit/reject iterations (default 3).
    outlier_kwargs : dict, optional
        Extra keyword arguments forwarded to ``outlier_func``.
    """
    def __init__(self, fitter, outlier_func, niter=3, **outlier_kwargs):
        self.fitter = fitter
        self.outlier_func = outlier_func
        self.niter = niter
        self.outlier_kwargs = outlier_kwargs

    def __str__(self):
        # BUGFIX: was ``self.fitter__class__`` (missing attribute dot), which
        # raised AttributeError whenever the object was str()-ed.
        return ("Fitter: {0}\nOutlier function: {1}\nNum. of iterations: {2}" +
                ("\nOutlier func. args.: {3}"))\
               .format(self.fitter.__class__.__name__,
                       self.outlier_func.__name__, self.niter,
                       self.outlier_kwargs)

    def __repr__(self):
        return ("{0}(fitter: {1}, outlier_func: {2}," +
                " niter: {3}, outlier_kwargs: {4})")\
               .format(self.__class__.__name__,
                       self.fitter.__class__.__name__,
                       self.outlier_func.__name__, self.niter,
                       self.outlier_kwargs)

    def __call__(self, model, x, y, z=None, weights=None, **kwargs):
        """
        Fit, reject outliers from the residuals, and refit ``niter`` times.

        Returns ``(filtered_data, fitted_model)`` where ``filtered_data`` is
        the masked dependent data from the last rejection pass.
        NOTE(review): the refit iterations drop ``weights``; confirm whether
        weights should be carried (masked) through the refits.
        """
        fitted_model = self.fitter(model, x, y, z, weights=weights, **kwargs)
        if z is None:
            filtered_data = y
            for n in range(self.niter):
                # Mask outliers in the residuals, then restore the model
                # values so filtered_data stays in data space.
                filtered_data = self.outlier_func(filtered_data - fitted_model(x),
                                                  **self.outlier_kwargs)
                filtered_data += fitted_model(x)
                fitted_model = self.fitter(fitted_model,
                                           x[~filtered_data.mask],
                                           filtered_data.data[~filtered_data.mask],
                                           **kwargs)
        else:
            filtered_data = z
            for n in range(self.niter):
                filtered_data = self.outlier_func(filtered_data - fitted_model(x, y),
                                                  **self.outlier_kwargs)
                filtered_data += fitted_model(x, y)
                fitted_model = self.fitter(fitted_model,
                                           x[~filtered_data.mask],
                                           y[~filtered_data.mask],
                                           filtered_data.data[~filtered_data.mask],
                                           **kwargs)
        return filtered_data, fitted_model
@six.add_metaclass(_FitterMeta)
class LevMarLSQFitter(object):
    """
    Levenberg-Marquardt least-squares fitter built on
    `scipy.optimize.leastsq`.

    Supports 'fixed', 'tied' and 'bounds' constraints (the latter by
    clipping, via ``_fitter_to_model_params``).  Solver diagnostics are
    stored in ``fit_info``.
    """
    supported_constraints = ['fixed', 'tied', 'bounds']
    def __init__(self):
        # Keys mirror scipy.optimize.leastsq's full_output dict, plus the
        # derived parameter covariance.
        self.fit_info = {'nfev': None,
                         'fvec': None,
                         'fjac': None,
                         'ipvt': None,
                         'qtf': None,
                         'message': None,
                         'ierr': None,
                         'param_jac': None,
                         'param_cov': None}
        super(LevMarLSQFitter, self).__init__()
    def objective_function(self, fps, *args):
        """
        Return the (optionally weighted) residuals ``model - measured`` for
        the fitted parameter values ``fps``.  ``args`` is
        ``(model, weights, <coordinates>, measured_values)``.
        """
        model = args[0]
        weights = args[1]
        _fitter_to_model_params(model, fps)
        meas = args[-1]
        if weights is None:
            return np.ravel(model(*args[2: -1]) - meas)
        else:
            return np.ravel(weights * (model(*args[2: -1]) - meas))
    @fitter_unit_support
    def __call__(self, model, x, y, z=None, weights=None,
                 maxiter=DEFAULT_MAXITER, acc=DEFAULT_ACC,
                 epsilon=DEFAULT_EPS, estimate_jacobian=False):
        """
        Fit ``model`` to the data and return a fitted copy.

        ``estimate_jacobian=True`` forces numerical derivatives even when
        the model provides an analytic ``fit_deriv``.
        """
        from scipy import optimize
        model_copy = _validate_model(model, self.supported_constraints)
        farg = (model_copy, weights, ) + _convert_input(x, y, z)
        if model_copy.fit_deriv is None or estimate_jacobian:
            dfunc = None
        else:
            dfunc = self._wrap_deriv
        init_values, _ = _model_to_fit_params(model_copy)
        fitparams, cov_x, dinfo, mess, ierr = optimize.leastsq(
            self.objective_function, init_values, args=farg, Dfun=dfunc,
            col_deriv=model_copy.col_fit_deriv, maxfev=maxiter, epsfcn=epsilon,
            xtol=acc, full_output=True)
        _fitter_to_model_params(model_copy, fitparams)
        self.fit_info.update(dinfo)
        self.fit_info['cov_x'] = cov_x
        self.fit_info['message'] = mess
        self.fit_info['ierr'] = ierr
        # scipy's leastsq signals success with ierr in 1..4.
        if ierr not in [1, 2, 3, 4]:
            warnings.warn("The fit may be unsuccessful; check "
                          "fit_info['message'] for more information.",
                          AstropyUserWarning)
        # Scale cov_x by the reduced chi-squared to get the parameter
        # covariance (only meaningful when the problem is over-determined).
        if (len(y) > len(init_values)) and cov_x is not None:
            sum_sqrs = np.sum(self.objective_function(fitparams, *farg)**2)
            dof = len(y) - len(init_values)
            self.fit_info['param_cov'] = cov_x * sum_sqrs / dof
        else:
            self.fit_info['param_cov'] = None
        return model_copy
    @staticmethod
    def _wrap_deriv(params, model, weights, x, y, z=None):
        """
        Wraps the model's ``fit_deriv`` for scipy, applying weights and
        dropping the derivative rows/columns of fixed and tied parameters.
        """
        if weights is None:
            weights = 1.0
        if any(model.fixed.values()) or any(model.tied.values()):
            if z is None:
                full_deriv = np.ravel(weights) * np.array(model.fit_deriv(x, *model.parameters))
            else:
                full_deriv = (np.ravel(weights) * np.array(model.fit_deriv(x, y, *model.parameters)).T).T
            pars = [getattr(model, name) for name in model.param_names]
            fixed = [par.fixed for par in pars]
            tied = [par.tied for par in pars]
            tied = list(np.where([par.tied is not False for par in pars],
                                 True, tied))
            fix_and_tie = np.logical_or(fixed, tied)
            ind = np.logical_not(fix_and_tie)
            if not model.col_fit_deriv:
                full_deriv = np.asarray(full_deriv).T
                residues = np.asarray(full_deriv[np.nonzero(ind)]).T
            else:
                residues = full_deriv[np.nonzero(ind)]
            return [np.ravel(_) for _ in residues]
        else:
            if z is None:
                return [np.ravel(_) for _ in np.ravel(weights) * np.array(model.fit_deriv(x, *params))]
            else:
                if not model.col_fit_deriv:
                    return [np.ravel(_) for _ in (np.ravel(weights) * np.array(model.fit_deriv(x, y, *params)).T).T]
                else:
                    return [np.ravel(_) for _ in (weights * np.array(model.fit_deriv(x, y, *params)))]
class SLSQPLSQFitter(Fitter):
    """
    Sequential Least SQuares Programming (SLSQP) fitter using a
    least-squares statistic; supports all constraint kinds the SLSQP
    optimizer supports.
    """
    supported_constraints = SLSQP.supported_constraints
    def __init__(self):
        super(SLSQPLSQFitter, self).__init__(optimizer=SLSQP, statistic=leastsquare)
        self.fit_info = {}
    @fitter_unit_support
    def __call__(self, model, x, y, z=None, weights=None, **kwargs):
        """Fit ``model`` to the data and return the fitted copy."""
        model_copy = _validate_model(model, self._opt_method.supported_constraints)
        farg = _convert_input(x, y, z)
        farg = (model_copy, weights, ) + farg
        p0, _ = _model_to_fit_params(model_copy)
        fitparams, self.fit_info = self._opt_method(
            self.objective_function, p0, farg, **kwargs)
        _fitter_to_model_params(model_copy, fitparams)
        return model_copy
class SimplexLSQFitter(Fitter):
    """
    Nelder-Mead simplex fitter using a least-squares statistic; the simplex
    optimizer supports no parameter constraints.
    """
    supported_constraints = Simplex.supported_constraints
    def __init__(self):
        super(SimplexLSQFitter, self).__init__(optimizer=Simplex,
                                               statistic=leastsquare)
        self.fit_info = {}
    @fitter_unit_support
    def __call__(self, model, x, y, z=None, weights=None, **kwargs):
        """Fit ``model`` to the data and return the fitted copy."""
        model_copy = _validate_model(model,
                                     self._opt_method.supported_constraints)
        farg = _convert_input(x, y, z)
        farg = (model_copy, weights, ) + farg
        p0, _ = _model_to_fit_params(model_copy)
        fitparams, self.fit_info = self._opt_method(
            self.objective_function, p0, farg, **kwargs)
        _fitter_to_model_params(model_copy, fitparams)
        return model_copy
@six.add_metaclass(_FitterMeta)
class JointFitter(object):
    """
    Fit several data sets simultaneously with several models that share a
    subset of parameters.

    ``jointparameters`` maps each model to the names of its joint
    parameters; ``initvals`` gives the initial values of those shared
    parameters (one per joint parameter, same order for every model).
    """
    def __init__(self, models, jointparameters, initvals):
        self.models = list(models)
        self.initvals = list(initvals)
        self.jointparams = jointparameters
        self._verify_input()
        self.fitparams = self._model_to_fit_params()
        # Total number of independent coordinates across all models.
        self.modeldims = [m.n_inputs for m in self.models]
        self.ndim = np.sum(self.modeldims)
    def _model_to_fit_params(self):
        """Build the flat fit-parameter list: joint values first, then each
        model's remaining (non-joint) parameters."""
        fparams = []
        fparams.extend(self.initvals)
        for model in self.models:
            params = [p.flatten() for p in model.parameters]
            joint_params = self.jointparams[model]
            param_metrics = model._param_metrics
            for param_name in joint_params:
                slice_ = param_metrics[param_name]['slice']
                del params[slice_]
            fparams.extend(params)
        return fparams
    def objective_function(self, fps, *args):
        """
        Return the concatenated residuals of all models.

        ``fps`` holds the joint parameters followed by each model's free
        parameters; ``args`` packs, per model, its coordinates followed by
        its measured values.
        """
        lstsqargs = list(args)
        fitted = []
        fitparams = list(fps)
        numjp = len(self.initvals)
        # Consume the shared (joint) parameters first.
        jointfitparams = fitparams[:numjp]
        del fitparams[:numjp]
        for model in self.models:
            joint_params = self.jointparams[model]
            margs = lstsqargs[:model.n_inputs + 1]
            del lstsqargs[:model.n_inputs + 1]
            numfp = len(model._parameters) - len(joint_params)
            mfparams = fitparams[:numfp]
            del fitparams[:numfp]
            # Reassemble this model's full parameter list, taking joint
            # parameters from the shared pool and the rest in declared order.
            mparams = []
            param_metrics = model._param_metrics
            for param_name in model.param_names:
                if param_name in joint_params:
                    index = joint_params.index(param_name)
                    mparams.extend([jointfitparams[index]])
                else:
                    slice_ = param_metrics[param_name]['slice']
                    plen = slice_.stop - slice_.start
                    mparams.extend(mfparams[:plen])
                    del mfparams[:plen]
            # NOTE(review): margs[:-1] is passed as a single (list) argument
            # to evaluate(); confirm this matches the model's expected call
            # signature for multi-input models.
            modelfit = model.evaluate(margs[:-1], *mparams)
            fitted.extend(modelfit - margs[-1])
        return np.ravel(fitted)
    def _verify_input(self):
        """Validate that the models/joint-parameter/initval inputs are consistent."""
        if len(self.models) <= 1:
            raise TypeError("Expected >1 models, {} is given".format(
                len(self.models)))
        if len(self.jointparams.keys()) < 2:
            raise TypeError("At least two parameters are expected, "
                            "{} is given".format(len(self.jointparams.keys())))
        for j in self.jointparams.keys():
            if len(self.jointparams[j]) != len(self.initvals):
                raise TypeError("{} parameter(s) provided but {} expected".format(
                    len(self.jointparams[j]), len(self.initvals)))
    def __call__(self, *args):
        """
        Fit all data sets: ``args`` is, per model, its coordinate arrays
        followed by its measured values.  Updates each model's parameters
        in place.
        """
        from scipy import optimize
        if len(args) != reduce(lambda x, y: x + 1 + y + 1, self.modeldims):
            raise ValueError("Expected {} coordinates in args but {} provided"
                             .format(reduce(lambda x, y: x + 1 + y + 1,
                                            self.modeldims), len(args)))
        self.fitparams[:], _ = optimize.leastsq(self.objective_function,
                                                self.fitparams, args=args)
        fparams = self.fitparams[:]
        numjp = len(self.initvals)
        jointfitparams = fparams[:numjp]
        del fparams[:numjp]
        for model in self.models:
            joint_params = self.jointparams[model]
            numfp = len(model._parameters) - len(joint_params)
            mfparams = fparams[:numfp]
            del fparams[:numfp]
            # recreate the model parameters
            mparams = []
            param_metrics = model._param_metrics
            for param_name in model.param_names:
                if param_name in joint_params:
                    index = joint_params.index(param_name)
                    # should do this with slices in case the parameter
                    # is not a number
                    mparams.extend([jointfitparams[index]])
                else:
                    slice_ = param_metrics[param_name]['slice']
                    plen = slice_.stop - slice_.start
                    mparams.extend(mfparams[:plen])
                    del mfparams[:plen]
            model.parameters = np.array(mparams)
def _convert_input(x, y, z=None, n_models=1, model_set_axis=0):
x = np.asarray(x, dtype=np.float)
y = np.asarray(y, dtype=np.float)
if z is not None:
z = np.asarray(z, dtype=np.float)
# For compatibility with how the linear fitter code currently expects to
# work, shift the dependent variable's axes to the expected locations
if n_models > 1:
if z is None:
if y.shape[model_set_axis] != n_models:
raise ValueError(
"Number of data sets (y array is expected to equal "
"the number of parameter sets)")
# be last, so that its first dimension is the same length as the x
# coordinates. This is in line with the expectations of
# numpy.linalg.lstsq:
# http://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.lstsq.html
# That is, each model should be represented by a column. TODO:
# Obviously this is a detail of np.linalg.lstsq and should be
# handled specifically by any fitters that use it...
y = np.rollaxis(y, model_set_axis, y.ndim)
else:
# Shape of z excluding model_set_axis
z_shape = z.shape[:model_set_axis] + z.shape[model_set_axis + 1:]
if not (x.shape == y.shape == z_shape):
raise ValueError("x, y and z should have the same shape")
if z is None:
farg = (x, y)
else:
farg = (x, y, z)
return farg
# TODO: These utility functions are really particular to handling
# bounds/tied/fixed constraints for scipy.optimize optimizers that do not
# support them inherently; this needs to be reworked to be clear about this
# distinction (and the fact that these are not necessarily applicable to any
# arbitrary fitter--as evidenced for example by the fact that JointFitter has
# its own versions of these)
# TODO: Most of this code should be entirely rewritten; it should not be as
# inefficient as it is.
def _fitter_to_model_params(model, fps):
    """
    Constructs the full list of model parameters from the fitted and
    constrained parameters, writing the fitted values *fps* back into
    ``model.parameters`` in place (with bounds clipping and re-evaluation
    of tied-parameter expressions).
    """
    _, fit_param_indices = _model_to_fit_params(model)
    has_tied = any(model.tied.values())
    has_fixed = any(model.fixed.values())
    has_bound = any(b != (None, None) for b in model.bounds.values())
    if not (has_tied or has_fixed or has_bound):
        # We can just assign directly
        model.parameters = fps
        return
    fit_param_indices = set(fit_param_indices)
    offset = 0
    param_metrics = model._param_metrics
    for idx, name in enumerate(model.param_names):
        if idx not in fit_param_indices:
            continue
        slice_ = param_metrics[name]['slice']
        shape = param_metrics[name]['shape']
        # This is determining which range of fps (the fitted parameters) maps
        # to parameters of the model
        size = reduce(operator.mul, shape, 1)
        values = fps[offset:offset + size]
        # Check bounds constraints
        if model.bounds[name] != (None, None):
            _min, _max = model.bounds[name]
            if _min is not None:
                values = np.fmax(values, _min)
            if _max is not None:
                values = np.fmin(values, _max)
        model.parameters[slice_] = values
        offset += size
    # This has to be done in a separate loop due to how tied parameters are
    # currently evaluated (the fitted parameters need to actually be *set* on
    # the model first, for use in evaluating the "tied" expression--it might be
    # better to change this at some point
    if has_tied:
        for idx, name in enumerate(model.param_names):
            if model.tied[name]:
                value = model.tied[name](model)
                slice_ = param_metrics[name]['slice']
                model.parameters[slice_] = value
def _model_to_fit_params(model):
    """
    Convert a model instance's parameter array to an array usable by a
    fitter with no native fixed/tied support, dropping fixed/tied entries.
    Returns ``(values, fit_param_indices)``.
    """
    fitparam_indices = list(range(len(model.param_names)))
    if any(model.fixed.values()) or any(model.tied.values()):
        params = list(model.parameters)
        param_metrics = model._param_metrics
        # Iterate in reverse so deletions don't shift pending indices.
        for idx, name in list(enumerate(model.param_names))[::-1]:
            if model.fixed[name] or model.tied[name]:
                slice_ = param_metrics[name]['slice']
                del params[slice_]
                del fitparam_indices[idx]
        return (np.array(params), fitparam_indices)
    else:
        return (model.parameters, fitparam_indices)
def _validate_constraints(supported_constraints, model):
    """Make sure model constraints are supported by the current fitter.

    Raises `UnsupportedConstraintError` if the model uses any constraint
    kind not named in *supported_constraints*.
    """
    message = 'Optimizer cannot handle {0} constraints.'
    if (any(six.itervalues(model.fixed)) and
            'fixed' not in supported_constraints):
        raise UnsupportedConstraintError(
                message.format('fixed parameter'))
    if any(six.itervalues(model.tied)) and 'tied' not in supported_constraints:
        raise UnsupportedConstraintError(
                message.format('tied parameter'))
    # A bound of (None, None) means "unbounded", so only real bounds count.
    if (any(tuple(b) != (None, None) for b in six.itervalues(model.bounds)) and
            'bounds' not in supported_constraints):
        raise UnsupportedConstraintError(
                message.format('bound parameter'))
    if model.eqcons and 'eqcons' not in supported_constraints:
        raise UnsupportedConstraintError(message.format('equality'))
    if model.ineqcons and 'ineqcons' not in supported_constraints:
        raise UnsupportedConstraintError(message.format('inequality'))
def _validate_model(model, supported_constraints):
    """
    Check that model and fitter are compatible and return a copy of the model.
    """
    if not model.fittable:
        raise ValueError("Model does not appear to be fittable.")
    if model.linear:
        warnings.warn('Model is linear in parameters; '
                      'consider using linear fitting methods.',
                      AstropyUserWarning)
    elif len(model) != 1:
        # for now only single data sets can be fitted
        raise ValueError("Non-linear fitters can only fit "
                         "one data set at a time.")
    _validate_constraints(supported_constraints, model)
    model_copy = model.copy()
    return model_copy
def populate_entry_points(entry_points):
    """
    Inject entry points into the `astropy.modeling.fitting` namespace so
    third-party fitters can be used without being merged into core.

    Each loaded entry point must be a class extending
    `~astropy.modeling.Fitter`; anything else only triggers a warning.
    """
    for entry_point in entry_points:
        name = entry_point.name
        try:
            entry_point = entry_point.load()
        except Exception as e:
            # This stops the fitting from choking if an entry_point produces an error.
            warnings.warn(AstropyUserWarning('{type} error occurred in entry '
                                             'point {name}.' .format(type=type(e).__name__, name=name)))
        else:
            if not inspect.isclass(entry_point):
                warnings.warn(AstropyUserWarning(
                    'Modeling entry point {0} expected to be a '
                    'Class.' .format(name)))
            else:
                if issubclass(entry_point, Fitter):
                    # Expose the plugin fitter as if it were defined here.
                    name = entry_point.__name__
                    globals()[name] = entry_point
                    __all__.append(name)
                else:
                    warnings.warn(AstropyUserWarning(
                        'Modeling entry point {0} expected to extend '
                        'astropy.modeling.Fitter' .format(name)))
# Discover third-party fitters registered under the 'astropy.modeling'
# entry-point group; guarded so fitting doesn't choke if pkg_resources
# doesn't exist.
if HAS_PKG:
    populate_entry_points(iter_entry_points(group='astropy.modeling', name=None))
| true | true |
1c2d692655e59256487700286bbf76b05f2cdd07 | 2,116 | py | Python | examples/albert/utils.py | hivemind-debug/debug | c5b756a6a48532a43cd080d4b3e02e7dd023e317 | [
"MIT"
] | null | null | null | examples/albert/utils.py | hivemind-debug/debug | c5b756a6a48532a43cd080d4b3e02e7dd023e317 | [
"MIT"
] | null | null | null | examples/albert/utils.py | hivemind-debug/debug | c5b756a6a48532a43cd080d4b3e02e7dd023e317 | [
"MIT"
] | null | null | null | from typing import Dict, List, Tuple
from multiaddr import Multiaddr
from pydantic import BaseModel, StrictFloat, confloat, conint
from hivemind import choose_ip_address
from hivemind.dht.crypto import RSASignatureValidator
from hivemind.dht.schema import BytesWithPublicKey, SchemaValidator
from hivemind.dht.validation import RecordValidatorBase
from hivemind.utils.logging import get_logger
logger = get_logger(__name__)
class LocalMetrics(BaseModel):
    """Training statistics reported by one peer for a single optimizer step."""
    # Optimizer step this report refers to (non-negative, strict int).
    step: conint(ge=0, strict=True)
    # Peer throughput; must be non-negative.
    samples_per_second: confloat(ge=0.0, strict=True)
    # Samples this peer accumulated toward the step.
    samples_accumulated: conint(ge=0, strict=True)
    # Training loss value reported by the peer.
    loss: StrictFloat
    # Number of local gradient-accumulation (mini) steps performed.
    mini_steps: conint(ge=0, strict=True)
class MetricSchema(BaseModel):
    """DHT record schema mapping a peer's public key to its LocalMetrics."""
    metrics: Dict[BytesWithPublicKey, LocalMetrics]
def make_validators(experiment_prefix: str) -> Tuple[List[RecordValidatorBase], bytes]:
    """
    Build the DHT record validators for this experiment: a schema validator
    keyed by *experiment_prefix* plus an RSA signature validator.  Returns
    the validator list and this peer's local public key.
    """
    signature_validator = RSASignatureValidator()
    validators = [SchemaValidator(MetricSchema, prefix=experiment_prefix), signature_validator]
    return validators, signature_validator.local_public_key
class TextStyle:
    """ANSI escape sequences used to highlight console output."""
    BOLD = "\033[1m"
    BLUE = "\033[34m"
    RESET = "\033[0m"  # restores default terminal attributes
def log_visible_maddrs(visible_maddrs: List[Multiaddr], only_p2p: bool) -> None:
    """
    Log this DHT peer's visible multiaddresses together with a ready-made
    ``--initial_peers`` snippet that other peers can copy.

    :param visible_maddrs: multiaddresses under which this peer is reachable
    :param only_p2p: if True, advertise only ``/p2p/<peer id>`` addresses
        instead of concrete IP-based multiaddresses
    """
    if only_p2p:
        unique_addrs = {addr["p2p"] for addr in visible_maddrs}
        initial_peers_str = " ".join(f"/p2p/{addr}" for addr in unique_addrs)
    else:
        # Prefer a single "best" IP so the printed hint stays short.
        available_ips = [Multiaddr(addr) for addr in visible_maddrs if "ip4" in addr or "ip6" in addr]
        if available_ips:
            preferred_ip = choose_ip_address(available_ips)
            selected_maddrs = [addr for addr in visible_maddrs if preferred_ip in str(addr)]
        else:
            selected_maddrs = visible_maddrs
        initial_peers_str = " ".join(str(addr) for addr in selected_maddrs)
    logger.info(
        f"Running a DHT peer. To connect other peers to this one over the Internet, use "
        f"{TextStyle.BOLD}{TextStyle.BLUE}--initial_peers {initial_peers_str}{TextStyle.RESET}"
    )
    logger.info(f"Full list of visible multiaddresses: {' '.join(str(addr) for addr in visible_maddrs)}")
| 36.482759 | 105 | 0.734877 | from typing import Dict, List, Tuple
from multiaddr import Multiaddr
from pydantic import BaseModel, StrictFloat, confloat, conint
from hivemind import choose_ip_address
from hivemind.dht.crypto import RSASignatureValidator
from hivemind.dht.schema import BytesWithPublicKey, SchemaValidator
from hivemind.dht.validation import RecordValidatorBase
from hivemind.utils.logging import get_logger
logger = get_logger(__name__)
class LocalMetrics(BaseModel):
    """Training statistics reported by one peer for a single optimizer step."""
    # Optimizer step this report refers to (non-negative, strict int).
    step: conint(ge=0, strict=True)
    # Peer throughput; must be non-negative.
    samples_per_second: confloat(ge=0.0, strict=True)
    # Samples this peer accumulated toward the step.
    samples_accumulated: conint(ge=0, strict=True)
    # Training loss value reported by the peer.
    loss: StrictFloat
    # Number of local gradient-accumulation (mini) steps performed.
    mini_steps: conint(ge=0, strict=True)
class MetricSchema(BaseModel):
    """DHT record schema mapping a peer's public key to its LocalMetrics."""
    metrics: Dict[BytesWithPublicKey, LocalMetrics]
def make_validators(experiment_prefix: str) -> Tuple[List[RecordValidatorBase], bytes]:
    """
    Build the DHT record validators for this experiment: a schema validator
    keyed by *experiment_prefix* plus an RSA signature validator.  Returns
    the validator list and this peer's local public key.
    """
    rsa_validator = RSASignatureValidator()
    schema_validator = SchemaValidator(MetricSchema, prefix=experiment_prefix)
    # Preserve ordering: schema validator first, signature validator second.
    return [schema_validator, rsa_validator], rsa_validator.local_public_key
class TextStyle:
    """ANSI escape sequences used to highlight console output."""
    BOLD = "\033[1m"
    BLUE = "\033[34m"
    RESET = "\033[0m"  # restores default terminal attributes
def log_visible_maddrs(visible_maddrs: List[Multiaddr], only_p2p: bool) -> None:
    """
    Log this DHT peer's visible multiaddresses together with a ready-made
    ``--initial_peers`` snippet that other peers can copy.

    :param visible_maddrs: multiaddresses under which this peer is reachable
    :param only_p2p: if True, advertise only ``/p2p/<peer id>`` addresses
        instead of concrete IP-based multiaddresses
    """
    if only_p2p:
        unique_addrs = {addr["p2p"] for addr in visible_maddrs}
        initial_peers_str = " ".join(f"/p2p/{addr}" for addr in unique_addrs)
    else:
        # Prefer a single "best" IP so the printed hint stays short.
        available_ips = [Multiaddr(addr) for addr in visible_maddrs if "ip4" in addr or "ip6" in addr]
        if available_ips:
            preferred_ip = choose_ip_address(available_ips)
            selected_maddrs = [addr for addr in visible_maddrs if preferred_ip in str(addr)]
        else:
            selected_maddrs = visible_maddrs
        initial_peers_str = " ".join(str(addr) for addr in selected_maddrs)
    logger.info(
        f"Running a DHT peer. To connect other peers to this one over the Internet, use "
        f"{TextStyle.BOLD}{TextStyle.BLUE}--initial_peers {initial_peers_str}{TextStyle.RESET}"
    )
    logger.info(f"Full list of visible multiaddresses: {' '.join(str(addr) for addr in visible_maddrs)}")
| true | true |
1c2d6a5ab20c4467dfe13585058f9cff624d91c4 | 1,048 | py | Python | users/urls.py | utk-dev/Storyteller | 4c15daada1948698a5169275e544e714b836a404 | [
"MIT"
] | 3 | 2020-11-02T09:13:47.000Z | 2020-11-07T06:02:10.000Z | users/urls.py | utk-dev/Storyteller | 4c15daada1948698a5169275e544e714b836a404 | [
"MIT"
] | null | null | null | users/urls.py | utk-dev/Storyteller | 4c15daada1948698a5169275e544e714b836a404 | [
"MIT"
] | null | null | null | from django.urls import path
from .views import *
app_name = 'users'
urlpatterns = [
path('signup/', signup_view, name='signup-view'),
path('login/', login_view, name='login-view'),
path('logout/', logout_view, name='logout-view'),
path('settings/', settings_view, name='settings-view'),
path('settings/freeze/', freeze_view, name='freeze-view'),
path('settings/freeze/<int:id>/', freeze_id, name='freeze-id'),
path('settings/unfreeze/', unfreeze_view, name='unfreeze-view'),
path('settings/unfreeze/<int:id>', unfreeze_id, name='unfreeze-id'),
path('settings/privilege-application/', privilege_application, name='privilege-application'),
path('settings/grant-privileges/', grant_privilege, name='grant-privilege'),
path('settings/grant-privileges/grant/<int:id>/', grant_privilege_id, name='grant-id'),
path('settings/grant-privileges/reject/<int:id>/', reject_privilege_id, name='reject-id'),
path('settings/privilege-application/already-applied/', already_applied, name='already-applied'),
] | 52.4 | 101 | 0.708969 | from django.urls import path
from .views import *
app_name = 'users'
urlpatterns = [
path('signup/', signup_view, name='signup-view'),
path('login/', login_view, name='login-view'),
path('logout/', logout_view, name='logout-view'),
path('settings/', settings_view, name='settings-view'),
path('settings/freeze/', freeze_view, name='freeze-view'),
path('settings/freeze/<int:id>/', freeze_id, name='freeze-id'),
path('settings/unfreeze/', unfreeze_view, name='unfreeze-view'),
path('settings/unfreeze/<int:id>', unfreeze_id, name='unfreeze-id'),
path('settings/privilege-application/', privilege_application, name='privilege-application'),
path('settings/grant-privileges/', grant_privilege, name='grant-privilege'),
path('settings/grant-privileges/grant/<int:id>/', grant_privilege_id, name='grant-id'),
path('settings/grant-privileges/reject/<int:id>/', reject_privilege_id, name='reject-id'),
path('settings/privilege-application/already-applied/', already_applied, name='already-applied'),
] | true | true |
1c2d6a93b212bc20997528b53f8a81e1cf96b964 | 436 | py | Python | python/ctci-big-o.py | gajubadge11/hackerrank-3 | 132a5019b7ed21507bb95b5063fa66c446b0eff7 | [
"MIT"
] | 21 | 2015-02-09T18:08:38.000Z | 2021-11-08T15:00:48.000Z | python/ctci-big-o.py | gajubadge11/hackerrank-3 | 132a5019b7ed21507bb95b5063fa66c446b0eff7 | [
"MIT"
] | 7 | 2020-04-12T23:00:19.000Z | 2021-01-30T23:44:24.000Z | python/ctci-big-o.py | gajubadge11/hackerrank-3 | 132a5019b7ed21507bb95b5063fa66c446b0eff7 | [
"MIT"
] | 27 | 2015-07-22T18:08:12.000Z | 2022-02-28T19:50:26.000Z | import math
def is_prime(test_case):
    """Return True if ``test_case`` is a prime number.

    Trial division up to ``floor(sqrt(n))`` -- O(sqrt n).  Uses
    ``math.isqrt`` so large integers are handled exactly, without float
    rounding.  Fixes two defects of the original: 0 was reported as prime
    (its trial range was empty) and negative input crashed in ``math.sqrt``.
    """
    if test_case < 2:
        # 0, 1 and negatives are not prime by definition.
        return False
    for factor in range(2, math.isqrt(test_case) + 1):
        if test_case % factor == 0:
            return False
    return True
# Read the number of test cases, then classify each integer read from stdin.
NUM_CASES = int(input().strip())
for _ in range(NUM_CASES):
    test_case = int(input().strip())
    if is_prime(test_case):
        print("Prime")
    else:
        print("Not prime")
| 21.8 | 64 | 0.603211 | import math
def is_prime(test_case):
if test_case == 1:
return False
for factor in range(2, math.ceil(math.sqrt(test_case) + 1)):
if test_case != factor and test_case % factor == 0:
return False
return True
NUM_CASES = int(input().strip())
for _ in range(NUM_CASES):
test_case = int(input().strip())
if is_prime(test_case):
print("Prime")
else:
print("Not prime")
| true | true |
1c2d6a9405b277d357d1ad3e17d00230b246d383 | 1,551 | py | Python | graphgym/models/register.py | tigerneil/GraphGym | 77f1e7acb4d08b6647b2cf1d147d86b736ac25e2 | [
"MIT"
] | 7 | 2021-03-23T08:10:25.000Z | 2022-01-10T05:51:38.000Z | graphgym/models/register.py | batermj/GraphGym | 05f749900ef07029a127dc36e74e43f4d0eb1a06 | [
"MIT"
] | null | null | null | graphgym/models/register.py | batermj/GraphGym | 05f749900ef07029a127dc36e74e43f4d0eb1a06 | [
"MIT"
] | 2 | 2020-11-23T21:42:59.000Z | 2021-03-10T11:43:27.000Z | import torch
import torch.nn as nn
def register(key, module, module_dict):
    """Insert ``module`` into ``module_dict`` under ``key``.

    A duplicate key raises ``KeyError`` so two components cannot silently
    overwrite each other's registration.
    """
    if key not in module_dict:
        module_dict[key] = module
        return
    raise KeyError('Key {} is already pre-defined.'.format(key))
# Global GraphGym registries.  Each ``<name>_dict`` maps a string key to a
# user-supplied module/class, and ``register_<name>`` is the thin public hook
# that components call at import time to make themselves available to configs.
act_dict = {}  # activation functions
def register_act(key, module):
    register(key, module, act_dict)
node_encoder_dict = {}  # node feature encoders
def register_node_encoder(key, module):
    register(key, module, node_encoder_dict)
edge_encoder_dict = {}  # edge feature encoders
def register_edge_encoder(key, module):
    register(key, module, edge_encoder_dict)
stage_dict = {}  # GNN stage wiring schemes (e.g. skip-connections)
def register_stage(key, module):
    register(key, module, stage_dict)
head_dict = {}  # task-specific prediction heads
def register_head(key, module):
    register(key, module, head_dict)
layer_dict = {}  # message-passing layer types
def register_layer(key, module):
    register(key, module, layer_dict)
pooling_dict = {}  # graph pooling operators
def register_pooling(key, module):
    register(key, module, pooling_dict)
network_dict = {}  # complete network architectures
def register_network(key, module):
    register(key, module, network_dict)
config_dict = {}  # custom config extensions
def register_config(key, module):
    register(key, module, config_dict)
loader_dict = {}  # dataset loaders
def register_loader(key, module):
    register(key, module, loader_dict)
optimizer_dict = {}  # optimizers
def register_optimizer(key, module):
    register(key, module, optimizer_dict)
scheduler_dict = {}  # learning-rate schedulers
def register_scheduler(key, module):
    register(key, module, scheduler_dict)
loss_dict = {}  # loss functions
def register_loss(key, module):
    register(key, module, loss_dict)
feature_augment_dict = {}  # feature augmentation transforms
def register_feature_augment(key, module):
    register(key, module, feature_augment_dict)
import torch.nn as nn
def register(key, module, module_dict):
    """Add ``module`` to ``module_dict`` under ``key``; duplicates are an error."""
    if key in module_dict:
        raise KeyError('Key {} is already pre-defined.'.format(key))
    module_dict[key] = module
act_dict = {}
def register_act(key, module):
register(key, module, act_dict)
node_encoder_dict = {}
def register_node_encoder(key, module):
register(key, module, node_encoder_dict)
edge_encoder_dict = {}
def register_edge_encoder(key, module):
register(key, module, edge_encoder_dict)
stage_dict = {}
def register_stage(key, module):
register(key, module, stage_dict)
head_dict = {}
def register_head(key, module):
register(key, module, head_dict)
layer_dict = {}
def register_layer(key, module):
register(key, module, layer_dict)
pooling_dict = {}
def register_pooling(key, module):
register(key, module, pooling_dict)
network_dict = {}
def register_network(key, module):
register(key, module, network_dict)
config_dict = {}
def register_config(key, module):
register(key, module, config_dict)
loader_dict = {}
def register_loader(key, module):
register(key, module, loader_dict)
optimizer_dict = {}
def register_optimizer(key, module):
register(key, module, optimizer_dict)
scheduler_dict = {}
def register_scheduler(key, module):
register(key, module, scheduler_dict)
loss_dict = {}
def register_loss(key, module):
register(key, module, loss_dict)
feature_augment_dict = {}
def register_feature_augment(key, module):
register(key, module, feature_augment_dict) | true | true |
1c2d6b04c3cbbef99c9d3c2801c0377fb7de6248 | 10,117 | py | Python | MeerK40t.py | Sophist-UK/meerk40t | abbbd19796a0d50d90ca553106ac738f618dc19b | [
"MIT"
] | null | null | null | MeerK40t.py | Sophist-UK/meerk40t | abbbd19796a0d50d90ca553106ac738f618dc19b | [
"MIT"
] | null | null | null | MeerK40t.py | Sophist-UK/meerk40t | abbbd19796a0d50d90ca553106ac738f618dc19b | [
"MIT"
] | null | null | null | import argparse
import sys
from Console import Console
from DefaultModules import *
from GrblDevice import GrblDevice
from LhystudiosDevice import LhystudiosDevice
from MoshiboardDevice import MoshiboardDevice
from RasterScripts import RasterScripts
from RuidaDevice import RuidaDevice
from LaserServer import *
try:
from math import tau
except ImportError:
from math import pi
tau = pi * 2
"""
Laser software for the Stock-LIHUIYU laserboard.
MeerK40t (pronounced MeerKat) is a built-from-the-ground-up MIT licensed
open-source laser cutting software. See https://github.com/meerk40t/meerk40t
for full details.
"""
MEERK40T_VERSION = '0.6.17'
kernel = Kernel()
kernel.open('module', 'Signaler')
kernel.open('module', 'Elemental')
def pair(value):
    """argparse ``type=`` converter for ``key=value`` settings.

    Returns the two-element list ``[key, value]``.

    Raises ``argparse.ArgumentTypeError`` on malformed input so argparse can
    emit a proper usage message.  (The original raised an ``ArgumentParser``
    *instance*, which is not an exception and surfaced as a confusing
    ``TypeError: exceptions must derive from BaseException``.)
    """
    rv = value.split('=')
    if len(rv) != 2:
        raise argparse.ArgumentTypeError("expected a single 'key=value' pair, got %r" % value)
    return rv
parser = argparse.ArgumentParser()
parser.add_argument('input', nargs='?', type=argparse.FileType('r'), help='input file')
parser.add_argument('-V', '--version', action='store_true', help='MeerK40t version')
parser.add_argument('-z', '--no_gui', action='store_true', help='run without gui')
parser.add_argument('-c', '--console', action='store_true', help='start as console')
parser.add_argument('-a', '--auto', action='store_true', help='start running laser')
parser.add_argument('-p', '--path', type=str, help='add SVG Path command')
parser.add_argument('-t', '--transform', type=str, help="adds SVG Transform command")
parser.add_argument('-o', '--output', type=argparse.FileType('w'), help='output file name')
parser.add_argument('-v', '--verbose', action='store_true', help='display verbose debugging')
parser.add_argument('-m', '--mock', action='store_true', help='uses mock usb device')
parser.add_argument('-s', '--set', action='append', nargs='?', type=pair, metavar='key=value', help='set a device variable')
parser.add_argument('-H', '--home', action='store_true', help="prehome the device")
parser.add_argument('-O', '--origin', action='store_true', help="return back to 0,0 on finish")
parser.add_argument('-b', '--batch', type=argparse.FileType('r'), help='console batch file')
parser.add_argument('-S', '--speed', type=float, help='set the speed of all operations')
parser.add_argument('-x', '--execute', type=str, help='direct execute egv file')
parser.add_argument('-gs', '--grbl', type=int, help='run grbl-emulator on given port.')
parser.add_argument('-gy', '--flip_y', action='store_true', help="grbl y-flip")
parser.add_argument('-gx', '--flip_x', action='store_true', help="grbl x-flip")
parser.add_argument('-ga', '--adjust_x', type=int, help='adjust grbl home_x position')
parser.add_argument('-gb', '--adjust_y', type=int, help='adjust grbl home_y position')
parser.add_argument('-rs', '--ruida', action='store_true', help='run ruida-emulator')
argv = sys.argv[1:]
args = parser.parse_args(argv)
if args.version:
print("MeerK40t %s" % MEERK40T_VERSION)
else:
kernel.register('static', 'RasterScripts', RasterScripts)
kernel.register('module', 'Console', Console)
kernel.register('module', 'LaserServer', LaserServer)
kernel.register('load', 'SVGLoader', SVGLoader)
kernel.register('load', 'ImageLoader', ImageLoader)
kernel.register('load', "DxfLoader", DxfLoader)
kernel.register('save', 'SVGWriter', SVGWriter)
kernel.register('device', 'Lhystudios', LhystudiosDevice)
kernel.register('disabled-device', 'Moshiboard', MoshiboardDevice)
kernel.register('disabled-device', 'Ruida', RuidaDevice)
kernel.register('disabled-device', 'GRBL', GrblDevice)
if not args.no_gui:
from wxMeerK40t import wxMeerK40t
kernel.register_module('wxMeerK40t', wxMeerK40t)
meerk40tgui = kernel.open('module', 'wxMeerK40t', device=kernel)
kernel.boot()
device = None
if 'device' in kernel.instances:
# Device was booted by kernel boot.
for key, d in kernel.instances['device'].items():
device = d
break
else:
if args.no_gui:
# Without a booted device, if also no gui, just start a default device.
device = kernel.open('device', 'Lhystudios', instance_name='1', uid=1)
device.boot()
pass
else:
# There is a gui but the device wasn't booted.
devices = list(kernel.derivable())
device_entries = list()
for dev in devices:
try:
device_entries.append(int(dev))
except ValueError:
continue
if len(device_entries) == 0:
# There are no device entries in the kernel.
kernel.device_add('Lhystudios', 1)
kernel.device_boot()
for key, d in kernel.instances['device'].items():
device = d
break
if device is None:
# Set device to kernel and start the DeviceManager
device = kernel
kernel.open('window', "DeviceManager", None)
if args.verbose:
# Debug the device.
device.execute('Debug Device')
kernel.execute('Debug Device')
if args.input is not None:
# load the given filename.
import os
kernel.load(os.path.realpath(args.input.name))
if args.path is not None:
# Force the inclusion of the path.
from svgelements import Path
try:
path = Path(args.path)
path.stroke = Color('blue')
kernel.elements.add_elem(path)
except Exception:
print("SVG Path Exception to: %s" % ' '.join(sys.argv))
if args.transform:
# Transform any data loaded data
from svgelements import Matrix
m = Matrix(args.transform)
for e in kernel.elements.elems():
e *= m
try:
e.modified()
except AttributeError:
pass
if args.set is not None:
# Set the variables requested here.
for v in args.set:
attr = v[0]
value = v[1]
if hasattr(device, attr):
v = getattr(device, attr)
if isinstance(v, bool):
setattr(device, attr, bool(value))
elif isinstance(v, int):
setattr(device, attr, int(value))
elif isinstance(v, float):
setattr(device, attr, float(value))
elif isinstance(v, str):
setattr(device, attr, str(value))
if device is not kernel: # We can process this stuff only with a real device.
if args.grbl is not None:
# Start the GRBL server on the device.
device.setting(int, 'grbl_flip_x', 1)
device.setting(int, 'grbl_flip_y', 1)
device.setting(int, 'grbl_home_x', 0)
device.setting(int, 'grbl_home_y', 0)
if args.flip_y:
device.grbl_flip_x = -1
if args.flip_x:
device.grbl_flip_y = -1
if args.adjust_y is not None:
device.grbl_home_y = args.adjust_y
if args.adjust_x is not None:
device.grbl_home_x = args.adjust_x
device.using('module', 'Console').write('grblserver\n')
if args.ruida:
device.using('module', 'Console').write('ruidaserver\n')
if args.home:
device.using('module', 'Console').write('home\n')
device.setting(bool, 'quit', True)
device.quit = True
if args.auto:
# Automatically classify and start the job.
elements = kernel.elements
elements.classify(list(elements.elems()))
ops = list(elements.ops())
if args.speed is not None:
for o in ops:
o.speed = args.speed
device.spooler.jobs(ops)
device.setting(bool, 'quit', True)
device.quit = True
if args.execute:
egv_file = args.execute
device.setting(bool, 'quit', True)
device.quit = True
try:
device.using('module', 'Console').write('egv_import %s\n' % egv_file)
except FileNotFoundError:
pass
if args.origin:
def origin():
yield COMMAND_WAIT_FINISH
yield COMMAND_MODE_RAPID
yield COMMAND_SET_ABSOLUTE
yield COMMAND_MOVE, 0, 0
device.spooler.job(origin)
if args.mock:
# Set the device to mock.
device.setting(bool, 'mock', True)
device.mock = True
if args.output is not None:
import os
kernel.save(os.path.realpath(args.output.name))
if args.batch:
device.add_watcher('console', print)
console = device.using('module', 'Console')
with args.batch as batch:
for line in batch:
console.write(line.strip() + '\n')
device.remove_watcher('console', print)
if args.console:
console = device.using('module', 'Console')
device.add_watcher('console', print)
kernel.add_watcher('shutdown', print)
while True:
device_entries = input('>')
if device.state == STATE_TERMINATE:
break
if device_entries == 'quit':
break
console.write(device_entries + '\n')
device.remove_watcher('console', print)
if not args.no_gui:
if device.state != STATE_TERMINATE:
if 'device' in kernel.instances:
for key, device in kernel.instances['device'].items():
device.open('window', 'MeerK40t', None)
meerk40tgui.MainLoop() | 39.213178 | 125 | 0.583869 | import argparse
import sys
from Console import Console
from DefaultModules import *
from GrblDevice import GrblDevice
from LhystudiosDevice import LhystudiosDevice
from MoshiboardDevice import MoshiboardDevice
from RasterScripts import RasterScripts
from RuidaDevice import RuidaDevice
from LaserServer import *
try:
from math import tau
except ImportError:
from math import pi
tau = pi * 2
MEERK40T_VERSION = '0.6.17'
kernel = Kernel()
kernel.open('module', 'Signaler')
kernel.open('module', 'Elemental')
def pair(value):
    """argparse ``type=`` converter for ``key=value`` settings; returns [key, value].

    Raises ``argparse.ArgumentTypeError`` on malformed input.  The original
    raised an ``ArgumentParser`` instance, which is not an exception and
    produced a bare ``TypeError`` instead of a usage error.
    """
    rv = value.split('=')
    if len(rv) != 2:
        raise argparse.ArgumentTypeError("expected a single 'key=value' pair, got %r" % value)
    return rv
parser = argparse.ArgumentParser()
parser.add_argument('input', nargs='?', type=argparse.FileType('r'), help='input file')
parser.add_argument('-V', '--version', action='store_true', help='MeerK40t version')
parser.add_argument('-z', '--no_gui', action='store_true', help='run without gui')
parser.add_argument('-c', '--console', action='store_true', help='start as console')
parser.add_argument('-a', '--auto', action='store_true', help='start running laser')
parser.add_argument('-p', '--path', type=str, help='add SVG Path command')
parser.add_argument('-t', '--transform', type=str, help="adds SVG Transform command")
parser.add_argument('-o', '--output', type=argparse.FileType('w'), help='output file name')
parser.add_argument('-v', '--verbose', action='store_true', help='display verbose debugging')
parser.add_argument('-m', '--mock', action='store_true', help='uses mock usb device')
parser.add_argument('-s', '--set', action='append', nargs='?', type=pair, metavar='key=value', help='set a device variable')
parser.add_argument('-H', '--home', action='store_true', help="prehome the device")
parser.add_argument('-O', '--origin', action='store_true', help="return back to 0,0 on finish")
parser.add_argument('-b', '--batch', type=argparse.FileType('r'), help='console batch file')
parser.add_argument('-S', '--speed', type=float, help='set the speed of all operations')
parser.add_argument('-x', '--execute', type=str, help='direct execute egv file')
parser.add_argument('-gs', '--grbl', type=int, help='run grbl-emulator on given port.')
parser.add_argument('-gy', '--flip_y', action='store_true', help="grbl y-flip")
parser.add_argument('-gx', '--flip_x', action='store_true', help="grbl x-flip")
parser.add_argument('-ga', '--adjust_x', type=int, help='adjust grbl home_x position')
parser.add_argument('-gb', '--adjust_y', type=int, help='adjust grbl home_y position')
parser.add_argument('-rs', '--ruida', action='store_true', help='run ruida-emulator')
argv = sys.argv[1:]
args = parser.parse_args(argv)
if args.version:
print("MeerK40t %s" % MEERK40T_VERSION)
else:
kernel.register('static', 'RasterScripts', RasterScripts)
kernel.register('module', 'Console', Console)
kernel.register('module', 'LaserServer', LaserServer)
kernel.register('load', 'SVGLoader', SVGLoader)
kernel.register('load', 'ImageLoader', ImageLoader)
kernel.register('load', "DxfLoader", DxfLoader)
kernel.register('save', 'SVGWriter', SVGWriter)
kernel.register('device', 'Lhystudios', LhystudiosDevice)
kernel.register('disabled-device', 'Moshiboard', MoshiboardDevice)
kernel.register('disabled-device', 'Ruida', RuidaDevice)
kernel.register('disabled-device', 'GRBL', GrblDevice)
if not args.no_gui:
from wxMeerK40t import wxMeerK40t
kernel.register_module('wxMeerK40t', wxMeerK40t)
meerk40tgui = kernel.open('module', 'wxMeerK40t', device=kernel)
kernel.boot()
device = None
if 'device' in kernel.instances:
for key, d in kernel.instances['device'].items():
device = d
break
else:
if args.no_gui:
device = kernel.open('device', 'Lhystudios', instance_name='1', uid=1)
device.boot()
pass
else:
devices = list(kernel.derivable())
device_entries = list()
for dev in devices:
try:
device_entries.append(int(dev))
except ValueError:
continue
if len(device_entries) == 0:
# There are no device entries in the kernel.
kernel.device_add('Lhystudios', 1)
kernel.device_boot()
for key, d in kernel.instances['device'].items():
device = d
break
if device is None:
# Set device to kernel and start the DeviceManager
device = kernel
kernel.open('window', "DeviceManager", None)
if args.verbose:
# Debug the device.
device.execute('Debug Device')
kernel.execute('Debug Device')
if args.input is not None:
# load the given filename.
import os
kernel.load(os.path.realpath(args.input.name))
if args.path is not None:
# Force the inclusion of the path.
from svgelements import Path
try:
path = Path(args.path)
path.stroke = Color('blue')
kernel.elements.add_elem(path)
except Exception:
print("SVG Path Exception to: %s" % ' '.join(sys.argv))
if args.transform:
# Transform any data loaded data
from svgelements import Matrix
m = Matrix(args.transform)
for e in kernel.elements.elems():
e *= m
try:
e.modified()
except AttributeError:
pass
if args.set is not None:
# Set the variables requested here.
for v in args.set:
attr = v[0]
value = v[1]
if hasattr(device, attr):
v = getattr(device, attr)
if isinstance(v, bool):
setattr(device, attr, bool(value))
elif isinstance(v, int):
setattr(device, attr, int(value))
elif isinstance(v, float):
setattr(device, attr, float(value))
elif isinstance(v, str):
setattr(device, attr, str(value))
if device is not kernel: # We can process this stuff only with a real device.
if args.grbl is not None:
# Start the GRBL server on the device.
device.setting(int, 'grbl_flip_x', 1)
device.setting(int, 'grbl_flip_y', 1)
device.setting(int, 'grbl_home_x', 0)
device.setting(int, 'grbl_home_y', 0)
if args.flip_y:
device.grbl_flip_x = -1
if args.flip_x:
device.grbl_flip_y = -1
if args.adjust_y is not None:
device.grbl_home_y = args.adjust_y
if args.adjust_x is not None:
device.grbl_home_x = args.adjust_x
device.using('module', 'Console').write('grblserver\n')
if args.ruida:
device.using('module', 'Console').write('ruidaserver\n')
if args.home:
device.using('module', 'Console').write('home\n')
device.setting(bool, 'quit', True)
device.quit = True
if args.auto:
# Automatically classify and start the job.
elements = kernel.elements
elements.classify(list(elements.elems()))
ops = list(elements.ops())
if args.speed is not None:
for o in ops:
o.speed = args.speed
device.spooler.jobs(ops)
device.setting(bool, 'quit', True)
device.quit = True
if args.execute:
egv_file = args.execute
device.setting(bool, 'quit', True)
device.quit = True
try:
device.using('module', 'Console').write('egv_import %s\n' % egv_file)
except FileNotFoundError:
pass
if args.origin:
def origin():
yield COMMAND_WAIT_FINISH
yield COMMAND_MODE_RAPID
yield COMMAND_SET_ABSOLUTE
yield COMMAND_MOVE, 0, 0
device.spooler.job(origin)
if args.mock:
# Set the device to mock.
device.setting(bool, 'mock', True)
device.mock = True
if args.output is not None:
import os
kernel.save(os.path.realpath(args.output.name))
if args.batch:
device.add_watcher('console', print)
console = device.using('module', 'Console')
with args.batch as batch:
for line in batch:
console.write(line.strip() + '\n')
device.remove_watcher('console', print)
if args.console:
console = device.using('module', 'Console')
device.add_watcher('console', print)
kernel.add_watcher('shutdown', print)
while True:
device_entries = input('>')
if device.state == STATE_TERMINATE:
break
if device_entries == 'quit':
break
console.write(device_entries + '\n')
device.remove_watcher('console', print)
if not args.no_gui:
if device.state != STATE_TERMINATE:
if 'device' in kernel.instances:
for key, device in kernel.instances['device'].items():
device.open('window', 'MeerK40t', None)
meerk40tgui.MainLoop() | true | true |
1c2d6b20355c1a024ee4a79bffc236f44ba72659 | 5,027 | py | Python | pytest_localserver/https.py | kianmeng/pytest-localserver | 387eb4a9e2b9a0e116685fd0ed2ace7dd710bb5b | [
"MIT"
] | null | null | null | pytest_localserver/https.py | kianmeng/pytest-localserver | 387eb4a9e2b9a0e116685fd0ed2ace7dd710bb5b | [
"MIT"
] | 2 | 2021-09-14T09:06:20.000Z | 2021-09-14T17:23:07.000Z | pytest_localserver/https.py | kianmeng/pytest-localserver | 387eb4a9e2b9a0e116685fd0ed2ace7dd710bb5b | [
"MIT"
] | null | null | null | # Copyright (C) 2010-2013 Sebastian Rahlf and others (see AUTHORS).
#
# This program is release under the MIT license. You can find the full text of
# the license in the LICENSE file.
import os.path
from pytest_localserver.http import ContentServer
#: default server certificate
DEFAULT_CERTIFICATE = os.path.join(
os.path.abspath(os.path.dirname(__file__)), 'server.pem')
class SecureContentServer (ContentServer):
    """
    Small test server which works just like :class:`http.Server` over HTTP::

        server = SecureContentServer(
            port=8080, key='/srv/my.key', cert='my.certificate')
        server.start()
        print 'Test server running at %s' % server.url
        server.serve_content(open('/path/to/some.file').read())

        # any call to https://localhost:8080 will get the contents of
        # /path/to/some.file as a response.

    To avoid *ssl handshake failures* you can import the `pytest-localserver
    CA`_ into your browser of choice.

    How to create a self-signed certificate
    ---------------------------------------

    If you want to create your own server certificate, you need `OpenSSL`_
    installed on your machine. A self-signed certificate consists of a
    certificate and a private key for your server. It can be created with
    a command like this, using OpenSSL 1.1.1::

        openssl req \
            -x509 \
            -newkey rsa:4096 \
            -sha256 \
            -days 3650 \
            -nodes \
            -keyout server.pem \
            -out server.pem \
            -subj "/CN=127.0.0.1/O=pytest-localserver/OU=Testing Dept." \
            -addext "subjectAltName=DNS:localhost"

    Note that both key and certificate are in a single file now named
    ``server.pem``.

    How to create your own Certificate Authority
    --------------------------------------------

    Generate a server key and request for signing (csr). Make sure that the
    common name (CN) is your IP address/domain name (e.g. ``localhost``). ::

        openssl genpkey \
            -algorithm RSA \
            -pkeyopt rsa_keygen_bits:4096 \
            -out server.key
        openssl req \
            -new \
            -addext "subjectAltName=DNS:localhost" \
            -key server.key \
            -out server.csr

    Generate your own CA. Make sure that this time the CN is *not* your IP
    address/domain name (e.g. ``localhost CA``). ::

        openssl genpkey \
            -algorithm RSA \
            -pkeyopt rsa_keygen_bits:4096 \
            -aes256 \
            -out ca.key
        openssl req \
            -new \
            -x509 \
            -key ca.key \
            -out ca.crt

    Sign the certificate signing request (csr) with the self-created CA that
    you made earlier. Note that OpenSSL does not copy the subjectAltName field
    from the request (csr), so you have to provide it again as a file. If you
    issue subsequent certificates and your browser already knows about previous
    ones simply increment the serial number. ::

        echo "subjectAltName=DNS:localhost" >server-extensions.txt
        openssl x509 -req -in server.csr -CA ca.crt -CAkey ca.key \
            -set_serial 01 -extfile server-extensions.txt -out server.crt

    Create a single file for both key and certificate::

        cat server.key server.crt > server.pem

    Now you only need to import ``ca.crt`` as a CA in your browser.

    Want to know more?
    ------------------

    This information was compiled from the following sources, which you might
    find helpful if you want to dig deeper into `pyOpenSSH`_, certificates and
    CAs:

    - http://code.activestate.com/recipes/442473/
    - http://www.tc.umn.edu/~brams006/selfsign.html

    A more advanced tutorial on certificate management can be found in the
    OpenSSL documentation.

    .. _pytest-localserver CA: https://raw.githubusercontent.com/pytest-dev/pytest-localserver/master/pytest_localserver/ca.crt
    .. _pyOpenSSH: https://launchpad.net/pyopenssl
    .. _OpenSSL: https://www.openssl.org/
    """

    def __init__(self, host='localhost', port=0,
                 key=DEFAULT_CERTIFICATE, cert=DEFAULT_CERTIFICATE):
        """
        :param host: interface to bind to (default ``localhost``).
        :param port: TCP port; 0 picks a free ephemeral port.
        :param key: location of file containing the server private key.
        :param cert: location of file containing server certificate.
        """
        # key and cert are handed to Werkzeug as an SSL context tuple; both
        # default to the bundled combined PEM.
        super(SecureContentServer, self).__init__(host, port, ssl_context=(key, cert))
if __name__ == '__main__':  # pragma: no cover
    # Manual smoke test: serve a file (argv[1], or the package README) over
    # HTTPS with the bundled self-signed certificate until Ctrl-C.
    import sys
    import time

    print('Using certificate %s.' % DEFAULT_CERTIFICATE)

    server = SecureContentServer()
    server.start()
    server.logging = True

    print('HTTPS server is running at %s' % server.url)
    print('Type <Ctrl-C> to stop')

    try:
        path = sys.argv[1]
    except IndexError:
        path = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), '..', 'README.rst')

    # Use a context manager so the file handle is closed promptly (the
    # original leaked it via a bare open(...).read()).
    with open(path) as fp:
        server.serve_content(fp.read(), 302)

    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        print('\rstopping...')
        server.stop()
| 32.642857 | 127 | 0.623632 |
import os.path
from pytest_localserver.http import ContentServer
DEFAULT_CERTIFICATE = os.path.join(
os.path.abspath(os.path.dirname(__file__)), 'server.pem')
class SecureContentServer (ContentServer):
def __init__(self, host='localhost', port=0,
key=DEFAULT_CERTIFICATE, cert=DEFAULT_CERTIFICATE):
super(SecureContentServer, self).__init__(host, port, ssl_context=(key, cert))
if __name__ == '__main__':
import sys
import time
print('Using certificate %s.' % DEFAULT_CERTIFICATE)
server = SecureContentServer()
server.start()
server.logging = True
print('HTTPS server is running at %s' % server.url)
print('Type <Ctrl-C> to stop')
try:
path = sys.argv[1]
except IndexError:
path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '..', 'README.rst')
server.serve_content(open(path).read(), 302)
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
print('\rstopping...')
server.stop()
| true | true |
1c2d6b234b979e190d0b0b73d6e00747ee6c7c5a | 1,319 | py | Python | tests/empty_test.py | pkking/waaagh_system | 1587a2df863136bb2acb275427300c8e4fda977d | [
"Apache-2.0"
] | null | null | null | tests/empty_test.py | pkking/waaagh_system | 1587a2df863136bb2acb275427300c8e4fda977d | [
"Apache-2.0"
] | null | null | null | tests/empty_test.py | pkking/waaagh_system | 1587a2df863136bb2acb275427300c8e4fda977d | [
"Apache-2.0"
] | null | null | null | import pytest
import json
@pytest.mark.usefixtures("db")
class TestEmptyResources:
    """With an empty database, every collection listing and item lookup must
    return 404 together with the API's standard "does not exist" messages."""

    def _message(self, res):
        # Decode the JSON error payload and extract its message field.
        return json.loads(res.data.decode('utf-8'))['message']

    def _check_resource_empty(self, endpoint, model):
        # Listing the collection reports that no instances exist at all.
        res = self.app.get(endpoint)
        assert res.status_code == 404
        assert 'not any {} exist'.format(model) in self._message(res)
        # Fetching a specific id reports that this instance is missing.
        res = self.app.get(endpoint + '/1')
        assert res.status_code == 404
        assert "{} id 1 doesn't exist".format(model) in self._message(res)

    def test_empty_post(self):
        self._check_resource_empty('/posts', 'Post')

    def test_empty_persion(self):
        self._check_resource_empty('/persions', 'Persion')

    def test_empty_tag(self):
        self._check_resource_empty('/tags', 'Tag')
import json
@pytest.mark.usefixtures("db")
class TestEmptyResources:
def test_empty_post(self):
res = self.app.get('/posts')
assert res.status_code == 404
assert 'not any Post exist' in json.loads(res.data.decode('utf-8'))['message']
res = self.app.get('/posts/1')
assert res.status_code == 404
assert "Post id 1 doesn't exist" in json.loads(res.data.decode('utf-8'))['message']
def test_empty_persion(self):
# all the resources should return 404
res = self.app.get('/persions')
assert res.status_code == 404
assert 'not any Persion exist' in json.loads(res.data.decode('utf-8'))['message']
res = self.app.get('/persions/1')
assert res.status_code == 404
assert "Persion id 1 doesn't exist" in json.loads(res.data.decode('utf-8'))['message']
def test_empty_tag(self):
res = self.app.get('/tags')
assert res.status_code == 404
assert 'not any Tag exist' in json.loads(res.data.decode('utf-8'))['message']
res = self.app.get('/tags/1')
assert res.status_code == 404
assert "Tag id 1 doesn't exist" in json.loads(res.data.decode('utf-8'))['message'] | true | true |
1c2d6b8f79b45992ad7532202bda3be14726f0b1 | 3,588 | py | Python | ic_engine.py | cattech-lab/cantera_examples | 13e969b2a767bed5bb0d79adf86187cd9fbd090d | [
"MIT"
] | 4 | 2020-11-27T03:11:07.000Z | 2022-02-17T17:09:23.000Z | ic_engine.py | Ohlagrange/cantera_examples | 13e969b2a767bed5bb0d79adf86187cd9fbd090d | [
"MIT"
] | null | null | null | ic_engine.py | Ohlagrange/cantera_examples | 13e969b2a767bed5bb0d79adf86187cd9fbd090d | [
"MIT"
] | 3 | 2020-11-27T03:11:13.000Z | 2022-01-07T13:39:07.000Z | """
Simulation of a internal combustion engine.
"""
import cantera as ct
import numpy as np
import csv
#------------------------------------------------------
# Input Parameters
rpm = 600.0 # engine speed [rpm]
bore = 82.55 # bore diameter [mm]
stroke = 114.3 # stroke [mm]
cratio = 10.0 # compression ratio [-]
conrod = 200.0 # connecting rod [mm]
# initial temperature, pressure, and equivalence ratio
T_ini = 350.0 # [K]
p_ini = 1.0e5 # [Pa]
phi = 0.33
# outer temperature, pressure, and composition
T_out = 300.0 # [K]
p_out = 1.0e5 # [Pa]
c_out = 'O2:1.0, N2:3.76'
# Reaction mechanism name
reaction_mechanism = 'reduced_247.cti'
# Simulation time
ca_start = -144.0 # start CA [deg]
ca_end = 180.0 # end CA [deg]
ca_step = 0.01 # step CA [deg]
ca_out = 0.2 # output CA [deg]
#------------------------------------------------------
# load reaction mechanism
gas = ct.Solution(reaction_mechanism)
# define initial state
gas.TP = T_ini, p_ini
gas.set_equivalence_ratio(phi, 'NC7H16', 'O2:1.0, N2:3.76')
r = ct.IdealGasReactor(gas)
sim = ct.ReactorNet([r])
gas.TPX = T_out, p_out, c_out
outer = ct.Reservoir(gas)
# convert time to crank angle [rad]
rps = rpm / 60.0
def crank_angle(t):
    """Return the crank angle [rad] at simulation time ``t`` [s]."""
    start_rad = ca_start * np.pi / 180.0
    return 2.0 * np.pi * rps * t + start_rad
# set up IC engine parameters (convert [mm] -> [m])
stroke *= 0.001
bore *= 0.001
conrod *= 0.001
area = 0.25 * np.pi * bore * bore # piston cross-section [m^2]
vol_h = stroke * area # swept (displacement) volume [m^3]
vol_c = vol_h / (cratio - 1.0) # clearance volume [m^3]
ca = crank_angle(0.0) # initial CA [rad]
r_ca = stroke * 0.5 # crank radius [m]
# slider-crank relation: cylinder volume at the initial crank angle
vol_ini= (r_ca + conrod - (r_ca * np.cos(ca) + np.sqrt(conrod**2 - r_ca**2 * np.sin(ca)**2))) * area + vol_c
r.volume = vol_ini # initial volume
# set up piston as a moving wall between the outer reservoir and the cylinder
piston = ct.Wall(outer, r)
piston.area = area # piston area
def piston_speed(t):
    """Piston speed [m/s] at time ``t`` from slider-crank kinematics."""
    theta = crank_angle(t)
    rod_term = np.sqrt(conrod**2 - r_ca**2 * np.sin(theta)**2)
    crank_term = r_ca * np.sin(theta) + r_ca**2 * np.sin(2.0 * theta) / 2.0 / rod_term
    return -2.0 * np.pi * rps * crank_term
piston.set_velocity(piston_speed) # piston speed
# set up time (convert crank-angle quantities [deg] to wall-clock [s])
t_sim = (ca_end - ca_start) / rps / 360.0 # total simulation time
t_step = ca_step / rps / 360.0 # simulation time step
t_out = ca_out / rps / 360.0 # simulation output interval
ttt = 0.0 # next output time [s]
# set up output data arrays
states = ct.SolutionArray(r.thermo)
t = [] # crank angle of each stored sample [deg]
heat_release_rate = []
# output file
outfile = open('ic_engine.csv', 'w', newline="")
csvfile = csv.writer(outfile)
csvfile.writerow(['ca[deg]','P[bar]','T[K]','HR[J/deg]'])
# do simulation
for t_i in np.arange(0, t_sim, t_step):
    sim.advance(t_i)
    # write output data only at the requested output interval
    if t_i >= ttt:
        ca = crank_angle(t_i) * 180.0 / np.pi
        t.append(ca)
        states.append(r.thermo.state)
        # chemical heat release rate -V * sum(h_i * wdot_i) [J/s],
        # then rescaled to per-degree of crank angle via t_step/ca_step
        hr = -r.volume * ct.gas_constant * r.T * np.sum(gas.standard_enthalpies_RT * r.thermo.net_production_rates, 0)
        hr = hr * t_step / ca_step
        heat_release_rate.append(hr)
        csvfile.writerow([ca, r.thermo.P / 1.0e5, r.T, hr])
        ttt += t_out
outfile.close()
#------------------------------------------------------
# Plot Results in matplotlib
import matplotlib.pyplot as plt
# pressure
fig1 = plt.figure()
ax1 = fig1.add_subplot()
ax1.plot(t, states.P / 1.0e5)
ax1.set_ylabel('P [bar]')
ax1.set_xlabel('CA [deg]')
ax1.set_xlim(-60, 60)
# temperature
fig2 = plt.figure()
ax2 = fig2.add_subplot()
ax2.plot(t, states.T)
ax2.set_ylabel('T [K]')
ax2.set_xlabel('CA [deg]')
ax2.set_xlim(-60, 60)
# heat release rate
fig3 = plt.figure()
ax3 = fig3.add_subplot()
ax3.plot(t, heat_release_rate)
ax3.set_ylabel('Heat release rate [J/deg]')
ax3.set_xlabel('CA [deg]')
ax3.set_xlim(-60, 60)
plt.show()
| 26 | 133 | 0.628763 |
import cantera as ct
import numpy as np
import csv
rpm = 600.0
bore = 82.55
stroke = 114.3
cratio = 10.0
conrod = 200.0
T_ini = 350.0
p_ini = 1.0e5
phi = 0.33
T_out = 300.0
p_out = 1.0e5
c_out = 'O2:1.0, N2:3.76'
reaction_mechanism = 'reduced_247.cti'
ca_start = -144.0
ca_end = 180.0
ca_step = 0.01
ca_out = 0.2
gas = ct.Solution(reaction_mechanism)
gas.TP = T_ini, p_ini
gas.set_equivalence_ratio(phi, 'NC7H16', 'O2:1.0, N2:3.76')
r = ct.IdealGasReactor(gas)
sim = ct.ReactorNet([r])
gas.TPX = T_out, p_out, c_out
outer = ct.Reservoir(gas)
rps = rpm / 60.0
def crank_angle(t):
return 2.0 * np.pi * rps * t + ca_start * np.pi / 180.0
stroke *= 0.001
bore *= 0.001
conrod *= 0.001
area = 0.25 * np.pi * bore * bore
vol_h = stroke * area
vol_c = vol_h / (cratio - 1.0)
ca = crank_angle(0.0)
r_ca = stroke * 0.5
vol_ini= (r_ca + conrod - (r_ca * np.cos(ca) + np.sqrt(conrod**2 - r_ca**2 * np.sin(ca)**2))) * area + vol_c
r.volume = vol_ini
piston = ct.Wall(outer, r)
piston.area = area
def piston_speed(t):
ca = crank_angle(t)
return -2.0 * np.pi * rps * (r_ca * np.sin(ca) + r_ca**2 * np.sin(2.0 * ca) / 2.0 / np.sqrt(conrod**2 - r_ca**2 * np.sin(ca)**2))
piston.set_velocity(piston_speed)
t_sim = (ca_end - ca_start) / rps / 360.0
t_step = ca_step / rps / 360.0
t_out = ca_out / rps / 360.0
ttt = 0.0
states = ct.SolutionArray(r.thermo)
t = []
heat_release_rate = []
outfile = open('ic_engine.csv', 'w', newline="")
csvfile = csv.writer(outfile)
csvfile.writerow(['ca[deg]','P[bar]','T[K]','HR[J/deg]'])
for t_i in np.arange(0, t_sim, t_step):
sim.advance(t_i)
if t_i >= ttt:
ca = crank_angle(t_i) * 180.0 / np.pi
t.append(ca)
states.append(r.thermo.state)
hr = -r.volume * ct.gas_constant * r.T * np.sum(gas.standard_enthalpies_RT * r.thermo.net_production_rates, 0)
hr = hr * t_step / ca_step
heat_release_rate.append(hr)
csvfile.writerow([ca, r.thermo.P / 1.0e5, r.T, hr])
ttt += t_out
outfile.close()
import matplotlib.pyplot as plt
fig1 = plt.figure()
ax1 = fig1.add_subplot()
ax1.plot(t, states.P / 1.0e5)
ax1.set_ylabel('P [bar]')
ax1.set_xlabel('CA [deg]')
ax1.set_xlim(-60, 60)
fig2 = plt.figure()
ax2 = fig2.add_subplot()
ax2.plot(t, states.T)
ax2.set_ylabel('T [K]')
ax2.set_xlabel('CA [deg]')
ax2.set_xlim(-60, 60)
fig3 = plt.figure()
ax3 = fig3.add_subplot()
ax3.plot(t, heat_release_rate)
ax3.set_ylabel('Heat release rate [J/deg]')
ax3.set_xlabel('CA [deg]')
ax3.set_xlim(-60, 60)
plt.show()
| true | true |
1c2d6bee2051e16a5c40b582fca8a67a0f1ad392 | 427 | py | Python | env/Lib/site-packages/plotly/validators/funnel/marker/colorbar/title/_text.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | 11,750 | 2015-10-12T07:03:39.000Z | 2022-03-31T20:43:15.000Z | venv/Lib/site-packages/plotly/validators/funnel/marker/colorbar/title/_text.py | wakisalvador/constructed-misdirection | 74779e9ec640a11bc08d5d1967c85ac4fa44ea5e | [
"Unlicense"
] | 2,951 | 2015-10-12T00:41:25.000Z | 2022-03-31T22:19:26.000Z | venv/Lib/site-packages/plotly/validators/funnel/marker/colorbar/title/_text.py | wakisalvador/constructed-misdirection | 74779e9ec640a11bc08d5d1967c85ac4fa44ea5e | [
"Unlicense"
] | 2,623 | 2015-10-15T14:40:27.000Z | 2022-03-28T16:05:50.000Z | import _plotly_utils.basevalidators
class TextValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``funnel.marker.colorbar.title.text`` property."""

    def __init__(
        self, plotly_name="text", parent_name="funnel.marker.colorbar.title", **kwargs
    ):
        # Callers may override the edit type; the default redraws colorbars.
        edit_type = kwargs.pop("edit_type", "colorbars")
        super(TextValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs
        )
| 30.5 | 86 | 0.65808 | import _plotly_utils.basevalidators
class TextValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self, plotly_name="text", parent_name="funnel.marker.colorbar.title", **kwargs
):
super(TextValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
**kwargs
)
| true | true |
1c2d6ce1b5a4e74a3fcafcdee6cf905f17b70757 | 10,336 | py | Python | core/platform/email/mailgun_email_services_test.py | anubhavsinha98/oppia | 9a64ea2e91d2f471ce22bd39da77b43dccd5b51f | [
"Apache-2.0"
] | 1 | 2019-08-31T17:06:41.000Z | 2019-08-31T17:06:41.000Z | core/platform/email/mailgun_email_services_test.py | anubhavsinha98/oppia | 9a64ea2e91d2f471ce22bd39da77b43dccd5b51f | [
"Apache-2.0"
] | null | null | null | core/platform/email/mailgun_email_services_test.py | anubhavsinha98/oppia | 9a64ea2e91d2f471ce22bd39da77b43dccd5b51f | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the Mailgun API wrapper."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from core.platform.email import mailgun_email_services
from core.tests import test_utils
import feconf
import python_utils
class EmailTests(test_utils.GenericTestBase):
    """Tests for sending emails."""
    def test_post_to_mailgun(self):
        """Test for sending HTTP POST request."""
        # Replace the low-level URL helpers with identity/echo lambdas so the
        # outgoing request is captured instead of hitting the network.
        swapped_urlopen = lambda x: x
        swapped_request = lambda *args: args
        swap_urlopen_context = self.swap(
            python_utils, 'url_open', swapped_urlopen)
        swap_request_context = self.swap(
            python_utils, 'url_request', swapped_request)
        swap_api = self.swap(feconf, 'MAILGUN_API_KEY', 'key')
        swap_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain')
        with swap_urlopen_context, swap_request_context, swap_api, swap_domain:
            result = mailgun_email_services.post_to_mailgun({'data': 'data'})
            # 'YXBpOmtleQ==' is the base64 encoding of 'api:key' (basic auth).
            expected = (
                'https://api.mailgun.net/v3/domain/messages', 'data=data',
                {'Authorization': 'Basic YXBpOmtleQ=='})
            self.assertEqual(result, expected)
    def test_send_mail_raises_exception_for_missing_api_key(self):
        """Tests the missing Mailgun API key exception."""
        mailgun_api_exception = (
            self.assertRaisesRegexp(
                Exception, 'Mailgun API key is not available.'))
        allow_emailing = self.swap(feconf, 'CAN_SEND_EMAILS', True)
        with mailgun_api_exception, allow_emailing:
            mailgun_email_services.send_mail(
                feconf.SYSTEM_EMAIL_ADDRESS, feconf.ADMIN_EMAIL_ADDRESS,
                'subject', 'body', 'html', bcc_admin=False)
    def test_send_mail_raises_exception_for_missing_domain_name(self):
        """Tests the missing Mailgun domain name exception."""
        mailgun_api = self.swap(feconf, 'MAILGUN_API_KEY', 'api')
        mailgun_domain_name_exception = (
            self.assertRaisesRegexp(
                Exception, 'Mailgun domain name is not set.'))
        allow_emailing = self.swap(feconf, 'CAN_SEND_EMAILS', True)
        with mailgun_api, mailgun_domain_name_exception, allow_emailing:
            mailgun_email_services.send_mail(
                feconf.SYSTEM_EMAIL_ADDRESS, feconf.ADMIN_EMAIL_ADDRESS,
                'subject', 'body', 'html', bcc_admin=False)
    def test_send_mail_raises_exception_for_invalid_permissions(self):
        """Tests the send_mail exception raised for invalid user permissions."""
        # CAN_SEND_EMAILS is deliberately left unswapped here so sending is
        # disallowed even though key and domain are configured.
        send_email_exception = (
            self.assertRaisesRegexp(
                Exception, 'This app cannot send emails to users.'))
        mailgun_api = self.swap(feconf, 'MAILGUN_API_KEY', 'api')
        mailgun_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain')
        with mailgun_api, mailgun_domain, send_email_exception:
            mailgun_email_services.send_mail(
                feconf.SYSTEM_EMAIL_ADDRESS, feconf.ADMIN_EMAIL_ADDRESS,
                'subject', 'body', 'html', bcc_admin=False)
    def test_send_mail_data_properly_sent(self):
        """Verifies that the data sent in send_mail is correct."""
        mailgun_api = self.swap(feconf, 'MAILGUN_API_KEY', 'api')
        mailgun_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain')
        allow_emailing = self.swap(feconf, 'CAN_SEND_EMAILS', True)
        # Data we expect to have been sent in the
        # mailgun_email_services.post_to_mailgun().
        expected = {'from': feconf.SYSTEM_EMAIL_ADDRESS,
                    'to': feconf.ADMIN_EMAIL_ADDRESS,
                    'subject': 'subject',
                    'text': 'body',
                    'html': 'html'}
        # Lambda function, will replace post_to_mailgun().
        req_post_lambda = (lambda data=None:
                           self.assertDictContainsSubset(expected, data))
        post_request = self.swap(
            mailgun_email_services, 'post_to_mailgun', req_post_lambda)
        with mailgun_api, mailgun_domain, post_request, allow_emailing:
            mailgun_email_services.send_mail(
                feconf.SYSTEM_EMAIL_ADDRESS, feconf.ADMIN_EMAIL_ADDRESS,
                'subject', 'body', 'html', bcc_admin=False)
    def test_bcc_admin_flag(self):
        """Verifies that the bcc admin flag is working properly in send_mail.
        Note that we replace the mailgun_email_services.post_to_mailgun()
        function in send_mail with an alternate lambda that asserts the correct
        values were placed in the data dictionary that is then passed to the
        mailgun api.
        """
        mailgun_api = self.swap(feconf, 'MAILGUN_API_KEY', 'api')
        mailgun_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain')
        allow_emailing = self.swap(feconf, 'CAN_SEND_EMAILS', True)
        # Lambda function, will replace post_to_mailgun().
        req_post_lambda = (lambda data=None:
                           self.assertEqual(
                               data['bcc'], feconf.ADMIN_EMAIL_ADDRESS))
        post_request = self.swap(
            mailgun_email_services, 'post_to_mailgun', req_post_lambda)
        with mailgun_api, mailgun_domain, post_request, allow_emailing:
            mailgun_email_services.send_mail(
                feconf.SYSTEM_EMAIL_ADDRESS, feconf.ADMIN_EMAIL_ADDRESS,
                'subject', 'body', 'html', bcc_admin=True)
    def test_reply_to_id_flag(self):
        """Verifies that the reply_to_id flag is working properly."""
        mailgun_api = self.swap(feconf, 'MAILGUN_API_KEY', 'api')
        mailgun_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain')
        allow_emailing = self.swap(feconf, 'CAN_SEND_EMAILS', True)
        reply_id = 123
        # Lambda function, will replace post_to_mailgun().
        req_post_lambda = (
            lambda data=None:
            self.assertEqual(
                data['h:Reply-To'],
                'reply+' + python_utils.STR(reply_id) + '@' +
                feconf.INCOMING_EMAILS_DOMAIN_NAME))
        post_request = self.swap(
            mailgun_email_services, 'post_to_mailgun', req_post_lambda)
        with mailgun_api, mailgun_domain, post_request, allow_emailing:
            mailgun_email_services.send_mail(
                feconf.SYSTEM_EMAIL_ADDRESS, feconf.ADMIN_EMAIL_ADDRESS,
                'subject', 'body', 'html',
                bcc_admin=False, reply_to_id=reply_id)
    def test_send_bulk_mail_raises_exception_for_missing_api_key(self):
        """Test that send_bulk_mail raises exception for missing
        mailgun api key.
        """
        mailgun_api_exception = (
            self.assertRaisesRegexp(
                Exception, 'Mailgun API key is not available.'))
        allow_emailing = self.swap(feconf, 'CAN_SEND_EMAILS', True)
        with mailgun_api_exception, allow_emailing:
            mailgun_email_services.send_bulk_mail(
                feconf.SYSTEM_EMAIL_ADDRESS, [feconf.ADMIN_EMAIL_ADDRESS],
                'subject', 'body', 'html')
    def test_send_bulk_mail_raises_exception_for_missing_domain_name(self):
        """Tests the missing Mailgun domain name exception for
        send_bulk_mail.
        """
        mailgun_api = self.swap(feconf, 'MAILGUN_API_KEY', 'api')
        allow_emailing = self.swap(feconf, 'CAN_SEND_EMAILS', True)
        mailgun_domain_name_exception = (
            self.assertRaisesRegexp(
                Exception, 'Mailgun domain name is not set.'))
        with mailgun_api, mailgun_domain_name_exception, allow_emailing:
            mailgun_email_services.send_bulk_mail(
                feconf.SYSTEM_EMAIL_ADDRESS, [feconf.ADMIN_EMAIL_ADDRESS],
                'subject', 'body', 'html')
    def test_send_bulk_mail_exception_for_invalid_permissions(self):
        """Tests the send_bulk_mail exception raised for invalid user
        permissions.
        """
        send_email_exception = (
            self.assertRaisesRegexp(
                Exception, 'This app cannot send emails to users.'))
        mailgun_api = self.swap(feconf, 'MAILGUN_API_KEY', 'api')
        mailgun_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain')
        with mailgun_api, mailgun_domain, send_email_exception:
            mailgun_email_services.send_bulk_mail(
                feconf.SYSTEM_EMAIL_ADDRESS, [feconf.ADMIN_EMAIL_ADDRESS],
                'subject', 'body', 'html')
    def test_send_bulk_mail_data_properly_sent(self):
        """Verifies that the data sent in send_bulk_mail is correct
        for each user in the recipient list.
        """
        mailgun_api = self.swap(feconf, 'MAILGUN_API_KEY', 'api')
        mailgun_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain')
        allow_emailing = self.swap(feconf, 'CAN_SEND_EMAILS', True)
        recipients = [feconf.ADMIN_EMAIL_ADDRESS]
        # Data that we expect to have been sent in the post_to_mailgun().
        expected = ({'from': feconf.SYSTEM_EMAIL_ADDRESS, 'to': recipients,
                     'subject': 'subject', 'text': 'body', 'html': 'html',
                     'recipient-variables': '{}'})
        # Lambda function, will replace post_to_mailgun().
        req_post_lambda = (lambda data=None:
                           self.assertDictContainsSubset(expected, data))
        post_request = self.swap(
            mailgun_email_services, 'post_to_mailgun', req_post_lambda)
        with mailgun_api, mailgun_domain, post_request, allow_emailing:
            mailgun_email_services.send_bulk_mail(
                feconf.SYSTEM_EMAIL_ADDRESS, recipients,
                'subject', 'body', 'html')
| 47.196347 | 80 | 0.654605 |
from __future__ import absolute_import
from core.platform.email import mailgun_email_services
from core.tests import test_utils
import feconf
import python_utils
class EmailTests(test_utils.GenericTestBase):
def test_post_to_mailgun(self):
swapped_urlopen = lambda x: x
swapped_request = lambda *args: args
swap_urlopen_context = self.swap(
python_utils, 'url_open', swapped_urlopen)
swap_request_context = self.swap(
python_utils, 'url_request', swapped_request)
swap_api = self.swap(feconf, 'MAILGUN_API_KEY', 'key')
swap_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain')
with swap_urlopen_context, swap_request_context, swap_api, swap_domain:
result = mailgun_email_services.post_to_mailgun({'data': 'data'})
expected = (
'https://api.mailgun.net/v3/domain/messages', 'data=data',
{'Authorization': 'Basic YXBpOmtleQ=='})
self.assertEqual(result, expected)
def test_send_mail_raises_exception_for_missing_api_key(self):
mailgun_api_exception = (
self.assertRaisesRegexp(
Exception, 'Mailgun API key is not available.'))
allow_emailing = self.swap(feconf, 'CAN_SEND_EMAILS', True)
with mailgun_api_exception, allow_emailing:
mailgun_email_services.send_mail(
feconf.SYSTEM_EMAIL_ADDRESS, feconf.ADMIN_EMAIL_ADDRESS,
'subject', 'body', 'html', bcc_admin=False)
def test_send_mail_raises_exception_for_missing_domain_name(self):
mailgun_api = self.swap(feconf, 'MAILGUN_API_KEY', 'api')
mailgun_domain_name_exception = (
self.assertRaisesRegexp(
Exception, 'Mailgun domain name is not set.'))
allow_emailing = self.swap(feconf, 'CAN_SEND_EMAILS', True)
with mailgun_api, mailgun_domain_name_exception, allow_emailing:
mailgun_email_services.send_mail(
feconf.SYSTEM_EMAIL_ADDRESS, feconf.ADMIN_EMAIL_ADDRESS,
'subject', 'body', 'html', bcc_admin=False)
def test_send_mail_raises_exception_for_invalid_permissions(self):
send_email_exception = (
self.assertRaisesRegexp(
Exception, 'This app cannot send emails to users.'))
mailgun_api = self.swap(feconf, 'MAILGUN_API_KEY', 'api')
mailgun_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain')
with mailgun_api, mailgun_domain, send_email_exception:
mailgun_email_services.send_mail(
feconf.SYSTEM_EMAIL_ADDRESS, feconf.ADMIN_EMAIL_ADDRESS,
'subject', 'body', 'html', bcc_admin=False)
def test_send_mail_data_properly_sent(self):
mailgun_api = self.swap(feconf, 'MAILGUN_API_KEY', 'api')
mailgun_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain')
allow_emailing = self.swap(feconf, 'CAN_SEND_EMAILS', True)
expected = {'from': feconf.SYSTEM_EMAIL_ADDRESS,
'to': feconf.ADMIN_EMAIL_ADDRESS,
'subject': 'subject',
'text': 'body',
'html': 'html'}
req_post_lambda = (lambda data=None:
self.assertDictContainsSubset(expected, data))
post_request = self.swap(
mailgun_email_services, 'post_to_mailgun', req_post_lambda)
with mailgun_api, mailgun_domain, post_request, allow_emailing:
mailgun_email_services.send_mail(
feconf.SYSTEM_EMAIL_ADDRESS, feconf.ADMIN_EMAIL_ADDRESS,
'subject', 'body', 'html', bcc_admin=False)
def test_bcc_admin_flag(self):
mailgun_api = self.swap(feconf, 'MAILGUN_API_KEY', 'api')
mailgun_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain')
allow_emailing = self.swap(feconf, 'CAN_SEND_EMAILS', True)
req_post_lambda = (lambda data=None:
self.assertEqual(
data['bcc'], feconf.ADMIN_EMAIL_ADDRESS))
post_request = self.swap(
mailgun_email_services, 'post_to_mailgun', req_post_lambda)
with mailgun_api, mailgun_domain, post_request, allow_emailing:
mailgun_email_services.send_mail(
feconf.SYSTEM_EMAIL_ADDRESS, feconf.ADMIN_EMAIL_ADDRESS,
'subject', 'body', 'html', bcc_admin=True)
def test_reply_to_id_flag(self):
mailgun_api = self.swap(feconf, 'MAILGUN_API_KEY', 'api')
mailgun_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain')
allow_emailing = self.swap(feconf, 'CAN_SEND_EMAILS', True)
reply_id = 123
req_post_lambda = (
lambda data=None:
self.assertEqual(
data['h:Reply-To'],
'reply+' + python_utils.STR(reply_id) + '@' +
feconf.INCOMING_EMAILS_DOMAIN_NAME))
post_request = self.swap(
mailgun_email_services, 'post_to_mailgun', req_post_lambda)
with mailgun_api, mailgun_domain, post_request, allow_emailing:
mailgun_email_services.send_mail(
feconf.SYSTEM_EMAIL_ADDRESS, feconf.ADMIN_EMAIL_ADDRESS,
'subject', 'body', 'html',
bcc_admin=False, reply_to_id=reply_id)
def test_send_bulk_mail_raises_exception_for_missing_api_key(self):
mailgun_api_exception = (
self.assertRaisesRegexp(
Exception, 'Mailgun API key is not available.'))
allow_emailing = self.swap(feconf, 'CAN_SEND_EMAILS', True)
with mailgun_api_exception, allow_emailing:
mailgun_email_services.send_bulk_mail(
feconf.SYSTEM_EMAIL_ADDRESS, [feconf.ADMIN_EMAIL_ADDRESS],
'subject', 'body', 'html')
def test_send_bulk_mail_raises_exception_for_missing_domain_name(self):
mailgun_api = self.swap(feconf, 'MAILGUN_API_KEY', 'api')
allow_emailing = self.swap(feconf, 'CAN_SEND_EMAILS', True)
mailgun_domain_name_exception = (
self.assertRaisesRegexp(
Exception, 'Mailgun domain name is not set.'))
with mailgun_api, mailgun_domain_name_exception, allow_emailing:
mailgun_email_services.send_bulk_mail(
feconf.SYSTEM_EMAIL_ADDRESS, [feconf.ADMIN_EMAIL_ADDRESS],
'subject', 'body', 'html')
def test_send_bulk_mail_exception_for_invalid_permissions(self):
send_email_exception = (
self.assertRaisesRegexp(
Exception, 'This app cannot send emails to users.'))
mailgun_api = self.swap(feconf, 'MAILGUN_API_KEY', 'api')
mailgun_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain')
with mailgun_api, mailgun_domain, send_email_exception:
mailgun_email_services.send_bulk_mail(
feconf.SYSTEM_EMAIL_ADDRESS, [feconf.ADMIN_EMAIL_ADDRESS],
'subject', 'body', 'html')
def test_send_bulk_mail_data_properly_sent(self):
mailgun_api = self.swap(feconf, 'MAILGUN_API_KEY', 'api')
mailgun_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain')
allow_emailing = self.swap(feconf, 'CAN_SEND_EMAILS', True)
recipients = [feconf.ADMIN_EMAIL_ADDRESS]
expected = ({'from': feconf.SYSTEM_EMAIL_ADDRESS, 'to': recipients,
'subject': 'subject', 'text': 'body', 'html': 'html',
'recipient-variables': '{}'})
req_post_lambda = (lambda data=None:
self.assertDictContainsSubset(expected, data))
post_request = self.swap(
mailgun_email_services, 'post_to_mailgun', req_post_lambda)
with mailgun_api, mailgun_domain, post_request, allow_emailing:
mailgun_email_services.send_bulk_mail(
feconf.SYSTEM_EMAIL_ADDRESS, recipients,
'subject', 'body', 'html')
| true | true |
1c2d6d5e6c094413dfde7eaea42872c1563a08c2 | 2,332 | py | Python | openmdao/core/tests/test_reconf_parallel_group.py | onodip/OpenMDAO | 96a99806fb3a547b881d2ad3da2733bca9978567 | [
"Apache-2.0"
] | null | null | null | openmdao/core/tests/test_reconf_parallel_group.py | onodip/OpenMDAO | 96a99806fb3a547b881d2ad3da2733bca9978567 | [
"Apache-2.0"
] | null | null | null | openmdao/core/tests/test_reconf_parallel_group.py | onodip/OpenMDAO | 96a99806fb3a547b881d2ad3da2733bca9978567 | [
"Apache-2.0"
] | null | null | null | from __future__ import division
import numpy as np
import unittest
from openmdao.api import Problem, Group, IndepVarComp, ExplicitComponent, ExecComp
from openmdao.api import NewtonSolver, PETScKrylov, NonlinearBlockGS, LinearBlockGS
from openmdao.utils.assert_utils import assert_rel_error
try:
from openmdao.parallel_api import PETScVector
except ImportError:
PETScVector = None
class ReconfGroup(Group):
    """Cyclic two-component group that alternates between a parallel and a
    serial solver configuration each time it is set up."""

    def __init__(self):
        super(ReconfGroup, self).__init__()
        # The first setup() builds the parallel variant; each call flips this.
        self.parallel = True

    def setup(self):
        self._mpi_proc_allocator.parallel = self.parallel
        if self.parallel:
            # Parallel layout: Newton + PETSc Krylov resolve the coupling.
            self.nonlinear_solver = NewtonSolver()
            self.linear_solver = PETScKrylov()
        else:
            # Serial layout: block Gauss-Seidel solvers suffice.
            self.nonlinear_solver = NonlinearBlockGS()
            self.linear_solver = LinearBlockGS()
        # Two mutually coupled components: C1.z feeds C2.y and vice versa.
        for comp_name, expression, promoted_input in (
            ('C1', 'z = 1 / 3. * y + x0', 'x0'),
            ('C2', 'z = 1 / 4. * y + x1', 'x1'),
        ):
            self.add_subsystem(
                comp_name, ExecComp(expression), promotes=[promoted_input])
        self.connect('C1.z', 'C2.y')
        self.connect('C2.z', 'C1.y')
        # Toggle so the next (re)setup builds the other configuration.
        self.parallel = not self.parallel
@unittest.skipUnless(PETScVector, "PETSc is required.")
class Test(unittest.TestCase):
    # Number of MPI processes the test framework launches for this case
    # (presumably consumed by the MPI-aware test runner -- confirm).
    N_PROCS = 2
    def test(self):
        prob = Problem(model=Group())
        prob.model.add_subsystem('Cx0', IndepVarComp('x0'), promotes=['x0'])
        prob.model.add_subsystem('Cx1', IndepVarComp('x1'), promotes=['x1'])
        prob.model.add_subsystem('g', ReconfGroup(), promotes=['*'])
        prob.setup(check=False)
        # First, run with full setup, so ReconfGroup should be a parallel group
        prob['x0'] = 6.
        prob['x1'] = 4.
        prob.run_model()
        # In the parallel configuration each rank checks only the component
        # value it can access locally.
        if prob.comm.rank == 0:
            assert_rel_error(self, prob['C1.z'], 8.0)
            print(prob['C1.z'])
        elif prob.comm.rank == 1:
            assert_rel_error(self, prob['C2.z'], 6.0)
            print(prob['C2.z'])
        # Now, reconfigure so ReconfGroup is not parallel, and x0, x1 should be preserved
        prob.model.g.resetup('reconf')
        prob.model.resetup('update')
        prob.run_model()
        # Serial configuration: both outputs are checked unconditionally.
        assert_rel_error(self, prob['C1.z'], 8.0, 1e-8)
        assert_rel_error(self, prob['C2.z'], 6.0, 1e-8)
        print(prob['C1.z'], prob['C2.z'])
# Allow running this test module directly, outside the test runner.
if __name__ == '__main__':
    unittest.main()
| 31.513514 | 89 | 0.62307 | from __future__ import division
import numpy as np
import unittest
from openmdao.api import Problem, Group, IndepVarComp, ExplicitComponent, ExecComp
from openmdao.api import NewtonSolver, PETScKrylov, NonlinearBlockGS, LinearBlockGS
from openmdao.utils.assert_utils import assert_rel_error
try:
from openmdao.parallel_api import PETScVector
except ImportError:
PETScVector = None
class ReconfGroup(Group):
def __init__(self):
super(ReconfGroup, self).__init__()
self.parallel = True
def setup(self):
self._mpi_proc_allocator.parallel = self.parallel
if self.parallel:
self.nonlinear_solver = NewtonSolver()
self.linear_solver = PETScKrylov()
else:
self.nonlinear_solver = NonlinearBlockGS()
self.linear_solver = LinearBlockGS()
self.add_subsystem('C1', ExecComp('z = 1 / 3. * y + x0'), promotes=['x0'])
self.add_subsystem('C2', ExecComp('z = 1 / 4. * y + x1'), promotes=['x1'])
self.connect('C1.z', 'C2.y')
self.connect('C2.z', 'C1.y')
self.parallel = not self.parallel
@unittest.skipUnless(PETScVector, "PETSc is required.")
class Test(unittest.TestCase):
N_PROCS = 2
def test(self):
prob = Problem(model=Group())
prob.model.add_subsystem('Cx0', IndepVarComp('x0'), promotes=['x0'])
prob.model.add_subsystem('Cx1', IndepVarComp('x1'), promotes=['x1'])
prob.model.add_subsystem('g', ReconfGroup(), promotes=['*'])
prob.setup(check=False)
prob['x0'] = 6.
prob['x1'] = 4.
prob.run_model()
if prob.comm.rank == 0:
assert_rel_error(self, prob['C1.z'], 8.0)
print(prob['C1.z'])
elif prob.comm.rank == 1:
assert_rel_error(self, prob['C2.z'], 6.0)
print(prob['C2.z'])
prob.model.g.resetup('reconf')
prob.model.resetup('update')
prob.run_model()
assert_rel_error(self, prob['C1.z'], 8.0, 1e-8)
assert_rel_error(self, prob['C2.z'], 6.0, 1e-8)
print(prob['C1.z'], prob['C2.z'])
if __name__ == '__main__':
unittest.main()
| true | true |
1c2d6d80e0bf81021a0b116517fff0995fc983a1 | 77,921 | py | Python | src/quart/app.py | Dunkledore/quart | 803c8678b083895f4ece35fccb6aca56e189ee0a | [
"MIT"
] | null | null | null | src/quart/app.py | Dunkledore/quart | 803c8678b083895f4ece35fccb6aca56e189ee0a | [
"MIT"
] | null | null | null | src/quart/app.py | Dunkledore/quart | 803c8678b083895f4ece35fccb6aca56e189ee0a | [
"MIT"
] | null | null | null | from __future__ import annotations
import asyncio
import signal
import sys
import warnings
from collections import defaultdict, OrderedDict
from datetime import timedelta
from itertools import chain
from logging import Logger
from pathlib import Path
from types import TracebackType
from typing import (
Any,
AnyStr,
Awaitable,
Callable,
cast,
Coroutine,
Dict,
IO,
Iterable,
List,
Optional,
Set,
Tuple,
Type,
Union,
ValuesView,
)
from hypercorn.asyncio import serve
from hypercorn.config import Config as HyperConfig
from werkzeug.datastructures import Headers
from werkzeug.routing import MapAdapter
from .asgi import ASGIHTTPConnection, ASGILifespan, ASGIWebsocketConnection
from .blueprints import Blueprint
from .cli import AppGroup
from .config import Config, ConfigAttribute, DEFAULT_CONFIG
from .ctx import (
_AppCtxGlobals,
_request_ctx_stack,
_websocket_ctx_stack,
AppContext,
has_request_context,
has_websocket_context,
RequestContext,
WebsocketContext,
)
from .exceptions import all_http_exceptions, HTTPException
from .globals import g, request, session
from .helpers import (
_endpoint_from_view_func,
find_package,
get_debug_flag,
get_env,
get_flashed_messages,
url_for,
)
from .json import JSONDecoder, JSONEncoder, jsonify, tojson_filter
from .logging import create_logger, create_serving_logger
from .routing import QuartMap, QuartRule
from .sessions import SecureCookieSession, SecureCookieSessionInterface, Session
from .signals import (
appcontext_tearing_down,
got_request_exception,
got_websocket_exception,
request_finished,
request_started,
request_tearing_down,
websocket_finished,
websocket_started,
websocket_tearing_down,
)
from .static import PackageStatic
from .templating import _default_template_context_processor, DispatchingJinjaLoader, Environment
from .testing import (
make_test_body_with_headers,
make_test_headers_path_and_query_string,
no_op_push,
QuartClient,
sentinel,
)
from .typing import FilePath, ResponseReturnValue
from .utils import file_path_to_path, is_coroutine_function, run_sync
from .wrappers import BaseRequestWebsocket, Request, Response, Websocket
AppOrBlueprintKey = Optional[str] # The App key is None, whereas blueprints are named
def _convert_timedelta(value: Union[float, timedelta]) -> timedelta:
if not isinstance(value, timedelta):
return timedelta(seconds=value)
return value
class Quart(PackageStatic):
"""The web framework class, handles requests and returns responses.
The primary method from a serving viewpoint is
:meth:`~quart.app.Quart.handle_request`, from an application
viewpoint all the other methods are vital.
This can be extended in many ways, with most methods designed with
this in mind. Additionally any of the classes listed as attributes
can be replaced.
Attributes:
app_ctx_globals_class: The class to use for the ``g`` object
asgi_http_class: The class to use to handle the ASGI HTTP
protocol.
asgi_lifespan_class: The class to use to handle the ASGI
lifespan protocol.
asgi_websocket_class: The class to use to handle the ASGI
websocket protocol.
config_class: The class to use for the configuration.
env: The name of the environment the app is running on.
debug: Wrapper around configuration DEBUG value, in many places
this will result in more output if True. If unset, debug
mode will be activated if environ is set to 'development'.
jinja_environment: The class to use for the jinja environment.
jinja_options: The default options to set when creating the jinja
environment.
json_decoder: The decoder for JSON data.
json_encoder: The encoder for JSON data.
permanent_session_lifetime: Wrapper around configuration
PERMANENT_SESSION_LIFETIME value. Specifies how long the session
data should survive.
request_class: The class to use for requests.
response_class: The class to user for responses.
secret_key: Warpper around configuration SECRET_KEY value. The app
secret for signing sessions.
session_cookie_name: Wrapper around configuration
SESSION_COOKIE_NAME, use to specify the cookie name for session
data.
session_interface: The class to use as the session interface.
url_map_class: The class to map rules to endpoints.
url_rule_class: The class to use for URL rules.
websocket_class: The class to use for websockets.
"""
app_ctx_globals_class = _AppCtxGlobals
asgi_http_class = ASGIHTTPConnection
asgi_lifespan_class = ASGILifespan
asgi_websocket_class = ASGIWebsocketConnection
config_class = Config
debug = ConfigAttribute("DEBUG")
env = ConfigAttribute("ENV")
jinja_environment = Environment
jinja_options = {
"autoescape": True,
"extensions": ["jinja2.ext.autoescape", "jinja2.ext.with_"],
}
json_decoder = JSONDecoder
json_encoder = JSONEncoder
lock_class = asyncio.Lock
permanent_session_lifetime = ConfigAttribute(
"PERMANENT_SESSION_LIFETIME", converter=_convert_timedelta
)
request_class = Request
response_class = Response
secret_key = ConfigAttribute("SECRET_KEY")
send_file_max_age_default = ConfigAttribute(
"SEND_FILE_MAX_AGE_DEFAULT", converter=_convert_timedelta
)
session_cookie_name = ConfigAttribute("SESSION_COOKIE_NAME")
session_interface = SecureCookieSessionInterface()
test_client_class = QuartClient
testing = ConfigAttribute("TESTING")
url_map_class = QuartMap
url_rule_class = QuartRule
websocket_class = Websocket
    def __init__(
        self,
        import_name: str,
        static_url_path: Optional[str] = None,
        static_folder: Optional[str] = "static",
        static_host: Optional[str] = None,
        host_matching: bool = False,
        subdomain_matching: bool = False,
        template_folder: Optional[str] = "templates",
        root_path: Optional[str] = None,
        instance_path: Optional[str] = None,
        instance_relative_config: bool = False,
    ) -> None:
        """Construct a Quart web application.

        Use to create a new web application to which requests should
        be handled, as specified by the various attached url
        rules. See also :class:`~quart.static.PackageStatic` for
        additional constructor arguments.

        Arguments:
            import_name: The name at import of the application, use
                ``__name__`` unless there is a specific issue.
            static_url_path: URL path under which static files are served.
            static_folder: Folder (relative to the app root) holding
                static files.
            static_host: Host to serve static files on; required when
                ``host_matching`` is enabled and a static folder exists.
            host_matching: Optionally choose to match the host to the
                configured host on request (404 if no match).
            subdomain_matching: Enable subdomain matching on the URL map.
            template_folder: Folder holding the Jinja templates.
            root_path: Override for the application root path.
            instance_path: Optional path to an instance folder, for
                deployment specific settings and files.
            instance_relative_config: If True load the config from a
                path relative to the instance path.

        Raises:
            ValueError: If ``instance_path`` is not absolute, or
                ``static_host``/``host_matching`` are inconsistently set.
        """
        super().__init__(import_name, template_folder, root_path, static_folder, static_url_path)
        # Resolve (or discover) the instance folder; it must be absolute so
        # instance resources can be opened without ambiguity.
        instance_path = Path(instance_path) if instance_path else self.auto_find_instance_path()
        if not instance_path.is_absolute():
            raise ValueError("The instance_path must be an absolute path.")
        self.instance_path = instance_path
        self.config = self.make_config(instance_relative_config)
        # Registries of lifecycle hooks; keyed by blueprint name, with the
        # ``None`` key holding app-wide functions.
        self.after_request_funcs: Dict[
            AppOrBlueprintKey, List[Callable[[Response], Awaitable[Response]]]
        ] = defaultdict(list)
        self.after_serving_funcs: List[Callable[[], Awaitable[None]]] = []
        self.after_websocket_funcs: Dict[
            AppOrBlueprintKey, List[Callable[[Response], Awaitable[Optional[Response]]]]
        ] = defaultdict(list)
        self.before_first_request_funcs: List[Callable[[], Awaitable[None]]] = []
        self.before_request_funcs: Dict[
            AppOrBlueprintKey, List[Callable[[], Awaitable[None]]]
        ] = defaultdict(list)
        self.before_serving_funcs: List[Callable[[], Awaitable[None]]] = []
        self.before_websocket_funcs: Dict[
            AppOrBlueprintKey, List[Callable[[], Awaitable[None]]]
        ] = defaultdict(list)
        self.blueprints: Dict[str, Blueprint] = OrderedDict()
        # Error handlers, keyed by blueprint then exception type.
        self.error_handler_spec: Dict[
            AppOrBlueprintKey, Dict[Exception, Callable[[Exception], Awaitable[None]]]
        ] = defaultdict(dict)
        self.extensions: Dict[str, Any] = {}
        self.shell_context_processors: List[Callable[[], None]] = []
        self.teardown_appcontext_funcs: List[
            Callable[[Optional[BaseException]], Awaitable[None]]
        ] = []
        self.teardown_request_funcs: Dict[
            AppOrBlueprintKey, List[Callable[[Optional[BaseException]], Awaitable[None]]]
        ] = defaultdict(list)
        self.teardown_websocket_funcs: Dict[
            AppOrBlueprintKey, List[Callable[[Optional[BaseException]], Awaitable[None]]]
        ] = defaultdict(list)
        self.template_context_processors: Dict[
            AppOrBlueprintKey, List[Callable[[], Awaitable[Dict[str, Any]]]]
        ] = defaultdict(list)
        # URL building/routing state.
        self.url_build_error_handlers: List[Callable[[Exception, str, dict], str]] = []
        self.url_default_functions: Dict[AppOrBlueprintKey, List[Callable]] = defaultdict(list)
        self.url_map = self.url_map_class(host_matching=host_matching)
        self.subdomain_matching = subdomain_matching
        self.url_value_preprocessors: Dict[
            AppOrBlueprintKey, List[Callable[[str, dict], None]]
        ] = defaultdict(list)
        self.view_functions: Dict[str, Callable] = {}
        # First-request tracking; the lock serialises the before-first-request
        # functions across concurrent initial requests.
        self._got_first_request = False
        self._first_request_lock = self.lock_class()
        # Lazily-created cached objects (see the jinja_env/logger properties).
        self._jinja_env: Optional[Environment] = None
        self._logger: Optional[Logger] = None
        self.cli = AppGroup(self.name)
        if self.has_static_folder:
            # static_host and host_matching must be set together.
            if bool(static_host) != host_matching:
                raise ValueError(
                    "static_host must be set if there is a static folder and host_matching is "
                    "enabled"
                )
            self.add_url_rule(
                f"{self.static_url_path}/<path:filename>",
                "static",
                self.send_static_file,
                host=static_host,
            )
        self.template_context_processors[None] = [_default_template_context_processor]
@property
def name(self) -> str:
"""The name of this application.
This is taken from the :attr:`import_name` and is used for
debugging purposes.
"""
if self.import_name == "__main__":
path = Path(getattr(sys.modules["__main__"], "__file__", "__main__.py"))
return path.stem
return self.import_name
@property
def propagate_exceptions(self) -> bool:
"""Return true if exceptions should be propagated into debug pages.
If false the exception will be handled. See the
``PROPAGATE_EXCEPTIONS`` config settin.
"""
propagate = self.config["PROPAGATE_EXCEPTIONS"]
if propagate is not None:
return propagate
else:
return self.debug or self.testing
@property
def logger(self) -> Logger:
"""A :class:`logging.Logger` logger for the app.
This can be used to log messages in a format as defined in the
app configuration, for example,
.. code-block:: python
app.logger.debug("Request method %s", request.method)
app.logger.error("Error, of some kind")
"""
if self._logger is None:
self._logger = create_logger(self)
return self._logger
@property
def jinja_env(self) -> Environment:
"""The jinja environment used to load templates."""
if self._jinja_env is None:
self._jinja_env = self.create_jinja_environment()
return self._jinja_env
@property
def got_first_request(self) -> bool:
"""Return if the app has received a request."""
return self._got_first_request
def auto_find_instance_path(self) -> Path:
"""Locates the instace_path if it was not provided
"""
prefix, package_path = find_package(self.import_name)
if prefix is None:
return package_path / "instance"
return prefix / "var" / f"{self.name}-instance"
def make_config(self, instance_relative: bool = False) -> Config:
"""Create and return the configuration with appropriate defaults."""
config = self.config_class(
self.instance_path if instance_relative else self.root_path, DEFAULT_CONFIG
)
config["ENV"] = get_env()
config["DEBUG"] = get_debug_flag()
return config
def open_instance_resource(self, path: FilePath, mode: str = "rb") -> IO[AnyStr]:
"""Open a file for reading.
Use as
.. code-block:: python
with app.open_instance_resouce(path) as file_:
file_.read()
"""
return open(self.instance_path / file_path_to_path(path), mode)
def create_url_adapter(self, request: Optional[BaseRequestWebsocket]) -> Optional[MapAdapter]:
"""Create and return a URL adapter.
This will create the adapter based on the request if present
otherwise the app configuration.
"""
if request is not None:
subdomain = (
(self.url_map.default_subdomain or None) if not self.subdomain_matching else None
)
return self.url_map.bind_to_request(request, subdomain, self.config["SERVER_NAME"])
if self.config["SERVER_NAME"] is not None:
scheme = "https" if self.config["PREFER_SECURE_URLS"] else "http"
return self.url_map.bind(self.config["SERVER_NAME"], url_scheme=scheme)
return None
def create_jinja_environment(self) -> Environment:
"""Create and return the jinja environment.
This will create the environment based on the
:attr:`jinja_options` and configuration settings. The
environment will include the Quart globals by default.
"""
options = dict(self.jinja_options)
if "autoescape" not in options:
options["autoescape"] = self.select_jinja_autoescape
if "auto_reload" not in options:
options["auto_reload"] = self.config["TEMPLATES_AUTO_RELOAD"] or self.debug
jinja_env = self.jinja_environment(self, **options)
jinja_env.globals.update(
{
"config": self.config,
"g": g,
"get_flashed_messages": get_flashed_messages,
"request": request,
"session": session,
"url_for": url_for,
}
)
jinja_env.filters["tojson"] = tojson_filter
return jinja_env
def create_global_jinja_loader(self) -> DispatchingJinjaLoader:
"""Create and return a global (not blueprint specific) Jinja loader."""
return DispatchingJinjaLoader(self)
def select_jinja_autoescape(self, filename: str) -> bool:
"""Returns True if the filename indicates that it should be escaped."""
if filename is None:
return True
return Path(filename).suffix in {".htm", ".html", ".xhtml", ".xml"}
async def update_template_context(self, context: dict) -> None:
"""Update the provided template context.
This adds additional context from the various template context
processors.
Arguments:
context: The context to update (mutate).
"""
processors = self.template_context_processors[None]
if has_request_context():
blueprint = _request_ctx_stack.top.request.blueprint
if blueprint is not None and blueprint in self.template_context_processors:
processors = chain( # type: ignore
processors, self.template_context_processors[blueprint]
)
extra_context: dict = {}
for processor in processors:
extra_context.update(await processor())
original = context.copy()
context.update(extra_context)
context.update(original)
def make_shell_context(self) -> dict:
"""Create a context for interactive shell usage.
The :attr:`shell_context_processors` can be used to add
additional context.
"""
context = {"app": self, "g": g}
for processor in self.shell_context_processors:
context.update(processor())
return context
def route(
self,
path: str,
methods: Optional[List[str]] = None,
endpoint: Optional[str] = None,
defaults: Optional[dict] = None,
host: Optional[str] = None,
subdomain: Optional[str] = None,
*,
provide_automatic_options: Optional[bool] = None,
strict_slashes: Optional[bool] = None,
) -> Callable:
"""Add a route to the application.
This is designed to be used as a decorator, if used to
decorate a synchronous function, the function will be wrapped
in :func:`~quart.utils.run_sync` and run in a thread executor
(with the wrapped function returned). An example usage,
.. code-block:: python
@app.route('/')
async def route():
...
Arguments:
path: The path to route on, should start with a ``/``.
methods: List of HTTP verbs the function routes.
defaults: A dictionary of variables to provide automatically, use
to provide a simpler default path for a route, e.g. to allow
for ``/book`` rather than ``/book/0``,
.. code-block:: python
@app.route('/book', defaults={'page': 0})
@app.route('/book/<int:page>')
def book(page):
...
host: The full host name for this route (should include subdomain
if needed) - cannot be used with subdomain.
subdomain: A subdomain for this specific route.
provide_automatic_options: Optionally False to prevent
OPTION handling.
strict_slashes: Strictly match the trailing slash present in the
path. Will redirect a leaf (no slash) to a branch (with slash).
"""
def decorator(func: Callable) -> Callable:
self.add_url_rule(
path,
endpoint,
func,
methods,
defaults=defaults,
host=host,
subdomain=subdomain,
provide_automatic_options=provide_automatic_options,
strict_slashes=strict_slashes,
)
return func
return decorator
    def add_url_rule(
        self,
        path: str,
        endpoint: Optional[str] = None,
        view_func: Optional[Callable] = None,
        methods: Optional[Iterable[str]] = None,
        defaults: Optional[dict] = None,
        host: Optional[str] = None,
        subdomain: Optional[str] = None,
        *,
        provide_automatic_options: Optional[bool] = None,
        is_websocket: bool = False,
        strict_slashes: Optional[bool] = None,
        merge_slashes: Optional[bool] = None,
    ) -> None:
        """Add a route/url rule to the application.

        This is designed to be used on the application directly. An
        example usage,

        .. code-block:: python

            def route():
                ...

            app.add_url_rule('/', view_func=route)

        Arguments:
            path: The path to route on, should start with a ``/``.
            endpoint: Optional endpoint name, if not present the
                function name is used.
            view_func: Callable that returns a response.
            methods: List of HTTP verbs the function routes.
            defaults: A dictionary of variables to provide automatically, use
                to provide a simpler default path for a route, e.g. to allow
                for ``/book`` rather than ``/book/0``,

            .. code-block:: python

                @app.route('/book', defaults={'page': 0})
                @app.route('/book/<int:page>')
                def book(page):
                    ...

            host: The full host name for this route (should include subdomain
                if needed) - cannot be used with subdomain.
            subdomain: A subdomain for this specific route.
            provide_automatic_options: Optionally False to prevent
                OPTION handling.
            is_websocket: Whether this rule serves a websocket rather
                than an HTTP request.
            strict_slashes: Strictly match the trailing slash present in the
                path. Will redirect a leaf (no slash) to a branch (with slash).
            merge_slashes: Merge consecutive slashes to a single slash (unless
                as part of the path variable).

        Raises:
            AssertionError: If a different view function is already
                registered for the endpoint.
        """
        # Derive the endpoint from the view function name when not given.
        endpoint = endpoint or _endpoint_from_view_func(view_func)
        # Synchronous view functions are wrapped so they run in an executor.
        handler = self.ensure_async(view_func)
        if methods is None:
            # A view function may declare its own methods attribute.
            methods = getattr(view_func, "methods", ["GET"])
        methods = cast(Set[str], set(methods))
        required_methods = set(getattr(view_func, "required_methods", set()))
        # Decide whether OPTIONS should be handled automatically: an explicit
        # argument wins, then a view-function attribute, then the default of
        # "yes, unless the view handles OPTIONS itself".
        if provide_automatic_options is None:
            automatic_options = getattr(view_func, "provide_automatic_options", None)
            if automatic_options is None:
                automatic_options = "OPTIONS" not in methods
        else:
            automatic_options = provide_automatic_options
        if automatic_options:
            required_methods.add("OPTIONS")
        methods.update(required_methods)
        rule = self.url_rule_class(
            path,
            methods=methods,
            endpoint=endpoint,
            host=host,
            subdomain=subdomain,
            defaults=defaults,
            websocket=is_websocket,
            strict_slashes=strict_slashes,
            merge_slashes=merge_slashes,
            provide_automatic_options=automatic_options,
        )
        self.url_map.add(rule)
        if handler is not None:
            old_handler = self.view_functions.get(endpoint)
            # Unwrap a previously async-wrapped handler so the identity
            # comparison below is against the original view function.
            if getattr(old_handler, "_quart_async_wrapper", False):
                old_handler = old_handler.__wrapped__  # type: ignore
            if old_handler is not None and old_handler != view_func:
                raise AssertionError(f"Handler is overwriting existing for endpoint {endpoint}")
        self.view_functions[endpoint] = handler
def websocket(
self,
path: str,
endpoint: Optional[str] = None,
defaults: Optional[dict] = None,
host: Optional[str] = None,
subdomain: Optional[str] = None,
*,
strict_slashes: Optional[bool] = None,
) -> Callable:
"""Add a websocket to the application.
This is designed to be used as a decorator, if used to
decorate a synchronous function, the function will be wrapped
in :func:`~quart.utils.run_sync` and run in a thread executor
(with the wrapped function returned). An example usage,
.. code-block:: python
@app.websocket('/')
async def websocket_route():
...
Arguments:
path: The path to route on, should start with a ``/``.
defaults: A dictionary of variables to provide automatically, use
to provide a simpler default path for a route, e.g. to allow
for ``/book`` rather than ``/book/0``,
.. code-block:: python
@app.websocket('/book', defaults={'page': 0})
@app.websocket('/book/<int:page>')
def book(page):
...
host: The full host name for this route (should include subdomain
if needed) - cannot be used with subdomain.
subdomain: A subdomain for this specific route.
strict_slashes: Strictly match the trailing slash present in the
path. Will redirect a leaf (no slash) to a branch (with slash).
"""
def decorator(func: Callable) -> Callable:
self.add_websocket(
path,
endpoint,
func,
defaults=defaults,
host=host,
subdomain=subdomain,
strict_slashes=strict_slashes,
)
return func
return decorator
def add_websocket(
self,
path: str,
endpoint: Optional[str] = None,
view_func: Optional[Callable] = None,
defaults: Optional[dict] = None,
host: Optional[str] = None,
subdomain: Optional[str] = None,
*,
strict_slashes: Optional[bool] = None,
) -> None:
"""Add a websocket url rule to the application.
This is designed to be used on the application directly. An
example usage,
.. code-block:: python
def websocket_route():
...
app.add_websocket('/', websocket_route)
Arguments:
path: The path to route on, should start with a ``/``.
func: Callable that returns a response.
endpoint: Optional endpoint name, if not present the
function name is used.
defaults: A dictionary of variables to provide automatically, use
to provide a simpler default path for a route, e.g. to allow
for ``/book`` rather than ``/book/0``,
.. code-block:: python
@app.websocket('/book', defaults={'page': 0})
@app.websocket('/book/<int:page>')
def book(page):
...
host: The full host name for this route (should include subdomain
if needed) - cannot be used with subdomain.
subdomain: A subdomain for this specific route.
strict_slashes: Strictly match the trailing slash present in the
path. Will redirect a leaf (no slash) to a branch (with slash).
"""
return self.add_url_rule(
path,
endpoint,
view_func,
{"GET"},
defaults=defaults,
host=host,
subdomain=subdomain,
provide_automatic_options=False,
is_websocket=True,
strict_slashes=strict_slashes,
)
def endpoint(self, endpoint: str) -> Callable:
"""Register a function as an endpoint.
This is designed to be used as a decorator, if used to
decorate a synchronous function, the function will be wrapped
in :func:`~quart.utils.run_sync` and run in a thread executor
(with the wrapped function returned). An example usage,
.. code-block:: python
@app.endpoint('name')
async def endpoint():
...
Arguments:
endpoint: The endpoint name to use.
"""
def decorator(func: Callable) -> Callable:
handler = self.ensure_async(func)
self.view_functions[endpoint] = handler
return handler
return decorator
def errorhandler(self, error: Union[Type[Exception], int]) -> Callable:
"""Register a function as an error handler.
This is designed to be used as a decorator. An example usage,
.. code-block:: python
@app.errorhandler(500)
def error_handler():
return "Error", 500
Arguments:
error: The error code or Exception to handle.
"""
def decorator(func: Callable) -> Callable:
self.register_error_handler(error, func)
return func
return decorator
def register_error_handler(
self,
error: Union[Type[Exception], int],
func: Union[Callable[[Exception], None], Callable[[Exception], Awaitable[None]]],
name: AppOrBlueprintKey = None,
) -> None:
"""Register a function as an error handler.
This is designed to be used on the application directly. An
example usage,
.. code-block:: python
def error_handler():
return "Error", 500
app.register_error_handler(500, error_handler)
Arguments:
error: The error code or Exception to handle.
func: The function to handle the error.
name: Optional blueprint key name.
"""
handler = self.ensure_async(func)
if isinstance(error, int):
error = all_http_exceptions[error]
self.error_handler_spec[name][error] = handler # type: ignore
def template_filter(self, name: Optional[str] = None) -> Callable:
"""Add a template filter.
This is designed to be used as a decorator. An example usage,
.. code-block:: python
@app.template_filter('name')
def to_upper(value):
return value.upper()
Arguments:
name: The filter name (defaults to function name).
"""
def decorator(func: Callable) -> Callable:
self.add_template_filter(func, name=name)
return func
return decorator
def add_template_filter(self, func: Callable, name: Optional[str] = None) -> None:
"""Add a template filter.
This is designed to be used on the application directly. An
example usage,
.. code-block:: python
def to_upper(value):
return value.upper()
app.add_template_filter(to_upper)
Arguments:
func: The function that is the filter.
name: The filter name (defaults to function name).
"""
self.jinja_env.filters[name or func.__name__] = func
def template_test(self, name: Optional[str] = None) -> Callable:
"""Add a template test.
This is designed to be used as a decorator. An example usage,
.. code-block:: python
@app.template_test('name')
def is_upper(value):
return value.isupper()
Arguments:
name: The test name (defaults to function name).
"""
def decorator(func: Callable) -> Callable:
self.add_template_test(func, name=name)
return func
return decorator
def add_template_test(self, func: Callable, name: Optional[str] = None) -> None:
"""Add a template test.
This is designed to be used on the application directly. An
example usage,
.. code-block:: python
def is_upper(value):
return value.isupper()
app.add_template_test(is_upper)
Arguments:
func: The function that is the test.
name: The test name (defaults to function name).
"""
self.jinja_env.tests[name or func.__name__] = func
def template_global(self, name: Optional[str] = None) -> Callable:
"""Add a template global.
This is designed to be used as a decorator. An example usage,
.. code-block:: python
@app.template_global('name')
def five():
return 5
Arguments:
name: The global name (defaults to function name).
"""
def decorator(func: Callable) -> Callable:
self.add_template_global(func, name=name)
return func
return decorator
def add_template_global(self, func: Callable, name: Optional[str] = None) -> None:
"""Add a template global.
This is designed to be used on the application directly. An
example usage,
.. code-block:: python
def five():
return 5
app.add_template_global(five)
Arguments:
func: The function that is the global.
name: The global name (defaults to function name).
"""
self.jinja_env.globals[name or func.__name__] = func
def context_processor(
self,
func: Union[Callable[[], Dict[str, Any]], Callable[[], Awaitable[Dict[str, Any]]]],
name: AppOrBlueprintKey = None,
) -> Callable[[], Awaitable[Dict[str, Any]]]:
"""Add a template context processor.
This is designed to be used as a decorator, if used to
decorate a synchronous function, the function will be wrapped
in :func:`~quart.utils.run_sync` and run in a thread executor
(with the wrapped function returned). An example usage,
.. code-block:: python
@app.context_processor
async def update_context(context):
return context
"""
handler = self.ensure_async(func)
self.template_context_processors[name].append(handler)
return handler
def shell_context_processor(self, func: Callable[[], None]) -> Callable:
"""Add a shell context processor.
This is designed to be used as a decorator. An example usage,
.. code-block:: python
@app.shell_context_processor
def additional_context():
return context
"""
self.shell_context_processors.append(func)
return func
def url_defaults(self, func: Callable, name: AppOrBlueprintKey = None) -> Callable:
"""Add a url default preprocessor.
This is designed to be used as a decorator. An example usage,
.. code-block:: python
@app.url_defaults
def default(endpoint, values):
...
"""
self.url_default_functions[name].append(func)
return func
def url_value_preprocessor(
self, func: Callable[[str, dict], None], name: AppOrBlueprintKey = None
) -> Callable:
"""Add a url value preprocessor.
This is designed to be used as a decorator. An example usage,
.. code-block:: python
@app.url_value_preprocessor
def value_preprocessor(endpoint, view_args):
...
"""
self.url_value_preprocessors[name].append(func)
return func
def inject_url_defaults(self, endpoint: str, values: dict) -> None:
"""Injects default URL values into the passed values dict.
This is used to assist when building urls, see
:func:`~quart.helpers.url_for`.
"""
functions = self.url_default_functions[None]
if "." in endpoint:
blueprint = endpoint.rsplit(".", 1)[0]
functions = chain(functions, self.url_default_functions[blueprint]) # type: ignore
for function in functions:
function(endpoint, values)
def handle_url_build_error(self, error: Exception, endpoint: str, values: dict) -> str:
"""Handle a build error.
Ideally this will return a valid url given the error endpoint
and values.
"""
for handler in self.url_build_error_handlers:
result = handler(error, endpoint, values)
if result is not None:
return result
raise error
def _find_exception_handler(
self, error: Exception
) -> Optional[Callable[[Exception], Awaitable[None]]]:
if _request_ctx_stack.top is not None:
blueprint = _request_ctx_stack.top.request.blueprint
elif _websocket_ctx_stack.top is not None:
blueprint = _websocket_ctx_stack.top.websocket.blueprint
else:
blueprint = None
handler = _find_exception_handler(
error, self.error_handler_spec.get(blueprint, {}) # type: ignore
)
if handler is None:
handler = _find_exception_handler(error, self.error_handler_spec[None])
return handler
async def handle_http_exception(self, error: Exception) -> Response:
"""Handle a HTTPException subclass error.
This will attempt to find a handler for the error and if fails
will fall back to the error response.
"""
handler = self._find_exception_handler(error)
if handler is None:
return error.get_response() # type: ignore
else:
return await handler(error)
def trap_http_exception(self, error: Exception) -> bool:
"""Check it error is http and should be trapped.
Trapped errors are not handled by the
:meth:`handle_http_exception`, but instead trapped by the
outer most (or user handlers). This can be useful when
debuging to allow tracebacks to be viewed by the debug page.
"""
return self.config["TRAP_HTTP_EXCEPTIONS"]
async def handle_user_exception(self, error: Exception) -> Response:
"""Handle an exception that has been raised.
This should forward :class:`~quart.exception.HTTPException` to
:meth:`handle_http_exception`, then attempt to handle the
error. If it cannot it should reraise the error.
"""
if isinstance(error, HTTPException) and not self.trap_http_exception(error):
return await self.handle_http_exception(error)
handler = self._find_exception_handler(error)
if handler is None:
raise error
return await handler(error)
async def handle_exception(self, error: Exception) -> Response:
"""Handle an uncaught exception.
By default this switches the error response to a 500 internal
server error.
"""
await got_request_exception.send(self, exception=error)
self.log_exception(sys.exc_info())
if self.propagate_exceptions:
raise error
internal_server_error = all_http_exceptions[500]()
handler = self._find_exception_handler(internal_server_error)
if handler is None:
return internal_server_error.get_response()
else:
return await self.finalize_request(await handler(error), from_error_handler=True)
async def handle_websocket_exception(self, error: Exception) -> Optional[Response]:
"""Handle an uncaught exception.
By default this logs the exception and then re-raises it.
"""
await got_websocket_exception.send(self, exception=error)
self.log_exception(sys.exc_info())
internal_server_error = all_http_exceptions[500]()
handler = self._find_exception_handler(internal_server_error)
if handler is None:
return internal_server_error.get_response()
else:
return await self.finalize_websocket(await handler(error), from_error_handler=True)
def log_exception(self, exception_info: Tuple[type, BaseException, TracebackType]) -> None:
"""Log a exception to the :attr:`logger`.
By default this is only invoked for unhandled exceptions.
"""
if has_request_context():
request_ = _request_ctx_stack.top.request
self.logger.error(
f"Exception on request {request_.method} {request_.path}", exc_info=exception_info
)
if has_websocket_context():
websocket_ = _websocket_ctx_stack.top.websocket
self.logger.error(f"Exception on websocket {websocket_.path}", exc_info=exception_info)
def before_request(
self,
func: Union[Callable[[], None], Callable[[], Awaitable[None]]],
name: AppOrBlueprintKey = None,
) -> Callable[[], Awaitable[None]]:
"""Add a before request function.
This is designed to be used as a decorator, if used to
decorate a synchronous function, the function will be wrapped
in :func:`~quart.utils.run_sync` and run in a thread executor
(with the wrapped function returned). An example usage,
.. code-block:: python
@app.before_request
async def func():
...
Arguments:
func: The before request function itself.
name: Optional blueprint key name.
"""
handler = self.ensure_async(func)
self.before_request_funcs[name].append(handler)
return handler
def before_websocket(
self,
func: Union[Callable[[], None], Callable[[], Awaitable[None]]],
name: AppOrBlueprintKey = None,
) -> Callable[[], Awaitable[None]]:
"""Add a before websocket function.
This is designed to be used as a decorator, if used to
decorate a synchronous function, the function will be wrapped
in :func:`~quart.utils.run_sync` and run in a thread executor
(with the wrapped function returned). An example usage,
.. code-block:: python
@app.before_websocket
async def func():
...
Arguments:
func: The before websocket function itself.
name: Optional blueprint key name.
"""
handler = self.ensure_async(func)
self.before_websocket_funcs[name].append(handler)
return handler
def before_first_request(
self,
func: Union[Callable[[], None], Callable[[], Awaitable[None]]],
name: AppOrBlueprintKey = None,
) -> Callable[[], Awaitable[None]]:
"""Add a before **first** request function.
This is designed to be used as a decorator, if used to
decorate a synchronous function, the function will be wrapped
in :func:`~quart.utils.run_sync` and run in a thread executor
(with the wrapped function returned). An example usage,
.. code-block:: python
@app.before_first_request
async def func():
...
Arguments:
func: The before first request function itself.
name: Optional blueprint key name.
"""
handler = self.ensure_async(func)
self.before_first_request_funcs.append(handler)
return handler
def before_serving(
self, func: Union[Callable[[], None], Callable[[], Awaitable[None]]]
) -> Callable[[], Awaitable[None]]:
"""Add a before serving function.
This will allow the function provided to be called once before
anything is served (before any byte is received).
This is designed to be used as a decorator, if used to
decorate a synchronous function, the function will be wrapped
in :func:`~quart.utils.run_sync` and run in a thread executor
(with the wrapped function returned). An example usage,
.. code-block:: python
@app.before_serving
async def func():
...
Arguments:
func: The function itself.
"""
handler = self.ensure_async(func)
self.before_serving_funcs.append(handler)
return handler
def after_request(
self,
func: Union[Callable[[Response], Response], Callable[[Response], Awaitable[Response]]],
name: AppOrBlueprintKey = None,
) -> Callable[[Response], Awaitable[None]]:
"""Add an after request function.
This is designed to be used as a decorator, if used to
decorate a synchronous function, the function will be wrapped
in :func:`~quart.utils.run_sync` and run in a thread executor
(with the wrapped function returned). An example usage,
.. code-block:: python
@app.after_request
async def func(response):
return response
Arguments:
func: The after request function itself.
name: Optional blueprint key name.
"""
handler = self.ensure_async(func)
self.after_request_funcs[name].append(handler)
return handler
def after_websocket(
self,
func: Union[
Callable[[Response], Optional[Response]],
Callable[[Response], Awaitable[Optional[Response]]],
],
name: AppOrBlueprintKey = None,
) -> Callable[[Response], Awaitable[None]]:
"""Add an after websocket function.
This is designed to be used as a decorator, if used to
decorate a synchronous function, the function will be wrapped
in :func:`~quart.utils.run_sync` and run in a thread executor
(with the wrapped function returned). An example usage,
.. code-block:: python
@app.after_websocket
async def func(response):
return response
Arguments:
func: The after websocket function itself.
name: Optional blueprint key name.
"""
handler = self.ensure_async(func)
self.after_websocket_funcs[name].append(handler)
return handler
def after_serving(
self, func: Union[Callable[[], None], Callable[[], Awaitable[None]]]
) -> Callable[[], Awaitable[None]]:
"""Add a after serving function.
This will allow the function provided to be called once after
anything is served (after last byte is sent).
This is designed to be used as a decorator, if used to
decorate a synchronous function, the function will be wrapped
in :func:`~quart.utils.run_sync` and run in a thread executor
(with the wrapped function returned). An example usage,
.. code-block:: python
@app.after_serving
async def func():
...
Arguments:
func: The function itself.
"""
handler = self.ensure_async(func)
self.after_serving_funcs.append(handler)
return handler
def teardown_request(
self,
func: Union[
Callable[[Optional[BaseException]], None],
Callable[[Optional[BaseException]], Awaitable[None]],
],
name: AppOrBlueprintKey = None,
) -> Callable[[Optional[BaseException]], Awaitable[None]]:
"""Add a teardown request function.
This is designed to be used as a decorator, if used to
decorate a synchronous function, the function will be wrapped
in :func:`~quart.utils.run_sync` and run in a thread executor
(with the wrapped function returned). An example usage,
.. code-block:: python
@app.teardown_request
async def func():
...
Arguments:
func: The teardown request function itself.
name: Optional blueprint key name.
"""
handler = self.ensure_async(func)
self.teardown_request_funcs[name].append(handler)
return handler
def teardown_websocket(
self,
func: Union[
Callable[[Optional[BaseException]], None],
Callable[[Optional[BaseException]], Awaitable[None]],
],
name: AppOrBlueprintKey = None,
) -> Callable[[Optional[BaseException]], Awaitable[None]]:
"""Add a teardown websocket function.
This is designed to be used as a decorator, if used to
decorate a synchronous function, the function will be wrapped
in :func:`~quart.utils.run_sync` and run in a thread executor
(with the wrapped function returned). An example usage,
.. code-block:: python
@app.teardown_websocket
async def func():
...
Arguments:
func: The teardown websocket function itself.
name: Optional blueprint key name.
"""
handler = self.ensure_async(func)
self.teardown_websocket_funcs[name].append(handler)
return handler
    def teardown_appcontext(
        self,
        func: Union[
            Callable[[Optional[BaseException]], None],
            Callable[[Optional[BaseException]], Awaitable[None]],
        ],
    ) -> Callable[[Optional[BaseException]], Awaitable[None]]:
        """Add a teardown app (context) function.

        This is designed to be used as a decorator, if used to
        decorate a synchronous function, the function will be wrapped
        in :func:`~quart.utils.run_sync` and run in a thread executor
        (with the wrapped function returned). An example usage,

        .. code-block:: python

            @app.teardown_appcontext
            async def func():
                ...

        Arguments:
            func: The teardown function itself, called with the
                unhandled exception (if any) when the app context ends.
        """
        handler = self.ensure_async(func)
        self.teardown_appcontext_funcs.append(handler)
        return handler
    def register_blueprint(
        self,
        blueprint: Blueprint,
        url_prefix: Optional[str] = None,
        *,
        subdomain: Optional[str] = None,
    ) -> None:
        """Register a blueprint on the app.

        This results in the blueprint's routes, error handlers
        etc... being added to the app.

        Arguments:
            blueprint: The blueprint to register.
            url_prefix: Optional prefix to apply to all paths.
            subdomain: Optional subdomain to restrict the blueprint's
                routes to.

        Raises:
            RuntimeError: If a different blueprint with the same name is
                already registered.
        """
        first_registration = False
        # NOTE(review): re-registering the *same* blueprint object also
        # takes the else branch, so first_registration is True again —
        # confirm this is intended.
        if blueprint.name in self.blueprints and self.blueprints[blueprint.name] is not blueprint:
            raise RuntimeError(
                f"Blueprint name '{blueprint.name}' "
                f"is already registered by {self.blueprints[blueprint.name]}. "
                "Blueprints must have unique names"
            )
        else:
            self.blueprints[blueprint.name] = blueprint
            first_registration = True
        blueprint.register(self, first_registration, url_prefix=url_prefix, subdomain=subdomain)
def iter_blueprints(self) -> ValuesView[Blueprint]:
"""Return a iterator over the blueprints."""
return self.blueprints.values()
def ensure_async(self, func: Callable[..., Any]) -> Callable[..., Awaitable[Any]]:
"""Ensure that the returned func is async and calls the func.
.. versionadded:: 0.11
Override if you wish to change how synchronous functions are
run. Before Quart 0.11 this did not run the synchronous code
in an executor.
"""
if is_coroutine_function(func):
return func
else:
return run_sync(func)
async def open_session(self, request: BaseRequestWebsocket) -> Session:
"""Open and return a Session using the request."""
return await self.ensure_async(self.session_interface.open_session)(self, request)
async def make_null_session(self) -> Session:
"""Create and return a null session."""
return await self.ensure_async(self.session_interface.make_null_session)(self)
async def save_session(self, session: Session, response: Response) -> None:
"""Saves the session to the response."""
await self.ensure_async(self.session_interface.save_session)(self, session, response)
    async def do_teardown_request(
        self, exc: Optional[BaseException], request_context: Optional[RequestContext] = None
    ) -> None:
        """Teardown the request, calling the teardown functions.

        Arguments:
            exc: Any exception not handled that has caused the request
                to teardown.
            request_context: The request context, optional as Flask
                omits this argument.
        """
        request_ = (request_context or _request_ctx_stack.top).request
        functions = self.teardown_request_funcs[None]
        blueprint = request_.blueprint
        if blueprint is not None:
            # App-level teardown functions run before the blueprint's.
            functions = chain(functions, self.teardown_request_funcs[blueprint])  # type: ignore
        for function in functions:
            await function(exc)
        # Notify subscribers (e.g. extensions) that the request ended.
        await request_tearing_down.send(self, exc=exc)
    async def do_teardown_websocket(
        self, exc: Optional[BaseException], websocket_context: Optional[WebsocketContext] = None
    ) -> None:
        """Teardown the websocket, calling the teardown functions.

        Arguments:
            exc: Any exception not handled that has caused the websocket
                to teardown.
            websocket_context: The websocket context, optional as Flask
                omits this argument.
        """
        websocket_ = (websocket_context or _websocket_ctx_stack.top).websocket
        functions = self.teardown_websocket_funcs[None]
        blueprint = websocket_.blueprint
        if blueprint is not None:
            # App-level teardown functions run before the blueprint's.
            functions = chain(functions, self.teardown_websocket_funcs[blueprint])  # type: ignore
        for function in functions:
            await function(exc)
        # Notify subscribers (e.g. extensions) that the websocket ended.
        await websocket_tearing_down.send(self, exc=exc)
async def do_teardown_appcontext(self, exc: Optional[BaseException]) -> None:
"""Teardown the app (context), calling the teardown functions."""
for function in self.teardown_appcontext_funcs:
await function(exc)
await appcontext_tearing_down.send(self, exc=exc)
def app_context(self) -> AppContext:
"""Create and return an app context.
This is best used within a context, i.e.
.. code-block:: python
async with app.app_context():
...
"""
return AppContext(self)
def request_context(self, request: Request) -> RequestContext:
"""Create and return a request context.
Use the :meth:`test_request_context` whilst testing. This is
best used within a context, i.e.
.. code-block:: python
async with app.request_context(request):
...
Arguments:
request: A request to build a context around.
"""
return RequestContext(self, request)
def websocket_context(self, websocket: Websocket) -> WebsocketContext:
"""Create and return a websocket context.
Use the :meth:`test_websocket_context` whilst testing. This is
best used within a context, i.e.
.. code-block:: python
async with app.websocket_context(websocket):
...
Arguments:
websocket: A websocket to build a context around.
"""
return WebsocketContext(self, websocket)
    async def run(
        self,
        host: str = "127.0.0.1",
        port: int = 5000,
        debug: Optional[bool] = None,
        use_reloader: bool = True,
        loop: Optional[asyncio.AbstractEventLoop] = None,
        ca_certs: Optional[str] = None,
        certfile: Optional[str] = None,
        keyfile: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """Run this application.

        This is best used for development only, see Hypercorn for
        production servers.

        NOTE(review): as written this method only *schedules* the serve
        task (``loop.create_task``) and returns without running the loop
        or cleaning it up — the commented-out block below suggests it
        previously ran the loop to completion. Confirm the caller is
        expected to run the loop.

        Arguments:
            host: Hostname to listen on. By default this is loopback
                only, use 0.0.0.0 to have the server listen externally.
            port: Port number to listen on.
            debug: If set enable (or disable) debug mode and debug output.
            use_reloader: Automatically reload on code changes.
            loop: Asyncio loop to create the server in, if None, take default one.
                If specified it is the caller's responsibility to close and cleanup the
                loop.
            ca_certs: Path to the SSL CA certificate file.
            certfile: Path to the SSL certificate file.
            keyfile: Path to the SSL key file.
        """
        if kwargs:
            warnings.warn(
                f"Additional arguments, {','.join(kwargs.keys())}, are not supported.\n"
                "They may be supported by Hypercorn, which is the ASGI server Quart "
                "uses by default. This method is meant for development and debugging."
            )
        if loop is None:
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
        loop.set_debug(debug or False)
        shutdown_event = asyncio.Event()
        def _signal_handler(*_: Any) -> None:
            shutdown_event.set()
        try:
            loop.add_signal_handler(signal.SIGTERM, _signal_handler)
            loop.add_signal_handler(signal.SIGINT, _signal_handler)
        except (AttributeError, NotImplementedError):
            # Signal handlers are unavailable on some platforms/loops
            # (e.g. Windows) — fall back to no graceful-shutdown trigger.
            pass
        task = self.run_task(
            host,
            port,
            debug,
            use_reloader,
            ca_certs,
            certfile,
            keyfile,
            shutdown_trigger=shutdown_event.wait,  # type: ignore
        )
        scheme = "https" if certfile is not None and keyfile is not None else "http"
        print(f"Running on {scheme}://{host}:{port} (CTRL + C to quit)")  # noqa: T001, T002
        loop.create_task(task)
        # NOTE(review): disabled loop cleanup, kept for reference:
        #finally:
            #try:
                #_cancel_all_tasks(loop)
                #loop.run_until_complete(loop.shutdown_asyncgens())
            #finally:
                #asyncio.set_event_loop(None)
                #loop.close()
    def run_task(
        self,
        host: str = "127.0.0.1",
        port: int = 5000,
        debug: Optional[bool] = None,
        use_reloader: bool = True,
        ca_certs: Optional[str] = None,
        certfile: Optional[str] = None,
        keyfile: Optional[str] = None,
        shutdown_trigger: Optional[Callable[..., Awaitable[None]]] = None,
    ) -> Coroutine[None, None, None]:
        """Return a task that when awaited runs this application.

        This is best used for development only, see Hypercorn for
        production servers.

        Arguments:
            host: Hostname to listen on. By default this is loopback
                only, use 0.0.0.0 to have the server listen externally.
            port: Port number to listen on.
            debug: If set enable (or disable) debug mode and debug output.
            use_reloader: Automatically reload on code changes.
            ca_certs: Path to the SSL CA certificate file.
            certfile: Path to the SSL certificate file.
            keyfile: Path to the SSL key file.
            shutdown_trigger: Awaitable that resolves when the server
                should shut down, passed through to Hypercorn's serve.
        """
        config = HyperConfig()
        config.access_log_format = "%(h)s %(r)s %(s)s %(b)s %(D)s"
        config.accesslog = create_serving_logger()
        config.bind = [f"{host}:{port}"]
        config.ca_certs = ca_certs
        config.certfile = certfile
        if debug is not None:
            self.debug = debug
        config.errorlog = config.accesslog
        config.keyfile = keyfile
        config.use_reloader = use_reloader
        return serve(self, config, shutdown_trigger=shutdown_trigger)
def test_client(self) -> QuartClient:
"""Creates and returns a test client."""
return self.test_client_class(self)
    def test_request_context(
        self,
        path: str,
        *,
        method: str = "GET",
        headers: Optional[Union[dict, Headers]] = None,
        query_string: Optional[dict] = None,
        scheme: str = "http",
        send_push_promise: Callable[[str, Headers], Awaitable[None]] = no_op_push,
        data: Optional[AnyStr] = None,
        form: Optional[dict] = None,
        json: Any = sentinel,
        root_path: str = "",
        http_version: str = "1.1",
    ) -> RequestContext:
        """Create a request context for testing purposes.

        This is best used for testing code within request contexts. It
        is a simplified wrapper of :meth:`request_context`. It is best
        used in a with block, i.e.

        .. code-block:: python

            async with app.test_request_context("/", method="GET"):
                ...

        Arguments:
            path: Request path.
            method: HTTP verb
            headers: Headers to include in the request.
            query_string: To send as a dictionary, alternatively the
                query_string can be determined from the path.
            scheme: Scheme for the request, default http.
            send_push_promise: Awaitable called for any server push.
            data: Raw request body data (mutually exclusive with form/json).
            form: Form data to encode into the request body.
            json: JSON data to encode into the request body.
            root_path: The ASGI root path for the request.
            http_version: The HTTP version of the request.
        """
        headers, path, query_string_bytes = make_test_headers_path_and_query_string(
            self, path, headers, query_string
        )
        request_body, body_headers = make_test_body_with_headers(data, form, json)
        # Replace with headers.update(**body_headers) when Werkzeug
        # supports https://github.com/pallets/werkzeug/pull/1687
        for key, value in body_headers.items():
            headers[key] = value
        request = self.request_class(
            method,
            scheme,
            path,
            query_string_bytes,
            headers,
            root_path,
            http_version,
            send_push_promise=send_push_promise,
        )
        # The body future is resolved up front so handlers can read it.
        request.body.set_result(request_body)
        return self.request_context(request)
async def try_trigger_before_first_request_functions(self) -> None:
"""Trigger the before first request methods."""
if self._got_first_request:
return
# Reverse the teardown functions, so as to match the expected usage
self.teardown_appcontext_funcs = list(reversed(self.teardown_appcontext_funcs))
for key, value in self.teardown_request_funcs.items():
self.teardown_request_funcs[key] = list(reversed(value))
for key, value in self.teardown_websocket_funcs.items():
self.teardown_websocket_funcs[key] = list(reversed(value))
async with self._first_request_lock:
if self._got_first_request:
return
for function in self.before_first_request_funcs:
await function()
self._got_first_request = True
async def make_default_options_response(self) -> Response:
"""This is the default route function for OPTIONS requests."""
methods = _request_ctx_stack.top.url_adapter.allowed_methods()
return self.response_class("", headers={"Allow": ", ".join(methods)})
    async def make_response(self, result: ResponseReturnValue) -> Response:
        """Make a Response from the result of the route handler.

        The result itself can either be:
          - A Response object (or subclass).
          - A tuple of a ResponseValue and a header dictionary.
          - A tuple of a ResponseValue, status code and a header dictionary.

        A ResponseValue is either a Response object (or subclass) or a str.

        Raises:
            TypeError: If the (unpacked) response value is None.
        """
        status_or_headers = None
        headers: Optional[dict] = None
        status = None
        if isinstance(result, tuple):
            # Pad short tuples with None so one-, two- and three-element
            # tuples all unpack; tuples longer than three elements raise.
            value, status_or_headers, headers = result + (None,) * (3 - len(result))  # type: ignore
        else:
            value = result
        if value is None:
            raise TypeError("The response value returned by the view function cannot be None")
        if isinstance(status_or_headers, (dict, list)):
            # The second element was actually headers, i.e. (value, headers).
            headers = status_or_headers
            status = None
        elif status_or_headers is not None:
            status = status_or_headers
        if not isinstance(value, Response):
            if isinstance(value, dict):
                # Dict return values are JSON-encoded for convenience.
                response = jsonify(value)
            else:
                response = self.response_class(value)  # type: ignore
        else:
            response = value
        if status is not None:
            response.status_code = int(status)  # type: ignore
        if headers is not None:
            # Replace with response.headers.update(**headers) when
            # Werkzeug supports
            # https://github.com/pallets/werkzeug/pull/1687
            for key, value in headers.items():
                response.headers[key] = value
        return response
    async def handle_request(self, request: Request) -> Response:
        """Dispatch the request within a fresh request context.

        Any non-cancellation exception is converted into an error
        response via :meth:`handle_exception`.
        """
        async with self.request_context(request) as request_context:
            try:
                return await self.full_dispatch_request(request_context)
            except asyncio.CancelledError:
                raise  # CancelledErrors should be handled by serving code.
            except Exception as error:
                return await self.handle_exception(error)
    async def full_dispatch_request(
        self, request_context: Optional[RequestContext] = None
    ) -> Response:
        """Adds pre and post processing to the request dispatching.

        Arguments:
            request_context: The request context, optional as Flask
                omits this argument.
        """
        await self.try_trigger_before_first_request_functions()
        await request_started.send(self)
        try:
            result = await self.preprocess_request(request_context)
            if result is None:
                # Only dispatch to the view when no before-request
                # function short-circuited with a response.
                result = await self.dispatch_request(request_context)
        except Exception as error:
            result = await self.handle_user_exception(error)
        return await self.finalize_request(result, request_context)
async def preprocess_request(
self, request_context: Optional[RequestContext] = None
) -> Optional[ResponseReturnValue]:
"""Preprocess the request i.e. call before_request functions.
Arguments:
request_context: The request context, optional as Flask
omits this argument.
"""
request_ = (request_context or _request_ctx_stack.top).request
blueprint = request_.blueprint
processors = self.url_value_preprocessors[None]
if blueprint is not None:
processors = chain(processors, self.url_value_preprocessors[blueprint]) # type: ignore
for processor in processors:
processor(request.endpoint, request.view_args)
functions = self.before_request_funcs[None]
if blueprint is not None:
functions = chain(functions, self.before_request_funcs[blueprint]) # type: ignore
for function in functions:
result = await function()
if result is not None:
return result
return None
    async def dispatch_request(
        self, request_context: Optional[RequestContext] = None
    ) -> ResponseReturnValue:
        """Dispatch the request to the view function.

        Arguments:
            request_context: The request context, optional as Flask
                omits this argument.

        Raises:
            Exception: Re-raises any exception recorded during routing.
        """
        request_ = (request_context or _request_ctx_stack.top).request
        if request_.routing_exception is not None:
            raise request_.routing_exception
        # Serve the automatic OPTIONS response unless the rule opts out.
        if request_.method == "OPTIONS" and request_.url_rule.provide_automatic_options:
            return await self.make_default_options_response()
        handler = self.view_functions[request_.url_rule.endpoint]
        return await handler(**request_.view_args)
    async def finalize_request(
        self,
        result: ResponseReturnValue,
        request_context: Optional[RequestContext] = None,
        from_error_handler: bool = False,
    ) -> Response:
        """Turns the view response return value into a response.

        Arguments:
            result: The result of the request to finalize into a response.
            request_context: The request context, optional as Flask
                omits this argument.
            from_error_handler: If True, errors raised whilst finalizing
                are logged instead of re-raised so the error response
                still reaches the client.
        """
        response = await self.make_response(result)
        try:
            response = await self.process_response(response, request_context)
            await request_finished.send(self, response=response)
        except Exception:
            if not from_error_handler:
                raise
            self.logger.exception("Request finalizing errored")
        return response
    async def process_response(
        self, response: Response, request_context: Optional[RequestContext] = None
    ) -> Response:
        """Postprocess the request acting on the response.

        The after-request functions registered on the request context
        run first, then the blueprint's, then the app-level ones;
        finally the session is saved unless it is a null session.

        Arguments:
            response: The response after the request is finalized.
            request_context: The request context, optional as Flask
                omits this argument.
        """
        request_ = (request_context or _request_ctx_stack.top).request
        functions = (request_context or _request_ctx_stack.top)._after_request_functions
        blueprint = request_.blueprint
        if blueprint is not None:
            functions = chain(functions, self.after_request_funcs[blueprint])
        functions = chain(functions, self.after_request_funcs[None])
        for function in functions:
            response = await function(response)
        session_ = (request_context or _request_ctx_stack.top).session
        if not self.session_interface.is_null_session(session_):
            await self.save_session(session_, response)
        return response
    async def handle_websocket(self, websocket: Websocket) -> Optional[Response]:
        """Dispatch the websocket within a fresh websocket context.

        Any non-cancellation exception is converted via
        :meth:`handle_websocket_exception`. The return value may be None
        as websockets need not produce a response.
        """
        async with self.websocket_context(websocket) as websocket_context:
            try:
                return await self.full_dispatch_websocket(websocket_context)
            except asyncio.CancelledError:
                raise  # CancelledErrors should be handled by serving code.
            except Exception as error:
                return await self.handle_websocket_exception(error)
    async def full_dispatch_websocket(
        self, websocket_context: Optional[WebsocketContext] = None
    ) -> Optional[Response]:
        """Adds pre and post processing to the websocket dispatching.

        Arguments:
            websocket_context: The websocket context, optional to match
                the Flask convention.
        """
        await self.try_trigger_before_first_request_functions()
        await websocket_started.send(self)
        try:
            result = await self.preprocess_websocket(websocket_context)
            if result is None:
                # Only dispatch to the view when no before-websocket
                # function short-circuited with a response.
                result = await self.dispatch_websocket(websocket_context)
        except Exception as error:
            result = await self.handle_user_exception(error)
        return await self.finalize_websocket(result, websocket_context)
    async def preprocess_websocket(
        self, websocket_context: Optional[WebsocketContext] = None
    ) -> Optional[ResponseReturnValue]:
        """Preprocess the websocket i.e. call before_websocket functions.

        If any before-websocket function returns a non-None value it is
        used as the response and dispatching should be skipped.

        Arguments:
            websocket_context: The websocket context, optional as Flask
                omits this argument.
        """
        websocket_ = (websocket_context or _websocket_ctx_stack.top).websocket
        blueprint = websocket_.blueprint
        processors = self.url_value_preprocessors[None]
        if blueprint is not None:
            processors = chain(processors, self.url_value_preprocessors[blueprint])  # type: ignore
        for processor in processors:
            processor(websocket_.endpoint, websocket_.view_args)
        functions = self.before_websocket_funcs[None]
        if blueprint is not None:
            functions = chain(functions, self.before_websocket_funcs[blueprint])  # type: ignore
        for function in functions:
            result = await function()
            if result is not None:
                return result
        return None
    async def dispatch_websocket(
        self, websocket_context: Optional[WebsocketContext] = None
    ) -> Optional[ResponseReturnValue]:
        """Dispatch the websocket to the view function.

        Returns whatever the view function returns (websocket views may
        return None); the previous ``-> None`` annotation was inaccurate
        as the result is consumed by :meth:`full_dispatch_websocket`.

        Arguments:
            websocket_context: The websocket context, optional to match
                the Flask convention.
        """
        websocket_ = (websocket_context or _websocket_ctx_stack.top).websocket
        if websocket_.routing_exception is not None:
            raise websocket_.routing_exception
        handler = self.view_functions[websocket_.url_rule.endpoint]
        return await handler(**websocket_.view_args)
    async def finalize_websocket(
        self,
        result: ResponseReturnValue,
        websocket_context: Optional[WebsocketContext] = None,
        from_error_handler: bool = False,
    ) -> Optional[Response]:
        """Turns the view response return value into a response.

        Arguments:
            result: The result of the websocket to finalize into a
                response; may be None as websockets need not return one.
            websocket_context: The websocket context, optional as Flask
                omits this argument.
            from_error_handler: If True, errors raised whilst finalizing
                are logged instead of re-raised so the error response
                still reaches the client.
        """
        if result is not None:
            response = await self.make_response(result)
        else:
            response = None
        try:
            response = await self.postprocess_websocket(response, websocket_context)
            await websocket_finished.send(self, response=response)
        except Exception:
            if not from_error_handler:
                raise
            self.logger.exception("Request finalizing errored")
        return response
async def postprocess_websocket(
self, response: Optional[Response], websocket_context: Optional[WebsocketContext] = None
) -> Response:
"""Postprocess the websocket acting on the response.
Arguments:
response: The response after the websocket is finalized.
webcoket_context: The websocket context, optional as Flask
omits this argument.
"""
websocket_ = (websocket_context or _websocket_ctx_stack.top).websocket
functions = (websocket_context or _websocket_ctx_stack.top)._after_websocket_functions
blueprint = websocket_.blueprint
if blueprint is not None:
functions = chain(functions, self.after_websocket_funcs[blueprint])
functions = chain(functions, self.after_websocket_funcs[None])
for function in functions:
response = await function(response)
session_ = (websocket_context or _request_ctx_stack.top).session
if not self.session_interface.is_null_session(session_):
if response is None and isinstance(session_, SecureCookieSession) and session_.modified:
self.logger.exception(
"Secure Cookie Session modified during websocket handling. "
"These modifications will be lost as a cookie cannot be set."
)
else:
await self.save_session(session_, response)
return response
    async def __call__(self, scope: dict, receive: Callable, send: Callable) -> None:
        """Called by ASGI servers.

        The related :meth:`~quart.app.Quart.asgi_app` is called,
        allowing for middleware usage whilst keeping the top level app
        a :class:`~quart.app.Quart` instance.

        Arguments:
            scope: The ASGI connection scope.
            receive: Awaitable callable yielding ASGI events.
            send: Awaitable callable accepting ASGI events.
        """
        await self.asgi_app(scope, receive, send)
async def asgi_app(self, scope: dict, receive: Callable, send: Callable) -> None:
"""This handles ASGI calls, it can be wrapped in middleware.
When using middleware with Quart it is preferable to wrap this
method rather than the app itself. This is to ensure that the
app is an instance of this class - which allows the quart cli
to work correctly. To use this feature simply do,
.. code-block:: python
app.asgi_app = middleware(app.asgi_app)
"""
if scope["type"] == "http":
asgi_handler = self.asgi_http_class(self, scope)
elif scope["type"] == "websocket":
asgi_handler = self.asgi_websocket_class(self, scope) # type: ignore
elif scope["type"] == "lifespan":
asgi_handler = self.asgi_lifespan_class(self, scope) # type: ignore
else:
raise RuntimeError("ASGI Scope type is unknown")
await asgi_handler(receive, send)
    async def startup(self) -> None:
        """Run the before-serving functions within an app context.

        Also resets the first-request tracking flag. Presumably invoked
        by the ASGI lifespan handling at server startup — confirm.
        """
        self._got_first_request = False
        async with self.app_context():
            for func in self.before_serving_funcs:
                await func()
    async def shutdown(self) -> None:
        """Run the after-serving functions within an app context.

        Presumably invoked by the ASGI lifespan handling at server
        shutdown — confirm.
        """
        async with self.app_context():
            for func in self.after_serving_funcs:
                await func()
def _find_exception_handler(
    error: Exception, exception_handlers: Dict[Type[Exception], Callable]
) -> Optional[Callable]:
    """Return the first handler whose exception class matches *error*.

    The mapping is scanned in insertion order, so more specific
    exception classes should be registered before general ones. Returns
    None when no registered class matches.
    """
    for exception, handler in exception_handlers.items():
        if isinstance(error, exception):
            return handler
    return None
def _cancel_all_tasks(loop: asyncio.AbstractEventLoop) -> None:
tasks = [task for task in asyncio.all_tasks(loop) if not task.done()]
if not tasks:
return
for task in tasks:
task.cancel()
loop.run_until_complete(asyncio.gather(*tasks, loop=loop, return_exceptions=True))
for task in tasks:
if not task.cancelled() and task.exception() is not None:
loop.call_exception_handler(
{
"message": "unhandled exception during shutdown",
"exception": task.exception(),
"task": task,
}
)
| 36.68597 | 100 | 0.619743 | from __future__ import annotations
import asyncio
import signal
import sys
import warnings
from collections import defaultdict, OrderedDict
from datetime import timedelta
from itertools import chain
from logging import Logger
from pathlib import Path
from types import TracebackType
from typing import (
Any,
AnyStr,
Awaitable,
Callable,
cast,
Coroutine,
Dict,
IO,
Iterable,
List,
Optional,
Set,
Tuple,
Type,
Union,
ValuesView,
)
from hypercorn.asyncio import serve
from hypercorn.config import Config as HyperConfig
from werkzeug.datastructures import Headers
from werkzeug.routing import MapAdapter
from .asgi import ASGIHTTPConnection, ASGILifespan, ASGIWebsocketConnection
from .blueprints import Blueprint
from .cli import AppGroup
from .config import Config, ConfigAttribute, DEFAULT_CONFIG
from .ctx import (
_AppCtxGlobals,
_request_ctx_stack,
_websocket_ctx_stack,
AppContext,
has_request_context,
has_websocket_context,
RequestContext,
WebsocketContext,
)
from .exceptions import all_http_exceptions, HTTPException
from .globals import g, request, session
from .helpers import (
_endpoint_from_view_func,
find_package,
get_debug_flag,
get_env,
get_flashed_messages,
url_for,
)
from .json import JSONDecoder, JSONEncoder, jsonify, tojson_filter
from .logging import create_logger, create_serving_logger
from .routing import QuartMap, QuartRule
from .sessions import SecureCookieSession, SecureCookieSessionInterface, Session
from .signals import (
appcontext_tearing_down,
got_request_exception,
got_websocket_exception,
request_finished,
request_started,
request_tearing_down,
websocket_finished,
websocket_started,
websocket_tearing_down,
)
from .static import PackageStatic
from .templating import _default_template_context_processor, DispatchingJinjaLoader, Environment
from .testing import (
make_test_body_with_headers,
make_test_headers_path_and_query_string,
no_op_push,
QuartClient,
sentinel,
)
from .typing import FilePath, ResponseReturnValue
from .utils import file_path_to_path, is_coroutine_function, run_sync
from .wrappers import BaseRequestWebsocket, Request, Response, Websocket
AppOrBlueprintKey = Optional[str]
def _convert_timedelta(value: Union[float, timedelta]) -> timedelta:
if not isinstance(value, timedelta):
return timedelta(seconds=value)
return value
class Quart(PackageStatic):
app_ctx_globals_class = _AppCtxGlobals
asgi_http_class = ASGIHTTPConnection
asgi_lifespan_class = ASGILifespan
asgi_websocket_class = ASGIWebsocketConnection
config_class = Config
debug = ConfigAttribute("DEBUG")
env = ConfigAttribute("ENV")
jinja_environment = Environment
jinja_options = {
"autoescape": True,
"extensions": ["jinja2.ext.autoescape", "jinja2.ext.with_"],
}
json_decoder = JSONDecoder
json_encoder = JSONEncoder
lock_class = asyncio.Lock
permanent_session_lifetime = ConfigAttribute(
"PERMANENT_SESSION_LIFETIME", converter=_convert_timedelta
)
request_class = Request
response_class = Response
secret_key = ConfigAttribute("SECRET_KEY")
send_file_max_age_default = ConfigAttribute(
"SEND_FILE_MAX_AGE_DEFAULT", converter=_convert_timedelta
)
session_cookie_name = ConfigAttribute("SESSION_COOKIE_NAME")
session_interface = SecureCookieSessionInterface()
test_client_class = QuartClient
testing = ConfigAttribute("TESTING")
url_map_class = QuartMap
url_rule_class = QuartRule
websocket_class = Websocket
    def __init__(
        self,
        import_name: str,
        static_url_path: Optional[str] = None,
        static_folder: Optional[str] = "static",
        static_host: Optional[str] = None,
        host_matching: bool = False,
        subdomain_matching: bool = False,
        template_folder: Optional[str] = "templates",
        root_path: Optional[str] = None,
        instance_path: Optional[str] = None,
        instance_relative_config: bool = False,
    ) -> None:
        """Construct a Quart web application.

        Arguments:
            import_name: The name of the importing module, usually
                ``__name__``.
            static_url_path: URL path under which static files are served.
            static_folder: Folder containing the static files.
            static_host: Host the static route is bound to; required when
                a static folder exists and ``host_matching`` is enabled.
            host_matching: Enable matching routes by host.
            subdomain_matching: Enable matching routes by subdomain.
            template_folder: Folder containing the Jinja templates.
            root_path: Override the root path of the app.
            instance_path: Absolute path to the instance folder; located
                automatically when omitted.
            instance_relative_config: Root the configuration at the
                instance path instead of the root path.

        Raises:
            ValueError: If ``instance_path`` is not absolute, or if a
                static folder exists with ``host_matching`` enabled but no
                ``static_host`` supplied.
        """
        super().__init__(import_name, template_folder, root_path, static_folder, static_url_path)
        instance_path = Path(instance_path) if instance_path else self.auto_find_instance_path()
        if not instance_path.is_absolute():
            raise ValueError("The instance_path must be an absolute path.")
        self.instance_path = instance_path
        self.config = self.make_config(instance_relative_config)
        # Per-blueprint (keyed by name, None = app level) hook registries.
        self.after_request_funcs: Dict[
            AppOrBlueprintKey, List[Callable[[Response], Awaitable[Response]]]
        ] = defaultdict(list)
        self.after_serving_funcs: List[Callable[[], Awaitable[None]]] = []
        self.after_websocket_funcs: Dict[
            AppOrBlueprintKey, List[Callable[[Response], Awaitable[Optional[Response]]]]
        ] = defaultdict(list)
        self.before_first_request_funcs: List[Callable[[], Awaitable[None]]] = []
        self.before_request_funcs: Dict[
            AppOrBlueprintKey, List[Callable[[], Awaitable[None]]]
        ] = defaultdict(list)
        self.before_serving_funcs: List[Callable[[], Awaitable[None]]] = []
        self.before_websocket_funcs: Dict[
            AppOrBlueprintKey, List[Callable[[], Awaitable[None]]]
        ] = defaultdict(list)
        self.blueprints: Dict[str, Blueprint] = OrderedDict()
        self.error_handler_spec: Dict[
            AppOrBlueprintKey, Dict[Exception, Callable[[Exception], Awaitable[None]]]
        ] = defaultdict(dict)
        self.extensions: Dict[str, Any] = {}
        self.shell_context_processors: List[Callable[[], None]] = []
        self.teardown_appcontext_funcs: List[
            Callable[[Optional[BaseException]], Awaitable[None]]
        ] = []
        self.teardown_request_funcs: Dict[
            AppOrBlueprintKey, List[Callable[[Optional[BaseException]], Awaitable[None]]]
        ] = defaultdict(list)
        self.teardown_websocket_funcs: Dict[
            AppOrBlueprintKey, List[Callable[[Optional[BaseException]], Awaitable[None]]]
        ] = defaultdict(list)
        self.template_context_processors: Dict[
            AppOrBlueprintKey, List[Callable[[], Awaitable[Dict[str, Any]]]]
        ] = defaultdict(list)
        self.url_build_error_handlers: List[Callable[[Exception, str, dict], str]] = []
        self.url_default_functions: Dict[AppOrBlueprintKey, List[Callable]] = defaultdict(list)
        self.url_map = self.url_map_class(host_matching=host_matching)
        self.subdomain_matching = subdomain_matching
        self.url_value_preprocessors: Dict[
            AppOrBlueprintKey, List[Callable[[str, dict], None]]
        ] = defaultdict(list)
        self.view_functions: Dict[str, Callable] = {}
        # First-request bookkeeping; the lock guards the one-time setup.
        self._got_first_request = False
        self._first_request_lock = self.lock_class()
        # Lazily created (see the logger and jinja_env properties).
        self._jinja_env: Optional[Environment] = None
        self._logger: Optional[Logger] = None
        self.cli = AppGroup(self.name)
        if self.has_static_folder:
            if bool(static_host) != host_matching:
                raise ValueError(
                    "static_host must be set if there is a static folder and host_matching is "
                    "enabled"
                )
            self.add_url_rule(
                f"{self.static_url_path}/<path:filename>",
                "static",
                self.send_static_file,
                host=static_host,
            )
        self.template_context_processors[None] = [_default_template_context_processor]
    @property
    def name(self) -> str:
        """The name of this application.

        Derived from the import name, or the main file's stem when run
        as ``__main__``.
        """
        if self.import_name == "__main__":
            path = Path(getattr(sys.modules["__main__"], "__file__", "__main__.py"))
            return path.stem
        return self.import_name
    @property
    def propagate_exceptions(self) -> bool:
        """Return true if exceptions should be propagated.

        Follows the ``PROPAGATE_EXCEPTIONS`` config value when it is
        set, otherwise defaults to the debug or testing flags.
        """
        propagate = self.config["PROPAGATE_EXCEPTIONS"]
        if propagate is not None:
            return propagate
        else:
            return self.debug or self.testing
    @property
    def logger(self) -> Logger:
        """A :class:`logging.Logger` for the app, created lazily on first use."""
        if self._logger is None:
            self._logger = create_logger(self)
        return self._logger
    @property
    def jinja_env(self) -> Environment:
        """The app's Jinja environment, created lazily on first use."""
        if self._jinja_env is None:
            self._jinja_env = self.create_jinja_environment()
        return self._jinja_env
    @property
    def got_first_request(self) -> bool:
        """Return whether the first request has been handled."""
        return self._got_first_request
    def auto_find_instance_path(self) -> Path:
        """Locate the instance path from the app's package location.

        Installed packages use ``<prefix>/var/<name>-instance``,
        otherwise an ``instance`` folder alongside the package is used.
        """
        prefix, package_path = find_package(self.import_name)
        if prefix is None:
            return package_path / "instance"
        return prefix / "var" / f"{self.name}-instance"
    def make_config(self, instance_relative: bool = False) -> Config:
        """Create and return the configuration with appropriate defaults.

        Arguments:
            instance_relative: Root the configuration at the instance
                path rather than the app root path.
        """
        config = self.config_class(
            self.instance_path if instance_relative else self.root_path, DEFAULT_CONFIG
        )
        # ENV and DEBUG are sourced from the environment variables.
        config["ENV"] = get_env()
        config["DEBUG"] = get_debug_flag()
        return config
    def open_instance_resource(self, path: FilePath, mode: str = "rb") -> IO[AnyStr]:
        """Open a file relative to the instance folder (binary read by default)."""
        return open(self.instance_path / file_path_to_path(path), mode)
    def create_url_adapter(self, request: Optional[BaseRequestWebsocket]) -> Optional[MapAdapter]:
        """Bind the URL map to *request*, or to the configured SERVER_NAME when
        *request* is None (e.g. for url_for outside a request).

        Returns None when there is neither a request nor a SERVER_NAME.
        """
        if request is not None:
            # Without subdomain matching, fall back to the map's default
            # subdomain (normalised to None when empty).
            subdomain = (
                (self.url_map.default_subdomain or None) if not self.subdomain_matching else None
            )
            return self.url_map.bind_to_request(request, subdomain, self.config["SERVER_NAME"])
        if self.config["SERVER_NAME"] is not None:
            scheme = "https" if self.config["PREFER_SECURE_URLS"] else "http"
            return self.url_map.bind(self.config["SERVER_NAME"], url_scheme=scheme)
        return None
    def create_jinja_environment(self) -> Environment:
        """Construct the Jinja environment, wiring in the app's globals
        (config, g, request, session, url_for, flashed messages) and the
        ``tojson`` filter."""
        options = dict(self.jinja_options)
        if "autoescape" not in options:
            options["autoescape"] = self.select_jinja_autoescape
        if "auto_reload" not in options:
            options["auto_reload"] = self.config["TEMPLATES_AUTO_RELOAD"] or self.debug
        jinja_env = self.jinja_environment(self, **options)
        jinja_env.globals.update(
            {
                "config": self.config,
                "g": g,
                "get_flashed_messages": get_flashed_messages,
                "request": request,
                "session": session,
                "url_for": url_for,
            }
        )
        jinja_env.filters["tojson"] = tojson_filter
        return jinja_env
    def create_global_jinja_loader(self) -> DispatchingJinjaLoader:
        """Create the loader that dispatches template lookups to the app and
        its blueprints."""
        return DispatchingJinjaLoader(self)
def select_jinja_autoescape(self, filename: str) -> bool:
if filename is None:
return True
return Path(filename).suffix in {".htm", ".html", ".xhtml", ".xml"}
    async def update_template_context(self, context: dict) -> None:
        """Merge context-processor values into *context* in place.

        App-level processors run first, then the active blueprint's (when
        inside a request context). Values already present in *context* take
        precedence over processor-supplied values.
        """
        processors = self.template_context_processors[None]
        if has_request_context():
            blueprint = _request_ctx_stack.top.request.blueprint
            if blueprint is not None and blueprint in self.template_context_processors:
                processors = chain(
                    processors, self.template_context_processors[blueprint]
                )
        extra_context: dict = {}
        for processor in processors:
            extra_context.update(await processor())
        original = context.copy()
        context.update(extra_context)
        # Re-apply the original values so caller-supplied entries win.
        context.update(original)
    def make_shell_context(self) -> dict:
        """Build the variables exposed in an interactive shell session."""
        context = {"app": self, "g": g}
        for processor in self.shell_context_processors:
            context.update(processor())
        return context
    def route(
        self,
        path: str,
        methods: Optional[List[str]] = None,
        endpoint: Optional[str] = None,
        defaults: Optional[dict] = None,
        host: Optional[str] = None,
        subdomain: Optional[str] = None,
        *,
        provide_automatic_options: Optional[bool] = None,
        strict_slashes: Optional[bool] = None,
    ) -> Callable:
        """Decorator form of add_url_rule: register the decorated function as
        the view for *path* and return it unchanged."""
        def decorator(func: Callable) -> Callable:
            self.add_url_rule(
                path,
                endpoint,
                func,
                methods,
                defaults=defaults,
                host=host,
                subdomain=subdomain,
                provide_automatic_options=provide_automatic_options,
                strict_slashes=strict_slashes,
            )
            return func
        return decorator
    def add_url_rule(
        self,
        path: str,
        endpoint: Optional[str] = None,
        view_func: Optional[Callable] = None,
        methods: Optional[Iterable[str]] = None,
        defaults: Optional[dict] = None,
        host: Optional[str] = None,
        subdomain: Optional[str] = None,
        *,
        provide_automatic_options: Optional[bool] = None,
        is_websocket: bool = False,
        strict_slashes: Optional[bool] = None,
        merge_slashes: Optional[bool] = None,
    ) -> None:
        """Register a URL rule and its (async-wrapped) view function.

        The endpoint defaults to the view function's name and methods default
        to the function's ``methods`` attribute or ``["GET"]``. OPTIONS is
        handled automatically unless the view declares it or automatic
        options are disabled. Raises AssertionError when a different handler
        is already registered under the same endpoint.
        """
        endpoint = endpoint or _endpoint_from_view_func(view_func)
        handler = self.ensure_async(view_func)
        if methods is None:
            methods = getattr(view_func, "methods", ["GET"])
        methods = cast(Set[str], set(methods))
        required_methods = set(getattr(view_func, "required_methods", set()))
        if provide_automatic_options is None:
            automatic_options = getattr(view_func, "provide_automatic_options", None)
            if automatic_options is None:
                # Only answer OPTIONS automatically if the view doesn't
                # handle it itself.
                automatic_options = "OPTIONS" not in methods
        else:
            automatic_options = provide_automatic_options
        if automatic_options:
            required_methods.add("OPTIONS")
        methods.update(required_methods)
        rule = self.url_rule_class(
            path,
            methods=methods,
            endpoint=endpoint,
            host=host,
            subdomain=subdomain,
            defaults=defaults,
            websocket=is_websocket,
            strict_slashes=strict_slashes,
            merge_slashes=merge_slashes,
            provide_automatic_options=automatic_options,
        )
        self.url_map.add(rule)
        if handler is not None:
            old_handler = self.view_functions.get(endpoint)
            # Unwrap a sync-to-async wrapper so re-registering the same view
            # function is not treated as a conflict.
            if getattr(old_handler, "_quart_async_wrapper", False):
                old_handler = old_handler.__wrapped__
            if old_handler is not None and old_handler != view_func:
                raise AssertionError(f"Handler is overwriting existing for endpoint {endpoint}")
            self.view_functions[endpoint] = handler
    def websocket(
        self,
        path: str,
        endpoint: Optional[str] = None,
        defaults: Optional[dict] = None,
        host: Optional[str] = None,
        subdomain: Optional[str] = None,
        *,
        strict_slashes: Optional[bool] = None,
    ) -> Callable:
        """Decorator form of add_websocket: register the decorated function as
        the websocket handler for *path*."""
        def decorator(func: Callable) -> Callable:
            self.add_websocket(
                path,
                endpoint,
                func,
                defaults=defaults,
                host=host,
                subdomain=subdomain,
                strict_slashes=strict_slashes,
            )
            return func
        return decorator
    def add_websocket(
        self,
        path: str,
        endpoint: Optional[str] = None,
        view_func: Optional[Callable] = None,
        defaults: Optional[dict] = None,
        host: Optional[str] = None,
        subdomain: Optional[str] = None,
        *,
        strict_slashes: Optional[bool] = None,
    ) -> None:
        """Register a websocket rule: a GET-only URL rule flagged as a
        websocket, with automatic OPTIONS disabled."""
        return self.add_url_rule(
            path,
            endpoint,
            view_func,
            {"GET"},
            defaults=defaults,
            host=host,
            subdomain=subdomain,
            provide_automatic_options=False,
            is_websocket=True,
            strict_slashes=strict_slashes,
        )
    def endpoint(self, endpoint: str) -> Callable:
        """Decorator registering a view function under an explicit endpoint
        name; the (async-wrapped) handler is returned."""
        def decorator(func: Callable) -> Callable:
            handler = self.ensure_async(func)
            self.view_functions[endpoint] = handler
            return handler
        return decorator
    def errorhandler(self, error: Union[Type[Exception], int]) -> Callable:
        """Decorator form of register_error_handler."""
        def decorator(func: Callable) -> Callable:
            self.register_error_handler(error, func)
            return func
        return decorator
    def register_error_handler(
        self,
        error: Union[Type[Exception], int],
        func: Union[Callable[[Exception], None], Callable[[Exception], Awaitable[None]]],
        name: AppOrBlueprintKey = None,
    ) -> None:
        """Register *func* to handle an exception class or HTTP status code
        (integer codes map to the matching HTTPException subclass)."""
        handler = self.ensure_async(func)
        if isinstance(error, int):
            error = all_http_exceptions[error]
        self.error_handler_spec[name][error] = handler
    def template_filter(self, name: Optional[str] = None) -> Callable:
        """Decorator form of add_template_filter."""
        def decorator(func: Callable) -> Callable:
            self.add_template_filter(func, name=name)
            return func
        return decorator
    def add_template_filter(self, func: Callable, name: Optional[str] = None) -> None:
        """Register a Jinja filter under *name* (defaults to the function name)."""
        self.jinja_env.filters[name or func.__name__] = func
    def template_test(self, name: Optional[str] = None) -> Callable:
        """Decorator form of add_template_test."""
        def decorator(func: Callable) -> Callable:
            self.add_template_test(func, name=name)
            return func
        return decorator
    def add_template_test(self, func: Callable, name: Optional[str] = None) -> None:
        """Register a Jinja test under *name* (defaults to the function name)."""
        self.jinja_env.tests[name or func.__name__] = func
    def template_global(self, name: Optional[str] = None) -> Callable:
        """Decorator form of add_template_global."""
        def decorator(func: Callable) -> Callable:
            self.add_template_global(func, name=name)
            return func
        return decorator
    def add_template_global(self, func: Callable, name: Optional[str] = None) -> None:
        """Register a Jinja global under *name* (defaults to the function name)."""
        self.jinja_env.globals[name or func.__name__] = func
    def context_processor(
        self,
        func: Union[Callable[[], Dict[str, Any]], Callable[[], Awaitable[Dict[str, Any]]]],
        name: AppOrBlueprintKey = None,
    ) -> Callable[[], Awaitable[Dict[str, Any]]]:
        """Register a template context processor for the app (name=None) or a
        blueprint; returns the async-wrapped handler."""
        handler = self.ensure_async(func)
        self.template_context_processors[name].append(handler)
        return handler
    def shell_context_processor(self, func: Callable[[], None]) -> Callable:
        """Register a callable whose result is merged into the shell context."""
        self.shell_context_processors.append(func)
        return func
    def url_defaults(self, func: Callable, name: AppOrBlueprintKey = None) -> Callable:
        """Register a URL defaults callback for the app or a blueprint."""
        self.url_default_functions[name].append(func)
        return func
    def url_value_preprocessor(
        self, func: Callable[[str, dict], None], name: AppOrBlueprintKey = None
    ) -> Callable:
        """Register a URL value preprocessor for the app or a blueprint."""
        self.url_value_preprocessors[name].append(func)
        return func
    def inject_url_defaults(self, endpoint: str, values: dict) -> None:
        """Apply app-level, then blueprint-level, URL default callbacks to
        *values* (mutated in place)."""
        functions = self.url_default_functions[None]
        if "." in endpoint:
            # A dotted endpoint is namespaced by its owning blueprint.
            blueprint = endpoint.rsplit(".", 1)[0]
            functions = chain(functions, self.url_default_functions[blueprint])
        for function in functions:
            function(endpoint, values)
    def handle_url_build_error(self, error: Exception, endpoint: str, values: dict) -> str:
        """Give registered handlers a chance to resolve a URL build error.

        Returns the first non-None handler result; re-raises *error* when no
        handler resolves it.
        """
        for handler in self.url_build_error_handlers:
            result = handler(error, endpoint, values)
            if result is not None:
                return result
        raise error
    def _find_exception_handler(
        self, error: Exception
    ) -> Optional[Callable[[Exception], Awaitable[None]]]:
        """Locate the registered handler for *error*.

        The active request/websocket context determines the blueprint whose
        handlers are consulted first, falling back to app-level handlers.
        Matching itself is delegated to the module-level function of the same
        name (this method intentionally shadows it).
        """
        if _request_ctx_stack.top is not None:
            blueprint = _request_ctx_stack.top.request.blueprint
        elif _websocket_ctx_stack.top is not None:
            blueprint = _websocket_ctx_stack.top.websocket.blueprint
        else:
            blueprint = None
        handler = _find_exception_handler(
            error, self.error_handler_spec.get(blueprint, {})
        )
        if handler is None:
            handler = _find_exception_handler(error, self.error_handler_spec[None])
        return handler
    async def handle_http_exception(self, error: Exception) -> Response:
        """Convert an HTTPException into a response, via a registered handler
        when one exists."""
        handler = self._find_exception_handler(error)
        if handler is None:
            return error.get_response()
        else:
            return await handler(error)
    def trap_http_exception(self, error: Exception) -> bool:
        """Whether HTTP exceptions should be re-raised instead of converted."""
        return self.config["TRAP_HTTP_EXCEPTIONS"]
    async def handle_user_exception(self, error: Exception) -> Response:
        """Handle an exception raised by user code during dispatch.

        HTTP exceptions take the HTTP path unless trapped; any other
        exception is re-raised when no handler is registered for it.
        """
        if isinstance(error, HTTPException) and not self.trap_http_exception(error):
            return await self.handle_http_exception(error)
        handler = self._find_exception_handler(error)
        if handler is None:
            raise error
        return await handler(error)
    async def handle_exception(self, error: Exception) -> Response:
        """Last-resort handling of an unexpected request exception.

        Signals got_request_exception, logs the traceback, optionally
        propagates (debug/testing), otherwise produces a 500 response via a
        registered handler if any.
        """
        await got_request_exception.send(self, exception=error)
        self.log_exception(sys.exc_info())
        if self.propagate_exceptions:
            raise error
        internal_server_error = all_http_exceptions[500]()
        handler = self._find_exception_handler(internal_server_error)
        if handler is None:
            return internal_server_error.get_response()
        else:
            return await self.finalize_request(await handler(error), from_error_handler=True)
    async def handle_websocket_exception(self, error: Exception) -> Optional[Response]:
        """Last-resort handling of an unexpected websocket exception; always
        produces a 500 response (via a registered handler if any)."""
        await got_websocket_exception.send(self, exception=error)
        self.log_exception(sys.exc_info())
        internal_server_error = all_http_exceptions[500]()
        handler = self._find_exception_handler(internal_server_error)
        if handler is None:
            return internal_server_error.get_response()
        else:
            return await self.finalize_websocket(await handler(error), from_error_handler=True)
    def log_exception(self, exception_info: Tuple[type, BaseException, TracebackType]) -> None:
        """Log *exception_info* against the active request and/or websocket
        context, including method and path for requests."""
        if has_request_context():
            request_ = _request_ctx_stack.top.request
            self.logger.error(
                f"Exception on request {request_.method} {request_.path}", exc_info=exception_info
            )
        if has_websocket_context():
            websocket_ = _websocket_ctx_stack.top.websocket
            self.logger.error(f"Exception on websocket {websocket_.path}", exc_info=exception_info)
    def before_request(
        self,
        func: Union[Callable[[], None], Callable[[], Awaitable[None]]],
        name: AppOrBlueprintKey = None,
    ) -> Callable[[], Awaitable[None]]:
        """Register a function to run before each request."""
        handler = self.ensure_async(func)
        self.before_request_funcs[name].append(handler)
        return handler
    def before_websocket(
        self,
        func: Union[Callable[[], None], Callable[[], Awaitable[None]]],
        name: AppOrBlueprintKey = None,
    ) -> Callable[[], Awaitable[None]]:
        """Register a function to run before each websocket."""
        handler = self.ensure_async(func)
        self.before_websocket_funcs[name].append(handler)
        return handler
    def before_first_request(
        self,
        func: Union[Callable[[], None], Callable[[], Awaitable[None]]],
        name: AppOrBlueprintKey = None,
    ) -> Callable[[], Awaitable[None]]:
        """Register a function to run once, before the first request."""
        handler = self.ensure_async(func)
        self.before_first_request_funcs.append(handler)
        return handler
    def before_serving(
        self, func: Union[Callable[[], None], Callable[[], Awaitable[None]]]
    ) -> Callable[[], Awaitable[None]]:
        """Register a function to run when the server starts serving."""
        handler = self.ensure_async(func)
        self.before_serving_funcs.append(handler)
        return handler
    def after_request(
        self,
        func: Union[Callable[[Response], Response], Callable[[Response], Awaitable[Response]]],
        name: AppOrBlueprintKey = None,
    ) -> Callable[[Response], Awaitable[None]]:
        """Register a function to run after each request; it receives and
        returns the response."""
        handler = self.ensure_async(func)
        self.after_request_funcs[name].append(handler)
        return handler
    def after_websocket(
        self,
        func: Union[
            Callable[[Response], Optional[Response]],
            Callable[[Response], Awaitable[Optional[Response]]],
        ],
        name: AppOrBlueprintKey = None,
    ) -> Callable[[Response], Awaitable[None]]:
        """Register a function to run after each websocket; the response may
        be None for websockets."""
        handler = self.ensure_async(func)
        self.after_websocket_funcs[name].append(handler)
        return handler
    def after_serving(
        self, func: Union[Callable[[], None], Callable[[], Awaitable[None]]]
    ) -> Callable[[], Awaitable[None]]:
        """Register a function to run when the server stops serving."""
        handler = self.ensure_async(func)
        self.after_serving_funcs.append(handler)
        return handler
    def teardown_request(
        self,
        func: Union[
            Callable[[Optional[BaseException]], None],
            Callable[[Optional[BaseException]], Awaitable[None]],
        ],
        name: AppOrBlueprintKey = None,
    ) -> Callable[[Optional[BaseException]], Awaitable[None]]:
        """Register a function to run at request teardown; it receives the
        exception (or None) that ended the request."""
        handler = self.ensure_async(func)
        self.teardown_request_funcs[name].append(handler)
        return handler
    def teardown_websocket(
        self,
        func: Union[
            Callable[[Optional[BaseException]], None],
            Callable[[Optional[BaseException]], Awaitable[None]],
        ],
        name: AppOrBlueprintKey = None,
    ) -> Callable[[Optional[BaseException]], Awaitable[None]]:
        """Register a function to run at websocket teardown."""
        handler = self.ensure_async(func)
        self.teardown_websocket_funcs[name].append(handler)
        return handler
    def teardown_appcontext(
        self,
        func: Union[
            Callable[[Optional[BaseException]], None],
            Callable[[Optional[BaseException]], Awaitable[None]],
        ],
    ) -> Callable[[Optional[BaseException]], Awaitable[None]]:
        """Register a function to run when the app context is torn down."""
        handler = self.ensure_async(func)
        self.teardown_appcontext_funcs.append(handler)
        return handler
    def register_blueprint(
        self,
        blueprint: Blueprint,
        url_prefix: Optional[str] = None,
        *,
        subdomain: Optional[str] = None,
    ) -> None:
        """Register *blueprint* on the app.

        Registering a different blueprint object under an already-used name
        raises RuntimeError; blueprint names must be unique.
        """
        first_registration = False
        if blueprint.name in self.blueprints and self.blueprints[blueprint.name] is not blueprint:
            raise RuntimeError(
                f"Blueprint name '{blueprint.name}' "
                f"is already registered by {self.blueprints[blueprint.name]}. "
                "Blueprints must have unique names"
            )
        else:
            self.blueprints[blueprint.name] = blueprint
            first_registration = True
        blueprint.register(self, first_registration, url_prefix=url_prefix, subdomain=subdomain)
    def iter_blueprints(self) -> ValuesView[Blueprint]:
        """Iterate over the registered blueprints."""
        return self.blueprints.values()
    def ensure_async(self, func: Callable[..., Any]) -> Callable[..., Awaitable[Any]]:
        """Return *func* unchanged if it is a coroutine function, otherwise
        wrap it with run_sync so it can be awaited."""
        if is_coroutine_function(func):
            return func
        else:
            return run_sync(func)
    async def open_session(self, request: BaseRequestWebsocket) -> Session:
        """Open a session for *request* via the session interface."""
        return await self.ensure_async(self.session_interface.open_session)(self, request)
    async def make_null_session(self) -> Session:
        """Create a null (no-op) session via the session interface."""
        return await self.ensure_async(self.session_interface.make_null_session)(self)
    async def save_session(self, session: Session, response: Response) -> None:
        """Persist *session* onto *response* via the session interface."""
        await self.ensure_async(self.session_interface.save_session)(self, session, response)
    async def do_teardown_request(
        self, exc: Optional[BaseException], request_context: Optional[RequestContext] = None
    ) -> None:
        """Run app- then blueprint-level teardown-request functions and signal
        request_tearing_down. *exc* is the exception that ended the request
        (or None)."""
        request_ = (request_context or _request_ctx_stack.top).request
        functions = self.teardown_request_funcs[None]
        blueprint = request_.blueprint
        if blueprint is not None:
            functions = chain(functions, self.teardown_request_funcs[blueprint])
        for function in functions:
            await function(exc)
        await request_tearing_down.send(self, exc=exc)
    async def do_teardown_websocket(
        self, exc: Optional[BaseException], websocket_context: Optional[WebsocketContext] = None
    ) -> None:
        """Run app- then blueprint-level teardown-websocket functions and
        signal websocket_tearing_down."""
        websocket_ = (websocket_context or _websocket_ctx_stack.top).websocket
        functions = self.teardown_websocket_funcs[None]
        blueprint = websocket_.blueprint
        if blueprint is not None:
            functions = chain(functions, self.teardown_websocket_funcs[blueprint])
        for function in functions:
            await function(exc)
        await websocket_tearing_down.send(self, exc=exc)
    async def do_teardown_appcontext(self, exc: Optional[BaseException]) -> None:
        """Run app-context teardown functions and signal
        appcontext_tearing_down."""
        for function in self.teardown_appcontext_funcs:
            await function(exc)
        await appcontext_tearing_down.send(self, exc=exc)
    def app_context(self) -> AppContext:
        """Create an application context bound to this app."""
        return AppContext(self)
    def request_context(self, request: Request) -> RequestContext:
        """Create a request context for *request*."""
        return RequestContext(self, request)
    def websocket_context(self, websocket: Websocket) -> WebsocketContext:
        """Create a websocket context for *websocket*."""
        return WebsocketContext(self, websocket)
    async def run(
        self,
        host: str = "127.0.0.1",
        port: int = 5000,
        debug: Optional[bool] = None,
        use_reloader: bool = True,
        loop: Optional[asyncio.AbstractEventLoop] = None,
        ca_certs: Optional[str] = None,
        certfile: Optional[str] = None,
        keyfile: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """Start the development server, shutting down on SIGINT/SIGTERM.

        Unsupported extra keyword arguments trigger a warning (they may be
        Hypercorn options). NOTE(review): the serve coroutine is only
        scheduled via loop.create_task and is not awaited here -- presumably
        the caller runs the loop to completion; confirm before relying on
        this method to block until shutdown.
        """
        if kwargs:
            warnings.warn(
                f"Additional arguments, {','.join(kwargs.keys())}, are not supported.\n"
                "They may be supported by Hypercorn, which is the ASGI server Quart "
                "uses by default. This method is meant for development and debugging."
            )
        if loop is None:
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
        loop.set_debug(debug or False)
        shutdown_event = asyncio.Event()
        def _signal_handler(*_: Any) -> None:
            shutdown_event.set()
        try:
            loop.add_signal_handler(signal.SIGTERM, _signal_handler)
            loop.add_signal_handler(signal.SIGINT, _signal_handler)
        except (AttributeError, NotImplementedError):
            # Signal handlers are unavailable on some platforms/loops
            # (e.g. Windows); run without graceful signal shutdown.
            pass
        task = self.run_task(
            host,
            port,
            debug,
            use_reloader,
            ca_certs,
            certfile,
            keyfile,
            shutdown_trigger=shutdown_event.wait,
        )
        scheme = "https" if certfile is not None and keyfile is not None else "http"
        print(f"Running on {scheme}://{host}:{port} (CTRL + C to quit)")
        loop.create_task(task)
    def run_task(
        self,
        host: str = "127.0.0.1",
        port: int = 5000,
        debug: Optional[bool] = None,
        use_reloader: bool = True,
        ca_certs: Optional[str] = None,
        certfile: Optional[str] = None,
        keyfile: Optional[str] = None,
        shutdown_trigger: Optional[Callable[..., Awaitable[None]]] = None,
    ) -> Coroutine[None, None, None]:
        """Build the Hypercorn serve coroutine for this app.

        The coroutine is returned unstarted so callers can schedule it on a
        loop of their choosing. Supplying *debug* also toggles the app's
        debug flag as a side effect.
        """
        config = HyperConfig()
        config.access_log_format = "%(h)s %(r)s %(s)s %(b)s %(D)s"
        config.accesslog = create_serving_logger()
        config.bind = [f"{host}:{port}"]
        config.ca_certs = ca_certs
        config.certfile = certfile
        if debug is not None:
            self.debug = debug
        config.errorlog = config.accesslog
        config.keyfile = keyfile
        config.use_reloader = use_reloader
        return serve(self, config, shutdown_trigger=shutdown_trigger)
    def test_client(self) -> QuartClient:
        """Create a test client for this app."""
        return self.test_client_class(self)
    def test_request_context(
        self,
        path: str,
        *,
        method: str = "GET",
        headers: Optional[Union[dict, Headers]] = None,
        query_string: Optional[dict] = None,
        scheme: str = "http",
        send_push_promise: Callable[[str, Headers], Awaitable[None]] = no_op_push,
        data: Optional[AnyStr] = None,
        form: Optional[dict] = None,
        json: Any = sentinel,
        root_path: str = "",
        http_version: str = "1.1",
    ) -> RequestContext:
        """Create a request context for testing with a pre-resolved body.

        Body content comes from *data*, *form* or *json*; the matching
        content-type headers are merged into *headers*.
        """
        headers, path, query_string_bytes = make_test_headers_path_and_query_string(
            self, path, headers, query_string
        )
        request_body, body_headers = make_test_body_with_headers(data, form, json)
        for key, value in body_headers.items():
            headers[key] = value
        request = self.request_class(
            method,
            scheme,
            path,
            query_string_bytes,
            headers,
            root_path,
            http_version,
            send_push_promise=send_push_promise,
        )
        # Resolve the body immediately so handlers can await it in tests.
        request.body.set_result(request_body)
        return self.request_context(request)
async def try_trigger_before_first_request_functions(self) -> None:
if self._got_first_request:
return
self.teardown_appcontext_funcs = list(reversed(self.teardown_appcontext_funcs))
for key, value in self.teardown_request_funcs.items():
self.teardown_request_funcs[key] = list(reversed(value))
for key, value in self.teardown_websocket_funcs.items():
self.teardown_websocket_funcs[key] = list(reversed(value))
async with self._first_request_lock:
if self._got_first_request:
return
for function in self.before_first_request_funcs:
await function()
self._got_first_request = True
    async def make_default_options_response(self) -> Response:
        """Build the automatic OPTIONS response listing the matched rule's
        allowed methods in the Allow header."""
        methods = _request_ctx_stack.top.url_adapter.allowed_methods()
        return self.response_class("", headers={"Allow": ", ".join(methods)})
    async def make_response(self, result: ResponseReturnValue) -> Response:
        """Coerce a view function's return value into a Response.

        Accepts a Response, a str/bytes body, a dict (JSONified), or a tuple
        of (body[, status][, headers]). Raises TypeError when the body is
        None.
        """
        status_or_headers = None
        headers: Optional[dict] = None
        status = None
        if isinstance(result, tuple):
            # Pad short tuples so (body,), (body, status) and
            # (body, status, headers) all unpack into three slots.
            value, status_or_headers, headers = result + (None,) * (3 - len(result))
        else:
            value = result
        if value is None:
            raise TypeError("The response value returned by the view function cannot be None")
        if isinstance(status_or_headers, (dict, list)):
            # Two-element form (body, headers): the middle slot was headers.
            headers = status_or_headers
            status = None
        elif status_or_headers is not None:
            status = status_or_headers
        if not isinstance(value, Response):
            if isinstance(value, dict):
                response = jsonify(value)
            else:
                response = self.response_class(value)
        else:
            response = value
        if status is not None:
            response.status_code = int(status)
        if headers is not None:
            for key, value in headers.items():
                response.headers[key] = value
        return response
    async def handle_request(self, request: Request) -> Response:
        """Entry point for one HTTP request; unexpected errors are converted
        to responses."""
        async with self.request_context(request) as request_context:
            try:
                return await self.full_dispatch_request(request_context)
            except asyncio.CancelledError:
                # Propagate cancellation (e.g. client disconnect) untouched.
                raise
            except Exception as error:
                return await self.handle_exception(error)
    async def full_dispatch_request(
        self, request_context: Optional[RequestContext] = None
    ) -> Response:
        """Run the full request pipeline: first-request setup, preprocess
        (which may short-circuit), dispatch, user-exception handling, then
        finalization."""
        await self.try_trigger_before_first_request_functions()
        await request_started.send(self)
        try:
            result = await self.preprocess_request(request_context)
            if result is None:
                result = await self.dispatch_request(request_context)
        except Exception as error:
            result = await self.handle_user_exception(error)
        return await self.finalize_request(result, request_context)
async def preprocess_request(
self, request_context: Optional[RequestContext] = None
) -> Optional[ResponseReturnValue]:
request_ = (request_context or _request_ctx_stack.top).request
blueprint = request_.blueprint
processors = self.url_value_preprocessors[None]
if blueprint is not None:
processors = chain(processors, self.url_value_preprocessors[blueprint])
for processor in processors:
processor(request.endpoint, request.view_args)
functions = self.before_request_funcs[None]
if blueprint is not None:
functions = chain(functions, self.before_request_funcs[blueprint])
for function in functions:
result = await function()
if result is not None:
return result
return None
    async def dispatch_request(
        self, request_context: Optional[RequestContext] = None
    ) -> ResponseReturnValue:
        """Invoke the view function matched to the request's URL rule.

        Raises any routing exception recorded during URL matching; automatic
        OPTIONS requests are answered without calling a view.
        """
        request_ = (request_context or _request_ctx_stack.top).request
        if request_.routing_exception is not None:
            raise request_.routing_exception
        if request_.method == "OPTIONS" and request_.url_rule.provide_automatic_options:
            return await self.make_default_options_response()
        handler = self.view_functions[request_.url_rule.endpoint]
        return await handler(**request_.view_args)
    async def finalize_request(
        self,
        result: ResponseReturnValue,
        request_context: Optional[RequestContext] = None,
        from_error_handler: bool = False,
    ) -> Response:
        """Turn a view result into a finalized Response.

        When invoked from an error handler, finalization errors are logged
        instead of raised so the error response can still be sent.
        """
        response = await self.make_response(result)
        try:
            response = await self.process_response(response, request_context)
            await request_finished.send(self, response=response)
        except Exception:
            if not from_error_handler:
                raise
            self.logger.exception("Request finalizing errored")
        return response
    async def process_response(
        self, response: Response, request_context: Optional[RequestContext] = None
    ) -> Response:
        """Apply after-request functions and persist the session.

        Order: per-request functions registered during handling, then the
        blueprint's, then app-level after-request functions.
        """
        request_ = (request_context or _request_ctx_stack.top).request
        functions = (request_context or _request_ctx_stack.top)._after_request_functions
        blueprint = request_.blueprint
        if blueprint is not None:
            functions = chain(functions, self.after_request_funcs[blueprint])
        functions = chain(functions, self.after_request_funcs[None])
        for function in functions:
            response = await function(response)
        session_ = (request_context or _request_ctx_stack.top).session
        if not self.session_interface.is_null_session(session_):
            await self.save_session(session_, response)
        return response
    async def handle_websocket(self, websocket: Websocket) -> Optional[Response]:
        """Entry point for one websocket; unexpected errors are converted to
        responses."""
        async with self.websocket_context(websocket) as websocket_context:
            try:
                return await self.full_dispatch_websocket(websocket_context)
            except asyncio.CancelledError:
                # Propagate cancellation (e.g. client disconnect) untouched.
                raise
            except Exception as error:
                return await self.handle_websocket_exception(error)
    async def full_dispatch_websocket(
        self, websocket_context: Optional[WebsocketContext] = None
    ) -> Optional[Response]:
        """Run the full websocket pipeline: first-request setup, preprocess
        (which may short-circuit), dispatch, user-exception handling, then
        finalization."""
        await self.try_trigger_before_first_request_functions()
        await websocket_started.send(self)
        try:
            result = await self.preprocess_websocket(websocket_context)
            if result is None:
                result = await self.dispatch_websocket(websocket_context)
        except Exception as error:
            result = await self.handle_user_exception(error)
        return await self.finalize_websocket(result, websocket_context)
    async def preprocess_websocket(
        self, websocket_context: Optional[WebsocketContext] = None
    ) -> Optional[ResponseReturnValue]:
        """Run URL value preprocessors and before-websocket functions.

        Returns the first non-None result from a before-websocket function
        (which short-circuits dispatch), otherwise None.
        """
        websocket_ = (websocket_context or _websocket_ctx_stack.top).websocket
        blueprint = websocket_.blueprint
        processors = self.url_value_preprocessors[None]
        if blueprint is not None:
            processors = chain(processors, self.url_value_preprocessors[blueprint])
        for processor in processors:
            processor(websocket_.endpoint, websocket_.view_args)
        functions = self.before_websocket_funcs[None]
        if blueprint is not None:
            functions = chain(functions, self.before_websocket_funcs[blueprint])
        for function in functions:
            result = await function()
            if result is not None:
                return result
        return None
    async def dispatch_websocket(
        self, websocket_context: Optional[WebsocketContext] = None
    ) -> None:
        """Invoke the websocket view matched to the websocket's URL rule,
        raising any recorded routing exception first."""
        websocket_ = (websocket_context or _websocket_ctx_stack.top).websocket
        if websocket_.routing_exception is not None:
            raise websocket_.routing_exception
        handler = self.view_functions[websocket_.url_rule.endpoint]
        return await handler(**websocket_.view_args)
    async def finalize_websocket(
        self,
        result: ResponseReturnValue,
        websocket_context: Optional[WebsocketContext] = None,
        from_error_handler: bool = False,
    ) -> Optional[Response]:
        """Turn a websocket view result (possibly None) into a response.

        When invoked from an error handler, finalization errors are logged
        instead of raised.
        """
        if result is not None:
            response = await self.make_response(result)
        else:
            # Websocket views may legitimately return nothing.
            response = None
        try:
            response = await self.postprocess_websocket(response, websocket_context)
            await websocket_finished.send(self, response=response)
        except Exception:
            if not from_error_handler:
                raise
            self.logger.exception("Request finalizing errored")
        return response
async def postprocess_websocket(
self, response: Optional[Response], websocket_context: Optional[WebsocketContext] = None
) -> Response:
websocket_ = (websocket_context or _websocket_ctx_stack.top).websocket
functions = (websocket_context or _websocket_ctx_stack.top)._after_websocket_functions
blueprint = websocket_.blueprint
if blueprint is not None:
functions = chain(functions, self.after_websocket_funcs[blueprint])
functions = chain(functions, self.after_websocket_funcs[None])
for function in functions:
response = await function(response)
session_ = (websocket_context or _request_ctx_stack.top).session
if not self.session_interface.is_null_session(session_):
if response is None and isinstance(session_, SecureCookieSession) and session_.modified:
self.logger.exception(
"Secure Cookie Session modified during websocket handling. "
"These modifications will be lost as a cookie cannot be set."
)
else:
await self.save_session(session_, response)
return response
    async def __call__(self, scope: dict, receive: Callable, send: Callable) -> None:
        """ASGI entry point; delegates to asgi_app so middleware can wrap it."""
        await self.asgi_app(scope, receive, send)
    async def asgi_app(self, scope: dict, receive: Callable, send: Callable) -> None:
        """Dispatch an ASGI scope to the matching protocol handler class
        (http, websocket or lifespan)."""
        if scope["type"] == "http":
            asgi_handler = self.asgi_http_class(self, scope)
        elif scope["type"] == "websocket":
            asgi_handler = self.asgi_websocket_class(self, scope)
        elif scope["type"] == "lifespan":
            asgi_handler = self.asgi_lifespan_class(self, scope)
        else:
            raise RuntimeError("ASGI Scope type is unknown")
        await asgi_handler(receive, send)
    async def startup(self) -> None:
        """Reset the first-request flag and run before-serving functions
        inside an app context."""
        self._got_first_request = False
        async with self.app_context():
            for func in self.before_serving_funcs:
                await func()
    async def shutdown(self) -> None:
        """Run after-serving functions inside an app context."""
        async with self.app_context():
            for func in self.after_serving_funcs:
                await func()
def _find_exception_handler(
error: Exception, exception_handlers: Dict[Exception, Callable]
) -> Optional[Callable]:
for exception, handler in exception_handlers.items():
if isinstance(error, exception):
return handler
return None
def _cancel_all_tasks(loop: asyncio.AbstractEventLoop) -> None:
tasks = [task for task in asyncio.all_tasks(loop) if not task.done()]
if not tasks:
return
for task in tasks:
task.cancel()
loop.run_until_complete(asyncio.gather(*tasks, loop=loop, return_exceptions=True))
for task in tasks:
if not task.cancelled() and task.exception() is not None:
loop.call_exception_handler(
{
"message": "unhandled exception during shutdown",
"exception": task.exception(),
"task": task,
}
)
| true | true |
1c2d6e01456bf2346f5ad723a642aa5a13a6b14a | 7,769 | py | Python | epic_barcoder/epic_barcoder.py | manutamminen/epic_barcoder | a9055539fe89fb77bb9661926da1da80691b775d | [
"MIT"
] | null | null | null | epic_barcoder/epic_barcoder.py | manutamminen/epic_barcoder | a9055539fe89fb77bb9661926da1da80691b775d | [
"MIT"
] | null | null | null | epic_barcoder/epic_barcoder.py | manutamminen/epic_barcoder | a9055539fe89fb77bb9661926da1da80691b775d | [
"MIT"
] | null | null | null | import os
import subprocess
import string
import random
import time
from collections import defaultdict, Counter
import epride as ep
# Bridge oligo sequences keyed by target gene/marker. Each value is the
# bridge as sequenced; filter_bridge() searches reads for the expanded
# reverse complement of these (degenerate IUPAC bases W/K/S/M/Y are present).
bridges_dict = {"16S": "GWATTACCGCGGCKGCTGCATCTTCTCCAAATGGGTCATGATC",
                "18S": "AAGAACGGCCATGCACCACCACATCTTCTCCAAATGGGTCATGATC",
                "narG2": "ACCGACATGCCGWTSCTGGTCATCTTCTCCAAATGGGTCATGATC",
                "nosZ2": "AACAAGTTCTCSAAGGACCGCATCTTCTCCAAATGGGTCATGATC",
                "nosZ3": "CTCMAAGGACCGGTTCMTSMCATCTTCTCCAAATGGGTCATGATC",
                "norB2": "GCACCGGYCACCAYTAYTWCCATCTTCTCCAAATGGGTCATGATC"}
# Minimum read length per marker used for size filtering downstream.
size_filter_dict = {'narG2': 100, 'norB2': 90, 'nosZ3': 100, '18S': 70, '16S': 90, 'nosZ2': 100}
# Array-job submission script templates, keyed by scheduler. Formatted with
# .format(c=batch_info); {c[...]} placeholders are filled from that dict.
array_dict = {'lsf': '''#!/bin/bash
#BSUB -n 1
#BSUB -R "rusage[mem={c[mem]}]"
#BSUB -J {c[job]}[1-{c[job_no]}]
#BSUB -o wrf.%I_tmp.out
#BSUB -e wrf.%I_tmp.out
#BSUB -W {c[time]}
cd {c[home_dir]}
name=$(sed -n "$LSB_JOBINDEX"p {c[namelist_file]})
{c[command]}
''', 'slurm': '''#!/bin/bash
#SBATCH --mem-per-cpu={c[mem]}
#SBATCH -J {c[job]}
#SBATCH --array=1-{c[job_no]}
#SBATCH -t {c[time]}
#SBATCH -o array_job_out_%j_tmp.out
#SBATCH -e array_job_err_%j_tmp.out
#SBATCH -n 1
#SBATCH -p serial
cd {c[home_dir]}
name=$(sed -n "$SLURM_ARRAY_TASK_ID"p {c[namelist_file]})
{c[command]}
'''}
# Single (non-array) batch-job templates; the 'lsf' template is a stub.
batch_dict = {'lsf': '''
''', 'slurm': '''#!/bin/bash -l
#SBATCH --mem-per-cpu={c[mem]}
#SBATCH -J {c[job]}
#SBATCH -t {c[time]}
#SBATCH -o {c[job]}_tmp_output.txt
#SBATCH -e {c[job]}_tmp_errors.txt
#SBATCH -n 1
#
cd {c[home_dir]}
{c[command]}
'''}
def filter_bridge(bc_seq, seq_type, bridge_seq):
    """Yield [annotated_header, downstream_seq] for reads containing the bridge.

    The reverse complement of *bridge_seq* is expanded over its degenerate
    bases; a read qualifies when a variant occurs in it and the upstream
    portion is exactly 20 bp (the droplet barcode). The barcode, and
    optionally *seq_type*, are appended to the FASTA header.
    """
    variants = ep.expand_primers(ep.reverse_complement(bridge_seq))
    for header, sequence in bc_seq:
        for variant in variants:
            if variant not in sequence:
                continue
            barcode, downstream = sequence.split(variant)
            if len(barcode) != 20:
                continue
            annotated = "{} droplet_bc={}".format(header.strip(), barcode)
            if seq_type:
                annotated = "{} sequence_type={}".format(annotated, seq_type)
            yield([annotated, downstream])
def filter_reverse(bc_seq, rev_seq):
    """Yield [header, trimmed_seq] pairs cut at the reverse-primer match.

    *rev_seq* is expanded over its degenerate bases; for every variant found
    in a read, the portion preceding the match is emitted. Reads with no
    match are dropped.
    """
    variants = ep.expand_primers(rev_seq)
    for header, sequence in bc_seq:
        for variant in variants:
            if variant in sequence:
                trimmed, _ = sequence.split(variant)
                yield([header, trimmed])
def process_barcode_info(bc_seq, output_file, bridge_seq, reverse_seq=None, seq_type=None):
    """Annotate reads with droplet barcodes and write them to a FASTA file.

    Reads are filtered for the bridge sequence; when *reverse_seq* is given
    they are additionally trimmed at the reverse primer before writing.
    """
    annotated = filter_bridge(ep.read_fasta(bc_seq), seq_type, bridge_seq)
    if not reverse_seq:
        ep.write_fasta(annotated, output_file)
    else:
        trimmed = filter_reverse(annotated, reverse_seq)
        ep.write_fasta(trimmed, output_file)
def get_seed_dict(uc_file):
    """Map sequence IDs to their cluster seed IDs from a USEARCH .uc file.

    Hit records ('H') map the query (column 9, first token) to its seed
    (column 10, first token); seed records ('S') map the sequence to itself.
    All other record types are ignored.
    """
    seed_dict = {}
    with open(uc_file) as handle:
        for record in handle:
            fields = record.split("\t")
            record_type = fields[0]
            if record_type not in ("H", "S"):
                continue
            query = fields[8].split()[0]
            seed_dict[query] = fields[9].split()[0] if record_type == "H" else query
    return seed_dict
def add_otus_to_fasta(seq_file, uc_file, output_file):
    """Append 'OTU=<seed_id>' to each FASTA header using a clustering .uc file.

    The seed for each read is looked up by the first whitespace-delimited
    token of its header (without the leading '>').
    """
    seeds = get_seed_dict(uc_file)
    annotated = []
    for header, sequence in ep.read_fasta(seq_file):
        short_id = header[1:].split()[0]
        annotated.append(["{} OTU={}".format(header.strip(), seeds[short_id]), sequence])
    ep.write_fasta(annotated, output_file)
def generate_id(size=8):
    """Return a random identifier of *size* uppercase letters and digits.

    Used for temporary file names; not cryptographically secure.
    """
    alphabet = string.ascii_uppercase + string.digits
    picks = [random.choice(alphabet) for _ in range(size)]
    return ''.join(picks)
def make_split_dict(chunk_iter, no_splits):
    """Partition *chunk_iter* into consecutive groups keyed by random ids.

    A new random group id is started every ``len(chunks) // no_splits`` items,
    giving roughly *no_splits* groups (slightly more when the length is not an
    exact multiple).

    :param chunk_iter: iterable of items to partition
    :param no_splits: desired (approximate) number of groups
    :return: defaultdict mapping a random id to the list of items in that group
    """
    chunk_list = list(chunk_iter)
    # Guard against a zero divisor: fewer items than requested splits would
    # otherwise make chunk_size 0 and raise ZeroDivisionError on the modulo.
    chunk_size = max(1, len(chunk_list) // no_splits)
    split_dict = defaultdict(list)
    chunk_id = None
    for ix, chunk in enumerate(chunk_list):
        if ix % chunk_size == 0:
            chunk_id = generate_id()
        split_dict[chunk_id].append(chunk)
    return split_dict
def split_seqs(seq_file, no_splits):
    """Split a FASTA file into ~*no_splits* "<id>_tmp.fasta" files; return the ids."""
    groups = make_split_dict(ep.read_fasta(seq_file), no_splits)
    for group_id, records in groups.items():
        ep.write_fasta(records, group_id + "_tmp.fasta")
    return list(groups.keys())
def run_batch_job(batch_command, scheduler='slurm', memory=2048, run_time='02:00:00', cleanup=True):
    """Submit one or more shell commands as scheduler batch jobs and wait for them.

    :param batch_command: a single command string or a list of command strings
    :param scheduler: template key in ``batch_dict`` (only 'slurm' is submitted)
    :param memory: per-CPU memory request in MB
    :param run_time: wall-clock limit as HH:MM:SS
    :param cleanup: remove files containing "tmp" from the cwd when done
    """
    user = subprocess.check_output('whoami', universal_newlines=True).strip()
    home_dir = os.getcwd()
    if isinstance(batch_command, str):
        _submit_batch_command(batch_command, scheduler, memory, run_time, home_dir)
    elif isinstance(batch_command, list):
        for command in batch_command:
            _submit_batch_command(command, scheduler, memory, run_time, home_dir)
    time.sleep(10)
    # Poll the queue until only the squeue header and the trailing newline remain.
    while True:
        jobs = subprocess.check_output(['squeue', '-u', user], universal_newlines=True).split("\n")
        if len(jobs) == 2:
            break
        print("{} jobs left.".format(len(jobs) - 2))
        time.sleep(5)
    if cleanup:
        print("Cleaning up.")
        for tmp_file in os.listdir():
            if "tmp" in tmp_file:
                os.remove(tmp_file)
    print("Done!")


def _submit_batch_command(command, scheduler, memory, run_time, home_dir):
    """Render one batch script from the scheduler template and submit it."""
    batch_info = {'mem': memory, 'job': generate_id(), 'time': run_time,
                  'home_dir': home_dir, 'command': command}
    script = batch_dict[scheduler].format(c=batch_info)
    script_name = generate_id() + "_tmp.sh"
    with open(script_name, "w") as f:
        f.write(script)
    if scheduler == 'slurm':
        subprocess.call(['sbatch', script_name])
def run_array_job(seqs, batch_command, post_command=None, no_splits=1000, scheduler='slurm', memory=2048, run_time='02:00:00', cleanup=True):
    """Split a FASTA file into chunks and process them as a scheduler array job.

    :param seqs: path to the FASTA file to split
    :param batch_command: command template run for each chunk (reads ``$name``)
    :param post_command: optional shell command run after all jobs finish
    :param no_splits: approximate number of chunks / array tasks
    :param scheduler: template key in ``array_dict`` (only 'slurm' is submitted)
    :param memory: per-CPU memory request in MB
    :param run_time: wall-clock limit as HH:MM:SS
    :param cleanup: remove files containing "tmp" from the cwd when done
    """
    job_name = generate_id()
    user = subprocess.check_output('whoami', universal_newlines=True).strip()
    namelist = job_name + "_tmp.namelist"
    seq_ids = split_seqs(seqs, no_splits)
    job_no = len(seq_ids)
    home_dir = os.getcwd()
    batch_info = {'mem': memory, 'job': job_name, 'job_no': job_no, 'time': run_time,
                  'home_dir': home_dir, 'namelist_file': namelist, 'command': batch_command}
    array = array_dict[scheduler].format(c=batch_info)
    array_file_name = generate_id() + "_tmp.sh"
    with open(array_file_name, "w") as f:
        for line in array:  # iterates characters of the rendered script; output is identical
            f.write(line)
    # The namelist maps each array index to one chunk's FASTA file name.
    with open(namelist, "w") as f:
        for item in seq_ids:
            f.write(item + "_tmp.fasta\n")
    if scheduler == 'slurm':
        subprocess.call(['sbatch', array_file_name])
    print("A total of {} jobs.".format(job_no))
    time.sleep(10)
    # Poll until only the squeue header and the trailing newline remain.
    while True:
        jobs = subprocess.check_output(['squeue', '-u', user],
                                       universal_newlines=True).split("\n")
        if len(jobs) == 2:
            break
        print("{} jobs left.".format(len(jobs) - 2))
        time.sleep(5)
    if post_command:
        print("Executing the post-batch command.")
        subprocess.call(post_command, shell=True)
    if cleanup:
        print("Cleaning up.")
        [os.remove(tmp_file) for tmp_file in os.listdir() if "tmp" in tmp_file]
    print("Done!")
| 33.487069 | 141 | 0.61179 | import os
import subprocess
import string
import random
import time
from collections import defaultdict, Counter
import epride as ep
# Bridge adapter sequences keyed by marker gene (IUPAC-degenerate bases allowed).
bridges_dict = {"16S": "GWATTACCGCGGCKGCTGCATCTTCTCCAAATGGGTCATGATC",
                "18S": "AAGAACGGCCATGCACCACCACATCTTCTCCAAATGGGTCATGATC",
                "narG2": "ACCGACATGCCGWTSCTGGTCATCTTCTCCAAATGGGTCATGATC",
                "nosZ2": "AACAAGTTCTCSAAGGACCGCATCTTCTCCAAATGGGTCATGATC",
                "nosZ3": "CTCMAAGGACCGGTTCMTSMCATCTTCTCCAAATGGGTCATGATC",
                "norB2": "GCACCGGYCACCAYTAYTWCCATCTTCTCCAAATGGGTCATGATC"}
# Per-marker size thresholds — presumably minimum sequence length in nt; TODO confirm.
size_filter_dict = {'narG2': 100, 'norB2': 90, 'nosZ3': 100, '18S': 70, '16S': 90, 'nosZ2': 100}
# Array-job script templates per scheduler, rendered with .format(c=batch_info);
# each task looks up its chunk file from the namelist via its array index.
array_dict = {'lsf': '''#!/bin/bash
#BSUB -n 1
#BSUB -R "rusage[mem={c[mem]}]"
#BSUB -J {c[job]}[1-{c[job_no]}]
#BSUB -o wrf.%I_tmp.out
#BSUB -e wrf.%I_tmp.out
#BSUB -W {c[time]}
cd {c[home_dir]}
name=$(sed -n "$LSB_JOBINDEX"p {c[namelist_file]})
{c[command]}
''', 'slurm': '''#!/bin/bash
#SBATCH --mem-per-cpu={c[mem]}
#SBATCH -J {c[job]}
#SBATCH --array=1-{c[job_no]}
#SBATCH -t {c[time]}
#SBATCH -o array_job_out_%j_tmp.out
#SBATCH -e array_job_err_%j_tmp.out
#SBATCH -n 1
#SBATCH -p serial
cd {c[home_dir]}
name=$(sed -n "$SLURM_ARRAY_TASK_ID"p {c[namelist_file]})
{c[command]}
'''}
# Single-job script templates per scheduler; the 'lsf' entry is an empty stub.
batch_dict = {'lsf': '''
''', 'slurm': '''#!/bin/bash -l
#SBATCH --mem-per-cpu={c[mem]}
#SBATCH -J {c[job]}
#SBATCH -t {c[time]}
#SBATCH -o {c[job]}_tmp_output.txt
#SBATCH -e {c[job]}_tmp_errors.txt
#SBATCH -n 1
#
cd {c[home_dir]}
{c[command]}
'''}
def filter_bridge(bc_seq, seq_type, bridge_seq):
    """Yield [annotated-header, downstream-seq] for reads containing a bridge adapter.

    The 20 nt upstream of the (reverse-complemented, expanded) bridge is the
    droplet barcode; it and the optional *seq_type* are recorded in the header.
    """
    expanded_bridges = ep.expand_primers(ep.reverse_complement(bridge_seq))
    for seq_id, seq in bc_seq:
        for bridge in expanded_bridges:
            if bridge in seq:
                # NOTE(review): split() raises ValueError if the bridge occurs
                # more than once in the read — confirm inputs guarantee one hit.
                bc, rest = seq.split(bridge)
                if len(bc) == 20:
                    seq_id = "{} droplet_bc={}".format(seq_id.strip(), bc)
                    if seq_type:
                        seq_id = "{} sequence_type={}".format(seq_id, seq_type)
                    yield([seq_id, rest])
def filter_reverse(bc_seq, rev_seq):
    """Yield [header, upstream-seq] for reads containing a reverse-primer variant."""
    expanded_reverses = ep.expand_primers(rev_seq)
    for seq_id, seq in bc_seq:
        for reverse in expanded_reverses:
            if reverse in seq:
                # NOTE(review): unpacking fails if the primer occurs twice — confirm.
                good_seq, _ = seq.split(reverse)
                yield([seq_id, good_seq])
def process_barcode_info(bc_seq, output_file, bridge_seq, reverse_seq=None, seq_type=None):
    """Filter *bc_seq* on the bridge adapter (and optional reverse primer); write FASTA."""
    bc_lst = ep.read_fasta(bc_seq)
    bridge_filtered = filter_bridge(bc_lst, seq_type, bridge_seq)
    if reverse_seq:
        out_iter = filter_reverse(bridge_filtered, reverse_seq)
        ep.write_fasta(out_iter, output_file)
    else:
        ep.write_fasta(bridge_filtered, output_file)
def get_seed_dict(uc_file):
    """Map each sequence id to its cluster seed id from a ``.uc`` cluster file."""
    seed_dict = {}
    with open(uc_file) as f:
        for line in f:
            if line.split("\t")[0] == "H":  # hit: sequence clustered to a seed
                seq_id = line.split("\t")[8].split()[0]
                seed_id = line.split("\t")[9].split()[0]
                seed_dict[seq_id] = seed_id
            if line.split("\t")[0] == "S":  # seed: maps to itself
                seq_id = line.split("\t")[8].split()[0]
                seed_dict[seq_id] = seq_id
    return seed_dict
def add_otus_to_fasta(seq_file, uc_file, output_file):
    """Write *seq_file* to *output_file* with each header tagged by its OTU seed."""
    seeds = get_seed_dict(uc_file)
    seq_acc = []
    for seq_id, seq in ep.read_fasta(seq_file):
        short_id = seq_id[1:].split()[0]  # drop the leading '>' and description
        seed_id = seeds[short_id]
        new_seq_id = "{} OTU={}".format(seq_id.strip(), seed_id)
        seq_acc.append([new_seq_id, seq])
    ep.write_fasta(seq_acc, output_file)
def generate_id(size=8):
    """Return a random uppercase+digit string of length *size* for temp file names."""
    chars = string.ascii_uppercase + string.digits
    return ''.join(random.choice(chars) for _ in range(size))
def make_split_dict(chunk_iter, no_splits):
    """Group items from *chunk_iter* into ~*no_splits* lists keyed by random ids."""
    chunk_list = list(chunk_iter)
    chunk_len = len(chunk_list)
    # NOTE(review): chunk_size is 0 when there are fewer items than splits,
    # which makes the modulo below raise ZeroDivisionError — confirm inputs.
    chunk_size = int(chunk_len/no_splits)
    split_dict = defaultdict(list)
    for ix, chunk in enumerate(chunk_list):
        if ix % chunk_size == 0:
            # Start a new group every chunk_size items.
            chunk_id = generate_id()
        split_dict[chunk_id].append(chunk)
    return split_dict
def split_seqs(seq_file, no_splits):
    """Split a FASTA file into ~*no_splits* "<id>_tmp.fasta" files; return the ids."""
    seqs = ep.read_fasta(seq_file)
    split_dict = make_split_dict(seqs, no_splits)
    for key, val in split_dict.items():
        seq_name = key + "_tmp.fasta"
        ep.write_fasta(val, seq_name)
    return list(split_dict.keys())
def run_batch_job(batch_command, scheduler='slurm', memory=2048, run_time='02:00:00', cleanup=True):
    """Submit one command (str) or several (list) as batch jobs, then poll until the queue drains."""
    user = subprocess.check_output('whoami', universal_newlines=True).strip()
    home_dir = os.getcwd()
    if isinstance(batch_command, str):
        job_name = generate_id()
        batch_info = {'mem': memory, 'job': job_name, 'time': run_time,
                      'home_dir': home_dir, 'command': batch_command}
        batch = batch_dict[scheduler].format(c=batch_info)
        batch_file_name = generate_id() + "_tmp.sh"
        with open(batch_file_name, "w") as f:
            for line in batch:  # iterates characters; output is identical to one write
                f.write(line)
        if scheduler == 'slurm':
            subprocess.call(['sbatch', batch_file_name])
    elif isinstance(batch_command, list):
        for command in batch_command:
            job_name = generate_id()
            batch_info = {'mem': memory, 'job': job_name, 'time': run_time,
                          'home_dir': home_dir, 'command': command}
            batch = batch_dict[scheduler].format(c=batch_info)
            batch_file_name = generate_id() + "_tmp.sh"
            with open(batch_file_name, "w") as f:
                for line in batch:
                    f.write(line)
            if scheduler == 'slurm':
                subprocess.call(['sbatch', batch_file_name])
    time.sleep(10)
    # Poll until only the squeue header and the trailing newline remain.
    while True:
        jobs = subprocess.check_output(['squeue', '-u', user], universal_newlines=True).split("\n")
        if len(jobs) == 2:
            break
        print("{} jobs left.".format(len(jobs) - 2))
        time.sleep(5)
    if cleanup:
        print("Cleaning up.")
        [os.remove(tmp_file) for tmp_file in os.listdir() if "tmp" in tmp_file]
    print("Done!")
def run_array_job(seqs, batch_command, post_command=None, no_splits=1000, scheduler='slurm', memory=2048, run_time='02:00:00', cleanup=True):
    """Split FASTA *seqs* into chunks, run *batch_command* over them as an array job, wait, then optionally run *post_command* and clean up."""
    job_name = generate_id()
    user = subprocess.check_output('whoami', universal_newlines=True).strip()
    namelist = job_name + "_tmp.namelist"
    seq_ids = split_seqs(seqs, no_splits)
    job_no = len(seq_ids)
    home_dir = os.getcwd()
    batch_info = {'mem': memory, 'job': job_name, 'job_no': job_no, 'time': run_time,
                  'home_dir': home_dir, 'namelist_file': namelist, 'command': batch_command}
    array = array_dict[scheduler].format(c=batch_info)
    array_file_name = generate_id() + "_tmp.sh"
    with open(array_file_name, "w") as f:
        for line in array:  # iterates characters of the rendered script
            f.write(line)
    # The namelist maps each array index to one chunk's FASTA file name.
    with open(namelist, "w") as f:
        for item in seq_ids:
            f.write(item + "_tmp.fasta\n")
    if scheduler == 'slurm':
        subprocess.call(['sbatch', array_file_name])
    print("A total of {} jobs.".format(job_no))
    time.sleep(10)
    # Poll until only the squeue header and the trailing newline remain.
    while True:
        jobs = subprocess.check_output(['squeue', '-u', user],
                                       universal_newlines=True).split("\n")
        if len(jobs) == 2:
            break
        print("{} jobs left.".format(len(jobs) - 2))
        time.sleep(5)
    if post_command:
        print("Executing the post-batch command.")
        subprocess.call(post_command, shell=True)
    if cleanup:
        print("Cleaning up.")
        [os.remove(tmp_file) for tmp_file in os.listdir() if "tmp" in tmp_file]
    print("Done!")
| true | true |
1c2d6e28c1806da22d3fad7024f4b9259a7a8ef1 | 80 | py | Python | src/waldur_mastermind/notifications/__init__.py | geant-multicloud/MCMS-mastermind | 81333180f5e56a0bc88d7dad448505448e01f24e | [
"MIT"
] | 26 | 2017-10-18T13:49:58.000Z | 2021-09-19T04:44:09.000Z | src/waldur_mastermind/notifications/__init__.py | geant-multicloud/MCMS-mastermind | 81333180f5e56a0bc88d7dad448505448e01f24e | [
"MIT"
] | 14 | 2018-12-10T14:14:51.000Z | 2021-06-07T10:33:39.000Z | src/waldur_mastermind/notifications/__init__.py | geant-multicloud/MCMS-mastermind | 81333180f5e56a0bc88d7dad448505448e01f24e | [
"MIT"
] | 32 | 2017-09-24T03:10:45.000Z | 2021-10-16T16:41:09.000Z | default_app_config = 'waldur_mastermind.notifications.apps.NotificationsConfig'
# Legacy Django app-config hook (pre-3.2): points Django at this app's AppConfig class.
default_app_config = 'waldur_mastermind.notifications.apps.NotificationsConfig'
| true | true |
1c2d6eef7384ad3b951f6a4a2afb394083967c06 | 12,194 | py | Python | aiida/cmdline/commands/cmd_code.py | sponce24/aiida-core | a31fdbf1a458c76cc30886a3c296ee859c0d3833 | [
"BSD-2-Clause",
"MIT"
] | null | null | null | aiida/cmdline/commands/cmd_code.py | sponce24/aiida-core | a31fdbf1a458c76cc30886a3c296ee859c0d3833 | [
"BSD-2-Clause",
"MIT"
] | null | null | null | aiida/cmdline/commands/cmd_code.py | sponce24/aiida-core | a31fdbf1a458c76cc30886a3c296ee859c0d3833 | [
"BSD-2-Clause",
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""`verdi code` command."""
from functools import partial
import click
import tabulate
from aiida.cmdline.commands.cmd_verdi import verdi
from aiida.cmdline.params import options, arguments
from aiida.cmdline.params.options.commands import code as options_code
from aiida.cmdline.utils import echo
from aiida.cmdline.utils.decorators import with_dbenv
from aiida.cmdline.utils.multi_line_input import ensure_scripts
from aiida.common.exceptions import InputValidationError
@verdi.group('code')
def verdi_code():
    """Setup and manage codes."""
    # Intentionally empty: subcommands register themselves on this group below.
def get_default(key, ctx):
    """
    Get the default value for ``key`` from the context's code builder.

    :param key: the name of the code-builder attribute to read
    :param ctx: the click context (which carries the ``code_builder``)
    :return: the attribute value, or None when it is missing or empty
    """
    try:
        value = getattr(ctx.code_builder, key)
        if value == '':
            value = None
    except (KeyError, AttributeError):
        # getattr raises AttributeError for a missing attribute; the original
        # only caught KeyError, so missing keys crashed instead of defaulting.
        value = None

    return value
def get_computer_name(ctx):
    """Return the name of the computer configured on the context's code builder."""
    builder = ctx.code_builder
    return builder.computer.name
def get_on_computer(ctx):
    """Return True when the builder's code is remote (i.e. not a local code)."""
    is_local = getattr(ctx.code_builder, 'is_local')
    return not is_local()
# pylint: disable=unused-argument
def set_code_builder(ctx, param, value):
    """Set the code spec for defaults of following options.

    Click parameter callback: stores a ``CodeBuilder`` for the selected code on
    the context so later options can compute contextual defaults from it, then
    returns the parameter value unchanged.
    """
    from aiida.orm.utils.builders.code import CodeBuilder
    ctx.code_builder = CodeBuilder.from_code(value)
    return value
@verdi_code.command('setup')
@options_code.LABEL()
@options_code.DESCRIPTION()
@options_code.INPUT_PLUGIN()
@options_code.ON_COMPUTER()
@options_code.COMPUTER()
@options_code.REMOTE_ABS_PATH()
@options_code.FOLDER()
@options_code.REL_PATH()
@options.PREPEND_TEXT()
@options.APPEND_TEXT()
@options.NON_INTERACTIVE()
@options.CONFIG_FILE()
@with_dbenv()
def setup_code(non_interactive, **kwargs):
    """Setup a new code."""
    from aiida.common.exceptions import ValidationError
    from aiida.orm.utils.builders.code import CodeBuilder

    if not non_interactive:
        # Interactive mode: let the user edit the prepend/append scripts.
        try:
            pre, post = ensure_scripts(kwargs.pop('prepend_text', ''), kwargs.pop('append_text', ''), kwargs)
        except InputValidationError as exception:
            raise click.BadParameter('invalid prepend and or append text: {}'.format(exception))

        kwargs['prepend_text'] = pre
        kwargs['append_text'] = post

    # A code is either installed on a computer or stored in the repository
    # and uploaded on each run.
    if kwargs.pop('on_computer'):
        kwargs['code_type'] = CodeBuilder.CodeType.ON_COMPUTER
    else:
        kwargs['code_type'] = CodeBuilder.CodeType.STORE_AND_UPLOAD

    code_builder = CodeBuilder(**kwargs)
    code = code_builder.new()

    try:
        code.store()
        code.reveal()  # ensure the new code is visible in `verdi code list`
    except ValidationError as exception:
        echo.echo_critical('Unable to store the Code: {}'.format(exception))

    echo.echo_success('Code<{}> {} created'.format(code.pk, code.full_label))
@verdi_code.command('duplicate')
@arguments.CODE(callback=set_code_builder)
@options_code.LABEL(contextual_default=partial(get_default, 'label'))
@options_code.DESCRIPTION(contextual_default=partial(get_default, 'description'))
@options_code.INPUT_PLUGIN(contextual_default=partial(get_default, 'input_plugin'))
@options_code.ON_COMPUTER(contextual_default=get_on_computer)
@options_code.COMPUTER(contextual_default=get_computer_name)
@options_code.REMOTE_ABS_PATH(contextual_default=partial(get_default, 'remote_abs_path'))
@options_code.FOLDER(contextual_default=partial(get_default, 'code_folder'))
@options_code.REL_PATH(contextual_default=partial(get_default, 'code_rel_path'))
@options.PREPEND_TEXT()
@options.APPEND_TEXT()
@options.NON_INTERACTIVE()
@click.option('--hide-original', is_flag=True, default=False, help='Hide the code being copied.')
@click.pass_context
@with_dbenv()
def code_duplicate(ctx, code, non_interactive, **kwargs):
    """Duplicate a code allowing to change some parameters.

    Option defaults are taken from the original code via the
    ``set_code_builder`` callback; only explicitly supplied values override.
    """
    from aiida.common.exceptions import ValidationError
    from aiida.orm.utils.builders.code import CodeBuilder

    if not non_interactive:
        # Interactive mode: let the user edit the prepend/append scripts.
        try:
            pre, post = ensure_scripts(kwargs.pop('prepend_text', ''), kwargs.pop('append_text', ''), kwargs)
        except InputValidationError as exception:
            raise click.BadParameter('invalid prepend and or append text: {}'.format(exception))

        kwargs['prepend_text'] = pre
        kwargs['append_text'] = post

    if kwargs.pop('on_computer'):
        kwargs['code_type'] = CodeBuilder.CodeType.ON_COMPUTER
    else:
        kwargs['code_type'] = CodeBuilder.CodeType.STORE_AND_UPLOAD

    if kwargs.pop('hide_original'):
        code.hide()

    # Start from the original code's builder and overwrite only supplied fields.
    code_builder = ctx.code_builder
    for key, value in kwargs.items():
        if value is not None:
            setattr(code_builder, key, value)
    new_code = code_builder.new()

    try:
        new_code.store()
        new_code.reveal()
    except ValidationError as exception:
        echo.echo_critical('Unable to store the Code: {}'.format(exception))

    echo.echo_success('Code<{}> {} created'.format(new_code.pk, new_code.full_label))
@verdi_code.command()
@arguments.CODE()
@options.VERBOSE()
@with_dbenv()
def show(code, verbose):
    """Display detailed information for a code."""
    # Render the code's full-text info as an aligned table on stdout.
    click.echo(tabulate.tabulate(code.get_full_text_info(verbose)))
@verdi_code.command()
@arguments.CODES()
@options.VERBOSE()
@options.DRY_RUN()
@options.FORCE()
@with_dbenv()
def delete(codes, verbose, dry_run, force):
    """Delete a code.

    Note that codes are part of the data provenance, and deleting a code will delete all calculations using it.
    """
    from aiida.manage.database.delete.nodes import delete_nodes

    # --force silences all output, --verbose raises it, default stays at 1.
    if force:
        verbosity = 0
    elif verbose:
        verbosity = 2
    else:
        verbosity = 1

    pks = [entry.pk for entry in codes]
    delete_nodes(pks, dry_run=dry_run, verbosity=verbosity, force=force)
@verdi_code.command()
@arguments.CODES()
@with_dbenv()
def hide(codes):
    """Hide one or more codes so they no longer appear in `verdi code list`."""
    for entry in codes:
        entry.hide()
        echo.echo_success('Code<{}> {} hidden'.format(entry.pk, entry.full_label))
@verdi_code.command()
@arguments.CODES()
@with_dbenv()
def reveal(codes):
    """Make one or more previously hidden codes visible in `verdi code list` again."""
    for entry in codes:
        entry.reveal()
        echo.echo_success('Code<{}> {} revealed'.format(entry.pk, entry.full_label))
@verdi_code.command()
@arguments.CODE()
@arguments.LABEL()
@with_dbenv()
def relabel(code, label):
    """Relabel a code."""
    # Capture the old full label first so the success message can show both.
    previous = code.full_label
    try:
        code.relabel(label)
    except InputValidationError as exception:
        echo.echo_critical('invalid code name: {}'.format(exception))
    else:
        echo.echo_success('Code<{}> relabeled from {} to {}'.format(code.pk, previous, code.full_label))
@verdi_code.command('list')
@options.COMPUTER(help='Filter codes by computer.')
@options.INPUT_PLUGIN(help='Filter codes by calculation input plugin.')
@options.ALL(help='Include hidden codes.')
@options.ALL_USERS(help='Include codes from all users.')
@click.option('-o', '--show-owner', 'show_owner', is_flag=True, default=False, help='Show owners of codes.')
@with_dbenv()
def code_list(computer, input_plugin, all_entries, all_users, show_owner):
    """List the available codes.

    Optionally filters by computer, input plugin and owning user; hidden
    codes are omitted unless ``--all`` is given.
    """
    from aiida.orm import Code  # pylint: disable=redefined-outer-name
    from aiida import orm

    qb_user_filters = dict()
    if not all_users:
        user = orm.User.objects.get_default()
        qb_user_filters['email'] = user.email

    qb_computer_filters = dict()
    if computer is not None:
        qb_computer_filters['name'] = computer.name

    qb_code_filters = dict()
    if input_plugin is not None:
        qb_code_filters['attributes.input_plugin'] = input_plugin.name

    # If not all_entries, hide codes with HIDDEN_KEY extra set to True
    if not all_entries:
        qb_code_filters['or'] = [{
            'extras': {
                '!has_key': Code.HIDDEN_KEY
            }
        }, {
            'extras.{}'.format(Code.HIDDEN_KEY): {
                '==': False
            }
        }]

    echo.echo('# List of configured codes:')
    echo.echo("# (use 'verdi code show CODEID' to see the details)")

    showed_results = False
    # pylint: disable=invalid-name
    if computer is not None:
        qb = orm.QueryBuilder()
        qb.append(Code, tag='code', filters=qb_code_filters, project=['id', 'label'])
        # We have a user assigned to the code so we can ask for the
        # presence of a user even if there is no user filter
        qb.append(orm.User, with_node='code', project=['email'], filters=qb_user_filters)
        # We also add the filter on computer. This will automatically
        # return codes that have a computer (and of course satisfy the
        # other filters). The codes that have a computer attached are the
        # remote codes.
        qb.append(orm.Computer, with_node='code', project=['name'], filters=qb_computer_filters)
        qb.order_by({Code: {'id': 'asc'}})
        showed_results = qb.count() > 0
        print_list_res(qb, show_owner)

    # If there is no filter on computers
    else:
        # Print all codes that have a computer assigned to them
        # (these are the remote codes)
        qb = orm.QueryBuilder()
        qb.append(Code, tag='code', filters=qb_code_filters, project=['id', 'label'])
        # We have a user assigned to the code so we can ask for the
        # presence of a user even if there is no user filter
        qb.append(orm.User, with_node='code', project=['email'], filters=qb_user_filters)
        qb.append(orm.Computer, with_node='code', project=['name'])
        qb.order_by({Code: {'id': 'asc'}})
        print_list_res(qb, show_owner)
        showed_results = showed_results or qb.count() > 0

        # Now print all the local codes. To get the local codes we ask
        # the dbcomputer_id variable to be None.
        qb = orm.QueryBuilder()
        comp_non_existence = {'dbcomputer_id': {'==': None}}
        if not qb_code_filters:
            qb_code_filters = comp_non_existence
        else:
            new_qb_code_filters = {'and': [qb_code_filters, comp_non_existence]}
            qb_code_filters = new_qb_code_filters
        qb.append(Code, tag='code', filters=qb_code_filters, project=['id', 'label'])
        # We have a user assigned to the code so we can ask for the
        # presence of a user even if there is no user filter
        qb.append(orm.User, with_node='code', project=['email'], filters=qb_user_filters)
        qb.order_by({Code: {'id': 'asc'}})
        showed_results = showed_results or qb.count() > 0
        print_list_res(qb, show_owner)
    if not showed_results:
        echo.echo('# No codes found matching the specified criteria.')
def print_list_res(qb_query, show_owner):
    """Print one "* pk <id> - <label>[@computer][ (owner)]" line per query row."""
    # pylint: disable=invalid-name
    for row in qb_query.all():
        if len(row) == 3:
            pk, label, useremail = row
            computername = None  # local code: query had no computer column
        elif len(row) == 4:
            pk, label, useremail, computername = row
        else:
            echo.echo_warning('Wrong tuple size')
            return

        owner_string = ' ({})'.format(useremail) if show_owner else ''
        computernamestring = '' if computername is None else '@{}'.format(computername)
        echo.echo('* pk {} - {}{}{}'.format(pk, label, computernamestring, owner_string))
| 35.55102 | 111 | 0.660243 |
))
@verdi_code.command()
@arguments.CODE()
@arguments.LABEL()
@with_dbenv()
def relabel(code, label):
    """Relabel a code."""
    # Remember the old full label so the success message can show the transition.
    old_label = code.full_label
    try:
        code.relabel(label)
    except InputValidationError as exception:
        echo.echo_critical('invalid code name: {}'.format(exception))
    else:
        echo.echo_success('Code<{}> relabeled from {} to {}'.format(code.pk, old_label, code.full_label))
@verdi_code.command('list')
@options.COMPUTER(help='Filter codes by computer.')
@options.INPUT_PLUGIN(help='Filter codes by calculation input plugin.')
@options.ALL(help='Include hidden codes.')
@options.ALL_USERS(help='Include codes from all users.')
@click.option('-o', '--show-owner', 'show_owner', is_flag=True, default=False, help='Show owners of codes.')
@with_dbenv()
def code_list(computer, input_plugin, all_entries, all_users, show_owner):
    """List the available codes, optionally filtered by computer, plugin and user."""
    from aiida.orm import Code
    from aiida import orm
    qb_user_filters = dict()
    if not all_users:
        user = orm.User.objects.get_default()
        qb_user_filters['email'] = user.email
    qb_computer_filters = dict()
    if computer is not None:
        qb_computer_filters['name'] = computer.name
    qb_code_filters = dict()
    if input_plugin is not None:
        qb_code_filters['attributes.input_plugin'] = input_plugin.name
    # Unless --all was passed, exclude codes flagged hidden via the extras key.
    if not all_entries:
        qb_code_filters['or'] = [{
            'extras': {
                '!has_key': Code.HIDDEN_KEY
            }
        }, {
            'extras.{}'.format(Code.HIDDEN_KEY): {
                '==': False
            }
        }]
    echo.echo('# List of configured codes:')
    echo.echo("# (use 'verdi code show CODEID' to see the details)")
    showed_results = False
    if computer is not None:
        # Computer filter implies remote codes only (they have a computer).
        qb = orm.QueryBuilder()
        qb.append(Code, tag='code', filters=qb_code_filters, project=['id', 'label'])
        qb.append(orm.User, with_node='code', project=['email'], filters=qb_user_filters)
        qb.append(orm.Computer, with_node='code', project=['name'], filters=qb_computer_filters)
        qb.order_by({Code: {'id': 'asc'}})
        showed_results = qb.count() > 0
        print_list_res(qb, show_owner)
    else:
        # First the remote codes (those with a computer attached)...
        qb = orm.QueryBuilder()
        qb.append(Code, tag='code', filters=qb_code_filters, project=['id', 'label'])
        qb.append(orm.User, with_node='code', project=['email'], filters=qb_user_filters)
        qb.append(orm.Computer, with_node='code', project=['name'])
        qb.order_by({Code: {'id': 'asc'}})
        print_list_res(qb, show_owner)
        showed_results = showed_results or qb.count() > 0
        # ...then the local codes, identified by a missing dbcomputer_id.
        qb = orm.QueryBuilder()
        comp_non_existence = {'dbcomputer_id': {'==': None}}
        if not qb_code_filters:
            qb_code_filters = comp_non_existence
        else:
            new_qb_code_filters = {'and': [qb_code_filters, comp_non_existence]}
            qb_code_filters = new_qb_code_filters
        qb.append(Code, tag='code', filters=qb_code_filters, project=['id', 'label'])
        qb.append(orm.User, with_node='code', project=['email'], filters=qb_user_filters)
        qb.order_by({Code: {'id': 'asc'}})
        showed_results = showed_results or qb.count() > 0
        print_list_res(qb, show_owner)
    if not showed_results:
        echo.echo('# No codes found matching the specified criteria.')
def print_list_res(qb_query, show_owner):
    """Print one "* pk <id> - <label>[@computer][ (owner)]" line per query row."""
    for tuple_ in qb_query.all():
        if len(tuple_) == 3:
            (pk, label, useremail) = tuple_
            computername = None  # local code: query had no computer column
        elif len(tuple_) == 4:
            (pk, label, useremail, computername) = tuple_
        else:
            echo.echo_warning('Wrong tuple size')
            return
        if show_owner:
            owner_string = ' ({})'.format(useremail)
        else:
            owner_string = ''
        if computername is None:
            computernamestring = ''
        else:
            computernamestring = '@{}'.format(computername)
        echo.echo('* pk {} - {}{}{}'.format(pk, label, computernamestring, owner_string))
| true | true |
1c2d703cd9d7ce09514fa83cfca5a0cf5bcf78b9 | 6,002 | py | Python | discord/stage_instance.py | mrvillage/discord.py | 15a09772c292a9fc76ba4125c76bdf7cec08fd2e | [
"MIT"
] | 3 | 2021-08-28T04:55:11.000Z | 2022-01-26T03:22:08.000Z | discord/stage_instance.py | mrvillage/discord.py2 | 15a09772c292a9fc76ba4125c76bdf7cec08fd2e | [
"MIT"
] | null | null | null | discord/stage_instance.py | mrvillage/discord.py2 | 15a09772c292a9fc76ba4125c76bdf7cec08fd2e | [
"MIT"
] | null | null | null | """
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import TYPE_CHECKING, Optional
from .enums import StagePrivacyLevel, try_enum
from .errors import InvalidArgument
from .mixins import Hashable
from .utils import MISSING, cached_slot_property
__all__ = ("StageInstance",)
if TYPE_CHECKING:
from .channel import StageChannel
from .guild import Guild
from .state import ConnectionState
from .types.channel import StageInstance as StageInstancePayload
class StageInstance(Hashable):
    """Represents a stage instance of a stage channel in a guild.

    .. versionadded:: 2.0

    .. container:: operations

        .. describe:: x == y

            Checks if two stage instances are equal.

        .. describe:: x != y

            Checks if two stage instances are not equal.

        .. describe:: hash(x)

            Returns the stage instance's hash.

    Attributes
    -----------
    id: :class:`int`
        The stage instance's ID.
    guild: :class:`Guild`
        The guild that the stage instance is running in.
    channel_id: :class:`int`
        The ID of the channel that the stage instance is running in.
    topic: :class:`str`
        The topic of the stage instance.
    privacy_level: :class:`StagePrivacyLevel`
        The privacy level of the stage instance.
    discoverable_disabled: :class:`bool`
        Whether discoverability for the stage instance is disabled.
    """

    __slots__ = (
        "_state",
        "id",
        "guild",
        "channel_id",
        "topic",
        "privacy_level",
        "discoverable_disabled",
        "_cs_channel",
    )

    def __init__(
        self, *, state: ConnectionState, guild: Guild, data: StageInstancePayload
    ) -> None:
        self._state = state
        self.guild = guild
        self._update(data)

    def _update(self, data: StageInstancePayload) -> None:
        """Populate the instance attributes from a raw stage-instance payload."""
        self.id: int = int(data["id"])
        self.channel_id: int = int(data["channel_id"])
        self.topic: str = data["topic"]
        self.privacy_level: StagePrivacyLevel = try_enum(
            StagePrivacyLevel, data["privacy_level"]
        )
        self.discoverable_disabled: bool = data.get("discoverable_disabled", False)

    def __repr__(self) -> str:
        return f"<StageInstance id={self.id} guild={self.guild!r} channel_id={self.channel_id} topic={self.topic!r}>"

    @cached_slot_property("_cs_channel")
    def channel(self) -> Optional[StageChannel]:
        """Optional[:class:`StageChannel`]: The channel that stage instance is running in."""
        # the returned channel will always be a StageChannel or None
        return self._state.get_channel(self.channel_id)  # type: ignore

    def is_public(self) -> bool:
        """:class:`bool`: Whether the stage instance's privacy level is public."""
        return self.privacy_level is StagePrivacyLevel.public

    async def edit(
        self,
        *,
        topic: str = MISSING,
        privacy_level: StagePrivacyLevel = MISSING,
        reason: Optional[str] = None,
    ) -> None:
        """|coro|

        Edits the stage instance.

        You must have the :attr:`~Permissions.manage_channels` permission to
        use this.

        Parameters
        -----------
        topic: :class:`str`
            The stage instance's new topic.
        privacy_level: :class:`StagePrivacyLevel`
            The stage instance's new privacy level.
        reason: :class:`str`
            The reason the stage instance was edited. Shows up on the audit log.

        Raises
        ------
        InvalidArgument
            If the ``privacy_level`` parameter is not the proper type.
        Forbidden
            You do not have permissions to edit the stage instance.
        HTTPException
            Editing a stage instance failed.
        """
        # MISSING distinguishes "not provided" from an explicit value, so only
        # the fields the caller passed end up in the HTTP payload.
        payload = {}

        if topic is not MISSING:
            payload["topic"] = topic

        if privacy_level is not MISSING:
            if not isinstance(privacy_level, StagePrivacyLevel):
                raise InvalidArgument(
                    "privacy_level field must be of type PrivacyLevel"
                )

            payload["privacy_level"] = privacy_level.value

        if payload:
            await self._state.http.edit_stage_instance(
                self.channel_id, **payload, reason=reason
            )

    async def delete(self, *, reason: Optional[str] = None) -> None:
        """|coro|

        Deletes the stage instance.

        You must have the :attr:`~Permissions.manage_channels` permission to
        use this.

        Parameters
        -----------
        reason: :class:`str`
            The reason the stage instance was deleted. Shows up on the audit log.

        Raises
        ------
        Forbidden
            You do not have permissions to delete the stage instance.
        HTTPException
            Deleting the stage instance failed.
        """
        await self._state.http.delete_stage_instance(self.channel_id, reason=reason)
| 31.756614 | 117 | 0.645452 |
from __future__ import annotations
from typing import TYPE_CHECKING, Optional
from .enums import StagePrivacyLevel, try_enum
from .errors import InvalidArgument
from .mixins import Hashable
from .utils import MISSING, cached_slot_property
__all__ = ("StageInstance",)
if TYPE_CHECKING:
from .channel import StageChannel
from .guild import Guild
from .state import ConnectionState
from .types.channel import StageInstance as StageInstancePayload
class StageInstance(Hashable):
    """Represents a live stage instance running in a guild's stage channel.

    Equality and hashing compare by ``id`` (via :class:`Hashable`).
    """

    __slots__ = (
        "_state",
        "id",
        "guild",
        "channel_id",
        "topic",
        "privacy_level",
        "discoverable_disabled",
        "_cs_channel",
    )

    def __init__(
        self, *, state: ConnectionState, guild: Guild, data: StageInstancePayload
    ) -> None:
        self._state = state
        self.guild = guild
        self._update(data)

    def _update(self, data: StageInstancePayload) -> None:
        """Populate the instance attributes from a raw stage-instance payload."""
        self.id: int = int(data["id"])
        self.channel_id: int = int(data["channel_id"])
        self.topic: str = data["topic"]
        self.privacy_level: StagePrivacyLevel = try_enum(
            StagePrivacyLevel, data["privacy_level"]
        )
        self.discoverable_disabled: bool = data.get("discoverable_disabled", False)

    def __repr__(self) -> str:
        return f"<StageInstance id={self.id} guild={self.guild!r} channel_id={self.channel_id} topic={self.topic!r}>"

    @cached_slot_property("_cs_channel")
    def channel(self) -> Optional[StageChannel]:
        """The stage channel this instance is running in, if cached by the state."""
        return self._state.get_channel(self.channel_id)  # type: ignore

    def is_public(self) -> bool:
        """Whether the stage instance's privacy level is public."""
        return self.privacy_level is StagePrivacyLevel.public

    async def edit(
        self,
        *,
        topic: str = MISSING,
        privacy_level: StagePrivacyLevel = MISSING,
        reason: Optional[str] = None,
    ) -> None:
        """Edit the stage instance's topic and/or privacy level over HTTP.

        Only fields the caller actually supplied (i.e. not MISSING) are sent;
        no request is made when nothing was provided.
        """
        payload = {}
        if topic is not MISSING:
            payload["topic"] = topic
        if privacy_level is not MISSING:
            if not isinstance(privacy_level, StagePrivacyLevel):
                raise InvalidArgument(
                    "privacy_level field must be of type PrivacyLevel"
                )
            payload["privacy_level"] = privacy_level.value
        if payload:
            await self._state.http.edit_stage_instance(
                self.channel_id, **payload, reason=reason
            )

    async def delete(self, *, reason: Optional[str] = None) -> None:
        """Delete the stage instance over HTTP, optionally with an audit-log reason."""
        await self._state.http.delete_stage_instance(self.channel_id, reason=reason)
| true | true |
1c2d704e3aa0894611a77b676b4ac217652f620d | 795 | py | Python | ex072.py | BrunosVieira88/Python | 7dc105a62ede0b33d25c5864e892637ca71f2beb | [
"MIT"
] | null | null | null | ex072.py | BrunosVieira88/Python | 7dc105a62ede0b33d25c5864e892637ca71f2beb | [
"MIT"
] | null | null | null | ex072.py | BrunosVieira88/Python | 7dc105a62ede0b33d25c5864e892637ca71f2beb | [
"MIT"
] | null | null | null | print("""
Exercício Python 072: Crie um programa que tenha uma dupla totalmente preenchida com uma contagem por extenso, de zero até vinte.
Seu programa deverá ler um número pelo teclado (entre 0 e 20) e mostrá-lo por extenso.
""")
zero = ('zero','um','dois','Tres','quatro','cinco','seis','sete','oito','nove','dez','onze'
,'doze','treze','quatorze','quinze','dezesseis','dezessete','dezoito','dezenove','vinte')
entrada=0
saida = ''
while saida != 's':
print('*--' * 20)
entrada=int(input('Digite um numero de 0 a 20: '))
if entrada >=0 and entrada <=20:
print(f'voce digitou {zero[entrada]}')
print('*--' * 20)
saida=str(input('voce deseja sair [S/N]')).lower().strip()
if saida == 's':
break
else:
print('tente novamente ')
| 33.125 | 130 | 0.615094 | print("""
Exercício Python 072: Crie um programa que tenha uma dupla totalmente preenchida com uma contagem por extenso, de zero até vinte.
Seu programa deverá ler um número pelo teclado (entre 0 e 20) e mostrá-lo por extenso.
""")
zero = ('zero','um','dois','Tres','quatro','cinco','seis','sete','oito','nove','dez','onze'
,'doze','treze','quatorze','quinze','dezesseis','dezessete','dezoito','dezenove','vinte')
entrada=0
saida = ''
while saida != 's':
print('*--' * 20)
entrada=int(input('Digite um numero de 0 a 20: '))
if entrada >=0 and entrada <=20:
print(f'voce digitou {zero[entrada]}')
print('*--' * 20)
saida=str(input('voce deseja sair [S/N]')).lower().strip()
if saida == 's':
break
else:
print('tente novamente ')
| true | true |
1c2d707a1b5ddad68ed53067b735ef86194014f0 | 1,319 | py | Python | code/python/FactSetOwnership/v1/fds/sdk/FactSetOwnership/models/__init__.py | factset/enterprise-sdk | 3fd4d1360756c515c9737a0c9a992c7451d7de7e | [
"Apache-2.0"
] | 6 | 2022-02-07T16:34:18.000Z | 2022-03-30T08:04:57.000Z | code/python/FactSetOwnership/v1/fds/sdk/FactSetOwnership/models/__init__.py | factset/enterprise-sdk | 3fd4d1360756c515c9737a0c9a992c7451d7de7e | [
"Apache-2.0"
] | 2 | 2022-02-07T05:25:57.000Z | 2022-03-07T14:18:04.000Z | code/python/FactSetOwnership/v1/fds/sdk/FactSetOwnership/models/__init__.py | factset/enterprise-sdk | 3fd4d1360756c515c9737a0c9a992c7451d7de7e | [
"Apache-2.0"
] | null | null | null | # flake8: noqa
# import all models into this package
# if you have many models here with many references from one model to another this may
# raise a RecursionError
# to avoid this, import only the models that you directly need like:
# from from fds.sdk.FactSetOwnership.model.pet import Pet
# or import this package, but before doing it, use:
# import sys
# sys.setrecursionlimit(n)
from fds.sdk.FactSetOwnership.model.asset_type import AssetType
from fds.sdk.FactSetOwnership.model.error_response import ErrorResponse
from fds.sdk.FactSetOwnership.model.error_response_sub_errors import ErrorResponseSubErrors
from fds.sdk.FactSetOwnership.model.fund_holding import FundHolding
from fds.sdk.FactSetOwnership.model.fund_holdings_request import FundHoldingsRequest
from fds.sdk.FactSetOwnership.model.fund_holdings_response import FundHoldingsResponse
from fds.sdk.FactSetOwnership.model.holder_type import HolderType
from fds.sdk.FactSetOwnership.model.id_fund_holdings import IdFundHoldings
from fds.sdk.FactSetOwnership.model.id_holders import IdHolders
from fds.sdk.FactSetOwnership.model.security_holders import SecurityHolders
from fds.sdk.FactSetOwnership.model.security_holders_request import SecurityHoldersRequest
from fds.sdk.FactSetOwnership.model.security_holders_response import SecurityHoldersResponse
| 54.958333 | 92 | 0.859742 |
from fds.sdk.FactSetOwnership.model.asset_type import AssetType
from fds.sdk.FactSetOwnership.model.error_response import ErrorResponse
from fds.sdk.FactSetOwnership.model.error_response_sub_errors import ErrorResponseSubErrors
from fds.sdk.FactSetOwnership.model.fund_holding import FundHolding
from fds.sdk.FactSetOwnership.model.fund_holdings_request import FundHoldingsRequest
from fds.sdk.FactSetOwnership.model.fund_holdings_response import FundHoldingsResponse
from fds.sdk.FactSetOwnership.model.holder_type import HolderType
from fds.sdk.FactSetOwnership.model.id_fund_holdings import IdFundHoldings
from fds.sdk.FactSetOwnership.model.id_holders import IdHolders
from fds.sdk.FactSetOwnership.model.security_holders import SecurityHolders
from fds.sdk.FactSetOwnership.model.security_holders_request import SecurityHoldersRequest
from fds.sdk.FactSetOwnership.model.security_holders_response import SecurityHoldersResponse
| true | true |
1c2d709fffae9e0e4b0c66df3aca50c6e32acd0d | 2,906 | py | Python | notebooks/model_v2_small.py | deKeijzer/SRON-DCGAN | 1ead827ebf549917435e6bc9ddd2d4d5951aa205 | [
"MIT"
] | null | null | null | notebooks/model_v2_small.py | deKeijzer/SRON-DCGAN | 1ead827ebf549917435e6bc9ddd2d4d5951aa205 | [
"MIT"
] | null | null | null | notebooks/model_v2_small.py | deKeijzer/SRON-DCGAN | 1ead827ebf549917435e6bc9ddd2d4d5951aa205 | [
"MIT"
] | null | null | null | # General imports
from __future__ import print_function
#%matplotlib inline
import argparse
import os
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from IPython.display import HTML
from keijzer_exogan import *
class Generator(nn.Module):
    """DCGAN-style generator.

    Maps a latent tensor of shape ``(N, nz, 1, 1)`` through a stack of
    transposed convolutions (each followed by LeakyReLU) up to a
    ``(N, nc, 32, 32)`` image squashed into ``[-1, 1]`` by a final Tanh.
    """

    def __init__(self, ngpu, nz=100, ngf=32, nc=1):
        super(Generator, self).__init__()
        self.ngpu = ngpu
        self.nz = nz
        self.nc = nc
        self.ngf = ngf
        # (in_ch, out_ch, kernel, stride, padding) for each upsampling stage.
        # First stage expands the 1x1 latent to 4x4; the rest double the size.
        conv_specs = [
            (nz, ngf * 16, 4, 1, 0),       # (nz, 1, 1)    -> (ngf*16, 4, 4)
            (ngf * 16, ngf * 8, 4, 2, 1),  # -> (ngf*8, 8, 8)
            (ngf * 8, ngf * 4, 4, 2, 1),   # -> (ngf*4, 16, 16)
        ]
        layers = []
        for in_ch, out_ch, k, s, p in conv_specs:
            layers.append(nn.ConvTranspose2d(in_ch, out_ch, k, s, p, bias=False))
            layers.append(nn.LeakyReLU(0.2, inplace=True))
        # Final upsampling to the output channels, bounded to [-1, 1].
        layers.append(nn.ConvTranspose2d(ngf * 4, nc, 4, 2, 1, bias=False))
        layers.append(nn.Tanh())
        # Keep the attribute name "main" so state_dict keys stay compatible.
        self.main = nn.Sequential(*layers)

    def forward(self, input):
        """Run the latent batch through the transposed-conv stack."""
        return self.main(input)
class Discriminator(nn.Module):
    """DCGAN-style discriminator.

    A stack of strided convolutions (each followed by LeakyReLU) that
    halves the spatial size at every stage, ending in a single-channel
    Sigmoid map of patch-wise real/fake scores in ``(0, 1)``.
    """

    def __init__(self, ngpu, nc=1, ndf=32):
        super(Discriminator, self).__init__()
        self.ngpu = ngpu
        self.nc = nc
        self.ndf = ndf
        # (in_ch, out_ch) for each LeakyReLU-activated downsampling conv;
        # every conv uses kernel 4, stride 2, padding 1 (halves resolution).
        channel_pairs = [
            (nc, ndf * 4),
            (ndf * 4, ndf * 8),
            (ndf * 8, ndf * 16),
        ]
        layers = []
        for in_ch, out_ch in channel_pairs:
            layers.append(nn.Conv2d(in_ch, out_ch, 4, 2, 1, bias=False))
            layers.append(nn.LeakyReLU(0.2, inplace=True))
        # Final conv down to one channel, squashed to a probability-like score.
        layers.append(nn.Conv2d(ndf * 16, 1, 4, 2, 1, bias=False))
        layers.append(nn.Sigmoid())
        # Keep the attribute name "main" so state_dict keys stay compatible.
        self.main = nn.Sequential(*layers)

    def forward(self, input):
        """Score a batch of images with the strided-conv stack."""
        return self.main(input)
| 29.958763 | 101 | 0.5468 |
from __future__ import print_function
import argparse
import os
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from IPython.display import HTML
from keijzer_exogan import *
class Generator(nn.Module):
def __init__(self, ngpu, nz=100, ngf=32, nc=1):
super(Generator, self).__init__()
self.ngpu = ngpu
self.nz = nz
self.nc = nc
self.ngf = ngf
self.main = nn.Sequential(
nn.ConvTranspose2d( nz, ngf * 16, 4, 1, 0, bias=False),
nn.LeakyReLU(0.2, inplace=True),
nn.ConvTranspose2d(ngf * 16, ngf * 8, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
nn.ConvTranspose2d( ngf * 8, ngf * 4, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
nn.ConvTranspose2d( ngf * 4, nc, 4, 2, 1, bias=False),
nn.Tanh()
)
def forward(self, input):
return self.main(input)
class Discriminator(nn.Module):
def __init__(self, ngpu, nc=1, ndf=32):
super(Discriminator, self).__init__()
self.ngpu = ngpu
self.nc = nc
self.ndf = ndf
self.main = nn.Sequential(
nn.Conv2d(nc, ndf*4, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(ndf*4, ndf * 8, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(ndf * 8, ndf * 16, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(ndf * 16, 1, 4, 2, 1, bias=False),
nn.Sigmoid()
)
def forward(self, input):
return self.main(input)
| true | true |
1c2d715d7003a6f5ae48af76c2dbf46d9fd72999 | 577 | py | Python | Components/student/urls.py | iamTanTan/E-Learning_Lab_Spring_2021 | e426ba982cc5044510eb1d8b80b377cb0bd5407a | [
"MIT"
] | 2 | 2021-01-29T22:35:28.000Z | 2021-05-13T23:35:54.000Z | Components/student/urls.py | iamTanTan/E-Learning_Lab_Spring_2021 | e426ba982cc5044510eb1d8b80b377cb0bd5407a | [
"MIT"
] | 8 | 2021-03-19T11:24:23.000Z | 2022-03-12T00:57:13.000Z | Components/student/urls.py | iamTanTan/E-Learning_Lab_Spring_2021 | e426ba982cc5044510eb1d8b80b377cb0bd5407a | [
"MIT"
] | 1 | 2021-09-11T15:00:09.000Z | 2021-09-11T15:00:09.000Z | from django.urls import path
from . import views
urlpatterns = [
path("<id_field>/", views.class_index, name="class_index"),
path("<id_field>/<int:pk>/", views.class_detail, name="class_detail"),
path("welcome/<id_field>", views.welcome_page, name="welcome_page"),
path("reading_material/<id_field>", views.reading_material, name="reading_material"),
path("connect/<id_field>", views.connect, name="connect"),
# path("quizzes/<id_field>", views.quizzes, name="quiz"),
# path("quizzes/<id_field>/<int:id>", views.quizzes_index, name="quiz_index"),
] | 48.083333 | 89 | 0.694974 | from django.urls import path
from . import views
urlpatterns = [
path("<id_field>/", views.class_index, name="class_index"),
path("<id_field>/<int:pk>/", views.class_detail, name="class_detail"),
path("welcome/<id_field>", views.welcome_page, name="welcome_page"),
path("reading_material/<id_field>", views.reading_material, name="reading_material"),
path("connect/<id_field>", views.connect, name="connect"),
] | true | true |
1c2d7239b1e15d3d0f200c27c24ade50540899cb | 1,215 | py | Python | mcsrvstats/exceptions/exceptions.py | Darkflame72/mc-server-stats | 991020d3bac9aa453fd38546ef3eab914ce250fa | [
"MIT"
] | 1 | 2021-11-24T02:02:34.000Z | 2021-11-24T02:02:34.000Z | mcsrvstats/exceptions/exceptions.py | Darkflame72/mc-server-stats | 991020d3bac9aa453fd38546ef3eab914ce250fa | [
"MIT"
] | 22 | 2020-08-26T05:12:46.000Z | 2021-12-20T15:20:45.000Z | mcsrvstats/exceptions/exceptions.py | Obsidion-dev/mc-server-stats | 991020d3bac9aa453fd38546ef3eab914ce250fa | [
"MIT"
] | 2 | 2020-10-31T05:54:56.000Z | 2021-02-15T03:11:32.000Z | """All exceptions for mcsrvstats."""
from typing import Optional
class ApiError(Exception):
    """Raised when a error occurs on the api side."""

    def __init__(self, error: str, source: Optional[str] = "unknown source") -> None:
        """Store a formatted message and initialise the base Exception.

        Args:
            error (str): Error message
            source (str, optional): Source of the error. Defaults to "unknown source".
        """
        # Note: the message format deliberately matches the historical output
        # (no separator between source and "API").
        self.message = "The {}API had {}".format(source, error)
        super(ApiError, self).__init__(self.message)

    def __str__(self) -> str:
        """Return the human-readable error message."""
        return self.message
class PlayerNotFoundError(Exception):
    """Raised when a player is not found."""

    def __init__(self, username: str) -> None:
        """Error raised when a player is not found.

        Args:
            username (str): The username of the player.
        """
        self.message = f"The player {username} could not be found."
        # Pass the message to Exception so args/repr/pickling behave normally;
        # the original omitted this call, leaving e.args empty.
        super().__init__(self.message)

    def __str__(self) -> str:
        """Return error in readable format.

        Returns:
            str: string version of error.
        """
        return self.message
| 27 | 86 | 0.588477 | from typing import Optional
class ApiError(Exception):
def __init__(self, error: str, source: Optional[str] = "unknown source") -> None:
self.message = f"The {source}API had {error}"
super().__init__(self.message)
def __str__(self) -> str:
return self.message
class PlayerNotFoundError(Exception):
def __init__(self, username: str) -> None:
self.message = f"The player {username} could not be found."
def __str__(self) -> str:
return self.message
| true | true |
1c2d72a65ac2c4d2503dd453f46284e22f26e76e | 1,033 | py | Python | src/images.py | rafalou38/mangafetch | 6bd313f9b9cfb2ea729e49d382a4085c9aaf094e | [
"MIT"
] | 1 | 2021-03-17T07:56:14.000Z | 2021-03-17T07:56:14.000Z | src/images.py | rafalou38/mangafetch | 6bd313f9b9cfb2ea729e49d382a4085c9aaf094e | [
"MIT"
] | null | null | null | src/images.py | rafalou38/mangafetch | 6bd313f9b9cfb2ea729e49d382a4085c9aaf094e | [
"MIT"
] | null | null | null | import PIL.Image
import numpy
import os
def tile(image_path):
    """Split a very tall image into page-sized horizontal chunks.

    If the image is more than twice as tall as it is wide, it is cut into
    slices of height ``width * 1.4``; each slice is saved next to the
    original as ``"<name> cut <i><ext>"`` and the slice paths are returned.
    Otherwise (or on any read/decode failure) the original path is returned
    unchanged as a one-element list.

    Args:
        image_path: Path to the image file on disk.

    Returns:
        list[str]: Paths of the generated chunk files, or ``[image_path]``.
    """
    try:
        ratio = 1.4
        image = PIL.Image.open(image_path)
        image = numpy.array(image)
        # shape is (height, width[, channels]); chunk height scales with width.
        height = image.shape[1] * ratio
        if image.shape[1] * 2 < image.shape[0]:
            images = []
            y = 0
            for i in range(0, int(image.shape[0]), int(height)):
                chunk = image[i : int(i + height)]
                p = (
                    os.path.splitext(image_path)[0]
                    + " cut "
                    + str(y)
                    + os.path.splitext(image_path)[1]
                )
                PIL.Image.fromarray(chunk).save(p)
                images.append(p)
                y += 1
            return images
        else:
            return [image_path]
    except Exception:
        # Best-effort fallback: on decode/IO failure hand back the original
        # path.  Narrowed from a bare ``except:`` so KeyboardInterrupt and
        # SystemExit are no longer swallowed.
        return [image_path]
# tile("tmp\\images\\sweet-home\\0\\1.jpeg")
# for image in tile("tmp\\images\\one-piece\\680\\1.jpeg"):
# # image.show()
# # input()
# print(image)
| 25.825 | 64 | 0.463698 | import PIL.Image
import numpy
import os
def tile(image_path):
try:
ratio = 1.4
image = PIL.Image.open(image_path)
image = numpy.array(image)
height = image.shape[1] * ratio
if image.shape[1] * 2 < image.shape[0]:
images = []
y = 0
for i in range(0, int(image.shape[0]), int(height)):
chunk = image[i : int(i + height)]
p = (
os.path.splitext(image_path)[0]
+ " cut "
+ str(y)
+ os.path.splitext(image_path)[1]
)
PIL.Image.fromarray(chunk).save(p)
images.append(p)
y += 1
return images
else:
return [image_path]
except:
return [image_path]
| true | true |
1c2d72d2c42ef715c70ebbdd79ac19337179888d | 469 | py | Python | data/scripts/templates/object/draft_schematic/furniture/shared_furniture_chair_loveseat_cheap.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | [
"MIT"
] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/templates/object/draft_schematic/furniture/shared_furniture_chair_loveseat_cheap.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | null | null | null | data/scripts/templates/object/draft_schematic/furniture/shared_furniture_chair_loveseat_cheap.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
	"""Build the draft-schematic template for the cheap loveseat chair.

	Autogenerated SWG template factory: instantiates an Intangible, points
	it at the shared .iff asset and returns it to the template registry.
	Hand edits belong only between the MODIFICATIONS markers.
	"""
	result = Intangible()
	result.template = "object/draft_schematic/furniture/shared_furniture_chair_loveseat_cheap.iff"
	result.attribute_template_id = -1
	result.stfName("string_id_table","")
	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####
	return result
1c2d7458d6d470582126afdc26881299806f1051 | 3,454 | py | Python | forecasting.py | caegomezji/FORDAT | 450dca5a9b0c059429421b773be3e7d54367fdcf | [
"MIT"
] | 1 | 2021-08-28T22:42:11.000Z | 2021-08-28T22:42:11.000Z | forecasting.py | caegomezji/FORDAT | 450dca5a9b0c059429421b773be3e7d54367fdcf | [
"MIT"
] | null | null | null | forecasting.py | caegomezji/FORDAT | 450dca5a9b0c059429421b773be3e7d54367fdcf | [
"MIT"
] | 1 | 2021-08-14T00:58:54.000Z | 2021-08-14T00:58:54.000Z | # ARIMA example
from statsmodels.tsa.arima.model import ARIMA
import numpy as np
from statsmodels.tools.eval_measures import rmse
import pmdarima as pm
from fbprophet import Prophet
from time import time
import matplotlib.pyplot as plt
import pandas as pd
import math
def BIC(y_real, y_model):
    """Information-criterion style score for a fitted series (lower = better).

    NOTE(review): this computes ``k*ln(n) - 2*ln(SSE)`` with k fixed at 2,
    which is not the textbook BIC (``n*ln(SSE/n) + k*ln(n)``); kept as-is
    to preserve behaviour — confirm the intended formula with the author.

    Args:
        y_real: Observed values (array-like supporting elementwise ``-``).
        y_model: Model/fitted values of the same shape.

    Returns:
        float: The criterion value.
    """
    k = 2  # fixed parameter count
    residuals = y_real - y_model
    sum_sq = sum(residuals ** 2)
    return k * math.log(len(y_real)) - 2 * math.log(sum_sq)
def AIC(y_real, y_modelo):
    """Information-criterion style score for a fitted series (lower = better).

    NOTE(review): this computes ``2*k - 2*ln(SSE)`` with k fixed at 1, which
    is not the textbook AIC (``2*k - 2*ln(L)`` with L the likelihood); kept
    as-is to preserve behaviour — confirm the intended formula.  A stray
    debug ``print(resid)`` was removed.

    Args:
        y_real: Observed values (array-like supporting elementwise ``-``).
        y_modelo: Model/fitted values of the same shape.

    Returns:
        float: The criterion value.
    """
    resid = y_real - y_modelo
    sse = sum(resid ** 2)
    k = 1  # parameters
    return 2 * k - 2 * math.log(sse)
def make_forecast(data, model='prophet'):
    """Split ``data`` into train/hold-out and forecast with the chosen model.

    The last 6 observations are held out; the selected model is trained on
    the rest and asked for 18 future periods.  (Removed two unused locals,
    ``history`` and ``test_time``, from the original.)

    Args:
        data: pandas Series of observations (NaNs are interpolated first).
        model: ``'prophet'`` or ``'autoarima'``; any other value yields an
            empty prediction list, matching the original behaviour.

    Returns:
        tuple: ``(test, predictions)`` — the 6-observation hold-out slice
        and the model's 18-period forecast.
    """
    data = data.interpolate()
    test_months = 6
    train, test = data[0:-test_months], data[-test_months:]
    future_months = 18
    predictions = list()
    if model == 'prophet':
        predictions = forecast_prophet(train, future_months)
    elif model == 'autoarima':
        predictions = forecast_autoarima(train, future_months)
    return test, predictions
def forecast_autoarima(data, future_months=12):
    """Fit a seasonal auto-ARIMA on ``data`` and forecast ahead.

    Args:
        data: Iterable of observed values (the training series).
        future_months: Number of future periods to predict.

    Returns:
        pandas.Series: The ``future_months`` predicted values.
    """
    history = [x for x in data]
    # Stepwise search over small (p,d,q)x(P,D,Q) grids with yearly
    # seasonality (m=12).  NOTE(review): 'supress_warnings' is misspelled
    # ('suppress_warnings') and is likely ignored by pmdarima — confirm.
    model = pm.auto_arima(history, start_p=0, d=1, start_q=0,
                          max_p=2, max_d=2, max_q=2, start_P=0,
                          D=1, start_Q=0, max_P=2, max_D=2,
                          max_Q=2, m=12, seasonal=True,
                          error_action='warn', trace=True,
                          supress_warnings=True, stepwise=True,
                          random_state=20, n_fits=10)
    predictions = model.predict(future_months)
    return pd.Series(predictions)
def forecast_prophet(data, future_months=12):
    """Fit a Prophet model on a monthly series and forecast ahead.

    Args:
        data: pandas Series indexed by date; values are labelled 'FOBDOL'
            before being renamed to Prophet's required columns.
        future_months: Number of months to forecast past the series end.

    Returns:
        pandas.Series: The last ``future_months`` predicted values (yhat).
    """
    data_model = data.reset_index(name='FOBDOL')
    data_model.columns = ['ds', 'y' ]  # Prophet requires exactly these column names
    #print(data_model.shape)
    print('start model')
    model = Prophet(interval_width=0.95, seasonality_mode='multiplicative')
    print('model defined')
    model.fit(data_model)
    print('model fitted')
    # future_months is exposed as a slider in the app UI
    future = model.make_future_dataframe(periods=future_months, freq='MS')  # month-start frequency
    forecast = model.predict(future)
    predictions = forecast.yhat
    print('predicted')
    # Uncomment to inspect the raw forecast frame:
    # print(forecast.head(2))
    # print(predictions)
    return predictions[-future_months:].reset_index()['yhat']
def forcast_arima(data):
    """Rolling one-step ARIMA(5,1,0) forecast over a hold-out plus the future.

    The last 20% of ``data`` is held out.  The model is refit at every step;
    within the hold-out window the real observation is appended to the
    history, and beyond it the model's own forecast is fed back in
    (recursive multi-step forecasting) for 12 further periods.

    NOTE(review): the name keeps the original 'forcast' spelling because
    external callers may reference it.

    Args:
        data: pandas Series of observations (NaNs are interpolated first).

    Returns:
        tuple: ``(test, predictions)`` — the hold-out slice and the list of
        one-step forecasts covering the hold-out plus 12 future periods.
    """
    # data[Sector ] == X
    data = data.interpolate()
    size = int(len(data) * 0.80)
    train, test = data[0:size], data[size:len(data)]
    history = [x for x in train]
    predictions = list()
    future_months = 12
    test_time = len(test)
    for t in range(test_time + future_months ):
        model = ARIMA(history, order=(5, 1, 0))
        model_fit = model.fit()
        output = model_fit.forecast()
        yhat = output[0]
        predictions.append(yhat)
        if t < test_time:
            # Still inside the hold-out: learn from the real observation.
            obs = test[t]
            history.append(obs)
        if t >= test_time:
            # Past the hold-out: feed the forecast back into the history.
            history.append(yhat)
    # evaluate forecasts
    # error = np.sqrt(rmse(test, predictions))
    #print('Test RMSE: %.3f' % error)
    # plot forecasts against actual outcomes
    # plt.plot(np.array(test))
    # plt.plot(predictions, color='red')
    # plt.show()
    return test, predictions
from statsmodels.tsa.arima.model import ARIMA
import numpy as np
from statsmodels.tools.eval_measures import rmse
import pmdarima as pm
from fbprophet import Prophet
from time import time
import matplotlib.pyplot as plt
import pandas as pd
import math
def BIC(y_real, y_model):
n = len(y_real)
k = 2
resid = y_real - y_model
sse = sum(resid**2)
BIC_value = k * math.log(n) - 2 * math.log(sse)
return BIC_value
def AIC(y_real, y_modelo):
resid = y_real - y_modelo
print(resid)
sse = sum(resid**2)
k = 1
AIC_value = 2*k - 2*math.log(sse)
return AIC_value
def make_forecast(data, model='prophet'):
data = data.interpolate()
test_months = 6
train, test = data[0:-test_months], data[-test_months:]
history = [x for x in train]
predictions = list()
future_months = 18
test_time = len(test)
if model == 'prophet':
predictions = forecast_prophet(train, future_months)
elif model == 'autoarima':
predictions = forecast_autoarima(train, future_months)
return test, predictions
def forecast_autoarima(data, future_months=12):
history = [x for x in data]
model = pm.auto_arima(history, start_p=0, d=1, start_q=0,
max_p=2, max_d=2, max_q=2, start_P=0,
D=1, start_Q=0, max_P=2, max_D=2,
max_Q=2, m=12, seasonal=True,
error_action='warn', trace=True,
supress_warnings=True, stepwise=True,
random_state=20, n_fits=10)
predictions = model.predict(future_months)
return pd.Series(predictions)
def forecast_prophet(data, future_months=12):
data_model = data.reset_index(name='FOBDOL')
data_model.columns = ['ds', 'y' ]
print('start model')
model = Prophet(interval_width=0.95, seasonality_mode='multiplicative')
print('model defined')
model.fit(data_model)
print('model fitted')
ame(periods=future_months, freq='MS')
forecast = model.predict(future)
predictions = forecast.yhat
print('predicted')
return predictions[-future_months:].reset_index()['yhat']
def forcast_arima(data):
data = data.interpolate()
size = int(len(data) * 0.80)
train, test = data[0:size], data[size:len(data)]
history = [x for x in train]
predictions = list()
future_months = 12
test_time = len(test)
for t in range(test_time + future_months ):
model = ARIMA(history, order=(5, 1, 0))
model_fit = model.fit()
output = model_fit.forecast()
yhat = output[0]
predictions.append(yhat)
if t < test_time:
obs = test[t]
history.append(obs)
if t >= test_time:
history.append(yhat)
return test, predictions | true | true |
1c2d74c185631f60c1aead669312563eb156e198 | 1,883 | py | Python | package/spack-probconsrna/package.py | ctuning/ck-spack | 307934efce1be2d4f104251275c82fbc70127105 | [
"BSD-3-Clause"
] | 1 | 2018-07-17T07:45:09.000Z | 2018-07-17T07:45:09.000Z | package/spack-probconsrna/package.py | ctuning/ck-spack | 307934efce1be2d4f104251275c82fbc70127105 | [
"BSD-3-Clause"
] | null | null | null | package/spack-probconsrna/package.py | ctuning/ck-spack | 307934efce1be2d4f104251275c82fbc70127105 | [
"BSD-3-Clause"
] | null | null | null | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Probconsrna(Package):
"""Experimental version of PROBCONS with parameters estimated via
unsupervised training on BRAliBASE """
homepage = "http://probcons.stanford.edu/"
url = "http://probcons.stanford.edu/probconsRNA.tar.gz"
version('2005-6-7', '2aa13012124208ca5dd6b0a1d508208d')
def install(self, build, prefix):
mkdirp(prefix.bin)
install('compare', prefix.bin)
install('makegnuplot', prefix.bin)
install('probcons', prefix.bin)
# needed for tcoffee
install('probcons', prefix.bin.probconsRNA)
install('project', prefix.bin)
| 41.844444 | 78 | 0.669145 | true | true | |
1c2d751d07cda34dd2dbbc3db31438df60aa5cd1 | 6,209 | py | Python | tests/many_to_one_null/tests.py | geelweb/django | 59afe61a970dd60df388e7cda9041ef3c0e770cb | [
"PSF-2.0",
"BSD-3-Clause"
] | 1 | 2020-10-21T02:20:06.000Z | 2020-10-21T02:20:06.000Z | tests/many_to_one_null/tests.py | geelweb/django | 59afe61a970dd60df388e7cda9041ef3c0e770cb | [
"PSF-2.0",
"BSD-3-Clause"
] | 1 | 2020-07-02T21:10:44.000Z | 2020-07-02T21:11:21.000Z | tests/many_to_one_null/tests.py | geelweb/django | 59afe61a970dd60df388e7cda9041ef3c0e770cb | [
"PSF-2.0",
"BSD-3-Clause"
] | 1 | 2020-08-11T18:46:32.000Z | 2020-08-11T18:46:32.000Z | from __future__ import unicode_literals
from django.test import TestCase
from .models import Article, Car, Driver, Reporter
class ManyToOneNullTests(TestCase):
    """Tests for nullable ForeignKey behaviour (Article.reporter may be
    None): creation, reverse-manager add/remove/set/clear, isnull filtering,
    query efficiency, and a null to_field relation (Driver.car)."""
    def setUp(self):
        # Create a Reporter.
        self.r = Reporter(name='John Smith')
        self.r.save()
        # Create an Article.
        self.a = Article(headline="First", reporter=self.r)
        self.a.save()
        # Create an Article via the Reporter object.
        self.a2 = self.r.article_set.create(headline="Second")
        # Create an Article with no Reporter by passing "reporter=None".
        self.a3 = Article(headline="Third", reporter=None)
        self.a3.save()
        # Create another article and reporter
        self.r2 = Reporter(name='Paul Jones')
        self.r2.save()
        self.a4 = self.r2.article_set.create(headline='Fourth')
    def test_get_related(self):
        self.assertEqual(self.a.reporter.id, self.r.id)
        # Article objects have access to their related Reporter objects.
        r = self.a.reporter
        self.assertEqual(r.id, self.r.id)
    def test_created_via_related_set(self):
        # An article created through the reverse manager gets the FK set.
        self.assertEqual(self.a2.reporter.id, self.r.id)
    def test_related_set(self):
        # Reporter objects have access to their related Article objects.
        self.assertQuerysetEqual(self.r.article_set.all(), ['<Article: First>', '<Article: Second>'])
        self.assertQuerysetEqual(self.r.article_set.filter(headline__startswith='Fir'), ['<Article: First>'])
        self.assertEqual(self.r.article_set.count(), 2)
    def test_created_without_related(self):
        self.assertIsNone(self.a3.reporter)
        # Need to reget a3 to refresh the cache
        a3 = Article.objects.get(pk=self.a3.pk)
        with self.assertRaises(AttributeError):
            getattr(a3.reporter, 'id')
        # Accessing an article's 'reporter' attribute returns None
        # if the reporter is set to None.
        self.assertIsNone(a3.reporter)
        # To retrieve the articles with no reporters set, use "reporter__isnull=True".
        self.assertQuerysetEqual(Article.objects.filter(reporter__isnull=True), ['<Article: Third>'])
        # We can achieve the same thing by filtering for the case where the
        # reporter is None.
        self.assertQuerysetEqual(Article.objects.filter(reporter=None), ['<Article: Third>'])
        # Set the reporter for the Third article
        self.assertQuerysetEqual(self.r.article_set.all(), ['<Article: First>', '<Article: Second>'])
        self.r.article_set.add(a3)
        self.assertQuerysetEqual(
            self.r.article_set.all(),
            ['<Article: First>', '<Article: Second>', '<Article: Third>']
        )
        # Remove an article from the set, and check that it was removed.
        self.r.article_set.remove(a3)
        self.assertQuerysetEqual(self.r.article_set.all(), ['<Article: First>', '<Article: Second>'])
        self.assertQuerysetEqual(Article.objects.filter(reporter__isnull=True), ['<Article: Third>'])
    def test_remove_from_wrong_set(self):
        self.assertQuerysetEqual(self.r2.article_set.all(), ['<Article: Fourth>'])
        # Try to remove a4 from a set it does not belong to
        with self.assertRaises(Reporter.DoesNotExist):
            self.r.article_set.remove(self.a4)
        self.assertQuerysetEqual(self.r2.article_set.all(), ['<Article: Fourth>'])
    def test_set(self):
        # Use manager.set() to allocate ForeignKey. Null is legal, so existing
        # members of the set that are not in the assignment set are set to null.
        self.r2.article_set.set([self.a2, self.a3])
        self.assertQuerysetEqual(self.r2.article_set.all(), ['<Article: Second>', '<Article: Third>'])
        # Use manager.set(clear=True)
        self.r2.article_set.set([self.a3, self.a4], clear=True)
        self.assertQuerysetEqual(self.r2.article_set.all(), ['<Article: Fourth>', '<Article: Third>'])
        # Clear the rest of the set
        self.r2.article_set.set([])
        self.assertQuerysetEqual(self.r2.article_set.all(), [])
        self.assertQuerysetEqual(
            Article.objects.filter(reporter__isnull=True),
            ['<Article: Fourth>', '<Article: Second>', '<Article: Third>']
        )
    def test_assign_clear_related_set(self):
        # Use descriptor assignment to allocate ForeignKey. Null is legal, so
        # existing members of the set that are not in the assignment set are
        # set to null.
        self.r2.article_set.set([self.a2, self.a3])
        self.assertQuerysetEqual(self.r2.article_set.all(), ['<Article: Second>', '<Article: Third>'])
        # Clear the rest of the set
        self.r.article_set.clear()
        self.assertQuerysetEqual(self.r.article_set.all(), [])
        self.assertQuerysetEqual(
            Article.objects.filter(reporter__isnull=True),
            ['<Article: First>', '<Article: Fourth>']
        )
    def test_assign_with_queryset(self):
        # Ensure that querysets used in reverse FK assignments are pre-evaluated
        # so their value isn't affected by the clearing operation in
        # RelatedManager.set() (#19816).
        self.r2.article_set.set([self.a2, self.a3])
        qs = self.r2.article_set.filter(headline="Second")
        self.r2.article_set.set(qs)
        self.assertEqual(1, self.r2.article_set.count())
        self.assertEqual(1, qs.count())
    def test_add_efficiency(self):
        # Bulk add of three articles should issue a single UPDATE query.
        r = Reporter.objects.create()
        articles = []
        for _ in range(3):
            articles.append(Article.objects.create())
        with self.assertNumQueries(1):
            r.article_set.add(*articles)
        self.assertEqual(r.article_set.count(), 3)
    def test_clear_efficiency(self):
        # Clearing the related set should also be a single query.
        r = Reporter.objects.create()
        for _ in range(3):
            r.article_set.create()
        with self.assertNumQueries(1):
            r.article_set.clear()
        self.assertEqual(r.article_set.count(), 0)
    def test_related_null_to_field(self):
        # A null to_field relation yields no related objects and, since the
        # local field is NULL, requires no database query at all.
        c1 = Car.objects.create()
        d1 = Driver.objects.create()
        self.assertIs(d1.car, None)
        with self.assertNumQueries(0):
            self.assertEqual(list(c1.drivers.all()), [])
| 44.669065 | 109 | 0.645515 | from __future__ import unicode_literals
from django.test import TestCase
from .models import Article, Car, Driver, Reporter
class ManyToOneNullTests(TestCase):
def setUp(self):
self.r = Reporter(name='John Smith')
self.r.save()
self.a = Article(headline="First", reporter=self.r)
self.a.save()
self.a2 = self.r.article_set.create(headline="Second")
self.a3 = Article(headline="Third", reporter=None)
self.a3.save()
self.r2 = Reporter(name='Paul Jones')
self.r2.save()
self.a4 = self.r2.article_set.create(headline='Fourth')
def test_get_related(self):
self.assertEqual(self.a.reporter.id, self.r.id)
r = self.a.reporter
self.assertEqual(r.id, self.r.id)
def test_created_via_related_set(self):
self.assertEqual(self.a2.reporter.id, self.r.id)
def test_related_set(self):
self.assertQuerysetEqual(self.r.article_set.all(), ['<Article: First>', '<Article: Second>'])
self.assertQuerysetEqual(self.r.article_set.filter(headline__startswith='Fir'), ['<Article: First>'])
self.assertEqual(self.r.article_set.count(), 2)
def test_created_without_related(self):
self.assertIsNone(self.a3.reporter)
a3 = Article.objects.get(pk=self.a3.pk)
with self.assertRaises(AttributeError):
getattr(a3.reporter, 'id')
# if the reporter is set to None.
self.assertIsNone(a3.reporter)
# To retrieve the articles with no reporters set, use "reporter__isnull=True".
self.assertQuerysetEqual(Article.objects.filter(reporter__isnull=True), ['<Article: Third>'])
# We can achieve the same thing by filtering for the case where the
# reporter is None.
self.assertQuerysetEqual(Article.objects.filter(reporter=None), ['<Article: Third>'])
# Set the reporter for the Third article
self.assertQuerysetEqual(self.r.article_set.all(), ['<Article: First>', '<Article: Second>'])
self.r.article_set.add(a3)
self.assertQuerysetEqual(
self.r.article_set.all(),
['<Article: First>', '<Article: Second>', '<Article: Third>']
)
# Remove an article from the set, and check that it was removed.
self.r.article_set.remove(a3)
self.assertQuerysetEqual(self.r.article_set.all(), ['<Article: First>', '<Article: Second>'])
self.assertQuerysetEqual(Article.objects.filter(reporter__isnull=True), ['<Article: Third>'])
def test_remove_from_wrong_set(self):
self.assertQuerysetEqual(self.r2.article_set.all(), ['<Article: Fourth>'])
# Try to remove a4 from a set it does not belong to
with self.assertRaises(Reporter.DoesNotExist):
self.r.article_set.remove(self.a4)
self.assertQuerysetEqual(self.r2.article_set.all(), ['<Article: Fourth>'])
def test_set(self):
# Use manager.set() to allocate ForeignKey. Null is legal, so existing
# members of the set that are not in the assignment set are set to null.
self.r2.article_set.set([self.a2, self.a3])
self.assertQuerysetEqual(self.r2.article_set.all(), ['<Article: Second>', '<Article: Third>'])
# Use manager.set(clear=True)
self.r2.article_set.set([self.a3, self.a4], clear=True)
self.assertQuerysetEqual(self.r2.article_set.all(), ['<Article: Fourth>', '<Article: Third>'])
# Clear the rest of the set
self.r2.article_set.set([])
self.assertQuerysetEqual(self.r2.article_set.all(), [])
self.assertQuerysetEqual(
Article.objects.filter(reporter__isnull=True),
['<Article: Fourth>', '<Article: Second>', '<Article: Third>']
)
def test_assign_clear_related_set(self):
# Use descriptor assignment to allocate ForeignKey. Null is legal, so
# existing members of the set that are not in the assignment set are
# set to null.
self.r2.article_set.set([self.a2, self.a3])
self.assertQuerysetEqual(self.r2.article_set.all(), ['<Article: Second>', '<Article: Third>'])
# Clear the rest of the set
self.r.article_set.clear()
self.assertQuerysetEqual(self.r.article_set.all(), [])
self.assertQuerysetEqual(
Article.objects.filter(reporter__isnull=True),
['<Article: First>', '<Article: Fourth>']
)
def test_assign_with_queryset(self):
# Ensure that querysets used in reverse FK assignments are pre-evaluated
# so their value isn't affected by the clearing operation in
self.r2.article_set.set([self.a2, self.a3])
qs = self.r2.article_set.filter(headline="Second")
self.r2.article_set.set(qs)
self.assertEqual(1, self.r2.article_set.count())
self.assertEqual(1, qs.count())
def test_add_efficiency(self):
r = Reporter.objects.create()
articles = []
for _ in range(3):
articles.append(Article.objects.create())
with self.assertNumQueries(1):
r.article_set.add(*articles)
self.assertEqual(r.article_set.count(), 3)
def test_clear_efficiency(self):
r = Reporter.objects.create()
for _ in range(3):
r.article_set.create()
with self.assertNumQueries(1):
r.article_set.clear()
self.assertEqual(r.article_set.count(), 0)
def test_related_null_to_field(self):
c1 = Car.objects.create()
d1 = Driver.objects.create()
self.assertIs(d1.car, None)
with self.assertNumQueries(0):
self.assertEqual(list(c1.drivers.all()), [])
| true | true |
1c2d775a3f9fcb6acb760af0a42a51e9c6b4dd17 | 5,227 | py | Python | retweetcascade/rt_cascade_friendships.py | guglielmocola/RetweetCascade | f96319d0107473715104acceb2ff0925d35dd9e3 | [
"MIT"
] | null | null | null | retweetcascade/rt_cascade_friendships.py | guglielmocola/RetweetCascade | f96319d0107473715104acceb2ff0925d35dd9e3 | [
"MIT"
] | null | null | null | retweetcascade/rt_cascade_friendships.py | guglielmocola/RetweetCascade | f96319d0107473715104acceb2ff0925d35dd9e3 | [
"MIT"
] | null | null | null | import pandas as pd
from retweetcascade.utility_functions import __convert_to_pandas_list_tw, __explode_dict
def rt_cascade_friendships(retweets, followers, friends, **kwargs):
    """Estimate the retweet cascade based on friendship among retweeters.

    A retweeter X is linked to the root R (original tweet's author) if he/she is a follower
    of R, otherwise it is linked to the last friend who retweeted before X. If X is not
    a follower of R and there are no friends who retweeted before X, then it is not possible
    to automatically link X to another retweeter, and X is disconnected from the cascade graph.

    :param retweets: list of retweets, each retweet being a tweet object.
    :param followers: list of followers of the root (id_str values).
    :param friends: dictionary describing the friends of retweeters (only the friends of
        retweeters who are not followers of the root are useful); each key is the id_str
        of a retweeter and points to the list of friends (id_str values).
    :keyword verbose: Verbose mode (default False)
    :return: Returns a pandas DataFrame with columns source and target, where each
        row describes an edge of the retweet cascade graph. Disconnected nodes are included with a
        target equal to NaN.
    """
    # Set keyword arguments, start from default values
    verbose = False
    if 'verbose' in kwargs:
        verbose = kwargs['verbose']
    # NOTE(review): `verbose` is parsed but never used below — presumably verbose
    # output was planned; confirm before relying on this flag.
    # Find the root from a retweet.
    root_id = retweets[0]['retweeted_status']['user']['id_str']
    # DataFrame with RETWEETS (in case of multiple retweets from the same subject, keep the oldest)
    df_rt = __convert_to_pandas_list_tw(retweets, ['created_at', 'user.id_str'])
    df_rt = df_rt.sort_values(by=['created_at'], ascending=False)
    df_rt = df_rt.groupby(df_rt['user.id_str']).last().reset_index()  # last is the oldest
    # List of retweeters who also are followers ("direct retweeters")
    direct_rt_list = []
    # List of non-follower retweeters (will be useful later to find disconnected nodes)
    nf_rt_list = []
    for rt in retweets:
        rt_user = rt['user']['id_str']
        if rt_user in followers:
            direct_rt_list.append(rt_user)
        else:
            nf_rt_list.append(rt_user)
    # Remove duplicates
    direct_rt_list = list(set(direct_rt_list))
    # Create DataFrame for these users, then add it to the main one.
    # Direct retweeters are attached straight to the root.
    df_direct = pd.DataFrame(direct_rt_list, columns=['source'])
    df_direct['target'] = root_id
    # Create rt DataFrame with just non-follower retweeters.
    df_nf_rt = df_rt[~df_rt['user.id_str'].isin(direct_rt_list)].reset_index(drop=True)
    # Create DataFrame for friendships, with <user.id, friend.id> info
    df_friends = pd.DataFrame(__explode_dict(friends)).T
    df_friends.columns = ['follower_id_str', 'friend_id_str']
    # First merge links non-follower retweeters with their friends
    df_merge1 = df_nf_rt.merge(df_friends, left_on='user.id_str', right_on='follower_id_str')
    # Second merge adds retweet information for friends (this time the merge needs to be with
    # the entire retweets DataFrame)
    df_merge2 = df_merge1.merge(df_rt, left_on='friend_id_str', right_on='user.id_str', suffixes=('', '_y'))
    # Remove rows where 'created_at_y' > 'created_at'
    # (a friend can only be the source if he/she retweeted strictly earlier).
    df_merge2['delta'] = (df_merge2['created_at'] - df_merge2['created_at_y']).dt.total_seconds()
    df_merge2 = df_merge2[df_merge2['delta'] > 0]
    # Keep, for each retweeter, the friend who retweeted last before them
    # (smallest positive delta = oldest row after the descending sort).
    df_final = df_merge2[['user.id_str', 'created_at', 'friend_id_str', 'created_at_y', 'delta']]
    df_final = df_final.sort_values(by=['delta'], ascending=False)
    df_final = df_final.groupby(df_final['user.id_str']).last().reset_index()  # last is the oldest
    # Prepare cascade DataFrame based on friendship, then cat it with direct followers
    cascade_df = pd.DataFrame()
    cascade_df['source'] = df_final['user.id_str']
    cascade_df['target'] = df_final['friend_id_str']
    # Save list of "friend-based" non follower retweeters for later.
    fb_rt_list = cascade_df['source'].tolist()
    # Cat with direct retweeters (followers of root)
    cascade_df = pd.concat([cascade_df, df_direct], ignore_index=True)
    # Finally, find disconnected nodes, and add a row with NaN target for them.
    disconnected_nodes = set(nf_rt_list) - set(fb_rt_list)
    # Add disconnected nodes with 'NaN' target
    disconnected_df = pd.DataFrame(
        {'source': list(disconnected_nodes),
         'target': [float("NaN")] * len(disconnected_nodes),
         })
    # Find final edges df, including disconnected nodes
    cascade_df = pd.concat([cascade_df, disconnected_df], ignore_index=True)
    # Remove the root from source, if present
    cascade_df.drop(cascade_df.loc[cascade_df['source'] == root_id].index, inplace=True)
    cascade_df.reset_index(inplace=True, drop=True)
    return cascade_df
| 46.256637 | 111 | 0.712646 | import pandas as pd
from retweetcascade.utility_functions import __convert_to_pandas_list_tw, __explode_dict
def rt_cascade_friendships(retweets, followers, friends, **kwargs):
verbose = False
if 'verbose' in kwargs:
verbose = kwargs['verbose']
root_id = retweets[0]['retweeted_status']['user']['id_str']
df_rt = __convert_to_pandas_list_tw(retweets, ['created_at', 'user.id_str'])
df_rt = df_rt.sort_values(by=['created_at'], ascending=False)
df_rt = df_rt.groupby(df_rt['user.id_str']).last().reset_index()
direct_rt_list = []
nf_rt_list = []
for rt in retweets:
rt_user = rt['user']['id_str']
if rt_user in followers:
direct_rt_list.append(rt_user)
else:
nf_rt_list.append(rt_user)
direct_rt_list = list(set(direct_rt_list))
df_direct = pd.DataFrame(direct_rt_list, columns=['source'])
df_direct['target'] = root_id
df_nf_rt = df_rt[~df_rt['user.id_str'].isin(direct_rt_list)].reset_index(drop=True)
df_friends = pd.DataFrame(__explode_dict(friends)).T
df_friends.columns = ['follower_id_str', 'friend_id_str']
df_merge1 = df_nf_rt.merge(df_friends, left_on='user.id_str', right_on='follower_id_str')
df_merge2 = df_merge1.merge(df_rt, left_on='friend_id_str', right_on='user.id_str', suffixes=('', '_y'))
df_merge2['delta'] = (df_merge2['created_at'] - df_merge2['created_at_y']).dt.total_seconds()
df_merge2 = df_merge2[df_merge2['delta'] > 0]
df_final = df_merge2[['user.id_str', 'created_at', 'friend_id_str', 'created_at_y', 'delta']]
df_final = df_final.sort_values(by=['delta'], ascending=False)
df_final = df_final.groupby(df_final['user.id_str']).last().reset_index()
cascade_df = pd.DataFrame()
cascade_df['source'] = df_final['user.id_str']
cascade_df['target'] = df_final['friend_id_str']
fb_rt_list = cascade_df['source'].tolist()
cascade_df = pd.concat([cascade_df, df_direct], ignore_index=True)
disconnected_nodes = set(nf_rt_list) - set(fb_rt_list)
disconnected_df = pd.DataFrame(
{'source': list(disconnected_nodes),
'target': [float("NaN")] * len(disconnected_nodes),
})
cascade_df = pd.concat([cascade_df, disconnected_df], ignore_index=True)
cascade_df.drop(cascade_df.loc[cascade_df['source'] == root_id].index, inplace=True)
cascade_df.reset_index(inplace=True, drop=True)
return cascade_df
| true | true |
1c2d776b3b17a239f417bf3ed5c557c97f983b74 | 459 | py | Python | src/playerMemory.py | koleberd/goaldependency | b4572758c405c90b33408497429531db82f5d22b | [
"FTL",
"CNRI-Python",
"CECILL-B",
"AAL"
] | 1 | 2018-01-10T21:41:27.000Z | 2018-01-10T21:41:27.000Z | src/playerMemory.py | koleberd/goaldependency | b4572758c405c90b33408497429531db82f5d22b | [
"FTL",
"CNRI-Python",
"CECILL-B",
"AAL"
] | null | null | null | src/playerMemory.py | koleberd/goaldependency | b4572758c405c90b33408497429531db82f5d22b | [
"FTL",
"CNRI-Python",
"CECILL-B",
"AAL"
] | null | null | null | ###
#contains structures used to track the user's path, world snapshots, and things used for rollbacks as well as extra metrics.
###
class PlayerMemory:
    """Tracks the player's path, world snapshots, and rollback bookkeeping."""

    def __init__(self, target=None):
        """Start a fresh memory, optionally aimed at *target*."""
        self.target = target
        # Extra metrics gathered while the player moves around.
        self.metrics = {'distance traveled': 0, 'path': []}
        # Rollback chain: current node plus up to three ancestor snapshots.
        self.curr_at = None
        self.prev_at = None
        self.prev_at_parent = None
        self.prev_at_parent_parent = None
        self.prev_at_parent_parent_parent = None
class PlayerMemory:
def __init__(self,target=None):
self.target = target
self.metrics = {'distance traveled':0,'path':[]}
self.prev_at = None
self.curr_at = None
self.prev_at_parent = None
self.prev_at_parent_parent = None
self.prev_at_parent_parent_parent = None
| true | true |
1c2d78c536fcf3aa392b79dcf9e8d2ce5bdd8790 | 2,552 | py | Python | ps3api/memutils.py | iMoD1998/PS3API | ba64e6f5c1ed56746309d50c66c906c15045a254 | [
"MIT"
] | 8 | 2021-12-30T15:20:36.000Z | 2022-03-27T12:22:23.000Z | ps3api/memutils.py | iMoD1998/PS3API | ba64e6f5c1ed56746309d50c66c906c15045a254 | [
"MIT"
] | null | null | null | ps3api/memutils.py | iMoD1998/PS3API | ba64e6f5c1ed56746309d50c66c906c15045a254 | [
"MIT"
] | null | null | null | import struct
'''
Byte Conversions
'''
# Big-endian pack helpers — the PS3's Cell/PowerPC CPU is big-endian, so these
# match the console's native byte order.
PackInt8BE   = struct.Struct('>B').pack
PackInt16BE  = struct.Struct('>H').pack
PackInt32BE  = struct.Struct('>L').pack
PackInt64BE  = struct.Struct('>Q').pack
PackFloatBE  = struct.Struct('>f').pack
PackDoubleBE = struct.Struct('>d').pack
# Big-endian unpack helpers (each returns a 1-tuple, per struct semantics).
UnpackInt8BE   = struct.Struct('>B').unpack
UnpackInt16BE  = struct.Struct('>H').unpack
UnpackInt32BE  = struct.Struct('>L').unpack
UnpackInt64BE  = struct.Struct('>Q').unpack
UnpackFloatBE  = struct.Struct('>f').unpack
UnpackDoubleBE = struct.Struct('>d').unpack
# Little-endian counterparts (kept for completeness; the Read*/Write* factories
# below only use the big-endian variants).
PackInt8LE   = struct.Struct('<B').pack
PackInt16LE  = struct.Struct('<H').pack
PackInt32LE  = struct.Struct('<L').pack
PackInt64LE  = struct.Struct('<Q').pack
PackFloatLE  = struct.Struct('<f').pack
PackDoubleLE = struct.Struct('<d').pack
UnpackInt8LE   = struct.Struct('<B').unpack
UnpackInt16LE  = struct.Struct('<H').unpack
UnpackInt32LE  = struct.Struct('<L').unpack
UnpackInt64LE  = struct.Struct('<Q').unpack
UnpackFloatLE  = struct.Struct('<f').unpack
UnpackDoubleLE = struct.Struct('<d').unpack
def ReadInt8(ReadMemory):
    # Build a reader: fetch 1 byte at Address, decode as big-endian uint8.
    return lambda Address: UnpackInt8BE(ReadMemory(Address, 1))[0]
def ReadInt16(ReadMemory):
    # Build a reader: fetch 2 bytes at Address, decode as big-endian uint16.
    return lambda Address: UnpackInt16BE(ReadMemory(Address, 2))[0]
def ReadInt32(ReadMemory):
    """Return a reader that loads a big-endian uint32 from an address."""
    def read_u32(Address):
        raw = ReadMemory(Address, 4)
        return UnpackInt32BE(raw)[0]
    return read_u32
def ReadInt64(ReadMemory):
    # Build a reader: fetch 8 bytes at Address, decode as big-endian uint64.
    return lambda Address: UnpackInt64BE(ReadMemory(Address, 8))[0]
def ReadFloat(ReadMemory):
    # Build a reader: fetch 4 bytes at Address, decode as big-endian float32.
    return lambda Address: UnpackFloatBE(ReadMemory(Address, 4))[0]
def ReadDouble(ReadMemory):
    # Build a reader: fetch 8 bytes at Address, decode as big-endian float64.
    return lambda Address: UnpackDoubleBE(ReadMemory(Address, 8))[0]
def ReadString(ReadMemory):
    """Return a reader that fetches a NUL-terminated string from memory."""
    def read_str(Address, Encoding="ascii", MaxLength=1024):
        raw = ReadMemory(Address, MaxLength)
        text = raw.decode(Encoding)
        # Everything past the first NUL is leftover bytes from the oversized read.
        return text.split("\x00")[0]
    return read_str
def WriteInt8(WriteMemory):
    # Build a writer: store Value as a big-endian uint8 at Address.
    return lambda Address, Value: WriteMemory(Address, PackInt8BE(Value))
def WriteInt16(WriteMemory):
    # Build a writer: store Value as a big-endian uint16 at Address.
    return lambda Address, Value: WriteMemory(Address, PackInt16BE(Value))
def WriteInt32(WriteMemory):
    """Return a writer that stores a value as a big-endian uint32."""
    def write_u32(Address, Value):
        return WriteMemory(Address, PackInt32BE(Value))
    return write_u32
def WriteInt64(WriteMemory):
    # Build a writer: store Value as a big-endian uint64 at Address.
    return lambda Address, Value: WriteMemory(Address, PackInt64BE(Value))
def WriteFloat(WriteMemory):
    # Build a writer: store Value as a big-endian float32 at Address.
    return lambda Address, Value: WriteMemory(Address, PackFloatBE(Value))
def WriteDouble(WriteMemory):
    # Build a writer: store Value as a big-endian float64 at Address.
    return lambda Address, Value: WriteMemory(Address, PackDoubleBE(Value))
def WriteString(WriteMemory):
    """Return a writer that stores a string as NUL-terminated encoded bytes."""
    def write_str(Address, String, Encoding="ascii"):
        payload = String.encode(Encoding) + b"\x00"
        return WriteMemory(Address, payload)
    return write_str
PackInt8BE = struct.Struct('>B').pack
PackInt16BE = struct.Struct('>H').pack
PackInt32BE = struct.Struct('>L').pack
PackInt64BE = struct.Struct('>Q').pack
PackFloatBE = struct.Struct('>f').pack
PackDoubleBE = struct.Struct('>d').pack
UnpackInt8BE = struct.Struct('>B').unpack
UnpackInt16BE = struct.Struct('>H').unpack
UnpackInt32BE = struct.Struct('>L').unpack
UnpackInt64BE = struct.Struct('>Q').unpack
UnpackFloatBE = struct.Struct('>f').unpack
UnpackDoubleBE = struct.Struct('>d').unpack
PackInt8LE = struct.Struct('<B').pack
PackInt16LE = struct.Struct('<H').pack
PackInt32LE = struct.Struct('<L').pack
PackInt64LE = struct.Struct('<Q').pack
PackFloatLE = struct.Struct('<f').pack
PackDoubleLE = struct.Struct('<d').pack
UnpackInt8LE = struct.Struct('<B').unpack
UnpackInt16LE = struct.Struct('<H').unpack
UnpackInt32LE = struct.Struct('<L').unpack
UnpackInt64LE = struct.Struct('<Q').unpack
UnpackFloatLE = struct.Struct('<f').unpack
UnpackDoubleLE = struct.Struct('<d').unpack
def ReadInt8(ReadMemory):
return lambda Address: UnpackInt8BE(ReadMemory(Address, 1))[0]
def ReadInt16(ReadMemory):
return lambda Address: UnpackInt16BE(ReadMemory(Address, 2))[0]
def ReadInt32(ReadMemory):
return lambda Address: UnpackInt32BE(ReadMemory(Address, 4))[0]
def ReadInt64(ReadMemory):
return lambda Address: UnpackInt64BE(ReadMemory(Address, 8))[0]
def ReadFloat(ReadMemory):
return lambda Address: UnpackFloatBE(ReadMemory(Address, 4))[0]
def ReadDouble(ReadMemory):
return lambda Address: UnpackDoubleBE(ReadMemory(Address, 8))[0]
def ReadString(ReadMemory):
return lambda Address, Encoding="ascii", MaxLength=1024 : ReadMemory(Address, MaxLength).decode(Encoding).split("\x00")[0]
def WriteInt8(WriteMemory):
return lambda Address, Value: WriteMemory(Address, PackInt8BE(Value))
def WriteInt16(WriteMemory):
return lambda Address, Value: WriteMemory(Address, PackInt16BE(Value))
def WriteInt32(WriteMemory):
return lambda Address, Value: WriteMemory(Address, PackInt32BE(Value))
def WriteInt64(WriteMemory):
return lambda Address, Value: WriteMemory(Address, PackInt64BE(Value))
def WriteFloat(WriteMemory):
return lambda Address, Value: WriteMemory(Address, PackFloatBE(Value))
def WriteDouble(WriteMemory):
return lambda Address, Value: WriteMemory(Address, PackDoubleBE(Value))
def WriteString(WriteMemory):
return lambda Address, String, Encoding="ascii": WriteMemory(Address, String.encode(Encoding) + b"\x00") | true | true |
1c2d7924a444c24c2db6abc56e45ac2e5724a4cd | 11,136 | py | Python | examples/legacy/seq2seq/seq2seq_trainer.py | dctelus/transformers | 6786cbc4b14ebff0ac59c768cadd109391db9a08 | [
"Apache-2.0"
] | 1 | 2019-10-01T17:42:02.000Z | 2019-10-01T17:42:02.000Z | examples/legacy/seq2seq/seq2seq_trainer.py | dctelus/transformers | 6786cbc4b14ebff0ac59c768cadd109391db9a08 | [
"Apache-2.0"
] | 1 | 2022-03-23T19:49:13.000Z | 2022-03-23T19:49:13.000Z | examples/legacy/seq2seq/seq2seq_trainer.py | dctelus/transformers | 6786cbc4b14ebff0ac59c768cadd109391db9a08 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)
# Maps the --lr_scheduler CLI choice to the matching transformers schedule factory.
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
    """Trainer specialized for seq2seq models: custom loss (optional label
    smoothing / pad-token masking), configurable LR schedulers, sortish
    sampling, and generation-based prediction steps."""

    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(
                self.model, PreTrainedModel
            ), f"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is {self.model.__class__}"
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        # FSMT keeps separate source/target vocabularies; use the target one.
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        # Both label smoothing and pad-token masking need a pad id to ignore.
        # NOTE(review): "correcly" typo in the assert message below — left
        # untouched here since it is a runtime string.
        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert (
                self.config.pad_token_id is not None
            ), "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss calculation or doing label smoothing."

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        """
        Setup the optimizer and the learning rate scheduler.

        We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
        Trainer's init through :obj:`optimizers`, or subclass and override this method in a subclass.
        """
        if self.optimizer is None:
            # No weight decay for biases and LayerNorm weights (standard practice).
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            # NOTE(review): redundant — immediately overwritten by the if/else below.
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            # `sharded_dpp` is the Trainer attribute set when fairscale sharded
            # DDP is enabled; OSS shards optimizer state across workers.
            if self.sharded_dpp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps):
        """Build the LR scheduler selected by ``--lr_scheduler``; the constant
        variants take fewer arguments than the warmup+decay ones."""
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler

    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
        """Pick the train sampler: none for iterable datasets, TPU-aware on TPU,
        otherwise random/distributed (optionally after sortish reordering)."""
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                # Sortish sampling groups similar-length examples to cut padding.
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        """Compute (loss, logits) with pad-token masking or label smoothing as configured."""
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        """Trainer hook: pop labels from the batch and delegate to _compute_loss."""
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss

    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        """
        Perform an evaluation step on :obj:`model` using obj:`inputs`.

        Subclass and override to inject custom behavior.

        Args:
            model (:obj:`nn.Module`):
                The model to evaluate.
            inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
                The inputs and targets of the model.

                The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
                argument :obj:`labels`. Check your model's documentation for all accepted arguments.
            prediction_loss_only (:obj:`bool`):
                Whether or not to return the loss only.

        Return:
            Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
            A tuple with the loss, logits and labels (each being optional).
        """
        inputs = self._prepare_inputs(inputs)

        # Generation limits fall back to the model config when no data_args given.
        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        """Right-pad ``tensor`` with the pad (or EOS) token id up to ``max_length``."""
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                f"Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
| 42.996139 | 154 | 0.646911 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)
arg_to_scheduler = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
def __init__(self, config=None, data_args=None, *args, **kwargs):
super().__init__(*args, **kwargs)
if config is None:
assert isinstance(
self.model, PreTrainedModel
), f"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is {self.model.__class__}"
self.config = self.model.config
else:
self.config = config
self.data_args = data_args
self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert (
self.config.pad_token_id is not None
), "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss calculation or doing label smoothing."
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for padding.."
)
if self.args.label_smoothing == 0:
self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
else:
from utils import label_smoothed_nll_loss
self.loss_fn = label_smoothed_nll_loss
def create_optimizer_and_scheduler(self, num_training_steps: int):
if self.optimizer is None:
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
optimizer_cls = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
optimizer_cls = Adafactor
optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
else:
optimizer_cls = AdamW
optimizer_kwargs = {
"betas": (self.args.adam_beta1, self.args.adam_beta2),
"eps": self.args.adam_epsilon,
}
optimizer_kwargs["lr"] = self.args.learning_rate
if self.sharded_dpp:
self.optimizer = OSS(
params=optimizer_grouped_parameters,
optim=optimizer_cls,
**optimizer_kwargs,
)
else:
self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
if self.lr_scheduler is None:
self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
else:
logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")
def _get_lr_scheduler(self, num_training_steps):
schedule_func = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
scheduler = schedule_func(self.optimizer)
elif self.args.lr_scheduler == "constant_w_warmup":
scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
else:
scheduler = schedule_func(
self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
)
return scheduler
    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
        """Pick the sampler for the training dataloader.

        Returns ``None`` for iterable datasets (they cannot be indexed by a
        sampler), a TPU-aware sampler when running on TPU, and otherwise a
        random or distributed sampler depending on ``local_rank``.
        """
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                # NOTE(review): "sortish" presumably batches samples of
                # similar length to reduce padding; relies on the dataset
                # exposing make_sortish_sampler() -- confirm against the
                # dataset class.
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )
            # local_rank == -1 means no distributed training.
            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )
    def _compute_loss(self, model, inputs, labels):
        """Run the model on ``inputs`` and compute the training loss.

        Returns:
            A ``(loss, logits)`` tuple.
        """
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # Compute the loss manually so pad tokens are ignored via the
                # loss_fn's ignore_index (configured in __init__).
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # Let the model compute its own loss from the labels.
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # Label smoothing: apply the smoothed NLL loss on log-probs,
            # skipping pad positions.
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits
def compute_loss(self, model, inputs):
labels = inputs.pop("labels")
loss, _ = self._compute_loss(model, inputs, labels)
return loss
    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        """Evaluate ``model`` on one batch.

        When ``--predict_with_generate`` is set (and loss-only mode is off),
        the returned "logits" are generated token ids; generated ids and
        labels are right-padded to ``max_length`` so results can be stacked
        across batches.

        NOTE(review): the ``prediction_loss_only`` *argument* is never read;
        the flag is taken from ``self.args.prediction_loss_only`` instead.

        Returns:
            ``(loss, logits_or_generated_ids, labels)``; the last two are
            ``None`` when ``self.args.prediction_loss_only`` is set.
        """
        inputs = self._prepare_inputs(inputs)

        # Generation settings fall back to the model config when no data_args
        # were provided.
        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # Pad short generations so every batch shares a common width.
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            loss, logits = self._compute_loss(model, inputs, labels)
        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)
def _pad_tensors_to_max_len(self, tensor, max_length):
pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
f"Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be padded to `max_length`={max_length}"
)
padded_tensor = pad_token_id * torch.ones(
(tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
)
padded_tensor[:, : tensor.shape[-1]] = tensor
return padded_tensor
| true | true |
1c2d796a09140eeb79105576d8d176108f33b282 | 398 | py | Python | app/server/migrations/0010_document_priority.py | gong-io/doccano | f649ad39cb7795152253034a4937b0acdd377ee5 | [
"MIT"
] | 7 | 2019-05-20T18:41:25.000Z | 2021-06-12T14:20:21.000Z | app/server/migrations/0010_document_priority.py | gong-io/doccano | f649ad39cb7795152253034a4937b0acdd377ee5 | [
"MIT"
] | 2 | 2019-02-26T18:55:30.000Z | 2019-02-28T19:56:38.000Z | app/server/migrations/0010_document_priority.py | gong-io/doccano | f649ad39cb7795152253034a4937b0acdd377ee5 | [
"MIT"
] | 5 | 2019-05-30T15:56:29.000Z | 2020-08-25T06:58:02.000Z | # Generated by Django 2.1.5 on 2019-03-01 20:55
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a nullable integer ``priority`` field to the Document model."""

    dependencies = [
        ('server', '0009_project_use_machine_model_sort'),
    ]

    operations = [
        migrations.AddField(
            model_name='document',
            name='priority',
            # Nullable, so existing rows need no default-value backfill.
            field=models.IntegerField(null=True),
        ),
    ]
| 20.947368 | 58 | 0.610553 |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Schema migration: give ``Document`` an optional ``priority`` integer."""

    # Must run after the project sort-flag migration.
    dependencies = [
        ('server', '0009_project_use_machine_model_sort'),
    ]

    operations = [
        migrations.AddField(
            model_name='document',
            name='priority',
            field=models.IntegerField(null=True),
        ),
    ]
| true | true |
1c2d7b1ff5a18e98fb6b0f6e6fb196d5e2f0e367 | 7,777 | py | Python | doc/examples/2_seismics/plot_03_rays_layered_and_gradient_models.py | baender/gimli | eb9a2204669cf11209b9577472f61ac70217a191 | [
"Apache-2.0"
] | 224 | 2015-02-20T21:36:24.000Z | 2022-03-30T07:27:43.000Z | doc/examples/2_seismics/plot_03_rays_layered_and_gradient_models.py | baender/gimli | eb9a2204669cf11209b9577472f61ac70217a191 | [
"Apache-2.0"
] | 341 | 2015-05-21T14:39:51.000Z | 2022-03-31T01:54:07.000Z | doc/examples/2_seismics/plot_03_rays_layered_and_gradient_models.py | baender/gimli | eb9a2204669cf11209b9577472f61ac70217a191 | [
"Apache-2.0"
] | 107 | 2015-01-24T14:40:21.000Z | 2022-02-25T12:12:13.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Raypaths in layered and gradient models
=======================================
This example performs raytracing for a two-layer and a vertical gradient model
and compares the resulting traveltimes to existing analytical solutions. An
approximation of the raypath is found by finding the shortest-path through a
grid of nodes. The possible angular coverage is small when only corner points
of a cell (primary nodes) are used for this purpose. The angular coverage, and
hence the numerical accuracy of traveltime calculations, can be significantly
improved by a few secondary nodes along the cell edges. Details can be found in
`Giroux & Larouche (2013) <https://doi.org/10.1016/j.cageo.2012.12.005>`_.
"""
# sphinx_gallery_thumbnail_number = 3
from math import asin, tan
import matplotlib.pyplot as plt
import numpy as np
import pygimli as pg
import pygimli.meshtools as mt
from pygimli.viewer.mpl import drawMesh
from pygimli.physics import TravelTimeManager
###############################################################################
# Two-layer model
# ---------------
# We start by building a regular grid.
# Regular grid: 5 m horizontal spacing from -20 to 150 m, 5 m vertical
# spacing from -60 to 0 m.
mesh_layered = mt.createGrid(
    np.arange(-20, 155, step=5, dtype=float), np.linspace(-60, 0, 13))

###############################################################################
# We now construct the velocity vector for the two-layer case by iterating over
# the cells. Cells above 25 m depth are assigned :math:`v = 1000` m/s and cells
# below are assigned :math:`v = 3000` m/s.

vel_layered = np.zeros(mesh_layered.cellCount())
for cell in mesh_layered.cells():
    # Classify each cell by the depth of its centroid (y is negative downward).
    if cell.center().y() < -25:
        vel = 3000.0
    else:
        vel = 1000.0
    vel_layered[cell.id()] = vel

pg.show(mesh_layered, vel_layered, label="Velocity (m/s)")
###############################################################################
# We now define the analytical solution. The traveltime at a given offset `x`
# is the minimum of the direct and critically refracted wave, where the latter
# is governed by Snell's law.
def analyticalSolution2Layer(x, zlay=25, v1=1000, v2=3000):
    """First-arrival traveltime for a two-layer model.

    Returns the element-wise minimum of the direct-wave and the critically
    refracted head-wave traveltime for offset(s) ``x``.

    Args:
        x: source-receiver offset(s) in m (scalar or array).
        zlay: depth of the layer interface in m.
        v1: velocity of the upper layer in m/s.
        v2: velocity of the lower layer in m/s.
    """
    t_direct = np.abs(x) / v1
    # Snell's law gives the critical angle of the refracted ray.
    theta_c = asin(v1 / v2)
    # Surface offset at which the critically refracted wave first emerges.
    x_cross = 2. * zlay * tan(theta_c)
    t_refracted = (x - x_cross) / v2 + x_cross * v2 / v1**2
    return np.minimum(t_direct, t_refracted)
###############################################################################
# Vertical gradient model
# -----------------------
# We first create an unstructured mesh:
# Sensors every 10 m from 0 to 130 m along the surface.
sensors = np.arange(131, step=10.0)
plc = mt.createWorld([-20, -60], [150, 0], worldMarker=False)
for pos in sensors:
    # Force mesh nodes at the sensor positions on the surface.
    plc.createNode([pos, 0.0])

mesh_gradient = mt.createMesh(plc, quality=33, area=3)

###############################################################################
# A vertical gradient model, i.e. :math:`v(z) = a + bz`, is defined per cell.

a = 1000
b = 100

# Velocity is evaluated per node and then interpolated to cell values.
vel_gradient = []
for node in mesh_gradient.nodes():
    vel_gradient.append(a + b * abs(node.y()))
vel_gradient = pg.meshtools.nodeDataToCellData(mesh_gradient,
                                               np.array(vel_gradient))
pg.show(mesh_gradient, vel_gradient, label="Velocity (m/s)")
###############################################################################
# The traveltime for a gradient velocity model is given by:
#
# .. math::
#
# v = \left|b^{-1}cosh^{-1}\left(1 + \frac{b^2 x^2}{2a^2}\right)\right|
#
def analyticalSolutionGradient(x, a=1000, b=100):
    """First-arrival traveltime for a vertical-gradient model v(z) = a + b*z.

    Returns the element-wise minimum of the direct wave along the surface and
    the diving wave through the gradient medium.

    Args:
        x: source-receiver offset(s) in m (scalar or array).
        a: surface velocity in m/s.
        b: vertical velocity gradient in 1/s.
    """
    offset = np.abs(x)
    t_direct = offset / a
    # Diving-wave traveltime: |acosh(1 + b^2 x^2 / (2 a^2)) / b|.
    argument = 1 + (b**2 * offset**2) / (2 * a**2)
    t_diving = np.abs(np.arccosh(argument) / b)
    return np.minimum(t_direct, t_diving)
###############################################################################
# The loop below calculates the travel times and makes the comparison plot.
# One column per model; rows: mesh+raypaths, traveltimes, absolute error.
fig, ax = plt.subplots(3, 2, figsize=(10, 10), sharex=True)

for j, (case, mesh, vel) in enumerate(zip(["layered", "gradient"],
                                          [mesh_layered, mesh_gradient],
                                          [vel_layered, vel_gradient])):
    pg.boxprint(case)
    if case == "gradient":
        ana = analyticalSolutionGradient
    elif case == "layered":
        ana = analyticalSolution2Layer

    # Reset all boundary markers before the forward calculation.
    for boundary in mesh.boundaries():
        boundary.setMarker(0)

    xmin, xmax = mesh.xmin(), mesh.xmax()
    mesh.createNeighborInfos()

    # In order to use the Dijkstra, we extract the surface positions >0
    mx = pg.x(mesh)
    my = pg.y(mesh)
    px = np.sort(mx[my == 0.0])

    # A data container with index arrays named s (shot) and g (geophones) is
    # created and filled with the positions and shot/geophone indices.
    data = pg.DataContainer()
    data.registerSensorIndex('s')
    data.registerSensorIndex('g')

    for i, pxi in enumerate(px):
        data.createSensor([pxi, 0.0])
        if pxi == 0.0:
            source = i

    nData = len(px)
    data.resize(nData)
    data['s'] = [source] * nData  # only one shot at first sensor
    data['g'] = range(nData)  # and all sensors are receiver geophones

    # Draw initial mesh with velocity distribution
    pg.show(mesh, vel, ax=ax[0, j], label="Velocity (m/s)", hold=True,
            logScale=False, cMap="summer_r", coverage=0.7)
    drawMesh(ax[0, j], mesh, color="white", lw=0.21)

    # We compare the accuracy for 0, 1 and 5 secondary nodes per cell edge.
    sec_nodes = [0, 1, 5]
    t_all = []
    durations = []
    paths = []  # NOTE(review): never used below; kept for compatibility.

    mgr = TravelTimeManager()

    cols = ["orangered", "blue", "black"]
    recs = [1, 3, 8, 13]  # receiver indices whose raypaths are drawn

    for i, n in enumerate(sec_nodes):

        # Perform traveltime calculations and log time with pg.tic() & pg.toc()
        pg.tic()
        res = mgr.simulate(vel=vel, scheme=data, mesh=mesh, secNodes=n)
        # We need to copy res['t'] here because res['t'] is a reference to
        # an array in res, and res will be removed in the next iteration.
        # Unfortunately, we don't have any reference counting for core objects yet.
        t_all.append(res['t'].array())
        durations.append(pg.dur())
        pg.toc("Raytracing with %d secondary nodes:" % n)

        for r, p in enumerate(recs):
            if r == 0:
                lab = "Raypath with %d sec nodes" % n
            else:
                lab = None

            recNode = mgr.fop.mesh().findNearestNode([sensors[p], 0.0])
            sourceNode = mgr.fop.mesh().findNearestNode([0.0, 0.0])

            path = mgr.fop.dijkstra.shortestPath(sourceNode, recNode)
            points = mgr.fop.mesh().positions(withSecNodes=True)[path]
            ax[0, j].plot(pg.x(points), pg.y(points), cols[i], label=lab)

    t_ana = ana(px)

    # Upper subplot
    ax[1, j].plot(px, t_ana * 1000, label="Analytical solution")

    for i, n in enumerate(sec_nodes):
        ax[1, j].plot(px, t_all[i] * 1000,
                      label="Dijkstra (%d sec nodes, %.2f s)" % (n, durations[i]))

    ax[2, j].plot(px, np.zeros_like(px), label="Zero line")  # to keep color cycle

    for i, n in enumerate(sec_nodes):
        ax[2, j].plot(px, np.abs(t_all[i] - t_ana) * 1000)

    ax[1, j].legend()

    # Draw sensor positions for the selected receivers
    for p in recs:
        ax[0, j].plot(sensors[p], 0.0, "kv", ms=10)
    ax[0, j].plot(0.0, 0.0, "ro", ms=10)
    ax[0, j].set_ylim(mesh.ymin(), 2)


ax[0, 0].set_title("Two-layer model")
ax[0, 1].set_title("Vertical gradient model")

ax[0, 0].legend()
ax[0, 0].set_ylabel("y (m)")
ax[1, 0].set_ylabel("Traveltime (ms)")
ax[2, 0].set_ylabel("Absolute difference to\nanalytical solution (ms)")
ax[2, 0].set_xlabel("x (m)")
fig.tight_layout()
| 36.341121 | 83 | 0.586087 |
from math import asin, tan
import matplotlib.pyplot as plt
import numpy as np
import pygimli as pg
import pygimli.meshtools as mt
from pygimli.viewer.mpl import drawMesh
from pygimli.physics import TravelTimeManager
| true | true |
1c2d7c15637ab69c0e6f2dd5b70dcc76319886ba | 1,303 | py | Python | osf_tests/test_reviewable.py | gaybro8777/osf.io | 30408511510a40bc393565817b343ef5fd76ab14 | [
"Apache-2.0"
] | 628 | 2015-01-15T04:33:22.000Z | 2022-03-30T06:40:10.000Z | osf_tests/test_reviewable.py | gaybro8777/osf.io | 30408511510a40bc393565817b343ef5fd76ab14 | [
"Apache-2.0"
] | 4,712 | 2015-01-02T01:41:53.000Z | 2022-03-30T14:18:40.000Z | osf_tests/test_reviewable.py | Johnetordoff/osf.io | de10bf249c46cede04c78f7e6f7e352c69e6e6b5 | [
"Apache-2.0"
] | 371 | 2015-01-12T16:14:08.000Z | 2022-03-31T18:58:29.000Z | import mock
import pytest
from osf.models import Preprint
from osf.utils.workflows import DefaultStates
from osf_tests.factories import PreprintFactory, AuthUserFactory
@pytest.mark.django_db
class TestReviewable:
    """State-machine transitions for reviewable preprints."""

    # Patch identifier minting so the real request function is never called.
    @mock.patch('website.identifiers.utils.request_identifiers')
    def test_state_changes(self, _):
        """Walk initial -> pending -> accepted -> rejected -> accepted and
        verify each transition is persisted to the database."""
        user = AuthUserFactory()
        preprint = PreprintFactory(provider__reviews_workflow='pre-moderation', is_published=False)
        assert preprint.machine_state == DefaultStates.INITIAL.value

        # Submitting moves the preprint into the moderation queue.
        preprint.run_submit(user)
        assert preprint.machine_state == DefaultStates.PENDING.value

        preprint.run_accept(user, 'comment')
        assert preprint.machine_state == DefaultStates.ACCEPTED.value
        # Re-fetch to prove the state was saved, not just set in memory.
        from_db = Preprint.objects.get(id=preprint.id)
        assert from_db.machine_state == DefaultStates.ACCEPTED.value

        preprint.run_reject(user, 'comment')
        assert preprint.machine_state == DefaultStates.REJECTED.value
        from_db.refresh_from_db()
        assert from_db.machine_state == DefaultStates.REJECTED.value

        # A rejected preprint can still be accepted afterwards.
        preprint.run_accept(user, 'comment')
        assert preprint.machine_state == DefaultStates.ACCEPTED.value
        from_db.refresh_from_db()
        assert from_db.machine_state == DefaultStates.ACCEPTED.value
| 38.323529 | 99 | 0.743668 | import mock
import pytest
from osf.models import Preprint
from osf.utils.workflows import DefaultStates
from osf_tests.factories import PreprintFactory, AuthUserFactory
@pytest.mark.django_db
class TestReviewable:
    """Exercises the preprint moderation workflow end to end."""

    @mock.patch('website.identifiers.utils.request_identifiers')
    def test_state_changes(self, _):
        # Pre-moderation, unpublished: starts in the INITIAL machine state.
        user = AuthUserFactory()
        preprint = PreprintFactory(provider__reviews_workflow='pre-moderation', is_published=False)
        assert preprint.machine_state == DefaultStates.INITIAL.value
        preprint.run_submit(user)
        assert preprint.machine_state == DefaultStates.PENDING.value
        preprint.run_accept(user, 'comment')
        assert preprint.machine_state == DefaultStates.ACCEPTED.value
        # Separate DB fetch confirms the transition was committed.
        from_db = Preprint.objects.get(id=preprint.id)
        assert from_db.machine_state == DefaultStates.ACCEPTED.value
        preprint.run_reject(user, 'comment')
        assert preprint.machine_state == DefaultStates.REJECTED.value
        from_db.refresh_from_db()
        assert from_db.machine_state == DefaultStates.REJECTED.value
        preprint.run_accept(user, 'comment')
        assert preprint.machine_state == DefaultStates.ACCEPTED.value
        from_db.refresh_from_db()
        assert from_db.machine_state == DefaultStates.ACCEPTED.value
| true | true |
1c2d7dbc7e352df0cd2a244450e5537e8a1ce9c5 | 7,229 | py | Python | tools/run_tests/xds_k8s_test_driver/framework/infrastructure/gcp/network_security.py | echo80313/grpc | 93cdc8b77e7b3fe4a3afec1c9c7e29b3f02ec3cf | [
"Apache-2.0"
] | null | null | null | tools/run_tests/xds_k8s_test_driver/framework/infrastructure/gcp/network_security.py | echo80313/grpc | 93cdc8b77e7b3fe4a3afec1c9c7e29b3f02ec3cf | [
"Apache-2.0"
] | 4 | 2022-02-27T18:59:37.000Z | 2022-02-27T18:59:53.000Z | tools/run_tests/xds_k8s_test_driver/framework/infrastructure/gcp/network_security.py | echo80313/grpc | 93cdc8b77e7b3fe4a3afec1c9c7e29b3f02ec3cf | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import dataclasses
import logging
from typing import Any, Dict
from google.rpc import code_pb2
import tenacity
from framework.infrastructure import gcp
logger = logging.getLogger(__name__)
# Type aliases
GcpResource = gcp.compute.ComputeV1.GcpResource
@dataclasses.dataclass(frozen=True)
class ServerTlsPolicy:
    """Immutable snapshot of a networksecurity ServerTlsPolicy resource."""
    url: str
    name: str
    server_certificate: dict
    mtls_policy: dict
    update_time: str
    create_time: str

    @classmethod
    def from_response(cls, name: str,
                      response: Dict[str, Any]) -> 'ServerTlsPolicy':
        """Build the dataclass from a raw REST response dict."""
        # 'serverCertificate' and 'mtlsPolicy' are optional in the response.
        fields = {
            'name': name,
            'url': response['name'],
            'server_certificate': response.get('serverCertificate', {}),
            'mtls_policy': response.get('mtlsPolicy', {}),
            'create_time': response['createTime'],
            'update_time': response['updateTime'],
        }
        return cls(**fields)
@dataclasses.dataclass(frozen=True)
class ClientTlsPolicy:
    """Immutable snapshot of a networksecurity ClientTlsPolicy resource."""
    url: str
    name: str
    client_certificate: dict
    server_validation_ca: list
    update_time: str
    create_time: str

    @classmethod
    def from_response(cls, name: str,
                      response: Dict[str, Any]) -> 'ClientTlsPolicy':
        """Parse a raw REST response dict into a ClientTlsPolicy."""
        # Optional sections default to empty containers when absent.
        certificate = response.get('clientCertificate', {})
        validation_ca = response.get('serverValidationCa', [])
        return cls(name=name,
                   url=response['name'],
                   client_certificate=certificate,
                   server_validation_ca=validation_ca,
                   create_time=response['createTime'],
                   update_time=response['updateTime'])
@dataclasses.dataclass(frozen=True)
class AuthorizationPolicy:
    """Immutable snapshot of a networksecurity AuthorizationPolicy resource."""
    url: str
    name: str
    update_time: str
    create_time: str
    action: str
    rules: list

    @classmethod
    def from_response(cls, name: str,
                      response: Dict[str, Any]) -> 'AuthorizationPolicy':
        """Parse a raw REST response dict into an AuthorizationPolicy."""
        # 'rules' may be omitted; 'action' is always present in the response.
        policy_fields = {
            'name': name,
            'url': response['name'],
            'create_time': response['createTime'],
            'update_time': response['updateTime'],
            'action': response['action'],
            'rules': response.get('rules', []),
        }
        return cls(**policy_fields)
class _NetworkSecurityBase(gcp.api.GcpStandardCloudApiResource,
                           metaclass=abc.ABCMeta):
    """Base class for NetworkSecurity APIs."""

    # TODO(https://github.com/grpc/grpc/issues/29532) remove pylint disable
    # pylint: disable=abstract-method

    def __init__(self, api_manager: gcp.api.GcpApiManager, project: str):
        super().__init__(api_manager.networksecurity(self.api_version), project)
        # Shortcut to projects/*/locations/ endpoints
        self._api_locations = self.api.projects().locations()

    @property
    def api_name(self) -> str:
        # Service name used when building resource URLs.
        return 'networksecurity'

    def _execute(self, *args, **kwargs):  # pylint: disable=signature-differs,arguments-differ
        """Execute the parent operation with retries on INTERNAL errors."""
        # Workaround TD bug: throttled operations are reported as internal.
        # Ref b/175345578
        retryer = tenacity.Retrying(
            retry=tenacity.retry_if_exception(self._operation_internal_error),
            wait=tenacity.wait_fixed(10),
            stop=tenacity.stop_after_delay(5 * 60),
            before_sleep=tenacity.before_sleep_log(logger, logging.DEBUG),
            reraise=True)
        retryer(super()._execute, *args, **kwargs)

    @staticmethod
    def _operation_internal_error(exception):
        # True only for operation failures whose status code is INTERNAL.
        return (isinstance(exception, gcp.api.OperationError) and
                exception.error.code == code_pb2.INTERNAL)
class NetworkSecurityV1Beta1(_NetworkSecurityBase):
    """NetworkSecurity API v1beta1.

    Thin CRUD wrappers over the serverTlsPolicies, clientTlsPolicies and
    authorizationPolicies collections under projects/*/locations/.
    """

    SERVER_TLS_POLICIES = 'serverTlsPolicies'
    CLIENT_TLS_POLICIES = 'clientTlsPolicies'
    AUTHZ_POLICIES = 'authorizationPolicies'

    @property
    def api_version(self) -> str:
        return 'v1beta1'

    def create_server_tls_policy(self, name: str, body: dict) -> GcpResource:
        """Create a ServerTlsPolicy named ``name`` from the dict ``body``."""
        return self._create_resource(
            collection=self._api_locations.serverTlsPolicies(),
            body=body,
            serverTlsPolicyId=name)

    def get_server_tls_policy(self, name: str) -> ServerTlsPolicy:
        """Fetch a ServerTlsPolicy and parse it into its dataclass."""
        response = self._get_resource(
            collection=self._api_locations.serverTlsPolicies(),
            full_name=self.resource_full_name(name, self.SERVER_TLS_POLICIES))
        return ServerTlsPolicy.from_response(name, response)

    def delete_server_tls_policy(self, name: str) -> bool:
        """Delete the named ServerTlsPolicy."""
        return self._delete_resource(
            collection=self._api_locations.serverTlsPolicies(),
            full_name=self.resource_full_name(name, self.SERVER_TLS_POLICIES))

    def create_client_tls_policy(self, name: str, body: dict) -> GcpResource:
        """Create a ClientTlsPolicy named ``name`` from the dict ``body``."""
        return self._create_resource(
            collection=self._api_locations.clientTlsPolicies(),
            body=body,
            clientTlsPolicyId=name)

    def get_client_tls_policy(self, name: str) -> ClientTlsPolicy:
        """Fetch a ClientTlsPolicy and parse it into its dataclass."""
        response = self._get_resource(
            collection=self._api_locations.clientTlsPolicies(),
            full_name=self.resource_full_name(name, self.CLIENT_TLS_POLICIES))
        return ClientTlsPolicy.from_response(name, response)

    def delete_client_tls_policy(self, name: str) -> bool:
        """Delete the named ClientTlsPolicy."""
        return self._delete_resource(
            collection=self._api_locations.clientTlsPolicies(),
            full_name=self.resource_full_name(name, self.CLIENT_TLS_POLICIES))

    def create_authz_policy(self, name: str, body: dict) -> GcpResource:
        """Create an AuthorizationPolicy named ``name`` from ``body``."""
        return self._create_resource(
            collection=self._api_locations.authorizationPolicies(),
            body=body,
            authorizationPolicyId=name)

    def get_authz_policy(self, name: str) -> AuthorizationPolicy:
        """Fetch an AuthorizationPolicy and parse it into its dataclass.

        Fix: this previously parsed the response with ClientTlsPolicy,
        which dropped the policy's ``action``/``rules`` fields and filled
        in unrelated TLS fields instead.
        """
        response = self._get_resource(
            collection=self._api_locations.authorizationPolicies(),
            full_name=self.resource_full_name(name, self.AUTHZ_POLICIES))
        return AuthorizationPolicy.from_response(name, response)

    def delete_authz_policy(self, name: str) -> bool:
        """Delete the named AuthorizationPolicy."""
        return self._delete_resource(
            collection=self._api_locations.authorizationPolicies(),
            full_name=self.resource_full_name(name, self.AUTHZ_POLICIES))
class NetworkSecurityV1Alpha1(NetworkSecurityV1Beta1):
    """NetworkSecurity API v1alpha1.

    Note: extending v1beta1 class presumes that v1beta1 is just a v1alpha1 API
    graduated into a more stable version. This is true in most cases. However,
    v1alpha1 class can always override and reimplement incompatible methods.
    """

    @property
    def api_version(self) -> str:
        # Only the version segment of the REST URL differs from v1beta1.
        return 'v1alpha1'
| 36.510101 | 94 | 0.669664 |
import abc
import dataclasses
import logging
from typing import Any, Dict
from google.rpc import code_pb2
import tenacity
from framework.infrastructure import gcp
logger = logging.getLogger(__name__)
GcpResource = gcp.compute.ComputeV1.GcpResource
@dataclasses.dataclass(frozen=True)
class ServerTlsPolicy:
    """Parsed, read-only view of a ServerTlsPolicy REST resource."""
    url: str
    name: str
    server_certificate: dict
    mtls_policy: dict
    update_time: str
    create_time: str

    @classmethod
    def from_response(cls, name: str, response: Dict[str,
                                                     Any]) -> 'ServerTlsPolicy':
        # 'serverCertificate' and 'mtlsPolicy' are optional in the response.
        return cls(name=name,
                   url=response['name'],
                   server_certificate=response.get('serverCertificate', {}),
                   mtls_policy=response.get('mtlsPolicy', {}),
                   create_time=response['createTime'],
                   update_time=response['updateTime'])
@dataclasses.dataclass(frozen=True)
class ClientTlsPolicy:
    """Parsed, read-only view of a ClientTlsPolicy REST resource."""
    url: str
    name: str
    client_certificate: dict
    server_validation_ca: list
    update_time: str
    create_time: str

    @classmethod
    def from_response(cls, name: str, response: Dict[str,
                                                     Any]) -> 'ClientTlsPolicy':
        # Optional sections default to empty containers when absent.
        return cls(name=name,
                   url=response['name'],
                   client_certificate=response.get('clientCertificate', {}),
                   server_validation_ca=response.get('serverValidationCa', []),
                   create_time=response['createTime'],
                   update_time=response['updateTime'])
@dataclasses.dataclass(frozen=True)
class AuthorizationPolicy:
    """Parsed, read-only view of an AuthorizationPolicy REST resource."""
    url: str
    name: str
    update_time: str
    create_time: str
    action: str
    rules: list

    @classmethod
    def from_response(cls, name: str,
                      response: Dict[str, Any]) -> 'AuthorizationPolicy':
        # 'rules' is optional; 'action' is read unconditionally.
        return cls(name=name,
                   url=response['name'],
                   create_time=response['createTime'],
                   update_time=response['updateTime'],
                   action=response['action'],
                   rules=response.get('rules', []))
class _NetworkSecurityBase(gcp.api.GcpStandardCloudApiResource,
                           metaclass=abc.ABCMeta):
    """Abstract base for NetworkSecurity API wrappers."""

    def __init__(self, api_manager: gcp.api.GcpApiManager, project: str):
        super().__init__(api_manager.networksecurity(self.api_version), project)
        # Shortcut to the projects/*/locations/ endpoints.
        self._api_locations = self.api.projects().locations()

    @property
    def api_name(self) -> str:
        # Service name used when building resource URLs.
        return 'networksecurity'

    def _execute(self, *args, **kwargs):
        # Retry operations that fail with gRPC code INTERNAL (up to 5
        # minutes, 10 s between attempts), then re-raise the last error.
        retryer = tenacity.Retrying(
            retry=tenacity.retry_if_exception(self._operation_internal_error),
            wait=tenacity.wait_fixed(10),
            stop=tenacity.stop_after_delay(5 * 60),
            before_sleep=tenacity.before_sleep_log(logger, logging.DEBUG),
            reraise=True)
        retryer(super()._execute, *args, **kwargs)

    @staticmethod
    def _operation_internal_error(exception):
        # True only for operation failures whose status code is INTERNAL.
        return (isinstance(exception, gcp.api.OperationError) and
                exception.error.code == code_pb2.INTERNAL)
class NetworkSecurityV1Beta1(_NetworkSecurityBase):
    """CRUD helpers for the NetworkSecurity v1beta1 REST surface."""

    SERVER_TLS_POLICIES = 'serverTlsPolicies'
    CLIENT_TLS_POLICIES = 'clientTlsPolicies'
    AUTHZ_POLICIES = 'authorizationPolicies'

    @property
    def api_version(self) -> str:
        return 'v1beta1'

    def create_server_tls_policy(self, name: str, body: dict) -> GcpResource:
        """Create a ServerTlsPolicy resource."""
        return self._create_resource(
            collection=self._api_locations.serverTlsPolicies(),
            body=body,
            serverTlsPolicyId=name)

    def get_server_tls_policy(self, name: str) -> ServerTlsPolicy:
        """Fetch and parse a ServerTlsPolicy."""
        response = self._get_resource(
            collection=self._api_locations.serverTlsPolicies(),
            full_name=self.resource_full_name(name, self.SERVER_TLS_POLICIES))
        return ServerTlsPolicy.from_response(name, response)

    def delete_server_tls_policy(self, name: str) -> bool:
        """Delete a ServerTlsPolicy."""
        return self._delete_resource(
            collection=self._api_locations.serverTlsPolicies(),
            full_name=self.resource_full_name(name, self.SERVER_TLS_POLICIES))

    def create_client_tls_policy(self, name: str, body: dict) -> GcpResource:
        """Create a ClientTlsPolicy resource."""
        return self._create_resource(
            collection=self._api_locations.clientTlsPolicies(),
            body=body,
            clientTlsPolicyId=name)

    def get_client_tls_policy(self, name: str) -> ClientTlsPolicy:
        """Fetch and parse a ClientTlsPolicy."""
        response = self._get_resource(
            collection=self._api_locations.clientTlsPolicies(),
            full_name=self.resource_full_name(name, self.CLIENT_TLS_POLICIES))
        return ClientTlsPolicy.from_response(name, response)

    def delete_client_tls_policy(self, name: str) -> bool:
        """Delete a ClientTlsPolicy."""
        return self._delete_resource(
            collection=self._api_locations.clientTlsPolicies(),
            full_name=self.resource_full_name(name, self.CLIENT_TLS_POLICIES))

    def create_authz_policy(self, name: str, body: dict) -> GcpResource:
        """Create an AuthorizationPolicy resource."""
        return self._create_resource(
            collection=self._api_locations.authorizationPolicies(),
            body=body,
            authorizationPolicyId=name)

    def get_authz_policy(self, name: str) -> AuthorizationPolicy:
        """Fetch and parse an AuthorizationPolicy.

        Fix: the response was previously parsed with ClientTlsPolicy, losing
        the ``action``/``rules`` fields of the authorization policy.
        """
        response = self._get_resource(
            collection=self._api_locations.authorizationPolicies(),
            full_name=self.resource_full_name(name, self.AUTHZ_POLICIES))
        return AuthorizationPolicy.from_response(name, response)

    def delete_authz_policy(self, name: str) -> bool:
        """Delete an AuthorizationPolicy."""
        return self._delete_resource(
            collection=self._api_locations.authorizationPolicies(),
            full_name=self.resource_full_name(name, self.AUTHZ_POLICIES))
class NetworkSecurityV1Alpha1(NetworkSecurityV1Beta1):
    """NetworkSecurity API v1alpha1.

    Inherits every CRUD helper from the v1beta1 wrapper; only the version
    string differs. Incompatible methods can be overridden here if needed.
    """

    @property
    def api_version(self) -> str:
        return 'v1alpha1'
| true | true |
1c2d7df8770200cff1fd47c54db9a07605cd4716 | 25,342 | py | Python | libs/cherrypy/_cpdispatch.py | scambra/HTPC-Manager | 1a1440db84ae1b6e7a2610c7f3bd5b6adf0aab1d | [
"MIT"
] | 674 | 2015-11-06T04:22:47.000Z | 2022-02-26T17:31:43.000Z | libs/cherrypy/_cpdispatch.py | scambra/HTPC-Manager | 1a1440db84ae1b6e7a2610c7f3bd5b6adf0aab1d | [
"MIT"
] | 713 | 2015-11-06T10:48:58.000Z | 2018-11-27T16:32:18.000Z | libs/cherrypy/_cpdispatch.py | scambra/HTPC-Manager | 1a1440db84ae1b6e7a2610c7f3bd5b6adf0aab1d | [
"MIT"
] | 115 | 2015-01-08T14:41:00.000Z | 2022-02-13T12:31:17.000Z | """CherryPy dispatchers.
A 'dispatcher' is the object which looks up the 'page handler' callable
and collects config for the current request based on the path_info, other
request attributes, and the application architecture. The core calls the
dispatcher as early as possible, passing it a 'path_info' argument.
The default dispatcher discovers the page handler by matching path_info
to a hierarchical arrangement of objects, starting at request.app.root.
"""
import string
import sys
import types
try:
    # Python 2: old-style classes have their own type, types.ClassType.
    classtype = (type, types.ClassType)
except AttributeError:
    # Python 3: ClassType no longer exists; 'type' covers all classes.
    classtype = type
import cherrypy
from cherrypy._cpcompat import set
class PageHandler(object):
    """Callable which sets response.body."""

    def __init__(self, callable, *args, **kwargs):
        self.callable = callable
        self.args = args
        self.kwargs = kwargs

    # The args/kwargs pairs proxy to the live request object so that
    # modifications made by post-dispatch hooks are seen by the handler.
    def get_args(self):
        return cherrypy.serving.request.args

    def set_args(self, args):
        cherrypy.serving.request.args = args
        return cherrypy.serving.request.args

    args = property(
        get_args,
        set_args,
        doc="The ordered args should be accessible from post dispatch hooks"
    )

    def get_kwargs(self):
        return cherrypy.serving.request.kwargs

    def set_kwargs(self, kwargs):
        cherrypy.serving.request.kwargs = kwargs
        return cherrypy.serving.request.kwargs

    kwargs = property(
        get_kwargs,
        set_kwargs,
        doc="The named kwargs should be accessible from post dispatch hooks"
    )

    def __call__(self):
        """Invoke the page handler; set an HTTP error if the args mismatch."""
        try:
            return self.callable(*self.args, **self.kwargs)
        except TypeError:
            x = sys.exc_info()[1]
            try:
                # Decide whether the TypeError was caused by a bad request
                # (wrong parameters) rather than a bug inside the handler.
                test_callable_spec(self.callable, self.args, self.kwargs)
            except cherrypy.HTTPError:
                # Parameter mismatch: surface the 404/400 HTTPError.
                raise sys.exc_info()[1]
            except:
                # The spec check itself failed: re-raise the original error.
                raise x
            # Spec check passed, so the TypeError came from the handler body.
            raise
def test_callable_spec(callable, callable_args, callable_kwargs):
    """
    Inspect callable and test to see if the given args are suitable for it.

    When an error occurs during the handler's invoking stage there are 2
    erroneous cases:
    1.  Too many parameters passed to a function which doesn't define
        one of *args or **kwargs.
    2.  Too little parameters are passed to the function.

    There are 3 sources of parameters to a cherrypy handler.
    1.  query string parameters are passed as keyword parameters to the
        handler.
    2.  body parameters are also passed as keyword parameters.
    3.  when partial matching occurs, the final path atoms are passed as
        positional args.
    Both the query string and path atoms are part of the URI.  If they are
    incorrect, then a 404 Not Found should be raised. Conversely the body
    parameters are part of the request; if they are invalid a 400 Bad Request.
    """
    show_mismatched_params = getattr(
        cherrypy.serving.request, 'show_mismatched_params', False)
    try:
        (args, varargs, varkw, defaults) = getargspec(callable)
    except TypeError:
        if isinstance(callable, object) and hasattr(callable, '__call__'):
            # Callable instances expose their signature via __call__.
            (args, varargs, varkw,
             defaults) = getargspec(callable.__call__)
        else:
            # If it wasn't one of our own types, re-raise
            # the original error
            raise

    # Drop the implicit 'self' of bound handler methods.
    if args and args[0] == 'self':
        args = args[1:]

    # arg_usage counts how many values each named parameter received.
    arg_usage = dict([(arg, 0,) for arg in args])
    vararg_usage = 0
    varkw_usage = 0
    extra_kwargs = set()

    for i, value in enumerate(callable_args):
        try:
            arg_usage[args[i]] += 1
        except IndexError:
            vararg_usage += 1

    for key in callable_kwargs.keys():
        try:
            arg_usage[key] += 1
        except KeyError:
            varkw_usage += 1
            extra_kwargs.add(key)

    # figure out which args have defaults.
    args_with_defaults = args[-len(defaults or []):]
    for i, val in enumerate(defaults or []):
        # Defaults take effect only when the arg hasn't been used yet.
        if arg_usage[args_with_defaults[i]] == 0:
            arg_usage[args_with_defaults[i]] += 1

    missing_args = []
    multiple_args = []
    for key, usage in arg_usage.items():
        if usage == 0:
            missing_args.append(key)
        elif usage > 1:
            multiple_args.append(key)

    if missing_args:
        # In the case where the method allows body arguments
        # there are 3 potential errors:
        # 1. not enough query string parameters -> 404
        # 2. not enough body parameters -> 400
        # 3. not enough path parts (partial matches) -> 404
        #
        # We can't actually tell which case it is,
        # so I'm raising a 404 because that covers 2/3 of the
        # possibilities
        #
        # In the case where the method does not allow body
        # arguments it's definitely a 404.
        message = None
        if show_mismatched_params:
            message = "Missing parameters: %s" % ",".join(missing_args)
        raise cherrypy.HTTPError(404, message=message)

    # the extra positional arguments come from the path - 404 Not Found
    if not varargs and vararg_usage > 0:
        raise cherrypy.HTTPError(404)

    body_params = cherrypy.serving.request.body.params or {}
    body_params = set(body_params.keys())
    qs_params = set(callable_kwargs.keys()) - body_params

    if multiple_args:
        if qs_params.intersection(set(multiple_args)):
            # If any of the multiple parameters came from the query string then
            # it's a 404 Not Found
            error = 404
        else:
            # Otherwise it's a 400 Bad Request
            error = 400

        message = None
        if show_mismatched_params:
            message = "Multiple values for parameters: "\
                "%s" % ",".join(multiple_args)
        raise cherrypy.HTTPError(error, message=message)

    if not varkw and varkw_usage > 0:

        # If there were extra query string parameters, it's a 404 Not Found
        extra_qs_params = set(qs_params).intersection(extra_kwargs)
        if extra_qs_params:
            message = None
            if show_mismatched_params:
                message = "Unexpected query string "\
                    "parameters: %s" % ", ".join(extra_qs_params)
            raise cherrypy.HTTPError(404, message=message)

        # If there were any extra body parameters, it's a 400 Bad Request
        extra_body_params = set(body_params).intersection(extra_kwargs)
        if extra_body_params:
            message = None
            if show_mismatched_params:
                message = "Unexpected body parameters: "\
                    "%s" % ", ".join(extra_body_params)
            raise cherrypy.HTTPError(400, message=message)
try:
    import inspect
except ImportError:
    # No inspect module available: disable signature checking entirely.
    test_callable_spec = lambda callable, args, kwargs: None
else:
    getargspec = inspect.getargspec
    # Python 3 requires using getfullargspec if keyword-only arguments are present
    if hasattr(inspect, 'getfullargspec'):
        def getargspec(callable):
            # Truncate to the classic 4-tuple (args, varargs, varkw, defaults).
            return inspect.getfullargspec(callable)[:4]
class LateParamPageHandler(PageHandler):
    """Page handler whose kwargs are resolved at call time, not lookup time.

    cherrypy.request.params must not be captured when the handler is
    looked up: tools (e.g. the decoding tool) may still rewrite the
    params dict between handler lookup and the actual call.  Overriding
    the ``kwargs`` property defers the merge until the kwargs are read.
    """

    def _get_kwargs(self):
        # Start from a fresh copy of the (possibly tool-modified) request
        # params, then layer any explicitly-assigned kwargs on top.
        merged = cherrypy.serving.request.params.copy()
        explicit = self._kwargs
        if explicit:
            merged.update(explicit)
        return merged

    def _set_kwargs(self, kwargs):
        cherrypy.serving.request.kwargs = kwargs
        self._kwargs = kwargs

    kwargs = property(_get_kwargs, _set_kwargs,
                      doc='page handler kwargs (with '
                      'cherrypy.request.params copied in)')
# Build the translation table used to map URL path segments to legal Python
# identifiers (every punctuation character becomes an underscore), plus a
# validator for user-supplied tables.  The table type differs by version:
# str.translate takes a 256-char string on Python 2 and a dict on Python 3.
if sys.version_info < (3, 0):
    punctuation_to_underscores = string.maketrans(
        string.punctuation, '_' * len(string.punctuation))
    def validate_translator(t):
        # Python 2 str.translate requires a 256-character mapping string.
        if not isinstance(t, str) or len(t) != 256:
            raise ValueError(
                "The translate argument must be a str of len 256.")
else:
    punctuation_to_underscores = str.maketrans(
        string.punctuation, '_' * len(string.punctuation))
    def validate_translator(t):
        # Python 3 str.translate takes a {codepoint: replacement} mapping.
        if not isinstance(t, dict):
            raise ValueError("The translate argument must be a dict.")
class Dispatcher(object):
    """CherryPy Dispatcher which walks a tree of objects to find a handler.
    The tree is rooted at cherrypy.request.app.root, and each hierarchical
    component in the path_info argument is matched to a corresponding nested
    attribute of the root object. Matching handlers must have an 'exposed'
    attribute which evaluates to True. The special method name "index"
    matches a URI which ends in a slash ("/"). The special method name
    "default" may match a portion of the path_info (but only when no longer
    substring of the path_info matches some other object).
    This is the default, built-in dispatcher for CherryPy.
    """
    dispatch_method_name = '_cp_dispatch'
    """
    The name of the dispatch method that nodes may optionally implement
    to provide their own dynamic dispatch algorithm.
    """
    def __init__(self, dispatch_method_name=None,
                 translate=punctuation_to_underscores):
        # 'translate' maps URL path characters onto legal Python identifier
        # characters; validate it matches this interpreter's translate type.
        validate_translator(translate)
        self.translate = translate
        if dispatch_method_name:
            self.dispatch_method_name = dispatch_method_name
    def __call__(self, path_info):
        """Set handler and config for the current request."""
        request = cherrypy.serving.request
        func, vpath = self.find_handler(path_info)
        if func:
            # Decode any leftover %2F in the virtual_path atoms.
            vpath = [x.replace("%2F", "/") for x in vpath]
            request.handler = LateParamPageHandler(func, *vpath)
        else:
            request.handler = cherrypy.NotFound()
    def find_handler(self, path):
        """Return the appropriate page handler, plus any virtual path.
        This will return two objects. The first will be a callable,
        which can be used to generate page output. Any parameters from
        the query string or request body will be sent to that callable
        as keyword arguments.
        The callable is found by traversing the application's tree,
        starting from cherrypy.request.app.root, and matching path
        components to successive objects in the tree. For example, the
        URL "/path/to/handler" might return root.path.to.handler.
        The second object returned will be a list of names which are
        'virtual path' components: parts of the URL which are dynamic,
        and were not used when looking up the handler.
        These virtual path components are passed to the handler as
        positional arguments.
        """
        request = cherrypy.serving.request
        app = request.app
        root = app.root
        dispatch_name = self.dispatch_method_name
        # Get config for the root object/path.
        fullpath = [x for x in path.strip('/').split('/') if x] + ['index']
        fullpath_len = len(fullpath)
        segleft = fullpath_len
        nodeconf = {}
        if hasattr(root, "_cp_config"):
            nodeconf.update(root._cp_config)
        if "/" in app.config:
            nodeconf.update(app.config["/"])
        # object_trail records every node visited along the path as
        # [name, node, config, segments-left]; candidates are later tried
        # in reverse order so the deepest match wins.
        object_trail = [['root', root, nodeconf, segleft]]
        node = root
        iternames = fullpath[:]
        while iternames:
            name = iternames[0]
            # map to legal Python identifiers (e.g. replace '.' with '_')
            objname = name.translate(self.translate)
            nodeconf = {}
            subnode = getattr(node, objname, None)
            pre_len = len(iternames)
            if subnode is None:
                dispatch = getattr(node, dispatch_name, None)
                if dispatch and hasattr(dispatch, '__call__') and not \
                        getattr(dispatch, 'exposed', False) and \
                        pre_len > 1:
                    # Don't expose the hidden 'index' token to _cp_dispatch
                    # We skip this if pre_len == 1 since it makes no sense
                    # to call a dispatcher when we have no tokens left.
                    index_name = iternames.pop()
                    subnode = dispatch(vpath=iternames)
                    iternames.append(index_name)
                else:
                    # We didn't find a path, but keep processing in case there
                    # is a default() handler.
                    iternames.pop(0)
            else:
                # We found the path, remove the vpath entry
                iternames.pop(0)
            segleft = len(iternames)
            if segleft > pre_len:
                # No path segment was removed. Raise an error.
                raise cherrypy.CherryPyException(
                    "A vpath segment was added. Custom dispatchers may only "
                    + "remove elements. While trying to process "
                    + "{0} in {1}".format(name, fullpath)
                )
            elif segleft == pre_len:
                # Assume that the handler used the current path segment, but
                # did not pop it. This allows things like
                # return getattr(self, vpath[0], None)
                iternames.pop(0)
                segleft -= 1
            node = subnode
            if node is not None:
                # Get _cp_config attached to this node.
                if hasattr(node, "_cp_config"):
                    nodeconf.update(node._cp_config)
            # Mix in values from app.config for this path.
            existing_len = fullpath_len - pre_len
            if existing_len != 0:
                curpath = '/' + '/'.join(fullpath[0:existing_len])
            else:
                curpath = ''
            new_segs = fullpath[fullpath_len - pre_len:fullpath_len - segleft]
            for seg in new_segs:
                curpath += '/' + seg
                if curpath in app.config:
                    nodeconf.update(app.config[curpath])
            object_trail.append([name, node, nodeconf, segleft])
        def set_conf():
            """Collapse all object_trail config into cherrypy.request.config.
            """
            base = cherrypy.config.copy()
            # Note that we merge the config from each node
            # even if that node was None.
            for name, obj, conf, segleft in object_trail:
                base.update(conf)
                if 'tools.staticdir.dir' in conf:
                    base['tools.staticdir.section'] = '/' + \
                        '/'.join(fullpath[0:fullpath_len - segleft])
            return base
        # Try successive objects (reverse order)
        num_candidates = len(object_trail) - 1
        for i in range(num_candidates, -1, -1):
            name, candidate, nodeconf, segleft = object_trail[i]
            if candidate is None:
                continue
            # Try a "default" method on the current leaf.
            if hasattr(candidate, "default"):
                defhandler = candidate.default
                if getattr(defhandler, 'exposed', False):
                    # Insert any extra _cp_config from the default handler.
                    conf = getattr(defhandler, "_cp_config", {})
                    object_trail.insert(
                        i + 1, ["default", defhandler, conf, segleft])
                    request.config = set_conf()
                    # See https://bitbucket.org/cherrypy/cherrypy/issue/613
                    request.is_index = path.endswith("/")
                    return defhandler, fullpath[fullpath_len - segleft:-1]
            # Uncomment the next line to restrict positional params to
            # "default".
            # if i < num_candidates - 2: continue
            # Try the current leaf.
            if getattr(candidate, 'exposed', False):
                request.config = set_conf()
                if i == num_candidates:
                    # We found the extra ".index". Mark request so tools
                    # can redirect if path_info has no trailing slash.
                    request.is_index = True
                else:
                    # We're not at an 'index' handler. Mark request so tools
                    # can redirect if path_info has NO trailing slash.
                    # Note that this also includes handlers which take
                    # positional parameters (virtual paths).
                    request.is_index = False
                return candidate, fullpath[fullpath_len - segleft:-1]
        # We didn't find anything
        request.config = set_conf()
        return None, []
class MethodDispatcher(Dispatcher):
    """Dispatch to a method of the found resource named after the HTTP verb.

    After the normal tree walk locates an exposed resource object, the
    upper-cased request method (GET, POST, ...) selects a same-named
    method on that object.  HEAD falls back to GET, and an Allow header
    listing every all-caps method name is always emitted.  The containing
    class must be exposed, not the individual verb methods.
    """

    def __call__(self, path_info):
        """Set handler and config for the current request."""
        request = cherrypy.serving.request
        resource, vpath = self.find_handler(path_info)
        if not resource:
            request.handler = cherrypy.NotFound()
            return
        # Every all-caps attribute is advertised as an allowed verb;
        # HEAD is implied by GET.
        verbs = [m for m in dir(resource) if m.isupper()]
        if "GET" in verbs and "HEAD" not in verbs:
            verbs.append("HEAD")
        verbs.sort()
        cherrypy.serving.response.headers['Allow'] = ", ".join(verbs)
        # Pick the sub-handler matching the request method.
        verb = request.method.upper()
        subhandler = getattr(resource, verb, None)
        if subhandler is None and verb == "HEAD":
            # HEAD requests reuse the GET handler.
            subhandler = getattr(resource, "GET", None)
        if not subhandler:
            request.handler = cherrypy.HTTPError(405)
            return
        # Merge any _cp_config attached to the verb method itself.
        if hasattr(subhandler, "_cp_config"):
            request.config.update(subhandler._cp_config)
        # Decode any leftover %2F in the virtual-path atoms.
        decoded = [atom.replace("%2F", "/") for atom in vpath]
        request.handler = LateParamPageHandler(subhandler, *decoded)
class RoutesDispatcher(object):
    """A Routes based dispatcher for CherryPy."""
    def __init__(self, full_result=False, **mapper_options):
        """
        Routes dispatcher
        Set full_result to True if you wish the controller
        and the action to be passed on to the page handler
        parameters. By default they won't be.
        """
        import routes
        self.full_result = full_result
        # Map of route name -> controller (class or instance); the mapper
        # itself only stores the name and looks the controller up here.
        self.controllers = {}
        self.mapper = routes.Mapper(**mapper_options)
        self.mapper.controller_scan = self.controllers.keys
    def connect(self, name, route, controller, **kwargs):
        # Register a named route bound to the given controller.
        self.controllers[name] = controller
        self.mapper.connect(name, route, controller=name, **kwargs)
    def redirect(self, url):
        raise cherrypy.HTTPRedirect(url)
    def __call__(self, path_info):
        """Set handler and config for the current request."""
        func = self.find_handler(path_info)
        if func:
            cherrypy.serving.request.handler = LateParamPageHandler(func)
        else:
            cherrypy.serving.request.handler = cherrypy.NotFound()
    def find_handler(self, path_info):
        """Find the right page handler, and set request.config."""
        import routes
        request = cherrypy.serving.request
        # Publish this request's environment to the routes thread-local
        # config so route generation/redirects work.
        config = routes.request_config()
        config.mapper = self.mapper
        if hasattr(request, 'wsgi_environ'):
            config.environ = request.wsgi_environ
        config.host = request.headers.get('Host', None)
        config.protocol = request.scheme
        config.redirect = self.redirect
        result = self.mapper.match(path_info)
        config.mapper_dict = result
        params = {}
        if result:
            params = result.copy()
        if not self.full_result:
            params.pop('controller', None)
            params.pop('action', None)
        request.params.update(params)
        # Get config for the root object/path.
        request.config = base = cherrypy.config.copy()
        curpath = ""
        def merge(nodeconf):
            if 'tools.staticdir.dir' in nodeconf:
                nodeconf['tools.staticdir.section'] = curpath or "/"
            base.update(nodeconf)
        app = request.app
        root = app.root
        if hasattr(root, "_cp_config"):
            merge(root._cp_config)
        if "/" in app.config:
            merge(app.config["/"])
        # Mix in values from app.config.
        atoms = [x for x in path_info.split("/") if x]
        if atoms:
            last = atoms.pop()
        else:
            last = None
        for atom in atoms:
            curpath = "/".join((curpath, atom))
            if curpath in app.config:
                merge(app.config[curpath])
        handler = None
        if result:
            controller = result.get('controller')
            controller = self.controllers.get(controller, controller)
            if controller:
                if isinstance(controller, classtype):
                    # Routes was given a class; instantiate per request.
                    controller = controller()
                # Get config from the controller.
                if hasattr(controller, "_cp_config"):
                    merge(controller._cp_config)
            action = result.get('action')
            if action is not None:
                handler = getattr(controller, action, None)
                # Get config from the handler
                if hasattr(handler, "_cp_config"):
                    merge(handler._cp_config)
            else:
                handler = controller
        # Do the last path atom here so it can
        # override the controller's _cp_config.
        if last:
            curpath = "/".join((curpath, last))
            if curpath in app.config:
                merge(app.config[curpath])
        return handler
def XMLRPCDispatcher(next_dispatcher=Dispatcher()):
    """Wrap a dispatcher so XML-RPC request paths are normalized first."""
    from cherrypy.lib import xmlrpcutil

    def xmlrpc_dispatch(path_info):
        # Normalize the XML-RPC path, then delegate to the wrapped dispatcher.
        return next_dispatcher(xmlrpcutil.patched_path(path_info))

    return xmlrpc_dispatch
def VirtualHost(next_dispatcher=Dispatcher(), use_x_forwarded_host=True,
                **domains):
    """Select a different handler based on the Host header.

    Useful when running multiple sites within one CherryPy server: each
    entry in ``domains`` maps a Host header value (optionally including a
    port, e.g. 'www.domain2.example:443') to a URL prefix prepended to
    path_info before delegating to ``next_dispatcher``.  For example::

        [/]
        request.dispatch = cherrypy.dispatch.VirtualHost(
            **{'www.domain2.example': '/domain2',
               'www.domain2.example:443': '/secure',
               })

    next_dispatcher
        The next dispatcher object in the dispatch chain; defaults to
        cherrypy.dispatch.Dispatcher().
    use_x_forwarded_host
        If True (the default), prefer any "X-Forwarded-Host" request
        header (commonly added by proxying HTTP servers such as Apache)
        over the "Host" header.
    ``**domains``
        A dict of {host header value: virtual prefix} pairs.  Note that
        you often need separate entries for "example.com" and
        "www.example.com", and that "Host" headers may carry a port.
    """
    from cherrypy.lib import httputil

    def vhost_dispatch(path_info):
        request = cherrypy.serving.request
        headers = request.headers
        host = headers.get('Host', '')
        if use_x_forwarded_host:
            host = headers.get("X-Forwarded-Host", host)
        prefix = domains.get(host, "")
        if prefix:
            path_info = httputil.urljoin(prefix, path_info)
        result = next_dispatcher(path_info)
        # Strip the virtual prefix back out of the staticdir section so
        # static paths resolve against the real mount point.  See
        # https://bitbucket.org/cherrypy/cherrypy/issue/614.
        section = request.config.get('tools.staticdir.section')
        if section:
            request.config['tools.staticdir.section'] = section[len(prefix):]
        return result

    return vhost_dispatch
| 36.887918 | 82 | 0.603899 |
import string
import sys
import types
# On Python 2 a "class" may be an old-style class (types.ClassType) or a
# new-style type; Python 3 drops ClassType, so fall back to type alone.
try:
    classtype = (type, types.ClassType)
except AttributeError:
    classtype = type
import cherrypy
from cherrypy._cpcompat import set
class PageHandler(object):
    """Wrap a page-handler callable with its positional and named args.

    The args/kwargs properties mirror their values onto the current
    cherrypy request so post-dispatch hooks can see (and modify) them.
    """
    def __init__(self, callable, *args, **kwargs):
        self.callable = callable
        self.args = args
        self.kwargs = kwargs
    def get_args(self):
        """Return the positional args stored on the current request."""
        return cherrypy.serving.request.args
    def set_args(self, args):
        """Store the positional args on the current request."""
        cherrypy.serving.request.args = args
        return cherrypy.serving.request.args
    args = property(
        get_args,
        set_args,
        doc="The ordered args should be accessible from post dispatch hooks"
    )
    def get_kwargs(self):
        """Return the named kwargs stored on the current request."""
        return cherrypy.serving.request.kwargs
    def set_kwargs(self, kwargs):
        """Store the named kwargs on the current request."""
        cherrypy.serving.request.kwargs = kwargs
        return cherrypy.serving.request.kwargs
    kwargs = property(
        get_kwargs,
        set_kwargs,
        doc="The named kwargs should be accessible from post dispatch hooks"
    )
    def __call__(self):
        try:
            return self.callable(*self.args, **self.kwargs)
        except TypeError:
            # A TypeError may mean the request's parameters don't match the
            # handler's signature -- or it may have been raised inside the
            # handler.  test_callable_spec re-checks the signature and
            # raises an HTTPError (404/400) only in the former case.
            x = sys.exc_info()[1]
            try:
                test_callable_spec(self.callable, self.args, self.kwargs)
            except cherrypy.HTTPError:
                # Signature mismatch confirmed: surface the HTTP error.
                raise sys.exc_info()[1]
            except:
                # test_callable_spec itself blew up; re-raise the original
                # TypeError instead of the introspection failure.
                raise x
            raise
def test_callable_spec(callable, callable_args, callable_kwargs):
    """Validate the proposed args/kwargs against the callable's signature.

    Invoked (by PageHandler.__call__) only after the handler raised a
    TypeError, to decide whether the mismatch was the client's fault.
    Raises cherrypy.HTTPError 404 or 400 when the request parameters do
    not fit the handler's signature; returns None when they do (meaning
    the TypeError came from inside the handler itself).
    """
    show_mismatched_params = getattr(
        cherrypy.serving.request, 'show_mismatched_params', False)
    try:
        (args, varargs, varkw, defaults) = getargspec(callable)
    except TypeError:
        if isinstance(callable, object) and hasattr(callable, '__call__'):
            # Not a plain function: introspect the __call__ method instead.
            (args, varargs, varkw,
             defaults) = getargspec(callable.__call__)
        else:
            # the original error
            raise
    if args and args[0] == 'self':
        # Drop the bound 'self' parameter; callers never supply it.
        args = args[1:]
    # Count how many times each named parameter would be satisfied.
    arg_usage = dict([(arg, 0,) for arg in args])
    vararg_usage = 0
    varkw_usage = 0
    extra_kwargs = set()
    for i, value in enumerate(callable_args):
        try:
            arg_usage[args[i]] += 1
        except IndexError:
            vararg_usage += 1
    for key in callable_kwargs.keys():
        try:
            arg_usage[key] += 1
        except KeyError:
            varkw_usage += 1
            extra_kwargs.add(key)
    # figure out which args have defaults.
    args_with_defaults = args[-len(defaults or []):]
    for i, val in enumerate(defaults or []):
        # Defaults take effect only when the arg hasn't been used yet.
        if arg_usage[args_with_defaults[i]] == 0:
            arg_usage[args_with_defaults[i]] += 1
    missing_args = []
    multiple_args = []
    for key, usage in arg_usage.items():
        if usage == 0:
            missing_args.append(key)
        elif usage > 1:
            multiple_args.append(key)
    if missing_args:
        # When the method allows body arguments there are 3 possibilities:
        # not enough query string parameters (404), not enough body
        # parameters (400), or not enough path parts (404).  We can't tell
        # which, so I'm raising a 404 because that covers 2/3 of the
        # cases; without body arguments it's definitely a 404.
        message = None
        if show_mismatched_params:
            message = "Missing parameters: %s" % ",".join(missing_args)
        raise cherrypy.HTTPError(404, message=message)
    # the extra positional arguments come from the path - 404 Not Found
    if not varargs and vararg_usage > 0:
        raise cherrypy.HTTPError(404)
    body_params = cherrypy.serving.request.body.params or {}
    body_params = set(body_params.keys())
    qs_params = set(callable_kwargs.keys()) - body_params
    if multiple_args:
        if qs_params.intersection(set(multiple_args)):
            # If any of the multiple parameters came from the query string then
            # it's a 404 Not Found
            error = 404
        else:
            # Otherwise the duplicates came from the body: 400 Bad Request.
            error = 400
        message = None
        if show_mismatched_params:
            message = "Multiple values for parameters: "\
                "%s" % ",".join(multiple_args)
        raise cherrypy.HTTPError(error, message=message)
    if not varkw and varkw_usage > 0:
        # If there were extra query string parameters, it's a 404 Not Found
        extra_qs_params = set(qs_params).intersection(extra_kwargs)
        if extra_qs_params:
            message = None
            if show_mismatched_params:
                message = "Unexpected query string "\
                    "parameters: %s" % ", ".join(extra_qs_params)
            raise cherrypy.HTTPError(404, message=message)
        # Extra body parameters are the client's fault: 400 Bad Request.
        extra_body_params = set(body_params).intersection(extra_kwargs)
        if extra_body_params:
            message = None
            if show_mismatched_params:
                message = "Unexpected body parameters: "\
                    "%s" % ", ".join(extra_body_params)
            raise cherrypy.HTTPError(400, message=message)
# Resolve a signature-introspection helper once at import time so that
# test_callable_spec can examine handler signatures on both Python 2 and 3.
try:
    import inspect
except ImportError:
    # No introspection available: degrade test_callable_spec to a no-op.
    test_callable_spec = lambda callable, args, kwargs: None
else:
    getargspec = inspect.getargspec
    # Python 3 requires using getfullargspec if keyword-only arguments are present
    if hasattr(inspect, 'getfullargspec'):
        def getargspec(callable):
            # Truncate the FullArgSpec to the legacy 4-tuple
            # (args, varargs, varkw, defaults) expected by callers.
            return inspect.getfullargspec(callable)[:4]
class LateParamPageHandler(PageHandler):
    """PageHandler whose kwargs are bound late.

    cherrypy.request.params must not be captured when the handler is
    looked up: tools (e.g. the decoding tool) may still rewrite the
    params dict between handler lookup and the actual call.  This
    subclass therefore merges request.params into the kwargs only at
    the moment they are read.
    """
    def _get_kwargs(self):
        # Merge the current request params with any explicitly-set kwargs.
        kwargs = cherrypy.serving.request.params.copy()
        if self._kwargs:
            kwargs.update(self._kwargs)
        return kwargs
    def _set_kwargs(self, kwargs):
        cherrypy.serving.request.kwargs = kwargs
        self._kwargs = kwargs
    kwargs = property(_get_kwargs, _set_kwargs,
                      doc='page handler kwargs (with '
                      'cherrypy.request.params copied in)')
# Build the translation table used to map URL path segments to legal Python
# identifiers (every punctuation character becomes an underscore), plus a
# validator for user-supplied tables.  The table type differs by version:
# str.translate takes a 256-char string on Python 2 and a dict on Python 3.
if sys.version_info < (3, 0):
    punctuation_to_underscores = string.maketrans(
        string.punctuation, '_' * len(string.punctuation))
    def validate_translator(t):
        # Python 2 str.translate requires a 256-character mapping string.
        if not isinstance(t, str) or len(t) != 256:
            raise ValueError(
                "The translate argument must be a str of len 256.")
else:
    punctuation_to_underscores = str.maketrans(
        string.punctuation, '_' * len(string.punctuation))
    def validate_translator(t):
        # Python 3 str.translate takes a {codepoint: replacement} mapping.
        if not isinstance(t, dict):
            raise ValueError("The translate argument must be a dict.")
class Dispatcher(object):
    """CherryPy Dispatcher which walks a tree of objects to find a handler.

    The tree is rooted at cherrypy.request.app.root, and each hierarchical
    component in the path_info argument is matched to a corresponding
    nested attribute of the root object.  Matching handlers must have an
    'exposed' attribute which evaluates to True.  The special name "index"
    matches a URI ending in a slash; "default" may match a remaining
    portion of the path_info.
    """
    # Name of the optional per-node method implementing custom dynamic
    # dispatch.
    dispatch_method_name = '_cp_dispatch'
    def __init__(self, dispatch_method_name=None,
                 translate=punctuation_to_underscores):
        # 'translate' maps URL path characters onto legal Python identifier
        # characters; validate it matches this interpreter's translate type.
        validate_translator(translate)
        self.translate = translate
        if dispatch_method_name:
            self.dispatch_method_name = dispatch_method_name
    def __call__(self, path_info):
        """Set handler and config for the current request."""
        request = cherrypy.serving.request
        func, vpath = self.find_handler(path_info)
        if func:
            # Decode any leftover %2F in the virtual_path atoms.
            vpath = [x.replace("%2F", "/") for x in vpath]
            request.handler = LateParamPageHandler(func, *vpath)
        else:
            request.handler = cherrypy.NotFound()
    def find_handler(self, path):
        """Return the appropriate page handler, plus any virtual path.

        Returns a (callable, vpath) pair: the page handler found by
        walking the app tree from cherrypy.request.app.root, and the list
        of dynamic 'virtual path' components which were not consumed by
        the lookup (later passed to the handler as positional args).
        """
        request = cherrypy.serving.request
        app = request.app
        root = app.root
        dispatch_name = self.dispatch_method_name
        # Get config for the root object/path.
        fullpath = [x for x in path.strip('/').split('/') if x] + ['index']
        fullpath_len = len(fullpath)
        segleft = fullpath_len
        nodeconf = {}
        if hasattr(root, "_cp_config"):
            nodeconf.update(root._cp_config)
        if "/" in app.config:
            nodeconf.update(app.config["/"])
        # object_trail records every node visited along the path as
        # [name, node, config, segments-left]; candidates are later tried
        # in reverse order so the deepest match wins.
        object_trail = [['root', root, nodeconf, segleft]]
        node = root
        iternames = fullpath[:]
        while iternames:
            name = iternames[0]
            # map to legal Python identifiers (e.g. replace '.' with '_')
            objname = name.translate(self.translate)
            nodeconf = {}
            subnode = getattr(node, objname, None)
            pre_len = len(iternames)
            if subnode is None:
                dispatch = getattr(node, dispatch_name, None)
                if dispatch and hasattr(dispatch, '__call__') and not \
                        getattr(dispatch, 'exposed', False) and \
                        pre_len > 1:
                    # Don't expose the hidden 'index' token to _cp_dispatch
                    # (skipped when pre_len == 1: no tokens would be left).
                    index_name = iternames.pop()
                    subnode = dispatch(vpath=iternames)
                    iternames.append(index_name)
                else:
                    # No match; keep processing in case a later candidate
                    # is a default() handler.
                    iternames.pop(0)
            else:
                # We found the path, remove the vpath entry
                iternames.pop(0)
            segleft = len(iternames)
            if segleft > pre_len:
                # No path segment was removed. Raise an error.
                raise cherrypy.CherryPyException(
                    "A vpath segment was added. Custom dispatchers may only "
                    + "remove elements. While trying to process "
                    + "{0} in {1}".format(name, fullpath)
                )
            elif segleft == pre_len:
                # Assume that the handler used the current path segment, but
                # did not pop it. This allows things like
                # return getattr(self, vpath[0], None)
                iternames.pop(0)
                segleft -= 1
            node = subnode
            if node is not None:
                # Get _cp_config attached to this node.
                if hasattr(node, "_cp_config"):
                    nodeconf.update(node._cp_config)
            # Mix in values from app.config for this path.
            existing_len = fullpath_len - pre_len
            if existing_len != 0:
                curpath = '/' + '/'.join(fullpath[0:existing_len])
            else:
                curpath = ''
            new_segs = fullpath[fullpath_len - pre_len:fullpath_len - segleft]
            for seg in new_segs:
                curpath += '/' + seg
                if curpath in app.config:
                    nodeconf.update(app.config[curpath])
            object_trail.append([name, node, nodeconf, segleft])
        def set_conf():
            """Collapse all object_trail config into cherrypy.request.config."""
            base = cherrypy.config.copy()
            # Note that we merge the config from each node
            # even if that node was None.
            for name, obj, conf, segleft in object_trail:
                base.update(conf)
                if 'tools.staticdir.dir' in conf:
                    base['tools.staticdir.section'] = '/' + \
                        '/'.join(fullpath[0:fullpath_len - segleft])
            return base
        # Try successive objects (reverse order)
        num_candidates = len(object_trail) - 1
        for i in range(num_candidates, -1, -1):
            name, candidate, nodeconf, segleft = object_trail[i]
            if candidate is None:
                continue
            # Try a "default" method on the current leaf.
            if hasattr(candidate, "default"):
                defhandler = candidate.default
                if getattr(defhandler, 'exposed', False):
                    # Insert any extra _cp_config from the default handler.
                    conf = getattr(defhandler, "_cp_config", {})
                    object_trail.insert(
                        i + 1, ["default", defhandler, conf, segleft])
                    request.config = set_conf()
                    # See https://bitbucket.org/cherrypy/cherrypy/issue/613
                    request.is_index = path.endswith("/")
                    return defhandler, fullpath[fullpath_len - segleft:-1]
            # Uncomment the next line to restrict positional params to
            # "default".
            # if i < num_candidates - 2: continue
            # Try the current leaf.
            if getattr(candidate, 'exposed', False):
                request.config = set_conf()
                if i == num_candidates:
                    # We found the extra ".index". Mark request so tools
                    # can redirect if path_info has no trailing slash.
                    request.is_index = True
                else:
                    # Not at an 'index' handler (includes handlers taking
                    # positional virtual-path params); mark accordingly.
                    request.is_index = False
                return candidate, fullpath[fullpath_len - segleft:-1]
        # Nothing matched at any depth.
        request.config = set_conf()
        return None, []
class MethodDispatcher(Dispatcher):
    """Additional dispatch based on cherrypy.request.method.upper().

    Methods named GET, POST, etc. will be called on an exposed class.
    The method names must be all caps; an Allow header listing every
    capitalized method name is emitted.  Note that the containing class
    must be exposed, not the methods.
    """
    def __call__(self, path_info):
        """Set handler and config for the current request."""
        request = cherrypy.serving.request
        resource, vpath = self.find_handler(path_info)
        if resource:
            # Set Allow header
            avail = [m for m in dir(resource) if m.isupper()]
            if "GET" in avail and "HEAD" not in avail:
                avail.append("HEAD")
            avail.sort()
            cherrypy.serving.response.headers['Allow'] = ", ".join(avail)
            # Find the subhandler
            meth = request.method.upper()
            func = getattr(resource, meth, None)
            if func is None and meth == "HEAD":
                # HEAD requests reuse the GET handler.
                func = getattr(resource, "GET", None)
            if func:
                # Grab any _cp_config on the subhandler.
                if hasattr(func, "_cp_config"):
                    request.config.update(func._cp_config)
                # Decode any leftover %2F in the virtual_path atoms.
                vpath = [x.replace("%2F", "/") for x in vpath]
                request.handler = LateParamPageHandler(func, *vpath)
            else:
                # Verb not implemented by this resource.
                request.handler = cherrypy.HTTPError(405)
        else:
            request.handler = cherrypy.NotFound()
class RoutesDispatcher(object):
    """A Routes based dispatcher for CherryPy."""
    def __init__(self, full_result=False, **mapper_options):
        """Routes dispatcher.

        Set full_result to True if you wish the controller and the
        action to be passed on to the page handler parameters.  By
        default they won't be.
        """
        import routes
        self.full_result = full_result
        # Map of route name -> controller (class or instance); the mapper
        # itself only stores the name and looks the controller up here.
        self.controllers = {}
        self.mapper = routes.Mapper(**mapper_options)
        self.mapper.controller_scan = self.controllers.keys
    def connect(self, name, route, controller, **kwargs):
        # Register a named route bound to the given controller.
        self.controllers[name] = controller
        self.mapper.connect(name, route, controller=name, **kwargs)
    def redirect(self, url):
        raise cherrypy.HTTPRedirect(url)
    def __call__(self, path_info):
        """Set handler and config for the current request."""
        func = self.find_handler(path_info)
        if func:
            cherrypy.serving.request.handler = LateParamPageHandler(func)
        else:
            cherrypy.serving.request.handler = cherrypy.NotFound()
    def find_handler(self, path_info):
        """Find the right page handler, and set request.config."""
        import routes
        request = cherrypy.serving.request
        # Publish this request's environment to the routes thread-local
        # config so route generation/redirects work.
        config = routes.request_config()
        config.mapper = self.mapper
        if hasattr(request, 'wsgi_environ'):
            config.environ = request.wsgi_environ
        config.host = request.headers.get('Host', None)
        config.protocol = request.scheme
        config.redirect = self.redirect
        result = self.mapper.match(path_info)
        config.mapper_dict = result
        params = {}
        if result:
            params = result.copy()
        if not self.full_result:
            params.pop('controller', None)
            params.pop('action', None)
        request.params.update(params)
        # Get config for the root object/path.
        request.config = base = cherrypy.config.copy()
        curpath = ""
        def merge(nodeconf):
            # Merge a node's config, pinning staticdir sections to the
            # current path.
            if 'tools.staticdir.dir' in nodeconf:
                nodeconf['tools.staticdir.section'] = curpath or "/"
            base.update(nodeconf)
        app = request.app
        root = app.root
        if hasattr(root, "_cp_config"):
            merge(root._cp_config)
        if "/" in app.config:
            merge(app.config["/"])
        # Mix in values from app.config.
        atoms = [x for x in path_info.split("/") if x]
        if atoms:
            last = atoms.pop()
        else:
            last = None
        for atom in atoms:
            curpath = "/".join((curpath, atom))
            if curpath in app.config:
                merge(app.config[curpath])
        handler = None
        if result:
            controller = result.get('controller')
            controller = self.controllers.get(controller, controller)
            if controller:
                if isinstance(controller, classtype):
                    # Routes was given a class; instantiate per request.
                    controller = controller()
                # Get config from the controller.
                if hasattr(controller, "_cp_config"):
                    merge(controller._cp_config)
            action = result.get('action')
            if action is not None:
                handler = getattr(controller, action, None)
                # Get config from the handler
                if hasattr(handler, "_cp_config"):
                    merge(handler._cp_config)
            else:
                handler = controller
        # Do the last path atom here so it can
        # override the controller's _cp_config.
        if last:
            curpath = "/".join((curpath, last))
            if curpath in app.config:
                merge(app.config[curpath])
        return handler
def XMLRPCDispatcher(next_dispatcher=Dispatcher()):
    """Return a dispatcher which patches XML-RPC paths before delegating."""
    from cherrypy.lib import xmlrpcutil
    def xmlrpc_dispatch(path_info):
        # Normalize the XML-RPC request path, then hand off.
        path_info = xmlrpcutil.patched_path(path_info)
        return next_dispatcher(path_info)
    return xmlrpc_dispatch
def VirtualHost(next_dispatcher=Dispatcher(), use_x_forwarded_host=True,
                **domains):
    """Select a different handler based on the Host header.

    Useful when running multiple sites within one CherryPy server: each
    entry in ``domains`` maps a Host header value (optionally with port)
    to a URL prefix prepended to path_info before delegating to
    ``next_dispatcher``.

    next_dispatcher: the next dispatcher in the chain (default
        Dispatcher()).
    use_x_forwarded_host: if True (the default), prefer any
        "X-Forwarded-Host" request header (commonly added by proxying
        servers such as Apache) over the "Host" header.
    ``**domains``: a dict of {host header value: virtual prefix} pairs.
    """
    from cherrypy.lib import httputil
    def vhost_dispatch(path_info):
        request = cherrypy.serving.request
        header = request.headers.get
        domain = header('Host', '')
        if use_x_forwarded_host:
            domain = header("X-Forwarded-Host", domain)
        prefix = domains.get(domain, "")
        if prefix:
            path_info = httputil.urljoin(prefix, path_info)
        result = next_dispatcher(path_info)
        # Strip the virtual prefix back out of the staticdir section so
        # static paths resolve against the real mount point.  See
        # https://bitbucket.org/cherrypy/cherrypy/issue/614.
        section = request.config.get('tools.staticdir.section')
        if section:
            section = section[len(prefix):]
            request.config['tools.staticdir.section'] = section
        return result
    return vhost_dispatch
| true | true |
1c2d7f1488771765549ad48122870b4d39690ad8 | 8,999 | py | Python | talos/tools/project.py | royl88/talos | 7a803f3e9a3dcf1d16ed6e9cd798a7a401de8cb8 | [
"Apache-2.0"
] | 1 | 2021-05-08T09:00:35.000Z | 2021-05-08T09:00:35.000Z | talos/tools/project.py | royl88/talos | 7a803f3e9a3dcf1d16ed6e9cd798a7a401de8cb8 | [
"Apache-2.0"
] | null | null | null | talos/tools/project.py | royl88/talos | 7a803f3e9a3dcf1d16ed6e9cd798a7a401de8cb8 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
import os
import os.path
import platform
import re
import shutil
import sys
from mako.template import Template
import six
# Python 2/3 compatibility shims.
if six.PY2:
    # reload() re-exposes sys.setdefaultencoding, which normal interpreter
    # startup deletes; legacy hack to allow non-ASCII template rendering.
    reload(sys)
    if platform.system() == 'Linux':
        sys.setdefaultencoding('UTF-8')
    else:
        # GBK on non-Linux systems -- presumably targeting Chinese-locale
        # Windows consoles; TODO confirm.
        sys.setdefaultencoding('GBK')
else:
    # Python 3: alias raw_input to input so the prompts below work unchanged.
    raw_input = input
def mkdir(dir_path):
    """Create dir_path (and missing parents), ignoring filesystem failures.

    Best-effort helper: an existing directory, permission problem, etc.
    is silently ignored.  Narrowed from a bare ``except:`` (which also
    swallowed KeyboardInterrupt/SystemExit) to OSError, the only
    exception type os.makedirs raises for filesystem failures.
    """
    try:
        os.makedirs(dir_path)
    except OSError:
        # Directory already exists or cannot be created; callers treat
        # this as non-fatal.
        pass
# Header line injected into generated Python source files.
PYTHON_CODING = '# coding=utf-8'
# Default validation rule for user input: a letter followed by letters,
# digits or underscores (i.e. a legal Python identifier).
DEFAULT_VAR_RULE = r'^[a-zA-Z][_a-zA-Z0-9]*$'
def input_var_with_check(prompt, rule=None, max_try=3):
    """Prompt the user until the answer matches ``rule``, with a retry limit.

    Args:
        prompt: text shown to the user for each attempt.
        rule: regular expression the answer must match; defaults to
            DEFAULT_VAR_RULE (a legal Python identifier).
        max_try: number of re-prompts allowed before giving up.

    Returns:
        The first answer matching ``rule``.

    Exits the process with status 1 when the user exceeds ``max_try``
    invalid attempts.
    """
    rule = rule or DEFAULT_VAR_RULE
    content = raw_input(prompt)
    counter = 1
    while not re.match(rule, content):
        if counter > max_try:
            sys.exit(1)
        content = raw_input(prompt)
        # Bug fix: advance the retry counter, not the limit.  The original
        # incremented max_try, so the limit grew with every attempt and
        # invalid input looped forever.
        counter += 1
    return content
def render(source_code, output_file, **kwargs):
    """Render a mako template string and write the result to output_file.

    Args:
        source_code: mako template text.
        output_file: path of the file to (over)write, opened in binary mode.
        **kwargs: variables made available to the template.  Note that
            'sys_default_coding' is always overwritten below, so any
            caller-supplied value for it is ignored.
    """
    with open(output_file, 'wb') as f_target:
        # Every template may reference ${sys_default_coding}; force it to the
        # standard coding header.
        kwargs['sys_default_coding'] = '# coding=utf-8'
        content = Template(source_code, output_encoding='utf-8').render(**kwargs)
        f_target.write(content)
def get_template(name):
    """Return the TEMPLATE attribute of module ``talos.template.<name>``.

    Args:
        name: dotted module suffix under the talos.template package,
            e.g. 'tpl_setup_py' or 'server.tpl_wsgi_server_py'.
    """
    # importlib.import_module is the documented replacement for the
    # low-level __import__ + sys.modules dance: it returns the leaf
    # module directly instead of the top-level package.
    import importlib
    mod = importlib.import_module('talos.template.' + name)
    return mod.TEMPLATE
def initialize_package(dest_path, pkg_name, author, author_email, version):
    """Generate the top-level packaging skeleton for a new project.

    Creates <dest_path>/<pkg_name>/__init__.py plus the standard
    packaging files (LICENSE, MANIFEST.in, README.md, requirements.txt,
    setup.cfg, setup.py, tox.ini, VERSION) under dest_path.
    """
    src_dir = os.path.join(dest_path, pkg_name)
    mkdir(src_dir)
    # (template name, output path, render kwargs) for each generated file,
    # rendered in order.
    outputs = (
        ('tpl_init_py', os.path.join(src_dir, '__init__.py'),
         {'author': author, 'coding': PYTHON_CODING}),
        ('tpl_LICENSE', os.path.join(dest_path, 'LICENSE'),
         {'author': author}),
        ('tpl_MANIFEST_in', os.path.join(dest_path, 'MANIFEST.in'), {}),
        ('tpl_README_md', os.path.join(dest_path, 'README.md'), {}),
        ('tpl_requirements_txt', os.path.join(dest_path, 'requirements.txt'),
         {}),
        ('tpl_setup_cfg', os.path.join(dest_path, 'setup.cfg'),
         {'pkg_name': pkg_name, 'author': author,
          'author_email': author_email}),
        ('tpl_setup_py', os.path.join(dest_path, 'setup.py'),
         {'pkg_name': pkg_name, 'author': author,
          'author_email': author_email, 'coding': PYTHON_CODING}),
        ('tpl_tox_ini', os.path.join(dest_path, 'tox.ini'), {}),
        ('tpl_VERSION', os.path.join(dest_path, 'VERSION'),
         {'version': version}),
    )
    for template_name, output_path, render_kwargs in outputs:
        render(get_template(template_name), output_path, **render_kwargs)
def initialize_server(dest_path, pkg_name, config_file, config_dir):
    """Generate <pkg>/server/: simple, WSGI and celery worker entry points."""
    # Config paths inside templates always use forward slashes, even when
    # the tool runs on Windows.
    unix_config_file = config_file.replace('\\', '/')
    unix_config_dir = config_dir.replace('\\', '/')
    server_dir = os.path.join(dest_path, pkg_name, 'server')
    mkdir(server_dir)
    base_kwargs = {'pkg_name': pkg_name, 'coding': PYTHON_CODING}
    launcher_kwargs = dict(base_kwargs, config_file=unix_config_file,
                           config_dir=unix_config_dir)
    outputs = (
        ('server.tpl_init_py', '__init__.py', {'coding': PYTHON_CODING}),
        ('server.tpl_simple_server_py', 'simple_server.py', base_kwargs),
        ('server.tpl_wsgi_server_py', 'wsgi_server.py', launcher_kwargs),
        ('server.tpl_celery_worker_py', 'celery_worker.py', launcher_kwargs),
    )
    for template_name, filename, render_kwargs in outputs:
        render(get_template(template_name),
               os.path.join(server_dir, filename),
               **render_kwargs)
def initialize_etc(dest_path, pkg_name, config_file, config_dir, db_connection):
    """Generate etc/: locale catalog, gunicorn launcher and service config."""
    conf_root = os.path.join(dest_path, 'etc')
    message_dir = os.path.join(conf_root, 'locale', 'en', 'LC_MESSAGES')
    mkdir(conf_root)
    mkdir(message_dir)
    # (template name, output path, render kwargs), rendered in order.
    outputs = (
        ('etc.locale.en.LC_MESSAGES.tpl_project_po',
         os.path.join(message_dir, pkg_name + '.po'),
         {}),
        ('etc.tpl_gunicorn_py',
         os.path.join(conf_root, 'gunicorn.py'),
         {'pkg_name': pkg_name, 'config_file': config_file,
          'config_dir': config_dir, 'coding': PYTHON_CODING}),
        ('etc.tpl_project_conf',
         os.path.join(conf_root, pkg_name + '.conf'),
         {'pkg_name': pkg_name, 'db_connection': db_connection}),
    )
    for template_name, output_path, render_kwargs in outputs:
        render(get_template(template_name), output_path, **render_kwargs)
def initialize_alembic(dest_path, pkg_name, db_connection):
    """Create alembic.ini plus the migration environment scripts."""
    alembic_dir = os.path.join(dest_path, 'alembic')
    migration_dir = os.path.join(dest_path, 'alembic', 'migration')
    for directory in (alembic_dir, migration_dir):
        mkdir(directory)
    render(get_template('alembic.tpl_alembic_ini'),
           os.path.join(alembic_dir, 'alembic.ini'),
           db_connection=db_connection)
    # The migration scripts all take the same template context.
    for template_name, filename in (('alembic.migration.tpl_env_py', 'env.py'),
                                    ('alembic.migration.tpl_README', 'README'),
                                    ('alembic.migration.tpl_script_py', 'script.py.mako')):
        render(get_template(template_name),
               os.path.join(migration_dir, filename),
               pkg_name=pkg_name, coding=PYTHON_CODING)
def initialize_middlewares(dest_path, pkg_name):
    """Create the (initially empty) middlewares package inside the project."""
    target = os.path.join(dest_path, pkg_name, 'middlewares')
    mkdir(target)
    render(get_template('middlewares.tpl_init_py'),
           os.path.join(target, '__init__.py'),
           coding=PYTHON_CODING)
def initialize_database(dest_path, pkg_name):
    """Create the db package with an empty models module."""
    target = os.path.join(dest_path, pkg_name, 'db')
    mkdir(target)
    for template_name, filename in (('db.tpl_init_py', '__init__.py'),
                                    ('db.tpl_models_py', 'models.py')):
        render(get_template(template_name),
               os.path.join(target, filename),
               coding=PYTHON_CODING)
def initialize_app(dest_path, pkg_name, app_name):
    """Create one app package (api/controller/route modules) under apps/."""
    # Make sure the parent apps directory is itself an importable package.
    parent_init = os.path.join(dest_path, '__init__.py')
    if not os.path.exists(parent_init):
        render(get_template('tpl_init_py'), parent_init, coding=PYTHON_CODING)
    app_dir = os.path.join(dest_path, app_name)
    mkdir(app_dir)
    # Every app module is rendered with the same template context.
    for template_name, filename in (('apps.tpl_init_py', '__init__.py'),
                                    ('apps.tpl_app_api_py', 'api.py'),
                                    ('apps.tpl_app_controller_py', 'controller.py'),
                                    ('apps.tpl_route_py', 'route.py')):
        render(get_template(template_name),
               os.path.join(app_dir, filename),
               pkg_name=pkg_name, app_name=app_name, coding=PYTHON_CODING)
def create_project(dest_path, name, version, author, author_email, config_dir, db_connection=''):
    """Scaffold a complete project skeleton under dest_path/name.

    Creates packaging files, server entry points, etc/ configs, database and
    alembic migration scaffolding, and a middlewares package, printing a
    progress line (in Chinese) after each step.
    """
    dest_path = os.path.join(dest_path, name)
    mkdir(dest_path)
    print(u"### 创建项目目录:%s" % dest_path)
    # The runtime config lives at <config_dir>/<name>.conf with a <name>.conf.d
    # drop-in directory next to it.
    config_file = os.path.join(config_dir, name + '.conf')
    config_dir = config_file + '.d'
    # initialize the standard python packaging files
    initialize_package(dest_path, name, author, author_email, version)
    print(u"### 创建项目:%s(%s)通用文件 " % (name, version))
    # initialize the server directory
    initialize_server(dest_path, name, config_file, config_dir)
    print(u"### 创建启动服务脚本")
    # initialize the etc directory
    initialize_etc(dest_path, name, config_file, config_dir, db_connection)
    print(u"### 创建启动配置:%s" % config_file)
    # initialize the alembic directory
    # initialize the DB directory
    initialize_database(dest_path, name)
    print(u"### 创建数据库支持脚本")
    initialize_alembic(dest_path, name, db_connection)
    print(u"### 创建数据库迁移脚本")
    if not db_connection:
        print(u"### 数据库连接串无效, 如果需要数据库版本管理功能支持,您需要手动修改alembic/alembic.ini sqlalchemy.url值")
    # initialize the middlewares directory
    initialize_middlewares(dest_path, name)
    print(u"### 创建中间件目录")
    print(u"### 完成")
def create_app(dest_path, pkg_name, name):
    """Scaffold a single app inside an existing project's apps package."""
    # Apps live at <dest>/<project>/<project>/apps/<name>.
    dest_path = os.path.join(dest_path, pkg_name, pkg_name, 'apps')
    mkdir(dest_path)
    print(u"### 创建app目录:%s" % dest_path)
    # initialize the app directory
    initialize_app(dest_path, pkg_name, name)
    print(u"### 创建app脚本:%s" % name)
    print(u"### 完成")
def generate():
    """Interactive entry point: prompt for a target and scaffold it.

    Loops until the user enters something other than 'project' or 'app',
    at which point the process exits with status 0.
    """
    dest_path = input_var_with_check(u'请输入项目生成目录:', rule='.*')
    pkg_name = input_var_with_check(u'请输入项目名称(英):')
    while True:
        gen_type = raw_input(u'请输入生成类型[project,app,其他内容退出]:')
        if gen_type.lower() == 'project':
            # Free-form prompts (rule='.*' accepts anything non-matching-empty).
            version = input_var_with_check(u'请输入项目版本:', rule='.*')
            author = input_var_with_check(u'请输入项目作者:', rule='.*')
            author_email = input_var_with_check(u'请输入项目作者Email:', rule='.*')
            config_path = input_var_with_check(u'请输入项目启动配置目录:', rule='.*')
            db_conn = input_var_with_check(u'请输入项目DB连接串:', rule='.*')
            create_project(dest_path, pkg_name, version, author, author_email, config_path, db_conn)
        elif gen_type.lower() == 'app':
            app_name = input_var_with_check(u'请输入app名称(英):')
            create_app(dest_path, pkg_name, app_name)
        else:
            sys.exit(0)
| 37.495833 | 111 | 0.670741 |
import os
import os.path
import platform
import re
import shutil
import sys
from mako.template import Template
import six
if six.PY2:
    # Python 2: force a process-wide default encoding so rendering templates
    # containing non-ASCII (Chinese) text does not raise UnicodeDecodeError.
    reload(sys)
    if platform.system() == 'Linux':
        sys.setdefaultencoding('UTF-8')
    else:
        # presumably targeting Chinese-locale Windows — TODO confirm
        sys.setdefaultencoding('GBK')
else:
    # Python 3: alias raw_input to input so the rest of the module can use
    # raw_input unconditionally.
    raw_input = input
def mkdir(dir_path):
    """Create dir_path (including parents), ignoring failures such as the
    directory already existing.

    Fix: the original used a bare ``except:`` which also swallowed
    programming errors (e.g. TypeError from a bad argument) and even
    KeyboardInterrupt; only OSError is ignored now, preserving the
    best-effort behavior for filesystem conditions.
    """
    try:
        os.makedirs(dir_path)
    except OSError:
        pass
PYTHON_CODING = '# coding=utf-8'
DEFAULT_VAR_RULE = r'^[a-zA-Z][_a-zA-Z0-9]*$'
def input_var_with_check(prompt, rule=None, max_try=3):
    """Prompt until the reply matches ``rule`` (a regex), or give up.

    After the initial prompt, up to ``max_try`` re-prompts are allowed;
    once the counter exceeds ``max_try`` the process exits with status 1.

    Bug fix: the original incremented ``max_try`` instead of ``counter``,
    so ``counter > max_try`` could never become true and invalid input
    looped forever instead of exiting after the allowed attempts.
    """
    rule = rule or DEFAULT_VAR_RULE
    content = raw_input(prompt)
    counter = 1
    while not re.match(rule, content):
        if counter > max_try:
            sys.exit(1)
        content = raw_input(prompt)
        counter += 1
    return content
def render(source_code, output_file, **kwargs):
    """Render a mako template string and write it to output_file as UTF-8."""
    # Every template gets the coding header available as a variable.
    kwargs['sys_default_coding'] = '# coding=utf-8'
    rendered = Template(source_code, output_encoding='utf-8').render(**kwargs)
    with open(output_file, 'wb') as handle:
        handle.write(rendered)
def get_template(name):
    """Import talos.template.<name> and return its TEMPLATE string."""
    module_name = 'talos.template.' + name
    __import__(module_name)
    return sys.modules[module_name].TEMPLATE
def initialize_package(dest_path, pkg_name, author, author_email, version):
    """Create the standard python packaging files for a new project."""
    source_dir = os.path.join(dest_path, pkg_name)
    mkdir(source_dir)
    render(get_template('tpl_init_py'),
           os.path.join(source_dir, '__init__.py'),
           author=author, coding=PYTHON_CODING)
    render(get_template('tpl_LICENSE'),
           os.path.join(dest_path, 'LICENSE'),
           author=author)
    # Files that take no template variables.
    for template_name, filename in (('tpl_MANIFEST_in', 'MANIFEST.in'),
                                    ('tpl_README_md', 'README.md'),
                                    ('tpl_requirements_txt', 'requirements.txt'),
                                    ('tpl_tox_ini', 'tox.ini')):
        render(get_template(template_name), os.path.join(dest_path, filename))
    render(get_template('tpl_setup_cfg'),
           os.path.join(dest_path, 'setup.cfg'),
           pkg_name=pkg_name, author=author, author_email=author_email)
    render(get_template('tpl_setup_py'),
           os.path.join(dest_path, 'setup.py'),
           pkg_name=pkg_name, author=author, author_email=author_email, coding=PYTHON_CODING)
    render(get_template('tpl_VERSION'),
           os.path.join(dest_path, 'VERSION'),
           version=version)
def initialize_server(dest_path, pkg_name, config_file, config_dir):
config_file_fixed = config_file.replace('\\', '/')
config_dir_fixed = config_dir.replace('\\', '/')
server_dir = os.path.join(dest_path, pkg_name, 'server')
mkdir(server_dir)
render(get_template('server.tpl_init_py'),
os.path.join(server_dir, '__init__.py'),
coding=PYTHON_CODING)
render(get_template('server.tpl_simple_server_py'),
os.path.join(server_dir, 'simple_server.py'),
pkg_name=pkg_name, coding=PYTHON_CODING)
render(get_template('server.tpl_wsgi_server_py'),
os.path.join(server_dir, 'wsgi_server.py'),
pkg_name=pkg_name, config_file=config_file_fixed, config_dir=config_dir_fixed, coding=PYTHON_CODING)
render(get_template('server.tpl_celery_worker_py'),
os.path.join(server_dir, 'celery_worker.py'),
pkg_name=pkg_name, config_file=config_file_fixed, config_dir=config_dir_fixed, coding=PYTHON_CODING)
def initialize_etc(dest_path, pkg_name, config_file, config_dir, db_connection):
etc_dir = os.path.join(dest_path, 'etc')
mkdir(etc_dir)
locale_dir = os.path.join(dest_path, 'etc', 'locale', 'en', 'LC_MESSAGES')
mkdir(locale_dir)
render(get_template('etc.locale.en.LC_MESSAGES.tpl_project_po'),
os.path.join(locale_dir, pkg_name + '.po'))
render(get_template('etc.tpl_gunicorn_py'),
os.path.join(etc_dir, 'gunicorn.py'),
pkg_name=pkg_name, config_file=config_file, config_dir=config_dir, coding=PYTHON_CODING)
render(get_template('etc.tpl_project_conf'),
os.path.join(etc_dir, pkg_name + '.conf'),
pkg_name=pkg_name, db_connection=db_connection)
def initialize_alembic(dest_path, pkg_name, db_connection):
alembic_dir = os.path.join(dest_path, 'alembic')
mkdir(alembic_dir)
migration_dir = os.path.join(dest_path, 'alembic', 'migration')
mkdir(migration_dir)
render(get_template('alembic.tpl_alembic_ini'),
os.path.join(alembic_dir, 'alembic.ini'),
db_connection=db_connection)
render(get_template('alembic.migration.tpl_env_py'),
os.path.join(migration_dir, 'env.py'),
pkg_name=pkg_name, coding=PYTHON_CODING)
render(get_template('alembic.migration.tpl_README'),
os.path.join(migration_dir, 'README'),
pkg_name=pkg_name, coding=PYTHON_CODING)
render(get_template('alembic.migration.tpl_script_py'),
os.path.join(migration_dir, 'script.py.mako'),
pkg_name=pkg_name, coding=PYTHON_CODING)
def initialize_middlewares(dest_path, pkg_name):
middlewares_dir = os.path.join(dest_path, pkg_name, 'middlewares')
mkdir(middlewares_dir)
render(get_template('middlewares.tpl_init_py'),
os.path.join(middlewares_dir, '__init__.py'),
coding=PYTHON_CODING)
def initialize_database(dest_path, pkg_name):
db_dir = os.path.join(dest_path, pkg_name, 'db')
mkdir(db_dir)
render(get_template('db.tpl_init_py'),
os.path.join(db_dir, '__init__.py'),
coding=PYTHON_CODING)
render(get_template('db.tpl_models_py'),
os.path.join(db_dir, 'models.py'),
coding=PYTHON_CODING)
def initialize_app(dest_path, pkg_name, app_name):
if not os.path.exists(os.path.join(dest_path, '__init__.py')):
render(get_template('tpl_init_py'),
os.path.join(dest_path, '__init__.py'),
coding=PYTHON_CODING)
app_dir = os.path.join(dest_path, app_name)
mkdir(app_dir)
render(get_template('apps.tpl_init_py'),
os.path.join(app_dir, '__init__.py'),
pkg_name=pkg_name, app_name=app_name, coding=PYTHON_CODING)
render(get_template('apps.tpl_app_api_py'),
os.path.join(app_dir, 'api.py'),
pkg_name=pkg_name, app_name=app_name, coding=PYTHON_CODING)
render(get_template('apps.tpl_app_controller_py'),
os.path.join(app_dir, 'controller.py'),
pkg_name=pkg_name, app_name=app_name, coding=PYTHON_CODING)
render(get_template('apps.tpl_route_py'),
os.path.join(app_dir, 'route.py'),
pkg_name=pkg_name, app_name=app_name, coding=PYTHON_CODING)
def create_project(dest_path, name, version, author, author_email, config_dir, db_connection=''):
dest_path = os.path.join(dest_path, name)
mkdir(dest_path)
print(u"### 创建项目目录:%s" % dest_path)
config_file = os.path.join(config_dir, name + '.conf')
config_dir = config_file + '.d'
initialize_package(dest_path, name, author, author_email, version)
print(u"### 创建项目:%s(%s)通用文件 " % (name, version))
initialize_server(dest_path, name, config_file, config_dir)
print(u"### 创建启动服务脚本")
initialize_etc(dest_path, name, config_file, config_dir, db_connection)
print(u"### 创建启动配置:%s" % config_file)
initialize_database(dest_path, name)
print(u"### 创建数据库支持脚本")
initialize_alembic(dest_path, name, db_connection)
print(u"### 创建数据库迁移脚本")
if not db_connection:
print(u"### 数据库连接串无效, 如果需要数据库版本管理功能支持,您需要手动修改alembic/alembic.ini sqlalchemy.url值")
initialize_middlewares(dest_path, name)
print(u"### 创建中间件目录")
print(u"### 完成")
def create_app(dest_path, pkg_name, name):
dest_path = os.path.join(dest_path, pkg_name, pkg_name, 'apps')
mkdir(dest_path)
print(u"### 创建app目录:%s" % dest_path)
initialize_app(dest_path, pkg_name, name)
print(u"### 创建app脚本:%s" % name)
print(u"### 完成")
def generate():
dest_path = input_var_with_check(u'请输入项目生成目录:', rule='.*')
pkg_name = input_var_with_check(u'请输入项目名称(英):')
while True:
gen_type = raw_input(u'请输入生成类型[project,app,其他内容退出]:')
if gen_type.lower() == 'project':
version = input_var_with_check(u'请输入项目版本:', rule='.*')
author = input_var_with_check(u'请输入项目作者:', rule='.*')
author_email = input_var_with_check(u'请输入项目作者Email:', rule='.*')
config_path = input_var_with_check(u'请输入项目启动配置目录:', rule='.*')
db_conn = input_var_with_check(u'请输入项目DB连接串:', rule='.*')
create_project(dest_path, pkg_name, version, author, author_email, config_path, db_conn)
elif gen_type.lower() == 'app':
app_name = input_var_with_check(u'请输入app名称(英):')
create_app(dest_path, pkg_name, app_name)
else:
sys.exit(0)
| true | true |
1c2d7fc265e46bbef8d4b3b094cfce762bc3d29a | 872 | py | Python | setup.py | Evgeneus/solo-learn | 36782b829736d0216511f8d066631610b52e8663 | [
"MIT"
] | null | null | null | setup.py | Evgeneus/solo-learn | 36782b829736d0216511f8d066631610b52e8663 | [
"MIT"
] | null | null | null | setup.py | Evgeneus/solo-learn | 36782b829736d0216511f8d066631610b52e8663 | [
"MIT"
] | null | null | null | from setuptools import find_packages, setup
# Read runtime dependencies from the adjacent requirements file.
with open("requirements.txt") as f:
    requirements = [p.strip() for p in f.readlines()]
# PyPI search keywords.
KW = ["artificial intelligence", "deep learning", "unsupervised learning", "contrastive learning"]
setup(
    name="solo",
    packages=find_packages(exclude=["bash_files"]),
    version="0.0.1",
    license="MIT",
    author="Victor G. Turrisi da Costa, Enrico Fini",
    author_email="vturrisi@gmail.com, enrico.fini@gmail.com",
    url="https://github.com/vturrisi/solo-learn",
    keywords=KW,
    install_requires=requirements,
    # Extra index for NVIDIA-hosted wheels (note: pip largely ignores
    # dependency_links in modern versions — TODO confirm still needed).
    dependency_links=["https://developer.download.nvidia.com/compute/redist"],
    classifiers=[
        "Programming Language :: Python :: 3.8",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    include_package_data=True,
    zip_safe=False,
)
| 32.296296 | 98 | 0.676606 | from setuptools import find_packages, setup
with open("requirements.txt") as f:
requirements = [p.strip() for p in f.readlines()]
KW = ["artificial intelligence", "deep learning", "unsupervised learning", "contrastive learning"]
setup(
name="solo",
packages=find_packages(exclude=["bash_files"]),
version="0.0.1",
license="MIT",
author="Victor G. Turrisi da Costa, Enrico Fini",
author_email="vturrisi@gmail.com, enrico.fini@gmail.com",
url="https://github.com/vturrisi/solo-learn",
keywords=KW,
install_requires=requirements,
dependency_links=["https://developer.download.nvidia.com/compute/redist"],
classifiers=[
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
include_package_data=True,
zip_safe=False,
)
| true | true |
1c2d8143e4ac5173be0ac5a9ed07de0c0e298b80 | 1,700 | py | Python | experiments/sampleE/complete_inference.py | neptunes5thmoon/simpleference | 96c74187bd3d83f6f1e910e72e56f45d6cc8d5d9 | [
"MIT"
] | 2 | 2019-10-13T12:10:00.000Z | 2021-10-09T11:49:43.000Z | experiments/sampleE/complete_inference.py | neptunes5thmoon/simpleference | 96c74187bd3d83f6f1e910e72e56f45d6cc8d5d9 | [
"MIT"
] | null | null | null | experiments/sampleE/complete_inference.py | neptunes5thmoon/simpleference | 96c74187bd3d83f6f1e910e72e56f45d6cc8d5d9 | [
"MIT"
] | 1 | 2018-05-08T14:10:59.000Z | 2018-05-08T14:10:59.000Z | from __future__ import print_function
import sys
import os
from concurrent.futures import ProcessPoolExecutor
from subprocess import call
from simpleference.inference.util import get_offset_lists
sys.path.append('/groups/saalfeld/home/papec/Work/my_projects/z5/bld/python')
import z5py
def single_inference(gpu, iteration, gpu_offset):
    """Run the inference shell script for one GPU; always reports success."""
    print(gpu, iteration, gpu_offset)
    arguments = [str(value) for value in (gpu, iteration, gpu_offset)]
    call(['./run_inference.sh'] + arguments)
    return True
def complete_inference(gpu_list, iteration, gpu_offset):
    """Fan inference out over the GPUs in gpu_list, one process per GPU.

    Each worker shells out via single_inference; results are collected and
    a summary is printed.
    """
    # Chunk shape intended for the output datasets (only referenced by the
    # commented-out dataset creation below).
    out_shape = (56,) *3
    raw_path = '/nrs/saalfeld/sample_E/sample_E.n5/volumes/raw/s0'
    g = z5py.File(raw_path)
    # Reversed axis order — presumably converting the n5 axis convention;
    # TODO confirm. Only used by the commented-out dataset creation.
    shape = g['.'].shape[::-1]
    # open the datasets
    #f = z5py.File('/groups/saalfeld/saalfeldlab/sampleE/my_prediction.n5', use_zarr_format=False)
    #f.create_dataset('affs_xy', shape=shape,
    #                 compression='gzip',
    #                 dtype='float32',
    #                 chunks=out_shape)
    #f.create_dataset('affs_z', shape=shape,
    #                 compression='gzip',
    #                 dtype='float32',
    #                 chunks=out_shape)
    # run multiprocessed inference
    with ProcessPoolExecutor(max_workers=len(gpu_list)) as pp:
        tasks = [pp.submit(single_inference, gpu, iteration, gpu_offset) for gpu in gpu_list]
        result = [t.result() for t in tasks]
    # single_inference always returns True, so this only distinguishes
    # completed futures from ones that raised (raising would have propagated
    # from t.result() above).
    if all(result):
        print("All gpu's finished inference properly.")
    else:
        print("WARNING: at least one process didn't finish properly.")
if __name__ == '__main__':
    # usage: python complete_inference.py <gpu_offset>
    gpu_list = range(8)
    iteration = 100000
    gpu_offset = int(sys.argv[1])
    complete_inference(gpu_list, iteration, gpu_offset)
| 32.075472 | 98 | 0.663529 | from __future__ import print_function
import sys
import os
from concurrent.futures import ProcessPoolExecutor
from subprocess import call
from simpleference.inference.util import get_offset_lists
sys.path.append('/groups/saalfeld/home/papec/Work/my_projects/z5/bld/python')
import z5py
def single_inference(gpu, iteration, gpu_offset):
print(gpu, iteration, gpu_offset)
call(['./run_inference.sh', str(gpu), str(iteration), str(gpu_offset)])
return True
def complete_inference(gpu_list, iteration, gpu_offset):
out_shape = (56,) *3
raw_path = '/nrs/saalfeld/sample_E/sample_E.n5/volumes/raw/s0'
g = z5py.File(raw_path)
shape = g['.'].shape[::-1]
with ProcessPoolExecutor(max_workers=len(gpu_list)) as pp:
tasks = [pp.submit(single_inference, gpu, iteration, gpu_offset) for gpu in gpu_list]
result = [t.result() for t in tasks]
if all(result):
print("All gpu's finished inference properly.")
else:
print("WARNING: at least one process didn't finish properly.")
if __name__ == '__main__':
gpu_list = range(8)
iteration = 100000
gpu_offset = int(sys.argv[1])
complete_inference(gpu_list, iteration, gpu_offset)
| true | true |
1c2d81675eee247acf8c0c5871d81e85a66bcfe2 | 31,548 | py | Python | pyblp/configurations/integration.py | rebekahanne/pyblp | fe41991c5ec457bb5cfc1bf4d5788d18660a3de2 | [
"MIT"
] | 1 | 2021-01-25T01:25:45.000Z | 2021-01-25T01:25:45.000Z | pyblp/configurations/integration.py | rebekahanne/pyblp | fe41991c5ec457bb5cfc1bf4d5788d18660a3de2 | [
"MIT"
] | null | null | null | pyblp/configurations/integration.py | rebekahanne/pyblp | fe41991c5ec457bb5cfc1bf4d5788d18660a3de2 | [
"MIT"
] | null | null | null | """Construction of nodes and weights for integration."""
import functools
import itertools
from typing import Iterable, List, Optional, Tuple
import numpy as np
import scipy.special
import scipy.stats
from ..utilities.basics import Array, Options, StringRepresentation, format_options
class Integration(StringRepresentation):
    r"""Configuration for building integration nodes and weights.
    Parameters
    ----------
    specification : `str`
        How to build nodes and weights. One of the following:
        - ``'monte_carlo'`` - Draw from a pseudo-random standard multivariate normal distribution. Integration
          weights are ``1 / size``. The ``seed`` field of ``options`` can be used to seed the random number
          generator.
        - ``'halton'`` - Generate nodes according to the Halton. Different primes (2, 3, 5, etc.) are used for
          different dimensions. Integration weights are ``1 / size``. By default, the first ``100`` values in each
          dimension are discarded to eliminate correlation between dimensions. The ``discard`` field of ``options``
          can be used to increase this number.
        - ``'lhs'`` - Generate nodes according to Latin Hypercube Sampling (LHS). Integration weights are
          ``1 / size``. The ``seed`` field of ``options`` can be used to seed the random number generator.
        - ``'mlhs'`` - Generate nodes according to Modified Latin Hypercube Sampling (MLHS) described by
          :ref:`references:Hess, Train, and Polak (2004)`. Integration weights are ``1 / size``. The ``seed`` field
          of ``options`` can be used to seed the random number generator.
        - ``'product'`` - Generate nodes and weights according to the level-``size`` Gauss-Hermite product rule.
        - ``'nested_product'`` - Generate nodes and weights according to the level-``size`` nested Gauss-Hermite
          product rule. Weights can be negative.
        - ``'grid'`` - Generate a sparse grid of nodes and weights according to the level-``size`` Gauss-Hermite
          quadrature rule. Weights can be negative.
        - ``'nested_grid'`` - Generate a sparse grid of nodes and weights according to the level ``size`` nested
          Gauss-Hermite quadrature rule. Weights can be negative.
        Best practice for low dimensions is probably to use ``'product'`` to a relatively high degree of polynomial
        accuracy. In higher dimensions, ``'grid'`` appears to scale the best. For more information, see
        :ref:`references:Judd and Skrainka (2011)` and :ref:`references:Conlon and Gortmaker (2019)`.
        Sparse grids are constructed in analogously to the Matlab function `nwspgr <http://www.sparse-grids.de/>`_
        created by Florian Heiss and Viktor Winschel. For more information, see
        :ref:`references:Heiss and Winschel (2008)`.
    size : `int`
        The number of draws if ``specification`` is ``'monte_carlo'``, ``'lhs'``, or ``'mlhs'``, and the level of the
        quadrature rule otherwise.
    specification_options : `dict, optional`
        Options for the integration specification. The ``'monte_carlo'``, ``'lhs'``, and ``'mlhs'`` specifications
        support the following option:
        - **seed** : (`int`) - Passed to :class:`numpy.random.mtrand.RandomState` to seed the random number
          generator before building integration nodes. By default, a seed is not passed to the random number
          generator.
        The ``'halton'`` specification supports the following option:
        - **discard** : (`int`) - How many values at the beginning of each dimension's Halton sequence to discard.
          Discarding values at the start of each dimension's sequence is the simplest way to eliminate correlation
          between dimensions. By default, the first ``100`` values in each dimension are discarded.
    Examples
    --------
    .. raw:: latex
       \begin{examplenotebook}
    .. toctree::
       /_notebooks/api/integration.ipynb
    .. raw:: latex
       \end{examplenotebook}
    """
    _size: int  # number of draws or quadrature level
    _seed: Optional[int]
    _description: str  # human-readable description used by __str__
    _builder: functools.partial  # module-level builder bound to the specification
    _specification_options: Options
    def __init__(self, specification: str, size: int, specification_options: Optional[Options] = None) -> None:
        """Validate the specification and identify the builder."""
        # map each specification name to its builder and a description
        specifications = {
            'monte_carlo': (functools.partial(monte_carlo), "with Monte Carlo simulation"),
            'halton': (functools.partial(halton), "with Halton sequences"),
            'lhs': (functools.partial(lhs), "with Latin Hypercube Sampling (LHS)"),
            'mlhs': (functools.partial(lhs, modified=True), "with Modified Latin Hypercube Sampling (MLHS)"),
            'product': (functools.partial(product_rule), f"according to the level-{size} Gauss-Hermite product rule"),
            'grid': (
                functools.partial(sparse_grid),
                f"in a sparse grid according to the level-{size} Gauss-Hermite rule"
            ),
            'nested_product': (
                functools.partial(product_rule, nested=True),
                f"according to the level-{size} nested Gauss-Hermite product rule"
            ),
            'nested_grid': (
                functools.partial(sparse_grid, nested=True),
                f"in a sparse grid according to the level-{size} nested Gauss-Hermite rule"
            )
        }
        # validate the configuration
        if specification not in specifications:
            raise ValueError(f"specification must be one of {list(specifications.keys())}.")
        if not isinstance(size, int) or size < 1:
            raise ValueError("size must be a positive integer.")
        if specification_options is not None and not isinstance(specification_options, dict):
            raise ValueError("specification_options must be None or a dict.")
        # initialize class attributes
        self._size = size
        self._specification = specification
        self._builder, self._description = specifications[specification]
        # set default options
        self._specification_options: Options = {}
        if specification == 'halton':
            self._specification_options['discard'] = 100
        # update and validate options
        self._specification_options.update(specification_options or {})
        if specification in {'monte_carlo', 'lhs', 'mlhs'}:
            if not isinstance(self._specification_options.get('seed', 0), int):
                raise ValueError("The specification option seed must be an integer.")
        elif specification == 'halton':
            discard = self._specification_options['discard']
            if not isinstance(discard, int) or discard < 0:
                raise ValueError("The specification option discard must be a nonnegative integer.")
    def __str__(self) -> str:
        """Format the configuration as a string."""
        return (
            f"Configured to construct nodes and weights {self._description} with options "
            f"{format_options(self._specification_options)}."
        )
    def _build_many(self, dimensions: int, ids: Iterable) -> Tuple[Array, Array, Array]:
        """Build concatenated IDs, nodes, and weights for each ID."""
        builder = self._builder
        # a single RandomState is shared across IDs, so each ID receives
        # distinct pseudo-random draws
        if self._specification in {'monte_carlo', 'lhs', 'mlhs'}:
            builder = functools.partial(builder, state=np.random.RandomState(self._specification_options.get('seed')))
        count = 0
        ids_list: List[Array] = []
        nodes_list: List[Array] = []
        weights_list: List[Array] = []
        for i in ids:
            if self._specification == 'halton':
                # advance the Halton start offset past draws used by earlier
                # IDs so sequences do not repeat across IDs
                nodes, weights = builder(dimensions, self._size, start=self._specification_options['discard'] + count)
            else:
                nodes, weights = builder(dimensions, self._size)
            ids_list.append(np.repeat(i, weights.size))
            nodes_list.append(nodes)
            weights_list.append(weights)
            count += weights.size
        return np.concatenate(ids_list), np.concatenate(nodes_list), np.concatenate(weights_list)
    def _build(self, dimensions: int) -> Tuple[Array, Array]:
        """Build nodes and weights for a single set of draws."""
        builder = self._builder
        if self._specification in {'monte_carlo', 'lhs', 'mlhs'}:
            builder = functools.partial(builder, state=np.random.RandomState(self._specification_options.get('seed')))
        if self._specification == 'halton':
            return builder(dimensions, self._size, start=self._specification_options['discard'])
        return builder(dimensions, self._size)
def monte_carlo(dimensions: int, size: int, state: np.random.RandomState) -> Tuple[Array, Array]:
    """Draw standard-normal integration nodes with equal weights."""
    draws = state.normal(size=(size, dimensions))
    equal_weight = 1 / size
    return draws, np.repeat(equal_weight, size)
def halton(dimensions: int, size: int, start: int) -> Tuple[Array, Array]:
    """Generate Halton draws (one prime base per dimension) and equal weights.

    Each uniform draw is the radical inverse of its index in the dimension's
    prime base; the uniforms are then mapped through the standard normal
    inverse CDF.
    """
    sequences = np.zeros((size, dimensions))
    for dimension in range(dimensions):
        base = get_prime(dimension)
        for row, index in enumerate(range(start, start + size)):
            # radical inverse of index in the given base
            value = 0.0
            scale = 1.0
            remaining = index
            while remaining > 0:
                remaining, digit = divmod(remaining, base)
                scale *= base
                value += digit / scale
            sequences[row, dimension] = value
    nodes = scipy.stats.norm().ppf(sequences)
    return nodes, np.repeat(1 / size, size)
def lhs(dimensions: int, size: int, state: np.random.RandomState, modified: bool = False) -> Tuple[Array, Array]:
    """Latin Hypercube Sampling (optionally modified) nodes and equal weights."""
    samples = np.zeros((size, dimensions))
    for dimension in range(dimensions):
        # MLHS shifts every stratum by one shared uniform; plain LHS uses an
        # independent uniform per stratum
        offsets = state.uniform(size=1 if modified else size)
        shuffled = state.permutation(np.arange(size) + offsets)
        samples[:, dimension] = shuffled / size
    nodes = scipy.stats.norm().ppf(samples)
    return nodes, np.repeat(1 / size, size)
@functools.lru_cache()
def product_rule(dimensions: int, level: int, nested: bool = False) -> Tuple[Array, Array]:
    """Gauss-Hermite product rule (or its nested analog) over all dimensions.

    Multivariate nodes are the Cartesian product of the univariate nodes and
    multivariate weights are the Kronecker product of the univariate weights.
    """
    nodes_1d, weights_1d = quadrature_rule(level, nested)
    grid = np.array(list(itertools.product(nodes_1d, repeat=dimensions)))
    combined = functools.reduce(np.kron, itertools.repeat(weights_1d, dimensions))
    return grid, combined
@functools.lru_cache()
def sparse_grid(dimensions: int, level: int, nested: bool = False) -> Tuple[Array, Array]:
    """Generate a sparse grid of nodes and weights according to the univariate Gauss-Hermite quadrature rule or its
    nested analog.

    This is a Smolyak-style combination: signed component product rules are
    stacked, duplicate nodes are merged (summing their weights), and the
    weights are normalized to sum to one.
    """
    # construct nodes and weights
    nodes_list: List[Array] = []
    weights_list: List[Array] = []
    for q in range(max(0, level - dimensions), level):
        # compute the combinatorial coefficient applied to the component product rules
        coefficient = (-1)**(level - q - 1) * scipy.special.binom(dimensions - 1, dimensions + q - level)
        # compute product rules for each level in all dimensions-length sequences that sum to dimensions + q
        for base_levels in same_size_sequences(dimensions, dimensions + q):
            base_nodes_list, base_weights_list = zip(*(quadrature_rule(l, nested) for l in base_levels))
            nodes_list.append(np.array(list(itertools.product(*base_nodes_list))))
            weights_list.append(coefficient * functools.reduce(np.kron, base_weights_list))
    # combine the lists of nodes and weights into arrays
    nodes = np.concatenate(nodes_list)
    weights = np.concatenate(weights_list)
    # sort nodes and weights by the first column of nodes, then by the second column, and so on
    sorted_indices = np.lexsort(nodes[:, ::-1].T)
    nodes = nodes[sorted_indices]
    weights = weights[sorted_indices]
    # merge weights for repeated rows, keeping only one set of nodes
    # (sorting above guarantees duplicates are adjacent)
    last = 0
    keep = [last]
    for row in range(1, weights.size):
        if np.array_equal(nodes[row], nodes[row - 1]):
            weights[last] += weights[row]
            continue
        last = row
        keep.append(row)
    nodes = nodes[keep]
    weights = weights[keep]
    # normalize the weights
    weights /= weights.sum()
    return nodes, weights
def same_size_sequences(size: int, summation: int) -> Array:
    """Enumerate all positive-integer vectors of the given size that sum to
    summation.

    Internally works with nonnegative vectors summing to summation - size and
    adds one to every entry at the end, mirroring the original formulation.
    """
    total = summation - size
    current = np.zeros(size, np.int64)
    current[0] = total
    collected = [current.copy()]
    cursor = 0
    while current[-1] < total:
        if cursor == size - 1:
            # walk back to the rightmost nonzero entry before the cursor
            for position in reversed(range(cursor)):
                cursor = position
                if current[position] != 0:
                    break
        current[cursor] -= 1
        cursor += 1
        # the entry at the cursor absorbs whatever is left of the total
        current[cursor] = total - current[:cursor].sum()
        if cursor < size - 1:
            current[cursor + 1:] = 0
        collected.append(current.copy())
    return np.vstack(collected) + 1
def quadrature_rule(level: int, nested: bool) -> Tuple[Array, Array]:
    """Univariate Gauss-Hermite nodes and weights, or the nested analog."""
    if nested:
        # mirror the stored positive half around zero
        node_data, weight_data = get_nested_data(level)
        nodes = np.r_[-node_data[::-1], 0, node_data]
        weights = np.r_[weight_data[::-1], weight_data[1:]]
        return nodes, weights
    hermite_nodes, hermite_weights = np.polynomial.hermite.hermgauss(level)
    # rescale from the physicists' exp(-x^2) weighting to the standard normal
    return hermite_nodes * np.sqrt(2), hermite_weights / np.sqrt(np.pi)
def get_prime(dimension: int) -> int:
    """Return the prime used as the Halton base for a (0-indexed) dimension."""
    primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107,
        109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229,
        233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359,
        367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491,
        499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641,
        643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787,
        797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941,
        947, 953, 967, 971, 977, 983, 991, 997
    ]
    if dimension >= len(primes):
        raise ValueError(f"Halton sequences are only available for {len(primes)} dimensions here.")
    return primes[dimension]
def get_nested_data(level: int) -> Tuple[Array, Array]:
    """Return node and weight data used to construct the nested Gauss-Hermite rule.

    The tables below hold precomputed constants for levels 1 through 25, indexed
    by ``level - 1``. Each node list contains only the strictly positive
    abscissas of the symmetric rule; ``quadrature_rule`` mirrors them about zero
    and prepends the central node. Each weight list begins with the central
    node's weight, followed by the weights of the positive nodes; the mirrored
    negative nodes re-use the same weights by symmetry.

    Raises a ValueError for levels beyond the tabulated range.
    """
    # NOTE(review): level < 1 would index from the END of these lists (Python
    # negative indexing) rather than raising — callers appear to always pass
    # level >= 1; confirm before exposing this more widely.
    # Positive abscissas per level. Consecutive levels often share the same node
    # set (the rule is nested), which is why many rows are identical.
    node_data_list = [
        [],
        [1.7320508075688772e+00],
        [1.7320508075688772e+00],
        [7.4109534999454085e-01, 1.7320508075688772e+00, 4.1849560176727323e+00],
        [7.4109534999454085e-01, 1.7320508075688772e+00, 2.8612795760570582e+00, 4.1849560176727323e+00],
        [7.4109534999454085e-01, 1.7320508075688772e+00, 2.8612795760570582e+00, 4.1849560176727323e+00],
        [7.4109534999454085e-01, 1.7320508075688772e+00, 2.8612795760570582e+00, 4.1849560176727323e+00],
        [7.4109534999454085e-01, 1.7320508075688772e+00, 2.8612795760570582e+00, 4.1849560176727323e+00],
        [
            7.4109534999454085e-01, 1.2304236340273060e+00, 1.7320508075688772e+00, 2.5960831150492023e+00,
            2.8612795760570582e+00, 4.1849560176727323e+00, 5.1870160399136562e+00, 6.3633944943363696e+00
        ],
        [
            7.4109534999454085e-01, 1.2304236340273060e+00, 1.7320508075688772e+00, 2.5960831150492023e+00,
            2.8612795760570582e+00, 3.2053337944991944e+00, 4.1849560176727323e+00, 5.1870160399136562e+00,
            6.3633944943363696e+00
        ],
        [
            7.4109534999454085e-01, 1.2304236340273060e+00, 1.7320508075688772e+00, 2.5960831150492023e+00,
            2.8612795760570582e+00, 3.2053337944991944e+00, 4.1849560176727323e+00, 5.1870160399136562e+00,
            6.3633944943363696e+00
        ],
        [
            7.4109534999454085e-01, 1.2304236340273060e+00, 1.7320508075688772e+00, 2.5960831150492023e+00,
            2.8612795760570582e+00, 3.2053337944991944e+00, 4.1849560176727323e+00, 5.1870160399136562e+00,
            6.3633944943363696e+00
        ],
        [
            7.4109534999454085e-01, 1.2304236340273060e+00, 1.7320508075688772e+00, 2.5960831150492023e+00,
            2.8612795760570582e+00, 3.2053337944991944e+00, 4.1849560176727323e+00, 5.1870160399136562e+00,
            6.3633944943363696e+00
        ],
        [
            7.4109534999454085e-01, 1.2304236340273060e+00, 1.7320508075688772e+00, 2.5960831150492023e+00,
            2.8612795760570582e+00, 3.2053337944991944e+00, 4.1849560176727323e+00, 5.1870160399136562e+00,
            6.3633944943363696e+00
        ],
        [
            7.4109534999454085e-01, 1.2304236340273060e+00, 1.7320508075688772e+00, 2.5960831150492023e+00,
            2.8612795760570582e+00, 3.2053337944991944e+00, 4.1849560176727323e+00, 5.1870160399136562e+00,
            6.3633944943363696e+00
        ],
        [
            2.4899229757996061e-01, 7.4109534999454085e-01, 1.2304236340273060e+00, 1.7320508075688772e+00,
            2.2336260616769419e+00, 2.5960831150492023e+00, 2.8612795760570582e+00, 3.2053337944991944e+00,
            3.6353185190372783e+00, 4.1849560176727323e+00, 5.1870160399136562e+00, 6.3633944943363696e+00,
            7.1221067008046166e+00, 7.9807717985905606e+00, 9.0169397898903032e+00
        ],
        [
            2.4899229757996061e-01, 7.4109534999454085e-01, 1.2304236340273060e+00, 1.7320508075688772e+00,
            2.2336260616769419e+00, 2.5960831150492023e+00, 2.8612795760570582e+00, 3.2053337944991944e+00,
            3.6353185190372783e+00, 4.1849560176727323e+00, 5.1870160399136562e+00, 5.6981777684881099e+00,
            6.3633944943363696e+00, 7.1221067008046166e+00, 7.9807717985905606e+00, 9.0169397898903032e+00
        ],
        [
            2.4899229757996061e-01, 7.4109534999454085e-01, 1.2304236340273060e+00, 1.7320508075688772e+00,
            2.2336260616769419e+00, 2.5960831150492023e+00, 2.8612795760570582e+00, 3.2053337944991944e+00,
            3.6353185190372783e+00, 4.1849560176727323e+00, 4.7364330859522967e+00, 5.1870160399136562e+00,
            5.6981777684881099e+00, 6.3633944943363696e+00, 7.1221067008046166e+00, 7.9807717985905606e+00,
            9.0169397898903032e+00
        ],
        [
            2.4899229757996061e-01, 7.4109534999454085e-01, 1.2304236340273060e+00, 1.7320508075688772e+00,
            2.2336260616769419e+00, 2.5960831150492023e+00, 2.8612795760570582e+00, 3.2053337944991944e+00,
            3.6353185190372783e+00, 4.1849560176727323e+00, 4.7364330859522967e+00, 5.1870160399136562e+00,
            5.6981777684881099e+00, 6.3633944943363696e+00, 7.1221067008046166e+00, 7.9807717985905606e+00,
            9.0169397898903032e+00
        ],
        [
            2.4899229757996061e-01, 7.4109534999454085e-01, 1.2304236340273060e+00, 1.7320508075688772e+00,
            2.2336260616769419e+00, 2.5960831150492023e+00, 2.8612795760570582e+00, 3.2053337944991944e+00,
            3.6353185190372783e+00, 4.1849560176727323e+00, 4.7364330859522967e+00, 5.1870160399136562e+00,
            5.6981777684881099e+00, 6.3633944943363696e+00, 7.1221067008046166e+00, 7.9807717985905606e+00,
            9.0169397898903032e+00
        ],
        [
            2.4899229757996061e-01, 7.4109534999454085e-01, 1.2304236340273060e+00, 1.7320508075688772e+00,
            2.2336260616769419e+00, 2.5960831150492023e+00, 2.8612795760570582e+00, 3.2053337944991944e+00,
            3.6353185190372783e+00, 4.1849560176727323e+00, 4.7364330859522967e+00, 5.1870160399136562e+00,
            5.6981777684881099e+00, 6.3633944943363696e+00, 7.1221067008046166e+00, 7.9807717985905606e+00,
            9.0169397898903032e+00
        ],
        [
            2.4899229757996061e-01, 7.4109534999454085e-01, 1.2304236340273060e+00, 1.7320508075688772e+00,
            2.2336260616769419e+00, 2.5960831150492023e+00, 2.8612795760570582e+00, 3.2053337944991944e+00,
            3.6353185190372783e+00, 4.1849560176727323e+00, 4.7364330859522967e+00, 5.1870160399136562e+00,
            5.6981777684881099e+00, 6.3633944943363696e+00, 7.1221067008046166e+00, 7.9807717985905606e+00,
            9.0169397898903032e+00
        ],
        [
            2.4899229757996061e-01, 7.4109534999454085e-01, 1.2304236340273060e+00, 1.7320508075688772e+00,
            2.2336260616769419e+00, 2.5960831150492023e+00, 2.8612795760570582e+00, 3.2053337944991944e+00,
            3.6353185190372783e+00, 4.1849560176727323e+00, 4.7364330859522967e+00, 5.1870160399136562e+00,
            5.6981777684881099e+00, 6.3633944943363696e+00, 7.1221067008046166e+00, 7.9807717985905606e+00,
            9.0169397898903032e+00
        ],
        [
            2.4899229757996061e-01, 7.4109534999454085e-01, 1.2304236340273060e+00, 1.7320508075688772e+00,
            2.2336260616769419e+00, 2.5960831150492023e+00, 2.8612795760570582e+00, 3.2053337944991944e+00,
            3.6353185190372783e+00, 4.1849560176727323e+00, 4.7364330859522967e+00, 5.1870160399136562e+00,
            5.6981777684881099e+00, 6.3633944943363696e+00, 7.1221067008046166e+00, 7.9807717985905606e+00,
            9.0169397898903032e+00
        ],
        [
            2.4899229757996061e-01, 7.4109534999454085e-01, 1.2304236340273060e+00, 1.7320508075688772e+00,
            2.2336260616769419e+00, 2.5960831150492023e+00, 2.8612795760570582e+00, 3.2053337944991944e+00,
            3.6353185190372783e+00, 4.1849560176727323e+00, 4.7364330859522967e+00, 5.1870160399136562e+00,
            5.6981777684881099e+00, 6.3633944943363696e+00, 7.1221067008046166e+00, 7.9807717985905606e+00,
            9.0169397898903032e+00
        ]
    ]
    # Weights per level: central node's weight first, then one weight per
    # positive node (same ordering as node_data_list). Small negative weights
    # are expected for some nested levels.
    weight_data_list = [
        [+1.0000000000000000e+00],
        [+6.6666666666666663e-01, +1.6666666666666666e-01],
        [+6.6666666666666674e-01, +1.6666666666666666e-01],
        [+4.5874486825749189e-01, +1.3137860698313561e-01, +1.3855327472974924e-01, +6.9568415836913987e-04],
        [
            +2.5396825396825407e-01, +2.7007432957793776e-01, +9.4850948509485125e-02, +7.9963254708935293e-03,
            +9.4269457556517470e-05
        ],
        [
            +2.5396825396825429e-01, +2.7007432957793776e-01, +9.4850948509485070e-02, +7.9963254708935293e-03,
            +9.4269457556517551e-05
        ],
        [
            +2.5396825396825418e-01, +2.7007432957793781e-01, +9.4850948509485014e-02, +7.9963254708935311e-03,
            +9.4269457556517592e-05
        ],
        [
            +2.5396825396825418e-01, +2.7007432957793781e-01, +9.4850948509485042e-02, +7.9963254708935276e-03,
            +9.4269457556517375e-05
        ],
        [
            +2.6692223033505302e-01, +2.5456123204171222e-01, +1.4192654826449365e-02, +8.8681002152028010e-02,
            +1.9656770938777492e-03, +7.0334802378279075e-03, +1.0563783615416941e-04, -8.2049207541509217e-07,
            +2.1136499505424257e-08
        ],
        [
            +3.0346719985420623e-01, +2.0832499164960877e-01, +6.1151730125247716e-02, +6.4096054686807610e-02,
            +1.8085234254798462e-02, -6.3372247933737571e-03, +2.8848804365067559e-03, +6.0123369459847997e-05,
            +6.0948087314689840e-07, +8.6296846022298632e-10
        ],
        [
            +3.0346719985420623e-01, +2.0832499164960872e-01, +6.1151730125247709e-02, +6.4096054686807541e-02,
            +1.8085234254798459e-02, -6.3372247933737545e-03, +2.8848804365067555e-03, +6.0123369459847922e-05,
            +6.0948087314689830e-07, +8.6296846022298839e-10
        ],
        [
            +3.0346719985420623e-01, +2.0832499164960872e-01, +6.1151730125247716e-02, +6.4096054686807624e-02,
            +1.8085234254798466e-02, -6.3372247933737545e-03, +2.8848804365067559e-03, +6.0123369459847841e-05,
            +6.0948087314689830e-07, +8.6296846022298963e-10
        ],
        [
            +3.0346719985420600e-01, +2.0832499164960883e-01, +6.1151730125247730e-02, +6.4096054686807638e-02,
            +1.8085234254798459e-02, -6.3372247933737580e-03, +2.8848804365067555e-03, +6.0123369459847868e-05,
            +6.0948087314689830e-07, +8.6296846022298756e-10
        ],
        [
            +3.0346719985420617e-01, +2.0832499164960874e-01, +6.1151730125247702e-02, +6.4096054686807596e-02,
            +1.8085234254798459e-02, -6.3372247933737563e-03, +2.8848804365067555e-03, +6.0123369459847936e-05,
            +6.0948087314689851e-07, +8.6296846022298322e-10
        ],
        [
            +3.0346719985420612e-01, +2.0832499164960874e-01, +6.1151730125247723e-02, +6.4096054686807652e-02,
            +1.8085234254798459e-02, -6.3372247933737597e-03, +2.8848804365067563e-03, +6.0123369459848091e-05,
            +6.0948087314689851e-07, +8.6296846022298983e-10
        ],
        [
            +2.5890005324151566e-01, +2.8128101540033167e-02, +1.9968863511734550e-01, +6.5417392836092561e-02,
            +6.1718532565867179e-02, +1.7608475581318002e-03, +1.6592492698936010e-02, -5.5610063068358157e-03,
            +2.7298430467334002e-03, +1.5044205390914219e-05, +5.9474961163931621e-05, +6.1435843232617913e-07,
            +7.9298267864869338e-10, +5.1158053105504208e-12, -1.4840835740298868e-13, +1.2618464280815118e-15
        ],
        [
            +1.3911022236338039e-01, +1.0387687125574284e-01, +1.7607598741571459e-01, +7.7443602746299481e-02,
            +5.4677556143463042e-02, +7.3530110204955076e-03, +1.1529247065398790e-02, -2.7712189007789243e-03,
            +2.1202259559596325e-03, +8.3236045295766745e-05, +5.5691158981081479e-05, +6.9086261179113738e-07,
            -1.3486017348542930e-08, +1.5542195992782658e-09, -1.9341305000880955e-11, +2.6640625166231651e-13,
            -9.9313913286822465e-16
        ],
        [
            +5.1489450806921377e-04, +1.9176011588804434e-01, +1.4807083115521585e-01, +9.2364726716986353e-02,
            +4.5273685465150391e-02, +1.5673473751851151e-02, +3.1554462691875513e-03, +2.3113452403522071e-03,
            +8.1895392750226735e-04, +2.7524214116785131e-04, +3.5729348198975332e-05, +2.7342206801187888e-06,
            +2.4676421345798140e-07, +2.1394194479561062e-08, +4.6011760348655917e-10, +3.0972223576062995e-12,
            +5.4500412650638128e-15, +1.0541326582334014e-18
        ],
        [
            +5.1489450806921377e-04, +1.9176011588804437e-01, +1.4807083115521585e-01, +9.2364726716986353e-02,
            +4.5273685465150523e-02, +1.5673473751851151e-02, +3.1554462691875604e-03, +2.3113452403522050e-03,
            +8.1895392750226670e-04, +2.7524214116785131e-04, +3.5729348198975447e-05, +2.7342206801187884e-06,
            +2.4676421345798140e-07, +2.1394194479561056e-08, +4.6011760348656077e-10, +3.0972223576063011e-12,
            +5.4500412650637663e-15, +1.0541326582337958e-18
        ],
        [
            +5.1489450806925551e-04, +1.9176011588804440e-01, +1.4807083115521585e-01, +9.2364726716986298e-02,
            +4.5273685465150537e-02, +1.5673473751851155e-02, +3.1554462691875573e-03, +2.3113452403522080e-03,
            +8.1895392750226724e-04, +2.7524214116785137e-04, +3.5729348198975352e-05, +2.7342206801187888e-06,
            +2.4676421345798124e-07, +2.1394194479561056e-08, +4.6011760348656144e-10, +3.0972223576062963e-12,
            +5.4500412650638365e-15, +1.0541326582335402e-18
        ],
        [
            +5.1489450806913744e-04, +1.9176011588804429e-01, +1.4807083115521594e-01, +9.2364726716986312e-02,
            +4.5273685465150391e-02, +1.5673473751851151e-02, +3.1554462691875565e-03, +2.3113452403522089e-03,
            +8.1895392750226670e-04, +2.7524214116785142e-04, +3.5729348198975285e-05, +2.7342206801187888e-06,
            +2.4676421345798119e-07, +2.1394194479561059e-08, +4.6011760348656594e-10, +3.0972223576062950e-12,
            +5.4500412650638696e-15, +1.0541326582332041e-18
        ],
        [
            +5.1489450806903368e-04, +1.9176011588804448e-01, +1.4807083115521574e-01, +9.2364726716986423e-02,
            +4.5273685465150516e-02, +1.5673473751851161e-02, +3.1554462691875543e-03, +2.3113452403522063e-03,
            +8.1895392750226713e-04, +2.7524214116785164e-04, +3.5729348198975319e-05, +2.7342206801187905e-06,
            +2.4676421345798151e-07, +2.1394194479561082e-08, +4.6011760348656005e-10, +3.0972223576063043e-12,
            +5.4500412650637592e-15, +1.0541326582339926e-18
        ],
        [
            +5.1489450806913755e-04, +1.9176011588804442e-01, +1.4807083115521577e-01, +9.2364726716986381e-02,
            +4.5273685465150468e-02, +1.5673473751851155e-02, +3.1554462691875560e-03, +2.3113452403522045e-03,
            +8.1895392750226572e-04, +2.7524214116785158e-04, +3.5729348198975298e-05, +2.7342206801187892e-06,
            +2.4676421345798129e-07, +2.1394194479561072e-08, +4.6011760348656103e-10, +3.0972223576062963e-12,
            +5.4500412650638207e-15, +1.0541326582338368e-18
        ],
        [
            +5.1489450806914438e-04, +1.9176011588804442e-01, +1.4807083115521577e-01, +9.2364726716986340e-02,
            +4.5273685465150509e-02, +1.5673473751851155e-02, +3.1554462691875586e-03, +2.3113452403522058e-03,
            +8.1895392750226551e-04, +2.7524214116785142e-04, +3.5729348198975386e-05, +2.7342206801187884e-06,
            +2.4676421345798082e-07, +2.1394194479561059e-08, +4.6011760348656382e-10, +3.0972223576062942e-12,
            +5.4500412650638381e-15, +1.0541326582336941e-18
        ],
        [
            +5.1489450806919989e-04, +1.9176011588804437e-01, +1.4807083115521580e-01, +9.2364726716986395e-02,
            +4.5273685465150426e-02, +1.5673473751851158e-02, +3.1554462691875539e-03, +2.3113452403522054e-03,
            +8.1895392750226681e-04, +2.7524214116785142e-04, +3.5729348198975292e-05, +2.7342206801187884e-06,
            +2.4676421345798108e-07, +2.1394194479561056e-08, +4.6011760348655901e-10, +3.0972223576062975e-12,
            +5.4500412650638412e-15, +1.0541326582337527e-18
        ]
    ]
    # Levels are 1-based; an out-of-range level surfaces as a ValueError so the
    # caller sees a clear message rather than a raw IndexError.
    try:
        node_data = np.array(node_data_list[level - 1])
        weight_data = np.array(weight_data_list[level - 1])
    except IndexError:
        raise ValueError(f"The nested rule is only available up to a level of {len(node_data_list)}.")
    return node_data, weight_data
| 54.487047 | 119 | 0.66188 |
import functools
import itertools
from typing import Iterable, List, Optional, Tuple
import numpy as np
import scipy.special
import scipy.stats
from ..utilities.basics import Array, Options, StringRepresentation, format_options
class Integration(StringRepresentation):
_size: int
_seed: Optional[int]
_description: str
_builder: functools.partial
_specification_options: Options
def __init__(self, specification: str, size: int, specification_options: Optional[Options] = None) -> None:
specifications = {
'monte_carlo': (functools.partial(monte_carlo), "with Monte Carlo simulation"),
'halton': (functools.partial(halton), "with Halton sequences"),
'lhs': (functools.partial(lhs), "with Latin Hypercube Sampling (LHS)"),
'mlhs': (functools.partial(lhs, modified=True), "with Modified Latin Hypercube Sampling (MLHS)"),
'product': (functools.partial(product_rule), f"according to the level-{size} Gauss-Hermite product rule"),
'grid': (
functools.partial(sparse_grid),
f"in a sparse grid according to the level-{size} Gauss-Hermite rule"
),
'nested_product': (
functools.partial(product_rule, nested=True),
f"according to the level-{size} nested Gauss-Hermite product rule"
),
'nested_grid': (
functools.partial(sparse_grid, nested=True),
f"in a sparse grid according to the level-{size} nested Gauss-Hermite rule"
)
}
if specification not in specifications:
raise ValueError(f"specification must be one of {list(specifications.keys())}.")
if not isinstance(size, int) or size < 1:
raise ValueError("size must be a positive integer.")
if specification_options is not None and not isinstance(specification_options, dict):
raise ValueError("specification_options must be None or a dict.")
self._size = size
self._specification = specification
self._builder, self._description = specifications[specification]
self._specification_options: Options = {}
if specification == 'halton':
self._specification_options['discard'] = 100
self._specification_options.update(specification_options or {})
if specification in {'monte_carlo', 'lhs', 'mlhs'}:
if not isinstance(self._specification_options.get('seed', 0), int):
raise ValueError("The specification option seed must be an integer.")
elif specification == 'halton':
discard = self._specification_options['discard']
if not isinstance(discard, int) or discard < 0:
raise ValueError("The specification option discard must be a nonnegative integer.")
def __str__(self) -> str:
return (
f"Configured to construct nodes and weights {self._description} with options "
f"{format_options(self._specification_options)}."
)
def _build_many(self, dimensions: int, ids: Iterable) -> Tuple[Array, Array, Array]:
builder = self._builder
if self._specification in {'monte_carlo', 'lhs', 'mlhs'}:
builder = functools.partial(builder, state=np.random.RandomState(self._specification_options.get('seed')))
count = 0
ids_list: List[Array] = []
nodes_list: List[Array] = []
weights_list: List[Array] = []
for i in ids:
if self._specification == 'halton':
nodes, weights = builder(dimensions, self._size, start=self._specification_options['discard'] + count)
else:
nodes, weights = builder(dimensions, self._size)
ids_list.append(np.repeat(i, weights.size))
nodes_list.append(nodes)
weights_list.append(weights)
count += weights.size
return np.concatenate(ids_list), np.concatenate(nodes_list), np.concatenate(weights_list)
def _build(self, dimensions: int) -> Tuple[Array, Array]:
builder = self._builder
if self._specification in {'monte_carlo', 'lhs', 'mlhs'}:
builder = functools.partial(builder, state=np.random.RandomState(self._specification_options.get('seed')))
if self._specification == 'halton':
return builder(dimensions, self._size, start=self._specification_options['discard'])
return builder(dimensions, self._size)
def monte_carlo(dimensions: int, size: int, state: np.random.RandomState) -> Tuple[Array, Array]:
nodes = state.normal(size=(size, dimensions))
weights = np.repeat(1 / size, size)
return nodes, weights
def halton(dimensions: int, size: int, start: int) -> Tuple[Array, Array]:
sequences = np.zeros((size, dimensions))
for dimension in range(dimensions):
base = get_prime(dimension)
for index in range(size):
value = 0.0
denominator = 1.0
quotient = start + index
while quotient > 0:
quotient, remainder = divmod(quotient, base)
denominator *= base
value += remainder / denominator
sequences[index, dimension] = value
nodes = scipy.stats.norm().ppf(sequences)
weights = np.repeat(1 / size, size)
return nodes, weights
def lhs(dimensions: int, size: int, state: np.random.RandomState, modified: bool = False) -> Tuple[Array, Array]:
samples = np.zeros((size, dimensions))
for dimension in range(dimensions):
samples[:, dimension] = state.permutation(np.arange(size) + state.uniform(size=1 if modified else size)) / size
nodes = scipy.stats.norm().ppf(samples)
weights = np.repeat(1 / size, size)
return nodes, weights
@functools.lru_cache()
def product_rule(dimensions: int, level: int, nested: bool = False) -> Tuple[Array, Array]:
base_nodes, base_weights = quadrature_rule(level, nested)
nodes = np.array(list(itertools.product(base_nodes, repeat=dimensions)))
weights = functools.reduce(np.kron, itertools.repeat(base_weights, dimensions))
return nodes, weights
@functools.lru_cache()
def sparse_grid(dimensions: int, level: int, nested: bool = False) -> Tuple[Array, Array]:
nodes_list: List[Array] = []
weights_list: List[Array] = []
for q in range(max(0, level - dimensions), level):
coefficient = (-1)**(level - q - 1) * scipy.special.binom(dimensions - 1, dimensions + q - level)
for base_levels in same_size_sequences(dimensions, dimensions + q):
base_nodes_list, base_weights_list = zip(*(quadrature_rule(l, nested) for l in base_levels))
nodes_list.append(np.array(list(itertools.product(*base_nodes_list))))
weights_list.append(coefficient * functools.reduce(np.kron, base_weights_list))
nodes = np.concatenate(nodes_list)
weights = np.concatenate(weights_list)
sorted_indices = np.lexsort(nodes[:, ::-1].T)
nodes = nodes[sorted_indices]
weights = weights[sorted_indices]
last = 0
keep = [last]
for row in range(1, weights.size):
if np.array_equal(nodes[row], nodes[row - 1]):
weights[last] += weights[row]
continue
last = row
keep.append(row)
nodes = nodes[keep]
weights = weights[keep]
weights /= weights.sum()
return nodes, weights
def same_size_sequences(size: int, summation: int) -> Array:
summation -= size
sequence = np.zeros(size, np.int64)
sequence[0] = summation
sequences = [sequence.copy()]
forward = 0
while sequence[-1] < summation:
if forward == size - 1:
for backward in reversed(range(forward)):
forward = backward
if sequence[backward] != 0:
break
sequence[forward] -= 1
forward += 1
sequence[forward] = summation - sequence[:forward].sum()
if forward < size - 1:
sequence[forward + 1:] = 0
sequences.append(sequence.copy())
return np.vstack(sequences) + 1
def quadrature_rule(level: int, nested: bool) -> Tuple[Array, Array]:
if not nested:
raw_nodes, raw_weights = np.polynomial.hermite.hermgauss(level)
return raw_nodes * np.sqrt(2), raw_weights / np.sqrt(np.pi)
node_data, weight_data = get_nested_data(level)
return np.r_[-node_data[::-1], 0, node_data], np.r_[weight_data[::-1], weight_data[1:]]
def get_prime(dimension: int) -> int:
primes = [
2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107,
109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229,
233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359,
367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491,
499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641,
643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787,
797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941,
947, 953, 967, 971, 977, 983, 991, 997
]
try:
return primes[dimension]
except IndexError:
raise ValueError(f"Halton sequences are only available for {len(primes)} dimensions here.")
def get_nested_data(level: int) -> Tuple[Array, Array]:
node_data_list = [
[],
[1.7320508075688772e+00],
[1.7320508075688772e+00],
[7.4109534999454085e-01, 1.7320508075688772e+00, 4.1849560176727323e+00],
[7.4109534999454085e-01, 1.7320508075688772e+00, 2.8612795760570582e+00, 4.1849560176727323e+00],
[7.4109534999454085e-01, 1.7320508075688772e+00, 2.8612795760570582e+00, 4.1849560176727323e+00],
[7.4109534999454085e-01, 1.7320508075688772e+00, 2.8612795760570582e+00, 4.1849560176727323e+00],
[7.4109534999454085e-01, 1.7320508075688772e+00, 2.8612795760570582e+00, 4.1849560176727323e+00],
[
7.4109534999454085e-01, 1.2304236340273060e+00, 1.7320508075688772e+00, 2.5960831150492023e+00,
2.8612795760570582e+00, 4.1849560176727323e+00, 5.1870160399136562e+00, 6.3633944943363696e+00
],
[
7.4109534999454085e-01, 1.2304236340273060e+00, 1.7320508075688772e+00, 2.5960831150492023e+00,
2.8612795760570582e+00, 3.2053337944991944e+00, 4.1849560176727323e+00, 5.1870160399136562e+00,
6.3633944943363696e+00
],
[
7.4109534999454085e-01, 1.2304236340273060e+00, 1.7320508075688772e+00, 2.5960831150492023e+00,
2.8612795760570582e+00, 3.2053337944991944e+00, 4.1849560176727323e+00, 5.1870160399136562e+00,
6.3633944943363696e+00
],
[
7.4109534999454085e-01, 1.2304236340273060e+00, 1.7320508075688772e+00, 2.5960831150492023e+00,
2.8612795760570582e+00, 3.2053337944991944e+00, 4.1849560176727323e+00, 5.1870160399136562e+00,
6.3633944943363696e+00
],
[
7.4109534999454085e-01, 1.2304236340273060e+00, 1.7320508075688772e+00, 2.5960831150492023e+00,
2.8612795760570582e+00, 3.2053337944991944e+00, 4.1849560176727323e+00, 5.1870160399136562e+00,
6.3633944943363696e+00
],
[
7.4109534999454085e-01, 1.2304236340273060e+00, 1.7320508075688772e+00, 2.5960831150492023e+00,
2.8612795760570582e+00, 3.2053337944991944e+00, 4.1849560176727323e+00, 5.1870160399136562e+00,
6.3633944943363696e+00
],
[
7.4109534999454085e-01, 1.2304236340273060e+00, 1.7320508075688772e+00, 2.5960831150492023e+00,
2.8612795760570582e+00, 3.2053337944991944e+00, 4.1849560176727323e+00, 5.1870160399136562e+00,
6.3633944943363696e+00
],
[
2.4899229757996061e-01, 7.4109534999454085e-01, 1.2304236340273060e+00, 1.7320508075688772e+00,
2.2336260616769419e+00, 2.5960831150492023e+00, 2.8612795760570582e+00, 3.2053337944991944e+00,
3.6353185190372783e+00, 4.1849560176727323e+00, 5.1870160399136562e+00, 6.3633944943363696e+00,
7.1221067008046166e+00, 7.9807717985905606e+00, 9.0169397898903032e+00
],
[
2.4899229757996061e-01, 7.4109534999454085e-01, 1.2304236340273060e+00, 1.7320508075688772e+00,
2.2336260616769419e+00, 2.5960831150492023e+00, 2.8612795760570582e+00, 3.2053337944991944e+00,
3.6353185190372783e+00, 4.1849560176727323e+00, 5.1870160399136562e+00, 5.6981777684881099e+00,
6.3633944943363696e+00, 7.1221067008046166e+00, 7.9807717985905606e+00, 9.0169397898903032e+00
],
[
2.4899229757996061e-01, 7.4109534999454085e-01, 1.2304236340273060e+00, 1.7320508075688772e+00,
2.2336260616769419e+00, 2.5960831150492023e+00, 2.8612795760570582e+00, 3.2053337944991944e+00,
3.6353185190372783e+00, 4.1849560176727323e+00, 4.7364330859522967e+00, 5.1870160399136562e+00,
5.6981777684881099e+00, 6.3633944943363696e+00, 7.1221067008046166e+00, 7.9807717985905606e+00,
9.0169397898903032e+00
],
[
2.4899229757996061e-01, 7.4109534999454085e-01, 1.2304236340273060e+00, 1.7320508075688772e+00,
2.2336260616769419e+00, 2.5960831150492023e+00, 2.8612795760570582e+00, 3.2053337944991944e+00,
3.6353185190372783e+00, 4.1849560176727323e+00, 4.7364330859522967e+00, 5.1870160399136562e+00,
5.6981777684881099e+00, 6.3633944943363696e+00, 7.1221067008046166e+00, 7.9807717985905606e+00,
9.0169397898903032e+00
],
[
2.4899229757996061e-01, 7.4109534999454085e-01, 1.2304236340273060e+00, 1.7320508075688772e+00,
2.2336260616769419e+00, 2.5960831150492023e+00, 2.8612795760570582e+00, 3.2053337944991944e+00,
3.6353185190372783e+00, 4.1849560176727323e+00, 4.7364330859522967e+00, 5.1870160399136562e+00,
5.6981777684881099e+00, 6.3633944943363696e+00, 7.1221067008046166e+00, 7.9807717985905606e+00,
9.0169397898903032e+00
],
[
2.4899229757996061e-01, 7.4109534999454085e-01, 1.2304236340273060e+00, 1.7320508075688772e+00,
2.2336260616769419e+00, 2.5960831150492023e+00, 2.8612795760570582e+00, 3.2053337944991944e+00,
3.6353185190372783e+00, 4.1849560176727323e+00, 4.7364330859522967e+00, 5.1870160399136562e+00,
5.6981777684881099e+00, 6.3633944943363696e+00, 7.1221067008046166e+00, 7.9807717985905606e+00,
9.0169397898903032e+00
],
[
2.4899229757996061e-01, 7.4109534999454085e-01, 1.2304236340273060e+00, 1.7320508075688772e+00,
2.2336260616769419e+00, 2.5960831150492023e+00, 2.8612795760570582e+00, 3.2053337944991944e+00,
3.6353185190372783e+00, 4.1849560176727323e+00, 4.7364330859522967e+00, 5.1870160399136562e+00,
5.6981777684881099e+00, 6.3633944943363696e+00, 7.1221067008046166e+00, 7.9807717985905606e+00,
9.0169397898903032e+00
],
[
2.4899229757996061e-01, 7.4109534999454085e-01, 1.2304236340273060e+00, 1.7320508075688772e+00,
2.2336260616769419e+00, 2.5960831150492023e+00, 2.8612795760570582e+00, 3.2053337944991944e+00,
3.6353185190372783e+00, 4.1849560176727323e+00, 4.7364330859522967e+00, 5.1870160399136562e+00,
5.6981777684881099e+00, 6.3633944943363696e+00, 7.1221067008046166e+00, 7.9807717985905606e+00,
9.0169397898903032e+00
],
[
2.4899229757996061e-01, 7.4109534999454085e-01, 1.2304236340273060e+00, 1.7320508075688772e+00,
2.2336260616769419e+00, 2.5960831150492023e+00, 2.8612795760570582e+00, 3.2053337944991944e+00,
3.6353185190372783e+00, 4.1849560176727323e+00, 4.7364330859522967e+00, 5.1870160399136562e+00,
5.6981777684881099e+00, 6.3633944943363696e+00, 7.1221067008046166e+00, 7.9807717985905606e+00,
9.0169397898903032e+00
],
[
2.4899229757996061e-01, 7.4109534999454085e-01, 1.2304236340273060e+00, 1.7320508075688772e+00,
2.2336260616769419e+00, 2.5960831150492023e+00, 2.8612795760570582e+00, 3.2053337944991944e+00,
3.6353185190372783e+00, 4.1849560176727323e+00, 4.7364330859522967e+00, 5.1870160399136562e+00,
5.6981777684881099e+00, 6.3633944943363696e+00, 7.1221067008046166e+00, 7.9807717985905606e+00,
9.0169397898903032e+00
]
]
weight_data_list = [
[+1.0000000000000000e+00],
[+6.6666666666666663e-01, +1.6666666666666666e-01],
[+6.6666666666666674e-01, +1.6666666666666666e-01],
[+4.5874486825749189e-01, +1.3137860698313561e-01, +1.3855327472974924e-01, +6.9568415836913987e-04],
[
+2.5396825396825407e-01, +2.7007432957793776e-01, +9.4850948509485125e-02, +7.9963254708935293e-03,
+9.4269457556517470e-05
],
[
+2.5396825396825429e-01, +2.7007432957793776e-01, +9.4850948509485070e-02, +7.9963254708935293e-03,
+9.4269457556517551e-05
],
[
+2.5396825396825418e-01, +2.7007432957793781e-01, +9.4850948509485014e-02, +7.9963254708935311e-03,
+9.4269457556517592e-05
],
[
+2.5396825396825418e-01, +2.7007432957793781e-01, +9.4850948509485042e-02, +7.9963254708935276e-03,
+9.4269457556517375e-05
],
[
+2.6692223033505302e-01, +2.5456123204171222e-01, +1.4192654826449365e-02, +8.8681002152028010e-02,
+1.9656770938777492e-03, +7.0334802378279075e-03, +1.0563783615416941e-04, -8.2049207541509217e-07,
+2.1136499505424257e-08
],
[
+3.0346719985420623e-01, +2.0832499164960877e-01, +6.1151730125247716e-02, +6.4096054686807610e-02,
+1.8085234254798462e-02, -6.3372247933737571e-03, +2.8848804365067559e-03, +6.0123369459847997e-05,
+6.0948087314689840e-07, +8.6296846022298632e-10
],
[
+3.0346719985420623e-01, +2.0832499164960872e-01, +6.1151730125247709e-02, +6.4096054686807541e-02,
+1.8085234254798459e-02, -6.3372247933737545e-03, +2.8848804365067555e-03, +6.0123369459847922e-05,
+6.0948087314689830e-07, +8.6296846022298839e-10
],
[
+3.0346719985420623e-01, +2.0832499164960872e-01, +6.1151730125247716e-02, +6.4096054686807624e-02,
+1.8085234254798466e-02, -6.3372247933737545e-03, +2.8848804365067559e-03, +6.0123369459847841e-05,
+6.0948087314689830e-07, +8.6296846022298963e-10
],
[
+3.0346719985420600e-01, +2.0832499164960883e-01, +6.1151730125247730e-02, +6.4096054686807638e-02,
+1.8085234254798459e-02, -6.3372247933737580e-03, +2.8848804365067555e-03, +6.0123369459847868e-05,
+6.0948087314689830e-07, +8.6296846022298756e-10
],
[
+3.0346719985420617e-01, +2.0832499164960874e-01, +6.1151730125247702e-02, +6.4096054686807596e-02,
+1.8085234254798459e-02, -6.3372247933737563e-03, +2.8848804365067555e-03, +6.0123369459847936e-05,
+6.0948087314689851e-07, +8.6296846022298322e-10
],
[
+3.0346719985420612e-01, +2.0832499164960874e-01, +6.1151730125247723e-02, +6.4096054686807652e-02,
+1.8085234254798459e-02, -6.3372247933737597e-03, +2.8848804365067563e-03, +6.0123369459848091e-05,
+6.0948087314689851e-07, +8.6296846022298983e-10
],
[
+2.5890005324151566e-01, +2.8128101540033167e-02, +1.9968863511734550e-01, +6.5417392836092561e-02,
+6.1718532565867179e-02, +1.7608475581318002e-03, +1.6592492698936010e-02, -5.5610063068358157e-03,
+2.7298430467334002e-03, +1.5044205390914219e-05, +5.9474961163931621e-05, +6.1435843232617913e-07,
+7.9298267864869338e-10, +5.1158053105504208e-12, -1.4840835740298868e-13, +1.2618464280815118e-15
],
[
+1.3911022236338039e-01, +1.0387687125574284e-01, +1.7607598741571459e-01, +7.7443602746299481e-02,
+5.4677556143463042e-02, +7.3530110204955076e-03, +1.1529247065398790e-02, -2.7712189007789243e-03,
+2.1202259559596325e-03, +8.3236045295766745e-05, +5.5691158981081479e-05, +6.9086261179113738e-07,
-1.3486017348542930e-08, +1.5542195992782658e-09, -1.9341305000880955e-11, +2.6640625166231651e-13,
-9.9313913286822465e-16
],
[
+5.1489450806921377e-04, +1.9176011588804434e-01, +1.4807083115521585e-01, +9.2364726716986353e-02,
+4.5273685465150391e-02, +1.5673473751851151e-02, +3.1554462691875513e-03, +2.3113452403522071e-03,
+8.1895392750226735e-04, +2.7524214116785131e-04, +3.5729348198975332e-05, +2.7342206801187888e-06,
+2.4676421345798140e-07, +2.1394194479561062e-08, +4.6011760348655917e-10, +3.0972223576062995e-12,
+5.4500412650638128e-15, +1.0541326582334014e-18
],
[
+5.1489450806921377e-04, +1.9176011588804437e-01, +1.4807083115521585e-01, +9.2364726716986353e-02,
+4.5273685465150523e-02, +1.5673473751851151e-02, +3.1554462691875604e-03, +2.3113452403522050e-03,
+8.1895392750226670e-04, +2.7524214116785131e-04, +3.5729348198975447e-05, +2.7342206801187884e-06,
+2.4676421345798140e-07, +2.1394194479561056e-08, +4.6011760348656077e-10, +3.0972223576063011e-12,
+5.4500412650637663e-15, +1.0541326582337958e-18
],
[
+5.1489450806925551e-04, +1.9176011588804440e-01, +1.4807083115521585e-01, +9.2364726716986298e-02,
+4.5273685465150537e-02, +1.5673473751851155e-02, +3.1554462691875573e-03, +2.3113452403522080e-03,
+8.1895392750226724e-04, +2.7524214116785137e-04, +3.5729348198975352e-05, +2.7342206801187888e-06,
+2.4676421345798124e-07, +2.1394194479561056e-08, +4.6011760348656144e-10, +3.0972223576062963e-12,
+5.4500412650638365e-15, +1.0541326582335402e-18
],
[
+5.1489450806913744e-04, +1.9176011588804429e-01, +1.4807083115521594e-01, +9.2364726716986312e-02,
+4.5273685465150391e-02, +1.5673473751851151e-02, +3.1554462691875565e-03, +2.3113452403522089e-03,
+8.1895392750226670e-04, +2.7524214116785142e-04, +3.5729348198975285e-05, +2.7342206801187888e-06,
+2.4676421345798119e-07, +2.1394194479561059e-08, +4.6011760348656594e-10, +3.0972223576062950e-12,
+5.4500412650638696e-15, +1.0541326582332041e-18
],
[
+5.1489450806903368e-04, +1.9176011588804448e-01, +1.4807083115521574e-01, +9.2364726716986423e-02,
+4.5273685465150516e-02, +1.5673473751851161e-02, +3.1554462691875543e-03, +2.3113452403522063e-03,
+8.1895392750226713e-04, +2.7524214116785164e-04, +3.5729348198975319e-05, +2.7342206801187905e-06,
+2.4676421345798151e-07, +2.1394194479561082e-08, +4.6011760348656005e-10, +3.0972223576063043e-12,
+5.4500412650637592e-15, +1.0541326582339926e-18
],
[
+5.1489450806913755e-04, +1.9176011588804442e-01, +1.4807083115521577e-01, +9.2364726716986381e-02,
+4.5273685465150468e-02, +1.5673473751851155e-02, +3.1554462691875560e-03, +2.3113452403522045e-03,
+8.1895392750226572e-04, +2.7524214116785158e-04, +3.5729348198975298e-05, +2.7342206801187892e-06,
+2.4676421345798129e-07, +2.1394194479561072e-08, +4.6011760348656103e-10, +3.0972223576062963e-12,
+5.4500412650638207e-15, +1.0541326582338368e-18
],
[
+5.1489450806914438e-04, +1.9176011588804442e-01, +1.4807083115521577e-01, +9.2364726716986340e-02,
+4.5273685465150509e-02, +1.5673473751851155e-02, +3.1554462691875586e-03, +2.3113452403522058e-03,
+8.1895392750226551e-04, +2.7524214116785142e-04, +3.5729348198975386e-05, +2.7342206801187884e-06,
+2.4676421345798082e-07, +2.1394194479561059e-08, +4.6011760348656382e-10, +3.0972223576062942e-12,
+5.4500412650638381e-15, +1.0541326582336941e-18
],
[
+5.1489450806919989e-04, +1.9176011588804437e-01, +1.4807083115521580e-01, +9.2364726716986395e-02,
+4.5273685465150426e-02, +1.5673473751851158e-02, +3.1554462691875539e-03, +2.3113452403522054e-03,
+8.1895392750226681e-04, +2.7524214116785142e-04, +3.5729348198975292e-05, +2.7342206801187884e-06,
+2.4676421345798108e-07, +2.1394194479561056e-08, +4.6011760348655901e-10, +3.0972223576062975e-12,
+5.4500412650638412e-15, +1.0541326582337527e-18
]
]
try:
node_data = np.array(node_data_list[level - 1])
weight_data = np.array(weight_data_list[level - 1])
except IndexError:
raise ValueError(f"The nested rule is only available up to a level of {len(node_data_list)}.")
return node_data, weight_data
| true | true |
1c2d827baba29302aa7dd11c4233696b7db2ff29 | 1,549 | py | Python | examples/pcrl_example.py | GuoJingyao/cornac | e7529990ec1dfa586c4af3de98e4b3e00a786578 | [
"Apache-2.0"
] | null | null | null | examples/pcrl_example.py | GuoJingyao/cornac | e7529990ec1dfa586c4af3de98e4b3e00a786578 | [
"Apache-2.0"
] | null | null | null | examples/pcrl_example.py | GuoJingyao/cornac | e7529990ec1dfa586c4af3de98e4b3e00a786578 | [
"Apache-2.0"
] | null | null | null | """
Fit to and evaluate PCRL [1] on the Office Amazon dataset.
[1] Salah, Aghiles, and Hady W. Lauw. Probabilistic Collaborative Representation Learning\
for Personalized Item Recommendation. In UAI 2018.
@author: Aghiles Salah <asalah@smu.edu.sg>
"""
from cornac.data import GraphModule
from cornac.eval_methods import RatioSplit
from cornac.experiment import Experiment
from cornac import metrics
from cornac.models import PCRL
from cornac.datasets import amazon_office as office

# Office Amazon dataset: explicit ratings plus an item-context graph.
ratings = office.load_rating()
item_graph = GraphModule(data=office.load_context())

# 80/20 shuffled train/test split; ratings >= 3.5 count as relevant.
eval_method = RatioSplit(
    data=ratings,
    test_size=0.2,
    rating_threshold=3.5,
    shuffle=True,
    exclude_unknowns=True,
    verbose=True,
    item_graph=item_graph,
)

# PCRL recommender: 100 latent factors, one 300-unit inference layer.
model = PCRL(k=100, z_dims=[300], max_iter=300, learning_rate=0.001)

# Ranking metrics computed on the held-out split.
ranking_metrics = [
    metrics.NDCG(k=-1),
    metrics.Recall(k=20),
    metrics.Precision(k=20),
]

Experiment(
    eval_method=eval_method,
    models=[model],
    metrics=ranking_metrics,
).run()
"""
Output:
| NDCG@-1 | Recall@20 | Precision@20 | Train (s) | Test (s)
---- + ------- + --------- + ------------ + --------- + --------
pcrl | 0.1922 | 0.0862 | 0.0148 | 2591.4878 | 4.0957
*Results may change slightly from one run to another due to different random initial parameters
""" | 30.372549 | 95 | 0.657844 |
from cornac.data import GraphModule
from cornac.eval_methods import RatioSplit
from cornac.experiment import Experiment
from cornac import metrics
from cornac.models import PCRL
from cornac.datasets import amazon_office as office
ratings = office.load_rating()
contexts = office.load_context()
item_graph_module = GraphModule(data=contexts)
ratio_split = RatioSplit(data=ratings,
test_size=0.2, rating_threshold=3.5,
shuffle=True, exclude_unknowns=True,
verbose=True, item_graph=item_graph_module)
pcrl = PCRL(k=100, z_dims=[300],
max_iter=300,
learning_rate=0.001)
nDgc = metrics.NDCG(k=-1)
rec = metrics.Recall(k=20)
pre = metrics.Precision(k=20)
exp = Experiment(eval_method=ratio_split,
models=[pcrl],
metrics=[nDgc, rec, pre])
exp.run()
| true | true |
1c2d8398405119af5984541e83c831bd2125df6b | 3,948 | py | Python | ax/modelbridge/transforms/centered_unit_x.py | Balandat/Ax | 6c7556165291a5329744b5075d5f95d2dec18938 | [
"MIT"
] | null | null | null | ax/modelbridge/transforms/centered_unit_x.py | Balandat/Ax | 6c7556165291a5329744b5075d5f95d2dec18938 | [
"MIT"
] | null | null | null | ax/modelbridge/transforms/centered_unit_x.py | Balandat/Ax | 6c7556165291a5329744b5075d5f95d2dec18938 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple
from ax.core.observation import ObservationData, ObservationFeatures
from ax.core.parameter import ParameterType, RangeParameter
from ax.core.search_space import SearchSpace
from ax.core.types import TConfig
from ax.modelbridge.transforms.base import Transform
from ax.utils.common.docutils import copy_doc
if TYPE_CHECKING:
# import as module to make sphinx-autodoc-typehints happy
from ax import modelbridge as modelbridge_module # noqa F401 # pragma: no cover
class CenteredUnitX(Transform):
    """Map X to [-1, 1]^d for RangeParameter of type float and not log scale.

    Currently does not support linear constraints, but could in the future be
    adjusted to transform them too, since this is a linear operation.

    Transform is done in-place.
    """

    def __init__(
        self,
        search_space: SearchSpace,
        observation_features: List[ObservationFeatures],
        observation_data: List[ObservationData],
        modelbridge: Optional["modelbridge_module.base.ModelBridge"] = None,
        config: Optional[TConfig] = None,
    ) -> None:
        # Identify parameters that should be transformed: float-valued
        # RangeParameters that are not on a log scale.
        self.bounds: Dict[str, Tuple[float, float]] = {}
        for p_name, p in search_space.parameters.items():
            if (
                isinstance(p, RangeParameter)
                and p.parameter_type == ParameterType.FLOAT
                and not p.log_scale
            ):
                self.bounds[p_name] = (p.lower, p.upper)

    @copy_doc(Transform.transform_observation_features)
    def transform_observation_features(
        self, observation_features: List[ObservationFeatures]
    ) -> List[ObservationFeatures]:
        for obsf in observation_features:
            for p_name, (l, u) in self.bounds.items():
                if p_name in obsf.parameters:
                    # Affine map [l, u] -> [-1, 1].
                    # pyre-fixme[9]: declared float, value is
                    #  Optional[Union[bool, float, str]].
                    param: float = obsf.parameters[p_name]
                    obsf.parameters[p_name] = -1 + 2 * (param - l) / (u - l)
        return observation_features

    @copy_doc(Transform.transform_search_space)
    def transform_search_space(self, search_space: SearchSpace) -> SearchSpace:
        for p_name, p in search_space.parameters.items():
            if p_name in self.bounds and isinstance(p, RangeParameter):
                p.update_range(lower=-1.0, upper=1.0)
                if p.target_value is not None:
                    l, u = self.bounds[p_name]
                    # pyre-fixme[58]: `-` is not supported for operand types
                    #  `Union[None, bool, float, int, str]` and `float`.
                    new_tval = -1 + 2 * (p.target_value - l) / (u - l)
                    p._target_value = new_tval
        for c in search_space.parameter_constraints:
            for p_name in c.constraint_dict:
                if p_name in self.bounds:
                    raise ValueError("Does not support parameter constraints")
        return search_space

    @copy_doc(Transform.untransform_observation_features)
    def untransform_observation_features(
        self, observation_features: List[ObservationFeatures]
    ) -> List[ObservationFeatures]:
        for obsf in observation_features:
            for p_name, (l, u) in self.bounds.items():
                # Mirror the membership check done in
                # transform_observation_features so that features missing a
                # transformed parameter no longer raise KeyError.
                if p_name in obsf.parameters:
                    # Inverse affine map [-1, 1] -> [l, u].
                    # pyre-fixme[9]: declared float, value is
                    #  Optional[Union[bool, float, str]].
                    param: float = obsf.parameters[p_name]
                    obsf.parameters[p_name] = ((param + 1) / 2) * (u - l) + l
        return observation_features
| 44.359551 | 88 | 0.645897 |
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple
from ax.core.observation import ObservationData, ObservationFeatures
from ax.core.parameter import ParameterType, RangeParameter
from ax.core.search_space import SearchSpace
from ax.core.types import TConfig
from ax.modelbridge.transforms.base import Transform
from ax.utils.common.docutils import copy_doc
if TYPE_CHECKING:
from ax import modelbridge as modelbridge_module nitX(Transform):
def __init__(
self,
search_space: SearchSpace,
observation_features: List[ObservationFeatures],
observation_data: List[ObservationData],
modelbridge: Optional["modelbridge_module.base.ModelBridge"] = None,
config: Optional[TConfig] = None,
) -> None:
self.bounds: Dict[str, Tuple[float, float]] = {}
for p_name, p in search_space.parameters.items():
if (
isinstance(p, RangeParameter)
and p.parameter_type == ParameterType.FLOAT
and not p.log_scale
):
self.bounds[p_name] = (p.lower, p.upper)
@copy_doc(Transform.transform_observation_features)
def transform_observation_features(
self, observation_features: List[ObservationFeatures]
) -> List[ObservationFeatures]:
for obsf in observation_features:
for p_name, (l, u) in self.bounds.items():
if p_name in obsf.parameters:
param: float = obsf.parameters[p_name]
obsf.parameters[p_name] = -1 + 2 * (param - l) / (u - l)
return observation_features
@copy_doc(Transform.transform_search_space)
def transform_search_space(self, search_space: SearchSpace) -> SearchSpace:
for p_name, p in search_space.parameters.items():
if p_name in self.bounds and isinstance(p, RangeParameter):
p.update_range(lower=-1.0, upper=1.0)
if p.target_value is not None:
l, u = self.bounds[p_name]
new_tval = -1 + 2 * (p.target_value - l) / (u - l)
p._target_value = new_tval
for c in search_space.parameter_constraints:
for p_name in c.constraint_dict:
if p_name in self.bounds:
raise ValueError("Does not support parameter constraints")
return search_space
@copy_doc(Transform.untransform_observation_features)
def untransform_observation_features(
self, observation_features: List[ObservationFeatures]
) -> List[ObservationFeatures]:
for obsf in observation_features:
for p_name, (l, u) in self.bounds.items():
param: float = obsf.parameters[p_name]
obsf.parameters[p_name] = ((param + 1) / 2) * (u - l) + l
return observation_features
| true | true |
1c2d8428b2a25dbbb25160c16277f654c9cffd62 | 10,641 | py | Python | oidc_provider/lib/endpoints/token.py | senzil/django-oidc-provider | b2144ea31f3178d7346809cd45341831830d9e3e | [
"MIT"
] | null | null | null | oidc_provider/lib/endpoints/token.py | senzil/django-oidc-provider | b2144ea31f3178d7346809cd45341831830d9e3e | [
"MIT"
] | 1 | 2021-06-15T11:54:09.000Z | 2021-06-15T11:58:47.000Z | oidc_provider/lib/endpoints/token.py | senzil/senzil-django-oidc-provider | b2144ea31f3178d7346809cd45341831830d9e3e | [
"MIT"
] | null | null | null | import inspect
from base64 import urlsafe_b64encode
import hashlib
import logging
from django.contrib.auth import authenticate
from django.http import JsonResponse
from oidc_provider.lib.errors import (
TokenError,
UserAuthError,
)
from oidc_provider.lib.utils.oauth2 import extract_client_auth
from oidc_provider.lib.utils.token import (
create_id_token,
create_token,
encode_jwt,
access_token_format,
)
from oidc_provider.models import (
Client,
Code,
Token,
)
from oidc_provider import settings
logger = logging.getLogger(__name__)
class TokenEndpoint(object):
    """Handler for the OAuth2/OIDC token endpoint.

    Extracts and validates the POSTed token-request parameters, then builds
    the JSON response dictionary for one of four grant types:
    ``authorization_code``, ``password``, ``refresh_token`` or
    ``client_credentials``.
    """

    def __init__(self, request):
        self.request = request
        self.params = {}
        self.user = None
        self._extract_params()

    def _extract_params(self):
        # Client credentials may arrive via the Authorization header or the
        # POST body; extract_client_auth handles both.
        client_id, client_secret = extract_client_auth(self.request)

        self.params['client_id'] = client_id
        self.params['client_secret'] = client_secret
        self.params['redirect_uri'] = self.request.POST.get('redirect_uri', '')
        self.params['grant_type'] = self.request.POST.get('grant_type', '')
        self.params['code'] = self.request.POST.get('code', '')
        self.params['state'] = self.request.POST.get('state', '')
        self.params['scope'] = self.request.POST.get('scope', '')
        self.params['refresh_token'] = self.request.POST.get('refresh_token', '')
        # PKCE parameter. No '' default here: None (absent) disables the
        # PKCE check in validate_params.
        self.params['code_verifier'] = self.request.POST.get('code_verifier')
        self.params['username'] = self.request.POST.get('username', '')
        self.params['password'] = self.request.POST.get('password', '')

    def validate_params(self):
        """Validate the request against the client and grant-type rules.

        Raises:
            TokenError: for any invalid client/grant/scope combination.
            UserAuthError: when the password-grant credentials are wrong.
        """
        try:
            self.client = Client.objects.get(client_id=self.params['client_id'])
        except Client.DoesNotExist:
            logger.debug('[Token] Client does not exist: %s', self.params['client_id'])
            raise TokenError('invalid_client')

        if self.client.client_type == 'confidential':
            # NOTE(review): plain '==' comparison of secrets; a constant-time
            # compare (django.utils.crypto.constant_time_compare) would resist
            # timing attacks -- consider changing.
            if not (self.client.client_secret == self.params['client_secret']):
                logger.debug('[Token] Invalid client secret: client %s do not have secret %s',
                             self.client.client_id, self.client.client_secret)
                raise TokenError('invalid_client')

        if self.params['grant_type'] == 'authorization_code':
            if not (self.params['redirect_uri'] in self.client.redirect_uris):
                logger.debug('[Token] Invalid redirect uri: %s', self.params['redirect_uri'])
                raise TokenError('invalid_client')

            try:
                self.code = Code.objects.get(code=self.params['code'])
            except Code.DoesNotExist:
                logger.debug('[Token] Code does not exist: %s', self.params['code'])
                raise TokenError('invalid_grant')

            # The authorization code must belong to this client and be unexpired.
            if not (self.code.client == self.client) \
                    or self.code.has_expired():
                logger.debug('[Token] Invalid code: invalid client or code has expired')
                raise TokenError('invalid_grant')

            # Validate PKCE parameters.
            if self.params['code_verifier']:
                if self.code.code_challenge_method == 'S256':
                    # S256: challenge is BASE64URL(SHA256(verifier)) with the
                    # '=' padding stripped (RFC 7636).
                    new_code_challenge = urlsafe_b64encode(
                        hashlib.sha256(self.params['code_verifier'].encode('ascii')).digest()
                    ).decode('utf-8').replace('=', '')
                else:
                    # 'plain' method: the challenge equals the verifier itself.
                    new_code_challenge = self.params['code_verifier']

                # TODO: We should explain the error.
                if not (new_code_challenge == self.code.code_challenge):
                    raise TokenError('invalid_grant')

        elif self.params['grant_type'] == 'password':
            if not settings.get('OIDC_GRANT_TYPE_PASSWORD_ENABLE'):
                raise TokenError('unsupported_grant_type')

            # Compatibility shim: newer Django authenticate() signatures take
            # the request as first positional argument, older ones do not.
            auth_args = (self.request,)
            try:
                inspect.getcallargs(authenticate, *auth_args)
            except TypeError:
                auth_args = ()

            user = authenticate(
                *auth_args,
                username=self.params['username'],
                password=self.params['password']
            )

            if not user:
                raise UserAuthError()
            self.user = user

        elif self.params['grant_type'] == 'refresh_token':
            if not self.params['refresh_token']:
                logger.debug('[Token] Missing refresh token')
                raise TokenError('invalid_grant')

            try:
                self.token = Token.objects.get(refresh_token=self.params['refresh_token'],
                                               client=self.client)
            except Token.DoesNotExist:
                logger.debug(
                    '[Token] Refresh token does not exist: %s', self.params['refresh_token'])
                raise TokenError('invalid_grant')

        elif self.params['grant_type'] == 'client_credentials':
            if not self.client._scope:
                logger.debug('[Token] Client using client credentials with empty scope')
                raise TokenError('invalid_scope')

        else:
            logger.debug('[Token] Invalid grant type: %s', self.params['grant_type'])
            raise TokenError('unsupported_grant_type')

    def create_response_dic(self):
        # Dispatch on the (already validated) grant type.
        if self.params['grant_type'] == 'authorization_code':
            return self.create_code_response_dic()
        elif self.params['grant_type'] == 'refresh_token':
            return self.create_refresh_response_dic()
        elif self.params['grant_type'] == 'password':
            return self.create_access_token_response_dic()
        elif self.params['grant_type'] == 'client_credentials':
            return self.create_client_credentials_response_dic()

    def create_code_response_dic(self):
        # See https://tools.ietf.org/html/rfc6749#section-4.1
        token = create_token(
            user=self.code.user,
            client=self.code.client,
            scope=self.code.scope)

        # Only OIDC authentication requests carry a populated ID token.
        if self.code.is_authentication:
            id_token_dic = create_id_token(
                user=self.code.user,
                aud=self.client.client_id,
                token=token,
                nonce=self.code.nonce,
                at_hash=token.at_hash,
                request=self.request,
                scope=token.scope,
            )
        else:
            id_token_dic = {}
        token.id_token = id_token_dic

        # Store the token.
        token.save()

        # We don't need to store the code anymore.
        self.code.delete()

        access_token = access_token_format(
            token=token,
            user=self.code.user,
            client=self.client,
            request=self.request)

        dic = {
            'access_token': access_token,
            'refresh_token': token.refresh_token,
            'token_type': 'bearer',
            'expires_in': settings.get('OIDC_TOKEN_EXPIRE'),
            'id_token': encode_jwt(id_token_dic, token.client),
        }

        return dic

    def create_refresh_response_dic(self):
        # See https://tools.ietf.org/html/rfc6749#section-6
        scope_param = self.params['scope']
        # A narrower scope may be requested on refresh; anything outside the
        # originally granted scope is rejected below.
        scope = (scope_param.split(' ') if scope_param else self.token.scope)
        unauthorized_scopes = set(scope) - set(self.token.scope)
        if unauthorized_scopes:
            raise TokenError('invalid_scope')

        token = create_token(
            user=self.token.user,
            client=self.token.client,
            scope=scope)

        # If the Token has an id_token it's an Authentication request.
        if self.token.id_token:
            id_token_dic = create_id_token(
                user=self.token.user,
                aud=self.client.client_id,
                token=token,
                nonce=None,
                at_hash=token.at_hash,
                request=self.request,
                scope=token.scope,
            )
        else:
            id_token_dic = {}
        token.id_token = id_token_dic

        # Store the token.
        token.save()

        # Forget the old token.
        self.token.delete()

        access_token = access_token_format(
            token=token,
            user=self.token.user,
            client=self.client,
            request=self.request)

        dic = {
            'access_token': access_token,
            'refresh_token': token.refresh_token,
            'token_type': 'bearer',
            'expires_in': settings.get('OIDC_TOKEN_EXPIRE'),
            'id_token': encode_jwt(id_token_dic, self.token.client),
        }

        return dic

    def create_access_token_response_dic(self):
        # See https://tools.ietf.org/html/rfc6749#section-4.3
        token = create_token(
            self.user,
            self.client,
            self.params['scope'].split(' '))

        id_token_dic = create_id_token(
            token=token,
            user=self.user,
            aud=self.client.client_id,
            # NOTE(review): this passes the literal string 'self.code.nonce',
            # not an attribute lookup. The password grant has no authorization
            # code (and hence no nonce), but the quoted value looks
            # unintentional -- confirm before relying on it.
            nonce='self.code.nonce',
            at_hash=token.at_hash,
            request=self.request,
            scope=token.scope,
        )

        token.id_token = id_token_dic
        token.save()

        access_token = access_token_format(
            token=token,
            user=self.user,
            client=self.client,
            request=self.request)

        return {
            'access_token': access_token,
            'refresh_token': token.refresh_token,
            'expires_in': settings.get('OIDC_TOKEN_EXPIRE'),
            'token_type': 'bearer',
            'id_token': encode_jwt(id_token_dic, token.client),
        }

    def create_client_credentials_response_dic(self):
        # See https://tools.ietf.org/html/rfc6749#section-4.4.3
        # No user is associated with a client-credentials token, and no
        # ID token is issued for this grant.
        token = create_token(
            user=None,
            client=self.client,
            scope=self.client._scope.split())
        token.save()

        access_token = access_token_format(
            token=token,
            client=self.client,
            request=self.request)

        return {
            'access_token': access_token,
            'expires_in': settings.get('OIDC_TOKEN_EXPIRE'),
            'token_type': 'bearer',
            'scope': self.client._scope,
        }

    @classmethod
    def response(cls, dic, status=200):
        """
        Create and return a response object.
        """
        response = JsonResponse(dic, status=status)
        # Token responses must never be cached by intermediaries (RFC 6749 s5.1).
        response['Cache-Control'] = 'no-store'
        response['Pragma'] = 'no-cache'

        return response
| 34.215434 | 97 | 0.575228 | import inspect
from base64 import urlsafe_b64encode
import hashlib
import logging
from django.contrib.auth import authenticate
from django.http import JsonResponse
from oidc_provider.lib.errors import (
TokenError,
UserAuthError,
)
from oidc_provider.lib.utils.oauth2 import extract_client_auth
from oidc_provider.lib.utils.token import (
create_id_token,
create_token,
encode_jwt,
access_token_format,
)
from oidc_provider.models import (
Client,
Code,
Token,
)
from oidc_provider import settings
logger = logging.getLogger(__name__)
class TokenEndpoint(object):
def __init__(self, request):
self.request = request
self.params = {}
self.user = None
self._extract_params()
def _extract_params(self):
client_id, client_secret = extract_client_auth(self.request)
self.params['client_id'] = client_id
self.params['client_secret'] = client_secret
self.params['redirect_uri'] = self.request.POST.get('redirect_uri', '')
self.params['grant_type'] = self.request.POST.get('grant_type', '')
self.params['code'] = self.request.POST.get('code', '')
self.params['state'] = self.request.POST.get('state', '')
self.params['scope'] = self.request.POST.get('scope', '')
self.params['refresh_token'] = self.request.POST.get('refresh_token', '')
self.params['code_verifier'] = self.request.POST.get('code_verifier')
self.params['username'] = self.request.POST.get('username', '')
self.params['password'] = self.request.POST.get('password', '')
def validate_params(self):
try:
self.client = Client.objects.get(client_id=self.params['client_id'])
except Client.DoesNotExist:
logger.debug('[Token] Client does not exist: %s', self.params['client_id'])
raise TokenError('invalid_client')
if self.client.client_type == 'confidential':
if not (self.client.client_secret == self.params['client_secret']):
logger.debug('[Token] Invalid client secret: client %s do not have secret %s',
self.client.client_id, self.client.client_secret)
raise TokenError('invalid_client')
if self.params['grant_type'] == 'authorization_code':
if not (self.params['redirect_uri'] in self.client.redirect_uris):
logger.debug('[Token] Invalid redirect uri: %s', self.params['redirect_uri'])
raise TokenError('invalid_client')
try:
self.code = Code.objects.get(code=self.params['code'])
except Code.DoesNotExist:
logger.debug('[Token] Code does not exist: %s', self.params['code'])
raise TokenError('invalid_grant')
if not (self.code.client == self.client) \
or self.code.has_expired():
logger.debug('[Token] Invalid code: invalid client or code has expired')
raise TokenError('invalid_grant')
if self.params['code_verifier']:
if self.code.code_challenge_method == 'S256':
new_code_challenge = urlsafe_b64encode(
hashlib.sha256(self.params['code_verifier'].encode('ascii')).digest()
).decode('utf-8').replace('=', '')
else:
new_code_challenge = self.params['code_verifier']
if not (new_code_challenge == self.code.code_challenge):
raise TokenError('invalid_grant')
elif self.params['grant_type'] == 'password':
if not settings.get('OIDC_GRANT_TYPE_PASSWORD_ENABLE'):
raise TokenError('unsupported_grant_type')
auth_args = (self.request,)
try:
inspect.getcallargs(authenticate, *auth_args)
except TypeError:
auth_args = ()
user = authenticate(
*auth_args,
username=self.params['username'],
password=self.params['password']
)
if not user:
raise UserAuthError()
self.user = user
elif self.params['grant_type'] == 'refresh_token':
if not self.params['refresh_token']:
logger.debug('[Token] Missing refresh token')
raise TokenError('invalid_grant')
try:
self.token = Token.objects.get(refresh_token=self.params['refresh_token'],
client=self.client)
except Token.DoesNotExist:
logger.debug(
'[Token] Refresh token does not exist: %s', self.params['refresh_token'])
raise TokenError('invalid_grant')
elif self.params['grant_type'] == 'client_credentials':
if not self.client._scope:
logger.debug('[Token] Client using client credentials with empty scope')
raise TokenError('invalid_scope')
else:
logger.debug('[Token] Invalid grant type: %s', self.params['grant_type'])
raise TokenError('unsupported_grant_type')
def create_response_dic(self):
if self.params['grant_type'] == 'authorization_code':
return self.create_code_response_dic()
elif self.params['grant_type'] == 'refresh_token':
return self.create_refresh_response_dic()
elif self.params['grant_type'] == 'password':
return self.create_access_token_response_dic()
elif self.params['grant_type'] == 'client_credentials':
return self.create_client_credentials_response_dic()
def create_code_response_dic(self):
ken = create_token(
user=self.code.user,
client=self.code.client,
scope=self.code.scope)
if self.code.is_authentication:
id_token_dic = create_id_token(
user=self.code.user,
aud=self.client.client_id,
token=token,
nonce=self.code.nonce,
at_hash=token.at_hash,
request=self.request,
scope=token.scope,
)
else:
id_token_dic = {}
token.id_token = id_token_dic
token.save()
self.code.delete()
access_token = access_token_format(
token=token,
user=self.code.user,
client=self.client,
request=self.request)
dic = {
'access_token': access_token,
'refresh_token': token.refresh_token,
'token_type': 'bearer',
'expires_in': settings.get('OIDC_TOKEN_EXPIRE'),
'id_token': encode_jwt(id_token_dic, token.client),
}
return dic
def create_refresh_response_dic(self):
# See https://tools.ietf.org/html/rfc6749#section-6
scope_param = self.params['scope']
scope = (scope_param.split(' ') if scope_param else self.token.scope)
unauthorized_scopes = set(scope) - set(self.token.scope)
if unauthorized_scopes:
raise TokenError('invalid_scope')
token = create_token(
user=self.token.user,
client=self.token.client,
scope=scope)
# If the Token has an id_token it's an Authentication request.
if self.token.id_token:
id_token_dic = create_id_token(
user=self.token.user,
aud=self.client.client_id,
token=token,
nonce=None,
at_hash=token.at_hash,
request=self.request,
scope=token.scope,
)
else:
id_token_dic = {}
token.id_token = id_token_dic
token.save()
self.token.delete()
access_token = access_token_format(
token=token,
user=self.token.user,
client=self.client,
request=self.request)
dic = {
'access_token': access_token,
'refresh_token': token.refresh_token,
'token_type': 'bearer',
'expires_in': settings.get('OIDC_TOKEN_EXPIRE'),
'id_token': encode_jwt(id_token_dic, self.token.client),
}
return dic
def create_access_token_response_dic(self):
ken = create_token(
self.user,
self.client,
self.params['scope'].split(' '))
id_token_dic = create_id_token(
token=token,
user=self.user,
aud=self.client.client_id,
nonce='self.code.nonce',
at_hash=token.at_hash,
request=self.request,
scope=token.scope,
)
token.id_token = id_token_dic
token.save()
access_token = access_token_format(
token=token,
user=self.user,
client=self.client,
request=self.request)
return {
'access_token': access_token,
'refresh_token': token.refresh_token,
'expires_in': settings.get('OIDC_TOKEN_EXPIRE'),
'token_type': 'bearer',
'id_token': encode_jwt(id_token_dic, token.client),
}
def create_client_credentials_response_dic(self):
n = create_token(
user=None,
client=self.client,
scope=self.client._scope.split())
token.save()
access_token = access_token_format(
token=token,
client=self.client,
request=self.request)
return {
'access_token': access_token,
'expires_in': settings.get('OIDC_TOKEN_EXPIRE'),
'token_type': 'bearer',
'scope': self.client._scope,
}
@classmethod
def response(cls, dic, status=200):
response = JsonResponse(dic, status=status)
response['Cache-Control'] = 'no-store'
response['Pragma'] = 'no-cache'
return response
| true | true |
1c2d84546cc93c9b22053ab15edd6dcb7459cd86 | 4,274 | py | Python | dsgn/utils/torch_utils.py | joshliu11/DSGN | ac693e748ff3a7372b1292c2b7b3796854072030 | [
"MIT"
] | 166 | 2020-04-20T09:30:54.000Z | 2021-05-16T07:42:15.000Z | dsgn/utils/torch_utils.py | joshliu11/DSGN | ac693e748ff3a7372b1292c2b7b3796854072030 | [
"MIT"
] | 15 | 2020-05-12T23:58:01.000Z | 2021-05-05T12:03:51.000Z | dsgn/utils/torch_utils.py | joshliu11/DSGN | ac693e748ff3a7372b1292c2b7b3796854072030 | [
"MIT"
] | 35 | 2020-04-27T13:11:42.000Z | 2021-05-16T07:45:02.000Z | import numpy as np
import torch
def project_image_to_rect(uv_depth, P):
    """Back-project image points with depth into the rectified camera frame.

    Args:
        uv_depth: tensor of shape (3, N) whose rows are (u, v, depth) --
            pixel coordinates plus depth values.
        P: (3, 4) camera projection matrix; P[0, 3] / P[1, 3] carry the
            baseline terms relative to the reference camera.

    Returns:
        Tensor of shape (3, N) with rows (x, y, z) in the rectified frame.
    """
    c_u = P[0, 2]  # principal point
    c_v = P[1, 2]
    f_u = P[0, 0]  # focal lengths
    f_v = P[1, 1]
    b_x = P[0, 3] / (-f_u)  # relative baseline offsets
    b_y = P[1, 3] / (-f_v)
    # Pinhole back-projection in camera coordinates. (The unused point-count
    # local from the original implementation has been removed.)
    x = ((uv_depth[0] - c_u) * uv_depth[2]) / f_u + b_x
    y = ((uv_depth[1] - c_v) * uv_depth[2]) / f_v + b_y
    return torch.stack([x, y, uv_depth[2]], dim=0)
def project_disp_to_depth_new(points_cam, Proj, baseline=0.54):
    """Convert an image-coordinate disparity volume into camera-frame points.

    Args:
        points_cam: tensor of shape (3, H, W, D) whose channels are pixel
            x-coordinates, pixel y-coordinates, and disparity values.
        Proj: (3, 4) camera projection matrix; Proj[0, 0] is the focal length
            used for the disparity-to-depth conversion.
        baseline: stereo baseline. Defaults to 0.54, which was previously
            hard-coded (the KITTI stereo rig baseline in meters -- confirm for
            other datasets).

    Returns:
        Tensor of shape (3, H, W, D) of (x, y, z) camera-frame coordinates.
    """
    xs, ys, disp = points_cam[0:1], points_cam[1:2], points_cam[2:3]
    _, h, w, d = disp.shape
    # Zero/negative disparities are invalid; adding (1 - mask) to them keeps
    # the division finite while leaving valid entries untouched.
    mask = disp > 0
    depth = Proj[0, 0] * baseline / (disp + 1. - mask.float())
    points = torch.cat([xs, ys, depth], dim=0)
    points = points.reshape((3, -1))
    # Back-project into rectified camera coordinates, then restore the
    # (3, H, W, D) volume layout.
    cloud = project_image_to_rect(points, Proj)
    cloud = cloud.reshape(3, h, w, d)
    return cloud
def project_rect_to_image(pts_3d_rect, P):
    """Project rectified-camera 3D points onto the image plane.

    Args:
        pts_3d_rect: (N, 3) tensor of camera-frame points.
        P: (3, 4) projection matrix.

    Returns:
        (N, 2) tensor of pixel coordinates (u, v).
    """
    num_pts = pts_3d_rect.shape[0]
    ones = torch.ones((num_pts, 1))
    if pts_3d_rect.is_cuda:
        ones = ones.cuda()
    # Homogenize, then apply P in one matrix product: (N, 4) x (4, 3) -> (N, 3).
    pts_hom = torch.cat([pts_3d_rect, ones], dim=1)
    proj = torch.mm(pts_hom, P.t())
    # Perspective divide by the depth component.
    proj[:, 0] /= proj[:, 2]
    proj[:, 1] /= proj[:, 2]
    return proj[:, 0:2]
# def compute_locations(h, w, stride, device):
# shifts_x = torch.arange(
# 0, w * stride, step=stride,
# dtype=torch.float32, device=device
# )
# shifts_y = torch.arange(
# 0, h * stride, step=stride,
# dtype=torch.float32, device=device
# )
# shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
# shift_x = shift_x.reshape(-1)
# shift_y = shift_y.reshape(-1)
# locations = torch.stack((shift_x, shift_y), dim=1) + stride // 2
# return locations
# def compute_locations_3d(h, w, stride, device):
# shifts_x = torch.arange(
# 0, w * stride, step=stride,
# dtype=torch.float32, device=device
# )
# shifts_y = torch.arange(
# 0, h * stride, step=stride,
# dtype=torch.float32, device=device
# )
# shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
# shift_x = shift_x.reshape(-1)
# shift_y = shift_y.reshape(-1)
# locations = torch.stack((shift_x, shift_y), dim=1) + stride // 2
# return locations
def compute_locations_bev(Z_MIN, Z_MAX, VOXEL_Z_SIZE, X_MIN, X_MAX, VOXEL_X_SIZE, device):
    """Enumerate bird's-eye-view voxel-center coordinates as (x, z) pairs.

    The half-voxel offset places each location at a cell center; the tiny
    signed nudge on the range end guards against floating-point rounding when
    the extent is an exact multiple of the voxel size (np.sign keeps it
    correct for negative voxel sizes).

    Returns:
        Tensor of shape (num_z * num_x, 2) on ``device``, columns (x, z).
    """
    eps_z = np.sign(VOXEL_Z_SIZE) * 1e-10
    eps_x = np.sign(VOXEL_X_SIZE) * 1e-10
    centers_z = torch.arange(Z_MIN, Z_MAX - eps_z, step=VOXEL_Z_SIZE,
                             dtype=torch.float32).to(device) + VOXEL_Z_SIZE / 2.
    centers_x = torch.arange(X_MIN, X_MAX - eps_x, step=VOXEL_X_SIZE,
                             dtype=torch.float32).to(device) + VOXEL_X_SIZE / 2.
    grid_z, grid_x = torch.meshgrid(centers_z, centers_x)
    return torch.stack([grid_x, grid_z], dim=-1).reshape(-1, 2)
def compute_centerness_targets(reg_targets):
    """Compute FCOS-style centerness from per-location regression targets.

    Args:
        reg_targets: (N, 4) tensor of distances; columns 0/2 form one axis
            pair and columns 1/3 the other.

    Returns:
        (N,) tensor: sqrt of the product of min/max ratios per axis pair,
        1.0 at perfectly centered locations and smaller toward box edges.
    """
    pair_a = reg_targets[:, [0, 2]]
    pair_b = reg_targets[:, [1, 3]]
    ratio_a = pair_a.min(dim=-1)[0] / pair_a.max(dim=-1)[0]
    ratio_b = pair_b.min(dim=-1)[0] / pair_b.max(dim=-1)[0]
    return torch.sqrt(ratio_a * ratio_b)
# def compute_corners_R(dimensions, rot):
# num_boxes = dimensions.shape[0]
# h, w, l = torch.split( dimensions.view(num_boxes, 1, 3), [1, 1, 1], dim=2)
# # zeros = torch.zeros((num_boxes, 1, 1), dtype=torch.float32).cuda()
# corners = torch.cat([torch.cat([l/2, l/2, -l/2, -l/2, l/2, l/2, -l/2, -l/2], dim=2),
# torch.cat([h/2, h/2, h/2, h/2, -h/2, -h/2, -h/2, -h/2], dim=2),
# torch.cat([w/2, -w/2, -w/2, w/2, w/2, -w/2, -w/2, w/2], dim=2)], dim=1)
# corners = torch.matmul(rot, corners)
# return corners
# def angle_to_bevrotation(sin, cos):
# zeros = torch.zeros_like(sin)
# rot_1 = torch.stack([cos, zeros, -sin], dim=1)
# rot_2 = torch.stack([zeros, zeros+1., zeros], dim=1)
# rot_3 = torch.stack([sin, zeros, cos], dim=1)
# rot = torch.stack([rot_1, rot_2, rot_3], dim=1)
# return rot
def convert_to_viewpoint_torch(ry, z, x):
    """Elementwise: return ry + atan2(z, x) - pi/2 (angle relative to the
    viewing ray through bird's-eye position (x, z))."""
    ray_angle = torch.atan2(z, x)
    return ry + ray_angle - np.pi / 2
def convert_to_ry_torch(alpha, z, x):
    """Elementwise inverse of convert_to_viewpoint_torch:
    return alpha - atan2(z, x) + pi/2."""
    ray_angle = torch.atan2(z, x)
    return alpha - ray_angle + np.pi / 2
| 36.529915 | 98 | 0.606458 | import numpy as np
import torch
def project_image_to_rect(uv_depth, P):
c_u = P[0,2]
c_v = P[1,2]
f_u = P[0,0]
f_v = P[1,1]
b_x = P[0,3]/(-f_u)
b_y = P[1,3]/(-f_v)
n = uv_depth.shape[1]
x = ((uv_depth[0]-c_u)*uv_depth[2])/f_u + b_x
y = ((uv_depth[1]-c_v)*uv_depth[2])/f_v + b_y
return torch.stack([x, y, uv_depth[2]], dim=0)
def project_disp_to_depth_new(points_cam, Proj):
xs, ys, disp = points_cam[0:1], points_cam[1:2], points_cam[2:3]
_, h, w, d = disp.shape
baseline = 0.54
mask = disp > 0
depth = Proj[0,0] * baseline / (disp + 1. - mask.float())
points = torch.cat([xs, ys, depth], dim=0)
points = points.reshape((3, -1))
cloud = project_image_to_rect(points, Proj)
cloud = cloud.reshape(3, h, w, d)
return cloud
def project_rect_to_image(pts_3d_rect, P):
n = pts_3d_rect.shape[0]
ones = torch.ones((n,1))
if pts_3d_rect.is_cuda:
ones = ones.cuda()
pts_3d_rect = torch.cat([pts_3d_rect, ones], dim=1)
pts_2d = torch.mm(pts_3d_rect, torch.transpose(P, 0, 1))
pts_2d[:,0] /= pts_2d[:,2]
pts_2d[:,1] /= pts_2d[:,2]
return pts_2d[:,0:2]
def compute_locations_bev(Z_MIN, Z_MAX, VOXEL_Z_SIZE, X_MIN, X_MAX, VOXEL_X_SIZE, device):
shifts_z = torch.arange(Z_MIN, Z_MAX - np.sign(VOXEL_Z_SIZE) * 1e-10, step=VOXEL_Z_SIZE,
dtype=torch.float32).to(device) + VOXEL_Z_SIZE / 2.
shifts_x = torch.arange(X_MIN, X_MAX - np.sign(VOXEL_X_SIZE) * 1e-10, step=VOXEL_X_SIZE,
dtype=torch.float32).to(device) + VOXEL_X_SIZE / 2.
shifts_z, shifts_x = torch.meshgrid(shifts_z, shifts_x)
locations_bev = torch.stack([shifts_x, shifts_z], dim=-1)
locations_bev = locations_bev.reshape(-1, 2)
return locations_bev
def compute_centerness_targets(reg_targets):
left_right = reg_targets[:, [0, 2]]
top_bottom = reg_targets[:, [1, 3]]
centerness = (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * \
(top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0])
return torch.sqrt(centerness)
ry + torch.atan2(z, x) - np.pi / 2
def convert_to_ry_torch(alpha, z, x):
return alpha - torch.atan2(z, x) + np.pi / 2
| true | true |
1c2d84c0446e87ba738145f6ff8987981568c650 | 73 | py | Python | launcher.py | heffer-dev/proxy-client | df51852c7847771ee84637028280743acdd133a4 | [
"MIT"
] | null | null | null | launcher.py | heffer-dev/proxy-client | df51852c7847771ee84637028280743acdd133a4 | [
"MIT"
] | null | null | null | launcher.py | heffer-dev/proxy-client | df51852c7847771ee84637028280743acdd133a4 | [
"MIT"
] | null | null | null | from lib.bot import bot
# Version string handed to the bot at startup.
VERSION = "0.1.1"
# Start the bot with the current version; run() comes from lib.bot, whose
# behavior is not visible here (presumably blocks until shutdown -- confirm).
bot.run(VERSION)
| 8.111111 | 25 | 0.575342 | from lib.bot import bot
VERSION = "0.1.1"
bot.run(VERSION)
| true | true |
1c2d84c8511a592ba1dd911ec6bd397892a05851 | 521 | py | Python | myGym/envs/__init__.py | gabinsane/myGym | a41c6b11a47eaf19d0c69e67aeb48cf7a999d45a | [
"MIT"
] | 1 | 2021-04-23T20:52:39.000Z | 2021-04-23T20:52:39.000Z | myGym/envs/__init__.py | gabinsane/myGym | a41c6b11a47eaf19d0c69e67aeb48cf7a999d45a | [
"MIT"
] | null | null | null | myGym/envs/__init__.py | gabinsane/myGym | a41c6b11a47eaf19d0c69e67aeb48cf7a999d45a | [
"MIT"
] | 1 | 2021-01-22T16:46:48.000Z | 2021-01-22T16:46:48.000Z | from gym.envs.registration import register
# Register every myGym environment with Gym so gym.make(id) can construct it.
# Each spec is (environment id, entry point, max episode steps).
_ENV_SPECS = (
    ("CrowWorkspaceEnv-v0", "myGym.envs.crow_workspace_env:CrowWorkspaceEnv", 8192),
    ("Gym-v0", "myGym.envs.gym_env:GymEnv", 8192),
    ("HackEnv-v0", "myGym.envs.hack_env:HackEnv", 8192),
    ("ObjectTestEnv-v0", "myGym.envs.object_test_env:ObjectTestEnv", 25600),
)

for _env_id, _entry_point, _max_steps in _ENV_SPECS:
    register(
        id=_env_id,
        entry_point=_entry_point,
        max_episode_steps=_max_steps,
    )
)
| 19.296296 | 65 | 0.727447 | from gym.envs.registration import register
register(
id="CrowWorkspaceEnv-v0",
entry_point="myGym.envs.crow_workspace_env:CrowWorkspaceEnv",
max_episode_steps=8192,
)
register(
id="Gym-v0",
entry_point="myGym.envs.gym_env:GymEnv",
max_episode_steps=8192,
)
register(
id="HackEnv-v0",
entry_point="myGym.envs.hack_env:HackEnv",
max_episode_steps=8192,
)
register(
id="ObjectTestEnv-v0",
entry_point="myGym.envs.object_test_env:ObjectTestEnv",
max_episode_steps=25600,
)
| true | true |
1c2d86f9ab621946794bc71ef7bd912fedb02281 | 1,606 | py | Python | kolibri/content/utils/annotation.py | rtibbles/kolibri | 7efdf0497738c793f281013f9913f8ecc1a55f10 | [
"MIT"
] | null | null | null | kolibri/content/utils/annotation.py | rtibbles/kolibri | 7efdf0497738c793f281013f9913f8ecc1a55f10 | [
"MIT"
] | 7 | 2016-06-23T16:01:02.000Z | 2018-12-01T22:15:13.000Z | kolibri/content/utils/annotation.py | rtibbles/kolibri | 7efdf0497738c793f281013f9913f8ecc1a55f10 | [
"MIT"
] | 1 | 2021-06-01T23:15:26.000Z | 2021-06-01T23:15:26.000Z | from kolibri.utils.time import local_now
from django.conf import settings
from ..content_db_router import using_content_database
from ..models import ChannelMetadata, ChannelMetadataCache
from .channels import get_channel_ids_for_content_database_dir
def update_channel_metadata_cache():
    """
    After a channel is imported, or when the devserver is started,
    scan through the settings.CONTENT_DATABASE_DIR folder for all channel content databases,
    and pull the data from each database's ChannelMetadata object to update the ChannelMetadataCache
    object in the default database to ensure they are in sync.
    """
    # Each channel ships as its own sqlite database file; the file names are channel ids.
    db_names = get_channel_ids_for_content_database_dir(settings.CONTENT_DATABASE_DIR)
    # Delete ChannelMetadataCache objects in default db that are not found in CONTENT_DATABASE_DIR
    ChannelMetadataCache.objects.exclude(id__in=db_names).delete()
    # sync the ChannelMetadataCache objects in default db with ChannelMetadata objects in CONTENT_DATABASE_DIR
    for db_name in db_names:
        # Route the ChannelMetadata query to that channel's own database file.
        with using_content_database(db_name):
            # NOTE(review): assumes every content database has at least one
            # ChannelMetadata row; an empty database would raise IndexError here — confirm.
            update_values = ChannelMetadata.objects.values()[0]
            ch_metadata_obj, _ = ChannelMetadataCache.objects.update_or_create(
                id=db_name,
                defaults=update_values,
            )
            # Records a new last_updated only if channel is brand new. Does not
            # handle case where channel's version is upgraded, which is not
            # yet supported on UI anyway
            if ch_metadata_obj.last_updated is None:
                ch_metadata_obj.last_updated = local_now()
                ch_metadata_obj.save()
| 45.885714 | 110 | 0.755293 | from kolibri.utils.time import local_now
from django.conf import settings
from ..content_db_router import using_content_database
from ..models import ChannelMetadata, ChannelMetadataCache
from .channels import get_channel_ids_for_content_database_dir
def update_channel_metadata_cache():
    """
    Scan settings.CONTENT_DATABASE_DIR for channel content databases and sync
    each one's ChannelMetadata into the default database's ChannelMetadataCache.
    """
    # Each channel ships as its own sqlite database file; the file names are channel ids.
    db_names = get_channel_ids_for_content_database_dir(settings.CONTENT_DATABASE_DIR)
    # Drop cache rows for channels whose database file no longer exists.
    ChannelMetadataCache.objects.exclude(id__in=db_names).delete()
    for db_name in db_names:
        # Route the ChannelMetadata query to that channel's own database file.
        with using_content_database(db_name):
            # NOTE(review): assumes every content database has at least one
            # ChannelMetadata row; an empty database would raise IndexError here — confirm.
            update_values = ChannelMetadata.objects.values()[0]
            ch_metadata_obj, _ = ChannelMetadataCache.objects.update_or_create(
                id=db_name,
                defaults=update_values,
            )
            # Stamp last_updated only for brand-new channels; version upgrades
            # are not handled here (not yet supported on UI anyway).
            if ch_metadata_obj.last_updated is None:
                ch_metadata_obj.last_updated = local_now()
                ch_metadata_obj.save()
| true | true |
1c2d87445e38339928f3b6ae5fffb73af6f03885 | 7,049 | py | Python | jack/readers/knowledge_base_population/models.py | mitchelljeff/hack1 | 990d873cbcd40d2978f44560016d18a76800908e | [
"MIT"
] | 1 | 2018-10-23T12:07:31.000Z | 2018-10-23T12:07:31.000Z | jack/readers/knowledge_base_population/models.py | mitchelljeff/hack1 | 990d873cbcd40d2978f44560016d18a76800908e | [
"MIT"
] | null | null | null | jack/readers/knowledge_base_population/models.py | mitchelljeff/hack1 | 990d873cbcd40d2978f44560016d18a76800908e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from jack.core import *
from jack.core.data_structures import *
from jack.readers.knowledge_base_population.shared import KBPPorts
from jack.util.map import numpify
class KnowledgeGraphEmbeddingInputModule(OnlineInputModule[List[List[int]]]):
    """Input module for KBP: maps triple questions ("subj pred obj" strings)
    to integer index triples using vocabularies built from the training data."""
    def __init__(self, shared_resources):
        self.shared_resources = shared_resources
    def setup_from_data(self, data: Iterable[Tuple[QASetting, List[Answer]]]):
        """Build entity/predicate vocabularies from the training triples and
        store the index maps in the shared config for the model module."""
        # Each question string is a whitespace-separated (subject, predicate, object) triple.
        self.triples = [x[0].question.split() for x in data]
        self.entity_set = {s for [s, _, _] in self.triples} | {o for [_, _, o] in self.triples}
        self.predicate_set = {p for [_, p, _] in self.triples}
        # NOTE(review): iteration order of a set is not deterministic across runs,
        # so these index assignments vary between processes — confirm this is acceptable.
        self.entity_to_index = {entity: index for index, entity in enumerate(self.entity_set)}
        self.predicate_to_index = {predicate: index for index, predicate in enumerate(self.predicate_set)}
        self.shared_resources.config['entity_to_index'] = self.entity_to_index
        self.shared_resources.config['predicate_to_index'] = self.predicate_to_index
        return self.shared_resources
    @property
    def training_ports(self) -> List[TensorPort]:
        # No extra tensors are needed at training time beyond the output ports.
        return []
    def preprocess(self, questions: List[QASetting],
                   answers: Optional[List[List[Answer]]] = None,
                   is_eval: bool = False) -> List[List[int]]:
        """Converts questions to triples of vocabulary indices.

        Raises KeyError for entities/predicates unseen during setup_from_data.
        """
        triples = []
        for qa_setting in questions:
            s, p, o = qa_setting.question.split()
            s_idx, o_idx = self.entity_to_index[s], self.entity_to_index[o]
            p_idx = self.predicate_to_index[p]
            triples.append([s_idx, p_idx, o_idx])
        return triples
    def create_batch(self, triples: List[List[int]],
                     is_eval: bool, with_answers: bool) -> Mapping[TensorPort, np.ndarray]:
        """Pack index triples into a numpified feed dict; support/candidates are dummies."""
        batch_size = len(triples)
        xy_dict = {
            Ports.Input.multiple_support: [0] * batch_size,
            Ports.Input.question: triples,
            Ports.Input.atomic_candidates: [0] * batch_size
        }
        return numpify(xy_dict)
    @property
    def output_ports(self) -> List[TensorPort]:
        return [Ports.Input.question]
class KnowledgeGraphEmbeddingModelModule(TFModelModule):
    """TF (1.x) model module scoring (s, p, o) index triples with a
    knowledge-graph embedding model (DistMult by default)."""
    def __init__(self, *args, model_name='DistMult', **kwargs):
        super().__init__(*args, **kwargs)
        # Name resolved at run time via scores.get_function in forward_pass.
        self.model_name = model_name
    @property
    def input_ports(self) -> List[TensorPort]:
        return [Ports.Input.question]
    @property
    def output_ports(self) -> List[TensorPort]:
        return [KBPPorts.triple_logits]
    @property
    def training_input_ports(self) -> List[TensorPort]:
        return [Ports.Input.question, KBPPorts.triple_logits]
    @property
    def training_output_ports(self) -> List[TensorPort]:
        return [Ports.loss, Ports.Prediction.logits]
    def create_training_output(self, shared_resources: SharedResources,
                               question: tf.Tensor, logits: tf.Tensor) -> Sequence[tf.Tensor]:
        """Build the training loss: sigmoid cross-entropy over the given positive
        triples plus two negatives per positive (corrupted subject, corrupted object)."""
        positive_labels = tf.ones_like(logits)
        nb_entities = len(self.entity_to_index)
        # One random replacement entity per triple, for each corruption side.
        random_subject_indices = tf.random_uniform(shape=(tf.shape(question)[0], 1),
                                                   minval=0, maxval=nb_entities, dtype=tf.int32)
        random_object_indices = tf.random_uniform(shape=(tf.shape(question)[0], 1),
                                                  minval=0, maxval=nb_entities, dtype=tf.int32)
        # question_corrupted_subjects[:, 0].assign(random_indices)
        # Columns are [subject, predicate, object]; swap one end, keep the rest.
        question_corrupted_subjects = tf.concat(values=[random_subject_indices, question[:, 1:]], axis=1)
        question_corrupted_objects = tf.concat(values=[question[:, :2], random_object_indices], axis=1)
        negative_subject_logits = self.forward_pass(shared_resources, question_corrupted_subjects)
        negative_object_logits = self.forward_pass(shared_resources, question_corrupted_objects)
        # Stack positives then both negative sets along the batch axis.
        logits = tf.concat(values=[logits, negative_subject_logits, negative_object_logits], axis=0)
        negative_labels = tf.zeros_like(positive_labels)
        labels = tf.concat(values=[positive_labels, negative_labels, negative_labels], axis=0)
        losses = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels)
        loss = tf.reduce_mean(losses, axis=0)
        return loss, logits
    def create_output(self, shared_resources: SharedResources, question: tf.Tensor) -> Sequence[tf.Tensor]:
        """Create the embedding variables and return the logits for the input triples."""
        with tf.variable_scope('knowledge_graph_embedding'):
            self.embedding_size = shared_resources.config['repr_dim']
            # Index maps were written to the config by the input module's setup_from_data.
            self.entity_to_index = shared_resources.config['entity_to_index']
            self.predicate_to_index = shared_resources.config['predicate_to_index']
            nb_entities = len(self.entity_to_index)
            nb_predicates = len(self.predicate_to_index)
            self.entity_embeddings = tf.get_variable('entity_embeddings',
                                                     [nb_entities, self.embedding_size],
                                                     initializer=tf.contrib.layers.xavier_initializer(),
                                                     dtype='float32')
            self.predicate_embeddings = tf.get_variable('predicate_embeddings',
                                                        [nb_predicates, self.embedding_size],
                                                        initializer=tf.contrib.layers.xavier_initializer(),
                                                        dtype='float32')
            logits = self.forward_pass(shared_resources, question)
        return logits,
    def forward_pass(self, shared_resources, question):
        """Score a [batch, 3] tensor of (s, p, o) indices with the configured model."""
        subject_idx = question[:, 0]
        predicate_idx = question[:, 1]
        object_idx = question[:, 2]
        # Entity vectors are max-norm-clipped to 1; predicate vectors are not.
        subject_emb = tf.nn.embedding_lookup(self.entity_embeddings, subject_idx, max_norm=1.0)
        predicate_emb = tf.nn.embedding_lookup(self.predicate_embeddings, predicate_idx)
        object_emb = tf.nn.embedding_lookup(self.entity_embeddings, object_idx, max_norm=1.0)
        # Imported lazily to avoid a circular import at module load time — confirm.
        from jack.readers.knowledge_base_population import scores
        assert self.model_name is not None
        model_class = scores.get_function(self.model_name)
        model = model_class(
            subject_embeddings=subject_emb,
            predicate_embeddings=predicate_emb,
            object_embeddings=object_emb)
        return model()
class KnowledgeGraphEmbeddingOutputModule(OutputModule):
    """Turns the scorer's per-triple logits into score-only Answer objects."""
    def setup(self):
        # Nothing to initialise.
        pass
    @property
    def input_ports(self) -> List[TensorPort]:
        return [KBPPorts.triple_logits]
    def __call__(self, inputs: Sequence[QASetting], logits: np.ndarray) -> Sequence[Answer]:
        """Emit one Answer per question, in batch order, carrying its logit as score."""
        # The answer text is unused downstream; only the triple score matters.
        return [Answer(None, score=logits[position]) for position in range(len(inputs))]
| 42.721212 | 107 | 0.643496 |
from jack.core import *
from jack.core.data_structures import *
from jack.readers.knowledge_base_population.shared import KBPPorts
from jack.util.map import numpify
class KnowledgeGraphEmbeddingInputModule(OnlineInputModule[List[List[int]]]):
    """Input module for KBP: maps triple questions ("subj pred obj" strings)
    to integer index triples using vocabularies built from the training data."""
    def __init__(self, shared_resources):
        self.shared_resources = shared_resources
    def setup_from_data(self, data: Iterable[Tuple[QASetting, List[Answer]]]):
        """Build entity/predicate vocabularies from the training triples and
        store the index maps in the shared config for the model module."""
        # Each question string is a whitespace-separated (subject, predicate, object) triple.
        self.triples = [x[0].question.split() for x in data]
        self.entity_set = {s for [s, _, _] in self.triples} | {o for [_, _, o] in self.triples}
        self.predicate_set = {p for [_, p, _] in self.triples}
        # NOTE(review): set iteration order is not deterministic across runs,
        # so these index assignments vary between processes — confirm this is acceptable.
        self.entity_to_index = {entity: index for index, entity in enumerate(self.entity_set)}
        self.predicate_to_index = {predicate: index for index, predicate in enumerate(self.predicate_set)}
        self.shared_resources.config['entity_to_index'] = self.entity_to_index
        self.shared_resources.config['predicate_to_index'] = self.predicate_to_index
        return self.shared_resources
    @property
    def training_ports(self) -> List[TensorPort]:
        # No extra tensors are needed at training time beyond the output ports.
        return []
    def preprocess(self, questions: List[QASetting],
                   answers: Optional[List[List[Answer]]] = None,
                   is_eval: bool = False) -> List[List[int]]:
        """Convert questions to triples of vocabulary indices.

        Raises KeyError for entities/predicates unseen during setup_from_data.
        """
        triples = []
        for qa_setting in questions:
            s, p, o = qa_setting.question.split()
            s_idx, o_idx = self.entity_to_index[s], self.entity_to_index[o]
            p_idx = self.predicate_to_index[p]
            triples.append([s_idx, p_idx, o_idx])
        return triples
    def create_batch(self, triples: List[List[int]],
                     is_eval: bool, with_answers: bool) -> Mapping[TensorPort, np.ndarray]:
        """Pack index triples into a numpified feed dict; support/candidates are dummies."""
        batch_size = len(triples)
        xy_dict = {
            Ports.Input.multiple_support: [0] * batch_size,
            Ports.Input.question: triples,
            Ports.Input.atomic_candidates: [0] * batch_size
        }
        return numpify(xy_dict)
    @property
    def output_ports(self) -> List[TensorPort]:
        return [Ports.Input.question]
class KnowledgeGraphEmbeddingModelModule(TFModelModule):
    """TF (1.x) model module scoring (s, p, o) index triples with a
    knowledge-graph embedding model (DistMult by default)."""
    def __init__(self, *args, model_name='DistMult', **kwargs):
        super().__init__(*args, **kwargs)
        # Name resolved at run time via scores.get_function in forward_pass.
        self.model_name = model_name
    @property
    def input_ports(self) -> List[TensorPort]:
        return [Ports.Input.question]
    @property
    def output_ports(self) -> List[TensorPort]:
        return [KBPPorts.triple_logits]
    @property
    def training_input_ports(self) -> List[TensorPort]:
        return [Ports.Input.question, KBPPorts.triple_logits]
    @property
    def training_output_ports(self) -> List[TensorPort]:
        return [Ports.loss, Ports.Prediction.logits]
    def create_training_output(self, shared_resources: SharedResources,
                               question: tf.Tensor, logits: tf.Tensor) -> Sequence[tf.Tensor]:
        """Build the training loss: sigmoid cross-entropy over the given positive
        triples plus two negatives per positive (corrupted subject, corrupted object)."""
        positive_labels = tf.ones_like(logits)
        nb_entities = len(self.entity_to_index)
        # One random replacement entity per triple, for each corruption side.
        random_subject_indices = tf.random_uniform(shape=(tf.shape(question)[0], 1),
                                                   minval=0, maxval=nb_entities, dtype=tf.int32)
        random_object_indices = tf.random_uniform(shape=(tf.shape(question)[0], 1),
                                                  minval=0, maxval=nb_entities, dtype=tf.int32)
        # Columns are [subject, predicate, object]; swap one end, keep the rest.
        question_corrupted_subjects = tf.concat(values=[random_subject_indices, question[:, 1:]], axis=1)
        question_corrupted_objects = tf.concat(values=[question[:, :2], random_object_indices], axis=1)
        negative_subject_logits = self.forward_pass(shared_resources, question_corrupted_subjects)
        negative_object_logits = self.forward_pass(shared_resources, question_corrupted_objects)
        # Stack positives then both negative sets along the batch axis.
        logits = tf.concat(values=[logits, negative_subject_logits, negative_object_logits], axis=0)
        negative_labels = tf.zeros_like(positive_labels)
        labels = tf.concat(values=[positive_labels, negative_labels, negative_labels], axis=0)
        losses = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels)
        loss = tf.reduce_mean(losses, axis=0)
        return loss, logits
    def create_output(self, shared_resources: SharedResources, question: tf.Tensor) -> Sequence[tf.Tensor]:
        """Create the embedding variables and return the logits for the input triples."""
        with tf.variable_scope('knowledge_graph_embedding'):
            self.embedding_size = shared_resources.config['repr_dim']
            # Index maps were written to the config by the input module's setup_from_data.
            self.entity_to_index = shared_resources.config['entity_to_index']
            self.predicate_to_index = shared_resources.config['predicate_to_index']
            nb_entities = len(self.entity_to_index)
            nb_predicates = len(self.predicate_to_index)
            self.entity_embeddings = tf.get_variable('entity_embeddings',
                                                     [nb_entities, self.embedding_size],
                                                     initializer=tf.contrib.layers.xavier_initializer(),
                                                     dtype='float32')
            self.predicate_embeddings = tf.get_variable('predicate_embeddings',
                                                        [nb_predicates, self.embedding_size],
                                                        initializer=tf.contrib.layers.xavier_initializer(),
                                                        dtype='float32')
            logits = self.forward_pass(shared_resources, question)
        return logits,
    def forward_pass(self, shared_resources, question):
        """Score a [batch, 3] tensor of (s, p, o) indices with the configured model."""
        subject_idx = question[:, 0]
        predicate_idx = question[:, 1]
        object_idx = question[:, 2]
        # Entity vectors are max-norm-clipped to 1; predicate vectors are not.
        subject_emb = tf.nn.embedding_lookup(self.entity_embeddings, subject_idx, max_norm=1.0)
        predicate_emb = tf.nn.embedding_lookup(self.predicate_embeddings, predicate_idx)
        object_emb = tf.nn.embedding_lookup(self.entity_embeddings, object_idx, max_norm=1.0)
        # Imported lazily to avoid a circular import at module load time — confirm.
        from jack.readers.knowledge_base_population import scores
        assert self.model_name is not None
        model_class = scores.get_function(self.model_name)
        model = model_class(
            subject_embeddings=subject_emb,
            predicate_embeddings=predicate_emb,
            object_embeddings=object_emb)
        return model()
class KnowledgeGraphEmbeddingOutputModule(OutputModule):
    """Wraps per-triple logits into Answer objects (score only, no text)."""
    def setup(self):
        # Nothing to initialise.
        pass
    @property
    def input_ports(self) -> List[TensorPort]:
        return [KBPPorts.triple_logits]
    def __call__(self, inputs: Sequence[QASetting], logits: np.ndarray) -> Sequence[Answer]:
        """Return one Answer per input question, carrying its logit as the score."""
        # len(inputs) == batch size; logits holds one score per question.
        results = []
        for index_in_batch, question in enumerate(inputs):
            score = logits[index_in_batch]
            results.append(Answer(None, score=score))
        return results
| true | true |
1c2d8797181f7057d1804ac8c677cac32af39435 | 13,762 | py | Python | ckine/figures/figureC12.py | meyer-lab/type-I-ckine-model | fb2db21f1c476d79467e2bf22e1fdc2cdd6c47a3 | [
"MIT"
] | null | null | null | ckine/figures/figureC12.py | meyer-lab/type-I-ckine-model | fb2db21f1c476d79467e2bf22e1fdc2cdd6c47a3 | [
"MIT"
] | 6 | 2021-02-01T23:47:16.000Z | 2021-04-28T19:56:17.000Z | ckine/figures/figureC12.py | meyer-lab/gc-valent | bc0451610655633483a98ab450d20ef631479d2b | [
"MIT"
] | null | null | null | import os
import matplotlib.lines as mlines
import pandas as pds
import numpy as np
from .figureCommon import subplotLabel, getSetup
from ..flow import importF
from ..PCA import sampleT
from ..flow import gating, count_data
from ..FCimports import compMatrix, applyMatrix
path_here = os.path.dirname(os.path.dirname(__file__))
def makeFigure():
    """Create the 4x4 figure grid, label the subplots, and run the StatMV pipeline."""
    # Get list of axis objects
    ax, f = getSetup((10, 10), (4, 4))
    subplotLabel(ax)
    # StatMV() does not draw on these axes; its useful output is the CSV it
    # writes as a side effect (see StatMV).
    StatMV()
    # global_legend(ax[7])
    return f
def global_legend(ax):
    """ Create legend for Inverse and Standard Gini """
    # Build proxy artists (no data) purely to label the two series.
    legend_specs = [("darkorange", "Inverse Gini Coeff"), ("navy", "Gini Coeff")]
    handles = [
        mlines.Line2D([], [], color=color, marker='o', linestyle='None', markersize=6, label=label)
        for color, label in legend_specs
    ]
    ax.legend(handles=handles, bbox_to_anchor=(0, 1), loc="upper left")
def StatMV():
    """
    Calculate mean and variance of a sample in a pandas dataframe, and plot.

    Processes the WT IL-2/IL-15 plates and then the mutein plates: gates each
    flow sample, bins cells by IL2Ra signal into log-spaced bins, records the
    mean pSTAT5 per bin, background-subtracts per 4-row group, and writes the
    accumulated table to "WTDimericMutSingleCellDataBin.csv".
    """
    # NOTE(review): this first dataFiles list is immediately shadowed by the
    # local-path list assigned right below — it is dead data kept for reference.
    dataFiles = ["/data/flow/2019-03-19 IL-2 and IL-15 treated pSTAT5 assay - Lymphocyte gated - Treg plate.zip",
                 "/data/flow/2019-03-27 IL-2 and IL-15 treated pSTAT5 assay - Lymphocyte gated - Treg plate.zip",
                 "/data/flow/2019-04-18 IL-2 and IL-15 treated pSTAT5 assay - Lymphocyte gated - Treg plate - NEW PBMC LOT/",
                 "/data/flow/2019-03-15 IL-2 and IL-15 treated pSTAT5 assay - Lymphocyte gated - NK plate.zip",
                 "/data/flow/2019-03-27 IL-2 and IL-15 treated pSTAT5 assay - Lymphocyte gated - NK plate.zip",
                 "/data/flow/2019-04-18 IL-2 and IL-15 treated pSTAT5 assay - Lymphocyte gated - NK plate - NEW PBMC LOT.zip"]
    # First three entries are T-cell plates, last three are NK plates (see i < 3 below).
    dataFiles = ["/home/brianoj/Tplate15", "/home/brianoj/Tplate27", "/home/brianoj/Tplate418", "/home/brianoj/Nkplate15", "/home/brianoj/Nkplate27", "/home/brianoj/Nkplate418"]
    dates = ["3/15/2019", "3/27/2019", "4/18/2019", "3/15/2019", "3/27/2019", "4/18/2019"]
    rows = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']
    cellTypesT = ['treg', 'nonTreg']
    TitlesT = ["Treg", "Thelper"]
    masterMVdf = pds.DataFrame(columns={"Date", "Time", "Cell", "Ligand", "Dose", "Mean", "Bin", "NumCells"})
    MVdf = pds.DataFrame(columns={"Date", "Time", "Cell", "Ligand", "Dose", "Mean", "Bin", "NumCells"})
    alldata = []
    # 12-point dose ladder, 3-fold dilutions from 84 down — units presumably nM, confirm.
    dosemat = np.array([[84, 28, 9.333333, 3.111, 1.037037, 0.345679, 0.115226, 0.038409, 0.012803, 0.004268, 0.001423, 0.000474]])
    repList = [0, 0, 0, 0, 0, 0]
    numBins = 4
    T_matrix = compMatrix("2019-11-08", "1", "A")  # Create matrix 1
    for i, filename in enumerate(dataFiles):
        if i < 3:
            Tcells = True
        else:
            Tcells = False
        if Tcells:
            statcol = "RL1-H"
            IL2RaCol = "VL1-H"
        for k, cell_type in enumerate(cellTypesT):
            for j, row in enumerate(rows):
                print(filename)
                sample, _ = importF(filename, row)
                # NOTE(review): cell_type is a non-empty string for both entries, so
                # this branch always runs and the sampleT() fallback below appears
                # unreachable — confirm intent.
                if cell_type:
                    for jj, subSample in enumerate(sample):
                        sample[jj] = applyMatrix(subSample, T_matrix)
                    gates = gating(cell_type, dates[i], True, repList[i])
                    _, alldata = count_data(sample, gates, Tcells, True)
                else:
                    for jj, samplejj in enumerate(sample):
                        _, pstat, _ = sampleT(samplejj)
                        alldata.append(pstat)
                for ii, _ in enumerate(sample):  # get pstat data and put it into list form
                    dat_array = alldata[ii]
                    stat_array = dat_array[[statcol]]
                    stat_array = stat_array.to_numpy()
                    stat_array = stat_array.clip(min=1)  # remove small percentage of negative pstat values
                    # NOTE(review): statcol/IL2RaCol are only assigned when Tcells is
                    # True; for NK files they retain the previous loop's values — confirm.
                    IL2Ra_array = dat_array[[IL2RaCol]]
                    IL2Ra_array = IL2Ra_array.to_numpy()
                    IL2Ra_array = IL2Ra_array.clip(min=1)
                    IL2Ra_array = IL2Ra_array / 1.5
                    while np.amax(stat_array) > 100000:
                        IL2Ra_array = np.reshape(IL2Ra_array[stat_array != np.amax(stat_array)], (-1, 1))  # Remove random exploding value
                        stat_array = np.reshape(stat_array[stat_array != np.amax(stat_array)], (-1, 1))  # Remove random exploding value
                    # Log-spaced IL2Ra bins between the 5th and 95th percentile.
                    bins = np.logspace(np.log10(np.percentile(IL2Ra_array, 5)), np.log10(np.percentile(IL2Ra_array, 95)), num=numBins)
                    for kk in range(0, bins.size - 1):
                        binDat = stat_array[(IL2Ra_array > bins[kk]) & (IL2Ra_array < bins[kk + 1])]
                        if stat_array.size == 0:
                            MVdf = MVdf.append(pds.DataFrame.from_dict({"Date": dates[i], "Time": timeFunc(row), "Cell": TitlesT[k], "Ligand": cytFunc(row), "Dose": dosemat[0, ii], "Mean": [0],
                                                                        "Bin": [kk], "NumCells": 0, "Bivalent": [0]}))
                        else:
                            MVdf = MVdf.append(pds.DataFrame.from_dict({"Date": dates[i], "Time": timeFunc(row), "Cell": TitlesT[k], "Ligand": cytFunc(
                                row), "Dose": dosemat[0, ii], "Mean": np.mean(binDat), "Bin": [kk + 1], "NumCells": [binDat.size], "Bivalent": [0]}))
                # Background-subtract after each 4-row group using the lowest-dose wells.
                if j == 3 or j == 7:
                    MVdf['Mean'] = MVdf['Mean'] - MVdf.loc[(MVdf.Dose <= 0.001423)].Mean.mean()
            masterMVdf = masterMVdf.append(MVdf)
            MVdf = pds.DataFrame(columns={"Date", "Time", "Ligand", "Dose", "Mean", "Bin", "NumCells", "Bivalent"})
    # Second pass: the mutein plates (060/062/088/097 combinations, T then NK).
    dataFiles = ["/home/brianoj/Muteins 060-062 T/2019-04-19 IL2-060 IL2-062 Treg plate",
                 "/home/brianoj/Muteins 088-097 T/2019-04-19 IL2-088 IL2-097 Treg plate",
                 "/home/brianoj/Muteins 060-088 T/2019-05-02 IL2-060 IL2-088 Treg plate",
                 "/home/brianoj/Muteins 062-097 T/2019-05-02 IL2-062 IL2-097 Treg plate",
                 "/home/brianoj/Muteins 060-062 Nk/2019-04-19 IL2-060 IL2-062 NK plate",
                 "/home/brianoj/Muteins 088-097 Nk/2019-04-19 IL2-088 IL2-097 NK plate",
                 "/home/brianoj/Muteins 060-088 Nk/2019-05-02 IL2-060 IL2-088 NK plate",
                 "/home/brianoj/Muteins 062-097 Nk/2019-05-02 IL2-062 IL2-097 NK plate"]
    dates = ["4/19/2019", "4/19/2019", "5/2/2019", "5/2/2019", "4/19/2019", "4/19/2019", "5/2/2019", "5/2/2019"]
    repList = [0, 1, 0, 1, 0, 1, 0, 1]
    print("Starting Muteins")
    for i, filename in enumerate(dataFiles):
        if i < 4:
            Tcells = True
        else:
            Tcells = False
        if Tcells:
            statcol = "RL1-H"
        for k, cell_type in enumerate(cellTypesT):
            for j, row in enumerate(rows):
                print(filename)
                sample, _ = importF(filename, row)
                if cell_type:
                    for jj, subSample in enumerate(sample):
                        sample[jj] = applyMatrix(subSample, T_matrix)
                    gates = gating(cell_type, dates[i], True, repList[i])
                    _, alldata = count_data(sample, gates, Tcells, True)
                else:
                    for jj, samplejj in enumerate(sample):
                        _, pstat, _ = sampleT(samplejj)
                        alldata.append(pstat)
                for ii, _ in enumerate(sample):  # get pstat data and put it into list form
                    dat_array = alldata[ii]
                    stat_array = dat_array[[statcol]]
                    stat_array = stat_array.to_numpy()
                    stat_array = stat_array.clip(min=1)  # remove small percentage of negative pstat values
                    IL2Ra_array = dat_array[[IL2RaCol]]
                    IL2Ra_array = IL2Ra_array.to_numpy()
                    IL2Ra_array = IL2Ra_array.clip(min=1)
                    IL2Ra_array = IL2Ra_array / 1.5
                    while np.amax(stat_array) > 100000:
                        IL2Ra_array = np.reshape(IL2Ra_array[stat_array != np.amax(stat_array)], (-1, 1))  # Remove random exploding value
                        stat_array = np.reshape(stat_array[stat_array != np.amax(stat_array)], (-1, 1))  # Remove random exploding value
                    bins = np.logspace(np.log10(np.percentile(IL2Ra_array, 5)), np.log10(np.percentile(IL2Ra_array, 95)), num=numBins)
                    # Time and ligand identity come from the plate layout lookup.
                    timelig = mutFunc(row, filename)
                    for kk in range(0, bins.size - 1):
                        binDat = stat_array[(IL2Ra_array > bins[kk]) & (IL2Ra_array < bins[kk + 1])]
                        if stat_array.size == 0:
                            MVdf = MVdf.append(pds.DataFrame.from_dict({"Date": dates[i], "Time": timelig[0], "Cell": TitlesT[k], "Ligand": timelig[1], "Dose": dosemat[0, ii], "Mean": [0],
                                                                        "Bin": [kk], "NumCells": 0, "Bivalent": timelig[2]}))
                        else:
                            MVdf = MVdf.append(pds.DataFrame.from_dict({"Date": dates[i], "Time": timelig[0], "Cell": TitlesT[k], "Ligand": timelig[1],
                                                                        "Dose": dosemat[0, ii], "Mean": np.mean(binDat), "Bin": [kk + 1], "NumCells": [binDat.size], "Bivalent": timelig[2]}))
                if j == 3 or j == 7:
                    MVdf['Mean'] = MVdf['Mean'] - MVdf.loc[(MVdf.Dose <= 0.001423)].Mean.mean()
            masterMVdf = masterMVdf.append(MVdf)
            MVdf = pds.DataFrame(columns={"Date", "Time", "Ligand", "Dose", "Mean", "Bin", "NumCells", "Bivalent"})
    masterMVdf.Mean = masterMVdf.Mean.clip(lower=0)
    masterMVdf = masterMVdf.loc[masterMVdf.Ligand != "H16L N-term"]
    masterMVdf.to_csv("WTDimericMutSingleCellDataBin.csv", index=False)
    # NOTE(review): this returns the last, freshly-reset MVdf rather than
    # masterMVdf; the full result only survives via the CSV above — confirm.
    return MVdf
def timeFunc(letter):
    """Map a plate row letter (A-H) to its stimulation time in hours.

    Rows pair up across the two cytokines: A/E, B/F, C/G, D/H.
    Returns None for any other letter.
    """
    row_to_hours = {"A": 4.0, "E": 4.0,
                    "B": 2.0, "F": 2.0,
                    "C": 1.0, "G": 1.0,
                    "D": 0.5, "H": 0.5}
    return row_to_hours.get(letter)
def cytFunc(letter):
    """Map a plate row letter to the cytokine applied in that row.

    Rows A-D received IL-2, rows E-H IL-15; returns None otherwise.
    """
    if letter in ("A", "B", "C", "D"):
        return "IL2"
    if letter in ("E", "F", "G", "H"):
        return "IL15"
    return None
def mutFunc(letter, datafile):
    """Look up [time (hours), ligand name, bivalency flag] for a plate row.

    Each mutein plate (Treg and NK variants share a layout) maps rows A-H to
    a stimulation time and a ligand. Returns None when the data file or the
    row letter is not recognised.
    """
    plate_layouts = [
        (("/home/brianoj/Muteins 060-062 T/2019-04-19 IL2-060 IL2-062 Treg plate",
          "/home/brianoj/Muteins 060-062 Nk/2019-04-19 IL2-060 IL2-062 NK plate"),
         {"A": (4.0, "WT N-term"), "B": (4.0, "WT N-term"), "C": (4.0, "WT N-term"), "D": (0.5, "WT N-term"),
          "E": (4.0, "H16N N-term"), "F": (2.0, "H16N N-term"), "G": (1.0, "H16N N-term"), "H": (2.0, "WT N-term")}),
        (("/home/brianoj/Muteins 088-097 T/2019-04-19 IL2-088 IL2-097 Treg plate",
          "/home/brianoj/Muteins 088-097 Nk/2019-04-19 IL2-088 IL2-097 NK plate"),
         {"A": (4.0, "R38Q N-term"), "B": (2.0, "R38Q N-term"), "C": (1.0, "R38Q N-term"), "D": (1.0, "WT N-term"),
          "E": (4.0, "R38Q/H16N"), "F": (2.0, "R38Q/H16N"), "G": (1.0, "R38Q/H16N"), "H": (0.5, "R38Q/H16N")}),
        (("/home/brianoj/Muteins 060-088 T/2019-05-02 IL2-060 IL2-088 Treg plate",
          "/home/brianoj/Muteins 060-088 Nk/2019-05-02 IL2-060 IL2-088 NK plate"),
         {"A": (4.0, "WT N-term"), "B": (4.0, "WT N-term"), "C": (4.0, "WT N-term"), "D": (0.5, "WT N-term"),
          "E": (4.0, "R38Q N-term"), "F": (2.0, "R38Q N-term"), "G": (1.0, "R38Q N-term"), "H": (2.0, "R38Q N-term")}),
        (("/home/brianoj/Muteins 062-097 T/2019-05-02 IL2-062 IL2-097 Treg plate",
          "/home/brianoj/Muteins 062-097 Nk/2019-05-02 IL2-062 IL2-097 NK plate"),
         {"A": (4.0, "H16N N-term"), "B": (2.0, "H16N N-term"), "C": (1.0, "H16N N-term"), "D": (1.0, "H16N N-term"),
          "E": (4.0, "R38Q/H16N"), "F": (2.0, "R38Q/H16N"), "G": (1.0, "R38Q/H16N"), "H": (0.5, "R38Q/H16N")}),
    ]
    for paths, row_map in plate_layouts:
        if datafile in paths:
            entry = row_map.get(letter)
            if entry is None:
                return None
            hours, ligand = entry
            # Return a fresh list each call, matching the original literals.
            return [hours, ligand, 1]
    return None
| 50.782288 | 197 | 0.516204 | import os
import matplotlib.lines as mlines
import pandas as pds
import numpy as np
from .figureCommon import subplotLabel, getSetup
from ..flow import importF
from ..PCA import sampleT
from ..flow import gating, count_data
from ..FCimports import compMatrix, applyMatrix
path_here = os.path.dirname(os.path.dirname(__file__))
def makeFigure():
    """Create the 4x4 figure grid, label the subplots, and run the StatMV pipeline."""
    ax, f = getSetup((10, 10), (4, 4))
    subplotLabel(ax)
    # StatMV() does not draw on these axes; its useful output is the CSV it
    # writes as a side effect (see StatMV).
    StatMV()
    return f
def global_legend(ax):
    """Create a legend for the Inverse and Standard Gini series."""
    # Build proxy artists (no data) purely to label the two series.
    legend_specs = [("darkorange", "Inverse Gini Coeff"), ("navy", "Gini Coeff")]
    handles = [
        mlines.Line2D([], [], color=color, marker='o', linestyle='None', markersize=6, label=label)
        for color, label in legend_specs
    ]
    ax.legend(handles=handles, bbox_to_anchor=(0, 1), loc="upper left")
def StatMV():
    """
    Calculate per-bin mean pSTAT5 for each plate/row/dose and persist the table.

    Processes the WT IL-2/IL-15 plates and then the mutein plates: gates each
    flow sample, bins cells by IL2Ra signal into log-spaced bins, records the
    mean pSTAT5 per bin, background-subtracts per 4-row group, and writes the
    accumulated table to "WTDimericMutSingleCellDataBin.csv".
    """
    # NOTE(review): this first dataFiles list is immediately shadowed by the
    # local-path list assigned right below — it is dead data kept for reference.
    dataFiles = ["/data/flow/2019-03-19 IL-2 and IL-15 treated pSTAT5 assay - Lymphocyte gated - Treg plate.zip",
                 "/data/flow/2019-03-27 IL-2 and IL-15 treated pSTAT5 assay - Lymphocyte gated - Treg plate.zip",
                 "/data/flow/2019-04-18 IL-2 and IL-15 treated pSTAT5 assay - Lymphocyte gated - Treg plate - NEW PBMC LOT/",
                 "/data/flow/2019-03-15 IL-2 and IL-15 treated pSTAT5 assay - Lymphocyte gated - NK plate.zip",
                 "/data/flow/2019-03-27 IL-2 and IL-15 treated pSTAT5 assay - Lymphocyte gated - NK plate.zip",
                 "/data/flow/2019-04-18 IL-2 and IL-15 treated pSTAT5 assay - Lymphocyte gated - NK plate - NEW PBMC LOT.zip"]
    # First three entries are T-cell plates, last three are NK plates (see i < 3 below).
    dataFiles = ["/home/brianoj/Tplate15", "/home/brianoj/Tplate27", "/home/brianoj/Tplate418", "/home/brianoj/Nkplate15", "/home/brianoj/Nkplate27", "/home/brianoj/Nkplate418"]
    dates = ["3/15/2019", "3/27/2019", "4/18/2019", "3/15/2019", "3/27/2019", "4/18/2019"]
    rows = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']
    cellTypesT = ['treg', 'nonTreg']
    TitlesT = ["Treg", "Thelper"]
    masterMVdf = pds.DataFrame(columns={"Date", "Time", "Cell", "Ligand", "Dose", "Mean", "Bin", "NumCells"})
    MVdf = pds.DataFrame(columns={"Date", "Time", "Cell", "Ligand", "Dose", "Mean", "Bin", "NumCells"})
    alldata = []
    # 12-point dose ladder, 3-fold dilutions from 84 down — units presumably nM, confirm.
    dosemat = np.array([[84, 28, 9.333333, 3.111, 1.037037, 0.345679, 0.115226, 0.038409, 0.012803, 0.004268, 0.001423, 0.000474]])
    repList = [0, 0, 0, 0, 0, 0]
    numBins = 4
    # Compensation matrix applied to every T-cell sample below.
    T_matrix = compMatrix("2019-11-08", "1", "A")
    for i, filename in enumerate(dataFiles):
        if i < 3:
            Tcells = True
        else:
            Tcells = False
        if Tcells:
            statcol = "RL1-H"
            IL2RaCol = "VL1-H"
        for k, cell_type in enumerate(cellTypesT):
            for j, row in enumerate(rows):
                print(filename)
                sample, _ = importF(filename, row)
                # NOTE(review): cell_type is a non-empty string for both entries, so
                # this branch always runs and the sampleT() fallback below appears
                # unreachable — confirm intent.
                if cell_type:
                    for jj, subSample in enumerate(sample):
                        sample[jj] = applyMatrix(subSample, T_matrix)
                    gates = gating(cell_type, dates[i], True, repList[i])
                    _, alldata = count_data(sample, gates, Tcells, True)
                else:
                    for jj, samplejj in enumerate(sample):
                        _, pstat, _ = sampleT(samplejj)
                        alldata.append(pstat)
                # Pull pSTAT5 and IL2Ra channels per well and bin by IL2Ra signal.
                for ii, _ in enumerate(sample):
                    dat_array = alldata[ii]
                    stat_array = dat_array[[statcol]]
                    stat_array = stat_array.to_numpy()
                    # Clip removes the small fraction of negative pSTAT values.
                    stat_array = stat_array.clip(min=1)
                    IL2Ra_array = dat_array[[IL2RaCol]]
                    IL2Ra_array = IL2Ra_array.to_numpy()
                    IL2Ra_array = IL2Ra_array.clip(min=1)
                    IL2Ra_array = IL2Ra_array / 1.5
                    # Drop sporadic exploding pSTAT values (> 1e5) and keep arrays aligned.
                    while np.amax(stat_array) > 100000:
                        IL2Ra_array = np.reshape(IL2Ra_array[stat_array != np.amax(stat_array)], (-1, 1))
                        stat_array = np.reshape(stat_array[stat_array != np.amax(stat_array)], (-1, 1))
                    # Log-spaced IL2Ra bins between the 5th and 95th percentile.
                    bins = np.logspace(np.log10(np.percentile(IL2Ra_array, 5)), np.log10(np.percentile(IL2Ra_array, 95)), num=numBins)
                    for kk in range(0, bins.size - 1):
                        binDat = stat_array[(IL2Ra_array > bins[kk]) & (IL2Ra_array < bins[kk + 1])]
                        if stat_array.size == 0:
                            MVdf = MVdf.append(pds.DataFrame.from_dict({"Date": dates[i], "Time": timeFunc(row), "Cell": TitlesT[k], "Ligand": cytFunc(row), "Dose": dosemat[0, ii], "Mean": [0],
                                                                        "Bin": [kk], "NumCells": 0, "Bivalent": [0]}))
                        else:
                            MVdf = MVdf.append(pds.DataFrame.from_dict({"Date": dates[i], "Time": timeFunc(row), "Cell": TitlesT[k], "Ligand": cytFunc(
                                row), "Dose": dosemat[0, ii], "Mean": np.mean(binDat), "Bin": [kk + 1], "NumCells": [binDat.size], "Bivalent": [0]}))
                # Background-subtract after each 4-row group using the lowest-dose wells.
                if j == 3 or j == 7:
                    MVdf['Mean'] = MVdf['Mean'] - MVdf.loc[(MVdf.Dose <= 0.001423)].Mean.mean()
            masterMVdf = masterMVdf.append(MVdf)
            MVdf = pds.DataFrame(columns={"Date", "Time", "Ligand", "Dose", "Mean", "Bin", "NumCells", "Bivalent"})
    # Second pass: the mutein plates (060/062/088/097 combinations, T then NK).
    dataFiles = ["/home/brianoj/Muteins 060-062 T/2019-04-19 IL2-060 IL2-062 Treg plate",
                 "/home/brianoj/Muteins 088-097 T/2019-04-19 IL2-088 IL2-097 Treg plate",
                 "/home/brianoj/Muteins 060-088 T/2019-05-02 IL2-060 IL2-088 Treg plate",
                 "/home/brianoj/Muteins 062-097 T/2019-05-02 IL2-062 IL2-097 Treg plate",
                 "/home/brianoj/Muteins 060-062 Nk/2019-04-19 IL2-060 IL2-062 NK plate",
                 "/home/brianoj/Muteins 088-097 Nk/2019-04-19 IL2-088 IL2-097 NK plate",
                 "/home/brianoj/Muteins 060-088 Nk/2019-05-02 IL2-060 IL2-088 NK plate",
                 "/home/brianoj/Muteins 062-097 Nk/2019-05-02 IL2-062 IL2-097 NK plate"]
    dates = ["4/19/2019", "4/19/2019", "5/2/2019", "5/2/2019", "4/19/2019", "4/19/2019", "5/2/2019", "5/2/2019"]
    repList = [0, 1, 0, 1, 0, 1, 0, 1]
    print("Starting Muteins")
    for i, filename in enumerate(dataFiles):
        if i < 4:
            Tcells = True
        else:
            Tcells = False
        if Tcells:
            statcol = "RL1-H"
        for k, cell_type in enumerate(cellTypesT):
            for j, row in enumerate(rows):
                print(filename)
                sample, _ = importF(filename, row)
                if cell_type:
                    for jj, subSample in enumerate(sample):
                        sample[jj] = applyMatrix(subSample, T_matrix)
                    gates = gating(cell_type, dates[i], True, repList[i])
                    _, alldata = count_data(sample, gates, Tcells, True)
                else:
                    for jj, samplejj in enumerate(sample):
                        _, pstat, _ = sampleT(samplejj)
                        alldata.append(pstat)
                for ii, _ in enumerate(sample):
                    dat_array = alldata[ii]
                    stat_array = dat_array[[statcol]]
                    stat_array = stat_array.to_numpy()
                    stat_array = stat_array.clip(min=1)
                    IL2Ra_array = dat_array[[IL2RaCol]]
                    IL2Ra_array = IL2Ra_array.to_numpy()
                    IL2Ra_array = IL2Ra_array.clip(min=1)
                    IL2Ra_array = IL2Ra_array / 1.5
                    while np.amax(stat_array) > 100000:
                        IL2Ra_array = np.reshape(IL2Ra_array[stat_array != np.amax(stat_array)], (-1, 1))
                        stat_array = np.reshape(stat_array[stat_array != np.amax(stat_array)], (-1, 1))
                    bins = np.logspace(np.log10(np.percentile(IL2Ra_array, 5)), np.log10(np.percentile(IL2Ra_array, 95)), num=numBins)
                    # Time and ligand identity come from the plate layout lookup.
                    timelig = mutFunc(row, filename)
                    for kk in range(0, bins.size - 1):
                        binDat = stat_array[(IL2Ra_array > bins[kk]) & (IL2Ra_array < bins[kk + 1])]
                        if stat_array.size == 0:
                            MVdf = MVdf.append(pds.DataFrame.from_dict({"Date": dates[i], "Time": timelig[0], "Cell": TitlesT[k], "Ligand": timelig[1], "Dose": dosemat[0, ii], "Mean": [0],
                                                                        "Bin": [kk], "NumCells": 0, "Bivalent": timelig[2]}))
                        else:
                            MVdf = MVdf.append(pds.DataFrame.from_dict({"Date": dates[i], "Time": timelig[0], "Cell": TitlesT[k], "Ligand": timelig[1],
                                                                        "Dose": dosemat[0, ii], "Mean": np.mean(binDat), "Bin": [kk + 1], "NumCells": [binDat.size], "Bivalent": timelig[2]}))
                if j == 3 or j == 7:
                    MVdf['Mean'] = MVdf['Mean'] - MVdf.loc[(MVdf.Dose <= 0.001423)].Mean.mean()
            masterMVdf = masterMVdf.append(MVdf)
            MVdf = pds.DataFrame(columns={"Date", "Time", "Ligand", "Dose", "Mean", "Bin", "NumCells", "Bivalent"})
    masterMVdf.Mean = masterMVdf.Mean.clip(lower=0)
    masterMVdf = masterMVdf.loc[masterMVdf.Ligand != "H16L N-term"]
    masterMVdf.to_csv("WTDimericMutSingleCellDataBin.csv", index=False)
    # NOTE(review): this returns the last, freshly-reset MVdf rather than
    # masterMVdf; the full result only survives via the CSV above — confirm.
    return MVdf
def timeFunc(letter):
    """Map a plate row letter (A-H) to its timepoint in hours.

    Returns None for any letter outside A-H.
    """
    hours_by_row = {
        "A": 4.0, "E": 4.0,
        "B": 2.0, "F": 2.0,
        "C": 1.0, "G": 1.0,
        "D": 0.5, "H": 0.5,
    }
    return hours_by_row.get(letter)
def cytFunc(letter):
    """Map a plate row letter to the cytokine used in that row.

    Rows A-D are IL2, rows E-H are IL15; anything else yields None.
    """
    if letter in ("A", "B", "C", "D"):
        return "IL2"
    if letter in ("E", "F", "G", "H"):
        return "IL15"
    return None
def mutFunc(letter, datafile):
    """Look up [time (h), ligand name, bivalent flag] for a plate row.

    The mapping from row letter to condition depends on which experiment
    plate the data file belongs to.  Returns None when either the data
    file or the row letter is not recognised.  The tables are rebuilt on
    every call so each lookup returns a fresh list.
    """
    plate_060_062 = {
        "A": [4.0, "WT N-term", 1],
        "B": [4.0, "WT N-term", 1],
        "C": [4.0, "WT N-term", 1],
        "D": [0.5, "WT N-term", 1],
        "E": [4.0, "H16N N-term", 1],
        "F": [2.0, "H16N N-term", 1],
        "G": [1.0, "H16N N-term", 1],
        "H": [2.0, "WT N-term", 1],
    }
    plate_088_097 = {
        "A": [4.0, "R38Q N-term", 1],
        "B": [2.0, "R38Q N-term", 1],
        "C": [1.0, "R38Q N-term", 1],
        "D": [1.0, "WT N-term", 1],
        "E": [4.0, "R38Q/H16N", 1],
        "F": [2.0, "R38Q/H16N", 1],
        "G": [1.0, "R38Q/H16N", 1],
        "H": [0.5, "R38Q/H16N", 1],
    }
    plate_060_088 = {
        "A": [4.0, "WT N-term", 1],
        "B": [4.0, "WT N-term", 1],
        "C": [4.0, "WT N-term", 1],
        "D": [0.5, "WT N-term", 1],
        "E": [4.0, "R38Q N-term", 1],
        "F": [2.0, "R38Q N-term", 1],
        "G": [1.0, "R38Q N-term", 1],
        "H": [2.0, "R38Q N-term", 1],
    }
    plate_062_097 = {
        "A": [4.0, "H16N N-term", 1],
        "B": [2.0, "H16N N-term", 1],
        "C": [1.0, "H16N N-term", 1],
        "D": [1.0, "H16N N-term", 1],
        "E": [4.0, "R38Q/H16N", 1],
        "F": [2.0, "R38Q/H16N", 1],
        "G": [1.0, "R38Q/H16N", 1],
        "H": [0.5, "R38Q/H16N", 1],
    }
    # Both the Treg and NK plate files of each experiment share one table.
    plates = {
        "/home/brianoj/Muteins 060-062 T/2019-04-19 IL2-060 IL2-062 Treg plate": plate_060_062,
        "/home/brianoj/Muteins 060-062 Nk/2019-04-19 IL2-060 IL2-062 NK plate": plate_060_062,
        "/home/brianoj/Muteins 088-097 T/2019-04-19 IL2-088 IL2-097 Treg plate": plate_088_097,
        "/home/brianoj/Muteins 088-097 Nk/2019-04-19 IL2-088 IL2-097 NK plate": plate_088_097,
        "/home/brianoj/Muteins 060-088 T/2019-05-02 IL2-060 IL2-088 Treg plate": plate_060_088,
        "/home/brianoj/Muteins 060-088 Nk/2019-05-02 IL2-060 IL2-088 NK plate": plate_060_088,
        "/home/brianoj/Muteins 062-097 T/2019-05-02 IL2-062 IL2-097 Treg plate": plate_062_097,
        "/home/brianoj/Muteins 062-097 Nk/2019-05-02 IL2-062 IL2-097 NK plate": plate_062_097,
    }
    table = plates.get(datafile)
    if table is None:
        return None
    return table.get(letter)
| true | true |
1c2d8aaadbe5c799d88a5d38cc7f826a82cd8a94 | 526 | py | Python | app/convertfile/uploading/migrations/0002_auto_20200108_1333.py | web-user/django-convert-video-file-docker-nginx | 40bd6a67c2d21d91d408d6112fa1756635300e60 | [
"MIT"
] | null | null | null | app/convertfile/uploading/migrations/0002_auto_20200108_1333.py | web-user/django-convert-video-file-docker-nginx | 40bd6a67c2d21d91d408d6112fa1756635300e60 | [
"MIT"
] | 3 | 2021-04-08T21:59:25.000Z | 2021-06-09T19:19:25.000Z | app/convertfile/uploading/migrations/0002_auto_20200108_1333.py | web-user/django-convert-video-file-docker-nginx | 40bd6a67c2d21d91d408d6112fa1756635300e60 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.9 on 2020-01-08 13:33
import convertfile.utils
from django.db import migrations, models
import uploading.models
class Migration(migrations.Migration):
    # Auto-generated Django migration: alters MediaFile.file to use a
    # custom upload-path callable and a file-extension validator.

    dependencies = [
        ('uploading', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='mediafile',
            name='file',
            # upload_to resolves the storage path per upload; the validator
            # rejects files whose extension is not allowed.
            field=models.FileField(default='', upload_to=uploading.models.upload_media_file, validators=[convertfile.utils.validate_file_extension]),
        ),
    ]
| 25.047619 | 149 | 0.6673 |
import convertfile.utils
from django.db import migrations, models
import uploading.models
class Migration(migrations.Migration):
dependencies = [
('uploading', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='mediafile',
name='file',
field=models.FileField(default='', upload_to=uploading.models.upload_media_file, validators=[convertfile.utils.validate_file_extension]),
),
]
| true | true |
1c2d8bf92c003205fd371162a11dfe4408f8d590 | 19,624 | py | Python | tests/algorithms/profile_model/ellipsoid/test_parameterisation.py | dials-src/dials | 25055c1f6164dc33e672e7c5c6a9c5a35e870660 | [
"BSD-3-Clause"
] | 1 | 2021-12-10T17:28:16.000Z | 2021-12-10T17:28:16.000Z | tests/algorithms/profile_model/ellipsoid/test_parameterisation.py | dials-src/dials | 25055c1f6164dc33e672e7c5c6a9c5a35e870660 | [
"BSD-3-Clause"
] | null | null | null | tests/algorithms/profile_model/ellipsoid/test_parameterisation.py | dials-src/dials | 25055c1f6164dc33e672e7c5c6a9c5a35e870660 | [
"BSD-3-Clause"
] | 1 | 2021-12-07T12:39:04.000Z | 2021-12-07T12:39:04.000Z | from __future__ import annotations
from collections import namedtuple
from copy import copy
from random import randint, uniform
import numpy as np
import pytest
from scitbx import matrix
from dials.algorithms.profile_model.ellipsoid.parameterisation import (
Angular2MosaicityParameterisation,
Angular4MosaicityParameterisation,
ModelState,
ReflectionModelState,
Simple1MosaicityParameterisation,
Simple6MosaicityParameterisation,
WavelengthSpreadParameterisation,
)
from dials.algorithms.refinement.parameterisation.crystal_parameters import (
CrystalOrientationParameterisation,
CrystalUnitCellParameterisation,
)
from dials.array_family import flex
def test_Simple1MosaicityParameterisation():
    """The one-parameter model yields an isotropic covariance b^2 * I."""
    model = Simple1MosaicityParameterisation(params=np.array([1e-3]))

    # Static properties of the parameterisation.
    assert model.is_angular() is False
    assert model.num_parameters() == 1
    assert model.parameters == (1e-3,)

    # Parameters are writable and read back unchanged.
    model.parameters = np.array([2e-3])
    assert model.parameters == (2e-3,)

    # Sigma is diagonal with b^2 everywhere on the diagonal.
    variance = model.parameters[0] ** 2
    expected_sigma = [variance, 0, 0, 0, variance, 0, 0, 0, variance]
    assert list(model.sigma().flatten()) == pytest.approx(expected_sigma)

    # dSigma/db is diagonal with 2b everywhere on the diagonal.
    derivs = model.first_derivatives()
    assert derivs.shape[0] == 1
    slope = 2 * model.parameters[0]
    expected_deriv = [slope, 0, 0, 0, slope, 0, 0, 0, slope]
    assert list(derivs[0, :, :].flatten()) == pytest.approx(expected_deriv)
def test_Simple6MosaicityParameterisation():
    """Check the 6-parameter mosaicity model against closed-form values.

    The asserted elements are consistent with Sigma = L L^T for the lower
    triangular L = [[b1, 0, 0], [b2, b3, 0], [b4, b5, b6]]; the analytic
    first derivatives are compared element-wise against dSigma/db_i.
    """
    params = np.array([1e-3, 2e-3, 3e-3, 4e-3, 5e-3, 6e-3])
    p = Simple6MosaicityParameterisation(params=params)
    assert p.is_angular() is False
    assert p.num_parameters() == 6
    assert list(p.parameters) == pytest.approx(list(params))
    # Parameters are writable and read back unchanged.
    params = np.array([2e-3, 3e-3, 4e-3, 5e-3, 6e-3, 7e-3])
    p.parameters = params
    assert list(p.parameters) == pytest.approx(list(params))
    b1, b2, b3, b4, b5, b6 = params
    # Element-wise check of the (symmetric) covariance matrix.
    assert p.sigma()[0, 0] == pytest.approx(b1**2)
    assert p.sigma()[0, 1] == pytest.approx(b1 * b2)
    assert p.sigma()[0, 2] == pytest.approx(b1 * b4)
    assert p.sigma()[1, 0] == pytest.approx(b1 * b2)
    assert p.sigma()[1, 1] == pytest.approx(b2**2 + b3 * b3)
    assert p.sigma()[1, 2] == pytest.approx(b2 * b4 + b3 * b5)
    assert p.sigma()[2, 0] == pytest.approx(b1 * b4)
    assert p.sigma()[2, 1] == pytest.approx(b2 * b4 + b3 * b5)
    assert p.sigma()[2, 2] == pytest.approx(b4**2 + b5**2 + b6**2)
    # Expected flattened 3x3 derivative matrices, one tuple per parameter.
    dSdb = [
        (2 * b1, b2, b4, b2, 0, 0, b4, 0, 0),
        (0, b1, 0, b1, 2 * b2, b4, 0, b4, 0),
        (0, 0, 0, 0, 2 * b3, b5, 0, b5, 0),
        (0, 0, b1, 0, 0, b2, b1, b2, 2 * b4),
        (0, 0, 0, 0, 0, b3, 0, b3, 2 * b5),
        (0, 0, 0, 0, 0, 0, 0, 0, 2 * b6),
    ]
    d = p.first_derivatives()
    assert d.shape[0] == 6
    for i in range(d.shape[0]):
        a = dSdb[i]
        b = d[i, :, :]
        for j in range(9):
            assert b.flatten()[j] == pytest.approx(a[j], abs=1e-12)
def test_WavelengthSpreadParameterisation():
    """Wavelength spread has one parameter b with sigma = b^2."""
    model = WavelengthSpreadParameterisation(params=np.array([1e-3]))
    assert model.num_parameters() == 1
    assert model.parameters[0] == pytest.approx(1e-3)

    # Parameters are writable and read back unchanged.
    model.parameters = np.array([2e-3])
    assert model.parameters[0] == pytest.approx(2e-3)

    # sigma = b^2 and d(sigma)/db = 2b.
    assert model.sigma() == pytest.approx((2e-3) ** 2)
    assert model.first_derivatives()[0] == pytest.approx(2 * 2e-3)
def test_Angular2MosaicityParameterisation():
    """Check the angular 2-parameter model.

    Sigma is diag(b1^2, b1^2, b2^2): b1 governs the first two axes
    equally and b2 the third axis independently.
    """
    params = np.array([1e-3, 2e-3])
    p = Angular2MosaicityParameterisation(params=params)
    assert p.is_angular() is True
    assert p.num_parameters() == 2
    assert p.parameters[0] == pytest.approx(params[0])
    assert p.parameters[1] == pytest.approx(params[1])
    # Parameters are writable and read back unchanged.
    params = np.array([2e-3, 3e-3])
    p.parameters = params
    assert p.parameters[0] == pytest.approx(params[0])
    assert p.parameters[1] == pytest.approx(params[1])
    b1, b2 = params
    assert list(p.sigma().flatten()) == pytest.approx(
        [b1**2, 0, 0, 0, b1**2, 0, 0, 0, b2**2]
    )
    # Analytic derivatives of Sigma w.r.t. b1 and b2 (flattened 3x3).
    dSdb = [(2 * b1, 0, 0, 0, 2 * b1, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0, 0, 0, 2 * b2)]
    d = p.first_derivatives()
    assert d.shape[0] == 2
    for i in range(d.shape[0]):
        a = dSdb[i]
        b = d[i, :, :]
        for j in range(9):
            assert b.flatten()[j] == pytest.approx(a[j])
def test_Angular4MosaicityParameterisation():
    """Check the angular 4-parameter model element by element.

    The asserted values give a 2x2 coupled block on the first two axes
    (b1, b2, b3) and an independent b4^2 term on the third axis.
    """
    model = Angular4MosaicityParameterisation(
        params=np.array([1e-3, 2e-3, 3e-3, 4e-3])
    )
    assert model.is_angular() is True
    assert model.num_parameters() == 4
    assert list(model.parameters) == pytest.approx([1e-3, 2e-3, 3e-3, 4e-3])

    # Parameters are writable and read back unchanged.
    new_params = np.array([2e-3, 3e-3, 4e-3, 5e-3])
    model.parameters = new_params
    assert list(model.parameters) == pytest.approx(list(new_params))

    b1, b2, b3, b4 = new_params
    sigma = model.sigma()
    expected_elements = [
        (0, 0, b1**2),
        (0, 1, b1 * b2),
        (0, 2, 0),
        (1, 0, b1 * b2),
        (1, 1, b2**2 + b3 * b3),
        (1, 2, 0),
        (2, 0, 0),
        (2, 1, 0),
        (2, 2, b4**2),
    ]
    for row, col, value in expected_elements:
        assert sigma[row, col] == pytest.approx(value)

    # Analytic derivatives of Sigma w.r.t. each parameter (flattened 3x3).
    expected_derivs = [
        (2 * b1, b2, 0, b2, 0, 0, 0, 0, 0),
        (0, b1, 0, b1, 2 * b2, 0, 0, 0, 0),
        (0, 0, 0, 0, 2 * b3, 0, 0, 0, 0),
        (0, 0, 0, 0, 0, 0, 0, 0, 2 * b4),
    ]
    derivs = model.first_derivatives()
    assert derivs.shape[0] == 4
    for expected_flat, deriv_matrix in zip(expected_derivs, derivs):
        for want, got in zip(expected_flat, deriv_matrix.flatten()):
            assert got == pytest.approx(want)
def check_model_state_with_fixed(
    experiment,
    mosaicity_parameterisation,
    wavelength_parameterisation,
    fix_mosaic_spread=False,
    fix_wavelength_spread=False,
    fix_unit_cell=False,
    fix_orientation=False,
):
    """Build a ModelState with the given fixed flags and assert its shape.

    Checks that the fixed flags round-trip, that the matrices and
    parameter vectors have the expected sizes, and that the number of
    active parameters matches the set of non-fixed components.
    """
    state = ModelState(
        experiment,
        mosaicity_parameterisation,
        wavelength_parameterisation,
        fix_mosaic_spread=fix_mosaic_spread,
        fix_wavelength_spread=fix_wavelength_spread,
        fix_unit_cell=fix_unit_cell,
        fix_orientation=fix_orientation,
    )
    # The fixed flags must be reported back verbatim.
    assert state.is_orientation_fixed == fix_orientation
    assert state.is_unit_cell_fixed == fix_unit_cell
    assert state.is_mosaic_spread_fixed == fix_mosaic_spread
    assert state.is_wavelength_spread_fixed == fix_wavelength_spread
    U = state.U_matrix
    B = state.B_matrix
    A = state.A_matrix
    M = state.mosaicity_covariance_matrix
    L = state.wavelength_spread
    # Orientation, cell, setting and mosaicity matrices are all 3x3.
    assert U.shape == (3, 3)
    assert B.shape == (3, 3)
    assert A.shape == (3, 3)
    assert M.shape == (3, 3)
    # Wavelength spread is a single value only when a wavelength model exists.
    if wavelength_parameterisation is not None:
        assert len(L) == 1
    else:
        assert len(L) == 0
    # Parameter vector sizes: 3 orientation, 2 unit cell (for this crystal),
    # one per mosaicity parameter, and 0/1 wavelength parameters.
    assert len(state.U_params) == 3
    assert len(state.B_params) == 2
    assert len(state.M_params) == mosaicity_parameterisation.num_parameters()
    if wavelength_parameterisation is not None:
        assert len(state.L_params) == 1
    else:
        assert len(state.L_params) == 0
    # First-derivative stacks have one leading entry per parameter.
    dU = state.dU_dp
    dB = state.dB_dp
    dM = state.dM_dp
    dL = state.dL_dp
    assert dU.shape[0] == 3
    assert dB.shape[0] == 2
    assert dM.shape[0] == mosaicity_parameterisation.num_parameters()
    if wavelength_parameterisation is not None:
        assert len(dL) == 1
    else:
        assert len(dL) == 0
    # Active parameters = sum over the components that are NOT fixed.
    params = state.active_parameters
    expected_len = 0
    if not fix_mosaic_spread:
        expected_len += mosaicity_parameterisation.num_parameters()
    if not fix_wavelength_spread:
        if wavelength_parameterisation is not None:
            expected_len += wavelength_parameterisation.num_parameters()
    if not fix_unit_cell:
        expected_len += 2
    if not fix_orientation:
        expected_len += 3
    assert len(params) == expected_len
    # Setting the active parameters back must be accepted.
    new_params = params
    state.active_parameters = new_params
def test_ModelState(test_experiment):
    """Exercise ModelState construction for both mosaicity models.

    Without a wavelength parameterisation, fixing any other single
    component must raise; only fixing the wavelength spread is accepted.
    With a wavelength parameterisation, fixing any single component is
    accepted.
    """
    experiment = test_experiment
    wavelength = WavelengthSpreadParameterisation()

    for mosaicity in (
        Simple1MosaicityParameterisation(),
        Simple6MosaicityParameterisation(),
    ):
        for bad_kwargs in (
            {"fix_mosaic_spread": True},
            {"fix_unit_cell": True},
            {"fix_orientation": True},
        ):
            with pytest.raises(AssertionError):
                check_model_state_with_fixed(
                    experiment, mosaicity, None, **bad_kwargs
                )
        check_model_state_with_fixed(
            experiment, mosaicity, None, fix_wavelength_spread=True
        )
        for ok_kwargs in (
            {"fix_mosaic_spread": True},
            {"fix_wavelength_spread": True},
            {"fix_unit_cell": True},
            {"fix_orientation": True},
        ):
            check_model_state_with_fixed(
                experiment, mosaicity, wavelength, **ok_kwargs
            )
def check_reflection_model_state_with_fixed(
    experiment,
    mosaicity_parameterisation,
    wavelength_parameterisation,
    fix_mosaic_spread=False,
    fix_wavelength_spread=False,
    fix_unit_cell=False,
    fix_orientation=False,
):
    """Build a ReflectionModelState and verify its per-reflection values.

    Checks the covariance, the reciprocal-space vector r = A * h for
    h = (1, 1, 1), the wavelength spread, and then walks the derivative
    arrays from the end, peeling off each non-fixed parameter group and
    asserting the expected zero/non-zero structure for that group.
    """
    state = ModelState(
        experiment,
        mosaicity_parameterisation,
        wavelength_parameterisation,
        fix_mosaic_spread=fix_mosaic_spread,
        fix_wavelength_spread=fix_wavelength_spread,
        fix_unit_cell=fix_unit_cell,
        fix_orientation=fix_orientation,
    )
    model = ReflectionModelState(
        state, matrix.col(experiment.beam.get_s0()), matrix.col((1, 1, 1))
    )
    # Per-reflection covariance equals the parameterisation's sigma.
    assert list(model.mosaicity_covariance_matrix.flatten()) == list(
        mosaicity_parameterisation.sigma().flatten()
    )
    # r = A * h for the fixed test Miller index (1, 1, 1).
    assert list(model.get_r().flatten()) == pytest.approx(
        np.matmul(state.A_matrix, np.array([1, 1, 1]).reshape(3, 1))[:, 0].tolist(),
        abs=1e-6,
    )
    if wavelength_parameterisation is not None:
        assert model.wavelength_spread == wavelength_parameterisation.sigma()
    else:
        assert model.wavelength_spread == 0
    # One derivative entry per model parameter.
    dS_dp = model.get_dS_dp()
    dr_dp = model.get_dr_dp()
    dL_dp = model.get_dL_dp()
    assert dS_dp.shape[2] == len(state.parameter_labels)
    assert dr_dp.shape[1] == len(state.parameter_labels)
    assert len(dL_dp) == len(state.parameter_labels)
    # Parameter groups are ordered (orientation, unit cell, mosaicity,
    # wavelength); peel them off from the END of each derivative array.
    if not fix_wavelength_spread:
        # The wavelength parameter affects neither r nor the covariance.
        assert dr_dp[:, -1].flatten() == pytest.approx([0, 0, 0], abs=1e-6)
        assert dS_dp[:, :, -1].flatten() == pytest.approx(
            (0, 0, 0, 0, 0, 0, 0, 0, 0), abs=1e-6
        )
        dr_dp = dr_dp[:, :-1]
        dS_dp = dS_dp[:, :, :-1]
        dL_dp = dL_dp[:-1]
    if not fix_mosaic_spread:
        # Mosaicity parameters affect only the covariance (via dM_dp).
        num_params = mosaicity_parameterisation.num_parameters()
        for i in range(num_params):
            assert dr_dp[:, -(i + 1)] == pytest.approx([0, 0, 0], abs=1e-6)
            assert dS_dp[:, :, -(i + 1)] == pytest.approx(
                state.dM_dp[-(i + 1), :, :], abs=1e-6
            )
            assert dL_dp[-1] == 0
        dr_dp = dr_dp[:, :-num_params]
        dS_dp = dS_dp[:, :, :-num_params]
        dL_dp = dL_dp[:-num_params]
    if not fix_unit_cell:
        # Unit-cell parameters leave covariance and wavelength untouched.
        num_params = state.B_params.size
        for i in range(num_params):
            assert dS_dp[:, :, -(i + 1)].flatten() == pytest.approx(
                (0, 0, 0, 0, 0, 0, 0, 0, 0), abs=1e-6
            )
            assert dL_dp[-(i + 1)] == 0
        dr_dp = dr_dp[:, :-num_params]
        dS_dp = dS_dp[:, :, :-num_params]
        dL_dp = dL_dp[:-num_params]
    if not fix_orientation:
        # Orientation parameters likewise only move r.
        num_params = state.U_params.size
        for i in range(num_params):
            assert dS_dp[:, :, -(i + 1)].flatten() == pytest.approx(
                (0, 0, 0, 0, 0, 0, 0, 0, 0), abs=1e-6
            )
            assert dL_dp[-(i + 1)] == 0
        dr_dp = dr_dp[:, :-num_params]
        dS_dp = dS_dp[:, :, :-num_params]
        dL_dp = dL_dp[:-num_params]
def test_ReflectionModelState(test_experiment):
    """Exercise ReflectionModelState for both mosaicity models with each
    single component fixed in turn."""
    experiment = test_experiment
    wavelength = WavelengthSpreadParameterisation()

    for mosaicity in (
        Simple1MosaicityParameterisation(),
        Simple6MosaicityParameterisation(),
    ):
        # No wavelength model: its spread is implicitly fixed.
        check_reflection_model_state_with_fixed(
            experiment, mosaicity, None, fix_wavelength_spread=True
        )
        # With a wavelength model, fix one component at a time.
        for kwargs in (
            {"fix_mosaic_spread": True},
            {"fix_wavelength_spread": True},
            {"fix_unit_cell": True},
            {"fix_orientation": True},
        ):
            check_reflection_model_state_with_fixed(
                experiment, mosaicity, wavelength, **kwargs
            )
def first_derivative(func, x, h):
    """Estimate f'(x) with the five-point central-difference stencil.

    Uses f(x-2h), f(x-h), f(x+h), f(x+2h); the truncation error is O(h^4),
    so the estimate is exact (to rounding) for polynomials up to degree 4.
    """
    far_left = func(x - 2 * h)
    near_left = func(x - h)
    near_right = func(x + h)
    far_right = func(x + 2 * h)
    return (far_left - 8 * near_left + 8 * near_right - far_right) / (12 * h)
def generate_data(experiments, reflections):
    """Generate a reproducible synthetic observation for derivative tests.

    Picks one predicted reflection and fabricates mosaicity (S),
    orientation (U), unit-cell (B) and wavelength-spread (L) parameters,
    plus a perturbed observed beam vector and an observed covariance.

    Returns:
        (params, s0, h, ctot, mobs, Sobs) where params is the list
        [S_param, U_param, B_param, L_param].
    """
    from random import seed

    seed(0)  # fixed seed: every call produces identical "random" data

    # random.randint is inclusive at BOTH ends, so the upper bound must be
    # len(reflections) - 1; randint(0, len(reflections)) could return an
    # out-of-range index and raise IndexError on the lookup below.
    index = randint(0, len(reflections) - 1)

    h = reflections[index]["miller_index"]

    s0 = matrix.col(experiments[0].beam.get_s0())

    U_param = CrystalOrientationParameterisation(experiments[0].crystal)
    B_param = CrystalUnitCellParameterisation(experiments[0].crystal)

    U = matrix.sqr(experiments[0].crystal.get_U())
    B = matrix.sqr(experiments[0].crystal.get_B())
    r = U * B * matrix.col(h)
    s2 = s0 + r
    # Perturb the predicted diffracted beam slightly and renormalise to
    # |s0| to mimic an observed centroid on the Ewald sphere.
    mobs = (
        s2 + matrix.col((uniform(0, 1e-3), uniform(0, 1e-3), uniform(0, 1e-3)))
    ).normalize() * s0.length()

    b1, b2, b3, b4, b5, b6 = (
        uniform(1e-3, 3e-3),
        uniform(0.0, 1e-3),
        uniform(1e-3, 3e-3),
        uniform(0.0, 1e-3),
        uniform(0.0, 1e-3),
        uniform(1e-3, 3e-3),
    )

    S_param = (b1, b2, b3, b4, b5, b6)
    L_param = (uniform(1e-3, 2e-3),)
    ctot = randint(100, 1000)

    # Sobs = T T^T is positive semi-definite by construction.
    T = matrix.sqr((uniform(1e-3, 2e-3), 0, uniform(1e-6, 2e-6), uniform(1e-3, 2e-3)))
    Sobs = T * T.transpose()

    params = [S_param, U_param, B_param, L_param]

    return params, s0, h, ctot, mobs, Sobs
@pytest.fixture
def testdata(test_experiment):
    # Fixture bundling one experiment with synthetic model parameters and
    # a synthetic observation (see generate_data) into a named tuple.
    TestData = namedtuple(
        "TestData", ["experiment", "models", "s0", "h", "ctot", "mobs", "Sobs"]
    )

    experiments = [test_experiment]
    # Predict reflections so generate_data can sample a Miller index.
    reflections = flex.reflection_table.from_predictions_multi(experiments)

    models, s0, h, ctot, mobs, Sobs = generate_data(experiments, reflections)

    return TestData(
        experiment=experiments[0],
        models=models,
        s0=s0,
        h=h,
        ctot=ctot,
        mobs=mobs,
        Sobs=Sobs,
    )
def test_ReflectionModelState_derivatives(testdata):
    """Validate the analytic derivatives of ReflectionModelState against
    five-point finite differences, for both mosaicity models and each
    combination of one fixed component."""

    def check(
        mosaicity_parameterisation,
        wavelength_parameterisation,
        fix_mosaic_spread=False,
        fix_wavelength_spread=False,
        fix_unit_cell=False,
        fix_orientation=False,
    ):
        experiment = testdata.experiment
        models = testdata.models
        s0 = testdata.s0
        h = testdata.h
        # ctot = testdata.ctot
        # mobs = testdata.mobs
        # Sobs = testdata.Sobs

        # Seed the state with the synthetic parameter values from the fixture.
        U_params = models[1].get_param_vals()
        B_params = models[2].get_param_vals()
        M_params = np.array(models[0][: mosaicity_parameterisation.num_parameters()])
        L_params = np.array(models[3])

        state = ModelState(
            experiment,
            mosaicity_parameterisation,
            wavelength_parameterisation,
            fix_mosaic_spread=fix_mosaic_spread,
            fix_wavelength_spread=fix_wavelength_spread,
            fix_unit_cell=fix_unit_cell,
            fix_orientation=fix_orientation,
        )
        state.U_params = U_params
        state.B_params = B_params
        state.M_params = M_params
        state.L_params = L_params

        # Analytic derivatives to be verified.
        model = ReflectionModelState(state, s0, h)
        dr_dp = model.get_dr_dp()
        dS_dp = model.get_dS_dp()
        dL_dp = model.get_dL_dp()

        # Helpers that rebuild the model at a given parameter vector, so
        # finite differences can probe each quantity independently.
        def compute_sigma(parameters):
            state.active_parameters = parameters
            model = ReflectionModelState(state, s0, h)
            return model.mosaicity_covariance_matrix

        def compute_r(parameters):
            state.active_parameters = parameters
            model = ReflectionModelState(state, s0, h)
            return model.get_r()

        def compute_sigma_lambda(parameters):
            state.active_parameters = parameters
            model = ReflectionModelState(state, s0, h)
            return model.wavelength_spread

        step = 1e-6
        parameters = copy(state.active_parameters)

        # dr/dp: finite difference each parameter in turn.
        # NOTE: f closes over the loop variable i, but it is consumed by
        # first_derivative before i changes, so late binding is harmless.
        dr_num = []
        for i in range(len(parameters)):

            def f(x):
                p = copy(parameters)
                p[i] = x
                return compute_r(p)

            dr_num.append(first_derivative(f, parameters[i], step).reshape(3, 1))
        dr_num = np.concatenate(dr_num, axis=1)
        for n, c in zip(dr_num, dr_dp):
            for nn, cc in zip(n, c):
                print(nn)
                print(cc)
                assert abs(nn - cc) < 1e-7

        # dSigma/dp: same scheme for the 3x3 covariance.
        ds_num = []
        for i in range(len(parameters)):

            def f(x):
                p = copy(parameters)
                p[i] = x
                return compute_sigma(p)

            fd = first_derivative(f, parameters[i], step)
            print(fd)
            ds_num.append(fd.reshape(3, 3, 1))
        ds_num = np.concatenate(ds_num, axis=2)
        for i in range(len(parameters)):
            for n, c in zip(ds_num[:, :, i], dS_dp[:, :, i]):
                for nn, cc in zip(n.flatten(), c.flatten()):
                    print(nn)
                    print(cc)
                    assert abs(nn - cc) < 1e-5

        # d(sigma_lambda^2)/dp: wavelength-variance derivatives.
        dl_num = []
        for i in range(len(parameters)):

            def f(x):
                p = copy(parameters)
                p[i] = x
                return compute_sigma_lambda(p) ** 2

            dl_num.append(first_derivative(f, parameters[i], step))
        for n, c in zip(dl_num, dL_dp):
            assert abs(n - c) < 1e-7

    S1 = Simple1MosaicityParameterisation()
    S6 = Simple6MosaicityParameterisation()
    W = WavelengthSpreadParameterisation()
    check(S1, None, fix_wavelength_spread=True)
    check(S1, W, fix_mosaic_spread=True)
    check(S1, W, fix_wavelength_spread=True)
    check(S1, W, fix_unit_cell=True)
    check(S1, W, fix_orientation=True)
    check(S6, None, fix_wavelength_spread=True)
    check(S6, W, fix_mosaic_spread=True)
    check(S6, W, fix_wavelength_spread=True)
    check(S6, W, fix_unit_cell=True)
    check(S6, W, fix_orientation=True)
| 32.490066 | 88 | 0.623522 | from __future__ import annotations
from collections import namedtuple
from copy import copy
from random import randint, uniform
import numpy as np
import pytest
from scitbx import matrix
from dials.algorithms.profile_model.ellipsoid.parameterisation import (
Angular2MosaicityParameterisation,
Angular4MosaicityParameterisation,
ModelState,
ReflectionModelState,
Simple1MosaicityParameterisation,
Simple6MosaicityParameterisation,
WavelengthSpreadParameterisation,
)
from dials.algorithms.refinement.parameterisation.crystal_parameters import (
CrystalOrientationParameterisation,
CrystalUnitCellParameterisation,
)
from dials.array_family import flex
def test_Simple1MosaicityParameterisation():
p = Simple1MosaicityParameterisation(params=np.array([1e-3]))
assert p.is_angular() is False
assert p.num_parameters() == 1
assert p.parameters == (1e-3,)
p.parameters = np.array([2e-3])
assert p.parameters == (2e-3,)
psq = p.parameters[0] ** 2
assert list(p.sigma().flatten()) == pytest.approx([psq, 0, 0, 0, psq, 0, 0, 0, psq])
d = p.first_derivatives()
assert d.shape[0] == 1
d1 = 2 * p.parameters[0]
assert list(d[0, :, :].flatten()) == pytest.approx([d1, 0, 0, 0, d1, 0, 0, 0, d1])
def test_Simple6MosaicityParameterisation():
params = np.array([1e-3, 2e-3, 3e-3, 4e-3, 5e-3, 6e-3])
p = Simple6MosaicityParameterisation(params=params)
assert p.is_angular() is False
assert p.num_parameters() == 6
assert list(p.parameters) == pytest.approx(list(params))
params = np.array([2e-3, 3e-3, 4e-3, 5e-3, 6e-3, 7e-3])
p.parameters = params
assert list(p.parameters) == pytest.approx(list(params))
b1, b2, b3, b4, b5, b6 = params
assert p.sigma()[0, 0] == pytest.approx(b1**2)
assert p.sigma()[0, 1] == pytest.approx(b1 * b2)
assert p.sigma()[0, 2] == pytest.approx(b1 * b4)
assert p.sigma()[1, 0] == pytest.approx(b1 * b2)
assert p.sigma()[1, 1] == pytest.approx(b2**2 + b3 * b3)
assert p.sigma()[1, 2] == pytest.approx(b2 * b4 + b3 * b5)
assert p.sigma()[2, 0] == pytest.approx(b1 * b4)
assert p.sigma()[2, 1] == pytest.approx(b2 * b4 + b3 * b5)
assert p.sigma()[2, 2] == pytest.approx(b4**2 + b5**2 + b6**2)
dSdb = [
(2 * b1, b2, b4, b2, 0, 0, b4, 0, 0),
(0, b1, 0, b1, 2 * b2, b4, 0, b4, 0),
(0, 0, 0, 0, 2 * b3, b5, 0, b5, 0),
(0, 0, b1, 0, 0, b2, b1, b2, 2 * b4),
(0, 0, 0, 0, 0, b3, 0, b3, 2 * b5),
(0, 0, 0, 0, 0, 0, 0, 0, 2 * b6),
]
d = p.first_derivatives()
assert d.shape[0] == 6
for i in range(d.shape[0]):
a = dSdb[i]
b = d[i, :, :]
for j in range(9):
assert b.flatten()[j] == pytest.approx(a[j], abs=1e-12)
def test_WavelengthSpreadParameterisation():
params = np.array([1e-3])
p = WavelengthSpreadParameterisation(params=params)
assert p.num_parameters() == 1
assert p.parameters[0] == pytest.approx(params[0])
params = np.array([2e-3])
p.parameters = params
assert p.parameters[0] == pytest.approx(params[0])
assert p.sigma() == pytest.approx(params[0] ** 2)
assert p.first_derivatives()[0] == pytest.approx(2 * params[0])
def test_Angular2MosaicityParameterisation():
params = np.array([1e-3, 2e-3])
p = Angular2MosaicityParameterisation(params=params)
assert p.is_angular() is True
assert p.num_parameters() == 2
assert p.parameters[0] == pytest.approx(params[0])
assert p.parameters[1] == pytest.approx(params[1])
params = np.array([2e-3, 3e-3])
p.parameters = params
assert p.parameters[0] == pytest.approx(params[0])
assert p.parameters[1] == pytest.approx(params[1])
b1, b2 = params
assert list(p.sigma().flatten()) == pytest.approx(
[b1**2, 0, 0, 0, b1**2, 0, 0, 0, b2**2]
)
dSdb = [(2 * b1, 0, 0, 0, 2 * b1, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0, 0, 0, 2 * b2)]
d = p.first_derivatives()
assert d.shape[0] == 2
for i in range(d.shape[0]):
a = dSdb[i]
b = d[i, :, :]
for j in range(9):
assert b.flatten()[j] == pytest.approx(a[j])
def test_Angular4MosaicityParameterisation():
params = np.array([1e-3, 2e-3, 3e-3, 4e-3])
p = Angular4MosaicityParameterisation(params=params)
assert p.is_angular() is True
assert p.num_parameters() == 4
assert list(p.parameters) == pytest.approx(list(params))
params = np.array([2e-3, 3e-3, 4e-3, 5e-3])
p.parameters = params
assert list(p.parameters) == pytest.approx(list(params))
b1, b2, b3, b4 = params
assert p.sigma()[0, 0] == pytest.approx(b1**2)
assert p.sigma()[0, 1] == pytest.approx(b1 * b2)
assert p.sigma()[0, 2] == pytest.approx(0)
assert p.sigma()[1, 0] == pytest.approx(b1 * b2)
assert p.sigma()[1, 1] == pytest.approx(b2**2 + b3 * b3)
assert p.sigma()[1, 2] == pytest.approx(0)
assert p.sigma()[2, 0] == pytest.approx(0)
assert p.sigma()[2, 1] == pytest.approx(0)
assert p.sigma()[2, 2] == pytest.approx(b4**2)
dSdb = [
(2 * b1, b2, 0, b2, 0, 0, 0, 0, 0),
(0, b1, 0, b1, 2 * b2, 0, 0, 0, 0),
(0, 0, 0, 0, 2 * b3, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 0, 2 * b4),
]
d = p.first_derivatives()
assert d.shape[0] == 4
for i in range(d.shape[0]):
a = dSdb[i]
b = d[i, ::, :]
for j in range(9):
assert b.flatten()[j] == pytest.approx(a[j])
def check_model_state_with_fixed(
experiment,
mosaicity_parameterisation,
wavelength_parameterisation,
fix_mosaic_spread=False,
fix_wavelength_spread=False,
fix_unit_cell=False,
fix_orientation=False,
):
state = ModelState(
experiment,
mosaicity_parameterisation,
wavelength_parameterisation,
fix_mosaic_spread=fix_mosaic_spread,
fix_wavelength_spread=fix_wavelength_spread,
fix_unit_cell=fix_unit_cell,
fix_orientation=fix_orientation,
)
assert state.is_orientation_fixed == fix_orientation
assert state.is_unit_cell_fixed == fix_unit_cell
assert state.is_mosaic_spread_fixed == fix_mosaic_spread
assert state.is_wavelength_spread_fixed == fix_wavelength_spread
U = state.U_matrix
B = state.B_matrix
A = state.A_matrix
M = state.mosaicity_covariance_matrix
L = state.wavelength_spread
assert U.shape == (3, 3)
assert B.shape == (3, 3)
assert A.shape == (3, 3)
assert M.shape == (3, 3)
if wavelength_parameterisation is not None:
assert len(L) == 1
else:
assert len(L) == 0
assert len(state.U_params) == 3
assert len(state.B_params) == 2
assert len(state.M_params) == mosaicity_parameterisation.num_parameters()
if wavelength_parameterisation is not None:
assert len(state.L_params) == 1
else:
assert len(state.L_params) == 0
dU = state.dU_dp
dB = state.dB_dp
dM = state.dM_dp
dL = state.dL_dp
assert dU.shape[0] == 3
assert dB.shape[0] == 2
assert dM.shape[0] == mosaicity_parameterisation.num_parameters()
if wavelength_parameterisation is not None:
assert len(dL) == 1
else:
assert len(dL) == 0
params = state.active_parameters
expected_len = 0
if not fix_mosaic_spread:
expected_len += mosaicity_parameterisation.num_parameters()
if not fix_wavelength_spread:
if wavelength_parameterisation is not None:
expected_len += wavelength_parameterisation.num_parameters()
if not fix_unit_cell:
expected_len += 2
if not fix_orientation:
expected_len += 3
assert len(params) == expected_len
new_params = params
state.active_parameters = new_params
def test_ModelState(test_experiment):
experiments = [test_experiment]
S1 = Simple1MosaicityParameterisation()
S6 = Simple6MosaicityParameterisation()
W = WavelengthSpreadParameterisation()
with pytest.raises(AssertionError):
check_model_state_with_fixed(experiments[0], S1, None, fix_mosaic_spread=True)
with pytest.raises(AssertionError):
check_model_state_with_fixed(experiments[0], S1, None, fix_unit_cell=True)
with pytest.raises(AssertionError):
check_model_state_with_fixed(experiments[0], S1, None, fix_orientation=True)
check_model_state_with_fixed(experiments[0], S1, None, fix_wavelength_spread=True)
check_model_state_with_fixed(experiments[0], S1, W, fix_mosaic_spread=True)
check_model_state_with_fixed(experiments[0], S1, W, fix_wavelength_spread=True)
check_model_state_with_fixed(experiments[0], S1, W, fix_unit_cell=True)
check_model_state_with_fixed(experiments[0], S1, W, fix_orientation=True)
with pytest.raises(AssertionError):
check_model_state_with_fixed(experiments[0], S6, None, fix_mosaic_spread=True)
with pytest.raises(AssertionError):
check_model_state_with_fixed(experiments[0], S6, None, fix_unit_cell=True)
with pytest.raises(AssertionError):
check_model_state_with_fixed(experiments[0], S6, None, fix_orientation=True)
check_model_state_with_fixed(experiments[0], S6, None, fix_wavelength_spread=True)
check_model_state_with_fixed(experiments[0], S6, W, fix_mosaic_spread=True)
check_model_state_with_fixed(experiments[0], S6, W, fix_wavelength_spread=True)
check_model_state_with_fixed(experiments[0], S6, W, fix_unit_cell=True)
check_model_state_with_fixed(experiments[0], S6, W, fix_orientation=True)
def check_reflection_model_state_with_fixed(
experiment,
mosaicity_parameterisation,
wavelength_parameterisation,
fix_mosaic_spread=False,
fix_wavelength_spread=False,
fix_unit_cell=False,
fix_orientation=False,
):
state = ModelState(
experiment,
mosaicity_parameterisation,
wavelength_parameterisation,
fix_mosaic_spread=fix_mosaic_spread,
fix_wavelength_spread=fix_wavelength_spread,
fix_unit_cell=fix_unit_cell,
fix_orientation=fix_orientation,
)
model = ReflectionModelState(
state, matrix.col(experiment.beam.get_s0()), matrix.col((1, 1, 1))
)
assert list(model.mosaicity_covariance_matrix.flatten()) == list(
mosaicity_parameterisation.sigma().flatten()
)
assert list(model.get_r().flatten()) == pytest.approx(
np.matmul(state.A_matrix, np.array([1, 1, 1]).reshape(3, 1))[:, 0].tolist(),
abs=1e-6,
)
if wavelength_parameterisation is not None:
assert model.wavelength_spread == wavelength_parameterisation.sigma()
else:
assert model.wavelength_spread == 0
dS_dp = model.get_dS_dp()
dr_dp = model.get_dr_dp()
dL_dp = model.get_dL_dp()
assert dS_dp.shape[2] == len(state.parameter_labels)
assert dr_dp.shape[1] == len(state.parameter_labels)
assert len(dL_dp) == len(state.parameter_labels)
if not fix_wavelength_spread:
assert dr_dp[:, -1].flatten() == pytest.approx([0, 0, 0], abs=1e-6)
assert dS_dp[:, :, -1].flatten() == pytest.approx(
(0, 0, 0, 0, 0, 0, 0, 0, 0), abs=1e-6
)
dr_dp = dr_dp[:, :-1]
dS_dp = dS_dp[:, :, :-1]
dL_dp = dL_dp[:-1]
if not fix_mosaic_spread:
num_params = mosaicity_parameterisation.num_parameters()
for i in range(num_params):
assert dr_dp[:, -(i + 1)] == pytest.approx([0, 0, 0], abs=1e-6)
assert dS_dp[:, :, -(i + 1)] == pytest.approx(
state.dM_dp[-(i + 1), :, :], abs=1e-6
)
assert dL_dp[-1] == 0
dr_dp = dr_dp[:, :-num_params]
dS_dp = dS_dp[:, :, :-num_params]
dL_dp = dL_dp[:-num_params]
if not fix_unit_cell:
num_params = state.B_params.size
for i in range(num_params):
assert dS_dp[:, :, -(i + 1)].flatten() == pytest.approx(
(0, 0, 0, 0, 0, 0, 0, 0, 0), abs=1e-6
)
assert dL_dp[-(i + 1)] == 0
dr_dp = dr_dp[:, :-num_params]
dS_dp = dS_dp[:, :, :-num_params]
dL_dp = dL_dp[:-num_params]
if not fix_orientation:
num_params = state.U_params.size
for i in range(num_params):
assert dS_dp[:, :, -(i + 1)].flatten() == pytest.approx(
(0, 0, 0, 0, 0, 0, 0, 0, 0), abs=1e-6
)
assert dL_dp[-(i + 1)] == 0
dr_dp = dr_dp[:, :-num_params]
dS_dp = dS_dp[:, :, :-num_params]
dL_dp = dL_dp[:-num_params]
def test_ReflectionModelState(test_experiment):
    """Exercise ReflectionModelState for both mosaicity parameterisations
    under every supported fixed-parameter configuration."""
    experiments = [test_experiment]
    S1 = Simple1MosaicityParameterisation()
    S6 = Simple6MosaicityParameterisation()
    W = WavelengthSpreadParameterisation()
    for mosaicity in (S1, S6):
        # Without a wavelength model the spread must be fixed.
        check_reflection_model_state_with_fixed(
            experiments[0], mosaicity, None, fix_wavelength_spread=True
        )
        # With a wavelength model, fix each parameter group in turn.
        for fixed_flag in (
            "fix_mosaic_spread",
            "fix_wavelength_spread",
            "fix_unit_cell",
            "fix_orientation",
        ):
            check_reflection_model_state_with_fixed(
                experiments[0], mosaicity, W, **{fixed_flag: True}
            )
def first_derivative(func, x, h):
    """Numerically estimate d(func)/dx at ``x``.

    Uses the five-point (fourth-order accurate) central finite-difference
    stencil with step size ``h``; exact (up to rounding) for polynomials of
    degree four or less.
    """
    numerator = (
        -func(x + 2 * h)
        + 8 * func(x + h)
        - 8 * func(x - h)
        + func(x - 2 * h)
    )
    return numerator / (12 * h)
def generate_data(experiments, reflections):
    """Draw one random reflection plus randomised model parameters for tests.

    Arguments:
        experiments: An experiment list; only the first experiment is used.
        reflections: A reflection table from which one miller index is drawn.

    Returns:
        Tuple ``(params, s0, h, ctot, mobs, Sobs)`` where ``params`` is the
        list ``[S_param, U_param, B_param, L_param]``.
    """
    from random import seed

    seed(0)
    # random.randint includes BOTH endpoints, so a valid index upper bound is
    # len(reflections) - 1; the previous randint(0, len(reflections)) could
    # return an out-of-range index and raise IndexError.
    index = randint(0, len(reflections) - 1)
    h = reflections[index]["miller_index"]
    s0 = matrix.col(experiments[0].beam.get_s0())
    # Parameterisations of the crystal orientation (U) and unit cell (B).
    U_param = CrystalOrientationParameterisation(experiments[0].crystal)
    B_param = CrystalUnitCellParameterisation(experiments[0].crystal)
    U = matrix.sqr(experiments[0].crystal.get_U())
    B = matrix.sqr(experiments[0].crystal.get_B())
    r = U * B * matrix.col(h)
    s2 = s0 + r
    # Slightly perturb s2 and rescale onto the Ewald sphere to synthesise an
    # "observed" diffracted beam direction.
    mobs = (
        s2 + matrix.col((uniform(0, 1e-3), uniform(0, 1e-3), uniform(0, 1e-3)))
    ).normalize() * s0.length()
    # Random parameters for the six-parameter mosaicity model.
    b1, b2, b3, b4, b5, b6 = (
        uniform(1e-3, 3e-3),
        uniform(0.0, 1e-3),
        uniform(1e-3, 3e-3),
        uniform(0.0, 1e-3),
        uniform(0.0, 1e-3),
        uniform(1e-3, 3e-3),
    )
    S_param = (b1, b2, b3, b4, b5, b6)
    L_param = (uniform(1e-3, 2e-3),)
    ctot = randint(100, 1000)
    # A near-triangular T gives a positive semi-definite Sobs = T * T^t.
    T = matrix.sqr((uniform(1e-3, 2e-3), 0, uniform(1e-6, 2e-6), uniform(1e-3, 2e-3)))
    Sobs = T * T.transpose()
    params = [S_param, U_param, B_param, L_param]
    return params, s0, h, ctot, mobs, Sobs
@pytest.fixture
def testdata(test_experiment):
    """Bundle one experiment with randomly generated model parameters and
    observables into a named tuple for the derivative tests."""
    fields = ("experiment", "models", "s0", "h", "ctot", "mobs", "Sobs")
    TestData = namedtuple("TestData", list(fields))
    experiments = [test_experiment]
    predicted = flex.reflection_table.from_predictions_multi(experiments)
    generated = generate_data(experiments, predicted)
    # generated unpacks as (models, s0, h, ctot, mobs, Sobs).
    return TestData(experiments[0], *generated)
def test_ReflectionModelState_derivatives(testdata):
    """Check the analytical derivatives exposed by ReflectionModelState
    against numerical (finite-difference) derivatives, for both mosaicity
    parameterisations and several fixed-parameter configurations."""
    def check(
        mosaicity_parameterisation,
        wavelength_parameterisation,
        fix_mosaic_spread=False,
        fix_wavelength_spread=False,
        fix_unit_cell=False,
        fix_orientation=False,
    ):
        experiment = testdata.experiment
        models = testdata.models
        s0 = testdata.s0
        h = testdata.h
        # Initialise the model state from the randomly generated parameters
        # (models = [S_param, U_param, B_param, L_param] from generate_data).
        U_params = models[1].get_param_vals()
        B_params = models[2].get_param_vals()
        M_params = np.array(models[0][: mosaicity_parameterisation.num_parameters()])
        L_params = np.array(models[3])
        state = ModelState(
            experiment,
            mosaicity_parameterisation,
            wavelength_parameterisation,
            fix_mosaic_spread=fix_mosaic_spread,
            fix_wavelength_spread=fix_wavelength_spread,
            fix_unit_cell=fix_unit_cell,
            fix_orientation=fix_orientation,
        )
        state.U_params = U_params
        state.B_params = B_params
        state.M_params = M_params
        state.L_params = L_params
        model = ReflectionModelState(state, s0, h)
        # Analytical derivatives to be validated against finite differences.
        dr_dp = model.get_dr_dp()
        dS_dp = model.get_dS_dp()
        dL_dp = model.get_dL_dp()
        # Helpers evaluating model quantities for a given active-parameter
        # vector; these are the functions being numerically differentiated.
        def compute_sigma(parameters):
            state.active_parameters = parameters
            model = ReflectionModelState(state, s0, h)
            return model.mosaicity_covariance_matrix
        def compute_r(parameters):
            state.active_parameters = parameters
            model = ReflectionModelState(state, s0, h)
            return model.get_r()
        def compute_sigma_lambda(parameters):
            state.active_parameters = parameters
            model = ReflectionModelState(state, s0, h)
            return model.wavelength_spread
        step = 1e-6
        parameters = copy(state.active_parameters)
        # Finite-difference derivative of r w.r.t. each active parameter.
        dr_num = []
        for i in range(len(parameters)):
            def f(x):
                # Perturb only parameter i; i is bound per-iteration because
                # f is called before the loop advances.
                p = copy(parameters)
                p[i] = x
                return compute_r(p)
            dr_num.append(first_derivative(f, parameters[i], step).reshape(3, 1))
        dr_num = np.concatenate(dr_num, axis=1)
        for n, c in zip(dr_num, dr_dp):
            for nn, cc in zip(n, c):
                print(nn)
                print(cc)
                assert abs(nn - cc) < 1e-7
        # Same comparison for the mosaicity covariance matrix Sigma.
        ds_num = []
        for i in range(len(parameters)):
            def f(x):
                p = copy(parameters)
                p[i] = x
                return compute_sigma(p)
            fd = first_derivative(f, parameters[i], step)
            print(fd)
            ds_num.append(fd.reshape(3, 3, 1))
        ds_num = np.concatenate(ds_num, axis=2)
        for i in range(len(parameters)):
            for n, c in zip(ds_num[:, :, i], dS_dp[:, :, i]):
                for nn, cc in zip(n.flatten(), c.flatten()):
                    print(nn)
                    print(cc)
                    assert abs(nn - cc) < 1e-5
        # And for the squared wavelength spread (dL_dp is the derivative of
        # the variance, hence the ** 2 below).
        dl_num = []
        for i in range(len(parameters)):
            def f(x):
                p = copy(parameters)
                p[i] = x
                return compute_sigma_lambda(p) ** 2
            dl_num.append(first_derivative(f, parameters[i], step))
        for n, c in zip(dl_num, dL_dp):
            assert abs(n - c) < 1e-7
    S1 = Simple1MosaicityParameterisation()
    S6 = Simple6MosaicityParameterisation()
    W = WavelengthSpreadParameterisation()
    # Exercise every combination of mosaicity model and fixed parameters.
    check(S1, None, fix_wavelength_spread=True)
    check(S1, W, fix_mosaic_spread=True)
    check(S1, W, fix_wavelength_spread=True)
    check(S1, W, fix_unit_cell=True)
    check(S1, W, fix_orientation=True)
    check(S6, None, fix_wavelength_spread=True)
    check(S6, W, fix_mosaic_spread=True)
    check(S6, W, fix_wavelength_spread=True)
    check(S6, W, fix_unit_cell=True)
    check(S6, W, fix_orientation=True)
| true | true |
1c2d8c2c711bf8ea9496e06fa1fe208338b90f0a | 15,600 | py | Python | pipelines.py | ufal/augpt | fa8a57961ed1d8fe6099978c489c0b0f8956d64e | [
"MIT"
] | 11 | 2021-02-05T15:37:59.000Z | 2022-03-24T14:33:08.000Z | pipelines.py | ufal/augpt | fa8a57961ed1d8fe6099978c489c0b0f8956d64e | [
"MIT"
] | 6 | 2021-03-14T10:55:09.000Z | 2021-07-22T09:42:14.000Z | pipelines.py | ufal/augpt | fa8a57961ed1d8fe6099978c489c0b0f8956d64e | [
"MIT"
] | 1 | 2021-02-28T22:22:33.000Z | 2021-02-28T22:22:33.000Z | from typing import Union, List, Optional
import logging
import uuid
from uuid import UUID
import transformers
from functools import partial
from collections import OrderedDict
from model import ModelPredictor
from data import BeliefParser
from utils import AutoDatabase, AutoLexicalizer
logger = logging.getLogger()
def get_context_from_conversation(user, system):
context = []
user.reverse()
system.reverse()
user.append(None)
system.append(None)
for user_input, system_response in zip(user, system):
if user_input is not None:
context.append(user_input)
if system_response is not None:
context.append(system_response)
context.reverse()
return context
# TODO: upgrade to newer transformers
# Back-port shim: older transformers releases do not ship a Conversation
# class, so a vendored copy is defined here; on newer releases the upstream
# class is reused unchanged (else branch below).
if not hasattr(transformers, 'Conversation'):
    class Conversation:
        """
        Utility class containing a conversation and its history. This class is meant to be used as an input to the
        :class:`~transformers.ConversationalPipeline`. The conversation contains a number of utility function to manage the
        addition of new user input and generated model responses. A conversation needs to contain an unprocessed user input
        before being passed to the :class:`~transformers.ConversationalPipeline`. This user input is either created when
        the class is instantiated, or by calling :obj:`conversional_pipeline.append_response("input")` after a conversation
        turn.
        Arguments:
            text (:obj:`str`, `optional`):
                The initial user input to start the conversation. If not provided, a user input needs to be provided
                manually using the :meth:`~transformers.Conversation.add_user_input` method before the conversation can
                begin.
            conversation_id (:obj:`uuid.UUID`, `optional`):
                Unique identifier for the conversation. If not provided, a random UUID4 id will be assigned to the
                conversation.
        Usage::
            conversation = Conversation("Going to the movies tonight - any suggestions?")
            # Steps usually performed by the model when generating a response:
            # 1. Mark the user input as processed (moved to the history)
            conversation.mark_processed()
            # 2. Append a mode response
            conversation.append_response("The Big lebowski.")
            conversation.add_user_input("Is it good?")
        """
        def __init__(self, text: str = None, conversation_id: UUID = None):
            if not conversation_id:
                # Assign a random identifier when none was supplied.
                conversation_id = uuid.uuid4()
            self.uuid: UUID = conversation_id
            self.past_user_inputs: List[str] = []
            self.generated_responses: List[str] = []
            self.history: List[int] = []
            self.new_user_input: Optional[str] = text
        def add_user_input(self, text: str, overwrite: bool = False):
            """
            Add a user input to the conversation for the next round. This populates the internal :obj:`new_user_input`
            field.
            Args:
                text (:obj:`str`): The user input for the next conversation round.
                overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
                    Whether or not existing and unprocessed user input should be overwritten when this function is called.
            """
            if self.new_user_input:
                # An unprocessed input already exists: either replace it or
                # keep it, depending on `overwrite`; warn in both cases.
                if overwrite:
                    logger.warning(
                        'User input added while unprocessed input was existing: "{}" was overwritten with: "{}".'.format(
                            self.new_user_input, text
                        )
                    )
                    self.new_user_input = text
                else:
                    logger.warning(
                        'User input added while unprocessed input was existing: "{}" new input ignored: "{}". '
                        "Set `overwrite` to True to overwrite unprocessed user input".format(self.new_user_input, text)
                    )
            else:
                self.new_user_input = text
        def mark_processed(self):
            """
            Mark the conversation as processed (moves the content of :obj:`new_user_input` to :obj:`past_user_inputs`) and
            empties the :obj:`new_user_input` field.
            """
            if self.new_user_input:
                self.past_user_inputs.append(self.new_user_input)
            self.new_user_input = None
        def append_response(self, response: str):
            """
            Append a response to the list of generated responses.
            Args:
                response (:obj:`str`): The model generated response.
            """
            self.generated_responses.append(response)
        def set_history(self, history: List[int]):
            """
            Updates the value of the history of the conversation. The history is represented by a list of :obj:`token_ids`.
            The history is used by the model to generate responses based on the previous conversation turns.
            Args:
                history (:obj:`List[int]`): Historyof tokens provided and generated for this conversation.
            """
            self.history = history
        def __repr__(self):
            """
            Generates a string representation of the conversation.
            Return:
                :obj:`str`:
            Example:
                Conversation id: 7d15686b-dc94-49f2-9c4b-c9eac6a1f114
                user >> Going to the movies tonight - any suggestions?
                bot >> The Big Lebowski
            """
            output = "Conversation id: {} \n".format(self.uuid)
            for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
                output += "user >> {} \n".format(user_input)
                output += "bot >> {} \n".format(generated_response)
            if self.new_user_input is not None:
                output += "user >> {} \n".format(self.new_user_input)
            return output
else:
    # Newer transformers already provides the class; just alias it.
    Conversation = transformers.Conversation
class AuGPTConversation(Conversation):
    """Conversation subclass carrying AuGPT-specific per-turn annotations.

    On top of the base ``Conversation`` state this tracks the generated
    belief state, database results, the raw (delexicalized) response and
    optional oracle belief/database overrides. All of these are cleared
    whenever a new user input is added, since they describe the previous
    turn only.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._reset_turn_state()

    def add_user_input(self, *args, **kwargs):
        super().add_user_input(*args, **kwargs)
        # A new user input invalidates everything generated for the
        # previous turn.
        self._reset_turn_state()

    def _reset_turn_state(self):
        # Clear all per-turn annotations filled in by the pipeline.
        self.generated_belief = None
        self.database_results = None
        self.raw_response = None
        self.oracle_belief = None
        self.oracle_database_results = None
class AuGPTConversationalPipeline(transformers.Pipeline):
    """
    Multi-turn conversational pipeline.
    This conversational pipeline can currently be loaded from :func:`~transformers.pipeline` using the following
    task identifier: :obj:`"augpt-conversational"`.
    AuGPTConversationalPipeline is similar to `transformers.ConversationalPipeline`,
    but supports database and lexicalization. The interface could be the same, or if `AuGPTConversation`
    type is passed as the input, additional fields are filled by the Pipeline.
    The models that this pipeline can use are models that have been fine-tuned on a multi-turn conversational task,
    currently: `'jkulhanek/augpt-mw-21'`.
    Usage::
        conversational_pipeline = pipeline("conversational")
        conversation_1 = Conversation("Going to the movies tonight - any suggestions?")
        conversation_2 = Conversation("What's the last book you have read?")
        conversational_pipeline([conversation_1, conversation_2])
        conversation_1.add_user_input("Is it an action movie?")
        conversation_2.add_user_input("What is the genre of this book?")
        conversational_pipeline([conversation_1, conversation_2])
    """
    def __init__(self, lexicalizer=None, database=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # A string lexicalizer/database is treated as a name or path and
        # loaded lazily here; otherwise the object is used as-is.
        self.lexicalizer = lexicalizer
        if isinstance(lexicalizer, str):
            self.lexicalizer = AutoLexicalizer.load(lexicalizer)
        self.database = database
        if isinstance(database, str):
            self.database = AutoDatabase.load(database)
        self.predictor = ModelPredictor(self.model, self.tokenizer, device=self.device)
        self.parse_belief = BeliefParser()
    def __call__(self, conversations: Union[AuGPTConversation, Conversation, List[Union[AuGPTConversation, Conversation]]]):
        r"""
        Generate responses for the conversation(s) given as inputs.
        Args:
            conversations (a :class:`~transformers.Conversation` or a list of :class:`~transformers.Conversation`):
                Conversations to generate responses for. If `AuGPTConversation` instances are passed as the input,
                additional information is returned from the system, e.g., database results, belief state and
                delexicalized response.
        Returns:
            :class:`~transformers.Conversation` or a list of :class:`~transformers.Conversation`: Conversation(s) with
            updated generated responses for those containing a new user input.
        """
        # Input validation
        if isinstance(conversations, list):
            for conversation in conversations:
                assert isinstance(
                    conversation, Conversation
                ), "AuGPTConversationalPipeline expects a Conversation or list of Conversations as an input"
                if conversation.new_user_input is None:
                    raise ValueError(
                        "Conversation with UUID {} does not contain new user input to process. "
                        "Add user inputs with the conversation's `add_user_input` method".format(
                            type(conversation.uuid)
                        )
                    )
            assert (
                self.tokenizer.pad_token_id is not None or self.tokenizer.eos_token_id is not None
            ), "Please make sure that the tokenizer has a pad_token_id or eos_token_id when using a batch input"
        elif isinstance(conversations, Conversation):
            conversations = [conversations]
        else:
            raise ValueError("AuGPTConversationalPipeline expects a Conversation or list of Conversations as an input")
        with self.device_placement():
            # Pipeline stages: build contexts -> predict belief states ->
            # query database -> generate delexicalized responses -> lexicalize.
            contexts = AuGPTConversationalPipeline._get_contexts_from_conversations(conversations)
            original_belief_strs = self.predictor.predict_belief(contexts)
            # Oracle values set on AuGPTConversation instances (if any)
            # override the predicted belief state / database results.
            oracle_beliefs = [getattr(x, 'oracle_belief', None) for x in conversations]
            oracle_dbs_results = [getattr(x, 'oracle_database_results', None) for x in conversations]
            beliefs = [oracle_belief if oracle_belief is not None else self.parse_belief(belief_str)
                       for oracle_belief, belief_str in zip(oracle_beliefs, original_belief_strs)]
            dbs_results = [oracle_db if oracle_db is not None else self.database(bs, return_results=True)
                           for oracle_db, bs in zip(oracle_dbs_results, beliefs)]
            # Database entries may be (summary, results) tuples; the response
            # predictor only needs the first element.
            dbs = [OrderedDict((k, x[0] if isinstance(x, tuple) else x) for k, x in db.items()) for db in dbs_results]
            delex_responses = self.predictor.predict_response(contexts, original_belief_strs, dbs)
            responses = [self._lexicalise(response, db, bf, ctx)
                         for response, bf, db, ctx in zip(delex_responses, beliefs, dbs_results, contexts)]
            output = []
            for conversation_index, (conversation, response, belief, db, delex) \
                    in enumerate(zip(conversations, responses, original_belief_strs, dbs_results, delex_responses)):
                conversation.mark_processed()
                conversation.append_response(response)
                # Fill AuGPT-specific fields only when the object exposes them
                # (i.e. it is an AuGPTConversation).
                if hasattr(conversation, 'generated_belief'):
                    conversation.generated_belief = belief
                if hasattr(conversation, 'database_results'):
                    conversation.database_results = db
                if hasattr(conversation, 'raw_response'):
                    conversation.raw_response = delex
                output.append(conversation)
            if len(output) == 1:
                return output[0]
            else:
                return output
    def save_pretrained(self, save_directory):
        # Persist model/tokenizer plus the AuGPT-specific assets.
        super().save_pretrained(save_directory)
        if self.lexicalizer is not None:
            self.lexicalizer.save(save_directory)
        self.database.save(save_directory)
    @staticmethod
    def _get_contexts_from_conversations(conversations):
        # Convert each Conversation into a flat, alternating utterance list.
        def _get_context_from_conversation(conversation):
            user = list(conversation.past_user_inputs)
            if conversation.new_user_input is not None:
                user.append(conversation.new_user_input)
            system = list(conversation.generated_responses)
            return get_context_from_conversation(user, system)
        return list(map(_get_context_from_conversation, conversations))
    def _lexicalise(self, response, db, belief, context):
        # Fill delexicalized placeholders using the database results; without
        # a lexicalizer the raw delexicalized response is returned unchanged.
        if self.lexicalizer is None:
            return response
        return self.lexicalizer(response, db, belief=belief, context=context)
# Registering the pipeline with transformers if desired
transformers.pipelines.SUPPORTED_TASKS["augpt-conversational"] = {
    "impl": AuGPTConversationalPipeline,
    # Model classes used when `pipeline()` must instantiate the model itself;
    # only the available backends are registered.
    "tf": transformers.TFAutoModelForCausalLM if transformers.is_tf_available() else None,
    "pt": transformers.AutoModelForCausalLM if transformers.is_torch_available() else None,
    # Checkpoint used when no model is passed explicitly.
    "default": {"model": {"pt": "jkulhanek/augpt-mw-21", "tf": "jkulhanek/augpt-mw-21"}}
}
# Utility function for transformers to call `pipeline('augpt-conversational')` with default model.
# Keep a reference to the original factory so augpt_pipeline can delegate to it.
__old_pipeline = transformers.pipeline
def augpt_pipeline(task: str, model=None, *args, **kwargs) -> transformers.Pipeline:  # noqa
    """Drop-in replacement for :func:`transformers.pipeline`.

    For the ``'augpt-conversational'`` task the ``lexicalizer`` and
    ``database`` kwargs default to ``'default'`` and are then inferred from
    the model (or config) name when that name is a string. Every other task
    is forwarded unchanged to the original factory.

    Raises:
        Exception: if a resource is left at ``'default'`` but neither the
            model nor the config was given as a string to infer it from.
    """
    if task == 'augpt-conversational':
        model_name = model if model is not None else 'jkulhanek/augpt-mw-21'
        config = kwargs.get('config', None)

        def _resolve(current, error_message):
            # 'default' means: reuse the model (or config) identifier, which
            # is only possible when it was supplied as a string.
            if current != 'default':
                return current
            if isinstance(model_name, str):
                return model_name
            if isinstance(config, str):
                return config
            # Impossible to guess the right resource from non-string inputs.
            raise Exception(error_message)

        kwargs['lexicalizer'] = _resolve(
            kwargs.get('lexicalizer', 'default'),
            "Impossible to guess which lexicalizer to use. ")
        kwargs['database'] = _resolve(
            kwargs.get('database', 'default'),
            "Impossible to guess which database to use. ")
    return __old_pipeline(task, model, *args, **kwargs)
# Monkey-patch the factory so that both `transformers.pipeline(...)` and
# `transformers.pipelines.pipeline(...)` route through augpt_pipeline.
transformers.pipeline = augpt_pipeline
transformers.pipelines.pipeline = augpt_pipeline
| 44.827586 | 124 | 0.633462 | from typing import Union, List, Optional
import logging
import uuid
from uuid import UUID
import transformers
from functools import partial
from collections import OrderedDict
from model import ModelPredictor
from data import BeliefParser
from utils import AutoDatabase, AutoLexicalizer
logger = logging.getLogger()
def get_context_from_conversation(user, system):
context = []
user.reverse()
system.reverse()
user.append(None)
system.append(None)
for user_input, system_response in zip(user, system):
if user_input is not None:
context.append(user_input)
if system_response is not None:
context.append(system_response)
context.reverse()
return context
if not hasattr(transformers, 'Conversation'):
class Conversation:
def __init__(self, text: str = None, conversation_id: UUID = None):
if not conversation_id:
conversation_id = uuid.uuid4()
self.uuid: UUID = conversation_id
self.past_user_inputs: List[str] = []
self.generated_responses: List[str] = []
self.history: List[int] = []
self.new_user_input: Optional[str] = text
def add_user_input(self, text: str, overwrite: bool = False):
if self.new_user_input:
if overwrite:
logger.warning(
'User input added while unprocessed input was existing: "{}" was overwritten with: "{}".'.format(
self.new_user_input, text
)
)
self.new_user_input = text
else:
logger.warning(
'User input added while unprocessed input was existing: "{}" new input ignored: "{}". '
"Set `overwrite` to True to overwrite unprocessed user input".format(self.new_user_input, text)
)
else:
self.new_user_input = text
def mark_processed(self):
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input)
self.new_user_input = None
def append_response(self, response: str):
self.generated_responses.append(response)
def set_history(self, history: List[int]):
self.history = history
def __repr__(self):
output = "Conversation id: {} \n".format(self.uuid)
for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
output += "user >> {} \n".format(user_input)
output += "bot >> {} \n".format(generated_response)
if self.new_user_input is not None:
output += "user >> {} \n".format(self.new_user_input)
return output
else:
Conversation = transformers.Conversation
class AuGPTConversation(Conversation):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.generated_belief = None
self.database_results = None
self.raw_response = None
self.oracle_belief = None
self.oracle_database_results = None
def add_user_input(self, *args, **kwargs):
super().add_user_input(*args, **kwargs)
self.generated_belief = None
self.database_results = None
self.raw_response = None
self.oracle_belief = None
self.oracle_database_results = None
class AuGPTConversationalPipeline(transformers.Pipeline):
def __init__(self, lexicalizer=None, database=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.lexicalizer = lexicalizer
if isinstance(lexicalizer, str):
self.lexicalizer = AutoLexicalizer.load(lexicalizer)
self.database = database
if isinstance(database, str):
self.database = AutoDatabase.load(database)
self.predictor = ModelPredictor(self.model, self.tokenizer, device=self.device)
self.parse_belief = BeliefParser()
def __call__(self, conversations: Union[AuGPTConversation, Conversation, List[Union[AuGPTConversation, Conversation]]]):
if isinstance(conversations, list):
for conversation in conversations:
assert isinstance(
conversation, Conversation
), "AuGPTConversationalPipeline expects a Conversation or list of Conversations as an input"
if conversation.new_user_input is None:
raise ValueError(
"Conversation with UUID {} does not contain new user input to process. "
"Add user inputs with the conversation's `add_user_input` method".format(
type(conversation.uuid)
)
)
assert (
self.tokenizer.pad_token_id is not None or self.tokenizer.eos_token_id is not None
), "Please make sure that the tokenizer has a pad_token_id or eos_token_id when using a batch input"
elif isinstance(conversations, Conversation):
conversations = [conversations]
else:
raise ValueError("AuGPTConversationalPipeline expects a Conversation or list of Conversations as an input")
with self.device_placement():
contexts = AuGPTConversationalPipeline._get_contexts_from_conversations(conversations)
original_belief_strs = self.predictor.predict_belief(contexts)
oracle_beliefs = [getattr(x, 'oracle_belief', None) for x in conversations]
oracle_dbs_results = [getattr(x, 'oracle_database_results', None) for x in conversations]
beliefs = [oracle_belief if oracle_belief is not None else self.parse_belief(belief_str)
for oracle_belief, belief_str in zip(oracle_beliefs, original_belief_strs)]
dbs_results = [oracle_db if oracle_db is not None else self.database(bs, return_results=True)
for oracle_db, bs in zip(oracle_dbs_results, beliefs)]
dbs = [OrderedDict((k, x[0] if isinstance(x, tuple) else x) for k, x in db.items()) for db in dbs_results]
delex_responses = self.predictor.predict_response(contexts, original_belief_strs, dbs)
responses = [self._lexicalise(response, db, bf, ctx)
for response, bf, db, ctx in zip(delex_responses, beliefs, dbs_results, contexts)]
output = []
for conversation_index, (conversation, response, belief, db, delex) \
in enumerate(zip(conversations, responses, original_belief_strs, dbs_results, delex_responses)):
conversation.mark_processed()
conversation.append_response(response)
if hasattr(conversation, 'generated_belief'):
conversation.generated_belief = belief
if hasattr(conversation, 'database_results'):
conversation.database_results = db
if hasattr(conversation, 'raw_response'):
conversation.raw_response = delex
output.append(conversation)
if len(output) == 1:
return output[0]
else:
return output
def save_pretrained(self, save_directory):
super().save_pretrained(save_directory)
if self.lexicalizer is not None:
self.lexicalizer.save(save_directory)
self.database.save(save_directory)
@staticmethod
def _get_contexts_from_conversations(conversations):
def _get_context_from_conversation(conversation):
user = list(conversation.past_user_inputs)
if conversation.new_user_input is not None:
user.append(conversation.new_user_input)
system = list(conversation.generated_responses)
return get_context_from_conversation(user, system)
return list(map(_get_context_from_conversation, conversations))
def _lexicalise(self, response, db, belief, context):
if self.lexicalizer is None:
return response
return self.lexicalizer(response, db, belief=belief, context=context)
# Registering the pipeline with transformers if desired
transformers.pipelines.SUPPORTED_TASKS["augpt-conversational"] = {
"impl": AuGPTConversationalPipeline,
"tf": transformers.TFAutoModelForCausalLM if transformers.is_tf_available() else None,
"pt": transformers.AutoModelForCausalLM if transformers.is_torch_available() else None,
"default": {"model": {"pt": "jkulhanek/augpt-mw-21", "tf": "jkulhanek/augpt-mw-21"}}
}
# Utility function for transformers to call `pipeline('augpt-conversational')` with default model.
__old_pipeline = transformers.pipeline
def augpt_pipeline(task: str, model: Optional = None, *args, **kwargs) -> transformers.Pipeline: # noqa
if task == 'augpt-conversational':
lexicalizer = kwargs.get('lexicalizer', 'default')
database = kwargs.get('database', 'default')
config = kwargs.get('config', None)
model_name = model
if model_name is None:
model_name = 'jkulhanek/augpt-mw-21'
# Try to infer database and lexicalizer from model or config name (if provided as str)
if lexicalizer == 'default':
if isinstance(model_name, str):
lexicalizer = model_name
elif isinstance(config, str):
lexicalizer = config
else:
# Impossible to guest what is the right tokenizer here
raise Exception(
"Impossible to guess which lexicalizer to use. "
)
kwargs['lexicalizer'] = lexicalizer
if database == 'default':
if isinstance(model_name, str):
database = model_name
elif isinstance(config, str):
database = config
else:
# Impossible to guest what is the right tokenizer here
raise Exception(
"Impossible to guess which database to use. "
)
kwargs['database'] = database
return __old_pipeline(task, model, *args, **kwargs)
transformers.pipeline = augpt_pipeline
transformers.pipelines.pipeline = augpt_pipeline
| true | true |
1c2d8cafbefd1b3e84db79cf567a91cb465e6fb1 | 40,871 | py | Python | tensorflow/python/keras/layers/core.py | plopresti/tensorflow | 8b0c84d30d957596cbb3bcac9245e114c3f0b65b | [
"Apache-2.0"
] | 1 | 2019-07-15T08:40:24.000Z | 2019-07-15T08:40:24.000Z | tensorflow/python/keras/layers/core.py | plopresti/tensorflow | 8b0c84d30d957596cbb3bcac9245e114c3f0b65b | [
"Apache-2.0"
] | 3 | 2020-10-14T00:35:40.000Z | 2022-02-09T22:35:09.000Z | tensorflow/python/keras/layers/core.py | plopresti/tensorflow | 8b0c84d30d957596cbb3bcac9245e114c3f0b65b | [
"Apache-2.0"
] | 1 | 2020-01-19T16:33:55.000Z | 2020-01-19T16:33:55.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Core Keras layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import sys
import types as python_types
import warnings
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import activations
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.utils import conv_utils
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import standard_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.Masking')
class Masking(Layer):
  """Masks a sequence by using a mask value to skip timesteps.
  For each timestep in the input tensor (dimension #1 in the tensor),
  if all values in the input tensor at that timestep
  are equal to `mask_value`, then the timestep will be masked (skipped)
  in all downstream layers (as long as they support masking).
  If any downstream layer does not support masking yet receives such
  an input mask, an exception will be raised.
  Example:
  Consider a Numpy data array `x` of shape `(samples, timesteps, features)`,
  to be fed to an LSTM layer.
  You want to mask timestep #3 and #5 because you lack data for
  these timesteps. You can:
  - Set `x[:, 3, :] = 0.` and `x[:, 5, :] = 0.`
  - Insert a `Masking` layer with `mask_value=0.` before the LSTM layer:
  ```python
  model = Sequential()
  model.add(Masking(mask_value=0., input_shape=(timesteps, features)))
  model.add(LSTM(32))
  ```
  """
  def __init__(self, mask_value=0., **kwargs):
    super(Masking, self).__init__(**kwargs)
    self.supports_masking = True
    self.mask_value = mask_value
    # Signals the framework that `call` attaches the mask to its output
    # directly (via `_keras_mask`) rather than relying on `compute_mask`
    # being invoked separately.
    self._compute_output_and_mask_jointly = True
  def compute_mask(self, inputs, mask=None):
    # A timestep is kept if any feature differs from `mask_value`.
    return K.any(math_ops.not_equal(inputs, self.mask_value), axis=-1)
  def call(self, inputs):
    boolean_mask = K.any(
        math_ops.not_equal(inputs, self.mask_value), axis=-1, keepdims=True)
    # Zero out fully-masked timesteps so they carry no signal downstream.
    outputs = inputs * math_ops.cast(boolean_mask, inputs.dtype)
    # Compute the mask and outputs simultaneously.
    outputs._keras_mask = array_ops.squeeze(boolean_mask, axis=-1)  # pylint: disable=protected-access
    return outputs
  def compute_output_shape(self, input_shape):
    # Masking is elementwise; the shape is unchanged.
    return input_shape
  def get_config(self):
    config = {'mask_value': self.mask_value}
    base_config = super(Masking, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.Dropout')
class Dropout(Layer):
  """Applies Dropout to the input.

  During training, a fraction `rate` of the input units is randomly set to
  0 at each update, which helps prevent overfitting. In inference mode the
  layer acts as the identity.

  Arguments:
    rate: Float between 0 and 1. Fraction of the input units to drop.
    noise_shape: 1D integer tensor representing the shape of the binary
      dropout mask that will be multiplied with the input. For example, for
      inputs of shape `(batch_size, timesteps, features)`, passing
      `noise_shape=(batch_size, 1, features)` shares the same dropout mask
      across all timesteps.
    seed: A Python integer to use as random seed.

  Call arguments:
    inputs: Input tensor (of any rank).
    training: Python boolean indicating whether the layer should behave in
      training mode (adding dropout) or in inference mode (doing nothing).
  """

  def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
    super(Dropout, self).__init__(**kwargs)
    self.rate = rate
    self.noise_shape = noise_shape
    self.seed = seed
    self.supports_masking = True

  def _get_noise_shape(self, inputs):
    # Subclasses of `Dropout` may implement `_get_noise_shape(self, inputs)`
    # to override `self.noise_shape`, allowing custom noise shapes with
    # dynamically sized inputs.
    if self.noise_shape is None:
      return None

    concrete_shape = array_ops.shape(inputs)
    # Replace each `None` entry with the corresponding concrete input dim.
    noise_shape = [
        dim if dim is not None else concrete_shape[axis]
        for axis, dim in enumerate(self.noise_shape)
    ]
    return ops.convert_to_tensor(noise_shape)

  def call(self, inputs, training=None):
    if training is None:
      training = K.learning_phase()

    def apply_dropout():
      return nn.dropout(
          inputs,
          noise_shape=self._get_noise_shape(inputs),
          seed=self.seed,
          rate=self.rate)

    # Dropout only in training mode; identity otherwise.
    return tf_utils.smart_cond(training, apply_dropout,
                               lambda: array_ops.identity(inputs))

  def compute_output_shape(self, input_shape):
    return input_shape

  def get_config(self):
    config = {
        'rate': self.rate,
        'noise_shape': self.noise_shape,
        'seed': self.seed
    }
    base_config = super(Dropout, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.SpatialDropout1D')
class SpatialDropout1D(Dropout):
  """Spatial 1D version of Dropout.

  Performs the same function as Dropout, but drops entire 1D feature maps
  rather than individual elements. When adjacent frames within a feature map
  are strongly correlated (common in early convolution layers), element-wise
  dropout does not regularize the activations and merely lowers the
  effective learning rate; dropping whole feature maps instead promotes
  independence between them.

  Arguments:
    rate: Float between 0 and 1. Fraction of the input units to drop.

  Call arguments:
    inputs: A 3D tensor.
    training: Python boolean indicating whether the layer should behave in
      training mode (applying dropout) or in inference mode (identity).

  Input shape:
    3D tensor with shape `(samples, timesteps, channels)`.

  Output shape:
    Same as input.

  References:
    - [Efficient Object Localization Using Convolutional
      Networks](https://arxiv.org/abs/1411.4280)
  """

  def __init__(self, rate, **kwargs):
    super(SpatialDropout1D, self).__init__(rate, **kwargs)
    self.input_spec = InputSpec(ndim=3)

  def _get_noise_shape(self, inputs):
    shape = array_ops.shape(inputs)
    # Broadcast the dropout mask across timesteps (axis 1) so whole feature
    # maps are kept or dropped together.
    return (shape[0], 1, shape[2])
@keras_export('keras.layers.SpatialDropout2D')
class SpatialDropout2D(Dropout):
  """Spatial 2D version of Dropout.

  Performs the same function as Dropout, but drops entire 2D feature maps
  rather than individual elements. When adjacent pixels within a feature map
  are strongly correlated (common in early convolution layers), element-wise
  dropout does not regularize the activations and merely lowers the
  effective learning rate; dropping whole feature maps instead promotes
  independence between them.

  Arguments:
    rate: Float between 0 and 1. Fraction of the input units to drop.
    data_format: 'channels_first' or 'channels_last'. With 'channels_first'
      the channels dimension (the depth) is at index 1; with 'channels_last'
      it is at index 3. Defaults to the `image_data_format` value found in
      your Keras config file at `~/.keras/keras.json`, or "channels_last" if
      never set.

  Call arguments:
    inputs: A 4D tensor.
    training: Python boolean indicating whether the layer should behave in
      training mode (applying dropout) or in inference mode (identity).

  Input shape:
    4D tensor with shape `(samples, channels, rows, cols)` if
    data_format='channels_first', or `(samples, rows, cols, channels)` if
    data_format='channels_last'.

  Output shape:
    Same as input.

  References:
    - [Efficient Object Localization Using Convolutional
      Networks](https://arxiv.org/abs/1411.4280)
  """

  def __init__(self, rate, data_format=None, **kwargs):
    super(SpatialDropout2D, self).__init__(rate, **kwargs)
    if data_format is None:
      data_format = K.image_data_format()
    if data_format not in ('channels_last', 'channels_first'):
      raise ValueError('data_format must be in '
                       '{"channels_last", "channels_first"}')
    self.data_format = data_format
    self.input_spec = InputSpec(ndim=4)

  def _get_noise_shape(self, inputs):
    shape = array_ops.shape(inputs)
    # Broadcast the mask over both spatial dimensions so whole feature maps
    # are kept or dropped together.
    if self.data_format == 'channels_first':
      return (shape[0], shape[1], 1, 1)
    elif self.data_format == 'channels_last':
      return (shape[0], 1, 1, shape[3])
@keras_export('keras.layers.SpatialDropout3D')
class SpatialDropout3D(Dropout):
  """Spatial 3D version of Dropout.

  Performs the same function as Dropout, but drops entire 3D feature maps
  rather than individual elements. When adjacent voxels within a feature map
  are strongly correlated (common in early convolution layers), element-wise
  dropout does not regularize the activations and merely lowers the
  effective learning rate; dropping whole feature maps instead promotes
  independence between them.

  Arguments:
    rate: Float between 0 and 1. Fraction of the input units to drop.
    data_format: 'channels_first' or 'channels_last'. With 'channels_first'
      the channels dimension (the depth) is at index 1; with 'channels_last'
      it is at index 4. Defaults to the `image_data_format` value found in
      your Keras config file at `~/.keras/keras.json`, or "channels_last" if
      never set.

  Call arguments:
    inputs: A 5D tensor.
    training: Python boolean indicating whether the layer should behave in
      training mode (applying dropout) or in inference mode (identity).

  Input shape:
    5D tensor with shape `(samples, channels, dim1, dim2, dim3)` if
    data_format='channels_first', or `(samples, dim1, dim2, dim3, channels)`
    if data_format='channels_last'.

  Output shape:
    Same as input.

  References:
    - [Efficient Object Localization Using Convolutional
      Networks](https://arxiv.org/abs/1411.4280)
  """

  def __init__(self, rate, data_format=None, **kwargs):
    super(SpatialDropout3D, self).__init__(rate, **kwargs)
    if data_format is None:
      data_format = K.image_data_format()
    if data_format not in ('channels_last', 'channels_first'):
      raise ValueError('data_format must be in '
                       '{"channels_last", "channels_first"}')
    self.data_format = data_format
    self.input_spec = InputSpec(ndim=5)

  def _get_noise_shape(self, inputs):
    shape = array_ops.shape(inputs)
    # Broadcast the mask over all three spatial dimensions so whole feature
    # maps are kept or dropped together.
    if self.data_format == 'channels_first':
      return (shape[0], shape[1], 1, 1, 1)
    elif self.data_format == 'channels_last':
      return (shape[0], 1, 1, 1, shape[4])
@keras_export('keras.layers.Activation')
class Activation(Layer):
  """Applies an activation function to an output.

  Arguments:
    activation: Activation function, such as `tf.nn.relu`, or the string
      name of a built-in activation function, such as "relu".

  Input shape:
    Arbitrary. Use the keyword argument `input_shape` (tuple of integers,
    not including the samples axis) when using this layer as the first
    layer in a model.

  Output shape:
    Same shape as input.
  """

  def __init__(self, activation, **kwargs):
    super(Activation, self).__init__(**kwargs)
    self.supports_masking = True
    # Resolve string names (e.g. "relu") to the actual callable.
    self.activation = activations.get(activation)

  def call(self, inputs):
    return self.activation(inputs)

  def compute_output_shape(self, input_shape):
    return input_shape

  def get_config(self):
    config = dict(super(Activation, self).get_config())
    config['activation'] = activations.serialize(self.activation)
    return config
@keras_export('keras.layers.Reshape')
class Reshape(Layer):
  """Reshapes an output to a certain shape.

  Arguments:
    target_shape: Target shape. Tuple of integers, not including the
      samples dimension (batch size). A single `-1` entry is allowed and is
      inferred from the remaining dimensions.

  Input shape:
    Arbitrary, although all dimensions in the input shape must be fixed.
    Use the keyword argument `input_shape` (tuple of integers, not
    including the samples axis) when using this layer as the first layer
    in a model.

  Output shape:
    `(batch_size,) + target_shape`

  Example:

  ```python
  # as first layer in a Sequential model
  model = Sequential()
  model.add(Reshape((3, 4), input_shape=(12,)))
  # now: model.output_shape == (None, 3, 4)
  # note: `None` is the batch dimension

  # as intermediate layer in a Sequential model
  model.add(Reshape((6, 2)))
  # now: model.output_shape == (None, 6, 2)

  # also supports shape inference using `-1` as dimension
  model.add(Reshape((-1, 2, 2)))
  # now: model.output_shape == (None, None, 2, 2)
  ```
  """

  def __init__(self, target_shape, **kwargs):
    super(Reshape, self).__init__(**kwargs)
    self.target_shape = tuple(target_shape)

  def _fix_unknown_dimension(self, input_shape, output_shape):
    """Replaces the single negative entry in `output_shape` with its value.

    Near-direct port of the internal Numpy function
    `_fix_unknown_dimension` in `numpy/core/src/multiarray/shape.c`.

    Arguments:
      input_shape: Shape of the array being reshaped.
      output_shape: Desired shape with at most a single -1, which indicates
        a dimension to be derived from the input shape.

    Returns:
      The new output shape with the -1 replaced by its computed value.

    Raises:
      ValueError: If the total size of `output_shape` differs from that of
        `input_shape`, or more than one unknown dimension is specified.
    """
    output_shape = list(output_shape)
    msg = 'total size of new array must be unchanged'

    known = 1
    unknown = None
    for axis, dim in enumerate(output_shape):
      if dim >= 0:
        known *= dim
      elif unknown is None:
        unknown = axis
      else:
        raise ValueError('Can only specify one unknown dimension.')

    original = np.prod(input_shape, dtype=int)
    if unknown is None:
      if original != known:
        raise ValueError(msg)
    else:
      if known == 0 or original % known != 0:
        raise ValueError(msg)
      output_shape[unknown] = original // known
    return output_shape

  def compute_output_shape(self, input_shape):
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    output_shape = [input_shape[0]]
    if None in input_shape[1:]:
      # Input shape (partially) unknown: replace -1 entries with None.
      output_shape += tuple(s if s != -1 else None for s in self.target_shape)
    else:
      output_shape += self._fix_unknown_dimension(input_shape[1:],
                                                  self.target_shape)
    return tensor_shape.TensorShape(output_shape)

  def call(self, inputs):
    batch = array_ops.shape(inputs)[0]
    return array_ops.reshape(inputs, (batch,) + self.target_shape)

  def get_config(self):
    config = dict(super(Reshape, self).get_config())
    config['target_shape'] = self.target_shape
    return config
@keras_export('keras.layers.Permute')
class Permute(Layer):
  """Permutes the dimensions of the input according to a given pattern.

  Useful for e.g. connecting RNNs and convnets together.

  Example:

  ```python
  model = Sequential()
  model.add(Permute((2, 1), input_shape=(10, 64)))
  # now: model.output_shape == (None, 64, 10)
  # note: `None` is the batch dimension
  ```

  Arguments:
    dims: Tuple of integers. Permutation pattern, not including the samples
      dimension. Indexing starts at 1. For instance, `(2, 1)` swaps the
      first and second dimensions of the input.

  Input shape:
    Arbitrary. Use the keyword argument `input_shape` (tuple of integers,
    not including the samples axis) when using this layer as the first
    layer in a model.

  Output shape:
    Same as the input shape, but with the dimensions re-ordered according
    to the specified pattern.
  """

  def __init__(self, dims, **kwargs):
    super(Permute, self).__init__(**kwargs)
    self.dims = tuple(dims)
    # `dims` must be a permutation of 1..len(dims); the batch axis (0) is
    # never permuted.
    if sorted(dims) != list(range(1, len(dims) + 1)):
      raise ValueError(
          'Invalid permutation `dims` for Permute Layer: %s. '
          'The set of indices in `dims` must be consecutive and start from 1.' %
          (dims,))
    self.input_spec = InputSpec(ndim=len(self.dims) + 1)

  def compute_output_shape(self, input_shape):
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    output_shape = [input_shape[0]]
    output_shape.extend(input_shape[axis] for axis in self.dims)
    return tensor_shape.TensorShape(output_shape)

  def call(self, inputs):
    return array_ops.transpose(inputs, perm=(0,) + self.dims)

  def get_config(self):
    config = dict(super(Permute, self).get_config())
    config['dims'] = self.dims
    return config
@keras_export('keras.layers.Flatten')
class Flatten(Layer):
  """Flattens the input. Does not affect the batch size.

  If inputs are shaped `(batch,)` without a channel dimension, then
  flattening adds an extra channel dimension and output shapes are
  `(batch, 1)`.

  Arguments:
    data_format: A string, one of `channels_last` (default) or
      `channels_first`. The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, ..., channels)` while `channels_first` corresponds to inputs
      with shape `(batch, channels, ...)`. It defaults to the
      `image_data_format` value found in your Keras config file at
      `~/.keras/keras.json`. If you never set it, then it will be
      "channels_last".

  Example:

  ```python
  model = Sequential()
  model.add(Convolution2D(64, 3, 3,
                          border_mode='same',
                          input_shape=(3, 32, 32)))
  # now: model.output_shape == (None, 64, 32, 32)

  model.add(Flatten())
  # now: model.output_shape == (None, 65536)
  ```
  """

  def __init__(self, data_format=None, **kwargs):
    super(Flatten, self).__init__(**kwargs)
    self.data_format = conv_utils.normalize_data_format(data_format)
    self.input_spec = InputSpec(min_ndim=1)

  def call(self, inputs):
    if (self.data_format == 'channels_first'
        and K.ndim(inputs) is not None and K.ndim(inputs) > 1):
      # Move channels last before flattening so the flattened ordering is
      # independent of data_format.
      permutation = [0]
      permutation.extend(range(2, K.ndim(inputs)))
      permutation.append(1)
      inputs = array_ops.transpose(inputs, perm=permutation)

    # Prefer the static batch size when known; fall back to the dynamic one.
    outputs = array_ops.reshape(
        inputs, (tensor_shape.dimension_value(inputs.shape[0]) or
                 array_ops.shape(inputs)[0], -1))
    if not context.executing_eagerly():
      outputs.set_shape(self.compute_output_shape(inputs.shape))
    return outputs

  def compute_output_shape(self, input_shape):
    """Returns `(batch, prod(other dims))`; `(1,)` for rank-0 inputs.

    Fixes a bug in the previous implementation where the rank-0 branch's
    result was immediately overwritten and `input_shape[0]` then raised
    IndexError on the empty shape list.
    """
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    if not input_shape:
      # Rank-0 input: flattening adds a channel dimension.
      return tensor_shape.TensorShape([1])
    output_shape = [input_shape[0]]
    if all(input_shape[1:]):
      # dtype=int keeps the flattened dimension a Python int rather than a
      # numpy float (np.prod of an empty list defaults to float 1.0).
      output_shape += [np.prod(input_shape[1:], dtype=int)]
    else:
      # At least one non-batch dimension is unknown, so the flattened
      # dimension is unknown too.
      output_shape += [None]
    return tensor_shape.TensorShape(output_shape)

  def get_config(self):
    config = {'data_format': self.data_format}
    base_config = super(Flatten, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.RepeatVector')
class RepeatVector(Layer):
  """Repeats the input n times.

  Example:

  ```python
  model = Sequential()
  model.add(Dense(32, input_dim=32))
  # now: model.output_shape == (None, 32)
  # note: `None` is the batch dimension

  model.add(RepeatVector(3))
  # now: model.output_shape == (None, 3, 32)
  ```

  Arguments:
    n: Integer, repetition factor.

  Input shape:
    2D tensor of shape `(num_samples, features)`.

  Output shape:
    3D tensor of shape `(num_samples, n, features)`.
  """

  def __init__(self, n, **kwargs):
    super(RepeatVector, self).__init__(**kwargs)
    self.n = n
    self.input_spec = InputSpec(ndim=2)

  def compute_output_shape(self, input_shape):
    shape = tensor_shape.TensorShape(input_shape).as_list()
    # Insert the repetition axis between batch and features.
    return tensor_shape.TensorShape([shape[0], self.n, shape[1]])

  def call(self, inputs):
    return K.repeat(inputs, self.n)

  def get_config(self):
    config = dict(super(RepeatVector, self).get_config())
    config['n'] = self.n
    return config
@keras_export('keras.layers.Lambda')
class Lambda(Layer):
  """Wraps arbitrary expressions as a `Layer` object.

  The `Lambda` layer exists so that arbitrary TensorFlow functions
  can be used when constructing `Sequential` and Functional API
  models. `Lambda` layers are best suited for simple operations or
  quick experimentation. For more advanced use cases, subclassing
  `keras.layers.Layer` is preferred. One reason for this is that
  when saving a Model, `Lambda` layers are saved by serializing the
  Python bytecode, whereas subclassed Layers are saved via overriding
  their `get_config` method and are thus more portable. Models that rely
  on subclassed Layers are also often easier to visualize and reason
  about.

  Examples:

  ```python
  # add a x -> x^2 layer
  model.add(Lambda(lambda x: x ** 2))
  ```

  ```python
  # add a layer that returns the concatenation
  # of the positive part of the input and
  # the opposite of the negative part

  def antirectifier(x):
      x -= K.mean(x, axis=1, keepdims=True)
      x = K.l2_normalize(x, axis=1)
      pos = K.relu(x)
      neg = K.relu(-x)
      return K.concatenate([pos, neg], axis=1)

  model.add(Lambda(antirectifier))
  ```

  Variables can be created within a `Lambda` layer. Like with
  other layers, these variables will be created only once and reused
  if the `Lambda` layer is called on new inputs. If creating more
  than one variable in a given `Lambda` instance, be sure to use
  a different name for each variable. Note that calling sublayers
  from within a `Lambda` is not supported.

  Example of variable creation:

  ```python
  def linear_transform(x):
    v1 = tf.Variable(1., name='multiplier')
    v2 = tf.Variable(0., name='bias')
    return x*v1 + v2

  linear_layer = Lambda(linear_transform)
  model.add(linear_layer)
  model.add(keras.layers.Dense(10, activation='relu'))
  model.add(linear_layer)  # Reuses existing Variables
  ```

  Note that creating two instances of `Lambda` using the same function
  will *not* share Variables between the two instances. Each instance of
  `Lambda` will create and manage its own weights.

  Arguments:
    function: The function to be evaluated. Takes input tensor as first
      argument.
    output_shape: Expected output shape from function. This argument can be
      inferred if not explicitly provided. Can be a tuple or function. If a
      tuple, it only specifies the first dimension onward;
      sample dimension is assumed either the same as the input: `output_shape =
      (input_shape[0], ) + output_shape` or, the input is `None` and
      the sample dimension is also `None`: `output_shape = (None, ) +
      output_shape` If a function, it specifies the entire shape as a function
      of the
      input shape: `output_shape = f(input_shape)`
    mask: Either None (indicating no masking) or a callable with the same
      signature as the `compute_mask` layer method, or a tensor that will be
      returned as output mask regardless what the input is.
    arguments: Optional dictionary of keyword arguments to be passed to the
      function.

  Input shape: Arbitrary. Use the keyword argument input_shape (tuple of
    integers, does not include the samples axis) when using this layer as the
    first layer in a model.

  Output shape: Specified by `output_shape` argument
  """

  def __init__(self, function, output_shape=None, mask=None, arguments=None,
               **kwargs):
    super(Lambda, self).__init__(**kwargs)
    self.function = function
    self.arguments = arguments if arguments else {}
    if mask is not None:
      self.supports_masking = True
    self.mask = mask
    self._output_shape = output_shape
    # Caches variables created inside `function` so repeated calls reuse
    # them instead of creating new ones (see `_variable_creator`).
    self._variable_dict = {}
    # These attributes are inherited from `Layer`.
    self._trainable_weights = []
    self._non_trainable_weights = []

    # Inspect the wrapped function once so `call` knows whether to forward
    # the `training` and `mask` arguments to it.
    function_args = tf_inspect.getfullargspec(self.function).args
    self._fn_expects_training_arg = 'training' in function_args
    self._fn_expects_mask_arg = 'mask' in function_args

  @tf_utils.shape_type_conversion
  def compute_output_shape(self, input_shape):
    """Infers the output shape, from `_output_shape` if given, else by tracing."""
    if self._output_shape is None:
      # Make use of existing autocomputation but provide Lambda-specific
      # error message. This is always safe to run even when the outer context
      # is Graph mode because Lambda layers don't have side effects such as
      # `add_loss`.
      with context.eager_mode():
        try:
          return super(Lambda, self).compute_output_shape(input_shape)
        except NotImplementedError:
          raise NotImplementedError(
              'We could not automatically infer the shape of the Lambda\'s '
              'output. Please specify `output_shape` for this Lambda.')

    if callable(self._output_shape):
      output_shapes = self._output_shape(input_shape)
      return tf_utils.convert_shapes(output_shapes, to_tuples=False)

    # Output shapes are passed directly and don't include batch dimension.
    input_tensor_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)
    batch_size = nest.flatten(input_tensor_shape)[0][0] if input_shape else None

    def _add_batch(shape):
      return tensor_shape.TensorShape([batch_size] + shape.as_list())

    output_shapes = tf_utils.convert_shapes(self._output_shape, to_tuples=False)
    return nest.map_structure(_add_batch, output_shapes)

  def call(self, inputs, mask=None, training=None):
    """Invokes the wrapped function, forwarding `mask`/`training` if expected."""
    arguments = self.arguments
    if self._fn_expects_mask_arg:
      arguments['mask'] = mask
    if self._fn_expects_training_arg:
      arguments['training'] = training
    # Intercept variable creation so variables made inside `function` are
    # tracked by this layer and reused across calls.
    with variable_scope.variable_creator_scope(self._variable_creator):
      return self.function(inputs, **arguments)

  def _variable_creator(self, next_creator, **kwargs):
    """Creates (or reuses) a variable and registers it with this layer."""
    name = kwargs['name']
    # Reuse by name so a second call to the layer does not re-create.
    if name in self._variable_dict:
      return self._variable_dict[name]
    var = next_creator(**kwargs)
    self._variable_dict[name] = var
    if var.trainable:
      self._trainable_weights.append(var)
    else:
      self._non_trainable_weights.append(var)
    K.track_variable(var)
    return var

  def compute_mask(self, inputs, mask=None):
    """Returns `mask(inputs, mask)` if `mask` is callable, else `mask` itself."""
    if callable(self.mask):
      return self.mask(inputs, mask)
    return self.mask

  def get_config(self):
    """Serializes the layer, encoding `function`/`output_shape`/`mask` each as
    a (value, type, module) triple so they can be rebuilt in `from_config`."""
    function_config = self._serialize_function_to_config(self.function)
    output_shape_config = self._serialize_function_to_config(self._output_shape,
                                                            allow_raw=True)
    config = {
        'function': function_config[0],
        'function_type': function_config[1],
        'module': function_config[2],
        'output_shape': output_shape_config[0],
        'output_shape_type': output_shape_config[1],
        'output_shape_module': output_shape_config[2],
    }
    if self.mask is not None:
      mask_config = self._serialize_function_to_config(self.mask)
      config.update({
          'mask': mask_config[0],
          'mask_type': mask_config[1],
          'mask_module': mask_config[2]
      })
    config['arguments'] = self.arguments

    base_config = super(Lambda, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  def _serialize_function_to_config(self, inputs, allow_raw=False):
    """Returns (serialized_value, type_tag, module_name) for `inputs`.

    Lambdas are dumped as bytecode ('lambda'), named callables by name
    ('function'); anything else is passed through as 'raw' when allowed.
    """
    if isinstance(inputs, python_types.LambdaType):
      output = generic_utils.func_dump(inputs)
      output_type = 'lambda'
      module = inputs.__module__
    elif callable(inputs):
      output = inputs.__name__
      output_type = 'function'
      module = inputs.__module__
    elif allow_raw:
      output = inputs
      output_type = 'raw'
      module = None
    else:
      raise ValueError(
          'Invalid input for serialization, type: %s ' % type(inputs))
    return output, output_type, module

  @classmethod
  def from_config(cls, config, custom_objects=None):
    """Rebuilds a Lambda layer from `get_config` output."""
    config = config.copy()
    function = cls._parse_function_from_config(
        config, custom_objects, 'function', 'module', 'function_type')

    output_shape = cls._parse_function_from_config(
        config, custom_objects, 'output_shape', 'output_shape_module',
        'output_shape_type')
    if 'mask' in config:
      mask = cls._parse_function_from_config(
          config, custom_objects, 'mask', 'mask_module', 'mask_type')
    else:
      mask = None

    config['function'] = function
    config['output_shape'] = output_shape
    config['mask'] = mask

    # If arguments were numpy array, they have been saved as
    # list. We need to recover the ndarray
    if 'arguments' in config:
      for key in config['arguments']:
        if isinstance(config['arguments'][key], dict):
          arg_dict = config['arguments'][key]
          if 'type' in arg_dict and arg_dict['type'] == 'ndarray':
            # Overwrite the argument with its numpy translation
            config['arguments'][key] = np.array(arg_dict['value'])

    return cls(**config)

  @classmethod
  def _parse_function_from_config(
      cls, config, custom_objects, func_attr_name, module_attr_name,
      func_type_attr_name):
    """Deserializes one function-like config entry (inverse of
    `_serialize_function_to_config`); pops the module/type keys it consumes."""
    globs = globals()
    module = config.pop(module_attr_name, None)
    if module in sys.modules:
      globs.update(sys.modules[module].__dict__)
    elif module is not None:
      # Note: we don't know the name of the function if it's a lambda.
      warnings.warn('{} is not loaded, but a Lambda layer uses it. '
                    'It may cause errors.'.format(module)
                    , UserWarning)
    if custom_objects:
      globs.update(custom_objects)
    function_type = config.pop(func_type_attr_name)
    if function_type == 'function':
      # Simple lookup in custom objects
      function = generic_utils.deserialize_keras_object(
          config[func_attr_name],
          custom_objects=custom_objects,
          printable_module_name='function in Lambda layer')
    elif function_type == 'lambda':
      # Unsafe deserialization from bytecode
      function = generic_utils.func_load(
          config[func_attr_name], globs=globs)
    elif function_type == 'raw':
      function = config[func_attr_name]
    else:
      raise TypeError('Unknown function type:', function_type)
    return function
@keras_export('keras.layers.Dense')
class Dense(Layer):
  """Just your regular densely-connected NN layer.

  `Dense` implements the operation:
  `output = activation(dot(input, kernel) + bias)`
  where `activation` is the element-wise activation function
  passed as the `activation` argument, `kernel` is a weights matrix
  created by the layer, and `bias` is a bias vector created by the layer
  (only applicable if `use_bias` is `True`).

  Note: If the input to the layer has a rank greater than 2, then
  it is flattened prior to the initial dot product with `kernel`.

  Example:

  ```python
  # as first layer in a sequential model:
  model = Sequential()
  model.add(Dense(32, input_shape=(16,)))
  # now the model will take as input arrays of shape (*, 16)
  # and output arrays of shape (*, 32)

  # after the first layer, you don't need to specify
  # the size of the input anymore:
  model.add(Dense(32))
  ```

  Arguments:
    units: Positive integer, dimensionality of the output space.
    activation: Activation function to use.
      If you don't specify anything, no activation is applied
      (ie. "linear" activation: `a(x) = x`).
    use_bias: Boolean, whether the layer uses a bias vector.
    kernel_initializer: Initializer for the `kernel` weights matrix.
    bias_initializer: Initializer for the bias vector.
    kernel_regularizer: Regularizer function applied to
      the `kernel` weights matrix.
    bias_regularizer: Regularizer function applied to the bias vector.
    activity_regularizer: Regularizer function applied to
      the output of the layer (its "activation").
    kernel_constraint: Constraint function applied to
      the `kernel` weights matrix.
    bias_constraint: Constraint function applied to the bias vector.

  Input shape:
    N-D tensor with shape: `(batch_size, ..., input_dim)`.
    The most common situation would be
    a 2D input with shape `(batch_size, input_dim)`.

  Output shape:
    N-D tensor with shape: `(batch_size, ..., units)`.
    For instance, for a 2D input with shape `(batch_size, input_dim)`,
    the output would have shape `(batch_size, units)`.
  """

  def __init__(self,
               units,
               activation=None,
               use_bias=True,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               **kwargs):
    # Legacy support: `input_dim` is accepted as an alias for a 1-D
    # `input_shape`.
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
      kwargs['input_shape'] = (kwargs.pop('input_dim'),)

    super(Dense, self).__init__(
        activity_regularizer=regularizers.get(activity_regularizer), **kwargs)
    self.units = int(units)
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)

    self.supports_masking = True
    self.input_spec = InputSpec(min_ndim=2)

  def build(self, input_shape):
    """Creates `kernel` (and optionally `bias`) once the input dim is known.

    Raises:
      TypeError: If the layer dtype is not floating point or complex.
      ValueError: If the last input dimension is not statically known.
    """
    dtype = dtypes.as_dtype(self.dtype or K.floatx())
    if not (dtype.is_floating or dtype.is_complex):
      raise TypeError('Unable to build `Dense` layer with non-floating point '
                      'dtype %s' % (dtype,))
    input_shape = tensor_shape.TensorShape(input_shape)
    if tensor_shape.dimension_value(input_shape[-1]) is None:
      raise ValueError('The last dimension of the inputs to `Dense` '
                       'should be defined. Found `None`.')
    last_dim = tensor_shape.dimension_value(input_shape[-1])
    # Pin the last axis so later calls must match the built input dim.
    self.input_spec = InputSpec(min_ndim=2,
                                axes={-1: last_dim})
    self.kernel = self.add_weight(
        'kernel',
        shape=[last_dim, self.units],
        initializer=self.kernel_initializer,
        regularizer=self.kernel_regularizer,
        constraint=self.kernel_constraint,
        dtype=self.dtype,
        trainable=True)
    if self.use_bias:
      self.bias = self.add_weight(
          'bias',
          shape=[self.units,],
          initializer=self.bias_initializer,
          regularizer=self.bias_regularizer,
          constraint=self.bias_constraint,
          dtype=self.dtype,
          trainable=True)
    else:
      self.bias = None
    self.built = True

  def call(self, inputs):
    """Computes `activation(inputs . kernel + bias)`."""
    rank = len(inputs.shape)
    if rank > 2:
      # Broadcasting is required for the inputs.
      outputs = standard_ops.tensordot(inputs, self.kernel, [[rank - 1], [0]])
      # Reshape the output back to the original ndim of the input.
      if not context.executing_eagerly():
        shape = inputs.shape.as_list()
        output_shape = shape[:-1] + [self.units]
        outputs.set_shape(output_shape)
    else:
      # Cast the inputs to self.dtype, which is the variable dtype. We do not
      # cast if `should_cast_variables` is True, as in that case the variable
      # will be automatically casted to inputs.dtype.
      if not self._mixed_precision_policy.should_cast_variables:
        inputs = math_ops.cast(inputs, self.dtype)
      if K.is_sparse(inputs):
        outputs = sparse_ops.sparse_tensor_dense_matmul(inputs, self.kernel)
      else:
        outputs = gen_math_ops.mat_mul(inputs, self.kernel)
    if self.use_bias:
      outputs = nn.bias_add(outputs, self.bias)
    if self.activation is not None:
      return self.activation(outputs)  # pylint: disable=not-callable
    return outputs

  def compute_output_shape(self, input_shape):
    """Returns the input shape with the last dimension replaced by `units`."""
    input_shape = tensor_shape.TensorShape(input_shape)
    input_shape = input_shape.with_rank_at_least(2)
    if tensor_shape.dimension_value(input_shape[-1]) is None:
      raise ValueError(
          'The innermost dimension of input_shape must be defined, but saw: %s'
          % input_shape)
    return input_shape[:-1].concatenate(self.units)

  def get_config(self):
    """Returns the full layer configuration for serialization."""
    config = {
        'units': self.units,
        'activation': activations.serialize(self.activation),
        'use_bias': self.use_bias,
        'kernel_initializer': initializers.serialize(self.kernel_initializer),
        'bias_initializer': initializers.serialize(self.bias_initializer),
        'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
        'bias_regularizer': regularizers.serialize(self.bias_regularizer),
        'activity_regularizer':
            regularizers.serialize(self.activity_regularizer),
        'kernel_constraint': constraints.serialize(self.kernel_constraint),
        'bias_constraint': constraints.serialize(self.bias_constraint)
    }
    base_config = super(Dense, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.ActivityRegularization')
class ActivityRegularization(Layer):
  """Layer that applies an update to the cost function based input activity.

  Arguments:
    l1: L1 regularization factor (positive float).
    l2: L2 regularization factor (positive float).

  Input shape:
    Arbitrary. Use the keyword argument `input_shape` (tuple of integers,
    not including the samples axis) when using this layer as the first
    layer in a model.

  Output shape:
    Same shape as input.
  """

  def __init__(self, l1=0., l2=0., **kwargs):
    # The actual regularization is delegated to the base Layer via
    # `activity_regularizer`; this class only stores the factors.
    super(ActivityRegularization, self).__init__(
        activity_regularizer=regularizers.L1L2(l1=l1, l2=l2), **kwargs)
    self.supports_masking = True
    self.l1 = l1
    self.l2 = l2

  def compute_output_shape(self, input_shape):
    return input_shape

  def get_config(self):
    config = dict(super(ActivityRegularization, self).get_config())
    config.update({'l1': self.l1, 'l2': self.l2})
    return config
| 36.524576 | 102 | 0.695921 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import sys
import types as python_types
import warnings
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import activations
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.utils import conv_utils
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import standard_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.Masking')
class Masking(Layer):
def __init__(self, mask_value=0., **kwargs):
super(Masking, self).__init__(**kwargs)
self.supports_masking = True
self.mask_value = mask_value
self._compute_output_and_mask_jointly = True
def compute_mask(self, inputs, mask=None):
return K.any(math_ops.not_equal(inputs, self.mask_value), axis=-1)
def call(self, inputs):
boolean_mask = K.any(
math_ops.not_equal(inputs, self.mask_value), axis=-1, keepdims=True)
outputs = inputs * math_ops.cast(boolean_mask, inputs.dtype)
outputs._keras_mask = array_ops.squeeze(boolean_mask, axis=-1)
return outputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {'mask_value': self.mask_value}
base_config = super(Masking, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.Dropout')
class Dropout(Layer):
def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
super(Dropout, self).__init__(**kwargs)
self.rate = rate
self.noise_shape = noise_shape
self.seed = seed
self.supports_masking = True
def _get_noise_shape(self, inputs):
if self.noise_shape is None:
return None
concrete_inputs_shape = array_ops.shape(inputs)
noise_shape = []
for i, value in enumerate(self.noise_shape):
noise_shape.append(concrete_inputs_shape[i] if value is None else value)
return ops.convert_to_tensor(noise_shape)
def call(self, inputs, training=None):
if training is None:
training = K.learning_phase()
def dropped_inputs():
return nn.dropout(
inputs,
noise_shape=self._get_noise_shape(inputs),
seed=self.seed,
rate=self.rate)
output = tf_utils.smart_cond(training,
dropped_inputs,
lambda: array_ops.identity(inputs))
return output
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
'rate': self.rate,
'noise_shape': self.noise_shape,
'seed': self.seed
}
base_config = super(Dropout, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.SpatialDropout1D')
class SpatialDropout1D(Dropout):
def __init__(self, rate, **kwargs):
super(SpatialDropout1D, self).__init__(rate, **kwargs)
self.input_spec = InputSpec(ndim=3)
def _get_noise_shape(self, inputs):
input_shape = array_ops.shape(inputs)
noise_shape = (input_shape[0], 1, input_shape[2])
return noise_shape
@keras_export('keras.layers.SpatialDropout2D')
class SpatialDropout2D(Dropout):
def __init__(self, rate, data_format=None, **kwargs):
super(SpatialDropout2D, self).__init__(rate, **kwargs)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError('data_format must be in '
'{"channels_last", "channels_first"}')
self.data_format = data_format
self.input_spec = InputSpec(ndim=4)
def _get_noise_shape(self, inputs):
input_shape = array_ops.shape(inputs)
if self.data_format == 'channels_first':
return (input_shape[0], input_shape[1], 1, 1)
elif self.data_format == 'channels_last':
return (input_shape[0], 1, 1, input_shape[3])
@keras_export('keras.layers.SpatialDropout3D')
class SpatialDropout3D(Dropout):
def __init__(self, rate, data_format=None, **kwargs):
super(SpatialDropout3D, self).__init__(rate, **kwargs)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError('data_format must be in '
'{"channels_last", "channels_first"}')
self.data_format = data_format
self.input_spec = InputSpec(ndim=5)
def _get_noise_shape(self, inputs):
input_shape = array_ops.shape(inputs)
if self.data_format == 'channels_first':
return (input_shape[0], input_shape[1], 1, 1, 1)
elif self.data_format == 'channels_last':
return (input_shape[0], 1, 1, 1, input_shape[4])
@keras_export('keras.layers.Activation')
class Activation(Layer):
def __init__(self, activation, **kwargs):
super(Activation, self).__init__(**kwargs)
self.supports_masking = True
self.activation = activations.get(activation)
def call(self, inputs):
return self.activation(inputs)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {'activation': activations.serialize(self.activation)}
base_config = super(Activation, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.Reshape')
class Reshape(Layer):
def __init__(self, target_shape, **kwargs):
super(Reshape, self).__init__(**kwargs)
self.target_shape = tuple(target_shape)
def _fix_unknown_dimension(self, input_shape, output_shape):
output_shape = list(output_shape)
msg = 'total size of new array must be unchanged'
known, unknown = 1, None
for index, dim in enumerate(output_shape):
if dim < 0:
if unknown is None:
unknown = index
else:
raise ValueError('Can only specify one unknown dimension.')
else:
known *= dim
original = np.prod(input_shape, dtype=int)
if unknown is not None:
if known == 0 or original % known != 0:
raise ValueError(msg)
output_shape[unknown] = original // known
elif original != known:
raise ValueError(msg)
return output_shape
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if None in input_shape[1:]:
output_shape = [input_shape[0]]
output_shape += tuple(s if s != -1 else None for s in self.target_shape)
else:
output_shape = [input_shape[0]]
output_shape += self._fix_unknown_dimension(input_shape[1:],
self.target_shape)
return tensor_shape.TensorShape(output_shape)
def call(self, inputs):
return array_ops.reshape(inputs,
(array_ops.shape(inputs)[0],) + self.target_shape)
def get_config(self):
config = {'target_shape': self.target_shape}
base_config = super(Reshape, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.Permute')
class Permute(Layer):
def __init__(self, dims, **kwargs):
super(Permute, self).__init__(**kwargs)
self.dims = tuple(dims)
if sorted(dims) != list(range(1, len(dims) + 1)):
raise ValueError(
'Invalid permutation `dims` for Permute Layer: %s. '
'The set of indices in `dims` must be consecutive and start from 1.' %
(dims,))
self.input_spec = InputSpec(ndim=len(self.dims) + 1)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
output_shape = copy.copy(input_shape)
for i, dim in enumerate(self.dims):
target_dim = input_shape[dim]
output_shape[i + 1] = target_dim
return tensor_shape.TensorShape(output_shape)
def call(self, inputs):
return array_ops.transpose(inputs, perm=(0,) + self.dims)
def get_config(self):
config = {'dims': self.dims}
base_config = super(Permute, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.Flatten')
class Flatten(Layer):
def __init__(self, data_format=None, **kwargs):
super(Flatten, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
self.input_spec = InputSpec(min_ndim=1)
def call(self, inputs):
if (self.data_format == 'channels_first'
and K.ndim(inputs) is not None and K.ndim(inputs) > 1):
permutation = [0]
permutation.extend([i for i in
range(2, K.ndim(inputs))])
permutation.append(1)
inputs = array_ops.transpose(inputs, perm=permutation)
outputs = array_ops.reshape(
inputs, (tensor_shape.dimension_value(inputs.shape[0]) or
array_ops.shape(inputs)[0], -1))
if not context.executing_eagerly():
outputs.set_shape(self.compute_output_shape(inputs.shape))
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if not input_shape:
output_shape = tensor_shape.TensorShape([1])
output_shape = [input_shape[0]]
if all(input_shape[1:]):
output_shape += [np.prod(input_shape[1:])]
else:
output_shape += [None]
return tensor_shape.TensorShape(output_shape)
def get_config(self):
config = {'data_format': self.data_format}
base_config = super(Flatten, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.RepeatVector')
class RepeatVector(Layer):
def __init__(self, n, **kwargs):
super(RepeatVector, self).__init__(**kwargs)
self.n = n
self.input_spec = InputSpec(ndim=2)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
return tensor_shape.TensorShape([input_shape[0], self.n, input_shape[1]])
def call(self, inputs):
return K.repeat(inputs, self.n)
def get_config(self):
config = {'n': self.n}
base_config = super(RepeatVector, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.Lambda')
class Lambda(Layer):
def __init__(self, function, output_shape=None, mask=None, arguments=None,
**kwargs):
super(Lambda, self).__init__(**kwargs)
self.function = function
self.arguments = arguments if arguments else {}
if mask is not None:
self.supports_masking = True
self.mask = mask
self._output_shape = output_shape
self._variable_dict = {}
self._trainable_weights = []
self._non_trainable_weights = []
function_args = tf_inspect.getfullargspec(self.function).args
self._fn_expects_training_arg = 'training' in function_args
self._fn_expects_mask_arg = 'mask' in function_args
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
if self._output_shape is None:
# `add_loss`.
with context.eager_mode():
try:
return super(Lambda, self).compute_output_shape(input_shape)
except NotImplementedError:
raise NotImplementedError(
'We could not automatically infer the shape of the Lambda\'s '
'output. Please specify `output_shape` for this Lambda.')
if callable(self._output_shape):
output_shapes = self._output_shape(input_shape)
return tf_utils.convert_shapes(output_shapes, to_tuples=False)
input_tensor_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)
batch_size = nest.flatten(input_tensor_shape)[0][0] if input_shape else None
def _add_batch(shape):
return tensor_shape.TensorShape([batch_size] + shape.as_list())
output_shapes = tf_utils.convert_shapes(self._output_shape, to_tuples=False)
return nest.map_structure(_add_batch, output_shapes)
def call(self, inputs, mask=None, training=None):
arguments = self.arguments
if self._fn_expects_mask_arg:
arguments['mask'] = mask
if self._fn_expects_training_arg:
arguments['training'] = training
with variable_scope.variable_creator_scope(self._variable_creator):
return self.function(inputs, **arguments)
def _variable_creator(self, next_creator, **kwargs):
name = kwargs['name']
if name in self._variable_dict:
return self._variable_dict[name]
var = next_creator(**kwargs)
self._variable_dict[name] = var
if var.trainable:
self._trainable_weights.append(var)
else:
self._non_trainable_weights.append(var)
K.track_variable(var)
return var
def compute_mask(self, inputs, mask=None):
if callable(self.mask):
return self.mask(inputs, mask)
return self.mask
def get_config(self):
function_config = self._serialize_function_to_config(self.function)
output_shape_config = self._serialize_function_to_config(self._output_shape,
allow_raw=True)
config = {
'function': function_config[0],
'function_type': function_config[1],
'module': function_config[2],
'output_shape': output_shape_config[0],
'output_shape_type': output_shape_config[1],
'output_shape_module': output_shape_config[2],
}
if self.mask is not None:
mask_config = self._serialize_function_to_config(self.mask)
config.update({
'mask': mask_config[0],
'mask_type': mask_config[1],
'mask_module': mask_config[2]
})
config['arguments'] = self.arguments
base_config = super(Lambda, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def _serialize_function_to_config(self, inputs, allow_raw=False):
if isinstance(inputs, python_types.LambdaType):
output = generic_utils.func_dump(inputs)
output_type = 'lambda'
module = inputs.__module__
elif callable(inputs):
output = inputs.__name__
output_type = 'function'
module = inputs.__module__
elif allow_raw:
output = inputs
output_type = 'raw'
module = None
else:
raise ValueError(
'Invalid input for serialization, type: %s ' % type(inputs))
return output, output_type, module
@classmethod
def from_config(cls, config, custom_objects=None):
config = config.copy()
function = cls._parse_function_from_config(
config, custom_objects, 'function', 'module', 'function_type')
output_shape = cls._parse_function_from_config(
config, custom_objects, 'output_shape', 'output_shape_module',
'output_shape_type')
if 'mask' in config:
mask = cls._parse_function_from_config(
config, custom_objects, 'mask', 'mask_module', 'mask_type')
else:
mask = None
config['function'] = function
config['output_shape'] = output_shape
config['mask'] = mask
# If arguments were numpy array, they have been saved as
# list. We need to recover the ndarray
if 'arguments' in config:
for key in config['arguments']:
if isinstance(config['arguments'][key], dict):
arg_dict = config['arguments'][key]
if 'type' in arg_dict and arg_dict['type'] == 'ndarray':
# Overwrite the argument with its numpy translation
config['arguments'][key] = np.array(arg_dict['value'])
return cls(**config)
@classmethod
def _parse_function_from_config(
cls, config, custom_objects, func_attr_name, module_attr_name,
func_type_attr_name):
globs = globals()
module = config.pop(module_attr_name, None)
if module in sys.modules:
globs.update(sys.modules[module].__dict__)
elif module is not None:
# Note: we don't know the name of the function if it's a lambda.
warnings.warn('{} is not loaded, but a Lambda layer uses it. '
'It may cause errors.'.format(module)
, UserWarning)
if custom_objects:
globs.update(custom_objects)
function_type = config.pop(func_type_attr_name)
if function_type == 'function':
# Simple lookup in custom objects
function = generic_utils.deserialize_keras_object(
config[func_attr_name],
custom_objects=custom_objects,
printable_module_name='function in Lambda layer')
elif function_type == 'lambda':
# Unsafe deserialization from bytecode
function = generic_utils.func_load(
config[func_attr_name], globs=globs)
elif function_type == 'raw':
function = config[func_attr_name]
else:
raise TypeError('Unknown function type:', function_type)
return function
@keras_export('keras.layers.Dense')
class Dense(Layer):
def __init__(self,
units,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super(Dense, self).__init__(
activity_regularizer=regularizers.get(activity_regularizer), **kwargs)
self.units = int(units)
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.supports_masking = True
self.input_spec = InputSpec(min_ndim=2)
def build(self, input_shape):
dtype = dtypes.as_dtype(self.dtype or K.floatx())
if not (dtype.is_floating or dtype.is_complex):
raise TypeError('Unable to build `Dense` layer with non-floating point '
'dtype %s' % (dtype,))
input_shape = tensor_shape.TensorShape(input_shape)
if tensor_shape.dimension_value(input_shape[-1]) is None:
raise ValueError('The last dimension of the inputs to `Dense` '
'should be defined. Found `None`.')
last_dim = tensor_shape.dimension_value(input_shape[-1])
self.input_spec = InputSpec(min_ndim=2,
axes={-1: last_dim})
self.kernel = self.add_weight(
'kernel',
shape=[last_dim, self.units],
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
dtype=self.dtype,
trainable=True)
if self.use_bias:
self.bias = self.add_weight(
'bias',
shape=[self.units,],
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
dtype=self.dtype,
trainable=True)
else:
self.bias = None
self.built = True
def call(self, inputs):
rank = len(inputs.shape)
if rank > 2:
# Broadcasting is required for the inputs.
outputs = standard_ops.tensordot(inputs, self.kernel, [[rank - 1], [0]])
# Reshape the output back to the original ndim of the input.
if not context.executing_eagerly():
shape = inputs.shape.as_list()
output_shape = shape[:-1] + [self.units]
outputs.set_shape(output_shape)
else:
# Cast the inputs to self.dtype, which is the variable dtype. We do not
# cast if `should_cast_variables` is True, as in that case the variable
# will be automatically casted to inputs.dtype.
if not self._mixed_precision_policy.should_cast_variables:
inputs = math_ops.cast(inputs, self.dtype)
if K.is_sparse(inputs):
outputs = sparse_ops.sparse_tensor_dense_matmul(inputs, self.kernel)
else:
outputs = gen_math_ops.mat_mul(inputs, self.kernel)
if self.use_bias:
outputs = nn.bias_add(outputs, self.bias)
if self.activation is not None:
return self.activation(outputs) # pylint: disable=not-callable
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
input_shape = input_shape.with_rank_at_least(2)
if tensor_shape.dimension_value(input_shape[-1]) is None:
raise ValueError(
'The innermost dimension of input_shape must be defined, but saw: %s'
% input_shape)
return input_shape[:-1].concatenate(self.units)
def get_config(self):
config = {
'units': self.units,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super(Dense, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.ActivityRegularization')
class ActivityRegularization(Layer):
def __init__(self, l1=0., l2=0., **kwargs):
super(ActivityRegularization, self).__init__(
activity_regularizer=regularizers.L1L2(l1=l1, l2=l2), **kwargs)
self.supports_masking = True
self.l1 = l1
self.l2 = l2
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {'l1': self.l1, 'l2': self.l2}
base_config = super(ActivityRegularization, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| true | true |
1c2d8d266dac0a3c8e36d4ce58d4dcef2d05bb2b | 315 | py | Python | mongomail/models/users.py | terrabitz/MongoMail | 8c7321540fde534e29451c603d22dcad4d2b77c5 | [
"MIT"
] | null | null | null | mongomail/models/users.py | terrabitz/MongoMail | 8c7321540fde534e29451c603d22dcad4d2b77c5 | [
"MIT"
] | 7 | 2017-07-03T15:31:10.000Z | 2017-07-03T19:07:18.000Z | mongomail/models/users.py | terrabitz/MongoMail | 8c7321540fde534e29451c603d22dcad4d2b77c5 | [
"MIT"
] | null | null | null | import random
import string
from mongoengine import Document
from mongoengine.fields import StringField
KEY_SIZE = 32
class ApiKey(Document):
key = StringField(required=True, unique=True,
default=''.join([random.choice(string.ascii_letters + string.digits) for _ in range(KEY_SIZE)]))
| 24.230769 | 118 | 0.726984 | import random
import string
from mongoengine import Document
from mongoengine.fields import StringField
KEY_SIZE = 32
class ApiKey(Document):
key = StringField(required=True, unique=True,
default=''.join([random.choice(string.ascii_letters + string.digits) for _ in range(KEY_SIZE)]))
| true | true |
1c2d8d5796b88137cbd413287b8438e672ac6586 | 3,444 | py | Python | tests/test_dipdup/test_dipdup.py | spruceid/dipdup-py | adc904196cfd66563938feec0f0afcc5f3df03e3 | [
"MIT"
] | null | null | null | tests/test_dipdup/test_dipdup.py | spruceid/dipdup-py | adc904196cfd66563938feec0f0afcc5f3df03e3 | [
"MIT"
] | null | null | null | tests/test_dipdup/test_dipdup.py | spruceid/dipdup-py | adc904196cfd66563938feec0f0afcc5f3df03e3 | [
"MIT"
] | null | null | null | from contextlib import AsyncExitStack
from datetime import datetime
from os.path import dirname, join
from unittest import IsolatedAsyncioTestCase
from pytz import UTC
from dipdup.config import DipDupConfig
from dipdup.context import pending_indexes
from dipdup.dipdup import IndexDispatcher
from dipdup.enums import IndexStatus, IndexType
from dipdup.exceptions import ReindexingRequiredError
from dipdup.models import Index
from dipdup.test import create_test_dipdup
async def _create_index(hash_: str) -> None:
await Index.create(
**{
'level': 1365000,
'name': 'hen_mainnet',
'template': None,
'config_hash': hash_,
'created_at': datetime(2021, 10, 8, 18, 43, 35, 744412, tzinfo=UTC),
'template_values': {},
'status': IndexStatus.NEW,
'updated_at': datetime(2021, 10, 8, 18, 43, 35, 744449, tzinfo=UTC),
'type': IndexType.operation,
}
)
async def _spawn_index(dispatcher: IndexDispatcher, name: str) -> None:
await dispatcher._ctx._spawn_index(name)
dispatcher._indexes[name] = pending_indexes.pop()
class IndexStateTest(IsolatedAsyncioTestCase):
async def asyncSetUp(self) -> None:
name = 'hic_et_nunc.yml'
config_path = join(dirname(__file__), '..', 'integration_tests', name)
self.config = DipDupConfig.load([config_path])
self.new_hash = '32e3aaf18a45acf090bea833fd89a71c9b50cefcc7d859ff7faf9e1d5ebb5938'
self.old_hash = '18e9a5816f5fa2653f193ce0d99fd157dcead67dcdf7c58e62e1f7c00cbe0152'
async def test_first_run(self) -> None:
async with AsyncExitStack() as stack:
# Arrange
dipdup = await create_test_dipdup(self.config, stack)
dispatcher = IndexDispatcher(dipdup._ctx)
# Act
await _spawn_index(dispatcher, 'hen_mainnet')
# Assert
index = await Index.filter().get()
print(index.__dict__)
self.assertEqual(self.new_hash, index.config_hash)
async def test_new_hash(self) -> None:
async with AsyncExitStack() as stack:
# Arrange
dipdup = await create_test_dipdup(self.config, stack)
dispatcher = IndexDispatcher(dipdup._ctx)
await _create_index(self.new_hash)
# Act
await dispatcher._load_index_states()
# Assert
index = await Index.filter().get()
self.assertEqual(self.new_hash, index.config_hash)
async def test_old_hash(self) -> None:
async with AsyncExitStack() as stack:
# Arrange
dipdup = await create_test_dipdup(self.config, stack)
dispatcher = IndexDispatcher(dipdup._ctx)
await _create_index(self.old_hash)
# Act
await dispatcher._load_index_states()
# Assert
index = await Index.filter().get()
self.assertEqual(self.new_hash, index.config_hash)
async def test_invalid_hash(self) -> None:
async with AsyncExitStack() as stack:
# Arrange
dipdup = await create_test_dipdup(self.config, stack)
dispatcher = IndexDispatcher(dipdup._ctx)
await _create_index('hehehe')
# Act, Assert
with self.assertRaises(ReindexingRequiredError):
await dispatcher._load_index_states()
| 34.787879 | 90 | 0.64547 | from contextlib import AsyncExitStack
from datetime import datetime
from os.path import dirname, join
from unittest import IsolatedAsyncioTestCase
from pytz import UTC
from dipdup.config import DipDupConfig
from dipdup.context import pending_indexes
from dipdup.dipdup import IndexDispatcher
from dipdup.enums import IndexStatus, IndexType
from dipdup.exceptions import ReindexingRequiredError
from dipdup.models import Index
from dipdup.test import create_test_dipdup
async def _create_index(hash_: str) -> None:
await Index.create(
**{
'level': 1365000,
'name': 'hen_mainnet',
'template': None,
'config_hash': hash_,
'created_at': datetime(2021, 10, 8, 18, 43, 35, 744412, tzinfo=UTC),
'template_values': {},
'status': IndexStatus.NEW,
'updated_at': datetime(2021, 10, 8, 18, 43, 35, 744449, tzinfo=UTC),
'type': IndexType.operation,
}
)
async def _spawn_index(dispatcher: IndexDispatcher, name: str) -> None:
await dispatcher._ctx._spawn_index(name)
dispatcher._indexes[name] = pending_indexes.pop()
class IndexStateTest(IsolatedAsyncioTestCase):
async def asyncSetUp(self) -> None:
name = 'hic_et_nunc.yml'
config_path = join(dirname(__file__), '..', 'integration_tests', name)
self.config = DipDupConfig.load([config_path])
self.new_hash = '32e3aaf18a45acf090bea833fd89a71c9b50cefcc7d859ff7faf9e1d5ebb5938'
self.old_hash = '18e9a5816f5fa2653f193ce0d99fd157dcead67dcdf7c58e62e1f7c00cbe0152'
async def test_first_run(self) -> None:
async with AsyncExitStack() as stack:
dipdup = await create_test_dipdup(self.config, stack)
dispatcher = IndexDispatcher(dipdup._ctx)
await _spawn_index(dispatcher, 'hen_mainnet')
index = await Index.filter().get()
print(index.__dict__)
self.assertEqual(self.new_hash, index.config_hash)
async def test_new_hash(self) -> None:
async with AsyncExitStack() as stack:
dipdup = await create_test_dipdup(self.config, stack)
dispatcher = IndexDispatcher(dipdup._ctx)
await _create_index(self.new_hash)
await dispatcher._load_index_states()
index = await Index.filter().get()
self.assertEqual(self.new_hash, index.config_hash)
async def test_old_hash(self) -> None:
async with AsyncExitStack() as stack:
dipdup = await create_test_dipdup(self.config, stack)
dispatcher = IndexDispatcher(dipdup._ctx)
await _create_index(self.old_hash)
await dispatcher._load_index_states()
index = await Index.filter().get()
self.assertEqual(self.new_hash, index.config_hash)
async def test_invalid_hash(self) -> None:
async with AsyncExitStack() as stack:
dipdup = await create_test_dipdup(self.config, stack)
dispatcher = IndexDispatcher(dipdup._ctx)
await _create_index('hehehe')
with self.assertRaises(ReindexingRequiredError):
await dispatcher._load_index_states()
| true | true |
1c2d8d90a17121995310552a973682f2ec38ce4f | 2,745 | py | Python | tests/test_pyglotaran_alias.py | glotaran/pyglotaran-alias | e8e490a50e70551bf84bdfba0c4aa55c8ac8406b | [
"Apache-2.0"
] | 2 | 2021-03-02T09:23:56.000Z | 2021-11-09T11:40:43.000Z | tests/test_pyglotaran_alias.py | glotaran/pyglotaran-alias | e8e490a50e70551bf84bdfba0c4aa55c8ac8406b | [
"Apache-2.0"
] | 11 | 2020-10-04T00:14:04.000Z | 2021-06-18T20:45:44.000Z | tests/test_pyglotaran_alias.py | glotaran/pyglotaran-alias | e8e490a50e70551bf84bdfba0c4aa55c8ac8406b | [
"Apache-2.0"
] | 2 | 2020-10-03T23:51:48.000Z | 2020-10-18T16:26:36.000Z | import re
import subprocess
import sys
import pytest
from _pytest.monkeypatch import MonkeyPatch
def test_exception_if_glotaran_is_missing(monkeypatch: MonkeyPatch):
"""Raise Exception if glotaran isn't installed."""
monkeypatch.setitem(sys.modules, "glotaran", None)
with pytest.raises(ImportError, match=r"you need to install pyglotaran"):
import pyglotaran # noqa: F401
def test_glotaran_import_not_leeking_out():
"""Glotaran isn't imported normally and thus not in globals."""
import pyglotaran # noqa: F401
assert "glotaran" not in globals().keys()
@pytest.mark.parametrize(
"pyglotaran_alias_local_variable",
[
"find_spec",
"fullname",
"glotaran_module",
"modules_to_update",
],
)
def test_pyglotaran_alias_local_variables_leeking_out(pyglotaran_alias_local_variable: str):
"""Test that local variables are removed."""
assert pyglotaran_alias_local_variable not in locals().keys()
assert pyglotaran_alias_local_variable not in globals().keys()
with pytest.raises(ImportError):
exec(f"from pyglotaran import {pyglotaran_alias_local_variable}")
def test_import_works():
"""Check that 'import pyglotaran' works and 'pyglotaran' is an actual alias to 'glotaran'."""
# pylint: disable=no-member
import glotaran # noqa: F401
import pyglotaran # noqa: F401
assert hasattr(pyglotaran, "__version__")
assert glotaran.__version__ == pyglotaran.__version__ # type:ignore
loaded_module_names = sys.modules.keys()
glotaran_modules = tuple(
filter(
lambda name: name == "glotaran" or name.startswith("glotaran."),
loaded_module_names,
)
)
pyglotaran_modules = tuple(
filter(
lambda name: name == "pyglotaran" or name.startswith("pyglotaran."),
loaded_module_names,
)
)
assert len(glotaran_modules) == len(pyglotaran_modules)
for glotaran_module in glotaran_modules:
assert f"py{glotaran_module}" in pyglotaran_modules
assert glotaran.model.model.__code__ == pyglotaran.model.model.__code__ # type:ignore
def test_from_import_works():
"""Test that from imports work."""
# pylint: disable=no-name-in-module
import glotaran # noqa: F401
from pyglotaran.model import model # type:ignore
assert glotaran.model.model.__code__ == model.__code__
def test_cli_raises_proper_exeption():
"""Test that the cli alias works properly."""
output = subprocess.run("pyglotaran", shell=True, capture_output=True)
assert (
re.search(br"Usage\: pyglotaran \[OPTIONS\] COMMAND \[ARGS\]", output.stdout) is not None
)
assert output.stderr == b""
| 29.836957 | 97 | 0.695811 | import re
import subprocess
import sys
import pytest
from _pytest.monkeypatch import MonkeyPatch
def test_exception_if_glotaran_is_missing(monkeypatch: MonkeyPatch):
monkeypatch.setitem(sys.modules, "glotaran", None)
with pytest.raises(ImportError, match=r"you need to install pyglotaran"):
import pyglotaran
def test_glotaran_import_not_leeking_out():
import pyglotaran
assert "glotaran" not in globals().keys()
@pytest.mark.parametrize(
"pyglotaran_alias_local_variable",
[
"find_spec",
"fullname",
"glotaran_module",
"modules_to_update",
],
)
def test_pyglotaran_alias_local_variables_leeking_out(pyglotaran_alias_local_variable: str):
assert pyglotaran_alias_local_variable not in locals().keys()
assert pyglotaran_alias_local_variable not in globals().keys()
with pytest.raises(ImportError):
exec(f"from pyglotaran import {pyglotaran_alias_local_variable}")
def test_import_works():
import glotaran
import pyglotaran
assert hasattr(pyglotaran, "__version__")
assert glotaran.__version__ == pyglotaran.__version__
loaded_module_names = sys.modules.keys()
glotaran_modules = tuple(
filter(
lambda name: name == "glotaran" or name.startswith("glotaran."),
loaded_module_names,
)
)
pyglotaran_modules = tuple(
filter(
lambda name: name == "pyglotaran" or name.startswith("pyglotaran."),
loaded_module_names,
)
)
assert len(glotaran_modules) == len(pyglotaran_modules)
for glotaran_module in glotaran_modules:
assert f"py{glotaran_module}" in pyglotaran_modules
assert glotaran.model.model.__code__ == pyglotaran.model.model.__code__
def test_from_import_works():
import glotaran
from pyglotaran.model import model
assert glotaran.model.model.__code__ == model.__code__
def test_cli_raises_proper_exeption():
output = subprocess.run("pyglotaran", shell=True, capture_output=True)
assert (
re.search(br"Usage\: pyglotaran \[OPTIONS\] COMMAND \[ARGS\]", output.stdout) is not None
)
assert output.stderr == b""
| true | true |
1c2d8dc766507b416a2c61e4bba7204773b5038c | 7,431 | py | Python | main/hongmeng.py | Andimeo/EverydayWechat | 7b36443528442bc90c1b0bb3b1c5f88a6cbf008b | [
"MIT"
] | 23 | 2019-08-14T01:10:16.000Z | 2019-08-16T10:17:19.000Z | main/hongmeng.py | Pengjie-Li/EverydayWechat | 86648b9b9e8f9e51216bd6f4d630abdd3bf7c2f1 | [
"MIT"
] | null | null | null | main/hongmeng.py | Pengjie-Li/EverydayWechat | 86648b9b9e8f9e51216bd6f4d630abdd3bf7c2f1 | [
"MIT"
] | null | null | null | # coding=utf-8
"""
每天定时给多个女友发给微信暖心话
核心代码。
"""
import os
import time
import json
from apscheduler.schedulers.blocking import BlockingScheduler
import itchat
import random
from itchat.content import *
from main.common import (
get_yaml
)
from main.utils import (
get_bot_info,
get_weather_info,
get_dictum_info,
get_diff_time,
get_xzw_info
)
reply_userNames = []
FILEHELPER_MARK = ['文件传输助手', 'filehelper'] # 文件传输助手标识
FILEHELPER = 'filehelper'
def run():
    """Main entry point: load config, ensure login, then start the reminder scheduler."""
    conf = get_yaml()
    if not conf:  # An empty conf means the configuration file failed to load.
        print('程序中止...')
        return
    # Make sure we are logged in, auto-logging-in if necessary; False means login failed.
    if not is_online(auto_login=True):
        return
    set_system_notice('登录成功')
    if conf.get('is_auto_relay'):
        print('已开启图灵自动回复...')
    init_alarm()  # Set up the scheduled reminder job (BlockingScheduler blocks inside).
def is_online(auto_login=False):
    """
    Check whether the WeChat session is still alive.

    :param auto_login: bool, when True try to re-login automatically (default False).
    :return: bool, True when online; False when the connection is gone.
    """
    def _online():
        """
        Probe the session by fetching the friend list.

        :return: bool, True when online; False when the connection is gone.
        """
        try:
            if itchat.search_friends():
                return True
        except IndexError:
            return False
        return True
    if _online(): return True  # Already online: nothing else to do.
    if not auto_login:  # Caller does not want an automatic re-login.
        print('微信已离线..')
        return False
    # Hot reload keeps the cached session; a forced switch requires rescanning the QR code.
    hotReload = not get_yaml().get('is_forced_switch', False)
    loginCallback = init_wechat
    exitCallback = exit_msg
    for _ in range(2):  # Attempt to log in at most twice.
        if os.environ.get('MODE') == 'server':
            # Render the login QR code in the terminal (headless server mode).
            itchat.auto_login(enableCmdQR=2, hotReload=hotReload, loginCallback=loginCallback,
                              exitCallback=exitCallback)
            itchat.run(blockThread=False)
        else:
            itchat.auto_login(hotReload=hotReload, loginCallback=loginCallback, exitCallback=exitCallback)
            itchat.run(blockThread=False)
        if _online():
            print('登录成功')
            return True
    print('登录失败。')
    return False
def init_wechat():
    """Refresh cached WeChat data and build the auto-reply recipient id list."""
    conf = get_yaml()
    itchat.get_friends(update=True)  # Refresh the friend cache.
    itchat.get_chatrooms(update=True)  # Refresh the group-chat cache.
    for name in conf.get('auto_reply_names'):
        if name.lower() in FILEHELPER_MARK:  # The file-transfer helper has a fixed id.
            if FILEHELPER not in reply_userNames:
                reply_userNames.append(FILEHELPER)
            continue
        friend = get_friend(name)
        if friend:
            reply_userNames.append(friend['UserName'])
        else:
            print('自动回复中的好友昵称『{}』有误。'.format(name))
    # print(reply_userNames)
def init_alarm():
    """Validate the alarm config and start the daily reminder scheduler (blocking)."""
    alarm_info = get_yaml().get('alarm_info', None)
    if not alarm_info: return
    is_alarm = alarm_info.get('is_alarm', False)
    if not is_alarm: return
    alarm_timed = alarm_info.get('alarm_timed', None)
    if not alarm_timed: return
    hour, minute = [int(x) for x in alarm_timed.split(':')]
    # Validate the configured contacts before scheduling anything.
    for info in get_yaml().get('girlfriend_infos'):
        if not info: break  # Guards against an empty entry list.
        wechat_name = info.get('wechat_name')
        if (wechat_name and wechat_name.lower() not in FILEHELPER_MARK
                and not get_friend(wechat_name)):
            print('定时任务中的好友名称『{}』有误。'.format(wechat_name))
        # Group chats must be saved to contacts to be found by itchat.
        group_name = info.get('group_name')
        if group_name and not get_group(group_name):
            print('定时任务中的群聊名称『{}』有误。'
                  '(注意:必须要把需要的群聊保存到通讯录)'.format(group_name))
    # Fire the daily message at the configured hh:mm (15-minute misfire grace).
    scheduler = BlockingScheduler()
    scheduler.add_job(send_alarm_msg, 'cron', hour=hour,
                      minute=minute, misfire_grace_time=15 * 60)
    # For testing, send one every 30 seconds instead:
    # scheduler.add_job(send_alarm_msg, 'interval', seconds=30)
    print('已开启定时发送提醒功能...')
    scheduler.start()
@itchat.msg_register([TEXT])
def text_reply(msg):
    """Auto-reply handler for incoming text messages.

    Only senders on the ``reply_userNames`` whitelist (built by
    ``init_wechat``) receive a bot-generated answer.
    """
    try:
        # Messages typed into the file-transfer helper arrive with
        # ToUserName == 'filehelper'; everything else keys on the sender id.
        uuid = FILEHELPER if msg['ToUserName'] == FILEHELPER else msg.fromUserName
        if uuid in reply_userNames:
            receive_text = msg.text  # The incoming message body.
            nickName = FILEHELPER if uuid == FILEHELPER else msg.user.nickName
            print('\n{}发来信息:{}'.format(nickName, receive_text))
            reply_text = get_bot_info(receive_text, uuid)  # Ask the bot for an answer.
            time.sleep(random.randint(0, 2))  # Small random delay to look less bot-like.
            if reply_text:
                reply_text = reply_text if not uuid == FILEHELPER else '机器人回复:' + reply_text
                itchat.send(reply_text, toUserName=uuid)
                print('回复{}:{}\n'.format(nickName, reply_text))
            else:
                # Fix: the original format string had no placeholder, so the
                # received text was silently dropped from this log line.
                print('自动回复失败:{}\n'.format(receive_text))
    except Exception as e:
        print(str(e))
def send_alarm_msg():
    """Compose and deliver the scheduled daily message to each configured contact."""
    print('\n启动定时自动提醒...')
    conf = get_yaml()
    for gf in conf.get('girlfriend_infos'):
        # Gather the optional sections; empty ones drop out of the join below.
        dictum = get_dictum_info(gf.get('dictum_channel'))
        weather = get_weather_info(gf.get('city_name'))
        diff_time = get_diff_time(gf.get('start_date'))
        sweet_words = gf.get('sweet_words')
        horoscope = get_xzw_info(gf.get("birthday"))
        send_msg = '\n'.join(x for x in [weather, dictum, diff_time, sweet_words, horoscope] if x)
        print(send_msg)
        if not send_msg or not is_online(): continue
        # Deliver to the configured friend (or the file-transfer helper).
        wechat_name = gf.get('wechat_name')
        if wechat_name:
            if wechat_name.lower() in FILEHELPER_MARK:
                itchat.send(send_msg, toUserName=FILEHELPER)
                print('定时给『{}』发送的内容是:\n{}\n发送成功...\n\n'.format(wechat_name, send_msg))
            else:
                wechat_users = itchat.search_friends(name=wechat_name)
                if not wechat_users: continue
                wechat_users[0].send(send_msg)
                print('定时给『{}』发送的内容是:\n{}\n发送成功...\n\n'.format(wechat_name, send_msg))
        # Deliver to the configured group chat, if any.
        group_name = gf.get('group_name')
        if group_name:
            group = get_group(group_name)
            if group:
                group.send(send_msg)
                print('定时给群聊『{}』发送的内容是:\n{}\n发送成功...\n\n'.format(group_name, send_msg))
    print('自动提醒消息发送完成...\n')
def set_system_notice(text):
    """Send a system log line to the WeChat file-transfer helper.

    :param text: log content; falsy text is ignored
    :return: None
    """
    if not text:
        return
    itchat.send('系统通知:' + text, toUserName=FILEHELPER)
def exit_msg():
    """Logout callback: notify the file-transfer helper that the session ended."""
    set_system_notice('项目已断开连接')
def get_group(gruop_name, update=False):
    """
    Look up a saved group chat by display name.

    Note: the parameter is (mis)spelled ``gruop_name``; it is kept as-is for
    backward compatibility with keyword callers.

    :param gruop_name: group-chat display name
    :param update: when True, refresh the cached group list first
    :return: the first matching group record, or None
    """
    if update: itchat.get_chatrooms(update=True)
    if not gruop_name: return None
    groups = itchat.search_chatrooms(name=gruop_name)
    if not groups: return None
    return groups[0]
def get_friend(wechat_name, update=False):
    """Look up a friend record by display name.

    :param wechat_name: display name to search for
    :param update: when True, refresh the cached friend list first
    :return: the first matching friend record, or None
    """
    if update:
        itchat.get_friends(update=True)
    if not wechat_name:
        return None
    matches = itchat.search_friends(name=wechat_name)
    return matches[0] if matches else None
if __name__ == '__main__':
    run()
    # send_alarm_msg()  # Uncomment to fire one reminder immediately for testing.
| 29.141176 | 106 | 0.610416 |
import os
import time
import json
from apscheduler.schedulers.blocking import BlockingScheduler
import itchat
import random
from itchat.content import *
from main.common import (
get_yaml
)
from main.utils import (
get_bot_info,
get_weather_info,
get_dictum_info,
get_diff_time,
get_xzw_info
)
reply_userNames = []
FILEHELPER_MARK = ['文件传输助手', 'filehelper']
FILEHELPER = 'filehelper'
def run():
conf = get_yaml()
if not conf:
print('程序中止...')
return
if not is_online(auto_login=True):
return
set_system_notice('登录成功')
if conf.get('is_auto_relay'):
print('已开启图灵自动回复...')
init_alarm()
def is_online(auto_login=False):
def _online():
try:
if itchat.search_friends():
return True
except IndexError:
return False
return True
if _online(): return True
if not auto_login:
print('微信已离线..')
return False
hotReload = not get_yaml().get('is_forced_switch', False)
loginCallback = init_wechat
exitCallback = exit_msg
for _ in range(2):
if os.environ.get('MODE') == 'server':
itchat.auto_login(enableCmdQR=2, hotReload=hotReload, loginCallback=loginCallback,
exitCallback=exitCallback)
itchat.run(blockThread=False)
else:
itchat.auto_login(hotReload=hotReload, loginCallback=loginCallback, exitCallback=exitCallback)
itchat.run(blockThread=False)
if _online():
print('登录成功')
return True
print('登录失败。')
return False
def init_wechat():
conf = get_yaml()
itchat.get_friends(update=True)
itchat.get_chatrooms(update=True)
for name in conf.get('auto_reply_names'):
if name.lower() in FILEHELPER_MARK:
if FILEHELPER not in reply_userNames:
reply_userNames.append(FILEHELPER)
continue
friend = get_friend(name)
if friend:
reply_userNames.append(friend['UserName'])
else:
print('自动回复中的好友昵称『{}』有误。'.format(name))
def init_alarm():
alarm_info = get_yaml().get('alarm_info', None)
if not alarm_info: return
is_alarm = alarm_info.get('is_alarm', False)
if not is_alarm: return
alarm_timed = alarm_info.get('alarm_timed', None)
if not alarm_timed: return
hour, minute = [int(x) for x in alarm_timed.split(':')]
for info in get_yaml().get('girlfriend_infos'):
if not info: break
wechat_name = info.get('wechat_name')
if (wechat_name and wechat_name.lower() not in FILEHELPER_MARK
and not get_friend(wechat_name)):
print('定时任务中的好友名称『{}』有误。'.format(wechat_name))
group_name = info.get('group_name')
if group_name and not get_group(group_name):
print('定时任务中的群聊名称『{}』有误。'
'(注意:必须要把需要的群聊保存到通讯录)'.format(group_name))
scheduler = BlockingScheduler()
scheduler.add_job(send_alarm_msg, 'cron', hour=hour,
minute=minute, misfire_grace_time=15 * 60)
print('已开启定时发送提醒功能...')
scheduler.start()
@itchat.msg_register([TEXT])
def text_reply(msg):
try:
uuid = FILEHELPER if msg['ToUserName'] == FILEHELPER else msg.fromUserName
if uuid in reply_userNames:
receive_text = msg.text
nickName = FILEHELPER if uuid == FILEHELPER else msg.user.nickName
print('\n{}发来信息:{}'.format(nickName, receive_text))
reply_text = get_bot_info(receive_text, uuid)
time.sleep(random.randint(0, 2))
if reply_text:
reply_text = reply_text if not uuid == FILEHELPER else '机器人回复:' + reply_text
itchat.send(reply_text, toUserName=uuid)
print('回复{}:{}\n'.format(nickName, reply_text))
else:
print('自动回复失败\n'.format(receive_text))
except Exception as e:
print(str(e))
def send_alarm_msg():
print('\n启动定时自动提醒...')
conf = get_yaml()
for gf in conf.get('girlfriend_infos'):
dictum = get_dictum_info(gf.get('dictum_channel'))
weather = get_weather_info(gf.get('city_name'))
diff_time = get_diff_time(gf.get('start_date'))
sweet_words = gf.get('sweet_words')
horoscope = get_xzw_info(gf.get("birthday"))
send_msg = '\n'.join(x for x in [weather, dictum, diff_time, sweet_words, horoscope] if x)
print(send_msg)
if not send_msg or not is_online(): continue
wechat_name = gf.get('wechat_name')
if wechat_name:
if wechat_name.lower() in FILEHELPER_MARK:
itchat.send(send_msg, toUserName=FILEHELPER)
print('定时给『{}』发送的内容是:\n{}\n发送成功...\n\n'.format(wechat_name, send_msg))
else:
wechat_users = itchat.search_friends(name=wechat_name)
if not wechat_users: continue
wechat_users[0].send(send_msg)
print('定时给『{}』发送的内容是:\n{}\n发送成功...\n\n'.format(wechat_name, send_msg))
group_name = gf.get('group_name')
if group_name:
group = get_group(group_name)
if group:
group.send(send_msg)
print('定时给群聊『{}』发送的内容是:\n{}\n发送成功...\n\n'.format(group_name, send_msg))
print('自动提醒消息发送完成...\n')
def set_system_notice(text):
if text:
text = '系统通知:' + text
itchat.send(text, toUserName=FILEHELPER)
def exit_msg():
set_system_notice('项目已断开连接')
def get_group(gruop_name, update=False):
if update: itchat.get_chatrooms(update=True)
if not gruop_name: return None
groups = itchat.search_chatrooms(name=gruop_name)
if not groups: return None
return groups[0]
def get_friend(wechat_name, update=False):
if update: itchat.get_friends(update=True)
if not wechat_name: return None
friends = itchat.search_friends(name=wechat_name)
if not friends: return None
return friends[0]
if __name__ == '__main__':
run()
| true | true |
1c2d8e4a5a4f9d4d1449c4145d4add75077b4a01 | 2,022 | py | Python | setup.py | techalchemy/tablib | 6b80db8fa806980d342bdf11bc1cd31973371cf3 | [
"MIT"
] | null | null | null | setup.py | techalchemy/tablib | 6b80db8fa806980d342bdf11bc1cd31973371cf3 | [
"MIT"
] | null | null | null | setup.py | techalchemy/tablib | 6b80db8fa806980d342bdf11bc1cd31973371cf3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# setup.py for tablib: packaging metadata plus a few convenience commands
# ("publish", "speedups", "test") dispatched on the last CLI argument.
import os
import re
import sys
try:
    from setuptools import setup
except ImportError:
    # Fall back to distutils when setuptools is unavailable.
    from distutils.core import setup
# `python setup.py publish` -> build an sdist and upload it to PyPI.
if sys.argv[-1] == 'publish':
    os.system("python setup.py sdist upload")
    sys.exit()
# `python setup.py speedups` -> install the optional ujson accelerator.
if sys.argv[-1] == 'speedups':
    try:
        __import__('pip')
    except ImportError:
        print('Pip required.')
        sys.exit(1)
    os.system('pip install ujson')
    sys.exit()
# `python setup.py test` -> run the test suite with py.test.
if sys.argv[-1] == 'test':
    try:
        __import__('py')
    except ImportError:
        print('py.test required.')
        sys.exit(1)
    errors = os.system('py.test test_tablib.py')
    sys.exit(bool(errors))
# Packages shipped in the distribution.
packages = [
    'tablib', 'tablib.formats',
    'tablib.packages',
    'tablib.packages.dbfpy',
    'tablib.packages.dbfpy3'
]
# Runtime dependencies.
install = [
    'odfpy',
    'openpyxl',
    'unicodecsv',
    'xlrd',
    'xlwt',
    'pyyaml',
    'six'
]
# Read the version straight out of tablib/core.py so it lives in one place.
with open('tablib/core.py', 'r') as fd:
    version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
                        fd.read(), re.MULTILINE).group(1)
setup(
    name='tablib',
    version=version,
    description='Format agnostic tabular data library (XLS, JSON, YAML, CSV)',
    long_description=(open('README.rst').read() + '\n\n' +
                      open('HISTORY.rst').read()),
    author='Kenneth Reitz',
    author_email='me@kennethreitz.org',
    url='http://python-tablib.org',
    packages=packages,
    license='MIT',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    tests_require=['pytest'],
    install_requires=install,
    extras_require={
        'pandas': ['pandas'],
    },
)
| 23.241379 | 78 | 0.577151 |
import os
import re
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == 'publish':
os.system("python setup.py sdist upload")
sys.exit()
if sys.argv[-1] == 'speedups':
try:
__import__('pip')
except ImportError:
print('Pip required.')
sys.exit(1)
os.system('pip install ujson')
sys.exit()
if sys.argv[-1] == 'test':
try:
__import__('py')
except ImportError:
print('py.test required.')
sys.exit(1)
errors = os.system('py.test test_tablib.py')
sys.exit(bool(errors))
packages = [
'tablib', 'tablib.formats',
'tablib.packages',
'tablib.packages.dbfpy',
'tablib.packages.dbfpy3'
]
install = [
'odfpy',
'openpyxl',
'unicodecsv',
'xlrd',
'xlwt',
'pyyaml',
'six'
]
with open('tablib/core.py', 'r') as fd:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
setup(
name='tablib',
version=version,
description='Format agnostic tabular data library (XLS, JSON, YAML, CSV)',
long_description=(open('README.rst').read() + '\n\n' +
open('HISTORY.rst').read()),
author='Kenneth Reitz',
author_email='me@kennethreitz.org',
url='http://python-tablib.org',
packages=packages,
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
tests_require=['pytest'],
install_requires=install,
extras_require={
'pandas': ['pandas'],
},
)
| true | true |
1c2d8ecc146adf9ed998328f8ab9413613b7009c | 998 | py | Python | eyey/config.py | dincamihai/eyey | 146c75a1c8b4f3da6ced57b4fee43ba3315052e0 | [
"Apache-2.0"
] | null | null | null | eyey/config.py | dincamihai/eyey | 146c75a1c8b4f3da6ced57b4fee43ba3315052e0 | [
"Apache-2.0"
] | null | null | null | eyey/config.py | dincamihai/eyey | 146c75a1c8b4f3da6ced57b4fee43ba3315052e0 | [
"Apache-2.0"
] | null | null | null | import imaplib
from credentials import USER, PASSWORD, SERVER
EVAL_INTERVAL = 60
SUBJECT_FEATURES = 10
BODY_FEATURES = 100
BUGZILLA_HEADERS = [
"X-Bugzilla-Reason", # QAcontact
"X-Bugzilla-Type", # changed
"X-Bugzilla-Watch-Reason", # None
"X-Bugzilla-Classification", # SUSE Manager
"X-Bugzilla-Product", # SUSE Manager 3.2
"X-Bugzilla-Component", # Salt
"X-Bugzilla-Version", # 3.2.4
# "X-Bugzilla-Keywords", # DSLA_REQUIRED, DSLA_SOLUTION_PROVIDED
"X-Bugzilla-Severity", # Major
"X-Bugzilla-Who", # someone@example.com
"X-Bugzilla-Status", # REOPENED
"X-Bugzilla-Priority", # P2 - High
"X-Bugzilla-Assigned-To", # someone@example.com
"X-Bugzilla-QA-Contact", # someone@example.com
"X-Bugzilla-Target-Milestone", # ---
"X-Bugzilla-Flags", #
# "X-Bugzilla-Changed-Fields", #
# "X-Bugzilla-NTS-Support-Num", #
]
def get_connection():
    """Open an SSL IMAP connection to SERVER and log in with the configured credentials."""
    con = imaplib.IMAP4_SSL(SERVER)
    con.login(USER, PASSWORD)
    return con
| 26.263158 | 68 | 0.663327 | import imaplib
from credentials import USER, PASSWORD, SERVER
EVAL_INTERVAL = 60
SUBJECT_FEATURES = 10
BODY_FEATURES = 100
BUGZILLA_HEADERS = [
"X-Bugzilla-Reason",
"X-Bugzilla-Type",
"X-Bugzilla-Watch-Reason",
"X-Bugzilla-Classification",
"X-Bugzilla-Product",
"X-Bugzilla-Component",
"X-Bugzilla-Version",
zilla-Who",
"X-Bugzilla-Status",
"X-Bugzilla-Priority",
"X-Bugzilla-Assigned-To",
"X-Bugzilla-QA-Contact",
"X-Bugzilla-Target-Milestone",
"X-Bugzilla-Flags",
def get_connection():
con = imaplib.IMAP4_SSL(SERVER)
con.login(USER, PASSWORD)
return con
| true | true |
1c2d8f41bb0f80e0baf9bb69b37515c206cf19d0 | 7,278 | py | Python | detection/demo_retinanet.py | ravising-h/Grad-CAM.pytorch | e0a1ae3870ea189f9b4cd8782eb77bfb5293c31b | [
"Apache-2.0"
] | null | null | null | detection/demo_retinanet.py | ravising-h/Grad-CAM.pytorch | e0a1ae3870ea189f9b4cd8782eb77bfb5293c31b | [
"Apache-2.0"
] | null | null | null | detection/demo_retinanet.py | ravising-h/Grad-CAM.pytorch | e0a1ae3870ea189f9b4cd8782eb77bfb5293c31b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
@File : demo_retinanet.py
@Time : 2020/5/16 下午9:59
@Author : yizuotian
@Description :
"""
import argparse
import multiprocessing as mp
import os
from tqdm import tqdm
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
import cv2
import detectron2.data.transforms as T
import numpy as np
import torch
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog
from detectron2.data.detection_utils import read_image
from detectron2.modeling import build_model
from detectron2.utils.logger import setup_logger
from skimage import io
from grad_cam_retinanet import GradCAM, GradCamPlusPlus
# constants
WINDOW_NAME = "COCO detections"
def setup_cfg(args):
    """Build a frozen detectron2 config from the config file plus CLI overrides.

    :param args: parsed namespace from ``get_parser()``.
    :return: the frozen config node.
    """
    # load config from file and command-line arguments
    cfg = get_cfg()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # Set score_threshold for builtin models.  Note the RetinaNet threshold
    # is hard-coded to 0.25 and ignores args.confidence_threshold.
    cfg.MODEL.RETINANET.SCORE_THRESH_TEST = 0.25
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.confidence_threshold
    cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args.confidence_threshold
    cfg.freeze()
    return cfg
def norm_image(image):
    """
    Normalize an image to uint8 in [0, 255].

    The non-negative part of the global minimum is subtracted, then the
    result is scaled by its maximum and mapped onto 0-255.

    :param image: float array of shape [H, W, C]
    :return: uint8 array of the same shape
    """
    image = image.copy()
    # np.maximum clamps the minimum at 0.  The original code called
    # np.max(np.min(image), 0), which passes 0 as the *axis* argument of a
    # reduction over a 0-d scalar instead of clamping.
    image -= np.maximum(np.min(image), 0)
    image /= np.max(image)
    image *= 255.
    return np.uint8(image)
def gen_cam(image, mask):
    """Blend a CAM activation mask onto an image.

    :param image: original image, [H, W, C]
    :param mask: CAM activation map, [H, W], values in [0, 1]
    :return: tuple of (uint8 CAM overlay, float RGB heatmap in [0, 1])
    """
    # Map the mask through the JET palette; OpenCV returns BGR, so flip the
    # channel axis to obtain RGB before blending.
    colored = cv2.applyColorMap(np.uint8(255 * mask), cv2.COLORMAP_JET)
    heatmap = (np.float32(colored) / 255)[..., ::-1]
    # Overlay the heatmap on the source image and renormalize to uint8.
    return norm_image(heatmap + np.float32(image)), heatmap
def save_image(image_dicts, input_image_name, layer_name, network='retinanet', output_dir='./results'):
    """Write each named image to *output_dir* as a JPEG.

    The raw 'predict_box' crop is saved as-is; every other entry is assumed
    to hold floats in [0, 1] and is rescaled to uint8 first.
    """
    prefix = os.path.splitext(input_image_name)[0]
    for key, image in image_dicts.items():
        if key == 'predict_box':
            filename = '{}-{}-{}.jpg'.format(prefix, network, key)
            data = image
        else:
            filename = '{}-{}-{}-{}.jpg'.format(prefix, network, layer_name, key)
            data = (255 * image).astype(np.uint8)
        io.imsave(os.path.join(output_dir, filename), data)
def get_parser():
    """Build the command-line argument parser for the demo script."""
    arg_parser = argparse.ArgumentParser(description="Detectron2 demo for builtin models")
    arg_parser.add_argument("--config-file", metavar="FILE",
                            default="configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml",
                            help="path to config file")
    arg_parser.add_argument("--input", help="A list of space separated input images")
    arg_parser.add_argument("--output",
                            help="A file or directory to save output visualizations. "
                                 "If not given, will show output in an OpenCV window.")
    arg_parser.add_argument("--confidence-threshold", type=float, default=0.2,
                            help="Minimum score for instance predictions to be shown")
    # Everything after --opts is forwarded verbatim as config KEY VALUE pairs.
    arg_parser.add_argument("--opts", default=[], nargs=argparse.REMAINDER,
                            help="Modify config options using the command-line 'KEY VALUE' pairs")
    arg_parser.add_argument('--layer-name', type=str, default='head.cls_subnet.2',
                            help='使用哪层特征去生成CAM')
    return arg_parser
def main(args):
    """Run Grad-CAM / Grad-CAM++ over the 8 RetinaNet ``cls_subnet`` layers for
    one image and save a 2x9 comparison grid (top row: Grad-CAM, bottom row:
    Grad-CAM++; column 0 holds the detected crop).

    :param args: parsed CLI namespace from ``get_parser()``.
    """
    from detectron2.data.datasets import register_coco_instances
    register_coco_instances("Dent_Detection_train", {}, "/content/drive/MyDrive/Damage/Dataset/coco_15k_balanced/annotations/instance_train2017.json", "/content/drive/MyDrive/Damage/Dataset/coco_15k_balanced/JPEGImages")
    register_coco_instances("Dent_Detection_test", {}, "/content/drive/MyDrive/Damage/Dataset/coco_15k_balanced/annotations/instance_val2017.json", "/content/drive/MyDrive/Damage/Dataset/coco_15k_balanced/JPEGImages")
    MetadataCatalog.get("Dent_Detection_test").thing_classes = ['Dent']
    setup_logger(name="fvcore")
    logger = setup_logger()
    logger.info("Arguments: " + str(args))
    cfg = setup_cfg(args)
    print(cfg)
    # Build the model and load the checkpoint given via MODEL.WEIGHTS.
    model = build_model(cfg)
    checkpointer = DetectionCheckpointer(model)
    checkpointer.load(cfg.MODEL.WEIGHTS)
    # Fix: `output_dir` was referenced by plt.savefig below but never defined
    # (NameError).  Honor --output when given, else use save_image's default.
    output_dir = args.output if args.output else './results'
    os.makedirs(output_dir, exist_ok=True)
    # Preprocess the input image the same way detectron2's inference path does.
    path = os.path.expanduser(args.input)
    original_image = read_image(path, format="BGR")
    height, width = original_image.shape[:2]
    transform_gen = T.ResizeShortestEdge(
        [cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST
    )
    image = transform_gen.get_transform(original_image).apply_image(original_image)
    # Gradients w.r.t. the input are required for the CAM computation.
    image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1)).requires_grad_(True)
    inputs = {"image": image, "height": height, "width": width}
    # Slots 0 and 9 hold the detected crop; 1-8 Grad-CAM, 10-17 Grad-CAM++.
    img_grid = [0] * 18
    for ly in tqdm(range(8), desc="LAYER BY LAYER"):
        layer_name = f'head.cls_subnet.{ly}'
        # Grad-CAM
        grad_cam = GradCAM(model, layer_name)
        mask, box, class_id = grad_cam(inputs)  # cam mask
        grad_cam.remove_handlers()
        image_dict = {}
        img = original_image[..., ::-1]  # BGR -> RGB
        x1, y1, x2, y2 = box
        image_dict['predict_box'] = img[y1:y2, x1:x2]
        img_grid[0], img_grid[9] = image_dict['predict_box'], image_dict['predict_box']
        _, image_dict['heatmap'] = gen_cam(img[y1:y2, x1:x2], mask[y1:y2, x1:x2])
        img_grid[ly + 1] = (image_dict['heatmap'] * 255).astype(np.uint8)
        # Grad-CAM++
        grad_cam_plus_plus = GradCamPlusPlus(model, layer_name)
        mask_plus_plus = grad_cam_plus_plus(inputs)  # cam mask
        _, image_dict['heatmap++'] = gen_cam(img[y1:y2, x1:x2], mask_plus_plus[y1:y2, x1:x2])
        img_grid[ly + 9] = (image_dict['heatmap++'] * 255).astype(np.uint8)
        grad_cam_plus_plus.remove_handlers()
        # Resolve the predicted class id to a readable label.
        meta = MetadataCatalog.get(
            cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else "__unused"
        )
        label = meta.thing_classes[class_id]
        print("label:{}".format(label))
    fig = plt.figure(figsize=(20., 6.))
    grid = ImageGrid(fig, 111,  # similar to subplot(111)
                     nrows_ncols=(2, 9),  # creates a 2x9 grid of axes
                     axes_pad=0.1,  # pad between axes in inch.
                     )
    for ax, im_ in zip(grid, img_grid):
        # Blend each heatmap with the detected crop for easier comparison.
        ax.imshow(cv2.addWeighted(im_, 0.6, img_grid[0], 0.4, 0.2))
    plt.savefig(os.path.join(output_dir, f'{os.path.basename(path)}-grid.jpg'))
    # save_image(image_dict, os.path.basename(path), args.layer_name)
if __name__ == "__main__":
"""
Usage:export KMP_DUPLICATE_LIB_OK=TRUE
python detection/demo_retinanet.py --config-file detection/retinanet_R_50_FPN_3x.yaml \
--input ./examples/pic1.jpg \
--layer-name head.cls_subnet.7 \
--opts MODEL.WEIGHTS /Users/yizuotian/pretrained_model/model_final_4cafe0.pkl MODEL.DEVICE cpu
"""
mp.set_start_method("spawn", force=True)
arguments = get_parser().parse_args()
main(arguments)
| 34.822967 | 220 | 0.657461 |
import argparse
import multiprocessing as mp
import os
from tqdm import tqdm
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
import cv2
import detectron2.data.transforms as T
import numpy as np
import torch
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog
from detectron2.data.detection_utils import read_image
from detectron2.modeling import build_model
from detectron2.utils.logger import setup_logger
from skimage import io
from grad_cam_retinanet import GradCAM, GradCamPlusPlus
WINDOW_NAME = "COCO detections"
def setup_cfg(args):
cfg = get_cfg()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.MODEL.RETINANET.SCORE_THRESH_TEST = 0.25
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.confidence_threshold
cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args.confidence_threshold
cfg.freeze()
return cfg
def norm_image(image):
image = image.copy()
image -= np.max(np.min(image), 0)
image /= np.max(image)
image *= 255.
return np.uint8(image)
def gen_cam(image, mask):
heatmap = cv2.applyColorMap(np.uint8(255 * mask), cv2.COLORMAP_JET)
heatmap = np.float32(heatmap) / 255
heatmap = heatmap[..., ::-1]
cam = heatmap + np.float32(image)
return norm_image(cam), heatmap
def save_image(image_dicts, input_image_name, layer_name, network='retinanet', output_dir='./results'):
prefix = os.path.splitext(input_image_name)[0]
for key, image in image_dicts.items():
if key == 'predict_box':
io.imsave(os.path.join(output_dir,
'{}-{}-{}.jpg'.format(prefix, network, key)),
image)
else:
io.imsave(os.path.join(output_dir,
'{}-{}-{}-{}.jpg'.format(prefix, network, layer_name, key)),
(255 * image).astype(np.uint8) )
def get_parser():
parser = argparse.ArgumentParser(description="Detectron2 demo for builtin models")
parser.add_argument(
"--config-file",
default="configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument("--input", help="A list of space separated input images")
parser.add_argument(
"--output",
help="A file or directory to save output visualizations. "
"If not given, will show output in an OpenCV window.",
)
parser.add_argument(
"--confidence-threshold",
type=float,
default=0.2,
help="Minimum score for instance predictions to be shown",
)
parser.add_argument(
"--opts",
help="Modify config options using the command-line 'KEY VALUE' pairs",
default=[],
nargs=argparse.REMAINDER,
)
parser.add_argument('--layer-name', type=str, default='head.cls_subnet.2',
help='使用哪层特征去生成CAM')
return parser
def main(args):
from detectron2.data.datasets import register_coco_instances
register_coco_instances("Dent_Detection_train", {}, "/content/drive/MyDrive/Damage/Dataset/coco_15k_balanced/annotations/instance_train2017.json", "/content/drive/MyDrive/Damage/Dataset/coco_15k_balanced/JPEGImages")
register_coco_instances("Dent_Detection_test", {}, "/content/drive/MyDrive/Damage/Dataset/coco_15k_balanced/annotations/instance_val2017.json", "/content/drive/MyDrive/Damage/Dataset/coco_15k_balanced/JPEGImages")
MetadataCatalog.get("Dent_Detection_test").thing_classes = ['Dent']
setup_logger(name="fvcore")
logger = setup_logger()
logger.info("Arguments: " + str(args))
cfg = setup_cfg(args)
print(cfg)
model = build_model(cfg)
checkpointer = DetectionCheckpointer(model)
checkpointer.load(cfg.MODEL.WEIGHTS)
path = os.path.expanduser(args.input)
original_image = read_image(path, format="BGR")
height, width = original_image.shape[:2]
transform_gen = T.ResizeShortestEdge(
[cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST
)
image = transform_gen.get_transform(original_image).apply_image(original_image)
image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1)).requires_grad_(True)
inputs = {"image": image, "height": height, "width": width}
img_grid = [0]*18
for ly in tqdm(range(8), desc ="LAYER BY LAYER"):
layer_name = f'head.cls_subnet.{ly}'
grad_cam = GradCAM(model, layer_name)
mask, box, class_id = grad_cam(inputs)
grad_cam.remove_handlers()
image_dict = {}
img = original_image[..., ::-1]
x1, y1, x2, y2 = box
image_dict['predict_box'] = img[y1:y2, x1:x2]
img_grid[0], img_grid[9] = image_dict['predict_box'] , image_dict['predict_box']
image_cam, image_dict['heatmap'] = gen_cam(img[y1:y2, x1:x2], mask[y1:y2, x1:x2])
img_grid[ly+1] = (image_dict['heatmap']*255).astype(np.uint8)
grad_cam_plus_plus = GradCamPlusPlus(model, layer_name)
mask_plus_plus = grad_cam_plus_plus(inputs)
_, image_dict['heatmap++'] = gen_cam(img[y1:y2, x1:x2], mask_plus_plus[y1:y2, x1:x2])
img_grid[ly+9] = (image_dict['heatmap++']*255).astype(np.uint8)
grad_cam_plus_plus.remove_handlers()
meta = MetadataCatalog.get(
cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else "__unused"
)
label = meta.thing_classes[class_id]
print("label:{}".format(label))
fig = plt.figure(figsize=(20., 6.))
grid = ImageGrid(fig, 111,
nrows_ncols=(2, 9),
axes_pad=0.1,
)
for ax, im_ in zip(grid, img_grid):
ax.imshow(cv2.addWeighted(im_, 0.6, img_grid[0], 0.4, 0.2))
plt.savefig(os.path.join(output_dir, f'{os.path.basename(path)}-grid.jpg'))
if __name__ == "__main__":
mp.set_start_method("spawn", force=True)
arguments = get_parser().parse_args()
main(arguments)
| true | true |
1c2d907b28e0ff50cdb573cfd9b6d1c8390a419d | 12,853 | py | Python | tasks/data_utils.py | lcylcy/GLM_copa | dbb28be27cd48905986ab5db6e29620eff05984c | [
"MIT"
] | null | null | null | tasks/data_utils.py | lcylcy/GLM_copa | dbb28be27cd48905986ab5db6e29620eff05984c | [
"MIT"
] | null | null | null | tasks/data_utils.py | lcylcy/GLM_copa | dbb28be27cd48905986ab5db6e29620eff05984c | [
"MIT"
] | null | null | null | # coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tasks data utility."""
import copy
import json
import pickle
import re
from typing import Dict, List, Optional
import numpy as np
import torch
import torch.utils.data
from torch.utils.data.dataloader import default_collate
import mpu
def clean_text(text):
    """Normalize whitespace: drop newlines, collapse runs of whitespace,
    and glue a floating sentence dot (' . ') back onto the word before it.
    """
    flattened = re.sub(r'\s+', ' ', text.replace("\n", " "))
    # Three passes mirror the original fixup for nested ' . ' occurrences.
    for _ in range(3):
        flattened = flattened.replace(' . ', '. ')
    return flattened
class InputExample(object):
    """A raw input example consisting of one or two segments of text and a label"""

    def __init__(self, guid, text_a, text_b=None, label=None, logits=None, meta: Optional[Dict] = None, idx=-1,
                 num_choices=1):
        """
        Create a new InputExample.

        :param guid: a unique textual identifier
        :param text_a: the sequence of text
        :param text_b: an optional, second sequence of text
        :param label: an optional label
        :param logits: an optional list of per-class logits
        :param meta: an optional dictionary to store arbitrary meta information
        :param idx: an optional numeric index
        """
        self.guid = guid
        self.text_a = text_a
        self.text_b = text_b
        self.label = label
        self.logits = logits
        self.idx = idx
        self.num_choices = num_choices
        # A falsy meta (None or {}) normalizes to a fresh empty dict.
        self.meta = meta or {}

    def __repr__(self):
        # The JSON serialization is already a string.
        return self.to_json_string()

    def to_dict(self):
        """Serialize this instance to a Python dictionary."""
        return copy.deepcopy(self.__dict__)

    def to_json_string(self):
        """Serialize this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"

    @staticmethod
    def load_examples(path: str) -> List['InputExample']:
        """Load a set of input examples from a file"""
        with open(path, 'rb') as fh:
            return pickle.load(fh)

    @staticmethod
    def save_examples(examples: List['InputExample'], path: str) -> None:
        """Save a set of input examples to a file"""
        with open(path, 'wb') as fh:
            pickle.dump(examples, fh)
def num_special_tokens_to_add(text_a_ids, text_b_ids, answer_ids, add_cls, add_sep, add_piece, add_eos=True):
    """Count the special tokens build_input_from_ids would insert for these inputs."""
    # CLS + (SEP when a second segment exists) + EOS + (SOP piece when no answer).
    extra = int(bool(add_cls))
    if add_sep and text_b_ids:
        extra += 1
    if add_eos:
        extra += 1
    if add_piece and not answer_ids:
        extra += 1
    return extra
def build_input_from_ids(text_a_ids, text_b_ids, answer_ids, max_seq_length, tokenizer, args=None, add_cls=True,
                         add_sep=False, add_piece=False, add_eos=True, mask_id=None):
    """Assemble a single model input from pre-tokenized id sequences.

    Layout: [CLS] text_a ([SEP] text_b) [EOS] ([SOP] answer...), padded up to
    ``max_seq_length``.

    :param text_a_ids: token ids of the first segment.
    :param text_b_ids: optional token ids of the second segment.
    :param answer_ids: optional answer token ids (teacher-forcing targets).
    :param max_seq_length: fixed length every returned sequence is padded to.
    :param tokenizer: provides the MASK/eos/ENC/sep/sop command tokens.
    :param args: read for ``sentinel_token``, ``max_position_embeddings``,
        ``no_block_position`` and ``masked_lm``; must be provided when
        ``add_piece`` or ``answer_ids`` is used (otherwise AttributeError).
    :return: ``(ids, types, paddings, position_ids, sep, target_ids, loss_masks)``
        where ``sep`` is the length of the context part before the generation
        piece, and ``position_ids`` is ``[positions, block_positions]`` unless
        ``args.masked_lm`` is set.
    """
    if mask_id is None:
        mask_id = tokenizer.get_command('MASK').Id
    eos_id = tokenizer.get_command('eos').Id
    cls_id = tokenizer.get_command('ENC').Id
    sep_id = tokenizer.get_command('sep').Id
    ids = []
    types = []
    paddings = []
    # CLS token opens the sequence (segment type 0).
    if add_cls:
        ids.append(cls_id)
        types.append(0)
        paddings.append(1)
    # Segment A: type 0, all real (non-padding) tokens.
    len_text_a = len(text_a_ids)
    ids.extend(text_a_ids)
    types.extend([0] * len_text_a)
    paddings.extend([1] * len_text_a)
    # Segment B (optional): type 1, optionally preceded by SEP.
    if text_b_ids is not None:
        # SEP
        if add_sep:
            ids.append(sep_id)
            types.append(0)
            paddings.append(1)
        len_text_b = len(text_b_ids)
        ids.extend(text_b_ids)
        types.extend([1] * len_text_b)
        paddings.extend([1] * len_text_b)
    eos_length = 1 if add_eos else 0
    # Cap the size so the context plus EOS fits into max_seq_length.
    if len(ids) >= max_seq_length - eos_length:
        max_seq_length_m1 = max_seq_length - 1
        ids = ids[0:max_seq_length_m1]
        types = types[0:max_seq_length_m1]
        paddings = paddings[0:max_seq_length_m1]
    end_type = 0 if text_b_ids is None else 1
    if add_eos:
        ids.append(eos_id)
        types.append(end_type)
        paddings.append(1)
    # `sep` records the context length (everything before the generation piece).
    sep = len(ids)
    target_ids = [0] * len(ids)
    loss_masks = [0] * len(ids)
    position_ids = list(range(len(ids)))
    block_position_ids = [0] * len(ids)
    # Generation piece: SOP token followed by the (shifted) answer ids.
    if add_piece or answer_ids is not None:
        sop_id = tokenizer.get_command('sop').Id
        # Piece tokens all share the MASK position in the first position channel.
        mask_position = ids.index(mask_id) if not args.sentinel_token else args.max_position_embeddings
        ids.append(sop_id)
        types.append(end_type)
        paddings.append(1)
        position_ids.append(mask_position)
        block_position_ids.append(1)
        if answer_ids is not None:
            len_answer = len(answer_ids)
            # Inputs are answer[:-1] (shifted right by SOP); targets are the full answer.
            ids.extend(answer_ids[:-1])
            types.extend([end_type] * (len_answer - 1))
            paddings.extend([1] * (len_answer - 1))
            position_ids.extend([mask_position] * (len_answer - 1))
            if not args.no_block_position:
                block_position_ids.extend(range(2, len(answer_ids) + 1))
            else:
                block_position_ids.extend([1] * (len(answer_ids) - 1))
            target_ids.extend(answer_ids)
            loss_masks.extend([1] * len(answer_ids))
        else:
            # No known answer: a single generation slot with loss enabled.
            target_ids.append(0)
            loss_masks.append(1)
    # Padding up to max_seq_length.
    padding_length = max_seq_length - len(ids)
    if padding_length > 0:
        ids.extend([eos_id] * padding_length)
        # NOTE(review): token-type ids are padded with eos_id rather than 0 —
        # preserved as-is; confirm downstream consumers expect this.
        types.extend([eos_id] * padding_length)
        paddings.extend([0] * padding_length)
        position_ids.extend([0] * padding_length)
        block_position_ids.extend([0] * padding_length)
        target_ids.extend([0] * padding_length)
        loss_masks.extend([0] * padding_length)
    if not args.masked_lm:
        # Two-channel positions: [absolute positions, block-internal positions].
        position_ids = [position_ids, block_position_ids]
    return ids, types, paddings, position_ids, sep, target_ids, loss_masks
def build_decoder_input(enc_ids, answer_ids, max_seq_length, max_dec_seq_length, tokenizer):
    """Build decoder-side inputs/targets for an encoder-decoder setup.

    The decoder input is [SOP] + answer[:-1] (the answer shifted right), its
    targets are the full answer, and all decoder tokens share the position of
    the MASK token in ``enc_ids``.

    :param enc_ids: encoder token ids; must contain the MASK token.
    :param answer_ids: answer token ids (not modified by this function).
    :param max_seq_length: encoder length (only used by the disabled mask code).
    :param max_dec_seq_length: fixed length the decoder sequences are padded to.
    :param tokenizer: provides the MASK/eos/sop command tokens.
    :return: ``(ids, types, paddings, position_ids, masks, target_ids, loss_masks)``;
        ``masks`` is always an empty list (see TODO below) and ``types`` is unused.
    """
    mask_id = tokenizer.get_command('MASK').Id
    eos_id = tokenizer.get_command('eos').Id
    sop_id = tokenizer.get_command('sop').Id
    enc_len = len(enc_ids)  # referenced by the disabled mask-building code below
    masks = []
    # TODO: it probably takes too much memory
    # for i in range(max_dec_seq_length):
    #     m = [1]*enc_len + [0]*(max_seq_length - enc_len) + [1]*(i+1) + [0]*(max_dec_seq_length-1-i)
    #     masks.append(m)
    mask_position = enc_ids.index(mask_id)
    len_answer = len(answer_ids)
    ids = [sop_id] + answer_ids[:-1]
    types = [0] * len_answer  # not used
    paddings = [1] * len_answer
    position_ids = [mask_position] * len_answer
    block_position_ids = list(range(1, len_answer + 1))
    # Bug fix: copy instead of aliasing answer_ids — the padding extend below
    # used to mutate the caller's answer_ids list in place.
    target_ids = list(answer_ids)
    loss_masks = [1] * len_answer
    # Padding up to max_dec_seq_length.
    padding_length = max_dec_seq_length - len(ids)
    if padding_length > 0:
        ids.extend([eos_id] * padding_length)
        types.extend([0] * padding_length)
        paddings.extend([0] * padding_length)
        position_ids.extend([0] * padding_length)
        block_position_ids.extend([0] * padding_length)
        target_ids.extend([0] * padding_length)
        loss_masks.extend([0] * padding_length)
    # Two-channel positions: [absolute positions, block-internal positions].
    position_ids = [position_ids, block_position_ids]
    return ids, types, paddings, position_ids, masks, target_ids, loss_masks
def build_sample(ids, types=None, paddings=None, positions=None, masks=None, label=None, unique_id=None, target=None,
                 logit_mask=None, segment_ids=None, prompt_ids=None):
    """Convert python lists to int64 numpy arrays and pack them into a sample dict.

    Only the fields that were actually supplied (not None) appear in the
    returned dictionary; ``text`` and ``label`` are always present.
    """
    sample = {'text': np.array(ids, dtype=np.int64), 'label': int(label)}
    # Optional fields, in the historical insertion order of the sample dict.
    optional_fields = [
        ('types', types),
        ('padding_mask', paddings),
        ('position', positions),
        ('mask', masks),
        ('target', target),
        ('logit_mask', logit_mask),
        ('segment_id', segment_ids),
        ('prompt_pos', prompt_ids),
    ]
    for field_name, field_value in optional_fields:
        if field_value is not None:
            sample[field_name] = np.array(field_value, dtype=np.int64)
    # The uid stays a plain python object (no array conversion).
    if unique_id is not None:
        sample['uid'] = unique_id
    return sample
def build_decoder_sample(sample, dec_ids, dec_position, dec_masks, dec_target, dec_logit_mask):
    """Attach decoder-side arrays to an existing sample dict (mutates and returns it)."""
    decoder_fields = {
        'dec_text': dec_ids,
        'dec_position': dec_position,
        'dec_mask': dec_masks,
        'dec_target': dec_target,
        'dec_logit_mask': dec_logit_mask,
    }
    for field_name, field_value in decoder_fields.items():
        sample[field_name] = np.array(field_value)
    return sample
def my_collate(batch):
    """Collate samples that may have a variable number of answer choices.

    Samples whose arrays carry a choice dimension are padded (by repeating
    the first choice) to the largest choice count in the batch, and a
    ``loss_mask`` marks which choices are real.  ``uid`` values bypass
    torch's default_collate and are returned as a plain python list.
    """
    # Strip uids before tensor collation; they are re-attached at the end.
    new_batch = [{key: value for key, value in sample.items() if key != 'uid'} for sample in batch]
    text_list = [sample['text'] for sample in batch]

    def pad_choice_dim(data, choice_num):
        # Pad the leading (choice) dimension by repeating the first choice.
        if len(data) < choice_num:
            data = np.concatenate([data] + [data[0:1]] * (choice_num - len(data)))
        return data

    # A 2-D `text` array means the sample has a choice dimension.
    if len(text_list[0].shape) == 2:
        choice_nums = list(map(len, text_list))
        max_choice_num = max(choice_nums)
        for i, sample in enumerate(new_batch):
            for key, value in sample.items():
                if key != 'label':
                    sample[key] = pad_choice_dim(value, max_choice_num)
                else:
                    # Labels are scalar per sample; keep them untouched.
                    sample[key] = value
            # 1 for real choices, 0 for padded ones.
            sample['loss_mask'] = np.array([1] * choice_nums[i] + [0] * (max_choice_num - choice_nums[i]),
                                           dtype=np.int64)
    # Decoder-side fields may also vary in choice count; pad only when they differ.
    if 'dec_text' in new_batch[0]:
        choice_nums = [len(sample['dec_text']) for sample in new_batch]
        if choice_nums.count(choice_nums[0]) != len(choice_nums):
            max_choice_num = max(choice_nums)
            for i, sample in enumerate(new_batch):
                for key, value in sample.items():
                    if key.startswith('dec_'):
                        sample[key] = pad_choice_dim(value, max_choice_num)
                sample['loss_mask'] = np.array([1] * choice_nums[i] + [0] * (max_choice_num - choice_nums[i]),
                                               dtype=np.int64)
    new_batch = default_collate(new_batch)
    if 'uid' in batch[0]:
        uid_list = [sample['uid'] for sample in batch]
        # uids stay a plain python list; default_collate must not touch them.
        new_batch['uid'] = uid_list
    return new_batch
class FakeDataloader:
    """Iterable stub that yields ``None`` a fixed number of times (or forever)."""

    def __init__(self, num_iters):
        # None means "iterate indefinitely".
        self.num_iters = num_iters

    def __iter__(self):
        if self.num_iters is None:
            while True:
                yield None
        else:
            for _ in range(self.num_iters):
                yield None
def build_data_loader(dataset, batch_size, num_workers, drop_last, shuffle=True, only_rank0=False):
    """Build a distributed data loader. Batch size is the local (per GPU) batch size.

    :param dataset: torch dataset to iterate over.
    :param batch_size: per-GPU batch size.
    :param num_workers: number of DataLoader worker processes.
    :param drop_last: whether to drop the last incomplete batch.
    :param shuffle: shuffling is delegated to the DistributedSampler.
    :param only_rank0: when True, bypass mpu and treat the process as the
        single replica (rank 0 of world size 1), so the full dataset is seen.
    """
    if only_rank0:
        rank, world_size = 0, 1
    else:
        world_size = mpu.get_data_parallel_world_size()
        rank = mpu.get_data_parallel_rank()
    # Each data-parallel rank receives a disjoint shard of the dataset.
    sampler = torch.utils.data.distributed.DistributedSampler(
        dataset, num_replicas=world_size, rank=rank, shuffle=shuffle)
    # shuffle must stay False here: shuffling is handled by the sampler above.
    data_loader = torch.utils.data.DataLoader(dataset,
                                              batch_size=batch_size,
                                              sampler=sampler,
                                              shuffle=False,
                                              num_workers=num_workers,
                                              drop_last=drop_last,
                                              pin_memory=True,
                                              collate_fn=my_collate)
    return data_loader
| 37.363372 | 117 | 0.617755 |
import copy
import json
import pickle
import re
from typing import Dict, List, Optional
import numpy as np
import torch
import torch.utils.data
from torch.utils.data.dataloader import default_collate
import mpu
def clean_text(text):
text = text.replace("\n", " ")
text = re.sub(r'\s+', ' ', text)
for _ in range(3):
text = text.replace(' . ', '. ')
return text
class InputExample(object):
def __init__(self, guid, text_a, text_b=None, label=None, logits=None, meta: Optional[Dict] = None, idx=-1,
num_choices=1):
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
self.logits = logits
self.idx = idx
self.num_choices = num_choices
self.meta = meta if meta else {}
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
@staticmethod
def load_examples(path: str) -> List['InputExample']:
with open(path, 'rb') as fh:
return pickle.load(fh)
@staticmethod
def save_examples(examples: List['InputExample'], path: str) -> None:
with open(path, 'wb') as fh:
pickle.dump(examples, fh)
def num_special_tokens_to_add(text_a_ids, text_b_ids, answer_ids, add_cls, add_sep, add_piece, add_eos=True):
num_tokens = 0
if add_cls:
num_tokens += 1
if text_b_ids and add_sep:
num_tokens += 1
if add_eos:
num_tokens += 1
if not answer_ids and add_piece:
num_tokens += 1
return num_tokens
def build_input_from_ids(text_a_ids, text_b_ids, answer_ids, max_seq_length, tokenizer, args=None, add_cls=True,
add_sep=False, add_piece=False, add_eos=True, mask_id=None):
if mask_id is None:
mask_id = tokenizer.get_command('MASK').Id
eos_id = tokenizer.get_command('eos').Id
cls_id = tokenizer.get_command('ENC').Id
sep_id = tokenizer.get_command('sep').Id
ids = []
types = []
paddings = []
if add_cls:
ids.append(cls_id)
types.append(0)
paddings.append(1)
len_text_a = len(text_a_ids)
ids.extend(text_a_ids)
types.extend([0] * len_text_a)
paddings.extend([1] * len_text_a)
if text_b_ids is not None:
if add_sep:
ids.append(sep_id)
types.append(0)
paddings.append(1)
len_text_b = len(text_b_ids)
ids.extend(text_b_ids)
types.extend([1] * len_text_b)
paddings.extend([1] * len_text_b)
eos_length = 1 if add_eos else 0
if len(ids) >= max_seq_length - eos_length:
max_seq_length_m1 = max_seq_length - 1
ids = ids[0:max_seq_length_m1]
types = types[0:max_seq_length_m1]
paddings = paddings[0:max_seq_length_m1]
end_type = 0 if text_b_ids is None else 1
if add_eos:
ids.append(eos_id)
types.append(end_type)
paddings.append(1)
sep = len(ids)
target_ids = [0] * len(ids)
loss_masks = [0] * len(ids)
position_ids = list(range(len(ids)))
block_position_ids = [0] * len(ids)
if add_piece or answer_ids is not None:
sop_id = tokenizer.get_command('sop').Id
mask_position = ids.index(mask_id) if not args.sentinel_token else args.max_position_embeddings
ids.append(sop_id)
types.append(end_type)
paddings.append(1)
position_ids.append(mask_position)
block_position_ids.append(1)
if answer_ids is not None:
len_answer = len(answer_ids)
ids.extend(answer_ids[:-1])
types.extend([end_type] * (len_answer - 1))
paddings.extend([1] * (len_answer - 1))
position_ids.extend([mask_position] * (len_answer - 1))
if not args.no_block_position:
block_position_ids.extend(range(2, len(answer_ids) + 1))
else:
block_position_ids.extend([1] * (len(answer_ids) - 1))
target_ids.extend(answer_ids)
loss_masks.extend([1] * len(answer_ids))
else:
target_ids.append(0)
loss_masks.append(1)
padding_length = max_seq_length - len(ids)
if padding_length > 0:
ids.extend([eos_id] * padding_length)
types.extend([eos_id] * padding_length)
paddings.extend([0] * padding_length)
position_ids.extend([0] * padding_length)
block_position_ids.extend([0] * padding_length)
target_ids.extend([0] * padding_length)
loss_masks.extend([0] * padding_length)
if not args.masked_lm:
position_ids = [position_ids, block_position_ids]
return ids, types, paddings, position_ids, sep, target_ids, loss_masks
def build_decoder_input(enc_ids, answer_ids, max_seq_length, max_dec_seq_length, tokenizer):
mask_id = tokenizer.get_command('MASK').Id
eos_id = tokenizer.get_command('eos').Id
sop_id = tokenizer.get_command('sop').Id
enc_len = len(enc_ids)
masks = []
mask_position = enc_ids.index(mask_id)
len_answer = len(answer_ids)
ids = [sop_id] + answer_ids[:-1]
types = [0] * len_answer
paddings = [1] * len_answer
position_ids = [mask_position] * len_answer
block_position_ids = list(range(1, len_answer + 1))
target_ids = answer_ids
loss_masks = [1] * len_answer
padding_length = max_dec_seq_length - len(ids)
if padding_length > 0:
ids.extend([eos_id] * padding_length)
types.extend([0] * padding_length)
paddings.extend([0] * padding_length)
position_ids.extend([0] * padding_length)
block_position_ids.extend([0] * padding_length)
target_ids.extend([0] * padding_length)
loss_masks.extend([0] * padding_length)
position_ids = [position_ids, block_position_ids]
return ids, types, paddings, position_ids, masks, target_ids, loss_masks
def build_sample(ids, types=None, paddings=None, positions=None, masks=None, label=None, unique_id=None, target=None,
logit_mask=None, segment_ids=None, prompt_ids=None):
ids_np = np.array(ids, dtype=np.int64)
sample = {'text': ids_np, 'label': int(label)}
if types is not None:
types_np = np.array(types, dtype=np.int64)
sample['types'] = types_np
if paddings is not None:
paddings_np = np.array(paddings, dtype=np.int64)
sample['padding_mask'] = paddings_np
if positions is not None:
positions_np = np.array(positions, dtype=np.int64)
sample['position'] = positions_np
if masks is not None:
masks_np = np.array(masks, dtype=np.int64)
sample['mask'] = masks_np
if target is not None:
target_np = np.array(target, dtype=np.int64)
sample['target'] = target_np
if logit_mask is not None:
logit_mask_np = np.array(logit_mask, dtype=np.int64)
sample['logit_mask'] = logit_mask_np
if segment_ids is not None:
segment_ids = np.array(segment_ids, dtype=np.int64)
sample['segment_id'] = segment_ids
if prompt_ids is not None:
prompt_ids = np.array(prompt_ids, dtype=np.int64)
sample['prompt_pos'] = prompt_ids
if unique_id is not None:
sample['uid'] = unique_id
return sample
def build_decoder_sample(sample, dec_ids, dec_position, dec_masks, dec_target, dec_logit_mask):
sample['dec_text'] = np.array(dec_ids)
sample['dec_position'] = np.array(dec_position)
sample['dec_mask'] = np.array(dec_masks)
sample['dec_target'] = np.array(dec_target)
sample['dec_logit_mask'] = np.array(dec_logit_mask)
return sample
def my_collate(batch):
new_batch = [{key: value for key, value in sample.items() if key != 'uid'} for sample in batch]
text_list = [sample['text'] for sample in batch]
def pad_choice_dim(data, choice_num):
if len(data) < choice_num:
data = np.concatenate([data] + [data[0:1]] * (choice_num - len(data)))
return data
if len(text_list[0].shape) == 2:
choice_nums = list(map(len, text_list))
max_choice_num = max(choice_nums)
for i, sample in enumerate(new_batch):
for key, value in sample.items():
if key != 'label':
sample[key] = pad_choice_dim(value, max_choice_num)
else:
sample[key] = value
sample['loss_mask'] = np.array([1] * choice_nums[i] + [0] * (max_choice_num - choice_nums[i]),
dtype=np.int64)
if 'dec_text' in new_batch[0]:
choice_nums = [len(sample['dec_text']) for sample in new_batch]
if choice_nums.count(choice_nums[0]) != len(choice_nums):
max_choice_num = max(choice_nums)
for i, sample in enumerate(new_batch):
for key, value in sample.items():
if key.startswith('dec_'):
sample[key] = pad_choice_dim(value, max_choice_num)
sample['loss_mask'] = np.array([1] * choice_nums[i] + [0] * (max_choice_num - choice_nums[i]),
dtype=np.int64)
new_batch = default_collate(new_batch)
if 'uid' in batch[0]:
uid_list = [sample['uid'] for sample in batch]
new_batch['uid'] = uid_list
return new_batch
class FakeDataloader:
def __init__(self, num_iters):
self.num_iters = num_iters
def __iter__(self):
if self.num_iters is not None:
for _ in range(self.num_iters):
yield None
else:
while True:
yield None
def build_data_loader(dataset, batch_size, num_workers, drop_last, shuffle=True, only_rank0=False):
if only_rank0:
rank, world_size = 0, 1
else:
world_size = mpu.get_data_parallel_world_size()
rank = mpu.get_data_parallel_rank()
sampler = torch.utils.data.distributed.DistributedSampler(
dataset, num_replicas=world_size, rank=rank, shuffle=shuffle)
data_loader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
sampler=sampler,
shuffle=False,
num_workers=num_workers,
drop_last=drop_last,
pin_memory=True,
collate_fn=my_collate)
return data_loader
| true | true |
1c2d90f87f40141af385751a4193713ac88ba775 | 3,529 | py | Python | bindings/python/ensmallen/datasets/string/henriciellamarina.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 5 | 2021-02-17T00:44:45.000Z | 2021-08-09T16:41:47.000Z | bindings/python/ensmallen/datasets/string/henriciellamarina.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 18 | 2021-01-07T16:47:39.000Z | 2021-08-12T21:51:32.000Z | bindings/python/ensmallen/datasets/string/henriciellamarina.py | AnacletoLAB/ensmallen | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 3 | 2021-01-14T02:20:59.000Z | 2021-08-04T19:09:52.000Z | """
This file offers the methods to automatically retrieve the graph Henriciella marina.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def HenriciellaMarina(
    directed: bool = False,
    preprocess: bool = True,
    load_nodes: bool = True,
    verbose: int = 2,
    cache: bool = True,
    cache_path: str = "graphs/string",
    version: str = "links.v11.5",
    **additional_graph_kwargs: Dict
) -> Graph:
    """Retrieve the Henriciella marina graph from the STRING repository.

    Parameters
    -------------------
    directed: bool = False
        Whether to load the graph as directed; undirected by default.
    preprocess: bool = True
        Whether to preprocess the graph for optimal loading time and memory.
    load_nodes: bool = True
        Whether to load the nodes vocabulary or treat the nodes simply as a
        numeric range.
    verbose: int = 2
        Whether to show loading bars during the retrieval and building of the
        graph.
    cache: bool = True
        Whether to use cache, i.e. download files only once and preprocess
        them only once.
    cache_path: str = "graphs/string"
        Where to store the downloaded graphs.
    version: str = "links.v11.5"
        The version of the graph to retrieve. The available versions are:
        homology.v11.0, homology.v11.5, physical.links.v11.0,
        physical.links.v11.5, links.v11.0, links.v11.5.
    additional_graph_kwargs: Dict
        Additional keyword arguments forwarded to the retrieval helper.

    Returns
    -----------------------
    Instance of the Henriciella marina graph.

    References
    ---------------------
    Please cite Szklarczyk et al., "STRING v11: protein--protein association
    networks with increased coverage [...]", Nucleic Acids Research 47(D1),
    D607--D613, 2019, Oxford University Press, when using this data.
    """
    retrieval_config = dict(
        graph_name="HenriciellaMarina",
        repository="string",
        version=version,
        directed=directed,
        preprocess=preprocess,
        load_nodes=load_nodes,
        verbose=verbose,
        cache=cache,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs,
    )
    # Build the retriever, then immediately invoke it to obtain the Graph.
    return AutomaticallyRetrievedGraph(**retrieval_config)()
| 32.675926 | 223 | 0.675829 | from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph
def HenriciellaMarina(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
return AutomaticallyRetrievedGraph(
graph_name="HenriciellaMarina",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| true | true |
1c2d91349c242b43ed1148546b45cf09e118f038 | 4,998 | py | Python | src/exceptionite/Handler.py | MasoniteFramework/exceptions | ce15da5e9f763c563e9d687771fb0599b875b83f | [
"MIT"
] | 6 | 2019-12-13T05:22:49.000Z | 2020-01-02T20:50:24.000Z | src/exceptionite/Handler.py | MasoniteFramework/exceptions | ce15da5e9f763c563e9d687771fb0599b875b83f | [
"MIT"
] | 7 | 2019-12-12T18:02:20.000Z | 2020-01-04T19:49:49.000Z | src/exceptionite/Handler.py | MasoniteFramework/exceptions | ce15da5e9f763c563e9d687771fb0599b875b83f | [
"MIT"
] | 3 | 2020-08-11T22:07:46.000Z | 2022-02-21T05:22:59.000Z | import sys
import traceback
from dotty_dict import dotty
from typing import Type, TYPE_CHECKING
from typing_extensions import Protocol
# Visible only to type checkers: structural interface every renderer
# registered on the Handler must implement.
if TYPE_CHECKING:
    class Renderer(Protocol):
        # The handler this renderer reads exception data from.
        handler: "Handler"
        def __init__(self, handler: "Handler") -> None:
            ...
        def render(self) -> str:
            """Render the handler's current exception and return it as a string."""
            ...
from .StackTrace import StackTrace
from .renderers import WebRenderer, TerminalRenderer, JSONRenderer
from .options import DEFAULT_OPTIONS as DefaultOptions
class Handler:
    """Exceptionite handler: captures an exception and renders it via named renderers."""

    # Key substrings (matched case-insensitively) whose values scrub_data()
    # masks when options.hide_sensitive_data is enabled.  This is a class-level
    # default; instances rebind it when they customise their keywords.
    scrub_keywords = [
        "password",
        "passwd",
        "pwd",
        "secret",
        "key",
        "api_key",
        "apikey",
        "access_token",
        "credentials",
        "token",
    ]

    def __init__(self):
        # Map of renderer name -> renderer instance.
        # Fixed annotation: dict takes (key, value) type parameters.
        self.renderers: dict[str, "Renderer"] = {}
        self.options: dict = dotty(DefaultOptions)
        self.context = {}
        self.add_renderer("web", WebRenderer)
        self.add_renderer("terminal", TerminalRenderer)
        self.add_renderer("json", JSONRenderer)

    def set_options(self, options: dict) -> "Handler":
        """Configure the handler with the given options (replaces current options)."""
        # Ensure a fresh dotty dict; `options` might already be a dotty dict.
        self.options = dotty(dict(options))
        return self

    def add_renderer(self, name: str, renderer_class: Type["Renderer"]) -> "Handler":
        """Instantiate `renderer_class` with this handler and register it under `name`."""
        self.renderers.update({name: renderer_class(self)})
        return self

    def renderer(self, name: str) -> "Renderer":
        """Return the renderer registered under `name` (KeyError if absent)."""
        return self.renderers[name]

    def add_context(self, name: str, data: dict) -> "Handler":
        """Attach an arbitrary named context dictionary to the report."""
        self.context.update({name: data})
        return self

    def start(self, exception: BaseException) -> "Handler":
        """Start handling `exception`: capture exc_info and build the stack trace.

        Must be called while the exception is being handled so that
        ``sys.exc_info()`` still refers to it.
        """
        self._exception = exception
        self._type, self._value, self._original_traceback = sys.exc_info()
        traceback_exc = traceback.TracebackException(
            self._type, self._value, self._original_traceback, capture_locals=True
        )
        self._stacktrace = StackTrace(
            traceback_exc,
            self._exception,
            offset=self.options.get("options.stack.offset"),
            shorten=self.options.get("options.stack.shorten"),
            scrubber=self.scrub_data,
        )
        self._stacktrace.generate().reverse()
        return self

    # helpers

    def exception(self) -> str:
        """Return the handled exception class name."""
        return self._exception.__class__.__name__

    def namespace(self) -> str:
        """Return the handled exception's fully qualified name (module + class)."""
        return self._exception.__class__.__module__ + "." + self.exception()

    def message(self) -> str:
        """Return the handled exception message."""
        return str(self._exception)

    def stacktrace(self) -> "StackTrace":
        """Return the handled exception stack trace object."""
        return self._stacktrace

    def count(self) -> int:
        """Return the number of frames in the stack trace."""
        return len(self._stacktrace)

    def render(self, renderer: str) -> str:
        """Render the handled exception with the named renderer."""
        return self.renderer(renderer).render()

    def add_scrub_keywords(self, keywords: list) -> "Handler":
        """Add new scrub keywords used to hide sensitive data.

        Bug fix: the previous implementation called extend() on
        ``self.scrub_keywords``, which mutated the *class-level* default list
        and leaked the new keywords into every other Handler instance.
        Rebinding a fresh, de-duplicated list on the instance avoids that.
        """
        self.scrub_keywords = list(set(self.scrub_keywords) | set(keywords))
        return self

    def set_scrub_keywords(self, keywords: list) -> "Handler":
        """Replace the scrub keywords used to hide sensitive data."""
        self.scrub_keywords = keywords
        return self

    def scrub_data(self, data: dict, disable: bool = False) -> dict:
        """Return a copy of `data` with sensitive values replaced by "*****".

        Scrubbing happens only when ``options.hide_sensitive_data`` is set and
        `disable` is False; otherwise `data` is returned untouched.  A value is
        masked when its key contains any scrub keyword (case-insensitive);
        nested dicts are scrubbed recursively.  Falsy values (e.g. "" or None)
        are kept as-is, matching the original behavior.
        """
        if not self.options.get("options.hide_sensitive_data") or disable:
            return data
        scrubbed_data = {}
        if not data:
            return scrubbed_data
        for key, val in data.items():
            if not val:
                # Nothing sensitive to reveal in a falsy value.
                scrubbed_data[key] = val
                continue
            if isinstance(val, dict):
                scrubbed_data[key] = self.scrub_data(val, disable)
            else:
                # Mask the entire value when the key matches any keyword.
                should_scrub = any(
                    token.lower() in key.lower() for token in self.scrub_keywords
                )
                scrubbed_data[key] = "*****" if should_scrub else val
        return scrubbed_data
| 33.543624 | 98 | 0.605242 | import sys
import traceback
from dotty_dict import dotty
from typing import Type, TYPE_CHECKING
from typing_extensions import Protocol
if TYPE_CHECKING:
class Renderer(Protocol):
handler: "Handler"
def __init__(self, handler: "Handler") -> None:
...
def render(self) -> str:
...
from .StackTrace import StackTrace
from .renderers import WebRenderer, TerminalRenderer, JSONRenderer
from .options import DEFAULT_OPTIONS as DefaultOptions
class Handler:
scrub_keywords = [
"password",
"passwd",
"pwd",
"secret",
"key",
"api_key",
"apikey",
"access_token",
"credentials",
"token",
]
def __init__(
self,
):
self.renderers: dict["Renderer"] = {}
self.options: dict = dotty(DefaultOptions)
self.context = {}
self.add_renderer("web", WebRenderer)
self.add_renderer("terminal", TerminalRenderer)
self.add_renderer("json", JSONRenderer)
def set_options(self, options: dict) -> "Handler":
self.options = dotty(dict(options))
return self
def add_renderer(self, name: str, renderer_class: Type["Renderer"]) -> "Handler":
self.renderers.update({name: renderer_class(self)})
return self
def renderer(self, name: str) -> "Renderer":
return self.renderers[name]
def add_context(self, name: str, data: dict) -> "Handler":
self.context.update({name: data})
return self
def start(self, exception: BaseException) -> "Handler":
self._exception = exception
self._type, self._value, self._original_traceback = sys.exc_info()
traceback_exc = traceback.TracebackException(
self._type, self._value, self._original_traceback, capture_locals=True
)
self._stacktrace = StackTrace(
traceback_exc,
self._exception,
offset=self.options.get("options.stack.offset"),
shorten=self.options.get("options.stack.shorten"),
scrubber=self.scrub_data,
)
self._stacktrace.generate().reverse()
return self
def exception(self) -> str:
return self._exception.__class__.__name__
def namespace(self) -> str:
return self._exception.__class__.__module__ + "." + self.exception()
def message(self) -> str:
return str(self._exception)
def stacktrace(self) -> "StackTrace":
return self._stacktrace
def count(self):
return len(self._stacktrace)
def render(self, renderer: str) -> str:
return self.renderer(renderer).render()
def add_scrub_keywords(self, keywords: list) -> "Handler":
self.scrub_keywords.extend(keywords)
self.scrub_keywords = list(set(self.scrub_keywords))
return self
def set_scrub_keywords(self, keywords: list) -> "Handler":
self.scrub_keywords = keywords
return self
def scrub_data(self, data: dict, disable: bool = False) -> dict:
if not self.options.get("options.hide_sensitive_data") or disable:
return data
scrubbed_data = {}
if not data:
return scrubbed_data
for key, val in data.items():
if not val:
scrubbed_data[key] = val
continue
if isinstance(val, dict):
scrubbed_data[key] = self.scrub_data(val, disable)
else:
should_scrub = False
for token in self.scrub_keywords:
if token.lower() in key.lower():
should_scrub = True
if should_scrub:
scrubbed_val = "*****"
else:
scrubbed_val = val
scrubbed_data[key] = scrubbed_val
return scrubbed_data
| true | true |
1c2d9225dd44601ceefd958f0a2cc9bc2ed2ac90 | 3,114 | py | Python | conductor/conductor/solver/request/generic_objective.py | onap/optf-has | dd06e2675aedd7ae6344f2f51e70bbd468f36ce5 | [
"Apache-2.0"
] | 4 | 2019-02-14T19:18:09.000Z | 2019-10-21T17:17:59.000Z | conductor/conductor/solver/request/generic_objective.py | onap/optf-has | dd06e2675aedd7ae6344f2f51e70bbd468f36ce5 | [
"Apache-2.0"
] | null | null | null | conductor/conductor/solver/request/generic_objective.py | onap/optf-has | dd06e2675aedd7ae6344f2f51e70bbd468f36ce5 | [
"Apache-2.0"
] | 4 | 2019-05-09T07:05:54.000Z | 2020-11-20T05:56:47.000Z | #
# -------------------------------------------------------------------------
# Copyright (C) 2020 Wipro Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#
from conductor.solver.request import functions
from conductor.solver.utils.utils import OPERATOR_FUNCTIONS
GOALS = {'minimize': 'min',
'maximize': 'max'}
def get_method_class(function_name):
    """Resolve a function-module under ``functions`` to the class it defines.

    NOTE(review): relies on ``dir(module)[0]`` — the alphabetically first
    attribute of the module — being the target class.  This is fragile if a
    function module ever gains an attribute that sorts earlier; confirm the
    module layout before adding names there.
    """
    module_name = getattr(functions, function_name)
    return getattr(module_name, dir(module_name)[0])
def get_normalized_value(value, start, end):
    """Linearly map *value* from the interval [start, end] onto [0, 1]."""
    interval_span = end - start
    return (value - start) / interval_span
class GenericObjective(object):
    """Objective evaluator built from a declarative operation-function tree."""

    def __init__(self, objective_function):
        # 'minimize'/'maximize' mapped to the solver's 'min'/'max' goal codes.
        self.goal = GOALS[objective_function.get('goal')]
        # Root node of the (possibly nested) operation-function tree.
        self.operation_function = objective_function.get('operation_function')
        self.operand_list = []  # keeping this for compatibility with the solver

    def compute(self, _decision_path, _request):
        """Evaluate the objective for a decision path and store the totals on it."""
        value = self.compute_operation_function(self.operation_function, _decision_path, _request)
        _decision_path.cumulated_value = value
        _decision_path.total_value = \
            _decision_path.cumulated_value + \
            _decision_path.heuristic_to_go_value

    def compute_operation_function(self, operation_function, _decision_path, _request):
        """Recursively evaluate one operation-function node and return its value.

        Each operand is either a nested 'operation_function' subtree or a leaf
        'function' spec; a leaf value may be normalized to [0, 1] and/or
        weighted before this node's operator folds the operand values together.
        """
        operator = operation_function.get('operator')
        operands = operation_function.get('operands')
        result_list = []
        for operand in operands:
            if 'operation_function' in operand:
                # Nested subtree: recurse.
                value = self.compute_operation_function(operand.get('operation_function'),
                                                        _decision_path, _request)
            else:
                # Leaf: instantiate the function class resolved from its module.
                function_name = operand.get('function')
                function_class = get_method_class(function_name)
                function = function_class(function_name)
                args = function.get_args_from_params(_decision_path, _request,
                                                     operand.get('params'))
                value = function.compute(*args)
            if 'normalization' in operand:
                # Map the raw value onto [0, 1] using the declared interval.
                normalization = operand.get('normalization')
                value = get_normalized_value(value, normalization.get('start'),
                                             normalization.get('end'))
            if 'weight' in operand:
                value = value * operand.get("weight")
            result_list.append(value)
        return OPERATOR_FUNCTIONS.get(operator)(result_list)
| 39.417722 | 98 | 0.618818 |
from conductor.solver.request import functions
from conductor.solver.utils.utils import OPERATOR_FUNCTIONS
GOALS = {'minimize': 'min',
'maximize': 'max'}
def get_method_class(function_name):
module_name = getattr(functions, function_name)
return getattr(module_name, dir(module_name)[0])
def get_normalized_value(value, start, end):
return (value - start) / (end - start)
class GenericObjective(object):
def __init__(self, objective_function):
self.goal = GOALS[objective_function.get('goal')]
self.operation_function = objective_function.get('operation_function')
self.operand_list = []
def compute(self, _decision_path, _request):
value = self.compute_operation_function(self.operation_function, _decision_path, _request)
_decision_path.cumulated_value = value
_decision_path.total_value = \
_decision_path.cumulated_value + \
_decision_path.heuristic_to_go_value
def compute_operation_function(self, operation_function, _decision_path, _request):
operator = operation_function.get('operator')
operands = operation_function.get('operands')
result_list = []
for operand in operands:
if 'operation_function' in operand:
value = self.compute_operation_function(operand.get('operation_function'),
_decision_path, _request)
else:
function_name = operand.get('function')
function_class = get_method_class(function_name)
function = function_class(function_name)
args = function.get_args_from_params(_decision_path, _request,
operand.get('params'))
value = function.compute(*args)
if 'normalization' in operand:
normalization = operand.get('normalization')
value = get_normalized_value(value, normalization.get('start'),
normalization.get('end'))
if 'weight' in operand:
value = value * operand.get("weight")
result_list.append(value)
return OPERATOR_FUNCTIONS.get(operator)(result_list)
| true | true |
1c2d9283d382e048364a6ebcb1d9c183da7ec7f7 | 218 | py | Python | examples/use_as_module_minimal.py | DougBarry/m365-endpoint-api-digester | d1ed1f4223a040cae021aef2e769929682e1aa34 | [
"MIT"
] | null | null | null | examples/use_as_module_minimal.py | DougBarry/m365-endpoint-api-digester | d1ed1f4223a040cae021aef2e769929682e1aa34 | [
"MIT"
] | 1 | 2021-05-07T17:04:21.000Z | 2021-05-07T17:04:21.000Z | examples/use_as_module_minimal.py | DougBarry/m365-endpoint-api-digester | d1ed1f4223a040cae021aef2e769929682e1aa34 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Part of m365-endpoint-api-digester
# Use as module example minimal
import pprint
from m365digester.M365Digester import M365Digester
app = M365Digester()
app.main()
pprint.pprint(app.rule_list)
| 24.222222 | 50 | 0.798165 |
import pprint
from m365digester.M365Digester import M365Digester
app = M365Digester()
app.main()
pprint.pprint(app.rule_list)
| true | true |
1c2d93f0de31e8fd4ad1ab51951960e6ac32247b | 81 | py | Python | src/twisted/test/reflect_helper_VE.py | giadram/twisted | 4771b1340b822d20d0664bb7d8334e8fb7e52863 | [
"MIT",
"Unlicense"
] | 4,612 | 2015-01-01T12:57:23.000Z | 2022-03-30T01:08:23.000Z | src/twisted/test/reflect_helper_VE.py | giadram/twisted | 4771b1340b822d20d0664bb7d8334e8fb7e52863 | [
"MIT",
"Unlicense"
] | 1,243 | 2015-01-23T17:23:59.000Z | 2022-03-28T13:46:17.000Z | src/twisted/test/reflect_helper_VE.py | giadram/twisted | 4771b1340b822d20d0664bb7d8334e8fb7e52863 | [
"MIT",
"Unlicense"
] | 1,236 | 2015-01-13T14:41:26.000Z | 2022-03-17T07:12:36.000Z | # Helper for a test_reflect test
raise ValueError("Stuff is broken and things")
| 20.25 | 46 | 0.777778 |
raise ValueError("Stuff is broken and things")
| true | true |
1c2d94414b183f0621b1473a1c8116496d480455 | 3,500 | py | Python | .leetcode/11.container-with-most-water.py | KuiyuanFu/PythonLeetCode | 8962df2fa838eb7ae48fa59de272ba55a89756d8 | [
"MIT"
] | null | null | null | .leetcode/11.container-with-most-water.py | KuiyuanFu/PythonLeetCode | 8962df2fa838eb7ae48fa59de272ba55a89756d8 | [
"MIT"
] | null | null | null | .leetcode/11.container-with-most-water.py | KuiyuanFu/PythonLeetCode | 8962df2fa838eb7ae48fa59de272ba55a89756d8 | [
"MIT"
] | null | null | null | # @lc app=leetcode id=11 lang=python3
#
# [11] Container With Most Water
#
# https://leetcode.com/problems/container-with-most-water/description/
#
# algorithms
# Medium (52.91%)
# Likes: 9053
# Dislikes: 697
# Total Accepted: 914.4K
# Total Submissions: 1.7M
# Testcase Example: '[1,8,6,2,5,4,8,3,7]'
#
# Given n non-negative integers a1, a2, ..., an , where each represents a point
# at coordinate (i, ai). n vertical lines are drawn such that the two endpoints
# of the line i is at (i, ai) and (i, 0). Find two lines, which, together with
# the x-axis forms a container, such that the container contains the most
# water.
#
# Notice that you may not slant the container.
#
#
# Example 1:
#
#
# Input: height = [1,8,6,2,5,4,8,3,7]
# Output: 49
# Explanation: The above vertical lines are represented by array
# [1,8,6,2,5,4,8,3,7]. In this case, the max area of water (blue section) the
# container can contain is 49.
#
#
# Example 2:
#
#
# Input: height = [1,1]
# Output: 1
#
#
# Example 3:
#
#
# Input: height = [4,3,2,1,4]
# Output: 16
#
#
# Example 4:
#
#
# Input: height = [1,2,1]
# Output: 2
#
#
#
# Constraints:
#
#
# n == height.length
# 2 <= n <= 10^5
# 0 <= height[i] <= 10^4
#
#
#
#
# @lc tags=array;two-pointers
# @lc imports=start
from imports import *
# @lc imports=end
# @lc idea=start
#
# 在一系列柱子中,计算选择两个柱子组成的容器最大的容量。从两侧向内依次找比原来大高的柱子。比如从左侧开始,之后向右找到一根更高的柱子,这根柱子与另一侧柱子组成的容器才有可能大于原来的容器,因为移动时,容器的底变小了。每次都移动较矮的一侧的柱子。
#
# @lc idea=end
# @lc group=two-pointers
# @lc rank=10
# @lc code=start
class Solution:
def maxArea(self, height: List[int]) -> int:
leftMax = 0
rightMax = len(height) - 1
waterMax = min(height[leftMax],
height[rightMax]) * (rightMax - leftMax)
left = leftMax
right = rightMax
while left < right:
if (height[leftMax] < height[rightMax]):
left += 1
while left < right:
if height[left] > height[leftMax]:
leftMax = left
break
else:
left += 1
else:
right -= 1
while left < right:
if height[right] > height[rightMax]:
rightMax = right
break
else:
right -= 1
water = min(height[leftMax], height[rightMax]) * \
(rightMax - leftMax)
waterMax = water if water > waterMax else waterMax
return waterMax
pass
# @lc code=end
# @lc main=start
if __name__ == '__main__':
print('Example 1:')
print('Input : ')
print('height = [1,8,6,2,5,4,8,3,7]')
print('Output :')
print(str(Solution().maxArea([1, 8, 6, 2, 5, 4, 8, 3, 7])))
print('Exception :')
print('49')
print()
print('Example 2:')
print('Input : ')
print('height = [1,1]')
print('Output :')
print(str(Solution().maxArea([1, 1])))
print('Exception :')
print('1')
print()
print('Example 3:')
print('Input : ')
print('height = [4,3,2,1,4]')
print('Output :')
print(str(Solution().maxArea([4, 3, 2, 1, 4])))
print('Exception :')
print('16')
print()
print('Example 4:')
print('Input : ')
print('height = [1,2,1]')
print('Output :')
print(str(Solution().maxArea([1, 2, 1])))
print('Exception :')
print('2')
print()
pass
# @lc main=end | 21.604938 | 123 | 0.545429 |
from imports import *
class Solution:
def maxArea(self, height: List[int]) -> int:
leftMax = 0
rightMax = len(height) - 1
waterMax = min(height[leftMax],
height[rightMax]) * (rightMax - leftMax)
left = leftMax
right = rightMax
while left < right:
if (height[leftMax] < height[rightMax]):
left += 1
while left < right:
if height[left] > height[leftMax]:
leftMax = left
break
else:
left += 1
else:
right -= 1
while left < right:
if height[right] > height[rightMax]:
rightMax = right
break
else:
right -= 1
water = min(height[leftMax], height[rightMax]) * \
(rightMax - leftMax)
waterMax = water if water > waterMax else waterMax
return waterMax
pass
if __name__ == '__main__':
print('Example 1:')
print('Input : ')
print('height = [1,8,6,2,5,4,8,3,7]')
print('Output :')
print(str(Solution().maxArea([1, 8, 6, 2, 5, 4, 8, 3, 7])))
print('Exception :')
print('49')
print()
print('Example 2:')
print('Input : ')
print('height = [1,1]')
print('Output :')
print(str(Solution().maxArea([1, 1])))
print('Exception :')
print('1')
print()
print('Example 3:')
print('Input : ')
print('height = [4,3,2,1,4]')
print('Output :')
print(str(Solution().maxArea([4, 3, 2, 1, 4])))
print('Exception :')
print('16')
print()
print('Example 4:')
print('Input : ')
print('height = [1,2,1]')
print('Output :')
print(str(Solution().maxArea([1, 2, 1])))
print('Exception :')
print('2')
print()
pass
| true | true |
1c2d944ee87236b6114fda1638c31b1d60f9fe3b | 297 | py | Python | nivlink/__init__.py | szorowi1/NivLink | c5c03f36975a10445e586499fa516e2e38feae2b | [
"MIT"
] | 6 | 2018-08-15T16:58:13.000Z | 2021-12-06T04:38:40.000Z | nivlink/__init__.py | nivlab/NivLink | c5c03f36975a10445e586499fa516e2e38feae2b | [
"MIT"
] | 13 | 2018-08-13T21:33:58.000Z | 2019-07-04T03:51:12.000Z | nivlink/__init__.py | szorowi1/NivLink | c5c03f36975a10445e586499fa516e2e38feae2b | [
"MIT"
] | 2 | 2018-08-17T19:12:59.000Z | 2018-08-31T14:07:37.000Z | """Niv Lab software for preprocessing eyelink eyetracking data."""
__version__ = '0.2.5'
from .raw import (Raw)
from .epochs import (Epochs)
from .gaze import (align_to_aoi, compute_fixations)
from .screen import (Screen)
from . import projects
from .viz import (plot_raw_blinks, plot_heatmaps)
| 27 | 66 | 0.767677 |
__version__ = '0.2.5'
from .raw import (Raw)
from .epochs import (Epochs)
from .gaze import (align_to_aoi, compute_fixations)
from .screen import (Screen)
from . import projects
from .viz import (plot_raw_blinks, plot_heatmaps)
| true | true |
1c2d9620e834b3766a8631177e2f4224cbb3cef4 | 540 | py | Python | dataio/resources/test/zz_cleanup.py | hschwane/offline_production | e14a6493782f613b8bbe64217559765d5213dc1e | [
"MIT"
] | 1 | 2020-12-24T22:00:01.000Z | 2020-12-24T22:00:01.000Z | dataio/resources/test/zz_cleanup.py | hschwane/offline_production | e14a6493782f613b8bbe64217559765d5213dc1e | [
"MIT"
] | null | null | null | dataio/resources/test/zz_cleanup.py | hschwane/offline_production | e14a6493782f613b8bbe64217559765d5213dc1e | [
"MIT"
] | 3 | 2020-07-17T09:20:29.000Z | 2021-03-30T16:44:18.000Z | #!/usr/bin/env python
import os, sys
from glob import glob
for fname in (["alpha.i3", "alphabet.i3", "catted.i3.gz", "evens.i3",
"filtered.i3", "ints.i3", "foo.i3", "noinfo.i3", "test.i3", "tmp2.i3",
"tmp.i3", "withnulls.i3", "orphanarium.i3", "pass1.i3", "one_event.i3",
"hasmutineer.i3.gz", "split.gcd.i3", "testmultiD.*.i3", "testmulti.*.i3",
"stillhasmutineer.i3.gz"] +
glob("split.physics.*.i3")+ glob("pass1_*.i3.*")):
if os.path.exists(fname):
os.unlink(fname)
sys.exit(0)
| 36 | 77 | 0.581481 |
import os, sys
from glob import glob
for fname in (["alpha.i3", "alphabet.i3", "catted.i3.gz", "evens.i3",
"filtered.i3", "ints.i3", "foo.i3", "noinfo.i3", "test.i3", "tmp2.i3",
"tmp.i3", "withnulls.i3", "orphanarium.i3", "pass1.i3", "one_event.i3",
"hasmutineer.i3.gz", "split.gcd.i3", "testmultiD.*.i3", "testmulti.*.i3",
"stillhasmutineer.i3.gz"] +
glob("split.physics.*.i3")+ glob("pass1_*.i3.*")):
if os.path.exists(fname):
os.unlink(fname)
sys.exit(0)
| true | true |
1c2d9812282a94f2c8a881d4aa241ad48e343bf8 | 54 | py | Python | codeplayer-compiler-microservice/test.py | saurass/codeplayer | f6d1309fa56f3ec3a699bf359de9ae58bba89462 | [
"MIT"
] | 14 | 2020-07-06T16:56:49.000Z | 2020-09-16T17:25:19.000Z | codeplayer-compiler-microservice/test.py | saurass/codeplayer | f6d1309fa56f3ec3a699bf359de9ae58bba89462 | [
"MIT"
] | null | null | null | codeplayer-compiler-microservice/test.py | saurass/codeplayer | f6d1309fa56f3ec3a699bf359de9ae58bba89462 | [
"MIT"
] | 2 | 2020-07-10T05:23:46.000Z | 2020-07-26T21:20:30.000Z | from worker import Worker
work = Worker()
work.boot() | 13.5 | 25 | 0.740741 | from worker import Worker
work = Worker()
work.boot() | true | true |
1c2d9813c30395a2d4f25e2b7bc28366ada8f869 | 866 | py | Python | src/chat_engine/messages.py | asyncee/django-channels-chat | fe53f48067f98bcb2cc7ebf0d837e66d6b7b388a | [
"MIT"
] | 70 | 2016-04-15T20:57:17.000Z | 2020-12-21T19:51:32.000Z | src/chat_engine/messages.py | asyncee/django-channels-chat | fe53f48067f98bcb2cc7ebf0d837e66d6b7b388a | [
"MIT"
] | 2 | 2017-08-03T10:49:30.000Z | 2017-11-15T18:13:08.000Z | src/chat_engine/messages.py | asyncee/django-channels-chat | fe53f48067f98bcb2cc7ebf0d837e66d6b7b388a | [
"MIT"
] | 19 | 2016-10-05T17:15:19.000Z | 2020-09-25T13:07:59.000Z | import json
class Message(dict):
def __init__(self, text=None, username=None, **kwargs):
defaults = {'type': 'message'}
if text:
defaults['text'] = text
if username:
defaults['user'] = username
defaults.update(kwargs)
self['text'] = json.dumps(defaults)
class InfoMessage(Message):
def __init__(self, *args, **kwargs):
kwargs['type'] = 'info'
super().__init__(*args, **kwargs)
class SystemMessage(Message):
def __init__(self, *args, **kwargs):
kwargs['type'] = 'system'
kwargs['username'] = 'system'
super().__init__(*args, **kwargs)
def message(*args, **kwargs):
return Message(*args, **kwargs)
def info(*args, **kwargs):
return InfoMessage(*args, **kwargs)
def system(*args, **kwargs):
return SystemMessage(*args, **kwargs)
| 22.789474 | 59 | 0.591224 | import json
class Message(dict):
def __init__(self, text=None, username=None, **kwargs):
defaults = {'type': 'message'}
if text:
defaults['text'] = text
if username:
defaults['user'] = username
defaults.update(kwargs)
self['text'] = json.dumps(defaults)
class InfoMessage(Message):
def __init__(self, *args, **kwargs):
kwargs['type'] = 'info'
super().__init__(*args, **kwargs)
class SystemMessage(Message):
def __init__(self, *args, **kwargs):
kwargs['type'] = 'system'
kwargs['username'] = 'system'
super().__init__(*args, **kwargs)
def message(*args, **kwargs):
return Message(*args, **kwargs)
def info(*args, **kwargs):
return InfoMessage(*args, **kwargs)
def system(*args, **kwargs):
return SystemMessage(*args, **kwargs)
| true | true |
1c2d98bfe8a8f52475a1a0ad8a10d0e29bc6ba96 | 120 | py | Python | src/csbuilder/errors/scheme.py | huykingsofm/csbuilder | c6ba6f0dd3fd2a0d03c7492de20a7107cb1b9191 | [
"MIT"
] | null | null | null | src/csbuilder/errors/scheme.py | huykingsofm/csbuilder | c6ba6f0dd3fd2a0d03c7492de20a7107cb1b9191 | [
"MIT"
] | null | null | null | src/csbuilder/errors/scheme.py | huykingsofm/csbuilder | c6ba6f0dd3fd2a0d03c7492de20a7107cb1b9191 | [
"MIT"
] | null | null | null | from csbuilder.errors import CSError
class SchemeError(CSError):
"The exception is raised by failures in scheme."
| 20 | 52 | 0.775 | from csbuilder.errors import CSError
class SchemeError(CSError):
| true | true |
1c2d993573b8814f52b948b6d6fd38ede1bf99c4 | 1,014 | py | Python | tests/conftest.py | AABur/python-project-lvl2 | 5be0fe2ceb56afc7877ff12680fcb29f4b98ce8f | [
"MIT"
] | 2 | 2020-10-03T18:41:00.000Z | 2021-09-20T10:07:28.000Z | tests/conftest.py | AABur/python-project-lvl2 | 5be0fe2ceb56afc7877ff12680fcb29f4b98ce8f | [
"MIT"
] | 15 | 2021-01-14T08:18:42.000Z | 2021-06-25T15:49:09.000Z | tests/conftest.py | AABur/python-project-lvl2 | 5be0fe2ceb56afc7877ff12680fcb29f4b98ce8f | [
"MIT"
] | 7 | 2020-11-22T11:21:24.000Z | 2021-12-11T15:38:20.000Z | # -*- coding:utf-8 -*-
import pytest
@pytest.fixture()
def simple_before_path():
return 'tests/fixtures/simple/before.yaml'
@pytest.fixture()
def simple_after_path():
return 'tests/fixtures/simple/after.yaml'
@pytest.fixture()
def complex_before_path():
return 'tests/fixtures/complex/before.json'
@pytest.fixture()
def complex_after_path():
return 'tests/fixtures/complex/after.json'
@pytest.fixture()
def result_complex_json():
return 'tests/fixtures/complex/result_json.txt'
@pytest.fixture()
def result_complex_plain():
return 'tests/fixtures/complex/result_plain.txt'
@pytest.fixture()
def result_complex_stylish():
return 'tests/fixtures/complex/result_stylish.txt'
@pytest.fixture()
def result_simple_json():
return 'tests/fixtures/simple/result_json.txt'
@pytest.fixture()
def result_simple_plain():
return 'tests/fixtures/simple/result_plain.txt'
@pytest.fixture()
def result_simple_stylish():
return 'tests/fixtures/simple/result_stylish.txt'
| 18.777778 | 54 | 0.747535 |
import pytest
@pytest.fixture()
def simple_before_path():
return 'tests/fixtures/simple/before.yaml'
@pytest.fixture()
def simple_after_path():
return 'tests/fixtures/simple/after.yaml'
@pytest.fixture()
def complex_before_path():
return 'tests/fixtures/complex/before.json'
@pytest.fixture()
def complex_after_path():
return 'tests/fixtures/complex/after.json'
@pytest.fixture()
def result_complex_json():
return 'tests/fixtures/complex/result_json.txt'
@pytest.fixture()
def result_complex_plain():
return 'tests/fixtures/complex/result_plain.txt'
@pytest.fixture()
def result_complex_stylish():
return 'tests/fixtures/complex/result_stylish.txt'
@pytest.fixture()
def result_simple_json():
return 'tests/fixtures/simple/result_json.txt'
@pytest.fixture()
def result_simple_plain():
return 'tests/fixtures/simple/result_plain.txt'
@pytest.fixture()
def result_simple_stylish():
return 'tests/fixtures/simple/result_stylish.txt'
| true | true |
1c2d9958ab68ed176a35f00797cb9012c552081c | 3,186 | py | Python | predict_class_sklearn.py | sayan2207/Class-Predictor | 191d98ca4d10d75fef24a4b6b78022c57679d4bc | [
"MIT"
] | 2 | 2021-11-22T03:38:49.000Z | 2021-11-22T03:38:54.000Z | predict_class_sklearn.py | sayan2207/Class-Predictor | 191d98ca4d10d75fef24a4b6b78022c57679d4bc | [
"MIT"
] | null | null | null | predict_class_sklearn.py | sayan2207/Class-Predictor | 191d98ca4d10d75fef24a4b6b78022c57679d4bc | [
"MIT"
] | null | null | null | import numpy as np
import random
import scipy.stats as ss
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap as lcm
from sklearn.neighbors import KNeighborsClassifier as knc
from time import strftime as stime
import os
def synthetic_plot(limits=(-5,5,-5,5), unit=0.1, no_of_points=20, no_of_classes=2, k=5):
(predictors, outcomes) = generate_synth_data(no_of_points, no_of_classes)
genuine_plot(predictors, outcomes, limits, unit, k)
def genuine_plot(predictors, outcomes, limits=(-5,5,-5,5), unit=0.1, k=5, home=False):
(xx, yy, prediction_grid) = make_prediction_grid(predictors, outcomes, limits, unit, k)
plot_prediction_grid(xx, yy, prediction_grid, predictors, outcomes);
def plot_prediction_grid (xx, yy, predicted_grid, predictors, outcomes):
""" Plot KNN predictions for every point on the grid."""
types = len( set( outcomes ) )
c_bg = np.zeros((types,3))
c_ob = np.zeros((types,3))
for i in range(types):
c_bg_i = np.array([random.randint(100,255) / 255, random.randint(100,255) / 255, random.randint(100,255) / 255])
c_ob_i = (c_bg_i*255 - 50)/255
c_bg[i] = c_bg_i
c_ob[i] = c_ob_i
background_colormap = lcm(c_bg)
observation_colormap = (c_ob)
plt.figure( figsize =(10,10) )
plt.pcolormesh(xx, yy, predicted_grid, cmap = background_colormap, alpha = 0.5)
xs = np.array(predictors[:,0])
ys = np.array(predictors[:,1])
outcomes = np.array( outcomes )
for i in range(types):
to_plot = outcomes==i
plt.scatter(xs[to_plot] , ys[to_plot] ,s = 50,color=observation_colormap[i] , label="Class "+str(i+1))
plt.xlabel('Variable 1'); plt.ylabel('Variable 2')
x_labels = np.linspace( np.min(xx), np.max(xx), 5 )
y_labels = np.linspace( np.min(yy), np.max(yy), 5 )
plt.xticks(x_labels, rotation="vertical")
plt.yticks(y_labels)
plt.xlim (np.min(xx), np.max(xx))
plt.ylim (np.min(yy), np.max(yy))
plt.legend(loc="lower right")
if not os.path.exists("Plots"):
os.makedirs("Plots")
filename = "Plots\plot_" + stime("%d-%m-%Y_%H-%M-%S") + ".pdf"
plt.savefig(filename)
plt.show()
def make_prediction_grid(points, outcomes, limits, steps=1, k=5):
(x_min, x_max, y_min, y_max) = limits
xs = np.arange(x_min, x_max, steps)
ys = np.arange(y_min, y_max, steps)
knn = knc(n_neighbors=k)
knn.fit(points,outcomes)
(xx, yy) = np.meshgrid(xs, ys)
prediction_grid = np.zeros(xx.shape, dtype=int)
for i,x in enumerate(xs):
for j,y in enumerate(ys):
p = np.array([x,y])
prediction_grid[j,i] = knn.predict([p])[0]
return (xx, yy, prediction_grid)
def generate_synth_data(n=50,types=2):
points = ss.norm(0 , 1).rvs((n,2))
outcomes = np.repeat(0 , n)
for i in range(1,types):
points = np.concatenate( (points, ss.norm(i , 1).rvs((n,2)) ), axis=0 )
outcomes = np.concatenate( (outcomes, np.repeat(i,n)) )
return (points, outcomes)
| 30.634615 | 120 | 0.616133 | import numpy as np
import random
import scipy.stats as ss
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap as lcm
from sklearn.neighbors import KNeighborsClassifier as knc
from time import strftime as stime
import os
def synthetic_plot(limits=(-5,5,-5,5), unit=0.1, no_of_points=20, no_of_classes=2, k=5):
(predictors, outcomes) = generate_synth_data(no_of_points, no_of_classes)
genuine_plot(predictors, outcomes, limits, unit, k)
def genuine_plot(predictors, outcomes, limits=(-5,5,-5,5), unit=0.1, k=5, home=False):
(xx, yy, prediction_grid) = make_prediction_grid(predictors, outcomes, limits, unit, k)
plot_prediction_grid(xx, yy, prediction_grid, predictors, outcomes);
def plot_prediction_grid (xx, yy, predicted_grid, predictors, outcomes):
types = len( set( outcomes ) )
c_bg = np.zeros((types,3))
c_ob = np.zeros((types,3))
for i in range(types):
c_bg_i = np.array([random.randint(100,255) / 255, random.randint(100,255) / 255, random.randint(100,255) / 255])
c_ob_i = (c_bg_i*255 - 50)/255
c_bg[i] = c_bg_i
c_ob[i] = c_ob_i
background_colormap = lcm(c_bg)
observation_colormap = (c_ob)
plt.figure( figsize =(10,10) )
plt.pcolormesh(xx, yy, predicted_grid, cmap = background_colormap, alpha = 0.5)
xs = np.array(predictors[:,0])
ys = np.array(predictors[:,1])
outcomes = np.array( outcomes )
for i in range(types):
to_plot = outcomes==i
plt.scatter(xs[to_plot] , ys[to_plot] ,s = 50,color=observation_colormap[i] , label="Class "+str(i+1))
plt.xlabel('Variable 1'); plt.ylabel('Variable 2')
x_labels = np.linspace( np.min(xx), np.max(xx), 5 )
y_labels = np.linspace( np.min(yy), np.max(yy), 5 )
plt.xticks(x_labels, rotation="vertical")
plt.yticks(y_labels)
plt.xlim (np.min(xx), np.max(xx))
plt.ylim (np.min(yy), np.max(yy))
plt.legend(loc="lower right")
if not os.path.exists("Plots"):
os.makedirs("Plots")
filename = "Plots\plot_" + stime("%d-%m-%Y_%H-%M-%S") + ".pdf"
plt.savefig(filename)
plt.show()
def make_prediction_grid(points, outcomes, limits, steps=1, k=5):
(x_min, x_max, y_min, y_max) = limits
xs = np.arange(x_min, x_max, steps)
ys = np.arange(y_min, y_max, steps)
knn = knc(n_neighbors=k)
knn.fit(points,outcomes)
(xx, yy) = np.meshgrid(xs, ys)
prediction_grid = np.zeros(xx.shape, dtype=int)
for i,x in enumerate(xs):
for j,y in enumerate(ys):
p = np.array([x,y])
prediction_grid[j,i] = knn.predict([p])[0]
return (xx, yy, prediction_grid)
def generate_synth_data(n=50,types=2):
points = ss.norm(0 , 1).rvs((n,2))
outcomes = np.repeat(0 , n)
for i in range(1,types):
points = np.concatenate( (points, ss.norm(i , 1).rvs((n,2)) ), axis=0 )
outcomes = np.concatenate( (outcomes, np.repeat(i,n)) )
return (points, outcomes)
| true | true |
1c2d9aac1cb93c456e03168d1fdf9752271e65c0 | 3,624 | py | Python | schedule/calendars/models.py | Tomatosoup97/schedule | 2e7d9b0f49c3d96aed58812cf29866f11ab0fdba | [
"0BSD"
] | null | null | null | schedule/calendars/models.py | Tomatosoup97/schedule | 2e7d9b0f49c3d96aed58812cf29866f11ab0fdba | [
"0BSD"
] | null | null | null | schedule/calendars/models.py | Tomatosoup97/schedule | 2e7d9b0f49c3d96aed58812cf29866f11ab0fdba | [
"0BSD"
] | null | null | null | from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from django.db import models
from slugify import slugify
from users.models import ClientProfile, HostProfile
from core.models import TimeStampedModel
class Meeting(TimeStampedModel):
"""
Meeting created by host(s) for client(s),
marked on calendar,
can be private or public
"""
title = models.CharField(_('title'), max_length=100, db_index=True)
description = models.TextField(_('description'))
image = models.ImageField(_('image'),
upload_to='meeting/%Y/%m', null=True, blank=True)
location = models.CharField(_('location'), max_length=100, blank=True)
start = models.DateTimeField(_('start of the meeting'), db_index=True)
end = models.DateTimeField(_('end of the meeting'), blank=True, null=True)
private = models.BooleanField(_('private'),
default=True,
help_text=_('mark if you want this meeting not to be seen by clients'))
public = models.BooleanField(_('public'),
default=False,
help_text=_('mark if you want this meeting to be public' \
' (visible for everyone)'))
hosts = models.ManyToManyField(HostProfile, db_index=True)
clients = models.ManyToManyField(ClientProfile, db_index=True, blank=True)
tags = models.ManyToManyField(
'Tag', verbose_name=_('tags'),
blank=True, related_name='meetings')
category = models.ForeignKey(
'Category', verbose_name=_('category'),
blank=True, null=True, related_name='meetings')
slug = models.SlugField(_('url name'), editable=False)
class Meta:
verbose_name = _('meeting')
verbose_name_plural = _('meetings')
ordering = ['start']
def duration(self):
if self.end:
return self.end - self.start
return None
def save(self, *args, **kwargs):
# Create slug on creation
if not self.id:
self.slug = slugify(self.title)
super(Meeting, self).save(*args, **kwargs)
def __str__(self):
return self.title
class Suggestion(models.Model):
"""
Suggestion made by Client
"""
title = models.CharField(_('title'), max_length=80, unique=True)
description = models.TextField(_('description'))
owner = models.ForeignKey(
settings.AUTH_USER_MODEL, db_index=True, editable=False)
meeting = models.ForeignKey(
Meeting, related_name='suggestion', verbose_name=_('meeting'))
class Meta:
verbose_name = _('suggestion')
verbose_name_plural = _('suggestions')
def __str__(self):
return self.title
class Category(models.Model):
COLORS = (
('white', _('white')),
('grey', _('grey')),
('red', _('red')),
('green', _('green')),
('blue', _('blue')),
('yellow', _('yellow')),
)
name = models.CharField(_('name'), max_length=80, unique=True)
color = models.CharField(
_('color'),
choices = COLORS,
max_length=20,
default='white')
class Meta:
verbose_name = _('category')
verbose_name_plural = _('categories')
def __str__(self):
return self.name
class Tag(models.Model):
"""
Short tag to provide brief information
and improve searching mechanism
"""
name = models.CharField(_('name'), max_length=40)
class Meta:
verbose_name = _('tag')
verbose_name_plural = _('tags')
def __str__(self):
return self.name
| 29.950413 | 79 | 0.628035 | from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from django.db import models
from slugify import slugify
from users.models import ClientProfile, HostProfile
from core.models import TimeStampedModel
class Meeting(TimeStampedModel):
title = models.CharField(_('title'), max_length=100, db_index=True)
description = models.TextField(_('description'))
image = models.ImageField(_('image'),
upload_to='meeting/%Y/%m', null=True, blank=True)
location = models.CharField(_('location'), max_length=100, blank=True)
start = models.DateTimeField(_('start of the meeting'), db_index=True)
end = models.DateTimeField(_('end of the meeting'), blank=True, null=True)
private = models.BooleanField(_('private'),
default=True,
help_text=_('mark if you want this meeting not to be seen by clients'))
public = models.BooleanField(_('public'),
default=False,
help_text=_('mark if you want this meeting to be public' \
' (visible for everyone)'))
hosts = models.ManyToManyField(HostProfile, db_index=True)
clients = models.ManyToManyField(ClientProfile, db_index=True, blank=True)
tags = models.ManyToManyField(
'Tag', verbose_name=_('tags'),
blank=True, related_name='meetings')
category = models.ForeignKey(
'Category', verbose_name=_('category'),
blank=True, null=True, related_name='meetings')
slug = models.SlugField(_('url name'), editable=False)
class Meta:
verbose_name = _('meeting')
verbose_name_plural = _('meetings')
ordering = ['start']
def duration(self):
if self.end:
return self.end - self.start
return None
def save(self, *args, **kwargs):
if not self.id:
self.slug = slugify(self.title)
super(Meeting, self).save(*args, **kwargs)
def __str__(self):
return self.title
class Suggestion(models.Model):
title = models.CharField(_('title'), max_length=80, unique=True)
description = models.TextField(_('description'))
owner = models.ForeignKey(
settings.AUTH_USER_MODEL, db_index=True, editable=False)
meeting = models.ForeignKey(
Meeting, related_name='suggestion', verbose_name=_('meeting'))
class Meta:
verbose_name = _('suggestion')
verbose_name_plural = _('suggestions')
def __str__(self):
return self.title
class Category(models.Model):
COLORS = (
('white', _('white')),
('grey', _('grey')),
('red', _('red')),
('green', _('green')),
('blue', _('blue')),
('yellow', _('yellow')),
)
name = models.CharField(_('name'), max_length=80, unique=True)
color = models.CharField(
_('color'),
choices = COLORS,
max_length=20,
default='white')
class Meta:
verbose_name = _('category')
verbose_name_plural = _('categories')
def __str__(self):
return self.name
class Tag(models.Model):
name = models.CharField(_('name'), max_length=40)
class Meta:
verbose_name = _('tag')
verbose_name_plural = _('tags')
def __str__(self):
return self.name
| true | true |
1c2d9ab8b133938e8e0dd83bf95decfa58103b9c | 1,473 | py | Python | mockdown/__init__.py | anandology/mockdown | e37b27c841c8076aad7534fb5e5c480e63645fcf | [
"BSD-3-Clause"
] | 4 | 2015-09-15T21:27:27.000Z | 2021-08-17T09:46:14.000Z | mockdown/__init__.py | anandology/mockdown | e37b27c841c8076aad7534fb5e5c480e63645fcf | [
"BSD-3-Clause"
] | 3 | 2015-09-21T06:38:58.000Z | 2015-09-23T10:25:14.000Z | mockdown/__init__.py | anandology/mockdown | e37b27c841c8076aad7534fb5e5c480e63645fcf | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Mockdown
~~~~~~~~
Tool to simplify creating HTML mockups.
"""
from flask import Blueprint, render_template, abort, redirect, url_for
import yaml
import pathlib
from .mockdown import Mockdown
mockdown_app = Blueprint("mockdown", __name__,
template_folder="templates")
_mockdown = Mockdown(root=".")
def mockdown_url_for(endpoint, **kwargs):
if endpoint == 'static':
return url_for('static', **kwargs)
else:
return url_for('.mock', path=endpoint + ".html")
_mockdown.template_globals['url_for'] = mockdown_url_for
@mockdown_app.route("/")
@mockdown_app.route("/<path:path>")
def mock(path=""):
if not _mockdown.exists(path):
abort(404)
elif _mockdown.is_dir(path):
return mock_index(path)
elif path.endswith(".html"):
return _mockdown.render_template(path)
elif path.endswith(".yml"):
data = _mockdown.read_yaml_file(path)
return yaml.dump(data)
else:
print("aborting...")
abort(404)
def mock_index(path):
if path and not path.endswith("/"):
return redirect("/" + path + "/")
root = pathlib.Path(_mockdown.root).name
pathobj = pathlib.Path(_mockdown.root, path)
subdirs = [p.name for p in pathobj.iterdir() if p.is_dir()]
filenames = [f.name for f in pathobj.glob("*.html")]
return render_template("index.html", root=root, path=path, subdirs=subdirs, filenames=filenames)
| 28.326923 | 100 | 0.648337 |
from flask import Blueprint, render_template, abort, redirect, url_for
import yaml
import pathlib
from .mockdown import Mockdown
mockdown_app = Blueprint("mockdown", __name__,
template_folder="templates")
_mockdown = Mockdown(root=".")
def mockdown_url_for(endpoint, **kwargs):
if endpoint == 'static':
return url_for('static', **kwargs)
else:
return url_for('.mock', path=endpoint + ".html")
_mockdown.template_globals['url_for'] = mockdown_url_for
@mockdown_app.route("/")
@mockdown_app.route("/<path:path>")
def mock(path=""):
if not _mockdown.exists(path):
abort(404)
elif _mockdown.is_dir(path):
return mock_index(path)
elif path.endswith(".html"):
return _mockdown.render_template(path)
elif path.endswith(".yml"):
data = _mockdown.read_yaml_file(path)
return yaml.dump(data)
else:
print("aborting...")
abort(404)
def mock_index(path):
if path and not path.endswith("/"):
return redirect("/" + path + "/")
root = pathlib.Path(_mockdown.root).name
pathobj = pathlib.Path(_mockdown.root, path)
subdirs = [p.name for p in pathobj.iterdir() if p.is_dir()]
filenames = [f.name for f in pathobj.glob("*.html")]
return render_template("index.html", root=root, path=path, subdirs=subdirs, filenames=filenames)
| true | true |
1c2d9b171f83136cf0cedeeb9ad7303c8580d9b0 | 25,328 | py | Python | dragg/reformat.py | apigott/dra | c2f24d9ef6d9d8cb78c5a058574cec8e132afd4f | [
"MIT"
] | 2 | 2021-12-02T09:34:01.000Z | 2021-12-26T07:51:31.000Z | dragg/reformat.py | wuyou33/dragg | c2f24d9ef6d9d8cb78c5a058574cec8e132afd4f | [
"MIT"
] | 7 | 2020-05-09T15:50:10.000Z | 2020-11-12T15:58:56.000Z | dragg/reformat.py | wuyou33/dragg | c2f24d9ef6d9d8cb78c5a058574cec8e132afd4f | [
"MIT"
] | 3 | 2021-03-29T16:00:00.000Z | 2021-12-02T09:33:57.000Z | import os
import sys
import json
import toml
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
import itertools as it
import random
import plotly.graph_objects as go
import plotly.express as px
from plotly.subplots import make_subplots
import plotly.io as pio
import plotly
from prettytable import PrettyTable
from dragg.logger import Logger
class Reformat:
def __init__(self):
self.log = Logger("reformat")
self.data_dir = os.path.expanduser(os.environ.get('DATA_DIR','data'))
self.outputs_dir = os.path.expanduser(os.environ.get('OUTPUT_DIR','outputs'))
if not os.path.isdir(self.outputs_dir):
self.log.logger.error("No outputs directory found.")
quit()
self.config_file = os.path.join(self.data_dir, os.environ.get('CONFIG_FILE', 'config.toml'))
self.config = self._import_config()
self.add_date_ranges()
self.add_mpc_params()
self.date_folders = self.set_date_folders()
self.mpc_folders = self.set_mpc_folders()
self.files = self.set_files()
self.fig_list = None
self.save_path = os.path.join('outputs', 'images', datetime.now().strftime("%m%dT%H%M%S"))
def main(self):
# put a list of plotting functions here
self.sample_home = "Crystal-RXXFA"
self.plots = [self.rl2baseline,
self.plot_single_home]
self.images = self.plot_all()
def plot_all(self, save_images=False):
figs = []
for plot in self.plots:
fig = make_subplots(specs=[[{"secondary_y": True}]])
fig.update_layout(
font=dict(
size=65,
)
)
fig.update_xaxes(
title_standoff=80
)
fig.update_yaxes(
title_standoff=60
)
fig = plot(fig)
fig.show()
figs += [fig]
return figs
def save_images(self):
if not os.path.isdir(self.save_path):
os.makedirs(self.save_path)
for img in self.images:
self.log.logger.info(f"Saving images of outputs to timestamped folder at {self.save_path}.")
try:
path = os.path.join(self.save_path, f"{img.layout.title.text}.png")
pio.write_image(img, path, width=1024, height=768)
except:
self.log.logger.error("Could not save plotly image(s) to outputs directory.")
def add_date_ranges(self):
start_dates = set([datetime.strptime(self.config['simulation']['start_datetime'], '%Y-%m-%d %H')])
end_dates = set([datetime.strptime(self.config['simulation']['end_datetime'], '%Y-%m-%d %H')])
temp = {"start_datetime": start_dates, "end_datetime": end_dates}
self.date_ranges = temp
def add_mpc_params(self):
n_houses = self.config['community']['total_number_homes']
mpc_horizon = self.config['home']['hems']['prediction_horizon']
dt = self.config['home']['hems']['sub_subhourly_steps']
solver = self.config['home']['hems']['solver']
check_type = self.config['simulation']['check_type']
agg_interval = self.config['agg']['subhourly_steps']
temp = {"n_houses": set([n_houses]), "mpc_prediction_horizons": set([mpc_horizon]), "mpc_hourly_steps": set([dt]), "check_type": set([check_type]), "agg_interval": set([agg_interval]), "solver": set([solver])}
# for key in temp:
# if key in additional_params:
# temp[key] |= set(additional_params[key])
self.mpc_params = temp
self.versions = set([self.config['simulation']['named_version']])
def set_date_folders(self):
temp = []
# self.date_ranges['mpc_steps'] = set([self.config['home']['hems']['sub_subhourly_steps']])
# self.date_ranges['rl_steps'] = set([self.config['agg']['subhourly_steps']])
keys, values = zip(*self.date_ranges.items())
permutations = [dict(zip(keys, v)) for v in it.product(*values)]
permutations = sorted(permutations, key=lambda i: i['end_datetime'], reverse=True)
for i in permutations:
date_folder = os.path.join(self.outputs_dir, f"{i['start_datetime'].strftime('%Y-%m-%dT%H')}_{i['end_datetime'].strftime('%Y-%m-%dT%H')}")
self.log.logger.info(f"Looking for files in: {date_folder}.")
if os.path.isdir(date_folder):
hours = i['end_datetime'] - i['start_datetime']
hours = int(hours.total_seconds() / 3600)
new_folder = {"folder": date_folder, "hours": hours, "start_dt": i['start_datetime']}
temp.append(new_folder)
if len(temp) == 0:
self.log.logger.error("No files found for the date ranges specified.")
exit()
return temp
def set_mpc_folders(self):
temp = []
keys, values = zip(*self.mpc_params.items())
permutations = [dict(zip(keys, v)) for v in it.product(*values)]
for j in self.date_folders:
for i in permutations:
mpc_folder = os.path.join(j["folder"], f"{i['check_type']}-homes_{i['n_houses']}-horizon_{i['mpc_prediction_horizons']}-interval_{60 // i['agg_interval']}-{60 // i['mpc_hourly_steps'] // i['agg_interval']}-solver_{i['solver']}")
if os.path.isdir(mpc_folder):
timesteps = j['hours'] * i['agg_interval']
minutes = 60 // i['agg_interval']
x_lims = [j['start_dt'] + timedelta(minutes=minutes*x) for x in range(timesteps)]
set = {'path': mpc_folder, 'agg_dt': i['agg_interval'], 'ts': timesteps, 'x_lims': x_lims,}
if not mpc_folder in temp:
temp.append(set)
for x in temp:
print(x['path'])
return temp
def set_files(self):
temp = []
keys, values = zip(*self.mpc_params.items())
permutations = [dict(zip(keys, v)) for v in it.product(*values)]
color_families = [['rgb(204,236,230)','rgb(153,216,201)','rgb(102,194,164)','rgb(65,174,118)','rgb(35,139,69)','rgb(0,88,36)'],
['rgb(191,211,230)','rgb(158,188,218)','rgb(140,150,198)','rgb(140,107,177)','rgb(136,65,157)','rgb(110,1,107)'],
['rgb(217,217,217)','rgb(189,189,189)','rgb(150,150,150)','rgb(115,115,115)','rgb(82,82,82)','rgb(37,37,37)'],
['rgb(253,208,162)','rgb(253,174,107)','rgb(253,141,60)','rgb(241,105,19)','rgb(217,72,1)','rgb(140,45,4)'],]
c = 0
d = 0
dash = ["solid", "dash", "dot", "dashdot"]
for j in self.mpc_folders:
path = j['path']
for i in permutations:
for k in self.versions:
dir = os.path.join(path, f"version-{k}")
for case_dir in os.listdir(dir):
file = os.path.join(dir, case_dir, "results.json")
if os.path.isfile(file):
name = f"{case_dir}, v = {k}"
set = {"results": file, "name": name, "parent": j, "color": color_families[c][d], "dash":dash[c]}
temp.append(set)
self.log.logger.info(f"Adding baseline file at {file}")
d = (d + 1) % len(color_families[c])
c = (c + 1) % len(color_families)
return temp
def get_type_list(self, type):
type_list = set([])
i = 0
for file in self.files:
with open(file["results"]) as f:
data = json.load(f)
temp = set([])
for name, house in data.items():
try:
if house["type"] == type:
temp.add(name)
except:
pass
if i < 1:
type_list = temp
else:
type_list = type_list.intersection(temp)
self.log.logger.info(f"{len(type_list)} homes found of type {type}: {type_list}")
return type_list
def _import_config(self):
if not os.path.exists(self.config_file):
self.log.logger.error(f"Configuration file does not exist: {self.config_file}")
sys.exit(1)
with open(self.config_file, 'r') as f:
data = toml.load(f)
return data
def plot_environmental_values(self, name, fig, summary, file, fname):
fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=summary["OAT"][0:file["parent"]["ts"]], name=f"OAT (C)", visible='legendonly'))
fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=summary["GHI"][0:file["parent"]["ts"]], name=f"GHI", line={'color':'goldenrod', 'width':8}, visible='legendonly'))
fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=summary["TOU"][0:file["parent"]["ts"]], name=f"TOU Price ($/kWh)", line_shape='hv', visible='legendonly'), secondary_y=True)
fig = self.plot_thermal_bounds(fig, file['parent']['x_lims'], name, fname)
return fig
def plot_thermal_bounds(self, fig, x_lims, name, fname):
ah_file = os.path.join(self.outputs_dir, f"all_homes-{self.config['community']['total_number_homes']}-config.json")
with open(ah_file) as f:
data = json.load(f)
for dict in data:
if dict['name'] == name:
data = dict
fig.add_trace(go.Scatter(x=x_lims, y=data['hvac']['temp_in_min'] * np.ones(len(x_lims)), name=f"Tin_min", fill=None, showlegend=False, mode='lines', line_color='lightsteelblue'))
fig.add_trace(go.Scatter(x=x_lims, y=data['hvac']['temp_in_max'] * np.ones(len(x_lims)), name=f"Tin_bounds", fill='tonexty' , mode='lines', line_color='lightsteelblue'))
fig.add_trace(go.Scatter(x=x_lims, y=data['wh']['temp_wh_min'] * np.ones(len(x_lims)), name=f"Twh_min", fill=None, showlegend=False, mode='lines', line_color='pink'))
fig.add_trace(go.Scatter(x=x_lims, y=data['wh']['temp_wh_max'] * np.ones(len(x_lims)), name=f"Twh_bounds", fill='tonexty' , mode='lines', line_color='pink'))
return fig
def plot_base_home(self, name, fig, data, summary, fname, file, plot_price=True):
fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=data["temp_in_opt"], name=f"Tin - {fname}", legendgroup='tin', line={'color':'blue', 'width':8, 'dash':file['dash']}))
fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=data["temp_wh_opt"], showlegend=True, legendgroup='twh', name=f"Twh - {fname}", line={'color':'firebrick', 'width':8, 'dash':file['dash']}))
fig.update_layout(legend=dict(
yanchor="top",
y=0.99,
xanchor="left",
x=0.03,
font=dict(
size=65),
),
yaxis_title="Temperature (deg C)"
)
return fig
def plot_pv(self, name, fig, data, fname, file):
fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=data["p_pv_opt"], name=f"Ppv (kW)", line_color='orange', line_shape='hv', visible='legendonly'))
fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=data["u_pv_curt_opt"], name=f"U_pv_curt (kW) - {fname}", line_shape='hv', visible='legendonly'))
return fig
def plot_battery(self, name, fig, data, fname, file):
fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=data["e_batt_opt"], name=f"SOC (kW) - {fname}", line_shape='hv', visible='legendonly'))
fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=data["p_batt_ch"], name=f"Pch (kW) - {fname}", line_shape='hv', visible='legendonly'))
fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=data["p_batt_disch"], name=f"Pdis (kW) - {fname}", line_shape='hv', visible='legendonly'))
return fig
def plot_single_home(self, fig):
if self.sample_home is None:
if type is None:
type = "base"
self.log.logger.warning("Specify a home type or name. Proceeding with home of type: \"base\".")
type_list = self._type_list(type)
self.sample_home = random.sample(type_list,1)[0]
self.log.logger.info(f"Proceeding with home: {name}")
flag = False
for file in self.files:
with open(file["results"]) as f:
comm_data = json.load(f)
try:
data = comm_data[self.sample_home]
except:
self.log.logger.error(f"No home with name: {self.sample_home}")
return
type = data["type"]
summary = comm_data["Summary"]
if not flag:
fig = self.plot_environmental_values(self.sample_home, fig, summary, file, file["name"])
flag = True
fig.update_xaxes(title_text="Time of Day (hour)")
fig.update_layout(title_text=f"{self.sample_home} - {type} type")
fig = self.plot_base_home(self.sample_home, fig, data, summary, file["name"], file)
if 'pv' in type:
fig = self.plot_pv(self.sample_home, fig, data, file["name"], file)
if 'batt' in type:
fig = self.plot_battery(self.sample_home, fig, data, file["name"], file)
return fig
def plot_all_homes(self, fig=None):
homes = ["Crystal-RXXFA","Myles-XQ5IA","Lillie-NMHUH","Robert-2D73X","Serena-98EPE","Gary-U95TS","Bruno-PVRNB","Dorothy-9XMNY","Jason-INS3S","Alvin-4BAYB",]
for self.sample_home in homes:
fig = make_subplots(specs=[[{"secondary_y": True}]])
fig.update_layout(
font=dict(
size = 12
)
)
fig = self.plot_single_home(fig)
return
def plot_baseline(self, fig):
for file in self.files:
with open(file["results"]) as f:
data = json.load(f)
ts = len(data['Summary']['p_grid_aggregate'])-1
fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=data["Summary"]["p_grid_aggregate"], name=f"Agg Load - {file['name']}", line_shape='hv', line={'color':file['color'], 'width':4, 'dash':'solid'}))
fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=np.cumsum(np.divide(data["Summary"]["p_grid_aggregate"], file['parent']['agg_dt'])), name=f"Cumulative Agg Load - {file['name']}", line_shape='hv', visible='legendonly', line={'color':file['color'], 'width':4, 'dash':'dash'}))
fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=np.divide(np.cumsum(data["Summary"]["p_grid_aggregate"]), np.arange(ts + 1) + 1), name=f"Avg Cumulative Agg Load - {file['name']}", line_shape='hv', visible='legendonly', line={'color':file['color'], 'width':4, 'dash':'dashdot'}))
return fig
def plot_typ_day(self, fig):
rl_counter = 0
tou_counter = 0
dn_counter = 0
for file in self.files:
flag = True
with open(file["results"]) as f:
data = json.load(f)
name = file["name"]
ts = len(data['Summary']['p_grid_aggregate'])-1
rl_setpoint = data['Summary']['p_grid_setpoint']
if 'clipped' in file['name']:
rl_setpoint = np.clip(rl_setpoint, 45, 60)
loads = np.array(data["Summary"]["p_grid_aggregate"])
loads = loads[:len(loads) // (24*file['parent']['agg_dt']) * 24 * file['parent']['agg_dt']]
if len(loads) > 24:
daily_max_loads = np.repeat(np.amax(loads.reshape(-1, 24*file['parent']['agg_dt']), axis=1), 24*file['parent']['agg_dt'])
daily_min_loads = np.repeat(np.amin(loads.reshape(-1, 24*file['parent']['agg_dt']), axis=1), 24*file['parent']['agg_dt'])
daily_range_loads = np.subtract(daily_max_loads, daily_min_loads)
daily_range_loads = [abs(loads[max(i-6, 0)] - loads[min(i+6, len(loads)-1)]) for i in range(len(loads))]
daily_avg_loads = np.repeat(np.mean(loads.reshape(-1, 24*file['parent']['agg_dt']), axis=1), 24*file['parent']['agg_dt'])
daily_std_loads = np.repeat(np.std(loads.reshape(-1, 24*file['parent']['agg_dt']), axis=1), 24*file['parent']['agg_dt'])
daily_std_loads = [np.std(loads[max(i-6, 0):i+6]) for i in range(len(loads))]
composite_day = np.average(loads.reshape(-1, 24*file['parent']['agg_dt']), axis=0)
fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=composite_day, name=f"{name}", opacity=0.5, showlegend=flag, line={'color':clr, 'width':8, 'dash':dash}))
fig.update_layout(legend=dict(
yanchor="top",
y=0.45,
xanchor="left",
x=0.7
))
fig.update_layout(
font=dict(
# family="Courier New, monospace",
size=65,
),
title="Avg Daily Load Profile",
xaxis_title="Time of Day",
yaxis_title="Agg. Demand (kW)"
)
fig.update_xaxes(
title_standoff=80
)
fig.update_yaxes(
title_standoff=60
)
return fig
def plot_max_and_12hravg(self, fig):
for file in self.files:
# all_avgs.add_column()
clr = file['color']
with open(file["results"]) as f:
data = json.load(f)
name = file["name"]
ts = len(data['Summary']['p_grid_aggregate'])-1
rl_setpoint = data['Summary']['p_grid_setpoint']
if 'clipped' in file['name']:
rl_setpoint = np.clip(rl_setpoint, 45, 60)
loads = np.array(data["Summary"]["p_grid_aggregate"])
loads = loads[:len(loads) // (24*file['parent']['agg_dt']) * 24 * file['parent']['agg_dt']]
if len(loads) > 24:
daily_max_loads = np.repeat(np.amax(loads.reshape(-1, 24*file['parent']['agg_dt']), axis=1), 24*file['parent']['agg_dt'])
daily_min_loads = np.repeat(np.amin(loads.reshape(-1, 24*file['parent']['agg_dt']), axis=1), 24*file['parent']['agg_dt'])
daily_range_loads = np.subtract(daily_max_loads, daily_min_loads)
daily_range_loads = [abs(loads[max(i-6, 0)] - loads[min(i+6, len(loads)-1)]) for i in range(len(loads))]
daily_avg_loads = np.repeat(np.mean(loads.reshape(-1, 24*file['parent']['agg_dt']), axis=1), 24*file['parent']['agg_dt'])
daily_std_loads = np.repeat(np.std(loads.reshape(-1, 24*file['parent']['agg_dt']), axis=1), 24*file['parent']['agg_dt'])
fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=daily_max_loads, name=f"{name} - Daily Max", line_shape='hv', opacity=1, legendgroup="first", line={'color':'firebrick', 'dash':dash, 'width':8}))
fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=rl_setpoint, name=f"{name} - 12 Hr Avg", opacity=0.5, legendgroup="second", line={'color':'blue', 'dash':dash, 'width':8}))
fig.update_layout(legend=dict(
yanchor="top",
y=0.8,
xanchor="left",
x=0.7
))
fig.update_layout(
font=dict(
size=65,
),
title="12 Hour Avg and Daily Max",
yaxis_title="Agg. Demand (kW)"
)
fig.update_xaxes(
title_standoff=80
)
fig.update_yaxes(
title_standoff=60
)
return fig
def plot_parametric(self, fig):
all_daily_stats = PrettyTable(['run name', 'avg daily max', 'std daily max','overall max', 'avg daily range'])
for file in self.files:
clr = file['color']
with open(file["results"]) as f:
data = json.load(f)
name = file["name"]
ts = len(data['Summary']['p_grid_aggregate'])-1
rl_setpoint = data['Summary']['p_grid_setpoint']
if 'clipped' in file['name']:
rl_setpoint = np.clip(rl_setpoint, 45, 60)
loads = np.array(data["Summary"]["p_grid_aggregate"])
loads = loads[:len(loads) // (24*file['parent']['agg_dt']) * 24 * file['parent']['agg_dt']]
if len(loads) >= 24:
daily_max_loads = np.repeat(np.amax(loads.reshape(-1, 24*file['parent']['agg_dt']), axis=1), 24*file['parent']['agg_dt'])
daily_min_loads = np.repeat(np.amin(loads.reshape(-1, 24*file['parent']['agg_dt']), axis=1), 24*file['parent']['agg_dt'])
daily_range_loads = np.subtract(daily_max_loads, daily_min_loads)
daily_range_loads = [abs(loads[max(i-6, 0)] - loads[min(i+6, len(loads)-1)]) for i in range(len(loads))]
daily_avg_loads = np.repeat(np.mean(loads.reshape(-1, 24*file['parent']['agg_dt']), axis=1), 24*file['parent']['agg_dt'])
daily_std_loads = [np.std(loads[max(i-6, 0):i+6]) for i in range(len(loads))]
composite_day = np.average(loads.reshape(-1, 24*file['parent']['agg_dt']), axis=0)
fig.update_layout(legend=dict(
yanchor="top",
y=0.45,
xanchor="left",
x=0.5
))
fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=rl_setpoint, name=f"{name} - 12 Hr Avg", opacity=0.5, line={'color':clr, 'width':8}))
fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=data["Summary"]["p_grid_aggregate"], name=f"Agg Load - RL - {name}", line_shape='hv', line={'color':clr}))
fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=daily_max_loads, name=f"{name} - Daily Max", line_shape='hv', opacity=0.5, line={'color':clr, 'dash':'dot'}))
fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=daily_min_loads, name=f"Daily Min Agg Load - RL - {name}", line_shape='hv', opacity=0.5, line={'color':clr, 'dash':'dash'}))
fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=daily_range_loads, name=f"Daily Agg Load Range - RL - {name}", line_shape='hv', opacity=0.5, line={'color':clr}))
fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=np.average(daily_range_loads) * np.ones(len(loads)), name=f"Avg Daily Agg Load Range - RL - {name}", line_shape='hv', opacity=0.5, line={'color':clr}))
fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=daily_avg_loads, name=f"Daily Avg Agg Load - RL - {name}", line_shape='hv', opacity=0.5, line={'color':clr, 'dash':'dash'}))
fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=daily_std_loads, name=f"Daily Std Agg Load - RL - {name}", line_shape='hv', opacity=0.5, line={'color':clr, 'dash':'dashdot'}))
fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=np.average(daily_std_loads) * np.ones(len(loads)), name=f"Avg Daily Std Agg Load - RL - {name}", line_shape='hv', opacity=0.5, line={'color':clr, 'dash':'dashdot'}))
fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=np.cumsum(np.divide(data["Summary"]["p_grid_aggregate"],file['parent']['agg_dt'])), name=f"{name}", line_shape='hv', visible='legendonly', line={'color':clr, }))
all_daily_stats.add_row([file['name'], np.average(daily_max_loads), np.std(daily_max_loads), max(daily_max_loads), np.average(daily_range_loads)])
else:
self.log.logger.warning("Not enough data collected to have daily stats, try running the aggregator for longer.")
print(all_daily_stats)
return fig
def rl2baseline(self, fig):
if len(self.files) == 0:
self.log.logger.warning("No aggregator runs found for analysis.")
return fig
fig = self.plot_baseline(fig)
fig = self.plot_parametric(fig)
fig.update_layout(title_text="RL Baseline Comparison")
fig.update_layout(
title="Avg Daily Load Profile",
xaxis_title="Time of Day",
yaxis_title="Agg. Demand (kWh)",)
return fig
def all_rps(self, fig):
for file in self.files:
with open(file['results']) as f:
data = json.load(f)
rps = data['Summary']['RP']
fig.add_trace(go.Histogram(x=rps, name=f"{file['name']}"), row=1, col=1)
with open(file['q_results']) as f:
data = json.load(f)
data = data["horizon"]
mu = np.array(data["mu"])
std = self.config['agg']['parameters']['exploration_rate'][0]
delta = np.subtract(mu, rps)
fig.add_trace(go.Histogram(x=delta, name=f"{file['name']}"), row=2, col=1)
fig.add_trace(go.Scatter(x=[-std, -std, std, std], y=[0, 0.3*len(rps), 0.3*len(rps), 0], fill="toself"), row=2, col=1)
return fig
if __name__ == "__main__":
r = Reformat()
r.main()
| 49.662745 | 297 | 0.562618 | import os
import sys
import json
import toml
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
import itertools as it
import random
import plotly.graph_objects as go
import plotly.express as px
from plotly.subplots import make_subplots
import plotly.io as pio
import plotly
from prettytable import PrettyTable
from dragg.logger import Logger
class Reformat:
def __init__(self):
self.log = Logger("reformat")
self.data_dir = os.path.expanduser(os.environ.get('DATA_DIR','data'))
self.outputs_dir = os.path.expanduser(os.environ.get('OUTPUT_DIR','outputs'))
if not os.path.isdir(self.outputs_dir):
self.log.logger.error("No outputs directory found.")
quit()
self.config_file = os.path.join(self.data_dir, os.environ.get('CONFIG_FILE', 'config.toml'))
self.config = self._import_config()
self.add_date_ranges()
self.add_mpc_params()
self.date_folders = self.set_date_folders()
self.mpc_folders = self.set_mpc_folders()
self.files = self.set_files()
self.fig_list = None
self.save_path = os.path.join('outputs', 'images', datetime.now().strftime("%m%dT%H%M%S"))
def main(self):
self.sample_home = "Crystal-RXXFA"
self.plots = [self.rl2baseline,
self.plot_single_home]
self.images = self.plot_all()
def plot_all(self, save_images=False):
figs = []
for plot in self.plots:
fig = make_subplots(specs=[[{"secondary_y": True}]])
fig.update_layout(
font=dict(
size=65,
)
)
fig.update_xaxes(
title_standoff=80
)
fig.update_yaxes(
title_standoff=60
)
fig = plot(fig)
fig.show()
figs += [fig]
return figs
def save_images(self):
if not os.path.isdir(self.save_path):
os.makedirs(self.save_path)
for img in self.images:
self.log.logger.info(f"Saving images of outputs to timestamped folder at {self.save_path}.")
try:
path = os.path.join(self.save_path, f"{img.layout.title.text}.png")
pio.write_image(img, path, width=1024, height=768)
except:
self.log.logger.error("Could not save plotly image(s) to outputs directory.")
def add_date_ranges(self):
start_dates = set([datetime.strptime(self.config['simulation']['start_datetime'], '%Y-%m-%d %H')])
end_dates = set([datetime.strptime(self.config['simulation']['end_datetime'], '%Y-%m-%d %H')])
temp = {"start_datetime": start_dates, "end_datetime": end_dates}
self.date_ranges = temp
def add_mpc_params(self):
n_houses = self.config['community']['total_number_homes']
mpc_horizon = self.config['home']['hems']['prediction_horizon']
dt = self.config['home']['hems']['sub_subhourly_steps']
solver = self.config['home']['hems']['solver']
check_type = self.config['simulation']['check_type']
agg_interval = self.config['agg']['subhourly_steps']
temp = {"n_houses": set([n_houses]), "mpc_prediction_horizons": set([mpc_horizon]), "mpc_hourly_steps": set([dt]), "check_type": set([check_type]), "agg_interval": set([agg_interval]), "solver": set([solver])}
self.mpc_params = temp
self.versions = set([self.config['simulation']['named_version']])
def set_date_folders(self):
temp = []
keys, values = zip(*self.date_ranges.items())
permutations = [dict(zip(keys, v)) for v in it.product(*values)]
permutations = sorted(permutations, key=lambda i: i['end_datetime'], reverse=True)
for i in permutations:
date_folder = os.path.join(self.outputs_dir, f"{i['start_datetime'].strftime('%Y-%m-%dT%H')}_{i['end_datetime'].strftime('%Y-%m-%dT%H')}")
self.log.logger.info(f"Looking for files in: {date_folder}.")
if os.path.isdir(date_folder):
hours = i['end_datetime'] - i['start_datetime']
hours = int(hours.total_seconds() / 3600)
new_folder = {"folder": date_folder, "hours": hours, "start_dt": i['start_datetime']}
temp.append(new_folder)
if len(temp) == 0:
self.log.logger.error("No files found for the date ranges specified.")
exit()
return temp
def set_mpc_folders(self):
temp = []
keys, values = zip(*self.mpc_params.items())
permutations = [dict(zip(keys, v)) for v in it.product(*values)]
for j in self.date_folders:
for i in permutations:
mpc_folder = os.path.join(j["folder"], f"{i['check_type']}-homes_{i['n_houses']}-horizon_{i['mpc_prediction_horizons']}-interval_{60 // i['agg_interval']}-{60 // i['mpc_hourly_steps'] // i['agg_interval']}-solver_{i['solver']}")
if os.path.isdir(mpc_folder):
timesteps = j['hours'] * i['agg_interval']
minutes = 60 // i['agg_interval']
x_lims = [j['start_dt'] + timedelta(minutes=minutes*x) for x in range(timesteps)]
set = {'path': mpc_folder, 'agg_dt': i['agg_interval'], 'ts': timesteps, 'x_lims': x_lims,}
if not mpc_folder in temp:
temp.append(set)
for x in temp:
print(x['path'])
return temp
def set_files(self):
temp = []
keys, values = zip(*self.mpc_params.items())
permutations = [dict(zip(keys, v)) for v in it.product(*values)]
color_families = [['rgb(204,236,230)','rgb(153,216,201)','rgb(102,194,164)','rgb(65,174,118)','rgb(35,139,69)','rgb(0,88,36)'],
['rgb(191,211,230)','rgb(158,188,218)','rgb(140,150,198)','rgb(140,107,177)','rgb(136,65,157)','rgb(110,1,107)'],
['rgb(217,217,217)','rgb(189,189,189)','rgb(150,150,150)','rgb(115,115,115)','rgb(82,82,82)','rgb(37,37,37)'],
['rgb(253,208,162)','rgb(253,174,107)','rgb(253,141,60)','rgb(241,105,19)','rgb(217,72,1)','rgb(140,45,4)'],]
c = 0
d = 0
dash = ["solid", "dash", "dot", "dashdot"]
for j in self.mpc_folders:
path = j['path']
for i in permutations:
for k in self.versions:
dir = os.path.join(path, f"version-{k}")
for case_dir in os.listdir(dir):
file = os.path.join(dir, case_dir, "results.json")
if os.path.isfile(file):
name = f"{case_dir}, v = {k}"
set = {"results": file, "name": name, "parent": j, "color": color_families[c][d], "dash":dash[c]}
temp.append(set)
self.log.logger.info(f"Adding baseline file at {file}")
d = (d + 1) % len(color_families[c])
c = (c + 1) % len(color_families)
return temp
def get_type_list(self, type):
type_list = set([])
i = 0
for file in self.files:
with open(file["results"]) as f:
data = json.load(f)
temp = set([])
for name, house in data.items():
try:
if house["type"] == type:
temp.add(name)
except:
pass
if i < 1:
type_list = temp
else:
type_list = type_list.intersection(temp)
self.log.logger.info(f"{len(type_list)} homes found of type {type}: {type_list}")
return type_list
def _import_config(self):
if not os.path.exists(self.config_file):
self.log.logger.error(f"Configuration file does not exist: {self.config_file}")
sys.exit(1)
with open(self.config_file, 'r') as f:
data = toml.load(f)
return data
def plot_environmental_values(self, name, fig, summary, file, fname):
fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=summary["OAT"][0:file["parent"]["ts"]], name=f"OAT (C)", visible='legendonly'))
fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=summary["GHI"][0:file["parent"]["ts"]], name=f"GHI", line={'color':'goldenrod', 'width':8}, visible='legendonly'))
fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=summary["TOU"][0:file["parent"]["ts"]], name=f"TOU Price ($/kWh)", line_shape='hv', visible='legendonly'), secondary_y=True)
fig = self.plot_thermal_bounds(fig, file['parent']['x_lims'], name, fname)
return fig
def plot_thermal_bounds(self, fig, x_lims, name, fname):
ah_file = os.path.join(self.outputs_dir, f"all_homes-{self.config['community']['total_number_homes']}-config.json")
with open(ah_file) as f:
data = json.load(f)
for dict in data:
if dict['name'] == name:
data = dict
fig.add_trace(go.Scatter(x=x_lims, y=data['hvac']['temp_in_min'] * np.ones(len(x_lims)), name=f"Tin_min", fill=None, showlegend=False, mode='lines', line_color='lightsteelblue'))
fig.add_trace(go.Scatter(x=x_lims, y=data['hvac']['temp_in_max'] * np.ones(len(x_lims)), name=f"Tin_bounds", fill='tonexty' , mode='lines', line_color='lightsteelblue'))
fig.add_trace(go.Scatter(x=x_lims, y=data['wh']['temp_wh_min'] * np.ones(len(x_lims)), name=f"Twh_min", fill=None, showlegend=False, mode='lines', line_color='pink'))
fig.add_trace(go.Scatter(x=x_lims, y=data['wh']['temp_wh_max'] * np.ones(len(x_lims)), name=f"Twh_bounds", fill='tonexty' , mode='lines', line_color='pink'))
return fig
def plot_base_home(self, name, fig, data, summary, fname, file, plot_price=True):
fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=data["temp_in_opt"], name=f"Tin - {fname}", legendgroup='tin', line={'color':'blue', 'width':8, 'dash':file['dash']}))
fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=data["temp_wh_opt"], showlegend=True, legendgroup='twh', name=f"Twh - {fname}", line={'color':'firebrick', 'width':8, 'dash':file['dash']}))
fig.update_layout(legend=dict(
yanchor="top",
y=0.99,
xanchor="left",
x=0.03,
font=dict(
size=65),
),
yaxis_title="Temperature (deg C)"
)
return fig
def plot_pv(self, name, fig, data, fname, file):
fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=data["p_pv_opt"], name=f"Ppv (kW)", line_color='orange', line_shape='hv', visible='legendonly'))
fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=data["u_pv_curt_opt"], name=f"U_pv_curt (kW) - {fname}", line_shape='hv', visible='legendonly'))
return fig
def plot_battery(self, name, fig, data, fname, file):
fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=data["e_batt_opt"], name=f"SOC (kW) - {fname}", line_shape='hv', visible='legendonly'))
fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=data["p_batt_ch"], name=f"Pch (kW) - {fname}", line_shape='hv', visible='legendonly'))
fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=data["p_batt_disch"], name=f"Pdis (kW) - {fname}", line_shape='hv', visible='legendonly'))
return fig
def plot_single_home(self, fig):
if self.sample_home is None:
if type is None:
type = "base"
self.log.logger.warning("Specify a home type or name. Proceeding with home of type: \"base\".")
type_list = self._type_list(type)
self.sample_home = random.sample(type_list,1)[0]
self.log.logger.info(f"Proceeding with home: {name}")
flag = False
for file in self.files:
with open(file["results"]) as f:
comm_data = json.load(f)
try:
data = comm_data[self.sample_home]
except:
self.log.logger.error(f"No home with name: {self.sample_home}")
return
type = data["type"]
summary = comm_data["Summary"]
if not flag:
fig = self.plot_environmental_values(self.sample_home, fig, summary, file, file["name"])
flag = True
fig.update_xaxes(title_text="Time of Day (hour)")
fig.update_layout(title_text=f"{self.sample_home} - {type} type")
fig = self.plot_base_home(self.sample_home, fig, data, summary, file["name"], file)
if 'pv' in type:
fig = self.plot_pv(self.sample_home, fig, data, file["name"], file)
if 'batt' in type:
fig = self.plot_battery(self.sample_home, fig, data, file["name"], file)
return fig
def plot_all_homes(self, fig=None):
    """Build a per-home figure for a fixed, hard-coded list of named homes.

    NOTE(review): the ``fig`` argument is ignored — a fresh two-axis subplot
    figure is created for every home — and the figure returned by
    plot_single_home is discarded, so nothing is shown or saved here.
    Confirm whether a show/save step is missing.
    """
    homes = ["Crystal-RXXFA","Myles-XQ5IA","Lillie-NMHUH","Robert-2D73X","Serena-98EPE","Gary-U95TS","Bruno-PVRNB","Dorothy-9XMNY","Jason-INS3S","Alvin-4BAYB",]
    # the loop variable is stored on self because plot_single_home reads
    # self.sample_home rather than taking the home name as a parameter
    for self.sample_home in homes:
        fig = make_subplots(specs=[[{"secondary_y": True}]])
        fig.update_layout(
            font=dict(
                size = 12
            )
        )
        fig = self.plot_single_home(fig)
    return
def plot_baseline(self, fig):
    """Add aggregate load, cumulative load, and running-average traces per file.

    The cumulative and running-average traces are hidden by default
    (toggleable from the legend).
    """
    for file in self.files:
        with open(file["results"]) as f:
            data = json.load(f)
        agg_load = data["Summary"]["p_grid_aggregate"]
        ts = len(data['Summary']['p_grid_aggregate']) - 1
        xs = file['parent']['x_lims']
        clr = file['color']
        fig.add_trace(go.Scatter(
            x=xs, y=agg_load, name=f"Agg Load - {file['name']}",
            line_shape='hv', line={'color': clr, 'width': 4, 'dash': 'solid'}))
        cumulative = np.cumsum(np.divide(agg_load, file['parent']['agg_dt']))
        fig.add_trace(go.Scatter(
            x=xs, y=cumulative, name=f"Cumulative Agg Load - {file['name']}",
            line_shape='hv', visible='legendonly',
            line={'color': clr, 'width': 4, 'dash': 'dash'}))
        running_avg = np.divide(np.cumsum(agg_load), np.arange(ts + 1) + 1)
        fig.add_trace(go.Scatter(
            x=xs, y=running_avg, name=f"Avg Cumulative Agg Load - {file['name']}",
            line_shape='hv', visible='legendonly',
            line={'color': clr, 'width': 4, 'dash': 'dashdot'}))
    return fig
def plot_typ_day(self, fig):
    """Plot the average (composite) daily load profile for each result file.

    Loads are truncated to whole days; files with at most one day of samples
    are skipped. Also styles the legend/axes for the composite figure.

    BUG FIX: the original referenced ``clr`` and ``dash`` without ever
    defining them, raising NameError as soon as a trace was added; they are
    now taken from the file's color and a solid line. Unused per-day
    statistics and dead counters were removed.
    """
    for file in self.files:
        clr = file['color']
        dash = 'solid'
        with open(file["results"]) as f:
            data = json.load(f)
        name = file["name"]
        steps_per_day = 24 * file['parent']['agg_dt']
        loads = np.array(data["Summary"]["p_grid_aggregate"])
        # truncate to an integer number of whole days so reshape is valid
        loads = loads[:len(loads) // steps_per_day * steps_per_day]
        if len(loads) > 24:
            # average across days at each time-of-day step
            composite_day = np.average(loads.reshape(-1, steps_per_day), axis=0)
            fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=composite_day,
                                     name=f"{name}", opacity=0.5, showlegend=True,
                                     line={'color': clr, 'width': 8, 'dash': dash}))
    fig.update_layout(legend=dict(
        yanchor="top",
        y=0.45,
        xanchor="left",
        x=0.7
    ))
    fig.update_layout(
        font=dict(
            size=65,
        ),
        title="Avg Daily Load Profile",
        xaxis_title="Time of Day",
        yaxis_title="Agg. Demand (kW)"
    )
    fig.update_xaxes(
        title_standoff=80
    )
    fig.update_yaxes(
        title_standoff=60
    )
    return fig
def plot_max_and_12hravg(self, fig):
    """Plot each file's daily maximum load and its 12-hour-average setpoint.

    BUG FIX: the original referenced ``dash`` without defining it, raising
    NameError when a trace was added; a solid line is used now. Statistics
    that were computed but never plotted here were removed.
    """
    for file in self.files:
        dash = 'solid'
        with open(file["results"]) as f:
            data = json.load(f)
        name = file["name"]
        rl_setpoint = data['Summary']['p_grid_setpoint']
        if 'clipped' in file['name']:
            # "clipped" runs cap the setpoint to the 45-60 kW band
            rl_setpoint = np.clip(rl_setpoint, 45, 60)
        steps_per_day = 24 * file['parent']['agg_dt']
        loads = np.array(data["Summary"]["p_grid_aggregate"])
        # truncate to whole days so the reshape below is valid
        loads = loads[:len(loads) // steps_per_day * steps_per_day]
        if len(loads) > 24:
            # per-day maximum, repeated per timestep so it plots as steps
            daily_max_loads = np.repeat(
                np.amax(loads.reshape(-1, steps_per_day), axis=1), steps_per_day)
            fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=daily_max_loads,
                                     name=f"{name} - Daily Max", line_shape='hv',
                                     opacity=1, legendgroup="first",
                                     line={'color': 'firebrick', 'dash': dash, 'width': 8}))
            fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=rl_setpoint,
                                     name=f"{name} - 12 Hr Avg", opacity=0.5,
                                     legendgroup="second",
                                     line={'color': 'blue', 'dash': dash, 'width': 8}))
    fig.update_layout(legend=dict(
        yanchor="top",
        y=0.8,
        xanchor="left",
        x=0.7
    ))
    fig.update_layout(
        font=dict(
            size=65,
        ),
        title="12 Hour Avg and Daily Max",
        yaxis_title="Agg. Demand (kW)"
    )
    fig.update_xaxes(
        title_standoff=80
    )
    fig.update_yaxes(
        title_standoff=60
    )
    return fig
def plot_parametric(self, fig):
    """Add per-file parametric traces (setpoint, load, daily max/min/range,
    averages, std, cumulative) to *fig* and print a table of daily stats.

    NOTE(review): ``daily_range_loads`` and ``daily_std_loads`` are each
    computed twice — the per-day versions are immediately overwritten by
    rolling +/-6-step window versions — confirm which definition is intended.
    ``composite_day`` is computed but never used here.
    """
    # summary table printed once all files have been processed
    all_daily_stats = PrettyTable(['run name', 'avg daily max', 'std daily max','overall max', 'avg daily range'])
    for file in self.files:
        clr = file['color']
        with open(file["results"]) as f:
            data = json.load(f)
        name = file["name"]
        ts = len(data['Summary']['p_grid_aggregate'])-1  # index of the last timestep
        rl_setpoint = data['Summary']['p_grid_setpoint']
        if 'clipped' in file['name']:
            # "clipped" runs cap the setpoint to the 45-60 kW band
            rl_setpoint = np.clip(rl_setpoint, 45, 60)
        loads = np.array(data["Summary"]["p_grid_aggregate"])
        # truncate to whole days so the (-1, 24*dt) reshapes below are valid
        loads = loads[:len(loads) // (24*file['parent']['agg_dt']) * 24 * file['parent']['agg_dt']]
        if len(loads) >= 24:
            # per-day statistics, repeated per timestep so they plot as steps
            daily_max_loads = np.repeat(np.amax(loads.reshape(-1, 24*file['parent']['agg_dt']), axis=1), 24*file['parent']['agg_dt'])
            daily_min_loads = np.repeat(np.amin(loads.reshape(-1, 24*file['parent']['agg_dt']), axis=1), 24*file['parent']['agg_dt'])
            daily_range_loads = np.subtract(daily_max_loads, daily_min_loads)
            # overwritten: rolling +/-6-step range instead of the per-day range
            daily_range_loads = [abs(loads[max(i-6, 0)] - loads[min(i+6, len(loads)-1)]) for i in range(len(loads))]
            daily_avg_loads = np.repeat(np.mean(loads.reshape(-1, 24*file['parent']['agg_dt']), axis=1), 24*file['parent']['agg_dt'])
            # rolling 12-step standard deviation at each timestep
            daily_std_loads = [np.std(loads[max(i-6, 0):i+6]) for i in range(len(loads))]
            composite_day = np.average(loads.reshape(-1, 24*file['parent']['agg_dt']), axis=0)
            fig.update_layout(legend=dict(
                yanchor="top",
                y=0.45,
                xanchor="left",
                x=0.5
            ))
            fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=rl_setpoint, name=f"{name} - 12 Hr Avg", opacity=0.5, line={'color':clr, 'width':8}))
            fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=data["Summary"]["p_grid_aggregate"], name=f"Agg Load - RL - {name}", line_shape='hv', line={'color':clr}))
            fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=daily_max_loads, name=f"{name} - Daily Max", line_shape='hv', opacity=0.5, line={'color':clr, 'dash':'dot'}))
            fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=daily_min_loads, name=f"Daily Min Agg Load - RL - {name}", line_shape='hv', opacity=0.5, line={'color':clr, 'dash':'dash'}))
            fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=daily_range_loads, name=f"Daily Agg Load Range - RL - {name}", line_shape='hv', opacity=0.5, line={'color':clr}))
            fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=np.average(daily_range_loads) * np.ones(len(loads)), name=f"Avg Daily Agg Load Range - RL - {name}", line_shape='hv', opacity=0.5, line={'color':clr}))
            fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=daily_avg_loads, name=f"Daily Avg Agg Load - RL - {name}", line_shape='hv', opacity=0.5, line={'color':clr, 'dash':'dash'}))
            fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=daily_std_loads, name=f"Daily Std Agg Load - RL - {name}", line_shape='hv', opacity=0.5, line={'color':clr, 'dash':'dashdot'}))
            fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=np.average(daily_std_loads) * np.ones(len(loads)), name=f"Avg Daily Std Agg Load - RL - {name}", line_shape='hv', opacity=0.5, line={'color':clr, 'dash':'dashdot'}))
            fig.add_trace(go.Scatter(x=file['parent']['x_lims'], y=np.cumsum(np.divide(data["Summary"]["p_grid_aggregate"],file['parent']['agg_dt'])), name=f"{name}", line_shape='hv', visible='legendonly', line={'color':clr, }))
            all_daily_stats.add_row([file['name'], np.average(daily_max_loads), np.std(daily_max_loads), max(daily_max_loads), np.average(daily_range_loads)])
        else:
            self.log.logger.warning("Not enough data collected to have daily stats, try running the aggregator for longer.")
    print(all_daily_stats)
    return fig
def rl2baseline(self, fig):
    """Overlay baseline and parametric comparison plots on *fig*.

    Returns the untouched figure with a warning when there are no runs.
    """
    if not self.files:
        self.log.logger.warning("No aggregator runs found for analysis.")
        return fig
    fig = self.plot_baseline(fig)
    fig = self.plot_parametric(fig)
    fig.update_layout(title_text="RL Baseline Comparison")
    fig.update_layout(
        title="Avg Daily Load Profile",
        xaxis_title="Time of Day",
        yaxis_title="Agg. Demand (kWh)",
    )
    return fig
def all_rps(self, fig):
    """Histogram each file's reward-price signals (row 1) and their deviation
    from the learned mean ``mu`` (row 2), with an exploration-rate band."""
    for file in self.files:
        with open(file['results']) as f:
            results = json.load(f)
        rps = results['Summary']['RP']
        fig.add_trace(go.Histogram(x=rps, name=f"{file['name']}"), row=1, col=1)
        with open(file['q_results']) as f:
            q_data = json.load(f)["horizon"]
        mu = np.array(q_data["mu"])
        std = self.config['agg']['parameters']['exploration_rate'][0]
        delta = np.subtract(mu, rps)
        fig.add_trace(go.Histogram(x=delta, name=f"{file['name']}"), row=2, col=1)
        # shaded +/- std band under the deviation histogram
        band_height = 0.3 * len(rps)
        fig.add_trace(go.Scatter(x=[-std, -std, std, std],
                                 y=[0, band_height, band_height, 0],
                                 fill="toself"),
                      row=2, col=1)
    return fig
if __name__ == "__main__":
    # entry point: build the reformatter and run the full analysis
    Reformat().main()
| true | true |
1c2d9dc8a49a6ca4002890c9e0ada41524f390da | 4,229 | py | Python | tests/tests_coord.py | macph/easement-curve | e1657682db3bc5b8d59a1fb06816732b784d8314 | [
"MIT"
] | 1 | 2019-05-31T03:24:40.000Z | 2019-05-31T03:24:40.000Z | tests/tests_coord.py | macph/easement-curve | e1657682db3bc5b8d59a1fb06816732b784d8314 | [
"MIT"
] | null | null | null | tests/tests_coord.py | macph/easement-curve | e1657682db3bc5b8d59a1fb06816732b784d8314 | [
"MIT"
] | null | null | null | # MIT License, copyright Ewan Macpherson, 2016; see LICENCE in root directory
# Test script for the TrackCoord class
import math
import os
import sys
import unittest
sys.path.insert(0, os.path.abspath('..'))
import ec.coord
class CoordGeneralTests(unittest.TestCase):
    """Basic construction checks for TrackCoord."""

    def test_position(self):
        coord = ec.coord.TrackCoord(5, 5, 0, ec.coord.Q.NONE, curvature=0)
        self.assertEqual((coord.pos_x, coord.pos_z), (5, 5))

    def test_exception_position(self):
        # a non-numeric position must be rejected at construction time
        with self.assertRaisesRegex(ec.coord.CoordError, 'Position values'):
            ec.coord.TrackCoord(None, 5, 0, ec.coord.Q.NONE, curvature=0)
class CoordBearingTests(unittest.TestCase):
    """Bearing and compass-quadrant handling in TrackCoord."""

    @staticmethod
    def _track(rotation, quad):
        # convenience: straight track at the origin; only rotation/quad vary
        return ec.coord.TrackCoord(0, 0, rotation, quad, curvature=0)

    def test_exception_rotation_over_range(self):
        with self.assertRaisesRegex(ec.coord.CoordError, 'y-axis rotation'):
            self._track(100, ec.coord.Q.NE)

    def test_exception_rotation_under_range(self):
        with self.assertRaisesRegex(ec.coord.CoordError, 'y-axis rotation'):
            self._track(-100, ec.coord.Q.NE)

    def test_exception_wrong_bearing(self):
        with self.assertRaisesRegex(ValueError, 'bearing needs to'):
            self._track(None, ec.coord.Q.NONE)

    def test_exception_wrong_quad(self):
        with self.assertRaisesRegex(ec.coord.CoordError, 'compass quadrant'):
            ec.coord.TrackCoord(0, 0, 0, quad="A")

    def test_quad_ne(self):
        self.assertAlmostEqual(self._track(30, ec.coord.Q.NE).bearing.deg, 30)

    def test_quad_se(self):
        self.assertAlmostEqual(self._track(30, ec.coord.Q.SE).bearing.deg, 150)

    def test_quad_sw(self):
        self.assertAlmostEqual(self._track(30, ec.coord.Q.SW).bearing.deg, 210)

    def test_quad_nw(self):
        self.assertAlmostEqual(self._track(30, ec.coord.Q.NW).bearing.deg, 330)

    def test_quad_neg(self):
        # negative rotations are reflected within the quadrant
        self.assertAlmostEqual(self._track(-60, ec.coord.Q.NE).bearing.deg, 60)

    def test_bearing(self):
        # Q.NONE takes the rotation as an absolute bearing in radians
        self.assertAlmostEqual(self._track(math.pi, ec.coord.Q.NONE).bearing.deg, 180)
class CoordCurvatureTests(unittest.TestCase):
    """Curvature, radius, turn direction and quadrant derivation in TrackCoord."""

    @staticmethod
    def _track(bearing, curvature):
        # track at the origin with an absolute (Q.NONE) bearing and curvature
        return ec.coord.TrackCoord(0, 0, bearing, ec.coord.Q.NONE, curvature=curvature)

    def test_radius_straight(self):
        self.assertEqual(self._track(0, 0).radius, 0)

    def test_radius_left(self):
        self.assertEqual(self._track(0, 0.01).radius, 100)

    def test_radius_right(self):
        # radius is the magnitude; sign of curvature only sets the direction
        self.assertEqual(self._track(0, -0.01).radius, 100)

    def test_clockwise_straight(self):
        self.assertEqual(self._track(0, 0).clockwise, 'straight')

    def test_clockwise_acw(self):
        self.assertEqual(self._track(0, 0.01).clockwise, 'ACW')

    def test_clockwise_cw(self):
        self.assertEqual(self._track(0, -0.01).clockwise, 'CW')

    def _assert_quad(self, bearing, expected_deg, expected_quad):
        # helper: the .quad property splits a bearing into (rotation, quadrant)
        rotation, quad = self._track(bearing, 0).quad
        self.assertAlmostEqual(rotation, expected_deg)
        self.assertEqual(quad, expected_quad.name)

    def test_get_quad_ne(self):
        self._assert_quad(math.pi / 6, 30, ec.coord.Q.NE)

    def test_get_quad_se(self):
        self._assert_quad(math.pi * (4 / 6), 60, ec.coord.Q.SE)

    def test_get_quad_sw(self):
        self._assert_quad(math.pi * (8 / 6), 60, ec.coord.Q.SW)

    def test_get_quad_nw(self):
        self._assert_quad(math.pi * (11 / 6), 30, ec.coord.Q.NW)
| 36.456897 | 88 | 0.653819 |
import math
import os
import sys
import unittest
sys.path.insert(0, os.path.abspath('..'))
import ec.coord
class CoordGeneralTests(unittest.TestCase):
def test_position(self):
tc = ec.coord.TrackCoord(5, 5, 0, ec.coord.Q.NONE, curvature=0)
self.assertEqual((tc.pos_x, tc.pos_z), (5, 5))
def test_exception_position(self):
with self.assertRaisesRegex(ec.coord.CoordError, 'Position values'):
ec.coord.TrackCoord(None, 5, 0, ec.coord.Q.NONE, curvature=0)
class CoordBearingTests(unittest.TestCase):
def test_exception_rotation_over_range(self):
with self.assertRaisesRegex(ec.coord.CoordError, 'y-axis rotation'):
ec.coord.TrackCoord(0, 0, 100, ec.coord.Q.NE, curvature=0)
def test_exception_rotation_under_range(self):
with self.assertRaisesRegex(ec.coord.CoordError, 'y-axis rotation'):
ec.coord.TrackCoord(0, 0, -100, ec.coord.Q.NE, curvature=0)
def test_exception_wrong_bearing(self):
with self.assertRaisesRegex(ValueError, 'bearing needs to'):
ec.coord.TrackCoord(0, 0, None, ec.coord.Q.NONE, curvature=0)
def test_exception_wrong_quad(self):
with self.assertRaisesRegex(ec.coord.CoordError, 'compass quadrant'):
ec.coord.TrackCoord(0, 0, 0, quad="A")
def test_quad_ne(self):
tc = ec.coord.TrackCoord(0, 0, 30, ec.coord.Q.NE, curvature=0)
self.assertAlmostEqual(tc.bearing.deg, 30)
def test_quad_se(self):
tc = ec.coord.TrackCoord(0, 0, 30, ec.coord.Q.SE, curvature=0)
self.assertAlmostEqual(tc.bearing.deg, 150)
def test_quad_sw(self):
tc = ec.coord.TrackCoord(0, 0, 30, ec.coord.Q.SW, curvature=0)
self.assertAlmostEqual(tc.bearing.deg, 210)
def test_quad_nw(self):
tc = ec.coord.TrackCoord(0, 0, 30, ec.coord.Q.NW, curvature=0)
self.assertAlmostEqual(tc.bearing.deg, 330)
def test_quad_neg(self):
tc = ec.coord.TrackCoord(0, 0, -60, ec.coord.Q.NE, curvature=0)
self.assertAlmostEqual(tc.bearing.deg, 60)
def test_bearing(self):
tc = ec.coord.TrackCoord(0, 0, math.pi, ec.coord.Q.NONE, curvature=0)
self.assertAlmostEqual(tc.bearing.deg, 180)
class CoordCurvatureTests(unittest.TestCase):
def test_radius_straight(self):
tc = ec.coord.TrackCoord(0, 0, 0, ec.coord.Q.NONE, curvature=0)
self.assertEqual(tc.radius, 0)
def test_radius_left(self):
tc = ec.coord.TrackCoord(0, 0, 0, ec.coord.Q.NONE, curvature=0.01)
self.assertEqual(tc.radius, 100)
def test_radius_right(self):
tc = ec.coord.TrackCoord(0, 0, 0, ec.coord.Q.NONE, curvature=-0.01)
self.assertEqual(tc.radius, 100)
def test_clockwise_straight(self):
tc = ec.coord.TrackCoord(0, 0, 0, ec.coord.Q.NONE, curvature=0)
self.assertEqual(tc.clockwise, 'straight')
def test_clockwise_acw(self):
tc = ec.coord.TrackCoord(0, 0, 0, ec.coord.Q.NONE, curvature=0.01)
self.assertEqual(tc.clockwise, 'ACW')
def test_clockwise_cw(self):
tc = ec.coord.TrackCoord(0, 0, 0, ec.coord.Q.NONE, curvature=-0.01)
self.assertEqual(tc.clockwise, 'CW')
def test_get_quad_ne(self):
tc = ec.coord.TrackCoord(0, 0, math.pi / 6, ec.coord.Q.NONE, curvature=0)
r, q = tc.quad
self.assertAlmostEqual(r, 30)
self.assertEqual(q, ec.coord.Q.NE.name)
def test_get_quad_se(self):
tc = ec.coord.TrackCoord(0, 0, math.pi * (4 / 6), ec.coord.Q.NONE, curvature=0)
r, q = tc.quad
self.assertAlmostEqual(r, 60)
self.assertEqual(q, ec.coord.Q.SE.name)
def test_get_quad_sw(self):
tc = ec.coord.TrackCoord(0, 0, math.pi * (8 / 6), ec.coord.Q.NONE, curvature=0)
r, q = tc.quad
self.assertAlmostEqual(r, 60)
self.assertEqual(q, ec.coord.Q.SW.name)
def test_get_quad_nw(self):
tc = ec.coord.TrackCoord(0, 0, math.pi * (11 / 6), ec.coord.Q.NONE, curvature=0)
r, q = tc.quad
self.assertAlmostEqual(r, 30)
self.assertEqual(q, ec.coord.Q.NW.name)
| true | true |
1c2d9ea947e157fe845432f224f613ebae49404c | 6,518 | py | Python | tests/h/services/feature_test.py | julien-cheng/h | 36c8ec044725720cf36f0986cdf025395aca8929 | [
"BSD-2-Clause"
] | 2 | 2019-08-04T07:22:11.000Z | 2020-07-17T05:01:41.000Z | tests/h/services/feature_test.py | fuelpress/i.fuel.press | af7b25895d813af0fef656dcf483afe852a99d76 | [
"BSD-2-Clause"
] | null | null | null | tests/h/services/feature_test.py | fuelpress/i.fuel.press | af7b25895d813af0fef656dcf483afe852a99d76 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import mock
import pytest
from h import models
from h.services.feature import (
FeatureRequestProperty,
FeatureService,
UnknownFeatureError,
feature_service_factory,
)
class TestFeatureRequestProperty(object):
    """Tests for the ``request.feature`` request property.

    ``request.feature("flag")`` should proxy to the registered feature
    service's ``enabled``; ``request.feature.all()`` to its ``all`` — both
    passing the request's user through.
    """

    def test_single_feature_fetch(self, pyramid_request, feature_service):
        result = pyramid_request.feature("foo")

        feature_service.enabled.assert_called_once_with("foo", user=mock.sentinel.user)
        assert result == feature_service.enabled.return_value

    def test_all_feature_fetch(self, pyramid_request, feature_service):
        result = pyramid_request.feature.all()

        feature_service.all.assert_called_once_with(user=mock.sentinel.user)
        assert result == feature_service.all.return_value

    @pytest.fixture
    def feature_service(self, pyramid_config, pyramid_request):
        # spec_set constrains the mock to the real service's API so that
        # calling a misspelled method fails loudly
        svc = mock.Mock(spec_set=feature_service_factory(None, pyramid_request))
        pyramid_config.register_service(svc, name="feature")
        return svc

    @pytest.fixture
    def pyramid_config(self, pyramid_config, pyramid_request):
        from pyramid.request import apply_request_extensions

        # install FeatureRequestProperty as the reified ``feature`` property
        # and apply it to the already-created test request
        pyramid_config.add_request_method(
            FeatureRequestProperty, name="feature", reify=True
        )
        apply_request_extensions(pyramid_request)
        return pyramid_config

    @pytest.fixture
    def pyramid_request(self, pyramid_request):
        # Remove the preexisting dummy feature client so the real property
        # installed above is exercised instead
        delattr(pyramid_request, "feature")
        pyramid_request.user = mock.sentinel.user
        return pyramid_request
@pytest.mark.usefixtures("features")
class TestFeatureService(object):
    """Tests for FeatureService.enabled / FeatureService.all.

    The ``features`` fixture patches the Feature model so a fixed flag set
    exists: two disabled flags ("foo", "bar") plus one flag each enabled for
    everyone, staff, admins, and a cohort.

    CONSISTENCY FIX: the original mixed ``FeatureService(session=db_session)``
    and ``FeatureService(db_session)``; the positional form is used uniformly.
    """

    def test_enabled_true_if_overridden(self, db_session):
        svc = FeatureService(db_session, overrides=["foo"])

        assert svc.enabled("foo") is True

    def test_enabled_false_if_everyone_false(self, db_session):
        svc = FeatureService(db_session)

        assert svc.enabled("foo") is False

    def test_enabled_true_if_everyone_true(self, db_session):
        svc = FeatureService(db_session)

        assert svc.enabled("on-for-everyone") is True

    def test_enabled_false_when_admins_true_no_user(self, db_session):
        svc = FeatureService(db_session)

        assert svc.enabled("on-for-admins") is False

    def test_enabled_false_when_admins_true_nonadmin_user(self, db_session, factories):
        svc = FeatureService(db_session)
        user = factories.User(admin=False)

        assert svc.enabled("on-for-admins", user=user) is False

    def test_enabled_true_when_admins_true_admin_user(self, db_session, factories):
        svc = FeatureService(db_session)
        user = factories.User(admin=True)

        assert svc.enabled("on-for-admins", user=user) is True

    def test_enabled_false_when_staff_true_no_user(self, db_session):
        svc = FeatureService(db_session)

        assert svc.enabled("on-for-staff") is False

    def test_enabled_false_when_staff_true_nonstaff_user(self, db_session, factories):
        svc = FeatureService(db_session)
        user = factories.User(staff=False)

        assert svc.enabled("on-for-staff", user=user) is False

    def test_enabled_true_when_staff_true_staff_user(self, db_session, factories):
        svc = FeatureService(db_session)
        user = factories.User(staff=True)

        assert svc.enabled("on-for-staff", user=user) is True

    def test_enabled_false_when_cohort_no_user(self, db_session):
        svc = FeatureService(db_session)

        assert svc.enabled("on-for-cohort") is False

    def test_enabled_false_when_cohort_user_not_in_cohort(self, db_session, factories):
        svc = FeatureService(db_session)
        user = factories.User()

        assert svc.enabled("on-for-cohort", user=user) is False

    def test_enabled_true_when_cohort_user_in_cohort(
        self, cohort, db_session, factories
    ):
        svc = FeatureService(db_session)
        user = factories.User(cohorts=[cohort])

        assert svc.enabled("on-for-cohort", user=user) is True

    def test_enabled_raises_for_unknown_features(self, db_session):
        svc = FeatureService(db_session)

        with pytest.raises(UnknownFeatureError):
            svc.enabled("wibble")

    def test_all_returns_feature_dictionary(self, db_session):
        svc = FeatureService(db_session)

        result = svc.all()

        assert result == {
            "foo": False,
            "bar": False,
            "on-for-everyone": True,
            "on-for-staff": False,
            "on-for-admins": False,
            "on-for-cohort": False,
        }

    def test_all_respects_user_param(self, db_session, factories):
        svc = FeatureService(db_session)
        user = factories.User(staff=True)

        result = svc.all(user=user)

        assert result == {
            "foo": False,
            "bar": False,
            "on-for-everyone": True,
            "on-for-staff": True,
            "on-for-admins": False,
            "on-for-cohort": False,
        }

    @pytest.fixture
    def features(self, cohort, factories, patch):
        # replace the model's flag listing with a fixed, known set of flags
        model = patch("h.services.feature.models.Feature")
        model.all.return_value = [
            factories.Feature(name="foo"),
            factories.Feature(name="bar"),
            factories.Feature(name="on-for-everyone", everyone=True),
            factories.Feature(name="on-for-staff", staff=True),
            factories.Feature(name="on-for-admins", admins=True),
            factories.Feature(name="on-for-cohort", cohorts=[cohort]),
        ]

    @pytest.fixture
    def cohort(self):
        return models.FeatureCohort(name="cohort")
class TestFeatureServiceFactory(object):
    """Tests for feature_service_factory."""

    def test_passes_session(self, pyramid_request):
        service = feature_service_factory(None, pyramid_request)

        assert service.session is pyramid_request.db

    def test_passes_overrides_parsed_from_get_params(self, pyramid_request):
        # only ``__feature__[...]`` query params become overrides
        pyramid_request.GET["something-else"] = ""
        pyramid_request.GET["__feature__[foo]"] = ""
        pyramid_request.GET["__feature__[bar]"] = ""

        service = feature_service_factory(None, pyramid_request)

        assert sorted(service.overrides) == sorted(["foo", "bar"])

    @pytest.fixture
    def pyramid_request(self, pyramid_request):
        # the factory reads the user off the request; a sentinel suffices
        pyramid_request.user = mock.sentinel.user
        return pyramid_request
| 33.425641 | 87 | 0.683645 |
from __future__ import unicode_literals
import mock
import pytest
from h import models
from h.services.feature import (
FeatureRequestProperty,
FeatureService,
UnknownFeatureError,
feature_service_factory,
)
class TestFeatureRequestProperty(object):
def test_single_feature_fetch(self, pyramid_request, feature_service):
result = pyramid_request.feature("foo")
feature_service.enabled.assert_called_once_with("foo", user=mock.sentinel.user)
assert result == feature_service.enabled.return_value
def test_all_feature_fetch(self, pyramid_request, feature_service):
result = pyramid_request.feature.all()
feature_service.all.assert_called_once_with(user=mock.sentinel.user)
assert result == feature_service.all.return_value
@pytest.fixture
def feature_service(self, pyramid_config, pyramid_request):
svc = mock.Mock(spec_set=feature_service_factory(None, pyramid_request))
pyramid_config.register_service(svc, name="feature")
return svc
@pytest.fixture
def pyramid_config(self, pyramid_config, pyramid_request):
from pyramid.request import apply_request_extensions
pyramid_config.add_request_method(
FeatureRequestProperty, name="feature", reify=True
)
apply_request_extensions(pyramid_request)
return pyramid_config
@pytest.fixture
def pyramid_request(self, pyramid_request):
delattr(pyramid_request, "feature")
pyramid_request.user = mock.sentinel.user
return pyramid_request
@pytest.mark.usefixtures("features")
class TestFeatureService(object):
def test_enabled_true_if_overridden(self, db_session):
svc = FeatureService(session=db_session, overrides=["foo"])
assert svc.enabled("foo") is True
def test_enabled_false_if_everyone_false(self, db_session):
svc = FeatureService(session=db_session)
assert svc.enabled("foo") is False
def test_enabled_true_if_everyone_true(self, db_session):
svc = FeatureService(session=db_session)
assert svc.enabled("on-for-everyone") is True
def test_enabled_false_when_admins_true_no_user(self, db_session):
svc = FeatureService(session=db_session)
assert svc.enabled("on-for-admins") is False
def test_enabled_false_when_admins_true_nonadmin_user(self, db_session, factories):
svc = FeatureService(session=db_session)
user = factories.User(admin=False)
assert svc.enabled("on-for-admins", user=user) is False
def test_enabled_true_when_admins_true_admin_user(self, db_session, factories):
svc = FeatureService(session=db_session)
user = factories.User(admin=True)
assert svc.enabled("on-for-admins", user=user) is True
def test_enabled_false_when_staff_true_no_user(self, db_session):
svc = FeatureService(session=db_session)
assert svc.enabled("on-for-staff") is False
def test_enabled_false_when_staff_true_nonstaff_user(self, db_session, factories):
svc = FeatureService(session=db_session)
user = factories.User(staff=False)
assert svc.enabled("on-for-staff", user=user) is False
def test_enabled_true_when_staff_true_staff_user(self, db_session, factories):
svc = FeatureService(db_session)
user = factories.User(staff=True)
assert svc.enabled("on-for-staff", user=user) is True
def test_enabled_false_when_cohort_no_user(self, db_session):
svc = FeatureService(db_session)
assert svc.enabled("on-for-cohort") is False
def test_enabled_false_when_cohort_user_not_in_cohort(self, db_session, factories):
svc = FeatureService(db_session)
user = factories.User()
assert svc.enabled("on-for-cohort", user=user) is False
def test_enabled_true_when_cohort_user_in_cohort(
self, cohort, db_session, factories
):
svc = FeatureService(db_session)
user = factories.User(cohorts=[cohort])
assert svc.enabled("on-for-cohort", user=user) is True
def test_enabled_raises_for_unknown_features(self, db_session):
svc = FeatureService(session=db_session)
with pytest.raises(UnknownFeatureError):
svc.enabled("wibble")
def test_all_returns_feature_dictionary(self, db_session):
svc = FeatureService(db_session)
result = svc.all()
assert result == {
"foo": False,
"bar": False,
"on-for-everyone": True,
"on-for-staff": False,
"on-for-admins": False,
"on-for-cohort": False,
}
def test_all_respects_user_param(self, db_session, factories):
svc = FeatureService(db_session)
user = factories.User(staff=True)
result = svc.all(user=user)
assert result == {
"foo": False,
"bar": False,
"on-for-everyone": True,
"on-for-staff": True,
"on-for-admins": False,
"on-for-cohort": False,
}
@pytest.fixture
def features(self, cohort, factories, patch):
model = patch("h.services.feature.models.Feature")
model.all.return_value = [
factories.Feature(name="foo"),
factories.Feature(name="bar"),
factories.Feature(name="on-for-everyone", everyone=True),
factories.Feature(name="on-for-staff", staff=True),
factories.Feature(name="on-for-admins", admins=True),
factories.Feature(name="on-for-cohort", cohorts=[cohort]),
]
@pytest.fixture
def cohort(self):
return models.FeatureCohort(name="cohort")
class TestFeatureServiceFactory(object):
def test_passes_session(self, pyramid_request):
svc = feature_service_factory(None, pyramid_request)
assert svc.session is pyramid_request.db
def test_passes_overrides_parsed_from_get_params(self, pyramid_request):
pyramid_request.GET["something-else"] = ""
pyramid_request.GET["__feature__[foo]"] = ""
pyramid_request.GET["__feature__[bar]"] = ""
svc = feature_service_factory(None, pyramid_request)
assert sorted(svc.overrides) == sorted(["foo", "bar"])
@pytest.fixture
def pyramid_request(self, pyramid_request):
pyramid_request.user = mock.sentinel.user
return pyramid_request
| true | true |
1c2d9ef17af99fb74c0778340b827b7ed380f649 | 1,155 | py | Python | Languages/Python/mcm.py | bluey-crypto/Hacktoberfest | c826d5faf1d1c860dbffe665e6a7cf1e35ba76ba | [
"MIT"
] | 1 | 2020-10-03T03:17:03.000Z | 2020-10-03T03:17:03.000Z | Languages/Python/mcm.py | bluey-crypto/Hacktoberfest | c826d5faf1d1c860dbffe665e6a7cf1e35ba76ba | [
"MIT"
] | 1 | 2020-10-01T18:03:45.000Z | 2020-10-01T18:03:45.000Z | Languages/Python/mcm.py | bluey-crypto/Hacktoberfest | c826d5faf1d1c860dbffe665e6a7cf1e35ba76ba | [
"MIT"
] | 4 | 2020-10-07T14:58:50.000Z | 2020-10-24T10:13:17.000Z | # Dynamic Programming Python implementation of Matrix
# Chain Multiplication. See the Cormen book for details
# of the following algorithm
import sys
# Matrix Ai has dimension p[i-1] x p[i] for i = 1..n
def MatrixChainOrder(p, n):
    """Return the minimum number of scalar multiplications needed to
    multiply the matrix chain A1..A(n-1), where Ai is p[i-1] x p[i].

    Standard O(n^3) dynamic program (CLRS 15.2). For simplicity, row 0 and
    column 0 of m are allocated but unused.

    BUG FIX: ``sys.maxint`` was removed in Python 3; ``sys.maxsize`` is used
    as the "infinite" initial cost instead.
    """
    m = [[0 for _ in range(n)] for _ in range(n)]

    # cost is zero when "multiplying" a single matrix
    for i in range(1, n):
        m[i][i] = 0

    # L is the length of the sub-chain being solved
    for L in range(2, n):
        for i in range(1, n - L + 1):
            j = i + L - 1
            m[i][j] = sys.maxsize
            for k in range(i, j):
                # cost of splitting the product between A_k and A_{k+1}
                q = m[i][k] + m[k + 1][j] + p[i - 1] * p[k] * p[j]
                if q < m[i][j]:
                    m[i][j] = q

    return m[1][n - 1]
# Driver: exercise MatrixChainOrder on a small example chain
arr = [1, 2, 3, 4]
size = len(arr)
result = MatrixChainOrder(arr, size)
print("Minimum number of multiplications is " + str(result))
# This Code is contributed by Bhavya Jain
| 27.5 | 60 | 0.630303 |
import sys
def MatrixChainOrder(p, n):
m = [[0 for x in range(n)] for x in range(n)]
for i in range(1, n):
m[i][i] = 0
for L in range(2, n):
for i in range(1, n-L + 1):
j = i + L-1
m[i][j] = sys.maxint
for k in range(i, j):
q = m[i][k] + m[k + 1][j] + p[i-1]*p[k]*p[j]
if q < m[i][j]:
m[i][j] = q
return m[1][n-1]
arr = [1, 2, 3, 4]
size = len(arr)
print("Minimum number of multiplications is " +
str(MatrixChainOrder(arr, size)))
| true | true |
1c2d9ef1f3d3556468e9fbc47d7384fea9f3c2a0 | 1,313 | py | Python | backend/battles/migrations/0004_battleteam.py | gabrielaleal/pokebattle | 3259204eb34f27a5e79f2bbff57994f435b624c1 | [
"MIT"
] | 1 | 2020-03-02T18:15:29.000Z | 2020-03-02T18:15:29.000Z | backend/battles/migrations/0004_battleteam.py | gabrielaleal/pokebattle | 3259204eb34f27a5e79f2bbff57994f435b624c1 | [
"MIT"
] | 12 | 2020-03-18T21:50:29.000Z | 2022-02-19T00:30:14.000Z | backend/battles/migrations/0004_battleteam.py | gabrielaleal/pokebattle | 3259204eb34f27a5e79f2bbff57994f435b624c1 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.10 on 2020-02-28 14:20
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration (Django 2.2.10): create the BattleTeam model.

    A BattleTeam links a creator (user) and a battle to the three pokemon
    chosen for that side; deleting the battle, the user, or a pokemon
    cascades to the team.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('pokemon', '0001_initial'),
        ('battles', '0003_auto_20200228_1258'),
    ]

    operations = [
        migrations.CreateModel(
            name='BattleTeam',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # reverse accessors: battle.teams and user.teams
                ('battle', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='teams', to='battles.Battle')),
                ('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='teams', to=settings.AUTH_USER_MODEL)),
                # related_name='+' disables the reverse accessor on Pokemon
                ('pokemon_1', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='pokemon.Pokemon')),
                ('pokemon_2', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='pokemon.Pokemon')),
                ('pokemon_3', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='pokemon.Pokemon')),
            ],
        ),
    ]
| 45.275862 | 143 | 0.66032 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('pokemon', '0001_initial'),
('battles', '0003_auto_20200228_1258'),
]
operations = [
migrations.CreateModel(
name='BattleTeam',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('battle', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='teams', to='battles.Battle')),
('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='teams', to=settings.AUTH_USER_MODEL)),
('pokemon_1', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='pokemon.Pokemon')),
('pokemon_2', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='pokemon.Pokemon')),
('pokemon_3', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='pokemon.Pokemon')),
],
),
]
| true | true |
1c2d9f9ad36e481653364b261c99e4779f4cc65c | 2,570 | py | Python | entropica_qaoa/tests/test_vqe.py | NunoEdgarGFlowHub/entropica_qaoa | bc9496320dbeadeac1302bda056e9d7b67607e30 | [
"Apache-2.0"
] | null | null | null | entropica_qaoa/tests/test_vqe.py | NunoEdgarGFlowHub/entropica_qaoa | bc9496320dbeadeac1302bda056e9d7b67607e30 | [
"Apache-2.0"
] | null | null | null | entropica_qaoa/tests/test_vqe.py | NunoEdgarGFlowHub/entropica_qaoa | bc9496320dbeadeac1302bda056e9d7b67607e30 | [
"Apache-2.0"
] | null | null | null | """
Test that all the components of vqe play nicely together
"""
import numpy as np
import pytest
from scipy.optimize import minimize
from pyquil.paulis import PauliSum, PauliTerm
from pyquil.api import WavefunctionSimulator, local_qvm, get_qc
from pyquil.quil import Program
from pyquil.gates import RX, CNOT
from entropica_qaoa.vqe.cost_function import (PrepareAndMeasureOnWFSim,
PrepareAndMeasureOnQVM)
# gonna need this program and hamiltonian for both tests. So define them globally
# hamiltonian = PauliSum.from_compact_str("(-1.0)*Z0*Z1 + 0.8*Z0 + (-0.5)*Z1")
# Two-qubit test hamiltonian: (-1.0)*Z0*Z1 + 0.8*Z0 + (-0.5)*Z1,
# built term by term (see the comment above for the compact-string form).
term1 = PauliTerm("Z", 0, -1)
term1 *= PauliTerm("Z", 1)
term2 = PauliTerm("Z", 0, 0.8)
term3 = PauliTerm("Z", 1, -0.5)
hamiltonian = PauliSum([term1, term2, term3])
# Parametric ansatz: an RX layer on each qubit, a CNOT, then a second RX
# layer; the four rotation angles live in the "params" memory region.
prepare_ansatz = Program()
params = prepare_ansatz.declare("params", memory_type="REAL", memory_size=4)
prepare_ansatz.inst(RX(params[0], 0))
prepare_ansatz.inst(RX(params[1], 1))
prepare_ansatz.inst(CNOT(0, 1))
prepare_ansatz.inst(RX(params[2], 0))
prepare_ansatz.inst(RX(params[3], 1))
# Default starting angles for the optimizer.
p0 = [0, 0, 0, 0]
@pytest.mark.slow
def test_vqe_on_WFSim():
    """End-to-end VQE run on the wavefunction simulator.

    The optimizer should drive the ansatz into the |11> state, the ground
    state of the test hamiltonian with energy -1.3.
    """
    simulator = WavefunctionSimulator()
    cost_function = PrepareAndMeasureOnWFSim(
        prepare_ansatz=prepare_ansatz,
        make_memory_map=lambda angles: {"params": angles},
        hamiltonian=hamiltonian,
        sim=simulator,
        scalar_cost_function=True,
    )
    with local_qvm():
        result = minimize(cost_function, p0, tol=1e-3, method="COBYLA")
    final_state = simulator.wavefunction(prepare_ansatz, {"params": result["x"]})
    assert np.allclose(final_state.probabilities(), [0, 0, 0, 1], rtol=1.5, atol=0.01)
    assert np.allclose(result["fun"], -1.3)
    assert result["success"]
@pytest.mark.slow
def test_vqe_on_QVM():
    """Run the same VQE loop against a sampling QVM and check the optimum."""
    start_angles = [3.1, -1.5, 0, 0]  # start near the optimum: easier when sampling
    qvm = get_qc("2q-qvm")
    with local_qvm():
        cost_function = PrepareAndMeasureOnQVM(
            prepare_ansatz=prepare_ansatz,
            make_memory_map=lambda angles: {"params": angles},
            hamiltonian=hamiltonian,
            qvm=qvm,
            scalar_cost_function=True,
            nshots=4,
            base_numshots=50,
        )
        result = minimize(cost_function, start_angles, tol=1e-2, method="Cobyla")
    print(result)
    assert np.allclose(result["fun"], -1.3, rtol=1.1)
    assert result["success"]
| 36.714286 | 82 | 0.585603 |
import numpy as np
import pytest
from scipy.optimize import minimize
from pyquil.paulis import PauliSum, PauliTerm
from pyquil.api import WavefunctionSimulator, local_qvm, get_qc
from pyquil.quil import Program
from pyquil.gates import RX, CNOT
from entropica_qaoa.vqe.cost_function import (PrepareAndMeasureOnWFSim,
PrepareAndMeasureOnQVM)
term1 = PauliTerm("Z", 0, -1)
term1 *= PauliTerm("Z", 1)
term2 = PauliTerm("Z", 0, 0.8)
term3 = PauliTerm("Z", 1, -0.5)
hamiltonian = PauliSum([term1, term2, term3])
prepare_ansatz = Program()
params = prepare_ansatz.declare("params", memory_type="REAL", memory_size=4)
prepare_ansatz.inst(RX(params[0], 0))
prepare_ansatz.inst(RX(params[1], 1))
prepare_ansatz.inst(CNOT(0, 1))
prepare_ansatz.inst(RX(params[2], 0))
prepare_ansatz.inst(RX(params[3], 1))
p0 = [0, 0, 0, 0]
@pytest.mark.slow
def test_vqe_on_WFSim():
sim = WavefunctionSimulator()
cost_fun = PrepareAndMeasureOnWFSim(prepare_ansatz=prepare_ansatz,
make_memory_map=lambda p: {"params": p},
hamiltonian=hamiltonian,
sim=sim,
scalar_cost_function=True)
with local_qvm():
out = minimize(cost_fun, p0, tol=1e-3, method="COBYLA")
wf = sim.wavefunction(prepare_ansatz, {"params": out['x']})
assert np.allclose(wf.probabilities(), [0, 0, 0, 1], rtol=1.5, atol=0.01)
assert np.allclose(out['fun'], -1.3)
assert out['success']
@pytest.mark.slow
def test_vqe_on_QVM():
p0 = [3.1, -1.5, 0, 0]
qvm = get_qc("2q-qvm")
with local_qvm():
cost_fun = PrepareAndMeasureOnQVM(prepare_ansatz=prepare_ansatz,
make_memory_map=lambda p: {"params": p},
hamiltonian=hamiltonian,
qvm=qvm,
scalar_cost_function=True,
nshots=4,
base_numshots=50)
out = minimize(cost_fun, p0, tol=1e-2, method="Cobyla")
print(out)
assert np.allclose(out['fun'], -1.3, rtol=1.1)
assert out['success']
| true | true |
1c2da042495ddcdc035511151cc4ab40f8ed1660 | 2,894 | py | Python | shooter_game.py | maria-2302/Proekt | eec5a094afb7ce6aceaf319fdbeda8112026b277 | [
"CC0-1.0"
] | null | null | null | shooter_game.py | maria-2302/Proekt | eec5a094afb7ce6aceaf319fdbeda8112026b277 | [
"CC0-1.0"
] | null | null | null | shooter_game.py | maria-2302/Proekt | eec5a094afb7ce6aceaf319fdbeda8112026b277 | [
"CC0-1.0"
] | null | null | null | #Создай собственный Шутер!
from pygame import *
# 800x600 game window with the galaxy image scaled to fill it.
okno = display.set_mode((800,600))
bkgd = transform.scale(image.load('galaxy.jpg'),(800,600))
gm = True  # main-loop flag; set to False to quit
# Background music.
mixer.init()
mixer.music.load('space.ogg')
mixer.music.play()
class sprit(sprite.Sprite):
    """Base drawable sprite: a loaded image plus a position rect and a speed.

    NOTE(review): the image is always scaled to 110x130 here (the player
    rocket's size); subclasses Enemy and bullet override __init__ with
    their own dimensions.
    """
    def __init__(self, imimage, x, y, sspeed):
        super().__init__()
        self.image = transform.scale(image.load(imimage),(110,130))
        self.speed = sspeed
        self.rect = self.image.get_rect()
        self.rect.x = x
        self.rect.y = y
    def selff (self):
        # Blit the sprite onto the main window at its current rect position.
        okno.blit(self.image,(self.rect.x, self.rect.y))
from random import*
class Enemy(sprit):
    """Descending UFO: falls by `speed` each frame and respawns at the top
    at a random x once it drops past y == 500."""
    def __init__(self, imimage, x, y, sspeed):
        # Bugfix: initialise the pygame Sprite base, which the original
        # skipped entirely (required for sprite-group machinery). We call
        # sprite.Sprite.__init__ directly rather than sprit.__init__,
        # because the latter would scale the image to the player's
        # 110x130 size while enemies use 130x100.
        sprite.Sprite.__init__(self)
        self.image = transform.scale(image.load(imimage),(130,100))
        self.speed = sspeed
        self.rect = self.image.get_rect()
        self.rect.x = x
        self.rect.y = y
    def selff (self):
        # Draw the UFO at its current rect position.
        okno.blit(self.image,(self.rect.x, self.rect.y))
    def update(self):
        # Fall until the bottom region, then restart from the top at a random x.
        if self.rect.y < 500:
            self.rect.y += self.speed
        else:
            self.rect.y = 0
            self.rect.x = randint(0,500)
class bullet(sprit):
    """Single projectile: flies upward by `speed`, and parks off-screen at
    y == -100 once it leaves the top of the window."""
    def __init__(self, imimage, x, y, sspeed):
        # Bugfix: initialise the pygame Sprite base, which the original
        # skipped. sprit.__init__ is bypassed on purpose because it would
        # scale the image to 110x130; bullets are 30x70.
        sprite.Sprite.__init__(self)
        self.image = transform.scale(image.load(imimage),(30,70))
        self.speed = sspeed
        self.rect = self.image.get_rect()
        self.rect.x = x
        self.rect.y = y
    def fire(self):
        # Move up while still on screen; otherwise park off-screen.
        if self.rect.y > 0:
            self.rect.y -= self.speed
        else:
            self.rect.y = -100
# The single shared bullet, parked off-screen until the player fires.
b=bullet('bullet.png',-200,-200,10)
class igrok(sprit):
    """Player-controlled rocket: A/D move it horizontally, SPACE fires."""

    def update(self):
        pressed = key.get_pressed()
        # Horizontal movement, clamped to the playfield.
        if pressed[K_a] and self.rect.x > 0:
            self.rect.x -= 5
        if pressed[K_d] and self.rect.x < 620 - 5:
            self.rect.x += 5
        # Firing: teleport the shared bullet to just above the rocket's nose.
        if pressed[K_SPACE]:
            b.rect.x = self.rect.x + 40
            b.rect.y = self.rect.y - 20
font.init()
font1 = font.Font(None, 80)  # score label font
font2 = font.Font(None, 80)  # health label font
hero = igrok('rocket.png', 150,470,5)
monsters = Enemy('ufo.png', 100,50, 3)
ochki = 0  # score ("ochki" is Russian for points)
health = 10  # player hit points; the main loop ends the game below 1
# Main game loop: draw, handle input, move sprites, resolve collisions.
while gm:
    okno.blit(bkgd,(0,0))
    # Window-close handling.
    for i in event.get():
        if i.type == QUIT:
            gm = False
    # Score label ("очки" = points).
    txt = 'очки '+ str(ochki)
    text = font1.render(txt,1,(250,0,0))
    okno.blit(text,(10,30))
    hero.selff()
    hero.update()
    monsters.selff()
    monsters.update()
    b.selff()
    b.fire()
    # Bullet hit: respawn the UFO at the top, park the bullet, score a point.
    if sprite.collide_rect(monsters, b):
        monsters.rect.y = -10
        monsters.rect.x = randint(0,500)
        b.rect.y = -100
        ochki += 1
        if ochki >10:
            gm = False
    # Health label ("здоровье" = health).
    helths = 'здоровье ' + str(health)
    hearts = font2.render(helths,2,(250,0,0))
    okno.blit(hearts,(450,30))
    # UFO reached the player: respawn it and deduct one hit point.
    if sprite.collide_rect(monsters, hero):
        monsters.rect.y = -10
        monsters.rect.x = randint(0,500)
        health -= 1  # bugfix: was 'helth', an undefined name (NameError on first hit)
        if health < 1:
            gm = False
    b.update()
    display.update()
| 27.561905 | 67 | 0.568072 |
from pygame import *
okno = display.set_mode((800,600))
bkgd = transform.scale(image.load('galaxy.jpg'),(800,600))
gm = True
mixer.init()
mixer.music.load('space.ogg')
mixer.music.play()
class sprit(sprite.Sprite):
def __init__(self, imimage, x, y, sspeed):
super().__init__()
self.image = transform.scale(image.load(imimage),(110,130))
self.speed = sspeed
self.rect = self.image.get_rect()
self.rect.x = x
self.rect.y = y
def selff (self):
okno.blit(self.image,(self.rect.x, self.rect.y))
from random import*
class Enemy(sprit):
def __init__(self, imimage, x, y, sspeed):
self.image = transform.scale(image.load(imimage),(130,100))
self.speed = sspeed
self.rect = self.image.get_rect()
self.rect.x = x
self.rect.y = y
def selff (self):
okno.blit(self.image,(self.rect.x, self.rect.y))
def update(self):
if self.rect.y < 500:
self.rect.y += self.speed
else:
self.rect.y = 0
self.rect.x = randint(0,500)
class bullet(sprit):
def __init__(self, imimage, x, y, sspeed):
self.image = transform.scale(image.load(imimage),(30,70))
self.speed = sspeed
self.rect = self.image.get_rect()
self.rect.x = x
self.rect.y = y
def fire(self):
if self.rect.y > 0:
self.rect.y -= self.speed
else:
self.rect.y = -100
b=bullet('bullet.png',-200,-200,10)
class igrok(sprit):
def update(self):
keys_pressed = key.get_pressed()
if keys_pressed[K_a] and self.rect.x >0:
self.rect.x -=5
if keys_pressed[K_d] and self.rect.x < 620-5:
self.rect.x +=5
if keys_pressed[K_SPACE]:
b.rect.x = self.rect.x+40
b.rect.y = self.rect.y-20
font.init()
font1 = font.Font(None, 80)
font2 = font.Font(None, 80)
hero = igrok('rocket.png', 150,470,5)
monsters = Enemy('ufo.png', 100,50, 3)
ochki = 0
health = 10
# Main game loop: draw, handle input, move sprites, resolve collisions.
while gm:
    okno.blit(bkgd,(0,0))
    for i in event.get():
        if i.type == QUIT:
            gm = False
    txt = 'очки '+ str(ochki)
    text = font1.render(txt,1,(250,0,0))
    okno.blit(text,(10,30))
    hero.selff()
    hero.update()
    monsters.selff()
    monsters.update()
    b.selff()
    b.fire()
    # Bullet hit: respawn the UFO, park the bullet, score a point.
    if sprite.collide_rect(monsters, b):
        monsters.rect.y = -10
        monsters.rect.x = randint(0,500)
        b.rect.y = -100
        ochki += 1
        if ochki >10:
            gm = False
    helths = 'здоровье ' + str(health)
    hearts = font2.render(helths,2,(250,0,0))
    okno.blit(hearts,(450,30))
    # UFO reached the player: respawn it and deduct one hit point.
    if sprite.collide_rect(monsters, hero):
        monsters.rect.y = -10
        monsters.rect.x = randint(0,500)
        health -= 1  # bugfix: was 'helth', an undefined name (NameError on first hit)
        if health < 1:
            gm = False
    b.update()
    display.update()
| true | true |
1c2da145767d95072bb35ea3a50f1a47c19e0f34 | 361 | py | Python | src/game/game.py | Palaszczuk/clase-23 | b5565585f5e3ee177162f4df081f89d2ad31f044 | [
"MIT"
] | null | null | null | src/game/game.py | Palaszczuk/clase-23 | b5565585f5e3ee177162f4df081f89d2ad31f044 | [
"MIT"
] | null | null | null | src/game/game.py | Palaszczuk/clase-23 | b5565585f5e3ee177162f4df081f89d2ad31f044 | [
"MIT"
] | null | null | null |
class Game:
    """Skeleton game object holding two player slots and life-cycle hooks."""

    # Players are attached after construction; None until then.
    player1 = None
    player2 = None

    def __init__(self) -> None:
        """Constructor only; no state is initialized yet."""

    def __str__(self) -> str:
        # Plain literal: the original used an f-string with no placeholders.
        return "Esto es el objeto Game"

    def start(self) -> bool:
        """Start the match. TODO: not implemented (currently returns None)."""

    def end(self) -> bool:
        """Finish the match. TODO: not implemented (currently returns None)."""

    def update(self) -> bool:
        """Advance the game one step. TODO: not implemented (currently returns None)."""
class Game:
    """Skeleton game object: two player slots plus life-cycle stubs."""

    player1 = None
    player2 = None

    def __init__(self) -> None:
        """Nothing to initialize yet."""
        pass

    def __str__(self) -> str:
        return f"Esto es el objeto Game"

    def start(self) -> bool:
        """Stub: to be implemented."""
        pass

    def end(self) -> bool:
        """Stub: to be implemented."""
        pass

    def update(self) -> bool:
        """Stub: to be implemented."""
        pass
1c2da1d891b041dc595bc2ace632ef69d4988ab8 | 233 | py | Python | Python/lib/settings.py | MatteoEsposito/ProgettoInItinereI-IngegneriaDegliAlgoritmi- | 75da3bb8b3a4189503d67b1b03cc50c14871f9e7 | [
"MIT"
] | null | null | null | Python/lib/settings.py | MatteoEsposito/ProgettoInItinereI-IngegneriaDegliAlgoritmi- | 75da3bb8b3a4189503d67b1b03cc50c14871f9e7 | [
"MIT"
] | null | null | null | Python/lib/settings.py | MatteoEsposito/ProgettoInItinereI-IngegneriaDegliAlgoritmi- | 75da3bb8b3a4189503d67b1b03cc50c14871f9e7 | [
"MIT"
] | null | null | null | # coding=utf-8
# settings.py
# Author: Matteo Esposito
# Python version: 2.6.9
# Global flags that set the algorithm's verbosity, to ease DEBUG and/or
# TESTING and/or PROFILING runs.
DEBUG = False
RELEASE = False
| 25.888889 | 118 | 0.763948 |
DEBUG = False
RELEASE = False
| true | true |
1c2da2abb77d52abeb662a7d3bf4cb9b5ce2ade6 | 192 | py | Python | pywick/models/classification/resnext_features/__init__.py | achaiah/pywick | 9d663faf0c1660a9b8359a6472c164f658dfc8cb | [
"MIT"
] | 408 | 2019-05-16T16:12:41.000Z | 2022-03-26T17:27:12.000Z | pywick/models/classification/resnext_features/__init__.py | ashishpatel26/pywick | 1afffd1c21c2b188836d3599e802146182757bb5 | [
"MIT"
] | 13 | 2019-05-17T05:47:06.000Z | 2021-06-21T19:02:30.000Z | pywick/models/classification/resnext_features/__init__.py | ashishpatel26/pywick | 1afffd1c21c2b188836d3599e802146182757bb5 | [
"MIT"
] | 42 | 2019-05-16T19:57:12.000Z | 2022-03-06T15:23:18.000Z | from .resnext101_32x4d_features import resnext101_32x4d_features
from .resnext101_64x4d_features import resnext101_64x4d_features
from .resnext50_32x4d_features import resnext50_32x4d_features | 64 | 64 | 0.927083 | from .resnext101_32x4d_features import resnext101_32x4d_features
from .resnext101_64x4d_features import resnext101_64x4d_features
from .resnext50_32x4d_features import resnext50_32x4d_features | true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.