| content (string) | path (string) | size (int64) | nl_text (string) | nl_size (int64) | nl_language (string) | nl_language_score (float64) |
|---|---|---|---|---|---|---|
import json
import kfp.dsl as _kfp_dsl
import kfp.components as _kfp_components
from collections import OrderedDict
from kubernetes import client as k8s_client
def step1():
from kale.common import mlmdutils as _kale_mlmdutils
_kale_mlmdutils.init_metadata()
from kale.marshal.decorator import marshal as _kale_marshal
from kale.common.runutils import link_artifacts as _kale_link_artifacts
_kale_pipeline_parameters = {}
@_kale_marshal([], ['_b', '_a'], _kale_pipeline_parameters, "/marshal")
def step1():
a = 1
b = 2
return a, b
step1()
_kale_artifacts = {}
_kale_link_artifacts(_kale_artifacts)
_kale_mlmdutils.call("mark_execution_complete")
def step2():
from kale.common import mlmdutils as _kale_mlmdutils
_kale_mlmdutils.init_metadata()
from kale.common.runutils import ttl as _kale_ttl
from kale.marshal.decorator import marshal as _kale_marshal
from kale.common.runutils import link_artifacts as _kale_link_artifacts
_kale_pipeline_parameters = {}
@_kale_ttl(5)
@_kale_marshal(['_b', '_a'], ['_c'], _kale_pipeline_parameters, "/marshal")
def step2(a, b):
c = a + b
print(c)
return c
step2()
_kale_artifacts = {}
_kale_link_artifacts(_kale_artifacts)
_kale_mlmdutils.call("mark_execution_complete")
def step3():
from kale.common import mlmdutils as _kale_mlmdutils
_kale_mlmdutils.init_metadata()
from kale.marshal.decorator import marshal as _kale_marshal
from kale.common.runutils import link_artifacts as _kale_link_artifacts
_kale_pipeline_parameters = {}
@_kale_marshal(['_a', '_c'], [], _kale_pipeline_parameters, "/marshal")
def step3(a, c):
d = c + a
print(d)
step3()
_kale_artifacts = {}
_kale_link_artifacts(_kale_artifacts)
_kale_mlmdutils.call("mark_execution_complete")
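# A short orientation for the three steps above, as far as it can be read from
# the generated code: the _kale_marshal decorator takes (objects to load from
# the shared "/marshal" volume before the body runs, objects to save there
# afterwards, the pipeline parameters, the marshal directory). So step1 saves
# '_a' and '_b', step2 loads them and saves '_c', and step3 loads '_a' and
# '_c'; the shared volume is what actually carries data between the step pods.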
_kale_step1_op = _kfp_components.func_to_container_op(step1)
_kale_step2_op = _kfp_components.func_to_container_op(step2)
_kale_step3_op = _kfp_components.func_to_container_op(step3)
@_kfp_dsl.pipeline(
name='test',
description=''
)
def auto_generated_pipeline():
_kale_pvolumes_dict = OrderedDict()
_kale_volume_step_names = []
_kale_volume_name_parameters = []
_kale_marshal_vop = _kfp_dsl.VolumeOp(
name="kale-marshal-volume",
resource_name="kale-marshal-pvc",
modes=['ReadWriteMany'],
size="1Gi"
)
_kale_volume_step_names.append(_kale_marshal_vop.name)
_kale_volume_name_parameters.append(
_kale_marshal_vop.outputs["name"].full_name)
_kale_pvolumes_dict['/marshal'] = _kale_marshal_vop.volume
_kale_volume_step_names.sort()
_kale_volume_name_parameters.sort()
_kale_step1_task = _kale_step1_op()\
.add_pvolumes(_kale_pvolumes_dict)\
.after()
_kale_step_labels = {'common-label': 'true'}
for _kale_k, _kale_v in _kale_step_labels.items():
_kale_step1_task.add_pod_label(_kale_k, _kale_v)
_kale_step_limits = {'amd/gpu': '1'}
for _kale_k, _kale_v in _kale_step_limits.items():
_kale_step1_task.container.add_resource_limit(_kale_k, _kale_v)
_kale_step1_task.container.working_dir = "/test"
_kale_step1_task.container.set_security_context(
k8s_client.V1SecurityContext(run_as_user=0))
_kale_output_artifacts = {}
_kale_step1_task.output_artifact_paths.update(_kale_output_artifacts)
_kale_step1_task.add_pod_label(
"pipelines.kubeflow.org/metadata_written", "true")
_kale_dep_names = (_kale_step1_task.dependent_names +
_kale_volume_step_names)
_kale_step1_task.add_pod_annotation(
"kubeflow-kale.org/dependent-templates", json.dumps(_kale_dep_names))
if _kale_volume_name_parameters:
_kale_step1_task.add_pod_annotation(
"kubeflow-kale.org/volume-name-parameters",
json.dumps(_kale_volume_name_parameters))
_kale_step2_task = _kale_step2_op()\
.add_pvolumes(_kale_pvolumes_dict)\
.after(_kale_step1_task)
_kale_step_labels = {'common-label': 'true'}
for _kale_k, _kale_v in _kale_step_labels.items():
_kale_step2_task.add_pod_label(_kale_k, _kale_v)
_kale_step2_task.set_retry_strategy(
num_retries=5,
retry_policy="Always",
backoff_duration="20",
backoff_factor=2,
backoff_max_duration=None)
_kale_step2_task.container.working_dir = "/test"
_kale_step2_task.container.set_security_context(
k8s_client.V1SecurityContext(run_as_user=0))
_kale_output_artifacts = {}
_kale_step2_task.output_artifact_paths.update(_kale_output_artifacts)
_kale_step2_task.add_pod_label(
"pipelines.kubeflow.org/metadata_written", "true")
_kale_dep_names = (_kale_step2_task.dependent_names +
_kale_volume_step_names)
_kale_step2_task.add_pod_annotation(
"kubeflow-kale.org/dependent-templates", json.dumps(_kale_dep_names))
if _kale_volume_name_parameters:
_kale_step2_task.add_pod_annotation(
"kubeflow-kale.org/volume-name-parameters",
json.dumps(_kale_volume_name_parameters))
_kale_step3_task = _kale_step3_op()\
.add_pvolumes(_kale_pvolumes_dict)\
.after(_kale_step2_task, _kale_step1_task)
_kale_step_annotations = {'step3-annotation': 'test'}
for _kale_k, _kale_v in _kale_step_annotations.items():
_kale_step3_task.add_pod_annotation(_kale_k, _kale_v)
_kale_step_labels = {'common-label': 'true'}
for _kale_k, _kale_v in _kale_step_labels.items():
_kale_step3_task.add_pod_label(_kale_k, _kale_v)
_kale_step3_task.container.working_dir = "/test"
_kale_step3_task.container.set_security_context(
k8s_client.V1SecurityContext(run_as_user=0))
_kale_output_artifacts = {}
_kale_step3_task.output_artifact_paths.update(_kale_output_artifacts)
_kale_step3_task.add_pod_label(
"pipelines.kubeflow.org/metadata_written", "true")
_kale_dep_names = (_kale_step3_task.dependent_names +
_kale_volume_step_names)
_kale_step3_task.add_pod_annotation(
"kubeflow-kale.org/dependent-templates", json.dumps(_kale_dep_names))
if _kale_volume_name_parameters:
_kale_step3_task.add_pod_annotation(
"kubeflow-kale.org/volume-name-parameters",
json.dumps(_kale_volume_name_parameters))
if __name__ == "__main__":
pipeline_func = auto_generated_pipeline
pipeline_filename = pipeline_func.__name__ + '.pipeline.tar.gz'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)
# Get or create an experiment and submit a pipeline run
import kfp
client = kfp.Client()
experiment = client.create_experiment('test')
# Submit a pipeline run
from kale.common import kfputils
pipeline_id, version_id = kfputils.upload_pipeline(
pipeline_filename, "test")
run_result = kfputils.run_pipeline(
experiment_name=experiment.name, pipeline_id=pipeline_id, version_id=version_id)
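# For comparison, a minimal hand-written KFP v1 pipeline with the same
# step1 -> step2 -> step3 dependency shape as the generated code above
# (a sketch only; the toy function bodies and the 'toy-pipeline' name are
# illustrative and not part of this asset):
#
#     import kfp
#     import kfp.dsl as dsl
#     import kfp.components as comp
#
#     def a(): print("step1")
#     def b(): print("step2")
#     def c(): print("step3")
#
#     a_op, b_op, c_op = (comp.func_to_container_op(f) for f in (a, b, c))
#
#     @dsl.pipeline(name="toy-pipeline", description="")
#     def toy_pipeline():
#         t1 = a_op()
#         t2 = b_op().after(t1)
#         t3 = c_op().after(t2, t1)
#
#     kfp.compiler.Compiler().compile(toy_pipeline, "toy-pipeline.tar.gz")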
| backend/kale/tests/assets/kfp_dsl/simple_data_passing.py | 7,172 | Get or create an experiment and submit a pipeline run Submit a pipeline run | 75 | en | 0.69972 |
import numpy as np
import matplotlib.pyplot as plt
from UTILS.Calculus import Calculus
from UTILS.SetAxisLimit import SetAxisLimit
from UTILS.Tools import Tools
from UTILS.Errors import Errors
import sys
# Theoretical background https://arxiv.org/abs/1401.5176
# Mocak, Meakin, Viallet, Arnett, 2014, Compressible Hydrodynamic Mean-Field #
# Equations in Spherical Geometry and their Application to Turbulent Stellar #
# Convection Data #
class LuminosityEquation(Calculus, SetAxisLimit, Tools, Errors, object):
def __init__(self, filename, ig, ieos, fext, intc, tke_diss, bconv, tconv, data_prefix):
super(LuminosityEquation, self).__init__(ig)
# load data to structured array
eht = self.customLoad(filename)
# load grid
xzn0 = self.getRAdata(eht, 'xzn0')
yzn0 = self.getRAdata(eht, 'yzn0')
zzn0 = self.getRAdata(eht, 'zzn0')
nx = self.getRAdata(eht, 'nx')
# pick equation-specific Reynolds-averaged mean fields according to:
# https://github.com/mmicromegas/ransX/blob/master/DOCS/ransXimplementationGuide.pdf
dd = self.getRAdata(eht, 'dd')[intc]
ux = self.getRAdata(eht, 'ux')[intc]
pp = self.getRAdata(eht, 'pp')[intc]
tt = self.getRAdata(eht, 'tt')[intc]
cp = self.getRAdata(eht, 'cp')[intc]
gg = self.getRAdata(eht, 'gg')[intc]
abar = self.getRAdata(eht, 'abar')[intc]
ddux = self.getRAdata(eht, 'ddux')[intc]
dduy = self.getRAdata(eht, 'dduy')[intc]
dduz = self.getRAdata(eht, 'dduz')[intc]
ddttux = self.getRAdata(eht, 'ddttux')[intc]
dduxttx = self.getRAdata(eht, 'dduxttx')[intc]
dduytty = self.getRAdata(eht, 'dduytty')[intc]
dduzttz = self.getRAdata(eht, 'dduzttz')[intc]
eiuxddx = self.getRAdata(eht, 'eiuxddx')[intc]
eiuyddy = self.getRAdata(eht, 'eiuyddy')[intc]
eiuzddz = self.getRAdata(eht, 'eiuzddz')[intc]
dduxux = self.getRAdata(eht, 'dduxux')[intc]
dduyuy = self.getRAdata(eht, 'dduyuy')[intc]
dduzuz = self.getRAdata(eht, 'dduzuz')[intc]
dduxuy = self.getRAdata(eht, 'dduxuy')[intc]
dduxuz = self.getRAdata(eht, 'dduxuz')[intc]
ddekux = self.getRAdata(eht, 'ddekux')[intc]
ddek = self.getRAdata(eht, 'ddek')[intc]
ddei = self.getRAdata(eht, 'ddei')[intc]
ddeiux = self.getRAdata(eht, 'ddeiux')[intc]
eiux = self.getRAdata(eht, 'eiux')[intc]
ddetux = self.getRAdata(eht, 'ddetux')[intc]
divu = self.getRAdata(eht, 'divu')[intc]
ppdivu = self.getRAdata(eht, 'ppdivu')[intc]
dddivu = self.getRAdata(eht, 'dddivu')[intc]
uxdivu = self.getRAdata(eht, 'uxdivu')[intc]
ppux = self.getRAdata(eht, 'ppux')[intc]
ddenuc1 = self.getRAdata(eht, 'ddenuc1')[intc]
ddenuc2 = self.getRAdata(eht, 'ddenuc2')[intc]
chim = self.getRAdata(eht, 'chim')[intc]
chit = self.getRAdata(eht, 'chit')[intc]
chid = self.getRAdata(eht, 'chid')[intc]
gamma1 = self.getRAdata(eht, 'gamma1')[intc]
gascon = 8.3144629e7 # gas constant in cgs
# override gamma for ideal gas eos (need to be fixed in PROMPI later)
if ieos == 1:
cp = self.getRAdata(eht, 'cp')[intc]
cv = self.getRAdata(eht, 'cv')[intc]
gamma1 = cp / cv # gamma1,gamma2,gamma3 = gamma = cp/cv Cox & Giuli 2nd Ed. page 230, Eq.9.110
# print(gamma1)
# print("-----------")
# print((gamma1/(gamma1-1.))*gascon/abar)
# print("-----------")
# print(cp)
##########################
# HSSE LUMINOSITY EQUATION
##########################
# store time series for time derivatives
t_timec = self.getRAdata(eht, 'timec')
t_dd = self.getRAdata(eht, 'dd')
t_tt = self.getRAdata(eht, 'tt')
t_pp = self.getRAdata(eht, 'pp')
t_ddei = self.getRAdata(eht, 'ddei')
t_ddss = self.getRAdata(eht, 'ddss')
t_ddtt = self.getRAdata(eht, 'ddtt')
t_ddux = self.getRAdata(eht, 'ddux')
t_dduy = self.getRAdata(eht, 'dduy')
t_dduz = self.getRAdata(eht, 'dduz')
t_dduxux = self.getRAdata(eht, 'dduxux')
t_dduyuy = self.getRAdata(eht, 'dduyuy')
t_dduzuz = self.getRAdata(eht, 'dduzuz')
t_uxux = self.getRAdata(eht, 'uxux')
t_uyuy = self.getRAdata(eht, 'uyuy')
t_uzuz = self.getRAdata(eht, 'uzuz')
t_fht_ek = 0.5 * (t_dduxux + t_dduyuy + t_dduzuz) / t_dd
t_fht_ei = t_ddei / t_dd
t_fht_et = t_fht_ek + t_fht_ei
t_fht_ss = t_ddss / t_dd
t_fht_ux = t_ddux / t_dd
t_fht_uy = t_dduy / t_dd
t_fht_uz = t_dduz / t_dd
t_fht_ui_fht_ui = t_fht_ux * t_fht_ux + t_fht_uy * t_fht_uy + t_fht_uz * t_fht_uz
t_fht_tt = t_ddtt/t_dd
# t_mm = self.getRAdata(eht,'mm'))
# minus_dt_mm = -self.dt(t_mm,xzn0,t_timec,intc)
# fht_ux = minus_dt_mm/(4.*np.pi*(xzn0**2.)*dd)
# construct equation-specific mean fields
# fht_ek = 0.5*(dduxux + dduyuy + dduzuz)/dd
fht_ek = ddek / dd
fht_ux = ddux / dd
fht_uy = dduy / dd
fht_uz = dduz / dd
fht_ei = ddei / dd
fht_et = fht_ek + fht_ei
fht_enuc = (ddenuc1 + ddenuc2) / dd
fht_eiux = ddeiux/dd
fei = ddeiux - ddux * ddei / dd
fekx = ddekux - fht_ux * fht_ek
fpx = ppux - pp * ux
fekx = ddekux - fht_ux * fht_ek
fht_ui_fht_ui = fht_ux * fht_ux + fht_uy * fht_uy + fht_uz * fht_uz
if self.ig == 1: # Kippenhahn and Weigert, page 38
alpha = 1.
delta = 1.
phi = 1.
elif self.ig == 2:
alpha = 1. / chid
delta = -chit / chid
phi = chid / chim
fht_rxx = dduxux - ddux * ddux / dd
fdil = (uxdivu - ux * divu)
gg = -gg
if self.ig == 1:
surface = (yzn0[-1] - yzn0[0]) * (zzn0[-1] - zzn0[0])
elif self.ig == 2:
# sphere surface
surface = +4. * np.pi * (xzn0 ** 2.)
else:
print("ERROR(Properties.py): " + self.errorGeometry(self.ig))
sys.exit()
####################################
# STANDARD LUMINOSITY EQUATION EXACT
####################################
self.minus_cp_rho_dTdt = -cp*(self.dt(t_ddtt, xzn0, t_timec, intc) + self.Div(ddttux,xzn0) - (dduxttx + dduytty + dduzttz))
self.plus_delta_dPdt = +delta * self.dt(t_pp, xzn0, t_timec, intc)
self.minus_dd_div_eiui = -(self.Div(ddeiux, xzn0) - (eiuxddx + eiuyddy + eiuzddz))
#self.minus_dd_div_eiui = -(self.Div(ddeiux, xzn0))
self.plus_tke_diss = +tke_diss
self.minus_resLumExactEquation = -(self.minus_cp_rho_dTdt+self.plus_delta_dPdt+self.minus_dd_div_eiui+self.plus_tke_diss)
########################################
# END STANDARD LUMINOSITY EQUATION EXACT
########################################
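# The four right-hand-side terms computed above assemble the exact luminosity
# equation whose imbalance is stored in minus_resLumExactEquation; written
# schematically in the notation of this code (not a definitive derivation):
#   -cp * rho dT/dt + delta * dP/dt - div(rho eps_I u) + eps_K ~ 0
# so res = -(rhs0 + rhs1 + rhs2 + rhs3) measures how far the simulation data
# deviate from that balance.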
# assign global data to be shared across whole class
self.data_prefix = data_prefix
self.xzn0 = xzn0
self.fht_et = fht_ei + fht_ek
self.nx = nx
self.bconv = bconv
self.tconv = tconv
self.fext = fext
def plot_et(self, LAXIS, xbl, xbr, ybu, ybd, ilg):
"""Plot mean total energy stratification in the model"""
# load x GRID
grd1 = self.xzn0
# load DATA to plot
plt1 = self.fht_et
# create FIGURE
plt.figure(figsize=(7, 6))
# format AXIS, make sure it is exponential
plt.gca().yaxis.get_major_formatter().set_powerlimits((0, 0))
# set plot boundaries
to_plot = [plt1]
self.set_plt_axis(LAXIS, xbl, xbr, ybu, ybd, to_plot)
# plot DATA
plt.title(r'total energy')
plt.plot(grd1, plt1, color='brown', label=r'$\widetilde{\varepsilon}_t$')
# define and show x/y LABELS
if self.ig == 1:
setxlabel = r"x (cm)"
setylabel = r"$\widetilde{\varepsilon}_t$ (erg g$^{-1}$)"
plt.xlabel(setxlabel)
plt.ylabel(setylabel)
elif self.ig == 2:
setxlabel = r"r (cm)"
setylabel = r"$\widetilde{\varepsilon}_t$ (erg g$^{-1}$)"
plt.xlabel(setxlabel)
plt.ylabel(setylabel)
# show LEGEND
plt.legend(loc=ilg, prop={'size': 18})
# display PLOT
plt.show(block=False)
# save PLOT
if self.fext == 'png':
plt.savefig('RESULTS/' + self.data_prefix + 'mean_et.png')
elif self.fext == 'eps':
plt.savefig('RESULTS/' + self.data_prefix + 'mean_et.eps')
def plot_luminosity_equation_exact(self, LAXIS, xbl, xbr, ybu, ybd, ilg):
"""Plot luminosity equation in the model"""
# load x GRID
grd1 = self.xzn0
rhs0 = self.minus_cp_rho_dTdt
rhs1 = self.plus_delta_dPdt
rhs2 = self.minus_dd_div_eiui
rhs3 = self.plus_tke_diss
res = self.minus_resLumExactEquation
# create FIGURE
plt.figure(figsize=(7, 6))
# format AXIS, make sure it is exponential
plt.gca().yaxis.get_major_formatter().set_powerlimits((0, 0))
# set plot boundaries
to_plot = [rhs0, rhs1, res]
self.set_plt_axis(LAXIS, xbl, xbr, ybu, ybd, to_plot)
self.bconv = 4.e8
self.tconv = 1.2e9
xlimitrange = np.where((grd1 > self.bconv) & (grd1 < self.tconv))
xlimitbottom = np.where(grd1 < self.bconv)
xlimittop = np.where(grd1 > self.tconv)
# plot DATA
plt.title("standard luminosity equation exact")
if self.ig == 1:
plt.plot(grd1[xlimitrange], rhs0[xlimitrange], color='#FF8C00', label=r"$-c_P \overline{\rho \partial_t T}$")
plt.plot(grd1[xlimitrange], rhs1[xlimitrange], color='y',label = r"$+\delta \overline{\partial_t P}$")
plt.plot(grd1[xlimitrange], rhs2[xlimitrange], color='r',label = r"$-\overline{\rho \nabla \cdot \epsilon_I {\bf u}}$")
plt.plot(grd1[xlimitrange], rhs3[xlimitrange], color='g',label = r"$+\varepsilon_K$")
plt.plot(grd1, res, color='k', linestyle='--', label=r"res $\sim N$")
zeros = np.zeros(self.nx)
plt.plot(grd1, zeros, color='k', linewidth=0.6, label="zero")
elif self.ig == 2:
plt.plot(grd1[xlimitrange], rhs0[xlimitrange], color='#FF8C00', label=r"$-c_P \rho dT/dt$")
plt.plot(grd1[xlimitrange], rhs1[xlimitrange], color='y',label = r"$+\delta dP/dt$")
plt.plot(grd1, res, color='k', linestyle='--', label=r"res $\sim N$")
zeros = np.zeros(self.nx)
plt.plot(grd1, zeros, color='k', linewidth=0.6, label="zero")
# convective boundary markers
plt.axvline(self.bconv, linestyle='--', linewidth=0.7, color='k')
plt.axvline(self.tconv, linestyle='--', linewidth=0.7, color='k')
if self.ig == 1:
setxlabel = r"x (cm)"
setylabel = r"erg g$^{-1}$ s$^{-1}$"
plt.xlabel(setxlabel)
plt.ylabel(setylabel)
elif self.ig == 2:
setxlabel = r"r (cm)"
setylabel = r"erg g$^{-1}$ s$^{-1}$"
plt.xlabel(setxlabel)
plt.ylabel(setylabel)
# show LEGEND
plt.legend(loc=ilg, prop={'size': 10}, ncol = 2)
# display PLOT
plt.show(block=False)
# save PLOT
if self.fext == 'png':
plt.savefig('RESULTS/' + self.data_prefix + 'standard_luminosity_exact_eq.png')
elif self.fext == 'eps':
plt.savefig('RESULTS/' + self.data_prefix + 'standard_luminosity_exact_eq.eps')
| CANUTO1997/LuminosityEquation.py | 11,870 | Plot mean total energy stratification in the model
Plot luminosity equation in the model
Theoretical background https://arxiv.org/abs/1401.5176 Mocak, Meakin, Viallet, Arnett, 2014, Compressible Hydrodynamic Mean-Field Equations in Spherical Geometry and their Application to Turbulent Stellar Convection Data load data to structured array load grid pick equation-specific Reynolds-averaged mean fields according to: https://github.com/mmicromegas/ransX/blob/master/DOCS/ransXimplementationGuide.pdf gas constant in cgs override gamma for ideal gas eos (need to be fixed in PROMPI later) gamma1,gamma2,gamma3 = gamma = cp/cv Cox & Giuli 2nd Ed. page 230, Eq.9.110 print(gamma1) print("-----------") print((gamma1/(gamma1-1.))*gascon/abar) print("-----------") print(cp) HSSE LUMINOSITY EQUATION store time series for time derivatives t_mm = self.getRAdata(eht,'mm')) minus_dt_mm = -self.dt(t_mm,xzn0,t_timec,intc) fht_ux = minus_dt_mm/(4.*np.pi*(xzn0**2.)*dd) construct equation-specific mean fields fht_ek = 0.5*(dduxux + dduyuy + dduzuz)/dd Kippenhahn and Weigert, page 38 sphere surface STANDARD LUMINOSITY EQUATION EXACTself.minus_dd_div_eiui = -(self.Div(ddeiux, xzn0)) END STANDARD LUMINOSITY EQUATION EXACT assign global data to be shared across whole class load x GRID load DATA to plot create FIGURE format AXIS, make sure it is exponential set plot boundaries plot DATA define and show x/y LABELS show LEGEND display PLOT save PLOT load x GRID create FIGURE format AXIS, make sure it is exponential set plot boundaries plot DATA convective boundary markers show LEGEND display PLOT save PLOT | 1,624 | en | 0.68908 |
import numpy as np
class Graph():
""" The Graph to model the skeletons extracted by the openpose
Args:
strategy (string): must be one of the following candidates
- uniform: Uniform Labeling
- distance: Distance Partitioning
- spatial: Spatial Configuration
For more information, please refer to the section 'Partition Strategies'
in our paper (https://arxiv.org/abs/1801.07455).
layout (string): must be one of the following candidates
- openpose: Consists of 18 joints. For more information, please
refer to https://github.com/CMU-Perceptual-Computing-Lab/openpose#output
- ntu-rgb+d: Consists of 25 joints. For more information, please
refer to https://github.com/shahroudy/NTURGB-D
max_hop (int): the maximal distance between two connected nodes
dilation (int): controls the spacing between the kernel points
"""
def __init__(self,
layout='openpose',
strategy='uniform',
max_hop=1,
dilation=1):
self.max_hop = max_hop
self.dilation = dilation
self.get_edge(layout)
self.hop_dis = get_hop_distance(
self.num_node, self.edge, max_hop=max_hop)
self.get_adjacency(strategy)
def __str__(self):
# __str__ must return a string, not the raw numpy array
return str(self.A)
def get_edge(self, layout):
if layout == 'openpose':
self.num_node = 18
self_link = [(i, i) for i in range(self.num_node)]
neighbor_link = [(4, 3), (3, 2), (7, 6), (6, 5), (13, 12), (12,
11),
(10, 9), (9, 8), (11, 5), (8, 2), (5, 1), (2, 1),
(0, 1), (15, 0), (14, 0), (17, 15), (16, 14)]
self.edge = self_link + neighbor_link
self.center = 1
elif layout == 'ntu-rgb+d':
self.num_node = 25
self_link = [(i, i) for i in range(self.num_node)]
neighbor_1base = [(1, 2), (2, 21), (3, 21), (4, 3), (5, 21),
(6, 5), (7, 6), (8, 7), (9, 21), (10, 9),
(11, 10), (12, 11), (13, 1), (14, 13), (15, 14),
(16, 15), (17, 1), (18, 17), (19, 18), (20, 19),
(22, 23), (23, 8), (24, 25), (25, 12)]
neighbor_link = [(i - 1, j - 1) for (i, j) in neighbor_1base]
self.edge = self_link + neighbor_link
self.center = 21 - 1
elif layout == 'ntu_edge':
self.num_node = 24
self_link = [(i, i) for i in range(self.num_node)]
neighbor_1base = [(1, 2), (3, 2), (4, 3), (5, 2), (6, 5), (7, 6),
(8, 7), (9, 2), (10, 9), (11, 10), (12, 11),
(13, 1), (14, 13), (15, 14), (16, 15), (17, 1),
(18, 17), (19, 18), (20, 19), (21, 22), (22, 8),
(23, 24), (24, 12)]
neighbor_link = [(i - 1, j - 1) for (i, j) in neighbor_1base]
self.edge = self_link + neighbor_link
self.center = 2
# elif layout=='customer settings':
# pass
else:
raise ValueError("Do Not Exist This Layout.")
# compute the adjacency matrix A
def get_adjacency(self, strategy):
valid_hop = range(0, self.max_hop + 1, self.dilation) #range(start,stop,step)
adjacency = np.zeros((self.num_node, self.num_node))
for hop in valid_hop:
adjacency[self.hop_dis == hop] = 1
normalize_adjacency = normalize_digraph(adjacency)
if strategy == 'uniform':
A = np.zeros((1, self.num_node, self.num_node))
A[0] = normalize_adjacency
self.A = A
elif strategy == 'distance':
A = np.zeros((len(valid_hop), self.num_node, self.num_node))
for i, hop in enumerate(valid_hop):
A[i][self.hop_dis == hop] = normalize_adjacency[self.hop_dis ==
hop]
self.A = A
elif strategy == 'spatial':
A = []
for hop in valid_hop:
a_root = np.zeros((self.num_node, self.num_node))
a_close = np.zeros((self.num_node, self.num_node))
a_further = np.zeros((self.num_node, self.num_node))
for i in range(self.num_node):
for j in range(self.num_node):
if self.hop_dis[j, i] == hop:
if self.hop_dis[j, self.center] == self.hop_dis[
i, self.center]:
a_root[j, i] = normalize_adjacency[j, i]
elif self.hop_dis[j, self.
center] > self.hop_dis[i, self.
center]:
a_close[j, i] = normalize_adjacency[j, i]
else:
a_further[j, i] = normalize_adjacency[j, i]
if hop == 0:
A.append(a_root)
else:
A.append(a_root + a_close)
A.append(a_further)
A = np.stack(A)
self.A = A
else:
raise ValueError("Do Not Exist This Strategy")
# the value returned by this function, hop_dis, is the graph's hop-distance matrix (a generalized adjacency)
def get_hop_distance(num_node, edge, max_hop=1):
A = np.zeros((num_node, num_node))
for i, j in edge:
A[j, i] = 1
A[i, j] = 1
# compute hop steps
hop_dis = np.zeros((num_node, num_node)) + np.inf # np.inf represents positive infinity (unreachable)
# np.linalg.matrix_power(A, d) raises A to the d-th power; transfer_mat stacks A^0 .. A^max_hop
transfer_mat = [np.linalg.matrix_power(A, d) for d in range(max_hop + 1)]
# entries > 0 become True and the rest False, so arrive_mat is a boolean array with the same shape as np.stack(transfer_mat)
arrive_mat = (np.stack(transfer_mat) > 0)
# iterate d from max_hop down to 0 (range with step=-1)
for d in range(max_hop, -1, -1):
# wherever arrive_mat[d] is True, set the corresponding entry of hop_dis to d
hop_dis[arrive_mat[d]] = d
return hop_dis
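# Small worked example: a 3-node chain 0-1-2 with max_hop=1 gives
#     get_hop_distance(3, [(0, 1), (1, 2)], max_hop=1)
#     -> [[0., 1., inf],
#         [1., 0., 1. ],
#         [inf, 1., 0. ]]
# nodes further apart than max_hop keep the np.inf initialisation.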
# divide every element of each column of A by that column's sum to form the normalized matrix
def normalize_digraph(A):
Dl = np.sum(A, 0) # column sums (collapse A to a single row)
num_node = A.shape[0]
Dn = np.zeros((num_node, num_node))
for i in range(num_node):
if Dl[i] > 0:
Dn[i, i] = Dl[i]**(-1)
AD = np.dot(A, Dn)
return AD
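# For instance, A = [[0, 1], [1, 1]] has column sums [1, 2], so
# normalize_digraph(A) leaves the first column unchanged and scales the
# second column by 1/2, giving [[0, 0.5], [1, 0.5]].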
def normalize_undigraph(A):
Dl = np.sum(A, 0)
num_node = A.shape[0]
Dn = np.zeros((num_node, num_node))
for i in range(num_node):
if Dl[i] > 0:
Dn[i, i] = Dl[i]**(-0.5)
DAD = np.dot(np.dot(Dn, A), Dn)
return DAD | action/pose_based/net/utils/graph.py | 7,104 | The Graph to model the skeletons extracted by the openpose
Args:
strategy (string): must be one of the follow candidates
- uniform: Uniform Labeling
- distance: Distance Partitioning
- spatial: Spatial Configuration
For more information, please refer to the section 'Partition Strategies'
in our paper (https://arxiv.org/abs/1801.07455).
layout (string): must be one of the follow candidates
- openpose: Is consists of 18 joints. For more information, please
refer to https://github.com/CMU-Perceptual-Computing-Lab/openpose#output
- ntu-rgb+d: Is consists of 25 joints. For more information, please
refer to https://github.com/shahroudy/NTURGB-D
max_hop (int): the maximal distance between two connected nodes
dilation (int): controls the spacing between the kernel points
elif layout=='customer settings': pass计算邻接矩阵Arange(start,stop,step) 此函数的返回值hop_dis就是图的邻接矩阵 compute hop steps np.inf 表示一个无穷大的正数 np.linalg.matrix_power(A, d)求矩阵A的d幂次方,transfer_mat矩阵(I,A)是一个将A矩阵拼接max_hop+1次的矩阵 (np.stack(transfer_mat) > 0)矩阵中大于0的返回Ture,小于0的返回False,最终arrive_mat是一个布尔矩阵,大小与transfer_mat一样 range(start,stop,step) step=-1表示倒着取 将arrive_mat[d]矩阵中为True的对应于hop_dis[]位置的数设置为d 将矩阵A中的每一列的各个元素分别除以此列元素的形成新的矩阵将矩阵A压缩成一行 | 1,264 | en | 0.566456 |
from selenium.webdriver.support.select import Select
def get_selected_option(browser, css_selector):
# Takes a css selector for a <select> element and returns the value of
# the selected option
select = Select(browser.find_element_by_css_selector(css_selector))
return select.first_selected_option.get_attribute('value')
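# Example usage (a sketch; the driver setup and the '#id_country' selector are
# illustrative assumptions, not part of this helper):
#
#     from selenium import webdriver
#     browser = webdriver.Firefox()
#     browser.get('http://localhost:8000/accounts/profile/')
#     country = get_selected_option(browser, '#id_country')
#     browser.quit()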
| tests/test_helpers/selenium_helper.py | 338 | Takes a css selector for a <select> element and returns the value of the selected option | 88 | en | 0.683958 |
import sys
import numpy as np
from contextlib import contextmanager
from qtpy.QtGui import QOpenGLBuffer
def setup_vertex_buffer(gl, data, shader, shader_variable):
'Setup a vertex buffer with `data` vertices as `shader_variable` on shader'
vbo = QOpenGLBuffer(QOpenGLBuffer.VertexBuffer)
vbo.create()
with bind(vbo):
vertices = np.array(data, np.float32)
count, dim_vertex = vertices.shape
vbo.allocate(vertices.flatten(), vertices.nbytes)
attr_loc = shader.attributeLocation(shader_variable)
shader.enableAttributeArray(attr_loc)
shader.setAttributeBuffer(attr_loc, gl.GL_FLOAT, 0, dim_vertex)
return vbo
def update_vertex_buffer(vbo, data):
'Update a vertex buffer with `data` vertices'
vertices = np.asarray(data, np.float32)
count, dim_vertex = vertices.shape
with bind(vbo):
vbo.allocate(vertices.flatten(), vertices.nbytes)
def copy_data_to_pbo(pbo, data, *, mapped_array=None):
'Allocate or update data stored in a pixel buffer object'
width, height = data.shape
with bind(pbo):
if pbo.isCreated() and mapped_array is not None:
mapped_array[:] = data.reshape((width, height))
return mapped_array
full_size = data.nbytes
pointer_type = np.ctypeslib.ndpointer(
dtype=data.dtype, shape=(width, height), ndim=data.ndim)
pbo.create()
with bind(pbo):
pbo.allocate(data, full_size)
ptr = pbo.map(QOpenGLBuffer.WriteOnly)
assert ptr is not None, 'Failed to map pixel buffer array'
pointer_type = np.ctypeslib.ndpointer(
dtype=data.dtype, shape=(width, height), ndim=data.ndim)
mapped_array = np.ctypeslib.as_array(pointer_type(int(ptr)))
pbo.unmap()
mapped_array[:] = data.reshape((width, height))
return mapped_array
def update_pbo_texture(gl, pbo, texture, *, array_data, texture_format,
source_format, source_type):
'Update a texture associated with a PBO'
width, height = array_data.shape[:2]
if source_format == gl.GL_RGB:
height //= 3
with bind(pbo, texture):
# AreaDetector arrays are not strided
gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, 1)
# AreaDetector arrays are big endian - so let OpenGL take care of
# byteswapping if that doesn't match up with the system/array
# endianness
# gl.glPixelStorei(gl.GL_UNPACK_SWAP_BYTES,
# int(not array_data.dtype.isnative))
gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER,
gl.GL_LINEAR)
gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER,
gl.GL_LINEAR)
gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, texture_format, width, height, 0,
source_format, source_type, None)
@contextmanager
def bind(*objs, args=None):
'Bind all objs (optionally with positional arguments); releases at cleanup'
if args is None:
args = (None for obj in objs)
for obj, arg in zip(objs, args):
if arg is not None:
obj.bind(arg)
else:
obj.bind()
yield
for obj in objs[::-1]:
obj.release()
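# Example usage (a sketch): bind() works with any Qt objects that expose
# bind()/release(), so a buffer and a texture can be bound together for a
# draw block and are released in reverse order on exit:
#
#     with bind(vbo, texture):
#         gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, 4)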
| caimageviewer/gl_util.py | 3,275 | Bind all objs (optionally with positional arguments); releases at cleanup
Allocate or update data stored in a pixel buffer object
Setup a vertex buffer with `data` vertices as `shader_variable` on shader
Update a texture associated with a PBO
Update a vertex buffer with `data` vertices
AreaDetector arrays are not strided AreaDetector arrays are big endian - so let OpenGL take care of byteswapping if that doesn't match up with the system/array endianness gl.glPixelStorei(gl.GL_UNPACK_SWAP_BYTES, int(not array_data.dtype.isnative)) | 554 | en | 0.687178 |
import base64 as _base64
import hashlib as _hashlib
import http.server as _BaseHTTPServer
import os as _os
import re as _re
import urllib.parse as _urlparse
import webbrowser as _webbrowser
from http import HTTPStatus as _StatusCodes
from multiprocessing import get_context as _mp_get_context
from urllib.parse import urlencode as _urlencode
import keyring as _keyring
import requests as _requests
from flytekit.loggers import auth_logger
_code_verifier_length = 64
_random_seed_length = 40
_utf_8 = "utf-8"
# Identifies the service used for storing passwords in keyring
_keyring_service_name = "flyteauth"
# Identifies the key used for storing and fetching from keyring. In our case, instead of a username as the keyring docs
# suggest, we are storing a user's oidc.
_keyring_access_token_storage_key = "access_token"
_keyring_refresh_token_storage_key = "refresh_token"
def _generate_code_verifier():
"""
Generates a 'code_verifier' as described in https://tools.ietf.org/html/rfc7636#section-4.1
Adapted from https://github.com/openstack/deb-python-oauth2client/blob/master/oauth2client/_pkce.py.
:return str:
"""
code_verifier = _base64.urlsafe_b64encode(_os.urandom(_code_verifier_length)).decode(_utf_8)
# Eliminate invalid characters.
code_verifier = _re.sub(r"[^a-zA-Z0-9_\-.~]+", "", code_verifier)
if len(code_verifier) < 43:
raise ValueError("Verifier too short. number of bytes must be > 30.")
elif len(code_verifier) > 128:
raise ValueError("Verifier too long. number of bytes must be < 97.")
return code_verifier
def _generate_state_parameter():
state = _base64.urlsafe_b64encode(_os.urandom(_random_seed_length)).decode(_utf_8)
# Eliminate invalid characters.
code_verifier = _re.sub("[^a-zA-Z0-9-_.,]+", "", state)
return code_verifier
def _create_code_challenge(code_verifier):
"""
Adapted from https://github.com/openstack/deb-python-oauth2client/blob/master/oauth2client/_pkce.py.
:param str code_verifier: represents a code verifier generated by generate_code_verifier()
:return str: urlsafe base64-encoded sha256 hash digest
"""
code_challenge = _hashlib.sha256(code_verifier.encode(_utf_8)).digest()
code_challenge = _base64.urlsafe_b64encode(code_challenge).decode(_utf_8)
# Eliminate invalid characters
code_challenge = code_challenge.replace("=", "")
return code_challenge
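# Together the two helpers above form the client half of PKCE (RFC 7636): the
# verifier never leaves the client with the authorization request, only its
# S256 challenge does, and the server later checks that
# BASE64URL(SHA256(code_verifier)) sent to the token endpoint matches the
# stored code_challenge. For example:
#
#     verifier = _generate_code_verifier()
#     challenge = _create_code_challenge(verifier)
#     # 'challenge' goes into the authorization URL; 'verifier' is only
#     # revealed in the back-channel token request.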
class AuthorizationCode(object):
def __init__(self, code, state):
self._code = code
self._state = state
@property
def code(self):
return self._code
@property
def state(self):
return self._state
class OAuthCallbackHandler(_BaseHTTPServer.BaseHTTPRequestHandler):
"""
A simple wrapper around BaseHTTPServer.BaseHTTPRequestHandler that handles a callback URL that accepts an
authorization token.
"""
def do_GET(self):
url = _urlparse.urlparse(self.path)
if url.path.strip("/") == self.server.redirect_path.strip("/"):
self.send_response(_StatusCodes.OK)
self.end_headers()
self.handle_login(dict(_urlparse.parse_qsl(url.query)))
else:
self.send_response(_StatusCodes.NOT_FOUND)
def handle_login(self, data):
self.server.handle_authorization_code(AuthorizationCode(data["code"], data["state"]))
class OAuthHTTPServer(_BaseHTTPServer.HTTPServer):
"""
A simple wrapper around the BaseHTTPServer.HTTPServer implementation that binds an authorization_client for handling
authorization code callbacks.
"""
def __init__(
self,
server_address,
RequestHandlerClass,
bind_and_activate=True,
redirect_path=None,
queue=None,
):
_BaseHTTPServer.HTTPServer.__init__(self, server_address, RequestHandlerClass, bind_and_activate)
self._redirect_path = redirect_path
self._auth_code = None
self._queue = queue
@property
def redirect_path(self):
return self._redirect_path
def handle_authorization_code(self, auth_code):
self._queue.put(auth_code)
self.server_close()
def handle_request(self, queue=None):
self._queue = queue
return super().handle_request()
class Credentials(object):
def __init__(self, access_token=None):
self._access_token = access_token
@property
def access_token(self):
return self._access_token
class AuthorizationClient(object):
def __init__(
self,
auth_endpoint=None,
token_endpoint=None,
scopes=None,
client_id=None,
redirect_uri=None,
client_secret=None,
):
self._auth_endpoint = auth_endpoint
self._token_endpoint = token_endpoint
self._client_id = client_id
self._scopes = scopes
self._redirect_uri = redirect_uri
self._code_verifier = _generate_code_verifier()
code_challenge = _create_code_challenge(self._code_verifier)
self._code_challenge = code_challenge
state = _generate_state_parameter()
self._state = state
self._credentials = None
self._refresh_token = None
self._headers = {"content-type": "application/x-www-form-urlencoded"}
self._expired = False
self._client_secret = client_secret
self._params = {
"client_id": client_id, # This must match the Client ID of the OAuth application.
"response_type": "code", # Indicates the authorization code grant
"scope": " ".join(s.strip("' ") for s in scopes).strip(
"[]'"
), # ensures that the /token endpoint returns an ID and refresh token
# callback location where the user-agent will be directed to.
"redirect_uri": self._redirect_uri,
"state": state,
"code_challenge": code_challenge,
"code_challenge_method": "S256",
}
# Prefer to use already-fetched token values when they've been set globally.
self._refresh_token = _keyring.get_password(_keyring_service_name, _keyring_refresh_token_storage_key)
access_token = _keyring.get_password(_keyring_service_name, _keyring_access_token_storage_key)
if access_token:
self._credentials = Credentials(access_token=access_token)
def __repr__(self):
return f"AuthorizationClient({self._auth_endpoint}, {self._token_endpoint}, {self._client_id}, {self._scopes}, {self._redirect_uri})"
@property
def has_valid_credentials(self) -> bool:
return self._credentials is not None
@property
def can_refresh_token(self) -> bool:
return self._refresh_token is not None
def start_authorization_flow(self):
# In the absence of globally-set token values, initiate the token request flow
ctx = _mp_get_context("fork")
q = ctx.Queue()
# First prepare the callback server in the background
server = self._create_callback_server()
server_process = ctx.Process(target=server.handle_request, args=(q,))
server_process.daemon = True
server_process.start()
# Send the call to request the authorization code in the background
self._request_authorization_code()
# Request the access token once the auth code has been received.
auth_code = q.get()
server_process.terminate()
self.request_access_token(auth_code)
def _create_callback_server(self):
server_url = _urlparse.urlparse(self._redirect_uri)
server_address = (server_url.hostname, server_url.port)
return OAuthHTTPServer(server_address, OAuthCallbackHandler, redirect_path=server_url.path)
def _request_authorization_code(self):
scheme, netloc, path, _, _, _ = _urlparse.urlparse(self._auth_endpoint)
query = _urlencode(self._params)
endpoint = _urlparse.urlunparse((scheme, netloc, path, None, query, None))
auth_logger.debug(f"Requesting authorization code through {endpoint}")
_webbrowser.open_new_tab(endpoint)
def _initialize_credentials(self, auth_token_resp):
"""
The auth_token_resp body is of the form:
{
"access_token": "foo",
"refresh_token": "bar",
"token_type": "Bearer"
}
"""
response_body = auth_token_resp.json()
if "access_token" not in response_body:
raise ValueError('Expected "access_token" in response from oauth server')
if "refresh_token" in response_body:
self._refresh_token = response_body["refresh_token"]
access_token = response_body["access_token"]
refresh_token = response_body["refresh_token"]
_keyring.set_password(_keyring_service_name, _keyring_access_token_storage_key, access_token)
_keyring.set_password(_keyring_service_name, _keyring_refresh_token_storage_key, refresh_token)
self._credentials = Credentials(access_token=access_token)
def request_access_token(self, auth_code):
if self._state != auth_code.state:
raise ValueError(f"Unexpected state parameter [{auth_code.state}] passed")
self._params.update(
{
"code": auth_code.code,
"code_verifier": self._code_verifier,
"grant_type": "authorization_code",
}
)
resp = _requests.post(
url=self._token_endpoint,
data=self._params,
headers=self._headers,
allow_redirects=False,
)
if resp.status_code != _StatusCodes.OK:
# TODO: handle expected (?) error cases:
# https://auth0.com/docs/flows/guides/device-auth/call-api-device-auth#token-responses
raise Exception(
"Failed to request access token with response: [{}] {}".format(resp.status_code, resp.content)
)
self._initialize_credentials(resp)
def refresh_access_token(self):
if self._refresh_token is None:
raise ValueError("no refresh token available with which to refresh authorization credentials")
resp = _requests.post(
url=self._token_endpoint,
data={"grant_type": "refresh_token", "client_id": self._client_id, "refresh_token": self._refresh_token},
headers=self._headers,
allow_redirects=False,
)
if resp.status_code != _StatusCodes.OK:
self._expired = True
# In the absence of a successful response, assume the refresh token is expired. This should indicate
# to the caller that the AuthorizationClient is defunct and a new one needs to be re-initialized.
_keyring.delete_password(_keyring_service_name, _keyring_access_token_storage_key)
_keyring.delete_password(_keyring_service_name, _keyring_refresh_token_storage_key)
return
self._initialize_credentials(resp)
@property
def credentials(self):
"""
:return flytekit.clis.auth.auth.Credentials:
"""
return self._credentials
@property
def expired(self):
"""
:return bool:
"""
return self._expired
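# Typical use of the client defined above (a sketch; the endpoints, client id
# and localhost port are placeholders, not values defined in this module):
#
#     client = AuthorizationClient(
#         auth_endpoint="https://idp.example.com/oauth2/authorize",
#         token_endpoint="https://idp.example.com/oauth2/token",
#         scopes=["openid", "offline_access"],
#         client_id="my-client-id",
#         redirect_uri="http://localhost:53593/callback",
#     )
#     if not client.has_valid_credentials:
#         client.start_authorization_flow()  # opens a browser, waits for the callback
#     access_token = client.credentials.access_token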
| flytekit/clis/auth/auth.py | 11,325 | A simple wrapper around BaseHTTPServer.BaseHTTPRequestHandler that handles a callback URL that accepts an
authorization token.
A simple wrapper around the BaseHTTPServer.HTTPServer implementation that binds an authorization_client for handling
authorization code callbacks.
Adapted from https://github.com/openstack/deb-python-oauth2client/blob/master/oauth2client/_pkce.py.
:param str code_verifier: represents a code verifier generated by generate_code_verifier()
:return str: urlsafe base64-encoded sha256 hash digest
Generates a 'code_verifier' as described in https://tools.ietf.org/html/rfc7636#section-4.1
Adapted from https://github.com/openstack/deb-python-oauth2client/blob/master/oauth2client/_pkce.py.
:return str:
The auth_token_resp body is of the form:
{
"access_token": "foo",
"refresh_token": "bar",
"token_type": "Bearer"
}
:return flytekit.clis.auth.auth.Credentials:
:return bool:
Identifies the service used for storing passwords in keyring Identifies the key used for storing and fetching from keyring. In our case, instead of a username as the keyring docs suggest, we are storing a user's oidc. Eliminate invalid characters. Eliminate invalid characters. Eliminate invalid characters This must match the Client ID of the OAuth application. Indicates the authorization code grant ensures that the /token endpoint returns an ID and refresh token callback location where the user-agent will be directed to. Prefer to use already-fetched token values when they've been set globally. In the absence of globally-set token values, initiate the token request flow First prepare the callback server in the background Send the call to request the authorization code in the background Request the access token once the auth code has been received. TODO: handle expected (?) error cases: https://auth0.com/docs/flows/guides/device-auth/call-api-device-authtoken-responses In the absence of a successful response, assume the refresh token is expired. This should indicate to the caller that the AuthorizationClient is defunct and a new one needs to be re-initialized. | 2,087 | en | 0.758998 |
# Copyright (c) 2012-2016 Seafile Ltd.
import logging
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAdminUser
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import status
from seaserv import seafile_api, ccnet_api
from seahub.group.utils import get_group_member_info, is_group_member
from seahub.group.signals import add_user_to_group
from seahub.avatar.settings import AVATAR_DEFAULT_SIZE
from seahub.base.accounts import User
from seahub.base.templatetags.seahub_tags import email2nickname
from seahub.api2.authentication import TokenAuthentication
from seahub.api2.throttling import UserRateThrottle
from seahub.api2.utils import api_error
logger = logging.getLogger(__name__)
class AdminGroupMembers(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
throttle_classes = (UserRateThrottle,)
permission_classes = (IsAdminUser,)
def get(self, request, group_id, format=None):
""" List all group members
Permission checking:
1. only admin can perform this action.
"""
if not request.user.admin_permissions.can_manage_group():
return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied.')
group_id = int(group_id)
group = ccnet_api.get_group(group_id)
if not group:
error_msg = 'Group %d not found.' % group_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
try:
avatar_size = int(request.GET.get('avatar_size',
AVATAR_DEFAULT_SIZE))
except ValueError:
avatar_size = AVATAR_DEFAULT_SIZE
try:
members = ccnet_api.get_group_members(group_id)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
group_members_info = []
for m in members:
member_info = get_group_member_info(request, group_id, m.user_name, avatar_size)
group_members_info.append(member_info)
group_members = {
'group_id': group_id,
'group_name': group.group_name,
'members': group_members_info
}
return Response(group_members)
def post(self, request, group_id):
"""
Bulk add group members.
Permission checking:
1. only admin can perform this action.
"""
if not request.user.admin_permissions.can_manage_group():
return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied.')
# argument check
group_id = int(group_id)
group = ccnet_api.get_group(group_id)
if not group:
error_msg = 'Group %d not found.' % group_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
emails = request.POST.getlist('email', '')
if not emails:
error_msg = 'Email invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
result = {}
result['failed'] = []
result['success'] = []
emails_need_add = []
for email in emails:
try:
User.objects.get(email=email)
except User.DoesNotExist:
result['failed'].append({
'email': email,
'error_msg': 'User %s not found.' % email
})
continue
if is_group_member(group_id, email, in_structure=False):
result['failed'].append({
'email': email,
'error_msg': 'User %s is already a group member.' % email2nickname(email)
})
continue
emails_need_add.append(email)
# Add user to group.
for email in emails_need_add:
try:
ccnet_api.group_add_member(group_id, group.creator_name, email)
member_info = get_group_member_info(request, group_id, email)
result['success'].append(member_info)
except Exception as e:
logger.error(e)
result['failed'].append({
'email': email,
'error_msg': 'Internal Server Error'
})
add_user_to_group.send(sender=None,
group_staff=request.user.username,
group_id=group_id,
added_user=email)
return Response(result)
class AdminGroupMember(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
throttle_classes = (UserRateThrottle,)
permission_classes = (IsAdminUser,)
def put(self, request, group_id, email, format=None):
""" update role of a group member
Permission checking:
1. only admin can perform this action.
"""
if not request.user.admin_permissions.can_manage_group():
return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied.')
# argument check
group_id = int(group_id)
group = ccnet_api.get_group(group_id)
if not group:
error_msg = 'Group %d not found.' % group_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
try:
User.objects.get(email=email)
except User.DoesNotExist:
error_msg = 'User %s not found.' % email
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
try:
if not is_group_member(group_id, email):
error_msg = 'Email %s invalid.' % email
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
is_admin = request.data.get('is_admin', '')
try:
# set/unset a specific group member as admin
if is_admin.lower() == 'true':
ccnet_api.group_set_admin(group_id, email)
elif is_admin.lower() == 'false':
ccnet_api.group_unset_admin(group_id, email)
else:
error_msg = 'is_admin invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
member_info = get_group_member_info(request, group_id, email)
return Response(member_info)
def delete(self, request, group_id, email, format=None):
""" Delete an user from group
Permission checking:
1. only admin can perform this action.
"""
if not request.user.admin_permissions.can_manage_group():
return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied.')
# argument check
group_id = int(group_id)
group = ccnet_api.get_group(group_id)
if not group:
error_msg = 'Group %d not found.' % group_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
# delete member from group
try:
if not is_group_member(group_id, email):
return Response({'success': True})
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
if group.creator_name == email:
error_msg = '%s is group owner, can not be removed.' % email
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
try:
ccnet_api.group_remove_member(group_id, group.creator_name, email)
# remove repo-group share info of all 'email' owned repos
seafile_api.remove_group_repos_by_owner(group_id, email)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
return Response({'success': True})
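# A sketch of how the bulk-add endpoint above is typically exercised; the URL
# prefix and the token are illustrative (the actual route is defined in the
# project's urls.py, not in this file):
#
#     import requests
#     resp = requests.post(
#         "https://seafile.example.com/api/v2.1/admin/groups/42/members/",
#         headers={"Authorization": "Token <admin-token>"},
#         data={"email": ["alice@example.com", "bob@example.com"]},
#     )
#     # resp.json() -> {"failed": [...], "success": [...]} as assembled in post() above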
| seahub/api2/endpoints/admin/group_members.py | 8,391 | Delete an user from group
Permission checking:
1. only admin can perform this action.
List all group members
Permission checking:
1. only admin can perform this action.
Bulk add group members.
Permission checking:
1. only admin can perform this action.
update role of a group member
Permission checking:
1. only admin can perform this action.
Copyright (c) 2012-2016 Seafile Ltd. argument check Add user to group. argument check set/unset a specific group member as admin argument check delete member from group remove repo-group share info of all 'email' owned repos | 573 | en | 0.874384 |
# Copyright 2013 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon.utils import memoized
from horizon import workflows
from openstack_dashboard import api as dash_api
from openstack_dashboard.contrib.trove import api
from openstack_dashboard.dashboards.project.instances \
import utils as instance_utils
LOG = logging.getLogger(__name__)
class SetInstanceDetailsAction(workflows.Action):
name = forms.CharField(max_length=80, label=_("Instance Name"))
flavor = forms.ChoiceField(label=_("Flavor"),
help_text=_("Size of image to launch."))
volume = forms.IntegerField(label=_("Volume Size"),
min_value=0,
initial=1,
help_text=_("Size of the volume in GB."))
datastore = forms.ChoiceField(label=_("Datastore"),
help_text=_(
"Type and version of datastore."))
class Meta(object):
name = _("Details")
help_text_template = "project/databases/_launch_details_help.html"
def clean(self):
if self.data.get("datastore", None) == "select_datastore_type_version":
msg = _("You must select a datastore type and version.")
self._errors["datastore"] = self.error_class([msg])
return self.cleaned_data
@memoized.memoized_method
def flavors(self, request):
try:
return api.trove.flavor_list(request)
except Exception:
LOG.exception("Exception while obtaining flavors list")
redirect = reverse("horizon:project:databases:index")
exceptions.handle(request,
_('Unable to obtain flavors.'),
redirect=redirect)
def populate_flavor_choices(self, request, context):
flavors = self.flavors(request)
if flavors:
return instance_utils.sort_flavor_list(request, flavors)
return []
@memoized.memoized_method
def datastores(self, request):
try:
return api.trove.datastore_list(request)
except Exception:
LOG.exception("Exception while obtaining datastores list")
self._datastores = []
@memoized.memoized_method
def datastore_versions(self, request, datastore):
try:
return api.trove.datastore_version_list(request, datastore)
except Exception:
LOG.exception("Exception while obtaining datastore version list")
self._datastore_versions = []
def populate_datastore_choices(self, request, context):
choices = ()
set_initial = False
datastores = self.datastores(request)
if datastores is not None:
num_datastores_with_one_version = 0
for ds in datastores:
versions = self.datastore_versions(request, ds.name)
if not set_initial:
if len(versions) >= 2:
set_initial = True
elif len(versions) == 1:
num_datastores_with_one_version += 1
if num_datastores_with_one_version > 1:
set_initial = True
if versions:
# only add to choices if datastore has at least one version
version_choices = ()
for v in versions:
version_choices = (version_choices +
((ds.name + ',' + v.name, v.name),))
datastore_choices = (ds.name, version_choices)
choices = choices + (datastore_choices,)
if set_initial:
# prepend choice to force user to choose
initial = (('select_datastore_type_version',
_('Select datastore type and version')))
choices = (initial,) + choices
return choices
TROVE_ADD_USER_PERMS = getattr(settings, 'TROVE_ADD_USER_PERMS', [])
TROVE_ADD_DATABASE_PERMS = getattr(settings, 'TROVE_ADD_DATABASE_PERMS', [])
TROVE_ADD_PERMS = TROVE_ADD_USER_PERMS + TROVE_ADD_DATABASE_PERMS
class SetInstanceDetails(workflows.Step):
action_class = SetInstanceDetailsAction
contributes = ("name", "volume", "flavor", "datastore")
class SetNetworkAction(workflows.Action):
network = forms.MultipleChoiceField(label=_("Networks"),
widget=forms.CheckboxSelectMultiple(),
error_messages={
'required': _(
"At least one network must"
" be specified.")},
help_text=_("Launch instance with"
" these networks"))
def __init__(self, request, *args, **kwargs):
super(SetNetworkAction, self).__init__(request, *args, **kwargs)
network_list = self.fields["network"].choices
if len(network_list) == 1:
self.fields['network'].initial = [network_list[0][0]]
class Meta(object):
name = _("Networking")
permissions = ('openstack.services.network',)
help_text = _("Select networks for your instance.")
def populate_network_choices(self, request, context):
try:
tenant_id = self.request.user.tenant_id
networks = dash_api.neutron.network_list_for_tenant(request,
tenant_id)
network_list = [(network.id, network.name_or_id)
for network in networks]
except Exception:
network_list = []
exceptions.handle(request,
_('Unable to retrieve networks.'))
return network_list
class SetNetwork(workflows.Step):
action_class = SetNetworkAction
template_name = "project/databases/_launch_networks.html"
contributes = ("network_id",)
def contribute(self, data, context):
if data:
networks = self.workflow.request.POST.getlist("network")
# If no networks are explicitly specified, network list
# contains an empty string, so remove it.
networks = [n for n in networks if n != '']
if networks:
context['network_id'] = networks
return context
class AddDatabasesAction(workflows.Action):
"""Initialize the database with users/databases. This tab will honor
the settings which should be a list of permissions required:
* TROVE_ADD_USER_PERMS = []
* TROVE_ADD_DATABASE_PERMS = []
"""
databases = forms.CharField(label=_('Initial Databases'),
required=False,
help_text=_('Comma separated list of '
'databases to create'))
user = forms.CharField(label=_('Initial Admin User'),
required=False,
help_text=_("Initial admin user to add"))
password = forms.CharField(widget=forms.PasswordInput(),
label=_("Password"),
required=False)
host = forms.CharField(label=_("Allowed Host (optional)"),
required=False,
help_text=_("Host or IP that the user is allowed "
"to connect through."))
class Meta(object):
name = _("Initialize Databases")
permissions = TROVE_ADD_PERMS
help_text_template = "project/databases/_launch_initialize_help.html"
def clean(self):
cleaned_data = super(AddDatabasesAction, self).clean()
if cleaned_data.get('user'):
if not cleaned_data.get('password'):
msg = _('You must specify a password if you create a user.')
self._errors["password"] = self.error_class([msg])
if not cleaned_data.get('databases'):
msg = _('You must specify at least one database if '
'you create a user.')
self._errors["databases"] = self.error_class([msg])
return cleaned_data
class InitializeDatabase(workflows.Step):
action_class = AddDatabasesAction
contributes = ["databases", 'user', 'password', 'host']
class AdvancedAction(workflows.Action):
initial_state = forms.ChoiceField(
label=_('Source for Initial State'),
required=False,
help_text=_("Choose initial state."),
choices=[
('', _('None')),
('backup', _('Restore from Backup')),
('master', _('Replicate from Instance'))],
widget=forms.Select(attrs={
'class': 'switchable',
'data-slug': 'initial_state'
}))
backup = forms.ChoiceField(
label=_('Backup Name'),
required=False,
help_text=_('Select a backup to restore'),
widget=forms.Select(attrs={
'class': 'switched',
'data-switch-on': 'initial_state',
'data-initial_state-backup': _('Backup Name')
}))
master = forms.ChoiceField(
label=_('Master Instance Name'),
required=False,
help_text=_('Select a master instance'),
widget=forms.Select(attrs={
'class': 'switched',
'data-switch-on': 'initial_state',
'data-initial_state-master': _('Master Instance Name')
}))
class Meta(object):
name = _("Advanced")
help_text_template = "project/databases/_launch_advanced_help.html"
def populate_backup_choices(self, request, context):
try:
backups = api.trove.backup_list(request)
choices = [(b.id, b.name) for b in backups
if b.status == 'COMPLETED']
except Exception:
choices = []
if choices:
choices.insert(0, ("", _("Select backup")))
else:
choices.insert(0, ("", _("No backups available")))
return choices
def populate_master_choices(self, request, context):
try:
instances = api.trove.instance_list(request)
choices = [(i.id, i.name) for i in
instances if i.status == 'ACTIVE']
except Exception:
choices = []
if choices:
choices.insert(0, ("", _("Select instance")))
else:
choices.insert(0, ("", _("No instances available")))
return choices
def clean(self):
cleaned_data = super(AdvancedAction, self).clean()
initial_state = cleaned_data.get("initial_state")
if initial_state == 'backup':
backup = self.cleaned_data['backup']
if backup:
try:
bkup = api.trove.backup_get(self.request, backup)
self.cleaned_data['backup'] = bkup.id
except Exception:
raise forms.ValidationError(_("Unable to find backup!"))
else:
raise forms.ValidationError(_("A backup must be selected!"))
cleaned_data['master'] = None
elif initial_state == 'master':
master = self.cleaned_data['master']
if master:
try:
api.trove.instance_get(self.request, master)
except Exception:
raise forms.ValidationError(
_("Unable to find master instance!"))
else:
raise forms.ValidationError(
_("A master instance must be selected!"))
cleaned_data['backup'] = None
else:
cleaned_data['master'] = None
cleaned_data['backup'] = None
return cleaned_data
class Advanced(workflows.Step):
action_class = AdvancedAction
contributes = ['backup', 'master']
class LaunchInstance(workflows.Workflow):
slug = "launch_instance"
name = _("Launch Instance")
finalize_button_name = _("Launch")
success_message = _('Launched %(count)s named "%(name)s".')
failure_message = _('Unable to launch %(count)s named "%(name)s".')
success_url = "horizon:project:databases:index"
default_steps = (SetInstanceDetails,
SetNetwork,
InitializeDatabase,
Advanced)
def __init__(self, request=None, context_seed=None, entry_point=None,
*args, **kwargs):
super(LaunchInstance, self).__init__(request, context_seed,
entry_point, *args, **kwargs)
self.attrs['autocomplete'] = (
settings.HORIZON_CONFIG.get('password_autocomplete'))
def format_status_message(self, message):
name = self.context.get('name', 'unknown instance')
return message % {"count": _("instance"), "name": name}
def _get_databases(self, context):
"""Returns the initial databases for this instance."""
databases = None
if context.get('databases'):
dbs = context['databases']
databases = [{'name': d.strip()} for d in dbs.split(',')]
return databases
def _get_users(self, context):
users = None
if context.get('user'):
user = {
'name': context['user'],
'password': context['password'],
'databases': self._get_databases(context),
}
if context['host']:
user['host'] = context['host']
users = [user]
return users
def _get_backup(self, context):
backup = None
if context.get('backup'):
backup = {'backupRef': context['backup']}
return backup
def _get_nics(self, context):
netids = context.get('network_id', None)
if netids:
return [{"net-id": netid, "v4-fixed-ip": ""}
for netid in netids]
else:
return None
def handle(self, request, context):
try:
datastore = self.context['datastore'].split(',')[0]
datastore_version = self.context['datastore'].split(',')[1]
LOG.info("Launching database instance with parameters "
"{name=%s, volume=%s, flavor=%s, "
"datastore=%s, datastore_version=%s, "
"dbs=%s, users=%s, "
"backups=%s, nics=%s, replica_of=%s}",
context['name'], context['volume'], context['flavor'],
datastore, datastore_version,
self._get_databases(context), self._get_users(context),
self._get_backup(context), self._get_nics(context),
context.get('master'))
api.trove.instance_create(request,
context['name'],
context['volume'],
context['flavor'],
datastore=datastore,
datastore_version=datastore_version,
databases=self._get_databases(context),
users=self._get_users(context),
restore_point=self._get_backup(context),
nics=self._get_nics(context),
replica_of=context.get('master'))
return True
except Exception:
exceptions.handle(request)
return False
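

# Usage sketch (illustrative): in Horizon this workflow is normally attached to
# a WorkflowView rather than instantiated directly, along the lines of:
#
#     class LaunchInstanceView(workflows.WorkflowView):
#         workflow_class = LaunchInstance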
| openstack_dashboard/contrib/trove/content/databases/workflows/create_instance.py | 16,647 | Initialize the database with users/databases. This tab will honor
the settings which should be a list of permissions required:
* TROVE_ADD_USER_PERMS = []
* TROVE_ADD_DATABASE_PERMS = []
Returns the initial databases for this instance.
Copyright 2013 Rackspace Hosting Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. only add to choices if datastore has at least one version prepend choice to force user to choose If no networks are explicitly specified, network list contains an empty string, so remove it. | 1,012 | en | 0.817702 |
from tkinter import *
from time import *
## Global variable declarations ##
fnameList = ["jeju1.gif", "jeju2.gif", "jeju3.gif", "jeju4.gif", "jeju5.gif", "jeju6.gif", "jeju7.gif", "jeju8.gif", "jeju9.gif", "jeju10.gif"]
photoList = [None] * 10  # one slot per image in fnameList
num1,num2,num3 = 0,1,2
## Function definitions ##
def clickNext() :
global num1,num2,num3
num1 += 1
num2 += 1
num3 += 1
if num1 > 9 :
num1 = 0
if num2 > 9 :
num2 = 0
if num3 > 9 :
num3 = 0
photo = PhotoImage(file="gif/" + fnameList[num1])
photo = photo.subsample(2, 2)
photo1 = PhotoImage(file="gif/" + fnameList[num2])
photo1 = photo1.subsample(4, 4)
photo2 = PhotoImage(file="gif/" + fnameList[num3])
photo2 = photo2.subsample(4, 4)
pLabel.configure(image = photo)
pLabel.image=photo
pLabel1.configure(image = photo1)
pLabel1.image=photo1
pLabel2.configure(image = photo2)
pLabel2.image=photo2
def clickPrev() :
global num1,num2,num3
num1 -= 1
num2 -= 1
num3 -= 1
if num1 < 0 :
num1 = 9
if num2 < 0 :
num2 = 9
if num3 < 0 :
num3 = 9
photo = PhotoImage(file="gif/" + fnameList[num1])
photo = photo.subsample(2, 2)
photo1 = PhotoImage(file="gif/" + fnameList[num2])
photo1 = photo1.subsample(4, 4)
photo2 = PhotoImage(file="gif/" + fnameList[num3])
photo2 = photo2.subsample(4, 4)
pLabel.configure(image = photo)
pLabel.image=photo
pLabel1.configure(image = photo1)
pLabel1.image=photo1
pLabel2.configure(image = photo2)
pLabel2.image=photo2
def clickFirst():
global num1,num2,num3
num1,num2,num3 = 0, 9 , 1
photo = PhotoImage(file="gif/" + fnameList[num1])
photo = photo.subsample(2, 2)
photo1 = PhotoImage(file="gif/" + fnameList[num2])
photo1 = photo1.subsample(4, 4)
photo2 = PhotoImage(file="gif/" + fnameList[num3])
photo2 = photo2.subsample(4, 4)
pLabel.configure(image=photo)
pLabel.image = photo
pLabel1.configure(image=photo1)
pLabel1.image = photo1
pLabel2.configure(image=photo2)
pLabel2.image = photo2
def clickEnd() :
global num1,num2,num3
num1,num2,num3 = 9, 8 ,0
photo = PhotoImage(file="gif/" + fnameList[num1])
photo = photo.subsample(2, 2)
photo1 = PhotoImage(file="gif/" + fnameList[num2])
photo1 = photo1.subsample(4, 4)
photo2 = PhotoImage(file="gif/" + fnameList[num3])
photo2 = photo2.subsample(4, 4)
pLabel.configure(image = photo)
pLabel.image=photo
pLabel1.configure(image = photo1)
pLabel1.image=photo1
pLabel2.configure(image = photo2)
pLabel2.image=photo2
## Main code ##
window = Tk()
window.geometry("730x330")
window.title("사진 앨범 보기")
window.configure(background="white")
btnPrev = Button(window, text = "<< 이전", command = clickPrev, width = 10, background="skyblue")
btnNext = Button(window, text = "다음 >>", command = clickNext, width = 10, background="skyblue")
btnFirst = Button(window, text = "처 음", command = clickFirst, width = 10, background="skyblue")
btnEnd = Button(window, text = "마지막", command = clickEnd, width = 10, background="skyblue")
photo = PhotoImage(file = "gif/" + fnameList[0])
photo = photo.subsample(2,2)
pLabel = Label(window, image = photo)
photo1 = PhotoImage(file = "gif/" + fnameList[9])
photo1 = photo1.subsample(4,4)
pLabel1 = Label(window, image = photo1)
photo2 = PhotoImage(file = "gif/" + fnameList[1])
photo2 = photo2.subsample(4,4)
pLabel2 = Label(window, image = photo2)
btnPrev.place(x = 280, y = 270)
btnNext.place(x = 380, y = 270)
btnFirst.place(x = 180, y = 270)
btnEnd.place(x = 480, y = 270)
pLabel1.place(x = 20, y = 50)
pLabel.place(x = 200, y = 10)
pLabel2.place(x = 545, y = 50)
window.mainloop()
| 4-1/WIndow Programing/20180502/p1.py | 3,809 | 전역 변수 선언 부분 함수 선언 부분 메인 코드 부분 | 33 | ko | 1.00007 |
# -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
import pytest
import asyncio
from unittest.mock import patch, call, MagicMock
from foglamp.common import logger
from foglamp.common.storage_client.storage_client import ReadingsStorageClient, StorageClient
from foglamp.common.statistics import Statistics
from foglamp.tasks.purge.purge import Purge
from foglamp.common.process import FoglampProcess
from foglamp.common.configuration_manager import ConfigurationManager
from foglamp.common.audit_logger import AuditLogger
from foglamp.common.storage_client.exceptions import *
__author__ = "Vaibhav Singhal"
__copyright__ = "Copyright (c) 2017 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
@pytest.allure.feature("unit")
@pytest.allure.story("tasks", "purge")
class TestPurge:
"""Test the units of purge.py"""
def test_init(self, event_loop):
"""Test that creating an instance of Purge calls init of FoglampProcess and creates loggers"""
mockStorageClient = MagicMock(spec=StorageClient)
mockAuditLogger = AuditLogger(mockStorageClient)
with patch.object(FoglampProcess, "__init__") as mock_process:
with patch.object(logger, "setup") as log:
with patch.object(mockAuditLogger, "__init__", return_value=None):
p = Purge(loop=event_loop)
assert isinstance(p, Purge)
assert isinstance(p._audit, AuditLogger)
log.assert_called_once_with("Data Purge")
mock_process.assert_called_once_with()
def test_write_statistics(self, event_loop):
"""Test that write_statistics calls update statistics with defined keys and value increments"""
@asyncio.coroutine
def mock_s_update():
return ""
mockStorageClient = MagicMock(spec=StorageClient)
mockAuditLogger = AuditLogger(mockStorageClient)
with patch.object(FoglampProcess, '__init__'):
with patch.object(Statistics, 'update', return_value=mock_s_update()) as mock_stats_update:
with patch.object(mockAuditLogger, "__init__", return_value=None):
p = Purge(loop=event_loop)
p._storage = mockStorageClient
p.write_statistics(1, 2)
mock_stats_update.assert_has_calls([call('PURGED', 1), call('UNSNPURGED', 2)])
def test_set_configuration(self, event_loop):
"""Test that purge's set_configuration returns configuration item with key 'PURGE_READ' """
@asyncio.coroutine
def mock_cm_return():
return ""
mockStorageClient = MagicMock(spec=StorageClient)
mockAuditLogger = AuditLogger(mockStorageClient)
with patch.object(FoglampProcess, '__init__'):
with patch.object(mockAuditLogger, "__init__", return_value=None):
p = Purge(loop=event_loop)
p._storage = MagicMock(spec=StorageClient)
mock_cm = ConfigurationManager(p._storage)
with patch.object(mock_cm, 'create_category', return_value=mock_cm_return()) as mock_create_cat:
with patch.object(mock_cm, 'get_category_all_items', return_value=mock_cm_return()) \
as mock_get_cat:
p.set_configuration()
mock_get_cat.assert_called_once_with('PURGE_READ')
args, kwargs = mock_create_cat.call_args
assert len(args) == 3
assert args[0] == 'PURGE_READ'
@pytest.fixture()
def store_purge(self, **kwargs):
if kwargs.get('age') == '-1' or kwargs.get('size') == '-1':
raise StorageServerError(400, "Bla", "Some Error")
return {"readings": 10, "removed": 1, "unsentPurged": 2, "unsentRetained": 7}
config = {"purgeAgeSize": {"retainUnsent": {"value": "False"}, "age": {"value": "72"}, "size": {"value": "20"}},
"purgeAge": {"retainUnsent": {"value": "False"}, "age": {"value": "72"}, "size": {"value": "0"}},
"purgeSize": {"retainUnsent": {"value": "False"}, "age": {"value": "0"}, "size": {"value": "100"}},
"retainAgeSize": {"retainUnsent": {"value": "True"}, "age": {"value": "72"}, "size": {"value": "20"}},
"retainAge": {"retainUnsent": {"value": "True"}, "age": {"value": "72"}, "size": {"value": "0"}},
"retainSize": {"retainUnsent": {"value": "True"}, "age": {"value": "0"}, "size": {"value": "100"}}}
@pytest.mark.parametrize("conf, expected_return, expected_calls", [
(config["purgeAgeSize"], (2, 4), {'sent_id': 0, 'size': '20', 'flag': 'purge'}),
(config["purgeAge"], (1, 2), {'sent_id': 0, 'age': '72', 'flag': 'purge'}),
(config["purgeSize"], (1, 2), {'sent_id': 0, 'size': '100', 'flag': 'purge'}),
(config["retainAgeSize"], (2, 4), {'sent_id': 0, 'size': '20', 'flag': 'retain'}),
(config["retainAge"], (1, 2), {'sent_id': 0, 'age': '72', 'flag': 'retain'}),
(config["retainSize"], (1, 2), {'sent_id': 0, 'size': '100', 'flag': 'retain'})
])
def test_purge_data(self, event_loop, conf, expected_return, expected_calls):
"""Test that purge_data calls Storage's purge with defined configuration"""
@asyncio.coroutine
def mock_audit_info():
return ""
mockStorageClient = MagicMock(spec=StorageClient)
mockAuditLogger = AuditLogger(mockStorageClient)
with patch.object(FoglampProcess, '__init__'):
with patch.object(mockAuditLogger, "__init__", return_value=None):
p = Purge(loop=event_loop)
p._logger = logger
p._logger.info = MagicMock()
p._logger.error = MagicMock()
p._storage = MagicMock(spec=StorageClient)
p._readings_storage = MagicMock(spec=ReadingsStorageClient)
audit = p._audit
with patch.object(p._readings_storage, 'purge', side_effect=self.store_purge) as mock_storage_purge:
with patch.object(audit, 'information', return_value=mock_audit_info()) as audit_info:
# Test the positive case when all if conditions in purge_data pass
assert expected_return == p.purge_data(conf)
assert audit_info.called
args, kwargs = mock_storage_purge.call_args
assert kwargs == expected_calls
@pytest.mark.parametrize("conf, expected_return", [
({"retainUnsent": {"value": "False"}, "age": {"value": "0"}, "size": {"value": "0"}}, (0, 0)),
({"retainUnsent": {"value": "True"}, "age": {"value": "0"}, "size": {"value": "0"}}, (0, 0))
])
def test_purge_data_no_data_purged(self, event_loop, conf, expected_return):
"""Test that purge_data logs message when no data was purged"""
@asyncio.coroutine
def mock_audit_info():
return ""
mockStorageClient = MagicMock(spec=StorageClient)
mockAuditLogger = AuditLogger(mockStorageClient)
with patch.object(FoglampProcess, '__init__'):
with patch.object(mockAuditLogger, "__init__", return_value=None):
p = Purge(loop=event_loop)
p._logger = logger
p._logger.info = MagicMock()
p._logger.error = MagicMock()
p._storage = MagicMock(spec=StorageClient)
p._readings_storage = MagicMock(spec=ReadingsStorageClient)
audit = p._audit
with patch.object(p._readings_storage, 'purge', side_effect=self.store_purge):
with patch.object(audit, 'information', return_value=mock_audit_info()):
assert expected_return == p.purge_data(conf)
p._logger.info.assert_called_once_with("No rows purged")
@pytest.mark.parametrize("conf, expected_return", [
({"retainUnsent": {"value": "True"}, "age": {"value": "-1"}, "size": {"value": "-1"}}, (0, 0))
])
def test_purge_error_storage_response(self, event_loop, conf, expected_return):
"""Test that purge_data logs error when storage purge returns an error response"""
@asyncio.coroutine
def mock_audit_info():
return ""
mockStorageClient = MagicMock(spec=StorageClient)
mockAuditLogger = AuditLogger(mockStorageClient)
with patch.object(FoglampProcess, '__init__'):
with patch.object(mockAuditLogger, "__init__", return_value=None):
p = Purge(loop=event_loop)
p._logger = logger
p._logger.info = MagicMock()
p._logger.error = MagicMock()
p._storage = MagicMock(spec=StorageClient)
p._readings_storage = MagicMock(spec=ReadingsStorageClient)
audit = p._audit
with patch.object(p._readings_storage, 'purge', side_effect=self.store_purge):
with patch.object(audit, 'information', return_value=mock_audit_info()):
assert expected_return == p.purge_data(conf)
@pytest.mark.parametrize("conf, expected_error_key",
[({"retainUnsent": {"value": "True"}, "age": {"value": "bla"}, "size": {"value": "0"}},
"age"),
({"retainUnsent": {"value": "True"}, "age": {"value": "0"}, "size": {"value": "bla"}},
"size")])
def test_purge_data_invalid_conf(self, event_loop, conf, expected_error_key):
"""Test that purge_data raises exception when called with invalid configuration"""
@asyncio.coroutine
def mock_audit_info():
return ""
mockStorageClient = MagicMock(spec=StorageClient)
mockAuditLogger = AuditLogger(mockStorageClient)
with patch.object(FoglampProcess, '__init__'):
with patch.object(mockAuditLogger, "__init__", return_value=None):
p = Purge(loop=event_loop)
p._logger = logger
p._logger.info = MagicMock()
p._logger.error = MagicMock()
p._storage = MagicMock(spec=StorageClient)
p._readings_storage = MagicMock(spec=ReadingsStorageClient)
audit = p._audit
with patch.object(p._readings_storage, 'purge', side_effect=self.store_purge):
with patch.object(audit, 'information', return_value=mock_audit_info()):
# Test the code block when purge failed because of invalid configuration
p.purge_data(conf)
p._logger.error.assert_called_with('Configuration item {} bla should be integer!'.
format(expected_error_key))
def test_run(self, event_loop):
"""Test that run calls all units of purge process"""
@asyncio.coroutine
def mock_audit_info():
return ""
mockStorageClient = MagicMock(spec=StorageClient)
mockAuditLogger = AuditLogger(mockStorageClient)
with patch.object(FoglampProcess, '__init__'):
with patch.object(mockAuditLogger, "__init__", return_value=None):
p = Purge(loop=event_loop)
config = "Some config"
p._logger.exception = MagicMock()
with patch.object(p, 'set_configuration', return_value=config) as mock_set_config:
with patch.object(p, 'purge_data', return_value=(1, 2)) as mock_purge_data:
with patch.object(p, 'write_statistics') as mock_write_stats:
p.run()
# Test the positive case when no error in try block
mock_set_config.assert_called_once_with()
mock_purge_data.assert_called_once_with(config)
mock_write_stats.assert_called_once_with(1, 2)
def test_run_exception(self, event_loop):
"""Test that run calls all units of purge process and checks the exception handling"""
@asyncio.coroutine
def mock_audit_info():
return ""
mockStorageClient = MagicMock(spec=StorageClient)
mockAuditLogger = AuditLogger(mockStorageClient)
with patch.object(FoglampProcess, '__init__'):
with patch.object(mockAuditLogger, "__init__", return_value=None):
p = Purge(loop=event_loop)
config = "Some config"
p._logger.exception = MagicMock()
with patch.object(p, 'set_configuration', return_value=config):
with patch.object(p, 'purge_data', return_value=Exception()):
with patch.object(p, 'write_statistics'):
p.run()
# Test the negative case when function purge_data raise some exception
p._logger.exception.assert_called_once_with("'Exception' object is not iterable")
| tests/unit/python/foglamp/tasks/purge/test_purge.py | 13,189 | Test the units of purge.py
Test that creating an instance of Purge calls init of FoglampProcess and creates loggers
Test that purge_data calls Storage's purge with defined configuration
Test that purge_data raises exception when called with invalid configuration
Test that purge_data logs message when no data was purged
Test that purge_data logs error when storage purge returns an error response
Test that run calls all units of purge process
Test that run calls all units of purge process and checks the exception handling
Test that purge's set_configuration returns configuration item with key 'PURGE_READ'
Test that write_statistics calls update statistics with defined keys and value increments
-*- coding: utf-8 -*- FOGLAMP_BEGIN See: http://foglamp.readthedocs.io/ FOGLAMP_END Test the positive case when all if conditions in purge_data pass Test the code block when purge failed because of invalid configuration Test the positive case when no error in try block Test the negative case when function purge_data raise some exception | 1,042 | en | 0.720903 |
''' written by Emanuel Ramirez (emanuel2718@gmail.com) '''
class LanguageFlagNotFound(Exception):
pass
class AlgorithmFlagNotFound(Exception):
pass
| algocli/errors.py | 165 | written by Emanuel Ramirez (emanuel2718@gmail.com) | 50 | en | 0.727127 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Update handling."""
from __future__ import print_function, unicode_literals, absolute_import
import re, time, os, threading, zipfile, tarfile
try: # Python 2
# pylint:disable=import-error, no-name-in-module
from urllib import quote, unquote
from urlparse import urlparse
except ImportError: # Python 3
# pylint:disable=import-error, no-name-in-module,ungrouped-imports
from urllib.parse import quote, unquote, urlparse
from .lnp import lnp
from . import launcher, paths, download, log
from .json_config import JSONConfiguration
def updates_configured():
"""Returns True if update checking have been configured."""
return prepare_updater() is not None
def check_update():
"""Checks for updates using the URL specified in PyLNP.json."""
if not updates_configured():
return
if not lnp.userconfig.has_value('updateDays'):
interval = lnp.config.get_value('updates/defaultInterval', -1)
if interval != -1 and lnp.ui.on_request_update_permission(interval):
next_update(interval)
else:
next_update(-1)
if lnp.userconfig.get_value('updateDays', -1) == -1:
return
if lnp.userconfig.get_number('nextUpdate') < time.time():
t = threading.Thread(target=perform_update_check)
t.daemon = True
t.start()
def perform_update_check():
"""Performs the actual update check. Runs in a thread."""
# pylint:disable=bare-except
prepare_updater()
if lnp.updater.update_needed():
lnp.new_version = lnp.updater.get_version()
lnp.ui.on_update_available()
def prepare_updater():
"""Returns an Updater object for the configured updater."""
if lnp.updater:
return lnp.updater
updaters = {'regex': RegexUpdater, 'json': JSONUpdater, 'dffd': DFFDUpdater}
updater_id = lnp.config.get('updates/updateMethod', None)
if updater_id is None:
#TODO: Remove this after packs have had time to migrate
log.w(
'Update method not configured in PyLNP.json! Will attempt to '
'auto-detect. Please set this value correctly, auto-detection will '
'go away eventually!')
if lnp.config.get_string('updates/dffdID'):
updater_id = 'dffd'
log.w('Updater detected: dffd')
elif lnp.config.get_string('updates/versionRegex'):
updater_id = 'regex'
log.w('Updater detected: regex')
elif lnp.config.get_string('updates/versionJsonPath'):
updater_id = 'json'
log.w('Updater detected: json')
else:
log.w('Could not detect update method, updates will not work')
return None
elif updater_id == '' or not lnp.config.get('updates'):
return None
if updater_id not in updaters:
log.e('Unknown update method: '+updater_id)
return None
lnp.updater = updaters[updater_id]()
return lnp.updater
def next_update(days):
"""Sets the next update check to occur in <days> days."""
lnp.userconfig['nextUpdate'] = (time.time() + days * 24 * 60 * 60)
lnp.userconfig['updateDays'] = days
lnp.save_config()
def start_update():
"""Launches a webbrowser to the specified update URL."""
launcher.open_url(lnp.updater.get_download_url())
def download_df_baseline(immediate=False):
"""Download the current version of DF from Bay12 Games to serve as a
baseline, in LNP/Baselines/"""
filename = lnp.df_info.get_archive_name()
url = 'http://www.bay12games.com/dwarves/' + filename
target = os.path.join(paths.get('baselines'), filename)
queue_name = 'immediate' if immediate else 'baselines'
download.download(queue_name, url, target)
def direct_download_pack():
"""Directly download a new version of the pack to the current BASEDIR"""
url = lnp.updater.get_direct_url()
fname = lnp.updater.get_direct_filename()
target = os.path.join(lnp.BASEDIR, fname)
download.download('updates', url, target,
end_callback=extract_new_pack)
def extract_new_pack(_, fname, bool_val):
"""Extract a downloaded new pack to a sibling dir of the current pack."""
exts = ('.zip', '.bz2', '.gz', '.7z', '.xz')
if not bool_val or not any(fname.endswith(ext) for ext in exts):
return None
archive = os.path.join(lnp.BASEDIR, os.path.basename(fname))
return extract_archive(archive, os.path.join(lnp.BASEDIR, '..'))
def extract_archive(fname, target):
"""Extract the archive fname to dir target, avoiding explosions."""
if zipfile.is_zipfile(fname):
zf = zipfile.ZipFile(fname)
namelist = zf.namelist()
topdir = namelist[0].split(os.path.sep)[0]
if not all(f.startswith(topdir) for f in namelist):
target = os.path.join(target, os.path.basename(fname).split('.')[0])
zf.extractall(target)
os.remove(fname)
return True
if tarfile.is_tarfile(fname):
tf = tarfile.open(fname)
        namelist = tf.getnames()
topdir = namelist[0].split(os.path.sep)[0]
if not all(f.startswith(topdir) for f in namelist):
target = os.path.join(target, fname.split('.')[0])
tf.extractall(target)
os.remove(fname)
return True
# TODO: support '*.xz' and '*.7z' files.
return False
#pylint: disable=attribute-defined-outside-init, no-self-use
class Updater(object):
"""General class for checking for updates."""
def update_needed(self):
"""Checks if an update is necessary."""
self.text = download.download_str(self.get_check_url())
if self.text is None:
log.e("Error checking for updates, could not download text")
curr_version = lnp.config.get_string('updates/packVersion')
if not curr_version:
log.e("Current pack version is not set, cannot check for updates")
return False
return self.get_version() != curr_version
def get_check_url(self):
"""Returns the URL used to check for updates."""
return lnp.config.get_string('updates/checkURL')
def get_version(self):
"""Returns the version listed at the update URL. Must be overridden by
subclasses."""
pass
def get_download_url(self):
"""Returns a URL from which the user can download the update."""
return lnp.config.get_string('updates/downloadURL')
def get_direct_url(self):
"""Returns a URL pointing directly to the update, for download by the
program."""
return lnp.config.get_string('updates/directURL')
def get_direct_filename(self):
"""Returns the filename that should be used for direct downloads."""
directFilename = lnp.config.get_string('updates/directFilename')
if directFilename:
return directFilename
url_fragments = urlparse(self.get_direct_url())
return os.path.basename(unquote(url_fragments.path))
class RegexUpdater(Updater):
"""Updater class which uses regular expressions to locate the version (and
optionally also the download URLs)."""
def get_version(self):
versionRegex = lnp.config.get_string('updates/versionRegex')
if not versionRegex:
log.e('Version regex not configured!')
return re.search(versionRegex, self.text).group(1)
def get_download_url(self):
urlRegex = lnp.config.get_string('updates/downloadURLRegex')
result = ''
if urlRegex:
result = re.search(urlRegex, self.text).group(1)
if result:
return result
else:
return super(RegexUpdater, self).get_download_url()
def get_direct_url(self):
urlRegex = lnp.config.get_string('updates/directURLRegex')
result = ''
if urlRegex:
result = re.search(urlRegex, self.text).group(1)
if result:
return result
else:
return super(RegexUpdater, self).get_direct_url()
class JSONUpdater(Updater):
"""Updater class which uses a JSON object to locate the version (and
optionally also the download URLs)."""
def get_version(self):
self.json = JSONConfiguration.from_text(self.text)
jsonPath = lnp.config.get_string('updates/versionJsonPath')
if not jsonPath:
log.e('JSON path to version not configured!')
return self.json.get_string(jsonPath)
def get_download_url(self):
jsonPath = lnp.config.get_string('updates/downloadURLJsonPath')
result = ''
if jsonPath:
result = self.json.get_string(jsonPath)
if result:
return result
else:
return super(JSONUpdater, self).get_download_url()
def get_direct_url(self):
jsonPath = lnp.config.get_string('updates/directURLJsonPath')
result = ''
if jsonPath:
result = self.json.get_string(jsonPath)
if result:
return result
else:
return super(JSONUpdater, self).get_direct_url()
def get_direct_filename(self):
jsonPath = lnp.config.get_string('updates/directFilenameJsonPath')
result = ''
if jsonPath:
result = self.json.get_string(jsonPath)
if result:
return result
else:
return super(JSONUpdater, self).get_direct_filename()
class DFFDUpdater(Updater):
"""Updater class for DFFD-hosted downloads."""
def get_check_url(self):
self.dffd_id = lnp.config.get_string('updates/dffdID')
if not self.dffd_id:
log.e('Field "updates/dffdID" must be set in PyLNP.json')
return 'http://dffd.bay12games.com/file_data/{}.json'.format(
self.dffd_id)
def get_version(self):
self.json = JSONConfiguration.from_text(self.text)
return self.json.get_string('version')
def get_download_url(self):
return 'http://dffd.bay12games.com/file.php?id='+self.dffd_id
def get_direct_url(self):
result = 'http://dffd.bay12games.com/download.php?id={0}&f={1}'
return result.format(
self.dffd_id, quote(self.json.get_string('filename')))
def get_direct_filename(self):
return self.json.get_string('filename')
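

# Usage sketch (illustrative): assuming `lnp` has been initialised elsewhere
# with a PyLNP.json that sets "updates/updateMethod" and the related fields,
# callers typically only need the module-level helpers, e.g.:
#
#     if updates_configured():
#         check_update()          # may run perform_update_check() in a thread
#     # and, once the user accepts an update:
#     direct_download_pack()      # downloads the pack, then extract_new_pack()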
| core/update.py | 10,344 | Updater class for DFFD-hosted downloads.
Updater class which uses a JSON object to locate the version (and
optionally also the download URLs).
Updater class which uses regular expressions to locate the version (and
optionally also the download URLs).
General class for checking for updates.
Checks for updates using the URL specified in PyLNP.json.
Directly download a new version of the pack to the current BASEDIR
Download the current version of DF from Bay12 Games to serve as a
baseline, in LNP/Baselines/
Extract the archive fname to dir target, avoiding explosions.
Extract a downloaded new pack to a sibling dir of the current pack.
Returns the URL used to check for updates.
Returns the filename that should be used for direct downloads.
Returns a URL pointing directly to the update, for download by the
program.
Returns a URL from which the user can download the update.
Returns the version listed at the update URL. Must be overridden by
subclasses.
Sets the next update check to occur in <days> days.
Performs the actual update check. Runs in a thread.
Returns an Updater object for the configured updater.
Launches a webbrowser to the specified update URL.
Checks if an update is necessary.
Returns True if update checking have been configured.
Update handling.
!/usr/bin/env python -*- coding: utf-8 -*- Python 2 pylint:disable=import-error, no-name-in-module Python 3 pylint:disable=import-error, no-name-in-module,ungrouped-imports pylint:disable=bare-exceptTODO: Remove this after packs have had time to migrate TODO: support '*.xz' and '*.7z' files.pylint: disable=attribute-defined-outside-init, no-self-use | 1,628 | en | 0.742539 |
import lightgbm as lgb
import xgboost as xgb
from sklearn.metrics import mean_squared_error, mean_absolute_error, explained_variance_score, r2_score
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
def compile_model(network):
"""
:param network dict: dictionary with network parameters
:return: compiled model
"""
model = lgb.LGBMRegressor(num_leaves=network.get('num_leaves', 31),
learning_rate=network.get('learning_rate', 0.1),
n_estimators=network.get('n_estimators', 20),
max_bin=network.get('max_bin', 1000),
colsample_bytree=network.get('colsample_bytree', 0.5),
subsample_for_bin=network.get('subsample_for_bin', 200000),
boosting_type=network.get('boosting_type', 'gbdt'),
num_iterations=network.get('num_iterations', 100),
extra_trees=network.get('extra_trees', False),
reg_sqrt= network.get('reg_sqrt', False),
bagging_freq = network.get('bagging_freq', 1),
bagging_fraction = network.get('bagging_fraction', 0.1))
return model
def train_and_score(network, x_train, y_train, x_test, y_test):
"""
:param network dict: dictionary with network parameters
    :param x_train array: numpy array with features for training
    :param y_train array: numpy array with labels for training
    :param x_test array: numpy array with features for test
:param y_test array: numpy array with labels for test
:return float: score
"""
model = compile_model(network)
model.fit(x_train, y_train)
y_pred = model.predict(np.array(x_test))
true = y_test
pred = y_pred
print(' R2 = ', r2_score(true, pred))
return r2_score(true, pred), model
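

# Minimal usage sketch (illustrative): the synthetic data and parameter values
# below are assumptions, not results from any real experiment.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    X = rng.normal(size=(200, 5))
    y = X @ rng.normal(size=5) + rng.normal(scale=0.1, size=200)
    network = {'num_leaves': 31, 'learning_rate': 0.1}
    score, model = train_and_score(network, X[:150], y[:150], X[150:], y[150:])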
| gb_rf_evolution/gb_train.py | 1,976 | :param network dict: dictionary with network parameters
:return: compiled model
:param network dict: dictionary with network parameters
:param x_train array: numpy array with features for traning
:param y_train array: numpy array with labels for traning
:param x_test array: numpy array with labels for test
:param y_test array: numpy array with labels for test
:return float: score | 382 | en | 0.52218 |
from logging import getLogger
from mpi4py import MPI
logger = getLogger('om.mpi_ctrl')
WORKTAG = 0
DIETAG = 1
class MpiMaster(object):
def __init__(self, run_control, comm, rank, size):
self.run_control = run_control
self.comm = comm
self.rank = rank
self.size = size
logger.info('Initialized MPI master: {}/{}', rank, size)
def run(self):
task_master = self.run_control._task_master
status = MPI.Status()
# Launch all tasks initially.
if self.size > len(task_master.pending_tasks):
logger.warning('MPI size > # of pending tasks, not sure what will happen')
waiting_dests = list(range(1, self.size)[::-1])
# TODO: should handle exception in slave by consuming all data and issuing dies.
# Farm out rest of work when a worker reports back that it's done.
while True:
try:
task = task_master.get_next_pending()
if not task:
# There are tasks with unmet dependencies.
waiting_dests.append(dest)
logger.debug('appended waiting dests: {}', waiting_dests)
except StopIteration:
logger.debug('All tasks sent')
break
need_to_block = not waiting_dests or not task
if need_to_block:
# Block until notified of completion.
rdata = self.comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
logger.info('Data received from {}', status.Get_source())
logger.debug('data: {}', rdata)
if rdata['command'] == 'error':
logger.error('Rank {} raised error', status.Get_source())
logger.error(rdata['msg'])
raise Exception('Unrecoverable error')
received_task = rdata['task'] # reconstituted via pickle.
task_master.update_task(received_task.index, received_task.status)
if task:
if waiting_dests:
# Clear backlog of waiting dests.
logger.debug('pop waiting dests: {}', waiting_dests)
dest = waiting_dests.pop()
else:
dest = status.Get_source()
data = {'command': 'run_task', 'task': task}
logger.info('Sending data to {}', dest)
logger.debug('data: {}', data)
self.comm.send(data, dest=dest, tag=WORKTAG)
# We are done! Listen for final data responses.
for dest in range(1, self.size - len(waiting_dests)):
rdata = self.comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
if rdata['command'] == 'error':
logger.error('Rank {} raised error', status.Get_source())
logger.error(rdata['msg'])
raise Exception('Unrecoverable error')
received_task = rdata['task'] # reconstituted via pickle.
task_master.update_task(received_task.index, received_task.status)
logger.info('Final data received from {}', status.Get_source())
logger.debug('data: {}', rdata)
# Send all slaves a die command.
for dest in range(1, self.size):
data = {'command': 'die'}
logger.info('Sending die to {}', dest)
self.comm.send(data, dest=dest, tag=DIETAG)
logger.info('Finished')
class MpiSlave(object):
def __init__(self, run_control, comm, rank, size):
self.run_control = run_control
self.comm = comm
self.rank = rank
self.size = size
logger.info('Initialized MPI slave: {}/{}', rank, size)
def listen(self):
try:
status = MPI.Status()
while True:
logger.debug('Waiting for data')
data = self.comm.recv(source=0, tag=MPI.ANY_TAG, status=status)
logger.debug('Received data: {}', data)
if status.Get_tag() == DIETAG:
break
else:
self.run_control.run_task(data['task'])
data['task'].status = 'done'
self.comm.send(data, dest=0, tag=WORKTAG)
logger.debug('Finished')
except Exception as e:
logger.error(e)
data = {'command': 'error', 'msg': str(e)}
self.comm.send(data, dest=0, tag=WORKTAG)
return
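

# Usage sketch (illustrative): `run_control` is assumed to be the omnium
# RunControl object that owns these helpers; only the rank-based split between
# master and slaves is shown here.
#
#     comm = MPI.COMM_WORLD
#     rank, size = comm.Get_rank(), comm.Get_size()
#     if rank == 0:
#         MpiMaster(run_control, comm, rank, size).run()
#     else:
#         MpiSlave(run_control, comm, rank, size).listen()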
| omnium/run_control/mpi_control.py | 4,547 | Launch all tasks initially. TODO: should handle exception in slave by consuming all data and issuing dies. Farm out rest of work when a worker reports back that it's done. There are tasks with unmet dependencies. Block until notified of completion. reconstituted via pickle. Clear backlog of waiting dests. We are done! Listen for final data responses. reconstituted via pickle. Send all slaves a die command. | 409 | en | 0.92535 |
# needs mayavi2
# run with ipython -wthread
import networkx as nx
import numpy as np
from enthought.mayavi import mlab
# some graphs to try
#H=nx.krackhardt_kite_graph()
#H=nx.Graph();H.add_edge('a','b');H.add_edge('a','c');H.add_edge('a','d')
#H=nx.grid_2d_graph(4,5)
H=nx.cycle_graph(20)
# reorder nodes from 0,len(G)-1
G=nx.convert_node_labels_to_integers(H)
# 3d spring layout
pos=nx.spring_layout(G,dim=3)
# numpy array of x,y,z positions in sorted node order
xyz=np.array([pos[v] for v in sorted(G)])
# scalar colors
scalars=np.array(list(G.nodes()))+5
mlab.figure(1, bgcolor=(0, 0, 0))
mlab.clf()
pts = mlab.points3d(xyz[:,0], xyz[:,1], xyz[:,2],
scalars,
scale_factor=0.1,
scale_mode='none',
colormap='Blues',
resolution=20)
pts.mlab_source.dataset.lines = np.array(list(G.edges()))
tube = mlab.pipeline.tube(pts, tube_radius=0.01)
mlab.pipeline.surface(tube, color=(0.8, 0.8, 0.8))
mlab.savefig('mayavi2_spring.png')
# mlab.show() # interactive window
| examples/3d_drawing/mayavi2_spring.py | 1,103 | needs mayavi2 run with ipython -wthread some graphs to tryH=nx.krackhardt_kite_graph()H=nx.Graph();H.add_edge('a','b');H.add_edge('a','c');H.add_edge('a','d')H=nx.grid_2d_graph(4,5) reorder nodes from 0,len(G)-1 3d spring layout numpy array of x,y,z positions in sorted node order scalar colors mlab.show() interactive window | 327 | en | 0.40865 |
#!/usr/bin/env python
import getopt
import sys
from coapthon.server.coap import CoAP
from exampleresources import BasicResource, Long, Separate, Storage, Big, voidResource, XMLResource, ETAGResource, \
Child, \
MultipleEncodingResource, AdvancedResource, AdvancedResourceSeparate, DynamicResource
__author__ = 'Giacomo Tanganelli'
class CoAPServer(CoAP):
def __init__(self, host, port, multicast=False):
CoAP.__init__(self, (host, port), multicast)
self.add_resource('basic/', BasicResource())
self.add_resource('storage/', Storage())
self.add_resource('separate/', Separate())
self.add_resource('long/', Long())
self.add_resource('big/', Big())
self.add_resource('void/', voidResource())
self.add_resource('xml/', XMLResource())
self.add_resource('encoding/', MultipleEncodingResource())
self.add_resource('etag/', ETAGResource())
self.add_resource('child/', Child())
self.add_resource('advanced/', AdvancedResource())
self.add_resource('advancedSeparate/', AdvancedResourceSeparate())
self.add_resource('dynamic/', DynamicResource())
print "CoAP Server start on " + host + ":" + str(port)
print self.root.dump()
def usage(): # pragma: no cover
print "coapserver.py -i <ip address> -p <port>"
def main(argv): # pragma: no cover
ip = "0.0.0.0"
port = 5683
multicast = False
try:
opts, args = getopt.getopt(argv, "hi:p:m", ["ip=", "port=", "multicast"])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
usage()
sys.exit()
elif opt in ("-i", "--ip"):
ip = arg
elif opt in ("-p", "--port"):
port = int(arg)
elif opt in ("-m", "--multicast"):
multicast = True
server = CoAPServer(ip, port, multicast)
try:
server.listen(10)
except KeyboardInterrupt:
print "Server Shutdown"
server.close()
print "Exiting..."
if __name__ == "__main__": # pragma: no cover
main(sys.argv[1:])
| coapserver.py | 2,220 | !/usr/bin/env python pragma: no cover pragma: no cover pragma: no cover | 71 | en | 0.402775 |
"""
Prime Developer Trial
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.QuotesAPIforDigitalPortals.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.QuotesAPIforDigitalPortals.exceptions import ApiAttributeError
def lazy_import():
from fds.sdk.QuotesAPIforDigitalPortals.model.prices_trading_schedule_event_list_data_filter import PricesTradingScheduleEventListDataFilter
globals()['PricesTradingScheduleEventListDataFilter'] = PricesTradingScheduleEventListDataFilter
class PricesTradingScheduleEventListData(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'id': (str,), # noqa: E501
'filter': (PricesTradingScheduleEventListDataFilter,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'id': 'id', # noqa: E501
'filter': 'filter', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, id, *args, **kwargs): # noqa: E501
"""PricesTradingScheduleEventListData - a model defined in OpenAPI
Args:
id (str): Identifier of the notation.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
filter (PricesTradingScheduleEventListDataFilter): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.id = id
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, id, *args, **kwargs): # noqa: E501
"""PricesTradingScheduleEventListData - a model defined in OpenAPI
Args:
id (str): Identifier of the notation.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
filter (PricesTradingScheduleEventListDataFilter): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.id = id
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| code/python/QuotesAPIforDigitalPortals/v2/fds/sdk/QuotesAPIforDigitalPortals/model/prices_trading_schedule_event_list_data.py | 11,835 | NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
PricesTradingScheduleEventListData - a model defined in OpenAPI
Args:
id (str): Identifier of the notation.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
filter (PricesTradingScheduleEventListDataFilter): [optional] # noqa: E501
PricesTradingScheduleEventListData - a model defined in OpenAPI
Args:
id (str): Identifier of the notation.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
filter (PricesTradingScheduleEventListDataFilter): [optional] # noqa: E501
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
Prime Developer Trial
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
noqa: F401 noqa: F401 noqa: F401 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 discard variable. noqa: E501 discard variable. | 6,095 | en | 0.791051 |
#!/usr/bin/env python3
"""Define public exports."""
__all__ = ["OutputFileExists", "InvalidDomain", "FileWriteError", "NoDomains"]
class NoDomains(Exception):
"""Raise when no domains are passed to findcdn main."""
def __init__(self, error):
"""Instantiate super class with passed message."""
self.message = "No domains were passed!"
super().__init__(self.message)
class OutputFileExists(Exception):
"""Raise when file already exists when writing in findcdn."""
def __init__(self, outFile):
"""Instantiate super class with passed message with passed in filename."""
self.message = "A file with the name " + outFile + " already exists!"
super().__init__(self.message)
class InvalidDomain(Exception):
"""Raise when an invalid domain is inputted in findcnd.main()."""
def __init__(self, item):
"""Instantiate super class with passed message with passed in item."""
self.message = item + " is not a valid domain in findcdn.main()"
super().__init__(self.message)
class FileWriteError(Exception):
"""Raise when there is a problem writing to a file in findcnd."""
def __init__(self, error):
"""Instantiate super class with passed message using passed in error."""
self.message = (
"The following error occurred in findcdn while file writing:\n"
+ repr(error)
)
super().__init__(self.message)
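

# Illustrative use (the surrounding variables are assumptions; findcdn raises
# these exceptions from its argument handling and file-writing paths):
#
#     if not domains:
#         raise NoDomains("no domains given")
#     if os.path.isfile(output_path):
#         raise OutputFileExists(output_path)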
| src/findcdn/findcdn_err.py | 1,458 | Raise when there is a problem writing to a file in findcnd.
Raise when an invalid domain is inputted in findcnd.main().
Raise when no domains are passed to findcdn main.
Raise when file already exists when writing in findcdn.
Instantiate super class with passed message.
Instantiate super class with passed message with passed in filename.
Instantiate super class with passed message with passed in item.
Instantiate super class with passed message using passed in error.
Define public exports.
!/usr/bin/env python3 | 517 | en | 0.729886 |
import pandas as pd
from utils.config import Config
import numpy as np
def fun_clean_categogy1(array, keyw1, index, BOW):
compty = 0
c = 0
for elm in array:
if elm == "oui" or elm == "parfois":
BOW[c].append(keyw1[index])
compty += 1
c += 1
# print(compty)
return BOW
# Add the category 2 keywords. WARNING: here the full content of the columns is added, so a
# wide variety of words can appear because of this. Moreover, these are often compound words or
# sequences of words. One option would be to keep only the first word, for example.
def fun_clean_categogy2(array, BOW):
compty = 0
c = 0
for elm in array:
if not elm == "":
if not BOW[c].__contains__(elm):
BOW[c].append(elm)
compty += 1
c += 1
# print(compty)
return BOW
def fun_clean_categogy3(array, keyw3, index, BOW, list_THR):
compty = 0
c = 0
for elm in array:
# print(elm)
if not np.isnan(float(str(elm).replace(",", "."))):
if float(str(elm).replace(",", ".")) > list_THR[index]:
if not BOW[c].__contains__(elm):
BOW[c].append(keyw3[index])
compty += 1
c += 1
print(compty)
return BOW
if __name__ == '__main__':
# %%
df = pd.read_csv(Config.csv_files[-1], sep=';', encoding='ISO-8859-1')
df.columns
#
# d = {'col1': [1, 2], 'col2': [3, 4]}
# df = pd.DataFrame(data=d)
List_cat1 = ["difficulté endormisst", "fatigue au reveil", "hyperacousie", "surdité", "SDE", "vertiges",
"depression", "anxiété"]
    # Keywords associated with the category 1 columns
keyw1 = ["endormissement", "fatigue", "hyperacousie", "surdité", "somnolence", "vertige", "dépression", "anxiété"]
List_cat2 = ["timbre acouphène", "type de douleurs", "type otalgie", "type de vertiges",
"caractere particulier", "mode apparition"]
List_cat3 = ["EVA depression", "epworth", "EVA anxiété", "EVA douleurs", "EVA hyperac", "EVA hypoac",
"EVA Otalgie 1", "EVA SADAM", "EVA vertiges", "ISI", "score khalfa hyperacousie", "EVA concentration"]
    # Keywords associated with the category 3 columns
keyw3 = ["dépression", "somnolence", "anxiété", "douleurs", "hyperacousie", "hypoacousie", "otalgie", "mâchoire",
"vertige", "sommeil", "hyperacousie", "concentration"]
    # Selection thresholds associated with the category 3 columns
List_THR = [5, 10, 5, 5, 5, 5, 4, 3, 3, 12, 20, 5]
cat4 = ["intensité ac"]
compt = 0
    # List of keywords associated with each patient (one list per patient)
BOW = [[] for i in range(len(df[df.columns[0]]))]
    # Add the category 1 keywords to the bag-of-words list BOW
for colname in List_cat1:
# print(df[colname]) # show value before
print(colname)
BOW = fun_clean_categogy1(df[colname], keyw1, compt, BOW)
compt += 1
    # Add the category 2 keywords to the bag-of-words list BOW
compt=0
for colname in List_cat2:
print(colname)
BOW = fun_clean_categogy2(df[colname], BOW)
compt += 1
    # Add the category 3 keywords to the bag-of-words list BOW
compt=0
for colname in List_cat3:
print(colname)
BOW = fun_clean_categogy3(df[colname], keyw3, compt, BOW, List_THR)
compt += 1
    # Clean up the "NaN" values copied by mistake by category 2
for elm in BOW:
if elm.__contains__(np.nan):
elm.pop(elm.index(np.nan))
    print(BOW[:200])  # a small excerpt of the bag-of-words list
BOW2=[]
for elm in BOW:
stri=""
for st in elm:
stri = stri + " " + st
BOW2.append(stri)
df2 = pd.DataFrame(BOW2)
df2.to_csv('lettres_persanes.csv', sep=';', encoding='ISO-8859-1')
print(df2)
| notebooks/template_preprocessing_columns.py | 4,073 | print(compty)Ajout des keywords de la catégorie 2 ATTENTION, ici j'ajoute tout le contenu des colonnes, donc il peut y avoir une grande variété de mots qui sugissent à cause d'ici. De plus, ce sont souvent des mots composés ou des séquences de mots. On peut envisager de ne sélectionner que le premier mot par exemple. print(compty) print(elm) %% d = {'col1': [1, 2], 'col2': [3, 4]} df = pd.DataFrame(data=d)Keywords à associer aux colonnes de la catégorie 1 Keywords à associer aux colonnes de la catégorie 3 seuils de sélections à associer aux colonnes de la catégorie 3Liste de mots clés associés à chaque patient. Une liste par patientajout des keywords de la categorie 1 à la liste des bag of words BOW print(df[colname]) show value before ajout des keywords de la categorie 2 à la liste des bag of words BOW ajout des keywords de la categorie 3 à la liste des bag of words BOWNettoyage des valeurs "NaN" copiées par erreur par la catégorie 2 petit extrait de la liste des bag of words | 993 | fr | 0.983012 |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import sphinx_rtd_theme
from sphinx.domains.python import PythonDomain
sys.path.insert(0, os.path.abspath('..'))
from dival import __version__
# -- Project information -----------------------------------------------------
project = 'Deep Inversion Validation Library'
copyright = ('2020, Johannes Leuschner, Maximilian Schmidt, '
'Daniel Otero Baguer, David Erzmann')
author = ('Johannes Leuschner, Maximilian Schmidt, '
'Daniel Otero Baguer, David Erzmann')
# The short X.Y version
version = __version__
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'sphinx_rtd_theme'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'DeepInversionValidationLibrarydoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'DeepInversionValidationLibrary.tex',
'Deep Inversion Validation Library Documentation',
('Johannes Leuschner, Maximilian Schmidt, '
'Daniel Otero Baguer, David Erzmann'), 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'deepinversionvalidationlibrary',
'Deep Inversion Validation Library Documentation', [author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'DeepInversionValidationLibrary',
'Deep Inversion Validation Library Documentation', author,
'DeepInversionValidationLibrary', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
autodoc_member_order = 'bysource'
def skip(app, what, name, obj, would_skip, options):
if name == "__init__":
return False
return would_skip
class MyPythonDomain(PythonDomain):
def find_obj(self, env, modname, classname, name, type, searchmode=0):
orig_matches = PythonDomain.find_obj(self, env, modname, classname,
name, type, searchmode)
# longest match is supposed to be original definition
return sorted(orig_matches, key=lambda m: len(m[0]))[-1:]
def setup(app):
app.connect("autodoc-skip-member", skip)
app.add_domain(MyPythonDomain, override=True)
app.add_css_file('css/custom.css')
| docs/conf.py | 6,898 | -*- coding: utf-8 -*- Configuration file for the Sphinx documentation builder. This file does only contain a selection of the most common options. For a full list see the documentation: http://www.sphinx-doc.org/en/master/config -- Path setup -------------------------------------------------------------- If extensions (or modules to document with autodoc) are in another directory, add these directories to sys.path here. If the directory is relative to the documentation root, use os.path.abspath to make it absolute, like shown here. -- Project information ----------------------------------------------------- The short X.Y version The full version, including alpha/beta/rc tags -- General configuration --------------------------------------------------- If your documentation needs a minimal Sphinx version, state it here. needs_sphinx = '1.0' Add any Sphinx extension module names here, as strings. They can be extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. Add any paths that contain templates here, relative to this directory. The suffix(es) of source filenames. You can specify multiple suffix as a list of string: source_suffix = ['.rst', '.md'] The master toctree document. The language for content autogenerated by Sphinx. Refer to documentation for a list of supported languages. This is also used if you do content translation via gettext catalogs. Usually you set "language" from the command line for these cases. List of patterns, relative to source directory, that match files and directories to ignore when looking for source files. This pattern also affects html_static_path and html_extra_path. The name of the Pygments (syntax highlighting) style to use. -- Options for HTML output ------------------------------------------------- The theme to use for HTML and HTML Help pages. See the documentation for a list of builtin themes. Theme options are theme-specific and customize the look and feel of a theme further. For a list of options available for each theme, see the documentation. html_theme_options = {} Add any paths that contain custom static files (such as style sheets) here, relative to this directory. They are copied after the builtin static files, so a file named "default.css" will overwrite the builtin "default.css". Custom sidebar templates, must be a dictionary that maps document names to template names. The default sidebars (for documents that don't match any pattern) are defined by theme itself. Builtin themes are using these templates by default: ``['localtoc.html', 'relations.html', 'sourcelink.html', 'searchbox.html']``. html_sidebars = {} -- Options for HTMLHelp output --------------------------------------------- Output file base name for HTML help builder. -- Options for LaTeX output ------------------------------------------------ The paper size ('letterpaper' or 'a4paper'). 'papersize': 'letterpaper', The font size ('10pt', '11pt' or '12pt'). 'pointsize': '10pt', Additional stuff for the LaTeX preamble. 'preamble': '', Latex figure (float) alignment 'figure_align': 'htbp', Grouping the document tree into LaTeX files. List of tuples (source start file, target name, title, author, documentclass [howto, manual, or own class]). -- Options for manual page output ------------------------------------------ One entry per manual page. List of tuples (source start file, name, description, authors, manual section). -- Options for Texinfo output ---------------------------------------------- Grouping the document tree into Texinfo files. 
List of tuples (source start file, target name, title, author, dir menu entry, description, category) -- Options for Epub output ------------------------------------------------- Bibliographic Dublin Core info. The unique identifier of the text. This can be a ISBN number or the project homepage. epub_identifier = '' A unique identification for the text. epub_uid = '' A list of files that should not be packed into the epub file. -- Extension configuration ------------------------------------------------- -- Options for todo extension ---------------------------------------------- If true, `todo` and `todoList` produce output, else they produce nothing. longest match is supposed to be original definition | 4,246 | en | 0.610065 |
import time
import scipy.io.wavfile as wavfile
import numpy as np
import speech_recognition as sr
import librosa
import argparse
import os
from glob import glob
from pydub import AudioSegment
from pydub.silence import split_on_silence, detect_nonsilent
from pydub.playback import play
import pysrt
import math
import shutil
def get_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--video', type=str, required=True, help='Path to video *.mp4 file')
parser.add_argument('-o', '--output', type=str, default='output/', help='Output file location')
parser.add_argument('-l', '--lang', type=str, default='en', help='Language of the video file')
arguments = parser.parse_args()
return arguments
def recognize(wav_filename, lang):
data, s = librosa.load(wav_filename)
librosa.output.write_wav('output/tmp.wav', data, s)
y = (np.iinfo(np.int32).max * (data/np.abs(data).max())).astype(np.int32)
wavfile.write('output/tmp_32.wav', s, y)
r = sr.Recognizer()
with sr.AudioFile('output/tmp_32.wav') as source:
audio = r.record(source)
print('Audio file has been loaded')
try:
result = r.recognize_google(audio, language = lang).lower()
except sr.UnknownValueError:
print("Failed to determine audio file")
result = ''
# finally:
# os.remove(wav_filename)
return result
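# Note on the rescaling in recognize() above: librosa.load() returns float samples in
# [-1.0, 1.0], while speech_recognition expects an integer PCM WAV file, so the signal
# is scaled to the full int32 range and written to output/tmp_32.wav before recognition.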
def get_audio(videofile, audiofile):
os.system('ffmpeg -y -threads 4 -i {} -f wav -ab 192000 -vn {}'.format(videofile, audiofile))
def split_into_frames(audiofile, samplesLocation):
os.system('rm {}/*'.format(samplesLocation))
time.sleep(2.0)
data, sr = librosa.load(audiofile)
duration = librosa.get_duration(data, sr)
print('video duration, hours: {}'.format(duration/3600))
for i in range(0,int(duration-1),20):
tmp_batch = data[(i)*sr:sr*(i+20)]
librosa.output.write_wav('{}/{}.wav'.format(samplesLocation, chr(int(i/20)+65)), tmp_batch, sr)
def separate_music_voice(audioFile, outputLocation):
os.system('spleeter separate -i {} -p spleeter:2stems -o {}'.format(audioFile, outputLocation))
# Define a function to normalize a chunk to a target amplitude.
def match_target_amplitude(aChunk, target_dBFS):
''' Normalize given audio chunk '''
change_in_dBFS = target_dBFS - aChunk.dBFS
return aChunk.apply_gain(change_in_dBFS)
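# Worked example of the normalization above (hypothetical numbers): a chunk measured at
# -30.0 dBFS with a target of -20.0 dBFS receives apply_gain(+10.0), i.e. a 10 dB boost;
# a chunk louder than the target would get a negative gain and be attenuated.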
def get_timestamp(duration):
hr = math.floor(duration / 3600000)
total_min = duration % 3600000
mins = math.floor(total_min / 60000)
total_secs = total_min % 60000
secs = math.floor(total_secs / 1000)
milisecs = total_min % 1000
return "{:02d}:{:02d}:{:02d},{:03d}".format(hr, mins, secs, milisecs)
def gen_subtitle(wavFile, samplesLocation, srtFile, lang):
srt_file = pysrt.SubRipFile()
# Load your audio.
print("loading wav file...")
# song = AudioSegment.from_mp3("your_audio.mp3")
#song = AudioSegment.from_wav("vocals.wav")
song = AudioSegment.from_file(wavFile, format="wav")
# play(song)
dBFS = song.dBFS
# Nonsilence track start and end positions.
nonsilence = detect_nonsilent(
song,
min_silence_len = 500,
silence_thresh = dBFS-16
)
file_count = len(nonsilence)
print("Nonsilence chunk length {}".format(str(file_count)))
# for [start, end] in nonsilence:
# print("start: {0} end: {1}".format(get_timestamp(start), get_timestamp(end)))
    # Split the track where the silence is at least 0.5 seconds (min_silence_len=500)
    # and get chunks using the imported function.
    print("Start splitting file...")
chunks = split_on_silence(
song,
min_silence_len = 500,
silence_thresh = dBFS-16,
# optional
keep_silence = 250
)
print("Spliting done..." + str(len(chunks)))
# Process each chunk with your parameters
for i, chunk in enumerate(chunks):
        # Create a 1 second (1000 ms) silence chunk for padding.
silence_chunk = AudioSegment.silent(duration=1000)
# Add the padding chunk to beginning and end of the entire chunk.
audio_chunk = silence_chunk + chunk + silence_chunk
# audio_chunk = chunk
# Normalize the entire chunk.
normalized_chunk = match_target_amplitude(audio_chunk, -20.0)
# Export the audio chunk with new bitrate.
starttime = nonsilence[i][0]
endtime = nonsilence[i][1]
print("\n>>{} of {}, Exporting {}chunk{}.wav start: {} end: {}".format(i, file_count, samplesLocation, i, starttime, endtime))
chunk_file_path = "{}chunk{}.wav".format(samplesLocation, str(i))
normalized_chunk.export(
chunk_file_path,
bitrate = "192k",
format = "wav"
)
time.sleep(2)
print("Going to generete the dialogs of file {}".format(chunk_file_path))
dialogs = recognize(chunk_file_path, lang)
print("{} file dialog is: {}".format(chunk_file_path, dialogs))
start_time = get_timestamp(starttime)
end_time = get_timestamp(endtime)
sub = pysrt.SubRipItem((i+1), start=start_time, end=end_time, text="{} {}".format(str(i+1), dialogs))
srt_file.append(sub)
srt_file.save(srtFile)
if __name__ == '__main__':
outputLoc = 'output/'
inputWaveFile = 'current.wav'
vocals_file = 'current/vocals.wav'
samples_location = 'samples/'
srt_file = '.srt'
start = time.time()
args = get_arguments()
outputLoc = args.output
shutil.rmtree(outputLoc)
time.sleep(2)
os.makedirs(outputLoc, exist_ok=True)
inputWaveFile = outputLoc + inputWaveFile
vocals_file = outputLoc + vocals_file
samples_location = outputLoc + samples_location
os.makedirs(samples_location, exist_ok=True)
srt_file = os.path.splitext(args.video)[0] + srt_file
print('srt file will be {}'.format(srt_file))
time.sleep(2)
get_audio(args.video, inputWaveFile)
separate_music_voice(inputWaveFile, outputLoc)
gen_subtitle(vocals_file, samples_location, srt_file, args.lang)
end = time.time()
print('elapsed time: {}'.format(end - start))
# shutil.rmtree(outputLoc)
| src/subtitle.py | 6,237 | Normalize given audio chunk
finally: os.remove(wav_filename) Define a function to normalize a chunk to a target amplitude. Load your audio. song = AudioSegment.from_mp3("your_audio.mp3")song = AudioSegment.from_wav("vocals.wav") play(song) Nonsilence track start and end positions. for [start, end] in nonsilence: print("start: {0} end: {1}".format(get_timestamp(start), get_timestamp(end))) Split track where the silence is 2 seconds or more and get chunks using the imported function. optional Process each chunk with your parameters Create a silence chunk that's 0.5 seconds (or 500 ms) long for padding. Add the padding chunk to beginning and end of the entire chunk. audio_chunk = chunk Normalize the entire chunk. Export the audio chunk with new bitrate. shutil.rmtree(outputLoc) | 803 | en | 0.610057 |
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
"""
@param: root: The root of the binary search tree.
@param: A: A TreeNode in a Binary.
@param: B: A TreeNode in a Binary.
@return: Return the least common ancestor(LCA) of the two nodes.
"""
def lowestCommonAncestor(self, root, A, B):
if root is None or root is A or root is B:
return root
left = self.lowestCommonAncestor(root.left, A, B)
right = self.lowestCommonAncestor(root.right, A, B)
if left and right:
return root
if left:
return left
return right | Lintcode/Ladder_11_15_A/88. Lowest Common Ancestor of a Binary Tree.py | 734 | @param: root: The root of the binary search tree.
@param: A: A TreeNode in a Binary.
@param: B: A TreeNode in a Binary.
@return: Return the least common ancestor(LCA) of the two nodes.
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None | 319 | en | 0.566927 |
import numpy as np
from yt.utilities.on_demand_imports import _h5py as h5py
from yt.funcs import \
mylog
from yt.geometry.selection_routines import GridSelector
from yt.utilities.io_handler import \
BaseIOHandler
def _grid_dname(grid_id):
return "/data/grid_%010i" % grid_id
def _field_dname(grid_id, field_name):
return "%s/%s" % (_grid_dname(grid_id), field_name)
# TODO all particle bits were removed
class IOHandlerGDFHDF5(BaseIOHandler):
_dataset_type = "grid_data_format"
_offset_string = 'data:offsets=0'
_data_string = 'data:datatype=0'
def _read_fluid_selection(self, chunks, selector, fields, size):
from sys import version
rv = {}
chunks = list(chunks)
if isinstance(selector, GridSelector):
if not (len(chunks) == len(chunks[0].objs) == 1):
raise RuntimeError
grid = chunks[0].objs[0]
h5f = h5py.File(grid.filename, mode='r')
gds = h5f.get(_grid_dname(grid.id))
for ftype, fname in fields:
if self.ds.field_ordering == 1:
rv[(ftype, fname)] = gds.get(fname)[()].swapaxes(0, 2)
else:
rv[(ftype, fname)] = gds.get(fname)[()]
h5f.close()
return rv
if size is None:
size = sum((grid.count(selector) for chunk in chunks
for grid in chunk.objs))
if any((ftype != "gdf" for ftype, fname in fields)):
raise NotImplementedError
for field in fields:
ftype, fname = field
fsize = size
# check the dtype instead
rv[field] = np.empty(fsize, dtype="float64")
ngrids = sum(len(chunk.objs) for chunk in chunks)
mylog.debug("Reading %s cells of %s fields in %s blocks",
size, [fn for ft, fn in fields], ngrids)
ind = 0
for chunk in chunks:
fid = None
for grid in chunk.objs:
if grid.filename is None:
continue
if fid is None:
if version < '3':
fid = h5py.h5f.open(grid.filename,h5py.h5f.ACC_RDONLY)
else:
fid = h5py.h5f.open(bytes(grid.filename,'utf-8'),h5py.h5f.ACC_RDONLY)
if self.ds.field_ordering == 1:
# check the dtype instead
data = np.empty(grid.ActiveDimensions[::-1],
dtype="float64")
data_view = data.swapaxes(0, 2)
else:
# check the dtype instead
data_view = data = np.empty(grid.ActiveDimensions,
dtype="float64")
for field in fields:
ftype, fname = field
if version < '3':
dg = h5py.h5d.open(fid, _field_dname(grid.id, fname))
else:
dg = h5py.h5d.open(fid, bytes(_field_dname(grid.id, fname),'utf-8'))
dg.read(h5py.h5s.ALL, h5py.h5s.ALL, data)
                    nd = grid.select(selector, data_view, rv[field], ind)  # caches
                ind += nd  # nd is the same for every field of this grid, so advancing by the last value is correct
if fid is not None:
fid.close()
return rv
| yt/frontends/gdf/io.py | 3,452 | TODO all particle bits were removed check the dtype instead check the dtype instead check the dtype instead caches I don't get that part, only last nd is added | 159 | en | 0.890741 |
#######################
# Dennis MUD #
# telnet.py #
# Copyright 2018-2021 #
# Michael D. Reiley #
#######################
# **********
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# **********
# Parts of codebase borrowed from https://github.com/TKeesh/WebSocketChat
import traceback
from lib.logger import Logger
from twisted.internet import protocol
from twisted.protocols.basic import LineReceiver
# Read the motd file.
try:
with open("motd.telnet.txt") as f:
motd = f.read()
except:
motd = None
class ServerProtocol(LineReceiver):
def __init__(self, factory):
self.factory = factory
self.peer = None
self._log = Logger("telnet")
def connectionMade(self):
p = self.transport.getPeer()
self.peer = p.host + ':' + str(p.port)
self.factory.register(self)
self._log.info("Client connected: {peer}", peer=self.peer)
if motd:
self.factory.communicate(self.peer, motd.encode('utf-8'))
def connectionLost(self, reason):
self.factory.unregister(self)
self._log.info("Client disconnected: {peer}", peer=self.peer)
def lineReceived(self, line):
# Don't log passwords.
passcheck = line.split(b' ')
if passcheck[0] == b'login' and len(passcheck) > 2:
passcheck = b' '.join(passcheck[:2] + [b'********'])
self._log.info("Client {peer} sending message: {line}", peer=self.peer, line=passcheck)
elif passcheck[0] == b'register' and len(passcheck) > 2:
passcheck = b' '.join(passcheck[:2] + [b'********'])
self._log.info("Client {peer} sending message: {line}", peer=self.peer, line=passcheck)
elif passcheck[0] == b'password' and len(passcheck) > 1:
passcheck = b' '.join(passcheck[:1] + [b'********'])
self._log.info("Client {peer} sending message: {line}", peer=self.peer, line=passcheck)
else:
self._log.info("Client {peer} sending message: {line}", peer=self.peer, line=line)
# Try to decode the line.
try:
line = line.decode('utf-8')
except:
self._log.info("Discarded garbage line from {peer}", peer=self.peer)
return
# Did we receive the quit pseudo-command?
if line == "quit":
self.transport.loseConnection()
return
# Run the command while handling errors.
try:
self.factory.router.shell.command(self.factory.router[self.peer]["console"], line)
except:
self.factory.communicate(self.peer, traceback.format_exc().encode('utf-8'))
self._log.error(traceback.format_exc())
class ServerFactory(protocol.Factory):
def __init__(self, router, *args, **kwargs):
self.router = router
self.router.telnet_factory = self
super(ServerFactory, self).__init__(*args)
self.clients = []
def buildProtocol(self, addr):
return ServerProtocol(self)
def register(self, client):
self.clients.append({'client-peer': client.peer, 'client': client})
self.router.register(client.peer, "telnet")
def unregister(self, client):
self.router.unregister(client.peer)
for c in self.clients:
if c['client-peer'] == client.peer:
self.clients.remove(c)
def communicate(self, peer, payload):
client = None
for c in self.clients:
if c['client-peer'] == peer:
client = c['client']
if client:
# Telnet wants a CRLF instead of just an LF. Some clients require this to display properly.
client.sendLine(payload.decode('utf-8').replace('\n', '\r\n').encode('utf-8'))
| lib/telnet.py | 4,782 | Dennis MUD telnet.py Copyright 2018-2021 Michael D. Reiley ********** Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ********** Parts of codebase borrowed from https://github.com/TKeesh/WebSocketChat Read the motd file. Don't log passwords. Try to decode the line. Did we receive the quit pseudo-command? Run the command while handling errors. Telnet wants a CRLF instead of just an LF. Some clients require this to display properly. | 1,432 | en | 0.876923 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
# Data sets
IRIS_TRAINING = os.path.join(os.path.dirname(__file__), "iris_training.csv")
IRIS_TEST = os.path.join(os.path.dirname(__file__), "iris_test.csv")
def main(unused_argv):
# Load datasets.
training_set = tf.contrib.learn.datasets.base.load_csv_with_header(
filename=IRIS_TRAINING, target_dtype=np.int, features_dtype=np.float32)
test_set = tf.contrib.learn.datasets.base.load_csv_with_header(
filename=IRIS_TEST, target_dtype=np.int, features_dtype=np.float32)
# Specify that all features have real-value data
feature_columns = [tf.contrib.layers.real_valued_column("", dimension=4)]
validation_monitor = tf.contrib.learn.monitors.ValidationMonitor(
test_set.data,
test_set.target,
every_n_steps=50)
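  # The monitor above evaluates on the test set every 50 training steps, which is what
  # produces the periodic validation metrics during classifier.fit() below.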
# Build 3 layer DNN with 10, 20, 10 units respectively.
# classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
# hidden_units=[10, 20, 10],
# n_classes=3,
# model_dir="/tmp/iris_model")
classifier = tf.contrib.learn.DNNClassifier(
feature_columns=feature_columns,
hidden_units=[10],
n_classes=3,
model_dir="/tmp/iris_model",
config=tf.contrib.learn.RunConfig(save_checkpoints_secs=1))
# Fit model.
# classifier.fit(x=training_set.data,
# y=training_set.target,
# steps=2000)
classifier.fit(x=training_set.data,
y=training_set.target,
steps=2000,
monitors=[validation_monitor])
# Evaluate accuracy.
accuracy_score = classifier.evaluate(x=test_set.data,
y=test_set.target)["accuracy"]
print('Accuracy: {0:f}'.format(accuracy_score))
# Classify two new flower samples.
new_samples = np.array(
[[6.4, 3.2, 4.5, 1.5], [5.8, 3.1, 5.0, 1.7]], dtype=np.float32)
y = list(classifier.predict(new_samples, as_iterable=True))
print('Predictions: {}'.format(str(y)))
if __name__ == "__main__":
tf.app.run()
| agents/tensorflow_iris.py | 2,393 | Data sets Load datasets. Specify that all features have real-value data Build 3 layer DNN with 10, 20, 10 units respectively. classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3, model_dir="/tmp/iris_model") Fit model. classifier.fit(x=training_set.data, y=training_set.target, steps=2000) Evaluate accuracy. Classify two new flower samples. | 567 | en | 0.42139 |
# encoding: utf-8
"""
@author: sherlock
@contact: sherlockliao01@gmail.com
"""
import glob
import re
import os.path as osp
from .bases import BaseImageDataset
class Market1501(BaseImageDataset):
"""
Market1501
Reference:
Zheng et al. Scalable Person Re-identification: A Benchmark. ICCV 2015.
URL: http://www.liangzheng.org/Project/project_reid.html
Dataset statistics:
# identities: 1501 (+1 for background)
# images: 12936 (train) + 3368 (query) + 15913 (gallery)
"""
dataset_dir = 'market1501'
def __init__(self, root='/raid/home/zhihui/reid_strong_baseline/data/Market-1501-fixed/', verbose=True, **kwargs):
super(Market1501, self).__init__()
self.dataset_dir = root
self.train_dir = osp.join(self.dataset_dir, 'bounding_box_train')
self.query_dir = osp.join(self.dataset_dir, 'query')
self.gallery_dir = osp.join(self.dataset_dir, 'bounding_box_test')
self._check_before_run()
train = self._process_dir(self.train_dir, relabel=True)
query = self._process_dir(self.query_dir, relabel=False)
gallery = self._process_dir(self.gallery_dir, relabel=False)
if verbose:
print("=> Market1501 loaded")
self.print_dataset_statistics(train, query, gallery)
self.train = train
self.query = query
self.gallery = gallery
self.num_train_pids, self.num_train_imgs, self.num_train_cams = self.get_imagedata_info(self.train)
self.num_query_pids, self.num_query_imgs, self.num_query_cams = self.get_imagedata_info(self.query)
self.num_gallery_pids, self.num_gallery_imgs, self.num_gallery_cams = self.get_imagedata_info(self.gallery)
def _process_dir(self, dir_path, relabel=False):
img_paths = glob.glob(osp.join(dir_path, '*.jpg'))
pattern = re.compile(r'([-\d]+)_c(\d)')
pid_container = set()
for img_path in img_paths:
pid, _ = map(int, pattern.search(img_path).groups())
if pid == -1: continue # junk images are just ignored
pid_container.add(pid)
pid2label = {pid: label for label, pid in enumerate(pid_container)}
dataset = []
for img_path in img_paths:
pid, camid = map(int, pattern.search(img_path).groups())
if pid == -1: continue # junk images are just ignored
assert 0 <= pid <= 1501 # pid == 0 means background
assert 1 <= camid <= 6
camid -= 1 # index starts from 0
if relabel: pid = pid2label[pid]
dataset.append((img_path, pid, camid))
return dataset
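# Illustrative note on the filename convention assumed by the regex above: a name such
# as "0001_c1s1_001051_00.jpg" parses to pid=1 and camid=1; camid is then shifted to 0
# so camera indices run 0-5, and with relabel=True pids are remapped to contiguous
# labels starting at 0.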
| data/datasets/market1501.py | 2,658 | Market1501
Reference:
Zheng et al. Scalable Person Re-identification: A Benchmark. ICCV 2015.
URL: http://www.liangzheng.org/Project/project_reid.html
Dataset statistics:
# identities: 1501 (+1 for background)
# images: 12936 (train) + 3368 (query) + 15913 (gallery)
@author: sherlock
@contact: sherlockliao01@gmail.com
encoding: utf-8 junk images are just ignored junk images are just ignored pid == 0 means background index starts from 0 | 443 | en | 0.716542 |
"""
Django settings for pets_forum project.
Generated by 'django-admin startproject' using Django 1.11.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'f9um$ll0_p4=p&p(iwkeu1hk+-en9c%q#@aul(n!7ecb^%z8x8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'pets_forum.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'pets_forum.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| pets_forum/pets_forum/settings.py | 3,112 | Django settings for pets_forum project.
Generated by 'django-admin startproject' using Django 1.11.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
Build paths inside the project like this: os.path.join(BASE_DIR, ...) Quick-start development settings - unsuitable for production See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/ SECURITY WARNING: keep the secret key used in production secret! SECURITY WARNING: don't run with debug turned on in production! Application definition Database https://docs.djangoproject.com/en/1.11/ref/settings/databases Password validation https://docs.djangoproject.com/en/1.11/ref/settings/auth-password-validators Internationalization https://docs.djangoproject.com/en/1.11/topics/i18n/ Static files (CSS, JavaScript, Images) https://docs.djangoproject.com/en/1.11/howto/static-files/ | 1,000 | en | 0.649352 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 30 03:08:17 2017
@author: aditya
"""
import math
import os
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
FLAGS = None
MNIST_IMAGE_SIZE = 28
MNIST_IMAGE_PIXELS = 28 * 28
OUTPUT_CLASSES = 10
Batch_Size = 100
LEARNING_RATE = 0.01
hiddenlayer_units = 16
expno = "1"
def deepnnwithrelu(images):
    # Code borrowed from https://github.com/tensorflow/tensorflow/blob/r1.4/tensorflow/examples/tutorials/mnist/mnist.py
with tf.name_scope('hiddenlayer1'):
weights = tf.Variable(
tf.truncated_normal([MNIST_IMAGE_PIXELS, hiddenlayer_units],
stddev=1.0 / math.sqrt(float(MNIST_IMAGE_PIXELS))),
name='weights')
biases = tf.Variable(tf.zeros([hiddenlayer_units]),
name='biases')
hidden1 = tf.nn.relu(tf.matmul(images, weights) + biases)
# Hidden 2
with tf.name_scope('hiddenlayer2'):
weights = tf.Variable(
tf.truncated_normal([hiddenlayer_units, hiddenlayer_units],
stddev=1.0 / math.sqrt(float(hiddenlayer_units))),
name='weights')
biases = tf.Variable(tf.zeros([hiddenlayer_units]),
name='biases')
hidden2 = tf.nn.relu(tf.matmul(hidden1, weights) + biases)
with tf.name_scope('hiddenlayer3'):
weights = tf.Variable(
tf.truncated_normal([hiddenlayer_units, hiddenlayer_units],
stddev=1.0 / math.sqrt(float(hiddenlayer_units))),
name='weights')
biases = tf.Variable(tf.zeros([hiddenlayer_units]),
name='biases')
hidden3 = tf.nn.relu(tf.matmul(hidden2, weights) + biases)
with tf.name_scope('hiddenlayer4'):
weights = tf.Variable(
tf.truncated_normal([hiddenlayer_units, hiddenlayer_units],
stddev=1.0 / math.sqrt(float(hiddenlayer_units))),
name='weights')
biases = tf.Variable(tf.zeros([hiddenlayer_units]),
name='biases')
hidden4 = tf.nn.relu(tf.matmul(hidden3, weights) + biases)
with tf.name_scope('hiddenlayer5'):
weights = tf.Variable(
tf.truncated_normal([hiddenlayer_units, hiddenlayer_units],
stddev=1.0 / math.sqrt(float(hiddenlayer_units))),
name='weights')
biases = tf.Variable(tf.zeros([hiddenlayer_units]),
name='biases')
hidden5 = tf.nn.relu(tf.matmul(hidden4, weights) + biases)
with tf.name_scope('hiddenlayer6'):
weights = tf.Variable(
tf.truncated_normal([hiddenlayer_units, hiddenlayer_units],
stddev=1.0 / math.sqrt(float(hiddenlayer_units))),
name='weights')
biases = tf.Variable(tf.zeros([hiddenlayer_units]),
name='biases')
hidden6 = tf.nn.relu(tf.matmul(hidden5, weights) + biases)
    # Map the last hidden layer's features to the 10 digit classes
    # (no dropout is used in this network).
with tf.name_scope('finallayer'):
W_fc2 = weight_variable([hiddenlayer_units, 10])
b_fc2 = bias_variable([10])
y_output = tf.matmul(hidden6, W_fc2) + b_fc2
return y_output
def deepnnwithsigmoid(images):
with tf.name_scope('hiddenlayer1'):
weights = tf.Variable(
tf.truncated_normal([MNIST_IMAGE_PIXELS, hiddenlayer_units],
stddev=1.0 / math.sqrt(float(MNIST_IMAGE_PIXELS))),
name='weights')
biases = tf.Variable(tf.zeros([hiddenlayer_units]),
name='biases')
hidden1 = tf.nn.sigmoid(tf.matmul(images, weights) + biases)
# Hidden 2
with tf.name_scope('hiddenlayer2'):
weights = tf.Variable(
tf.truncated_normal([hiddenlayer_units, hiddenlayer_units],
stddev=1.0 / math.sqrt(float(hiddenlayer_units))),
name='weights')
biases = tf.Variable(tf.zeros([hiddenlayer_units]),
name='biases')
hidden2 = tf.nn.sigmoid(tf.matmul(hidden1, weights) + biases)
with tf.name_scope('hiddenlayer3'):
weights = tf.Variable(
tf.truncated_normal([hiddenlayer_units, hiddenlayer_units],
stddev=1.0 / math.sqrt(float(hiddenlayer_units))),
name='weights')
biases = tf.Variable(tf.zeros([hiddenlayer_units]),
name='biases')
hidden3 = tf.nn.sigmoid(tf.matmul(hidden2, weights) + biases)
with tf.name_scope('hiddenlayer4'):
weights = tf.Variable(
tf.truncated_normal([hiddenlayer_units, hiddenlayer_units],
stddev=1.0 / math.sqrt(float(hiddenlayer_units))),
name='weights')
biases = tf.Variable(tf.zeros([hiddenlayer_units]),
name='biases')
hidden4 = tf.nn.sigmoid(tf.matmul(hidden3, weights) + biases)
with tf.name_scope('hiddenlayer5'):
weights = tf.Variable(
tf.truncated_normal([hiddenlayer_units, hiddenlayer_units],
stddev=1.0 / math.sqrt(float(hiddenlayer_units))),
name='weights')
biases = tf.Variable(tf.zeros([hiddenlayer_units]),
name='biases')
hidden5 = tf.nn.sigmoid(tf.matmul(hidden4, weights) + biases)
with tf.name_scope('hiddenlayer6'):
weights = tf.Variable(
tf.truncated_normal([hiddenlayer_units, hiddenlayer_units],
stddev=1.0 / math.sqrt(float(hiddenlayer_units))),
name='weights')
biases = tf.Variable(tf.zeros([hiddenlayer_units]),
name='biases')
hidden6 = tf.nn.sigmoid(tf.matmul(hidden5, weights) + biases)
with tf.name_scope('finallayer'):
W_fc2 = weight_variable([hiddenlayer_units, 10])
b_fc2 = bias_variable([10])
y_output = tf.matmul(hidden6, W_fc2) + b_fc2
return y_output
def deepnnwithelu(images):
with tf.name_scope('hiddenlayer1'):
weights = tf.Variable(
tf.truncated_normal([MNIST_IMAGE_PIXELS, hiddenlayer_units],
stddev=1.0 / math.sqrt(float(MNIST_IMAGE_PIXELS))),
name='weights')
biases = tf.Variable(tf.zeros([hiddenlayer_units]),
name='biases')
hidden1 = tf.nn.elu(tf.matmul(images, weights) + biases)
# Hidden 2
with tf.name_scope('hiddenlayer2'):
weights = tf.Variable(
tf.truncated_normal([hiddenlayer_units, hiddenlayer_units],
stddev=1.0 / math.sqrt(float(hiddenlayer_units))),
name='weights')
biases = tf.Variable(tf.zeros([hiddenlayer_units]),
name='biases')
hidden2 = tf.nn.elu(tf.matmul(hidden1, weights) + biases)
with tf.name_scope('hiddenlayer3'):
weights = tf.Variable(
tf.truncated_normal([hiddenlayer_units, hiddenlayer_units],
stddev=1.0 / math.sqrt(float(hiddenlayer_units))),
name='weights')
biases = tf.Variable(tf.zeros([hiddenlayer_units]),
name='biases')
hidden3 = tf.nn.elu(tf.matmul(hidden2, weights) + biases)
with tf.name_scope('hiddenlayer4'):
weights = tf.Variable(
tf.truncated_normal([hiddenlayer_units, hiddenlayer_units],
stddev=1.0 / math.sqrt(float(hiddenlayer_units))),
name='weights')
biases = tf.Variable(tf.zeros([hiddenlayer_units]),
name='biases')
hidden4 = tf.nn.elu(tf.matmul(hidden3, weights) + biases)
with tf.name_scope('hiddenlayer5'):
weights = tf.Variable(
tf.truncated_normal([hiddenlayer_units, hiddenlayer_units],
stddev=1.0 / math.sqrt(float(hiddenlayer_units))),
name='weights')
biases = tf.Variable(tf.zeros([hiddenlayer_units]),
name='biases')
hidden5 = tf.nn.elu(tf.matmul(hidden4, weights) + biases)
with tf.name_scope('hiddenlayer6'):
weights = tf.Variable(
tf.truncated_normal([hiddenlayer_units, hiddenlayer_units],
stddev=1.0 / math.sqrt(float(hiddenlayer_units))),
name='weights')
biases = tf.Variable(tf.zeros([hiddenlayer_units]),
name='biases')
hidden6 = tf.nn.elu(tf.matmul(hidden5, weights) + biases)
with tf.name_scope('finallayer'):
W_fc2 = weight_variable([hiddenlayer_units, 10])
b_fc2 = bias_variable([10])
y_output = tf.matmul(hidden6, W_fc2) + b_fc2
return y_output
# Code Borrowed from https://github.com/tensorflow/tensorflow/blob/r1.4/tensorflow/examples/tutorials/mnist/mnist_deep.py
def weight_variable(shape):
"""weight_variable generates a weight variable of a given shape."""
initial = tf.truncated_normal(shape, stddev=0.1/math.sqrt(float(hiddenlayer_units)))
return tf.Variable(initial)
def bias_variable(shape):
"""bias_variable generates a bias variable of a given shape."""
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def appstart(stri):
# Import data
mnist = input_data.read_data_sets("../Data/MNIST_data", one_hot=True)
# Create the model
x = tf.placeholder(tf.float32, [None, 784])
# Define loss and optimizer
y_ = tf.placeholder(tf.float32, [None, 10])
# Build the graph for the deep net
if(stri=="relu"):
y_output = deepnnwithrelu(x)
elif(stri=="elu"):
y_output = deepnnwithelu(x)
else:
y_output = deepnnwithsigmoid(x)
#Code Borrowed from https://github.com/tensorflow/tensorflow/blob/r1.4/tensorflow/examples/tutorials/mnist/mnist_deep.py
with tf.name_scope('loss'):
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_,
logits=y_output)
cross_entropy = tf.reduce_mean(cross_entropy)
with tf.name_scope('adam_optimizer'):
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
with tf.name_scope('accuracy'):
correct_prediction = tf.equal(tf.argmax(y_output, 1), tf.argmax(y_, 1))
correct_prediction = tf.cast(correct_prediction, tf.float32)
accuracy = tf.reduce_mean(correct_prediction)
graph_location = "tfgraphs/"+expno
print('Saving graph to: %s' % graph_location)
train_writer = tf.summary.FileWriter(graph_location)
train_writer.add_graph(tf.get_default_graph())
resultarray =[]
iterarray=[]
accarray=[]
testaccarray = []
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(20000):
batch = mnist.train.next_batch(50)
if i % 100 == 0:
train_accuracy = accuracy.eval(feed_dict={
x: batch[0], y_: batch[1]})
testaccuracy = accuracy.eval(feed_dict={
x: mnist.test.images, y_: mnist.test.labels})
#print('step %d, training accuracy %g' % (i, train_accuracy))
#print('test accuracy %g' %testaccuracy)
iterarray.append(i)
accarray.append(train_accuracy)
testaccarray.append(testaccuracy)
train_step.run(feed_dict={x: batch[0], y_: batch[1]})
resultarray.append(iterarray)
resultarray.append(accarray)
resultarray.append(testaccarray)
return resultarray
def progstart():
rarray =[]
rarray.append(appstart("sigmoid"))
rarray.append(appstart("relu"))
rarray.append(appstart("elu"))
if not os.path.exists('figures'):
os.makedirs('figures')
fig1 = plt.figure()
axes1 = fig1.add_axes([0.1,0.1,0.8,0.8])
axes1.plot(rarray[0][0],rarray[0][1],'r')
axes1.plot(rarray[0][0],rarray[1][1],'b')
axes1.plot(rarray[0][0],rarray[2][1],'g')
axes1.set_xlabel('Train Iterations')
axes1.set_ylabel('Train accuracy')
fig1.savefig('figures/'+expno+'_trainAccuracy.png')
fig2 = plt.figure()
axes2 = fig2.add_axes([0.1,0.1,0.8,0.8])
axes2.plot(rarray[0][0],rarray[0][2],'r')
axes2.plot(rarray[0][0],rarray[1][2],'b')
axes2.plot(rarray[0][0],rarray[2][2],'g')
axes2.set_xlabel('Train Iterations')
axes2.set_ylabel('Test accuracy')
fig2.savefig('figures/'+expno+'_testAccuracy.png')
plt.plot()
progstart()
| FSL - Entire Project + Report/Final Project/Code/Exp1.py | 12,303 | bias_variable generates a bias variable of a given shape.
weight_variable generates a weight variable of a given shape.
Created on Thu Nov 30 03:08:17 2017
@author: aditya
!/usr/bin/env python3 -*- coding: utf-8 -*-!/usr/bin/env python3 -*- coding: utf-8 -*-Code boorrowed from https://github.com/tensorflow/tensorflow/blob/r1.4/tensorflow/examples/tutorials/mnist/mnist.py Hidden 2 Dropout - controls the complexity of the model, prevents co-adaptation of features. Map the 1024 features to 10 classes, one for each digit Hidden 2 Hidden 2 Code Borrowed from https://github.com/tensorflow/tensorflow/blob/r1.4/tensorflow/examples/tutorials/mnist/mnist_deep.py Import data Create the model Define loss and optimizer Build the graph for the deep netCode Borrowed from https://github.com/tensorflow/tensorflow/blob/r1.4/tensorflow/examples/tutorials/mnist/mnist_deep.py print('step %d, training accuracy %g' % (i, train_accuracy))print('test accuracy %g' %testaccuracy) | 970 | en | 0.692586 |
from os.path import join, dirname, abspath
here = lambda *paths: join(dirname(abspath(__file__)), *paths)
PROJECT_ROOT = here('..')
root = lambda *paths: join(PROJECT_ROOT, *paths)
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
EMAIL_FILE_PATH = here('tmp', 'app-mails')
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': ':memory:', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '^1mo-5o90rn7nzy4fm94_=rtg-l9^x&tez8^9#1ktl4r6s_w^l'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'tests.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'tests.wsgi.application'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
    },
]
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_outbox',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| tests/settings.py | 5,631 | ('Your Name', 'your_email@example.com'), Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'. Or path to database file if using sqlite3. The following settings are not used with sqlite3: Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP. Set to empty string for default. Hosts/domain names that are valid for this site; required if DEBUG is False See https://docs.djangoproject.com/en/1.5/ref/settings/allowed-hosts Local time zone for this installation. Choices can be found here: http://en.wikipedia.org/wiki/List_of_tz_zones_by_name although not all choices may be available on all operating systems. In a Windows environment this must be set to your system time zone. Language code for this installation. All choices can be found here: http://www.i18nguy.com/unicode/language-identifiers.html If you set this to False, Django will make some optimizations so as not to load the internationalization machinery. If you set this to False, Django will not format dates, numbers and calendars according to the current locale. If you set this to False, Django will not use timezone-aware datetimes. Absolute filesystem path to the directory that will hold user-uploaded files. Example: "/var/www/example.com/media/" URL that handles the media served from MEDIA_ROOT. Make sure to use a trailing slash. Examples: "http://example.com/media/", "http://media.example.com/" Absolute path to the directory static files should be collected to. Don't put anything in this directory yourself; store your static files in apps' "static/" subdirectories and in STATICFILES_DIRS. Example: "/var/www/example.com/static/" URL prefix for static files. Example: "http://example.com/static/", "http://static.example.com/" Additional locations of static files Put strings here, like "/home/html/static" or "C:/www/django/static". Always use forward slashes, even on Windows. Don't forget to use absolute paths, not relative paths. List of finder classes that know how to find static files in various locations. 'django.contrib.staticfiles.finders.DefaultStorageFinder', Make this unique, and don't share it with anybody. List of callables that know how to import templates from various sources. 'django.template.loaders.eggs.Loader', Uncomment the next line for simple clickjacking protection: 'django.middleware.clickjacking.XFrameOptionsMiddleware', Python dotted path to the WSGI application used by Django's runserver. Uncomment the next line to enable the admin: 'django.contrib.admin', Uncomment the next line to enable admin documentation: 'django.contrib.admindocs', A sample logging configuration. The only tangible logging performed by this configuration is to send an email to the site admins on every HTTP 500 error when DEBUG=False. See http://docs.djangoproject.com/en/dev/topics/logging for more details on how to customize your logging configuration. | 2,891 | en | 0.752139 |
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron_lib.callbacks import events as callbacks_events
from neutron_lib.callbacks import registry as callbacks_registry
from neutron_lib.callbacks import resources as callbacks_resources
from neutron_lib import constants
import testtools
from neutron.agent.common import ovs_lib
from neutron.agent.common import utils
from neutron.agent import firewall
from neutron.agent.linux.openvswitch_firewall import constants as ovsfw_consts
from neutron.agent.linux.openvswitch_firewall import exceptions
from neutron.agent.linux.openvswitch_firewall import firewall as ovsfw
from neutron.common import constants as n_const
from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants \
as ovs_consts
from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \
import ovs_bridge
from neutron.tests import base
TESTING_VLAN_TAG = 1
def create_ofport(port_dict):
ovs_port = mock.Mock(vif_mac='00:00:00:00:00:00', ofport=1,
port_name="port-name")
return ovsfw.OFPort(port_dict, ovs_port, vlan_tag=TESTING_VLAN_TAG)
class TestCreateRegNumbers(base.BaseTestCase):
def test_no_registers_defined(self):
flow = {'foo': 'bar'}
ovsfw.create_reg_numbers(flow)
self.assertEqual({'foo': 'bar'}, flow)
def test_all_registers_defined(self):
flow = {'foo': 'bar', 'reg_port': 1, 'reg_net': 2,
'reg_remote_group': 3}
expected_flow = {'foo': 'bar',
'reg{:d}'.format(ovsfw_consts.REG_PORT): 1,
'reg{:d}'.format(ovsfw_consts.REG_NET): 2,
'reg{:d}'.format(ovsfw_consts.REG_REMOTE_GROUP): 3}
ovsfw.create_reg_numbers(flow)
self.assertEqual(expected_flow, flow)
class TestSecurityGroup(base.BaseTestCase):
def setUp(self):
super(TestSecurityGroup, self).setUp()
self.sg = ovsfw.SecurityGroup('123')
self.sg.members = {'type': [1, 2, 3, 4]}
def test_update_rules_split(self):
rules = [
{'foo': 'bar', 'rule': 'all'}, {'bar': 'foo'},
{'remote_group_id': '123456', 'foo': 'bar'}]
expected_raw_rules = [{'foo': 'bar', 'rule': 'all'}, {'bar': 'foo'}]
expected_remote_rules = [{'remote_group_id': '123456', 'foo': 'bar'}]
self.sg.update_rules(rules)
self.assertEqual(expected_raw_rules, self.sg.raw_rules)
self.assertEqual(expected_remote_rules, self.sg.remote_rules)
def test_update_rules_protocols(self):
rules = [
{'foo': 'bar', 'protocol': constants.PROTO_NAME_ICMP,
'ethertype': constants.IPv4},
{'foo': 'bar', 'protocol': constants.PROTO_NAME_ICMP,
'ethertype': constants.IPv6},
{'foo': 'bar', 'protocol': constants.PROTO_NAME_IPV6_ICMP_LEGACY,
'ethertype': constants.IPv6},
{'foo': 'bar', 'protocol': constants.PROTO_NAME_TCP},
{'foo': 'bar', 'protocol': '94'},
{'foo': 'bar', 'protocol': 'baz'},
{'foo': 'no_proto'}]
self.sg.update_rules(rules)
self.assertEqual({'foo': 'no_proto'}, self.sg.raw_rules.pop())
protos = [rule['protocol'] for rule in self.sg.raw_rules]
self.assertEqual([constants.PROTO_NUM_ICMP,
constants.PROTO_NUM_IPV6_ICMP,
constants.PROTO_NUM_IPV6_ICMP,
constants.PROTO_NUM_TCP,
94,
'baz'], protos)
def test_get_ethertype_filtered_addresses(self):
addresses = self.sg.get_ethertype_filtered_addresses('type')
expected_addresses = [1, 2, 3, 4]
self.assertEqual(expected_addresses, addresses)
class TestOFPort(base.BaseTestCase):
def setUp(self):
super(TestOFPort, self).setUp()
self.ipv4_addresses = ['10.0.0.1', '192.168.0.1']
self.ipv6_addresses = ['fe80::f816:3eff:fe2e:1']
port_dict = {'device': 1,
'fixed_ips': self.ipv4_addresses + self.ipv6_addresses}
self.port = create_ofport(port_dict)
def test_ipv4_address(self):
ipv4_addresses = self.port.ipv4_addresses
self.assertEqual(self.ipv4_addresses, ipv4_addresses)
def test_ipv6_address(self):
ipv6_addresses = self.port.ipv6_addresses
self.assertEqual(self.ipv6_addresses, ipv6_addresses)
def test__get_allowed_pairs(self):
port = {
'allowed_address_pairs': [
{'mac_address': 'foo', 'ip_address': '10.0.0.1'},
{'mac_address': 'bar', 'ip_address': '192.168.0.1'},
{'mac_address': 'qux', 'ip_address': '169.254.0.0/16'},
{'mac_address': 'baz', 'ip_address': '2003::f'},
]}
allowed_pairs_v4 = ovsfw.OFPort._get_allowed_pairs(port, version=4)
allowed_pairs_v6 = ovsfw.OFPort._get_allowed_pairs(port, version=6)
expected_aap_v4 = {('foo', '10.0.0.1'), ('bar', '192.168.0.1'),
('qux', '169.254.0.0/16')}
expected_aap_v6 = {('baz', '2003::f')}
self.assertEqual(expected_aap_v4, allowed_pairs_v4)
self.assertEqual(expected_aap_v6, allowed_pairs_v6)
def test__get_allowed_pairs_empty(self):
port = {}
allowed_pairs = ovsfw.OFPort._get_allowed_pairs(port, version=4)
self.assertFalse(allowed_pairs)
def test_update(self):
old_port_dict = self.port.neutron_port_dict
new_port_dict = old_port_dict.copy()
added_ips = [1, 2, 3]
new_port_dict.update({
'fixed_ips': added_ips,
'allowed_address_pairs': [
{'mac_address': '00:00:00:00:00:01',
'ip_address': '192.168.0.1'},
{'mac_address': '00:00:00:00:00:01',
'ip_address': '2003::f'}],
})
self.port.update(new_port_dict)
self.assertEqual(new_port_dict, self.port.neutron_port_dict)
self.assertIsNot(new_port_dict, self.port.neutron_port_dict)
self.assertEqual(added_ips, self.port.fixed_ips)
self.assertEqual({('00:00:00:00:00:01', '192.168.0.1')},
self.port.allowed_pairs_v4)
self.assertIn(('00:00:00:00:00:01', '2003::f'),
self.port.allowed_pairs_v6)
class TestSGPortMap(base.BaseTestCase):
def setUp(self):
super(TestSGPortMap, self).setUp()
self.map = ovsfw.SGPortMap()
def test_get_or_create_sg_existing_sg(self):
self.map.sec_groups['id'] = mock.sentinel
sg = self.map.get_or_create_sg('id')
self.assertIs(mock.sentinel, sg)
def test_get_or_create_sg_nonexisting_sg(self):
with mock.patch.object(ovsfw, 'SecurityGroup') as sg_mock:
sg = self.map.get_or_create_sg('id')
self.assertEqual(sg_mock.return_value, sg)
def _check_port(self, port_id, expected_sg_ids):
port = self.map.ports[port_id]
expected_sgs = [self.map.sec_groups[sg_id]
for sg_id in expected_sg_ids]
self.assertEqual(port.sec_groups, expected_sgs)
def _check_sg(self, sg_id, expected_port_ids):
sg = self.map.sec_groups[sg_id]
expected_ports = {self.map.ports[port_id]
for port_id in expected_port_ids}
self.assertEqual(sg.ports, expected_ports)
def _create_ports_and_sgroups(self):
sg_1 = ovsfw.SecurityGroup(1)
sg_2 = ovsfw.SecurityGroup(2)
sg_3 = ovsfw.SecurityGroup(3)
port_a = create_ofport({'device': 'a'})
port_b = create_ofport({'device': 'b'})
self.map.ports = {'a': port_a, 'b': port_b}
self.map.sec_groups = {1: sg_1, 2: sg_2, 3: sg_3}
port_a.sec_groups = [sg_1, sg_2]
port_b.sec_groups = [sg_2, sg_3]
sg_1.ports = {port_a}
sg_2.ports = {port_a, port_b}
sg_3.ports = {port_b}
def test_create_port(self):
port = create_ofport({'device': 'a'})
sec_groups = ['1', '2']
port_dict = {'security_groups': sec_groups}
self.map.create_port(port, port_dict)
self._check_port('a', sec_groups)
self._check_sg('1', ['a'])
self._check_sg('2', ['a'])
def test_update_port_sg_added(self):
self._create_ports_and_sgroups()
port_dict = {'security_groups': [1, 2, 3]}
self.map.update_port(self.map.ports['b'], port_dict)
self._check_port('a', [1, 2])
self._check_port('b', [1, 2, 3])
self._check_sg(1, ['a', 'b'])
self._check_sg(2, ['a', 'b'])
self._check_sg(3, ['b'])
def test_update_port_sg_removed(self):
self._create_ports_and_sgroups()
port_dict = {'security_groups': [1]}
self.map.update_port(self.map.ports['b'], port_dict)
self._check_port('a', [1, 2])
self._check_port('b', [1])
self._check_sg(1, ['a', 'b'])
self._check_sg(2, ['a'])
self._check_sg(3, [])
def test_remove_port(self):
self._create_ports_and_sgroups()
self.map.remove_port(self.map.ports['a'])
self._check_port('b', [2, 3])
self._check_sg(1, [])
self._check_sg(2, ['b'])
self._check_sg(3, ['b'])
self.assertNotIn('a', self.map.ports)
def test_update_rules(self):
"""Just make sure it doesn't crash"""
self.map.update_rules(1, [])
def test_update_members(self):
"""Just make sure we doesn't crash"""
self.map.update_members(1, [])
class TestConjIdMap(base.BaseTestCase):
def setUp(self):
super(TestConjIdMap, self).setUp()
self.conj_id_map = ovsfw.ConjIdMap()
def test_get_conj_id(self):
allocated = []
for direction in [firewall.EGRESS_DIRECTION,
firewall.INGRESS_DIRECTION]:
id_ = self.conj_id_map.get_conj_id(
'sg', 'remote', direction, constants.IPv4)
allocated.append(id_)
self.assertEqual(len(set(allocated)), 2)
self.assertEqual(len(self.conj_id_map.id_map), 2)
self.assertEqual(self.conj_id_map.get_conj_id(
'sg', 'remote', firewall.EGRESS_DIRECTION, constants.IPv4),
allocated[0])
def test_get_conj_id_invalid(self):
self.assertRaises(ValueError, self.conj_id_map.get_conj_id,
'sg', 'remote', 'invalid-direction',
constants.IPv6)
def test_delete_sg(self):
test_data = [('sg1', 'sg1'), ('sg1', 'sg2')]
ids = []
for sg_id, remote_sg_id in test_data:
ids.append(self.conj_id_map.get_conj_id(
sg_id, remote_sg_id,
firewall.INGRESS_DIRECTION, constants.IPv6))
result = self.conj_id_map.delete_sg('sg1')
self.assertIn(('sg1', ids[0]), result)
self.assertIn(('sg2', ids[1]), result)
self.assertFalse(self.conj_id_map.id_map)
reallocated = self.conj_id_map.get_conj_id(
'sg-foo', 'sg-foo', firewall.INGRESS_DIRECTION,
constants.IPv6)
self.assertIn(reallocated, ids)
class TestConjIPFlowManager(base.BaseTestCase):
def setUp(self):
super(TestConjIPFlowManager, self).setUp()
self.driver = mock.Mock()
self.manager = ovsfw.ConjIPFlowManager(self.driver)
self.vlan_tag = 100
self.conj_id = 16
def test_update_flows_for_vlan(self):
remote_group = self.driver.sg_port_map.get_sg.return_value
remote_group.get_ethertype_filtered_addresses.return_value = [
'10.22.3.4']
with mock.patch.object(self.manager.conj_id_map,
'get_conj_id') as get_conj_id_mock:
get_conj_id_mock.return_value = self.conj_id
self.manager.add(self.vlan_tag, 'sg', 'remote_id',
firewall.INGRESS_DIRECTION, constants.IPv4, 0)
self.manager.add(self.vlan_tag, 'sg', 'remote_id',
firewall.INGRESS_DIRECTION, constants.IPv4, 3)
self.manager.update_flows_for_vlan(self.vlan_tag)
self.assertEqual(self.driver._add_flow.call_args_list,
[mock.call(actions='conjunction(16,1/2)', ct_state='+est-rel-rpl',
dl_type=2048, nw_src='10.22.3.4/32', priority=70,
reg_net=self.vlan_tag, table=82),
mock.call(actions='conjunction(17,1/2)', ct_state='+new-est',
dl_type=2048, nw_src='10.22.3.4/32', priority=70,
reg_net=self.vlan_tag, table=82),
mock.call(actions='conjunction(22,1/2)', ct_state='+est-rel-rpl',
dl_type=2048, nw_src='10.22.3.4/32', priority=73,
reg_net=self.vlan_tag, table=82),
mock.call(actions='conjunction(23,1/2)', ct_state='+new-est',
dl_type=2048, nw_src='10.22.3.4/32', priority=73,
reg_net=self.vlan_tag, table=82)])
def test_sg_removed(self):
with mock.patch.object(self.manager.conj_id_map,
'get_conj_id') as get_id_mock, \
mock.patch.object(self.manager.conj_id_map,
'delete_sg') as delete_sg_mock:
get_id_mock.return_value = self.conj_id
delete_sg_mock.return_value = [('remote_id', self.conj_id)]
self.manager.add(self.vlan_tag, 'sg', 'remote_id',
firewall.INGRESS_DIRECTION, constants.IPv4, 0)
self.manager.flow_state[self.vlan_tag][(
firewall.INGRESS_DIRECTION, constants.IPv4)] = {
'10.22.3.4': [self.conj_id]}
self.manager.sg_removed('sg')
self.driver._add_flow.assert_not_called()
self.driver.delete_flows_for_ip_addresses.assert_called_once_with(
{'10.22.3.4'}, firewall.INGRESS_DIRECTION, constants.IPv4,
self.vlan_tag)
class FakeOVSPort(object):
def __init__(self, name, port, mac):
self.port_name = name
self.ofport = port
self.vif_mac = mac
class TestOVSFirewallDriver(base.BaseTestCase):
def setUp(self):
super(TestOVSFirewallDriver, self).setUp()
mock_bridge = mock.patch.object(
ovs_lib, 'OVSBridge', autospec=True).start()
self.firewall = ovsfw.OVSFirewallDriver(mock_bridge)
self.mock_bridge = self.firewall.int_br
self.mock_bridge.reset_mock()
self.fake_ovs_port = FakeOVSPort('port', 1, '00:00:00:00:00:00')
self.mock_bridge.br.get_vif_port_by_id.return_value = \
self.fake_ovs_port
def _prepare_security_group(self):
security_group_rules = [
{'ethertype': constants.IPv4,
'protocol': constants.PROTO_NAME_TCP,
'direction': firewall.INGRESS_DIRECTION,
'port_range_min': 123,
'port_range_max': 123}]
self.firewall.update_security_group_rules(1, security_group_rules)
security_group_rules = [
{'ethertype': constants.IPv4,
'protocol': constants.PROTO_NAME_UDP,
'direction': firewall.EGRESS_DIRECTION},
{'ethertype': constants.IPv6,
'protocol': constants.PROTO_NAME_TCP,
'remote_group_id': 2,
'direction': firewall.EGRESS_DIRECTION}]
self.firewall.update_security_group_rules(2, security_group_rules)
@property
def port_ofport(self):
return self.mock_bridge.br.get_vif_port_by_id.return_value.ofport
@property
def port_mac(self):
return self.mock_bridge.br.get_vif_port_by_id.return_value.vif_mac
def test_callbacks_registered(self):
with mock.patch.object(callbacks_registry, "subscribe") as subscribe:
firewall = ovsfw.OVSFirewallDriver(mock.MagicMock())
subscribe.assert_called_once_with(
firewall._init_firewall_callback,
callbacks_resources.AGENT,
callbacks_events.OVS_RESTARTED)
def test_initialize_bridge(self):
br = self.firewall.initialize_bridge(self.mock_bridge)
self.assertEqual(br, self.mock_bridge.deferred.return_value)
def test__add_flow_dl_type_formatted_to_string(self):
dl_type = 0x0800
self.firewall._add_flow(dl_type=dl_type)
def test__add_flow_registers_are_replaced(self):
self.firewall._add_flow(in_port=1, reg_port=1, reg_net=2)
expected_calls = {'in_port': 1,
'reg{:d}'.format(ovsfw_consts.REG_PORT): 1,
'reg{:d}'.format(ovsfw_consts.REG_NET): 2}
self.mock_bridge.br.add_flow.assert_called_once_with(
**expected_calls)
def test__drop_all_unmatched_flows(self):
self.firewall._drop_all_unmatched_flows()
expected_calls = [
mock.call(actions='drop', priority=0,
table=ovs_consts.BASE_EGRESS_TABLE),
mock.call(actions='drop', priority=0,
table=ovs_consts.RULES_EGRESS_TABLE),
mock.call(actions='drop', priority=0,
table=ovs_consts.ACCEPT_OR_INGRESS_TABLE),
mock.call(actions='drop', priority=0,
table=ovs_consts.BASE_INGRESS_TABLE),
mock.call(actions='drop', priority=0,
table=ovs_consts.RULES_INGRESS_TABLE)]
actual_calls = self.firewall.int_br.br.add_flow.call_args_list
self.assertEqual(expected_calls, actual_calls)
def test_get_or_create_ofport_non_existing(self):
port_dict = {
'device': 'port-id',
'security_groups': [123, 456]}
port = self.firewall.get_or_create_ofport(port_dict)
sg1, sg2 = sorted(
self.firewall.sg_port_map.sec_groups.values(),
key=lambda x: x.id)
self.assertIn(port, self.firewall.sg_port_map.ports.values())
self.assertEqual(
sorted(port.sec_groups, key=lambda x: x.id), [sg1, sg2])
self.assertIn(port, sg1.ports)
self.assertIn(port, sg2.ports)
def test_get_or_create_ofport_existing(self):
port_dict = {
'device': 'port-id',
'security_groups': [123, 456]}
of_port = create_ofport(port_dict)
self.firewall.sg_port_map.ports[of_port.id] = of_port
port = self.firewall.get_or_create_ofport(port_dict)
sg1, sg2 = sorted(
self.firewall.sg_port_map.sec_groups.values(),
key=lambda x: x.id)
self.assertIs(of_port, port)
self.assertIn(port, self.firewall.sg_port_map.ports.values())
self.assertEqual(
sorted(port.sec_groups, key=lambda x: x.id), [sg1, sg2])
self.assertIn(port, sg1.ports)
self.assertIn(port, sg2.ports)
def test_get_or_create_ofport_changed(self):
port_dict = {
'device': 'port-id',
'security_groups': [123, 456]}
of_port = create_ofport(port_dict)
self.firewall.sg_port_map.ports[of_port.id] = of_port
fake_ovs_port = FakeOVSPort('port', 2, '00:00:00:00:00:00')
self.mock_bridge.br.get_vif_port_by_id.return_value = \
fake_ovs_port
port = self.firewall.get_or_create_ofport(port_dict)
self.assertEqual(port.ofport, 2)
def test_get_or_create_ofport_missing(self):
port_dict = {
'device': 'port-id',
'security_groups': [123, 456]}
self.mock_bridge.br.get_vif_port_by_id.return_value = None
with testtools.ExpectedException(exceptions.OVSFWPortNotFound):
self.firewall.get_or_create_ofport(port_dict)
def test_get_or_create_ofport_missing_nocreate(self):
port_dict = {
'device': 'port-id',
'security_groups': [123, 456]}
self.mock_bridge.br.get_vif_port_by_id.return_value = None
self.assertIsNone(self.firewall.get_ofport(port_dict))
self.assertFalse(self.mock_bridge.br.get_vif_port_by_id.called)
def test_is_port_managed_managed_port(self):
port_dict = {'device': 'port-id'}
self.firewall.sg_port_map.ports[port_dict['device']] = object()
is_managed = self.firewall.is_port_managed(port_dict)
self.assertTrue(is_managed)
def test_is_port_managed_not_managed_port(self):
port_dict = {'device': 'port-id'}
is_managed = self.firewall.is_port_managed(port_dict)
self.assertFalse(is_managed)
def test_prepare_port_filter(self):
port_dict = {'device': 'port-id',
'security_groups': [1],
'fixed_ips': ["10.0.0.1"]}
self._prepare_security_group()
self.firewall.prepare_port_filter(port_dict)
exp_egress_classifier = mock.call(
actions='set_field:{:d}->reg5,set_field:{:d}->reg6,'
'resubmit(,{:d})'.format(
self.port_ofport, TESTING_VLAN_TAG,
ovs_consts.BASE_EGRESS_TABLE),
in_port=self.port_ofport,
priority=100,
table=ovs_consts.TRANSIENT_TABLE)
exp_ingress_classifier = mock.call(
actions='set_field:{:d}->reg5,set_field:{:d}->reg6,'
'strip_vlan,resubmit(,{:d})'.format(
self.port_ofport, TESTING_VLAN_TAG,
ovs_consts.BASE_INGRESS_TABLE),
dl_dst=self.port_mac,
dl_vlan='0x%x' % TESTING_VLAN_TAG,
priority=90,
table=ovs_consts.TRANSIENT_TABLE)
filter_rule = mock.call(
actions='ct(commit,zone=NXM_NX_REG6[0..15]),'
'output:{:d},resubmit(,{:d})'.format(
self.port_ofport,
ovs_consts.ACCEPTED_INGRESS_TRAFFIC_TABLE),
dl_type="0x{:04x}".format(n_const.ETHERTYPE_IP),
nw_proto=constants.PROTO_NUM_TCP,
priority=77,
reg5=self.port_ofport,
ct_state=ovsfw_consts.OF_STATE_NEW_NOT_ESTABLISHED,
table=ovs_consts.RULES_INGRESS_TABLE,
tcp_dst='0x007b')
calls = self.mock_bridge.br.add_flow.call_args_list
for call in exp_ingress_classifier, exp_egress_classifier, filter_rule:
self.assertIn(call, calls)
def test_prepare_port_filter_port_security_disabled(self):
port_dict = {'device': 'port-id',
'security_groups': [1],
'port_security_enabled': False}
self._prepare_security_group()
with mock.patch.object(
self.firewall, 'initialize_port_flows') as m_init_flows:
self.firewall.prepare_port_filter(port_dict)
self.assertFalse(m_init_flows.called)
def test_prepare_port_filter_initialized_port(self):
port_dict = {'device': 'port-id',
'security_groups': [1]}
self._prepare_security_group()
self.firewall.prepare_port_filter(port_dict)
self.assertFalse(self.mock_bridge.br.delete_flows.called)
self.firewall.prepare_port_filter(port_dict)
self.assertTrue(self.mock_bridge.br.delete_flows.called)
def test_update_port_filter(self):
port_dict = {'device': 'port-id',
'security_groups': [1]}
self._prepare_security_group()
self.firewall.prepare_port_filter(port_dict)
port_dict['security_groups'] = [2]
self.mock_bridge.reset_mock()
self.firewall.update_port_filter(port_dict)
self.assertTrue(self.mock_bridge.br.delete_flows.called)
conj_id = self.firewall.conj_ip_manager.conj_id_map.get_conj_id(
2, 2, firewall.EGRESS_DIRECTION, constants.IPv6)
filter_rules = [mock.call(
actions='resubmit(,{:d})'.format(
ovs_consts.ACCEPT_OR_INGRESS_TABLE),
dl_type="0x{:04x}".format(n_const.ETHERTYPE_IP),
nw_proto=constants.PROTO_NUM_UDP,
priority=77,
ct_state=ovsfw_consts.OF_STATE_NEW_NOT_ESTABLISHED,
reg5=self.port_ofport,
table=ovs_consts.RULES_EGRESS_TABLE),
mock.call(
actions='conjunction({:d},2/2)'.format(conj_id + 6),
ct_state=ovsfw_consts.OF_STATE_ESTABLISHED_NOT_REPLY,
dl_type=mock.ANY,
nw_proto=6,
priority=73, reg5=self.port_ofport,
table=ovs_consts.RULES_EGRESS_TABLE)]
self.mock_bridge.br.add_flow.assert_has_calls(
filter_rules, any_order=True)
def test_update_port_filter_create_new_port_if_not_present(self):
port_dict = {'device': 'port-id',
'security_groups': [1]}
self._prepare_security_group()
with mock.patch.object(
self.firewall, 'prepare_port_filter'
) as prepare_mock, mock.patch.object(
self.firewall, 'initialize_port_flows'
) as initialize_port_flows_mock, mock.patch.object(
self.firewall, 'add_flows_from_rules'
) as add_flows_from_rules_mock:
self.firewall.update_port_filter(port_dict)
self.assertFalse(prepare_mock.called)
self.assertFalse(self.mock_bridge.br.delete_flows.called)
self.assertTrue(initialize_port_flows_mock.called)
self.assertTrue(add_flows_from_rules_mock.called)
def test_update_port_filter_port_security_disabled(self):
port_dict = {'device': 'port-id',
'security_groups': [1]}
self._prepare_security_group()
self.firewall.prepare_port_filter(port_dict)
port_dict['port_security_enabled'] = False
self.firewall.update_port_filter(port_dict)
self.assertTrue(self.mock_bridge.br.delete_flows.called)
def test_update_port_filter_applies_added_flows(self):
"""Check flows are applied right after _set_flows is called."""
port_dict = {'device': 'port-id',
'security_groups': [1]}
self._prepare_security_group()
self.firewall.prepare_port_filter(port_dict)
with self.firewall.defer_apply():
self.firewall.update_port_filter(port_dict)
self.assertEqual(2, self.mock_bridge.apply_flows.call_count)
def test_remove_port_filter(self):
port_dict = {'device': 'port-id',
'security_groups': [1]}
self._prepare_security_group()
self.firewall.prepare_port_filter(port_dict)
self.firewall.remove_port_filter(port_dict)
self.assertTrue(self.mock_bridge.br.delete_flows.called)
self.assertIn(1, self.firewall.sg_to_delete)
def test_remove_port_filter_port_security_disabled(self):
port_dict = {'device': 'port-id',
'security_groups': [1]}
self.firewall.remove_port_filter(port_dict)
self.assertFalse(self.mock_bridge.br.delete_flows.called)
def test_update_security_group_rules(self):
"""Just make sure it doesn't crash"""
new_rules = [
{'ethertype': constants.IPv4,
'direction': firewall.INGRESS_DIRECTION,
'protocol': constants.PROTO_NAME_ICMP},
{'ethertype': constants.IPv4,
'direction': firewall.EGRESS_DIRECTION,
'remote_group_id': 2}]
self.firewall.update_security_group_rules(1, new_rules)
def test_update_security_group_members(self):
"""Just make sure it doesn't crash"""
new_members = {constants.IPv4: [1, 2, 3, 4]}
self.firewall.update_security_group_members(2, new_members)
def test__cleanup_stale_sg(self):
self._prepare_security_group()
self.firewall.sg_to_delete = {1}
with mock.patch.object(self.firewall.conj_ip_manager,
'sg_removed') as sg_removed_mock,\
mock.patch.object(self.firewall.sg_port_map,
'delete_sg') as delete_sg_mock:
self.firewall._cleanup_stale_sg()
sg_removed_mock.assert_called_once_with(1)
delete_sg_mock.assert_called_once_with(1)
def test_get_ovs_port(self):
ovs_port = self.firewall.get_ovs_port('port_id')
self.assertEqual(self.fake_ovs_port, ovs_port)
def test_get_ovs_port_non_existent(self):
self.mock_bridge.br.get_vif_port_by_id.return_value = None
with testtools.ExpectedException(exceptions.OVSFWPortNotFound):
self.firewall.get_ovs_port('port_id')
def test__initialize_egress_no_port_security_sends_to_egress(self):
self.mock_bridge.br.db_get_val.return_value = {'tag': TESTING_VLAN_TAG}
self.firewall._initialize_egress_no_port_security('port_id')
expected_call = mock.call(
table=ovs_consts.TRANSIENT_TABLE,
priority=100,
in_port=self.fake_ovs_port.ofport,
actions='set_field:%d->reg%d,'
'set_field:%d->reg%d,'
'resubmit(,%d)' % (
self.fake_ovs_port.ofport,
ovsfw_consts.REG_PORT,
TESTING_VLAN_TAG,
ovsfw_consts.REG_NET,
ovs_consts.ACCEPT_OR_INGRESS_TABLE)
)
calls = self.mock_bridge.br.add_flow.call_args_list
self.assertIn(expected_call, calls)
def test__initialize_egress_no_port_security_no_tag(self):
self.mock_bridge.br.db_get_val.return_value = {}
self.firewall._initialize_egress_no_port_security('port_id')
self.assertFalse(self.mock_bridge.br.add_flow.called)
def test__remove_egress_no_port_security_deletes_flow(self):
self.mock_bridge.br.db_get_val.return_value = {'tag': TESTING_VLAN_TAG}
self.firewall.sg_port_map.unfiltered['port_id'] = 1
self.firewall._remove_egress_no_port_security('port_id')
expected_call = mock.call(
table=ovs_consts.TRANSIENT_TABLE,
in_port=self.fake_ovs_port.ofport,
)
calls = self.mock_bridge.br.delete_flows.call_args_list
self.assertIn(expected_call, calls)
def test__remove_egress_no_port_security_non_existing_port(self):
with testtools.ExpectedException(exceptions.OVSFWPortNotHandled):
self.firewall._remove_egress_no_port_security('foo')
def test_process_trusted_ports_caches_port_id(self):
self.firewall.process_trusted_ports(['port_id'])
self.assertIn('port_id', self.firewall.sg_port_map.unfiltered)
def test_process_trusted_ports_port_not_found(self):
"""Check that exception is not propagated outside."""
self.mock_bridge.br.get_vif_port_by_id.return_value = None
self.firewall.process_trusted_ports(['port_id'])
# Processing should have failed so port is not cached
self.assertNotIn('port_id', self.firewall.sg_port_map.unfiltered)
def test_remove_trusted_ports_clears_cached_port_id(self):
self.firewall.sg_port_map.unfiltered['port_id'] = 1
self.firewall.remove_trusted_ports(['port_id'])
self.assertNotIn('port_id', self.firewall.sg_port_map.unfiltered)
def test_remove_trusted_ports_not_managed_port(self):
"""Check that exception is not propagated outside."""
self.firewall.remove_trusted_ports(['port_id'])
class TestCookieContext(base.BaseTestCase):
def setUp(self):
super(TestCookieContext, self).setUp()
# Don't attempt to connect to ovsdb
mock.patch('neutron.agent.ovsdb.api.from_config').start()
# Don't trigger iptables -> ovsfw migration
mock.patch(
'neutron.agent.linux.openvswitch_firewall.iptables.Helper').start()
self.execute = mock.patch.object(
utils, "execute", spec=utils.execute).start()
bridge = ovs_bridge.OVSAgentBridge('foo')
mock.patch.object(
ovsfw.OVSFirewallDriver, 'initialize_bridge',
return_value=bridge.deferred(
full_ordered=True, use_bundle=True)).start()
self.firewall = ovsfw.OVSFirewallDriver(bridge)
# Remove calls from firewall initialization
self.execute.reset_mock()
def test_cookie_is_different_in_context(self):
default_cookie = self.firewall.int_br.br.default_cookie
with self.firewall.update_cookie_context():
self.firewall._add_flow(actions='drop')
update_cookie = self.firewall._update_cookie
self.firewall._add_flow(actions='drop')
expected_calls = [
mock.call(
mock.ANY,
process_input='hard_timeout=0,idle_timeout=0,priority=1,'
'cookie=%d,actions=drop' % cookie,
run_as_root=mock.ANY,
) for cookie in (update_cookie, default_cookie)
]
self.execute.assert_has_calls(expected_calls)
def test_context_cookie_is_not_left_as_used(self):
with self.firewall.update_cookie_context():
update_cookie = self.firewall._update_cookie
self.assertNotIn(
update_cookie,
self.firewall.int_br.br._reserved_cookies)
| neutron/tests/unit/agent/linux/openvswitch_firewall/test_firewall.py | 33,871 | Check that exception is not propagated outside.
Check that exception is not propagated outside.
Just make sure it doesn't crash
Check flows are applied right after _set_flows is called.
Just make sure it doesn't crash
Just make sure it doesn't crash
Just make sure it doesn't crash
Copyright 2015 Red Hat, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Processing should have failed so port is not cached Don't attempt to connect to ovsdb Don't trigger iptables -> ovsfw migration Remove calls from firewall initialization | 1,032 | en | 0.930864 |
"""
Test Sermin config module
"""
from sermin.config.module import Registry, Namespace, Setting, settings
from sermin.config.utils import parse_args
from .utils import SafeTestCase
class SettingsTest(SafeTestCase):
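    # setUp/tearDown snapshot and restore the module-level `settings` registry so the
    # namespaces created by one test never leak into another.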
def setUp(self):
self.old_settings = settings._namespaces
settings._clear()
def tearDown(self):
settings.__dict__['_namespaces'] = self.old_settings
def test_settings_exists(self):
self.assertIsInstance(settings, Registry)
def test_create_namespace(self):
settings.test = 'Test settings'
self.assertIsInstance(settings.test, Namespace)
self.assertEqual(settings.test._label, 'Test settings')
def test_create_setting(self):
settings.test = 'Test settings'
settings.test.setting = Setting('Test setting')
self.assertIsInstance(settings.test._settings['setting'], Setting)
self.assertEqual(
settings.test._settings['setting'].label, 'Test setting',
)
self.assertEqual(settings.test.setting, None)
def test_set_setting(self):
settings.test = 'Test settings'
settings.test.setting = Setting('Test setting')
settings.test.setting = 'Testing'
self.assertEqual(settings.test.setting, 'Testing')
def test_cannot_redefine_namespace(self):
settings.test = 'Test settings'
with self.assertRaisesRegexp(
ValueError, r'^Namespaces cannot be redefined$',
):
settings.test = 'Second assignment'
def test_cannot_redefine_setting(self):
settings.test = 'Test settings'
settings.test.setting = Setting('Test setting')
with self.assertRaisesRegexp(
ValueError, r'^Settings cannot be redefined$',
):
settings.test.setting = Setting('Second assignment')
def test_setting_evaluates_bool(self):
settings.test = 'Test settings'
settings.test.setting = Setting('Test')
settings.test.setting = False
        self.assertIs(type(settings.test.setting), bool)  # assertTrue(x, msg) could never fail here
self.assertFalse(settings.test.setting)
class ParseArgsTest(SafeTestCase):
def test_empty(self):
unnamed, named = parse_args('')
self.assertIsInstance(unnamed, list)
self.assertIsInstance(named, dict)
self.assertEqual(len(unnamed), 0)
self.assertEqual(len(named), 0)
| tests/test_config.py | 2,391 | Test Sermin config module | 25 | ru | 0.064592 |
import numpy as np
from rafiki.constants import TaskType
def ensemble_predictions(predictions_list, predict_label_mappings, task):
# TODO: Better ensembling of predictions based on `predict_label_mapping` & `task` of models
if len(predictions_list) == 0 or len(predictions_list[0]) == 0:
return []
# By default, just return some trial's predictions
index = 0
predictions = predictions_list[index]
predict_label_mapping = predict_label_mappings[index]
if task == TaskType.IMAGE_CLASSIFICATION:
# Map probabilities to most probable label
pred_indices = np.argmax(predictions, axis=1)
predictions = [predict_label_mapping[str(i)] for i in pred_indices]
return predictions
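# Minimal usage sketch (hypothetical values; assumes each trial returns per-class
# probabilities and the label mapping indexes classes by their stringified position):
#   probs = [[0.1, 0.9], [0.8, 0.2]]
#   mapping = {'0': 'cat', '1': 'dog'}
#   ensemble_predictions([probs], [mapping], TaskType.IMAGE_CLASSIFICATION)
#   # -> ['dog', 'cat']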
| rafiki/predictor/ensemble.py | 743 | TODO: Better ensembling of predictions based on `predict_label_mapping` & `task` of models By default, just return some trial's predictions Map probabilities to most probable label | 180 | en | 0.828244 |
"""Auto-generated file, do not edit by hand. EC metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_EC = PhoneMetadata(id='EC', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='[19]\\d{2}', possible_number_pattern='\\d{3}'),
toll_free=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
premium_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
emergency=PhoneNumberDesc(national_number_pattern='1(?:0[12]|12)|911', possible_number_pattern='\\d{3}', example_number='911'),
short_code=PhoneNumberDesc(national_number_pattern='1(?:0[12]|12)|911', possible_number_pattern='\\d{3}', example_number='911'),
standard_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
carrier_specific=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
short_data=True)
| python/phonenumbers/shortdata/region_EC.py | 993 | Auto-generated file, do not edit by hand. EC metadata | 53 | en | 0.768489 |
import django_filters
from django_filters import DateFilter
from .models import Pet
class PetFilter(django_filters.FilterSet):
# name = django_filters.CharFilter(lookup_expr='iexact')
    start_date = DateFilter(field_name="age", lookup_expr='gte')  # greater than or equal to
    end_date = DateFilter(field_name="age", lookup_expr='lte')  # less than or equal to
class Meta:
model = Pet
fields = ['age',
'pet_type','breed','size', 'sex' ,'vaccinated',
'castrated','dewormed','vulnerable', ]
exclude = ['age']
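# Example querystring this FilterSet could accept once wired to a view (hypothetical
# values; the exact date format depends on the project's DATE_INPUT_FORMATS):
#   ?pet_type=dog&sex=F&start_date=2020-01-01&end_date=2021-01-01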
| pets/filters.py | 576 | name = django_filters.CharFilter(lookup_expr='iexact')greater or equal toless or equal to | 89 | en | 0.472622 |
# coding:utf-8
import sys
import codecs
from pathlib import Path
from collections import defaultdict
MAIN_PATH = Path(__file__).absolute().parent.parent.parent
sys.path.insert(0, str(MAIN_PATH))
from log import log_info as _info
from log import log_error as _error
from log import print_process as _process
class Vertex(object):
def __init__(self, value):
self.value = value
def __eq__(self, other):
return self.value == other.value
def __str__(self):
return str(self.value)
def __lt__(self, other):
return self.value < other.value
def __hash__(self):
return hash(self.value)
class Graph(object):
def __init__(self):
self.graph = defaultdict(list)
def addEdge(self, u, v):
self.graph[u].append(v)
# for saving the nodes which have no outgoing arc
if v not in self.graph.keys():
self.graph[v] = []
def DFSSearchInner(self, u, explored_list):
explored_list[u] = True
self.cache.append(u)
for v in self.graph[u]:
if not explored_list[v]:
self.DFSSearchInner(v, explored_list)
def DFSSearch(self, u):
explored_list = {}
for v in self.graph.keys():
explored_list[v] = False
self.cache = []
self.DFSSearchInner(u, explored_list)
return self.cache
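  # SCCSearch is one DFS pass of Kosaraju's algorithm: the driver below runs it first on
  # the reversed graph to compute finishing times, then on the original graph in
  # decreasing finishing-time order, so each outer call explores exactly one strongly
  # connected component, keyed by its leader vertex.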
def SCCSearch(self, v_sorted):
self.t = 0
self.finish_time = {}
explored_list = {}
for v in self.graph.keys():
explored_list[v] = False
leaders = {}
for v in v_sorted:
if not explored_list[v]:
leaders[v] = []
self.SCCSearch_DFS(v, explored_list, leaders[v])
return self.finish_time, leaders
def SCCSearch_DFS(self, v, explored_list, leaders):
explored_list[v] = True
for u in self.graph[v]:
if not explored_list[u]:
leaders.append(u)
self.SCCSearch_DFS(u, explored_list, leaders)
self.t += 1
self.finish_time[v] = self.t
def readFile(path):
_info('Start building graph...')
graph = Graph()
with codecs.open(path, 'r', 'utf-8') as file:
data = file.read().split('\n')
for line in data:
line_split = line.split(' ')
u, other = line_split[0], line_split[1:]
u_obj = Vertex(u)
for v in other:
v_obj = Vertex(v)
graph.addEdge(u_obj, v_obj)
_info('Finish building graph...')
return graph
def reverseGraph(graph):
v_unsorted = list(graph.graph.keys())
v_sorted = sorted(v_unsorted, reverse=True)
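  # Any scan order works for the first Kosaraju pass; sorting (descending) just makes
  # the traversal deterministic.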
# reverse the graph
graph_rev = Graph()
for v in v_unsorted:
for u in graph.graph[v]:
graph_rev.addEdge(u, v)
return graph_rev, v_sorted
if __name__ == '__main__':
graph = readFile('test_scc.txt')
# sanity check
_info('Check the graph:')
cache = graph.DFSSearch(Vertex('b'))
for v in cache:
print(v, end=' ')
_info('Finish checking!', head='\n INFO')
# reverse the graph
_info('Reverse the graph...')
graph_rev, v_sorted = reverseGraph(graph)
# sanity check
_info('Check the graph:')
cache = graph_rev.DFSSearch(Vertex('a'))
for v in cache:
print(v, end=' ')
_info('Finish checking!', head='\n INFO')
# find SCCs
finish_time, _ = graph_rev.SCCSearch(v_sorted)
v_2nd_pass = reversed([v for v, _ in finish_time.items()])
_info('Start finding SCCs...')
_, leaders = graph.SCCSearch(v_2nd_pass)
for k in leaders.keys():
print(k)
_info('Result:')
for key, value in leaders.items():
print(key)
for v in value:
print(v)
    print()
| Course_2/Week_01/3_SCC.py | 3,473 | coding:utf-8 for saving the nodes which have no outgoing arc reverse the graph sanity check reverse the graph sanity check find SCCs | 132 | en | 0.886687 |
from deadfroglib import *
import Image
import math
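# Legacy Python 2 / PIL script: load willow.bmp, run each pixel through the colour
# transform below, report the round-trip RMS error, and plot the (R, G, B) points in an
# interactive deadfroglib 3D view (the Y/Cr/Cb image is also written out to foo.bmp).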
# set up the colors
BLACK = 0xff000000
WHITE = 0xffffffff
im = Image.open("willow.bmp")
imOut = Image.new(im.mode, im.size)
graph3d = CreateGraph3d()
minA = 1000
maxA = -1000
minB = 1000
maxB = -1000
minC = 1000
maxC = -1000
err = 0.0
for y in range(im.size[1]):
for x in range(im.size[0]):
(r, g, b) = im.getpixel((x, y))
# ya = round(0.299*r + 0.587*g + 0.114*b)
# cb = round(128 - 0.168736*r - 0.331264*g + 0.5*b)
# cr = round(128 + 0.5*r - 0.418688*g - 0.081312*b)
# ya = round( r + g + b)
# cb = round( r - g)
# cr = round( r + g - 2*b)
ya = round((+0.61333*r + 0.58095*g + 0.53509*b) * 0.575)
cb = round((-0.32762*r + 0.80357*g - 0.49693*b) * 0.575) + 128
cr = round((+0.71868*r - 0.12948*g - 0.68318*b) * 0.575) + 128
# {{0.61333, 0.58095, 0.53509}, {-0.32762, 0.80357, -0.49693}, {0.71868, -0.12948, -0.68318}}
if ya < minA: minA = ya
if ya > maxA: maxA = ya
if cr < minB: minB = cr
if cr > maxB: maxB = cr
if cb < minC: minC = cb
if cb > maxC: maxC = cb
# Invert Y Cr Cb
r2 = ya + 1.402 * (cr - 128)
g2 = ya - 0.34414 * (cb - 128) - 0.71414 * (cr - 128)
b2 = ya + 1.772 * (cb - 128)
# # Invert custom colour space
# cr -= 128
# cb -= 128
# ya /= 0.575
# cr /= 0.575
# cb /= 0.575
# r2 = 0.61333 * ya - 0.32762 * cb + 0.71868 * cr
# g2 = 0.58095 * ya + 0.80357 * cb - 0.12948 * cr
# b2 = 0.53509 * ya - 0.49693 * cb - 0.68318 * cr
# if r != r2 or g != g2 or b != b2:
# print "%5.2f %5.2f %5.2f" % (r,g,b)
# print "%5.2f %5.2f %5.2f" % (r2,g2,b2)
# print
# Calc RMS error for this pixel
err += math.sqrt((r - round(r2)) ** 2 +
(g - round(g2)) ** 2 +
(b - round(b2)) ** 2)
col = (r << 16) + (g << 8) + b
# Graph3dAddPoint(graph3d, ya-128, cr, cb, col)
# print "%6.2f %6.2f %6.2f" % (ya, cr, cb)
Graph3dAddPoint(graph3d, r-128, g-128, b-128, col)
imOut.putpixel((x,y), (int(ya), int(cr), int(cb)))
imOut.save("foo.bmp")
print "Min:", minA, minB, minC
print "Max:", maxA, maxB, maxC
#Graph3dAddPoint(graph3d, -128, -128, -128, WHITE)
#Graph3dAddPoint(graph3d, 128, 128, 128, WHITE)
print "err", err / (im.size[0] * im.size[1])
# set up the window
screenw = 600
screenh = 600
win = CreateWin(500, 50, screenw, screenh, True, '3d plot')
input = win.contents.inputManager.contents
font = CreateTextRenderer("Fixedsys", 8, True)
dist = 730.0
zoom = 800.0
rotX = 0.0
rotZ = 0.0
cx = screenw / 2
cy = screenh / 2
while not win.contents.windowClosed and input.keys[KEY_ESC] == 0:
bmp = AdvanceWin(win)
ClearBitmap(bmp, WHITE)
if input.lmb:
if input.keys[KEY_SHIFT]:
cx += input.mouseVelX
cy += input.mouseVelY
else:
rotX -= float(input.mouseVelY) * 0.01
rotZ += float(input.mouseVelX) * 0.01
#rotZ += 0.03
zoom *= 1.0 + (input.mouseVelZ * 0.05)
Graph3dRender(graph3d, bmp, cx, cy, dist, zoom, BLACK, rotX, rotZ)
DrawTextSimple(font, BLACK, bmp, screenw - 100, 5, str(win.contents.fps))
RectFill(bmp, 0, screenh - 40, 230, 40, WHITE)
DrawTextSimple(font, BLACK, bmp, 10, screenh - 35, "Hold left mouse to rotate")
DrawTextSimple(font, BLACK, bmp, 10, screenh - 20, "Mouse wheel to zoom")
| python/graph3d.py | 3,555 | set up the colors ya = round(0.299*r + 0.587*g + 0.114*b) cb = round(128 - 0.168736*r - 0.331264*g + 0.5*b) cr = round(128 + 0.5*r - 0.418688*g - 0.081312*b) ya = round( r + g + b) cb = round( r - g) cr = round( r + g - 2*b) {{0.61333, 0.58095, 0.53509}, {-0.32762, 0.80357, -0.49693}, {0.71868, -0.12948, -0.68318}} Invert Y Cr Cb Invert custom colour space cr -= 128 cb -= 128 ya /= 0.575 cr /= 0.575 cb /= 0.575 r2 = 0.61333 * ya - 0.32762 * cb + 0.71868 * cr g2 = 0.58095 * ya + 0.80357 * cb - 0.12948 * cr b2 = 0.53509 * ya - 0.49693 * cb - 0.68318 * cr if r != r2 or g != g2 or b != b2: print "%5.2f %5.2f %5.2f" % (r,g,b) print "%5.2f %5.2f %5.2f" % (r2,g2,b2) print Calc RMS error for this pixel Graph3dAddPoint(graph3d, ya-128, cr, cb, col) print "%6.2f %6.2f %6.2f" % (ya, cr, cb)Graph3dAddPoint(graph3d, -128, -128, -128, WHITE)Graph3dAddPoint(graph3d, 128, 128, 128, WHITE) set up the windowrotZ += 0.03 | 1,089 | en | 0.431921 |
# Copyright (c) OpenMMLab. All rights reserved.
log_level = 'INFO'
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=10)
evaluation = dict(interval=10, metric='mAP', key_indicator='AP')
optimizer = dict(
type='Adam',
lr=5e-4,
)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[170, 200])
total_epochs = 210
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
channel_cfg = dict(
num_output_channels=17,
dataset_joints=17,
dataset_channel=[
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
],
inference_channel=[
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
])
# model settings
model = dict(
type='TopDown',
pretrained='https://download.openmmlab.com/mmpose/'
'pretrain_models/hrnet_w32-36af842e.pth',
backbone=dict(
type='HRNet',
in_channels=3,
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
num_channels=(64, )),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(4, 4),
num_channels=(32, 64)),
stage3=dict(
num_modules=4,
num_branches=3,
block='BASIC',
num_blocks=(4, 4, 4),
num_channels=(32, 64, 128)),
stage4=dict(
num_modules=3,
num_branches=4,
block='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(32, 64, 128, 256))),
),
keypoint_head=dict(
type='TopdownHeatmapSimpleHead',
in_channels=32,
out_channels=channel_cfg['num_output_channels'],
num_deconv_layers=0,
extra=dict(final_conv_kernel=1, ),
loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)),
train_cfg=dict(),
test_cfg=dict(
flip_test=True,
post_process='default',
shift_heatmap=True,
modulate_kernel=11))
data_cfg = dict(
image_size=[192, 256],
heatmap_size=[48, 64],
num_output_channels=channel_cfg['num_output_channels'],
num_joints=channel_cfg['dataset_joints'],
dataset_channel=channel_cfg['dataset_channel'],
inference_channel=channel_cfg['inference_channel'],
soft_nms=False,
nms_thr=1.0,
oks_thr=0.9,
vis_thr=0.2,
use_gt_bbox=False,
det_bbox_thr=0.0,
bbox_file='data/coco/person_detection_results/'
'COCO_val2017_detections_AP_H_56_person.json',
)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownRandomFlip', flip_prob=0.5),
dict(
type='TopDownHalfBodyTransform',
num_joints_half_body=8,
prob_half_body=0.3),
dict(
type='TopDownGetRandomScaleRotation', rot_factor=40, scale_factor=0.5),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(type='TopDownGenerateTarget', sigma=2),
dict(
type='Collect',
keys=['img', 'target', 'target_weight'],
meta_keys=[
'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
'rotation', 'bbox_score', 'flip_pairs'
]),
]
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='Collect',
keys=['img'],
meta_keys=[
'image_file', 'center', 'scale', 'rotation', 'bbox_score',
'flip_pairs'
]),
]
test_pipeline = val_pipeline
data_root = 'data/coco'
data = dict(
samples_per_gpu=64,
workers_per_gpu=2,
val_dataloader=dict(samples_per_gpu=32),
test_dataloader=dict(samples_per_gpu=32),
train=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_train2017.json',
img_prefix=f'{data_root}/train2017/',
data_cfg=data_cfg,
pipeline=train_pipeline),
val=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
test=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
)
| demo/hrnet_w32_coco_256x192.py | 5,013 | Copyright (c) OpenMMLab. All rights reserved. learning policy dict(type='TensorboardLoggerHook') model settings | 111 | en | 0.768964 |
import codecs
from xml.sax.saxutils import quoteattr, escape
__all__ = ['XMLWriter']
ESCAPE_ENTITIES = {
    '\r': '&#13;'
}
class XMLWriter(object):
def __init__(self, stream, namespace_manager, encoding=None,
decl=1, extra_ns=None):
encoding = encoding or 'utf-8'
encoder, decoder, stream_reader, stream_writer = \
codecs.lookup(encoding)
self.stream = stream = stream_writer(stream)
if decl:
stream.write('<?xml version="1.0" encoding="%s"?>' % encoding)
self.element_stack = []
self.nm = namespace_manager
self.extra_ns = extra_ns or {}
self.closed = True
def __get_indent(self):
return " " * len(self.element_stack)
indent = property(__get_indent)
def __close_start_tag(self):
if not self.closed: # TODO:
self.closed = True
self.stream.write(">")
def push(self, uri):
self.__close_start_tag()
write = self.stream.write
write("\n")
write(self.indent)
write("<%s" % self.qname(uri))
self.element_stack.append(uri)
self.closed = False
self.parent = False
def pop(self, uri=None):
top = self.element_stack.pop()
if uri:
assert uri == top
write = self.stream.write
if not self.closed:
self.closed = True
write("/>")
else:
if self.parent:
write("\n")
write(self.indent)
write("</%s>" % self.qname(top))
self.parent = True
def element(self, uri, content, attributes={}):
"""Utility method for adding a complete simple element"""
self.push(uri)
        for k, v in attributes.items():
self.attribute(k, v)
self.text(content)
self.pop()
def namespaces(self, namespaces=None):
if not namespaces:
namespaces = self.nm.namespaces()
write = self.stream.write
write("\n")
for prefix, namespace in namespaces:
if prefix:
write(' xmlns:%s="%s"\n' % (prefix, namespace))
# Allow user-provided namespace bindings to prevail
elif prefix not in self.extra_ns:
write(' xmlns="%s"\n' % namespace)
for prefix, namespace in self.extra_ns.items():
if prefix:
write(' xmlns:%s="%s"\n' % (prefix, namespace))
else:
write(' xmlns="%s"\n' % namespace)
def attribute(self, uri, value):
write = self.stream.write
write(" %s=%s" % (self.qname(uri), quoteattr(value)))
def text(self, text):
self.__close_start_tag()
if "<" in text and ">" in text and not "]]>" in text:
self.stream.write("<![CDATA[")
self.stream.write(text)
self.stream.write("]]>")
else:
self.stream.write(escape(text, ESCAPE_ENTITIES))
def qname(self, uri):
"""Compute qname for a uri using our extra namespaces,
or the given namespace manager"""
for pre, ns in self.extra_ns.items():
if uri.startswith(ns):
if pre != "":
return ":".join(pre, uri[len(ns):])
else:
return uri[len(ns):]
return self.nm.qname(uri)
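# Minimal usage sketch (assumes `nsmgr` is an rdflib NamespaceManager, or any object
# exposing namespaces() and qname(uri)):
#   writer = XMLWriter(open('out.xml', 'wb'), nsmgr, extra_ns={'ex': 'http://example.org/'})
#   writer.push('http://example.org/root')
#   writer.namespaces()  # emit xmlns declarations on the still-open root element
#   writer.element('http://example.org/item', 'hello',
#                  attributes={'http://example.org/id': '1'})
#   writer.pop('http://example.org/root')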
| lib/rdflib/plugins/serializers/xmlwriter.py | 3,396 | Utility method for adding a complete simple element
Compute qname for a uri using our extra namespaces,
or the given namespace manager
TODO: Allow user-provided namespace bindings to prevail | 192 | en | 0.358571 |
from tests.conftest import log_in
def test_logout_auth_user(test_client):
"""
GIVEN a flask app
WHEN an authorized user logs out
THEN check that the user was logged out successfully
"""
log_in(test_client)
response = test_client.get("auth/logout", follow_redirects=True)
assert response.status_code == 200
# assert b"<!-- index.html -->" in response.data # Removed -- COVID
assert b"You have been logged out." in response.data
def test_logout_anon_user(test_client):
"""
GIVEN a flask app
    WHEN an anon user attempts to log out
THEN check that a message flashes informing them that they are already logged out.
"""
response = test_client.get("auth/logout", follow_redirects=True)
assert response.status_code == 200
# assert b"<!-- index.html -->" in response.data # Removed -- COVID
assert b"You were not, and still are not, logged in." in response.data
| tests/test_auth/test_logout.py | 932 | GIVEN a flask app
WHEN an anon user attempts to log out
THEN check that a message flashes informing them that they are already logged out.
GIVEN a flask app
WHEN an authorized user logs out
THEN check that the user was logged out successfully
assert b"<!-- index.html -->" in response.data Removed -- COVID assert b"<!-- index.html -->" in response.data Removed -- COVID | 373 | en | 0.837699 |
def demo():
"""Output:
---------⌝
----------
----?????-
----------
----------
--!!!-----
--!!!-----
----------
----------
⌞---------
"""
n = 10
# Construction is easy:
grid = {}
# Assignment is easy:
grid[(0, 0)] = "⌞"
grid[(n - 1, n - 1)] = "⌝"
# Helper functions that just work on the dictionary:
fill(grid, "!", start=(2, 3), stop=(5, 5))
fill(grid, "?", start=(4, 7), stop=(9, 8))
print(stringify(grid, n))
def fill(grid: dict, value: str, start=(0, 0), stop=(0, 0)):
"""Using product allows for flatter loops."""
from itertools import product
for coord in product(range(start[0], stop[0]), range(start[1], stop[1])):
grid[coord] = value
def stringify(grid: dict, n: int) -> str:
"""Stringify with (0, 0) in the lower-left corner."""
rows = []
for y in reversed(range(n)):
row = []
for x in range(n):
value = grid.get((x, y), "-")
row.append(value)
rows.append(row)
return "\n".join("".join(row) for row in rows)
if __name__ == "__main__":
demo()
| examples/grids/python/grid.py | 1,149 | Output:
---------⌝
----------
----?????-
----------
----------
--!!!-----
--!!!-----
----------
----------
⌞---------
Using product allows for flatter loops.
Stringify with (0, 0) in the lower-left corner.
Construction is easy: Assignment is easy: Helper functions that just work on the dictionary: | 300 | en | 0.518321 |
# coding: utf-8
"""
Apteco API
An API to allow access to Apteco Marketing Suite resources # noqa: E501
The version of the OpenAPI document: v2
Contact: support@apteco.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class Selection(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'ancestor_counts': 'bool',
'record_set': 'RecordSet',
'rule': 'Rule',
'rfv': 'RFV',
'n_per': 'NPer',
'top_n': 'TopN',
'limits': 'Limits',
'table_name': 'str',
'name': 'str'
}
attribute_map = {
'ancestor_counts': 'ancestorCounts',
'record_set': 'recordSet',
'rule': 'rule',
'rfv': 'rfv',
'n_per': 'nPer',
'top_n': 'topN',
'limits': 'limits',
'table_name': 'tableName',
'name': 'name'
}
def __init__(self, ancestor_counts=None, record_set=None, rule=None, rfv=None, n_per=None, top_n=None, limits=None, table_name=None, name=None): # noqa: E501
"""Selection - a model defined in OpenAPI""" # noqa: E501
self._ancestor_counts = None
self._record_set = None
self._rule = None
self._rfv = None
self._n_per = None
self._top_n = None
self._limits = None
self._table_name = None
self._name = None
self.discriminator = None
if ancestor_counts is not None:
self.ancestor_counts = ancestor_counts
if record_set is not None:
self.record_set = record_set
if rule is not None:
self.rule = rule
if rfv is not None:
self.rfv = rfv
if n_per is not None:
self.n_per = n_per
if top_n is not None:
self.top_n = top_n
if limits is not None:
self.limits = limits
self.table_name = table_name
if name is not None:
self.name = name
@property
def ancestor_counts(self):
"""Gets the ancestor_counts of this Selection. # noqa: E501
:return: The ancestor_counts of this Selection. # noqa: E501
:rtype: bool
"""
return self._ancestor_counts
@ancestor_counts.setter
def ancestor_counts(self, ancestor_counts):
"""Sets the ancestor_counts of this Selection.
:param ancestor_counts: The ancestor_counts of this Selection. # noqa: E501
:type: bool
"""
self._ancestor_counts = ancestor_counts
@property
def record_set(self):
"""Gets the record_set of this Selection. # noqa: E501
:return: The record_set of this Selection. # noqa: E501
:rtype: RecordSet
"""
return self._record_set
@record_set.setter
def record_set(self, record_set):
"""Sets the record_set of this Selection.
:param record_set: The record_set of this Selection. # noqa: E501
:type: RecordSet
"""
self._record_set = record_set
@property
def rule(self):
"""Gets the rule of this Selection. # noqa: E501
:return: The rule of this Selection. # noqa: E501
:rtype: Rule
"""
return self._rule
@rule.setter
def rule(self, rule):
"""Sets the rule of this Selection.
:param rule: The rule of this Selection. # noqa: E501
:type: Rule
"""
self._rule = rule
@property
def rfv(self):
"""Gets the rfv of this Selection. # noqa: E501
:return: The rfv of this Selection. # noqa: E501
:rtype: RFV
"""
return self._rfv
@rfv.setter
def rfv(self, rfv):
"""Sets the rfv of this Selection.
:param rfv: The rfv of this Selection. # noqa: E501
:type: RFV
"""
self._rfv = rfv
@property
def n_per(self):
"""Gets the n_per of this Selection. # noqa: E501
:return: The n_per of this Selection. # noqa: E501
:rtype: NPer
"""
return self._n_per
@n_per.setter
def n_per(self, n_per):
"""Sets the n_per of this Selection.
:param n_per: The n_per of this Selection. # noqa: E501
:type: NPer
"""
self._n_per = n_per
@property
def top_n(self):
"""Gets the top_n of this Selection. # noqa: E501
:return: The top_n of this Selection. # noqa: E501
:rtype: TopN
"""
return self._top_n
@top_n.setter
def top_n(self, top_n):
"""Sets the top_n of this Selection.
:param top_n: The top_n of this Selection. # noqa: E501
:type: TopN
"""
self._top_n = top_n
@property
def limits(self):
"""Gets the limits of this Selection. # noqa: E501
:return: The limits of this Selection. # noqa: E501
:rtype: Limits
"""
return self._limits
@limits.setter
def limits(self, limits):
"""Sets the limits of this Selection.
:param limits: The limits of this Selection. # noqa: E501
:type: Limits
"""
self._limits = limits
@property
def table_name(self):
"""Gets the table_name of this Selection. # noqa: E501
:return: The table_name of this Selection. # noqa: E501
:rtype: str
"""
return self._table_name
@table_name.setter
def table_name(self, table_name):
"""Sets the table_name of this Selection.
:param table_name: The table_name of this Selection. # noqa: E501
:type: str
"""
if table_name is None:
raise ValueError("Invalid value for `table_name`, must not be `None`") # noqa: E501
self._table_name = table_name
@property
def name(self):
"""Gets the name of this Selection. # noqa: E501
:return: The name of this Selection. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this Selection.
:param name: The name of this Selection. # noqa: E501
:type: str
"""
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Selection):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| apteco_api/models/selection.py | 8,038 | NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Returns true if both objects are equal
Selection - a model defined in OpenAPI
Returns true if both objects are not equal
For `print` and `pprint`
Gets the ancestor_counts of this Selection. # noqa: E501
:return: The ancestor_counts of this Selection. # noqa: E501
:rtype: bool
Sets the ancestor_counts of this Selection.
:param ancestor_counts: The ancestor_counts of this Selection. # noqa: E501
:type: bool
Gets the limits of this Selection. # noqa: E501
:return: The limits of this Selection. # noqa: E501
:rtype: Limits
Sets the limits of this Selection.
:param limits: The limits of this Selection. # noqa: E501
:type: Limits
Gets the n_per of this Selection. # noqa: E501
:return: The n_per of this Selection. # noqa: E501
:rtype: NPer
Sets the n_per of this Selection.
:param n_per: The n_per of this Selection. # noqa: E501
:type: NPer
Gets the name of this Selection. # noqa: E501
:return: The name of this Selection. # noqa: E501
:rtype: str
Sets the name of this Selection.
:param name: The name of this Selection. # noqa: E501
:type: str
Gets the record_set of this Selection. # noqa: E501
:return: The record_set of this Selection. # noqa: E501
:rtype: RecordSet
Sets the record_set of this Selection.
:param record_set: The record_set of this Selection. # noqa: E501
:type: RecordSet
Gets the rfv of this Selection. # noqa: E501
:return: The rfv of this Selection. # noqa: E501
:rtype: RFV
Sets the rfv of this Selection.
:param rfv: The rfv of this Selection. # noqa: E501
:type: RFV
Gets the rule of this Selection. # noqa: E501
:return: The rule of this Selection. # noqa: E501
:rtype: Rule
Sets the rule of this Selection.
:param rule: The rule of this Selection. # noqa: E501
:type: Rule
Gets the table_name of this Selection. # noqa: E501
:return: The table_name of this Selection. # noqa: E501
:rtype: str
Sets the table_name of this Selection.
:param table_name: The table_name of this Selection. # noqa: E501
:type: str
Returns the model properties as a dict
Returns the string representation of the model
Gets the top_n of this Selection. # noqa: E501
:return: The top_n of this Selection. # noqa: E501
:rtype: TopN
Sets the top_n of this Selection.
:param top_n: The top_n of this Selection. # noqa: E501
:type: TopN
Apteco API
An API to allow access to Apteco Marketing Suite resources # noqa: E501
The version of the OpenAPI document: v2
Contact: support@apteco.com
Generated by: https://openapi-generator.tech
coding: utf-8 noqa: F401 noqa: E501 noqa: E501 noqa: E501 | 2,689 | en | 0.735025 |
"""
mavDynamics
- this file implements the dynamic equations of motion for MAV
- use unit quaternion for the attitude state
part of mavPySim
- Beard & McLain, PUP, 2012
- Update history:
12/20/2018 - RWB
2/24/2020
"""
import sys
sys.path.append('..')
import numpy as np
# load message types
from message_types.msg_state import msgState
import parameters.aerosonde_parameters as MAV
from tools.rotations import Quaternion2Rotation, Quaternion2Euler, skew, quat_prod
import mavsim_python_chap5_model_coef as chap5
class mavDynamics:
def __init__(self, Ts):
self._ts_simulation = Ts
# set initial states based on parameter file
# _state is the 13x1 internal state of the aircraft that is being propagated:
# _state = [pn, pe, pd, u, v, w, e0, e1, e2, e3, p, q, r]
# We will also need a variety of other elements that are functions of the _state and the wind.
# self.true_state is a 19x1 vector that is estimated and used by the autopilot to control the aircraft:
# true_state = [pn, pe, h, Va, alpha, beta, phi, theta, chi, p, q, r, Vg, wn, we, psi, gyro_bx, gyro_by, gyro_bz]
self._state = np.array([[MAV.pn0], # (0)
[MAV.pe0], # (1)
[MAV.pd0], # (2)
[MAV.u0], # (3)
[MAV.v0], # (4)
[MAV.w0], # (5)
[MAV.e0], # (6)
[MAV.e1], # (7)
[MAV.e2], # (8)
[MAV.e3], # (9)
[MAV.p0], # (10)
[MAV.q0], # (11)
[MAV.r0]]) # (12)
# store wind data for fast recall since it is used at various points in simulation
self._wind = np.array([[0.], [0.], [0.]]) # wind in NED frame in meters/sec
self._update_velocity_data()
# store forces to avoid recalculation in the sensors function
        self._forces = np.array([[0.], [0.], [0.]])  # fx, fy, fz; initialized to zeros so _forces_moments can store into it
ur = self._state.item(3)
vr = self._state.item(4)
wr = self._state.item(5)
self._Va = np.sqrt(ur**2 + vr**2 + wr**2)
self._alpha = np.arctan2(wr,ur)
self._beta = np.arcsin(vr/self._Va)
# initialize true_state message
self.true_state = msgState()
###################################
# public functions
def update(self, delta, wind):
"""
Integrate the differential equations defining dynamics, update sensors
        delta = (delta_e, delta_a, delta_r, delta_t) are the control inputs
wind is the wind vector in inertial coordinates
Ts is the time step between function calls.
"""
        # get forces and moments acting on rigid body
forces_moments = self._forces_moments(delta)
# Integrate ODE using Runge-Kutta RK4 algorithm
time_step = self._ts_simulation
k1 = self._derivatives(self._state, forces_moments)
k2 = self._derivatives(self._state + time_step/2.*k1, forces_moments)
k3 = self._derivatives(self._state + time_step/2.*k2, forces_moments)
k4 = self._derivatives(self._state + time_step*k3, forces_moments)
self._state += time_step/6 * (k1 + 2*k2 + 2*k3 + k4)
# normalize the quaternion
e0 = self._state.item(6)
e1 = self._state.item(7)
e2 = self._state.item(8)
e3 = self._state.item(9)
normE = np.sqrt(e0**2+e1**2+e2**2+e3**2)
self._state[6][0] = self._state.item(6)/normE
self._state[7][0] = self._state.item(7)/normE
self._state[8][0] = self._state.item(8)/normE
self._state[9][0] = self._state.item(9)/normE
# update the airspeed, angle of attack, and side slip angles using new state
self._update_velocity_data(wind)
# update the message class for the true state
self._update_true_state()
def external_set_state(self, new_state):
self._state = new_state
###################################
# private functions
def _derivatives(self, x, u):
"""
for the dynamics xdot = f(x, u), returns fdot(x, u)
"""
# Get force, moment (torque)
f_b = u[:3]
m_b = u[3:]
# Get position, velocity, quaternion (rotation), angular velocity
r_i = x[:3] # wrt to i-frame
v_b = x[3:6] # wrt to i-frame
q_ib = x[6:10] # for rotation b to i-frame
w_b = x[10:] # wrt to b-frame
# Normalize quat. -> rotation
q_ib = q_ib/np.linalg.norm(q_ib) # normalize
R_ib = Quaternion2Rotation(q_ib)
# Compute equations of motion
# d/dt(r_i)
rdot_i = R_ib @ v_b
# d/dt(v_b)
vdot_b = (1/MAV.mass)*f_b-skew(w_b) @ v_b
# d/dt(q_ib)
wq_ib = np.zeros((4,1))
wq_ib[1:] = w_b
qdot_ib = 0.5 * quat_prod(wq_ib, q_ib)
wt_b = skew(w_b)
# d/dt(w_b)
wdot_b = np.linalg.inv(MAV.J) @ (m_b - (wt_b @ (MAV.J @ w_b)))
x_out = np.concatenate([rdot_i,vdot_b,qdot_ib,np.array(wdot_b)],axis = 0)
return x_out
def _update_velocity_data(self, wind=np.zeros((6,1))):
steady_state = wind[0:3]
gust = wind[3:6]
        ur = self._state.item(3) - steady_state.item(0) - gust.item(0)
        vr = self._state.item(4) - steady_state.item(1) - gust.item(1)
        wr = self._state.item(5) - steady_state.item(2) - gust.item(2)
self._Va = np.sqrt(ur**2 + vr**2 + wr**2)
self._alpha = np.arctan2(wr,ur)
self._beta = np.arcsin(vr/self._Va)
def thrust_from_prop(self, delta_t):
# compute thrust and torque due to propeller (See addendum by McLain)
# map delta_t throttle command (0 to 1) into motor input voltage
V_in = MAV.V_max * delta_t
KQ = MAV.KQ
# Quadratic formula to solve for motor speed
a = MAV.C_Q0 * MAV.rho * np.power(MAV.D_prop, 5) / ((2. * np.pi )**2 )
b = (MAV.C_Q1 * MAV.rho * np.power(MAV.D_prop, 4) / (2.*np.pi)) * self._Va + KQ**2/MAV.R_motor
c = MAV.C_Q2 * MAV.rho * np.power(MAV.D_prop, 3) * self._Va**2 - (KQ / MAV.R_motor ) * V_in + KQ * MAV.i0
# Consider only positive root
Omega_op = (-b + np.sqrt(b**2 - 4*a* c)) / (2. * a )
# compute advance ratio
J_op = 2 * np.pi * self._Va / (Omega_op * MAV.D_prop)
# compute nondimensionalized coefficients of thrust and torque
C_T = MAV.C_T2 * J_op **2 + MAV.C_T1 * J_op + MAV.C_T0
C_Q = MAV.C_Q2 * J_op **2 + MAV.C_Q1 * J_op + MAV.C_Q0
# add thrust and torque due to propeller
n = Omega_op / (2 * np.pi )
fx = MAV.rho * n**2 * np.power(MAV.D_prop, 4) * C_T
Mx = -MAV.rho * n**2 * np.power(MAV.D_prop, 5) * C_Q
return fx,Mx
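    # Added illustration (not part of the original lecture code): thrust_from_prop solves
    # a*Omega**2 + b*Omega + c = 0 and keeps only the positive root. The same pattern,
    # stripped of the MAV parameters and with made-up coefficients, looks like:
    #
    #   a, b, c = 5e-6, 1e-3, -2.0                       # hypothetical values
    #   Omega = (-b + np.sqrt(b**2 - 4*a*c)) / (2*a)     # positive root only
    #   n = Omega / (2*np.pi)                            # rev/s used in the thrust/torque formulas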
def sigma(self,alpha):
# pseudo sigmoid functions with cutoff +- alpha_0, returns coef btw 0 and 1
a1 = -MAV.M * (alpha - MAV.alpha0)
a2 = MAV.M * (alpha + MAV.alpha0)
sigma_alpha = (1 + np.exp(a1)+np.exp(a2)) / ((1+np.exp(a1))*(1+np.exp(a2)))
return sigma_alpha
def CL(self,alpha):
CL0 = MAV.C_L_0
CLA = MAV.C_L_alpha
sigma_alpha = self.sigma(alpha)
# returns lift coefficient using eq 4.9
CL_alpha = (1-sigma_alpha)*(CL0 + CLA*alpha) + sigma_alpha*(2*np.sign(alpha)*np.sin(alpha)**2 * np.cos(alpha))
return CL_alpha
def CD(self,alpha):
# returns drag coefficient using eq 4.11
CD_alpha = MAV.C_D_p + (MAV.C_L_0 + MAV.C_L_alpha*alpha)**2/(np.pi*MAV.e*MAV.AR)
return CD_alpha
def Cx(self,alpha):
return -self.CD(alpha)*np.cos(alpha) + self.CL(alpha)*np.sin(alpha)
def Cx_q(self,alpha):
return -MAV.C_D_q*np.cos(alpha) + MAV.C_L_q*np.sin(alpha)
def Cx_deltae(self,alpha):
return -MAV.C_D_delta_e*np.cos(alpha) + MAV.C_L_delta_e*np.sin(alpha)
def Cz(self,alpha):
return -self.CD(alpha)*np.sin(alpha)-self.CL(alpha)*np.cos(alpha)
def Cz_q(self,alpha):
return -MAV.C_D_q*np.sin(alpha)-MAV.C_L_q*np.cos(alpha)
def Cz_deltae(self,alpha):
return -MAV.C_D_delta_e*np.sin(alpha)-MAV.C_L_delta_e*np.cos(alpha)
def _forces_moments(self, delta):
"""
return the forces on the UAV based on the state, wind, and control surfaces
:param delta: np.matrix(delta_e, delta_a, delta_r, delta_t)
:return: Forces and Moments on the UAV np.matrix(Fx, Fy, Fz, Ml, Mn, Mm)
"""
phi, theta, psi = Quaternion2Euler(self._state[6:10])
p = self._state.item(10)
q = self._state.item(11)
r = self._state.item(12)
delta_e = delta.item(0)
delta_a = delta.item(1)
delta_r = delta.item(2)
delta_t = delta.item(3)
# Gravitational Components of Force, Moments = 0
mg = MAV.mass*MAV.gravity
fx_grav = -mg*np.sin(theta)
fy_grav = mg* np.cos(theta) * np.sin(phi)
fz_grav = mg* np.cos(theta) * np.cos(phi)
# Thrust Components of Force and Moments
fx_thrust,Mx_thrust = self.thrust_from_prop(delta_t)
fy_thrust = 0
fz_thrust = 0
My_thrust = 0
Mz_thrust = 0
# Aerodynamic Components of Forces and Moments
b = MAV.b
cyp = MAV.C_Y_p
cyr = MAV.C_Y_r
cydeltaa = MAV.C_Y_delta_a
cydeltar = MAV.C_Y_delta_r
aero_coef = 0.5*MAV.rho*self._Va**2*MAV.S_wing
fx_aero = aero_coef * (self.Cx(self._alpha) + self.Cx_q(self._alpha)*MAV.c/(2*self._Va)*q + self.Cx_deltae(self._alpha)*delta_e)
fy_aero = aero_coef * (MAV.C_Y_0 + MAV.C_Y_beta*self._beta + MAV.C_Y_p*b/(2*self._Va)*p + cyr * b/(2*self._Va)*r + cydeltaa * delta_a + cydeltar* delta_r)
fz_aero = aero_coef * (self.Cz(self._alpha) + self.Cz_q(self._alpha)*MAV.c/(2*self._Va)*q + self.Cz_deltae(self._alpha)*delta_e)
Mx_aero = aero_coef * MAV.b * (MAV.C_ell_0 + MAV.C_ell_beta*self._beta + MAV.C_ell_p*b/(2*self._Va)*p + MAV.C_ell_r*b/(2*self._Va)*r + MAV.C_ell_delta_a*delta_a + MAV.C_ell_delta_r*delta_r)
My_aero = aero_coef * MAV.c * (MAV.C_m_0 + MAV.C_m_alpha*self._alpha + MAV.C_m_q*MAV.c/(2*self._Va)*q + MAV.C_m_delta_e*delta_e)
Mz_aero = aero_coef * MAV.b * (MAV.C_n_0 + MAV.C_n_beta*self._beta + MAV.C_n_p*MAV.b/(2*self._Va)*p + MAV.C_n_r*MAV.b/(2*self._Va)*r + MAV.C_n_delta_a*delta_a + MAV.C_n_delta_r*delta_r)
fx = fx_grav + fx_aero + fx_thrust
fy = fy_grav + fy_aero + fy_thrust
fz = fz_grav + fz_aero + fz_thrust
# print('fx = ',fx)
# print('fy = ',fy)
# print('fz = ',fz)
Mx = Mx_aero + Mx_thrust
My = My_aero + My_thrust
Mz = Mz_aero + Mz_thrust
# print('Mx = ',Mx)
# print('My = ',My)
# print('Mz = ',Mz)
self._forces[0] = fx
self._forces[1] = fy
self._forces[2] = fz
fm = np.reshape(np.array([fx, fy, fz, Mx, My, Mz]),[6,1])
return fm
def _update_true_state(self):
# update the class structure for the true state:
# [pn, pe, h, Va, alpha, beta, phi, theta, chi, p, q, r, Vg, wn, we, psi, gyro_bx, gyro_by, gyro_bz]
phi, theta, psi = Quaternion2Euler(self._state[6:10])
pdot = Quaternion2Rotation(self._state[6:10]) @ self._state[3:6]
self.true_state.pn = self._state.item(0)
self.true_state.pe = self._state.item(1)
self.true_state.h = -self._state.item(2)
self.true_state.Va = self._Va
self.true_state.alpha = self._alpha
self.true_state.beta = self._beta
self.true_state.phi = phi
self.true_state.theta = theta
self.true_state.psi = psi
self.true_state.Vg = np.linalg.norm(pdot)
self.true_state.gamma = np.arcsin(pdot.item(2) / self.true_state.Vg)
self.true_state.chi = np.arctan2(pdot.item(1), pdot.item(0))
self.true_state.p = self._state.item(10)
self.true_state.q = self._state.item(11)
self.true_state.r = self._state.item(12)
self.true_state.wn = self._wind.item(0)
self.true_state.we = self._wind.item(1)
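# --- Added usage sketch (illustrative, not part of the original file) ---
# Minimal driver showing how the class is typically exercised: constant control inputs,
# zero wind, integrated for one second. The 0.01 s time step and the specific delta values
# are assumptions for the example only; delta ordering follows _forces_moments
# (delta_e, delta_a, delta_r, delta_t).
if __name__ == "__main__":
    mav = mavDynamics(Ts=0.01)
    delta = np.array([[-0.1], [0.0], [0.0], [0.5]])  # elevator, aileron, rudder, throttle
    wind = np.zeros((6, 1))                          # 3 steady-state + 3 gust components
    for _ in range(100):
        mav.update(delta, wind)
    print(mav.true_state.h, mav.true_state.Va)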
| Lectures/MAV_Dynamics/mav_dynamics.py | 12,549 | for the dynamics xdot = f(x, u), returns fdot(x, u)
return the forces on the UAV based on the state, wind, and control surfaces
:param delta: np.matrix(delta_e, delta_a, delta_r, delta_t)
:return: Forces and Moments on the UAV np.matrix(Fx, Fy, Fz, Ml, Mn, Mm)
Integrate the differential equations defining dynamics, update sensors
delta = (delta_a, delta_e, delta_r, delta_t) are the control inputs
wind is the wind vector in inertial coordinates
Ts is the time step between function calls.
mavDynamics
- this file implements the dynamic equations of motion for MAV
- use unit quaternion for the attitude state
part of mavPySim
- Beard & McLain, PUP, 2012
- Update history:
12/20/2018 - RWB
2/24/2020
load message types set initial states based on parameter file _state is the 13x1 internal state of the aircraft that is being propagated: _state = [pn, pe, pd, u, v, w, e0, e1, e2, e3, p, q, r] We will also need a variety of other elements that are functions of the _state and the wind. self.true_state is a 19x1 vector that is estimated and used by the autopilot to control the aircraft: true_state = [pn, pe, h, Va, alpha, beta, phi, theta, chi, p, q, r, Vg, wn, we, psi, gyro_bx, gyro_by, gyro_bz] (0) (1) (2) (3) (4) (5) (6) (7) (8) (9) (10) (11) (12) store wind data for fast recall since it is used at various points in simulation wind in NED frame in meters/sec store forces to avoid recalculation in the sensors function initialize true_state message public functions get forces and moments acting on rigid bod Integrate ODE using Runge-Kutta RK4 algorithm normalize the quaternion update the airspeed, angle of attack, and side slip angles using new state update the message class for the true state private functions Get force, moment (torque) Get position, velocity, quaternion (rotation), angular velocity wrt to i-frame wrt to i-frame for rotation b to i-frame wrt to b-frame Normalize quat. -> rotation normalize Compute equations of motion d/dt(r_i) d/dt(v_b) d/dt(q_ib) d/dt(w_b) compute thrust and torque due to propeller (See addendum by McLain) map delta_t throttle command (0 to 1) into motor input voltage Quadratic formula to solve for motor speed Consider only positive root compute advance ratio compute nondimensionalized coefficients of thrust and torque add thrust and torque due to propeller pseudo sigmoid functions with cutoff +- alpha_0, returns coef btw 0 and 1 returns lift coefficient using eq 4.9 returns drag coefficient using eq 4.11 Gravitational Components of Force, Moments = 0 Thrust Components of Force and Moments Aerodynamic Components of Forces and Moments print('fx = ',fx) print('fy = ',fy) print('fz = ',fz) print('Mx = ',Mx) print('My = ',My) print('Mz = ',Mz) update the class structure for the true state: [pn, pe, h, Va, alpha, beta, phi, theta, chi, p, q, r, Vg, wn, we, psi, gyro_bx, gyro_by, gyro_bz] | 2,904 | en | 0.766327 |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# ==============================================================================
from .kernel import NativeKernel
from ....variables import Variable
from ....variables import PositiveTransformation
from .....util.customop import broadcast_to_w_samples
class Bias(NativeKernel):
"""
    Bias kernel, which produces a constant value for every entry of the covariance matrix.
.. math::
k(x,y) = \\sigma^2
:param input_dim: the number of dimensions of the kernel. (The total number of active dimensions).
:type input_dim: int
:param variance: the initial value for the variance parameter.
:type variance: float or MXNet NDArray
:param name: the name of the kernel. The name is used to access kernel parameters.
:type name: str
:param active_dims: The dimensions of the inputs that are taken for the covariance matrix computation.
(default: None, taking all the dimensions).
:type active_dims: [int] or None
    :param dtype: the data type for floating point numbers.
:type dtype: numpy.float32 or numpy.float64
:param ctx: the mxnet context (default: None/current context).
:type ctx: None or mxnet.cpu or mxnet.gpu
"""
broadcastable = True
def __init__(self, input_dim, variance=1., name='bias', active_dims=None,
dtype=None, ctx=None):
super(Bias, self).__init__(input_dim=input_dim, name=name,
active_dims=active_dims, dtype=dtype,
ctx=ctx)
if not isinstance(variance, Variable):
variance = Variable(shape=(1,),
transformation=PositiveTransformation(),
initial_value=variance)
self.variance = variance
def _compute_K(self, F, X, variance, X2=None):
"""
The internal interface for the actual covariance matrix computation.
:param F: MXNet computation type <mx.sym, mx.nd>.
:param X: the first set of inputs to the kernel.
:type X: MXNet NDArray or MXNet Symbol
:param X2: (optional) the second set of arguments to the kernel. If X2 is None,
this computes a square covariance matrix of X. In other words, X2 is internally treated as X.
:type X2: MXNet NDArray or MXNet Symbol
:param variance: the variance parameter.
:type variance: MXNet NDArray or MXNet Symbol
:return: The covariance matrix.
:rtype: MXNet NDArray or MXNet Symbol
"""
if X2 is None:
X2 = X
return broadcast_to_w_samples(F, variance, X.shape[:-1] +
(X2.shape[-2],))
def _compute_Kdiag(self, F, X, variance):
"""
The internal interface for the actual computation for the diagonal.
:param F: MXNet computation type <mx.sym, mx.nd>.
:param X: the first set of inputs to the kernel.
:type X: MXNet NDArray or MXNet Symbol
:param variance: the variance parameter.
:type variance: MXNet NDArray or MXNet Symbol
:return: The covariance matrix.
:rtype: MXNet NDArray or MXNet Symbol
"""
return broadcast_to_w_samples(F, variance, X.shape[:-1])
class White(NativeKernel):
"""
White kernel, which produces a constant value for the diagonal of the covariance matrix.
.. math::
K = \\sigma^2 I
:param input_dim: the number of dimensions of the kernel. (The total number of active dimensions).
:type input_dim: int
:param variance: the initial value for the variance parameter.
:type variance: float or MXNet NDArray
:param name: the name of the kernel. The name is used to access kernel parameters.
:type name: str
:param active_dims: The dimensions of the inputs that are taken for the covariance matrix computation.
(default: None, taking all the dimensions).
:type active_dims: [int] or None
    :param dtype: the data type for floating point numbers.
:type dtype: numpy.float32 or numpy.float64
:param ctx: the mxnet context (default: None/current context).
:type ctx: None or mxnet.cpu or mxnet.gpu
"""
broadcastable = True
def __init__(self, input_dim, variance=1., name='white', active_dims=None,
dtype=None, ctx=None):
super(White, self).__init__(input_dim=input_dim, name=name,
active_dims=active_dims, dtype=dtype,
ctx=ctx)
if not isinstance(variance, Variable):
variance = Variable(shape=(1,),
transformation=PositiveTransformation(),
initial_value=variance)
self.variance = variance
def _compute_K(self, F, X, variance, X2=None):
"""
The internal interface for the actual covariance matrix computation.
:param F: MXNet computation type <mx.sym, mx.nd>
:param X: the first set of inputs to the kernel.
:type X: MXNet NDArray or MXNet Symbol
:param X2: (optional) the second set of arguments to the kernel. If X2 is None, this computes a square
covariance matrix of X. In other words, X2 is internally treated as X.
:type X2: MXNet NDArray or MXNet Symbol
:param variance: the variance parameter.
:type variance: MXNet NDArray or MXNet Symbol
:return: The covariance matrix.
:rtype: MXNet NDArray or MXNet Symbol
"""
if X2 is None:
Imat = F.eye(N=X.shape[-2:-1][0],
ctx=self.ctx,
dtype=self.dtype)
Imat = broadcast_to_w_samples(F, Imat, X.shape[:-1] +
X.shape[-2:-1], False)
return Imat * broadcast_to_w_samples(F, variance, X.shape[:-1] +
X.shape[-2:-1])
else:
return F.zeros(shape=X.shape[:-1] + X2.shape[-2:-1], ctx=self.ctx,
dtype=self.dtype)
def _compute_Kdiag(self, F, X, variance):
"""
The internal interface for the actual computation for the diagonal of the covariance matrix.
:param F: MXNet computation type <mx.sym, mx.nd>.
:param X: the first set of inputs to the kernel.
:type X: MXNet NDArray or MXNet Symbol
:param variance: the variance parameter.
:type variance: MXNet NDArray or MXNet Symbol
:return: The covariance matrix.
:rtype: MXNet NDArray or MXNet Symbol
"""
return broadcast_to_w_samples(F, variance, X.shape[:-1])
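# Added illustration (not part of the original module): for n inputs, the two kernels above
# produce, in plain NumPy terms,
#     Bias :  K[i, j] = sigma^2 for all i, j   ->  np.full((n, n), sigma2)
#     White:  K = sigma^2 * I                  ->  sigma2 * np.eye(n)
# which is why White only contributes on the diagonal (often used as observation noise or
# jitter), while Bias shifts every covariance entry by the same constant.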
| mxfusion/components/distributions/gp/kernels/static.py | 7,240 | Bias kernel, which produces a constant value for every entries of the covariance matrix.
.. math::
k(x,y) = \sigma^2
:param input_dim: the number of dimensions of the kernel. (The total number of active dimensions).
:type input_dim: int
:param variance: the initial value for the variance parameter.
:type variance: float or MXNet NDArray
:param name: the name of the kernel. The name is used to access kernel parameters.
:type name: str
:param active_dims: The dimensions of the inputs that are taken for the covariance matrix computation.
(default: None, taking all the dimensions).
:type active_dims: [int] or None
:param dtype: the data type for float point numbers.
:type dtype: numpy.float32 or numpy.float64
:param ctx: the mxnet context (default: None/current context).
:type ctx: None or mxnet.cpu or mxnet.gpu
White kernel, which produces a constant value for the diagonal of the covariance matrix.
.. math::
K = \sigma^2 I
:param input_dim: the number of dimensions of the kernel. (The total number of active dimensions).
:type input_dim: int
:param variance: the initial value for the variance parameter.
:type variance: float or MXNet NDArray
:param name: the name of the kernel. The name is used to access kernel parameters.
:type name: str
:param active_dims: The dimensions of the inputs that are taken for the covariance matrix computation.
(default: None, taking all the dimensions).
:type active_dims: [int] or None
:param dtype: the data type for float point numbers.
:type dtype: numpy.float32 or numpy.float64
:param ctx: the mxnet context (default: None/current context).
:type ctx: None or mxnet.cpu or mxnet.gpu
The internal interface for the actual covariance matrix computation.
:param F: MXNet computation type <mx.sym, mx.nd>.
:param X: the first set of inputs to the kernel.
:type X: MXNet NDArray or MXNet Symbol
:param X2: (optional) the second set of arguments to the kernel. If X2 is None,
this computes a square covariance matrix of X. In other words, X2 is internally treated as X.
:type X2: MXNet NDArray or MXNet Symbol
:param variance: the variance parameter.
:type variance: MXNet NDArray or MXNet Symbol
:return: The covariance matrix.
:rtype: MXNet NDArray or MXNet Symbol
The internal interface for the actual covariance matrix computation.
:param F: MXNet computation type <mx.sym, mx.nd>
:param X: the first set of inputs to the kernel.
:type X: MXNet NDArray or MXNet Symbol
:param X2: (optional) the second set of arguments to the kernel. If X2 is None, this computes a square
covariance matrix of X. In other words, X2 is internally treated as X.
:type X2: MXNet NDArray or MXNet Symbol
:param variance: the variance parameter.
:type variance: MXNet NDArray or MXNet Symbol
:return: The covariance matrix.
:rtype: MXNet NDArray or MXNet Symbol
The internal interface for the actual computation for the diagonal.
:param F: MXNet computation type <mx.sym, mx.nd>.
:param X: the first set of inputs to the kernel.
:type X: MXNet NDArray or MXNet Symbol
:param variance: the variance parameter.
:type variance: MXNet NDArray or MXNet Symbol
:return: The covariance matrix.
:rtype: MXNet NDArray or MXNet Symbol
The internal interface for the actual computation for the diagonal of the covariance matrix.
:param F: MXNet computation type <mx.sym, mx.nd>.
:param X: the first set of inputs to the kernel.
:type X: MXNet NDArray or MXNet Symbol
:param variance: the variance parameter.
:type variance: MXNet NDArray or MXNet Symbol
:return: The covariance matrix.
:rtype: MXNet NDArray or MXNet Symbol
Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/LICENSE-2.0 or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== | 4,204 | en | 0.647778 |
from typing import Any
import tensorflow as tf
from .tf_util import scope_name as get_scope_name
def absolute_scope_name(relative_scope_name):
"""Appends parent scope name to `relative_scope_name`"""
base = get_scope_name()
if len(base) > 0:
base += '/'
return base + relative_scope_name
def _infer_scope_name(self, scope_name):
return scope_name if scope_name is not None else type(self).__name__
infer_rel_scope_name = _infer_scope_name
def infer_abs_scope_name(self, scope_name: str = None):
scope_name = infer_rel_scope_name(self, scope_name)
return absolute_scope_name(scope_name)
class Scope(object):
def __init__(self, scope_name: str, obj: Any = None):
self.rel = self.abs = None
self.setup(scope_name, obj)
def setup(self, scope_name: str, obj: Any = None):
if scope_name is None:
assert obj is not None, 'Must provide either scope_name or a reference object to infer scope_name'
scope_name = type(obj).__name__
self.rel = scope_name
self.abs = absolute_scope_name(self.rel)
def make_unique(self, graph=None):
if graph is None:
graph = tf.get_default_graph()
self.rel = graph.unique_name(self.rel)
self.setup(self.rel)
@property
def exact_rel_pattern(self) -> str:
return self.abs + '/'
@property
def exact_abs_pattern(self) -> str:
return '^' + self.abs + '/'
class UninitializedScope(Scope):
# noinspection PyMissingConstructor
def __init__(self):
pass
def __getattribute__(self, item):
raise AttributeError('The scope is only available after you call super constructor __init__.\n'
'Alternatively, manually setup the scope with self.setup_scope(scope_name)')
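# Added usage sketch (illustrative, not part of the original module); assumes a TF1-style
# default graph is active, since Scope.make_unique() calls tf.get_default_graph():
#
#   scope = Scope('my_block')     # relative name; abs name gets the current scope prefix
#   print(scope.rel, scope.abs)
#   scope.make_unique()           # e.g. rel becomes 'my_block_1' if the name is already taken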
| sandblox/util/scope.py | 1,650 | Appends parent scope name to `relative_scope_name`
noinspection PyMissingConstructor | 86 | en | 0.450069 |
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class TextBotFlowLaunchResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
TextBotFlowLaunchResponse - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'id': 'str'
}
self.attribute_map = {
'id': 'id'
}
self._id = None
@property
def id(self):
"""
Gets the id of this TextBotFlowLaunchResponse.
The session ID of the bot flow, used to send to subsequent turn requests
:return: The id of this TextBotFlowLaunchResponse.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this TextBotFlowLaunchResponse.
The session ID of the bot flow, used to send to subsequent turn requests
:param id: The id of this TextBotFlowLaunchResponse.
:type: str
"""
self._id = id
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
"""
Returns the model as raw JSON
"""
return json.dumps(sanitize_for_serialization(self.to_dict()))
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
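# --- Added usage sketch (illustrative, not part of the generated file) ---
# Round-trip through the generated model; the session id value is made up for the example.
if __name__ == "__main__":
    response = TextBotFlowLaunchResponse()
    response.id = "example-session-id"
    print(response.to_dict())   # {'id': 'example-session-id'}
    print(response.to_str())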
| build/PureCloudPlatformClientV2/models/text_bot_flow_launch_response.py | 3,644 | NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Returns true if both objects are equal
TextBotFlowLaunchResponse - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
Returns true if both objects are not equal
For `print` and `pprint`
Gets the id of this TextBotFlowLaunchResponse.
The session ID of the bot flow, used to send to subsequent turn requests
:return: The id of this TextBotFlowLaunchResponse.
:rtype: str
Sets the id of this TextBotFlowLaunchResponse.
The session ID of the bot flow, used to send to subsequent turn requests
:param id: The id of this TextBotFlowLaunchResponse.
:type: str
Returns the model properties as a dict
Returns the model as raw JSON
Returns the string representation of the model
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
coding: utf-8 | 1,651 | en | 0.808969 |
# Generated by Django 3.1.2 on 2020-10-29 20:54
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Quote',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('author', models.CharField(max_length=512)),
],
),
migrations.CreateModel(
name='ScrapyItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('unique_id', models.CharField(max_length=100, null=True)),
('data', models.TextField()),
('date', models.DateTimeField(default=django.utils.timezone.now)),
],
),
]
| crawler/migrations/0001_initial.py | 992 | Generated by Django 3.1.2 on 2020-10-29 20:54 | 45 | en | 0.735655 |
from django.db import models
from django.utils.translation import gettext_lazy as _
from oscar.apps.offer.abstract_models import AbstractConditionalOffer, AbstractBenefit
from oscar.core.loading import get_class
class ConditionalOffer(AbstractConditionalOffer):
SITE, FLASH_SALE, VOUCHER, USER, SESSION = "Site", "Flash Sale", "Voucher", "User", "Session"
TYPE_CHOICES = (
(SITE, _("Site offer - available to all users")),
(FLASH_SALE, _("Flash Sale offer - short-term discount for the specific product")),
(VOUCHER, _("Voucher offer - only available after entering the appropriate voucher code")),
(USER, _("User offer - available to certain types of user")),
(SESSION, _("Session offer - temporary offer, available for a user for the duration of their session")),
)
offer_type = models.CharField(_("Type"), choices=TYPE_CHOICES, default=SITE, max_length=128)
class Benefit(AbstractBenefit):
PERCENTAGE, FIXED, MULTIBUY, FIXED_PRICE, FIXED_PER_PRODUCT = (
"Percentage", "Absolute", "Multibuy", "Fixed price", "Fixed per product")
SHIPPING_PERCENTAGE, SHIPPING_ABSOLUTE, SHIPPING_FIXED_PRICE = (
'Shipping percentage', 'Shipping absolute', 'Shipping fixed price')
TYPE_CHOICES = (
(PERCENTAGE, _("Discount is a percentage off of the product's value")),
(FIXED, _("Discount is a fixed amount off of the product's value")),
(FIXED_PER_PRODUCT, _("Discount is a fixed amount off of each product's value that match condition")),
(MULTIBUY, _("Discount is to give the cheapest product for free")),
(FIXED_PRICE,
_("Get the products that meet the condition for a fixed price")),
(SHIPPING_ABSOLUTE,
_("Discount is a fixed amount of the shipping cost")),
(SHIPPING_FIXED_PRICE, _("Get shipping for a fixed price")),
(SHIPPING_PERCENTAGE, _("Discount is a percentage off of the shipping"
" cost")),
)
type = models.CharField(_("Type"), max_length=128, choices=TYPE_CHOICES, blank=True)
def apply_to_product(self, price):
if self.type in [self.PERCENTAGE, self.FIXED_PRICE, self.FIXED_PER_PRODUCT]:
return self.proxy().apply_to_product(price)
@property
def proxy_map(self):
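        # Extend Oscar's default type->proxy-class mapping so the custom benefit
        # implementations from offer.benefits are used when this Benefit is proxied.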
custom_proxy_map = super().proxy_map
custom_proxy_map[self.PERCENTAGE] = get_class('offer.benefits', 'CustomPercentageDiscountBenefit')
custom_proxy_map[self.FIXED_PRICE] = get_class('offer.benefits', 'CustomFixedPriceBenefit')
custom_proxy_map[self.FIXED_PER_PRODUCT] = get_class(
'offer.benefits', 'CustomAbsoluteDiscountPerProductBenefit'
)
return custom_proxy_map
from oscar.apps.offer.models import * # noqa isort:skip
from .benefits import * # noqa isort:skip
| sandbox/offer/models.py | 2,841 | noqa isort:skip noqa isort:skip | 31 | gu | 0.190627 |
"""Metadata State Manager."""
import asyncio
import logging
from typing import Dict, List, Optional, Set, Tuple, Type
from pydantic import ValidationError
from astoria.common.components import StateManager
from astoria.common.disks import DiskInfo, DiskType, DiskUUID
from astoria.common.ipc import (
MetadataManagerMessage,
MetadataSetManagerRequest,
RequestResponse,
)
from astoria.common.metadata import Metadata
from astoria.common.mixins.disk_handler import DiskHandlerMixin
from .metadata_cache import MetadataCache
from .metadata_disk_lifecycle import (
AbstractMetadataDiskLifecycle,
MetadataDiskLifecycle,
UsercodeDiskLifecycle,
)
LOGGER = logging.getLogger(__name__)
class MetadataManager(DiskHandlerMixin, StateManager[MetadataManagerMessage]):
"""Astoria Metadata State Manager."""
name = "astmetad"
dependencies = ["astdiskd"]
DISK_TYPE_LIFECYCLE_MAP: Dict[DiskType, Type[AbstractMetadataDiskLifecycle]] = {
DiskType.USERCODE: UsercodeDiskLifecycle,
DiskType.METADATA: MetadataDiskLifecycle,
}
DISK_TYPE_OVERRIDE_MAP: Dict[DiskType, Set[str]] = {
DiskType.USERCODE: {
"usercode_entrypoint", "wifi_ssid",
"wifi_psk", "wifi_region", "wifi_enabled",
},
DiskType.METADATA: {
"arena", "zone", "mode", "marker_offset", "game_timeout", "wifi_enabled",
},
}
MUTABLE_ATTRS_BY_REQUEST: Set[str] = {"arena", "zone", "mode"}
CACHED_ATTRS: Set[str] = {"wifi_ssid", "wifi_psk", "wifi_region"}
def _init(self) -> None:
self._lifecycles: Dict[DiskType, Optional[AbstractMetadataDiskLifecycle]] = {
disk_type: None
for disk_type in self.DISK_TYPE_LIFECYCLE_MAP
}
self._cache = MetadataCache(
self.CACHED_ATTRS,
cache_path=self.config.system.cache_dir / "astmetad-metadata.json",
)
self._cur_disks: Dict[DiskUUID, DiskInfo] = {}
self._mqtt.subscribe("astdiskd", self.handle_astdiskd_disk_info_message)
self._requested_data: Dict[str, str] = {}
self._register_request(
"mutate",
MetadataSetManagerRequest,
self.handle_mutation_request,
)
@property
def offline_status(self) -> MetadataManagerMessage:
"""
Status to publish when the manager goes offline.
This status should ensure that any other components relying
on this data go into a safe state.
"""
return MetadataManagerMessage(
status=MetadataManagerMessage.Status.STOPPED,
metadata=Metadata.init(self.config),
)
async def main(self) -> None:
"""Main routine for astmetad."""
self.update_status()
# Wait whilst the program is running.
await self.wait_loop()
for uuid, info in self._cur_disks.items():
asyncio.ensure_future(self.handle_disk_removal(uuid, info))
async def handle_disk_insertion(self, uuid: DiskUUID, disk_info: DiskInfo) -> None:
"""Handle a disk insertion."""
LOGGER.debug(f"Disk inserted: {uuid} ({disk_info.disk_type})")
for disk_type, lifecycle_class in self.DISK_TYPE_LIFECYCLE_MAP.items():
if disk_info.disk_type is disk_type:
LOGGER.info(
f"{disk_type.name} disk {uuid} is mounted"
f" at {disk_info.mount_path}",
)
if self._lifecycles[disk_type] is None:
LOGGER.debug(f"Starting lifecycle for {uuid}")
self._lifecycles[disk_type] = lifecycle_class(
uuid,
disk_info,
self.config,
)
self.update_status()
else:
                    LOGGER.warning(
"Cannot use metadata, there is already a lifecycle present.",
)
async def handle_disk_removal(self, uuid: DiskUUID, disk_info: DiskInfo) -> None:
"""Handle a disk removal."""
LOGGER.debug(f"Disk removed: {uuid} ({disk_info.disk_type})")
for disk_type, lifecycle_class in self.DISK_TYPE_LIFECYCLE_MAP.items():
if disk_info.disk_type is disk_type:
LOGGER.info(f"Metadata disk {uuid} removed ({disk_info.mount_path})")
lifecycle = self._lifecycles[disk_type]
if lifecycle is not None and lifecycle._uuid == disk_info.uuid:
self._lifecycles[disk_type] = None
self.update_status()
async def handle_mutation_request(
self,
request: MetadataSetManagerRequest,
) -> RequestResponse:
"""Handle a request to mutate metadata."""
if request.attr not in self.MUTABLE_ATTRS_BY_REQUEST:
return RequestResponse(
uuid=request.uuid,
success=False,
reason=f"{request.attr} is not a mutable attribute",
)
if len(request.value) == 0:
# Stop mutating the attr if it is empty.
try:
del self._requested_data[request.attr]
LOGGER.info(f"{request.attr} override has been removed by request")
self.update_status()
except KeyError:
pass
else:
# Store the old value, just in case we need to set it back.
if request.attr in self._requested_data:
old_value = self._requested_data[request.attr]
else:
old_value = None
# Attempt to update the data, reset it if it is invalid.
try:
self._requested_data[request.attr] = request.value
self.update_status()
LOGGER.info(
f"{request.attr} has been overridden to {request.value} by request",
)
except ValidationError as e:
# Set the requested data back to the old value
if old_value is not None:
self._requested_data[request.attr] = old_value
LOGGER.warning(f"Unable to set {request.attr} to {request.value}.")
LOGGER.warning(str(e))
return RequestResponse(
uuid=request.uuid,
success=False,
reason=f"{request.value} is not a valid value for {request.attr}",
)
return RequestResponse(
uuid=request.uuid,
success=True,
)
def get_current_metadata(self) -> Metadata:
"""
Calculate the current metadata.
Takes the default, static metadata based on the config and system
information. It then overlays data from other sources in a priority order,
whereby each source has a set of permitted attributes in the metadata that
can be overridden.
"""
# Metadata sources in priority order.
metadata_sources: List[Tuple[Set[str], Dict[str, str]]] = [
(self.CACHED_ATTRS, self._cache.data),
(self.MUTABLE_ATTRS_BY_REQUEST, self._requested_data),
]
for disk_type, val in self._lifecycles.items():
if val is not None:
# Add disk-based metadata source if it is present.
metadata_sources.append(
(
self.DISK_TYPE_OVERRIDE_MAP[disk_type],
val.diff_data,
),
)
metadata = Metadata.init(self.config)
for permitted_attrs, diff_data in metadata_sources:
for k, v in diff_data.items():
if k in permitted_attrs:
metadata.__setattr__(k, v)
else:
LOGGER.warning(
f"There was an attempt to mutate {k}, but it was not permitted.",
)
# Update the cache with the new values.
for key in self.CACHED_ATTRS:
self._cache.update_cached_attr(key, metadata.__getattribute__(key))
return metadata
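    # Added illustration (not part of the original manager): the overlay above applies the
    # sources in list order, so a later source wins when two of them set the same permitted
    # attribute. The same pattern with plain dicts:
    #
    #   base = {"arena": "A", "zone": "0", "wifi_ssid": None}
    #   sources = [({"wifi_ssid"}, {"wifi_ssid": "robot-net"}),        # cache
    #              ({"arena", "zone", "mode"}, {"zone": "2"})]         # request overrides
    #   for allowed, diff in sources:
    #       base.update({k: v for k, v in diff.items() if k in allowed})
    #   # base -> {"arena": "A", "zone": "2", "wifi_ssid": "robot-net"}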
def update_status(self) -> None:
"""Update the status of the manager."""
self.status = MetadataManagerMessage(
status=MetadataManagerMessage.Status.RUNNING,
metadata=self.get_current_metadata(),
)
| astoria/astmetad/metadata_manager.py | 8,452 | Astoria Metadata State Manager.
Calculate the current metadata.
Takes the default, static metadata based on the config and system
information. It then overlays data from other sources in a priority order,
whereby each source has a set of permitted attributes in the metadata that
can be overridden.
Status to publish when the manager goes offline.
This status should ensure that any other components relying
on this data go into a safe state.
Update the status of the manager.
Metadata State Manager.
Wait whilst the program is running. Stop mutating the attr if it is empty. Store the old value, just in case we need to set it back. Attempt to update the data, reset it if it is invalid. Set the requested data back to the old value Metadata sources in priority order. Add disk-based metadata source if it is present. Update the cache with the new values. | 860 | en | 0.816181 |
#
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2021 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import logging
from logging import Handler
from .Qt import QtCore
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
class LogHandler(Handler):
# Class Emitter is added to keep compatibility with PySide2
# 1. Signal needs to be class attribute of a QObject subclass
# 2. logging Handler emit method clashes with QObject emit method
# 3. As a consequence, the LogHandler cannot inherit both from
# Handler and QObject
# 4. A new utility class Emitter subclass of QObject is
# introduced to handle record Signal and workaround the problem
class Emitter(QtCore.QObject):
record = QtCore.QSignal(object)
def __init__(self):
super().__init__()
self.emitter = self.Emitter()
def connect(self, *args, **kwargs):
return self.emitter.record.connect(*args, **kwargs)
def emit(self, record):
self.emitter.record.emit(self.format(record))
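# Added usage sketch (illustrative, not part of the original module): attach the handler to a
# logger so formatted records are delivered through the Qt signal; `some_widget.append` stands
# in for any Qt slot that accepts a string and is an assumption for the example.
#
#   handler = LogHandler()
#   handler.setFormatter(logging.Formatter('%(asctime)s: %(message)s'))
#   handler.connect(some_widget.append)
#   logging.getLogger('pymeasure').addHandler(handler)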
| pymeasure/display/log.py | 2,097 | This file is part of the PyMeasure package. Copyright (c) 2013-2021 PyMeasure Developers Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. Class Emitter is added to keep compatibility with PySide2 1. Signal needs to be class attribute of a QObject subclass 2. logging Handler emit method clashes with QObject emit method 3. As a consequence, the LogHandler cannot inherit both from Handler and QObject 4. A new utility class Emitter subclass of QObject is introduced to handle record Signal and workaround the problem | 1,488 | en | 0.890806 |
import re
import setuptools
import setuptools.command.develop
import setuptools.command.install
import subprocess
import sys
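# Bootstrap: make sure pkg_utils >= 0.0.5 is available before it is imported below;
# if the installed version is missing or too old, upgrade it via pip.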
try:
result = subprocess.run(
[sys.executable, "-m", "pip", "show", "pkg_utils"],
check=True, capture_output=True)
match = re.search(r'\nVersion: (.*?)\n', result.stdout.decode(), re.DOTALL)
assert match and tuple(match.group(1).split('.')) >= ('0', '0', '5')
except (subprocess.CalledProcessError, AssertionError):
subprocess.run(
[sys.executable, "-m", "pip", "install", "-U", "pkg_utils"],
check=True)
import os
import pkg_utils
name = 'biosimulators_ginsim'
dirname = os.path.dirname(__file__)
# get package metadata
md = pkg_utils.get_package_metadata(dirname, name)
# install package
setuptools.setup(
name=name,
version=md.version,
description=("BioSimulators-compliant command-line interface to "
"the GINsim simulation program."),
long_description=md.long_description,
url="https://github.com/biosimulators/Biosimulators_GINsim",
download_url="https://github.com/biosimulators/Biosimulators_GINsim",
author='BioSimulators Team',
author_email="info@biosimulators.org",
license="MIT",
keywords=[
'systems biology',
'computational biology',
'logical model',
'numerical simulation',
'BioSimulators',
'SBML',
'SED-ML',
'COMBINE',
'OMEX',
'GINsim',
],
packages=setuptools.find_packages(exclude=['tests', 'tests.*']),
install_requires=md.install_requires,
extras_require=md.extras_require,
tests_require=md.tests_require,
dependency_links=md.dependency_links,
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Topic :: Scientific/Engineering :: Bio-Informatics',
],
entry_points={
'console_scripts': [
'biosimulators-ginsim = biosimulators_ginsim.__main__:main',
],
},
)
| setup.py | 2,070 | get package metadata install package | 36 | en | 0.427415 |
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
# DO SOME OS-WISE PATCHES
import datetime as _datetime
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import types as _types
from google.protobuf.internal import api_implementation as _api_implementation
if _api_implementation._default_implementation_type != 'cpp':
import warnings as _warnings
_warnings.warn(
'''
    You are using the Python protobuf backend, not the C++ version, which is much faster.
    This is often because the C++ implementation failed to compile while installing Protobuf:
    - You are using Python 3.9 (https://github.com/jina-ai/jina/issues/1801)
    - You are running on an architecture other than x86_64/armv6/armv7
    - Your installation is broken, try `pip install --force protobuf`
    - You have the C++ backend but it is disabled, try `export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp`
''',
RuntimeWarning,
)
if _sys.version_info < (3, 7, 0) or _sys.version_info >= (3, 10, 0):
raise OSError(f'Jina requires Python 3.7/3.8/3.9, but yours is {_sys.version_info}')
if _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
_set_start_method('fork')
# fix fork error on macOS, though it seems to have no effect; the export below may still need to be done manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# Underscore variables shared globally
__copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '1.0.10'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.0.78'
__uptime__ = _datetime.datetime.now().isoformat()
# update on MacOS
# 1. clean this tuple,
# 2. grep -rohEI --exclude-dir=jina/hub --exclude-dir=tests --include \*.py "\'JINA_.*?\'" jina | sort -u | sed "s/$/,/g"
# 3. copy all lines EXCEPT the first (which is the grep command in the last line)
__jina_env__ = (
'JINA_ARRAY_QUANT',
'JINA_BINARY_DELIMITER',
'JINA_CONTRIB_MODULE',
'JINA_CONTRIB_MODULE_IS_LOADING',
'JINA_CONTROL_PORT',
'JINA_DEFAULT_HOST',
'JINA_DISABLE_UVLOOP',
'JINA_EXECUTOR_WORKDIR',
'JINA_FULL_CLI',
'JINA_IPC_SOCK_TMP',
'JINA_LOG_CONFIG',
'JINA_LOG_ID',
'JINA_LOG_LEVEL',
'JINA_LOG_NO_COLOR',
'JINA_LOG_WORKSPACE',
'JINA_POD_NAME',
'JINA_RAISE_ERROR_EARLY',
'JINA_RANDOM_PORTS',
'JINA_RANDOM_PORT_MAX',
'JINA_RANDOM_PORT_MIN',
'JINA_SOCKET_HWM',
'JINA_VCS_VERSION',
'JINA_WARN_UNNAMED',
'JINA_WORKSPACE',
)
__default_host__ = _os.environ.get('JINA_DEFAULT_HOST', '0.0.0.0')
__ready_msg__ = 'ready and listening'
__stop_msg__ = 'terminated'
__binary_delimiter__ = _os.environ.get(
'JINA_BINARY_DELIMITER', '460841a0a8a430ae25d9ad7c1f048c57'
).encode()
__root_dir__ = _os.path.dirname(_os.path.abspath(__file__))
_names_with_underscore = [
'__version__',
'__copyright__',
'__license__',
'__proto_version__',
'__default_host__',
'__ready_msg__',
'__stop_msg__',
'__binary_delimiter__',
'__jina_env__',
'__uptime__',
'__root_dir__',
]
# Primitive data type,
# note, they must be loaded BEFORE all executors/drivers/... to avoid cyclic imports
from jina.types.ndarray.generic import NdArray
from jina.types.request import Request, Response
from jina.types.message import Message
from jina.types.querylang import QueryLang
from jina.types.document import Document
from jina.types.document.multimodal import MultimodalDocument
from jina.types.sets import DocumentSet, QueryLangSet
# ADD GLOBAL NAMESPACE VARIABLES
JINA_GLOBAL = _types.SimpleNamespace()
import jina.importer as _ji
# driver first, as executor may contain driver
_ji.import_classes('jina.drivers', show_import_table=False, import_once=True)
_ji.import_classes('jina.executors', show_import_table=False, import_once=True)
_ji.import_classes('jina.hub', show_import_table=False, import_once=True)
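# ensure SIGINT raises KeyboardInterrupt (Python's default handler), even if a previously
# imported module replaced the handler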
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
def _set_nofile(nofile_atleast=4096):
"""
    Sets the nofile soft limit to at least 4096, which is useful for running matplotlib/seaborn
    with parallel plot generators against the Ubuntu default of ulimit -n 1024 or the OS X
    El Capitan default of 256; the change is temporary and lasts only for the Python session.
"""
try:
import resource as res
except ImportError: # Windows
res = None
from .logging import default_logger
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
default_logger.debug(f'setting soft & hard ulimit -n {soft} {hard}')
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
default_logger.warning(
f'trouble with max limit, retrying with soft,hard {soft},{hard}'
)
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
default_logger.warning('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
default_logger.debug(f'ulimit -n soft,hard: {soft} {hard}')
return soft, hard
_set_nofile()
# Flow
from jina.flow import Flow
from jina.flow.asyncio import AsyncFlow
# Client
from jina.clients import Client
from jina.clients.asyncio import AsyncClient
# Executor
from jina.executors import BaseExecutor as Executor
from jina.executors.classifiers import BaseClassifier as Classifier
from jina.executors.crafters import BaseCrafter as Crafter
from jina.executors.encoders import BaseEncoder as Encoder
from jina.executors.evaluators import BaseEvaluator as Evaluator
from jina.executors.indexers import BaseIndexer as Indexer
from jina.executors.rankers import BaseRanker as Ranker
from jina.executors.segmenters import BaseSegmenter as Segmenter
__all__ = [_s for _s in dir() if not _s.startswith('_')]
__all__.extend([_s for _s in _names_with_underscore])
| jina/__init__.py | 6,764 | sets nofile soft limit to at least 4096, useful for running matlplotlib/seaborn on
parallel executing plot generators vs. Ubuntu default ulimit -n 1024 or OS X El Captian 256
temporary setting extinguishing with Python session.
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
DO SOME OS-WISE PATCHES temporary fix for python 3.8 on macos where the default start is set to "spawn" https://docs.python.org/3/library/multiprocessing.htmlcontexts-and-start-methods fix fork error on MacOS but seems no effect? must do EXPORT manually before jina start Underscore variables shared globally do not change this line manually this is managed by git tag and updated on every release NOTE: this represents the NEXT release version do not change this line manually this is managed by proto/build-proto.sh and updated on every execution update on MacOS 1. clean this tuple, 2. grep -rohEI --exclude-dir=jina/hub --exclude-dir=tests --include \*.py "\'JINA_.*?\'" jina | sort -u | sed "s/$/,/g" 3. copy all lines EXCEPT the first (which is the grep command in the last line) Primitive data type, note, they must be loaded BEFORE all executors/drivers/... to avoid cyclic imports ADD GLOBAL NAMESPACE VARIABLES driver first, as executor may contain driver Windows Flow Client Executor | 1,430 | en | 0.822021 |
from niaaml.classifiers.classifier import Classifier
from niaaml.utilities import MinMax
from niaaml.utilities import ParameterDefinition
from sklearn.ensemble import RandomForestClassifier as RF
import numpy as np
import warnings
from sklearn.exceptions import ChangedBehaviorWarning, ConvergenceWarning, DataConversionWarning, DataDimensionalityWarning, EfficiencyWarning, FitFailedWarning, NonBLASDotWarning, UndefinedMetricWarning
__all__ = ['RandomForest']
class RandomForest(Classifier):
r"""Implementation of random forest classifier.
Date:
2020
Author:
Luka Pečnik
License:
MIT
Reference:
Breiman, “Random Forests”, Machine Learning, 45(1), 5-32, 2001.
Documentation:
https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html
See Also:
* :class:`niaaml.classifiers.Classifier`
"""
Name = 'Random Forest Classifier'
def __init__(self, **kwargs):
r"""Initialize RandomForestClassifier instance.
"""
warnings.filterwarnings(action='ignore', category=ChangedBehaviorWarning)
warnings.filterwarnings(action='ignore', category=ConvergenceWarning)
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
warnings.filterwarnings(action='ignore', category=DataDimensionalityWarning)
warnings.filterwarnings(action='ignore', category=EfficiencyWarning)
warnings.filterwarnings(action='ignore', category=FitFailedWarning)
warnings.filterwarnings(action='ignore', category=NonBLASDotWarning)
warnings.filterwarnings(action='ignore', category=UndefinedMetricWarning)
self._params = dict(
n_estimators = ParameterDefinition(MinMax(min=10, max=111), np.uint)
)
self.__random_forest_classifier = RF()
def set_parameters(self, **kwargs):
r"""Set the parameters/arguments of the algorithm.
"""
self.__random_forest_classifier.set_params(**kwargs)
def fit(self, x, y, **kwargs):
r"""Fit RandomForestClassifier.
Arguments:
x (pandas.core.frame.DataFrame): n samples to classify.
y (pandas.core.series.Series): n classes of the samples in the x array.
Returns:
None
"""
self.__random_forest_classifier.fit(x, y)
def predict(self, x, **kwargs):
r"""Predict class for each sample (row) in x.
Arguments:
x (pandas.core.frame.DataFrame): n samples to classify.
Returns:
pandas.core.series.Series: n predicted classes.
"""
return self.__random_forest_classifier.predict(x)
def to_string(self):
r"""User friendly representation of the object.
Returns:
str: User friendly representation of the object.
"""
return Classifier.to_string(self).format(name=self.Name, args=self._parameters_to_string(self.__random_forest_classifier.get_params())) | niaaml/classifiers/random_forest.py | 3,040 | Implementation of random forest classifier.
Date:
2020
Author:
Luka Pečnik
License:
MIT
Reference:
Breiman, “Random Forests”, Machine Learning, 45(1), 5-32, 2001.
Documentation:
https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html
See Also:
* :class:`niaaml.classifiers.Classifier`
Initialize RandomForestClassifier instance.
Fit RandomForestClassifier.
Arguments:
x (pandas.core.frame.DataFrame): n samples to classify.
y (pandas.core.series.Series): n classes of the samples in the x array.
Returns:
None
Predict class for each sample (row) in x.
Arguments:
x (pandas.core.frame.DataFrame): n samples to classify.
Returns:
pandas.core.series.Series: n predicted classes.
Set the parameters/arguments of the algorithm.
User friendly representation of the object.
Returns:
str: User friendly representation of the object. | 940 | en | 0.517208 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class PrivateEndpointConnectionsOperations(object):
"""PrivateEndpointConnectionsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.iothub.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
resource_name, # type: str
**kwargs # type: Any
):
# type: (...) -> List["_models.PrivateEndpointConnection"]
"""List private endpoint connections.
List private endpoint connection properties.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of PrivateEndpointConnection, or the result of cls(response)
:rtype: list[~azure.mgmt.iothub.models.PrivateEndpointConnection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.PrivateEndpointConnection"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('[PrivateEndpointConnection]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateEndpointConnections'} # type: ignore
def get(
self,
resource_group_name, # type: str
resource_name, # type: str
private_endpoint_connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.PrivateEndpointConnection"
"""Get private endpoint connection.
Get private endpoint connection properties.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.models.PrivateEndpointConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
def _update_initial(
self,
resource_group_name, # type: str
resource_name, # type: str
private_endpoint_connection_name, # type: str
private_endpoint_connection, # type: "_models.PrivateEndpointConnection"
**kwargs # type: Any
):
# type: (...) -> "_models.PrivateEndpointConnection"
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(private_endpoint_connection, 'PrivateEndpointConnection')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
def begin_update(
self,
resource_group_name, # type: str
resource_name, # type: str
private_endpoint_connection_name, # type: str
private_endpoint_connection, # type: "_models.PrivateEndpointConnection"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.PrivateEndpointConnection"]
"""Update private endpoint connection.
Update the status of a private endpoint connection with the specified name.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection.
:type private_endpoint_connection_name: str
:param private_endpoint_connection: The private endpoint connection with updated properties.
:type private_endpoint_connection: ~azure.mgmt.iothub.models.PrivateEndpointConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either PrivateEndpointConnection or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.iothub.models.PrivateEndpointConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
private_endpoint_connection_name=private_endpoint_connection_name,
private_endpoint_connection=private_endpoint_connection,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
resource_name, # type: str
private_endpoint_connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["_models.PrivateEndpointConnection"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.PrivateEndpointConnection"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
resource_name, # type: str
private_endpoint_connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.PrivateEndpointConnection"]
"""Delete private endpoint connection.
Delete private endpoint connection with the specified name.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either PrivateEndpointConnection or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.iothub.models.PrivateEndpointConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
private_endpoint_connection_name=private_endpoint_connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
| sdk/iothub/azure-mgmt-iothub/azure/mgmt/iothub/v2020_03_01/operations/_private_endpoint_connections_operations.py | 22,860 | PrivateEndpointConnectionsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.iothub.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
Delete private endpoint connection.
Delete private endpoint connection with the specified name.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either PrivateEndpointConnection or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.iothub.models.PrivateEndpointConnection]
:raises ~azure.core.exceptions.HttpResponseError:
Update private endpoint connection.
Update the status of a private endpoint connection with the specified name.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection.
:type private_endpoint_connection_name: str
:param private_endpoint_connection: The private endpoint connection with updated properties.
:type private_endpoint_connection: ~azure.mgmt.iothub.models.PrivateEndpointConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either PrivateEndpointConnection or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.iothub.models.PrivateEndpointConnection]
:raises ~azure.core.exceptions.HttpResponseError:
Get private endpoint connection.
Get private endpoint connection properties.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.models.PrivateEndpointConnection
:raises: ~azure.core.exceptions.HttpResponseError
List private endpoint connections.
List private endpoint connection properties.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of PrivateEndpointConnection, or the result of cls(response)
:rtype: list[~azure.mgmt.iothub.models.PrivateEndpointConnection]
:raises: ~azure.core.exceptions.HttpResponseError
coding=utf-8 -------------------------------------------------------------------------- Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. See License.txt in the project root for license information. Code generated by Microsoft (R) AutoRest Code Generator. Changes may cause incorrect behavior and will be lost if the code is regenerated. -------------------------------------------------------------------------- pylint: disable=unused-import,ungrouped-imports type: str type: str type: Any type: (...) -> List["_models.PrivateEndpointConnection"] type: ClsType[List["_models.PrivateEndpointConnection"]] Construct URL type: ignore Construct parameters type: Dict[str, Any] Construct headers type: Dict[str, Any] type: ignore type: str type: str type: str type: Any type: (...) -> "_models.PrivateEndpointConnection" type: ClsType["_models.PrivateEndpointConnection"] Construct URL type: ignore Construct parameters type: Dict[str, Any] Construct headers type: Dict[str, Any] type: ignore type: str type: str type: str type: "_models.PrivateEndpointConnection" type: Any type: (...) -> "_models.PrivateEndpointConnection" type: ClsType["_models.PrivateEndpointConnection"] Construct URL type: ignore Construct parameters type: Dict[str, Any] Construct headers type: Dict[str, Any] type: Dict[str, Any] type: ignore type: str type: str type: str type: "_models.PrivateEndpointConnection" type: Any type: (...) -> LROPoller["_models.PrivateEndpointConnection"] type: Union[bool, PollingMethod] type: ClsType["_models.PrivateEndpointConnection"] type: Optional[str] type: ignore type: str type: str type: str type: Any type: (...) -> Optional["_models.PrivateEndpointConnection"] type: ClsType[Optional["_models.PrivateEndpointConnection"]] Construct URL type: ignore Construct parameters type: Dict[str, Any] Construct headers type: Dict[str, Any] type: ignore type: str type: str type: str type: Any type: (...) -> LROPoller["_models.PrivateEndpointConnection"] type: Union[bool, PollingMethod] type: ClsType["_models.PrivateEndpointConnection"] type: Optional[str] type: ignore | 6,337 | en | 0.546018 |
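A minimal calling sketch for this operations group; the client construction (IotHubClient, azure-identity) is an assumption, only the operation calls come from the file above:
from azure.identity import DefaultAzureCredential
from azure.mgmt.iothub import IotHubClient  # assumed client entry point

client = IotHubClient(DefaultAzureCredential(), subscription_id="<subscription-id>")

# List the private endpoint connections of an IoT hub, then delete one of them.
connections = client.private_endpoint_connections.list("my-rg", "my-iothub")
for conn in connections:
    print(conn.name)

poller = client.private_endpoint_connections.begin_delete(
    resource_group_name="my-rg",
    resource_name="my-iothub",
    private_endpoint_connection_name="my-connection")
deleted = poller.result()  # LROPoller; blocks until the long-running delete completes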
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/8/25 22:42
# @Author : Tom.lee
# @Site :
# @File : mysql_lock.py
# @Software: PyCharm
"""
通过MySQL实现分布式锁服务
"""
import MySQLdb
import logging
import time
FORMAT_STR = '%(asctime)s -%(module)s:%(filename)s-L%(lineno)d-%(levelname)s: %(message)s'
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(FORMAT_STR)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
logging.info("Current log level is : %s", logging.getLevelName(logger.getEffectiveLevel()))
class MySqlLock(object):
LOCK_SQL = "SELECT get_lock('{key}', {timeout}) FROM dual"
UNLOCK_SQL = "SELECT release_lock('{key}') FROM dual"
def __init__(self, lock_key=None, *args, **kwargs):
"""
:param lock_key:
:param args: Same positional arguments as MySQLdb.connect().
:param kwargs: Same keyword arguments as MySQLdb.connect(), e.g.:
host='localhost'
user='test'
passwd='test'
db='test'
"""
self.__db = MySQLdb.connect(*args, **kwargs)
self.lock_key = lock_key or '7ab18906739e4662ac01e69f5ebb7352'
def _execute(self, sql):
"""
Execute a statement against the MySQL database.
:param sql:
:return: (1L,) --> tuple
"""
res = (-1,)
cursor = self.__db.cursor()
try:
cursor.execute(sql)
if cursor.rowcount != 1:
logging.error("Multiple rows returned in mysql lock function.")
else:
res = cursor.fetchone()
except Exception, ex:
logging.error("执行SQL\"%s\" 失败! 异常信息: %s", sql, str(ex))
finally:
cursor.close()
return res
def lock(self, timeout):
"""
Acquire the MySQL lock.
:param timeout: timeout in seconds
:return:
"""
# Acquire the lock
lk = self._execute(self.LOCK_SQL.format(key=self.lock_key, timeout=timeout))
if lk[0] == 0:
logging.debug("Lock '%s' has already been created.", self.lock_key)
return False
elif lk[0] == 1:
logging.debug("Created lock '%s'." % self.lock_key)
return True
else:
logging.error("Failed to acquire the lock!")
return None
def unlock(self):
"""
Release the MySQL lock.
:return:
"""
# Release the lock
uk = self._execute(self.UNLOCK_SQL.format(key=self.lock_key))
if uk[0] == 0:
logging.debug("Failed to release lock '%s' (it is held by another process)" % self.lock_key)
return False
elif uk[0] == 1:
logging.debug("Released lock '%s'." % self.lock_key)
return True
else:
logging.error("Lock '%s' does not exist." % self.lock_key)
return None
if __name__ == "__main__":
l = MySqlLock(host='localhost', user='root', passwd='root', db='iaasms')
ret = l.lock(15)
if not ret:
logging.error("获取锁失败,退出!")
quit()
time.sleep(15) # Simulate a synchronized operation across processes!
# raise Exception('Simulate an operation failure; MySQL automatically releases the locks held by this process.')
# TODO something
print 'hello ok!'
l.unlock()
| contributed_modules/mysql/mysqldb_/mysql_lock.py | 3,349 | !/usr/bin/env python -*- coding: utf-8 -*- @Time : 2017/8/25 22:42 @Author : Tom.lee @Site : @File : mysql_lock.py @Software: PyCharm 加锁操作 释放操作 模拟跨进程的同步操作! raise Exception('模拟操作异常,mysql会自动释放该进程持有的锁.') TODO something | 226 | zh | 0.328903 |
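Because callers must pair lock()/unlock() by hand, a small context-manager wrapper is a natural extension; this is a sketch (not part of the module) that assumes the MySqlLock class above is in scope, and do_critical_work is a placeholder:
import contextlib

@contextlib.contextmanager
def mysql_lock(lock_key, timeout, **db_kwargs):
    """Acquire a MySqlLock for the duration of a with-block and always release it."""
    lk = MySqlLock(lock_key=lock_key, **db_kwargs)
    if not lk.lock(timeout):
        raise RuntimeError("could not acquire lock %r within %ss" % (lock_key, timeout))
    try:
        yield lk
    finally:
        lk.unlock()

# Usage:
# with mysql_lock('my-job', 15, host='localhost', user='root', passwd='root', db='iaasms'):
#     do_critical_work()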
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..arithmetic import AddScalarVolumes
def test_AddScalarVolumes_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
inputVolume1=dict(argstr='%s',
position=-3,
),
inputVolume2=dict(argstr='%s',
position=-2,
),
order=dict(argstr='--order %s',
),
outputVolume=dict(argstr='%s',
hash_files=False,
position=-1,
),
terminal_output=dict(nohash=True,
),
)
inputs = AddScalarVolumes.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_AddScalarVolumes_outputs():
output_map = dict(outputVolume=dict(position=-1,
),
)
outputs = AddScalarVolumes.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
| nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py | 1,182 | AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT | 51 | en | 0.653745 |
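The auto-generated test only checks trait metadata; actually running the interface would look roughly like this (the file names are placeholders, and a working 3D Slicer CLI installation is assumed):
from nipype.interfaces.slicer.filtering.arithmetic import AddScalarVolumes

add = AddScalarVolumes()
add.inputs.inputVolume1 = 'volume_a.nrrd'   # hypothetical input files
add.inputs.inputVolume2 = 'volume_b.nrrd'
add.inputs.outputVolume = 'volume_sum.nrrd'
result = add.run()                          # requires the Slicer CLI module on the PATH
print(result.outputs.outputVolume)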
#coding:utf-8
# Chainer version 3.2 (use version 3.x)
#
# This is based on <https://raw.githubusercontent.com/chainer/chainer/v3/examples/mnist/train_mnist.py>
#
# This used mean_absolute_error as loss function.
# Check version
# Python 3.6.4 on win32 (Windows 10)
# Chainer 3.2.0
# numpy 1.14.0
# matplotlib 2.1.1
from __future__ import print_function
import argparse
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import training, cuda
from chainer.training import extensions
from chainer.functions.loss.mean_squared_error import mean_squared_error
from chainer.functions.loss.mean_absolute_error import mean_absolute_error
from TM_dataset import *
from plot_report_logscale import *
class MLP(chainer.Chain):
def __init__(self, n_units, n_out):
super(MLP, self).__init__()
with self.init_scope():
# the size of the inputs to each layer will be inferred
self.l1 = L.Linear(None, n_units) # n_in -> n_units
self.l2 = L.Linear(None, n_units) # n_units -> n_units
self.l3 = L.Linear(None, n_units) # n_units -> n_units
self.l4 = L.Linear(None, n_units) # n_units -> n_units
self.l5 = L.Linear(None, n_out) # n_units -> n_out
# set random seed as fix value, avoid different result every time
np.random.seed(100)
def __call__(self, x):
h1 = F.relu(self.l1(x)) #F.sigmoid(self.l1(x))
h2 = F.relu(self.l2(h1)) #F.sigmoid(self.l2(h1))
h3 = F.relu(self.l3(h2)) #F.sigmoid(self.l3(h2))
h4 = F.relu(self.l4(h3)) #F.sigmoid(self.l4(h3))
return self.l5(h4)
IN_CHANNELS =1 # input MONOCOLOR
OUT_CHANNELS= 100 # Middle layer channels
class CNN(chainer.Chain):
# INPUT -> ((CONV -> RELU) -> POOL) ->((CONV -> RELU) -> POOL) -> ->((CONV -> RELU) -> POOL) -> FC
def __init__(self, n_units, n_out, in_channels=IN_CHANNELS, out_channels=OUT_CHANNELS):
super(CNN, self).__init__()
with self.init_scope():
self.conv1=L.Convolution2D(in_channels, out_channels, (3,1) , pad=0)
self.conv2=L.Convolution2D(out_channels, out_channels, (3,1) , pad=0)
self.conv3=L.Convolution2D(out_channels, out_channels, (3,1) , pad=0)
self.l1=L.Linear( None, n_out)
# set random seed as fix value, avoid different result every time
np.random.seed(100)
def __call__(self, x):
h1 = F.relu(self.conv1(x))
h2 = F.max_pooling_2d( h1, (2,1) )
h3 = F.relu(self.conv2(h2))
h4 = F.max_pooling_2d( h3, (2,1) )
h5 = F.relu(self.conv3(h4))
h6 = F.max_pooling_2d( h5, (2,1) )
y = self.l1(h6)
return y
def main():
parser = argparse.ArgumentParser(description='estimation from formant to vocal tube model parameter')
parser.add_argument('--batchsize', '-b', type=int, default=100,
help='Number of images in each mini-batch')
parser.add_argument('--epoch', '-e', type=int, default=650,
help='Number of sweeps over the dataset to train')
parser.add_argument('--frequency', '-f', type=int, default=-1,
help='Frequency of taking a snapshot')
parser.add_argument('--gpu', '-g', type=int, default=-1,
help='GPU ID (negative value indicates CPU)')
parser.add_argument('--out', '-o', default='result',
help='Prefix Directory Name to output the result')
parser.add_argument('--resume', '-r', default='',
help='Resume the training from snapshot')
parser.add_argument('--unit', '-u', type=int, default=100,
help='Number of units')
parser.add_argument('--noplot', dest='plot', action='store_false',
help='Disable PlotReport extension')
parser.add_argument('--delta', '-d', type=float, default=0.5,
help='delta for length and area: for train')
parser.add_argument('--delta_for_test', '-t', type=float, default=1.5,
help='delta for length and area: for test')
parser.add_argument('--model_type', '-m', default='MLP',
help='choice MLP or CNN')
args = parser.parse_args()
print('GPU: {}'.format(args.gpu))
print('# unit: {}'.format(args.unit))
print('# Minibatch-size: {}'.format(args.batchsize))
print('# epoch: {}'.format(args.epoch))
# check model type
if args.model_type == 'CNN':
CNN_flag=True
print('# CNN')
else:
CNN_flag=False
print('# MLP')
# Load dataset
train = TM_DatsSet(args.delta, args.delta, CNN_flag)
test = TM_DatsSet(args.delta_for_test, args.delta_for_test, CNN_flag)
n_out= train.n_out
out_dir= args.out + train.suffix_list
print('# result directory: ', out_dir)
# Set up a neural network to train
# Classifier reports mean_absolute/squared_error loss and accuracy at every
# iteration, which will be used by the PrintReport extension below.
# Since the targets are not independent class events, a distance-based loss seems more appropriate here than softmax.
#
if CNN_flag:
model = L.Classifier(CNN(args.unit, n_out), lossfun=mean_absolute_error) #mean_squared_error)
else:
model = L.Classifier(MLP(args.unit, n_out), lossfun=mean_absolute_error) #mean_squared_error)
model.compute_accuracy= False # no need compute accuracy
if args.gpu >= 0:
# Make a specified GPU current
chainer.cuda.get_device_from_id(args.gpu).use()
model.to_gpu() # Copy the model to the GPU
# Setup an optimizer
optimizer = chainer.optimizers.Adam(alpha=0.001) #alpha=0.0001)
optimizer.setup(model)
train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
test_iter = chainer.iterators.SerialIterator(test, args.batchsize,
repeat=False, shuffle=False)
# Set up a trainer
updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu)
trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=out_dir)
# Evaluate the model with the test dataset for each epoch
trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))
# Dump a computational graph from 'loss' variable at the first iteration
# The "main" refers to the target link of the "main" optimizer.
trainer.extend(extensions.dump_graph('main/loss'))
# Take a snapshot for each specified epoch
frequency = args.epoch if args.frequency == -1 else max(1, args.frequency)
trainer.extend(extensions.snapshot(), trigger=(frequency, 'epoch'))
# Write a log of evaluation statistics for each epoch
trainer.extend(extensions.LogReport())
# Save two plot images to the result dir
if args.plot and extensions.PlotReport.available():
trainer.extend(
PlotReport2(['main/loss', 'validation/main/loss'],
'epoch', file_name='loss.png', LogScale=True))
# Print selected entries of the log to stdout
# Here "main" refers to the target link of the "main" optimizer again, and
# "validation" refers to the default name of the Evaluator extension.
# Entries other than 'epoch' are reported by the Classifier link, called by
# either the updater or the evaluator.
trainer.extend(extensions.PrintReport(
['epoch', 'main/loss', 'validation/main/loss', 'elapsed_time']))
# Print a progress bar to stdout
trainer.extend(extensions.ProgressBar())
if args.resume:
# Resume from a snapshot
chainer.serializers.load_npz(args.resume, trainer)
# Run the training
trainer.run()
# Accuracy rate
print('checking accuracy rate... ') # transfer one by one data will take long time. It needs to improvement.
c0=0
for loop in range(train.__len__()):
x1=train.get_example(loop)
if args.gpu >= 0: # gpu
x_batch = cuda.cupy.asarray([x1[0]])
y_gpu=model.predictor(x_batch)
y=cuda.to_cpu(y_gpu.data)
else: # cpu
x_batch = np.asarray([x1[0]])
y_cpu=model.predictor(x_batch)
y=y_cpu.data[0]
#print ('input ', x1[0] )
#print (' predicted ', y, '(', np.round(y) , ')', 'correct ', x1[1] )
# Accuracy: the target is assumed to be an (integer) frequency index, so a prediction counts as correct if it matches after rounding.
if np.round(y) == np.round(x1[1]):
c0+= 1
print ('Accuracy rate (index is equal, ratio[%]) ', (c0 * 100.0 / train.__len__() ) )
if __name__ == '__main__':
main()
| train.py | 9,050 | coding:utf-8 Chainer version 3.2 (use version 3.x) This is based on <https://raw.githubusercontent.com/chainer/chainer/v3/examples/mnist/train_mnist.py> This used mean_absolute_error as loss function. Check version Python 3.6.4 on win32 (Windows 10) Chainer 3.2.0 numpy 1.14.0 matplotlib 2.1.1 the size of the inputs to each layer will be inferred n_in -> n_units n_units -> n_units n_units -> n_units n_units -> n_units n_units -> n_out set random seed as fix value, avoid different result every timeF.sigmoid(self.l1(x))F.sigmoid(self.l2(h1))F.sigmoid(self.l3(h2))F.sigmoid(self.l4(h3)) input MONOCOLOR Middle layer channels INPUT -> ((CONV -> RELU) -> POOL) ->((CONV -> RELU) -> POOL) -> ->((CONV -> RELU) -> POOL) -> FC set random seed as fix value, avoid different result every time check model type Load dataset Set up a neural network to train Classifier reports mean_absolute/squared_error loss and accuracy at everypha= iteration, which will be used by the PrintReport extension below. 損失の評価は独立事象ではないのでsoftmaxより距離の方が妥当かなmean_squared_error)mean_squared_error) no need compute accuracy Make a specified GPU current Copy the model to the GPU Setup an optimizeralpha=0.0001) Set up a trainer Evaluate the model with the test dataset for each epoch Dump a computational graph from 'loss' variable at the first iteration The "main" refers to the target link of the "main" optimizer. Take a snapshot for each specified epoch Write a log of evaluation statistics for each epoch Save two plot images to the result dir Print selected entries of the log to stdout Here "main" refers to the target link of the "main" optimizer again, and "validation" refers to the default name of the Evaluator extension. Entries other than 'epoch' are reported by the Classifier link, called by either the updater or the evaluator. Print a progress bar to stdout Resume from a snapshot Run the training Accuracy rate transfer one by one data will take long time. It needs to improvement. gpu cpuprint ('input ', x1[0] )print (' predicted ', y, '(', np.round(y) , ')', 'correct ', x1[1] ) 正解率、周波数のindex[整数]を指すことを想定しているので、四捨五入して一致していればOKとした。 | 2,133 | en | 0.604918 |
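For reference, a minimal sketch of wrapping the MLP defined above the same way main() does and pushing a dummy batch through it (the feature width and n_out value are placeholders, and the MLP class from this file is assumed to be in scope):
import numpy as np
import chainer.links as L
from chainer.functions.loss.mean_absolute_error import mean_absolute_error

model = L.Classifier(MLP(100, 3), lossfun=mean_absolute_error)  # MLP from train.py above; n_out=3 assumed
model.compute_accuracy = False                                  # regression-style target, accuracy not computed

x_batch = np.random.rand(4, 5).astype(np.float32)               # 4 dummy samples with 5 features each
y = model.predictor(x_batch)                                    # chainer.Variable of shape (4, 3)
print(y.data)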
"""
Forward Chaining, K-Fold and Group K-Fold algorithms to split a given training dataset into train (X, y) and validation (Xcv, ycv) sets
"""
import numpy as np
def split_train_val_forwardChaining(sequence, numInputs, numOutputs, numJumps):
""" Returns sets to train and cross-validate a model using forward chaining technique
Parameters:
sequence (array) : Full training dataset
numInputs (int) : Number of inputs X and Xcv used at each training and validation
numOutputs (int) : Number of outputs y and ycv used at each training and validation
numJumps (int) : Number of sequence samples to be ignored between (X,y) sets
Returns:
X (2D array) : Array of numInputs arrays used for training
y (2D array) : Array of numOutputs arrays used for training
Xcv (2D array) : Array of numInputs arrays used for cross-validation
ycv (2D array) : Array of numOutputs arrays used for cross-validation
"""
X, y, Xcv, ycv = dict(), dict(), dict(), dict()
j=2; # Tracks index of CV set at each train/val split
# Iterate through all train/val splits
while 1:
start_ix=0; end_ix=0; startCv_ix=0; endCv_ix=0;
X_it, y_it, Xcv_it, ycv_it = list(), list(), list(), list()
i=0; # Index of individual training set at each train/val split
# Iterate until index of individual training set is smaller than index of cv set
while (i < j):
## TRAINING DATA
start_ix = numJumps*i;
end_ix = start_ix + numInputs;
seq_x = sequence[start_ix:end_ix]
X_it.append(seq_x)
seq_y = sequence[end_ix:end_ix+numOutputs]
y_it.append(seq_y)
i+=1;
# Once val data crosses time series length return
if (((end_ix+numInputs)+numOutputs) > len(sequence)):
break
## CROSS-VALIDATION DATA
startCv_ix = end_ix;
endCv_ix = end_ix + numInputs;
seq_xcv = sequence[startCv_ix:endCv_ix]
Xcv_it.append(seq_xcv)
seq_ycv = sequence[endCv_ix:endCv_ix+numOutputs]
ycv_it.append(seq_ycv)
## Add another train/val split
X[j-2] = np.array(X_it)
y[j-2] = np.array(y_it)
Xcv[j-2] = np.array(Xcv_it)
ycv[j-2] = np.array(ycv_it)
j+=1;
if (len(X)==0 or len(Xcv)==0):
print("The sequence provided does not has size enough to populate the return arrays")
return X, y, Xcv, ycv
def split_train_val_kFold(sequence, numInputs, numOutputs, numJumps):
""" Returns sets to train and cross-validate a model using K-Fold technique
Parameters:
sequence (array) : Full training dataset
numInputs (int) : Number of inputs X and Xcv used at each training
numOutputs (int) : Number of outputs y and ycv used at each training
numJumps (int) : Number of sequence samples to be ignored between (X,y) sets
Returns:
X (2D array) : Array of numInputs arrays used for training
y (2D array) : Array of numOutputs arrays used for training
Xcv (2D array) : Array of numInputs arrays used for cross-validation
ycv (2D array) : Array of numOutputs arrays used for cross-validation
"""
X, y, Xcv, ycv = dict(), dict(), dict(), dict()
j=2; # Tracks index of CV set at each train/val split
theEnd = 0; # Flag to terminate function
# Iterate until val set falls outside time series length
while 1:
start_ix=0; end_ix=0; startCv_ix=0; endCv_ix=0;
X_it, y_it, Xcv_it, ycv_it = list(), list(), list(), list()
i=0; # Index of individual training set at each train/val split
n=0; # Number of numJumps
# Iterate through all train/val splits
while 1:
if (i != j):
## TRAINING DATA
start_ix = endCv_ix + numJumps*n;
end_ix = start_ix + numInputs;
n +=1;
# Leave train/val split loop once training data crosses time series length
if end_ix+numOutputs > len(sequence):
break;
seq_x = sequence[start_ix:end_ix]
X_it.append(seq_x)
seq_y = sequence[end_ix:end_ix+numOutputs]
y_it.append(seq_y)
else:
## CROSS-VALIDATION DATA
startCv_ix = end_ix;
endCv_ix = end_ix + numInputs;
n = 0;
# Once val data crosses time series length exit tran/val split loop and return
if endCv_ix+numOutputs > len(sequence):
theEnd = 1;
break;
seq_xcv = sequence[startCv_ix:endCv_ix]
Xcv_it.append(seq_xcv)
seq_ycv = sequence[endCv_ix:endCv_ix+numOutputs]
ycv_it.append(seq_ycv)
i+=1;
# Only add a train/val split if the time series length has not been crossed
if (theEnd == 1):
break
## Add another train/val split
X[j-2] = np.array(X_it)
y[j-2] = np.array(y_it)
Xcv[j-2] = np.array(Xcv_it)
ycv[j-2] = np.array(ycv_it)
j+=1;
if (len(X)==0 or len(Xcv)==0):
print("The sequence provided does not has size enough to populate the return arrays")
return X, y, Xcv, ycv
def split_train_val_groupKFold(sequence, numInputs, numOutputs, numJumps):
""" Returns sets to train and cross-validate a model using group K-Fold technique
Parameters:
sequence (array) : Full training dataset
numInputs (int) : Number of inputs X and Xcv used at each training
numOutputs (int) : Number of outputs y and ycv used at each training
numJumps (int) : Number of sequence samples to be ignored between (X,y) sets
Returns:
X (2D array) : Array of numInputs arrays used for training
y (2D array) : Array of numOutputs arrays used for training
Xcv (2D array) : Array of numInputs arrays used for cross-validation
ycv (2D array) : Array of numOutputs arrays used for cross-validation
"""
X, y, Xcv, ycv = dict(), dict(), dict(), dict()
# Iterate through 5 train/val splits
for j in np.arange(5):
start_ix=0; end_ix=0; startCv_ix=0; endCv_ix=0;
X_it, y_it, Xcv_it, ycv_it = list(), list(), list(), list()
i=0; # Index of individual training set at each train/val split
n=0; # Number of numJumps
while 1:
if ((i+1+j)%(5) != 0):
# TRAINING DATA
start_ix = endCv_ix + numJumps*n;
end_ix = start_ix + numInputs;
n+=1;
# Leave train/val split loop once training data crosses time series length
if end_ix+numOutputs > len(sequence)-1:
break
seq_x = sequence[start_ix:end_ix]
X_it.append(seq_x)
seq_y = sequence[end_ix:end_ix+numOutputs]
y_it.append(seq_y)
else:
# CROSS-VALIDATION DATA
startCv_ix = end_ix;
endCv_ix = end_ix + numInputs;
n=0;
# Once val data crosses time series length return
if ((endCv_ix+numOutputs) > len(sequence)):
break
seq_xcv = sequence[startCv_ix:endCv_ix]
Xcv_it.append(seq_xcv)
seq_ycv = sequence[endCv_ix:endCv_ix+numOutputs]
ycv_it.append(seq_ycv)
i+=1;
## Add another train/val split
X[j] = np.array(X_it)
y[j] = np.array(y_it)
Xcv[j] = np.array(Xcv_it)
ycv[j] = np.array(ycv_it)
if (len(X)==0 or len(Xcv)==0):
print("The sequence provided does not has size enough to populate the return arrays")
return X, y, Xcv, ycv
| tsxv/splitTrainVal.py | 8,290 | Returns sets to train and cross-validate a model using forward chaining technique
Parameters:
sequence (array) : Full training dataset
numInputs (int) : Number of inputs X and Xcv used at each training and validation
numOutputs (int) : Number of outputs y and ycv used at each training and validation
numJumps (int) : Number of sequence samples to be ignored between (X,y) sets
Returns:
X (2D array) : Array of numInputs arrays used for training
y (2D array) : Array of numOutputs arrays used for training
Xcv (2D array) : Array of numInputs arrays used for cross-validation
ycv (2D array) : Array of numOutputs arrays used for cross-validation
Returns sets to train and cross-validate a model using group K-Fold technique
Parameters:
sequence (array) : Full training dataset
numInputs (int) : Number of inputs X and Xcv used at each training
numOutputs (int) : Number of outputs y and ycv used at each training
numJumps (int) : Number of sequence samples to be ignored between (X,y) sets
Returns:
X (2D array) : Array of numInputs arrays used for training
y (2D array) : Array of numOutputs arrays used for training
Xcv (2D array) : Array of numInputs arrays used for cross-validation
ycv (2D array) : Array of numOutputs arrays used for cross-validation
Returns sets to train and cross-validate a model using K-Fold technique
Parameters:
sequence (array) : Full training dataset
numInputs (int) : Number of inputs X and Xcv used at each training
numOutputs (int) : Number of outputs y and ycv used at each training
numJumps (int) : Number of sequence samples to be ignored between (X,y) sets
Returns:
X (2D array) : Array of numInputs arrays used for training
y (2D array) : Array of numOutputs arrays used for training
Xcv (2D array) : Array of numInputs arrays used for cross-validation
ycv (2D array) : Array of numOutputs arrays used for cross-validation
Forward Chaining, K-Fold and Group K-Fold algorithms to split a given training dataset into train (X, y) and validation (Xcv, ycv) sets
Tracks index of CV set at each train/val split Iterate through all train/val splits Index of individual training set at each train/val split Iterate until index of individual training set is smaller than index of cv set TRAINING DATA Once val data crosses time series length return CROSS-VALIDATION DATA Add another train/val split Tracks index of CV set at each train/val split Flag to terminate function Iterate until val set falls outside time series length Index of individual training set at each train/val split Number of numJumps Iterate through all train/val splits TRAINING DATA Leave train/val split loop once training data crosses time series length CROSS-VALIDATION DATA Once val data crosses time series length exit tran/val split loop and return Only add a train/val split if the time series length has not been crossed Add another train/val split Iterate through 5 train/val splits Index of individual training set at each train/val split Number of numJumps TRAINING DATA Leave train/val split loop once training data crosses time series length CROSS-VALIDATION DATA Once val data crosses time series length return Add another train/val split | 3,321 | en | 0.798266 |
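A quick usage sketch of the forward-chaining splitter on a toy series; the tsxv.splitTrainVal import path is inferred from the file path above:
import numpy as np
from tsxv.splitTrainVal import split_train_val_forwardChaining  # assumed import path

sequence = np.arange(20)  # toy time series with 20 samples
X, y, Xcv, ycv = split_train_val_forwardChaining(sequence, numInputs=4, numOutputs=2, numJumps=2)
for split in X:  # each key is one train/validation split
    print(split, X[split].shape, y[split].shape, Xcv[split].shape, ycv[split].shape)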
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of SpineNet model.
X. Du, T-Y. Lin, P. Jin, G. Ghiasi, M. Tan, Y. Cui, Q. V. Le, X. Song
SpineNet: Learning Scale-Permuted Backbone for Recognition and Localization
https://arxiv.org/abs/1912.05027
"""
import math
# Import libraries
from absl import logging
import tensorflow as tf
from official.modeling import tf_utils
from official.vision.beta.modeling.layers import nn_blocks
from official.vision.beta.ops import spatial_transform_ops
layers = tf.keras.layers
FILTER_SIZE_MAP = {
1: 32,
2: 64,
3: 128,
4: 256,
5: 256,
6: 256,
7: 256,
}
# The fixed SpineNet architecture discovered by NAS.
# Each element represents a specification of a building block:
# (block_level, block_fn, (input_offset0, input_offset1), is_output).
SPINENET_BLOCK_SPECS = [
(2, 'bottleneck', (0, 1), False),
(4, 'residual', (0, 1), False),
(3, 'bottleneck', (2, 3), False),
(4, 'bottleneck', (2, 4), False),
(6, 'residual', (3, 5), False),
(4, 'bottleneck', (3, 5), False),
(5, 'residual', (6, 7), False),
(7, 'residual', (6, 8), False),
(5, 'bottleneck', (8, 9), False),
(5, 'bottleneck', (8, 10), False),
(4, 'bottleneck', (5, 10), True),
(3, 'bottleneck', (4, 10), True),
(5, 'bottleneck', (7, 12), True),
(7, 'bottleneck', (5, 14), True),
(6, 'bottleneck', (12, 14), True),
]
SCALING_MAP = {
'49S': {
'endpoints_num_filters': 128,
'filter_size_scale': 0.65,
'resample_alpha': 0.5,
'block_repeats': 1,
},
'49': {
'endpoints_num_filters': 256,
'filter_size_scale': 1.0,
'resample_alpha': 0.5,
'block_repeats': 1,
},
'96': {
'endpoints_num_filters': 256,
'filter_size_scale': 1.0,
'resample_alpha': 0.5,
'block_repeats': 2,
},
'143': {
'endpoints_num_filters': 256,
'filter_size_scale': 1.0,
'resample_alpha': 1.0,
'block_repeats': 3,
},
'190': {
'endpoints_num_filters': 512,
'filter_size_scale': 1.3,
'resample_alpha': 1.0,
'block_repeats': 4,
},
}
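# A sketch of how one SCALING_MAP entry would feed the SpineNet constructor defined
# below (the lookup by model id is done by builder code outside this file):
#   params = SCALING_MAP['49']
#   model = SpineNet(endpoints_num_filters=params['endpoints_num_filters'],
#                    filter_size_scale=params['filter_size_scale'],
#                    resample_alpha=params['resample_alpha'],
#                    block_repeats=params['block_repeats'])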
class BlockSpec(object):
"""A container class that specifies the block configuration for SpineNet."""
def __init__(self, level, block_fn, input_offsets, is_output):
self.level = level
self.block_fn = block_fn
self.input_offsets = input_offsets
self.is_output = is_output
def build_block_specs(block_specs=None):
"""Builds the list of BlockSpec objects for SpineNet."""
if not block_specs:
block_specs = SPINENET_BLOCK_SPECS
logging.info('Building SpineNet block specs: %s', block_specs)
return [BlockSpec(*b) for b in block_specs]
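# For example, the first entry of SPINENET_BLOCK_SPECS, (2, 'bottleneck', (0, 1), False),
# becomes a BlockSpec with:
#   spec = build_block_specs()[0]
#   spec.level          -> 2
#   spec.block_fn       -> 'bottleneck'
#   spec.input_offsets  -> (0, 1)
#   spec.is_output      -> False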
@tf.keras.utils.register_keras_serializable(package='Vision')
class SpineNet(tf.keras.Model):
"""Class to build SpineNet models."""
def __init__(self,
input_specs=tf.keras.layers.InputSpec(shape=[None, 640, 640, 3]),
min_level=3,
max_level=7,
block_specs=build_block_specs(),
endpoints_num_filters=256,
resample_alpha=0.5,
block_repeats=1,
filter_size_scale=1.0,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
activation='relu',
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
**kwargs):
"""SpineNet model."""
self._input_specs = input_specs
self._min_level = min_level
self._max_level = max_level
self._block_specs = block_specs
self._endpoints_num_filters = endpoints_num_filters
self._resample_alpha = resample_alpha
self._block_repeats = block_repeats
self._filter_size_scale = filter_size_scale
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
self._activation = activation
self._use_sync_bn = use_sync_bn
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
if activation == 'relu':
self._activation_fn = tf.nn.relu
elif activation == 'swish':
self._activation_fn = tf.nn.swish
else:
raise ValueError('Activation {} not implemented.'.format(activation))
self._init_block_fn = 'bottleneck'
self._num_init_blocks = 2
if use_sync_bn:
self._norm = layers.experimental.SyncBatchNormalization
else:
self._norm = layers.BatchNormalization
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
# Build SpineNet.
inputs = tf.keras.Input(shape=input_specs.shape[1:])
net = self._build_stem(inputs=inputs)
net = self._build_scale_permuted_network(
net=net, input_width=input_specs.shape[1])
endpoints = self._build_endpoints(net=net)
self._output_specs = {l: endpoints[l].get_shape() for l in endpoints}
super(SpineNet, self).__init__(inputs=inputs, outputs=endpoints)
def _block_group(self,
inputs,
filters,
strides,
block_fn_cand,
block_repeats=1,
name='block_group'):
"""Creates one group of blocks for the SpineNet model."""
block_fn_candidates = {
'bottleneck': nn_blocks.BottleneckBlock,
'residual': nn_blocks.ResidualBlock,
}
block_fn = block_fn_candidates[block_fn_cand]
_, _, _, num_filters = inputs.get_shape().as_list()
if block_fn_cand == 'bottleneck':
use_projection = not (num_filters == (filters * 4) and strides == 1)
else:
use_projection = not (num_filters == filters and strides == 1)
x = block_fn(
filters=filters,
strides=strides,
use_projection=use_projection,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=self._activation,
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon)(
inputs)
for _ in range(1, block_repeats):
x = block_fn(
filters=filters,
strides=1,
use_projection=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=self._activation,
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon)(
x)
return tf.identity(x, name=name)
def _build_stem(self, inputs):
"""Build SpineNet stem."""
x = layers.Conv2D(
filters=64,
kernel_size=7,
strides=2,
use_bias=False,
padding='same',
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
inputs)
x = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)(
x)
x = tf_utils.get_activation(self._activation_fn)(x)
x = layers.MaxPool2D(pool_size=3, strides=2, padding='same')(x)
net = []
# Build the initial level 2 blocks.
for i in range(self._num_init_blocks):
x = self._block_group(
inputs=x,
filters=int(FILTER_SIZE_MAP[2] * self._filter_size_scale),
strides=1,
block_fn_cand=self._init_block_fn,
block_repeats=self._block_repeats,
name='stem_block_{}'.format(i + 1))
net.append(x)
return net
def _build_scale_permuted_network(self,
net,
input_width,
weighted_fusion=False):
"""Build scale-permuted network."""
net_sizes = [int(math.ceil(input_width / 2**2))] * len(net)
net_block_fns = [self._init_block_fn] * len(net)
num_outgoing_connections = [0] * len(net)
endpoints = {}
for i, block_spec in enumerate(self._block_specs):
# Find out specs for the target block.
target_width = int(math.ceil(input_width / 2**block_spec.level))
target_num_filters = int(FILTER_SIZE_MAP[block_spec.level] *
self._filter_size_scale)
target_block_fn = block_spec.block_fn
# Resample then merge input0 and input1.
parents = []
input0 = block_spec.input_offsets[0]
input1 = block_spec.input_offsets[1]
x0 = self._resample_with_alpha(
inputs=net[input0],
input_width=net_sizes[input0],
input_block_fn=net_block_fns[input0],
target_width=target_width,
target_num_filters=target_num_filters,
target_block_fn=target_block_fn,
alpha=self._resample_alpha)
parents.append(x0)
num_outgoing_connections[input0] += 1
x1 = self._resample_with_alpha(
inputs=net[input1],
input_width=net_sizes[input1],
input_block_fn=net_block_fns[input1],
target_width=target_width,
target_num_filters=target_num_filters,
target_block_fn=target_block_fn,
alpha=self._resample_alpha)
parents.append(x1)
num_outgoing_connections[input1] += 1
# Merge 0 outdegree blocks to the output block.
if block_spec.is_output:
for j, (j_feat,
j_connections) in enumerate(zip(net, num_outgoing_connections)):
if j_connections == 0 and (j_feat.shape[2] == target_width and
j_feat.shape[3] == x0.shape[3]):
parents.append(j_feat)
num_outgoing_connections[j] += 1
# pylint: disable=g-direct-tensorflow-import
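      # Weighted fusion: each parent feature map is scaled by a learned,
      # ReLU-clamped scalar weight, normalized so the weights sum to ~1
      # (the small epsilon guards against division by zero).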
if weighted_fusion:
dtype = parents[0].dtype
parent_weights = [
tf.nn.relu(tf.cast(tf.Variable(1.0, name='block{}_fusion{}'.format(
i, j)), dtype=dtype)) for j in range(len(parents))]
weights_sum = tf.add_n(parent_weights)
parents = [
parents[i] * parent_weights[i] / (weights_sum + 0.0001)
for i in range(len(parents))
]
# Fuse all parent nodes then build a new block.
x = tf_utils.get_activation(self._activation_fn)(tf.add_n(parents))
x = self._block_group(
inputs=x,
filters=target_num_filters,
strides=1,
block_fn_cand=target_block_fn,
block_repeats=self._block_repeats,
name='scale_permuted_block_{}'.format(i + 1))
net.append(x)
net_sizes.append(target_width)
net_block_fns.append(target_block_fn)
num_outgoing_connections.append(0)
# Save output feats.
if block_spec.is_output:
if block_spec.level in endpoints:
raise ValueError('Duplicate feats found for output level {}.'.format(
block_spec.level))
if (block_spec.level < self._min_level or
block_spec.level > self._max_level):
raise ValueError('Output level is out of range [{}, {}]'.format(
self._min_level, self._max_level))
endpoints[str(block_spec.level)] = x
return endpoints
def _build_endpoints(self, net):
"""Match filter size for endpoints before sharing conv layers."""
endpoints = {}
for level in range(self._min_level, self._max_level + 1):
x = layers.Conv2D(
filters=self._endpoints_num_filters,
kernel_size=1,
strides=1,
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
net[str(level)])
x = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)(
x)
x = tf_utils.get_activation(self._activation_fn)(x)
endpoints[str(level)] = x
return endpoints
def _resample_with_alpha(self,
inputs,
input_width,
input_block_fn,
target_width,
target_num_filters,
target_block_fn,
alpha=0.5):
"""Match resolution and feature dimension."""
_, _, _, input_num_filters = inputs.get_shape().as_list()
if input_block_fn == 'bottleneck':
input_num_filters /= 4
new_num_filters = int(input_num_filters * alpha)
x = layers.Conv2D(
filters=new_num_filters,
kernel_size=1,
strides=1,
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
inputs)
x = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)(
x)
x = tf_utils.get_activation(self._activation_fn)(x)
# Spatial resampling.
if input_width > target_width:
x = layers.Conv2D(
filters=new_num_filters,
kernel_size=3,
strides=2,
padding='SAME',
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
x)
x = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)(
x)
x = tf_utils.get_activation(self._activation_fn)(x)
input_width /= 2
while input_width > target_width:
x = layers.MaxPool2D(pool_size=3, strides=2, padding='SAME')(x)
input_width /= 2
elif input_width < target_width:
scale = target_width // input_width
x = spatial_transform_ops.nearest_upsampling(x, scale=scale)
# Last 1x1 conv to match filter size.
if target_block_fn == 'bottleneck':
target_num_filters *= 4
x = layers.Conv2D(
filters=target_num_filters,
kernel_size=1,
strides=1,
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
x)
x = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)(
x)
return x
def get_config(self):
config_dict = {
'min_level': self._min_level,
'max_level': self._max_level,
'endpoints_num_filters': self._endpoints_num_filters,
'resample_alpha': self._resample_alpha,
'block_repeats': self._block_repeats,
'filter_size_scale': self._filter_size_scale,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'activation': self._activation,
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon
}
return config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@property
def output_specs(self):
"""A dict of {level: TensorShape} pairs for the model output."""
return self._output_specs
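# Usage sketch (hypothetical; the keyword names are assumed to mirror the keys
# returned by get_config() above, and the Keras call signature is assumed):
#   backbone = SpineNet(min_level=3, max_level=7, endpoints_num_filters=256)
#   endpoints = backbone(images)   # expected: {str(level): feature map}
#   specs = backbone.output_specs  # {str(level): TensorShape} per endpoint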
| official/vision/beta/modeling/backbones/spinenet.py | 16,368 | Implementation of SpineNet (X. Du, T-Y. Lin, P. Jin, G. Ghiasi, M. Tan, Y. Cui, Q. V. Le, X. Song, "SpineNet: Learning Scale-Permuted Backbone for Recognition and Localization", https://arxiv.org/abs/1912.05027); docstrings and comments extracted from the source above. | 1,860 | en | 0.765683 |
"""This module implements row model of Amazon.co.jp CSV."""
from dataclasses import dataclass
from datetime import datetime
from typing import ClassVar, Optional
from zaimcsvconverter import CONFIG
from zaimcsvconverter.file_csv_convert import FileCsvConvert
from zaimcsvconverter.inputcsvformats import InputItemRow, InputItemRowData, InputRowFactory
from zaimcsvconverter.models import FileCsvConvertId, Store, StoreRowData
@dataclass
class Amazon201911RowData(InputItemRowData):
"""This class implements data class for wrapping list of Amazon.co.jp CSV row model."""
# Reason: This implement depends on design of CSV. pylint: disable=too-many-instance-attributes
ITEM_NAME_ENTIRE_ORDER: ClassVar[str] = "(注文全体)"
ITEM_NAME_BILLING_TO_CREDIT_CARD: ClassVar[str] = "(クレジットカードへの請求)"
ITEM_NAME_SHIPPING_HANDLING: ClassVar[str] = "(配送料・手数料)"
_ordered_date: str
order_id: str
_item_name: str
note: str
_price: str
_number: str
_subtotal_price_item: str
_total_order: str
destination: str
status: str
billing_address: str
billing_amount: str
credit_card_billing_date: str
credit_card_billing_amount: str
credit_card_identity: str
url_order_summary: str
url_receipt: str
url_item: str
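    # Underscore-prefixed fields keep the raw CSV strings; the properties below
    # convert them to typed values (a datetime and optional integers).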
@property
def date(self) -> datetime:
return datetime.strptime(self._ordered_date, "%Y/%m/%d")
@property
def item_name(self) -> str:
return self._item_name
@property
def price(self) -> Optional[int]:
return None if self._price == "" else int(self._price)
@property
def number(self) -> Optional[int]:
return None if self._number == "" else int(self._number)
@property
def total_order(self) -> Optional[int]:
return None if self._total_order == "" else int(self._total_order)
@property
def subtotal_price_item(self) -> Optional[int]:
return None if self._subtotal_price_item == "" else int(self._subtotal_price_item)
@property
def validate(self) -> bool:
self.stock_error(lambda: self.date, f"Invalid ordered date. Ordered date = {self._ordered_date}")
self.stock_error(lambda: self.price, f"Invalid price. Price = {self._price}")
self.stock_error(lambda: self.number, f"Invalid number. Number = {self._number}")
self.stock_error(lambda: self.total_order, f"Invalid total order. Total order = {self._total_order}")
return super().validate
@property
def is_entire_order(self) -> bool:
return (
self._item_name == self.ITEM_NAME_ENTIRE_ORDER
and self.price is None
and self.number is None
and self.subtotal_price_item is None
and self.total_order is not None
and self.total_order > 0
and self.credit_card_billing_date == ""
and self.credit_card_billing_amount == ""
)
@property
def is_billing_to_credit_card(self) -> bool:
return (
self._item_name == self.ITEM_NAME_BILLING_TO_CREDIT_CARD
and self.price is None
and self.number is None
and self.subtotal_price_item is None
and self.total_order is None
and self.credit_card_billing_date != ""
and self.credit_card_billing_amount != ""
)
@property
def is_shipping_handling(self) -> bool:
return (
self._item_name == self.ITEM_NAME_SHIPPING_HANDLING
and self.price is None
and self.number is None
and self.subtotal_price_item is not None
and self.total_order is None
and self.credit_card_billing_date == ""
and self.credit_card_billing_amount == ""
)
@property
def is_discount(self) -> bool:
# Includes Amazon point
return (
not self.is_entire_order
and not self.is_billing_to_credit_card
and not self.is_shipping_handling
and self.total_order is not None
and self.total_order < 0
)
@property
def is_payment(self) -> bool:
return (
not self.is_entire_order
and not self.is_billing_to_credit_card
and not self.is_shipping_handling
and not self.is_discount
and self.price is not None
and self.price > 0
and self.number is not None
and self.number > 0
)
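    # Free Kindle orders show up as digital orders with zero amounts and no
    # billing information; the factory at the bottom of this module skips them.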
@property
def is_free_kindle(self) -> bool:
return (
self.price == 0
and self.total_order == 0
and self.subtotal_price_item == 0
and self.destination == ""
and self.status.startswith("デジタル注文:")
and self.billing_amount == "0"
and self.credit_card_billing_date == ""
and self.credit_card_billing_amount == ""
and self.credit_card_identity == ""
)
class Amazon201911Row(InputItemRow):
"""This class implements row model of Amazon.co.jp CSV."""
def __init__(self, row_data: Amazon201911RowData):
super().__init__(FileCsvConvert.AMAZON.value, row_data)
self._store: Store = Store(FileCsvConvertId.AMAZON, StoreRowData("Amazon.co.jp", CONFIG.amazon.store_name_zaim))
@property
def store(self) -> Store:
return self._store
class Amazon201911RowToSkip(Amazon201911Row):
@property
def is_row_to_skip(self) -> bool:
return True
class Amazon201911DiscountRow(Amazon201911Row):
"""This class implements row model of Amazon.co.jp CSV."""
def __init__(self, row_data: Amazon201911RowData):
super().__init__(row_data)
self._total_order: Optional[int] = row_data.total_order
@property
def total_order(self) -> int:
if self._total_order is None:
raise ValueError("Total order on discount row is not allowed empty.")
return self._total_order
@property
def validate(self) -> bool:
self.stock_error(
lambda: self.total_order, f"Total order in discount row is required. Total order = {self._total_order}"
)
return super().validate
class Amazon201911ShippingHandlingRow(Amazon201911Row):
"""Row model of shipping / handling of Amazon.co.jp CSV."""
def __init__(self, row_data):
super().__init__(row_data)
self._subtotal_price_item: Optional[int] = row_data.subtotal_price_item
@property
def subtotal_price_item(self):
if self._subtotal_price_item is None:
raise ValueError("Subtotal price item on shipping handling row is not allowed empty.")
return self._subtotal_price_item
@property
def validate(self) -> bool:
self.stock_error(
lambda: self.subtotal_price_item,
"Subtotal price item in Shipping handling row is required. "
f"Subtotal price item = {self.subtotal_price_item}",
)
return super().validate
class Amazon201911PaymentRow(Amazon201911Row):
"""This class implements row model of Amazon.co.jp CSV."""
def __init__(self, row_data: Amazon201911RowData):
super().__init__(row_data)
self._price: Optional[int] = row_data.price
self._number: Optional[int] = row_data.number
@property
def price(self) -> int:
if self._price is None:
raise ValueError("Price on payment row is not allowed empty.")
return self._price
@property
def number(self) -> int:
if self._number is None:
raise ValueError("Number on payment row is not allowed empty.")
return self._number
@property
def validate(self) -> bool:
self.stock_error(lambda: self.price, f"Price in payment row is required. Price = {self._price}")
self.stock_error(lambda: self.number, f"Number in payment row is required. Number = {self._number}")
return super().validate
class Amazon201911RowFactory(InputRowFactory[Amazon201911RowData, Amazon201911Row]):
"""This class implements factory to create Amazon.co.jp CSV row instance."""
def create(self, input_row_data: Amazon201911RowData) -> Amazon201911Row:
# @see https://github.com/furyutei/amzOrderHistoryFilter/issues/3#issuecomment-543645937
if input_row_data.is_billing_to_credit_card or input_row_data.is_free_kindle:
return Amazon201911RowToSkip(input_row_data)
if input_row_data.is_discount:
return Amazon201911DiscountRow(input_row_data)
if input_row_data.is_shipping_handling:
return Amazon201911ShippingHandlingRow(input_row_data)
if input_row_data.is_payment:
return Amazon201911PaymentRow(input_row_data)
raise ValueError(
            'Cash flow kind is not supported. '
            f'Order date = {input_row_data.date}, '
            f'item name = {input_row_data.item_name}'
) # pragma: no cover
# Reason: This line is insurance for future development so process must be not able to reach
| zaimcsvconverter/inputcsvformats/amazon_201911.py | 9,135 | Row model of Amazon.co.jp CSV; docstrings and comments extracted from the source above. | 731 | en | 0.630207 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# flake8: noqa
#
# AppDaemon documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 11 14:36:18 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
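# Mock modules that may be absent in the docs build environment so autodoc can
# still import the AppDaemon packages, then make the project root importable.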
autodoc_mock_imports = ["iso8601", "dateutil"]
sys.path.insert(0, os.path.abspath(".."))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.napoleon"]
autodoc_member_order = "bysource"
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "AppDaemon"
copyright = "2021, Andrew Cockburn"
author = "Andrew Cockburn"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "4.0.7"
# The full version, including alpha/beta/rc tags.
release = "4.0.7"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
# html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "AppDaemondoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "AppDaemon.tex", "AppDaemon Documentation", "Andrew Cockburn", "manual",),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "appdaemon", "AppDaemon Documentation", [author], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"AppDaemon",
"AppDaemon Documentation",
author,
"AppDaemon",
"Sandboxed python Apps for automation",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# omit class name during the generation of the doc
add_module_names = False
| docs/conf.py | 9,549 | AppDaemon Sphinx documentation build configuration; comments extracted from the source above. | 8,018 | en | 0.661719 |
# -*- coding: utf-8 -*-
import pytest
from pyleecan.Classes.LamSlotMag import LamSlotMag
from pyleecan.Classes.SlotM14 import SlotM14
from numpy import pi, exp, sqrt, angle
from pyleecan.Methods.Slot.Slot.comp_height import comp_height
from pyleecan.Methods.Slot.Slot.comp_surface import comp_surface
from pyleecan.Methods.Slot.Slot.comp_angle_opening import comp_angle_opening
from pyleecan.Methods.Slot.Slot.comp_height_active import comp_height_active
from pyleecan.Methods.Slot.Slot.comp_surface_active import comp_surface_active
from pyleecan.Methods import ParentMissingError
Mag14_test = list()
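# Each dict pairs a test lamination ("test_obj") with expected values:
# S_exp = slot surface [m^2], SA_exp = active (magnet) surface [m^2],
# H_exp = slot height [m], HA_exp = active height [m],
# Ao = opening angle [rad], Rmec = mechanical radius [m].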
# Internal Slot inset
lam = LamSlotMag(Rint=40e-3, Rext=90e-3, is_internal=True)
lam.slot = SlotM14(Zs=4, W0=0.628, H0=0.02, Hmag=0.02, Wmag=0.628, Rtopm=0.04)
Mag14_test.append(
{
"test_obj": lam,
"Rmec": 90e-3,
"S_exp": 0.0010048,
"SA_exp": 9.022e-4,
"Ao": 0.628,
"H_exp": 0.02,
"HA_exp": 0.02,
}
)
# Internal slot surface
lam = LamSlotMag(Rint=40e-3, Rext=90e-3, is_internal=True)
lam.slot = SlotM14(Zs=8, W0=0.628, H0=0, Hmag=0.02, Wmag=0.628, Rtopm=0.05)
Mag14_test.append(
{
"test_obj": lam,
"Rmec": 0.11,
"S_exp": 0,
"SA_exp": 1.1089e-3,
"Ao": 0.628,
"H_exp": 0,
"HA_exp": 0.02,
}
)
# For AlmostEqual
DELTA = 1e-4
@pytest.mark.METHODS
class Test_Magnet_Type_14_meth(object):
"""unittest for MagnetType14 methods"""
@pytest.mark.parametrize("test_dict", Mag14_test)
def test_comp_surface(self, test_dict):
"""Check that the computation of the surface is correct"""
test_obj = test_dict["test_obj"]
result = test_obj.slot.comp_surface()
a = result
b = test_dict["S_exp"]
msg = "Return " + str(a) + " expected " + str(b)
assert a == pytest.approx(b, rel=DELTA), msg
# Check that the analytical method returns the same result as the numerical one
b = comp_surface(test_obj.slot)
msg = "Return " + str(a) + " expected " + str(b)
assert a == pytest.approx(b, rel=DELTA), msg
@pytest.mark.parametrize("test_dict", Mag14_test)
def test_comp_surface_active(self, test_dict):
"""Check that the computation of the active surface is correct"""
test_obj = test_dict["test_obj"]
result = test_obj.slot.comp_surface_active()
a = result
b = test_dict["SA_exp"]
msg = "Return " + str(a) + " expected " + str(b)
assert a == pytest.approx(b, rel=DELTA), msg
# Check that the analytical method returns the same result as the numerical one
b = comp_surface_active(test_obj.slot, Ndisc=1000)
msg = "Return " + str(a) + " expected " + str(b)
assert a == pytest.approx(b, rel=DELTA), msg
@pytest.mark.parametrize("test_dict", Mag14_test)
def test_comp_height(self, test_dict):
"""Check that the computation of the height is correct"""
test_obj = test_dict["test_obj"]
result = test_obj.slot.comp_height()
a = result
b = test_dict["H_exp"]
msg = "Return " + str(a) + " expected " + str(b)
assert a == pytest.approx(b, rel=DELTA), msg
# Check that the analytical method returns the same result as the numerical one
b = comp_height(test_obj.slot)
msg = "Return " + str(a) + " expected " + str(b)
assert a == pytest.approx(b, rel=DELTA), msg
@pytest.mark.parametrize("test_dict", Mag14_test)
def test_comp_height_active(self, test_dict):
"""Check that the computation of the active height is correct"""
test_obj = test_dict["test_obj"]
result = test_obj.slot.comp_height_active()
a = result
b = test_dict["HA_exp"]
msg = "Return " + str(a) + " expected " + str(b)
# assert a == pytest.approx(b, rel=DELTA), msg
# Check that the analytical method returns the same result as the numerical one
b = comp_height_active(test_obj.slot)
msg = "Return " + str(a) + " expected " + str(b)
assert a == pytest.approx(b, rel=DELTA), msg
@pytest.mark.parametrize("test_dict", Mag14_test)
def test_comp_angle_opening(self, test_dict):
"""Check that the computation of the average opening angle is correct"""
test_obj = test_dict["test_obj"]
a = test_obj.slot.comp_angle_opening()
assert a == pytest.approx(test_dict["Ao"], rel=DELTA)
# Check that the analytical method returns the same result as the numerical one
b = comp_angle_opening(test_obj.slot)
msg = "Return " + str(a) + " expected " + str(b)
assert a == pytest.approx(b, rel=DELTA)
@pytest.mark.parametrize("test_dict", Mag14_test)
def test_comp_width_opening(self, test_dict):
"""Check that the computation of the average opening width is correct"""
test_obj = test_dict["test_obj"]
a = test_obj.slot.comp_width_opening()
point_dict = test_obj.slot._comp_point_coordinate()
assert a == pytest.approx(abs(point_dict["Z1"] - point_dict["Z4"]), rel=DELTA)
@pytest.mark.parametrize("test_dict", Mag14_test)
def test_comp_mec_radius(self, test_dict):
"""Check that the computation of the mechanical radius is correct"""
test_obj = test_dict["test_obj"]
a = test_obj.comp_radius_mec()
assert a == pytest.approx(test_dict["Rmec"], rel=DELTA)
@pytest.mark.parametrize("test_dict", Mag14_test)
def test_comp_point_coordinate(self, test_dict):
"""Check that the point coordinates are correct"""
test_obj = test_dict["test_obj"]
point_dict = test_obj.slot._comp_point_coordinate()
Z1 = point_dict["Z1"]
Z2 = point_dict["Z2"]
Z3 = point_dict["Z3"]
Z4 = point_dict["Z4"]
ZM0 = point_dict["ZM0"]
ZM1 = point_dict["ZM1"]
ZM2 = point_dict["ZM2"]
ZM3 = point_dict["ZM3"]
ZM4 = point_dict["ZM4"]
W0 = test_obj.slot.W0
H0 = test_obj.slot.H0
Wmag = test_obj.slot.Wmag
Hmag = test_obj.slot.Hmag
Rbo = test_obj.get_Rbo()
assert abs(Z1) == pytest.approx(Rbo, rel=DELTA)
assert angle(Z1) == pytest.approx(-W0 / 2, rel=DELTA)
assert abs(Z4) == pytest.approx(Rbo, rel=DELTA)
assert angle(Z4) == pytest.approx(W0 / 2, rel=DELTA)
if test_obj.is_internal:
assert abs(Z2) == pytest.approx(Rbo - H0, rel=DELTA)
assert abs(Z3) == pytest.approx(Rbo - H0, rel=DELTA)
else:
assert abs(Z3) == pytest.approx(Rbo + H0, rel=DELTA)
assert abs(Z2) == pytest.approx(Rbo + H0, rel=DELTA)
assert angle(Z2) == pytest.approx(-W0 / 2, rel=DELTA)
assert angle(Z3) == pytest.approx(W0 / 2, rel=DELTA)
assert angle(ZM1) == pytest.approx(angle(ZM2), rel=DELTA)
assert angle(ZM1) == pytest.approx(-Wmag / 2, rel=DELTA)
assert angle(ZM3) == pytest.approx(angle(ZM4), rel=DELTA)
assert angle(ZM3) == pytest.approx(Wmag / 2, rel=DELTA)
if test_obj.is_internal:
assert ZM0 == pytest.approx(Rbo + Hmag - H0, rel=DELTA)
else:
assert ZM0 == pytest.approx(Rbo - Hmag + H0, rel=DELTA)
| Tests/Methods/Slot/test_SlotM14_meth.py | 7,279 | Unit tests for MagnetType14 slot methods; docstrings and comments extracted from the source above. | 1,016 | en | 0.846022 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.tests.v1_1 import fakes
from heat.engine.resources import instance as instances
from heat.engine.resources import nova_utils
from heat.common import template_format
from heat.engine import scheduler
from heat.tests.common import HeatTestCase
from heat.tests import utils
nokey_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "NoKey Test",
"Parameters" : {},
"Resources" : {
"WebServer": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId" : "foo",
"InstanceType" : "m1.large",
"UserData" : "some data"
}
}
}
}
'''
class nokeyTest(HeatTestCase):
def setUp(self):
super(nokeyTest, self).setUp()
self.fc = fakes.FakeClient()
utils.setup_dummy_db()
def test_nokey_create(self):
stack_name = 'instance_create_test_nokey_stack'
t = template_format.parse(nokey_template)
stack = utils.parse_stack(t, stack_name=stack_name)
t['Resources']['WebServer']['Properties']['ImageId'] = 'CentOS 5.2'
t['Resources']['WebServer']['Properties']['InstanceType'] = \
'256 MB Server'
instance = instances.Instance('create_instance_name',
t['Resources']['WebServer'], stack)
self.m.StubOutWithMock(instance, 'nova')
instance.nova().MultipleTimes().AndReturn(self.fc)
instance.t = instance.stack.resolve_runtime_data(instance.t)
# need to resolve the template functions
server_userdata = nova_utils.build_userdata(
instance,
instance.t['Properties']['UserData'])
instance.mime_string = server_userdata
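        # The template defines no KeyName, so the resource is expected to call
        # nova's servers.create with key_name=None (checked below).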
self.m.StubOutWithMock(self.fc.servers, 'create')
self.fc.servers.create(
image=1, flavor=1, key_name=None,
name=utils.PhysName(stack_name, instance.name),
security_groups=None,
userdata=server_userdata, scheduler_hints=None,
meta=None, nics=None, availability_zone=None).AndReturn(
self.fc.servers.list()[1])
self.m.ReplayAll()
scheduler.TaskRunner(instance.create)()
| heat/tests/test_nokey.py | 2,798 | vim: tabstop=4 shiftwidth=4 softtabstop=4 Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. need to resolve the template functions | 630 | en | 0.829516 |
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from six.moves import range
import numpy as np
import utool
from ibeis.control import SQLDatabaseControl as sqldbc
from ibeis.control._sql_helpers import _results_gen
from os.path import join
print, print_, printDBG, rrr, profile = utool.inject(__name__, '[TEST_SQL_NUMPY] ')
# list of 10,000 chips with 3,000 features apiece.
def grab_numpy_testdata(shape=(3e3, 128), dtype=np.uint8):
ndata = utool.get_argval('--ndata', type_=int, default=2)
print('[TEST] build ndata=%d numpy arrays with shape=%r' % (ndata, shape))
print(' * expected_memory(table_list) = %s' % utool.byte_str2(ndata * np.product(shape)))
table_list = [np.empty(shape, dtype=dtype) for i in range(ndata)]
print(' * memory+overhead(table_list) = %s' % utool.byte_str2(utool.get_object_size(table_list)))
return table_list
def TEST_SQL_NUMPY():
sqldb_fname = 'temp_test_sql_numpy.sqlite3'
sqldb_dpath = utool.util_cplat.get_app_resource_dir('ibeis', 'testfiles')
utool.ensuredir(sqldb_dpath)
utool.util_path.remove_file(join(sqldb_dpath, sqldb_fname), dryrun=False)
db = sqldbc.SQLDatabaseController(sqldb_dpath=sqldb_dpath,
sqldb_fname=sqldb_fname)
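    # 'NUMPY' is a custom column type handled by SQLDatabaseController, used
    # here so whole numpy arrays can be stored in a single SQLite column.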
db.add_table('temp', [
('temp_id', 'INTEGER PRIMARY KEY'),
('temp_hash', 'NUMPY'),
])
tt = utool.tic()
feats_list = grab_numpy_testdata(shape=(3e3, 128), dtype=np.uint8)
print(' * numpy.new time=%r sec' % utool.toc(tt))
print('[TEST] insert numpy arrays')
tt = utool.tic()
feats_iter = ((feats, ) for feats in feats_list)
db.executemany(operation='''
INSERT
INTO temp
(
temp_hash
)
VALUES (?)
''', params_iter=feats_iter)
print(' * execute insert time=%r sec' % utool.toc(tt))
print('[TEST] save sql database')
tt = utool.tic()
#db.cur.commit()
db.connection.commit()
print(' * commit time=%r sec' % utool.toc(tt))
print('[TEST] read from sql database')
tt = utool.tic()
db.cur.execute('SELECT temp_hash FROM temp', [])
print(' * execute select time=%r sec' % utool.toc(tt))
tt = utool.tic()
result_list = _results_gen(db.cur)
print(' * iter results time=%r sec' % utool.toc(tt))
print(' * memory(result_list) = %s' % utool.byte_str2(utool.get_object_size(result_list)))
del result_list
#print('[TEST] result_list=%r' % result_list)
print('[TEST] dump sql database')
tt = utool.tic()
db.dump('temp.dump.txt')
print(' * dump time=%r sec' % utool.toc(tt))
#with open('temp.dump.txt') as file_:
# print(file_.read())
return locals()
if __name__ == '__main__':
import multiprocessing
multiprocessing.freeze_support() # For win32
test_locals = utool.run_test(TEST_SQL_NUMPY)
execstr = utool.execstr_dict(test_locals, 'test_locals')
exec(execstr)
| _broken/test_sql_numpy.py | 3,008 | !/usr/bin/env python2.7 -*- coding: utf-8 -*- list of 10,000 chips with 3,000 features apeice.db.cur.commit()print('[TEST] result_list=%r' % result_list)with open('temp.dump.txt') as file_: print(file_.read()) For win32 | 222 | en | 0.603988 |
"""
Tests for Markov Autoregression models
Author: Chad Fulton
License: BSD-3
"""
import warnings
import os
import numpy as np
from numpy.testing import assert_equal, assert_allclose
import pandas as pd
import pytest
from statsmodels.tools import add_constant
from statsmodels.tsa.regime_switching import markov_autoregression
current_path = os.path.dirname(os.path.abspath(__file__))
rgnp = [2.59316421, 2.20217133, 0.45827562, 0.9687438,
-0.24130757, 0.89647478, 2.05393219, 1.73353648,
0.93871289, -0.46477833, -0.80983406, -1.39763689,
-0.39886093, 1.1918416, 1.45620048, 2.11808228,
1.08957863, 1.32390273, 0.87296367, -0.19773273,
0.45420215, 0.07221876, 1.1030364, 0.82097489,
-0.05795795, 0.58447772, -1.56192672, -2.05041027,
0.53637183, 2.33676839, 2.34014559, 1.2339263,
1.8869648, -0.45920792, 0.84940469, 1.70139849,
-0.28756312, 0.09594627, -0.86080289, 1.03447127,
1.23685944, 1.42004502, 2.22410631, 1.30210173,
1.03517699, 0.9253425, -0.16559951, 1.3444382,
1.37500131, 1.73222184, 0.71605635, 2.21032143,
0.85333031, 1.00238776, 0.42725441, 2.14368343,
1.43789184, 1.57959926, 2.27469826, 1.95962656,
0.25992399, 1.01946914, 0.49016398, 0.5636338,
0.5959546, 1.43082857, 0.56230122, 1.15388393,
1.68722844, 0.77438205, -0.09647045, 1.39600146,
0.13646798, 0.55223715, -0.39944872, -0.61671102,
-0.08722561, 1.2101835, -0.90729755, 2.64916158,
-0.0080694, 0.51111895, -0.00401437, 2.16821432,
1.92586732, 1.03504717, 1.85897219, 2.32004929,
0.25570789, -0.09855274, 0.89073682, -0.55896485,
0.28350255, -1.31155407, -0.88278776, -1.97454941,
1.01275265, 1.68264723, 1.38271284, 1.86073637,
0.4447377, 0.41449001, 0.99202275, 1.36283576,
1.59970522, 1.98845816, -0.25684232, 0.87786949,
3.1095655, 0.85324478, 1.23337317, 0.00314302,
-0.09433369, 0.89883322, -0.19036628, 0.99772376,
-2.39120054, 0.06649673, 1.26136017, 1.91637838,
-0.3348029, 0.44207108, -1.40664911, -1.52129889,
0.29919869, -0.80197448, 0.15204792, 0.98585027,
2.13034606, 1.34397924, 1.61550522, 2.70930099,
1.24461412, 0.50835466, 0.14802167]
rec = [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]
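# rgnp is the quarterly real GNP growth series from Hamilton's (1989)
# Markov-switching AR application; rec is a 0/1 recession indicator covering
# the same sample period.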
def test_predict():
# AR(1) without mean, k_regimes=2
endog = np.ones(10)
with pytest.warns(FutureWarning):
markov_autoregression.MarkovAutoregression(
endog,
k_regimes=2,
order=1,
trend='nc'
)
mod = markov_autoregression.MarkovAutoregression(
endog, k_regimes=2, order=1, trend='n')
assert_equal(mod.nobs, 9)
assert_equal(mod.endog, np.ones(9))
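    # Parameter layout for this no-mean model: two regime-transition
    # parameters, the error variance (1.), then the regime-specific AR(1)
    # coefficients (0.1 for regime 0, 0.5 for regime 1).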
params = np.r_[0.5, 0.5, 1., 0.1, 0.5]
mod_resid = mod._resid(params)
resids = np.zeros((2, 2, mod.nobs))
# Resids when: S_{t} = 0
resids[0, :, :] = np.ones(9) - 0.1 * np.ones(9)
assert_allclose(mod_resid[0, :, :], resids[0, :, :])
# Resids when: S_{t} = 1
resids[1, :, :] = np.ones(9) - 0.5 * np.ones(9)
assert_allclose(mod_resid[1, :, :], resids[1, :, :])
# AR(1) with mean, k_regimes=2
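    # With a switching mean, the residual conditional on (S_t, S_{t-1}) is
    # (y_t - mu[S_t]) - phi[S_t] * (y_{t-1} - mu[S_{t-1}]); the blocks below
    # spell this out for each regime combination.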
endog = np.arange(10)
mod = markov_autoregression.MarkovAutoregression(
endog, k_regimes=2, order=1)
assert_equal(mod.nobs, 9)
assert_equal(mod.endog, np.arange(1, 10))
params = np.r_[0.5, 0.5, 2., 3., 1., 0.1, 0.5]
mod_resid = mod._resid(params)
resids = np.zeros((2, 2, mod.nobs))
# Resids when: S_t = 0, S_{t-1} = 0
resids[0, 0, :] = (np.arange(1, 10) - 2.) - 0.1 * (np.arange(9) - 2.)
assert_allclose(mod_resid[0, 0, :], resids[0, 0, :])
# Resids when: S_t = 0, S_{t-1} = 1
resids[0, 1, :] = (np.arange(1, 10) - 2.) - 0.1 * (np.arange(9) - 3.)
assert_allclose(mod_resid[0, 1, :], resids[0, 1, :])
# Resids when: S_t = 1, S_{t-1} = 0
resids[1, 0, :] = (np.arange(1, 10) - 3.) - 0.5 * (np.arange(9) - 2.)
assert_allclose(mod_resid[1, 0, :], resids[1, 0, :])
# Resids when: S_t = 1, S_{t-1} = 1
resids[1, 1, :] = (np.arange(1, 10) - 3.) - 0.5 * (np.arange(9) - 3.)
assert_allclose(mod_resid[1, 1, :], resids[1, 1, :])
# AR(2) with mean, k_regimes=3
endog = np.arange(10)
mod = markov_autoregression.MarkovAutoregression(
endog, k_regimes=3, order=2)
assert_equal(mod.nobs, 8)
assert_equal(mod.endog, np.arange(2, 10))
params = np.r_[[0.3] * 6, 2., 3., 4, 1., 0.1, 0.5, 0.8, -0.05, -0.25, -0.4]
mod_resid = mod._resid(params)
resids = np.zeros((3, 3, 3, mod.nobs))
# Resids when: S_t = 0, S_{t-1} = 0, S_{t-2} = 0
resids[0, 0, 0, :] = (
(np.arange(2, 10) - 2.) -
0.1 * (np.arange(1, 9) - 2.) -
(-0.05) * (np.arange(8) - 2.))
assert_allclose(mod_resid[0, 0, 0, :], resids[0, 0, 0, :])
# Resids when: S_t = 1, S_{t-1} = 0, S_{t-2} = 0
resids[1, 0, 0, :] = (
(np.arange(2, 10) - 3.) -
0.5 * (np.arange(1, 9) - 2.) -
(-0.25) * (np.arange(8) - 2.))
assert_allclose(mod_resid[1, 0, 0, :], resids[1, 0, 0, :])
# Resids when: S_t = 0, S_{t-1} = 2, S_{t-2} = 1
resids[0, 2, 1, :] = (
(np.arange(2, 10) - 2.) -
0.1 * (np.arange(1, 9) - 4.) -
(-0.05) * (np.arange(8) - 3.))
assert_allclose(mod_resid[0, 2, 1, :], resids[0, 2, 1, :])
# AR(1) with mean + non-switching exog
endog = np.arange(10)
exog = np.r_[0.4, 5, 0.2, 1.2, -0.3, 2.5, 0.2, -0.7, 2., -1.1]
mod = markov_autoregression.MarkovAutoregression(
endog, k_regimes=2, order=1, exog=exog)
assert_equal(mod.nobs, 9)
assert_equal(mod.endog, np.arange(1, 10))
params = np.r_[0.5, 0.5, 2., 3., 1.5, 1., 0.1, 0.5]
mod_resid = mod._resid(params)
resids = np.zeros((2, 2, mod.nobs))
# Resids when: S_t = 0, S_{t-1} = 0
resids[0, 0, :] = (
(np.arange(1, 10) - 2. - 1.5 * exog[1:]) -
0.1 * (np.arange(9) - 2. - 1.5 * exog[:-1]))
assert_allclose(mod_resid[0, 0, :], resids[0, 0, :])
# Resids when: S_t = 0, S_{t-1} = 1
resids[0, 1, :] = (
(np.arange(1, 10) - 2. - 1.5 * exog[1:]) -
0.1 * (np.arange(9) - 3. - 1.5 * exog[:-1]))
assert_allclose(mod_resid[0, 1, :], resids[0, 1, :])
# Resids when: S_t = 1, S_{t-1} = 0
resids[1, 0, :] = (
(np.arange(1, 10) - 3. - 1.5 * exog[1:]) -
0.5 * (np.arange(9) - 2. - 1.5 * exog[:-1]))
assert_allclose(mod_resid[1, 0, :], resids[1, 0, :])
# Resids when: S_t = 1, S_{t-1} = 1
resids[1, 1, :] = (
(np.arange(1, 10) - 3. - 1.5 * exog[1:]) -
0.5 * (np.arange(9) - 3. - 1.5 * exog[:-1]))
assert_allclose(mod_resid[1, 1, :], resids[1, 1, :])
def test_conditional_loglikelihoods():
# AR(1) without mean, k_regimes=2, non-switching variance
endog = np.ones(10)
mod = markov_autoregression.MarkovAutoregression(
endog, k_regimes=2, order=1)
assert_equal(mod.nobs, 9)
assert_equal(mod.endog, np.ones(9))
params = np.r_[0.5, 0.5, 2., 3., 2., 0.1, 0.5]
resid = mod._resid(params)
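    # With Gaussian errors the conditional likelihood of each observation is
    # exp(-resid**2 / (2 * sigma2)) / sqrt(2 * pi * sigma2); here sigma2 = 2.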
conditional_likelihoods = (
np.exp(-0.5 * resid**2 / 2) / np.sqrt(2 * np.pi * 2))
assert_allclose(mod._conditional_loglikelihoods(params),
np.log(conditional_likelihoods))
# AR(1) without mean, k_regimes=3, switching variance
endog = np.ones(10)
mod = markov_autoregression.MarkovAutoregression(
endog, k_regimes=3, order=1, switching_variance=True)
assert_equal(mod.nobs, 9)
assert_equal(mod.endog, np.ones(9))
params = np.r_[[0.3]*6, 2., 3., 4., 1.5, 3., 4.5, 0.1, 0.5, 0.8]
mod_conditional_loglikelihoods = mod._conditional_loglikelihoods(params)
conditional_likelihoods = mod._resid(params)
# S_t = 0
conditional_likelihoods[0, :, :] = (
np.exp(-0.5 * conditional_likelihoods[0, :, :]**2 / 1.5) /
np.sqrt(2 * np.pi * 1.5))
assert_allclose(mod_conditional_loglikelihoods[0, :, :],
np.log(conditional_likelihoods[0, :, :]))
# S_t = 1
conditional_likelihoods[1, :, :] = (
np.exp(-0.5 * conditional_likelihoods[1, :, :]**2 / 3.) /
np.sqrt(2 * np.pi * 3.))
assert_allclose(mod_conditional_loglikelihoods[1, :, :],
np.log(conditional_likelihoods[1, :, :]))
# S_t = 2
conditional_likelihoods[2, :, :] = (
np.exp(-0.5 * conditional_likelihoods[2, :, :]**2 / 4.5) /
np.sqrt(2 * np.pi * 4.5))
assert_allclose(mod_conditional_loglikelihoods[2, :, :],
np.log(conditional_likelihoods[2, :, :]))
class MarkovAutoregression(object):
@classmethod
def setup_class(cls, true, endog, atol=1e-5, rtol=1e-7, **kwargs):
cls.model = markov_autoregression.MarkovAutoregression(endog, **kwargs)
cls.true = true
cls.result = cls.model.smooth(cls.true['params'])
cls.atol = atol
cls.rtol = rtol
def test_llf(self):
assert_allclose(self.result.llf, self.true['llf'], atol=self.atol,
rtol=self.rtol)
def test_fit(self, **kwargs):
# Test fitting against Stata
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res = self.model.fit(disp=False, **kwargs)
assert_allclose(res.llf, self.true['llf_fit'], atol=self.atol,
rtol=self.rtol)
@pytest.mark.smoke
def test_fit_em(self, **kwargs):
# Test EM fitting (smoke test)
res_em = self.model._fit_em(**kwargs)
assert_allclose(res_em.llf, self.true['llf_fit_em'], atol=self.atol,
rtol=self.rtol)
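# Regression-test fixtures for the 10-observation Hamilton AR(2) example below:
# joint regime probabilities with shape (k_regimes, k_regimes, k_regimes, nobs)
# = (2, 2, 2, 8). The "predicted" array keeps one extra lag dimension, which
# the tests sum out when the model reports fewer dimensions.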
hamilton_ar2_short_filtered_joint_probabilities = np.array([
[[[4.99506987e-02, 6.44048275e-04, 6.22227140e-05,
4.45756755e-06, 5.26645567e-07, 7.99846146e-07,
1.19425705e-05, 6.87762063e-03],
[1.95930395e-02, 3.25884335e-04, 1.12955091e-04,
3.38537103e-04, 9.81927968e-06, 2.71696750e-05,
5.83828290e-03, 7.64261509e-02]],
[[1.97113193e-03, 9.50372207e-05, 1.98390978e-04,
1.88188953e-06, 4.83449400e-07, 1.14872860e-05,
4.02918239e-06, 4.35015431e-04],
[2.24870443e-02, 1.27331172e-03, 9.62155856e-03,
4.04178695e-03, 2.75516282e-04, 1.18179572e-02,
5.99778157e-02, 1.48149567e-01]]],
[[[6.70912859e-02, 1.84223872e-02, 2.55621792e-04,
4.48500688e-05, 7.80481515e-05, 2.73734559e-06,
7.59835896e-06, 1.42930726e-03],
[2.10053328e-02, 7.44036383e-03, 3.70388879e-04,
2.71878370e-03, 1.16152088e-03, 7.42182691e-05,
2.96490192e-03, 1.26774695e-02]],
[[8.09335679e-02, 8.31016518e-02, 2.49149080e-02,
5.78825626e-04, 2.19019941e-03, 1.20179130e-03,
7.83659430e-05, 2.76363377e-03],
[7.36967899e-01, 8.88697316e-01, 9.64463954e-01,
9.92270877e-01, 9.96283886e-01, 9.86863839e-01,
9.31117063e-01, 7.51241236e-01]]]])
hamilton_ar2_short_predicted_joint_probabilities = np.array([[
[[[1.20809334e-01, 3.76964436e-02, 4.86045844e-04,
4.69578023e-05, 3.36400588e-06, 3.97445190e-07,
6.03622290e-07, 9.01273552e-06],
[3.92723623e-02, 1.47863379e-02, 2.45936108e-04,
8.52441571e-05, 2.55484811e-04, 7.41034525e-06,
2.05042201e-05, 4.40599447e-03]],
[[4.99131230e-03, 1.48756005e-03, 7.17220245e-05,
1.49720314e-04, 1.42021122e-06, 3.64846209e-07,
8.66914462e-06, 3.04071516e-06],
[4.70476003e-02, 1.69703652e-02, 9.60933974e-04,
7.26113047e-03, 3.05022748e-03, 2.07924699e-04,
8.91869322e-03, 4.52636381e-02]]],
[[[4.99131230e-03, 6.43506069e-03, 1.76698327e-03,
2.45179642e-05, 4.30179435e-06, 7.48598845e-06,
2.62552503e-07, 7.28796600e-07],
[1.62256192e-03, 2.01472650e-03, 7.13642497e-04,
3.55258493e-05, 2.60772139e-04, 1.11407276e-04,
7.11864528e-06, 2.84378568e-04]],
[[5.97950448e-03, 7.76274317e-03, 7.97069493e-03,
2.38971340e-03, 5.55180599e-05, 2.10072977e-04,
1.15269812e-04, 7.51646942e-06],
[5.63621989e-02, 7.06862760e-02, 8.52394030e-02,
9.25065601e-02, 9.51736612e-02, 9.55585689e-02,
9.46550451e-02, 8.93080931e-02]]]],
[[[[3.92723623e-02, 1.22542551e-02, 1.58002431e-04,
1.52649118e-05, 1.09356167e-06, 1.29200377e-07,
1.96223855e-07, 2.92983500e-06],
[1.27665503e-02, 4.80670161e-03, 7.99482261e-05,
2.77109335e-05, 8.30522919e-05, 2.40893443e-06,
6.66545485e-06, 1.43228843e-03]],
[[1.62256192e-03, 4.83571884e-04, 2.33151963e-05,
4.86706634e-05, 4.61678312e-07, 1.18603191e-07,
2.81814142e-06, 9.88467229e-07],
[1.52941031e-02, 5.51667911e-03, 3.12377744e-04,
2.36042810e-03, 9.91559466e-04, 6.75915830e-05,
2.89926399e-03, 1.47141776e-02]]],
[[[4.70476003e-02, 6.06562252e-02, 1.66554040e-02,
2.31103828e-04, 4.05482745e-05, 7.05621631e-05,
2.47479309e-06, 6.86956236e-06],
[1.52941031e-02, 1.89906063e-02, 6.72672133e-03,
3.34863029e-04, 2.45801156e-03, 1.05011361e-03,
6.70996238e-05, 2.68052335e-03]],
[[5.63621989e-02, 7.31708248e-02, 7.51309569e-02,
2.25251946e-02, 5.23307566e-04, 1.98012644e-03,
1.08652148e-03, 7.08494735e-05],
[5.31264334e-01, 6.66281623e-01, 8.03457913e-01,
8.71957394e-01, 8.97097216e-01, 9.00725317e-01,
8.92208794e-01, 8.41808970e-01]]]]])
hamilton_ar2_short_smoothed_joint_probabilities = np.array([
[[[1.29898189e-02, 1.66298475e-04, 1.29822987e-05,
9.95268382e-07, 1.84473346e-07, 7.18761267e-07,
1.69576494e-05, 6.87762063e-03],
[5.09522472e-03, 8.41459714e-05, 2.35672254e-05,
7.55872505e-05, 3.43949612e-06, 2.44153330e-05,
8.28997024e-03, 7.64261509e-02]],
[[5.90021731e-04, 2.55342733e-05, 4.50698224e-05,
5.30734135e-07, 1.80741761e-07, 1.11483792e-05,
5.98539007e-06, 4.35015431e-04],
[6.73107901e-03, 3.42109009e-04, 2.18579464e-03,
1.13987259e-03, 1.03004157e-04, 1.14692946e-02,
8.90976350e-02, 1.48149567e-01]]],
[[[6.34648123e-02, 1.79187451e-02, 2.37462147e-04,
3.55542558e-05, 7.63980455e-05, 2.90520820e-06,
8.17644492e-06, 1.42930726e-03],
[1.98699352e-02, 7.23695477e-03, 3.44076057e-04,
2.15527721e-03, 1.13696383e-03, 7.87695658e-05,
3.19047276e-03, 1.26774695e-02]],
[[8.81925054e-02, 8.33092133e-02, 2.51106301e-02,
5.81007470e-04, 2.19065072e-03, 1.20221350e-03,
7.56893839e-05, 2.76363377e-03],
[8.03066603e-01, 8.90916999e-01, 9.72040418e-01,
9.96011175e-01, 9.96489179e-01, 9.87210535e-01,
8.99315113e-01, 7.51241236e-01]]]])
class TestHamiltonAR2Short(MarkovAutoregression):
# This is just a set of regression tests
@classmethod
def setup_class(cls):
true = {
'params': np.r_[0.754673, 0.095915, -0.358811, 1.163516,
np.exp(-0.262658)**2, 0.013486, -0.057521],
'llf': -10.14066,
'llf_fit': -4.0523073,
'llf_fit_em': -8.885836
}
super(TestHamiltonAR2Short, cls).setup_class(
true, rgnp[-10:], k_regimes=2, order=2, switching_ar=False)
def test_fit_em(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
super(TestHamiltonAR2Short, self).test_fit_em()
def test_filter_output(self, **kwargs):
res = self.result
# Filtered
assert_allclose(res.filtered_joint_probabilities,
hamilton_ar2_short_filtered_joint_probabilities)
# Predicted
desired = hamilton_ar2_short_predicted_joint_probabilities
if desired.ndim > res.predicted_joint_probabilities.ndim:
desired = desired.sum(axis=-2)
assert_allclose(res.predicted_joint_probabilities, desired)
def test_smoother_output(self, **kwargs):
res = self.result
# Filtered
assert_allclose(res.filtered_joint_probabilities,
hamilton_ar2_short_filtered_joint_probabilities)
# Predicted
desired = hamilton_ar2_short_predicted_joint_probabilities
if desired.ndim > res.predicted_joint_probabilities.ndim:
desired = desired.sum(axis=-2)
assert_allclose(res.predicted_joint_probabilities, desired)
# Smoothed, entry-by-entry
assert_allclose(
res.smoothed_joint_probabilities[..., -1],
hamilton_ar2_short_smoothed_joint_probabilities[..., -1])
assert_allclose(
res.smoothed_joint_probabilities[..., -2],
hamilton_ar2_short_smoothed_joint_probabilities[..., -2])
assert_allclose(
res.smoothed_joint_probabilities[..., -3],
hamilton_ar2_short_smoothed_joint_probabilities[..., -3])
assert_allclose(
res.smoothed_joint_probabilities[..., :-3],
hamilton_ar2_short_smoothed_joint_probabilities[..., :-3])
hamilton_ar4_filtered = [
0.776712, 0.949192, 0.996320, 0.990258, 0.940111, 0.537442,
0.140001, 0.008942, 0.048480, 0.614097, 0.910889, 0.995463,
0.979465, 0.992324, 0.984561, 0.751038, 0.776268, 0.522048,
0.814956, 0.821786, 0.472729, 0.673567, 0.029031, 0.001556,
0.433276, 0.985463, 0.995025, 0.966067, 0.998445, 0.801467,
0.960997, 0.996431, 0.461365, 0.199357, 0.027398, 0.703626,
0.946388, 0.985321, 0.998244, 0.989567, 0.984510, 0.986811,
0.793788, 0.973675, 0.984848, 0.990418, 0.918427, 0.998769,
0.977647, 0.978742, 0.927635, 0.998691, 0.988934, 0.991654,
0.999288, 0.999073, 0.918636, 0.987710, 0.966876, 0.910015,
0.826150, 0.969451, 0.844049, 0.941525, 0.993363, 0.949978,
0.615206, 0.970915, 0.787585, 0.707818, 0.200476, 0.050835,
0.140723, 0.809850, 0.086422, 0.990344, 0.785963, 0.817425,
0.659152, 0.996578, 0.992860, 0.948501, 0.996883, 0.999712,
0.906694, 0.725013, 0.963690, 0.386960, 0.241302, 0.009078,
0.015789, 0.000896, 0.541530, 0.928686, 0.953704, 0.992741,
0.935877, 0.918958, 0.977316, 0.987941, 0.987300, 0.996769,
0.645469, 0.921285, 0.999917, 0.949335, 0.968914, 0.886025,
0.777141, 0.904381, 0.368277, 0.607429, 0.002491, 0.227610,
0.871284, 0.987717, 0.288705, 0.512124, 0.030329, 0.005177,
0.256183, 0.020955, 0.051620, 0.549009, 0.991715, 0.987892,
0.995377, 0.999833, 0.993756, 0.956164, 0.927714]
hamilton_ar4_smoothed = [
0.968096, 0.991071, 0.998559, 0.958534, 0.540652, 0.072784,
0.010999, 0.006228, 0.172144, 0.898574, 0.989054, 0.998293,
0.986434, 0.993248, 0.976868, 0.858521, 0.847452, 0.675670,
0.596294, 0.165407, 0.035270, 0.127967, 0.007414, 0.004944,
0.815829, 0.998128, 0.998091, 0.993227, 0.999283, 0.921100,
0.977171, 0.971757, 0.124680, 0.063710, 0.114570, 0.954701,
0.994852, 0.997302, 0.999345, 0.995817, 0.996218, 0.994580,
0.933990, 0.996054, 0.998151, 0.996976, 0.971489, 0.999786,
0.997362, 0.996755, 0.993053, 0.999947, 0.998469, 0.997987,
0.999830, 0.999360, 0.953176, 0.992673, 0.975235, 0.938121,
0.946784, 0.986897, 0.905792, 0.969755, 0.995379, 0.914480,
0.772814, 0.931385, 0.541742, 0.394596, 0.063428, 0.027829,
0.124527, 0.286105, 0.069362, 0.995950, 0.961153, 0.962449,
0.945022, 0.999855, 0.998943, 0.980041, 0.999028, 0.999838,
0.863305, 0.607421, 0.575983, 0.013300, 0.007562, 0.000635,
0.001806, 0.002196, 0.803550, 0.972056, 0.984503, 0.998059,
0.985211, 0.988486, 0.994452, 0.994498, 0.998873, 0.999192,
0.870482, 0.976282, 0.999961, 0.984283, 0.973045, 0.786176,
0.403673, 0.275418, 0.115199, 0.257560, 0.004735, 0.493936,
0.907360, 0.873199, 0.052959, 0.076008, 0.001653, 0.000847,
0.062027, 0.021257, 0.219547, 0.955654, 0.999851, 0.997685,
0.998324, 0.999939, 0.996858, 0.969209, 0.927714]
class TestHamiltonAR4(MarkovAutoregression):
@classmethod
def setup_class(cls):
# Results from E-views:
# Dependent variable followed by a list of switching regressors:
# rgnp c
# List of non-switching regressors:
# ar(1) ar(2) ar(3) ar(4)
# Do not check "Regime specific error variances"
# Switching type: Markov
# Number of Regimes: 2
# Probability regressors:
# c
# Method SWITCHREG
# Sample 1951q1 1984q4
true = {
'params': np.r_[0.754673, 0.095915, -0.358811, 1.163516,
np.exp(-0.262658)**2, 0.013486, -0.057521,
-0.246983, -0.212923],
'llf': -181.26339,
'llf_fit': -181.26339,
'llf_fit_em': -183.85444,
'bse_oim': np.r_[.0965189, .0377362, .2645396, .0745187, np.nan,
.1199942, .137663, .1069103, .1105311, ]
}
super(TestHamiltonAR4, cls).setup_class(
true, rgnp, k_regimes=2, order=4, switching_ar=False)
def test_filtered_regimes(self):
res = self.result
assert_equal(len(res.filtered_marginal_probabilities[:, 1]),
self.model.nobs)
assert_allclose(res.filtered_marginal_probabilities[:, 1],
hamilton_ar4_filtered, atol=1e-5)
def test_smoothed_regimes(self):
res = self.result
assert_equal(len(res.smoothed_marginal_probabilities[:, 1]),
self.model.nobs)
assert_allclose(res.smoothed_marginal_probabilities[:, 1],
hamilton_ar4_smoothed, atol=1e-5)
def test_bse(self):
# Cannot compare middle element of bse because we estimate sigma^2
# rather than sigma
bse = self.result.cov_params_approx.diagonal()**0.5
assert_allclose(bse[:4], self.true['bse_oim'][:4], atol=1e-6)
assert_allclose(bse[6:], self.true['bse_oim'][6:], atol=1e-6)
class TestHamiltonAR2Switch(MarkovAutoregression):
# Results from Stata, see http://www.stata.com/manuals14/tsmswitch.pdf
@classmethod
def setup_class(cls):
path = os.path.join(current_path, 'results',
'results_predict_rgnp.csv')
results = pd.read_csv(path)
true = {
'params': np.r_[.3812383, .3564492, -.0055216, 1.195482,
.6677098**2, .3710719, .4621503, .7002937,
-.3206652],
'llf': -179.32354,
'llf_fit': -179.38684,
'llf_fit_em': -184.99606,
'bse_oim': np.r_[.1424841, .0994742, .2057086, .1225987, np.nan,
.1754383, .1652473, .187409, .1295937],
'smoothed0': results.iloc[3:]['switchar2_sm1'],
'smoothed1': results.iloc[3:]['switchar2_sm2'],
'predict0': results.iloc[3:]['switchar2_yhat1'],
'predict1': results.iloc[3:]['switchar2_yhat2'],
'predict_predicted': results.iloc[3:]['switchar2_pyhat'],
'predict_filtered': results.iloc[3:]['switchar2_fyhat'],
'predict_smoothed': results.iloc[3:]['switchar2_syhat'],
}
super(TestHamiltonAR2Switch, cls).setup_class(
true, rgnp, k_regimes=2, order=2)
def test_smoothed_marginal_probabilities(self):
assert_allclose(self.result.smoothed_marginal_probabilities[:, 0],
self.true['smoothed0'], atol=1e-6)
assert_allclose(self.result.smoothed_marginal_probabilities[:, 1],
self.true['smoothed1'], atol=1e-6)
def test_predict(self):
# Smoothed
actual = self.model.predict(
self.true['params'], probabilities='smoothed')
assert_allclose(actual, self.true['predict_smoothed'], atol=1e-6)
actual = self.model.predict(
self.true['params'], probabilities=None)
assert_allclose(actual, self.true['predict_smoothed'], atol=1e-6)
actual = self.result.predict(probabilities='smoothed')
assert_allclose(actual, self.true['predict_smoothed'], atol=1e-6)
actual = self.result.predict(probabilities=None)
assert_allclose(actual, self.true['predict_smoothed'], atol=1e-6)
def test_bse(self):
# Cannot compare middle element of bse because we estimate sigma^2
# rather than sigma
bse = self.result.cov_params_approx.diagonal()**0.5
assert_allclose(bse[:4], self.true['bse_oim'][:4], atol=1e-7)
assert_allclose(bse[6:], self.true['bse_oim'][6:], atol=1e-7)
hamilton_ar1_switch_filtered = [
0.840288, 0.730337, 0.900234, 0.596492, 0.921618, 0.983828,
0.959039, 0.898366, 0.477335, 0.251089, 0.049367, 0.386782,
0.942868, 0.965632, 0.982857, 0.897603, 0.946986, 0.916413,
0.640912, 0.849296, 0.778371, 0.954420, 0.929906, 0.723930,
0.891196, 0.061163, 0.004806, 0.977369, 0.997871, 0.977950,
0.896580, 0.963246, 0.430539, 0.906586, 0.974589, 0.514506,
0.683457, 0.276571, 0.956475, 0.966993, 0.971618, 0.987019,
0.916670, 0.921652, 0.930265, 0.655554, 0.965858, 0.964981,
0.976790, 0.868267, 0.983240, 0.852052, 0.919150, 0.854467,
0.987868, 0.935840, 0.958138, 0.979535, 0.956541, 0.716322,
0.919035, 0.866437, 0.899609, 0.914667, 0.976448, 0.867252,
0.953075, 0.977850, 0.884242, 0.688299, 0.968461, 0.737517,
0.870674, 0.559413, 0.380339, 0.582813, 0.941311, 0.240020,
0.999349, 0.619258, 0.828343, 0.729726, 0.991009, 0.966291,
0.899148, 0.970798, 0.977684, 0.695877, 0.637555, 0.915824,
0.434600, 0.771277, 0.113756, 0.144002, 0.008466, 0.994860,
0.993173, 0.961722, 0.978555, 0.789225, 0.836283, 0.940383,
0.968368, 0.974473, 0.980248, 0.518125, 0.904086, 0.993023,
0.802936, 0.920906, 0.685445, 0.666524, 0.923285, 0.643861,
0.938184, 0.008862, 0.945406, 0.990061, 0.991500, 0.486669,
0.805039, 0.089036, 0.025067, 0.863309, 0.352784, 0.733295,
0.928710, 0.984257, 0.926597, 0.959887, 0.984051, 0.872682,
0.824375, 0.780157]
hamilton_ar1_switch_smoothed = [
0.900074, 0.758232, 0.914068, 0.637248, 0.901951, 0.979905,
0.958935, 0.888641, 0.261602, 0.148761, 0.056919, 0.424396,
0.932184, 0.954962, 0.983958, 0.895595, 0.949519, 0.923473,
0.678898, 0.848793, 0.807294, 0.958868, 0.942936, 0.809137,
0.960892, 0.032947, 0.007127, 0.967967, 0.996551, 0.979278,
0.896181, 0.987462, 0.498965, 0.908803, 0.986893, 0.488720,
0.640492, 0.325552, 0.951996, 0.959703, 0.960914, 0.986989,
0.916779, 0.924570, 0.935348, 0.677118, 0.960749, 0.958966,
0.976974, 0.838045, 0.986562, 0.847774, 0.908866, 0.821110,
0.984965, 0.915302, 0.938196, 0.976518, 0.973780, 0.744159,
0.922006, 0.873292, 0.904035, 0.917547, 0.978559, 0.870915,
0.948420, 0.979747, 0.884791, 0.711085, 0.973235, 0.726311,
0.828305, 0.446642, 0.411135, 0.639357, 0.973151, 0.141707,
0.999805, 0.618207, 0.783239, 0.672193, 0.987618, 0.964655,
0.877390, 0.962437, 0.989002, 0.692689, 0.699370, 0.937934,
0.522535, 0.824567, 0.058746, 0.146549, 0.009864, 0.994072,
0.992084, 0.956945, 0.984297, 0.795926, 0.845698, 0.935364,
0.963285, 0.972767, 0.992168, 0.528278, 0.826349, 0.996574,
0.811431, 0.930873, 0.680756, 0.721072, 0.937977, 0.731879,
0.996745, 0.016121, 0.951187, 0.989820, 0.996968, 0.592477,
0.889144, 0.036015, 0.040084, 0.858128, 0.418984, 0.746265,
0.907990, 0.980984, 0.900449, 0.934741, 0.986807, 0.872818,
0.812080, 0.780157]
class TestHamiltonAR1Switch(MarkovAutoregression):
@classmethod
def setup_class(cls):
# Results from E-views:
# Dependent variable followed by a list of switching regressors:
# rgnp c ar(1)
# List of non-switching regressors: <blank>
# Do not check "Regime specific error variances"
# Switching type: Markov
# Number of Regimes: 2
# Probability regressors:
# c
# Method SWITCHREG
# Sample 1951q1 1984q4
true = {
'params': np.r_[0.85472458, 0.53662099, 1.041419, -0.479157,
np.exp(-0.231404)**2, 0.243128, 0.713029],
'llf': -186.7575,
'llf_fit': -186.7575,
'llf_fit_em': -189.25446
}
super(TestHamiltonAR1Switch, cls).setup_class(
true, rgnp, k_regimes=2, order=1)
def test_filtered_regimes(self):
assert_allclose(self.result.filtered_marginal_probabilities[:, 0],
hamilton_ar1_switch_filtered, atol=1e-5)
def test_smoothed_regimes(self):
assert_allclose(self.result.smoothed_marginal_probabilities[:, 0],
hamilton_ar1_switch_smoothed, atol=1e-5)
def test_expected_durations(self):
expected_durations = [6.883477, 1.863513]
assert_allclose(self.result.expected_durations, expected_durations,
atol=1e-5)
hamilton_ar1_switch_tvtp_filtered = [
0.999996, 0.999211, 0.999849, 0.996007, 0.999825, 0.999991,
0.999981, 0.999819, 0.041745, 0.001116, 1.74e-05, 0.000155,
0.999976, 0.999958, 0.999993, 0.999878, 0.999940, 0.999791,
0.996553, 0.999486, 0.998485, 0.999894, 0.999765, 0.997657,
0.999619, 0.002853, 1.09e-05, 0.999884, 0.999996, 0.999997,
0.999919, 0.999987, 0.989762, 0.999807, 0.999978, 0.050734,
0.010660, 0.000217, 0.006174, 0.999977, 0.999954, 0.999995,
0.999934, 0.999867, 0.999824, 0.996783, 0.999941, 0.999948,
0.999981, 0.999658, 0.999994, 0.999753, 0.999859, 0.999330,
0.999993, 0.999956, 0.999970, 0.999996, 0.999991, 0.998674,
0.999869, 0.999432, 0.999570, 0.999600, 0.999954, 0.999499,
0.999906, 0.999978, 0.999712, 0.997441, 0.999948, 0.998379,
0.999578, 0.994745, 0.045936, 0.006816, 0.027384, 0.000278,
1.000000, 0.996382, 0.999541, 0.998130, 0.999992, 0.999990,
0.999860, 0.999986, 0.999997, 0.998520, 0.997777, 0.999821,
0.033353, 0.011629, 6.95e-05, 4.52e-05, 2.04e-06, 0.999963,
0.999977, 0.999949, 0.999986, 0.999240, 0.999373, 0.999858,
0.999946, 0.999972, 0.999991, 0.994039, 0.999817, 0.999999,
0.999715, 0.999924, 0.997763, 0.997944, 0.999825, 0.996592,
0.695147, 0.000161, 0.999665, 0.999928, 0.999988, 0.992742,
0.374214, 0.001569, 2.16e-05, 0.000941, 4.32e-05, 0.000556,
0.999955, 0.999993, 0.999942, 0.999973, 0.999999, 0.999919,
0.999438, 0.998738]
hamilton_ar1_switch_tvtp_smoothed = [
0.999997, 0.999246, 0.999918, 0.996118, 0.999740, 0.999990,
0.999984, 0.999783, 0.035454, 0.000958, 1.53e-05, 0.000139,
0.999973, 0.999939, 0.999994, 0.999870, 0.999948, 0.999884,
0.997243, 0.999668, 0.998424, 0.999909, 0.999860, 0.998037,
0.999559, 0.002533, 1.16e-05, 0.999801, 0.999993, 0.999997,
0.999891, 0.999994, 0.990096, 0.999753, 0.999974, 0.048495,
0.009289, 0.000542, 0.005991, 0.999974, 0.999929, 0.999995,
0.999939, 0.999880, 0.999901, 0.996221, 0.999937, 0.999935,
0.999985, 0.999450, 0.999995, 0.999768, 0.999897, 0.998930,
0.999992, 0.999949, 0.999954, 0.999995, 0.999994, 0.998687,
0.999902, 0.999547, 0.999653, 0.999538, 0.999966, 0.999485,
0.999883, 0.999982, 0.999831, 0.996940, 0.999968, 0.998678,
0.999780, 0.993895, 0.055372, 0.020421, 0.022913, 0.000127,
1.000000, 0.997072, 0.999715, 0.996893, 0.999990, 0.999991,
0.999811, 0.999978, 0.999998, 0.999100, 0.997866, 0.999787,
0.034912, 0.009932, 5.91e-05, 3.99e-05, 1.77e-06, 0.999954,
0.999976, 0.999932, 0.999991, 0.999429, 0.999393, 0.999845,
0.999936, 0.999961, 0.999995, 0.994246, 0.999570, 1.000000,
0.999702, 0.999955, 0.998611, 0.998019, 0.999902, 0.998486,
0.673991, 0.000205, 0.999627, 0.999902, 0.999994, 0.993707,
0.338707, 0.001359, 2.36e-05, 0.000792, 4.47e-05, 0.000565,
0.999932, 0.999993, 0.999931, 0.999950, 0.999999, 0.999940,
0.999626, 0.998738]
expected_durations = [
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [1.223309, 1864.084],
[1.223309, 1864.084], [1.223309, 1864.084], [1.223309, 1864.084],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [1.223309, 1864.084], [1.223309, 1864.084],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [1.223309, 1864.084],
[1.223309, 1864.084], [1.223309, 1864.084], [1.223309, 1864.084],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [1.223309, 1864.084],
[1.223309, 1864.084], [1.223309, 1864.084], [1.223309, 1864.084],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[1.223309, 1864.084], [1.223309, 1864.084], [1.223309, 1864.084],
[1.223309, 1864.084], [1.223309, 1864.084], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[1.223309, 1864.084], [1.223309, 1864.084], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[1.223309, 1864.084], [1.223309, 1864.084], [1.223309, 1864.084],
[1.223309, 1864.084], [1.223309, 1864.084], [1.223309, 1864.084],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391], [710.7573, 1.000391],
[710.7573, 1.000391], [710.7573, 1.000391]]
class TestHamiltonAR1SwitchTVTP(MarkovAutoregression):
@classmethod
def setup_class(cls):
# Results from E-views:
# Dependent variable followed by a list of switching regressors:
# rgnp c ar(1)
# List of non-switching regressors: <blank>
# Do not check "Regime specific error variances"
# Switching type: Markov
# Number of Regimes: 2
# Probability regressors:
# c recession
# Method SWITCHREG
# Sample 1951q1 1984q4
true = {
'params': np.r_[6.564923, 7.846371, -8.064123, -15.37636,
1.027190, -0.719760,
np.exp(-0.217003)**2, 0.161489, 0.022536],
'llf': -163.914049,
'llf_fit': -161.786477,
'llf_fit_em': -163.914049
}
exog_tvtp = np.c_[np.ones(len(rgnp)), rec]
super(TestHamiltonAR1SwitchTVTP, cls).setup_class(
true, rgnp, k_regimes=2, order=1, exog_tvtp=exog_tvtp)
@pytest.mark.skip # TODO(ChadFulton): give reason for skip
def test_fit_em(self):
pass
def test_filtered_regimes(self):
assert_allclose(self.result.filtered_marginal_probabilities[:, 0],
hamilton_ar1_switch_tvtp_filtered, atol=1e-5)
def test_smoothed_regimes(self):
assert_allclose(self.result.smoothed_marginal_probabilities[:, 0],
hamilton_ar1_switch_tvtp_smoothed, atol=1e-5)
def test_expected_durations(self):
assert_allclose(self.result.expected_durations, expected_durations,
rtol=1e-5, atol=1e-7)
class TestFilardo(MarkovAutoregression):
@classmethod
def setup_class(cls):
path = os.path.join(current_path, 'results', 'mar_filardo.csv')
cls.mar_filardo = pd.read_csv(path)
true = {
'params': np.r_[4.35941747, -1.6493936, 1.7702123, 0.9945672,
0.517298, -0.865888,
np.exp(-0.362469)**2,
0.189474, 0.079344, 0.110944, 0.122251],
'llf': -586.5718,
'llf_fit': -586.5718,
'llf_fit_em': -586.5718
}
endog = cls.mar_filardo['dlip'].iloc[1:].values
exog_tvtp = add_constant(
cls.mar_filardo['dmdlleading'].iloc[:-1].values)
super(TestFilardo, cls).setup_class(
true, endog, k_regimes=2, order=4, switching_ar=False,
exog_tvtp=exog_tvtp)
@pytest.mark.skip # TODO(ChadFulton): give reason for skip
def test_fit(self, **kwargs):
pass
@pytest.mark.skip # TODO(ChadFulton): give reason for skip
def test_fit_em(self):
pass
def test_filtered_regimes(self):
assert_allclose(self.result.filtered_marginal_probabilities[:, 0],
self.mar_filardo['filtered_0'].iloc[5:], atol=1e-5)
def test_smoothed_regimes(self):
assert_allclose(self.result.smoothed_marginal_probabilities[:, 0],
self.mar_filardo['smoothed_0'].iloc[5:], atol=1e-5)
def test_expected_durations(self):
assert_allclose(self.result.expected_durations,
self.mar_filardo[['duration0', 'duration1']].iloc[5:],
rtol=1e-5, atol=1e-7)
class TestFilardoPandas(MarkovAutoregression):
@classmethod
def setup_class(cls):
path = os.path.join(current_path, 'results', 'mar_filardo.csv')
cls.mar_filardo = pd.read_csv(path)
cls.mar_filardo.index = pd.date_range('1948-02-01', '1991-04-01',
freq='MS')
true = {
'params': np.r_[4.35941747, -1.6493936, 1.7702123, 0.9945672,
0.517298, -0.865888,
np.exp(-0.362469)**2,
0.189474, 0.079344, 0.110944, 0.122251],
'llf': -586.5718,
'llf_fit': -586.5718,
'llf_fit_em': -586.5718
}
endog = cls.mar_filardo['dlip'].iloc[1:]
exog_tvtp = add_constant(
cls.mar_filardo['dmdlleading'].iloc[:-1])
super(TestFilardoPandas, cls).setup_class(
true, endog, k_regimes=2, order=4, switching_ar=False,
exog_tvtp=exog_tvtp)
@pytest.mark.skip # TODO(ChadFulton): give reason for skip
def test_fit(self, **kwargs):
pass
@pytest.mark.skip # TODO(ChadFulton): give reason for skip
def test_fit_em(self):
pass
def test_filtered_regimes(self):
assert_allclose(self.result.filtered_marginal_probabilities[0],
self.mar_filardo['filtered_0'].iloc[5:], atol=1e-5)
def test_smoothed_regimes(self):
assert_allclose(self.result.smoothed_marginal_probabilities[0],
self.mar_filardo['smoothed_0'].iloc[5:], atol=1e-5)
def test_expected_durations(self):
assert_allclose(self.result.expected_durations,
self.mar_filardo[['duration0', 'duration1']].iloc[5:],
rtol=1e-5, atol=1e-7)
 | statsmodels/tsa/regime_switching/tests/test_markov_autoregression.py | 41,400 | Tests for Markov Autoregression models (Author: Chad Fulton; License: BSD-3)
import tensorflow as tf
# DISCLAIMER:
# Parts of this code file were originally forked from
# https://github.com/tkipf/gcn
# which itself was very inspired by the keras package
def masked_logit_cross_entropy(preds, labels, mask):
"""Logit cross-entropy loss with masking."""
loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=preds, labels=labels)
loss = tf.reduce_sum(input_tensor=loss, axis=1)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.maximum(tf.reduce_sum(input_tensor=mask), tf.constant([1.]))
loss *= mask
return tf.reduce_mean(input_tensor=loss)
def masked_softmax_cross_entropy(preds, labels, mask):
"""Softmax cross-entropy loss with masking."""
loss = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=tf.stop_gradient(labels))
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.maximum(tf.reduce_sum(input_tensor=mask), tf.constant([1.]))
loss *= mask
return tf.reduce_mean(input_tensor=loss)
def masked_l2(preds, actuals, mask):
"""L2 loss with masking."""
    # tf.nn.l2_loss accepts a single tensor, so take the difference first and
    # compute a per-example L2 loss (sum of squares / 2 over the feature axis).
    loss = 0.5 * tf.reduce_sum(input_tensor=tf.square(preds - actuals), axis=1)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.reduce_mean(input_tensor=mask)
loss *= mask
return tf.reduce_mean(input_tensor=loss)
def masked_accuracy(preds, labels, mask):
"""Accuracy with masking."""
correct_prediction = tf.equal(tf.argmax(input=preds, axis=1), tf.argmax(input=labels, axis=1))
accuracy_all = tf.cast(correct_prediction, tf.float32)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.reduce_mean(input_tensor=mask)
accuracy_all *= mask
return tf.reduce_mean(input_tensor=accuracy_all)
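
# ---------------------------------------------------------------------------
# Minimal usage sketch (added for illustration; it is not part of the original
# module). It assumes TensorFlow 2.x eager execution and uses made-up toy
# tensors purely to exercise the masked metrics defined above.
if __name__ == "__main__":
    # Three "nodes" with two classes each; only the first two nodes are labeled.
    toy_preds = tf.constant([[2.0, 0.5], [0.2, 1.5], [0.3, 0.3]])
    toy_labels = tf.constant([[1.0, 0.0], [0.0, 1.0], [0.0, 0.0]])
    toy_mask = tf.constant([1.0, 1.0, 0.0])  # unlabeled nodes get weight 0
    print("masked softmax cross-entropy:",
          float(masked_softmax_cross_entropy(toy_preds, toy_labels, toy_mask)))
    print("masked accuracy:",
          float(masked_accuracy(toy_preds, toy_labels, toy_mask)))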
 | graphsage/metrics.py | 1,683 | Masked loss and accuracy metrics (forked in part from https://github.com/tkipf/gcn)
import requests
import re
import random
import time
from bs4 import BeautifulSoup
import os
import lpmysql
import json
def getindex():
url = 'http://freeget.co'
    headers = {'User-Agent': "Mozilla/5.0 (iPhone; CPU iPhone OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1",
               "Content-Type": "text/html;charset=utf-8",
# Accept:*/*
# Accept-Encoding:gzip, deflate
# Accept-Language:zh-CN,zh;q=0.8
# Connection:keep-alive
}
    html = requests.get(url, headers=headers)  # changed here (notice that self is gone?)
print(html.content)
print(dir(html))
print(html.headers)
def getbtn():
url = 'http://freeget.co/video/extraction'
    headers = {'User-Agent': "Mozilla/5.0 (iPhone; CPU iPhone OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1",
               "Content-Type": "text/html;charset=utf-8",
               # "X-CSRFToken": "1504169114##45f7200f8dba99432cc422ed552b3bbf3baff85b",
               "X-Requested-With": "XMLHttpRequest",
               # X-CSRFToken: 1504164180 ##fdbd5ae5ec0c76632937754c20e90c582f2f7c28
               # X-Requested-With: XMLHttpRequest
# Accept:*/*
# Accept-Encoding:gzip, deflate
# Accept-Language:zh-CN,zh;q=0.8
# Connection:keep-alive
}
payload = {"url":"1111111111111111111111111111111111111111111111","_csrf" : "1504169114##45f7200f8dba99432cc422ed552b3bbf3baff85b"}
    html = requests.post(url, headers=headers, data=payload)  # changed here (notice that self is gone?)
print(html.content)
print(dir(html))
print(html.headers)
# getindex()
getbtn()
# http://www.cnblogs.com/xwang/p/3757711.html
# python requests: setting the CSRF token
# http://blog.csdn.net/u011061889/article/details/72904821
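
# --- Illustrative sketch (not part of the original script) -------------------
# The hard-coded "_csrf" value above goes stale quickly; the articles linked
# above describe fetching a fresh token first. This sketch assumes the token is
# embedded in the extraction page as a hidden <input name="_csrf"> field, which
# is an assumption about the page layout and is not verified here. It reuses
# the requests and BeautifulSoup imports at the top of this file.
def getbtn_with_fresh_csrf(video_url):
    session = requests.Session()
    headers = {
        "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 9_1 like Mac OS X) "
                      "AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1",
        "X-Requested-With": "XMLHttpRequest",
    }
    # Fetch the page that embeds the CSRF token and keep its cookies in the session.
    page = session.get("http://freeget.co/video/extraction", headers=headers)
    soup = BeautifulSoup(page.content, "html.parser")
    token_input = soup.find("input", attrs={"name": "_csrf"})  # assumed field name
    token = token_input.get("value", "") if token_input else ""
    payload = {"url": video_url, "_csrf": token}
    return session.post("http://freeget.co/video/extraction", headers=headers, data=payload)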
 | freeget.py | 1,983 | Scraping experiment against freeget.co (requests + CSRF token)
"""Python Interface for Residue-Residue Contact Predictions"""
import os
import sys
from distutils.command.build import build
from distutils.util import convert_path
from setuptools import setup, Extension
from Cython.Distutils import build_ext
import numpy as np
# ==============================================================
# Setup.py command extensions
# ==============================================================
# Credits to http://stackoverflow.com/a/33181352
class BuildCommand(build):
user_options = build.user_options + [
("script-python-path=", None, "Path to Python interpreter to be included in the scripts")
]
def initialize_options(self):
build.initialize_options(self)
self.script_python_path = None
def finalize_options(self):
build.finalize_options(self)
def run(self):
global script_python_path
script_python_path = self.script_python_path
build.run(self)
# ==============================================================
# Functions, functions, functions ...
# ==============================================================
def dependencies():
with open("requirements.txt", "r") as f_in:
deps = f_in.read().splitlines()
return deps
def extensions():
exts = ["conkit/core/ext/c_contactmap.pyx", "conkit/core/ext/c_sequencefile.pyx", "conkit/misc/ext/c_bandwidth.pyx"]
extensions = []
for ext in exts:
extensions.append(
Extension(
ext.replace('/', '.').rsplit('.', 1)[0],
[ext],
include_dirs=[np.get_include()],
))
return extensions
def readme():
with open("README.rst", "r") as f_in:
return f_in.read()
def scripts():
extension = ".bat" if sys.platform.startswith("win") else ""
header = "" if sys.platform.startswith("win") else "#!/bin/sh"
bin_dir = "bin"
command_dir = convert_path("conkit/command_line")
scripts = []
for file in os.listdir(command_dir):
if not file.startswith("_") and file.endswith(".py"):
# Make sure we have a workable name
f_name = os.path.basename(file).rsplit(".", 1)[0]
for c in [".", "_"]:
new_f_name = f_name.replace(c, "-")
# Write the content of the script
script = os.path.join(bin_dir, new_f_name + extension)
with open(script, "w") as f_out:
f_out.write(header + os.linesep)
# BATCH file
if sys.platform.startswith("win"):
string = "@{0} -m conkit.command_line.{1} %*"
# BASH file
else:
string = '{0} -m conkit.command_line.{1} "$@"'
f_out.write(string.format(PYTHON_EXE, f_name) + os.linesep)
os.chmod(script, 0o777)
scripts.append(script)
return scripts
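
# For example, on a POSIX system scripts() writes a wrapper like the following
# into bin/ (the interpreter path and command name shown here are illustrative):
#
#   #!/bin/sh
#   /usr/bin/python3 -m conkit.command_line.some_command "$@"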
def version():
# Credits to http://stackoverflow.com/a/24517154
main_ns = {}
ver_path = convert_path("conkit/version.py")
with open(ver_path) as f_in:
exec(f_in.read(), main_ns)
return main_ns["__version__"]
# ==============================================================
# Determine the Python executable
# ==============================================================
PYTHON_EXE = None
for arg in sys.argv:
if arg[0:20] == "--script-python-path" and len(arg) == 20:
option, value = arg, sys.argv[sys.argv.index(arg) + 1]
PYTHON_EXE = value
elif arg[0:20] == "--script-python-path" and arg[20] == "=":
option, value = arg[:20], arg[21:]
PYTHON_EXE = value
if not PYTHON_EXE:
PYTHON_EXE = sys.executable
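
# Example invocation (the interpreter path below is illustrative only):
#   python setup.py build --script-python-path=/usr/bin/python3
# Without the flag, the interpreter running this setup.py is baked into the
# generated wrapper scripts.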
# ==============================================================
# Define all the relevant options
# ==============================================================
AUTHOR = "Felix Simkovic"
AUTHOR_EMAIL = "felixsimkovic@me.com"
DESCRIPTION = __doc__.replace("\n", "")
DEPENDENCIES = dependencies()
EXT_MODULES = extensions()
LICENSE = "BSD License"
LONG_DESCRIPTION = readme()
PACKAGE_DIR = "conkit"
PACKAGE_NAME = "conkit"
PLATFORMS = ["POSIX", "Mac OS", "Windows", "Unix"]
SCRIPTS = scripts()
URL = "http://www.conkit.org/en/latest/"
VERSION = version()
PACKAGES = [
"conkit",
"conkit/applications",
"conkit/command_line",
"conkit/core",
"conkit/core/ext",
"conkit/io",
"conkit/misc",
"conkit/misc/ext",
"conkit/plot",
]
CLASSIFIERS = [
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Scientific/Engineering :: Bio-Informatics",
]
TEST_REQUIREMENTS = [
"codecov",
"coverage",
"pytest",
"pytest-cov",
"pytest-pep8",
"pytest-helpers-namespace",
]
setup(
cmdclass={
'build': BuildCommand,
'build_ext': build_ext,
},
author=AUTHOR,
author_email=AUTHOR_EMAIL,
name=PACKAGE_NAME,
description=DESCRIPTION,
ext_modules=EXT_MODULES,
include_dirs=[np.get_include()],
long_description=LONG_DESCRIPTION,
license=LICENSE,
version=VERSION,
url=URL,
packages=PACKAGES,
package_dir={PACKAGE_NAME: PACKAGE_DIR},
scripts=SCRIPTS,
platforms=PLATFORMS,
classifiers=CLASSIFIERS,
install_requires=DEPENDENCIES,
tests_require=TEST_REQUIREMENTS,
setup_requires=['pytest-runner'],
include_package_data=True,
zip_safe=False,
)
 | setup.py | 5,684 | Python Interface for Residue-Residue Contact Predictions
"""
Django settings for mymedicalassistant project.
Generated by 'django-admin startproject' using Django 3.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'va(i+)gea_&5z@=q%_-d7&ezvkqhq9#$sq1_oco8k!n#2yl!7&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'apptSchedule',
'clinicInformation',
# 'medInformation',
'medSchedule',
'user',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mymedicalassistant.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mymedicalassistant.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
# STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES':[
'rest_framework.permissions.AllowAny',
]
}
# TODO: confirm whether this is needed for all models or just the user model.
AUTH_USER_MODEL = 'user.CustomUser'
LOGIN_REDIRECT_URL = 'home'
LOGOUT_REDIRECT_URL = 'home'
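
# For reference: AUTH_USER_MODEL above points at the `user` app's model. A
# minimal shape for such a model (a sketch only; the real fields live in
# user/models.py and are not reproduced here) would be:
#
#   from django.contrib.auth.models import AbstractUser
#
#   class CustomUser(AbstractUser):
#       pass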
 | mymedicalassistant/settings.py | 3,614 | Django settings for mymedicalassistant project.
from deeplearning import logger, tf_util as U
import tensorflow as tf
from rl.runner import Runner
from rl.vec_env.subproc_vec_env import SubprocVecEnv
from collections import namedtuple
import os, time
class RLExperiment(U.Experiment):
def load_env_fn(self):
fname = os.path.join(self.logdir, 'checkpoints/env_fn.pkl')
assert os.path.exists(fname), "No env function saved."
return U.load(fname)
def save_env_fn(self, env_fn):
fname = os.path.join(self.logdir, 'checkpoints/env_fn.pkl')
U.save(fname, env_fn)
class OnlineRLAlgorithm(object):
def __init__(self, logdir, env_fn, model_fn, nenv, rollout_length, batch_size, callback=None, runner_flags=[], **kwargs):
self.exp = RLExperiment(logdir)
self.exp.save_model_fn(model_fn)
self.exp.save_env_fn(env_fn)
logger.configure(os.path.join(logdir, 'logs'), ['stdout', 'log', 'json'])
self.logdir = logdir
self.batch_size = batch_size
self.rollout_length = rollout_length
self.args = namedtuple('Args', kwargs.keys())(**kwargs)
self.nenv = nenv
self.timesteps_per_step = self.nenv * self.rollout_length
self.env = self._make_env(env_fn, nenv)
self.actor = model_fn(self.env)
self.actor.build('model', self.nenv, 1)
self.loss = self._def_loss(model_fn, self.env)
self.opt = self._def_opt(self.loss)
self.opt.build('model', self.nenv, batch_size, reuse=tf.AUTO_REUSE)
self.runner = Runner(self.env, self.actor, rollout_length, *runner_flags)
self.callback = callback
if callback is not None:
assert callable(callback)
self.init_session()
self.load()
def _make_env(self, env_fn, nenv):
def make_env(rank):
def _env():
return env_fn(rank)
return _env
return SubprocVecEnv([make_env(i) for i in range(nenv)])
    def _def_loss(self, model_fn, env):
        """
        Return the Module that computes the loss.
        """
raise NotImplementedError
    def _def_opt(self, loss):
        """
        Return the Module that wraps the optimizer for the given loss.
        """
raise NotImplementedError
def _before_step(self):
pass
def _process_rollout(self, rollout):
raise NotImplementedError
def _update_model(self, data):
raise NotImplementedError
def _after_step(self, rollout, data, update_out):
pass
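
    # One call to step() runs the hooks above in a fixed order: the optional
    # callback fires first, then _before_step(), then runner.rollout();
    # self.t advances by nenv * rollout_length, and finally
    # _process_rollout(rollout) -> _update_model(data) -> _after_step(...).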
def step(self):
if self.callback is not None:
self.callback(locals(), globals())
self._before_step()
rollout = self.runner.rollout()
self.t += self.timesteps_per_step
data = self._process_rollout(rollout)
outs = self._update_model(data)
self._after_step(rollout, data, outs)
def train(self, maxtimesteps=None, maxseconds=None, save_freq=None):
assert maxtimesteps is not None or maxseconds is not None
start_time = time.time()
while True:
if maxtimesteps is not None and self.t >= maxtimesteps:
break
            if maxseconds is not None and time.time() - start_time >= maxseconds:
break
t = self.t
self.step()
if save_freq and t // save_freq != self.t // save_freq:
self.save()
self.save()
def save(self):
self.exp.save(self.t)
def load(self, t=None):
self.t = self.exp.load(t)
def init_session(self):
if tf.get_default_session() is None:
U.make_session().__enter__()
U.initialize()
def close(self):
if hasattr(self.env, 'close'):
self.env.close()
tf.get_default_session().__exit__(None, None, None)
logger.reset()
if __name__=='__main__':
from deeplearning.layers import Adam, Placeholder
from deeplearning.module import Module
from rl.rl_module import Policy
import tensorflow as tf
import gym
from rl import util
class TestAlg(OnlineRLAlgorithm):
        def _def_loss(self, model_fn, env):  # match the base-class signature (model_fn, env)
class Ent(Module):
def _build(self, inputs):
return self.modules[0]._entropy
return Ent('l', model_fn(self.env))
def _def_opt(self, loss):
return Adam('opt', loss)
def _before_step(self):
logger.log("Before Step")
def _process_rollout(self, rollout):
return rollout.numpy()
def _update_model(self, data):
self.opt.update(util.swap01andflatten(data['obs']))
def _after_step(self, rollout, data, update_outs):
logger.log("After Step")
def model_fn(env):
x = Placeholder(tf.float32, env.observation_space.shape, 'x')
return Policy('pi', x, ac_space=env.action_space)
def env_fn(rank):
env = gym.make('CartPole-v1')
env.seed(rank)
return env
alg = TestAlg('./test_logs', env_fn, model_fn, 2, 64, 64)
alg.train(1024, save_freq=128)
 | rl/algorithms/core.py | 5,072 | Base classes for online RL algorithms
import datetime
import logging
import time
import dataset
import discord
import privatebinapi
from discord.ext import commands
from discord.ext.commands import Cog, Bot
from discord_slash import cog_ext, SlashContext
from discord_slash.model import SlashCommandPermissionType
from discord_slash.utils.manage_commands import create_option, create_permission
import utils.duration
from cogs.commands import settings
from utils import database
from utils import embeds
from utils.moderation import can_action_member
from utils.record import record_usage
# Enabling logs
log = logging.getLogger(__name__)
class MuteCog(Cog):
""" Mute Cog """
def __init__(self, bot):
self.bot = bot
@staticmethod
async def mute_member(ctx: SlashContext, member: discord.Member, reason: str, temporary: bool = False, end_time: float = None) -> None:
role = discord.utils.get(ctx.guild.roles, id=settings.get_value("role_muted"))
await member.add_roles(role, reason=reason)
# Open a connection to the database.
db = dataset.connect(database.get_db())
# Add the mute to the mod_log database.
db["mod_logs"].insert(dict(
user_id=member.id, mod_id=ctx.author.id, timestamp=int(time.time()), reason=reason, type="mute"
))
# Occurs when the duration parameter in /mute is specified (tempmute).
if temporary:
db["timed_mod_actions"].insert(dict(
user_id=member.id,
mod_id=ctx.author.id,
action_type="mute",
reason=reason,
start_time=datetime.datetime.now(tz=datetime.timezone.utc).timestamp(),
end_time=end_time,
is_done=False
))
# Commit the changes to the database.
db.commit()
db.close()
async def unmute_member(self, member: discord.Member, reason: str, ctx: SlashContext = None, guild: discord.Guild = None) -> None:
guild = guild or ctx.guild
moderator = ctx.author if ctx else self.bot.user
# Removes "Muted" role from member.
role = discord.utils.get(guild.roles, id=settings.get_value("role_muted"))
await member.remove_roles(role, reason=reason)
# Open a connection to the database.
db = dataset.connect(database.get_db())
# Add the unmute to the mod_log database.
db["mod_logs"].insert(dict(
user_id=member.id, mod_id=moderator.id, timestamp=int(time.time()), reason=reason, type="unmute"
))
tempmute_entry = db["timed_mod_actions"].find_one(user_id=member.id, is_done=False)
if tempmute_entry:
db["timed_mod_actions"].update(dict(id=tempmute_entry["id"], is_done=True), ["id"])
# Commit the changes to the database and close the connection.
db.commit()
db.close()
@staticmethod
async def is_user_muted(ctx: SlashContext, member: discord.Member) -> bool:
if discord.utils.get(ctx.guild.roles, id=settings.get_value("role_muted")) in member.roles:
return True
return False
@staticmethod
async def send_muted_dm_embed(ctx: SlashContext, member: discord.Member, channel: discord.TextChannel, reason: str = None, duration: str = None) -> bool:
if not duration:
duration = "Indefinite"
try: # In case user has DMs blocked.
dm_channel = await member.create_dm()
embed = embeds.make_embed(
author=False,
title=f"Uh-oh, you've been muted!",
description="If you believe this was a mistake, contact staff.",
color=0x8083b0
)
embed.add_field(name="Server:", value=f"[{ctx.guild}](https://discord.gg/piracy/)", inline=True)
embed.add_field(name="Moderator:", value=ctx.author.mention, inline=True)
embed.add_field(name="Length:", value=duration, inline=True)
embed.add_field(name="Mute Channel:", value=channel.mention, inline=True)
embed.add_field(name="Reason:", value=reason, inline=False)
embed.set_image(url="https://i.imgur.com/840Q48l.gif")
await dm_channel.send(embed=embed)
return True
except discord.HTTPException:
return False
async def send_unmuted_dm_embed(self, member: discord.Member, reason: str, ctx: SlashContext = None, guild: discord.Guild = None) -> bool:
guild = guild or ctx.guild
moderator = ctx.author if ctx else self.bot.user
# Send member message telling them that they were unmuted and why.
try: # In case user has DMs blocked.
channel = await member.create_dm()
embed = embeds.make_embed(
author=False,
title=f"Yay, you've been unmuted!",
description="Review our server rules to avoid being actioned again in the future.",
color=0x8a3ac5
)
embed.add_field(name="Server:", value=f"[{guild}](https://discord.gg/piracy/)", inline=True)
embed.add_field(name="Moderator:", value=moderator.mention, inline=True)
embed.add_field(name="Reason:", value=reason, inline=False)
embed.set_image(url="https://i.imgur.com/U5Fvr2Y.gif")
await channel.send(embed=embed)
return True
except discord.HTTPException:
return False
@staticmethod
async def create_mute_channel(ctx: SlashContext, member: discord.Member, reason: str, duration: str = None):
if not duration:
duration = "Indefinite"
# Create a channel in the category specified in settings.
category = discord.utils.get(ctx.guild.categories, id=settings.get_value("category_tickets"))
channel = await ctx.guild.create_text_channel(f"mute-{member.id}", category=category)
# Give both the staff and the user perms to access the channel.
await channel.set_permissions(discord.utils.get(ctx.guild.roles, id=settings.get_value("role_trial_mod")), read_messages=True)
await channel.set_permissions(discord.utils.get(ctx.guild.roles, id=settings.get_value("role_staff")), read_messages=True)
await channel.set_permissions(member, read_messages=True)
# Create embed at the start of the channel letting the user know how long they're muted for and why.
embed = embeds.make_embed(title="🤐 You were muted", description="If you have any questions or concerns about your mute, you may voice them here.")
embed.add_field(name="User:", value=member.mention, inline=True)
embed.add_field(name="Moderator:", value=ctx.author.mention, inline=True)
embed.add_field(name="Length:", value=duration, inline=True)
embed.add_field(name="Reason:", value=reason, inline=False)
await channel.send(embed=embed)
# Embed mentions don't count as a ping so this is a workaround to that.
ping = await channel.send(member.mention)
await ping.delete()
return channel
    async def archive_mute_channel(self, user_id: int, reason: str = None, ctx: SlashContext = None, guild: discord.Guild = None):
        # Default the reason string when the moderator does not provide one.
if not reason:
reason = "No reason provided."
# Discord caps embed fields at a ridiculously low character limit, avoids problems with future embeds.
elif len(reason) > 512:
await embeds.error_message(ctx=ctx, description="Reason must be less than 512 characters.")
return
guild = guild or ctx.guild
category = discord.utils.get(guild.categories, id=settings.get_value("category_tickets"))
mute_channel = discord.utils.get(category.channels, name=f"mute-{user_id}")
# Open a connection to the database.
db = dataset.connect(database.get_db())
# TODO: Get the mute reason by looking up the latest mute for the user and getting the reason column data.
table = db["mod_logs"]
# Gets the most recent mute for the user, sorted by descending (-) ID.
mute_entry = table.find_one(user_id=user_id, type="mute", order_by="-id")
unmute_entry = table.find_one(user_id=user_id, type="unmute", order_by="-id")
mute_reason = mute_entry["reason"]
muter = await self.bot.fetch_user(mute_entry["mod_id"])
unmuter = await self.bot.fetch_user(unmute_entry["mod_id"])
# Commit the changes to the database and close the connection.
db.commit()
db.close()
# Get the member object of the ticket creator.
member = await self.bot.fetch_user(user_id)
# Initialize the PrivateBin message log string.
message_log = (
f"Muted User: {member} ({member.id})\n\n"
f"Muted By: {muter} ({muter.id})\n"
f"Mute Reason: {mute_reason}\n\n"
f"Unmuted By: {unmuter} ({unmuter.id})\n"
f"Unmute Reason: {reason}\n\n"
)
# Initialize a list of moderator IDs as a set for no duplicates.
mod_list = set()
# Add the original muting moderator to avoid a blank embed field if no one interacts.
mod_list.add(muter)
# Fetch the staff and trial mod role.
role_staff = discord.utils.get(guild.roles, id=settings.get_value("role_staff"))
role_trial_mod = discord.utils.get(guild.roles, id=settings.get_value("role_trial_mod"))
        # TODO: also handle fetching the channel when the moderator is the bot.
# Loop through all messages in the ticket from old to new.
async for message in mute_channel.history(oldest_first=True):
# Ignore the bot replies.
if not message.author.bot:
# Time format is unnecessarily lengthy so trimming it down and keep the log go easier on the eyes.
formatted_time = str(message.created_at).split(".")[-2]
# Append the new messages to the current log as we loop.
message_log += f"[{formatted_time}] {message.author}: {message.content}\n"
# Iterates only through members that is still in the server.
if isinstance(member, discord.Member):
# If the messenger has either staff role or trial mod role, add their ID to the mod_list set.
if role_staff in message.author.roles or role_trial_mod in message.author.roles:
mod_list.add(message.author)
# Dump message log to PrivateBin. This returns a dictionary, but only the url is needed for the embed.
url = privatebinapi.send("https://bin.piracy.moe", text=message_log, expiration="never")["full_url"]
# Get the amount of time elapsed since the user was muted.
time_delta = datetime.datetime.utcnow() - mute_channel.created_at
days = time_delta.days
# Hours are the time delta in seconds divided by 3600.
hours, remainder = divmod(time_delta.seconds, 3600)
# Minutes are the hour remainder divided by 60. The minutes remainder are the seconds.
minutes, seconds = divmod(remainder, 60)
# String that will store the duration in a more digestible format.
elapsed_time = ""
duration = dict(
days=days,
hours=hours,
minutes=minutes,
seconds=seconds
)
for time_unit in duration:
# If the time value is 0, skip it.
if duration[time_unit] == 0:
continue
# If the time value is 1, make the time unit into singular form.
if duration[time_unit] == 1:
elapsed_time += f"{duration[time_unit]} {time_unit[:-1]} "
else:
elapsed_time += f"{duration[time_unit]} {time_unit} "
# Create the embed in #mute-log.
embed = embeds.make_embed(
title=f"{mute_channel.name} archived",
thumbnail_url="https://i.imgur.com/A4c19BJ.png",
color="blurple"
)
embed.add_field(name="Muted User:", value=member.mention, inline=True)
embed.add_field(name="Muted By:", value=muter.mention, inline=True)
embed.add_field(name="Unmuted By:", value=unmuter.mention, inline=True)
embed.add_field(name="Mute Reason:", value=mute_reason, inline=False)
embed.add_field(name="Unmute Reason:", value=reason, inline=False)
embed.add_field(name="Duration:", value=elapsed_time, inline=False)
embed.add_field(name="Participating Moderators:", value=" ".join(mod.mention for mod in mod_list), inline=False)
embed.add_field(name="Mute Log: ", value=url, inline=False)
# Send the embed to #mute-log.
mute_log = discord.utils.get(guild.channels, id=settings.get_value("channel_mute_log"))
await mute_log.send(embed=embed)
# Delete the mute channel.
await mute_channel.delete()
@commands.bot_has_permissions(manage_roles=True, send_messages=True)
@commands.before_invoke(record_usage)
@cog_ext.cog_slash(
name="mute",
description="Mutes a member in the server",
guild_ids=[settings.get_value("guild_id")],
options=[
create_option(
name="member",
description="The member that will be muted",
option_type=6,
required=True
),
create_option(
name="reason",
description="The reason why the member is being muted",
option_type=3,
required=False
),
create_option(
name="duration",
description="The length of time the user will be muted for",
option_type=3,
required=False
),
],
default_permission=False,
permissions={
settings.get_value("guild_id"): [
create_permission(settings.get_value("role_staff"), SlashCommandPermissionType.ROLE, True),
create_permission(settings.get_value("role_trial_mod"), SlashCommandPermissionType.ROLE, True)
]
}
)
async def mute(self, ctx: SlashContext, member: discord.Member, duration: str = None, reason: str = None):
""" Mutes member in guild. """
await ctx.defer()
# If we received an int instead of a discord.Member, the user is not in the server.
if not isinstance(member, discord.Member):
await embeds.error_message(ctx=ctx, description=f"That user is not in the server.")
return
# Checks if invoker can action that member (self, bot, etc.)
if not await can_action_member(bot=self.bot, ctx=ctx, member=member):
await embeds.error_message(ctx=ctx, description=f"You cannot action {member.mention}.")
return
# Check if the user is muted already.
if await self.is_user_muted(ctx=ctx, member=member):
await embeds.error_message(ctx=ctx, description=f"{member.mention} is already muted.")
return
        # Default the reason string when the moderator does not provide one.
if not reason:
reason = "No reason provided."
# Discord caps embed fields at a ridiculously low character limit; rejecting long reasons avoids problems with future embeds.
elif len(reason) > 512:
await embeds.error_message(ctx=ctx, description="Reason must be less than 512 characters.")
return
# If the duration is not specified, default it to a permanent mute.
if not duration:
# Start creating the embed that will be used to alert the moderator that the user was successfully muted.
embed = embeds.make_embed(
ctx=ctx,
title=f"Muting member: {member.name}",
description=f"{member.mention} was muted by {ctx.author.mention} for: {reason}",
thumbnail_url="https://i.imgur.com/rHtYWIt.png",
color="soft_red",
)
# Create the mute channel in the Staff category.
channel = await self.create_mute_channel(ctx=ctx, member=member, reason=reason)
# Attempt to DM the user to let them know they were muted.
if not await self.send_muted_dm_embed(ctx=ctx, member=member, channel=channel, reason=reason):
embed.add_field(name="Notice:", value=f"Unable to message {member.mention} about this action. This can be caused by the user not being in the server, having DMs disabled, or having the bot blocked.")
# Mutes the user and returns the embed letting the moderator know they were successfully muted.
await self.mute_member(ctx=ctx, member=member, reason=reason)
await ctx.send(embed=embed)
return
# Get the duration string for embed and mute end time for the specified duration.
duration_string, mute_end_time = utils.duration.get_duration(duration=duration)
# If the duration string is empty because the regex did not match anything, send an error embed and return.
if not duration_string:
await embeds.error_message(ctx=ctx, description=f"Duration syntax: `#d#h#m#s` (day, hour, min, sec)\nYou can specify up to all four but you only need one.")
return
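# (Illustrative example, not part of the original cog: per the syntax hint above,
# a duration such as "1d12h" is expected to parse into a readable string and an
# unmute time roughly 36 hours from now; the exact return values depend on
# utils.duration.get_duration.)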
# Start creating the embed that will be used to alert the moderator that the user was successfully muted.
embed = embeds.make_embed(
ctx=ctx,
title=f"Muting member: {member}",
thumbnail_url="https://i.imgur.com/rHtYWIt.png",
color="soft_red"
)
embed.description = f"{member.mention} was muted by {ctx.author.mention} for: {reason}"
embed.add_field(name="Duration:", value=duration_string, inline=False)
# Create the mute channel in the Staff category.
channel = await self.create_mute_channel(ctx=ctx, member=member, reason=reason, duration=duration_string)
# Attempt to DM the user to let them know they were muted.
if not await self.send_muted_dm_embed(ctx=ctx, member=member, channel=channel, reason=reason, duration=duration_string):
embed.add_field(name="Notice:", value=f"Unable to message {member.mention} about this action. This can be caused by the user not being in the server, having DMs disabled, or having the bot blocked.")
# Mutes the user and stores the unmute time in the database for the background task.
await self.mute_member(ctx=ctx, member=member, reason=reason, temporary=True, end_time=mute_end_time.timestamp())
await ctx.send(embed=embed)
@commands.bot_has_permissions(manage_roles=True, send_messages=True)
@commands.before_invoke(record_usage)
@cog_ext.cog_slash(
name="unmute",
description="Unmutes a member in the server",
guild_ids=[settings.get_value("guild_id")],
options=[
create_option(
name="member",
description="The member that will be unmuted",
option_type=6,
required=True
),
create_option(
name="reason",
description="The reason why the member is being unmuted",
option_type=3,
required=False
),
],
default_permission=False,
permissions={
settings.get_value("guild_id"): [
create_permission(settings.get_value("role_staff"), SlashCommandPermissionType.ROLE, True),
create_permission(settings.get_value("role_trial_mod"), SlashCommandPermissionType.ROLE, True)
]
}
)
async def unmute(self, ctx: SlashContext, member: discord.Member, reason: str = None):
""" Unmutes member in guild. """
await ctx.defer()
# If we received an int instead of a discord.Member, the user is not in the server.
if not isinstance(member, discord.Member):
await embeds.error_message(ctx=ctx, description=f"That user is not in the server.")
return
# Checks if invoker can action that member (self, bot, etc.)
if not await can_action_member(bot=self.bot, ctx=ctx, member=member):
await embeds.error_message(ctx=ctx, description=f"You cannot action {member.mention}.")
return
# Check if the user is not muted already.
if not await self.is_user_muted(ctx=ctx, member=member):
await embeds.error_message(ctx=ctx, description=f"{member.mention} is not muted.")
return
# Automatically default the reason string when the moderator does not provide one.
if not reason:
reason = "No reason provided."
# Discord caps embed fields at a ridiculously low character limit; rejecting long reasons avoids problems with future embeds.
elif len(reason) > 512:
await embeds.error_message(ctx=ctx, description="Reason must be less than 512 characters.")
return
# Start creating the embed that will be used to alert the moderator that the user was successfully unmuted.
embed = embeds.make_embed(
ctx=ctx,
title=f"Unmuting member: {member.name}",
color="soft_green",
thumbnail_url="https://i.imgur.com/W7DpUHC.png"
)
embed.description = f"{member.mention} was unmuted by {ctx.author.mention} for: {reason}"
# Unmutes the user and archives the channel. Execution order is important here, otherwise the wrong unmuter will be used in the embed.
await self.unmute_member(ctx=ctx, member=member, reason=reason)
await self.archive_mute_channel(ctx=ctx, user_id=member.id, reason=reason)
# Attempt to DM the user to let them and the mods know they were unmuted.
if not await self.send_unmuted_dm_embed(ctx=ctx, member=member, reason=reason):
embed.add_field(name="Notice:", value=f"Unable to message {member.mention} about this action. This can be caused by the user not being in the server, having DMs disabled, or having the bot blocked.")
# If the mod sent the /unmute in the mute channel, this will cause an errors.NotFound 404.
# We cannot send the embed and then archive the channel because that would cause an error.AlreadyResponded.
try:
await ctx.send(embed=embed)
except discord.HTTPException:
pass
def setup(bot: Bot) -> None:
""" Load the Mute cog. """
bot.add_cog(MuteCog(bot))
log.info("Commands loaded: mutes")
| cogs/commands/moderation/mutes.py | 22,845 | Mute Cog
Load the Mute cog.
Enabling logs Open a connection to the database. Add the mute to the mod_log database. Occurs when the duration parameter in /mute is specified (tempmute). Commit the changes to the database. Removes "Muted" role from member. Open a connection to the database. Add the unmute to the mod_log database. Commit the changes to the database and close the connection. In case user has DMs blocked. Send member message telling them that they were unmuted and why. In case user has DMs blocked. Create a channel in the category specified in settings. Give both the staff and the user perms to access the channel. Create embed at the start of the channel letting the user know how long they're muted for and why. Embed mentions don't count as a ping so this is a workaround to that. Discord caps embed fields at a ridiculously low character limit, avoids problems with future embeds. Discord caps embed fields at a ridiculously low character limit, avoids problems with future embeds. Open a connection to the database. TODO: Get the mute reason by looking up the latest mute for the user and getting the reason column data. Gets the most recent mute for the user, sorted by descending (-) ID. Commit the changes to the database and close the connection. Get the member object of the ticket creator. Initialize the PrivateBin message log string. Initialize a list of moderator IDs as a set for no duplicates. Add the original muting moderator to avoid a blank embed field if no one interacts. Fetch the staff and trial mod role. TODO: Implement so it gets the channel when the moderator is the bot Loop through all messages in the ticket from old to new. Ignore the bot replies. Time format is unnecessarily lengthy so trimming it down and keep the log go easier on the eyes. Append the new messages to the current log as we loop. Iterates only through members that is still in the server. If the messenger has either staff role or trial mod role, add their ID to the mod_list set. Dump message log to PrivateBin. This returns a dictionary, but only the url is needed for the embed. Get the amount of time elapsed since the user was muted. Hours are the time delta in seconds divided by 3600. Minutes are the hour remainder divided by 60. The minutes remainder are the seconds. String that will store the duration in a more digestible format. If the time value is 0, skip it. If the time value is 1, make the time unit into singular form. Create the embed in mute-log. Send the embed to mute-log. Delete the mute channel. If we received an int instead of a discord.Member, the user is not in the server. Checks if invoker can action that member (self, bot, etc.) Check if the user is muted already. Automatically default the reason string to N/A when the moderator does not provide a reason. Discord caps embed fields at a ridiculously low character limit, avoids problems with future embeds. If the duration is not specified, default it to a permanent mute. Start creating the embed that will be used to alert the moderator that the user was successfully muted. Create the mute channel in the Staff category. Attempt to DM the user to let them know they were muted. Mutes the user and returns the embed letting the moderator know they were successfully muted. Get the duration string for embed and mute end time for the specified duration. If the duration string is empty due to Regex not matching anything, send and error embed and return. Start creating the embed that will be used to alert the moderator that the user was successfully muted. 
Create the mute channel in the Staff category. Attempt to DM the user to let them know they were muted. Mutes the user and stores the unmute time in the database for the background task. If we received an int instead of a discord.Member, the user is not in the server. Checks if invoker can action that member (self, bot, etc.) Check if the user is not muted already. Automatically default the reason string to N/A when the moderator does not provide a reason. Discord caps embed fields at a ridiculously low character limit, avoids problems with future embeds. Start creating the embed that will be used to alert the moderator that the user was successfully unmuted. Unmutes the user and and archives the channel. Execution order is important here, otherwise the wrong unmuter will be used in the embed. Attempt to DM the user to let them and the mods know they were unmuted. If the mod sent the /unmute in the mute channel, this will cause a errors.NotFound 404. We cannot send the embed and then archive the channel because that will cause a error.AlreadyResponded. | 4,639 | en | 0.918496 |
# -*- coding: utf-8 -*-
### Load basic libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
'''
[Step 1] Data preparation - load the auto MPG dataset with read_csv()
'''
# Convert the CSV file into a DataFrame
df = pd.read_csv('./auto-mpg.csv', header=None)
# Assign column names
df.columns = ['mpg','cylinders','displacement','horsepower','weight',
'acceleration','model year','origin','name']
# Inspect the data
print(df.head())
print('\n')
# IPython display option - raise the limit on the number of columns to print
pd.set_option('display.max_columns', 10)
print(df.head())
print('\n')
'''
[Step 2] Data exploration
'''
# Check the data types
print(df.info())
print('\n')
# Check the summary statistics
print(df.describe())
print('\n')
# Change the data type of the horsepower column (string -> number)
print(df['horsepower'].unique()) # check the unique values of the horsepower column
print('\n')
df['horsepower'].replace('?', np.nan, inplace=True) # replace '?' with np.nan
df.dropna(subset=['horsepower'], axis=0, inplace=True) # drop rows with missing data
df['horsepower'] = df['horsepower'].astype('float') # convert strings to floats
print(df.describe()) # check the summary statistics
print('\n')
'''
[Step 3] Feature (variable) selection
'''
# Select the columns (features) to use in the analysis (mpg, cylinders, horsepower, weight)
ndf = df[['mpg', 'cylinders', 'horsepower', 'weight']]
print(ndf.head())
print('\n')
### Check the linear relationship between the dependent variable Y ("mpg") and the other variables with scatter plots
# Draw a scatter plot with Matplotlib
ndf.plot(kind='scatter', x='weight', y='mpg', c='coral', s=10, figsize=(10, 5))
plt.show()
plt.close()
# Draw scatter plots with seaborn
fig = plt.figure(figsize=(10, 5))
ax1 = fig.add_subplot(1, 2, 1)
ax2 = fig.add_subplot(1, 2, 2)
sns.regplot(x='weight', y='mpg', data=ndf, ax=ax1) # show the regression line
sns.regplot(x='weight', y='mpg', data=ndf, ax=ax2, fit_reg=False) # hide the regression line
plt.show()
plt.close()
# seaborn joint plot - scatter plot and histograms
sns.jointplot(x='weight', y='mpg', data=ndf) # no regression line
sns.jointplot(x='weight', y='mpg', kind='reg', data=ndf) # show the regression line
plt.show()
plt.close()
# Draw a seaborn pairplot for every pairwise combination of variables
sns.pairplot(ndf)
plt.show()
plt.close()
'''
Step 4: Split the dataset into training data and test data
'''
# Select the features (variables)
X = ndf[['weight']]  # independent variable X
y = ndf['mpg']       # dependent variable Y
# Split into train data and test data (7:3 ratio)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X,               # independent variable
                                                     y,               # dependent variable
                                                     test_size=0.3,   # 30% held out for testing
                                                     random_state=10) # random seed
print('train data count: ', len(X_train))
print('test data count: ', len(X_test))
'''
Step 5: Simple linear regression model - using sklearn
'''
# Import the linear regression module from the sklearn library
from sklearn.linear_model import LinearRegression
# Create a simple linear regression model object
lr = LinearRegression()
# Fit the model on the train data
lr.fit(X_train, y_train)
# Apply the test data to the trained model and compute the coefficient of determination (R-squared)
r_square = lr.score(X_test, y_test)
print(r_square)
print('\n')
# Slope of the regression equation
print('slope a: ', lr.coef_)
print('\n')
# y-intercept of the regression equation
print('y-intercept b: ', lr.intercept_)
print('\n')
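# (Illustrative note, not part of the original script: with the fitted model,
# the predicted mpg for a given weight is lr.intercept_ + lr.coef_[0] * weight,
# e.g. lr.predict([[3000]]) for a 3,000 lb car.)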
# Feed the full X data into the model and compare the predicted values y_hat with the actual values y
y_hat = lr.predict(X)
plt.figure(figsize=(10, 5))
ax1 = sns.distplot(y, hist=False, label="y")
ax2 = sns.distplot(y_hat, hist=False, label="y_hat", ax=ax1)
plt.show()
plt.close() | 7.1_simple_linear_regression.py | 4,174 | -*- coding: utf-8 -*- 기본 라이브러리 불러오기 CSV 파일을 데이터프레임으로 변환 열 이름 지정 데이터 살펴보기 IPython 디스플레이 설정 - 출력할 열의 개수 한도 늘리기 데이터 자료형 확인 데이터 통계 요약정보 확인 horsepower 열의 자료형 변경 (문자열 ->숫자) horsepower 열의 고유값 확인 '?'을 np.nan으로 변경 누락데이터 행을 삭제 문자열을 실수형으로 변환 데이터 통계 요약정보 확인 분석에 활용할 열(속성)을 선택 (연비, 실린더, 출력, 중량) 종속 변수 Y인 "연비(mpg)"와 다른 변수 간의 선형관계를 그래프(산점도)로 확인 Matplotlib으로 산점도 그리기 seaborn으로 산점도 그리기 회귀선 표시회귀선 미표시 seaborn 조인트 그래프 - 산점도, 히스토그램 회귀선 없음 회귀선 표시 seaborn pariplot으로 두 변수 간의 모든 경우의 수 그리기 속성(변수) 선택독립 변수 X종속 변수 Y train data 와 test data로 구분(7:3 비율)독립 변수 종속 변수검증 30%랜덤 추출 값 sklearn 라이브러리에서 선형회귀분석 모듈 가져오기 단순회귀분석 모형 객체 생성 train data를 가지고 모형 학습 학습을 마친 모형에 test data를 적용하여 결정계수(R-제곱) 계산 회귀식의 기울기 회귀식의 y절편 모형에 전체 X 데이터를 입력하여 예측한 값 y_hat을 실제 값 y와 비교 | 721 | ko | 1.000048 |
from datetime import datetime, timedelta as td
import json
import os
import re
from secrets import token_urlsafe
from urllib.parse import urlencode
from cron_descriptor import ExpressionDescriptor
from croniter import croniter
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core import signing
from django.core.exceptions import PermissionDenied
from django.db.models import Count
from django.http import (
Http404,
HttpResponse,
HttpResponseBadRequest,
HttpResponseForbidden,
JsonResponse,
)
from django.shortcuts import get_object_or_404, redirect, render
from django.template.loader import get_template, render_to_string
from django.urls import reverse
from django.utils import timezone
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from hc.accounts.models import Project, Member
from hc.api.models import (
DEFAULT_GRACE,
DEFAULT_TIMEOUT,
MAX_DELTA,
Channel,
Check,
Ping,
Notification,
)
from hc.api.transports import Telegram
from hc.front.decorators import require_setting
from hc.front import forms
from hc.front.schemas import telegram_callback
from hc.front.templatetags.hc_extras import (
num_down_title,
down_title,
sortchecks,
site_hostname,
site_scheme,
)
from hc.lib import jsonschema
from hc.lib.badges import get_badge_url
import pytz
from pytz.exceptions import UnknownTimeZoneError
import requests
VALID_SORT_VALUES = ("name", "-name", "last_ping", "-last_ping", "created")
STATUS_TEXT_TMPL = get_template("front/log_status_text.html")
LAST_PING_TMPL = get_template("front/last_ping_cell.html")
EVENTS_TMPL = get_template("front/details_events.html")
DOWNTIMES_TMPL = get_template("front/details_downtimes.html")
def _tags_statuses(checks):
tags, down, grace, num_down = {}, {}, {}, 0
for check in checks:
status = check.get_status()
if status == "down":
num_down += 1
for tag in check.tags_list():
down[tag] = "down"
elif status == "grace":
for tag in check.tags_list():
grace[tag] = "grace"
else:
for tag in check.tags_list():
tags[tag] = "up"
tags.update(grace)
tags.update(down)
return tags, num_down
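# (Illustrative note, not part of the original module: because the "grace" and
# then "down" dicts are merged into `tags` last, a tag shared by an "up" check
# and a "down" check is reported as "down"; the worst status wins.)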
def _get_check_for_user(request, code):
""" Return specified check if current user has access to it. """
assert request.user.is_authenticated
check = get_object_or_404(Check.objects.select_related("project"), code=code)
if request.user.is_superuser:
return check, True
if request.user.id == check.project.owner_id:
return check, True
membership = get_object_or_404(Member, project=check.project, user=request.user)
return check, membership.rw
def _get_rw_check_for_user(request, code):
check, rw = _get_check_for_user(request, code)
if not rw:
raise PermissionDenied
return check
def _get_channel_for_user(request, code):
""" Return specified channel if current user has access to it. """
assert request.user.is_authenticated
channel = get_object_or_404(Channel.objects.select_related("project"), code=code)
if request.user.is_superuser:
return channel, True
if request.user.id == channel.project.owner_id:
return channel, True
membership = get_object_or_404(Member, project=channel.project, user=request.user)
return channel, membership.rw
def _get_rw_channel_for_user(request, code):
channel, rw = _get_channel_for_user(request, code)
if not rw:
raise PermissionDenied
return channel
def _get_project_for_user(request, project_code):
""" Check access, return (project, rw) tuple. """
project = get_object_or_404(Project, code=project_code)
if request.user.is_superuser:
return project, True
if request.user.id == project.owner_id:
return project, True
membership = get_object_or_404(Member, project=project, user=request.user)
return project, membership.rw
def _get_rw_project_for_user(request, project_code):
""" Check access, return (project, rw) tuple. """
project, rw = _get_project_for_user(request, project_code)
if not rw:
raise PermissionDenied
return project
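# (Illustrative note, not part of the original module: the _get_*_for_user
# helpers all follow the same access pattern: superusers and the project owner
# get read-write access, other users need a Member row, and that row's `rw`
# flag is what makes the _get_rw_* variants raise PermissionDenied.)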
def _refresh_last_active_date(profile):
""" Update last_active_date if it is more than a day old. """
now = timezone.now()
if profile.last_active_date is None or (now - profile.last_active_date).days > 0:
profile.last_active_date = now
profile.save()
@login_required
def my_checks(request, code):
_refresh_last_active_date(request.profile)
project, rw = _get_project_for_user(request, code)
if request.GET.get("sort") in VALID_SORT_VALUES:
request.profile.sort = request.GET["sort"]
request.profile.save()
if request.session.get("last_project_id") != project.id:
request.session["last_project_id"] = project.id
q = Check.objects.filter(project=project)
checks = list(q.prefetch_related("channel_set"))
sortchecks(checks, request.profile.sort)
tags_statuses, num_down = _tags_statuses(checks)
pairs = list(tags_statuses.items())
pairs.sort(key=lambda pair: pair[0].lower())
channels = Channel.objects.filter(project=project)
channels = list(channels.order_by("created"))
hidden_checks = set()
# Hide checks that don't match selected tags:
selected_tags = set(request.GET.getlist("tag", []))
if selected_tags:
for check in checks:
if not selected_tags.issubset(check.tags_list()):
hidden_checks.add(check)
# Hide checks that don't match the search string:
search = request.GET.get("search", "")
if search:
for check in checks:
search_key = "%s\n%s" % (check.name.lower(), check.code)
if search not in search_key:
hidden_checks.add(check)
# Do we need to show the "Last Duration" header?
show_last_duration = False
for check in checks:
if check.clamped_last_duration():
show_last_duration = True
break
ctx = {
"page": "checks",
"rw": rw,
"checks": checks,
"channels": channels,
"num_down": num_down,
"tags": pairs,
"ping_endpoint": settings.PING_ENDPOINT,
"timezones": pytz.all_timezones,
"project": project,
"num_available": project.num_checks_available(),
"sort": request.profile.sort,
"selected_tags": selected_tags,
"search": search,
"hidden_checks": hidden_checks,
"show_last_duration": show_last_duration,
}
return render(request, "front/my_checks.html", ctx)
@login_required
def status(request, code):
_get_project_for_user(request, code)
checks = list(Check.objects.filter(project__code=code))
details = []
for check in checks:
ctx = {"check": check}
details.append(
{
"code": str(check.code),
"status": check.get_status(),
"last_ping": LAST_PING_TMPL.render(ctx),
"started": check.last_start is not None,
}
)
tags_statuses, num_down = _tags_statuses(checks)
return JsonResponse(
{"details": details, "tags": tags_statuses, "title": num_down_title(num_down)}
)
@login_required
@require_POST
def switch_channel(request, code, channel_code):
check = _get_rw_check_for_user(request, code)
channel = get_object_or_404(Channel, code=channel_code)
if channel.project_id != check.project_id:
return HttpResponseBadRequest()
if request.POST.get("state") == "on":
channel.checks.add(check)
else:
channel.checks.remove(check)
return HttpResponse()
def index(request):
if request.user.is_authenticated:
projects = list(request.profile.projects())
ctx = {
"page": "projects",
"projects": projects,
"last_project_id": request.session.get("last_project_id"),
}
return render(request, "front/projects.html", ctx)
check = Check()
ctx = {
"page": "welcome",
"check": check,
"ping_url": check.url(),
"enable_apprise": settings.APPRISE_ENABLED is True,
"enable_call": settings.TWILIO_AUTH is not None,
"enable_discord": settings.DISCORD_CLIENT_ID is not None,
"enable_linenotify": settings.LINENOTIFY_CLIENT_ID is not None,
"enable_matrix": settings.MATRIX_ACCESS_TOKEN is not None,
"enable_pdc": settings.PD_VENDOR_KEY is not None,
"enable_pushbullet": settings.PUSHBULLET_CLIENT_ID is not None,
"enable_pushover": settings.PUSHOVER_API_TOKEN is not None,
"enable_shell": settings.SHELL_ENABLED is True,
"enable_slack_btn": settings.SLACK_CLIENT_ID is not None,
"enable_sms": settings.TWILIO_AUTH is not None,
"enable_telegram": settings.TELEGRAM_TOKEN is not None,
"enable_trello": settings.TRELLO_APP_KEY is not None,
"enable_whatsapp": settings.TWILIO_USE_WHATSAPP,
"registration_open": settings.REGISTRATION_OPEN,
}
return render(request, "front/welcome.html", ctx)
def dashboard(request):
return render(request, "front/dashboard.html", {})
def serve_doc(request, doc="introduction"):
# Filenames in /templates/docs/ consist of lowercase letters and underscores,
# -- make sure we don't accept anything else
if not re.match(r"^[a-z_]+$", doc):
raise Http404("not found")
path = os.path.join(settings.BASE_DIR, "templates/docs", doc + ".html")
if not os.path.exists(path):
raise Http404("not found")
replaces = {
"{{ default_timeout }}": str(int(DEFAULT_TIMEOUT.total_seconds())),
"{{ default_grace }}": str(int(DEFAULT_GRACE.total_seconds())),
"SITE_NAME": settings.SITE_NAME,
"SITE_ROOT": settings.SITE_ROOT,
"SITE_HOSTNAME": site_hostname(),
"SITE_SCHEME": site_scheme(),
"PING_ENDPOINT": settings.PING_ENDPOINT,
"PING_URL": settings.PING_ENDPOINT + "your-uuid-here",
"IMG_URL": os.path.join(settings.STATIC_URL, "img/docs"),
}
content = open(path, "r", encoding="utf-8").read()
for placeholder, value in replaces.items():
content = content.replace(placeholder, value)
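# (Illustrative example, not part of the original module: assuming a one-day
# DEFAULT_TIMEOUT, the "{{ default_timeout }}" placeholder in a docs template
# would be replaced with "86400" here.)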
ctx = {
"page": "docs",
"section": doc,
"content": content,
"first_line": content.split("\n")[0],
}
return render(request, "front/docs_single.html", ctx)
def docs_cron(request):
return render(request, "front/docs_cron.html", {})
@require_POST
@login_required
def add_check(request, code):
project = _get_rw_project_for_user(request, code)
if project.num_checks_available() <= 0:
return HttpResponseBadRequest()
check = Check(project=project)
check.save()
check.assign_all_channels()
url = reverse("hc-details", args=[check.code])
return redirect(url + "?new")
@require_POST
@login_required
def update_name(request, code):
check = _get_rw_check_for_user(request, code)
form = forms.NameTagsForm(request.POST)
if form.is_valid():
check.name = form.cleaned_data["name"]
check.tags = form.cleaned_data["tags"]
check.desc = form.cleaned_data["desc"]
check.save()
if "/details/" in request.META.get("HTTP_REFERER", ""):
return redirect("hc-details", code)
return redirect("hc-checks", check.project.code)
@require_POST
@login_required
def filtering_rules(request, code):
check = _get_rw_check_for_user(request, code)
form = forms.FilteringRulesForm(request.POST)
if form.is_valid():
check.subject = form.cleaned_data["subject"]
check.subject_fail = form.cleaned_data["subject_fail"]
check.methods = form.cleaned_data["methods"]
check.manual_resume = form.cleaned_data["manual_resume"]
check.save()
return redirect("hc-details", code)
@require_POST
@login_required
def update_timeout(request, code):
check = _get_rw_check_for_user(request, code)
kind = request.POST.get("kind")
if kind == "simple":
form = forms.TimeoutForm(request.POST)
if not form.is_valid():
return HttpResponseBadRequest()
check.kind = "simple"
check.timeout = form.cleaned_data["timeout"]
check.grace = form.cleaned_data["grace"]
elif kind == "cron":
form = forms.CronForm(request.POST)
if not form.is_valid():
return HttpResponseBadRequest()
check.kind = "cron"
check.schedule = form.cleaned_data["schedule"]
check.tz = form.cleaned_data["tz"]
check.grace = td(minutes=form.cleaned_data["grace"])
check.alert_after = check.going_down_after()
if check.status == "up" and check.alert_after < timezone.now():
# Checks can flip from "up" to "down" state as a result of changing check's
# schedule. We don't want to send notifications when changing schedule
# interactively in the web UI. So we update the `alert_after` and `status`
# fields here the same way as `sendalerts` would do, but without sending
# an actual alert:
check.alert_after = None
check.status = "down"
check.save()
if "/details/" in request.META.get("HTTP_REFERER", ""):
return redirect("hc-details", code)
return redirect("hc-checks", check.project.code)
@require_POST
def cron_preview(request):
schedule = request.POST.get("schedule", "")
tz = request.POST.get("tz")
ctx = {"tz": tz, "dates": []}
try:
zone = pytz.timezone(tz)
now_local = timezone.localtime(timezone.now(), zone)
if len(schedule.split()) != 5:
raise ValueError()
it = croniter(schedule, now_local)
for i in range(0, 6):
ctx["dates"].append(it.get_next(datetime))
ctx["desc"] = str(ExpressionDescriptor(schedule, use_24hour_time_format=True))
except UnknownTimeZoneError:
ctx["bad_tz"] = True
except:
ctx["bad_schedule"] = True
return render(request, "front/cron_preview.html", ctx)
@login_required
def ping_details(request, code, n=None):
check, rw = _get_check_for_user(request, code)
q = Ping.objects.filter(owner=check)
if n:
q = q.filter(n=n)
try:
ping = q.latest("created")
except Ping.DoesNotExist:
return render(request, "front/ping_details_not_found.html")
ctx = {"check": check, "ping": ping}
return render(request, "front/ping_details.html", ctx)
@require_POST
@login_required
def pause(request, code):
check = _get_rw_check_for_user(request, code)
check.status = "paused"
check.last_start = None
check.alert_after = None
check.save()
# Don't redirect after an AJAX request:
if request.META.get("HTTP_X_REQUESTED_WITH") == "XMLHttpRequest":
return HttpResponse()
return redirect("hc-details", code)
@require_POST
@login_required
def resume(request, code):
check = _get_rw_check_for_user(request, code)
check.status = "new"
check.last_start = None
check.last_ping = None
check.alert_after = None
check.save()
return redirect("hc-details", code)
@require_POST
@login_required
def remove_check(request, code):
check = _get_rw_check_for_user(request, code)
project = check.project
check.delete()
return redirect("hc-checks", project.code)
def _get_events(check, limit):
pings = Ping.objects.filter(owner=check).order_by("-id")[:limit]
pings = list(pings)
prev = None
for ping in reversed(pings):
if ping.kind != "start" and prev and prev.kind == "start":
delta = ping.created - prev.created
if delta < MAX_DELTA:
setattr(ping, "delta", delta)
prev = ping
alerts = []
if len(pings):
cutoff = pings[-1].created
alerts = Notification.objects.select_related("channel").filter(
owner=check, check_status="down", created__gt=cutoff
)
events = pings + list(alerts)
events.sort(key=lambda el: el.created, reverse=True)
return events
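# (Illustrative note, not part of the original module: _get_events walks the
# pings oldest-to-newest and, whenever a "start" ping is immediately followed
# by a non-"start" ping, attaches the elapsed time as `ping.delta`, provided it
# is below MAX_DELTA; "down" notifications newer than the oldest shown ping are
# then merged in and everything is sorted newest-first.)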
@login_required
def log(request, code):
check, rw = _get_check_for_user(request, code)
limit = check.project.owner_profile.ping_log_limit
ctx = {
"project": check.project,
"check": check,
"events": _get_events(check, limit),
"limit": limit,
"show_limit_notice": check.n_pings > limit and settings.USE_PAYMENTS,
}
return render(request, "front/log.html", ctx)
@login_required
def details(request, code):
_refresh_last_active_date(request.profile)
check, rw = _get_check_for_user(request, code)
channels = Channel.objects.filter(project=check.project)
channels = list(channels.order_by("created"))
all_tags = set()
q = Check.objects.filter(project=check.project).exclude(tags="")
for tags in q.values_list("tags", flat=True):
all_tags.update(tags.split(" "))
ctx = {
"page": "details",
"project": check.project,
"check": check,
"rw": rw,
"channels": channels,
"enabled_channels": list(check.channel_set.all()),
"timezones": pytz.all_timezones,
"downtimes": check.downtimes(months=3),
"is_new": "new" in request.GET,
"is_copied": "copied" in request.GET,
"all_tags": " ".join(sorted(all_tags)),
}
return render(request, "front/details.html", ctx)
@login_required
def transfer(request, code):
check = _get_rw_check_for_user(request, code)
if request.method == "POST":
target_project = _get_rw_project_for_user(request, request.POST["project"])
if target_project.num_checks_available() <= 0:
return HttpResponseBadRequest()
check.project = target_project
check.save()
check.assign_all_channels()
messages.success(request, "Check transferred successfully!")
return redirect("hc-details", code)
ctx = {"check": check}
return render(request, "front/transfer_modal.html", ctx)
@require_POST
@login_required
def copy(request, code):
check = _get_rw_check_for_user(request, code)
if check.project.num_checks_available() <= 0:
return HttpResponseBadRequest()
new_name = check.name + " (copy)"
# Make sure we don't exceed the 100 character db field limit:
if len(new_name) > 100:
new_name = check.name[:90] + "... (copy)"
copied = Check(project=check.project)
copied.name = new_name
copied.desc, copied.tags = check.desc, check.tags
copied.subject, copied.subject_fail = check.subject, check.subject_fail
copied.methods = check.methods
copied.manual_resume = check.manual_resume
copied.kind = check.kind
copied.timeout, copied.grace = check.timeout, check.grace
copied.schedule, copied.tz = check.schedule, check.tz
copied.save()
copied.channel_set.add(*check.channel_set.all())
url = reverse("hc-details", args=[copied.code])
return redirect(url + "?copied")
@login_required
def status_single(request, code):
check, rw = _get_check_for_user(request, code)
status = check.get_status()
events = _get_events(check, 20)
updated = "1"
if len(events):
updated = str(events[0].created.timestamp())
doc = {
"status": status,
"status_text": STATUS_TEXT_TMPL.render({"check": check, "rw": rw}),
"title": down_title(check),
"updated": updated,
}
if updated != request.GET.get("u"):
doc["events"] = EVENTS_TMPL.render({"check": check, "events": events})
doc["downtimes"] = DOWNTIMES_TMPL.render({"downtimes": check.downtimes(3)})
return JsonResponse(doc)
@login_required
def badges(request, code):
project, rw = _get_project_for_user(request, code)
tags = set()
for check in Check.objects.filter(project=project):
tags.update(check.tags_list())
sorted_tags = sorted(tags, key=lambda s: s.lower())
sorted_tags.append("*") # For the "overall status" badge
key = project.badge_key
urls = []
for tag in sorted_tags:
urls.append(
{
"tag": tag,
"svg": get_badge_url(key, tag),
"svg3": get_badge_url(key, tag, with_late=True),
"json": get_badge_url(key, tag, fmt="json"),
"json3": get_badge_url(key, tag, fmt="json", with_late=True),
"shields": get_badge_url(key, tag, fmt="shields"),
"shields3": get_badge_url(key, tag, fmt="shields", with_late=True),
}
)
ctx = {
"have_tags": len(urls) > 1,
"page": "badges",
"project": project,
"badges": urls,
}
return render(request, "front/badges.html", ctx)
@login_required
def channels(request, code):
project, rw = _get_project_for_user(request, code)
if request.method == "POST":
if not rw:
return HttpResponseForbidden()
code = request.POST["channel"]
try:
channel = Channel.objects.get(code=code)
except Channel.DoesNotExist:
return HttpResponseBadRequest()
if channel.project_id != project.id:
return HttpResponseForbidden()
new_checks = []
for key in request.POST:
if key.startswith("check-"):
code = key[6:]
try:
check = Check.objects.get(code=code)
except Check.DoesNotExist:
return HttpResponseBadRequest()
if check.project_id != project.id:
return HttpResponseForbidden()
new_checks.append(check)
channel.checks.set(new_checks)
return redirect("hc-channels", project.code)
channels = Channel.objects.filter(project=project)
channels = channels.order_by("created")
channels = channels.annotate(n_checks=Count("checks"))
ctx = {
"page": "channels",
"rw": rw,
"project": project,
"profile": project.owner_profile,
"channels": channels,
"enable_apprise": settings.APPRISE_ENABLED is True,
"enable_call": settings.TWILIO_AUTH is not None,
"enable_discord": settings.DISCORD_CLIENT_ID is not None,
"enable_linenotify": settings.LINENOTIFY_CLIENT_ID is not None,
"enable_matrix": settings.MATRIX_ACCESS_TOKEN is not None,
"enable_pdc": settings.PD_VENDOR_KEY is not None,
"enable_pushbullet": settings.PUSHBULLET_CLIENT_ID is not None,
"enable_pushover": settings.PUSHOVER_API_TOKEN is not None,
"enable_shell": settings.SHELL_ENABLED is True,
"enable_slack_btn": settings.SLACK_CLIENT_ID is not None,
"enable_sms": settings.TWILIO_AUTH is not None,
"enable_telegram": settings.TELEGRAM_TOKEN is not None,
"enable_trello": settings.TRELLO_APP_KEY is not None,
"enable_whatsapp": settings.TWILIO_USE_WHATSAPP,
"use_payments": settings.USE_PAYMENTS,
}
return render(request, "front/channels.html", ctx)
@login_required
def channel_checks(request, code):
channel = _get_rw_channel_for_user(request, code)
assigned = set(channel.checks.values_list("code", flat=True).distinct())
checks = Check.objects.filter(project=channel.project).order_by("created")
ctx = {"checks": checks, "assigned": assigned, "channel": channel}
return render(request, "front/channel_checks.html", ctx)
@require_POST
@login_required
def update_channel_name(request, code):
channel = _get_rw_channel_for_user(request, code)
form = forms.ChannelNameForm(request.POST)
if form.is_valid():
channel.name = form.cleaned_data["name"]
channel.save()
return redirect("hc-channels", channel.project.code)
def verify_email(request, code, token):
channel = get_object_or_404(Channel, code=code)
if channel.make_token() == token:
channel.email_verified = True
channel.save()
return render(request, "front/verify_email_success.html")
return render(request, "bad_link.html")
@csrf_exempt
def unsubscribe_email(request, code, signed_token):
# Some email servers open links in emails to check for malicious content.
# To work around this, on GET requests we serve a confirmation form.
# If the signature is at least 5 minutes old, we also include JS code to
# auto-submit the form.
ctx = {}
if ":" in signed_token:
signer = signing.TimestampSigner(salt="alerts")
# First, check the signature without looking at the timestamp:
try:
token = signer.unsign(signed_token)
except signing.BadSignature:
return render(request, "bad_link.html")
# Check if timestamp is older than 5 minutes:
try:
signer.unsign(signed_token, max_age=300)
except signing.SignatureExpired:
ctx["autosubmit"] = True
else:
token = signed_token
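# (Illustrative note, not part of the original module: the signature is checked
# twice on purpose: once without a timestamp limit to reject tampered links
# outright, and once with max_age=300 so that links older than five minutes
# auto-submit the confirmation form instead of waiting for a click.)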
channel = get_object_or_404(Channel, code=code, kind="email")
if channel.make_token() != token:
return render(request, "bad_link.html")
if request.method != "POST":
return render(request, "accounts/unsubscribe_submit.html", ctx)
channel.delete()
return render(request, "front/unsubscribe_success.html")
@require_POST
@login_required
def send_test_notification(request, code):
channel, rw = _get_channel_for_user(request, code)
dummy = Check(name="TEST", status="down")
dummy.last_ping = timezone.now() - td(days=1)
dummy.n_pings = 42
if channel.kind == "webhook" and not channel.url_down:
if channel.url_up:
# If we don't have url_down, but do have url_up, then
# send "TEST is UP" notification instead:
dummy.status = "up"
# Delete all older test notifications for this channel
Notification.objects.filter(channel=channel, owner=None).delete()
# Send the test notification
error = channel.notify(dummy, is_test=True)
if error:
messages.warning(request, "Could not send a test notification. %s" % error)
else:
messages.success(request, "Test notification sent!")
return redirect("hc-channels", channel.project.code)
@require_POST
@login_required
def remove_channel(request, code):
channel = _get_rw_channel_for_user(request, code)
project = channel.project
channel.delete()
return redirect("hc-channels", project.code)
@login_required
def add_email(request, code):
project = _get_rw_project_for_user(request, code)
if request.method == "POST":
form = forms.AddEmailForm(request.POST)
if form.is_valid():
channel = Channel(project=project, kind="email")
channel.value = json.dumps(
{
"value": form.cleaned_data["value"],
"up": form.cleaned_data["up"],
"down": form.cleaned_data["down"],
}
)
channel.save()
channel.assign_all_checks()
is_own_email = form.cleaned_data["value"] == request.user.email
if is_own_email or not settings.EMAIL_USE_VERIFICATION:
# If user is subscribing *their own* address
# we can skip the verification step.
# Additionally, in a self-hosted setting, the administrator has the
# option to disable the email verification step altogether.
channel.email_verified = True
channel.save()
else:
channel.send_verify_link()
return redirect("hc-channels", project.code)
else:
form = forms.AddEmailForm()
ctx = {
"page": "channels",
"project": project,
"use_verification": settings.EMAIL_USE_VERIFICATION,
"form": form,
}
return render(request, "integrations/add_email.html", ctx)
@login_required
def add_webhook(request, code):
project = _get_rw_project_for_user(request, code)
if request.method == "POST":
form = forms.WebhookForm(request.POST)
if form.is_valid():
channel = Channel(project=project, kind="webhook")
channel.name = form.cleaned_data["name"]
channel.value = form.get_value()
channel.save()
channel.assign_all_checks()
return redirect("hc-channels", project.code)
else:
form = forms.WebhookForm()
ctx = {
"page": "channels",
"project": project,
"form": form,
}
return render(request, "integrations/webhook_form.html", ctx)
@login_required
def edit_webhook(request, code):
channel = _get_rw_channel_for_user(request, code)
if channel.kind != "webhook":
return HttpResponseBadRequest()
if request.method == "POST":
form = forms.WebhookForm(request.POST)
if form.is_valid():
channel.name = form.cleaned_data["name"]
channel.value = form.get_value()
channel.save()
return redirect("hc-channels", channel.project.code)
else:
def flatten(d):
return "\n".join("%s: %s" % pair for pair in d.items())
doc = json.loads(channel.value)
doc["headers_down"] = flatten(doc["headers_down"])
doc["headers_up"] = flatten(doc["headers_up"])
doc["name"] = channel.name
form = forms.WebhookForm(doc)
ctx = {
"page": "channels",
"project": channel.project,
"channel": channel,
"form": form,
}
return render(request, "integrations/webhook_form.html", ctx)
@require_setting("SHELL_ENABLED")
@login_required
def add_shell(request, code):
project = _get_rw_project_for_user(request, code)
if request.method == "POST":
form = forms.AddShellForm(request.POST)
if form.is_valid():
channel = Channel(project=project, kind="shell")
channel.value = form.get_value()
channel.save()
channel.assign_all_checks()
return redirect("hc-channels", project.code)
else:
form = forms.AddShellForm()
ctx = {
"page": "channels",
"project": project,
"form": form,
}
return render(request, "integrations/add_shell.html", ctx)
@login_required
def add_pd(request, code):
project = _get_rw_project_for_user(request, code)
if request.method == "POST":
form = forms.AddPdForm(request.POST)
if form.is_valid():
channel = Channel(project=project, kind="pd")
channel.value = form.cleaned_data["value"]
channel.save()
channel.assign_all_checks()
return redirect("hc-channels", project.code)
else:
form = forms.AddPdForm()
ctx = {"page": "channels", "form": form}
return render(request, "integrations/add_pd.html", ctx)
@require_setting("PD_VENDOR_KEY")
def pdc_help(request):
ctx = {"page": "channels"}
return render(request, "integrations/add_pdc.html", ctx)
@require_setting("PD_VENDOR_KEY")
@login_required
def add_pdc(request, code):
project = _get_rw_project_for_user(request, code)
state = token_urlsafe()
callback = settings.SITE_ROOT + reverse(
"hc-add-pdc-complete", args=[project.code, state]
)
connect_url = "https://connect.pagerduty.com/connect?" + urlencode(
{"vendor": settings.PD_VENDOR_KEY, "callback": callback}
)
ctx = {"page": "channels", "project": project, "connect_url": connect_url}
request.session["pd"] = state
return render(request, "integrations/add_pdc.html", ctx)
@require_setting("PD_VENDOR_KEY")
@login_required
def add_pdc_complete(request, code, state):
if "pd" not in request.session:
return HttpResponseBadRequest()
project = _get_rw_project_for_user(request, code)
session_state = request.session.pop("pd")
if session_state != state:
return HttpResponseBadRequest()
if request.GET.get("error") == "cancelled":
messages.warning(request, "PagerDuty setup was cancelled.")
return redirect("hc-channels", project.code)
channel = Channel(kind="pd", project=project)
channel.value = json.dumps(
{
"service_key": request.GET.get("service_key"),
"account": request.GET.get("account"),
}
)
channel.save()
channel.assign_all_checks()
messages.success(request, "The PagerDuty integration has been added!")
return redirect("hc-channels", project.code)
@login_required
def add_pagertree(request, code):
project = _get_rw_project_for_user(request, code)
if request.method == "POST":
form = forms.AddUrlForm(request.POST)
if form.is_valid():
channel = Channel(project=project, kind="pagertree")
channel.value = form.cleaned_data["value"]
channel.save()
channel.assign_all_checks()
return redirect("hc-channels", project.code)
else:
form = forms.AddUrlForm()
ctx = {"page": "channels", "project": project, "form": form}
return render(request, "integrations/add_pagertree.html", ctx)
@login_required
def add_slack(request, code):
project = _get_rw_project_for_user(request, code)
if request.method == "POST":
form = forms.AddUrlForm(request.POST)
if form.is_valid():
channel = Channel(project=project, kind="slack")
channel.value = form.cleaned_data["value"]
channel.save()
channel.assign_all_checks()
return redirect("hc-channels", project.code)
else:
form = forms.AddUrlForm()
ctx = {
"page": "channels",
"form": form,
}
return render(request, "integrations/add_slack.html", ctx)
@require_setting("SLACK_CLIENT_ID")
def slack_help(request):
ctx = {"page": "channels"}
return render(request, "integrations/add_slack_btn.html", ctx)
@require_setting("SLACK_CLIENT_ID")
@login_required
def add_slack_btn(request, code):
project = _get_rw_project_for_user(request, code)
state = token_urlsafe()
authorize_url = "https://slack.com/oauth/v2/authorize?" + urlencode(
{
"scope": "incoming-webhook",
"client_id": settings.SLACK_CLIENT_ID,
"state": state,
}
)
ctx = {
"project": project,
"page": "channels",
"authorize_url": authorize_url,
}
request.session["add_slack"] = (state, str(project.code))
return render(request, "integrations/add_slack_btn.html", ctx)
@require_setting("SLACK_CLIENT_ID")
@login_required
def add_slack_complete(request):
if "add_slack" not in request.session:
return HttpResponseForbidden()
state, code = request.session.pop("add_slack")
project = _get_rw_project_for_user(request, code)
if request.GET.get("error") == "access_denied":
messages.warning(request, "Slack setup was cancelled.")
return redirect("hc-channels", project.code)
if request.GET.get("state") != state:
return HttpResponseForbidden()
result = requests.post(
"https://slack.com/api/oauth.v2.access",
{
"client_id": settings.SLACK_CLIENT_ID,
"client_secret": settings.SLACK_CLIENT_SECRET,
"code": request.GET.get("code"),
},
)
doc = result.json()
if doc.get("ok"):
channel = Channel(kind="slack", project=project)
channel.value = result.text
channel.save()
channel.assign_all_checks()
messages.success(request, "The Slack integration has been added!")
else:
s = doc.get("error")
messages.warning(request, "Error message from slack: %s" % s)
return redirect("hc-channels", project.code)
@login_required
def add_mattermost(request, code):
project = _get_rw_project_for_user(request, code)
if request.method == "POST":
form = forms.AddUrlForm(request.POST)
if form.is_valid():
channel = Channel(project=project, kind="mattermost")
channel.value = form.cleaned_data["value"]
channel.save()
channel.assign_all_checks()
return redirect("hc-channels", project.code)
else:
form = forms.AddUrlForm()
ctx = {"page": "channels", "form": form, "project": project}
return render(request, "integrations/add_mattermost.html", ctx)
@require_setting("PUSHBULLET_CLIENT_ID")
@login_required
def add_pushbullet(request, code):
project = _get_rw_project_for_user(request, code)
state = token_urlsafe()
authorize_url = "https://www.pushbullet.com/authorize?" + urlencode(
{
"client_id": settings.PUSHBULLET_CLIENT_ID,
"redirect_uri": settings.SITE_ROOT + reverse(add_pushbullet_complete),
"response_type": "code",
"state": state,
}
)
ctx = {
"page": "channels",
"project": project,
"authorize_url": authorize_url,
}
request.session["add_pushbullet"] = (state, str(project.code))
return render(request, "integrations/add_pushbullet.html", ctx)
@require_setting("PUSHBULLET_CLIENT_ID")
@login_required
def add_pushbullet_complete(request):
if "add_pushbullet" not in request.session:
return HttpResponseForbidden()
state, code = request.session.pop("add_pushbullet")
project = _get_rw_project_for_user(request, code)
if request.GET.get("error") == "access_denied":
messages.warning(request, "Pushbullet setup was cancelled.")
return redirect("hc-channels", project.code)
if request.GET.get("state") != state:
return HttpResponseForbidden()
result = requests.post(
"https://api.pushbullet.com/oauth2/token",
{
"client_id": settings.PUSHBULLET_CLIENT_ID,
"client_secret": settings.PUSHBULLET_CLIENT_SECRET,
"code": request.GET.get("code"),
"grant_type": "authorization_code",
},
)
doc = result.json()
if "access_token" in doc:
channel = Channel(kind="pushbullet", project=project)
channel.value = doc["access_token"]
channel.save()
channel.assign_all_checks()
messages.success(request, "The Pushbullet integration has been added!")
else:
messages.warning(request, "Something went wrong")
return redirect("hc-channels", project.code)
@require_setting("DISCORD_CLIENT_ID")
@login_required
def add_discord(request, code):
project = _get_rw_project_for_user(request, code)
state = token_urlsafe()
auth_url = "https://discordapp.com/api/oauth2/authorize?" + urlencode(
{
"client_id": settings.DISCORD_CLIENT_ID,
"scope": "webhook.incoming",
"redirect_uri": settings.SITE_ROOT + reverse(add_discord_complete),
"response_type": "code",
"state": state,
}
)
ctx = {"page": "channels", "project": project, "authorize_url": auth_url}
request.session["add_discord"] = (state, str(project.code))
return render(request, "integrations/add_discord.html", ctx)
@require_setting("DISCORD_CLIENT_ID")
@login_required
def add_discord_complete(request):
if "add_discord" not in request.session:
return HttpResponseForbidden()
state, code = request.session.pop("add_discord")
project = _get_rw_project_for_user(request, code)
if request.GET.get("error") == "access_denied":
messages.warning(request, "Discord setup was cancelled.")
return redirect("hc-channels", project.code)
if request.GET.get("state") != state:
return HttpResponseForbidden()
result = requests.post(
"https://discordapp.com/api/oauth2/token",
{
"client_id": settings.DISCORD_CLIENT_ID,
"client_secret": settings.DISCORD_CLIENT_SECRET,
"code": request.GET.get("code"),
"grant_type": "authorization_code",
"redirect_uri": settings.SITE_ROOT + reverse(add_discord_complete),
},
)
doc = result.json()
if "access_token" in doc:
channel = Channel(kind="discord", project=project)
channel.value = result.text
channel.save()
channel.assign_all_checks()
messages.success(request, "The Discord integration has been added!")
else:
messages.warning(request, "Something went wrong.")
return redirect("hc-channels", project.code)
@require_setting("PUSHOVER_API_TOKEN")
def pushover_help(request):
ctx = {"page": "channels"}
return render(request, "integrations/add_pushover_help.html", ctx)
@require_setting("PUSHOVER_API_TOKEN")
@login_required
def add_pushover(request, code):
project = _get_rw_project_for_user(request, code)
if request.method == "POST":
state = token_urlsafe()
failure_url = settings.SITE_ROOT + reverse("hc-channels", args=[project.code])
success_url = (
settings.SITE_ROOT
+ reverse("hc-add-pushover", args=[project.code])
+ "?"
+ urlencode(
{
"state": state,
"prio": request.POST.get("po_priority", "0"),
"prio_up": request.POST.get("po_priority_up", "0"),
}
)
)
subscription_url = (
settings.PUSHOVER_SUBSCRIPTION_URL
+ "?"
+ urlencode({"success": success_url, "failure": failure_url})
)
request.session["pushover"] = state
return redirect(subscription_url)
# Handle successful subscriptions
if "pushover_user_key" in request.GET:
if "pushover" not in request.session:
return HttpResponseForbidden()
state = request.session.pop("pushover")
if request.GET.get("state") != state:
return HttpResponseForbidden()
if request.GET.get("pushover_unsubscribed") == "1":
# Unsubscription: delete all Pushover channels for this project
Channel.objects.filter(project=project, kind="po").delete()
return redirect("hc-channels", project.code)
form = forms.AddPushoverForm(request.GET)
if not form.is_valid():
return HttpResponseBadRequest()
channel = Channel(project=project, kind="po")
channel.value = form.get_value()
channel.save()
channel.assign_all_checks()
messages.success(request, "The Pushover integration has been added!")
return redirect("hc-channels", project.code)
# Show Integration Settings form
ctx = {
"page": "channels",
"project": project,
"po_retry_delay": td(seconds=settings.PUSHOVER_EMERGENCY_RETRY_DELAY),
"po_expiration": td(seconds=settings.PUSHOVER_EMERGENCY_EXPIRATION),
}
return render(request, "integrations/add_pushover.html", ctx)
@login_required
def add_opsgenie(request, code):
project = _get_rw_project_for_user(request, code)
if request.method == "POST":
form = forms.AddOpsGenieForm(request.POST)
if form.is_valid():
channel = Channel(project=project, kind="opsgenie")
v = {"region": form.cleaned_data["region"], "key": form.cleaned_data["key"]}
channel.value = json.dumps(v)
channel.save()
channel.assign_all_checks()
return redirect("hc-channels", project.code)
else:
form = forms.AddOpsGenieForm()
ctx = {"page": "channels", "project": project, "form": form}
return render(request, "integrations/add_opsgenie.html", ctx)
@login_required
def add_victorops(request, code):
project = _get_rw_project_for_user(request, code)
if request.method == "POST":
form = forms.AddUrlForm(request.POST)
if form.is_valid():
channel = Channel(project=project, kind="victorops")
channel.value = form.cleaned_data["value"]
channel.save()
channel.assign_all_checks()
return redirect("hc-channels", project.code)
else:
form = forms.AddUrlForm()
ctx = {"page": "channels", "project": project, "form": form}
return render(request, "integrations/add_victorops.html", ctx)
@login_required
def add_zulip(request, code):
project = _get_rw_project_for_user(request, code)
if request.method == "POST":
form = forms.AddZulipForm(request.POST)
if form.is_valid():
channel = Channel(project=project, kind="zulip")
channel.value = form.get_value()
channel.save()
channel.assign_all_checks()
return redirect("hc-channels", project.code)
else:
form = forms.AddZulipForm()
ctx = {"page": "channels", "project": project, "form": form}
return render(request, "integrations/add_zulip.html", ctx)
@csrf_exempt
@require_POST
def telegram_bot(request):
try:
doc = json.loads(request.body.decode())
jsonschema.validate(doc, telegram_callback)
except ValueError:
return HttpResponseBadRequest()
except jsonschema.ValidationError:
# We don't recognize the message format, but don't want Telegram
# retrying this over and over again, so respond with 200 OK
return HttpResponse()
if "/start" not in doc["message"]["text"]:
return HttpResponse()
chat = doc["message"]["chat"]
name = max(chat.get("title", ""), chat.get("username", ""))
invite = render_to_string(
"integrations/telegram_invite.html",
{"qs": signing.dumps((chat["id"], chat["type"], name))},
)
Telegram.send(chat["id"], invite)
return HttpResponse()
@require_setting("TELEGRAM_TOKEN")
def telegram_help(request):
ctx = {
"page": "channels",
"bot_name": settings.TELEGRAM_BOT_NAME,
}
return render(request, "integrations/add_telegram.html", ctx)
@require_setting("TELEGRAM_TOKEN")
@login_required
def add_telegram(request):
chat_id, chat_type, chat_name = None, None, None
qs = request.META["QUERY_STRING"]
if qs:
try:
chat_id, chat_type, chat_name = signing.loads(qs, max_age=600)
except signing.BadSignature:
return render(request, "bad_link.html")
if request.method == "POST":
project = _get_rw_project_for_user(request, request.POST.get("project"))
channel = Channel(project=project, kind="telegram")
channel.value = json.dumps(
{"id": chat_id, "type": chat_type, "name": chat_name}
)
channel.save()
channel.assign_all_checks()
messages.success(request, "The Telegram integration has been added!")
return redirect("hc-channels", project.code)
ctx = {
"page": "channels",
"projects": request.profile.projects(),
"chat_id": chat_id,
"chat_type": chat_type,
"chat_name": chat_name,
"bot_name": settings.TELEGRAM_BOT_NAME,
}
return render(request, "integrations/add_telegram.html", ctx)
@require_setting("TWILIO_AUTH")
@login_required
def add_sms(request, code):
project = _get_rw_project_for_user(request, code)
if request.method == "POST":
form = forms.AddSmsForm(request.POST)
if form.is_valid():
channel = Channel(project=project, kind="sms")
channel.name = form.cleaned_data["label"]
channel.value = json.dumps({"value": form.cleaned_data["value"]})
channel.save()
channel.assign_all_checks()
return redirect("hc-channels", project.code)
else:
form = forms.AddSmsForm()
ctx = {
"page": "channels",
"project": project,
"form": form,
"profile": project.owner_profile,
}
return render(request, "integrations/add_sms.html", ctx)
@require_setting("TWILIO_AUTH")
@login_required
def add_call(request, code):
project = _get_rw_project_for_user(request, code)
if request.method == "POST":
form = forms.AddSmsForm(request.POST)
if form.is_valid():
channel = Channel(project=project, kind="call")
channel.name = form.cleaned_data["label"]
channel.value = json.dumps({"value": form.cleaned_data["value"]})
channel.save()
channel.assign_all_checks()
return redirect("hc-channels", project.code)
else:
form = forms.AddSmsForm()
ctx = {
"page": "channels",
"project": project,
"form": form,
"profile": project.owner_profile,
}
return render(request, "integrations/add_call.html", ctx)
@require_setting("TWILIO_USE_WHATSAPP")
@login_required
def add_whatsapp(request, code):
project = _get_rw_project_for_user(request, code)
if request.method == "POST":
form = forms.AddSmsForm(request.POST)
if form.is_valid():
channel = Channel(project=project, kind="whatsapp")
channel.name = form.cleaned_data["label"]
channel.value = json.dumps(
{
"value": form.cleaned_data["value"],
"up": form.cleaned_data["up"],
"down": form.cleaned_data["down"],
}
)
channel.save()
channel.assign_all_checks()
return redirect("hc-channels", project.code)
else:
form = forms.AddSmsForm()
ctx = {
"page": "channels",
"project": project,
"form": form,
"profile": project.owner_profile,
}
return render(request, "integrations/add_whatsapp.html", ctx)
@require_setting("TRELLO_APP_KEY")
@login_required
def add_trello(request, code):
project = _get_rw_project_for_user(request, code)
if request.method == "POST":
channel = Channel(project=project, kind="trello")
channel.value = request.POST["settings"]
channel.save()
channel.assign_all_checks()
return redirect("hc-channels", project.code)
return_url = settings.SITE_ROOT + reverse("hc-add-trello", args=[project.code])
authorize_url = "https://trello.com/1/authorize?" + urlencode(
{
"expiration": "never",
"name": settings.SITE_NAME,
"scope": "read,write",
"response_type": "token",
"key": settings.TRELLO_APP_KEY,
"return_url": return_url,
}
)
ctx = {
"page": "channels",
"project": project,
"authorize_url": authorize_url,
}
return render(request, "integrations/add_trello.html", ctx)
@require_setting("MATRIX_ACCESS_TOKEN")
@login_required
def add_matrix(request, code):
project = _get_rw_project_for_user(request, code)
if request.method == "POST":
form = forms.AddMatrixForm(request.POST)
if form.is_valid():
channel = Channel(project=project, kind="matrix")
channel.value = form.cleaned_data["room_id"]
# If user supplied room alias instead of ID, use it as channel name
alias = form.cleaned_data["alias"]
if not alias.startswith("!"):
channel.name = alias
channel.save()
channel.assign_all_checks()
messages.success(request, "The Matrix integration has been added!")
return redirect("hc-channels", project.code)
else:
form = forms.AddMatrixForm()
ctx = {
"page": "channels",
"project": project,
"form": form,
"matrix_user_id": settings.MATRIX_USER_ID,
}
return render(request, "integrations/add_matrix.html", ctx)
@require_setting("APPRISE_ENABLED")
@login_required
def add_apprise(request, code):
project = _get_rw_project_for_user(request, code)
if request.method == "POST":
form = forms.AddAppriseForm(request.POST)
if form.is_valid():
channel = Channel(project=project, kind="apprise")
channel.value = form.cleaned_data["url"]
channel.save()
channel.assign_all_checks()
messages.success(request, "The Apprise integration has been added!")
return redirect("hc-channels", project.code)
else:
form = forms.AddAppriseForm()
ctx = {"page": "channels", "project": project, "form": form}
return render(request, "integrations/add_apprise.html", ctx)
@require_setting("TRELLO_APP_KEY")
@login_required
@require_POST
def trello_settings(request):
token = request.POST.get("token")
url = "https://api.trello.com/1/members/me/boards?" + urlencode(
{
"key": settings.TRELLO_APP_KEY,
"token": token,
"fields": "id,name",
"lists": "open",
"list_fields": "id,name",
}
)
r = requests.get(url)
ctx = {"token": token, "data": r.json()}
return render(request, "integrations/trello_settings.html", ctx)
@login_required
def add_msteams(request, code):
project = _get_rw_project_for_user(request, code)
if request.method == "POST":
form = forms.AddUrlForm(request.POST)
if form.is_valid():
channel = Channel(project=project, kind="msteams")
channel.value = form.cleaned_data["value"]
channel.save()
channel.assign_all_checks()
return redirect("hc-channels", project.code)
else:
form = forms.AddUrlForm()
ctx = {"page": "channels", "project": project, "form": form}
return render(request, "integrations/add_msteams.html", ctx)
@login_required
def add_prometheus(request, code):
project, rw = _get_project_for_user(request, code)
ctx = {"page": "channels", "project": project}
return render(request, "integrations/add_prometheus.html", ctx)
def metrics(request, code, key):
if len(key) != 32:
return HttpResponseBadRequest()
q = Project.objects.filter(code=code, api_key_readonly=key)
try:
project = q.get()
except Project.DoesNotExist:
return HttpResponseForbidden()
checks = Check.objects.filter(project_id=project.id).order_by("id")
def esc(s):
return s.replace("\\", "\\\\").replace('"', '\\"').replace("\n", "\\n")
def output(checks):
yield "# HELP hc_check_up Whether the check is currently up (1 for yes, 0 for no).\n"
yield "# TYPE hc_check_up gauge\n"
TMPL = """hc_check_up{name="%s", tags="%s", unique_key="%s"} %d\n"""
for check in checks:
value = 0 if check.get_status() == "down" else 1
yield TMPL % (esc(check.name), esc(check.tags), check.unique_key, value)
tags_statuses, num_down = _tags_statuses(checks)
yield "\n"
yield "# HELP hc_tag_up Whether all checks with this tag are up (1 for yes, 0 for no).\n"
yield "# TYPE hc_tag_up gauge\n"
TMPL = """hc_tag_up{tag="%s"} %d\n"""
for tag in sorted(tags_statuses):
value = 0 if tags_statuses[tag] == "down" else 1
yield TMPL % (esc(tag), value)
yield "\n"
yield "# HELP hc_checks_total The total number of checks.\n"
yield "# TYPE hc_checks_total gauge\n"
yield "hc_checks_total %d\n" % len(checks)
yield "\n"
yield "# HELP hc_checks_down_total The number of checks currently down.\n"
yield "# TYPE hc_checks_down_total gauge\n"
yield "hc_checks_down_total %d\n" % num_down
return HttpResponse(output(checks), content_type="text/plain")
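# --- Editor's note: hedged illustration, not part of the original file. ---
# The generator above produces the Prometheus text exposition format. For a
# project with a single check named "backup" (tag "nightly", currently up),
# the response body would look roughly like this; the name, tag and key are
# made up:
#
#   # HELP hc_check_up Whether the check is currently up (1 for yes, 0 for no).
#   # TYPE hc_check_up gauge
#   hc_check_up{name="backup", tags="nightly", unique_key="<unique key>"} 1
#
#   # HELP hc_tag_up Whether all checks with this tag are up (1 for yes, 0 for no).
#   # TYPE hc_tag_up gauge
#   hc_tag_up{tag="nightly"} 1
#
#   # HELP hc_checks_total The total number of checks.
#   # TYPE hc_checks_total gauge
#   hc_checks_total 1
#
#   # HELP hc_checks_down_total The number of checks currently down.
#   # TYPE hc_checks_down_total gauge
#   hc_checks_down_total 0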
@login_required
def add_spike(request, code):
project = _get_rw_project_for_user(request, code)
if request.method == "POST":
form = forms.AddUrlForm(request.POST)
if form.is_valid():
channel = Channel(project=project, kind="spike")
channel.value = form.cleaned_data["value"]
channel.save()
channel.assign_all_checks()
return redirect("hc-channels", project.code)
else:
form = forms.AddUrlForm()
ctx = {"page": "channels", "project": project, "form": form}
return render(request, "integrations/add_spike.html", ctx)
@require_setting("LINENOTIFY_CLIENT_ID")
@login_required
def add_linenotify(request, code):
project = _get_rw_project_for_user(request, code)
state = token_urlsafe()
authorize_url = " https://notify-bot.line.me/oauth/authorize?" + urlencode(
{
"client_id": settings.LINENOTIFY_CLIENT_ID,
"redirect_uri": settings.SITE_ROOT + reverse(add_linenotify_complete),
"response_type": "code",
"state": state,
"scope": "notify",
}
)
ctx = {
"page": "channels",
"project": project,
"authorize_url": authorize_url,
}
request.session["add_linenotify"] = (state, str(project.code))
return render(request, "integrations/add_linenotify.html", ctx)
@require_setting("LINENOTIFY_CLIENT_ID")
@login_required
def add_linenotify_complete(request):
if "add_linenotify" not in request.session:
return HttpResponseForbidden()
state, code = request.session.pop("add_linenotify")
if request.GET.get("state") != state:
return HttpResponseForbidden()
project = _get_rw_project_for_user(request, code)
if request.GET.get("error") == "access_denied":
messages.warning(request, "LINE Notify setup was cancelled.")
return redirect("hc-channels", project.code)
# Exchange code for access token
result = requests.post(
"https://notify-bot.line.me/oauth/token",
{
"grant_type": "authorization_code",
"code": request.GET.get("code"),
"redirect_uri": settings.SITE_ROOT + reverse(add_linenotify_complete),
"client_id": settings.LINENOTIFY_CLIENT_ID,
"client_secret": settings.LINENOTIFY_CLIENT_SECRET,
},
)
doc = result.json()
if doc.get("status") != 200:
messages.warning(request, "Something went wrong.")
return redirect("hc-channels", project.code)
# Fetch notification target's name, will use it as channel name:
token = doc["access_token"]
result = requests.get(
"https://notify-api.line.me/api/status",
headers={"Authorization": "Bearer %s" % token},
)
doc = result.json()
channel = Channel(kind="linenotify", project=project)
channel.name = doc.get("target")
channel.value = token
channel.save()
channel.assign_all_checks()
messages.success(request, "The LINE Notify integration has been added!")
return redirect("hc-channels", project.code)
# Forks: add custom views after this line
def stats(request):
return render(request, "front/stats.html", {})
| hc/front/views.py | 59,311 | Return specified channel if current user has access to it.
Return specified check if current user has access to it.
Check access, return (project, rw) tuple.
Check access, return (project, rw) tuple.
Update last_active_date if it is more than a day old.
Hide checks that don't match selected tags: Hide checks that don't match the search string: Do we need to show the "Last Duration" header? Filenames in /templates/docs/ consist of lowercase letters and underscores, -- make sure we don't accept anything else Checks can flip from "up" to "down" state as a result of changing check's schedule. We don't want to send notifications when changing schedule interactively in the web UI. So we update the `alert_after` and `status` fields here the same way as `sendalerts` would do, but without sending an actual alert: Don't redirect after an AJAX request: Make sure we don't exceed the 100 character db field limit: For the "overall status" badge Some email servers open links in emails to check for malicious content. To work around this, on GET requests we serve a confirmation form. If the signature is at least 5 minutes old, we also include JS code to auto-submit the form. First, check the signature without looking at the timestamp: Check if timestamp is older than 5 minutes: If we don't have url_down, but do have url_up then send "TEST is UP" notification instead: Delete all older test notifications for this channel Send the test notification If user is subscribing *their own* address we can skip the verification step. Additionally, in self-hosted setting, administrator has the option to disable the email verification step altogether. Handle successful subscriptions Unsubscription: delete all Pushover channels for this project Show Integration Settings form We don't recognize the message format, but don't want Telegram retrying this over and over again, so respond with 200 OK If user supplied room alias instead of ID, use it as channel name Exchange code for access token Fetch notification target's name, will use it as channel name: Forks: add custom views after this line
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union
import numpy as np
import torch
from .utils import get_mask_edges, get_surface_distance
def compute_average_surface_distance(
seg_pred: Union[np.ndarray, torch.Tensor],
seg_gt: Union[np.ndarray, torch.Tensor],
label_idx: int,
symmetric: bool = False,
distance_metric: str = "euclidean",
):
"""
This function is used to compute the Average Surface Distance from `seg_pred` to `seg_gt`
under the default setting.
In addition, if ``symmetric = True`` is set, the average symmetric surface distance between
these two inputs will be returned.
Args:
seg_pred: first binary or labelfield image.
seg_gt: second binary or labelfield image.
label_idx: for labelfield images, convert to binary with
`seg_pred = seg_pred == label_idx`.
symmetric: whether to calculate the symmetric average surface distance between
`seg_pred` and `seg_gt`. Defaults to ``False``.
distance_metric: [``"euclidean"``, ``"chessboard"``, ``"taxicab"``]
the metric used to compute surface distance. Defaults to ``"euclidean"``.
"""
(edges_pred, edges_gt) = get_mask_edges(seg_pred, seg_gt, label_idx)
surface_distance = get_surface_distance(edges_pred, edges_gt, label_idx, distance_metric=distance_metric)
if surface_distance.shape == (0,):
return np.inf
avg_surface_distance = surface_distance.mean()
if not symmetric:
return avg_surface_distance
surface_distance_2 = get_surface_distance(edges_gt, edges_pred, label_idx, distance_metric=distance_metric)
if surface_distance_2.shape == (0,):
return np.inf
avg_surface_distance_2 = surface_distance_2.mean()
return np.mean((avg_surface_distance, avg_surface_distance_2))
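# --- Editor's note: minimal usage sketch, not part of the original module. ---
# It exercises only the public function defined above on two synthetic binary
# masks; the overlap region is offset so the surface distance is non-zero.
if __name__ == "__main__":
    pred = np.zeros((32, 32), dtype=np.int64)
    gt = np.zeros((32, 32), dtype=np.int64)
    pred[8:20, 8:20] = 1
    gt[10:22, 10:22] = 1
    # one-directional average surface distance from `pred` to `gt`
    print(compute_average_surface_distance(pred, gt, label_idx=1))
    # average symmetric surface distance between the two masks
    print(compute_average_surface_distance(pred, gt, label_idx=1, symmetric=True))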
| monai/metrics/surface_distance.py | 2,371 | This function is used to compute the Average Surface Distance from `seg_pred` to `seg_gt`
under the default setting.
In addition, if ``symmetric = True`` is set, the average symmetric surface distance between
these two inputs will be returned.
Args:
seg_pred: first binary or labelfield image.
seg_gt: second binary or labelfield image.
label_idx: for labelfield images, convert to binary with
`seg_pred = seg_pred == label_idx`.
symmetric: whether to calculate the symmetric average surface distance between
`seg_pred` and `seg_gt`. Defaults to ``False``.
distance_metric: [``"euclidean"``, ``"chessboard"``, ``"taxicab"``]
the metric used to compute surface distance. Defaults to ``"euclidean"``.
Copyright 2020 MONAI Consortium Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 1,291 | en | 0.781085 |
"""
This version assumes each task's dataset has an equal number of labeled samples
"""
import os
import json
from collections import defaultdict
import numpy as np
from tensorboardX import SummaryWriter
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.autograd import grad as torch_grad
import torch.optim as optim
from torch.utils.data import DataLoader
import torchvision
from torchvision import transforms
import util
from util import in_feature_size
import alpha_opt
import data_loading as db
from torch.optim import lr_scheduler
class MTL_pairwise(object):
def __init__(self, ft_extrctor_prp, hypoth_prp, discrm_prp, **kwargs):
final_results = defaultdict()
# ######################### argument definition ###############
self.criterion = kwargs ['criterion']
self.c3_value = kwargs['c3']
self.grad_weight = kwargs['grad_weight']
self.img_size = kwargs['img_size']
self.num_chnnl = kwargs['chnnl']
self.lr = kwargs['lr']
self.momentum = kwargs['momentum']
self.epochs = kwargs['epochs']
num_tr_smpl = kwargs['tr_smpl']
num_test_smpl = kwargs['test_smpl']
self.trial = kwargs['Trials']
self.tsklist = kwargs['tsk_list']
self.num_tsk = len(self.tsklist)
if self.criterion=='wasserstien': self.stp_sz_sch = 30
else: self.stp_sz_sch = 50
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.alpha = np.ones((self.num_tsk, self.num_tsk)) * (0.1 / (self.num_tsk - 1))
np.fill_diagonal(self.alpha, 0.9)
self.wrdir = os.path.join(os.getcwd(), '_'.join( self.tsklist)+'_'+str(num_tr_smpl)+'_'+ str(self.epochs)+'_'+self.criterion, 'runs_'+str(self.c3_value))
try:
os.makedirs(self.wrdir)
except OSError:
if not os.path.isdir(self.wrdir):
raise
with open(os.path.join(self.wrdir, 'info_itr_'+str(self.trial)+'.json'), 'a') as outfile:
json.dump([ft_extrctor_prp,hypoth_prp,discrm_prp], outfile)
json.dump(kwargs, outfile)
# Constructing F -> H and F -> D
self.FE = util.feature_extractor(ft_extrctor_prp).construct().to(self.device)
print (self.FE)
self.hypothesis = [util.classifier(hypoth_prp).to(self.device) for _ in range(self.num_tsk)]
print (self.hypothesis[0])
self.discrm = {'{}{}'.format(i, j): util.classifier(discrm_prp).to(self.device) for i in range(self.num_tsk) for
j in range(i + 1, self.num_tsk)}
print (self.discrm['01'])
all_parameters_h = sum([list(h.parameters()) for h in self.hypothesis], [])
all_parameters_discrm = sum([list(self.discrm[d].parameters()) for d in self.discrm], [])
self.optimizer = optim.SGD(list(self.FE.parameters()) + list(all_parameters_h) + list(all_parameters_discrm),
lr=self.lr,
momentum=self.momentum)
self.scheduler = lr_scheduler.StepLR(self.optimizer, step_size=self.stp_sz_sch, gamma=0.5)
train_loader, test_loader, validation_loader = db.data_loading(self.img_size, num_tr_smpl,num_test_smpl, self.tsklist )
self.writer = SummaryWriter(os.path.join(self.wrdir, 'itr'+str(self.trial)))
Total_loss = []
for epoch in range(self.epochs):
self.scheduler.step(epoch)
whole_loss = self.model_fit(train_loader, epoch)
Total_loss.append(whole_loss)
tasks_trAcc = self.model_eval(train_loader, epoch, 'train')
tasks_valAcc = self.model_eval(validation_loader, epoch, 'validation')
tasks_teAcc = self.model_eval(test_loader, epoch, 'test')
# if np.abs(np.mean(Total_loss[-5:-1]) - Total_loss[-1]) < 0.002 :
# print('Stop learning, reach to a stable point at epoch {:d} with total loss {:.4f}'.format(epoch,
# Total_loss[-1]))
# break
if 1.5*np.mean(Total_loss[-5:-1]) < Total_loss[-1]:
print ('****** Training error is increasing')
break
final_results['alpha_c3_'+str(self.c3_value)] = (self.alpha).tolist()
final_results['Tasks_val_Acc_c3_'+str(self.c3_value)] = (tasks_valAcc).tolist()
final_results['Tasks_test_Acc_c3_' + str(self.c3_value) ] = (tasks_teAcc).tolist()
final_results['Tasks_train_Acc_c3_'+str(self.c3_value)] = (tasks_trAcc).tolist()
with open(os.path.join(self.wrdir, 'info_itr_'+str(self.trial)+'.json'), 'a') as outfile:
json.dump(final_results, outfile)
final_prmtr = defaultdict()
final_prmtr['FE'] = self.FE.state_dict()
for i,h in enumerate(self.hypothesis):
final_prmtr['hypo'+str(i)] = h.state_dict()
for k, D in self.discrm.items():
final_prmtr['dicrm'+k] = D.state_dict()
torch.save(final_prmtr, os.path.join(self.wrdir, 'itr'+str(self.trial),'MTL_parameters.pt'))
self.writer.close()
def model_fit(self, data_loader, epoch):
discrm_distnc_mtrx = np.zeros((self.num_tsk, self.num_tsk))
loss_mtrx_hypo_vlue = np.zeros((self.num_tsk, self.num_tsk))
weigh_loss_hypo_vlue, correct_hypo = np.zeros(self.num_tsk), np.zeros(self.num_tsk)
Total_loss = 0
n_batch = 0
# set train mode
self.FE.train()
for t in range(self.num_tsk):
self.hypothesis[t].train()
for j in range(t + 1, self.num_tsk):
self.discrm['{}{}'.format(t, j)].train()
# #####
for tasks_batch in zip(*data_loader):
Loss_1, Loss_2 = 0, 0
n_batch += 1
# data = (x,y)
inputs = torch.cat([batch[0] for batch in tasks_batch])
btch_sz = len(tasks_batch[0][0])
targets = torch.cat([batch[1] for batch in tasks_batch])
# inputs = (x1,...,xT) targets = (y1,...,yT)
inputs = inputs.to(self.device)
targets = targets.to(self.device)
features = self.FE(inputs)
features = features.view(features.size(0), -1)
for t in range(self.num_tsk):
w = torch.tensor([np.tile(self.alpha[t, i], reps=len(data[0])) for i, data in enumerate(tasks_batch)],
dtype=torch.float).view(-1)
w = w.to(self.device)
label_prob = self.hypothesis[t](features)
pred = label_prob[t * (btch_sz):(t + 1) * btch_sz].argmax(dim=1, keepdim=True)
correct_hypo[t] += (
(pred.eq(targets[t * btch_sz:(t + 1) * btch_sz].view_as(pred)).sum().item()) / btch_sz)
hypo_loss = torch.mean(w * F.cross_entropy(label_prob, targets, reduction='none'))
# definition of loss to be optimized
Loss_1 += hypo_loss
weigh_loss_hypo_vlue[t] += hypo_loss.item()
loss_mtrx_hypo_vlue[t, :] += [F.cross_entropy(label_prob[j * (btch_sz):(j + 1) * btch_sz, :],
targets[j * (btch_sz):(j + 1) * btch_sz],
reduction='mean').item() for j in range(self.num_tsk)]
for k in range(t + 1, self.num_tsk):
# w = (alpha_{tk}+alpha_{kt}) assumption: matrix alpha is not symmetric
alpha_domain = torch.tensor(self.alpha[t, k] + self.alpha[k, t], dtype=torch.float)
alpha_domain = alpha_domain.to(self.device)
if self.criterion =='h_divergence':
domain_y = torch.cat([torch.ones(len(tasks_batch[t][0]), dtype=torch.float),
torch.zeros(len(tasks_batch[k][0]), dtype=torch.float)])
# domain_x = torch.cat([tasks_batch[t-1][0], tasks_batch[k-1][0] ])
domain_y = domain_y.to(self.device)
domain_features = torch.cat([features[t * btch_sz:(t + 1) * btch_sz], features[k * btch_sz:(k + 1) * btch_sz]])
domain_features = domain_features.view(domain_features.size(0), -1)
domain_pred = self.discrm['{}{}'.format(t, k)](domain_features).squeeze()
disc_loss = F.binary_cross_entropy(domain_pred, domain_y)
# discriminator accuracy defines H-divergence
domain_lbl = domain_pred >= 0.5
domain_lbl = domain_lbl.type(torch.cuda.FloatTensor)
discrm_distnc_mtrx[t, k] += (domain_lbl.eq(domain_y).sum().item()) / len(domain_y)
discrm_distnc_mtrx[k, t] = discrm_distnc_mtrx[t, k]
print(discrm_distnc_mtrx[t, :])
elif self.criterion =='wasserstien':
features_t = features[t * btch_sz:(t + 1) * btch_sz]
features_t = features_t.view(features_t.size(0), -1)
features_k = features[k * btch_sz:(k + 1) * btch_sz]
features_k = features_k.view(features_k.size(0), -1)
pred_k = self.discrm['{}{}'.format(t, k)](features_k).squeeze()
pred_t = self.discrm['{}{}'.format(t, k)](features_t).squeeze()
gradient_pntly=self.gradient_penalty(inputs[t * btch_sz:(t + 1) * btch_sz],inputs[k * btch_sz:(k + 1) * btch_sz], t, k)
# critic loss ---> E[f(x_t)] - E[f(x_k)] + gamma * E[(||grad f(x_interp)|| - 1)^2]
disc_loss = (pred_t.mean() - pred_k.mean()) + self.grad_weight * gradient_pntly
# the negative sign turns the critic gap into a Wasserstein distance estimate
discrm_distnc_mtrx[t, k] += -(pred_t.mean() - pred_k.mean()).item()
discrm_distnc_mtrx[k, t] = discrm_distnc_mtrx[t, k]
disc_loss = alpha_domain * disc_loss
Loss_2 += disc_loss
if n_batch % 500 == 0:
grid_img = torchvision.utils.make_grid(inputs, nrow=5, padding=30)
self.writer.add_image('result Image', grid_img)
Loss = torch.mean(Loss_1) + Loss_2 * (1 / self.num_tsk)
Total_loss += Loss.item()
# loss formula for all tasks regarding the current batch
self.optimizer.zero_grad()
Loss.backward()
self.optimizer.step()
discrm_distnc_mtrx /= n_batch
weigh_loss_hypo_vlue /= n_batch
loss_mtrx_hypo_vlue /= n_batch
correct_hypo /= n_batch
Total_loss /= n_batch
print('================== epoch {:d} ========'.format(epoch))
print('Final Total Loss {:.3f}'.format(Total_loss ))
print('discriminator distance based on '+self.criterion +'\n'+ str(discrm_distnc_mtrx))
print(' hypothesis loss \n' + str(loss_mtrx_hypo_vlue))
print(' hypothesis accuracy \n' + str(correct_hypo * 100))
print('coefficient:',self.alpha)
self.writer.add_scalars('MTL_total_loss', {'MTL_total_loss': Total_loss}, epoch)
for t in range(self.num_tsk):
# self.writer.add_scalars('task_' + str(t) + '/loss', {'loss_train': loss_mtrx_hypo_vlue[t, t]}, epoch)
for j in range(self.num_tsk):
if j != t:
self.writer.add_scalars('task_' + str(t) + '/Discrm_distance',
{'loss_D' + '_'.join([self.tsklist[t],self.tsklist[j]]): discrm_distnc_mtrx[t, j]}, epoch)
self.writer.add_scalars('task_' + str(t) + '/alpha',
{'alpha' + '_'.join([self.tsklist[t],self.tsklist[j]]): self.alpha[t, j]}, epoch)
if epoch % 1 == 0:
c_2, c_3 = 1 * np.ones(self.num_tsk), self.c3_value * np.ones(self.num_tsk)
self.alpha = alpha_opt.min_alphacvx(self.alpha.T, c_2, c_3, loss_mtrx_hypo_vlue.T, discrm_distnc_mtrx.T)
self.alpha = self.alpha.T
return Total_loss
def model_eval(self, data_loader, epoch, phase='test'):
loss_hypo_vlue = np.zeros(self.num_tsk)
correct_hypo = np.zeros(self.num_tsk)
self.FE.eval()
for t in range(self.num_tsk):
n_batch_t = 0
self.hypothesis[t].eval()
for j in range(t + 1, self.num_tsk):
self.discrm['{}{}'.format(t, j)].eval()
for inputs, targets in (data_loader[t]):
n_batch_t += 1
inputs = inputs.to(self.device)
targets = targets.to(self.device)
features = self.FE(inputs)
features = features.view(features.size(0), -1)
label_prob = self.hypothesis[t](features)
pred = label_prob.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct_hypo[t] += ((pred.eq(targets.view_as(pred)).sum().item()) / len(pred))
loss_hypo_vlue[t] += F.cross_entropy(label_prob, targets, reduction='mean').item()
if n_batch_t % 100 == 0:
grid_img = torchvision.utils.make_grid(inputs, nrow=5, padding=30)
self.writer.add_image('result Image_' + phase, grid_img)
loss_hypo_vlue[t] /= n_batch_t
correct_hypo[t] /= n_batch_t
self.writer.add_scalars('task_' + str(t) + '/loss', {'loss_' + phase: loss_hypo_vlue[t]}, epoch)
self.writer.add_scalars('task_' + str(t) + '/Acc', {'Acc_' + phase: correct_hypo[t]}, epoch)
print('\t === hypothesis **' + phase + '** loss \n' + str(loss_hypo_vlue))
print('\t === hypothesis **' + phase + '** accuracy \n' + str(correct_hypo * 100))
return correct_hypo
def gradient_penalty(self, data_t, data_k, t, k):
batch_size = data_k.size()[0]
# Calculate interpolation
theta = torch.rand(batch_size, 1, 1,1)
theta = theta.expand_as(data_t)
theta = theta.to(self.device)
interpolated = theta * data_t + (1 - theta) * data_k
# computing gradient w.r.t. interpolated sample
interpolated = Variable(interpolated, requires_grad=True)
interpolated = interpolated.to(self.device)
features_intrpltd = self.FE(interpolated)
features_intrpltd = features_intrpltd.view(features_intrpltd.size(0), -1)
# Calculate probability of interpolated examples
prob_interpolated = self.discrm['{}{}'.format(t, k)](features_intrpltd).squeeze()
# Calculate gradients of probabilities with respect to examples
gradients = torch_grad(outputs=prob_interpolated, inputs=interpolated,
grad_outputs=torch.ones(
prob_interpolated.size()).to(self.device),
create_graph=True, retain_graph=True)[0]
# Gradients have shape (batch_size, num_channels, img_width, img_height),
# so flatten to easily take norm per example in batch
gradients = gradients.view(batch_size, -1)
# Derivatives of the gradient close to 0 can cause problems because of
# the square root, so manually calculate norm and add epsilon
gradients_norm = torch.sqrt(torch.sum(gradients ** 2, dim=1) + 1e-12)
# Return gradient penalty
return ((gradients_norm - 1) ** 2).mean()
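# --- Editor's note: hedged sanity-check sketch, not part of the original file. ---
# For a linear critic f(x) = w . x the gradient w.r.t. x equals w everywhere, so
# the penalty computed by gradient_penalty() should reduce to (||w|| - 1)^2 for
# any interpolation point. The helper below verifies that with plain autograd,
# reusing the torch / torch_grad imports at the top of this file; it does not
# depend on the classes above and is only meant to be called manually.
def _gradient_penalty_sanity_check():
    w = torch.tensor([3.0, 4.0]) # ||w|| = 5
    x = torch.rand(8, 2, requires_grad=True) # stand-in "interpolated" samples
    out = (x * w).sum(dim=1) # linear critic output
    grads = torch_grad(outputs=out, inputs=x,
                       grad_outputs=torch.ones_like(out),
                       create_graph=True)[0]
    norms = torch.sqrt(torch.sum(grads ** 2, dim=1) + 1e-12)
    penalty = ((norms - 1) ** 2).mean()
    print(penalty.item()) # expected to be close to (5 - 1)^2 = 16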
def main():
""""options for criterion is wasserstien, h_divergence"""
# criterion = ['wasserstien', 'h_divergence']
itertn = 1
# for c3_value in [0.5, 0.2, 1]:
c3_value = 0.5
for trial in range(1):
args = {'img_size': 28,
'chnnl': 1,
'lr': 0.01,
'momentum': 0.9,
'epochs': 1,
'tr_smpl': 1000,
'test_smpl': 10000,
'tsk_list': ['mnist', 'svhn', 'm_mnist'],
'grad_weight': 1,
'Trials': trial,
#'criterion': 'h_divergence',
'criterion': 'wasserstien',
'c3':c3_value}
ft_extrctor_prp = {'layer1': {'conv': [1, 32, 5, 1, 2], 'elu': [], 'maxpool': [3, 2, 0]},
'layer2': {'conv': [32, 64, 5, 1, 2], 'elu': [], 'maxpool': [3, 2, 0]}}
hypoth_prp = {
'layer3': {'fc': [util.in_feature_size(ft_extrctor_prp, args['img_size']), 128], 'act_fn': 'elu'},
'layer4': {'fc': [128, 10], 'act_fn': 'softmax'}}
discrm_prp = {'reverse_gradient': {},
'layer3': {'fc': [util.in_feature_size(ft_extrctor_prp, args['img_size']), 128],
'act_fn': 'elu'},
'layer4': {'fc': [128, 1], 'act_fn': 'sigm'}}
mtl = MTL_pairwise(ft_extrctor_prp, hypoth_prp, discrm_prp, **args)
del mtl
if __name__ == '__main__':
main()
 | MTL.py | 17,359 | Options for 'criterion' are 'wasserstien' and 'h_divergence'.
This version assumes each task's dataset has an equal number of labeled samples
argument definition Constructing F -> H and F -> D if np.abs(np.mean(Total_loss[-5:-1]) - Total_loss[-1]) < 0.002 : print('Stop learning, reach to a stable point at epoch {:d} with total loss {:.4f}'.format(epoch, Total_loss[-1])) break set train mode data = (x,y) inputs = (x1,...,xT) targets = (y1,...,yT) definition of loss to be optimized w = (alpha_{tk}+alpha_{kt}) assumption: matrix alpha is not symmetric domain_x = torch.cat([tasks_batch[t-1][0], tasks_batch[k-1][0] ]) discriminator accuracy defines H-divergence critic loss ---> E(f(x)) - E(f(y)) + gamma* ||grad(f(x+y/2))-1|| negative sign compute wasserstien distance loss formula for all tasks regarding the current batch self.writer.add_scalars('task_' + str(t) + '/loss', {'loss_train': loss_mtrx_hypo_vlue[t, t]}, epoch) get the index of the max log-probability Calculate interpolation computing gradient w.r.t interplated sample Calculate probability of interpolated examples Calculate gradients of probabilities with respect to examples Gradients have shape (batch_size, num_channels, img_width, img_height), so flatten to easily take norm per example in batch Derivatives of the gradient close to 0 can cause problems because of the square root, so manually calculate norm and add epsilon Return gradient penalty criterion = ['wasserstien', 'h_divergence'] for c3_value in [0.5, 0.2, 1]:'criterion': 'h_divergence', | 1,616 | en | 0.664247 |
from django.test import TestCase
class PollsViewsTestCase(TestCase):
fixtures = ['polls_views_testdata.json']
def test_index(self):
resp = self.client.get('/polls/')
self.assertEqual(resp.status_code, 200)
self.assertTrue('latest_poll_list' in resp.context)
self.assertEqual([poll.pk for poll in resp.context['latest_poll_list']], [1])
def test_choices(self):
resp = self.client.get('/polls/')
self.assertEqual(resp.status_code, 200)
self.assertTrue('latest_poll_list' in resp.context)
self.assertEqual([poll.pk for poll in resp.context['latest_poll_list']], [1])
poll_1 = resp.context['latest_poll_list'][0]
self.assertEqual(poll_1.question, 'Are you learning about testing in Django?')
self.assertEqual(poll_1.choice_set.count(), 2)
choices = poll_1.choice_set.all()
self.assertEqual(choices[0].choice, 'Yes')
self.assertEqual(choices[0].votes, 1)
self.assertEqual(choices[1].choice, 'No')
self.assertEqual(choices[1].votes, 0)
def test_detail(self):
resp = self.client.get('/polls/1/')
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.context['polls'].pk, 1)
self.assertEqual(resp.context['polls'].question, 'Are you learning about testing in Django?')
# Ensure that non-existent polls throw a 404.
resp = self.client.get('/polls/2/')
self.assertEqual(resp.status_code, 404)
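# --- Editor's note: hedged illustration, not part of the original test file. ---
# These tests load the 'polls_views_testdata.json' fixture. Judging only from
# the assertions (one Poll with pk=1 and two Choice rows), the fixture is
# assumed to look roughly like the JSON below; the model labels and the
# pub_date field follow the usual Django tutorial conventions and are not
# confirmed by this file:
#
# [
#   {"model": "polls.poll", "pk": 1,
#    "fields": {"question": "Are you learning about testing in Django?",
#               "pub_date": "2014-01-01T00:00:00Z"}},
#   {"model": "polls.choice", "pk": 1,
#    "fields": {"poll": 1, "choice": "Yes", "votes": 1}},
#   {"model": "polls.choice", "pk": 2,
#    "fields": {"poll": 1, "choice": "No", "votes": 0}}
# ]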
| tdd/polls/tests/test_fixtures.py | 1,497 | Ensure that non-existent polls throw a 404. | 43 | en | 0.586318 |
"""
Support for a local MQTT broker.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/mqtt/#use-the-embedded-broker
"""
import logging
import tempfile
from homeassistant.core import callback
from homeassistant.components.mqtt import PROTOCOL_311
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.util.async import run_coroutine_threadsafe
REQUIREMENTS = ['hbmqtt==0.8']
DEPENDENCIES = ['http']
def start(hass, server_config):
"""Initialize MQTT Server."""
from hbmqtt.broker import Broker, BrokerException
try:
passwd = tempfile.NamedTemporaryFile()
if server_config is None:
server_config, client_config = generate_config(hass, passwd)
else:
client_config = None
broker = Broker(server_config, hass.loop)
run_coroutine_threadsafe(broker.start(), hass.loop).result()
except BrokerException:
logging.getLogger(__name__).exception('Error initializing MQTT server')
return False, None
finally:
passwd.close()
@callback
def shutdown_mqtt_server(event):
"""Shut down the MQTT server."""
hass.async_add_job(broker.shutdown())
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, shutdown_mqtt_server)
return True, client_config
def generate_config(hass, passwd):
"""Generate a configuration based on current Home Assistant instance."""
config = {
'listeners': {
'default': {
'max-connections': 50000,
'bind': '0.0.0.0:1883',
'type': 'tcp',
},
'ws-1': {
'bind': '0.0.0.0:8080',
'type': 'ws',
},
},
'auth': {
'allow-anonymous': hass.config.api.api_password is None
},
'plugins': ['auth_anonymous'],
}
if hass.config.api.api_password:
username = 'homeassistant'
password = hass.config.api.api_password
# Encrypt with what hbmqtt uses to verify
from passlib.apps import custom_app_context
passwd.write(
'homeassistant:{}\n'.format(
custom_app_context.encrypt(
hass.config.api.api_password)).encode('utf-8'))
passwd.flush()
config['auth']['password-file'] = passwd.name
config['plugins'].append('auth_file')
else:
username = None
password = None
client_config = ('localhost', 1883, username, password, None, PROTOCOL_311)
return config, client_config
| homeassistant/components/mqtt/server.py | 2,619 | Encrypt with what hbmqtt uses to verify | 39 | en | 0.898887 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.test_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import logging_ops
class TestUtilTest(test_util.TensorFlowTestCase):
def test_assert_ops_in_graph(self):
with self.test_session():
constant_op.constant(["hello", "taffy"], name="hello")
test_util.assert_ops_in_graph({"hello": "Const"}, ops.get_default_graph())
self.assertRaises(
ValueError, test_util.assert_ops_in_graph, {"bye": "Const"},
ops.get_default_graph())
self.assertRaises(
ValueError, test_util.assert_ops_in_graph, {"hello": "Variable"},
ops.get_default_graph())
def test_assert_equal_graph_def(self):
with tf.Graph().as_default() as g:
def_empty = g.as_graph_def()
tf.constant(5, name="five")
tf.constant(7, name="seven")
def_57 = g.as_graph_def()
with tf.Graph().as_default() as g:
tf.constant(7, name="seven")
tf.constant(5, name="five")
def_75 = g.as_graph_def()
# Comparing strings is order dependent
self.assertNotEqual(str(def_57), str(def_75))
# assert_equal_graph_def doesn't care about order
tf.test.assert_equal_graph_def(def_57, def_75)
# Compare two unequal graphs
with self.assertRaisesRegexp(AssertionError,
r"^Found unexpected node 'seven"):
tf.test.assert_equal_graph_def(def_57, def_empty)
def testIsGoogleCudaEnabled(self):
# The test doesn't assert anything. It ensures the py wrapper
# function is generated correctly.
if test_util.IsGoogleCudaEnabled():
print("GoogleCuda is enabled")
else:
print("GoogleCuda is disabled")
def testAssertProtoEqualsStr(self):
graph_str = "node { name: 'w1' op: 'params' }"
graph_def = graph_pb2.GraphDef()
text_format.Merge(graph_str, graph_def)
# test string based comparison
self.assertProtoEquals(graph_str, graph_def)
# test original comparison
self.assertProtoEquals(graph_def, graph_def)
def testNDArrayNear(self):
a1 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
a2 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
a3 = np.array([[10.0, 20.0, 30.0], [40.0, 50.0, 60.0]])
self.assertTrue(self._NDArrayNear(a1, a2, 1e-5))
self.assertFalse(self._NDArrayNear(a1, a3, 1e-5))
def testCheckedThreadSucceeds(self):
def noop(ev):
ev.set()
event_arg = threading.Event()
self.assertFalse(event_arg.is_set())
t = self.checkedThread(target=noop, args=(event_arg,))
t.start()
t.join()
self.assertTrue(event_arg.is_set())
def testCheckedThreadFails(self):
def err_func():
return 1 // 0
t = self.checkedThread(target=err_func)
t.start()
with self.assertRaises(self.failureException) as fe:
t.join()
self.assertTrue("integer division or modulo by zero" in str(fe.exception))
def testCheckedThreadWithWrongAssertionFails(self):
x = 37
def err_func():
self.assertTrue(x < 10)
t = self.checkedThread(target=err_func)
t.start()
with self.assertRaises(self.failureException) as fe:
t.join()
self.assertTrue("False is not true" in str(fe.exception))
def testMultipleThreadsWithOneFailure(self):
def err_func(i):
self.assertTrue(i != 7)
threads = [self.checkedThread(target=err_func, args=(i,))
for i in range(10)]
for t in threads:
t.start()
for i, t in enumerate(threads):
if i == 7:
with self.assertRaises(self.failureException):
t.join()
else:
t.join()
def _WeMustGoDeeper(self, msg):
with self.assertRaisesOpError(msg):
node_def = ops._NodeDef("op_type", "name")
node_def_orig = ops._NodeDef("op_type_orig", "orig")
op_orig = ops.Operation(node_def_orig, ops.get_default_graph())
op = ops.Operation(node_def, ops.get_default_graph(), original_op=op_orig)
raise errors.UnauthenticatedError(node_def, op, "true_err")
def testAssertRaisesOpErrorDoesNotPassMessageDueToLeakedStack(self):
with self.assertRaises(AssertionError):
self._WeMustGoDeeper("this_is_not_the_error_you_are_looking_for")
self._WeMustGoDeeper("true_err")
self._WeMustGoDeeper("name")
self._WeMustGoDeeper("orig")
def testAllCloseScalars(self):
self.assertAllClose(7, 7 + 1e-8)
with self.assertRaisesRegexp(AssertionError, r"Not equal to tolerance"):
self.assertAllClose(7, 8)
def testArrayNear(self):
a = [1, 2]
b = [1, 2, 5]
with self.assertRaises(AssertionError):
self.assertArrayNear(a, b, 0.001)
a = [1, 2]
b = [[1, 2], [3, 4]]
with self.assertRaises(TypeError):
self.assertArrayNear(a, b, 0.001)
a = [1, 2]
b = [1, 2]
self.assertArrayNear(a, b, 0.001)
def testForceGPU(self):
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"Cannot assign a device to node"):
with self.test_session(force_gpu=True):
# this relies on us not having a GPU implementation for assert, which
# seems sensible
x = [True]
y = [15]
logging_ops.Assert(x, y).run()
if __name__ == "__main__":
googletest.main()
| tensorflow/python/framework/test_util_test.py | 6,431 | Tests for tensorflow.ops.test_util.
Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== pylint: disable=redefined-builtin Comparing strings is order dependent assert_equal_graph_def doesn't care about order Compare two unequal graphs The test doesn't assert anything. It ensures the py wrapper function is generated correctly. test string based comparison test original comparison this relies on us not having a GPU implementation for assert, which seems sensible | 1,074 | en | 0.850603 |
from baselines.deepq import models # noqa F401
from baselines.deepq.deepq_learner import DEEPQ # noqa F401
from baselines.deepq.deepq import learn # noqa F401
from baselines.deepq.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer # noqa F401
def wrap_atari_dqn(env):
from baselines.common.atari_wrappers import wrap_deepmind
return wrap_deepmind(env, frame_stack=True, scale=False)
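# --- Editor's note: hedged usage sketch, not part of the original file. ---
# Typical use pairs this wrapper with make_atari from the same wrappers module;
# the environment id below is only an example:
#
# from baselines.common.atari_wrappers import make_atari
# env = make_atari("PongNoFrameskip-v4")
# env = wrap_atari_dqn(env)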
| baselines/deepq/__init__.py | 404 | noqa F401 noqa F401 noqa F401 noqa F401 | 39 | uz | 0.217007 |
# Generated by Django 3.1.4 on 2020-12-07 19:08
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='Agent',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Lead',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=20)),
('last_name', models.CharField(max_length=20)),
('age', models.IntegerField(default=0)),
('agent', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='leads.agent')),
],
),
]
| leads/migrations/0001_initial.py | 3,792 | Generated by Django 3.1.4 on 2020-12-07 19:08 | 45 | en | 0.712127 |
# Generated from decafJavier.g4 by ANTLR 4.9.2
# encoding: utf-8
from antlr4 import *
from io import StringIO
import sys
if sys.version_info[1] > 5:
from typing import TextIO
else:
from typing.io import TextIO
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3;")
buf.write("\u0120\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7")
buf.write("\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r\4\16")
buf.write("\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23\t\23")
buf.write("\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31")
buf.write("\t\31\4\32\t\32\3\2\3\2\3\2\3\2\7\29\n\2\f\2\16\2<\13")
buf.write("\2\3\2\7\2?\n\2\f\2\16\2B\13\2\3\2\3\2\3\3\3\3\3\3\3\3")
buf.write("\3\3\3\3\3\3\7\3M\n\3\f\3\16\3P\13\3\3\3\3\3\3\4\3\4\3")
buf.write("\4\3\4\7\4X\n\4\f\4\16\4[\13\4\3\4\3\4\3\5\3\5\3\5\3\5")
buf.write("\3\5\3\6\3\6\5\6f\n\6\3\7\3\7\3\b\3\b\3\b\3\b\3\b\3\b")
buf.write("\3\b\3\b\3\b\3\b\7\bt\n\b\f\b\16\bw\13\b\5\by\n\b\3\b")
buf.write("\3\b\3\b\3\t\3\t\5\t\u0080\n\t\3\n\3\n\7\n\u0084\n\n\f")
buf.write("\n\16\n\u0087\13\n\3\n\7\n\u008a\n\n\f\n\16\n\u008d\13")
buf.write("\n\3\n\3\n\3\13\3\13\3\13\3\13\3\13\3\13\3\13\3\13\3\13")
buf.write("\3\13\3\13\3\13\3\13\3\13\3\13\3\13\3\13\5\13\u00a2\n")
buf.write("\13\3\13\3\13\3\13\3\13\3\13\3\13\3\13\3\13\3\13\3\13")
buf.write("\3\13\3\13\3\13\5\13\u00b1\n\13\3\13\3\13\3\13\3\13\5")
buf.write("\13\u00b7\n\13\3\13\5\13\u00ba\n\13\3\13\3\13\3\13\3\13")
buf.write("\5\13\u00c0\n\13\3\f\3\f\3\f\3\f\3\f\7\f\u00c7\n\f\f\f")
buf.write("\16\f\u00ca\13\f\5\f\u00cc\n\f\3\f\3\f\3\r\3\r\3\r\3\r")
buf.write("\3\r\3\r\3\r\3\r\3\r\3\r\3\r\7\r\u00db\n\r\f\r\16\r\u00de")
buf.write("\13\r\5\r\u00e0\n\r\3\r\3\r\5\r\u00e4\n\r\3\16\3\16\3")
buf.write("\16\3\16\3\16\3\16\3\16\3\16\3\16\3\16\3\16\3\16\5\16")
buf.write("\u00f2\n\16\3\16\3\16\3\16\3\16\7\16\u00f8\n\16\f\16\16")
buf.write("\16\u00fb\13\16\3\17\3\17\5\17\u00ff\n\17\3\20\3\20\5")
buf.write("\20\u0103\n\20\3\21\3\21\3\22\3\22\3\23\3\23\3\24\3\24")
buf.write("\3\25\3\25\3\25\5\25\u0110\n\25\3\26\3\26\3\26\3\26\5")
buf.write("\26\u0116\n\26\3\27\3\27\3\30\3\30\3\31\3\31\3\32\3\32")
buf.write("\3\32\2\3\32\33\2\4\6\b\n\f\16\20\22\24\26\30\32\34\36")
buf.write(" \"$&(*,.\60\62\2\t\4\2\61\61\63\63\3\2%(\3\2,-\3\2\"")
buf.write("#\3\2\35!\4\2\13\13\r\r\3\2)+\2\u012e\2\64\3\2\2\2\4E")
buf.write("\3\2\2\2\6S\3\2\2\2\b^\3\2\2\2\ne\3\2\2\2\fg\3\2\2\2\16")
buf.write("i\3\2\2\2\20\177\3\2\2\2\22\u0081\3\2\2\2\24\u00bf\3\2")
buf.write("\2\2\26\u00c1\3\2\2\2\30\u00e3\3\2\2\2\32\u00f1\3\2\2")
buf.write("\2\34\u00fe\3\2\2\2\36\u0102\3\2\2\2 \u0104\3\2\2\2\"")
buf.write("\u0106\3\2\2\2$\u0108\3\2\2\2&\u010a\3\2\2\2(\u010f\3")
buf.write("\2\2\2*\u0115\3\2\2\2,\u0117\3\2\2\2.\u0119\3\2\2\2\60")
buf.write("\u011b\3\2\2\2\62\u011d\3\2\2\2\64\65\7\3\2\2\65\66\7")
buf.write("\4\2\2\66:\7\24\2\2\679\5\6\4\28\67\3\2\2\29<\3\2\2\2")
buf.write(":8\3\2\2\2:;\3\2\2\2;@\3\2\2\2<:\3\2\2\2=?\5\16\b\2>=")
buf.write("\3\2\2\2?B\3\2\2\2@>\3\2\2\2@A\3\2\2\2AC\3\2\2\2B@\3\2")
buf.write("\2\2CD\7\25\2\2D\3\3\2\2\2EF\5.\30\2FG\5\n\6\2GN\3\2\2")
buf.write("\2HI\7\32\2\2IJ\5.\30\2JK\5\n\6\2KM\3\2\2\2LH\3\2\2\2")
buf.write("MP\3\2\2\2NL\3\2\2\2NO\3\2\2\2OQ\3\2\2\2PN\3\2\2\2QR\7")
buf.write("\23\2\2R\5\3\2\2\2ST\5.\30\2TY\5\n\6\2UV\7\32\2\2VX\5")
buf.write("\n\6\2WU\3\2\2\2X[\3\2\2\2YW\3\2\2\2YZ\3\2\2\2Z\\\3\2")
buf.write("\2\2[Y\3\2\2\2\\]\7\23\2\2]\7\3\2\2\2^_\7.\2\2_`\7\26")
buf.write("\2\2`a\5 \21\2ab\7\27\2\2b\t\3\2\2\2cf\5\f\7\2df\5\b\5")
buf.write("\2ec\3\2\2\2ed\3\2\2\2f\13\3\2\2\2gh\7.\2\2h\r\3\2\2\2")
buf.write("ij\5\20\t\2jk\5\62\32\2kx\7\30\2\2lm\5.\30\2mn\5\f\7\2")
buf.write("nu\3\2\2\2op\7\32\2\2pq\5.\30\2qr\5\f\7\2rt\3\2\2\2so")
buf.write("\3\2\2\2tw\3\2\2\2us\3\2\2\2uv\3\2\2\2vy\3\2\2\2wu\3\2")
buf.write("\2\2xl\3\2\2\2xy\3\2\2\2yz\3\2\2\2z{\7\31\2\2{|\5\22\n")
buf.write("\2|\17\3\2\2\2}\u0080\5.\30\2~\u0080\7\21\2\2\177}\3\2")
buf.write("\2\2\177~\3\2\2\2\u0080\21\3\2\2\2\u0081\u0085\7\24\2")
buf.write("\2\u0082\u0084\5\4\3\2\u0083\u0082\3\2\2\2\u0084\u0087")
buf.write("\3\2\2\2\u0085\u0083\3\2\2\2\u0085\u0086\3\2\2\2\u0086")
buf.write("\u008b\3\2\2\2\u0087\u0085\3\2\2\2\u0088\u008a\5\24\13")
buf.write("\2\u0089\u0088\3\2\2\2\u008a\u008d\3\2\2\2\u008b\u0089")
buf.write("\3\2\2\2\u008b\u008c\3\2\2\2\u008c\u008e\3\2\2\2\u008d")
buf.write("\u008b\3\2\2\2\u008e\u008f\7\25\2\2\u008f\23\3\2\2\2\u0090")
buf.write("\u0091\5\34\17\2\u0091\u0092\5\60\31\2\u0092\u0093\5\32")
buf.write("\16\2\u0093\u00c0\3\2\2\2\u0094\u0095\5\34\17\2\u0095")
buf.write("\u0096\5\60\31\2\u0096\u0097\5\32\16\2\u0097\u0098\7\23")
buf.write("\2\2\u0098\u00c0\3\2\2\2\u0099\u00c0\5\30\r\2\u009a\u009b")
buf.write("\7\5\2\2\u009b\u009c\7\30\2\2\u009c\u009d\5\32\16\2\u009d")
buf.write("\u009e\7\31\2\2\u009e\u00a1\5\22\n\2\u009f\u00a0\7\6\2")
buf.write("\2\u00a0\u00a2\5\22\n\2\u00a1\u009f\3\2\2\2\u00a1\u00a2")
buf.write("\3\2\2\2\u00a2\u00c0\3\2\2\2\u00a3\u00a4\5\f\7\2\u00a4")
buf.write("\u00a5\7)\2\2\u00a5\u00a6\5\32\16\2\u00a6\u00a7\7\23\2")
buf.write("\2\u00a7\u00c0\3\2\2\2\u00a8\u00a9\7\b\2\2\u00a9\u00aa")
buf.write("\5\32\16\2\u00aa\u00ab\7\23\2\2\u00ab\u00c0\3\2\2\2\u00ac")
buf.write("\u00ad\7\7\2\2\u00ad\u00b0\5\f\7\2\u00ae\u00af\7)\2\2")
buf.write("\u00af\u00b1\5 \21\2\u00b0\u00ae\3\2\2\2\u00b0\u00b1\3")
buf.write("\2\2\2\u00b1\u00b2\3\2\2\2\u00b2\u00b9\7\32\2\2\u00b3")
buf.write("\u00b6\5\f\7\2\u00b4\u00b5\7)\2\2\u00b5\u00b7\5 \21\2")
buf.write("\u00b6\u00b4\3\2\2\2\u00b6\u00b7\3\2\2\2\u00b7\u00ba\3")
buf.write("\2\2\2\u00b8\u00ba\5 \21\2\u00b9\u00b3\3\2\2\2\u00b9\u00b8")
buf.write("\3\2\2\2\u00ba\u00bb\3\2\2\2\u00bb\u00bc\5\22\n\2\u00bc")
buf.write("\u00c0\3\2\2\2\u00bd\u00be\7\t\2\2\u00be\u00c0\7\23\2")
buf.write("\2\u00bf\u0090\3\2\2\2\u00bf\u0094\3\2\2\2\u00bf\u0099")
buf.write("\3\2\2\2\u00bf\u009a\3\2\2\2\u00bf\u00a3\3\2\2\2\u00bf")
buf.write("\u00a8\3\2\2\2\u00bf\u00ac\3\2\2\2\u00bf\u00bd\3\2\2\2")
buf.write("\u00c0\25\3\2\2\2\u00c1\u00c2\5\62\32\2\u00c2\u00cb\7")
buf.write("\30\2\2\u00c3\u00c8\5\32\16\2\u00c4\u00c5\7\32\2\2\u00c5")
buf.write("\u00c7\5\32\16\2\u00c6\u00c4\3\2\2\2\u00c7\u00ca\3\2\2")
buf.write("\2\u00c8\u00c6\3\2\2\2\u00c8\u00c9\3\2\2\2\u00c9\u00cc")
buf.write("\3\2\2\2\u00ca\u00c8\3\2\2\2\u00cb\u00c3\3\2\2\2\u00cb")
buf.write("\u00cc\3\2\2\2\u00cc\u00cd\3\2\2\2\u00cd\u00ce\7\31\2")
buf.write("\2\u00ce\27\3\2\2\2\u00cf\u00e4\5\26\f\2\u00d0\u00d1\5")
buf.write("\26\f\2\u00d1\u00d2\7\23\2\2\u00d2\u00e4\3\2\2\2\u00d3")
buf.write("\u00d4\7\22\2\2\u00d4\u00d5\7\30\2\2\u00d5\u00df\7\65")
buf.write("\2\2\u00d6\u00d7\7\32\2\2\u00d7\u00dc\5\36\20\2\u00d8")
buf.write("\u00d9\7\32\2\2\u00d9\u00db\5\36\20\2\u00da\u00d8\3\2")
buf.write("\2\2\u00db\u00de\3\2\2\2\u00dc\u00da\3\2\2\2\u00dc\u00dd")
buf.write("\3\2\2\2\u00dd\u00e0\3\2\2\2\u00de\u00dc\3\2\2\2\u00df")
buf.write("\u00d6\3\2\2\2\u00df\u00e0\3\2\2\2\u00e0\u00e1\3\2\2\2")
buf.write("\u00e1\u00e2\7\31\2\2\u00e2\u00e4\7\23\2\2\u00e3\u00cf")
buf.write("\3\2\2\2\u00e3\u00d0\3\2\2\2\u00e3\u00d3\3\2\2\2\u00e4")
buf.write("\31\3\2\2\2\u00e5\u00e6\b\16\1\2\u00e6\u00f2\5\34\17\2")
buf.write("\u00e7\u00f2\5(\25\2\u00e8\u00e9\7\36\2\2\u00e9\u00f2")
buf.write("\5\32\16\6\u00ea\u00f2\5\30\r\2\u00eb\u00ec\7$\2\2\u00ec")
buf.write("\u00f2\5\32\16\4\u00ed\u00ee\7\30\2\2\u00ee\u00ef\5\32")
buf.write("\16\2\u00ef\u00f0\7\31\2\2\u00f0\u00f2\3\2\2\2\u00f1\u00e5")
buf.write("\3\2\2\2\u00f1\u00e7\3\2\2\2\u00f1\u00e8\3\2\2\2\u00f1")
buf.write("\u00ea\3\2\2\2\u00f1\u00eb\3\2\2\2\u00f1\u00ed\3\2\2\2")
buf.write("\u00f2\u00f9\3\2\2\2\u00f3\u00f4\f\7\2\2\u00f4\u00f5\5")
buf.write("*\26\2\u00f5\u00f6\5\32\16\b\u00f6\u00f8\3\2\2\2\u00f7")
buf.write("\u00f3\3\2\2\2\u00f8\u00fb\3\2\2\2\u00f9\u00f7\3\2\2\2")
buf.write("\u00f9\u00fa\3\2\2\2\u00fa\33\3\2\2\2\u00fb\u00f9\3\2")
buf.write("\2\2\u00fc\u00ff\5\f\7\2\u00fd\u00ff\5\b\5\2\u00fe\u00fc")
buf.write("\3\2\2\2\u00fe\u00fd\3\2\2\2\u00ff\35\3\2\2\2\u0100\u0103")
buf.write("\5\32\16\2\u0101\u0103\7\65\2\2\u0102\u0100\3\2\2\2\u0102")
buf.write("\u0101\3\2\2\2\u0103\37\3\2\2\2\u0104\u0105\t\2\2\2\u0105")
buf.write("!\3\2\2\2\u0106\u0107\t\3\2\2\u0107#\3\2\2\2\u0108\u0109")
buf.write("\t\4\2\2\u0109%\3\2\2\2\u010a\u010b\t\5\2\2\u010b\'\3")
buf.write("\2\2\2\u010c\u0110\5 \21\2\u010d\u0110\7\60\2\2\u010e")
buf.write("\u0110\7\64\2\2\u010f\u010c\3\2\2\2\u010f\u010d\3\2\2")
buf.write("\2\u010f\u010e\3\2\2\2\u0110)\3\2\2\2\u0111\u0116\5,\27")
buf.write("\2\u0112\u0116\5\"\22\2\u0113\u0116\5$\23\2\u0114\u0116")
buf.write("\5&\24\2\u0115\u0111\3\2\2\2\u0115\u0112\3\2\2\2\u0115")
buf.write("\u0113\3\2\2\2\u0115\u0114\3\2\2\2\u0116+\3\2\2\2\u0117")
buf.write("\u0118\t\6\2\2\u0118-\3\2\2\2\u0119\u011a\t\7\2\2\u011a")
buf.write("/\3\2\2\2\u011b\u011c\t\b\2\2\u011c\61\3\2\2\2\u011d\u011e")
buf.write("\7.\2\2\u011e\63\3\2\2\2\34:@NYeux\177\u0085\u008b\u00a1")
buf.write("\u00b0\u00b6\u00b9\u00bf\u00c8\u00cb\u00dc\u00df\u00e3")
buf.write("\u00f1\u00f9\u00fe\u0102\u010f\u0115")
return buf.getvalue()
class decafJavierParser ( Parser ):
grammarFileName = "decafJavier.g4"
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
sharedContextCache = PredictionContextCache()
literalNames = [ "<INVALID>", "'class'", "'Program'", "'if'", "'else'",
"'for'", "'return'", "'break'", "'continue'", "'boolean'",
"'char'", "'int'", "'string'", "'True'", "'False'",
"'void'", "'callout'", "';'", "'{'", "'}'", "'['",
"']'", "'('", "')'", "','", "'\"'", "'''", "'+'", "'-'",
"'*'", "'/'", "'%'", "'&&'", "'||'", "'!'", "'>'",
"'<'", "'>='", "'<='", "'='", "'+='", "'-='", "'=='",
"'!='" ]
symbolicNames = [ "<INVALID>", "CLASS", "PROGRAM", "IF", "ELSE", "FOR",
"RETURN", "BREAK", "CONTINUE", "BOOLEAN", "CHAR",
"INT", "STRING", "TRUE", "FALSE", "VOID", "CALLOUT",
"SEMICOLON", "LCURLY", "RCURLY", "LSQUARE", "RSQUARE",
"LROUND", "RROUND", "COMMA", "QUOTES", "APOSTROPHE",
"ADD", "SUB", "MULTIPLY", "DIVIDE", "REMINDER", "AND",
"OR", "NOT", "GREATER_OP", "LESS_OP", "GREATER_eq_op",
"LESS_eq_op", "EQUAL_OP", "ADD_eq_op", "SUB_eq_op",
"EQUALITY_OP", "UNEQUALITY_OP", "ID", "ALPHA", "CHAR_LITERAL",
"DECIMAL_LITERAL", "DIGIT", "HEX_LITERAL", "BOOL_LITERAL",
"STRING_LITERAL", "ALPHA_NUM", "HEX_DIGIT", "LINE_COMMENT",
"COMMENT", "NEWLINE", "WHITESPACE" ]
RULE_program = 0
RULE_vardeclr = 1
RULE_field_declr = 2
RULE_array_id = 3
RULE_field_var = 4
RULE_var_id = 5
RULE_method_declr = 6
RULE_return_type = 7
RULE_block = 8
RULE_statement = 9
RULE_method_call_inter = 10
RULE_method_call = 11
RULE_expr = 12
RULE_location = 13
RULE_callout_arg = 14
RULE_int_literal = 15
RULE_rel_op = 16
RULE_eq_op = 17
RULE_cond_op = 18
RULE_literal = 19
RULE_bin_op = 20
RULE_arith_op = 21
RULE_var_type = 22
RULE_assign_op = 23
RULE_method_name = 24
ruleNames = [ "program", "vardeclr", "field_declr", "array_id", "field_var",
"var_id", "method_declr", "return_type", "block", "statement",
"method_call_inter", "method_call", "expr", "location",
"callout_arg", "int_literal", "rel_op", "eq_op", "cond_op",
"literal", "bin_op", "arith_op", "var_type", "assign_op",
"method_name" ]
EOF = Token.EOF
CLASS=1
PROGRAM=2
IF=3
ELSE=4
FOR=5
RETURN=6
BREAK=7
CONTINUE=8
BOOLEAN=9
CHAR=10
INT=11
STRING=12
TRUE=13
FALSE=14
VOID=15
CALLOUT=16
SEMICOLON=17
LCURLY=18
RCURLY=19
LSQUARE=20
RSQUARE=21
LROUND=22
RROUND=23
COMMA=24
QUOTES=25
APOSTROPHE=26
ADD=27
SUB=28
MULTIPLY=29
DIVIDE=30
REMINDER=31
AND=32
OR=33
NOT=34
GREATER_OP=35
LESS_OP=36
GREATER_eq_op=37
LESS_eq_op=38
EQUAL_OP=39
ADD_eq_op=40
SUB_eq_op=41
EQUALITY_OP=42
UNEQUALITY_OP=43
ID=44
ALPHA=45
CHAR_LITERAL=46
DECIMAL_LITERAL=47
DIGIT=48
HEX_LITERAL=49
BOOL_LITERAL=50
STRING_LITERAL=51
ALPHA_NUM=52
HEX_DIGIT=53
LINE_COMMENT=54
COMMENT=55
NEWLINE=56
WHITESPACE=57
def __init__(self, input:TokenStream, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.9.2")
self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
self._predicates = None
class ProgramContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def CLASS(self):
return self.getToken(decafJavierParser.CLASS, 0)
def PROGRAM(self):
return self.getToken(decafJavierParser.PROGRAM, 0)
def LCURLY(self):
return self.getToken(decafJavierParser.LCURLY, 0)
def RCURLY(self):
return self.getToken(decafJavierParser.RCURLY, 0)
def field_declr(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(decafJavierParser.Field_declrContext)
else:
return self.getTypedRuleContext(decafJavierParser.Field_declrContext,i)
def method_declr(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(decafJavierParser.Method_declrContext)
else:
return self.getTypedRuleContext(decafJavierParser.Method_declrContext,i)
def getRuleIndex(self):
return decafJavierParser.RULE_program
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterProgram" ):
listener.enterProgram(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitProgram" ):
listener.exitProgram(self)
def program(self):
localctx = decafJavierParser.ProgramContext(self, self._ctx, self.state)
self.enterRule(localctx, 0, self.RULE_program)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 50
self.match(decafJavierParser.CLASS)
self.state = 51
self.match(decafJavierParser.PROGRAM)
self.state = 52
self.match(decafJavierParser.LCURLY)
self.state = 56
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,0,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
self.state = 53
self.field_declr()
self.state = 58
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,0,self._ctx)
self.state = 62
self._errHandler.sync(self)
_la = self._input.LA(1)
while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << decafJavierParser.BOOLEAN) | (1 << decafJavierParser.INT) | (1 << decafJavierParser.VOID))) != 0):
self.state = 59
self.method_declr()
self.state = 64
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 65
self.match(decafJavierParser.RCURLY)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class VardeclrContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def SEMICOLON(self):
return self.getToken(decafJavierParser.SEMICOLON, 0)
def var_type(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(decafJavierParser.Var_typeContext)
else:
return self.getTypedRuleContext(decafJavierParser.Var_typeContext,i)
def field_var(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(decafJavierParser.Field_varContext)
else:
return self.getTypedRuleContext(decafJavierParser.Field_varContext,i)
def COMMA(self, i:int=None):
if i is None:
return self.getTokens(decafJavierParser.COMMA)
else:
return self.getToken(decafJavierParser.COMMA, i)
def getRuleIndex(self):
return decafJavierParser.RULE_vardeclr
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterVardeclr" ):
listener.enterVardeclr(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitVardeclr" ):
listener.exitVardeclr(self)
def vardeclr(self):
localctx = decafJavierParser.VardeclrContext(self, self._ctx, self.state)
self.enterRule(localctx, 2, self.RULE_vardeclr)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 67
self.var_type()
self.state = 68
self.field_var()
self.state = 76
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==decafJavierParser.COMMA:
self.state = 70
self.match(decafJavierParser.COMMA)
self.state = 71
self.var_type()
self.state = 72
self.field_var()
self.state = 78
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 79
self.match(decafJavierParser.SEMICOLON)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Field_declrContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def var_type(self):
return self.getTypedRuleContext(decafJavierParser.Var_typeContext,0)
def field_var(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(decafJavierParser.Field_varContext)
else:
return self.getTypedRuleContext(decafJavierParser.Field_varContext,i)
def SEMICOLON(self):
return self.getToken(decafJavierParser.SEMICOLON, 0)
def COMMA(self, i:int=None):
if i is None:
return self.getTokens(decafJavierParser.COMMA)
else:
return self.getToken(decafJavierParser.COMMA, i)
def getRuleIndex(self):
return decafJavierParser.RULE_field_declr
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterField_declr" ):
listener.enterField_declr(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitField_declr" ):
listener.exitField_declr(self)
def field_declr(self):
localctx = decafJavierParser.Field_declrContext(self, self._ctx, self.state)
self.enterRule(localctx, 4, self.RULE_field_declr)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 81
self.var_type()
self.state = 82
self.field_var()
self.state = 87
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==decafJavierParser.COMMA:
self.state = 83
self.match(decafJavierParser.COMMA)
self.state = 84
self.field_var()
self.state = 89
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 90
self.match(decafJavierParser.SEMICOLON)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Array_idContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ID(self):
return self.getToken(decafJavierParser.ID, 0)
def LSQUARE(self):
return self.getToken(decafJavierParser.LSQUARE, 0)
def int_literal(self):
return self.getTypedRuleContext(decafJavierParser.Int_literalContext,0)
def RSQUARE(self):
return self.getToken(decafJavierParser.RSQUARE, 0)
def getRuleIndex(self):
return decafJavierParser.RULE_array_id
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterArray_id" ):
listener.enterArray_id(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitArray_id" ):
listener.exitArray_id(self)
def array_id(self):
localctx = decafJavierParser.Array_idContext(self, self._ctx, self.state)
self.enterRule(localctx, 6, self.RULE_array_id)
try:
self.enterOuterAlt(localctx, 1)
self.state = 92
self.match(decafJavierParser.ID)
self.state = 93
self.match(decafJavierParser.LSQUARE)
self.state = 94
self.int_literal()
self.state = 95
self.match(decafJavierParser.RSQUARE)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Field_varContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def var_id(self):
return self.getTypedRuleContext(decafJavierParser.Var_idContext,0)
def array_id(self):
return self.getTypedRuleContext(decafJavierParser.Array_idContext,0)
def getRuleIndex(self):
return decafJavierParser.RULE_field_var
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterField_var" ):
listener.enterField_var(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitField_var" ):
listener.exitField_var(self)
def field_var(self):
localctx = decafJavierParser.Field_varContext(self, self._ctx, self.state)
self.enterRule(localctx, 8, self.RULE_field_var)
try:
self.state = 99
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,4,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 97
self.var_id()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 98
self.array_id()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Var_idContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ID(self):
return self.getToken(decafJavierParser.ID, 0)
def getRuleIndex(self):
return decafJavierParser.RULE_var_id
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterVar_id" ):
listener.enterVar_id(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitVar_id" ):
listener.exitVar_id(self)
def var_id(self):
localctx = decafJavierParser.Var_idContext(self, self._ctx, self.state)
self.enterRule(localctx, 10, self.RULE_var_id)
try:
self.enterOuterAlt(localctx, 1)
self.state = 101
self.match(decafJavierParser.ID)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Method_declrContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def return_type(self):
return self.getTypedRuleContext(decafJavierParser.Return_typeContext,0)
def method_name(self):
return self.getTypedRuleContext(decafJavierParser.Method_nameContext,0)
def LROUND(self):
return self.getToken(decafJavierParser.LROUND, 0)
def RROUND(self):
return self.getToken(decafJavierParser.RROUND, 0)
def block(self):
return self.getTypedRuleContext(decafJavierParser.BlockContext,0)
def var_type(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(decafJavierParser.Var_typeContext)
else:
return self.getTypedRuleContext(decafJavierParser.Var_typeContext,i)
def var_id(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(decafJavierParser.Var_idContext)
else:
return self.getTypedRuleContext(decafJavierParser.Var_idContext,i)
def COMMA(self, i:int=None):
if i is None:
return self.getTokens(decafJavierParser.COMMA)
else:
return self.getToken(decafJavierParser.COMMA, i)
def getRuleIndex(self):
return decafJavierParser.RULE_method_declr
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterMethod_declr" ):
listener.enterMethod_declr(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitMethod_declr" ):
listener.exitMethod_declr(self)
def method_declr(self):
localctx = decafJavierParser.Method_declrContext(self, self._ctx, self.state)
self.enterRule(localctx, 12, self.RULE_method_declr)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 103
self.return_type()
self.state = 104
self.method_name()
self.state = 105
self.match(decafJavierParser.LROUND)
self.state = 118
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==decafJavierParser.BOOLEAN or _la==decafJavierParser.INT:
self.state = 106
self.var_type()
self.state = 107
self.var_id()
self.state = 115
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==decafJavierParser.COMMA:
self.state = 109
self.match(decafJavierParser.COMMA)
self.state = 110
self.var_type()
self.state = 111
self.var_id()
self.state = 117
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 120
self.match(decafJavierParser.RROUND)
self.state = 121
self.block()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Return_typeContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def var_type(self):
return self.getTypedRuleContext(decafJavierParser.Var_typeContext,0)
def VOID(self):
return self.getToken(decafJavierParser.VOID, 0)
def getRuleIndex(self):
return decafJavierParser.RULE_return_type
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterReturn_type" ):
listener.enterReturn_type(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitReturn_type" ):
listener.exitReturn_type(self)
def return_type(self):
localctx = decafJavierParser.Return_typeContext(self, self._ctx, self.state)
self.enterRule(localctx, 14, self.RULE_return_type)
try:
self.enterOuterAlt(localctx, 1)
self.state = 125
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [decafJavierParser.BOOLEAN, decafJavierParser.INT]:
self.state = 123
self.var_type()
pass
elif token in [decafJavierParser.VOID]:
self.state = 124
self.match(decafJavierParser.VOID)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class BlockContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def LCURLY(self):
return self.getToken(decafJavierParser.LCURLY, 0)
def RCURLY(self):
return self.getToken(decafJavierParser.RCURLY, 0)
def vardeclr(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(decafJavierParser.VardeclrContext)
else:
return self.getTypedRuleContext(decafJavierParser.VardeclrContext,i)
def statement(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(decafJavierParser.StatementContext)
else:
return self.getTypedRuleContext(decafJavierParser.StatementContext,i)
def getRuleIndex(self):
return decafJavierParser.RULE_block
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterBlock" ):
listener.enterBlock(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitBlock" ):
listener.exitBlock(self)
def block(self):
localctx = decafJavierParser.BlockContext(self, self._ctx, self.state)
self.enterRule(localctx, 16, self.RULE_block)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 127
self.match(decafJavierParser.LCURLY)
self.state = 131
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==decafJavierParser.BOOLEAN or _la==decafJavierParser.INT:
self.state = 128
self.vardeclr()
self.state = 133
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 137
self._errHandler.sync(self)
_la = self._input.LA(1)
while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << decafJavierParser.IF) | (1 << decafJavierParser.FOR) | (1 << decafJavierParser.RETURN) | (1 << decafJavierParser.BREAK) | (1 << decafJavierParser.CALLOUT) | (1 << decafJavierParser.ID))) != 0):
self.state = 134
self.statement()
self.state = 139
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 140
self.match(decafJavierParser.RCURLY)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class StatementContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def location(self):
return self.getTypedRuleContext(decafJavierParser.LocationContext,0)
def assign_op(self):
return self.getTypedRuleContext(decafJavierParser.Assign_opContext,0)
def expr(self):
return self.getTypedRuleContext(decafJavierParser.ExprContext,0)
def SEMICOLON(self):
return self.getToken(decafJavierParser.SEMICOLON, 0)
def method_call(self):
return self.getTypedRuleContext(decafJavierParser.Method_callContext,0)
def IF(self):
return self.getToken(decafJavierParser.IF, 0)
def LROUND(self):
return self.getToken(decafJavierParser.LROUND, 0)
def RROUND(self):
return self.getToken(decafJavierParser.RROUND, 0)
def block(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(decafJavierParser.BlockContext)
else:
return self.getTypedRuleContext(decafJavierParser.BlockContext,i)
def ELSE(self):
return self.getToken(decafJavierParser.ELSE, 0)
def var_id(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(decafJavierParser.Var_idContext)
else:
return self.getTypedRuleContext(decafJavierParser.Var_idContext,i)
def EQUAL_OP(self, i:int=None):
if i is None:
return self.getTokens(decafJavierParser.EQUAL_OP)
else:
return self.getToken(decafJavierParser.EQUAL_OP, i)
def RETURN(self):
return self.getToken(decafJavierParser.RETURN, 0)
def FOR(self):
return self.getToken(decafJavierParser.FOR, 0)
def COMMA(self):
return self.getToken(decafJavierParser.COMMA, 0)
def int_literal(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(decafJavierParser.Int_literalContext)
else:
return self.getTypedRuleContext(decafJavierParser.Int_literalContext,i)
def BREAK(self):
return self.getToken(decafJavierParser.BREAK, 0)
def getRuleIndex(self):
return decafJavierParser.RULE_statement
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterStatement" ):
listener.enterStatement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitStatement" ):
listener.exitStatement(self)
def statement(self):
localctx = decafJavierParser.StatementContext(self, self._ctx, self.state)
self.enterRule(localctx, 18, self.RULE_statement)
self._la = 0 # Token type
try:
self.state = 189
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,14,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 142
self.location()
self.state = 143
self.assign_op()
self.state = 144
self.expr(0)
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 146
self.location()
self.state = 147
self.assign_op()
self.state = 148
self.expr(0)
self.state = 149
self.match(decafJavierParser.SEMICOLON)
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 151
self.method_call()
pass
elif la_ == 4:
self.enterOuterAlt(localctx, 4)
self.state = 152
self.match(decafJavierParser.IF)
self.state = 153
self.match(decafJavierParser.LROUND)
self.state = 154
self.expr(0)
self.state = 155
self.match(decafJavierParser.RROUND)
self.state = 156
self.block()
self.state = 159
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==decafJavierParser.ELSE:
self.state = 157
self.match(decafJavierParser.ELSE)
self.state = 158
self.block()
pass
elif la_ == 5:
self.enterOuterAlt(localctx, 5)
self.state = 161
self.var_id()
self.state = 162
self.match(decafJavierParser.EQUAL_OP)
self.state = 163
self.expr(0)
self.state = 164
self.match(decafJavierParser.SEMICOLON)
pass
elif la_ == 6:
self.enterOuterAlt(localctx, 6)
self.state = 166
self.match(decafJavierParser.RETURN)
self.state = 167
self.expr(0)
self.state = 168
self.match(decafJavierParser.SEMICOLON)
pass
elif la_ == 7:
self.enterOuterAlt(localctx, 7)
self.state = 170
self.match(decafJavierParser.FOR)
self.state = 171
self.var_id()
self.state = 174
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==decafJavierParser.EQUAL_OP:
self.state = 172
self.match(decafJavierParser.EQUAL_OP)
self.state = 173
self.int_literal()
self.state = 176
self.match(decafJavierParser.COMMA)
self.state = 183
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [decafJavierParser.ID]:
self.state = 177
self.var_id()
self.state = 180
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==decafJavierParser.EQUAL_OP:
self.state = 178
self.match(decafJavierParser.EQUAL_OP)
self.state = 179
self.int_literal()
pass
elif token in [decafJavierParser.DECIMAL_LITERAL, decafJavierParser.HEX_LITERAL]:
self.state = 182
self.int_literal()
pass
else:
raise NoViableAltException(self)
self.state = 185
self.block()
pass
elif la_ == 8:
self.enterOuterAlt(localctx, 8)
self.state = 187
self.match(decafJavierParser.BREAK)
self.state = 188
self.match(decafJavierParser.SEMICOLON)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Method_call_interContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def method_name(self):
return self.getTypedRuleContext(decafJavierParser.Method_nameContext,0)
def LROUND(self):
return self.getToken(decafJavierParser.LROUND, 0)
def RROUND(self):
return self.getToken(decafJavierParser.RROUND, 0)
def expr(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(decafJavierParser.ExprContext)
else:
return self.getTypedRuleContext(decafJavierParser.ExprContext,i)
def COMMA(self, i:int=None):
if i is None:
return self.getTokens(decafJavierParser.COMMA)
else:
return self.getToken(decafJavierParser.COMMA, i)
def getRuleIndex(self):
return decafJavierParser.RULE_method_call_inter
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterMethod_call_inter" ):
listener.enterMethod_call_inter(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitMethod_call_inter" ):
listener.exitMethod_call_inter(self)
def method_call_inter(self):
localctx = decafJavierParser.Method_call_interContext(self, self._ctx, self.state)
self.enterRule(localctx, 20, self.RULE_method_call_inter)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 191
self.method_name()
self.state = 192
self.match(decafJavierParser.LROUND)
self.state = 201
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << decafJavierParser.CALLOUT) | (1 << decafJavierParser.LROUND) | (1 << decafJavierParser.SUB) | (1 << decafJavierParser.NOT) | (1 << decafJavierParser.ID) | (1 << decafJavierParser.CHAR_LITERAL) | (1 << decafJavierParser.DECIMAL_LITERAL) | (1 << decafJavierParser.HEX_LITERAL) | (1 << decafJavierParser.BOOL_LITERAL))) != 0):
self.state = 193
self.expr(0)
self.state = 198
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==decafJavierParser.COMMA:
self.state = 194
self.match(decafJavierParser.COMMA)
self.state = 195
self.expr(0)
self.state = 200
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 203
self.match(decafJavierParser.RROUND)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Method_callContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def method_call_inter(self):
return self.getTypedRuleContext(decafJavierParser.Method_call_interContext,0)
def SEMICOLON(self):
return self.getToken(decafJavierParser.SEMICOLON, 0)
def CALLOUT(self):
return self.getToken(decafJavierParser.CALLOUT, 0)
def LROUND(self):
return self.getToken(decafJavierParser.LROUND, 0)
def STRING_LITERAL(self):
return self.getToken(decafJavierParser.STRING_LITERAL, 0)
def RROUND(self):
return self.getToken(decafJavierParser.RROUND, 0)
def COMMA(self, i:int=None):
if i is None:
return self.getTokens(decafJavierParser.COMMA)
else:
return self.getToken(decafJavierParser.COMMA, i)
def callout_arg(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(decafJavierParser.Callout_argContext)
else:
return self.getTypedRuleContext(decafJavierParser.Callout_argContext,i)
def getRuleIndex(self):
return decafJavierParser.RULE_method_call
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterMethod_call" ):
listener.enterMethod_call(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitMethod_call" ):
listener.exitMethod_call(self)
def method_call(self):
localctx = decafJavierParser.Method_callContext(self, self._ctx, self.state)
self.enterRule(localctx, 22, self.RULE_method_call)
self._la = 0 # Token type
try:
self.state = 225
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,19,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 205
self.method_call_inter()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 206
self.method_call_inter()
self.state = 207
self.match(decafJavierParser.SEMICOLON)
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 209
self.match(decafJavierParser.CALLOUT)
self.state = 210
self.match(decafJavierParser.LROUND)
self.state = 211
self.match(decafJavierParser.STRING_LITERAL)
self.state = 221
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==decafJavierParser.COMMA:
self.state = 212
self.match(decafJavierParser.COMMA)
self.state = 213
self.callout_arg()
self.state = 218
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==decafJavierParser.COMMA:
self.state = 214
self.match(decafJavierParser.COMMA)
self.state = 215
self.callout_arg()
self.state = 220
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 223
self.match(decafJavierParser.RROUND)
self.state = 224
self.match(decafJavierParser.SEMICOLON)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ExprContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def location(self):
return self.getTypedRuleContext(decafJavierParser.LocationContext,0)
def literal(self):
return self.getTypedRuleContext(decafJavierParser.LiteralContext,0)
def SUB(self):
return self.getToken(decafJavierParser.SUB, 0)
def expr(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(decafJavierParser.ExprContext)
else:
return self.getTypedRuleContext(decafJavierParser.ExprContext,i)
def method_call(self):
return self.getTypedRuleContext(decafJavierParser.Method_callContext,0)
def NOT(self):
return self.getToken(decafJavierParser.NOT, 0)
def LROUND(self):
return self.getToken(decafJavierParser.LROUND, 0)
def RROUND(self):
return self.getToken(decafJavierParser.RROUND, 0)
def bin_op(self):
return self.getTypedRuleContext(decafJavierParser.Bin_opContext,0)
def getRuleIndex(self):
return decafJavierParser.RULE_expr
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterExpr" ):
listener.enterExpr(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitExpr" ):
listener.exitExpr(self)
def expr(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = decafJavierParser.ExprContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 24
self.enterRecursionRule(localctx, 24, self.RULE_expr, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 239
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,20,self._ctx)
if la_ == 1:
self.state = 228
self.location()
pass
elif la_ == 2:
self.state = 229
self.literal()
pass
elif la_ == 3:
self.state = 230
self.match(decafJavierParser.SUB)
self.state = 231
self.expr(4)
pass
elif la_ == 4:
self.state = 232
self.method_call()
pass
elif la_ == 5:
self.state = 233
self.match(decafJavierParser.NOT)
self.state = 234
self.expr(2)
pass
elif la_ == 6:
self.state = 235
self.match(decafJavierParser.LROUND)
self.state = 236
self.expr(0)
self.state = 237
self.match(decafJavierParser.RROUND)
pass
self._ctx.stop = self._input.LT(-1)
self.state = 247
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,21,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = decafJavierParser.ExprContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_expr)
self.state = 241
if not self.precpred(self._ctx, 5):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 5)")
self.state = 242
self.bin_op()
self.state = 243
self.expr(6)
self.state = 249
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,21,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class LocationContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def var_id(self):
return self.getTypedRuleContext(decafJavierParser.Var_idContext,0)
def array_id(self):
return self.getTypedRuleContext(decafJavierParser.Array_idContext,0)
def getRuleIndex(self):
return decafJavierParser.RULE_location
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLocation" ):
listener.enterLocation(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLocation" ):
listener.exitLocation(self)
def location(self):
localctx = decafJavierParser.LocationContext(self, self._ctx, self.state)
self.enterRule(localctx, 26, self.RULE_location)
try:
self.state = 252
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,22,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 250
self.var_id()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 251
self.array_id()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Callout_argContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def expr(self):
return self.getTypedRuleContext(decafJavierParser.ExprContext,0)
def STRING_LITERAL(self):
return self.getToken(decafJavierParser.STRING_LITERAL, 0)
def getRuleIndex(self):
return decafJavierParser.RULE_callout_arg
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCallout_arg" ):
listener.enterCallout_arg(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCallout_arg" ):
listener.exitCallout_arg(self)
def callout_arg(self):
localctx = decafJavierParser.Callout_argContext(self, self._ctx, self.state)
self.enterRule(localctx, 28, self.RULE_callout_arg)
try:
self.state = 256
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [decafJavierParser.CALLOUT, decafJavierParser.LROUND, decafJavierParser.SUB, decafJavierParser.NOT, decafJavierParser.ID, decafJavierParser.CHAR_LITERAL, decafJavierParser.DECIMAL_LITERAL, decafJavierParser.HEX_LITERAL, decafJavierParser.BOOL_LITERAL]:
self.enterOuterAlt(localctx, 1)
self.state = 254
self.expr(0)
pass
elif token in [decafJavierParser.STRING_LITERAL]:
self.enterOuterAlt(localctx, 2)
self.state = 255
self.match(decafJavierParser.STRING_LITERAL)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Int_literalContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def DECIMAL_LITERAL(self):
return self.getToken(decafJavierParser.DECIMAL_LITERAL, 0)
def HEX_LITERAL(self):
return self.getToken(decafJavierParser.HEX_LITERAL, 0)
def getRuleIndex(self):
return decafJavierParser.RULE_int_literal
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterInt_literal" ):
listener.enterInt_literal(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitInt_literal" ):
listener.exitInt_literal(self)
def int_literal(self):
localctx = decafJavierParser.Int_literalContext(self, self._ctx, self.state)
self.enterRule(localctx, 30, self.RULE_int_literal)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 258
_la = self._input.LA(1)
if not(_la==decafJavierParser.DECIMAL_LITERAL or _la==decafJavierParser.HEX_LITERAL):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Rel_opContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def GREATER_OP(self):
return self.getToken(decafJavierParser.GREATER_OP, 0)
def LESS_OP(self):
return self.getToken(decafJavierParser.LESS_OP, 0)
def LESS_eq_op(self):
return self.getToken(decafJavierParser.LESS_eq_op, 0)
def GREATER_eq_op(self):
return self.getToken(decafJavierParser.GREATER_eq_op, 0)
def getRuleIndex(self):
return decafJavierParser.RULE_rel_op
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRel_op" ):
listener.enterRel_op(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRel_op" ):
listener.exitRel_op(self)
def rel_op(self):
localctx = decafJavierParser.Rel_opContext(self, self._ctx, self.state)
self.enterRule(localctx, 32, self.RULE_rel_op)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 260
_la = self._input.LA(1)
if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << decafJavierParser.GREATER_OP) | (1 << decafJavierParser.LESS_OP) | (1 << decafJavierParser.GREATER_eq_op) | (1 << decafJavierParser.LESS_eq_op))) != 0)):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Eq_opContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def EQUALITY_OP(self):
return self.getToken(decafJavierParser.EQUALITY_OP, 0)
def UNEQUALITY_OP(self):
return self.getToken(decafJavierParser.UNEQUALITY_OP, 0)
def getRuleIndex(self):
return decafJavierParser.RULE_eq_op
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterEq_op" ):
listener.enterEq_op(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitEq_op" ):
listener.exitEq_op(self)
def eq_op(self):
localctx = decafJavierParser.Eq_opContext(self, self._ctx, self.state)
self.enterRule(localctx, 34, self.RULE_eq_op)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 262
_la = self._input.LA(1)
if not(_la==decafJavierParser.EQUALITY_OP or _la==decafJavierParser.UNEQUALITY_OP):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Cond_opContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def AND(self):
return self.getToken(decafJavierParser.AND, 0)
def OR(self):
return self.getToken(decafJavierParser.OR, 0)
def getRuleIndex(self):
return decafJavierParser.RULE_cond_op
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCond_op" ):
listener.enterCond_op(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCond_op" ):
listener.exitCond_op(self)
def cond_op(self):
localctx = decafJavierParser.Cond_opContext(self, self._ctx, self.state)
self.enterRule(localctx, 36, self.RULE_cond_op)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 264
_la = self._input.LA(1)
if not(_la==decafJavierParser.AND or _la==decafJavierParser.OR):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LiteralContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def int_literal(self):
return self.getTypedRuleContext(decafJavierParser.Int_literalContext,0)
def CHAR_LITERAL(self):
return self.getToken(decafJavierParser.CHAR_LITERAL, 0)
def BOOL_LITERAL(self):
return self.getToken(decafJavierParser.BOOL_LITERAL, 0)
def getRuleIndex(self):
return decafJavierParser.RULE_literal
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLiteral" ):
listener.enterLiteral(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLiteral" ):
listener.exitLiteral(self)
def literal(self):
localctx = decafJavierParser.LiteralContext(self, self._ctx, self.state)
self.enterRule(localctx, 38, self.RULE_literal)
try:
self.state = 269
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [decafJavierParser.DECIMAL_LITERAL, decafJavierParser.HEX_LITERAL]:
self.enterOuterAlt(localctx, 1)
self.state = 266
self.int_literal()
pass
elif token in [decafJavierParser.CHAR_LITERAL]:
self.enterOuterAlt(localctx, 2)
self.state = 267
self.match(decafJavierParser.CHAR_LITERAL)
pass
elif token in [decafJavierParser.BOOL_LITERAL]:
self.enterOuterAlt(localctx, 3)
self.state = 268
self.match(decafJavierParser.BOOL_LITERAL)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Bin_opContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def arith_op(self):
return self.getTypedRuleContext(decafJavierParser.Arith_opContext,0)
def rel_op(self):
return self.getTypedRuleContext(decafJavierParser.Rel_opContext,0)
def eq_op(self):
return self.getTypedRuleContext(decafJavierParser.Eq_opContext,0)
def cond_op(self):
return self.getTypedRuleContext(decafJavierParser.Cond_opContext,0)
def getRuleIndex(self):
return decafJavierParser.RULE_bin_op
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterBin_op" ):
listener.enterBin_op(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitBin_op" ):
listener.exitBin_op(self)
def bin_op(self):
localctx = decafJavierParser.Bin_opContext(self, self._ctx, self.state)
self.enterRule(localctx, 40, self.RULE_bin_op)
try:
self.state = 275
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [decafJavierParser.ADD, decafJavierParser.SUB, decafJavierParser.MULTIPLY, decafJavierParser.DIVIDE, decafJavierParser.REMINDER]:
self.enterOuterAlt(localctx, 1)
self.state = 271
self.arith_op()
pass
elif token in [decafJavierParser.GREATER_OP, decafJavierParser.LESS_OP, decafJavierParser.GREATER_eq_op, decafJavierParser.LESS_eq_op]:
self.enterOuterAlt(localctx, 2)
self.state = 272
self.rel_op()
pass
elif token in [decafJavierParser.EQUALITY_OP, decafJavierParser.UNEQUALITY_OP]:
self.enterOuterAlt(localctx, 3)
self.state = 273
self.eq_op()
pass
elif token in [decafJavierParser.AND, decafJavierParser.OR]:
self.enterOuterAlt(localctx, 4)
self.state = 274
self.cond_op()
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Arith_opContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ADD(self):
return self.getToken(decafJavierParser.ADD, 0)
def SUB(self):
return self.getToken(decafJavierParser.SUB, 0)
def MULTIPLY(self):
return self.getToken(decafJavierParser.MULTIPLY, 0)
def DIVIDE(self):
return self.getToken(decafJavierParser.DIVIDE, 0)
def REMINDER(self):
return self.getToken(decafJavierParser.REMINDER, 0)
def getRuleIndex(self):
return decafJavierParser.RULE_arith_op
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterArith_op" ):
listener.enterArith_op(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitArith_op" ):
listener.exitArith_op(self)
def arith_op(self):
localctx = decafJavierParser.Arith_opContext(self, self._ctx, self.state)
self.enterRule(localctx, 42, self.RULE_arith_op)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 277
_la = self._input.LA(1)
if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << decafJavierParser.ADD) | (1 << decafJavierParser.SUB) | (1 << decafJavierParser.MULTIPLY) | (1 << decafJavierParser.DIVIDE) | (1 << decafJavierParser.REMINDER))) != 0)):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Var_typeContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def INT(self):
return self.getToken(decafJavierParser.INT, 0)
def BOOLEAN(self):
return self.getToken(decafJavierParser.BOOLEAN, 0)
def getRuleIndex(self):
return decafJavierParser.RULE_var_type
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterVar_type" ):
listener.enterVar_type(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitVar_type" ):
listener.exitVar_type(self)
def var_type(self):
localctx = decafJavierParser.Var_typeContext(self, self._ctx, self.state)
self.enterRule(localctx, 44, self.RULE_var_type)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 279
_la = self._input.LA(1)
if not(_la==decafJavierParser.BOOLEAN or _la==decafJavierParser.INT):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Assign_opContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def EQUAL_OP(self):
return self.getToken(decafJavierParser.EQUAL_OP, 0)
def ADD_eq_op(self):
return self.getToken(decafJavierParser.ADD_eq_op, 0)
def SUB_eq_op(self):
return self.getToken(decafJavierParser.SUB_eq_op, 0)
def getRuleIndex(self):
return decafJavierParser.RULE_assign_op
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAssign_op" ):
listener.enterAssign_op(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAssign_op" ):
listener.exitAssign_op(self)
def assign_op(self):
localctx = decafJavierParser.Assign_opContext(self, self._ctx, self.state)
self.enterRule(localctx, 46, self.RULE_assign_op)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 281
_la = self._input.LA(1)
if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << decafJavierParser.EQUAL_OP) | (1 << decafJavierParser.ADD_eq_op) | (1 << decafJavierParser.SUB_eq_op))) != 0)):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Method_nameContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ID(self):
return self.getToken(decafJavierParser.ID, 0)
def getRuleIndex(self):
return decafJavierParser.RULE_method_name
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterMethod_name" ):
listener.enterMethod_name(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitMethod_name" ):
listener.exitMethod_name(self)
def method_name(self):
localctx = decafJavierParser.Method_nameContext(self, self._ctx, self.state)
self.enterRule(localctx, 48, self.RULE_method_name)
try:
self.enterOuterAlt(localctx, 1)
self.state = 283
self.match(decafJavierParser.ID)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
def sempred(self, localctx:RuleContext, ruleIndex:int, predIndex:int):
if self._predicates is None:
self._predicates = dict()
self._predicates[12] = self.expr_sempred
pred = self._predicates.get(ruleIndex, None)
if pred is None:
raise Exception("No predicate with index:" + str(ruleIndex))
else:
return pred(localctx, predIndex)
def expr_sempred(self, localctx:ExprContext, predIndex:int):
if predIndex == 0:
return self.precpred(self._ctx, 5)
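# --- usage sketch (not part of the ANTLR-generated code above) ---------------
# Driving the generated parser needs the matching decafJavierLexer, which is
# assumed to be generated from the same decafJavier.g4 grammar; the antlr4
# runtime calls below are standard. The sample input is only a guess at a
# minimal program that the 'program' start rule would accept.
if __name__ == "__main__":
    from antlr4 import InputStream, CommonTokenStream
    from decafJavierLexer import decafJavierLexer  # assumed companion module

    source = "class Program { void main() { } }"
    lexer = decafJavierLexer(InputStream(source))
    parser = decafJavierParser(CommonTokenStream(lexer))
    tree = parser.program()                  # start rule of the grammar
    print(tree.toStringTree(recog=parser))   # LISP-style parse-tree dump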
| Python3/decafJavierParser.py | generated from decafJavier.g4 by ANTLR 4.9.2 |
import numpy as np
from dct_image_transform.dct import dct2
def reflection(image,axis=0):
'''
Reflect (mirror) an image that has been DCT-transformed in 8x8 blocks (a "DCT image").
Parameters
----------
image: 2-D array representing an image whose width and height are multiples of 8. Behaviour is undefined if they are not multiples of 8.
axis: axis along which to reflect. Default is `axis=0`.
Returns
-------
Returns a 2-D array containing the reflected DCT image. `image` itself is not modified.
Examples
--------
>>> import numpy as np
>>> a = np.arange(64).reshape((8,8))
>>> a
array([[ 0, 1, 2, 3, 4, 5, 6, 7],
[ 8, 9, 10, 11, 12, 13, 14, 15],
[16, 17, 18, 19, 20, 21, 22, 23],
[24, 25, 26, 27, 28, 29, 30, 31],
[32, 33, 34, 35, 36, 37, 38, 39],
[40, 41, 42, 43, 44, 45, 46, 47],
[48, 49, 50, 51, 52, 53, 54, 55],
[56, 57, 58, 59, 60, 61, 62, 63]])
>>> dct_image_transform.reflection.reflection(a,axis=0)
array([[ 5.77395663e-15, 1.00000000e+00, 2.00000000e+00,
3.00000000e+00, 4.00000000e+00, 5.00000000e+00,
6.00000000e+00, 7.00000000e+00],
[-8.00000000e+00, -9.00000000e+00, -1.00000000e+01,
-1.10000000e+01, -1.20000000e+01, -1.30000000e+01,
-1.40000000e+01, -1.50000000e+01],
[ 1.60000000e+01, 1.70000000e+01, 1.80000000e+01,
1.90000000e+01, 2.00000000e+01, 2.10000000e+01,
2.20000000e+01, 2.30000000e+01],
[-2.40000000e+01, -2.50000000e+01, -2.60000000e+01,
-2.70000000e+01, -2.80000000e+01, -2.90000000e+01,
-3.00000000e+01, -3.10000000e+01],
[ 3.20000000e+01, 3.30000000e+01, 3.40000000e+01,
3.50000000e+01, 3.60000000e+01, 3.70000000e+01,
3.80000000e+01, 3.90000000e+01],
[-4.00000000e+01, -4.10000000e+01, -4.20000000e+01,
-4.30000000e+01, -4.40000000e+01, -4.50000000e+01,
-4.60000000e+01, -4.70000000e+01],
[ 4.80000000e+01, 4.90000000e+01, 5.00000000e+01,
5.10000000e+01, 5.20000000e+01, 5.30000000e+01,
5.40000000e+01, 5.50000000e+01],
[-5.60000000e+01, -5.70000000e+01, -5.80000000e+01,
-5.90000000e+01, -6.00000000e+01, -6.10000000e+01,
-6.20000000e+01, -6.30000000e+01]])
'''
R = np.zeros((8, 8), dtype=float)
for i in range(8):
R[i,7-i] = 1
R = dct2(R)
if axis == 0:
return np.vstack(list(map(lambda m:np.dot(R,m),np.flip(np.vsplit(image,range(8,image.shape[0],8)),0))))
elif axis == 1:
return np.hstack(list(map(lambda m:np.dot(m,R),np.flip(np.hsplit(image,range(8,image.shape[1],8)),0))))
| dct_image_transform/reflection.py |
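# --- usage sketch for the dct_image_transform.reflection module above --------
# Assuming dct2 is an orthonormal block-wise 8x8 2-D DCT, mirroring in the DCT
# domain should agree with taking the DCT of the mirrored pixel image; this is
# an illustrative sanity check, not part of the original package.
import numpy as np
from dct_image_transform.dct import dct2
from dct_image_transform.reflection import reflection

img = np.random.rand(16, 24)            # height and width are multiples of 8
lhs = reflection(dct2(img), axis=0)     # mirror in the DCT domain
rhs = dct2(img[::-1, :])                # mirror in the pixel domain, then DCT
assert np.allclose(lhs, rhs)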
from __future__ import absolute_import
from django.conf import settings
import ujson
from zproject.backends import password_auth_enabled, dev_auth_enabled, google_auth_enabled, github_auth_enabled
def add_settings(request):
realm = request.user.realm if hasattr(request.user, "realm") else None
return {
# We use the not_voyager variable name so that templates
# will render even if the appropriate context is not provided
# to the template
'not_voyager': not settings.VOYAGER,
'zulip_com': settings.ZULIP_COM,
'custom_logo_url': settings.CUSTOM_LOGO_URL,
'register_link_disabled': settings.REGISTER_LINK_DISABLED,
'show_oss_announcement': settings.SHOW_OSS_ANNOUNCEMENT,
'zulip_admin': settings.ZULIP_ADMINISTRATOR,
'login_url': settings.HOME_NOT_LOGGED_IN,
'only_sso': settings.ONLY_SSO,
'external_api_path': settings.EXTERNAL_API_PATH,
'external_api_uri': settings.EXTERNAL_API_URI,
'external_uri_scheme': settings.EXTERNAL_URI_SCHEME,
'api_site_required': settings.EXTERNAL_API_PATH != "api.zulip.com",
'email_integration_enabled': settings.EMAIL_GATEWAY_BOT != "",
'email_gateway_example': settings.EMAIL_GATEWAY_EXAMPLE,
'open_realm_creation': settings.OPEN_REALM_CREATION,
'password_auth_enabled': password_auth_enabled(realm),
'dev_auth_enabled': dev_auth_enabled(),
'google_auth_enabled': google_auth_enabled(),
'github_auth_enabled': github_auth_enabled(),
'development_environment': settings.DEVELOPMENT,
}
def add_metrics(request):
return {
'dropboxAppKey': settings.DROPBOX_APP_KEY
}
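# Wiring sketch (illustrative, not part of this module): context processors are
# activated from the Django settings, so something along these lines would live
# in zproject/settings.py rather than here; the exact key depends on the Django
# version (TEMPLATE_CONTEXT_PROCESSORS before 1.8, TEMPLATES['OPTIONS'] later).
#
# TEMPLATES = [{
#     'BACKEND': 'django.template.backends.django.DjangoTemplates',
#     'OPTIONS': {
#         'context_processors': [
#             'django.template.context_processors.request',
#             'zerver.context_processors.add_settings',
#             'zerver.context_processors.add_metrics',
#         ],
#     },
# }]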
| zerver/context_processors.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""The setup script."""
import sys
from setuptools import setup, find_packages
if sys.version_info < (3, 7):
print(
"glean_parser requires at least Python 3.7",
file=sys.stderr
)
sys.exit(1)
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [
'Click>=6.0',
'PyYAML>=3.13',
'jsonschema>=3.0.0',
'inflection>=0.3.1',
'Jinja2>=2.10',
'diskcache>=3.1.0',
'appdirs>=1.4.3'
]
setup_requirements = ['pytest-runner', ]
test_requirements = ['pytest', ]
setup(
author="Michael Droettboom",
author_email='mdroettboom@mozilla.com',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
],
description="Parser tools for Mozilla's glean telemetry",
entry_points={
'console_scripts': [
'glean_parser=glean_parser.__main__:main',
],
},
install_requires=requirements,
long_description=readme + '\n\n' + history,
include_package_data=True,
keywords='glean_parser',
name='glean_parser',
packages=find_packages(include=['glean_parser']),
setup_requires=setup_requirements,
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/mozilla/glean_parser',
version='0.23.1',
zip_safe=False,
)
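# Typical developer workflow for this package (sketch; commands assume a normal
# pip/setuptools environment):
#   pip install -e .         # editable install, pulls in install_requires
#   glean_parser --help      # Click CLI exposed via the console_scripts entry point
#   python setup.py test     # runs the pytest suite via pytest-runner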
| setup.py |
import taichi as ti
from mpl_toolkits.mplot3d import Axes3D
import os
import math
import numpy as np
import random
import cv2
import matplotlib.pyplot as plt
import time
import taichi as tc
real = ti.f32
ti.set_default_fp(real)
dim = 3
# this will be overwritten
n_particles = 0
n_solid_particles = 0
n_actuators = 0
n_grid = 64
dx = 1 / n_grid
inv_dx = 1 / dx
dt = 2e-3
p_vol = 1
E = 10
# TODO: update
mu = E
la = E
max_steps = 512
steps = 512
gravity = 10
target = [0.8, 0.2, 0.2]
use_apic = False
scalar = lambda: ti.var(dt=real)
vec = lambda: ti.Vector(dim, dt=real)
mat = lambda: ti.Matrix(dim, dim, dt=real)
actuator_id = ti.global_var(ti.i32)
particle_type = ti.global_var(ti.i32)
x, v = vec(), vec()
grid_v_in, grid_m_in = vec(), scalar()
grid_v_out = vec()
C, F = mat(), mat()
screen = ti.Vector(3, dt=real)
loss = scalar()
n_sin_waves = 4
weights = scalar()
bias = scalar()
x_avg = vec()
actuation = scalar()
actuation_omega = 40
act_strength = 5
# ti.cfg.arch = ti.x86_64
# ti.cfg.use_llvm = True
ti.cfg.arch = ti.cuda
# ti.cfg.print_ir = True
visualize_resolution = 256
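# Field layout: controller parameters (weights, bias, per-step actuation), the
# full particle state history (x, v, C, F) for all max_steps substeps -- kept so
# the hand-written backward pass can revisit any substep -- the MPM grid
# buffers, the scalar loss and x_avg, and a screen buffer for rendering.
# lazy_grad() allocates matching gradient fields for everything placed here.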
@ti.layout
def place():
ti.root.dense(ti.ij, (n_actuators, n_sin_waves)).place(weights)
ti.root.dense(ti.i, n_actuators).place(bias)
ti.root.dense(ti.ij, (max_steps, n_actuators)).place(actuation)
ti.root.dense(ti.i, n_particles).place(actuator_id, particle_type)
ti.root.dense(ti.l, max_steps).dense(ti.k, n_particles).place(x, v, C, F)
ti.root.dense(ti.ijk, n_grid).place(grid_v_in, grid_m_in, grid_v_out)
ti.root.place(loss, x_avg)
ti.root.dense(ti.ij, (visualize_resolution, visualize_resolution)).place(screen)
ti.root.lazy_grad()
def zero_vec():
return [0.0, 0.0, 0.0]
def zero_matrix():
return [zero_vec(), zero_vec(), zero_vec()]
@ti.kernel
def clear_grid():
for i, j, k in grid_m_in:
grid_v_in[i, j, k] = [0, 0, 0]
grid_m_in[i, j, k] = 0
grid_v_in.grad[i, j, k] = [0, 0, 0]
grid_m_in.grad[i, j, k] = 0
grid_v_out.grad[i, j, k] = [0, 0, 0]
@ti.kernel
def clear_particle_grad():
# for all time steps and all particles
for f, i in x:
x.grad[f, i] = zero_vec()
v.grad[f, i] = zero_vec()
C.grad[f, i] = zero_matrix()
F.grad[f, i] = zero_matrix()
@ti.kernel
def clear_actuation_grad():
for t, i in actuation:
actuation[t, i] = 0.0
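# Particle-to-grid (P2G) scatter: each particle updates its deformation
# gradient F, builds a Cauchy stress (a simple J-based pressure for fluid
# particles, a Neo-Hookean-style model plus an actuation stress for solid
# particles), and distributes momentum and mass to its 3x3x3 grid
# neighbourhood using quadratic B-spline weights and atomic adds.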
@ti.kernel
def p2g(f: ti.i32):
for p in range(0, n_particles):
base = ti.cast(x[f, p] * inv_dx - 0.5, ti.i32)
fx = x[f, p] * inv_dx - ti.cast(base, ti.i32)
w = [0.5 * ti.sqr(1.5 - fx), 0.75 - ti.sqr(fx - 1),
0.5 * ti.sqr(fx - 0.5)]
new_F = (ti.Matrix.diag(dim=dim, val=1) + dt * C[f, p]) @ F[f, p]
J = ti.determinant(new_F)
if particle_type[p] == 0: # fluid
sqrtJ = ti.sqrt(J)
# TODO: need pow(x, 1/3)
new_F = ti.Matrix([[sqrtJ, 0, 0], [0, sqrtJ, 0], [0, 0, 1]])
F[f + 1, p] = new_F
# r, s = ti.polar_decompose(new_F)
act_id = actuator_id[p]
act = actuation[f, ti.max(0, act_id)] * act_strength
if act_id == -1:
act = 0.0
# ti.print(act)
A = ti.Matrix([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 1.0]]) * act
cauchy = ti.Matrix(zero_matrix())
mass = 0.0
ident = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
if particle_type[p] == 0:
mass = 4
cauchy = ti.Matrix(ident) * (J - 1) * E
else:
mass = 1
cauchy = mu * (new_F @ ti.transposed(new_F)) + ti.Matrix(ident) * (la * ti.log(J) - mu)
cauchy += new_F @ A @ ti.transposed(new_F)
stress = -(dt * p_vol * 4 * inv_dx * inv_dx) * cauchy
affine = stress + mass * C[f, p]
for i in ti.static(range(3)):
for j in ti.static(range(3)):
for k in ti.static(range(3)):
offset = ti.Vector([i, j, k])
dpos = (ti.cast(ti.Vector([i, j, k]), real) - fx) * dx
weight = w[i](0) * w[j](1) * w[k](2)
grid_v_in[base + offset].atomic_add(
weight * (mass * v[f, p] + affine @ dpos))
grid_m_in[base + offset].atomic_add(weight * mass)
bound = 3
coeff = 1.5
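# Grid update: convert momentum to velocity, apply gravity, and impose boundary
# conditions -- sticky walls on the x/z faces and the ceiling, plus a
# Coulomb-friction style projection of downward velocities against the upward
# ground normal (friction coefficient `coeff`).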
@ti.kernel
def grid_op():
for i, j, k in grid_m_in:
inv_m = 1 / (grid_m_in[i, j, k] + 1e-10)
v_out = inv_m * grid_v_in[i, j, k]
v_out[1] -= dt * gravity
if i < bound and v_out[0] < 0:
v_out[0] = 0
v_out[1] = 0
v_out[2] = 0
if i > n_grid - bound and v_out[0] > 0:
v_out[0] = 0
v_out[1] = 0
v_out[2] = 0
if k < bound and v_out[2] < 0:
v_out[0] = 0
v_out[1] = 0
v_out[2] = 0
if k > n_grid - bound and v_out[2] > 0:
v_out[0] = 0
v_out[1] = 0
v_out[2] = 0
if j < bound and v_out[1] < 0:
v_out[0] = 0
v_out[1] = 0
v_out[2] = 0
normal = ti.Vector([0.0, 1.0, 0.0])
lsq = ti.sqr(normal).sum()
if lsq > 0.5:
if ti.static(coeff < 0):
v_out[0] = 0
v_out[1] = 0
v_out[2] = 0
else:
lin = (ti.transposed(v_out) @ normal)(0)
if lin < 0:
vit = v_out - lin * normal
lit = vit.norm() + 1e-10
if lit + coeff * lin <= 0:
v_out[0] = 0
v_out[1] = 0
v_out[2] = 0
else:
v_out = (1 + coeff * lin / lit) * vit
if j > n_grid - bound and v_out[1] > 0:
v_out[0] = 0
v_out[1] = 0
v_out[2] = 0
grid_v_out[i, j, k] = v_out
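# Grid-to-particle (G2P) gather: interpolate the updated grid velocities back
# to each particle with the same B-spline weights, rebuild the APIC affine
# velocity matrix C, and advect the particle position to the next substep.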
@ti.kernel
def g2p(f: ti.i32):
for p in range(0, n_particles):
base = ti.cast(x[f, p] * inv_dx - 0.5, ti.i32)
fx = x[f, p] * inv_dx - ti.cast(base, real)
w = [0.5 * ti.sqr(1.5 - fx), 0.75 - ti.sqr(fx - 1.0),
0.5 * ti.sqr(fx - 0.5)]
new_v = ti.Vector(zero_vec())
new_C = ti.Matrix(zero_matrix())
for i in ti.static(range(3)):
for j in ti.static(range(3)):
for k in ti.static(range(3)):
dpos = ti.cast(ti.Vector([i, j, k]), real) - fx
g_v = grid_v_out[base(0) + i, base(1) + j, base(2) + k]
weight = w[i](0) * w[j](1) * w[k](2)
new_v += weight * g_v
new_C += 4 * weight * ti.outer_product(g_v, dpos) * inv_dx
v[f + 1, p] = new_v
x[f + 1, p] = x[f, p] + dt * v[f + 1, p]
C[f + 1, p] = new_C
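# Open-loop controller: each actuator's signal is a weighted sum of
# n_sin_waves sinusoids of simulation time plus a bias, squashed through tanh.
# weights and bias are the trainable parameters that the differentiable
# simulation optimizes.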
@ti.kernel
def compute_actuation(t: ti.i32):
for i in range(n_actuators):
act = 0.0
for j in ti.static(range(n_sin_waves)):
act += weights[i, j] * ti.sin(
actuation_omega * t * dt + 2 * math.pi / n_sin_waves * j)
act += bias[i]
actuation[t, i] = ti.tanh(act)
@ti.kernel
def compute_x_avg():
for i in range(n_particles):
contrib = 0.0
if particle_type[i] == 1:
contrib = 1.0 / n_solid_particles
x_avg[None].atomic_add(contrib * x[steps - 1, i])
@ti.kernel
def compute_loss():
dist = x_avg[None][0]
loss[None] = -dist
def forward(total_steps=steps):
# simulation
for s in range(total_steps - 1):
clear_grid()
        compute_actuation(s)
p2g(s)
grid_op()
g2p(s)
x_avg[None] = [0, 0, 0]
compute_x_avg()
compute_loss()
return loss[None]
def backward():
clear_particle_grad()
compute_loss.grad()
compute_x_avg.grad()
for s in reversed(range(steps - 1)):
# Since we do not store the grid history (to save space), we redo p2g and grid op
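        # Only the per-step particle state (x, v, C, F) is checkpointed; the grid fields
        # exist in a single copy, so step s's grid is rebuilt here before the gradient
        # kernels run in reverse order (g2p.grad -> grid_op.grad -> p2g.grad).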
clear_grid()
p2g(s)
grid_op()
g2p.grad(s)
grid_op.grad()
p2g.grad(s)
        compute_actuation.grad(s)
class Scene:
def __init__(self):
self.n_particles = 0
self.n_solid_particles = 0
self.x = []
self.actuator_id = []
self.particle_type = []
self.offset_x = 0
self.offset_y = 0
self.offset_z = 0
self.num_actuators = 0
def new_actuator(self):
self.num_actuators += 1
global n_actuators
n_actuators = self.num_actuators
return self.num_actuators - 1
def add_rect(self, x, y, z, w, h, d, actuation, ptype=1):
if ptype == 0:
assert actuation == -1
global n_particles
density = 3
w_count = int(w / dx * density)
h_count = int(h / dx * density)
d_count = int(d / dx * density)
real_dx = w / w_count
real_dy = h / h_count
real_dz = d / d_count
if ptype == 1:
for i in range(w_count):
for j in range(h_count):
for k in range(d_count):
self.x.append([x + (i + 0.5) * real_dx + self.offset_x,
y + (j + 0.5) * real_dy + self.offset_y,
z + (k + 0.5) * real_dz + self.offset_z])
self.actuator_id.append(actuation)
self.particle_type.append(ptype)
self.n_particles += 1
self.n_solid_particles += int(ptype == 1)
if self.n_particles % 1000 == 0:
print("num particles", self.n_particles)
else:
for i in range(w_count):
for j in range(h_count):
for k in range(d_count):
self.x.append([x + random.random() * w + self.offset_x,
y + random.random() * h + self.offset_y,
z + random.random() * d + self.offset_z])
self.actuator_id.append(actuation)
self.particle_type.append(ptype)
self.n_particles += 1
self.n_solid_particles += int(ptype == 1)
if self.n_particles % 1000 == 0:
print("num particles", self.n_particles)
def set_offset(self, x, y, z):
self.offset_x = x
self.offset_y = y
self.offset_z = z
def finalize(self):
global n_particles, n_solid_particles
n_particles = self.n_particles
n_solid_particles = max(self.n_solid_particles, 1)
print('n_particles', n_particles)
print('n_solid', n_solid_particles)
def set_n_actuators(self, n_act):
global n_actuators
n_actuators = n_act
gui = tc.core.GUI("Differentiable MPM", tc.veci(1024, 1024))
canvas = gui.get_canvas()
@ti.kernel
def splat(t: ti.i32):
for i in range(n_particles):
pos = ti.cast(x[t, i] * visualize_resolution, ti.i32)
screen[pos[0], pos[1]][0] += 0.1
res = [visualize_resolution, visualize_resolution]
@ti.kernel
def copy_back_and_clear(img: np.ndarray):
for i in range(res[0]):
for j in range(res[1]):
coord = ((res[1] - 1 - j) * res[0] + i) * 3
for c in ti.static(range(3)):
img[coord + c] = screen[i, j][2 - c]
screen[i, j][2 - c] = 0
def robot(scene):
block_size = 0.1
# scene.set_offset(0.1, 0.10, 0.3)
scene.set_offset(0.1, 0.05, 0.3)
def add_leg(x, y, z):
for i in range(4):
            scene.add_rect(x + block_size / 2 * (i // 2),
                           y + 0.7 * block_size / 2 * (i % 2),
                           z,
                           block_size / 2,
                           0.7 * block_size / 2,
                           block_size,
                           scene.new_actuator())
for i in range(4):
add_leg(i // 2 * block_size * 2, 0.0, i % 2 * block_size * 2)
for i in range(3):
scene.add_rect(block_size * i, 0, block_size, block_size, block_size * 0.7, block_size, -1, 1)
# scene.set_offset(0.1, 0.03, 0.3)
scene.add_rect(0.1, 0.15, 0.1, 0.2, 0.05, 0.2, -1, 0)
# scene.
def main():
tc.set_gdb_trigger()
# initialization
scene = Scene()
# fish(scene)
robot(scene)
# scene.add_rect(0.4, 0.4, 0.2, 0.1, 0.3, 0.1, -1, 1)
scene.finalize()
for i in range(n_actuators):
for j in range(n_sin_waves):
weights[i, j] = np.random.randn() * 0.01
for i in range(scene.n_particles):
x[0, i] = scene.x[i]
F[0, i] = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
actuator_id[i] = scene.actuator_id[i]
particle_type[i] = scene.particle_type[i]
fig = plt.figure()
plt.ion()
ax = fig.add_subplot(111, projection='3d')
losses = []
for iter in range(501):
ti.clear_all_gradients()
l = forward()
losses.append(l)
loss.grad[None] = 1
backward()
print('i=', iter, 'loss=', l)
learning_rate = 10
for i in range(n_actuators):
for j in range(n_sin_waves):
# print(weights.grad[i, j])
weights[i, j] -= learning_rate * weights.grad[i, j]
bias[i] -= learning_rate * bias.grad[i]
if iter % 50 == 0:
# visualize
print("Dumping particles...")
for s in range(7, steps, 2):
def to255(x):
return int(max(min(x * 255, 255), 0))
xs, ys, zs = [], [], []
us, vs, ws = [], [], []
cs = []
folder = 'mpm3d/iter{:04d}/'.format(iter)
os.makedirs(folder, exist_ok=True)
for i in range(n_particles):
xs.append(x[s, i][0])
ys.append(x[s, i][1])
zs.append(x[s, i][2])
us.append(v[s, i][0])
vs.append(v[s, i][1])
ws.append(v[s, i][2])
if particle_type[i] == 0:
# fluid
r = 0.3
g = 0.3
b = 1.0
else:
# neohookean
if actuator_id[i] != -1:
# actuated
act = actuation[s, actuator_id[i]] * 0.5
r = 0.5 - act
g = 0.5 - abs(act)
b = 0.5 + act
else:
r, g, b = 0.4, 0.4, 0.4
color = to255(r) * 65536 + 256 * to255(g) + to255(b)
cs.append(color)
data = np.array(xs + ys + zs + us + vs + ws + cs, dtype=np.float32)
data.tofile(open('{}/{:04}.bin'.format(folder, s), 'wb'))
print("Particles dumped")
if __name__ == '__main__':
main()
| examples/difftaichi/liquid.py | 13,182 | this will be overwritten TODO: update ti.cfg.arch = ti.x86_64 ti.cfg.use_llvm = True ti.cfg.print_ir = True for all time steps and all particles fluid TODO: need pow(x, 1/3) r, s = ti.polar_decompose(new_F) ti.print(act) simulation Since we do not store the grid history (to save space), we redo p2g and grid op scene.set_offset(0.1, 0.10, 0.3) scene.set_offset(0.1, 0.03, 0.3) scene. initialization fish(scene) scene.add_rect(0.4, 0.4, 0.2, 0.1, 0.3, 0.1, -1, 1) print(weights.grad[i, j]) visualize fluid neohookean actuated | 525 | en | 0.389814 |
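A quick sanity check of the quadratic B-spline kernel used by p2g/g2p above is that its three weights form a partition of unity for any fractional offset; a minimal NumPy sketch, independent of Taichi and written purely for illustration:
import numpy as np
fx = np.random.uniform(0.5, 1.5, size=3)  # per-axis fractional offset, same range as in p2g/g2p
w = [0.5 * (1.5 - fx) ** 2, 0.75 - (fx - 1.0) ** 2, 0.5 * (fx - 0.5) ** 2]
assert np.allclose(w[0] + w[1] + w[2], 1.0)  # the quadratic B-spline weights always sum to 1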
#!/usr/bin/env python
import sys
last_pkt_num = -1
daystart_pkt_num = -1
daystart_recv_time = -1
daystart_hwrecv_time = -1
dayend_pkt_num = -1
dayend_recv_time = -1
dayend_hwrecv_time = -1
def process_line(line):
global last_pkt_num
global daystart_pkt_num, daystart_recv_time, daystart_hwrecv_time
global dayend_pkt_num, dayend_recv_time, dayend_hwrecv_time
parts = line.split()
    pkt_num = int(parts[1])
    sent_time = int(parts[3])
    recv_time = int(parts[5])
    hw_recv_time = int(parts[7])
# read in the first line
if (daystart_pkt_num == -1):
last_pkt_num = pkt_num
daystart_pkt_num = pkt_num
daystart_recv_time = recv_time
daystart_hwrecv_time = hw_recv_time
dayend_pkt_num = pkt_num
dayend_recv_time = recv_time
dayend_hwrecv_time = hw_recv_time
return
# skip through the day, looking for a gap
if (pkt_num == last_pkt_num + 1):
last_pkt_num = pkt_num
dayend_pkt_num = pkt_num
dayend_recv_time = recv_time
dayend_hwrecv_time = hw_recv_time
return
# we found a gap
dstr = "D {} pkts long, {} us (utime), {} us (hw)".format(
dayend_pkt_num - daystart_pkt_num,
dayend_recv_time - daystart_recv_time,
dayend_hwrecv_time - daystart_hwrecv_time)
print(dstr)
nstr = "\t\t\t\t\t\t\t\tN {} pkts long, {} us (utime), {} us (hw)".format(
pkt_num - dayend_pkt_num,
recv_time - dayend_recv_time,
hw_recv_time - dayend_hwrecv_time)
print(nstr)
last_pkt_num = pkt_num
daystart_pkt_num = pkt_num
daystart_recv_time = recv_time
daystart_hwrecv_time = hw_recv_time
def main(argv):
if (len(argv) == 1):
fin = sys.stdin
else:
fin = open(argv[1])
while 1:
try:
line = fin.readline()
except KeyboardInterrupt:
break
if not line:
break
process_line(line)
if __name__ == "__main__":
main(sys.argv)
| src/scripts/process-loss-rate-output.py | 2,039 | !/usr/bin/env python read in the first line skip through the day, looking for a gap we found a gap | 98 | en | 0.898472 |
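The parser above only reads whitespace-separated positions 1, 3, 5 and 7 of each record; a self-contained illustration of the assumed layout (the "seq"/"sent"/"recv"/"hw" labels are guesses, not taken from the trace producer):
line = "seq 1024 sent 1700000000000 recv 1700000000420 hw 1700000000380"  # hypothetical record
parts = line.split()
pkt_num, sent_us, recv_us, hw_recv_us = (int(parts[i]) for i in (1, 3, 5, 7))
print(pkt_num, recv_us - sent_us, hw_recv_us - sent_us)  # packet number and receive-minus-send deltas in us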
# -*- coding: utf8 -*-
import json
from activity.womail.womail import WoMail
class DailySign(WoMail):
def __init__(self, mobile, openId):
super(DailySign, self).__init__(mobile, openId)
self.session.headers.update({
# 'Origin': 'https://nyan.mail.wo.cn',
'Referer': 'https://nyan.mail.wo.cn/cn/sign/wap/index.html',
'User-Agent': 'Mozilla/5.0 (Linux; Android 8.1.0; MI 8 SE Build/OPM1.171019.019; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/78.0.3904.62 XWEB/2797 MMWEBSDK/20210501 Mobile Safari/537.36 MMWEBID/107 MicroMessenger/8.0.6.1900(0x28000635) Process/toolsmp WeChat/arm64 Weixin NetType/4G Language/zh_CN ABI/arm64',
'X-Requested-With': 'com.tencent.mm' # XMLHttpRequest
})
self.message = ''
def login(self):
url = f'https://nyan.mail.wo.cn/cn/sign/index/index?mobile={self.mobile}&userName=&openId={self.openId}'
self.session.get(url=url)
print(self.session.cookies.get_dict())
def index(self):
url = 'https://nyan.mail.wo.cn/cn/sign/wap/index.html'
self.session.get(url=url)
def userInfo(self):
url = f'https://nyan.mail.wo.cn/cn/sign/index/userinfo.do?rand={self.randomNum}'
resp = self.session.post(url=url)
data = resp.json()
try:
print(json.dumps(data, indent=4, ensure_ascii=False))
return str(data['result']['lastDay']), str(data['result']['keepSign'])
except:
print(resp.text)
def isLogin(self):
url = f'https://nyan.mail.wo.cn/cn/sign/user/isLoginMail.do?rand={self.randomNum}'
resp = self.session.post(url=url)
print(resp.text)
def check(self):
url = f'https://nyan.mail.wo.cn/cn/sign/user/checkin.do?rand={self.randomNum}'
resp = self.session.post(url=url)
print(resp.text)
def prizeDetail(self):
url = f'https://nyan.mail.wo.cn/cn/sign/user/prizes.do?rand={self.randomNum}'
resp = self.session.post(url=url)
data = resp.json()
if len(data['result']) > 3:
data['result'] = data['result'][:3]
print(json.dumps(data, indent=4, ensure_ascii=False))
def doTask(self, task_name):
url = f'https://nyan.mail.wo.cn/cn/sign/user/doTask.do?rand={self.randomNum}'
data = {
'taskName': task_name
}
resp = self.session.post(url=url, data=data)
print(resp.text)
def overTask(self):
url = f'https://nyan.mail.wo.cn/cn/sign/user/overtask.do?rand={self.randomNum}'
data = {
'taskLevel': '2'
}
resp = self.session.post(url=url, data=data)
data = resp.json()
print(json.dumps(data, indent=4, ensure_ascii=False))
result = [item['taskName'] for item in data['result']]
# data = {
# 'taskLevel': '1'
# }
return result
def run(self):
if int(self.now_date.replace('-', '')) > 20220228:
return
try:
self.login()
self.index()
result = self.overTask()
for task_name in ["loginmail", "clubactivity", "club"]: # , "download"
if task_name in result:
continue
self.doTask(task_name)
self.flushTime(1)
else:
print("积分签到任务已完成")
lastDay, keepSign = self.userInfo()
if keepSign == '21':
                print('Skipping check-in beyond day 21')
                self.message = 'Daily sign-in: skipped check-in beyond day 21'
self.recordLog(self.message)
return
else:
if self.now_date.replace('-', '') == lastDay:
print("今日已打卡")
return
else:
self.check()
self.prizeDetail()
lastDay, _ = self.userInfo()
if self.now_date.replace('-', '') == lastDay:
                self.message = 'Daily sign-in: checked in'
            else:
                self.message = 'Daily sign-in: not checked in'
self.recordLog(self.message)
except Exception as e:
print(e)
if __name__ == "__main__":
pass
| activity/womail/dailyTask.py | 4,340 | -*- coding: utf8 -*- 'Origin': 'https://nyan.mail.wo.cn', XMLHttpRequest data = { 'taskLevel': '1' } , "download" | 117 | en | 0.146124 |
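For reference, a driver elsewhere in the repository presumably exercises this class roughly as follows; the credentials below are placeholders, not real values:
from activity.womail.dailyTask import DailySign
signer = DailySign(mobile="13800000000", openId="example-openid")  # placeholder credentials
signer.run()  # logs in, completes the point tasks, performs the daily check-in and records the result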
'''
References:
- An Outline of Set Theory, Henle
'''
from . import fol
class ElementSymbol(fol.ImproperSymbol):
def __init__(self):
        fol.PrimitiveSymbol.__init__(self, '∈')
def symbol_type(self) -> str:
return 'element of'
@staticmethod
def new() -> "ElementSymbol":
return ElementSymbol()
| ddq_1/lang/set.py | 332 | References:
- An Outline of Set Theory, Henle | 46 | en | 0.720767 |
import structlog
from pathlib import Path
from typing import Any, Dict, Generator, Iterable, Optional, Tuple
from normality import normalize, WS
from followthemoney.schema import Schema
from followthemoney.types import registry
from opensanctions import settings
from nomenklatura.loader import Loader
from nomenklatura.index import Index
from opensanctions.core.entity import Entity
from opensanctions.core.dataset import Dataset
log = structlog.get_logger(__name__)
def get_index_path(dataset: Dataset) -> Path:
index_dir = settings.DATA_PATH.joinpath("index")
index_dir.mkdir(exist_ok=True)
return index_dir.joinpath(f"{dataset.name}.pkl")
def get_index(
dataset: Dataset, loader: Loader[Dataset, Entity]
) -> Index[Dataset, Entity]:
"""Load the search index for the given dataset or generate one if it does
not exist."""
path = get_index_path(dataset)
index = Index.load(loader, path)
return index
| opensanctions/core/index.py | 944 | Load the search index for the given dataset or generate one if it does
not exist. | 81 | en | 0.593916 |
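A rough usage sketch for the helpers above; the dataset name and the way the nomenklatura Loader is built are assumptions, since both come from other parts of the codebase:
from opensanctions.core.dataset import Dataset
dataset = Dataset.require("default")  # assumed lookup helper; "default" is a stand-in dataset name
loader = ...  # any Loader[Dataset, Entity] for the dataset; construction omitted here
index = get_index(dataset, loader)  # reads <DATA_PATH>/index/<name>.pkl, building it if missing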
# coding: utf-8
from __future__ import unicode_literals
import base64
import datetime
import hashlib
import json
import netrc
import os
import random
import re
import socket
import ssl
import sys
import time
import math
from ..compat import (
compat_cookiejar_Cookie,
compat_cookies,
compat_etree_Element,
compat_etree_fromstring,
compat_getpass,
compat_integer_types,
compat_http_client,
compat_os_name,
compat_str,
compat_urllib_error,
compat_urllib_parse_unquote,
compat_urllib_parse_urlencode,
compat_urllib_request,
compat_urlparse,
compat_xml_parse_error,
)
from ..downloader.f4m import (
get_base_url,
remove_encrypted_media,
)
from ..utils import (
NO_DEFAULT,
age_restricted,
base_url,
bug_reports_message,
clean_html,
compiled_regex_type,
determine_ext,
determine_protocol,
dict_get,
error_to_compat_str,
ExtractorError,
extract_attributes,
fix_xml_ampersands,
float_or_none,
GeoRestrictedError,
GeoUtils,
int_or_none,
js_to_json,
JSON_LD_RE,
mimetype2ext,
orderedSet,
parse_bitrate,
parse_codecs,
parse_duration,
parse_iso8601,
parse_m3u8_attributes,
parse_resolution,
RegexNotFoundError,
sanitized_Request,
sanitize_filename,
str_or_none,
str_to_int,
strip_or_none,
unescapeHTML,
unified_strdate,
unified_timestamp,
update_Request,
update_url_query,
urljoin,
url_basename,
url_or_none,
xpath_element,
xpath_text,
xpath_with_ns,
)
class InfoExtractor(object):
"""Information Extractor class.
Information extractors are the classes that, given a URL, extract
information about the video (or videos) the URL refers to. This
information includes the real video URL, the video title, author and
others. The information is stored in a dictionary which is then
passed to the YoutubeDL. The YoutubeDL processes this
information possibly downloading the video to the file system, among
other possible outcomes.
The type field determines the type of the result.
By far the most common value (and the default if _type is missing) is
"video", which indicates a single video.
For a video, the dictionaries must include the following fields:
id: Video identifier.
title: Video title, unescaped.
Additionally, it must contain either a formats entry or a url one:
formats: A list of dictionaries for each format available, ordered
from worst to best quality.
Potential fields:
* url The mandatory URL representing the media:
for plain file media - HTTP URL of this file,
for RTMP - RTMP URL,
for HLS - URL of the M3U8 media playlist,
for HDS - URL of the F4M manifest,
for DASH
- HTTP URL to plain file media (in case of
unfragmented media)
- URL of the MPD manifest or base URL
representing the media if MPD manifest
is parsed from a string (in case of
fragmented media)
for MSS - URL of the ISM manifest.
* manifest_url
The URL of the manifest file in case of
fragmented media:
for HLS - URL of the M3U8 master playlist,
for HDS - URL of the F4M manifest,
for DASH - URL of the MPD manifest,
for MSS - URL of the ISM manifest.
* ext Will be calculated from URL if missing
* format A human-readable description of the format
("mp4 container with h264/opus").
Calculated from the format_id, width, height.
and format_note fields if missing.
* format_id A short description of the format
("mp4_h264_opus" or "19").
Technically optional, but strongly recommended.
* format_note Additional info about the format
("3D" or "DASH video")
* width Width of the video, if known
* height Height of the video, if known
* resolution Textual description of width and height
* tbr Average bitrate of audio and video in KBit/s
* abr Average audio bitrate in KBit/s
* acodec Name of the audio codec in use
* asr Audio sampling rate in Hertz
* vbr Average video bitrate in KBit/s
* fps Frame rate
* vcodec Name of the video codec in use
* container Name of the container format
* filesize The number of bytes, if known in advance
* filesize_approx An estimate for the number of bytes
* player_url SWF Player URL (used for rtmpdump).
* protocol The protocol that will be used for the actual
download, lower-case.
"http", "https", "rtsp", "rtmp", "rtmpe",
"m3u8", "m3u8_native" or "http_dash_segments".
* fragment_base_url
Base URL for fragments. Each fragment's path
value (if present) will be relative to
this URL.
* fragments A list of fragments of a fragmented media.
Each fragment entry must contain either an url
or a path. If an url is present it should be
considered by a client. Otherwise both path and
fragment_base_url must be present. Here is
the list of all potential fields:
* "url" - fragment's URL
* "path" - fragment's path relative to
fragment_base_url
* "duration" (optional, int or float)
* "filesize" (optional, int)
* preference Order number of this format. If this field is
present and not None, the formats get sorted
by this field, regardless of all other values.
-1 for default (order by other properties),
-2 or smaller for less than default.
< -1000 to hide the format (if there is
another one which is strictly better)
* language Language code, e.g. "de" or "en-US".
* language_preference Is this in the language mentioned in
the URL?
10 if it's what the URL is about,
-1 for default (don't know),
-10 otherwise, other values reserved for now.
* quality Order number of the video quality of this
format, irrespective of the file format.
-1 for default (order by other properties),
-2 or smaller for less than default.
* source_preference Order number for this video source
(quality takes higher priority)
-1 for default (order by other properties),
-2 or smaller for less than default.
* http_headers A dictionary of additional HTTP headers
to add to the request.
* stretched_ratio If given and not 1, indicates that the
video's pixels are not square.
width : height ratio as float.
* no_resume The server does not support resuming the
(HTTP or RTMP) download. Boolean.
* downloader_options A dictionary of downloader options as
described in FileDownloader
url: Final video URL.
ext: Video filename extension.
format: The video format, defaults to ext (used for --get-format)
player_url: SWF Player URL (used for rtmpdump).
The following fields are optional:
alt_title: A secondary title of the video.
display_id An alternative identifier for the video, not necessarily
unique, but available before title. Typically, id is
something like "4234987", title "Dancing naked mole rats",
and display_id "dancing-naked-mole-rats"
thumbnails: A list of dictionaries, with the following entries:
* "id" (optional, string) - Thumbnail format ID
* "url"
* "preference" (optional, int) - quality of the image
* "width" (optional, int)
* "height" (optional, int)
* "resolution" (optional, string "{width}x{height}",
deprecated)
* "filesize" (optional, int)
thumbnail: Full URL to a video thumbnail image.
description: Full video description.
uploader: Full name of the video uploader.
license: License name the video is licensed under.
creator: The creator of the video.
release_date: The date (YYYYMMDD) when the video was released.
timestamp: UNIX timestamp of the moment the video became available.
upload_date: Video upload date (YYYYMMDD).
If not explicitly set, calculated from timestamp.
uploader_id: Nickname or id of the video uploader.
uploader_url: Full URL to a personal webpage of the video uploader.
channel: Full name of the channel the video is uploaded on.
Note that channel fields may or may not repeat uploader
fields. This depends on a particular extractor.
channel_id: Id of the channel.
channel_url: Full URL to a channel webpage.
location: Physical location where the video was filmed.
subtitles: The available subtitles as a dictionary in the format
{tag: subformats}. "tag" is usually a language code, and
"subformats" is a list sorted from lower to higher
preference, each element is a dictionary with the "ext"
entry and one of:
* "data": The subtitles file contents
* "url": A URL pointing to the subtitles file
"ext" will be calculated from URL if missing
automatic_captions: Like 'subtitles', used by the YoutubeIE for
automatically generated captions
duration: Length of the video in seconds, as an integer or float.
view_count: How many users have watched the video on the platform.
like_count: Number of positive ratings of the video
dislike_count: Number of negative ratings of the video
repost_count: Number of reposts of the video
average_rating: Average rating give by users, the scale used depends on the webpage
comment_count: Number of comments on the video
comments: A list of comments, each with one or more of the following
properties (all but one of text or html optional):
* "author" - human-readable name of the comment author
* "author_id" - user ID of the comment author
* "id" - Comment ID
* "html" - Comment as HTML
* "text" - Plain text of the comment
* "timestamp" - UNIX timestamp of comment
* "parent" - ID of the comment this one is replying to.
Set to "root" to indicate that this is a
comment to the original video.
age_limit: Age restriction for the video, as an integer (years)
webpage_url: The URL to the video webpage, if given to youtube-dl it
should allow to get the same result again. (It will be set
by YoutubeDL if it's missing)
categories: A list of categories that the video falls in, for example
["Sports", "Berlin"]
tags: A list of tags assigned to the video, e.g. ["sweden", "pop music"]
is_live: True, False, or None (=unknown). Whether this video is a
live stream that goes on instead of a fixed-length video.
start_time: Time in seconds where the reproduction should start, as
specified in the URL.
end_time: Time in seconds where the reproduction should end, as
specified in the URL.
chapters: A list of dictionaries, with the following entries:
* "start_time" - The start time of the chapter in seconds
* "end_time" - The end time of the chapter in seconds
* "title" (optional, string)
The following fields should only be used when the video belongs to some logical
chapter or section:
chapter: Name or title of the chapter the video belongs to.
chapter_number: Number of the chapter the video belongs to, as an integer.
chapter_id: Id of the chapter the video belongs to, as a unicode string.
The following fields should only be used when the video is an episode of some
series, programme or podcast:
series: Title of the series or programme the video episode belongs to.
season: Title of the season the video episode belongs to.
season_number: Number of the season the video episode belongs to, as an integer.
season_id: Id of the season the video episode belongs to, as a unicode string.
episode: Title of the video episode. Unlike mandatory video title field,
this field should denote the exact title of the video episode
without any kind of decoration.
episode_number: Number of the video episode within a season, as an integer.
episode_id: Id of the video episode, as a unicode string.
The following fields should only be used when the media is a track or a part of
a music album:
track: Title of the track.
track_number: Number of the track within an album or a disc, as an integer.
track_id: Id of the track (useful in case of custom indexing, e.g. 6.iii),
as a unicode string.
artist: Artist(s) of the track.
genre: Genre(s) of the track.
album: Title of the album the track belongs to.
album_type: Type of the album (e.g. "Demo", "Full-length", "Split", "Compilation", etc).
album_artist: List of all artists appeared on the album (e.g.
"Ash Borer / Fell Voices" or "Various Artists", useful for splits
and compilations).
disc_number: Number of the disc or other physical medium the track belongs to,
as an integer.
release_year: Year (YYYY) when the album was released.
Unless mentioned otherwise, the fields should be Unicode strings.
Unless mentioned otherwise, None is equivalent to absence of information.
_type "playlist" indicates multiple videos.
There must be a key "entries", which is a list, an iterable, or a PagedList
object, each element of which is a valid dictionary by this specification.
Additionally, playlists can have "id", "title", "description", "uploader",
"uploader_id", "uploader_url", "duration" attributes with the same semantics
as videos (see above).
_type "multi_video" indicates that there are multiple videos that
form a single show, for examples multiple acts of an opera or TV episode.
It must have an entries key like a playlist and contain all the keys
required for a video at the same time.
_type "url" indicates that the video must be extracted from another
location, possibly by a different extractor. Its only required key is:
"url" - the next URL to extract.
The key "ie_key" can be set to the class name (minus the trailing "IE",
e.g. "Youtube") if the extractor class is known in advance.
Additionally, the dictionary may have any properties of the resolved entity
known in advance, for example "title" if the title of the referred video is
known ahead of time.
_type "url_transparent" entities have the same specification as "url", but
indicate that the given additional information is more precise than the one
associated with the resolved URL.
This is useful when a site employs a video service that hosts the video and
its technical metadata, but that video service does not embed a useful
title, description etc.
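    For example (illustrative values only), a "url_transparent" result that defers
    extraction to another site while pinning a better title could be:
        {
            '_type': 'url_transparent',
            'ie_key': 'SomeVideoService',
            'url': 'https://videoservice.example/embed/42',
            'title': 'Title taken from the embedding page',
        }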
Subclasses of this one should re-define the _real_initialize() and
_real_extract() methods and define a _VALID_URL regexp.
Probably, they should also be added to the list of extractors.
_GEO_BYPASS attribute may be set to False in order to disable
geo restriction bypass mechanisms for a particular extractor.
Though it won't disable explicit geo restriction bypass based on
country code provided with geo_bypass_country.
_GEO_COUNTRIES attribute may contain a list of presumably geo unrestricted
countries for this extractor. One of these countries will be used by
geo restriction bypass mechanism right away in order to bypass
geo restriction, of course, if the mechanism is not disabled.
_GEO_IP_BLOCKS attribute may contain a list of presumably geo unrestricted
IP blocks in CIDR notation for this extractor. One of these IP blocks
will be used by geo restriction bypass mechanism similarly
to _GEO_COUNTRIES.
Finally, the _WORKING attribute should be set to False for broken IEs
in order to warn the users and skip the tests.
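    For example (illustrative values only), the smallest useful "video" result that
    satisfies the mandatory fields described above would be:
        {
            'id': '42',
            'title': 'Example video',
            'url': 'https://example.com/media/42.mp4',
            'ext': 'mp4',
        }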
"""
_ready = False
_downloader = None
_x_forwarded_for_ip = None
_GEO_BYPASS = True
_GEO_COUNTRIES = None
_GEO_IP_BLOCKS = None
_WORKING = True
def __init__(self, downloader=None):
"""Constructor. Receives an optional downloader."""
self._ready = False
self._x_forwarded_for_ip = None
self.set_downloader(downloader)
@classmethod
def suitable(cls, url):
"""Receives a URL and returns True if suitable for this IE."""
# This does not use has/getattr intentionally - we want to know whether
# we have cached the regexp for *this* class, whereas getattr would also
# match the superclass
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
return cls._VALID_URL_RE.match(url) is not None
@classmethod
def _match_id(cls, url):
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
m = cls._VALID_URL_RE.match(url)
assert m
return compat_str(m.group('id'))
@classmethod
def working(cls):
"""Getter method for _WORKING."""
return cls._WORKING
def initialize(self):
"""Initializes an instance (authentication, etc)."""
self._initialize_geo_bypass({
'countries': self._GEO_COUNTRIES,
'ip_blocks': self._GEO_IP_BLOCKS,
})
if not self._ready:
self._real_initialize()
self._ready = True
def _initialize_geo_bypass(self, geo_bypass_context):
"""
Initialize geo restriction bypass mechanism.
This method is used to initialize geo bypass mechanism based on faking
X-Forwarded-For HTTP header. A random country from provided country list
is selected and a random IP belonging to this country is generated. This
IP will be passed as X-Forwarded-For HTTP header in all subsequent
HTTP requests.
This method will be used for initial geo bypass mechanism initialization
during the instance initialization with _GEO_COUNTRIES and
_GEO_IP_BLOCKS.
You may also manually call it from extractor's code if geo bypass
information is not available beforehand (e.g. obtained during
extraction) or due to some other reason. In this case you should pass
this information in geo bypass context passed as first argument. It may
contain following fields:
countries: List of geo unrestricted countries (similar
to _GEO_COUNTRIES)
ip_blocks: List of geo unrestricted IP blocks in CIDR notation
(similar to _GEO_IP_BLOCKS)
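        For example (illustrative values), an extractor that only learns about the
        restriction while extracting could call:
            self._initialize_geo_bypass({
                'countries': ['DE'],
                'ip_blocks': ['203.0.113.0/24'],
            })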
"""
if not self._x_forwarded_for_ip:
# Geo bypass mechanism is explicitly disabled by user
if not self._downloader.params.get('geo_bypass', True):
return
if not geo_bypass_context:
geo_bypass_context = {}
# Backward compatibility: previously _initialize_geo_bypass
# expected a list of countries, some 3rd party code may still use
# it this way
if isinstance(geo_bypass_context, (list, tuple)):
geo_bypass_context = {
'countries': geo_bypass_context,
}
# The whole point of geo bypass mechanism is to fake IP
# as X-Forwarded-For HTTP header based on some IP block or
# country code.
# Path 1: bypassing based on IP block in CIDR notation
# Explicit IP block specified by user, use it right away
# regardless of whether extractor is geo bypassable or not
ip_block = self._downloader.params.get('geo_bypass_ip_block', None)
# Otherwise use random IP block from geo bypass context but only
# if extractor is known as geo bypassable
if not ip_block:
ip_blocks = geo_bypass_context.get('ip_blocks')
if self._GEO_BYPASS and ip_blocks:
ip_block = random.choice(ip_blocks)
if ip_block:
self._x_forwarded_for_ip = GeoUtils.random_ipv4(ip_block)
if self._downloader.params.get('verbose', False):
self._downloader.to_screen(
'[debug] Using fake IP %s as X-Forwarded-For.'
% self._x_forwarded_for_ip)
return
# Path 2: bypassing based on country code
# Explicit country code specified by user, use it right away
# regardless of whether extractor is geo bypassable or not
country = self._downloader.params.get('geo_bypass_country', None)
# Otherwise use random country code from geo bypass context but
# only if extractor is known as geo bypassable
if not country:
countries = geo_bypass_context.get('countries')
if self._GEO_BYPASS and countries:
country = random.choice(countries)
if country:
self._x_forwarded_for_ip = GeoUtils.random_ipv4(country)
if self._downloader.params.get('verbose', False):
self._downloader.to_screen(
'[debug] Using fake IP %s (%s) as X-Forwarded-For.'
% (self._x_forwarded_for_ip, country.upper()))
def extract(self, url):
"""Extracts URL information and returns it in list of dicts."""
try:
for _ in range(2):
try:
self.initialize()
ie_result = self._real_extract(url)
if self._x_forwarded_for_ip:
ie_result['__x_forwarded_for_ip'] = self._x_forwarded_for_ip
return ie_result
except GeoRestrictedError as e:
if self.__maybe_fake_ip_and_retry(e.countries):
continue
raise
except ExtractorError:
raise
except compat_http_client.IncompleteRead as e:
raise ExtractorError('A network error has occurred.', cause=e, expected=True)
except (KeyError, StopIteration) as e:
raise ExtractorError('An extractor error has occurred.', cause=e)
def __maybe_fake_ip_and_retry(self, countries):
if (not self._downloader.params.get('geo_bypass_country', None)
and self._GEO_BYPASS
and self._downloader.params.get('geo_bypass', True)
and not self._x_forwarded_for_ip
and countries):
country_code = random.choice(countries)
self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code)
if self._x_forwarded_for_ip:
self.report_warning(
'Video is geo restricted. Retrying extraction with fake IP %s (%s) as X-Forwarded-For.'
% (self._x_forwarded_for_ip, country_code.upper()))
return True
return False
def set_downloader(self, downloader):
"""Sets the downloader for this IE."""
self._downloader = downloader
def _real_initialize(self):
"""Real initialization process. Redefine in subclasses."""
pass
def _real_extract(self, url):
"""Real extraction process. Redefine in subclasses."""
pass
@classmethod
def ie_key(cls):
"""A string for getting the InfoExtractor with get_info_extractor"""
return compat_str(cls.__name__[:-2])
@property
def IE_NAME(self):
return compat_str(type(self).__name__[:-2])
@staticmethod
def __can_accept_status_code(err, expected_status):
assert isinstance(err, compat_urllib_error.HTTPError)
if expected_status is None:
return False
if isinstance(expected_status, compat_integer_types):
return err.code == expected_status
elif isinstance(expected_status, (list, tuple)):
return err.code in expected_status
elif callable(expected_status):
return expected_status(err.code) is True
else:
assert False
def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, data=None, headers={}, query={}, expected_status=None):
"""
Return the response handle.
See _download_webpage docstring for arguments specification.
"""
if note is None:
self.report_download_webpage(video_id)
elif note is not False:
if video_id is None:
self.to_screen('%s' % (note,))
else:
self.to_screen('%s: %s' % (video_id, note))
# Some sites check X-Forwarded-For HTTP header in order to figure out
# the origin of the client behind proxy. This allows bypassing geo
# restriction by faking this header's value to IP that belongs to some
# geo unrestricted country. We will do so once we encounter any
# geo restriction error.
if self._x_forwarded_for_ip:
if 'X-Forwarded-For' not in headers:
headers['X-Forwarded-For'] = self._x_forwarded_for_ip
if isinstance(url_or_request, compat_urllib_request.Request):
url_or_request = update_Request(
url_or_request, data=data, headers=headers, query=query)
else:
if query:
url_or_request = update_url_query(url_or_request, query)
if data is not None or headers:
url_or_request = sanitized_Request(url_or_request, data, headers)
exceptions = [compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error]
if hasattr(ssl, 'CertificateError'):
exceptions.append(ssl.CertificateError)
try:
return self._downloader.urlopen(url_or_request)
except tuple(exceptions) as err:
if isinstance(err, compat_urllib_error.HTTPError):
if self.__can_accept_status_code(err, expected_status):
# Retain reference to error to prevent file object from
# being closed before it can be read. Works around the
# effects of <https://bugs.python.org/issue15002>
# introduced in Python 3.4.1.
err.fp._error = err
return err.fp
if errnote is False:
return False
if errnote is None:
errnote = 'Unable to download webpage'
errmsg = '%s: %s' % (errnote, error_to_compat_str(err))
if fatal:
raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
else:
self._downloader.report_warning(errmsg)
return False
def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None, data=None, headers={}, query={}, expected_status=None):
"""
Return a tuple (page content as string, URL handle).
See _download_webpage docstring for arguments specification.
"""
# Strip hashes from the URL (#1038)
if isinstance(url_or_request, (compat_str, str)):
url_or_request = url_or_request.partition('#')[0]
urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers, query=query, expected_status=expected_status)
if urlh is False:
assert not fatal
return False
content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
return (content, urlh)
@staticmethod
def _guess_encoding_from_content(content_type, webpage_bytes):
m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
if m:
encoding = m.group(1)
else:
m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
webpage_bytes[:1024])
if m:
encoding = m.group(1).decode('ascii')
elif webpage_bytes.startswith(b'\xff\xfe'):
encoding = 'utf-16'
else:
encoding = 'utf-8'
return encoding
def __check_blocked(self, content):
first_block = content[:512]
if ('<title>Access to this site is blocked</title>' in content
and 'Websense' in first_block):
msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
blocked_iframe = self._html_search_regex(
r'<iframe src="([^"]+)"', content,
'Websense information URL', default=None)
if blocked_iframe:
msg += ' Visit %s for more details' % blocked_iframe
raise ExtractorError(msg, expected=True)
if '<title>The URL you requested has been blocked</title>' in first_block:
msg = (
'Access to this webpage has been blocked by Indian censorship. '
'Use a VPN or proxy server (with --proxy) to route around it.')
block_msg = self._html_search_regex(
r'</h1><p>(.*?)</p>',
content, 'block message', default=None)
if block_msg:
msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
raise ExtractorError(msg, expected=True)
if ('<title>TTK :: Доступ к ресурсу ограничен</title>' in content
and 'blocklist.rkn.gov.ru' in content):
raise ExtractorError(
'Access to this webpage has been blocked by decision of the Russian government. '
'Visit http://blocklist.rkn.gov.ru/ for a block reason.',
expected=True)
def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None):
content_type = urlh.headers.get('Content-Type', '')
webpage_bytes = urlh.read()
if prefix is not None:
webpage_bytes = prefix + webpage_bytes
if not encoding:
encoding = self._guess_encoding_from_content(content_type, webpage_bytes)
if self._downloader.params.get('dump_intermediate_pages', False):
self.to_screen('Dumping request to ' + urlh.geturl())
dump = base64.b64encode(webpage_bytes).decode('ascii')
self._downloader.to_screen(dump)
if self._downloader.params.get('write_pages', False):
basen = '%s_%s' % (video_id, urlh.geturl())
if len(basen) > 240:
h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
basen = basen[:240 - len(h)] + h
raw_filename = basen + '.dump'
filename = sanitize_filename(raw_filename, restricted=True)
self.to_screen('Saving request to ' + filename)
# Working around MAX_PATH limitation on Windows (see
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
if compat_os_name == 'nt':
absfilepath = os.path.abspath(filename)
if len(absfilepath) > 259:
filename = '\\\\?\\' + absfilepath
with open(filename, 'wb') as outf:
outf.write(webpage_bytes)
try:
content = webpage_bytes.decode(encoding, 'replace')
except LookupError:
content = webpage_bytes.decode('utf-8', 'replace')
self.__check_blocked(content)
return content
def _download_webpage(
self, url_or_request, video_id, note=None, errnote=None,
fatal=True, tries=1, timeout=5, encoding=None, data=None,
headers={}, query={}, expected_status=None):
"""
Return the data of the page as a string.
Arguments:
url_or_request -- plain text URL as a string or
a compat_urllib_request.Requestobject
video_id -- Video/playlist/item identifier (string)
Keyword arguments:
note -- note printed before downloading (string)
errnote -- note printed in case of an error (string)
fatal -- flag denoting whether error should be considered fatal,
i.e. whether it should cause ExtractionError to be raised,
otherwise a warning will be reported and extraction continued
tries -- number of tries
timeout -- sleep interval between tries
encoding -- encoding for a page content decoding, guessed automatically
when not explicitly specified
data -- POST data (bytes)
headers -- HTTP headers (dict)
query -- URL query (dict)
expected_status -- allows to accept failed HTTP requests (non 2xx
status code) by explicitly specifying a set of accepted status
codes. Can be any of the following entities:
- an integer type specifying an exact failed status code to
accept
- a list or a tuple of integer types specifying a list of
failed status codes to accept
- a callable accepting an actual failed status code and
returning True if it should be accepted
Note that this argument does not affect success status codes (2xx)
which are always accepted.
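            For example (illustrative), expected_status=404 also accepts a 404
            response, and expected_status=lambda c: 500 <= c < 600 accepts any
            server error, while other failed codes still raise or warn as usual.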
"""
success = False
try_count = 0
while success is False:
try:
res = self._download_webpage_handle(
url_or_request, video_id, note, errnote, fatal,
encoding=encoding, data=data, headers=headers, query=query,
expected_status=expected_status)
success = True
except compat_http_client.IncompleteRead as e:
try_count += 1
if try_count >= tries:
raise e
self._sleep(timeout, video_id)
if res is False:
return res
else:
content, _ = res
return content
def _download_xml_handle(
self, url_or_request, video_id, note='Downloading XML',
errnote='Unable to download XML', transform_source=None,
fatal=True, encoding=None, data=None, headers={}, query={},
expected_status=None):
"""
Return a tuple (xml as an compat_etree_Element, URL handle).
See _download_webpage docstring for arguments specification.
"""
res = self._download_webpage_handle(
url_or_request, video_id, note, errnote, fatal=fatal,
encoding=encoding, data=data, headers=headers, query=query,
expected_status=expected_status)
if res is False:
return res
xml_string, urlh = res
return self._parse_xml(
xml_string, video_id, transform_source=transform_source,
fatal=fatal), urlh
def _download_xml(
self, url_or_request, video_id,
note='Downloading XML', errnote='Unable to download XML',
transform_source=None, fatal=True, encoding=None,
data=None, headers={}, query={}, expected_status=None):
"""
Return the xml as an compat_etree_Element.
See _download_webpage docstring for arguments specification.
"""
res = self._download_xml_handle(
url_or_request, video_id, note=note, errnote=errnote,
transform_source=transform_source, fatal=fatal, encoding=encoding,
data=data, headers=headers, query=query,
expected_status=expected_status)
return res if res is False else res[0]
def _parse_xml(self, xml_string, video_id, transform_source=None, fatal=True):
if transform_source:
xml_string = transform_source(xml_string)
try:
return compat_etree_fromstring(xml_string.encode('utf-8'))
except compat_xml_parse_error as ve:
errmsg = '%s: Failed to parse XML ' % video_id
if fatal:
raise ExtractorError(errmsg, cause=ve)
else:
self.report_warning(errmsg + str(ve))
def _download_json_handle(
self, url_or_request, video_id, note='Downloading JSON metadata',
errnote='Unable to download JSON metadata', transform_source=None,
fatal=True, encoding=None, data=None, headers={}, query={},
expected_status=None):
"""
Return a tuple (JSON object, URL handle).
See _download_webpage docstring for arguments specification.
"""
res = self._download_webpage_handle(
url_or_request, video_id, note, errnote, fatal=fatal,
encoding=encoding, data=data, headers=headers, query=query,
expected_status=expected_status)
if res is False:
return res
json_string, urlh = res
return self._parse_json(
json_string, video_id, transform_source=transform_source,
fatal=fatal), urlh
def _download_json(
self, url_or_request, video_id, note='Downloading JSON metadata',
errnote='Unable to download JSON metadata', transform_source=None,
fatal=True, encoding=None, data=None, headers={}, query={},
expected_status=None):
"""
Return the JSON object as a dict.
See _download_webpage docstring for arguments specification.
"""
res = self._download_json_handle(
url_or_request, video_id, note=note, errnote=errnote,
transform_source=transform_source, fatal=fatal, encoding=encoding,
data=data, headers=headers, query=query,
expected_status=expected_status)
return res if res is False else res[0]
def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
if transform_source:
json_string = transform_source(json_string)
try:
return json.loads(json_string)
except ValueError as ve:
errmsg = '%s: Failed to parse JSON ' % video_id
if fatal:
raise ExtractorError(errmsg, cause=ve)
else:
self.report_warning(errmsg + str(ve))
def report_warning(self, msg, video_id=None):
idstr = '' if video_id is None else '%s: ' % video_id
self._downloader.report_warning(
'[%s] %s%s' % (self.IE_NAME, idstr, msg))
def to_screen(self, msg):
"""Print msg to screen, prefixing it with '[ie_name]'"""
self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg))
def report_extraction(self, id_or_name):
"""Report information extraction."""
self.to_screen('%s: Extracting information' % id_or_name)
def report_download_webpage(self, video_id):
"""Report webpage download."""
self.to_screen('%s: Downloading webpage' % video_id)
def report_age_confirmation(self):
"""Report attempt to confirm age."""
self.to_screen('Confirming age')
def report_login(self):
"""Report attempt to log in."""
self.to_screen('Logging in')
@staticmethod
def raise_login_required(msg='This video is only available for registered users'):
raise ExtractorError(
'%s. Use --username and --password or --netrc to provide account credentials.' % msg,
expected=True)
@staticmethod
def raise_geo_restricted(msg='This video is not available from your location due to geo restriction', countries=None):
raise GeoRestrictedError(msg, countries=countries)
# Methods for following #608
@staticmethod
def url_result(url, ie=None, video_id=None, video_title=None):
"""Returns a URL that points to a page that should be processed"""
# TODO: ie should be the class used for getting the info
video_info = {'_type': 'url',
'url': url,
'ie_key': ie}
if video_id is not None:
video_info['id'] = video_id
if video_title is not None:
video_info['title'] = video_title
return video_info
def playlist_from_matches(self, matches, playlist_id=None, playlist_title=None, getter=None, ie=None):
urls = orderedSet(
self.url_result(self._proto_relative_url(getter(m) if getter else m), ie)
for m in matches)
return self.playlist_result(
urls, playlist_id=playlist_id, playlist_title=playlist_title)
@staticmethod
def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None):
"""Returns a playlist"""
video_info = {'_type': 'playlist',
'entries': entries}
if playlist_id:
video_info['id'] = playlist_id
if playlist_title:
video_info['title'] = playlist_title
if playlist_description:
video_info['description'] = playlist_description
return video_info
def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Perform a regex search on the given string, using a single or a list of
patterns returning the first matching group.
In case of failure return a default value or raise a WARNING or a
RegexNotFoundError, depending on fatal, specifying the field name.
"""
if isinstance(pattern, (str, compat_str, compiled_regex_type)):
mobj = re.search(pattern, string, flags)
else:
for p in pattern:
mobj = re.search(p, string, flags)
if mobj:
break
if not self._downloader.params.get('no_color') and compat_os_name != 'nt' and sys.stderr.isatty():
_name = '\033[0;34m%s\033[0m' % name
else:
_name = name
if mobj:
if group is None:
# return the first matching group
return next(g for g in mobj.groups() if g is not None)
else:
return mobj.group(group)
elif default is not NO_DEFAULT:
return default
elif fatal:
raise RegexNotFoundError('Unable to extract %s' % _name)
else:
self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message())
return None
def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Like _search_regex, but strips HTML tags and unescapes entities.
"""
res = self._search_regex(pattern, string, name, default, fatal, flags, group)
if res:
return clean_html(res).strip()
else:
return res
def _get_netrc_login_info(self, netrc_machine=None):
username = None
password = None
netrc_machine = netrc_machine or self._NETRC_MACHINE
if self._downloader.params.get('usenetrc', False):
try:
info = netrc.netrc().authenticators(netrc_machine)
if info is not None:
username = info[0]
password = info[2]
else:
raise netrc.NetrcParseError(
'No authenticators for %s' % netrc_machine)
except (IOError, netrc.NetrcParseError) as err:
self._downloader.report_warning(
'parsing .netrc: %s' % error_to_compat_str(err))
return username, password
def _get_login_info(self, username_option='username', password_option='password', netrc_machine=None):
"""
Get the login info as (username, password)
First look for the manually specified credentials using username_option
and password_option as keys in params dictionary. If no such credentials
available look in the netrc file using the netrc_machine or _NETRC_MACHINE
value.
If there's no info available, return (None, None)
"""
if self._downloader is None:
return (None, None)
downloader_params = self._downloader.params
# Attempt to use provided username and password or .netrc data
if downloader_params.get(username_option) is not None:
username = downloader_params[username_option]
password = downloader_params[password_option]
else:
username, password = self._get_netrc_login_info(netrc_machine)
return username, password
def _get_tfa_info(self, note='two-factor verification code'):
"""
Get the two-factor authentication info
TODO - asking the user will be required for sms/phone verify
currently just uses the command line option
If there's no info available, return None
"""
if self._downloader is None:
return None
downloader_params = self._downloader.params
if downloader_params.get('twofactor') is not None:
return downloader_params['twofactor']
return compat_getpass('Type %s and press [Return]: ' % note)
# Helper functions for extracting OpenGraph info
@staticmethod
def _og_regexes(prop):
content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))'
property_re = (r'(?:name|property)=(?:\'og[:-]%(prop)s\'|"og[:-]%(prop)s"|\s*og[:-]%(prop)s\b)'
% {'prop': re.escape(prop)})
template = r'<meta[^>]+?%s[^>]+?%s'
return [
template % (property_re, content_re),
template % (content_re, property_re),
]
@staticmethod
def _meta_regex(prop):
return r'''(?isx)<meta
(?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1)
[^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop)
def _og_search_property(self, prop, html, name=None, **kargs):
if not isinstance(prop, (list, tuple)):
prop = [prop]
if name is None:
name = 'OpenGraph %s' % prop[0]
og_regexes = []
for p in prop:
og_regexes.extend(self._og_regexes(p))
escaped = self._search_regex(og_regexes, html, name, flags=re.DOTALL, **kargs)
if escaped is None:
return None
return unescapeHTML(escaped)
def _og_search_thumbnail(self, html, **kargs):
return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs)
def _og_search_description(self, html, **kargs):
return self._og_search_property('description', html, fatal=False, **kargs)
def _og_search_title(self, html, **kargs):
return self._og_search_property('title', html, **kargs)
def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
regexes = self._og_regexes('video') + self._og_regexes('video:url')
if secure:
regexes = self._og_regexes('video:secure_url') + regexes
return self._html_search_regex(regexes, html, name, **kargs)
def _og_search_url(self, html, **kargs):
return self._og_search_property('url', html, **kargs)
def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
if not isinstance(name, (list, tuple)):
name = [name]
if display_name is None:
display_name = name[0]
return self._html_search_regex(
[self._meta_regex(n) for n in name],
html, display_name, fatal=fatal, group='content', **kwargs)
def _dc_search_uploader(self, html):
return self._html_search_meta('dc.creator', html, 'uploader')
def _rta_search(self, html):
# See http://www.rtalabel.org/index.php?content=howtofaq#single
if re.search(r'(?ix)<meta\s+name="rating"\s+'
r' content="RTA-5042-1996-1400-1577-RTA"',
html):
return 18
return 0
def _media_rating_search(self, html):
# See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
rating = self._html_search_meta('rating', html)
if not rating:
return None
RATING_TABLE = {
'safe for kids': 0,
'general': 8,
'14 years': 14,
'mature': 17,
'restricted': 19,
}
return RATING_TABLE.get(rating.lower())
def _family_friendly_search(self, html):
# See http://schema.org/VideoObject
family_friendly = self._html_search_meta(
'isFamilyFriendly', html, default=None)
if not family_friendly:
return None
RATING_TABLE = {
'1': 0,
'true': 0,
'0': 18,
'false': 18,
}
return RATING_TABLE.get(family_friendly.lower())
def _twitter_search_player(self, html):
return self._html_search_meta('twitter:player', html,
'twitter card player')
def _search_json_ld(self, html, video_id, expected_type=None, **kwargs):
json_ld_list = list(re.finditer(JSON_LD_RE, html))
default = kwargs.get('default', NO_DEFAULT)
# JSON-LD may be malformed and thus `fatal` should be respected.
# At the same time `default` may be passed that assumes `fatal=False`
# for _search_regex. Let's simulate the same behavior here as well.
fatal = kwargs.get('fatal', True) if default == NO_DEFAULT else False
json_ld = []
for mobj in json_ld_list:
json_ld_item = self._parse_json(
mobj.group('json_ld'), video_id, fatal=fatal)
if not json_ld_item:
continue
if isinstance(json_ld_item, dict):
json_ld.append(json_ld_item)
elif isinstance(json_ld_item, (list, tuple)):
json_ld.extend(json_ld_item)
if json_ld:
json_ld = self._json_ld(json_ld, video_id, fatal=fatal, expected_type=expected_type)
if json_ld:
return json_ld
if default is not NO_DEFAULT:
return default
elif fatal:
raise RegexNotFoundError('Unable to extract JSON-LD')
else:
self._downloader.report_warning('unable to extract JSON-LD %s' % bug_reports_message())
return {}
def _json_ld(self, json_ld, video_id, fatal=True, expected_type=None):
if isinstance(json_ld, compat_str):
json_ld = self._parse_json(json_ld, video_id, fatal=fatal)
if not json_ld:
return {}
info = {}
if not isinstance(json_ld, (list, tuple, dict)):
return info
if isinstance(json_ld, dict):
json_ld = [json_ld]
INTERACTION_TYPE_MAP = {
'CommentAction': 'comment',
'AgreeAction': 'like',
'DisagreeAction': 'dislike',
'LikeAction': 'like',
'DislikeAction': 'dislike',
'ListenAction': 'view',
'WatchAction': 'view',
'ViewAction': 'view',
}
def extract_interaction_type(e):
interaction_type = e.get('interactionType')
if isinstance(interaction_type, dict):
interaction_type = interaction_type.get('@type')
return str_or_none(interaction_type)
def extract_interaction_statistic(e):
interaction_statistic = e.get('interactionStatistic')
if isinstance(interaction_statistic, dict):
interaction_statistic = [interaction_statistic]
if not isinstance(interaction_statistic, list):
return
for is_e in interaction_statistic:
if not isinstance(is_e, dict):
continue
if is_e.get('@type') != 'InteractionCounter':
continue
interaction_type = extract_interaction_type(is_e)
if not interaction_type:
continue
# For interaction count some sites provide string instead of
# an integer (as per spec) with non digit characters (e.g. ",")
# so extracting count with more relaxed str_to_int
interaction_count = str_to_int(is_e.get('userInteractionCount'))
if interaction_count is None:
continue
count_kind = INTERACTION_TYPE_MAP.get(interaction_type.split('/')[-1])
if not count_kind:
continue
count_key = '%s_count' % count_kind
if info.get(count_key) is not None:
continue
info[count_key] = interaction_count
def extract_video_object(e):
assert e['@type'] == 'VideoObject'
info.update({
'url': url_or_none(e.get('contentUrl')),
'title': unescapeHTML(e.get('name')),
'description': unescapeHTML(e.get('description')),
'thumbnail': url_or_none(e.get('thumbnailUrl') or e.get('thumbnailURL')),
'duration': parse_duration(e.get('duration')),
'timestamp': unified_timestamp(e.get('uploadDate')),
'uploader': str_or_none(e.get('author')),
'filesize': float_or_none(e.get('contentSize')),
'tbr': int_or_none(e.get('bitrate')),
'width': int_or_none(e.get('width')),
'height': int_or_none(e.get('height')),
'view_count': int_or_none(e.get('interactionCount')),
})
extract_interaction_statistic(e)
for e in json_ld:
if '@context' in e:
item_type = e.get('@type')
if expected_type is not None and expected_type != item_type:
continue
if item_type in ('TVEpisode', 'Episode'):
episode_name = unescapeHTML(e.get('name'))
info.update({
'episode': episode_name,
'episode_number': int_or_none(e.get('episodeNumber')),
'description': unescapeHTML(e.get('description')),
})
if not info.get('title') and episode_name:
info['title'] = episode_name
part_of_season = e.get('partOfSeason')
if isinstance(part_of_season, dict) and part_of_season.get('@type') in ('TVSeason', 'Season', 'CreativeWorkSeason'):
info.update({
'season': unescapeHTML(part_of_season.get('name')),
'season_number': int_or_none(part_of_season.get('seasonNumber')),
})
part_of_series = e.get('partOfSeries') or e.get('partOfTVSeries')
if isinstance(part_of_series, dict) and part_of_series.get('@type') in ('TVSeries', 'Series', 'CreativeWorkSeries'):
info['series'] = unescapeHTML(part_of_series.get('name'))
elif item_type == 'Movie':
info.update({
'title': unescapeHTML(e.get('name')),
'description': unescapeHTML(e.get('description')),
'duration': parse_duration(e.get('duration')),
'timestamp': unified_timestamp(e.get('dateCreated')),
})
elif item_type in ('Article', 'NewsArticle'):
info.update({
'timestamp': parse_iso8601(e.get('datePublished')),
'title': unescapeHTML(e.get('headline')),
'description': unescapeHTML(e.get('articleBody')),
})
elif item_type == 'VideoObject':
extract_video_object(e)
if expected_type is None:
continue
else:
break
video = e.get('video')
if isinstance(video, dict) and video.get('@type') == 'VideoObject':
extract_video_object(video)
if expected_type is None:
continue
else:
break
return dict((k, v) for k, v in info.items() if v is not None)
@staticmethod
def _hidden_inputs(html):
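# Strip HTML comments first so that commented-out <input> elements are ignored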
html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)
hidden_inputs = {}
for input in re.findall(r'(?i)(<input[^>]+>)', html):
attrs = extract_attributes(input)
if not input:
continue
if attrs.get('type') not in ('hidden', 'submit'):
continue
name = attrs.get('name') or attrs.get('id')
value = attrs.get('value')
if name and value is not None:
hidden_inputs[name] = value
return hidden_inputs
def _form_hidden_inputs(self, form_id, html):
form = self._search_regex(
r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
html, '%s form' % form_id, group='form')
return self._hidden_inputs(form)
def _sort_formats(self, formats, field_preference=None):
if not formats:
raise ExtractorError('No video formats found')
for f in formats:
# Automatically determine tbr when missing based on abr and vbr (improves
# formats sorting in some cases)
if 'tbr' not in f and f.get('abr') is not None and f.get('vbr') is not None:
f['tbr'] = f['abr'] + f['vbr']
def _formats_key(f):
# TODO remove the following workaround
from ..utils import determine_ext
if not f.get('ext') and 'url' in f:
f['ext'] = determine_ext(f['url'])
if isinstance(field_preference, (list, tuple)):
return tuple(
f.get(field)
if f.get(field) is not None
else ('' if field == 'format_id' else -1)
for field in field_preference)
preference = f.get('preference')
if preference is None:
preference = 0
if f.get('ext') in ['f4f', 'f4m']: # Not yet supported
preference -= 0.5
protocol = f.get('protocol') or determine_protocol(f)
proto_preference = 0 if protocol in ['http', 'https'] else (-0.5 if protocol == 'rtsp' else -0.1)
if f.get('vcodec') == 'none': # audio only
preference -= 50
if self._downloader.params.get('prefer_free_formats'):
ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
else:
ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a']
ext_preference = 0
try:
audio_ext_preference = ORDER.index(f['ext'])
except ValueError:
audio_ext_preference = -1
else:
if f.get('acodec') == 'none': # video only
preference -= 40
if self._downloader.params.get('prefer_free_formats'):
ORDER = ['flv', 'mp4', 'webm']
else:
ORDER = ['webm', 'flv', 'mp4']
try:
ext_preference = ORDER.index(f['ext'])
except ValueError:
ext_preference = -1
audio_ext_preference = 0
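# Build the sort key as a tuple: earlier fields take precedence. Formats
# are sorted ascending, so the best format ends up last in the list.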
return (
preference,
f.get('language_preference') if f.get('language_preference') is not None else -1,
f.get('quality') if f.get('quality') is not None else -1,
f.get('tbr') if f.get('tbr') is not None else -1,
f.get('filesize') if f.get('filesize') is not None else -1,
f.get('vbr') if f.get('vbr') is not None else -1,
f.get('height') if f.get('height') is not None else -1,
f.get('width') if f.get('width') is not None else -1,
proto_preference,
ext_preference,
f.get('abr') if f.get('abr') is not None else -1,
audio_ext_preference,
f.get('fps') if f.get('fps') is not None else -1,
f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
f.get('source_preference') if f.get('source_preference') is not None else -1,
f.get('format_id') if f.get('format_id') is not None else '',
)
formats.sort(key=_formats_key)
def _check_formats(self, formats, video_id):
if formats:
formats[:] = filter(
lambda f: self._is_valid_url(
f['url'], video_id,
item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
formats)
@staticmethod
def _remove_duplicate_formats(formats):
format_urls = set()
unique_formats = []
for f in formats:
if f['url'] not in format_urls:
format_urls.add(f['url'])
unique_formats.append(f)
formats[:] = unique_formats
def _is_valid_url(self, url, video_id, item='video', headers={}):
url = self._proto_relative_url(url, scheme='http:')
# For now assume non HTTP(S) URLs always valid
if not (url.startswith('http://') or url.startswith('https://')):
return True
try:
self._request_webpage(url, video_id, 'Checking %s URL' % item, headers=headers)
return True
except ExtractorError as e:
self.to_screen(
'%s: %s URL is invalid, skipping: %s'
% (video_id, item, error_to_compat_str(e.cause)))
return False
def http_scheme(self):
""" Either "http:" or "https:", depending on the user's preferences """
return (
'http:'
if self._downloader.params.get('prefer_insecure', False)
else 'https:')
def _proto_relative_url(self, url, scheme=None):
if url is None:
return url
if url.startswith('//'):
if scheme is None:
scheme = self.http_scheme()
return scheme + url
else:
return url
def _sleep(self, timeout, video_id, msg_template=None):
if msg_template is None:
msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
msg = msg_template % {'video_id': video_id, 'timeout': timeout}
self.to_screen(msg)
time.sleep(timeout)
def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None,
transform_source=lambda s: fix_xml_ampersands(s).strip(),
fatal=True, m3u8_id=None, data=None, headers={}, query={}):
manifest = self._download_xml(
manifest_url, video_id, 'Downloading f4m manifest',
'Unable to download f4m manifest',
# Some manifests may be malformed, e.g. prosiebensat1 generated manifests
# (see https://github.com/ytdl-org/youtube-dl/issues/6215#issuecomment-121704244)
transform_source=transform_source,
fatal=fatal, data=data, headers=headers, query=query)
if manifest is False:
return []
return self._parse_f4m_formats(
manifest, manifest_url, video_id, preference=preference, f4m_id=f4m_id,
transform_source=transform_source, fatal=fatal, m3u8_id=m3u8_id)
def _parse_f4m_formats(self, manifest, manifest_url, video_id, preference=None, f4m_id=None,
transform_source=lambda s: fix_xml_ampersands(s).strip(),
fatal=True, m3u8_id=None):
if not isinstance(manifest, compat_etree_Element) and not fatal:
return []
# currently youtube-dl cannot decode the playerVerificationChallenge as Akamai uses Adobe Alchemy
akamai_pv = manifest.find('{http://ns.adobe.com/f4m/1.0}pv-2.0')
if akamai_pv is not None and ';' in akamai_pv.text:
playerVerificationChallenge = akamai_pv.text.split(';')[0]
if playerVerificationChallenge.strip() != '':
return []
formats = []
manifest_version = '1.0'
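# f4m 1.0 and 2.0 use different XML namespaces; fall back to the 2.0
# namespace when no 1.0 media nodes are found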
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
if not media_nodes:
manifest_version = '2.0'
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
# Remove unsupported DRM-protected media renditions from the final
# formats (see https://github.com/ytdl-org/youtube-dl/issues/8573).
media_nodes = remove_encrypted_media(media_nodes)
if not media_nodes:
return formats
manifest_base_url = get_base_url(manifest)
bootstrap_info = xpath_element(
manifest, ['{http://ns.adobe.com/f4m/1.0}bootstrapInfo', '{http://ns.adobe.com/f4m/2.0}bootstrapInfo'],
'bootstrap info', default=None)
vcodec = None
mime_type = xpath_text(
manifest, ['{http://ns.adobe.com/f4m/1.0}mimeType', '{http://ns.adobe.com/f4m/2.0}mimeType'],
'base URL', default=None)
if mime_type and mime_type.startswith('audio/'):
vcodec = 'none'
for i, media_el in enumerate(media_nodes):
tbr = int_or_none(media_el.attrib.get('bitrate'))
width = int_or_none(media_el.attrib.get('width'))
height = int_or_none(media_el.attrib.get('height'))
format_id = '-'.join(filter(None, [f4m_id, compat_str(i if tbr is None else tbr)]))
# If <bootstrapInfo> is present, the specified f4m is a
# stream-level manifest, and only set-level manifests may refer to
# external resources. See section 11.4 and section 4 of F4M spec
if bootstrap_info is None:
media_url = None
# @href is introduced in 2.0, see section 11.6 of F4M spec
if manifest_version == '2.0':
media_url = media_el.attrib.get('href')
if media_url is None:
media_url = media_el.attrib.get('url')
if not media_url:
continue
manifest_url = (
media_url if media_url.startswith('http://') or media_url.startswith('https://')
else ((manifest_base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url))
# If media_url is itself an f4m manifest, do the recursive extraction,
# since bitrates in the parent manifest (this one) and the media_url
# manifest may differ, making it impossible for the f4m downloader to
# resolve the format by the requested bitrate
ext = determine_ext(manifest_url)
if ext == 'f4m':
f4m_formats = self._extract_f4m_formats(
manifest_url, video_id, preference=preference, f4m_id=f4m_id,
transform_source=transform_source, fatal=fatal)
# Sometimes a stream-level manifest contains a single media entry that
# does not carry any quality metadata (e.g. http://matchtv.ru/#live-player).
# At the same time the parent's media entry in the set-level manifest may
# contain it. We will copy it from the parent in such cases.
if len(f4m_formats) == 1:
f = f4m_formats[0]
f.update({
'tbr': f.get('tbr') or tbr,
'width': f.get('width') or width,
'height': f.get('height') or height,
'format_id': f.get('format_id') if not tbr else format_id,
'vcodec': vcodec,
})
formats.extend(f4m_formats)
continue
elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
manifest_url, video_id, 'mp4', preference=preference,
m3u8_id=m3u8_id, fatal=fatal))
continue
formats.append({
'format_id': format_id,
'url': manifest_url,
'manifest_url': manifest_url,
'ext': 'flv' if bootstrap_info is not None else None,
'protocol': 'f4m',
'tbr': tbr,
'width': width,
'height': height,
'vcodec': vcodec,
'preference': preference,
})
return formats
def _m3u8_meta_format(self, m3u8_url, ext=None, preference=None, m3u8_id=None):
return {
'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])),
'url': m3u8_url,
'ext': ext,
'protocol': 'm3u8',
'preference': preference - 100 if preference else -100,
'resolution': 'multiple',
'format_note': 'Quality selection URL',
}
def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
entry_protocol='m3u8', preference=None,
m3u8_id=None, note=None, errnote=None,
fatal=True, live=False, data=None, headers={},
query={}):
res = self._download_webpage_handle(
m3u8_url, video_id,
note=note or 'Downloading m3u8 information',
errnote=errnote or 'Failed to download m3u8 information',
fatal=fatal, data=data, headers=headers, query=query)
if res is False:
return []
m3u8_doc, urlh = res
m3u8_url = urlh.geturl()
return self._parse_m3u8_formats(
m3u8_doc, m3u8_url, ext=ext, entry_protocol=entry_protocol,
preference=preference, m3u8_id=m3u8_id, live=live)
def _parse_m3u8_formats(self, m3u8_doc, m3u8_url, ext=None,
entry_protocol='m3u8', preference=None,
m3u8_id=None, live=False):
if '#EXT-X-FAXS-CM:' in m3u8_doc: # Adobe Flash Access
return []
if re.search(r'#EXT-X-SESSION-KEY:.*?URI="skd://', m3u8_doc): # Apple FairPlay
return []
formats = []
format_url = lambda u: (
u
if re.match(r'^https?://', u)
else compat_urlparse.urljoin(m3u8_url, u))
# References:
# 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-21
# 2. https://github.com/ytdl-org/youtube-dl/issues/12211
# 3. https://github.com/ytdl-org/youtube-dl/issues/18923
# We should try extracting formats only from master playlists [1, 4.3.4],
# i.e. playlists that describe available qualities. On the other hand
# media playlists [1, 4.3.3] should be returned as is since they contain
# just the media without qualities renditions.
# Fortunately, a master playlist can easily be distinguished from a media
# playlist based on the availability of particular tags. As per [1, 4.3.3, 4.3.4]
# master playlist tags MUST NOT appear in a media playlist and vice versa.
# As per [1, 4.3.3.1] the #EXT-X-TARGETDURATION tag is REQUIRED for every
# media playlist and MUST NOT appear in a master playlist, thus we can
# reliably detect a media playlist with this criterion.
if '#EXT-X-TARGETDURATION' in m3u8_doc: # media playlist, return as is
return [{
'url': m3u8_url,
'format_id': m3u8_id,
'ext': ext,
'protocol': entry_protocol,
'preference': preference,
}]
groups = {}
last_stream_inf = {}
def extract_media(x_media_line):
media = parse_m3u8_attributes(x_media_line)
# As per [1, 4.3.4.1] TYPE, GROUP-ID and NAME are REQUIRED
media_type, group_id, name = media.get('TYPE'), media.get('GROUP-ID'), media.get('NAME')
if not (media_type and group_id and name):
return
groups.setdefault(group_id, []).append(media)
if media_type not in ('VIDEO', 'AUDIO'):
return
media_url = media.get('URI')
if media_url:
format_id = []
for v in (m3u8_id, group_id, name):
if v:
format_id.append(v)
f = {
'format_id': '-'.join(format_id),
'url': format_url(media_url),
'manifest_url': m3u8_url,
'language': media.get('LANGUAGE'),
'ext': ext,
'protocol': entry_protocol,
'preference': preference,
}
if media_type == 'AUDIO':
f['vcodec'] = 'none'
formats.append(f)
def build_stream_name():
# Although the specification does not mention a NAME attribute for
# the EXT-X-STREAM-INF tag, it may still be present sometimes (see [1]
# or the vidio test in TestInfoExtractor.test_parse_m3u8_formats)
# 1. http://www.vidio.com/watch/165683-dj_ambred-booyah-live-2015
stream_name = last_stream_inf.get('NAME')
if stream_name:
return stream_name
# If there is no NAME in EXT-X-STREAM-INF it will be obtained
# from corresponding rendition group
stream_group_id = last_stream_inf.get('VIDEO')
if not stream_group_id:
return
stream_group = groups.get(stream_group_id)
if not stream_group:
return stream_group_id
rendition = stream_group[0]
return rendition.get('NAME') or stream_group_id
# parse EXT-X-MEDIA tags before EXT-X-STREAM-INF in order to have the
# chance to detect video only formats when EXT-X-STREAM-INF tags
# precede EXT-X-MEDIA tags in HLS manifest such as [3].
for line in m3u8_doc.splitlines():
if line.startswith('#EXT-X-MEDIA:'):
extract_media(line)
for line in m3u8_doc.splitlines():
if line.startswith('#EXT-X-STREAM-INF:'):
last_stream_inf = parse_m3u8_attributes(line)
elif line.startswith('#') or not line.strip():
continue
else:
tbr = float_or_none(
last_stream_inf.get('AVERAGE-BANDWIDTH')
or last_stream_inf.get('BANDWIDTH'), scale=1000)
format_id = []
if m3u8_id:
format_id.append(m3u8_id)
stream_name = build_stream_name()
# Bandwidth of live streams may differ over time thus making
# format_id unpredictable. So it's better to keep provided
# format_id intact.
if not live:
format_id.append(stream_name if stream_name else '%d' % (tbr if tbr else len(formats)))
manifest_url = format_url(line.strip())
f = {
'format_id': '-'.join(format_id),
'url': manifest_url,
'manifest_url': m3u8_url,
'tbr': tbr,
'ext': ext,
'fps': float_or_none(last_stream_inf.get('FRAME-RATE')),
'protocol': entry_protocol,
'preference': preference,
}
resolution = last_stream_inf.get('RESOLUTION')
if resolution:
mobj = re.search(r'(?P<width>\d+)[xX](?P<height>\d+)', resolution)
if mobj:
f['width'] = int(mobj.group('width'))
f['height'] = int(mobj.group('height'))
# Unified Streaming Platform
mobj = re.search(
r'audio.*?(?:%3D|=)(\d+)(?:-video.*?(?:%3D|=)(\d+))?', f['url'])
if mobj:
abr, vbr = mobj.groups()
abr, vbr = float_or_none(abr, 1000), float_or_none(vbr, 1000)
f.update({
'vbr': vbr,
'abr': abr,
})
codecs = parse_codecs(last_stream_inf.get('CODECS'))
f.update(codecs)
audio_group_id = last_stream_inf.get('AUDIO')
# As per [1, 4.3.4.1.1] any EXT-X-STREAM-INF tag which
# references a rendition group MUST have a CODECS attribute.
# However, this is not always respected, for example, [2]
# contains EXT-X-STREAM-INF tag which references AUDIO
# rendition group but does not have CODECS and despite
# referencing an audio group it represents a complete
# (with audio and video) format. So, for such cases we will
# ignore references to rendition groups and treat them
# as complete formats.
if audio_group_id and codecs and f.get('vcodec') != 'none':
audio_group = groups.get(audio_group_id)
if audio_group and audio_group[0].get('URI'):
# TODO: update acodec for audio only formats with
# the same GROUP-ID
f['acodec'] = 'none'
formats.append(f)
# for DailyMotion
progressive_uri = last_stream_inf.get('PROGRESSIVE-URI')
if progressive_uri:
http_f = f.copy()
del http_f['manifest_url']
http_f.update({
'format_id': f['format_id'].replace('hls-', 'http-'),
'protocol': 'http',
'url': progressive_uri,
})
formats.append(http_f)
last_stream_inf = {}
return formats
@staticmethod
def _xpath_ns(path, namespace=None):
if not namespace:
return path
out = []
for c in path.split('/'):
if not c or c == '.':
out.append(c)
else:
out.append('{%s}%s' % (namespace, c))
return '/'.join(out)
def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None, transform_source=None):
smil = self._download_smil(smil_url, video_id, fatal=fatal, transform_source=transform_source)
if smil is False:
assert not fatal
return []
namespace = self._parse_smil_namespace(smil)
return self._parse_smil_formats(
smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None):
smil = self._download_smil(smil_url, video_id, fatal=fatal)
if smil is False:
return {}
return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)
def _download_smil(self, smil_url, video_id, fatal=True, transform_source=None):
return self._download_xml(
smil_url, video_id, 'Downloading SMIL file',
'Unable to download SMIL file', fatal=fatal, transform_source=transform_source)
def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
namespace = self._parse_smil_namespace(smil)
formats = self._parse_smil_formats(
smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
subtitles = self._parse_smil_subtitles(smil, namespace=namespace)
video_id = os.path.splitext(url_basename(smil_url))[0]
title = None
description = None
upload_date = None
for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
name = meta.attrib.get('name')
content = meta.attrib.get('content')
if not name or not content:
continue
if not title and name == 'title':
title = content
elif not description and name in ('description', 'abstract'):
description = content
elif not upload_date and name == 'date':
upload_date = unified_strdate(content)
thumbnails = [{
'id': image.get('type'),
'url': image.get('src'),
'width': int_or_none(image.get('width')),
'height': int_or_none(image.get('height')),
} for image in smil.findall(self._xpath_ns('.//image', namespace)) if image.get('src')]
return {
'id': video_id,
'title': title or video_id,
'description': description,
'upload_date': upload_date,
'thumbnails': thumbnails,
'formats': formats,
'subtitles': subtitles,
}
def _parse_smil_namespace(self, smil):
return self._search_regex(
r'(?i)^{([^}]+)?}smil$', smil.tag, 'namespace', default=None)
def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
base = smil_url
for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
b = meta.get('base') or meta.get('httpBase')
if b:
base = b
break
formats = []
rtmp_count = 0
http_count = 0
m3u8_count = 0
srcs = []
media = smil.findall(self._xpath_ns('.//video', namespace)) + smil.findall(self._xpath_ns('.//audio', namespace))
for medium in media:
src = medium.get('src')
if not src or src in srcs:
continue
srcs.append(src)
bitrate = float_or_none(medium.get('system-bitrate') or medium.get('systemBitrate'), 1000)
filesize = int_or_none(medium.get('size') or medium.get('fileSize'))
width = int_or_none(medium.get('width'))
height = int_or_none(medium.get('height'))
proto = medium.get('proto')
ext = medium.get('ext')
src_ext = determine_ext(src)
streamer = medium.get('streamer') or base
if proto == 'rtmp' or streamer.startswith('rtmp'):
rtmp_count += 1
formats.append({
'url': streamer,
'play_path': src,
'ext': 'flv',
'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
'tbr': bitrate,
'filesize': filesize,
'width': width,
'height': height,
})
if transform_rtmp_url:
streamer, src = transform_rtmp_url(streamer, src)
formats[-1].update({
'url': streamer,
'play_path': src,
})
continue
src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src)
src_url = src_url.strip()
if proto == 'm3u8' or src_ext == 'm3u8':
m3u8_formats = self._extract_m3u8_formats(
src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False)
if len(m3u8_formats) == 1:
m3u8_count += 1
m3u8_formats[0].update({
'format_id': 'hls-%d' % (m3u8_count if bitrate is None else bitrate),
'tbr': bitrate,
'width': width,
'height': height,
})
formats.extend(m3u8_formats)
elif src_ext == 'f4m':
f4m_url = src_url
if not f4m_params:
f4m_params = {
'hdcore': '3.2.0',
'plugin': 'flowplayer-3.2.0.1',
}
f4m_url += '&' if '?' in f4m_url else '?'
f4m_url += compat_urllib_parse_urlencode(f4m_params)
formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
elif src_ext == 'mpd':
formats.extend(self._extract_mpd_formats(
src_url, video_id, mpd_id='dash', fatal=False))
elif re.search(r'\.ism/[Mm]anifest', src_url):
formats.extend(self._extract_ism_formats(
src_url, video_id, ism_id='mss', fatal=False))
elif src_url.startswith('http') and self._is_valid_url(src, video_id):
http_count += 1
formats.append({
'url': src_url,
'ext': ext or src_ext or 'flv',
'format_id': 'http-%d' % (bitrate or http_count),
'tbr': bitrate,
'filesize': filesize,
'width': width,
'height': height,
})
return formats
def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
urls = []
subtitles = {}
for num, textstream in enumerate(smil.findall(self._xpath_ns('.//textstream', namespace))):
src = textstream.get('src')
if not src or src in urls:
continue
urls.append(src)
ext = textstream.get('ext') or mimetype2ext(textstream.get('type')) or determine_ext(src)
lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang
subtitles.setdefault(lang, []).append({
'url': src,
'ext': ext,
})
return subtitles
def _extract_xspf_playlist(self, xspf_url, playlist_id, fatal=True):
xspf = self._download_xml(
xspf_url, playlist_id, 'Downloading xpsf playlist',
'Unable to download xspf manifest', fatal=fatal)
if xspf is False:
return []
return self._parse_xspf(
xspf, playlist_id, xspf_url=xspf_url,
xspf_base_url=base_url(xspf_url))
def _parse_xspf(self, xspf_doc, playlist_id, xspf_url=None, xspf_base_url=None):
NS_MAP = {
'xspf': 'http://xspf.org/ns/0/',
's1': 'http://static.streamone.nl/player/ns/0',
}
entries = []
for track in xspf_doc.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)):
title = xpath_text(
track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id)
description = xpath_text(
track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description')
thumbnail = xpath_text(
track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail')
duration = float_or_none(
xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000)
formats = []
for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP)):
format_url = urljoin(xspf_base_url, location.text)
if not format_url:
continue
formats.append({
'url': format_url,
'manifest_url': xspf_url,
'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
})
self._sort_formats(formats)
entries.append({
'id': playlist_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
})
return entries
def _extract_mpd_formats(self, mpd_url, video_id, mpd_id=None, note=None, errnote=None, fatal=True, formats_dict={}, data=None, headers={}, query={}):
res = self._download_xml_handle(
mpd_url, video_id,
note=note or 'Downloading MPD manifest',
errnote=errnote or 'Failed to download MPD manifest',
fatal=fatal, data=data, headers=headers, query=query)
if res is False:
return []
mpd_doc, urlh = res
if mpd_doc is None:
return []
mpd_base_url = base_url(urlh.geturl())
return self._parse_mpd_formats(
mpd_doc, mpd_id=mpd_id, mpd_base_url=mpd_base_url,
formats_dict=formats_dict, mpd_url=mpd_url)
def _parse_mpd_formats(self, mpd_doc, mpd_id=None, mpd_base_url='', formats_dict={}, mpd_url=None):
"""
Parse formats from MPD manifest.
References:
1. MPEG-DASH Standard, ISO/IEC 23009-1:2014(E),
http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip
2. https://en.wikipedia.org/wiki/Dynamic_Adaptive_Streaming_over_HTTP
"""
if mpd_doc.get('type') == 'dynamic':
return []
namespace = self._search_regex(r'(?i)^{([^}]+)?}MPD$', mpd_doc.tag, 'namespace', default=None)
def _add_ns(path):
return self._xpath_ns(path, namespace)
def is_drm_protected(element):
return element.find(_add_ns('ContentProtection')) is not None
def extract_multisegment_info(element, ms_parent_info):
ms_info = ms_parent_info.copy()
# As per [1, 5.3.9.2.2] SegmentList and SegmentTemplate share some
# common attributes and elements. We will only extract the ones
# relevant for us.
def extract_common(source):
segment_timeline = source.find(_add_ns('SegmentTimeline'))
if segment_timeline is not None:
s_e = segment_timeline.findall(_add_ns('S'))
if s_e:
ms_info['total_number'] = 0
ms_info['s'] = []
for s in s_e:
r = int(s.get('r', 0))
ms_info['total_number'] += 1 + r
ms_info['s'].append({
't': int(s.get('t', 0)),
# @d is mandatory (see [1, 5.3.9.6.2, Table 17, page 60])
'd': int(s.attrib['d']),
'r': r,
})
start_number = source.get('startNumber')
if start_number:
ms_info['start_number'] = int(start_number)
timescale = source.get('timescale')
if timescale:
ms_info['timescale'] = int(timescale)
segment_duration = source.get('duration')
if segment_duration:
ms_info['segment_duration'] = float(segment_duration)
def extract_Initialization(source):
initialization = source.find(_add_ns('Initialization'))
if initialization is not None:
ms_info['initialization_url'] = initialization.attrib['sourceURL']
segment_list = element.find(_add_ns('SegmentList'))
if segment_list is not None:
extract_common(segment_list)
extract_Initialization(segment_list)
segment_urls_e = segment_list.findall(_add_ns('SegmentURL'))
if segment_urls_e:
ms_info['segment_urls'] = [segment.attrib['media'] for segment in segment_urls_e]
else:
segment_template = element.find(_add_ns('SegmentTemplate'))
if segment_template is not None:
extract_common(segment_template)
media = segment_template.get('media')
if media:
ms_info['media'] = media
initialization = segment_template.get('initialization')
if initialization:
ms_info['initialization'] = initialization
else:
extract_Initialization(segment_template)
return ms_info
mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration'))
formats = []
for period in mpd_doc.findall(_add_ns('Period')):
period_duration = parse_duration(period.get('duration')) or mpd_duration
period_ms_info = extract_multisegment_info(period, {
'start_number': 1,
'timescale': 1,
})
for adaptation_set in period.findall(_add_ns('AdaptationSet')):
if is_drm_protected(adaptation_set):
continue
adaption_set_ms_info = extract_multisegment_info(adaptation_set, period_ms_info)
for representation in adaptation_set.findall(_add_ns('Representation')):
if is_drm_protected(representation):
continue
representation_attrib = adaptation_set.attrib.copy()
representation_attrib.update(representation.attrib)
# According to [1, 5.3.7.2, Table 9, page 41], @mimeType is mandatory
mime_type = representation_attrib['mimeType']
content_type = mime_type.split('/')[0]
if content_type == 'text':
# TODO implement WebVTT downloading
pass
elif content_type in ('video', 'audio'):
base_url = ''
for element in (representation, adaptation_set, period, mpd_doc):
base_url_e = element.find(_add_ns('BaseURL'))
if base_url_e is not None:
base_url = base_url_e.text + base_url
if re.match(r'^https?://', base_url):
break
if mpd_base_url and not re.match(r'^https?://', base_url):
if not mpd_base_url.endswith('/') and not base_url.startswith('/'):
mpd_base_url += '/'
base_url = mpd_base_url + base_url
representation_id = representation_attrib.get('id')
lang = representation_attrib.get('lang')
url_el = representation.find(_add_ns('BaseURL'))
filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength') if url_el is not None else None)
bandwidth = int_or_none(representation_attrib.get('bandwidth'))
f = {
'format_id': '%s-%s' % (mpd_id, representation_id) if mpd_id else representation_id,
'manifest_url': mpd_url,
'ext': mimetype2ext(mime_type),
'width': int_or_none(representation_attrib.get('width')),
'height': int_or_none(representation_attrib.get('height')),
'tbr': float_or_none(bandwidth, 1000),
'asr': int_or_none(representation_attrib.get('audioSamplingRate')),
'fps': int_or_none(representation_attrib.get('frameRate')),
'language': lang if lang not in ('mul', 'und', 'zxx', 'mis') else None,
'format_note': 'DASH %s' % content_type,
'filesize': filesize,
'container': mimetype2ext(mime_type) + '_dash',
}
f.update(parse_codecs(representation_attrib.get('codecs')))
representation_ms_info = extract_multisegment_info(representation, adaption_set_ms_info)
def prepare_template(template_name, identifiers):
tmpl = representation_ms_info[template_name]
# First, % characters outside $...$ templates
# must be escaped by doubling for proper processing
# by the % string-formatting operator used below (see
# https://github.com/ytdl-org/youtube-dl/issues/16867).
t = ''
in_template = False
for c in tmpl:
t += c
if c == '$':
in_template = not in_template
elif c == '%' and not in_template:
t += c
# Next, $...$ templates are translated to their
# %(...) counterparts to be used with % operator
t = t.replace('$RepresentationID$', representation_id)
t = re.sub(r'\$(%s)\$' % '|'.join(identifiers), r'%(\1)d', t)
t = re.sub(r'\$(%s)%%([^$]+)\$' % '|'.join(identifiers), r'%(\1)\2', t)
t = t.replace('$$', '$')
return t
# @initialization is a regular template like @media one
# so it should be handled just the same way (see
# https://github.com/ytdl-org/youtube-dl/issues/11605)
if 'initialization' in representation_ms_info:
initialization_template = prepare_template(
'initialization',
# As per [1, 5.3.9.4.2, Table 15, page 54] $Number$ and
# $Time$ shall not be included for @initialization thus
# only $Bandwidth$ remains
('Bandwidth', ))
representation_ms_info['initialization_url'] = initialization_template % {
'Bandwidth': bandwidth,
}
def location_key(location):
return 'url' if re.match(r'^https?://', location) else 'path'
if 'segment_urls' not in representation_ms_info and 'media' in representation_ms_info:
media_template = prepare_template('media', ('Number', 'Bandwidth', 'Time'))
media_location_key = location_key(media_template)
# As per [1, 5.3.9.4.4, Table 16, page 55] $Number$ and $Time$
# can't be used at the same time
if '%(Number' in media_template and 's' not in representation_ms_info:
segment_duration = None
if 'total_number' not in representation_ms_info and 'segment_duration' in representation_ms_info:
segment_duration = float_or_none(representation_ms_info['segment_duration'], representation_ms_info['timescale'])
representation_ms_info['total_number'] = int(math.ceil(float(period_duration) / segment_duration))
representation_ms_info['fragments'] = [{
media_location_key: media_template % {
'Number': segment_number,
'Bandwidth': bandwidth,
},
'duration': segment_duration,
} for segment_number in range(
representation_ms_info['start_number'],
representation_ms_info['total_number'] + representation_ms_info['start_number'])]
else:
# $Number*$ or $Time$ in media template with S list available
# Example $Number*$: http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg
# Example $Time$: https://play.arkena.com/embed/avp/v2/player/media/b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe/1/129411
representation_ms_info['fragments'] = []
segment_time = 0
segment_d = None
segment_number = representation_ms_info['start_number']
def add_segment_url():
segment_url = media_template % {
'Time': segment_time,
'Bandwidth': bandwidth,
'Number': segment_number,
}
representation_ms_info['fragments'].append({
media_location_key: segment_url,
'duration': float_or_none(segment_d, representation_ms_info['timescale']),
})
for num, s in enumerate(representation_ms_info['s']):
segment_time = s.get('t') or segment_time
segment_d = s['d']
add_segment_url()
segment_number += 1
for r in range(s.get('r', 0)):
segment_time += segment_d
add_segment_url()
segment_number += 1
segment_time += segment_d
elif 'segment_urls' in representation_ms_info and 's' in representation_ms_info:
# No media template
# Example: https://www.youtube.com/watch?v=iXZV5uAYMJI
# or any YouTube dashsegments video
fragments = []
segment_index = 0
timescale = representation_ms_info['timescale']
for s in representation_ms_info['s']:
duration = float_or_none(s['d'], timescale)
for r in range(s.get('r', 0) + 1):
segment_uri = representation_ms_info['segment_urls'][segment_index]
fragments.append({
location_key(segment_uri): segment_uri,
'duration': duration,
})
segment_index += 1
representation_ms_info['fragments'] = fragments
elif 'segment_urls' in representation_ms_info:
# Segment URLs with no SegmentTimeline
# Example: https://www.seznam.cz/zpravy/clanek/cesko-zasahne-vitr-o-sile-vichrice-muze-byt-i-zivotu-nebezpecny-39091
# https://github.com/ytdl-org/youtube-dl/pull/14844
fragments = []
segment_duration = float_or_none(
representation_ms_info['segment_duration'],
representation_ms_info['timescale']) if 'segment_duration' in representation_ms_info else None
for segment_url in representation_ms_info['segment_urls']:
fragment = {
location_key(segment_url): segment_url,
}
if segment_duration:
fragment['duration'] = segment_duration
fragments.append(fragment)
representation_ms_info['fragments'] = fragments
# If there is a fragments key available then we correctly recognized fragmented media.
# Otherwise we will assume unfragmented media with direct access. Technically, this
# assumption is not necessarily correct since we may simply have no support for
# some forms of fragmented media renditions yet, but for now we'll use this fallback.
if 'fragments' in representation_ms_info:
f.update({
# NB: mpd_url may be empty when MPD manifest is parsed from a string
'url': mpd_url or base_url,
'fragment_base_url': base_url,
'fragments': [],
'protocol': 'http_dash_segments',
})
if 'initialization_url' in representation_ms_info:
initialization_url = representation_ms_info['initialization_url']
if not f.get('url'):
f['url'] = initialization_url
f['fragments'].append({location_key(initialization_url): initialization_url})
f['fragments'].extend(representation_ms_info['fragments'])
else:
# Assuming direct URL to unfragmented media.
f['url'] = base_url
# According to [1, 5.3.5.2, Table 7, page 35] @id of Representation
# is not necessarily unique within a Period thus formats with
# the same `format_id` are quite possible. There are numerous examples
# of such manifests (see https://github.com/ytdl-org/youtube-dl/issues/15111,
# https://github.com/ytdl-org/youtube-dl/issues/13919)
full_info = formats_dict.get(representation_id, {}).copy()
full_info.update(f)
formats.append(full_info)
else:
self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
return formats
def _extract_ism_formats(self, ism_url, video_id, ism_id=None, note=None, errnote=None, fatal=True, data=None, headers={}, query={}):
res = self._download_xml_handle(
ism_url, video_id,
note=note or 'Downloading ISM manifest',
errnote=errnote or 'Failed to download ISM manifest',
fatal=fatal, data=data, headers=headers, query=query)
if res is False:
return []
ism_doc, urlh = res
if ism_doc is None:
return []
return self._parse_ism_formats(ism_doc, urlh.geturl(), ism_id)
def _parse_ism_formats(self, ism_doc, ism_url, ism_id=None):
"""
Parse formats from ISM manifest.
References:
1. [MS-SSTR]: Smooth Streaming Protocol,
https://msdn.microsoft.com/en-us/library/ff469518.aspx
"""
if ism_doc.get('IsLive') == 'TRUE' or ism_doc.find('Protection') is not None:
return []
duration = int(ism_doc.attrib['Duration'])
timescale = int_or_none(ism_doc.get('TimeScale')) or 10000000
formats = []
for stream in ism_doc.findall('StreamIndex'):
stream_type = stream.get('Type')
if stream_type not in ('video', 'audio'):
continue
url_pattern = stream.attrib['Url']
stream_timescale = int_or_none(stream.get('TimeScale')) or timescale
stream_name = stream.get('Name')
for track in stream.findall('QualityLevel'):
fourcc = track.get('FourCC', 'AACL' if track.get('AudioTag') == '255' else None)
# TODO: add support for WVC1 and WMAP
if fourcc not in ('H264', 'AVC1', 'AACL'):
self.report_warning('%s is not a supported codec' % fourcc)
continue
tbr = int(track.attrib['Bitrate']) // 1000
# [1] does not mention Width and Height attributes. However,
# they're often present while MaxWidth and MaxHeight are
# missing, so should be used as fallbacks
width = int_or_none(track.get('MaxWidth') or track.get('Width'))
height = int_or_none(track.get('MaxHeight') or track.get('Height'))
sampling_rate = int_or_none(track.get('SamplingRate'))
track_url_pattern = re.sub(r'{[Bb]itrate}', track.attrib['Bitrate'], url_pattern)
track_url_pattern = compat_urlparse.urljoin(ism_url, track_url_pattern)
fragments = []
fragment_ctx = {
'time': 0,
}
stream_fragments = stream.findall('c')
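# Each <c> element describes one or more fragments: @t gives the start
# time, @d the duration and @r an optional repeat count (default 1)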
for stream_fragment_index, stream_fragment in enumerate(stream_fragments):
fragment_ctx['time'] = int_or_none(stream_fragment.get('t')) or fragment_ctx['time']
fragment_repeat = int_or_none(stream_fragment.get('r')) or 1
fragment_ctx['duration'] = int_or_none(stream_fragment.get('d'))
if not fragment_ctx['duration']:
try:
next_fragment_time = int(stream_fragments[stream_fragment_index + 1].attrib['t'])
except IndexError:
next_fragment_time = duration
fragment_ctx['duration'] = (next_fragment_time - fragment_ctx['time']) / fragment_repeat
for _ in range(fragment_repeat):
fragments.append({
'url': re.sub(r'{start[ _]time}', compat_str(fragment_ctx['time']), track_url_pattern),
'duration': fragment_ctx['duration'] / stream_timescale,
})
fragment_ctx['time'] += fragment_ctx['duration']
format_id = []
if ism_id:
format_id.append(ism_id)
if stream_name:
format_id.append(stream_name)
format_id.append(compat_str(tbr))
formats.append({
'format_id': '-'.join(format_id),
'url': ism_url,
'manifest_url': ism_url,
'ext': 'ismv' if stream_type == 'video' else 'isma',
'width': width,
'height': height,
'tbr': tbr,
'asr': sampling_rate,
'vcodec': 'none' if stream_type == 'audio' else fourcc,
'acodec': 'none' if stream_type == 'video' else fourcc,
'protocol': 'ism',
'fragments': fragments,
'_download_params': {
'duration': duration,
'timescale': stream_timescale,
'width': width or 0,
'height': height or 0,
'fourcc': fourcc,
'codec_private_data': track.get('CodecPrivateData'),
'sampling_rate': sampling_rate,
'channels': int_or_none(track.get('Channels', 2)),
'bits_per_sample': int_or_none(track.get('BitsPerSample', 16)),
'nal_unit_length_field': int_or_none(track.get('NALUnitLengthField', 4)),
},
})
return formats
def _parse_html5_media_entries(self, base_url, webpage, video_id, m3u8_id=None, m3u8_entry_protocol='m3u8', mpd_id=None, preference=None):
def absolute_url(item_url):
return urljoin(base_url, item_url)
def parse_content_type(content_type):
if not content_type:
return {}
ctr = re.search(r'(?P<mimetype>[^/]+/[^;]+)(?:;\s*codecs="?(?P<codecs>[^"]+))?', content_type)
if ctr:
mimetype, codecs = ctr.groups()
f = parse_codecs(codecs)
f['ext'] = mimetype2ext(mimetype)
return f
return {}
def _media_formats(src, cur_media_type, type_info={}):
full_url = absolute_url(src)
ext = type_info.get('ext') or determine_ext(full_url)
if ext == 'm3u8':
is_plain_url = False
formats = self._extract_m3u8_formats(
full_url, video_id, ext='mp4',
entry_protocol=m3u8_entry_protocol, m3u8_id=m3u8_id,
preference=preference, fatal=False)
elif ext == 'mpd':
is_plain_url = False
formats = self._extract_mpd_formats(
full_url, video_id, mpd_id=mpd_id, fatal=False)
else:
is_plain_url = True
formats = [{
'url': full_url,
'vcodec': 'none' if cur_media_type == 'audio' else None,
}]
return is_plain_url, formats
entries = []
# amp-video and amp-audio are very similar to their HTML5 counterparts
# so we will include them right here (see
# https://www.ampproject.org/docs/reference/components/amp-video)
# For dl8-* tags see https://delight-vr.com/documentation/dl8-video/
_MEDIA_TAG_NAME_RE = r'(?:(?:amp|dl8(?:-live)?)-)?(video|audio)'
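# First collect self-closing media tags (no inner content), then paired
# tags together with their content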
media_tags = [(media_tag, media_tag_name, media_type, '')
for media_tag, media_tag_name, media_type
in re.findall(r'(?s)(<(%s)[^>]*/>)' % _MEDIA_TAG_NAME_RE, webpage)]
media_tags.extend(re.findall(
# We only allow video|audio followed by a whitespace or '>'.
# Allowing more characters may result in a significant slowdown (see
# https://github.com/ytdl-org/youtube-dl/issues/11979, example URL:
# http://www.porntrex.com/maps/videositemap.xml).
r'(?s)(<(?P<tag>%s)(?:\s+[^>]*)?>)(.*?)</(?P=tag)>' % _MEDIA_TAG_NAME_RE, webpage))
for media_tag, _, media_type, media_content in media_tags:
media_info = {
'formats': [],
'subtitles': {},
}
media_attributes = extract_attributes(media_tag)
src = strip_or_none(media_attributes.get('src'))
if src:
_, formats = _media_formats(src, media_type)
media_info['formats'].extend(formats)
media_info['thumbnail'] = absolute_url(media_attributes.get('poster'))
if media_content:
for source_tag in re.findall(r'<source[^>]+>', media_content):
s_attr = extract_attributes(source_tag)
# data-video-src and data-src are non-standard but seen
# several times in the wild
src = strip_or_none(dict_get(s_attr, ('src', 'data-video-src', 'data-src')))
if not src:
continue
f = parse_content_type(s_attr.get('type'))
is_plain_url, formats = _media_formats(src, media_type, f)
if is_plain_url:
# width, height, res, label and title attributes are
# all not standard but seen several times in the wild
labels = [
s_attr.get(lbl)
for lbl in ('label', 'title')
if str_or_none(s_attr.get(lbl))
]
width = int_or_none(s_attr.get('width'))
height = (int_or_none(s_attr.get('height'))
or int_or_none(s_attr.get('res')))
if not width or not height:
for lbl in labels:
resolution = parse_resolution(lbl)
if not resolution:
continue
width = width or resolution.get('width')
height = height or resolution.get('height')
for lbl in labels:
tbr = parse_bitrate(lbl)
if tbr:
break
else:
tbr = None
f.update({
'width': width,
'height': height,
'tbr': tbr,
'format_id': s_attr.get('label') or s_attr.get('title'),
})
f.update(formats[0])
media_info['formats'].append(f)
else:
media_info['formats'].extend(formats)
for track_tag in re.findall(r'<track[^>]+>', media_content):
track_attributes = extract_attributes(track_tag)
kind = track_attributes.get('kind')
if not kind or kind in ('subtitles', 'captions'):
src = strip_or_none(track_attributes.get('src'))
if not src:
continue
lang = track_attributes.get('srclang') or track_attributes.get('lang') or track_attributes.get('label')
media_info['subtitles'].setdefault(lang, []).append({
'url': absolute_url(src),
})
for f in media_info['formats']:
f.setdefault('http_headers', {})['Referer'] = base_url
if media_info['formats'] or media_info['subtitles']:
entries.append(media_info)
return entries
def _extract_akamai_formats(self, manifest_url, video_id, hosts={}):
formats = []
hdcore_sign = 'hdcore=3.7.0'
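# Derive the HDS (f4m) manifest URL from the Akamai HLS URL by swapping
# the /i/ path segment for /z/ and master.m3u8 for manifest.f4m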
f4m_url = re.sub(r'(https?://[^/]+)/i/', r'\1/z/', manifest_url).replace('/master.m3u8', '/manifest.f4m')
hds_host = hosts.get('hds')
if hds_host:
f4m_url = re.sub(r'(https?://)[^/]+', r'\1' + hds_host, f4m_url)
if 'hdcore=' not in f4m_url:
f4m_url += ('&' if '?' in f4m_url else '?') + hdcore_sign
f4m_formats = self._extract_f4m_formats(
f4m_url, video_id, f4m_id='hds', fatal=False)
for entry in f4m_formats:
entry.update({'extra_param_to_segment_url': hdcore_sign})
formats.extend(f4m_formats)
m3u8_url = re.sub(r'(https?://[^/]+)/z/', r'\1/i/', manifest_url).replace('/manifest.f4m', '/master.m3u8')
hls_host = hosts.get('hls')
if hls_host:
m3u8_url = re.sub(r'(https?://)[^/]+', r'\1' + hls_host, m3u8_url)
m3u8_formats = self._extract_m3u8_formats(
m3u8_url, video_id, 'mp4', 'm3u8_native',
m3u8_id='hls', fatal=False)
formats.extend(m3u8_formats)
http_host = hosts.get('http')
if http_host and m3u8_formats and 'hdnea=' not in m3u8_url:
REPL_REGEX = r'https?://[^/]+/i/([^,]+),([^/]+),([^/]+)\.csmil/.+'
qualities = re.match(REPL_REGEX, m3u8_url).group(2).split(',')
qualities_length = len(qualities)
if len(m3u8_formats) in (qualities_length, qualities_length + 1):
i = 0
for f in m3u8_formats:
if f['vcodec'] != 'none':
for protocol in ('http', 'https'):
http_f = f.copy()
del http_f['manifest_url']
http_url = re.sub(
REPL_REGEX, protocol + r'://%s/\g<1>%s\3' % (http_host, qualities[i]), f['url'])
http_f.update({
'format_id': http_f['format_id'].replace('hls-', protocol + '-'),
'url': http_url,
'protocol': protocol,
})
formats.append(http_f)
i += 1
return formats
def _extract_wowza_formats(self, url, video_id, m3u8_entry_protocol='m3u8_native', skip_protocols=[]):
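# Strip any existing manifest/playlist suffix, then rebuild per-protocol
# manifest URLs (HLS, HDS, DASH, SMIL/RTMP/RTSP) from the common base URL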
query = compat_urlparse.urlparse(url).query
url = re.sub(r'/(?:manifest|playlist|jwplayer)\.(?:m3u8|f4m|mpd|smil)', '', url)
mobj = re.search(
r'(?:(?:http|rtmp|rtsp)(?P<s>s)?:)?(?P<url>//[^?]+)', url)
url_base = mobj.group('url')
http_base_url = '%s%s:%s' % ('http', mobj.group('s') or '', url_base)
formats = []
def manifest_url(manifest):
m_url = '%s/%s' % (http_base_url, manifest)
if query:
m_url += '?%s' % query
return m_url
if 'm3u8' not in skip_protocols:
formats.extend(self._extract_m3u8_formats(
manifest_url('playlist.m3u8'), video_id, 'mp4',
m3u8_entry_protocol, m3u8_id='hls', fatal=False))
if 'f4m' not in skip_protocols:
formats.extend(self._extract_f4m_formats(
manifest_url('manifest.f4m'),
video_id, f4m_id='hds', fatal=False))
if 'dash' not in skip_protocols:
formats.extend(self._extract_mpd_formats(
manifest_url('manifest.mpd'),
video_id, mpd_id='dash', fatal=False))
if re.search(r'(?:/smil:|\.smil)', url_base):
if 'smil' not in skip_protocols:
rtmp_formats = self._extract_smil_formats(
manifest_url('jwplayer.smil'),
video_id, fatal=False)
for rtmp_format in rtmp_formats:
rtsp_format = rtmp_format.copy()
rtsp_format['url'] = '%s/%s' % (rtmp_format['url'], rtmp_format['play_path'])
del rtsp_format['play_path']
del rtsp_format['ext']
rtsp_format.update({
'url': rtsp_format['url'].replace('rtmp://', 'rtsp://'),
'format_id': rtmp_format['format_id'].replace('rtmp', 'rtsp'),
'protocol': 'rtsp',
})
formats.extend([rtmp_format, rtsp_format])
else:
for protocol in ('rtmp', 'rtsp'):
if protocol not in skip_protocols:
formats.append({
'url': '%s:%s' % (protocol, url_base),
'format_id': protocol,
'protocol': protocol,
})
return formats
def _find_jwplayer_data(self, webpage, video_id=None, transform_source=js_to_json):
mobj = re.search(
r'(?s)jwplayer\((?P<quote>[\'"])[^\'" ]+(?P=quote)\)(?!</script>).*?\.setup\s*\((?P<options>[^)]+)\)',
webpage)
if mobj:
try:
jwplayer_data = self._parse_json(mobj.group('options'),
video_id=video_id,
transform_source=transform_source)
except ExtractorError:
pass
else:
if isinstance(jwplayer_data, dict):
return jwplayer_data
def _extract_jwplayer_data(self, webpage, video_id, *args, **kwargs):
jwplayer_data = self._find_jwplayer_data(
webpage, video_id, transform_source=js_to_json)
return self._parse_jwplayer_data(
jwplayer_data, video_id, *args, **kwargs)
def _parse_jwplayer_data(self, jwplayer_data, video_id=None, require_title=True,
m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
# JWPlayer backward compatibility: flattened playlists
# https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/api/config.js#L81-L96
if 'playlist' not in jwplayer_data:
jwplayer_data = {'playlist': [jwplayer_data]}
entries = []
# JWPlayer backward compatibility: single playlist item
# https://github.com/jwplayer/jwplayer/blob/v7.7.0/src/js/playlist/playlist.js#L10
if not isinstance(jwplayer_data['playlist'], list):
jwplayer_data['playlist'] = [jwplayer_data['playlist']]
for video_data in jwplayer_data['playlist']:
# JWPlayer backward compatibility: flattened sources
# https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/playlist/item.js#L29-L35
if 'sources' not in video_data:
video_data['sources'] = [video_data]
this_video_id = video_id or video_data['mediaid']
formats = self._parse_jwplayer_formats(
video_data['sources'], video_id=this_video_id, m3u8_id=m3u8_id,
mpd_id=mpd_id, rtmp_params=rtmp_params, base_url=base_url)
subtitles = {}
tracks = video_data.get('tracks')
if tracks and isinstance(tracks, list):
for track in tracks:
if not isinstance(track, dict):
continue
track_kind = track.get('kind')
if not track_kind or not isinstance(track_kind, compat_str):
continue
if track_kind.lower() not in ('captions', 'subtitles'):
continue
track_url = urljoin(base_url, track.get('file'))
if not track_url:
continue
subtitles.setdefault(track.get('label') or 'en', []).append({
'url': self._proto_relative_url(track_url)
})
entry = {
'id': this_video_id,
'title': unescapeHTML(video_data['title'] if require_title else video_data.get('title')),
'description': clean_html(video_data.get('description')),
'thumbnail': urljoin(base_url, self._proto_relative_url(video_data.get('image'))),
'timestamp': int_or_none(video_data.get('pubdate')),
'duration': float_or_none(jwplayer_data.get('duration') or video_data.get('duration')),
'subtitles': subtitles,
}
# https://github.com/jwplayer/jwplayer/blob/master/src/js/utils/validator.js#L32
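# A single source pointing at YouTube means this is just an embedded
# YouTube video, so return a url_transparent result and let the
# YouTube extractor handle it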
if len(formats) == 1 and re.search(r'^(?:http|//).*(?:youtube\.com|youtu\.be)/.+', formats[0]['url']):
entry.update({
'_type': 'url_transparent',
'url': formats[0]['url'],
})
else:
self._sort_formats(formats)
entry['formats'] = formats
entries.append(entry)
if len(entries) == 1:
return entries[0]
else:
return self.playlist_result(entries)
def _parse_jwplayer_formats(self, jwplayer_sources_data, video_id=None,
m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
urls = []
formats = []
for source in jwplayer_sources_data:
if not isinstance(source, dict):
continue
source_url = urljoin(
base_url, self._proto_relative_url(source.get('file')))
if not source_url or source_url in urls:
continue
urls.append(source_url)
source_type = source.get('type') or ''
ext = mimetype2ext(source_type) or determine_ext(source_url)
if source_type == 'hls' or ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
source_url, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id=m3u8_id, fatal=False))
elif source_type == 'dash' or ext == 'mpd':
formats.extend(self._extract_mpd_formats(
source_url, video_id, mpd_id=mpd_id, fatal=False))
elif ext == 'smil':
formats.extend(self._extract_smil_formats(
source_url, video_id, fatal=False))
# https://github.com/jwplayer/jwplayer/blob/master/src/js/providers/default.js#L67
elif source_type.startswith('audio') or ext in (
'oga', 'aac', 'mp3', 'mpeg', 'vorbis'):
formats.append({
'url': source_url,
'vcodec': 'none',
'ext': ext,
})
else:
height = int_or_none(source.get('height'))
if height is None:
# Often no height is provided but there is a label in
# a format like "1080p", "720p SD", or 1080.
height = int_or_none(self._search_regex(
r'^(\d{3,4})[pP]?(?:\b|$)', compat_str(source.get('label') or ''),
'height', default=None))
a_format = {
'url': source_url,
'width': int_or_none(source.get('width')),
'height': height,
'tbr': int_or_none(source.get('bitrate')),
'ext': ext,
}
if source_url.startswith('rtmp'):
a_format['ext'] = 'flv'
# See com/longtailvideo/jwplayer/media/RTMPMediaProvider.as
# of jwplayer.flash.swf
rtmp_url_parts = re.split(
r'((?:mp4|mp3|flv):)', source_url, 1)
if len(rtmp_url_parts) == 3:
rtmp_url, prefix, play_path = rtmp_url_parts
a_format.update({
'url': rtmp_url,
'play_path': prefix + play_path,
})
if rtmp_params:
a_format.update(rtmp_params)
formats.append(a_format)
return formats
def _live_title(self, name):
""" Generate the title for a live video """
now = datetime.datetime.now()
now_str = now.strftime('%Y-%m-%d %H:%M')
return name + ' ' + now_str
def _int(self, v, name, fatal=False, **kwargs):
res = int_or_none(v, **kwargs)
if 'get_attr' in kwargs:
print(getattr(v, kwargs['get_attr']))
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _float(self, v, name, fatal=False, **kwargs):
res = float_or_none(v, **kwargs)
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _set_cookie(self, domain, name, value, expire_time=None, port=None,
path='/', secure=False, discard=False, rest={}, **kwargs):
cookie = compat_cookiejar_Cookie(
0, name, value, port, port is not None, domain, True,
domain.startswith('.'), path, True, secure, expire_time,
discard, None, None, rest)
self._downloader.cookiejar.set_cookie(cookie)
def _get_cookies(self, url):
""" Return a compat_cookies.SimpleCookie with the cookies for the url """
req = sanitized_Request(url)
self._downloader.cookiejar.add_cookie_header(req)
return compat_cookies.SimpleCookie(req.get_header('Cookie'))
def _apply_first_set_cookie_header(self, url_handle, cookie):
"""
Apply the first Set-Cookie header instead of the last. Experimental.
Some sites (e.g. [1-3]) may serve two cookies under the same name
in the Set-Cookie header and expect the first (old) one to be set rather
than the second (new) one. However, per RFC 6265 the newer cookie
should be stored in the cookie jar, which is what actually happens.
We work around this issue by manually resetting the cookie to
the first one.
1. https://new.vk.com/
2. https://github.com/ytdl-org/youtube-dl/issues/9841#issuecomment-227871201
3. https://learning.oreilly.com/
"""
for header, cookies in url_handle.headers.items():
if header.lower() != 'set-cookie':
continue
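# Header values may arrive latin-1 decoded (Python 3); re-encode and
# decode as UTF-8 so non-ASCII cookie data survives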
if sys.version_info[0] >= 3:
cookies = cookies.encode('iso-8859-1')
cookies = cookies.decode('utf-8')
cookie_value = re.search(
r'%s=(.+?);.*?\b[Dd]omain=(.+?)(?:[,;]|$)' % cookie, cookies)
if cookie_value:
value, domain = cookie_value.groups()
self._set_cookie(domain, cookie, value)
break
def get_testcases(self, include_onlymatching=False):
t = getattr(self, '_TEST', None)
if t:
assert not hasattr(self, '_TESTS'), \
'%s has _TEST and _TESTS' % type(self).__name__
tests = [t]
else:
tests = getattr(self, '_TESTS', [])
for t in tests:
if not include_onlymatching and t.get('only_matching', False):
continue
t['name'] = type(self).__name__[:-len('IE')]
yield t
def is_suitable(self, age_limit):
""" Test whether the extractor is generally suitable for the given
age limit (i.e. pornographic sites are not, all others usually are) """
any_restricted = False
for tc in self.get_testcases(include_onlymatching=False):
if tc.get('playlist', []):
tc = tc['playlist'][0]
is_restricted = age_restricted(
tc.get('info_dict', {}).get('age_limit'), age_limit)
if not is_restricted:
return True
any_restricted = any_restricted or is_restricted
return not any_restricted
def extract_subtitles(self, *args, **kwargs):
if (self._downloader.params.get('writesubtitles', False)
or self._downloader.params.get('listsubtitles')):
return self._get_subtitles(*args, **kwargs)
return {}
def _get_subtitles(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
@staticmethod
def _merge_subtitle_items(subtitle_list1, subtitle_list2):
""" Merge subtitle items for one language. Items with duplicated URLs
will be dropped. """
list1_urls = set([item['url'] for item in subtitle_list1])
ret = list(subtitle_list1)
ret.extend([item for item in subtitle_list2 if item['url'] not in list1_urls])
return ret
@classmethod
def _merge_subtitles(cls, subtitle_dict1, subtitle_dict2):
""" Merge two subtitle dictionaries, language by language. """
ret = dict(subtitle_dict1)
for lang in subtitle_dict2:
ret[lang] = cls._merge_subtitle_items(subtitle_dict1.get(lang, []), subtitle_dict2[lang])
return ret
def extract_automatic_captions(self, *args, **kwargs):
if (self._downloader.params.get('writeautomaticsub', False)
or self._downloader.params.get('listsubtitles')):
return self._get_automatic_captions(*args, **kwargs)
return {}
def _get_automatic_captions(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
def mark_watched(self, *args, **kwargs):
if (self._downloader.params.get('mark_watched', False)
and (self._get_login_info()[0] is not None
or self._downloader.params.get('cookiefile') is not None)):
self._mark_watched(*args, **kwargs)
def _mark_watched(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
def geo_verification_headers(self):
headers = {}
geo_verification_proxy = self._downloader.params.get('geo_verification_proxy')
if geo_verification_proxy:
headers['Ytdl-request-proxy'] = geo_verification_proxy
return headers
def _generic_id(self, url):
return compat_urllib_parse_unquote(os.path.splitext(url.rstrip('/').split('/')[-1])[0])
def _generic_title(self, url):
return compat_urllib_parse_unquote(os.path.splitext(url_basename(url))[0])
class SearchInfoExtractor(InfoExtractor):
"""
Base class for paged search queries extractors.
They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query}
Instances should define _SEARCH_KEY and _MAX_RESULTS.
"""
@classmethod
def _make_valid_url(cls):
return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY
@classmethod
def suitable(cls, url):
return re.match(cls._make_valid_url(), url) is not None
def _real_extract(self, query):
mobj = re.match(self._make_valid_url(), query)
if mobj is None:
raise ExtractorError('Invalid search query "%s"' % query)
prefix = mobj.group('prefix')
query = mobj.group('query')
if prefix == '':
return self._get_n_results(query, 1)
elif prefix == 'all':
return self._get_n_results(query, self._MAX_RESULTS)
else:
n = int(prefix)
if n <= 0:
raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
elif n > self._MAX_RESULTS:
self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
n = self._MAX_RESULTS
return self._get_n_results(query, n)
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
raise NotImplementedError('This method must be implemented by subclasses')
@property
def SEARCH_KEY(self):
return self._SEARCH_KEY
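# A minimal, hypothetical example of the SearchInfoExtractor contract above;
# the class name, search key and URLs are illustrative assumptions, not part
# of youtube-dl itself.
class ExampleSearchIE(SearchInfoExtractor):
    _SEARCH_KEY = 'examplesearch'
    _MAX_RESULTS = 50

    def _get_n_results(self, query, n):
        # A real extractor would page through search results here; this sketch
        # just wraps n placeholder entries in a playlist result.
        entries = [
            self.url_result('https://example.com/video/%d' % i, video_id=str(i))
            for i in range(n)]
        return self.playlist_result(entries, playlist_title=query)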
| youtube_dl/extractor/common.py | 143,548 | Information Extractor class.
Information extractors are the classes that, given a URL, extract
information about the video (or videos) the URL refers to. This
information includes the real video URL, the video title, author and
others. The information is stored in a dictionary which is then
passed to the YoutubeDL. The YoutubeDL processes this
information, possibly downloading the video to the file system, among
other possible outcomes.
The type field determines the type of the result.
By far the most common value (and the default if _type is missing) is
"video", which indicates a single video.
For a video, the dictionaries must include the following fields:
id: Video identifier.
title: Video title, unescaped.
Additionally, it must contain either a formats entry or a url one:
formats: A list of dictionaries for each format available, ordered
from worst to best quality.
Potential fields:
* url The mandatory URL representing the media:
for plain file media - HTTP URL of this file,
for RTMP - RTMP URL,
for HLS - URL of the M3U8 media playlist,
for HDS - URL of the F4M manifest,
for DASH
- HTTP URL to plain file media (in case of
unfragmented media)
- URL of the MPD manifest or base URL
representing the media if MPD manifest
is parsed from a string (in case of
fragmented media)
for MSS - URL of the ISM manifest.
* manifest_url
The URL of the manifest file in case of
fragmented media:
for HLS - URL of the M3U8 master playlist,
for HDS - URL of the F4M manifest,
for DASH - URL of the MPD manifest,
for MSS - URL of the ISM manifest.
* ext Will be calculated from URL if missing
* format A human-readable description of the format
("mp4 container with h264/opus").
Calculated from the format_id, width, height,
and format_note fields if missing.
* format_id A short description of the format
("mp4_h264_opus" or "19").
Technically optional, but strongly recommended.
* format_note Additional info about the format
("3D" or "DASH video")
* width Width of the video, if known
* height Height of the video, if known
* resolution Textual description of width and height
* tbr Average bitrate of audio and video in KBit/s
* abr Average audio bitrate in KBit/s
* acodec Name of the audio codec in use
* asr Audio sampling rate in Hertz
* vbr Average video bitrate in KBit/s
* fps Frame rate
* vcodec Name of the video codec in use
* container Name of the container format
* filesize The number of bytes, if known in advance
* filesize_approx An estimate for the number of bytes
* player_url SWF Player URL (used for rtmpdump).
* protocol The protocol that will be used for the actual
download, lower-case.
"http", "https", "rtsp", "rtmp", "rtmpe",
"m3u8", "m3u8_native" or "http_dash_segments".
* fragment_base_url
Base URL for fragments. Each fragment's path
value (if present) will be relative to
this URL.
* fragments A list of fragments of a fragmented media.
Each fragment entry must contain either a url
or a path. If a url is present it should be
used by the client. Otherwise both path and
fragment_base_url must be present. Here is
the list of all potential fields:
* "url" - fragment's URL
* "path" - fragment's path relative to
fragment_base_url
* "duration" (optional, int or float)
* "filesize" (optional, int)
* preference Order number of this format. If this field is
present and not None, the formats get sorted
by this field, regardless of all other values.
-1 for default (order by other properties),
-2 or smaller for less than default.
< -1000 to hide the format (if there is
another one which is strictly better)
* language Language code, e.g. "de" or "en-US".
* language_preference Is this in the language mentioned in
the URL?
10 if it's what the URL is about,
-1 for default (don't know),
-10 otherwise, other values reserved for now.
* quality Order number of the video quality of this
format, irrespective of the file format.
-1 for default (order by other properties),
-2 or smaller for less than default.
* source_preference Order number for this video source
(quality takes higher priority)
-1 for default (order by other properties),
-2 or smaller for less than default.
* http_headers A dictionary of additional HTTP headers
to add to the request.
* stretched_ratio If given and not 1, indicates that the
video's pixels are not square.
width : height ratio as float.
* no_resume The server does not support resuming the
(HTTP or RTMP) download. Boolean.
* downloader_options A dictionary of downloader options as
described in FileDownloader
url: Final video URL.
ext: Video filename extension.
format: The video format, defaults to ext (used for --get-format)
player_url: SWF Player URL (used for rtmpdump).
The following fields are optional:
alt_title: A secondary title of the video.
display_id An alternative identifier for the video, not necessarily
unique, but available before title. Typically, id is
something like "4234987", title "Dancing naked mole rats",
and display_id "dancing-naked-mole-rats"
thumbnails: A list of dictionaries, with the following entries:
* "id" (optional, string) - Thumbnail format ID
* "url"
* "preference" (optional, int) - quality of the image
* "width" (optional, int)
* "height" (optional, int)
* "resolution" (optional, string "{width}x{height}",
deprecated)
* "filesize" (optional, int)
thumbnail: Full URL to a video thumbnail image.
description: Full video description.
uploader: Full name of the video uploader.
license: License name the video is licensed under.
creator: The creator of the video.
release_date: The date (YYYYMMDD) when the video was released.
timestamp: UNIX timestamp of the moment the video became available.
upload_date: Video upload date (YYYYMMDD).
If not explicitly set, calculated from timestamp.
uploader_id: Nickname or id of the video uploader.
uploader_url: Full URL to a personal webpage of the video uploader.
channel: Full name of the channel the video is uploaded on.
Note that channel fields may or may not repeat uploader
fields. This depends on a particular extractor.
channel_id: Id of the channel.
channel_url: Full URL to a channel webpage.
location: Physical location where the video was filmed.
subtitles: The available subtitles as a dictionary in the format
{tag: subformats}. "tag" is usually a language code, and
"subformats" is a list sorted from lower to higher
preference, each element is a dictionary with the "ext"
entry and one of:
* "data": The subtitles file contents
* "url": A URL pointing to the subtitles file
"ext" will be calculated from URL if missing
automatic_captions: Like 'subtitles', used by the YoutubeIE for
automatically generated captions
duration: Length of the video in seconds, as an integer or float.
view_count: How many users have watched the video on the platform.
like_count: Number of positive ratings of the video
dislike_count: Number of negative ratings of the video
repost_count: Number of reposts of the video
average_rating: Average rating given by users; the scale used depends on the webpage
comment_count: Number of comments on the video
comments: A list of comments, each with one or more of the following
properties (all but one of text or html optional):
* "author" - human-readable name of the comment author
* "author_id" - user ID of the comment author
* "id" - Comment ID
* "html" - Comment as HTML
* "text" - Plain text of the comment
* "timestamp" - UNIX timestamp of comment
* "parent" - ID of the comment this one is replying to.
Set to "root" to indicate that this is a
comment to the original video.
age_limit: Age restriction for the video, as an integer (years)
webpage_url: The URL to the video webpage, if given to youtube-dl it
should allow getting the same result again. (It will be set
by YoutubeDL if it's missing)
categories: A list of categories that the video falls in, for example
["Sports", "Berlin"]
tags: A list of tags assigned to the video, e.g. ["sweden", "pop music"]
is_live: True, False, or None (=unknown). Whether this video is a
live stream that goes on instead of a fixed-length video.
start_time: Time in seconds where the reproduction should start, as
specified in the URL.
end_time: Time in seconds where the reproduction should end, as
specified in the URL.
chapters: A list of dictionaries, with the following entries:
* "start_time" - The start time of the chapter in seconds
* "end_time" - The end time of the chapter in seconds
* "title" (optional, string)
The following fields should only be used when the video belongs to some logical
chapter or section:
chapter: Name or title of the chapter the video belongs to.
chapter_number: Number of the chapter the video belongs to, as an integer.
chapter_id: Id of the chapter the video belongs to, as a unicode string.
The following fields should only be used when the video is an episode of some
series, programme or podcast:
series: Title of the series or programme the video episode belongs to.
season: Title of the season the video episode belongs to.
season_number: Number of the season the video episode belongs to, as an integer.
season_id: Id of the season the video episode belongs to, as a unicode string.
episode: Title of the video episode. Unlike mandatory video title field,
this field should denote the exact title of the video episode
without any kind of decoration.
episode_number: Number of the video episode within a season, as an integer.
episode_id: Id of the video episode, as a unicode string.
The following fields should only be used when the media is a track or a part of
a music album:
track: Title of the track.
track_number: Number of the track within an album or a disc, as an integer.
track_id: Id of the track (useful in case of custom indexing, e.g. 6.iii),
as a unicode string.
artist: Artist(s) of the track.
genre: Genre(s) of the track.
album: Title of the album the track belongs to.
album_type: Type of the album (e.g. "Demo", "Full-length", "Split", "Compilation", etc).
album_artist: List of all artists appeared on the album (e.g.
"Ash Borer / Fell Voices" or "Various Artists", useful for splits
and compilations).
disc_number: Number of the disc or other physical medium the track belongs to,
as an integer.
release_year: Year (YYYY) when the album was released.
Unless mentioned otherwise, the fields should be Unicode strings.
Unless mentioned otherwise, None is equivalent to absence of information.
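To make the field specification above concrete, a minimal result dictionary for a single video might look like the following sketch; all values are illustrative and reuse the example id/title/display_id mentioned above:

info_dict = {
    'id': '4234987',                      # mandatory
    'title': 'Dancing naked mole rats',   # mandatory
    'formats': [{
        'url': 'https://cdn.example.com/video_720.mp4',
        'format_id': 'mp4-720',
        'ext': 'mp4',
        'width': 1280,
        'height': 720,
        'vcodec': 'h264',
        'acodec': 'aac',
    }],
    # optional metadata
    'display_id': 'dancing-naked-mole-rats',
    'duration': 123.5,
    'upload_date': '20200101',
    'uploader': 'Example Uploader',
}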
_type "playlist" indicates multiple videos.
There must be a key "entries", which is a list, an iterable, or a PagedList
object, each element of which is a valid dictionary by this specification.
Additionally, playlists can have "id", "title", "description", "uploader",
"uploader_id", "uploader_url", "duration" attributes with the same semantics
as videos (see above).
_type "multi_video" indicates that there are multiple videos that
form a single show, for example multiple acts of an opera or a TV episode.
It must have an entries key like a playlist and contain all the keys
required for a video at the same time.
_type "url" indicates that the video must be extracted from another
location, possibly by a different extractor. Its only required key is:
"url" - the next URL to extract.
The key "ie_key" can be set to the class name (minus the trailing "IE",
e.g. "Youtube") if the extractor class is known in advance.
Additionally, the dictionary may have any properties of the resolved entity
known in advance, for example "title" if the title of the referred video is
known ahead of time.
_type "url_transparent" entities have the same specification as "url", but
indicate that the given additional information is more precise than the one
associated with the resolved URL.
This is useful when a site employs a video service that hosts the video and
its technical metadata, but that video service does not embed a useful
title, description etc.
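For example, hypothetical "url" and "url_transparent" results delegating to the Youtube extractor might look like this (the URL and title are illustrative):

# "url" result: hand extraction over to another extractor entirely.
url_result = {
    '_type': 'url',
    'url': 'https://www.youtube.com/watch?v=BaW_jenozKc',
    'ie_key': 'Youtube',
}

# "url_transparent" result: same, but the metadata supplied here (e.g. a title
# taken from the embedding page) takes precedence over what the resolved
# extractor finds.
url_transparent_result = {
    '_type': 'url_transparent',
    'url': 'https://www.youtube.com/watch?v=BaW_jenozKc',
    'ie_key': 'Youtube',
    'title': 'Title shown on the embedding page',
}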
Subclasses of this one should re-define the _real_initialize() and
_real_extract() methods and define a _VALID_URL regexp.
Probably, they should also be added to the list of extractors.
_GEO_BYPASS attribute may be set to False in order to disable
geo restriction bypass mechanisms for a particular extractor.
Though it won't disable explicit geo restriction bypass based on
country code provided with geo_bypass_country.
_GEO_COUNTRIES attribute may contain a list of presumably geo unrestricted
countries for this extractor. One of these countries will be used by
geo restriction bypass mechanism right away in order to bypass
geo restriction, of course, if the mechanism is not disabled.
_GEO_IP_BLOCKS attribute may contain a list of presumably geo unrestricted
IP blocks in CIDR notation for this extractor. One of these IP blocks
will be used by geo restriction bypass mechanism similarly
to _GEO_COUNTRIES.
Finally, the _WORKING attribute should be set to False for broken IEs
in order to warn the users and skip the tests.
Base class for paged search queries extractors.
They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query}
Instances should define _SEARCH_KEY and _MAX_RESULTS.
Constructor. Receives an optional downloader.
Apply first Set-Cookie header instead of the last. Experimental.
Some sites (e.g. [1-3]) may serve two cookies under the same name
in the Set-Cookie header and expect the first (old) one to be set rather
than the second (new) one. However, per RFC 6265 the newer cookie
should be stored, which is what actually happens.
We work around this issue by manually resetting the cookie to
the first one.
1. https://new.vk.com/
2. https://github.com/ytdl-org/youtube-dl/issues/9841#issuecomment-227871201
3. https://learning.oreilly.com/
Return the JSON object as a dict.
See _download_webpage docstring for arguments specification.
Return a tuple (JSON object, URL handle).
See _download_webpage docstring for arguments specification.
Return the data of the page as a string.
Arguments:
url_or_request -- plain text URL as a string or
a compat_urllib_request.Request object
video_id -- Video/playlist/item identifier (string)
Keyword arguments:
note -- note printed before downloading (string)
errnote -- note printed in case of an error (string)
fatal -- flag denoting whether the error should be considered fatal,
i.e. whether it should cause ExtractorError to be raised;
otherwise a warning will be reported and extraction will continue
tries -- number of tries
timeout -- sleep interval between tries
encoding -- encoding for a page content decoding, guessed automatically
when not explicitly specified
data -- POST data (bytes)
headers -- HTTP headers (dict)
query -- URL query (dict)
expected_status -- allows accepting failed HTTP requests (non-2xx
status code) by explicitly specifying a set of accepted status
codes. Can be any of the following entities:
- an integer type specifying an exact failed status code to
accept
- a list or a tuple of integer types specifying a list of
failed status codes to accept
- a callable accepting an actual failed status code and
returning True if it should be accepted
Note that this argument does not affect success status codes (2xx)
which are always accepted.
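For instance, inside an extractor method, any of the accepted forms can be passed (the url and video_id below are placeholders):

# Accept a single failed status code ...
webpage = self._download_webpage(url, video_id, expected_status=404)

# ... a list/tuple of codes ...
webpage = self._download_webpage(url, video_id, expected_status=(400, 404))

# ... or a callable deciding per status code.
webpage = self._download_webpage(
    url, video_id, expected_status=lambda status: 400 <= status < 500)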
Return a tuple (page content as string, URL handle).
See _download_webpage docstring for arguments specification.
Return the xml as an compat_etree_Element.
See _download_webpage docstring for arguments specification.
Return a tuple (xml as an compat_etree_Element, URL handle).
See _download_webpage docstring for arguments specification.
Return a compat_cookies.SimpleCookie with the cookies for the url
Get the login info as (username, password)
First look for the manually specified credentials using username_option
and password_option as keys in params dictionary. If no such credentials
available look in the netrc file using the netrc_machine or _NETRC_MACHINE
value.
If there's no info available, return (None, None)
Get a specified number of results for a query
Get the two-factor authentication info
TODO - asking the user will be required for sms/phone verify
currently just uses the command line option
If there's no info available, return None
Like _search_regex, but strips HTML tags and unescapes entities.
Initialize geo restriction bypass mechanism.
This method is used to initialize geo bypass mechanism based on faking
X-Forwarded-For HTTP header. A random country from provided country list
is selected and a random IP belonging to this country is generated. This
IP will be passed as X-Forwarded-For HTTP header in all subsequent
HTTP requests.
This method will be used for initial geo bypass mechanism initialization
during the instance initialization with _GEO_COUNTRIES and
_GEO_IP_BLOCKS.
You may also manually call it from extractor's code if geo bypass
information is not available beforehand (e.g. obtained during
extraction) or due to some other reason. In this case you should pass
this information in geo bypass context passed as first argument. It may
contain following fields:
countries: List of geo unrestricted countries (similar
to _GEO_COUNTRIES)
ip_blocks: List of geo unrestricted IP blocks in CIDR notation
(similar to _GEO_IP_BLOCKS)
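In code, an extractor that only learns this information during extraction might call the following (the country and IP block values are placeholders):

# (inside an InfoExtractor subclass, once the geo information is known)
self._initialize_geo_bypass({
    'countries': ['US', 'GB'],        # similar to _GEO_COUNTRIES
    'ip_blocks': ['192.0.2.0/24'],    # similar to _GEO_IP_BLOCKS, CIDR notation
})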
Generate the title for a live video
Merge subtitle items for one language. Items with duplicated URLs
will be dropped.
Merge two subtitle dictionaries, language by language.
Parse formats from ISM manifest.
References:
1. [MS-SSTR]: Smooth Streaming Protocol,
https://msdn.microsoft.com/en-us/library/ff469518.aspx
Parse formats from MPD manifest.
References:
1. MPEG-DASH Standard, ISO/IEC 23009-1:2014(E),
http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip
2. https://en.wikipedia.org/wiki/Dynamic_Adaptive_Streaming_over_HTTP
Real extraction process. Redefine in subclasses.
Real initialization process. Redefine in subclasses.
Return the response handle.
See _download_webpage docstring for arguments specification.
Perform a regex search on the given string, using a single or a list of
patterns returning the first matching group.
In case of failure return a default value or raise a WARNING or a
RegexNotFoundError, depending on fatal, specifying the field name.
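A typical call inside an extractor, with a hypothetical pattern and field name:

# Returns the first capturing group of the first matching pattern; with
# fatal=False a failure only emits a warning and returns `default`.
title = self._search_regex(
    r'<h1 class="title">([^<]+)</h1>', webpage, 'title',
    default=None, fatal=False)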
Extracts URL information and returns it in list of dicts.
Either "http:" or "https:", depending on the user's preferences
A string for getting the InfoExtractor with get_info_extractor
Initializes an instance (authentication, etc).
Test whether the extractor is generally suitable for the given
age limit (i.e. pornographic sites are not, all others usually are)
Returns a playlist
Report attempt to confirm age.
Report webpage download.
Report information extraction.
Report attempt to log in.
Sets the downloader for this IE.
Receives a URL and returns True if suitable for this IE.
Print msg to screen, prefixing it with '[ie_name]'
Returns a URL that points to a page that should be processed
Getter method for _WORKING.
coding: utf-8 This does not use has/getattr intentionally - we want to know whether we have cached the regexp for *this* class, whereas getattr would also match the superclass Geo bypass mechanism is explicitly disabled by user Backward compatibility: previously _initialize_geo_bypass expected a list of countries, some 3rd party code may still use it this way The whole point of geo bypass mechanism is to fake IP as X-Forwarded-For HTTP header based on some IP block or country code. Path 1: bypassing based on IP block in CIDR notation Explicit IP block specified by user, use it right away regardless of whether extractor is geo bypassable or not Otherwise use random IP block from geo bypass context but only if extractor is known as geo bypassable Path 2: bypassing based on country code Explicit country code specified by user, use it right away regardless of whether extractor is geo bypassable or not Otherwise use random country code from geo bypass context but only if extractor is known as geo bypassable Some sites check X-Forwarded-For HTTP header in order to figure out the origin of the client behind proxy. This allows bypassing geo restriction by faking this header's value to IP that belongs to some geo unrestricted country. We will do so once we encounter any geo restriction error. Retain reference to error to prevent file object from being closed before it can be read. Works around the effects of <https://bugs.python.org/issue15002> introduced in Python 3.4.1. Strip hashes from the URL (1038) Working around MAX_PATH limitation on Windows (see http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx) Methods for following 608 TODO: ie should be the class used for getting the info return the first matching group Attempt to use provided username and password or .netrc data Helper functions for extracting OpenGraph info See http://www.rtalabel.org/index.php?content=howtofaqsingle See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/ See http://schema.org/VideoObject JSON-LD may be malformed and thus `fatal` should be respected. At the same time `default` may be passed that assumes `fatal=False` for _search_regex. Let's simulate the same behavior here as well. For interaction count some sites provide string instead of an integer (as per spec) with non digit characters (e.g. ",") so extracting count with more relaxed str_to_int Automatically determine tbr when missing based on abr and vbr (improves formats sorting in some cases) TODO remove the following workaround Not yet supported audio only video only For now assume non HTTP(S) URLs always valid Some manifests may be malformed, e.g. prosiebensat1 generated manifests (see https://github.com/ytdl-org/youtube-dl/issues/6215issuecomment-121704244) currently youtube-dl cannot decode the playerVerificationChallenge as Akamai uses Adobe Alchemy Remove unsupported DRM protected media from final formats rendition (see https://github.com/ytdl-org/youtube-dl/issues/8573). If <bootstrapInfo> is present, the specified f4m is a stream-level manifest, and only set-level manifests may refer to external resources. 
See section 11.4 and section 4 of F4M spec @href is introduced in 2.0, see section 11.6 of F4M spec If media_url is itself a f4m manifest do the recursive extraction since bitrates in parent manifest (this one) and media_url manifest may differ leading to inability to resolve the format by requested bitrate in f4m downloader Sometimes stream-level manifest contains single media entry that does not contain any quality metadata (e.g. http://matchtv.ru/live-player). At the same time parent's media entry in set-level manifest may contain it. We will copy it from parent in such cases. Adobe Flash Access Apple FairPlay References: 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-21 2. https://github.com/ytdl-org/youtube-dl/issues/12211 3. https://github.com/ytdl-org/youtube-dl/issues/18923 We should try extracting formats only from master playlists [1, 4.3.4], i.e. playlists that describe available qualities. On the other hand media playlists [1, 4.3.3] should be returned as is since they contain just the media without qualities renditions. Fortunately, master playlist can be easily distinguished from media playlist based on particular tags availability. As of [1, 4.3.3, 4.3.4] master playlist tags MUST NOT appear in a media playlist and vice versa. As of [1, 4.3.3.1] EXT-X-TARGETDURATION tag is REQUIRED for every media playlist and MUST NOT appear in master playlist thus we can clearly detect media playlist with this criterion. media playlist, return as is As per [1, 4.3.4.1] TYPE, GROUP-ID and NAME are REQUIRED Despite specification does not mention NAME attribute for EXT-X-STREAM-INF tag it still sometimes may be present (see [1] or vidio test in TestInfoExtractor.test_parse_m3u8_formats) 1. http://www.vidio.com/watch/165683-dj_ambred-booyah-live-2015 If there is no NAME in EXT-X-STREAM-INF it will be obtained from corresponding rendition group parse EXT-X-MEDIA tags before EXT-X-STREAM-INF in order to have the chance to detect video only formats when EXT-X-STREAM-INF tags precede EXT-X-MEDIA tags in HLS manifest such as [3]. Bandwidth of live streams may differ over time thus making format_id unpredictable. So it's better to keep provided format_id intact. Unified Streaming Platform As per [1, 4.3.4.1.1] any EXT-X-STREAM-INF tag which references a rendition group MUST have a CODECS attribute. However, this is not always respected, for example, [2] contains EXT-X-STREAM-INF tag which references AUDIO rendition group but does not have CODECS and despite referencing an audio group it represents a complete (with audio and video) format. So, for such cases we will ignore references to rendition groups and treat them as complete formats. TODO: update acodec for audio only formats with the same GROUP-ID for DailyMotion As per [1, 5.3.9.2.2] SegmentList and SegmentTemplate share some common attributes and elements. We will only extract relevant for us. @d is mandatory (see [1, 5.3.9.6.2, Table 17, page 60]) According to [1, 5.3.7.2, Table 9, page 41], @mimeType is mandatory TODO implement WebVTT downloading First of, % characters outside $...$ templates must be escaped by doubling for proper processing by % operator string formatting used further (see https://github.com/ytdl-org/youtube-dl/issues/16867). Next, $...$ templates are translated to their %(...) 
counterparts to be used with % operator @initialization is a regular template like @media one so it should be handled just the same way (see https://github.com/ytdl-org/youtube-dl/issues/11605) As per [1, 5.3.9.4.2, Table 15, page 54] $Number$ and $Time$ shall not be included for @initialization thus only $Bandwidth$ remains As per [1, 5.3.9.4.4, Table 16, page 55] $Number$ and $Time$ can't be used at the same time $Number*$ or $Time$ in media template with S list available Example $Number*$: http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg Example $Time$: https://play.arkena.com/embed/avp/v2/player/media/b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe/1/129411 No media template Example: https://www.youtube.com/watch?v=iXZV5uAYMJI or any YouTube dashsegments video Segment URLs with no SegmentTimeline Example: https://www.seznam.cz/zpravy/clanek/cesko-zasahne-vitr-o-sile-vichrice-muze-byt-i-zivotu-nebezpecny-39091 https://github.com/ytdl-org/youtube-dl/pull/14844 If there is a fragments key available then we correctly recognized fragmented media. Otherwise we will assume unfragmented media with direct access. Technically, such assumption is not necessarily correct since we may simply have no support for some forms of fragmented media renditions yet, but for now we'll use this fallback. NB: mpd_url may be empty when MPD manifest is parsed from a string Assuming direct URL to unfragmented media. According to [1, 5.3.5.2, Table 7, page 35] @id of Representation is not necessarily unique within a Period thus formats with the same `format_id` are quite possible. There are numerous examples of such manifests (see https://github.com/ytdl-org/youtube-dl/issues/15111, https://github.com/ytdl-org/youtube-dl/issues/13919) TODO: add support for WVC1 and WMAP [1] does not mention Width and Height attributes. However, they're often present while MaxWidth and MaxHeight are missing, so should be used as fallbacks amp-video and amp-audio are very similar to their HTML5 counterparts so we wll include them right here (see https://www.ampproject.org/docs/reference/components/amp-video) For dl8-* tags see https://delight-vr.com/documentation/dl8-video/ We only allow video|audio followed by a whitespace or '>'. Allowing more characters may end up in significant slow down (see https://github.com/ytdl-org/youtube-dl/issues/11979, example URL: http://www.porntrex.com/maps/videositemap.xml). data-video-src and data-src are non standard but seen several times in the wild width, height, res, label and title attributes are all not standard but seen several times in the wild JWPlayer backward compatibility: flattened playlists https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/api/config.jsL81-L96 JWPlayer backward compatibility: single playlist item https://github.com/jwplayer/jwplayer/blob/v7.7.0/src/js/playlist/playlist.jsL10 JWPlayer backward compatibility: flattened sources https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/playlist/item.jsL29-L35 https://github.com/jwplayer/jwplayer/blob/master/src/js/utils/validator.jsL32 https://github.com/jwplayer/jwplayer/blob/master/src/js/providers/default.jsL67 Often no height is provided but there is a label in format like "1080p", "720p SD", or 1080. See com/longtailvideo/jwplayer/media/RTMPMediaProvider.as of jwplayer.flash.swf | 32,356 | en | 0.825456 |
from django import forms
from django.apps import apps
from django.core.exceptions import PermissionDenied
from django.urls import reverse, NoReverseMatch
from django.template.context_processors import csrf
from django.db.models.base import ModelBase
from django.forms.forms import DeclarativeFieldsMetaclass
from django.forms.utils import flatatt
from django.template import loader
from django.http import Http404
from django.test.client import RequestFactory
from django.utils.encoding import force_text, smart_text
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.utils.http import urlencode, urlquote
from django.views.decorators.cache import never_cache
from xadmin import widgets as exwidgets
from xadmin.layout import FormHelper
from xadmin.models import UserSettings, UserWidget
from xadmin.plugins.utils import get_context_dict
from xadmin.sites import site
from xadmin.views.base import CommAdminView, ModelAdminView, filter_hook, csrf_protect_m
from xadmin.views.edit import CreateAdminView
from xadmin.views.list import ListAdminView
from xadmin.util import unquote, DJANGO_11
import copy
class WidgetTypeSelect(forms.Widget):
def __init__(self, widgets, attrs=None):
super(WidgetTypeSelect, self).__init__(attrs)
self._widgets = widgets
def render(self, name, value, attrs=None, renderer=None):
if value is None:
value = ''
if DJANGO_11:
final_attrs = self.build_attrs(attrs, extra_attrs={'name': name})
else:
final_attrs = self.build_attrs(attrs, name=name)
final_attrs['class'] = 'nav nav-pills nav-stacked'
output = [u'<ul%s>' % flatatt(final_attrs)]
options = self.render_options(force_text(value), final_attrs['id'])
if options:
output.append(options)
output.append(u'</ul>')
output.append('<input type="hidden" id="%s_input" name="%s" value="%s"/>' %
(final_attrs['id'], name, force_text(value)))
return mark_safe(u'\n'.join(output))
def render_option(self, selected_choice, widget, id):
if widget.widget_type == selected_choice:
selected_html = u' class="active"'
else:
selected_html = ''
return (u'<li%s><a onclick="' +
'javascript:$(this).parent().parent().find(\'>li\').removeClass(\'active\');$(this).parent().addClass(\'active\');' +
'$(\'#%s_input\').attr(\'value\', \'%s\')' % (id, widget.widget_type) +
'"><h4><i class="%s"></i> %s</h4><p>%s</p></a></li>') % (
selected_html,
widget.widget_icon,
widget.widget_title or widget.widget_type,
widget.description)
def render_options(self, selected_choice, id):
# Normalize to strings.
output = []
for widget in self._widgets:
output.append(self.render_option(selected_choice, widget, id))
return u'\n'.join(output)
class UserWidgetAdmin(object):
model_icon = 'fa fa-dashboard'
list_display = ('widget_type', 'page_id', 'user')
list_filter = ['user', 'widget_type', 'page_id']
list_display_links = ('widget_type',)
user_fields = ['user']
hidden_menu = True
wizard_form_list = (
(_(u"Widget Type"), ('page_id', 'widget_type')),
(_(u"Widget Params"), {'callback':
"get_widget_params_form", 'convert': "convert_widget_params"})
)
def formfield_for_dbfield(self, db_field, **kwargs):
if db_field.name == 'widget_type':
widgets = widget_manager.get_widgets(self.request.GET.get('page_id', ''))
form_widget = WidgetTypeSelect(widgets)
return forms.ChoiceField(choices=[(w.widget_type, w.description) for w in widgets],
widget=form_widget, label=_('Widget Type'))
if 'page_id' in self.request.GET and db_field.name == 'page_id':
kwargs['widget'] = forms.HiddenInput
field = super(
UserWidgetAdmin, self).formfield_for_dbfield(db_field, **kwargs)
return field
def get_widget_params_form(self, wizard):
data = wizard.get_cleaned_data_for_step(wizard.steps.first)
widget_type = data['widget_type']
widget = widget_manager.get(widget_type)
fields = copy.deepcopy(widget.base_fields)
if 'id' in fields:
del fields['id']
return DeclarativeFieldsMetaclass("WidgetParamsForm", (forms.Form,), fields)
def convert_widget_params(self, wizard, cleaned_data, form):
widget = UserWidget()
value = dict([(f.name, f.value()) for f in form])
widget.set_value(value)
cleaned_data['value'] = widget.value
cleaned_data['user'] = self.user
def get_list_display(self):
list_display = super(UserWidgetAdmin, self).get_list_display()
if not self.user.is_superuser:
list_display.remove('user')
return list_display
def queryset(self):
if self.user.is_superuser:
return super(UserWidgetAdmin, self).queryset()
return UserWidget.objects.filter(user=self.user)
def update_dashboard(self, obj):
try:
portal_pos = UserSettings.objects.get(
user=obj.user, key="dashboard:%s:pos" % obj.page_id)
except UserSettings.DoesNotExist:
return
pos = [[w for w in col.split(',') if w != str(
obj.id)] for col in portal_pos.value.split('|')]
portal_pos.value = '|'.join([','.join(col) for col in pos])
portal_pos.save()
def delete_model(self):
self.update_dashboard(self.obj)
super(UserWidgetAdmin, self).delete_model()
def delete_models(self, queryset):
for obj in queryset:
self.update_dashboard(obj)
super(UserWidgetAdmin, self).delete_models(queryset)
site.register(UserWidget, UserWidgetAdmin)
class WidgetManager(object):
_widgets = None
def __init__(self):
self._widgets = {}
def register(self, widget_class):
self._widgets[widget_class.widget_type] = widget_class
return widget_class
def get(self, name):
return self._widgets[name]
def get_widgets(self, page_id):
return self._widgets.values()
widget_manager = WidgetManager()
class WidgetDataError(Exception):
def __init__(self, widget, errors):
super(WidgetDataError, self).__init__(str(errors))
self.widget = widget
self.errors = errors
class BaseWidget(forms.Form):
template = 'xadmin/widgets/base.html'
description = 'Base Widget, don\'t use it.'
widget_title = None
widget_icon = 'fa fa-plus-square'
widget_type = 'base'
base_title = None
id = forms.IntegerField(label=_('Widget ID'), widget=forms.HiddenInput)
title = forms.CharField(label=_('Widget Title'), required=False, widget=exwidgets.AdminTextInputWidget)
def __init__(self, dashboard, data):
self.dashboard = dashboard
self.admin_site = dashboard.admin_site
self.request = dashboard.request
self.user = dashboard.request.user
self.convert(data)
super(BaseWidget, self).__init__(data)
if not self.is_valid():
raise WidgetDataError(self, self.errors.as_text())
self.setup()
def setup(self):
helper = FormHelper()
helper.form_tag = False
helper.include_media = False
self.helper = helper
self.id = self.cleaned_data['id']
self.title = self.cleaned_data['title'] or self.base_title
if not (self.user.is_superuser or self.has_perm()):
raise PermissionDenied
@property
def widget(self):
context = {'widget_id': self.id, 'widget_title': self.title, 'widget_icon': self.widget_icon,
'widget_type': self.widget_type, 'form': self, 'widget': self}
context.update(csrf(self.request))
self.context(context)
return loader.render_to_string(self.template, context)
def context(self, context):
pass
def convert(self, data):
pass
def has_perm(self):
return False
def save(self):
value = dict([(f.name, f.value()) for f in self])
user_widget = UserWidget.objects.get(id=self.id)
user_widget.set_value(value)
user_widget.save()
def static(self, path):
return self.dashboard.static(path)
def vendor(self, *tags):
return self.dashboard.vendor(*tags)
def media(self):
return forms.Media()
@widget_manager.register
class HtmlWidget(BaseWidget):
widget_type = 'html'
widget_icon = 'fa fa-file-o'
description = _(
u'Html Content Widget, can write any html content in widget.')
content = forms.CharField(label=_(
'Html Content'), widget=exwidgets.AdminTextareaWidget, required=False)
def has_perm(self):
return True
def context(self, context):
context['content'] = self.cleaned_data['content']
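# A hypothetical example of registering an additional widget following the same
# pattern as HtmlWidget above; the 'clock' widget type is an assumption, not part
# of xadmin, and it assumes the inherited base template renders a 'content'
# context variable as HtmlWidget does.
@widget_manager.register
class ClockWidget(BaseWidget):
    widget_type = 'clock'
    widget_icon = 'fa fa-clock-o'
    description = 'Show the current server time (illustrative example).'

    def has_perm(self):
        # Any user who can see the dashboard may add this widget.
        return True

    def context(self, context):
        import datetime
        context['content'] = datetime.datetime.now().isoformat()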
class ModelChoiceIterator(object):
def __init__(self, field):
self.field = field
def __iter__(self):
from xadmin import site as g_admin_site
for m, ma in g_admin_site._registry.items():
yield ('%s.%s' % (m._meta.app_label, m._meta.model_name),
m._meta.verbose_name)
class ModelChoiceField(forms.ChoiceField):
def __init__(self, required=True, widget=None, label=None, initial=None,
help_text=None, *args, **kwargs):
# Call Field instead of ChoiceField __init__() because we don't need
# ChoiceField.__init__().
forms.Field.__init__(self, required=required, widget=widget, label=label, initial=initial, help_text=help_text,
*args, **kwargs)
self.widget.choices = self.choices
def __deepcopy__(self, memo):
result = forms.Field.__deepcopy__(self, memo)
return result
def _get_choices(self):
return ModelChoiceIterator(self)
choices = property(_get_choices, forms.ChoiceField._set_choices)
def to_python(self, value):
if isinstance(value, ModelBase):
return value
app_label, model_name = value.lower().split('.')
return apps.get_model(app_label, model_name)
def prepare_value(self, value):
if isinstance(value, ModelBase):
value = '%s.%s' % (value._meta.app_label, value._meta.model_name)
return value
def valid_value(self, value):
value = self.prepare_value(value)
for k, v in self.choices:
if value == smart_text(k):
return True
return False
class ModelBaseWidget(BaseWidget):
app_label = None
model_name = None
model_perm = 'change'
model = ModelChoiceField(label=_(u'Target Model'), widget=exwidgets.AdminSelectWidget)
def __init__(self, dashboard, data):
self.dashboard = dashboard
super(ModelBaseWidget, self).__init__(dashboard, data)
def setup(self):
self.model = self.cleaned_data['model']
self.app_label = self.model._meta.app_label
self.model_name = self.model._meta.model_name
super(ModelBaseWidget, self).setup()
def has_perm(self):
return self.dashboard.has_model_perm(self.model, self.model_perm)
def filte_choices_model(self, model, modeladmin):
return self.dashboard.has_model_perm(model, self.model_perm)
def model_admin_url(self, name, *args, **kwargs):
return reverse(
"%s:%s_%s_%s" % (self.admin_site.app_name, self.app_label,
self.model_name, name), args=args, kwargs=kwargs)
class PartialBaseWidget(BaseWidget):
def get_view_class(self, view_class, model=None, **opts):
admin_class = self.admin_site._registry.get(model) if model else None
return self.admin_site.get_view_class(view_class, admin_class, **opts)
def get_factory(self):
return RequestFactory()
def setup_request(self, request):
request.user = self.user
request.session = self.request.session
return request
def make_get_request(self, path, data={}, **extra):
req = self.get_factory().get(path, data, **extra)
return self.setup_request(req)
def make_post_request(self, path, data={}, **extra):
req = self.get_factory().post(path, data, **extra)
return self.setup_request(req)
@widget_manager.register
class QuickBtnWidget(BaseWidget):
widget_type = 'qbutton'
description = _(u'Quick button Widget, quickly open any page.')
template = "xadmin/widgets/qbutton.html"
base_title = _(u"Quick Buttons")
widget_icon = 'fa fa-caret-square-o-right'
def convert(self, data):
self.q_btns = data.pop('btns', [])
def get_model(self, model_or_label):
if isinstance(model_or_label, ModelBase):
return model_or_label
else:
return apps.get_model(*model_or_label.lower().split('.'))
def context(self, context):
btns = []
for b in self.q_btns:
btn = {}
if 'model' in b:
model = self.get_model(b['model'])
if not self.user.has_perm("%s.view_%s" % (model._meta.app_label, model._meta.model_name)):
continue
btn['url'] = reverse("%s:%s_%s_%s" % (self.admin_site.app_name, model._meta.app_label,
model._meta.model_name, b.get('view', 'changelist')))
btn['title'] = model._meta.verbose_name
btn['icon'] = self.dashboard.get_model_icon(model)
else:
try:
btn['url'] = reverse(b['url'])
except NoReverseMatch:
btn['url'] = b['url']
if 'title' in b:
btn['title'] = b['title']
if 'icon' in b:
btn['icon'] = b['icon']
btns.append(btn)
context.update({'btns': btns})
def has_perm(self):
return True
@widget_manager.register
class ListWidget(ModelBaseWidget, PartialBaseWidget):
widget_type = 'list'
description = _(u'Any Objects list Widget.')
template = "xadmin/widgets/list.html"
model_perm = 'view'
widget_icon = 'fa fa-align-justify'
def convert(self, data):
self.list_params = data.pop('params', {})
self.list_count = data.pop('count', 10)
def setup(self):
super(ListWidget, self).setup()
if not self.title:
self.title = self.model._meta.verbose_name_plural
req = self.make_get_request("", self.list_params)
self.list_view = self.get_view_class(ListAdminView, self.model)(req)
if self.list_count:
self.list_view.list_per_page = self.list_count
def context(self, context):
list_view = self.list_view
list_view.make_result_list()
base_fields = list_view.base_list_display
if len(base_fields) > 5:
base_fields = base_fields[0:5]
context['result_headers'] = [c for c in list_view.result_headers(
).cells if c.field_name in base_fields]
context['results'] = [[o for i, o in
enumerate(filter(lambda c:c.field_name in base_fields, r.cells))]
for r in list_view.results()]
context['result_count'] = list_view.result_count
context['page_url'] = self.model_admin_url('changelist') + "?" + urlencode(self.list_params)
@widget_manager.register
class AddFormWidget(ModelBaseWidget, PartialBaseWidget):
widget_type = 'addform'
description = _(u'Add any model object Widget.')
template = "xadmin/widgets/addform.html"
model_perm = 'add'
widget_icon = 'fa fa-plus'
def setup(self):
super(AddFormWidget, self).setup()
if self.title is None:
self.title = _('Add %s') % self.model._meta.verbose_name
req = self.make_get_request("")
self.add_view = self.get_view_class(
CreateAdminView, self.model, list_per_page=10)(req)
self.add_view.instance_forms()
def context(self, context):
helper = FormHelper()
helper.form_tag = False
helper.include_media = False
context.update({
'addform': self.add_view.form_obj,
'addhelper': helper,
'addurl': self.add_view.model_admin_url('add'),
'model': self.model
})
def media(self):
return self.add_view.media + self.add_view.form_obj.media + self.vendor('xadmin.plugin.quick-form.js')
class Dashboard(CommAdminView):
widget_customiz = True
widgets = []
title = _(u"Dashboard")
icon = None
def get_page_id(self):
return self.request.path
def get_portal_key(self):
return "dashboard:%s:pos" % self.get_page_id()
@filter_hook
def get_widget(self, widget_or_id, data=None):
try:
if isinstance(widget_or_id, UserWidget):
widget = widget_or_id
else:
widget = UserWidget.objects.get(user=self.user, page_id=self.get_page_id(), id=widget_or_id)
wid = widget_manager.get(widget.widget_type)
class widget_with_perm(wid):
def context(self, context):
super(widget_with_perm, self).context(context)
context.update({'has_change_permission': self.request.user.has_perm('xadmin.change_userwidget')})
wid_instance = widget_with_perm(self, data or widget.get_value())
return wid_instance
except UserWidget.DoesNotExist:
return None
@filter_hook
def get_init_widget(self):
portal = []
widgets = self.widgets
for col in widgets:
portal_col = []
for opts in col:
try:
widget = UserWidget(user=self.user, page_id=self.get_page_id(), widget_type=opts['type'])
widget.set_value(opts)
widget.save()
portal_col.append(self.get_widget(widget))
except (PermissionDenied, WidgetDataError):
widget.delete()
continue
portal.append(portal_col)
UserSettings(
user=self.user, key="dashboard:%s:pos" % self.get_page_id(),
value='|'.join([','.join([str(w.id) for w in col]) for col in portal])).save()
return portal
@filter_hook
def get_widgets(self):
if self.widget_customiz:
portal_pos = UserSettings.objects.filter(
user=self.user, key=self.get_portal_key())
if len(portal_pos):
portal_pos = portal_pos[0].value
widgets = []
if portal_pos:
user_widgets = dict([(uw.id, uw) for uw in UserWidget.objects.filter(user=self.user, page_id=self.get_page_id())])
for col in portal_pos.split('|'):
ws = []
for wid in col.split(','):
try:
widget = user_widgets.get(int(wid))
if widget:
ws.append(self.get_widget(widget))
except Exception as e:
import logging
logging.error(e, exc_info=True)
widgets.append(ws)
return widgets
return self.get_init_widget()
@filter_hook
def get_title(self):
return self.title
@filter_hook
def get_context(self):
new_context = {
'title': self.get_title(),
'icon': self.icon,
'portal_key': self.get_portal_key(),
'columns': [('col-sm-%d' % int(12 / len(self.widgets)), ws) for ws in self.widgets],
'has_add_widget_permission': self.has_model_perm(UserWidget, 'add') and self.widget_customiz,
'add_widget_url': self.get_admin_url('%s_%s_add' % (UserWidget._meta.app_label, UserWidget._meta.model_name)) +
"?user=%s&page_id=%s&_redirect=%s" % (self.user.id, self.get_page_id(), urlquote(self.request.get_full_path()))
}
context = super(Dashboard, self).get_context()
context.update(new_context)
return context
@never_cache
def get(self, request, *args, **kwargs):
self.widgets = self.get_widgets()
return self.template_response('xadmin/views/dashboard.html', self.get_context())
@csrf_protect_m
def post(self, request, *args, **kwargs):
if 'id' in request.POST:
widget_id = request.POST['id']
if request.POST.get('_delete', None) != 'on':
widget = self.get_widget(widget_id, request.POST.copy())
widget.save()
else:
try:
widget = UserWidget.objects.get(
user=self.user, page_id=self.get_page_id(), id=widget_id)
widget.delete()
try:
portal_pos = UserSettings.objects.get(user=self.user, key="dashboard:%s:pos" % self.get_page_id())
pos = [[w for w in col.split(',') if w != str(
widget_id)] for col in portal_pos.value.split('|')]
portal_pos.value = '|'.join([','.join(col) for col in pos])
portal_pos.save()
except Exception:
pass
except UserWidget.DoesNotExist:
pass
return self.get(request)
@filter_hook
def get_media(self):
media = super(Dashboard, self).get_media() + \
self.vendor('xadmin.page.dashboard.js', 'xadmin.page.dashboard.css')
if self.widget_customiz:
media = media + self.vendor('xadmin.plugin.portal.js')
for ws in self.widgets:
for widget in ws:
media = media + widget.media()
return media
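# A hypothetical page-level dashboard configuration illustrating the `widgets`
# layout consumed by get_init_widget(): a list of columns, each a list of
# widget-spec dicts whose 'type' selects a registered widget. The concrete
# models, URL names and params below are assumptions.
class MainDashboard(Dashboard):
    widgets = [
        [
            {'type': 'html', 'title': 'Welcome', 'content': '<p>Hello!</p>'},
            {'type': 'qbutton', 'title': 'Quick Start',
             'btns': [{'model': UserWidget},
                      {'title': 'Home', 'url': 'xadmin:index'}]},
        ],
        [
            {'type': 'list', 'model': 'xadmin.userwidget',
             'params': {'o': '-id'}, 'count': 5},
        ],
    ]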
class ModelDashboard(Dashboard, ModelAdminView):
title = _(u"%s Dashboard")
def get_page_id(self):
return 'model:%s/%s' % self.model_info
@filter_hook
def get_title(self):
return self.title % force_text(self.obj)
def init_request(self, object_id, *args, **kwargs):
self.obj = self.get_object(unquote(object_id))
if not self.has_view_permission(self.obj):
raise PermissionDenied
if self.obj is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') %
{'name': force_text(self.opts.verbose_name), 'key': escape(object_id)})
@filter_hook
def get_context(self):
new_context = {
'has_change_permission': self.has_change_permission(self.obj),
'object': self.obj,
}
context = Dashboard.get_context(self)
context.update(ModelAdminView.get_context(self))
context.update(new_context)
return context
@never_cache
def get(self, request, *args, **kwargs):
self.widgets = self.get_widgets()
return self.template_response(self.get_template_list('views/model_dashboard.html'), self.get_context())
| xadmin/views/dashboard.py | 23,641 | Normalize to strings. Call Field instead of ChoiceField __init__() because we don't need ChoiceField.__init__(). | 112 | en | 0.617005 |
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
import oci # noqa: F401
from oci.util import WAIT_RESOURCE_NOT_FOUND # noqa: F401
class TransferDeviceClientCompositeOperations(object):
"""
This class provides a wrapper around :py:class:`~oci.dts.TransferDeviceClient` and offers convenience methods
for operations that would otherwise need to be chained together. For example, instead of performing an action
on a resource (e.g. launching an instance, creating a load balancer) and then using a waiter to wait for the resource
to enter a given state, you can call a single method in this class to accomplish the same functionality
"""
def __init__(self, client, **kwargs):
"""
Creates a new TransferDeviceClientCompositeOperations object
:param TransferDeviceClient client:
The service client which will be wrapped by this object
"""
self.client = client
def update_transfer_device_and_wait_for_state(self, id, transfer_device_label, update_transfer_device_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
"""
Calls :py:func:`~oci.dts.TransferDeviceClient.update_transfer_device` and waits for the :py:class:`~oci.dts.models.TransferDevice` acted upon
to enter the given state(s).
:param str id: (required)
ID of the Transfer Job
:param str transfer_device_label: (required)
Label of the Transfer Device
:param oci.dts.models.UpdateTransferDeviceDetails update_transfer_device_details: (required)
fields to update
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.dts.models.TransferDevice.lifecycle_state`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.dts.TransferDeviceClient.update_transfer_device`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
"""
operation_result = self.client.update_transfer_device(id, transfer_device_label, update_transfer_device_details, **operation_kwargs)
if not wait_for_states:
return operation_result
lowered_wait_for_states = [w.lower() for w in wait_for_states]
wait_for_resource_id = operation_result.data.id
try:
waiter_result = oci.wait_until(
self.client,
self.client.get_transfer_device(wait_for_resource_id),
evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
**waiter_kwargs
)
result_to_return = waiter_result
return result_to_return
except Exception as e:
raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
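def _example_usage():
    # A sketch of how this composite client might be used. The OCIDs, the device
    # label, the lifecycle state value and the UpdateTransferDeviceDetails field
    # below are illustrative assumptions, not taken from this file.
    config = oci.config.from_file()
    client = oci.dts.TransferDeviceClient(config)
    composite = TransferDeviceClientCompositeOperations(client)

    details = oci.dts.models.UpdateTransferDeviceDetails(lifecycle_state='READY')
    device = composite.update_transfer_device_and_wait_for_state(
        id='ocid1.datatransferjob.oc1..exampleuniqueid',   # placeholder job OCID
        transfer_device_label='XFER-DEVICE-0001',          # placeholder label
        update_transfer_device_details=details,
        wait_for_states=['READY'],
        waiter_kwargs={'max_wait_seconds': 600},
    )
    print(device.data.lifecycle_state)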
| src/oci/dts/transfer_device_client_composite_operations.py | 3,520 | This class provides a wrapper around :py:class:`~oci.dts.TransferDeviceClient` and offers convenience methods
for operations that would otherwise need to be chained together. For example, instead of performing an action
on a resource (e.g. launching an instance, creating a load balancer) and then using a waiter to wait for the resource
to enter a given state, you can call a single method in this class to accomplish the same functionality
Creates a new TransferDeviceClientCompositeOperations object
:param TransferDeviceClient client:
The service client which will be wrapped by this object
Calls :py:func:`~oci.dts.TransferDeviceClient.update_transfer_device` and waits for the :py:class:`~oci.dts.models.TransferDevice` acted upon
to enter the given state(s).
:param str id: (required)
ID of the Transfer Job
:param str transfer_device_label: (required)
Label of the Transfer Device
:param oci.dts.models.UpdateTransferDeviceDetails update_transfer_device_details: (required)
fields to update
:param list[str] wait_for_states:
An array of states to wait on. These should be valid values for :py:attr:`~oci.dts.models.TransferDevice.lifecycle_state`
:param dict operation_kwargs:
A dictionary of keyword arguments to pass to :py:func:`~oci.dts.TransferDeviceClient.update_transfer_device`
:param dict waiter_kwargs:
A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
coding: utf-8 Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. noqa: F401 noqa: F401 | 2,013 | en | 0.739704 |
#!/usr/bin/env python
#
# Create daily QC HTML report
#
# USAGE : cbicqc_report.py <QA Directory>
#
# AUTHOR : Mike Tyszka
# PLACE : Caltech
# DATES : 09/25/2013 JMT From scratch
# 10/23/2013 JMT Add com external call
# 10/24/2013 JMT Move stats calcs to new cbicqc_stats.py
#
# This file is part of CBICQC.
#
# CBICQC is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# CBICQC is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CBICQC. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2013-2014 California Institute of Technology.
import os
import string
import argparse
from pylab import *
# Define template
TEMPLATE_FORMAT = """
<html>
<head>
<STYLE TYPE="text/css">
BODY {
font-family : arial, sans-serif;
}
td, th {
padding-left : 10px;
padding-right : 10px;
padding-top : 0px;
padding-bottom : 0px;
text-align : "left";
}
</STYLE>
</head>
<body>
<h1 style="background-color:#E0E0FF">CBIC In Vivo Quality Control</h1>
<div>
<table>
<tr>
<td> QC Directory
<td> $qc_dir_abs
</tr>
<tr>
<td> Repetition Time (seconds)
<td> $TR_secs
</tr>
<tr>
<td> Image Volumes
<td> $N_vols
</tr>
<tr style="background-color:#AFFF9F">
<td> <b> Median tSFNR </b>
<td> <b> $tSFNR </b>
</tr>
</table>
</div>
<br>
<div>
<table>
<tr>
<td> <b> Parameter </b>
<td> <b> tMean </b>
<td> <b> Threshold </b>
<td> <b> Percent Outliers </b>
</tr>
<tr>
<td> Signal
<td> $signal_tmean
<td> $signal_thresh
<td> $signal_pout%
</tr>
<tr>
<td> Nyquist Ghost
<td> $ghost_tmean
<td> $ghost_thresh
<td> $ghost_pout%
</tr>
<tr>
<td> Air
<td> $air_tmean
<td> $air_thresh
<td> $air_pout%
</tr>
<tr>
<td> DVARS
<td> $dvars_tmean
<td> $dvars_thresh
<td> $dvars_pout%
</tr>
<tr>
<td> F-F Displacement (microns)
<td> $dd_um_tmean
<td> $dd_um_thresh
<td> $dd_um_pout%
</tr>
<tr>
<td> F-F Rotation (mdeg)
<td> $dr_mdeg_tmean
<td> $dr_mdeg_thresh
<td> $dr_mdeg_pout%
</tr>
</table>
</div>
<br>
<div>
<table>
<tr>
<td> <br><b>Motion Timeseries</b><br> <img src=qc_motion_timeseries.png />
<td> <br><b>ROI Timeseries</b><br> <img src=qc_roi_timeseries.png />
<tr>
</table>
</div>
<div>
<table>
<tr>
<td> <br><b>Temporal Mean Signal</b><br> <img src=qc_tmean_ortho.png />
<td> <br><b>Fluctuation Noise SD</b><br> <img src=qc_tsd_ortho.png />
</tr>
<tr>
<td> <b>Temporal Signal-to-Fluctuation-Noise Ratio (SFNR)</b><br> <img src=qc_tsfnr_ortho.png />
<td> <br><b>Region Mask</b><br> <img src=qc_labels_ortho.png />
</tr>
</table>
</div>
</body>
"""
# Main function
def main():
# Parse command line arguments
parser = argparse.ArgumentParser(description='QC reporting for in vivo fMRI timeseries')
parser.add_argument('-i', '--qc_dir', required=True, help="CBICQClive directory (*.qclive)")
# Parse command line arguments
args = parser.parse_args()
qc_dir = args.qc_dir
print(' Creating in vivo QC report for ' + qc_dir)
# Determine full path for QC directory
qc_dir_abs = os.path.abspath(qc_dir)
# Load dataset info and stats from QC directory
info_fname = os.path.join(qc_dir, 'qc_info.csv')
info = genfromtxt(info_fname, delimiter=',')
# Create substitution dictionary for HTML report
qc_dict = dict([
('qc_dir_abs', "%s" % qc_dir_abs),
('TR_secs', "%0.3f" % info[1]),
('N_vols', "%d" % info[2]),
('tSFNR', "%0.1f" % info[3]),
('signal_tmean', "%0.1f" % info[4]),
('signal_thresh', "%0.1f" % info[5]),
('signal_pout', "%0.1f" % info[6]),
('ghost_tmean', "%0.1f" % info[7]),
('ghost_thresh', "%0.1f" % info[8]),
('ghost_pout', "%0.1f" % info[9]),
('air_tmean', "%0.1f" % info[10]),
('air_thresh', "%0.1f" % info[11]),
('air_pout', "%0.1f" % info[12]),
('dvars_tmean', "%0.1f" % info[13]),
('dvars_thresh', "%0.1f" % info[14]),
('dvars_pout', "%0.1f" % info[15]),
('dd_um_tmean', "%0.1f" % info[16]),
('dd_um_thresh', "%0.1f" % info[17]),
('dd_um_pout', "%0.1f" % info[18]),
('dr_mdeg_tmean', "%0.1f" % info[19]),
('dr_mdeg_thresh', "%0.1f" % info[20]),
('dr_mdeg_pout', "%0.1f" % info[21]),
])
# Generate HTML report from template (see above)
TEMPLATE = string.Template(TEMPLATE_FORMAT)
html_data = TEMPLATE.safe_substitute(qc_dict)
# Write HTML report page
qc_report_html = os.path.join(qc_dir, 'index.html')
open(qc_report_html, "w").write(html_data)
# This is the standard boilerplate that calls the main() function.
if __name__ == '__main__':
main()
| cbicqclive_report.py | 5,891 | !/usr/bin/env python Create daily QC HTML report USAGE : cbicqc_report.py <QA Directory> AUTHOR : Mike Tyszka PLACE : Caltech DATES : 09/25/2013 JMT From scratch 10/23/2013 JMT Add com external call 10/24/2013 JMT Move stats calcs to new cbicqc_stats.py This file is part of CBICQC. CBICQC is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. CBICQC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with CBICQC. If not, see <http://www.gnu.org/licenses/>. Copyright 2013-2014 California Institute of Technology. Define template Main function Parse command line arguments Parse command line arguments Determine full path for QC directory Load dataset info and stats from QC directory Create substitution dictionary for HTML report Generate HTML report from template (see above) Write HTML report page This is the standard boilerplate that calls the main() function. | 1,332 | en | 0.75281 |
from typing import Callable
import unittest
# test
from .pipe import pipe
class TestPipe(unittest.TestCase):
def test_pipe_should_return_a_function(self) -> None:
# given
def echo(x: str) -> str:
return f"echo {x}"
# when
output = pipe(echo)
# then
self.assertTrue(isinstance(output, Callable)) # type: ignore
def test_pipe_should_return_an_empty_string(self) -> None:
# given
def echo(x: str) -> str:
return f"echo {x}"
# when
param = "hello world"
output = pipe(echo)(param)
# then
self.assertEqual(output, f"echo {param}")
def test_pipe_should_pipe_two_function(self) -> None:
# given
def echo(x: str) -> str:
return f"echo {x}"
def grep() -> str:
return "grep world"
# when
param = "hello world"
output = pipe(echo, grep)(param)
# then
self.assertEqual(output, f"echo {param} | grep world")
| src/unshell/utils/test_pipe.py | 1,037 | test given when then type: ignore given when then given when then | 65 | en | 0.199673 |
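The tests above pin down the behaviour of pipe: the first command receives the argument, later commands are called with no arguments, and the resulting command strings are joined with " | ". A minimal sketch consistent with those tests (the actual unshell implementation may differ):
from typing import Callable


def pipe(*commands: Callable) -> Callable[[str], str]:
    """Compose shell-command builders into a single callable."""
    def run(arg: str) -> str:
        # The first builder gets the piped-in argument; the rest take none.
        parts = [commands[0](arg)] + [cmd() for cmd in commands[1:]]
        return " | ".join(parts)
    return run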
# -*- coding: utf-8 -*-
# Author:Qiujie Yao
# Email: yaoqiujie@gscopetech.com
# @Time: 2019-06-26 14:15
| VehicleInspection/apps/appointment/permissions.py | 106 | -*- coding: utf-8 -*- Author:Qiujie Yao Email: yaoqiujie@gscopetech.com @Time: 2019-06-26 14:15 | 95 | en | 0.378837 |
default_ids = [
[0x0E8D, 0x0003, -1], # MTK Brom
[0x0E8D, 0x6000, 2], # MTK Preloader
[0x0E8D, 0x2000, -1], # MTK Preloader
[0x0E8D, 0x2001, -1], # MTK Preloader
[0x0E8D, 0x20FF, -1], # MTK Preloader
[0x1004, 0x6000, 2], # LG Preloader
[0x22d9, 0x0006, -1], # OPPO Preloader
[0x0FCE, 0xF200, -1], # Sony Brom
]
| mtkclient/config/usb_ids.py | 343 | MTK Brom MTK Preloader MTK Preloader MTK Preloader MTK Preloader LG Preloader OPPO Preloader Sony Brom | 102 | en | 0.156257 |
########################################### Global Variables #################################
#sklearn pickled SGDClassifier where pre-trained clf.coef_ matrix is casted to a scipy.sparse.csr_matrix for efficiency and scalability
clf = None
#sklearn pickled TfidfVectorizer
vectorizer = None
#dictionary of labelid: (latitude, longitude) It is pre-computed as the median value of all training points in a region/cluster
label_coordinate = {}
#dictionary of (latitude,longitude):location (dictionary)
coordinate_address = {}
#check if model is loaded
model_loaded = False
#dictionary of hashed user name:(latitude, longitude) pre-trained by label propagation on TwitterWorld dataset
userhash_coordinate = {}
#check if lpworld model is loaded
lp_model_loaded = False
| WLM-WLMN/TextAnalyzer/TextAnalyzer/Pigeo/pigeo-master/params.py | 766 | Global Variables sklearn pickled SGDClassifier where pre-trained clf.coef_ matrix is casted to a scipy.sparse.csr_matrix for efficiency and scalabilitysklearn pickled TfidfVectorizerdictionary of labelid: (latitude, longitude) It is pre-computed as the median value of all training points in a region/clusterdictionary of (latitude,longitude):location (dictionary)check if model is loadeddictionary of hashed user name:(latitude, longitude) pre-trained by label propagation on TwitterWorld datasetcheck if lpworld model is loaded | 529 | en | 0.775621 |
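The comments above describe how the pre-trained pieces fit together at prediction time. The sketch below is hypothetical wiring based on those descriptions; the function name and flow are assumptions, not pigeo's actual API:
def geolocate(text):
    """Illustrative only: map a piece of text to a coordinate using the module globals."""
    X = vectorizer.transform([text])               # TF-IDF features
    label = clf.predict(X)[0]                      # sparse SGDClassifier region label
    lat, lon = label_coordinate[label]             # median point of that region/cluster
    address = coordinate_address.get((lat, lon))   # reverse-geocoded location dict, if present
    return (lat, lon), address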
"""
We recover the original divergence-free velocity field via
Ud,new = Ustar - Gphi
"""
import numpy
import pylab
import operator
def do_plots_c(Ud, Unew):
""" plot Ud,new and Ud with zoom on the bug """
pylab.clf()
pylab.cla()
f = pylab.figure()
f.text(.5, .95, r"$U_{\rm d}$ (left) and $U_{\rm d, new}$ (right) ", horizontalalignment='center')
pylab.subplot(221)
pylab.imshow(Ud[0])
pylab.ylabel("# of cells", size =8)
pylab.subplot(223)
pylab.imshow(Ud[1])
pylab.xlim(1,32)
pylab.xlabel("# of cells", size =8)
pylab.ylabel("# of cells", size =8)
pylab.subplot(222)
pylab.imshow(Unew[0])
pylab.ylabel("# of cells", size =8)
pylab.subplot(224)
pylab.imshow(Unew[1])
pylab.xlim(1,32)
pylab.xlabel("# of cells", size =8)
pylab.ylabel("# of cells", size =8)
pylab.savefig("plots/item_c_Udnew.png")
def doPartC(Ustar, phi_num, Ud, nx, ny, xmin, xmax, ymin, ymax, DO_PLOTS):
""" coordinates of centers """
dx = (xmax - xmin)/nx
dy = (ymax - ymin)/ny
""" calcuates the new gradient"""
Gphi = numpy.gradient(phi_num, dx, dy)
""" recover Ud, new """
Unew = map(operator.sub, Ustar,Gphi)
if (DO_PLOTS == 1):
do_plots_c(Ud, Unew)
return 0
| homework5_elliptic_PDES/part_c.py | 1,328 | coordinates of centers
plot Ud,new and Ud with zoom on the bug
We recover the original divergence-free velocity field via
Ud,new = Ustar - Gphi | 150 | en | 0.583578 |
"""Module containing sacred functions for handling ML models."""
import inspect
from sacred import Ingredient
from src import models
ingredient = Ingredient('model')
@ingredient.config
def cfg():
"""Model configuration."""
name = ''
parameters = {
}
@ingredient.named_config
def TopologicalSurrogateAutoencoder():
"""TopologicalSurrogateAutoencoder."""
name = 'TopologicalSurrogateAutoencoder'
parameters = {
'd_latent': 8*2*2,
'batch_size': 32,
'arch': [256, 256, 256, 256]
}
@ingredient.named_config
def Vanilla():
name = 'VanillaAutoencoderModel'
@ingredient.named_config
def VAE():
name = 'VanillaAutoencoderModel'
parameters = {
'autoencoder_model': 'MLPVAE'
}
@ingredient.named_config
def TopoReg():
name = 'TopologicallyRegularizedAutoencoder'
parameters = {
'toposig_kwargs': {
'sort_selected': False
}
}
@ingredient.named_config
def TopoRegSorted():
name = 'TopologicallyRegularizedAutoencoder'
parameters = {
'toposig_kwargs': {
'sort_selected': True
}
}
@ingredient.named_config
def TopoAE():
name = 'TopologicallyRegularizedAutoencoder'
parameters = {
'toposig_kwargs': {
'match_edges': 'symmetric'
},
'autoencoder_model': 'DeepAE',
'input_distance': 'l2'
}
@ingredient.named_config
def TopoAERandomConv():
name = 'TopologicallyRegularizedAutoencoder'
parameters = {
'toposig_kwargs': {
'match_edges': 'symmetric'
},
'autoencoder_model': 'DeepAE',
'input_distance': 'rp'
}
@ingredient.named_config
def TopoAEvgg():
name = 'TopologicallyRegularizedAutoencoder'
parameters = {
'toposig_kwargs': {
'match_edges': 'symmetric'
},
'autoencoder_model': 'DeepAE',
'input_distance': 'vgg'
}
@ingredient.named_config
def TopoAEOrtho():
name = 'TopologicallyRegularizedAutoencoder'
parameters = {
'toposig_kwargs': {
'match_edges': 'symmetric'
},
'input_distance': 'ortho'
}
@ingredient.named_config
def TopoAEOrthoSpheres():
name = 'TopologicallyRegularizedAutoencoder'
parameters = {
'toposig_kwargs': {
'match_edges': 'symmetric'
},
'autoencoder_model': 'MLPAutoencoder_Spheres',
'input_distance': 'ortho'
}
@ingredient.named_config
def TopoPCAOrtho():
name = 'TopologicallyRegularizedAutoencoder'
parameters = {
'toposig_kwargs': {
'match_edges': 'symmetric'
},
'autoencoder_model': 'LinearAEOrtho',
'input_distance': 'l2'
}
@ingredient.named_config
def TopoPCA():
name = 'TopologicallyRegularizedAutoencoder'
parameters = {
'toposig_kwargs': {
'match_edges': 'symmetric'
},
'autoencoder_model': 'LinearAE',
'input_distance': 'l2'
}
@ingredient.named_config
def TopoPCAOrthoSpheres():
name = 'TopologicallyRegularizedAutoencoder'
parameters = {
'toposig_kwargs': {
'match_edges': 'symmetric'
},
'autoencoder_model': 'LinearAEOrtho_Spheres',
'input_distance': 'l2'
}
@ingredient.named_config
def TopoPCASpheres():
name = 'TopologicallyRegularizedAutoencoder'
parameters = {
'toposig_kwargs': {
'match_edges': 'symmetric'
},
'autoencoder_model': 'LinearAE_Spheres',
'input_distance': 'l2'
}
@ingredient.named_config
def TopoAESpheres():
name = 'TopologicallyRegularizedAutoencoder'
parameters = {
'toposig_kwargs': {
'match_edges': 'symmetric'
},
'autoencoder_model': 'MLPAutoencoder_Spheres',
'input_distance': 'l2'
}
@ingredient.named_config
def TopoRegEdgeRandom():
name = 'TopologicallyRegularizedAutoencoder'
parameters = {
'toposig_kwargs': {
'match_edges': 'random'
}
}
@ingredient.capture
def get_instance(name, parameters, _log, _seed):
"""Get an instance of a model according to parameters in the configuration.
Also, check if the provided parameters fit to the signature of the model
class and log default values if not defined via the configuration.
"""
# Get the model class
model_cls = getattr(models, name)
# Inspect if the constructor specification fits with additional_parameters
signature = inspect.signature(model_cls)
available_parameters = signature.parameters
for key in parameters.keys():
if key not in available_parameters.keys():
# If a parameter is defined which does not fit to the constructor
# raise an error
raise ValueError(
f'{key} is not available in {name}\'s Constructor'
)
# Now check if optional parameters of the constructor are not defined
optional_parameters = list(available_parameters.keys())[4:]
for parameter_name in optional_parameters:
# Copy list beforehand, so we can manipulate the parameter dict in the
# loop
parameter_keys = list(parameters.keys())
if parameter_name not in parameter_keys:
if parameter_name != 'random_state':
# If an optional parameter is not defined warn and run with
# default
default = available_parameters[parameter_name].default
_log.warning(
f'Optional parameter {parameter_name} not explicitly '
f'defined, will run with {parameter_name}={default}'
)
else:
_log.info(
f'Passing seed of experiment to model parameter '
'`random_state`.'
)
parameters['random_state'] = _seed
return model_cls(**parameters)
| exp/ingredients/model.py | 5,965 | TopologicalSurrogateAutoencoder.
Model configuration.
Get an instance of a model according to parameters in the configuration.
Also, check if the provided parameters fit to the signature of the model
class and log default values if not defined via the configuration.
Module containing sacred functions for handling ML models.
Get the mode class Inspect if the constructor specification fits with additional_parameters If a parameter is defined which does not fit to the constructor raise an error Now check if optional parameters of the constructor are not defined Copy list beforehand, so we can manipulate the parameter dict in the loop If an optional parameter is not defined warn and run with default | 707 | en | 0.483371 |
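A hypothetical sketch of how this ingredient would typically be attached to a sacred Experiment; the experiment name and training-script layout are assumptions, only the import path mirrors exp/ingredients/model.py above:
from sacred import Experiment

from exp.ingredients import model as model_config  # assumed import path

ex = Experiment('train_model', ingredients=[model_config.ingredient])


@ex.automain
def train():
    # Captured function: `name`, `parameters`, `_log` and `_seed` are injected
    # from the active config, e.g. `python train.py with model.TopoAE`.
    model = model_config.get_instance()
    print(model)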
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains a Google Cloud Translate Hook.
"""
from google.cloud.translate_v2 import Client
from airflow.contrib.hooks.gcp_api_base_hook import GoogleCloudBaseHook
class CloudTranslateHook(GoogleCloudBaseHook):
"""
Hook for Google Cloud translate APIs.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
"""
_client = None
def __init__(self, gcp_conn_id='google_cloud_default'):
super().__init__(gcp_conn_id)
def get_conn(self):
"""
Retrieves connection to Cloud Translate
:return: Google Cloud Translate client object.
:rtype: Client
"""
if not self._client:
self._client = Client(credentials=self._get_credentials())
return self._client
def translate(
self, values, target_language, format_=None, source_language=None, model=None
):
"""Translate a string or list of strings.
See https://cloud.google.com/translate/docs/translating-text
:type values: str or list
:param values: String or list of strings to translate.
:type target_language: str
:param target_language: The language to translate results into. This
is required by the API and defaults to
the target language of the current instance.
:type format_: str
:param format_: (Optional) One of ``text`` or ``html``, to specify
if the input text is plain text or HTML.
:type source_language: str or None
:param source_language: (Optional) The language of the text to
be translated.
:type model: str or None
:param model: (Optional) The model used to translate the text, such
as ``'base'`` or ``'nmt'``.
:rtype: str or list
:returns: A list of dictionaries for each queried value. Each
dictionary typically contains three keys (though not
all will be present in all cases)
* ``detectedSourceLanguage``: The detected language (as an
ISO 639-1 language code) of the text.
* ``translatedText``: The translation of the text into the
target language.
* ``input``: The corresponding input value.
* ``model``: The model used to translate the text.
If only a single value is passed, then only a single
dictionary will be returned.
:raises: :class:`~exceptions.ValueError` if the number of
values and translations differ.
"""
client = self.get_conn()
return client.translate(
values=values,
target_language=target_language,
format_=format_,
source_language=source_language,
model=model,
)
| airflow/contrib/hooks/gcp_translate_hook.py | 3,820 | Hook for Google Cloud translate APIs.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
Retrieves connection to Cloud Translate
:return: Google Cloud Translate client object.
:rtype: Client
Translate a string or list of strings.
See https://cloud.google.com/translate/docs/translating-text
:type values: str or list
:param values: String or list of strings to translate.
:type target_language: str
:param target_language: The language to translate results into. This
is required by the API and defaults to
the target language of the current instance.
:type format_: str
:param format_: (Optional) One of ``text`` or ``html``, to specify
if the input text is plain text or HTML.
:type source_language: str or None
:param source_language: (Optional) The language of the text to
be translated.
:type model: str or None
:param model: (Optional) The model used to translate the text, such
as ``'base'`` or ``'nmt'``.
:rtype: str or list
:returns: A list of dictionaries for each queried value. Each
dictionary typically contains three keys (though not
all will be present in all cases)
* ``detectedSourceLanguage``: The detected language (as an
ISO 639-1 language code) of the text.
* ``translatedText``: The translation of the text into the
target language.
* ``input``: The corresponding input value.
* ``model``: The model used to translate the text.
If only a single value is passed, then only a single
dictionary will be returned.
:raises: :class:`~exceptions.ValueError` if the number of
values and translations differ.
This module contains a Google Cloud Translate Hook.
-*- coding: utf-8 -*- Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 2,638 | en | 0.715029 |
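A minimal usage sketch of the hook, assuming an Airflow deployment with a configured google_cloud_default connection; the values and languages are illustrative:
from airflow.contrib.hooks.gcp_translate_hook import CloudTranslateHook

hook = CloudTranslateHook(gcp_conn_id='google_cloud_default')
result = hook.translate(
    values=['The quick brown fox'],
    target_language='de',
    format_='text',
    model='nmt',
)
# Each entry is a dict with 'translatedText', 'input', 'model' and, when the
# source language is not given, 'detectedSourceLanguage'.
print(result[0]['translatedText'])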
from os import path as op
import numpy as np
from numpy.polynomial import legendre
from numpy.testing import (assert_allclose, assert_array_equal, assert_equal,
assert_array_almost_equal)
from scipy.interpolate import interp1d
import pytest
import mne
from mne.forward import _make_surface_mapping, make_field_map
from mne.forward._lead_dots import (_comp_sum_eeg, _comp_sums_meg,
_get_legen_table, _do_cross_dots)
from mne.forward._make_forward import _create_meg_coils
from mne.forward._field_interpolation import _setup_dots
from mne.surface import get_meg_helmet_surf, get_head_surf
from mne.datasets import testing
from mne import read_evokeds, pick_types, make_fixed_length_events, Epochs
from mne.io import read_raw_fif
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
evoked_fname = op.join(base_dir, 'test-ave.fif')
raw_ctf_fname = op.join(base_dir, 'test_ctf_raw.fif')
data_path = testing.data_path(download=False)
trans_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-trans.fif')
subjects_dir = op.join(data_path, 'subjects')
@testing.requires_testing_data
def test_field_map_ctf():
"""Test that field mapping can be done with CTF data."""
raw = read_raw_fif(raw_ctf_fname).crop(0, 1)
raw.apply_gradient_compensation(3)
events = make_fixed_length_events(raw, duration=0.5)
evoked = Epochs(raw, events).average()
evoked.pick_channels(evoked.ch_names[:50]) # crappy mapping but faster
# smoke test
make_field_map(evoked, trans=trans_fname, subject='sample',
subjects_dir=subjects_dir)
def test_legendre_val():
"""Test Legendre polynomial (derivative) equivalence."""
rng = np.random.RandomState(0)
# check table equiv
xs = np.linspace(-1., 1., 1000)
n_terms = 100
# True, numpy
vals_np = legendre.legvander(xs, n_terms - 1)
# Table approximation
for nc, interp in zip([100, 50], ['nearest', 'linear']):
lut, n_fact = _get_legen_table('eeg', n_coeff=nc, force_calc=True)
lut_fun = interp1d(np.linspace(-1, 1, lut.shape[0]), lut, interp,
axis=0)
vals_i = lut_fun(xs)
# Need a "1:" here because we omit the first coefficient in our table!
assert_allclose(vals_np[:, 1:vals_i.shape[1] + 1], vals_i,
rtol=1e-2, atol=5e-3)
# Now let's look at our sums
ctheta = rng.rand(20, 30) * 2.0 - 1.0
beta = rng.rand(20, 30) * 0.8
c1 = _comp_sum_eeg(beta.flatten(), ctheta.flatten(), lut_fun, n_fact)
c1.shape = beta.shape
# compare to numpy
n = np.arange(1, n_terms, dtype=float)[:, np.newaxis, np.newaxis]
coeffs = np.zeros((n_terms,) + beta.shape)
coeffs[1:] = (np.cumprod([beta] * (n_terms - 1), axis=0) *
(2.0 * n + 1.0) * (2.0 * n + 1.0) / n)
# can't use tensor=False here b/c it isn't in old numpy
c2 = np.empty((20, 30))
for ci1 in range(20):
for ci2 in range(30):
c2[ci1, ci2] = legendre.legval(ctheta[ci1, ci2],
coeffs[:, ci1, ci2])
assert_allclose(c1, c2, 1e-2, 1e-3) # close enough...
# compare fast and slow for MEG
ctheta = rng.rand(20 * 30) * 2.0 - 1.0
beta = rng.rand(20 * 30) * 0.8
lut, n_fact = _get_legen_table('meg', n_coeff=10, force_calc=True)
fun = interp1d(np.linspace(-1, 1, lut.shape[0]), lut, 'nearest', axis=0)
coeffs = _comp_sums_meg(beta, ctheta, fun, n_fact, False)
lut, n_fact = _get_legen_table('meg', n_coeff=20, force_calc=True)
fun = interp1d(np.linspace(-1, 1, lut.shape[0]), lut, 'linear', axis=0)
coeffs = _comp_sums_meg(beta, ctheta, fun, n_fact, False)
def test_legendre_table():
"""Test Legendre table calculation."""
# double-check our table generation
n = 10
for ch_type in ['eeg', 'meg']:
lut1, n_fact1 = _get_legen_table(ch_type, n_coeff=25, force_calc=True)
lut1 = lut1[:, :n - 1].copy()
n_fact1 = n_fact1[:n - 1].copy()
lut2, n_fact2 = _get_legen_table(ch_type, n_coeff=n, force_calc=True)
assert_allclose(lut1, lut2)
assert_allclose(n_fact1, n_fact2)
@testing.requires_testing_data
def test_make_field_map_eeg():
"""Test interpolation of EEG field onto head."""
evoked = read_evokeds(evoked_fname, condition='Left Auditory')
evoked.info['bads'] = ['MEG 2443', 'EEG 053'] # add some bads
surf = get_head_surf('sample', subjects_dir=subjects_dir)
# we must have trans if surface is in MRI coords
pytest.raises(ValueError, _make_surface_mapping, evoked.info, surf, 'eeg')
evoked.pick_types(meg=False, eeg=True)
fmd = make_field_map(evoked, trans_fname,
subject='sample', subjects_dir=subjects_dir)
# trans is necessary for EEG only
pytest.raises(RuntimeError, make_field_map, evoked, None,
subject='sample', subjects_dir=subjects_dir)
fmd = make_field_map(evoked, trans_fname,
subject='sample', subjects_dir=subjects_dir)
assert len(fmd) == 1
assert_array_equal(fmd[0]['data'].shape, (642, 59)) # maps data onto surf
assert len(fmd[0]['ch_names']) == 59
@testing.requires_testing_data
@pytest.mark.slowtest
def test_make_field_map_meg():
"""Test interpolation of MEG field onto helmet | head."""
evoked = read_evokeds(evoked_fname, condition='Left Auditory')
info = evoked.info
surf = get_meg_helmet_surf(info)
# let's reduce the number of channels by a bunch to speed it up
info['bads'] = info['ch_names'][:200]
# bad ch_type
pytest.raises(ValueError, _make_surface_mapping, info, surf, 'foo')
# bad mode
pytest.raises(ValueError, _make_surface_mapping, info, surf, 'meg',
mode='foo')
# no picks
evoked_eeg = evoked.copy().pick_types(meg=False, eeg=True)
pytest.raises(RuntimeError, _make_surface_mapping, evoked_eeg.info,
surf, 'meg')
# bad surface def
nn = surf['nn']
del surf['nn']
pytest.raises(KeyError, _make_surface_mapping, info, surf, 'meg')
surf['nn'] = nn
cf = surf['coord_frame']
del surf['coord_frame']
pytest.raises(KeyError, _make_surface_mapping, info, surf, 'meg')
surf['coord_frame'] = cf
# now do it with make_field_map
evoked.pick_types(meg=True, eeg=False)
evoked.info.normalize_proj() # avoid projection warnings
fmd = make_field_map(evoked, None,
subject='sample', subjects_dir=subjects_dir)
assert (len(fmd) == 1)
assert_array_equal(fmd[0]['data'].shape, (304, 106)) # maps data onto surf
assert len(fmd[0]['ch_names']) == 106
pytest.raises(ValueError, make_field_map, evoked, ch_type='foobar')
# now test the make_field_map on head surf for MEG
evoked.pick_types(meg=True, eeg=False)
evoked.info.normalize_proj()
fmd = make_field_map(evoked, trans_fname, meg_surf='head',
subject='sample', subjects_dir=subjects_dir)
assert len(fmd) == 1
assert_array_equal(fmd[0]['data'].shape, (642, 106)) # maps data onto surf
assert len(fmd[0]['ch_names']) == 106
pytest.raises(ValueError, make_field_map, evoked, meg_surf='foobar',
subjects_dir=subjects_dir, trans=trans_fname)
@testing.requires_testing_data
def test_make_field_map_meeg():
"""Test making a M/EEG field map onto helmet & head."""
evoked = read_evokeds(evoked_fname, baseline=(-0.2, 0.0))[0]
picks = pick_types(evoked.info, meg=True, eeg=True)
picks = picks[::10]
evoked.pick_channels([evoked.ch_names[p] for p in picks])
evoked.info.normalize_proj()
maps = make_field_map(evoked, trans_fname, subject='sample',
subjects_dir=subjects_dir, verbose='debug')
assert_equal(maps[0]['data'].shape, (642, 6)) # EEG->Head
assert_equal(maps[1]['data'].shape, (304, 31)) # MEG->Helmet
# reasonable ranges
maxs = (1.2, 2.0) # before #4418, was (1.1, 2.0)
mins = (-0.8, -1.3) # before #4418, was (-0.6, -1.2)
assert_equal(len(maxs), len(maps))
for map_, max_, min_ in zip(maps, maxs, mins):
assert_allclose(map_['data'].max(), max_, rtol=5e-2)
assert_allclose(map_['data'].min(), min_, rtol=5e-2)
# calculated from correct looking mapping on 2015/12/26
assert_allclose(np.sqrt(np.sum(maps[0]['data'] ** 2)), 19.0903, # 16.6088,
atol=1e-3, rtol=1e-3)
assert_allclose(np.sqrt(np.sum(maps[1]['data'] ** 2)), 19.4748, # 20.1245,
atol=1e-3, rtol=1e-3)
def _setup_args(info):
"""Configure args for test_as_meg_type_evoked."""
coils = _create_meg_coils(info['chs'], 'normal', info['dev_head_t'])
int_rad, _, lut_fun, n_fact = _setup_dots('fast', info, coils, 'meg')
my_origin = np.array([0., 0., 0.04])
args_dict = dict(intrad=int_rad, volume=False, coils1=coils, r0=my_origin,
ch_type='meg', lut=lut_fun, n_fact=n_fact)
return args_dict
@testing.requires_testing_data
def test_as_meg_type_evoked():
"""Test interpolation of data on to virtual channels."""
# validation tests
raw = read_raw_fif(raw_fname)
events = mne.find_events(raw)
picks = pick_types(raw.info, meg=True, eeg=True, stim=True,
ecg=True, eog=True, include=['STI 014'],
exclude='bads')
epochs = mne.Epochs(raw, events, picks=picks)
evoked = epochs.average()
with pytest.raises(ValueError, match="Invalid value for the 'ch_type'"):
evoked.as_type('meg')
with pytest.raises(ValueError, match="Invalid value for the 'ch_type'"):
evoked.copy().pick_types(meg='grad').as_type('meg')
# channel names
ch_names = evoked.info['ch_names']
virt_evoked = evoked.copy().pick_channels(ch_names=ch_names[:10:1])
virt_evoked.info.normalize_proj()
virt_evoked = virt_evoked.as_type('mag')
assert (all(ch.endswith('_v') for ch in virt_evoked.info['ch_names']))
# pick from and to channels
evoked_from = evoked.copy().pick_channels(ch_names=ch_names[2:10:3])
evoked_to = evoked.copy().pick_channels(ch_names=ch_names[0:10:3])
info_from, info_to = evoked_from.info, evoked_to.info
# set up things
args1, args2 = _setup_args(info_from), _setup_args(info_to)
args1.update(coils2=args2['coils1'])
args2.update(coils2=args1['coils1'])
# test cross dots
cross_dots1 = _do_cross_dots(**args1)
cross_dots2 = _do_cross_dots(**args2)
assert_array_almost_equal(cross_dots1, cross_dots2.T)
# correlation test
evoked = evoked.pick_channels(ch_names=ch_names[:10:]).copy()
data1 = evoked.pick_types(meg='grad').data.ravel()
data2 = evoked.as_type('grad').data.ravel()
assert (np.corrcoef(data1, data2)[0, 1] > 0.95)
# Do it with epochs
virt_epochs = \
epochs.copy().load_data().pick_channels(ch_names=ch_names[:10:1])
virt_epochs.info.normalize_proj()
virt_epochs = virt_epochs.as_type('mag')
assert (all(ch.endswith('_v') for ch in virt_epochs.info['ch_names']))
assert_allclose(virt_epochs.get_data().mean(0), virt_evoked.data)
| mne/forward/tests/test_field_interpolation.py | 11,370 | Configure args for test_as_meg_type_evoked.
Test interpolation of data on to virtual channels.
Test that field mapping can be done with CTF data.
Test Legendre table calculation.
Test Legendre polynomial (derivative) equivalence.
Test interpolation of EEG field onto head.
Test making a M/EEG field map onto helmet & head.
Test interpolation of MEG field onto helmet | head.
crappy mapping but faster smoke test check table equiv True, numpy Table approximation Need a "1:" here because we omit the first coefficient in our table! Now let's look at our sums compare to numpy can't use tensor=False here b/c it isn't in old numpy close enough... compare fast and slow for MEG double-check our table generation add some bads we must have trans if surface is in MRI coords trans is necessary for EEG only maps data onto surf let's reduce the number of channels by a bunch to speed it up bad ch_type bad mode no picks bad surface def now do it with make_field_map avoid projection warnings maps data onto surf now test the make_field_map on head surf for MEG maps data onto surf EEG->Head MEG->Helmet reasonable ranges before 4418, was (1.1, 2.0) before 4418, was (-0.6, -1.2) calculated from correct looking mapping on 2015/12/26 16.6088, 20.1245, validation tests channel names pick from and to channels set up things test cross dots correlation test Do it with epochs | 1,368 | en | 0.815386 |