content stringlengths 27 928k | path stringlengths 4 230 | size int64 27 928k | nl_text stringlengths 21 396k | nl_size int64 21 396k | nl_language stringlengths 2 3 | nl_language_score float64 0.04 1 |
|---|---|---|---|---|---|---|
#!/usr/bin/env python
"""Tests for the export mechanisms of tulip.dumpsmach."""
from __future__ import print_function
import logging
import networkx as nx
from nose.tools import assert_raises
from tulip import spec, synth, dumpsmach
# Silence noisy dependency loggers so test output stays readable.
logging.getLogger('tulip').setLevel('ERROR')
logging.getLogger('astutils').setLevel('ERROR')
logging.getLogger('omega').setLevel('ERROR')
class basic_test(object):
    """Exercise dumpsmach.python_case on several synthesized strategies."""

    def setUp(self):
        # Trivial GR(1) spec: one Boolean env var, one Boolean sys var.
        self.triv = spec.GRSpec(
            env_vars="x", sys_vars="y",
            env_init="x & y", env_prog="x",
            sys_init="y", sys_prog="y && x")
        self.triv_M = synth.synthesize(self.triv, solver='omega')
        # Integer-valued counter specification.
        self.dcounter = spec.GRSpec(
            sys_vars={"y": (0, 5)},
            env_init=['y = 0'],
            sys_prog=["y=0", "y=5"])
        self.dcounter_M = synth.synthesize(self.dcounter, solver='omega')
        # Enumerated (string-valued) variable specification.
        self.enumf = spec.GRSpec(
            sys_vars={'y': ['a', 'b']},
            env_init=['y="a"'],
            sys_safety=['y = "a" -> X(y = "b")',
                        'y = "b" -> X(y = "a")'])
        self.enumf_M = synth.synthesize(self.enumf, solver='omega')

    def tearDown(self):
        self.dcounter = None
        self.dcounter_M = None

    def test_python_case(self):
        # The exported code must at least be valid Python.
        compile(dumpsmach.python_case(self.triv_M),
                filename="<string>", mode="exec")
        # print(dumpsmach.python_case(self.dcounter_M))
        compile(dumpsmach.python_case(self.dcounter_M),
                filename="<string>", mode="exec")
        # For the enumerated machine, also instantiate and take one step.
        source = (dumpsmach.python_case(self.enumf_M)
                  + '\nM = TulipStrategy(); M.move()')
        exec(compile(source, filename="<string>", mode="exec"))
def test_nx():
    """Dump a hand-built strategy graph and drive the generated class."""
    g = nx.DiGraph()
    g.inputs = {'a': '...', 'b': '...'}
    g.outputs = {'c': '...', 'd': '...'}
    initial = 'Sinit'
    g.add_edge(initial, 0, a=0, b=0, c=0, d=0)
    g.add_edge(0, 1, a=0, b=1, c=0, d=1)
    g.add_edge(1, 2, a=1, b=0, c=1, d=1)
    print(dumpsmach.python_case(g, classname='Machine', start='Sinit'))
    namespace = dict()
    # executing the generated code defines the class `Machine`
    exec(dumpsmach.python_case(g, classname='Machine', start='Sinit'),
         namespace)
    machine = namespace['Machine']()
    # Sinit -> 0
    assert machine.move(a=0, b=0) == dict(c=0, d=0)
    # 0 -> 1
    assert machine.move(a=0, b=1) == dict(c=0, d=1)
    # invalid input for index 2 in time sequence
    with assert_raises(ValueError):
        machine.move(a=1, b=1)
    # 1 -> 2
    assert machine.move(a=1, b=0) == dict(c=1, d=1)
    # dead-end: state 2 has no outgoing transition
    with assert_raises(Exception):
        machine.move(a=1, b=0)
| tests/dumpsmach_test.py | 2,695 | Tests for the export mechanisms of tulip.dumpsmach.
!/usr/bin/env python print(dumpsmach.python_case(self.dcounter_M)) previous line creates the class `Machine` Sinit -> 0 0 -> 1 invalid input for index 2 in time sequence 1 -> 2 dead-end | 238 | en | 0.509687 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script. Based on Jeff Knupp's Demo + Cookiecutter"""
import io
import os
from setuptools import setup, find_packages
def read(*filenames, **kwargs):
    """Concatenate the contents of *filenames*, joined by ``sep``.

    Keyword options: ``encoding`` (default 'utf-8') and ``sep``
    (default newline).
    """
    encoding = kwargs.get('encoding', 'utf-8')
    sep = kwargs.get('sep', '\n')
    chunks = []
    for name in filenames:
        with io.open(name, encoding=encoding) as handle:
            chunks.append(handle.read())
    return sep.join(chunks)
# Package identity constants reused throughout the setup() call below.
NAME = 'tiingo'
AUTHOR = "Cameron Yick"
EMAIL = 'cameron.yick@gmail.com'
URL = 'https://github.com/hydrosquall/tiingo-python'
DESCRIPTION = "REST Client for Tiingo Data Platform API"
# Long description shown on PyPI: README followed by the changelog.
LONG_DESCRIPTION = read('README.rst', 'HISTORY.rst')
# Runtime dependencies.
requirements = [
    'requests',
]
# Build-time dependencies (pytest-runner integration for `setup.py test`).
setup_requirements = [
    'pytest-runner',
]
# Test-only dependencies (vcrpy records/replays HTTP fixtures).
test_requirements = [
    'pytest',
    'vcrpy',
]
# Metadata about the module
# Load the package's __version__.py module as a dictionary.
# Via https://github.com/kennethreitz/setup.py/blob/master/setup.py
here = os.path.abspath(os.path.dirname(__file__))
about = {}
with open(os.path.join(here, NAME, '__version__.py')) as f:
    # exec of a file we ship ourselves; standard single-source-version idiom.
    exec(f.read(), about)
setup(
    name=NAME,
    version=about['__version__'],
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    author=AUTHOR,
    author_email=EMAIL,
    url=URL,
    packages=find_packages(include=[NAME]),
    include_package_data=True,
    install_requires=requirements,
    # Optional extra: `pip install tiingo[pandas]` enables DataFrame output.
    extras_require={'pandas': ['pandas>=0.18']},
    license="MIT license",
    zip_safe=False,
    keywords=['tiingo', 'finance', 'stocks', 'rest'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Intended Audience :: Financial and Insurance Industry',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Topic :: Office/Business :: Financial :: Investment',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
    test_suite='tests',
    tests_require=test_requirements,
    setup_requires=setup_requirements,
)
| setup.py | 2,192 | The setup script. Based on Jeff Knupp's Demo + Cookiecutter
!/usr/bin/env python -*- coding: utf-8 -*- Metadata about the module Load the package's __version__.py module as a dictionary. Via https://github.com/kennethreitz/setup.py/blob/master/setup.py | 253 | en | 0.678242 |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/models/models.ccpm.ipynb (unless otherwise specified).
# Public API of this generated module.
__all__ = ['CCPM']
# Cell
import torch
from torch import nn
from .layers.embedding import EmbeddingLayer
from .layers.common import KMaxPooling
from .bases.ctr import CTRModel
# Internal Cell
def get_activation(activation):
    """Resolve *activation* (a name string or an nn.Module) to a module.

    Recognizes 'relu'/'sigmoid'/'tanh' case-insensitively; any other
    string is looked up as an attribute of ``torch.nn`` and instantiated.
    Non-string inputs are returned unchanged.
    """
    if not isinstance(activation, str):
        return activation
    lowered = activation.lower()
    if lowered == "relu":
        return nn.ReLU()
    if lowered == "sigmoid":
        return nn.Sigmoid()
    if lowered == "tanh":
        return nn.Tanh()
    return getattr(nn, activation)()
# Internal Cell
class CCPM_ConvLayer(nn.Module):
    """Stack of zero-padded convolutions, each followed by k-max pooling
    and an activation.

    Input X: tensor of shape (batch_size, 1, num_fields, embedding_dim)
    """
    def __init__(self, num_fields, channels=[3], kernel_heights=[3], activation="Tanh"):
        super(CCPM_ConvLayer, self).__init__()
        if not isinstance(kernel_heights, list):
            kernel_heights = [kernel_heights] * len(channels)
        elif len(kernel_heights) != len(channels):
            raise ValueError("channels={} and kernel_heights={} should have the same length."
                             .format(channels, kernel_heights))
        layers = len(kernel_heights)
        self.channels = [1] + channels
        modules = []
        for i in range(1, len(self.channels)):
            kernel_height = kernel_heights[i - 1]
            # Pad the field axis (top/bottom) so the conv output grows with
            # the kernel rather than shrinking.
            modules.append(nn.ZeroPad2d((0, 0, kernel_height - 1, kernel_height - 1)))
            modules.append(nn.Conv2d(self.channels[i - 1], self.channels[i],
                                     kernel_size=(kernel_height, 1)))
            if i < layers:
                # Intermediate layers keep a decreasing-but-at-least-3 top-k.
                k = max(3, int((1 - pow(float(i) / layers, layers - i)) * num_fields))
            else:
                k = 3  # the final layer always pools to k = 3
            modules.append(KMaxPooling(k, dim=2))
            modules.append(get_activation(activation))
        self.conv_layer = nn.Sequential(*modules)

    def forward(self, X):
        return self.conv_layer(X)
# Cell
class CCPM(CTRModel):
    """Convolutional Click Prediction Model (CCPM) head for CTR tasks.

    Embeds each feature field, runs the CCPM convolution stack over the
    field axis, and maps the flattened result to a single logit.
    """
    def __init__(self,
                 feature_map,
                 model_id="CCPM",
                 task="binary_classification",
                 # NOTE(review): learning_rate is accepted but not used in
                 # this constructor — presumably consumed elsewhere; verify.
                 learning_rate=1e-3,
                 embedding_initializer="torch.nn.init.normal_(std=1e-4)",
                 embedding_dim=10,
                 channels=[4, 4, 2],
                 kernel_heights=[6, 5, 3],
                 activation="Tanh",
                 **kwargs):
        super(CCPM, self).__init__(feature_map,
                                   model_id=model_id,
                                   **kwargs)
        self.embedding_layer = EmbeddingLayer(feature_map, embedding_dim)
        self.conv_layer = CCPM_ConvLayer(feature_map.num_fields,
                                         channels=channels,
                                         kernel_heights=kernel_heights,
                                         activation=activation)
        conv_out_dim = 3 * embedding_dim * channels[-1]  # 3 is k-max-pooling size of the last layer
        self.fc = nn.Linear(conv_out_dim, 1)
        self.output_activation = self.get_final_activation(task)
        self.init_weights(embedding_initializer=embedding_initializer)

    def forward(self, inputs):
        feature_emb = self.embedding_layer(inputs)
        conv_in = torch.unsqueeze(feature_emb, 1)  # shape (bs, 1, field, emb)
        conv_out = self.conv_layer(conv_in)
        flatten_out = torch.flatten(conv_out, start_dim=1)
        y_pred = self.fc(flatten_out)
        if self.output_activation is not None:
            y_pred = self.output_activation(y_pred)
        return y_pred
AUTOGENERATED! DO NOT EDIT! File to edit: nbs/models/models.ccpm.ipynb (unless otherwise specified). Cell Internal Cell Internal Cell Cell 3 is k-max-pooling size of the last layer shape (bs, 1, field, emb) | 276 | en | 0.613783 |
# exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
# Declare the model object, then each molecular species (Monomer) with its
# binding/state sites. Generated code — edit the exporting model instead.
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd'])
Monomer('C3pro', ['Apop'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU'])
Monomer('ApafA')
Monomer('BidM', ['BaxM'])
Monomer('Receptor', ['Ligand', 'Fadd'])
# Kinetic rate constants (all currently 1.0): *_2kf = forward (bimolecular),
# *_1kr = reverse, *_1kc = catalytic step.
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
# Initial species amounts referenced by the Initial() declarations below.
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 28000.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
# One Observable per monomer, tracking its total amount in any bond state.
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
# Reaction rules. In PySB rule syntax '|' declares a reversible rule
# (forward + reverse rates) and '>>' an irreversible one; '%' binds
# monomers into a complex via matching integer bond numbers.
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None) + BidU(C8A=None) | C8A(BidU=1) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1) % BidU(C8A=1) >> C8A(BidU=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
# BaxA pore assembly: dimer, trimer, then tetramer via BaxA_1/BaxA_2 bonds.
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
# Transport of mitochondrial Smac/CytoC through the tetrameric BaxA pore.
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
# Initial conditions: pair each unbound species pattern with its *_0 amount.
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None), C8pro_0)
Initial(C3pro(Apop=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
| log_mito/model_112.py | 16,078 | exported from PySB model 'model' | 32 | en | 0.742345 |
# Copyright 2011 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova.openstack.common import log as logging
# Module logger (not referenced elsewhere in this file).
LOG = logging.getLogger(__name__)
# Extension alias; used to build policy action names.
ALIAS = "os-pause-server"
def authorize(context, action_name):
    """Enforce the ``v3:os-pause-server:<action_name>`` policy on *context*."""
    policy_action = 'v3:%s:%s' % (ALIAS, action_name)
    extensions.extension_authorizer('compute', policy_action)(context)
class PauseServerController(wsgi.Controller):
    """Controller exposing the ``pause``/``unpause`` server actions.

    The two actions differ only in the compute API method invoked and the
    policy/action name, so both delegate to one shared helper.
    """

    def __init__(self, *args, **kwargs):
        super(PauseServerController, self).__init__(*args, **kwargs)
        self.compute_api = compute.API()

    def _do_state_change(self, req, id, action):
        """Authorize and run ``compute_api.<action>`` on server *id*.

        :param req: wsgi request carrying the nova context
        :param id: server UUID
        :param action: 'pause' or 'unpause'; also the policy action name
        :returns: 202 response on success
        :raises HTTPConflict: instance locked or in an invalid state
        :raises HTTPNotFound: instance disappeared between lookup and action
        """
        ctxt = req.environ['nova.context']
        authorize(ctxt, action)
        server = common.get_instance(self.compute_api, ctxt, id,
                                     want_objects=True)
        try:
            getattr(self.compute_api, action)(ctxt, server)
        except exception.InstanceIsLocked as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                                                                  action)
        except exception.InstanceNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        return webob.Response(status_int=202)

    @extensions.expected_errors((404, 409))
    @wsgi.action('pause')
    def _pause(self, req, id, body):
        """Permit Admins to pause the server."""
        return self._do_state_change(req, id, 'pause')

    @extensions.expected_errors((404, 409))
    @wsgi.action('unpause')
    def _unpause(self, req, id, body):
        """Permit Admins to unpause the server."""
        return self._do_state_change(req, id, 'unpause')
class PauseServer(extensions.V3APIExtensionBase):
    """Enable pause/unpause server actions."""

    name = "PauseServer"
    alias = ALIAS
    version = 1

    def get_controller_extensions(self):
        """Attach the pause controller to the ``servers`` resource."""
        return [extensions.ControllerExtension(
            self, 'servers', PauseServerController())]

    def get_resources(self):
        """This extension adds no new top-level resources."""
        return []
| nova/api/openstack/compute/plugins/v3/pause_server.py | 3,430 | Enable pause/unpause server actions.
Permit Admins to pause the server.
Permit Admins to unpause the server.
Copyright 2011 OpenStack Foundation Copyright 2013 IBM Corp. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 711 | en | 0.81833 |
# coding=utf-8
import websocket
import datetime
import csv
import time
import logging
import redis
import json
import copy
import pytz
from hftcoin.mdagent.ccws.configs import REDIS_HOST
from hftcoin.mdagent.ccws.configs import TIMEZONE
from hftcoin.mdagent.ccws.configs import ExConfigs
from hftcoin.mdagent.ccws.configs import HOME_PATH
class Exchange(object):
    """Base class for one exchange's market-data websocket agent.

    Subclasses set ``ExchangeId``; this base wires up logging, per-market
    config lookup, the reconnecting websocket loop, Redis buffering, CSV
    persistence, and order-book maintenance helpers.
    """
    ExchangeId = ''             # overridden by subclasses
    WebSocketConnection = None  # websocket.WebSocketApp once running
    RedisConnection = None      # redis.StrictRedis once connected

    def __init__(self):
        self.Logger = logging.getLogger(self.ExchangeId)
        # ExConfigs maps exchange id -> [per-market config mapping, ws URL].
        [self.ExConfig, self._WebSocketAddress] = ExConfigs[self.ExchangeId]
        self.Config = {}

    def set_market(self, currency, mode):
        """Select the active (currency, mode) market and rebind the logger."""
        self.Config = self.ExConfig[currency][mode]
        self.Logger = logging.getLogger('%s.%s.%s' % (self.ExchangeId, currency, mode))

    def run_websocketapp(self, **kwargs):
        """Open the websocket and reconnect forever on any failure.

        Callers may override handlers via ``on_error``/``on_close``/
        ``on_message`` kwargs and append to the URL via ``url_append``.
        """
        self.Logger.info('Begin Connection')
        url = self._WebSocketAddress + kwargs.pop('url_append', '')
        on_error = kwargs.pop('on_error', self.on_error)
        on_close = kwargs.pop('on_close', self.on_close)
        on_message = kwargs.pop('on_message', self.on_message)
        self.WebSocketConnection = websocket.WebSocketApp(
            url,
            on_error=on_error,
            on_close=on_close,
            on_message=on_message,
            **kwargs,
        )
        while True:
            try:
                self.WebSocketConnection.run_forever()
            except Exception as e:
                self.Logger.exception(e)

    def on_message(self, _ws, msg):
        # Stamp arrival time in ms and buffer the raw payload in Redis.
        ts = int(time.time()*1000)
        rdk = self.Config['RedisCollectKey']
        # self.Logger.debug(msg)
        self.RedisConnection.lpush(rdk, json.dumps([ts, msg]))

    def on_error(self, _ws, error):
        self.Logger.exception(error)

    def on_close(self, _ws):
        self.Logger.info('Connection closed.')

    def connect_redis(self):
        """Connect to Redis and verify the connection with a ping."""
        try:
            self.RedisConnection = redis.StrictRedis(host=REDIS_HOST)
            self.RedisConnection.ping()
        except Exception as e:
            self.Logger.exception(e)

    def write_data_csv(self):
        """Drain the Redis output list into date-partitioned CSV files."""
        self.connect_redis()
        [fn, rdk] = [self.Config.get(item) for item in ['FileName', 'RedisOutputKey']]
        error_count = 100  # give up after 100 unexpected failures
        while True:
            try:
                if self.RedisConnection.llen(rdk) > 0:
                    data = json.loads(self.RedisConnection.rpop(rdk).decode('utf8'))
                    # data[1] is timestamp
                    # NOTE(review): on_message pushes [ts, msg] with ts at
                    # index 0; presumably records on the *output* key carry
                    # the timestamp at index 1 — confirm against the
                    # processing handlers.
                    dt = datetime.datetime.fromtimestamp(data[1] / 1000, TIMEZONE)
                    calendar_path = '%4d/%02d/%02d' % (dt.year, dt.month, dt.day)
                    with open('%s/%s/%s' % (HOME_PATH, calendar_path, fn), 'a+') as csvFile:
                        csvwriter = csv.writer(csvFile)
                        csvwriter.writerow(data)
                else:
                    # Output queue empty: back off before polling again.
                    time.sleep(60)
            except RuntimeWarning:
                # NOTE(review): RuntimeWarning appears to be used as a
                # deliberate stop signal — confirm who raises it.
                break
            except Exception as e:
                self.Logger.exception(e)
                error_count -= 1
                if error_count < 0:
                    break

    def collect_data(self):
        # Hook: subclasses implement exchange-specific collection.
        pass

    def process_data(self):
        """Dispatch to the instance method named by Config['DataHandler']."""
        self.connect_redis()
        # NOTE(review): if 'DataHandler' is absent, getattr(self, object)
        # raises TypeError rather than a clear error — confirm intended.
        getattr(self, self.Config.get('DataHandler', object))()

    def _check_price_eq(self, p1, p2):
        # divide by 2 to avoid precision
        return abs(p1-p2) < self.Config['TickSize']/2

    def _binary_search(self, find, list1, low, high):
        """Binary-search price ``find`` in price-ordered ``list1``.

        Returns [index, 'True'] on a tick-size match, else
        [insertion_index, 'False'].
        """
        while low <= high:
            mid = int((low + high) / 2)
            if self._check_price_eq(list1[mid][0], find):
                return [mid, 'True']
            elif list1[mid][0] > find:
                high = mid - 1
            else:
                low = mid + 1
        return [low, 'False']

    def _update_order_book(self, bids, asks, side, price, remaining):
        """Insert, update, or delete one price level in place.

        Levels whose remaining amount drops below Config['AmountMin'] are
        removed; unknown prices are inserted only at or above that minimum.
        """
        if side in ['bid', 'buy']:
            book = bids
            # Bids are stored ascending (best price at the end; see
            # _cut_order_book), so most updates land near the tail.
            cut = int(99*(len(book)-1)/100)
        else:
            book = asks
            cut = int((len(book)-1)/100)
        if price < book[cut][0]:
            res = self._binary_search(price, book, 0, cut-1)
        else:
            res = self._binary_search(price, book, cut, len(book)-1)
        if res[1] == 'True':
            if remaining < self.Config['AmountMin']:
                del book[res[0]]
            else:
                book[res[0]][1] = remaining
        else:
            if remaining >= self.Config['AmountMin']:
                book.insert(res[0], [price, remaining])

    def check_data_validation(self, book):
        """Sanity-check a flattened book snapshot.

        ``book`` alternates price/amount values: first half bids with
        strictly decreasing prices, second half asks with strictly
        increasing prices; every amount must meet Config['AmountMin'] and
        the best bid must not exceed the best ask.
        """
        length = int(len(book)/2)
        for i in range(0, length - 2, 2):
            if book[i] <= book[i + 2]:
                return False
        for i in range(length, 2 * length - 2, 2):
            if book[i] >= book[i + 2]:
                return False
        for i in range(1, 2 * length, 2):
            if book[i] < self.Config['AmountMin']:
                return False
        if book[0] > book[length]:
            return False
        return True

    @staticmethod
    def _cut_order_book(bids, asks, depth):
        """Return the best ``depth`` levels per side, flattened to one list.

        Bids are reversed so best-first; missing levels are padded with
        ['None', 'None'] placeholders.
        """
        if len(bids) >= depth:
            book = bids[-depth:]
            book.reverse()
        else:
            book = copy.deepcopy(bids)
            book.reverse()
            book += [['None', 'None']] * (depth - len(bids))
        if len(asks) >= depth:
            book += asks[:depth]
        else:
            book += asks + [['None', 'None']] * (depth - len(asks))
        book = [x[0:2] for x in book]  # keep only price/amount per level
        return sum(book, [])

    @staticmethod
    def fmt_date(ts):
        """Format a millisecond timestamp in the configured TIMEZONE."""
        return datetime.datetime.fromtimestamp(ts / 1000, TIMEZONE).strftime('%Y-%m-%d %H:%M:%S.%f %z')

    @staticmethod
    def date_from_str(ts):
        """Parse an ISO-8601 'Z'-suffixed string into an aware UTC datetime."""
        return pytz.utc.localize(datetime.datetime.strptime(ts, '%Y-%m-%dT%H:%M:%S.%fZ'))
| ccws/base.py | 5,841 | coding=utf-8 self.Logger.debug(msg) data[1] is timestamp divide by 2 to avoid precision | 87 | en | 0.658612 |
import json
import os
import re
import sys
import sysconfig
# Matches a "digit.digit" pair, the minimal signature of a version string.
RX_VERSION = re.compile(r"\d\.\d")
# Modules whose version lives in a non-standard attribute; values are
# space-separated attribute names, tried in order by pymodule_info().
INSIGHTS = {
    "_gdbm": "_GDBM_VERSION",
    "_tkinter": "TCL_VERSION TK_VERSION",
    "_sqlite3": "sqlite_version version",
    "_ssl": "OPENSSL_VERSION",
    "dbm.gnu": "_GDBM_VERSION",
    "ensurepip": "_PIP_VERSION",
    "pyexpat": "version_info",
    "readline": "_READLINE_LIBRARY_VERSION",
    "tkinter": "TclVersion TkVersion",
    "zlib": "ZLIB_VERSION ZLIB_RUNTIME_VERSION",
}
def get_version(text):
    """Best-effort extraction of a version string from *text*.

    Accepts bytes (decoded as UTF-8), tuples (joined with dots) or any
    other object (stringified).  Returns the first line of the result when
    it contains a digit.digit pair, otherwise None.
    """
    if not text:
        return None
    if isinstance(text, bytes):
        candidate = text.decode("utf-8")
    elif isinstance(text, tuple):
        candidate = ".".join(str(part) for part in text)
    else:
        candidate = str(text)
    if candidate and re.search(r"\d\.\d", candidate):
        return candidate.splitlines()[0]
    return None
def pymodule_version_info(key, value, pymodule):
    """Build a report entry from one attribute of *pymodule*.

    Returns a dict with 'version_field' and 'version' (plus 'path' when the
    module exposes __file__), or None when *value* yields no usable version
    string.
    """
    version = get_version(value)
    if not version:
        return None
    info = {"version_field": key, "version": version}
    if hasattr(pymodule, "__file__"):
        info["path"] = pymodule.__file__
    return info
def pymodule_info(module_name, pymodule):
    """Summarize version/location info for an imported module.

    Tries the attributes configured in INSIGHTS (falling back to the common
    __version__/version/VERSION trio); failing that, reports the module's
    file path, or 'built-in' for built-in modules, or a dump of
    dir(pymodule) as a last resort.
    """
    configured = INSIGHTS.get(module_name)
    if configured:
        candidates = configured.split()
    else:
        candidates = ["__version__", "version", "VERSION"]
    for attr in candidates:
        entry = pymodule_version_info(attr, getattr(pymodule, attr, None), pymodule)
        if entry:
            return entry
    if hasattr(pymodule, "__file__"):
        return {"path": pymodule.__file__}
    if hasattr(pymodule, "__spec__"):
        origin = getattr(pymodule.__spec__, "origin")
        if origin == "built-in":
            return {"version": origin}
    return {"note": str(dir(pymodule))}
def module_report(module_name):
    """Import *module_name* and report its version info.

    Failures are reported rather than raised: a missing module yields
    {'version': '*absent*'}; any other error additionally carries the error
    text in 'note'.  The inspection itself runs inside the try block, so its
    failures are reported the same way.
    """
    try:
        return pymodule_info(module_name, __import__(module_name))
    except Exception as exc:
        reason = str(exc)
        if "No module named" in reason:
            return {"version": "*absent*"}
        return {"version": "*absent*", "note": reason}
def get_srcdir():
    """Return sysconfig's srcdir, falling back to DESTSHARED.

    Some interpreters (py2 in particular) report a bogus '.' as srcdir, so
    any value shorter than 3 characters is treated as invalid.
    """
    srcdir = sysconfig.get_config_var("srcdir")
    if srcdir and len(srcdir) >= 3:
        return srcdir
    return sysconfig.get_config_var("DESTSHARED")
def get_simplified_dirs(path):
    """Return candidate build directories derived from *path*.

    The file component of *path* is stripped first.  A '/private' prefix
    (macOS temp dirs not passed through realpath) gets a de-prefixed alias
    appended; paths outside /tmp also get their parent directory appended.
    """
    if not path:
        return []
    base = os.path.dirname(path)
    simplified = [base]
    if base.startswith("/private"):
        # whoever compiled didn't use realpath(tmp)
        simplified.append(base[len("/private"):])
    elif not base.startswith("/tmp"):  # nosec, just simplifying paths
        simplified.append(os.path.dirname(base))
    return simplified
def main(arg):
    """Entry point: dump sysconfig vars or a JSON module report.

    arg == "sysconfig" prints every sysconfig variable with the build
    directory abbreviated to a marker; any other non-flag argument is a
    comma-separated list of module names to report on.  An empty or
    flag-like argument prints nothing.
    """
    if arg == "sysconfig":
        marker = "$^"
        simplified_dirs = get_simplified_dirs(sysconfig.get_config_var("abs_builddir"))
        if simplified_dirs:
            print("# '%s' is original abs_builddir:" % marker)
            print("%s: %s\n" % (marker, simplified_dirs[0]))
        for key, value in sorted(sysconfig.get_config_vars().items()):
            text = str(value)
            for prefix in simplified_dirs:
                text = text.replace(prefix, marker)
            print("%s: %s" % (key, text))
        return
    if arg and not arg.startswith("-"):
        modules = dict((name, module_report(name)) for name in arg.split(","))
        payload = dict(
            report=modules,
            srcdir=get_srcdir(),
            prefix=sysconfig.get_config_var("prefix"),
        )
        print(json.dumps(payload, indent=2, sort_keys=True))
if __name__ == "__main__":
    # Default to an empty argument (main prints nothing) when run bare.
    main(sys.argv[1] if len(sys.argv) > 1 else "")
| src/portable_python/external/_inspect.py | 3,442 | edge case: py2 reports an odd '.' as srcdir whoever compiled didn't use realpath(tmp) nosec, just simplifying paths | 115 | en | 0.868153 |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RRrblup(RPackage):
    """Ridge Regression and Other Kernels for Genomic Selection.

    Software for genomic prediction with the RR-BLUP mixed model (Endelman
    2011, <doi:10.3835/plantgenome2011.08.0024>). One application is to
    estimate marker effects by ridge regression; alternatively, BLUPs can be
    calculated based on an additive relationship matrix or a Gaussian
    kernel."""

    # CRAN package name; Spack derives source URLs from it.
    cran = "rrBLUP"

    version('4.6.1', sha256='e9230e74cc430a83ac5567071cb1c7f00b35c368f7d79bcc1cfde7225446c4db')
    version('4.6', sha256='28b475a1466fcdc1780caace75cf34155338fda496cebd5799315598a4bc84af')

    # Requires R >= 2.14 at both build and run time.
    depends_on('r@2.14:', type=('build', 'run'))
| var/spack/repos/builtin/packages/r-rrblup/package.py | 882 | Ridge Regression and Other Kernels for Genomic Selection.
Software for genomic prediction with the RR-BLUP mixed model (Endelman
2011, <doi:10.3835/plantgenome2011.08.0024>). One application is to
estimate marker effects by ridge regression; alternatively, BLUPs can be
calculated based on an additive relationship matrix or a Gaussian
kernel.
Copyright 2013-2022 Lawrence Livermore National Security, LLC and other Spack Project Developers. See the top-level COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) | 535 | en | 0.753238 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
import selenium.common.exceptions as Exceptions
from selenium.webdriver.common import by
import selenium.webdriver.support.ui as Support
from selenium.webdriver.support import wait
class BaseWebObject(unittest.TestCase):
    """Base class for all web objects.

    Wraps a Selenium WebDriver with the presence/visibility predicates and
    explicit waits shared by page objects in the integration tests.
    """

    # Spinner shown in modal dialogs while a request is in flight.
    _spinner_locator = (by.By.CSS_SELECTOR, '.modal-body > .spinner')

    def __init__(self, driver, conf):
        # NOTE(review): TestCase.__init__ is deliberately not called; the
        # class only borrows assertion helpers -- confirm before changing.
        self.driver = driver
        self.conf = conf
        # Default timeout (seconds) for explicit waits.
        self.explicit_wait = self.conf.selenium.explicit_wait

    def _is_element_present(self, *locator):
        """Return True when the element exists in the DOM (visible or not)."""
        try:
            # Suppress the implicit wait so a missing element fails fast
            # instead of blocking for the configured page timeout.
            self._turn_off_implicit_wait()
            self._get_element(*locator)
            return True
        except Exceptions.NoSuchElementException:
            return False
        finally:
            self._turn_on_implicit_wait()

    def _is_element_visible(self, *locator):
        """Return True when the element exists and is displayed."""
        try:
            return self._get_element(*locator).is_displayed()
        except (Exceptions.NoSuchElementException,
                Exceptions.ElementNotVisibleException):
            return False

    def _is_element_displayed(self, element):
        """Return True when *element* is displayed; never raises.

        The broad except is deliberate: during waits the element may be
        replaced or go stale at any moment, which must read as "not
        displayed" rather than an error.
        """
        try:
            return element.is_displayed()
        except Exception:
            return False

    def _is_text_visible(self, element, text, strict=True):
        """Return True when *text* equals (strict) or occurs in the element text."""
        try:
            if strict:
                return element.text == text
            else:
                return text in element.text
        except Exception:
            # Stale or vanished element: treat as "text not visible".
            return False

    def _get_element(self, *locator):
        return self.driver.find_element(*locator)

    def _get_elements(self, *locator):
        return self.driver.find_elements(*locator)

    def _fill_field_element(self, data, field_element):
        """Clear the field, type *data* into it and return the element."""
        field_element.clear()
        field_element.send_keys(data)
        return field_element

    def _select_dropdown(self, value, element):
        """Select a <select> option by its visible text."""
        select = Support.Select(element)
        select.select_by_visible_text(value)

    def _select_dropdown_by_value(self, value, element):
        """Select a <select> option by its value attribute."""
        select = Support.Select(element)
        select.select_by_value(value)

    def _turn_off_implicit_wait(self):
        self.driver.implicitly_wait(0)

    def _turn_on_implicit_wait(self):
        self.driver.implicitly_wait(self.conf.selenium.page_timeout)

    def _wait_until(self, predicate, timeout=None, poll_frequency=0.5):
        """Wait until the value returned by predicate is not False or
        the timeout is elapsed.
        'predicate' takes the driver as argument.
        """
        if not timeout:
            timeout = self.explicit_wait
        wait.WebDriverWait(self.driver, timeout, poll_frequency).until(
            predicate)

    def _wait_till_text_present_in_element(self, element, text, timeout=None):
        """Waiting for a text to appear in a certain element very often is
        actually waiting for a _different_ element with a different text to
        appear in place of an old element. So a way to avoid capturing stale
        element reference should be provided for this use case.
        Better to wrap getting entity status cell in a lambda
        to avoid problems with cell being replaced with totally different
        element by Javascript
        """
        def predicate(_):
            # Re-resolve the element on every poll when a getter was passed.
            elt = element() if callable(element) else element
            return self._is_text_visible(elt, text)
        self._wait_until(predicate, timeout)

    def _wait_till_element_visible(self, element, timeout=None):
        self._wait_until(lambda x: self._is_element_displayed(element),
                         timeout)

    def _wait_till_element_disappears(self, element, timeout=None):
        self._wait_until(lambda x: not self._is_element_displayed(element),
                         timeout)

    def wait_till_element_disappears(self, element_getter):
        """Wait for the element produced by *element_getter* to disappear."""
        try:
            self._turn_off_implicit_wait()
            self._wait_till_element_disappears(element_getter())
        except Exceptions.NoSuchElementException:
            # NOTE(mpavlase): This is valid state. When request completes
            # even before Selenium get a chance to get the spinner element,
            # it will raise the NoSuchElementException exception.
            pass
        finally:
            self._turn_on_implicit_wait()

    def wait_till_spinner_disappears(self):
        """Block until the modal spinner is gone."""
        # PEP 8 (E731): use a named function instead of an assigned lambda.
        def getter():
            return self.driver.find_element(*self._spinner_locator)
        self.wait_till_element_disappears(getter)
| openstack-dashboard/openstack_dashboard/test/integration_tests/basewebobject.py | 5,021 | Base class for all web objects.
Waiting for a text to appear in a certain element very often is
actually waiting for a _different_ element with a different text to
appear in place of an old element. So a way to avoid capturing stale
element reference should be provided for this use case.
Better to wrap getting entity status cell in a lambda
to avoid problems with cell being replaced with totally different
element by Javascript
Wait until the value returned by predicate is not False or
the timeout is elapsed.
'predicate' takes the driver as argument.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. NOTE(mpavlase): This is valid state. When request completes even before Selenium get a chance to get the spinner element, it will raise the NoSuchElementException exception. | 1,282 | en | 0.871154 |
import os

# Pin the run to the second GPU before torch initializes CUDA.
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
# import deepspeed
# import mpi4py
# import pandas
import torch
import transformers
import wandb

#%env WANDB_PROJECT=wine_gpt2_Trainer_42

MODEL_NAME = "gpt2-medium"

# NOTE(review): a hard-coded API key is committed in the commented line
# below -- rotate the key and strip it from history.
# wandb.login(anonymous='never', key="222a37baaf0c1b0d1499ec003e5c2fe49f97b107")
wandb.init()
# wandb.watch(log='all')
print(torch.cuda.is_available())
print(f"transformers version: {transformers.__version__}")
print(f"PyTorch version: {torch.__version__}")

# Tokenizers
tokenizer = transformers.AutoTokenizer.from_pretrained(MODEL_NAME)
print(len(tokenizer))
# One sentinel token doubles as BOS and EOS.
tokenizer.add_special_tokens(
    {"eos_token": "<|startoftext|>", "bos_token": "<|startoftext|>"}
)
# Domain-specific control tokens used to structure each training example.
tokenizer.add_tokens(
    [
        "[prompt]",
        "[response]",
        "[category_1]",
        "[category_2]",
        "[origin]",
        "[description]",
        "<|endoftext|>",
    ]
)
# GPT-2 ships without a pad token; reuse EOS so padded batching works.
tokenizer.pad_token = tokenizer.eos_token
tokenizer.save_pretrained("data/modeling/trainer_42/")
print(len(tokenizer))
print("Created tokenizer")
class wineDataset(torch.utils.data.Dataset):
    """Dataset over pre-tokenized encodings for causal-LM fine-tuning.

    Each item is a dict of tensors built from one row of the tokenizer
    output; 'labels' is the very same tensor object as 'input_ids'.
    """

    def __init__(self, encodings):
        self.encodings = encodings

    def __len__(self):
        return len(self.encodings["input_ids"])

    def __getitem__(self, idx):
        sample = {name: torch.tensor(rows[idx])
                  for name, rows in self.encodings.items()}
        sample["labels"] = sample["input_ids"]
        return sample
with open("data/scraped/name_desc_nlp_ready_train.txt", "r", encoding="utf8") as file:
    wines_raw_train = file.read().splitlines()
with open("data/scraped/name_desc_nlp_ready_test.txt", "r", encoding="utf8") as file:
    wines_raw_test = file.read().splitlines()
print("Loaded dataset")
# wines_raw_train, wines_raw_test = train_test_split(wines_raw,test_size=0.2)

# NOTE(review): only the *test* split is encoded and trained on below; the
# train-split lines are commented out -- confirm this is intentional.
# wine_encodings_train = tokenizer(wines_raw_train, max_length=200, truncation=True, padding=True)
wine_encodings_test = tokenizer(
    wines_raw_test, max_length=200, truncation=True, padding=True
)
print("Encoded dataset")
# wine_dataset_train = wineDataset(wine_encodings_train)
wine_dataset_test = wineDataset(wine_encodings_test)
print("Created PyTorch DataSet")
# train_loader = torch.utils.data.DataLoader(wine_dataset_train)
model = transformers.AutoModelForCausalLM.from_pretrained(MODEL_NAME)
# model.to('cuda')
# Grow the embedding matrix to cover the newly added special tokens.
model.resize_token_embeddings(len(tokenizer))
print(f"model parameters: {model.num_parameters():,}")
training_args = transformers.TrainingArguments(
    output_dir="data/modeling/trainer_42/",
    overwrite_output_dir=True,
    num_train_epochs=1,
    per_device_train_batch_size=2,
    save_steps=100,
    save_total_limit=2,
    fp16=True,
    # deepspeed='data/ds_config.json'
)
trainer = transformers.Trainer(
    model=model, args=training_args, train_dataset=wine_dataset_test,
)
trainer.train()
import time
from django.db import connections
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    """Django command to pause execution until database is available."""

    def handle(self, *args, **options):
        """Poll the default database once a second until it accepts connections.

        Bug fix: merely indexing ``connections['default']`` returns the lazy
        connection wrapper without touching the database, so
        ``OperationalError`` was never raised and the loop exited on the
        first pass even when the database was down.  Calling
        ``ensure_connection()`` forces a real connection attempt.
        """
        self.stdout.write('Waiting for database...')
        db_conn = None
        while not db_conn:
            try:
                db_conn = connections['default']
                db_conn.ensure_connection()
            except OperationalError:
                db_conn = None
                self.stdout.write('Database unavailable, waiting 1 second...')
                time.sleep(1)
        self.stdout.write(self.style.SUCCESS('Database available'))
from .base import * # noqa pylint: disable=wildcard-import, unused-wildcard-import
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# No default for SECRET_KEY: production must fail fast when it is unset.
SECRET_KEY = env("DJANGO_SECRET_KEY")
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["coronacircles.de"])
# DATABASES
# ------------------------------------------------------------------------------
DATABASES["default"] = env.db("DATABASE_URL") # noqa F405
DATABASES["default"]["ATOMIC_REQUESTS"] = True # noqa F405
DATABASES["default"]["CONN_MAX_AGE"] = env.int("CONN_MAX_AGE", default=60) # noqa F405
# CACHES
# ------------------------------------------------------------------------------
# CACHES = {
# 'default': {
# 'BACKEND': 'django_redis.cache.RedisCache',
# 'LOCATION': env('REDIS_URL'),
# 'OPTIONS': {
# 'CLIENT_CLASS': 'django_redis.client.DefaultClient',
# 'IGNORE_EXCEPTIONS': True,
# }
# }
# }
# SECURITY
# ------------------------------------------------------------------------------
# Trust the proxy's forwarded-proto header when deciding if a request is HTTPS.
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
CSRF_COOKIE_SECURE = True
CSRF_COOKIE_HTTPONLY = True
# set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = env("DJANGO_SECURE_HSTS_SECONDS", default="60")
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
    "DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True
)
SECURE_HSTS_PRELOAD = env.bool("DJANGO_SECURE_HSTS_PRELOAD", default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
    "DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True
)
SECURE_BROWSER_XSS_FILTER = True
X_FRAME_OPTIONS = "DENY"
# STORAGES
# ------------------------------------------------------------------------------
# INSTALLED_APPS += ["storages"] # noqa F405
# AWS_ACCESS_KEY_ID = env("DJANGO_AWS_ACCESS_KEY_ID")
# AWS_SECRET_ACCESS_KEY = env("DJANGO_AWS_SECRET_ACCESS_KEY")
# AWS_STORAGE_BUCKET_NAME = env("DJANGO_AWS_STORAGE_BUCKET_NAME")
# AWS_AUTO_CREATE_BUCKET = False
# AWS_QUERYSTRING_AUTH = False
# _AWS_EXPIRY = 60 * 60 * 24 * 7
# AWS_S3_OBJECT_PARAMETERS = {
# "CacheControl": f"max-age={_AWS_EXPIRY}, s-maxage={_AWS_EXPIRY}, must-revalidate"
# }
# STATIC
# ------------------------
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# MEDIA
# ------------------------------------------------------------------------------
# DEFAULT_FILE_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
# MEDIA_URL = f"https://{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com/"
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
# Cached template loader: compile each template once per process.
TEMPLATES[0]["OPTIONS"]["loaders"] = [ # noqa F405
    (
        "django.template.loaders.cached.Loader",
        [
            "django.template.loaders.filesystem.Loader",
            "django.template.loaders.app_directories.Loader",
        ],
    )
]
# EMAIL
# ------------------------------------------------------------------------------
DEFAULT_FROM_EMAIL = env(
    "DJANGO_DEFAULT_FROM_EMAIL", default="CoronaCircles <contact@coronacircles.net>",
)
SERVER_EMAIL = env("DJANGO_SERVER_EMAIL", default=DEFAULT_FROM_EMAIL)
EMAIL_SUBJECT_PREFIX = env("DJANGO_EMAIL_SUBJECT_PREFIX", default="[Coronacircles]")
EMAIL_HOST = env("DJANGO_EMAIL_HOST", default="localhost")
EMAIL_HOST_USER = env("DJANGO_EMAIL_HOST_USER", default="")
EMAIL_HOST_PASSWORD = env("DJANGO_EMAIL_HOST_PASSWORD", default="")
EMAIL_PORT = env("DJANGO_EMAIL_PORT", default="465")
EMAIL_USE_SSL = env.bool("DJANGO_EMAIL_USE_SSL", default=False)
EMAIL_USE_TLS = env.bool("DJANGO_EMAIL_USE_TLS", default=False)
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
# ADMIN_URL = env("DJANGO_ADMIN_URL") # no admin in use here
# Gunicorn
# ------------------------------------------------------------------------------
INSTALLED_APPS += ["gunicorn"] # noqa F405
# LOGGING
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    "version": 1,
    "disable_existing_loggers": False,
    "filters": {"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"}},
    "formatters": {
        "verbose": {
            "format": "%(asctime)s [%(process)d] [%(levelname)s] "
            "pathname=%(pathname)s lineno=%(lineno)s "
            "funcname=%(funcName)s %(message)s",
            "datefmt": "%Y-%m-%d %H:%M:%S",
        }
    },
    "handlers": {
        "mail_admins": {
            "level": "ERROR",
            "filters": ["require_debug_false"],
            "class": "django.utils.log.AdminEmailHandler",
        },
        "console": {
            "level": "DEBUG",
            "class": "logging.StreamHandler",
            "formatter": "verbose",
        },
    },
    "loggers": {
        "django.request": {
            "handlers": ["console", "mail_admins"],
            "level": "ERROR",
            "propagate": True,
        },
        "django.security.DisallowedHost": {
            "level": "ERROR",
            "handlers": ["console", "mail_admins"],
            "propagate": True,
        },
    },
}
ADMIN_URL = env("DJANGO_ADMIN_URL") no admin in use here Gunicorn ------------------------------------------------------------------------------ noqa F405 LOGGING ------------------------------------------------------------------------------ See: https://docs.djangoproject.com/en/dev/ref/settings/logging A sample logging configuration. The only tangible logging performed by this configuration is to send an email to the site admins bon every HTTP 500 error when DEBUG=False. See https://docs.djangoproject.com/en/dev/topics/logging for more details on how to customize your logging configuration. | 2,501 | en | 0.31123 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Setup.py for the Airflow project."""
import io
import logging
import os
import subprocess
import sys
import unittest
from importlib import util
from os.path import dirname
from textwrap import wrap
from typing import Dict, Iterable, List
from setuptools import Command, find_packages, setup
logger = logging.getLogger(__name__)

# Kept manually in sync with airflow.__version__
# Load airflow/version.py directly from disk so setup.py never imports the
# airflow package itself.
spec = util.spec_from_file_location("airflow.version", os.path.join('airflow', 'version.py')) # noqa
mod = util.module_from_spec(spec)
spec.loader.exec_module(mod) # type: ignore
version = mod.version # type: ignore

PY3 = sys.version_info[0] == 3
PY38 = PY3 and sys.version_info[1] >= 8

my_dir = dirname(__file__)

# Long description comes from the README; tolerate source trees without one.
try:
    with io.open(os.path.join(my_dir, 'README.md'), encoding='utf-8') as f:
        long_description = f.read()
except FileNotFoundError:
    long_description = ''
def airflow_test_suite():
    """Discover and return the Airflow unit-test suite under tests/."""
    loader = unittest.TestLoader()
    return loader.discover(os.path.join(my_dir, 'tests'), pattern='test_*.py')
class CleanCommand(Command):
    """
    Command to tidy up the project root.

    Registered as cmdclass in setup() so it can be called with ``python setup.py extra_clean``.
    """

    description = "Tidy up the project root"
    user_options: List[str] = []

    def initialize_options(self):
        """Set default values for options."""

    def finalize_options(self):
        """Set final values for options."""

    def run(self):  # noqa
        """Run command to remove temporary files and directories."""
        os.chdir(my_dir)
        # Shell out so the globs expand; -v lists every removed path.
        os.system('rm -vrf ./build ./dist ./*.pyc ./*.tgz ./*.egg-info')
class CompileAssets(Command):
    """
    Compile and build the frontend assets using yarn and webpack.

    Registered as cmdclass in setup() so it can be called with ``python setup.py compile_assets``.
    """

    description = "Compile and build the frontend assets"
    user_options: List[str] = []

    def initialize_options(self):
        """Set default values for options."""

    def finalize_options(self):
        """Set final values for options."""

    def run(self):  # noqa
        """Run a command to compile and build assets."""
        asset_script = './airflow/www/compile_assets.sh'
        subprocess.check_call(asset_script)
class ListExtras(Command):
    """
    List all available extras

    Registered as cmdclass in setup() so it can be called with ``python setup.py list_extras``.
    """

    description = "List available extras"
    user_options: List[str] = []

    def initialize_options(self):
        """Set default values for options."""

    def finalize_options(self):
        """Set final values for options."""

    def run(self):  # noqa
        """List extras."""
        extras = ", ".join(EXTRAS_REQUIREMENTS.keys())
        print("\n".join(wrap(extras, 100)))
def git_version(version_: str) -> str:
    """
    Return a version to identify the state of the underlying git repo. The version will
    indicate whether the head of the current git-backed working directory is tied to a
    release tag or not : it will indicate the former with a 'release:{version}' prefix
    and the latter with a 'dev0' prefix. Following the prefix will be a sha of the current
    branch head. Finally, a "dirty" suffix is appended to indicate that uncommitted
    changes are present.

    :param str version_: Semver version
    :return: Found Airflow version in Git repo
    :rtype: str
    """
    repo = None
    try:
        import git
        try:
            repo = git.Repo(os.path.join(*[my_dir, '.git']))
        except git.NoSuchPathError:
            logger.warning('.git directory not found: Cannot compute the git version')
            return ''
        except git.InvalidGitRepositoryError:
            logger.warning('Invalid .git directory not found: Cannot compute the git version')
            return ''
    except ImportError:
        logger.warning('gitpython not found: Cannot compute the git version.')
        return ''
    if not repo:
        return 'no_git_version'
    sha = repo.head.commit.hexsha
    if repo.is_dirty():
        return '.dev0+{sha}.dirty'.format(sha=sha)
    # commit is clean
    return '.release:{version}+{sha}'.format(version=version_, sha=sha)
def write_version(filename: str = os.path.join(*[my_dir, "airflow", "git_version"])):
    """
    Write the Semver version + git hash to file, e.g. ".dev0+2f635dc265e78db6708f59f68e8009abb92c1e65".

    :param str filename: Destination file to write
    """
    git_stamp = "{}".format(git_version(version))
    with open(filename, 'w') as target:
        target.write(git_stamp)
# 'Start dependencies group' and 'Start dependencies group' are mark for ./scripts/ci/check_order_setup.py
# If you change this mark you should also change ./scripts/ci/check_order_setup.py
# Start dependencies group
amazon = [
'boto3>=1.12.0,<2.0.0',
'watchtower~=0.7.3',
]
apache_beam = [
'apache-beam[gcp]',
]
async_packages = [
'eventlet>= 0.9.7',
'gevent>=0.13',
'greenlet>=0.4.9',
]
atlas = [
'atlasclient>=0.1.2',
]
azure = [
'azure-batch>=8.0.0',
'azure-cosmos>=3.0.1,<4',
'azure-datalake-store>=0.0.45',
'azure-identity>=1.3.1',
'azure-keyvault>=4.1.0',
'azure-kusto-data>=0.0.43,<0.1',
'azure-mgmt-containerinstance>=1.5.0,<2.0',
'azure-mgmt-datalake-store>=0.5.0',
'azure-mgmt-resource>=2.2.0',
'azure-storage>=0.34.0, <0.37.0',
'azure-storage-blob<12.0',
]
cassandra = [
'cassandra-driver>=3.13.0,<3.21.0',
]
celery = [
'celery~=4.4.2',
'flower>=0.7.3, <1.0',
'tornado>=4.2.0, <6.0', # Dep of flower. Pin to a version that works on Py3.5.2
'vine~=1.3', # https://stackoverflow.com/questions/32757259/celery-no-module-named-five
]
cgroups = [
'cgroupspy>=0.1.4',
]
cloudant = [
'cloudant>=2.0',
]
dask = [
'cloudpickle>=1.4.1, <1.5.0',
'distributed>=2.11.1, <2.20'
]
databricks = [
'requests>=2.20.0, <3',
]
datadog = [
'datadog>=0.14.0',
]
doc = [
'sphinx>=2.1.2',
'sphinx-argparse>=0.1.13',
'sphinx-autoapi==1.0.0',
'sphinx-copybutton',
'sphinx-jinja~=1.1',
'sphinx-rtd-theme>=0.1.6',
'sphinxcontrib-httpdomain>=1.7.0',
"sphinxcontrib-redoc>=1.6.0",
"sphinxcontrib-spelling==5.2.1"
]
docker = [
'docker~=3.0',
]
druid = [
'pydruid>=0.4.1',
]
elasticsearch = [
'elasticsearch>7, <7.6.0',
'elasticsearch-dbapi==0.1.0',
'elasticsearch-dsl>=5.0.0',
]
exasol = [
'pyexasol>=0.5.1,<1.0.0',
]
facebook = [
'facebook-business>=6.0.2',
]
flask_oauth = [
'Flask-OAuthlib>=0.9.1,<0.9.6', # Flask OAuthLib 0.9.6 requires Flask-Login 0.5.0 - breaks FAB
'oauthlib!=2.0.3,!=2.0.4,!=2.0.5,<3.0.0,>=1.1.2',
'requests-oauthlib==1.1.0',
]
google = [
'PyOpenSSL',
'google-ads>=4.0.0',
'google-api-python-client>=1.6.0,<2.0.0',
'google-auth>=1.0.0,<2.0.0',
'google-auth-httplib2>=0.0.1',
'google-cloud-automl>=0.4.0,<2.0.0',
'google-cloud-bigquery-datatransfer>=0.4.0,<2.0.0',
'google-cloud-bigtable>=1.0.0,<2.0.0',
'google-cloud-container>=0.1.1,<2.0.0',
'google-cloud-datacatalog>=0.5.0, <0.8', # TODO: we should migrate to 1.0 likely and add <2.0.0 then
'google-cloud-dataproc>=1.0.1,<2.0.0',
'google-cloud-dlp>=0.11.0,<2.0.0',
'google-cloud-kms>=1.2.1,<2.0.0',
'google-cloud-language>=1.1.1,<2.0.0',
'google-cloud-logging>=1.14.0,<2.0.0',
'google-cloud-monitoring>=0.34.0,<2.0.0',
'google-cloud-pubsub>=1.0.0,<2.0.0',
'google-cloud-redis>=0.3.0,<2.0.0',
'google-cloud-secret-manager>=0.2.0,<2.0.0',
'google-cloud-spanner>=1.10.0,<2.0.0',
'google-cloud-speech>=0.36.3,<2.0.0',
'google-cloud-storage>=1.16,<2.0.0',
'google-cloud-tasks>=1.2.1,<2.0.0',
'google-cloud-texttospeech>=0.4.0,<2.0.0',
'google-cloud-translate>=1.5.0,<2.0.0',
'google-cloud-videointelligence>=1.7.0,<2.0.0',
'google-cloud-vision>=0.35.2,<2.0.0',
'grpcio-gcp>=0.2.2',
'pandas-gbq',
]
# Per-extra dependency groups: one list per provider/integration, kept in
# alphabetical order (the surrounding 'Start/End dependencies group' marks
# are checked by scripts/ci/check_order_setup.py per the file header).
grpc = [
    'google-auth>=1.0.0, <2.0.0dev',
    'google-auth-httplib2>=0.0.1',
    'grpcio>=1.15.0',
]
hashicorp = [
    'hvac~=0.10',
]
hdfs = [
    'snakebite-py3',
]
hive = [
    'hmsclient>=0.1.0',
    'pyhive[hive]>=0.6.0',
]
jdbc = [
    'jaydebeapi>=1.1.1',
]
jenkins = [
    'python-jenkins>=1.0.0',
]
jira = [
    'JIRA>1.0.7',
]
kerberos = [
    'pykerberos>=1.1.13',
    'requests_kerberos>=0.10.0',
    'thrift_sasl>=0.2.0',
]
kubernetes = [
    'cryptography>=2.0.0',
    'kubernetes>=3.0.0',
]
kylin = [
    'kylinpy>=2.6'
]
ldap = [
    'ldap3>=2.5.1',
]
mongo = [
    'dnspython>=1.13.0,<2.0.0',
    'pymongo>=3.6.0',
]
mssql = [
    'pymssql~=2.1.1',
]
mysql = [
    'mysql-connector-python>=8.0.11, <=8.0.18',
    'mysqlclient>=1.3.6,<1.4',
]
odbc = [
    'pyodbc',
]
oracle = [
    'cx_Oracle>=5.1.2',
]
pagerduty = [
    'pypd>=1.1.0',
]
papermill = [
    'papermill[all]>=1.2.1',
    'nteract-scrapbook[all]>=0.3.1',
]
password = [
    'bcrypt>=2.0.0',
    'flask-bcrypt>=0.7.1',
]
pinot = [
    'pinotdb==0.1.1',
]
plexus = [
    'arrow>=0.16.0',
]
postgres = [
    'psycopg2-binary>=2.7.4',
]
presto = [
    'presto-python-client>=0.7.0,<0.8'
]
qds = [
    'qds-sdk>=1.10.4',
]
rabbitmq = [
    'amqp',
]
redis = [
    'redis~=3.2',
]
salesforce = [
    'simple-salesforce>=1.0.0',
]
samba = [
    'pysmbclient>=0.1.3',
]
segment = [
    'analytics-python>=1.2.9',
]
sendgrid = [
    'sendgrid>=6.0.0,<7',
]
sentry = [
    'blinker>=1.1',
    'sentry-sdk>=0.8.0',
]
singularity = ['spython>=0.0.56']
slack = [
    'slackclient>=2.0.0,<3.0.0',
]
snowflake = [
    'snowflake-connector-python>=1.5.2',
    'snowflake-sqlalchemy>=1.1.0',
]
spark = [
    'pyspark',
]
ssh = [
    'paramiko>=2.6.0',
    'pysftp>=0.2.9',
    'sshtunnel>=0.1.4,<0.2',
]
statsd = [
    'statsd>=3.3.0, <4.0',
]
tableau = [
    'tableauserverclient~=0.12',
]
vertica = [
    'vertica-python>=0.5.1',
]
virtualenv = [
    'virtualenv',
]
webhdfs = [
    'hdfs[avro,dataframe,kerberos]>=2.0.4',
]
winrm = [
    'pywinrm~=0.4',
]
yandexcloud = [
    'yandexcloud>=0.22.0',
]
zendesk = [
    'zdesk',
]
# End dependencies group
# Convenience bundle: the union of every database-related extra above.
all_dbs = (cassandra + cloudant + druid + exasol + hdfs + hive + mongo + mssql + mysql +
           pinot + postgres + presto + vertica)
############################################################################################################
# IMPORTANT NOTE!!!!!!!!!!!!!!!
# IF you are removing dependencies from this list, please make sure that you also increase
# DEPENDENCIES_EPOCH_NUMBER in the Dockerfile.ci
############################################################################################################
# Development/test-only requirements; not installed with the base package.
devel = [
    'beautifulsoup4~=4.7.1',
    'blinker',
    'bowler',
    'click==6.7',
    'contextdecorator;python_version<"3.4"',
    'coverage',
    'docutils',
    'flake8>=3.6.0',
    'flake8-colors',
    'flaky',
    'freezegun',
    'github3.py',
    'gitpython',
    'ipdb',
    'jira',
    'mongomock',
    'moto>=1.3.14,<2.0.0',
    'parameterized',
    'paramiko',
    'pipdeptree',
    'pre-commit',
    'pylint==2.5.3',
    'pysftp',
    'pytest',
    'pytest-cov',
    'pytest-instafail',
    'pytest-rerunfailures',
    'pytest-timeouts',
    'pytest-xdist',
    'pywinrm',
    'qds-sdk>=1.9.6',
    'requests_mock',
    'setuptools',
    'wheel',
    'yamllint',
]
############################################################################################################
# IMPORTANT NOTE!!!!!!!!!!!!!!!
# IF you are removing dependencies from the above list, please make sure that you also increase
# DEPENDENCIES_EPOCH_NUMBER in the Dockerfile.ci
############################################################################################################
# mypy only supports Python 3; unittest2 is the Python 2 backport.
if PY3:
    devel += ['mypy==0.770']
else:
    devel += ['unittest2']
# Minimal dev environment, and its Hadoop-flavored superset.
devel_minreq = cgroups + devel + doc + kubernetes + mysql + password
devel_hadoop = devel_minreq + hdfs + hive + kerberos + presto + webhdfs
# Maps each provider package id to the extra requirements it needs at runtime
# (an empty list means the provider needs nothing beyond the core install).
PROVIDERS_REQUIREMENTS: Dict[str, Iterable[str]] = {
    "amazon": amazon,
    "apache.cassandra": cassandra,
    "apache.druid": druid,
    "apache.hdfs": hdfs,
    "apache.hive": hive,
    "apache.kylin": kylin,
    "apache.livy": [],
    "apache.pig": [],
    "apache.pinot": pinot,
    "apache.spark": spark,
    "apache.sqoop": [],
    "celery": celery,
    "cloudant": cloudant,
    "cncf.kubernetes": kubernetes,
    "databricks": databricks,
    "datadog": datadog,
    "dingding": [],
    "discord": [],
    "docker": docker,
    "elasticsearch": [],
    "exasol": exasol,
    "facebook": facebook,
    "ftp": [],
    "google": google,
    "grpc": grpc,
    "hashicorp": hashicorp,
    "http": [],
    "imap": [],
    "jdbc": jdbc,
    "jenkins": jenkins,
    "jira": jira,
    "microsoft.azure": azure,
    "microsoft.mssql": mssql,
    "microsoft.winrm": winrm,
    "mongo": mongo,
    "mysql": mysql,
    "odbc": odbc,
    "openfaas": [],
    "opsgenie": [],
    "oracle": oracle,
    "pagerduty": pagerduty,
    "papermill": papermill,
    "plexus": plexus,
    "postgres": postgres,
    "presto": presto,
    "qubole": qds,
    "redis": redis,
    "salesforce": salesforce,
    "samba": samba,
    "segment": segment,
    "sftp": ssh,
    "singularity": singularity,
    "slack": slack,
    "snowflake": snowflake,
    "sqlite": [],
    "ssh": ssh,
    "vertica": vertica,
    "yandex": yandexcloud,
    "zendesk": zendesk,
}
# Maps pip extras (as in `pip install apache-airflow[extra]`) to requirements.
# Several legacy aliases point at the same list as a newer provider name.
EXTRAS_REQUIREMENTS: Dict[str, Iterable[str]] = {
    'all_dbs': all_dbs,
    'amazon': amazon,
    'apache.atlas': atlas,
    'apache.beam': apache_beam,
    "apache.cassandra": cassandra,
    "apache.druid": druid,
    "apache.hdfs": hdfs,
    "apache.hive": hive,
    "apache.kylin": kylin,
    "apache.pinot": pinot,
    "apache.webhdfs": webhdfs,
    'async': async_packages,
    'atlas': atlas,  # TODO: remove this in Airflow 2.1
    'aws': amazon,  # TODO: remove this in Airflow 2.1
    'azure': azure,  # TODO: remove this in Airflow 2.1
    'cassandra': cassandra,  # TODO: remove this in Airflow 2.1
    'celery': celery,
    'cgroups': cgroups,
    'cloudant': cloudant,
    'cncf.kubernetes': kubernetes,
    'dask': dask,
    'databricks': databricks,
    'datadog': datadog,
    'devel': devel_minreq,
    'devel_hadoop': devel_hadoop,
    'doc': doc,
    'docker': docker,
    'druid': druid,  # TODO: remove this in Airflow 2.1
    'elasticsearch': elasticsearch,
    'exasol': exasol,
    'facebook': facebook,
    'gcp': google,  # TODO: remove this in Airflow 2.1
    'gcp_api': google,  # TODO: remove this in Airflow 2.1
    'github_enterprise': flask_oauth,
    'google': google,
    'google_auth': flask_oauth,
    'grpc': grpc,
    'hashicorp': hashicorp,
    'hdfs': hdfs,  # TODO: remove this in Airflow 2.1
    'hive': hive,  # TODO: remove this in Airflow 2.1
    'jdbc': jdbc,
    'jira': jira,
    'kerberos': kerberos,
    'kubernetes': kubernetes,  # TODO: remove this in Airflow 2.1
    'ldap': ldap,
    "microsoft.azure": azure,
    "microsoft.mssql": mssql,
    "microsoft.winrm": winrm,
    'mongo': mongo,
    'mssql': mssql,  # TODO: remove this in Airflow 2.1
    'mysql': mysql,
    'odbc': odbc,
    'oracle': oracle,
    'pagerduty': pagerduty,
    'papermill': papermill,
    'password': password,
    'pinot': pinot,  # TODO: remove this in Airflow 2.1
    'plexus': plexus,
    'postgres': postgres,
    'presto': presto,
    'qds': qds,
    'rabbitmq': rabbitmq,
    'redis': redis,
    'salesforce': salesforce,
    'samba': samba,
    'segment': segment,
    'sendgrid': sendgrid,
    'sentry': sentry,
    'singularity': singularity,
    'slack': slack,
    'snowflake': snowflake,
    'spark': spark,
    'ssh': ssh,
    'statsd': statsd,
    'tableau': tableau,
    'vertica': vertica,
    'virtualenv': virtualenv,
    'webhdfs': webhdfs,  # TODO: remove this in Airflow 2.1
    'winrm': winrm,  # TODO: remove this in Airflow 2.1
    'yandexcloud': yandexcloud,
}
# Make devel_all contain all providers + extras + unique
# (set() de-duplicates requirement strings; resulting order is unspecified).
devel_all = list(set(devel +
                     [req for req_list in EXTRAS_REQUIREMENTS.values() for req in req_list] +
                     [req for req_list in PROVIDERS_REQUIREMENTS.values() for req in req_list]))
# Package-name prefixes dropped from devel_all (matched via
# is_package_excluded, which does a startswith() prefix match).
PACKAGES_EXCLUDED_FOR_ALL = [
]
if PY3:
    # The plain 'snakebite' client is excluded on Python 3; the hdfs extra
    # uses 'snakebite-py3' instead.
    PACKAGES_EXCLUDED_FOR_ALL.extend([
        'snakebite',
    ])
if PY38:
    PACKAGES_EXCLUDED_FOR_ALL.extend([
        'pymssql',
    ])
# Those packages are excluded because they break tests (downgrading mock) and they are
# not needed to run our test suite.
PACKAGES_EXCLUDED_FOR_CI = [
    'apache-beam',
]
def is_package_excluded(package: str, exclusion_list: List[str]) -> bool:
    """
    Checks if package should be excluded.

    :param package: package name (beginning of it)
    :param exclusion_list: list of excluded packages
    :return: true if package should be excluded
    """
    # Prefix match; a generator lets any() short-circuit without
    # materializing an intermediate list (ruff C419).
    return any(package.startswith(excluded_package) for excluded_package in exclusion_list)
# Filter the combined development set down for the 'all' and CI installs,
# then register both as extras.
devel_all = [package for package in devel_all if not is_package_excluded(
    package=package,
    exclusion_list=PACKAGES_EXCLUDED_FOR_ALL)
]
devel_ci = [package for package in devel_all if not is_package_excluded(
    package=package,
    exclusion_list=PACKAGES_EXCLUDED_FOR_CI + PACKAGES_EXCLUDED_FOR_ALL)
]
EXTRAS_REQUIREMENTS.update(
    {
        'all': devel_all,
        'devel_ci': devel_ci,
    }
)
#####################################################################################################
# IMPORTANT NOTE!!!!!!!!!!!!!!!
# IF you are removing dependencies from this list, please make sure that you also increase
# DEPENDENCIES_EPOCH_NUMBER in the Dockerfile.ci
#####################################################################################################
# Core runtime requirements installed with the base apache-airflow package.
INSTALL_REQUIREMENTS = [
    'alembic>=1.2, <2.0',
    'argcomplete~=1.10',
    'attrs~=19.3',
    'cached_property~=1.5',
    'cattrs~=1.0',
    'colorlog==4.0.2',
    'connexion[swagger-ui,flask]>=2.6.0,<3',
    'croniter>=0.3.17, <0.4',
    'cryptography>=0.9.3',
    'dill>=0.2.2, <0.4',
    'flask>=1.1.0, <2.0',
    'flask-appbuilder>2.3.4,~=3.0',
    'flask-caching>=1.3.3, <2.0.0',
    'flask-login>=0.3, <0.5',
    'flask-swagger==0.2.13',
    'flask-wtf>=0.14.2, <0.15',
    'funcsigs>=1.0.0, <2.0.0',
    'graphviz>=0.12',
    'gunicorn>=19.5.0, <20.0',
    'iso8601>=0.1.12',
    'jinja2>=2.10.1, <2.12.0',
    'json-merge-patch==0.2',
    'jsonschema~=3.0',
    'lazy_object_proxy~=1.3',
    'lockfile>=0.12.2',
    'markdown>=2.5.2, <3.0',
    'markupsafe>=1.1.1, <2.0',
    'marshmallow-oneofschema>=2.0.1',
    'pandas>=0.17.1, <2.0',
    'pendulum~=2.0',
    'pep562~=1.0;python_version<"3.7"',
    'psutil>=4.2.0, <6.0.0',
    'pygments>=2.0.1, <3.0',
    'python-daemon>=2.1.1',
    'python-dateutil>=2.3, <3',
    'python-nvd3~=0.15.0',
    'python-slugify>=3.0.0,<5.0',
    'requests>=2.20.0, <3',
    'setproctitle>=1.1.8, <2',
    'sqlalchemy~=1.3',
    'sqlalchemy_jsonfield~=0.9',
    'tabulate>=0.7.5, <0.9',
    'tenacity>=4.12.0, <5.2',
    'termcolor>=1.1.0',
    'thrift>=0.9.2',
    'typing;python_version<"3.6"',
    'typing-extensions>=3.7.4;python_version<"3.8"',
    'tzlocal>=1.4,<2.0.0',
    'unicodecsv>=0.14.1',
    'werkzeug<1.0.0',
]
def do_setup():
    """Perform the Airflow package setup."""
    # Persist the computed version (git-derived, see write_version) before
    # handing the metadata to setuptools.
    write_version()
    setup(
        name='apache-airflow',
        description='Programmatically author, schedule and monitor data pipelines',
        long_description=long_description,
        long_description_content_type='text/markdown',
        license='Apache License 2.0',
        version=version,
        packages=find_packages(include=['airflow', 'airflow.*']),
        # Non-Python data files bundled with the package.
        package_data={
            'airflow': ['py.typed'],
            '': ['airflow/alembic.ini', "airflow/git_version", "*.ipynb",
                 "airflow/providers/cncf/kubernetes/example_dags/*.yaml"],
            'airflow.api_connexion.openapi': ['*.yaml'],
            'airflow.serialization': ["*.json"],
        },
        include_package_data=True,
        zip_safe=False,
        # Installs the `airflow` command-line entry point.
        entry_points={
            "console_scripts": [
                "airflow = airflow.__main__:main",
            ],
        },
        install_requires=INSTALL_REQUIREMENTS,
        setup_requires=[
            'bowler',
            'docutils',
            'gitpython',
            'setuptools',
            'wheel',
        ],
        extras_require=EXTRAS_REQUIREMENTS,
        classifiers=[
            'Development Status :: 5 - Production/Stable',
            'Environment :: Console',
            'Environment :: Web Environment',
            'Intended Audience :: Developers',
            'Intended Audience :: System Administrators',
            'License :: OSI Approved :: Apache Software License',
            'Programming Language :: Python :: 3.6',
            'Programming Language :: Python :: 3.7',
            'Programming Language :: Python :: 3.8',
            'Topic :: System :: Monitoring',
        ],
        author='Apache Software Foundation',
        author_email='dev@airflow.apache.org',
        url='http://airflow.apache.org/',
        download_url=(
            'https://dist.apache.org/repos/dist/release/airflow/' + version),
        # Custom commands: `python setup.py extra_clean|compile_assets|list_extras`.
        cmdclass={
            'extra_clean': CleanCommand,
            'compile_assets': CompileAssets,
            'list_extras': ListExtras,
        },
        test_suite='setup.airflow_test_suite',
        python_requires='~=3.6',
    )
if __name__ == "__main__":
    do_setup()
| setup.py | 22,279 | Command to tidy up the project root.
Registered as cmdclass in setup() so it can be called with ``python setup.py extra_clean``.
Compile and build the frontend assets using yarn and webpack.
Registered as cmdclass in setup() so it can be called with ``python setup.py compile_assets``.
List all available extras
Registered as cmdclass in setup() so it can be called with ``python setup.py list_extras``.
Test suite for Airflow tests
Perform the Airflow package setup.
Set final values for options.
Set final values for options.
Set final values for options.
Return a version to identify the state of the underlying git repo. The version will
indicate whether the head of the current git-backed working directory is tied to a
release tag or not : it will indicate the former with a 'release:{version}' prefix
and the latter with a 'dev0' prefix. Following the prefix will be a sha of the current
branch head. Finally, a "dirty" suffix is appended to indicate that uncommitted
changes are present.
:param str version_: Semver version
:return: Found Airflow version in Git repo
:rtype: str
Set default values for options.
Set default values for options.
Set default values for options.
Checks if package should be excluded.
:param package: package name (beginning of it)
:param exclusion_list: list of excluded packages
:return: true if package should be excluded
Run command to remove temporary files and directories.
Run a command to compile and build assets.
List extras.
Write the Semver version + git hash to file, e.g. ".dev0+2f635dc265e78db6708f59f68e8009abb92c1e65".
:param str filename: Destination file to write
Setup.py for the Airflow project.
Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Kept manually in sync with airflow.__version__ noqa type: ignore type: ignore type: List[str] noqa type: List[str] noqa type: List[str] noqa commit is clean 'Start dependencies group' and 'Start dependencies group' are mark for ./scripts/ci/check_order_setup.py If you change this mark you should also change ./scripts/ci/check_order_setup.py Start dependencies group Dep of flower. Pin to a version that works on Py3.5.2 https://stackoverflow.com/questions/32757259/celery-no-module-named-five Flask OAuthLib 0.9.6 requires Flask-Login 0.5.0 - breaks FAB TODO: we should migrate to 1.0 likely and add <2.0.0 then End dependencies group IMPORTANT NOTE!!!!!!!!!!!!!!! IF you are removing dependencies from this list, please make sure that you also increase DEPENDENCIES_EPOCH_NUMBER in the Dockerfile.ci IMPORTANT NOTE!!!!!!!!!!!!!!! 
IF you are removing dependencies from the above list, please make sure that you also increase DEPENDENCIES_EPOCH_NUMBER in the Dockerfile.ci TODO: remove this in Airflow 2.1 TODO: remove this in Airflow 2.1 TODO: remove this in Airflow 2.1 TODO: remove this in Airflow 2.1 TODO: remove this in Airflow 2.1 TODO: remove this in Airflow 2.1 TODO: remove this in Airflow 2.1 TODO: remove this in Airflow 2.1 TODO: remove this in Airflow 2.1 TODO: remove this in Airflow 2.1 TODO: remove this in Airflow 2.1 TODO: remove this in Airflow 2.1 TODO: remove this in Airflow 2.1 TODO: remove this in Airflow 2.1 Make devel_all contain all providers + extras + unique Those packages are excluded because they break tests (downgrading mock) and they are not needed to run our test suite. IMPORTANT NOTE!!!!!!!!!!!!!!! IF you are removing dependencies from this list, please make sure that you also increase DEPENDENCIES_EPOCH_NUMBER in the Dockerfile.ci | 4,185 | en | 0.764358 |
#!/usr/bin/env python
# Copyright 2020 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Watch files for changes and rebuild.
pw watch runs Ninja in a build directory when source files change. It works with
any Ninja project (GN or CMake).
Usage examples:
# Find a build directory and build the default target
pw watch
# Find a build directory and build the stm32f429i target
pw watch python.lint stm32f429i
# Build pw_run_tests.modules in the out/cmake directory
pw watch -C out/cmake pw_run_tests.modules
# Build the default target in out/ and pw_apps in out/cmake
pw watch -C out -C out/cmake pw_apps
# Find a directory and build python.tests, and build pw_apps in out/cmake
pw watch python.tests -C out/cmake pw_apps
"""
import argparse
from dataclasses import dataclass
import logging
import os
from pathlib import Path
import shlex
import subprocess
import sys
import threading
from typing import (Iterable, List, NamedTuple, NoReturn, Optional, Sequence,
Tuple)
from watchdog.events import FileSystemEventHandler # type: ignore[import]
from watchdog.observers import Observer # type: ignore[import]
import pw_cli.branding
import pw_cli.color
import pw_cli.env
import pw_cli.plugins
from pw_watch.debounce import DebouncedFunction, Debouncer
_COLOR = pw_cli.color.colors()  # ANSI color helpers for terminal output.
_LOG = logging.getLogger(__name__)
# errno observed when the kernel's inotify watch limit is reached (Linux).
_ERRNO_INOTIFY_LIMIT_REACHED = 28
# Suppress events under 'fsevents', generated by watchdog on every file
# event on MacOS.
# TODO(b/182281481): Fix file ignoring, rather than just suppressing logs
_FSEVENTS_LOG = logging.getLogger('fsevents')
_FSEVENTS_LOG.setLevel(logging.WARNING)
_PASS_MESSAGE = """
██████╗ █████╗ ███████╗███████╗██╗
██╔══██╗██╔══██╗██╔════╝██╔════╝██║
██████╔╝███████║███████╗███████╗██║
██╔═══╝ ██╔══██║╚════██║╚════██║╚═╝
██║ ██║ ██║███████║███████║██╗
╚═╝ ╚═╝ ╚═╝╚══════╝╚══════╝╚═╝
"""
# Pick a visually-distinct font from "PASS" to ensure that readers can't
# possibly mistake the difference between the two states.
_FAIL_MESSAGE = """
▄██████▒░▄▄▄ ██▓ ░██▓
▓█▓ ░▒████▄ ▓██▒ ░▓██▒
▒████▒ ░▒█▀ ▀█▄ ▒██▒ ▒██░
░▓█▒ ░░██▄▄▄▄██ ░██░ ▒██░
░▒█░ ▓█ ▓██▒░██░░ ████████▒
▒█░ ▒▒ ▓▒█░░▓ ░ ▒░▓ ░
░▒ ▒ ▒▒ ░ ▒ ░░ ░ ▒ ░
░ ░ ░ ▒ ▒ ░ ░ ░
░ ░ ░ ░ ░
"""
# TODO(keir): Figure out a better strategy for exiting. The problem with the
# watcher is that doing a "clean exit" is slow. However, by directly exiting,
# we remove the possibility of the wrapper script doing anything on exit.
def _die(*args) -> NoReturn:
    """Logs a critical message (printf-style args) and exits with status 1."""
    _LOG.critical(*args)
    sys.exit(1)
class WatchCharset(NamedTuple):
    """Pair of status markers printed in the per-build summary table."""
    slug_ok: str  # Marker for a successful build.
    slug_fail: str  # Marker for a failed build.
# Plain-text markers, and emoji markers used when PW_EMOJI is set.
_ASCII_CHARSET = WatchCharset(_COLOR.green('OK '), _COLOR.red('FAIL'))
_EMOJI_CHARSET = WatchCharset('✔️ ', '💥')
@dataclass(frozen=True)
class BuildCommand:
    """A single ninja invocation: a build directory plus optional targets."""
    build_dir: Path
    targets: Tuple[str, ...] = ()

    def args(self) -> Tuple[str, ...]:
        """Returns the arguments passed after `ninja -C`: directory, targets."""
        return tuple([str(self.build_dir), *self.targets])

    def __str__(self) -> str:
        quoted = [shlex.quote(argument) for argument in self.args()]
        return ' '.join(quoted)
def git_ignored(file: Path) -> bool:
    """Returns true if this file is in a Git repo and ignored by that repo.

    Returns true for ignored files that were manually added to a repo.
    """
    file = file.resolve()
    directory = file.parent
    # Invoke Git from the file's parent so the lookup hits the correct repo.
    # If that directory no longer exists, retry from successive ancestors;
    # this makes it possible to check whether a deleted path is ignored in
    # the repo it was originally created in.
    while True:
        try:
            result = subprocess.run(
                ['git', 'check-ignore', '--quiet', '--no-index', file],
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
                cwd=directory)
        except FileNotFoundError:
            if directory == directory.parent:
                return False  # Hit the filesystem root; nothing to check.
            directory = directory.parent
        else:
            # 0 means the path is ignored; 128 is a fatal git error (treated
            # the same as ignored here, matching the documented behavior).
            return result.returncode in (0, 128)
class PigweedBuildWatcher(FileSystemEventHandler, DebouncedFunction):
    """Process filesystem events and launch builds if necessary."""
    def __init__(
        self,
        patterns: Sequence[str] = (),
        ignore_patterns: Sequence[str] = (),
        build_commands: Sequence[BuildCommand] = (),
        charset: WatchCharset = _ASCII_CHARSET,
        restart: bool = True,
    ):
        # patterns/ignore_patterns: globs matched against changed file paths.
        # build_commands: the ninja invocations to run on each change.
        # restart: when True, cancel() kills an in-flight build so it can be
        # restarted for newly changed files.
        super().__init__()
        self.patterns = patterns
        self.ignore_patterns = ignore_patterns
        self.build_commands = build_commands
        self.charset: WatchCharset = charset
        self.restart_on_changes = restart
        self._current_build: subprocess.Popen
        self.debouncer = Debouncer(self)
        # Track state of a build. These need to be members instead of locals
        # due to the split between dispatch(), run(), and on_complete().
        self.matching_path: Optional[Path] = None
        self.builds_succeeded: List[bool] = []
        # Background thread: pressing enter forces a rebuild at any time.
        self.wait_for_keypress_thread = threading.Thread(
            None, self._wait_for_enter)
        self.wait_for_keypress_thread.start()
    def _wait_for_enter(self) -> NoReturn:
        """Blocks on stdin; each enter keypress kills the current build and
        requests a rebuild through the debouncer."""
        try:
            while True:
                _ = input()
                self._current_build.kill()
                self.debouncer.press('Manual build requested...')
        # Ctrl-C on Unix generates KeyboardInterrupt
        # Ctrl-Z on Windows generates EOFError
        except (KeyboardInterrupt, EOFError):
            _exit_due_to_interrupt()
    def _path_matches(self, path: Path) -> bool:
        """Returns true if path matches according to the watcher patterns"""
        # Ignore patterns take precedence over watch patterns.
        return (not any(path.match(x) for x in self.ignore_patterns)
                and any(path.match(x) for x in self.patterns))
    def dispatch(self, event) -> None:
        """Watchdog callback invoked for every filesystem event."""
        # There isn't any point in triggering builds on new directory creation.
        # It's the creation or modification of files that indicate something
        # meaningful enough changed for a build.
        if event.is_directory:
            return
        # Collect paths of interest from the event.
        paths: List[str] = []
        if hasattr(event, 'dest_path'):
            # Move/rename events also carry a destination path.
            paths.append(os.fsdecode(event.dest_path))
        if event.src_path:
            paths.append(os.fsdecode(event.src_path))
        for raw_path in paths:
            _LOG.debug('File event: %s', raw_path)
        # Check whether Git cares about any of these paths.
        for path in (Path(p).resolve() for p in paths):
            if not git_ignored(path) and self._path_matches(path):
                self._handle_matched_event(path)
                return
    def _handle_matched_event(self, matching_path: Path) -> None:
        """Records the first matched path and (re)arms the debouncer."""
        # Only the first matching path per debounce window is remembered;
        # it is reported in run() and cleared in on_complete().
        if self.matching_path is None:
            self.matching_path = matching_path
        self.debouncer.press(
            f'File change detected: {os.path.relpath(matching_path)}')
    # Implementation of DebouncedFunction.run()
    #
    # Note: This will run on the timer thread created by the Debouncer, rather
    # than on the main thread that's watching file events. This enables the
    # watcher to continue receiving file change events during a build.
    def run(self) -> None:
        """Run all the builds in serial and capture pass/fail for each."""
        # Clear the screen and show a banner indicating the build is starting.
        print('\033c', end='')  # TODO(pwbug/38): Not Windows compatible.
        print(pw_cli.branding.banner())
        print(
            _COLOR.green(
                ' Watching for changes. Ctrl-C to exit; enter to rebuild'))
        print()
        _LOG.info('Change detected: %s', self.matching_path)
        self.builds_succeeded = []
        num_builds = len(self.build_commands)
        _LOG.info('Starting build with %d directories', num_builds)
        env = os.environ.copy()
        # Force colors in Pigweed subcommands run through the watcher.
        env['PW_USE_COLOR'] = '1'
        for i, cmd in enumerate(self.build_commands, 1):
            _LOG.info('[%d/%d] Starting build: %s', i, num_builds, cmd)
            # Run the build. Put a blank before/after for visual separation.
            print()
            self._current_build = subprocess.Popen(
                ['ninja', '-C', *cmd.args()], env=env)
            returncode = self._current_build.wait()
            print()
            build_ok = (returncode == 0)
            if build_ok:
                level = logging.INFO
                tag = '(OK)'
            else:
                level = logging.ERROR
                tag = '(FAIL)'
            _LOG.log(level, '[%d/%d] Finished build: %s %s', i, num_builds,
                     cmd, tag)
            self.builds_succeeded.append(build_ok)
    # Implementation of DebouncedFunction.cancel()
    def cancel(self) -> bool:
        """Kills an in-flight build if restarts are enabled; returns whether
        the build was cancelled."""
        if self.restart_on_changes:
            self._current_build.kill()
            return True
        return False
    # Implementation of DebouncedFunction.on_complete()
    def on_complete(self, cancelled: bool = False) -> None:
        """Reports per-build and overall results once the builds finish."""
        # First, use the standard logging facilities to report build status.
        if cancelled:
            _LOG.error('Finished; build was interrupted')
        elif all(self.builds_succeeded):
            _LOG.info('Finished; all successful')
        else:
            _LOG.info('Finished; some builds failed')
        # Then, show a more distinct colored banner.
        if not cancelled:
            # Write out build summary table so you can tell which builds passed
            # and which builds failed.
            print()
            print(' .------------------------------------')
            print(' |')
            for (succeeded, cmd) in zip(self.builds_succeeded,
                                        self.build_commands):
                slug = (self.charset.slug_ok
                        if succeeded else self.charset.slug_fail)
                print(f' | {slug} {cmd}')
            print(' |')
            print(" '------------------------------------")
        else:
            # Build was interrupted.
            print()
            print(' .------------------------------------')
            print(' |')
            print(' | ', self.charset.slug_fail, '- interrupted')
            print(' |')
            print(" '------------------------------------")
        # Show a large color banner so it is obvious what the overall result is.
        if all(self.builds_succeeded) and not cancelled:
            print(_COLOR.green(_PASS_MESSAGE))
        else:
            print(_COLOR.red(_FAIL_MESSAGE))
        # Reset so the next change is reported by _handle_matched_event().
        self.matching_path = None
    # Implementation of DebouncedFunction.on_keyboard_interrupt()
    def on_keyboard_interrupt(self) -> NoReturn:
        """Ctrl-C handler invoked by the Debouncer; exits the watcher."""
        _exit_due_to_interrupt()
# Separator for the --patterns / --ignore_patterns command-line values.
_WATCH_PATTERN_DELIMITER = ','
# Default file globs whose changes trigger a rebuild.
_WATCH_PATTERNS = (
    '*.bloaty',
    '*.c',
    '*.cc',
    '*.css',
    '*.cpp',
    '*.cmake',
    'CMakeLists.txt',
    '*.gn',
    '*.gni',
    '*.go',
    '*.h',
    '*.hpp',
    '*.ld',
    '*.md',
    '*.options',
    '*.proto',
    '*.py',
    '*.rst',
)
def add_parser_arguments(parser: argparse.ArgumentParser) -> None:
    """Sets up an argument parser for pw watch.

    Args:
      parser: the parser to which the pw watch options are added.
    """
    parser.add_argument('--patterns',
                        help=(_WATCH_PATTERN_DELIMITER +
                              '-delimited list of globs to '
                              'watch to trigger recompile'),
                        default=_WATCH_PATTERN_DELIMITER.join(_WATCH_PATTERNS))
    parser.add_argument('--ignore_patterns',
                        dest='ignore_patterns_string',
                        help=(_WATCH_PATTERN_DELIMITER +
                              '-delimited list of globs to '
                              'ignore events from'))
    parser.add_argument('--exclude_list',
                        nargs='+',
                        type=Path,
                        help='directories to ignore during pw watch',
                        default=[])
    parser.add_argument('--no-restart',
                        dest='restart',
                        action='store_false',
                        help='do not restart ongoing builds if files change')
    parser.add_argument(
        'default_build_targets',
        nargs='*',
        metavar='target',
        default=[],
        # Fixed typo in the help text: 'ust' -> 'use'.
        help=('Automatically locate a build directory and build these '
              'targets. For example, `host docs` searches for a Ninja '
              'build directory (starting with out/) and builds the '
              '`host` and `docs` targets. To specify one or more '
              'directories, use the -C / --build_directory option.'))
    parser.add_argument(
        '-C',
        '--build_directory',
        dest='build_directories',
        nargs='+',
        action='append',  # Each -C adds one [directory, *targets] list.
        default=[],
        metavar=('directory', 'target'),
        help=('Specify a build directory and optionally targets to '
              'build. `pw watch -C out tgt` is equivalent to `ninja '
              '-C out tgt`'))
def _exit(code: int) -> NoReturn:
    """Immediately terminates the whole process (all threads) with `code`."""
    # Note: The "proper" way to exit is via observer.stop(), then
    # running a join. However it's slower, so just exit immediately.
    #
    # Additionally, since there are several threads in the watcher, the usual
    # sys.exit approach doesn't work. Instead, run the low level exit which
    # kills all threads.
    os._exit(code)  # pylint: disable=protected-access
def _exit_due_to_interrupt() -> NoReturn:
    """Logs that Ctrl-C was received and exits the process cleanly."""
    # To keep the log lines aligned with each other in the presence of
    # a '^C' from the keyboard interrupt, add a newline before the log.
    print()
    print()
    _LOG.info('Got Ctrl-C; exiting...')
    _exit(0)
def _exit_due_to_inotify_limit():
    """Explains how to raise the Linux inotify watch limit, then exits."""
    # Show information and suggested commands in OSError: inotify limit reached.
    _LOG.error('Inotify limit reached: run this in your terminal if you '
               'are in Linux to temporarily increase inotify limit. \n')
    print(
        _COLOR.green(' sudo sysctl fs.inotify.max_user_watches='
                     '$NEW_LIMIT$\n'))
    print(' Change $NEW_LIMIT$ with an integer number, '
          'e.g., 1000 should be enough.')
    # NOTE(review): this error path exits with status 0, while
    # _exit_due_to_pigweed_not_installed() uses 1 -- confirm intentional.
    _exit(0)
def _exit_due_to_pigweed_not_installed():
    """Explains that the Pigweed environment is missing, then exits with 1."""
    # Shown when $PW_ROOT is unset or points outside the current directory:
    # suggest the usual activation/installation commands.
    messages = (
        'Environment variable $PW_ROOT not defined or is defined '
        'outside the current directory.',
        'Did you forget to activate the Pigweed environment? '
        'Try source ./activate.sh',
        'Did you forget to install the Pigweed environment? '
        'Try source ./bootstrap.sh',
    )
    for message in messages:
        _LOG.error(message)
    _exit(1)
def minimal_watch_directories(to_watch: Path, to_exclude: Iterable[Path]):
    """Determines which subdirectories to watch, and whether recursively.

    Walks the tree under to_watch and yields (directory, recurse) pairs:
    every ancestor of an excluded directory is yielded with recurse=False
    (so the excluded subtree is never observed), while all other
    subdirectories are yielded with recurse=True. Callers pass each pair to
    Observer() with the matching recursion flag.

    Args:
      to_watch: single root directory to watch.
      to_exclude: directories under to_watch to leave unwatched.

    Yields:
      (Path, bool) tuples: a directory and whether to watch it recursively.

    Raises:
      TypeError: if to_watch cannot be converted to a Path.
    """
    try:
        to_watch = Path(to_watch)
    except TypeError as err:
        # Raise instead of `assert False` so the check survives `python -O`.
        raise TypeError('Please watch one directory at a time.') from err
    # Reformat to_exclude: keep only entries that exist as directories.
    directories_to_exclude: List[Path] = [
        to_watch.joinpath(directory_to_exclude)
        for directory_to_exclude in to_exclude
        if to_watch.joinpath(directory_to_exclude).is_dir()
    ]
    # Split the relative path of directories_to_exclude (compared to to_watch),
    # and generate all parent paths needed to be watched without recursion.
    exclude_dir_parents = {to_watch}
    for directory_to_exclude in directories_to_exclude:
        parts = directory_to_exclude.relative_to(to_watch).parts[:-1]
        dir_tmp = to_watch
        for part in parts:
            dir_tmp = dir_tmp / part
            exclude_dir_parents.add(dir_tmp)
    # Go over all layers of directory. Append those that are the parents of
    # directories_to_exclude to the list with recursion==False, and others
    # with recursion==True.
    for directory in exclude_dir_parents:
        yield directory, False
        for item in directory.iterdir():
            if (item.is_dir() and item not in exclude_dir_parents
                    and item not in directories_to_exclude):
                yield item, True
def get_common_excludes() -> List[Path]:
    """Find commonly excluded directories, and return them as a [Path]."""
    typical_ignored_directories: List[str] = [
        '.environment',  # Legacy bootstrap-created CIPD and Python venv.
        '.presubmit',  # Presubmit-created CIPD and Python venv.
        '.git',  # Pigweed's git repo.
        '.mypy_cache',  # Python static analyzer.
        '.cargo',  # Rust package manager.
        'environment',  # Bootstrap-created CIPD and Python venv.
        'out',  # Typical build directory.
    ]

    pw_root_dir = Path(os.environ['PW_ROOT'])
    pw_project_root_dir = Path(os.environ['PW_PROJECT_ROOT'])

    # Exclude the typical directories under the Pigweed root, and -- when
    # watch is invoked from a downstream project -- under that project's
    # root as well.
    roots = [pw_root_dir]
    if pw_project_root_dir != pw_root_dir:
        roots.append(pw_project_root_dir)
    exclude_list: List[Path] = [
        root / ignored_directory for root in roots
        for ignored_directory in typical_ignored_directories
    ]

    # Check for and warn about legacy directories.
    legacy_directories = [
        '.cipd',  # Legacy CIPD location.
        '.python3-venv',  # Legacy Python venv location.
    ]
    found_legacy = False
    for legacy_directory in legacy_directories:
        full_legacy_directory = pw_root_dir / legacy_directory
        if full_legacy_directory.is_dir():
            _LOG.warning('Legacy environment directory found: %s',
                         str(full_legacy_directory))
            exclude_list.append(full_legacy_directory)
            found_legacy = True
    if found_legacy:
        _LOG.warning('Found legacy environment directory(s); these '
                     'should be deleted')

    return exclude_list
def _find_build_dir(default_build_dir: Path = Path('out')) -> Optional[Path]:
"""Searches for a build directory, returning the first it finds."""
# Give priority to out/, then something under out/.
if default_build_dir.joinpath('build.ninja').exists():
return default_build_dir
for path in default_build_dir.glob('**/build.ninja'):
return path.parent
for path in Path.cwd().glob('**/build.ninja'):
return path.parent
return None
def watch(default_build_targets: List[str], build_directories: List[str],
          patterns: str, ignore_patterns_string: str, exclude_list: List[Path],
          restart: bool):
    """Watches files and runs Ninja commands when they change.

    Args:
        default_build_targets: Targets built when no explicit directory/target
            pairs are supplied (or in the auto-discovered build directory).
        build_directories: Each entry is a build directory optionally followed
            by the targets to build in it.
        patterns: Delimited glob patterns of files that trigger rebuilds.
        ignore_patterns_string: Delimited glob patterns to ignore.
        exclude_list: Directories to exclude from filesystem watching.
        restart: Whether a new file change restarts an in-progress build.
    """
    _LOG.info('Starting Pigweed build watcher')

    # Get pigweed directory information from environment variable PW_ROOT.
    # BUGFIX: os.environ['PW_ROOT'] can never be None -- a missing variable
    # raises KeyError. Use .get() so the friendly "not installed" exit path
    # is actually reachable.
    pw_root_env = os.environ.get('PW_ROOT')
    if pw_root_env is None:
        _exit_due_to_pigweed_not_installed()
    pw_root = Path(pw_root_env).resolve()
    if Path.cwd().resolve() not in [pw_root, *pw_root.parents]:
        _exit_due_to_pigweed_not_installed()

    # Preset exclude list for pigweed directory.
    exclude_list += get_common_excludes()

    build_commands = [
        BuildCommand(Path(build_dir[0]), tuple(build_dir[1:]))
        for build_dir in build_directories
    ]

    # If no build directory was specified, search the tree for a build.ninja.
    if default_build_targets or not build_directories:
        build_dir = _find_build_dir()

        # Make sure we found something; if not, bail.
        if build_dir is None:
            _die("No build dirs found. Did you forget to run 'gn gen out'?")

        build_commands.append(
            BuildCommand(build_dir, tuple(default_build_targets)))

    # Verify that the build output directories exist.
    for i, build_target in enumerate(build_commands, 1):
        if not build_target.build_dir.is_dir():
            _die("Build directory doesn't exist: %s", build_target)
        else:
            _LOG.info('Will build [%d/%d]: %s', i, len(build_commands),
                      build_target)

    _LOG.debug('Patterns: %s', patterns)

    # Try to make a short display path for the watched directory that has
    # "$HOME" instead of the full home directory. This is nice for users
    # who have deeply nested home directories.
    path_to_log = str(Path().resolve()).replace(str(Path.home()), '$HOME')

    # Ignore the user-specified patterns.
    ignore_patterns = (ignore_patterns_string.split(_WATCH_PATTERN_DELIMITER)
                       if ignore_patterns_string else [])

    env = pw_cli.env.pigweed_environment()
    if env.PW_EMOJI:
        charset = _EMOJI_CHARSET
    else:
        charset = _ASCII_CHARSET

    event_handler = PigweedBuildWatcher(
        patterns=patterns.split(_WATCH_PATTERN_DELIMITER),
        ignore_patterns=ignore_patterns,
        build_commands=build_commands,
        charset=charset,
        restart=restart,
    )

    try:
        # It can take awhile to configure the filesystem watcher, so have the
        # message reflect that with the "...". Run inside the try: to
        # gracefully handle the user Ctrl-C'ing out during startup.
        _LOG.info('Attaching filesystem watcher to %s/...', path_to_log)

        # Observe changes for all files in the root directory. Whether the
        # directory should be observed recursively or not is determined by the
        # second element in subdirectories_to_watch.
        observers = []
        for path, rec in minimal_watch_directories(Path.cwd(), exclude_list):
            observer = Observer()
            observer.schedule(
                event_handler,
                str(path),
                recursive=rec,
            )
            observer.start()
            observers.append(observer)

        event_handler.debouncer.press('Triggering initial build...')

        # Block forever (observers run on their own threads).
        for observer in observers:
            while observer.is_alive():
                observer.join(1)

    # Ctrl-C on Unix generates KeyboardInterrupt
    # Ctrl-Z on Windows generates EOFError
    except (KeyboardInterrupt, EOFError):
        _exit_due_to_interrupt()
    except OSError as err:
        if err.args[0] == _ERRNO_INOTIFY_LIMIT_REACHED:
            _exit_due_to_inotify_limit()
        else:
            raise err

    # Unreachable in normal operation; kept as a defensive fallback.
    _LOG.critical('Should never get here')
    observer.join()
def main() -> None:
    """Entry point: parse command-line arguments and hand off to watch()."""
    arg_parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    add_parser_arguments(arg_parser)
    parsed_args = arg_parser.parse_args()
    watch(**vars(parsed_args))


if __name__ == '__main__':
    main()
| pw_watch/py/pw_watch/watch.py | 24,653 | Process filesystem events and launch builds if necessary.
Searches for a build directory, returning the first it finds.
Returns true if path matches according to the watcher patterns
Sets up an argument parser for pw watch.
Find commonly excluded directories, and return them as a [Path]
Returns true if this file is in a Git repo and ignored by that repo.
Returns true for ignored files that were manually added to a repo.
Watch files for changes and rebuild.
Determine which subdirectory to watch recursively
Run all the builds in serial and capture pass/fail for each.
Watches files and runs Ninja commands when they change.
Watch files for changes and rebuild.
pw watch runs Ninja in a build directory when source files change. It works with
any Ninja project (GN or CMake).
Usage examples:
# Find a build directory and build the default target
pw watch
# Find a build directory and build the stm32f429i target
pw watch python.lint stm32f429i
# Build pw_run_tests.modules in the out/cmake directory
pw watch -C out/cmake pw_run_tests.modules
# Build the default target in out/ and pw_apps in out/cmake
pw watch -C out -C out/cmake pw_apps
# Find a directory and build python.tests, and build pw_apps in out/cmake
pw watch python.tests -C out/cmake pw_apps
!/usr/bin/env python Copyright 2020 The Pigweed Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. type: ignore[import] type: ignore[import] Suppress events under 'fsevents', generated by watchdog on every file event on MacOS. TODO(b/182281481): Fix file ignoring, rather than just suppressing logs Pick a visually-distinct font from "PASS" to ensure that readers can't possibly mistake the difference between the two states. TODO(keir): Figure out a better strategy for exiting. The problem with the watcher is that doing a "clean exit" is slow. However, by directly exiting, we remove the possibility of the wrapper script doing anything on exit. Run the Git command from file's parent so that the correct repo is used. If the directory no longer exists, try parent directories until an existing directory is found or all directories have been checked. This approach makes it possible to check if a deleted path is ignored in the repo it was originally created in. Track state of a build. These need to be members instead of locals due to the split between dispatch(), run(), and on_complete(). Ctrl-C on Unix generates KeyboardInterrupt Ctrl-Z on Windows generates EOFError There isn't any point in triggering builds on new directory creation. It's the creation or modification of files that indicate something meaningful enough changed for a build. Collect paths of interest from the event. Check whether Git cares about any of these paths. 
Implementation of DebouncedFunction.run() Note: This will run on the timer thread created by the Debouncer, rather than on the main thread that's watching file events. This enables the watcher to continue receiving file change events during a build. Clear the screen and show a banner indicating the build is starting. TODO(pwbug/38): Not Windows compatible. Force colors in Pigweed subcommands run through the watcher. Run the build. Put a blank before/after for visual separation. Implementation of DebouncedFunction.cancel() Implementation of DebouncedFunction.run() First, use the standard logging facilities to report build status. Then, show a more distinct colored banner. Write out build summary table so you can tell which builds passed and which builds failed. Build was interrupted. Show a large color banner so it is obvious what the overall result is. Implementation of DebouncedFunction.on_keyboard_interrupt() Note: The "proper" way to exit is via observer.stop(), then running a join. However it's slower, so just exit immediately. Additionally, since there are several threads in the watcher, the usual sys.exit approach doesn't work. Instead, run the low level exit which kills all threads. pylint: disable=protected-access To keep the log lines aligned with each other in the presence of a '^C' from the keyboard interrupt, add a newline before the log. Show information and suggested commands in OSError: inotify limit reached. Show information and suggested commands when pigweed environment variable not found. Go over each directory inside of the current directory. If it is not on the path of elements in directories_to_exclude, add (directory, True) to subdirectories_to_watch and later recursively call Observer() on them. Otherwise add (directory, False) to subdirectories_to_watch and later call Observer() with recursion=False. Reformat to_exclude. 
Split the relative path of directories_to_exclude (compared to to_watch), and generate all parent paths needed to be watched without recursion. Go over all layers of directory. Append those that are the parents of directories_to_exclude to the list with recursion==False, and others with recursion==True. Legacy bootstrap-created CIPD and Python venv. Presubmit-created CIPD and Python venv. Pigweed's git repo. Python static analyzer. Rust package manager. Bootstrap-created CIPD and Python venv. Typical build directory. Preset exclude list for Pigweed's upstream directories. Preset exclude for common downstream project structures. If watch is invoked outside of the Pigweed root, exclude common directories. Check for and warn about legacy directories. Legacy CIPD location. Legacy Python venv location. Give priority to out/, then something under out/. Get pigweed directory information from environment variable PW_ROOT. Preset exclude list for pigweed directory. If no build directory was specified, search the tree for a build.ninja. Make sure we found something; if not, bail. Verify that the build output directories exist. Try to make a short display path for the watched directory that has "$HOME" instead of the full home directory. This is nice for users who have deeply nested home directories. Ignore the user-specified patterns. It can take awhile to configure the filesystem watcher, so have the message reflect that with the "...". Run inside the try: to gracefully handle the user Ctrl-C'ing out during startup. Observe changes for all files in the root directory. Whether the directory should be observed recursively or not is determined by the second element in subdirectories_to_watch. Ctrl-C on Unix generates KeyboardInterrupt Ctrl-Z on Windows generates EOFError | 6,881 | en | 0.883986 |
import os
import string
import textwrap
import unittest
import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
import logging
#
# DMRIInstall
#
class DMRIInstall(ScriptedLoadableModule):
  """Helper module offering a one-click install of the SlicerDMRI extension.

  The module hides itself when SlicerDMRI is already installed; otherwise it
  appears under the "Diffusion" category and DMRIInstallWidget provides the
  install button.
  """

  # User-facing HTML shown in the module panel describing SlicerDMRI.
  helpText = textwrap.dedent(
  """
The SlicerDMRI extension provides diffusion-related tools including:

 <ul>
  <li> Diffusion Tensor Estimation</li>
  <li>Tractography Display</li>
  <li>Tractography Seeding</li>
  <li>Fiber Tract Measurement</li>
 </ul>
<br>
<br>
For more information, please visit:
<br>
<br>
 <a href="http://dmri.slicer.org">http://dmri.slicer.org</a>
<br>
<br>
Questions are welcome on the Slicer forum:
<br>
<br>
 <a href="https://discourse.slicer.org">https://discourse.slicer.org</a><br><br>
""")

  # HTML shown when the extension metadata cannot be retrieved; includes the
  # Slicer build details users should attach to a forum report. Formatted
  # once at class-definition time from the running application's metadata.
  errorText = textwrap.dedent(
  """
<h5 style="color:red">The SlicerDMRI extension is currently unavailable.</h5><br>
Please try a manual installation via the Extension Manager,
and contact the Slicer forum at:<br><br>

<a href="https://discourse.slicer.org">https://discourse.slicer.org</a><br><br>

With the following information:<br>
Slicer version: {builddate}<br>
Slicer revision: {revision}<br>
Platform: {platform}
""").format(builddate=slicer.app.applicationVersion,
            revision = slicer.app.repositoryRevision,
            platform = slicer.app.platform)

  def __init__(self, parent):
    # Hide this module if SlicerDMRI is already installed
    model = slicer.app.extensionsManagerModel()
    if model.isExtensionInstalled("SlicerDMRI"):
      parent.hidden = True

    ScriptedLoadableModule.__init__(self, parent)

    # Standard module metadata shown in the module selector and help panel.
    self.parent.categories = ["Diffusion"]
    self.parent.title = "Install Slicer Diffusion Tools (SlicerDMRI)"
    self.parent.dependencies = []
    self.parent.contributors = ["Isaiah Norton (BWH), Lauren O'Donnell (BWH)"]
    self.parent.helpText = DMRIInstall.helpText
    self.parent.helpText += self.getDefaultModuleDocumentationLink()
    self.parent.acknowledgementText = textwrap.dedent(
    """
    SlicerDMRI supported by NIH NCI ITCR U01CA199459 (Open Source Diffusion MRI
    Technology For Brain Cancer Research), and made possible by NA-MIC, NAC,
    BIRN, NCIGT, and the Slicer Community.
    """)
class DMRIInstallWidget(ScriptedLoadableModuleWidget):
  """Widget with an "Install SlicerDMRI" button for the DMRIInstall module.

  Uses ScriptedLoadableModuleWidget base class, available at:
  https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
  """

  def setup(self):
    """Build the module GUI: help text plus the install button."""
    ScriptedLoadableModuleWidget.setup(self)

    # Rich-text description of SlicerDMRI with clickable external links.
    self.textBox = ctk.ctkFittedTextBrowser()
    self.textBox.setOpenExternalLinks(True) # Open links in default browser
    self.textBox.setHtml(DMRIInstall.helpText)
    self.parent.layout().addWidget(self.textBox)

    #
    # Apply Button
    #
    self.applyButton = qt.QPushButton("Install SlicerDMRI")
    self.applyButton.toolTip = 'Installs the "SlicerDMRI" extension from the Diffusion category.'
    self.applyButton.icon = qt.QIcon(":/Icons/ExtensionDefaultIcon.png")
    self.applyButton.enabled = True
    self.applyButton.connect('clicked()', self.onApply)
    self.parent.layout().addWidget(self.applyButton)
    self.parent.layout().addStretch(1)

  def onError(self):
    """Disable the install button and show the error/contact message."""
    self.applyButton.enabled = False
    self.textBox.setHtml(DMRIInstall.errorText)
    return

  def onApply(self):
    """Download and install SlicerDMRI, then offer to restart Slicer."""
    emm = slicer.app.extensionsManagerModel()

    # Nothing to do if the extension is already present.
    if emm.isExtensionInstalled("SlicerDMRI"):
      self.textBox.setHtml("<h4>SlicerDMRI is already installed.<h4>")
      self.applyButton.enabled = False
      return

    # Look up the extension on the extension server; bail to the error view
    # if the metadata (and hence the extension id) is unavailable.
    md = emm.retrieveExtensionMetadataByName("SlicerDMRI")
    if not md or 'extension_id' not in md:
      return self.onError()

    if emm.downloadAndInstallExtension(md['extension_id']):
      slicer.app.confirmRestart("Restart to complete SlicerDMRI installation?")
    else:
      self.onError()
| Modules/Scripted/DMRIInstall/DMRIInstall.py | 3,909 | Uses ScriptedLoadableModuleWidget base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
DMRIInstall Hide this module if SlicerDMRI is already installed Open links in default browser Apply Button | 258 | en | 0.346724 |
from django.shortcuts import render,HttpResponse
from game.models import Contact
from django.contrib import messages
# Create your views here.
def index(request):
    """Render the landing page, passing a demo context variable through."""
    return render(request, 'index.html', {'variable': "This is sent."})
def about(request):
    """Render the static about page."""
    return render(request, 'about.html')
def products(request):
    """Render the static products page."""
    return render(request, 'products.html')
def contact(request):
    """Render the contact page; on POST, persist the submitted message."""
    if request.method == "POST":
        # Save the submitted form fields as a new Contact row.
        submission = Contact(
            firstname=request.POST.get('firstname'),
            lastname=request.POST.get('lastname'),
            phone=request.POST.get('phone'),
            subject=request.POST.get('subject'),
        )
        submission.save()
        messages.success(request, 'Your message has been successfully sent.')
    return render(request, 'contacts.html')
#return HttpResponse("This is contact page.") | views.py | 1,020 | Create your views here.return HttpResponse("This is about page.")return HttpResponse("This is products page.")return HttpResponse("This is contact page.") | 154 | en | 0.661429 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import account_config_settings
import account_move
import account_partial_reconcile
import account_tax
import res_company
| apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/account_tax_cash_basis/models/__init__.py | 222 | -*- coding: utf-8 -*- Part of Odoo. See LICENSE file for full copyright and licensing details. | 94 | en | 0.883753 |
# import libraries
from pyspark.sql import SparkSession
from pyspark import SparkConf
from pyspark.sql.types import *
from pyspark.sql.functions import col, count, lit, rand, when
import pandas as pd
from math import ceil
#################################################
# spark config
#################################################

mtaMaster = "spark://192.168.0.182:7077"

conf = SparkConf()
conf.setMaster(mtaMaster)

# Executor/driver sizing, serializer and event-log settings for the
# standalone cluster; applied in declaration order.
_conf_settings = {
    "spark.executor.memory": "24g",
    "spark.driver.memory": "26g",
    "spark.cores.max": 96,
    "spark.driver.cores": 8,
    "spark.serializer": "org.apache.spark.serializer.KryoSerializer",
    "spark.kryoserializer.buffer": "256m",
    "spark.kryoserializer.buffer.max": "256m",
    "spark.default.parallelism": 24,
    "spark.eventLog.enabled": "true",
    "spark.eventLog.dir": "hdfs://192.168.0.182:9000/eventlog",
    "spark.history.fs.logDirectory": "hdfs://192.168.0.182:9000/eventlog",
    "spark.driver.maxResultSize": "4g",
}
for _setting_name, _setting_value in _conf_settings.items():
    conf.set(_setting_name, _setting_value)
conf.getAll()

#################################################
# create spark session
#################################################

spark = SparkSession.builder.appName('ML2_HV_v1_NYT_sim1_and_sim3_to_sim2_round5_human_validation').config(conf=conf).getOrCreate()
sc = spark.sparkContext

# check things are working
print(sc)
print(sc.defaultParallelism)
print("SPARK CONTEXT IS RUNNING")
#################################################
# define major topic codes
#################################################

# Major topic codes iterated over below (code 11 and code 23 are absent --
# 23 does not occur in the NYT corpus; the commented-out variant keeps it).
majortopic_codes = [*range(1, 11), *range(12, 22), 100]
#majortopic_codes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 23, 100]
#################################################
# read result data from round 5 (the r5 classified parquet below)
#################################################
df_results = spark.read.parquet("hdfs://192.168.0.182:9000/input/ML2_HV_v1_NYT_r5_classified.parquet").repartition(50)
# verdict to integer for the comparison with majortopic later
df_results = df_results.withColumn('verdict', df_results.verdict.cast(IntegerType()))
#################################################
# create table to store sample and validation numbers
#################################################
# One pandas row per major topic code; counters are filled in by the loops
# below, then exported to CSV for the human-validation report.
columns = ["num_classified", "num_sample", "num_non_sample", "num_correct", "num_incorrect", "precision_in_sample", "num_added_to_training"]
df_numbers = pd.DataFrame(index=majortopic_codes, columns=columns)
df_numbers = df_numbers.fillna(0)
#################################################
# create table of samples from results
#################################################
# constants for sample size calculation for 95% confidence with +-0.05 precision confidence interval:
# z_delta = z^2 * p(1-p) / delta^2 with worst-case p = 0.5
z = 1.96
delta = 0.05
z_delta = z*z*0.5*0.5/(delta*delta)
print("z_delta :", z_delta)
# For every major topic code, draw a random validation sample from the
# documents the classifier assigned to it (finite-population sample size from
# z_delta, minimum 100), and accumulate per-code sample / non-sample tables.
# Only change vs. original: PEP 8 identity comparisons (`is None` /
# `is not None`) instead of `== None` / `!= None`.
for i in majortopic_codes:
    df_classified = df_results.where(col('verdict') == i)
    num_classified = df_classified.count()
    df_numbers["num_classified"].loc[i] = num_classified
    print("MTC:", i, "num_classified: ", num_classified)
    if num_classified > 100:
        # Finite-population correction of the sample size, floored at 100.
        sample_size = ceil(z_delta/(1+1/num_classified*(z_delta-1)))
        print("sample_size: ", sample_size)
        if sample_size < 100:
            sample_size = 100
        # Deterministic pre-sort by doc_id, then random shuffle and cut.
        df_sample = df_classified.sort('doc_id').withColumn('random', rand()).sort('random').limit(sample_size).drop('random')
        df_sample_num = df_sample.count()
        print("df_sample: ", df_sample_num)
        # separate non-sample from sample elements
        ids_drop = df_sample.select("doc_id")
        df_non_sample = df_classified.join(ids_drop, "doc_id", "left_anti")
        df_numbers["num_sample"].loc[i] = df_sample_num
        df_numbers["num_non_sample"].loc[i] = df_non_sample.count()
    else:
        # Small classes are validated in full: everything is "sample".
        df_numbers["num_sample"].loc[i] = num_classified
        df_sample = df_classified
        df_non_sample = None
    # create table of all samples and add new sample to it
    if i == 1:
        df_sample_all = df_sample
    else:
        df_sample_all = df_sample_all.union(df_sample)
    #print("MTC:", i, "df_sample_all: ", df_sample_all.count())
    # create table of all non-samples and add new non-sample to it
    if i == 1:
        df_non_sample_all = None
    if df_non_sample is not None and df_non_sample_all is None:
        df_non_sample_all = df_non_sample
    elif df_non_sample is not None and df_non_sample_all is not None:
        df_non_sample_all = df_non_sample_all.union(df_non_sample)
    #print("MTC:", i, "df_non_sample_all: ", df_non_sample_all.count())
    print("MTC:", i)
#################################################
# check precision by majortopic codes
#################################################
# NOTE(review): df_numbers["col"].loc[i] = ... is pandas chained indexing;
# it appears to work here but may emit SettingWithCopyWarning -- confirm.
# count correctly classified and precision for each majortopic code and write to table of numbers
df_correctly_classified = df_sample_all.where(col('majortopic') == col('verdict'))
for i in majortopic_codes:
    num_correct = df_correctly_classified.where(col('verdict') == i).count()
    df_numbers["num_correct"].loc[i] = num_correct
    # per-code precision = validated-correct / sample size
    df_numbers["precision_in_sample"].loc[i] = num_correct/df_numbers["num_sample"].loc[i]
# count incorrectly classified for debugging and checking
df_incorrectly_classified = df_sample_all.where(col('majortopic') != col('verdict'))
for i in majortopic_codes:
    num_incorrect = df_incorrectly_classified.where(col('verdict') == i).count()
    df_numbers["num_incorrect"].loc[i] = num_incorrect
print(df_numbers)
#################################################
# create tables of elements based on precision
#################################################
# create tables for sorting elements based on precision results
# where precision is equal to or greater than 75%
# NOTE: validated wrongly classified elements will NOT be added to the results with the wrong major
# topic code, instead they will be added to the unclassified elements as in rounds 1&2
# Only change vs. original: PEP 8 identity comparisons (`is None` /
# `is not None`) instead of `== None` / `!= None`.
df_replace_all = None
# where precision is less than 75%
df_non_sample_replace = None
df_correct_replace = None
df_wrong_replace = None
for i in majortopic_codes:
    print("create tables MTC:", i)
    if df_numbers["precision_in_sample"].loc[i] >= 0.75:
        # in this case add all elements from sample and non-sample to the training set with
        # new major topic code i, EXCEPT for validated negatives, those are added to back into the
        # test set
        # first add wrong sample elements to their table
        df_lemma = df_sample_all.where(col('verdict') == i).where(col('majortopic') != col('verdict'))
        if df_wrong_replace is None:
            df_wrong_replace = df_lemma
        else:
            df_wrong_replace = df_wrong_replace.union(df_lemma)
        # get doc_ids for these elements to remove them from the rest of the elements classified as
        # belonging to major topic i
        ids_drop = df_lemma.select("doc_id")
        # get all elements classified as belonging to major topic code i
        df_lemma = df_results.where(col('verdict') == i)
        # remove wrongly classified from df_lemma
        df_lemma = df_lemma.join(ids_drop, "doc_id", "left_anti")
        # add df_lemma to df_replace_all
        if df_replace_all is None:
            df_replace_all = df_lemma
        else:
            df_replace_all = df_replace_all.union(df_lemma)
        # write numbers to df_numbers
        df_numbers["num_added_to_training"].loc[i] = df_lemma.count()
        #print("MTC:", i, "df_replace_all: ", df_replace_all.count())
    else:
        # in this case add only correct elements from sample to training set, the rest go back in
        # the test set
        # first add non-sample elements to their table, BUT we have to check whether non-sample elements
        # exist
        if df_non_sample_all is not None:
            df_lemma = df_non_sample_all.where(col('verdict') == i)
            if df_non_sample_replace is None:
                df_non_sample_replace = df_lemma
            else:
                df_non_sample_replace = df_non_sample_replace.union(df_lemma)
        else:
            df_non_sample_replace = None
        #print("MTC:", i, "df_non_sample_replace: ", df_non_sample_replace.count())
        # second add correct sample elements to their table
        df_lemma = df_sample_all.where(col('verdict') == i).where(col('majortopic') == col('verdict'))
        if df_correct_replace is None:
            df_correct_replace = df_lemma
        else:
            df_correct_replace = df_correct_replace.union(df_lemma)
        df_numbers["num_added_to_training"].loc[i] = df_lemma.count()
        #print("MTC:", i, "df_correct_replace: ", df_correct_replace.count())
        # finally add wrong sample elements to their table
        df_lemma = df_sample_all.where(col('verdict') == i).where(col('majortopic') != col('verdict'))
        if df_wrong_replace is None:
            df_wrong_replace = df_lemma
        else:
            df_wrong_replace = df_wrong_replace.union(df_lemma)
        #print("MTC:", i, "df_wrong_replace: ", df_wrong_replace.count())
# sometimes there will be no major topic code with precision => 75%
if df_replace_all is None:
    df_replace_all = "empty"
# sometimes there will be no non-sample elements
if df_non_sample_replace is None:
    df_non_sample_replace = "empty"
# the reason for creating these "empty" values, is because they will persist after we clear the
# cache, and we can use them later in the workflow control
# write all tables to parquet before clearing memory
df_correct_replace.write.parquet("hdfs://192.168.0.182:9000/input/df_correct_replace_temp.parquet", mode="overwrite")
df_wrong_replace.write.parquet("hdfs://192.168.0.182:9000/input/df_wrong_replace_temp.parquet", mode="overwrite")
# sometimes there will be no non-sample elements
if df_non_sample_replace != "empty":
    df_non_sample_replace.write.parquet("hdfs://192.168.0.182:9000/input/df_non_sample_replace_temp.parquet", mode="overwrite")
# sometimes there will be no major topic code with precision => 75%
if df_replace_all != "empty":
    df_replace_all.write.parquet("hdfs://192.168.0.182:9000/input/df_replace_all_temp.parquet", mode="overwrite")
# write df_numbers to csv
df_numbers.to_csv("ML2_HV_v1_NYT_human_validation_numbers_r5.csv", index=True)
# empty memory
spark.catalog.clearCache()
print("cache cleared")
#################################################
# prepare df_original to add tables to it
#################################################
df_original = spark.read.parquet("hdfs://192.168.0.182:9000/input/ML2_HV_v1_NYT_r5_train_and_remaining_NOTclassified.parquet").repartition(50)
# we need to create a new majortopic column, because we are now adding back in elements with
# potentially new labels
df_original = df_original.withColumnRenamed('majortopic', 'mtc_after_r4')
df_original = df_original.withColumn('majortopic', df_original['mtc_after_r4'])
# finally, create the new train id column
df_original = df_original.withColumn("train_r6", when(df_original["train_r5"] == 1, 1).otherwise(0))
#################################################
# add df_replace_all back to df_original
#################################################
# "empty" is the sentinel string set above when no topic code reached the
# 75% precision bar; comparing a DataFrame to a string is always unequal.
if df_replace_all != "empty":
    print("df_replace_all is NOT empty")
    df_replace_all = spark.read.parquet("hdfs://192.168.0.182:9000/input/df_replace_all_temp.parquet").repartition(50)
    # we need to create a new majortopic column, because we are now adding back in elements with
    # potentially new labels (here: the classifier's verdict becomes the label)
    df_replace_all = df_replace_all.withColumnRenamed('majortopic', 'mtc_after_r4')
    df_replace_all = df_replace_all.withColumn('majortopic', df_replace_all['verdict'])
    # create the new train id column (1 = part of the round-6 training set)
    df_replace_all = df_replace_all.withColumn("train_r6", lit(1))
    # drop the extra columns to be able to add it back to df_original
    df_replace_all = df_replace_all.drop('verdict')
    # add df_replace_all elements to df_original
    df_original = df_original.union(df_replace_all)
else:
    print("df_replace_all is empty")
#################################################
# add df_non_sample_replace back to df_original
#################################################
if df_non_sample_replace != "empty":
    print("df_non_sample_replace is NOT empty")
    df_non_sample_replace = spark.read.parquet("hdfs://192.168.0.182:9000/input/df_non_sample_replace_temp.parquet").repartition(50)
    # we need to create a new majortopic column, because we are now adding back in elements with
    # potentially new labels (here: the pre-round label is kept)
    df_non_sample_replace = df_non_sample_replace.withColumnRenamed('majortopic', 'mtc_after_r4')
    df_non_sample_replace = df_non_sample_replace.withColumn('majortopic', df_non_sample_replace['mtc_after_r4'])
    # create the new train id column (0 = stays in the test set)
    df_non_sample_replace = df_non_sample_replace.withColumn("train_r6", lit(0))
    # drop the extra columns to be able to add it back to df_original
    df_non_sample_replace = df_non_sample_replace.drop('verdict')
    # add df_non_sample_replace elements to df_original
    df_original = df_original.union(df_non_sample_replace)
else:
    print("df_non_sample_replace is empty")
#################################################
# add df_correct_replace back to df_original
#################################################
df_correct_replace = spark.read.parquet("hdfs://192.168.0.182:9000/input/df_correct_replace_temp.parquet").repartition(50)
# we need to create a new majortopic column, because we are now adding back in elements with
# potentially new labels (validated-correct elements join the training set)
df_correct_replace = df_correct_replace.withColumnRenamed('majortopic', 'mtc_after_r4')
df_correct_replace = df_correct_replace.withColumn('majortopic', df_correct_replace['verdict'])
# create the new train id column
df_correct_replace = df_correct_replace.withColumn("train_r6", lit(1))
# drop the extra columns to be able to add it back to df_original
df_correct_replace = df_correct_replace.drop('verdict')
# add df_correct_replace elements to df_original
df_original = df_original.union(df_correct_replace)
#################################################
# add df_wrong_replace back to df_original
#################################################
df_wrong_replace = spark.read.parquet("hdfs://192.168.0.182:9000/input/df_wrong_replace_temp.parquet").repartition(50)
# we need to create a new majortopic column, because we are now adding back in elements with
# potentially new labels (validated-wrong elements go back to the test set)
df_wrong_replace = df_wrong_replace.withColumnRenamed('majortopic', 'mtc_after_r4')
df_wrong_replace = df_wrong_replace.withColumn('majortopic', df_wrong_replace['mtc_after_r4'])
# create the new train id column
df_wrong_replace = df_wrong_replace.withColumn("train_r6", lit(0))
# drop the extra columns to be able to add it back to df_original
df_wrong_replace = df_wrong_replace.drop('verdict')
# add df_wrong_replace elements to df_original
df_original = df_original.union(df_wrong_replace)
#################################################
# final write operations
#################################################
df_original.write.parquet("hdfs://192.168.0.182:9000/input/ML2_HV_v1_NYT_round6_start.parquet", mode="overwrite")
df_original.groupBy("train_r6").count().show(n=30)
# empty memory
spark.catalog.clearCache()
print("cache cleared")
# write to pandas and export to csv for debugging
df_original = spark.read.parquet("hdfs://192.168.0.182:9000/input/ML2_HV_v1_NYT_round6_start.parquet").repartition(50)
df_original = df_original.drop('text', 'words', 'features', 'raw_features').toPandas()
df_original.to_csv("ML2_HV_v1_NYT_round6_starting_table.csv", index=False)
sc.stop()
spark.stop()
| spark_cluster/04_2_HV_basic/HV_v1_NYT_sim1_and_sim3_to_sim2/6200_ML2_HV_v1_NYT_sim1_and_sim3_to_sim2_round5_human_validation.py | 15,787 | import libraries spark config create spark session check things are working define major topic codes major topic codes for loop (NO 23 IN THE NYT CORPUS)majortopic_codes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 23, 100] read result data from round 3 verdict to integer for the comparison with majortopic later create table to store sample and validation numbers create table of samples from results constants for sample size calculation for 95% confidence with +-0.05 precision confidence interval: separate non-sample from sample elements create table of all samples and add new sample to itprint("MTC:", i, "df_sample_all: ", df_sample_all.count()) create table of all non-samples and add new non-sample to itprint("MTC:", i, "df_non_sample_all: ", df_non_sample_all.count()) check precision by majortopic codes count correctly classified and precision for each majortopic code and write to table of numbers count incorrectly classified for debugging and checking create tables of elements based on precision create tables for sorting elements based on precision results where precision is equal to or greater than 75% NOTE: validated wrongly classified elements will NOT be added to the results with the wrong major topic code, instead they will be added to the unclassified elements as in rounds 1&2 where precision is less than 75% in this case add all elements from sample and non-sample to the training set with new major topic code i, EXCEPT for validated negatives, those are added to back into the test set first add wrong sample elements to their table get doc_ids for these elements to remove them from the rest of the elements classified as belonging to major topic i get all elements classified as belonging to major topic code i remove wrongly classified from df_lemma add df_lemma to df_replace_all write 
numbers to df_numbersprint("MTC:", i, "df_replace_all: ", df_replace_all.count()) in this case add only correct elements from sample to training set, the rest go back in the test set first add non-sample elements to their table, BUT we have to check whether non-sample elements existprint("MTC:", i, "df_non_sample_replace: ", df_non_sample_replace.count()) second add correct sample elements to their tableprint("MTC:", i, "df_correct_replace: ", df_correct_replace.count()) finally add wrong sample elements to their tableprint("MTC:", i, "df_wrong_replace: ", df_wrong_replace.count()) sometimes there will be no major topic code with precision => 75% sometimes there will be no non-sample elements the reason for creating these "empty" values, is because they will persist after we clear the cache, and we can use them later in the workflow control write all tables to parquet before clearing memory sometimes there will be no non-sample elements sometimes there will be no major topic code with precision => 75% write df_numbers to csv empty memory prepare df_original to add tables to it we need to create a new majortopic column, because we are now adding back in elements with potentially new labels finally, create the new train id column add df_replace_all back to df_original we need to create a new majortopic column, because we are now adding back in elements with potentially new labels create the new train id column drop the extra columns to be able to add it back to df_original add df_replace_all elements to df_original add df_non_sample_replace back to df_original we need to create a new majortopic column, because we are now adding back in elements with potentially new labels create the new train id column drop the extra columns to be able to add it back to df_original add df_non_sample_replace elements to df_original add df_correct_replace back to df_original we need to create a new majortopic column, because we are now adding back in elements with potentially new 
labels create the new train id column drop the extra columns to be able to add it back to df_original add df_correct_replace elements to df_original add df_wrong_replace back to df_original we need to create a new majortopic column, because we are now adding back in elements with potentially new labels create the new train id column drop the extra columns to be able to add it back to df_original add df_wrong_replace elements to df_original final write operations empty memory write to pandas and export to csv for debugging | 4,375 | en | 0.784428 |
"""Extra types understood by apitools.
This file will be replaced by a .proto file when we switch to proto2
from protorpc.
"""
import collections
import json
import numbers
from protorpc import message_types
from protorpc import messages
from protorpc import protojson
from apitools.base.py import encoding
from apitools.base.py import exceptions
from apitools.base.py import util
__all__ = [
'DateTimeMessage',
'JsonArray',
'JsonObject',
'JsonValue',
'JsonProtoEncoder',
'JsonProtoDecoder',
]
# We import from protorpc.
# pylint:disable=invalid-name
DateTimeMessage = message_types.DateTimeMessage
# pylint:enable=invalid-name
def _ValidateJsonValue(json_value):
    """Raise InvalidDataError unless exactly one field of json_value is set."""
    assigned = 0
    for field in json_value.all_fields():
        if json_value.get_assigned_value(field.name) is not None:
            assigned += 1
    if assigned != 1:
        raise exceptions.InvalidDataError('Malformed JsonValue: %s' % json_value)
def _JsonValueToPythonValue(json_value):
    """Convert the given JsonValue message to a native Python value."""
    util.Typecheck(json_value, JsonValue)
    _ValidateJsonValue(json_value)
    if json_value.is_null:
        return None
    # Exactly one field is assigned (guaranteed by _ValidateJsonValue above);
    # find it and convert according to its type.
    for field in json_value.all_fields():
        value = json_value.get_assigned_value(field.name)
        if value is None:
            continue
        if not isinstance(field, messages.MessageField):
            return value
        if field.message_type is JsonObject:
            return _JsonObjectToPythonValue(value)
        if field.message_type is JsonArray:
            return _JsonArrayToPythonValue(value)
def _JsonObjectToPythonValue(json_value):
    """Convert a JsonObject message to a Python dict."""
    util.Typecheck(json_value, JsonObject)
    result = {}
    for prop in json_value.properties:
        result[prop.key] = _JsonValueToPythonValue(prop.value)
    return result
def _JsonArrayToPythonValue(json_value):
    """Convert a JsonArray message to a Python list."""
    util.Typecheck(json_value, JsonArray)
    return list(map(_JsonValueToPythonValue, json_value.entries))
# Bounds of the signed 64-bit integer range, [-2**63, 2**63 - 1].
# The previous definitions were buggy: `2 << 63 - 1` precedence-parses as
# `2 << 62 == 2**63`, and `-(2 << 63) == -2**64`, so values in
# (-2**64, -2**63) were wrongly emitted as integer_value even though they
# overflow INT64.
_MAXINT64 = (1 << 63) - 1
_MININT64 = -(1 << 63)


def _PythonValueToJsonValue(py_value):
    """Convert the given python value to a JsonValue."""
    if py_value is None:
        return JsonValue(is_null=True)
    # bool before Number: bool is a Number subclass and must not fall through.
    if isinstance(py_value, bool):
        return JsonValue(boolean_value=py_value)
    if isinstance(py_value, basestring):
        return JsonValue(string_value=py_value)
    if isinstance(py_value, numbers.Number):
        if isinstance(py_value, (int, long)):
            # Only values that fit exactly in INT64 are stored as integers;
            # anything outside the range falls back to double (lossy).
            if _MININT64 <= py_value <= _MAXINT64:
                return JsonValue(integer_value=py_value)
        return JsonValue(double_value=float(py_value))
    if isinstance(py_value, dict):
        return JsonValue(object_value=_PythonValueToJsonObject(py_value))
    # NOTE: on Python 3.10+ this would be collections.abc.Iterable; kept as-is
    # because this module targets Python 2 (basestring/long above).
    if isinstance(py_value, collections.Iterable):
        return JsonValue(array_value=_PythonValueToJsonArray(py_value))
    raise exceptions.InvalidDataError(
        'Cannot convert "%s" to JsonValue' % py_value)
def _PythonValueToJsonObject(py_value):
    """Convert a Python dict to a JsonObject message."""
    util.Typecheck(py_value, dict)
    properties = [
        JsonObject.Property(key=key, value=_PythonValueToJsonValue(value))
        for key, value in py_value.iteritems()]
    return JsonObject(properties=properties)
def _PythonValueToJsonArray(py_value):
    """Convert a Python iterable to a JsonArray message."""
    return JsonArray(entries=[_PythonValueToJsonValue(e) for e in py_value])
class JsonValue(messages.Message):
    """Any valid JSON value.

    Exactly one field should be assigned (enforced by _ValidateJsonValue).
    Field numbers are wire-format identifiers and must not be changed.
    """
    # Is this JSON object `null`?
    is_null = messages.BooleanField(1, default=False)
    # Exactly one of the following is provided if is_null is False; none
    # should be provided if is_null is True.
    boolean_value = messages.BooleanField(2)
    string_value = messages.StringField(3)
    # We keep two numeric fields to keep int64 round-trips exact.
    double_value = messages.FloatField(4, variant=messages.Variant.DOUBLE)
    integer_value = messages.IntegerField(5, variant=messages.Variant.INT64)
    # Compound types (declared by name: JsonObject/JsonArray are defined below).
    object_value = messages.MessageField('JsonObject', 6)
    array_value = messages.MessageField('JsonArray', 7)
class JsonObject(messages.Message):
    """A JSON object value.

    Messages:
      Property: A property of a JsonObject.

    Fields:
      properties: A list of properties of a JsonObject.
    """

    class Property(messages.Message):
        """A property of a JSON object.

        Fields:
          key: Name of the property.
          value: A JsonValue attribute.
        """
        key = messages.StringField(1)
        value = messages.MessageField(JsonValue, 2)

    properties = messages.MessageField(Property, 1, repeated=True)
class JsonArray(messages.Message):
    """A JSON array value."""
    # Each entry is itself an arbitrary JSON value.
    entries = messages.MessageField(JsonValue, 1, repeated=True)
# Dispatch table from JSON proto message type to its Python converter.
# Must appear after the three message classes are defined.
_JSON_PROTO_TO_PYTHON_MAP = {
    JsonArray: _JsonArrayToPythonValue,
    JsonObject: _JsonObjectToPythonValue,
    JsonValue: _JsonValueToPythonValue,
}
_JSON_PROTO_TYPES = tuple(_JSON_PROTO_TO_PYTHON_MAP.keys())


def _JsonProtoToPythonValue(json_proto):
    """Convert any of the three JSON proto messages to a native Python value."""
    util.Typecheck(json_proto, _JSON_PROTO_TYPES)
    return _JSON_PROTO_TO_PYTHON_MAP[type(json_proto)](json_proto)
def _PythonValueToJsonProto(py_value):
    """Convert a Python value to the most specific JSON proto message."""
    if isinstance(py_value, dict):
        return _PythonValueToJsonObject(py_value)
    # Strings are iterable but must encode as scalar values, not arrays.
    is_sequence = (isinstance(py_value, collections.Iterable)
                   and not isinstance(py_value, basestring))
    if is_sequence:
        return _PythonValueToJsonArray(py_value)
    return _PythonValueToJsonValue(py_value)
def _JsonProtoToJson(json_proto, unused_encoder=None):
    """Encode a JSON proto message as a JSON string."""
    return json.dumps(_JsonProtoToPythonValue(json_proto))


def _JsonToJsonProto(json_data, unused_decoder=None):
    """Decode a JSON string into the appropriate JSON proto message."""
    return _PythonValueToJsonProto(json.loads(json_data))


# pylint:disable=invalid-name
JsonProtoEncoder = _JsonProtoToJson
JsonProtoDecoder = _JsonToJsonProto
# pylint:enable=invalid-name
# Register the codecs so the JSON proto messages (de)serialize as plain JSON
# rather than through the default protorpc field-by-field encoding.
encoding.RegisterCustomMessageCodec(
    encoder=JsonProtoEncoder, decoder=JsonProtoDecoder)(JsonValue)
encoding.RegisterCustomMessageCodec(
    encoder=JsonProtoEncoder, decoder=JsonProtoDecoder)(JsonObject)
encoding.RegisterCustomMessageCodec(
    encoder=JsonProtoEncoder, decoder=JsonProtoDecoder)(JsonArray)
def _EncodeDateTimeField(field, value):
    """Encode a DateTimeField value using the standard protojson encoding."""
    result = protojson.ProtoJson().encode_field(field, value)
    return encoding.CodecResult(value=result, complete=True)


def _DecodeDateTimeField(unused_field, value):
    """Decode a DateTime value via protojson; the field argument is unused."""
    result = protojson.ProtoJson().decode_field(
        message_types.DateTimeField(1), value)
    return encoding.CodecResult(value=result, complete=True)


encoding.RegisterFieldTypeCodec(_EncodeDateTimeField, _DecodeDateTimeField)(
    message_types.DateTimeField)
def _EncodeInt64Field(field, value):
    """Handle the special case of int64 as a string."""
    int64_variants = (messages.Variant.INT64, messages.Variant.UINT64)
    if field.variant not in int64_variants:
        # Not a 64-bit integer field; let the default codec handle it.
        return encoding.CodecResult(value=value, complete=False)
    if field.repeated:
        result = list(map(str, value))
    else:
        result = str(value)
    return encoding.CodecResult(value=result, complete=True)
def _DecodeInt64Field(unused_field, value):
    """Int64 values need no special decoding; defer to the default codec."""
    # Don't need to do anything special, they're decoded just fine
    return encoding.CodecResult(value=value, complete=False)


encoding.RegisterFieldTypeCodec(_EncodeInt64Field, _DecodeInt64Field)(
    messages.IntegerField)
| .install/.backup/lib/apitools/base/py/extra_types.py | 7,094 | A JSON array value.
A JSON object value.
Messages:
Property: A property of a JsonObject.
Fields:
properties: A list of properties of a JsonObject.
Any valid JSON value.
A property of a JSON object.
Fields:
key: Name of the property.
value: A JsonValue attribute.
Handle the special case of int64 as a string.
Convert the given JsonValue to a json string.
Convert the given python value to a JsonValue.
Extra types understood by apitools.
This file will be replaced by a .proto file when we switch to proto2
from protorpc.
We import from protorpc. pylint:disable=invalid-name pylint:enable=invalid-name Is this JSON object `null`? Exactly one of the following is provided if is_null is False; none should be provided if is_null is True. We keep two numeric fields to keep int64 round-trips exact. Compound types pylint:disable=invalid-name pylint:enable=invalid-name Don't need to do anything special, they're decoded just fine | 940 | en | 0.678448 |
#!/usr/bin/python3
import click
import os
import tempfile
import filecmp
import shutil
import difflib
import sys
import git
import shell_utils
SOURCE_EXTENSIONS = [".cpp", ".c", ".cxx", ".cc", ".h", ".hxx", ".hpp"]
class Colors:
    """ANSI terminal escape sequences used to colorize the report output."""
    HEADER = '\033[95m'
    BLUE = '\033[94m'
    CYAN = '\033[96m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    RED = '\033[91m'
    END = '\033[0m'  # reset to the terminal's default style
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
class Symbols:
    """Unicode check/cross marks used to flag pass/fail per file."""
    PASS = u'\u2714'  # heavy check mark
    FAIL = u'\u2718'  # heavy ballot X
def find_files_to_check(modified_files, repo_dir):
    """Collect the C/C++ source files that should be run through clang-format.

    With `modified_files`, only files git reports as added/copied/modified/
    renamed relative to HEAD are returned; otherwise the whole tree is walked.
    """
    extensions = tuple(SOURCE_EXTENSIONS)
    if modified_files:
        raw = shell_utils.run_shell_command('git diff-index --diff-filter=ACMR --name-only HEAD')
        candidates = raw.decode('utf-8').split()
        return [os.path.join(repo_dir, name) for name in candidates
                if name.lower().endswith(extensions)]
    # Recursively walk through the repo and keep files matching the extensions.
    found = []
    for directory, _dirs, filenames in os.walk(repo_dir):
        for name in filenames:
            if name.lower().endswith(extensions):
                found.append(os.path.join(directory, name))
    return found
# Given a list of files, run clang-format on them. Optionally fix the files in place if desired
def check_files(files, fix_in_place, verbose):
    """Run clang-format over `files`; return how many files needed changes.

    With `verbose`, a unified diff is printed for each failing file; with
    `fix_in_place`, each failing file is overwritten with the formatted output.
    """
    num_failed_files = 0
    for file in files:
        # format the file with clang-format and save the output to a temporary file
        output = shell_utils.run_shell_command("clang-format -style=file -fallback-style=none " + file)
        formatted_file = tempfile.NamedTemporaryFile()
        formatted_file.write(output)
        formatted_file.seek(0)
        # check if the formatted file is different from the original
        file_changed = not filecmp.cmp(formatted_file.name, file)
        # Only need to handle those files that were changed by clang-format. Files that weren't changed are good to go.
        if file_changed:
            num_failed_files += 1
            print(Colors.RED + Symbols.FAIL + Colors.END + " " + str(file))
            if verbose:
                # get and display the diff between the original and formatted files
                # NOTE(review): these two handles are never closed explicitly —
                # relies on GC; probably fine for a short-lived hook, confirm.
                original_file = open(file, 'r')
                new_file = open(formatted_file.name, 'r')
                diff = difflib.unified_diff(original_file.readlines(), new_file.readlines())
                print(Colors.CYAN)
                for line in diff:
                    sys.stdout.write(line)
                print(Colors.END)
            if fix_in_place:
                # if we are fixing in place, just replace the original file with the changed contents
                print(Colors.YELLOW + "WARNING: Fixing in place. Original file will be changed." + Colors.END)
                shutil.move(formatted_file.name, file)
        else:
            print(Colors.GREEN + Symbols.PASS + Colors.END + " " + str(file))
        # clean up; closing a NamedTemporaryFile deletes it, which raises if
        # the file was already moved into place above
        try:
            formatted_file.close()
        except FileNotFoundError as _:
            # Do nothing. We must have moved the file above
            pass
    return num_failed_files
@click.command()
@click.option('-f', '--fix-in-place', default=False, is_flag=True, help='Fix the issues found.')
@click.option('-m', '--modified-files', default=False, is_flag=True, help='Check modified files (according to git) '
                                                                          'only.')
@click.option('-v', '--verbose', default=False, is_flag=True, help="Print all the errors found.")
def main(fix_in_place, modified_files, verbose):
    """Check (or fix) clang-format compliance; exit 1 if any file fails."""
    # change directory to the root of the git project
    repo = git.Repo('.', search_parent_directories=True)
    os.chdir(repo.working_tree_dir)
    # Find the source files we want to check
    sources_to_check = find_files_to_check(modified_files, repo.working_tree_dir)
    # Run clang-format and compare the output to the original files
    num_failed_files = check_files(sources_to_check, fix_in_place, verbose)
    # Return success or failure
    if num_failed_files:
        print(
            Colors.RED + 3 * Symbols.FAIL + " " + str(num_failed_files) + " files have formatting errors." + Colors.END)
        if fix_in_place:
            print("The formatting errors have been automatically fixed.")
        sys.exit(1)
    print(Colors.GREEN + 3 * Symbols.PASS + Colors.END + " All files are properly formatted!")
    sys.exit(0)


if __name__ == '__main__':
    main()
| tools/run_clang_format.py | 4,665 | !/usr/bin/python3 Find all the source files we want to check Check which files have been added or modified by git Recursively walk through the repo and find all the files that meet the extensions criteria Given a list of files, run clang-format on them. Optionally fix the files in place if desired format the file with clang-format and save the output to a temporary file check if the formatted file is different from the original Only need to handle those files that were changed by clang-format. Files that weren't changed are good to go. get and display the diff between the original and formatted files if we are fixing in place, just replace the original file with the changed contents clean up Do nothing. We must have moved the file above change directory to the root of the git project Find the source files we want ot check Run clang-format and compare the output to the original files Return success or failure | 921 | en | 0.918375 |
# Fill in the second demo form and validate the displayed sum.
import time
from selenium import webdriver
from selenium.webdriver.common.by import By

driver = webdriver.Chrome("../chromedriver.exe")
driver.get("https://www.seleniumeasy.com/test/basic-first-form-demo.html")

first_number = 2
second_number = 3

# Type both operands into the form and submit it.
driver.find_element(By.ID, "sum1").send_keys(first_number)
driver.find_element(By.ID, "sum2").send_keys(second_number)
driver.find_element(By.XPATH, "/html/body/div[2]/div/div[2]/div[2]/div[2]/form/button").click()

# Compare the page's reported total with our own.
displayed_sum = driver.find_element(By.ID, "displayvalue").text
if first_number + second_number == int(displayed_sum):
    print("same")
else:
    print("different")

time.sleep(5)
driver.quit()
| selenium/filling form/form-2.py | 698 | filling 2nd form and validating ans | 35 | en | 0.699341 |
from collections import OrderedDict
from sympy import Basic, true
from devito.tools import as_tuple, is_integer, memoized_meth
from devito.types import Dimension
__all__ = ['Vector', 'LabeledVector', 'vmin', 'vmax']
class Vector(tuple):
    """
    A representation of an object in Z^n.
    The elements of a Vector can be integers or generic SymPy expressions.

    Notes
    -----
    1) Vector-scalar comparison
    If a comparison between a vector and a non-vector is attempted, then the
    non-vector is promoted to a vector; if this is not possible, an exception
    is raised. This is handy because it turns a vector-scalar comparison into
    a vector-vector comparison with the scalar broadcasted to all vector entries.
    For example: ::

        (3, 4, 5) > 4 => (3, 4, 5) > (4, 4, 4) => False

    2) Comparing Vector entries when these are SymPy expressions
    When we compare two symbolic (SymPy expressions) entries, it might not be
    possible to determine the truth value of the relation. For example, the
    truth value of `3*i < 4*j` cannot be determined (unless some information
    about `i` and `j` is available). In some cases, however, the comparison is
    feasible; for example, `i + 4 < i` is definitely False. A sufficient condition
    for two Vectors to be comparable is that their pair-wise indices are affine
    functions of the same variables, with identical coefficient.
    If the Vector is instantiated passing the keyword argument ``smart = True``,
    some manipulation will be attempted to infer the truth value of a non-trivial
    symbolic relation. This increases the cost of the comparison, while potentially
    being ineffective, so use it judiciously. By default, ``smart = False``.

    Raises
    ------
    TypeError
        If two Vectors cannot be compared, e.g. due to incomparable symbolic entries.
    """

    def __new__(cls, *items, smart=False):
        # Entries must be plain integers or SymPy expressions (`Basic`).
        if not all(is_integer(i) or isinstance(i, Basic) for i in items):
            raise TypeError("Illegal Vector element type")
        obj = super(Vector, cls).__new__(cls, items)
        obj.smart = smart
        return obj

    def _asvector(relax=False):
        # Decorator factory (used at class-definition time, hence no `self`):
        # promotes the `other` operand of a binary operation to a Vector,
        # broadcasting a scalar to all entries. With `relax=False`, also
        # enforces that both operands have the same rank.
        def __asvector(func):
            def wrapper(self, other):
                if not isinstance(other, Vector):
                    try:
                        other = Vector(*other)
                    except TypeError:
                        # Not iterable
                        other = Vector(*(as_tuple(other)*len(self)))
                if relax is False and len(self) != len(other):
                    raise TypeError("Cannot operate with Vectors of different rank")
                return func(self, other)
            return wrapper
        return __asvector

    def __hash__(self):
        return super(Vector, self).__hash__()

    @_asvector()
    def __add__(self, other):
        # Element-wise addition; the result inherits this Vector's `smart` flag.
        return Vector(*[i + j for i, j in zip(self, other)], smart=self.smart)

    @_asvector()
    def __radd__(self, other):
        return self + other

    @_asvector()
    def __sub__(self, other):
        # Element-wise subtraction; the result inherits this Vector's `smart` flag.
        return Vector(*[i - j for i, j in zip(self, other)], smart=self.smart)

    @_asvector()
    def __rsub__(self, other):
        return self - other

    @_asvector(relax=True)
    def __eq__(self, other):
        return super(Vector, self).__eq__(other)

    @_asvector(relax=True)
    def __ne__(self, other):
        return super(Vector, self).__ne__(other)

    @_asvector()
    def __lt__(self, other):
        # This might raise an exception if the distance between the i-th entry
        # of `self` and `other` isn't integer, but rather a generic expression
        # not comparable to 0. However, the implementation is "smart", in the
        # sense that it will return as soon as the first two comparable entries
        # (i.e., such that their distance is a non-zero integer) are found
        for i in self.distance(other):
            try:
                val = int(i)
                if val < 0:
                    return True
                elif val > 0:
                    return False
            except TypeError:
                # Symbolic distance: optionally try SymPy relational reasoning.
                if self.smart:
                    if (i < 0) == true:
                        return True
                    elif (i <= 0) == true:
                        # If `i` can assume the value 0 in at least one case, then
                        # definitely `i < 0` is generally False, so __lt__ must
                        # return False
                        return False
                    elif (i >= 0) == true:
                        return False
                raise TypeError("Non-comparable index functions")
        return False

    @_asvector()
    def __gt__(self, other):
        return other.__lt__(self)

    @_asvector()
    def __le__(self, other):
        if self.__eq__(other):
            return True
        # We cannot simply resort to `__lt__` as it might happen that:
        # * v0 < v1 --> False
        # * v0 == v1 --> False
        # But
        # * v0 <= v1 --> True
        #
        # For example, take `v0 = (a + 2)` and `v1 = (2)`; if `a` is attached
        # the property that definitely `a >= 0`, then surely `v1 <= v0`, even
        # though it can't be assumed anything about `v1 < 0` and `v1 == v0`
        for i in self.distance(other):
            try:
                val = int(i)
                if val < 0:
                    return True
                elif val > 0:
                    return False
            except TypeError:
                if self.smart:
                    if (i < 0) == true:
                        return True
                    elif (i <= 0) == true:
                        continue
                    elif (i > 0) == true:
                        return False
                    elif (i >= 0) == true:
                        # See analogous considerations in __lt__
                        return False
                raise TypeError("Non-comparable index functions")
        # Note: unlike `__lt__`, if we end up here, then *it is* <=. For example,
        # with `v0` and `v1` as above, we would get here
        return True

    @_asvector()
    def __ge__(self, other):
        return other.__le__(self)

    def __getitem__(self, key):
        # Slicing returns a Vector (preserving `smart`); scalar indexing
        # returns the raw entry.
        ret = super(Vector, self).__getitem__(key)
        return Vector(*ret, smart=self.smart) if isinstance(key, slice) else ret

    def __repr__(self):
        return "(%s)" % ','.join(str(i) for i in self)

    @property
    def rank(self):
        # Number of entries, i.e. the dimensionality n of Z^n.
        return len(self)

    @property
    def sum(self):
        return sum(self)

    @property
    def is_constant(self):
        # True iff every entry is a plain integer (no symbolic entries).
        return all(is_integer(i) for i in self)

    def distance(self, other):
        """
        Compute the distance from ``self`` to ``other``.

        The distance is a reflexive, transitive, and anti-symmetric relation,
        which establishes a total ordering amongst Vectors.

        The distance is a function [Vector x Vector --> D]. D is a tuple of length
        equal to the Vector ``rank``. The i-th entry of D, D_i, indicates whether
        the i-th component of ``self``, self_i, precedes (< 0), equals (== 0), or
        succeeds (> 0) the i-th component of ``other``, other_i.
        In particular, the *absolute value* of D_i represents the number of
        integer points that exist between self_i and sink_i.

        Examples
        --------
                 | 3 |           | 1 |               |  2 |
        source = | 2 | , sink =  | 4 | , distance => | -2 |
                 | 1 |           | 5 |               | -4 |

        There are 2, 2, and 4 points between [3-2], [2-4], and [1-5], respectively.
        """
        return self - other
class LabeledVector(Vector):
    """
    A Vector that associates a Dimension to each element.
    """

    def __new__(cls, items=None):
        try:
            labels, values = zip(*items)
        except (ValueError, TypeError):
            # `items` is None or empty -> zero-rank LabeledVector.
            labels, values = (), ()
        if not all(isinstance(i, Dimension) for i in labels):
            raise ValueError("All labels must be of type Dimension, got [%s]"
                             % ','.join(i.__class__.__name__ for i in labels))
        obj = super(LabeledVector, cls).__new__(cls, *values)
        obj.labels = labels
        return obj

    @classmethod
    def transpose(cls, *vectors):
        """
        Transpose a matrix represented as an iterable of homogeneous LabeledVectors.
        """
        if len(vectors) == 0:
            return LabeledVector()
        if not all(isinstance(v, LabeledVector) for v in vectors):
            raise ValueError("All items must be of type LabeledVector, got [%s]"
                             % ','.join(i.__class__.__name__ for i in vectors))
        # Group the i-th entries of all vectors under their shared label,
        # preserving first-seen label order.
        T = OrderedDict()
        for v in vectors:
            for l, i in zip(v.labels, v):
                T.setdefault(l, []).append(i)
        return tuple((l, Vector(*i)) for l, i in T.items())

    def __repr__(self):
        return "(%s)" % ','.join('%s:%s' % (l, i) for l, i in zip(self.labels, self))

    def __hash__(self):
        return hash((tuple(self), self.labels))

    def __eq__(self, other):
        # Comparisons are only meaningful between identically-labeled vectors.
        if isinstance(other, LabeledVector) and self.labels != other.labels:
            raise TypeError("Cannot compare due to mismatching `labels`")
        return super(LabeledVector, self).__eq__(other)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __lt__(self, other):
        if isinstance(other, LabeledVector) and self.labels != other.labels:
            raise TypeError("Cannot compare due to mismatching `labels`")
        return super(LabeledVector, self).__lt__(other)

    def __gt__(self, other):
        return other.__lt__(self)

    def __ge__(self, other):
        return self.__eq__(other) or self.__gt__(other)

    def __le__(self, other):
        return self.__eq__(other) or self.__lt__(other)

    def __getitem__(self, index):
        # Indexing by position (int/slice) or by Dimension label.
        if isinstance(index, (slice, int)):
            return super(LabeledVector, self).__getitem__(index)
        elif isinstance(index, Dimension):
            # Any Dimension in `index._defines` may match one of our labels.
            for d in index._defines:
                if d in self.labels:
                    i = self.labels.index(d)
                    return super(LabeledVector, self).__getitem__(i)
            return None
        else:
            raise TypeError("Indices must be integers, slices, or Dimensions, not %s"
                            % type(index))

    def fromlabel(self, label, v=None):
        # dict.get-style access: the value at `label`, or `v` if absent.
        return self[label] if label in self.labels else v

    def items(self):
        # Iterate (label, value) pairs, dict-style.
        return zip(self.labels, self)

    @memoized_meth
    def distance(self, other):
        """
        Compute the distance from ``self`` to ``other``.

        Parameters
        ----------
        other : LabeledVector
            The LabeledVector from which the distance is computed.
        """
        if not isinstance(other, LabeledVector):
            raise TypeError("Cannot compute distance from obj of type %s", type(other))
        if self.labels != other.labels:
            raise TypeError("Cannot compute distance due to mismatching `labels`")
        return LabeledVector(list(zip(self.labels, self - other)))
# Utility functions
def vmin(*vectors):
    """
    Retrieve the minimum out of an iterable of Vectors.

    Raises
    ------
    TypeError
        If there are two incomparable Vectors.
    ValueError
        If an empty sequence is supplied
    """
    if not all(isinstance(v, Vector) for v in vectors):
        raise TypeError("Expected an iterable of Vectors")
    if len(vectors) == 0:
        raise ValueError("min() arg is an empty sequence")
    smallest = vectors[0]
    for candidate in vectors[1:]:
        # Both checks are needed: with symbolic entries `<` may be False
        # while `<=` still holds (see Vector.__le__).
        if candidate < smallest or candidate <= smallest:
            smallest = candidate
    return smallest
def vmax(*vectors):
    """
    Retrieve the maximum out of an iterable of Vectors.

    Raises
    ------
    TypeError
        If there are two incomparable Vectors.
    ValueError
        If an empty sequence is supplied
    """
    if not all(isinstance(i, Vector) for i in vectors):
        raise TypeError("Expected an iterable of Vectors")
    if len(vectors) == 0:
        # Fixed copy-paste from vmin: the message previously said "min()".
        raise ValueError("max() arg is an empty sequence")
    ret = vectors[0]
    for i in vectors[1:]:
        # Both checks are needed: with symbolic entries `>` may be False
        # while `>=` still holds (see Vector.__le__/__ge__).
        if i > ret or i >= ret:
            ret = i
    return ret
| devito/ir/support/vector.py | 12,353 | A Vector that associates a Dimension to each element.
A representation of an object in Z^n.
The elements of a Vector can be integers or generic SymPy expressions.
Notes
-----
1) Vector-scalar comparison
If a comparison between a vector and a non-vector is attempted, then the
non-vector is promoted to a vector; if this is not possible, an exception
is raised. This is handy because it turns a vector-scalar comparison into
a vector-vector comparison with the scalar broadcasted to all vector entries.
For example: ::
(3, 4, 5) > 4 => (3, 4, 5) > (4, 4, 4) => False
2) Comparing Vector entries when these are SymPy expressions
When we compare two symbolic (SymPy expressions) entries, it might not be
possible to determine the truth value of the relation. For example, the
truth value of `3*i < 4*j` cannot be determined (unless some information
about `i` and `j` is available). In some cases, however, the comparison is
feasible; for example, `i + 4 < i` is definitely False. A sufficient condition
for two Vectors to be comparable is that their pair-wise indices are affine
functions of the same variables, with identical coefficient.
If the Vector is instantiated passing the keyword argument ``smart = True``,
some manipulation will be attempted to infer the truth value of a non-trivial
symbolic relation. This increases the cost of the comparison, while potentially
being ineffective, so use it judiciously. By default, ``smart = False``.
Raises
------
TypeError
If two Vectors cannot be compared, e.g. due to incomparable symbolic entries.
Compute the distance from ``self`` to ``other``.
The distance is a reflexive, transitive, and anti-symmetric relation,
which establishes a total ordering amongst Vectors.
The distance is a function [Vector x Vector --> D]. D is a tuple of length
equal to the Vector ``rank``. The i-th entry of D, D_i, indicates whether
the i-th component of ``self``, self_i, precedes (< 0), equals (== 0), or
succeeds (> 0) the i-th component of ``other``, other_i.
In particular, the *absolute value* of D_i represents the number of
integer points that exist between self_i and sink_i.
Examples
--------
| 3 | | 1 | | 2 |
source = | 2 | , sink = | 4 | , distance => | -2 |
| 1 | | 5 | | -4 |
There are 2, 2, and 4 points between [3-2], [2-4], and [1-5], respectively.
Compute the distance from ``self`` to ``other``.
Parameters
----------
other : LabeledVector
The LabeledVector from which the distance is computed.
Transpose a matrix represented as an iterable of homogeneous LabeledVectors.
Retrieve the maximum out of an iterable of Vectors.
Raises
------
TypeError
If there are two incomparable Vectors.
ValueError
If an empty sequence is supplied
Retrieve the minimum out of an iterable of Vectors.
Raises
------
TypeError
If there are two incomparable Vectors.
ValueError
If an empty sequence is supplied
Not iterable This might raise an exception if the distance between the i-th entry of `self` and `other` isn't integer, but rather a generic expression not comparable to 0. However, the implementation is "smart", in the sense that it will return as soon as the first two comparable entries (i.e., such that their distance is a non-zero integer) are found If `i` can assume the value 0 in at least one case, then definitely `i < 0` is generally False, so __lt__ must return False We cannot simply resort to `__lt__` as it might happen that: * v0 < v1 --> False * v0 == v1 --> False But * v0 <= v1 --> True For example, take `v0 = (a + 2)` and `v1 = (2)`; if `a` is attached the property that definitely `a >= 0`, then surely `v1 <= v0`, even though it can't be assumed anything about `v1 < 0` and `v1 == v0` See analogous considerations in __lt__ Note: unlike `__lt__`, if we end up here, then *it is* <=. For example, with `v0` and `v1` as above, we would get here Utility functions | 3,939 | en | 0.872604 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from threathunter_common.geo.phonelocator import *
__author__ = "nebula"
def test_phone():
    # NOTE(review): Python 2 `print` statements throughout; this module will
    # not run under Python 3 without modification.
    print check_phone_number("+13482345020", None)
    assert check_phone_number("13482345020", 'CN')
    assert not check_phone_number("+134823450", None)
    # Carrier and geo lookups for a few Chinese mobile numbers; the output is
    # inspected manually rather than asserted.
    print get_carrier("13482121123", 'CN')
    print get_carrier("13815430576", 'CN')
    print get_carrier("13093705423", 'CN')
    print get_geo("13482121123", 'CN')
    print get_geo("13815430576", 'CN')
    print get_geo("13093705423", 'CN')
    print 111, get_geo("020 8366 1177", "GB")
    print 111, get_geo("+442083661177")
    print phonenumbers.parse("020 8366 1177", "GB")
    print phonenumbers.parse("+442083661177")
    # NOTE(review): this unconditional failure looks like a debugging leftover
    # (forcing the runner to show the captured output above) — presumably it
    # should be removed before this test gates anything; confirm.
    assert False
| threathunter_common_python/test/testphone.py | 751 | !/usr/bin/env python -*- coding: utf-8 -*- | 42 | en | 0.34282 |
#!/usr/bin/env python3
"""Pre-commit hook to verify that all extras are documented in README.rst"""
import configparser
import re
from pathlib import Path
# Resolve the repository root relative to this script
# (.circleci/scripts/ -> repo root).
repo_dir = Path(__file__).parent.parent.parent

# Read the declared extras from setup.cfg.
config = configparser.ConfigParser(strict=False)
config.read(repo_dir / "setup.cfg")

# Development-only extras that should not be documented in the README.
extra_to_exclude = {"tests", "mypy", "docs"}
all_extras = set(config["options.extras_require"].keys()) - extra_to_exclude

readme_path = repo_dir / "README.rst"

# Build the RST table listing every user-facing extra.
extra_doc = """
.. list-table::
   :header-rows: 1

   * - Extra Name
     - Installation Command
     - Dependencies
"""
for extra in sorted(all_extras):
    extra_doc += f"""
   * - ``{extra}``
     - ``pip install 'astronomer-providers[{extra}]'``
     - {extra.replace(".", " ").title()}
"""

# Splice the generated table between the EXTRA_DOC_START/END markers.
readme_contents = readme_path.read_text()
new_readme_text = re.sub(
    r".. EXTRA_DOC_START([\s\S]*).. EXTRA_DOC_END",
    f".. EXTRA_DOC_START{extra_doc}\n.. EXTRA_DOC_END",
    readme_contents,
    flags=re.MULTILINE,
)

# Rewrite README.rst only when the table actually changed, so the pre-commit
# hook does not touch the file (and fail the commit) needlessly.
if new_readme_text != readme_contents:
    readme_path.write_text(new_readme_text)
| .circleci/scripts/pre_commit_readme_extra.py | 1,189 | Pre-commit hook to verify that all extras are documented in README.rst
!/usr/bin/env python3 | 93 | en | 0.689687 |
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from lda import LDA
def learn_topics(texts, topicnum):
    """Fit an LDA topic model with `topicnum` topics over `texts`.

    Returns (doc_topic, topic_word, vocabulary): the per-document topic
    distribution, the per-topic word distribution, and the fitted vocabulary.
    """
    # Vectorize: counts of the top 10,000 most frequent lowercase unigrams.
    print("Vectorizing...")
    vectorizer = CountVectorizer(max_features=10000, lowercase=True)
    word_counts = vectorizer.fit_transform(texts)
    vocabulary = vectorizer.get_feature_names()
    # Fit the topic model; `refresh` controls print frequency.
    print("LDA")
    model = LDA(topicnum, n_iter=4000, refresh=500)
    doc_topic = model.fit_transform(word_counts)
    return doc_topic, model.topic_word_, vocabulary
print("Reading data...")
env = pd.read_csv('../Data/Environmental Discourse/env_processed.csv', index_col=0)
env = env[~env.text_processed.isna()]
doc_topic, topic_word, vocabulary = learn_topics(env.text_processed, 100)
print(doc_topic[0,:])
for i in range(100):
env['topic_{}'.format(i)] = doc_topic[:, i]
env.to_csv('../Data/Environmental Discourse/env_lda.csv') | Programs/env_lda.py | 1,208 | Get vocabulary and word counts. Use the top 10,000 most frequent lowercase unigrams with at least 3 alphabetical, non-numeric characters, punctuation treated as separators. Learn topics. Refresh conrols print frequency. | 221 | en | 0.879337 |
import os
import glob
import psycopg2
import pandas as pd
import numpy as np
from sql_queries import *
def process_song_file(cur, filepath):
    """Read one song JSON file and insert its song and artist records."""
    # Each song file holds a single line-delimited JSON record.
    records = pd.read_json(filepath, lines=True)
    # insert song record
    song_columns = ["song_id", "title", "artist_id", "year", "duration"]
    cur.execute(song_table_insert, records[song_columns].values[0])
    # insert artist record
    artist_columns = ["artist_id", "artist_name", "artist_location",
                      "artist_latitude", "artist_longitude"]
    cur.execute(artist_table_insert, records[artist_columns].values[0])
def process_log_file(cur, filepath):
    """Read one activity-log JSON file; populate time, user and songplay tables."""
    # open log file
    df = pd.read_json(filepath, lines = True)
    # filter by NextSong action
    df = df.query("page=='NextSong'")
    # convert timestamp column to datetime (ts is in milliseconds)
    t = pd.to_datetime(df["ts"]/1000, unit = 's')
    # insert time data records
    # NOTE(review): `Series.dt.week` is deprecated/removed in recent pandas
    # (`t.dt.isocalendar().week` is the replacement) — confirm pinned version.
    time_data = np.transpose(np.array([df["ts"].values, t.dt.hour.values, t.dt.day.values, t.dt.week.values, \
        t.dt.month.values, t.dt.year.values, t.dt.weekday.values]))
    column_labels = ("timestamp", "hour", "day", "week of year", "month", "year", "weekday")
    time_df = pd.DataFrame(data = time_data, columns = column_labels)
    for i, row in time_df.iterrows():
        cur.execute(time_table_insert, list(row))
    # load user table
    user_df = df[["userId", "firstName", "lastName", "gender", "level"]]
    # insert user records
    for i, row in user_df.iterrows():
        cur.execute(user_table_insert, row)
    # insert songplay records
    for index, row in df.iterrows():
        # get songid and artistid from song and artist tables
        cur.execute(song_select, (row.song, row.artist, row.length))
        results = cur.fetchone()
        if results:
            songid, artistid = results
        else:
            # no match in the song/artist tables -> record NULL foreign keys
            songid, artistid = None, None
        # insert songplay record
        songplay_data = (row.ts, row.userId, row.level, songid, artistid, row.sessionId, \
            row.location, row.userAgent)
        cur.execute(songplay_table_insert, songplay_data)
def process_data(cur, conn, filepath, func):
    """Walk *filepath*, apply *func* to every JSON file found, committing
    after each file so progress survives a crash.

    Args:
        cur: database cursor handed to *func*.
        conn: database connection used for per-file commits.
        filepath: root directory to scan recursively.
        func: callable(cur, absolute_file_path) that loads one file.
    """
    # collect the absolute path of every *.json file under the tree
    json_files = []
    for root, _dirs, _files in os.walk(filepath):
        for match in glob.glob(os.path.join(root, '*.json')):
            json_files.append(os.path.abspath(match))
    total = len(json_files)
    print('{} files found in {}'.format(total, filepath))
    # process each file, committing as we go
    for idx, datafile in enumerate(json_files, 1):
        func(cur, datafile)
        conn.commit()
        print('{}/{} files processed.'.format(idx, total))
def main():
    """Connect to the local sparkifydb and run both ETL passes.

    Songs must be loaded before logs: songplay rows look up song/artist ids
    inserted by the song pass.
    """
    conn = psycopg2.connect("host=127.0.0.1 dbname=sparkifydb user=student password=student")
    cur = conn.cursor()
    process_data(cur, conn, filepath='data/song_data', func=process_song_file)
    process_data(cur, conn, filepath='data/log_data', func=process_log_file)
    conn.close()
if __name__ == "__main__":
main() | ETL-data-with-postgres/etl.py | 3,111 | open song file insert song record insert artist record open log file filter by NextSong action convert timestamp column to datetime insert time data records load user table insert user records insert songplay records get songid and artistid from song and artist tables insert songplay record get all files matching extension from directory get total number of files found iterate over files and process | 402 | en | 0.779201 |
## ! DO NOT MANUALLY INVOKE THIS setup.py, USE CATKIN INSTEAD
from setuptools import setup
from catkin_pkg.python_setup import generate_distutils_setup
# fetch values from package.xml
setup_args = generate_distutils_setup(
packages=['soccer_trajectories'],
package_dir={'': 'src'},
)
setup(**setup_args)
| soccer_trajectories/setup.py | 315 | ! DO NOT MANUALLY INVOKE THIS setup.py, USE CATKIN INSTEAD fetch values from package.xml | 88 | en | 0.330696 |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""preprocess"""
import argparse
import os
import numpy as np
from src.dataset import load_and_process
def generate_bin():
    """Generate bin files.

    Parses CLI arguments, loads the GAT dataset, then dumps the feature and
    biases tensors as raw ``.bin`` files plus the test labels/mask as ``.npy``
    files under ``--result_path``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir', type=str, default='./data/cora/cora_mr', help='Data dir')
    parser.add_argument('--train_nodes_num', type=int, default=140, help='Nodes numbers for training')
    parser.add_argument('--eval_nodes_num', type=int, default=500, help='Nodes numbers for evaluation')
    parser.add_argument('--test_nodes_num', type=int, default=1000, help='Nodes numbers for test')
    parser.add_argument('--result_path', type=str, default='./preprocess_Result/', help='Result path')
    args = parser.parse_args()
    feature, biases, _, _, _, _, y_test, test_mask = load_and_process(args.data_dir,
                                                                      args.train_nodes_num,
                                                                      args.eval_nodes_num,
                                                                      args.test_nodes_num)
    feature_path = os.path.join(args.result_path, '00_data')
    biases_path = os.path.join(args.result_path, '01_data')
    y_test_path = os.path.join(args.result_path, 'y_test.npy')
    test_mask_path = os.path.join(args.result_path, 'test_mask.npy')
    # exist_ok so the script can be re-run without manually deleting results
    os.makedirs(feature_path, exist_ok=True)
    os.makedirs(biases_path, exist_ok=True)
    feature.tofile(os.path.join(feature_path, 'feature.bin'))
    biases.tofile(os.path.join(biases_path, 'biases.bin'))
    np.save(y_test_path, y_test)
    np.save(test_mask_path, test_mask)
if __name__ == "__main__":
generate_bin()
| model_zoo/official/gnn/gat/preprocess.py | 2,312 | Generate bin files.
preprocess
Copyright 2021 Huawei Technologies Co., Ltd Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================ | 671 | en | 0.794616 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'birdview.ui'
#
# Created by: PyQt5 UI code generator 5.15.1
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_birdview(object):
    """pyuic5-generated layout for the BirdView widget.

    NOTE: this class is regenerated from birdview.ui; avoid manual edits.
    """
    def setupUi(self, birdview):
        """Build the widget tree and layouts on *birdview*."""
        birdview.setObjectName("birdview")
        birdview.resize(552, 551)
        self.verticalLayout = QtWidgets.QVBoxLayout(birdview)
        self.verticalLayout.setContentsMargins(5, 5, 5, 5)
        self.verticalLayout.setSpacing(2)
        self.verticalLayout.setObjectName("verticalLayout")
        # top row: open-file button + current file-name label
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setSpacing(5)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.btnOpenFile = QtWidgets.QPushButton(birdview)
        self.btnOpenFile.setObjectName("btnOpenFile")
        self.horizontalLayout.addWidget(self.btnOpenFile)
        self.lab_file_name = QtWidgets.QLabel(birdview)
        self.lab_file_name.setAlignment(QtCore.Qt.AlignCenter)
        self.lab_file_name.setObjectName("lab_file_name")
        self.horizontalLayout.addWidget(self.lab_file_name)
        self.horizontalLayout.setStretch(0, 1)
        self.horizontalLayout.setStretch(1, 4)
        self.verticalLayout.addLayout(self.horizontalLayout)
        # middle: container for the bird's-eye view
        self.vbox_bd = QtWidgets.QVBoxLayout()
        self.vbox_bd.setObjectName("vbox_bd")
        self.verticalLayout.addLayout(self.vbox_bd)
        # bottom: media-control buttons/slider grid
        self.hbox_btn_slider = QtWidgets.QHBoxLayout()
        self.hbox_btn_slider.setObjectName("hbox_btn_slider")
        self.media_grid = QtWidgets.QGridLayout()
        self.media_grid.setObjectName("media_grid")
        self.hbox_btn_slider.addLayout(self.media_grid)
        self.verticalLayout.addLayout(self.hbox_btn_slider)
        self.verticalLayout.setStretch(0, 1)
        self.verticalLayout.setStretch(1, 20)
        self.verticalLayout.setStretch(2, 1)
        self.retranslateUi(birdview)
        QtCore.QMetaObject.connectSlotsByName(birdview)
    def retranslateUi(self, birdview):
        """Apply translatable strings to the widgets."""
        _translate = QtCore.QCoreApplication.translate
        birdview.setWindowTitle(_translate("birdview", "BirdView"))
        self.btnOpenFile.setText(_translate("birdview", "Open xls"))
        self.lab_file_name.setText(_translate("birdview", "xls_name"))
| src/visualization_simulator/src/ui/ui_birdview.py | 2,436 | -*- coding: utf-8 -*- Form implementation generated from reading ui file 'birdview.ui' Created by: PyQt5 UI code generator 5.15.1 WARNING: Any manual changes made to this file will be lost when pyuic5 is run again. Do not edit this file unless you know what you are doing. | 273 | en | 0.902997 |
from django.utils import timezone
from maestros.models import Unidades
from maestros_generales.models import Empresas
__author__ = 'julian'
from django.contrib.gis.db import models
import datetime
class WaspTypeSensor(models.Model):
    # Catalogue of sensor/probe types and their measurement unit.
    name = models.CharField(max_length=50)
    # on_delete made explicit: required since Django 2.0; CASCADE preserves
    # the implicit pre-2.0 default behavior.
    units = models.ForeignKey(Unidades, on_delete=models.CASCADE)
    fechaalta = models.DateField(auto_now_add=True,verbose_name=("Fecha Alta"),blank=True,null=True)
    fechabaja = models.DateField(verbose_name=("Fecha Baja"), blank=True,null=True)
class WaspMote(models.Model):
    # A physical Waspmote device, identified by its IMEI.
    DeviceName = models.CharField(max_length=30)
    Imei = models.BigIntegerField()
    fechaalta = models.DateField(auto_now_add=True,verbose_name=("Fecha Alta"),blank=True,null=True)
    fechabaja = models.DateField(verbose_name=("Fecha Baja"), blank=True,null=True)
    # PROTECT: a company cannot be deleted while motes reference it
    empresa = models.ForeignKey(Empresas,null=True, blank=True,verbose_name=('Empresa'),on_delete=models.PROTECT)
class WaspSensor(models.Model):
    # A sensor of a given type mounted on a specific mote.
    waspmote = models.ForeignKey(WaspMote, on_delete=models.PROTECT)
    probestype = models.ForeignKey(WaspTypeSensor,on_delete=models.PROTECT)
    fechaalta = models.DateField(auto_now_add=True,verbose_name=("Fecha Alta"),blank=True,null=True)
    fechabaja = models.DateField(verbose_name=("Fecha Baja"), blank=True,null=True)
    empresa = models.ForeignKey(Empresas,null=True, blank=True,verbose_name=('Empresa'),on_delete=models.PROTECT)
class WaspData(models.Model):
    # One telemetry sample reported by a mote's sensor.
    # on_delete made explicit: required since Django 2.0; CASCADE preserves
    # the implicit pre-2.0 default behavior.
    waspsensor = models.ForeignKey(WaspSensor, on_delete=models.CASCADE)
    timestamp_waspmote = models.DateTimeField()
    status = models.CharField(max_length=1)
    #loc = models.PointField(srid=4326)
    alt = models.FloatField()
    lat = models.FloatField()
    long = models.FloatField()  # NOTE: shadows the py2 builtin; kept for column compat
    speed = models.FloatField()
    course = models.FloatField()
    voltage = models.IntegerField()
    notes = models.TextField()
    #objects = models.GeoManager()
    valorsensor = models.FloatField()
    #timestamp_server = models.DateTimeField()
    # NOTE(review): a lambda default cannot be serialized by Django migrations;
    # consider a named module-level function. The +1h offset is presumably a
    # server timezone correction -- confirm.
    timestamp_server = models.DateTimeField(default= lambda: timezone.now() + datetime.timedelta(hours=1), blank=True)
| rest_waspmote/models.py | 2,324 | loc = models.PointField(srid=4326)objects = models.GeoManager()timestamp_server = models.DateTimeField() | 132 | en | 0.282704 |
"""Test length measured in half bytes (nibbles). Nibbles were added in v2.1"""
import copy
import iso8583
import iso8583.specs
import pytest
# fmt: off
@pytest.mark.parametrize(
    ["data_enc", "len_enc", "len_type", "max_len", "len_count", "result", "result_f2_len"],
    [
        ("ascii", "ascii", 2, 8, "bytes", b"02004000000000000000041234", b"04"),
        ("ascii", "ascii", 2, 8, "nibbles", b"02004000000000000000081234", b"08"),
        ("ascii", "b", 2, 8, "bytes", b"02004000000000000000\x00\x041234", b"\x00\x04"),
        ("ascii", "b", 2, 8, "nibbles", b"02004000000000000000\x00\x081234", b"\x00\x08"),
        ("b", "ascii", 2, 8, "bytes", b"0200400000000000000002\x12\x34", b"02"),
        ("b", "ascii", 2, 8, "nibbles", b"0200400000000000000004\x12\x34", b"04"),
        ("b", "b", 2, 8, "bytes", b"02004000000000000000\x00\x02\x12\x34", b"\x00\x02"),
        ("b", "b", 2, 8, "nibbles", b"02004000000000000000\x00\x04\x12\x34", b"\x00\x04"),
        ("ascii", "ascii", 0, 4, "bytes", b"020040000000000000001234", b""),
        ("ascii", "ascii", 0, 8, "nibbles", b"020040000000000000001234", b""),
        ("b", "ascii", 0, 2, "bytes", b"02004000000000000000\x12\x34", b""),
        ("b", "ascii", 0, 4, "nibbles", b"02004000000000000000\x12\x34", b""),
    ],
)
# fmt: on
def test_encode_nibbles(
    data_enc: str,
    len_enc: str,
    len_type: int,
    max_len: int,
    len_count: str,
    result: bytes,
    result_f2_len: bytes,
) -> None:
    """Encoded field-2 length prefix counts bytes or nibbles per spec len_count."""
    spec = copy.deepcopy(iso8583.specs.default)
    spec["t"]["data_enc"] = "ascii"
    spec["p"]["data_enc"] = "ascii"
    spec["2"]["data_enc"] = data_enc
    spec["2"]["len_enc"] = len_enc
    spec["2"]["len_type"] = len_type
    spec["2"]["max_len"] = max_len
    spec["2"]["len_count"] = len_count
    decoded = {"t": "0200", "2": "1234"}
    s, encoded = iso8583.encode(decoded, spec)
    assert s == result
    assert encoded["2"]["len"] == result_f2_len
# fmt: off
@pytest.mark.parametrize(
    ["len_enc", "len_type", "max_len", "len_count", "pad", "result", "result_f2_len"],
    [
        ("ascii", 2, 8, "nibbles", "0", b"0200400000000000000003\x01\x23", b"03"),
        ("b", 2, 8, "nibbles", "0", b"02004000000000000000\x00\x03\x01\x23", b"\x00\x03"),
        ("ascii", 0, 3, "nibbles", "0", b"02004000000000000000\x01\x23", b""),
        ("ascii", 2, 8, "nibbles", "F", b"0200400000000000000003\xF1\x23", b"03"),
        ("b", 2, 8, "nibbles", "F", b"02004000000000000000\x00\x03\xF1\x23", b"\x00\x03"),
        ("ascii", 0, 3, "nibbles", "F", b"02004000000000000000\xF1\x23", b""),
        ("ascii", 2, 8, "nibbles", "01", b"0200400000000000000003\x01\x23", b"03"),
        ("b", 2, 8, "nibbles", "01", b"02004000000000000000\x00\x03\x01\x23", b"\x00\x03"),
        ("ascii", 0, 3, "nibbles", "01", b"02004000000000000000\x01\x23", b""),
        ("ascii", 2, 8, "nibbles", "F1", b"0200400000000000000003\xF1\x23", b"03"),
        ("b", 2, 8, "nibbles", "F1", b"02004000000000000000\x00\x03\xF1\x23", b"\x00\x03"),
        ("ascii", 0, 3, "nibbles", "F1", b"02004000000000000000\xF1\x23", b""),
    ],
)
# fmt: on
def test_encode_nibbles_odd_left_pad(
    len_enc: str,
    len_type: int,
    max_len: int,
    len_count: str,
    pad: str,
    result: bytes,
    result_f2_len: bytes,
) -> None:
    """An odd nibble count is completed with the configured left-pad nibble."""
    spec = copy.deepcopy(iso8583.specs.default)
    spec["t"]["data_enc"] = "ascii"
    spec["p"]["data_enc"] = "ascii"
    spec["2"]["data_enc"] = "b"
    spec["2"]["len_enc"] = len_enc
    spec["2"]["len_type"] = len_type
    spec["2"]["max_len"] = max_len
    spec["2"]["len_count"] = len_count
    spec["2"]["left_pad"] = pad
    decoded = {"t": "0200", "2": "123"}
    s, encoded = iso8583.encode(decoded, spec)
    assert s == result
    assert encoded["2"]["len"] == result_f2_len
# fmt: off
@pytest.mark.parametrize(
    ["len_enc", "len_type", "max_len", "len_count", "pad", "result", "result_f2_len"],
    [
        ("ascii", 2, 8, "nibbles", "0", b"0200400000000000000003\x12\x30", b"03"),
        ("b", 2, 8, "nibbles", "0", b"02004000000000000000\x00\x03\x12\x30", b"\x00\x03"),
        ("ascii", 0, 3, "nibbles", "0", b"02004000000000000000\x12\x30", b""),
        ("ascii", 2, 8, "nibbles", "F", b"0200400000000000000003\x12\x3F", b"03"),
        ("b", 2, 8, "nibbles", "F", b"02004000000000000000\x00\x03\x12\x3F", b"\x00\x03"),
        ("ascii", 0, 3, "nibbles", "F", b"02004000000000000000\x12\x3F", b""),
        ("ascii", 2, 8, "nibbles", "01", b"0200400000000000000003\x12\x30", b"03"),
        ("b", 2, 8, "nibbles", "01", b"02004000000000000000\x00\x03\x12\x30", b"\x00\x03"),
        ("ascii", 0, 3, "nibbles", "01", b"02004000000000000000\x12\x30", b""),
        ("ascii", 2, 8, "nibbles", "F1", b"0200400000000000000003\x12\x3F", b"03"),
        ("b", 2, 8, "nibbles", "F1", b"02004000000000000000\x00\x03\x12\x3F", b"\x00\x03"),
        ("ascii", 0, 3, "nibbles", "F1", b"02004000000000000000\x12\x3F", b""),
    ],
)
# fmt: on
def test_encode_nibbles_odd_right_pad(
    len_enc: str,
    len_type: int,
    max_len: int,
    len_count: str,
    pad: str,
    result: bytes,
    result_f2_len: bytes,
) -> None:
    """An odd nibble count is completed with the configured right-pad nibble."""
    spec = copy.deepcopy(iso8583.specs.default)
    spec["t"]["data_enc"] = "ascii"
    spec["p"]["data_enc"] = "ascii"
    spec["2"]["data_enc"] = "b"
    spec["2"]["len_enc"] = len_enc
    spec["2"]["len_type"] = len_type
    spec["2"]["max_len"] = max_len
    spec["2"]["len_count"] = len_count
    spec["2"]["right_pad"] = pad
    decoded = {"t": "0200", "2": "123"}
    s, encoded = iso8583.encode(decoded, spec)
    assert s == result
    assert encoded["2"]["len"] == result_f2_len
def test_encode_nibbles_odd_no_pad() -> None:
    """Encoding an odd nibble count without a pad configured must fail."""
    spec = copy.deepcopy(iso8583.specs.default)
    spec["t"]["data_enc"] = "ascii"
    spec["p"]["data_enc"] = "ascii"
    spec["2"].update(
        {
            "data_enc": "b",
            "len_enc": "b",
            "len_type": 2,
            "max_len": 8,
            "len_count": "nibbles",
        }
    )
    decoded = {"t": "0200", "2": "1"}
    with pytest.raises(
        iso8583.EncodeError,
        match="Failed to encode .*: field 2",
    ):
        iso8583.encode(decoded, spec=spec)
# fmt: off
@pytest.mark.parametrize(
    ["data_enc", "len_enc", "len_type", "max_len", "len_count", "data", "result_f2_len"],
    [
        ("ascii", "ascii", 2, 8, "bytes", b"02004000000000000000041234", b"04"),
        ("ascii", "ascii", 2, 8, "nibbles", b"02004000000000000000081234", b"08"),
        ("ascii", "b", 2, 8, "bytes", b"02004000000000000000\x00\x041234", b"\x00\x04"),
        ("ascii", "b", 2, 8, "nibbles", b"02004000000000000000\x00\x081234", b"\x00\x08"),
        ("b", "ascii", 2, 8, "bytes", b"0200400000000000000002\x12\x34", b"02"),
        ("b", "ascii", 2, 8, "nibbles", b"0200400000000000000004\x12\x34", b"04"),
        ("b", "b", 2, 8, "bytes", b"02004000000000000000\x00\x02\x12\x34", b"\x00\x02"),
        ("b", "b", 2, 8, "nibbles", b"02004000000000000000\x00\x04\x12\x34", b"\x00\x04"),
        ("ascii", "ascii", 0, 4, "bytes", b"020040000000000000001234", b""),
        ("ascii", "ascii", 0, 8, "nibbles", b"020040000000000000001234", b""),
        ("b", "ascii", 0, 2, "bytes", b"02004000000000000000\x12\x34", b""),
        ("b", "ascii", 0, 4, "nibbles", b"02004000000000000000\x12\x34", b""),
    ],
)
# fmt: on
def test_decode_nibbles(
    data_enc: str,
    len_enc: str,
    len_type: int,
    max_len: int,
    len_count: str,
    data: bytes,
    result_f2_len: bytes,
) -> None:
    """Decoded field-2 length prefix counts bytes or nibbles per spec len_count."""
    spec = copy.deepcopy(iso8583.specs.default)
    spec["t"]["data_enc"] = "ascii"
    spec["p"]["data_enc"] = "ascii"
    spec["2"]["data_enc"] = data_enc
    spec["2"]["len_enc"] = len_enc
    spec["2"]["len_type"] = len_type
    spec["2"]["max_len"] = max_len
    spec["2"]["len_count"] = len_count
    decoded, encoded = iso8583.decode(data, spec)
    assert decoded["2"] == "1234"
    assert encoded["2"]["len"] == result_f2_len
# fmt: off
@pytest.mark.parametrize(
    ["len_enc", "len_type", "max_len", "len_count", "pad", "data", "result_f2_len"],
    [
        ("ascii", 2, 8, "nibbles", "0", b"0200400000000000000003\x01\x23", b"03"),
        ("b", 2, 8, "nibbles", "0", b"02004000000000000000\x00\x03\x01\x23", b"\x00\x03"),
        ("ascii", 0, 3, "nibbles", "0", b"02004000000000000000\x01\x23", b""),
        ("ascii", 2, 8, "nibbles", "F", b"0200400000000000000003\xF1\x23", b"03"),
        ("b", 2, 8, "nibbles", "F", b"02004000000000000000\x00\x03\xF1\x23", b"\x00\x03"),
        ("ascii", 0, 3, "nibbles", "F", b"02004000000000000000\xF1\x23", b""),
        ("ascii", 2, 8, "nibbles", "01", b"0200400000000000000003\x01\x23", b"03"),
        ("b", 2, 8, "nibbles", "01", b"02004000000000000000\x00\x03\x01\x23", b"\x00\x03"),
        ("ascii", 0, 3, "nibbles", "01", b"02004000000000000000\x01\x23", b""),
        ("ascii", 2, 8, "nibbles", "F1", b"0200400000000000000003\xF1\x23", b"03"),
        ("b", 2, 8, "nibbles", "F1", b"02004000000000000000\x00\x03\xF1\x23", b"\x00\x03"),
        ("ascii", 0, 3, "nibbles", "F1", b"02004000000000000000\xF1\x23", b""),
    ],
)
# fmt: on
def test_decode_nibbles_left_pad(
    len_enc: str,
    len_type: int,
    max_len: int,
    len_count: str,
    pad: str,
    data: bytes,
    result_f2_len: bytes,
) -> None:
    """A left-pad nibble is stripped when decoding an odd-length field."""
    spec = copy.deepcopy(iso8583.specs.default)
    spec["t"]["data_enc"] = "ascii"
    spec["p"]["data_enc"] = "ascii"
    spec["2"]["data_enc"] = "b"
    spec["2"]["len_enc"] = len_enc
    spec["2"]["len_type"] = len_type
    spec["2"]["max_len"] = max_len
    spec["2"]["len_count"] = len_count
    spec["2"]["left_pad"] = pad
    decoded, encoded = iso8583.decode(data, spec)
    assert decoded["2"] == "123"
    assert encoded["2"]["len"] == result_f2_len
# fmt: off
@pytest.mark.parametrize(
    ["len_enc", "len_type", "max_len", "len_count", "pad", "data", "result_f2_len"],
    [
        ("ascii", 2, 8, "nibbles", "0", b"0200400000000000000003\x12\x30", b"03"),
        ("b", 2, 8, "nibbles", "0", b"02004000000000000000\x00\x03\x12\x30", b"\x00\x03"),
        ("ascii", 0, 3, "nibbles", "0", b"02004000000000000000\x12\x30", b""),
        ("ascii", 2, 8, "nibbles", "F", b"0200400000000000000003\x12\x3F", b"03"),
        ("b", 2, 8, "nibbles", "F", b"02004000000000000000\x00\x03\x12\x3F", b"\x00\x03"),
        ("ascii", 0, 3, "nibbles", "F", b"02004000000000000000\x12\x3F", b""),
        ("ascii", 2, 8, "nibbles", "01", b"0200400000000000000003\x12\x30", b"03"),
        ("b", 2, 8, "nibbles", "01", b"02004000000000000000\x00\x03\x12\x30", b"\x00\x03"),
        ("ascii", 0, 3, "nibbles", "01", b"02004000000000000000\x12\x30", b""),
        ("ascii", 2, 8, "nibbles", "F1", b"0200400000000000000003\x12\x3F", b"03"),
        ("b", 2, 8, "nibbles", "F1", b"02004000000000000000\x00\x03\x12\x3F", b"\x00\x03"),
        ("ascii", 0, 3, "nibbles", "F1", b"02004000000000000000\x12\x3F", b""),
    ],
)
# fmt: on
def test_decode_nibbles_right_pad(
    len_enc: str,
    len_type: int,
    max_len: int,
    len_count: str,
    pad: str,
    data: bytes,
    result_f2_len: bytes,
) -> None:
    """A right-pad nibble is stripped when decoding an odd-length field."""
    spec = copy.deepcopy(iso8583.specs.default)
    spec["t"]["data_enc"] = "ascii"
    spec["p"]["data_enc"] = "ascii"
    spec["2"]["data_enc"] = "b"
    spec["2"]["len_enc"] = len_enc
    spec["2"]["len_type"] = len_type
    spec["2"]["max_len"] = max_len
    spec["2"]["len_count"] = len_count
    spec["2"]["right_pad"] = pad
    decoded, encoded = iso8583.decode(data, spec)
    assert decoded["2"] == "123"
    assert encoded["2"]["len"] == result_f2_len
def test_decode_nibbles_odd_no_pad() -> None:
    """Decoding data with an extra nibble and no pad configured must fail."""
    spec = copy.deepcopy(iso8583.specs.default)
    spec["t"]["data_enc"] = "ascii"
    spec["p"]["data_enc"] = "ascii"
    spec["2"].update(
        {
            "data_enc": "b",
            "len_enc": "b",
            "len_type": 2,
            "max_len": 8,
            "len_count": "nibbles",
        }
    )
    raw = b"02004000000000000000\x00\x03\x12\x30"
    with pytest.raises(
        iso8583.DecodeError,
        match="Field data is 4 nibbles, expecting 3: field 2 pos 22",
    ):
        iso8583.decode(raw, spec=spec)
def test_encode_nibbles_variable_over_max() -> None:
    """Encoding a variable field longer than max_len nibbles must fail."""
    spec = copy.deepcopy(iso8583.specs.default)
    spec["t"]["data_enc"] = "ascii"
    spec["p"]["data_enc"] = "ascii"
    spec["2"].update(
        {
            "data_enc": "ascii",
            "len_enc": "ascii",
            "len_type": 2,
            "max_len": 4,
            "len_count": "nibbles",
        }
    )
    decoded = {"t": "0200", "2": "1234"}
    with pytest.raises(
        iso8583.EncodeError,
        match="Field data is 8 nibbles, larger than maximum 4: field 2",
    ):
        iso8583.encode(decoded, spec=spec)
def test_encode_nibbles_fixed_partial() -> None:
    """Encoding a fixed field with too few nibbles must fail."""
    spec = copy.deepcopy(iso8583.specs.default)
    spec["t"]["data_enc"] = "ascii"
    spec["p"]["data_enc"] = "ascii"
    spec["2"].update(
        {
            "data_enc": "ascii",
            "len_enc": "ascii",
            "len_type": 0,
            "max_len": 4,
            "len_count": "nibbles",
        }
    )
    decoded = {"t": "0200", "2": "1"}
    with pytest.raises(
        iso8583.EncodeError,
        match="Field data is 2 nibbles, expecting 4: field 2",
    ):
        iso8583.encode(decoded, spec=spec)
def test_encode_nibbles_fixed_missing() -> None:
    """Encoding an empty fixed field must fail."""
    spec = copy.deepcopy(iso8583.specs.default)
    spec["t"]["data_enc"] = "ascii"
    spec["p"]["data_enc"] = "ascii"
    spec["2"].update(
        {
            "data_enc": "ascii",
            "len_enc": "ascii",
            "len_type": 0,
            "max_len": 4,
            "len_count": "nibbles",
        }
    )
    decoded = {"t": "0200", "2": ""}
    with pytest.raises(
        iso8583.EncodeError,
        match="Field data is 0 nibbles, expecting 4: field 2",
    ):
        iso8583.encode(decoded, spec=spec)
def test_decode_nibbles_variable_over_max() -> None:
    """Decoding a variable field longer than max_len nibbles must fail."""
    spec = copy.deepcopy(iso8583.specs.default)
    spec["t"]["data_enc"] = "ascii"
    spec["p"]["data_enc"] = "ascii"
    spec["2"].update(
        {
            "data_enc": "ascii",
            "len_enc": "ascii",
            "len_type": 2,
            "max_len": 4,
            "len_count": "nibbles",
        }
    )
    raw = b"02004000000000000000081234"
    with pytest.raises(
        iso8583.DecodeError,
        match="Field data is 8 nibbles, larger than maximum 4: field 2 pos 20",
    ):
        iso8583.decode(raw, spec=spec)
def test_decode_nibbles_variable_partial() -> None:
    """Decoding a variable field with truncated data must fail."""
    spec = copy.deepcopy(iso8583.specs.default)
    spec["t"]["data_enc"] = "ascii"
    spec["p"]["data_enc"] = "ascii"
    spec["2"].update(
        {
            "data_enc": "ascii",
            "len_enc": "ascii",
            "len_type": 2,
            "max_len": 4,
            "len_count": "nibbles",
        }
    )
    raw = b"02004000000000000000041"
    with pytest.raises(
        iso8583.DecodeError,
        match="Field data is 2 nibbles, expecting 4: field 2 pos 22",
    ):
        iso8583.decode(raw, spec=spec)
def test_decode_nibbles_variable_missing() -> None:
    """Decoding a variable field whose data is absent must fail."""
    spec = copy.deepcopy(iso8583.specs.default)
    spec["t"]["data_enc"] = "ascii"
    spec["p"]["data_enc"] = "ascii"
    spec["2"].update(
        {
            "data_enc": "ascii",
            "len_enc": "ascii",
            "len_type": 2,
            "max_len": 4,
            "len_count": "nibbles",
        }
    )
    raw = b"0200400000000000000004"
    with pytest.raises(
        iso8583.DecodeError,
        match="Field data is 0 nibbles, expecting 4: field 2 pos 22",
    ):
        iso8583.decode(raw, spec=spec)
def test_decode_nibbles_fixed_partial() -> None:
    """Decoding a fixed field with truncated data must fail."""
    spec = copy.deepcopy(iso8583.specs.default)
    spec["t"]["data_enc"] = "ascii"
    spec["p"]["data_enc"] = "ascii"
    spec["2"].update(
        {
            "data_enc": "ascii",
            "len_enc": "ascii",
            "len_type": 0,
            "max_len": 4,
            "len_count": "nibbles",
        }
    )
    raw = b"020040000000000000001"
    with pytest.raises(
        iso8583.DecodeError,
        match="Field data is 2 nibbles, expecting 4: field 2 pos 20",
    ):
        iso8583.decode(raw, spec=spec)
def test_decode_nibbles_fixed_missing() -> None:
    """Decoding a fixed field whose data is absent must fail."""
    spec = copy.deepcopy(iso8583.specs.default)
    spec["t"]["data_enc"] = "ascii"
    spec["p"]["data_enc"] = "ascii"
    spec["2"].update(
        {
            "data_enc": "ascii",
            "len_enc": "ascii",
            "len_type": 0,
            "max_len": 4,
            "len_count": "nibbles",
        }
    )
    raw = b"02004000000000000000"
    with pytest.raises(
        iso8583.DecodeError,
        match="Field data is 0 nibbles, expecting 4: field 2 pos 20",
    ):
        iso8583.decode(raw, spec=spec)
| tests/test_nibbles.py | 16,991 | Fixed field is missing
Fixed field is provided partially
Variable field is missing
Variable field length is over maximum allowed
Variable field is provided partially
Fixed field is missing
Fixed field is provided partially
Variable field length is over maximum allowed
Test length measured in half bytes (nibbles). Nibbles were added in v2.1
fmt: off fmt: on fmt: off fmt: on fmt: off fmt: on fmt: off fmt: on fmt: off fmt: on fmt: off fmt: on | 445 | en | 0.933005 |
import argparse
import os, sys
import os.path as osp
import torchvision
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import transforms
import network, loss
from torch.utils.data import DataLoader
import random, pdb, math, copy
from tqdm import tqdm
from scipy.spatial.distance import cdist
import pickle
from data_load import mnist, svhn, usps
# inverse_transform = None
# class InverseTransform(torchvision.transforms.Normalize):
# """
# Undoes the normalization and returns the reconstructed images in the input domain.
# """
# def __init__(self, mean, std):
# mean = torch.as_tensor(mean)
# std = torch.as_tensor(std)
# std_inv = 1 / (std + 1e-7)
# mean_inv = -mean * std_inv
# super().__init__(mean=mean_inv, std=std_inv)
# def __call__(self, tensor):
# t = super().__call__(tensor.clone())
# # return transforms.ToPILImage()(t)
# return t
def digit_load(args):
    """Build the test DataLoader for the digit dataset selected by args.dset.

    's' -> SVHN, 'u' -> USPS, 'm' -> MNIST. Returns {"test": DataLoader}
    with batch size 2 * args.batch_size.

    Raises:
        ValueError: if args.dset is not one of 's'/'u'/'m' (previously a
        NameError on the undefined test_source).
    """
    global inverse_transform
    train_bs = args.batch_size
    if args.dset == 's':
        test_source = svhn.SVHN('./data/svhn/', split='test', download=True,
                transform=transforms.Compose([
                    transforms.Resize(32),
                    transforms.ToTensor(),
                    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
                ]))
    elif args.dset == 'u':
        # NOTE(review): random crop/rotation at *test* time looks unusual --
        # presumably intentional for this inspection script; confirm.
        test_source = usps.USPS('./data/usps/', train=False, download=True,
                transform=transforms.Compose([
                    transforms.RandomCrop(28, padding=4),
                    transforms.RandomRotation(10),
                    transforms.ToTensor(),
                    transforms.Normalize((0.5,), (0.5,))
                ]))
    elif args.dset == 'm':
        test_source = mnist.MNIST('./data/mnist/', train=False, download=True,
                transform=transforms.Compose([
                    transforms.ToTensor(),
                    transforms.Normalize((0.5,), (0.5,))
                ]))
    else:
        raise ValueError("Unknown dataset flag: {!r}".format(args.dset))
    dset_loaders = {}
    dset_loaders["test"] = DataLoader(test_source, batch_size=train_bs*2, shuffle=False,
        num_workers=args.worker, drop_last=False)
    return dset_loaders
def cal_acc(loader, netF, netB, netC):
    """Evaluate netC(netB(netF(x))) over *loader* and dump every input image
    under args.output_dir/inspect/label-<y>/pred-<p>/ for visual inspection.

    Returns:
        (accuracy in percent, mean prediction entropy).

    NOTE(review): reads the module-level ``args`` for ``args.output_dir`` --
    only works when called from this script's __main__ block.
    """
    k = 0  # running index used to name the saved .jpg files
    start_test = True
    with torch.no_grad():
        iter_test = iter(loader)
        for i in range(len(loader)):
            data = iter_test.next()
            input_images = []
            inputs = data[0]
            inputs_clone = inputs.clone()
            for j in range(inputs_clone.size(0)):
                # invert Normalize((0.5,), (0.5,)) before converting to PIL
                x = transforms.Normalize((-1,), (2,))(inputs_clone[j])
                input_images.append(transforms.ToPILImage()(x))
            labels = data[1]
            outputs = netC(netB(netF(inputs)))
            #
            _, predict = torch.max(outputs.float().cpu(), 1)
            for j in range(inputs.size(0)):
                folder = args.output_dir + '/inspect/label-{}'.format(labels[j])
                if not osp.exists(folder):
                    os.makedirs(folder)
                subfolder = folder + '/pred-{}'.format(predict[j])
                if not osp.exists(subfolder):
                    os.makedirs(subfolder)
                input_images[j].save(subfolder + '/{}.jpg'.format(k))
                k += 1
            #
            # accumulate logits/labels across batches for the final metrics
            if start_test:
                all_output = outputs.float().cpu()
                all_label = labels.float()
                start_test = False
            else:
                all_output = torch.cat((all_output, outputs.float().cpu()), 0)
                all_label = torch.cat((all_label, labels.float()), 0)
    _, predict = torch.max(all_output, 1)
    accuracy = torch.sum(torch.squeeze(predict).float() == all_label).item() / float(all_label.size()[0])
    mean_ent = torch.mean(loss.Entropy(nn.Softmax(dim=1)(all_output))).cpu().data.item()
    return accuracy*100, mean_ent
def test(args):
    """Load the trained F/B/C networks from args.output_dir and report
    test-set accuracy on the dataset selected by args.dset."""
    dset_loaders = digit_load(args)
    ## set base network
    if args.dset == 'u':
        netF = network.LeNetBase()#.cuda()
    elif args.dset == 'm':
        netF = network.LeNetBase()#.cuda()
    elif args.dset == 's':
        netF = network.DTNBase()#.cuda()
    netB = network.feat_bootleneck(type=args.classifier, feature_dim=netF.in_features, bottleneck_dim=args.bottleneck)#.cuda()
    netC = network.feat_classifier(type=args.layer, class_num = args.class_num, bottleneck_dim=args.bottleneck)#.cuda()
    # restore the three checkpoint files saved by training
    args.modelpath = args.output_dir + '/F.pt'
    netF.load_state_dict(torch.load(args.modelpath))
    args.modelpath = args.output_dir + '/B.pt'
    netB.load_state_dict(torch.load(args.modelpath))
    args.modelpath = args.output_dir + '/C.pt'
    netC.load_state_dict(torch.load(args.modelpath))
    netF.eval()
    netB.eval()
    netC.eval()
    acc, _ = cal_acc(dset_loaders['test'], netF, netB, netC)
    log_str = 'Task: {}, Accuracy = {:.2f}%'.format(args.dset, acc)
    # Best-effort log-file write: args.out_file is only set by some callers.
    # Narrowed from a bare except so unrelated bugs are not silently hidden.
    try:
        args.out_file.write(log_str + '\n')
        args.out_file.flush()
    except (AttributeError, OSError):
        pass
    print(log_str+'\n')
def print_args(args):
    """Render all parsed arguments as a newline-separated 'key:value' table."""
    rows = ["=========================================="]
    rows.extend("{}:{}".format(key, value) for key, value in args.__dict__.items())
    return "\n".join(rows) + "\n"
if __name__ == "__main__":
    # CLI entry point: evaluate a trained SHOT model on the chosen digit dataset.
    parser = argparse.ArgumentParser(description='SHOT')
    parser.add_argument('--gpu_id', type=str, nargs='?', default='0', help="device id to run")
    parser.add_argument('--s', type=int, default=0, help="source")
    parser.add_argument('--t', type=int, default=1, help="target")
    parser.add_argument('--max_epoch', type=int, default=30, help="maximum epoch")
    parser.add_argument('--batch_size', type=int, default=64, help="batch_size")
    parser.add_argument('--worker', type=int, default=4, help="number of workers")
    parser.add_argument('--dset', type=str, default='s', choices=['u', 'm','s'])
    parser.add_argument('--lr', type=float, default=0.01, help="learning rate")
    parser.add_argument('--seed', type=int, default=2020, help="random seed")
    parser.add_argument('--bottleneck', type=int, default=256)
    parser.add_argument('--layer', type=str, default="wn", choices=["linear", "wn"])
    parser.add_argument('--classifier', type=str, default="bn", choices=["ori", "bn"])
    parser.add_argument('--output', type=str, default='')
    parser.add_argument('--issave', type=bool, default=True)
    args = parser.parse_args()
    args.class_num = 10
    # os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
    # seed everything for reproducibility (CUDA seeding left commented for CPU runs)
    SEED = args.seed
    torch.manual_seed(SEED)
    # torch.cuda.manual_seed(SEED)
    np.random.seed(SEED)
    random.seed(SEED)
    # torch.backends.cudnn.deterministic = True
    args.output_dir = osp.join(args.output, 'seed' + str(args.seed), args.dset)
    test(args)
# python unsupervised_digit.py --dset m --gpu_id 0 --output ckps_unsupervised_digit
# python unsupervised_digit.py --dset m --gpu_id 0 --ent --output ckps_unsupervised_digit_ent
# python unsupervised_digit.py --dset m --gpu_id 0 --gent --output ckps_unsupervised_digit_gent
# python unsupervised_digit.py --dset m --gpu_id 0 --ent --gent --output ckps_unsupervised_digit_ent_gent
# NOTE: there is actually no way to know which class will come out; ideally
# inspect a t-SNE plot, or show the classifications first?
# show classification + gradcam (faster version)
# -*- coding: utf-8 -*-
# Copyright (c) 2018, SIS and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class Encuesta(Document):
    """Controller for the "Encuesta" (survey) DocType.

    No custom server-side behavior is defined; everything is inherited
    from `frappe.model.document.Document`.
    """
    pass
| encuestaapp/encuestaapp/doctype/encuesta/encuesta.py | 248 | -*- coding: utf-8 -*- Copyright (c) 2018, SIS and contributors For license information, please see license.txt | 110 | en | 0.783907 |
"""
Tests for Timestamp parsing, aimed at pandas/_libs/tslibs/parsing.pyx
"""
from datetime import datetime
import re
from dateutil.parser import parse
import numpy as np
import pytest
from pandas._libs.tslibs import parsing
from pandas._libs.tslibs.parsing import parse_time_string
import pandas.util._test_decorators as td
import pandas._testing as tm
def test_parse_time_string():
    # The quarter designator must be case-insensitive: "4Q" and "4q"
    # should yield the same parsed timestamp and the same resolution.
    upper = parse_time_string("4Q1984")
    lower = parse_time_string("4q1984")
    assert upper == lower
def test_parse_time_string_invalid_type():
    # Non-string input must raise instead of being passed through silently.
    expected = re.escape(
        "Argument 'arg' has incorrect type (expected str, got tuple)"
    )
    with pytest.raises(TypeError, match=expected):
        parse_time_string((4, 5))
@pytest.mark.parametrize(
    "dashed,normal", [("1988-Q2", "1988Q2"), ("2Q-1988", "2Q1988")]
)
def test_parse_time_quarter_with_dash(dashed, normal):
    # see gh-9688: a dash between year and quarter must not change parsing.
    assert parse_time_string(dashed) == parse_time_string(normal)
@pytest.mark.parametrize("dashed", ["-2Q1992", "2-Q1992", "4-4Q1992"])
def test_parse_time_quarter_with_dash_error(dashed):
    # A misplaced dash makes the quarter string unparseable.
    msg = "Unknown datetime string format, unable to parse: " + dashed
    with pytest.raises(parsing.DateParseError, match=msg):
        parse_time_string(dashed)
@pytest.mark.parametrize(
    "date_string,expected",
    [
        ("123.1234", False),
        ("-50000", False),
        ("999", False),
        ("m", False),
        ("T", False),
        ("Mon Sep 16, 2013", True),
        ("2012-01-01", True),
        ("01/01/2012", True),
        ("01012012", True),
        ("0101", True),
        ("1-1", True),
    ],
)
def test_does_not_convert_mixed_integer(date_string, expected):
    # Pure numbers and bare unit letters must not be mistaken for datetimes.
    looks_like_datetime = parsing._does_string_look_like_datetime(date_string)
    assert looks_like_datetime is expected
@pytest.mark.parametrize(
    "date_str,kwargs,msg",
    [
        (
            "2013Q5",
            {},  # no freq needed: the quarter digit itself is out of range
            (
                "Incorrect quarterly string is given, "
                "quarter must be between 1 and 4: 2013Q5"
            ),
        ),
        # see gh-5418
        (
            "2013Q1",
            {"freq": "INVLD-L-DEC-SAT"},
            (
                "Unable to retrieve month information "
                "from given freq: INVLD-L-DEC-SAT"
            ),
        ),
    ],
)
def test_parsers_quarterly_with_freq_error(date_str, kwargs, msg):
    """Quarterly strings with a bad quarter or bad freq raise DateParseError."""
    with pytest.raises(parsing.DateParseError, match=msg):
        parsing.parse_time_string(date_str, **kwargs)
@pytest.mark.parametrize(
    "date_str,freq,expected",
    [
        ("2013Q2", None, datetime(2013, 4, 1)),
        ("2013Q2", "A-APR", datetime(2012, 8, 1)),
        ("2013-Q2", "A-DEC", datetime(2013, 4, 1)),
    ],
)
def test_parsers_quarterly_with_freq(date_str, freq, expected):
    # The fiscal-year anchor (freq) shifts which month the quarter maps to.
    parsed, _ = parsing.parse_time_string(date_str, freq=freq)
    assert parsed == expected
@pytest.mark.parametrize(
    "date_str", ["2Q 2005", "2Q-200A", "2Q-200", "22Q2005", "2Q200.", "6Q-20"]
)
def test_parsers_quarter_invalid(date_str):
    # "6Q-20" parses far enough to detect an out-of-range quarter; every
    # other sample fails earlier with the generic format error.
    quarter_msg = (
        "Incorrect quarterly string is given, quarter "
        f"must be between 1 and 4: {date_str}"
    )
    format_msg = f"Unknown datetime string format, unable to parse: {date_str}"
    msg = quarter_msg if date_str == "6Q-20" else format_msg
    with pytest.raises(ValueError, match=msg):
        parsing.parse_time_string(date_str)
@pytest.mark.parametrize(
    "date_str,expected",
    [("201101", datetime(2011, 1, 1, 0, 0)), ("200005", datetime(2000, 5, 1, 0, 0))],
)
def test_parsers_month_freq(date_str, expected):
    # With monthly freq, a YYYYMM string resolves to the first of the month.
    parsed, _ = parsing.parse_time_string(date_str, freq="M")
    assert parsed == expected
@td.skip_if_not_us_locale
@pytest.mark.parametrize(
    "string,fmt",
    [
        ("20111230", "%Y%m%d"),
        ("2011-12-30", "%Y-%m-%d"),
        ("30-12-2011", "%d-%m-%Y"),
        ("2011-12-30 00:00:00", "%Y-%m-%d %H:%M:%S"),
        ("2011-12-30T00:00:00", "%Y-%m-%dT%H:%M:%S"),
        ("2011-12-30 00:00:00.000000", "%Y-%m-%d %H:%M:%S.%f"),
    ],
)
def test_guess_datetime_format_with_parseable_formats(string, fmt):
    # Each sample should be recognized as exactly the strftime format it uses.
    assert parsing._guess_datetime_format(string) == fmt
@pytest.mark.parametrize("dayfirst,expected", [(True, "%d/%m/%Y"), (False, "%m/%d/%Y")])
def test_guess_datetime_format_with_dayfirst(dayfirst, expected):
    # "01/01/2011" is ambiguous; dayfirst decides day-vs-month ordering.
    guessed = parsing._guess_datetime_format("01/01/2011", dayfirst=dayfirst)
    assert guessed == expected
@td.skip_if_has_locale
@pytest.mark.parametrize(
    "string,fmt",
    [
        ("30/Dec/2011", "%d/%b/%Y"),
        ("30/December/2011", "%d/%B/%Y"),
        ("30/Dec/2011 00:00:00", "%d/%b/%Y %H:%M:%S"),
    ],
)
def test_guess_datetime_format_with_locale_specific_formats(string, fmt):
    # Month names (abbreviated and full) map to %b / %B respectively.
    assert parsing._guess_datetime_format(string) == fmt
@pytest.mark.parametrize(
    "invalid_dt",
    [
        "2013",
        "01/2013",
        "12:00:00",
        "1/1/1/1",
        "this_is_not_a_datetime",
        "51a",
        9,
        datetime(2011, 1, 1),
    ],
)
def test_guess_datetime_format_invalid_inputs(invalid_dt):
    # Guessing requires a string containing year, month and day; anything
    # else (partial dates, non-strings) must yield None.
    assert parsing._guess_datetime_format(invalid_dt) is None
@pytest.mark.parametrize(
    "string,fmt",
    [
        ("2011-1-1", "%Y-%m-%d"),
        ("1/1/2011", "%m/%d/%Y"),
        ("30-1-2011", "%d-%m-%Y"),
        ("2011-1-1 0:0:0", "%Y-%m-%d %H:%M:%S"),
        ("2011-1-3T00:00:0", "%Y-%m-%dT%H:%M:%S"),
        ("2011-1-1 00:00:00", "%Y-%m-%d %H:%M:%S"),
    ],
)
def test_guess_datetime_format_no_padding(string, fmt):
    # see gh-11142: unpadded fields still map to the padded directives.
    assert parsing._guess_datetime_format(string) == fmt
def test_try_parse_dates():
    # try_parse_dates should forward dayfirst to dateutil for every element.
    raw = np.array(["5/1/2000", "6/1/2000", "7/1/2000"], dtype=object)
    parsed = parsing.try_parse_dates(raw, dayfirst=True)
    expected = np.array([parse(value, dayfirst=True) for value in raw])
    tm.assert_numpy_array_equal(parsed, expected)
def test_parse_time_string_check_instance_type_raise_exception():
    # issue 20684: tuples raise TypeError while plain strings still parse.
    msg = re.escape("Argument 'arg' has incorrect type (expected str, got tuple)")
    with pytest.raises(TypeError, match=msg):
        parse_time_string((1, 2, 3))
    assert parse_time_string("2019") == (datetime(2019, 1, 1), "year")
| venv/Lib/site-packages/pandas/tests/tslibs/test_parsing.py | 6,830 | Tests for Timestamp parsing, aimed at pandas/_libs/tslibs/parsing.pyx
Raise on invalid input, don't just return it see gh-9688 see gh-5418 A datetime string must include a year, month and a day for it to be guessable, in addition to being a string that looks like a datetime. see gh-11142 issue 20684 | 302 | en | 0.872254 |
"""
This file is also being used by the GalaxyCloudRunner (gcr) Docker image.
"""
from getpass import getuser
from multiprocessing import cpu_count
from socket import gethostname
from string import Template
# string.Template source for slurm.conf. main() substitutes $hostname, $user
# and $cpus, configuring a single-node cluster where this host acts as both
# controller and compute node.
SLURM_CONFIG_TEMPLATE = '''
# slurm.conf file generated by configurator.html.
# Put this file on all nodes of your cluster.
# See the slurm.conf man page for more information.
#
ControlMachine=$hostname
#ControlAddr=
#BackupController=
#BackupAddr=
#
AuthType=auth/munge
CacheGroups=0
#CheckpointType=checkpoint/none
CryptoType=crypto/munge
MpiDefault=none
#PluginDir=
#PlugStackConfig=
#PrivateData=jobs
ProctrackType=proctrack/pgid
#Prolog=
#PrologSlurmctld=
#PropagatePrioProcess=0
#PropagateResourceLimits=
#PropagateResourceLimitsExcept=
ReturnToService=1
#SallocDefaultCommand=
SlurmctldPidFile=/var/run/slurmctld.pid
SlurmctldPort=6817
SlurmdPidFile=/var/run/slurmd.pid
SlurmdPort=6818
SlurmdSpoolDir=/tmp/slurmd
SlurmUser=$user
#SlurmdUser=root
#SrunEpilog=
#SrunProlog=
StateSaveLocation=/tmp
SwitchType=switch/none
#TaskEpilog=
TaskPlugin=task/none
#TaskPluginParam=
#TaskProlog=
InactiveLimit=0
KillWait=30
MinJobAge=300
#OverTimeLimit=0
SlurmctldTimeout=120
SlurmdTimeout=300
#UnkillableStepTimeout=60
#VSizeFactor=0
Waittime=0
FastSchedule=1
SchedulerType=sched/backfill
SchedulerPort=7321
SelectType=select/linear
#SelectTypeParameters=
AccountingStorageType=accounting_storage/none
#AccountingStorageUser=
AccountingStoreJobComment=YES
ClusterName=cluster
#DebugFlags=
#JobCompHost=
#JobCompLoc=
#JobCompPass=
#JobCompPort=
JobCompType=jobcomp/none
#JobCompUser=
JobAcctGatherFrequency=30
JobAcctGatherType=jobacct_gather/none
SlurmctldDebug=3
#SlurmctldLogFile=
SlurmdDebug=3
#SlurmdLogFile=
NodeName=$hostname CPUs=$cpus State=UNKNOWN
PartitionName=debug Nodes=$hostname Default=YES MaxTime=INFINITE State=UP
'''
def main():
    """Render the slurm.conf template for this host and install it.

    Fills in the local hostname, current user and CPU count, then writes the
    result to /etc/slurm-llnl/slurm.conf (requires write permission there).
    """
    template_params = {"hostname": gethostname(),
                       "user": getuser(),
                       "cpus": cpu_count()}
    config_contents = Template(SLURM_CONFIG_TEMPLATE).substitute(template_params)
    # Use a context manager so the handle is flushed and closed
    # deterministically (previously the file object was never closed).
    with open("/etc/slurm-llnl/slurm.conf", "w") as conf_file:
        conf_file.write(config_contents)


if __name__ == "__main__":
    main()
| pulsar/scripts/_configure_slurm.py | 2,178 | This file is also being used by the GalaxyCloudRunner (gcr) Docker image. | 73 | en | 0.960302 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import logging
import numpy as np
from astropy import units as u
from astropy.coordinates import Angle
from astropy.io import fits
from astropy.table import Table
from gammapy.maps import MapAxes, MapAxis
from gammapy.utils.array import array_stats_str
from gammapy.utils.gauss import MultiGauss2D
from gammapy.utils.interpolation import ScaledRegularGridInterpolator
from gammapy.utils.scripts import make_path
from .table import PSF3D, EnergyDependentTablePSF
__all__ = ["EnergyDependentMultiGaussPSF"]
log = logging.getLogger(__name__)
class EnergyDependentMultiGaussPSF:
    """Triple Gauss analytical PSF depending on energy and theta.

    To evaluate the PSF call the ``to_energy_dependent_table_psf`` or
    ``psf_at_energy_and_theta`` methods.

    Parameters
    ----------
    energy_axis_true : `MapAxis`
        True energy axis
    offset_axis : `MapAxis`
        Offset axis.
    sigmas : list of 'numpy.ndarray'
        Triple Gauss sigma parameters, where every entry is
        a two dimensional 'numpy.ndarray' containing the sigma
        value for every given energy and theta.
    norms : list of 'numpy.ndarray'
        Triple Gauss norm parameters, where every entry is
        a two dimensional 'numpy.ndarray' containing the norm
        value for every given energy and theta. Norm corresponds
        to the value of the Gaussian at theta = 0.
    meta : dict
        Meta data

    Examples
    --------
    Plot R68 of the PSF vs. theta and energy:

    .. plot::
        :include-source:

        import matplotlib.pyplot as plt
        from gammapy.irf import EnergyDependentMultiGaussPSF
        filename = '$GAMMAPY_DATA/cta-1dc/caldb/data/cta/1dc/bcf/South_z20_50h/irf_file.fits'
        psf = EnergyDependentMultiGaussPSF.read(filename, hdu='POINT SPREAD FUNCTION')
        psf.plot_containment(0.68)
        plt.show()
    """

    tag = "psf_3gauss"

    def __init__(
        self,
        energy_axis_true,
        offset_axis,
        sigmas,
        norms,
        meta,
    ):
        energy_axis_true.assert_name("energy_true")
        offset_axis.assert_name("offset")
        self._energy_axis_true = energy_axis_true
        self._offset_axis = offset_axis
        # Replace sigma == 0 entries with 1 so the Gaussians stay
        # well-defined. NOTE(review): this mutates the caller's arrays in
        # place — confirm no caller relies on the original values.
        sigmas[0][sigmas[0] == 0] = 1
        sigmas[1][sigmas[1] == 0] = 1
        sigmas[2][sigmas[2] == 0] = 1
        self.sigmas = sigmas
        self.norms = norms
        self.meta = meta or {}
        self._interp_norms = self._setup_interpolators(self.norms)
        self._interp_sigmas = self._setup_interpolators(self.sigmas)

    @property
    def energy_thresh_lo(self):
        """Low energy threshold"""
        return self.meta["LO_THRES"] * u.TeV

    @property
    def energy_thresh_hi(self):
        """High energy threshold"""
        return self.meta["HI_THRES"] * u.TeV

    @property
    def energy_axis_true(self):
        """True energy axis (`MapAxis`)."""
        return self._energy_axis_true

    @property
    def offset_axis(self):
        """Offset axis (`MapAxis`)."""
        return self._offset_axis

    def _setup_interpolators(self, values_list):
        # Build one 2D (offset, energy) interpolator per Gauss component.
        interps = []
        for values in values_list:
            interp = ScaledRegularGridInterpolator(
                points=(self.offset_axis.center, self.energy_axis_true.center),
                values=values,
            )
            interps.append(interp)
        return interps

    @classmethod
    def read(cls, filename, hdu="PSF_2D_GAUSS"):
        """Create `EnergyDependentMultiGaussPSF` from FITS file.

        Parameters
        ----------
        filename : str
            File name
        """
        with fits.open(str(make_path(filename)), memmap=False) as hdulist:
            return cls.from_table_hdu(hdulist[hdu])

    @classmethod
    def from_table_hdu(cls, hdu):
        """Create `EnergyDependentMultiGaussPSF` from HDU list.

        Parameters
        ----------
        hdu : `~astropy.io.fits.BinTableHDU`
            HDU
        """
        table = Table.read(hdu)
        energy_axis_true = MapAxis.from_table(
            table, column_prefix="ENERG", format="gadf-dl3"
        )
        offset_axis = MapAxis.from_table(
            table, column_prefix="THETA", format="gadf-dl3"
        )

        # Get sigmas
        shape = (offset_axis.nbin, energy_axis_true.nbin)
        sigmas = []
        for key in ["SIGMA_1", "SIGMA_2", "SIGMA_3"]:
            sigma = hdu.data[key].reshape(shape).copy()
            sigmas.append(sigma)

        # Get amplitudes
        norms = []
        for key in ["SCALE", "AMPL_2", "AMPL_3"]:
            norm = hdu.data[key].reshape(shape).copy()
            norms.append(norm)

        return cls(
            energy_axis_true=energy_axis_true,
            offset_axis=offset_axis,
            sigmas=sigmas,
            norms=norms,
            meta=dict(hdu.header)
        )

    def to_hdulist(self):
        """
        Convert psf table data to FITS hdu list.

        Returns
        -------
        hdu_list : `~astropy.io.fits.HDUList`
            PSF in HDU list format.
        """
        # Set up data
        names = [
            "SCALE",
            "SIGMA_1",
            "AMPL_2",
            "SIGMA_2",
            "AMPL_3",
            "SIGMA_3",
        ]
        units = ["", "deg", "", "deg", "", "deg"]

        data = [
            self.norms[0],
            self.sigmas[0],
            self.norms[1],
            self.sigmas[1],
            self.norms[2],
            self.sigmas[2],
        ]

        axes = MapAxes([self.energy_axis_true, self.offset_axis])
        table = axes.to_table(format="gadf-dl3")

        for name_, data_, unit_ in zip(names, data, units):
            table[name_] = [data_]
            table[name_].unit = unit_

        # Create hdu and hdu list
        hdu = fits.BinTableHDU(table)
        hdu.header.update(self.meta)
        return fits.HDUList([fits.PrimaryHDU(), hdu])

    def write(self, filename, *args, **kwargs):
        """Write PSF to FITS file.

        Calls `~astropy.io.fits.HDUList.writeto`, forwarding all arguments.
        """
        self.to_hdulist().writeto(str(make_path(filename)), *args, **kwargs)

    def psf_at_energy_and_theta(self, energy, theta):
        """
        Get `~gammapy.modeling.models.MultiGauss2D` model for given energy and theta.

        No interpolation is used.

        Parameters
        ----------
        energy : `~astropy.units.u.Quantity`
            Energy at which a PSF is requested.
        theta : `~astropy.coordinates.Angle`
            Offset angle at which a PSF is requested.

        Returns
        -------
        psf : `~gammapy.utils.gauss.MultiGauss2D`
            Multigauss PSF object.
        """
        energy = u.Quantity(energy)
        theta = u.Quantity(theta)
        sigmas, norms = [], []

        pars = {"A_1": 1}
        for interp_sigma in self._interp_sigmas:
            sigma = interp_sigma((theta, energy))
            sigmas.append(sigma)

        for name, interp_norm in zip(["scale", "A_2", "A_3"], self._interp_norms):
            pars[name] = interp_norm((theta, energy))

        for idx, sigma in enumerate(sigmas):
            a = pars[f"A_{idx + 1}"]
            # Norm of each 2D Gaussian: scale * 2 * amplitude * sigma^2
            norm = pars["scale"] * 2 * a * sigma ** 2
            norms.append(norm)

        m = MultiGauss2D(sigmas, norms)
        m.normalize()
        return m

    def containment_radius(self, energy, theta, fraction=0.68):
        """Compute containment for all energy and theta values"""
        # This is a false positive from pylint
        # See https://github.com/PyCQA/pylint/issues/2435
        energies = u.Quantity(
            energy
        ).flatten()  # pylint:disable=assignment-from-no-return
        thetas = Angle(theta).flatten()
        radius = np.empty((theta.size, energy.size))

        for idx, energy in enumerate(energies):
            for jdx, theta in enumerate(thetas):
                psf = None
                try:
                    psf = self.psf_at_energy_and_theta(energy, theta)
                    radius[jdx, idx] = psf.containment_radius(fraction)
                except ValueError:
                    log.debug(
                        f"Computing containment failed for energy = {energy:.2f}"
                        f" and theta={theta:.2f}"
                    )
                    # Bug fix: only log the PSF parameters when the PSF was
                    # actually built. Previously this line raised NameError
                    # if psf_at_energy_and_theta itself failed on the first
                    # iteration (psf unbound), masking the original error.
                    if psf is not None:
                        log.debug(f"Sigmas: {psf.sigmas} Norms: {psf.norms}")
                    radius[jdx, idx] = np.nan

        return Angle(radius, "deg")

    def plot_containment(self, fraction=0.68, ax=None, add_cbar=True, **kwargs):
        """
        Plot containment image with energy and theta axes.

        Parameters
        ----------
        fraction : float
            Containment fraction between 0 and 1.
        add_cbar : bool
            Add a colorbar
        """
        import matplotlib.pyplot as plt

        ax = plt.gca() if ax is None else ax

        energy = self.energy_axis_true.center
        offset = self.offset_axis.center

        # Set up and compute data
        containment = self.containment_radius(energy, offset, fraction)

        # plotting defaults
        kwargs.setdefault("cmap", "GnBu")
        kwargs.setdefault("vmin", np.nanmin(containment.value))
        kwargs.setdefault("vmax", np.nanmax(containment.value))

        # Plotting
        x = energy.value
        y = offset.value
        caxes = ax.pcolormesh(x, y, containment.value, **kwargs)

        # Axes labels and ticks, colobar
        ax.semilogx()
        ax.set_ylabel(f"Offset ({offset.unit})")
        ax.set_xlabel(f"Energy ({energy.unit})")
        ax.set_xlim(x.min(), x.max())
        ax.set_ylim(y.min(), y.max())

        # Safe-energy thresholds are optional metadata; skip if absent.
        try:
            self._plot_safe_energy_range(ax)
        except KeyError:
            pass

        if add_cbar:
            label = f"Containment radius R{100 * fraction:.0f} ({containment.unit})"
            ax.figure.colorbar(caxes, ax=ax, label=label)

        return ax

    def _plot_safe_energy_range(self, ax):
        """add safe energy range lines to the plot"""
        esafe = self.energy_thresh_lo
        omin = self.offset_axis.center.min()
        omax = self.offset_axis.center.max()
        ax.vlines(x=esafe.value, ymin=omin.value, ymax=omax.value)
        label = f"Safe energy threshold: {esafe:3.2f}"
        ax.text(x=1.1 * esafe.value, y=0.3, s=label, va="top")

    def plot_containment_vs_energy(
        self, fractions=[0.68, 0.95], thetas=Angle([0, 1], "deg"), ax=None, **kwargs
    ):
        """Plot containment fraction as a function of energy."""
        import matplotlib.pyplot as plt

        ax = plt.gca() if ax is None else ax

        energy = self.energy_axis_true.center

        for theta in thetas:
            for fraction in fractions:
                radius = self.containment_radius(energy, theta, fraction).squeeze()
                # Bug fix: compute a fresh label per curve unless the caller
                # supplied one. The previous kwargs.setdefault froze the
                # first curve's label onto every subsequent curve.
                plot_kwargs = dict(kwargs)
                plot_kwargs.setdefault(
                    "label", f"{theta.deg} deg, {100 * fraction:.1f}%"
                )
                ax.plot(energy.value, radius.value, **plot_kwargs)

        ax.semilogx()
        ax.legend(loc="best")
        ax.set_xlabel("Energy (TeV)")
        ax.set_ylabel("Containment radius (deg)")

    def peek(self, figsize=(15, 5)):
        """Quick-look summary plots."""
        import matplotlib.pyplot as plt

        fig, axes = plt.subplots(nrows=1, ncols=3, figsize=figsize)

        self.plot_containment(fraction=0.68, ax=axes[0])
        self.plot_containment(fraction=0.95, ax=axes[1])
        self.plot_containment_vs_energy(ax=axes[2])

        # TODO: implement this plot
        # psf = self.psf_at_energy_and_theta(energy='1 TeV', theta='1 deg')
        # psf.plot_components(ax=axes[2])

        plt.tight_layout()

    def info(
        self,
        fractions=[0.68, 0.95],
        energies=u.Quantity([1.0, 10.0], "TeV"),
        thetas=u.Quantity([0.0], "deg"),
    ):
        """
        Print PSF summary info.

        The containment radius for given fraction, energies and thetas is
        computed and printed on the command line.

        Parameters
        ----------
        fractions : list
            Containment fraction to compute containment radius for.
        energies : `~astropy.units.u.Quantity`
            Energies to compute containment radius for.
        thetas : `~astropy.units.u.Quantity`
            Thetas to compute containment radius for.

        Returns
        -------
        ss : string
            Formatted string containing the summary info.
        """
        ss = "\nSummary PSF info\n"
        ss += "----------------\n"
        ss += array_stats_str(self.offset_axis.center.to("deg"), "Theta")
        ss += array_stats_str(self.energy_axis_true.edges[1:], "Energy hi")
        ss += array_stats_str(self.energy_axis_true.edges[:-1], "Energy lo")
        ss += f"Safe energy threshold lo: {self.energy_thresh_lo:6.3f}\n"
        ss += f"Safe energy threshold hi: {self.energy_thresh_hi:6.3f}\n"

        for fraction in fractions:
            containment = self.containment_radius(energies, thetas, fraction)
            for i, energy in enumerate(energies):
                for j, theta in enumerate(thetas):
                    radius = containment[j, i]
                    ss += (
                        "{:2.0f}% containment radius at theta = {} and "
                        "E = {:4.1f}: {:5.8f}\n"
                        "".format(100 * fraction, theta, energy, radius)
                    )
        return ss

    def to_energy_dependent_table_psf(self, theta=None, rad=None, exposure=None):
        """Convert triple Gaussian PSF ot table PSF.

        Parameters
        ----------
        theta : `~astropy.coordinates.Angle`
            Offset in the field of view. Default theta = 0 deg
        rad : `~astropy.coordinates.Angle`
            Offset from PSF center used for evaluating the PSF on a grid.
            Default offset = [0, 0.005, ..., 1.495, 1.5] deg.
        exposure : `~astropy.units.u.Quantity`
            Energy dependent exposure. Should be in units equivalent to 'cm^2 s'.
            Default exposure = 1.

        Returns
        -------
        tabe_psf : `~gammapy.irf.EnergyDependentTablePSF`
            Instance of `EnergyDependentTablePSF`.
        """
        # Convert energies to log center
        energies = self.energy_axis_true.center

        # Defaults and input handling
        if theta is None:
            theta = Angle(0, "deg")
        else:
            theta = Angle(theta)

        if rad is None:
            rad = Angle(np.arange(0, 1.5, 0.005), "deg")

        rad_axis = MapAxis.from_nodes(rad, name="rad")

        psf_value = u.Quantity(np.zeros((energies.size, rad.size)), "deg^-2")

        for idx, energy in enumerate(energies):
            psf_gauss = self.psf_at_energy_and_theta(energy, theta)
            psf_value[idx] = u.Quantity(psf_gauss(rad), "deg^-2")

        return EnergyDependentTablePSF(
            energy_axis_true=self.energy_axis_true,
            rad_axis=rad_axis,
            exposure=exposure,
            data=psf_value,
        )

    def to_psf3d(self, rad=None):
        """Create a PSF3D from an analytical PSF.

        Parameters
        ----------
        rad : `~astropy.units.u.Quantity` or `~astropy.coordinates.Angle`
            the array of position errors (rad) on which the PSF3D will be defined

        Returns
        -------
        psf3d : `~gammapy.irf.PSF3D`
            the PSF3D. It will be defined on the same energy and offset values than the input psf.
        """
        offsets = self.offset_axis.center
        energy = self.energy_axis_true.center

        if rad is None:
            rad = np.linspace(0, 0.66, 67) * u.deg

        rad_axis = MapAxis.from_edges(rad, name="rad")

        shape = (self.energy_axis_true.nbin, self.offset_axis.nbin, rad_axis.nbin)
        psf_value = np.zeros(shape) * u.Unit("sr-1")

        for idx, offset in enumerate(offsets):
            table_psf = self.to_energy_dependent_table_psf(offset)
            psf_value[:, idx, :] = table_psf.evaluate(energy, rad_axis.center)

        return PSF3D(
            energy_axis_true=self.energy_axis_true,
            rad_axis=rad_axis,
            offset_axis=self.offset_axis,
            data=psf_value,
            meta=self.meta.copy()
        )
| gammapy/irf/psf/gauss.py | 16,206 | Triple Gauss analytical PSF depending on energy and theta.
To evaluate the PSF call the ``to_energy_dependent_table_psf`` or ``psf_at_energy_and_theta`` methods.
Parameters
----------
energy_axis_true : `MapAxis`
True energy axis
offset_axis : `MapAxis`
Offset axis.
sigmas : list of 'numpy.ndarray'
Triple Gauss sigma parameters, where every entry is
a two dimensional 'numpy.ndarray' containing the sigma
value for every given energy and theta.
norms : list of 'numpy.ndarray'
Triple Gauss norm parameters, where every entry is
a two dimensional 'numpy.ndarray' containing the norm
value for every given energy and theta. Norm corresponds
to the value of the Gaussian at theta = 0.
meta : dict
Meta data
Examples
--------
Plot R68 of the PSF vs. theta and energy:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from gammapy.irf import EnergyDependentMultiGaussPSF
filename = '$GAMMAPY_DATA/cta-1dc/caldb/data/cta/1dc/bcf/South_z20_50h/irf_file.fits'
psf = EnergyDependentMultiGaussPSF.read(filename, hdu='POINT SPREAD FUNCTION')
psf.plot_containment(0.68)
plt.show()
add safe energy range lines to the plot
Compute containment for all energy and theta values
High energy threshold
Low energy threshold
Create `EnergyDependentMultiGaussPSF` from HDU list.
Parameters
----------
hdu : `~astropy.io.fits.BinTableHDU`
HDU
Print PSF summary info.
The containment radius for given fraction, energies and thetas is
computed and printed on the command line.
Parameters
----------
fractions : list
Containment fraction to compute containment radius for.
energies : `~astropy.units.u.Quantity`
Energies to compute containment radius for.
thetas : `~astropy.units.u.Quantity`
Thetas to compute containment radius for.
Returns
-------
ss : string
Formatted string containing the summary info.
Quick-look summary plots.
Plot containment image with energy and theta axes.
Parameters
----------
fraction : float
Containment fraction between 0 and 1.
add_cbar : bool
Add a colorbar
Plot containment fraction as a function of energy.
Get `~gammapy.modeling.models.MultiGauss2D` model for given energy and theta.
No interpolation is used.
Parameters
----------
energy : `~astropy.units.u.Quantity`
Energy at which a PSF is requested.
theta : `~astropy.coordinates.Angle`
Offset angle at which a PSF is requested.
Returns
-------
psf : `~gammapy.utils.gauss.MultiGauss2D`
Multigauss PSF object.
Create `EnergyDependentMultiGaussPSF` from FITS file.
Parameters
----------
filename : str
File name
Convert triple Gaussian PSF ot table PSF.
Parameters
----------
theta : `~astropy.coordinates.Angle`
Offset in the field of view. Default theta = 0 deg
rad : `~astropy.coordinates.Angle`
Offset from PSF center used for evaluating the PSF on a grid.
Default offset = [0, 0.005, ..., 1.495, 1.5] deg.
exposure : `~astropy.units.u.Quantity`
Energy dependent exposure. Should be in units equivalent to 'cm^2 s'.
Default exposure = 1.
Returns
-------
tabe_psf : `~gammapy.irf.EnergyDependentTablePSF`
Instance of `EnergyDependentTablePSF`.
Convert psf table data to FITS hdu list.
Returns
-------
hdu_list : `~astropy.io.fits.HDUList`
PSF in HDU list format.
Create a PSF3D from an analytical PSF.
Parameters
----------
rad : `~astropy.units.u.Quantity` or `~astropy.coordinates.Angle`
the array of position errors (rad) on which the PSF3D will be defined
Returns
-------
psf3d : `~gammapy.irf.PSF3D`
the PSF3D. It will be defined on the same energy and offset values than the input psf.
Write PSF to FITS file.
Calls `~astropy.io.fits.HDUList.writeto`, forwarding all arguments.
Licensed under a 3-clause BSD style license - see LICENSE.rst Get sigmas Get amplitudes Set up data Create hdu and hdu list This is a false positive from pylint See https://github.com/PyCQA/pylint/issues/2435 pylint:disable=assignment-from-no-return Set up and compute data plotting defaults Plotting Axes labels and ticks, colobar TODO: implement this plot psf = self.psf_at_energy_and_theta(energy='1 TeV', theta='1 deg') psf.plot_components(ax=axes[2]) Convert energies to log center Defaults and input handling | 4,259 | en | 0.471116 |
# The string is alternating iff it has no adjacent equal pair. Reversing a
# substring s[l..r] only touches the boundary pairs (s[l-1], s[l]) and
# (s[r], s[r+1]), and to make progress one boundary must sit on a "00" pair
# and the other on a "11" pair (or a string end). So each reversal removes at
# most one pair of each kind, giving the lower bound
# max(#adjacent "00" pairs, #adjacent "11" pairs) — and pairing every "00"
# with a "11" (or a border) achieves it.
for _ in range(int(input())):
    input()  # n: the length of s, implied by the string itself
    s = input()
    zero_pairs = sum(a == b == '0' for a, b in zip(s, s[1:]))
    one_pairs = sum(a == b == '1' for a, b in zip(s, s[1:]))
    print(max(zero_pairs, one_pairs))
| Codeforces_problems/Reverse Binary Strings/solution.py | 1,173 | We need to make our string alternating, i. e. si≠si+1. When we reverse substring sl…sr, we change no more than two pairs sl−1,sl and sr,sr+1. Moreover, one pair should be a consecutive pair 00 and other — 11. So, we can find lower bound to our answer as maximum between number of pairs of 00 and number of pairs of 11. And we can always reach this lower bound, by pairing 00 with 11 or with left/right border of s. will store total number of pairs will store no of pairs in one streak streak of zeros are broken by one so no of pairs of zeros are added to z streak of ones are broken by one so no of pairs of ones are added to o we count pairs only when it the streak is broken. So to count the final unbroken streak | 720 | en | 0.879149 |
import numpy as np
import cv2
import matplotlib.pylab as plt
from keras.preprocessing.image import load_img
from keras.models import model_from_json
from models import (
create_cam_model, preprocess_image,
get_cam_img
)
# Name assigned to the CAM conv layer: passed to create_cam_model when the
# model is built and to get_cam_img when the class map is extracted.
CAM_CONV_LAYER = 'cam_conv_layer'
def read_model(model_path, weigths_path):
    """Load a pretrained Keras model from a JSON architecture and weights.

    # Arguments
        model_path: path to the JSON file with the serialized architecture.
        weigths_path: path to the weights file to load into the model.
    """
    # Read the architecture with a context manager so the file handle is
    # closed deterministically (previously open(...).read() leaked it).
    with open(model_path) as model_file:
        model = model_from_json(model_file.read())
    model.load_weights(weigths_path)
    return model
def train_cam_model(X_train, Y_train, X_test, Y_test,
                    batch_size, nb_epoch):
    """Build a CAM model on top of a pretrained classifier and train it.

    # Arguments
        X_train, Y_train: training images and labels.
        X_test, Y_test: validation images and labels.
        batch_size: mini-batch size used during fitting.
        nb_epoch: number of training epochs.
    """
    # Location of the already-trained base model (fill in before use).
    pretrained_model_path = ''
    pretrained_weights_path = ''
    # Architecture of the pretrained model.
    pretrained_model_name = 'VGG16'
    # Number of label classes.
    num_classes = 10
    # Spatial size of the CAM (GAP) input feature map.
    gap_spacial_size = 14

    # Layer feeding the CAM head. The CAM paper suggests the last conv layer
    # (VGG) or merge layer (Inception and other architectures).
    cam_in_layers = {
        'VGG16': 'block5_conv3',
        'InceptionV3': 'batchnormalization_921',
        'ResNet50': 'merge_13',
    }
    in_layer_name = cam_in_layers.get(pretrained_model_name, '')

    # Load the pretrained model and graft the CAM head onto it.
    pretrained_model = read_model(pretrained_model_path,
                                  pretrained_weights_path)
    model = create_cam_model(pretrained_model,
                             gap_spacial_size,
                             num_classes,
                             in_layer_name,
                             CAM_CONV_LAYER)

    # Train the CAM model.
    model.compile(loss='categorical_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])
    model.fit(X_train, Y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              shuffle=True, verbose=1,
              validation_data=(X_test, Y_test))

    # Save model
    model.save_weights('')

    return model
def cam_model():
    """
    Return your trained CAM model
    """
    # Placeholder stub: load and return the trained CAM model here (e.g. via
    # read_model on the weights saved by train_cam_model). Until implemented
    # this returns None, so plot_cam_map will fail when it uses the model.
    return
def plot_cam_map(img_path, img_size, batch_size, label_plot):
    """Overlay the class activation map for one image on the original image.

    # Arguments
        img_path: path of the image to inspect.
        img_size: side length the image is resized to (square).
        batch_size: batch size used for the prediction pass.
        label_plot: class index whose activation map is drawn.
    """
    # Spatial size of the CAM input feature map.
    gap_spacial_size = 14

    # The trained CAM model (see cam_model).
    model = cam_model()

    # Load the image twice: raw pixels for display, preprocessed for the net.
    original = np.asarray(load_img(img_path, target_size=(img_size, img_size)))
    inputs = preprocess_image(img_path, img_size, expand_dims=True)

    # Extract the class activation map for the requested label.
    cam = get_cam_img(model,
                      inputs,
                      label_plot,
                      CAM_CONV_LAYER,
                      ratio=img_size / gap_spacial_size)

    # Match the map to the original image size when necessary.
    if cam.shape != original[:, :, 0].shape:
        cam = cv2.resize(cam, (img_size, img_size), cv2.INTER_LINEAR)

    # Print the predictions so the map can be read alongside them.
    predictions = model.predict(inputs.astype('float32'), batch_size=batch_size, verbose=1)
    print('Info: Predictions:\n{}'.format(predictions))

    # Original image first, then the semi-transparent activation overlay.
    plt.imshow(original)
    plt.imshow(cam,
               cmap='jet',
               alpha=0.5,
               interpolation='bilinear')
    plt.show()
| demo.py | 3,223 | Return your trained CAM model
Plot class activation map.
Load your pretrained model
Train CAM model based on your pretrained model
# Arguments
model: your pretrained model, CAM model is trained based on this model.
Define CAM conv layer name Use your allready trained model Your pretrained model name Label class num CAM input spacial size The layer before CAM(GAP) layers. CAM paper suggests to use the last convnet(VGG) or mergenet(Inception, or other architectures) Change this name based on your model. Load your allready trained model, transfer it to CAM model Create CAM model based on trained model Train your CAM model Save model CAM input spacial size Use your trained CAM model Load and format data Get class map image Resize if the shape of class map is not equal to original image Show the predictions. You can analyze the class map with the predictions. Plot original image and the class map | 934 | en | 0.832267 |
#!/usr/bin/python
"""Plot LFEs of given order parameter."""
import argparse
import sys
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib import gridspec
from matplotlib.ticker import MaxNLocator
import numpy as np
import pandas as pd
from matplotlibstyles import styles
from matplotlibstyles import plotutils
def main():
    """Parse CLI options, draw the LFE curves, and save the figure."""
    args = parse_args()
    fig = setup_figure()
    grid = gridspec.GridSpec(1, 1, fig)
    ax = fig.add_subplot(grid[0, 0])
    if args.post_lfes is None:
        args.post_lfes = ['' for _ in args.systems]

    plot_figure(fig, ax, vars(args))
    setup_axis(ax, args.tag)
    # set_labels(ax)  # legend left off by default
    save_figure(fig, args.plot_filebase)
def setup_figure():
    """Create the default-styled constrained-layout figure for the plot."""
    styles.set_default_style()
    width_in = plotutils.cm_to_inches(10)
    height_in = plotutils.cm_to_inches(7)
    return plt.figure(
        figsize=(width_in, height_in), dpi=300, constrained_layout=True)
def plot_figure(f, ax, args):
    """Plot LFE curves (with error bars) for each system/variant pair.

    `args` is a dict (typically vars(argparse.Namespace)) carrying the keys
    read below.  When `stacking_enes` is given, curves are colored by a
    segmented colormap over the stacking energies and a colorbar is drawn;
    otherwise the tab10 qualitative palette is used.
    """
    systems = args['systems']
    varis = args['varis']
    input_dir = args['input_dir']
    tag = args['tag']
    post_lfes = args['post_lfes']
    stacking_enes = args['stacking_enes']
    if stacking_enes is not None:
        # Color by magnitude of stacking energy.
        stacking_enes = [abs(e) for e in stacking_enes]
        cmap = plotutils.create_truncated_colormap(
            0.2, 0.8, name='plasma')
        #mappable = plotutils.create_linear_mappable(
        #    cmap, abs(stacking_enes[0]), abs(stacking_enes[-1]))
        #colors = [mappable.to_rgba(abs(e)) for e in stacking_enes]
        # NOTE(review): assumes stacking_enes has >= 2 evenly spaced entries.
        increment = stacking_enes[1] - stacking_enes[0]
        cmap, norm, colors = plotutils.create_segmented_colormap(cmap, stacking_enes, increment)
    else:
        cmap = cm.get_cmap('tab10')
        colors = [cmap(i) for i in range(len(systems))]
    for i in range(len(systems)):
        system = systems[i]
        vari = varis[i]
        post_lfe = post_lfes[i]
        if post_lfe != '':
            post_lfe = '-' + post_lfe
        inp_filebase = f'{input_dir}/{system}-{vari}_lfes{post_lfe}-{tag}'
        # .aves holds mean LFEs, .stds the standard deviations; first column
        # is the (single) temperature used for both.
        lfes = pd.read_csv(f'{inp_filebase}.aves', sep=' ', index_col=0)
        lfe_stds = pd.read_csv(f'{inp_filebase}.stds', sep=' ', index_col=0)
        temp = lfes.columns[0]
        lfes = lfes[temp]
        # Shift so that the first order-parameter value is the zero reference.
        lfes = lfes - lfes[0]
        lfe_stds = lfe_stds[temp]
        label = f'{system}-{vari}'
        ax.errorbar(lfes.index, lfes, yerr=lfe_stds, marker='o', label=label,
                    color=colors[i])
    if stacking_enes is not None:
        label = r'$-U_\text{stack} / \SI{1000}{\kb\kelvin}$'
        tick_labels = [f'${e/1000:.1f}$' for e in stacking_enes]
        plotutils.plot_segmented_colorbar(
            f, ax, cmap, norm, label, tick_labels, 'horizontal')
def setup_axis(ax, ylabel=None, xlabel=None, ylim_top=None, xlim_right=None):
    """Label the axis and cap its limits; x ticks are forced to integers
    because the order parameter is discrete."""
    integer_locator = MaxNLocator(integer=True)
    ax.xaxis.set_major_locator(integer_locator)
    ax.set_ylabel(ylabel)
    ax.set_xlabel(xlabel)
    ax.set_ylim(top=ylim_top)
    ax.set_xlim(right=xlim_right)
def set_labels(ax):
    """Draw the legend for the current axes (currently unused; see main())."""
    plt.legend()
def save_figure(f, plot_filebase):
    """Write the figure to `<plot_filebase>.pdf` and `.png` with a
    transparent background."""
    for extension in ('.pdf', '.png'):
        f.savefig(plot_filebase + extension, transparent=True)
def parse_args():
    """Define and parse the command-line interface for this script."""
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    # Positional arguments.
    parser.add_argument('input_dir', type=str, help='Input directory')
    parser.add_argument('plot_filebase', type=str, help='Plots directory')
    parser.add_argument('tag', type=str, help='OP tag')
    # Optional, list-valued arguments.
    parser.add_argument('--systems', nargs='+', type=str, help='Systems')
    parser.add_argument(
        '--varis', nargs='+', type=str, help='Simulation variants')
    parser.add_argument(
        '--post_lfes', nargs='+', type=str,
        help='Filename additions after lfes, if any')
    parser.add_argument(
        '--stacking_enes', nargs='+', type=float,
        help='Stacking energies (for colormap)')
    return parser.parse_args()
# Script entry point.
if __name__ == '__main__':
    main()
| scripts/plotting/plot_lfes.py | 4,177 | Plot LFEs of given order parameter.
!/usr/bin/pythonset_labels(ax)mappable = plotutils.create_linear_mappable( cmap, abs(stacking_enes[0]), abs(stacking_enes[-1]))colors = [mappable.to_rgba(abs(e)) for e in stacking_enes]f.savefig(plot_filebase + '.pgf', transparent=True) | 276 | en | 0.272372 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Fabian Affolter <fabian()affolter-engineering.ch>'
__copyright__ = 'Copyright 2014 Fabian Affolter'
__license__ = """Eclipse Public License - v 1.0 (http://www.eclipse.org/legal/epl-v10.html)"""
# Optional dependency probe: degrade gracefully when python-dbus is missing;
# plugin() checks this flag before attempting to send.
HAVE_DBUS=True
try:
    import dbus
except ImportError:
    HAVE_DBUS=False
def plugin(srv, item):
    """Send a message through dbus to the user's desktop.

    Returns True on success, False when dbus is unavailable or the
    notification could not be delivered.  `srv` supplies logging and
    script metadata; `item` carries the message, target and addresses.
    """
    srv.logging.debug("*** MODULE=%s: service=%s, target=%s", __file__, item.service, item.target)
    if not HAVE_DBUS:
        srv.logging.error("Cannot send DBUS message; `dbus' module not installed")
        return False
    text = item.message
    summary = item.addrs[0]
    app_name = item.get('title', srv.SCRIPTNAME)
    replaces_id = 0
    service = 'org.freedesktop.Notifications'
    path = '/' + service.replace('.', '/')
    interface = service
    app_icon = '/usr/share/icons/gnome/32x32/places/network-server.png'
    expire_timeout = 1000
    actions = []
    # NOTE(review): Notify's `hints` parameter has D-Bus signature a{sv};
    # dbus-python coerces an empty sequence, but a dict would be more exact.
    hints = []
    try:
        srv.logging.debug("Sending message to %s..." % (item.target))
        session_bus = dbus.SessionBus()
        obj = session_bus.get_object(service, path)
        interface = dbus.Interface(obj, interface)
        interface.Notify(app_name, replaces_id, app_icon, summary, text,
                         actions, hints, expire_timeout)
        srv.logging.debug("Successfully sent message")
    # Fix: `except Exception, e:` is Python-2-only syntax and a SyntaxError
    # on Python 3; `as` works on both 2.6+ and 3.
    except Exception as e:
        srv.logging.error("Error sending message to %s: %s" % (item.target, str(e)))
        return False
    return True
| services/dbus.py | 1,566 | !/usr/bin/env python -*- coding: utf-8 -*- | 42 | en | 0.34282 |
# encoding: utf-8
import datetime
import logging
from ckan.common import config
from six import text_type
from sqlalchemy import Table, select, join, func, and_
import ckan.plugins as p
import ckan.model as model
log = logging.getLogger(__name__)
# Caching is configurable but intentionally unsupported; warn if enabled.
cache_enabled = p.toolkit.asbool(
    config.get('ckanext.stats.cache_enabled', False)
)
if cache_enabled:
    # Fix: logging.Logger.warn is a deprecated alias of warning().
    log.warning(
        'ckanext.stats does not support caching in current implementations'
    )
DATE_FORMAT = '%Y-%m-%d'
def table(name):
    """Reflect and return the named table from CKAN's SQLAlchemy metadata."""
    return Table(name, model.meta.metadata, autoload=True)
def datetime2date(datetime_):
    """Truncate a datetime (or date) to its calendar date."""
    year, month, day = datetime_.timetuple()[:3]
    return datetime.date(year, month, day)
class Stats(object):
    """Read-only statistics queries over the CKAN database.

    All queries count only active, public packages.
    """
    @classmethod
    def largest_groups(cls, limit=10):
        """Return up to *limit* (Group, package_count) pairs, largest first."""
        member = table('member')
        package = table('package')
        j = join(member, package, member.c.table_id == package.c.id)
        # Count package memberships per group; `!= None` / `== False` are
        # intentional SQLAlchemy expressions (IS NOT NULL / IS FALSE).
        s = select(
            [member.c.group_id,
             func.count(member.c.table_id)]
        ).select_from(j).group_by(member.c.group_id).where(
            and_(
                member.c.group_id != None, member.c.table_name == 'package',
                package.c.private == False, package.c.state == 'active'
            )
        ).order_by(func.count(member.c.table_id).desc()).limit(limit)
        res_ids = model.Session.execute(s).fetchall()
        # Hydrate group ids into ORM Group objects.
        res_groups = [
            (model.Session.query(model.Group).get(text_type(group_id)), val)
            for group_id, val in res_ids
        ]
        return res_groups
    @classmethod
    def top_tags(cls, limit=10, returned_tag_info='object'):  # by package
        """Return the *limit* most used tags with their package counts.

        `returned_tag_info` selects the first tuple element: tag 'name',
        tag 'id', or hydrated Tag 'object'.
        """
        assert returned_tag_info in ('name', 'id', 'object')
        tag = table('tag')
        package_tag = table('package_tag')
        package = table('package')
        if returned_tag_info == 'name':
            from_obj = [package_tag.join(tag)]
            tag_column = tag.c.name
        else:
            from_obj = None
            tag_column = package_tag.c.tag_id
        j = join(
            package_tag, package, package_tag.c.package_id == package.c.id
        )
        s = select([tag_column,
                    func.count(package_tag.c.package_id)],
                   from_obj=from_obj).select_from(j).where(
                       and_(
                           package_tag.c.state == 'active',
                           package.c.private == False,
                           package.c.state == 'active'
                       )
                   )
        s = s.group_by(tag_column).order_by(
            func.count(package_tag.c.package_id).desc()
        ).limit(limit)
        res_col = model.Session.execute(s).fetchall()
        if returned_tag_info in ('id', 'name'):
            return res_col
        elif returned_tag_info == 'object':
            res_tags = [
                (model.Session.query(model.Tag).get(text_type(tag_id)), val)
                for tag_id, val in res_col
            ]
            return res_tags
    @classmethod
    def top_package_creators(cls, limit=10):
        """Return up to *limit* (User, created_package_count) pairs."""
        userid_count = model.Session.query(
            model.Package.creator_user_id,
            func.count(model.Package.creator_user_id)
        ).filter(model.Package.state == 'active'
                 ).filter(model.Package.private == False).group_by(
                     model.Package.creator_user_id
                 ).order_by(func.count(model.Package.creator_user_id).desc()
                            ).limit(limit).all()
        # Drop rows with a NULL creator and hydrate the rest into User objects.
        user_count = [
            (model.Session.query(model.User).get(text_type(user_id)), count)
            for user_id, count in userid_count
            if user_id
        ]
        return user_count
| ckanext/stats/stats.py | 3,678 | encoding: utf-8 by package | 26 | en | 0.959346 |
import time
import numpy as np
import os.path as osp
import datetime
from collections import OrderedDict
import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter
import nni
from dassl.data import DataManager
from dassl.optim import build_optimizer, build_lr_scheduler
from dassl.utils import (
MetricMeter, AverageMeter, tolist_if_not, count_num_param, load_checkpoint,
save_checkpoint, resume_from_checkpoint, load_pretrained_weights
)
from dassl.modeling import build_head, build_backbone
from dassl.evaluation import build_evaluator
class SimpleNet(nn.Module):
    """A simple neural network composed of a CNN backbone
    and optionally a head such as mlp for classification.
    """
    def __init__(self, cfg, model_cfg, num_classes, **kwargs):
        super().__init__()
        self.backbone = build_backbone(
            model_cfg.BACKBONE.NAME,
            verbose=cfg.VERBOSE,
            pretrained=model_cfg.BACKBONE.PRETRAINED,
            **kwargs
        )
        fdim = self.backbone.out_features
        # NOTE(review): leftover debug print; consider removing or using a logger.
        print("------------------------fdim:", fdim)
        self.head = None
        if model_cfg.HEAD.NAME and model_cfg.HEAD.HIDDEN_LAYERS:
            self.head = build_head(
                model_cfg.HEAD.NAME,
                verbose=cfg.VERBOSE,
                in_features=fdim,
                hidden_layers=model_cfg.HEAD.HIDDEN_LAYERS,
                activation=model_cfg.HEAD.ACTIVATION,
                bn=model_cfg.HEAD.BN,
                dropout=model_cfg.HEAD.DROPOUT,
                **kwargs
            )
            # Head replaces the backbone's output dimensionality.
            fdim = self.head.out_features
        self.classifier = None
        # num_classes <= 0 means "feature extractor only" (no classifier).
        if num_classes > 0:
            self.classifier = nn.Linear(fdim, num_classes)
        self._fdim = fdim
    @property
    def fdim(self):
        # Final feature dimensionality (after head, if any).
        return self._fdim
    def forward(self, x, return_feature=False):
        """Return logits (and optionally features); features only when
        no classifier was built."""
        f = self.backbone(x)
        if self.head is not None:
            f = self.head(f)
        if self.classifier is None:
            return f
        y = self.classifier(f)
        if return_feature:
            return y, f
        return y
class TrainerBase:
    """Base class for iterative trainer."""
    def __init__(self):
        # Registries keyed by model name; populated via register_model().
        self._models = OrderedDict()
        self._optims = OrderedDict()
        self._scheds = OrderedDict()
        self._writer = None
    def register_model(self, name='model', model=None, optim=None, sched=None):
        """Register a (model, optimizer, scheduler) triple under *name*."""
        # Guard against subclasses that forget to call super().__init__().
        if self.__dict__.get('_models') is None:
            raise AttributeError(
                'Cannot assign model before super().__init__() call'
            )
        if self.__dict__.get('_optims') is None:
            raise AttributeError(
                'Cannot assign optim before super().__init__() call'
            )
        if self.__dict__.get('_scheds') is None:
            raise AttributeError(
                'Cannot assign sched before super().__init__() call'
            )
        assert name not in self._models, 'Found duplicate model names'
        self._models[name] = model
        self._optims[name] = optim
        self._scheds[name] = sched
    def get_model_names(self, names=None):
        """Return registered model names; validates *names* when given."""
        names_real = list(self._models.keys())
        if names is not None:
            names = tolist_if_not(names)
            for name in names:
                assert name in names_real
            return names
        else:
            return names_real
    def save_model(self, epoch, directory, is_best=False, model_name=''):
        """Checkpoint every registered model (with optimizer/scheduler state)."""
        names = self.get_model_names()
        for name in names:
            model_dict = self._models[name].state_dict()
            optim_dict = None
            if self._optims[name] is not None:
                optim_dict = self._optims[name].state_dict()
            sched_dict = None
            if self._scheds[name] is not None:
                sched_dict = self._scheds[name].state_dict()
            save_checkpoint(
                {
                    'state_dict': model_dict,
                    'epoch': epoch + 1,
                    'optimizer': optim_dict,
                    'scheduler': sched_dict
                },
                osp.join(directory, name),
                is_best=is_best,
                model_name=model_name
            )
    def resume_model_if_exist(self, directory):
        """Resume all models from *directory*; return the start epoch
        (0 when any checkpoint is missing)."""
        names = self.get_model_names()
        file_missing = False
        for name in names:
            path = osp.join(directory, name)
            if not osp.exists(path):
                file_missing = True
                break
        if file_missing:
            print('No checkpoint found, train from scratch')
            return 0
        print(
            'Found checkpoint in "{}". Will resume training'.format(directory)
        )
        for name in names:
            path = osp.join(directory, name)
            start_epoch = resume_from_checkpoint(
                path, self._models[name], self._optims[name],
                self._scheds[name]
            )
        return start_epoch
    def load_model(self, directory, epoch=None):
        """Load pretrained weights for every registered model.

        Raises FileNotFoundError when the checkpoint file is absent.
        """
        if not directory:
            print(
                'Note that load_model() is skipped as no pretrained model is given'
            )
            return
        names = self.get_model_names()
        # By default, the best model is loaded
        model_file = 'model-best.pth.tar'
        if epoch is not None:
            model_file = 'model.pth.tar-' + str(epoch)
        for name in names:
            model_path = osp.join(directory, name, model_file)
            if not osp.exists(model_path):
                raise FileNotFoundError(
                    'Model not found at "{}"'.format(model_path)
                )
            checkpoint = load_checkpoint(model_path)
            state_dict = checkpoint['state_dict']
            epoch = checkpoint['epoch']
            print(
                'Loading weights to {} '
                'from "{}" (epoch = {})'.format(name, model_path, epoch)
            )
            self._models[name].load_state_dict(state_dict)
    def set_model_mode(self, mode='train', names=None):
        """Switch the named models (default: all) between train/eval mode."""
        names = self.get_model_names(names)
        for name in names:
            if mode == 'train':
                self._models[name].train()
            else:
                self._models[name].eval()
    def update_lr(self, names=None):
        """Step the learning-rate schedulers of the named models."""
        names = self.get_model_names(names)
        for name in names:
            if self._scheds[name] is not None:
                self._scheds[name].step()
    def detect_anomaly(self, loss):
        """Raise if the loss is NaN or infinite."""
        if not torch.isfinite(loss).all():
            raise FloatingPointError('Loss is infinite or NaN!')
    def init_writer(self, log_dir):
        """Lazily create the tensorboard SummaryWriter."""
        if self.__dict__.get('_writer') is None or self._writer is None:
            print(
                'Initializing summary writer for tensorboard '
                'with log_dir={}'.format(log_dir)
            )
            self._writer = SummaryWriter(log_dir=log_dir)
    def close_writer(self):
        if self._writer is not None:
            self._writer.close()
    def write_scalar(self, tag, scalar_value, global_step=None):
        if self._writer is None:
            # Do nothing if writer is not initialized
            # Note that writer is only used when training is needed
            pass
        else:
            self._writer.add_scalar(tag, scalar_value, global_step)
    def train(self, start_epoch, max_epoch):
        """Generic training loops."""
        self.start_epoch = start_epoch
        self.max_epoch = max_epoch
        self.before_train()
        for self.epoch in range(self.start_epoch, self.max_epoch):
            self.before_epoch()
            self.run_epoch()
            self.after_epoch()
        self.after_train()
    # Hooks below are intentionally empty / abstract; subclasses override.
    def before_train(self):
        pass
    def after_train(self):
        pass
    def before_epoch(self):
        pass
    def after_epoch(self):
        pass
    def run_epoch(self):
        raise NotImplementedError
    def test(self):
        raise NotImplementedError
    def parse_batch_train(self, batch):
        raise NotImplementedError
    def parse_batch_test(self, batch):
        raise NotImplementedError
    def forward_backward(self, batch):
        raise NotImplementedError
    def model_inference(self, input):
        raise NotImplementedError
    def model_zero_grad(self, names=None):
        names = self.get_model_names(names)
        for name in names:
            if self._optims[name] is not None:
                self._optims[name].zero_grad()
    def model_backward(self, loss):
        # NOTE(review): relies on `self.use_amp`/`self.scaler`, which are set
        # by SimpleTrainer.__init__, not by this base class.
        self.detect_anomaly(loss)
        if not self.use_amp:
            loss.backward()
        else:
            self.scaler.scale(loss).backward()
    def model_update(self, names=None):
        names = self.get_model_names(names)
        for name in names:
            if self._optims[name] is not None:
                if not self.use_amp:
                    self._optims[name].step()
                else:
                    self.scaler.step(self._optims[name])
    def model_backward_and_update(self, loss, names=None):
        """zero_grad -> backward -> step (and AMP scaler update)."""
        self.model_zero_grad(names)
        self.model_backward(loss)
        self.model_update(names)
        if self.use_amp:
            self.scaler.update()
class SimpleTrainer(TrainerBase):
    """A simple trainer class implementing generic functions."""
    def __init__(self, cfg):
        super().__init__()
        self.check_cfg(cfg)
        if torch.cuda.is_available() and cfg.USE_CUDA:
            self.device = torch.device('cuda')
        else:
            self.device = torch.device('cpu')
        # use amp to accelerate training
        self.use_amp = cfg.TRAIN.USE_AMP
        if self.use_amp:
            self.scaler = torch.cuda.amp.GradScaler()
        # Save as attributes some frequently used variables
        self.start_epoch = self.epoch = 0
        self.max_epoch = cfg.OPTIM.MAX_EPOCH
        self.output_dir = cfg.OUTPUT_DIR
        self.cfg = cfg
        self.build_data_loader()
        self.build_model()
        self.evaluator = build_evaluator(cfg, lab2cname=self.dm.lab2cname)
        # zhaoxin modify
        # Best-so-far trackers used by after_epoch()/after_train().
        self.best_val_acc = -np.inf
        self.best_test_acc = -np.inf
        self.best_val_test_acc = 0
        self.best_val_epoch = 0
        self.best_test_epoch = 0
    def check_cfg(self, cfg):
        """Check whether some variables are set correctly for
        the trainer (optional).
        For example, a trainer might require a particular sampler
        for training such as 'RandomDomainSampler', so it is good
        to do the checking:
        assert cfg.DATALOADER.SAMPLER_TRAIN == 'RandomDomainSampler'
        """
        pass
    def build_data_loader(self):
        """Create essential data-related attributes.
        What must be done in the re-implementation
        of this method:
        1) initialize data manager
        2) assign as attributes the data loaders
        3) assign as attribute the number of classes
        """
        self.dm = DataManager(self.cfg)
        self.train_loader_x = self.dm.train_loader_x
        self.train_loader_u = self.dm.train_loader_u
        self.val_loader = self.dm.val_loader
        self.test_loader = self.dm.test_loader
        self.num_classes = self.dm.num_classes
    def build_model(self):
        """Build and register model.
        The default builds a classification model along with its
        optimizer and scheduler.
        Custom trainers can re-implement this method if necessary.
        """
        cfg = self.cfg
        print('Building model')
        self.model = SimpleNet(cfg, cfg.MODEL, self.num_classes)
        # for name, module in self.model.named_children():
        #     print(name)
        if cfg.MODEL.INIT_WEIGHTS:
            load_pretrained_weights(self.model, cfg.MODEL.INIT_WEIGHTS)
        self.model.to(self.device)
        print('# params: {:,}'.format(count_num_param(self.model)))
        self.optim = build_optimizer(self.model, cfg.OPTIM)
        self.sched = build_lr_scheduler(self.optim, cfg.OPTIM)
        self.register_model('model', self.model, self.optim, self.sched)
    def train(self):
        super().train(self.start_epoch, self.max_epoch)
    def before_train(self):
        # directory = self.cfg.OUTPUT_DIR
        if self.cfg.RESUME:
            directory = self.cfg.RESUME
            self.start_epoch = self.resume_model_if_exist(directory)
        # Initialize summary writer
        self.init_writer(self.output_dir)
        # Remember the starting time (for computing the elapsed time)
        self.time_start = time.time()
    def after_train(self):
        """Final evaluation, result dump, NNI reporting and cleanup."""
        print('Finished training')
        do_test = not self.cfg.TEST.NO_TEST
        if do_test and not self.cfg.NNI:
            if self.cfg.TEST.FINAL_MODEL == 'best_val':
                print('Deploy the model with the best val performance')
                self.load_model(self.output_dir)
            # zhaoxin modify
            if self.cfg.TEST.PER_CLASS_RESULT:
                self.best_val_test_acc, per_class_accs = self.test(return_per_class_results=True)
                perclass_path = osp.join(self.output_dir, 'perclass_result.txt')
                with open(perclass_path, 'w') as f:
                    for acc in per_class_accs:
                        f.write("{:6f}\n".format(acc))
            else:
                self.best_val_test_acc = self.test()
            # zhaoxin add
            if self.cfg.TEST.FINAL_MODEL == 'best_val':
                print(
                    'best_val_acc: {}\nbest_val_epoch: {}\nbest_val_test_acc: {}'.
                    format(
                        self.best_val_acc, self.best_val_epoch,
                        self.best_val_test_acc
                    )
                )
            if self.cfg.TEST.TEST_EVERY_EPOCH:
                print(
                    'best_test_acc: {}\nbest_test_epoch: {}'.format(
                        self.best_test_acc, self.best_test_epoch
                    )
                )
            result_path = osp.join(self.output_dir, 'result.txt')
            with open(result_path, 'w') as f:
                f.write("{:6f}\n".format(self.best_val_test_acc))
        if self.cfg.NNI:
            nni.report_final_result(self.best_val_acc)
        # Show elapsed time
        elapsed = round(time.time() - self.time_start)
        elapsed = str(datetime.timedelta(seconds=elapsed))
        print('Elapsed: {}'.format(elapsed))
        # Close writer
        self.close_writer()
    def after_epoch(self):
        """Validate, track best model, and checkpoint as configured."""
        last_epoch = (self.epoch + 1) == self.max_epoch
        do_test = not self.cfg.TEST.NO_TEST
        meet_checkpoint_freq = (
            self.epoch + 1
        ) % self.cfg.TRAIN.CHECKPOINT_FREQ == 0 if self.cfg.TRAIN.CHECKPOINT_FREQ > 0 else False
        # zhaoxin modify
        if do_test and self.cfg.TEST.FINAL_MODEL == 'best_val':
            curr_val_acc = self.test(split='val')
            # nni: report intermediate result
            if self.cfg.NNI:
                nni.report_intermediate_result(curr_val_acc)
            is_best = curr_val_acc > self.best_val_acc
            if is_best:
                self.best_val_acc = curr_val_acc
                self.best_val_epoch = self.epoch + 1
                self.save_model(
                    self.epoch,
                    self.output_dir,
                    model_name='model-best.pth.tar'
                )
        if do_test and self.cfg.TEST.TEST_EVERY_EPOCH:
            curr_test_acc = self.test(split='test')
            if curr_test_acc > self.best_test_acc:
                self.best_test_acc = curr_test_acc
                self.best_test_epoch = self.epoch + 1
            # if self.cfg.TEST.FINAL_MODEL == 'best_val':
            #     if is_best:
            #         self.best_val_test_acc = curr_test_acc
        if meet_checkpoint_freq or last_epoch:
            self.save_model(self.epoch, self.output_dir)
    @torch.no_grad()
    def test(self, split=None, return_per_class_results=False):
        """A generic testing pipeline."""
        self.set_model_mode('eval')
        self.evaluator.reset()
        if split is None:
            split = self.cfg.TEST.SPLIT
        if split == 'val' and self.val_loader is not None:
            data_loader = self.val_loader
            print('Do evaluation on {} set'.format(split))
        else:
            data_loader = self.test_loader
            print('Do evaluation on test set')
        for batch_idx, batch in enumerate(data_loader):
            input, label = self.parse_batch_test(batch)
            output = self.model_inference(input)
            self.evaluator.process(output, label)
        results = self.evaluator.evaluate()
        for k, v in results.items():
            if k == 'perclass_accuracies':
                continue
            tag = '{}/{}'.format(split, k)
            self.write_scalar(tag, v, self.epoch)
        if not return_per_class_results:
            # First metric (accuracy by convention of the evaluator dict).
            return list(results.values())[0]
        else:
            return results['accuracy'], results['perclass_accuracies']
    def model_inference(self, input):
        return self.model(input)
    def parse_batch_test(self, batch):
        input = batch['img']
        label = batch['label']
        input = input.to(self.device)
        label = label.to(self.device)
        return input, label
    def get_current_lr(self, names=None):
        # Learning rate of the first registered model's optimizer.
        names = self.get_model_names(names)
        name = names[0]
        return self._optims[name].param_groups[0]['lr']
class TrainerXU(SimpleTrainer):
    """A base trainer using both labeled and unlabeled data.
    In the context of domain adaptation, labeled and unlabeled data
    come from source and target domains respectively.
    When it comes to semi-supervised learning, all data comes from the
    same domain.
    """
    def run_epoch(self):
        """One epoch over the labeled/unlabeled loaders in lockstep,
        restarting whichever iterator is exhausted first."""
        self.set_model_mode('train')
        losses = MetricMeter()
        batch_time = AverageMeter()
        data_time = AverageMeter()
        # Decide to iterate over labeled or unlabeled dataset
        len_train_loader_x = len(self.train_loader_x)
        len_train_loader_u = len(self.train_loader_u)
        if self.cfg.TRAIN.COUNT_ITER == 'train_x':
            self.num_batches = len_train_loader_x
        elif self.cfg.TRAIN.COUNT_ITER == 'train_u':
            self.num_batches = len_train_loader_u
        elif self.cfg.TRAIN.COUNT_ITER == 'smaller_one':
            self.num_batches = min(len_train_loader_x, len_train_loader_u)
        else:
            raise ValueError
        train_loader_x_iter = iter(self.train_loader_x)
        train_loader_u_iter = iter(self.train_loader_u)
        end = time.time()
        for self.batch_idx in range(self.num_batches):
            try:
                batch_x = next(train_loader_x_iter)
            except StopIteration:
                # Labeled loader exhausted: restart it.
                train_loader_x_iter = iter(self.train_loader_x)
                batch_x = next(train_loader_x_iter)
            try:
                batch_u = next(train_loader_u_iter)
            except StopIteration:
                # Unlabeled loader exhausted: restart it.
                train_loader_u_iter = iter(self.train_loader_u)
                batch_u = next(train_loader_u_iter)
            data_time.update(time.time() - end)
            loss_summary = self.forward_backward(batch_x, batch_u)
            batch_time.update(time.time() - end)
            losses.update(loss_summary)
            if (self.batch_idx + 1) % self.cfg.TRAIN.PRINT_FREQ == 0:
                # ETA over the rest of this epoch plus all future epochs.
                nb_this_epoch = self.num_batches - (self.batch_idx + 1)
                nb_future_epochs = (
                    self.max_epoch - (self.epoch + 1)
                ) * self.num_batches
                eta_seconds = batch_time.avg * (nb_this_epoch+nb_future_epochs)
                eta = str(datetime.timedelta(seconds=int(eta_seconds)))
                print(
                    'epoch [{0}/{1}][{2}/{3}]\t'
                    'time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                    'data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                    'eta {eta}\t'
                    '{losses}\t'
                    'lr {lr}'.format(
                        self.epoch + 1,
                        self.max_epoch,
                        self.batch_idx + 1,
                        self.num_batches,
                        batch_time=batch_time,
                        data_time=data_time,
                        eta=eta,
                        losses=losses,
                        lr=self.get_current_lr()
                    )
                )
            n_iter = self.epoch * self.num_batches + self.batch_idx
            for name, meter in losses.meters.items():
                self.write_scalar('train/' + name, meter.avg, n_iter)
            self.write_scalar('train/lr', self.get_current_lr(), n_iter)
            end = time.time()
    def parse_batch_train(self, batch_x, batch_u):
        """Move labeled (img, label) and unlabeled (img) tensors to the device."""
        input_x = batch_x['img']
        label_x = batch_x['label']
        input_u = batch_u['img']
        input_x = input_x.to(self.device)
        label_x = label_x.to(self.device)
        input_u = input_u.to(self.device)
        return input_x, label_x, input_u
class TrainerX(SimpleTrainer):
    """A base trainer using labeled data only."""
    def run_epoch(self):
        """One epoch over the labeled loader with timing/loss logging."""
        self.set_model_mode('train')
        losses = MetricMeter()
        batch_time = AverageMeter()
        data_time = AverageMeter()
        self.num_batches = len(self.train_loader_x)
        end = time.time()
        for self.batch_idx, batch in enumerate(self.train_loader_x):
            data_time.update(time.time() - end)
            loss_summary = self.forward_backward(batch)
            batch_time.update(time.time() - end)
            losses.update(loss_summary)
            if (self.batch_idx + 1) % self.cfg.TRAIN.PRINT_FREQ == 0:
                # ETA over the rest of this epoch plus all future epochs.
                nb_this_epoch = self.num_batches - (self.batch_idx + 1)
                nb_future_epochs = (
                    self.max_epoch - (self.epoch + 1)
                ) * self.num_batches
                eta_seconds = batch_time.avg * (nb_this_epoch+nb_future_epochs)
                eta = str(datetime.timedelta(seconds=int(eta_seconds)))
                print(
                    'epoch [{0}/{1}][{2}/{3}]\t'
                    'time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                    'data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                    'eta {eta}\t'
                    '{losses}\t'
                    'lr {lr}'.format(
                        self.epoch + 1,
                        self.max_epoch,
                        self.batch_idx + 1,
                        self.num_batches,
                        batch_time=batch_time,
                        data_time=data_time,
                        eta=eta,
                        losses=losses,
                        lr=self.get_current_lr()
                    )
                )
            n_iter = self.epoch * self.num_batches + self.batch_idx
            for name, meter in losses.meters.items():
                self.write_scalar('train/' + name, meter.avg, n_iter)
            self.write_scalar('train/lr', self.get_current_lr(), n_iter)
            end = time.time()
    def parse_batch_train(self, batch):
        """Move (img, label, domain) tensors of a labeled batch to the device."""
        input = batch['img']
        label = batch['label']
        domain = batch['domain']
        input = input.to(self.device)
        label = label.to(self.device)
        domain = domain.to(self.device)
        return input, label, domain
| dassl/engine/trainer.py | 23,578 | A simple neural network composed of a CNN backbone
and optionally a head such as mlp for classification.
A simple trainer class implementing generic functions.
Base class for iterative trainer.
A base trainer using labeled data only.
A base trainer using both labeled and unlabeled data.
In the context of domain adaptation, labeled and unlabeled data
come from source and target domains respectively.
When it comes to semi-supervised learning, all data comes from the
same domain.
Create essential data-related attributes.
What must be done in the re-implementation
of this method:
1) initialize data manager
2) assign as attributes the data loaders
3) assign as attribute the number of classes
Build and register model.
The default builds a classification model along with its
optimizer and scheduler.
Custom trainers can re-implement this method if necessary.
Check whether some variables are set correctly for
the trainer (optional).
For example, a trainer might require a particular sampler
for training such as 'RandomDomainSampler', so it is good
to do the checking:
assert cfg.DATALOADER.SAMPLER_TRAIN == 'RandomDomainSampler'
A generic testing pipeline.
Generic training loops.
By default, the best model is loaded Do nothing if writer is not initialized Note that writer is only used when training is needed use amp to accelerate training Save as attributes some frequently used variables zhaoxin modify for name, module in self.model.named_children(): print(name) directory = self.cfg.OUTPUT_DIR Initialize summary writer Remember the starting time (for computing the elapsed time) zhaoxin modify zhaoxin add Show elapsed time Close writer zhaoxin modify nni: report intermediate result if self.cfg.TEST.FINAL_MODEL == 'best_val': if is_best: self.best_val_test_acc = curr_test_acc Decide to iterate over labeled or unlabeled dataset | 1,869 | en | 0.790593 |
import json
import os
import pickle
import requests
import shutil
import tempfile
import uuid
from flask import Blueprint, current_app, jsonify, request, send_file
# Plugin identity used by the agent's plugin registry.
name = 'HTTP'
prefix = 'http'
storage_enabled = True
# Fix: a bare `global storage_path` at module level is a no-op and left the
# name undefined until register() ran; initialize it explicitly instead.
storage_path = None
plugin = Blueprint(name, __name__)
def register(app, plugin_storage_path=None):
    """Attach this blueprint to *app* under /http and remember where to
    store execution results."""
    app.register_blueprint(plugin, url_prefix=f'/{prefix}')
    app.logger.info(f'{name} plugin registered.')
    global storage_path
    storage_path = plugin_storage_path
# In-memory store of configurations and executions, keyed by UUID.
# NOTE(review): not persisted across restarts despite the name.
persistence = {
    "configuration": {},
    "execution": {},
}
# Name of the zipped result bundle written per execution.
result_zip_file_name = 'results.zip'
@plugin.route('/')
def index():
    """Identity endpoint for the plugin root."""
    # Fix: the string had an f-prefix with no placeholders (lint F541).
    return 'This is the Radon CTT Agent HTTP Plugin.', 200
@plugin.route('/configuration/', methods=['POST'])
def configuration_create():
    """Create a test configuration from form fields and store it by UUID.

    Returns 201 with the configuration as JSON, or 400 when a required
    parameter without a usable default (e.g. hostname) is missing.
    """
    config_instance = {}
    configuration_uuid = str(uuid.uuid4())
    config_instance['uuid'] = configuration_uuid
    # Parameter schema: whether each field is required and its default.
    # A required field whose default is None (hostname) must be supplied.
    params = {
        'use_https': {
            'required': True,
            'default': False,
        },
        'method': {
            'required': True,
            'default': 'GET',
        },
        'hostname': {
            'required': True,
            'default': None,
        },
        'port': {
            'required': True,
            'default': 80,
        },
        'path': {
            'required': True,
            'default': "/",
        },
        'test_body': {
            'required': False,
            'default': None,
        },
        'test_header': {
            'required': False,
            'default': None,
        },
    }
    for param in params:
        is_required = params[param]['required']
        default_value = params[param]['default']
        if param in request.form:
            # NOTE(review): form values are stored as strings; execution()
            # must parse them (e.g. use_https) before use.
            value = request.form.get(param, type=str)
            current_app.logger.info(f'\'{param}\' set to: \'{value}\'.')
            config_instance[param] = value
        else:
            if is_required and default_value is not None:
                value = default_value
                current_app.logger.info(f'\'{param}\' set to default value: \'{value}\'.')
                config_instance[param] = value
        if is_required and param not in config_instance:
            current_app.logger.error(f"Required parameter {param} not provided.")
            return f'Required parameter {param} not provided.', 400
    persistence['configuration'][configuration_uuid] = config_instance
    current_app.logger.info(f"Config: {config_instance}")
    return jsonify(config_instance), 201
@plugin.route('/execution/', methods=['POST'])
def execution():
    """Execute the HTTP request described by a stored configuration.

    Expects a ``config_uuid`` form field.  Sends the configured request,
    records execution metadata and the pickled response under
    ``storage_path/<execution_uuid>/``, zips the directory, and returns
    the execution record as JSON.
    """
    execution_instance = {}
    if 'config_uuid' not in request.form:
        return "No configuration with that ID found.", 404
    config_uuid = request.form['config_uuid']
    # Fix: an unknown UUID previously raised KeyError (HTTP 500).
    config_entry = persistence['configuration'].get(config_uuid)
    if config_entry is None:
        return "No configuration with that ID found.", 404
    execution_instance['config'] = config_entry
    # Assign values from config if they are stored in the config, otherwise assign None
    if 'use_https' in config_entry:
        # Fix: values arrive as strings from the form; bool('False') is True,
        # so parse truthiness explicitly.
        use_https = str(config_entry['use_https']).strip().lower() in ('1', 'true', 'yes', 'on')
    else:
        use_https = None
    method = str(config_entry['method']).upper() if 'method' in config_entry else None
    hostname = str(config_entry['hostname']) if 'hostname' in config_entry else None
    port = int(config_entry['port']) if 'port' in config_entry else None
    path = str(config_entry['path']) if 'path' in config_entry else None
    test_body = config_entry['test_body'] if 'test_body' in config_entry else None
    test_header = config_entry['test_header'] if 'test_header' in config_entry else None
    # Check if required parameters are set.
    # Fix: the failure branch returned `msg, jsonify(...), 400` — an invalid
    # Flask 3-tuple (third element must be headers) that crashed at runtime.
    if use_https is None or not (method and hostname and port and path):
        return "Required configuration parameters are missing.", 400
    protocol = 'https' if use_https else 'http'
    target_url = f'{protocol}://{hostname}:{port}{path}'
    # Send request with given parameters
    response = requests.request(method, target_url, headers=test_header, json=test_body)
    response_status = response.status_code
    # Create UUID for execution
    execution_uuid = str(uuid.uuid4())
    execution_instance['uuid'] = execution_uuid
    execution_instance['target_url'] = target_url
    execution_instance['status'] = str(response_status)
    persistence['execution'][execution_uuid] = execution_instance
    execution_results_dir = os.path.join(storage_path, execution_uuid)
    os.makedirs(execution_results_dir)
    execution_json = os.path.join(execution_results_dir, 'execution.json')
    received_response = os.path.join(execution_results_dir, 'response.bin')
    with open(execution_json, 'w') as exec_json:
        exec_json.write(json.dumps(execution_instance))
    with open(received_response, 'wb') as response_bin:
        response_bin.write(pickle.dumps(response))
    # Zip the result directory and place the archive inside it.
    with tempfile.NamedTemporaryFile() as tf:
        tmp_zip_file = shutil.make_archive(tf.name, 'zip', execution_results_dir)
        shutil.copy2(tmp_zip_file, os.path.join(execution_results_dir, result_zip_file_name))
    # Test was executed with any possible outcome
    return jsonify(execution_instance), 200
# Get execution results
@plugin.route('/execution/<string:exec_uuid>/', methods=['GET'])
def execution_results(exec_uuid):
    """Serve the zipped result bundle of a previously run execution."""
    record = persistence["execution"].get(exec_uuid)
    if record is None:
        return "No execution found with that ID.", 404
    zip_path = os.path.join(
        storage_path, record.get("uuid"), result_zip_file_name)
    if not os.path.isfile(zip_path):
        return "No results available (yet).", 404
    return send_file(zip_path)
| http/__init__.py | 5,958 | Assign values from config if they are stored in the config, otherwise assign None Check if required parameters are set Send request with given parameters Create UUID for execution Test was executed with any possible outcome Get execution results | 245 | en | 0.781348 |
"""
Copyright 2021 K.M Ahnaf Zamil
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from urllib.parse import parse_qs
import typing
__all__: typing.Final = ["Request", "ImmutableDict", "_Redirect", "Endpoint"]
class _Redirect(object):
"""Just an object for simulating a redirect"""
def __init__(self, url: str) -> None:
self.url = url
class ImmutableDict(dict):
    """A read-only ``dict`` for parsed query arguments and form data.

    Every mutating operation raises :class:`ValueError`, so request data
    cannot be altered after it has been parsed.  (Previously only item
    assignment was blocked, so ``update``/``pop``/etc. silently mutated.)
    """

    def _immutable(self, *args, **kwargs):
        raise ValueError("ImmutableDict object cannot be modified (immutable)")

    # Block every in-place mutation path, not just item assignment.
    __setitem__ = _immutable
    __delitem__ = _immutable
    clear = _immutable
    pop = _immutable
    popitem = _immutable
    setdefault = _immutable
    update = _immutable


class Request(object):
    """An object that contains information related to the HTTP request"""

    def __init__(self, environ):
        # Raw WSGI environ mapping for this request.
        self._environ = environ

    @property
    def method(self) -> str:
        """HTTP method used for the request (e.g. ``"GET"``)."""
        return self._environ["REQUEST_METHOD"]

    @property
    def endpoint(self) -> str:
        """The route/endpoint used for that specific request"""
        return self._environ["PATH_INFO"]

    @property
    def query_args(self) -> ImmutableDict:
        """Query arguments from the request.

        Values are URL-decoded via :func:`urllib.parse.parse_qs`.  For a
        repeated key only the first value is kept, and malformed fragments
        without ``=`` are ignored instead of raising ``ValueError``.
        """
        raw = self._environ.get("QUERY_STRING", "")
        if not raw:
            return ImmutableDict({})
        parsed = parse_qs(raw)
        # parse_qs maps each name to a list of values; keep the first.
        return ImmutableDict({name: values[0] for name, values in parsed.items()})

    @property
    def form(self) -> typing.Optional[typing.Dict]:
        """Form data sent via HTTP request body, or ``None`` when absent."""
        data = self._environ.get("wsgi.input")  # io.BytesIO-like object
        if data:
            form_dict = parse_qs(data.getvalue().decode("utf-8"))
            # parse_qs maps each field to a list; keep the first value only.
            return ImmutableDict({k: v[0] for k, v in form_dict.items()})
        return None

    def __str__(self):
        return f'<Request endpoint="{self.endpoint}" method="{self.method}">'
class Endpoint(object):
    """Binds a route string to the handler function that serves it."""

    def __init__(self, route, func) -> None:
        self.route = route
        self.extension = None  # populated later, when the endpoint is registered
        self._func = func

    def __call__(self, request: Request):
        # Delegate straight to the wrapped handler.
        return self._func(request)
| pogweb/models.py | 3,242 | An immutable dictionary implementation for query arguments and form data
An object that contains information related to the HTTP request
Just an object for simulating a redirect
The route/endpoint used for that specific request
Form data sent via HTTP request
HTTP method used for the request
Query arguments from the request
Copyright 2021 K.M Ahnaf Zamil
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Returns io.BytesIO object Since v is list containing the form data | 1,449 | en | 0.843492 |
# Scrapy settings for amzASINScrapper project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# Name used for the bot in logs and as the default User-Agent component.
BOT_NAME = 'amzASINScrapper'
# Where Scrapy discovers existing spiders and where `genspider` creates new ones.
SPIDER_MODULES = ['amzASINScrapper.spiders']
NEWSPIDER_MODULE = 'amzASINScrapper.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'amzASINScrapper (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'amzASINScrapper.middlewares.AmzasinscrapperSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'amzASINScrapper.middlewares.AmzasinscrapperDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'amzASINScrapper.pipelines.AmzasinscrapperPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| amzASINScrapper/amzASINScrapper/settings.py | 3,156 | Scrapy settings for amzASINScrapper project For simplicity, this file contains only settings considered important or commonly used. You can find more settings consulting the documentation: https://docs.scrapy.org/en/latest/topics/settings.html https://docs.scrapy.org/en/latest/topics/downloader-middleware.html https://docs.scrapy.org/en/latest/topics/spider-middleware.html Crawl responsibly by identifying yourself (and your website) on the user-agentUSER_AGENT = 'amzASINScrapper (+http://www.yourdomain.com)' Obey robots.txt rules Configure maximum concurrent requests performed by Scrapy (default: 16)CONCURRENT_REQUESTS = 32 Configure a delay for requests for the same website (default: 0) See https://docs.scrapy.org/en/latest/topics/settings.htmldownload-delay See also autothrottle settings and docsDOWNLOAD_DELAY = 3 The download delay setting will honor only one of:CONCURRENT_REQUESTS_PER_DOMAIN = 16CONCURRENT_REQUESTS_PER_IP = 16 Disable cookies (enabled by default)COOKIES_ENABLED = False Disable Telnet Console (enabled by default)TELNETCONSOLE_ENABLED = False Override the default request headers:DEFAULT_REQUEST_HEADERS = { 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Language': 'en',} Enable or disable spider middlewares See https://docs.scrapy.org/en/latest/topics/spider-middleware.htmlSPIDER_MIDDLEWARES = { 'amzASINScrapper.middlewares.AmzasinscrapperSpiderMiddleware': 543,} Enable or disable downloader middlewares See https://docs.scrapy.org/en/latest/topics/downloader-middleware.htmlDOWNLOADER_MIDDLEWARES = { 'amzASINScrapper.middlewares.AmzasinscrapperDownloaderMiddleware': 543,} Enable or disable extensions See https://docs.scrapy.org/en/latest/topics/extensions.htmlEXTENSIONS = { 'scrapy.extensions.telnet.TelnetConsole': None,} Configure item pipelines See https://docs.scrapy.org/en/latest/topics/item-pipeline.htmlITEM_PIPELINES = { 
'amzASINScrapper.pipelines.AmzasinscrapperPipeline': 300,} Enable and configure the AutoThrottle extension (disabled by default) See https://docs.scrapy.org/en/latest/topics/autothrottle.htmlAUTOTHROTTLE_ENABLED = True The initial download delayAUTOTHROTTLE_START_DELAY = 5 The maximum download delay to be set in case of high latenciesAUTOTHROTTLE_MAX_DELAY = 60 The average number of requests Scrapy should be sending in parallel to each remote serverAUTOTHROTTLE_TARGET_CONCURRENCY = 1.0 Enable showing throttling stats for every response received:AUTOTHROTTLE_DEBUG = False Enable and configure HTTP caching (disabled by default) See https://docs.scrapy.org/en/latest/topics/downloader-middleware.htmlhttpcache-middleware-settingsHTTPCACHE_ENABLED = TrueHTTPCACHE_EXPIRATION_SECS = 0HTTPCACHE_DIR = 'httpcache'HTTPCACHE_IGNORE_HTTP_CODES = []HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage' | 2,860 | en | 0.619686 |
"""Test cases around the demo fan platform."""
import pytest
from homeassistant.setup import async_setup_component
from homeassistant.components import fan
from homeassistant.const import STATE_OFF, STATE_ON
from tests.components.fan import common
FAN_ENTITY_ID = 'fan.living_room_fan'


def get_entity(hass):
    """Return the current state object of the demo fan entity."""
    return hass.states.get(FAN_ENTITY_ID)
@pytest.fixture(autouse=True)
def setup_comp(hass):
    """Set up the demo fan platform before every test in this module."""
    config = {'fan': {'platform': 'demo'}}
    hass.loop.run_until_complete(
        async_setup_component(hass, fan.DOMAIN, config))
async def test_turn_on(hass):
    """Test turning on the device."""
    assert get_entity(hass).state == STATE_OFF

    await common.async_turn_on(hass, FAN_ENTITY_ID)
    assert get_entity(hass).state != STATE_OFF

    await common.async_turn_on(hass, FAN_ENTITY_ID, fan.SPEED_HIGH)
    state = get_entity(hass)
    assert state.state == STATE_ON
    assert state.attributes[fan.ATTR_SPEED] == fan.SPEED_HIGH
async def test_turn_off(hass):
    """Test turning off the device."""
    assert get_entity(hass).state == STATE_OFF

    await common.async_turn_on(hass, FAN_ENTITY_ID)
    assert get_entity(hass).state != STATE_OFF

    await common.async_turn_off(hass, FAN_ENTITY_ID)
    assert get_entity(hass).state == STATE_OFF
async def test_turn_off_without_entity_id(hass):
    """Test turning off all fans."""
    assert get_entity(hass).state == STATE_OFF

    await common.async_turn_on(hass, FAN_ENTITY_ID)
    assert get_entity(hass).state != STATE_OFF

    # No entity_id: the service call should target every fan.
    await common.async_turn_off(hass)
    assert get_entity(hass).state == STATE_OFF
async def test_set_direction(hass):
    """Test setting the direction of the device."""
    assert get_entity(hass).state == STATE_OFF

    await common.async_set_direction(hass, FAN_ENTITY_ID,
                                     fan.DIRECTION_REVERSE)
    assert get_entity(hass).attributes.get('direction') == fan.DIRECTION_REVERSE
async def test_set_speed(hass):
    """Test setting the speed of the device."""
    assert get_entity(hass).state == STATE_OFF

    await common.async_set_speed(hass, FAN_ENTITY_ID, fan.SPEED_LOW)
    assert get_entity(hass).attributes.get('speed') == fan.SPEED_LOW
async def test_oscillate(hass):
    """Test oscillating the fan."""
    # Oscillation defaults to off, can be enabled, and can be disabled again.
    assert not get_entity(hass).attributes.get('oscillating')

    await common.async_oscillate(hass, FAN_ENTITY_ID, True)
    assert get_entity(hass).attributes.get('oscillating')

    await common.async_oscillate(hass, FAN_ENTITY_ID, False)
    assert not get_entity(hass).attributes.get('oscillating')
async def test_is_on(hass):
    """Test is on service call."""
    # Demo fan starts off; fan.is_on() must reflect the turn-on service call.
    assert not fan.is_on(hass, FAN_ENTITY_ID)
    await common.async_turn_on(hass, FAN_ENTITY_ID)
    assert fan.is_on(hass, FAN_ENTITY_ID)
| tests/components/demo/test_fan.py | 2,876 | Get the fan entity.
Initialize components.
Test cases around the demo fan platform. | 83 | en | 0.672301 |
from brownie import AdvancedCollectible, network
import pytest
from scripts.advanced_collectible.deploy_and_create import deploy_and_create, get_contract
from scripts.utils.helpful_scripts import LOCAL_BLOCKCHAIN_ENVIRONMENTS, get_account
def test_can_create_advanced_collectible():
    """Deploy the collectible locally and simulate the VRF callback."""
    if network.show_active() not in LOCAL_BLOCKCHAIN_ENVIRONMENTS:
        pytest.skip("Only for local testing")
    collectible, creation_tx = deploy_and_create()
    # The requestId is carried by the requestedCollectible event.
    request_id = creation_tx.events["requestedCollectible"]["requestId"]
    fake_random = 777
    # Pretend to be the VRF coordinator delivering the random number.
    get_contract("vrf_coordinator").callBackWithRandomness(
        request_id, fake_random, collectible.address, {"from": get_account()})
    assert collectible.tokenCounter() == 1
    assert collectible.tokenIdToBreed(0) == fake_random % 3
| tests/unit/test_advanced_collectible.py | 912 | getting the requestId value from the requestedCollectible event | 63 | en | 0.672916 |
# Digital OCEAN FLASK SERVER RECEIVES IMAGE
from flask import Flask, request, jsonify
import classify
import base64
import json
import firebase
import env
# Instantiate Flask
app = Flask(__name__)
# Simple liveness probe endpoint.
@app.route("/status")
def health_check():
    return "Running!"
# Performing image Recognition on Image, sent as bytes via POST payload
@app.route("/detect", methods=["POST"])
def detect():
    # Request body is the base64-encoded image; decode and persist it so the
    # classifier can read it back from disk.
    decoded = base64.b64decode(request.data)
    with open("temp.png", "wb") as f:
        f.write(decoded)
    print("successfully receieved image")

    # Run the classifier and wrap its output as a JSON response.
    result = jsonify(classify.analyse("temp.png"))
    print(result.json)

    response_data = result.json
    print(response_data)

    # Persist the detection results to Firebase.
    db = firebase.Firebase()
    db.authenticate()
    db.push(response_data)
    print("Updated Firebase.")
    return result
if __name__ == "__main__":
    # Listen on all interfaces on port 80; debug=True is for development only.
    app.run(host="0.0.0.0", port=80, debug=True)
| app.py | 1,030 | Digital OCEAN FLASK SERVER RECEIVES IMAGE Instantiate Flask health check Performing image Recognition on Image, sent as bytes via POST payload Pass image bytes to classifier Return results as neat JSON object, using | 215 | en | 0.908955 |
#!/usr/bin/env python3
import os.path
import tensorflow as tf
import helper
import warnings
from distutils.version import LooseVersion
import project_tests as tests
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer. You are using {}'.format(tf.__version__)
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def load_vgg(sess, vgg_path):
    """
    Load Pretrained VGG Model into TensorFlow.
    :param sess: TensorFlow Session
    :param vgg_path: Path to vgg folder, containing "variables/" and "saved_model.pb"
    :return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3_out, layer4_out, layer7_out)
    """
    # Restore the graph and weights shipped under the 'vgg16' SavedModel tag.
    tf.saved_model.loader.load(sess, ['vgg16'], vgg_path)

    graph = tf.get_default_graph()
    # Well-known tensor names inside the restored VGG graph, in the order the
    # caller expects them back.
    tensor_names = ('image_input:0', 'keep_prob:0',
                    'layer3_out:0', 'layer4_out:0', 'layer7_out:0')
    return tuple(graph.get_tensor_by_name(name) for name in tensor_names)
tests.test_load_vgg(load_vgg, tf)
def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):
    """
    Create the layers for a fully convolutional network. Build skip-layers using the vgg layers.
    :param vgg_layer3_out: TF Tensor for VGG Layer 3 output
    :param vgg_layer4_out: TF Tensor for VGG Layer 4 output
    :param vgg_layer7_out: TF Tensor for VGG Layer 7 output
    :param num_classes: Number of classes to classify
    :return: The Tensor for the last layer of output
    """
    # FCN-8s style decoder: compress layer 7 to `num_classes` maps, then
    # upsample in stages while adding skip connections from layers 4 and 3.
    # 1x1 convolution layer with road / not-road features only
    conv_1by1_l7 = tf.layers.conv2d(vgg_layer7_out, num_classes, 1, padding='SAME',
                                kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
    # upscaling size/ add features (x2 transposed conv; output shape must
    # match vgg_layer4_out for the element-wise add below)
    output = tf.layers.conv2d_transpose(conv_1by1_l7, 512, 4, strides=(2,2), padding='SAME',
                                kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
    # skip connections / add to upscaled output
    output = tf.add(output, vgg_layer4_out)
    # upscaling size/ reduce features (x2; must match vgg_layer3_out)
    output = tf.layers.conv2d_transpose(output, 256, 4, strides=(2,2), padding='SAME',
                                kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
    # skip connections / add to upscaled output
    output = tf.add(output, vgg_layer3_out)
    # upscaling size/ reduce features to road OR not-road (final x8 upsample
    # back to the input image resolution)
    output = tf.layers.conv2d_transpose(output, num_classes, 16, strides=(8,8), padding='SAME',
                                kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3), name='nn_final_output')
    return output
tests.test_layers(layers)
def optimize(nn_last_layer, correct_label, learning_rate, num_classes):
    """
    Build the TensorFLow loss and optimizer operations.
    :param nn_last_layer: TF Tensor of the last layer in the neural network
    :param correct_label: TF Placeholder for the correct label image
    :param learning_rate: TF Placeholder for the learning rate
    :param num_classes: Number of classes to classify
    :return: Tuple of (logits, train_op, cross_entropy_loss)
    """
    # Flatten the 4-D network output to (pixel, class) rows so each pixel
    # becomes an independent classification sample.
    logits = tf.reshape(nn_last_layer, (-1, num_classes))
    cross_entropy_loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=logits,
                                                labels=correct_label))
    # Minimize mean cross-entropy with Adam.
    training_operation = tf.train.AdamOptimizer(
        learning_rate=learning_rate).minimize(cross_entropy_loss)
    return logits, training_operation, cross_entropy_loss
tests.test_optimize(optimize)
def train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image,
             correct_label, keep_prob, learning_rate):
    """
    Train neural network and print out the loss during training.
    :param sess: TF Session
    :param epochs: Number of epochs
    :param batch_size: Batch size
    :param get_batches_fn: Function to get batches of training data.  Call using get_batches_fn(batch_size)
    :param train_op: TF Operation to train the neural network
    :param cross_entropy_loss: TF Tensor for the amount of loss
    :param input_image: TF Placeholder for input images
    :param correct_label: TF Placeholder for label images
    :param keep_prob: TF Placeholder for dropout keep probability
    :param learning_rate: TF Placeholder for learning rate
    """
    # initialize global variables
    sess.run(tf.global_variables_initializer())
    # going through the batches of images i.e. epoch
    for epoch in range(epochs):
        for (input_img, gt_img) in get_batches_fn(batch_size):
            # keep_prob 0.7 -> 30% dropout during training; Adam learning
            # rate is hard-coded to 5e-04 here.
            _, loss = sess.run([train_op, cross_entropy_loss], feed_dict={input_image: input_img,
                                                                          correct_label: gt_img,
                                                                          keep_prob: 0.7,
                                                                          learning_rate: 5e-04})
        # Reports the loss of the *last* batch of each epoch.
        print("Loss of {} at epoch {}/{}".format(loss, epoch, epochs))
tests.test_train_nn(train_nn)
def run():
    """Wire together data loading, the FCN graph, training and inference."""
    num_classes = 2  # road / not-road
    image_shape = (160, 576)  # KITTI dataset uses 160x576 images
    data_dir = './data'
    runs_dir = './runs'
    tests.test_for_kitti_dataset(data_dir)
    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg(data_dir)
    # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
    # You'll need a GPU with at least 10 teraFLOPS to train on.
    #  https://www.cityscapes-dataset.com/
    epochs = 20
    batch_size = 5
    with tf.Session() as sess:
        # Path to vgg model
        vgg_path = os.path.join(data_dir, 'vgg')
        # Create function to get batches
        get_batches_fn = helper.gen_batch_function(os.path.join(data_dir, 'data_road/training'), image_shape)
        # OPTIONAL: Augment Images for better results
        #  https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network
        correct_label = tf.placeholder(tf.int32)
        learning_rate = tf.placeholder(tf.float32)
        # Build NN using load_vgg, layers, and optimize function
        input_img, keep_prob, layer3_o, layer4_o, layer7_o = load_vgg(sess, vgg_path)
        layer_output = layers(layer3_o, layer4_o, layer7_o, num_classes)
        logits, train_op, cross_entropy_loss = optimize(layer_output, correct_label, learning_rate, num_classes)
        # Train NN using the train_nn function
        train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_img,
                 correct_label, keep_prob, learning_rate)
        # Save inference data using helper.save_inference_samples
        helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_img)
        # OPTIONAL: Apply the trained model to a video
if __name__ == '__main__':
    run()
| main.py | 7,750 | Create the layers for a fully convolutional network. Build skip-layers using the vgg layers.
:param vgg_layer3_out: TF Tensor for VGG Layer 3 output
:param vgg_layer4_out: TF Tensor for VGG Layer 4 output
:param vgg_layer7_out: TF Tensor for VGG Layer 7 output
:param num_classes: Number of classes to classify
:return: The Tensor for the last layer of output
Load Pretrained VGG Model into TensorFlow.
:param sess: TensorFlow Session
:param vgg_path: Path to vgg folder, containing "variables/" and "saved_model.pb"
:return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3_out, layer4_out, layer7_out)
Build the TensorFLow loss and optimizer operations.
:param nn_last_layer: TF Tensor of the last layer in the neural network
:param correct_label: TF Placeholder for the correct label image
:param learning_rate: TF Placeholder for the learning rate
:param num_classes: Number of classes to classify
:return: Tuple of (logits, train_op, cross_entropy_loss)
Train neural network and print out the loss during training.
:param sess: TF Session
:param epochs: Number of epochs
:param batch_size: Batch size
:param get_batches_fn: Function to get batches of training data. Call using get_batches_fn(batch_size)
:param train_op: TF Operation to train the neural network
:param cross_entropy_loss: TF Tensor for the amount of loss
:param input_image: TF Placeholder for input images
:param correct_label: TF Placeholder for label images
:param keep_prob: TF Placeholder for dropout keep probability
:param learning_rate: TF Placeholder for learning rate
!/usr/bin/env python3 Check TensorFlow Version Check for a GPU TODO: Implement function Use tf.saved_model.loader.load to load the model and weights TODO: Implement function 1x1 convolution layer with road / not-road features only upscaling size/ add features skip connections / add to upscaled output upscaling size/ reduce features skip connections / add to upscaled output upscaling size/ reduce features to road OR not-road TODO: Implement function add loss function training_op TODO: Implement function initialize global variables going through the batches of images i.e. epoch KITTI dataset uses 160x576 images Download pretrained vgg model OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset. You'll need a GPU with at least 10 teraFLOPS to train on. https://www.cityscapes-dataset.com/ Path to vgg model Create function to get batches OPTIONAL: Augment Images for better results https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network TODO: Build NN using load_vgg, layers, and optimize function TODO: Train NN using the train_nn function TODO: Save inference data using helper.save_inference_samples OPTIONAL: Apply the trained model to a video | 2,804 | en | 0.674212 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CIFAR10 small image classification dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras.datasets.cifar import load_batch
from tensorflow.python.keras._impl.keras.utils.data_utils import get_file
def load_data():
  """Loads CIFAR10 dataset.

  Returns:
      Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
  """
  origin = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
  path = get_file('cifar-10-batches-py', origin=origin, untar=True)

  num_train_samples = 50000
  x_train = np.empty((num_train_samples, 3, 32, 32), dtype='uint8')
  y_train = np.empty((num_train_samples,), dtype='uint8')

  # The training set ships as five pickled batches of 10000 images each.
  for batch_index in range(5):
    fpath = os.path.join(path, 'data_batch_' + str(batch_index + 1))
    start = batch_index * 10000
    (x_train[start:start + 10000, :, :, :],
     y_train[start:start + 10000]) = load_batch(fpath)

  x_test, y_test = load_batch(os.path.join(path, 'test_batch'))

  # Labels come back flat; reshape them into column vectors.
  y_train = np.reshape(y_train, (len(y_train), 1))
  y_test = np.reshape(y_test, (len(y_test), 1))

  if K.image_data_format() == 'channels_last':
    # Convert images from NCHW to NHWC layout.
    x_train = x_train.transpose(0, 2, 3, 1)
    x_test = x_test.transpose(0, 2, 3, 1)

  return (x_train, y_train), (x_test, y_test)
| tensorflow/python/keras/_impl/keras/datasets/cifar10.py | 2,090 | Loads CIFAR10 dataset.
Returns:
Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
CIFAR10 small image classification dataset.
Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== | 806 | en | 0.802723 |
"""
Django settings for scannerKH project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '74lbuyy!_ihecg*uh8i9^j!wq3gc_)vv$55!h&0yon03f2%c$$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'scanner.apps.ScannerConfig',
'user.apps.UserConfig',
'grosshaendler.apps.GrosshaendlerConfig',
'artikel.apps.ArtikelConfig',
'bestellung.apps.BestellungConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'scannerKH.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'scannerKH.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTH_USER_MODEL = 'user.User'
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/

LANGUAGE_CODE = 'de'

# NOTE: 'CEST' is a daylight-saving abbreviation, not a valid IANA time zone
# name; with USE_TZ enabled Django's pytz lookup would raise
# UnknownTimeZoneError at startup.  Use the canonical zone instead.
TIME_ZONE = 'Europe/Berlin'

USE_I18N = True

USE_L10N = True

USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| scannerKH/scannerKH/settings.py | 3,309 | Django settings for scannerKH project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
Build paths inside the project like this: os.path.join(BASE_DIR, ...) Quick-start development settings - unsuitable for production See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/ SECURITY WARNING: keep the secret key used in production secret! SECURITY WARNING: don't run with debug turned on in production! Application definition Database https://docs.djangoproject.com/en/3.0/ref/settings/databases Password validation https://docs.djangoproject.com/en/3.0/ref/settings/auth-password-validators Internationalization https://docs.djangoproject.com/en/3.0/topics/i18n/ Static files (CSS, JavaScript, Images) https://docs.djangoproject.com/en/3.0/howto/static-files/ | 990 | en | 0.679119 |
# -*- coding: utf-8 -*-
import scrapy
import re
import json
from locations.hourstudy import inputoutput
class AldiUKSpider(scrapy.Spider):
    """Spider that yields raw/formatted opening hours for Aldi UK stores."""
    name = "aldiuk"
    allowed_domains = ['www.aldi.co.uk']
    start_urls = (
        'https://www.aldi.co.uk/sitemap/store',
    )

    def parse(self, response):
        """Follow every store URL listed in the sitemap."""
        response.selector.remove_namespaces()
        city_urls = response.xpath('//url/loc/text()').extract()
        # The original `for ... else: pass` was dead code: the else branch of
        # a for loop always runs on normal completion and did nothing.
        for path in city_urls:
            yield scrapy.Request(
                path.strip(),
                callback=self.parse_store,
            )

    def parse_store(self, response):
        """Extract opening hours from a single store page."""
        # The page embeds store data in an inline <script> as an argument to
        # a `.push(...)` call; carve out that JSON.  Fragile: depends on the
        # exact markup of the page.
        json_data = response.xpath('//script[@type="text/javascript"]/text()').extract_first().replace('\n','').replace('\t','').split('.push(')[1].rstrip(')')
        data = json.loads(json_data)

        raw = str(data['seoData']['openingHours'])
        formatted = raw.replace('[', '').replace(']', '').replace("'", '')
        yield inputoutput(raw, formatted)
| locations/spiders/aldi_uk.py | 1,878 | -*- coding: utf-8 -*- properties = { 'name': data['seoData']['name'], 'ref': data['seoData']['name'], 'addr_full': data['seoData']['address']['streetAddress'], 'city': data['seoData']['address']['addressLocality'], 'postcode': data['seoData']['address']['postalCode'], 'country': data['seoData']['address']['addressCountry'], 'website': response.request.url, 'opening_hours': str(data['seoData']['openingHours']).replace('[','').replace(']','').replace("'",''), 'lat': float(geodata['store']['latlng']['lat']), 'lon': float(geodata['store']['latlng']['lng']), } | 561 | en | 0.206151 |
"""
Django settings for api_drf project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is hard-coded in source; load it from an environment
# variable before deploying.
SECRET_KEY = '2p$!i%#w$3e9(l3v4#%_#fi2_fae2l7ksdsd+1*vrc6_#8_@_*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Third-party: Django REST Framework (configured in REST_FRAMEWORK below).
    'rest_framework',
    # Local application.
    'mainService'
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'api_drf.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'api_drf.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
# DRF configuration. The closing brace of this dict was corrupted by dataset
# extraction junk appended to the line; restored to valid Python.
REST_FRAMEWORK = {
    # When you enable API versioning, the request.version attribute will contain a string
    # that corresponds to the version requested in the incoming client request.
    'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.URLPathVersioning',
}
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
Build paths inside the project like this: os.path.join(BASE_DIR, ...) Quick-start development settings - unsuitable for production See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/ SECURITY WARNING: keep the secret key used in production secret! SECURITY WARNING: don't run with debug turned on in production! Application definition Database https://docs.djangoproject.com/en/2.1/ref/settings/databases Password validation https://docs.djangoproject.com/en/2.1/ref/settings/auth-password-validators Internationalization https://docs.djangoproject.com/en/2.1/topics/i18n/ Static files (CSS, JavaScript, Images) https://docs.djangoproject.com/en/2.1/howto/static-files/ When you enable API versioning, the request.version attribute will contain a string that corresponds to the version requested in the incoming client request. | 1,146 | en | 0.657073 |
# See LICENSE for licensing information.
#
# Copyright (c) 2016-2019 Regents of the University of California and The Board
# of Regents for the Oklahoma Agricultural and Mechanical College
# (acting for and on behalf of Oklahoma State University)
# All rights reserved.
#
import debug
from tech import drc, parameter, spice
from abc import ABC, abstractmethod
from .stimuli import *
from .charutils import *
class spice_measurement(ABC):
    """Base class for spice stimulus measurements."""

    def __init__(self, measure_name, measure_scale=None, has_port=True):
        # Names must be unique for correct spice simulation, but that is not
        # enforced here.
        self.name = measure_name
        self.measure_scale = measure_scale
        # Needed for error checking in port_error_check().
        self.has_port = has_port
        # Some meta values used externally; defined here so every measurement
        # object carries them consistently.
        self.meta_str = None
        self.meta_add_delay = False

    @abstractmethod
    def get_measure_function(self):
        return None

    @abstractmethod
    def get_measure_values(self):
        return None

    def write_measure(self, stim_obj, input_tuple):
        """Emit this measurement's spice statement through the stimulus object."""
        measure_func = self.get_measure_function()
        if measure_func is None:
            debug.error("Did not set measure function", 1)
        measure_func(stim_obj, *self.get_measure_values(*input_tuple))

    def retrieve_measure(self, port=None):
        """Read this measurement's value back from the spice output, applying
        the optional scale factor to float results."""
        self.port_error_check(port)
        if port is None:
            key = "{0}".format(self.name.lower())
        else:
            key = "{0}{1}".format(self.name.lower(), port)
        value = parse_spice_list("timing", key)
        if type(value) != float or self.measure_scale is None:
            return value
        return value * self.measure_scale

    def port_error_check(self, port):
        """Sanity-check that a port was (or was not) supplied as configured."""
        if self.has_port and port is None:
            debug.error("Cannot retrieve measurement, port input was expected.", 1)
        elif not self.has_port and port is not None:
            debug.error("Unexpected port input received during measure retrieval.", 1)
class delay_measure(spice_measurement):
    """Generates a spice measurement for the delay of 50%-to-50% points of two signals."""

    def __init__(self, measure_name, trig_name, targ_name, trig_dir_str, targ_dir_str,
                 trig_vdd=0.5, targ_vdd=0.5, measure_scale=None, has_port=True):
        spice_measurement.__init__(self, measure_name, measure_scale, has_port)
        self.set_meas_constants(trig_name, targ_name, trig_dir_str, targ_dir_str,
                                trig_vdd, targ_vdd)

    def get_measure_function(self):
        return stimuli.gen_meas_delay

    def set_meas_constants(self, trig_name, targ_name, trig_dir_str, targ_dir_str, trig_vdd, targ_vdd):
        """Set the constants for this measurement: signal names, directions,
        and trigger/target scale factors (fractions of vdd)."""
        self.trig_dir_str = trig_dir_str
        self.targ_dir_str = targ_dir_str
        self.trig_val_of_vdd = trig_vdd
        self.targ_val_of_vdd = targ_vdd
        self.trig_name_no_port = trig_name
        self.targ_name_no_port = targ_name

    def get_measure_values(self, trig_td, targ_td, vdd_voltage, port=None):
        """Build the argument tuple for the stimulus measurement function.
        Time delays and the port vary per call, so they are inputs here."""
        self.port_error_check(port)
        trig_val = self.trig_val_of_vdd * vdd_voltage
        targ_val = self.targ_val_of_vdd * vdd_voltage
        if port is None:
            meas_name = self.name
            trig_name = self.trig_name_no_port
            targ_name = self.targ_name_no_port
        else:
            # For dictionary indexing reasons, the name is formatted
            # differently than the signals.
            meas_name = "{}{}".format(self.name, port)
            trig_name = self.trig_name_no_port.format(port)
            targ_name = self.targ_name_no_port.format(port)
        return (meas_name, trig_name, targ_name, trig_val, targ_val,
                self.trig_dir_str, self.targ_dir_str, trig_td, targ_td)
class slew_measure(delay_measure):
    """Measures the slew of a single signal (10%->90% for RISE, 90%->10% for FALL)."""

    def __init__(self, measure_name, signal_name, slew_dir_str, measure_scale=None, has_port=True):
        spice_measurement.__init__(self, measure_name, measure_scale, has_port)
        self.set_meas_constants(signal_name, slew_dir_str)

    def set_meas_constants(self, signal_name, slew_dir_str):
        """Derive the trigger/target thresholds from the slew direction."""
        self.trig_dir_str = slew_dir_str
        self.targ_dir_str = slew_dir_str
        if slew_dir_str == "RISE":
            self.trig_val_of_vdd, self.targ_val_of_vdd = 0.1, 0.9
        elif slew_dir_str == "FALL":
            self.trig_val_of_vdd, self.targ_val_of_vdd = 0.9, 0.1
        else:
            debug.error("Unrecognised slew measurement direction={}".format(slew_dir_str), 1)
        # Trigger and target are the same signal for a slew measurement.
        self.trig_name_no_port = signal_name
        self.targ_name_no_port = signal_name
class power_measure(spice_measurement):
    """Generates a spice measurement for the average power between two time points."""

    def __init__(self, measure_name, power_type="", measure_scale=None, has_port=True):
        spice_measurement.__init__(self, measure_name, measure_scale, has_port)
        self.set_meas_constants(power_type)

    def get_measure_function(self):
        return stimuli.gen_meas_power

    def set_meas_constants(self, power_type):
        """Record metadata for the lib file; not needed by the simulation itself."""
        # Expected to be "RISE"/"FALL".
        self.power_type = power_type

    def get_measure_values(self, t_initial, t_final, port=None):
        """Build the argument tuple for the stimulus measurement function.
        The time window and port vary per call, so they are inputs here."""
        self.port_error_check(port)
        if port is None:
            meas_name = self.name
        else:
            meas_name = "{}{}".format(self.name, port)
        return (meas_name, t_initial, t_final)
class voltage_when_measure(spice_measurement):
    """Generates a spice measurement to measure the voltage of a signal based on the voltage of another."""

    def __init__(self, measure_name, trig_name, targ_name, trig_dir_str, trig_vdd, measure_scale=None, has_port=True):
        spice_measurement.__init__(self, measure_name, measure_scale, has_port)
        self.set_meas_constants(trig_name, targ_name, trig_dir_str, trig_vdd)

    def get_measure_function(self):
        return stimuli.gen_meas_find_voltage

    def set_meas_constants(self, trig_name, targ_name, trig_dir_str, trig_vdd):
        """Store the trigger/target signal names, trigger direction, and the
        trigger threshold as a fraction of vdd."""
        self.trig_dir_str = trig_dir_str
        self.trig_val_of_vdd = trig_vdd
        self.trig_name_no_port = trig_name
        self.targ_name_no_port = targ_name

    def get_measure_values(self, trig_td, vdd_voltage, port=None):
        """Build the argument tuple for the stimulus measurement function.
        The trigger delay, vdd and port vary per call, so they are inputs here."""
        self.port_error_check(port)
        if port is None:
            meas_name = self.name
            trig_name = self.trig_name_no_port
            targ_name = self.targ_name_no_port
        else:
            # For dictionary indexing reasons, the name is formatted
            # differently than the signals.
            meas_name = "{}{}".format(self.name, port)
            trig_name = self.trig_name_no_port.format(port)
            targ_name = self.targ_name_no_port.format(port)
        trig_voltage = self.trig_val_of_vdd * vdd_voltage
        return (meas_name, trig_name, targ_name, trig_voltage, self.trig_dir_str, trig_td)
class voltage_at_measure(spice_measurement):
    """Generates a spice measurement to measure the voltage at a specific time.
    The time is considered variant with different periods."""

    def __init__(self, measure_name, targ_name, measure_scale=None, has_port=True):
        spice_measurement.__init__(self, measure_name, measure_scale, has_port)
        self.set_meas_constants(targ_name)

    def get_measure_function(self):
        return stimuli.gen_meas_find_voltage_at_time

    def set_meas_constants(self, targ_name):
        """Store the name of the signal to probe."""
        self.targ_name_no_port = targ_name

    def get_measure_values(self, time_at, port=None):
        """Build the argument tuple for the stimulus measurement function.
        The probe time and port vary per call, so they are inputs here."""
        self.port_error_check(port)
        if port is None:
            meas_name = self.name
            targ_name = self.targ_name_no_port
        else:
            # For dictionary indexing reasons, the name is formatted
            # differently than the signals.
            meas_name = "{}{}".format(self.name, port)
            targ_name = self.targ_name_no_port.format(port)
        return (meas_name, targ_name, time_at)
| compiler/characterizer/measurements.py | 9,162 | Generates a spice measurement for the delay of 50%-to-50% points of two signals.
Generates a spice measurement for the average power between two time points.
Base class for spice stimulus measurements.
Generates a spice measurement to measure the voltage at a specific time.
The time is considered variant with different periods.
Generates a spice measurement to measure the voltage of a signal based on the voltage of another.
Constructs inputs to stimulus measurement function. Variant values are inputs here.
Constructs inputs to stimulus measurement function. Variant values are inputs here.
Constructs inputs to stimulus measurement function. Variant values are inputs here.
Constructs inputs to stimulus measurement function. Variant values are inputs here.
Set the constants for this measurement: signal names, directions, and trigger scales
Set the values needed to generate a Spice measurement statement based on the name of the measurement.
Sets values useful for power simulations. This value is only meta related to the lib file (rise/fall)
Sets values useful for power simulations. This value is only meta related to the lib file (rise/fall)
Sets values useful for power simulations. This value is only meta related to the lib file (rise/fall)
See LICENSE for licensing information. Copyright (c) 2016-2019 Regents of the University of California and The Board of Regents for the Oklahoma Agricultural and Mechanical College (acting for and on behalf of Oklahoma State University) All rights reserved.Names must be unique for correct spice simulation, but not enforced here.Needed for error checkingSome meta values used externally. variables are added here for consistency accross the objectsTime delays and ports are variant and needed as inputs when writing the measurementFor dictionary indexing reasons, the name is formatted differently than the signalsTime delays and ports are variant and needed as inputs when writing the measurementNot needed for power simulationExpected to be "RISE"/"FALL"For dictionary indexing reasons, the name is formatted differently than the signalsFor dictionary indexing reasons, the name is formatted differently than the signals | 2,183 | en | 0.870244 |
from setuptools import find_packages, setup
# Package name and semantic-version components (MAJOR.REVISION.PATCH).
NAME = "popmon"
MAJOR = 0
REVISION = 3
PATCH = 8
# DEV adds a ".dev" suffix to the version and pulls in the test requirements.
DEV = False
# NOTE: also update version at: README.rst
with open("requirements.txt") as f:
    REQUIREMENTS = f.read().splitlines()
# read the contents of abstract file
with open("README.rst", encoding="utf-8") as f:
    long_description = f.read()
VERSION = "{major}.{revision}.{patch}".format(
    major=MAJOR, revision=REVISION, patch=PATCH
)
FULL_VERSION = VERSION
if DEV:
    FULL_VERSION += ".dev"
    with open("requirements-test.txt") as f:
        REQUIREMENTS += f.read().splitlines()
def write_version_py(filename: str = "popmon/version.py") -> None:
    """Write package version to version.py.

    This keeps the version reported by the installed package in sync with
    the constants defined at the top of this file.

    :param filename: The version.py to write too.
    :type filename: str
    """
    # Do not modify the indentation of version_str!
    version_str = """\"\"\"THIS FILE IS AUTO-GENERATED BY SETUP.PY.\"\"\"
name = \"{name!s}\"
version = \"{version!s}\"
full_version = \"{full_version!s}\"
release = {is_release!s}
"""
    content = version_str.format(
        name=NAME.lower(),
        version=VERSION,
        full_version=FULL_VERSION,
        is_release=not DEV,
    )
    with open(filename, "w") as version_file:
        version_file.write(content)
def setup_package() -> None:
    """The main setup method.

    It is responsible for setting up and installing the package.
    """
    # Regenerate popmon/version.py first so the installed package reports
    # the version defined at the top of this file.
    write_version_py()
    setup(
        name=NAME,
        version=VERSION,
        url="https://github.com/ing-bank/popmon",
        license="MIT",
        author="ING Wholesale Banking Advanced Analytics",
        description="Monitor the stability of a pandas or spark dataset",
        keywords="pandas spark data-science data-analysis monitoring statistics python jupyter ipython",
        long_description=long_description,
        long_description_content_type="text/x-rst",
        python_requires=">=3.6",
        packages=find_packages(),
        install_requires=REQUIREMENTS,
        classifiers=[
            "Programming Language :: Python :: 3",
            "License :: OSI Approved :: MIT License",
            "Operating System :: OS Independent",
        ],
        # files to be shipped with the installation, under: popmon/popmon/
        # after installation, these can be found with the functions in resources.py
        package_data=dict(
            popmon=[
                "visualization/templates/*.html",
                "visualization/templates/assets/css/*.css",
                "visualization/templates/assets/js/*.js",
                "test_data/*.csv.gz",
                "test_data/*.json*",
                "notebooks/popmon*tutorial*.ipynb",
            ]
        ),
        # Installs a `popmon_run` console script pointing at the demo pipeline.
        entry_points={
            "console_scripts": ["popmon_run = popmon.pipeline.amazing_pipeline:run"]
        },
    )
# Script entry point: regenerate version.py, then run setuptools.
if __name__ == "__main__":
    setup_package()
| setup.py | 2,993 | The main setup method.
It is responsible for setting up and installing the package.
Write package version to version.py.
This will ensure that the version in version.py is in sync with us.
:param filename: The version.py to write too.
:type filename: str
NOTE: also update version at: README.rst read the contents of abstract file Do not modify the indentation of version_str! files to be shipped with the installation, under: popmon/popmon/ after installation, these can be found with the functions in resources.py | 520 | en | 0.868609 |
# -*- coding: utf-8 -*-
"""Access to FAIRsharing via its API.
.. seealso:: https://beta.fairsharing.org/API_doc
"""
from typing import Any, Iterable, Mapping, MutableMapping, Optional
import pystow
import requests
import yaml
from tqdm import tqdm
__all__ = [
"ensure_fairsharing",
"load_fairsharing",
"FairsharingClient",
]
PATH = pystow.join("bio", "fairsharing", name="fairsharing.yaml")
def load_fairsharing(force_download: bool = False, use_tqdm: bool = True, **kwargs):
    """Load and parse the cached FAIRsharing registry, downloading if needed."""
    registry_path = ensure_fairsharing(
        force_download=force_download, use_tqdm=use_tqdm, **kwargs
    )
    with registry_path.open() as handle:
        return yaml.safe_load(handle)
def ensure_fairsharing(force_download: bool = False, use_tqdm: bool = True, **kwargs):
    """Download the FAIRsharing registry to the cache path and return that path.

    An existing cached copy is reused unless ``force_download`` is set.
    """
    if PATH.exists() and not force_download:
        return PATH
    client = FairsharingClient(**kwargs)
    # As of 2021-12-13, there are a bit less than 4k records that take about
    # 3 minutes to download.
    progress = tqdm(
        client.iter_records(),
        unit_scale=True,
        unit="record",
        desc="Downloading FAIRsharing",
        disable=not use_tqdm,
    )
    records = {record["prefix"]: record for record in progress}
    with PATH.open("w") as file:
        yaml.safe_dump(records, file, allow_unicode=True, sort_keys=True)
    return PATH
# These fields are the same in each record
REDUNDANT_FIELDS = {
"fairsharing-licence",
}
class FairsharingClient:
    """A client for programmatic access to the FAIRsharing private API."""

    def __init__(
        self,
        login: Optional[str] = None,
        password: Optional[str] = None,
        base_url: Optional[str] = None,
    ):
        """Instantiate the client and get an appropriate JWT token.

        :param login: FAIRsharing username
        :param password: Corresponding FAIRsharing password
        :param base_url: The base URL
        """
        self.base_url = base_url or "https://api.fairsharing.org"
        self.signin_url = f"{self.base_url}/users/sign_in"
        self.records_url = f"{self.base_url}/fairsharing_records"
        # Credentials come from the arguments or the pystow config, which
        # raises if neither is available.
        self.username = pystow.get_config(
            "fairsharing", "login", passthrough=login, raise_on_missing=True
        )
        self.password = pystow.get_config(
            "fairsharing", "password", passthrough=password, raise_on_missing=True
        )
        self.jwt = self.get_jwt()
        self.session = requests.Session()
        self.session.headers.update(
            {
                "Accept": "application/json",
                "Content-Type": "application/json",
                "Authorization": f"Bearer {self.jwt}",
            }
        )

    def get_jwt(self) -> str:
        """Sign in with the configured credentials and return the JWT."""
        payload = {
            "user": {
                "login": self.username,
                "password": self.password,
            },
        }
        res = requests.post(self.signin_url, json=payload).json()
        return res["jwt"]

    def iter_records(self) -> Iterable[Mapping[str, Any]]:
        """Iterate over all FAIRsharing records."""
        yield from self._iter_records_helper(self.records_url)

    def _preprocess_record(
        self, record: MutableMapping[str, Any]
    ) -> Optional[MutableMapping[str, Any]]:
        """Flatten a raw API record and derive its FAIRsharing prefix.

        Returns None for records without a DOI (they can't be resolved),
        which the iterator then skips.
        """
        if "type" in record:
            del record["type"]
        record = {"id": record["id"], **record["attributes"]}
        doi = record.get("doi")
        if doi is None:
            # Records without a DOI can't be resolved
            url = record["url"]
            if not url.startswith("https://fairsharing.org/fairsharing_records/"):
                tqdm.write(f"{record['id']} has no DOI: {record['url']}")
            return None
        elif doi.startswith("10.25504/"):
            record["prefix"] = record.pop("doi")[len("10.25504/") :]
        else:
            # NOTE(review): such a record keeps its "doi" and gets no "prefix"
            # key, which breaks prefix-keyed consumers — confirm intended.
            tqdm.write(f"DOI has unexpected prefix: {record['doi']}")
        record["description"] = _removeprefix(
            record.get("description"), "This FAIRsharing record describes: "
        )
        record["name"] = _removeprefix(record.get("name"), "FAIRsharing record for: ")
        for key in REDUNDANT_FIELDS:
            if key in record:
                del record[key]
        return record

    def _iter_records_helper(self, url: str) -> Iterable[Mapping[str, Any]]:
        """Yield preprocessed records, following pagination "next" links.

        BUG FIX: the original recursed once per page, so a registry with many
        pages could exhaust the Python recursion limit; follow links with a
        loop instead.
        """
        next_url: Optional[str] = url
        while next_url:
            res = self.session.get(next_url).json()
            for record in res["data"]:
                yv = self._preprocess_record(record)
                if yv:
                    yield yv
            next_url = res["links"].get("next")
def _removeprefix(s: Optional[str], prefix) -> Optional[str]:
if s is None:
return None
if s.startswith(prefix):
return s[len(prefix) :]
return s
# When run as a script, force a fresh download of the registry.
if __name__ == "__main__":
    ensure_fairsharing(force_download=True)
| src/fairsharing_client/api.py | 4,932 | A client for programmatic access to the FAIRsharing private API.
Instantiate the client and get an appropriate JWT token.
:param login: FAIRsharing username
:param password: Corresponding FAIRsharing password
:param base_url: The base URL
Get the FAIRsharing registry.
Get the JWT.
Iterate over all FAIRsharing records.
Get the FAIRsharing registry.
Access to FAIRsharing via its API.
.. seealso:: https://beta.fairsharing.org/API_doc
-*- coding: utf-8 -*- As of 2021-12-13, there are a bit less than 4k records that take about 3 minutes to download These fields are the same in each record Records without a DOI can't be resolved | 634 | en | 0.865712 |
#
# Copyright 2022 DMetaSoul
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module computes evaluation metrics for MSMARCO dataset on the ranking task.
Command line:
python msmarco_eval_ranking.py <path_to_reference_file> <path_to_candidate_file>
Creation Date : 06/12/2018
Last Modified : 1/21/2019
Authors : Daniel Campos <dacamp@microsoft.com>, Rutger van Haasteren <ruvanh@microsoft.com>
"""
import sys
import json
from collections import Counter
# Only the top-10 candidate ranks contribute to the MRR metric (MRR@10).
MaxMRRRank = 10
def load_reference_from_stream(f):
    """Read relevant-passage references from a stream of JSON lines.

    Each line is a JSON object with a "question_id" and a list of
    "answer_paragraphs"; paragraph ids are collected per query, appending
    when a query id appears on more than one line.

    Args:
        f: iterable of JSON-line strings (e.g. an open file object).

    Returns:
        dict: query_id -> list of relevant paragraph ids.

    Raises:
        IOError: if a line is not valid JSON or lacks the expected keys.
    """
    qids_to_relevant_passageids = {}
    for line in f:
        # BUG FIX: the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit; catch only parse/shape errors.
        try:
            sample = json.loads(line.strip())
            qid = sample["question_id"]
            pids = qids_to_relevant_passageids.setdefault(qid, [])
            for answer_paragraph in sample["answer_paragraphs"]:
                pids.append(answer_paragraph["paragraph_id"])
        except (ValueError, KeyError, TypeError):
            raise IOError('\"%s\" is not valid format' % line)
    return qids_to_relevant_passageids
def load_reference(path_to_reference):
    """Open ``path_to_reference`` and parse the relevant-passage reference.

    Args:
        path_to_reference (str): path to a JSON-lines reference file.

    Returns:
        dict: query_id -> list of relevant passage ids.
    """
    with open(path_to_reference, 'r') as reference_file:
        return load_reference_from_stream(reference_file)
def load_candidate_from_stream(f):
    """Read ranked candidate passages from an open JSON file.

    The stream holds one JSON object mapping query_id to a ranked list of
    passage ids; each ranking is truncated/padded to exactly 50 entries
    (missing slots filled with 0).

    Args:
        f: open file-like object containing the predictions JSON.

    Returns:
        dict: query_id -> fixed-length list of 50 passage ids.

    Raises:
        IOError: if the stream is not valid JSON of the expected shape.
    """
    qid_to_ranked_candidate_passages = {}
    # BUG FIX: the original bare `except:` also swallowed
    # KeyboardInterrupt/SystemExit; catch only parse/shape errors.
    try:
        preds = json.load(f)
        for qid in preds.keys():
            ranked = [0] * 50
            for rank, pid in enumerate(preds[qid][:50]):
                ranked[rank] = pid
            qid_to_ranked_candidate_passages[qid] = ranked
    except (ValueError, KeyError, TypeError, AttributeError):
        raise IOError('Submitted file is not valid format')
    return qid_to_ranked_candidate_passages
def load_candidate(path_to_candidate):
    """Open ``path_to_candidate`` and parse ranked candidate passages.

    Args:
        path_to_candidate (str): path to the predictions JSON file.

    Returns:
        dict: query_id -> fixed-length list of ranked passage ids.
    """
    with open(path_to_candidate, 'r') as candidate_file:
        return load_candidate_from_stream(candidate_file)
def quality_checks_qids(qids_to_relevant_passageids, qids_to_ranked_candidate_passages):
    """Perform quality checks on the dictionaries.

    Args:
        qids_to_relevant_passageids (dict): query-passage reference mapping,
            as read with load_reference or load_reference_from_stream.
        qids_to_ranked_candidate_passages (dict): query-passage candidates.

    Returns:
        bool, str: whether the submission is allowed, and a message to show
        in case of a problem.
    """
    # (Removed unused candidate_set/ref_set locals from the original.)
    # Check that we do not have multiple passages per query; return on the
    # first offending query instead of scanning everything.
    for qid in qids_to_ranked_candidate_passages:
        duplicate_pids = set(
            item
            for item, count in Counter(qids_to_ranked_candidate_passages[qid]).items()
            if count > 1
        )
        # 0 is the padding value, so repeated zeros are not real duplicates.
        # (This also fixes the original sometimes reporting 0 as the culprit.)
        duplicate_pids.discard(0)
        if duplicate_pids:
            message = "Cannot rank a passage multiple times for a single query. QID={qid}, PID={pid}".format(
                qid=qid, pid=list(duplicate_pids)[0])
            return False, message
    return True, ''
def compute_metrics(qids_to_relevant_passageids, qids_to_ranked_candidate_passages):
    """Compute MRR@10 and recall metrics for ranked candidate passages.

    Args:
        qids_to_relevant_passageids (dict): query_id -> list of relevant
            passage ids, as read with load_reference/load_reference_from_stream.
        qids_to_ranked_candidate_passages (dict): query_id -> ranked list of
            candidate passage ids (padded to a fixed length with 0).

    Returns:
        dict: {'MRR@10', 'recall@1', 'recall@50', 'QueriesRanked'}.

    Raises:
        IOError: if no query id is shared between the two dictionaries.
    """
    all_scores = {}
    MRR = 0
    # NOTE(review): this counter is never updated or read — dead variable.
    qids_with_relevant_passages = 0
    # ranking records, per scored query, the 1-based rank of the first
    # relevant passage within the top MaxMRRRank (0 if none was found).
    ranking = []
    recall_q_top1 = set()
    recall_q_top50 = set()
    recall_q_all = set()
    for qid in qids_to_ranked_candidate_passages:
        # Only queries present in the reference contribute to the metrics.
        if qid in qids_to_relevant_passageids:
            ranking.append(0)
            target_pid = qids_to_relevant_passageids[qid]
            candidate_pid = qids_to_ranked_candidate_passages[qid]
            # MRR@10: reciprocal rank of the first hit within the top 10.
            for i in range(0, MaxMRRRank):
                if candidate_pid[i] in target_pid:
                    MRR += 1.0 / (i + 1)
                    ranking.pop()
                    ranking.append(i + 1)
                    break
            # Recall bookkeeping: record the first position of any hit.
            for i, pid in enumerate(candidate_pid):
                if pid in target_pid:
                    recall_q_all.add(qid)
                    if i < 50:
                        recall_q_top50.add(qid)
                    if i == 0:
                        recall_q_top1.add(qid)
                    break
    if len(ranking) == 0:
        raise IOError("No matching QIDs found. Are you sure you are scoring the evaluation set?")
    # Normalize by the number of reference queries (not just those scored).
    MRR = MRR / len(qids_to_relevant_passageids)
    recall_top1 = len(recall_q_top1) * 1.0 / len(qids_to_relevant_passageids)
    recall_top50 = len(recall_q_top50) * 1.0 / len(qids_to_relevant_passageids)
    recall_all = len(recall_q_all) * 1.0 / len(qids_to_relevant_passageids)
    all_scores['MRR@10'] = MRR
    all_scores["recall@1"] = recall_top1
    all_scores["recall@50"] = recall_top50
    # all_scores["recall@all"] = recall_all
    all_scores['QueriesRanked'] = len(qids_to_ranked_candidate_passages)
    return all_scores
def compute_metrics_from_files(path_to_reference, path_to_candidate, perform_checks=True):
    """Load reference and candidate files, optionally run sanity checks, and score."""
    qids_to_relevant_passageids = load_reference(path_to_reference)
    qids_to_ranked_candidate_passages = load_candidate(path_to_candidate)
    if perform_checks:
        allowed, message = quality_checks_qids(
            qids_to_relevant_passageids, qids_to_ranked_candidate_passages
        )
        if message != '':
            print(message)
    return compute_metrics(qids_to_relevant_passageids, qids_to_ranked_candidate_passages)
def main():
    """Command line:
    python result_eval.py <path_to_reference_file> <path_to_candidate_file>
    """
    # Guard clause: require exactly the two path arguments.
    if len(sys.argv) != 3:
        print('Usage: result_eval.py <reference ranking> <candidate ranking>')
        exit()
    path_to_reference = sys.argv[1]
    path_to_candidate = sys.argv[2]
    metrics = compute_metrics_from_files(path_to_reference, path_to_candidate)
    # Emit the metrics as JSON with keys in sorted order.
    result = {metric: metrics[metric] for metric in sorted(metrics)}
    print(json.dumps(result))
# Script entry point.
if __name__ == '__main__':
    main()
| demo/search/src/eval/evaluation.py | 7,430 | Compute MRR metric
Args:
p_qids_to_relevant_passageids (dict): dictionary of query-passage mapping
Dict as read in with load_reference or load_reference_from_stream
p_qids_to_ranked_candidate_passages (dict): dictionary of query-passage candidates
Returns:
dict: dictionary of metrics {'MRR': <MRR Score>}
Load candidate data from a file.
Args:path_to_candidate (str): path to file to load.
Returns:qid_to_ranked_candidate_passages (dict): dictionary mapping from query_id (int) to a list of 1000 passage ids(int) ranked by relevance and importance
Load Reference reference relevant passages
Args:path_to_reference (str): path to a file to load.
Returns:qids_to_relevant_passageids (dict): dictionary mapping from query_id (int) to relevant passages (list of ints).
Command line:
python result_eval.py <path_to_reference_file> <path_to_candidate_file>
Perform quality checks on the dictionaries
Args:
p_qids_to_relevant_passageids (dict): dictionary of query-passage mapping
Dict as read in with load_reference or load_reference_from_stream
p_qids_to_ranked_candidate_passages (dict): dictionary of query-passage candidates
Returns:
bool,str: Boolean whether allowed, message to be shown in case of a problem
This module computes evaluation metrics for MSMARCO dataset on the ranking task.
Command line:
python msmarco_eval_ranking.py <path_to_reference_file> <path_to_candidate_file>
Creation Date : 06/12/2018
Last Modified : 1/21/2019
Authors : Daniel Campos <dacamp@microsoft.com>, Rutger van Haasteren <ruvanh@microsoft.com>
Copyright 2022 DMetaSoul Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Create sets of the QIDs for the submitted and reference queries Check that we do not have multiple passages per query Remove all zeros from the candidates all_scores["recall@all"] = recall_all | 2,290 | en | 0.670816 |
"""
Copyright [2021] [DenyS]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import typing
__all__: typing.Sequence[str] = ("Info",)
T = typing.TypeVar("T")


class Info(typing.Generic[T]):
    """Generic wrapper used to annotate/filter module-level variables.

    Parameters
    ----------
    value : T
        The value being wrapped.

    Notes
    -----
    ``repr(Info(x))`` yields a development/debugging representation,
    while ``str(Info(x))`` renders the wrapped value itself.
    """

    def __init__(self, value: T) -> None:
        self.value = value

    def __repr__(self) -> str:
        return "Info(value={})".format(self.value)

    def __str__(self) -> str:
        return "{}".format(self.value)
| multibar/core/variants/lib_info.py | 1,248 | Annotation for filtering global variables.
Parameters:
-----------
value: :class:`TypeVar`
A parameter that stores the value of a certain variable.
Features:
---------
* `__repr__`: repr(Info())
Development Information.
* `__str__`: str(Info()) | Info()
Will output the value that stores value.
Copyright [2021] [DenyS]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. | 859 | en | 0.724577 |
from django.conf.urls import patterns, include, url
from django.contrib import admin
# Auto-discover admin.py modules in installed apps so their
# ModelAdmin registrations take effect.
admin.autodiscover()
# Root URLconf: mounts the Django admin site and delegates all
# /simpleapp/ requests to the simpleapp application's own URLconf.
urlpatterns = [
    # Examples:
    # url(r'^$', 'simpleproject.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^simpleapp/', include('simpleapp.urls')),
]
| simpleproject/simpleproject/urls.py | 344 | Examples: url(r'^$', 'simpleproject.views.home', name='home'), url(r'^blog/', include('blog.urls')), | 100 | en | 0.288443 |
# -*- coding: utf-8 -*-
"""
Demonstrates basic use of LegendItem
"""
import initExample ## Add path to library (just for examples; you do not need this)
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
import numpy as np
# Create the plot window that hosts every item added below.
win = pg.plot()
win.setWindowTitle('pyqtgraph example: BarGraphItem')
# # option1: only for .plot(), following c1,c2 for example-----------------------
# win.addLegend(frame=False, colCount=2)
# bar graph
x = np.arange(10)
y = np.sin(x+2) * 3
bg1 = pg.BarGraphItem(x=x, height=y, width=0.3, brush='b', pen='w', name='bar')
win.addItem(bg1)
# curves: random integer samples, plus a fixed series with an area fill
c1 = win.plot([np.random.randint(0,8) for i in range(10)], pen='r', symbol='t', symbolPen='r', symbolBrush='g', name='curve1')
c2 = win.plot([2,1,4,3,1,3,2,4,3,2], pen='g', fillLevel=0, fillBrush=(255,255,255,30), name='curve2')
# scatter plot
s1 = pg.ScatterPlotItem(size=10, pen=pg.mkPen(None), brush=pg.mkBrush(255, 255, 255, 120), name='scatter')
spots = [{'pos': [i, np.random.randint(-3, 3)], 'data': 1} for i in range(10)]
s1.addPoints(spots)
win.addItem(s1)
# # option2: generic method------------------------------------------------
# Build the legend manually and register each named item with it.
legend = pg.LegendItem((80,60), offset=(70,20))
legend.setParentItem(win.graphicsItem())
legend.addItem(bg1, 'bar')
legend.addItem(c1, 'curve1')
legend.addItem(c2, 'curve2')
legend.addItem(s1, 'scatter')
if __name__ == '__main__':
    pg.exec()
| examples/Legend.py | 1,378 | Demonstrates basic use of LegendItem
-*- coding: utf-8 -*- Add path to library (just for examples; you do not need this) option1: only for .plot(), following c1,c2 for example----------------------- win.addLegend(frame=False, colCount=2) bar graph curve scatter plot option2: generic method------------------------------------------------ | 342 | en | 0.508734 |
class TimedData(object):
    """Container pairing a payload with a discrete time annotation.

    The annotation is either a timestamp or a time interval, as
    indicated by the ``timestamp`` flag.
    """

    def __init__(self, data, time, timestamp=True):
        """Build a timed datum.

        Parameters:
        - "data": the payload to store (any type)
        - "time": the time information attached to the datum (integer)
        - "timestamp": boolean flag; if true, "time" is a timestamp,
          otherwise it is a time interval

        Raises:
        - TypeError: when "time" cannot be converted to an integer.
        """
        # Input validation: "time" must be coercible to int.  Catch only
        # conversion failures (the original bare except hid real errors).
        try:
            time = int(time)
        except (TypeError, ValueError):
            raise TypeError('"time" parameter is invalid. It must be an integer number')
        # Populate the data structure.
        self.data = data
        self.time = time
        self.timestamp = bool(timestamp)

    def __eq__(self, other):
        # Two timed data are equal when payload, time and flag all match.
        return (self.data == other.data
                and self.time == other.time
                and self.timestamp == other.timestamp)

    def __str__(self):
        return '(data=%s, time=%s, timestamp=%s)' % (self.data, self.time, self.timestamp)

    def get_data(self):
        """Return the "data" field."""
        return self.data

    def get_time(self):
        """Return the "time" field."""
        return self.time
class TimedArray(object):
    """Array of TimedData objects sharing the same time semantics."""

    def __init__(self, timestamp=True, empty=True):
        """Create the array.

        - "timestamp": True when elements carry a timestamp, False when
          they carry a time interval.
        - "empty": when False, the array is seeded with a starting node
          (data=0, time=0).
        """
        self._list = []
        self.timestamp = (timestamp is True)
        if not empty:
            # Seed with the starting node.
            self.append(TimedData(0, 0, self.timestamp))

    def __str__(self):
        body = ', '.join(str(i) for i in self._list)
        return '(timestamp=%s, [%s]' % (self.timestamp, body)

    def get_list(self):
        """Return the stored TimedData objects as a list."""
        return self._list

    def get_data_list(self):
        """Return the "data" attribute of every element, as a list."""
        return [x.get_data() for x in self._list]

    def get_time_list(self):
        """Return the "time" attribute of every element, as a list."""
        return [x.get_time() for x in self._list]

    def has_time_intervals(self):
        """Return True when elements carry a time interval."""
        return self.timestamp is False

    def append(self, item):
        """Append "item", validating its type and its timestamp flag."""
        if not isinstance(item, TimedData):
            raise TypeError('cannot add a non-"TimedData" object to a "TimedArray" list')
        if item.timestamp != self.timestamp:
            raise ValueError(
                '"item" parameter is invalid: its "timestamp" attribute must be equal to %s' % self.timestamp)
        self._list.append(item)

    def remove(self, item):
        """Remove "item" from the array, if present."""
        if not isinstance(item, TimedData):
            raise TypeError('the item to remove must be a "TimedData" object')
        if item in self._list:
            self._list.remove(item)

    def remove_all(self, items):
        """Remove a collection of TimedData objects."""
        if not isinstance(items, (list, tuple)):
            raise TypeError('"items" parameter must be an array')
        try:
            for x in items:
                self.remove(x)
        except TypeError:
            raise TypeError('the items list must contain only "TimedData" objects')

    def filter(self, f):
        """Return a new TimedArray with the elements accepted by f."""
        res = TimedArray(self.timestamp, empty=True)
        # list() materializes the result: under Python 3 filter() is lazy
        # and the original code relied on Python 2 returning a list.
        res._list = list(filter(f, self._list))
        return res

    def filter_data_range(self, start, end):
        """Filter by a range of "data" values."""
        return self.filter(lambda x: start <= x.get_data() <= end)

    def filter_time_range(self, start, end):
        """Filter by a range of "time" values."""
        return self.filter(lambda x: start <= x.get_time() <= end)

    def search(self, to_search):
        """Search the array.

        When "timestamp" is True the search key is the timestamp,
        otherwise it is the payload attached to the time interval.
        """
        if self.timestamp:
            # The search key is "time", an integer.
            return self.search_by_time(to_search)
        # The search key is "data", a payload of any type.
        return self.search_by_data(to_search)

    def search_by_data(self, to_search):
        """Search by the "data" field; returns a list of matches."""
        return [x for x in self._list if x.data == to_search]

    def search_by_datas(self, search_params):
        """Search by "data" for every key in "search_params"."""
        if not isinstance(search_params, (list, tuple)):
            raise TypeError('"searchParams" parameter is invalid. It must be an array')
        result = []
        for x in search_params:
            result.extend(self.search_by_data(x))
        return result

    def search_by_time(self, to_search):
        """Search by the "time" field; "to_search" must be an integer."""
        if not isinstance(to_search, int):
            raise TypeError('the research parameter must be an integer number (timestamp)')
        return [x for x in self._list if x.time == to_search]

    def search_by_times(self, search_params):
        """Search by "time" for every key in "search_params"."""
        if not isinstance(search_params, (list, tuple)):
            raise TypeError('"searchParams" parameter is invalid. It must be an array')
        result = []
        for x in search_params:
            result.extend(self.search_by_time(x))
        return result

    def contains(self, to_search):
        """Tell whether searching for "to_search" yields any result."""
        return len(self.search(to_search)) > 0

    def update(self, to_search, new_value):
        """Update the elements matching "to_search" with "new_value".

        When "timestamp" is True the key is "time" and "data" is updated;
        otherwise the key is "data" and "time" is updated.
        """
        items = self.search(to_search)
        # Explicit loops are required: the original map(update_function,
        # items) is lazy under Python 3 and never performed the update.
        if self.timestamp:
            for x in items:
                x.data = new_value
        else:
            for x in items:
                x.time = new_value

    def insert_or_update(self, time_to_search, data_value):
        """Update the matching element, or append a new one."""
        if self.contains(time_to_search):
            self.update(time_to_search, data_value)
        else:
            self.append(
                TimedData(data_value, time_to_search, self.timestamp)
            )
| timed_structures.py | 9,525 | Array di oggetti TimedData
Struttura dati per eventi accompagnati da un informazione temporale discreta (timestamp o intervallo)
I parametri di input sono
- "data": il dato che si vuole memorizzare (di qualsiasi natura)
- "time": l'informazione temporale associata al dato (numero intero)
- "timestamp": flag booleana. Se vero, il campo "time" e' un timestamp; altrimenti,
e' un intervallo di tempo
La flag "timestamp" serve per specificare se la lista contiene dati con un timestamp (True) oppure un
intervallo temporale (False) associato: la flag "empty" permette invece di creare, se settata a False,
un TimedArray contenente al suo interno un nodo di partenza (d = 0, t = 0)
Aggiungo un elemento alla lista
La funzione mi dice se la ricerca nel vettore, sulla base della chiave di ricerca
"toSearch" specificata, produce risultati
Questa funzione applica la funzione f per filtrare il contenuto del vettore
La funzione filtra il vettore per range di valori "Data"
La funzione filtra il vettore per range di valori "Data"
Ritorna il campo "data"
Ritorna gli attributi "data" di ogni elemento del vettore, sottoforma di lista
Ritorna l'elenco di oggetti "TimedData", memorizzati come lista
Ritorna il campo "time"
Ritorna gli attributi "time" di ogni elemento del vettore, sottoforma di lista
Ritorna True se gli elementi del vettore hanno associato un intervallo temporale
Questa funzione rimuove "item" (se presente) dall'array
Questa funzione permette di rimuovere un elenco di oggetti "TimedData"
Funzione di ricerca all'interno del contenuto del vettore.
Se "timestamp" e' True, la chiave per la ricerca e' il timestamp: altrimenti,
la chiave diventa il contenuto a cui e' associato l'intervallo temporale.
Funzione di ricerca per campo "data", all'interno del vettore
Funzione di ricerca per campo "data", all'interno del vettore: il parametro di ricerca e' un vettore
Funzione di ricerca per campo "time", all'interno del vettore
Il parametro "toSearch" deve essere un numero intero
Funzione di ricerca per campo "time", all'interno del vettore: il parametro di ricerca e' un vettore
Questa funzione aggiorna il contenuto degli elementi del vettore che
soddisfano il criterio di ricerca specificato
- "toSearch" e' la chiave di ricerca
- "newValue" e' il valore aggiornato da inserire
Controllo dell'input: parametro "time" Creo la struttura dati Creo il nodo di partenza Controllo dei parametri di input: "item" Accodo l'elemento alla lista Controllo dei parametri di input: "item" Elimino l'oggetto, se presente Controllo dei parametri di input: "items" Elimino un oggetto per volta La chiave di ricerca e' "time", un numero intero La chiave di ricerca e' "data", un dato di qualsiasi tipo Risultati di ricerca Controllo dei parametri di input: "searchParams" Effettuo tante ricerche quanti sono i parametri specificati Ricerca per data, parametro "x" Accodo quanto ottenuto al risultato di ricerca globale Risultati della ricerca multipla Controllo dei parametri di input: "searchParams" Effettuo tante ricerche quanti sono i parametri specificati Ricerca per data, parametro "x" Accodo quanto ottenuto al risultato di ricerca globale Risultati della ricerca multipla Effettuo una ricerca Definisco il criterio di aggiornamento La chiave di ricerca e' "time": aggiorno "data" update_function = (lambda x: x.data = newValue) La chiave di ricerca e' "data": aggiorno "time" update_function = (lambda x: x.time = newValue) Aggiorno gli elementi | 3,464 | it | 0.973974 |
"""
This problem was asked by Amazon.
Given a matrix of 1s and 0s, return the number of "islands" in the matrix.
A 1 represents land and 0 represents water, so an island is a group of 1s
that are neighboring whose perimeter is surrounded by water.
For example, this matrix has 4 islands.
1 0 0 0 0
0 0 1 1 0
0 1 1 0 0
0 0 0 0 0
1 1 0 0 1
1 1 0 0 1
"""
moves = [
    # (row offset, col offset) of the 8 neighbouring cells:
    # islands are 8-connected (orthogonal + diagonal).
    (0, 1), # right
    (0, -1), # left
    (1, 0), # down
    (-1, 0), # up
    (1,1), # down-right
    (1, -1), # down-left
    (-1, 1), # up-right
    (-1, -1) # up-left
]
def mark_island(row, col, land_map, marker):
    """Flood-fill the island containing (row, col) with `marker`.

    Mutates `land_map` in place and also returns it.  Cells equal to 1
    are relabelled with `marker`; water (0), already-marked cells and
    out-of-range coordinates are left untouched.  Connectivity is
    8-directional (orthogonal + diagonal neighbours).
    """
    # (row offset, col offset) of the 8 neighbouring cells.
    neighbours = ((0, 1), (0, -1), (1, 0), (-1, 0),
                  (1, 1), (1, -1), (-1, 1), (-1, -1))
    n_rows, n_cols = len(land_map), len(land_map[0])
    # Iterative DFS: avoids RecursionError on large maps, which the
    # original recursive version could hit.
    stack = [(row, col)]
    while stack:
        r, c = stack.pop()
        if r < 0 or c < 0 or r >= n_rows or c >= n_cols:
            continue
        if land_map[r][c] != 1:
            # Water, already marked, or some other non-land value.
            continue
        land_map[r][c] = marker
        for dr, dc in neighbours:
            stack.append((r + dr, c + dc))
    return land_map
def find_num_of_islands(land_map):
    """Count the islands (8-connected groups of 1s) in `land_map`."""
    count = 0
    for row_idx, row in enumerate(land_map):
        for col_idx, cell in enumerate(row):
            if cell == 1:
                # Unvisited land: a new island; mark the whole of it so
                # the remaining cells are not counted again.
                count += 1
                land_map = mark_island(row_idx, col_idx, land_map, marker='i')
    return count
if __name__ == '__main__':
    # Demo: the example grid from the problem statement (4 islands).
    land_map = [
        [1, 0, 0, 0, 0],
        [0, 0, 1, 1, 0],
        [0, 1, 1, 0, 0],
        [0, 0, 0, 0, 0],
        [1, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
    ]
    print(find_num_of_islands(land_map)) # 4
    # Two copies of the grid stacked vertically: the seam merges the
    # bottom-left island of the first copy with the top-left island of
    # the second, giving 7 rather than 8.
    land_map = [
        [1, 0, 0, 0, 0],
        [0, 0, 1, 1, 0],
        [0, 1, 1, 0, 0],
        [0, 0, 0, 0, 0],
        [1, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
        [1, 0, 0, 0, 0],
        [0, 0, 1, 1, 0],
        [0, 1, 1, 0, 0],
        [0, 0, 0, 0, 0],
        [1, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
    ]
print(find_num_of_islands(land_map)) # 7 | DailyCodingProblem/84_Amazon_Find_Islands_From_Matrix.py | 1,963 | This problem was asked by Amazon.
Given a matrix of 1s and 0s, return the number of "islands" in the matrix.
A 1 represents land and 0 represents water, so an island is a group of 1s
that are neighboring whose perimeter is surrounded by water.
For example, this matrix has 4 islands.
1 0 0 0 0
0 0 1 1 0
0 1 1 0 0
0 0 0 0 0
1 1 0 0 1
1 1 0 0 1
row, col west east south north south-west south-east north-west north-east print(*land_map, sep='\n') 4 7 | 455 | en | 0.978998 |
# The following comments couldn't be translated into the new config version:
# untracked PSet maxEvents = {untracked int32 input = 2}
#include "Configuration/ReleaseValidation/data/Services.cff"
# include "Configuration/StandardSequences/data/FakeConditions.cff"
# untracked PSet options = {
# include "FWCore/Framework/test/cmsExceptionsFatalOption.cff"
# untracked bool makeTriggerResults = true
# }
import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST")
#
# ecal trig prim producer
# # ecal tpg params
# es_module = EcalTrigPrimESProducer {
# untracked string DatabaseFile = "TPG.txt"
# #untracked string DatabaseFile = "TPG_RCT_internal.txt"
# }
#
process.load("FWCore.MessageService.MessageLogger_cfi")
# standard RCT configuration, including input scales
process.load("L1TriggerConfig.RCTConfigProducers.L1RCTConfig_cff")
# using standard scales
process.load("L1TriggerConfig.L1ScalesProducers.L1CaloScalesConfig_cff")
#include "L1TriggerConfig/L1ScalesProducers/data/L1CaloInputScalesConfig.cff"
process.load("L1Trigger.RegionalCaloTrigger.L1RCTTestAnalyzer_cfi")
process.load("L1Trigger.RegionalCaloTrigger.rctDigis_cfi")
# process at most 64 events
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(64)
)
# ROOT file that receives the analyzer's output
process.TFileService = cms.Service("TFileService",
    fileName = cms.string('rct.root')
)
# no real event data: the digis are generated from the text patterns below
process.source = cms.Source("EmptySource")
# translate electron test patterns (text file) into trigger digis
process.rctInput = cms.EDProducer("RctInputTextToDigi",
    inputFile = cms.FileInPath('L1Trigger/TextToDigi/test/data/rctTestInputFileElec.txt')
)
process.input = cms.Path(process.rctInput)
# run the RCT emulator, then the analyzer on its output
process.p4 = cms.Path(process.rctDigis*process.L1RCTTestAnalyzer)
process.schedule = cms.Schedule(process.input,process.p4)
# point both the emulator and the analyzer at the text-derived digis
process.L1RCTTestAnalyzer.ecalDigisLabel = 'rctInput'
process.L1RCTTestAnalyzer.hcalDigisLabel = 'rctInput'
process.rctDigis.ecalDigisLabel = 'rctInput'
process.rctDigis.hcalDigisLabel = 'rctInput'
| L1Trigger/RegionalCaloTrigger/test/rctInputTest_cfg.py | 1,912 | The following comments couldn't be translated into the new config version: untracked PSet maxEvents = {untracked int32 input = 2}include "Configuration/ReleaseValidation/data/Services.cff" include "Configuration/StandardSequences/data/FakeConditions.cff" untracked PSet options = { include "FWCore/Framework/test/cmsExceptionsFatalOption.cff" untracked bool makeTriggerResults = true } ecal trig prim producer ecal tpg params es_module = EcalTrigPrimESProducer { untracked string DatabaseFile = "TPG.txt" untracked string DatabaseFile = "TPG_RCT_internal.txt" } standard RCT configuration, including input scales using standard scalesinclude "L1TriggerConfig/L1ScalesProducers/data/L1CaloInputScalesConfig.cff" | 738 | en | 0.641233 |
from twisted.internet import defer
from signing.processor import expose
class SayHiImplementation(object):
    """
    Responds with 'hello, %s' % arg

    say_hi returns a twisted Deferred that has already been fired with
    the greeting string.
    """
    @expose
    def say_hi(self, identifier):
        # NOTE(review): @expose presumably publishes this method on the
        # signing processor's RPC surface -- confirm in signing.processor.
        # The Deferred is fired synchronously before being returned.
        d = defer.Deferred()
        d.callback('hello, %s' % identifier)
        return d
| signing/processorimpl/sayhiimplementation.py | 297 | Responds with 'hello, %s' % arg | 31 | en | 0.390518 |
# -*- coding: utf-8 -*-
from collections import namedtuple
from subprocess import check_output
import click
from .utils import cd
try:
from subprocess import call as run
except ImportError:
from subprocess import run
class VueJs(object):
    """
    Provide subprocess call to `npm` and `vue-cli`
    """

    @staticmethod
    def node_check():
        """
        Node and npm version checker.

        Returns True when node's major version is at least 6 and npm's
        major version is at least 4.
        """
        node_out = check_output('node -v'.split()).decode('utf-8')
        npm_out = check_output('npm -v'.split()).decode('utf-8')
        # Compare majors numerically: the original lexicographic string
        # comparison broke for two-digit majors ('v10' > 'v5' is False,
        # '10' >= '4' is False).
        node_major = int(node_out.strip().lstrip('v').split('.')[0])
        npm_major = int(npm_out.strip().split('.')[0])
        return node_major >= 6 and npm_major >= 4

    @staticmethod
    def vue_cli_check():
        """
        vue-cli version checker.

        Returns the major version string when vue-cli is installed,
        False otherwise.
        """
        try:
            return check_output('vue -V'.split()).decode('utf-8').rsplit('.')[0]
        except OSError:
            # `vue` binary not found on PATH.
            return False

    @staticmethod
    def install_cli():
        """Install vue-cli globally via npm."""
        run('npm install -g vue-cli'.split())

    @staticmethod
    def project_setup(project):
        """Scaffold `project` from the vue webpack template."""
        run('vue init webpack {project}'.format(project=project).split())

    @staticmethod
    def install_dependencies(project):
        """Run `npm install` inside the project directory."""
        with cd(project):
            run('npm install'.split())

    @staticmethod
    def dev():
        """Start the npm dev server."""
        run('npm run dev'.split())

    @staticmethod
    def build():
        """Build the production bundle."""
        run('npm run build'.split())
class VueJsBuilder(object):
    @staticmethod
    def startproject(project):
        """Scaffold `project` and install its dependencies.

        Returns a (status, message, color) named tuple describing the
        outcome, ready for click-styled echoing.
        """
        Result = namedtuple('Result', ['status', 'message', 'color'])
        if not VueJs.vue_cli_check():
            return Result(False, 'Please install vue-cli via `vuecli` command', 'red')
        VueJs.project_setup(project)
        VueJs.install_dependencies(project)
        return Result(True, 'Application and dependencies installed\n', 'green')
@click.group()
def cli():
    """
    Click entry point: vue-cli commands group
    By convention all new cli has a cli function with a pass statement
    """
    # Group body is intentionally empty; subcommands attach themselves
    # via the @cli.command() decorator.
    pass
@cli.command()
def vuecheck():
    """
    Check if node > 5 and npm > 3 are installed
    """
    ok = VueJs.node_check()
    message = 'Found node and npm' if ok else 'Missing node and npm installation'
    color = 'green' if ok else 'red'
    click.echo(click.style(message, fg=color))
@cli.command()
def installvuecli():
    """
    Install vue-cli
    """
    if not VueJs.vue_cli_check():
        # Not installed yet: install globally, then report.
        VueJs.install_cli()
        click.echo(click.style('Installed vue-cli globally', fg='green'))
    else:
        click.echo(click.style('Found valid vue-cli', fg='green'))
@cli.command()
@click.argument('project')
def startvueapp(project):
    """
    Init vue project via vue-cli
    """
    outcome = VueJsBuilder.startproject(project)
    styled = click.style(outcome.message, fg=outcome.color)
    click.echo(styled)
@cli.command()
def vuedev():
    """
    Run frontend dev server via npm
    """
    # Thin wrapper over `npm run dev`.
    VueJs.dev()
@cli.command()
def vuebuild():
    """
    Build Vue.js project via npm
    """
    # Thin wrapper over `npm run build`.
    VueJs.build()
| python_vuejs/vuejs.py | 2,963 | Provide subprocess call to `npm` and `vue-cli`
Click entry point: vue-cli commands group
By convention all new cli has a cli function with a pass statement
Install vue-cli
Node and npm version checker
Init vue project via vue-cli
vue-cli version checker
Build Vue.js project via npm
Check if node > 5 and npm > 3 are installed
Run frontend dev server via npm
-*- coding: utf-8 -*- | 382 | en | 0.656057 |
import numpy as np
from PIL import Image, ImageDraw
from scipy import interpolate, ndimage, stats, signal, integrate, misc
from astropy.io import ascii, fits
from astropy.wcs import WCS
from astropy.coordinates import SkyCoord
import astropy.units as u
import astropy.constants as c
import corner as triangle # formerly dfm/triangle
# from astropy.modeling import models, fitting
from astropy.modeling.models import custom_model
from astropy.modeling.fitting import LevMarLSQFitter # , SimplexLSQFitter
import matplotlib.pyplot as plt
import matplotlib as mpl
import emcee
#import ipdb;
import pdb
# # # # # # # # # # # # # # # # # # # # # #
# make iPython print immediately
import sys
oldsysstdout = sys.stdout
class flushfile():
    """Stream proxy whose write() always flushes the wrapped stream."""

    def __init__(self, f):
        self.f = f

    def __getattr__(self, name):
        # Anything not defined here is looked up on the wrapped stream.
        return object.__getattribute__(self.f, name)

    def write(self, x):
        wrapped = self.f
        wrapped.write(x)
        wrapped.flush()

    def flush(self):
        self.f.flush()
# sys.stdout = flushfile(sys.stdout)
# sys.stdout = oldsysstdout
def rot_matrix(theta):
    '''
    rot_matrix(theta)
    2D rotation matrix for theta in radians
    returns numpy matrix
    '''
    cos_t = np.cos(theta)
    sin_t = np.sin(theta)
    return np.matrix([[cos_t, -sin_t], [sin_t, cos_t]])
def rectangle(c, w, h, angle=0, center=True):
    '''
    create rotated rectangle
    for input into PIL ImageDraw.polygon
    to make a rectangle polygon mask

    Rectangle is created and rotated with center
    at zero, and then translated to center position

    accepted centers:
    Default : center
    tl, tr, bl, br
    '''
    cx, cy = c
    # define initial polygon irrespective of center
    x = -w / 2., +w / 2., +w / 2., -w / 2.
    y = +h / 2., +h / 2., -h / 2., -h / 2.
    # correct center if starting from corner
    # e.g. 'bl' = bottom-left: shift the translation so that (cx, cy)
    # ends up at the requested corner rather than the centroid
    if center is not True:
        if center[0] == 'b':
            # y = tuple([i + h/2. for i in y])
            cy = cy + h / 2.
        else:
            # y = tuple([i - h/2. for i in y])
            cy = cy - h / 2.
        if center[1] == 'l':
            # x = tuple([i + w/2 for i in x])
            cx = cx + w / 2.
        else:
            # x = tuple([i - w/2 for i in x])
            cx = cx - w / 2.
    # rotate each corner about the origin, then translate to (cx, cy)
    R = rot_matrix(angle * np.pi / 180.)
    c = []
    for i in range(4):
        xr, yr = np.dot(R, np.asarray([x[i], y[i]])).A.ravel()
        # coord switch to match ordering of FITs dimensions
        c.append((cx + xr, cy + yr))
    # print (cx,cy)
    return c
def comp(arr):
    '''
    returns the compressed version
    of the input array if it is a
    numpy MaskedArray
    '''
    # Only MaskedArray exposes .compressed(); anything else is returned
    # unchanged.  Catch AttributeError specifically -- the original bare
    # except also swallowed unrelated errors.
    try:
        return arr.compressed()
    except AttributeError:
        return arr
def mavg(arr, n=2, mode='valid'):
    '''
    returns the moving average of an array.
    returned array is shorter by (n-1)
    '''
    kernel = [1. / float(n)] * n
    # FFT-based convolution only pays off for long inputs.
    convolve = signal.fftconvolve if len(arr) > 400 else signal.convolve
    return convolve(arr, kernel, mode=mode)
def mgeo(arr, n=2):
    '''
    Moving geometric mean over a window of n samples.
    Returns array of length len(arr) - (n-1).
    '''
    windows = (arr[i:i + n] for i in range(len(arr) - (n - 1)))
    return np.asarray([stats.gmean(w) for w in windows])
def avg(arr, n=2):
    '''
    NOT a general averaging function
    return bin centers (lin and log)
    '''
    step = np.diff(arr)
    # Linearly spaced bins have a palindromic diff (constant spacing);
    # otherwise assume log spacing and average in log10 space.
    if np.allclose(step, step[::-1]):
        return mavg(arr, n=n)
    return np.power(10., mavg(np.log10(arr), n=n))
def shift_bins(arr, phase=0, nonneg=False):
    # Shift bin edges by `phase` bin-widths; assumes the original bins
    # are non-negative.  Linearly spaced bins are shifted directly,
    # otherwise the shift is applied in log10 space.
    if phase == 0:
        return arr
    step = np.diff(arr)
    if np.allclose(step, step[::-1]):
        return arr + phase * step[0]
    log_arr = np.log10(arr)
    log_arr = log_arr + phase * np.diff(log_arr)[0]
    return np.power(10., log_arr)
def llspace(xmin, xmax, n=None, log=False, dx=None, dex=None):
    '''
    llspace(xmin, xmax, n = None, log = False, dx = None, dex = None)
    get values evenly spaced in linear or log spaced
    n [10] -- Optional -- number of steps
    log [false] : switch for log spacing
    dx : spacing for linear bins
    dex : spacing for log bins (in base 10)
    dx and dex override n
    '''
    xmin = float(xmin)
    xmax = float(xmax)
    if n is None and dx is None and dex is None:
        print('Error: Defaulting to 10 linears steps')
        n = 10.
    # Supplying dex (and not dx) implies log spacing.
    use_log = log or (dx is None and dex is not None)
    if use_log:
        if xmin == 0:
            print("log(0) is -inf. xmin must be > 0 for log spacing")
        xmin, xmax = np.log10(xmin), np.log10(xmax)
    if n is not None:
        # Derive the missing step size from the requested step count.
        if use_log and dex is None:
            dex = (xmax - xmin) / n
        elif not use_log and dx is None:
            dx = (xmax - xmin) / n
    if use_log:
        return np.power(10, np.arange(xmin, xmax + dex, dex))
    return np.arange(xmin, xmax + dx, dx)
def nametoradec(name):
    '''
    Get names formatted as
    hhmmss.ss+ddmmss to Decimal Degree
    only works for dec > 0 (splits on +, not -)
    Will fix this eventually...
    '''
    # NOTE(review): str(type(...)) contains 'string' only for numpy
    # string scalars (e.g. "<class 'numpy.string_'>"); a plain Python
    # str is "<class 'str'>" and falls into the iterable branch below,
    # where it would be iterated character by character -- confirm that
    # callers only pass numpy string types or sequences of names.
    if 'string' not in str(type(name)):
        # Iterable of names: convert each one.
        rightascen = []
        declinatio = []
        for n in name:
            ra, de = n.split('+')
            # hhmmss.ss -> hh:mm:ss.ss ; ddmmss -> dd:mm:ss
            ra = ra[0:2] + ':' + ra[2:4] + ':' + ra[4:6] + '.' + ra[6:8]
            de = de[0:2] + ':' + de[2:4] + ':' + de[4:6]
            coord = SkyCoord(ra, de, frame='icrs',
                             unit=('hourangle', 'degree'))
            rightascen.append(coord.ra.value)
            declinatio.append(coord.dec.value)
        return np.array(rightascen), np.array(declinatio)
    else:
        # Single name.
        ra, de = name.split('+')
        ra = ra[0:2] + ':' + ra[2:4] + ':' + ra[4:6] + '.' + ra[6:8]
        de = de[0:2] + ':' + de[2:4] + ':' + de[4:6]
        coord = SkyCoord(ra, de, frame='icrs', unit=('hourangle', 'degree'))
        return np.array(coord.ra.value), np.array(coord.dec.value)
def get_ext(extmap, errmap, extwcs, ra, de):
    '''
    Get the extinction (errors) for a particular position or
    list of positions
    More generally get the value (error) for a particular
    position given a wcs and world coordinates
    '''
    ra = np.array([ra]).flatten()
    de = np.array([de]).flatten()
    try:
        xp, yp = extwcs.all_world2pix(ra, de, 0)
    except AttributeError:
        # extwcs may be a raw header rather than a WCS object: wrap it.
        xp, yp = WCS(extwcs).all_world2pix(ra, de, 0)
    ext = []
    err = []
    for i in range(len(xp)):
        # Round the pixel coordinates to integers: modern numpy rejects
        # float indices, and the original applied int(round(...)) to the
        # loop index instead of the coordinate.
        xi = int(round(xp[i]))
        yi = int(round(yp[i]))
        try:
            ext.append(extmap[yi, xi])
            if errmap is not None:
                err.append(errmap[yi, xi])
        except IndexError:
            # Position falls outside the map.
            ext.append(np.nan)
            if errmap is not None:
                err.append(np.nan)
    if errmap is not None:
        return np.array(ext), np.array(err)
    return np.array(ext), None
def pdf(values, bins):
    '''
    ** Normalized differential area function. **
    (statistical) probability density function,
    normalized so that the integral is 1.
    The integral over a range is the probability that
    the value lies within that range.
    Returns array of size len(bins)-1
    Plot versus bins[:-1]
    '''
    if hasattr(bins, '__getitem__'):
        hist_range = (np.nanmin(bins), np.nanmax(bins))
    else:
        hist_range = None
    counts, edges = np.histogram(values, bins=bins, range=hist_range, density=False)
    # Pr(x) = dF(x)/dx: normalise by total count times bin width.
    density = counts / (np.sum(counts, dtype=float) * np.diff(edges))
    return density, avg(edges)
def pdf2(values, bins):
    '''
    The ~ PDF normalized so that
    the integral is equal to the
    total amount of a quantity.
    The integral over a range is the
    total amount within that range.
    Returns array of size len(bins)-1
    Plot versus bins[:-1]
    '''
    if hasattr(bins, '__getitem__'):
        hist_range = (np.nanmin(bins), np.nanmax(bins))
    else:
        hist_range = None
    counts, edges = np.histogram(values, bins=bins, range=hist_range, density=False)
    # Amount per unit bin width (deliberately NOT normalised to 1).
    density = counts.astype(float) / np.diff(edges)
    return density, avg(edges)
def edf(data, pdf=False):
    # Empirical distribution: rank index versus sorted sample values.
    # (`pdf` is accepted for interface compatibility but unused.)
    ranks = np.arange(len(data), dtype=float)
    ordered = np.sort(data).astype(float)
    return ranks, ordered
def cdf(values, bins):
    '''
    (statistical) cumulative distribution function
    Integral on [-inf, b] is the fraction below b.
    CDF is invariant to binning.
    This assumes you are using the entire range in the binning.
    Returns array of size len(bins)
    Plot versus bins[:-1]
    '''
    if hasattr(bins, '__getitem__'):
        hist_range = (np.nanmin(bins), np.nanmax(bins))
    else:
        hist_range = None
    counts, edges = np.histogram(values, bins=bins, range=hist_range, density=False)
    # Cumulative fraction below each bin edge; prepend 0 because
    # P(X < min(x)) = 0.
    frac = np.cumsum(counts / np.sum(counts, dtype=float))
    return np.append(0, frac), edges
def cdf2(values, bins):
    '''
    # # Exclusively for area_function which needs to be unnormalized
    (statistical) cumulative distribution function
    Value at b is total amount below b.
    CDF is invariant to binning.
    Plot versus bins[:-1]
    Not normalized to 1
    '''
    if hasattr(bins, '__getitem__'):
        hist_range = (np.nanmin(bins), np.nanmax(bins))
    else:
        hist_range = None
    counts, edges = np.histogram(values, bins=bins, range=hist_range, density=False)
    running = np.cumsum(counts).astype(float)
    return np.append(0., running), edges
def area_function(extmap, bins):
    '''
    Complimentary CDF for cdf2 (not normalized to 1)
    Value at b is total amount above b.
    '''
    below, edges = cdf2(extmap, bins)
    return below.max() - below, edges
def diff_area_function(extmap, bins, scale=1):
    '''
    Differential of the area function (see pdf2): -dS/dx evaluated at
    bin centers, optionally multiplied by `scale`.
    '''
    area, edges = area_function(extmap, bins)
    slope = -np.diff(area) / np.diff(edges)
    return slope * scale, avg(edges)
def log_diff_area_function(extmap, bins):
    '''
    Logarithmic slope of the differential area function (see pdf2):
    d ln S / d ln x, using only points where S > 0.
    '''
    surf, centers = diff_area_function(extmap, bins)
    positive = surf > 0
    slope = np.diff(np.log(surf[positive])) / np.diff(np.log(centers[positive]))
    return slope, avg(centers[positive])
def mass_function(values, bins, scale=1, aktomassd=183):
    '''
    M(>Ak): mass-weighted complementary CDF.

    Each value is weighted by values*aktomassd*scale before histogramming
    (aktomassd converts extinction Ak to mass surface density).
    Returns (mass_above, bins).
    '''
    # Renamed from `range` to avoid shadowing the builtin.
    if hasattr(bins, '__getitem__'):
        hist_range = (np.nanmin(bins), np.nanmax(bins))
    else:
        hist_range = None
    h, bins = np.histogram(values, bins=bins, range=hist_range, density=False,
                           weights=values * aktomassd * scale)
    c = np.cumsum(h).astype(float)
    return c.max() - c, bins
def hist(values, bins, err=False, density=False, **kwargs):
    '''
    Thin wrapper around numpy.histogram that returns bin centers.

    Returns (counts, centers) or, when err is truthy,
    (counts, centers, sqrt(counts)) as Poisson errors.
    '''
    # Renamed from `range` to avoid shadowing the builtin.
    if hasattr(bins, '__getitem__'):
        hist_range = (np.nanmin(bins), np.nanmax(bins))
    else:
        hist_range = None
    # Local renamed from `hist` so it no longer shadows this function;
    # np.float was removed from NumPy (>=1.20) -- builtin float is equivalent.
    counts, x = np.histogram(values, bins=bins, range=hist_range,
                             density=density, **kwargs)
    if (err is None) or (err is False):
        return counts.astype(float), avg(x)
    else:
        return counts.astype(float), avg(x), np.sqrt(counts)
def bootstrap(X, X_err=None, n=None, smooth=False):
    '''
    (smooth) bootstrap
    bootstrap(X,Xerr,n,smooth=True)
    X : array to be resampled
    X_err [optional]: errors to perturb data for smooth bootstrap
                      only provide if doing smooth bootstrapping
    n : number of samples. Default - len(X)
    smooth: optionally use smooth bootstrapping.
            will be set to False if no X_err is provided
    '''
    data = np.asarray(X)
    if X_err is None:
        smooth = False  # cannot perturb without errors
    if n is None:
        n = len(X)  # default sample size
    idx = np.random.randint(0, len(X), size=(n,))
    sample = data[idx]
    if smooth:
        sample = np.random.normal(sample, np.asarray(X_err)[idx])
    return sample
def num_above(values, level):
    """Count of finite values >= level, returned as a float.

    np.float was removed from NumPy (>=1.20); builtin float is equivalent.
    """
    return np.sum((values >= level) & np.isfinite(values), dtype=float)
def num_below(values, level):
    """Count of finite values < level, returned as a float.

    np.float was removed from NumPy (>=1.20); builtin float is equivalent.
    """
    return np.sum((values < level) & np.isfinite(values), dtype=float)
def alpha_ML(data, xmin, xmax):
    '''
    Maximum-likelihood power-law index estimator with its error,
    following Clauset et al. 2010. Data outside [xmin, xmax] and
    non-finite values are discarded first.

    Returns (alpha, error, loglike, xmin, xmax).
    '''
    clean = data[np.isfinite(data)]
    clean = clean[(clean >= xmin) & (clean <= xmax)]
    n = len(clean)
    log_ratio = np.sum(np.log(clean / xmin))
    alpha = 1 + n * (log_ratio ** (-1))
    error = (alpha - 1) / np.sqrt(n)
    #loglike = np.sum((-1+alpha)*np.log(xmin)-alpha*np.log(data)+np.log(-1+alpha))
    loglike = n * np.log(alpha - 1) - n * np.log(xmin) - alpha * log_ratio
    return alpha, error, loglike, xmin, xmax
def sigconf1d(n):
    """Percent confidence levels for n-sigma of a 1D Gaussian.

    Returns (lower_tail_pct, cdf_pct, two_sided_pct).
    """
    frac = special.erf(n / np.sqrt(2))
    upper = (1 / 2.) * (1 + frac)
    return (1 - upper) * 100, 100 * upper, 100 * frac
def surfd(X, Xmap, bins, Xerr=None, Xmaperr=None, boot=False, scale=1., return_err=False, smooth=False):
    '''
    call: surfd(X, map, bins,
                xerr = None, merr = None, scale = 1.)
    calculates H(X)/H(M) = Nx pdf(x) dx / Nm pdf(m) dm ; dm = dx
    so it is independent of whether dx or dlog(x)

    With boot=True the sample and map are first (smooth-)bootstrapped.
    With return_err=True also returns the propagated Poisson error.
    '''
    hist_range = (bins.min(), bins.max())
    if boot:
        sample = bootstrap(X, Xerr, smooth=True)
        field = bootstrap(Xmap, Xmaperr, smooth=True)
    else:
        sample, field = X, Xmap
    n = np.histogram(sample, bins=bins, range=hist_range)[0]
    s = np.histogram(field, bins=bins, range=hist_range)[0] * scale
    ratio = n / s
    if return_err:
        return ratio, ratio * np.sqrt(1. / n - scale / s)
    return ratio
def alpha(y, x, err=None, return_kappa=False, cov=False):
    '''
    Fit a power law y = kappa * x**m by linear regression in log-log space.

    this returns -1*alpha (the slope m), and optionally kappa and errors:
      return_kappa=False, cov=False -> m
      return_kappa=False, cov=True  -> m, m_err
      return_kappa=True,  cov=False -> m, kappa
      return_kappa=True,  cov=True  -> m, kappa, m_err, b_err
    '''
    # Keep indices where x*y is nonzero and the sum is finite.
    nonzero = set(np.nonzero(np.multiply(x, y))[0])
    # NOTE(review): np.add(x, y, err) treats a non-None `err` as the *output*
    # array, overwriting it with x+y -- this looks unintended; confirm.
    finite = set(np.where(np.isfinite(np.add(x, y, err)))[0])
    a = np.asarray(list(nonzero & finite))
    y = np.log(y[a])
    x = np.log(x[a])
    if err is None:
        p, covar = np.polyfit(x, y, 1, cov=True)
        m, b = p
        # 1-sigma errors from the diagonal of the covariance matrix.
        me, be = np.sqrt(np.sum(covar * [[1, 0], [0, 1]], axis=1))
    else:
        err = err[a]
        err = err / y  # fractional error in log space
        p, covar = np.polyfit(x, y, 1, w=1. / err**2, cov=True)
        m, b = p
        me, be = np.sqrt(np.sum(covar * [[1, 0], [0, 1]], axis=1))
    # (Removed two dead no-op `me, be` expression statements.)
    if return_kappa:
        if cov:
            return m, np.exp(b), me, be
        return m, np.exp(b)
    if cov:
        return m, me
    return m
def Heaviside(x):
    """Heaviside step function: 0 for x<0, 0.5 at x==0, 1 for x>0."""
    return (np.sign(x) + 1.) / 2.
def schmidt_law(Ak, theta):
    '''
    schmidt_law(Ak,(beta,kappa))
    Evaluate kappa * Ak**beta; beta is the power law index (same as alpha).

    theta = (beta, kappa) for a pure power law, or
    theta = (beta, kappa, Ak0) for a power law that is zeroed below the
    extinction threshold Ak0.
    '''
    n_params = len(theta)
    if n_params == 2:
        beta, kappa = theta
        return kappa * (Ak ** beta)
    elif n_params == 3:
        beta, kappa, Ak0 = theta
        rate = Heaviside(Ak - Ak0) * kappa * (Ak ** beta)
        rate[Ak < Ak0] = 0  # zero (not NaN) below the threshold
        return rate
def lmfit_powerlaw(x, y, yerr=None, xmin=-np.inf, xmax=np.inf, init=None, maxiter=1000000):
    """Fit y = kappa * x**beta in log space with astropy's LevMar fitter.

    NOTE(review): `init` is indexed when the model class is defined, so
    calling with init=None raises TypeError -- callers must always pass
    init=(beta0, kappa0); confirm this is intended.
    Returns (fitted_model, fitter); best-fit values are in m.parameters.
    """
    @custom_model
    def model(x, beta=init[0], kappa=init[1]):
        # Fit runs on log(y) vs log(x); exp(x) undoes the log applied below.
        return np.log(kappa * (np.exp(x) ** beta))
    # 1./y is finite only for finite nonzero y; also restrict to [xmin, xmax].
    keep = np.isfinite(1. / y) & (x >= xmin) & (x <= xmax)
    if yerr is not None:
        keep = keep & np.isfinite(1. / yerr)
    m_init = model()
    fit = LevMarLSQFitter()
    #weights = (yerr / y)[keep]**(-2.)
    m = fit(m_init, np.log(x[keep]), np.log(y[keep]), maxiter=maxiter)
    return m, fit
def fit_lmfit_schmidt(x, y, yerr, init=None):
    """Convenience wrapper around lmfit_powerlaw that returns only the
    best-fit parameter array."""
    fitted_model, _fitter = lmfit_powerlaw(x, y, yerr, init=init)
    return fitted_model.parameters
def emcee_schmidt(x, y, yerr, pos=None, pose=None,
                  nwalkers=None, nsteps=None, burnin=200,verbose=True):
    '''
    emcee_schmidt provides a convenient wrapper for fitting the schimdt law
    to binned x,log(y) data. Generally, it fits a normalization and a slope

    x, y, yerr : binned data; y is in log space (the model returns logs)
    pos : initial parameter guess; its length sets the dimensionality
    pose : per-parameter spread used to scatter the initial walker ball
    burnin : leading steps discarded from every walker chain
    Returns (sampler, median(samples, axis=0), std(samples, axis=0)).
    '''
    def model(x, theta):
        '''
        theta = (beta, kappa)
        '''
        return np.log(schmidt_law(x, theta))
    def lnlike(theta, x, y, yerr):
        # Gaussian (chi^2) log-likelihood of the data given theta.
        mod = model(x, theta)
        inv_sigma2 = 1 / yerr**2
        # Poisson statistics -- not using this
        #mu = (yerr)**2 # often called lambda = poisson variance for bin x_i
        #resid = np.abs(y - mod) # where w calculate the poisson probability
        #return np.sum(resid * np.log(mu) - mu) - np.sum(np.log(misc.factorial(resid)))
        #######################################################
        ########## CHI^2 log-likelihood #######################
        return -0.5 * (np.sum((y - mod)**2 * inv_sigma2))# - 0.5 * 3 * np.log(np.sum(k))
    def lnprior(theta):
        # different priors for different version of
        # the schmidt law (flat priors; -inf outside the allowed box)
        if len(theta) == 3:
            beta, kappa, Ak0 = theta
            c3 = 0. < Ak0 <= 5.
            c4 = True
        else:
            beta, kappa = theta
            c3 = True
            c4 = True
        c1 = 0 <= beta <= 6# Never run's into this region
        c2 = 0 <= kappa # Never run's into this region
        if c1 and c2 and c3 and c4:
            return 0.0
        return -np.inf
    def lnprob(theta, x, y, yerr):
        ## update likelihood: posterior = prior + likelihood (in log space)
        lp = lnprior(theta)
        if not np.isfinite(lp):
            return -np.inf
        return lp + lnlike(theta, x, y, yerr)
    ndim, nwalkers = len(pos), nwalkers
    # Scatter walkers in a ball around pos, jittered by up to +/-0.25*pose.
    pos = [np.array(pos) + np.array(pose) * 0.5 *
           (0.5 - np.random.rand(ndim)) for i in range(nwalkers)]
    sampler = emcee.EnsembleSampler(
        nwalkers, ndim, lnprob, args=(x, y, yerr))
    sampler.run_mcmc(pos, nsteps)
    # Get input values
    # x, y, yerr = sampler.args
    # NOTE(review): `sampler.ndim` and `sampler.acor` differ between emcee
    # major versions (emcee<3 used `dim`/`acor`) -- confirm installed version.
    samples = sampler.chain[:, burnin:, :].reshape((-1, sampler.ndim))
    # # Print out final values # #
    # 16th/50th/84th percentiles -> median with +/- 1-sigma intervals.
    theta_mcmc = np.percentile(samples, [16, 50, 84], axis=0).T
    if verbose: print(sampler.acor)
    if verbose:
        for i, item in enumerate(theta_mcmc):
            j = ['beta', 'kappa', 'A_{K,0}', 'A_{K,f}']
            inserts = (j[i], item[1], item[2] - item[1], item[1] - item[0])
            print('%s = %0.2f (+%0.2f,-%0.2f)' % inserts)
    return sampler, np.median(samples, axis=0), np.std(samples, axis=0)
def fit(bins, samp, samperr, maps, mapserr, scale=1., sampler=None, log=False,
        pos=None, pose=None, nwalkers=100, nsteps=1e4, boot=1000, burnin=200,
        threshold=False, threshold2=False,verbose=True):
    '''
    # # # A Schmidt Law fitting Function using EMCEE by D.F.M.
    fit(bins, samp, samperr, maps, mapserr, scale=1.,
        pos=None, pose=None, nwalkers=100, nsteps=1e4)
    bins: bin edges for binning data (I know it's bad to bin)
    samp : values for your sample
    samperr : errors on values for you sample
    maps: map of values from which you drew your sample
    mapserr: error on maps...
    pos : initial location of ball of walkers
    pose : initial spread of walkers

    Returns (sampler, x, y, yerr, theta, theta_std) when the MCMC is run,
    or (sampler, x, y, yerr) when a sampler was passed in.
    '''
    #print 'Hi!. It\'s hammer time...'
    # x values are bin midpoints
    x = avg(bins)  # assume if log=True, then bins are already log
    # x = bins[:-1]
    # y = np.asarray([surfd(samp,maps,bins,boot=True,scale=scale) for i in xrange(boot)])
    # yerr = np.nanstd(y,axis=0)
    #if log:
    #    samp = np.log10(samp)
    #    maps = np.log10(maps)
    #    bins = np.log10(bins) # because bins doesn't get used again after surfd
    y, yerr = surfd(samp, maps, bins, scale=scale, return_err=True)
    ###########################################+
    ###### ADDED FOR SHIFTING EXPERIMENT ######+
    ###########################################+
    # Re-measure on half-bin-shifted edges and interleave both measurements
    # to double the sampling of the surface-density curve.
    # (Removed a stray no-op `bin` expression statement that was left here.)
    bins2 = shift_bins(bins, 0.5)
    x2 = avg(bins2)
    y2, yerr2 = surfd(samp, maps, bins2, scale=scale, return_err=True)
    concatx = np.concatenate((x, x2))
    concaty = np.concatenate((y, y2))
    concatyerr = np.concatenate((yerr, yerr2))
    srt = np.argsort(concatx)
    x = concatx[srt]
    y = concaty[srt]
    yerr = concatyerr[srt]
    # Keep only points with finite nonzero y and finite nonzero errors.
    nonzero = np.isfinite(1. / y) & np.isfinite(yerr) & np.isfinite(1. / yerr)
    y = y[nonzero]
    yerr = yerr[nonzero]
    x = x[nonzero]
    # initialize walker positions and walker bundle size
    init = alpha(y, x, return_kappa=True, cov=True)
    if pos is None:
        pos = init[:2]
    if pose is None:
        if np.isnan(init[2] + init[3]):
            pose = (1, 1)
        else:
            pose = (init[2], init[3])
    if threshold | threshold2:
        pos = pos + (0.4,)
        pose = pose + (0.2,)
    if threshold2:
        pos = pos + (8.,)
        pose = pose + (.5,)
    #print pos
    #print pose
    pos = np.asarray(pos)
    pose = .1 * pos  # np.asarray(pose)
    # This function only fits sources, it doesn't plot, so don't pass
    # and emcee sampler type. it will spit it back out
    # # # # # # # RUN EMCEE # # # # # # #
    # pdb.set_trace()
    if sampler is None:
        if verbose: print('Sampler autocorrelation times . . .')
        sampler, theta, theta_std = emcee_schmidt(x, np.log(y), yerr/y,
                                                  pos=pos, pose=pose,
                                                  nwalkers=nwalkers,
                                                  nsteps=nsteps, burnin=burnin,verbose=verbose)
    else:
        print('Next time don\'t give me a ' + str(type(sampler)) + '.')
    #
    try:
        # theta/theta_std only exist when the sampler was run above.
        return sampler, x, y, yerr, theta, theta_std
    except NameError:
        return sampler, x, y, yerr
def schmidt_results_plots(sampler, model, x, y, yerr, burnin=200, akmap=None,
                          bins=None, scale=None, triangle_plot=True):
    '''
    model: should pass schmidt_law()

    Flattens the post-burn-in chain, prints percentile parameter estimates,
    optionally draws a corner plot, then plots the median fit with
    1/2/3-sigma bands over the data. Returns the current axes.
    '''
    try:
        mpl.style.use('john')
    except Exception:
        # Custom style is optional; fall back to the active style.
        # (Was a bare `except: None`, which also swallowed KeyboardInterrupt.)
        pass
    # Get input values
    # x, y, yerr = sampler.args
    if hasattr(sampler, '__getitem__'):
        chain = sampler
        dim = chain.shape[-1]
    else:
        chain = sampler.chain
        dim = sampler.dim
    samples = chain[:, burnin:, :].reshape((-1, dim))
    # # Print out final values # #
    theta_mcmc = np.percentile(samples, [16, 50, 84], axis=0).T  # Get percentiles for each parameter
    n_params = len(theta_mcmc[:, 1])
    #print n_params
    for i, item in enumerate(theta_mcmc):
        j = ['beta', 'kappa', 'A_{K,0}', 'A_{K,f}']
        inserts = (j[i], item[1], item[2] - item[1], item[1] - item[0])
        print('%s = %0.2f (+%0.2f,-%0.2f)' % inserts)
    # Plot corner plot
    if triangle_plot:
        if n_params == 3:
            labels = ['beta', 'kappa', 'A_{K,0}']
        elif n_params == 4:
            labels = ['beta', 'kappa', 'A_{K,0}', 'A_{K,f}']
        else:
            labels = ['beta', 'kappa']
        #print labels
        _ = triangle.corner(samples, labels=labels,
                            truths=theta_mcmc[:, 1], quantiles=[.16, .84],
                            verbose=False)
    # generate schmidt laws from parameter samples
    xln = np.logspace(np.log10(x.min() * .5), np.log10(x.max() * 2.), 100)
    smlaw_samps = np.asarray([schmidt_law(xln, samp) for samp in samples])
    # get percentile bands
    percent = lambda x: np.nanpercentile(smlaw_samps, x, interpolation='linear', axis=0)
    # Plot fits
    fig = plt.figure()
    # Plot data with errorbars
    plt.plot(xln, percent(50), 'k')  # median curve
    # yperr = np.abs(np.exp(np.log(y)+yerr/y) - y)
    # ynerr = np.abs(np.exp(np.log(y)-yerr/y) - y)
    plt.errorbar(x, y, yerr, fmt='rs', alpha=0.7, mec='none')
    plt.legend(['Median', 'Data'],
               loc='upper left', fontsize=12)
    # draw 1,2,3 sigma bands (comments fixed: 1-99 is the widest band)
    plt.fill_between(xln, percent(1), percent(99), color='0.9')  # ~3 sigma band
    plt.fill_between(xln, percent(2), percent(98), color='0.75')  # ~2 sigma band
    plt.fill_between(xln, percent(16), percent(84), color='0.5')  # 1 sigma band
    # NOTE(review): `nonposy` was renamed `nonpositive` in matplotlib 3.3 --
    # confirm against the pinned matplotlib version.
    plt.loglog(nonposy='clip')
    return plt.gca()
def flatchain(chain):
    """Collapse walker and step axes into one: (..., ndim) -> (N, ndim)."""
    ndim = chain.shape[-1]
    return chain.reshape((-1, ndim))
def norm_chain(chain, axis=0):
    """Standardize a chain: (chain - median) / std, with the statistics
    computed over the flattened (N, ndim) view of the samples."""
    flat = chain.reshape((-1, chain.shape[-1]))
    spread = np.std(flat, axis=axis)
    center = np.median(flat, axis=axis)
    return (chain - center) / spread
def plot_walkers(sampler,limits = None, bad = None):
    '''
    sampler : emcee Sampler class

    limits : optional (start, stop, step) applied along the step axis;
        a bare int is treated as the stop value
    bad : optional per-walker boolean; flagged walkers are drawn in red
    Returns the matplotlib figure, one subplot row per parameter.
    '''
    if hasattr(sampler,'__getitem__'):
        # A raw chain array was passed instead of a sampler object.
        chain = sampler
        ndim = chain.shape[-1]
    else:
        chain = sampler.chain
        # NOTE(review): attribute was `dim` in emcee<3 -- confirm version.
        ndim = sampler.ndim
    fig = plt.figure(figsize=(8 * ndim, 4 * ndim))
    if hasattr(limits,'__getitem__'):
        # Pad to a full (start, stop, step) triple before slicing.
        limits += [None] * (3-len(limits))
        slices = slice(limits[0],limits[1],limits[2])
    else:
        slices = slice(None,limits,None)
    for w,walk in enumerate(chain[:,slices,:]):
        if bad is None:
            color = 'k'
        elif bad[w]:
            color = 'r'
        else:
            color = 'k'
        for p, param in enumerate(walk.T):
            ax = plt.subplot(ndim, 1, p + 1)
            ax.plot(param, color, alpha=.75, lw=0.75)
            # ax.set_ylim(param.min()*0.5,param.max()*1.5)
            # ax.semilogy()
    plt.tight_layout()
    return fig
def tester():
    """Smoke-test hook: prints a greeting to stdout."""
    print("hi ya'll")
| schmidt_funcs.py | 26,232 | this returns -1*alpha, and optionally kappa and errors
uses maximum likelihood to estimation
to determine power-law and error
From Clauset et al. 2010
Complimentary CDF for cdf2 (not normalized to 1)
Value at b is total amount above b.
NOT a general averaging function
return bin centers (lin and log)
(smooth) bootstrap
bootstrap(X,Xerr,n,smooth=True)
X : array to be resampled
X_err [optional]: errors to perturb data for smooth bootstrap
only provide is doing smooth bootstrapping
n : number of samples. Default - len(X)
smooth: optionally use smooth bootstrapping.
will be set to False if no X_err is provided
(statistical) cumulative distribution function
Integral on [-inf, b] is the fraction below b.
CDF is invariant to binning.
This assumes you are using the entire range in the binning.
Returns array of size len(bins)
Plot versus bins[:-1]
# # Exclusively for area_function which needs to be unnormalized
(statistical) cumulative distribution function
Value at b is total amount below b.
CDF is invariante to binning
Plot versus bins[:-1]
Not normalized to 1
returns the compressed version
of the input array if it is a
numpy MaskedArray
See pdf2
emcee_schmidt provides a convenient wrapper for fitting the schimdt law
to binned x,log(y) data. Generally, it fits a normalization and a slope
# # # A Schmidt Law fitting Function using EMCEE by D.F.M.
fit(bins, samp, samperr, maps, mapserr, scale=1.,
pos=None, pose=None, nwalkers=100, nsteps=1e4)
bins: bin edges for binning data (I know it's bad to bin)
samp : values for your sample
samperr : errors on values for you sample
maps: map of values from which you drew your sample
mapserr: error on maps...
pos : initial location of ball of walkers
pose : initial spread of walkers
Get the extinction (errors) for a particular position or
list of positions
More generally get the value (error) for a particular
position given a wcs and world coordinates
really just a wrapper for numpy.histogram
llspace(xmin, xmax, n = None, log = False, dx = None, dex = None)
get values evenly spaced in linear or log spaced
n [10] -- Optional -- number of steps
log [false] : switch for log spacing
dx : spacing for linear bins
dex : spacing for log bins (in base 10)
dx and dex override n
See pdf2
M(>Ak), mass weighted complimentary cdf
returns the moving average of an array.
returned array is shorter by (n-1)
Returns array of lenth len(arr) - (n-1)
# # written by me
# # slower for short loops
# # faster for n ~ len(arr) and large arr
a = []
for i in xrange(len(arr)-(n-1)):
a.append(stats.gmean(arr[i:n+i]))
# # Original method# #
# # written by me ... ~10x faster for short arrays
b = np.array([np.roll(np.pad(arr,(0,n),mode='constant',constant_values=1),i)
for i in xrange(n)])
return np.product(b,axis=0)[n-1:-n]**(1./float(n))
theta = (beta, kappa)
Get names formatted as
hhmmss.ss+ddmmss to Decimal Degree
only works for dec > 0 (splits on +, not -)
Will fix this eventually...
** Normalized differential area function. **
(statistical) probability denisty function
normalized so that the integral is 1
and. The integral over a range is the
probability of the value is within
that range.
Returns array of size len(bins)-1
Plot versus bins[:-1]
The ~ PDF normalized so that
the integral is equal to the
total amount of a quantity.
The integral over a range is the
total amount within that range.
Returns array of size len(bins)-1
Plot versus bins[:-1]
sampler : emcee Sampler class
create rotated rectangle
for input into PIL ImageDraw.polygon
to make a rectangle polygon mask
Rectagle is created and rotated with center
at zero, and then translated to center position
accepters centers
Default : center
tl, tr, bl, br
rot_matrix(theta)
2D rotation matrix for theta in radians
returns numpy matrix
schmidt_law(Ak,(beta,kappa))
beta is the power law index (same as alpha)
model: should pass schmidt_law()
call: surfd(X, map, bins,
xerr = None, merr = None, scale = 1.)
calculates H(X)/H(M) = Nx pdf(x) dx / Nm pdf(m) dm ; dm = dx
so it is independent of whether dx or dlog(x)
formerly dfm/triangle from astropy.modeling import models, fitting , SimplexLSQFitterimport ipdb; make iPython print immediately sys.stdout = flushfile(sys.stdout) sys.stdout = oldsysstdout define initial polygon irrespective of center correct center if starting from corner y = tuple([i + h/2. for i in y]) y = tuple([i - h/2. for i in y]) x = tuple([i + w/2 for i in x]) x = tuple([i - w/2 for i in x]) coord switch to match ordering of FITs dimensions print (cx,cy) 2nd derivative of linear bin is 0 return mgeo(arr, n=n) equivalent methods, only easier assume original bins are nonnegpre = arr[0] + phase*diff either user specifies log or gives dex and not dx print nisNone, dxisNone, dexisNone, log for debugging logic this will make dex or dx if they are not specified if want log but dex not given print dex else if want lin but dx not given takes floorprint dxreturn np.power(10, np.linspace(xmin, xmax , (xmax - xmin)/dex + 1))return np.linspace(xmin, xmax, (xmax-xmin)/dx + 1) From the definition of Pr(x) = dF(x)/dx this is the correct form. It returns the correct probabilities when tested returns int cumulative fraction below bin_k append 0 to beginning because P( X < min(x)) = 0 default nloglike = np.sum((-1+alpha)*np.log(xmin)-alpha*np.log(data)+np.log(-1+alpha)) get dn/dxnp.nan kappa * (Ak0 ** beta)weights = (yerr / y)[keep]**(-2.) Poisson statistics -- not using thismu = (yerr)**2 often called lambda = poisson variance for bin x_iresid = np.abs(y - mod) where w calculate the poisson probabilityreturn np.sum(resid * np.log(mu) - mu) - np.sum(np.log(misc.factorial(resid))) CHI^2 log-likelihood - 0.5 * 3 * np.log(np.sum(k)) different priors for different version of the schmidt law Never run's into this region Never run's into this region update likelihood Get input values x, y, yerr = sampler.args Print out final values print 'Hi!. It\'s hammer time...' 
x values are bin midpoints assume if log=True, then bins are already log x = bins[:-1] y = np.asarray([surfd(samp,maps,bins,boot=True,scale=scale) for i in xrange(boot)]) yerr = np.nanstd(y,axis=0)if log: samp = np.log10(samp) maps = np.log10(maps) bins = np.log10(bins) because bins doesn't get used again after surfd+ ADDED FOR SHIFTING EXPERIMENT ++ initialize walker positions and walker bundle sizeprint posprint posenp.asarray(pose) This function only fits sources, it doesn't plot, so don't pass and emcee sampler type. it will spit it back out RUN EMCEE pdb.set_trace() Get input values x, y, yerr = sampler.args Print out final values Get percentiles for each parameterprint n_params Plot corner plotprint labels generate schmidt laws from parameter samples get percentile bands Plot fits Plot data with errorbars 3 sigma band yperr = np.abs(np.exp(np.log(y)+yerr/y) - y) ynerr = np.abs(np.exp(np.log(y)-yerr/y) - y) draw 1,2,3 sigma bands 1 sigma band 2 sigma band 3 sigma band ax.set_ylim(param.min()*0.5,param.max()*1.5) ax.semilogy() | 7,099 | en | 0.649984 |
"""A word2vec implementation using Tensorflow and estimators."""
import os
from collections import defaultdict
import logging
import tensorflow as tf
# from tensorflow.python import debug as tf_debug # pylint: disable=E0611
import word2vec.utils.datasets as datasets_utils
import word2vec.models.word2vec as w2v_model
from word2vec.evaluation.men import MEN
logger = logging.getLogger(__name__)

# Module public API. The trailing comma matters: without it,
# ('Word2Vec') is just a parenthesized string, not a 1-tuple.
__all__ = ('Word2Vec',)
class Word2Vec():
    """Tensorflow implementation of Word2vec."""

    def __init__(self):
        """Initialize vocab dictionaries."""
        # _words and _counts are parallel lists (word i occurs _counts[i]
        # times); _total_count is the sum of the kept counts.
        self._words = []
        self._counts = []
        self._total_count = 0

    @property
    def vocab_size(self):
        """Return the number of items in vocabulary.

        Since we use len(word_freq_dict) as the default index for UKN in
        the index_table, we have to add 1 to the length
        """
        return len(self._words) + 1

    def build_vocab(self, data_filepath, vocab_filepath, min_count):
        """Create vocabulary-related data.

        Counts whitespace-separated tokens in data_filepath, writes every
        word with its count to vocab_filepath, and keeps in memory only the
        words whose count is >= min_count.
        """
        logger.info('Building vocabulary from file {}'.format(data_filepath))
        logger.info('Loading word counts...')
        if self.vocab_size > 1:
            logger.warning('This instance of W2V\'s vocabulary does not seem '
                           'to be empty. Erasing previously stored vocab...')
            self._words, self._counts, self._total_count = [], [], 0
        word_count_dict = defaultdict(int)
        with open(data_filepath, 'r') as data_stream:
            for line in data_stream:
                for word in line.strip().split():
                    word_count_dict[word] += 1
        logger.info('Saving word frequencies to file: {}'
                    .format(vocab_filepath))
        with open(vocab_filepath, 'w') as vocab_stream:
            # words need to be sorted in decreasing frequency to be able
            # to rely on the default tf.nn.log_uniform_candidate_sampler
            # later on in the tf.nn.nce_loss
            for word, count in sorted(word_count_dict.items(),
                                      key=lambda x: x[1], reverse=True):
                print('{}\t{}'.format(word, count), file=vocab_stream)
                if count >= min_count:
                    self._words.append(word)
                    self._counts.append(count)
                    self._total_count += count

    def load_vocab(self, vocab_filepath, min_count):
        """Load a previously saved vocabulary file.

        Expects one 'word\\tcount' line per word; words below min_count
        are skipped.
        """
        logger.info('Loading word counts from file {}'.format(vocab_filepath))
        self._words, self._counts, self._total_count = [], [], 0
        with open(vocab_filepath, 'r', encoding='UTF-8') as vocab_stream:
            for line in vocab_stream:
                word_count = line.strip().split('\t', 1)
                word, count = word_count[0], int(word_count[1])
                if count >= min_count:
                    self._words.append(word)
                    self._counts.append(count)
                    self._total_count += count
        logger.info('Done loading word counts')

    # pylint: disable=R0914,W0613
    def train(self, train_mode, training_data_filepath, model_dirpath,
              batch_size, embedding_size, num_neg_samples,
              learning_rate, window_size, num_epochs, sampling_rate,
              p_num_threads, t_num_threads, shuffling_buffer_size,
              save_summary_steps, save_checkpoints_steps, keep_checkpoint_max,
              log_step_count_steps, debug, debug_port, xla):
        """Train Word2Vec.

        Raises when no vocabulary has been built/loaded yet, when
        train_mode is not 'cbow'/'skipgram', and when debug is requested
        (v2 debug hooks are not available yet).
        """
        if self.vocab_size == 1:
            raise Exception('You need to build or load a vocabulary before '
                            'training word2vec')
        if train_mode not in ('cbow', 'skipgram'):
            raise Exception('Unsupported train_mode \'{}\''.format(train_mode))
        sess_config = tf.compat.v1.ConfigProto(log_device_placement=True)
        sess_config.intra_op_parallelism_threads = t_num_threads
        sess_config.inter_op_parallelism_threads = t_num_threads
        # if xla:
        #     sess_config.graph_options.optimizer_options.global_jit_level = \
        #         tf.OptimizerOptions.ON_1  # JIT compilation on GPU
        run_config = tf.estimator.RunConfig(
            session_config=sess_config, save_summary_steps=save_summary_steps,
            save_checkpoints_steps=save_checkpoints_steps,
            keep_checkpoint_max=keep_checkpoint_max,
            log_step_count_steps=log_step_count_steps)
        estimator = tf.estimator.Estimator(
            model_fn=w2v_model.model,
            model_dir=model_dirpath,
            config=run_config,
            params={
                'mode': train_mode,
                'vocab_size': self.vocab_size,
                'batch_size': batch_size,
                'embedding_size': embedding_size,
                'num_neg_samples': num_neg_samples,
                'learning_rate': learning_rate,
                'words': self._words,
                'p_num_threads': p_num_threads,
                'xla': xla,
                'men': MEN(os.path.join(
                    os.path.dirname(os.path.dirname(__file__)),
                    'resources', 'MEN_dataset_natural_form_full'))
            })
        # waiting for v2 fix in tf.summary.FileWriter:
        tf.compat.v1.disable_eager_execution()
        if debug:
            raise Exception('Unsupported parameter: waiting for the TF team '
                            'to release v2 equivalents for TensorBoardDebugHook')
            # hooks = [tf.estimator.ProfilerHook(
            #     save_steps=save_summary_steps, show_dataflow=True,
            #     show_memory=True, output_dir=model_dirpath),
            #     tf_debug.TensorBoardDebugHook('localhost:{}'
            #                                   .format(debug_port))]
        # else:
        hooks = [tf.estimator.ProfilerHook(
            save_steps=save_summary_steps, show_dataflow=True,
            show_memory=True, output_dir=model_dirpath)]
        estimator.train(
            input_fn=lambda: datasets_utils.get_w2v_train_dataset(
                training_data_filepath, train_mode, self._words, self._counts,
                self._total_count, window_size, sampling_rate, batch_size,
                num_epochs, p_num_threads, shuffling_buffer_size),
            hooks=hooks)
| word2vec/estimators/word2vec.py | 6,383 | Tensorflow implementation of Word2vec.
Initialize vocab dictionaries.
Create vocabulary-related data.
Load a previously saved vocabulary file.
Train Word2Vec.
Return the number of items in vocabulary.
Since we use len(word_freq_dict) as the default index for UKN in
the index_table, we have to add 1 to the length
A word2vec implementation using Tensorflow and estimators.
from tensorflow.python import debug as tf_debug pylint: disable=E0611 words need to be sorted in decreasing frequency to be able to rely on the default tf.nn.log_uniform_candidate_sampler later on in the tf.nn.nce_loss pylint: disable=R0914,W0613 if xla: sess_config.graph_options.optimizer_options.global_jit_level = \ tf.OptimizerOptions.ON_1 JIT compilation on GPU waiting for v2 fix in tf.summary.FileWriter: hooks = [tf.estimator.ProfilerHook( save_steps=save_summary_steps, show_dataflow=True, show_memory=True, output_dir=model_dirpath), tf_debug.TensorBoardDebugHook('localhost:{}' .format(debug_port))] else: | 1,062 | en | 0.553409 |
import argparse
import collections
import fnmatch
import os.path
import pprint
import re
import sys
#######################
### OSimStatsHelper ###
#######################
class OSimStatsHelper:
    """Static helpers for aggregating OpenSimulator statistics."""

    @staticmethod
    def sumStats(stats):
        """Return a synthetic "Total" stat that sums the given stats
        sample-by-sample. Units and naming are taken from the first stat."""
        first = stats[0]
        total = {
            'abs': {'units': first['abs']['units']},
            'category': first['category'],
            'container': "Total",
            'name': first['name'],
            'fullName': ".".join((first['category'], "Total", first['name'])),
        }
        total['abs']['values'] = OSimStatsHelper.sumStatsToValues(stats, 'abs')
        if 'delta' in first:
            total['delta'] = {'units': first['delta']['units']}
            total['delta']['values'] = OSimStatsHelper.sumStatsToValues(stats, 'delta')
        return total

    @staticmethod
    def sumStatsToValues(stats, type):
        """Element-wise sum of each stat's value series under key `type`;
        the series may differ in length."""
        totals = []
        for stat in stats:
            for i, value in enumerate(stat[type]['values']):
                if i < len(totals):
                    totals[i] += value
                else:
                    totals.append(value)
        return totals

    @staticmethod
    def splitStatsFullName(fullName):
        """Split a "category.container.name" string into its three parts."""
        return statNamePartsRe.match(fullName).groups()
#lineRe = re.compile("(.* .*) - (.*) : (\d+)[ ,]([^:]*)")
#lineRe = re.compile("(.* .*) - (.*) : (?P<abs>[\d\.-]+)(?: (?:\D+))?(?P<delta>[\d\.-]+)?")
# Matches one stats line: "<timestamp> - <full.stat.name> : <abs>[, <delta>]".
lineRe = re.compile("(.* .*) - (.*) : (?P<abs>[^,]+)(?:, )?(?P<delta>[^,]+)?")
# Marks the start of a new stats report (one sample per report).
statsReportStartRe = re.compile(" - \*\*\* STATS REPORT AT")
# Splits "category.container.name"; outer groups are non-greedy so the
# middle (container) group absorbs any extra dots.
statNamePartsRe = re.compile("^(.*?)\.(.*)\.(.*?)$");
# Splits a raw value like "12.3 ms" or "45%" into number and unit suffix.
valueRe = re.compile("([^ %/]+)(.*)")
#######################
### OSimStatsCorpus ###
#######################
class OSimStatsCorpus:
    """In-memory corpus of OpenSimulator stats samples parsed from log files.

    NOTE(review): this class uses Python 2 print statements -- the module
    is Python 2 only.
    """
    # Class-level defaults; __init__ -> clear() rebinds them per instance.
    _data = {}
    _samplesCount = 0
    @property
    def data(self):
        # Mapping: set name -> category -> container -> stat name -> stat dict.
        return self._data
    def __init__(self):
        self.clear()
    def __len__(self):
        # Number of stats report samples seen across all loaded files.
        return self._samplesCount
    @staticmethod
    def parseValue(rawValue, valueRe):
        # Split "number[units]" into (float, units-string).
        valueMatch = valueRe.match(rawValue)
        return float(valueMatch.group(1)), valueMatch.group(2)
    def getStat(self, statFullName):
        """
        Get a statistic given its full name.
        FIXME: Does not allow one to interrogate a given set yet.
        """
        if self._data == None:
            return None
        (category, container, name) = OSimStatsHelper.splitStatsFullName(statFullName);
        # NOTE(review): iterating .items() yields (name, set) tuples, so the
        # membership tests below look at the tuple, not the set dict -- this
        # lookup likely never succeeds; confirm intended behaviour.
        for set in self._data.items():
            if category in set and container in set[category] and name in set[category][container]:
                return set[category][container][name]
        else:
            return None
    def getStats(self, setGlob = "*", selectGlob = "*"):
        """
        Returns a dictionary of stats where fullName => stat.
        If glob is specified then this is used to match stats using their full name
        If no stats are found then an empty dictionary is returned.
        """
        if selectGlob == None:
            selectGlob = "*"
        if setGlob == None:
            setGlob = "*"
        matchingStats = collections.OrderedDict()
        # Filter first on set name, then on each stat's full name.
        for setName, set in self._data.items():
            if fnmatch.fnmatch(setName, setGlob):
                for category, containers in set.items():
                    for container, stats in containers.items():
                        for statName, stat in stats.items():
                            if fnmatch.fnmatch(stat['fullName'], selectGlob):
                                matchingStats[stat['fullName']] = stat
        return matchingStats
    def clear(self):
        """Clear out any existing dataset."""
        self._data = {}
        self._samplesCount = 0
    def load(self, path):
        """Load OpenSimulator stats log data from the given path and merge into any existing data."""
        # Set structure
        # category : {
        #     container : {
        #         stat : {
        #             'abs' : { 'values' : [], 'units' : "" },
        #             'delta' : { 'values' : [], 'units' : "" }
        #             'name' : string
        #             'fullName' : string
        #             'category' : string
        #             'container' : string
        #         }
        # delta may not be present
        with open(path) as f:
            # The set is named after the log file (without extension).
            setName = os.path.splitext(os.path.basename(path))[0]
            print "Loading set %s" % (setName)
            if not setName in self._data:
                self._data[setName] = {}
            set = self.data[setName]
            for line in f:
                match = lineRe.match(line)
                if match != None:
                    statFullName = match.group(2)
                    #(category, container, name) = statFullName.split(".")
                    (category, container, name) = OSimStatsHelper.splitStatsFullName(statFullName);
                    rawValue = match.group("abs")
                    #print match.lastindex
                    #print rawValue
                    value = OSimStatsCorpus.parseValue(rawValue, valueRe)
                    # Create the nested category/container/stat entries lazily.
                    if not category in set:
                        set[category] = collections.OrderedDict()
                    if not container in set[category]:
                        set[category][container] = collections.OrderedDict()
                    if not name in set[category][container]:
                        entry = {
                            'abs' : { 'values' : [], 'units' : value[1] },
                            'category' : category,
                            'container' : container,
                            'fullName' : statFullName,
                            'name' : name
                        }
                        set[category][container][name] = entry
                    stat = set[category][container][name]
                    stat['abs']['values'].append(value[0])
                    # Handle delta value if present
                    if match.group("delta"):
                        rawValue = match.group("delta")
                        value = OSimStatsCorpus.parseValue(rawValue, valueRe)
                        if not 'delta' in stat:
                            stat['delta'] = { 'values' : [], 'units' : value[1] }
                        stat['delta']['values'].append(value[0])
                else:
                    # Not a stats line: either a new report header (counts as
                    # one sample) or noise we report and skip.
                    match = statsReportStartRe.search(line)
                    if (match != None):
                        self._samplesCount += 1
                    else:
                        print "Ignoring [%s]" % (line)
import os
import sys
import errno
import random
import pickle
import numpy as np
import torch
import torchvision
import torch.nn.functional as F
from torch.utils.data.dataset import Dataset
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import BatchSampler
from torchvision.datasets import DatasetFolder
from torchvision import transforms
from torch import nn
from torch import optim
import matplotlib.pyplot as plt
#==============================================================================
# Network definition
#==============================================================================
class SE_HIPP_3D_Net(nn.Module):
    """Two-stage CNN binary classifier.

    Treats 28 slices as input channels; the fc1 sizing implies a spatial
    input of 28x28, i.e. tensors of shape (N, 28, 28, 28).
    """

    def __init__(self):
        super(SE_HIPP_3D_Net, self).__init__()
        self.conv1 = nn.Conv2d(28, 32, kernel_size=4, stride=1, padding=1)
        self.bn1 = nn.BatchNorm2d(32)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=2, stride=1, padding=0)
        self.bn2 = nn.BatchNorm2d(64)
        self.fc1 = nn.Linear(64 * 7 * 7, 120)
        self.dropout = nn.Dropout(0.5)
        self.fc2 = nn.Linear(120, 2)

    def forward(self, x):
        # Stage 1: conv -> max-pool -> batch-norm -> ReLU
        out = self.relu(self.bn1(
            F.max_pool2d(self.conv1(x), kernel_size=3, stride=2, padding=0)))
        # Stage 2: conv -> max-pool -> batch-norm -> ReLU (spatial size 7x7)
        out = self.relu(self.bn2(
            F.max_pool2d(self.conv2(out), kernel_size=2, stride=2, padding=1)))
        # Flatten, regularize, classify.
        out = out.view(-1, self.num_flat_features(out))
        out = self.dropout(out)
        out = F.relu(self.fc1(out))
        return self.fc2(out)

    def num_flat_features(self, x):
        """Number of features per sample when flattening all non-batch dims."""
        count = 1
        for dim in x.size()[1:]:
            count *= dim
        return count
#!/usr/bin/python
"""Cartesian execution of options for experiments"""
import itertools
from pprint import pprint
import os
# GROUPS = [
# ('train', {'type': 'option',
# 'order': 0,
# 'values': ['train5k']}),
# ('lang', {'type': 'option',
# 'order': 1,
# 'values': 'hungarian,basque,french,korean,polish,swedish'.split(',')}),
# ('infuse', {'type': 'option',
# 'order': 2,
# 'values': ['true', 'false']}),
# ('maxmsr', {'type': 'option',
# 'order': 3,
# 'values': '1'.split(',')})
# ]
#
GROUPS = [
('train', {'type': 'option',
'order': 0,
'values': ['train', 'train5k']}),
('lang', {'type': 'option',
'order': 1,
'values': 'hungarian,basque,french,korean,polish,swedish'.split(',')}),
('infuse', {'type': 'option',
'order': 2,
'values': ['true', 'false']}),
('maxmsr', {'type': 'option',
'order': 3,
'values': '1,2,4,8'.split(',')})
]
# GROUPS = [
# ('gram', {'type': 'file',
# 'use': 'agg',
# 'order': 0,
# 'values': ['unigram', 'bigram', 'trigram', 'nextunigram', 'nextbigram', 'nexttrigram']}),
# # ('prev', {'type': 'file',
# # 'use': 'optional',
# # 'value': 'prev'}),
# ('pop', {'type': 'option',
# 'use': 'optional',
# 'value': '-pop'})
# ]
# BASE = """nohup ./chukuparser md -f $conf -td corpus/train4k.hebtb.gold.lattices -tl corpus/train4k.hebtb.pred.lattices -in corpus/dev.hebtb.gold.conll.pred.lattices -ing corpus/dev.hebtb.gold.conll.gold.lattices -om devo.$exp.b32.hebtb.mapping -it 1 -b 32 -p Funcs_Main_POS_Both_Prop -wb -bconc $flags > runstatus.$exp.b32"""
MALEARN = """nohup ./yap malearn -lattice spmrl/train.$lang.gold.conll.tobeparsed.tagged.lattices -raw spmrl/train.$lang.gold.conll.tobeparsed.raw -out $lang.json > malearn.$exp.out"""
MATRAIN = """nohup ./yap ma -dict $lang.json -raw spmrl/$train.$lang.gold.conll.tobeparsed.raw -out $train.$lang.$maxmsr.analyzed.lattices -maxmsrperpos $maxmsr > matrain.$exp.out"""
MADEV = """nohup ./yap ma -dict $lang.json -raw spmrl/dev.$lang.gold.conll.tobeparsed.raw -out dev.$lang.$maxmsr.analyzed.lattices -maxmsrperpos $maxmsr > madev.$exp.out"""
MD = """nohup ./yap md -f conf/standalone.md.yaml -td spmrl/$train.$lang.gold.conll.tobeparsed.tagged.lattices -tl $train.$lang.$maxmsr.analyzed.lattices -in dev.$lang.$maxmsr.analyzed.lattices -ing spmrl/dev.$lang.gold.conll.tobeparsed.tagged.lattices -om devo.$train_$lang_$maxmsr_$infuse.mapping -infusedev=$infuse -it 1 -b 32 -p Funcs_Main_POS_Both_Prop -bconc -pop > runstatus.$exp.out"""
cmds = [MALEARN, MATRAIN, MADEV, MD]
REPLACE_STR = '$exp'
CONF_FILE = 'standalone.md.%s.yaml'
BASE_FILE = 'standalone.base.md.yaml'
# first transform optional to empty, existing
for (name, conf) in GROUPS:
if conf.get('use', None) == 'optional':
conf['values'] = [None, conf['value']]
conf_values = map(lambda (name, conf): conf['values'], GROUPS)
executions = list(itertools.product(*conf_values))
def gen_agg_file(values, out_name):
with open(out_name, 'w') as outf:
for value in values:
with open(value) as inf:
outf.write(inf.read())
for execution in executions:
print 'At execution %s' % str(execution)
files = [BASE_FILE]
exp_strings = []
command_line_options = []
options = {}
# for i, param in enumerate(execution):
# conf_name, conf = GROUPS[i]
# # print "\tAt conf %s" % conf_name
# # pprint(conf)
# # print "\tparam is %s" % str(param)
# if conf['type'] == 'option' and param:
# print "\t\tadd %s=%s to command line" % (conf_name, str(param))
# options[conf_name] = param
# # print "\t\tadd %s to command line" % str(conf['value'])
# # command_line_options.append(conf['value'])
# if conf.get('use', None) == 'optional':
# exp_strings.append(conf_name if param else 'no%s' % conf_name)
# else:
# exp_strings.append(param)
# if conf['type'] == 'file':
# if conf['use'] == 'agg':
# files += conf['values'][:conf['values'].index(param)+1]
# if conf['use'] == 'optional' and param:
# files.append(param)
for cmd in cmds:
execcmd = cmd[:]
for name, value in zip(map(lambda (k,v):k, GROUPS), execution):
execcmd = execcmd.replace('$'+name, value)
execcmd = execcmd.replace('$exp', '_'.join(execution))
print execcmd
os.system(execcmd)
# exp_string = '_'.join(exp_strings)
# outname = CONF_FILE % exp_string
# print command_line_options
# gen_agg_file(files, outname)
# new_command = BASE.replace('$conf', outname).replace('$exp', exp_string, 2).replace('$flags', ' '.join(command_line_options))
# print 'Executing %s' % new_command
# os.system(new_command)
| scripts/cartesian_experiments.py | 5,102 | !/usr/bin/python GROUPS = [ ('train', {'type': 'option', 'order': 0, 'values': ['train5k']}), ('lang', {'type': 'option', 'order': 1, 'values': 'hungarian,basque,french,korean,polish,swedish'.split(',')}), ('infuse', {'type': 'option', 'order': 2, 'values': ['true', 'false']}), ('maxmsr', {'type': 'option', 'order': 3, 'values': '1'.split(',')}) ] GROUPS = [ ('gram', {'type': 'file', 'use': 'agg', 'order': 0, 'values': ['unigram', 'bigram', 'trigram', 'nextunigram', 'nextbigram', 'nexttrigram']}), ('prev', {'type': 'file', 'use': 'optional', 'value': 'prev'}), ('pop', {'type': 'option', 'use': 'optional', 'value': '-pop'}) ] BASE = """nohup ./chukuparser md -f $conf -td corpus/train4k.hebtb.gold.lattices -tl corpus/train4k.hebtb.pred.lattices -in corpus/dev.hebtb.gold.conll.pred.lattices -ing corpus/dev.hebtb.gold.conll.gold.lattices -om devo.$exp.b32.hebtb.mapping -it 1 -b 32 -p Funcs_Main_POS_Both_Prop -wb -bconc $flags > runstatus.$exp.b32""" first transform optional to empty, existing for i, param in enumerate(execution): conf_name, conf = GROUPS[i] print "\tAt conf %s" % conf_name pprint(conf) print "\tparam is %s" % str(param) if conf['type'] == 'option' and param: print "\t\tadd %s=%s to command line" % (conf_name, str(param)) options[conf_name] = param print "\t\tadd %s to command line" % str(conf['value']) command_line_options.append(conf['value']) if conf.get('use', None) == 'optional': exp_strings.append(conf_name if param else 'no%s' % conf_name) else: exp_strings.append(param) if conf['type'] == 'file': if conf['use'] == 'agg': files += conf['values'][:conf['values'].index(param)+1] if conf['use'] == 'optional' and param: files.append(param) exp_string = '_'.join(exp_strings) outname = CONF_FILE % exp_string print command_line_options gen_agg_file(files, outname) new_command = BASE.replace('$conf', outname).replace('$exp', exp_string, 2).replace('$flags', ' '.join(command_line_options)) print 'Executing %s' 
% new_command os.system(new_command) | 2,354 | en | 0.112564 |
# Copyright (c) 2020 The Regents of the University of Michigan
# All rights reserved.
# This software is licensed under the BSD 3-Clause License.
import garnett
import hoomd
import hoomd.hpmc
# Vertices of a cube
cube_verts = [[-1, -1, -1], [-1, -1, 1], [-1, 1, 1], [-1, 1, -1],
[1, -1, -1], [1, -1, 1], [1, 1, 1], [1, 1, -1]]
with hoomd.context.SimulationContext():
box = hoomd.data.boxdim(L=10, dimensions=3)
snapshot = hoomd.data.make_snapshot(N=4, box=box)
snapshot.particles.position[:] = [
[2, 0, 0],
[4, 0, 0],
[0, 4, 0],
[0, 0, 4],
]
system = hoomd.init.read_snapshot(snapshot)
mc = hoomd.hpmc.integrate.convex_polyhedron(seed=452784, d=0.2, a=0.4)
mc.shape_param.set('A', vertices=cube_verts)
gsd_dump = hoomd.dump.gsd('cube.gsd', period=10, group=hoomd.group.all())
gsd_dump.dump_state(mc)
gsd_dump.dump_shape(mc)
hoomd.run(1000)
# Restore a snapshot from saved data
with garnett.read('cube.gsd') as traj:
snapshot2 = system.take_snapshot()
traj[-1].to_hoomd_snapshot(snapshot2)
with hoomd.context.SimulationContext():
# Create a HOOMD snapshot from a garnett Trajectory frame
with garnett.read('cube.gsd') as traj:
snapshot = traj[-1].to_hoomd_snapshot()
system = hoomd.init.read_snapshot(snapshot)
mc = hoomd.hpmc.integrate.convex_polyhedron(seed=452784, d=0.2, a=0.4)
mc.shape_param.set('A', vertices=cube_verts)
gsd_dump = hoomd.dump.gsd('cube.gsd', period=10, group=hoomd.group.all())
gsd_dump.dump_state(mc)
gsd_dump.dump_shape(mc)
hoomd.run(1000)
| examples/example-hpmc.py | 1,633 | Copyright (c) 2020 The Regents of the University of Michigan All rights reserved. This software is licensed under the BSD 3-Clause License. Vertices of a cube Restore a snapshot from saved data Create a HOOMD snapshot from a garnett Trajectory frame | 249 | en | 0.830578 |
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
import json
import xmind
import logging
from xmind2testcase2021.zentao import xmind_to_zentao_csv_file
from xmind2testcase2021.testlink import xmind_to_testlink_xml_file
from xmind2testcase2021.utils import xmind_testcase_to_json_file
from xmind2testcase2021.utils import xmind_testsuite_to_json_file
from xmind2testcase2021.utils import get_xmind_testcase_list
from xmind2testcase2021.utils import get_xmind_testsuite_list
logging.basicConfig(level=logging.INFO)
def main():
xmind_file = 'docs/xmind_testcase_template_v1.1.xmind'
print('Start to convert XMind file: %s' % xmind_file)
# 1、testcases import file
# (1) zentao
zentao_csv_file = xmind_to_zentao_csv_file(xmind_file)
print('Convert XMind file to zentao csv file successfully: %s' % zentao_csv_file)
# (2) testlink
testlink_xml_file = xmind_to_testlink_xml_file(xmind_file)
print('Convert XMind file to testlink xml file successfully: %s' % testlink_xml_file)
# 2、 testcases json file
# (1) testsuite
testsuite_json_file = xmind_testsuite_to_json_file(xmind_file)
print('Convert XMind file to testsuite json file successfully: %s' % testsuite_json_file)
# (2) testcase
testcase_json_file = xmind_testcase_to_json_file(xmind_file)
print('Convert XMind file to testcase json file successfully: %s' % testcase_json_file)
# 3、test dict/json data
# (1) testsuite
testsuites = get_xmind_testsuite_list(xmind_file)
print('Convert XMind to testsuits dict data:\n%s' %
json.dumps(testsuites, indent=2, separators=(',', ': '), ensure_ascii=False))
# (2) testcase
testcases = get_xmind_testcase_list(xmind_file)
print('Convert Xmind to testcases dict data:\n%s' %
json.dumps(testcases, indent=4, separators=(',', ': '), ensure_ascii=False))
# (3) xmind file
workbook = xmind.load(xmind_file)
print('Convert XMind to Json data:\n%s' %
json.dumps(workbook.getData(), indent=2, separators=(',', ': '), ensure_ascii=False))
print('Finished conversion, Congratulations!')
if __name__ == '__main__':
main() | samples.py | 2,152 | !/usr/bin/env python _*_ coding:utf-8 _*_ 1、testcases import file (1) zentao (2) testlink 2、 testcases json file (1) testsuite (2) testcase 3、test dict/json data (1) testsuite (2) testcase (3) xmind file | 203 | en | 0.212822 |
# ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import os
import shutil
from datetime import datetime
from traits.api import Bool
from uncertainties import ufloat
from pychron.core.helpers.datetime_tools import ISO_FORMAT_STR
from pychron.core.helpers.filetools import glob_list_directory, add_extension, \
list_directory
from pychron.dvc import dvc_dump, dvc_load, repository_path, list_frozen_productions
from pychron.dvc.meta_object import IrradiationGeometry, Chronology, Production, cached, Gains, LoadGeometry, \
MetaObjectException
from pychron.git_archive.repo_manager import GitRepoManager
from pychron.paths import paths, r_mkdir
from pychron.pychron_constants import INTERFERENCE_KEYS, RATIO_KEYS, DEFAULT_MONITOR_NAME, DATE_FORMAT, NULL_STR
# ============= enthought library imports =======================
def irradiation_geometry(name):
p = os.path.join(paths.meta_root, 'irradiation_holders', add_extension(name))
return IrradiationGeometry(p)
def irradiation_geometry_holes(name):
geom = irradiation_geometry(name)
return geom.holes
def irradiation_chronology(name, allow_null=False):
p = os.path.join(paths.meta_root, name, 'chronology.txt')
return Chronology(p, allow_null=allow_null)
def dump_chronology(path, doses):
if doses is None:
doses = []
with open(path, 'w') as wfile:
for p, s, e in doses:
if not isinstance(s, str):
s = s.strftime(ISO_FORMAT_STR)
if not isinstance(s, str):
s = s.strftime(ISO_FORMAT_STR)
if not isinstance(p, str):
p = '{:0.3f}'.format(p)
line = '{},{},{}\n'.format(p, s, e)
wfile.write(line)
def gain_path(name):
root = os.path.join(paths.meta_root, 'spectrometers')
if not os.path.isdir(root):
os.mkdir(root)
p = os.path.join(root, add_extension('{}.gain'.format(name), '.json'))
return p
def get_frozen_productions(repo):
prods = {}
for name, path in list_frozen_productions(repo):
prods[name] = Production(path)
return prods
def get_frozen_flux(repo, irradiation):
path = repository_path(repo, '{}.json'.format(irradiation))
fd = {}
if path:
fd = dvc_load(path)
for fi in fd.values():
fi['j'] = ufloat(*fi['j'], tag='J')
return fd
class MetaRepo(GitRepoManager):
clear_cache = Bool
def get_monitor_info(self, irrad, level):
age, decay = NULL_STR, NULL_STR
positions = self._get_level_positions(irrad, level)
# assume all positions have same monitor_age/decay constant. Not strictly true. Potential some ambiquity but
# will not be resolved now 8/26/18.
if positions:
position = positions[0]
opt = position.get('options')
if opt:
age = position.get('monitor_age', NULL_STR)
decayd = position.get('decay_constants')
if decayd:
decay = decayd.get('lambda_k_total', NULL_STR)
return str(age), str(decay)
def add_unstaged(self, *args, **kw):
super(MetaRepo, self).add_unstaged(self.path, **kw)
def save_gains(self, ms, gains_dict):
p = gain_path(ms)
dvc_dump(gains_dict, p)
if self.add_paths(p):
self.commit('Updated gains')
def update_script(self, rootname, name, path_or_blob):
self._update_text(os.path.join('scripts', rootname.lower()), name, path_or_blob)
def update_experiment_queue(self, rootname, name, path_or_blob):
self._update_text(os.path.join('experiments', rootname.lower()), name, path_or_blob)
def update_level_production(self, irrad, name, prname, note=None):
prname = prname.replace(' ', '_')
pathname = add_extension(prname, '.json')
src = os.path.join(paths.meta_root, irrad, 'productions', pathname)
if os.path.isfile(src):
self.update_productions(irrad, name, prname, note=note)
else:
self.warning_dialog('Invalid production name'.format(prname))
def update_level_monitor(self, irradiation, level, monitor_name, monitor_material, monitor_age, lambda_k):
path = self.get_level_path(irradiation, level)
obj = dvc_load(path)
positions = self._get_level_positions(irradiation, level)
options = {'monitor_name': monitor_name,
'monitor_material': monitor_material,
'monitor_age': monitor_age}
decay_constants = {'lambda_k_total': lambda_k, 'lambda_k_total_error': 0}
for p in positions:
p['options'] = options
p['decay_constants'] = decay_constants
obj['positions'] = positions
dvc_dump(obj, path)
def add_production_to_irradiation(self, irrad, name, params, add=True, commit=False):
self.debug('adding production {} to irradiation={}'.format(name, irrad))
p = os.path.join(paths.meta_root, irrad, 'productions', add_extension(name, '.json'))
prod = Production(p, new=not os.path.isfile(p))
prod.update(params)
prod.dump()
if add:
self.add(p, commit=commit)
def add_production(self, irrad, name, obj, commit=False, add=True):
p = self.get_production(irrad, name, force=True)
p.attrs = attrs = INTERFERENCE_KEYS + RATIO_KEYS
kef = lambda x: '{}_err'.format(x)
if obj:
def values():
return ((k, getattr(obj, k), kef(k), getattr(obj, kef(k))) for k in attrs)
else:
def values():
return ((k, 0, kef(k), 0) for k in attrs)
for k, v, ke, e in values():
setattr(p, k, v)
setattr(p, ke, e)
p.dump()
if add:
self.add(p.path, commit=commit)
def update_production(self, prod, irradiation=None):
ip = self.get_production(prod.name)
self.debug('saving production {}'.format(prod.name))
params = prod.get_params()
for k, v in params.items():
self.debug('setting {}={}'.format(k, v))
setattr(ip, k, v)
ip.note = prod.note
self.add(ip.path, commit=False)
self.commit('updated production {}'.format(prod.name))
def update_productions(self, irrad, level, production, note=None, add=True):
p = os.path.join(paths.meta_root, irrad, 'productions.json')
obj = dvc_load(p)
obj['note'] = str(note) or ''
if level in obj:
if obj[level] != production:
self.debug('setting production to irrad={}, level={}, prod={}'.format(irrad, level, production))
obj[level] = production
dvc_dump(obj, p)
if add:
self.add(p, commit=False)
else:
obj[level] = production
dvc_dump(obj, p)
if add:
self.add(p, commit=False)
def set_identifier(self, irradiation, level, pos, identifier):
p = self.get_level_path(irradiation, level)
jd = dvc_load(p)
positions = self._get_level_positions(irradiation, level)
d = next((p for p in positions if p['position'] == pos), None)
if d:
d['identifier'] = identifier
jd['positions'] = positions
dvc_dump(jd, p)
self.add(p, commit=False)
def get_level_path(self, irrad, level):
return os.path.join(paths.meta_root, irrad, '{}.json'.format(level))
def add_level(self, irrad, level, add=True):
p = self.get_level_path(irrad, level)
lv = dict(z=0, positions=[])
dvc_dump(lv, p)
if add:
self.add(p, commit=False)
def add_chronology(self, irrad, doses, add=True):
p = os.path.join(paths.meta_root, irrad, 'chronology.txt')
dump_chronology(p, doses)
if add:
self.add(p, commit=False)
def add_irradiation(self, name):
p = os.path.join(paths.meta_root, name)
if not os.path.isdir(p):
os.mkdir(p)
def add_position(self, irradiation, level, pos, add=True):
p = self.get_level_path(irradiation, level)
jd = dvc_load(p)
if isinstance(jd, list):
positions = jd
z = 0
else:
positions = jd.get('positions', [])
z = jd.get('z', 0)
pd = next((p for p in positions if p['position'] == pos), None)
if pd is None:
positions.append({'position': pos, 'decay_constants': {}})
dvc_dump({'z': z, 'positions': positions}, p)
if add:
self.add(p, commit=False)
def add_irradiation_geometry_file(self, path):
try:
holder = IrradiationGeometry(path)
if not holder.holes:
raise BaseException
except BaseException:
self.warning_dialog('Invalid Irradiation Geometry file. Failed to import')
return
self.smart_pull()
root = os.path.join(paths.meta_root, 'irradiation_holders')
if not os.path.isdir(root):
os.mkdir(root)
name = os.path.basename(path)
dest = os.path.join(root, name)
shutil.copyfile(path, dest)
self.add(dest, commit=False)
self.commit('added irradiation geometry file {}'.format(name))
self.push()
self.information_dialog('Irradiation Geometry "{}" added'.format(name))
# p = os.path.join(root, add_extension(name))
# def add_irradiation_holder(self, name, blob, commit=False, overwrite=False, add=True):
# root = os.path.join(paths.meta_root, 'irradiation_holders')
# if not os.path.isdir(root):
# os.mkdir(root)
# p = os.path.join(root, add_extension(name))
#
# if not os.path.isfile(p) or overwrite:
# with open(p, 'w') as wfile:
# holes = list(iter_geom(blob))
# n = len(holes)
# wfile.write('{},0.0175\n'.format(n))
# for idx, (x, y, r) in holes:
# wfile.write('{:0.4f},{:0.4f},{:0.4f}\n'.format(x, y, r))
# if add:
# self.add(p, commit=commit)
def get_load_holders(self):
p = os.path.join(paths.meta_root, 'load_holders')
return list_directory(p, extension='.txt', remove_extension=True)
def add_load_holder(self, name, path_or_txt, commit=False, add=True):
p = os.path.join(paths.meta_root, 'load_holders', name)
if os.path.isfile(path_or_txt):
shutil.copyfile(path_or_txt, p)
else:
with open(p, 'w') as wfile:
wfile.write(path_or_txt)
if add:
self.add(p, commit=commit)
def update_level_z(self, irradiation, level, z):
p = self.get_level_path(irradiation, level)
obj = dvc_load(p)
try:
add = obj['z'] != z
obj['z'] = z
except TypeError:
obj = {'z': z, 'positions': obj}
add = True
dvc_dump(obj, p)
if add:
self.add(p, commit=False)
def remove_irradiation_position(self, irradiation, level, hole):
p = self.get_level_path(irradiation, level)
jd = dvc_load(p)
if jd:
if isinstance(jd, list):
positions = jd
z = 0
else:
positions = jd['positions']
z = jd['z']
npositions = [ji for ji in positions if not ji['position'] == hole]
obj = {'z': z, 'positions': npositions}
dvc_dump(obj, p)
self.add(p, commit=False)
def new_flux_positions(self, irradiation, level, positions, add=True):
p = self.get_level_path(irradiation, level)
obj = {'positions': positions, 'z': 0}
dvc_dump(obj, p)
if add:
self.add(p, commit=False)
def update_fluxes(self, irradiation, level, j, e, add=True):
p = self.get_level_path(irradiation, level)
jd = dvc_load(p)
if isinstance(jd, list):
positions = jd
else:
positions = jd.get('positions')
if positions:
for ip in positions:
ip['j'] = j
ip['j_err'] = e
dvc_dump(jd, p)
if add:
self.add(p, commit=False)
def update_flux(self, irradiation, level, pos, identifier, j, e, mj, me, decay=None,
position_jerr=None,
analyses=None, options=None, add=True):
if options is None:
options = {}
if decay is None:
decay = {}
if analyses is None:
analyses = []
p = self.get_level_path(irradiation, level)
jd = dvc_load(p)
if isinstance(jd, list):
positions = jd
z = 0
else:
positions = jd.get('positions', [])
z = jd.get('z', 0)
npos = {'position': pos, 'j': j, 'j_err': e,
'mean_j': mj, 'mean_j_err': me,
'position_jerr': position_jerr,
'decay_constants': decay,
'identifier': identifier,
'options': options,
'analyses': [{'uuid': ai.uuid,
'record_id': ai.record_id,
'is_omitted': ai.is_omitted()}
for ai in analyses]}
if positions:
added = any((ji['position'] == pos for ji in positions))
npositions = [ji if ji['position'] != pos else npos for ji in positions]
if not added:
npositions.append(npos)
else:
npositions = [npos]
obj = {'z': z, 'positions': npositions}
dvc_dump(obj, p)
if add:
self.add(p, commit=False)
def update_chronology(self, name, doses):
p = os.path.join(paths.meta_root, name, 'chronology.txt')
dump_chronology(p, doses)
self.add(p, commit=False)
def get_irradiation_holder_names(self):
return glob_list_directory(os.path.join(paths.meta_root, 'irradiation_holders'),
extension='.txt',
remove_extension=True)
def get_cocktail_irradiation(self):
"""
example cocktail.json
{
"chronology": "2016-06-01 17:00:00",
"j": 4e-4,
"j_err": 4e-9
}
:return:
"""
p = os.path.join(paths.meta_root, 'cocktail.json')
ret = dvc_load(p)
nret = {}
if ret:
lines = ['1.0, {}, {}'.format(ret['chronology'], ret['chronology'])]
c = Chronology.from_lines(lines)
nret['chronology'] = c
nret['flux'] = ufloat(ret['j'], ret['j_err'])
return nret
def get_default_productions(self):
p = os.path.join(paths.meta_root, 'reactors.json')
if not os.path.isfile(p):
with open(p, 'w') as wfile:
from pychron.file_defaults import REACTORS_DEFAULT
wfile.write(REACTORS_DEFAULT)
return dvc_load(p)
def get_flux_positions(self, irradiation, level):
positions = self._get_level_positions(irradiation, level)
return positions
def get_flux(self, irradiation, level, position):
positions = self.get_flux_positions(irradiation, level)
return self.get_flux_from_positions(position, positions)
def get_flux_from_positions(self, position, positions):
j, je, pe, lambda_k = 0, 0, 0, None
monitor_name, monitor_material, monitor_age = DEFAULT_MONITOR_NAME, 'sanidine', ufloat(28.201, 0)
if positions:
pos = next((p for p in positions if p['position'] == position), None)
if pos:
j, je, pe = pos.get('j', 0), pos.get('j_err', 0), pos.get('position_jerr', 0)
dc = pos.get('decay_constants')
if dc:
# this was a temporary fix and likely can be removed
if isinstance(dc, float):
v, e = dc, 0
else:
v, e = dc.get('lambda_k_total', 0), dc.get('lambda_k_total_error', 0)
lambda_k = ufloat(v, e)
mon = pos.get('monitor')
if mon:
monitor_name = mon.get('name', DEFAULT_MONITOR_NAME)
sa = mon.get('age', 28.201)
se = mon.get('error', 0)
monitor_age = ufloat(sa, se, tag='monitor_age')
monitor_material = mon.get('material', 'sanidine')
fd = {'j': ufloat(j, je, tag='J'),
'position_jerr': pe,
'lambda_k': lambda_k,
'monitor_name': monitor_name,
'monitor_material': monitor_material,
'monitor_age': monitor_age}
return fd
def get_gains(self, name):
g = self.get_gain_obj(name)
return g.gains
def save_sensitivities(self, sens):
ps = []
for k, v in sens.items():
root = os.path.join(paths.meta_root, 'spectrometers')
p = os.path.join(root, add_extension('{}.sens'.format(k), '.json'))
dvc_dump(v, p)
ps.append(p)
if self.add_paths(ps):
self.commit('Updated sensitivity')
def get_sensitivities(self):
specs = {}
root = os.path.join(paths.meta_root, 'spectrometers')
for p in list_directory(root):
if p.endswith('.sens.json'):
name = p.split('.')[0]
p = os.path.join(root, p)
obj = dvc_load(p)
for r in obj:
if r['create_date']:
r['create_date'] = datetime.strptime(r['create_date'], DATE_FORMAT)
specs[name] = obj
return specs
def get_sensitivity(self, name):
sens = self.get_sensitivities()
spec = sens.get(name)
v = 1
if spec:
# get most recent sensitivity
record = spec[-1]
v = record.get('sensitivity', 1)
return v
@cached('clear_cache')
def get_gain_obj(self, name, **kw):
p = gain_path(name)
return Gains(p)
# @cached('clear_cache')
def get_production(self, irrad, level, allow_null=False, **kw):
path = os.path.join(paths.meta_root, irrad, 'productions.json')
obj = dvc_load(path)
pname = obj.get(level, '')
p = os.path.join(paths.meta_root, irrad, 'productions', add_extension(pname, ext='.json'))
ip = Production(p, allow_null=allow_null)
# print 'new production id={}, name={}, irrad={}, level={}'.format(id(ip), pname, irrad, level)
return pname, ip
# @cached('clear_cache')
def get_chronology(self, name, allow_null=False, **kw):
chron = None
try:
chron = irradiation_chronology(name, allow_null=allow_null)
if self.application:
chron.use_irradiation_endtime = self.application.get_boolean_preference(
'pychron.arar.constants.use_irradiation_endtime', False)
except MetaObjectException:
if name != 'NoIrradiation':
self.warning('Could not locate the irradiation chronology "{}"'.format(name))
return chron
@cached('clear_cache')
def get_irradiation_holder_holes(self, name, **kw):
return irradiation_geometry_holes(name)
@cached('clear_cache')
def get_load_holder_holes(self, name, **kw):
p = os.path.join(paths.meta_root, 'load_holders', add_extension(name))
holder = LoadGeometry(p)
return holder.holes
@property
def sensitivity_path(self):
return os.path.join(paths.meta_root, 'sensitivity.json')
# private
def _get_level_positions(self, irrad, level):
p = self.get_level_path(irrad, level)
obj = dvc_load(p)
if isinstance(obj, list):
positions = obj
else:
positions = obj.get('positions', [])
return positions
def _update_text(self, tag, name, path_or_blob):
if not name:
self.debug('cannot update text with no name. tag={} name={}'.format(tag, name))
return
root = os.path.join(paths.meta_root, tag)
if not os.path.isdir(root):
r_mkdir(root)
p = os.path.join(root, name)
if os.path.isfile(path_or_blob):
shutil.copyfile(path_or_blob, p)
else:
with open(p, 'w') as wfile:
wfile.write(path_or_blob)
self.add(p, commit=False)
# ============= EOF =============================================
| pychron/dvc/meta_repo.py | 21,588 | example cocktail.json
{
"chronology": "2016-06-01 17:00:00",
"j": 4e-4,
"j_err": 4e-9
}
:return:
=============================================================================== Copyright 2015 Jake Ross Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =============================================================================== ============= enthought library imports ======================= assume all positions have same monitor_age/decay constant. Not strictly true. Potential some ambiquity but will not be resolved now 8/26/18. p = os.path.join(root, add_extension(name)) def add_irradiation_holder(self, name, blob, commit=False, overwrite=False, add=True): root = os.path.join(paths.meta_root, 'irradiation_holders') if not os.path.isdir(root): os.mkdir(root) p = os.path.join(root, add_extension(name)) if not os.path.isfile(p) or overwrite: with open(p, 'w') as wfile: holes = list(iter_geom(blob)) n = len(holes) wfile.write('{},0.0175\n'.format(n)) for idx, (x, y, r) in holes: wfile.write('{:0.4f},{:0.4f},{:0.4f}\n'.format(x, y, r)) if add: self.add(p, commit=commit) this was a temporary fix and likely can be removed get most recent sensitivity @cached('clear_cache') print 'new production id={}, name={}, irrad={}, level={}'.format(id(ip), pname, irrad, level) @cached('clear_cache') private ============= EOF ============================================= | 1,975 | en | 0.636685 |
#!/usr/bin/python
import glob,re,sys,math,pyfits
import numpy as np
import utils
if len(sys.argv) < 2:
print '\nconvert basti SSP models to ez_gal fits format'
print 'Run in directory with SED models for one metallicity'
print 'Usage: convert_basti.py ez_gal.ascii\n'
sys.exit(2)
fileout = sys.argv[1]
# try to extract meta data out of fileout
sfh = ''; tau = ''; met = ''; imf = ''
# split on _ but get rid of the extension
parts = '.'.join(fileout.split('.')[:-1]).split('_')
# look for sfh
for (check,val) in zip(['ssp','exp'], ['SSP','Exponential']):
if parts.count(check):
sfh = val
sfh_index = parts.index(check)
break
# tau?
if sfh:
tau = parts[sfh_index+1] if sfh == 'exp' else ''
# metallicity
if parts.count('z'):
met = parts[parts.index('z') + 1]
# imf
for (check,val) in zip(['krou','salp','chab'], ['Kroupa', 'Salpeter', 'Chabrier']):
if parts.count(check):
imf = val
break
if parts.count('n'):
n = parts[parts.index('n') + 1]
ae = False
if parts.count('ae'): ae = True
# does the file with masses exist?
has_masses = False
mass_file = glob.glob('MLR*.txt')
if len(mass_file):
# read it in!
print 'Loading masses from %s' % mass_file[0]
data = utils.rascii(mass_file[0], silent=True)
masses = data[:,10:14].sum(axis=1)
has_masses = True
files = glob.glob('SPEC*agb*')
nages = len(files)
ages = []
for (i,file) in enumerate(files):
ls = []
this = []
# extract the age from the filename and convert to years
m = re.search('t60*(\d+)$', file)
ages.append(int(m.group(1))*1e6)
# read in this file
fp = open(file, 'r')
for line in fp:
parts = line.strip().split()
ls.append(float(parts[0].strip()))
this.append(float(parts[1].strip()))
if i == 0:
# if this is the first file, generate the data table
nls = len(ls)
seds = np.empty((nls,nages))
# convert to ergs/s/angstrom
seds[:,i] = np.array(this)/4.3607e-33/1e10
# convert to numpy
ages = np.array(ages)
ls = np.array(ls)*10.0
# make sure we are sorted in age
sinds = ages.argsort()
ages = ages[sinds]
seds = seds[:,sinds]
# speed of light
c = utils.convert_length(utils.c, incoming='m', outgoing='a')
# convert from angstroms to hertz
vs = c/ls
# convert from ergs/s/A to ergs/s/Hz
seds *= ls.reshape((ls.size,1))**2.0/c
# and now from ergs/s/Hz to ergs/s/Hz/cm^2.0
seds /= (4.0*math.pi*utils.convert_length(10, incoming='pc', outgoing='cm')**2.0)
# sort in frequency space
sinds = vs.argsort()
# generate fits frame with sed in it
primary_hdu = pyfits.PrimaryHDU(seds[sinds,:])
primary_hdu.header.update('units', 'ergs/s/cm^2/Hz')
primary_hdu.header.update('has_seds', True)
primary_hdu.header.update('nfilters', 0)
primary_hdu.header.update('nzfs', 0)
# store meta data
if sfh and met and imf:
primary_hdu.header.update('has_meta', True)
primary_hdu.header.update('model', 'BaSTI', comment='meta data')
primary_hdu.header.update('met', met, comment='meta data')
primary_hdu.header.update('imf', imf, comment='meta data')
primary_hdu.header.update('sfh', sfh, comment='meta data')
if sfh == 'Exponential': primary_hdu.header.update('tau', tau, comment='meta data')
primary_hdu.header.update('n', n, comment='meta data')
primary_hdu.header.update('ae', ae, comment='meta data')
# store the list of frequencies in a table
vs_hdu = pyfits.new_table(pyfits.ColDefs([pyfits.Column(name='vs', array=vs[sinds], format='D', unit='hertz')]))
vs_hdu.header.update('units', 'hertz')
# and the list of ages
cols = [pyfits.Column(name='ages', array=ages, format='D', unit='years')]
# and masses
if has_masses: cols.append(pyfits.Column(name='masses', array=masses, format='D', unit='m_sun'))
ages_hdu = pyfits.new_table(pyfits.ColDefs(cols))
if has_masses: ages_hdu.header.update('has_mass', True)
# make the fits file in memory
hdulist = pyfits.HDUList([primary_hdu,vs_hdu,ages_hdu])
# and write it out
hdulist.writeto(fileout, clobber=True) | ezgal/scripts/convert_basti.py | 4,032 | !/usr/bin/python try to extract meta data out of fileout split on _ but get rid of the extension look for sfh tau? metallicity imf does the file with masses exist? read it in! extract the age from the filename and convert to years read in this file if this is the first file, generate the data table convert to ergs/s/angstrom convert to numpy make sure we are sorted in age speed of light convert from angstroms to hertz convert from ergs/s/A to ergs/s/Hz and now from ergs/s/Hz to ergs/s/Hz/cm^2.0 sort in frequency space generate fits frame with sed in it store meta data store the list of frequencies in a table and the list of ages and masses make the fits file in memory and write it out | 693 | en | 0.833455 |
import path4gmns as pg
from time import time
def test_download_sample_data_sets():
    """Download the sample data sets shipped with path4gmns from GitHub."""
    pg.download_sample_data_sets()
def test_find_shortest_path():
    """Demonstrate node- and link-based shortest-path queries between O and D."""
    with_demand = False
    network = pg.read_network(with_demand)
    node_seq = network.find_shortest_path(1, 2)
    print('\nshortest path (node id) from node 1 to node 2, ' + node_seq)
    link_seq = network.find_shortest_path(1, 2, seq_type='link')
    print('\nshortest path (link id) from node 1 to node 2, ' + link_seq)
    # the same queries restricted to a specific mode (the mode must be
    # defined in settings.yaml)
    walk_node_seq = network.find_shortest_path(1, 2, mode='w')
    print('\nshortest path (node id) from node 1 to node 2, ' + walk_node_seq)
    walk_link_seq = network.find_shortest_path(1, 2, mode='w', seq_type='link')
    print('\nshortest path (link id) from node 1 to node 2, ' + walk_link_seq)
def _print_agent_path_info(network, agent_id):
    """Print origin, destination, and shortest node/link paths for one agent."""
    print('\norigin node id of agent is '
          f'{network.get_agent_orig_node_id(agent_id)}')
    print('destination node id of agent is '
          f'{network.get_agent_dest_node_id(agent_id)}')
    print('shortest path (node id) of agent, '
          f'{network.get_agent_node_path(agent_id)}')
    print('shortest path (link id) of agent, '
          f'{network.get_agent_link_path(agent_id)}')


def test_find_shortest_path_for_agents():
    """Find shortest paths for all agents and report two sample agents.

    The per-agent report was previously duplicated inline for agents 300
    and 1000; it is now factored into _print_agent_path_info.
    """
    network = pg.read_network()
    st = time()
    # find agent paths under a specific mode defined in settings.yaml,
    # say, w (i.e., walk):
    # network.find_path_for_agents('w') or network.find_path_for_agents('walk')
    network.find_path_for_agents()
    print('\nprocessing time of finding shortest paths for all agents: '
          f'{time()-st:.2f} s')
    # report two representative agents
    for agent_id in (300, 1000):
        _print_agent_path_info(network, agent_id)
    # output unique agent paths to a csv file
    # if you do not want to include geometry info in the output file,
    # you can do pg.output_agent_paths(network, False)
    pg.output_agent_paths(network)
def test_column_generation_py():
    """Run column generation with the pure-Python engine and dump results."""
    network = pg.read_network()
    print('\nstart column generation\n')
    start = time()
    iter_num = 20
    column_update_num = 20
    # pg.perform_network_assignment(assignment_mode=1, assignment_num,
    #                               column_update_num, network)
    # has been deprecated starting from v0.7.2, and will be removed later.
    pg.perform_column_generation(iter_num, column_update_num, network)
    elapsed = time() - start
    print(f'processing time of column generation: {elapsed:.2f} s'
          f' for {iter_num} assignment iterations and '
          f'{column_update_num} iterations in column generation')
    # pass False as the second argument to omit geometry info, i.e.
    # pg.output_columns(network, False)
    pg.output_columns(network)
    pg.output_link_performance(network)
def test_column_generation_dtalite():
    """Validate column generation against the embedded DTALite engine."""
    print('start column generation using DTALite')
    start = time()
    mode = 1
    iter_num = 20
    column_update_num = 20
    pg.perform_network_assignment_DTALite(mode, iter_num, column_update_num)
    elapsed = time() - start
    print(f'processing time of column generation: {elapsed:.2f} s'
          f' for {iter_num} assignment iterations and '
          f'{column_update_num} iterations in column generation')
    print('\npath finding results can be found in agent.csv')
def test_loading_columns():
    """Reload previously generated columns and continue the optimization."""
    network = pg.read_network()
    print('\nstart loading columns\n')
    start = time()
    pg.load_columns(network)
    print(f'processing time of loading columns: {time()-start:.2f} s')
    print('\nstart column generation\n')
    start = time()
    iter_num = 0
    column_update_num = 10
    # pg.perform_network_assignment(assignment_mode=1, assignment_num,
    #                               column_update_num, network)
    # has been deprecated starting from v0.7.2, and will be removed later.
    pg.perform_column_generation(iter_num, column_update_num, network)
    print(f'processing time of column generation: {time()-start:.2f} s'
          f' for {iter_num} assignment iterations and '
          f'{column_update_num} iterations in column generation')
    pg.output_columns(network)
    pg.output_link_performance(network)
def test_accessibility():
    """Evaluate multimodal accessibility and query accessible nodes/links."""
    # demand is not needed for accessibility evaluation
    without_demand = False
    network = pg.read_network(without_demand)
    print('\nstart accessibility evaluation\n')
    start = time()
    # multimodal accessibility evaluation
    pg.evaluate_accessibility(network)
    # a single-mode evaluation would instead be
    # pg.evaluate_accessibility(network, multimodal=False, mode='p')
    print('complete accessibility evaluation.\n')
    print(f'processing time of accessibility evaluation: {time()-start:.2f} s')
    # accessible nodes and links from node 1 within a 5-minute time window
    # for the default mode auto (i.e., 'p')
    network.get_accessible_nodes(1, 5)
    network.get_accessible_links(1, 5)
    # accessible nodes and links from node 1 within a 15-minute time window
    # for mode walk (i.e., 'w')
    network.get_accessible_nodes(1, 15, 'w')
    network.get_accessible_links(1, 15, 'w')
def demo_mode(mode):
    """Dispatch one of the demo scenarios by its numeric option."""
    print(f'the selected mode is {mode}\n')
    dispatch = {
        0: test_download_sample_data_sets,     # download sample data from GitHub
        1: test_find_shortest_path,            # O-D shortest path on Chicago
        2: test_find_shortest_path_for_agents, # shortest paths for all agents
        3: test_column_generation_py,          # column generation, Python engine
        4: test_column_generation_dtalite,     # column generation, DTALite
        5: test_loading_columns,               # reuse columns from option 3 or 4
    }
    # any other value evaluates multimodal accessibility (option 6)
    dispatch.get(mode, test_accessibility)()
# Run the multimodal accessibility demo (option 6) when executed as a script.
if __name__=="__main__":
    demo_mode(6)
retrieve the shortest path under a specific mode (which must be defined in settings.yaml) find agent paths under a specific mode defined in settings.yaml, say, w (i.e., walk) network.find_path_for_agents('w') or network.find_path_for_agents('walk') output unique agent paths to a csv file if you do not want to include geometry info in the output file, you can do pg.output_agent_paths(network, False) pg.perform_network_assignment(assignment_mode=1, assignment_num, column_update_num, network) has been deprecated starting from v0.7.2, and will be removed later. if you do not want to include geometry info in the output file, use pg.output_columns(network, False) pg.perform_network_assignment(assignment_mode=1, assignment_num, column_update_num, network) has been deprecated starting from v0.7.2, and will be removed in later. multimodal accessibility evaluation accessibility evalutation for a target mode pg.evaluate_accessibility(network, multimodal=False, mode='p') get accessible nodes and links starting from node 1 with a 5-minitue time window for the default mode auto (i.e., 'p') get accessible nodes and links starting from node 1 with a 15-minitue time window for mode walk (i.e., 'w') option 0: download the sample data set from GitHub option 1: find shortest path between O and D on Chicago network option 2: find shortest paths for all agents on Chicago network option 3: perform column generation using Python engine on Chicago network option 4: perform column generation using DTALite on Chicago network option 5: load columns generated from option 3 or 4 on Chicago network option 6: evaluate multimodal accessibility on Chicago network | 1,745 | en | 0.776592 |
"""Define family of algorithms and make them interchangeable
The algorithms vary independetly from the clients using it.
This class implements to IngestorInterface and dynamically invoke
a suitable algorithm (strategy.algorithm()), through parse()
abstract method. i.e. it is independent of how an algorithm
is implemented.
That means, the behavior can be changed without breaking the classes
that use it, and the classes can switch between behaviors by changing
the specific implementation used without requiring any
significant code changes.
"""
from typing import List
from .IngestorInterface import IngestorInterface
from .QuoteModel import QuoteModel
from .CSVImporter import CSVImporter
from .PDFImporter import PDFImporter
from .DocxImporter import DocxImporter
from .TXTImporter import TXTImporter
class Ingestor(IngestorInterface):
    """Strategy context: delegate parsing to the first capable importer."""

    # candidate strategies, probed in this order
    importer_classes = [CSVImporter, PDFImporter, DocxImporter, TXTImporter]

    @classmethod
    def parse(cls, path: str) -> List[QuoteModel]:
        """Parse *path* with the first importer whose can_ingest() accepts it."""
        suitable = next(
            (imp for imp in cls.importer_classes if imp.can_ingest(path)),
            None,
        )
        if suitable is not None:
            return suitable.parse(path)
| QuoteEngine/Ingestor.py | 1,241 | Define family of algorithms & dynamically invoke the one of interest
Define family of algorithms and make them interchangeable
The algorithms vary independently from the clients using it.
This class implements to IngestorInterface and dynamically invoke
a suitable algorithm (strategy.algorithm()), through parse()
abstract method. i.e. it is independent of how an algorithm
is implemented.
That means, the behavior can be changed without breaking the classes
that use it, and the classes can switch between behaviors by changing
the specific implementation used without requiring any
significant code changes. | 611 | en | 0.870318 |
from infoblox_netmri.utils.utils import locate, to_snake
from infoblox_netmri.api.exceptions.netmri_exceptions import NotImplementedException
class Broker(object):
    """Base class for broker instances; provides methods for API requests
    and returns responses wrapped in the matching remote-model class.

    :param client: InfobloxNetMRI client
    """

    # name of the API controller; set by concrete subclasses
    controller = None

    def __init__(self, client):
        self.client = client

    def api_request(self, method_name, params):
        """Make an API request and return a single wrapped object.

        :param method_name: name of the API method
        :param params: dict-wrapped params for the specific API call
        """
        data = self.client.api_request(method_name, params)
        if isinstance(data, dict) and len(data) > 1:
            # wrap every value of a multi-key response; iterate over a
            # snapshot of the keys since values are reassigned in place
            for key in list(data):
                data[key] = self._get_return_object_type(data.get(key))
            return data
        class_name = to_snake(self.__class__.__name__.replace("Broker", ""))
        if class_name in data:
            result_name = class_name
        else:
            # fall back to the last path segment of the method name
            result_name = method_name.split('/')[-1]
        if result_name not in data:
            return data
        return self._get_return_object_type(data.get(result_name))

    # See NETMRI-31545
    def api_mixed_request(self, method_name, params):
        """Make an API request that downloads a file and return the
        JSON response or a request-status dictionary.

        :param method_name: name of the API method
        :param params: dict-wrapped params for the specific API call
        """
        data = self.client.api_request(method_name, params, downloadable=True)
        class_name = to_snake(self.__class__.__name__.replace("Broker", ""))
        if class_name in data:
            result_name = class_name
        else:
            result_name = method_name.split('/')[-1]
        if result_name not in data:
            return data
        return self._get_return_object_type(data.get(result_name))

    def api_list_request(self, method_name, params):
        """Make an API request and return a list of wrapped objects.

        :param method_name: name of the API method
        :param params: dict-wrapped params for the specific API call
        """
        data = self.client.api_request(method_name, params)
        if not data:
            return None
        try:
            return [self._get_return_object_type(x)
                    for x in data[self.controller]]
        except KeyError:
            # fixed: the original string continuation leaked the source
            # indentation into the printed message
            print("Sorry, this method will be implemented in the "
                  "future versions of NetMRI")
            raise NotImplementedException(self.controller, method_name)

    def _get_method_fullname(self, method):
        """Return the full API method name using the controller name.

        **Input**

        :param method: method name
        :return: full API path
        """
        return "{}/{}".format(self.controller, method)

    def _get_return_object_type(self, data):
        """Return a wrapped response which inherits from RemoteModel.

        :param data: API response data
        :return: RemoteModel child class instance, or *data* unchanged
            when it is empty or not a dict
        """
        if not data or not isinstance(data, dict):
            return data
        class_name = data.get("_class")
        obj_class = locate(self._get_remote_class_name(class_name))
        return obj_class(data, self.client)

    def _get_remote_class_name(self, name):
        """Generate the full dotted path to a specific RemoteModel class.

        :param name: name of the model
        :return: full import path for the model
        """
        return "infoblox_netmri.api.remote.models.{pckg}_remote.{name}Remote".format(
            pckg=to_snake(name),
            name=name
        )
And return responses wrapped with a specific class
:param client: InfobloxNetMRI client
Returns full API method name using controller name
**Input**
:param method: method name
:return: full API path
Generate full path to specific RemoteModel instance
:param name: name of model
:return: full path for model
Returns wrapped response which inherits from RemoteModel class
:param data: API response data
:return: RemoteModel child class
Make api request and return list of wrapped objects
:param method_name: name of API methods
:param params: dict-wrapped params for specific API call
Make api request and download a file and return
JSON response or request status dictionary
:param method_name: name of API methods
:param params: dict-wrapped params for specific API call
Make api request and return single wrapped object
:param method_name: name of API methods
:param params: dict-wrapped params for specific API call
See NETMRI-31545 | 1,009 | en | 0.638667 |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six.moves.urllib.parse import unquote
from swift import gettext_ as _
from swift.account.utils import account_listing_response
from swift.common.middleware.acl import parse_acl, format_acl
from swift.common.utils import public
from swift.common.constraints import check_metadata
from swift.common.http import HTTP_NOT_FOUND, HTTP_GONE
from swift.proxy.controllers.base import Controller, clear_info_cache, \
set_info_cache
from swift.common.middleware import listing_formats
from swift.common.swob import HTTPBadRequest, HTTPMethodNotAllowed
from swift.common.request_helpers import get_sys_meta_prefix
class AccountController(Controller):
    """WSGI controller for account requests"""
    server_type = 'Account'
    def __init__(self, app, account_name, **kwargs):
        super(AccountController, self).__init__(app)
        self.account_name = unquote(account_name)
        # account creation/deletion is only exposed when the proxy is
        # configured with allow_account_management
        if not self.app.allow_account_management:
            self.allowed_methods.remove('PUT')
            self.allowed_methods.remove('DELETE')
    def add_acls_from_sys_metadata(self, resp):
        """Translate the internal account-ACL sysmeta header on *resp*
        into the external x-account-access-control header (v2 ACL format).
        """
        if resp.environ['REQUEST_METHOD'] in ('HEAD', 'GET', 'PUT', 'POST'):
            prefix = get_sys_meta_prefix('account') + 'core-'
            name = 'access-control'
            (extname, intname) = ('x-account-' + name, prefix + name)
            # pop the internal header so only the external form is exposed
            acl_dict = parse_acl(version=2, data=resp.headers.pop(intname))
            if acl_dict:  # treat empty dict as empty header
                resp.headers[extname] = format_acl(
                    version=2, acl_dict=acl_dict)
    def GETorHEAD(self, req):
        """Handler for HTTP GET/HEAD requests."""
        length_limit = self.get_name_length_limit()
        if len(self.account_name) > length_limit:
            resp = HTTPBadRequest(request=req)
            resp.body = b'Account name length of %d longer than %d' % \
                (len(self.account_name), length_limit)
            # Don't cache this. We know the account doesn't exist because
            # the name is bad; we don't need to cache that because it's
            # really cheap to recompute.
            return resp
        partition = self.app.account_ring.get_part(self.account_name)
        concurrency = self.app.account_ring.replica_count \
            if self.app.get_policy_options(None).concurrent_gets else 1
        node_iter = self.app.iter_nodes(self.app.account_ring, partition)
        # always request JSON listings from the backend
        params = req.params
        params['format'] = 'json'
        req.params = params
        resp = self.GETorHEAD_base(
            req, _('Account'), node_iter, partition,
            req.swift_entity_path.rstrip('/'), concurrency)
        if resp.status_int == HTTP_NOT_FOUND:
            if resp.headers.get('X-Account-Status', '').lower() == 'deleted':
                resp.status = HTTP_GONE
            elif self.app.account_autocreate:
                # This is kind of a lie; we pretend like the account is
                # there, but it's not. We'll create it as soon as something
                # tries to write to it, but we don't need databases on disk
                # to tell us that nothing's there.
                #
                # We set a header so that certain consumers can tell it's a
                # fake listing. The important one is the PUT of a container
                # to an autocreate account; the proxy checks to see if the
                # account exists before actually performing the PUT and
                # creates the account if necessary. If we feed it a perfect
                # lie, it'll just try to create the container without
                # creating the account, and that'll fail.
                resp = account_listing_response(
                    self.account_name, req,
                    listing_formats.get_listing_content_type(req))
                resp.headers['X-Backend-Fake-Account-Listing'] = 'yes'
        # Cache this. We just made a request to a storage node and got
        # up-to-date information for the account.
        resp.headers['X-Backend-Recheck-Account-Existence'] = str(
            self.app.recheck_account_existence)
        set_info_cache(self.app, req.environ, self.account_name, None, resp)
        if req.environ.get('swift_owner'):
            self.add_acls_from_sys_metadata(resp)
        else:
            # non-owners must not see owner-only headers
            for header in self.app.swift_owner_headers:
                resp.headers.pop(header, None)
        return resp
    @public
    def PUT(self, req):
        """HTTP PUT request handler."""
        if not self.app.allow_account_management:
            return HTTPMethodNotAllowed(
                request=req,
                headers={'Allow': ', '.join(self.allowed_methods)})
        error_response = check_metadata(req, 'account')
        if error_response:
            return error_response
        length_limit = self.get_name_length_limit()
        if len(self.account_name) > length_limit:
            resp = HTTPBadRequest(request=req)
            resp.body = b'Account name length of %d longer than %d' % \
                (len(self.account_name), length_limit)
            return resp
        account_partition, accounts = \
            self.app.account_ring.get_nodes(self.account_name)
        headers = self.generate_request_headers(req, transfer=True)
        # the cached existence info is stale after a PUT
        clear_info_cache(self.app, req.environ, self.account_name)
        resp = self.make_requests(
            req, self.app.account_ring, account_partition, 'PUT',
            req.swift_entity_path, [headers] * len(accounts))
        self.add_acls_from_sys_metadata(resp)
        return resp
    @public
    def POST(self, req):
        """HTTP POST request handler."""
        length_limit = self.get_name_length_limit()
        if len(self.account_name) > length_limit:
            resp = HTTPBadRequest(request=req)
            resp.body = b'Account name length of %d longer than %d' % \
                (len(self.account_name), length_limit)
            return resp
        error_response = check_metadata(req, 'account')
        if error_response:
            return error_response
        account_partition, accounts = \
            self.app.account_ring.get_nodes(self.account_name)
        headers = self.generate_request_headers(req, transfer=True)
        clear_info_cache(self.app, req.environ, self.account_name)
        resp = self.make_requests(
            req, self.app.account_ring, account_partition, 'POST',
            req.swift_entity_path, [headers] * len(accounts))
        if resp.status_int == HTTP_NOT_FOUND and self.app.account_autocreate:
            # autocreate the missing account, then retry the POST once
            self.autocreate_account(req, self.account_name)
            resp = self.make_requests(
                req, self.app.account_ring, account_partition, 'POST',
                req.swift_entity_path, [headers] * len(accounts))
        self.add_acls_from_sys_metadata(resp)
        return resp
    @public
    def DELETE(self, req):
        """HTTP DELETE request handler."""
        # Extra safety in case someone typos a query string for an
        # account-level DELETE request that was really meant to be caught by
        # some middleware.
        if req.query_string:
            return HTTPBadRequest(request=req)
        if not self.app.allow_account_management:
            return HTTPMethodNotAllowed(
                request=req,
                headers={'Allow': ', '.join(self.allowed_methods)})
        account_partition, accounts = \
            self.app.account_ring.get_nodes(self.account_name)
        headers = self.generate_request_headers(req)
        clear_info_cache(self.app, req.environ, self.account_name)
        resp = self.make_requests(
            req, self.app.account_ring, account_partition, 'DELETE',
            req.swift_entity_path, [headers] * len(accounts))
        return resp
| swift/proxy/controllers/account.py | 8,404 | WSGI controller for account requests
HTTP DELETE request handler.
Handler for HTTP GET/HEAD requests.
HTTP POST request handler.
HTTP PUT request handler.
Copyright (c) 2010-2012 OpenStack Foundation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. treat empty dict as empty header Don't cache this. We know the account doesn't exist because the name is bad; we don't need to cache that because it's really cheap to recompute. This is kind of a lie; we pretend like the account is there, but it's not. We'll create it as soon as something tries to write to it, but we don't need databases on disk to tell us that nothing's there. We set a header so that certain consumers can tell it's a fake listing. The important one is the PUT of a container to an autocreate account; the proxy checks to see if the account exists before actually performing the PUT and creates the account if necessary. If we feed it a perfect lie, it'll just try to create the container without creating the account, and that'll fail. Cache this. We just made a request to a storage node and got up-to-date information for the account. Extra safety in case someone typos a query string for an account-level DELETE request that was really meant to be caught by some middleware. | 1,722 | en | 0.940426 |
"""
ASGI config for backend project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Default to the project's settings module unless the environment
# already specifies one.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend.settings')
# Module-level ASGI callable that servers (uvicorn, daphne, ...) import.
application = get_asgi_application()
| backend/backend/asgi.py | 407 | ASGI config for backend project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/ | 213 | en | 0.716352 |
from thriftpy2.thrift import TType
class ThriftError(Exception):
    """Base exception for all errors raised by `aiothrift`."""
class ConnectionClosedError(ThriftError):
    """Raised if the connection to the server was closed."""
class PoolClosedError(ThriftError):
    """Raised when operating on a closed thrift connection pool."""
class ThriftAppError(ThriftError):
    """Application-level thrift exception (thrift's TApplicationException)."""

    thrift_spec = {
        1: (TType.STRING, 'message', False),
        2: (TType.I32, 'type', False),
    }

    UNKNOWN = 0
    UNKNOWN_METHOD = 1
    INVALID_MESSAGE_TYPE = 2
    WRONG_METHOD_NAME = 3
    BAD_SEQUENCE_ID = 4
    MISSING_RESULT = 5
    INTERNAL_ERROR = 6
    PROTOCOL_ERROR = 7

    def __init__(self, type=UNKNOWN, message=None):
        super().__init__()
        self.type = type
        self.message = message

    def __str__(self):
        # an explicit message always wins over the type-derived text
        if self.message:
            return self.message
        known_messages = {
            self.UNKNOWN_METHOD: 'Unknown method',
            self.INVALID_MESSAGE_TYPE: 'Invalid message type',
            self.WRONG_METHOD_NAME: 'Wrong method name',
            self.BAD_SEQUENCE_ID: 'Bad sequence ID',
            self.MISSING_RESULT: 'Missing result',
        }
        return known_messages.get(
            self.type, 'Default (unknown) TApplicationException')
| aiothrift/errors.py | 1,428 | Raised if connection to server was closed.
Raised when operating on a closed thrift connection pool
Application level thrift exceptions.
Base Exception defined by `aiothrift` | 174 | en | 0.95419 |
"""
From https://zenodo.org/record/3539363
"""
import re
def section_text(text):
    """Splits text into sections.

    Assumes text is in a radiology report format, e.g.:

        COMPARISON: Chest radiograph dated XYZ.

        IMPRESSION: ABC...

    Given text like this, it will output text from each section,
    where the section type is determined by the all caps header.

    Returns a three element tuple:
        sections - list containing the text of each section
        section_names - a normalized version of the section name
        section_idx - list of start indices of the text in the section
    """
    # a header is a newline, one space, CAPITALIZED WORDS, then ":" and
    # whitespace; group 1 captures the header text itself
    p_section = re.compile(
        r'\n ([A-Z ()/,-]+):\s', re.DOTALL)
    sections = list()
    section_names = list()
    section_idx = list()
    idx = 0
    s = p_section.search(text, idx)
    if s:
        # everything before the first header is kept as a "preamble" section
        sections.append(text[0:s.start(1)])
        section_names.append('preamble')
        section_idx.append(0)
        while s:
            current_section = s.group(1).lower()
            # get the start of the text for this section
            idx_start = s.end()
            # skip past the first newline to avoid some bad parses
            idx_skip = text[idx_start:].find('\n')
            if idx_skip == -1:
                idx_skip = 0
            # the next header (if any) delimits the end of this section
            s = p_section.search(text, idx_start + idx_skip)
            if s is None:
                idx_end = len(text)
            else:
                idx_end = s.start()
            sections.append(text[idx_start:idx_end])
            section_names.append(current_section)
            section_idx.append(idx_start)
    else:
        # no headers found: treat the whole text as one section
        sections.append(text)
        section_names.append('full report')
        section_idx.append(0)
    section_names = normalize_section_names(section_names)
    # remove empty sections
    # this handles when the report starts with a finding-like statement
    # .. but this statement is not a section, more like a report title
    # e.g. p10/p10103318/s57408307
    # CHEST, PA LATERAL:
    #
    # INDICATION: This is the actual section ....
    # it also helps when there are multiple findings sections
    # usually one is empty
    # (iterate in reverse so pop() does not shift pending indices)
    for i in reversed(range(len(section_names))):
        if section_names[i] in ('impression', 'findings'):
            if sections[i].strip() == '':
                sections.pop(i)
                section_names.pop(i)
                section_idx.pop(i)
    if ('impression' not in section_names) & ('findings' not in section_names):
        # create a new section for the final paragraph
        # ('\n \n' is the paragraph separator used in these reports)
        if '\n \n' in sections[-1]:
            sections.append('\n \n'.join(sections[-1].split('\n \n')[1:]))
            sections[-2] = sections[-2].split('\n \n')[0]
            section_names.append('last_paragraph')
            section_idx.append(section_idx[-1] + len(sections[-2]))
    return sections, section_names, section_idx
def normalize_section_names(section_names):
    """Map raw section headers onto a canonical set of section names.

    Returns a new list; the input list is left untouched. Lookup order:
    exact alias table, then substring match against the main section
    names, then a regex for headers that describe the whole study
    (treated as 'findings'); unrecognized names pass through unchanged.
    """
    # alias table derived from header frequencies in the corpus
    # (includes common misspellings)
    frequent_sections = {
        "preamble": "preamble",  # 227885
        "impression": "impression",  # 187759
        "comparison": "comparison",  # 154647
        "indication": "indication",  # 153730
        "findings": "findings",  # 149842
        "examination": "examination",  # 94094
        "technique": "technique",  # 81402
        "history": "history",  # 45624
        "comparisons": "comparison",  # 8686
        "clinical history": "history",  # 7121
        "reason for examination": "indication",  # 5845
        "notification": "notification",  # 5749
        "reason for exam": "indication",  # 4430
        "clinical information": "history",  # 4024
        "exam": "examination",  # 3907
        "clinical indication": "indication",  # 1945
        "conclusion": "impression",  # 1802
        "chest, two views": "findings",  # 1735
        "recommendation(s)": "recommendations",  # 1700
        "type of examination": "examination",  # 1678
        "reference exam": "comparison",  # 347
        "patient history": "history",  # 251
        "addendum": "addendum",  # 183
        "comparison exam": "comparison",  # 163
        "date": "date",  # 108
        "comment": "comment",  # 88
        "findings and impression": "impression",  # 87
        "wet read": "wet read",  # 83
        "comparison film": "comparison",  # 79
        "recommendations": "recommendations",  # 72
        "findings/impression": "impression",  # 47
        "pfi": "history",
        'recommendation': 'recommendations',
        'wetread': 'wet read',
        'ndication': 'impression',  # 1
        'impresson': 'impression',  # 2
        'imprression': 'impression',  # 1
        'imoression': 'impression',  # 1
        'impressoin': 'impression',  # 1
        'imprssion': 'impression',  # 1
        'impresion': 'impression',  # 1
        'imperssion': 'impression',  # 1
        'mpression': 'impression',  # 1
        'impession': 'impression',  # 3
        'findings/ impression': 'impression',  # 1
        'finding': 'findings',  # 8
        'findins': 'findings',
        'findindgs': 'findings',  # 1
        'findgings': 'findings',  # 1
        'findngs': 'findings',  # 1
        'findnings': 'findings',  # 1
        'finidngs': 'findings',  # 2
        'idication': 'indication',  # 1
        'reference findings': 'findings',  # 1
        'comparision': 'comparison',  # 2
        'comparsion': 'comparison',  # 1
        'comparrison': 'comparison',  # 1
        'comparisions': 'comparison'  # 1
    }

    # phrases that describe the entire study; such headers are treated
    # as equivalent to a findings section
    study_phrases = [
        'chest',
        'portable',
        'pa and lateral',
        'lateral and pa',
        'ap and lateral',
        'lateral and ap',
        'frontal and',
        'two views',
        'frontal view',
        'pa view',
        'ap view',
        'one view',
        'lateral view',
        'bone window',
        'frontal upright',
        'frontal semi-upright',
        'ribs',
        'pa and lat'
    ]
    p_findings = re.compile('({})'.format('|'.join(study_phrases)))

    main_sections = [
        'impression', 'findings', 'history', 'comparison',
        'addendum'
    ]

    def canonical(name):
        # exact alias match first
        if name in frequent_sections:
            return frequent_sections[name]
        # then substring match against the main section names
        for main in main_sections:
            if main in name:
                return main
        # headers describing the whole study count as findings
        if p_findings.search(name) is not None:
            return 'findings'
        return name

    return [canonical(s.lower().strip()) for s in section_names]
def custom_mimic_cxr_rules():
    """Return hand-curated per-study overrides for MIMIC-CXR parsing.

    Returns a two element tuple:
        custom_section_names - study id -> section name to use for that study
        custom_indices - study id -> [start, end] character offsets of the
            findings text ([0, 0] marks studies with no findings at all)
    """
    custom_section_names = {
        's50913680': 'recommendations',  # files/p11/p11851243/s50913680.txt
        's59363654': 'examination',  # files/p12/p12128253/s59363654.txt
        's59279892': 'technique',  # files/p13/p13150370/s59279892.txt
        's59768032': 'recommendations',  # files/p13/p13249077/s59768032.txt
        's57936451': 'indication',  # files/p14/p14325424/s57936451.txt
        's50058765': 'indication',  # files/p14/p14731346/s50058765.txt
        's53356173': 'examination',  # files/p15/p15898350/s53356173.txt
        's53202765': 'technique',  # files/p16/p16076182/s53202765.txt
        's50808053': 'technique',  # files/p16/p16631485/s50808053.txt
        's51966317': 'indication',  # files/p10/p10817099/s51966317.txt
        's50743547': 'examination',  # files/p11/p11388341/s50743547.txt
        's56451190': 'note',  # files/p11/p11842879/s56451190.txt
        's59067458': 'recommendations',  # files/p11/p11984647/s59067458.txt
        's59215320': 'examination',  # files/p12/p12408912/s59215320.txt
        's55124749': 'indication',  # files/p12/p12428492/s55124749.txt
        's54365831': 'indication',  # files/p13/p13876470/s54365831.txt
        's59087630': 'recommendations',  # files/p14/p14267880/s59087630.txt
        's58157373': 'recommendations',  # files/p15/p15032392/s58157373.txt
        's56482935': 'recommendations',  # files/p15/p15388421/s56482935.txt
        's58375018': 'recommendations',  # files/p15/p15505556/s58375018.txt
        's54654948': 'indication',  # files/p17/p17090359/s54654948.txt
        's55157853': 'examination',  # files/p18/p18975498/s55157853.txt
        's51491012': 'history',  # files/p19/p19314266/s51491012.txt
    }
    custom_indices = {
        's50525523': [201, 349],  # files/p10/p10602608/s50525523.txt
        's57564132': [233, 554],  # files/p10/p10637168/s57564132.txt
        's59982525': [313, 717],  # files/p11/p11989982/s59982525.txt
        's53488209': [149, 475],  # files/p12/p12458657/s53488209.txt
        's54875119': [234, 988],  # files/p13/p13687044/s54875119.txt
        's50196495': [59, 399],  # files/p13/p13894879/s50196495.txt
        's56579911': [59, 218],  # files/p15/p15394326/s56579911.txt
        's52648681': [292, 631],  # files/p15/p15666238/s52648681.txt
        's59889364': [172, 453],  # files/p15/p15835529/s59889364.txt
        's53514462': [73, 377],  # files/p16/p16297706/s53514462.txt
        's59505494': [59, 450],  # files/p16/p16730991/s59505494.txt
        's53182247': [59, 412],  # files/p16/p16770442/s53182247.txt
        's51410602': [47, 320],  # files/p17/p17069955/s51410602.txt
        's56412866': [522, 822],  # files/p17/p17612000/s56412866.txt
        's54986978': [59, 306],  # files/p17/p17912487/s54986978.txt
        's59003148': [262, 505],  # files/p17/p17916384/s59003148.txt
        's57150433': [61, 394],  # files/p18/p18335791/s57150433.txt
        's56760320': [219, 457],  # files/p18/p18418794/s56760320.txt
        's59562049': [158, 348],  # files/p18/p18502016/s59562049.txt
        's52674888': [145, 296],  # files/p19/p19381919/s52674888.txt
        's55258338': [192, 568],  # files/p13/p13719117/s55258338.txt
        's59330497': [140, 655],  # files/p15/p15479218/s59330497.txt
        's52119491': [179, 454],  # files/p17/p17959278/s52119491.txt
        # below have no findings at all in the entire report
        's58235663': [0, 0],  # files/p11/p11573679/s58235663.txt
        's50798377': [0, 0],  # files/p12/p12632853/s50798377.txt
        's54168089': [0, 0],  # files/p14/p14463099/s54168089.txt
        's53071062': [0, 0],  # files/p15/p15774521/s53071062.txt
        's56724958': [0, 0],  # files/p16/p16175671/s56724958.txt
        's54231141': [0, 0],  # files/p16/p16312859/s54231141.txt
        's53607029': [0, 0],  # files/p17/p17603668/s53607029.txt
        's52035334': [0, 0],  # files/p19/p19349312/s52035334.txt
    }
    return custom_section_names, custom_indices
| src/data/datasets/mimic_cxr/section_parser.py | 10,850 | Splits text into sections.
Assumes text is in a radiology report format, e.g.:
COMPARISON: Chest radiograph dated XYZ.
IMPRESSION: ABC...
Given text like this, it will output text from each section,
where the section type is determined by the all caps header.
Returns a three element tuple:
sections - list containing the text of each section
section_names - a normalized version of the section name
section_idx - list of start indices of the text in the section
From https://zenodo.org/record/3539363
get the start of the text for this section skip past the first newline to avoid some bad parses remove empty sections this handles when the report starts with a finding-like statement .. but this statement is not a section, more like a report title e.g. p10/p10103318/s57408307 CHEST, PA LATERAL: INDICATION: This is the actual section .... it also helps when there are multiple findings sections usually one is empty create a new section for the final paragraph first, lower case all 227885 187759 154647 153730 149842 94094 81402 45624 8686 7121 5845 5749 4430 4024 3907 1945 1802 1735 1700 1678 347 251 183 163 108 88 87 83 79 72 47 1 2 1 1 1 1 1 1 1 3 ,1 ,8 ,1 ,1 ,1 ,1 ,2 ,1 ,1 ,2 ,1 ,1 ,1 if it looks like it is describing the entire study it's equivalent to findings group similar phrasings for impression files/p11/p11851243/s50913680.txt files/p12/p12128253/s59363654.txt files/p13/p13150370/s59279892.txt files/p13/p13249077/s59768032.txt files/p14/p14325424/s57936451.txt files/p14/p14731346/s50058765.txt files/p15/p15898350/s53356173.txt files/p16/p16076182/s53202765.txt files/p16/p16631485/s50808053.txt files/p10/p10817099/s51966317.txt files/p11/p11388341/s50743547.txt files/p11/p11842879/s56451190.txt files/p11/p11984647/s59067458.txt files/p12/p12408912/s59215320.txt files/p12/p12428492/s55124749.txt files/p13/p13876470/s54365831.txt files/p14/p14267880/s59087630.txt files/p15/p15032392/s58157373.txt files/p15/p15388421/s56482935.txt files/p15/p15505556/s58375018.txt files/p17/p17090359/s54654948.txt files/p18/p18975498/s55157853.txt files/p19/p19314266/s51491012.txt files/p10/p10602608/s50525523.txt files/p10/p10637168/s57564132.txt files/p11/p11989982/s59982525.txt files/p12/p12458657/s53488209.txt files/p13/p13687044/s54875119.txt files/p13/p13894879/s50196495.txt files/p15/p15394326/s56579911.txt files/p15/p15666238/s52648681.txt files/p15/p15835529/s59889364.txt files/p16/p16297706/s53514462.txt files/p16/p16730991/s59505494.txt 
files/p16/p16770442/s53182247.txt files/p17/p17069955/s51410602.txt files/p17/p17612000/s56412866.txt files/p17/p17912487/s54986978.txt files/p17/p17916384/s59003148.txt files/p18/p18335791/s57150433.txt files/p18/p18418794/s56760320.txt files/p18/p18502016/s59562049.txt files/p19/p19381919/s52674888.txt files/p13/p13719117/s55258338.txt files/p15/p15479218/s59330497.txt files/p17/p17959278/s52119491.txt below have no findings at all in the entire report files/p11/p11573679/s58235663.txt files/p12/p12632853/s50798377.txt files/p14/p14463099/s54168089.txt files/p15/p15774521/s53071062.txt files/p16/p16175671/s56724958.txt files/p16/p16312859/s54231141.txt files/p17/p17603668/s53607029.txt files/p19/p19349312/s52035334.txt | 3,241 | en | 0.580666 |
# SVG Path specification parser
import re
from . import path
# Single-character SVG path commands; uppercase letters take absolute coordinates.
COMMANDS = set('MmZzLlHhVvCcSsQqTtAa')
UPPERCASE = set('MZLHVCSQTA')

# Splits a path string on command letters, keeping the letters (capture group).
COMMAND_RE = re.compile("([MmZzLlHhVvCcSsQqTtAa])")
# Signed decimal number with optional exponent. Raw string so that ``\.`` is a
# regex escape rather than an invalid (deprecated in py3) string escape.
FLOAT_RE = re.compile(r"[-+]?[0-9]*\.?[0-9]+(?:[eE][-+]?[0-9]+)?")
def _tokenize_path(pathdef):
    """Yield the command letters and numeric tokens of an SVG path string."""
    for chunk in COMMAND_RE.split(pathdef):
        if chunk in COMMANDS:
            yield chunk
            continue  # a lone command letter contains no numbers
        for number in FLOAT_RE.findall(chunk):
            yield number
def parse_path(pathdef, current_pos=0j):
    """Parse an SVG path definition string into a ``path.Path`` of segments.

    In the SVG specs, initial movetos are absolute, even if
    specified as 'm'. This is the default behavior here as well.
    But if you pass in a current_pos variable, the initial moveto
    will be relative to that current_pos. This is useful.

    :param pathdef: the SVG path data string (the ``d`` attribute).
    :param current_pos: starting position as a complex number (x + y*1j).
    :raises ValueError: on an implicit command before any explicit one, or on
        malformed numeric arguments.
    """
    elements = list(_tokenize_path(pathdef))
    # Reverse for easy use of .pop()
    elements.reverse()

    segments = path.Path()
    start_pos = None
    command = None

    while elements:
        if elements[-1] in COMMANDS:
            # New command.
            last_command = command  # Used by S and T
            command = elements.pop()
            absolute = command in UPPERCASE
            command = command.upper()
        else:
            # If this element starts with numbers, it is an implicit command
            # and we don't change the command. Check that it's allowed:
            if command is None:
                raise ValueError("Unallowed implicit command in %s, position %s" % (
                    pathdef, len(pathdef.split()) - len(elements)))

        if command == 'M':
            # Moveto command.
            x = elements.pop()
            y = elements.pop()
            pos = float(x) + float(y) * 1j
            if absolute:
                current_pos = pos
            else:
                current_pos += pos

            # when M is called, reset start_pos
            # This behavior of Z is defined in svg spec:
            # http://www.w3.org/TR/SVG/paths.html#PathDataClosePathCommand
            start_pos = current_pos

            # Implicit moveto commands are treated as lineto commands.
            # So we set command to lineto here, in case there are
            # further implicit commands after this moveto.
            command = 'L'

        elif command == 'Z':
            # Close path
            segments.append(path.Line(current_pos, start_pos))
            segments.closed = True
            current_pos = start_pos
            start_pos = None
            command = None  # You can't have implicit commands after closing.

        elif command == 'L':
            x = elements.pop()
            y = elements.pop()
            pos = float(x) + float(y) * 1j
            if not absolute:
                pos += current_pos
            segments.append(path.Line(current_pos, pos))
            current_pos = pos

        elif command == 'H':
            # Horizontal lineto: only an x coordinate is given.
            x = elements.pop()
            pos = float(x) + current_pos.imag * 1j
            if not absolute:
                pos += current_pos.real
            segments.append(path.Line(current_pos, pos))
            current_pos = pos

        elif command == 'V':
            # Vertical lineto: only a y coordinate is given.
            y = elements.pop()
            pos = current_pos.real + float(y) * 1j
            if not absolute:
                pos += current_pos.imag * 1j
            segments.append(path.Line(current_pos, pos))
            current_pos = pos

        elif command == 'C':
            # Cubic Bezier. A malformed path raises ValueError from float();
            # the previous code caught it with a Python-2 ``print`` statement
            # and then crashed with a NameError on the undefined control point.
            # Letting the ValueError propagate is both py3-valid and clearer.
            control1 = float(elements.pop()) + float(elements.pop()) * 1j
            control2 = float(elements.pop()) + float(elements.pop()) * 1j
            end = float(elements.pop()) + float(elements.pop()) * 1j

            if not absolute:
                control1 += current_pos
                control2 += current_pos
                end += current_pos

            segments.append(path.CubicBezier(current_pos, control1, control2, end))
            current_pos = end

        elif command == 'S':
            # Smooth curve. First control point is the "reflection" of
            # the second control point in the previous path.
            # Tuple membership instead of substring test: ``None in 'CS'``
            # raises TypeError when there is no previous command.
            if last_command not in ('C', 'S'):
                # If there is no previous command or if the previous command
                # was not an C, c, S or s, assume the first control point is
                # coincident with the current point.
                control1 = current_pos
            else:
                # The first control point is assumed to be the reflection of
                # the second control point on the previous command relative
                # to the current point.
                control1 = current_pos + current_pos - segments[-1].control2

            control2 = float(elements.pop()) + float(elements.pop()) * 1j
            end = float(elements.pop()) + float(elements.pop()) * 1j

            if not absolute:
                control2 += current_pos
                end += current_pos

            segments.append(path.CubicBezier(current_pos, control1, control2, end))
            current_pos = end

        elif command == 'Q':
            # Quadratic Bezier.
            control = float(elements.pop()) + float(elements.pop()) * 1j
            end = float(elements.pop()) + float(elements.pop()) * 1j

            if not absolute:
                control += current_pos
                end += current_pos

            segments.append(path.QuadraticBezier(current_pos, control, end))
            current_pos = end

        elif command == 'T':
            # Smooth curve. Control point is the "reflection" of
            # the second control point in the previous path.
            # Tuple membership for the same None-safety reason as 'S'.
            if last_command not in ('Q', 'T'):
                # If there is no previous command or if the previous command
                # was not an Q, q, T or t, assume the first control point is
                # coincident with the current point.
                control = current_pos
            else:
                # The control point is assumed to be the reflection of
                # the control point on the previous command relative
                # to the current point.
                control = current_pos + current_pos - segments[-1].control

            end = float(elements.pop()) + float(elements.pop()) * 1j

            if not absolute:
                end += current_pos

            segments.append(path.QuadraticBezier(current_pos, control, end))
            current_pos = end

        elif command == 'A':
            # Elliptical arc: rx ry x-axis-rotation large-arc-flag sweep-flag x y
            radius = float(elements.pop()) + float(elements.pop()) * 1j
            rotation = float(elements.pop())
            arc = float(elements.pop())
            sweep = float(elements.pop())
            end = float(elements.pop()) + float(elements.pop()) * 1j

            if not absolute:
                end += current_pos

            segments.append(path.Arc(current_pos, radius, rotation, arc, sweep, end))
            current_pos = end

    return segments
| svg/path/parser.py | 6,912 | SVG Path specification parser In the SVG specs, initial movetos are absolute, even if specified as 'm'. This is the default behavior here as well. But if you pass in a current_pos variable, the initial moveto will be relative to that current_pos. This is useful. Reverse for easy use of .pop() New command. Used by S and T If this element starts with numbers, it is an implicit command and we don't change the command. Check that it's allowed: Moveto command. when M is called, reset start_pos This behavior of Z is defined in svg spec: http://www.w3.org/TR/SVG/paths.htmlPathDataClosePathCommand Implicit moveto commands are treated as lineto commands. So we set command to lineto here, in case there are further implicit commands after this moveto. Close path You can't have implicit commands after closing. Smooth curve. First control point is the "reflection" of the second control point in the previous path. If there is no previous command or if the previous command was not an C, c, S or s, assume the first control point is coincident with the current point. The first control point is assumed to be the reflection of the second control point on the previous command relative to the current point. Smooth curve. Control point is the "reflection" of the second control point in the previous path. If there is no previous command or if the previous command was not an Q, q, T or t, assume the first control point is coincident with the current point. The control point is assumed to be the reflection of the control point on the previous command relative to the current point. | 1,582 | en | 0.907254 |
# -*- coding: utf-8 -*-
from tests.integration import TestsBase
from chsdi.models.bod import Catalog
from sqlalchemy.orm import scoped_session, sessionmaker
from chsdi.views.catalog import create_digraph
from chsdi.lib.filters import filter_by_geodata_staging
class TestCatalogService(TestsBase):
    """Integration tests for the catalog REST services."""

    def test_nodes_connection(self):
        """Every catalog row of every topic must be reachable in the catalog digraph."""
        session = None  # initialized first so the finally clause never sees an unbound name
        try:
            geodata_staging = self.testapp.app.registry.settings['geodata_staging']
            session = scoped_session(sessionmaker())
            topics = self.testapp.get('/rest/services', status=200)
            for t in topics.json['topics']:
                topic = t.get('id')
                query = session.query(Catalog).filter(Catalog.topic == topic)\
                    .order_by(Catalog.orderKey)
                query = filter_by_geodata_staging(query, Catalog.staging, geodata_staging)
                rows = query.all()
                if (rows):
                    graph, meta, root_id = create_digraph(rows, 'fr')
                    nodes = graph.nodes()
                    # A node count mismatch means some rows are unconnected leaves.
                    if len(nodes) != len(rows):
                        for row in rows:
                            if row.id not in nodes:
                                raise Exception('%s %s %s is unconnected leaf' % (topic, row.category, row.layerBodId))
        finally:
            if session:
                session.close()

    def test_catalog_no_params(self):
        """A catalog response exposes a categorized root node with children."""
        resp = self.testapp.get('/rest/services/blw/CatalogServer', status=200)
        self.assertTrue(resp.content_type == 'application/json')
        self.assertTrue('root' in resp.json['results'])
        self.assertTrue('children' in resp.json['results']['root'])
        self.assertTrue('selectedOpen' in resp.json['results']['root']['children'][0])
        self.assertTrue('category' in resp.json['results']['root'])

    def test_catalog_with_callback(self):
        """A callback parameter switches the response to JSONP."""
        resp = self.testapp.get('/rest/services/blw/CatalogServer', params={'callback': 'cb_'}, status=200)
        self.assertEqual(resp.content_type, 'application/javascript')

    def test_catalog_existing_map_no_catalog(self):
        """A topic that exists but has no catalog returns 404."""
        self.testapp.get('/rest/services/all/CatalogServer', status=404)

    def test_catalog_wrong_map(self):
        """An unknown topic returns 400."""
        self.testapp.get('/rest/services/foo/CatalogServer', status=400)

    def test_catalog_ordering(self):
        """Catalog children are returned in the configured order."""
        resp = self.testapp.get('/rest/services/inspire/CatalogServer', params={'lang': 'en'}, status=200)
        self.assertEqual(resp.content_type, 'application/json')
        self.assertTrue('AGNES' in resp.json['results']['root']['children'][0]['children'][0]['children'][0]['label'])
        self.assertTrue('Geoid in CH1903' in resp.json['results']['root']['children'][0]['children'][0]['children'][1]['label'])

    def test_catalog_languages(self):
        """The catalog must be available in all five supported languages."""
        for lang in ('de', 'fr', 'it', 'rm', 'en'):
            link = '/rest/services/ech/CatalogServer?lang=' + lang
            resp = self.testapp.get(link)
            self.assertEqual(resp.status_int, 200, link)

    def test_layersconfig_with_callback(self):
        """A callback parameter switches the layersConfig response to JSONP."""
        resp = self.testapp.get('/rest/services/blw/MapServer/layersConfig', params={'callback': 'cb_'}, status=200)
        self.assertEqual(resp.content_type, 'application/javascript')

    def test_all_catalogs(self):
        """Every node of every topic's catalog must exist in the catalog table."""
        def existInList(node, l):
            # Recursively check that `node` (and its children) appear in `l`.
            found = False
            for entry in l:
                if entry.id == node.get('id'):
                    found = True
                    break
            if not found:
                print(node.get('id'))  # parenthesized: the py2 print statement is a py3 SyntaxError
                return False
            if 'children' in node:
                for child in node.get('children'):
                    if not existInList(child, l):
                        return False
            return True

        # Catalog, scoped_session and sessionmaker are imported at module level;
        # the former duplicate in-method imports were removed.
        DBSession = scoped_session(sessionmaker())
        old_staging = self.testapp.app.registry.settings['geodata_staging']
        # We fix staging for next calls to prod
        self.testapp.app.registry.settings['geodata_staging'] = u'prod'
        try:
            topics = self.testapp.get('/rest/services', status=200)
            for t in topics.json['topics']:
                topic = t.get('id')
                # Get catalog
                catalog = self.testapp.get('/rest/services/' + topic + '/CatalogServer', status=200)
                # Get flat catalog table entries
                query = DBSession.query(Catalog).filter(Catalog.topic == topic).filter(Catalog.staging == u'prod')
                entries = query.all()
                # Check if every node in the catalog is in view_catalog of db
                self.assertTrue(existInList(catalog.json['results']['root'], entries))
        finally:
            # reset staging to previous setting
            self.testapp.app.registry.settings['geodata_staging'] = old_staging
            DBSession.close()

    def test_catalogs_with_layersconfig(self):
        """Every layer node of every catalog must exist in the topic's layersConfig."""
        def existInList(node, l):
            # Only nodes of category 'layer' need a layersConfig entry.
            if node.get('category') != 'layer':
                return True
            found = False
            for entry in l:
                if entry == node.get('layerBodId'):
                    found = True
                    break
            if not found:
                print(node.get('layerBodId'))  # parenthesized: py2 print statement is a py3 SyntaxError
                return False
            if 'children' in node:
                for child in node.get('children'):
                    if not existInList(child, l):
                        return False
            return True

        # scoped_session / sessionmaker come from the module-level import.
        DBSession = scoped_session(sessionmaker())
        old_staging = self.testapp.app.registry.settings['geodata_staging']
        # We fix staging for next calls to prod
        self.testapp.app.registry.settings['geodata_staging'] = u'prod'
        try:
            topics = self.testapp.get('/rest/services', status=200)
            for t in topics.json['topics']:
                topic = t.get('id')
                # Get catalog
                catalog = self.testapp.get('/rest/services/' + topic + '/CatalogServer', status=200)
                # Get LayersConfig for this topic
                layersconf = self.testapp.get('/rest/services/' + topic + '/MapServer/layersConfig', status=200)
                # Check if all layers of catalog are in LayersConfig
                self.assertTrue(existInList(catalog.json['results']['root'], layersconf.json), 'For Topic: ' + topic)
        finally:
            # reset staging to previous setting
            self.testapp.app.registry.settings['geodata_staging'] = old_staging
            DBSession.close()
| tests/integration/test_catalog.py | 6,699 | -*- coding: utf-8 -*- We fix staging for next calls to prod Get catalog Get flat catalog table entries Check if every node in the catalog is in view_catalog of db reset staging to previous setting We fix staging for next calls to prod Get catalog Get LayersConfig for this topic Check if all layers of catalog are in LayersConfig reset staging to previous setting | 363 | en | 0.681486 |
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import typing as t
from elastic_transport import ObjectApiResponse
from ._base import NamespacedClient
from .utils import _rewrite_parameters
class TextStructureClient(NamespacedClient):
    """Client for the text-structure APIs."""

    @_rewrite_parameters(
        body_name="text_files",
    )
    def find_structure(
        self,
        *,
        text_files: t.Union[t.List[t.Any], t.Tuple[t.Any, ...]],
        charset: t.Optional[str] = None,
        column_names: t.Optional[str] = None,
        delimiter: t.Optional[str] = None,
        explain: t.Optional[bool] = None,
        format: t.Optional[str] = None,
        grok_pattern: t.Optional[str] = None,
        has_header_row: t.Optional[bool] = None,
        line_merge_size_limit: t.Optional[int] = None,
        lines_to_sample: t.Optional[int] = None,
        quote: t.Optional[str] = None,
        should_trim_fields: t.Optional[bool] = None,
        timeout: t.Optional[t.Union[int, str]] = None,
        timestamp_field: t.Optional[str] = None,
        timestamp_format: t.Optional[str] = None,
    ) -> ObjectApiResponse[t.Any]:
        """
        Finds the structure of a text file. The text file must contain data that is
        suitable to be ingested into Elasticsearch.

        `<https://www.elastic.co/guide/en/elasticsearch/reference/current/find-structure.html>`_

        :param text_files: The lines of text to analyze, sent as the NDJSON body.
        :param charset: The text's character set (e.g. UTF-8, UTF-16LE,
            windows-1252, EUC-JP); auto-detected when omitted.
        :param column_names: For delimited text, a comma-separated list of column
            names; otherwise taken from the header row, or generated as
            "column1", "column2", etc.
        :param delimiter: For delimited text, the single delimiter character; by
            default comma, tab, semi-colon and pipe are considered.
        :param explain: If true, the response includes an ``explanation`` field
            describing how the structure finder produced its result.
        :param format: High level structure of the text: ndjson, xml, delimited,
            or semi_structured_text; auto-detected when omitted.
        :param grok_pattern: For semi_structured_text, the Grok pattern used to
            extract fields from every message; generated when omitted.
        :param has_header_row: For delimited text, whether the column names are
            in the first row; guessed when omitted.
        :param line_merge_size_limit: Maximum number of characters in a message
            when merging lines while analyzing semi-structured text.
        :param lines_to_sample: Number of lines (from the start of the text) to
            include in the structural analysis; minimum 2.
        :param quote: For delimited text, the single quote character (default
            double quote).
        :param should_trim_fields: For delimited text, whether whitespace is
            trimmed from values between delimiters.
        :param timeout: Maximum time the structure analysis may take before it
            is aborted.
        :param timestamp_field: Name of the timestamp field in the file.
        :param timestamp_format: The Java time format of the timestamp field.
        """
        if text_files is None:
            raise ValueError("Empty value passed for parameter 'text_files'")
        __path = "/_text_structure/find_structure"
        # Collect the optional query parameters in declaration order, skipping
        # any that were left unset.
        __query: t.Dict[str, t.Any] = {
            name: value
            for name, value in (
                ("charset", charset),
                ("column_names", column_names),
                ("delimiter", delimiter),
                ("explain", explain),
                ("format", format),
                ("grok_pattern", grok_pattern),
                ("has_header_row", has_header_row),
                ("line_merge_size_limit", line_merge_size_limit),
                ("lines_to_sample", lines_to_sample),
                ("quote", quote),
                ("should_trim_fields", should_trim_fields),
                ("timeout", timeout),
                ("timestamp_field", timestamp_field),
                ("timestamp_format", timestamp_format),
            )
            if value is not None
        }
        __headers = {
            "accept": "application/json",
            "content-type": "application/x-ndjson",
        }
        return self.perform_request(  # type: ignore[return-value]
            "POST", __path, params=__query, headers=__headers, body=text_files
        )
| elasticsearch/_sync/client/text_structure.py | 8,785 | Finds the structure of a text file. The text file must contain data that is suitable
to be ingested into Elasticsearch.
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/find-structure.html>`_
:param text_files:
:param charset: The text’s character set. It must be a character set that is
supported by the JVM that Elasticsearch uses. For example, UTF-8, UTF-16LE,
windows-1252, or EUC-JP. If this parameter is not specified, the structure
finder chooses an appropriate character set.
:param column_names: If you have set format to delimited, you can specify the
column names in a comma-separated list. If this parameter is not specified,
the structure finder uses the column names from the header row of the text.
If the text does not have a header role, columns are named "column1", "column2",
"column3", etc.
:param delimiter: If you have set format to delimited, you can specify the character
used to delimit the values in each row. Only a single character is supported;
the delimiter cannot have multiple characters. By default, the API considers
the following possibilities: comma, tab, semi-colon, and pipe (|). In this
default scenario, all rows must have the same number of fields for the delimited
format to be detected. If you specify a delimiter, up to 10% of the rows
can have a different number of columns than the first row.
:param explain: If this parameter is set to true, the response includes a field
named explanation, which is an array of strings that indicate how the structure
finder produced its result.
:param format: The high level structure of the text. Valid values are ndjson,
xml, delimited, and semi_structured_text. By default, the API chooses the
format. In this default scenario, all rows must have the same number of fields
for a delimited format to be detected. If the format is set to delimited
and the delimiter is not set, however, the API tolerates up to 5% of rows
that have a different number of columns than the first row.
:param grok_pattern: If you have set format to semi_structured_text, you can
specify a Grok pattern that is used to extract fields from every message
in the text. The name of the timestamp field in the Grok pattern must match
what is specified in the timestamp_field parameter. If that parameter is
not specified, the name of the timestamp field in the Grok pattern must match
"timestamp". If grok_pattern is not specified, the structure finder creates
a Grok pattern.
:param has_header_row: If you have set format to delimited, you can use this
parameter to indicate whether the column names are in the first row of the
text. If this parameter is not specified, the structure finder guesses based
on the similarity of the first row of the text to other rows.
:param line_merge_size_limit: The maximum number of characters in a message when
lines are merged to form messages while analyzing semi-structured text. If
you have extremely long messages you may need to increase this, but be aware
that this may lead to very long processing times if the way to group lines
into messages is misdetected.
:param lines_to_sample: The number of lines to include in the structural analysis,
starting from the beginning of the text. The minimum is 2; If the value of
this parameter is greater than the number of lines in the text, the analysis
proceeds (as long as there are at least two lines in the text) for all of
the lines.
:param quote: If you have set format to delimited, you can specify the character
used to quote the values in each row if they contain newlines or the delimiter
character. Only a single character is supported. If this parameter is not
specified, the default value is a double quote ("). If your delimited text
format does not use quoting, a workaround is to set this argument to a character
that does not appear anywhere in the sample.
:param should_trim_fields: If you have set format to delimited, you can specify
whether values between delimiters should have whitespace trimmed from them.
If this parameter is not specified and the delimiter is pipe (|), the default
value is true. Otherwise, the default value is false.
:param timeout: Sets the maximum amount of time that the structure analysis make
take. If the analysis is still running when the timeout expires then it will
be aborted.
:param timestamp_field: Optional parameter to specify the timestamp field in
the file
:param timestamp_format: The Java time format of the timestamp field in the text.
Licensed to Elasticsearch B.V. under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. Elasticsearch B.V. licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. type: ignore[return-value] | 5,438 | en | 0.789518 |
"""
Tests for the utils module
"""
import datetime
import operator as op
from math import ceil
from types import SimpleNamespace
import pytest
import pytz
from mitol.common.utils import (
is_near_now,
has_equal_properties,
first_or_none,
first_matching_item,
max_or_none,
partition_to_lists,
unique,
unique_ignore_case,
item_at_index_or_none,
all_equal,
all_unique,
has_all_keys,
group_into_dict,
now_in_utc,
filter_dict_by_key_set,
chunks,
get_error_response_summary,
)
from ecommerce.factories import Order, ReceiptFactory
from main.utils import (
get_field_names,
is_empty_file,
serialize_model_object,
is_blank,
partition_around_index,
format_month_day,
)
from main.test_utils import format_as_iso8601, MockResponse
def test_now_in_utc():
    """now_in_utc() should return the current time set to the UTC time zone"""
    current = now_in_utc()
    assert current.tzinfo == pytz.UTC
    assert is_near_now(current)
def test_is_near_now():
    """
    is_near_now should accept the current moment but reject timestamps a few
    seconds away in either direction
    """
    now = datetime.datetime.now(tz=pytz.UTC)
    offset = datetime.timedelta(0, 6)
    assert is_near_now(now) is True
    assert is_near_now(now + offset) is False
    assert is_near_now(now - offset) is False
def test_first_or_none():
    """
    first_or_none should yield the first element of an iterable, or None when
    the iterable is empty
    """
    assert first_or_none(range(1, 5)) == 1
    assert first_or_none([1, 2, 3]) == 1
    assert first_or_none(set()) is None
    assert first_or_none([]) is None
def test_first_matching_item():
    """first_matching_item should return the first item where the predicate function returns true"""
    is_even = lambda num: num % 2 == 0
    assert first_matching_item([1, 2, 3, 4, 5], is_even) == 2
    assert first_matching_item([], lambda item: True) is None
    assert first_matching_item(["x", "y", "z"], lambda item: False) is None
def test_max_or_none():
    """
    max_or_none should return the maximum of an iterable, or None when the
    iterable has no items
    """
    assert max_or_none([]) is None
    assert max_or_none([1, 3, 5, 4, 2]) == 5
    assert max_or_none(value for value in [5, 4, 3, 2, 1]) == 5
def test_unique():
    """
    unique() should yield each distinct element of an iterable once, preserving
    first-seen order
    """
    ints_with_repeats = [1, 2, 2, 3, 3, 0, 3]
    assert list(unique(ints_with_repeats)) == [1, 2, 3, 0]
    strings_with_repeats = ("a", "b", "a", "c", "C", None)
    assert list(unique(strings_with_repeats)) == ["a", "b", "c", "C", None]
def test_unique_ignore_case():
    """
    unique_ignore_case() should yield the distinct lowercased strings from a
    provided iterable
    """
    mixed_case = ["ABC", "def", "AbC", "DEf"]
    assert list(unique_ignore_case(mixed_case)) == ["abc", "def"]
def test_item_at_index_or_none():
    """
    item_at_index_or_none should return the element at a given index when it
    exists, otherwise None
    """
    values = [1, 2, 3]
    assert item_at_index_or_none(values, 1) == 2
    assert item_at_index_or_none(values, 10) is None
def test_all_equal():
"""
Assert that all_equal returns True if all of the provided args are equal to each other
"""
assert all_equal(1, 1, 1) is True
assert all_equal(1, 2, 1) is False
assert all_equal() is True
def test_all_unique():
"""
Assert that all_unique returns True if all of the items in the iterable argument are unique
"""
assert all_unique([1, 2, 3, 4]) is True
assert all_unique((1, 2, 3, 4)) is True
assert all_unique([1, 2, 3, 1]) is False
def test_has_all_keys():
"""
Assert that has_all_keys returns True if the given dict has all of the specified keys
"""
d = {"a": 1, "b": 2, "c": 3}
assert has_all_keys(d, ["a", "c"]) is True
assert has_all_keys(d, ["a", "z"]) is False
def test_is_blank():
"""
Assert that is_blank returns True if the given value is None or a blank string
"""
assert is_blank("") is True
assert is_blank(None) is True
assert is_blank(0) is False
assert is_blank(" ") is False
assert is_blank(False) is False
assert is_blank("value") is False
def test_group_into_dict():
"""
Assert that group_into_dict takes an iterable of items and returns a dictionary of those items
grouped by generated keys
"""
class Car: # pylint: disable=missing-docstring
def __init__(self, make, model):
self.make = make
self.model = model
cars = [
Car(make="Honda", model="Civic"),
Car(make="Honda", model="Accord"),
Car(make="Ford", model="F150"),
Car(make="Ford", model="Focus"),
Car(make="Jeep", model="Wrangler"),
]
grouped_cars = group_into_dict(cars, key_fn=op.attrgetter("make"))
assert set(grouped_cars.keys()) == {"Honda", "Ford", "Jeep"}
assert set(grouped_cars["Honda"]) == set(cars[0:2])
assert set(grouped_cars["Ford"]) == set(cars[2:4])
assert grouped_cars["Jeep"] == [cars[4]]
nums = [1, 2, 3, 4, 5, 6]
grouped_nums = group_into_dict(nums, key_fn=lambda num: (num % 2 == 0))
assert grouped_nums.keys() == {True, False}
assert set(grouped_nums[True]) == {2, 4, 6}
assert set(grouped_nums[False]) == {1, 3, 5}
def test_filter_dict_by_key_set():
"""
Test that filter_dict_by_key_set returns a dict with only the given keys
"""
d = {"a": 1, "b": 2, "c": 3, "d": 4}
assert filter_dict_by_key_set(d, {"a", "c"}) == {"a": 1, "c": 3}
assert filter_dict_by_key_set(d, {"a", "c", "nonsense"}) == {"a": 1, "c": 3}
assert filter_dict_by_key_set(d, {"nonsense"}) == {}
def test_partition_to_lists():
"""
Assert that partition_to_lists splits an iterable into two lists according to a condition
"""
nums = [1, 2, 1, 3, 1, 4, 0, None, None]
not_ones, ones = partition_to_lists(nums, lambda n: n == 1)
assert not_ones == [2, 3, 4, 0, None, None]
assert ones == [1, 1, 1]
# The default predicate is the standard Python bool() function
falsey, truthy = partition_to_lists(nums)
assert falsey == [0, None, None]
assert truthy == [1, 2, 1, 3, 1, 4]
def test_partition_around_index():
"""partition_around_index should split a list into two lists around an index"""
assert partition_around_index([1, 2, 3, 4], 2) == ([1, 2], [4])
assert partition_around_index([1, 2, 3, 4], 0) == ([], [2, 3, 4])
assert partition_around_index([1, 2, 3, 4], 3) == ([1, 2, 3], [])
with pytest.raises(ValueError):
partition_around_index([1, 2, 3, 4], 4)
@pytest.mark.parametrize(
"content,content_type,exp_summary_content,exp_url_in_summary",
[
['{"bad": "response"}', "application/json", '{"bad": "response"}', False],
["plain text", "text/plain", "plain text", False],
[
"<div>HTML content</div>",
"text/html; charset=utf-8",
"(HTML body ignored)",
True,
],
],
)
def test_get_error_response_summary(
content, content_type, exp_summary_content, exp_url_in_summary
):
"""
get_error_response_summary should provide a summary of an error HTTP response object with the correct bits of
information depending on the type of content.
"""
status_code = 400
url = "http://example.com"
mock_response = MockResponse(
status_code=status_code, content=content, content_type=content_type, url=url
)
summary = get_error_response_summary(mock_response)
assert f"Response - code: {status_code}" in summary
assert f"content: {exp_summary_content}" in summary
assert (f"url: {url}" in summary) is exp_url_in_summary
@pytest.mark.django_db
def test_jsonfield(settings):
"""
Test a model with a JSONField is handled correctly
"""
settings.CYBERSOURCE_SECURITY_KEY = "asdf"
receipt = ReceiptFactory.create()
assert serialize_model_object(receipt) == {
"created_on": format_as_iso8601(receipt.created_on),
"data": receipt.data,
"id": receipt.id,
"updated_on": format_as_iso8601(receipt.updated_on),
"order": receipt.order.id,
}
def test_get_field_names():
"""
Assert that get_field_names does not include related fields
"""
assert set(get_field_names(Order)) == {
"user",
"status",
"total_price_paid",
"application",
"created_on",
"updated_on",
"payment_type",
}
def test_is_empty_file():
"""is_empty_file should return True if the given object is None or has a blank name property"""
fake_file = None
assert is_empty_file(fake_file) is True
fake_file = SimpleNamespace(name="")
assert is_empty_file(fake_file) is True
fake_file = SimpleNamespace(name="path/to/file.txt")
assert is_empty_file(fake_file) is False
def test_chunks():
"""
test for chunks
"""
input_list = list(range(113))
output_list = []
for nums in chunks(input_list):
output_list += nums
assert output_list == input_list
output_list = []
for nums in chunks(input_list, chunk_size=1):
output_list += nums
assert output_list == input_list
output_list = []
for nums in chunks(input_list, chunk_size=124):
output_list += nums
assert output_list == input_list
def test_chunks_iterable():
"""
test that chunks works on non-list iterables too
"""
count = 113
input_range = range(count)
chunk_output = []
for chunk in chunks(input_range, chunk_size=10):
chunk_output.append(chunk)
assert len(chunk_output) == ceil(113 / 10)
range_list = []
for chunk in chunk_output:
range_list += chunk
assert range_list == list(range(count))
def test_format_month_day():
"""
format_month_day should format the month and day from a datetime
"""
dt = datetime.datetime(year=2020, month=1, day=1, tzinfo=pytz.UTC)
assert format_month_day(dt) == "Jan 1"
assert format_month_day(dt, month_fmt="%b") == "Jan 1"
assert format_month_day(dt, month_fmt="%B") == "January 1"
def test_has_equal_properties():
"""
Assert that has_equal_properties returns True if an object has equivalent properties to a given dict
"""
obj = SimpleNamespace(a=1, b=2, c=3)
assert has_equal_properties(obj, {}) is True
assert has_equal_properties(obj, dict(a=1, b=2)) is True
assert has_equal_properties(obj, dict(a=1, b=2, c=3)) is True
assert has_equal_properties(obj, dict(a=2)) is False
assert has_equal_properties(obj, dict(d=4)) is False
| main/utils_test.py | 10,561 | Assert that all_equal returns True if all of the provided args are equal to each other
Assert that all_unique returns True if all of the items in the iterable argument are unique
test for chunks
test that chunks works on non-list iterables too
Test that filter_dict_by_key_set returns a dict with only the given keys
first_matching_item should return the first item where the predicate function returns true
Assert that first_or_none returns the first item in an iterable or None
format_month_day should format the month and day from a datetime
get_error_response_summary should provide a summary of an error HTTP response object with the correct bits of
information depending on the type of content.
Assert that get_field_names does not include related fields
Assert that group_into_dict takes an iterable of items and returns a dictionary of those items
grouped by generated keys
Assert that has_all_keys returns True if the given dict has all of the specified keys
Assert that has_equal_properties returns True if an object has equivalent properties to a given dict
Assert that is_blank returns True if the given value is None or a blank string
is_empty_file should return True if the given object is None or has a blank name property
Test is_near_now for now
Assert that item_at_index_or_none returns an item at a given index, or None if that index
doesn't exist
Test a model with a JSONField is handled correctly
Assert that max_or_none returns the max of some iterable, or None if the iterable has no items
now_in_utc() should return the current time set to the UTC time zone
partition_around_index should split a list into two lists around an index
Assert that partition_to_lists splits an iterable into two lists according to a condition
Assert that unique() returns a generator of unique elements from a provided iterable
Assert that unique_ignore_case() returns a generator of unique lowercase strings from a
provided iterable
Tests for the utils module
pylint: disable=missing-docstring The default predicate is the standard Python bool() function | 2,060 | en | 0.659819 |
"""
Test admin tools
"""
from io import BytesIO, TextIOWrapper
import csv
import six
import zipfile
import django
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.test import Client, TestCase
import gdpr_assist
from .gdpr_assist_tests_app.factories import (
ModelWithPrivacyMetaFactory,
FirstSearchModelFactory,
SecondSearchModelFactory,
)
from .gdpr_assist_tests_app.models import (
FirstSearchModel,
SecondSearchModel,
)
model_root_url = '/admin/gdpr_assist_tests_app/modelwithprivacymeta/'
tool_root_url = '/admin/gdpr_assist/personaldata/'
class AdminTestCase(TestCase):
def setUp(self):
self.client = Client()
User = get_user_model()
user = User.objects.create_superuser(
username='test',
email='test@example.com',
password='test',
)
if django.VERSION <= (1, 9):
# Django 1.8 support - no client.force_login
self.client.login(username='test', password='test')
else:
# Django 1.9+
self.client.force_login(user)
class TestModelAdmin(AdminTestCase):
def test_changelist__anonymise_action_present(self):
ModelWithPrivacyMetaFactory.create()
response = self.client.get(model_root_url)
self.assertContains(response, '<option value="anonymise">')
def test_anonymise_action_submit__redirect_to_anonymise_view(self):
obj_1 = ModelWithPrivacyMetaFactory.create()
obj_2 = ModelWithPrivacyMetaFactory.create()
response = self.client.post(
model_root_url,
{
'action': 'anonymise',
'_selected_action': [obj_1.pk, obj_2.pk],
},
follow=True,
)
test_url = '{root_url}anonymise/?ids={pk1},{pk2}'.format(
root_url=model_root_url,
pk1=obj_1.pk,
pk2=obj_2.pk,
)
if django.VERSION <= (1, 9):
# Django 1.8 support - redirects include host
self.assertEqual(len(response.redirect_chain), 1)
self.assertTrue(response.redirect_chain[0][0].endswith(
test_url
))
self.assertEqual(response.redirect_chain[0][1], 302)
else:
# Django 1.9+
self.assertEqual(
response.redirect_chain,
[(test_url, 302)],
)
self.assertContains(
response,
'<p>Are you sure you want to anonymise the following Model With Privacy Metas:</p>',
)
self.assertContains(
response,
'<input type="hidden" name="ids" value="{pk1},{pk2}">'.format(
pk1=obj_1.pk,
pk2=obj_2.pk,
),
)
def test_anonymise_view_submit__redirect_to_anonymise_view(self):
obj_1 = ModelWithPrivacyMetaFactory.create(anonymised=False)
obj_2 = ModelWithPrivacyMetaFactory.create(anonymised=False)
response = self.client.post(
model_root_url + 'anonymise/',
{
'ids': ','.join([str(obj_1.pk), str(obj_2.pk)]),
},
follow=True,
)
obj_1.refresh_from_db()
obj_2.refresh_from_db()
self.assertTrue(obj_1.anonymised)
self.assertTrue(obj_2.anonymised)
if django.VERSION <= (1, 9):
# Django 1.8 support - redirects include host
self.assertEqual(len(response.redirect_chain), 1)
self.assertTrue(response.redirect_chain[0][0].endswith(model_root_url))
self.assertEqual(response.redirect_chain[0][1], 302)
else:
# Django 1.9+
self.assertEqual(
response.redirect_chain,
[(model_root_url, 302)],
)
self.assertContains(
response,
'<li class="success">2 Model With Privacy Metas anonymised</li>',
)
class TestAdminTool(AdminTestCase):
def test_tool_is_available(self):
FirstSearchModelFactory.create()
response = self.client.get(tool_root_url)
self.assertContains(response, '<h1>Personal Data</h1>')
def test_search__returns_correct_results(self):
obj_1 = FirstSearchModelFactory.create(
email='one@example.com',
)
FirstSearchModelFactory.create(
email='two@example.com',
)
response = self.client.post(tool_root_url, {'term': 'one@example.com'})
self.assertContains(
response,
'<h2>Gdpr_Assist_Tests_App: First Search Model</h2>',
)
self.assertContains(
response,
'<input name="obj_pk" value="{}-{}" class="action-select" type="checkbox">'.format(
ContentType.objects.get_for_model(FirstSearchModel).pk,
obj_1.pk,
),
)
def test_anonymise__records_anonymised(self):
obj_1 = FirstSearchModelFactory.create(
email='one@example.com',
anonymised=False,
)
obj_2 = FirstSearchModelFactory.create(
email='two@example.com',
anonymised=False,
)
content_type = ContentType.objects.get_for_model(FirstSearchModel).pk
response = self.client.post(
tool_root_url,
{
'term': 'one@example.com',
'action': gdpr_assist.admin.tool.PersonalDataSearchForm.ACTION_ANONYMISE,
'obj_pk': ['{}-{}'.format(content_type, obj_1.pk)],
},
follow=True,
)
obj_1.refresh_from_db()
obj_2.refresh_from_db()
self.assertTrue(obj_1.anonymised)
self.assertFalse(obj_2.anonymised)
if django.VERSION <= (1, 9):
# Django 1.8 support - redirects include host
self.assertEqual(len(response.redirect_chain), 1)
self.assertTrue(response.redirect_chain[0][0].endswith(tool_root_url))
self.assertEqual(response.redirect_chain[0][1], 302)
else:
# Django 1.9+
self.assertEqual(
response.redirect_chain,
[(tool_root_url, 302)],
)
def test_export_no_matches__reports_error(self):
# Request an object we know doesn't exist
self.assertEqual(FirstSearchModel.objects.count(), 0)
response = self.client.post(
tool_root_url,
{
'term': 'one@example.com',
'action': gdpr_assist.admin.tool.PersonalDataSearchForm.ACTION_EXPORT,
'obj_pk': [
'{}-1'.format(
ContentType.objects.get_for_model(FirstSearchModel).pk,
),
],
},
)
self.assertEqual(response.status_code, 200)
self.assertContains(
response,
'<li class="error">No objects selected</li>',
)
def test_export_matches__records_export(self):
# Creating 4 records:
# * One matching in FirstSearchModel so we collect multiple models
# * One not matching in FirstSearchModel so we exclude ignored records
# * Two in SecondSearchModel so we collect multiple records
obj_1 = FirstSearchModelFactory.create(
chars='test1',
email='one@example.com',
anonymised=False,
)
obj_2 = FirstSearchModelFactory.create(
chars='test2',
email='two@example.com',
anonymised=False,
)
obj_3 = SecondSearchModelFactory.create(
chars='test3',
email='one@example.com',
anonymised=False,
)
obj_4 = SecondSearchModelFactory.create(
chars='test4',
email='one@example.com',
anonymised=False,
)
content_type_1 = ContentType.objects.get_for_model(FirstSearchModel).pk
content_type_2 = ContentType.objects.get_for_model(SecondSearchModel).pk
response = self.client.post(
tool_root_url,
{
'term': 'one@example.com',
'action': gdpr_assist.admin.tool.PersonalDataSearchForm.ACTION_EXPORT,
'obj_pk': [
'{}-{}'.format(content_type_1, obj_1.pk),
'{}-{}'.format(content_type_2, obj_3.pk),
'{}-{}'.format(content_type_2, obj_4.pk),
],
},
follow=True,
)
# Check they didn't get anonymised by mistake
obj_1.refresh_from_db()
obj_2.refresh_from_db()
obj_3.refresh_from_db()
obj_4.refresh_from_db()
self.assertFalse(obj_1.anonymised)
self.assertFalse(obj_2.anonymised)
self.assertFalse(obj_3.anonymised)
self.assertFalse(obj_4.anonymised)
# Download zip into memory and check it's as expected
zip_data = BytesIO()
zip_data.write(response.content)
zip_file = zipfile.ZipFile(zip_data)
self.assertEqual(
sorted(zip_file.namelist()),
[
'gdpr_assist_tests_app-FirstSearchModel.csv',
'second_search.csv',
],
)
if six.PY2:
mode = 'rU'
else:
mode = 'r'
with zip_file.open(
'gdpr_assist_tests_app-FirstSearchModel.csv',
mode,
) as f:
reader = csv.DictReader(TextIOWrapper(f))
self.assertEqual(
reader.fieldnames,
['email'],
)
rows = list(reader)
self.assertEqual(len(rows), 1)
self.assertEqual(rows[0]['email'], 'one@example.com')
with zip_file.open('second_search.csv', mode) as f:
reader = csv.DictReader(TextIOWrapper(f))
self.assertEqual(
sorted(reader.fieldnames),
['chars', 'email'],
)
rows = list(reader)
self.assertEqual(len(rows), 2)
self.assertEqual(rows[0]['chars'], 'test3')
self.assertEqual(rows[0]['email'], 'one@example.com')
self.assertEqual(rows[1]['chars'], 'test4')
self.assertEqual(rows[1]['email'], 'one@example.com')
| tests/test_admin.py | 10,416 | Test admin tools
Django 1.8 support - no client.force_login Django 1.9+ Django 1.8 support - redirects include host Django 1.9+ Django 1.8 support - redirects include host Django 1.9+ Django 1.8 support - redirects include host Django 1.9+ Request an object we know doesn't exist Creating 4 records: * One matching in FirstSearchModel so we collect multiple models * One not matching in FirstSearchModel so we exclude ignored records * Two in SecondSearchModel so we collect multiple records Check they didn't get anonymised by mistake Download zip into memory and check it's as expected | 589 | en | 0.853858 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A source and a sink for reading from and writing to text files."""
# pytype: skip-file
from __future__ import absolute_import
import logging
from builtins import object
from builtins import range
from functools import partial
from typing import Optional
from past.builtins import long
from apache_beam.coders import coders
from apache_beam.io import filebasedsink
from apache_beam.io import filebasedsource
from apache_beam.io import iobase
from apache_beam.io.filebasedsource import ReadAllFiles
from apache_beam.io.filesystem import CompressionTypes
from apache_beam.io.iobase import Read
from apache_beam.io.iobase import Write
from apache_beam.transforms import PTransform
from apache_beam.transforms.display import DisplayDataItem
# Public API of this module; underscore-prefixed sources/sinks and helpers
# below are implementation details and intentionally not exported.
__all__ = [
    'ReadFromText',
    'ReadFromTextWithFilename',
    'ReadAllFromText',
    'WriteToText'
]
# Module-scoped logger, following the standard per-module logger convention.
_LOGGER = logging.getLogger(__name__)
class _TextSource(filebasedsource.FileBasedSource):
  r"""A source for reading text files.

  Parses a text file as newline-delimited elements. Supports newline delimiters
  '\n' and '\r\n'.

  This implementation only supports reading text encoded using UTF-8 or
  ASCII.
  """
  DEFAULT_READ_BUFFER_SIZE = 8192

  class ReadBuffer(object):
    # A buffer that gives the buffered data and next position in the
    # buffer that should be read.

    def __init__(self, data, position):
      self._data = data
      self._position = position

    @property
    def data(self):
      return self._data

    @data.setter
    def data(self, value):
      assert isinstance(value, bytes)
      self._data = value

    @property
    def position(self):
      return self._position

    @position.setter
    def position(self, value):
      assert isinstance(value, (int, long))
      if value > len(self._data):
        raise ValueError(
            'Cannot set position to %d since it\'s larger than '
            'size of data %d.' % (value, len(self._data)))
      self._position = value

    def reset(self):
      self.data = b''
      self.position = 0

  def __init__(self,
               file_pattern,
               min_bundle_size,
               compression_type,
               strip_trailing_newlines,
               coder,  # type: coders.Coder
               buffer_size=DEFAULT_READ_BUFFER_SIZE,
               validate=True,
               skip_header_lines=0,
               header_processor_fns=(None, None)):
    """Initialize a _TextSource

    Args:
      header_processor_fns (tuple): a tuple of a `header_matcher` function
        and a `header_processor` function. The `header_matcher` should
        return `True` for all lines at the start of the file that are part
        of the file header and `False` otherwise. These header lines will
        not be yielded when reading records and instead passed into
        `header_processor` to be handled. If `skip_header_lines` and a
        `header_matcher` are both provided, the value of `skip_header_lines`
        lines will be skipped and the header will be processed from
        there.

    Raises:
      ValueError: if skip_lines is negative.

    Please refer to documentation in class `ReadFromText` for the rest
    of the arguments.
    """
    super(_TextSource, self).__init__(
        file_pattern,
        min_bundle_size,
        compression_type=compression_type,
        validate=validate)
    self._strip_trailing_newlines = strip_trailing_newlines
    self._compression_type = compression_type
    self._coder = coder
    self._buffer_size = buffer_size
    if skip_header_lines < 0:
      raise ValueError(
          'Cannot skip negative number of header lines: %d' % skip_header_lines)
    elif skip_header_lines > 10:
      # Fix: the format string expects one argument; previously no argument
      # was supplied, so the literal '%d' appeared in the logged message.
      _LOGGER.warning(
          'Skipping %d header lines. Skipping large number of header '
          'lines might significantly slow down processing.',
          skip_header_lines)
    self._skip_header_lines = skip_header_lines
    self._header_matcher, self._header_processor = header_processor_fns

  def display_data(self):
    parent_dd = super(_TextSource, self).display_data()
    parent_dd['strip_newline'] = DisplayDataItem(
        self._strip_trailing_newlines, label='Strip Trailing New Lines')
    parent_dd['buffer_size'] = DisplayDataItem(
        self._buffer_size, label='Buffer Size')
    parent_dd['coder'] = DisplayDataItem(self._coder.__class__, label='Coder')
    return parent_dd

  def read_records(self, file_name, range_tracker):
    """Yield decoded records from ``file_name`` within ``range_tracker``."""
    start_offset = range_tracker.start_position()
    read_buffer = _TextSource.ReadBuffer(b'', 0)
    next_record_start_position = -1

    def split_points_unclaimed(stop_position):
      return (
          0 if stop_position <= next_record_start_position else
          iobase.RangeTracker.SPLIT_POINTS_UNKNOWN)

    range_tracker.set_split_points_unclaimed_callback(split_points_unclaimed)
    with self.open_file(file_name) as file_to_read:
      position_after_processing_header_lines = (
          self._process_header(file_to_read, read_buffer))
      start_offset = max(start_offset, position_after_processing_header_lines)
      if start_offset > position_after_processing_header_lines:
        # Seeking to one position before the start index and ignoring the
        # current line. If start_position is at beginning of the line, that
        # line belongs to the current bundle, hence ignoring that is incorrect.
        # Seeking to one byte before prevents that.
        file_to_read.seek(start_offset - 1)
        read_buffer.reset()
        sep_bounds = self._find_separator_bounds(file_to_read, read_buffer)
        if not sep_bounds:
          # Could not find a separator after (start_offset - 1). This means that
          # none of the records within the file belongs to the current source.
          return
        _, sep_end = sep_bounds
        read_buffer.data = read_buffer.data[sep_end:]
        next_record_start_position = start_offset - 1 + sep_end
      else:
        next_record_start_position = position_after_processing_header_lines
      while range_tracker.try_claim(next_record_start_position):
        record, num_bytes_to_next_record = self._read_record(file_to_read,
                                                             read_buffer)
        # For compressed text files that use an unsplittable OffsetRangeTracker
        # with infinity as the end position, above 'try_claim()' invocation
        # would pass for an empty record at the end of file that is not
        # followed by a new line character. Since such a record is at the last
        # position of a file, it should not be a part of the considered range.
        # We do this check to ignore such records.
        if len(record) == 0 and num_bytes_to_next_record < 0:  # pylint: disable=len-as-condition
          break
        # Record separator must be larger than zero bytes.
        assert num_bytes_to_next_record != 0
        if num_bytes_to_next_record > 0:
          next_record_start_position += num_bytes_to_next_record
        yield self._coder.decode(record)
        if num_bytes_to_next_record < 0:
          break

  def _process_header(self, file_to_read, read_buffer):
    # Returns a tuple containing the position in file after processing header
    # records and a list of decoded header lines that match
    # 'header_matcher'.
    header_lines = []
    position = self._skip_lines(
        file_to_read, read_buffer,
        self._skip_header_lines) if self._skip_header_lines else 0
    if self._header_matcher:
      while True:
        record, num_bytes_to_next_record = self._read_record(file_to_read,
                                                             read_buffer)
        decoded_line = self._coder.decode(record)
        if not self._header_matcher(decoded_line):
          # We've read past the header section at this point, so go back a line.
          file_to_read.seek(position)
          read_buffer.reset()
          break
        header_lines.append(decoded_line)
        if num_bytes_to_next_record < 0:
          break
        position += num_bytes_to_next_record
      if self._header_processor:
        self._header_processor(header_lines)
    return position

  def _find_separator_bounds(self, file_to_read, read_buffer):
    # Determines the start and end positions within 'read_buffer.data' of the
    # next separator starting from position 'read_buffer.position'.
    # Currently supports following separators.
    # * '\n'
    # * '\r\n'
    # This method may increase the size of buffer but it will not decrease the
    # size of it.
    current_pos = read_buffer.position
    while True:
      if current_pos >= len(read_buffer.data):
        # Ensuring that there are enough bytes to determine if there is a '\n'
        # at current_pos.
        if not self._try_to_ensure_num_bytes_in_buffer(
            file_to_read, read_buffer, current_pos + 1):
          return
      # Using find() here is more efficient than a linear scan of the byte
      # array.
      next_lf = read_buffer.data.find(b'\n', current_pos)
      if next_lf >= 0:
        if next_lf > 0 and read_buffer.data[next_lf - 1:next_lf] == b'\r':
          # Found a '\r\n'. Accepting that as the next separator.
          return (next_lf - 1, next_lf + 1)
        else:
          # Found a '\n'. Accepting that as the next separator.
          return (next_lf, next_lf + 1)
      current_pos = len(read_buffer.data)

  def _try_to_ensure_num_bytes_in_buffer(
      self, file_to_read, read_buffer, num_bytes):
    # Tries to ensure that there are at least num_bytes bytes in the buffer.
    # Returns True if this can be fulfilled, returned False if this cannot be
    # fulfilled due to reaching EOF.
    while len(read_buffer.data) < num_bytes:
      read_data = file_to_read.read(self._buffer_size)
      if not read_data:
        return False
      read_buffer.data += read_data
    return True

  def _skip_lines(self, file_to_read, read_buffer, num_lines):
    """Skip num_lines from file_to_read, return num_lines+1 start position."""
    if file_to_read.tell() > 0:
      file_to_read.seek(0)
    position = 0
    for _ in range(num_lines):
      _, num_bytes_to_next_record = self._read_record(file_to_read, read_buffer)
      if num_bytes_to_next_record < 0:
        # We reached end of file. It is OK to just break here
        # because subsequent _read_record will return same result.
        break
      position += num_bytes_to_next_record
    return position

  def _read_record(self, file_to_read, read_buffer):
    # Returns a tuple containing the current_record and number of bytes to the
    # next record starting from 'read_buffer.position'. If EOF is
    # reached, returns a tuple containing the current record and -1.
    if read_buffer.position > self._buffer_size:
      # read_buffer is too large. Truncating and adjusting it.
      read_buffer.data = read_buffer.data[read_buffer.position:]
      read_buffer.position = 0
    record_start_position_in_buffer = read_buffer.position
    sep_bounds = self._find_separator_bounds(file_to_read, read_buffer)
    read_buffer.position = sep_bounds[1] if sep_bounds else len(
        read_buffer.data)
    if not sep_bounds:
      # Reached EOF. Bytes up to the EOF is the next record. Returning '-1' for
      # the starting position of the next record.
      return (read_buffer.data[record_start_position_in_buffer:], -1)
    if self._strip_trailing_newlines:
      # Current record should not contain the separator.
      return (
          read_buffer.data[record_start_position_in_buffer:sep_bounds[0]],
          sep_bounds[1] - record_start_position_in_buffer)
    else:
      # Current record should contain the separator.
      return (
          read_buffer.data[record_start_position_in_buffer:sep_bounds[1]],
          sep_bounds[1] - record_start_position_in_buffer)
class _TextSourceWithFilename(_TextSource):
  """A text source whose records are (filename, line) pairs."""

  def read_records(self, file_name, range_tracker):
    # Let _TextSource do the actual parsing; tag each record with its file.
    parent_records = super(_TextSourceWithFilename, self).read_records(
        file_name, range_tracker)
    for line in parent_records:
      yield (file_name, line)
class _TextSink(filebasedsink.FileBasedSink):
  """A sink to a GCS or local text file or files."""

  def __init__(self,
               file_path_prefix,
               file_name_suffix='',
               append_trailing_newlines=True,
               num_shards=0,
               shard_name_template=None,
               coder=coders.ToStringCoder(),  # type: coders.Coder
               compression_type=CompressionTypes.AUTO,
               header=None):
    """Initialize a _TextSink.

    Args:
      file_path_prefix: Prefix for the output path. Every written file starts
        with this prefix, followed by a shard identifier (see num_shards) and
        ends with file_name_suffix, if any. Typically only this argument is
        given while the other naming arguments keep their defaults.
      file_name_suffix: Suffix appended to each written file name.
      append_trailing_newlines: Whether an additional newline character is
        written after each element.
      num_shards: Number of output files (shards). When unset, the service
        chooses an optimal shard count. Pinning the shard count is likely to
        reduce pipeline performance, so set it only when an exact number of
        output files is required.
      shard_name_template: Template with placeholders for the shard number
        and shard count. When building the name of a particular shard, runs
        of the upper-case letters 'S' and 'N' are replaced with the 0-padded
        shard number and shard count respectively. Passing '' behaves as if
        num_shards were 1, so only a single file is produced. Passing None
        selects the default pattern '-SSSSS-of-NNNNN'.
      coder: Coder used to encode each line.
      compression_type: Controls compression of the output. The typical value
        is CompressionTypes.AUTO, in which case the final path's extension
        (as determined by file_path_prefix, file_name_suffix, num_shards and
        shard_name_template) selects the compression.
      header: Optional string written at the top of each file. When given and
        append_trailing_newlines is set, a newline follows it.

    Returns:
      A _TextSink object usable for writing.
    """
    super(_TextSink, self).__init__(
        file_path_prefix,
        file_name_suffix=file_name_suffix,
        num_shards=num_shards,
        shard_name_template=shard_name_template,
        coder=coder,
        mime_type='text/plain',
        compression_type=compression_type)
    self._append_trailing_newlines = append_trailing_newlines
    self._header = header

  def open(self, temp_path):
    # Open via the parent sink, then emit the optional header first.
    handle = super(_TextSink, self).open(temp_path)
    if self._header is not None:
      handle.write(coders.ToStringCoder().encode(self._header))
      if self._append_trailing_newlines:
        handle.write(b'\n')
    return handle

  def display_data(self):
    parent = super(_TextSink, self).display_data()
    parent['append_newline'] = DisplayDataItem(
        self._append_trailing_newlines, label='Append Trailing New Lines')
    return parent

  def write_encoded_record(self, file_handle, encoded_value):
    """Writes a single encoded record, plus a newline when configured."""
    file_handle.write(encoded_value)
    if self._append_trailing_newlines:
      file_handle.write(b'\n')
def _create_text_source(
    file_pattern=None,
    min_bundle_size=None,
    compression_type=None,
    strip_trailing_newlines=None,
    coder=None,
    skip_header_lines=None):
  """Factory returning a non-validating ``_TextSource`` for the given args."""
  # Validation is disabled for sources created through this factory.
  source = _TextSource(
      file_pattern=file_pattern,
      min_bundle_size=min_bundle_size,
      compression_type=compression_type,
      strip_trailing_newlines=strip_trailing_newlines,
      coder=coder,
      validate=False,
      skip_header_lines=skip_header_lines)
  return source
class ReadAllFromText(PTransform):
  """A ``PTransform`` for reading a ``PCollection`` of text files.

  Reads a ``PCollection`` of text files or file patterns and produces a
  ``PCollection`` of strings.

  Parses a text file as newline-delimited elements, by default assuming
  UTF-8 encoding. Supports newline delimiters '\\n' and '\\r\\n'.

  This implementation only supports reading text encoded using UTF-8 or ASCII.
  This does not support other encodings such as UTF-16 or UTF-32.
  """
  DEFAULT_DESIRED_BUNDLE_SIZE = 64 * 1024 * 1024  # 64MB

  def __init__(
      self,
      min_bundle_size=0,
      desired_bundle_size=DEFAULT_DESIRED_BUNDLE_SIZE,
      compression_type=CompressionTypes.AUTO,
      strip_trailing_newlines=True,
      coder=coders.StrUtf8Coder(),  # type: coders.Coder
      skip_header_lines=0,
      **kwargs):
    """Initialize the ``ReadAllFromText`` transform.

    Args:
      min_bundle_size: Minimum size of bundles generated when splitting a
        source into bundles; see ``FileBasedSource`` for details.
      desired_bundle_size: Desired size of bundles generated when splitting a
        source into bundles; see ``FileBasedSource`` for details.
      compression_type: How compressed input files are handled. The typical
        value is ``CompressionTypes.AUTO``, where the underlying file path's
        extension is used to detect the compression.
      strip_trailing_newlines: Whether the newline character should be removed
        from each line it reads before decoding that line.
      skip_header_lines: Number of header lines to skip; the same number is
        skipped from each source file. Must be 0 or higher. Skipping a large
        number of lines might impact performance.
      coder: Coder used to decode each line.
    """
    super(ReadAllFromText, self).__init__(**kwargs)
    # Factory invoked per matched file; the file pattern is supplied later by
    # ReadAllFiles, every other read option is bound here.
    create_source = partial(
        _create_text_source,
        min_bundle_size=min_bundle_size,
        compression_type=compression_type,
        strip_trailing_newlines=strip_trailing_newlines,
        coder=coder,
        skip_header_lines=skip_header_lines)
    self._desired_bundle_size = desired_bundle_size
    self._min_bundle_size = min_bundle_size
    self._compression_type = compression_type
    self._read_all_files = ReadAllFiles(
        True,
        compression_type,
        desired_bundle_size,
        min_bundle_size,
        create_source)

  def expand(self, pvalue):
    return pvalue | 'ReadAllFiles' >> self._read_all_files
class ReadFromText(PTransform):
    r"""A :class:`~apache_beam.transforms.ptransform.PTransform` for reading
    text files.

    Parses a text file as newline-delimited elements, by default assuming
    ``UTF-8`` encoding. Supports newline delimiters ``\n`` and ``\r\n``.

    This implementation only supports reading text encoded using ``UTF-8`` or
    ``ASCII``. Other encodings such as ``UTF-16`` or ``UTF-32`` are not
    supported.
    """

    # Subclasses may swap in a different source implementation
    # (see ReadFromTextWithFilename).
    _source_class = _TextSource

    def __init__(
            self,
            file_pattern=None,
            min_bundle_size=0,
            compression_type=CompressionTypes.AUTO,
            strip_trailing_newlines=True,
            coder=coders.StrUtf8Coder(),  # type: coders.Coder
            validate=True,
            skip_header_lines=0,
            **kwargs):
        """Initialize the :class:`ReadFromText` transform.

        Args:
            file_pattern (str): The file path to read from as a local file
                path or a GCS ``gs://`` path. The path can contain glob
                characters (``*``, ``?``, and ``[...]`` sets).
            min_bundle_size (int): Minimum size of bundles that should be
                generated when splitting this source into bundles. See
                :class:`~apache_beam.io.filebasedsource.FileBasedSource`.
            compression_type (str): Used to handle compressed input files.
                Typical value is :attr:`CompressionTypes.AUTO
                <apache_beam.io.filesystem.CompressionTypes.AUTO>`, in which
                case the underlying file_path's extension will be used to
                detect the compression.
            strip_trailing_newlines (bool): Indicates whether this source
                should remove the newline char in each line it reads before
                decoding that line.
            validate (bool): flag to verify that the files exist during the
                pipeline creation time.
            skip_header_lines (int): Number of header lines to skip. The same
                number is skipped from each source file. Must be 0 or higher.
                A large number of skipped lines might impact performance.
            coder (~apache_beam.coders.coders.Coder): Coder used to decode
                each line.
        """
        super(ReadFromText, self).__init__(**kwargs)
        self._source = self._source_class(
            file_pattern,
            min_bundle_size,
            compression_type,
            strip_trailing_newlines,
            coder,
            validate=validate,
            skip_header_lines=skip_header_lines)

    def expand(self, pvalue):
        """Apply the transform: emit a Read from the configured source."""
        return pvalue.pipeline | Read(self._source)
class ReadFromTextWithFilename(ReadFromText):
    r"""A :class:`~apache_beam.io.textio.ReadFromText` for reading text
    files, returning both the name of the file and the content of the file.

    This class extends :class:`ReadFromText` by only overriding the
    ``_source_class`` attribute.
    """
    _source_class = _TextSourceWithFilename
class WriteToText(PTransform):
    """A :class:`~apache_beam.transforms.ptransform.PTransform` for writing to
    text files."""

    def __init__(
            self,
            file_path_prefix,  # type: str
            file_name_suffix='',
            append_trailing_newlines=True,
            num_shards=0,
            shard_name_template=None,  # type: Optional[str]
            coder=coders.ToStringCoder(),  # type: coders.Coder
            compression_type=CompressionTypes.AUTO,
            header=None):
        r"""Initialize a :class:`WriteToText` transform.

        Args:
            file_path_prefix (str): The file path to write to. The files
                written will begin with this prefix, followed by a shard
                identifier (see **num_shards**), and end in a common
                extension, if given by **file_name_suffix**. In most cases,
                only this argument is specified and **num_shards**,
                **shard_name_template**, and **file_name_suffix** use default
                values.
            file_name_suffix (str): Suffix for the files written.
            append_trailing_newlines (bool): indicate whether this sink should
                write an additional newline char after writing each element.
            num_shards (int): The number of files (shards) used for output.
                If not set, the service will decide on the optimal number of
                shards. Constraining the number of shards is likely to reduce
                the performance of a pipeline. Setting this value is not
                recommended unless you require a specific number of output
                files.
            shard_name_template (str): A template string containing
                placeholders for the shard number and shard count. Currently
                only ``''`` and ``'-SSSSS-of-NNNNN'`` are patterns accepted by
                the service. When constructing a filename for a particular
                shard number, the upper-case letters ``S`` and ``N`` are
                replaced with the ``0``-padded shard number and shard count
                respectively. This argument can be ``''`` in which case it
                behaves as if num_shards was set to 1 and only one file will
                be generated. The default pattern used is
                ``'-SSSSS-of-NNNNN'``.
            coder (~apache_beam.coders.coders.Coder): Coder used to encode
                each line.
            compression_type (str): Used to handle compressed output files.
                Typical value is :class:`CompressionTypes.AUTO
                <apache_beam.io.filesystem.CompressionTypes.AUTO>`, in which
                case the final file path's extension (as determined by
                **file_path_prefix**, **file_name_suffix**, **num_shards**
                and **shard_name_template**) will be used to detect the
                compression.
            header (str): String to write at beginning of file as a header.
                If not :data:`None` and **append_trailing_newlines** is set,
                ``\n`` will be added.
        """
        # All the work is delegated to the sink; this transform only wires it
        # into the pipeline in expand().
        self._sink = _TextSink(
            file_path_prefix,
            file_name_suffix,
            append_trailing_newlines,
            num_shards,
            shard_name_template,
            coder,
            compression_type,
            header)

    def expand(self, pcoll):
        """Apply the transform: emit a Write to the configured sink."""
        return pcoll | Write(self._sink)
| sdks/python/apache_beam/io/textio.py | 25,387 | A ``PTransform`` for reading a ``PCollection`` of text files.
Reads a ``PCollection`` of text files or file patterns and produces a
``PCollection`` of strings.
Parses a text file as newline-delimited elements, by default assuming
UTF-8 encoding. Supports newline delimiters '\n' and '\r\n'.
This implementation only supports reading text encoded using UTF-8 or ASCII.
This does not support other encodings such as UTF-16 or UTF-32.
A :class:`~apache_beam.transforms.ptransform.PTransform` for reading text
files.
Parses a text file as newline-delimited elements, by default assuming
``UTF-8`` encoding. Supports newline delimiters ``\n`` and ``\r\n``.
This implementation only supports reading text encoded using ``UTF-8`` or
``ASCII``.
This does not support other encodings such as ``UTF-16`` or ``UTF-32``.
A :class:`~apache_beam.io.textio.ReadFromText` for reading text
files returning the name of the file and the content of the file.
This class extend ReadFromText class just setting a different
_source_class attribute.
A :class:`~apache_beam.transforms.ptransform.PTransform` for writing to
text files.
A sink to a GCS or local text file or files.
A source for reading text files.
Parses a text file as newline-delimited elements. Supports newline delimiters
'\n' and '\r\n'.
This implementation only supports reading text encoded using UTF-8 or
ASCII.
Initialize a _TextSource
Args:
header_processor_fns (tuple): a tuple of a `header_matcher` function
and a `header_processor` function. The `header_matcher` should
return `True` for all lines at the start of the file that are part
of the file header and `False` otherwise. These header lines will
not be yielded when reading records and instead passed into
`header_processor` to be handled. If `skip_header_lines` and a
`header_matcher` are both provided, the value of `skip_header_lines`
lines will be skipped and the header will be processed from
there.
Raises:
ValueError: if skip_lines is negative.
Please refer to documentation in class `ReadFromText` for the rest
of the arguments.
Initialize a _TextSink.
Args:
file_path_prefix: The file path to write to. The files written will begin
with this prefix, followed by a shard identifier (see num_shards), and
end in a common extension, if given by file_name_suffix. In most cases,
only this argument is specified and num_shards, shard_name_template, and
file_name_suffix use default values.
file_name_suffix: Suffix for the files written.
append_trailing_newlines: indicate whether this sink should write an
additional newline char after writing each element.
num_shards: The number of files (shards) used for output. If not set, the
service will decide on the optimal number of shards.
Constraining the number of shards is likely to reduce
the performance of a pipeline. Setting this value is not recommended
unless you require a specific number of output files.
shard_name_template: A template string containing placeholders for
the shard number and shard count. When constructing a filename for a
particular shard number, the upper-case letters 'S' and 'N' are
replaced with the 0-padded shard number and shard count respectively.
This argument can be '' in which case it behaves as if num_shards was
set to 1 and only one file will be generated. The default pattern used
is '-SSSSS-of-NNNNN' if None is passed as the shard_name_template.
coder: Coder used to encode each line.
compression_type: Used to handle compressed output files. Typical value
is CompressionTypes.AUTO, in which case the final file path's
extension (as determined by file_path_prefix, file_name_suffix,
num_shards and shard_name_template) will be used to detect the
compression.
header: String to write at beginning of file as a header. If not None and
append_trailing_newlines is set, '\n' will be added.
Returns:
A _TextSink object usable for writing.
Initialize the ``ReadAllFromText`` transform.
Args:
min_bundle_size: Minimum size of bundles that should be generated when
splitting this source into bundles. See ``FileBasedSource`` for more
details.
desired_bundle_size: Desired size of bundles that should be generated when
splitting this source into bundles. See ``FileBasedSource`` for more
details.
compression_type: Used to handle compressed input files. Typical value
is ``CompressionTypes.AUTO``, in which case the underlying file_path's
extension will be used to detect the compression.
strip_trailing_newlines: Indicates whether this source should remove
the newline char in each line it reads before decoding that line.
validate: flag to verify that the files exist during the pipeline
creation time.
skip_header_lines: Number of header lines to skip. Same number is skipped
from each source file. Must be 0 or higher. Large number of skipped
lines might impact performance.
coder: Coder used to decode each line.
Initialize the :class:`ReadFromText` transform.
Args:
file_pattern (str): The file path to read from as a local file path or a
GCS ``gs://`` path. The path can contain glob characters
(``*``, ``?``, and ``[...]`` sets).
min_bundle_size (int): Minimum size of bundles that should be generated
when splitting this source into bundles. See
:class:`~apache_beam.io.filebasedsource.FileBasedSource` for more
details.
compression_type (str): Used to handle compressed input files.
Typical value is :attr:`CompressionTypes.AUTO
<apache_beam.io.filesystem.CompressionTypes.AUTO>`, in which case the
underlying file_path's extension will be used to detect the compression.
strip_trailing_newlines (bool): Indicates whether this source should
remove the newline char in each line it reads before decoding that line.
validate (bool): flag to verify that the files exist during the pipeline
creation time.
skip_header_lines (int): Number of header lines to skip. Same number is
skipped from each source file. Must be 0 or higher. Large number of
skipped lines might impact performance.
coder (~apache_beam.coders.coders.Coder): Coder used to decode each line.
Initialize a :class:`WriteToText` transform.
Args:
file_path_prefix (str): The file path to write to. The files written will
begin with this prefix, followed by a shard identifier (see
**num_shards**), and end in a common extension, if given by
**file_name_suffix**. In most cases, only this argument is specified and
**num_shards**, **shard_name_template**, and **file_name_suffix** use
default values.
file_name_suffix (str): Suffix for the files written.
append_trailing_newlines (bool): indicate whether this sink should write
an additional newline char after writing each element.
num_shards (int): The number of files (shards) used for output.
If not set, the service will decide on the optimal number of shards.
Constraining the number of shards is likely to reduce
the performance of a pipeline. Setting this value is not recommended
unless you require a specific number of output files.
shard_name_template (str): A template string containing placeholders for
the shard number and shard count. Currently only ``''`` and
``'-SSSSS-of-NNNNN'`` are patterns accepted by the service.
When constructing a filename for a particular shard number, the
upper-case letters ``S`` and ``N`` are replaced with the ``0``-padded
shard number and shard count respectively. This argument can be ``''``
in which case it behaves as if num_shards was set to 1 and only one file
will be generated. The default pattern used is ``'-SSSSS-of-NNNNN'``.
coder (~apache_beam.coders.coders.Coder): Coder used to encode each line.
compression_type (str): Used to handle compressed output files.
Typical value is :class:`CompressionTypes.AUTO
<apache_beam.io.filesystem.CompressionTypes.AUTO>`, in which case the
final file path's extension (as determined by **file_path_prefix**,
**file_name_suffix**, **num_shards** and **shard_name_template**) will
be used to detect the compression.
header (str): String to write at beginning of file as a header.
If not :data:`None` and **append_trailing_newlines** is set, ``\n`` will
be added.
Skip num_lines from file_to_read, return num_lines+1 start position.
Writes a single encoded record.
A source and a sink for reading from and writing to text files.
Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. pytype: skip-file A buffer that gives the buffered data and next position in the buffer that should be read. type: coders.Coder Seeking to one position before the start index and ignoring the current line. If start_position is at beginning if the line, that line belongs to the current bundle, hence ignoring that is incorrect. Seeking to one byte before prevents that. Could not find a separator after (start_offset - 1). This means that none of the records within the file belongs to the current source. For compressed text files that use an unsplittable OffsetRangeTracker with infinity as the end position, above 'try_claim()' invocation would pass for an empty record at the end of file that is not followed by a new line character. Since such a record is at the last position of a file, it should not be a part of the considered range. We do this check to ignore such records. pylint: disable=len-as-condition Record separator must be larger than zero bytes. Returns a tuple containing the position in file after processing header records and a list of decoded header lines that match 'header_matcher'. We've read past the header section at this point, so go back a line. 
Determines the start and end positions within 'read_buffer.data' of the next separator starting from position 'read_buffer.position'. Currently supports following separators. * '\n' * '\r\n' This method may increase the size of buffer but it will not decrease the size of it. Ensuring that there are enough bytes to determine if there is a '\n' at current_pos. Using find() here is more efficient than a linear scan of the byte array. Found a '\r\n'. Accepting that as the next separator. Found a '\n'. Accepting that as the next separator. Tries to ensure that there are at least num_bytes bytes in the buffer. Returns True if this can be fulfilled, returned False if this cannot be fulfilled due to reaching EOF. We reached end of file. It is OK to just break here because subsequent _read_record will return same result. Returns a tuple containing the current_record and number of bytes to the next record starting from 'read_buffer.position'. If EOF is reached, returns a tuple containing the current record and -1. read_buffer is too large. Truncating and adjusting it. Reached EOF. Bytes up to the EOF is the next record. Returning '-1' for the starting position of the next record. Current record should not contain the separator. Current record should contain the separator. type: coders.Coder 64MB type: coders.Coder type: coders.Coder type: str type: Optional[str] type: coders.Coder | 11,982 | en | 0.821316 |
import os
import click
from flask import Flask, render_template
from flask_wtf.csrf import CSRFError
from telechat.extensions import db, login_manager, csrf, moment
from telechat.blueprints.auth import auth_bp
from telechat.blueprints.chat import chat_bp
from telechat.blueprints.admin import admin_bp
from telechat.blueprints.oauth import oauth_bp
from telechat.settings import config
from telechat.models import User, Message
def register_extensions(app: Flask):
    """Register the required extension packages on the Flask app instance.

    Extensions: database ORM (db), login state management (login_manager),
    CSRF token management (csrf), and time formatting (moment).
    """
    for extension in (db, login_manager, csrf, moment):
        extension.init_app(app)
def register_blueprints(app: Flask):
    """Register the blueprint packages on the Flask app instance."""
    for blueprint in (auth_bp, oauth_bp, chat_bp, admin_bp):
        app.register_blueprint(blueprint)
def register_errors(app: Flask):
    """Register the error handler packages on the Flask app instance."""

    @app.errorhandler(400)
    def handle_bad_request(e):
        # Bad Request: the client sent a request the server cannot understand.
        return render_template('error.html', description=e.description, code=e.code), 400

    @app.errorhandler(404)
    def handle_page_not_found(e):
        # Not Found: no resource matches the requested URL.
        return render_template('error.html', description=e.description, code=e.code), 404

    @app.errorhandler(500)
    def handle_internal_server_error(e):
        # Internal Server Error: the request could not be completed.
        return render_template('error.html', description="服务器内部错误,无法完成请求!", code="500"), 500

    @app.errorhandler(CSRFError)
    def handle_csrf_error(e):
        # Raised when CSRF token validation fails.
        return render_template('error.html', description=e.description, code=e.code), 400
def register_commands(app: Flask):
    """Register the CLI command packages on the Flask app instance.

    Note: the click command docstrings below are user-facing help text and
    are intentionally kept in the application's original language.
    """

    @app.cli.command()
    @click.option('--drop', is_flag=True, help="创建之前销毁数据库")
    def initdb(drop: bool):
        """初始化数据库结构"""
        if drop:
            # TODO: ask for confirmation before dropping the database.
            pass
        # TODO: create the database schema.
        pass

    @app.cli.command()
    @click.option('--num', default=300, help="消息数量,默认为300")
    def forge(num: int):
        """生成虚拟数据"""
        # TODO: generate fake data (num messages).
        pass
def create_app(config_name=None):
    """Application factory: create the Flask app, load its configuration and
    register the extension, blueprint, error-handler and CLI packages.

    Args:
        config_name: Name of the configuration to load. When None, it is
            resolved from the FLASK_CONFIG environment variable, defaulting
            to 'development'.

    Returns:
        The fully configured Flask application instance.
    """
    if config_name is None:
        config_name = os.getenv('FLASK_CONFIG', 'development')
    # The application is named 'telechat'.
    app = Flask('telechat')
    app.config.from_object(config[config_name])
    # Wire up all sub-packages.
    register_extensions(app)
    register_blueprints(app)
    register_errors(app)
    register_commands(app)
    return app
| telechat/__init__.py | 3,161 | 程序工厂:创建 Flask 程序,加载配置,注册扩展、蓝图等程序包
生成虚拟数据
初始化数据库结构
注册需要的蓝图程序包到 Flask 程序实例 app 中
注册需要的CLI命令程序包到 Flask 程序实例 app 中
注册需要的错误处理程序包到 Flask 程序实例 app 中
注册需要的扩展程序包到 Flask 程序实例 app 中
数据库 ORM 登录状态管理 CSRF 令牌管理 时间格式化管理 Bad Request 客户端请求的语法错误,服务器无法理解 Not Found 服务器无法根据客户端的请求找到资源(网页) Internal Server Error 服务器内部错误,无法完成请求 CSRF 验证失败 确认删除 从环境变量载入配置环境名称 创建 Flask 程序实例,程序名称为 telechat 载入相应的配置 注册程序包 扩展 蓝图 错误处理 CLI命令 返回已配置好的 Flask 程序实例 | 413 | zh | 0.98702 |
"""Configuration format loaders"""
import locale
import os
from abc import ABC, abstractmethod
import yaml
from pydantic import create_model
def load_configuration(configuration_file_path, parameters_file_path, bundles):
    """Combine the configuration and parameters and build the config object.

    Args:
        configuration_file_path: Path to the YML configuration file.
        parameters_file_path: Path to the YML parameters file.
        bundles: Iterable of bundle objects; those exposing a
            ``config_mapping`` attribute contribute fields to the final
            configuration model.

    Returns:
        The validated configuration object built by YmlLoader.
    """
    mappings = {}
    for bundle in bundles:
        bundle_mapping = getattr(bundle, "config_mapping", None)
        if bundle_mapping:
            mappings.update(bundle_mapping)
    return YmlLoader().build_config(
        mappings,
        config_source=configuration_file_path,
        parameters_source=parameters_file_path)
def is_string(value):
    """Return True when *value* is neither numeric nor a boolean literal.

    Values that parse as floats (e.g. "1.5", "42") or that equal
    "true"/"false" case-insensitively are treated as non-strings so they are
    not quoted when substituted into the configuration template.
    """
    try:
        float(value)
    except ValueError:
        return value.lower() not in ("true", "false")
    return False
class ConfigurationLoader(ABC):
    """Base class for configuration loaders."""

    @abstractmethod
    def load_parameters(self, source):
        """Convert the parameter source into a dictionary."""

    @abstractmethod
    def load_config(self, config_source, parameters_source):
        """Parse the config file and build a dictionary."""

    def build_config(self, config_mappings, config_source, parameters_source):
        """Build the final configuration object from the loaded config.

        A pydantic model is created dynamically from *config_mappings*
        (field name -> type, every field required) and instantiated with the
        loaded configuration values.
        """
        fields = {name: (kind, ...) for name, kind in config_mappings.items()}
        configuration_class = create_model('Configuration', **fields)
        return configuration_class(**self.load_config(config_source, parameters_source))
class YmlLoader(ConfigurationLoader):
    """Configuration loader for the YML format."""

    def load_parameters(self, source):
        """Load parameters from a YML file; *source* is the file path.

        String values are wrapped in single quotes so that they survive the
        str.format substitution performed by load_config.
        """
        with open(source, encoding=locale.getpreferredencoding(False)) as handle:
            loaded = yaml.safe_load(handle.read())
        if not loaded:
            return {}
        return {
            key: "'" + value + "'" if isinstance(value, str) else value
            for key, value in loaded.items()
        }

    def load_config(self, config_source, parameters_source):
        """Load the configuration from a YML file; *config_source* is the
        file path.

        Parameters come from the parameters file (when present) and are then
        overridden by environment variables; the merged set is substituted
        into the raw config text via str.format before YAML parsing.
        """
        with open(config_source, encoding=locale.getpreferredencoding(False)) as handle:
            template = handle.read()
        parameters = {}
        # Parameters from the parameters file, when it exists.
        if os.path.isfile(parameters_source):
            file_params = self.load_parameters(parameters_source)
            if file_params is not None:
                parameters.update(file_params)
        # Environment variables take precedence; quote plain string values.
        env_params = dict(os.environ)
        for key, value in env_params.items():
            if is_string(value):
                env_params[key] = "'" + value + "'"
        parameters.update(env_params)
        # Substitute the parameters into the raw config and parse the result.
        rendered = template.format(**parameters)
        parsed = yaml.safe_load(rendered)
        return parsed if parsed is not None else {}
| applauncher/configuration.py | 3,210 | Base configuration loader
YML Format parser and config loader
By using the loaded parameters and loaded config, build the final configuration object
Check if the value is actually a string or not
Parse the config file and build a dictionary
For YML, the source it the file path
Combines the configuration and parameters and build the configuration object
Convert the source into a dictionary
For YML, the source it the file path
Configuration format loaders
Parameters from file Overwrite parameters with the environment variables Replace the parameters | 555 | en | 0.48322 |
from sqlalchemy import and_
from sqlalchemy import exc
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import INT
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import Sequence
from sqlalchemy import sql
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy import VARCHAR
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
class InsertExecTest(fixtures.TablesTest):
    """Tests for INSERT execution behavior: executemany parameter handling,
    multi-values inserts, and the inserted_primary_key accessor across
    implicit-returning and non-returning engines."""

    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        # Single test table with an autoincrementing integer PK.
        Table(
            "users",
            metadata,
            Column(
                "user_id", INT, primary_key=True, test_needs_autoincrement=True
            ),
            Column("user_name", VARCHAR(20)),
            test_needs_acid=True,
        )

    @testing.requires.multivalues_inserts
    def test_multivalues_insert(self):
        """Multi-values INSERT accepts both dict and tuple row forms."""
        users = self.tables.users
        users.insert(
            values=[
                {"user_id": 7, "user_name": "jack"},
                {"user_id": 8, "user_name": "ed"},
            ]
        ).execute()
        rows = users.select().order_by(users.c.user_id).execute().fetchall()
        eq_(rows[0], (7, "jack"))
        eq_(rows[1], (8, "ed"))
        # Same statement with positional tuples instead of dicts.
        users.insert(values=[(9, "jack"), (10, "ed")]).execute()
        rows = users.select().order_by(users.c.user_id).execute().fetchall()
        eq_(rows[2], (9, "jack"))
        eq_(rows[3], (10, "ed"))

    def test_insert_heterogeneous_params(self):
        """test that executemany parameters are asserted to match the
        parameter set of the first."""
        users = self.tables.users
        # The first param dict defines the expected keys; a later dict that
        # is missing one of them raises StatementError.
        assert_raises_message(
            exc.StatementError,
            r"\(sqlalchemy.exc.InvalidRequestError\) A value is required for "
            "bind parameter 'user_name', in "
            "parameter group 2\n"
            r"\[SQL: u?INSERT INTO users",
            users.insert().execute,
            {"user_id": 7, "user_name": "jack"},
            {"user_id": 8, "user_name": "ed"},
            {"user_id": 9},
        )
        # this succeeds however. We aren't yet doing
        # a length check on all subsequent parameters.
        users.insert().execute(
            {"user_id": 7}, {"user_id": 8, "user_name": "ed"}, {"user_id": 9}
        )

    def _test_lastrow_accessor(self, table_, values, assertvalues):
        """Tests the inserted_primary_key and lastrow_has_id() functions."""

        def insert_values(engine, table_, values):
            """
            Inserts a row into a table, returns the full list of values
            INSERTed including defaults that fired off on the DB side and
            detects rows that had defaults and post-fetches.
            """
            # verify implicit_returning is working
            if engine.dialect.implicit_returning:
                ins = table_.insert()
                comp = ins.compile(engine, column_keys=list(values))
                if not set(values).issuperset(
                    c.key for c in table_.primary_key
                ):
                    # PK not fully supplied -> RETURNING must be in play.
                    is_(bool(comp.returning), True)
            result = engine.execute(table_.insert(), **values)
            ret = values.copy()
            # Merge the generated PK values into the returned dict.
            for col, id_ in zip(
                table_.primary_key, result.inserted_primary_key
            ):
                ret[col.key] = id_
            if result.lastrow_has_defaults():
                # Server-side defaults fired: post-fetch the full row by PK.
                criterion = and_(
                    *[
                        col == id_
                        for col, id_ in zip(
                            table_.primary_key, result.inserted_primary_key
                        )
                    ]
                )
                row = engine.execute(table_.select(criterion)).first()
                for c in table_.c:
                    ret[c.key] = row[c]
            return ret

        if testing.against("firebird", "postgresql", "oracle", "mssql"):
            assert testing.db.dialect.implicit_returning
        # Exercise both the RETURNING and non-RETURNING code paths when the
        # dialect supports implicit returning.
        if testing.db.dialect.implicit_returning:
            test_engines = [
                engines.testing_engine(options={"implicit_returning": False}),
                engines.testing_engine(options={"implicit_returning": True}),
            ]
        else:
            test_engines = [testing.db]
        for engine in test_engines:
            try:
                table_.create(bind=engine, checkfirst=True)
                i = insert_values(engine, table_, values)
                eq_(i, assertvalues)
            finally:
                table_.drop(bind=engine)

    @testing.skip_if("sqlite")
    def test_lastrow_accessor_one(self):
        """Composite PK: autoincrement integer plus string column."""
        metadata = MetaData()
        self._test_lastrow_accessor(
            Table(
                "t1",
                metadata,
                Column(
                    "id",
                    Integer,
                    primary_key=True,
                    test_needs_autoincrement=True,
                ),
                Column("foo", String(30), primary_key=True),
            ),
            {"foo": "hi"},
            {"id": 1, "foo": "hi"},
        )

    @testing.skip_if("sqlite")
    def test_lastrow_accessor_two(self):
        """Composite PK plus a server-default column (post-fetch path)."""
        metadata = MetaData()
        self._test_lastrow_accessor(
            Table(
                "t2",
                metadata,
                Column(
                    "id",
                    Integer,
                    primary_key=True,
                    test_needs_autoincrement=True,
                ),
                Column("foo", String(30), primary_key=True),
                Column("bar", String(30), server_default="hi"),
            ),
            {"foo": "hi"},
            {"id": 1, "foo": "hi", "bar": "hi"},
        )

    def test_lastrow_accessor_three(self):
        """All-explicit values: no generated PK, no defaults."""
        metadata = MetaData()
        self._test_lastrow_accessor(
            Table(
                "t3",
                metadata,
                Column("id", String(40), primary_key=True),
                Column("foo", String(30), primary_key=True),
                Column("bar", String(30)),
            ),
            {"id": "hi", "foo": "thisisfoo", "bar": "thisisbar"},
            {"id": "hi", "foo": "thisisfoo", "bar": "thisisbar"},
        )

    @testing.requires.sequences
    def test_lastrow_accessor_four(self):
        """PK backed by an optional Sequence, with a server default."""
        metadata = MetaData()
        self._test_lastrow_accessor(
            Table(
                "t4",
                metadata,
                Column(
                    "id",
                    Integer,
                    Sequence("t4_id_seq", optional=True),
                    primary_key=True,
                ),
                Column("foo", String(30), primary_key=True),
                Column("bar", String(30), server_default="hi"),
            ),
            {"foo": "hi", "id": 1},
            {"id": 1, "foo": "hi", "bar": "hi"},
        )

    def test_lastrow_accessor_five(self):
        """String PK supplied explicitly, server default post-fetched."""
        metadata = MetaData()
        self._test_lastrow_accessor(
            Table(
                "t5",
                metadata,
                Column("id", String(10), primary_key=True),
                Column("bar", String(30), server_default="hi"),
            ),
            {"id": "id1"},
            {"id": "id1", "bar": "hi"},
        )

    @testing.skip_if("sqlite")
    def test_lastrow_accessor_six(self):
        """Composite integer PK where only the second column is supplied."""
        metadata = MetaData()
        self._test_lastrow_accessor(
            Table(
                "t6",
                metadata,
                Column(
                    "id",
                    Integer,
                    primary_key=True,
                    test_needs_autoincrement=True,
                ),
                Column("bar", Integer, primary_key=True),
            ),
            {"bar": 0},
            {"id": 1, "bar": 0},
        )

    # TODO: why not in the sqlite suite?
    @testing.only_on("sqlite+pysqlite")
    @testing.provide_metadata
    def test_lastrowid_zero(self):
        """A cursor lastrowid of 0 is still reported as the inserted PK."""
        from sqlalchemy.dialects import sqlite

        eng = engines.testing_engine()

        class ExcCtx(sqlite.base.SQLiteExecutionContext):
            def get_lastrowid(self):
                # Force the edge-case value.
                return 0

        eng.dialect.execution_ctx_cls = ExcCtx
        t = Table(
            "t",
            self.metadata,
            Column("x", Integer, primary_key=True),
            Column("y", Integer),
        )
        t.create(eng)
        r = eng.execute(t.insert().values(y=5))
        eq_(r.inserted_primary_key, [0])

    @testing.fails_on(
        "sqlite", "sqlite autoincremnt doesn't work with composite pks"
    )
    @testing.provide_metadata
    def test_misordered_lastrow(self):
        """inserted_primary_key preserves column order even when the
        autoincrement column is not the first PK column."""
        metadata = self.metadata
        related = Table(
            "related",
            metadata,
            Column("id", Integer, primary_key=True),
            mysql_engine="MyISAM",
        )
        t6 = Table(
            "t6",
            metadata,
            Column(
                "manual_id",
                Integer,
                ForeignKey("related.id"),
                primary_key=True,
            ),
            Column(
                "auto_id",
                Integer,
                primary_key=True,
                test_needs_autoincrement=True,
            ),
            mysql_engine="MyISAM",
        )
        metadata.create_all()
        r = related.insert().values(id=12).execute()
        id_ = r.inserted_primary_key[0]
        eq_(id_, 12)
        r = t6.insert().values(manual_id=id_).execute()
        eq_(r.inserted_primary_key, [12, 1])

    def test_implicit_id_insert_select_columns(self):
        """INSERT ... FROM SELECT with explicit Column objects executes."""
        users = self.tables.users
        stmt = users.insert().from_select(
            (users.c.user_id, users.c.user_name),
            users.select().where(users.c.user_id == 20),
        )
        testing.db.execute(stmt)

    def test_implicit_id_insert_select_keys(self):
        """INSERT ... FROM SELECT with string column keys executes."""
        users = self.tables.users
        stmt = users.insert().from_select(
            ["user_id", "user_name"],
            users.select().where(users.c.user_id == 20),
        )
        testing.db.execute(stmt)

    @testing.requires.empty_inserts
    @testing.requires.returning
    def test_no_inserted_pk_on_returning(self):
        """inserted_primary_key is unavailable when RETURNING is explicit."""
        users = self.tables.users
        result = testing.db.execute(
            users.insert().returning(users.c.user_id, users.c.user_name)
        )
        assert_raises_message(
            exc.InvalidRequestError,
            r"Can't call inserted_primary_key when returning\(\) is used.",
            getattr,
            result,
            "inserted_primary_key",
        )
class TableInsertTest(fixtures.TablesTest):
    """test for consistent insert behavior across dialects
    regarding the inline=True flag, lower-case 't' tables.
    """
    run_create_tables = "each"
    __backend__ = True
    @classmethod
    def define_tables(cls, metadata):
        # One table with a Sequence-backed integer PK plus two data columns.
        Table(
            "foo",
            metadata,
            Column("id", Integer, Sequence("t_id_seq"), primary_key=True),
            Column("data", String(50)),
            Column("x", Integer),
        )
    def _fixture(self, types=True):
        """Return a lower-case table() construct mirroring 'foo'.

        With types=False the columns carry no type information, which
        exercises untyped-parameter handling in the dialect.
        """
        if types:
            t = sql.table(
                "foo",
                sql.column("id", Integer),
                sql.column("data", String),
                sql.column("x", Integer),
            )
        else:
            t = sql.table(
                "foo", sql.column("id"), sql.column("data"), sql.column("x")
            )
        return t
    def _test(self, stmt, row, returning=None, inserted_primary_key=False):
        """Execute stmt, check RETURNING tuple or inserted_primary_key,
        then verify the table contains exactly ``row``.

        ``inserted_primary_key=False`` is the sentinel for "don't check";
        real expected values such as [] or [1] are compared with eq_().
        """
        r = testing.db.execute(stmt)
        if returning:
            returned = r.first()
            eq_(returned, returning)
        elif inserted_primary_key is not False:
            eq_(r.inserted_primary_key, inserted_primary_key)
        eq_(testing.db.execute(self.tables.foo.select()).first(), row)
    def _test_multi(self, stmt, rows, data):
        """executemany ``rows`` through stmt, then compare full table contents."""
        testing.db.execute(stmt, rows)
        eq_(
            testing.db.execute(
                self.tables.foo.select().order_by(self.tables.foo.c.id)
            ).fetchall(),
            data,
        )
    @testing.requires.sequences
    def test_explicit_sequence(self):
        # id produced by explicitly invoking the sequence in VALUES.
        t = self._fixture()
        self._test(
            t.insert().values(
                id=func.next_value(Sequence("t_id_seq")), data="data", x=5
            ),
            (1, "data", 5),
        )
    def test_uppercase(self):
        t = self.tables.foo
        self._test(
            t.insert().values(id=1, data="data", x=5),
            (1, "data", 5),
            inserted_primary_key=[1],
        )
    def test_uppercase_inline(self):
        t = self.tables.foo
        self._test(
            t.insert(inline=True).values(id=1, data="data", x=5),
            (1, "data", 5),
            inserted_primary_key=[1],
        )
    @testing.crashes(
        "mssql+pyodbc",
        "Pyodbc + SQL Server + Py3K, some decimal handling issue",
    )
    def test_uppercase_inline_implicit(self):
        # inline insert with no explicit id: PK is reported as [None].
        t = self.tables.foo
        self._test(
            t.insert(inline=True).values(data="data", x=5),
            (1, "data", 5),
            inserted_primary_key=[None],
        )
    def test_uppercase_implicit(self):
        t = self.tables.foo
        self._test(
            t.insert().values(data="data", x=5),
            (1, "data", 5),
            inserted_primary_key=[1],
        )
    def test_uppercase_direct_params(self):
        t = self.tables.foo
        self._test(
            t.insert().values(id=1, data="data", x=5),
            (1, "data", 5),
            inserted_primary_key=[1],
        )
    @testing.requires.returning
    def test_uppercase_direct_params_returning(self):
        t = self.tables.foo
        self._test(
            t.insert().values(id=1, data="data", x=5).returning(t.c.id, t.c.x),
            (1, "data", 5),
            returning=(1, 5),
        )
    @testing.fails_on(
        "mssql", "lowercase table doesn't support identity insert disable"
    )
    def test_direct_params(self):
        # lower-case table construct: no PK metadata, so the key list is [].
        t = self._fixture()
        self._test(
            t.insert().values(id=1, data="data", x=5),
            (1, "data", 5),
            inserted_primary_key=[],
        )
    @testing.fails_on(
        "mssql", "lowercase table doesn't support identity insert disable"
    )
    @testing.requires.returning
    def test_direct_params_returning(self):
        t = self._fixture()
        self._test(
            t.insert().values(id=1, data="data", x=5).returning(t.c.id, t.c.x),
            (1, "data", 5),
            returning=(1, 5),
        )
    @testing.requires.emulated_lastrowid
    def test_implicit_pk(self):
        t = self._fixture()
        self._test(
            t.insert().values(data="data", x=5),
            (1, "data", 5),
            inserted_primary_key=[],
        )
    @testing.requires.emulated_lastrowid
    def test_implicit_pk_multi_rows(self):
        t = self._fixture()
        self._test_multi(
            t.insert(),
            [
                {"data": "d1", "x": 5},
                {"data": "d2", "x": 6},
                {"data": "d3", "x": 7},
            ],
            [(1, "d1", 5), (2, "d2", 6), (3, "d3", 7)],
        )
    @testing.requires.emulated_lastrowid
    def test_implicit_pk_inline(self):
        t = self._fixture()
        self._test(
            t.insert(inline=True).values(data="data", x=5),
            (1, "data", 5),
            inserted_primary_key=[],
        )
| test/sql/test_insert_exec.py | 15,732 | test for consistent insert behavior across dialects
regarding the inline=True flag, lower-case 't' tables.
Tests the inserted_primary_key and lastrow_has_id() functions.
Inserts a row into a table, returns the full list of values
INSERTed including defaults that fired off on the DB side and
detects rows that had defaults and post-fetches.
test that executemany parameters are asserted to match the
parameter set of the first.
this succeeds however. We aren't yet doing a length check on all subsequent parameters. verify implicit_returning is working TODO: why not in the sqlite suite? | 591 | en | 0.730241 |
import re
import connexion
import logging
import auslib
from os import path
from flask import request
from flask_compress import Compress
from auslib.web.admin.views.problem import problem
from auslib.web.admin.views.validators import BalrogRequestBodyValidator
from raven.contrib.flask import Sentry
from specsynthase.specbuilder import SpecBuilder
try:
from urllib import unquote
except ImportError: # pragma: no cover
from urllib.parse import unquote
log = logging.getLogger(__name__)
current_dir = path.dirname(__file__)
web_dir = path.dirname(auslib.web.__file__)
# Stitch the admin API spec together from the app-local api.yaml plus the
# shared definitions/parameters/responses used across the web apps.
spec = SpecBuilder().add_spec(path.join(current_dir, 'swagger/api.yaml'))\
    .add_spec(path.join(web_dir, 'common/swagger/definitions.yml'))\
    .add_spec(path.join(web_dir, 'common/swagger/parameters.yml'))\
    .add_spec(path.join(web_dir, 'common/swagger/responses.yml'))
# Use the project's own request-body validator instead of Connexion's default.
validator_map = {
    'body': BalrogRequestBodyValidator
}
connexion_app = connexion.App(__name__, validator_map=validator_map, debug=False)
connexion_app.add_api(spec, strict_validation=True)
app = connexion_app.app
sentry = Sentry()
from auslib.dockerflow import create_dockerflow_endpoints
create_dockerflow_endpoints(app)
# When running under uwsgi, paths will not get decoded before hitting the app.
# We need to handle this ourselves in certain fields, and adding converters
# for them is the best way to do this.
class UnquotingMiddleware(object):
    """WSGI middleware that percent-decodes PATH_INFO before dispatching."""

    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):
        decoded_path = unquote(environ["PATH_INFO"])
        environ["PATH_INFO"] = decoded_path
        return self.app(environ, start_response)
app.wsgi_app = UnquotingMiddleware(app.wsgi_app)
@app.errorhandler(500)
def ise(error):
    """Log details of an internal server error, then return it unchanged."""
    log.error("Caught ISE 500 error.")
    # The request context is still available here, so record everything
    # that might help reproduce the failure.
    log.debug("Request path is: %s", request.path)
    log.debug("Request environment is: %s", request.environ)
    log.debug("Request headers are: %s", request.headers)
    return error
# Connexion's error handling sometimes breaks when parameters contain
# unicode characters (https://github.com/zalando/connexion/issues/604).
# To work around, we catch them and return a 400 (which is what Connexion
# would do if it didn't hit this error).
@app.errorhandler(UnicodeEncodeError)
def unicode(error):
    """Map UnicodeEncodeError raised in Connexion's error path to a 400."""
    return problem(400, "Unicode Error", "Connexion was unable to parse some unicode data correctly.")
@app.after_request
def add_security_headers(response):
    """Attach standard security headers to every outgoing response."""
    response.headers['X-Frame-Options'] = 'DENY'
    response.headers['X-Content-Type-Options'] = 'nosniff'
    response.headers["Strict-Transport-Security"] = app.config.get("STRICT_TRANSPORT_SECURITY", "max-age=31536000;")
    if re.match("^/ui/", request.path):
        # This enables swagger-ui to dynamically fetch and
        # load the swagger specification JSON file containing API definition and examples.
        response.headers['X-Frame-Options'] = 'SAMEORIGIN'
    else:
        # Everything else is locked down: no content sources, no framing.
        response.headers["Content-Security-Policy"] = \
            app.config.get("CONTENT_SECURITY_POLICY", "default-src 'none'; frame-ancestors 'none'")
    return response
Compress(app)
| auslib/web/admin/base.py | 3,154 | pragma: no cover When running under uwsgi, paths will not get decoded before hitting the app. We need to handle this ourselves in certain fields, and adding converters for them is the best way to do this. Connexion's error handling sometimes breaks when parameters contain unicode characters (https://github.com/zalando/connexion/issues/604). To work around, we catch them and return a 400 (which is what Connexion would do if it didn't hit this error). This enables swagger-ui to dynamically fetch and load the swagger specification JSON file containing API definition and examples. | 583 | en | 0.822537 |
# -*- coding: utf-8 -*-
# FLEDGE_BEGIN
# See: http://fledge.readthedocs.io/
# FLEDGE_END
""" Test end to end flow with:
Notification service with
Threshold in-built rule plugin
notify-python35 delivery channel plugin
"""
import os
import time
import subprocess
import http.client
import json
from threading import Event
import urllib.parse
import pytest
__author__ = "Ashish Jabble"
__copyright__ = "Copyright (c) 2019 Dianomic Systems"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
SERVICE = "notification"
SERVICE_NAME = "NotificationServer #1"
NOTIFY_PLUGIN = "python35"
NOTIFY_INBUILT_RULES = ["Threshold"]
def _configure_and_start_service(service_branch, fledge_url, remove_directories):
    """Install the notification C service from the given branch, then start it.

    Installation failure fails the test immediately; the temporary clone
    directory under /tmp is removed in all cases.
    """
    try:
        subprocess.run(["$FLEDGE_ROOT/tests/system/python/scripts/install_c_service {} {}"
                       .format(service_branch, SERVICE)], shell=True, check=True, stdout=subprocess.DEVNULL)
    except subprocess.CalledProcessError:
        assert False, "{} installation failed".format(SERVICE)
    finally:
        remove_directories("/tmp/fledge-service-{}".format(SERVICE))
    # Start service
    conn = http.client.HTTPConnection(fledge_url)
    data = {"name": SERVICE_NAME,
            "type": "notification",
            "enabled": "true"
            }
    conn.request("POST", '/fledge/service', json.dumps(data))
    r = conn.getresponse()
    assert 200 == r.status
    r = r.read().decode()
    jdoc = json.loads(r)
    # Response carries the service name plus its id.
    assert 2 == len(jdoc)
    assert SERVICE_NAME == jdoc['name']
def _install_notify_plugin(notify_branch, plugin_name, remove_directories):
    """Install a C notify (delivery) plugin from the given branch.

    The temporary clone directory under /tmp is removed in all cases.
    """
    try:
        subprocess.run(["$FLEDGE_ROOT/tests/system/python/scripts/install_c_plugin {} notify {}".format(
            notify_branch, plugin_name)], shell=True, check=True, stdout=subprocess.DEVNULL)
    except subprocess.CalledProcessError:
        assert False, "{} installation failed".format(plugin_name)
    finally:
        remove_directories("/tmp/fledge-notify-{}".format(plugin_name))
def _get_result(fledge_url, path):
    """GET ``path`` from the Fledge API, assert HTTP 200, return parsed JSON."""
    connection = http.client.HTTPConnection(fledge_url)
    connection.request("GET", path)
    response = connection.getresponse()
    assert 200 == response.status
    return json.loads(response.read().decode())
def _verify_service(fledge_url, status):
    """Assert exactly one service named SERVICE_NAME exists with the given status."""
    services = _get_result(fledge_url, '/fledge/service')['services']
    matched = [entry for entry in services if entry['name'] == SERVICE_NAME]
    assert 1 == len(matched)
    service = matched[0]
    assert SERVICE.capitalize() == service['type']
    assert status == service['status']
def _verify_audit_log_entry(fledge_url, path, name, severity='INFORMATION', count=1):
    """Assert the newest audit record at ``path`` matches severity/name and total count."""
    jdoc = _get_result(fledge_url, path)
    assert len(jdoc['audit'])
    assert count == jdoc['totalCount']
    newest = jdoc['audit'][0]
    assert severity == newest['severity']
    assert name == newest['details']['name']
def _add_notification_instance(fledge_url, payload):
    """POST a notification instance and assert the API confirms creation."""
    connection = http.client.HTTPConnection(fledge_url)
    connection.request("POST", '/fledge/notification', json.dumps(payload))
    response = connection.getresponse()
    assert 200 == response.status
    jdoc = json.loads(response.read().decode())
    assert "Notification {} created successfully".format(payload['name']) == jdoc['result']
def pause_for_x_seconds(x=1):
    """Block the calling thread for ``x`` seconds via an Event timeout."""
    # A fresh Event is never set, so wait() simply times out after x seconds.
    Event().wait(timeout=x)
class TestNotificationService:
    """Startup checks for the notification service and plugin discovery."""
    def test_service(self, reset_and_start_fledge, service_branch, fledge_url, wait_time, retries, remove_directories):
        """Install + start the service, then verify registration and audit log.

        Polls the service registry until a third service (beyond the default
        core and storage) appears, or the retry budget is exhausted.
        """
        _configure_and_start_service(service_branch, fledge_url, remove_directories)
        retry_count = 0
        # only 2 services are up by default, i.e. core and storage
        default_registry_count = 2
        registered_count = default_registry_count
        # BUG FIX: the original kept the raw service *list* in the loop
        # variable and compared it to the int 3, which is never true, so the
        # loop always consumed every retry. Track the count instead so the
        # poll stops as soon as the notification service registers.
        while registered_count != 3 and retry_count < retries:
            svc = _get_result(fledge_url, '/fledge/service')
            registered_count = len(svc['services'])
            retry_count += 1
            pause_for_x_seconds(x=wait_time * 2)
        if registered_count == default_registry_count:
            assert False, "Failed to start the {} service".format(SERVICE)
        _verify_service(fledge_url, status='running')
        _verify_audit_log_entry(fledge_url, '/fledge/audit?source=NTFST', name=SERVICE_NAME)
    def test_get_default_notification_plugins(self, fledge_url, remove_directories):
        """With plugin dirs removed, only the in-built Threshold rule is listed."""
        remove_directories(os.environ['FLEDGE_ROOT'] + '/plugins/notificationDelivery')
        remove_directories(os.environ['FLEDGE_ROOT'] + '/plugins/notificationRule')
        # NOTE(review): the two paths below lack a leading '/' after
        # FLEDGE_ROOT, unlike the two above — confirm whether that is
        # intentional or they silently point at a nonexistent directory.
        remove_directories(os.environ['FLEDGE_ROOT'] + 'cmake_build/C/plugins/notificationDelivery')
        remove_directories(os.environ['FLEDGE_ROOT'] + 'cmake_build/C/plugins/notificationRule')
        jdoc = _get_result(fledge_url, '/fledge/notification/plugin')
        assert [] == jdoc['delivery']
        assert 1 == len(jdoc['rules'])
        assert NOTIFY_INBUILT_RULES[0] == jdoc['rules'][0]['name']
class TestNotificationCRUD:
    """Create / read / update / delete coverage for notification instances."""
    @pytest.mark.parametrize("data", [
        {"name": "Test 1", "description": "Test 1 notification", "rule": NOTIFY_INBUILT_RULES[0],
         "channel": NOTIFY_PLUGIN, "enabled": "false", "notification_type": "retriggered"},
        {"name": "Test2", "description": "Test 2 notification", "rule": NOTIFY_INBUILT_RULES[0],
         "channel": NOTIFY_PLUGIN, "enabled": "false", "notification_type": "toggled"},
        {"name": "Test #3", "description": "Test 3 notification", "rule": NOTIFY_INBUILT_RULES[0],
         "channel": NOTIFY_PLUGIN, "enabled": "false", "notification_type": "one shot"}
    ])
    def test_create_notification_instances_with_default_rule_and_channel_python35(self, fledge_url, notify_branch,
                                                                                  data,
                                                                                  remove_directories):
        """Create three instances; the delivery plugin installs once, on the first case."""
        if data['name'] == 'Test 1':
            _install_notify_plugin(notify_branch, NOTIFY_PLUGIN, remove_directories)
        _add_notification_instance(fledge_url, data)
    def test_inbuilt_rule_plugin_and_notify_python35_delivery(self, fledge_url):
        """Both the delivery plugin and the in-built rule are now discoverable."""
        jdoc = _get_result(fledge_url, '/fledge/notification/plugin')
        assert 1 == len(jdoc['delivery'])
        assert NOTIFY_PLUGIN == jdoc['delivery'][0]['name']
        assert 1 == len(jdoc['rules'])
        assert NOTIFY_INBUILT_RULES[0] == jdoc['rules'][0]['name']
    def test_get_notifications_and_audit_entry(self, fledge_url):
        """All three created notifications exist and were audited (NTFAD)."""
        jdoc = _get_result(fledge_url, '/fledge/notification')
        assert 3 == len(jdoc['notifications'])
        # Test 1, Test2 and Test #3
        jdoc = _get_result(fledge_url, '/fledge/audit?source=NTFAD')
        assert 3 == jdoc['totalCount']
    def test_update_notification(self, fledge_url, name="Test 1"):
        """PUT updates the notification type; read it back to confirm."""
        conn = http.client.HTTPConnection(fledge_url)
        data = {"notification_type": "toggled"}
        conn.request("PUT", '/fledge/notification/{}'.format(urllib.parse.quote(name))
                     , json.dumps(data))
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert "Notification {} updated successfully".format(name) == jdoc["result"]
        # Verify updated notification info
        jdoc = _get_result(fledge_url, '/fledge/notification/{}'.format(urllib.parse.quote(name)))
        assert "toggled" == jdoc['notification']['notificationType']
    def test_delete_notification(self, fledge_url, name="Test #3"):
        """DELETE removes the notification and leaves an NTFDL audit entry."""
        conn = http.client.HTTPConnection(fledge_url)
        conn.request("DELETE", '/fledge/notification/{}'.format(urllib.parse.quote(name)))
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert "Notification {} deleted successfully.".format(name) == jdoc["result"]
        # Verify only two notifications should exist NOT 3
        jdoc = _get_result(fledge_url, '/fledge/notification')
        notifications = jdoc['notifications']
        assert 2 == len(notifications)
        assert "Test 1" == notifications[0]['name']
        assert "Test2" == notifications[1]['name']
        jdoc = _get_result(fledge_url, '/fledge/audit?source=NTFDL')
        assert 1 == jdoc['totalCount']
class TestSentAndReceiveNotification:
    """End-to-end: ingest a reading via fogbench and verify delivery (NTFSN)."""
    FOGBENCH_TEMPLATE = "fogbench-template.json"
    SENSOR_VALUE = 20
    SOUTH_PLUGIN_NAME = "coap"
    ASSET_NAME = "{}".format(SOUTH_PLUGIN_NAME)
    @pytest.fixture
    def start_south(self, add_south, remove_data_file, remove_directories, south_branch, fledge_url):
        """ This fixture clone a south repo and starts south instance
            add_south: Fixture that starts any south service with given configuration
            remove_data_file: Fixture that remove data file created during the tests
            remove_directories: Fixture that remove directories created during the tests """
        fogbench_template_path = self.prepare_template_reading_from_fogbench()
        add_south(self.SOUTH_PLUGIN_NAME, south_branch, fledge_url, service_name=self.SOUTH_PLUGIN_NAME)
        yield self.start_south
        # Cleanup code that runs after the test is over
        remove_data_file(fogbench_template_path)
        remove_directories("/tmp/fledge-south-{}".format(self.SOUTH_PLUGIN_NAME))
    def prepare_template_reading_from_fogbench(self):
        """ Define the template file for fogbench readings """
        # min == max == SENSOR_VALUE, so fogbench always emits exactly that value.
        fogbench_template_path = os.path.join(
            os.path.expandvars('${FLEDGE_ROOT}'), 'data/{}'.format(self.FOGBENCH_TEMPLATE))
        with open(fogbench_template_path, "w") as f:
            f.write(
                '[{"name": "%s", "sensor_values": '
                '[{"name": "sensor", "type": "number", "min": %d, "max": %d, "precision": 0}]}]' % (
                    self.ASSET_NAME, self.SENSOR_VALUE, self.SENSOR_VALUE))
        return fogbench_template_path
    def ingest_readings_from_fogbench(self, fledge_url, wait_time):
        """Run fogbench once and assert the single reading was ingested."""
        pause_for_x_seconds(x=wait_time*3)
        conn = http.client.HTTPConnection(fledge_url)
        subprocess.run(["cd $FLEDGE_ROOT/extras/python; python3 -m fogbench -t ../../data/{}; cd -"
                       .format(self.FOGBENCH_TEMPLATE)], shell=True, check=True, stdout=subprocess.DEVNULL)
        pause_for_x_seconds(x=wait_time)
        conn.request("GET", '/fledge/asset')
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        val = json.loads(r)
        assert 1 == len(val)
        assert self.ASSET_NAME == val[0]["assetCode"]
        assert 1 == val[0]["count"]
        conn.request("GET", '/fledge/asset/{}'.format(self.ASSET_NAME))
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        val = json.loads(r)
        assert 1 == len(val)
        assert {'sensor': self.SENSOR_VALUE} == val[0]["reading"]
    def configure_rule_with_single_item_eval_type(self, fledge_url, cat_name):
        """Configure the Threshold rule to trigger when sensor > SENSOR_VALUE - 10."""
        conn = http.client.HTTPConnection(fledge_url)
        data = {"asset": self.ASSET_NAME,
                "datapoint": "sensor",
                "evaluation_data": "Single Item",
                "condition": ">",
                "trigger_value": str(self.SENSOR_VALUE - 10),
                }
        conn.request("PUT", '/fledge/category/rule{}'.format(cat_name), json.dumps(data))
        r = conn.getresponse()
        assert 200 == r.status
    def enable_notification(self, fledge_url, cat_name, is_enabled=True):
        """Toggle the enable flag of a notification category."""
        _enabled = "true" if is_enabled else "false"
        data = {"value": _enabled}
        conn = http.client.HTTPConnection(fledge_url)
        conn.request("PUT", '/fledge/category/{}/enable'.format(cat_name), json.dumps(data))
        r = conn.getresponse()
        assert 200 == r.status
    def test_sent_and_receive_notification(self, fledge_url, start_south, wait_time):
        """Create an enabled notification, ingest a triggering reading, expect NTFSN."""
        data = {"name": "Test4",
                "description": "Test4_Notification",
                "rule": NOTIFY_INBUILT_RULES[0],
                "channel": NOTIFY_PLUGIN,
                "enabled": True,
                "notification_type": "one shot"
                }
        name = data['name']
        _add_notification_instance(fledge_url, data)
        self.configure_rule_with_single_item_eval_type(fledge_url, name)
        # upload script NotifyPython35::configure() -> lowercase(categoryName) + _script_ + method_name + ".py"
        cat_name = "delivery{}".format(name)
        script_path = '$FLEDGE_ROOT/tests/system/python/data/notify35.py'
        url = 'http://' + fledge_url + '/fledge/category/' + cat_name + '/script/upload'
        upload_script = 'curl -F "script=@{}" {}'.format(script_path, url)
        subprocess.run(upload_script, shell=True, check=True, stdout=subprocess.DEVNULL)
        # enable notification delivery (it was getting disabled, as no script file was available)
        self.enable_notification(fledge_url, "delivery" + name)
        self.ingest_readings_from_fogbench(fledge_url, wait_time)
        time.sleep(wait_time)
        _verify_audit_log_entry(fledge_url, '/fledge/audit?source=NTFSN', name=name)
class TestStartStopNotificationService:
    """Lifecycle: disabling the schedule shuts down, enabling restarts."""
    def test_shutdown_service_with_schedule_disable(self, fledge_url, disable_schedule, wait_time):
        """Disabling the schedule shuts the service down and audits NTFSD once."""
        disable_schedule(fledge_url, SERVICE_NAME)
        _verify_service(fledge_url, status='shutdown')
        pause_for_x_seconds(x=wait_time)
        # After shutdown there should be 1 entry for NTFSD (shutdown)
        _verify_audit_log_entry(fledge_url, '/fledge/audit?source=NTFSD', name=SERVICE_NAME, count=1)
    def test_restart_notification_service(self, fledge_url, enable_schedule, wait_time):
        """Re-enabling the schedule restarts the service; NTFST count becomes 2."""
        enable_schedule(fledge_url, SERVICE_NAME)
        pause_for_x_seconds(x=wait_time)
        _verify_service(fledge_url, status='running')
        # After restart there should be 2 entries for NTFST (start)
        _verify_audit_log_entry(fledge_url, '/fledge/audit?source=NTFST', name=SERVICE_NAME, count=2)
| tests/system/python/e2e/test_e2e_notification_service_with_plugins.py | 14,103 | Define the template file for fogbench readings
This fixture clone a south repo and starts south instance
add_south: Fixture that starts any south service with given configuration
remove_data_file: Fixture that remove data file created during the tests
remove_directories: Fixture that remove directories created during the tests
Test end to end flow with:
Notification service with
Threshold in-built rule plugin
notify-python35 delivery channel plugin
-*- coding: utf-8 -*- FLEDGE_BEGIN See: http://fledge.readthedocs.io/ FLEDGE_END Start service only 2 services is being up by default i.e core and storage Test 1, Test2 and Test 3 Verify updated notification info Verify only two notifications should exist NOT 3 Cleanup code that runs after the test is over upload script NotifyPython35::configure() -> lowercase(categoryName) + _script_ + method_name + ".py" enable notification delivery (it was getting disabled, as no script file was available) After shutdown there should be 1 entry for NTFSD (shutdown) After restart there should be 2 entries for NTFST (start) | 1,072 | en | 0.864584 |
# coding: utf-8
# ... import symbolic tools
weak_formulation = load('pyccel.symbolic.gelato', 'weak_formulation', True, 2)
glt_function = load('pyccel.symbolic.gelato', 'glt_function', True, 3)
Grad = load('pyccel.symbolic.gelato', 'Grad', False, 1)
Curl = load('pyccel.symbolic.gelato', 'Curl', False, 1)
Div = load('pyccel.symbolic.gelato', 'Div', False, 1)
Rot = load('pyccel.symbolic.gelato', 'Rot', False, 1)
Cross = load('pyccel.symbolic.gelato', 'Cross', False, 2)
Dot = load('pyccel.symbolic.gelato', 'Dot', False, 2)
# ...
# ... Laplace
a1 = lambda x,y,v,u: Dot(Grad(u), Grad(v))
ga1 = glt_function(a1, [4, 4], [2, 2])
wa1 = weak_formulation(a1, 2)
print(' a1 := ', a1)
print(' glt symbol a1 := ', ga1)
print('wa1 := ', wa1)
print('')
# ...
# ...
a2 = lambda x,y,v,u: Rot(u) * Rot(v) + Div(u) * Div(v) + 0.2 * Dot(u, v)
ga2 = glt_function(a2, [4, 4], [2, 2])
wa2 = weak_formulation(a2, 2)
print(' a2 := ', a2)
print(' glt symbol a2 := ', ga2)
print('wa2 := ', wa2)
print('')
# ...
# ...
a3 = lambda x,y,v,u: Cross(Curl(u), Curl(v)) + 0.2 * u * v
ga3 = glt_function(a3, [4, 4], [2, 2])
wa3 = weak_formulation(a3, 2)
print(' a3 := ', a3)
print(' glt symbol a3 := ', ga3)
print('wa3 := ', wa3)
print('')
# ...
| src_old/tests/scripts/lambda/pdes/2d/ex10.py | 1,321 | coding: utf-8 ... import symbolic tools ... ... Laplace ... ... ... ... ... | 75 | en | 0.359709 |
from py12306.log.base import BaseLog
from py12306.helpers.func import *
@singleton
class OrderLog(BaseLog):
    """Log messages and helpers for the 12306 ticket-ordering workflow.

    The message constants are user-facing Chinese strings and are kept
    verbatim; they are formatted and emitted at runtime.
    """
    # Redeclared here because, without the redeclaration, log output is
    # duplicated; root cause unknown (translated from the original note).
    logs = []
    thread_logs = {}
    quick_log = []
    MESSAGE_REQUEST_INIT_DC_PAGE_FAIL = '请求初始化订单页面失败'
    MESSAGE_SUBMIT_ORDER_REQUEST_FAIL = '提交订单失败,错误原因 {} \n'
    MESSAGE_SUBMIT_ORDER_REQUEST_SUCCESS = '提交订单成功'
    MESSAGE_CHECK_ORDER_INFO_FAIL = '检查订单失败,错误原因 {} \n'
    MESSAGE_CHECK_ORDER_INFO_SUCCESS = '检查订单成功'
    MESSAGE_GET_QUEUE_INFO_SUCCESS = '获取排队信息成功,目前排队人数 {}, 余票还剩余 {} 张'
    MESSAGE_GET_QUEUE_INFO_NO_SEAT = '接口返回实际为无票,跳过本次排队'
    MESSAGE_GET_QUEUE_COUNT_SUCCESS = '排队成功,你当前排在第 {} 位, 余票还剩余 {} 张'
    MESSAGE_GET_QUEUE_LESS_TICKET = '排队失败,目前排队人数已经超过余票张数'
    MESSAGE_GET_QUEUE_COUNT_FAIL = '排队失败,错误原因 {}'
    MESSAGE_CONFIRM_SINGLE_FOR_QUEUE_SUCCESS = '# 提交订单成功!#'
    MESSAGE_CONFIRM_SINGLE_FOR_QUEUE_ERROR = '出票失败,错误原因 {}'
    MESSAGE_CONFIRM_SINGLE_FOR_QUEUE_FAIL = '提交订单失败,错误原因 {}'
    MESSAGE_QUERY_ORDER_WAIT_TIME_WAITING = '排队等待中,排队人数 {},预计还需要 {} 秒'
    MESSAGE_QUERY_ORDER_WAIT_TIME_FAIL = '排队失败,错误原因 {}'
    MESSAGE_QUERY_ORDER_WAIT_TIME_INFO = '第 {} 次排队,请耐心等待'
    MESSAGE_ORDER_SUCCESS_NOTIFICATION_TITLE = '车票购买成功!'
    MESSAGE_ORDER_SUCCESS_NOTIFICATION_CONTENT = '请及时登录12306账号[{}],打开 \'未完成订单\',在30分钟内完成支付!'
    MESSAGE_ORDER_SUCCESS_NOTIFICATION_INFO = '\t\t车次信息: {} {}[{}] -> {}[{}],乘车日期 {},席位:{},乘车人:{}'
    MESSAGE_ORDER_SUCCESS_NOTIFICATION_OF_VOICE_CODE_START_SEND = '正在发送语音通知...'
    MESSAGE_ORDER_SUCCESS_NOTIFICATION_OF_VOICE_CODE_CONTENT = '你的车票 {} 到 {} 购买成功,请登录 12306 进行支付'
    MESSAGE_ORDER_SUCCESS_NOTIFICATION_OF_EMAIL_CONTENT = '订单号 {},请及时登录12306账号[{}],打开 \'未完成订单\',在30分钟内完成支付!'
    MESSAGE_JOB_CLOSED = '当前任务已结束'
    @classmethod
    def print_passenger_did_deleted(cls, passengers):
        """Log the passenger list that remains after trimming; returns the logger."""
        self = cls()
        result = [passenger.get('name') + '(' + passenger.get('type_text') + ')' for passenger in passengers]
        self.add_quick_log('# 删减后的乘客列表 {} #'.format(', '.join(result)))
        self.flush()
        return self
    @classmethod
    def print_ticket_did_ordered(cls, order_id):
        """Log a successful purchase together with its order id; returns the logger."""
        self = cls()
        self.add_quick_log('# 车票购买成功,订单号 {} #'.format(order_id))
        self.flush()
        return self
    @classmethod
    def get_order_success_notification_info(cls, query):
        """Build the success-notification line describing train, seat and passengers."""
        from py12306.query.job import Job
        assert isinstance(query, Job)
        passengers = [passenger.get(
            'name') + '(' + passenger.get('type_text') + ')' for passenger in query.passengers]
        return cls.MESSAGE_ORDER_SUCCESS_NOTIFICATION_INFO.format(query.get_info_of_train_number(),
                                                                  query.get_info_of_left_station(),
                                                                  query.get_info_of_train_left_time(),
                                                                  query.get_info_of_arrive_station(),
                                                                  query.get_info_of_train_arrive_time(),
                                                                  query.get_info_of_left_date(),
                                                                  query.current_seat_name,
                                                                  ','.join(passengers))
| py12306/log/order_log.py | 3,989 | 这里如果不声明,会出现重复打印,目前不知道什么原因 | 25 | zh | 0.999665 |
""" DarkWorldsMetricMountianOsteric.py
Custom evaluation metric for the 'Observing Dark Worlds' competition.
[Description of metric, or reference to documentation.]
Update: Made for the training set only so users can check their results from the training catalogue.
@Author: David Harvey
Created: 22 August 2012
"""
import numpy as np
import math as mt
import itertools as it
import csv as c
import getopt as gt
import sys as sys
import argparse as ap
import string as st
import random as rd
def calc_delta_r(x_predicted, y_predicted, x_true, y_true):
    """ Compute the scalar distance between predicted halo centers
    and the true halo centers. Predictions are matched to the closest
    halo center.

    Notes: Loops over every possible true<->predicted pairing and keeps the
    configuration whose total distance is smallest. Only valid for 2 or 3
    halos (the 'a' lookup below indexes by num_halos - 2).

    Arguments:
        x_predicted, y_predicted: vectors of predicted x- and y-positions (2 to 3 elements)
        x_true, y_true: vectors of known x- and y-positions (2 to 3 elements)
    Returns:
        radial_distance: per-halo scalar distances for the best matching
        true_halo_indexes: indexes of the true halos, in matching order
        predicted_halo_indexes: indexes of the predicted halos matched to them;
            e.g. true_halo_indexes=[0,1] with predicted_halo_indexes=[1,0]
            means the first true halo matches the second prediction.
    """
    num_halos = len(x_true)  # only works for more than one halo
    num_configurations = mt.factorial(num_halos)  # number of possible matchings
    distances = np.zeros([num_configurations], float)  # total distance per matching
    radial_distance = []  # per-halo distances of the best matching

    # '01' enumerates predicted-halo indexes for 2 halos, '012' for 3.
    a = ['01', '012']
    count = 0  # index into distances for the current permutation
    true_halo_indexes = []
    predicted_halo_indexes = []
    distances_perm = np.zeros([num_configurations, num_halos], float)
    true_halo_indexes_perm = []  # log of all permutations of true halos used
    predicted_halo_indexes_perm = []  # log of all predicted permutations

    for perm in it.permutations(a[num_halos - 2], num_halos):
        which_true_halos = []
        which_predicted_halos = []
        # range() rather than the Python-2-only xrange so this runs on Python 3.
        for j in range(num_halos):
            distances_perm[count, j] = np.sqrt(
                (x_true[j] - x_predicted[int(perm[j])]) ** 2
                + (y_true[j] - y_predicted[int(perm[j])]) ** 2)
            which_true_halos.append(j)
            which_predicted_halos.append(int(perm[j]))
        true_halo_indexes_perm.append(which_true_halos)
        predicted_halo_indexes_perm.append(which_predicted_halos)
        # Total distance for this matching.
        distances[count] = sum(distances_perm[count, 0::])
        count = count + 1

    # The matching used is the one with the smallest total distance.
    config = np.where(distances == min(distances))[0][0]
    radial_distance.append(distances_perm[config, 0::])
    true_halo_indexes = true_halo_indexes_perm[config]
    predicted_halo_indexes = predicted_halo_indexes_perm[config]

    return radial_distance, true_halo_indexes, predicted_halo_indexes
def calc_theta(x_predicted, y_predicted, x_true, y_true, x_ref, y_ref):
    """ Calculate the angle the predicted position and the true position, where the zero degree corresponds to the line joing the true halo position and the reference point given.
    Arguments:
        x_predicted, y_predicted: vector for predicted x- and y-positions (1 to 3 elements)
        x_true, y_true: vector for known x- and y-positions (1 to 3 elements)
        Note that the input of these are matched up so that the first elements of each
        vector are associated with one another
        x_ref, y_ref: scalars of the x,y coordinate of reference point
    Returns:
        Theta: A vector containing the angles of the predicted halo w.r.t the true halo
        with the vector joining the reference point and the halo as the zero line.
    """
    num_halos=len(x_predicted)
    # NOTE(review): allocated with num_halos+1 entries but rebound below
    # before use — the extra slot looks like dead code; confirm.
    theta=np.zeros([num_halos+1],float) #Set up the array which will pass back the values
    phi = np.zeros([num_halos],float)
    # NOTE(review): divides by (x_true - x_ref) with no guard; a halo sitting
    # exactly at the reference x produces a divide-by-zero warning here.
    psi = np.arctan( (y_true-y_ref)/(x_true-x_ref) )
    # Angle at which the halo is at
    #with respect to the reference poitn
    # NOTE(review): the boolean masks below are inconsistent — the left-hand
    # side selects with (x_true != x_ref) while every right-hand side selects
    # with (x_true != x_predicted). If those masks select different numbers
    # of elements this assignment raises, and otherwise it may mis-assign;
    # confirm which mask is intended before relying on this function.
    phi[x_true != x_ref] = np.arctan((y_predicted[x_true != x_predicted]-\
        y_true[x_true != x_predicted])\
        /(x_predicted[x_true != x_predicted]-\
        x_true[x_true != x_predicted])) # Angle of the estimate
    #wrt true halo centre
    #Before finding the angle with the zero line as the line joiing the halo and the reference
    #point I need to convert the angle produced by Python to an angle between 0 and 2pi
    phi =convert_to_360(phi, x_predicted-x_true,\
        y_predicted-y_true)
    psi = convert_to_360(psi, x_true-x_ref,\
        y_true-y_ref)
    theta = phi-psi #The angle with the baseline as the line joing the ref and the halo
    theta[theta< 0.0]=theta[theta< 0.0]+2.0*mt.pi #If the angle of the true pos wrt the ref is
    #greater than the angle of predicted pos
    #and the true pos then add 2pi
    return theta
def convert_to_360(angle, x_in, y_in):
    """ Convert the given angles to true angles in the range 0:2pi.

    np.arctan only returns values in -pi/2:pi/2, so the quadrant of the
    (x, y) offset is used to add the missing pi or 2pi.

    Arguments:
        angle: sequence of arctan angles, modified in place
        x_in, y_in: the x and y coordinates used to determine the quadrant
            each coordinate lies in, so as to add pi or 2pi
    Returns:
        angle: the (same) sequence with every entry in the range 0:2pi
    """
    n = len(x_in)
    # range() rather than the Python-2-only xrange so this runs on Python 3.
    for i in range(n):
        if x_in[i] < 0 and y_in[i] > 0:
            # Quadrant II.
            angle[i] = angle[i] + mt.pi
        elif x_in[i] < 0 and y_in[i] < 0:
            # Quadrant III.
            angle[i] = angle[i] + mt.pi
        elif x_in[i] > 0 and y_in[i] < 0:
            # Quadrant IV.
            angle[i] = angle[i] + 2.0 * mt.pi
        elif x_in[i] == 0 and y_in[i] == 0:
            # Degenerate offset at the origin: define the angle as 0.
            angle[i] = 0
        elif x_in[i] == 0 and y_in[i] > 0:
            angle[i] = mt.pi / 2.
        elif x_in[i] < 0 and y_in[i] == 0:
            angle[i] = mt.pi
        elif x_in[i] == 0 and y_in[i] < 0:
            angle[i] = 3. * mt.pi / 2.
    return angle
def get_ref(x_halo, y_halo, weight):
    """ Gets the reference point of a system of halos as the weighted
    average of the halo coordinates.
    Arguments:
        x_halo, y_halo: vectors of halo coordinates
        weight: per-halo weights assigned to the positions
    Returns:
        x_ref, y_ref: the coordinates of the reference point for the metric
    """
    total_weight = np.sum([weight])
    x_ref = np.sum([x_halo * weight]) / total_weight
    y_ref = np.sum([y_halo * weight]) / total_weight
    return x_ref, y_ref
def main_score( nhalo_all, x_true_all, y_true_all, x_ref_all, y_ref_all, sky_prediction):
    """abstracts the score from the old command-line interface.
    sky_prediction is a dx2 array of predicted x,y positions
    -camdp

    Arguments:
        nhalo_all: per-sky halo counts
        x_true_all, y_true_all: per-sky true halo coordinates
        x_ref_all, y_ref_all: per-sky reference-point coordinates
        sky_prediction: one predicted (x, y) pair per sky
    Returns:
        metric: weighted sum of the mean radial error and the mean
                angular vector (lower is better)
    """
    r=np.array([],dtype=float) # log of all the calculated radial distances
    angle=np.array([],dtype=float) # log of all the calculated angles
    num_halos_total=0 # running count of halos scored, for the final average
    for selectskyinsolutions, sky in enumerate(sky_prediction): # one prediction row per sky
        nhalo=int(nhalo_all[selectskyinsolutions])# number of halos in the
                                                  # selected sky
        x_true=x_true_all[selectskyinsolutions][0:nhalo]
        y_true=y_true_all[selectskyinsolutions][0:nhalo]
        x_predicted=np.array([],dtype=float)
        y_predicted=np.array([],dtype=float)
        # NOTE(review): unlike main(), every halo in this sky receives the
        # same single predicted position (sky[0], sky[1]) -- consistent with
        # the docstring's "dx2 array", but confirm that is the intended shape.
        for i in xrange(nhalo): # xrange: this module is Python 2 throughout
            x_predicted=np.append(x_predicted,float(sky[0])) #get the predictd values
            y_predicted=np.append(y_predicted,float(sky[1]))
        # The test-data solution file provides masses (the reference point
        # must then be computed with get_ref()), whereas Training_halos.csv
        # provides x_ref, y_ref directly, as used here.
        x_ref=x_ref_all[selectskyinsolutions]
        y_ref=y_ref_all[selectskyinsolutions]
        num_halos_total=num_halos_total+nhalo
        # The single-halo case is handled separately since x_ref == x_true
        # there.
        if nhalo == 1:
            # Radial distance between the true and predicted position.
            r=np.append(r,np.sqrt( (x_predicted-x_true)**2 \
                                   + (y_predicted-y_true)**2))
            # Angle between the predicted position and the true halo position.
            if (x_predicted-x_true) != 0:
                psi = np.arctan((y_predicted-y_true)/(x_predicted-x_true))
            else: psi=0.
            theta = convert_to_360([psi], [x_predicted-x_true], [y_predicted-y_true])
            angle=np.append(angle,theta)
        else:
            # calc_delta_r matches each true halo to a predicted halo such
            # that the average of all radial distances is optimal; it returns
            # those distances plus the index orderings that achieved the
            # match, showing which halo was matched to which.
            r_index_index = calc_delta_r(x_predicted, y_predicted, x_true, \
                                         y_true)
            r=np.append(r,r_index_index[0][0])
            halo_index= r_index_index[1] # true-halo indexes, matched with the
            predicted_index=r_index_index[2] # predicted-halo indexes
            # Angles of each predicted position w.r.t. its matched halo,
            # appended to the angle vector.
            angle=np.append(angle,calc_theta\
                            (x_predicted[predicted_index],\
                             y_predicted[predicted_index],\
                             x_true[halo_index],\
                             y_true[halo_index],x_ref,\
                             y_ref))
    # Average distance of the estimates from the halo positions.
    av_r=sum(r)/len(r)
    # To quantify orientation invariance, express each angle as a unit vector
    # and take the magnitude of the average vector:
    # R_bar^2 = (1/N Sum cos(theta))^2 + (1/N Sum sin(theta))^2
    N = float(num_halos_total)
    angle_vec = np.sqrt(( 1.0/N * sum(np.cos(angle)) )**2 + \
                        ( 1.0/N * sum(np.sin(angle)) )**2)
    W1=1./1000. # scales av_r so that < 1 is a good score and > 1 is not
    W2=1.
    metric = W1*av_r + W2*angle_vec # weighted metric, weights TBD
    print 'Your average distance in pixels you are away from the true halo is', av_r
    print 'Your average angular vector is', angle_vec
    print 'Your score for the training data is', metric
    return metric
def main(user_fname, fname):
    """ Script to compute the evaluation metric for the Observing Dark Worlds competition. You can run it on your training data to understand how well you have done with the training data.

    Arguments:
        user_fname: path to the user's result.csv of predicted positions
        fname: path to the solution file (Training_halos.csv)
    Prints the average distance, average angular vector and final metric;
    unlike main_score(), this does not return the metric.
    """
    r=np.array([],dtype=float) # log of all the calculated radial distances
    angle=np.array([],dtype=float) # log of all the calculated angles
    true_sky_id=[]
    sky_loader = c.reader(open(fname, 'rb')) # sky_ids from the solution file
    for row in sky_loader:
        true_sky_id.append(row[0])
    # Load the true values from the solution file.
    nhalo_all=np.loadtxt(fname,usecols=(1,),delimiter=',',skiprows=1)
    x_true_all=np.loadtxt(fname,usecols=(4,6,8),delimiter=',',skiprows=1)
    y_true_all=np.loadtxt(fname,usecols=(5,7,9),delimiter=',',skiprows=1)
    x_ref_all=np.loadtxt(fname,usecols=(2,),delimiter=',',skiprows=1)
    y_ref_all=np.loadtxt(fname,usecols=(3,),delimiter=',',skiprows=1)
    # NOTE(review): sky_loader was already exhausted by the loop above, so
    # this second loop never executes -- apparently dead code.
    for row in sky_loader:
        true_sky_id.append(row[1])
    num_halos_total=0 # running count of halos scored, for the final average
    sky_prediction = c.reader(open(user_fname, 'rb')) # the user's result.csv
    try: # Header detection: if the second field of the first line parses as
         # a float it is data, so the file has no header.
        with open(user_fname, 'r') as f:
            header = float((f.readline()).split(',')[1])
        print 'THE INPUT FILE DOESNT APPEAR TO HAVE A HEADER'
    except : # NOTE(review): bare except catches *every* exception here, not
             # just the ValueError from float() -- consider narrowing.
        print 'THE INPUT FILE APPEARS TO HAVE A HEADER, SKIPPING THE FIRST LINE'
        skip_header = sky_prediction.next() # Python 2 iterator protocol
    for sky in sky_prediction: # analyse each line of result.csv
        sky_id = str(sky[0]) # sky_id of this prediction row
        does_it_exist=true_sky_id.count(sky_id) # is the user's sky_id
                                                # a real one?
        if does_it_exist > 0: # find the matching row in the solutions
            # -1: true_sky_id includes the solution file's header row, so the
            # list index is one ahead of the loadtxt row (which skiprows=1).
            selectskyinsolutions=true_sky_id.index(sky_id)-1
        else: # otherwise exit
            print 'Sky_id does not exist, formatting problem: ',sky_id
            sys.exit(2)
        nhalo=int(nhalo_all[selectskyinsolutions])# number of halos in the
                                                  # selected sky
        x_true=x_true_all[selectskyinsolutions][0:nhalo]
        y_true=y_true_all[selectskyinsolutions][0:nhalo]
        x_predicted=np.array([],dtype=float)
        y_predicted=np.array([],dtype=float)
        for i in xrange(nhalo):
            x_predicted=np.append(x_predicted,float(sky[2*i+1])) #get the predictd values
            y_predicted=np.append(y_predicted,float(sky[2*i+2]))
        # The test-data solution file provides masses (the reference point
        # must then be computed with get_ref()), whereas Training_halos.csv
        # provides x_ref, y_ref directly, as used here.
        x_ref=x_ref_all[selectskyinsolutions]
        y_ref=y_ref_all[selectskyinsolutions]
        num_halos_total=num_halos_total+nhalo
        # The single-halo case is handled separately since x_ref == x_true
        # there.
        if nhalo == 1:
            # Radial distance between the true and predicted position.
            r=np.append(r,np.sqrt( (x_predicted-x_true)**2 \
                                   + (y_predicted-y_true)**2))
            # Angle between the predicted position and the true halo position.
            if (x_predicted-x_true) != 0:
                psi = np.arctan((y_predicted-y_true)/(x_predicted-x_true))
            else: psi=0.
            theta = convert_to_360([psi], [x_predicted-x_true], [y_predicted-y_true])
            angle=np.append(angle,theta)
        else:
            # calc_delta_r matches each true halo to a predicted halo such
            # that the average of all radial distances is optimal; it returns
            # those distances plus the index orderings that achieved the
            # match, showing which halo was matched to which.
            r_index_index = calc_delta_r(x_predicted, y_predicted, x_true, \
                                         y_true)
            r=np.append(r,r_index_index[0][0])
            halo_index= r_index_index[1] # true-halo indexes, matched with the
            predicted_index=r_index_index[2] # predicted-halo indexes
            # Angles of each predicted position w.r.t. its matched halo,
            # appended to the angle vector.
            angle=np.append(angle,calc_theta\
                            (x_predicted[predicted_index],\
                             y_predicted[predicted_index],\
                             x_true[halo_index],\
                             y_true[halo_index],x_ref,\
                             y_ref))
    # Average distance of the estimates from the halo positions.
    av_r=sum(r)/len(r)
    # To quantify orientation invariance, express each angle as a unit vector
    # and take the magnitude of the average vector:
    # R_bar^2 = (1/N Sum cos(theta))^2 + (1/N Sum sin(theta))^2
    N = float(num_halos_total)
    angle_vec = np.sqrt(( 1.0/N * sum(np.cos(angle)) )**2 + \
                        ( 1.0/N * sum(np.sin(angle)) )**2)
    W1=1./1000. # scales av_r so that < 1 is a good score and > 1 is not
    W2=1.
    metric = W1*av_r + W2*angle_vec # weighted metric, weights TBD
    print 'Your average distance in pixels you are away from the true halo is', av_r
    print 'Your average angular vector is', angle_vec
    print 'Your score for the training data is', metric
if __name__ == "__main__":
    # Run `python DarkWorldsMetric.py -h` for usage details.
    parser = ap.ArgumentParser(description='Work out the Metric for your input file')
    parser.add_argument('inputfile',type=str,nargs=1,help='Input file of halo positions. Needs to be in the format SkyId,halo_x1,haloy1,halox_2,halo_y2,halox3,halo_y3 ')
    parser.add_argument('reffile',type=str,nargs=1,help='This should point to Training_halos.csv')
    args = parser.parse_args()
    user_fname = args.inputfile[0]
    ref_arg = args.reffile[0]
    # Accept either the csv itself or a directory prefix to append it to.
    if 'Training_halos.csv' in ref_arg:
        fname = ref_arg
    else:
        fname = ref_arg + 'Training_halos.csv'
    main(user_fname, fname)
| Chapter5_LossFunctions/DarkWorldsMetric.py | 20,353 | Only works for number of halso > 1The number of possible different combThe array of combinationsI will pass backTHe array of the distancesfor all possible combinationsThe vector of distancesI will pass backPick a combination of true and predicted Input for the permutatiosn, 01 number halos or 012For the index of the distances arrayThe tuples which will show the order of halos pickedThe distance between eactrue and predictedhalo for every comblog of all the permutations of true halos usedlog of all the predicted permutationsloop through all the true halos with theThis array logs the distance between true andpredicted halo for ALL configruationslogthe order in which I try each true halolog the order in which I trueeach predicted halothis is a tuple of tuples ofall of thifferent configtrue halo indexesFind what the total distancesare for each configurationThe configuration used is the onewhich has the smallest distanceFind the tuple of distances thatcorrespond to this smallest distanceFind the tuple of the index which refersto the smallest distanceSet up the array which will pass back the values Angle at which the halo is atwith respect to the reference poitn Angle of the estimatewrt true halo centreBefore finding the angle with the zero line as the line joiing the halo and the referencepoint I need to convert the angle produced by Python to an angle between 0 and 2piThe angle with the baseline as the line joing the ref and the haloIf the angle of the true pos wrt the ref isgreater than the angle of predicted posand the true pos then add 2piFind the weighted average of the x and y coordinates The array which I will log all the calculated radial distancesThe array which I will log all the calculated anglesLoad in the sky_ids from the trueKeep track of how many halos are iput into the metricLoop through each line in result.csv and analyse each oneHow many halos in theselected sky?get the predictd valuesThe solution 
file for the test data provides masses to calculate the centre of mass where as the Training_halo.csvdirect provides x_ref y_ref. So in the case of test datawe need to calculae the ref point from the masses usingGet_ref()Single halo case, this needs to be separately caluclated sincex_ref = x_trueWhat is the radial distance between the true and predicted positionWhat is the angle between the predicted position and true halo positionr_index_index, contains the radial distances of the predicted totrue positions. These are found by matching up the true halos tothe predicted halos such that the average of all the radial distancesis optimal. it also contains indexes of the halos used which are used toshow which halo has been mathced to which.The true halos indexes matched with the predicted halo index Find the angles of the predictedposition wrt to the halo and add to the vector angle Find what the average distance the estimate is from the halo positionIn order to quanitfy the orientation invariance we will express each angle as a vector and find the average vecorR_bar^2=(1/N Sum^Ncos(theta))^2+(1/N Sum^Nsin(theta))**2Weight the av_r such that < 1 i a good score > 1 isnt so good.Weighted metric, weights TBD The array which I will log all the calculated radial distancesThe array which I will log all the calculated anglesLoad in the sky_ids from the trueLoad in the sky_ids from the solution fileLoad in the true values from the solution fileKeep track of how many halos are iput into the metricOpen the result.csv See if the input file from user has a header on itwith open('JoyceTest/trivialUnitTest_Pred.txt', 'r') as f:try and make where thefirst input would bea float, if succed itsnot a headerLoop through each line in result.csv and analyse each oneGet the sky_id of the inputIs the input sky_idfrom user a real one?If it does then find the matching solutions to the sky_idOtherwise exitHow many halos in theselected sky?get the predictd valuesThe solution file for the test 
data provides masses to calculate the centre of mass where as the Training_halo.csvdirect provides x_ref y_ref. So in the case of test datawe need to calculae the ref point from the masses usingGet_ref()Single halo case, this needs to be separately caluclated sincex_ref = x_trueWhat is the radial distance between the true and predicted positionWhat is the angle between the predicted position and true halo positionr_index_index, contains the radial distances of the predicted totrue positions. These are found by matching up the true halos tothe predicted halos such that the average of all the radial distancesis optimal. it also contains indexes of the halos used which are used toshow which halo has been mathced to which.The true halos indexes matched with the predicted halo index Find the angles of the predictedposition wrt to the halo and add to the vector angle Find what the average distance the estimate is from the halo positionIn order to quanitfy the orientation invariance we will express each angle as a vector and find the average vecorR_bar^2=(1/N Sum^Ncos(theta))^2+(1/N Sum^Nsin(theta))**2Weight the av_r such that < 1 i a good score > 1 isnt so good.Weighted metric, weights TBDFor help just typed 'python DarkWorldsMetric.py -h' | 5,197 | en | 0.839903 |
#!/usr/bin/env python3
# methodological_experiment.py
import sys, os, csv
import numpy as np
import pandas as pd
import versatiletrainer2
import metaselector
import matplotlib.pyplot as plt
from scipy import stats
def first_experiment():
    """Tune a logistic model separating fantasy from science fiction,
    then display the accuracy matrix produced by the grid search."""
    positive_tags = {'fantasy_loc', 'fantasy_oclc'}
    negative_tags = {'sf_loc', 'sf_oclc'}
    (metadata, masterdata, classvector, classdictionary,
     orderedIDs, authormatches, vocablist) = versatiletrainer2.get_simple_data(
        '../data/', '../metadata/mastermetadata.csv',
        '../modeloutput/experimentalvocab.txt',
        positive_tags, negative_tags, 200)
    # modelparams: algorithm, folds, feature start / end / step, C grid.
    modelparams = 'logistic', 10, 3000, 4400, 100, [.004, .012, 0.3, 0.8, 2]
    (matrix, maxaccuracy, metadata, coefficientuples, features4max,
     best_regularization_coef) = versatiletrainer2.tune_a_model(
        metadata, masterdata, classvector, classdictionary, orderedIDs,
        authormatches, vocablist, positive_tags, negative_tags, modelparams,
        'first_experiment', '../modeloutput/first_experiment.csv')
    plt.rcParams["figure.figsize"] = [9.0, 6.0]
    plt.matshow(matrix, origin = 'lower', cmap = plt.cm.YlOrRd)
    plt.show()
def get_ratio_data(vocabpath, sizecap, ratio, tags4positive, tags4negative, excludebelow = 0, excludeabove = 3000):
    ''' Loads metadata, selects instances for the positive
    and negative classes (using a ratio to dilute the positive
    class with negative instances), creates a lexicon if one doesn't
    already exist, and creates a pandas dataframe storing
    texts as rows and words/features as columns. A refactored
    and simplified version of get_data_for_model().

    Arguments:
        vocabpath: path where the lexicon is stored (or will be created)
        sizecap: maximum number of volumes per class
        ratio: fraction of the positive class to replace with negatives
        tags4positive, tags4negative: genre-tag sets defining the classes
        excludebelow, excludeabove: date bounds for volume selection
    Returns:
        metadata, masterdata, classvector, classdictionary, orderedIDs,
        authormatches, vocablist -- the bundle consumed by
        versatiletrainer2.tune_a_model().
    '''
    holdout_authors = True          # exclude same-author volumes from training folds
    freqs_already_normalized = True # source files already hold normalized frequencies
    verbose = False                 # not referenced below
    datecols = ['firstpub']
    indexcol = ['docid']
    extension = '.tsv'
    genrecol = 'tags'
    numfeatures = 8000
    sourcefolder = '../data/'
    metadatapath = '../metadata/mastermetadata.csv'
    # Get a list of files.
    allthefiles = os.listdir(sourcefolder)
    volumeIDsinfolder = list()
    volumepaths = list()
    numchars2trim = len(extension)
    for filename in allthefiles:
        if filename.endswith(extension):
            volID = filename[0 : -numchars2trim]
            # The volume ID is basically the filename minus its extension.
            volumeIDsinfolder.append(volID)
    metadata = metaselector.load_metadata(metadatapath, volumeIDsinfolder, excludebelow, excludeabove, indexcol = indexcol, datecols = datecols, genrecol = genrecol)
    # That function returns a pandas dataframe which is guaranteed to be indexed by indexcol,
    # and to contain a numeric column 'std_date' as well as a column 'tagset' which contains
    # sets of genre tags for each row. It has also been filtered so it only contains volumes
    # in the folder, and none whose date is below excludebelow or above excludeabove.
    orderedIDs, classdictionary = metaselector.dilute_positive_class(metadata, sizecap, tags4positive, tags4negative, ratio)
    metadata = metadata.loc[orderedIDs]
    # Limits the metadata data frame to rows we are actually using
    # (those selected in dilute_positive_class above).
    # We now create an ordered list of id-path tuples.
    volspresent = [(x, sourcefolder + x + extension) for x in orderedIDs]
    print(len(volspresent))
    print('Building vocabulary.')
    vocablist = versatiletrainer2.get_vocablist(vocabpath, volspresent, n = numfeatures)
    numfeatures = len(vocablist) # may shrink if a stored lexicon is smaller
    print()
    print("Number of features: " + str(numfeatures))
    # For each volume, we're going to create a list of volumes that should be
    # excluded from the training set when it is to be predicted. More precisely,
    # we're going to create a list of their *indexes*, so that we can easily
    # remove rows from the training matrix.
    authormatches = [ [] for x in orderedIDs]
    # Now we proceed to enlarge that list by identifying, for each volume,
    # a set of indexes that have the same author. Obvs, there will always be at least one.
    # We exclude a vol from it's own training set.
    if holdout_authors:
        for idx1, anid in enumerate(orderedIDs):
            thisauthor = metadata.loc[anid, 'author']
            authormatches[idx1] = list(np.flatnonzero(metadata['author'] == thisauthor))
    for alist in authormatches:
        alist.sort(reverse = True)
    # Indexes are reverse-sorted so they can be deleted back to front
    # without changing indexes yet to be deleted.
    # This will become important in the modelingprocess module.
    print()
    print('Authors matched.')
    print()
    masterdata, classvector = versatiletrainer2.get_dataframe(volspresent, classdictionary, vocablist, freqs_already_normalized)
    return metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist
def vary_sf_ratio_against_random():
    """Train SF-vs-random models (iterations 5-7) while progressively
    diluting the SF class with random volumes; log each model's stats."""
    if not os.path.isfile('../measuredivergence/modeldata.tsv'):
        with open('../measuredivergence/modeldata.tsv', mode = 'w', encoding = 'utf-8') as f:
            f.write('name\tsize\tratio\taccuracy\tfeatures\tregularization\n')
    size = 80
    for iteration in [5, 6, 7]:
        # Iteration 7 only needs the ratio-0 model.
        ceiling = 5 if iteration == 7 else 105
        for pct in range(0, ceiling, 5):
            ratio = pct / 100
            name = 'iter{}_size{}_ratio{}'.format(iteration, size, pct)
            vocabpath = '../measuredivergence/vocabularies/' + name + '.txt'
            tags4positive = {'sf_loc', 'sf_oclc'}
            tags4negative = {'random'}
            metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist = get_ratio_data(vocabpath, size, ratio, tags4positive, tags4negative, excludebelow = 0, excludeabove = 3000)
            c_range = [.00005, .0003, .001, .004, .012, 0.2, 0.8]
            # algorithm, folds, feature start / end / step, C grid
            modelparams = 'logistic', 16, 1000, 6000, 300, c_range
            matrix, maxaccuracy, metadata, coefficientuples, features4max, best_regularization_coef = versatiletrainer2.tune_a_model(metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist, tags4positive, tags4negative, modelparams, name, '../measuredivergence/modeloutput/' + name + '.csv', write_fullmodel = False)
            # write_fullmodel = False forces crossvalidation, so the csvs
            # accurately reflect (terrible) accuracy on diluted datasets.
            with open('../measuredivergence/modeldata.tsv', mode = 'a', encoding = 'utf-8') as f:
                fields = [name, str(size), str(ratio), str(maxaccuracy), str(features4max), str(best_regularization_coef)]
                f.write('\t'.join(fields) + '\n')
def vary_fantasy_ratio_against_sf():
    """Train fantasy-vs-SF models (iterations 8-10) while progressively
    diluting the fantasy class with SF volumes; log each model's stats."""
    if not os.path.isfile('../measuredivergence/modeldata.tsv'):
        with open('../measuredivergence/modeldata.tsv', mode = 'w', encoding = 'utf-8') as f:
            f.write('name\tsize\tratio\taccuracy\tfeatures\tregularization\n')
    size = 80
    for iteration in [8, 9, 10]:
        # Iteration 10 only needs the ratio-0 model.
        ceiling = 5 if iteration == 10 else 105
        for pct in range(0, ceiling, 5):
            ratio = pct / 100
            name = 'iter{}_size{}_ratio{}'.format(iteration, size, pct)
            vocabpath = '../measuredivergence/vocabularies/' + name + '.txt'
            tags4positive = {'fantasy_loc', 'fantasy_oclc'}
            tags4negative = {'sf_loc', 'sf_oclc'}
            metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist = get_ratio_data(vocabpath, size, ratio, tags4positive, tags4negative, excludebelow = 0, excludeabove = 3000)
            c_range = [.00005, .0003, .001, .004, .012, 0.2, 0.8, 3]
            # algorithm, folds, feature start / end / step, C grid
            modelparams = 'logistic', 16, 2000, 7500, 400, c_range
            matrix, maxaccuracy, metadata, coefficientuples, features4max, best_regularization_coef = versatiletrainer2.tune_a_model(metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist, tags4positive, tags4negative, modelparams, name, '../measuredivergence/modeloutput/' + name + '.csv', write_fullmodel = False)
            # write_fullmodel = False forces crossvalidation.
            with open('../measuredivergence/modeldata.tsv', mode = 'a', encoding = 'utf-8') as f:
                fields = [name, str(size), str(ratio), str(maxaccuracy), str(features4max), str(best_regularization_coef)]
                f.write('\t'.join(fields) + '\n')
def vary_fantasy_ratio_against_random():
    """Train fantasy-vs-random models (iterations 11-13) while progressively
    diluting the fantasy class with random volumes; log each model's stats."""
    if not os.path.isfile('../measuredivergence/modeldata.tsv'):
        with open('../measuredivergence/modeldata.tsv', mode = 'w', encoding = 'utf-8') as f:
            f.write('name\tsize\tratio\taccuracy\tfeatures\tregularization\n')
    size = 80
    for iteration in [11, 12, 13]:
        # Iteration 13 only needs the ratio-0 model.
        ceiling = 5 if iteration == 13 else 105
        for pct in range(0, ceiling, 5):
            ratio = pct / 100
            name = 'iter{}_size{}_ratio{}'.format(iteration, size, pct)
            vocabpath = '../measuredivergence/vocabularies/' + name + '.txt'
            tags4positive = {'fantasy_loc', 'fantasy_oclc'}
            tags4negative = {'random'}
            metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist = get_ratio_data(vocabpath, size, ratio, tags4positive, tags4negative, excludebelow = 0, excludeabove = 3000)
            c_range = [.00005, .0003, .001, .004, .012, 0.2, 0.8, 3]
            # algorithm, folds, feature start / end / step, C grid
            modelparams = 'logistic', 16, 1600, 6400, 400, c_range
            matrix, maxaccuracy, metadata, coefficientuples, features4max, best_regularization_coef = versatiletrainer2.tune_a_model(metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist, tags4positive, tags4negative, modelparams, name, '../measuredivergence/modeloutput/' + name + '.csv', write_fullmodel = False)
            # write_fullmodel = False forces crossvalidation.
            with open('../measuredivergence/modeldata.tsv', mode = 'a', encoding = 'utf-8') as f:
                fields = [name, str(size), str(ratio), str(maxaccuracy), str(features4max), str(best_regularization_coef)]
                f.write('\t'.join(fields) + '\n')
def accuracy(df, column):
    """Fraction of rows where df[column] agrees with df.realclass,
    thresholding both at 0.5."""
    actual_pos = df.realclass > 0.5
    actual_neg = df.realclass <= 0.5
    pred_pos = df[column] > 0.5
    pred_neg = df[column] <= 0.5
    true_pos = sum(actual_pos & pred_pos)
    true_neg = sum(actual_neg & pred_neg)
    false_pos = sum(actual_neg & pred_pos)
    false_neg = sum(actual_pos & pred_neg)
    total = len(df.realclass)
    # Sanity check: the four cells must partition all rows.
    assert total == (true_pos + false_pos + true_neg + false_neg)
    return (true_pos + true_neg) / total
def accuracy_loss(df):
    """How much accuracy drops when the alien model's scores stand in
    for the model's own probabilities."""
    own = accuracy(df, 'probability')
    alien = accuracy(df, 'alien_model')
    return own - alien
def kldivergence(p, q):
    """Kullback-Leibler divergence D(P || Q) for discrete distributions

    Parameters
    ----------
    p, q : array-like, dtype=float, shape=n
        Discrete probability distributions.

    Returns
    -------
    float
        Sum over entries with p != 0 of p * log(p / q).
    """
    # np.float was removed in NumPy 1.24; the builtin float is the same dtype.
    p = np.asarray(p, dtype=float)
    q = np.asarray(q, dtype=float)
    # Select the nonzero-p entries *before* taking the log: the original
    # np.where form evaluated 0 * log(0/q) for every zero entry and then
    # discarded the nan, emitting runtime warnings along the way.
    mask = p != 0
    return np.sum(p[mask] * np.log(p[mask] / q[mask]))
def averagecorr(r1, r2):
    """Average two correlation coefficients via Fisher's z-transform:
    arctanh maps each r to z (where averaging is legitimate), and tanh
    maps the mean back to an r."""
    return np.tanh((np.arctanh(r1) + np.arctanh(r2)) / 2)
def get_divergences(gold, testname, itera, size, pct):
    '''
    Applies two models to each other's training examples and summarizes
    their agreement with several divergence measures.
    (itera, size and pct are accepted but not referenced in the body --
    presumably retained for interface symmetry with the callers; verify
    before removing.)
    '''
    # Paths to the gold-standard model's criteria (.pkl) and its output
    # (.csv) on the examples originally used to train it.
    model1 = '../measuredivergence/modeloutput/' + gold + '.pkl'
    meta1 = '../measuredivergence/modeloutput/' + gold + '.csv'
    # Paths to the test model's criteria and output.
    testpath = '../measuredivergence/modeloutput/' + testname
    model2 = testpath + '.pkl'
    meta2 = testpath + '.csv'
    # Apply each model's criteria to the other model's examples.
    model1on2 = versatiletrainer2.apply_pickled_model(model1, '../data/', '.tsv', meta2)
    model2on1 = versatiletrainer2.apply_pickled_model(model2, '../data/', '.tsv', meta1)
    # Per-direction agreement measures between native probabilities and
    # the alien model's scores.
    pearson1on2 = stats.pearsonr(model1on2.probability, model1on2.alien_model)[0]
    pearson2on1 = stats.pearsonr(model2on1.probability, model2on1.alien_model)[0]
    spearman1on2 = stats.spearmanr(model1on2.probability, model1on2.alien_model)[0]
    spearman2on1 = stats.spearmanr(model2on1.probability, model2on1.alien_model)[0]
    loss1on2 = accuracy_loss(model1on2)
    loss2on1 = accuracy_loss(model2on1)
    kl1on2 = kldivergence(model1on2.probability, model1on2.alien_model)
    kl2on1 = kldivergence(model2on1.probability, model2on1.alien_model)
    # Symmetrized summaries of the two directions.
    pearson = averagecorr(pearson1on2, pearson2on1)
    spearman = averagecorr(spearman1on2, spearman2on1)
    loss = (loss1on2 + loss2on1) / 2
    kl = (kl1on2 + kl2on1) / 2
    return pearson, spearman, loss, kl, spearman1on2, spearman2on1, loss1on2, loss2on1
def measure_sf_divergences():
    """Measure pairwise divergences between the SF gold-standard models
    (ratio 0) and every diluted SF model; append rows to sf_divergences.tsv."""
    columns = ['name1', 'name2', 'size', 'acc1', 'acc2', 'ratiodiff', 'pearson', 'spearman', 'spear1on2', 'spear2on1', 'loss', 'loss1on2', 'loss2on1', 'kl']
    if not os.path.isfile('../measuredivergence/sf_divergences.tsv'):
        with open('../measuredivergence/sf_divergences.tsv', mode = 'a', encoding = 'utf-8') as f:
            csv.DictWriter(f, delimiter = '\t', fieldnames = columns).writeheader()
    goldstandards = ['iter5_size80_ratio0', 'iter6_size80_ratio0', 'iter7_size80_ratio0']
    size = 80
    modeldata = pd.read_csv('../measuredivergence/modeldata.tsv', sep = '\t', index_col = 'name')
    for gold in goldstandards:
        for itera in [5, 6]:
            for pct in range(0, 105, 5):
                testname = 'iter{}_size{}_ratio{}'.format(itera, size, pct)
                if testname == gold:
                    continue  # never compare a model against itself
                row = dict(name1 = gold, name2 = testname, size = size)
                (row['pearson'], row['spearman'], row['loss'], row['kl'],
                 row['spear1on2'], row['spear2on1'],
                 row['loss1on2'], row['loss2on1']) = get_divergences(gold, testname, itera, size, pct)
                row['acc1'] = modeldata.loc[gold, 'accuracy']
                row['acc2'] = modeldata.loc[testname, 'accuracy']
                row['ratiodiff'] = pct / 100
                with open('../measuredivergence/sf_divergences.tsv', mode = 'a', encoding = 'utf-8') as f:
                    csv.DictWriter(f, delimiter = '\t', fieldnames = columns).writerow(row)
def measure_fsf_divergences():
    """Measure pairwise divergences between the fantasy-vs-SF gold-standard
    models (ratio 0) and every diluted model; append to fsf_divergences.tsv."""
    columns = ['name1', 'name2', 'size', 'acc1', 'acc2', 'ratiodiff', 'pearson', 'spearman', 'spear1on2', 'spear2on1', 'loss', 'loss1on2', 'loss2on1', 'kl']
    if not os.path.isfile('../measuredivergence/fsf_divergences.tsv'):
        with open('../measuredivergence/fsf_divergences.tsv', mode = 'a', encoding = 'utf-8') as f:
            csv.DictWriter(f, delimiter = '\t', fieldnames = columns).writeheader()
    goldstandards = ['iter8_size80_ratio0', 'iter9_size80_ratio0', 'iter10_size80_ratio0']
    size = 80
    modeldata = pd.read_csv('../measuredivergence/modeldata.tsv', sep = '\t', index_col = 'name')
    for gold in goldstandards:
        for itera in [8, 9]:
            for pct in range(0, 105, 5):
                testname = 'iter{}_size{}_ratio{}'.format(itera, size, pct)
                if testname == gold:
                    continue  # never compare a model against itself
                row = dict(name1 = gold, name2 = testname, size = size)
                (row['pearson'], row['spearman'], row['loss'], row['kl'],
                 row['spear1on2'], row['spear2on1'],
                 row['loss1on2'], row['loss2on1']) = get_divergences(gold, testname, itera, size, pct)
                row['acc1'] = modeldata.loc[gold, 'accuracy']
                row['acc2'] = modeldata.loc[testname, 'accuracy']
                row['ratiodiff'] = pct / 100
                with open('../measuredivergence/fsf_divergences.tsv', mode = 'a', encoding = 'utf-8') as f:
                    csv.DictWriter(f, delimiter = '\t', fieldnames = columns).writerow(row)
def measure_fantasy_divergences():
    """Measure pairwise divergences between the fantasy gold-standard models
    (ratio 0) and every diluted model; append to fantasy_divergences.tsv."""
    columns = ['name1', 'name2', 'size', 'acc1', 'acc2', 'ratiodiff', 'pearson', 'spearman', 'spear1on2', 'spear2on1', 'loss', 'loss1on2', 'loss2on1', 'kl']
    if not os.path.isfile('../measuredivergence/fantasy_divergences.tsv'):
        with open('../measuredivergence/fantasy_divergences.tsv', mode = 'a', encoding = 'utf-8') as f:
            csv.DictWriter(f, delimiter = '\t', fieldnames = columns).writeheader()
    goldstandards = ['iter11_size80_ratio0', 'iter12_size80_ratio0', 'iter13_size80_ratio0']
    size = 80
    modeldata = pd.read_csv('../measuredivergence/modeldata.tsv', sep = '\t', index_col = 'name')
    for gold in goldstandards:
        for itera in [11, 12]:
            for pct in range(0, 105, 5):
                testname = 'iter{}_size{}_ratio{}'.format(itera, size, pct)
                if testname == gold:
                    continue  # never compare a model against itself
                row = dict(name1 = gold, name2 = testname, size = size)
                (row['pearson'], row['spearman'], row['loss'], row['kl'],
                 row['spear1on2'], row['spear2on1'],
                 row['loss1on2'], row['loss2on1']) = get_divergences(gold, testname, itera, size, pct)
                row['acc1'] = modeldata.loc[gold, 'accuracy']
                row['acc2'] = modeldata.loc[testname, 'accuracy']
                row['ratiodiff'] = pct / 100
                with open('../measuredivergence/fantasy_divergences.tsv', mode = 'a', encoding = 'utf-8') as f:
                    csv.DictWriter(f, delimiter = '\t', fieldnames = columns).writerow(row)
def new_experiment():
    """Train mixed-data models plus gold fantasy/detective models.

    For each iteration i in 3-5: trains one model per dilution ratio on mixed
    fantasy/detective data, then trains a gold fantasy model and a gold
    detective model on partition 2. Each model's summary statistics are
    appended to newexperimentmodels.csv, and its ad-hoc lexicon is deleted
    after training.

    NOTE(review): indentation was reconstructed from a whitespace-mangled
    source; the two gold-model sections are assumed to sit inside the
    `for i` loop but after the ratio loop (their names depend only on i) --
    confirm against the original file.
    """

    # The first time I ran this, I used partition 2 to build the
    # mixed data, and partition 1 as a gold standard. Now reversing.

    outmodelpath = '../measuredivergence/results/newexperimentmodels.csv'
    columns = ['name', 'size', 'ratio', 'iteration', 'meandate', 'maxaccuracy', 'features', 'regularization']

    # Create the output file with a header row on first use.
    if not os.path.isfile(outmodelpath):
        with open(outmodelpath, mode = 'w', encoding = 'utf-8') as f:
            scribe = csv.DictWriter(f, fieldnames = columns)
            scribe.writeheader()

    # Hyperparameter search space shared by every model trained below.
    c_range = [.00001, .0001, .001, .01, 0.1, 1, 10, 100]
    featurestart = 1500
    featureend = 6000
    featurestep = 300
    modelparams = 'logistic', 10, featurestart, featureend, featurestep, c_range
    sizecap = 75

    for i in range(3, 6):
        for ratio in [0, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 100]:
            sourcefolder = '../measuredivergence/mix/' + str(ratio) + '/'
            metadatapath = '../measuredivergence/partitionmeta/meta' + str(ratio) + '.csv'
            name = 'mixeddata_' + str(i) + '_' + str(ratio)
            vocabpath = '../lexica/' + name + '.txt'
            tags4positive = {'fantasy', 'detective'}
            tags4negative = {'random'}
            floor = 1800
            ceiling = 1930

            metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist = versatiletrainer2.get_simple_data(sourcefolder, metadatapath, vocabpath, tags4positive, tags4negative, sizecap, excludebelow = floor, excludeabove = ceiling, force_even_distribution = False, numfeatures = 6000)

            matrix, maxaccuracy, metadata, coefficientuples, features4max, best_regularization_coef = versatiletrainer2.tune_a_model(metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist, tags4positive, tags4negative, modelparams, name, '../measuredivergence/newmodeloutput/' + name + '.csv')

            # Mean first-publication date across the volumes actually used.
            meandate = int(round(np.sum(metadata.firstpub) / len(metadata.firstpub)))

            row = dict()
            row['name'] = name
            row['size'] = sizecap
            row['ratio'] = ratio
            row['iteration'] = i
            row['meandate'] = meandate
            row['maxaccuracy'] = maxaccuracy
            row['features'] = features4max
            row['regularization'] = best_regularization_coef

            with open(outmodelpath, mode = 'a', encoding = 'utf-8') as f:
                scribe = csv.DictWriter(f, fieldnames = columns)
                scribe.writerow(row)

            # The lexicon is model-specific; discard it once trained.
            os.remove(vocabpath)

        # Gold fantasy model for this iteration, trained on partition 2.
        sourcefolder = '../data/'
        metadatapath = '../measuredivergence/partitionmeta/part2.csv'
        # note that this is changed if you create mix data with
        # partition 2
        name = 'goldfantasy_' + str(i)
        vocabpath = '../lexica/' + name + '.txt'
        tags4positive = {'fantasy'}
        tags4negative = {'random', 'randomB'}
        floor = 1800
        ceiling = 1930

        metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist = versatiletrainer2.get_simple_data(sourcefolder, metadatapath, vocabpath, tags4positive, tags4negative, sizecap, excludebelow = floor, excludeabove = ceiling, force_even_distribution = False, numfeatures = 6000)

        matrix, maxaccuracy, metadata, coefficientuples, features4max, best_regularization_coef = versatiletrainer2.tune_a_model(metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist, tags4positive, tags4negative, modelparams, name, '../measuredivergence/newmodeloutput/' + name + '.csv')

        meandate = int(round(np.sum(metadata.firstpub) / len(metadata.firstpub)))

        row = dict()
        row['name'] = name
        row['size'] = sizecap
        # NOTE(review): reuses the final value of `ratio` from the loop above.
        row['ratio'] = ratio
        row['iteration'] = i
        row['meandate'] = meandate
        row['maxaccuracy'] = maxaccuracy
        row['features'] = features4max
        row['regularization'] = best_regularization_coef

        with open(outmodelpath, mode = 'a', encoding = 'utf-8') as f:
            scribe = csv.DictWriter(f, fieldnames = columns)
            scribe.writerow(row)

        os.remove(vocabpath)

        # Gold detective model for this iteration, also on partition 2.
        sourcefolder = '../data/'
        metadatapath = '../measuredivergence/partitionmeta/part2.csv'
        # depending on which partition you used to create mix data;
        # this will be the other one
        name = 'golddetective_' + str(i)
        vocabpath = '../lexica/' + name + '.txt'
        tags4positive = {'detective'}
        tags4negative = {'random', 'randomB'}
        floor = 1800
        ceiling = 1930

        metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist = versatiletrainer2.get_simple_data(sourcefolder, metadatapath, vocabpath, tags4positive, tags4negative, sizecap, excludebelow = floor, excludeabove = ceiling, force_even_distribution = False, numfeatures = 6000)

        matrix, maxaccuracy, metadata, coefficientuples, features4max, best_regularization_coef = versatiletrainer2.tune_a_model(metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist, tags4positive, tags4negative, modelparams, name, '../measuredivergence/newmodeloutput/' + name + '.csv')

        meandate = int(round(np.sum(metadata.firstpub) / len(metadata.firstpub)))

        row = dict()
        row['name'] = name
        row['size'] = sizecap
        # NOTE(review): also reuses the final value of `ratio`.
        row['ratio'] = ratio
        row['iteration'] = i
        row['meandate'] = meandate
        row['maxaccuracy'] = maxaccuracy
        row['features'] = features4max
        row['regularization'] = best_regularization_coef

        with open(outmodelpath, mode = 'a', encoding = 'utf-8') as f:
            scribe = csv.DictWriter(f, fieldnames = columns)
            scribe.writerow(row)

        os.remove(vocabpath)
def accuracy(df, column):
    """Fraction of rows where `column` agrees with df.realclass.

    Both the true class and the prediction are binarized at 0.5.
    """
    actual_pos = df.realclass > 0.5
    actual_neg = df.realclass <= 0.5
    predicted_pos = df[column] > 0.5
    predicted_neg = df[column] <= 0.5

    true_pos = sum(actual_pos & predicted_pos)
    true_neg = sum(actual_neg & predicted_neg)
    false_pos = sum(actual_neg & predicted_pos)
    false_neg = sum(actual_pos & predicted_neg)

    totalcount = len(df.realclass)
    # Sanity check: every row lands in exactly one quadrant.
    assert totalcount == (true_pos + false_pos + true_neg + false_neg)

    return (true_pos + true_neg) / totalcount
def accuracy_loss(df):
    """How much accuracy drops when the alien model replaces the original."""
    own = accuracy(df, 'probability')
    alien = accuracy(df, 'alien_model')
    return own - alien
def get_divergence(sampleA, sampleB, twodatafolder = '../data/', onedatafolder = '../data/'):
    '''
    Apply each model to the other's data and summarize their divergence:
    z-transformed spearman correlations, accuracy losses, raw and alien
    accuracies, and mean dates for both directions of comparison.
    '''

    outputdir = '../measuredivergence/newmodeloutput/'

    # Model A's pickled criteria, and its output on the examples
    # originally used to train it.
    modelA_pkl = outputdir + sampleA + '.pkl'
    modelA_csv = outputdir + sampleA + '.csv'

    # The same pair of paths for model B.
    modelB_pkl = outputdir + sampleB + '.pkl'
    modelB_csv = outputdir + sampleB + '.csv'

    # Cross-apply: A's criteria to B's data, and vice versa.
    model1on2 = versatiletrainer2.apply_pickled_model(modelA_pkl, twodatafolder, '.tsv', modelB_csv)
    model2on1 = versatiletrainer2.apply_pickled_model(modelB_pkl, onedatafolder, '.tsv', modelA_csv)

    # Fisher z-transform (arctanh) the rank correlations so they can be averaged.
    spearman1on2 = np.arctanh(stats.spearmanr(model1on2.probability, model1on2.alien_model)[0])
    spearman2on1 = np.arctanh(stats.spearmanr(model2on1.probability, model2on1.alien_model)[0])
    spearman = (spearman1on2 + spearman2on1) / 2

    loss1on2 = accuracy_loss(model1on2)
    loss2on1 = accuracy_loss(model2on1)
    loss = (loss1on2 + loss2on1) / 2

    alienacc2 = accuracy(model1on2, 'alien_model')
    alienacc1 = accuracy(model2on1, 'alien_model')

    acc2 = accuracy(model1on2, 'probability')
    acc1 = accuracy(model2on1, 'probability')

    meandate2 = np.mean(model1on2.std_date)
    meandate1 = np.mean(model2on1.std_date)

    return spearman, loss, spearman1on2, spearman2on1, loss1on2, loss2on1, acc1, acc2, alienacc1, alienacc2, meandate1, meandate2
def write_a_row(r, outfile, columns):
with open(outfile, mode = 'a', encoding = 'utf-8') as f:
scribe = csv.DictWriter(f, fieldnames = columns, delimiter = '\t')
scribe.writerow(r)
def new_divergences():
    """Compare every gold fantasy/detective model to every mixed-data model."""

    outcomparisons = '../measuredivergence/results/new_comparisons.tsv'
    columns = ['testype', 'name1', 'name2', 'ratio', 'spearman', 'spear1on2', 'spear2on1', 'loss', 'loss1on2', 'loss2on1', 'acc1', 'acc2', 'alienacc1', 'alienacc2', 'meandate1', 'meandate2']

    if not os.path.isfile(outcomparisons):
        with open(outcomparisons, mode = 'a', encoding = 'utf-8') as f:
            csv.DictWriter(f, delimiter = '\t', fieldnames = columns).writeheader()

    # I originally ran this with i and j
    # iterating through range(3). Now trying
    # on models generated with the partitions
    # reversed.

    for i in range(3, 6):
        for j in range(3, 6):
            for ratio in [0, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 100]:
                mixedname = 'mixeddata_' + str(j) + '_' + str(ratio)
                mixfolder = '../measuredivergence/mix/' + str(ratio) + '/'

                # Distance from detective is the complement of distance
                # from fantasy, hence 100 - ratio for the second test.
                comparisons = [
                    ('fantasy2mixed', 'goldfantasy_' + str(i), ratio),
                    ('detective2mixed', 'golddetective_' + str(i), 100 - ratio)]

                for testype, goldname, ratiodiff in comparisons:
                    r = dict()
                    r['testype'] = testype
                    r['name1'] = goldname
                    r['name2'] = mixedname
                    r['spearman'], r['loss'], r['spear1on2'], r['spear2on1'], r['loss1on2'], r['loss2on1'], r['acc1'], r['acc2'], r['alienacc1'], r['alienacc2'], r['meandate1'], r['meandate2'] = get_divergence(r['name1'], r['name2'], twodatafolder = mixfolder)
                    r['ratio'] = ratiodiff

                    write_a_row(r, outcomparisons, columns)
def new_self_comparisons ():
    """Compare mixed-data models against other iterations of themselves."""

    outcomparisons = '../measuredivergence/results/self_comparisons.tsv'
    columns = ['testype', 'name1', 'name2', 'ratio', 'spearman', 'spear1on2', 'spear2on1', 'loss', 'loss1on2', 'loss2on1', 'acc1', 'acc2', 'alienacc1', 'alienacc2', 'meandate1', 'meandate2']

    if not os.path.isfile(outcomparisons):
        with open(outcomparisons, mode = 'a', encoding = 'utf-8') as f:
            csv.DictWriter(f, delimiter = '\t', fieldnames = columns).writeheader()

    # Iterations 0-2 were built from one partition, 3-5 from the other;
    # each pair shares a dilution ratio but not training data.
    for first_iter in range(0, 3):
        for second_iter in range(3, 6):
            for ratio in [0, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 100]:
                row = dict()
                row['testype'] = 'selfmixed'
                row['name1'] = 'mixeddata_' + str(first_iter) + '_' + str(ratio)
                row['name2'] = 'mixeddata_' + str(second_iter) + '_' + str(ratio)
                row['spearman'], row['loss'], row['spear1on2'], row['spear2on1'], row['loss1on2'], row['loss2on1'], row['acc1'], row['acc2'], row['alienacc1'], row['alienacc2'], row['meandate1'], row['meandate2'] = get_divergence(row['name1'], row['name2'], twodatafolder = '../measuredivergence/mix/' + str(ratio) + '/', onedatafolder = '../measuredivergence/altmix/' + str(ratio) + '/')
                row['ratio'] = ratio

                write_a_row(row, outcomparisons, columns)
new_self_comparisons()
| variation/methodological_experiment.py | 31,068 | This function applies model a to b, and vice versa, and returns
a couple of measures of divergence: notably lost accuracy and
z-transformed spearman correlation.
This function gets several possible measures of divergence
between two models.
Loads metadata, selects instances for the positive
and negative classes (using a ratio to dilute the positive
class with negative instances), creates a lexicon if one doesn't
already exist, and creates a pandas dataframe storing
texts as rows and words/features as columns. A refactored
and simplified version of get_data_for_model().
Kullback-Leibler divergence D(P || Q) for discrete distributions
Parameters
----------
p, q : array-like, dtype=float, shape=n
Discrete probability distributions.
!/usr/bin/env python3 methodological_experiment.py Get a list of files. The volume ID is basically the filename minus its extension. That function returns a pandas dataframe which is guaranteed to be indexed by indexcol, and to contain a numeric column 'std_date' as well as a column 'tagset' which contains sets of genre tags for each row. It has also been filtered so it only contains volumes in the folder, and none whose date is below excludebelow or above excludeabove. Limits the metadata data frame to rows we are actually using (those selected in select_instances). We now create an ordered list of id-path tuples. For each volume, we're going to create a list of volumes that should be excluded from the training set when it is to be predicted. More precisely, we're going to create a list of their *indexes*, so that we can easily remove rows from the training matrix. Now we proceed to enlarge that list by identifying, for each volume, a set of indexes that have the same author. Obvs, there will always be at least one. We exclude a vol from it's own training set. I am reversing the order of indexes so that I can delete them from back to front, without changing indexes yet to be deleted. This will become important in the modelingprocess module. It's important not to write fullmodel if you want the csvs to accurately reflect terrible accuracy on diluted datasets. write_fullmodel = False forces crossvalidation. write_fullmodel = False forces crossvalidation. write_fullmodel = False forces crossvalidation. We start by constructing the paths to the gold standard model criteria (.pkl) and model output (.csv) on the examples originally used to train it. We're going to try applying the gold standard criteria to another model's output, and vice- versa. Now we construct paths to the test model criteria (.pkl) and output (.csv). 
we don't test a model against itself we don't test a model against itself we don't test a model against itself The first time I ran this, I used partition 2 to build the mixed data, and partition 1 as a gold standard. Now reversing. note that this is changed if you create mix data with partition 2 depending on which partition you used to create mix data; this will be the other one We start by constructing the paths to the sampleA standard model criteria (.pkl) and model output (.csv) on the examples originally used to train it. We're going to try applying the sampleA standard criteria to another model's output, and vice- versa. Now we construct paths to the test model criteria (.pkl) and output (.csv). I originally ran this with i and j iterating through range(3). Now trying on models generated with the partitions reversed. note that distance from detective is the complement of distance from fantasy | 3,499 | en | 0.899121 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for random_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from lingvo.core import test_utils
from lingvo.core.ops import py_x_ops
from six.moves import range
import tensorflow as tf
FLAGS = tf.flags.FLAGS
class RandomOpsTest(test_utils.TestCase):
  """Tests for the random_permutation_sequence op."""

  def testRandomPermutationSequenceRepeat(self):
    with self.session() as sess:
      out = py_x_ops.random_permutation_sequence(num=20, batch=7, repeat=True)
      remaining = list(range(20))
      for _ in range(10):
        # Each epoch of 20 values at batch size 7 takes exactly 3 fetches.
        vals = []
        for _ in range(3):
          vals.extend(sess.run(out).tolist())
        self.assertEqual(len(vals), 21)
        # The fetched values must include everything left over from the
        # previous epoch; remove() raises ValueError if one is missing.
        for leftover in remaining:
          vals.remove(leftover)
        # After removing the leftovers, no duplicates may remain.
        self.assertEqual(len(vals), len(set(vals)))
        remaining = list(set(range(20)) - set(vals))

  def testRandomPermutationSequenceNoRepeat(self):
    with self.session() as sess:
      out = py_x_ops.random_permutation_sequence(num=20, batch=7, repeat=False)
      # A single epoch: 3 fetches of 7 cover all 20 values (plus one extra).
      vals = []
      for _ in range(3):
        vals.extend(sess.run(out).tolist())
      self.assertEqual(list(range(20)), sorted(vals))
      # With repeat=False the sequence is now exhausted.
      with self.assertRaises(tf.errors.OutOfRangeError):
        sess.run(out)
# Run the test suite when invoked directly.
if __name__ == '__main__':
  tf.test.main()
| lingvo/core/ops/random_ops_test.py | 2,279 | Tests for random_ops.
Copyright 2018 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== Each epoch takes exactly 3 steps. Contains all the remaining values from previous epoch. Raises exception if x is not in vals. Remaining items have no duplicates. Each epoch takes exactly 3 steps. repeat=False. We should see OutOfRange error. | 927 | en | 0.842928 |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
class PyFastjsonschema(PythonPackage):
    """Fast JSON schema validator for Python."""

    homepage = "https://github.com/horejsek/python-fastjsonschema"
    pypi = "fastjsonschema/fastjsonschema-2.15.1.tar.gz"

    # sha256 checksum of the PyPI sdist for release 2.15.1.
    version('2.15.1', sha256='671f36d225b3493629b5e789428660109528f373cf4b8a22bac6fa2f8191c2d2')

    # Pure-Python package; setuptools is needed only at build time.
    depends_on('py-setuptools', type='build')
| var/spack/repos/builtin/packages/py-fastjsonschema/package.py | 557 | Fast JSON schema validator for Python.
Copyright 2013-2022 Lawrence Livermore National Security, LLC and other Spack Project Developers. See the top-level COPYRIGHT file for details. SPDX-License-Identifier: (Apache-2.0 OR MIT) | 229 | en | 0.593844 |
import pytest
from datetime import date, datetime
from dateutil.relativedelta import relativedelta
from django.contrib.auth.models import User
from records.models import (
Category, Record, Budget, OUTCOME, INCOME, SAVINGS, tmz)
from records.month_control import MonthControl, MonthControlWithBudget
@pytest.fixture
def current_date():
    """Timezone-aware datetime for today at midnight."""
    today = date.today()
    return tmz(datetime(
        day=today.day, month=today.month, year=today.year))
@pytest.fixture
def future_date(current_date):
    """Datetime one day after current_date."""
    # Renamed the local (was `date`) so it no longer shadows the
    # datetime.date class imported at module level.
    tomorrow = current_date + relativedelta(days=1)
    return tomorrow
@pytest.fixture
def day_of_month(future_date):
    """Day-of-month integer taken from the future_date fixture."""
    return future_date.day
@pytest.fixture
def start_of_recurrence(future_date):
    """
    Datetime representing the first day of a record with recurrence
    (an alias for the future_date fixture).
    """
    return future_date
@pytest.fixture
def end_of_recurrence(future_date):
    """
    Return a datetime (six months after future_date) which is used to
    determine the end month the recurrence should occur.
    """
    # Renamed the local (was `date`) to avoid shadowing datetime.date.
    end = future_date + relativedelta(months=6)
    return end
@pytest.fixture
def next_month(current_date):
    """Datetime one month after current_date."""
    # Renamed the local (was `date`) to avoid shadowing datetime.date.
    upcoming = current_date + relativedelta(months=1)
    return upcoming
@pytest.fixture
def next_month_future(future_date):
    """Datetime one month after future_date."""
    # Renamed the local (was `date`) to avoid shadowing datetime.date.
    upcoming = future_date + relativedelta(months=1)
    return upcoming
@pytest.fixture
def infinite_future_date(current_date):
    """Datetime far enough ahead (360 years) to act as a 'never ends' date."""
    # Renamed the local (was `date`) to avoid shadowing datetime.date.
    distant = current_date + relativedelta(years=360)
    return distant
@pytest.fixture
def month_control(user, current_date):
    """
    Return a MonthControl object for the current date.

    Important: currently any Record fixture should come before month_control
    """
    return MonthControl(
        user, current_date.month, current_date.year, cache={})
@pytest.fixture
def month_control_with_budget(user, current_date):
    """
    Return a MonthControlWithBudget object for the current date.

    Important: currently any Record fixture should come before month_control
    """
    return MonthControlWithBudget(
        user, current_date.month, current_date.year, cache={})
def _user(username='test_user'):
    """Create a User whose plaintext password is stashed on the instance.

    The plaintext is exposed as `user.raw_password` so tests can log in.
    """
    raw_password = "fake"
    new_user = User.objects.create_user(
        username=username, email="a@b.com", password=raw_password)
    # Plain attribute assignment; setattr with a constant name was redundant.
    new_user.raw_password = raw_password
    return new_user
@pytest.fixture
def user():
    """Default test user (username 'test_user')."""
    return _user()
@pytest.fixture
def another_user():
    """A second, distinct test user for cross-user isolation tests."""
    return _user('another_user')
@pytest.fixture
def outcome(user):
    """
    Main category of outcome type
    """
    return Category.objects.create(
        name="outcome", type_category=OUTCOME, user=user)
@pytest.fixture
def income(user):
    """
    Main category of income type
    """
    return Category.objects.create(
        name="income", type_category=INCOME, user=user)
@pytest.fixture
def savings(user):
    """
    Category of Savings
    """
    return Category.objects.create(
        name="savings", type_category=SAVINGS, user=user)
@pytest.fixture
def outcome_current(user, outcome, current_date):
    """
    Record of type Outcome dated today (current date)
    """
    return Record.objects.create(
        category=outcome, amount=1, start_date=current_date, user=user)
@pytest.fixture
def outcome_future(user, outcome, future_date):
    """
    Record of type Outcome dated in the future
    """
    return Record.objects.create(
        category=outcome, amount=1, start_date=future_date, user=user)
@pytest.fixture
def outcome_recurrent(user, outcome, start_of_recurrence):
    """
    Recurring Outcome record starting in the future, with day_of_month set.

    Should not be combined with outcome_recurrent_limited or
    outcome_with_parent, since those fixtures mutate this record instance.
    """
    return Record.objects.create(
        category=outcome, amount=1, start_date=start_of_recurrence, user=user,
        day_of_month=start_of_recurrence.day)
@pytest.fixture
def outcome_recurrent_limited(user, outcome_recurrent, end_of_recurrence):
    """
    Recurring Outcome record whose recurrence stops at end_of_recurrence.
    """
    limited = outcome_recurrent
    limited.end_date = end_of_recurrence
    limited.save()
    return limited
@pytest.fixture
def outcome_with_parent(
        outcome_future, outcome_recurrent, next_month_future):
    """Future Outcome record re-parented onto the recurring record and
    shifted one month ahead."""
    child = outcome_future
    child.parent = outcome_recurrent
    child.start_date = next_month_future
    child.save()
    return child
@pytest.fixture
def savings_current(request, user, savings, current_date):
    """
    Record of type Savings dated today (current date).

    NOTE(review): the `request` parameter is unused; it can likely be
    dropped from the signature.
    """
    record = Record.objects.create(
        category=savings, amount=1, start_date=current_date, user=user)
    return record
@pytest.fixture
def budget(user):
    """Budget of amount 1 belonging to the test user."""
    return Budget.objects.create(user=user, amount=1)
| moneyforecast/tests/records/fixtures.py | 5,073 | Return a date which is used to determine the end month the recurrence
should occur
Main category of income type
Return a MonthControl object for the current date.
Important: currently any Record fixture should come before month_control
Return a MonthControlWithBudget object for the current date.
Important: currently any Record fixture should come before month_control
Main category of outcome type
Record of type Outcome set to today (current date)
Record of type Outcome set in the future
Record of type Outcome set in the future with a day of the month set
to create a recurring record
This fixture should not be used with outcome_recurrent_limited and
outcome_with_parent since they change the instance of this own record
Record of type Outcome set in the future with a recurring day of the month
set and limited to a certain time
Category of Savings
Record of type Outcome set in the future
Date object representing the first day of a record with recurrence | 966 | en | 0.875139 |
'''Ask two student's grade, inform 3 possible averages.
average :
> 7 = Approved
< 7 & > 5 = Recovery
< 5 = Failed
'''
g1 = float(input("Inform the student's first grade: "))
g2 = float(input("Inform the student's second grade: "))
average = (g1 + g2)/2 # how to calculate the avarege grade between two values
if average >= 7:
print(f"Student with avarege \033[35m{average}\033[m: \033[32mAPPROVED\033[m")
elif average >= 5 and average < 7:
print(f"Student with avarege \033[35m{average}\033[m: \033[33mRECOVERY\033[m")
else:
print(f"Student with avarege \033[35m{average}\033[m: \033[31mFAILED\033[m") | Python-codes-CeV/40-Average.py | 615 | Ask two student's grade, inform 3 possible averages.
average :
> 7 = Approved
< 7 & > 5 = Recovery
< 5 = Failed
how to calculate the average grade between two values | 167 | en | 0.894869
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.