| text (string, lengths 4–1.02M) | meta (dict) |
|---|---|
from __future__ import unicode_literals
from ..dti import TrackBallStick
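# This test follows nipype's auto-generated pattern: input_map records, for each
# input trait of the Camino TrackBallStick interface, the metadata expected on
# the interface spec (command-line argstr, units, positions, requirements), and
# the loop below asserts that the live trait definitions carry the same values.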
def test_TrackBallStick_inputs():
input_map = dict(anisfile=dict(argstr='-anisfile %s',
),
anisthresh=dict(argstr='-anisthresh %f',
),
args=dict(argstr='%s',
),
curveinterval=dict(argstr='-curveinterval %f',
requires=['curvethresh'],
),
curvethresh=dict(argstr='-curvethresh %f',
),
data_dims=dict(argstr='-datadims %s',
units='voxels',
),
environ=dict(nohash=True,
usedefault=True,
),
gzip=dict(argstr='-gzip',
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_file=dict(argstr='-inputfile %s',
position=1,
),
inputdatatype=dict(argstr='-inputdatatype %s',
),
inputmodel=dict(argstr='-inputmodel %s',
usedefault=True,
),
interpolator=dict(argstr='-interpolator %s',
),
ipthresh=dict(argstr='-ipthresh %f',
),
maxcomponents=dict(argstr='-maxcomponents %d',
units='NA',
),
numpds=dict(argstr='-numpds %d',
units='NA',
),
out_file=dict(argstr='-outputfile %s',
genfile=True,
position=-1,
),
output_root=dict(argstr='-outputroot %s',
position=-1,
),
outputtracts=dict(argstr='-outputtracts %s',
),
seed_file=dict(argstr='-seedfile %s',
position=2,
),
stepsize=dict(argstr='-stepsize %f',
requires=['tracker'],
),
terminal_output=dict(deprecated='1.0.0',
nohash=True,
),
tracker=dict(argstr='-tracker %s',
usedefault=True,
),
voxel_dims=dict(argstr='-voxeldims %s',
units='mm',
),
)
inputs = TrackBallStick.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_TrackBallStick_outputs():
output_map = dict(tracked=dict(),
)
outputs = TrackBallStick.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
|
{
"content_hash": "6c5945e13b7ca4994dae5d83b83edada",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 67,
"avg_line_length": 25.129411764705882,
"alnum_prop": 0.6044007490636704,
"repo_name": "mick-d/nipype",
"id": "361838512d0cd301771cead57c59dd440b066b34",
"size": "2190",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nipype/interfaces/camino/tests/test_auto_TrackBallStick.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9823"
},
{
"name": "KiCad",
"bytes": "3797"
},
{
"name": "Makefile",
"bytes": "1854"
},
{
"name": "Matlab",
"bytes": "1999"
},
{
"name": "Python",
"bytes": "4607773"
},
{
"name": "Shell",
"bytes": "380"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
}
|
"""Calculates evaluation scores for a prediction TSV file.
The prediction file is produced by predict_main.py and should contain 3 or more
columns:
1: sources (concatenated)
2: prediction
3-n: targets (1 or more)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
from absl import logging
import score_lib
FLAGS = flags.FLAGS
flags.DEFINE_string(
'prediction_file', None,
'TSV file containing source, prediction, and target columns.')
flags.DEFINE_bool(
'case_insensitive', True,
'Whether score computation should be case insensitive (in the LaserTagger '
'paper this was set to True).')
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
flags.mark_flag_as_required('prediction_file')
sources, predictions, target_lists = score_lib.read_data(
FLAGS.prediction_file, FLAGS.case_insensitive)
logging.info(f'Read file: {FLAGS.prediction_file}')
exact = score_lib.compute_exact_score(predictions, target_lists)
sari, keep, addition, deletion = score_lib.compute_sari_scores(
sources, predictions, target_lists)
print(f'Exact score: {100*exact:.3f}')
print(f'SARI score: {100*sari:.3f}')
print(f' KEEP score: {100*keep:.3f}')
print(f' ADDITION score: {100*addition:.3f}')
print(f' DELETION score: {100*deletion:.3f}')
if __name__ == '__main__':
app.run(main)
|
{
"content_hash": "e13afeb90ec414548ddd6a9dc76c3dee",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 79,
"avg_line_length": 29.254901960784313,
"alnum_prop": 0.7037533512064343,
"repo_name": "googleinterns/lasertagger",
"id": "f6256c5660f4ba5de415a6fd73230df14916e741",
"size": "2119",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "score_main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "192692"
},
{
"name": "Shell",
"bytes": "4088"
}
],
"symlink_target": ""
}
|
"""
Measure resonators, one at a time, with the readout tone centered in the filterbank bin.
"""
from __future__ import division
import time
import numpy as np
from kid_readout.roach import analog, calculate, hardware_tools
from kid_readout.measurement import acquire, basic
from kid_readout.equipment import hardware, agilent_33220
acquire.show_settings()
acquire.show_git_status()
logger = acquire.get_script_logger(__file__)
# Parameters
suffix = 'hittite'
attenuations = [20]
fft_gains = [5]
f_center_all = 1e6 * np.array([2522.24, 2605.96, 2723.65, 2787.96, 3851.13])
f_center = f_center_all[1:2] # select a subset of the frequencies
fractional_frequency_shift = 0
f_center *= (1 + fractional_frequency_shift)
df_baseband_target = 5e3
fine_sweep_num_linewidths = 5
Q_max_expected = 50e3
df_coarse_sweep = f_center.min() / Q_max_expected # A coarse sweep with a resolution of one linewidth should work
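# For example, with the single selected resonance near 2605.96 MHz and Q_max_expected = 50e3,
# df_coarse_sweep works out to roughly 52 kHz.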
df_total = 4e6 # The total span of the baseband tones
df_lo = 2.5e3 # This is the smallest resolution available
f_baseband_minimum = 10e6 # Keep the tones away from the LO by at least this frequency.
sweep_length_seconds = 0 # Take the minimum amount of data, in this case one block
stream_length_seconds = 10
# Hardware
conditioner = analog.HeterodyneMarkII()
shield = hardware.Thing(name='magnetic_shield_pocket', state={'orientation': 'horizontal'})
hittite = hardware.Thing(name='hittite', state={'output_dBm': 0})
hw = hardware.Hardware(conditioner, shield, hittite)
ri = hardware_tools.r1h11_with_mk2(initialize=True, use_config=False)
ri.adc_valon.set_ref_select(0) # internal
ri.lo_valon.set_ref_select(1) # external
# Calculate sweep parameters, LO and baseband sweep frequencies
ri_state = ri.state
tone_sample_exponent = int(np.round(np.log2(ri_state.adc_sample_rate / df_baseband_target)))
df_baseband = ri_state.adc_sample_rate / 2 ** tone_sample_exponent
logger.info("Baseband resolution is {:.0f} Hz using 2^{:d} samples".format(df_baseband, tone_sample_exponent))
num_sweep_tones = min(int(df_total / df_baseband), ri.max_num_waveforms(2 ** tone_sample_exponent))
logger.info("Using {:d} tones".format(num_sweep_tones))
f_baseband = f_baseband_minimum + ri.state.adc_sample_rate / 2**tone_sample_exponent * np.arange(num_sweep_tones)
logger.info("Coarse sweep span is {:.1f} MHz".format(1e-6 * f_baseband.ptp()))
coarse_stride = max(df_coarse_sweep // df_baseband, 1)
logger.info("Coarse sweep resolution is {:.0f} Hz".format(coarse_stride * df_baseband))
f_lo_center = df_lo * np.round((f_center - f_baseband.mean()) / df_lo)
# Run
npd = acquire.new_npy_directory(suffix=suffix)
tic = time.time()
try:
ri.set_tone_baseband_freqs(freqs=1e-6 * f_baseband[:, np.newaxis], nsamp=2 ** tone_sample_exponent)
for lo_index, f_lo in enumerate(f_lo_center):
ri.set_lo(lomhz=1e-6 * f_lo, chan_spacing=1e-6 * df_lo)
for attenuation_index, (attenuation, fft_gain) in enumerate(zip(attenuations, fft_gains)):
ri.set_dac_attenuator(attenuation)
ri.set_fft_gain(fft_gain)
state = hw.state()
state['lo_index'] = lo_index
coarse_sweep = acquire.run_loaded_sweep(ri, length_seconds=sweep_length_seconds,
tone_bank_indices=np.arange(0, num_sweep_tones, coarse_stride))[0]
npd.write(coarse_sweep)
coarse_f_r = coarse_sweep.resonator.f_0
coarse_Q = coarse_sweep.resonator.Q
logger.info("Coarse sweep f_r = {:.3f} MHz +/- {:.0f} Hz".format(1e-6 * coarse_f_r,
coarse_sweep.resonator.f_0_error))
logger.info("Coarse sweep Q = {:.0f} +/- {:.0f}".format(coarse_Q, coarse_sweep.resonator.Q_error))
df_filterbank = calculate.stream_sample_rate(ri_state)
f_baseband_bin_center = df_filterbank * np.round(f_baseband.mean() / df_filterbank)
f_lo_fine = df_lo * np.round((coarse_f_r - f_baseband_bin_center) / df_lo)
ri.set_lo(lomhz=1e-6 * f_lo_fine, chan_spacing=1e-6 * df_lo)
fine_indices = np.where(np.abs(f_lo_fine + f_baseband - coarse_f_r) <=
(fine_sweep_num_linewidths / 2) * (coarse_f_r / coarse_Q))[0]
fine_sweep = acquire.run_loaded_sweep(ri, length_seconds=sweep_length_seconds,
tone_bank_indices=fine_indices)[0]
ri.select_bank(np.argmin(np.abs(f_baseband_bin_center - f_baseband)))
ri.select_fft_bins(np.array([0]))
print("Frequency in Hz is {:.1f}".format(1e6 * ri.tone_frequencies[ri.bank][0]))
power = float(raw_input("Attach the Hittite output to the cryostat input and enter the power in dBm: "))
state['hittite']['output_dBm'] = power
logger.info("Recording {:.1f} s stream".format(stream_length_seconds))
stream = ri.get_measurement(num_seconds=stream_length_seconds, demod=True)[0]
sweep_stream = basic.SingleSweepStream(sweep=fine_sweep, stream=stream, state=state)
npd.write(sweep_stream)
npd.write(ri.get_adc_measurement())
raw_input("Reconnect the roach output to the cryostat input.")
state['hittite']['output_dBm'] = 0
finally:
npd.close()
print("Wrote {}".format(npd.root_path))
print("Elapsed time {:.0f} minutes.".format((time.time() - tic) / 60))
|
{
"content_hash": "c91c47cf8a839e5b6dfa89bad598b04e",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 118,
"avg_line_length": 54.69,
"alnum_prop": 0.6518559151581642,
"repo_name": "ColumbiaCMB/kid_readout",
"id": "001e56b1e7958ab239a4e5e30e4b4e85231e939e",
"size": "5469",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/data_taking_scripts/cooldown/2017-02-10_hpd/r1h11_sweepstream_hittite.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "13672"
},
{
"name": "Python",
"bytes": "2033932"
}
],
"symlink_target": ""
}
|
from .acfun import *
from .alive import *
from .baidu import *
from .bilibili import *
from .blip import *
from .catfun import *
from .cbs import *
from .cntv import *
from .coursera import *
from .dailymotion import *
from .douban import *
from .douyutv import *
from .ehow import *
from .facebook import *
from .freesound import *
from .funshion import *
from .google import *
from .ifeng import *
from .instagram import *
from .iqiyi import *
from .joy import *
from .jpopsuki import *
from .ku6 import *
from .kugou import *
from .kuwo import *
from .letv import *
from .lizhi import *
from .magisto import *
from .miaopai import *
from .miomio import *
from .mixcloud import *
from .mtv81 import *
from .netease import *
from .nicovideo import *
from .pptv import *
from .qianmo import *
from .qq import *
from .sina import *
from .sohu import *
from .songtaste import *
from .soundcloud import *
from .theplatform import *
from .tucao import *
from .tudou import *
from .tumblr import *
from .twitter import *
from .vid48 import *
from .videobam import *
from .vimeo import *
from .vine import *
from .vk import *
from .w56 import *
from .xiami import *
from .yinyuetai import *
from .youku import *
from .youtube import *
from .ted import *
from .khan import *
|
{
"content_hash": "9593fca5187e0c43011f37e6a019b4bd",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 26,
"avg_line_length": 21.862068965517242,
"alnum_prop": 0.7255520504731862,
"repo_name": "Red54/you-get",
"id": "198bc55b0365537d0d51799296338d3d6cf1195d",
"size": "1291",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/you_get/extractors/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "818"
},
{
"name": "Python",
"bytes": "255280"
}
],
"symlink_target": ""
}
|
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this data, including any software or models in source or binary
# form, as well as any drawings, specifications, and documentation
# (collectively "the Data"), to deal in the Data without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Data, and to
# permit persons to whom the Data is furnished to do so, subject to the
# following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Data.
# THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA.
# =======================
# This version of the META tools is a fork of an original version produced
# by Vanderbilt University's Institute for Software Integrated Systems (ISIS).
# Their license statement:
# Copyright (C) 2011-2014 Vanderbilt University
# Developed with the sponsorship of the Defense Advanced Research Projects
# Agency (DARPA) and delivered to the U.S. Government with Unlimited Rights
# as defined in DFARS 252.227-7013.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this data, including any software or models in source or binary
# form, as well as any drawings, specifications, and documentation
# (collectively "the Data"), to deal in the Data without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Data, and to
# permit persons to whom the Data is furnished to do so, subject to the
# following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Data.
# THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA.
# Copyright (c) 2011, Thomas Paviot (tpaviot@gmail.com)
# All rights reserved.
# This file is part of the StepClassLibrary (SCL).
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of the <ORGANIZATION> nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''This module provides string utilities.'''
def process_nested_parent_str(attr_str,idx=0):
'''
The first letter should be a parenthesis
input string: "(1,4,(5,6),7)"
output: ['1','4',['5','6'],'7']
'''
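    # Returns a (params, k) tuple: `params` is the parsed list (possibly
    # containing nested lists) and `k` is the number of characters consumed,
    # which lets a recursive caller resume scanning after a nested group.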
params = []
current_param = ''
k = 0
while (k<len(attr_str)):
ch = attr_str[k]
k += 1
if ch==',':
params.append(current_param)
current_param = ''
elif ch=='(':
nv = attr_str[k:]
current_param, progress = process_nested_parent_str(nv)
params.append(current_param)
current_param = ''
k += progress+1
elif ch==')':
params.append(current_param)
return params,k
else:
current_param += ch
current_param = current_param.strip(' \t\n\r')
params.append(current_param)
return params,k
if __name__=="__main__":
print process_nested_parent_str("'A'")[0]
print process_nested_parent_str("30.0,0.0,5.0")[0]
print process_nested_parent_str("1,2,(3,4,5),6,7,8")[0]
print process_nested_parent_str("(#9149,#9166),#9142,.T.")[0]
|
{
"content_hash": "e7be50a34989c3a2f0f78871f8466869",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 80,
"avg_line_length": 45.86178861788618,
"alnum_prop": 0.7066123027831944,
"repo_name": "pombredanne/metamorphosys-desktop",
"id": "43ee6afaf7b7d654a6f54b9b5b5b568594113d4d",
"size": "5692",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "metamorphosys/META/src/CADAssembler/Python/Utils.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "10683"
},
{
"name": "Assembly",
"bytes": "117345"
},
{
"name": "Awk",
"bytes": "3591"
},
{
"name": "Batchfile",
"bytes": "228118"
},
{
"name": "BitBake",
"bytes": "4526"
},
{
"name": "C",
"bytes": "3613212"
},
{
"name": "C#",
"bytes": "11617773"
},
{
"name": "C++",
"bytes": "51448188"
},
{
"name": "CMake",
"bytes": "3055"
},
{
"name": "CSS",
"bytes": "109563"
},
{
"name": "Clojure",
"bytes": "37831"
},
{
"name": "Eagle",
"bytes": "3782687"
},
{
"name": "Emacs Lisp",
"bytes": "8514"
},
{
"name": "GAP",
"bytes": "49124"
},
{
"name": "Groff",
"bytes": "2178"
},
{
"name": "Groovy",
"bytes": "7686"
},
{
"name": "HTML",
"bytes": "4025250"
},
{
"name": "Inno Setup",
"bytes": "35715"
},
{
"name": "Java",
"bytes": "489537"
},
{
"name": "JavaScript",
"bytes": "167454"
},
{
"name": "Lua",
"bytes": "1660"
},
{
"name": "Makefile",
"bytes": "97209"
},
{
"name": "Mathematica",
"bytes": "26"
},
{
"name": "Matlab",
"bytes": "80874"
},
{
"name": "Max",
"bytes": "78198"
},
{
"name": "Modelica",
"bytes": "44541139"
},
{
"name": "Objective-C",
"bytes": "34004"
},
{
"name": "Perl",
"bytes": "19285"
},
{
"name": "PostScript",
"bytes": "400254"
},
{
"name": "PowerShell",
"bytes": "19749"
},
{
"name": "Processing",
"bytes": "1477"
},
{
"name": "Prolog",
"bytes": "3121"
},
{
"name": "Protocol Buffer",
"bytes": "58995"
},
{
"name": "Python",
"bytes": "5517835"
},
{
"name": "Ruby",
"bytes": "4483"
},
{
"name": "Shell",
"bytes": "956773"
},
{
"name": "Smarty",
"bytes": "37892"
},
{
"name": "TeX",
"bytes": "4183594"
},
{
"name": "Visual Basic",
"bytes": "22546"
},
{
"name": "XSLT",
"bytes": "332312"
}
],
"symlink_target": ""
}
|
from beritest_tools import BaseBERITestCase
class test_raw_sd(BaseBERITestCase):
def test_a0(self):
'''Test load of stored double word'''
self.assertRegisterEqual(self.MIPS.a0, 0xfedcba9876543210, "Load of stored double word failed")
def test_a1(self):
'''Test signed load of stored positive double word'''
self.assertRegisterEqual(self.MIPS.a1, 1, "Signed load of positive double word failed")
def test_a2(self):
'''Test signed load of stored negative double word'''
self.assertRegisterEqual(self.MIPS.a2, 0xffffffffffffffff, "Signed load of negative double word failed")
def test_pos_offset(self):
'''Test double word store, load at positive offset'''
self.assertRegisterEqual(self.MIPS.a3, 2, "Double word store, load at positive offset failed")
def test_neg_offset(self):
'''Test double word store, load at negative offset'''
self.assertRegisterEqual(self.MIPS.a4, 1, "Double word store, load at negative offset failed")
def test_dram(self):
'''Test load of stored double word from DRAM'''
self.assertRegisterEqual(self.MIPS.s0, 0xfedcba9876543210, "Load from DRAM failed")
|
{
"content_hash": "5caa1b6fe678219737c71e3de6b303c9",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 112,
"avg_line_length": 46.30769230769231,
"alnum_prop": 0.6943521594684385,
"repo_name": "8l/beri",
"id": "ed70c95fef569bdb777db3b427eb4009ca263d85",
"size": "2342",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cheritest/trunk/tests/mem/test_raw_sd.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1629022"
},
{
"name": "Bluespec",
"bytes": "2336405"
},
{
"name": "C",
"bytes": "1058899"
},
{
"name": "C++",
"bytes": "1864"
},
{
"name": "Groff",
"bytes": "14381"
},
{
"name": "Haskell",
"bytes": "11711"
},
{
"name": "Lex",
"bytes": "2894"
},
{
"name": "Makefile",
"bytes": "242450"
},
{
"name": "Mathematica",
"bytes": "291"
},
{
"name": "Objective-C",
"bytes": "2387"
},
{
"name": "OpenEdge ABL",
"bytes": "568"
},
{
"name": "Perl",
"bytes": "19159"
},
{
"name": "Python",
"bytes": "1491002"
},
{
"name": "Shell",
"bytes": "91130"
},
{
"name": "SystemVerilog",
"bytes": "12058"
},
{
"name": "Tcl",
"bytes": "132818"
},
{
"name": "TeX",
"bytes": "4996"
},
{
"name": "Verilog",
"bytes": "125674"
},
{
"name": "Yacc",
"bytes": "5871"
}
],
"symlink_target": ""
}
|
import os
import sys
import re
import ast
from optparse import OptionParser
class PythonTestFinder(object):
def find_functions(self, ast_body, matcher):
for obj in ast_body:
if not matcher(obj):
continue
if isinstance(obj, ast.FunctionDef):
yield obj.name
if isinstance(obj, ast.ClassDef):
for func in self.find_functions(obj.body, matcher):
yield '%s.%s' % (obj.name, func)
def get_module_tests(self, module):
with open(module) as f:
data = f.read()
result = ast.parse(data)
def matcher(obj):
if isinstance(obj, ast.FunctionDef):
return re.search('test', obj.name, re.IGNORECASE)
# Unlike nose, we're not able to determine whether this class
# inherits from unittest.TestCase
# So it may be the case that this class name lacks 'test'. As a
# compromise, match all classes
return isinstance(obj, ast.ClassDef)
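        # For a module defining ``class TestFoo`` with a ``test_bar`` method and a
        # top-level ``def test_baz``, this yields ['TestFoo.test_bar', 'test_baz']
        # (in source order).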
tests = list(
self.find_functions(result.body, matcher)
)
return tests
class NoseTestFinder(object):
def _generate_tests(self, suite):
from nose.suite import ContextSuite
from nose.case import Test
for context in suite._tests:
if isinstance(context, Test):
yield context
continue
assert isinstance(context, ContextSuite)
for test in self._generate_tests(context):
yield test
def _get_test_name(self, test_wrapper):
from nose.case import FunctionTestCase
test = test_wrapper.test
if isinstance(test, FunctionTestCase):
return test.test.__name__
return test.__class__.__name__ + '.' + test._testMethodName
def _generate_test_names(self, suite):
return map(self._get_test_name, self._generate_tests(suite))
def get_module_tests(self, module):
import nose
loader = nose.loader.defaultTestLoader()
return self._generate_test_names(loader.loadTestsFromName(module))
def _get_prefixed(strings, prefix):
for string in strings:
if string.startswith(prefix):
yield string.replace(prefix, '', 1)
def _get_py_or_dirs(directory, prefix):
for entry in os.listdir(directory or '.'):
path = os.path.join(directory, entry)
if entry.startswith(prefix):
leftover = entry.replace(prefix, '', 1)
if os.path.isdir(path):
yield leftover + '/'
elif leftover.endswith('.py'):
yield leftover + ':'
def _complete(test_finder, thing):
if ':' in thing:
# complete a test
module, test_part = thing.split(':')
tests = list(test_finder.get_module_tests(module))
if '.' in test_part:
# complete a method
return _get_prefixed(strings=tests, prefix=test_part)
funcs = [test for test in tests if test.count('.') == 0]
classes = [test.split('.')[0] for test in tests if '.' in test]
if test_part in classes:
# indicate a method should be completed
return ['.']
return _get_prefixed(strings=funcs + classes, prefix=test_part)
if os.path.isdir(thing):
# complete directory contents
if thing != '.' and not thing.endswith('/'):
return ['/']
return _get_py_or_dirs(thing, '')
if os.path.exists(thing):
# add a colon to indicate search for specific class/func
return [':']
# path not exists, complete a partial path
directory, file_part = os.path.split(thing)
return _get_py_or_dirs(directory, file_part)
def complete(test_finder, thing):
for option in set(_complete(test_finder, thing)):
sys.stdout.write(thing + option + ' ') # avoid print for python 3
def main():
methods = {
'nose': NoseTestFinder,
'python': PythonTestFinder,
}
parser = OptionParser(usage='usage: %prog [options] ')
parser.add_option(
"-s",
"--search-method",
help="Search method to use when locating tests",
choices=list(methods.keys()),
default='python',
)
(options, args) = parser.parse_args()
finder_class = methods[options.search_method]
finder_instance = finder_class()
complete(finder_instance, './' if len(args) == 0 else args[0])
if __name__ == '__main__':
main()
|
{
"content_hash": "85ff64b05e074615bec00405ed37314f",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 75,
"avg_line_length": 33.26470588235294,
"alnum_prop": 0.5864279398762158,
"repo_name": "scorphus/nosecomplete",
"id": "59c4fc1e77577f57f1b176639b58ff6fcf476bee",
"size": "4524",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nosecomplete.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7327"
}
],
"symlink_target": ""
}
|
import sys
from pyasn1.codec.der import decoder
from pyasn1.codec.der import encoder
from pyasn1_modules import pem
from pyasn1_modules import rfc2314
if len(sys.argv) != 1:
print("""Usage:
$ cat certificateRequest.pem | %s""" % sys.argv[0])
sys.exit(-1)
certType = rfc2314.CertificationRequest()
certCnt = 0
while True:
idx, substrate = pem.readPemBlocksFromFile(
sys.stdin, ('-----BEGIN CERTIFICATE REQUEST-----',
'-----END CERTIFICATE REQUEST-----')
)
if not substrate:
break
cert, rest = decoder.decode(substrate, asn1Spec=certType)
if rest:
substrate = substrate[:-len(rest)]
print(cert.prettyPrint())
assert encoder.encode(cert) == substrate, 'cert recode fails'
certCnt += 1
print('*** %s PEM certificate request(s) de/serialized' % certCnt)
|
{
"content_hash": "97b199896ed4fc0874ce3f25dd460aa9",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 66,
"avg_line_length": 22.83783783783784,
"alnum_prop": 0.6497041420118344,
"repo_name": "etingof/pyasn1-modules",
"id": "385312abf6105a5842ed1f7543fc7621662f444c",
"size": "1165",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/pkcs10dump.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1395999"
}
],
"symlink_target": ""
}
|
import os
import sys
if __name__ == "__main__":
    # CHANGED: manage.py will use the development settings by default.
    # Change the DJANGO_SETTINGS_MODULE environment variable to use an
    # environment-specific settings file.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mysana.settings.development")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
{
"content_hash": "8f14bbb49c1581995c3a92ec33cb5ed9",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 82,
"avg_line_length": 35.166666666666664,
"alnum_prop": 0.7393364928909952,
"repo_name": "triump0870/mysana",
"id": "2e7b4d79a36143ecc6989e3386b7c6eac2fbd82d",
"size": "444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3378"
},
{
"name": "HTML",
"bytes": "27777"
},
{
"name": "JavaScript",
"bytes": "363"
},
{
"name": "Python",
"bytes": "51120"
},
{
"name": "Shell",
"bytes": "14230"
}
],
"symlink_target": ""
}
|
import time
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple
if TYPE_CHECKING:
from sydent.sydent import Sydent
class JoinTokenStore:
def __init__(self, sydent: "Sydent") -> None:
self.sydent = sydent
def storeToken(
self, medium: str, normalised_address: str, roomId: str, sender: str, token: str
) -> None:
"""
Store a new invite token and its metadata. Please note that email
addresses need to be casefolded before calling this function.
:param medium: The medium of the 3PID the token is associated to.
:param normalised_address: The address of the 3PID the token is associated to.
:param roomId: The ID of the room the 3PID is invited in.
:param sender: The MXID of the user that sent the invite.
:param token: The token to store.
"""
cur = self.sydent.db.cursor()
cur.execute(
"INSERT INTO invite_tokens"
" ('medium', 'address', 'room_id', 'sender', 'token', 'received_ts')"
" VALUES (?, ?, ?, ?, ?, ?)",
(medium, normalised_address, roomId, sender, token, int(time.time())),
)
self.sydent.db.commit()
def getTokens(self, medium: str, address: str) -> List[Dict[str, str]]:
"""
        Retrieves the pending invite tokens for this 3PID that haven't been delivered
yet.
:param medium: The medium of the 3PID to get tokens for.
:param address: The address of the 3PID to get tokens for.
:return: A list of dicts, each containing a pending token and its metadata for
this 3PID.
"""
cur = self.sydent.db.cursor()
res = cur.execute(
"SELECT medium, address, room_id, sender, token FROM invite_tokens"
" WHERE medium = ? AND address = ? AND sent_ts IS NULL",
(
medium,
address,
),
)
rows: List[Tuple[str, str, str, str, str]] = res.fetchall()
ret = []
for row in rows:
medium, address, roomId, sender, token = row
ret.append(
{
"medium": medium,
"address": address,
"room_id": roomId,
"sender": sender,
"token": token,
}
)
return ret
def markTokensAsSent(self, medium: str, address: str) -> None:
"""
Updates the invite tokens associated with a given 3PID to mark them as
delivered to a homeserver so they're not delivered again in the future.
:param medium: The medium of the 3PID to update tokens for.
:param address: The address of the 3PID to update tokens for.
"""
cur = self.sydent.db.cursor()
cur.execute(
"UPDATE invite_tokens SET sent_ts = ? WHERE medium = ? AND address = ?",
(
int(time.time()),
medium,
address,
),
)
self.sydent.db.commit()
def storeEphemeralPublicKey(self, publicKey: str) -> None:
"""
Saves the provided ephemeral public key.
:param publicKey: The key to store.
"""
cur = self.sydent.db.cursor()
cur.execute(
"INSERT INTO ephemeral_public_keys"
" (public_key, persistence_ts)"
" VALUES (?, ?)",
(publicKey, int(time.time())),
)
self.sydent.db.commit()
def validateEphemeralPublicKey(self, publicKey: str) -> bool:
"""
Checks if an ephemeral public key is valid, and, if it is, updates its
verification count.
:param publicKey: The public key to validate.
:return: Whether the key is valid.
"""
cur = self.sydent.db.cursor()
cur.execute(
"UPDATE ephemeral_public_keys"
" SET verify_count = verify_count + 1"
" WHERE public_key = ?",
(publicKey,),
)
self.sydent.db.commit()
return cur.rowcount > 0
def getSenderForToken(self, token: str) -> Optional[str]:
"""
Retrieves the MXID of the user that sent the invite the provided token is for.
:param token: The token to retrieve the sender of.
:return: The invite's sender, or None if the token doesn't match an existing
invite.
"""
cur = self.sydent.db.cursor()
res = cur.execute("SELECT sender FROM invite_tokens WHERE token = ?", (token,))
rows: List[Tuple[str]] = res.fetchall()
if rows:
return rows[0][0]
return None
def deleteTokens(self, medium: str, address: str) -> None:
"""
Deletes every token for a given 3PID.
:param medium: The medium of the 3PID to delete tokens for.
:param address: The address of the 3PID to delete tokens for.
"""
cur = self.sydent.db.cursor()
cur.execute(
"DELETE FROM invite_tokens WHERE medium = ? AND address = ?",
(
medium,
address,
),
)
self.sydent.db.commit()
|
{
"content_hash": "12856ec929afd4a052e4236b52ac77a5",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 88,
"avg_line_length": 32.388888888888886,
"alnum_prop": 0.5450733752620545,
"repo_name": "matrix-org/sydent",
"id": "8b41770c2eff7cc0832bc1d1b4548afef824c0fb",
"size": "5825",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sydent/db/invite_tokens.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2161"
},
{
"name": "Gherkin",
"bytes": "283"
},
{
"name": "HTML",
"bytes": "1143"
},
{
"name": "Jinja",
"bytes": "31155"
},
{
"name": "Python",
"bytes": "463511"
},
{
"name": "Shell",
"bytes": "3218"
}
],
"symlink_target": ""
}
|
from pudzu.charts import *
from string import ascii_lowercase as lc
df = pd.read_csv("datasets/twoletters.csv")
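# Assumed CSV layout: the second column of each row holds a space-separated list
# of words; `groups` maps every word to its row index, which later selects the
# matching palette colour and legend label for that category.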
groups = { w : t[0] for t in df.itertuples() for w in t[2].split(' ') }
palette = [ "#fbb4ae", "#b3cde3", "#ccebc5", "#decbe4", "#fed9a6", "#ffffcc", "#e5d8bd", "#fddaec" ]
labels = [ "Functional words: //be//, //of//, //to//,...", "Interjections: //hm//, //ow//, //yo//,...", "Letter names: //ar//, //ef//, //pi//,...", "Solfège notes: //re//, //mi//, //fa//,...", "Contractions: //ad//, //bi//, //za//,...", "Foreignisms: //aa//, //qi//, //zo//,...", "Dialectal words: //ae//, //ch//, //un//,...", "Other words: //ax//, //ma//, //pa//,..." ]
def box(word):
return Rectangle(45, "white" if word not in groups else palette[groups[word]]).place(Image.from_text(word.upper(), arial(16, bold=True), "#F0F0F0" if word not in groups else "black"))
grid = Image.from_array([[box(r+c) for c in lc] for r in lc]).pad(1, "black")
grid.save("output/twolettersgrid.png")
def legbox(i):
return Rectangle(40, palette[i]).place(Image.from_text(str(sum(1 for _,j in groups.items() if i==j)), arial(16, bold=True)))
legend = generate_legend([legbox(i) for i in range(len(palette))], labels, font_family=partial(arial, 24), header="WORD CATEGORIES", footer="Words in multiple categories are included under their most common meaning.", max_width=400)
chart = Image.from_row([grid, legend], bg="white", yalign=0, padding=10)
title = Image.from_text("A categorisation of two-letter English words".upper(), arial(48))
subtitle = Image.from_text("based on the Collins Scrabble Words tournament wordlist", arial(36))
img = Image.from_column([title, subtitle, chart], bg="white", padding=5)
img.place(Image.from_text("/u/Udzu", font("arial", 14), fg="black", bg="white", padding=5).pad((1,1,0,0), "black"), align=1, padding=10, copy=False)
img.save("output/twoletters.png")
|
{
"content_hash": "9b084056a68341598af1a6c8c1c03254",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 370,
"avg_line_length": 75.36,
"alnum_prop": 0.6353503184713376,
"repo_name": "Udzu/pudzu",
"id": "bf1ebc764efe81cd41dfdfed76ec10adcf21848c",
"size": "1885",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dataviz/twoletters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7945"
},
{
"name": "Python",
"bytes": "867429"
},
{
"name": "Roff",
"bytes": "3702309"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/component/bio/shared_bio_component_clothing_casual_training_2.iff"
result.attribute_template_id = -1
result.stfName("craft_bio_components_n","bio_component_clothing_casual_training_2")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "f41e80af2a7f7ff6a7f6e05b8182878e",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 102,
"avg_line_length": 28.923076923076923,
"alnum_prop": 0.726063829787234,
"repo_name": "obi-two/Rebelion",
"id": "877c0f889eece563290f686bfc114fca996e105e",
"size": "521",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/tangible/component/bio/shared_bio_component_clothing_casual_training_2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
"""This module is deprecated. Please use :mod:`airflow.providers.microsoft.azure.transfers.local_to_wasb`."""
from __future__ import annotations
import warnings
from airflow.providers.microsoft.azure.transfers.local_to_wasb import LocalFilesystemToWasbOperator # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.microsoft.azure.transfers.local_to_wasb`.",
DeprecationWarning,
stacklevel=2,
)
|
{
"content_hash": "ef9b6493bb55aae1bd0ea4e96f5a85a8",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 109,
"avg_line_length": 36.416666666666664,
"alnum_prop": 0.7780320366132724,
"repo_name": "nathanielvarona/airflow",
"id": "60ee64c8b5f52f3fe7df879337d4997670b3e39d",
"size": "1224",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "airflow/providers/microsoft/azure/transfers/file_to_wasb.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "70681"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "173025"
},
{
"name": "JavaScript",
"bytes": "142848"
},
{
"name": "Jinja",
"bytes": "38895"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "23169682"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "211967"
},
{
"name": "TypeScript",
"bytes": "484556"
}
],
"symlink_target": ""
}
|
import json
import os
from datetime import datetime
from core import parse_network
from plugins.mock_plugin import MockPlugin
plugins_list = [MockPlugin()]
net_map = parse_network('net_map_' + datetime.now().strftime("%d_%m_%y__%H_%M_%S"), plugins_list)
net_map_json_data = json.dumps(net_map.to_dict())
print(net_map_json_data)
save_path = os.path.abspath('saves/' + net_map.name + '.map')
with open(save_path, 'w') as fw:
fw.write(net_map_json_data)
|
{
"content_hash": "e90d882cfa7b636a223800ae72fbd167",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 97,
"avg_line_length": 27.11764705882353,
"alnum_prop": 0.702819956616052,
"repo_name": "mordvin-denis/net-parser",
"id": "0da7facaba2c484624af6b66d5b8f1f89d0c9400",
"size": "461",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "run.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "91395"
}
],
"symlink_target": ""
}
|
"""The schema module provides the building blocks for database metadata.
Each element within this module describes a database entity which can be
created and dropped, or is otherwise part of such an entity. Examples include
tables, columns, sequences, and indexes.
All entities are subclasses of :class:`~sqlalchemy.schema.SchemaItem`, and as
defined in this module they are intended to be agnostic of any vendor-specific
constructs.
A collection of entities are grouped into a unit called
:class:`~sqlalchemy.schema.MetaData`. MetaData serves as a logical grouping of
schema elements, and can also be associated with an actual database connection
such that operations involving the contained elements can contact the database
as needed.
Two of the elements here also build upon their "syntactic" counterparts, which
are defined in :class:`~sqlalchemy.sql.expression.`, specifically
:class:`~sqlalchemy.schema.Table` and :class:`~sqlalchemy.schema.Column`.
Since these objects are part of the SQL expression language, they are usable
as components in SQL expressions.
"""
from __future__ import absolute_import
import collections
import sqlalchemy
from . import coercions
from . import ddl
from . import roles
from . import type_api
from . import visitors
from .base import _bind_or_error
from .base import DedupeColumnCollection
from .base import DialectKWArgs
from .base import Executable
from .base import SchemaEventTarget
from .coercions import _document_text_coercion
from .elements import ClauseElement
from .elements import ColumnClause
from .elements import ColumnElement
from .elements import quoted_name
from .elements import TextClause
from .selectable import TableClause
from .type_api import to_instance
from .visitors import InternalTraversal
from .. import event
from .. import exc
from .. import inspection
from .. import util
RETAIN_SCHEMA = util.symbol(
"retain_schema"
"""Symbol indicating that a :class:`_schema.Table`, :class:`.Sequence`
or in some cases a :class:`_schema.ForeignKey` object, in situations
where the object is being copied for a :meth:`.Table.to_metadata`
operation, should retain the schema name that it already has.
"""
)
BLANK_SCHEMA = util.symbol(
"blank_schema",
"""Symbol indicating that a :class:`_schema.Table`, :class:`.Sequence`
or in some cases a :class:`_schema.ForeignKey` object
should have 'None' for its schema, even if the parent
:class:`_schema.MetaData` has specified a schema.
.. versionadded:: 1.0.14
""",
)
NULL_UNSPECIFIED = util.symbol(
"NULL_UNSPECIFIED",
"""Symbol indicating the "nullable" keyword was not passed to a Column.
Normally we would expect None to be acceptable for this but some backends
    such as that of SQL Server place special significance on a "nullability"
value of None.
""",
)
def _get_table_key(name, schema):
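    # e.g. _get_table_key("user", None) -> "user"
    #      _get_table_key("user", "analytics") -> "analytics.user"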
if schema is None:
return name
else:
return schema + "." + name
# this should really be in sql/util.py but we'd have to
# break an import cycle
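# _copy_expression rewrites an expression so that any Column attached to
# source_table is swapped for the column with the same key on target_table;
# all other elements are left untouched (replace() returning None keeps a node).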
def _copy_expression(expression, source_table, target_table):
if source_table is None or target_table is None:
return expression
def replace(col):
if (
isinstance(col, Column)
and col.table is source_table
and col.key in source_table.c
):
return target_table.c[col.key]
else:
return None
return visitors.replacement_traverse(expression, {}, replace)
@inspection._self_inspects
class SchemaItem(SchemaEventTarget, visitors.Visitable):
"""Base class for items that define a database schema."""
__visit_name__ = "schema_item"
create_drop_stringify_dialect = "default"
def _init_items(self, *args, **kw):
"""Initialize the list of child items for this SchemaItem."""
for item in args:
if item is not None:
try:
spwd = item._set_parent_with_dispatch
except AttributeError as err:
util.raise_(
exc.ArgumentError(
"'SchemaItem' object, such as a 'Column' or a "
"'Constraint' expected, got %r" % item
),
replace_context=err,
)
else:
spwd(self, **kw)
def __repr__(self):
return util.generic_repr(self, omit_kwarg=["info"])
@util.memoized_property
def info(self):
"""Info dictionary associated with the object, allowing user-defined
data to be associated with this :class:`.SchemaItem`.
The dictionary is automatically generated when first accessed.
It can also be specified in the constructor of some objects,
such as :class:`_schema.Table` and :class:`_schema.Column`.
"""
return {}
def _schema_item_copy(self, schema_item):
if "info" in self.__dict__:
schema_item.info = self.info.copy()
schema_item.dispatch._update(self.dispatch)
return schema_item
_use_schema_map = True
class Table(DialectKWArgs, SchemaItem, TableClause):
r"""Represent a table in a database.
e.g.::
mytable = Table(
"mytable", metadata,
Column('mytable_id', Integer, primary_key=True),
Column('value', String(50))
)
The :class:`_schema.Table`
object constructs a unique instance of itself based
on its name and optional schema name within the given
:class:`_schema.MetaData` object. Calling the :class:`_schema.Table`
constructor with the same name and same :class:`_schema.MetaData` argument
a second time will return the *same* :class:`_schema.Table`
object - in this way
the :class:`_schema.Table` constructor acts as a registry function.
.. seealso::
:ref:`metadata_describing` - Introduction to database metadata
Constructor arguments are as follows:
:param name: The name of this table as represented in the database.
The table name, along with the value of the ``schema`` parameter,
forms a key which uniquely identifies this :class:`_schema.Table`
within
the owning :class:`_schema.MetaData` collection.
Additional calls to :class:`_schema.Table` with the same name,
metadata,
and schema name will return the same :class:`_schema.Table` object.
Names which contain no upper case characters
will be treated as case insensitive names, and will not be quoted
unless they are a reserved word or contain special characters.
A name with any number of upper case characters is considered
to be case sensitive, and will be sent as quoted.
To enable unconditional quoting for the table name, specify the flag
``quote=True`` to the constructor, or use the :class:`.quoted_name`
construct to specify the name.
:param metadata: a :class:`_schema.MetaData`
object which will contain this
table. The metadata is used as a point of association of this table
with other tables which are referenced via foreign key. It also
may be used to associate this table with a particular
:class:`.Connectable`.
:param \*args: Additional positional arguments are used primarily
to add the list of :class:`_schema.Column`
objects contained within this
table. Similar to the style of a CREATE TABLE statement, other
:class:`.SchemaItem` constructs may be added here, including
:class:`.PrimaryKeyConstraint`, and
:class:`_schema.ForeignKeyConstraint`.
:param autoload: Defaults to ``False``, unless
:paramref:`_schema.Table.autoload_with`
is set in which case it defaults to ``True``;
:class:`_schema.Column` objects
for this table should be reflected from the database, possibly
augmenting objects that were explicitly specified.
:class:`_schema.Column` and other objects explicitly set on the
table will replace corresponding reflected objects.
.. deprecated:: 1.4
The autoload parameter is deprecated and will be removed in
version 2.0. Please use the
:paramref:`_schema.Table.autoload_with` parameter, passing an
engine or connection.
.. seealso::
:ref:`metadata_reflection_toplevel`
:param autoload_replace: Defaults to ``True``; when using
:paramref:`_schema.Table.autoload`
in conjunction with :paramref:`_schema.Table.extend_existing`,
indicates
that :class:`_schema.Column` objects present in the already-existing
:class:`_schema.Table`
object should be replaced with columns of the same
name retrieved from the autoload process. When ``False``, columns
already present under existing names will be omitted from the
reflection process.
Note that this setting does not impact :class:`_schema.Column` objects
specified programmatically within the call to :class:`_schema.Table`
that
also is autoloading; those :class:`_schema.Column` objects will always
replace existing columns of the same name when
:paramref:`_schema.Table.extend_existing` is ``True``.
.. seealso::
:paramref:`_schema.Table.autoload`
:paramref:`_schema.Table.extend_existing`
:param autoload_with: An :class:`_engine.Engine` or
:class:`_engine.Connection` object,
or a :class:`_reflection.Inspector` object as returned by
:func:`_sa.inspect`
against one, with which this :class:`_schema.Table`
object will be reflected.
When set to a non-None value, the autoload process will take place
for this table against the given engine or connection.
:param extend_existing: When ``True``, indicates that if this
:class:`_schema.Table` is already present in the given
:class:`_schema.MetaData`,
apply further arguments within the constructor to the existing
:class:`_schema.Table`.
If :paramref:`_schema.Table.extend_existing` or
:paramref:`_schema.Table.keep_existing` are not set,
and the given name
of the new :class:`_schema.Table` refers to a :class:`_schema.Table`
that is
already present in the target :class:`_schema.MetaData` collection,
and
this :class:`_schema.Table`
specifies additional columns or other constructs
or flags that modify the table's state, an
error is raised. The purpose of these two mutually-exclusive flags
is to specify what action should be taken when a
:class:`_schema.Table`
is specified that matches an existing :class:`_schema.Table`,
yet specifies
additional constructs.
:paramref:`_schema.Table.extend_existing`
will also work in conjunction
with :paramref:`_schema.Table.autoload` to run a new reflection
operation against the database, even if a :class:`_schema.Table`
of the same name is already present in the target
:class:`_schema.MetaData`; newly reflected :class:`_schema.Column`
objects
and other options will be added into the state of the
:class:`_schema.Table`, potentially overwriting existing columns
and options of the same name.
As is always the case with :paramref:`_schema.Table.autoload`,
:class:`_schema.Column` objects can be specified in the same
:class:`_schema.Table`
constructor, which will take precedence. Below, the existing
table ``mytable`` will be augmented with :class:`_schema.Column`
objects
both reflected from the database, as well as the given
:class:`_schema.Column`
named "y"::
Table("mytable", metadata,
Column('y', Integer),
extend_existing=True,
autoload_with=engine
)
.. seealso::
:paramref:`_schema.Table.autoload`
:paramref:`_schema.Table.autoload_replace`
:paramref:`_schema.Table.keep_existing`
:param implicit_returning: True by default - indicates that
RETURNING can be used by default to fetch newly inserted primary key
values, for backends which support this. Note that
:func:`_sa.create_engine` also provides an ``implicit_returning``
flag.
:param include_columns: A list of strings indicating a subset of
columns to be loaded via the ``autoload`` operation; table columns who
aren't present in this list will not be represented on the resulting
``Table`` object. Defaults to ``None`` which indicates all columns
should be reflected.
:param resolve_fks: Whether or not to reflect :class:`_schema.Table`
objects
related to this one via :class:`_schema.ForeignKey` objects, when
:paramref:`_schema.Table.autoload` or
:paramref:`_schema.Table.autoload_with` is
specified. Defaults to True. Set to False to disable reflection of
related tables as :class:`_schema.ForeignKey`
objects are encountered; may be
used either to save on SQL calls or to avoid issues with related tables
that can't be accessed. Note that if a related table is already present
in the :class:`_schema.MetaData` collection, or becomes present later,
a
:class:`_schema.ForeignKey` object associated with this
:class:`_schema.Table` will
resolve to that table normally.
.. versionadded:: 1.3
.. seealso::
:paramref:`.MetaData.reflect.resolve_fks`
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
:param keep_existing: When ``True``, indicates that if this Table
is already present in the given :class:`_schema.MetaData`, ignore
further arguments within the constructor to the existing
:class:`_schema.Table`, and return the :class:`_schema.Table`
object as
originally created. This is to allow a function that wishes
to define a new :class:`_schema.Table` on first call, but on
subsequent calls will return the same :class:`_schema.Table`,
without any of the declarations (particularly constraints)
being applied a second time.
If :paramref:`_schema.Table.extend_existing` or
:paramref:`_schema.Table.keep_existing` are not set,
and the given name
of the new :class:`_schema.Table` refers to a :class:`_schema.Table`
that is
already present in the target :class:`_schema.MetaData` collection,
and
this :class:`_schema.Table`
specifies additional columns or other constructs
or flags that modify the table's state, an
error is raised. The purpose of these two mutually-exclusive flags
is to specify what action should be taken when a
:class:`_schema.Table`
is specified that matches an existing :class:`_schema.Table`,
yet specifies
additional constructs.
.. seealso::
:paramref:`_schema.Table.extend_existing`
:param listeners: A list of tuples of the form ``(<eventname>, <fn>)``
which will be passed to :func:`.event.listen` upon construction.
This alternate hook to :func:`.event.listen` allows the establishment
of a listener function specific to this :class:`_schema.Table` before
the "autoload" process begins. Historically this has been intended
for use with the :meth:`.DDLEvents.column_reflect` event, however
note that this event hook may now be associated with the
:class:`_schema.MetaData` object directly::
def listen_for_reflect(table, column_info):
"handle the column reflection event"
# ...
t = Table(
'sometable',
autoload_with=engine,
listeners=[
('column_reflect', listen_for_reflect)
])
.. seealso::
:meth:`_events.DDLEvents.column_reflect`
:param must_exist: When ``True``, indicates that this Table must already
be present in the given :class:`_schema.MetaData` collection, else
an exception is raised.
:param prefixes:
A list of strings to insert after CREATE in the CREATE TABLE
statement. They will be separated by spaces.
:param quote: Force quoting of this table's name on or off, corresponding
to ``True`` or ``False``. When left at its default of ``None``,
the column identifier will be quoted according to whether the name is
case sensitive (identifiers with at least one upper case character are
treated as case sensitive), or if it's a reserved word. This flag
is only needed to force quoting of a reserved word which is not known
by the SQLAlchemy dialect.
.. note:: setting this flag to ``False`` will not provide
case-insensitive behavior for table reflection; table reflection
will always search for a mixed-case name in a case sensitive
fashion. Case insensitive names are specified in SQLAlchemy only
by stating the name with all lower case characters.
:param quote_schema: same as 'quote' but applies to the schema identifier.
:param schema: The schema name for this table, which is required if
the table resides in a schema other than the default selected schema
for the engine's database connection. Defaults to ``None``.
If the owning :class:`_schema.MetaData` of this :class:`_schema.Table`
specifies its
own :paramref:`_schema.MetaData.schema` parameter,
then that schema name will
be applied to this :class:`_schema.Table`
if the schema parameter here is set
to ``None``. To set a blank schema name on a :class:`_schema.Table`
that
would otherwise use the schema set on the owning
:class:`_schema.MetaData`,
specify the special symbol :attr:`.BLANK_SCHEMA`.
.. versionadded:: 1.0.14 Added the :attr:`.BLANK_SCHEMA` symbol to
allow a :class:`_schema.Table`
to have a blank schema name even when the
parent :class:`_schema.MetaData` specifies
:paramref:`_schema.MetaData.schema`.
The quoting rules for the schema name are the same as those for the
``name`` parameter, in that quoting is applied for reserved words or
case-sensitive names; to enable unconditional quoting for the schema
name, specify the flag ``quote_schema=True`` to the constructor, or use
the :class:`.quoted_name` construct to specify the name.
:param comment: Optional string that will render an SQL comment on table
creation.
.. versionadded:: 1.2 Added the :paramref:`_schema.Table.comment`
parameter
to :class:`_schema.Table`.
:param \**kw: Additional keyword arguments not mentioned above are
dialect specific, and passed in the form ``<dialectname>_<argname>``.
See the documentation regarding an individual dialect at
:ref:`dialect_toplevel` for detail on documented arguments.
"""
__visit_name__ = "table"
constraints = None
"""A collection of all :class:`_schema.Constraint` objects associated with
this :class:`_schema.Table`.
Includes :class:`_schema.PrimaryKeyConstraint`,
:class:`_schema.ForeignKeyConstraint`, :class:`_schema.UniqueConstraint`,
:class:`_schema.CheckConstraint`. A separate collection
:attr:`_schema.Table.foreign_key_constraints` refers to the collection
of all :class:`_schema.ForeignKeyConstraint` objects, and the
:attr:`_schema.Table.primary_key` attribute refers to the single
:class:`_schema.PrimaryKeyConstraint` associated with the
:class:`_schema.Table`.
.. seealso::
:attr:`_schema.Table.constraints`
:attr:`_schema.Table.primary_key`
:attr:`_schema.Table.foreign_key_constraints`
:attr:`_schema.Table.indexes`
:class:`_reflection.Inspector`
"""
indexes = None
"""A collection of all :class:`_schema.Index` objects associated with this
:class:`_schema.Table`.
.. seealso::
:meth:`_reflection.Inspector.get_indexes`
"""
_traverse_internals = TableClause._traverse_internals + [
("schema", InternalTraversal.dp_string)
]
def _gen_cache_key(self, anon_map, bindparams):
if self._annotations:
return (self,) + self._annotations_cache_key
else:
return (self,)
@util.deprecated_params(
mustexist=(
"1.4",
"Deprecated alias of :paramref:`_schema.Table.must_exist`",
),
autoload=(
"2.0",
"The autoload parameter is deprecated and will be removed in "
"version 2.0. Please use the "
"autoload_with parameter, passing an engine or connection.",
),
)
def __new__(cls, *args, **kw):
if not args and not kw:
# python3k pickle seems to call this
return object.__new__(cls)
try:
name, metadata, args = args[0], args[1], args[2:]
except IndexError:
raise TypeError(
"Table() takes at least two positional-only "
"arguments 'name' and 'metadata'"
)
schema = kw.get("schema", None)
if schema is None:
schema = metadata.schema
elif schema is BLANK_SCHEMA:
schema = None
keep_existing = kw.get("keep_existing", False)
extend_existing = kw.get("extend_existing", False)
if keep_existing and extend_existing:
msg = "keep_existing and extend_existing are mutually exclusive."
raise exc.ArgumentError(msg)
must_exist = kw.pop("must_exist", kw.pop("mustexist", False))
key = _get_table_key(name, schema)
if key in metadata.tables:
if not keep_existing and not extend_existing and bool(args):
raise exc.InvalidRequestError(
"Table '%s' is already defined for this MetaData "
"instance. Specify 'extend_existing=True' "
"to redefine "
"options and columns on an "
"existing Table object." % key
)
table = metadata.tables[key]
if extend_existing:
table._init_existing(*args, **kw)
return table
else:
if must_exist:
raise exc.InvalidRequestError("Table '%s' not defined" % (key))
table = object.__new__(cls)
table.dispatch.before_parent_attach(table, metadata)
metadata._add_table(name, schema, table)
try:
table._init(name, metadata, *args, **kw)
table.dispatch.after_parent_attach(table, metadata)
return table
except Exception:
with util.safe_reraise():
metadata._remove_table(name, schema)
def __init__(self, *args, **kw):
"""Constructor for :class:`_schema.Table`.
This method is a no-op. See the top-level
documentation for :class:`_schema.Table`
for constructor arguments.
"""
# __init__ is overridden to prevent __new__ from
# calling the superclass constructor.
def _init(self, name, metadata, *args, **kwargs):
super(Table, self).__init__(
quoted_name(name, kwargs.pop("quote", None))
)
self.metadata = metadata
self.schema = kwargs.pop("schema", None)
if self.schema is None:
self.schema = metadata.schema
elif self.schema is BLANK_SCHEMA:
self.schema = None
else:
quote_schema = kwargs.pop("quote_schema", None)
self.schema = quoted_name(self.schema, quote_schema)
self.indexes = set()
self.constraints = set()
PrimaryKeyConstraint(
_implicit_generated=True
)._set_parent_with_dispatch(self)
self.foreign_keys = set()
self._extra_dependencies = set()
if self.schema is not None:
self.fullname = "%s.%s" % (self.schema, self.name)
else:
self.fullname = self.name
autoload_with = kwargs.pop("autoload_with", None)
autoload = kwargs.pop("autoload", autoload_with is not None)
# this argument is only used with _init_existing()
kwargs.pop("autoload_replace", True)
keep_existing = kwargs.pop("keep_existing", False)
extend_existing = kwargs.pop("extend_existing", False)
_extend_on = kwargs.pop("_extend_on", None)
resolve_fks = kwargs.pop("resolve_fks", True)
include_columns = kwargs.pop("include_columns", None)
self.implicit_returning = kwargs.pop("implicit_returning", True)
self.comment = kwargs.pop("comment", None)
if "info" in kwargs:
self.info = kwargs.pop("info")
if "listeners" in kwargs:
listeners = kwargs.pop("listeners")
for evt, fn in listeners:
event.listen(self, evt, fn)
self._prefixes = kwargs.pop("prefixes", None) or []
self._extra_kwargs(**kwargs)
# load column definitions from the database if 'autoload' is defined
# we do it after the table is in the singleton dictionary to support
# circular foreign keys
if autoload:
self._autoload(
metadata,
autoload_with,
include_columns,
_extend_on=_extend_on,
resolve_fks=resolve_fks,
)
# initialize all the column, etc. objects. done after reflection to
# allow user-overrides
self._init_items(
*args,
allow_replacements=extend_existing or keep_existing or autoload
)
def _autoload(
self,
metadata,
autoload_with,
include_columns,
exclude_columns=(),
resolve_fks=True,
_extend_on=None,
):
if autoload_with is None:
autoload_with = _bind_or_error(
metadata,
msg="No engine is bound to this Table's MetaData. "
"Pass an engine to the Table via "
"autoload_with=<someengine_or_connection>",
)
insp = inspection.inspect(autoload_with)
with insp._inspection_context() as conn_insp:
conn_insp.reflect_table(
self,
include_columns,
exclude_columns,
resolve_fks,
_extend_on=_extend_on,
)
@property
def _sorted_constraints(self):
"""Return the set of constraints as a list, sorted by creation
order.
"""
return sorted(self.constraints, key=lambda c: c._creation_order)
@property
def foreign_key_constraints(self):
""":class:`_schema.ForeignKeyConstraint` objects referred to by this
:class:`_schema.Table`.
This list is produced from the collection of
:class:`_schema.ForeignKey`
objects currently associated.
.. seealso::
:attr:`_schema.Table.constraints`
:attr:`_schema.Table.foreign_keys`
:attr:`_schema.Table.indexes`
"""
return set(fkc.constraint for fkc in self.foreign_keys)
def _init_existing(self, *args, **kwargs):
autoload_with = kwargs.pop("autoload_with", None)
autoload = kwargs.pop("autoload", autoload_with is not None)
autoload_replace = kwargs.pop("autoload_replace", True)
schema = kwargs.pop("schema", None)
_extend_on = kwargs.pop("_extend_on", None)
# these arguments are only used with _init()
kwargs.pop("extend_existing", False)
kwargs.pop("keep_existing", False)
if schema and schema != self.schema:
raise exc.ArgumentError(
"Can't change schema of existing table from '%s' to '%s'",
(self.schema, schema),
)
include_columns = kwargs.pop("include_columns", None)
if include_columns is not None:
for c in self.c:
if c.name not in include_columns:
self._columns.remove(c)
resolve_fks = kwargs.pop("resolve_fks", True)
for key in ("quote", "quote_schema"):
if key in kwargs:
raise exc.ArgumentError(
"Can't redefine 'quote' or 'quote_schema' arguments"
)
# update `self` with these kwargs, if provided
self.comment = kwargs.pop("comment", self.comment)
self.implicit_returning = kwargs.pop(
"implicit_returning", self.implicit_returning
)
self.info = kwargs.pop("info", self.info)
if autoload:
if not autoload_replace:
# don't replace columns already present.
# we'd like to do this for constraints also however we don't
# have simple de-duping for unnamed constraints.
exclude_columns = [c.name for c in self.c]
else:
exclude_columns = ()
self._autoload(
self.metadata,
autoload_with,
include_columns,
exclude_columns,
resolve_fks,
_extend_on=_extend_on,
)
self._extra_kwargs(**kwargs)
self._init_items(*args)
def _extra_kwargs(self, **kwargs):
self._validate_dialect_kwargs(kwargs)
def _init_collections(self):
pass
def _reset_exported(self):
pass
@property
def _autoincrement_column(self):
return self.primary_key._autoincrement_column
@property
def key(self):
"""Return the 'key' for this :class:`_schema.Table`.
This value is used as the dictionary key within the
:attr:`_schema.MetaData.tables` collection. It is typically the same
as that of :attr:`_schema.Table.name` for a table with no
:attr:`_schema.Table.schema`
set; otherwise it is typically of the form
``schemaname.tablename``.
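For example, a short sketch assuming a :class:`_schema.MetaData` named
``metadata_obj`` (the table and schema names are illustrative)::
t = Table("user_account", metadata_obj, schema="dbo")
t.key   # "dbo.user_account"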
"""
return _get_table_key(self.name, self.schema)
def __repr__(self):
return "Table(%s)" % ", ".join(
[repr(self.name)]
+ [repr(self.metadata)]
+ [repr(x) for x in self.columns]
+ ["%s=%s" % (k, repr(getattr(self, k))) for k in ["schema"]]
)
def __str__(self):
return _get_table_key(self.description, self.schema)
@property
def bind(self):
"""Return the connectable associated with this Table."""
return self.metadata and self.metadata.bind or None
def add_is_dependent_on(self, table):
"""Add a 'dependency' for this Table.
This is another Table object which must be created
first before this one can, or dropped after this one.
Usually, dependencies between tables are determined via
ForeignKey objects. However, for other situations that
create dependencies outside of foreign keys (rules, inheriting),
this method can manually establish such a link.
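For example, a short sketch assuming two already-defined
:class:`_schema.Table` objects, here called ``trigger_source`` and
``log_table`` (both names are illustrative)::
# ensure trigger_source is created before, and dropped after, log_table
log_table.add_is_dependent_on(trigger_source)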
"""
self._extra_dependencies.add(table)
def append_column(self, column, replace_existing=False):
"""Append a :class:`_schema.Column` to this :class:`_schema.Table`.
The "key" of the newly added :class:`_schema.Column`, i.e. the
value of its ``.key`` attribute, will then be available
in the ``.c`` collection of this :class:`_schema.Table`, and the
column definition will be included in any CREATE TABLE, SELECT,
UPDATE, etc. statements generated from this :class:`_schema.Table`
construct.
Note that this does **not** change the definition of the table
as it exists within any underlying database, assuming that
table has already been created in the database. Relational
databases support the addition of columns to existing tables
using the SQL ALTER command, which would need to be
emitted for an already-existing table that doesn't contain
the newly added column.
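For example, a short sketch using illustrative table and column names::
my_table = Table("some_table", metadata_obj, Column("x", Integer))
my_table.append_column(Column("y", Integer))
# the new column is now present as my_table.c.y and will be included
# in CREATE TABLE statements generated for my_table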
:param replace_existing: When ``True``, allows replacing existing
columns. When ``False``, the default, a deprecation warning will be
raised if a column with the same ``.key`` already exists. A future
version of SQLAlchemy will raise an error in this case instead.
.. versionadded:: 1.4.0
"""
column._set_parent_with_dispatch(
self, allow_replacements=replace_existing
)
def append_constraint(self, constraint):
"""Append a :class:`_schema.Constraint` to this
:class:`_schema.Table`.
This has the effect of the constraint being included in any
future CREATE TABLE statement, assuming specific DDL creation
events have not been associated with the given
:class:`_schema.Constraint` object.
Note that this does **not** produce the constraint within the
relational database automatically, for a table that already exists
in the database. To add a constraint to an
existing relational database table, the SQL ALTER command must
be used. SQLAlchemy also provides the
:class:`.AddConstraint` construct which can produce this SQL when
invoked as an executable clause.
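For example, a short sketch assuming a :class:`_schema.Table` named
``my_table`` with columns ``x`` and ``y`` (all names illustrative)::
my_table.append_constraint(UniqueConstraint("x", "y", name="uq_my_table_x_y"))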
"""
constraint._set_parent_with_dispatch(self)
def _set_parent(self, metadata, **kw):
metadata._add_table(self.name, self.schema, self)
self.metadata = metadata
@util.deprecated(
"1.4",
"The :meth:`_schema.Table.exists` method is deprecated and will be "
"removed in a future release. Please refer to "
":meth:`_reflection.Inspector.has_table`.",
)
def exists(self, bind=None):
"""Return True if this table exists."""
if bind is None:
bind = _bind_or_error(self)
insp = inspection.inspect(bind)
return insp.has_table(self.name, schema=self.schema)
def create(self, bind=None, checkfirst=False):
"""Issue a ``CREATE`` statement for this
:class:`_schema.Table`, using the given :class:`.Connectable`
for connectivity.
.. note:: the "bind" argument will be required in
SQLAlchemy 2.0.
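For example, a short sketch assuming an :class:`_engine.Engine` named
``engine`` is available::
my_table.create(bind=engine, checkfirst=True)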
.. seealso::
:meth:`_schema.MetaData.create_all`.
"""
if bind is None:
bind = _bind_or_error(self)
bind._run_ddl_visitor(ddl.SchemaGenerator, self, checkfirst=checkfirst)
def drop(self, bind=None, checkfirst=False):
"""Issue a ``DROP`` statement for this
:class:`_schema.Table`, using the given :class:`.Connectable`
for connectivity.
.. note:: the "bind" argument will be required in
SQLAlchemy 2.0.
.. seealso::
:meth:`_schema.MetaData.drop_all`.
"""
if bind is None:
bind = _bind_or_error(self)
bind._run_ddl_visitor(ddl.SchemaDropper, self, checkfirst=checkfirst)
@util.deprecated(
"1.4",
":meth:`_schema.Table.tometadata` is renamed to "
":meth:`_schema.Table.to_metadata`",
)
def tometadata(
self,
metadata,
schema=RETAIN_SCHEMA,
referred_schema_fn=None,
name=None,
):
"""Return a copy of this :class:`_schema.Table`
associated with a different
:class:`_schema.MetaData`.
See :meth:`_schema.Table.to_metadata` for a full description.
"""
return self.to_metadata(
metadata,
schema=schema,
referred_schema_fn=referred_schema_fn,
name=name,
)
def to_metadata(
self,
metadata,
schema=RETAIN_SCHEMA,
referred_schema_fn=None,
name=None,
):
"""Return a copy of this :class:`_schema.Table` associated with a
different :class:`_schema.MetaData`.
E.g.::
m1 = MetaData()
user = Table('user', m1, Column('id', Integer, primary_key=True))
m2 = MetaData()
user_copy = user.to_metadata(m2)
.. versionchanged:: 1.4 The :meth:`_schema.Table.to_metadata` function
was renamed from :meth:`_schema.Table.tometadata`.
:param metadata: Target :class:`_schema.MetaData` object,
into which the
new :class:`_schema.Table` object will be created.
:param schema: optional string name indicating the target schema.
Defaults to the special symbol :attr:`.RETAIN_SCHEMA` which indicates
that no change to the schema name should be made in the new
:class:`_schema.Table`. If set to a string name, the new
:class:`_schema.Table`
will have this new name as the ``.schema``. If set to ``None``, the
schema will be set to that of the schema set on the target
:class:`_schema.MetaData`, which is typically ``None`` as well,
unless
set explicitly::
m2 = MetaData(schema='newschema')
# user_copy_one will have "newschema" as the schema name
user_copy_one = user.to_metadata(m2, schema=None)
m3 = MetaData() # schema defaults to None
# user_copy_two will have None as the schema name
user_copy_two = user.to_metadata(m3, schema=None)
:param referred_schema_fn: optional callable which can be supplied
in order to provide for the schema name that should be assigned
to the referenced table of a :class:`_schema.ForeignKeyConstraint`.
The callable accepts this parent :class:`_schema.Table`, the
target schema that we are changing to, the
:class:`_schema.ForeignKeyConstraint` object, and the existing
"target schema" of that constraint. The function should return the
string schema name that should be applied. To reset the schema
to "none", return the symbol :data:`.BLANK_SCHEMA`. To effect no
change, return ``None`` or :data:`.RETAIN_SCHEMA`.
.. versionchanged:: 1.4.33 The ``referred_schema_fn`` function
may return the :data:`.BLANK_SCHEMA` or :data:`.RETAIN_SCHEMA`
symbols.
E.g.::
def referred_schema_fn(table, to_schema,
constraint, referred_schema):
if referred_schema == 'base_tables':
return referred_schema
else:
return to_schema
new_table = table.to_metadata(m2, schema="alt_schema",
referred_schema_fn=referred_schema_fn)
.. versionadded:: 0.9.2
:param name: optional string name indicating the target table name.
If not specified or None, the table name is retained. This allows
a :class:`_schema.Table` to be copied to the same
:class:`_schema.MetaData` target
with a new name.
.. versionadded:: 1.0.0
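Continuing the example above, a short sketch of copying ``user`` back
into its own :class:`_schema.MetaData` under an illustrative new name::
user_archive = user.to_metadata(m1, name="user_archive")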
"""
if name is None:
name = self.name
if schema is RETAIN_SCHEMA:
schema = self.schema
elif schema is None:
schema = metadata.schema
key = _get_table_key(name, schema)
if key in metadata.tables:
util.warn(
"Table '%s' already exists within the given "
"MetaData - not copying." % self.description
)
return metadata.tables[key]
args = []
for c in self.columns:
args.append(c._copy(schema=schema))
table = Table(
name,
metadata,
schema=schema,
comment=self.comment,
*args,
**self.kwargs
)
for c in self.constraints:
if isinstance(c, ForeignKeyConstraint):
referred_schema = c._referred_schema
if referred_schema_fn:
fk_constraint_schema = referred_schema_fn(
self, schema, c, referred_schema
)
else:
fk_constraint_schema = (
schema if referred_schema == self.schema else None
)
table.append_constraint(
c._copy(schema=fk_constraint_schema, target_table=table)
)
elif not c._type_bound:
# skip unique constraints that would be generated
# by the 'unique' flag on Column
if c._column_flag:
continue
table.append_constraint(
c._copy(schema=schema, target_table=table)
)
for index in self.indexes:
# skip indexes that would be generated
# by the 'index' flag on Column
if index._column_flag:
continue
Index(
index.name,
unique=index.unique,
*[
_copy_expression(expr, self, table)
for expr in index.expressions
],
_table=table,
**index.kwargs
)
return self._schema_item_copy(table)
class Column(DialectKWArgs, SchemaItem, ColumnClause):
"""Represents a column in a database table."""
__visit_name__ = "column"
inherit_cache = True
def __init__(self, *args, **kwargs):
r"""
Construct a new ``Column`` object.
:param name: The name of this column as represented in the database.
This argument may be the first positional argument, or specified
via keyword.
Names which contain no upper case characters
will be treated as case insensitive names, and will not be quoted
unless they are a reserved word. Names with any number of upper
case characters will be quoted and sent exactly. Note that this
behavior applies even for databases which standardize upper
case names as case insensitive such as Oracle.
The name field may be omitted at construction time and applied
later, at any time before the Column is associated with a
:class:`_schema.Table`. This is to support convenient
usage within the :mod:`~sqlalchemy.ext.declarative` extension.
:param type\_: The column's type, indicated using an instance which
subclasses :class:`~sqlalchemy.types.TypeEngine`. If no arguments
are required for the type, the class of the type can be sent
as well, e.g.::
# use a type with arguments
Column('data', String(50))
# use no arguments
Column('level', Integer)
The ``type`` argument may be the second positional argument
or specified by keyword.
If the ``type`` is ``None`` or is omitted, it will first default to
the special type :class:`.NullType`. If and when this
:class:`_schema.Column` is made to refer to another column using
:class:`_schema.ForeignKey` and/or
:class:`_schema.ForeignKeyConstraint`, the type
of the remote-referenced column will be copied to this column as
well, at the moment that the foreign key is resolved against that
remote :class:`_schema.Column` object.
.. versionchanged:: 0.9.0
Support for propagation of type to a :class:`_schema.Column`
from its
:class:`_schema.ForeignKey` object has been improved and should be
more reliable and timely.
:param \*args: Additional positional arguments include various
:class:`.SchemaItem` derived constructs which will be applied
as options to the column. These include instances of
:class:`.Constraint`, :class:`_schema.ForeignKey`,
:class:`.ColumnDefault`, :class:`.Sequence`, :class:`.Computed` and
:class:`.Identity`. In some cases an
equivalent keyword argument is available such as ``server_default``,
``default`` and ``unique``.
:param autoincrement: Set up "auto increment" semantics for an
**integer primary key column with no foreign key dependencies**
(see later in this docstring for a more specific definition).
This may influence the :term:`DDL` that will be emitted for
this column during a table create, as well as how the column
will be considered when INSERT statements are compiled and
executed.
The default value is the string ``"auto"``,
which indicates that a single-column (i.e. non-composite) primary key
that is of an INTEGER type with no other client-side or server-side
default constructs indicated should receive auto increment semantics
automatically. Other values include ``True`` (force this column to
have auto-increment semantics for a :term:`composite primary key` as
well), ``False`` (this column should never have auto-increment
semantics), and the string ``"ignore_fk"`` (special-case for foreign
key columns, see below).
The term "auto increment semantics" refers both to the kind of DDL
that will be emitted for the column within a CREATE TABLE statement,
when methods such as :meth:`.MetaData.create_all` and
:meth:`.Table.create` are invoked, as well as how the column will be
considered when an INSERT statement is compiled and emitted to the
database:
* **DDL rendering** (i.e. :meth:`.MetaData.create_all`,
:meth:`.Table.create`): When used on a :class:`.Column` that has
no other
default-generating construct associated with it (such as a
:class:`.Sequence` or :class:`.Identity` construct), the parameter
will imply that database-specific keywords such as PostgreSQL
``SERIAL``, MySQL ``AUTO_INCREMENT``, or ``IDENTITY`` on SQL Server
should also be rendered. Not every database backend has an
"implied" default generator available; for example the Oracle
backend always needs an explicit construct such as
:class:`.Identity` to be included with a :class:`.Column` in order
for the DDL rendered to include auto-generating constructs to also
be produced in the database.
* **INSERT semantics** (i.e. when a :func:`_sql.insert` construct is
compiled into a SQL string and is then executed on a database using
:meth:`_engine.Connection.execute` or equivalent): A single-row
INSERT statement will be known to produce a new integer primary key
value automatically for this column, which will be accessible
after the statement is invoked via the
:attr:`.CursorResult.inserted_primary_key` attribute upon the
:class:`_result.Result` object. This also applies towards use of the
ORM when ORM-mapped objects are persisted to the database,
indicating that a new integer primary key will be available to
become part of the :term:`identity key` for that object. This
behavior takes place regardless of what DDL constructs are
associated with the :class:`_schema.Column` and is independent
of the "DDL Rendering" behavior discussed in the previous note
above.
The parameter may be set to ``True`` to indicate that a column which
is part of a composite (i.e. multi-column) primary key should
have autoincrement semantics, though note that only one column
within a primary key may have this setting. It can also
be set to ``True`` to indicate autoincrement semantics on a
column that has a client-side or server-side default configured,
however note that not all dialects can accommodate all styles
of default as an "autoincrement". It can also be
set to ``False`` on a single-column primary key that has a
datatype of INTEGER in order to disable auto increment semantics
for that column.
.. versionchanged:: 1.1 The autoincrement flag now defaults to
``"auto"`` which indicates autoincrement semantics by default
for single-column integer primary keys only; for composite
(multi-column) primary keys, autoincrement is never implicitly
enabled; as always, ``autoincrement=True`` will allow for
at most one of those columns to be an "autoincrement" column.
``autoincrement=True`` may also be set on a
:class:`_schema.Column`
that has an explicit client-side or server-side default,
subject to limitations of the backend database and dialect.
The setting *only* has an effect for columns which are:
* Integer derived (i.e. INT, SMALLINT, BIGINT).
* Part of the primary key
* Not referring to another column via :class:`_schema.ForeignKey`,
unless
the value is specified as ``'ignore_fk'``::
# turn on autoincrement for this column despite
# the ForeignKey()
Column('id', ForeignKey('other.id'),
primary_key=True, autoincrement='ignore_fk')
It is typically not desirable to have "autoincrement" enabled on a
column that refers to another via foreign key, as such a column is
required to refer to a value that originates from elsewhere.
The setting has these effects on columns that meet the
above criteria:
* DDL issued for the column, if the column does not already include
a default generating construct supported by the backend such as
:class:`.Identity`, will include database-specific
keywords intended to signify this column as an
"autoincrement" column for specific backends. Behavior for
primary SQLAlchemy dialects includes:
* ``AUTO_INCREMENT`` on MySQL and MariaDB
* SERIAL on PostgreSQL
* IDENTITY on MS-SQL - this occurs even without the
:class:`.Identity` construct as the
:paramref:`.Column.autoincrement` parameter pre-dates this
construct.
* SQLite - SQLite integer primary key columns are implicitly
"auto incrementing" and no additional keywords are rendered;
the special SQLite keyword ``AUTOINCREMENT``
is not rendered, as it is unnecessary and not recommended
by the database vendor. See the section
:ref:`sqlite_autoincrement` for more background.
* Oracle - The Oracle dialect has no default "autoincrement"
feature available at this time, instead the :class:`.Identity`
construct is recommended to achieve this (the :class:`.Sequence`
construct may also be used).
* Third-party dialects - consult those dialects' documentation
for details on their specific behaviors.
* When a single-row :func:`_sql.insert` construct is compiled and
executed, which does not set the :meth:`_sql.Insert.inline`
modifier, newly generated primary key values for this column
will be automatically retrieved upon statement execution
using a method specific to the database driver in use:
* MySQL, SQLite - reading the DBAPI ``cursor.lastrowid`` attribute
(see
`https://www.python.org/dev/peps/pep-0249/#lastrowid
<https://www.python.org/dev/peps/pep-0249/#lastrowid>`_)
* PostgreSQL, SQL Server, Oracle - use RETURNING or an equivalent
construct when rendering an INSERT statement, and then retrieving
the newly generated primary key values after execution
* PostgreSQL, Oracle for :class:`_schema.Table` objects that
set :paramref:`_schema.Table.implicit_returning` to False -
for a :class:`.Sequence` only, the :class:`.Sequence` is invoked
explicitly before the INSERT statement takes place so that the
newly generated primary key value is available to the client
* SQL Server for :class:`_schema.Table` objects that
set :paramref:`_schema.Table.implicit_returning` to False -
the ``SELECT scope_identity()`` construct is used after the
INSERT statement is invoked to retrieve the newly generated
primary key value.
* Third-party dialects - consult those dialects' documentation
for details on their specific behaviors.
* For multiple-row :func:`_sql.insert` constructs invoked with
a list of parameters (i.e. "executemany" semantics), primary-key
retrieving behaviors are generally disabled, however there may
be special APIs that may be used to retrieve lists of new
primary key values for an "executemany", such as the psycopg2
"fast insertmany" feature. Such features are very new and
may not yet be well covered in documentation.
:param default: A scalar, Python callable, or
:class:`_expression.ColumnElement` expression representing the
*default value* for this column, which will be invoked upon insert
if this column is otherwise not specified in the VALUES clause of
the insert. This is a shortcut to using :class:`.ColumnDefault` as
a positional argument; see that class for full detail on the
structure of the argument.
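For example, a short sketch showing a scalar default and a Python
callable default; the column names are illustrative and the standard
:mod:`datetime` module is assumed::
import datetime
Column("status", String(20), default="pending")
Column("created_at", DateTime, default=datetime.datetime.utcnow)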
Contrast this argument to
:paramref:`_schema.Column.server_default`
which creates a default generator on the database side.
.. seealso::
:ref:`metadata_defaults_toplevel`
:param doc: optional String that can be used by the ORM or similar
to document attributes on the Python side. This attribute does
**not** render SQL comments; use the
:paramref:`_schema.Column.comment`
parameter for this purpose.
:param key: An optional string identifier which will identify this
``Column`` object on the :class:`_schema.Table`.
When a key is provided,
this is the only identifier referencing the ``Column`` within the
application, including ORM attribute mapping; the ``name`` field
is used only when rendering SQL.
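For example, a short sketch using illustrative names::
user_name = Column("user_name", String(50), key="uname")
# once attached to a Table, the column is addressed in Python as
# some_table.c.uname, while "user_name" is rendered in SQL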
:param index: When ``True``, indicates that a :class:`_schema.Index`
construct will be automatically generated for this
:class:`_schema.Column`, which will result in a "CREATE INDEX"
statement being emitted for the :class:`_schema.Table` when the DDL
create operation is invoked.
Using this flag is equivalent to making use of the
:class:`_schema.Index` construct explicitly at the level of the
:class:`_schema.Table` construct itself::
Table(
"some_table",
metadata,
Column("x", Integer),
Index("ix_some_table_x", "x")
)
To add the :paramref:`_schema.Index.unique` flag to the
:class:`_schema.Index`, set both the
:paramref:`_schema.Column.unique` and
:paramref:`_schema.Column.index` flags to True simultaneously,
which will have the effect of rendering the "CREATE UNIQUE INDEX"
DDL instruction instead of "CREATE INDEX".
The name of the index is generated using the
:ref:`default naming convention <constraint_default_naming_convention>`
which for the :class:`_schema.Index` construct is of the form
``ix_<tablename>_<columnname>``.
As this flag is intended only as a convenience for the common case
of adding a single-column, default configured index to a table
definition, explicit use of the :class:`_schema.Index` construct
should be preferred for most use cases, including composite indexes
that encompass more than one column, indexes with SQL expressions
or ordering, backend-specific index configuration options, and
indexes that use a specific name.
.. note:: the :attr:`_schema.Column.index` attribute on
:class:`_schema.Column`
**does not indicate** if this column is indexed or not, only
if this flag was explicitly set here. To view indexes on
a column, view the :attr:`_schema.Table.indexes` collection
or use :meth:`_reflection.Inspector.get_indexes`.
.. seealso::
:ref:`schema_indexes`
:ref:`constraint_naming_conventions`
:paramref:`_schema.Column.unique`
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
:param nullable: When set to ``False``, will cause the "NOT NULL"
phrase to be added when generating DDL for the column. When
``True``, will normally generate nothing (in SQL this defaults to
"NULL"), except in some very specific backend-specific edge cases
where "NULL" may render explicitly.
Defaults to ``True`` unless :paramref:`_schema.Column.primary_key`
is also ``True`` or the column specifies a :class:`_sql.Identity`,
in which case it defaults to ``False``.
This parameter is only used when issuing CREATE TABLE statements.
.. note::
When the column specifies a :class:`_sql.Identity` this
parameter is in general ignored by the DDL compiler. The
PostgreSQL database allows a nullable identity column by
setting this parameter to ``True`` explicitly.
:param onupdate: A scalar, Python callable, or
:class:`~sqlalchemy.sql.expression.ClauseElement` representing a
default value to be applied to the column within UPDATE
statements, which will be invoked upon update if this column is not
present in the SET clause of the update. This is a shortcut to
using :class:`.ColumnDefault` as a positional argument with
``for_update=True``.
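For example, a short sketch using an illustrative column name and the
standard :mod:`datetime` module::
import datetime
Column("updated_at", DateTime, onupdate=datetime.datetime.utcnow)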
.. seealso::
:ref:`metadata_defaults` - complete discussion of onupdate
:param primary_key: If ``True``, marks this column as a primary key
column. Multiple columns can have this flag set to specify
composite primary keys. As an alternative, the primary key of a
:class:`_schema.Table` can be specified via an explicit
:class:`.PrimaryKeyConstraint` object.
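For example, a short sketch of a two-column composite primary key,
using illustrative table and column names::
Table(
"order_item",
metadata_obj,
Column("order_id", Integer, primary_key=True),
Column("item_id", Integer, primary_key=True)
)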
:param server_default: A :class:`.FetchedValue` instance, str, Unicode
or :func:`~sqlalchemy.sql.expression.text` construct representing
the DDL DEFAULT value for the column.
String types will be emitted as-is, surrounded by single quotes::
Column('x', Text, server_default="val")
x TEXT DEFAULT 'val'
A :func:`~sqlalchemy.sql.expression.text` expression will be
rendered as-is, without quotes::
Column('y', DateTime, server_default=text('NOW()'))
y DATETIME DEFAULT NOW()
Strings and text() will be converted into a
:class:`.DefaultClause` object upon initialization.
This parameter can also accept complex combinations of contextually
valid SQLAlchemy expressions or constructs::
from sqlalchemy import create_engine
from sqlalchemy import Table, Column, MetaData, ARRAY, Text
from sqlalchemy.dialects.postgresql import array
engine = create_engine(
'postgresql://scott:tiger@localhost/mydatabase'
)
metadata_obj = MetaData()
tbl = Table(
"foo",
metadata_obj,
Column("bar",
ARRAY(Text),
server_default=array(["biz", "bang", "bash"])
)
)
metadata_obj.create_all(engine)
The above results in a table created with the following SQL::
CREATE TABLE foo (
bar TEXT[] DEFAULT ARRAY['biz', 'bang', 'bash']
)
Use :class:`.FetchedValue` to indicate that an already-existing
column will generate a default value on the database side which
will be available to SQLAlchemy for post-fetch after inserts. This
construct does not specify any DDL and the implementation is left
to the database, such as via a trigger.
.. seealso::
:ref:`server_defaults` - complete discussion of server side
defaults
:param server_onupdate: A :class:`.FetchedValue` instance
representing a database-side default generation function,
such as a trigger. This
indicates to SQLAlchemy that a newly generated value will be
available after updates. This construct does not actually
implement any kind of generation function within the database,
which instead must be specified separately.
.. warning:: This directive **does not** currently produce MySQL's
"ON UPDATE CURRENT_TIMESTAMP()" clause. See
:ref:`mysql_timestamp_onupdate` for background on how to
produce this clause.
.. seealso::
:ref:`triggered_columns`
:param quote: Force quoting of this column's name on or off,
corresponding to ``True`` or ``False``. When left at its default
of ``None``, the column identifier will be quoted according to
whether the name is case sensitive (identifiers with at least one
upper case character are treated as case sensitive), or if it's a
reserved word. This flag is only needed to force quoting of a
reserved word which is not known by the SQLAlchemy dialect.
:param unique: When ``True``, and the :paramref:`_schema.Column.index`
parameter is left at its default value of ``False``,
indicates that a :class:`_schema.UniqueConstraint`
construct will be automatically generated for this
:class:`_schema.Column`,
which will result in a "UNIQUE CONSTRAINT" clause referring
to this column being included
in the ``CREATE TABLE`` statement emitted, when the DDL create
operation for the :class:`_schema.Table` object is invoked.
When this flag is ``True`` while the
:paramref:`_schema.Column.index` parameter is simultaneously
set to ``True``, the effect instead is that a
:class:`_schema.Index` construct which includes the
:paramref:`_schema.Index.unique` parameter set to ``True``
is generated. See the documentation for
:paramref:`_schema.Column.index` for additional detail.
Using this flag is equivalent to making use of the
:class:`_schema.UniqueConstraint` construct explicitly at the
level of the :class:`_schema.Table` construct itself::
Table(
"some_table",
metadata,
Column("x", Integer),
UniqueConstraint("x")
)
The :paramref:`_schema.UniqueConstraint.name` parameter
of the unique constraint object is left at its default value
of ``None``; in the absence of a :ref:`naming convention <constraint_naming_conventions>`
for the enclosing :class:`_schema.MetaData`, the UNIQUE CONSTRAINT
construct will be emitted as unnamed, which typically invokes
a database-specific naming convention to take place.
As this flag is intended only as a convenience for the common case
of adding a single-column, default configured unique constraint to a table
definition, explicit use of the :class:`_schema.UniqueConstraint` construct
should be preferred for most use cases, including composite constraints
that encompass more than one column, backend-specific index configuration options, and
constraints that use a specific name.
.. note:: the :attr:`_schema.Column.unique` attribute on
:class:`_schema.Column`
**does not indicate** if this column has a unique constraint or
not, only if this flag was explicitly set here. To view
indexes and unique constraints that may involve this column,
view the
:attr:`_schema.Table.indexes` and/or
:attr:`_schema.Table.constraints` collections or use
:meth:`_reflection.Inspector.get_indexes` and/or
:meth:`_reflection.Inspector.get_unique_constraints`
.. seealso::
:ref:`schema_unique_constraint`
:ref:`constraint_naming_conventions`
:paramref:`_schema.Column.index`
:param system: When ``True``, indicates this is a "system" column,
that is a column which is automatically made available by the
database, and should not be included in the columns list for a
``CREATE TABLE`` statement.
For more elaborate scenarios where columns should be
conditionally rendered differently on different backends,
consider custom compilation rules for :class:`.CreateColumn`.
:param comment: Optional string that will render an SQL comment on
table creation.
.. versionadded:: 1.2 Added the
:paramref:`_schema.Column.comment`
parameter to :class:`_schema.Column`.
""" # noqa: E501, RST201, RST202
name = kwargs.pop("name", None)
type_ = kwargs.pop("type_", None)
args = list(args)
if args:
if isinstance(args[0], util.string_types):
if name is not None:
raise exc.ArgumentError(
"May not pass name positionally and as a keyword."
)
name = args.pop(0)
if args:
coltype = args[0]
if hasattr(coltype, "_sqla_type"):
if type_ is not None:
raise exc.ArgumentError(
"May not pass type_ positionally and as a keyword."
)
type_ = args.pop(0)
if name is not None:
name = quoted_name(name, kwargs.pop("quote", None))
elif "quote" in kwargs:
raise exc.ArgumentError(
"Explicit 'name' is required when " "sending 'quote' argument"
)
super(Column, self).__init__(name, type_)
self.key = kwargs.pop("key", name)
self.primary_key = primary_key = kwargs.pop("primary_key", False)
self._user_defined_nullable = udn = kwargs.pop(
"nullable", NULL_UNSPECIFIED
)
if udn is not NULL_UNSPECIFIED:
self.nullable = udn
else:
self.nullable = not primary_key
self.default = kwargs.pop("default", None)
self.server_default = kwargs.pop("server_default", None)
self.server_onupdate = kwargs.pop("server_onupdate", None)
# these default to None because .index and .unique are *not*
# informational flags about Column - there can still be an
# Index or UniqueConstraint referring to this Column.
self.index = kwargs.pop("index", None)
self.unique = kwargs.pop("unique", None)
self.system = kwargs.pop("system", False)
self.doc = kwargs.pop("doc", None)
self.onupdate = kwargs.pop("onupdate", None)
self.autoincrement = kwargs.pop("autoincrement", "auto")
self.constraints = set()
self.foreign_keys = set()
self.comment = kwargs.pop("comment", None)
self.computed = None
self.identity = None
# check if this Column is proxying another column
if "_proxies" in kwargs:
self._proxies = kwargs.pop("_proxies")
# otherwise, add DDL-related events
elif isinstance(self.type, SchemaEventTarget):
self.type._set_parent_with_dispatch(self)
if self.default is not None:
if isinstance(self.default, (ColumnDefault, Sequence)):
args.append(self.default)
else:
if getattr(self.type, "_warn_on_bytestring", False):
if isinstance(self.default, util.binary_type):
util.warn(
"Unicode column '%s' has non-unicode "
"default value %r specified."
% (self.key, self.default)
)
args.append(ColumnDefault(self.default))
if self.server_default is not None:
if isinstance(self.server_default, FetchedValue):
args.append(self.server_default._as_for_update(False))
else:
args.append(DefaultClause(self.server_default))
if self.onupdate is not None:
if isinstance(self.onupdate, (ColumnDefault, Sequence)):
args.append(self.onupdate)
else:
args.append(ColumnDefault(self.onupdate, for_update=True))
if self.server_onupdate is not None:
if isinstance(self.server_onupdate, FetchedValue):
args.append(self.server_onupdate._as_for_update(True))
else:
args.append(
DefaultClause(self.server_onupdate, for_update=True)
)
self._init_items(*args)
util.set_creation_order(self)
if "info" in kwargs:
self.info = kwargs.pop("info")
self._extra_kwargs(**kwargs)
foreign_keys = None
"""A collection of all :class:`_schema.ForeignKey` marker objects
associated with this :class:`_schema.Column`.
Each object is a member of a :class:`_schema.Table`-wide
:class:`_schema.ForeignKeyConstraint`.
.. seealso::
:attr:`_schema.Table.foreign_keys`
"""
index = None
"""The value of the :paramref:`_schema.Column.index` parameter.
Does not indicate if this :class:`_schema.Column` is actually indexed
or not; use :attr:`_schema.Table.indexes`.
.. seealso::
:attr:`_schema.Table.indexes`
"""
unique = None
"""The value of the :paramref:`_schema.Column.unique` parameter.
Does not indicate if this :class:`_schema.Column` is actually subject to
a unique constraint or not; use :attr:`_schema.Table.indexes` and
:attr:`_schema.Table.constraints`.
.. seealso::
:attr:`_schema.Table.indexes`
:attr:`_schema.Table.constraints`.
"""
def _extra_kwargs(self, **kwargs):
self._validate_dialect_kwargs(kwargs)
def __str__(self):
if self.name is None:
return "(no name)"
elif self.table is not None:
if self.table.named_with_column:
return self.table.description + "." + self.description
else:
return self.description
else:
return self.description
def references(self, column):
"""Return True if this Column references the given column via foreign
key."""
for fk in self.foreign_keys:
if fk.column.proxy_set.intersection(column.proxy_set):
return True
else:
return False
def append_foreign_key(self, fk):
fk._set_parent_with_dispatch(self)
def __repr__(self):
kwarg = []
if self.key != self.name:
kwarg.append("key")
if self.primary_key:
kwarg.append("primary_key")
if not self.nullable:
kwarg.append("nullable")
if self.onupdate:
kwarg.append("onupdate")
if self.default:
kwarg.append("default")
if self.server_default:
kwarg.append("server_default")
if self.comment:
kwarg.append("comment")
return "Column(%s)" % ", ".join(
[repr(self.name)]
+ [repr(self.type)]
+ [repr(x) for x in self.foreign_keys if x is not None]
+ [repr(x) for x in self.constraints]
+ [
(
self.table is not None
and "table=<%s>" % self.table.description
or "table=None"
)
]
+ ["%s=%s" % (k, repr(getattr(self, k))) for k in kwarg]
)
def _set_parent(self, table, allow_replacements=True):
if not self.name:
raise exc.ArgumentError(
"Column must be constructed with a non-blank name or "
"assign a non-blank .name before adding to a Table."
)
self._reset_memoizations()
if self.key is None:
self.key = self.name
existing = getattr(self, "table", None)
if existing is not None and existing is not table:
raise exc.ArgumentError(
"Column object '%s' already assigned to Table '%s'"
% (self.key, existing.description)
)
if self.key in table._columns:
col = table._columns.get(self.key)
if col is not self:
if not allow_replacements:
util.warn_deprecated(
"A column with name '%s' is already present "
"in table '%s'. Please use method "
":meth:`_schema.Table.append_column` with the "
"parameter ``replace_existing=True`` to replace an "
"existing column." % (self.key, table.name),
"1.4",
)
for fk in col.foreign_keys:
table.foreign_keys.remove(fk)
if fk.constraint in table.constraints:
# this might have been removed
# already, if it's a composite constraint
# and more than one col being replaced
table.constraints.remove(fk.constraint)
table._columns.replace(self)
self.table = table
if self.primary_key:
table.primary_key._replace(self)
elif self.key in table.primary_key:
raise exc.ArgumentError(
"Trying to redefine primary-key column '%s' as a "
"non-primary-key column on table '%s'"
% (self.key, table.fullname)
)
if self.index:
if isinstance(self.index, util.string_types):
raise exc.ArgumentError(
"The 'index' keyword argument on Column is boolean only. "
"To create indexes with a specific name, create an "
"explicit Index object external to the Table."
)
table.append_constraint(
Index(
None, self.key, unique=bool(self.unique), _column_flag=True
)
)
elif self.unique:
if isinstance(self.unique, util.string_types):
raise exc.ArgumentError(
"The 'unique' keyword argument on Column is boolean "
"only. To create unique constraints or indexes with a "
"specific name, append an explicit UniqueConstraint to "
"the Table's list of elements, or create an explicit "
"Index object external to the Table."
)
table.append_constraint(
UniqueConstraint(self.key, _column_flag=True)
)
self._setup_on_memoized_fks(lambda fk: fk._set_remote_table(table))
if self.identity and (
isinstance(self.default, Sequence)
or isinstance(self.onupdate, Sequence)
):
raise exc.ArgumentError(
"An column cannot specify both Identity and Sequence."
)
def _setup_on_memoized_fks(self, fn):
fk_keys = [
((self.table.key, self.key), False),
((self.table.key, self.name), True),
]
for fk_key, link_to_name in fk_keys:
if fk_key in self.table.metadata._fk_memos:
for fk in self.table.metadata._fk_memos[fk_key]:
if fk.link_to_name is link_to_name:
fn(fk)
def _on_table_attach(self, fn):
if self.table is not None:
fn(self, self.table)
else:
event.listen(self, "after_parent_attach", fn)
@util.deprecated(
"1.4",
"The :meth:`_schema.Column.copy` method is deprecated "
"and will be removed in a future release.",
)
def copy(self, **kw):
return self._copy(**kw)
def _copy(self, **kw):
"""Create a copy of this ``Column``, uninitialized.
This is used in :meth:`_schema.Table.to_metadata`.
"""
# Constraint objects plus non-constraint-bound ForeignKey objects
args = [
c._copy(**kw) for c in self.constraints if not c._type_bound
] + [c._copy(**kw) for c in self.foreign_keys if not c.constraint]
# ticket #5276
column_kwargs = {}
for dialect_name in self.dialect_options:
dialect_options = self.dialect_options[dialect_name]._non_defaults
for (
dialect_option_key,
dialect_option_value,
) in dialect_options.items():
column_kwargs[
dialect_name + "_" + dialect_option_key
] = dialect_option_value
server_default = self.server_default
server_onupdate = self.server_onupdate
if isinstance(server_default, (Computed, Identity)):
server_default = server_onupdate = None
args.append(self.server_default._copy(**kw))
type_ = self.type
if isinstance(type_, SchemaEventTarget):
type_ = type_.copy(**kw)
if self._user_defined_nullable is not NULL_UNSPECIFIED:
column_kwargs["nullable"] = self._user_defined_nullable
c = self._constructor(
name=self.name,
type_=type_,
key=self.key,
primary_key=self.primary_key,
unique=self.unique,
system=self.system,
# quote=self.quote, # disabled 2013-08-27 (commit 031ef080)
index=self.index,
autoincrement=self.autoincrement,
default=self.default,
server_default=server_default,
onupdate=self.onupdate,
server_onupdate=server_onupdate,
doc=self.doc,
comment=self.comment,
*args,
**column_kwargs
)
return self._schema_item_copy(c)
def _make_proxy(
self, selectable, name=None, key=None, name_is_truncatable=False, **kw
):
"""Create a *proxy* for this column.
This is a copy of this ``Column`` referenced by a different parent
(such as an alias or select statement). The column should
be used only in select scenarios, as its full DDL/default
information is not transferred.
"""
fk = [
ForeignKey(
col if col is not None else f._colspec,
_unresolvable=col is None,
_constraint=f.constraint,
)
for f, col in [
(fk, fk._resolve_column(raiseerr=False))
for fk in self.foreign_keys
]
]
if name is None and self.name is None:
raise exc.InvalidRequestError(
"Cannot initialize a sub-selectable"
" with this Column object until its 'name' has "
"been assigned."
)
try:
c = self._constructor(
coercions.expect(
roles.TruncatedLabelRole, name if name else self.name
)
if name_is_truncatable
else (name or self.name),
self.type,
# this may actually be ._proxy_key when the key is incoming
key=key if key else name if name else self.key,
primary_key=self.primary_key,
nullable=self.nullable,
_proxies=[self],
*fk
)
except TypeError as err:
util.raise_(
TypeError(
"Could not create a copy of this %r object. "
"Ensure the class includes a _constructor() "
"attribute or method which accepts the "
"standard Column constructor arguments, or "
"references the Column class itself." % self.__class__
),
from_=err,
)
c.table = selectable
c._propagate_attrs = selectable._propagate_attrs
if selectable._is_clone_of is not None:
c._is_clone_of = selectable._is_clone_of.columns.get(c.key)
if self.primary_key:
selectable.primary_key.add(c)
if fk:
selectable.foreign_keys.update(fk)
return c.key, c
class ForeignKey(DialectKWArgs, SchemaItem):
"""Defines a dependency between two columns.
``ForeignKey`` is specified as an argument to a :class:`_schema.Column`
object,
e.g.::
t = Table("remote_table", metadata,
Column("remote_id", ForeignKey("main_table.id"))
)
Note that ``ForeignKey`` is only a marker object that defines
a dependency between two columns. The actual constraint
is in all cases represented by the :class:`_schema.ForeignKeyConstraint`
object. This object will be generated automatically when
a ``ForeignKey`` is associated with a :class:`_schema.Column` which
in turn is associated with a :class:`_schema.Table`. Conversely,
when :class:`_schema.ForeignKeyConstraint` is applied to a
:class:`_schema.Table`,
``ForeignKey`` markers are automatically generated to be
present on each associated :class:`_schema.Column`, which are also
associated with the constraint object.
Note that you cannot define a "composite" foreign key constraint,
that is a constraint between a grouping of multiple parent/child
columns, using ``ForeignKey`` objects. To define this grouping,
the :class:`_schema.ForeignKeyConstraint` object must be used, and applied
to the :class:`_schema.Table`. The associated ``ForeignKey`` objects
are created automatically.
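For example, a short sketch of such a composite constraint, using
illustrative table and column names and assuming a referenced table
``node`` with columns ``id`` and ``version``::
Table(
"element",
metadata_obj,
Column("node_id", Integer),
Column("node_version", Integer),
ForeignKeyConstraint(
["node_id", "node_version"],
["node.id", "node.version"]
)
)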
The ``ForeignKey`` objects associated with an individual
:class:`_schema.Column`
object are available in the `foreign_keys` collection
of that column.
Further examples of foreign key configuration are in
:ref:`metadata_foreignkeys`.
"""
__visit_name__ = "foreign_key"
def __init__(
self,
column,
_constraint=None,
use_alter=False,
name=None,
onupdate=None,
ondelete=None,
deferrable=None,
initially=None,
link_to_name=False,
match=None,
info=None,
_unresolvable=False,
**dialect_kw
):
r"""
Construct a column-level FOREIGN KEY.
The :class:`_schema.ForeignKey` object when constructed generates a
:class:`_schema.ForeignKeyConstraint`
which is associated with the parent
:class:`_schema.Table` object's collection of constraints.
:param column: A single target column for the key relationship. A
:class:`_schema.Column` object or a column name as a string:
``tablename.columnkey`` or ``schema.tablename.columnkey``.
``columnkey`` is the ``key`` which has been assigned to the column
(defaults to the column name itself), unless ``link_to_name`` is
``True`` in which case the rendered name of the column is used.
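For example, both of the following forms refer to the same target
column, using illustrative table and schema names::
ForeignKey("user.id")
ForeignKey("public.user.id")   # schema-qualified form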
:param name: Optional string. An in-database name for the key if
`constraint` is not provided.
:param onupdate: Optional string. If set, emit ON UPDATE <value> when
issuing DDL for this constraint. Typical values include CASCADE,
SET NULL and RESTRICT.
:param ondelete: Optional string. If set, emit ON DELETE <value> when
issuing DDL for this constraint. Typical values include CASCADE,
SET NULL and RESTRICT.
:param deferrable: Optional bool. If set, emit DEFERRABLE or NOT
DEFERRABLE when issuing DDL for this constraint.
:param initially: Optional string. If set, emit INITIALLY <value> when
issuing DDL for this constraint.
:param link_to_name: if True, the string name given in ``column`` is
the rendered name of the referenced column, not its locally
assigned ``key``.
:param use_alter: passed to the underlying
:class:`_schema.ForeignKeyConstraint`
to indicate the constraint should
be generated/dropped externally from the CREATE TABLE/ DROP TABLE
statement. See :paramref:`_schema.ForeignKeyConstraint.use_alter`
for further description.
.. seealso::
:paramref:`_schema.ForeignKeyConstraint.use_alter`
:ref:`use_alter`
:param match: Optional string. If set, emit MATCH <value> when issuing
DDL for this constraint. Typical values include SIMPLE, PARTIAL
and FULL.
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
.. versionadded:: 1.0.0
:param \**dialect_kw: Additional keyword arguments are dialect
specific, and passed in the form ``<dialectname>_<argname>``. The
arguments are ultimately handled by a corresponding
:class:`_schema.ForeignKeyConstraint`.
See the documentation regarding
an individual dialect at :ref:`dialect_toplevel` for detail on
documented arguments.
.. versionadded:: 0.9.2
"""
self._colspec = coercions.expect(roles.DDLReferredColumnRole, column)
self._unresolvable = _unresolvable
if isinstance(self._colspec, util.string_types):
self._table_column = None
else:
self._table_column = self._colspec
if not isinstance(
self._table_column.table, (util.NoneType, TableClause)
):
raise exc.ArgumentError(
"ForeignKey received Column not bound "
"to a Table, got: %r" % self._table_column.table
)
# the linked ForeignKeyConstraint.
# ForeignKey will create this when parent Column
# is attached to a Table, *or* ForeignKeyConstraint
# object passes itself in when creating ForeignKey
# markers.
self.constraint = _constraint
self.parent = None
self.use_alter = use_alter
self.name = name
self.onupdate = onupdate
self.ondelete = ondelete
self.deferrable = deferrable
self.initially = initially
self.link_to_name = link_to_name
self.match = match
if info:
self.info = info
self._unvalidated_dialect_kw = dialect_kw
def __repr__(self):
return "ForeignKey(%r)" % self._get_colspec()
@util.deprecated(
"1.4",
"The :meth:`_schema.ForeignKey.copy` method is deprecated "
"and will be removed in a future release.",
)
def copy(self, schema=None, **kw):
return self._copy(schema=schema, **kw)
def _copy(self, schema=None, **kw):
"""Produce a copy of this :class:`_schema.ForeignKey` object.
The new :class:`_schema.ForeignKey` will not be bound
to any :class:`_schema.Column`.
This method is usually used by the internal
copy procedures of :class:`_schema.Column`, :class:`_schema.Table`,
and :class:`_schema.MetaData`.
:param schema: The returned :class:`_schema.ForeignKey` will
reference the original table and column name, qualified
by the given string schema name.
"""
fk = ForeignKey(
self._get_colspec(schema=schema),
use_alter=self.use_alter,
name=self.name,
onupdate=self.onupdate,
ondelete=self.ondelete,
deferrable=self.deferrable,
initially=self.initially,
link_to_name=self.link_to_name,
match=self.match,
**self._unvalidated_dialect_kw
)
return self._schema_item_copy(fk)
def _get_colspec(self, schema=None, table_name=None):
"""Return a string based 'column specification' for this
:class:`_schema.ForeignKey`.
This is usually the equivalent of the string-based "tablename.colname"
argument first passed to the object's constructor.
"""
if schema not in (None, RETAIN_SCHEMA):
_schema, tname, colname = self._column_tokens
if table_name is not None:
tname = table_name
if schema is BLANK_SCHEMA:
return "%s.%s" % (tname, colname)
else:
return "%s.%s.%s" % (schema, tname, colname)
elif table_name:
schema, tname, colname = self._column_tokens
if schema:
return "%s.%s.%s" % (schema, table_name, colname)
else:
return "%s.%s" % (table_name, colname)
elif self._table_column is not None:
return "%s.%s" % (
self._table_column.table.fullname,
self._table_column.key,
)
else:
return self._colspec
@property
def _referred_schema(self):
return self._column_tokens[0]
def _table_key(self):
if self._table_column is not None:
if self._table_column.table is None:
return None
else:
return self._table_column.table.key
else:
schema, tname, colname = self._column_tokens
return _get_table_key(tname, schema)
target_fullname = property(_get_colspec)
def references(self, table):
"""Return True if the given :class:`_schema.Table`
is referenced by this
:class:`_schema.ForeignKey`."""
return table.corresponding_column(self.column) is not None
def get_referent(self, table):
"""Return the :class:`_schema.Column` in the given
:class:`_schema.Table`
referenced by this :class:`_schema.ForeignKey`.
Returns None if this :class:`_schema.ForeignKey`
does not reference the given
:class:`_schema.Table`.
"""
return table.corresponding_column(self.column)
@util.memoized_property
def _column_tokens(self):
"""parse a string-based _colspec into its component parts."""
m = self._get_colspec().split(".")
if m is None:
raise exc.ArgumentError(
"Invalid foreign key column specification: %s" % self._colspec
)
if len(m) == 1:
tname = m.pop()
colname = None
else:
colname = m.pop()
tname = m.pop()
# A FK between column 'bar' and table 'foo' can be
# specified as 'foo', 'foo.bar', 'dbo.foo.bar',
# 'otherdb.dbo.foo.bar'. Once we have the column name and
# the table name, treat everything else as the schema
# name. Some databases (e.g. Sybase) support
# inter-database foreign keys. See tickets#1341 and --
# indirectly related -- Ticket #594. This assumes that '.'
# will never appear *within* any component of the FK.
if len(m) > 0:
schema = ".".join(m)
else:
schema = None
return schema, tname, colname
def _resolve_col_tokens(self):
if self.parent is None:
raise exc.InvalidRequestError(
"this ForeignKey object does not yet have a "
"parent Column associated with it."
)
elif self.parent.table is None:
raise exc.InvalidRequestError(
"this ForeignKey's parent column is not yet associated "
"with a Table."
)
parenttable = self.parent.table
if self._unresolvable:
schema, tname, colname = self._column_tokens
tablekey = _get_table_key(tname, schema)
return parenttable, tablekey, colname
# assertion
# basically Column._make_proxy() sends the actual
# target Column to the ForeignKey object, so the
# string resolution here is never called.
for c in self.parent.base_columns:
if isinstance(c, Column):
assert c.table is parenttable
break
else:
assert False
######################
schema, tname, colname = self._column_tokens
if schema is None and parenttable.metadata.schema is not None:
schema = parenttable.metadata.schema
tablekey = _get_table_key(tname, schema)
return parenttable, tablekey, colname
def _link_to_col_by_colstring(self, parenttable, table, colname):
_column = None
if colname is None:
# colname is None in the case that ForeignKey argument
# was specified as table name only, in which case we
# match the column name to the same column on the
# parent.
# this use case wasn't working in later 1.x series
# as it had no test coverage; fixed in 2.0
parent = self.parent
assert parent is not None
key = parent.key
_column = table.c.get(key, None)
elif self.link_to_name:
key = colname
for c in table.c:
if c.name == colname:
_column = c
else:
key = colname
_column = table.c.get(colname, None)
if _column is None:
raise exc.NoReferencedColumnError(
"Could not initialize target column "
"for ForeignKey '%s' on table '%s': "
"table '%s' has no column named '%s'"
% (self._colspec, parenttable.name, table.name, key),
table.name,
key,
)
return _column
def _set_target_column(self, column):
assert self.parent is not None
# propagate TypeEngine to parent if it didn't have one
if self.parent.type._isnull:
self.parent.type = column.type
# super-edgy case, if other FKs point to our column,
# they'd get the type propagated out also.
def set_type(fk):
if fk.parent.type._isnull:
fk.parent.type = column.type
self.parent._setup_on_memoized_fks(set_type)
self.column = column
@util.memoized_property
def column(self):
"""Return the target :class:`_schema.Column` referenced by this
:class:`_schema.ForeignKey`.
If no target column has been established, an exception
is raised.
.. versionchanged:: 0.9.0
Foreign key target column resolution now occurs as soon as both
the ForeignKey object and the remote Column to which it refers
are both associated with the same MetaData object.
"""
return self._resolve_column()
def _resolve_column(self, raiseerr=True):
if isinstance(self._colspec, util.string_types):
parenttable, tablekey, colname = self._resolve_col_tokens()
if self._unresolvable or tablekey not in parenttable.metadata:
if not raiseerr:
return None
raise exc.NoReferencedTableError(
"Foreign key associated with column '%s' could not find "
"table '%s' with which to generate a "
"foreign key to target column '%s'"
% (self.parent, tablekey, colname),
tablekey,
)
elif parenttable.key not in parenttable.metadata:
if not raiseerr:
return None
raise exc.InvalidRequestError(
"Table %s is no longer associated with its "
"parent MetaData" % parenttable
)
else:
table = parenttable.metadata.tables[tablekey]
return self._link_to_col_by_colstring(
parenttable, table, colname
)
elif hasattr(self._colspec, "__clause_element__"):
_column = self._colspec.__clause_element__()
return _column
else:
_column = self._colspec
return _column
def _set_parent(self, column, **kw):
if self.parent is not None and self.parent is not column:
raise exc.InvalidRequestError(
"This ForeignKey already has a parent !"
)
self.parent = column
self.parent.foreign_keys.add(self)
self.parent._on_table_attach(self._set_table)
def _set_remote_table(self, table):
parenttable, tablekey, colname = self._resolve_col_tokens()
        _column = self._link_to_col_by_colstring(parenttable, table, colname)
self._set_target_column(_column)
assert self.constraint is not None
self.constraint._validate_dest_table(table)
def _remove_from_metadata(self, metadata):
parenttable, table_key, colname = self._resolve_col_tokens()
fk_key = (table_key, colname)
if self in metadata._fk_memos[fk_key]:
# TODO: no test coverage for self not in memos
metadata._fk_memos[fk_key].remove(self)
def _set_table(self, column, table):
# standalone ForeignKey - create ForeignKeyConstraint
# on the hosting Table when attached to the Table.
assert isinstance(table, Table)
if self.constraint is None:
self.constraint = ForeignKeyConstraint(
[],
[],
use_alter=self.use_alter,
name=self.name,
onupdate=self.onupdate,
ondelete=self.ondelete,
deferrable=self.deferrable,
initially=self.initially,
match=self.match,
**self._unvalidated_dialect_kw
)
self.constraint._append_element(column, self)
self.constraint._set_parent_with_dispatch(table)
table.foreign_keys.add(self)
# set up remote ".column" attribute, or a note to pick it
# up when the other Table/Column shows up
if isinstance(self._colspec, util.string_types):
parenttable, table_key, colname = self._resolve_col_tokens()
fk_key = (table_key, colname)
if table_key in parenttable.metadata.tables:
table = parenttable.metadata.tables[table_key]
try:
_column = self._link_to_col_by_colstring(
parenttable, table, colname
)
except exc.NoReferencedColumnError:
# this is OK, we'll try later
pass
else:
self._set_target_column(_column)
parenttable.metadata._fk_memos[fk_key].append(self)
elif hasattr(self._colspec, "__clause_element__"):
_column = self._colspec.__clause_element__()
self._set_target_column(_column)
else:
_column = self._colspec
self._set_target_column(_column)
class DefaultGenerator(Executable, SchemaItem):
"""Base class for column *default* values."""
__visit_name__ = "default_generator"
is_sequence = False
is_server_default = False
column = None
def __init__(self, for_update=False):
self.for_update = for_update
def _set_parent(self, column, **kw):
self.column = column
if self.for_update:
self.column.onupdate = self
else:
self.column.default = self
@util.deprecated_20(
":meth:`.DefaultGenerator.execute`",
alternative="All statement execution in SQLAlchemy 2.0 is performed "
"by the :meth:`_engine.Connection.execute` method of "
":class:`_engine.Connection`, "
"or in the ORM by the :meth:`.Session.execute` method of "
":class:`.Session`.",
)
def execute(self, bind=None):
if bind is None:
bind = _bind_or_error(self)
return bind._execute_default(self, (), util.EMPTY_DICT)
def _execute_on_connection(
self, connection, multiparams, params, execution_options
):
return connection._execute_default(
self, multiparams, params, execution_options
)
@property
def bind(self):
"""Return the connectable associated with this default."""
if getattr(self, "column", None) is not None:
return self.column.table.bind
else:
return None
class ColumnDefault(DefaultGenerator):
"""A plain default value on a column.
This could correspond to a constant, a callable function,
or a SQL clause.
:class:`.ColumnDefault` is generated automatically
whenever the ``default``, ``onupdate`` arguments of
:class:`_schema.Column` are used. A :class:`.ColumnDefault`
can be passed positionally as well.
For example, the following::
Column('foo', Integer, default=50)
Is equivalent to::
Column('foo', Integer, ColumnDefault(50))
"""
def __init__(self, arg, **kwargs):
"""Construct a new :class:`.ColumnDefault`.
:param arg: argument representing the default value.
May be one of the following:
* a plain non-callable Python value, such as a
string, integer, boolean, or other simple type.
The default value will be used as is each time.
* a SQL expression, that is one which derives from
:class:`_expression.ColumnElement`. The SQL expression will
be rendered into the INSERT or UPDATE statement,
or in the case of a primary key column when
RETURNING is not used may be
pre-executed before an INSERT within a SELECT.
* A Python callable. The function will be invoked for each
new row subject to an INSERT or UPDATE.
The callable must accept exactly
zero or one positional arguments. The one-argument form
will receive an instance of the :class:`.ExecutionContext`,
which provides contextual information as to the current
:class:`_engine.Connection` in use as well as the current
statement and parameters.
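        As a brief sketch of the three forms, in the same order as above
        (the ``func`` and ``datetime`` imports are assumed)::
            Column('qty', Integer, default=1)
            Column('created', DateTime, default=func.now())
            Column('updated', DateTime, default=datetime.datetime.utcnow)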
"""
super(ColumnDefault, self).__init__(**kwargs)
if isinstance(arg, FetchedValue):
raise exc.ArgumentError(
"ColumnDefault may not be a server-side default type."
)
if callable(arg):
arg = self._maybe_wrap_callable(arg)
self.arg = arg
@util.memoized_property
def is_callable(self):
return callable(self.arg)
@util.memoized_property
def is_clause_element(self):
return isinstance(self.arg, ClauseElement)
@util.memoized_property
def is_scalar(self):
return (
not self.is_callable
and not self.is_clause_element
and not self.is_sequence
)
@util.memoized_property
@util.preload_module("sqlalchemy.sql.sqltypes")
def _arg_is_typed(self):
sqltypes = util.preloaded.sql_sqltypes
if self.is_clause_element:
return not isinstance(self.arg.type, sqltypes.NullType)
else:
return False
def _maybe_wrap_callable(self, fn):
"""Wrap callables that don't accept a context.
This is to allow easy compatibility with default callables
that aren't specific to accepting of a context.
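        E.g. a zero-argument callable such as ``datetime.datetime.utcnow``
        is wrapped so that it may be invoked with a context argument that
        is then discarded.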
"""
try:
argspec = util.get_callable_argspec(fn, no_self=True)
except TypeError:
return util.wrap_callable(lambda ctx: fn(), fn)
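        # count positional parameters that have no default; a
        # context-accepting callable exposes exactly one such parameter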
defaulted = argspec[3] is not None and len(argspec[3]) or 0
positionals = len(argspec[0]) - defaulted
if positionals == 0:
return util.wrap_callable(lambda ctx: fn(), fn)
elif positionals == 1:
return fn
else:
raise exc.ArgumentError(
"ColumnDefault Python function takes zero or one "
"positional arguments"
)
def __repr__(self):
return "ColumnDefault(%r)" % (self.arg,)
class IdentityOptions(object):
"""Defines options for a named database sequence or an identity column.
.. versionadded:: 1.3.18
.. seealso::
:class:`.Sequence`
"""
def __init__(
self,
start=None,
increment=None,
minvalue=None,
maxvalue=None,
nominvalue=None,
nomaxvalue=None,
cycle=None,
cache=None,
order=None,
):
"""Construct a :class:`.IdentityOptions` object.
See the :class:`.Sequence` documentation for a complete description
of the parameters.
:param start: the starting index of the sequence.
:param increment: the increment value of the sequence.
:param minvalue: the minimum value of the sequence.
:param maxvalue: the maximum value of the sequence.
:param nominvalue: no minimum value of the sequence.
:param nomaxvalue: no maximum value of the sequence.
:param cycle: allows the sequence to wrap around when the maxvalue
or minvalue has been reached.
:param cache: optional integer value; number of future values in the
sequence which are calculated in advance.
:param order: optional boolean value; if ``True``, renders the
ORDER keyword.
"""
self.start = start
self.increment = increment
self.minvalue = minvalue
self.maxvalue = maxvalue
self.nominvalue = nominvalue
self.nomaxvalue = nomaxvalue
self.cycle = cycle
self.cache = cache
self.order = order
class Sequence(IdentityOptions, DefaultGenerator):
"""Represents a named database sequence.
The :class:`.Sequence` object represents the name and configurational
parameters of a database sequence. It also represents
a construct that can be "executed" by a SQLAlchemy :class:`_engine.Engine`
or :class:`_engine.Connection`,
rendering the appropriate "next value" function
for the target database and returning a result.
The :class:`.Sequence` is typically associated with a primary key column::
some_table = Table(
'some_table', metadata,
Column('id', Integer, Sequence('some_table_seq'),
primary_key=True)
)
When CREATE TABLE is emitted for the above :class:`_schema.Table`, if the
target platform supports sequences, a CREATE SEQUENCE statement will
be emitted as well. For platforms that don't support sequences,
the :class:`.Sequence` construct is ignored.
.. seealso::
:ref:`defaults_sequences`
:class:`.CreateSequence`
:class:`.DropSequence`
"""
__visit_name__ = "sequence"
is_sequence = True
def __init__(
self,
name,
start=None,
increment=None,
minvalue=None,
maxvalue=None,
nominvalue=None,
nomaxvalue=None,
cycle=None,
schema=None,
cache=None,
order=None,
data_type=None,
optional=False,
quote=None,
metadata=None,
quote_schema=None,
for_update=False,
):
"""Construct a :class:`.Sequence` object.
:param name: the name of the sequence.
:param start: the starting index of the sequence. This value is
used when the CREATE SEQUENCE command is emitted to the database
as the value of the "START WITH" clause. If ``None``, the
clause is omitted, which on most platforms indicates a starting
value of 1.
:param increment: the increment value of the sequence. This
value is used when the CREATE SEQUENCE command is emitted to
the database as the value of the "INCREMENT BY" clause. If ``None``,
the clause is omitted, which on most platforms indicates an
increment of 1.
:param minvalue: the minimum value of the sequence. This
value is used when the CREATE SEQUENCE command is emitted to
the database as the value of the "MINVALUE" clause. If ``None``,
the clause is omitted, which on most platforms indicates a
minvalue of 1 and -2^63-1 for ascending and descending sequences,
respectively.
.. versionadded:: 1.0.7
:param maxvalue: the maximum value of the sequence. This
value is used when the CREATE SEQUENCE command is emitted to
the database as the value of the "MAXVALUE" clause. If ``None``,
the clause is omitted, which on most platforms indicates a
maxvalue of 2^63-1 and -1 for ascending and descending sequences,
respectively.
.. versionadded:: 1.0.7
:param nominvalue: no minimum value of the sequence. This
value is used when the CREATE SEQUENCE command is emitted to
the database as the value of the "NO MINVALUE" clause. If ``None``,
the clause is omitted, which on most platforms indicates a
minvalue of 1 and -2^63-1 for ascending and descending sequences,
respectively.
.. versionadded:: 1.0.7
:param nomaxvalue: no maximum value of the sequence. This
value is used when the CREATE SEQUENCE command is emitted to
the database as the value of the "NO MAXVALUE" clause. If ``None``,
the clause is omitted, which on most platforms indicates a
maxvalue of 2^63-1 and -1 for ascending and descending sequences,
respectively.
.. versionadded:: 1.0.7
:param cycle: allows the sequence to wrap around when the maxvalue
or minvalue has been reached by an ascending or descending sequence
respectively. This value is used when the CREATE SEQUENCE command
is emitted to the database as the "CYCLE" clause. If the limit is
reached, the next number generated will be the minvalue or maxvalue,
        respectively. If cycle=False (the default), any calls to nextval
after the sequence has reached its maximum value will return an
error.
.. versionadded:: 1.0.7
:param schema: optional schema name for the sequence, if located
in a schema other than the default. The rules for selecting the
schema name when a :class:`_schema.MetaData`
is also present are the same
as that of :paramref:`_schema.Table.schema`.
:param cache: optional integer value; number of future values in the
sequence which are calculated in advance. Renders the CACHE keyword
understood by Oracle and PostgreSQL.
.. versionadded:: 1.1.12
:param order: optional boolean value; if ``True``, renders the
ORDER keyword, understood by Oracle, indicating the sequence is
definitively ordered. May be necessary to provide deterministic
ordering using Oracle RAC.
.. versionadded:: 1.1.12
:param data_type: The type to be returned by the sequence, for
dialects that allow us to choose between INTEGER, BIGINT, etc.
(e.g., mssql).
.. versionadded:: 1.4.0
:param optional: boolean value, when ``True``, indicates that this
:class:`.Sequence` object only needs to be explicitly generated
on backends that don't provide another way to generate primary
key identifiers. Currently, it essentially means, "don't create
this sequence on the PostgreSQL backend, where the SERIAL keyword
creates a sequence for us automatically".
:param quote: boolean value, when ``True`` or ``False``, explicitly
forces quoting of the :paramref:`_schema.Sequence.name` on or off.
When left at its default of ``None``, normal quoting rules based
on casing and reserved words take place.
:param quote_schema: Set the quoting preferences for the ``schema``
name.
:param metadata: optional :class:`_schema.MetaData` object which this
:class:`.Sequence` will be associated with. A :class:`.Sequence`
that is associated with a :class:`_schema.MetaData`
gains the following
capabilities:
* The :class:`.Sequence` will inherit the
:paramref:`_schema.MetaData.schema`
parameter specified to the target :class:`_schema.MetaData`, which
affects the production of CREATE / DROP DDL, if any.
* The :meth:`.Sequence.create` and :meth:`.Sequence.drop` methods
automatically use the engine bound to the :class:`_schema.MetaData`
object, if any.
* The :meth:`_schema.MetaData.create_all` and
:meth:`_schema.MetaData.drop_all`
methods will emit CREATE / DROP for this :class:`.Sequence`,
even if the :class:`.Sequence` is not associated with any
:class:`_schema.Table` / :class:`_schema.Column`
that's a member of this
:class:`_schema.MetaData`.
The above behaviors can only occur if the :class:`.Sequence` is
explicitly associated with the :class:`_schema.MetaData`
via this parameter.
.. seealso::
:ref:`sequence_metadata` - full discussion of the
:paramref:`.Sequence.metadata` parameter.
:param for_update: Indicates this :class:`.Sequence`, when associated
with a :class:`_schema.Column`,
should be invoked for UPDATE statements
on that column's table, rather than for INSERT statements, when
no value is otherwise present for that column in the statement.
"""
DefaultGenerator.__init__(self, for_update=for_update)
IdentityOptions.__init__(
self,
start=start,
increment=increment,
minvalue=minvalue,
maxvalue=maxvalue,
nominvalue=nominvalue,
nomaxvalue=nomaxvalue,
cycle=cycle,
cache=cache,
order=order,
)
self.name = quoted_name(name, quote)
self.optional = optional
if schema is BLANK_SCHEMA:
self.schema = schema = None
elif metadata is not None and schema is None and metadata.schema:
self.schema = schema = metadata.schema
else:
self.schema = quoted_name(schema, quote_schema)
self.metadata = metadata
self._key = _get_table_key(name, schema)
if metadata:
self._set_metadata(metadata)
if data_type is not None:
self.data_type = to_instance(data_type)
else:
self.data_type = None
@util.memoized_property
def is_callable(self):
return False
@util.memoized_property
def is_clause_element(self):
return False
@util.preload_module("sqlalchemy.sql.functions")
def next_value(self):
"""Return a :class:`.next_value` function element
which will render the appropriate increment function
for this :class:`.Sequence` within any SQL expression.
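        A minimal usage sketch (assuming ``select`` is imported from
        SQLAlchemy Core)::
            seq = Sequence("order_id_seq")
            stmt = select(seq.next_value())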
"""
if self.bind:
return util.preloaded.sql_functions.func.next_value(
self, bind=self.bind
)
else:
return util.preloaded.sql_functions.func.next_value(self)
def _set_parent(self, column, **kw):
super(Sequence, self)._set_parent(column)
column._on_table_attach(self._set_table)
def _set_table(self, column, table):
self._set_metadata(table.metadata)
def _set_metadata(self, metadata):
self.metadata = metadata
self.metadata._sequences[self._key] = self
@property
def bind(self):
if self.metadata:
return self.metadata.bind
else:
return None
def create(self, bind=None, checkfirst=True):
"""Creates this sequence in the database.
.. note:: the "bind" argument will be required in
SQLAlchemy 2.0.
"""
if bind is None:
bind = _bind_or_error(self)
bind._run_ddl_visitor(ddl.SchemaGenerator, self, checkfirst=checkfirst)
def drop(self, bind=None, checkfirst=True):
"""Drops this sequence from the database.
.. note:: the "bind" argument will be required in
SQLAlchemy 2.0.
"""
if bind is None:
bind = _bind_or_error(self)
bind._run_ddl_visitor(ddl.SchemaDropper, self, checkfirst=checkfirst)
def _not_a_column_expr(self):
raise exc.InvalidRequestError(
"This %s cannot be used directly "
"as a column expression. Use func.next_value(sequence) "
"to produce a 'next value' function that's usable "
"as a column element." % self.__class__.__name__
)
@inspection._self_inspects
class FetchedValue(SchemaEventTarget):
"""A marker for a transparent database-side default.
Use :class:`.FetchedValue` when the database is configured
to provide some automatic default for a column.
E.g.::
Column('foo', Integer, FetchedValue())
Would indicate that some trigger or default generator
will create a new value for the ``foo`` column during an
INSERT.
.. seealso::
:ref:`triggered_columns`
"""
is_server_default = True
reflected = False
has_argument = False
is_clause_element = False
def __init__(self, for_update=False):
self.for_update = for_update
def _as_for_update(self, for_update):
if for_update == self.for_update:
return self
else:
return self._clone(for_update)
def _clone(self, for_update):
n = self.__class__.__new__(self.__class__)
n.__dict__.update(self.__dict__)
n.__dict__.pop("column", None)
n.for_update = for_update
return n
def _set_parent(self, column, **kw):
self.column = column
if self.for_update:
self.column.server_onupdate = self
else:
self.column.server_default = self
def __repr__(self):
return util.generic_repr(self)
class DefaultClause(FetchedValue):
"""A DDL-specified DEFAULT column value.
:class:`.DefaultClause` is a :class:`.FetchedValue`
that also generates a "DEFAULT" clause when
"CREATE TABLE" is emitted.
:class:`.DefaultClause` is generated automatically
whenever the ``server_default``, ``server_onupdate`` arguments of
:class:`_schema.Column` are used. A :class:`.DefaultClause`
can be passed positionally as well.
For example, the following::
Column('foo', Integer, server_default="50")
Is equivalent to::
Column('foo', Integer, DefaultClause("50"))
"""
has_argument = True
def __init__(self, arg, for_update=False, _reflected=False):
util.assert_arg_type(
arg, (util.string_types[0], ClauseElement, TextClause), "arg"
)
super(DefaultClause, self).__init__(for_update)
self.arg = arg
self.reflected = _reflected
def __repr__(self):
return "DefaultClause(%r, for_update=%r)" % (self.arg, self.for_update)
class Constraint(DialectKWArgs, SchemaItem):
"""A table-level SQL constraint.
:class:`_schema.Constraint` serves as the base class for the series of
constraint objects that can be associated with :class:`_schema.Table`
objects, including :class:`_schema.PrimaryKeyConstraint`,
    :class:`_schema.ForeignKeyConstraint`,
:class:`_schema.UniqueConstraint`, and
:class:`_schema.CheckConstraint`.
"""
__visit_name__ = "constraint"
def __init__(
self,
name=None,
deferrable=None,
initially=None,
_create_rule=None,
info=None,
_type_bound=False,
**dialect_kw
):
r"""Create a SQL constraint.
:param name:
Optional, the in-database name of this ``Constraint``.
:param deferrable:
Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when
issuing DDL for this constraint.
:param initially:
Optional string. If set, emit INITIALLY <value> when issuing DDL
for this constraint.
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
.. versionadded:: 1.0.0
:param \**dialect_kw: Additional keyword arguments are dialect
specific, and passed in the form ``<dialectname>_<argname>``. See
the documentation regarding an individual dialect at
:ref:`dialect_toplevel` for detail on documented arguments.
:param _create_rule:
used internally by some datatypes that also create constraints.
:param _type_bound:
used internally to indicate that this constraint is associated with
a specific datatype.
"""
self.name = name
self.deferrable = deferrable
self.initially = initially
if info:
self.info = info
self._create_rule = _create_rule
self._type_bound = _type_bound
util.set_creation_order(self)
self._validate_dialect_kwargs(dialect_kw)
@property
def table(self):
try:
if isinstance(self.parent, Table):
return self.parent
except AttributeError:
pass
raise exc.InvalidRequestError(
"This constraint is not bound to a table. Did you "
"mean to call table.append_constraint(constraint) ?"
)
def _set_parent(self, parent, **kw):
self.parent = parent
parent.constraints.add(self)
@util.deprecated(
"1.4",
"The :meth:`_schema.Constraint.copy` method is deprecated "
"and will be removed in a future release.",
)
def copy(self, **kw):
return self._copy(**kw)
def _copy(self, **kw):
raise NotImplementedError()
class ColumnCollectionMixin(object):
columns = None
"""A :class:`_expression.ColumnCollection` of :class:`_schema.Column`
objects.
This collection represents the columns which are referred to by
this object.
"""
_allow_multiple_tables = False
def __init__(self, *columns, **kw):
_autoattach = kw.pop("_autoattach", True)
self._column_flag = kw.pop("_column_flag", False)
self.columns = DedupeColumnCollection()
processed_expressions = kw.pop("_gather_expressions", None)
if processed_expressions is not None:
self._pending_colargs = []
for (
expr,
column,
strname,
add_element,
) in coercions.expect_col_expression_collection(
roles.DDLConstraintColumnRole, columns
):
self._pending_colargs.append(add_element)
processed_expressions.append(expr)
else:
self._pending_colargs = [
coercions.expect(roles.DDLConstraintColumnRole, column)
for column in columns
]
if _autoattach and self._pending_colargs:
self._check_attach()
def _check_attach(self, evt=False):
col_objs = [c for c in self._pending_colargs if isinstance(c, Column)]
cols_w_table = [c for c in col_objs if isinstance(c.table, Table)]
cols_wo_table = set(col_objs).difference(cols_w_table)
if cols_wo_table:
# feature #3341 - place event listeners for Column objects
# such that when all those cols are attached, we autoattach.
assert not evt, "Should not reach here on event call"
# issue #3411 - don't do the per-column auto-attach if some of the
# columns are specified as strings.
has_string_cols = set(
c for c in self._pending_colargs if c is not None
).difference(col_objs)
if not has_string_cols:
def _col_attached(column, table):
# this isinstance() corresponds with the
# isinstance() above; only want to count Table-bound
# columns
if isinstance(table, Table):
cols_wo_table.discard(column)
if not cols_wo_table:
self._check_attach(evt=True)
self._cols_wo_table = cols_wo_table
for col in cols_wo_table:
col._on_table_attach(_col_attached)
return
columns = cols_w_table
tables = {c.table for c in columns}
if len(tables) == 1:
self._set_parent_with_dispatch(tables.pop())
elif len(tables) > 1 and not self._allow_multiple_tables:
table = columns[0].table
others = [c for c in columns[1:] if c.table is not table]
if others:
raise exc.ArgumentError(
"Column(s) %s are not part of table '%s'."
% (
", ".join("'%s'" % c for c in others),
table.description,
)
)
def _col_expressions(self, table):
return [
table.c[col] if isinstance(col, util.string_types) else col
for col in self._pending_colargs
]
def _set_parent(self, table, **kw):
for col in self._col_expressions(table):
if col is not None:
self.columns.add(col)
class ColumnCollectionConstraint(ColumnCollectionMixin, Constraint):
"""A constraint that proxies a ColumnCollection."""
def __init__(self, *columns, **kw):
r"""
:param \*columns:
A sequence of column names or Column objects.
:param name:
Optional, the in-database name of this constraint.
:param deferrable:
Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when
issuing DDL for this constraint.
:param initially:
Optional string. If set, emit INITIALLY <value> when issuing DDL
for this constraint.
:param \**kw: other keyword arguments including dialect-specific
arguments are propagated to the :class:`.Constraint` superclass.
"""
_autoattach = kw.pop("_autoattach", True)
_column_flag = kw.pop("_column_flag", False)
Constraint.__init__(self, **kw)
ColumnCollectionMixin.__init__(
self, *columns, _autoattach=_autoattach, _column_flag=_column_flag
)
columns = None
"""A :class:`_expression.ColumnCollection` representing the set of columns
for this constraint.
"""
def _set_parent(self, table, **kw):
Constraint._set_parent(self, table)
ColumnCollectionMixin._set_parent(self, table)
def __contains__(self, x):
return x in self.columns
@util.deprecated(
"1.4",
"The :meth:`_schema.ColumnCollectionConstraint.copy` method "
"is deprecated and will be removed in a future release.",
)
def copy(self, target_table=None, **kw):
return self._copy(target_table=target_table, **kw)
def _copy(self, target_table=None, **kw):
# ticket #5276
constraint_kwargs = {}
for dialect_name in self.dialect_options:
dialect_options = self.dialect_options[dialect_name]._non_defaults
for (
dialect_option_key,
dialect_option_value,
) in dialect_options.items():
constraint_kwargs[
dialect_name + "_" + dialect_option_key
] = dialect_option_value
c = self.__class__(
name=self.name,
deferrable=self.deferrable,
initially=self.initially,
*[
_copy_expression(expr, self.parent, target_table)
for expr in self.columns
],
**constraint_kwargs
)
return self._schema_item_copy(c)
def contains_column(self, col):
"""Return True if this constraint contains the given column.
Note that this object also contains an attribute ``.columns``
which is a :class:`_expression.ColumnCollection` of
:class:`_schema.Column` objects.
"""
return self.columns.contains_column(col)
def __iter__(self):
return iter(self.columns)
def __len__(self):
return len(self.columns)
class CheckConstraint(ColumnCollectionConstraint):
"""A table- or column-level CHECK constraint.
Can be included in the definition of a Table or Column.
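    E.g., as a sketch (column and constraint names are illustrative)::
        CheckConstraint("num_ordered <= num_available", name="check_qty")
        Column("flag", Integer, CheckConstraint("flag IN (0, 1)"))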
"""
_allow_multiple_tables = True
__visit_name__ = "table_or_column_check_constraint"
@_document_text_coercion(
"sqltext",
":class:`.CheckConstraint`",
":paramref:`.CheckConstraint.sqltext`",
)
def __init__(
self,
sqltext,
name=None,
deferrable=None,
initially=None,
table=None,
info=None,
_create_rule=None,
_autoattach=True,
_type_bound=False,
**kw
):
r"""Construct a CHECK constraint.
:param sqltext:
A string containing the constraint definition, which will be used
verbatim, or a SQL expression construct. If given as a string,
the object is converted to a :func:`_expression.text` object.
If the textual
string includes a colon character, escape this using a backslash::
            CheckConstraint(r"foo ~ E'a(?\:b|c)d'")
:param name:
Optional, the in-database name of the constraint.
:param deferrable:
Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when
issuing DDL for this constraint.
:param initially:
Optional string. If set, emit INITIALLY <value> when issuing DDL
for this constraint.
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
.. versionadded:: 1.0.0
"""
self.sqltext = coercions.expect(roles.DDLExpressionRole, sqltext)
columns = []
visitors.traverse(self.sqltext, {}, {"column": columns.append})
super(CheckConstraint, self).__init__(
name=name,
deferrable=deferrable,
initially=initially,
_create_rule=_create_rule,
info=info,
_type_bound=_type_bound,
_autoattach=_autoattach,
*columns,
**kw
)
if table is not None:
self._set_parent_with_dispatch(table)
@property
def is_column_level(self):
return not isinstance(self.parent, Table)
@util.deprecated(
"1.4",
"The :meth:`_schema.CheckConstraint.copy` method is deprecated "
"and will be removed in a future release.",
)
def copy(self, target_table=None, **kw):
return self._copy(target_table=target_table, **kw)
def _copy(self, target_table=None, **kw):
if target_table is not None:
# note that target_table is None for the copy process of
# a column-bound CheckConstraint, so this path is not reached
# in that case.
sqltext = _copy_expression(self.sqltext, self.table, target_table)
else:
sqltext = self.sqltext
c = CheckConstraint(
sqltext,
name=self.name,
initially=self.initially,
deferrable=self.deferrable,
_create_rule=self._create_rule,
table=target_table,
_autoattach=False,
_type_bound=self._type_bound,
)
return self._schema_item_copy(c)
class ForeignKeyConstraint(ColumnCollectionConstraint):
"""A table-level FOREIGN KEY constraint.
Defines a single column or composite FOREIGN KEY ... REFERENCES
constraint. For a no-frills, single column foreign key, adding a
:class:`_schema.ForeignKey` to the definition of a :class:`_schema.Column`
is a
shorthand equivalent for an unnamed, single column
:class:`_schema.ForeignKeyConstraint`.
Examples of foreign key configuration are in :ref:`metadata_foreignkeys`.
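    A composite constraint, as a sketch (table and column names are
    illustrative)::
        ForeignKeyConstraint(
            ["invoice_id", "ref_num"],
            ["invoice.invoice_id", "invoice.ref_num"],
        )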
"""
__visit_name__ = "foreign_key_constraint"
def __init__(
self,
columns,
refcolumns,
name=None,
onupdate=None,
ondelete=None,
deferrable=None,
initially=None,
use_alter=False,
link_to_name=False,
match=None,
table=None,
info=None,
**dialect_kw
):
r"""Construct a composite-capable FOREIGN KEY.
:param columns: A sequence of local column names. The named columns
must be defined and present in the parent Table. The names should
match the ``key`` given to each column (defaults to the name) unless
``link_to_name`` is True.
:param refcolumns: A sequence of foreign column names or Column
objects. The columns must all be located within the same Table.
:param name: Optional, the in-database name of the key.
        :param onupdate: Optional string. If set, emit ON UPDATE <value> when
        issuing DDL for this constraint. Typical values include CASCADE,
        SET NULL and RESTRICT.
        :param ondelete: Optional string. If set, emit ON DELETE <value> when
        issuing DDL for this constraint. Typical values include CASCADE,
        SET NULL and RESTRICT.
:param deferrable: Optional bool. If set, emit DEFERRABLE or NOT
DEFERRABLE when issuing DDL for this constraint.
:param initially: Optional string. If set, emit INITIALLY <value> when
issuing DDL for this constraint.
:param link_to_name: if True, the string name given in ``column`` is
the rendered name of the referenced column, not its locally assigned
``key``.
:param use_alter: If True, do not emit the DDL for this constraint as
part of the CREATE TABLE definition. Instead, generate it via an
ALTER TABLE statement issued after the full collection of tables
have been created, and drop it via an ALTER TABLE statement before
the full collection of tables are dropped.
The use of :paramref:`_schema.ForeignKeyConstraint.use_alter` is
particularly geared towards the case where two or more tables
are established within a mutually-dependent foreign key constraint
relationship; however, the :meth:`_schema.MetaData.create_all` and
:meth:`_schema.MetaData.drop_all`
methods will perform this resolution
automatically, so the flag is normally not needed.
.. versionchanged:: 1.0.0 Automatic resolution of foreign key
cycles has been added, removing the need to use the
:paramref:`_schema.ForeignKeyConstraint.use_alter` in typical use
cases.
.. seealso::
:ref:`use_alter`
:param match: Optional string. If set, emit MATCH <value> when issuing
DDL for this constraint. Typical values include SIMPLE, PARTIAL
and FULL.
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
.. versionadded:: 1.0.0
:param \**dialect_kw: Additional keyword arguments are dialect
specific, and passed in the form ``<dialectname>_<argname>``. See
the documentation regarding an individual dialect at
:ref:`dialect_toplevel` for detail on documented arguments.
.. versionadded:: 0.9.2
"""
Constraint.__init__(
self,
name=name,
deferrable=deferrable,
initially=initially,
info=info,
**dialect_kw
)
self.onupdate = onupdate
self.ondelete = ondelete
self.link_to_name = link_to_name
self.use_alter = use_alter
self.match = match
if len(set(columns)) != len(refcolumns):
if len(set(columns)) != len(columns):
# e.g. FOREIGN KEY (a, a) REFERENCES r (b, c)
raise exc.ArgumentError(
"ForeignKeyConstraint with duplicate source column "
"references are not supported."
)
else:
# e.g. FOREIGN KEY (a) REFERENCES r (b, c)
# paraphrasing
# https://www.postgresql.org/docs/current/static/ddl-constraints.html
raise exc.ArgumentError(
"ForeignKeyConstraint number "
"of constrained columns must match the number of "
"referenced columns."
)
# standalone ForeignKeyConstraint - create
# associated ForeignKey objects which will be applied to hosted
# Column objects (in col.foreign_keys), either now or when attached
# to the Table for string-specified names
self.elements = [
ForeignKey(
refcol,
_constraint=self,
name=self.name,
onupdate=self.onupdate,
ondelete=self.ondelete,
use_alter=self.use_alter,
link_to_name=self.link_to_name,
match=self.match,
deferrable=self.deferrable,
initially=self.initially,
**self.dialect_kwargs
)
for refcol in refcolumns
]
ColumnCollectionMixin.__init__(self, *columns)
if table is not None:
if hasattr(self, "parent"):
assert table is self.parent
self._set_parent_with_dispatch(table)
def _append_element(self, column, fk):
self.columns.add(column)
self.elements.append(fk)
columns = None
"""A :class:`_expression.ColumnCollection` representing the set of columns
for this constraint.
"""
elements = None
"""A sequence of :class:`_schema.ForeignKey` objects.
Each :class:`_schema.ForeignKey`
represents a single referring column/referred
column pair.
This collection is intended to be read-only.
"""
@property
def _elements(self):
# legacy - provide a dictionary view of (column_key, fk)
return util.OrderedDict(zip(self.column_keys, self.elements))
@property
def _referred_schema(self):
for elem in self.elements:
return elem._referred_schema
else:
return None
@property
def referred_table(self):
"""The :class:`_schema.Table` object to which this
:class:`_schema.ForeignKeyConstraint` references.
This is a dynamically calculated attribute which may not be available
if the constraint and/or parent table is not yet associated with
a metadata collection that contains the referred table.
.. versionadded:: 1.0.0
"""
return self.elements[0].column.table
def _validate_dest_table(self, table):
table_keys = set([elem._table_key() for elem in self.elements])
if None not in table_keys and len(table_keys) > 1:
elem0, elem1 = sorted(table_keys)[0:2]
raise exc.ArgumentError(
"ForeignKeyConstraint on %s(%s) refers to "
"multiple remote tables: %s and %s"
% (table.fullname, self._col_description, elem0, elem1)
)
@property
def column_keys(self):
"""Return a list of string keys representing the local
columns in this :class:`_schema.ForeignKeyConstraint`.
This list is either the original string arguments sent
to the constructor of the :class:`_schema.ForeignKeyConstraint`,
or if the constraint has been initialized with :class:`_schema.Column`
objects, is the string ``.key`` of each element.
.. versionadded:: 1.0.0
"""
if hasattr(self, "parent"):
return self.columns.keys()
else:
return [
col.key if isinstance(col, ColumnElement) else str(col)
for col in self._pending_colargs
]
@property
def _col_description(self):
return ", ".join(self.column_keys)
def _set_parent(self, table, **kw):
Constraint._set_parent(self, table)
try:
ColumnCollectionConstraint._set_parent(self, table)
except KeyError as ke:
util.raise_(
exc.ArgumentError(
"Can't create ForeignKeyConstraint "
"on table '%s': no column "
"named '%s' is present." % (table.description, ke.args[0])
),
from_=ke,
)
for col, fk in zip(self.columns, self.elements):
if not hasattr(fk, "parent") or fk.parent is not col:
fk._set_parent_with_dispatch(col)
self._validate_dest_table(table)
@util.deprecated(
"1.4",
"The :meth:`_schema.ForeignKeyConstraint.copy` method is deprecated "
"and will be removed in a future release.",
)
def copy(self, schema=None, target_table=None, **kw):
return self._copy(schema=schema, target_table=target_table, **kw)
def _copy(self, schema=None, target_table=None, **kw):
fkc = ForeignKeyConstraint(
[x.parent.key for x in self.elements],
[
x._get_colspec(
schema=schema,
table_name=target_table.name
if target_table is not None
and x._table_key() == x.parent.table.key
else None,
)
for x in self.elements
],
name=self.name,
onupdate=self.onupdate,
ondelete=self.ondelete,
use_alter=self.use_alter,
deferrable=self.deferrable,
initially=self.initially,
link_to_name=self.link_to_name,
match=self.match,
)
for self_fk, other_fk in zip(self.elements, fkc.elements):
self_fk._schema_item_copy(other_fk)
return self._schema_item_copy(fkc)
class PrimaryKeyConstraint(ColumnCollectionConstraint):
"""A table-level PRIMARY KEY constraint.
The :class:`.PrimaryKeyConstraint` object is present automatically
on any :class:`_schema.Table` object; it is assigned a set of
:class:`_schema.Column` objects corresponding to those marked with
the :paramref:`_schema.Column.primary_key` flag::
>>> my_table = Table('mytable', metadata,
... Column('id', Integer, primary_key=True),
... Column('version_id', Integer, primary_key=True),
... Column('data', String(50))
... )
>>> my_table.primary_key
PrimaryKeyConstraint(
Column('id', Integer(), table=<mytable>,
primary_key=True, nullable=False),
Column('version_id', Integer(), table=<mytable>,
primary_key=True, nullable=False)
)
The primary key of a :class:`_schema.Table` can also be specified by using
a :class:`.PrimaryKeyConstraint` object explicitly; in this mode of usage,
the "name" of the constraint can also be specified, as well as other
options which may be recognized by dialects::
my_table = Table('mytable', metadata,
Column('id', Integer),
Column('version_id', Integer),
Column('data', String(50)),
PrimaryKeyConstraint('id', 'version_id',
name='mytable_pk')
)
The two styles of column-specification should generally not be mixed.
    A warning is emitted if the columns present in the
:class:`.PrimaryKeyConstraint`
don't match the columns that were marked as ``primary_key=True``, if both
are present; in this case, the columns are taken strictly from the
:class:`.PrimaryKeyConstraint` declaration, and those columns otherwise
marked as ``primary_key=True`` are ignored. This behavior is intended to
be backwards compatible with previous behavior.
.. versionchanged:: 0.9.2 Using a mixture of columns within a
:class:`.PrimaryKeyConstraint` in addition to columns marked as
``primary_key=True`` now emits a warning if the lists don't match.
The ultimate behavior of ignoring those columns marked with the flag
only is currently maintained for backwards compatibility; this warning
may raise an exception in a future release.
For the use case where specific options are to be specified on the
:class:`.PrimaryKeyConstraint`, but the usual style of using
``primary_key=True`` flags is still desirable, an empty
:class:`.PrimaryKeyConstraint` may be specified, which will take on the
primary key column collection from the :class:`_schema.Table` based on the
flags::
my_table = Table('mytable', metadata,
Column('id', Integer, primary_key=True),
Column('version_id', Integer, primary_key=True),
Column('data', String(50)),
PrimaryKeyConstraint(name='mytable_pk',
mssql_clustered=True)
)
.. versionadded:: 0.9.2 an empty :class:`.PrimaryKeyConstraint` may now
be specified for the purposes of establishing keyword arguments with
the constraint, independently of the specification of "primary key"
columns within the :class:`_schema.Table` itself; columns marked as
``primary_key=True`` will be gathered into the empty constraint's
column collection.
"""
__visit_name__ = "primary_key_constraint"
def __init__(self, *columns, **kw):
self._implicit_generated = kw.pop("_implicit_generated", False)
super(PrimaryKeyConstraint, self).__init__(*columns, **kw)
def _set_parent(self, table, **kw):
super(PrimaryKeyConstraint, self)._set_parent(table)
if table.primary_key is not self:
table.constraints.discard(table.primary_key)
table.primary_key = self
table.constraints.add(self)
table_pks = [c for c in table.c if c.primary_key]
if self.columns and table_pks and set(table_pks) != set(self.columns):
util.warn(
"Table '%s' specifies columns %s as primary_key=True, "
"not matching locally specified columns %s; setting the "
"current primary key columns to %s. This warning "
"may become an exception in a future release"
% (
table.name,
", ".join("'%s'" % c.name for c in table_pks),
", ".join("'%s'" % c.name for c in self.columns),
", ".join("'%s'" % c.name for c in self.columns),
)
)
table_pks[:] = []
for c in self.columns:
c.primary_key = True
if c._user_defined_nullable is NULL_UNSPECIFIED:
c.nullable = False
if table_pks:
self.columns.extend(table_pks)
def _reload(self, columns):
"""repopulate this :class:`.PrimaryKeyConstraint` given
a set of columns.
Existing columns in the table that are marked as primary_key=True
are maintained.
Also fires a new event.
This is basically like putting a whole new
:class:`.PrimaryKeyConstraint` object on the parent
:class:`_schema.Table` object without actually replacing the object.
The ordering of the given list of columns is also maintained; these
columns will be appended to the list of columns after any which
are already present.
"""
# set the primary key flag on new columns.
# note any existing PK cols on the table also have their
# flag still set.
for col in columns:
col.primary_key = True
self.columns.extend(columns)
PrimaryKeyConstraint._autoincrement_column._reset(self)
self._set_parent_with_dispatch(self.table)
def _replace(self, col):
PrimaryKeyConstraint._autoincrement_column._reset(self)
self.columns.replace(col)
self.dispatch._sa_event_column_added_to_pk_constraint(self, col)
@property
def columns_autoinc_first(self):
autoinc = self._autoincrement_column
if autoinc is not None:
return [autoinc] + [c for c in self.columns if c is not autoinc]
else:
return list(self.columns)
@util.memoized_property
def _autoincrement_column(self):
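        # locate the column eligible for autoincrement, if any: an explicit
        # autoincrement=True column always wins; otherwise "auto" applies
        # only to a lone primary key column of integer/numeric affinity
        # with no conflicting default, server default, or foreign key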
def _validate_autoinc(col, autoinc_true):
if col.type._type_affinity is None or not issubclass(
col.type._type_affinity,
(
type_api.INTEGERTYPE._type_affinity,
type_api.NUMERICTYPE._type_affinity,
),
):
if autoinc_true:
raise exc.ArgumentError(
"Column type %s on column '%s' is not "
"compatible with autoincrement=True" % (col.type, col)
)
else:
return False
elif (
not isinstance(col.default, (type(None), Sequence))
and not autoinc_true
):
return False
elif (
col.server_default is not None
and not isinstance(col.server_default, Identity)
and not autoinc_true
):
return False
elif col.foreign_keys and col.autoincrement not in (
True,
"ignore_fk",
):
return False
return True
if len(self.columns) == 1:
col = list(self.columns)[0]
if col.autoincrement is True:
_validate_autoinc(col, True)
return col
elif (
col.autoincrement
in (
"auto",
"ignore_fk",
)
and _validate_autoinc(col, False)
):
return col
else:
autoinc = None
for col in self.columns:
if col.autoincrement is True:
_validate_autoinc(col, True)
if autoinc is not None:
raise exc.ArgumentError(
"Only one Column may be marked "
"autoincrement=True, found both %s and %s."
% (col.name, autoinc.name)
)
else:
autoinc = col
return autoinc
class UniqueConstraint(ColumnCollectionConstraint):
"""A table-level UNIQUE constraint.
Defines a single column or composite UNIQUE constraint. For a no-frills,
single column constraint, adding ``unique=True`` to the ``Column``
definition is a shorthand equivalent for an unnamed, single column
UniqueConstraint.
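    E.g., as a sketch (column and constraint names are illustrative)::
        UniqueConstraint("email")
        UniqueConstraint("first_name", "last_name", name="uq_fullname")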
"""
__visit_name__ = "unique_constraint"
class Index(DialectKWArgs, ColumnCollectionMixin, SchemaItem):
"""A table-level INDEX.
Defines a composite (one or more column) INDEX.
E.g.::
sometable = Table("sometable", metadata,
Column("name", String(50)),
Column("address", String(100))
)
Index("some_index", sometable.c.name)
For a no-frills, single column index, adding
:class:`_schema.Column` also supports ``index=True``::
sometable = Table("sometable", metadata,
Column("name", String(50), index=True)
)
For a composite index, multiple columns can be specified::
Index("some_index", sometable.c.name, sometable.c.address)
Functional indexes are supported as well, typically by using the
:data:`.func` construct in conjunction with table-bound
:class:`_schema.Column` objects::
Index("some_index", func.lower(sometable.c.name))
An :class:`.Index` can also be manually associated with a
:class:`_schema.Table`,
either through inline declaration or using
:meth:`_schema.Table.append_constraint`. When this approach is used,
the names
of the indexed columns can be specified as strings::
Table("sometable", metadata,
Column("name", String(50)),
Column("address", String(100)),
Index("some_index", "name", "address")
)
To support functional or expression-based indexes in this form, the
:func:`_expression.text` construct may be used::
from sqlalchemy import text
Table("sometable", metadata,
Column("name", String(50)),
Column("address", String(100)),
Index("some_index", text("lower(name)"))
)
.. versionadded:: 0.9.5 the :func:`_expression.text`
construct may be used to
specify :class:`.Index` expressions, provided the :class:`.Index`
is explicitly associated with the :class:`_schema.Table`.
.. seealso::
:ref:`schema_indexes` - General information on :class:`.Index`.
:ref:`postgresql_indexes` - PostgreSQL-specific options available for
the :class:`.Index` construct.
:ref:`mysql_indexes` - MySQL-specific options available for the
:class:`.Index` construct.
:ref:`mssql_indexes` - MSSQL-specific options available for the
:class:`.Index` construct.
"""
__visit_name__ = "index"
def __init__(self, name, *expressions, **kw):
r"""Construct an index object.
:param name:
The name of the index
:param \*expressions:
Column expressions to include in the index. The expressions
are normally instances of :class:`_schema.Column`, but may also
be arbitrary SQL expressions which ultimately refer to a
:class:`_schema.Column`.
:param unique=False:
Keyword only argument; if True, create a unique index.
:param quote=None:
Keyword only argument; whether to apply quoting to the name of
the index. Works in the same manner as that of
:paramref:`_schema.Column.quote`.
:param info=None: Optional data dictionary which will be populated
into the :attr:`.SchemaItem.info` attribute of this object.
.. versionadded:: 1.0.0
:param \**kw: Additional keyword arguments not mentioned above are
dialect specific, and passed in the form
``<dialectname>_<argname>``. See the documentation regarding an
individual dialect at :ref:`dialect_toplevel` for detail on
documented arguments.
"""
self.table = table = None
self.name = quoted_name(name, kw.pop("quote", None))
self.unique = kw.pop("unique", False)
_column_flag = kw.pop("_column_flag", False)
if "info" in kw:
self.info = kw.pop("info")
# TODO: consider "table" argument being public, but for
# the purpose of the fix here, it starts as private.
if "_table" in kw:
table = kw.pop("_table")
self._validate_dialect_kwargs(kw)
self.expressions = []
# will call _set_parent() if table-bound column
# objects are present
ColumnCollectionMixin.__init__(
self,
*expressions,
_column_flag=_column_flag,
_gather_expressions=self.expressions
)
if table is not None:
self._set_parent(table)
def _set_parent(self, table, **kw):
ColumnCollectionMixin._set_parent(self, table)
if self.table is not None and table is not self.table:
raise exc.ArgumentError(
"Index '%s' is against table '%s', and "
"cannot be associated with table '%s'."
% (self.name, self.table.description, table.description)
)
self.table = table
table.indexes.add(self)
expressions = self.expressions
col_expressions = self._col_expressions(table)
assert len(expressions) == len(col_expressions)
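        # swap in the resolved Column objects for entries that were given as
        # strings; genuine SQL expressions (e.g. function calls or text())
        # are kept as-is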
self.expressions = [
expr if isinstance(expr, ClauseElement) else colexpr
for expr, colexpr in zip(expressions, col_expressions)
]
@property
def bind(self):
"""Return the connectable associated with this Index."""
return self.table.bind
def create(self, bind=None, checkfirst=False):
"""Issue a ``CREATE`` statement for this
:class:`.Index`, using the given :class:`.Connectable`
for connectivity.
.. note:: the "bind" argument will be required in
SQLAlchemy 2.0.
.. seealso::
:meth:`_schema.MetaData.create_all`.
"""
if bind is None:
bind = _bind_or_error(self)
bind._run_ddl_visitor(ddl.SchemaGenerator, self, checkfirst=checkfirst)
return self
def drop(self, bind=None, checkfirst=False):
"""Issue a ``DROP`` statement for this
:class:`.Index`, using the given :class:`.Connectable`
for connectivity.
.. note:: the "bind" argument will be required in
SQLAlchemy 2.0.
.. seealso::
:meth:`_schema.MetaData.drop_all`.
"""
if bind is None:
bind = _bind_or_error(self)
bind._run_ddl_visitor(ddl.SchemaDropper, self, checkfirst=checkfirst)
def __repr__(self):
return "Index(%s)" % (
", ".join(
[repr(self.name)]
+ [repr(e) for e in self.expressions]
+ (self.unique and ["unique=True"] or [])
)
)
DEFAULT_NAMING_CONVENTION = util.immutabledict({"ix": "ix_%(column_0_label)s"})
class MetaData(SchemaItem):
"""A collection of :class:`_schema.Table`
objects and their associated schema
constructs.
Holds a collection of :class:`_schema.Table` objects as well as
an optional binding to an :class:`_engine.Engine` or
:class:`_engine.Connection`. If bound, the :class:`_schema.Table` objects
in the collection and their columns may participate in implicit SQL
execution.
The :class:`_schema.Table` objects themselves are stored in the
:attr:`_schema.MetaData.tables` dictionary.
:class:`_schema.MetaData` is a thread-safe object for read operations.
Construction of new tables within a single :class:`_schema.MetaData`
object,
either explicitly or via reflection, may not be completely thread-safe.
.. seealso::
:ref:`metadata_describing` - Introduction to database metadata
"""
__visit_name__ = "metadata"
@util.deprecated_params(
bind=(
"2.0",
"The :paramref:`_schema.MetaData.bind` argument is deprecated and "
"will be removed in SQLAlchemy 2.0.",
),
)
def __init__(
self,
bind=None,
schema=None,
quote_schema=None,
naming_convention=None,
info=None,
):
"""Create a new MetaData object.
:param bind:
An Engine or Connection to bind to. May also be a string or URL
        instance; these are passed to :func:`_sa.create_engine` and
this :class:`_schema.MetaData` will
be bound to the resulting engine.
:param schema:
The default schema to use for the :class:`_schema.Table`,
:class:`.Sequence`, and potentially other objects associated with
this :class:`_schema.MetaData`. Defaults to ``None``.
.. seealso::
:ref:`schema_metadata_schema_name` - details on how the
:paramref:`_schema.MetaData.schema` parameter is used.
:paramref:`_schema.Table.schema`
:paramref:`.Sequence.schema`
:param quote_schema:
Sets the ``quote_schema`` flag for those :class:`_schema.Table`,
:class:`.Sequence`, and other objects which make usage of the
local ``schema`` name.
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
.. versionadded:: 1.0.0
:param naming_convention: a dictionary referring to values which
will establish default naming conventions for :class:`.Constraint`
and :class:`.Index` objects, for those objects which are not given
a name explicitly.
The keys of this dictionary may be:
* a constraint or Index class, e.g. the :class:`.UniqueConstraint`,
:class:`_schema.ForeignKeyConstraint` class, the :class:`.Index`
class
* a string mnemonic for one of the known constraint classes;
``"fk"``, ``"pk"``, ``"ix"``, ``"ck"``, ``"uq"`` for foreign key,
primary key, index, check, and unique constraint, respectively.
* the string name of a user-defined "token" that can be used
to define new naming tokens.
The values associated with each "constraint class" or "constraint
mnemonic" key are string naming templates, such as
``"uq_%(table_name)s_%(column_0_name)s"``,
which describe how the name should be composed. The values
associated with user-defined "token" keys should be callables of the
form ``fn(constraint, table)``, which accepts the constraint/index
object and :class:`_schema.Table` as arguments, returning a string
result.
The built-in names are as follows, some of which may only be
available for certain types of constraint:
* ``%(table_name)s`` - the name of the :class:`_schema.Table`
object
associated with the constraint.
* ``%(referred_table_name)s`` - the name of the
:class:`_schema.Table`
object associated with the referencing target of a
:class:`_schema.ForeignKeyConstraint`.
* ``%(column_0_name)s`` - the name of the :class:`_schema.Column`
at
index position "0" within the constraint.
* ``%(column_0N_name)s`` - the name of all :class:`_schema.Column`
objects in order within the constraint, joined without a
separator.
* ``%(column_0_N_name)s`` - the name of all
:class:`_schema.Column`
objects in order within the constraint, joined with an
underscore as a separator.
* ``%(column_0_label)s``, ``%(column_0N_label)s``,
``%(column_0_N_label)s`` - the label of either the zeroth
:class:`_schema.Column` or all :class:`.Columns`, separated with
or without an underscore
* ``%(column_0_key)s``, ``%(column_0N_key)s``,
``%(column_0_N_key)s`` - the key of either the zeroth
:class:`_schema.Column` or all :class:`.Columns`, separated with
or without an underscore
* ``%(referred_column_0_name)s``, ``%(referred_column_0N_name)s``
``%(referred_column_0_N_name)s``, ``%(referred_column_0_key)s``,
``%(referred_column_0N_key)s``, ... column tokens which
render the names/keys/labels of columns that are referenced
by a :class:`_schema.ForeignKeyConstraint`.
* ``%(constraint_name)s`` - a special key that refers to the
existing name given to the constraint. When this key is
present, the :class:`.Constraint` object's existing name will be
replaced with one that is composed from template string that
uses this token. When this token is present, it is required that
the :class:`.Constraint` is given an explicit name ahead of time.
* user-defined: any additional token may be implemented by passing
it along with a ``fn(constraint, table)`` callable to the
naming_convention dictionary.
.. versionadded:: 1.3.0 - added new ``%(column_0N_name)s``,
``%(column_0_N_name)s``, and related tokens that produce
concatenations of names, keys, or labels for all columns referred
to by a given constraint.
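        A minimal sketch of a convention dictionary (the template strings
        shown are illustrative)::
            naming_convention={
                "ix": "ix_%(column_0_label)s",
                "uq": "uq_%(table_name)s_%(column_0_name)s",
                "ck": "ck_%(table_name)s_%(constraint_name)s",
                "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
                "pk": "pk_%(table_name)s",
            }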
.. seealso::
:ref:`constraint_naming_conventions` - for detailed usage
examples.
"""
self.tables = util.FacadeDict()
self.schema = quoted_name(schema, quote_schema)
self.naming_convention = (
naming_convention
if naming_convention
else DEFAULT_NAMING_CONVENTION
)
if info:
self.info = info
self._schemas = set()
self._sequences = {}
self._fk_memos = collections.defaultdict(list)
self.bind = bind
tables = None
"""A dictionary of :class:`_schema.Table`
objects keyed to their name or "table key".
The exact key is that determined by the :attr:`_schema.Table.key`
attribute;
for a table with no :attr:`_schema.Table.schema` attribute,
this is the same
as :attr:`_schema.Table.name`. For a table with a schema,
it is typically of the
form ``schemaname.tablename``.
.. seealso::
:attr:`_schema.MetaData.sorted_tables`
"""
def __repr__(self):
if self.bind:
return "MetaData(bind=%r)" % self.bind
else:
return "MetaData()"
def __contains__(self, table_or_key):
if not isinstance(table_or_key, util.string_types):
table_or_key = table_or_key.key
return table_or_key in self.tables
def _add_table(self, name, schema, table):
key = _get_table_key(name, schema)
self.tables._insert_item(key, table)
if schema:
self._schemas.add(schema)
def _remove_table(self, name, schema):
key = _get_table_key(name, schema)
removed = dict.pop(self.tables, key, None)
if removed is not None:
for fk in removed.foreign_keys:
fk._remove_from_metadata(self)
if self._schemas:
self._schemas = set(
[
t.schema
for t in self.tables.values()
if t.schema is not None
]
)
def __getstate__(self):
return {
"tables": self.tables,
"schema": self.schema,
"schemas": self._schemas,
"sequences": self._sequences,
"fk_memos": self._fk_memos,
"naming_convention": self.naming_convention,
}
def __setstate__(self, state):
self.tables = state["tables"]
self.schema = state["schema"]
self.naming_convention = state["naming_convention"]
self._bind = None
self._sequences = state["sequences"]
self._schemas = state["schemas"]
self._fk_memos = state["fk_memos"]
def is_bound(self):
"""True if this MetaData is bound to an Engine or Connection."""
return self._bind is not None
def bind(self):
"""An :class:`_engine.Engine` or :class:`_engine.Connection`
to which this
:class:`_schema.MetaData` is bound.
Typically, a :class:`_engine.Engine` is assigned to this attribute
so that "implicit execution" may be used, or alternatively
as a means of providing engine binding information to an
ORM :class:`.Session` object::
engine = create_engine("someurl://")
metadata.bind = engine
        .. deprecated:: 1.4
The metadata.bind attribute, as part of the deprecated system
of "implicit execution", is itself deprecated and will be
removed in SQLAlchemy 2.0.
.. seealso::
:ref:`dbengine_implicit` - background on "bound metadata"
"""
return self._bind
@util.preload_module("sqlalchemy.engine.url")
def _bind_to(self, bind):
"""Bind this MetaData to an Engine, Connection, string or URL."""
url = util.preloaded.engine_url
if isinstance(bind, util.string_types + (url.URL,)):
self._bind = sqlalchemy.create_engine(bind)
else:
self._bind = bind
bind = property(bind, _bind_to)
def clear(self):
"""Clear all Table objects from this MetaData."""
dict.clear(self.tables)
self._schemas.clear()
self._fk_memos.clear()
def remove(self, table):
"""Remove the given Table object from this MetaData."""
self._remove_table(table.name, table.schema)
@property
def sorted_tables(self):
"""Returns a list of :class:`_schema.Table` objects sorted in order of
foreign key dependency.
The sorting will place :class:`_schema.Table`
objects that have dependencies
first, before the dependencies themselves, representing the
order in which they can be created. To get the order in which
the tables would be dropped, use the ``reversed()`` Python built-in.
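        E.g., a minimal sketch (``metadata_obj`` stands in for any
        :class:`_schema.MetaData` instance)::
            for table in reversed(metadata_obj.sorted_tables):
                print(table.name)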
.. warning::
The :attr:`.MetaData.sorted_tables` attribute cannot by itself
accommodate automatic resolution of dependency cycles between
tables, which are usually caused by mutually dependent foreign key
constraints. When these cycles are detected, the foreign keys
of these tables are omitted from consideration in the sort.
            A warning is emitted when this condition occurs, which will become
            an exception in a future release. Tables which are not part
of the cycle will still be returned in dependency order.
To resolve these cycles, the
:paramref:`_schema.ForeignKeyConstraint.use_alter` parameter may be
applied to those constraints which create a cycle. Alternatively,
the :func:`_schema.sort_tables_and_constraints` function will
automatically return foreign key constraints in a separate
collection when cycles are detected so that they may be applied
to a schema separately.
.. versionchanged:: 1.3.17 - a warning is emitted when
:attr:`.MetaData.sorted_tables` cannot perform a proper sort
due to cyclical dependencies. This will be an exception in a
future release. Additionally, the sort will continue to return
other tables not involved in the cycle in dependency order which
was not the case previously.
.. seealso::
:func:`_schema.sort_tables`
:func:`_schema.sort_tables_and_constraints`
:attr:`_schema.MetaData.tables`
:meth:`_reflection.Inspector.get_table_names`
:meth:`_reflection.Inspector.get_sorted_table_and_fkc_names`
"""
return ddl.sort_tables(
sorted(self.tables.values(), key=lambda t: t.key)
)
def reflect(
self,
bind=None,
schema=None,
views=False,
only=None,
extend_existing=False,
autoload_replace=True,
resolve_fks=True,
**dialect_kwargs
):
r"""Load all available table definitions from the database.
Automatically creates ``Table`` entries in this ``MetaData`` for any
table available in the database but not yet present in the
``MetaData``. May be called multiple times to pick up tables recently
added to the database, however no special action is taken if a table
in this ``MetaData`` no longer exists in the database.
:param bind:
A :class:`.Connectable` used to access the database; if None, uses
the existing bind on this ``MetaData``, if any.
.. note:: the "bind" argument will be required in
SQLAlchemy 2.0.
:param schema:
Optional, query and reflect tables from an alternate schema.
If None, the schema associated with this :class:`_schema.MetaData`
is used, if any.
:param views:
If True, also reflect views.
:param only:
Optional. Load only a sub-set of available named tables. May be
specified as a sequence of names or a callable.
If a sequence of names is provided, only those tables will be
reflected. An error is raised if a table is requested but not
available. Named tables already present in this ``MetaData`` are
ignored.
If a callable is provided, it will be used as a boolean predicate to
filter the list of potential table names. The callable is called
with a table name and this ``MetaData`` instance as positional
arguments and should return a true value for any table to reflect.
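        For example, an illustrative predicate that reflects only tables
        with a given prefix (``engine`` is assumed to already exist)::
            metadata_obj.reflect(
                bind=engine,
                only=lambda table_name, meta: table_name.startswith("user_"),
            )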
:param extend_existing: Passed along to each :class:`_schema.Table` as
:paramref:`_schema.Table.extend_existing`.
.. versionadded:: 0.9.1
:param autoload_replace: Passed along to each :class:`_schema.Table`
as
:paramref:`_schema.Table.autoload_replace`.
.. versionadded:: 0.9.1
:param resolve_fks: if True, reflect :class:`_schema.Table`
objects linked
to :class:`_schema.ForeignKey` objects located in each
:class:`_schema.Table`.
For :meth:`_schema.MetaData.reflect`,
this has the effect of reflecting
related tables that might otherwise not be in the list of tables
being reflected, for example if the referenced table is in a
different schema or is omitted via the
:paramref:`.MetaData.reflect.only` parameter. When False,
:class:`_schema.ForeignKey` objects are not followed to the
:class:`_schema.Table`
in which they link, however if the related table is also part of the
list of tables that would be reflected in any case, the
:class:`_schema.ForeignKey` object will still resolve to its related
:class:`_schema.Table` after the :meth:`_schema.MetaData.reflect`
operation is
complete. Defaults to True.
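        An illustrative sketch, reflecting a single hypothetical ``orders``
        table without following its foreign keys (``engine`` assumed to
        exist)::
            metadata_obj.reflect(bind=engine, only=["orders"], resolve_fks=False)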
.. versionadded:: 1.3.0
.. seealso::
:paramref:`_schema.Table.resolve_fks`
:param \**dialect_kwargs: Additional keyword arguments not mentioned
above are dialect specific, and passed in the form
``<dialectname>_<argname>``. See the documentation regarding an
individual dialect at :ref:`dialect_toplevel` for detail on
documented arguments.
.. versionadded:: 0.9.2 - Added
:paramref:`.MetaData.reflect.**dialect_kwargs` to support
dialect-level reflection options for all :class:`_schema.Table`
objects reflected.
"""
if bind is None:
bind = _bind_or_error(self)
with inspection.inspect(bind)._inspection_context() as insp:
reflect_opts = {
"autoload_with": insp,
"extend_existing": extend_existing,
"autoload_replace": autoload_replace,
"resolve_fks": resolve_fks,
"_extend_on": set(),
}
reflect_opts.update(dialect_kwargs)
if schema is None:
schema = self.schema
if schema is not None:
reflect_opts["schema"] = schema
available = util.OrderedSet(insp.get_table_names(schema))
if views:
available.update(insp.get_view_names(schema))
if schema is not None:
available_w_schema = util.OrderedSet(
["%s.%s" % (schema, name) for name in available]
)
else:
available_w_schema = available
current = set(self.tables)
if only is None:
load = [
name
for name, schname in zip(available, available_w_schema)
if extend_existing or schname not in current
]
elif callable(only):
load = [
name
for name, schname in zip(available, available_w_schema)
if (extend_existing or schname not in current)
and only(name, self)
]
else:
missing = [name for name in only if name not in available]
if missing:
s = schema and (" schema '%s'" % schema) or ""
raise exc.InvalidRequestError(
"Could not reflect: requested table(s) not available "
"in %r%s: (%s)" % (bind.engine, s, ", ".join(missing))
)
load = [
name
for name in only
if extend_existing or name not in current
]
for name in load:
try:
Table(name, self, **reflect_opts)
except exc.UnreflectableTableError as uerr:
util.warn("Skipping table %s: %s" % (name, uerr))
def create_all(self, bind=None, tables=None, checkfirst=True):
"""Create all tables stored in this metadata.
Conditional by default, will not attempt to recreate tables already
present in the target database.
:param bind:
A :class:`.Connectable` used to access the
database; if None, uses the existing bind on this ``MetaData``, if
any.
.. note:: the "bind" argument will be required in
SQLAlchemy 2.0.
:param tables:
Optional list of ``Table`` objects, which is a subset of the total
tables in the ``MetaData`` (others are ignored).
:param checkfirst:
Defaults to True, don't issue CREATEs for tables already present
in the target database.
"""
if bind is None:
bind = _bind_or_error(self)
bind._run_ddl_visitor(
ddl.SchemaGenerator, self, checkfirst=checkfirst, tables=tables
)
def drop_all(self, bind=None, tables=None, checkfirst=True):
"""Drop all tables stored in this metadata.
Conditional by default, will not attempt to drop tables not present in
the target database.
:param bind:
A :class:`.Connectable` used to access the
database; if None, uses the existing bind on this ``MetaData``, if
any.
.. note:: the "bind" argument will be required in
SQLAlchemy 2.0.
:param tables:
Optional list of ``Table`` objects, which is a subset of the
total tables in the ``MetaData`` (others are ignored).
:param checkfirst:
Defaults to True, only issue DROPs for tables confirmed to be
present in the target database.
"""
if bind is None:
bind = _bind_or_error(self)
bind._run_ddl_visitor(
ddl.SchemaDropper, self, checkfirst=checkfirst, tables=tables
)
@util.deprecated_cls(
"1.4",
":class:`.ThreadLocalMetaData` is deprecated and will be removed "
"in a future release.",
constructor="__init__",
)
class ThreadLocalMetaData(MetaData):
"""A MetaData variant that presents a different ``bind`` in every thread.
Makes the ``bind`` property of the MetaData a thread-local value, allowing
this collection of tables to be bound to different ``Engine``
implementations or connections in each thread.
The ThreadLocalMetaData starts off bound to None in each thread. Binds
must be made explicitly by assigning to the ``bind`` property or using
``connect()``. You can also re-bind dynamically multiple times per
thread, just like a regular ``MetaData``.
"""
__visit_name__ = "metadata"
def __init__(self):
"""Construct a ThreadLocalMetaData."""
self.context = util.threading.local()
self.__engines = {}
super(ThreadLocalMetaData, self).__init__()
def bind(self):
"""The bound Engine or Connection for this thread.
This property may be assigned an Engine or Connection, or assigned a
string or URL to automatically create a basic Engine for this bind
with ``create_engine()``."""
return getattr(self.context, "_engine", None)
@util.preload_module("sqlalchemy.engine.url")
def _bind_to(self, bind):
"""Bind to a Connectable in the caller's thread."""
url = util.preloaded.engine_url
if isinstance(bind, util.string_types + (url.URL,)):
try:
self.context._engine = self.__engines[bind]
except KeyError:
e = sqlalchemy.create_engine(bind)
self.__engines[bind] = e
self.context._engine = e
else:
# TODO: this is squirrely. we shouldn't have to hold onto engines
# in a case like this
if bind not in self.__engines:
self.__engines[bind] = bind
self.context._engine = bind
bind = property(bind, _bind_to)
def is_bound(self):
"""True if there is a bind for this thread."""
return (
hasattr(self.context, "_engine")
and self.context._engine is not None
)
def dispose(self):
"""Dispose all bound engines, in all thread contexts."""
for e in self.__engines.values():
if hasattr(e, "dispose"):
e.dispose()
class Computed(FetchedValue, SchemaItem):
"""Defines a generated column, i.e. "GENERATED ALWAYS AS" syntax.
The :class:`.Computed` construct is an inline construct added to the
argument list of a :class:`_schema.Column` object::
from sqlalchemy import Computed
Table('square', metadata_obj,
Column('side', Float, nullable=False),
Column('area', Float, Computed('side * side'))
)
See the linked documentation below for complete details.
.. versionadded:: 1.3.11
.. seealso::
:ref:`computed_ddl`
"""
__visit_name__ = "computed_column"
@_document_text_coercion(
"sqltext", ":class:`.Computed`", ":paramref:`.Computed.sqltext`"
)
def __init__(self, sqltext, persisted=None):
"""Construct a GENERATED ALWAYS AS DDL construct to accompany a
:class:`_schema.Column`.
:param sqltext:
A string containing the column generation expression, which will be
used verbatim, or a SQL expression construct, such as a
:func:`_expression.text`
object. If given as a string, the object is converted to a
:func:`_expression.text` object.
:param persisted:
Optional, controls how this column should be persisted by the
database. Possible values are:
* ``None``, the default, it will use the default persistence
defined by the database.
* ``True``, will render ``GENERATED ALWAYS AS ... STORED``, or the
equivalent for the target database if supported.
* ``False``, will render ``GENERATED ALWAYS AS ... VIRTUAL``, or
the equivalent for the target database if supported.
Specifying ``True`` or ``False`` may raise an error when the DDL
is emitted to the target database if the database does not support
that persistence option. Leaving this parameter at its default
of ``None`` is guaranteed to succeed for all databases that support
``GENERATED ALWAYS AS``.
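        For illustration only (column and expression names are arbitrary),
        a stored generated column would be declared as::
            Column('area', Float, Computed('side * side', persisted=True))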
"""
self.sqltext = coercions.expect(roles.DDLExpressionRole, sqltext)
self.persisted = persisted
self.column = None
def _set_parent(self, parent, **kw):
if not isinstance(
parent.server_default, (type(None), Computed)
) or not isinstance(parent.server_onupdate, (type(None), Computed)):
raise exc.ArgumentError(
"A generated column cannot specify a server_default or a "
"server_onupdate argument"
)
self.column = parent
parent.computed = self
self.column.server_onupdate = self
self.column.server_default = self
def _as_for_update(self, for_update):
return self
@util.deprecated(
"1.4",
"The :meth:`_schema.Computed.copy` method is deprecated "
"and will be removed in a future release.",
)
def copy(self, target_table=None, **kw):
return self._copy(target_table, **kw)
def _copy(self, target_table=None, **kw):
sqltext = _copy_expression(
self.sqltext,
self.column.table if self.column is not None else None,
target_table,
)
g = Computed(sqltext, persisted=self.persisted)
return self._schema_item_copy(g)
class Identity(IdentityOptions, FetchedValue, SchemaItem):
"""Defines an identity column, i.e. "GENERATED { ALWAYS | BY DEFAULT }
AS IDENTITY" syntax.
The :class:`.Identity` construct is an inline construct added to the
argument list of a :class:`_schema.Column` object::
from sqlalchemy import Identity
Table('foo', metadata_obj,
            Column('id', Integer, Identity()),
Column('description', Text),
)
See the linked documentation below for complete details.
.. versionadded:: 1.4
.. seealso::
:ref:`identity_ddl`
"""
__visit_name__ = "identity_column"
def __init__(
self,
always=False,
on_null=None,
start=None,
increment=None,
minvalue=None,
maxvalue=None,
nominvalue=None,
nomaxvalue=None,
cycle=None,
cache=None,
order=None,
):
"""Construct a GENERATED { ALWAYS | BY DEFAULT } AS IDENTITY DDL
construct to accompany a :class:`_schema.Column`.
See the :class:`.Sequence` documentation for a complete description
of most parameters.
.. note::
MSSQL supports this construct as the preferred alternative to
            generate an IDENTITY on a column, but it uses non-standard
            syntax that only supports :paramref:`_schema.Identity.start`
and :paramref:`_schema.Identity.increment`.
All other parameters are ignored.
:param always:
A boolean, that indicates the type of identity column.
If ``False`` is specified, the default, then the user-specified
value takes precedence.
If ``True`` is specified, a user-specified value is not accepted (
on some backends, like PostgreSQL, OVERRIDING SYSTEM VALUE, or
similar, may be specified in an INSERT to override the sequence
value).
Some backends also have a default value for this parameter,
``None`` can be used to omit rendering this part in the DDL. It
will be treated as ``False`` if a backend does not have a default
value.
:param on_null:
          Set to ``True`` to specify ON NULL in conjunction with an
``always=False`` identity column. This option is only supported on
some backends, like Oracle.
:param start: the starting index of the sequence.
:param increment: the increment value of the sequence.
:param minvalue: the minimum value of the sequence.
:param maxvalue: the maximum value of the sequence.
:param nominvalue: no minimum value of the sequence.
:param nomaxvalue: no maximum value of the sequence.
:param cycle: allows the sequence to wrap around when the maxvalue
or minvalue has been reached.
:param cache: optional integer value; number of future values in the
sequence which are calculated in advance.
:param order: optional boolean value; if true, renders the
ORDER keyword.
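        As an illustrative sketch (names and values are arbitrary), a
        "generated by default" identity starting at 100 and incrementing
        by 10 would be declared as::
            Column('id', Integer, Identity(start=100, increment=10))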
"""
IdentityOptions.__init__(
self,
start=start,
increment=increment,
minvalue=minvalue,
maxvalue=maxvalue,
nominvalue=nominvalue,
nomaxvalue=nomaxvalue,
cycle=cycle,
cache=cache,
order=order,
)
self.always = always
self.on_null = on_null
self.column = None
def _set_parent(self, parent, **kw):
if not isinstance(
parent.server_default, (type(None), Identity)
) or not isinstance(parent.server_onupdate, type(None)):
raise exc.ArgumentError(
"A column with an Identity object cannot specify a "
"server_default or a server_onupdate argument"
)
if parent.autoincrement is False:
raise exc.ArgumentError(
"A column with an Identity object cannot specify "
"autoincrement=False"
)
self.column = parent
parent.identity = self
if parent._user_defined_nullable is NULL_UNSPECIFIED:
parent.nullable = False
parent.server_default = self
def _as_for_update(self, for_update):
return self
@util.deprecated(
"1.4",
"The :meth:`_schema.Identity.copy` method is deprecated "
"and will be removed in a future release.",
)
def copy(self, **kw):
return self._copy(**kw)
def _copy(self, **kw):
i = Identity(
always=self.always,
on_null=self.on_null,
start=self.start,
increment=self.increment,
minvalue=self.minvalue,
maxvalue=self.maxvalue,
nominvalue=self.nominvalue,
nomaxvalue=self.nomaxvalue,
cycle=self.cycle,
cache=self.cache,
order=self.order,
)
return self._schema_item_copy(i)
|
{
"content_hash": "4c3452edc7bfee906a72d62c68f640f8",
"timestamp": "",
"source": "github",
"line_count": 5261,
"max_line_length": 101,
"avg_line_length": 37.08648545903821,
"alnum_prop": 0.5908913854606584,
"repo_name": "j5int/sqlalchemy",
"id": "dde665cbde73becf46cf00503992c838c671f289",
"size": "195347",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/sqlalchemy/sql/schema.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "63151"
},
{
"name": "Python",
"bytes": "15339979"
}
],
"symlink_target": ""
}
|
""" Snob, 'because it arbitrarily puts things in classes' -- C.S. Wallace. """
import sys
from setuptools import setup, find_packages
from codecs import open
from os import path, system
from re import compile as re_compile
# For convenience.
if sys.argv[-1] == "publish":
system("python setup.py sdist upload")
sys.exit()
def read(filename):
kwds = {"encoding": "utf-8"} if sys.version_info[0] >= 3 else {}
with open(filename, **kwds) as fp:
contents = fp.read()
return contents
# Get the version information.
here = path.abspath(path.dirname(__file__))
vre = re_compile("__version__ = \"(.*?)\"")
version = vre.findall(read(path.join(here, "snob", "__init__.py")))[0]
setup(
name="snob",
version=version,
author="Andrew R. Casey",
author_email="andrew.casey@monash.edu",
description="Put things in classes",
long_description=read(path.join(here, "README.md")),
license="MIT",
classifiers=[
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.6",
"Topic :: Scientific/Engineering :: Astronomy",
"Topic :: Scientific/Engineering :: Physics"
],
keywords="MML snob minimum message length",
packages=find_packages(exclude=["tests"]),
install_requires=["numpy", "scipy", "six"],
extras_require={
"test": ["coverage"]
},
package_data={
"": ["LICENSE"],
},
include_package_data=True,
data_files=None
)
|
{
"content_hash": "4dcda2b9c86a3d5cbbfeca717f2ca1c9",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 78,
"avg_line_length": 30.547169811320753,
"alnum_prop": 0.6219888820259419,
"repo_name": "andycasey/snob",
"id": "ef114dd6e0ff4fc23b937b3eafa63a2dc3affab9",
"size": "1620",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2904054"
},
{
"name": "MAXScript",
"bytes": "15074"
},
{
"name": "Makefile",
"bytes": "2199"
},
{
"name": "Python",
"bytes": "489030"
},
{
"name": "Shell",
"bytes": "2964"
},
{
"name": "TeX",
"bytes": "490057"
}
],
"symlink_target": ""
}
|
from touchdown.tests.stubs.aws import NetworkAclStubber
from .fixture import AwsFixture
class NetworkAclFixture(AwsFixture):
def __init__(self, goal, aws, vpc):
super(NetworkAclFixture, self).__init__(goal, aws)
self.vpc = vpc
def __enter__(self):
self.network_acl = self.fixtures.enter_context(
NetworkAclStubber(
self.goal.get_service(
self.vpc.get_network_acl(name="test-network-acl"), "describe"
)
)
)
self.network_acl.add_describe_network_acls_one_response_by_name()
return self.network_acl.resource
|
{
"content_hash": "907a54f3b1ac4c27d7cea4019823a76c",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 81,
"avg_line_length": 30.571428571428573,
"alnum_prop": 0.6043613707165109,
"repo_name": "yaybu/touchdown",
"id": "985ae14017557bd60382075b4c0dc4cdbf866e2c",
"size": "1220",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "touchdown/tests/fixtures/aws/network_acl.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "760"
},
{
"name": "Python",
"bytes": "1047173"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from builtins import object
from builtins import str
from lib.common import helpers
class Module(object):
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Invoke-CredentialInjection',
'Author': ['@JosephBialek'],
'Description': ("Runs PowerSploit's Invoke-CredentialInjection to "
"create logons with clear-text credentials without "
"triggering a suspicious Event ID 4648 (Explicit "
"Credential Logon)."),
'Software': 'S0194',
'Techniques': ['T1214', 'T1003'],
'Background' : False,
'OutputExtension' : None,
'NeedsAdmin' : True,
'OpsecSafe' : True,
'Language' : 'powershell',
'MinLanguageVersion' : '2',
'Comments': [
'https://github.com/PowerShellMafia/PowerSploit/blob/master/Exfiltration/Invoke-CredentialInjection.ps1'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'NewWinLogon' : {
'Description' : 'Switch. Create a new WinLogon.exe process.',
'Required' : False,
'Value' : ''
},
'ExistingWinLogon' : {
'Description' : 'Switch. Use an existing WinLogon.exe process',
'Required' : False,
'Value' : ''
},
'CredID' : {
'Description' : 'CredID from the store to use.',
'Required' : False,
'Value' : ''
},
'DomainName' : {
'Description' : 'The domain name of the user account.',
'Required' : False,
'Value' : ''
},
'UserName' : {
'Description' : 'Username to log in with.',
'Required' : False,
'Value' : ''
},
'Password' : {
'Description' : 'Password of the user.',
'Required' : False,
'Value' : ''
},
'LogonType' : {
'Description' : 'Logon type of the injected logon (Interactive, RemoteInteractive, or NetworkCleartext)',
'Required' : False,
'Value' : 'RemoteInteractive'
},
'AuthPackage' : {
'Description' : 'authentication package to use (Kerberos or Msv1_0)',
'Required' : False,
'Value' : 'Kerberos'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
# read in the common module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/credentials/Invoke-CredentialInjection.ps1"
if obfuscate:
helpers.obfuscate_module(moduleSource=moduleSource, obfuscationCommand=obfuscationCommand)
moduleSource = moduleSource.replace("module_source", "obfuscated_module_source")
try:
f = open(moduleSource, 'r')
except:
print(helpers.color("[!] Could not read module source path at: " + str(moduleSource)))
return ""
moduleCode = f.read()
f.close()
script = moduleCode
scriptEnd = "Invoke-CredentialInjection"
if self.options["NewWinLogon"]['Value'] == "" and self.options["ExistingWinLogon"]['Value'] == "":
print(helpers.color("[!] Either NewWinLogon or ExistingWinLogon must be specified"))
return ""
# if a credential ID is specified, try to parse
credID = self.options["CredID"]['Value']
if credID != "":
if not self.mainMenu.credentials.is_credential_valid(credID):
print(helpers.color("[!] CredID is invalid!"))
return ""
(credID, credType, domainName, userName, password, host, os, sid, notes) = self.mainMenu.credentials.get_credentials(credID)[0]
if credType != "plaintext":
print(helpers.color("[!] A CredID with a plaintext password must be used!"))
return ""
if domainName != "":
self.options["DomainName"]['Value'] = domainName
if userName != "":
self.options["UserName"]['Value'] = userName
if password != "":
self.options["Password"]['Value'] = password
if self.options["DomainName"]['Value'] == "" or self.options["UserName"]['Value'] == "" or self.options["Password"]['Value'] == "":
print(helpers.color("[!] DomainName/UserName/Password or CredID required!"))
return ""
for option,values in self.options.items():
if option.lower() != "agent" and option.lower() != "credid":
if values['Value'] and values['Value'] != '':
if values['Value'].lower() == "true":
# if we're just adding a switch
scriptEnd += " -" + str(option)
else:
scriptEnd += " -" + str(option) + " " + str(values['Value'])
if obfuscate:
scriptEnd = helpers.obfuscate(self.mainMenu.installPath, psScript=scriptEnd, obfuscationCommand=obfuscationCommand)
script += scriptEnd
script = helpers.keyword_obfuscation(script)
return script
|
{
"content_hash": "936118d903d0964f08fb5ea1b13b7215",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 139,
"avg_line_length": 37.80357142857143,
"alnum_prop": 0.500236183278224,
"repo_name": "byt3bl33d3r/Empire",
"id": "1d329559a87e7872bba6385d9dd1e498699b229e",
"size": "6351",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/modules/powershell/credentials/credential_injection.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1966"
},
{
"name": "Java",
"bytes": "496"
},
{
"name": "Objective-C",
"bytes": "2664"
},
{
"name": "PHP",
"bytes": "2198"
},
{
"name": "PowerShell",
"bytes": "16998705"
},
{
"name": "Python",
"bytes": "2789955"
},
{
"name": "Shell",
"bytes": "10123"
}
],
"symlink_target": ""
}
|
from core_app import utils
from sure import expect
def test_url_parse1():
samples = [
("https://www.google.kz/?gws_rd="
"cr&ei=OowNVaPxIdXdavjsgcgH", "google.kz/"),
("https://www.google.kz", "google.kz/"),
("http://www.google.kz", "google.kz/"),
("http://google.kz", "google.kz/"),
("google.kz", "google.kz/"),
("google.kz/index.html", "google.kz/index.html"),
]
for tst, ans in samples:
res = utils.clear_uri(tst)
expect(res).should.be.equal(ans)
def test_url_parse2():
utils.clear_uri.when.called_with('').should.throw(AssertionError)
|
{
"content_hash": "125e56ff1035c3724a71aee3ac5e714d",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 69,
"avg_line_length": 30,
"alnum_prop": 0.5793650793650794,
"repo_name": "giAtSDU/disqus_be",
"id": "d98694932e5288290e871c37fe0ecb198bf0edc9",
"size": "630",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "disqus_be/core_app/tests/test_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7019"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import print_function
from django.core.management.base import BaseCommand
from zerver.models import Realm, get_realm
from zerver.lib.actions import do_add_realm_emoji, do_remove_realm_emoji
import sys
import six
class Command(BaseCommand):
help = """Manage emoji for the specified realm
Example: python2.7 manage.py realm_emoji --realm=zulip.com --op=add robotheart https://humbug-user-avatars.s3.amazonaws.com/95ffa70fe0e7aea3c052ba91b38a28d8779f5705
Example: python2.7 manage.py realm_emoji --realm=zulip.com --op=remove robotheart
Example: python2.7 manage.py realm_emoji --realm=zulip.com --op=show
"""
def add_arguments(self, parser):
parser.add_argument('-r', '--realm',
dest='domain',
type=str,
required=True,
help='The name of the realm.')
parser.add_argument('--op',
dest='op',
type=str,
default="show",
help='What operation to do (add, show, remove).')
parser.add_argument('name', metavar='<name>', type=str, nargs='?', default=None,
help="name of the emoji")
parser.add_argument('img_url', metavar='<image url>', type=str, nargs='?',
help="URL of image to display for the emoji")
def handle(self, *args, **options):
realm = get_realm(options["domain"])
if options["op"] == "show":
for name, url in six.iteritems(realm.get_emoji()):
print(name, url)
sys.exit(0)
name = options['name']
if name is None:
self.print_help("python2.7 manage.py", "realm_emoji")
sys.exit(1)
if options["op"] == "add":
img_url = options['img_url']
if img_url is None:
self.print_help("python2.7 manage.py", "realm_emoji")
sys.exit(1)
do_add_realm_emoji(realm, name, img_url)
sys.exit(0)
elif options["op"] == "remove":
do_remove_realm_emoji(realm, name)
sys.exit(0)
else:
self.print_help("python2.7 manage.py", "realm_emoji")
sys.exit(1)
|
{
"content_hash": "fda675f362eda6a3c9880a718547a85e",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 165,
"avg_line_length": 40.672413793103445,
"alnum_prop": 0.5476896990250106,
"repo_name": "ryansnowboarder/zulip",
"id": "4c9ca8aa008bf1d24295c7cfa135ab7c1c572295",
"size": "2359",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "zerver/management/commands/realm_emoji.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "164"
},
{
"name": "CSS",
"bytes": "182566"
},
{
"name": "CoffeeScript",
"bytes": "18435"
},
{
"name": "Groovy",
"bytes": "5515"
},
{
"name": "HTML",
"bytes": "385288"
},
{
"name": "JavaScript",
"bytes": "1571750"
},
{
"name": "Nginx",
"bytes": "1228"
},
{
"name": "PHP",
"bytes": "18930"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "383634"
},
{
"name": "Puppet",
"bytes": "95624"
},
{
"name": "Python",
"bytes": "1863879"
},
{
"name": "Ruby",
"bytes": "255867"
},
{
"name": "Shell",
"bytes": "32357"
}
],
"symlink_target": ""
}
|
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
cwd = os.getcwd()
parent = os.path.dirname(cwd)
sys.path.append(parent)
import zurb_ink
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-zurb-ink'
copyright = u'2015, Rodrigo Machado'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = zurb_ink.__version__
# The full version, including alpha/beta/rc tags.
release = zurb_ink.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-zurb-inkdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-zurb-ink.tex', u'django-zurb-ink Documentation',
u'Rodrigo Machado', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-zurb-ink', u'django-zurb-ink Documentation',
[u'Rodrigo Machado'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-zurb-ink', u'django-zurb-ink Documentation',
u'Rodrigo Machado', 'django-zurb-ink', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
{
"content_hash": "33191b28fc28cf7c0a0bd4eafa4c5e5d",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 80,
"avg_line_length": 32.17427385892116,
"alnum_prop": 0.7037657982976528,
"repo_name": "rcmachado/django-zurb-ink",
"id": "70a263aeb1d9b1d64d260fc3ea9fc66b5062303c",
"size": "8175",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "13347"
},
{
"name": "HTML",
"bytes": "14610"
},
{
"name": "Makefile",
"bytes": "1254"
},
{
"name": "Python",
"bytes": "5067"
}
],
"symlink_target": ""
}
|
from .weather import Weather
from .logdata import LogData
|
{
"content_hash": "5178460c6cf0c7cebbecaf8c32884c7d",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 28,
"avg_line_length": 29,
"alnum_prop": 0.8275862068965517,
"repo_name": "jackbrucesimpson/forecast",
"id": "5860518d0e57dd817c71c4827e17f23fc20f8c3d",
"size": "82",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6403"
}
],
"symlink_target": ""
}
|
"""
Definitions related to message data specification.
:copyright: Copyright since 2006 by Oliver Schoenborn, all rights reserved.
:license: BSD, see LICENSE_BSD_Simple.txt for details.
"""
from .listener import getArgs as getListenerArgs
from .validatedefnargs import MessageDataSpecError
from .topicargspecimpl import (
SenderMissingReqdMsgDataError,
SenderUnknownMsgDataError,
ArgsInfo
)
def topicArgsFromCallable(_callable):
"""Get the topic message data names and list of those that are required,
by introspecting given callable. Returns a pair, (args, required)
where args is a dictionary of allowed message data names vs docstring,
and required states which ones are required rather than optional."""
argsInfo = getListenerArgs(_callable)
required = argsInfo.getRequiredArgs()
defaultDoc = 'UNDOCUMENTED'
args = dict.fromkeys(argsInfo.allParams, defaultDoc)
return args, required
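# Illustrative sketch only (hypothetical listener; the exact placeholder
# docstrings come from the introspection in getListenerArgs):
#
#     def onEvent(sender, detail=None):
#         pass
#
#     args, required = topicArgsFromCallable(onEvent)
#     # ``args`` maps each accepted message data name to a default docstring;
#     # ``required`` lists the names that have no default value.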
class ArgSpecGiven:
"""
The message data specification (MDS) for a topic.
This consists of each argument name that listener should have in its
call protocol, plus which ones are required in any sendMessage(), and a
documentation string for each argument. This instance will be transformed
into an ArgsInfo object which is basically a superset of that information,
needed to ensure that the arguments specifications satisfy
pubsub policies for chosen API version.
"""
SPEC_GIVEN_NONE = 1 # specification not given
SPEC_GIVEN_ALL = 3 # all args specified
def __init__(self, argsDocs=None, reqdArgs=None):
self.reqdArgs = tuple(reqdArgs or ())
if argsDocs is None:
self.argsSpecType = ArgSpecGiven.SPEC_GIVEN_NONE
self.argsDocs = {}
else:
self.argsSpecType = ArgSpecGiven.SPEC_GIVEN_ALL
self.argsDocs = argsDocs
# check that all args marked as required are in argsDocs
missingArgs = set(self.reqdArgs).difference(self.argsDocs.keys()) # py3: iter keys ok
if missingArgs:
msg = 'Params [%s] missing inherited required args [%%s]' % ','.join(argsDocs.keys()) # iter keys ok
raise MessageDataSpecError(msg, missingArgs)
def setAll(self, allArgsDocs, reqdArgs = None):
self.argsDocs = allArgsDocs
self.reqdArgs = reqdArgs or ()
self.argsSpecType = ArgSpecGiven.SPEC_GIVEN_ALL
def isComplete(self):
"""Returns True if the definition is usable, false otherwise."""
return self.argsSpecType == ArgSpecGiven.SPEC_GIVEN_ALL
def getOptional(self):
return tuple( set( self.argsDocs.keys() ).difference( self.reqdArgs ) )
def __str__(self):
return "%s, %s, %s" % \
(self.argsDocs, self.reqdArgs, self.argsSpecType)
|
{
"content_hash": "0f9d92acf8eb327860118970ef714cb8",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 116,
"avg_line_length": 37.935064935064936,
"alnum_prop": 0.6665525504964054,
"repo_name": "ktan2020/legacy-automation",
"id": "202c973d6baef4e63d5f42c56fa369a22aee0e91",
"size": "2921",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "win/Lib/site-packages/wx-3.0-msw/wx/lib/pubsub/core/topicargspec.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "913"
},
{
"name": "Ada",
"bytes": "289"
},
{
"name": "Assembly",
"bytes": "687"
},
{
"name": "Boo",
"bytes": "540"
},
{
"name": "C",
"bytes": "40116"
},
{
"name": "C#",
"bytes": "474"
},
{
"name": "C++",
"bytes": "393"
},
{
"name": "CSS",
"bytes": "70883"
},
{
"name": "ColdFusion",
"bytes": "1012"
},
{
"name": "Common Lisp",
"bytes": "1034"
},
{
"name": "D",
"bytes": "1858"
},
{
"name": "Eiffel",
"bytes": "426"
},
{
"name": "Erlang",
"bytes": "9243"
},
{
"name": "FORTRAN",
"bytes": "1810"
},
{
"name": "Forth",
"bytes": "182"
},
{
"name": "Groovy",
"bytes": "2366"
},
{
"name": "Haskell",
"bytes": "816"
},
{
"name": "Haxe",
"bytes": "455"
},
{
"name": "Java",
"bytes": "1155"
},
{
"name": "JavaScript",
"bytes": "69444"
},
{
"name": "Lua",
"bytes": "795"
},
{
"name": "Matlab",
"bytes": "1278"
},
{
"name": "OCaml",
"bytes": "350"
},
{
"name": "Objective-C++",
"bytes": "885"
},
{
"name": "PHP",
"bytes": "1411"
},
{
"name": "Pascal",
"bytes": "388"
},
{
"name": "Perl",
"bytes": "252651"
},
{
"name": "Pike",
"bytes": "589"
},
{
"name": "Python",
"bytes": "42085780"
},
{
"name": "R",
"bytes": "1156"
},
{
"name": "Ruby",
"bytes": "480"
},
{
"name": "Scheme",
"bytes": "282"
},
{
"name": "Shell",
"bytes": "30518"
},
{
"name": "Smalltalk",
"bytes": "926"
},
{
"name": "Squirrel",
"bytes": "697"
},
{
"name": "Stata",
"bytes": "302"
},
{
"name": "SystemVerilog",
"bytes": "3145"
},
{
"name": "Tcl",
"bytes": "1039"
},
{
"name": "TeX",
"bytes": "1746"
},
{
"name": "VHDL",
"bytes": "985"
},
{
"name": "Vala",
"bytes": "664"
},
{
"name": "Verilog",
"bytes": "439"
},
{
"name": "Visual Basic",
"bytes": "2142"
},
{
"name": "XSLT",
"bytes": "152770"
},
{
"name": "ooc",
"bytes": "890"
},
{
"name": "xBase",
"bytes": "769"
}
],
"symlink_target": ""
}
|
from tempest_lib import exceptions as lib_exc
from tempest.api.object_storage import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
CONF = config.CONF
QUOTA_BYTES = 10
QUOTA_COUNT = 3
class ContainerQuotasTest(base.BaseObjectTest):
"""Attempts to test the perfect behavior of quotas in a container."""
def setUp(self):
"""Creates and sets a container with quotas.
Quotas are set by adding meta values to the container,
and are validated when set:
- X-Container-Meta-Quota-Bytes:
Maximum size of the container, in bytes.
- X-Container-Meta-Quota-Count:
Maximum object count of the container.
"""
super(ContainerQuotasTest, self).setUp()
self.container_name = data_utils.rand_name(name="TestContainer")
self.container_client.create_container(self.container_name)
metadata = {"quota-bytes": str(QUOTA_BYTES),
"quota-count": str(QUOTA_COUNT), }
self.container_client.update_container_metadata(
self.container_name, metadata)
def tearDown(self):
"""Cleans the container of any object after each test."""
self.delete_containers([self.container_name])
super(ContainerQuotasTest, self).tearDown()
@test.idempotent_id('9a0fb034-86af-4df0-86fa-f8bd7db21ae0')
@test.requires_ext(extension='container_quotas', service='object')
@test.attr(type="smoke")
def test_upload_valid_object(self):
"""Attempts to uploads an object smaller than the bytes quota."""
object_name = data_utils.rand_name(name="TestObject")
data = data_utils.arbitrary_string(QUOTA_BYTES)
nbefore = self._get_bytes_used()
resp, _ = self.object_client.create_object(
self.container_name, object_name, data)
self.assertHeaders(resp, 'Object', 'PUT')
nafter = self._get_bytes_used()
self.assertEqual(nbefore + len(data), nafter)
@test.idempotent_id('22eeeb2b-3668-4160-baef-44790f65a5a0')
@test.requires_ext(extension='container_quotas', service='object')
@test.attr(type="smoke")
def test_upload_large_object(self):
"""Attempts to upload an object lagger than the bytes quota."""
object_name = data_utils.rand_name(name="TestObject")
data = data_utils.arbitrary_string(QUOTA_BYTES + 1)
nbefore = self._get_bytes_used()
self.assertRaises(lib_exc.OverLimit,
self.object_client.create_object,
self.container_name, object_name, data)
nafter = self._get_bytes_used()
self.assertEqual(nbefore, nafter)
@test.idempotent_id('3a387039-697a-44fc-a9c0-935de31f426b')
@test.requires_ext(extension='container_quotas', service='object')
@test.attr(type="smoke")
def test_upload_too_many_objects(self):
"""Attempts to upload many objects that exceeds the count limit."""
for _ in range(QUOTA_COUNT):
name = data_utils.rand_name(name="TestObject")
self.object_client.create_object(self.container_name, name, "")
nbefore = self._get_object_count()
self.assertEqual(nbefore, QUOTA_COUNT)
self.assertRaises(lib_exc.OverLimit,
self.object_client.create_object,
self.container_name, "OverQuotaObject", "")
nafter = self._get_object_count()
self.assertEqual(nbefore, nafter)
def _get_container_metadata(self):
resp, _ = self.container_client.list_container_metadata(
self.container_name)
return resp
def _get_object_count(self):
resp = self._get_container_metadata()
return int(resp["x-container-object-count"])
def _get_bytes_used(self):
resp = self._get_container_metadata()
return int(resp["x-container-bytes-used"])
|
{
"content_hash": "9fb9669fb747e85a2db7b941a0fdbcd8",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 75,
"avg_line_length": 38.50485436893204,
"alnum_prop": 0.640695915279879,
"repo_name": "liucode/tempest-master",
"id": "896352bd2c1001aeb85c6ae2ef1dc65acf3dae90",
"size": "4591",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tempest/api/object_storage/test_container_quotas.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2834934"
},
{
"name": "Shell",
"bytes": "8578"
}
],
"symlink_target": ""
}
|
import unittest
from producteev import Producteev
try:
from ConfigParser import ConfigParser
import os
config = ConfigParser()
path = os.path.realpath(os.path.join(os.path.dirname(__file__),
os.path.pardir))
conf_file = open(os.path.join(path, 'tests.cfg'))
config.readfp(conf_file)
API_KEY = config.get('Application', 'API_KEY')
SECRET_KEY = config.get('Application', 'SECRET_KEY')
username = config.get('User', 'Username')
password = config.get('User', 'Password')
except:
import getpass
API_KEY = raw_input('API KEY: ')
SECRET_KEY = raw_input('SECRET KEY: ')
username = raw_input('Username: ')
password = getpass.getpass()
class TestProducteev(unittest.TestCase):
"""
Test class for producteev module.
"""
def setUp(self):
self.client = Producteev(API_KEY, SECRET_KEY)
self.client.login(username, password)
def test_tasks(self):
# Test new and get
t1 = self.client.tasks.new('New test task')
t2 = self.client.tasks.get(t1)
self.assertEqual(t1, t2)
# Test list_all
self.assertTrue(t1, self.client.tasks.list_all())
# Test delete
self.assertTrue(t1.delete())
def test_labels(self):
# Test new and get
dashboard_id = self.client.users.me.default_dashboard.id
l1 = self.client.labels.new('New label', dashboard_id)
l2 = self.client.labels.get(l1)
self.assertEqual(l1, l2)
# Test list
self.assertTrue(l1, self.client.labels.list())
# Test delete
self.assertTrue(l2.delete())
def test_users(self):
me = self.client.users.me
# Test get fullname
fn = me.full_name
f_l = '%s %s' % (me.firstname, me.lastname)
self.assertEqual(fn, f_l)
# Test sort by
from producteev.users import SORT_BY
ds = me.sort_by
me.sort_by = SORT_BY[0]
self.assertEqual(me.sort_by, SORT_BY[0])
me.sort_by = 1
self.assertEqual(me.sort_by, SORT_BY[1])
me.sort_by = ds
# Test get user
u = self.client.users.get(me.id)
self.assertEqual(me, u)
u = self.client.users.get(me)
self.assertEqual(me, u)
u = self.client.users.get(str(me.id))
self.assertEqual(me, u)
    def test_dashboards(self):
# Test default dashboard
dd = self.client.users.me.default_dashboard
self.assertTrue(dd in self.client.dashboards.list)
# Test new and get
d1 = self.client.dashboards.new('New dashboard')
d2 = self.client.dashboards.get(d1)
self.assertEqual(d1, d2)
# Test list
self.assertTrue(d1 in self.client.dashboards.list)
# Test set default dashboard
self.client.users.me.default_dashboard = d1
dd = self.client.users.me.default_dashboard
self.assertEqual(dd, d1)
self.client.users.me.default_dashboard = dd
# Test delete
self.assertTrue(d1.delete())
def test_subtasks(self):
# Test new
t1 = self.client.tasks.new('New test task')
st1 = self.client.subtasks.new(t1, 'New subtask')
self.assertTrue(st1 in t1.subtasks)
# Test delete
self.assertTrue(st1.delete())
self.assertTrue(t1.delete())
def test_activities(self):
# TODO
pass
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "3a0bdc07872d7ec33a7ee6100cdfedb7",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 67,
"avg_line_length": 29.327731092436974,
"alnum_prop": 0.5931232091690545,
"repo_name": "magarcia/python-producteev",
"id": "290582b87c0a976af4feeab288c94e4bae1c2d01",
"size": "3490",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_producteev.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "57411"
}
],
"symlink_target": ""
}
|
from aiakos_client.support_api_client import AiakosSupportApiClient
from qautils.http.headers_utils import set_representation_headers
from commons.authentication import Authentication as auth
__author__ = "Telefonica I+D"
__copyright__ = "Copyright 2015"
__license__ = " Apache License, Version 2.0"
HTTP_REPRESENTATION_HEADER_TEXT = "text/plain"
class AiakosApiClient:
"""
This class implements a manager for Aiakos API clients.
"""
def __init__(self, protocol, host, port, base_resource):
"""
        Constructor of the class. Initializes API parameters and headers.
:param protocol (string): Protocol.
:param host (string): Host.
:param port (string): Port.
:param base_resource(string): Base resource.
:return: None
"""
self.protocol = protocol
self.host = host
self.port = port
self.base_resource = base_resource
self.headers = {}
set_representation_headers(self.headers,
content_type=HTTP_REPRESENTATION_HEADER_TEXT,
accept=HTTP_REPRESENTATION_HEADER_TEXT)
def add_token(self):
"""
Add a token to header.
:return: None
"""
token = auth.init_auth()
self.headers.update({"X-Auth-Token": token})
def update_representation_headers(self, content_type, accept):
"""
Update headers with the given representations.
:param content_type (string): Content-Type header value.
:param accept (string): Accept header value.
:return: None
"""
set_representation_headers(self.headers,
content_type=content_type,
accept=accept)
def support_api_resource(self):
"""
Return the Aiakos API Client for the Support API resource.
:return (AiakosSupportApiClient): Aiakos Support API Client.
"""
return AiakosSupportApiClient(self.protocol, self.host, self.port, self.base_resource, self.headers)
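# Illustrative usage sketch (protocol, host, port and base resource values
# are placeholders, not defaults defined by this module):
#
#     client = AiakosApiClient("http", "localhost", "8000", "v1/support")
#     client.add_token()
#     support_api = client.support_api_resource()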
|
{
"content_hash": "f77b18165632ffb08d2d95e47feb7bf5",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 108,
"avg_line_length": 33.83870967741935,
"alnum_prop": 0.6067683508102956,
"repo_name": "telefonicaid/fiware-aiakos",
"id": "3f13f6f5d146640822fb374666c2d665e6df16dd",
"size": "2884",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "test/acceptance/aiakos_client/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "API Blueprint",
"bytes": "3681"
},
{
"name": "CSS",
"bytes": "111"
},
{
"name": "Gherkin",
"bytes": "9409"
},
{
"name": "HTML",
"bytes": "327"
},
{
"name": "JavaScript",
"bytes": "62572"
},
{
"name": "Python",
"bytes": "29566"
},
{
"name": "Shell",
"bytes": "2578"
}
],
"symlink_target": ""
}
|
import os
from twilio.rest import Client
# Initialize the client
# To set up environmental variables, see http://twil.io/secure
account = os.environ['TWILIO_ACCOUNT_SID']
token = os.environ['TWILIO_AUTH_TOKEN']
client = Client(account, token)
# Delete the channel
response = client.chat \
.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.channels("CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.delete()
print(response)
|
{
"content_hash": "3ed1acfa0d5167b6d0f6391a33e37a5b",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 66,
"avg_line_length": 29.3125,
"alnum_prop": 0.6886993603411514,
"repo_name": "TwilioDevEd/api-snippets",
"id": "efeccaeec8d5c4d7888c516c207d98c932bf9ad8",
"size": "542",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ip-messaging/rest/channels/delete-channels/delete-channels.6.x.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "637161"
},
{
"name": "C++",
"bytes": "24856"
},
{
"name": "Go",
"bytes": "7217"
},
{
"name": "HTML",
"bytes": "335"
},
{
"name": "Java",
"bytes": "912474"
},
{
"name": "JavaScript",
"bytes": "512877"
},
{
"name": "M",
"bytes": "147"
},
{
"name": "Objective-C",
"bytes": "53325"
},
{
"name": "PHP",
"bytes": "517186"
},
{
"name": "Python",
"bytes": "442184"
},
{
"name": "Ruby",
"bytes": "438928"
},
{
"name": "Shell",
"bytes": "3854"
},
{
"name": "Swift",
"bytes": "42345"
},
{
"name": "TypeScript",
"bytes": "16767"
}
],
"symlink_target": ""
}
|
"""Wrapper script around TraceEventAdder script."""
import argparse
import sys
import os
from util import build_utils
def main(argv):
argv = build_utils.ExpandFileArgs(argv[1:])
parser = argparse.ArgumentParser()
build_utils.AddDepfileOption(parser)
parser.add_argument('--script',
required=True,
help='Path to the java binary wrapper script.')
parser.add_argument('--stamp', help='Path to stamp to mark when finished.')
parser.add_argument('--classpath', action='append', nargs='+')
parser.add_argument('--input-jars', action='append', nargs='+')
parser.add_argument('--output-jars', action='append', nargs='+')
args = parser.parse_args(argv)
args.classpath = build_utils.ParseGnList(args.classpath)
args.input_jars = build_utils.ParseGnList(args.input_jars)
args.output_jars = build_utils.ParseGnList(args.output_jars)
for output_jar in args.output_jars:
jar_dir = os.path.dirname(output_jar)
if not os.path.exists(jar_dir):
os.makedirs(jar_dir)
all_input_jars = set(args.classpath + args.input_jars)
cmd = [
args.script, '--classpath', ':'.join(sorted(all_input_jars)),
':'.join(args.input_jars), ':'.join(args.output_jars)
]
build_utils.CheckOutput(cmd, print_stdout=True)
build_utils.Touch(args.stamp)
build_utils.WriteDepfile(args.depfile, args.stamp, inputs=all_input_jars)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
{
"content_hash": "7455a2ef050525b62bba3a7ec4885837",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 77,
"avg_line_length": 32.31111111111111,
"alnum_prop": 0.6788170563961485,
"repo_name": "nwjs/chromium.src",
"id": "ec39e5f8313cbe771372e997ad0ff0c13d5d0c69",
"size": "1617",
"binary": false,
"copies": "11",
"ref": "refs/heads/nw70",
"path": "build/android/gyp/trace_event_bytecode_rewriter.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from datetime import datetime, timedelta
from NodeDefender.db.sql import SQL, PowerModel, GroupModel, iCPEModel
from sqlalchemy import func
from sqlalchemy.sql import label
from itertools import groupby
def current(*groups):
groups = GroupModel.query.filter(GroupModel.name.in_(*[groups])).all()
if not len(groups):
return False
ret_data = []
for group in groups:
group_data = {}
group_data['name'] = group.name
icpes = [node.icpe.mac_address for node in group.nodes if node.icpe]
min_ago = (datetime.now() - timedelta(hours=0.5))
latest_power = SQL.session.query(PowerModel,\
label('sum', func.sum(PowerModel.average)),
label('count', func.count(PowerModel.average))).\
join(PowerModel.icpe).\
filter(iCPEModel.mac_address.in_(*[icpes])).\
filter(PowerModel.date > min_ago).first()
if latest_power.count:
group_data['power'] = latest_power.sum / latest_power.count
else:
group_data['power'] = 0.0
ret_data.append(group_data)
return ret_data
def average(*groups):
groups = GroupModel.query.filter(GroupModel.name.in_(*[groups])).all()
if not len(groups):
return False
min_ago = (datetime.now() - timedelta(hours=0.5))
day_ago = (datetime.now() - timedelta(days=1))
week_ago = (datetime.now() - timedelta(days=7))
month_ago = (datetime.now() - timedelta(days=30))
ret_data = []
for group in groups:
group_data = {}
group_data['name'] = group.name
icpes = [node.icpe.mac_address for node in group.nodes if node.icpe]
current_power = SQL.session.query(PowerModel,\
label('sum', func.sum(PowerModel.average)),
label('count', func.count(PowerModel.average))).\
join(PowerModel.icpe).\
filter(iCPEModel.mac_address.in_(*[icpes])).\
filter(PowerModel.date > min_ago).first()
daily_power = SQL.session.query(PowerModel,\
label('sum', func.sum(PowerModel.average)),
label('count', func.count(PowerModel.average))).\
join(PowerModel.icpe).\
filter(iCPEModel.mac_address.in_(*[icpes])).\
filter(PowerModel.date > day_ago).first()
weekly_power = SQL.session.query(PowerModel,\
label('sum', func.sum(PowerModel.average)),
label('count', func.count(PowerModel.average))).\
join(PowerModel.icpe).\
filter(iCPEModel.mac_address.in_(*[icpes])).\
filter(PowerModel.date > week_ago).first()
monthly_power = SQL.session.query(PowerModel,\
label('sum', func.sum(PowerModel.average)),
label('count', func.count(PowerModel.average))).\
join(PowerModel.icpe).\
filter(iCPEModel.mac_address.in_(*[icpes])).\
filter(PowerModel.date > month_ago).first()
if current_power.count:
current_power = (current_power.sum / current_power.count)
else:
current_power = 0.0
if daily_power.count:
daily_power = (daily_power.sum / daily_power.count)
else:
daily_power = 0.0
if weekly_power.count:
weekly_power = (weekly_power.sum / weekly_power.count)
else:
weekly_power = 0.0
if monthly_power.count:
monthly_power = (monthly_power.sum / monthly_power.count)
else:
monthly_power = 0.0
        group_data['current'] = current_power
        group_data['daily'] = daily_power
group_data['weekly'] = weekly_power
group_data['monthly'] = monthly_power
ret_data.append(group_data)
return ret_data
def chart(*groups):
from_date = (datetime.now() - timedelta(days=30))
to_date = datetime.now()
groups = SQL.session.query(GroupModel).filter(GroupModel.name.in_(groups)).all()
if not len(groups):
return False
ret_data = []
for group in groups:
icpes = [node.icpe.mac_address for node in group.nodes if node.icpe]
power_data = SQL.session.query(PowerModel).\
join(PowerModel.icpe).\
filter(iCPEModel.mac_address.in_(*[icpes])).\
filter(PowerModel.date > from_date).\
filter(PowerModel.date < to_date).all()
if not power_data:
continue
group_data = {}
group_data['name'] = group.name
group_data['power'] = []
grouped_data = [list(v) for k, v in groupby(power_data, lambda p:
p.date)]
for data in grouped_data:
entry = {'date' : str(data[0].date)}
for power in data:
try:
                    entry['value'] = (power.average + entry['value']) / 2
except KeyError:
entry['value'] = power.average
group_data['power'].append(entry)
ret_data.append(group_data)
return ret_data
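# A minimal usage sketch (assumptions: it runs inside an application context where
# NodeDefender's SQL session is initialised; the import path follows this module's
# location in the repo and the group names are made up):
#
#   from NodeDefender.db.data import power
#   stats = power.average('office', 'warehouse')  # [{'name': ..., 'daily': ..., 'weekly': ..., 'monthly': ...}, ...]
#   series = power.chart('office')                # [{'name': ..., 'power': [{'date': ..., 'value': ...}, ...]}]
#
# Both helpers return False when none of the given group names exist.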
|
{
"content_hash": "75c5d164f83373e9999dd785cefe73c7",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 84,
"avg_line_length": 37.49645390070922,
"alnum_prop": 0.5435975033100057,
"repo_name": "CTSNE/NodeDefender",
"id": "f94186ce6a44435069b66c78aaebf1fe933a25e0",
"size": "5287",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "NodeDefender/db/data/power.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5419"
},
{
"name": "HTML",
"bytes": "188223"
},
{
"name": "JavaScript",
"bytes": "2861"
},
{
"name": "Python",
"bytes": "290127"
}
],
"symlink_target": ""
}
|
from PyQt4.QtCore import SIGNAL, SLOT
from PyQt4.QtGui import QVBoxLayout, QDialog, QDialogButtonBox, QMessageBox
from guidata.dataset.datatypes import DataSet
from guidata.dataset.dataitems import (FloatItem, IntItem, BoolItem,
ChoiceItem, StringItem)
from guidata.dataset.qtwidgets import DataSetEditGroupBox, DataSetEditLayout
from guidata.dataset.qtitemwidgets import LineEditWidget
class IntOrNoneItem(IntItem):
""" Like the normal guidata IntItem, but allows empty input
that is interpreted as None.
"""
def check_value(self, value):
#raise Exception
if value is None:
return True
return super(IntOrNoneItem, self).check_value(value)
def from_string(self, value):
if value == 'None' or value == '': # None is allowed
return None
ret = super(IntOrNoneItem, self).from_string(value)
if ret is None: # But None from super is an error
return 'Error'
return ret
class FloatOrNoneItem(FloatItem):
""" Like the normal guidata FloatItem, but allows empty input
that is interpreted as None.
"""
def check_value(self, value):
#raise Exception
if value is None:
return True
return super(FloatOrNoneItem, self).check_value(value)
def from_string(self, value):
if value == 'None' or value == '': # None is allowed
return None
ret = super(FloatOrNoneItem, self).from_string(value)
if ret is None: # But None from super is an error
return 'Error'
return ret
DataSetEditLayout.register(IntOrNoneItem, LineEditWidget)
DataSetEditLayout.register(FloatOrNoneItem, LineEditWidget)
def valid_params(params):
if not params:
return False
if not params.values()[0]:
return False
return True
def has_ui_params(io):
if valid_params(io.read_params):
return True
return valid_params(io.write_params)
class ParamDialog(QDialog):
""" A Dialog with read and write option for a Neo IO.
"""
def __init__(self, io, params_read, params_write, parent):
super(ParamDialog, self).__init__(parent)
self.setWindowTitle('Options for %s' % (io.name or io.__name__))
self.io = io
self.setModal(True)
self.mainLayout = QVBoxLayout()
self.setLayout(self.mainLayout)
buttons = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
self.connect(buttons, SIGNAL("accepted()"), SLOT("accept()"))
self.connect(buttons, SIGNAL("rejected()"), SLOT("reject()"))
self.read_params_edit = None
self.read_params = params_read
if valid_params(io.read_params):
params = self.neo_to_guidata(io.read_params.values()[0], self.read_params)
self.read_params_edit = DataSetEditGroupBox(
"Read", params, show_button=False)
self.mainLayout.addWidget(self.read_params_edit)
self.write_params_edit = None
self.write_params = params_write
if valid_params(io.write_params):
params = self.neo_to_guidata(io.write_params.values()[0], self.write_params)
self.write_params_edit = DataSetEditGroupBox(
"Write", params, show_button=False)
self.mainLayout.addWidget(self.write_params_edit)
self.mainLayout.addWidget(buttons)
def neo_to_guidata(self, paramlist, param_dict):
""" Take a list of parameter description in Neo format and return
a respective guidata DataSet.
:param list paramlist: List of (name, parameters) tuples that describe
load or save parameters Neo style.
:param dict param_dict: Dictionary with default values. Is modified to
include new default values when the respective parameter is not
present.
"""
# Normal widgets
guidata_types = {
bool: BoolItem,
float: FloatItem,
int: IntItem,
str: StringItem,
unicode: StringItem
}
# Widgets where parameter can be None
guidata_types_with_none = {
bool: BoolItem,
float: FloatOrNoneItem,
int: IntOrNoneItem,
str: StringItem,
unicode: StringItem
}
# Build up parameter items dictionary
items = {}
for name, params in paramlist:
if 'label' in params:
label = params['label']
else:
label = name
if name in param_dict:
default = param_dict[name]
else:
if 'value' in params:
default = params['value']
else:
default = None
if 'type' in params:
if default is None:
classitem = guidata_types_with_none[params['type']]
else:
classitem = guidata_types[params['type']]
else:
if default is None:
classitem = guidata_types_with_none[type(default)]
else:
classitem = guidata_types[type(default)]
if 'possible' in params:
possible = params['possible'][:]
for i, p in enumerate(possible):
if possible[i] == ' ':
possible[i] = 'Space'
elif possible[i] == '\t':
possible[i] = 'Tab'
def_choice = 0
                if name in param_dict and param_dict[name] in params['possible']:
def_choice = params['possible'].index(param_dict[name])
elif default in params['possible']:
def_choice = params['possible'].index(default)
items[name] = ChoiceItem(label, possible,
default=def_choice)
else:
items[name] = classitem(label, default=default)
if name not in param_dict:
param_dict[name] = default
guidata_class = type('Parameters', (DataSet,), items)
return guidata_class
def accept(self):
""" Validate inputs """
ok = True
if self.read_params_edit:
if self.read_params_edit.edit.check_all_values():
self.read_params_edit.edit.accept_changes()
else:
ok = False
if self.write_params_edit:
if self.write_params_edit.edit.check_all_values():
self.write_params_edit.edit.accept_changes()
else:
ok = False
if not ok:
            QMessageBox.warning(self, 'Invalid parameters',
                                "Some required entries are incorrect.\n"
                                "Please check highlighted fields.")
else:
QDialog.accept(self)
def _get_choice_value(self, name, value, params):
""" Make sure that a parameter value is the actual value, not an index
into the choises for parameters with a list of possible values.
"""
for n, d in params.values()[0]:
if name != n:
continue
if not 'possible' in d:
return value
return d['possible'][value]
# Should not happen, just return original value
return value
def get_read_params(self):
""" Return a dictionary of read parameter values suitable for passing
to Neo IO read function.
"""
d = {}
for name in self.read_params.iterkeys():
d[name] = self._get_choice_value(
name, getattr(self.read_params_edit.dataset, name),
self.io.read_params)
return d
def get_write_params(self):
""" Return a dictionary of write parameter values suitable for passing
to Neo IO write function.
"""
d = {}
for name in self.write_params.iterkeys():
d[name] = self._get_choice_value(
name, getattr(self.write_params_edit.dataset, name,),
self.io.write_params)
return d
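# A minimal usage sketch (FakeIO and its parameter names are hypothetical; real Neo IO
# classes expose `read_params`/`write_params` dictionaries of the same shape):
#
#   class FakeIO(object):
#       name = 'Fake IO'
#       read_params = {object: [('delimiter', {'value': '\t', 'possible': ['\t', ' ', ',']}),
#                               ('lazy', {'value': False})]}
#       write_params = {object: [('compress', {'value': False})]}
#
#   dialog = ParamDialog(FakeIO, {}, {}, parent=None)
#   if dialog.exec_():
#       read_kwargs = dialog.get_read_params()    # e.g. {'delimiter': '\t', 'lazy': False}
#       write_kwargs = dialog.get_write_params()  # e.g. {'compress': False}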
|
{
"content_hash": "5e6b72fddb51d4d8adf14e47f16248dd",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 88,
"avg_line_length": 34.80252100840336,
"alnum_prop": 0.563563926113727,
"repo_name": "rproepp/spykeviewer",
"id": "300908981a8526ca402d776f5d5154f6f67af676",
"size": "8283",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "spykeviewer/ui/io_settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "518454"
}
],
"symlink_target": ""
}
|
import json
import os
import warnings
from pathlib import Path
from typing import Dict, List, Optional, Sequence, Union, overload
from monai.config import KeysCollection, PathLike
from monai.data.utils import partition_dataset, select_cross_validation_folds
from monai.utils import ensure_tuple
@overload
def _compute_path(base_dir: PathLike, element: PathLike, check_path: bool = False) -> str:
...
@overload
def _compute_path(base_dir: PathLike, element: List[PathLike], check_path: bool = False) -> List[str]:
...
def _compute_path(base_dir, element, check_path=False):
"""
Args:
base_dir: the base directory of the dataset.
element: file path(s) to append to directory.
check_path: if `True`, only compute when the result is an existing path.
Raises:
TypeError: When ``element`` contains a non ``str``.
TypeError: When ``element`` type is not in ``Union[list, str]``.
"""
def _join_path(base_dir: PathLike, item: PathLike):
result = os.path.normpath(os.path.join(base_dir, item))
if check_path and not os.path.exists(result):
# if not an existing path, don't join with base dir
return f"{item}"
return f"{result}"
if isinstance(element, (str, os.PathLike)):
return _join_path(base_dir, element)
if isinstance(element, list):
for e in element:
if not isinstance(e, (str, os.PathLike)):
return element
return [_join_path(base_dir, e) for e in element]
return element
def _append_paths(base_dir: PathLike, is_segmentation: bool, items: List[Dict]) -> List[Dict]:
"""
Args:
base_dir: the base directory of the dataset.
is_segmentation: whether the datalist is for segmentation task.
items: list of data items, each of which is a dict keyed by element names.
Raises:
TypeError: When ``items`` contains a non ``dict``.
"""
for item in items:
if not isinstance(item, dict):
raise TypeError(f"Every item in items must be a dict but got {type(item).__name__}.")
for k, v in item.items():
if k == "image" or is_segmentation and k == "label":
item[k] = _compute_path(base_dir, v, check_path=False)
else:
# for other items, auto detect whether it's a valid path
item[k] = _compute_path(base_dir, v, check_path=True)
return items
def load_decathlon_datalist(
data_list_file_path: PathLike,
is_segmentation: bool = True,
data_list_key: str = "training",
base_dir: Optional[PathLike] = None,
) -> List[Dict]:
"""Load image/label paths of decathlon challenge from JSON file
    The JSON file is similar to the dataset.json files provided at http://medicaldecathlon.com/.
Args:
data_list_file_path: the path to the json file of datalist.
is_segmentation: whether the datalist is for segmentation task, default is True.
data_list_key: the key to get a list of dictionary to be used, default is "training".
base_dir: the base directory of the dataset, if None, use the datalist directory.
Raises:
ValueError: When ``data_list_file_path`` does not point to a file.
ValueError: When ``data_list_key`` is not specified in the data list file.
Returns a list of data items, each of which is a dict keyed by element names, for example:
.. code-block::
[
{'image': '/workspace/data/chest_19.nii.gz', 'label': 0},
{'image': '/workspace/data/chest_31.nii.gz', 'label': 1}
]
"""
data_list_file_path = Path(data_list_file_path)
if not data_list_file_path.is_file():
raise ValueError(f"Data list file {data_list_file_path} does not exist.")
with open(data_list_file_path) as json_file:
json_data = json.load(json_file)
if data_list_key not in json_data:
raise ValueError(f'Data list {data_list_key} not specified in "{data_list_file_path}".')
expected_data = json_data[data_list_key]
if data_list_key == "test" and not isinstance(expected_data[0], dict):
# decathlon datalist may save the test images in a list directly instead of dict
expected_data = [{"image": i} for i in expected_data]
if base_dir is None:
base_dir = data_list_file_path.parent
return _append_paths(base_dir, is_segmentation, expected_data)
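# A minimal usage sketch (the dataset path below is an assumption, following the
# Medical Decathlon dataset.json layout described above):
#
#   datalist = load_decathlon_datalist(
#       "/workspace/data/Task09_Spleen/dataset.json",
#       is_segmentation=True,
#       data_list_key="training",
#   )
#   # -> [{'image': '/workspace/data/Task09_Spleen/imagesTr/...', 'label': '...'}, ...]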
def load_decathlon_properties(data_property_file_path: PathLike, property_keys: Union[Sequence[str], str]) -> Dict:
"""Load the properties from the JSON file contains data property with specified `property_keys`.
Args:
data_property_file_path: the path to the JSON file of data properties.
property_keys: expected keys to load from the JSON file, for example, we have these keys
in the decathlon challenge:
`name`, `description`, `reference`, `licence`, `tensorImageSize`,
`modality`, `labels`, `numTraining`, `numTest`, etc.
"""
data_property_file_path = Path(data_property_file_path)
if not data_property_file_path.is_file():
raise ValueError(f"Data property file {data_property_file_path} does not exist.")
with open(data_property_file_path) as json_file:
json_data = json.load(json_file)
properties = {}
for key in ensure_tuple(property_keys):
if key not in json_data:
raise KeyError(f"key {key} is not in the data property file.")
properties[key] = json_data[key]
return properties
def check_missing_files(
datalist: List[Dict], keys: KeysCollection, root_dir: Optional[PathLike] = None, allow_missing_keys: bool = False
):
"""Checks whether some files in the Decathlon datalist are missing.
It would be helpful to check missing files before a heavy training run.
Args:
datalist: a list of data items, every item is a dictionary.
usually generated by `load_decathlon_datalist` API.
keys: expected keys to check in the datalist.
root_dir: if not None, provides the root dir for the relative file paths in `datalist`.
allow_missing_keys: whether allow missing keys in the datalist items.
if False, raise exception if missing. default to False.
Returns:
A list of missing filenames.
"""
missing_files = []
for item in datalist:
for k in ensure_tuple(keys):
if k not in item:
if not allow_missing_keys:
raise ValueError(f"key `{k}` is missing in the datalist item: {item}")
continue
for f in ensure_tuple(item[k]):
if not isinstance(f, (str, os.PathLike)):
raise ValueError(f"filepath of key `{k}` must be a string or a list of strings, but got: {f}.")
f = Path(f)
if isinstance(root_dir, (str, os.PathLike)):
f = Path(root_dir).joinpath(f)
if not f.exists():
missing_files.append(f)
return missing_files
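# A minimal usage sketch (assumes `datalist` was produced by `load_decathlon_datalist`
# and that its items carry "image" and "label" keys):
#
#   missing = check_missing_files(datalist, keys=("image", "label"))
#   if missing:
#       warnings.warn(f"missing files: {missing}")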
def create_cross_validation_datalist(
datalist: List[Dict],
nfolds: int,
train_folds: Union[Sequence[int], int],
val_folds: Union[Sequence[int], int],
train_key: str = "training",
val_key: str = "validation",
filename: Optional[Union[Path, str]] = None,
shuffle: bool = True,
seed: int = 0,
check_missing: bool = False,
keys: Optional[KeysCollection] = None,
root_dir: Optional[str] = None,
allow_missing_keys: bool = False,
raise_error: bool = True,
):
"""
Utility to create new Decathlon style datalist based on cross validation partition.
Args:
datalist: loaded list of dictionaries for all the items to partition.
nfolds: number of the kfold split.
train_folds: indices of folds for training part.
val_folds: indices of folds for validation part.
train_key: the key of train part in the new datalist, defaults to "training".
val_key: the key of validation part in the new datalist, defaults to "validation".
filename: if not None and ends with ".json", save the new datalist into JSON file.
shuffle: whether to shuffle the datalist before partition, defaults to `True`.
seed: if `shuffle` is True, set the random seed, defaults to `0`.
check_missing: whether to check all the files specified by `keys` are existing.
keys: if not None and check_missing_files is True, the expected keys to check in the datalist.
root_dir: if not None, provides the root dir for the relative file paths in `datalist`.
allow_missing_keys: if check_missing_files is `True`, whether allow missing keys in the datalist items.
if False, raise exception if missing. default to False.
raise_error: when found missing files, if `True`, raise exception and stop, if `False`, print warning.
"""
if check_missing and keys is not None:
files = check_missing_files(datalist, keys, root_dir, allow_missing_keys)
if files:
msg = f"some files of the datalist are missing: {files}"
if raise_error:
raise ValueError(msg)
warnings.warn(msg)
data = partition_dataset(data=datalist, num_partitions=nfolds, shuffle=shuffle, seed=seed)
train_list = select_cross_validation_folds(partitions=data, folds=train_folds)
val_list = select_cross_validation_folds(partitions=data, folds=val_folds)
ret = {train_key: train_list, val_key: val_list}
if isinstance(filename, (str, Path)):
with open(filename, "w") as f:
json.dump(ret, f, indent=4)
return ret
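# A minimal usage sketch (fold indices and the output filename are illustrative only):
#
#   cv_datalist = create_cross_validation_datalist(
#       datalist, nfolds=5, train_folds=[0, 1, 2, 3], val_folds=4,
#       filename="datalist_fold4.json", shuffle=True, seed=0,
#   )
#   # -> {"training": [...], "validation": [...]}, also written to datalist_fold4.json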
|
{
"content_hash": "d55288d13d2e9ecd6749d2c93de3f679",
"timestamp": "",
"source": "github",
"line_count": 240,
"max_line_length": 117,
"avg_line_length": 40.62083333333333,
"alnum_prop": 0.6420145655964714,
"repo_name": "Project-MONAI/MONAI",
"id": "c1bbabceb9e13adcbc590f783c75d7720e555b81",
"size": "10323",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "monai/data/decathlon_datalist.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "15956"
},
{
"name": "C++",
"bytes": "189648"
},
{
"name": "Cuda",
"bytes": "154905"
},
{
"name": "Dockerfile",
"bytes": "2454"
},
{
"name": "Python",
"bytes": "7209898"
},
{
"name": "Shell",
"bytes": "20587"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import os
import unittest
from integration.orquesta import base
from st2common.constants import action as ac_const
# These tests hang and time out very often, so they are disabled for the time being
# until we find the root cause of the race or run them separately in isolation in a retry loop
@unittest.skipIf(
os.environ.get("ST2_CI_RUN_ORQUESTA_PAUSE_RESUME_TESTS", "false").lower()
not in ["1", "true"],
"Skipping race prone tests",
)
class PauseResumeWiringTest(
base.TestWorkflowExecution, base.WorkflowControlTestCaseMixin
):
temp_file_path_x = None
temp_file_path_y = None
def setUp(self):
super(PauseResumeWiringTest, self).setUp()
# Create temporary files used by the tests
self.temp_file_path_x = self._create_temp_file()
self.temp_file_path_y = self._create_temp_file()
def tearDown(self):
# Delete temporary files.
self._delete_temp_file(self.temp_file_path_x)
self._delete_temp_file(self.temp_file_path_y)
super(PauseResumeWiringTest, self).tearDown()
def test_pause_and_resume(self):
# A temp file is created during test setup. Ensure the temp file exists.
path = self.temp_file_path_x
self.assertTrue(os.path.exists(path))
# Launch the workflow. The workflow will wait for the temp file to be deleted.
params = {"tempfile": path}
ex = self._execute_workflow("examples.orquesta-test-pause", params)
self._wait_for_task(ex, "task1", ac_const.LIVEACTION_STATUS_RUNNING)
        # Pause the workflow before the temp file is deleted. The workflow will be paused
# but task1 will still be running to allow for graceful exit.
self.st2client.executions.pause(ex.id)
        # Expecting the ex to be pausing, waiting for task1 to be completed.
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_PAUSING)
# Delete the temporary file.
os.remove(path)
self.assertFalse(os.path.exists(path))
        # Wait for the ex to be paused.
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_PAUSED)
# Resume the ex.
ex = self.st2client.executions.resume(ex.id)
# Wait for completion.
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_SUCCEEDED)
def test_pause_and_resume_cascade_to_subworkflow(self):
# A temp file is created during test setup. Ensure the temp file exists.
path = self.temp_file_path_x
self.assertTrue(os.path.exists(path))
# Launch the workflow. The workflow will wait for the temp file to be deleted.
params = {"tempfile": path}
ex = self._execute_workflow("examples.orquesta-test-pause-subworkflow", params)
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_RUNNING)
tk_exs = self._wait_for_task(ex, "task1", ac_const.LIVEACTION_STATUS_RUNNING)
# Pause the workflow before the temp file is deleted. The workflow will be paused
# but task1 will still be running to allow for graceful exit.
ex = self.st2client.executions.pause(ex.id)
# Expecting the ex to be pausing, waiting for task1 to be completed.
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_PAUSING)
tk_ac_ex = self._wait_for_state(tk_exs[0], ac_const.LIVEACTION_STATUS_PAUSING)
# Delete the temporary file.
os.remove(path)
self.assertFalse(os.path.exists(path))
# Wait for the exs to be paused.
tk_ac_ex = self._wait_for_state(tk_ac_ex, ac_const.LIVEACTION_STATUS_PAUSED)
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_PAUSED)
# Resume the parent ex.
ex = self.st2client.executions.resume(ex.id)
# Wait for completion.
tk_ac_ex = self._wait_for_state(tk_ac_ex, ac_const.LIVEACTION_STATUS_SUCCEEDED)
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_SUCCEEDED)
def test_pause_and_resume_cascade_to_subworkflows(self):
# Temp files are created during test setup. Ensure the temp files exist.
path1 = self.temp_file_path_x
self.assertTrue(os.path.exists(path1))
path2 = self.temp_file_path_y
self.assertTrue(os.path.exists(path2))
# Launch the workflow. The workflow will wait for the temp file to be deleted.
params = {"file1": path1, "file2": path2}
ex = self._execute_workflow("examples.orquesta-test-pause-subworkflows", params)
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_RUNNING)
tk1_exs = self._wait_for_task(ex, "task1", ac_const.LIVEACTION_STATUS_RUNNING)
tk2_exs = self._wait_for_task(ex, "task2", ac_const.LIVEACTION_STATUS_RUNNING)
# Pause the workflow before the temp files are deleted. The workflow will be paused
# but task1 will still be running to allow for graceful exit.
ex = self.st2client.executions.pause(ex.id)
# Expecting the ex to be pausing, waiting for task1 to be completed.
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_PAUSING)
tk1_ac_ex = self._wait_for_state(tk1_exs[0], ac_const.LIVEACTION_STATUS_PAUSING)
tk2_ac_ex = self._wait_for_state(tk2_exs[0], ac_const.LIVEACTION_STATUS_PAUSING)
# Delete the temporary file for one of the subworkflow.
os.remove(path1)
self.assertFalse(os.path.exists(path1))
# Check the workflow and subworkflow status.
tk1_ac_ex = self._wait_for_state(tk1_ac_ex, ac_const.LIVEACTION_STATUS_PAUSED)
        tk2_ac_ex = self._wait_for_state(tk2_ac_ex, ac_const.LIVEACTION_STATUS_PAUSING)
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_PAUSING)
# Delete the temporary file for the other subworkflow.
os.remove(path2)
self.assertFalse(os.path.exists(path2))
# Check the workflow and subworkflow status.
tk1_ac_ex = self._wait_for_state(tk1_ac_ex, ac_const.LIVEACTION_STATUS_PAUSED)
        tk2_ac_ex = self._wait_for_state(tk2_ac_ex, ac_const.LIVEACTION_STATUS_PAUSED)
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_PAUSED)
# Resume the parent ex.
ex = self.st2client.executions.resume(ex.id)
# Wait for completion.
tk1_ac_ex = self._wait_for_state(
tk1_ac_ex, ac_const.LIVEACTION_STATUS_SUCCEEDED
)
tk2_ac_ex = self._wait_for_state(
tk2_ac_ex, ac_const.LIVEACTION_STATUS_SUCCEEDED
)
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_SUCCEEDED)
def test_pause_and_resume_cascade_from_subworkflow(self):
# A temp file is created during test setup. Ensure the temp file exists.
path = self.temp_file_path_x
self.assertTrue(os.path.exists(path))
# Launch the workflow. The workflow will wait for the temp file to be deleted.
params = {"tempfile": path}
ex = self._execute_workflow("examples.orquesta-test-pause-subworkflow", params)
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_RUNNING)
tk_exs = self._wait_for_task(ex, "task1", ac_const.LIVEACTION_STATUS_RUNNING)
# Pause the subworkflow before the temp file is deleted. The task will be
# paused but workflow will still be running.
tk_ac_ex = self.st2client.executions.pause(tk_exs[0].id)
# Expecting the workflow is still running and task1 is pausing.
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_RUNNING)
tk_ac_ex = self._wait_for_state(tk_ac_ex, ac_const.LIVEACTION_STATUS_PAUSING)
# Delete the temporary file.
os.remove(path)
self.assertFalse(os.path.exists(path))
# Wait for the workflow and task to be paused.
tk_ac_ex = self._wait_for_state(tk_ac_ex, ac_const.LIVEACTION_STATUS_PAUSED)
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_PAUSED)
# Resume the task.
tk_ac_ex = self.st2client.executions.resume(tk_ac_ex.id)
# Wait for completion.
tk_ac_ex = self._wait_for_state(tk_ac_ex, ac_const.LIVEACTION_STATUS_SUCCEEDED)
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_SUCCEEDED)
def test_pause_from_1_of_2_subworkflows_and_resume_subworkflow_when_workflow_paused(
self,
):
# Temp files are created during test setup. Ensure the temp files exist.
path1 = self.temp_file_path_x
self.assertTrue(os.path.exists(path1))
path2 = self.temp_file_path_y
self.assertTrue(os.path.exists(path2))
# Launch the workflow. The workflow will wait for the temp file to be deleted.
params = {"file1": path1, "file2": path2}
ex = self._execute_workflow("examples.orquesta-test-pause-subworkflows", params)
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_RUNNING)
tk1_exs = self._wait_for_task(ex, "task1", ac_const.LIVEACTION_STATUS_RUNNING)
tk2_exs = self._wait_for_task(ex, "task2", ac_const.LIVEACTION_STATUS_RUNNING)
# Pause the subworkflow before the temp file is deleted. The task will be
# paused but workflow and the other subworkflow will still be running.
tk1_ac_ex = self.st2client.executions.pause(tk1_exs[0].id)
# Expecting the workflow is still running and task1 is pausing.
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_RUNNING)
tk1_ac_ex = self._wait_for_state(tk1_ac_ex, ac_const.LIVEACTION_STATUS_PAUSING)
tk2_ac_ex = self._wait_for_state(tk2_exs[0], ac_const.LIVEACTION_STATUS_RUNNING)
# Delete the temporary file for the subworkflow.
os.remove(path1)
self.assertFalse(os.path.exists(path1))
        # Wait for the subworkflow to pause while the workflow
        # and the other subworkflow are still running.
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_RUNNING)
tk1_ac_ex = self._wait_for_state(tk1_ac_ex, ac_const.LIVEACTION_STATUS_PAUSED)
tk2_ac_ex = self._wait_for_state(tk2_ac_ex, ac_const.LIVEACTION_STATUS_RUNNING)
# Delete the temporary file for the other subworkflow.
os.remove(path2)
self.assertFalse(os.path.exists(path2))
# The workflow will now be paused because no other task is running.
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_PAUSED)
tk1_ac_ex = self._wait_for_state(tk1_ac_ex, ac_const.LIVEACTION_STATUS_PAUSED)
tk2_ac_ex = self._wait_for_state(
tk2_ac_ex, ac_const.LIVEACTION_STATUS_SUCCEEDED
)
# Resume the subworkflow.
tk1_ac_ex = self.st2client.executions.resume(tk1_ac_ex.id)
# Wait for completion.
tk1_ac_ex = self._wait_for_state(
tk1_ac_ex, ac_const.LIVEACTION_STATUS_SUCCEEDED
)
tk2_ac_ex = self._wait_for_state(
tk2_ac_ex, ac_const.LIVEACTION_STATUS_SUCCEEDED
)
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_SUCCEEDED)
def test_pause_from_1_of_2_subworkflows_and_resume_subworkflow_while_workflow_running(
self,
):
# Temp files are created during test setup. Ensure the temp files exist.
path1 = self.temp_file_path_x
self.assertTrue(os.path.exists(path1))
path2 = self.temp_file_path_y
self.assertTrue(os.path.exists(path2))
# Launch the workflow. The workflow will wait for the temp file to be deleted.
params = {"file1": path1, "file2": path2}
ex = self._execute_workflow("examples.orquesta-test-pause-subworkflows", params)
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_RUNNING)
tk1_exs = self._wait_for_task(ex, "task1", ac_const.LIVEACTION_STATUS_RUNNING)
tk2_exs = self._wait_for_task(ex, "task2", ac_const.LIVEACTION_STATUS_RUNNING)
# Pause the subworkflow before the temp file is deleted. The task will be
# paused but workflow and the other subworkflow will still be running.
tk1_ac_ex = self.st2client.executions.pause(tk1_exs[0].id)
# Expecting the workflow is still running and task1 is pausing.
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_RUNNING)
tk1_ac_ex = self._wait_for_state(tk1_ac_ex, ac_const.LIVEACTION_STATUS_PAUSING)
tk2_ac_ex = self._wait_for_state(tk2_exs[0], ac_const.LIVEACTION_STATUS_RUNNING)
# Delete the temporary file for the subworkflow.
os.remove(path1)
self.assertFalse(os.path.exists(path1))
        # Wait for the subworkflow to pause while the workflow
        # and the other subworkflow are still running.
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_RUNNING)
tk1_ac_ex = self._wait_for_state(tk1_ac_ex, ac_const.LIVEACTION_STATUS_PAUSED)
tk2_ac_ex = self._wait_for_state(tk2_ac_ex, ac_const.LIVEACTION_STATUS_RUNNING)
# Resume the subworkflow.
tk1_ac_ex = self.st2client.executions.resume(tk1_ac_ex.id)
# The subworkflow will succeed while the other subworkflow is still running.
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_RUNNING)
tk1_ac_ex = self._wait_for_state(
tk1_ac_ex, ac_const.LIVEACTION_STATUS_SUCCEEDED
)
tk2_ac_ex = self._wait_for_state(tk2_ac_ex, ac_const.LIVEACTION_STATUS_RUNNING)
# Delete the temporary file for the other subworkflow.
os.remove(path2)
self.assertFalse(os.path.exists(path2))
# Wait for completion.
tk1_ac_ex = self._wait_for_state(
tk1_ac_ex, ac_const.LIVEACTION_STATUS_SUCCEEDED
)
tk2_ac_ex = self._wait_for_state(
tk2_ac_ex, ac_const.LIVEACTION_STATUS_SUCCEEDED
)
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_SUCCEEDED)
def test_pause_from_all_subworkflows_and_resume_from_subworkflows(self):
# Temp files are created during test setup. Ensure the temp files exist.
path1 = self.temp_file_path_x
self.assertTrue(os.path.exists(path1))
path2 = self.temp_file_path_y
self.assertTrue(os.path.exists(path2))
# Launch the workflow. The workflow will wait for the temp file to be deleted.
params = {"file1": path1, "file2": path2}
ex = self._execute_workflow("examples.orquesta-test-pause-subworkflows", params)
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_RUNNING)
tk1_exs = self._wait_for_task(ex, "task1", ac_const.LIVEACTION_STATUS_RUNNING)
tk2_exs = self._wait_for_task(ex, "task2", ac_const.LIVEACTION_STATUS_RUNNING)
# Pause the subworkflow before the temp file is deleted. The task will be
# paused but workflow and the other subworkflow will still be running.
tk1_ac_ex = self.st2client.executions.pause(tk1_exs[0].id)
# Expecting the workflow is still running and task1 is pausing.
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_RUNNING)
tk1_ac_ex = self._wait_for_state(tk1_ac_ex, ac_const.LIVEACTION_STATUS_PAUSING)
tk2_ac_ex = self._wait_for_state(tk2_exs[0], ac_const.LIVEACTION_STATUS_RUNNING)
# Pause the other subworkflow before the temp file is deleted. The main
        # workflow will still be running because pause is initiated downstream.
tk2_ac_ex = self.st2client.executions.pause(tk2_exs[0].id)
# Expecting workflow and subworkflows are pausing.
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_RUNNING)
tk1_ac_ex = self._wait_for_state(tk1_ac_ex, ac_const.LIVEACTION_STATUS_PAUSING)
tk2_ac_ex = self._wait_for_state(tk2_ac_ex, ac_const.LIVEACTION_STATUS_PAUSING)
# Delete the temporary files for the subworkflows.
os.remove(path1)
self.assertFalse(os.path.exists(path1))
os.remove(path2)
self.assertFalse(os.path.exists(path2))
# Wait for subworkflows to pause. The main workflow will also
# pause now because no other task is running.
tk1_ac_ex = self._wait_for_state(tk1_ac_ex, ac_const.LIVEACTION_STATUS_PAUSED)
tk2_ac_ex = self._wait_for_state(tk2_ac_ex, ac_const.LIVEACTION_STATUS_PAUSED)
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_PAUSED)
# Resume the subworkflow.
tk1_ac_ex = self.st2client.executions.resume(tk1_ac_ex.id)
# The subworkflow will succeed while the other subworkflow is still paused.
tk1_ac_ex = self._wait_for_state(
tk1_ac_ex, ac_const.LIVEACTION_STATUS_SUCCEEDED
)
tk2_ac_ex = self._wait_for_state(tk2_ac_ex, ac_const.LIVEACTION_STATUS_PAUSED)
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_PAUSED)
# Resume the other subworkflow.
tk2_ac_ex = self.st2client.executions.resume(tk2_ac_ex.id)
# Wait for completion.
tk1_ac_ex = self._wait_for_state(
tk1_ac_ex, ac_const.LIVEACTION_STATUS_SUCCEEDED
)
tk2_ac_ex = self._wait_for_state(
tk2_ac_ex, ac_const.LIVEACTION_STATUS_SUCCEEDED
)
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_SUCCEEDED)
def test_pause_from_all_subworkflows_and_resume_from_parent_workflow(self):
# Temp files are created during test setup. Ensure the temp files exist.
path1 = self.temp_file_path_x
self.assertTrue(os.path.exists(path1))
path2 = self.temp_file_path_y
self.assertTrue(os.path.exists(path2))
# Launch the workflow. The workflow will wait for the temp file to be deleted.
params = {"file1": path1, "file2": path2}
ex = self._execute_workflow("examples.orquesta-test-pause-subworkflows", params)
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_RUNNING)
tk1_exs = self._wait_for_task(ex, "task1", ac_const.LIVEACTION_STATUS_RUNNING)
tk2_exs = self._wait_for_task(ex, "task2", ac_const.LIVEACTION_STATUS_RUNNING)
# Pause the subworkflow before the temp file is deleted. The task will be
# paused but workflow and the other subworkflow will still be running.
tk1_ac_ex = self.st2client.executions.pause(tk1_exs[0].id)
# Expecting the workflow is still running and task1 is pausing.
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_RUNNING)
tk1_ac_ex = self._wait_for_state(tk1_ac_ex, ac_const.LIVEACTION_STATUS_PAUSING)
tk2_ac_ex = self._wait_for_state(tk2_exs[0], ac_const.LIVEACTION_STATUS_RUNNING)
# Pause the other subworkflow before the temp file is deleted. The main
        # workflow will still be running because pause is initiated downstream.
tk2_ac_ex = self.st2client.executions.pause(tk2_exs[0].id)
# Expecting workflow and subworkflows are pausing.
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_RUNNING)
tk1_ac_ex = self._wait_for_state(tk1_ac_ex, ac_const.LIVEACTION_STATUS_PAUSING)
tk2_ac_ex = self._wait_for_state(tk2_ac_ex, ac_const.LIVEACTION_STATUS_PAUSING)
# Delete the temporary files for the subworkflows.
os.remove(path1)
self.assertFalse(os.path.exists(path1))
os.remove(path2)
self.assertFalse(os.path.exists(path2))
# Wait for subworkflows to pause. The main workflow will also
# pause now because no other task is running.
tk1_ac_ex = self._wait_for_state(tk1_ac_ex, ac_const.LIVEACTION_STATUS_PAUSED)
tk2_ac_ex = self._wait_for_state(tk2_ac_ex, ac_const.LIVEACTION_STATUS_PAUSED)
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_PAUSED)
# Resume the parent workflow.
ex = self.st2client.executions.resume(ex.id)
# Wait for completion.
tk1_ac_ex = self._wait_for_state(
tk1_ac_ex, ac_const.LIVEACTION_STATUS_SUCCEEDED
)
tk2_ac_ex = self._wait_for_state(
tk2_ac_ex, ac_const.LIVEACTION_STATUS_SUCCEEDED
)
ex = self._wait_for_state(ex, ac_const.LIVEACTION_STATUS_SUCCEEDED)
|
{
"content_hash": "ebde524f5b6477ee3fb29af19e1e0681",
"timestamp": "",
"source": "github",
"line_count": 426,
"max_line_length": 98,
"avg_line_length": 47.394366197183096,
"alnum_prop": 0.6615651312530956,
"repo_name": "Plexxi/st2",
"id": "b6588088ee2d8a5e988c3f78997d0fde8b4f74c8",
"size": "20818",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "st2tests/integration/orquesta/test_wiring_pause_and_resume.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jinja",
"bytes": "174532"
},
{
"name": "Makefile",
"bytes": "75242"
},
{
"name": "PowerShell",
"bytes": "856"
},
{
"name": "Python",
"bytes": "6453910"
},
{
"name": "Shell",
"bytes": "93607"
},
{
"name": "Starlark",
"bytes": "7236"
}
],
"symlink_target": ""
}
|
"""
wechatpy.utils
~~~~~~~~~~~~~~~
This module provides some useful utilities.
:copyright: (c) 2014 by messense.
:license: MIT, see LICENSE for more details.
"""
from __future__ import absolute_import, unicode_literals
import string
import random
import hashlib
try:
'''Use simplejson if we can, fallback to json otherwise.'''
import simplejson as json
except ImportError:
import json # NOQA
import six
import six.moves.urllib.parse as urlparse
class ObjectDict(dict):
"""Makes a dictionary behave like an object, with attribute-style access.
"""
def __getattr__(self, key):
if key in self:
return self[key]
return None
def __setattr__(self, key, value):
self[key] = value
class WeChatSigner(object):
"""WeChat data signer"""
def __init__(self, delimiter=b''):
self._data = []
self._delimiter = to_binary(delimiter)
def add_data(self, *args):
"""Add data to signer"""
for data in args:
self._data.append(to_binary(data))
@property
def signature(self):
"""Get data signature"""
self._data.sort()
str_to_sign = self._delimiter.join(self._data)
return hashlib.sha1(str_to_sign).hexdigest()
def check_signature(token, signature, timestamp, nonce):
"""Check WeChat callback signature, raises InvalidSignatureException
if check failed.
:param token: WeChat callback token
:param signature: WeChat callback signature sent by WeChat server
:param timestamp: WeChat callback timestamp sent by WeChat server
    :param nonce: WeChat callback nonce sent by WeChat server
"""
signer = WeChatSigner()
signer.add_data(token, timestamp, nonce)
if signer.signature != signature:
from wechatpy.exceptions import InvalidSignatureException
raise InvalidSignatureException()
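# A minimal usage sketch (the token is a placeholder; signature, timestamp and nonce
# would come from the query string of the WeChat callback request):
#
#   from wechatpy.exceptions import InvalidSignatureException
#   try:
#       check_signature('my-token', signature, timestamp, nonce)
#   except InvalidSignatureException:
#       pass  # reject the request, it did not come from the WeChat server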
def to_text(value, encoding='utf-8'):
"""Convert value to unicode, default encoding is utf-8
:param value: Value to be converted
:param encoding: Desired encoding
"""
if not value:
return ''
if isinstance(value, six.text_type):
return value
if isinstance(value, six.binary_type):
return value.decode(encoding)
return six.text_type(value)
def to_binary(value, encoding='utf-8'):
"""Convert value to binary string, default encoding is utf-8
:param value: Value to be converted
:param encoding: Desired encoding
"""
if not value:
return b''
if isinstance(value, six.binary_type):
return value
if isinstance(value, six.text_type):
return value.encode(encoding)
return six.binary_type(value)
def timezone(zone):
"""Try to get timezone using pytz or python-dateutil
:param zone: timezone str
:return: timezone tzinfo or None
"""
try:
import pytz
return pytz.timezone(zone)
except ImportError:
pass
try:
from dateutil.tz import gettz
return gettz(zone)
except ImportError:
return None
def random_string(length=16):
rule = string.ascii_letters + string.digits
rand_list = random.sample(rule, length)
return ''.join(rand_list)
def get_querystring(uri):
"""Get Querystring information from uri.
:param uri: uri
:return: querystring info or {}
"""
parts = urlparse.urlsplit(uri)
return urlparse.parse_qs(parts.query)
def byte2int(c):
if six.PY2:
return ord(c)
return c
|
{
"content_hash": "6bc31957552364bd9bb8e0b2842235ae",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 77,
"avg_line_length": 24.802816901408452,
"alnum_prop": 0.6473594548551959,
"repo_name": "hunter007/wechatpy",
"id": "b3259dda234afebea6927fc5d7b9eaa6c4e4dac7",
"size": "3546",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "wechatpy/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "530006"
},
{
"name": "Shell",
"bytes": "70"
}
],
"symlink_target": ""
}
|
import json
import inspect
GB_IN_KB = 1073741824.0
cb_info = {
"drive_add": {
"bus_name": "org.storaged.Storaged",
"signal_name": "InterfacesAdded",
"path": None,
"dbus_interface": "org.freedesktop.DBus.ObjectManager"
},
"drive_remove": {
"bus_name": "org.storaged.Storaged",
"signal_name": "InterfacesRemoved",
"path": None,
"dbus_interface": "org.freedesktop.DBus.ObjectManager"
},
"drive_corruption": {
"bus_name": "org.storaged.Storaged",
"signal_name": "PropertiesChanged",
"path": None,
"dbus_interface": "org.freedesktop.DBus.Properties"
},
"block_property_changed": {
"bus_name": "org.storaged.Storaged",
"signal_name": "PropertiesChanged",
"path": None,
"dbus_interface": "org.freedesktop.DBus.Properties"
},
"block_add": {
"bus_name": "org.storaged.Storaged",
"signal_name": "InterfacesAdded",
"path": None,
"dbus_interface": "org.freedesktop.DBus.ObjectManager"
},
"block_remove": {
"bus_name": "org.storaged.Storaged",
"signal_name": "InterfacesRemoved",
"path": None,
"dbus_interface": "org.freedesktop.DBus.ObjectManager"
},
"mount_state_change": {
"bus_name": "org.storaged.Storaged",
"signal_name": "PropertiesChanged",
"path": None,
"dbus_interface": "org.freedesktop.DBus.Properties"
},
"glusterd_status": {
"signal_name": "PropertiesChanged",
"path": "/org/freedesktop/systemd1/unit/glusterd_2eservice",
"dbus_interface": "org.freedesktop.DBus.Properties",
"bus_name": None
},
"salt_minion_status": {
"signal_name": "PropertiesChanged",
"path": "/org/freedesktop/systemd1/unit/salt_2dminion_2eservice",
"dbus_interface": "org.freedesktop.DBus.Properties",
"bus_name": None
},
"network_device_added": {
"signal_name": "DeviceAdded",
"path": "/org/freedesktop/NetworkManager",
"dbus_interface": "org.freedesktop.NetworkManager",
"bus_name": "org.freedesktop.NetworkManager"
},
"network_device_removed": {
"signal_name": "DeviceRemoved",
"path": "/org/freedesktop/NetworkManager",
"dbus_interface": "org.freedesktop.NetworkManager",
"bus_name": "org.freedesktop.NetworkManager"
},
"network_device_changed": {
"signal_name": "PropertiesChanged",
"path": None,
"dbus_interface": "org.freedesktop.NetworkManager.Device.Wired",
"bus_name": "org.freedesktop.NetworkManager"
},
"collectd_status": {
"signal_name": "PropertiesChanged",
"path": "/org/freedesktop/systemd1/unit/collectd_2eservice",
"dbus_interface": "org.freedesktop.DBus.Properties",
"bus_name": None
},
"lvm_vg_create": {
"bus_name": "org.storaged.Storaged",
"signal_name": "InterfacesAdded",
"path": None,
"dbus_interface": None
},
"lvm_vg_delete": {
"bus_name": "org.storaged.Storaged",
"signal_name": "InterfacesRemoved",
"path": None,
"dbus_interface": None
},
"lvm_lv_create": {
"bus_name": "org.storaged.Storaged",
"signal_name": "InterfacesAdded",
"path": None,
"dbus_interface": None
},
"lvm_lv_delete": {
"bus_name": "org.storaged.Storaged",
"signal_name": "InterfacesRemoved",
"path": None,
"dbus_interface": None
}
}
class CallBack(object):
"""
Call back class that contains all the callback functions.
"""
def __init__(self, caller):
self.caller = caller
f = open("/etc/salt/minion_id", "r")
self.minion_id = f.read()
f.close()
def drive_add(self, a, d, path):
for k, v in d.iteritems():
if e.split('.')[-1] == "Drive":
res = {}
res["tags"] = {}
res["tags"]["ID"] = str(v.get("Id"))
res["tags"]["size"] = str(v.get("Size"))
res["tags"]["Model"] = str(v.get("Model"))
res["tags"]["Seat"] = str(v.get("Seat"))
res["tags"]["Serial"] = str(v.get("Serial"))
res["tags"]["Vendor"] = str(v.get("Vendor"))
res["message"] = "New storage drive of size %s Gb added"
" Id: %s" % (str(float(res["tags"]["size"])/(GB_IN_KB)),
res["tags"]["ID"])
res["severity"] = "INFO"
print res
tag = "dbus/node/{0}/generic/storage/drive/added".format(
self.minion_id)
self.caller.sminion.functions['event.send'](
tag,
res
)
def drive_remove(self, a, d, path):
for e in d:
if e.split('.')[-1] == "Drive":
res = {}
res["tags"] = {}
res["tags"]["ID"] = str(a.split('/')[-1]).replace("_", "-")
res["tags"]["Action"] = "Removed"
res["message"] = "Storage drive Removed ID: %s" % (
res["tags"]["ID"])
res["severity"] = "Warning"
tag = "dbus/node/{0}/generic/storage/drive/removed".format(
self.minion_id)
self.caller.sminion.functions['event.send'](
tag,
res
)
def drive_corruption(self, a, d, c, path):
res = {}
res["tags"] = {}
device_id = path.split("/")[-1]
if a.split('.')[-1] == "Ata":
for k, v in d.items():
if k in ["SmartFailing", "SmartTemperature",
"SmartSelftestStatus", "SmartNumAttributesFailing"]:
res["tags"][str(k)] = str(v)
            if not res["tags"]:
return
res["tags"]["deviceId"] = device_id
res["message"] = "Device with id: %s might be failing" % (
device_id)
res["severity"] = "Critical"
tag = "dbus/node/{0}/generic/storage/drive/possible"
"Failure".format(self.minion_id)
self.caller.sminion.functions['event.send'](
tag,
res
)
def block_property_changed(self, a, d, c, path):
res = {}
res["tags"] = {}
device_name = path.split("/")[-1]
if a.split('.')[-1] == "Block":
for k, v in d.items():
if k in ["IdType", "IdUsage", "IdVersion"]:
res["tags"][str(k)] = str(v)
            if not res["tags"]:
return
res["tags"]["deviceName"] = device_name
res["message"] = "Properties of block device %s has changed" % (
device_name)
res["severity"] = "INFO"
tag = "dbus/node/{0}/generic/storage/block/changed".format(
self.minion_id)
self.caller.sminion.functions['event.send'](
tag,
res
)
def block_add(self, a, d, path):
res = {}
res["tags"] = {}
for e, v in d.iteritems():
if e.split('.')[-1] == "Block":
deviceName = ""
for e in v.get("Device"):
deviceName += str(e)
res["tags"]["DeviceName"] = deviceName[:-1]
res["tags"]["DeviceNumber"] = str(v.get("DeviceNumber"))
res["tags"]["Drive"] = str(v.get("Drive")).split(
'/')[-1].replace("_", "-")
res["tags"]["size"] = str(v.get("Size"))
res["tags"]["ID"] = str(v.get("Id"))
res["message"] = "New Block Device %s of size %s "
"GB added" % (res["tags"]["DeviceName"],
str(float(res["tags"]["size"])/(GB_IN_KB)))
res["severity"] = "INFO"
elif e.split('.')[-1] == "Partition":
res["tags"]["PartitionNumber"] = str(v.get("Number"))
res["tags"]["Offset"] = str(v.get("Offset"))
res["tags"]["size"] = str(v.get("Size"))
res["tags"]["Table"] = str(v.get("Table")).split('/')[-1]
res["tags"]["Type"] = str(v.get("Type"))
if res:
tag = "dbus/node/{0}/generic/storage/block/added".format(
self.minion_id)
self.caller.sminion.functions['event.send'](
tag,
res
)
def block_remove(self, a, d, path):
for e in d:
if e.split('.')[-1] == "Block":
res = {}
res["tags"] = {}
res["tags"]["DeviceName"] = str(a.split('/')[-1])
res["tags"]["Action"] = "Removed"
res["message"] = "Block Device %s Removed" % (
res["tags"]["DeviceName"])
res["severity"] = "Warning"
tag = "dbus/node/{0}/generic/storage/Block/"
"removed".format(self.minion_id)
self.caller.sminion.functions['event.send'](
tag,
res
)
def mount_state_change(self, a, d, c, path):
res = {}
res["tags"] = {}
if a.split('.')[-1] == "Filesystem":
mountPoints = []
for k, v in d.items():
res["tags"]["DeviceName"] = str(path).split('/')[-1]
res["tags"]["Action"] = "Device mount state changed"
for mPoint in v:
mountPoint = ""
for e in mPoint:
mountPoint += str(e)
mountPoints.append(mountPoint[:-1])
res["tags"]["MountPoints"] = mountPoints
res["message"] = "Device %s mounted on following mount points"
": %s" % (res["tags"]["DeviceName"], ",".join(
res["tags"]["MountPoints"]))
res["severity"] = "INFO"
tag = "dbus/node/{0}/generic/storage/mount/changed".format(
self.minion_id)
self.caller.sminion.functions['event.send'](
tag,
res
)
def network_device_added(self, a, path):
res = {}
res["tags"] = {}
res["tags"]["deviceNo"] = str(a.split('/')[-1])
res["tags"]["action"] = "added"
res["message"] = "Network device added"
res["severity"] = "info"
tag = "dbus/node/{0}/generic/network/device/added".format(
self.minion_id)
self.caller.sminion.functions['event.send'](
tag,
res
)
def network_device_removed(self, a, path):
res = {}
res["tags"] = {}
res["tags"]["deviceNo"] = str(a.split('/')[-1])
res["tags"]["action"] = "removed"
res["message"] = "Network device removed"
res["severity"] = "info"
tag = "dbus/node/{0}/generic/network/device/removed".format(
self.minion_id)
self.caller.sminion.functions['event.send'](
tag,
res
)
def network_device_changed(self, d, path):
res = {}
res["tags"] = {}
for k, v in d.iteritems():
if str(k) not in ["StateReason",
"ActiveConnection",
"AvailableConnections"]:
res["tags"][str(k)] = str(v)
if not res["tags"]:
return
res["tags"]["deviceNo"] = str(path.split('/')[-1])
res["message"] = "Network device property changed"
res["severity"] = "info"
tag = "dbus/node/{0}/generic/network/device/propertyChanged".format(
self.minion_id)
self.caller.sminion.functions['event.send'](
tag,
res
)
def glusterd_status(self, a, b, c, path):
if str(a).split('.')[-1] != "Unit":
return
res = {}
res["tags"] = {}
res["tags"]["serviceName"] = "Glusterd"
res["tags"]["ActiveState"] = str(b.get("ActiveState"))
res["tags"]["SubState"] = str(b.get("SubState"))
res["message"] = "glusterd process state changed"
" to %s" % (res["tags"]["ActiveState"])
res["severity"] = "Warning"
tag = "dbus/node/{0}/glusterfs/service/glusterd".format(self.minion_id)
self.caller.sminion.functions['event.send'](
tag,
res
)
def salt_minion_status(self, a, b, c, path):
if str(a).split('.')[-1] != "Unit":
return
res = {}
res["tags"] = {}
res["tags"]["serviceName"] = "salt-minion"
res["tags"]["ActiveState"] = str(b.get("ActiveState"))
res["tags"]["SubState"] = str(b.get("SubState"))
res["message"] = "salt-minion process state changed to %s" % (
res["tags"]["ActiveState"])
res["severity"] = "Warning"
tag = "dbus/node/{0}/generic/service/salt_minion".format(
self.minion_id)
self.caller.sminion.functions['event.send'](
tag,
res
)
def collectd_status(self, a, b, c, path):
if str(a).split('.')[-1] != "Unit":
return
res = {}
res["tags"] = {}
res["tags"]["serviceName"] = "collectd"
res["tags"]["ActiveState"] = str(b.get("ActiveState"))
res["tags"]["SubState"] = str(b.get("SubState"))
res["message"] = "collectd process state changed to %s" % (
res["tags"]["ActiveState"])
res["severity"] = "Warning"
tag = "dbus/node/{0}/generic/service/collectd".format(self.minion_id)
self.caller.sminion.functions['event.send'](
tag,
res
)
def lvm_vg_create(self, a, b, path):
if b.keys()[0] != "org.storaged.Storaged.VolumeGroup":
return
res = {}
res["tags"] = {}
res["tags"]['VgName'] = str(b[b.keys()[0]].get('Name'))
res["tags"]['UUID'] = str(b[b.keys()[0]].get('UUID'))
res["tags"]['Size'] = str(b[b.keys()[0]].get('Size'))
res["tags"]['FreeSize'] = str(b[b.keys()[0]].get('FreeSize'))
tag = "dbus/node/{0}/generic/lvm/vg/create".format(self.minion_id)
self.caller.sminion.functions['event.send'](
tag,
res,
with_env=True
)
def lvm_lv_create(self, a, b, path):
if b.keys()[0] != "org.storaged.Storaged.LogicalVolume":
return
res = {}
res["tags"] = {}
res["tags"]['LvName'] = str(b[b.keys()[0]].get('Name'))
res["tags"]['UUID'] = str(b[b.keys()[0]].get('UUID'))
res["tags"]['Size'] = str(b[b.keys()[0]].get('Size'))
res["tags"]['Type'] = str(b[b.keys()[0]].get('Type'))
        if res["tags"]['Type'] != 'pool':
pass
else:
res["tags"]['ThinPool'] = str(b[b.keys()[0]].get(
'ThinPool')).split('/')
res["tags"]['VolumeGroup'] = str(b[b.keys()[0]].get(
'VolumeGroup')).split('/')[-1]
tag = "dbus/node/{0}/generic/lvm/lv/create".format(self.minion_id)
self.caller.sminion.functions['event.send'](
tag,
res
)
def lvm_lv_delete(self, a, b, path):
if str(b[0]) != "org.storaged.Storaged.LogicalVolume":
return
res = {}
res["tags"] = {}
tv = str(a).split('/')
res["tags"]['LvName'] = tv[-2] + "/" + tv[-1]
res["tags"]['Action'] = "Removed"
tag = "dbus/node/{0}/generic/lvm/lv/delete".format(self.minion_id)
self.caller.sminion.functions['event.send'](
tag,
res
)
def lvm_vg_delete(self, a, b, path):
if str(b[0]) != "org.storaged.Storaged.VolumeGroup":
return
res = {}
res["tags"] = {}
tv = str(a).split('/')
res["tags"]['VgName'] = tv[-1]
res["tags"]['Action'] = "Removed"
tag = "dbus/node/{0}/generic/lvm/vg/delete".format(self.minion_id)
self.caller.sminion.functions['event.send'](
tag,
res
)
def get_enabled_methods(cb):
"""
This function reads the configuration file dbusAgent.conf and
return's a dictionary with function name as key and function as
value of all the enabled methods
"""
    with open("/root/DbusAgent/dbusAgent.json", 'r') as f:
        conf = json.loads(f.read())
enabled_methods = []
for group, info in conf.iteritems():
if not conf[group]['enabled']:
continue
for method, enabled in conf[group]["methods"].iteritems():
if enabled:
enabled_methods.append(str(method))
all_methods = inspect.getmembers(cb)
em = {}
for el in all_methods:
if el[0] in enabled_methods:
em.update({el[0]: el[1]})
return em
|
{
"content_hash": "e20df448a04653b48f945493fc1afe2c",
"timestamp": "",
"source": "github",
"line_count": 469,
"max_line_length": 79,
"avg_line_length": 36.39445628997868,
"alnum_prop": 0.4823363993204054,
"repo_name": "nnDarshan/dbus_agent",
"id": "2753cd0effbd0f898022b711e7b8dd09031d264e",
"size": "17069",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "callBack.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "25584"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: net_l3_interface
version_added: "2.4"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Manage L3 interfaces on network devices
description:
- This module provides declarative management of L3 interfaces
on network devices.
options:
name:
description:
- Name of the L3 interface.
ipv4:
description:
- IPv4 of the L3 interface.
ipv6:
description:
- IPv6 of the L3 interface.
aggregate:
    description: List of L3 interface definitions
purge:
description:
- Purge L3 interfaces not defined in the I(aggregate) parameter.
default: no
state:
description:
- State of the L3 interface configuration.
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: Set eth0 IPv4 address
net_l3_interface:
name: eth0
ipv4: 192.168.0.1/24
- name: Remove eth0 IPv4 address
net_l3_interface:
name: eth0
state: absent
- name: Set IP addresses on aggregate
net_l3_interface:
aggregate:
- { name: eth1, ipv4: 192.168.2.10/24 }
- { name: eth2, ipv4: 192.168.3.10/24, ipv6: "fd5d:12c9:2201:1::1/64" }
- name: Remove IP addresses on aggregate
net_l3_interface:
aggregate:
- { name: eth1, ipv4: 192.168.2.10/24 }
- { name: eth2, ipv4: 192.168.3.10/24, ipv6: "fd5d:12c9:2201:1::1/64" }
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always, except for the platforms that use Netconf transport to manage the device.
type: list
sample:
- set interfaces ethernet eth0 address '192.168.0.1/24'
"""
|
{
"content_hash": "8c65917c18830e7a7b0ff80fc16a3d1f",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 93,
"avg_line_length": 25.68918918918919,
"alnum_prop": 0.6543924250394529,
"repo_name": "e-gob/plataforma-kioscos-autoatencion",
"id": "db842e504699d9169f9c1b2f271c39b14a921614",
"size": "2074",
"binary": false,
"copies": "94",
"ref": "refs/heads/master",
"path": "scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/network/layer3/net_l3_interface.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "41110"
},
{
"name": "C++",
"bytes": "3804"
},
{
"name": "CSS",
"bytes": "34823"
},
{
"name": "CoffeeScript",
"bytes": "8521"
},
{
"name": "HTML",
"bytes": "61168"
},
{
"name": "JavaScript",
"bytes": "7206"
},
{
"name": "Makefile",
"bytes": "1347"
},
{
"name": "PowerShell",
"bytes": "584344"
},
{
"name": "Python",
"bytes": "25506593"
},
{
"name": "Ruby",
"bytes": "245726"
},
{
"name": "Shell",
"bytes": "5075"
}
],
"symlink_target": ""
}
|
import functools
import numpy as np
from scipy.stats import norm as ndist
import regreg.api as rr
from selection.tests.instance import gaussian_instance
from selection.learning.utils import full_model_inference, pivot_plot
from selection.learning.core import normal_sampler, keras_fit
from selection.learning.learners import mixture_learner
mixture_learner.scales = [1]*10 + [1.5,2,3,4,5,10]
def BHfilter(pval, q=0.2):
pval = np.asarray(pval)
pval_sort = np.sort(pval)
comparison = q * np.arange(1, pval.shape[0] + 1.) / pval.shape[0]
passing = pval_sort < comparison
if passing.sum():
thresh = comparison[np.nonzero(passing)[0].max()]
return np.nonzero(pval <= thresh)[0]
return []
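# Illustrative check of BHfilter (not part of the original script): for
# pvals = [0.001, 0.2, 0.03, 0.5] and the default q = 0.2 the Benjamini-Hochberg
# cutoff is 0.1, so BHfilter(pvals) returns array([0, 2]) -- the indices of the
# two p-values at or below the cutoff.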
def simulate(n=200, p=100, s=10, signal=(2.5, 3), sigma=2, alpha=0.1, B=1000):
# description of statistical problem
X, y, truth = gaussian_instance(n=n,
p=p,
s=s,
equicorrelated=False,
rho=0.5,
sigma=sigma,
signal=signal,
random_signs=True,
scale=False)[:3]
XTX = X.T.dot(X)
XTXi = np.linalg.inv(XTX)
resid = y - X.dot(XTXi.dot(X.T.dot(y)))
dispersion = np.linalg.norm(resid)**2 / (n-p)
S = X.T.dot(y)
covS = dispersion * X.T.dot(X)
smooth_sampler = normal_sampler(S, covS)
def meta_algorithm(XTX, XTXi, dispersion, lam, sampler):
p = XTX.shape[0]
success = np.zeros(p)
loss = rr.quadratic_loss((p,), Q=XTX)
pen = rr.l1norm(p, lagrange=lam)
scale = 0.
noisy_S = sampler(scale=scale)
soln = XTXi.dot(noisy_S)
solnZ = soln / (np.sqrt(np.diag(XTXi)) * np.sqrt(dispersion))
pval = ndist.cdf(solnZ)
pval = 2 * np.minimum(pval, 1 - pval)
return set(BHfilter(pval, q=0.2))
lam = 4. * np.sqrt(n)
selection_algorithm = functools.partial(meta_algorithm, XTX, XTXi, dispersion, lam)
# run selection algorithm
return full_model_inference(X,
y,
truth,
selection_algorithm,
smooth_sampler,
success_params=(1, 1),
B=B,
fit_probability=keras_fit,
fit_args={'epochs':5, 'sizes':[200]*10, 'dropout':0., 'activation':'relu'})
if __name__ == "__main__":
import statsmodels.api as sm
import matplotlib.pyplot as plt
import pandas as pd
for i in range(500):
df = simulate(B=40000)
csvfile = 'keras_targets_BH_strong.csv'
outbase = csvfile[:-4]
if df is not None and i > 0:
try: # concatenate to disk
df = pd.concat([df, pd.read_csv(csvfile)])
except FileNotFoundError:
pass
df.to_csv(csvfile, index=False)
if len(df['pivot']) > 0:
pivot_ax, length_ax = pivot_plot(df, outbase)
|
{
"content_hash": "993a85833ed441c510e9160bd56994bf",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 107,
"avg_line_length": 33.255102040816325,
"alnum_prop": 0.5105860693464253,
"repo_name": "selective-inference/selective-inference",
"id": "d11b2be9e3fd63de7a6c7eeba16aba9067ea013e",
"size": "3259",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "doc/learning_examples/keras/keras_targets_BH_strong.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "269"
},
{
"name": "C++",
"bytes": "13148"
},
{
"name": "Python",
"bytes": "572490"
},
{
"name": "R",
"bytes": "11134"
},
{
"name": "TeX",
"bytes": "3355"
}
],
"symlink_target": ""
}
|
import time
import pytest
from kervi.application import Application
import kervi.utility.nethelper as nethelper
APP_READY = False
MODULE_LOADED = None
def app_ready(module_name):
global APP_READY
APP_READY = True
def module_loaded(value):
global MODULE_LOADED
MODULE_LOADED = value
@pytest.mark.slow
def test_application():
app = Application(
{
"modules":["app_module"],
"network":{
"ip": "127.0.0.1"
}
})
app.spine.register_event_handler("appReady", app_ready)
app.spine.register_command_handler("signalModuleLoad", module_loaded)
assert app.config.application.id == "kervi"
assert app.config.modules == ["app_module"]
assert app.config.network.ip == "127.0.0.1"
app._xrun()
process_info = app.spine.send_query("getProcessInfo")
time.sleep(5)
app.stop(False)
assert APP_READY
assert MODULE_LOADED == "test_x"
assert len(process_info) == 4
processes = ["application", "plugin_kervi.plugin.ipc.websocket", "plugin_kervi.plugin.ui.web", "app_module"]
for process in process_info:
assert process["id"] in processes
|
{
"content_hash": "d1d4672194ad2f556457b5b8b1ac4b05",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 112,
"avg_line_length": 23.5,
"alnum_prop": 0.6459574468085106,
"repo_name": "kervi/kervi",
"id": "7bad9045d60bb86f1a4cfbafc24ee5322fe9ad18",
"size": "1175",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kervi/tests/test_application.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "389"
},
{
"name": "CSS",
"bytes": "610125"
},
{
"name": "HTML",
"bytes": "2998420"
},
{
"name": "JavaScript",
"bytes": "16183042"
},
{
"name": "Python",
"bytes": "954284"
},
{
"name": "Shell",
"bytes": "557"
},
{
"name": "TypeScript",
"bytes": "286601"
}
],
"symlink_target": ""
}
|
from netforce.controller import Controller
from netforce.template import render
from netforce.model import get_model
from netforce.database import get_connection, get_active_db # XXX: move this
from netforce.locale import set_active_locale, get_active_locale
from .cms_base import BaseController
class Addresses(BaseController):
_path = "/ecom_addresses"
def get(self):
db = get_connection()
try:
ctx = self.context
edit_form_vals = {}
for addr in ctx["customer"].addresses:
edit_form_vals[addr.id] = {
"first_name": addr.first_name,
"last_name": addr.last_name,
"company": addr.company,
"address": addr.address,
"address2": addr.address2,
"province": addr.province,
"province_id": addr.province_id.id,
"district_id": addr.district_id.id,
"subdistrict_id": addr.subdistrict_id.id,
"postal_code": addr.postal_code,
"country": addr.country_id.name,
"phone": addr.phone,
}
ctx["edit_form_vals"] = edit_form_vals
content = render("ecom_addresses", ctx)
ctx["content"] = content
html = render("cms_layout", ctx)
self.write(html)
db.commit()
except:
import traceback
traceback.print_exc()
db.rollback()
def post(self):
db = get_connection()
try:
try:
addr_id = self.get_argument("id", None)
if addr_id:
addr_id = int(addr_id)
method = self.get_argument("method", None)
if method == "delete":
get_model("address").delete([addr_id])
return
# fields=["first_name","last_name","company","address","address2","postal_code","province","district","subdistrict","country","phone"]
fields = ["first_name", "last_name", "company", "address", "address2",
"postal_code", "city", "province", "district", "subdistrict", "country", "phone"]
# req_fields=["first_name","last_name","address","postal_code","country","province","district","subdistrict"]
req_fields = ["first_name", "last_name", "address", "postal_code", "country"]
field_errors = {}
form_vals = {}
for n in fields:
v = self.get_argument(n, None)
form_vals[n] = v
if n in req_fields and not v:
field_errors[n] = True
if field_errors:
raise Exception("Some required fields are missing")
user_id = self.get_cookie("user_id")
user_id = int(user_id)
user = get_model("base.user").browse(user_id)
if not user:
raise Exception("User not found")
contact_id = user.contact_id.id
res = get_model("country").search([["id", "=", form_vals["country"]]])
if not res:
raise Exception("Invalid country")
country_id = res[0]
res=get_model("province").search([["id","=",form_vals["province"]]])
if not res:
raise Exception("Invalid province")
province_id=res[0]
res=get_model("district").search([["id","=",form_vals["district"]]])
if not res:
raise Exception("Invalid district")
district_id=res[0]
res=get_model("subdistrict").search([["id","=",form_vals["subdistrict"]]])
if not res:
raise Exception("Invalid subdistrict")
subdistrict_id=res[0]
vals = {
"contact_id": contact_id,
"first_name": form_vals["first_name"],
"last_name": form_vals["last_name"],
"company": form_vals["company"],
"address": form_vals["address"],
"address2": form_vals["address2"],
"province_id": province_id,
"postal_code": form_vals["postal_code"],
"country_id": country_id,
"city": form_vals["city"],
"province_id": province_id,
"district_id": district_id,
"subdistrict_id": subdistrict_id,
}
if addr_id:
get_model("address").write([addr_id], vals)
else:
get_model("address").create(vals)
cart_id = self.get_cookie("cart_id")
if cart_id:
cart_id = int(cart_id)
get_model("ecom.cart").set_default_address([cart_id])
db.commit()
self.redirect("/ecom_addresses")
except Exception as e:
ctx = self.context
db = get_connection()
error_message = str(e)
edit_form_vals = {}
for addr in ctx["customer"].addresses:
edit_form_vals[addr.id] = {
"first_name": addr.first_name,
"last_name": addr.last_name,
"company": addr.company,
"address": addr.address,
"address2": addr.address2,
"postal_code": addr.postal_code,
"province": addr.province,
"district": addr.district,
"subdistrict": addr.subdistrict,
"city": addr.city,
"country": addr.country_id.name,
"phone": addr.phone,
}
ctx["edit_form_vals"] = edit_form_vals
if addr_id:
edit_form_vals[addr_id] = form_vals
ctx["edit_error_message"] = {addr_id: error_message}
ctx["edit_field_errors"] = {addr_id: field_errors}
ctx["show_edit_address"] = {addr_id: True}
else:
ctx["form_vals"] = form_vals
ctx["error_message"] = error_message
ctx["field_errors"] = field_errors
ctx["show_add_address"] = True
content = render("ecom_addresses", ctx)
ctx["content"] = content
html = render("cms_layout", ctx)
db.commit()
self.write(html)
except:
import traceback
traceback.print_exc()
db.rollback()
Addresses.register()
|
{
"content_hash": "4f90ee8992820eeaa64a25e7bb38f5d5",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 150,
"avg_line_length": 44.94230769230769,
"alnum_prop": 0.45856511196690913,
"repo_name": "bank-netforce/netforce",
"id": "9c2db2d71104651f1ede80335f7e79e003817904",
"size": "8116",
"binary": false,
"copies": "4",
"ref": "refs/heads/stable-3.1",
"path": "netforce_ecom/netforce_ecom/controllers/ecom_addresses.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "73"
},
{
"name": "CSS",
"bytes": "407336"
},
{
"name": "HTML",
"bytes": "478918"
},
{
"name": "Java",
"bytes": "11870"
},
{
"name": "JavaScript",
"bytes": "3712147"
},
{
"name": "Makefile",
"bytes": "353"
},
{
"name": "PHP",
"bytes": "2274"
},
{
"name": "Python",
"bytes": "3469514"
},
{
"name": "Roff",
"bytes": "15858"
},
{
"name": "Shell",
"bytes": "117"
}
],
"symlink_target": ""
}
|
import json
import os
import re
class HubMetaData(object):
os_info_path = '/etc/os-release'
    connected_wifi_info_path = '/etc/NetworkManager/system-connections/bluenet'
cpu_info_path = '/proc/cpuinfo'
@classmethod
def _read_from_file(cls, file_path):
if not os.path.isfile(file_path):
return ''
with open(file_path) as data_file:
if file_path.endswith('.json'):
return json.load(data_file)
else:
return data_file.readlines()
@classmethod
def os_version(cls):
os_info = cls._read_from_file(cls.os_info_path)
for line in os_info:
if 'VERSION=' in line:
version = re.split('VERSION=', line)[1]
return version.strip().replace('"', '')
return ''
@classmethod
def wifi(cls):
network_info = cls._read_from_file(cls.connected_wifi_info_path)
for line in network_info:
if 'ssid=' in line:
ssid = re.split('ssid=', line)[1]
return ssid.strip()
return ''
@classmethod
def hardware_identifier(cls):
cpu_info = cls._read_from_file(cls.cpu_info_path)
for line in cpu_info:
if 'Serial' in line:
serial = re.split(':\s*', line)[1]
return serial.strip().replace('02c00081', '')
return ''
@classmethod
def phue_bridge_info(cls, devices_path, bridge_ip):
devices = cls._read_from_file(devices_path)
bridges = [
device for device in devices
if device['ip'] == bridge_ip and device['type'] == 'philips_hue'
]
if not bridges or not bridges[0].get('extra'):
return {}
return bridges[0]['extra'].get('bridge', {})
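# Illustrative usage (values depend on the files present on the hub):
# HubMetaData.os_version()          -> the VERSION string from /etc/os-release
# HubMetaData.wifi()                -> the SSID stored in the NetworkManager profile
# HubMetaData.hardware_identifier() -> the CPU serial with '02c00081' stripped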
|
{
"content_hash": "8d3e164505fe3349e210da61edc7b4dd",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 78,
"avg_line_length": 26.794117647058822,
"alnum_prop": 0.5433589462129528,
"repo_name": "getsenic/senic-hub",
"id": "28b46e928f27cab47358fd5197f9ae5deb47c194",
"size": "1822",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "senic_hub/backend/hub_metadata.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2426"
},
{
"name": "Python",
"bytes": "255261"
},
{
"name": "Shell",
"bytes": "74338"
},
{
"name": "Vim script",
"bytes": "7381"
}
],
"symlink_target": ""
}
|
import urllib, shutil, csv
from time import time
import os
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.sys.path.insert(0,parentdir)
import summary.summary
def findRoutines(fileName):
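    # Each line of the input file is assumed to be semicolon-delimited with the
    # doxygen URL of the routine in the fourth field, e.g. (made-up line):
    # sgetri;single;computational;http://www.netlib.org/lapack/explore-html/.../sgetri.f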
for ln in fileName:
url = ln.split(";")[3]
routineName = url.split("/")[-1]
#print routineName
#print url
f = urllib.urlopen(url)
flag = 1
for line in f:
line = line[3:]
#print line
if line.startswith("Arguments"):
break
else:
if line.startswith("\par Purpose:"):
flag = 0
if line.startswith("Arguments"):
flag = 1
if not flag:
index1 = line.find("inverse of a")
if index1 > -1:
routines_inverse_341.append(routineName)
f_inverse_341.write(routineName)
else:
pass
fileName.close()
print "------------- Find 'inverse' routines in v3.4.1 --------------"
###------------ find routines that compute the inverse of a matrix in the new version
###------------ and write them into routines/inverse_341.txt
## find the routines that HAVE the keywords:
f_computational_341_single = open(parentdir+'/sort341/routines/computational_341_single.txt')
f_computational_341_double = open(parentdir+'/sort341/routines/computational_341_double.txt')
f_computational_341_complex = open(parentdir+'/sort341/routines/computational_341_complex.txt')
f_computational_341_complex16 = open(parentdir+'/sort341/routines/computational_341_complex16.txt')
f_inverse_341 = open('./routines/inverse_341.txt', 'w')
routines_inverse_341 = []
start = time()
findRoutines(f_computational_341_single)
findRoutines(f_computational_341_double)
findRoutines(f_computational_341_complex)
findRoutines(f_computational_341_complex16)
elapsed = (time() - start)
print "There are %s routines in the 341 version that provides inverse." % len(routines_inverse_341), elapsed
|
{
"content_hash": "a00cde4d75afb172278804283c7de09f",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 108,
"avg_line_length": 35.11666666666667,
"alnum_prop": 0.6098718557190318,
"repo_name": "LighthouseHPC/lighthouse",
"id": "136a851a998f51cdd2bf4581c0611e37d7bd1c32",
"size": "2107",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/LAPACK341/computational_inverse/inverse_find_341.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
from flask import render_template
def bootstrap():
return render_template('bootstrap.html')
|
{
"content_hash": "a0c934090bc9a927cd470ecc68e73818",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 44,
"avg_line_length": 19.6,
"alnum_prop": 0.7551020408163265,
"repo_name": "stephanos/subvoc",
"id": "606cfc77a57f12502761a00a7a3aa8146e12b6f1",
"size": "98",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/routes/bootstrap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6646"
},
{
"name": "HTML",
"bytes": "2443"
},
{
"name": "JavaScript",
"bytes": "1294927"
},
{
"name": "Python",
"bytes": "64112"
},
{
"name": "Shell",
"bytes": "3154"
}
],
"symlink_target": ""
}
|
import unittest
if __name__ == '__main__':
from main import setup_tincan_path
setup_tincan_path()
from tincan import InteractionComponent, LanguageMap
class InteractionComponentTest(unittest.TestCase):
def test_InitEmpty(self):
icomp = InteractionComponent()
self.assertIsNone(icomp.id)
self.assertNotIn('description', vars(icomp))
def test_InitExceptionEmptyId(self):
with self.assertRaises(ValueError):
InteractionComponent(id='')
def test_InitId(self):
icomp = InteractionComponent(id='test')
self.assertEqual(icomp.id, 'test')
self.assertNotIn('description', vars(icomp))
def test_InitDescription(self):
icomp = InteractionComponent(description={"en-US": "test"})
self.assertIsNone(icomp.id)
self.descriptionVerificationHelper(icomp.description)
def test_InitEmptyDescription(self):
icomp = InteractionComponent(id='test', description={})
self.assertEqual(icomp.id, 'test')
self.assertIsInstance(icomp.description, LanguageMap)
self.assertEqual(len(vars(icomp.description)), 0)
def test_InitAnonDescription(self):
icomp = InteractionComponent(id='test', description={"en-US": "test"})
self.assertEqual(icomp.id, 'test')
self.descriptionVerificationHelper(icomp.description)
def test_InitLanguageMapDescription(self):
icomp = InteractionComponent(id='test', description=LanguageMap({"en-US": "test"}))
self.assertEqual(icomp.id, 'test')
self.descriptionVerificationHelper(icomp.description)
def test_InitEmptyLanguageMapDescription(self):
icomp = InteractionComponent(id='test', description=LanguageMap({}))
self.assertEqual(icomp.id, 'test')
self.assertIsInstance(icomp.description, LanguageMap)
self.assertEqual(len(vars(icomp.description)), 0)
def test_InitUnpackDescription(self):
obj = {"description": {"en-US": "test"}}
icomp = InteractionComponent(**obj)
self.descriptionVerificationHelper(icomp.description)
def test_InitUnpack(self):
obj = {"id": "test", "description": {"en-US": "test"}}
icomp = InteractionComponent(**obj)
self.assertEqual(icomp.id, 'test')
self.descriptionVerificationHelper(icomp.description)
def test_InitExceptionUnpackEmptyId(self):
obj = {"id": ""}
with self.assertRaises(ValueError):
InteractionComponent(**obj)
def test_InitExceptionUnpackFlatDescription(self):
obj = {"id": "test", "description": "test"}
with self.assertRaises(ValueError):
InteractionComponent(**obj)
def test_FromJSONExceptionBadJSON(self):
with self.assertRaises(ValueError):
InteractionComponent.from_json('{"bad JSON"}')
def test_FromJSONExceptionMalformedJSON(self):
with self.assertRaises(AttributeError):
InteractionComponent.from_json('{"test": "invalid property"}')
""" An exception is best here to keep client code from thinking its doing \
something its not when instantiating a InteractionComponent """
def test_FromJSONExceptionPartiallyMalformedJSON(self):
with self.assertRaises(AttributeError):
InteractionComponent.from_json('{"test": "invalid property", "id": \
"valid property"}')
def test_FromJSONEmptyObject(self):
icomp = InteractionComponent.from_json('{}')
self.assertIsNone(icomp.id)
self.assertNotIn('description', vars(icomp))
def test_FromJSONExceptionEmpty(self):
with self.assertRaises(ValueError):
InteractionComponent.from_json('')
def test_FromJSONId(self):
icomp = InteractionComponent.from_json('{"id": "test"}')
self.assertEqual(icomp.id, 'test')
self.assertNotIn('description', vars(icomp))
def test_FromJSONExceptionFlatDescription(self):
with self.assertRaises(ValueError):
InteractionComponent.from_json('{"id": "test", "description": "flatdescription"}')
def test_FromJSON(self):
icomp = InteractionComponent.from_json('{"id": "test", "description": {"en-US": "test"}}')
self.assertEqual(icomp.id, 'test')
self.descriptionVerificationHelper(icomp.description)
def test_AsVersionEmpty(self):
icomp = InteractionComponent()
icomp2 = icomp.as_version("1.0.0")
self.assertEqual(icomp2, {})
def test_AsVersionNotEmpty(self):
icomp = InteractionComponent(**{'id': 'test'})
icomp2 = icomp.as_version()
self.assertEqual(icomp2, {'id': 'test'})
def test_ToJSONFromJSON(self):
json_str = '{"id": "test", "description": {"en-US": "test"}}'
icomp = InteractionComponent.from_json(json_str)
self.assertEqual(icomp.id, 'test')
self.descriptionVerificationHelper(icomp.description)
self.assertEqual(icomp.to_json(), json_str)
def test_ToJSON(self):
icomp = InteractionComponent(**{"id": "test", "description": {"en-US": "test"}})
self.assertEqual(icomp.to_json(), '{"id": "test", "description": {"en-US": "test"}}')
def test_ToJSONIgnoreNoneDescription(self):
icomp = InteractionComponent(id='test')
self.assertEqual(icomp.to_json(), '{"id": "test"}')
def test_ToJSONIgnoreNoneId(self):
icomp = InteractionComponent(description={"en-US": "test"})
self.assertEqual(icomp.to_json(), '{"description": {"en-US": "test"}}')
def test_ToJSONEmpty(self):
icomp = InteractionComponent()
self.assertEqual(icomp.to_json(), '{}')
def descriptionVerificationHelper(self, description):
self.assertIsInstance(description, LanguageMap)
self.assertEqual(len(description), 1)
self.assertIn('en-US', description)
self.assertEqual(description['en-US'], 'test')
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(InteractionComponentTest)
unittest.TextTestRunner(verbosity=2).run(suite)
|
{
"content_hash": "7a3f355f88c31e487d9455f3cad9d677",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 98,
"avg_line_length": 39.493506493506494,
"alnum_prop": 0.6619533048339362,
"repo_name": "jpablo128/TinCanPython",
"id": "6e5b5f8e5ea71b787f810be841354d906dc0f2ae",
"size": "6686",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/interactioncomponent_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "380360"
}
],
"symlink_target": ""
}
|
from .column import Column
from typing import List, Union
class Date(Column):
_allowed_column_types = [
'DATE',
'DATETIME',
'TIMESTAMP',
]
def __init__(
self,
name: str = '',
column_type: str = '',
length: Union[str, int] = None,
null: bool = True,
has_default: bool = False,
default: Union[str, int] = None,
unsigned: bool = None,
character_set: str = None,
collate: str = None,
auto_increment: bool = False,
enum_values: List[str] = None,
parsing_errors: List[str] = None,
parsing_warnings: List[str] = None,
):
# it would be nice to just do `def __init__(**kwargs)` and then `super().__init__(**kwargs)`
# but then we would lose our type hints. :shrug:
super().__init__(
name=name,
column_type=column_type,
length=length,
null=null,
has_default=has_default,
default=default,
unsigned=unsigned,
character_set=character_set,
collate=collate,
auto_increment=auto_increment,
enum_values=enum_values,
parsing_errors=parsing_errors,
parsing_warnings=parsing_warnings,
)
def _check_for_schema_errors_and_warnings(self):
super()._check_for_schema_errors_and_warnings()
if self.length:
self._schema_errors.append(f"Column '{self.name}' of type '{self.column_type}' cannot have a length")
if self.character_set is not None:
self._schema_errors.append(f"Column '{self.name}' of type '{self.column_type}' cannot have a character set")
if self.collate is not None:
self._schema_errors.append(f"Column '{self.name}' of type '{self.column_type}' cannot have a collate")
        # none of the allowed date/time types may be AUTO_INCREMENT
        if self.auto_increment:
            self._schema_errors.append(f"Column '{self.name}' of type '{self.column_type}' cannot be an AUTO_INCREMENT")
if self.values:
self._schema_errors.append(
"Column '%s' of type %s is not allowed to have a list of values for its length" %
(self.name, self.column_type)
)
if self.unsigned:
self._schema_errors.append("Column %s cannot be unsigned" % self._name)
|
{
"content_hash": "4a22137a960543df326309432b7e9527",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 120,
"avg_line_length": 37,
"alnum_prop": 0.5667359667359667,
"repo_name": "cmancone/mygrations",
"id": "720ee2fdbf22f8b7f3b468c63ad75aac45429b7c",
"size": "2405",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mygrations/core/definitions/columns/date.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "416430"
},
{
"name": "Shell",
"bytes": "331"
}
],
"symlink_target": ""
}
|
__author__ = 'jono'
import websocket, threading
class Session:
def __init__(self, agarthon):
self.main = agarthon
self.ip, self.port = self.main.server_info[0].split(':')
self.token = self.main.server_info[1]
self.data_in = []
self.running = False
self.thread = None
# Receive loop starts on self.connect() call which instantiates ws
self.ws = None
def start(self):
self.connect()
# Start the read thread (receive data loop)
self.thread = threading.Thread(name='SessionReadThread', target=self.recv_loop)
self.thread.start()
def is_connected(self):
return self.running and self.ws.connected
def connect(self):
url = 'ws://' + self.ip + ':' + self.port + '/'
self.ws = websocket.WebSocket()
try:
self.ws.connect(url=url, origin='http://agar.io')
self.running = True
print('Websocket created to ' + url)
except Exception as ex:
print('Could not create a connection to ' + url + ' for reason: ' + str(ex))
# Constantly updates data_in byte array from packets sent to the client by the server
def recv_loop(self):
while self.is_connected():
try:
if self.ws.connected:
data = self.ws.recv()
self.data_in.append(data)
else:
print('Could not receive data because there is no websocket connection!')
return
except Exception as ex:
print('Could not receive data from websocket connection for reason: ' + str(ex))
return
    def disconnect(self):
        # threading.Thread has no stop(); signal the receive loop to exit
        # and wait for it to finish instead
        self.running = False
        self.ws.close()
        if self.thread is not None:
            self.thread.join()
# Send already formatted data
def send(self, data):
        if self.is_connected():
if len(data) > 0:
try:
self.ws.send(data)
print('Sent packet: ' + str(data))
except Exception as ex:
print('Could not send data for reason: ' + str(ex))
else:
print('Tried to send packet with no data!')
else:
print('Tried to send packet with no connection!')
def read(self):
        if self.is_connected() and self.ws.connected:
if len(self.data_in) > 0:
return_data = self.data_in[0]
del(self.data_in[0])
return return_data
else:
print('The input byte array is empty!')
else:
print('Tried to read byte with no connection!')
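# Minimal usage sketch (illustrative; the owner object only needs a
# server_info attribute of the form ('host:port', token)):
# session = Session(owner)
# session.start()
# data = session.read()
# session.disconnect()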
|
{
"content_hash": "a4f7f548e24a7bbf5d2ed3bef9a71738",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 96,
"avg_line_length": 32.829268292682926,
"alnum_prop": 0.5334323922734027,
"repo_name": "jonnylin13/agarthon",
"id": "545a2f14782aa1ec996788adc3cac9ea1ce6a66b",
"size": "2692",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "session.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "87261"
},
{
"name": "Python",
"bytes": "20420"
}
],
"symlink_target": ""
}
|
import os
import sys
import traceback
NO_DEFAULT = object()
if __name__ == '__main__':
if not os.environ.get("ASFGIT_ADMIN"):
print "Invalid server configuration."
exit(1)
sys.path.append(os.environ["ASFGIT_ADMIN"])
import asfgit.log as log
import asfgit.git_multimail as git_multimail
import asfgit.util as util
path = filter(None, os.environ["PATH_INFO"].split("/"))
path = filter(lambda p: p != "git-receive-pack", path)
if len(path) != 1:
raise ValueError("Invalid PATH_INFO: %s" % os.environ["PATH_INFO"])
path = path[0]
repo = ""
if path[-4:] == ".git":
repo = util.decode(path[:-4])
else:
repo = util.decode(path)
try:
config = git_multimail.Config('multimailhook')
try:
environment = git_multimail.GenericEnvironment(config=config)
except git_multimail.ConfigurationException:
sys.stderr.write('*** %s\n' % sys.exc_info()[1])
sys.exit(1)
mailer = git_multimail.SendMailer(
environment,
command=['/usr/local/sbin/sendmail', '-oi', '-t'],
envelopesender='git@apache.org',
)
git_multimail.run_as_post_receive_hook(environment, mailer)
except Exception, exc:
log.exception()
print "Error: %s" % exc
exit(0) # Don't exit(1) here, we want the bleedin' sync to complete!
|
{
"content_hash": "45dbc387483655f99f155bcc63eda37a",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 76,
"avg_line_length": 30.382978723404257,
"alnum_prop": 0.584733893557423,
"repo_name": "apache/infrastructure-puppet",
"id": "7ed1a9a8243fb11fee3c098385c61bd244798687",
"size": "1453",
"binary": false,
"copies": "1",
"ref": "refs/heads/deployment",
"path": "modules/gitbox/files/hooks/post-receive.d/02-send-emails.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "44620"
},
{
"name": "CSS",
"bytes": "21836"
},
{
"name": "HTML",
"bytes": "109389"
},
{
"name": "JavaScript",
"bytes": "25565"
},
{
"name": "Lua",
"bytes": "87889"
},
{
"name": "Makefile",
"bytes": "1043"
},
{
"name": "Pascal",
"bytes": "1976"
},
{
"name": "Perl",
"bytes": "123056"
},
{
"name": "Puppet",
"bytes": "203328"
},
{
"name": "Python",
"bytes": "548366"
},
{
"name": "Ruby",
"bytes": "68814"
},
{
"name": "Shell",
"bytes": "163107"
}
],
"symlink_target": ""
}
|
"""
Copyright 2015-2016 @_rc0r <hlt99@blinkenshell.org>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import subprocess
import afl_utils
import threading
class VerifyThread(threading.Thread):
def __init__(self, thread_id, timeout_secs, target_cmd, in_queue, out_queue, in_queue_lock, out_queue_lock):
threading.Thread.__init__(self)
self.id = thread_id
self.timeout_secs = timeout_secs
self.target_cmd = target_cmd
self.in_queue = in_queue
self.out_queue = out_queue
self.in_queue_lock = in_queue_lock
self.out_queue_lock = out_queue_lock
self.exit = False
def run(self):
while not self.exit:
self.in_queue_lock.acquire()
if not self.in_queue.empty():
cs = self.in_queue.get()
self.in_queue_lock.release()
cmd = self.target_cmd.replace("@@", os.path.abspath(cs))
cs_fd = open(os.path.abspath(cs))
try:
if afl_utils.afl_collect.stdin_mode(self.target_cmd):
v = subprocess.call(cmd.split(), stdin=cs_fd, stderr=subprocess.DEVNULL,
stdout=subprocess.DEVNULL, timeout=self.timeout_secs)
else:
v = subprocess.call(cmd.split(), stderr=subprocess.DEVNULL,
stdout=subprocess.DEVNULL, timeout=self.timeout_secs)
# check if process was terminated/stopped by signal
if not os.WIFSIGNALED(v) and not os.WIFSTOPPED(v):
self.out_queue_lock.acquire()
self.out_queue.put((cs, 'invalid'))
self.out_queue_lock.release()
else:
# need extension (add uninteresting signals):
# following signals don't indicate hard crashes: 1
# os.WTERMSIG(v) ?= v & 0x7f ???
if (os.WTERMSIG(v) or os.WSTOPSIG(v)) in [1]:
self.out_queue_lock.acquire()
self.out_queue.put((cs, 'invalid'))
self.out_queue_lock.release()
# debug
# else:
# if os.WIFSIGNALED(v):
# print("%s: sig: %d (%d)" % (cs, os.WTERMSIG(v), v))
# elif os.WIFSTOPPED(v):
# print("%s: sig: %d (%d)" % (cs, os.WSTOPSIG(v), v))
except subprocess.TimeoutExpired:
self.out_queue_lock.acquire()
self.out_queue.put((cs, 'timeout'))
self.out_queue_lock.release()
except Exception:
pass
cs_fd.close()
else:
self.in_queue_lock.release()
self.exit = True
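# Illustrative wiring of a VerifyThread (paths, timeout and target command are
# made up; the queues come from the standard queue module):
# in_q, out_q = queue.Queue(), queue.Queue()
# in_q.put('/tmp/crashes/sample_000')
# t = VerifyThread(0, 60, './target @@', in_q, out_q, threading.Lock(), threading.Lock())
# t.start(); t.join()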
class GdbThread(threading.Thread):
def __init__(self, thread_id, gdb_cmd, out_dir, grep_for, out_queue, out_queue_lock):
threading.Thread.__init__(self)
self.id = thread_id
self.gdb_cmd = gdb_cmd
self.out_dir = out_dir
self.out_queue = out_queue
self.out_queue_lock = out_queue_lock
self.grep_for = grep_for
def run(self):
try:
script_output = subprocess.check_output(" ".join(self.gdb_cmd), shell=True, stderr=subprocess.DEVNULL,
stdin=subprocess.DEVNULL)
except (subprocess.TimeoutExpired, subprocess.CalledProcessError) as e:
script_output = e.output
script_output = script_output.decode(errors='replace').splitlines()
for line in script_output:
matching = [line.replace(g, '') for g in self.grep_for if g in line]
matching = " ".join(matching).strip('\' ')
matching = matching.replace(self.out_dir, '')
if len(matching) > 0:
self.out_queue_lock.acquire()
self.out_queue.put(matching)
self.out_queue_lock.release()
class AflTminThread(threading.Thread):
def __init__(self, thread_id, tmin_cmd, target_cmd, output_dir, in_queue, out_queue, in_queue_lock, out_queue_lock):
threading.Thread.__init__(self)
self.id = thread_id
self.target_cmd = target_cmd
self.output_dir = output_dir
self.in_queue = in_queue
self.out_queue = out_queue
self.in_queue_lock = in_queue_lock
self.out_queue_lock = out_queue_lock
self.tmin_cmd = tmin_cmd
self.exit = False
def run(self):
while not self.exit:
self.in_queue_lock.acquire()
if not self.in_queue.empty():
f = self.in_queue.get()
self.in_queue_lock.release()
cmd = "%s-i %s -o %s -- %s" % (self.tmin_cmd, f, os.path.join(self.output_dir, os.path.basename(f)),
self.target_cmd)
try:
subprocess.call(cmd, stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL, shell=True)
self.out_queue_lock.acquire()
self.out_queue.put(os.path.join(self.output_dir, os.path.basename(f)))
self.out_queue_lock.release()
# except subprocess.CalledProcessError as e:
# print("afl-tmin failed with exit code %d!" % e.returncode)
except subprocess.CalledProcessError:
pass
except Exception:
pass
else:
self.in_queue_lock.release()
self.exit = True
|
{
"content_hash": "ffec0ec6532f13188d4a2d01369c53ac",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 120,
"avg_line_length": 42.5472972972973,
"alnum_prop": 0.5307289185326346,
"repo_name": "rc0r/afl-utils",
"id": "fc87d62549367a6ff30b3556356046010bf08628",
"size": "6297",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "afl_utils/AflThread.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "509"
},
{
"name": "Makefile",
"bytes": "116"
},
{
"name": "Python",
"bytes": "187431"
}
],
"symlink_target": ""
}
|
from flask.ext.restful import fields
from flask.ext.restful_swagger import swagger
from tools import PseudoClasses
__author__ = 'Robin Quetin'
import ModelDefinitions
def gen_message_fields(class_ref):
resource_fields = {
"session_id": fields.String,
"object": fields.Nested(class_ref.resource_fields),
}
return resource_fields
def gen_message_multival_fields(class_ref):
resource_fields = {
"session_id": fields.String,
"object": fields.List(fields.Nested(class_ref.resource_fields))
}
return resource_fields
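# For a model whose resource_fields is {'name': fields.String}, the generators
# above produce (illustratively):
# gen_message_fields(model) -> {'session_id': fields.String,
#                               'object': fields.Nested({'name': fields.String})}
# gen_message_multival_fields(model) wraps the nested object in fields.List.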
class DefaultMessage(object):
required = ['object']
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.AssetEnvironmentPropertiesModel.__name__
)
# endregion
class AssetEnvironmentPropertiesMessage(DefaultMessage):
resource_fields = gen_message_multival_fields(ModelDefinitions.AssetEnvironmentPropertiesModel)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.AssetModel.__name__
)
# endregion
class AssetMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.AssetModel)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.AttackerModel.__name__,
)
# endregion
class AttackerMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.AttackerModel)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.CImportParams.__name__
)
# endregion
class CImportMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.CImportParams)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.DependencyModel.__name__
)
# endregion
class DependencyMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.DependencyModel)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.EnvironmentModel.__name__
)
# endregion
class EnvironmentMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.EnvironmentModel)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.GoalModel.__name__
)
# endregion
class GoalMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.GoalModel)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=PseudoClasses.ProjectSettings.__name__
)
# endregion
class ProjectMessage(DefaultMessage):
resource_fields = gen_message_fields(PseudoClasses.ProjectSettings)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.RequirementModel.__name__
)
# endregion
class RequirementMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.RequirementModel)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.ResponseModel.__name__
)
# endregion
class ResponseMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.ResponseModel)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.RiskModel.__name__
)
# endregion
class RiskMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.RiskModel)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.RoleModel.__name__,
property_0=ModelDefinitions.RoleEnvironmentPropertiesModel.__name__
)
# endregion
class RoleMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.RoleModel)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.ThreatModel.__name__
)
# endregion
class ThreatMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.ThreatModel)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.ValueTypeModel.__name__
)
# endregion
class ValueTypeMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.ValueTypeModel)
required = DefaultMessage.required
# region Swagger Doc
@swagger.model
@swagger.nested(
object=ModelDefinitions.VulnerabilityModel.__name__
)
# endregion
class VulnerabilityMessage(DefaultMessage):
resource_fields = gen_message_fields(ModelDefinitions.VulnerabilityModel)
required = DefaultMessage.required
|
{
"content_hash": "9921d338106189f0ad90944825e60a81",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 99,
"avg_line_length": 27.27840909090909,
"alnum_prop": 0.7760883149343887,
"repo_name": "RobinQuetin/CAIRIS-web",
"id": "7f54aeda23fab14653c5cf2fad59bb04070893cf",
"size": "4801",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "cairis/cairis/tools/MessageDefinitions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "11265"
},
{
"name": "Mako",
"bytes": "13226"
},
{
"name": "Python",
"bytes": "3313365"
},
{
"name": "Shell",
"bytes": "19461"
},
{
"name": "XSLT",
"bytes": "35522"
}
],
"symlink_target": ""
}
|
import urllib
import sys
import os
URLs = [
"https://www.google.com/images/srpr/logo11w.png",
"http://i.imgur.com/TigwF.jpg",
"http://conquest.imslp.info/files/imglnks/usimg/e/ea/IMSLP113136-PMLP01646-FChopin_s_Werke_BH_Band1_Title_Pages.pdf",
"http://i.imgur.com/poKTTTq.jpg",
"http://upload.wikimedia.org/wikipedia/commons/2/22/Turkish_Van_Cat.jpg",
"http://www.safehavenrr.org/Images/bunny.jpg",
"https://developer.apple.com/library/ios/documentation/General/Conceptual/iCloudDesignGuide/iCloudDesignGuide.pdf",
"http://javanese.imslp.info/files/imglnks/usimg/3/39/IMSLP140684-PMLP02666-Liszt_Franz-3_Etudes_de_Concert_S.144_No.3_Kistner_1655_filter.pdf"
]
def main(args):
if len(args) == 1:
for url in URLs:
filePath = os.path.join(os.path.expanduser("~/Downloads"), url.split('/')[-1])
urllib.urlretrieve (url, filePath)
print '\"' + url + '\" downloaded to \"' + filePath + '\"'
else:
if (args[1] == "purge"):
for url in URLs:
filePath = os.path.join(os.path.expanduser("~/Downloads"), url.split('/')[-1])
os.remove(filePath)
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
{
"content_hash": "3b632ccaa35254393aba23aca3dd45a5",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 143,
"avg_line_length": 37.43333333333333,
"alnum_prop": 0.692787177203918,
"repo_name": "ziyuanliu/nyzr",
"id": "28b1d190d025521d4cca695269f2dda361910816",
"size": "1123",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "downloader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "573"
},
{
"name": "C++",
"bytes": "634"
},
{
"name": "Objective-C",
"bytes": "217996"
},
{
"name": "Python",
"bytes": "1123"
},
{
"name": "Ruby",
"bytes": "74"
},
{
"name": "Shell",
"bytes": "3548"
}
],
"symlink_target": ""
}
|
import asyncio
from unittest import mock
import pytest
from sqlalchemy import Column, Integer, MetaData, String, Table, func, select
from aiopg import sa
meta = MetaData()
tbl = Table(
"sa_tbl2",
meta,
Column("id", Integer, nullable=False, primary_key=True),
Column("name", String(255)),
)
@pytest.fixture
def connect(make_connection):
async def go(**kwargs):
conn = await make_connection(**kwargs)
cur = await conn.cursor()
await cur.execute("DROP TABLE IF EXISTS sa_tbl2")
await cur.execute(
"CREATE TABLE sa_tbl2 " "(id serial, name varchar(255))"
)
await cur.execute("INSERT INTO sa_tbl2 (name)" "VALUES ('first')")
cur.close()
engine = mock.Mock(from_spec=sa.engine.Engine)
engine.dialect = sa.engine._dialect
return sa.SAConnection(conn, engine)
yield go
@pytest.fixture
def xa_connect(connect):
async def go(**kwargs):
conn = await connect(**kwargs)
val = await conn.scalar("show max_prepared_transactions")
if not int(val):
raise pytest.skip(
"Twophase transacions are not supported. "
"Set max_prepared_transactions to "
"a nonzero value"
)
return conn
yield go
async def test_without_transactions(connect):
conn1 = await connect()
conn2 = await connect()
res1 = await conn1.scalar(select([func.count()]).select_from(tbl))
assert 1 == res1
await conn2.execute(tbl.delete())
res2 = await conn1.scalar(select([func.count()]).select_from(tbl))
assert 0 == res2
async def test_connection_attr(connect):
conn = await connect()
tr = await conn.begin()
assert tr.connection is conn
async def test_root_transaction(connect):
conn1 = await connect()
conn2 = await connect()
tr = await conn1.begin()
assert tr.is_active
await conn1.execute(tbl.delete())
res1 = await conn2.scalar(select([func.count()]).select_from(tbl))
assert 1 == res1
await tr.commit()
assert not tr.is_active
assert not conn1.in_transaction
res2 = await conn2.scalar(select([func.count()]).select_from(tbl))
assert 0 == res2
async def test_root_transaction_rollback(connect):
conn1 = await connect()
conn2 = await connect()
tr = await conn1.begin()
assert tr.is_active
await conn1.execute(tbl.delete())
res1 = await conn2.scalar(select([func.count()]).select_from(tbl))
assert 1 == res1
await tr.rollback()
assert not tr.is_active
res2 = await conn2.scalar(select([func.count()]).select_from(tbl))
assert 1 == res2
async def test_root_transaction_close(connect):
conn1 = await connect()
conn2 = await connect()
tr = await conn1.begin()
assert tr.is_active
await conn1.execute(tbl.delete())
res1 = await conn2.scalar(select([func.count()]).select_from(tbl))
assert 1 == res1
await tr.close()
assert not tr.is_active
res2 = await conn2.scalar(select([func.count()]).select_from(tbl))
assert 1 == res2
async def test_root_transaction_commit_inactive(connect):
conn = await connect()
tr = await conn.begin()
assert tr.is_active
await tr.commit()
assert not tr.is_active
with pytest.raises(sa.InvalidRequestError):
await tr.commit()
async def test_root_transaction_rollback_inactive(connect):
conn = await connect()
tr = await conn.begin()
assert tr.is_active
await tr.rollback()
assert not tr.is_active
await tr.rollback()
assert not tr.is_active
async def test_root_transaction_double_close(connect):
conn = await connect()
tr = await conn.begin()
assert tr.is_active
await tr.close()
assert not tr.is_active
await tr.close()
assert not tr.is_active
async def test_inner_transaction_commit(connect):
conn = await connect()
tr1 = await conn.begin()
tr2 = await conn.begin()
assert tr2.is_active
await tr2.commit()
assert not tr2.is_active
assert tr1.is_active
await tr1.commit()
assert not tr2.is_active
assert not tr1.is_active
async def test_rollback_on_connection_close(connect):
conn1 = await connect()
conn2 = await connect()
tr = await conn1.begin()
await conn1.execute(tbl.delete())
res1 = await conn2.scalar(select([func.count()]).select_from(tbl))
assert 1 == res1
await conn1.close()
res2 = await conn2.scalar(select([func.count()]).select_from(tbl))
assert 1 == res2
del tr
async def test_inner_transaction_rollback(connect):
conn = await connect()
tr1 = await conn.begin()
tr2 = await conn.begin()
assert tr2.is_active
await conn.execute(tbl.insert().values(name="aaaa"))
await tr2.rollback()
assert not tr2.is_active
assert not tr1.is_active
res = await conn.scalar(select([func.count()]).select_from(tbl))
assert 1 == res
async def test_inner_transaction_close(connect):
conn = await connect()
tr1 = await conn.begin()
tr2 = await conn.begin()
assert tr2.is_active
await conn.execute(tbl.insert().values(name="aaaa"))
await tr2.close()
assert not tr2.is_active
assert tr1.is_active
await tr1.commit()
res = await conn.scalar(select([func.count()]).select_from(tbl))
assert 2 == res
async def test_nested_transaction_commit(connect):
conn = await connect()
tr1 = await conn.begin_nested()
tr2 = await conn.begin_nested()
assert tr1.is_active
assert tr2.is_active
await conn.execute(tbl.insert().values(name="aaaa"))
await tr2.commit()
assert not tr2.is_active
assert tr1.is_active
res = await conn.scalar(select([func.count()]).select_from(tbl))
assert 2 == res
await tr1.commit()
assert not tr2.is_active
assert not tr1.is_active
res = await conn.scalar(select([func.count()]).select_from(tbl))
assert 2 == res
async def test_nested_transaction_commit_twice(connect):
conn = await connect()
tr1 = await conn.begin_nested()
tr2 = await conn.begin_nested()
await conn.execute(tbl.insert().values(name="aaaa"))
await tr2.commit()
assert not tr2.is_active
assert tr1.is_active
await tr2.commit()
assert not tr2.is_active
assert tr1.is_active
res = await conn.scalar(select([func.count()]).select_from(tbl))
assert 2 == res
await tr1.close()
async def test_nested_transaction_rollback(connect):
conn = await connect()
tr1 = await conn.begin_nested()
tr2 = await conn.begin_nested()
assert tr1.is_active
assert tr2.is_active
await conn.execute(tbl.insert().values(name="aaaa"))
await tr2.rollback()
assert not tr2.is_active
assert tr1.is_active
res = await conn.scalar(select([func.count()]).select_from(tbl))
assert 1 == res
await tr1.commit()
assert not tr2.is_active
assert not tr1.is_active
res = await conn.scalar(select([func.count()]).select_from(tbl))
assert 1 == res
async def test_nested_transaction_rollback_twice(connect):
conn = await connect()
tr1 = await conn.begin_nested()
tr2 = await conn.begin_nested()
await conn.execute(tbl.insert().values(name="aaaa"))
await tr2.rollback()
assert not tr2.is_active
assert tr1.is_active
await tr2.rollback()
assert not tr2.is_active
assert tr1.is_active
await tr1.commit()
res = await conn.scalar(select([func.count()]).select_from(tbl))
assert 1 == res
async def test_twophase_transaction_commit(xa_connect):
conn = await xa_connect()
tr = await conn.begin_twophase()
await conn.execute(tbl.insert().values(name="aaaa"))
await tr.prepare()
assert tr.is_active
await tr.commit()
assert not tr.is_active
res = await conn.scalar(select([func.count()]).select_from(tbl))
assert 2 == res
async def test_twophase_transaction_twice(xa_connect):
conn = await xa_connect()
tr = await conn.begin_twophase()
with pytest.raises(sa.InvalidRequestError):
await conn.begin_twophase()
assert tr.is_active
await tr.prepare()
await tr.commit()
async def test_transactions_sequence(xa_connect):
conn = await xa_connect()
await conn.execute(tbl.delete())
assert conn._transaction is None
tr1 = await conn.begin()
assert tr1 is conn._transaction
await conn.execute(tbl.insert().values(name="a"))
res1 = await conn.scalar(select([func.count()]).select_from(tbl))
assert 1 == res1
await tr1.commit()
assert conn._transaction is None
tr2 = await conn.begin()
assert tr2 is conn._transaction
await conn.execute(tbl.insert().values(name="b"))
res2 = await conn.scalar(select([func.count()]).select_from(tbl))
assert 2 == res2
await tr2.rollback()
assert conn._transaction is None
tr3 = await conn.begin()
assert tr3 is conn._transaction
await conn.execute(tbl.insert().values(name="b"))
res3 = await conn.scalar(select([func.count()]).select_from(tbl))
assert 2 == res3
await tr3.commit()
assert conn._transaction is None
async def test_transaction_mode(connect):
conn = await connect()
await conn.execute(tbl.delete())
tr1 = await conn.begin(isolation_level="SERIALIZABLE")
await conn.execute(tbl.insert().values(name="a"))
res1 = await conn.scalar(select([func.count()]).select_from(tbl))
assert 1 == res1
await tr1.commit()
tr2 = await conn.begin(isolation_level="REPEATABLE READ")
await conn.execute(tbl.insert().values(name="b"))
res2 = await conn.scalar(select([func.count()]).select_from(tbl))
assert 2 == res2
await tr2.commit()
tr3 = await conn.begin(isolation_level="READ UNCOMMITTED")
await conn.execute(tbl.insert().values(name="c"))
res3 = await conn.scalar(select([func.count()]).select_from(tbl))
assert 3 == res3
await tr3.commit()
tr4 = await conn.begin(readonly=True)
assert tr4 is conn._transaction
res1 = await conn.scalar(select([func.count()]).select_from(tbl))
assert 3 == res1
await tr4.commit()
tr5 = await conn.begin(isolation_level="READ UNCOMMITTED", readonly=True)
res1 = await conn.scalar(select([func.count()]).select_from(tbl))
assert 3 == res1
await tr5.commit()
tr6 = await conn.begin(deferrable=True)
await conn.execute(tbl.insert().values(name="f"))
res1 = await conn.scalar(select([func.count()]).select_from(tbl))
assert 4 == res1
await tr6.commit()
tr7 = await conn.begin(isolation_level="REPEATABLE READ", deferrable=True)
await conn.execute(tbl.insert().values(name="g"))
res1 = await conn.scalar(select([func.count()]).select_from(tbl))
assert 5 == res1
await tr7.commit()
tr8 = await conn.begin(
isolation_level="SERIALIZABLE", readonly=True, deferrable=True
)
assert tr8 is conn._transaction
res1 = await conn.scalar(select([func.count()]).select_from(tbl))
assert 5 == res1
await tr8.commit()
async def test_timeout_in_transaction_context_manager(make_engine):
engine = await make_engine(timeout=1)
with pytest.raises(asyncio.TimeoutError):
async with engine.acquire() as connection:
async with connection.begin():
await connection.execute("SELECT pg_sleep(10)")
engine.terminate()
await engine.wait_closed()
async def test_timeout_in_nested_transaction_context_manager(make_engine):
engine = await make_engine(timeout=1)
with pytest.raises(asyncio.TimeoutError):
async with engine.acquire() as connection:
async with connection.begin():
async with connection.begin_nested():
await connection.execute("SELECT pg_sleep(10)")
engine.terminate()
await engine.wait_closed()
async def test_cancel_in_transaction_context_manager(make_engine, loop):
engine = await make_engine()
with pytest.raises(asyncio.CancelledError):
async with engine.acquire() as connection:
async with connection.begin():
task = loop.create_task(
connection.execute("SELECT pg_sleep(10)")
)
async def cancel_soon():
await asyncio.sleep(1)
task.cancel()
loop.create_task(cancel_soon())
await task
engine.terminate()
await engine.wait_closed()
async def test_cancel_in_savepoint_context_manager(make_engine, loop):
engine = await make_engine()
with pytest.raises(asyncio.CancelledError):
async with engine.acquire() as connection:
async with connection.begin():
async with connection.begin_nested():
task = loop.create_task(
connection.execute("SELECT pg_sleep(10)")
)
async def cancel_soon():
await asyncio.sleep(1)
task.cancel()
loop.create_task(cancel_soon())
await task
engine.terminate()
await engine.wait_closed()
|
{
"content_hash": "89d07402d8b5b699b46456a0d5cfbf36",
"timestamp": "",
"source": "github",
"line_count": 483,
"max_line_length": 78,
"avg_line_length": 27.36024844720497,
"alnum_prop": 0.6431328036322361,
"repo_name": "aio-libs/aiopg",
"id": "0fb73a0c60a1b2bf3ad8138ffec64bb6acfb5f97",
"size": "13215",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_sa_transaction.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "2023"
},
{
"name": "Python",
"bytes": "231174"
}
],
"symlink_target": ""
}
|
import argparse
import os.path
import xlrd
import xlsxwriter
QLANG_PREFIX = 'qlang-'
QLANG_SUFFIX = '-qlang'
QLANG_WORKSHEET_NAME = 'translations'
QLANG_WORKSHEET = 'worksheet'
QLANG_ROW = 'row'
QLANG_TYPE = 'type'
QLANG_LIST_NAME = 'list_name'
QLANG_NAME = 'name'
QLANG_COLUMN = 'column'
ENGLISH = 'English'
TRANSLATIONS = 'translations'
ENGLISH_SUFFIX = '::{}'.format(ENGLISH)
COL_FORMAT = '{}::'
BOTH_COL_FORMAT = '{}::{}'
LANGUAGE_SEP = '::'
SURVEY = 'survey'
CHOICES = 'choices'
SETTINGS = 'settings'
SELECT_ONE = 'select_one'
SELECT_MULTIPLE = 'select_multiple'
class QlangError(Exception):
pass
def questionnaire_to_translations(filename, prefix):
with xlrd.open_workbook(filename) as book:
        # if no survey worksheet, then abort (don't catch exception here)
survey_ws = book.sheet_by_name(SURVEY)
survey, s_others = process_worksheet(survey_ws)
c_others = []
choices_dict = {}
try:
choices_ws = book.sheet_by_name(CHOICES)
choices, c_others = process_worksheet(choices_ws)
choices_dict = group_choices(choices)
if set(c_others) != set(s_others):
# TODO Allow choices and survey to start with diff other langs
m = 'Languages not the same in the {} and {} worksheets.'
m = m.format(SURVEY, CHOICES)
raise QlangError(m)
except xlrd.XLRDError as e:
# TODO Eventually reformat what is printed. May not be an error
print(e)
m = 'Likely error: No "choices" worksheet found in "{}"'
m = m.format(filename)
print(m)
write_out_translations(filename, prefix, s_others, survey, choices_dict)
def write_out_translations(filename, prefix, others, survey, choices_dict):
first_part, second_part = os.path.split(filename)
out_file = os.path.join(first_part, prefix + second_part)
wb = xlsxwriter.Workbook(out_file)
ws = wb.add_worksheet(QLANG_WORKSHEET_NAME)
basic_header = [QLANG_WORKSHEET, QLANG_ROW, QLANG_TYPE, QLANG_NAME,
QLANG_COLUMN, ENGLISH]
ws.write_row(0, 0, basic_header)
ws.write_row(0, len(basic_header), others)
n = 1
remaining_list_names = list(choices_dict.keys())
for line in survey:
out_row = [line[QLANG_WORKSHEET], line[QLANG_ROW] + 1,
line[QLANG_TYPE], line[QLANG_NAME], line[QLANG_COLUMN],
line[ENGLISH]]
out_row += line[TRANSLATIONS]
ws.write_row(n, 0, out_row)
n += 1
this_type = line[QLANG_TYPE].split(None)
has_choice_list = this_type[0] in (SELECT_ONE, SELECT_MULTIPLE)
if has_choice_list and this_type[1] in remaining_list_names:
these_choices = choices_dict[this_type[1]]
for c in these_choices:
choice_row = [c[QLANG_WORKSHEET], c[QLANG_ROW] + 1,
c[QLANG_TYPE], c[QLANG_NAME],
c[QLANG_COLUMN], c[ENGLISH]]
choice_row += c[TRANSLATIONS]
ws.write_row(n, 0, choice_row)
n += 1
remaining_list_names.remove(this_type[1])
# TODO add conditional formatting
red_background = wb.add_format({'bg_color': '#FFC7CE'})
start_col = len(basic_header)-1
last_col = len(basic_header) + len(others) - 1
ws.conditional_format(1, start_col, n-1, last_col, {
'type': 'blanks',
'format': red_background
})
wb.close()
if remaining_list_names:
print('### Unused list names in "{}"'.format(filename))
for list_name in remaining_list_names:
print(' - {}'.format(list_name))
m = 'Translation file created: "{}"'.format(out_file)
print(m)
def group_choices(choices):
choices_dict = {}
for line in choices:
this_choice_list = line[QLANG_TYPE]
if this_choice_list in choices_dict:
choices_dict[this_choice_list].append(line)
else:
choices_dict[this_choice_list] = [line]
return choices_dict
# returns list of tuples for English columns
# returns sorted list of languages found that are translations
# returns dict of dictionaries to find where translations are
def preprocess_header(header):
# list of tuples, index and column name, e.g. (4, 'hint')
english = []
for i, cell in enumerate(header):
if cell.endswith(ENGLISH_SUFFIX):
english.append((i, cell[:-len(ENGLISH_SUFFIX)]))
if not english:
m = 'English not found in a worksheet'
raise QlangError(m)
# to contain all OTHER (non-English) languages used in the header
other_languages = set()
    # By the end of the for-loop, keys are columns with ::English; values are
    # dicts mapping each other language to its column index,
# e.g. {label: {Hindi: 10, French: 11}, hint: {Hindi: 12, French: 13}}
# except with quotes around the strings.
translation_lookup = {}
for i, column in english:
prefix = COL_FORMAT.format(column)
translations = [item for item in enumerate(header) if item[0] != i and
item[1].startswith(prefix)]
these_languages = {lang[len(prefix):]: j for j, lang in translations}
translation_lookup[column] = these_languages
other_languages |= set(these_languages.keys())
others = list(other_languages)
others.sort()
return english, others, translation_lookup
# Return the worksheet and the list of other languages (not english)
def process_worksheet(worksheet):
unicode = get_unicode_ws(worksheet)
# Assumption that first row is the header
header = unicode[0]
english, others, translations = preprocess_header(header)
type_col = get_type_col(header)
name_col = header.index(QLANG_NAME)
rows_for_output = []
for i, row in enumerate(unicode):
if i == 0:
continue
for j, column in english:
this_cell = row[j]
# No more blank values! Use '#####'
if this_cell != '':
this_dict = {}
this_dict[QLANG_WORKSHEET] = worksheet.name
this_dict[QLANG_ROW] = i
this_dict[QLANG_TYPE] = row[type_col]
this_dict[QLANG_NAME] = row[name_col]
this_dict[QLANG_COLUMN] = column
this_dict[ENGLISH] = this_cell
this_dict[TRANSLATIONS] = get_translations(row, others,
translations[column])
rows_for_output.append(this_dict)
return rows_for_output, others
def get_type_col(header):
if QLANG_TYPE in header:
col = header.index(QLANG_TYPE)
elif QLANG_LIST_NAME in header:
col = header.index(QLANG_LIST_NAME)
else:
m = 'Unable to find "{}" or "{}" in header'
m = m.format(QLANG_TYPE, QLANG_LIST_NAME)
raise QlangError(m)
return col
def get_translations(row, others, translations):
these_translations = []
for lang in others:
try:
lang_col = translations[lang]
these_translations.append(row[lang_col])
except KeyError:
these_translations.append('')
return these_translations
# throws errors: file not found (xlrd.open_workbook)
# sheet not found (xlrd.sheet_by_name)
def translations_to_questionnaire(filename, prefix, suffix):
first_part, second_part = os.path.split(filename)
if not second_part.startswith(prefix):
m = '"{}" does not start with supplied prefix "{}"'
m = m.format(second_part, prefix)
raise QlangError(m)
orig_filename = os.path.join(first_part,second_part[len(prefix):])
full_file, ext = os.path.splitext(orig_filename)
dest_filename = full_file + suffix + ext
with xlrd.open_workbook(filename) as book:
with xlrd.open_workbook(orig_filename) as orig:
trans_ws = book.sheet_by_name(QLANG_WORKSHEET_NAME)
# Copy over "survey" and "choices" after merging translations
survey_ws = orig.sheet_by_name(SURVEY)
new_survey = get_worksheet_w_trans(survey_ws, trans_ws)
choices_ws = orig.sheet_by_name(CHOICES)
new_choices = get_worksheet_w_trans(choices_ws, trans_ws)
wb = xlsxwriter.Workbook(dest_filename)
survey_out_ws = wb.add_worksheet(SURVEY)
write_out_worksheet(survey_out_ws, new_survey)
choices_out_ws = wb.add_worksheet(CHOICES)
write_out_worksheet(choices_out_ws, new_choices)
# Copy all other sheets over
for sheet in orig.sheet_names():
if sheet not in (SURVEY, CHOICES):
rows = get_unicode_ws(orig.sheet_by_name(sheet))
this_ws = wb.add_worksheet(sheet)
write_out_worksheet(this_ws, rows)
wb.close()
m = 'Translations successfully merged: "{}"'.format(dest_filename)
print(m)
def write_out_worksheet(ws, lines):
for i, line in enumerate(lines):
ws.write_row(i, 0, line)
# TODO does it work if there are no translation languages?
def get_worksheet_w_trans(ws, trans):
# for each heading in built header, check if it exists in original header
# then check if it is in translation file
trans_rows = get_unicode_ws(trans)
trans_header = trans_rows[0]
ws_ind = trans_header.index(QLANG_WORKSHEET)
row_ind = trans_header.index(QLANG_ROW)
name_ind = trans_header.index(QLANG_NAME)
column_ind = trans_header.index(QLANG_COLUMN)
english_ind = trans_header.index(ENGLISH)
trans_langs = trans_header[(english_ind + 1):]
# Keep rows for this sheet
ws_name = ws.name
correct_trans_rows = [row for row in trans_rows if row[ws_ind] == ws_name]
# Get this sheet
ws_rows = get_unicode_ws(ws)
ws_header = ws_rows[0]
# dictionary: key is row, inside is another dictionary. key is column, and
# value is text value
# e.g. {1: {name: your_name, label::English: Name?, label::French: Nom?}}
# except with quotes around the strings
trans_dict = {}
for row in correct_trans_rows:
row_number = row[row_ind] - 1
this_col = row[column_ind]
this_dict = {}
this_dict[QLANG_NAME] = row[name_ind]
this_dict[BOTH_COL_FORMAT.format(this_col, ENGLISH)] = row[english_ind]
for lang, val in zip(trans_langs, row[(english_ind+1):]):
this_key = BOTH_COL_FORMAT.format(this_col, lang)
this_dict[this_key] = val
try:
trans_dict[row_number].update(this_dict)
except KeyError:
trans_dict[row_number] = this_dict
# BUILD header for survey, languages based on translation file
combined_header = build_combined_header(ws_header, trans_langs)
combined_lines = [combined_header]
for i, line in enumerate(ws_rows):
if i == 0:
continue
this_row = []
for col in combined_header:
orig = ''
# see if original WS has column from combined header, then get val
# for current row.
try:
orig_ind = ws_header.index(col)
orig = line[orig_ind]
except ValueError:
pass
# then see if there is corresponding value in translation lookup
try:
this_dict = trans_dict[i]
new_val = this_dict[col]
if col == QLANG_NAME and new_val != orig:
m = 'Name mismatch: {} <> {}'.format(orig, new_val)
raise QlangError(m)
else:
orig = new_val
if new_val == '':
m = '### Missing translation for row {} and col "{}"'
m = m.format(i + 1, col)
print(m)
except (IndexError, KeyError) as e:
pass
this_row.append(orig)
combined_lines.append(this_row)
return combined_lines
def build_combined_header(ws_header, trans_langs):
# need to know which are english columns
# need to know which are translation columns
# this probably could be optimized....
combined_trans = []
english_col = []
for col in ws_header:
if col.endswith(ENGLISH_SUFFIX):
base = col[:-len(ENGLISH_SUFFIX)]
english_col.append(base)
for lang in trans_langs:
new_col = BOTH_COL_FORMAT.format(base, lang)
combined_trans.append(new_col)
combined_header = []
for col in ws_header:
split_col = col.split(LANGUAGE_SEP)
if split_col[0] not in english_col or split_col[1] == ENGLISH:
combined_header.append(col)
combined_header += combined_trans
return combined_header
def get_unicode_ws(ws):
rows = []
for i in range(ws.nrows):
this_row = ws.row(i)
try:
            these_values = [f(cell, ws.name, i+1, j) for j, cell in enumerate(this_row, start=1)]
            rows.append(these_values)
except QlangError as e:
m = 'Excel sheet "{}", row {}: {}'
m = m.format(ws.name, i+1, e)
raise QlangError(m)
return rows
# important for switching between google docs and xlsx
def newline_space_fix(s):
newline_space = '\n '
fix = '\n'
while newline_space in s:
s = s.replace(newline_space, fix)
return s
def space_newline_fix(s):
space_newline = ' \n'
fix = '\n'
while space_newline in s:
s = s.replace(space_newline, fix)
return s
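# Convert an xlrd cell to a plain value: trimmed text with newline fix-ups, a number, 'TRUE'/'FALSE', or '' for empty cells.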
def f(cell, sheet_name, row, col):
# Can format differently?
if cell.ctype == xlrd.XL_CELL_BOOLEAN:
return 'TRUE' if cell.value == 1 else 'FALSE'
elif cell.ctype == xlrd.XL_CELL_EMPTY:
return ''
elif cell.ctype == xlrd.XL_CELL_TEXT:
s = cell.value.strip()
s = newline_space_fix(s)
return s
elif cell.ctype == xlrd.XL_CELL_NUMBER:
return cell.value
else:
m = 'Unhandled cell type: {}. May be DATE. Sheet {}, row {}, col {}'.format(cell.ctype, sheet_name, row, col)
print(m)
return cell.value
if __name__ == '__main__':
prog_desc = ('From an XLSForm create an MS-Excel file to facilitate quick '
'translation. Also, merge a translation file back into '
'XLSForm.')
parser = argparse.ArgumentParser(description=prog_desc)
file_help = 'One or more paths to files destined for conversion.'
parser.add_argument('xlsxfile', nargs='+', help=file_help)
merge_help = ('Include this flag to merge translation files back into '
'XLSForms. Do not include this flag to tell the program to '
'create translation files.')
parser.add_argument('-m', '--merge', action='store_true',
help=merge_help)
prefix_help = ('A prefix to prepend to the base file name.')
parser.add_argument('-p', '--prefix', help=prefix_help)
suffix_help = ('A suffix to add to the base file name. Cannot start with a '
'hyphen ("-").')
parser.add_argument('-s', '--suffix', help=suffix_help)
args = parser.parse_args()
if args.suffix is None:
args.suffix = QLANG_SUFFIX
else:
args.suffix = args.suffix.replace('%', '-')
if args.prefix is None:
args.prefix = QLANG_PREFIX
else:
args.prefix = args.prefix.replace('%', '-')
for filename in set(args.xlsxfile):
if args.merge:
try:
translations_to_questionnaire(filename, args.prefix,
args.suffix)
except FileNotFoundError as e:
print(e)
else:
questionnaire_to_translations(filename, args.prefix)
|
{
"content_hash": "5c4bba27efd4120f917561d45083c439",
"timestamp": "",
"source": "github",
"line_count": 430,
"max_line_length": 117,
"avg_line_length": 37.02325581395349,
"alnum_prop": 0.589572864321608,
"repo_name": "jkpr/qlang",
"id": "8150353cd131e70e1a8534609a74fc72d016d58f",
"size": "15920",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qlang/qlang.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "55130"
}
],
"symlink_target": ""
}
|
import sys # pragma: no cover
from remoteappmanager.command_line_config import (
CommandLineConfig) # pragma: no cover
from remoteappmanager.environment_config import (
EnvironmentConfig) # pragma: no cover
from remoteappmanager.file_config import FileConfig # pragma: no cover
from tornado.options import print_help # pragma: no cover
from remoteappmanager.application import Application # pragma: no cover
def main(): # pragma: no cover
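    # Parse the command line, the optional config file and the environment; print usage help and exit on any parsing error.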
try:
command_line_config = CommandLineConfig()
command_line_config.parse_config()
file_config = FileConfig()
if command_line_config.config_file:
file_config.parse_config(command_line_config.config_file)
environment_config = EnvironmentConfig()
environment_config.parse_config()
except Exception as e:
print_help()
print("Error: {}".format(e))
sys.exit(1)
app = Application(command_line_config, file_config, environment_config)
app.start()
|
{
"content_hash": "b6f214cb5b39705e79ac4e768caf0f46",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 75,
"avg_line_length": 32.54838709677419,
"alnum_prop": 0.6897918731417245,
"repo_name": "simphony/simphony-remote",
"id": "485802e45bea9b5a7dacadd7cbddabf4114330ab",
"size": "1009",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "remoteappmanager/cli/remoteappmanager/__main__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "14011"
},
{
"name": "JavaScript",
"bytes": "51718"
},
{
"name": "Makefile",
"bytes": "6052"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "418020"
},
{
"name": "Shell",
"bytes": "1690"
},
{
"name": "Vue",
"bytes": "46644"
}
],
"symlink_target": ""
}
|
"""we_are_social URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.flatpages import views
from paypal.standard.ipn import urls as paypal_urls
from paypal_store import views as paypal_views
from accounts import views as accounts_views
from products import views as product_views
from hello import views as hello_views
from magazines import views as magazine_views
urlpatterns = [ # pylint: disable-msg=C0103
# admin backend url
url(r'^admin/', admin.site.urls),
# hello urls
url(r'^$', hello_views.get_index, name='index'),
# accounts urls
url(r'^register/$', accounts_views.register, name='register'),
url(r'^login/$', accounts_views.login, name='login'),
url(r'^profile/$', accounts_views.profile, name='profile'),
url(r'^logout/$', accounts_views.logout, name='logout'),
url(r'^about/$', views.flatpage, {'url': '/pages/about/'}, name='about'),
url(r'^cancel_subscription/$', accounts_views.cancel_subscription, name='cancel_subscription'),
url(r'^subscriptions_webhook/$', accounts_views.subscriptions_webhook,
name='subscriptions_webhook'),
# paypal urls
url(r'^a-very-hard-to-guess-url/', include(paypal_urls)),
url(r'^paypal-return/', paypal_views.paypal_return),
url(r'^paypal-cancel/', paypal_views.paypal_cancel),
# products urls
url(r'^products/$', product_views.all_products),
# magazines urls
url(r'^magazines/$', magazine_views.all_magazines),
]
|
{
"content_hash": "9810c4f2712cebf0b0cfe9716829a6d8",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 99,
"avg_line_length": 43.36734693877551,
"alnum_prop": 0.699764705882353,
"repo_name": "GunnerJnr/_CodeInstitute",
"id": "38456eeff04d9d301c367730f42136a660c4c3fc",
"size": "2125",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Stream-3/Full-Stack-Development/15.Paypal-Subscriptions/3.Use-Django-Orm-To-Connect-Users-To-Their-Purchases/we_are_social/we_are_social/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "336"
},
{
"name": "CSS",
"bytes": "2545480"
},
{
"name": "HTML",
"bytes": "708226"
},
{
"name": "JavaScript",
"bytes": "1984479"
},
{
"name": "Python",
"bytes": "1727585"
},
{
"name": "Shell",
"bytes": "75780"
},
{
"name": "TSQL",
"bytes": "642"
}
],
"symlink_target": ""
}
|
import diventi.accounts.models
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0199_auto_20190615_1236'),
]
operations = [
migrations.AlterModelManagers(
name='diventiuser',
managers=[
('objects', diventi.accounts.models.DiventiUserManager()),
],
),
]
|
{
"content_hash": "f0e1c89cf5ee09afad844c5c1c46f88c",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 74,
"avg_line_length": 22.38888888888889,
"alnum_prop": 0.5831265508684863,
"repo_name": "flavoi/diventi",
"id": "7ca4a023b9bb4f5e31ae1570586b4f742dfb7038",
"size": "452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "diventi/accounts/migrations/0200_auto_20190615_1251.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "385265"
},
{
"name": "Procfile",
"bytes": "46"
},
{
"name": "Python",
"bytes": "826530"
}
],
"symlink_target": ""
}
|
from pysnmp.entity.rfc3413 import ntforg
from pyasn1.compat.octets import null
try:
import asyncio
except ImportError:
import trollius as asyncio
def _cbFunWithFuture(snmpEngine, sendRequestHandle, errorIndication,
errorStatus, errorIndex, varBinds, future):
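    """Complete the caller's Future with the notification result, unless the Future was already cancelled."""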
if future.cancelled():
return
future.set_result(
(snmpEngine, errorIndication, errorStatus, errorIndex, varBinds)
)
class NotificationOriginator:
def __init__(self):
self.notificationOriginator = ntforg.NotificationOriginator()
def sendVarBinds(self,
snmpEngine,
notificationTarget,
snmpContext,
contextName,
notificationName,
instanceIndex,
additionalVarBinds=()):
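        """Send the notification and return a Future resolving to (snmpEngine, errorIndication, errorStatus, errorIndex, varBinds)."""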
future = asyncio.Future()
self.notificationOriginator.sendVarBinds(
snmpEngine,
notificationTarget,
snmpContext,
contextName,
notificationName,
instanceIndex,
additionalVarBinds,
_cbFunWithFuture,
future
)
return future
|
{
"content_hash": "567ac6f536fe8e2bc3102e406497220d",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 72,
"avg_line_length": 30.225,
"alnum_prop": 0.5831265508684863,
"repo_name": "ww9rivers/pysnmp",
"id": "03e927d88761372c777cac5e27fbcf548739b2f3",
"size": "2682",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pysnmp/entity/rfc3413/asyncio/ntforg.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1004861"
}
],
"symlink_target": ""
}
|
import socket
from subprocess import Popen, PIPE, STDOUT
import os
import time
import string
import requests
import json
import omxplayer
class UnsupportedFileTypeException(Exception):
'''Raised if the file type is not among the list of supported types'''
pass
class FileNotFoundException(Exception):
    '''Raised if the given path is not an existing file'''
pass
class OmxCommsError(Exception):
    '''Raised if a command failed to execute'''
pass
class Omx(object):
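    '''Thin wrapper around an omxplayer instance exposing a playlist and basic playback controls.'''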
def __init__(self):
        # private playlist var, stores list of file paths
        # mirrors the list in the player at all times
self._playlist = []
self._player = None
        # used to determine if a file's type is supported
        self.supported = ["mp4", "avi", "mkv",
                          "flv", "aac", "3gp"] # add more later
# creating an instance of the vlc window
# local socket connection to the vlc player
@property
def playlist(self):
'''returns list of file paths'''
return self._playlist
@property
def connection_open(self):
return self._player.is_playing()
@playlist.setter
def playlist(self, arg):
"""Takes a string, tuple or a list as an argument and
updates the player's playlist and the local_playlist variable
enqueues the vlc object with a playlist of all the files stored in it
can only add files to the playlist"""
if isinstance(arg, (list, tuple)):
for path in arg:
self.check_path(path)
if not path in self._playlist:
data = self._enqueue(path)
elif isinstance(arg, str):
self.check_path(arg)
if not arg in self._playlist:
data = self._enqueue(arg)
@playlist.deleter
def playlist(self):
        '''clears the playlist'''
self._playlist = []
self.clear()
def create_player(self):
if self.playlist == []:
raise Exception("The video player has no files ot add")
else:
self._player = omxplayer.OMXPlayer(self._playlist[0])
def check_path(self, path):
'''Ensures all files added to the application are
valid paths.'''
if not os.path.isfile(path):
raise FileNotFoundException()
path, file = os.path.split(path)
        name, ext = file.rsplit(".", 1)  # split on the last dot so names containing dots still work
if ext not in self.supported:
raise UnsupportedFileTypeException()
def toggle_fullscreen(self):
'''For compatibility'''
return True
def toggle_loop(self):
'''for compatibility'''
return True
def pause(self):
"""Checks the current state to make sure the player is playing something"""
if self._player:
self._player.pause()
def play(self):
"""First checks if a valid file is currently loaded."""
if self._player:
self._player.play()
def stop(self):
"""checks first if there is something to stop"""
if self._player:
self._player.stop()
    def _enqueue(self, path):
        '''adds a file to the playlist'''
        self._playlist.append(path)
    def clear(self):
        '''clears all files from the playlist'''
        self._playlist = []
def playlist_loop(self):
"""Get the currently playing video
get its remaining time by subtracting its
current time from its duration and creating a new instance for each file"""
if not self._player:
self.create_player()
while True:
time.sleep(0.5)
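            # seconds of playback remaining in the current file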
remaining = self._player.duration() - self._player.position()
if remaining < 1:
current = self._playlist.index(self._player.get_source())
if current < len(self._playlist) - 2:
next = self._playlist[current + 1]
else: next = self._playlist[0]
self._player.load(next)
|
{
"content_hash": "88c48769bd02767b922edbad92d64f14",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 83,
"avg_line_length": 29.107142857142858,
"alnum_prop": 0.578159509202454,
"repo_name": "nakamura9/deploy_ad_server",
"id": "4fb991fecdf999ff01e858ce6468528630641d22",
"size": "4075",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "client/omxplayer/myomx.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "54405"
},
{
"name": "Python",
"bytes": "101904"
},
{
"name": "Shell",
"bytes": "3378"
}
],
"symlink_target": ""
}
|
import csv
import os
from collections import defaultdict
base_dir = os.path.dirname(os.path.abspath(__file__))
assets_dir = os.path.join(base_dir, 'assets')
pool_file = os.path.join(assets_dir, 'general_class_question_pool.txt')
pool_index_file = os.path.join(assets_dir, 'general_class_question_pool_index.txt')
pool_csv = os.path.join(assets_dir, 'pool.csv')
pool_index = defaultdict(lambda: {
'title': None,
'id': None,
'exam_questions': 0,
'exam_groups': 0,
'total_questions': 0,
'index': defaultdict(str),
})
find = lambda lst, word: min(i for i, sentence in enumerate(lst) if word in sentence)
with open(pool_index_file, 'r') as f:
for line in f:
l_stripped = line.rstrip()
l_split = l_stripped.split(' ')
if l_split[0] == 'SUBELEMENT':
l_split.pop(0)
id = l_split[0]
l_split.pop(0)
lb_pos = find(l_split, '[')
rb_pos = find(l_split, ']')
l_split[lb_pos] = l_split[lb_pos].replace('[', '')
l_split[rb_pos] = l_split[rb_pos].replace(']', '')
qs_and_grps = (' '.join(l_split[lb_pos:rb_pos+1])).split('-')
title = ' '.join(l_split[0:lb_pos])
exam_questions = int((qs_and_grps[0].split(' '))[0])
grp_questions = int((qs_and_grps[1].split(' '))[0])
total_questions = int(l_split[rb_pos+1])
pool_index[id]['id'] = id
pool_index[id]['title'] = title
pool_index[id]['exam_questions'] = exam_questions
pool_index[id]['exam_groups'] = grp_questions
pool_index[id]['total_questions'] = total_questions
else:
section_id = line[0:2]
id = line[2:3]
title = (line.split('-'))[1].strip()
pool_index[section_id]['index'][id] = title
"""
G1A01 (C) [97.301(d)]
On which of the following bands is a General Class license holder granted all amateur frequency privileges?
A. 60, 20, 17, and 12 meters
B. 160, 80, 40, and 10 meters
C. 160, 60, 30, 17, 12, and 10 meters
D. 160, 30, 17, 15, 12, and 10 meters
~~
"""
with open(pool_file, 'r') as f:
for line in f:
question_lines = [f.readline()]
with open(pool_csv, 'w') as f:
output = csv.writer(f)
for sid, sec in pool_index.items():
output.writerow(['group', sid, sec['title'], sec['exam_questions'], sec['exam_groups'], sec['total_questions']])
for qid, title in sec['index'].items():
output.writerow(['subgroup', sid, qid, title])
|
{
"content_hash": "7a56022af3ed67f3b973f1ebef8f2f49",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 120,
"avg_line_length": 31.7625,
"alnum_prop": 0.568280204643841,
"repo_name": "alphachai/fcc_general_licence_study",
"id": "13b8eb3a357a18a020b93cede088e66adbf3fc43",
"size": "2565",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parse_pool.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2565"
}
],
"symlink_target": ""
}
|
from participantCollection import ParticipantCollection
import re
import datetime
import pyperclip
# EDIT ME!
currentMonthTotalDays = 31
currentMonthIndex = datetime.date.today().month
currentMonthPenultimateDayIndex = currentMonthTotalDays - 1
currentMonthName = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', 9: 'September', 10: 'October', 11: 'November', 12: 'December'}[currentMonthIndex]
nextMonthIndex = currentMonthIndex % 12 + 1
nextMonthName = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', 9: 'September', 10: 'October', 11: 'November', 12: 'December'}[nextMonthIndex]
currentDayOfMonthIndex = datetime.date.today().day
# TODO: testing...
# currentDayOfMonthIndex = 31
currentDayOfMonthName = {1: 'first', 2: 'second', 3: 'third', 4: 'fourth', 5: 'fifth', 6: 'sixth', 7: 'seventh', 8: 'eighth', 9: 'ninth', 10: 'tenth', 11: 'eleventh', 12: 'twelfth', 13: 'thirteenth', 14: 'fourteenth', 15: 'fifteenth', 16: 'sixteenth', 17: 'seventeenth', 18: 'eighteenth', 19: 'nineteenth', 20: 'twentieth', 21: 'twenty-first', 22: 'twenty-second', 23: 'twenty-third', 24: 'twenty-fourth', 25: 'twenty-fifth', 26: 'twenty-sixth', 27: 'twenty-seventh', 28: 'twenty-eighth', 29: 'twenty-ninth', 30: 'thirtieth', 31: 'thirty-first'}[currentDayOfMonthIndex]
currentDayOfWeekName = {0: 'Monday', 1: 'Tuesday', 2: 'Wednesday', 3: 'Thursday', 4: 'Friday', 5: 'Saturday', 6: 'Sunday'}[datetime.date.today().weekday()]
participants = ParticipantCollection()
numberStillIn = participants.sizeOfParticipantsWhoAreStillIn()
initialNumber = participants.size()
percentStillIn = int(round(100 * numberStillIn / initialNumber, 0))
# print "There are currently **" + str(numberStillIn) + " out of " + str(initialNumber) +"** original participants. That's **" + str(int(round(100*numberStillIn/initialNumber,0))) + "%** Here is the list of participants still with the challenge:\n"
def stringToPrintLegacy():
answer = "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
answer = re.sub('NUMBER_STILL_IN', str(numberStillIn), answer)
answer = re.sub('INITIAL_NUMBER', str(initialNumber), answer)
answer = re.sub('PERCENT_STILL_IN', str(percentStillIn), answer)
for participant in participants.participantsWhoAreStillIn():
answer += "/u/" + participant.name
if not participant.hasCheckedIn:
answer += " ~"
answer += "\n\n"
return answer
def templateForParticipants():
answer = ""
for participant in participants.participantsWhoAreStillIn():
answer += "/u/" + participant.name
if not participant.hasCheckedIn:
answer += " ~"
answer += "\n\n"
return answer
def templateForParticipantsOnFinalDay():
answer = ""
answer += "These participants have checked in at least once in the last 15 days:\n"
answer += "\n"
for participant in participants.participantsWhoAreStillInAndHaveCheckedIn():
answer += "/u/" + participant.name + "\n"
answer += "\n"
answer += "These participants have not reported a relapse, so they are still in the running, but **if they do not check in by the end of today, they will be removed from the list, and will not be considered victorious**:\n"
answer += "\n"
for participant in participants.participantsWhoAreStillInAndHaveNotCheckedIn():
answer += "/u/" + participant.name + " ~\n"
answer += "\n"
return answer
def templateFor1():
print '1\n\n'
answer = ""
print "============================================================="
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean: CURRENT_MONTH_NAME challenge. ~~We will no longer be accepting new signups.~~ Good news! We will be be accepting late signups for the next 3 days. If you forgot to sign up for the CURRENT_MONTH_NAME challenge, just leave a \"sign me up\" comment below, and I'll add you. Best of luck to everyone here!\n"
answer += "\n"
answer += "Here's how this thing works:\n"
answer += "\n"
answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
answer += "- ~~We will not be accepting any new participants~~, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
answer += "\n"
answer += "Good luck!\n"
answer += "\n"
answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
answer += "\n"
answer += "Here are our **INITIAL_NUMBER** original participants:\n\n"
answer += templateForParticipants()
print "============================================================="
return answer
def templateFor2():
print '2\n\n'
answer = ""
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean: CURRENT_MONTH_NAME challenge. This is the second day of our 3 day late-signup grace period. If you forgot to sign up for the CURRENT_MONTH_NAME challenge, just leave a \"sign me up\" comment below, and I'll add you.\n"
answer += "\n"
answer += "Guidelines:\n"
answer += "\n"
answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
answer += "- ~~We will not be accepting any new participants~~, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
answer += "\n"
answer += "Good luck!\n"
answer += "\n"
answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
answer += "\n"
answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
answer += templateForParticipants()
return answer
def templateFor3():
print '3\n\n'
answer = ""
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean: CURRENT_MONTH_NAME challenge. This is the last day of our 3 day late-signup grace period. If you forgot to sign up for the CURRENT_MONTH_NAME challenge, just leave a \"sign me up\" comment below, and I'll add you. After today, further signup requests will be silently ignored.\n"
answer += "\n"
answer += "Guidelines:\n"
answer += "\n"
answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
answer += "- ~~We will not be accepting any new participants~~, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
answer += "\n"
answer += "Good luck!\n"
answer += "\n"
answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
answer += "\n"
answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
answer += templateForParticipants()
return answer
def templateFor4():
print '4\n\n'
answer = ""
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean: CURRENT_MONTH_NAME challenge. Our 3 day late-signup grace period is now over. If you forgot to sign up, it's too late for CURRENT_MONTH_NAME, but feel free to leave comments here anyway, and we'll see you in NEXT_MONTH_NAME.\n"
answer += "\n"
answer += "Guidelines:\n"
answer += "\n"
answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
answer += "\n"
answer += "Good luck!\n"
answer += "\n"
answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
answer += "\n"
answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
answer += templateForParticipants()
return answer
def templateFor5to9():
print '5 to 9\n\n'
answer = ""
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean: CURRENT_MONTH_NAME challenge. Keep fighting the good fight!\n"
answer += "\n"
answer += "Guidelines:\n"
answer += "\n"
answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
answer += "\n"
answer += "Good luck!\n"
answer += "\n"
answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
answer += "\n"
answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
answer += templateForParticipants()
return answer
def templateFor10to14():
print '10 to 14\n\n'
answer = ""
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean: CURRENT_MONTH_NAME challenge. Keep fighting the good fight!\n"
answer += "\n"
answer += "**THE COUNTDOWN: Attention everyone!** You have " + str(15 - currentDayOfMonthIndex) + " days to make an update comment (if you haven't already) to be counted as an active participant! **Otherwise your name will be REMOVED from the list** on CURRENT_MONTH_INDEX/15!!\n"
answer += "\n"
answer += "Guidelines:\n"
answer += "\n"
answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
answer += "\n"
answer += "Good luck!\n"
answer += "\n"
answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
answer += "\n"
answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
answer += templateForParticipants()
return answer
def templateFor15():
print '15\n\n'
answer = ""
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean: CURRENT_MONTH_NAME challenge. Keep fighting the good fight!\n"
answer += "\n"
answer += "**THIS IS YOUR LAST DAY TO CHECK IN** (if you haven't already) **BEFORE YOUR NAME IS REMOVED FROM THE LIST!** Check in by posting a brief comment.\n"
answer += "\n"
answer += "Guidelines:\n"
answer += "\n"
answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
answer += "\n"
answer += "Good luck!\n"
answer += "\n"
answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
answer += "\n"
answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
answer += templateForParticipants()
return answer
def templateFor16toPenultimate():
print '16 to penultimate\n\n'
answer = ""
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean: CURRENT_MONTH_NAME challenge. Keep fighting the good fight!\n"
answer += "\n"
answer += "If you think you should still be on this list but aren't, you probably got removed in the great purge of CURRENT_MONTH_NAME 15th because you never checked in. However, if you let me know you're still with it I will re-add you.\n"
answer += "\n"
answer += "Guidelines:\n"
answer += "\n"
answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
answer += '- If you have a "~" after your name, you have yet to check in on any update threads since CURRENT_MONTH_NAME 15. If it is still there by CURRENT_MONTH_NAME CURRENT_MONTH_TOTAL_DAYS, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
answer += "\n"
answer += "Good luck!\n"
answer += "\n"
answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
answer += "\n"
answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
answer += templateForParticipants()
return answer
def templateForUltimate():
print 'Ultimate\n\n'
answer = ""
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the last day of the Stay Clean: CURRENT_MONTH_NAME challenge. This is it, folks, the day we've been waiting for... the final day of the challenge. I'll be making a congratulatory post tomorrow to honor the victors. I'm really proud of everyone who signed up for this challenge. Quitting porn is difficult, especially in an era where porn is always as close as a few keystrokes, and triggers are absolutely everywhere. Everybody who gave it their best shot deserves to take a minute right now to feel good about themselves.\n"
answer += "\n"
answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
answer += "\n"
# TODO: need to do the part where it lists the checked in and non-checked in participants separately.
answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**.\n\n"
answer += templateForParticipantsOnFinalDay()
return answer
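# Pick the update-post template that matches today's position in the month.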
def templateToUse():
if currentDayOfMonthIndex == 1:
return templateFor1()
elif currentDayOfMonthIndex == 2:
return templateFor2()
elif currentDayOfMonthIndex == 3:
return templateFor3()
elif currentDayOfMonthIndex == 4:
return templateFor4()
elif 5 <= currentDayOfMonthIndex <= 9:
return templateFor5to9()
elif 10 <= currentDayOfMonthIndex <= 14:
return templateFor10to14()
    elif currentDayOfMonthIndex == 15:
return templateFor15()
elif (currentDayOfMonthIndex >= 16) and (currentDayOfMonthIndex <= currentMonthPenultimateDayIndex):
return templateFor16toPenultimate()
else:
return templateForUltimate()
def stringToPrint():
answer = templateToUse()
answer = re.sub('NUMBER_STILL_IN', str(numberStillIn), answer)
answer = re.sub('INITIAL_NUMBER', str(initialNumber), answer)
answer = re.sub('PERCENT_STILL_IN', str(percentStillIn), answer)
answer = re.sub('CURRENT_MONTH_INDEX', str(currentMonthIndex), answer)
answer = re.sub('CURRENT_MONTH_TOTAL_DAYS', str(currentMonthTotalDays), answer)
answer = re.sub('CURRENT_MONTH_PENULTIMATE_DAY_INDEX', str(currentMonthPenultimateDayIndex), answer)
answer = re.sub('CURRENT_MONTH_NAME', currentMonthName, answer)
answer = re.sub('NEXT_MONTH_INDEX', str(nextMonthIndex), answer)
answer = re.sub('NEXT_MONTH_NAME', nextMonthName, answer)
answer = re.sub('CURRENT_DAY_OF_MONTH_INDEX', str(currentDayOfMonthIndex), answer)
answer = re.sub('CURRENT_DAY_OF_MONTH_NAME', currentDayOfMonthName, answer)
answer = re.sub('CURRENT_DAY_OF_WEEK_NAME', currentDayOfWeekName, answer)
return answer
outputString = stringToPrint()
print "============================================================="
print outputString
print "============================================================="
pyperclip.copy(outputString)
|
{
"content_hash": "e1b503b4259a6918d1dfa3d518d19049",
"timestamp": "",
"source": "github",
"line_count": 309,
"max_line_length": 643,
"avg_line_length": 77.55016181229773,
"alnum_prop": 0.6980344698076201,
"repo_name": "foobarbazblarg/stayclean",
"id": "4906ab5a0a3deb3f281afb336499a9b183a5981c",
"size": "23981",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stayclean-2015-december/display.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4232161"
},
{
"name": "Shell",
"bytes": "52056"
}
],
"symlink_target": ""
}
|
from cassandra.policies import RetryPolicy
class MonascaRetryPolicy(RetryPolicy):
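    """Cassandra retry policy with configurable attempt limits for read timeouts, write timeouts and unavailable errors."""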
def __init__(self, read_attempts, write_attempts, unavailable_attempts):
super(MonascaRetryPolicy, self).__init__()
self.read_attempts = read_attempts
self.write_attempts = write_attempts
self.unavailable_attempts = unavailable_attempts
def on_read_timeout(self, query, consistency, required_responses,
received_responses, data_retrieved, retry_num):
if retry_num >= self.read_attempts:
return self.RETHROW, None
elif received_responses >= required_responses and not data_retrieved:
return self.RETRY, consistency
else:
return self.RETHROW, None
def on_write_timeout(self, query, consistency, write_type,
required_responses, received_responses, retry_num):
if retry_num >= self.write_attempts:
return self.RETHROW, None
else:
return self.RETRY, consistency
def on_unavailable(self, query, consistency, required_replicas, alive_replicas, retry_num):
return (
self.RETRY_NEXT_HOST,
consistency) if retry_num < self.unavailable_attempts else (
self.RETHROW,
None)
|
{
"content_hash": "85177f4b5953c5a3e5d42f5838857f11",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 95,
"avg_line_length": 34.39473684210526,
"alnum_prop": 0.6342769701606733,
"repo_name": "openstack/monasca-persister",
"id": "51052099b42f3d36beacfce831798b68112f0868",
"size": "1882",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "monasca_persister/repositories/cassandra/retry_policy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2161"
},
{
"name": "Java",
"bytes": "187701"
},
{
"name": "Jinja",
"bytes": "4254"
},
{
"name": "Python",
"bytes": "170779"
},
{
"name": "Shell",
"bytes": "14841"
}
],
"symlink_target": ""
}
|
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.urls import reverse
from mptt.models import TreeForeignKey
from netbox.models import ChangeLoggedModel, NestedGroupModel, OrganizationalModel, NetBoxModel
from netbox.models.features import WebhooksMixin
from tenancy.choices import *
__all__ = (
'ContactAssignment',
'Contact',
'ContactGroup',
'ContactRole',
)
class ContactGroup(NestedGroupModel):
"""
An arbitrary collection of Contacts.
"""
name = models.CharField(
max_length=100
)
slug = models.SlugField(
max_length=100
)
parent = TreeForeignKey(
to='self',
on_delete=models.CASCADE,
related_name='children',
blank=True,
null=True,
db_index=True
)
description = models.CharField(
max_length=200,
blank=True
)
class Meta:
ordering = ['name']
unique_together = (
            ('parent', 'name'),
)
def get_absolute_url(self):
return reverse('tenancy:contactgroup', args=[self.pk])
class ContactRole(OrganizationalModel):
"""
Functional role for a Contact assigned to an object.
"""
name = models.CharField(
max_length=100,
unique=True
)
slug = models.SlugField(
max_length=100,
unique=True
)
description = models.CharField(
max_length=200,
blank=True,
)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('tenancy:contactrole', args=[self.pk])
class Contact(NetBoxModel):
"""
Contact information for a particular object(s) in NetBox.
"""
group = models.ForeignKey(
to='tenancy.ContactGroup',
on_delete=models.SET_NULL,
related_name='contacts',
blank=True,
null=True
)
name = models.CharField(
max_length=100
)
title = models.CharField(
max_length=100,
blank=True
)
phone = models.CharField(
max_length=50,
blank=True
)
email = models.EmailField(
blank=True
)
address = models.CharField(
max_length=200,
blank=True
)
link = models.URLField(
blank=True
)
comments = models.TextField(
blank=True
)
clone_fields = [
'group',
]
class Meta:
ordering = ['name']
unique_together = (
            ('group', 'name'),
)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('tenancy:contact', args=[self.pk])
class ContactAssignment(WebhooksMixin, ChangeLoggedModel):
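    """
    An assignment of a Contact to an object in NetBox, with a functional role and optional priority.
    """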
content_type = models.ForeignKey(
to=ContentType,
on_delete=models.CASCADE
)
object_id = models.PositiveBigIntegerField()
object = GenericForeignKey(
ct_field='content_type',
fk_field='object_id'
)
contact = models.ForeignKey(
to='tenancy.Contact',
on_delete=models.PROTECT,
related_name='assignments'
)
role = models.ForeignKey(
to='tenancy.ContactRole',
on_delete=models.PROTECT,
related_name='assignments'
)
priority = models.CharField(
max_length=50,
choices=ContactPriorityChoices,
blank=True
)
clone_fields = ('content_type', 'object_id')
class Meta:
ordering = ('priority', 'contact')
unique_together = ('content_type', 'object_id', 'contact', 'role', 'priority')
def __str__(self):
if self.priority:
return f"{self.contact} ({self.get_priority_display()})"
return str(self.contact)
def get_absolute_url(self):
return reverse('tenancy:contact', args=[self.contact.pk])
|
{
"content_hash": "c7b85745ddaa7e3e3e3c12cb4772377f",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 95,
"avg_line_length": 23.152941176470588,
"alnum_prop": 0.5962906504065041,
"repo_name": "digitalocean/netbox",
"id": "75ec9f69c7c6fc01df38e864118942bdf3f3fb56",
"size": "3936",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "netbox/tenancy/models/contacts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "189339"
},
{
"name": "HTML",
"bytes": "570800"
},
{
"name": "JavaScript",
"bytes": "326125"
},
{
"name": "Python",
"bytes": "1815170"
},
{
"name": "Shell",
"bytes": "2786"
}
],
"symlink_target": ""
}
|
import httplib
# importing beautiful soup for parsing html tags
from bs4 import BeautifulSoup as BS
# setting up the connecting using get request
conn = httplib.HTTPSConnection("www.sslproxies.org")
conn.request("GET", "/")
# getting the response and storing it in data
response = conn.getresponse()
data = response.read()
# applying beautifulsoup for parsing
soup = BS(data,"html.parser")
# parsing the table for the needed infos
table = soup.find('tbody')
rows = table.findAll('tr')
for tr in rows:
cols = tr.findAll('td')
# parsing and storing data in each row
IP_Address,Port,Code_Country,Country,Type_proxy,Google,https,LastCheck = [c.text for c in cols]
# displaying string along with needed infos
print IP_Address+" "+Port+" "+Country+" "+Type_proxy
|
{
"content_hash": "c232c6ac17218c62b4cba724494a8c0f",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 96,
"avg_line_length": 38.1,
"alnum_prop": 0.7493438320209974,
"repo_name": "HoussemCharf/FunUtils",
"id": "ecdeade0f94f4777bbb39531a081019719fbced9",
"size": "787",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Proxy Grabber/newproxy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "431"
},
{
"name": "HTML",
"bytes": "1777"
},
{
"name": "JavaScript",
"bytes": "475"
},
{
"name": "Python",
"bytes": "150133"
},
{
"name": "Shell",
"bytes": "341"
}
],
"symlink_target": ""
}
|
import argparse
import json
import os
import subprocess
import sys
import typing
import cryptography.x509
import os.path
import glob
from binascii import hexlify, unhexlify
from enum import Enum
VID_NOT_PRESENT = 0xFFFF
PID_NOT_PRESENT = 0x0000
VALID_IN_PAST = "2020-06-28 14:23:43"
VALID_NOW = "2022-09-28 14:23:43"
VALID_IN_FUTURE = "2031-06-28 14:23:43"
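# notBefore timestamps for the validity test cases below; combined with the 730-day lifetime used during
# generation they yield an already-expired, a currently-valid, and a not-yet-valid certificate, respectively.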
class CertType(Enum):
PAA = 1
PAI = 2
DAC = 3
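# Matter attestation chain: the PAA (Product Attestation Authority) signs the PAI (Product Attestation
# Intermediate), which in turn signs the DAC (Device Attestation Certificate).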
CERT_STRUCT_TEST_CASES = [
{
"description": 'Valid certificate version field set to v3(2)',
"test_folder": 'cert_version_v3',
"error_flag": 'no-error',
"is_success_case": 'true',
},
{
"description": 'Invalid certificate version field set to v2(1)',
"test_folder": 'cert_version_v2',
"error_flag": 'cert-version',
"is_success_case": 'false',
},
{
"description": 'Valid certificate signature algorithm ECDSA_WITH_SHA256',
"test_folder": 'sig_algo_ecdsa_with_sha256',
"error_flag": 'no-error',
"is_success_case": 'true',
},
{
"description": 'Invalid certificate signature algorithm ECDSA_WITH_SHA1',
"test_folder": 'sig_algo_ecdsa_with_sha1',
"error_flag": 'sig-algo',
"is_success_case": 'false',
},
{
"description": "VID in Subject field doesn't match VID in Issuer field",
"test_folder": 'subject_vid_mismatch',
"error_flag": 'subject-vid-mismatch',
"is_success_case": 'false',
},
{
"description": "PID in Subject field doesn't match PID in Issuer field",
"test_folder": 'subject_pid_mismatch',
"error_flag": 'subject-pid-mismatch',
"is_success_case": 'false',
},
{
"description": "Valid certificate public key curve prime256v1",
"test_folder": 'sig_curve_prime256v1',
"error_flag": 'no-error',
"is_success_case": 'true',
},
{
"description": "Invalid certificate public key curve secp256k1",
"test_folder": 'sig_curve_secp256k1',
"error_flag": 'sig-curve',
"is_success_case": 'false',
},
{
"description": "Certificate validity period is entirely in the past (certificate has expired)",
"test_folder": 'valid_in_past',
"error_flag": 'no-error',
"is_success_case": 'false',
},
{
"description": "Certificate validity period starts in the future",
"test_folder": 'valid_in_future',
"error_flag": 'no-error',
"is_success_case": 'false',
},
# TODO Cases:
# 'issuer-vid'
# 'issuer-pid'
# 'subject-vid'
# 'subject-pid'
{
"description": "Certificate doesn't include Basic Constraint extension",
"test_folder": 'ext_basic_missing',
"error_flag": 'ext-basic-missing',
"is_success_case": 'false',
},
{
"description": "Certificate Basic Constraint extension critical field is missing",
"test_folder": 'ext_basic_critical_missing',
"error_flag": 'ext-basic-critical-missing',
"is_success_case": 'false',
},
{
"description": "Certificate Basic Constraint extension critical field is set as 'non-critical'",
"test_folder": 'ext_basic_critical_wrong',
"error_flag": 'ext-basic-critical-wrong',
"is_success_case": 'false',
},
{
"description": "Certificate Basic Constraint extension CA field is missing",
"test_folder": 'ext_basic_ca_missing',
"error_flag": 'ext-basic-ca-missing',
"is_success_case": 'false',
},
{
"description": "Certificate Basic Constraint extension CA field is wrong (TRUE for DAC and FALSE for PAI)",
"test_folder": 'ext_basic_ca_wrong',
"error_flag": 'ext-basic-ca-wrong',
"is_success_case": 'false',
},
{
"description": "Certificate Basic Constraint extension PathLen field presence is wrong (present for DAC not present for PAI)",
"test_folder": 'ext_basic_pathlen_presence_wrong',
"error_flag": 'ext-basic-pathlen-presence-wrong',
"is_success_case": 'false',
},
{
"description": "Certificate Basic Constraint extension PathLen field set to 0",
"test_folder": 'ext_basic_pathlen0',
"error_flag": 'ext-basic-pathlen0',
"is_success_case": 'false',
},
{
"description": "Certificate Basic Constraint extension PathLen field set to 1",
"test_folder": 'ext_basic_pathlen1',
"error_flag": 'ext-basic-pathlen1',
"is_success_case": 'false',
},
{
"description": "Certificate Basic Constraint extension PathLen field set to 2",
"test_folder": 'ext_basic_pathlen2',
"error_flag": 'ext-basic-pathlen2',
"is_success_case": 'false',
},
{
"description": "Certificate doesn't include Key Usage extension",
"test_folder": 'ext_key_usage_missing',
"error_flag": 'ext-key-usage-missing',
"is_success_case": 'false',
},
{
"description": "Certificate Key Usage extension critical field is missing",
"test_folder": 'ext_key_usage_critical_missing',
"error_flag": 'ext-key-usage-critical-missing',
"is_success_case": 'false',
},
{
"description": "Certificate Key Usage extension critical field is set as 'non-critical'",
"test_folder": 'ext_key_usage_critical_wrong',
"error_flag": 'ext-key-usage-critical-wrong',
"is_success_case": 'false',
},
{
"description": "Certificate Key Usage extension digitalSignature field is wrong (not present for DAC and present for PAI, which is OK as optional)",
"test_folder": 'ext_key_usage_dig_sig_wrong',
"error_flag": 'ext-key-usage-dig-sig',
"is_success_case": 'false',
},
{
"description": "Certificate Key Usage extension keyCertSign field is wrong (present for DAC and not present for PAI)",
"test_folder": 'ext_key_usage_key_cert_sign_wrong',
"error_flag": 'ext-key-usage-key-cert-sign',
"is_success_case": 'false',
},
{
"description": "Certificate Key Usage extension cRLSign field is wrong (present for DAC and not present for PAI)",
"test_folder": 'ext_key_usage_crl_sign_wrong',
"error_flag": 'ext-key-usage-crl-sign',
"is_success_case": 'false',
},
{
"description": "Certificate doesn't include Authority Key ID (AKID) extension",
"test_folder": 'ext_akid_missing',
"error_flag": 'ext-akid-missing',
"is_success_case": 'false',
},
{
"description": "Certificate doesn't include Subject Key ID (SKID) extension",
"test_folder": 'ext_skid_missing',
"error_flag": 'ext-skid-missing',
"is_success_case": 'false',
},
{
"description": "Certificate includes optional Extended Key Usage extension",
"test_folder": 'ext_extended_key_usage_present',
"error_flag": 'ext-extended-key-usage',
"is_success_case": 'true',
},
{
"description": "Certificate includes optional Authority Information Access extension",
"test_folder": 'ext_authority_info_access_present',
"error_flag": 'ext-authority-info-access',
"is_success_case": 'true',
},
{
"description": "Certificate includes optional Subject Alternative Name extension",
"test_folder": 'ext_subject_alt_name_present',
"error_flag": 'ext-subject-alt-name',
"is_success_case": 'true',
},
]
VIDPID_FALLBACK_ENCODING_TEST_CASES = [
# Valid/Invalid encoding examples from the spec:
{
"description": 'Fallback VID and PID encoding example from spec: valid and recommended since easily human-readable',
"common_name": 'ACME Matter Devel DAC 5CDA9899 Mvid:FFF1 Mpid:00B1',
"test_folder": 'vidpid_fallback_encoding_01',
"is_success_case": 'true',
},
{
"description": 'Fallback VID and PID encoding example from spec: valid and recommended since easily human-readable',
"common_name": 'ACME Matter Devel DAC 5CDA9899 Mpid:00B1 Mvid:FFF1',
"test_folder": 'vidpid_fallback_encoding_02',
"is_success_case": 'true',
},
{
"description": 'Fallback VID and PID encoding example from spec: valid example showing that order or separators are not considered at all for the overall validity of the embedded fields',
"common_name": 'Mpid:00B1,ACME Matter Devel DAC 5CDA9899,Mvid:FFF1',
"test_folder": 'vidpid_fallback_encoding_03',
"is_success_case": 'true',
},
{
"description": 'Fallback VID and PID encoding example from spec: valid, but less readable',
"common_name": 'ACME Matter Devel DAC 5CDA9899 Mvid:FFF1Mpid:00B1',
"test_folder": 'vidpid_fallback_encoding_04',
"is_success_case": 'true',
},
{
"description": 'Fallback VID and PID encoding example from spec: valid, but highly discouraged, since embedding of substrings within other substrings may be confusing to human readers',
"common_name": 'Mvid:FFF1ACME Matter Devel DAC 5CDAMpid:00B19899',
"test_folder": 'vidpid_fallback_encoding_05',
"is_success_case": 'true',
},
{
"description": 'Fallback VID and PID encoding example from spec: invalid, since substring following Mvid: is not exactly 4 uppercase hexadecimal digits',
"common_name": 'ACME Matter Devel DAC 5CDA9899 Mvid:FF1 Mpid:00B1',
"test_folder": 'vidpid_fallback_encoding_06',
"is_success_case": 'false',
},
{
"description": 'Fallback VID and PID encoding example from spec: invalid, since substring following Mvid: is not exactly 4 uppercase hexadecimal digits',
"common_name": 'ACME Matter Devel DAC 5CDA9899 Mvid:fff1 Mpid:00B1',
"test_folder": 'vidpid_fallback_encoding_07',
"is_success_case": 'false',
},
{
"description": 'Fallback VID and PID encoding example from spec: invalid, since substring following Mpid: is not exactly 4 uppercase hexadecimal digits',
"common_name": 'ACME Matter Devel DAC 5CDA9899 Mvid:FFF1 Mpid:B1',
"test_folder": 'vidpid_fallback_encoding_08',
"is_success_case": 'false',
},
{
"description": 'Fallback VID and PID encoding example from spec: invalid, since substring following Mpid: is not exactly 4 uppercase hexadecimal digits',
"common_name": 'ACME Matter Devel DAC 5CDA9899 Mpid: Mvid:FFF1',
"test_folder": 'vidpid_fallback_encoding_09',
"is_success_case": 'false',
},
# More valid/invalid fallback encoding examples:
{
"description": 'Fallback VID and PID encoding example: invalid VID encoding',
"common_name": 'Mvid:FFF Mpid:00B10x',
"test_folder": 'vidpid_fallback_encoding_10',
"is_success_case": 'false',
},
{
"description": 'Fallback VID and PID encoding example: valid, but less human-readable',
"common_name": 'MpidMvid:FFF10 Matter Test Mpid:00B1',
"test_folder": 'vidpid_fallback_encoding_11',
"is_success_case": 'true',
},
{
"description": 'Fallback VID and PID encoding example: invalid, PID not present and VID not upper case',
"common_name": 'Matter Devel DAC Mpid:Mvid:Fff1',
"test_folder": 'vidpid_fallback_encoding_12',
"is_success_case": 'false',
},
{
"description": 'Fallback VID and PID encoding example: invalid VID prefix',
"common_name": 'Matter Devel DAC Mpid:00B1 MVID:FFF1',
"test_folder": 'vidpid_fallback_encoding_13',
"is_success_case": 'false',
},
{
"description": 'Fallback VID and PID encoding example: invalid PID and VID prefixes',
"common_name": 'Matter Devel DAC Mpid_00B1 Mvid_FFF1',
"test_folder": 'vidpid_fallback_encoding_14',
"is_success_case": 'false',
},
# Examples with both fallback encoding in the common name and using Matter specific OIDs
{
"description": 'Mix of Fallback and Matter OID encoding for VID and PID: valid, Matter OIDs are used and wrong values in the common-name are ignored',
"common_name": 'ACME Matter Devel DAC 5CDA9899 Mvid:FFF2 Mpid:00B2',
"vid": 0xFFF1,
"pid": 0x00B1,
"test_folder": 'vidpid_fallback_encoding_15',
"is_success_case": 'true',
},
{
"description": 'Mix of Fallback and Matter OID encoding for VID and PID: wrong, Correct values encoded in the common-name are ignored',
"common_name": 'ACME Matter Devel DAC 5CDA9899 Mvid:FFF1 Mpid:00B1',
"vid": 0xFFF2,
"pid": 0x00B2,
"test_folder": 'vidpid_fallback_encoding_16',
"is_success_case": 'false',
},
{
"description": 'Mix of Fallback and Matter OID encoding for VID and PID: invalid, PID is using Matter OID then VID must also use Matter OID',
"common_name": 'Mvid:FFF1',
"pid": 0x00B1,
"test_folder": 'vidpid_fallback_encoding_17',
"is_success_case": 'false',
},
]
CD_STRUCT_TEST_CASES = [
{
"description": 'Valid format_version field set to 1.',
"test_folder": 'format_version_1',
"error_flag": 'no-error',
"is_success_case": 'true',
},
{
"description": 'The format_version field is missing.',
"test_folder": 'format_version_missing',
"error_flag": 'format-version-missing',
"is_success_case": 'false',
},
{
"description": 'Invalid format_version field set to 2.',
"test_folder": 'format_version_2',
"error_flag": 'format-version-wrong',
"is_success_case": 'false',
},
{
"description": 'The vendor_id field is missing.',
"test_folder": 'vid_missing',
"error_flag": 'vid-missing',
"is_success_case": 'false',
},
{
"description": "The vendor_id field doesn't match the VID in DAC.",
"test_folder": 'vid_mismatch',
"error_flag": 'vid-mismatch',
"is_success_case": 'false',
},
{
"description": 'The product_id_array field is missing.',
"test_folder": 'pid_array_missing',
"error_flag": 'pid-array-missing',
"is_success_case": 'false',
},
{
"description": "The product_id_array field is an empty TLV array.",
"test_folder": 'pid_array_count0',
"error_flag": 'pid-array-count0',
"is_success_case": 'false',
},
{
"description": "The product_id_array field has one PID value which matches the PID value in DAC.",
"test_folder": 'pid_array_count01_valid',
"error_flag": 'pid-array-count01-valid',
"is_success_case": 'true',
},
{
"description": "The product_id_array field has one PID value that doesn't match the PID value in DAC.",
"test_folder": 'pid_array_count01_mismatch',
"error_flag": 'pid-array-count01-mismatch',
"is_success_case": 'false',
},
{
"description": "The product_id_array field has 10 PID values one of which matches the PID value in DAC.",
"test_folder": 'pid_array_count10_valid',
"error_flag": 'pid-array-count10-valid',
"is_success_case": 'true',
},
{
"description": "The product_id_array field has 10 PID values none of which matches the PID value in DAC.",
"test_folder": 'pid_array_count10_mismatch',
"error_flag": 'pid-array-count10-mismatch',
"is_success_case": 'false',
},
{
"description": "The product_id_array field has 100 PID values one of which matches the PID value in DAC.",
"test_folder": 'pid_array_count100_valid',
"error_flag": 'pid-array-count100-valid',
"is_success_case": 'true',
},
{
"description": "The product_id_array field has 100 PID values none of which matches the PID value in DAC.",
"test_folder": 'pid_array_count100_mismatch',
"error_flag": 'pid-array-count100-mismatch',
"is_success_case": 'false',
},
{
"description": "The device_type_id field is missing.",
"test_folder": 'device_type_id_missing',
"error_flag": 'device-type-id-missing',
"is_success_case": 'false',
},
{
"description": "The device_type_id field doesn't match the device_type_id value in the DCL entries associated with the VID and PID.",
"test_folder": 'device_type_id_mismatch',
"error_flag": 'device-type-id-mismatch',
"is_success_case": 'false',
},
{
"description": "The certificate_id field is missing.",
"test_folder": 'cert_id_missing',
"error_flag": 'cert-id-missing',
"is_success_case": 'false',
},
{
"description": "The certificate_id field doesn't contain a globally unique serial number allocated by the CSA for this CD.",
"test_folder": 'cert_id_mismatch',
"error_flag": 'cert-id-mismatch',
"is_success_case": 'false',
},
{
"description": 'The certificate_id field has wrong length.',
"test_folder": 'cert_id_len_wrong',
"error_flag": 'cert-id-len-wrong',
"is_success_case": 'false',
},
{
"description": 'The security_level field is missing.',
"test_folder": 'security_level_missing',
"error_flag": 'security-level-missing',
"is_success_case": 'false',
},
{
"description": 'The security_level field is set to invalid value (different from 0).',
"test_folder": 'security_level_wrong',
"error_flag": 'security-level-wrong',
"is_success_case": 'false',
},
{
"description": 'The security_information field is missing.',
"test_folder": 'security_info_missing',
"error_flag": 'security-info-missing',
"is_success_case": 'false',
},
{
"description": 'The security_information field is set to invalid value (different from 0).',
"test_folder": 'security_info_wrong',
"error_flag": 'security-info-wrong',
"is_success_case": 'false',
},
{
"description": 'The version_number field is missing.',
"test_folder": 'version_number_missing',
"error_flag": 'version-number-missing',
"is_success_case": 'false',
},
{
"description": 'The version_number field matches the VID and PID used in a DeviceSoftwareVersionModel entry in the DCL matching the certification record associated with the product presenting this CD.',
"test_folder": 'version_number_match',
"error_flag": 'no-error',
"is_success_case": 'true',
},
{
"description": "The version_number field doesn't match the VID and PID used in a DeviceSoftwareVersionModel entry in the DCL matching the certification record associated with the product presenting this CD.",
"test_folder": 'version_number_wrong',
"error_flag": 'version-number-wrong',
"is_success_case": 'false',
},
{
"description": 'The certification_type field is missing.',
"test_folder": 'cert_type_missing',
"error_flag": 'cert-type-missing',
"is_success_case": 'false',
},
{
"description": 'The certification_type field is set to invalid value.',
"test_folder": 'cert_type_wrong',
"error_flag": 'cert-type-wrong',
"is_success_case": 'false',
},
{
"description": 'The dac_origin_vendor_id and dac_origin_product_id fields are not present.',
"test_folder": 'dac_origin_vid_pid_missing',
"error_flag": 'no-error',
"is_success_case": 'true',
},
{
"description": 'The dac_origin_vendor_id field is present and the dac_origin_product_id field is not present.',
"test_folder": 'dac_origin_vid_present_pid_missing',
"error_flag": 'dac-origin-vid-present',
"is_success_case": 'false',
},
{
"description": 'The dac_origin_vendor_id field is not present and dac_origin_product_id is present.',
"test_folder": 'dac_origin_vid_missing_pid_present',
"error_flag": 'dac-origin-pid-present',
"is_success_case": 'false',
},
{
"description": 'The dac_origin_vendor_id and dac_origin_product_id fields present and contain the VID and PID values that match the VID and PID found in the DAC Subject DN.',
"test_folder": 'dac_origin_vid_pid_present_match',
"error_flag": 'dac-origin-vid-pid-present',
"is_success_case": 'true',
},
{
"description": "The dac_origin_vendor_id and dac_origin_product_id fields present and the VID value doesn't match the VID found in the DAC Subject DN.",
"test_folder": 'dac_origin_vid_pid_present_vid_mismatch',
"error_flag": 'dac-origin-vid-mismatch',
"is_success_case": 'false',
},
{
"description": "The dac_origin_vendor_id and dac_origin_product_id fields present and the PID value doesn't match the PID found in the DAC Subject DN.",
"test_folder": 'dac_origin_vid_pid_present_pid_mismatch',
"error_flag": 'dac-origin-pid-mismatch',
"is_success_case": 'false',
},
{
"description": 'The optional authorized_paa_list field is not present.',
"test_folder": 'authorized_paa_list_missing',
"error_flag": 'no-error',
"is_success_case": 'true',
},
{
"description": 'The authorized_paa_list is present but empty (contains no PAAs).',
"test_folder": 'authorized_paa_list_count0',
"error_flag": 'authorized-paa-list-count0',
"is_success_case": 'false',
},
{
"description": 'The authorized_paa_list contains one valid PAA which is authorized to sign the PAI.',
"test_folder": 'authorized_paa_list_count1_valid',
"error_flag": 'authorized-paa-list-count1-valid',
"is_success_case": 'true',
},
{
"description": 'The authorized_paa_list contains two PAAs, one of which is a valid PAA authorized to sign the PAI.',
"test_folder": 'authorized_paa_list_count2_valid',
"error_flag": 'authorized-paa-list-count2-valid',
"is_success_case": 'true',
},
{
"description": 'The authorized_paa_list contains three PAAs none of which is a valid PAA authorized to sign the PAI.',
"test_folder": 'authorized_paa_list_count3_invalid',
"error_flag": 'authorized-paa-list-count3-invalid',
"is_success_case": 'false',
},
{
"description": 'The authorized_paa_list contains ten PAAs, one of which is a valid PAA authorized to sign the PAI.',
"test_folder": 'authorized_paa_list_count10_valid',
"error_flag": 'authorized-paa-list-count10-valid',
"is_success_case": 'true',
},
{
"description": 'The authorized_paa_list contains ten PAAs none of which is a valid PAA authorized to sign the PAI.',
"test_folder": 'authorized_paa_list_count10_invalid',
"error_flag": 'authorized-paa-list-count10-invalid',
"is_success_case": 'false',
},
{
"description": 'Invalid Signer Info version set to v2.',
"test_folder": 'signer_info_v2',
"error_flag": 'signer-info-v2',
"is_success_case": 'false',
},
{
"description": 'Invalid Signer Info digest algorithm SHA1.',
"test_folder": 'signer_info_digest_algo_sha1',
"error_flag": 'signer-info-digest-algo',
"is_success_case": 'false',
},
{
"description": 'The subjectKeyIdentifier contains SKID of a well-known Zigbee Alliance certificate.',
"test_folder": 'signer_info_skid_valid',
"error_flag": 'no-error',
"is_success_case": 'true',
},
{
"description": 'The subjectKeyIdentifier contains invalid SKID of a certificate unknown by Zigbee Alliance.',
"test_folder": 'signer_info_skid_invalid',
"error_flag": 'signer-info-skid',
"is_success_case": 'false',
},
{
"description": 'Valid CMS version set to v3.',
"test_folder": 'cms_v3',
"error_flag": 'no-error',
"is_success_case": 'true',
},
{
"description": 'Invalid CMS version set to v2.',
"test_folder": 'cms_v2',
"error_flag": 'cms-v2',
"is_success_case": 'false',
},
{
"description": 'Valid CMS digest algorithm SHA256.',
"test_folder": 'cms_digest_algo_sha256',
"error_flag": 'no-error',
"is_success_case": 'true',
},
{
"description": 'Invalid CMS digest algorithm SHA1.',
"test_folder": 'cms_digest_algo_sha1',
"error_flag": 'cms-digest-algo',
"is_success_case": 'false',
},
{
"description": 'Valid CMS signature algorithm ECDSA_WITH_SHA256.',
"test_folder": 'cms_sig_algo_ecdsa_with_sha256',
"error_flag": 'no-error',
"is_success_case": 'true',
},
{
"description": 'Invalid CMS signature algorithm ECDSA_WITH_SHA1.',
"test_folder": 'cms_sig_algo_ecdsa_with_sha1',
"error_flag": 'cms-sig-algo',
"is_success_case": 'false',
},
{
"description": 'Valid CMS eContentType pkcs7-data.',
"test_folder": 'cms_econtent_type_pkcs7_data',
"error_flag": 'no-error',
"is_success_case": 'true',
},
{
"description": 'Invalid CMS eContentType is set to Microsoft Authenticode [MSAC] OID = { 1.3.6.1.4.1.311.2.1.4 }.',
"test_folder": 'cms_econtent_type_msac',
"error_flag": 'cms-econtent-type',
"is_success_case": 'false',
},
{
"description": 'Invalid CMS Signature.',
"test_folder": 'cms_signature',
"error_flag": 'cms-sig',
"is_success_case": 'false',
},
]
class Names:
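# Maps a certificate type to its PEM/DER certificate and key file names: the PAA uses the externally
# supplied path prefix, while PAI and DAC files are placed in the test case output directory.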
def __init__(self, cert_type: CertType, paa_path, test_case_out_dir):
prefixes = {CertType.PAA: paa_path,
CertType.PAI: test_case_out_dir + '/pai-',
CertType.DAC: test_case_out_dir + '/dac-'}
prefix = prefixes[cert_type]
self.cert_pem = prefix + 'Cert.pem'
self.cert_der = prefix + 'Cert.der'
self.key_pem = prefix + 'Key.pem'
self.key_der = prefix + 'Key.der'
class DevCertBuilder:
def __init__(self, cert_type: CertType, error_type: str, paa_path: str, test_case_out_dir: str, chip_cert: str, vid: int, pid: int, custom_cn_attribute: str, valid_from: str):
self.vid = vid
self.pid = pid
self.cert_type = cert_type
self.error_type = error_type
self.chipcert = chip_cert
self.custom_cn_attribute = custom_cn_attribute
self.valid_from = valid_from
if not os.path.exists(self.chipcert):
raise Exception('Path not found: %s' % self.chipcert)
if not os.path.exists(test_case_out_dir):
os.mkdir(test_case_out_dir)
paa = Names(CertType.PAA, paa_path, test_case_out_dir)
pai = Names(CertType.PAI, paa_path, test_case_out_dir)
dac = Names(CertType.DAC, paa_path, test_case_out_dir)
if cert_type == CertType.PAI:
self.signer = paa
self.own = pai
if cert_type == CertType.DAC:
self.signer = pai
self.own = dac
def make_certs_and_keys(self) -> None:
"""Creates the PEM and DER certs and keyfiles"""
error_type_flag = ' -I -E' + self.error_type
subject_name = self.custom_cn_attribute
vid_flag = ' -V 0x{:X}'.format(self.vid)
pid_flag = ' -P 0x{:X}'.format(self.pid)
if (len(self.valid_from) == 0):
validity_flags = ' -l 4294967295 '
else:
validity_flags = ' -f "' + self.valid_from + '" -l 730 '
if self.cert_type == CertType.PAI:
if (len(subject_name) == 0):
subject_name = 'Matter Test PAI'
type_flag = '-t i'
elif self.cert_type == CertType.DAC:
if (len(subject_name) == 0):
subject_name = 'Matter Test DAC'
type_flag = '-t d'
else:
return
cmd = self.chipcert + ' gen-att-cert ' + type_flag + error_type_flag + ' -c "' + subject_name + '" -C ' + self.signer.cert_pem + ' -K ' + \
self.signer.key_pem + vid_flag + pid_flag + validity_flags + ' -o ' + self.own.cert_pem + ' -O ' + self.own.key_pem
subprocess.run(cmd, shell=True)
cmd = 'openssl x509 -inform pem -in ' + self.own.cert_pem + \
' -out ' + self.own.cert_der + ' -outform DER'
subprocess.run(cmd, shell=True)
cmd = 'openssl ec -inform pem -in ' + self.own.key_pem + \
' -out ' + self.own.key_der + ' -outform DER'
subprocess.run(cmd, shell=True)
def add_raw_ec_keypair_to_dict_from_der(der_key_filename: str, json_dict: dict):
with open(der_key_filename, 'rb') as infile:
key_data_der = infile.read()
key_der = cryptography.hazmat.primitives.serialization.load_der_private_key(key_data_der, None)
json_dict["dac_private_key"] = hexlify(key_der.private_numbers().private_value.to_bytes(32, byteorder='big')).decode('utf-8')
pk_x = key_der.public_key().public_numbers().x
pk_y = key_der.public_key().public_numbers().y
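# Build the uncompressed SEC1 point encoding: a 0x04 prefix followed by the 32-byte big-endian X and Y
# coordinates of the public key.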
public_key_raw_bytes = bytearray([0x04])
public_key_raw_bytes.extend(bytearray(pk_x.to_bytes(32, byteorder='big')))
public_key_raw_bytes.extend(bytearray(pk_y.to_bytes(32, byteorder='big')))
json_dict["dac_public_key"] = hexlify(bytes(public_key_raw_bytes)).decode('utf-8')
def add_files_to_json_config(files_mapping: dict, json_dict: dict):
for output_key_name, filename in files_mapping.items():
with open(filename, "rb") as infile:
file_bytes = infile.read()
json_dict[output_key_name] = hexlify(file_bytes).decode('utf-8')
def generate_test_case_vector_json(test_case_out_dir: str, test_cert: str, test_case):
json_dict = {}
files_in_path = glob.glob(os.path.join(test_case_out_dir, "*"))
output_json_filename = test_case_out_dir + "/test_case_vector.json"
files_to_add = {
"dac_cert": "dac-Cert.der",
"pai_cert": "pai-Cert.der",
"firmware_information": "firmware-info.bin",
"certification_declaration": "cd.der",
}
# Add description fields to JSON Config
if "description" in test_case:
json_dict["description"] = test_cert.upper() + " Test Vector: " + test_case["description"]
if "is_success_case" in test_case:
# These test cases are expected to fail when error injected in DAC but expected to pass when error injected in PAI
if (test_cert == 'pai') and (test_case["test_folder"] in ['ext_basic_pathlen0', 'vidpid_fallback_encoding_08', 'vidpid_fallback_encoding_09', 'ext_key_usage_dig_sig_wrong']):
json_dict["is_success_case"] = "true"
else:
json_dict["is_success_case"] = test_case["is_success_case"]
# Out of all files we could add, find the ones that were present in test case, and embed them in hex
files_available = {os.path.basename(path) for path in files_in_path}
files_to_add = {key: os.path.join(test_case_out_dir, filename)
for key, filename in files_to_add.items() if filename in files_available}
add_files_to_json_config(files_to_add, json_dict)
# Embed the DAC key if present
if "dac-Key.der" in files_available:
der_key_filename = os.path.join(test_case_out_dir, "dac-Key.der")
add_raw_ec_keypair_to_dict_from_der(der_key_filename, json_dict)
with open(output_json_filename, "wt+") as outfile:
json.dump(json_dict, outfile, indent=2)
def main():
argparser = argparse.ArgumentParser()
argparser.add_argument('-o', '--out_dir', dest='outdir',
default='credentials/development/commissioner_dut',
help='output directory for all generated test vectors')
argparser.add_argument('-p', '--paa', dest='paapath',
default='credentials/test/attestation/Chip-Test-PAA-FFF1-', help='PAA to use')
argparser.add_argument('-d', '--cd', dest='cdpath',
default='credentials/test/certification-declaration/Chip-Test-CD-Signing-',
help='CD Signing Key/Cert to use')
argparser.add_argument('-c', '--chip-cert_dir', dest='chipcertdir',
default='out/debug/linux_x64_clang/', help='Directory where chip-cert tool is located')
args = argparser.parse_args()
if not os.path.exists(args.outdir):
os.mkdir(args.outdir)
chipcert = args.chipcertdir + 'chip-cert'
if not os.path.exists(chipcert):
raise Exception('Path not found: %s' % chipcert)
cd_cert = args.cdpath + 'Cert.pem'
cd_key = args.cdpath + 'Key.pem'
for test_cert in ['dac', 'pai']:
for test_case in CERT_STRUCT_TEST_CASES:
test_case_out_dir = args.outdir + '/struct_' + test_cert + '_' + test_case["test_folder"]
if test_case["test_folder"] == 'valid_in_past':
if test_cert == 'dac':
dac_valid_from = VALID_IN_PAST
pai_valid_from = VALID_NOW
else:
dac_valid_from = VALID_NOW
pai_valid_from = VALID_IN_PAST
elif test_case["test_folder"] == 'valid_in_future':
if test_cert == 'dac':
dac_valid_from = VALID_IN_FUTURE
pai_valid_from = VALID_NOW
else:
dac_valid_from = VALID_NOW
pai_valid_from = VALID_IN_FUTURE
else:
dac_valid_from = ''
pai_valid_from = ''
if test_cert == 'dac':
error_type_dac = test_case["error_flag"]
error_type_pai = 'no-error'
else:
if test_case["error_flag"] == 'ext-skid-missing':
error_type_dac = 'ext-akid-missing'
else:
error_type_dac = 'no-error'
error_type_pai = test_case["error_flag"]
vid = 0xFFF1
pid = 0x8000
# Generate PAI Cert/Key
builder = DevCertBuilder(CertType.PAI, error_type_pai, args.paapath, test_case_out_dir,
chipcert, vid, PID_NOT_PRESENT, '', pai_valid_from)
builder.make_certs_and_keys()
if test_cert == 'pai':
if test_case["error_flag"] == 'subject-vid-mismatch':
vid += 1
if test_case["error_flag"] == 'subject-pid-mismatch':
pid += 1
# Generate DAC Cert/Key
builder = DevCertBuilder(CertType.DAC, error_type_dac, args.paapath, test_case_out_dir,
chipcert, vid, pid, '', dac_valid_from)
builder.make_certs_and_keys()
# Generate Certification Declaration (CD)
vid_flag = ' -V 0x{:X}'.format(vid)
pid_flag = ' -p 0x{:X}'.format(pid)
cmd = chipcert + ' gen-cd -K ' + cd_key + ' -C ' + cd_cert + ' -O ' + test_case_out_dir + '/cd.der' + \
' -f 1 ' + vid_flag + pid_flag + ' -d 0x1234 -c "ZIG20141ZB330001-24" -l 0 -i 0 -n 9876 -t 0'
subprocess.run(cmd, shell=True)
# Generate Test Case Data Container in JSON Format
generate_test_case_vector_json(test_case_out_dir, test_cert, test_case)
for test_cert in ['dac', 'pai']:
for test_case in VIDPID_FALLBACK_ENCODING_TEST_CASES:
test_case_out_dir = args.outdir + '/struct_' + test_cert + '_' + test_case["test_folder"]
if test_cert == 'dac':
common_name_dac = test_case["common_name"]
common_name_pai = ''
if "vid" in test_case:
vid_dac = test_case["vid"]
else:
vid_dac = VID_NOT_PRESENT
if "pid" in test_case:
pid_dac = test_case["pid"]
else:
pid_dac = PID_NOT_PRESENT
vid_pai = 0xFFF1
pid_pai = 0x00B1
else:
common_name_dac = ''
common_name_pai = test_case["common_name"]
common_name_pai = common_name_pai.replace('DAC', 'PAI')
vid_dac = 0xFFF1
pid_dac = 0x00B1
if "vid" in test_case:
vid_pai = test_case["vid"]
else:
vid_pai = VID_NOT_PRESENT
if "pid" in test_case:
pid_pai = test_case["pid"]
else:
pid_pai = PID_NOT_PRESENT
# Generate PAI Cert/Key
builder = DevCertBuilder(CertType.PAI, 'no-error', args.paapath, test_case_out_dir,
chipcert, vid_pai, pid_pai, common_name_pai, '')
builder.make_certs_and_keys()
# Generate DAC Cert/Key
builder = DevCertBuilder(CertType.DAC, 'no-error', args.paapath, test_case_out_dir,
chipcert, vid_dac, pid_dac, common_name_dac, '')
builder.make_certs_and_keys()
# Generate Certification Declaration (CD)
cmd = chipcert + ' gen-cd -K ' + cd_key + ' -C ' + cd_cert + ' -O ' + test_case_out_dir + '/cd.der' + \
' -f 1 -V 0xFFF1 -p 0x00B1 -d 0x1234 -c "ZIG20141ZB330001-24" -l 0 -i 0 -n 9876 -t 0'
subprocess.run(cmd, shell=True)
# Generate Test Case Data Container in JSON Format
generate_test_case_vector_json(test_case_out_dir, test_cert, test_case)
for test_case in CD_STRUCT_TEST_CASES:
test_case_out_dir = args.outdir + '/struct_cd_' + test_case["test_folder"]
vid = 0xFFF1
pid = 0x8000
# Generate PAI Cert/Key
builder = DevCertBuilder(CertType.PAI, 'no-error', args.paapath, test_case_out_dir,
chipcert, vid, pid, '', '')
builder.make_certs_and_keys()
# Generate DAC Cert/Key
builder = DevCertBuilder(CertType.DAC, 'no-error', args.paapath, test_case_out_dir,
chipcert, vid, pid, '', '')
builder.make_certs_and_keys()
# Generate Certification Declaration (CD)
vid_flag = ' -V 0x{:X}'.format(vid)
pid_flag = ' -p 0x{:X}'.format(pid)
dac_origin_flag = ' '
if test_case["error_flag"] == 'dac-origin-vid-present' or test_case["error_flag"] == 'dac-origin-vid-pid-present':
dac_origin_flag += ' -o 0x{:X}'.format(vid)
if test_case["error_flag"] == 'dac-origin-pid-present' or test_case["error_flag"] == 'dac-origin-vid-pid-present':
dac_origin_flag += ' -r 0x{:X}'.format(pid)
if test_case["error_flag"] in ('authorized-paa-list-count0', 'authorized-paa-list-count1-valid', 'authorized-paa-list-count2-valid', 'authorized-paa-list-count3-invalid', 'authorized-paa-list-count10-valid', 'authorized-paa-list-count10-invalid'):
authorized_paa_flag = ' -a ' + args.paapath + 'Cert.pem'
else:
authorized_paa_flag = ''
cmd = chipcert + ' gen-cd -I -E ' + test_case["error_flag"] + ' -K ' + cd_key + ' -C ' + cd_cert + ' -O ' + test_case_out_dir + '/cd.der' + \
' -f 1 ' + vid_flag + pid_flag + dac_origin_flag + authorized_paa_flag + ' -d 0x1234 -c "ZIG20141ZB330001-24" -l 0 -i 0 -n 9876 -t 0'
subprocess.run(cmd, shell=True)
# Generate Test Case Data Container in JSON Format
generate_test_case_vector_json(test_case_out_dir, 'cd', test_case)
# Test case: Generate {DAC, PAI, PAA} chain with random (invalid) PAA
test_case = {
"description": 'Use Invalid PAA (Not Registered in the DCL).',
"test_folder": 'invalid_paa',
"error_flag": 'no-error',
"is_success_case": 'false',
}
test_case_out_dir = args.outdir + '/' + test_case["test_folder"]
paapath = test_case_out_dir + '/paa-'
if not os.path.exists(test_case_out_dir):
os.mkdir(test_case_out_dir)
# Generate PAA Cert/Key
cmd = chipcert + ' gen-att-cert -t a -c "Invalid (Not Registered in the DCL) Matter PAA" -f "' + VALID_IN_PAST + \
'" -l 4294967295 -o ' + paapath + 'Cert.pem -O ' + paapath + 'Key.pem'
subprocess.run(cmd, shell=True)
vid = 0xFFF1
pid = 0x8000
# Generate PAI Cert/Key
builder = DevCertBuilder(CertType.PAI, test_case["error_flag"], paapath, test_case_out_dir,
chipcert, vid, PID_NOT_PRESENT, '', VALID_IN_PAST)
builder.make_certs_and_keys()
# Generate DAC Cert/Key
builder = DevCertBuilder(CertType.DAC, test_case["error_flag"], paapath, test_case_out_dir,
chipcert, vid, pid, '', VALID_IN_PAST)
builder.make_certs_and_keys()
# Generate Certification Declaration (CD)
vid_flag = ' -V 0x{:X}'.format(vid)
pid_flag = ' -p 0x{:X}'.format(pid)
cmd = chipcert + ' gen-cd -K ' + cd_key + ' -C ' + cd_cert + ' -O ' + test_case_out_dir + '/cd.der' + \
' -f 1 ' + vid_flag + pid_flag + ' -d 0x1234 -c "ZIG20141ZB330001-24" -l 0 -i 0 -n 9876 -t 0'
subprocess.run(cmd, shell=True)
# Generate Test Case Data Container in JSON Format
generate_test_case_vector_json(test_case_out_dir, 'paa', test_case)
if __name__ == '__main__':
sys.exit(main())
|
{
"content_hash": "2c87d65466bc1ba9bc95aa1c974bbe27",
"timestamp": "",
"source": "github",
"line_count": 1000,
"max_line_length": 398,
"avg_line_length": 41.915,
"alnum_prop": 0.5848502922581414,
"repo_name": "project-chip/connectedhomeip",
"id": "3fb099d81c0052f93e04c8e6932a2aa868fe6fa4",
"size": "41938",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/tools/chip-cert/gen_com_dut_test_vectors.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1759301"
},
{
"name": "C++",
"bytes": "19104548"
},
{
"name": "CMake",
"bytes": "140510"
},
{
"name": "Dockerfile",
"bytes": "50353"
},
{
"name": "Emacs Lisp",
"bytes": "1042"
},
{
"name": "Java",
"bytes": "167719"
},
{
"name": "JavaScript",
"bytes": "2106"
},
{
"name": "Jinja",
"bytes": "22322"
},
{
"name": "Objective-C",
"bytes": "930838"
},
{
"name": "Objective-C++",
"bytes": "435348"
},
{
"name": "Python",
"bytes": "1931007"
},
{
"name": "Shell",
"bytes": "195843"
},
{
"name": "Tcl",
"bytes": "311"
},
{
"name": "ZAP",
"bytes": "584219"
}
],
"symlink_target": ""
}
|
from SearchBehavior import SearchBehavior
from CompositionSearchBehavior import CompositionSearchBehavior
import sys
class NodeBehaviorGenerator:
def generateNodeBehavior(self, entries, nodePopulator):
self.__nodeBehavior = ''
for entry in entries:
value = entry.firstChild.data
key = entry.getAttribute('key')
if key == 'nodeBehavior':
self.__nodeBehavior = value
if self.__nodeBehavior == 'SearchBehavior':
return SearchBehavior(entries, nodePopulator)
if self.__nodeBehavior == 'CompositionSearchBehavior':
return CompositionSearchBehavior(entries, nodePopulator)
if self.__nodeBehavior == 'None':
return None
print 'Invalid node behavior %s' % self.__nodeBehavior
sys.exit()
|
{
"content_hash": "95ecf5a0627ceecc6a836fff4776a92a",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 68,
"avg_line_length": 32.17857142857143,
"alnum_prop": 0.6037735849056604,
"repo_name": "unaguil/hyperion-ns2",
"id": "ceb2b913f6a17cd2503f9c62a323c2db5bb4c316",
"size": "901",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "experiments/nodeBehavior/NodeBehaviorGenerator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
from shellac.server import StreamBuf
def test():
print 'Testing StreamBuf...'
s = StreamBuf()
assert s.ready() == False
assert s.closed() == False
s.write('Hello')
assert s.ready() == True
assert s.read() == 'Hello'
assert s.read() == 'Hello'
s.ack(2)
assert s.read() == 'llo'
s.ack(3)
assert s.read() == ''
s.close()
assert s.closed() == True
assert s.buffer() == 'Hello'
s.clear()
assert s.buffer() == ''
assert s.ready() == False
assert s.closed() == False
s.write('Romeo, oh Romeo.')
s.close()
s.ack(16)
assert s.complete() == True
print
print 'Done.'
print
if __name__ == '__main__':
test()
|
{
"content_hash": "2375af8f13522cc4fd1dba76f93aab8a",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 36,
"avg_line_length": 14.790697674418604,
"alnum_prop": 0.5911949685534591,
"repo_name": "kmacrow/Shellac",
"id": "ebbd5696a826d18d21a92b67b671b249c64dd6da",
"size": "637",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/StreamBufTests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "OpenEdge ABL",
"bytes": "1545"
},
{
"name": "Python",
"bytes": "46738"
},
{
"name": "Shell",
"bytes": "761"
}
],
"symlink_target": ""
}
|
__author__ = 'wilmer'
# This one corresponds to the AverageOpeningTime.pdf document (first model)
try:
import mkl
have_mkl = True
print("Running with MKL Acceleration")
except ImportError:
have_mkl = False
print("Running with normal backends")
import pickle
import time
import socket
import numpy as np
import matplotlib.pyplot as plt
from pylab import Line2D, gca
from scipy.stats import describe
from gurobipy import *
import math
from itertools import product
import pylab as pl
from matplotlib import collections as mc
import os
# User input goes here and only here
tumorsite = "HelycalGyn"
tumorsite = "Prostate"
numcores = 12
initialProjections = 51
initialProjections = 153
numberOfLeaves = 64
timeA = 0.077 #secs
timeM = 0.02 #Minimum LOT
time10 = 10
speed = 24 #degrees per second
if "Lung" == tumorsite:
initialProjections = 153
do_subsample = False
maxvoxels = 2000 # Never run less than 300. Only useful if do_subsample is True
imrt = True
imrtwith20msecondsconstraint = False #Only active if imrt activated
loadWarmStart = False
relaxedProblem = False
pairSolution = False
# If called externally
executor = ''
if len(sys.argv) > 1:
executor = sys.argv[0]
timeM = float(sys.argv[1])
timeA = float(sys.argv[2])
# IMRT
imrt = False
if int(sys.argv[3]):
imrt = True
# IMRT WITH CONSTRAINT
imrtwith20msecondsconstraint = False
if int(sys.argv[4]):
imrtwith20msecondsconstraint = True
# pair solutions
pairSolution = False
if int(sys.argv[5]):
pairSolution = True
# relaxed Problem
relaxedProblem = False
if int(sys.argv[6]):
relaxedProblem = True
initialProjections = int(sys.argv[7])
loadWarmStart = False
if int(sys.argv[8]):
loadWarmStart = True
if len(sys.argv) > 9:
maxvoxels = int(sys.argv[9])
do_subsample = True
# Logical fixes:
if imrtwith20msecondsconstraint and not imrt:
imrtwith20msecondsconstraint = False
if imrt:
loadWarmStart = False
relaxedProblem = False
pairSolution = False
print('Arguments are: timeM', timeM, 'timeA', timeA, 'maxvoxels', maxvoxels, 'effective?', str(do_subsample), 'imrt', str(imrt),
'imrtwithConstraint', str(imrtwith20msecondsconstraint), 'relaxedProblem',
str(relaxedProblem), 'pairSolution', str(pairSolution), 'warmStart', str(loadWarmStart))
# First calculations
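# t51: seconds the gantry spends on one projection (360 degrees split across the projections at `speed` deg/s);
# k10: number of projections covered by the first `time10` seconds; delta51: arc in degrees swept per projection.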
t51 = (360/initialProjections) / speed
k10 = math.ceil(time10 / t51)
delta51 = speed * t51 #distance equals speed times time
initialTime = time.time()
if 'radiation-math' == socket.gethostname():
runner277 = "python tomoAverageOnlyTwoProjectionsNeighbors.py"
elif 'IOE-Starchief' == socket.gethostname(): # MY HOUSE
runner277 = "C:\Intel\python\intelpython3\python tomoAverageOnlyTwoProjectionsNeighbors.py"
elif 'DESKTOP-EA1PG8V' == socket.gethostname(): # MY HOUSE
runner277 = "C:\Intel\python\intelpython3\python tomoAverageOnlyTwoProjectionsNeighbors.py"
elif ('arc-ts.umich.edu' == socket.gethostname().split('.', 1)[-1]): # FLUX
runner277 = "C:\Intel\python\intelpython3\python tomoAverageOnlyTwoProjectionsNeighbors.py"
else:
runner277 = "C:\Intel\python\intelpython3\python tomoAverageOnlyTwoProjectionsNeighbors.py"
## Function that reads the files produced by Weiguo
def getvector(necfile,dtype):
with open(necfile, 'rb') as f:
try:
data = np.fromfile(f, dtype=dtype)
finally:
f.close()
return(data)
def get_subsampled_mask(struct_img_mask_full_res, subsampling_img):
sub_sampled_img_struct = np.zeros_like(struct_img_mask_full_res)
sub_sampled_img_struct[np.where(subsampling_img)] = struct_img_mask_full_res[np.where(subsampling_img)]
return np.copy(sub_sampled_img_struct)
def get_structure_mask(struct_id_list, struct_img_arr):
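# struct_img_arr encodes structure membership as a bitmask: bit (s - 1) set means the voxel belongs to
# structure s. Structures later in struct_id_list overwrite earlier ones when a voxel belongs to several structures.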
img_struct = np.zeros_like(struct_img_arr)
# Go structure by structure and identify the voxels that belong to it
for s in struct_id_list:
img_struct[np.where(struct_img_arr & 2 ** (s - 1))] = s
return np.copy(img_struct)
## Function that selects roughly the number numelems as a sample. (Usually you get substantially less)
## Say you input numelems=90. Then you get less than 90 voxels in your case.
def get_sub_sub_sample(subsampling_img, numelems):
sub_sub = np.zeros_like(subsampling_img)
locations = np.where(subsampling_img)[0]
#print(locations)
print('number of elements', len(locations))
a = np.arange(0,len(locations), int(len(locations)/numelems))
#print(a)
sublocations = locations[a]
sub_sub[sublocations] = 1
return(sub_sub)
class tomodata:
## Initialization of the data
def __init__(self):
print('hostname:', socket.gethostname())
self.base_dir = 'data/dij/HelicalGyn/'
#self.base_dir = 'data/dij153/HelicalGyn/'
if ('arc-ts.umich.edu' == socket.gethostname().split('.', 1)[-1]): # FLUX
self.base_dir = '/scratch/engin_flux/wilmer/dij/HelicalGyn/'
if tumorsite == "Prostate":
self.base_dir = 'data/dij/prostate/'
if 153 == initialProjections:
self.base_dir = 'data/dij153/prostate/'
if ('arc-ts.umich.edu' == socket.gethostname().split('.', 1)[-1]): # FLUX
self.base_dir = '/scratch/engin_flux/wilmer/dij/prostate/'
if tumorsite == "Lung":
self.base_dir = 'data/dij153/lung/' # 51
if ('arc-ts.umich.edu' == socket.gethostname().split('.', 1)[-1]): # FLUX
self.base_dir = '/scratch/engin_flux/wilmer/dij/lung/'
# The number of loops to be used in this case
self.bixelsintween = 1
self.yBar = 700
self.maxvoxels = maxvoxels
self.img_filename = 'samplemask.img'
self.header_filename = 'samplemask.header'
self.struct_img_filename = 'roimask.img'
self.struct_img_header = 'roimask.header'
self.outputDirectory = "outputMultiProj/"
self.roinames = {}
# N Value: Number of beamlets in the gantry (overriden in Wilmer's Case)
self.L = numberOfLeaves
self.get_dim(self.base_dir, 'samplemask.header')
self.get_totalbeamlets(self.base_dir, 'dij/Size_out.txt')
self.roimask_reader(self.base_dir, 'roimask.header')
self.timeA = timeA
self.timeM = timeM
#self.argumentVariables()
print('Read vectors...')
self.readWeiguosCase( )
self.maskNamesGetter(self.base_dir + self.struct_img_header)
print('done')
# Create a space in smallvoxel coordinates
self.smallvoxels = self.BigToSmallCreator()
#Now remove bixels carefully
#self.removebixels(self.bixelsintween)
#Do the smallvoxels again:
_, _, self.smallvoxels, _ = np.unique(self.smallvoxels, return_index=True, return_inverse=True, return_counts=True)
print('Build sparse matrix.')
self.totalsmallvoxels = max(self.smallvoxels) + 1 #12648448
print('totalsmallvoxels:', self.totalsmallvoxels)
print('a brief description of Dijs array', describe(self.Dijs))
#self.D = sps.csr_matrix((self.Dijs, (self.smallvoxels, self.bixels)), shape=(self.totalsmallvoxels, self.totalbeamlets))
self.quadHelperThresh = np.zeros(len(self.mask))
self.quadHelperUnder = np.zeros(len(self.mask))
self.quadHelperOver = np.zeros(len(self.mask))
self.numProjections = self.getNumProjections()
#######################################
projIni = 1 + np.floor(max(self.bixels / self.L)).astype(int)
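# Pad the plan with k10 lead-in projections covering the first `time10` seconds of rotation; dose-carrying
# beamlets are indexed from projection k10 onward when bdata is filled below.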
self.numProjections = k10 + projIni
self.leafsD = (self.bixels % self.L).astype(int)
self.projectionsD = np.floor(self.bixels / self.L).astype(int)
self.bdata = np.zeros((self.numProjections, self.L))
self.bixelvoxel = np.zeros((max(self.bixels)+1, 1+max(self.smallvoxels)))
if tumorsite == "Prostate":
for i in range(len(self.mask)):
# Constraint on TARGETS
T = None
if self.mask[i] in self.TARGETList:
T = self.TARGETThresholds[np.where(self.mask[i] == self.TARGETList)[0][0]]
self.quadHelperOver[i] = 15.5 #PROSTATE! FOR GYN SEE BELOW!
self.quadHelperUnder[i] = 2.3 #PROSTATE! FOR GYN SEE BELOW!
# Constraint on OARs
elif self.mask[i] in self.OARList:
T = self.OARThresholds[np.where(self.mask[i] == self.OARList)[0][0]]
self.quadHelperOver[i] = 5E-3 #PROSTATE! FOR GYN SEE BELOW!
self.quadHelperUnder[i] = 0.0 #PROSTATE! FOR GYN SEE BELOW!
#if 7 == self.mask[i]: # RECTUM!!!
# self.quadHelperOver[i] = 50E-3 # PROSTATE! FOR GYN SEE BELOW!
# self.quadHelperUnder[i] = 0.0 # PROSTATE! FOR GYN SEE BELOW!
elif 0 == self.mask[i]:
print('there is an element in the voxels that is also mask 0')
self.quadHelperThresh[i] = T
elif "Lung" == tumorsite:
print('Lung Case parameters')
for i in range(len(self.mask)):
# Constraint on TARGETS
T = None
if self.mask[i] in self.TARGETList:
T = self.TARGETThresholds[np.where(self.mask[i] == self.TARGETList)[0][0]]
self.quadHelperOver[i] = 0.0001 #GYN! FOR PROSTATE SEE BELOW!
self.quadHelperUnder[i] = 1E1 #GYN! FOR PROSTATE SEE BELOW!
# Constraint on OARs
elif self.mask[i] in self.OARList:
T = self.OARThresholds[np.where(self.mask[i] == self.OARList)[0][0]]
self.quadHelperOver[i] = 0.003 #GYN! FOR PROSTATE SEE BELOW!
self.quadHelperUnder[i] = 0.0 #GYN! FOR PROSTATE SEE BELOW!
elif 0 == self.mask[i]:
print('there is an element in the voxels that is also mask 0')
self.quadHelperThresh[i] = T
########################
else:
print('Gyn Case parameters')
for i in range(len(self.mask)):
# Constraint on TARGETS
T = None
if self.mask[i] in self.TARGETList:
T = self.TARGETThresholds[np.where(self.mask[i] == self.TARGETList)[0][0]]
self.quadHelperOver[i] = 0.0001 #GYN! FOR PROSTATE SEE BELOW!
self.quadHelperUnder[i] = 9E11 #GYN! FOR PROSTATE SEE BELOW!
# Constraint on OARs
elif self.mask[i] in self.OARList:
T = self.OARThresholds[np.where(self.mask[i] == self.OARList)[0][0]]
self.quadHelperOver[i] = 0.003 #GYN! FOR PROSTATE SEE BELOW!
self.quadHelperUnder[i] = 0.0 #GYN! FOR PROSTATE SEE BELOW!
elif 0 == self.mask[i]:
print('there is an element in the voxels that is also mask 0')
self.quadHelperThresh[i] = T
########################
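# Aggregate per-beamlet information: bixelvoxel counts how many Dij entries link each beamlet to each voxel,
# while bdata keeps the largest single dose coefficient observed for each (projection, leaf) pair.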
for i in range(len(self.bixels)):
beamP = self.projectionsD[i] + k10
beamL = self.leafsD[i]
self.bixelvoxel[self.bixels[i], self.smallvoxels[i]] += 1
#if self.smallvoxels[i] == 0:
if self.smallvoxels[i] == 0:
print('Choose one point and corresponding dose (p, l, v, d):', beamP, beamL, self.smallvoxels[i], self.Dijs[i])
if self.bdata[beamP, beamL] < self.Dijs[i]:
self.bdata[beamP, beamL] = self.Dijs[i]
print(self.bixelvoxel.max())
# Logging
self.treatmentName = 'IMRT'
if imrtwith20msecondsconstraint:
self.treatmentName += '20msec'
if not imrt:
if pairSolution:
self.treatmentName = 'pairModel'
else:
self.treatmentName = 'fullModel'
if relaxedProblem:
self.treatmentName += 'relaxedVersion'
self.chunkName = tumorsite + '-' + str(initialProjections) + '-' + self.treatmentName + '-MinLOT-' + str(timeM) + '-minAvgLot-' + str(timeA) + '-vxls-' + str(self.totalsmallvoxels) + '-ntnsty-'+str(self.yBar)
self.feasibleName = tumorsite + '-' + str(initialProjections) + self.treatmentName + '-MinLOT-' + str(timeM) + '-minAvgLot-' + str(timeA) + '-vxls-' + str(maxvoxels) + '-ntnsty-' + str(self.yBar)
self.logfile = ''
if imrt:
self.logFile = self.outputDirectory + 'logFile' + self.chunkName + 'IMRT.log'
else:
if relaxedProblem:
self.logFile = self.outputDirectory + 'logFile' + self.chunkName + 'relaxed.log'
elif pairSolution:
self.logFile = self.outputDirectory + 'logFile' + self.chunkName + 'pairSolution.log'
else:
self.logFile = self.outputDirectory + 'logFile' + self.chunkName + 'completeSolution.log'
## Keep the ROI's in a dictionary
def maskNamesGetter(self, maskfile):
lines = tuple(open(maskfile, 'r'))
for line in lines:
if 'ROIIndex =' == line[:10]:
roinumber = line.split(' = ')[1].rstrip()
elif 'ROIName =' == line[:9]:
roiname = line.split(' = ')[1].rstrip()
elif '}' == line[0]:
self.roinames[roinumber] = roiname
else:
pass
def roimask_reader(self, base, fname):
self.OARDict = {}
self.TARGETDict = {}
self.SUPPORTDict = {}
with open(base + fname, 'r') as rmrd:
for line in rmrd:
#print(line)
if 'ROIIndex =' in line:
roiidx = int(line.split(' ')[2])
elif 'ROIName =' in line:
roiname = line.split(' ')[2].rstrip()
elif 'RTROIInterpretedType =' in line:
roitype = line.split(' ')[-1]
#print('roitype:', roitype)
if 'SUPPORT' in roitype or 'AVOIDANCE' in roitype or 'EXTERNAL' in roitype or '=' in roitype:
self.SUPPORTDict[roiidx] = roiname
elif 'ORGAN' in roitype:
self.OARDict[roiidx] = roiname
elif 'PTV' in roitype or 'CTV' in roitype or 'TARGET' in roitype:
self.TARGETDict[roiidx] = roiname
else:
print('rio type not defined')
pass
else:
pass
rmrd.closed
#Merge all dictionaries
self.AllDict = dict(self.SUPPORTDict)
self.AllDict.update(self.OARDict)
self.AllDict.update(self.TARGETDict)
## Get the total number of beamlets
def get_totalbeamlets(self, base, fname):
with open(base + fname, 'r') as szout:
for i, line in enumerate(szout):
if 1 == i:
self.totalbeamlets = int(line)
szout.closed
## Get the dimensions of the voxel big space
def get_dim(self, base, fname):
with open(base + fname, 'r') as header:
dim_xyz = [0] * 3
for i, line in enumerate(header):
if 'x_dim' in line:
dim_x = int(line.split(' ')[2])
if 'y_dim' in line:
dim_y = int(line.split(' ')[2])
if 'z_dim' in line:
dim_z = int(line.split(' ')[2])
header.closed
self.voxelsBigSpace = dim_x * dim_y * dim_z
## Create a map from big to small voxel space, the order of elements is preserved but there is a compression to only
# one element in between.
def BigToSmallCreator(self):
# Notice that the order of voxels IS preserved. So (1,2,3,80,7) produces c = (0,1,2,4,3)
a, b, c, d = np.unique(self.voxels, return_index=True, return_inverse=True, return_counts=True)
print('BigToSmallCreator:size of c. Size of the problem:', len(c))
return(c)
def getNumProjections(self):
with open(self.base_dir + 'motion.txt') as f:
for i, l in enumerate(f):
pass
return i # Do not return -1 because the file has a header.
def removezeroes(self, toremove):
# Next I am removing the voxels that have a mask of zero (0) because they REALLY complicate things otherwise
# Making the problem larger.
#-------------------------------------
# Cut the mask to only the elements contained in the voxel list
voxelindex = np.zeros_like(self.mask)
voxelindex[np.unique(self.voxels)] = 1
self.mask = np.multiply(voxelindex, self.mask)
locats = np.where(toremove[0] == self.mask)[0]
if len(toremove) > 1:
for i in range(1, len(toremove)):
locats = np.concatenate([locats, np.where(toremove[i] == self.mask)[0]])
locats.sort()
self.mask = np.delete(self.mask, locats)
# intersection of voxels and nonzero
indices = np.where(np.in1d(self.voxels, locats))[0]
# Cut whatever is not in the voxels.
self.bixels = np.delete(self.bixels, indices)
self.voxels = np.delete(self.voxels, indices)
self.Dijs = np.delete(self.Dijs, indices)
def removebixels(self, pitch):
bixelkill = np.where(0 != (self.bixels % pitch) )
bixelkill = np.where(self.bixels < 60)
self.bixels = np.delete(self.bixels, bixelkill)
self.smallvoxels = np.delete(self.smallvoxels, bixelkill)
self.Dijs = np.delete(self.Dijs, bixelkill)
self.mask = self.mask[np.unique(self.smallvoxels)]
## Read Weiguo's Case
def readWeiguosCase(self):
# Assign structures and thresholds for each of them in order of how important they are
if "Prostate" == tumorsite:
self.OARList = [21, 6, 11, 13, 14, 8, 12, 15, 7, 9, 5, 4, 20, 19, 18, 10, 22, 10, 11, 17, 12, 3, 15, 16, 9, 5, 4, 20, 21, 19]
self.OARThresholds = [10, 10, 10, 10, 10, 10, 10, 78, 10, 10, 10, 10, 10, 10, 10, 10, 1000, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100]
self.TARGETList = [2]
self.TARGETThresholds = [78]
elif "Lung" == tumorsite:
self.OARList = [5, 4, 7, 3, 2, 13, 6, 1]
self.OARThresholds = [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10]
self.TARGETList = [11, 12]
self.TARGETThresholds = [70, 70]
else:
self.OARList = [4]
self.OARThresholds = [1]
self.TARGETList = [8, 7]
self.TARGETThresholds = [35, 45]
dtype=np.uint32
self.bixels = getvector(self.base_dir + 'dij/Bixels_out.bin', np.int32)
self.voxels = getvector(self.base_dir + 'dij/Voxels_out.bin', np.int32)
self.Dijs = getvector(self.base_dir + 'dij/Dijs_out.bin', np.float32)
self.ALLList = self.TARGETList + self.OARList
# get subsample mask (img_arr will have 1 in the positions where there is data)
img_arr = getvector(self.base_dir + self.img_filename, dtype=dtype)
# Only use a subsample of img_arr
if do_subsample:
img_arr = get_sub_sub_sample(img_arr, self.maxvoxels)
# get structure file (used for the mask)
struct_img_arr = getvector(self.base_dir + self.struct_img_filename, dtype=dtype)
# Convert the mask into a list of unitary structures. A voxel gets assigned to only one place
img_struct = get_structure_mask(reversed(self.ALLList), struct_img_arr)
# Get the subsampled list of voxels
self.mask = get_subsampled_mask(img_struct, img_arr)
# Select only the voxels that exist in the small voxel space provided.
if tumorsite == "Prostate":
self.removezeroes([0, 18])
else:
self.removezeroes([0, 10, 14, 15, 8, 16, 9, 17])
def maxTgtDoses(self, numProjections, k10):
# This function will calculate the maximum bixel dose to a target coming from a particular beamlet
bdoses = np.zeros((self.L, numProjections))
# Placeholder: the per-beamlet maxima are not filled in yet, so an all-zero array is returned.
return bdoses
def solveModel(data):
voxels = range(len(data.mask))
projIni = 1 + np.floor(max(data.bixels / data.L)).astype(int)
numProjections = k10 + projIni
projections = range(numProjections)
projectionsm1 = range(numProjections - 1)
projectionsEven = range(0, numProjections - 1, 2)
leaves = range(data.L)
leafsD = data.leafsD
projectionsD = data.projectionsD
m = None
m = Model("solveModel")
m.params.LogFile = data.logFile
m.params.DisplayInterval = 60
m.params.TimeLimit = 24 * 3600
if relaxedProblem:
m.params.TimeLimit = 2 * 3600
m.params.partitionPlace = 30
if imrt:
m.params.Method = 2
if not imrt:
m.params.BarConvTol = 1e-6 # Default value is 1e-8
m.params.BarQCPConvTol = 1e-6
m.params.FeasibilityTol = 1e-7 # Guarantee feasibility
m.params.ImproveStartGap = 5.0
m.params.ImproveStartTime = 3600 # Change the strategy to finding feasible solution after 1 hour
m.params.MIPGap = 0.05
m.params.MIPFocus = 1
m.params.PreDepRow = 1
m.params.Presolve = 0 # This is probably giving a better solution at the end
m.params.ScaleFlag = 2
#m.params.Heuristics = 0.1
if maxvoxels > 1000:
m.params.Presolve = 2
m.params.Method = 0
if pairSolution:
m.params.Method = -1
m.params.ObjScale = 3
m.params.SimplexPricing = 3
if relaxedProblem:
m.params.Presolve = 2
m.params.Method = 2
if imrt:
print("Putting together the IMRT-like version of the model")
m.params.Presolve = -1
else:
print('Finding the beamlets that should be closed')
bdoses = data.maxTgtDoses(numProjections, k10)
print("Solving the Average LOT Constrained version of the model")
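# Decision variables: z has one entry per voxel, t[l, p] is the opening time of leaf l during projection p
# (bounded above by the projection duration t51), and z_plus / z_minus are non-negative per-voxel deviation terms.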
z = m.addVars(voxels, lb = 0.0, obj = 1.0, vtype = GRB.CONTINUOUS, name = "z")
t = m.addVars(leaves, projections, obj = 1.0, vtype = GRB.CONTINUOUS, name="t", lb = 0.0, ub = t51)
z_plus = m.addVars(voxels, lb = 0.0, vtype = GRB.CONTINUOUS, name = "z_plus")
z_minus = m.addVars(voxels, lb = 0.0, vtype = GRB.CONTINUOUS, name = "z_minus")
## Preparation of the data for faster speeds
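# Variables are grouped into contiguous blocks of projections (roughly one block per core) and the block index
# is assigned to Gurobi's Partition attribute, which drives its partition heuristic.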
perPartition = int(numProjections / numcores) + 1
if imrtwith20msecondsconstraint:
beta = m.addVars(leaves, projections, obj=1.0, vtype=GRB.BINARY, name="beta", ub=1.0, lb=0.0)
for p in projections:
mypartition = int(p/perPartition) + 1
for l in leaves:
beta[l, p].Partition = mypartition
t[l, p].Partition = mypartition
if not imrt:
if relaxedProblem:
variabletype = GRB.CONTINUOUS
thereisaHint = False # Relaxed problem does not get a hint
else:
variabletype = GRB.BINARY
thereisaHint = True # Real problem gets a hint from the relaxed one.
beta = m.addVars(leaves, projections, obj = 1.0, vtype=variabletype, name="beta", ub=1.0, lb=0.0)
if pairSolution:
gamma = m.addVars(leaves, projections, obj = 1.0, vtype=variabletype, name="gamma", ub=1.0, lb=0.0)
else:
blittle = m.addVars(leaves, projections, obj = 1.0, vtype=variabletype, name="blittle", ub=1.0, lb=0.0)
mlittle = m.addVars(leaves, projections, obj = 1.0, vtype=variabletype, name="mlittle", ub=1.0, lb=0.0)
elittle = m.addVars(leaves, projections, obj = 1.0, vtype=variabletype, name="elittle", ub=1.0, lb=0.0)
mathcalT = m.addVar(vtype=GRB.CONTINUOUS, name="mathcalT", lb=0.0)
mathcalN = m.addVar(vtype=GRB.CONTINUOUS, name="mathcalN", lb=0.0)
mathcalT.Partition = 0
mathcalN.Partition = 0
for p in projections:
mypartition = int(p/perPartition) + 1
if pairSolution:
for l in leaves:
beta[l, p].Partition = mypartition
gamma[l, p].Partition = mypartition
t[l, p].Partition = mypartition
else:
for l in leaves:
beta[l, p].BranchPriority = 10
elittle[l,p].BranchPriority = 5
blittle[l,p].VarHintVal = 0
# Partition Assignment
beta[l, p].Partition = mypartition
blittle[l, p].Partition = mypartition
mlittle[l, p].Partition = mypartition
elittle[l, p].Partition = mypartition
t[l, p].Partition = mypartition
v_actual = 0
for v in voxels:
# Find maximum dose to v
# Find positions where v = voxel in voxelarray in big voxel space
indices = np.where(data.smallvoxels == v)[0]
doseCoeffs = data.Dijs[indices]
# find position(s) of maximum doses in All voxels space
maxdosesAll = np.where(np.amax(doseCoeffs) == data.Dijs)[0]
# Find the first one in the intersection of all voxel space and where voxels = this voxel.
thisbixel = data.bixels[np.intersect1d(maxdosesAll, indices)[0]]
thisp = np.floor(thisbixel / data.L).astype(int)
mypartition = int(thisp / perPartition) + 1
z[v_actual].Partition = mypartition
z_plus[v_actual].Partition = mypartition
z_minus[v_actual].Partition = mypartition
v_actual += 1
if thereisaHint:
hintfile = data.outputDirectory + 'hints' + data.chunkName + '.pkl'
hintfile = hintfile.replace("Model-Min", "ModelrelaxedVersion-Min")
try:
myhints = pickle.load(open(hintfile, 'rb'))
except:
print('----Running a relaxed version of the problem--------------')
            callerstring = r'C:\Intel\python\intelpython3\python ' + executor + ' ' + str(timeM) + ' ' + str(
timeA) + ' ' + str(int(imrt)) + ' ' + str(int(imrtwith20msecondsconstraint)) + ' ' + str(
int(pairSolution)) + ' ' + str(1) + ' ' + str(initialProjections) + ' ' + str(int(loadWarmStart))
if do_subsample:
callerstring += ' ' + str(maxvoxels)
print('------- Invoking a version of the model that will create hints file with callerstring:', callerstring)
os.system(callerstring)
myhints = pickle.load(open(hintfile, 'rb'))
print('---------Finished running the relaxed version of the problem--------------')
if loadWarmStart:
warmstartFile = data.outputDirectory + 'Feasible' + data.feasibleName + '.pkl'
warmstartFile = warmstartFile.replace(str(maxvoxels), '2000')
try:
wst = pickle.load(open(warmstartFile, 'rb'))
except:
            callerstring = r'C:\Intel\python\intelpython3\python ' + executor + ' ' + str(timeM) + ' ' + str(
timeA) + ' ' + str(int(imrt)) + ' ' + str(int(imrtwith20msecondsconstraint)) + ' ' + str(
int(pairSolution)) + ' ' + str(0) + ' ' + str(initialProjections) + ' ' + str(0) + ' 2000'
print('------- Invoking a version of the model that will create warm start file with callerstring:', callerstring)
os.system(callerstring)
wst = pickle.load(open(warmstartFile, 'rb'))
for p, l in list(product(projections, leaves)):
if loadWarmStart:
t[l, p].Start = wst['t_out'][p, l]
t[l, p].VarHintVal = myhints['t_out'][p, l]; t[l,p].VarHintPri = 10
if not imrt:
if pairSolution:
gamma[l, p].VarHintVal = myhints['gamma_out'][p, l];
gamma[l, p].VarHintPri = 1
beta[l, p].VarHintVal = myhints['beta_output'][p, l];
beta[l, p].VarHintPri = 1
else:
elittle[l, p].VarHintVal = myhints['elittle_out'][p, l]; elittle[l, p].VarHintPri = 1
mlittle[l, p].VarHintVal = myhints['mlittle_out'][p, l]; mlittle[l, p].VarHintPri = 1
beta[l, p].VarHintVal = myhints['beta_output'][p, l]; beta[l, p].VarHintPri = 2
if loadWarmStart:
if pairSolution:
beta[l, p].Start = wst['beta_output'][p, l]
gamma[l, p].Start = wst['gamma_out'][p, l]
else:
elittle[l, p].Start = wst['elittle_out'][p, l]
mlittle[l, p].Start = wst['mlittle_out'][p, l]
blittle[l, p].Start = wst['blittle_out'][p, l]
beta[l, p].Start = wst['beta_output'][p, l]
if len(voxels) == len(myhints['z_output']): # Size of the previous run is the same size
for v in voxels:
z[v].VarHintVal = myhints['z_output'][v]; z[v].VarHintPri = 6
z_plus[v].VarHintVal = myhints['z_plus_out'][v]; z_plus[v].VarHintPri = 6
z_minus[v].VarHintVal = myhints['z_minus_out'][v]; z_minus[v].VarHintPri = 6
#if loadWarmStart and len(voxels) == len(wst['z_output']):
# for v in voxels:
# z[v].Start = wst['z_output'][v]
# z_plus[v].Start = wst['z_plus_out'][v]
# z_minus[v].Start = wst['z_minus_out'][v]
else: # Size of the previous run was different
zdose = np.zeros(len(voxels))
for l in range(len(data.smallvoxels)):
zdose[data.smallvoxels[l]] += data.Dijs[l] * myhints['t_out'][projectionsD[l] + k10, leafsD[l]]
zdose *= data.yBar
for v in voxels:
z[v].VarHintVal = zdose[v]; z[v].VarHintPri = 6
differenz = zdose[v] - data.quadHelperThresh[v]
if differenz >= 0:
z_plus[v].VarHintVal = differenz; z_plus[v].VarHintPri = 6
z_minus[v].VarHintVal = 0.0; z_minus[v].VarHintPri = 6
else:
z_minus[v].VarHintVal = -1 * differenz; z_minus[v].VarHintPri = 6
z_plus[v].VarHintVal = 0.0; z_plus[v].VarHintPri = 6
m.update()
print("Putting together the constraints in the model")
hs = [LinExpr(0.0) for _ in voxels]
[hs[data.smallvoxels[l]].add(data.Dijs[l] * t[leafsD[l], projectionsD[l] + k10]) for l in range(len(data.smallvoxels))]
[m.addConstr(z[v] == data.yBar * hs[v], name="doses_to_j_yparam[" + str(v) + "]") for v in voxels]
positive_only = m.addConstrs((z_plus[v] - z_minus[v] == z[v] - data.quadHelperThresh[v] for v in voxels), "positive_only")
myObj = QuadExpr(0.0)
print('working on the voxels')
for v in voxels:
myObj.add(data.quadHelperUnder[v] * z_minus[v] * z_minus[v] + data.quadHelperOver[v] * z_plus[v] * z_plus[v])
print('done working in the voxels')
closed_ghost = m.addConstrs((0 == t[l, p] for l in leaves for p in range(k10)), "closed_ghost")
close_zeros = list()
czcounter = 0
print('working on the close_zeros constraints')
for l in leaves:
for p in projections:
if data.bdata[p, l] < 0.0001:
close_zeros.append(m.addConstr((0 == t[l, p]), 'close_zeros_' + str(l) + '_' + str(p)))
czcounter += 1
    print('closing a total of beamlets:', czcounter)
if imrt:
if imrtwith20msecondsconstraint:
            # Force a minimum opening time of 20 ms whenever a beamlet is open (beta = 1 indicates an open beamlet)
twentymsecond_a = m.addConstrs((0.02 * beta[l, p] <= t[l, p] for l in leaves for p in projections), "fivesecond_a")
twentymsecond_b = m.addConstrs((t[l, p] <= t51 * beta[l, p] for l in leaves for p in projections),
"fivesecond_b")
else:
close_ghost_beta = m.addConstrs((0 == beta[l, p] for l in leaves for p in range(k10)), "close_ghost_beta")
time_per_projection_b = m.addConstrs((t[l, p] <= t51 * beta[l, p] for l in leaves for p in projections),
"time_per_projection_b")
allts = LinExpr(0.0)
allns = LinExpr(0.0)
if pairSolution:
cancel_odd = m.addConstrs(
(0 == gamma[l, p + 1] for l in leaves for p in projectionsEven),
"cancel_odd")
gamma_1 = m.addConstrs(
(gamma[l, p] <= beta[l, p] + beta[l, p + 1] for l in leaves for p in projectionsEven),
"gamma_1")
gamma_2 = m.addConstrs(
(beta[l, p] + beta[l, p + 1] <= 2 * gamma[l, p] for l in leaves for p in projectionsEven),
"gamma_2")
minimum_lot = m.addConstrs(
(t[l, p] + t[l, p + 1] >= data.timeM * gamma[l, p] for l in leaves for p in projectionsEven),
"minimum_lot")
for l in range(data.L):
for p in range(k10, numProjections):
allts.add(t[l,p])
allns.add(gamma[l, p])
else:
time_per_projection_a = m.addConstrs((t51 * mlittle[l, p] <= t[l, p] for l in leaves for p in projections), "time_per_projection_a")
three_options = m.addConstrs((elittle[l, p] + mlittle[l, p] + blittle[l, p] == beta[l, p] for l in leaves for p in projections), "three_options")
m_follows_m_or_e = m.addConstrs((mlittle[l, p + 1] <= mlittle[l, p] + elittle[l, p] for l in leaves for p in projectionsm1),"m_follows_m_or_e")
b_follows_m_or_e = m.addConstrs(
(blittle[l, p + 1] <= mlittle[l, p] + elittle[l, p] for l in leaves for p in projectionsm1),
"b_follows_m_or_e")
minimul_lot_eb = m.addConstrs((t[l, p] + t[l, p + 1] >= data.timeM * (elittle[l, p] + blittle[l, p + 1] - 1) for l in leaves for p in projectionsm1), "minimum_lot_eb")
minimul_lot_e = m.addConstrs((t[l, p] >= data.timeM * (elittle[l, p] + elittle[l, p + 1] - beta[l, p + 1]) for l in leaves for p in projectionsm1), "minimum_lot_e")
for l in range(data.L):
for p in range(k10, numProjections):
allts.add(t[l,p])
allns.add(elittle[l, p])
sumAllOpeningEvents = m.addConstr(mathcalN == allns, "sumAllOpeningEvents")
sumAllOpeningTimes = m.addConstr(mathcalT == allts, "sumAllOpeningTimes")
Average_LOT_c = m.addConstr((mathcalT >= data.timeA * mathcalN), "Average_LOT_c")
m.setObjective(myObj, GRB.MINIMIZE)
m.update()
print('--- Starting the GUROBI optimization ---')
m.write('IMRTAnalysis' + str(initialProjections) + '.mps')
m.optimize()
m.printQuality()
z_output = [v.x for v in m.getVars()[0:len(voxels)]]
zplus_output = np.zeros(len(z_output), dtype=float)
zminus_output = np.zeros(len(z_output), dtype=float)
t_output = np.zeros((numProjections, data.L), dtype=float)
if not imrt:
beta_output = np.zeros((numProjections, data.L), dtype=float)
if pairSolution:
gamma_output = np.zeros((numProjections, data.L), dtype=float)
else:
blittle_output = np.zeros((numProjections, data.L), dtype=float)
mlittle_output = np.zeros((numProjections, data.L), dtype=float)
elittle_output = np.zeros((numProjections, data.L), dtype=float)
for v in range(len(z_output)):
zminus_output[v] = z_minus[v].x
zplus_output[v] = z_plus[v].x
for p, l in list(product(projections, leaves)):
t_output[p, l] = t[l, p].x
if not imrt:
beta_output[p,l] = beta[l, p].X
if pairSolution:
gamma_output[p, l] = gamma[l, p].X
else:
blittle_output[p, l] = blittle[l, p].x
mlittle_output[p, l] = mlittle[l, p].x
elittle_output[p, l] = elittle[l, p].x
#tn = np.transpose(np.reshape(t_output, (data.L, numProjections)))
#np.savetxt("foo.csv", tn, delimiter=",")
if imrt:
d = {"z_out": z, "z_plus_out": z_plus, "z_minus_out": z_minus, "t_out": t_output, "z_output": z_output,
"objVal": m.objVal}
else:
if pairSolution:
d = {"t_out": t_output, "z_output": z_output, "z_plus_out": zplus_output, "z_minus_out": zminus_output,
"gamma_out": gamma_output, "beta_output": beta_output,
"slackAvgLOT": Average_LOT_c.getAttr("Slack"),
"gurobisT": mathcalT.x, "gurobisN": mathcalN.x, "objVal": m.objVal}
else:
d = {"t_out": t_output, "z_output": z_output, "z_plus_out": zplus_output, "z_minus_out": zminus_output,
"mlittle_out": mlittle_output, "blittle_out": blittle_output,
"elittle_out": elittle_output, "beta_output": beta_output, "slackAvgLOT": Average_LOT_c.getAttr("Slack"),
"gurobisT": mathcalT.x, "gurobisN": mathcalN.x, "objVal": m.objVal}
if relaxedProblem:
print('printing the relaxed hints file: ', data.outputDirectory + 'Feasible' + data.chunkName + '.pkl')
outputFile = open(data.outputDirectory + 'hints' + data.chunkName + '.pkl', 'wb')
else:
print('If this is a warm start. It is saved on file: ', data.outputDirectory + 'Feasible' + data.feasibleName + '.pkl')
outputFile = open(data.outputDirectory + 'Feasible' + data.feasibleName + '.pkl', 'wb')
pickle.dump(d, outputFile)
outputFile.close()
print(m.params)
return(d)
# Plot the dose volume histogram
def plotDVHNoClass(data, z, NameTag='', showPlot=False):
voxDict = {}
plotList = [6, 7, 8, 10, 13, 14, 2]
data.TARGETList = np.intersect1d(np.array(data.TARGETList), np.unique(data.mask))
data.TARGETList = np.intersect1d(np.array(data.TARGETList), plotList)
data.OARList = np.intersect1d(np.array(data.OARList), np.unique(data.mask))
data.OARList = np.intersect1d(np.array(data.OARList), plotList)
for t in data.TARGETList:
voxDict[t] = np.where(data.mask == t)[0]
for o in data.OARList:
voxDict[o] = np.where(data.mask == o)[0]
dose = np.array([z[j] for j in range(data.totalsmallvoxels)])
plt.clf()
for index, sValues in voxDict.items():
sVoxels = sValues
hist, bins = np.histogram(dose[sVoxels], bins=100)
dvh = 1. - np.cumsum(hist) / float(sVoxels.shape[0])
dvh = np.insert(dvh, 0, 1)
plt.plot(bins, dvh, label=data.AllDict[index], linewidth=2)
lgd = plt.legend(fancybox=True, framealpha=0.5, bbox_to_anchor=(1.05, 1), loc=2)
plt.grid(True)
plt.xlabel('Dose Gray')
plt.ylabel('Fractional Volume')
if imrt:
if imrtwith20msecondsconstraint:
plt.title('DVH-' + tumorsite + ' IMRT benchmark with 20 msec constraint')
else:
plt.title('DVH-' + tumorsite + ' IMRT benchmark')
else:
        plt.title('DVH-' + tumorsite + ' min. LOT = ' + str(data.timeM) + ' and min. AvgLOT = ' + str(data.timeA))
print(data.outputDirectory + 'DVH' + data.chunkName + '.png')
plt.savefig(data.outputDirectory + 'DVH' + data.chunkName + '.png', bbox_extra_artists=(lgd,), bbox_inches='tight')
if showPlot:
plt.show()
plt.close()
def plotSinogram(t, L, data):
plt.figure()
ax = gca()
lines = []
for l in range(L):
for aperture in range(len(t[l])):
a, b = t[l][aperture]
lines.append([(a, l), (b, l)])
lc = mc.LineCollection(lines, linewidths = 3, colors = 'red')
fig, ax = pl.subplots()
ax.add_collection(lc)
ax.autoscale()
plt.title('Sinogram')
plt.xlabel('time in seconds')
plt.ylabel('leaves')
plt.savefig(data.outputDirectory + 'Sinogram' + data.chunkName + '.png')
def calculateTIMRT(numProjections, t51, tim, data):
# t is a list that contains a list per leaf. In this inner list, you get the time when it opens and the time when it
# closes as a pair. leavelengths will contain the total opening times
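    # Illustration with made-up numbers: t = [[[0.00, 0.08], [0.51, 0.60]], [], ...] would mean leaf 0
    # opened from 0.00 s to 0.08 s and again from 0.51 s to 0.60 s, so 0.08 and 0.09 are appended to leavelengths.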
t = [list() for _ in range(data.L)] # there is one for each of the leaves
leavelengths = []
projectionsEven = range(0, numProjections - 1, 2)
for p in projectionsEven:
time1 = t51 * p
time2 = t51 * (p + 1)
for l in range(data.L):
if tim[p, l] > 0:
t[l].append([time1, time1 + tim[p, l]])
leavelengths.append(tim[p, l])
if tim[p + 1, l] > 0:
t[l].append([time2, time2 + tim[p + 1, l]])
leavelengths.append(tim[p + 1, l])
return([t, leavelengths])
def calculateT(numProjections, t51, tim, data, elittle, mlittle, blittle):
# t is a list that contains a list per leaf. In this inner list, you get the time when it opens and the time when it
# closes as a pair. leavelengths will contain the total opening times
t = [list() for _ in range(data.L)] # there is one for each of the leaves
projections = range(0, numProjections)
leavelengths = []
for l in range(data.L):
# This implies that the opening is new or continuing from before
continuousopening = False
for p in projections:
timeright = t51 * (p + 1)
timeleft = t51 * p
# Handle special case of last projection
if p == numProjections - 1:
if continuousopening:
endingtime = timeleft + tim[p, l]
t[l].append([beginningtime, endingtime])
leavelengths.append(endingtime - beginningtime)
continuousopening = False
else:
if 1 == elittle[p, l]:
# If it opened in the right projection it HAS to be centered
midtime = timeleft + t51 / 2.0
beginningtime = midtime - tim[p, l] / 2.0
endingtime = midtime + tim[p, l] / 2.0
t[l].append([beginningtime, endingtime])
leavelengths.append(endingtime - beginningtime)
continuousopening = False # this line unnecessary
else:
if continuousopening:
if (1 == blittle[p, l] or (0 == mlittle[p + 1, l] + blittle[p + 1, l])):
# This is when a continuing aperture closes in this projection
if 1 == blittle[p, l]:
endingtime = timeleft + tim[p, l]
else:
endingtime = timeright
t[l].append([beginningtime, endingtime])
leavelengths.append(endingtime - beginningtime)
continuousopening = False
else:
# The aperture continues
pass
else:
if elittle[p, l] == 1 and tim[p, l] > 0:
# A new opening event started.
if 0 == mlittle[p + 1, l] + blittle[p + 1, l] or p == (numProjections - 1):
# If it closed right now it HAS to be centered
midtime = timeleft + t51 / 2.0
beginningtime = midtime - tim[p, l] / 2.0
endingtime = midtime + tim[p, l] / 2.0
t[l].append([beginningtime, endingtime])
leavelengths.append(endingtime - beginningtime)
continuousopening = False #this line unnecessary
                        # But if it didn't close it must continue
else:
beginningtime = timeright - tim[p, l]
# endingtime, leavelengths and t not assigned
continuousopening = True
else:
#Leaves were closed and they continue to be closed. Nothing happened
pass
return([t, leavelengths])
def sinogramAndHistogramYesIMRT(d, data):
projIni = 1 + np.floor(max(data.bixels / data.L)).astype(int)
numProjections = k10 + projIni
projections = range(numProjections)
tim = d["t_out"]
abc = dict()
abc['numProjections'] = numProjections
abc['t51'] = t51
abc['tim'] = tim
abc['data'] = data
with open(data.outputDirectory + 'calculateT' + data.chunkName + '.pkl', "wb") as f:
pickle.dump(abc, f, pickle.HIGHEST_PROTOCOL)
f.close()
t, leavelengths = calculateTIMRT(numProjections, t51, tim, data)
plotSinogram(t, data.L, data)
plt.clf()
binsequence = [i for i in np.arange(min(leavelengths), max(leavelengths), 0.01)] + [max(leavelengths)]
plt.hist(np.array(leavelengths), bins = binsequence)
# Add a few extra ticks to the labels
#extraticks = [a['minLength'], a['avLength']]
#plt.xticks(list(plt.xticks()[0]) + extraticks)
plt.xlabel('Leaf Opening Times')
if imrtwith20msecondsconstraint:
        plt.title('histogram IMRT with intensity: ' + str(data.yBar) + ' with cutoff 20 msecs')
else:
plt.title('histogram IMRT with intensity: ' + str(data.yBar))
plt.savefig(data.outputDirectory + 'histogram' + data.chunkName + '.png')
abc = dict()
totalLength = 0.0
n = 0
minLength = 1000.0
maxLength = -1.0
for l in range(data.L):
for aperture in range(len(t[l])):
a, b = t[l][aperture]
totalLength += b - a
maxLength = max(maxLength, b-a)
n += 1
# min length
if b-a > 0.000001:
minLength = min(b-a, minLength)
abc['avLength'] = totalLength / n
abc['totalLength'] = totalLength
abc['minLength'] = minLength
abc['modFactor'] = maxLength / (totalLength / n)
abc['t'] = t
abc['leavelengths'] = leavelengths
abc['objVal'] = d['objVal']
print('average length measured by me:', abc['avLength'])
print('objective Value:', abc['objVal'])
output3 = open(data.outputDirectory + 'pickleresults-' + data.chunkName + '.pkl', 'wb')
pickle.dump(abc, output3, pickle.HIGHEST_PROTOCOL)
output3.close()
def calculateTpairSolution(numProjections, t51, tim, data, gamma):
# t is a list that contains a list per leaf. In this inner list, you get the time when it opens and the time when it
# closes as a pair. leavelengths will contain the total opening times
t = [list() for _ in range(data.L)] # there is one for each of the leaves
projections = range(0, numProjections)
projectionsEven = range(0, numProjections, 2)
leavelengths = []
for l in range(data.L):
# This implies that the opening is new or continuing from before
for p in projectionsEven:
timemiddle = t51 * (p + 1)
            if p == numProjections - 1: # last projection has no pair, so it opens on its own
                if gamma[p, l]:
                    beginningtime = t51 * p; endingtime = beginningtime + tim[p, l]
                    t[l].append([beginningtime, endingtime])
                    leavelengths.append(endingtime - beginningtime)
            elif gamma[p, l]:
                beginningtime = timemiddle - tim[p, l]; endingtime = timemiddle + tim[p + 1, l]
                t[l].append([beginningtime, endingtime])
                leavelengths.append(endingtime - beginningtime)
return([t, leavelengths])
def sinogramAndHistogramNoIMRT(d, data):
projIni = 1 + np.floor(max(data.bixels / data.L)).astype(int)
numProjections = k10 + projIni
tim = d["t_out"]
beta = d["beta_output"]
abc = dict()
if pairSolution:
gamma = d["gamma_out"]
abc['gamma'] = gamma
else:
elittle = d["elittle_out"]
blittle = d["blittle_out"]
mlittle = d["mlittle_out"]
abc['elittle'] = elittle
abc['mlittle'] = mlittle
abc['blittle'] = blittle
abc['numProjections'] = numProjections
abc['t51'] = t51
abc['tim'] = tim
abc['data'] = data
abc['z_output'] = d['z_output']
with open(data.outputDirectory + 'calculateT' + data.chunkName + '.pkl', "wb") as f:
pickle.dump(abc, f, pickle.HIGHEST_PROTOCOL)
f.close()
# contains pairs when the aperture opens and closes
if pairSolution:
t, leavelengths = calculateTpairSolution(numProjections, t51, tim, data, gamma)
else:
t, leavelengths = calculateT(numProjections, t51, tim, data, elittle, mlittle, blittle)
abc = dict()
totalLength = 0.0
n = 0
minLength = 1000.0
maxLength = -1.0
for l in range(data.L):
for aperture in range(len(t[l])):
a, b = t[l][aperture]
totalLength += b - a
#print('mynumbers', b - a)
maxLength = max(maxLength, b-a)
n += 1
# min length
if b-a > 0.000001:
minLength = min(b-a, minLength)
abc['avLength'] = totalLength / n
abc['totalLength'] = totalLength
abc['minLength'] = minLength
abc['modFactor'] = maxLength / (totalLength / n)
abc['t'] = t
abc['leavelengths'] = leavelengths
abc['t_output'] = tim
abc['beta_output'] = beta
abc['slackAvgLOT'] = d["slackAvgLOT"]
abc['gurobisT'] = d['gurobisT']
abc['myN'] = len(leavelengths)
abc['mySecondN'] = n
abc['gurobisN'] = d['gurobisN']
abc['gurobiAvLength'] = abc['gurobisT'] / abc['gurobisN']
abc['objVal'] = d['objVal']
    print('whole package:', abc)
print('average length measured by me:', abc['avLength'], ' and measured by GUROBI: ', abc['gurobiAvLength'])
print('objective Value:', abc['objVal'])
print('minimum length:', minLength)
print('modulation factor:', abc['modFactor'])
plotSinogram(t, data.L, data)
plt.clf()
binsequence = [i for i in np.arange(min(leavelengths), max(leavelengths), 0.01)] + [max(leavelengths)]
plt.hist(np.array(leavelengths), bins = binsequence)
# Add a few extra ticks to the labels
plt.xlabel('Leaf Opening Times')
plt.text(abc['minLength'], 7, str(abc['minLength'])[0:6], color='r', rotation=89)
plt.text(abc['avLength'], 7, str(abc['avLength'])[0:6], color='r', rotation=89)
plt.title('histogram: Min LOT Goal: ' + str(data.timeM) + ' Actual:' + str(abc['minLength'])[0:6] +
' AvgLOT goal:' + str(data.timeA) + ' Actual: ' + str(abc['avLength'])[0:6] )
plt.savefig(data.outputDirectory + 'histogram' + data.chunkName + '.png')
# Let's pickle save the data results
output2 = open(data.outputDirectory + 'pickleresults-' + data.chunkName + '.pkl', 'wb')
pickle.dump(abc, output2)
output2.close()
return(t)
dataobject = tomodata()
d = solveModel(dataobject)
# Save info to create dvhs later
#####################################
#####################################
#####################################
output2 = open(dataobject.outputDirectory + dataobject.chunkName + '-z.pkl', 'wb')
pickle.dump(d["z_output"], output2)
output2.close()
try:
output = open(dataobject.outputDirectory + dataobject.chunkName + '-dataobject.pkl', 'wb')
pickle.dump(dataobject, output)
output.close()
except:
print("couldn't dump the object")
#####################################
#####################################
#####################################
plotDVHNoClass(dataobject, d["z_output"], 'dvh')
if imrt:
sinogramAndHistogramYesIMRT(d, dataobject)
else:
if not relaxedProblem:
t = sinogramAndHistogramNoIMRT(d, dataobject)
print('total time:', time.time() - initialTime)
sys.exit()
start_time = time.time()
# Erase everything in the warmstart file
f = open("warmstart.dat", "w")
f.close()
oldobj = np.inf
for i in [1, 2, 4, 8, 16, 32]:
start_time = time.time()
#pstring = runAMPL(maxvoxels, i, tumorsite)
totalAMPLtime = (time.time() - start_time)
print("--- %s seconds running the AMPL part---" % totalAMPLtime)
z, betas, B, cgamma, lgamma, newobj = readDosefromtext(pstring)
print('new obj:', newobj)
mej = (newobj - oldobj)/oldobj
oldobj = newobj
print('reduction:', mej)
if mej < 0.01:
break
output2 = open('z.pkl', 'wb')
pickle.dump(z, output2)
output2.close()
output = open('dataobject.pkl', 'wb')
try:
pickle.dump(dataobject, output)
except:
print("dataobject was never defined")
output.close()
totalAMPLtime = (time.time() - start_time)
print("--- %s seconds running the AMPL part---" % totalAMPLtime)
if len(sys.argv) > 4:
print("tabledresults: ", sys.argv[1], sys.argv[2], sys.argv[3], dataobject.totalsmallvoxels, totalAMPLtime)
# Ignore errors that correspond to DVH Plot
try:
pass
# Correct this and bring back the plotting when I can.
#plotDVHNoClass(dataobject, z, 'dvh')
except IndexError:
print("Index is out of bounds and no DVH plot will be generated. However, I am ignoring this error for now.")
# Output ampl results for the next run in case something fails.
text_output = open("amploutput.txt", "wb")
text_output.write(pstring)
text_output.close()
|
{
"content_hash": "ef926433b4cf17849af7770433d3bed1",
"timestamp": "",
"source": "github",
"line_count": 1126,
"max_line_length": 216,
"avg_line_length": 47.66429840142096,
"alnum_prop": 0.5722191168250419,
"repo_name": "wilmerhenao/Tomotherapy-Without-Pulse",
"id": "01792678997ad13c58252db7709d04dbdea426be",
"size": "53670",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "OrganizedmultiToolIMRTtest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "657"
},
{
"name": "CSS",
"bytes": "35396"
},
{
"name": "HTML",
"bytes": "75600"
},
{
"name": "JavaScript",
"bytes": "33553"
},
{
"name": "Makefile",
"bytes": "544"
},
{
"name": "Python",
"bytes": "186940"
},
{
"name": "TeX",
"bytes": "38948"
}
],
"symlink_target": ""
}
|
"""Tests for tensorflow.ops.reverse_sequence_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import sys
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
class WhereOpTest(test.TestCase):
def _testWhere(self, x, truth, expected_err_re=None, fn=array_ops.where):
with self.cached_session(use_gpu=True):
ans = fn(x)
self.assertTrue(ans.get_shape().is_compatible_with([None, x.ndim]))
if expected_err_re is None:
tf_ans = self.evaluate(ans)
self.assertAllClose(tf_ans, truth, atol=1e-10)
else:
with self.assertRaisesOpError(expected_err_re):
self.evaluate(ans)
def _testWrongNumbers(self, fn=array_ops.where):
with self.session(use_gpu=True):
with self.assertRaises(ValueError):
fn([False, True], [1, 2], None)
with self.assertRaises(ValueError):
fn([False, True], None, [1, 2])
def _testBasicVec(self, fn=array_ops.where):
x = np.asarray([True, False])
truth = np.asarray([[0]], dtype=np.int64)
self._testWhere(x, truth, None, fn)
x = np.asarray([False, True, False])
truth = np.asarray([[1]], dtype=np.int64)
self._testWhere(x, truth, None, fn)
x = np.asarray([False, False, True, False, True])
truth = np.asarray([[2], [4]], dtype=np.int64)
self._testWhere(x, truth, None, fn)
def _testRandomVec(self, fn=array_ops.where):
x = np.random.rand(1000000) > 0.5
truth = np.vstack([np.where(x)[0].astype(np.int64)]).T
self._testWhere(x, truth, None, fn)
def _testBasicMat(self, fn=array_ops.where):
x = np.asarray([[True, False], [True, False]])
# Ensure RowMajor mode
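    # where() reports the coordinates of True entries in row-major (C) order, so for this
    # 2x2 input the expected indices are (0, 0) followed by (1, 0).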
truth = np.asarray([[0, 0], [1, 0]], dtype=np.int64)
self._testWhere(x, truth, None, fn)
def _testBasic3Tensor(self, fn=array_ops.where):
x = np.asarray([[[True, False], [True, False]],
[[False, True], [False, True]],
[[False, False], [False, True]]])
# Ensure RowMajor mode
truth = np.asarray(
[[0, 0, 0], [0, 1, 0], [1, 0, 1], [1, 1, 1], [2, 1, 1]], dtype=np.int64)
self._testWhere(x, truth, None, fn)
def _testRandom(self, dtype, expected_err_re=None, fn=array_ops.where):
shape = [127, 33, 53]
x = np.random.randn(*shape) + 1j * np.random.randn(*shape)
x = (np.random.randn(*shape) > 0).astype(dtype)
truth = np.where(np.abs(x) > 0) # Tuples of indices by axis.
truth = np.vstack(truth).T # Convert to [num_true, indices].
self._testWhere(x, truth, expected_err_re, fn)
def _testThreeArgument(self, fn=array_ops.where):
x = np.array([[-2, 3, -1], [1, -3, -3]])
np_val = np.where(x > 0, x * x, -x)
with self.test_session(use_gpu=True):
tf_val = self.evaluate(fn(constant_op.constant(x) > 0, x * x, -x))
self.assertAllEqual(tf_val, np_val)
def testWrongNumbers(self):
self._testWrongNumbers()
@test_util.run_deprecated_v1
def testBasicVec(self):
self._testBasicVec()
@test_util.run_deprecated_v1
def testRandomVec(self):
self._testRandomVec()
@test_util.run_deprecated_v1
def testBasicMat(self):
self._testBasicMat()
@test_util.run_deprecated_v1
def testBasic3Tensor(self):
self._testBasic3Tensor()
@test_util.run_deprecated_v1
def testRandomBool(self):
self._testRandom(np.bool)
@test_util.run_deprecated_v1
def testRandomInt32(self):
self._testRandom(np.int32)
@test_util.run_deprecated_v1
def testRandomInt64(self):
self._testRandom(np.int64)
@test_util.run_deprecated_v1
def testRandomFloat(self):
self._testRandom(np.float32)
@test_util.run_deprecated_v1
def testRandomDouble(self):
self._testRandom(np.float64)
@test_util.run_deprecated_v1
def testRandomComplex64(self):
self._testRandom(np.complex64)
@test_util.run_deprecated_v1
def testRandomComplex128(self):
self._testRandom(np.complex128)
@test_util.run_deprecated_v1
def testRandomUint8(self):
self._testRandom(np.uint8)
@test_util.run_deprecated_v1
def testRandomInt8(self):
self._testRandom(np.int8)
@test_util.run_deprecated_v1
def testRandomInt16(self):
self._testRandom(np.int16)
@test_util.run_deprecated_v1
def testThreeArgument(self):
self._testThreeArgument()
def testV2WrongNumbers(self):
self._testWrongNumbers(array_ops.where_v2)
def testV2BasicVec(self):
self._testBasicVec(array_ops.where_v2)
def testV2RandomVec(self):
self._testRandomVec(array_ops.where_v2)
def testV2BasicMat(self):
self._testBasicMat(array_ops.where_v2)
def testV2Basic3Tensor(self):
self._testBasic3Tensor(array_ops.where_v2)
def testV2RandomBool(self):
self._testRandom(np.bool, None, array_ops.where_v2)
def testV2RandomInt32(self):
self._testRandom(np.int32, None, array_ops.where_v2)
def testV2RandomInt64(self):
self._testRandom(np.int64, None, array_ops.where_v2)
def testV2RandomFloat(self):
self._testRandom(np.float32, None, array_ops.where_v2)
def testV2RandomDouble(self):
self._testRandom(np.float64, None, array_ops.where_v2)
def testV2RandomComplex64(self):
self._testRandom(np.complex64, None, array_ops.where_v2)
def testV2RandomComplex128(self):
self._testRandom(np.complex128, None, array_ops.where_v2)
def testV2RandomUint8(self):
self._testRandom(np.uint8, None, array_ops.where_v2)
def testV2RandomInt8(self):
self._testRandom(np.int8, None, array_ops.where_v2)
def testV2RandomInt16(self):
self._testRandom(np.int16, None, array_ops.where_v2)
def testV2ThreeArgument(self):
self._testThreeArgument(array_ops.where_v2)
def testV2Broadcasting(self):
f = np.random.normal(0, 1, (3, 5, 1, 1))
x = np.zeros((7, 11))
y = np.ones((7, 11))
np_val = np.where(f < 0, x, y)
with self.test_session(use_gpu=True):
tf_val = self.evaluate(
array_ops.where_v2(constant_op.constant(f) < 0, x, y))
self.assertAllEqual(tf_val, np_val)
def testV2ScalarBroadcasting(self):
x = np.zeros((7, 11))
y = np.ones((7, 11))
np_val = np.where(True, x, y)
with self.test_session(use_gpu=True):
tf_val = self.evaluate(
array_ops.where_v2(
constant_op.constant(True, dtype=dtypes.bool), x, y))
self.assertAllEqual(tf_val, np_val)
def testV2VectorBroadcasting(self):
x = np.zeros(7)
y = np.ones(7)
np_val = np.where([True], x, y)
with self.test_session(use_gpu=True):
tf_val = self.evaluate(
array_ops.where_v2(
constant_op.constant([True], dtype=dtypes.bool), x, y))
self.assertAllEqual(tf_val, np_val)
def testV2PredBroadcasting(self):
pred = np.array([1, 0, 0]).reshape((3, 1))
x = np.random.randn(3, 4)
y = np.random.randn(3, 4)
np_val = np.where(pred, x, y)
with self.test_session(use_gpu=True):
tf_val = self.evaluate(array_ops.where_v2(pred, x, y))
self.assertAllClose(tf_val, np_val)
@test_util.run_deprecated_v1
def testBatchSelect(self):
x = np.array([[-2, 3, -1] * 64, [1, -3, -3] * 64] * 8192) # [16384, 192]
c_mat = np.array([[False] * 192, [True] * 192] * 8192) # [16384, 192]
c_vec = np.array([False, True] * 8192) # [16384]
np_val = np.where(c_mat, x * x, -x)
with self.session(use_gpu=True):
tf_val = array_ops.where(c_vec, x * x, -x).eval()
self.assertAllEqual(tf_val, np_val)
class WhereBenchmark(test.Benchmark):
def benchmarkWhere(self):
for (m, n, p, use_gpu) in itertools.product(
[10],
[10, 100, 1000, 10000, 100000, 1000000],
[0.01, 0.5, 0.99],
[False, True]):
name = "m_%d_n_%d_p_%g_use_gpu_%s" % (m, n, p, use_gpu)
device = "/%s:0" % ("gpu" if use_gpu else "cpu")
with ops.Graph().as_default():
with ops.device(device):
x = random_ops.random_uniform((m, n), dtype=dtypes.float32) <= p
v = resource_variable_ops.ResourceVariable(x)
op = array_ops.where(v)
with session.Session(config=benchmark.benchmark_config()) as sess:
self.evaluate(v.initializer)
r = self.run_op_benchmark(sess, op, min_iters=100, name=name)
gb_processed_input = m * n / 1.0e9
# approximate size of output: m*n*p int64s for each axis.
gb_processed_output = 2 * 8 * m * n * p / 1.0e9
gb_processed = gb_processed_input + gb_processed_output
throughput = gb_processed / r["wall_time"]
print("Benchmark: %s \t wall_time: %0.03g s \t "
"Throughput: %0.03g GB/s" % (name, r["wall_time"], throughput))
sys.stdout.flush()
def benchmarkBatchSelect(self):
for (m, n, use_gpu) in itertools.product([1000, 10000, 100000],
[10, 100, 1000], [False, True]):
name = "m_%d_n_%d_use_gpu_%s" % (m, n, use_gpu)
device = "/%s:0" % ("gpu" if use_gpu else "cpu")
with ops.Graph().as_default():
with ops.device(device):
x_gen = random_ops.random_uniform([m, n], dtype=dtypes.float32)
y_gen = random_ops.random_uniform([m, n], dtype=dtypes.float32)
c_gen = random_ops.random_uniform([m], dtype=dtypes.float32) <= 0.5
x = resource_variable_ops.ResourceVariable(x_gen)
y = resource_variable_ops.ResourceVariable(y_gen)
c = resource_variable_ops.ResourceVariable(c_gen)
op = array_ops.where(c, x, y)
with session.Session(config=benchmark.benchmark_config()) as sess:
self.evaluate(x.initializer)
self.evaluate(y.initializer)
self.evaluate(c.initializer)
r = self.run_op_benchmark(sess, op, min_iters=100, name=name)
# approximate size of output: m*n*2 floats for each axis.
gb_processed = m * n * 8 / 1.0e9
throughput = gb_processed / r["wall_time"]
print("Benchmark: %s \t wall_time: %0.03g s \t "
"Throughput: %0.03g GB/s" % (name, r["wall_time"], throughput))
sys.stdout.flush()
if __name__ == "__main__":
test.main()
|
{
"content_hash": "822fe6ca09f7c4e1a8630f00d31a5140",
"timestamp": "",
"source": "github",
"line_count": 312,
"max_line_length": 80,
"avg_line_length": 33.91025641025641,
"alnum_prop": 0.635538752362949,
"repo_name": "davidzchen/tensorflow",
"id": "c16d016f5e394a4cc9c46ecfd70e98e3f40b190c",
"size": "11269",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tensorflow/python/kernel_tests/where_op_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "32240"
},
{
"name": "Batchfile",
"bytes": "55269"
},
{
"name": "C",
"bytes": "887514"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "81865221"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "112853"
},
{
"name": "Go",
"bytes": "1867241"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "971474"
},
{
"name": "Jupyter Notebook",
"bytes": "549437"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1921657"
},
{
"name": "Makefile",
"bytes": "65901"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "316967"
},
{
"name": "PHP",
"bytes": "4236"
},
{
"name": "Pascal",
"bytes": "318"
},
{
"name": "Pawn",
"bytes": "19963"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "37285698"
},
{
"name": "RobotFramework",
"bytes": "1779"
},
{
"name": "Roff",
"bytes": "2705"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "SWIG",
"bytes": "8992"
},
{
"name": "Shell",
"bytes": "700629"
},
{
"name": "Smarty",
"bytes": "35540"
},
{
"name": "Starlark",
"bytes": "3604653"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Video Copy Manager'
copyright = '2016, Florian Zierler'
author = 'Florian Zierler'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.5.2'
# The full version, including alpha/beta/rc tags.
release = '3.5.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'VideoCopyManagerdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'VideoCopyManager.tex', 'Video Copy Manager Documentation',
'Florian Zierler', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'videocopymanager', 'Video Copy Manager Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'VideoCopyManager', 'Video Copy Manager Documentation',
author, 'VideoCopyManager', 'One line description of project.',
'Miscellaneous'),
]
|
{
"content_hash": "a7be53a1a4f8c89387048217c0716ccc",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 78,
"avg_line_length": 32.19827586206897,
"alnum_prop": 0.6704149933065596,
"repo_name": "zierler-f/Video-Copy-Manager",
"id": "e1068f48569518398dbad404d42ff1e00b1e0d40",
"size": "4821",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12823"
}
],
"symlink_target": ""
}
|
"""Test that parsing can fail."""
from pytest import raises
from envargs import errors
from envargs import parse_dict
from envargs import Var
def test_parse_fail():
"""Simple case that fails to parse."""
args = {
'a_var': Var(
use=int,
load_from='A_VAR',
),
}
values = {
'A_VAR': 'abc',
}
with raises(errors.ParseError) as err:
parse_dict(values, args)
assert err.value.extra == {
'location': 'A_VAR',
'value': 'abc',
}
def test_validation_with_lambda_fail():
"""Simple case that fails to validate."""
args = {
'a_var': Var(
use=int,
load_from='A_VAR',
validate=lambda x: x == 0,
),
}
values = {
'A_VAR': '1',
}
with raises(errors.ValidationError) as err:
parse_dict(values, args)
assert err.value.extra == {
'value': 1,
'location': 'A_VAR',
}
def test_missing_value():
"""Test case that fails because of a missing value."""
args = {
'a_var': Var(
use=int,
load_from='A_VAR',
validate=lambda x: x == 0,
),
}
values = {}
with raises(errors.ParseError) as err:
parse_dict(values, args)
assert err.value.extra == {
'location': 'A_VAR',
}
assert str(err.value) == 'Required field "A_VAR" missing.'
def test_fancy_validation_function():
"""Test that fails to validate with a real function."""
def validation_function(value):
if value == 1:
raise errors.ValidationError(
'Value not 1',
value=value,
)
args = {
'a_var': Var(
use=int,
load_from='A_VAR',
validate=validation_function,
),
}
values = {
'A_VAR': '1',
}
with raises(errors.ValidationError) as err:
parse_dict(values, args)
assert err.value.extra == {
'value': 1,
}
def test_err_msg():
"""Test that error messages bubble up, when you use them."""
args = {
'a_var': Var(
use=int,
load_from='A_VAR',
err_msg='A_VAR not valid',
),
}
values = {
'A_VAR': 'abc',
}
with raises(errors.ParseError) as err:
parse_dict(values, args)
assert err.value.message == 'A_VAR not valid'
assert repr(err.value) == "ParseError('A_VAR not valid',)"
|
{
"content_hash": "6a71eeb18dae1405f7d4cead41387585",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 64,
"avg_line_length": 20.524590163934427,
"alnum_prop": 0.5035942492012779,
"repo_name": "cknv/envargs",
"id": "8a67ef07b27604a29c35bc8f981347e3af606297",
"size": "2504",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_errors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "131"
},
{
"name": "Python",
"bytes": "12345"
}
],
"symlink_target": ""
}
|
import odoo.tests
@odoo.tests.common.at_install(True)
@odoo.tests.common.post_install(True)
class TestUi(odoo.tests.HttpCase):
def test_sale_available(self):
# delay is added to be sure that all elements have been rendered properly
self.phantom_js(
"/",
"odoo.__DEBUG__.services['web_tour.tour'].run('shop_sale_available', 1000)",
"odoo.__DEBUG__.services['web_tour.tour'].tours.shop_sale_available.ready",
login="admin",
)
|
{
"content_hash": "d9caf65ac65a6ade1c5cbfca48baf0ef",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 88,
"avg_line_length": 35.857142857142854,
"alnum_prop": 0.6254980079681275,
"repo_name": "it-projects-llc/website-addons",
"id": "d0d8aaf3f4255c859de856e7af795823d5dc8cb9",
"size": "629",
"binary": false,
"copies": "1",
"ref": "refs/heads/13.0",
"path": "website_sale_available/tests/test_sale_available.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "38675"
},
{
"name": "HTML",
"bytes": "139628"
},
{
"name": "JavaScript",
"bytes": "355569"
},
{
"name": "Python",
"bytes": "223394"
}
],
"symlink_target": ""
}
|
from setuptools import setup
setup(
name='dns_sync',
version='1.2.0',
description='Sync a Google Cloud DNS zone with GCE resources',
    long_description=('Listens for compute engine audit events '
                      'for load balancer or instance creation and '
                      'deletes or creates DNS records in a Cloud DNS zone.'),
# supply use github url when it exists
# url='',
author='Ben Menasha',
author_email='bmenasha@google.com',
license='apache 2.0',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2.7'],
keywords='google cloud dns gce',
packages=['dns_sync'],
setup_requires=['pytest-runner'],
tests_require=['mock', 'pytest'],
install_requires=['google-cloud-datastore',
'google-cloud-resource-manager',
'google-api-python-client',
'webapp2', 'webapp2_static', 'webob', 'pyyaml',
'oauth2client==3.0.0']
)
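# Example usage (assuming a standard setuptools environment): `pip install .` installs the package and
# `python setup.py pytest` runs the test suite through the pytest-runner declared above.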
|
{
"content_hash": "fd416bbd59579201a7cb3f21047d482e",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 77,
"avg_line_length": 38.25,
"alnum_prop": 0.5816993464052288,
"repo_name": "GoogleCloudPlatform/professional-services",
"id": "791e7de584366ebb562b2946d75be553e39bcb25",
"size": "1655",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tools/dns-sync/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "117994"
},
{
"name": "C++",
"bytes": "174"
},
{
"name": "CSS",
"bytes": "13405"
},
{
"name": "Component Pascal",
"bytes": "798"
},
{
"name": "Dockerfile",
"bytes": "15093"
},
{
"name": "Go",
"bytes": "352968"
},
{
"name": "HCL",
"bytes": "204776"
},
{
"name": "HTML",
"bytes": "1229668"
},
{
"name": "Java",
"bytes": "338810"
},
{
"name": "JavaScript",
"bytes": "59905"
},
{
"name": "Jinja",
"bytes": "60083"
},
{
"name": "Makefile",
"bytes": "14129"
},
{
"name": "Python",
"bytes": "2250081"
},
{
"name": "Scala",
"bytes": "978327"
},
{
"name": "Shell",
"bytes": "109299"
},
{
"name": "Smarty",
"bytes": "19839"
},
{
"name": "TypeScript",
"bytes": "147194"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from .models import *
# Register your models here.
class Familia_Inline(admin.TabularInline):
model = Familia
max_num = 1
can_delete = False
class Educacion_Inline(admin.TabularInline):
model = Educacion
extra = 1
max_num = 10
class TenenciaPropiedad_Inline(admin.TabularInline):
model = TenenciaPropiedad
max_num = 1
can_delete = False
class AreaFinca_Inline(admin.TabularInline):
model = AreaFinca
max_num = 1
can_delete = False
class DetalleAreaFinca_Inline(admin.TabularInline):
model = DetalleAreaFinca
max_num = 11
extra = 1
class Reforestacion_Inline(admin.TabularInline):
model = Reforestacion
max_num = 8
extra = 1
class CaracterizacionTerreno_Inline(admin.TabularInline):
model = CaracterizacionTerreno
max_num = 1
can_delete = False
class FenomenosNaturales_Inline(admin.TabularInline):
model = FenomenosNaturales
max_num = 1
can_delete = False
class RazonesAgricolas_Inline(admin.TabularInline):
model = RazonesAgricolas
max_num = 1
can_delete = False
class RazonesMercado_Inline(admin.TabularInline):
model = RazonesMercado
max_num = 1
can_delete = False
class Inversion_Inline(admin.TabularInline):
model = Inversion
max_num = 1
can_delete = False
class MitigacionRiesgos_Inline(admin.TabularInline):
model = MitigacionRiesgos
max_num = 1
can_delete = False
class OrganizacionAsociada_Inline(admin.TabularInline):
model = OrganizacionAsociada
max_num = 1
can_delete = False
class ServiciosOrganizado_Inline(admin.TabularInline):
model = ServiciosOrganizado
max_num = 1
can_delete = False
class BeneficiosOrganizado_Inline(admin.TabularInline):
model = BeneficiosOrganizado
max_num = 1
can_delete = False
class AreaCacao_Inline(admin.TabularInline):
model = AreaCacao
max_num = 1
can_delete = False
class Plantacion_Inline(admin.TabularInline):
model = Plantacion
max_num = 5
extra = 1
class ProduccionCacao_Inline(admin.TabularInline):
model = ProduccionCacao
max_num = 1
can_delete = False
class Certificacion_Inline(admin.TabularInline):
model = Certificacion
max_num = 1
can_delete = False
class CostoProduccion_Inline(admin.TabularInline):
model = CostoProduccion
max_num = 1
can_delete = False
class TecnicasAplicadas_Inline(admin.StackedInline):
model = TecnicasAplicadas
max_num = 1
can_delete = False
class ComercializacionCacao_Inline(admin.TabularInline):
model = ComercializacionCacao
extra = 1
class DistanciaComercioCacao_Inline(admin.TabularInline):
model = DistanciaComercioCacao
max_num = 1
can_delete = False
class CapacitacionesTecnicas_Inline(admin.TabularInline):
model = CapacitacionesTecnicas
max_num = 11
extra = 1
class CapacitacionesSocioeconomicas_Inline(admin.TabularInline):
model = CapacitacionesSocioeconomicas
max_num = 8
extra = 1
class ProblemasAreaCacao_Inline(admin.TabularInline):
model = ProblemasAreaCacao
max_num = 1
can_delete = False
class Genero_Inline(admin.StackedInline):
model = Genero
max_num = 1
can_delete = False
fieldsets = [(None,
{'fields' : (('actividades'),('ingresos','ingreso_mesual'),('destino_ingresos',),('decisiones',))}),
]
class AmpliarAreasCacao_Inline(admin.TabularInline):
model = AmpliarAreasCacao
max_num = 1
can_delete = False
class EncuestaAdmin(admin.ModelAdmin):
def get_queryset(self, request):
if request.user.is_superuser:
return Encuesta.objects.all()
return Encuesta.objects.filter(usuario=request.user)
def save_model(self, request, obj, form, change):
if request.user.is_superuser:
obj.save()
else:
obj.usuario = request.user
obj.save()
def get_form(self, request, obj=None, **kwargs):
if request.user.is_superuser:
self.exclude = ('anno',)
self.fieldsets = [(('Información del Entrevistado'), {'fields' : (('fecha',),('organizacion','encuestador'),('entrevistado','usuario'))}),]
else:
self.exclude = ('usuario','anno')
self.fieldsets = [(('Información del Entrevistado'), {'fields' : (('fecha',),('organizacion','encuestador'),('entrevistado',))}),]
return super(EncuestaAdmin, self).get_form(request, obj=None, **kwargs)
def get_list_filter(self, request):
if request.user.is_superuser:
return ('organizacion',)
else:
return ()
inlines = [Familia_Inline,Educacion_Inline,TenenciaPropiedad_Inline,AreaFinca_Inline,DetalleAreaFinca_Inline,
Reforestacion_Inline,CaracterizacionTerreno_Inline,FenomenosNaturales_Inline,RazonesAgricolas_Inline,
RazonesMercado_Inline,Inversion_Inline,MitigacionRiesgos_Inline,OrganizacionAsociada_Inline,
ServiciosOrganizado_Inline,BeneficiosOrganizado_Inline,AreaCacao_Inline,Plantacion_Inline,
ProduccionCacao_Inline,Certificacion_Inline,CostoProduccion_Inline,TecnicasAplicadas_Inline,
ComercializacionCacao_Inline,DistanciaComercioCacao_Inline,CapacitacionesTecnicas_Inline,
CapacitacionesSocioeconomicas_Inline,ProblemasAreaCacao_Inline,Genero_Inline,AmpliarAreasCacao_Inline]
list_display = ('entrevistado','organizacion','encuestador','fecha')
list_display_links = ('organizacion','entrevistado')
search_fields = ['entrevistado__nombre','encuestador__nombre']
class Media:
js = ('js/admin.js',)
css = {
'all': ('css/admin.css',)
}
admin.site.register(Profesiones)
admin.site.register(SituacionesPropiedad)
admin.site.register(Beneficios)
admin.site.register(QuienCertifica)
admin.site.register(TiposServicio)
admin.site.register(ActividadesProduccion)
admin.site.register(DestinoIngresos)
admin.site.register(OtrosIngresos)
admin.site.register(ProblemasArea1)
admin.site.register(ProblemasArea2)
admin.site.register(ProblemasArea3)
admin.site.register(Entrevistados)
admin.site.register(Encuestadores)
admin.site.register(Encuesta,EncuestaAdmin)
|
{
"content_hash": "a2f331da36cc26b3ab6c2c35a044ab5c",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 142,
"avg_line_length": 27.79126213592233,
"alnum_prop": 0.7680349344978166,
"repo_name": "shiminasai/aprocacaho",
"id": "4e3369e7961ee7f975ca4e8ecf6b2f3f1f071785",
"size": "5751",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "productores/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "236400"
},
{
"name": "HTML",
"bytes": "418754"
},
{
"name": "JavaScript",
"bytes": "133318"
},
{
"name": "Python",
"bytes": "194712"
}
],
"symlink_target": ""
}
|
from django_filters import FilterSet, ModelMultipleChoiceFilter
from django_select2.forms import Select2MultipleWidget
from grandchallenge.algorithms.models import Job
from grandchallenge.archives.models import Archive
from grandchallenge.cases.models import Image
from grandchallenge.reader_studies.models import ReaderStudy
class ImageFilterSet(FilterSet):
archive = ModelMultipleChoiceFilter(
queryset=Archive.objects.all(),
widget=Select2MultipleWidget,
label="Archive",
help_text="Filter images that belong to an archive",
field_name="componentinterfacevalue__archive_items__archive__pk",
to_field_name="pk",
)
job_input = ModelMultipleChoiceFilter(
queryset=Job.objects.all(),
widget=Select2MultipleWidget,
label="Job Input",
help_text="Filter images that are used as input to an algorithm job",
field_name="componentinterfacevalue__algorithm_jobs_as_input__pk",
to_field_name="pk",
)
job_output = ModelMultipleChoiceFilter(
queryset=Job.objects.all(),
widget=Select2MultipleWidget,
label="Job Output",
help_text="Filter images that are produced as output from an algorithm job",
field_name="componentinterfacevalue__algorithm_jobs_as_output__pk",
to_field_name="pk",
)
reader_study = ModelMultipleChoiceFilter(
queryset=ReaderStudy.objects.all(),
widget=Select2MultipleWidget,
label="Reader Study",
help_text="Filter images that belong to a reader study",
field_name="readerstudies__pk",
to_field_name="pk",
)
class Meta:
model = Image
fields = (
"study",
"origin",
"job_input",
"job_output",
"archive",
"reader_study",
"name",
)
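# Minimal usage sketch (hypothetical view code, not part of this module):
#   filterset = ImageFilterSet(data=request.GET, queryset=Image.objects.all())
#   filtered_images = filterset.qs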
|
{
"content_hash": "2c619b5174a1b624ebb62e8f643d8b25",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 84,
"avg_line_length": 34.75925925925926,
"alnum_prop": 0.6515716568993074,
"repo_name": "comic/comic-django",
"id": "ac29a4061a888f2988af6aec4ecbe0113242fec0",
"size": "1877",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/grandchallenge/cases/filters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "94300"
},
{
"name": "HTML",
"bytes": "101108"
},
{
"name": "JavaScript",
"bytes": "122734"
},
{
"name": "PHP",
"bytes": "99155"
},
{
"name": "Python",
"bytes": "486219"
},
{
"name": "Shell",
"bytes": "793"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.contrib.auth import get_user_model, SESSION_KEY, BACKEND_SESSION_KEY
from django.contrib.sessions.backends.db import SessionStore
from django.core.management import BaseCommand
__author__ = 'peter'
User = get_user_model()
class Command(BaseCommand):
def handle(self, email, *_, **__):
session_key = create_pre_authenticated_session(email)
self.stdout.write(session_key)
def create_pre_authenticated_session(email):
user = User.objects.create(email=email)
session = SessionStore()
session[SESSION_KEY] = user.pk
session[BACKEND_SESSION_KEY] = settings.AUTHENTICATION_BACKENDS[0]
session.save()
return session.session_key
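# Hypothetical use from a functional test (names are illustrative): run this command on the server,
# then hand the returned key to the browser as the session cookie, e.g.
#   browser.add_cookie({'name': settings.SESSION_COOKIE_NAME, 'value': session_key, 'path': '/'})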
|
{
"content_hash": "81b1366305ff2c2ae8fb660955aa163e",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 80,
"avg_line_length": 31.08695652173913,
"alnum_prop": 0.7328671328671329,
"repo_name": "PeterHo/mysite",
"id": "871e3f332ddc5844d7434bc139c8eb91496202d6",
"size": "730",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "functional_tests/management/commands/create_session.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "53762"
},
{
"name": "HTML",
"bytes": "35270"
},
{
"name": "JavaScript",
"bytes": "411445"
},
{
"name": "Python",
"bytes": "138911"
}
],
"symlink_target": ""
}
|
import mock
from datadog_checks.http_check import HTTPCheck, http_check
def test__init__():
# empty values should be ignored
init_config = {'ca_certs': ''}
# `get_ca_certs_path` needs to be mocked because it's used as fallback when
# init_config doesn't contain `ca_certs`
with mock.patch('datadog_checks.http_check.http_check.get_ca_certs_path', return_value='bar'):
http_check = HTTPCheck('http_check', init_config, [{}])
assert http_check.ca_certs == 'bar'
# normal case
init_config = {'ca_certs': 'foo'}
http_check = HTTPCheck('http_check', init_config, [{}])
assert http_check.ca_certs == 'foo'
def test_instances_do_not_share_data():
http_check_1 = HTTPCheck('http_check', {'ca_certs': 'foo'}, [{}])
http_check_1.HTTP_CONFIG_REMAPPER['ca_certs']['default'] = 'foo'
http_check_2 = HTTPCheck('http_check', {'ca_certs': 'bar'}, [{}])
http_check_2.HTTP_CONFIG_REMAPPER['ca_certs']['default'] = 'bar'
assert http_check_1.HTTP_CONFIG_REMAPPER['ca_certs']['default'] == 'foo'
assert http_check_2.HTTP_CONFIG_REMAPPER['ca_certs']['default'] == 'bar'
def test_message_length_when_content_is_too_long():
    max_length = http_check.MESSAGE_LENGTH
try:
http_check.MESSAGE_LENGTH = 25
too_long_content = 'this message is too long'
error_message = 'There has been an error.'
message = HTTPCheck._include_content(True, error_message, too_long_content)
finally:
http_check.MESSAGE_LENGTH = max_lenght
assert len(message) == 25
assert error_message in message
assert too_long_content not in message
def test_message_lenght_when_content_is_ok():
content = '''{
"HikariPool-1.pool.ConnectivityCheck" : {
"healthy" : true
},
"database" : {
"healthy" : true,
"message" : "Service located at jdbc ostgresql://pgbouncer-server.staging.net is alive. Version: 1.5"
},
"deadlocks" : {
"healthy" : true
}
"gateway" : {
"healthy" : true,
"message" : "Service located at https://apis.staging.eu.people-doc.com is alive."
}
}'''
error_message = 'There has been an error.'
message = HTTPCheck._include_content(True, error_message, content)
assert len(message) < http_check.MESSAGE_LENGTH
assert content in message
assert error_message in message
def test_message_when_content_is_disabled():
content = "This is not part of the message"
error_message = 'There has been an error.'
message = HTTPCheck._include_content(False, error_message, content)
assert message == error_message
assert content not in message
|
{
"content_hash": "04f40bc302d657b2dee568863f7a2975",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 109,
"avg_line_length": 34.54545454545455,
"alnum_prop": 0.6447368421052632,
"repo_name": "DataDog/integrations-core",
"id": "5270fbe9ca35fd4cf4201fb1be277045da8e6393",
"size": "2801",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "http_check/tests/test_unit.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "578"
},
{
"name": "COBOL",
"bytes": "12312"
},
{
"name": "Dockerfile",
"bytes": "22998"
},
{
"name": "Erlang",
"bytes": "15518"
},
{
"name": "Go",
"bytes": "6988"
},
{
"name": "HCL",
"bytes": "4080"
},
{
"name": "HTML",
"bytes": "1318"
},
{
"name": "JavaScript",
"bytes": "1817"
},
{
"name": "Kotlin",
"bytes": "430"
},
{
"name": "Lua",
"bytes": "3489"
},
{
"name": "PHP",
"bytes": "20"
},
{
"name": "PowerShell",
"bytes": "2398"
},
{
"name": "Python",
"bytes": "13020828"
},
{
"name": "Roff",
"bytes": "359"
},
{
"name": "Ruby",
"bytes": "241"
},
{
"name": "Scala",
"bytes": "7000"
},
{
"name": "Shell",
"bytes": "83227"
},
{
"name": "Swift",
"bytes": "203"
},
{
"name": "TSQL",
"bytes": "29972"
},
{
"name": "TypeScript",
"bytes": "1019"
}
],
"symlink_target": ""
}
|
from .utils import Utils
from .parser import Parser
from .errors import NotFoundError, NoTitleError, ErrorPageError, HomepageRedirectError, UnknownError
import requests as _requests
import functools
session = _requests.Session()
session.headers['User-Agent'] = 'reFill/2 (https://en.wikipedia.org/wiki/User:Zhaofeng_Li/reFill)'
# Ugly hack to set default timeouts
# https://stackoverflow.com/a/55841818
for method in ('get', 'options', 'head', 'post', 'put', 'patch', 'delete'):
setattr(session, method, functools.partial(getattr(session, method), timeout=(5, 10)))
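# Illustration of the effect of the patch above (hypothetical URL): both calls
# below now default to a (5 s connect, 10 s read) timeout, while an explicit
# timeout= argument still overrides the default.
#
#   session.get('https://example.org/api')
#   session.get('https://example.org/api', timeout=30)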
|
{
"content_hash": "b0e216eee79b00d073847ceb46bdde95",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 100,
"avg_line_length": 44,
"alnum_prop": 0.7534965034965035,
"repo_name": "zhaofengli/refill",
"id": "246fcd53812b54552d7fe1c07f5fd7b8fb21ef54",
"size": "572",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/refill/utils/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "1107"
},
{
"name": "HTML",
"bytes": "183"
},
{
"name": "JavaScript",
"bytes": "138189"
},
{
"name": "Makefile",
"bytes": "394"
},
{
"name": "Python",
"bytes": "56452"
},
{
"name": "Shell",
"bytes": "471"
},
{
"name": "Vue",
"bytes": "20591"
}
],
"symlink_target": ""
}
|
""" Tests for go_cli.export_contacts. """
from unittest import TestCase
from StringIO import StringIO
import json
from click.testing import CliRunner
from go_http.exceptions import PagedException
import go_cli.export_contacts
from go_cli.main import cli
from go_cli.export_contacts import (
contact_to_csv_dict, csv_contact_writer, json_contact_writer)
from go_cli.tests.utils import ApiHelper
class TestExportContactsCommand(TestCase):
def setUp(self):
self.runner = CliRunner()
self.api_helper = ApiHelper(self)
self.api_helper.patch_api(go_cli.export_contacts, 'ContactsApiClient')
def tearDown(self):
self.api_helper.tearDown()
def invoke_export_contacts(self, args, account="acc-1", token="tok-1"):
return self.runner.invoke(cli, [
'--account', account, 'export-contacts',
'--token', token,
] + args)
def test_help(self):
result = self.runner.invoke(cli, ['export-contacts', '--help'])
self.assertEqual(result.exit_code, 0)
self.assertTrue(
"Export contacts from the contacts API."
in result.output)
def test_export_no_api_details(self):
result = self.runner.invoke(cli, ['export-contacts'])
self.assertEqual(result.exit_code, 2)
self.assertTrue(
"Please specify both the account key and the contacts API"
" authentication token. See --help."
in result.output)
def test_export_no_output_specified(self):
result = self.invoke_export_contacts([])
self.assertEqual(result.exit_code, 2)
self.assertTrue("Please specify either --csv or --json (but not both)."
in result.output)
def test_export_to_csv(self):
response = self.api_helper.add_contacts(
"tok-1",
contacts=[
{"msisdn": "1234"},
{"msisdn": "5678"},
])
with self.runner.isolated_filesystem():
result = self.invoke_export_contacts(['--csv', 'contacts.csv'])
self.assertEqual(result.output, "")
self.api_helper.check_response(response, 'GET')
with open('contacts.csv') as f:
self.assertEqual(
f.read(),
"msisdn\r\n1234\r\n5678\r\n")
def test_export_to_json(self):
response = self.api_helper.add_contacts(
"tok-1",
contacts=[
{"msisdn": "1234"},
{"msisdn": "5678"},
])
with self.runner.isolated_filesystem():
result = self.invoke_export_contacts(['--json', 'contacts.json'])
self.assertEqual(result.output, "")
self.api_helper.check_response(response, 'GET')
with open('contacts.json') as f:
self.assertEqual(
f.read(),
'{"msisdn": "1234"}\n{"msisdn": "5678"}\n')
def test_resume_csv(self):
response = self.api_helper.add_contacts(
"tok-1",
start_cursor="abcd",
contacts=[
{"msisdn": "8888"},
{"msisdn": "9999"},
])
with self.runner.isolated_filesystem():
result = self.invoke_export_contacts([
'--resume', 'abcd', '--csv', 'contacts.csv'])
self.assertEqual(result.output, "")
self.api_helper.check_response(response, 'GET')
with open('contacts.csv') as f:
self.assertEqual(
f.read(),
'8888\r\n9999\r\n')
def test_resume_json(self):
response = self.api_helper.add_contacts(
"tok-1",
start_cursor="abcd",
contacts=[
{"msisdn": "8888"},
{"msisdn": "9999"},
])
with self.runner.isolated_filesystem():
result = self.invoke_export_contacts([
'--resume', 'abcd', '--json', 'contacts.json'])
self.assertEqual(result.output, "")
self.api_helper.check_response(response, 'GET')
with open('contacts.json') as f:
self.assertEqual(
f.read(),
'{"msisdn": "8888"}\n{"msisdn": "9999"}\n')
def test_page_exception(self):
def raise_page_exc(*args, **kw):
yield {"msisdn": "1234"}
raise PagedException("abcd", Exception("Foo"))
self.api_helper.patch_api_method(
go_cli.export_contacts, 'ContactsApiClient', 'contacts',
raise_page_exc)
with self.runner.isolated_filesystem():
result = self.invoke_export_contacts([
'--json', 'contacts.json'])
self.assertEqual(
result.output,
"Error: Error downloading contacts. Please re-run with"
" --resume=abcd to resume.\n")
with open('contacts.json') as f:
self.assertEqual(
f.read(),
'{"msisdn": "1234"}\n')
class TestContactToCsvDict(TestCase):
def test_unicode_keys(self):
self.assertEqual(
contact_to_csv_dict({u"éł": "123"}),
{u"éł".encode("utf-8"): "123"})
def test_unicode_value(self):
self.assertEqual(
contact_to_csv_dict({"123": u"éł"}),
{"123": u"éł".encode("utf-8")})
def test_non_string_values(self):
self.assertEqual(
contact_to_csv_dict({"abc": [1, 2, 3]}),
{"abc": json.dumps([1, 2, 3])})
class TestCsvContactWriter(TestCase):
def test_new_file(self):
f = StringIO()
writer = csv_contact_writer(f, False)
writer({"msisdn": "1234"})
writer({"msisdn": "5678"})
self.assertEqual(f.getvalue(), "msisdn\r\n1234\r\n5678\r\n")
def test_resumed_file(self):
f = StringIO()
writer = csv_contact_writer(f, True)
writer({"msisdn": "1234"})
writer({"msisdn": "5678"})
self.assertEqual(f.getvalue(), "1234\r\n5678\r\n")
class TestJsonContactWriter(TestCase):
def test_new_file(self):
f = StringIO()
writer = json_contact_writer(f)
writer({"msisdn": "1234"})
writer({"msisdn": "5678"})
self.assertEqual(
f.getvalue(), '{"msisdn": "1234"}\n{"msisdn": "5678"}\n')
|
{
"content_hash": "40fe05d075bda34c8b601b0d27149aeb",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 79,
"avg_line_length": 35.31868131868132,
"alnum_prop": 0.5365588052271313,
"repo_name": "praekelt/go-cli",
"id": "01490092db0b9eac6e7727bbfb14d343c4df9f5c",
"size": "6461",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "go_cli/tests/test_export_contacts.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "24093"
},
{
"name": "Shell",
"bytes": "607"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, workspace
from caffe2.python.test_util import TestCase
import numpy as np
import tempfile
class TestIndexOps(TestCase):
def _test_index_ops(self, entries, dtype, index_create_op):
workspace.RunOperatorOnce(core.CreateOperator(
index_create_op,
[],
['index'],
max_elements=10))
my_entries = np.array(
[entries[0], entries[1], entries[2]], dtype=dtype)
workspace.FeedBlob('entries', my_entries)
workspace.RunOperatorOnce(core.CreateOperator(
'IndexLoad',
['index', 'entries'],
['index']))
query1 = np.array(
[entries[0], entries[3], entries[0], entries[4]],
dtype=dtype)
workspace.FeedBlob('query1', query1)
workspace.RunOperatorOnce(core.CreateOperator(
'IndexGet',
['index', 'query1'],
['result1']))
result1 = workspace.FetchBlob('result1')
np.testing.assert_array_equal([1, 4, 1, 5], result1)
workspace.RunOperatorOnce(core.CreateOperator(
'IndexFreeze',
['index'],
['index']))
query2 = np.array(
[entries[5], entries[4], entries[0], entries[6], entries[7]],
dtype=dtype)
workspace.FeedBlob('query2', query2)
workspace.RunOperatorOnce(core.CreateOperator(
'IndexGet',
['index', 'query2'],
['result2']))
result2 = workspace.FetchBlob('result2')
np.testing.assert_array_equal([0, 5, 1, 0, 0], result2)
workspace.RunOperatorOnce(core.CreateOperator(
'IndexSize',
['index'],
['index_size']))
size = workspace.FetchBlob('index_size')
self.assertEquals(size, 6)
workspace.RunOperatorOnce(core.CreateOperator(
'IndexStore',
['index'],
['stored_entries']))
stored_actual = workspace.FetchBlob('stored_entries')
new_entries = np.array([entries[3], entries[4]], dtype=dtype)
expected = np.concatenate((my_entries, new_entries))
if dtype is str:
# we'll always get bytes back from Caffe2
expected = np.array([
x.item().encode('utf-8') if isinstance(x, np.str_) else x
for x in expected
], dtype=object)
np.testing.assert_array_equal(expected, stored_actual)
workspace.RunOperatorOnce(core.CreateOperator(
index_create_op,
[],
['index2']))
workspace.RunOperatorOnce(core.CreateOperator(
'IndexLoad',
['index2', 'stored_entries'],
['index2'],
skip_first_entry=1))
workspace.RunOperatorOnce(core.CreateOperator(
'IndexSize',
['index2'],
['index2_size']))
index2_size = workspace.FetchBlob('index2_size')
self.assertEquals(index2_size, 5)
# test serde
with tempfile.NamedTemporaryFile() as tmp:
workspace.RunOperatorOnce(core.CreateOperator(
'Save',
['index'],
[],
absolute_path=1,
db_type='minidb',
db=tmp.name))
# frees up the blob
workspace.FeedBlob('index', np.array([]))
# reloads the index
workspace.RunOperatorOnce(core.CreateOperator(
'Load',
[],
['index'],
absolute_path=1,
db_type='minidb',
db=tmp.name))
query3 = np.array(
[entries[0], entries[3], entries[0], entries[4], entries[4]],
dtype=dtype)
workspace.FeedBlob('query3', query3)
workspace.RunOperatorOnce(core.CreateOperator(
'IndexGet', ['index', 'query3'], ['result3']))
result3 = workspace.FetchBlob('result3')
np.testing.assert_array_equal([1, 4, 1, 5, 5], result3)
def test_string_index_ops(self):
self._test_index_ops([
'entry1', 'entry2', 'entry3', 'new_entry1',
'new_entry2', 'miss1', 'miss2', 'miss3',
], str, 'StringIndexCreate')
def test_int_index_ops(self):
self._test_index_ops(list(range(8)), np.int32, 'IntIndexCreate')
def test_long_index_ops(self):
self._test_index_ops(list(range(8)), np.int64, 'LongIndexCreate')
if __name__ == "__main__":
import unittest
unittest.main()
|
{
"content_hash": "91d6f6b9d6f040d3039ef8f56c54ecc7",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 77,
"avg_line_length": 34.613138686131386,
"alnum_prop": 0.5428089413749473,
"repo_name": "sf-wind/caffe2",
"id": "7c15f8220e0e55712143029ca80b1280b3f2be61",
"size": "5413",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "caffe2/python/operator_test/index_ops_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5415"
},
{
"name": "C",
"bytes": "316608"
},
{
"name": "C++",
"bytes": "4740750"
},
{
"name": "CMake",
"bytes": "139512"
},
{
"name": "CSS",
"bytes": "2196"
},
{
"name": "Cuda",
"bytes": "665218"
},
{
"name": "HTML",
"bytes": "5203"
},
{
"name": "Makefile",
"bytes": "1225"
},
{
"name": "Metal",
"bytes": "36752"
},
{
"name": "Objective-C",
"bytes": "6505"
},
{
"name": "Objective-C++",
"bytes": "239139"
},
{
"name": "Python",
"bytes": "2901542"
},
{
"name": "Shell",
"bytes": "31734"
}
],
"symlink_target": ""
}
|
import io
from collections import namedtuple
from django.db import models, transaction
from memoized import memoized
from casexml.apps.phone.restore import stream_response
from corehq.apps.users.util import raw_username
from corehq.blobs import CODES, get_blob_db
from corehq.blobs.atomic import AtomicBlobs
from corehq.util.quickcache import quickcache
class DemoUserRestore(models.Model):
"""
This holds the frozen restore XML blob for a demo mobile worker
"""
demo_user_id = models.CharField(max_length=255, default=None, db_index=True)
restore_blob_id = models.CharField(max_length=255, default=None)
content_length = models.IntegerField(null=True)
timestamp_created = models.DateTimeField(auto_now=True)
restore_comment = models.CharField(max_length=250, null=True, blank=True)
@classmethod
def create(cls, user_id, restore_content, domain):
"""
The method to create a new DemoUserRestore object
        args:
user_id: the id of the CommCareUser
restore_content: a string or file-like object of user's restore XML
"""
restore = cls(
demo_user_id=user_id,
restore_comment="",
)
with AtomicBlobs(get_blob_db()) as db:
restore._write_restore_blob(restore_content, db, domain)
restore.save()
return restore
def get_restore_http_response(self):
"""
Returns restore XML as a streaming http response
"""
payload = self._get_restore_xml()
headers = {
'Content-Length': self.content_length,
'Content-Type': 'text/xml'
}
return stream_response(payload, headers)
def get_restore_as_string(self):
"""
Returns restore XML as a string
"""
try:
blob = self._get_restore_xml()
return blob.read()
finally:
blob.close()
def _get_restore_xml(self):
return get_blob_db().get(key=self.restore_blob_id, type_code=CODES.demo_user_restore)
def delete(self):
"""
        Deletes the restore object and the xml blob permanently
"""
get_blob_db().delete(key=self.restore_blob_id)
super(DemoUserRestore, self).delete()
def _write_restore_blob(self, restore, db, domain):
if isinstance(restore, str):
restore = io.BytesIO(restore.encode("utf-8"))
elif isinstance(restore, bytes):
restore = io.BytesIO(restore)
meta = db.put(
restore,
domain=domain,
parent_id=self.demo_user_id or "DemoUserRestore",
type_code=CODES.demo_user_restore,
)
self.restore_blob_id = meta.key
self.content_length = meta.content_length
class SerialIdBucket(models.Model):
"""
Model used to keep track of an incrementing, unique integer
to be used in serial ID generation
"""
domain = models.CharField(max_length=255)
bucket_id = models.CharField(max_length=255)
current_value = models.IntegerField(default=-1)
class Meta(object):
index_together = ('domain', 'bucket_id',)
unique_together = ('domain', 'bucket_id',)
@classmethod
def get_next(cls, domain, bucket_id, session_id=None):
if session_id:
return cls._get_next_cached(domain, bucket_id, session_id)
else:
return cls._get_next(domain, bucket_id)
@classmethod
@quickcache(['domain', 'bucket_id', 'session_id'])
def _get_next_cached(cls, domain, bucket_id, session_id):
return cls._get_next(domain, bucket_id)
@classmethod
@transaction.atomic
def _get_next(cls, domain, bucket_id):
# select_for_update locks matching rows until the end of the transaction
bucket, _ = (cls.objects
.select_for_update()
.get_or_create(domain=domain, bucket_id=bucket_id))
bucket.current_value += 1
bucket.save()
return bucket.current_value
Measure = namedtuple('Measure', 'slug name description')
class MobileRecoveryMeasure(models.Model):
"""
Model representing a method of recovering from a fatal error on mobile.
"""
MEASURES = (
Measure('app_reinstall_and_update', "Reinstall and Update App",
"Reinstall the current CommCare app either OTA or with a ccz, but "
"requiring an OTA update to the latest version before it may be used."),
Measure('app_update', "Update App",
"Update the current CommCare app"),
Measure('cc_reinstall', "CC Reinstall Needed",
"Notify the user that CommCare needs to be reinstalled"),
Measure('cc_update', "CC Update Needed",
"Notify the user that CommCare needs to be updated"),
Measure('app_offline_reinstall_and_update', "Offline Reinstall and Update App",
"Reinstall the current CommCare app offline.")
)
measure = models.CharField(
max_length=255,
choices=[(m.slug, m.name) for m in MEASURES],
help_text="<br/>".join(
"<strong>{}:</strong> {}".format(m.name, m.description)
for m in MEASURES
)
)
domain = models.CharField(max_length=255)
app_id = models.CharField(max_length=50)
cc_all_versions = models.BooleanField(
verbose_name="All CommCare Versions", default=True)
cc_version_min = models.CharField(
verbose_name="Min CommCare Version", max_length=255, blank=True)
cc_version_max = models.CharField(
verbose_name="Max CommCare Version", max_length=255, blank=True)
app_all_versions = models.BooleanField(
verbose_name="All App Versions", default=True)
app_version_min = models.IntegerField(
verbose_name="Min App Version", null=True, blank=True)
app_version_max = models.IntegerField(
verbose_name="Max App Version", null=True, blank=True)
created_on = models.DateTimeField(auto_now_add=True)
username = models.CharField(max_length=255, editable=False)
notes = models.TextField(blank=True)
@property
def sequence_number(self):
return self.pk
def to_mobile_json(self):
res = {
"sequence_number": self.sequence_number,
"type": self.measure,
}
if not self.cc_all_versions:
res["cc_version_min"] = self.cc_version_min
res["cc_version_max"] = self.cc_version_max
if not self.app_all_versions:
res["app_version_min"] = self.app_version_min
res["app_version_max"] = self.app_version_max
return res
class DeviceLogRequest(models.Model):
"""
A pending request that a particular device submit their logs
"""
domain = models.CharField(max_length=255)
username = models.CharField(max_length=255, help_text="raw_username, no @domain.commcarehq.org")
created_on = models.DateTimeField(auto_now_add=True)
class Meta(object):
unique_together = ('domain', 'username')
def delete(self, *args, **kwargs):
super().delete(*args, **kwargs)
_all_device_log_requests.reset_cache()
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
_all_device_log_requests.reset_cache()
@classmethod
def is_pending(cls, domain, username):
"""Is there a pending device log request matching these params?"""
return (domain, raw_username(username)) in _all_device_log_requests()
@memoized
def _all_device_log_requests():
# This is expected to be very small, usually empty, but it's accessed
# every heartbeat request, so it's memoized to the Django process
return set(DeviceLogRequest.objects.values_list('domain', 'username'))
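# Usage sketch for SerialIdBucket (domain and bucket names are illustrative):
# each call returns the next integer for a (domain, bucket_id) pair, starting
# at 0; passing a session_id memoizes the value so repeated calls within the
# same session reuse one number.
#
#   SerialIdBucket.get_next("demo-domain", "case-serial")                   # -> 0
#   SerialIdBucket.get_next("demo-domain", "case-serial")                   # -> 1
#   SerialIdBucket.get_next("demo-domain", "case-serial", session_id="s1")  # -> 2
#   SerialIdBucket.get_next("demo-domain", "case-serial", session_id="s1")  # -> 2 (cached)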
|
{
"content_hash": "f66e19bbd979556a6f8c2d1985198566",
"timestamp": "",
"source": "github",
"line_count": 225,
"max_line_length": 100,
"avg_line_length": 34.8,
"alnum_prop": 0.6284802043422734,
"repo_name": "dimagi/commcare-hq",
"id": "5b6fd5ef7ef426185f4e712f30d0e32dd15e5abe",
"size": "7830",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/ota/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
}
|
from tempest.common import credentials_factory as credentials
from tempest import config
from tempest import exceptions
from tempest.tests import fake_config
from tempest.tests.lib import base
class TestLegacyCredentialsProvider(base.TestCase):
fixed_params = {'identity_version': 'v2'}
def setUp(self):
super(TestLegacyCredentialsProvider, self).setUp()
self.useFixture(fake_config.ConfigFixture())
self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
def test_get_creds_roles_legacy_invalid(self):
test_accounts_class = credentials.LegacyCredentialProvider(
**self.fixed_params)
self.assertRaises(exceptions.InvalidConfiguration,
test_accounts_class.get_creds_by_roles,
['fake_role'])
|
{
"content_hash": "fd3c465e16bb14e287d3c61d8263cd6d",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 79,
"avg_line_length": 37.59090909090909,
"alnum_prop": 0.7013301088270859,
"repo_name": "HybridF5/tempest",
"id": "6fc490e4b271ffa7b877193fae2a9c1e71770ab8",
"size": "1461",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tempest/tests/common/test_credentials.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3636851"
},
{
"name": "Shell",
"bytes": "8175"
}
],
"symlink_target": ""
}
|
from nose.tools import *
from cohadar import lexicon, parser
def test_parse_sentence():
result = parser.parse_sentence([('verb', 'run'), ('direction', 'north')])
assert_equal(result, parser.Sentence('player', 'run', 'north'))
def test_parse_sentence2():
result = parser.parse_sentence([('noun', 'bear'), ('verb', 'eat'), ('stop', 'the'), ('noun', 'honey')])
assert_equal(result, parser.Sentence(subject='bear', verb='eat', obj='honey'))
def test_parse_sentence3():
word_list = lexicon.scan("run north")
result = parser.parse_sentence(word_list)
assert_equal(result, parser.Sentence('player', 'run', 'north'))
def test_parse_sentence4():
word_list = lexicon.scan("bear eat the honey")
result = parser.parse_sentence(word_list)
assert_equal(result, parser.Sentence(subject='bear', verb='eat', obj='honey'))
def test_bad():
word_list = lexicon.scan("the bear honey")
with assert_raises(parser.ParserError) as cm:
parser.parse_sentence(word_list)
assert_equal(cm.exception.message, "Expected a verb next.")
|
{
"content_hash": "2b72bf96e7ee819d6233280aa3337db3",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 104,
"avg_line_length": 32.1875,
"alnum_prop": 0.6951456310679611,
"repo_name": "cohadar/learn-python-the-hard-way",
"id": "5fd29c36eee1d7f6a5162816813a22e6373e7054",
"size": "1030",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/parser_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30545"
}
],
"symlink_target": ""
}
|
import sqlite3
connection = sqlite3.connect('easy_ticket.db')
cursor = connection.cursor()
print("Opened database successfully")
input_file = open('links.txt', 'r')
num_of_lines = int(input_file.readline().strip())
i = 0
while i < num_of_lines:
connection_info = input_file.readline().strip().split(';')
station_a = connection_info[0].strip()
cursor.execute('SELECT * FROM station WHERE name = ?', (station_a,))
station_a_row = cursor.fetchone()
station_a_id = station_a_row[0]
station_b = connection_info[1].strip()
    print(station_b)
cursor.execute('SELECT * FROM station WHERE name = ?', (station_b,))
station_b_row = cursor.fetchone()
station_b_id = station_b_row[0]
connection_distance = float(connection_info[2].strip())
print(station_a_id, station_b_id, connection_distance)
cursor.execute('INSERT INTO connection(station_a, station_b, distance) VALUES(?,?,?)', (station_a_id, station_b_id, connection_distance))
connection.commit()
i += 1
connection.close()
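# Expected layout of links.txt (station names below are made up for
# illustration): the first line holds the number of connection rows, and each
# following row is "station_a;station_b;distance". Both stations must already
# exist in the station table.
#
#   2
#   King Street;Queen Street;1.2
#   Queen Street;Victoria Park;0.8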
|
{
"content_hash": "b9d5f9da782c81bdf0917a21800f43ea",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 141,
"avg_line_length": 36.75,
"alnum_prop": 0.6754130223517979,
"repo_name": "elailai94/EasyTicket",
"id": "71c106227575f7805adf48b54eb3ff1f03407474",
"size": "1029",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "source-code/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30260"
}
],
"symlink_target": ""
}
|
"""
WWW: http://4suite.com/4DOM e-mail: support@4suite.com
Copyright (c) 2000 Fourthought Inc, USA. All Rights Reserved.
See http://4suite.com/COPYRIGHT for license and copyright information
"""
from xml.dom import Node
from FtNode import FtNode
class ProcessingInstruction(FtNode):
nodeType = Node.PROCESSING_INSTRUCTION_NODE
def __init__(self,ownerDocument,target,data):
FtNode.__init__(self,ownerDocument,'','','')
self.__dict__['__nodeName'] = target
self.__dict__['__nodeValue'] = data
def _get_target(self):
return self.__dict__['__nodeName']
def _get_data(self):
return self.__dict__['__nodeValue']
def _set_data(self, newData):
self.__dict__['__nodeValue'] = newData
### Overridden Methods ###
def __repr__(self):
data = self.data
if len(data) > 20:
data = data[20:] + '...'
return "<ProcessingInstruction at %x: target='%s' data='%s'>" % (
id(self),
self.target,
data
)
### Helper Functions For Cloning ###
def _4dom_clone(self, owner):
return self.__class__(owner, self.target, self.data)
def __getinitargs__(self):
return (self.ownerDocument,
self.target,
self.data
)
### Attribute Access Mappings ###
_readComputedAttrs = FtNode._readComputedAttrs.copy()
_readComputedAttrs.update({'target':_get_target,
'data':_get_data
})
_writeComputedAttrs = FtNode._writeComputedAttrs.copy()
_writeComputedAttrs.update({'data':_set_data
})
# Create the read-only list of attributes
_readOnlyAttrs = filter(lambda k,m=_writeComputedAttrs: not m.has_key(k),
FtNode._readOnlyAttrs + _readComputedAttrs.keys())
|
{
"content_hash": "0b20d33d2b606f95077238d2907026b8",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 78,
"avg_line_length": 29.56923076923077,
"alnum_prop": 0.558792924037461,
"repo_name": "Integral-Technology-Solutions/ConfigNOW",
"id": "6f1465d4b7c6175da93c3eacaff62124c0724cd1",
"size": "2132",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Lib/xml/dom/ProcessingInstruction.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1499"
},
{
"name": "HTML",
"bytes": "2243"
},
{
"name": "Java",
"bytes": "594"
},
{
"name": "Python",
"bytes": "2973691"
},
{
"name": "Shell",
"bytes": "5797"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
import errno
from contextlib import contextmanager
from astropy import units as u
from astropy.units import Unit, UnitBase
from ..compat.contextlib import suppress
from ..compat.math import isclose
from .compat import PY33
__all__ = (
'format_size',
'prefix',
'qisclose',
'read_only_property',
'suppress_file_exists_error',
'verify_unit',
)
_size_suffixes = {
'decimal': ('kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'),
'binary': ('KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'),
'gnu': "KMGTPEZY",
}
def read_only_property(name, docstring=None):
"""Return property for accessing attribute with name `name`
Parameters:
name: Attribute name
docstring: Optional docstring for getter.
Example:
.. code-block:: python
class Circle:
def __init__(self, radius):
self._radius = radius
radius = read_only_property('_radius')
"""
def fget(self):
return getattr(self, name)
fget.__doc__ = docstring
return property(fget)
def verify_unit(quantity, unit):
"""Verify unit of passed quantity and return it.
Parameters:
quantity: :py:class:`~astropy.units.Quantity` to be verified. Bare
numbers are valid if the unit is dimensionless.
unit: Equivalent unit, or string parsable by
:py:class:`astropy.units.Unit`
Raises:
ValueError: Units are not equivalent.
Returns:
``quantity`` unchanged. Bare numbers will be converted to a dimensionless
:py:class:`~astropy.units.Quantity`.
Example:
.. code-block:: python
def __init__(self, a):
self.a = verify_unit(a, astropy.units.m)
"""
if not isinstance(unit, UnitBase):
unit = Unit(unit)
q = quantity * u.one
if unit.is_equivalent(q.unit):
return q
else:
raise ValueError(
"Unit '{}' not equivalent to quantity '{}'.".format(unit, quantity))
def qisclose(a, b, rel_tol=1e-9, abs_tol=0.0):
"""Helper function for using :py:func:`math.isclose` with
:py:class:`~astropy.units.Quantity` objects.
"""
return isclose(a.si.value, b.si.value, rel_tol=rel_tol, abs_tol=abs_tol)
def format_size(value, binary=False, gnu=False, format='%.1f'):
"""Format a number of bytes like a human readable file size (e.g. 10 kB). By
default, decimal suffixes (kB, MB) are used. Passing binary=true will use
binary suffixes (KiB, MiB) are used and the base will be 2**10 instead of
10**3. If ``gnu`` is True, the binary argument is ignored and GNU-style
(ls -sh style) prefixes are used (K, M) with the 2**10 definition.
Non-gnu modes are compatible with jinja2's ``filesizeformat`` filter.
Copyright (c) 2010 Jason Moiron and Contributors.
"""
if gnu:
suffix = _size_suffixes['gnu']
elif binary:
suffix = _size_suffixes['binary']
else:
suffix = _size_suffixes['decimal']
base = 1024 if (gnu or binary) else 1000
bytes = float(value)
if bytes == 1 and not gnu:
return '1 Byte'
elif bytes < base and not gnu:
return '%d Bytes' % bytes
elif bytes < base and gnu:
return '%dB' % bytes
for i, s in enumerate(suffix):
unit = base ** (i + 2)
if bytes < unit and not gnu:
return (format + ' %s') % ((base * bytes / unit), s)
elif bytes < unit and gnu:
return (format + '%s') % ((base * bytes / unit), s)
if gnu:
return (format + '%s') % ((base * bytes / unit), s)
return (format + ' %s') % ((base * bytes / unit), s)
@contextmanager
def suppress_file_exists_error():
"""Compatibility function for catching FileExistsError on Python 2"""
if PY33:
with suppress(FileExistsError): # noqa
yield
else:
try:
yield
except OSError as e:
if e.errno != errno.EEXIST:
raise
def prefix(prefix, iterable):
"""Prepend items from `iterable` with `prefix` string."""
for x in iterable:
yield '{prefix}{x}'.format(prefix=prefix, x=x)
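# Illustrative calls for the helpers above (values are arbitrary):
#
#   from astropy import units as u
#   verify_unit(7 * u.km, 'm')       # -> <Quantity 7.0 km> (equivalent unit, returned unchanged)
#   verify_unit(7, u.one)            # -> dimensionless <Quantity 7.0>
#   verify_unit(7 * u.s, 'm')        # raises ValueError
#   format_size(3000)                # -> '3.0 kB'
#   format_size(3000, binary=True)   # -> '2.9 KiB'
#   list(prefix('v', [1, 2]))        # -> ['v1', 'v2']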
|
{
"content_hash": "06bc8e9c0a4dd2e1c9d20affe91624e6",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 81,
"avg_line_length": 28.83108108108108,
"alnum_prop": 0.593156784626201,
"repo_name": "python-astrodynamics/astrodynamics",
"id": "e7c3782a4ac39639d6cc35d45ea31657e362817b",
"size": "4283",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/astrodynamics/utils/helper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "857"
},
{
"name": "Python",
"bytes": "91781"
},
{
"name": "Shell",
"bytes": "1378"
}
],
"symlink_target": ""
}
|
def main(j, args, params, tags, tasklet):
import yaml
args.requestContext.params['breadcrumbdata'] = yaml.load(args.cmdstr)
params.result = ('', args.doc)
return params
def match(j, args, params, tags, tasklet):
return True
|
{
"content_hash": "23a0dff542bfe7779214a06031b3219a",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 73,
"avg_line_length": 25.7,
"alnum_prop": 0.6459143968871596,
"repo_name": "Jumpscale/jumpscale_portal8",
"id": "e9dfa31b33d25050d2b126e9308d3aeb2d61a2de",
"size": "257",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/portalbase/macros/wiki/breadcrumb/3_breadcrumb.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "482591"
},
{
"name": "HTML",
"bytes": "313255"
},
{
"name": "JavaScript",
"bytes": "8815099"
},
{
"name": "PHP",
"bytes": "205758"
},
{
"name": "Python",
"bytes": "974012"
},
{
"name": "Ruby",
"bytes": "28925"
},
{
"name": "Shell",
"bytes": "291"
}
],
"symlink_target": ""
}
|
from weibo import APIClient
from kafka import KafkaProducer
import json
import time
APP_KEY = "3722673574"
APP_SECRET = "3686fea0a65da883b6c2a7586f350425"
CALLBACK_URL = 'https://api.weibo.com/oauth2/default.html'
client = APIClient(app_key=APP_KEY, app_secret=APP_SECRET, redirect_uri=CALLBACK_URL)
with open('token.json', 'r') as f:
r = json.load(f)
access_token = r["access_token"]
expires_in = r["expires_at"]
client.set_access_token(access_token, expires_in)
producer = KafkaProducer()
# put data into kafka
def put_data_kafka():
    raw_data = client.statuses.public_timeline.get()
    for status in raw_data.statuses:
        text = status.text.encode("utf-8")
        print text
        producer.send('test', text)
    producer.flush()
# avoid hitting the API too fast
def control_pace():
i = 1
while i <= 50:
put_data_kafka()
i += 1
time.sleep(10) # sleep 10 seconds
control_pace()
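# A matching consumer sketch (assumed setup: a broker reachable on
# localhost:9092 and the same 'test' topic used above), handy for checking
# that statuses actually arrive:
#
#   from kafka import KafkaConsumer
#   consumer = KafkaConsumer('test', bootstrap_servers='localhost:9092')
#   for message in consumer:
#       print message.value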
|
{
"content_hash": "47cfe610cb66e947eeb1ba6b51393f6d",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 85,
"avg_line_length": 27.11111111111111,
"alnum_prop": 0.6721311475409836,
"repo_name": "Heipiao/weibo",
"id": "a089e08b7f647620e93b4e8aaaf69e9ea241e69a",
"size": "990",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py/data_input_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "20094"
},
{
"name": "Python",
"bytes": "9279"
},
{
"name": "Shell",
"bytes": "28219"
}
],
"symlink_target": ""
}
|
"""
mbed CMSIS-DAP debugger
Copyright (c) 2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ..core.target import Target
## @brief Split command line by whitespace, supporting quoted strings.
#
# Accepts either a single string or a list of strings.
def split_command_line(cmd_line):
result = []
if type(cmd_line) is str:
args = [cmd_line]
else:
args = cmd_line
for cmd in args:
state = 0
word = ''
open_quote = ''
for c in cmd:
if state == 0:
if c in (' ', '\t', '\r', '\n'):
if word:
result.append(word)
word = ''
elif c in ('"', "'"):
open_quote = c
state = 1
else:
word += c
elif state == 1:
if c == open_quote:
result.append(word)
word = ''
state = 0
else:
word += c
if word:
result.append(word)
return result
## Map of vector char characters to masks.
VECTOR_CATCH_CHAR_MAP = {
'h': Target.CATCH_HARD_FAULT,
'b': Target.CATCH_BUS_FAULT,
'm': Target.CATCH_MEM_FAULT,
'i': Target.CATCH_INTERRUPT_ERR,
's': Target.CATCH_STATE_ERR,
'c': Target.CATCH_CHECK_ERR,
'p': Target.CATCH_COPROCESSOR_ERR,
'r': Target.CATCH_CORE_RESET,
'a': Target.CATCH_ALL,
'n': Target.CATCH_NONE,
}
## @brief Convert a vector catch string to a mask.
#
# @exception ValueError Raised if an invalid vector catch character is encountered.
def convert_vector_catch(value):
# Make case insensitive.
value = value.lower()
# Handle special vector catch options.
if value == 'all':
return Target.CATCH_ALL
elif value == 'none':
return Target.CATCH_NONE
# Convert options string to mask.
try:
return sum([VECTOR_CATCH_CHAR_MAP[c] for c in value])
except KeyError as e:
# Reraise an error with a more helpful message.
raise ValueError("invalid vector catch option '{}'".format(e.args[0]))
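# Illustrative calls (argument values are arbitrary):
#
#   split_command_line('reset halt "hello world"')  # -> ['reset', 'halt', 'hello world']
#   split_command_line(['reset', 'halt'])           # -> ['reset', 'halt']
#   convert_vector_catch('hb')   # -> CATCH_HARD_FAULT and CATCH_BUS_FAULT masks combined
#   convert_vector_catch('ALL')  # -> Target.CATCH_ALL (case insensitive)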
|
{
"content_hash": "eacb1b6f0898a2163ae7088c2c05a632",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 83,
"avg_line_length": 30.511363636363637,
"alnum_prop": 0.5649906890130354,
"repo_name": "wjzhang/pyOCD",
"id": "aed049e192e2b339ad1f6b49b8ecbd03c047dc6d",
"size": "2685",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyOCD/utility/cmdline.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "801"
},
{
"name": "C",
"bytes": "3924"
},
{
"name": "Python",
"bytes": "888117"
},
{
"name": "Shell",
"bytes": "479"
}
],
"symlink_target": ""
}
|
import abc
import six
from neutron.common import rpc as n_rpc
from neutron.db.vpn import vpn_validator
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class VpnDriver(object):
def __init__(self, service_plugin, validator=None):
self.service_plugin = service_plugin
if validator is None:
validator = vpn_validator.VpnReferenceValidator()
self.validator = validator
@property
def service_type(self):
pass
@abc.abstractmethod
def create_vpnservice(self, context, vpnservice):
pass
@abc.abstractmethod
def update_vpnservice(
self, context, old_vpnservice, vpnservice):
pass
@abc.abstractmethod
def delete_vpnservice(self, context, vpnservice):
pass
class BaseIPsecVpnAgentApi(n_rpc.RpcProxy):
"""Base class for IPSec API to agent."""
def __init__(self, to_agent_topic, topic, default_version):
self.to_agent_topic = to_agent_topic
super(BaseIPsecVpnAgentApi, self).__init__(topic, default_version)
def _agent_notification(self, context, method, router_id,
version=None, **kwargs):
"""Notify update for the agent.
This method will find where is the router, and
dispatch notification for the agent.
"""
admin_context = context.is_admin and context or context.elevated()
plugin = manager.NeutronManager.get_service_plugins().get(
constants.L3_ROUTER_NAT)
if not version:
version = self.RPC_API_VERSION
l3_agents = plugin.get_l3_agents_hosting_routers(
admin_context, [router_id],
admin_state_up=True,
active=True)
for l3_agent in l3_agents:
LOG.debug(_('Notify agent at %(topic)s.%(host)s the message '
'%(method)s %(args)s'),
{'topic': self.to_agent_topic,
'host': l3_agent.host,
'method': method,
'args': kwargs})
self.cast(
context, self.make_msg(method, **kwargs),
version=version,
topic='%s.%s' % (self.to_agent_topic, l3_agent.host))
def vpnservice_updated(self, context, router_id, **kwargs):
"""Send update event of vpnservices."""
self._agent_notification(context, 'vpnservice_updated', router_id,
**kwargs)
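# Hypothetical concrete subclass sketch (topic names and version are invented
# for illustration); drivers are expected to pin their own RPC topics and
# version when calling the base constructor:
#
#   class MyIPsecVpnAgentApi(BaseIPsecVpnAgentApi):
#       RPC_API_VERSION = '1.0'
#
#       def __init__(self):
#           super(MyIPsecVpnAgentApi, self).__init__(
#               'my_ipsec_agent_topic', 'my_ipsec_driver_topic',
#               self.RPC_API_VERSION)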
|
{
"content_hash": "f369df29be8a5a31daf92d67ab9d9856",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 74,
"avg_line_length": 33.0253164556962,
"alnum_prop": 0.5990801073208126,
"repo_name": "virtualopensystems/neutron",
"id": "00ee0e7f51f5764052ad6b746a361ded46a55b77",
"size": "3250",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "neutron/services/vpn/service_drivers/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "21914"
},
{
"name": "JavaScript",
"bytes": "60527"
},
{
"name": "Python",
"bytes": "9873662"
},
{
"name": "Shell",
"bytes": "9202"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
}
|
import json
import frappe
from frappe.tests.utils import FrappeTestCase
from frappe.utils import set_request
from frappe.website.doctype.web_form.web_form import accept
from frappe.website.serve import get_response_content
test_dependencies = ["Web Form"]
class TestWebForm(FrappeTestCase):
def setUp(self):
frappe.conf.disable_website_cache = True
frappe.local.path = None
def tearDown(self):
frappe.conf.disable_website_cache = False
frappe.local.path = None
frappe.local.request_ip = None
frappe.form_dict.web_form = None
frappe.form_dict.data = None
frappe.form_dict.docname = None
def test_accept(self):
frappe.set_user("Administrator")
doc = {
"doctype": "Event",
"subject": "_Test Event Web Form",
"description": "_Test Event Description",
"starts_on": "2014-09-09",
}
frappe.form_dict.web_form = "manage-events"
frappe.form_dict.data = json.dumps(doc)
frappe.local.request_ip = "127.0.0.1"
accept(web_form="manage-events", data=json.dumps(doc))
self.event_name = frappe.db.get_value("Event", {"subject": "_Test Event Web Form"})
self.assertTrue(self.event_name)
def test_edit(self):
self.test_accept()
doc = {
"doctype": "Event",
"subject": "_Test Event Web Form",
"description": "_Test Event Description 1",
"starts_on": "2014-09-09",
"name": self.event_name,
}
self.assertNotEqual(
frappe.db.get_value("Event", self.event_name, "description"), doc.get("description")
)
frappe.form_dict.web_form = "manage-events"
frappe.form_dict.docname = self.event_name
frappe.form_dict.data = json.dumps(doc)
accept(web_form="manage-events", docname=self.event_name, data=json.dumps(doc))
self.assertEqual(
frappe.db.get_value("Event", self.event_name, "description"), doc.get("description")
)
def test_webform_render(self):
set_request(method="GET", path="manage-events/new")
content = get_response_content("manage-events/new")
self.assertIn('<h1 class="ellipsis">New Manage Events</h1>', content)
self.assertIn('data-doctype="Web Form"', content)
self.assertIn('data-path="manage-events/new"', content)
self.assertIn('source-type="Generator"', content)
|
{
"content_hash": "3cfb37d1a3142441b2f07abd341980ac",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 87,
"avg_line_length": 29,
"alnum_prop": 0.7029885057471265,
"repo_name": "StrellaGroup/frappe",
"id": "5a2269b64de8cdb96d83f14acf98871cf02315a0",
"size": "2272",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "frappe/website/doctype/web_form/test_web_form.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "65093"
},
{
"name": "HTML",
"bytes": "250858"
},
{
"name": "JavaScript",
"bytes": "2515308"
},
{
"name": "Less",
"bytes": "10921"
},
{
"name": "Python",
"bytes": "3605011"
},
{
"name": "SCSS",
"bytes": "261492"
},
{
"name": "Vue",
"bytes": "98456"
}
],
"symlink_target": ""
}
|
"""Tests for XLA JIT compiler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import googletest
class UnaryOpsTest(XLATestCase):
"""Test cases for unary operators."""
def _assertOpOutputMatchesExpected(self, op, inp, expected,
equality_test=None, rtol=1e-3, atol=1e-5):
"""Verifies that 'op' produces 'expected' when fed input 'inp' .
Args:
op: operator to test
inp: numpy input array to use as input to 'op'.
expected: numpy array representing the expected output of 'op'.
equality_test: either None, or a function that tests two numpy arrays for
equality. If None, self.assertAllClose is used.
rtol: relative tolerance for equality test.
atol: absolute tolerance for equality test.
"""
with self.test_session() as session:
with self.test_scope():
pinp = array_ops.placeholder(
dtypes.as_dtype(inp.dtype), inp.shape, name="a")
output = op(pinp)
result = session.run(output, {pinp: inp})
if equality_test is None:
equality_test = self.assertAllClose
equality_test(result, expected, rtol=rtol, atol=atol)
def ListsAreClose(self, result, expected, rtol, atol):
"""Tests closeness of two lists of floats."""
self.assertEqual(len(result), len(expected))
for i in xrange(len(result)):
self.assertAllClose(result[i], expected[i], rtol, atol)
def testAllTypeOps(self):
for dtype in self.numeric_types:
self._assertOpOutputMatchesExpected(
array_ops.diag,
np.array([1, 2, 3, 4], dtype=dtype),
np.array([[1, 0, 0, 0], [0, 2, 0, 0], [0, 0, 3, 0], [0, 0, 0, 4]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.diag_part,
np.arange(36).reshape([2, 3, 2, 3]).astype(dtype),
np.array([[0, 7, 14], [21, 28, 35]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.identity,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([[-1, 1]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.matrix_diag,
np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([[[1, 0], [0, 2]], [[3, 0], [0, 4]]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.matrix_diag_part,
np.arange(3 * 2 * 4).reshape([3, 2, 4]).astype(dtype),
np.array([[0, 5], [8, 13], [16, 21]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.prevent_gradient,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([[-1, 1]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.squeeze,
np.array([[[[[]]]]], dtype=dtype),
expected=np.array([], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.squeeze,
np.array([[[1], [2]]], dtype=dtype),
expected=np.array([1, 2], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.squeeze,
np.array([[[1]], [[2]]], dtype=dtype),
expected=np.array([1, 2], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.squeeze,
np.array([[[1, 2], [3, 4]]], dtype=dtype),
expected=np.array([[1, 2], [3, 4]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.stop_gradient,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([[-1, 1]], dtype=dtype))
def testFloatOps(self):
for dtype in self.float_types:
self._assertOpOutputMatchesExpected(
math_ops.ceil,
np.array([[-1.7, 1.2]], dtype=dtype),
expected=np.array([[-1, 2]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.exp,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([[0.36787945, 2.7182817]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.floor,
np.array([[-1.7, 1.2]], dtype=dtype),
expected=np.array([[-2, 1]], dtype=dtype))
# Tests for tf.nn ops.
self._assertOpOutputMatchesExpected(
nn_ops.l2_loss, np.array([[[]]], dtype=dtype), expected=dtype(0))
# TODO(b/31644876): enable this test case when fixed.
# self._assertOpOutputMatchesExpected(tf.nn.l2_loss, dtype(4), dtype(10))
self._assertOpOutputMatchesExpected(
nn_ops.l2_loss, np.array([[-2, 4]], dtype=dtype), expected=dtype(10))
self._assertOpOutputMatchesExpected(
math_ops.reciprocal,
np.array([[1, 2]], dtype=dtype),
expected=np.array([[1, 0.5]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.log,
np.array([[1, 2]], dtype=dtype),
expected=np.array([[0, 0.69314718]], dtype=dtype))
# TODO(b/34703906): improve log1p implementation and make tolerance
# tighter.
self._assertOpOutputMatchesExpected(
math_ops.log1p,
np.array([[1e-14, 1e-15, 0.6]], dtype=dtype),
expected=np.log1p(np.array([[1e-14, 1e-15, 0.6]], dtype=dtype)))
self._assertOpOutputMatchesExpected(
math_ops.round,
np.array([[-1.7, 1.2, 4.0, 0.0], [-3.5, -2.5, -1.5, -0.5],
[0.5, 1.5, 2.5, 3.5]], dtype=dtype),
expected=np.array([[-2, 1, 4, 0], [-4, -2, -2, 0], [0, 2, 2, 4]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.rsqrt,
np.array([[4, 16]], dtype=dtype),
expected=np.array([[0.5, 0.25]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.sigmoid,
np.array(
[[1, 1, 1, 1],
[1, 2, 3, 4]],
dtype=dtype),
expected=np.array(
[[0.7310586, 0.7310586, 0.7310586, 0.7310586],
[0.7310586, 0.880797, 0.95257413, 0.98201376]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.sqrt,
np.array([[4, 9]], dtype=dtype),
expected=np.array([[2, 3]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.tanh,
np.array(
[[1, 1, 1, 1],
[1, 2, 3, 4]],
dtype=dtype),
expected=np.array(
[[0.76159418, 0.76159418, 0.76159418, 0.76159418],
[0.76159418, 0.96402758, 0.99505478, 0.99932933]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
nn_ops.log_softmax,
np.array(
[[1, 1, 1, 1],
[1, 2, 3, 4]],
dtype=dtype),
expected=np.array(
[[-1.3862944, -1.3862944, -1.3862944, -1.3862944],
[-3.4401896, -2.4401896, -1.4401897, -0.44018969]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
nn_ops.relu,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([[0, 1]], dtype=dtype))
self._assertOpOutputMatchesExpected(
nn_ops.relu6,
np.array([[-0.05, 6.05, 5]], dtype=dtype),
expected=np.array([[0, 6, 5]], dtype=dtype))
self._assertOpOutputMatchesExpected(
nn_ops.softmax,
np.array(
[[1, 1, 1, 1],
[1, 2, 3, 4]],
dtype=dtype),
expected=np.array(
[[0.25, 0.25, 0.25, 0.25],
[0.032058604, 0.087144323, 0.23688284, 0.64391428]],
dtype=dtype))
self._assertOpOutputMatchesExpected(
nn_ops.softplus,
np.array([[-2, 0, 8]], dtype=dtype),
expected=np.array([[0.126928, 0.6931472, 8.0003354]], dtype=dtype))
def testNumericOps(self):
for dtype in self.numeric_types:
self._assertOpOutputMatchesExpected(
math_ops.abs,
np.array([[2, -1]], dtype=dtype),
expected=np.array([[2, 1]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.negative,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([[1, -1]], dtype=dtype))
self._assertOpOutputMatchesExpected(
math_ops.square,
np.array([[-2, 3]], dtype=dtype),
expected=np.array([[4, 9]], dtype=dtype))
self._assertOpOutputMatchesExpected(
array_ops.zeros_like,
np.array([[4, 3], [2, 1]], dtype=dtype),
expected=np.array([[0, 0], [0, 0]], dtype=dtype))
def testLogicalOps(self):
self._assertOpOutputMatchesExpected(
math_ops.logical_not,
np.array([[True, False], [False, True]], dtype=np.bool),
expected=np.array([[False, True], [True, False]], dtype=np.bool))
def testBiasAddGrad(self):
self._assertOpOutputMatchesExpected(
gen_nn_ops.bias_add_grad,
np.array([[1., 2.], [3., 4.]], dtype=np.float32),
expected=np.array([4., 6.], dtype=np.float32))
self._assertOpOutputMatchesExpected(
lambda x: gen_nn_ops.bias_add_grad(x, data_format="NCHW"),
np.array([[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]]],
dtype=np.float32),
expected=np.array([10., 26.], dtype=np.float32))
def testCast(self):
shapes = [[], [4], [2, 3], [2, 0, 4]]
types = [dtypes.bool, dtypes.int32, dtypes.float32]
for shape in shapes:
for src_type in types:
for dst_type in types:
src = np.arange(np.prod(shape)).astype(src_type.as_numpy_dtype)
src = src.reshape(shape)
dst = src.astype(dst_type.as_numpy_dtype)
self._assertOpOutputMatchesExpected(
lambda x, dst_type=dst_type: math_ops.cast(x, dst_type),
src,
expected=dst)
def testInvertPermutation(self):
self._assertOpOutputMatchesExpected(
array_ops.invert_permutation,
np.array([1, 2, 0], np.int32),
expected=np.array([2, 0, 1], dtype=np.int32))
def testRank(self):
rank_op = lambda x: array_ops.rank_internal(x, optimize=False)
for dtype in self.numeric_types:
self._assertOpOutputMatchesExpected(
rank_op, dtype(7), expected=np.int32(0))
self._assertOpOutputMatchesExpected(
rank_op, np.array(
[[], []], dtype=dtype), expected=np.int32(2))
self._assertOpOutputMatchesExpected(
rank_op, np.array(
[-1, 1], dtype=dtype), expected=np.int32(1))
self._assertOpOutputMatchesExpected(
rank_op, np.array(
[[-1, 1]], dtype=dtype), expected=np.int32(2))
self._assertOpOutputMatchesExpected(
rank_op,
np.array([[-1], [1], [4]], dtype=dtype),
expected=np.int32(2))
def testShape(self):
shape_op = lambda x: array_ops.shape_internal(x, optimize=False)
for dtype in self.numeric_types:
self._assertOpOutputMatchesExpected(
shape_op, dtype(7), expected=np.array([], dtype=np.int32))
self._assertOpOutputMatchesExpected(
shape_op,
np.array([[], []], dtype=dtype),
expected=np.array([2, 0], dtype=np.int32))
self._assertOpOutputMatchesExpected(
shape_op,
np.array([-1, 1], dtype=dtype),
expected=np.array([2], dtype=np.int32))
self._assertOpOutputMatchesExpected(
shape_op,
np.array([[-1, 1]], dtype=dtype),
expected=np.array([1, 2], dtype=np.int32))
self._assertOpOutputMatchesExpected(
shape_op,
np.array([[-1], [1], [4]], dtype=dtype),
expected=np.array([3, 1], dtype=np.int32))
def testSize(self):
size_op = lambda x: array_ops.size_internal(x, optimize=False)
for dtype in self.numeric_types:
self._assertOpOutputMatchesExpected(
size_op, dtype(7), expected=np.int32(1))
self._assertOpOutputMatchesExpected(
size_op, np.array([[], []], dtype=dtype), expected=np.int32(0))
self._assertOpOutputMatchesExpected(
size_op, np.array([-1, 1], dtype=dtype), expected=np.int32(2))
self._assertOpOutputMatchesExpected(
size_op, np.array([[-1, 1]], dtype=dtype), expected=np.int32(2))
self._assertOpOutputMatchesExpected(
size_op,
np.array([[-1], [1], [4]], dtype=dtype),
expected=np.int32(3))
def testUnpack(self):
self._assertOpOutputMatchesExpected(
array_ops.unstack,
np.array([[1., 2.], [3., 4.], [5., 6.]], dtype=np.float32),
expected=[
np.array([1., 2.], dtype=np.float32),
np.array([3., 4.], dtype=np.float32),
np.array([5., 6.], dtype=np.float32),
],
equality_test=self.ListsAreClose)
self._assertOpOutputMatchesExpected(
lambda x: array_ops.unstack(x, axis=1),
np.array([[1., 2.], [3., 4.], [5., 6.]], dtype=np.float32),
expected=[
np.array([1., 3., 5.], dtype=np.float32),
np.array([2., 4., 6.], dtype=np.float32),
],
equality_test=self.ListsAreClose)
if __name__ == "__main__":
googletest.main()
|
{
"content_hash": "ac0a51fae3e63eb132905a5918c89b57",
"timestamp": "",
"source": "github",
"line_count": 364,
"max_line_length": 79,
"avg_line_length": 37.357142857142854,
"alnum_prop": 0.5679511692896014,
"repo_name": "MoamerEncsConcordiaCa/tensorflow",
"id": "c96826fd0a64b2d8fb02da22cfdc72edbb674317",
"size": "14287",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/compiler/tests/unary_ops_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7583"
},
{
"name": "C",
"bytes": "176871"
},
{
"name": "C++",
"bytes": "22197536"
},
{
"name": "CMake",
"bytes": "137754"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "786935"
},
{
"name": "HTML",
"bytes": "579704"
},
{
"name": "Java",
"bytes": "286255"
},
{
"name": "JavaScript",
"bytes": "13406"
},
{
"name": "Jupyter Notebook",
"bytes": "1833623"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37227"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "64656"
},
{
"name": "Protocol Buffer",
"bytes": "207866"
},
{
"name": "Python",
"bytes": "19632899"
},
{
"name": "Shell",
"bytes": "334269"
},
{
"name": "TypeScript",
"bytes": "786973"
}
],
"symlink_target": ""
}
|
"""Tests for TFGAN losses."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.gan.python.losses.python import losses_impl as tfgan_losses
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.distributions import categorical
from tensorflow.python.ops.distributions import normal
from tensorflow.python.ops.losses import losses as tf_losses
from tensorflow.python.platform import test
# TODO(joelshor): Use `parameterized` tests when opensourced.
class _LossesTest(object):
def init_constants(self):
self._discriminator_real_outputs_np = [-5.0, 1.4, 12.5, 2.7]
self._discriminator_gen_outputs_np = [10.0, 4.4, -5.5, 3.6]
self._weights = 2.3
self._discriminator_real_outputs = constant_op.constant(
self._discriminator_real_outputs_np, dtype=dtypes.float32)
self._discriminator_gen_outputs = constant_op.constant(
self._discriminator_gen_outputs_np, dtype=dtypes.float32)
def test_generator_all_correct(self):
loss = self._g_loss_fn(self._discriminator_gen_outputs)
self.assertEqual(self._discriminator_gen_outputs.dtype, loss.dtype)
self.assertEqual(self._generator_loss_name, loss.op.name)
with self.test_session():
self.assertAlmostEqual(self._expected_g_loss, loss.eval(), 5)
def test_discriminator_all_correct(self):
loss = self._d_loss_fn(
self._discriminator_real_outputs, self._discriminator_gen_outputs)
self.assertEqual(self._discriminator_gen_outputs.dtype, loss.dtype)
self.assertEqual(self._discriminator_loss_name, loss.op.name)
with self.test_session():
self.assertAlmostEqual(self._expected_d_loss, loss.eval(), 5)
def test_generator_loss_collection(self):
self.assertEqual(0, len(ops.get_collection('collection')))
self._g_loss_fn(
self._discriminator_gen_outputs, loss_collection='collection')
self.assertEqual(1, len(ops.get_collection('collection')))
def test_discriminator_loss_collection(self):
self.assertEqual(0, len(ops.get_collection('collection')))
self._d_loss_fn(
self._discriminator_real_outputs, self._discriminator_gen_outputs,
loss_collection='collection')
self.assertEqual(1, len(ops.get_collection('collection')))
def test_generator_no_reduction(self):
loss = self._g_loss_fn(
self._discriminator_gen_outputs, reduction=tf_losses.Reduction.NONE)
self.assertAllEqual([4], loss.shape)
def test_discriminator_no_reduction(self):
loss = self._d_loss_fn(
self._discriminator_real_outputs, self._discriminator_gen_outputs,
reduction=tf_losses.Reduction.NONE)
self.assertAllEqual([4], loss.shape)
def test_generator_patch(self):
loss = self._g_loss_fn(
array_ops.reshape(self._discriminator_gen_outputs, [2, 2]))
self.assertEqual(self._discriminator_gen_outputs.dtype, loss.dtype)
with self.test_session():
self.assertAlmostEqual(self._expected_g_loss, loss.eval(), 5)
def test_discriminator_patch(self):
loss = self._d_loss_fn(
array_ops.reshape(self._discriminator_real_outputs, [2, 2]),
array_ops.reshape(self._discriminator_gen_outputs, [2, 2]))
self.assertEqual(self._discriminator_gen_outputs.dtype, loss.dtype)
with self.test_session():
self.assertAlmostEqual(self._expected_d_loss, loss.eval(), 5)
def test_generator_loss_with_placeholder_for_logits(self):
logits = array_ops.placeholder(dtypes.float32, shape=(None, 4))
weights = array_ops.ones_like(logits, dtype=dtypes.float32)
loss = self._g_loss_fn(logits, weights=weights)
self.assertEqual(logits.dtype, loss.dtype)
with self.test_session() as sess:
loss = sess.run(loss,
feed_dict={
logits: [[10.0, 4.4, -5.5, 3.6]],
})
self.assertAlmostEqual(self._expected_g_loss, loss, 5)
def test_discriminator_loss_with_placeholder_for_logits(self):
logits = array_ops.placeholder(dtypes.float32, shape=(None, 4))
logits2 = array_ops.placeholder(dtypes.float32, shape=(None, 4))
real_weights = array_ops.ones_like(logits, dtype=dtypes.float32)
generated_weights = array_ops.ones_like(logits, dtype=dtypes.float32)
loss = self._d_loss_fn(
logits, logits2, real_weights=real_weights,
generated_weights=generated_weights)
with self.test_session() as sess:
loss = sess.run(loss,
feed_dict={
logits: [self._discriminator_real_outputs_np],
logits2: [self._discriminator_gen_outputs_np],
})
self.assertAlmostEqual(self._expected_d_loss, loss, 5)
def test_generator_with_python_scalar_weight(self):
loss = self._g_loss_fn(
self._discriminator_gen_outputs, weights=self._weights)
with self.test_session():
self.assertAlmostEqual(self._expected_g_loss * self._weights,
loss.eval(), 4)
def test_discriminator_with_python_scalar_weight(self):
loss = self._d_loss_fn(
self._discriminator_real_outputs, self._discriminator_gen_outputs,
real_weights=self._weights, generated_weights=self._weights)
with self.test_session():
self.assertAlmostEqual(self._expected_d_loss * self._weights,
loss.eval(), 4)
def test_generator_with_scalar_tensor_weight(self):
loss = self._g_loss_fn(self._discriminator_gen_outputs,
weights=constant_op.constant(self._weights))
with self.test_session():
self.assertAlmostEqual(self._expected_g_loss * self._weights,
loss.eval(), 4)
def test_discriminator_with_scalar_tensor_weight(self):
weights = constant_op.constant(self._weights)
loss = self._d_loss_fn(
self._discriminator_real_outputs, self._discriminator_gen_outputs,
real_weights=weights, generated_weights=weights)
with self.test_session():
self.assertAlmostEqual(self._expected_d_loss * self._weights,
loss.eval(), 4)
def test_generator_add_summaries(self):
self.assertEqual(0, len(ops.get_collection(ops.GraphKeys.SUMMARIES)))
self._g_loss_fn(self._discriminator_gen_outputs, add_summaries=True)
self.assertLess(0, len(ops.get_collection(ops.GraphKeys.SUMMARIES)))
def test_discriminator_add_summaries(self):
self.assertEqual(0, len(ops.get_collection(ops.GraphKeys.SUMMARIES)))
self._d_loss_fn(
self._discriminator_real_outputs, self._discriminator_gen_outputs,
add_summaries=True)
self.assertLess(0, len(ops.get_collection(ops.GraphKeys.SUMMARIES)))
class LeastSquaresLossTest(test.TestCase, _LossesTest):
"""Tests for least_squares_xxx_loss."""
def setUp(self):
super(LeastSquaresLossTest, self).setUp()
self.init_constants()
self._expected_g_loss = 17.69625
self._expected_d_loss = 41.73375
self._generator_loss_name = 'lsq_generator_loss/value'
self._discriminator_loss_name = 'lsq_discriminator_loss/add'
self._g_loss_fn = tfgan_losses.least_squares_generator_loss
self._d_loss_fn = tfgan_losses.least_squares_discriminator_loss
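# Hedged sanity check (editor's addition, not part of the original test file):
# the expected-loss constants above follow from the standard least-squares
# (LSGAN) formulation, assuming
#   G loss = 0.5 * mean((D(G(z)) - 1)^2)
#   D loss = 0.5 * (mean((D(x) - 1)^2) + mean(D(G(z))^2))
# evaluated on the fixture logits from _LossesTest.init_constants().
def _expected_least_squares_losses(real=(-5.0, 1.4, 12.5, 2.7),
                                   gen=(10.0, 4.4, -5.5, 3.6)):
  g_loss = 0.5 * sum((d - 1.0) ** 2 for d in gen) / len(gen)
  d_loss = 0.5 * (sum((d - 1.0) ** 2 for d in real) / len(real) +
                  sum(d ** 2 for d in gen) / len(gen))
  return g_loss, d_loss  # (17.69625, 41.73375)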
class ModifiedLossTest(test.TestCase, _LossesTest):
"""Tests for modified_xxx_loss."""
def setUp(self):
super(ModifiedLossTest, self).setUp()
self.init_constants()
self._expected_g_loss = 1.38582
self._expected_d_loss = 6.19637
self._generator_loss_name = 'generator_modified_loss/value'
self._discriminator_loss_name = 'discriminator_modified_loss/add_1'
self._g_loss_fn = tfgan_losses.modified_generator_loss
self._d_loss_fn = tfgan_losses.modified_discriminator_loss
class MinimaxLossTest(test.TestCase, _LossesTest):
"""Tests for minimax_xxx_loss."""
def setUp(self):
super(MinimaxLossTest, self).setUp()
self.init_constants()
self._expected_g_loss = -4.82408
self._expected_d_loss = 6.19637
self._generator_loss_name = 'generator_minimax_loss/Neg'
self._discriminator_loss_name = 'discriminator_minimax_loss/add_1'
self._g_loss_fn = tfgan_losses.minimax_generator_loss
self._d_loss_fn = tfgan_losses.minimax_discriminator_loss
class WassersteinLossTest(test.TestCase, _LossesTest):
"""Tests for wasserstein_xxx_loss."""
def setUp(self):
super(WassersteinLossTest, self).setUp()
self.init_constants()
self._expected_g_loss = -3.12500
self._expected_d_loss = 0.22500
self._generator_loss_name = 'generator_wasserstein_loss/value'
self._discriminator_loss_name = 'discriminator_wasserstein_loss/sub'
self._g_loss_fn = tfgan_losses.wasserstein_generator_loss
self._d_loss_fn = tfgan_losses.wasserstein_discriminator_loss
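# Hedged sanity check (editor's addition): under the usual Wasserstein losses,
#   G loss = -mean(D(G(z))),  D loss = mean(D(G(z))) - mean(D(x)),
# the fixture logits give -3.12500 and 0.22500, matching the constants above.
def _expected_wasserstein_losses(real=(-5.0, 1.4, 12.5, 2.7),
                                 gen=(10.0, 4.4, -5.5, 3.6)):
  g_loss = -sum(gen) / len(gen)                           # -3.125
  d_loss = sum(gen) / len(gen) - sum(real) / len(real)    # 0.225
  return g_loss, d_loss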
# TODO(joelshor): Use `parameterized` tests when opensourced.
# TODO(joelshor): Refactor this test to use the same code as the other losses.
class ACGANLossTest(test.TestCase):
"""Tests for wasserstein_xxx_loss."""
def setUp(self):
super(ACGANLossTest, self).setUp()
self._g_loss_fn = tfgan_losses.acgan_generator_loss
self._d_loss_fn = tfgan_losses.acgan_discriminator_loss
self._discriminator_gen_classification_logits_np = [[10.0, 4.4, -5.5, 3.6],
[-4.0, 4.4, 5.2, 4.6],
[1.1, 2.4, -3.5, 5.6],
[1.1, 2.4, -3.5, 5.6]]
self._discriminator_real_classification_logits_np = [[-2.0, 0.4, 12.5, 2.7],
[-1.2, 1.9, 12.3, 2.6],
[-2.4, -1.7, 2.5, 2.7],
[1.1, 2.4, -3.5, 5.6]]
self._one_hot_labels_np = [[0, 1, 0, 0],
[0, 0, 1, 0],
[1, 0, 0, 0],
[1, 0, 0, 0]]
self._weights = 2.3
self._discriminator_gen_classification_logits = constant_op.constant(
self._discriminator_gen_classification_logits_np, dtype=dtypes.float32)
self._discriminator_real_classification_logits = constant_op.constant(
self._discriminator_real_classification_logits_np, dtype=dtypes.float32)
self._one_hot_labels = constant_op.constant(
self._one_hot_labels_np, dtype=dtypes.float32)
self._generator_kwargs = {
'discriminator_gen_classification_logits':
self._discriminator_gen_classification_logits,
'one_hot_labels': self._one_hot_labels,
}
self._discriminator_kwargs = {
'discriminator_gen_classification_logits':
self._discriminator_gen_classification_logits,
'discriminator_real_classification_logits':
self._discriminator_real_classification_logits,
'one_hot_labels': self._one_hot_labels,
}
self._generator_loss_name = 'softmax_cross_entropy_loss/value'
self._discriminator_loss_name = 'add'
self._expected_g_loss = 3.84974
self._expected_d_loss = 9.43950
def test_generator_all_correct(self):
loss = self._g_loss_fn(**self._generator_kwargs)
self.assertEqual(
self._discriminator_gen_classification_logits.dtype, loss.dtype)
self.assertEqual(self._generator_loss_name, loss.op.name)
with self.test_session():
self.assertAlmostEqual(self._expected_g_loss, loss.eval(), 5)
def test_discriminator_all_correct(self):
loss = self._d_loss_fn(**self._discriminator_kwargs)
self.assertEqual(
self._discriminator_gen_classification_logits.dtype, loss.dtype)
self.assertEqual(self._discriminator_loss_name, loss.op.name)
with self.test_session():
self.assertAlmostEqual(self._expected_d_loss, loss.eval(), 5)
def test_generator_loss_collection(self):
self.assertEqual(0, len(ops.get_collection('collection')))
self._g_loss_fn(loss_collection='collection', **self._generator_kwargs)
self.assertEqual(1, len(ops.get_collection('collection')))
def test_discriminator_loss_collection(self):
self.assertEqual(0, len(ops.get_collection('collection')))
self._d_loss_fn(loss_collection='collection', **self._discriminator_kwargs)
self.assertEqual(1, len(ops.get_collection('collection')))
def test_generator_no_reduction(self):
loss = self._g_loss_fn(
reduction=tf_losses.Reduction.NONE, **self._generator_kwargs)
self.assertAllEqual([4], loss.shape)
def test_discriminator_no_reduction(self):
loss = self._d_loss_fn(
reduction=tf_losses.Reduction.NONE, **self._discriminator_kwargs)
self.assertAllEqual([4], loss.shape)
def test_generator_patch(self):
patch_args = {x: array_ops.reshape(y, [2, 2, 4]) for x, y in
self._generator_kwargs.items()}
loss = self._g_loss_fn(**patch_args)
with self.test_session():
self.assertAlmostEqual(self._expected_g_loss, loss.eval(), 5)
def test_discriminator_patch(self):
patch_args = {x: array_ops.reshape(y, [2, 2, 4]) for x, y in
self._discriminator_kwargs.items()}
loss = self._d_loss_fn(**patch_args)
with self.test_session():
self.assertAlmostEqual(self._expected_d_loss, loss.eval(), 5)
def test_generator_loss_with_placeholder_for_logits(self):
gen_logits = array_ops.placeholder(dtypes.float32, shape=(None, 4))
one_hot_labels = array_ops.placeholder(dtypes.int32, shape=(None, 4))
loss = self._g_loss_fn(gen_logits, one_hot_labels)
with self.test_session() as sess:
loss = sess.run(
loss, feed_dict={
gen_logits: self._discriminator_gen_classification_logits_np,
one_hot_labels: self._one_hot_labels_np,
})
self.assertAlmostEqual(self._expected_g_loss, loss, 5)
def test_discriminator_loss_with_placeholder_for_logits_and_weights(self):
gen_logits = array_ops.placeholder(dtypes.float32, shape=(None, 4))
real_logits = array_ops.placeholder(dtypes.float32, shape=(None, 4))
one_hot_labels = array_ops.placeholder(dtypes.int32, shape=(None, 4))
loss = self._d_loss_fn(gen_logits, real_logits, one_hot_labels)
with self.test_session() as sess:
loss = sess.run(
loss, feed_dict={
gen_logits: self._discriminator_gen_classification_logits_np,
real_logits: self._discriminator_real_classification_logits_np,
one_hot_labels: self._one_hot_labels_np,
})
self.assertAlmostEqual(self._expected_d_loss, loss, 5)
def test_generator_with_python_scalar_weight(self):
loss = self._g_loss_fn(weights=self._weights, **self._generator_kwargs)
with self.test_session():
self.assertAlmostEqual(self._expected_g_loss * self._weights,
loss.eval(), 4)
def test_discriminator_with_python_scalar_weight(self):
loss = self._d_loss_fn(
real_weights=self._weights, generated_weights=self._weights,
**self._discriminator_kwargs)
with self.test_session():
self.assertAlmostEqual(self._expected_d_loss * self._weights,
loss.eval(), 4)
def test_generator_with_scalar_tensor_weight(self):
loss = self._g_loss_fn(
weights=constant_op.constant(self._weights), **self._generator_kwargs)
with self.test_session():
self.assertAlmostEqual(self._expected_g_loss * self._weights,
loss.eval(), 4)
def test_discriminator_with_scalar_tensor_weight(self):
weights = constant_op.constant(self._weights)
loss = self._d_loss_fn(real_weights=weights, generated_weights=weights,
**self._discriminator_kwargs)
with self.test_session():
self.assertAlmostEqual(self._expected_d_loss * self._weights,
loss.eval(), 4)
def test_generator_add_summaries(self):
self.assertEqual(0, len(ops.get_collection(ops.GraphKeys.SUMMARIES)))
self._g_loss_fn(add_summaries=True, **self._generator_kwargs)
self.assertLess(0, len(ops.get_collection(ops.GraphKeys.SUMMARIES)))
def test_discriminator_add_summaries(self):
self.assertEqual(0, len(ops.get_collection(ops.GraphKeys.SUMMARIES)))
self._d_loss_fn(add_summaries=True, **self._discriminator_kwargs)
self.assertLess(0, len(ops.get_collection(ops.GraphKeys.SUMMARIES)))
class _PenaltyTest(object):
def test_all_correct(self):
loss = self._penalty_fn(**self._kwargs)
self.assertEqual(self._expected_dtype, loss.dtype)
self.assertEqual(self._expected_op_name, loss.op.name)
with self.test_session():
variables.global_variables_initializer().run()
self.assertAlmostEqual(self._expected_loss, loss.eval(), 6)
def test_loss_collection(self):
self.assertEqual(0, len(ops.get_collection('collection')))
self._penalty_fn(loss_collection='collection', **self._kwargs)
self.assertEqual(1, len(ops.get_collection('collection')))
def test_no_reduction(self):
loss = self._penalty_fn(reduction=tf_losses.Reduction.NONE, **self._kwargs)
self.assertAllEqual([self._batch_size], loss.shape)
def test_python_scalar_weight(self):
loss = self._penalty_fn(weights=2.3, **self._kwargs)
with self.test_session():
variables.global_variables_initializer().run()
self.assertAlmostEqual(self._expected_loss * 2.3, loss.eval(), 3)
def test_scalar_tensor_weight(self):
loss = self._penalty_fn(weights=constant_op.constant(2.3), **self._kwargs)
with self.test_session():
variables.global_variables_initializer().run()
self.assertAlmostEqual(self._expected_loss * 2.3, loss.eval(), 3)
class GradientPenaltyTest(test.TestCase, _PenaltyTest):
"""Tests for wasserstein_gradient_penalty."""
def setUp(self):
super(GradientPenaltyTest, self).setUp()
self._penalty_fn = tfgan_losses.wasserstein_gradient_penalty
self._generated_data_np = [[3.1, 2.3, -12.3, 32.1]]
self._real_data_np = [[-12.3, 23.2, 16.3, -43.2]]
self._expected_dtype = dtypes.float32
with variable_scope.variable_scope('fake_scope') as self._scope:
self._discriminator_fn(0.0, 0.0)
self._kwargs = {
'generated_data': constant_op.constant(
self._generated_data_np, dtype=self._expected_dtype),
'real_data': constant_op.constant(
self._real_data_np, dtype=self._expected_dtype),
'generator_inputs': None,
'discriminator_fn': self._discriminator_fn,
'discriminator_scope': self._scope,
}
self._expected_loss = 9.00000
self._expected_op_name = 'weighted_loss/value'
self._batch_size = 1
def _discriminator_fn(self, inputs, _):
return variable_scope.get_variable('dummy_d', initializer=2.0) * inputs
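  # Hedged derivation of self._expected_loss (editor's addition, not used by
  # the tests): with the dummy discriminator D(x) = 2.0 * x, dD/dx is 2.0 in
  # each of the 4 input dimensions, so the per-example gradient norm is
  # sqrt(4 * 2.0**2) = 4.0 and the usual penalty (||grad|| - 1)**2 is 9.0.
  def _expected_penalty(self, slope=2.0, dims=4):
    grad_norm = (dims * slope ** 2) ** 0.5
    return (grad_norm - 1.0) ** 2  # 9.0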
def test_loss_with_placeholder(self):
generated_data = array_ops.placeholder(dtypes.float32, shape=(None, None))
real_data = array_ops.placeholder(dtypes.float32, shape=(None, None))
loss = tfgan_losses.wasserstein_gradient_penalty(
generated_data,
real_data,
self._kwargs['generator_inputs'],
self._kwargs['discriminator_fn'],
self._kwargs['discriminator_scope'])
self.assertEqual(generated_data.dtype, loss.dtype)
with self.test_session() as sess:
variables.global_variables_initializer().run()
loss = sess.run(loss,
feed_dict={
generated_data: self._generated_data_np,
real_data: self._real_data_np,
})
self.assertAlmostEqual(self._expected_loss, loss, 5)
def test_reuses_scope(self):
"""Test that gradient penalty reuses discriminator scope."""
num_vars = len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
tfgan_losses.wasserstein_gradient_penalty(**self._kwargs)
self.assertEqual(
num_vars, len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
class MutualInformationPenaltyTest(test.TestCase, _PenaltyTest):
"""Tests for mutual_information_penalty."""
def setUp(self):
super(MutualInformationPenaltyTest, self).setUp()
self._penalty_fn = tfgan_losses.mutual_information_penalty
self._structured_generator_inputs = [1.0, 2.0]
self._predicted_distributions = [categorical.Categorical(logits=[1.0, 2.0]),
normal.Normal([0.0], [1.0])]
self._expected_dtype = dtypes.float32
self._kwargs = {
'structured_generator_inputs': self._structured_generator_inputs,
'predicted_distributions': self._predicted_distributions,
}
self._expected_loss = 1.61610
self._expected_op_name = 'mul'
self._batch_size = 2
class CombineAdversarialLossTest(test.TestCase):
"""Tests for combine_adversarial_loss."""
def setUp(self):
super(CombineAdversarialLossTest, self).setUp()
self._generated_data_np = [[3.1, 2.3, -12.3, 32.1]]
self._real_data_np = [[-12.3, 23.2, 16.3, -43.2]]
self._generated_data = constant_op.constant(
self._generated_data_np, dtype=dtypes.float32)
self._real_data = constant_op.constant(
self._real_data_np, dtype=dtypes.float32)
self._generated_inputs = None
self._expected_loss = 9.00000
def _test_correct_helper(self, use_weight_factor):
variable_list = [variables.Variable(1.0)]
main_loss = variable_list[0] * 2
adversarial_loss = variable_list[0] * 3
gradient_ratio_epsilon = 1e-6
if use_weight_factor:
weight_factor = constant_op.constant(2.0)
gradient_ratio = None
adv_coeff = 2.0
expected_loss = 1.0 * 2 + adv_coeff * 1.0 * 3
else:
weight_factor = None
gradient_ratio = constant_op.constant(0.5)
adv_coeff = 2.0 / (3 * 0.5 + gradient_ratio_epsilon)
expected_loss = 1.0 * 2 + adv_coeff * 1.0 * 3
combined_loss = tfgan_losses.combine_adversarial_loss(
main_loss,
adversarial_loss,
weight_factor=weight_factor,
gradient_ratio=gradient_ratio,
gradient_ratio_epsilon=gradient_ratio_epsilon,
variables=variable_list)
with self.test_session(use_gpu=True):
variables.global_variables_initializer().run()
self.assertNear(expected_loss, combined_loss.eval(), 1e-5)
def test_correct_useweightfactor(self):
self._test_correct_helper(True)
def test_correct_nouseweightfactor(self):
self._test_correct_helper(False)
def _test_no_weight_skips_adversarial_loss_helper(self, use_weight_factor):
"""Test the 0 adversarial weight or grad ratio skips adversarial loss."""
main_loss = constant_op.constant(1.0)
adversarial_loss = constant_op.constant(1.0)
weight_factor = 0.0 if use_weight_factor else None
gradient_ratio = None if use_weight_factor else 0.0
combined_loss = tfgan_losses.combine_adversarial_loss(
main_loss,
adversarial_loss,
weight_factor=weight_factor,
gradient_ratio=gradient_ratio,
gradient_summaries=False)
with self.test_session(use_gpu=True):
self.assertEqual(1.0, combined_loss.eval())
def test_no_weight_skips_adversarial_loss_useweightfactor(self):
self._test_no_weight_skips_adversarial_loss_helper(True)
def test_no_weight_skips_adversarial_loss_nouseweightfactor(self):
self._test_no_weight_skips_adversarial_loss_helper(False)
def test_stable_global_norm_avoids_overflow(self):
tensors = [array_ops.ones([4]), array_ops.ones([4, 4]) * 1e19, None]
gnorm_is_inf = math_ops.is_inf(clip_ops.global_norm(tensors))
stable_gnorm_is_inf = math_ops.is_inf(
tfgan_losses._numerically_stable_global_norm(tensors))
with self.test_session(use_gpu=True):
self.assertTrue(gnorm_is_inf.eval())
self.assertFalse(stable_gnorm_is_inf.eval())
def test_stable_global_norm_unchanged(self):
"""Test that preconditioning doesn't change global norm value."""
random_seed.set_random_seed(1234)
tensors = [random_ops.random_uniform([3]*i, -10.0, 10.0) for i in range(6)]
gnorm = clip_ops.global_norm(tensors)
precond_gnorm = tfgan_losses._numerically_stable_global_norm(tensors)
with self.test_session(use_gpu=True) as sess:
for _ in range(10): # spot check closeness on more than one sample.
gnorm_np, precond_gnorm_np = sess.run([gnorm, precond_gnorm])
self.assertNear(gnorm_np, precond_gnorm_np, 1e-5)
if __name__ == '__main__':
test.main()
|
{
"content_hash": "1002e4f4f72f6c88890e8c1c411d96fb",
"timestamp": "",
"source": "github",
"line_count": 592,
"max_line_length": 83,
"avg_line_length": 42.0152027027027,
"alnum_prop": 0.6645760463152817,
"repo_name": "tornadozou/tensorflow",
"id": "3e003dd0f808f80dcc486e78e8e101ac6f198947",
"size": "25562",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/gan/python/losses/python/losses_impl_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8458"
},
{
"name": "C",
"bytes": "201402"
},
{
"name": "C++",
"bytes": "29734773"
},
{
"name": "CMake",
"bytes": "647266"
},
{
"name": "Go",
"bytes": "976912"
},
{
"name": "Java",
"bytes": "412117"
},
{
"name": "Jupyter Notebook",
"bytes": "1833675"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "38128"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Perl",
"bytes": "6715"
},
{
"name": "Protocol Buffer",
"bytes": "276756"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "26531000"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "373122"
}
],
"symlink_target": ""
}
|
"""Data preparation code for DDSP models."""
from ddsp.training.data_preparation import prepare_tfrecord_lib
|
{
"content_hash": "6160f26f73cddc2d9aa2bc4f28835ff9",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 63,
"avg_line_length": 36.666666666666664,
"alnum_prop": 0.7909090909090909,
"repo_name": "magenta/ddsp",
"id": "d768d14677e73da856c2ec50ef7ae2fa447d34b7",
"size": "692",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "ddsp/training/data_preparation/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "907"
},
{
"name": "Jupyter Notebook",
"bytes": "196182"
},
{
"name": "Python",
"bytes": "666327"
},
{
"name": "Shell",
"bytes": "300"
}
],
"symlink_target": ""
}
|
from math import ceil
from models.raster import Raster
import shapefile
# pandas is assumed here for loading the OD table used in od_to_intersections
import pandas
import random
import os
class NetworkUtils(object):
'''
Utilities for converting road networks into nodes and edges.
created: Feb 13, 2017 by abdinoor
'''
KPH_TO_MPH = 0.621371
METERS_TO_MPH = KPH_TO_MPH * 0.001
@classmethod
def create_network_files(cls, nodes_shapefile, edges_shapefile):
''' Convert shapefiles into format needed for Centroid. '''
nodes_output_file = os.path.join(os.path.dirname(nodes_shapefile),
'wherehouse_nodes.csv')
edges_output_file = os.path.join(os.path.dirname(edges_shapefile),
'wherehouse_edges.csv')
# Load the nodes shapefile
nodes_file = shapefile.Reader(nodes_shapefile)
# Write the header for the output file
with open(nodes_output_file, 'w') as nodes:
nodes.write('nid,lon,lat,external_nid\n')
# Loop through all the shapes in the nodes shapefile and re-index
# everything starting with index 1. Then write the node data out
# to a file.
external_nid_2_nid = {}
for idx, record in enumerate(nodes_file.iterShapeRecords()):
external_nid_2_nid[record.record[0]] = idx
# Filter out any nodes at (0, 0)
if record.shape.points[0][0] == 0:
continue
nodes.write('%s,%s,%s,%s\n' % (
idx,
record.shape.points[0][0],
record.shape.points[0][1],
record.record[0])
)
print 'Wrote to node output file: %s' % nodes_output_file
# Open the edges file
edges_file = shapefile.Reader(edges_shapefile)
# Write the header for the edges output file.
with open(edges_output_file, 'w') as edges:
edges.write('eid,source,target,dir,capacity,speed_mph,' +
'free_flow_travel_time\n')
# Iterate through all the edge shapes, impute missing columns and
# write to file.
for idx, record in enumerate(edges_file.iterRecords()):
# Convert speed to MPH for capacity inference.
speed_mph = ceil(record[13] * cls.KPH_TO_MPH)
# Make sure the minimum speed isn't 0 to avoid divide by 0 in
# travel time cost calculations.
speed_mph = 0.00001 if speed_mph == 0 else speed_mph
# Impute capacity based on speed and number of lanes
capacity = cls.__capacity_profile(speed_mph, record[10])
# Compute travel time cost in minutes
cost_time = (record[2] * cls.METERS_TO_MPH) / speed_mph * 60
# Write to file
edges.write('%s,%s,%s,%s,%s,%s,%s\n' % (
idx,
external_nid_2_nid[record[0]],
external_nid_2_nid[record[1]],
0,
int(capacity),
int(speed_mph),
cost_time
))
print 'Wrote to edge output file: %s' % edges_output_file
# The original shapefile didn't have the eid column making it hard to
# join routing output for visualization and validation in QGIS.
# This loop runs over the shapefile records and adds a field then
# saves a new shapefile.
edges_file = shapefile.Reader(edges_shapefile)
extended_file = shapefile.Writer()
extended_file.fields = list(edges_file.fields)
extended_file.field('EID', 'N', 9, 0)
for idx, record in enumerate(edges_file.iterShapeRecords()):
new_record = record.record
new_record.append(idx)
extended_file.records.append(new_record)
extended_file._shapes.append(record.shape)
basename, ext = os.path.splitext(edges_shapefile)
name = basename + "_eid" + ext
extended_file.save(name)
print 'Wrote to extended edges shapefile: %s' % name
@staticmethod
def __capacity_profile(speed, lanes):
if speed >= 40:
return int((1700. + 10. * speed) * lanes)
else:
return int(1900 * lanes * 0.92 * 0.55)
return 0
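    # Worked example (editor's note): at 60 mph with 2 lanes the profile above
    # gives int((1700. + 10. * 60) * 2) = 4600 vehicles/hour; below 40 mph it
    # falls back to the flat urban estimate int(1900 * lanes * 0.92 * 0.55),
    # e.g. 1922 for 2 lanes.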
@classmethod
def od_to_intersections(cls, SA2_od_filename, census_raster_file,
nodes_shapefile):
''' Convert OD to intersection nodes and edges. '''
        # Load the OD table (assumed to be a CSV with origin, destination and
        # flow_scaled columns; the original code referenced od_scaled without
        # ever loading it)
        od_filename = SA2_od_filename
        od_scaled = pandas.read_csv(od_filename)
# Load a raster file of maping each raster cell to an SA2 it covers
census_raster = Raster()
path = census_raster_file
census_raster.load_data_from_file(path)
# Load the network nodes
nodes_file = shapefile.Reader(nodes_shapefile)
# Loop over all the network nodes and look up which SA2 they are
# contained in. Maintain a mapping from SA2 to the list of
# intersection nodes within.
sa2_2_nid = {}
        for idx, rec in enumerate(nodes_file.iterShapeRecords()):
lon, lat = rec.shape.points[0]
sa2 = census_raster.in_poly(lon, lat)
            # If no SA2 is found, the value will be 0 and we should ignore node
            if sa2 == 0:
                continue
            sa2 = str(sa2)
if sa2 in sa2_2_nid:
sa2_2_nid[sa2].append(idx)
else:
sa2_2_nid[sa2] = [idx]
od_inter = {}
# Loop over all SA2 - SA2 OD pairs. For each SA2 origin and destination,
# pick a random intersection on each end. Assign a fraction of the
# SA2 to SA2 flow to this node to node pair.
for idx, row in od_scaled.dropna().iterrows():
for i in xrange(int(row.flow_scaled)):
o_sa2 = row.origin
d_sa2 = row.destination
                # If we haven't mapped nodes for either the origin or the
                # destination, just ignore it.
if o_sa2 not in sa2_2_nid or d_sa2 not in sa2_2_nid:
continue
o = random.choice(sa2_2_nid[o_sa2])
d = random.choice(sa2_2_nid[d_sa2])
if (o, d) in od_inter:
od_inter[(o, d)] += row.flow_scaled / int(row.flow_scaled)
else:
od_inter[(o, d)] = row.flow_scaled / int(row.flow_scaled)
        basename, ext = os.path.splitext(SA2_od_filename)
od_output_filename = basename + '.intersection' + ext
with open(od_output_filename, 'w') as wfid:
wfid.write('origin_nid,destination_nid,flow\n')
            for (origin_nid, destination_nid), flow in od_inter.items():
wfid.write('%d,%d,%0.3f\n' % (origin_nid, destination_nid, flow))
|
{
"content_hash": "63c49815c17411836d2046c64743e3cc",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 81,
"avg_line_length": 41.602409638554214,
"alnum_prop": 0.552128583840139,
"repo_name": "wherehouse/wherehouse-sdk-py",
"id": "87fc39f859d840b66a20d7318e07410eb14736fd",
"size": "6906",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wherehouse/centroid/network_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23478"
},
{
"name": "Shell",
"bytes": "495"
}
],
"symlink_target": ""
}
|
from unittest import mock
from nova import objects
from nova.scheduler.filters import aggregate_instance_extra_specs as agg_specs
from nova import test
from nova.tests.unit.scheduler import fakes
@mock.patch('nova.scheduler.filters.utils.aggregate_metadata_get_by_host')
class TestAggregateInstanceExtraSpecsFilter(test.NoDBTestCase):
def setUp(self):
super(TestAggregateInstanceExtraSpecsFilter, self).setUp()
self.filt_cls = agg_specs.AggregateInstanceExtraSpecsFilter()
def test_aggregate_filter_passes_no_extra_specs(self, agg_mock):
capabilities = {'opt1': 1, 'opt2': 2}
spec_obj = objects.RequestSpec(
context=mock.sentinel.ctx,
flavor=objects.Flavor(memory_mb=1024))
host = fakes.FakeHostState('host1', 'node1', capabilities)
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
self.assertFalse(agg_mock.called)
def test_aggregate_filter_passes_empty_extra_specs(self, agg_mock):
capabilities = {'opt1': 1, 'opt2': 2}
spec_obj = objects.RequestSpec(
context=mock.sentinel.ctx,
flavor=objects.Flavor(memory_mb=1024, extra_specs={}))
host = fakes.FakeHostState('host1', 'node1', capabilities)
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
self.assertFalse(agg_mock.called)
def _do_test_aggregate_filter_extra_specs(self, especs, passes):
spec_obj = objects.RequestSpec(
context=mock.sentinel.ctx,
flavor=objects.Flavor(memory_mb=1024, extra_specs=especs))
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024})
assertion = self.assertTrue if passes else self.assertFalse
assertion(self.filt_cls.host_passes(host, spec_obj))
def test_aggregate_filter_passes_extra_specs_simple(self, agg_mock):
agg_mock.return_value = {'opt1': set(['1']), 'opt2': set(['2'])}
especs = {
# Un-scoped extra spec
'opt1': '1',
# Scoped extra spec that applies to this filter
'aggregate_instance_extra_specs:opt2': '2',
}
self._do_test_aggregate_filter_extra_specs(especs, passes=True)
def test_aggregate_filter_passes_extra_specs_simple_comma(self, agg_mock):
agg_mock.return_value = {'opt1': set(['1', '3']), 'opt2': set(['2'])}
especs = {
# Un-scoped extra spec
'opt1': '1',
# Scoped extra spec that applies to this filter
'aggregate_instance_extra_specs:opt1': '3',
}
self._do_test_aggregate_filter_extra_specs(especs, passes=True)
def test_aggregate_filter_passes_with_key_same_as_scope(self, agg_mock):
agg_mock.return_value = {'aggregate_instance_extra_specs': set(['1'])}
especs = {
# Un-scoped extra spec, make sure we don't blow up if it
# happens to match our scope.
'aggregate_instance_extra_specs': '1',
}
self._do_test_aggregate_filter_extra_specs(especs, passes=True)
def test_aggregate_filter_fails_extra_specs_simple(self, agg_mock):
agg_mock.return_value = {'opt1': set(['1']), 'opt2': set(['2'])}
especs = {
'opt1': '1',
'opt2': '222'
}
self._do_test_aggregate_filter_extra_specs(especs, passes=False)
|
{
"content_hash": "8ce35ecadc18767109bec22934c2d8cb",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 78,
"avg_line_length": 42.5,
"alnum_prop": 0.6244117647058823,
"repo_name": "mahak/nova",
"id": "971e1a366ce2bc1a53dc37ff0df14a23e714da74",
"size": "3973",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/tests/unit/scheduler/filters/test_aggregate_instance_extra_specs_filters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "3545"
},
{
"name": "Mako",
"bytes": "1952"
},
{
"name": "Python",
"bytes": "23261880"
},
{
"name": "Shell",
"bytes": "28113"
},
{
"name": "Smarty",
"bytes": "507244"
}
],
"symlink_target": ""
}
|
from read_until import ReadUntil
import time
import errno
from socket import error as socket_error
import threading
import sys, os, re
from Bio import SeqIO
from StringIO import StringIO
import string
import mlpy
import sklearn.preprocessing
import random
import math
import csv
import numpy as np
import array as ar
import configargparse
import shutil
import pickle
import multiprocessing
import ctypes
import subprocess
import re
import logging
import platform
from ruutils import process_model_file,query_yes_no,send_message,process_ref_fasta,get_seq_len,squiggle_search2,extractsquig,go_or_no,genome_worker,check_files
class LockedDict(dict):
"""
A dict where __setitem__ is synchronised with a new function to
atomically pop and clear the map.
"""
def __init__(self, *args, **kwargs):
self.lock = threading.Lock()
super(LockedDict, self).__init__(*args, **kwargs)
def __setitem__(self, key, value):
with self.lock:
super(LockedDict, self).__setitem__(key, value)
def pop_all_and_clear(self):
with self.lock:
d=dict(self) # take copy as a normal dict
super(LockedDict, self).clear()
return d
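# Hedged usage sketch of LockedDict (editor's addition, not called by the
# script): worker callbacks fill the map from several threads while the main
# unblock loop periodically drains it in a single atomic step.
def _locked_dict_example():
    pending = LockedDict()
    pending[3] = "read_17"               # a worker marks channel 3 for unblock
    batch = pending.pop_all_and_clear()  # the unblock loop takes the whole batch
    assert batch == {3: "read_17"} and len(pending) == 0
    return batch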
def run_analysis(args,analyser):
#analyser = MyAnalyser(args)
host = "ws://"+str(args.ip)+":"+str(args.port)+"/"
setup_conditions = {"ignore_first_events": 75, "padding_length_events": 0,
"events_length": 250, "repetitions": 1}
state=RunningState()
print "Initialising Read Until"
try:
with ReadUntil(host=host,
setup_conditions=setup_conditions,
data_received=analyser.apply_async_with_callback,
connection_closed=state.closed) as my_client:
print "Trying to connect"
try:
my_client.start()
except Exception,err:
print err
print "Client connection started. Beginning unblock loop..."
while state.keep_running:
print "looping"
try:
unblock_now = analyser.next_unblock_map()
                except Exception,err:
                    print err
                    print "caught a connection fault - reads not unblocked"
                    unblock_now = {}
if len(unblock_now)>0:
if args.verbose is True: print "Unblocking channels: ", unblock_now.keys()
print time.strftime('%Y-%m-%d %H:%M:%S'),
print "Unblocking ",len(unblock_now.keys())
my_client.unblock(unblock_now)
# Throttle rate at which we make unblock controls. Although
# unblocks should be timely, it is more efficient on the network
# and on the hardware to unblock a bigger list of channels at once.
time.sleep(1)
print "...unblock loop ended. Connection closed."
except Exception,err:
print err
print "Problem - ws_event_sampler either crashed or not initialised. Please check your setup."
# with ReadUntil(host=host,
# setup_conditions=setup_conditions,
# data_received=analyser.apply_async_with_callback,
# connection_closed=state.closed) as my_client:
# try:
# my_client.start()
# except Exception,err:
# print err
# print "Client connection started. Beginning unblock loop..."
# while state.keep_running:
# print "looping"
# unblock_now = analyser.next_unblock_map()
# if len(unblock_now)>0:
# if args.verbose is True: print "Unblocking channels: ", unblock_now.keys()
# print time.strftime('%Y-%m-%d %H:%M:%S'),
# print "Unblocking ",len(unblock_now.keys())
# my_client.unblock(unblock_now)
#
# # Throttle rate at which we make unblock controls. Although
# # unblocks should be timely, it is more efficient on the network
# # and on the hardware to unblock a bigger list of channels at once.
# time.sleep(1)
# print "...unblock loop ended. Connection closed."
class MyAnalyser:
"""
Analyses recent data for each channel and amends the list of the channel
ids that should be unblocked. ReadUntil should call data_received whenever
some new data is available. It may call it several times concurrently and
possibly with updates to the same channel if in the setup conditions we
have requested more than 1 update for a given read. We react by keeping up
to date a current_unblock_set. Then we regularly make a separate unblock
call using that list in another thread.
This demonstrates one way of arranging code to analyse and unblock.
How best to handle concurrent overlapping updates and when to unblock is an
open problem left to the client script to resolve which will depend on what
it is trying to achieve.
"""
def __init__(self,args,p,seqids,threedarray):
self.current_unblock_map = LockedDict()
def mycallback(self, actions):
if actions[0] == "Skip":
if args.verbose is True: print "Should skip"
self.current_unblock_map[actions[1]]=actions[2]
logging.info('%s,%s,%s,%s,%s,%s,%s,%s', actions[1], actions[2], 'REJ',actions[3],actions[4][0],actions[4][1],actions[4][2],actions[4][3])
elif actions[0] == "timeout":
if args.verbose is True: print "Read timeout"
logging.info('%s,%s,%s,%s', actions[1], actions[2], 'TOT',actions[3])
#logging.info('%s,%s,%s', actions[1],actions[2],"TOT")
elif actions[0] == "evenskip":
if args.verbose is True: print "Even Skip"
logging.info('%s,%s,%s,%s', actions[1], actions[2], 'EVE',actions[3])
else:
if args.verbose is True: print "Sequencing from ",action[1],action[2]
#print actions[1], actions[2], 'SEQ',actions[3],actions[4][0],actions[4][1],actions[4][2],actions[4][3]
logging.info('%s,%s,%s,%s,%s,%s,%s,%s', actions[1], actions[2], 'SEQ',actions[3],actions[4][0],actions[4][1],actions[4][2],actions[4][3])
def apply_async_with_callback(self, channels):
if args.verbose is True: print "Channels length",len(channels)
d=list()
if args.verbose is True: print "Checking Channels"
for channel_id, data in channels.iteritems():
p.apply_async(genome_worker, args = ((channel_id,data,time.time(),args,seqlen,seqids,threedarray), ), callback = self.mycallback)
def next_unblock_map(self):
"""
Returns current map of channel_id to read_id that should be unblocked but
also clears the map in the assumption that the action to unblock will be
carried out straightaway.
"""
return self.current_unblock_map.pop_all_and_clear()
class RunningState:
def __init__(self):
self.keep_running=True
def closed(self, *args):
self.keep_running=False
if __name__ == "__main__":
multiprocessing.freeze_support()
manager=multiprocessing.Manager()
global oper
oper = platform.system()
    if oper == 'Windows': # MS
oper = 'windows'
else:
oper = 'linux' # MS
## linux version
if (oper is "linux"):
config_file = os.path.join(os.path.sep, os.path.dirname(os.path.realpath('__file__')), 'amp.config')
## linux version
if (oper is "windows"):
config_file = os.path.join(os.path.sep, os.path.dirname(os.path.realpath('__file__')), 'ampW.config')
__version__ = "1.2"
__date__ = "8th April 2016"
parser = configargparse.ArgParser(description='gReadUntil.py: A program providing read until for genome sequences with the Oxford Nanopore minION device. This program will ultimately be driven by minoTour to enable selective remote sequencing. This program is partly based on original code generously provided by Oxford Nanopore Technologies.')
parser.add('-fasta', '--reference_fasta_file', type=str, dest='fasta', required=True, default=None, help="The fasta format file describing the reference sequence for your organism.")
parser.add('-targets', nargs = '*', dest='targets',required=True, help = 'Positional IDs to enrich for in the form seqid:start-stop . Can be space seperated eg: J02459:10000-15000 J02459:35000-40000')
parser.add('-procs', '--proc_num', type=int, dest='procs',required=True, help = 'The number of processors to run this on.')
parser.add('-t', '--time', type=int, dest='time', required=True, default=300, help="This is an error catch for when we cannot keep up with the rate of sequencing on the device. It takes a finite amount of time to process through the all the channels from the sequencer. If we cannot process through the array quickly enough then we will \'fall behind\' and lose the ability to filter sequences. Rather than do that we set a threshold after which we allow the sequencing to complete naturally. The default is 300 seconds which equates to 9kb of sequencing at the standard rate.")
parser.add('-m', '--model',type=str, required=True, help = 'The appropriate template model file to use', dest='temp_model')
parser.add('-ip', '--ip-address', type=str ,dest='ip',required=False,default="127.0.0.1", help="The IP address of the machine running minKNOW.")
parser.add('-p', '--port', type=int, dest='port', default=None,required=True, help='The port that ws_event_sampler is running on.' )
parser.add('-log', '--log-file', type=str, dest='logfile', default='readuntil.log', help="The name of the log file that data will be written to regarding the decision made by this program to process read until.")
parser.add('-length', '--library-length', type=int, dest='length', required=False, default=0, help="Provide the average expected length of your library. This offset will be applied to reads that are likely to extend into your region of interest on either strand.")
parser.add('-skip','--skip_even', action='store_true', help="If set, this will allow all reads from even numbered channels to be sequenced regardless of where they map. This provides an internal control.", default=False,dest='skip')
parser.add('-v', '--verbose-true', action='store_true', help="Print detailed messages while processing files.", default=False, dest='verbose')
parser.add_argument('-ver', '--version', action='version',version=('%(prog)s version={version} date={date}').format(version=__version__,date=__date__))
#global args
args = parser.parse_args()
###Check files
check_files((args.fasta,args.temp_model))
fasta_file = args.fasta
#global seqlen
seqlen = get_seq_len(fasta_file)
#print type(seqlen)
print seqlen
model_file = args.temp_model
global model_kmer_means
global kmer_len
model_kmer_means,kmer_len=process_model_file(model_file)
seqids,threedarray = process_ref_fasta(fasta_file,model_kmer_means,kmer_len)
#print "init kmerhash",type(kmerhash)
print type(threedarray)
p = multiprocessing.Pool(args.procs)
analyser = MyAnalyser(args,p,seqids,threedarray)
messagesend = True
try:
if messagesend: send_message("minoTour software is implementing read until on this version of minKNOW and will reject reads being sequenced. If you don\'t want this to happen, you should ensure ws_event_sampler is not running! You proceed at your own risk.",args.ip)
except:
messagesend = False
next
print " ` "
print " ;` ,; "
print " :;;;;;;;;;;;;;;;, "
print " , .;;;;;, , "
print " @@@@@@@@ ;;;;;;; @@@@@@@@ "
print " @@@@@@@@@@# ;;;;; +@@@@@@@@@@ "
print " #@@@@@@`@@@@@ .;. @@@@@.@@@@@@@ "
print " .@@@@@ @@@@@@@@@@@ @@@@@: "
print " .@@@@` @@@@@@@@@ @@@@, "
print " '@@@@@@+ @@@@@@@ '@@@@@@+ "
print " ;@@@# @@@@@. +@@@; inoTour read until routines. "
print " .;;;;;;;, "
print " ;;;. .;;;` "
print " ;; ;;` "
print " Welcome to the .;;: ,;;, "
print ""
print "This script WILL implement read until.\nIf you proceed it is at your own risk.\n\n"
if not query_yes_no("Seriously - are you happy to proceed? Entering yes will make it your fault..."):
sys.exit()
print "***********************************************************************************************"
if args.skip is True:
print "**** This version of the code will not process reads derived from even numbered channels. ****"
else:
print "**** This version of the code will process reads regardless of channel. ****"
print "***********************************************************************************************"
logging.basicConfig(format='%(levelname)s:%(message)s',filename=args.logfile, filemode='w', level=logging.INFO )
current_time = time.time()
print current_time
while 1:
try:
print "Running Analysis"
run_analysis(args,analyser)
except socket_error as serr:
if serr.errno != errno.ECONNREFUSED:
raise serr
print "Hanging around, waiting for the server..."
time.sleep(5) # Wait a bit and try again
|
{
"content_hash": "a7bde229e8e550efca7c127fe48b006b",
"timestamp": "",
"source": "github",
"line_count": 278,
"max_line_length": 582,
"avg_line_length": 49.093525179856115,
"alnum_prop": 0.6057297772567409,
"repo_name": "mattloose/RUscripts",
"id": "8acc16f400bdabd2fb815fdc6e783291b55cf1ef",
"size": "13648",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ReadUntil/gReadUntil.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "167126"
}
],
"symlink_target": ""
}
|
from datetime import timedelta
# external
from blessed import Terminal
# django
from django.db import connection
from django.db.models import F, Sum
from django.utils import timezone
from django.utils.translation import gettext as _
from django_q import VERSION, models
from django_q.brokers import get_broker
# local
from django_q.conf import Conf
from django_q.status import Stat
# optional
try:
import psutil
except ImportError:
psutil = None
def get_process_mb(pid):
try:
process = psutil.Process(pid)
mb_used = round(process.memory_info().rss / 1024 ** 2, 2)
except psutil.NoSuchProcess:
mb_used = "NO_PROCESS_FOUND"
return mb_used
def monitor(run_once=False, broker=None):
if not broker:
broker = get_broker()
term = Terminal()
broker.ping()
with term.fullscreen(), term.hidden_cursor(), term.cbreak():
val = None
start_width = int(term.width / 8)
while val not in (
"q",
"Q",
):
col_width = int(term.width / 8)
# In case of resize
if col_width != start_width:
print(term.clear())
start_width = col_width
print(
term.move(0, 0)
+ term.black_on_green(term.center(_("Host"), width=col_width - 1))
)
print(
term.move(0, 1 * col_width)
+ term.black_on_green(term.center(_("Id"), width=col_width - 1))
)
print(
term.move(0, 2 * col_width)
+ term.black_on_green(term.center(_("State"), width=col_width - 1))
)
print(
term.move(0, 3 * col_width)
+ term.black_on_green(term.center(_("Pool"), width=col_width - 1))
)
print(
term.move(0, 4 * col_width)
+ term.black_on_green(term.center(_("TQ"), width=col_width - 1))
)
print(
term.move(0, 5 * col_width)
+ term.black_on_green(term.center(_("RQ"), width=col_width - 1))
)
print(
term.move(0, 6 * col_width)
+ term.black_on_green(term.center(_("RC"), width=col_width - 1))
)
print(
term.move(0, 7 * col_width)
+ term.black_on_green(term.center(_("Up"), width=col_width - 1))
)
i = 2
stats = Stat.get_all(broker=broker)
print(term.clear_eos())
for stat in stats:
status = stat.status
# color status
if stat.status == Conf.WORKING:
status = term.green(str(Conf.WORKING))
elif stat.status == Conf.STOPPING:
status = term.yellow(str(Conf.STOPPING))
elif stat.status == Conf.STOPPED:
status = term.red(str(Conf.STOPPED))
elif stat.status == Conf.IDLE:
status = str(Conf.IDLE)
# color q's
tasks = str(stat.task_q_size)
if stat.task_q_size > 0:
tasks = term.cyan(str(stat.task_q_size))
if Conf.QUEUE_LIMIT and stat.task_q_size == Conf.QUEUE_LIMIT:
tasks = term.green(str(stat.task_q_size))
results = stat.done_q_size
if results > 0:
results = term.cyan(str(results))
# color workers
workers = len(stat.workers)
if workers < Conf.WORKERS:
workers = term.yellow(str(workers))
# format uptime
uptime = (timezone.now() - stat.tob).total_seconds()
hours, remainder = divmod(uptime, 3600)
minutes, seconds = divmod(remainder, 60)
uptime = "%d:%02d:%02d" % (hours, minutes, seconds)
# print to the terminal
print(
term.move(i, 0)
+ term.center(stat.host[: col_width - 1], width=col_width - 1)
)
print(
term.move(i, 1 * col_width)
+ term.center(str(stat.cluster_id)[-8:], width=col_width - 1)
)
print(
term.move(i, 2 * col_width)
+ term.center(status, width=col_width - 1)
)
print(
term.move(i, 3 * col_width)
+ term.center(workers, width=col_width - 1)
)
print(
term.move(i, 4 * col_width)
+ term.center(tasks, width=col_width - 1)
)
print(
term.move(i, 5 * col_width)
+ term.center(results, width=col_width - 1)
)
print(
term.move(i, 6 * col_width)
+ term.center(stat.reincarnations, width=col_width - 1)
)
print(
term.move(i, 7 * col_width)
+ term.center(uptime, width=col_width - 1)
)
i += 1
# bottom bar
i += 1
queue_size = broker.queue_size()
lock_size = broker.lock_size()
if lock_size:
queue_size = f"{queue_size}({lock_size})"
print(
term.move(i, 0)
+ term.white_on_cyan(term.center(broker.info(), width=col_width * 2))
)
print(
term.move(i, 2 * col_width)
+ term.black_on_cyan(term.center(_("Queued"), width=col_width))
)
print(
term.move(i, 3 * col_width)
+ term.white_on_cyan(term.center(queue_size, width=col_width))
)
print(
term.move(i, 4 * col_width)
+ term.black_on_cyan(term.center(_("Success"), width=col_width))
)
print(
term.move(i, 5 * col_width)
+ term.white_on_cyan(
term.center(models.Success.objects.count(), width=col_width)
)
)
print(
term.move(i, 6 * col_width)
+ term.black_on_cyan(term.center(_("Failures"), width=col_width))
)
print(
term.move(i, 7 * col_width)
+ term.white_on_cyan(
term.center(models.Failure.objects.count(), width=col_width)
)
)
# for testing
if run_once:
return Stat.get_all(broker=broker)
print(term.move(i + 2, 0) + term.center(_("[Press q to quit]")))
val = term.inkey(timeout=1)
def info(broker=None):
if not broker:
broker = get_broker()
term = Terminal()
broker.ping()
stat = Stat.get_all(broker=broker)
# general stats
clusters = len(stat)
workers = 0
reincarnations = 0
for cluster in stat:
workers += len(cluster.workers)
reincarnations += cluster.reincarnations
# calculate tasks pm and avg exec time
tasks_per = 0
per = _("day")
exec_time = 0
last_tasks = models.Success.objects.filter(
stopped__gte=timezone.now() - timedelta(hours=24)
)
tasks_per_day = last_tasks.count()
if tasks_per_day > 0:
# average execution time over the last 24 hours
if connection.vendor != "sqlite":
exec_time = last_tasks.aggregate(
time_taken=Sum(F("stopped") - F("started"))
)
exec_time = exec_time["time_taken"].total_seconds() / tasks_per_day
else:
# can't sum timedeltas on sqlite
for t in last_tasks:
exec_time += t.time_taken()
exec_time = exec_time / tasks_per_day
# tasks per second/minute/hour/day in the last 24 hours
if tasks_per_day > 24 * 60 * 60:
tasks_per = tasks_per_day / (24 * 60 * 60)
per = _("second")
elif tasks_per_day > 24 * 60:
tasks_per = tasks_per_day / (24 * 60)
per = _("minute")
elif tasks_per_day > 24:
tasks_per = tasks_per_day / 24
per = _("hour")
else:
tasks_per = tasks_per_day
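    # Worked example (editor's note): 172800 successes over the last 24 hours
    # exceeds 24 * 60 * 60, so tasks_per = 172800 / 86400 = 2.0 and per="second".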
# print to terminal
print(term.clear_eos())
col_width = int(term.width / 6)
print(
term.black_on_green(
term.center(
_(
f'-- {Conf.PREFIX.capitalize()} { ".".join(str(v) for v in VERSION)} on {broker.info()} --'
)
)
)
)
print(
term.cyan(_("Clusters"))
+ term.move_x(1 * col_width)
+ term.white(str(clusters))
+ term.move_x(2 * col_width)
+ term.cyan(_("Workers"))
+ term.move_x(3 * col_width)
+ term.white(str(workers))
+ term.move_x(4 * col_width)
+ term.cyan(_("Restarts"))
+ term.move_x(5 * col_width)
+ term.white(str(reincarnations))
)
print(
term.cyan(_("Queued"))
+ term.move_x(1 * col_width)
+ term.white(str(broker.queue_size()))
+ term.move_x(2 * col_width)
+ term.cyan(_("Successes"))
+ term.move_x(3 * col_width)
+ term.white(str(models.Success.objects.count()))
+ term.move_x(4 * col_width)
+ term.cyan(_("Failures"))
+ term.move_x(5 * col_width)
+ term.white(str(models.Failure.objects.count()))
)
print(
term.cyan(_("Schedules"))
+ term.move_x(1 * col_width)
+ term.white(str(models.Schedule.objects.count()))
+ term.move_x(2 * col_width)
+ term.cyan(_(f"Tasks/{per}"))
+ term.move_x(3 * col_width)
+ term.white(f"{tasks_per:.2f}")
+ term.move_x(4 * col_width)
+ term.cyan(_("Avg time"))
+ term.move_x(5 * col_width)
+ term.white(f"{exec_time:.4f}")
)
return True
def memory(run_once=False, workers=False, broker=None):
if not broker:
broker = get_broker()
term = Terminal()
broker.ping()
if not psutil:
print(term.clear_eos())
print(
term.white_on_red(
'Cannot start "qmemory" command. Missing "psutil" library.'
)
)
return
with term.fullscreen(), term.hidden_cursor(), term.cbreak():
MEMORY_AVAILABLE_LOWEST_PERCENTAGE = 100.0
MEMORY_AVAILABLE_LOWEST_PERCENTAGE_AT = timezone.now()
cols = 8
val = None
start_width = int(term.width / cols)
while val not in ["q", "Q"]:
col_width = int(term.width / cols)
# In case of resize
if col_width != start_width:
print(term.clear())
start_width = col_width
# sentinel, monitor and workers memory usage
print(
term.move(0, 0 * col_width)
+ term.black_on_green(term.center(_("Host"), width=col_width - 1))
)
print(
term.move(0, 1 * col_width)
+ term.black_on_green(term.center(_("Id"), width=col_width - 1))
)
print(
term.move(0, 2 * col_width)
+ term.black_on_green(
term.center(_("Available (%)"), width=col_width - 1)
)
)
print(
term.move(0, 3 * col_width)
+ term.black_on_green(
term.center(_("Available (MB)"), width=col_width - 1)
)
)
print(
term.move(0, 4 * col_width)
+ term.black_on_green(term.center(_("Total (MB)"), width=col_width - 1))
)
print(
term.move(0, 5 * col_width)
+ term.black_on_green(
term.center(_("Sentinel (MB)"), width=col_width - 1)
)
)
print(
term.move(0, 6 * col_width)
+ term.black_on_green(
term.center(_("Monitor (MB)"), width=col_width - 1)
)
)
print(
term.move(0, 7 * col_width)
+ term.black_on_green(
term.center(_("Workers (MB)"), width=col_width - 1)
)
)
row = 2
stats = Stat.get_all(broker=broker)
print(term.clear_eos())
for stat in stats:
# memory available (%)
memory_available_percentage = round(
psutil.virtual_memory().available
* 100
/ psutil.virtual_memory().total,
2,
)
# memory available (MB)
memory_available = round(
psutil.virtual_memory().available / 1024 ** 2, 2
)
if memory_available_percentage < MEMORY_AVAILABLE_LOWEST_PERCENTAGE:
MEMORY_AVAILABLE_LOWEST_PERCENTAGE = memory_available_percentage
MEMORY_AVAILABLE_LOWEST_PERCENTAGE_AT = timezone.now()
print(
term.move(row, 0 * col_width)
+ term.center(stat.host[: col_width - 1], width=col_width - 1)
)
print(
term.move(row, 1 * col_width)
+ term.center(str(stat.cluster_id)[-8:], width=col_width - 1)
)
print(
term.move(row, 2 * col_width)
+ term.center(memory_available_percentage, width=col_width - 1)
)
print(
term.move(row, 3 * col_width)
+ term.center(memory_available, width=col_width - 1)
)
print(
term.move(row, 4 * col_width)
+ term.center(
round(psutil.virtual_memory().total / 1024 ** 2, 2),
width=col_width - 1,
)
)
print(
term.move(row, 5 * col_width)
+ term.center(get_process_mb(stat.sentinel), width=col_width - 1)
)
print(
term.move(row, 6 * col_width)
+ term.center(
get_process_mb(getattr(stat, "monitor", None)),
width=col_width - 1,
)
)
workers_mb = 0
for worker_pid in stat.workers:
result = get_process_mb(worker_pid)
if isinstance(result, str):
result = 0
workers_mb += result
print(
term.move(row, 7 * col_width)
+ term.center(
workers_mb or "NO_PROCESSES_FOUND", width=col_width - 1
)
)
row += 1
# each worker's memory usage
if workers:
row += 2
col_width = int(term.width / (1 + Conf.WORKERS))
print(
term.move(row, 0 * col_width)
+ term.black_on_cyan(term.center(_("Id"), width=col_width - 1))
)
for worker_num in range(Conf.WORKERS):
print(
term.move(row, (worker_num + 1) * col_width)
+ term.black_on_cyan(
term.center(
"Worker #{} (MB)".format(worker_num + 1),
width=col_width - 1,
)
)
)
row += 2
for stat in stats:
print(
term.move(row, 0 * col_width)
+ term.center(str(stat.cluster_id)[-8:], width=col_width - 1)
)
for idx, worker_pid in enumerate(stat.workers):
mb_used = get_process_mb(worker_pid)
print(
term.move(row, (idx + 1) * col_width)
+ term.center(mb_used, width=col_width - 1)
)
row += 1
row += 1
print(
term.move(row, 0)
+ _("Available lowest (%): {} ({})").format(
str(MEMORY_AVAILABLE_LOWEST_PERCENTAGE),
MEMORY_AVAILABLE_LOWEST_PERCENTAGE_AT.strftime(
"%Y-%m-%d %H:%M:%S+00:00"
),
)
)
# for testing
if run_once:
return Stat.get_all(broker=broker)
print(term.move(row + 2, 0) + term.center("[Press q to quit]"))
val = term.inkey(timeout=1)
def get_ids():
# prints id (PID) of running clusters
stat = Stat.get_all()
if stat:
for s in stat:
print(s.cluster_id)
else:
print("No clusters appear to be running.")
return True
|
{
"content_hash": "9ad9971a5b905dd4dc4d393b4ba563aa",
"timestamp": "",
"source": "github",
"line_count": 483,
"max_line_length": 112,
"avg_line_length": 36.36645962732919,
"alnum_prop": 0.44469114716766295,
"repo_name": "Koed00/django-q",
"id": "97404187b5d9d4c1688f3f8cfba492c30d030da4",
"size": "17565",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_q/monitor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "197496"
}
],
"symlink_target": ""
}
|
import tests.missing_data.test_missing_data_ozone_generic as gen
gen.test_ozone_missing_data(None, 'Constant')
|
{
"content_hash": "e6ca7e62bf32e3a4a96337521b53b872",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 64,
"avg_line_length": 37.333333333333336,
"alnum_prop": 0.7946428571428571,
"repo_name": "antoinecarme/pyaf",
"id": "8cb5123d9c76a9c20b141fcadbe7d3aa9519eea0",
"size": "112",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/missing_data/test_missing_data_ozone_None_Constant.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
"""
Copyright (C) 2015, MuChu Hsu
Contributed by Muchu Hsu (muchu1983@gmail.com)
This file is part of BSD license
<https://opensource.org/licenses/BSD-3-Clause>
"""
import unittest
import logging
import json
from cameo.parserForINDIEGOGO import ParserForINDIEGOGO
"""
Tests
"""
class ParserForINDIEGOGOTest(unittest.TestCase):
    # Set up
def setUp(self):
logging.basicConfig(level=logging.WARNING)
pass
    # Tear down
def tearDown(self):
pass
    # Test parsing of the explore page
def test_parseExplorePage(self):
logging.info("ParserForINDIEGOGOTest.test_parseExplorePage")
parser = ParserForINDIEGOGO()
parser.parseExplorePage()
    # Test parsing of the category page
def test_parseCategoryPage(self):
logging.info("ParserForINDIEGOGOTest.test_parseCategoryPage")
parser = ParserForINDIEGOGO()
parser.parseCategoryPage()
    # Test parsing of the project pages
def test_parseProjectPage(self):
logging.info("ParserForINDIEGOGOTest.test_parseProjectPage")
strCategory = "animals"
parser = ParserForINDIEGOGO()
parser.beforeParseProjectPage(strCategory)
parser.parseProjectDetailsPage(strCategory)
parser.parseProjectStoryPage(strCategory)
parser.parseProjectBackersPage(strCategory)
parser.parseProjectUpdatesPage(strCategory)
parser.parseProjectCommentsPage(strCategory)
parser.parseProjectRewardPage(strCategory)
parser.afterParseProjectPage(strCategory)
    # test parsing the individuals page
def test_parseIndividualsPage(self):
logging.info("ParserForINDIEGOGOTest.test_parseIndividualsPage")
strCategory = "animals"
parser = ParserForINDIEGOGO()
parser.beforeParseIndividualsPage(strCategory)
parser.parseIndividualsProfilePage(strCategory)
parser.parseIndividualsCampaignsPage(strCategory)
parser.afterParseIndividualsPage(strCategory)
# run the tests
if __name__ == "__main__":
unittest.main(exit=False)
|
{
"content_hash": "c49b5715a4b8277aa2bc0101abb48a5a",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 72,
"avg_line_length": 30.17910447761194,
"alnum_prop": 0.7022749752720079,
"repo_name": "muchu1983/104_cameo",
"id": "aca85249164540db589fb0c564439eb7d403f83f",
"size": "2114",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/unit/test_parserForINDIEGOGO.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4028"
},
{
"name": "HTML",
"bytes": "885957"
},
{
"name": "Python",
"bytes": "738810"
}
],
"symlink_target": ""
}
|
"""readcycle.py: model of a stationary bicycle
The Cycle class serves two purposes:
1. communication with a Digispark/Arduino microcontroller
2. backend to the cycle UI
"""
import serial
import time
import datetime
import traceback
import sys
from hist import History
class Cycle:
"""
Model class for communication with an Arduino connected to a
stationary bicycle
"""
# state
speed = 0
delta = 0
ridetime = 0
revolutions = 0
rpm = 0
# serial communication options
_rate = 9600
_port = '/dev/ttyACM0'
def __init__(self):
"""trys to connect to the arduino"""
try: # connect to arduino
self.ser = serial.Serial(self._port, self._rate, timeout=0.1)
self.ser.setDTR(1)
print "Serial connection is open:", self.ser.isOpen()
except:
print "Arduino not connected"
sys.exit(1)
self.speed_hist = History(2)
def poll(self):
"""reads data from Arduino
Data protocol is lines of key-value pairs:
        t: active ride time in milliseconds (e.g., t=1231)
v: overall number of revolutions (e.g., v=10)
d: duration of most recent revolution in milliseconds (e.g., d=800)
"""
data_left = self.ser.inWaiting()
if data_left > 0:
try:
print "trying to read"
data = self.ser.readlines()
print "nlines", len(data)
print "".join(data)
for line in data:
print line,
slots = line.strip().split("=")
if len(slots) != 2:
continue # skip malformed lines
key, value = slots
#if key == "speed" or key == "s":
# self.speed = float(value)
# self.speed_hist.add(self.speed)
#if key == "rpm" or key == "r":
# self.rpm = float(value)
if key == "ridetime" or key == "t":
self.ridetime = int(value)
elif key == "revolutions" or key == "v":
self.revolutions = int(value)
elif key == "d": # digispark sends delta instead of speed
self.delta = int(value)
except KeyboardInterrupt:
# user tried to kill program
self.ser.close()
raise
except:
# something went wrong, print trace
# this can also occur on Arduino read errors, so don't fail here
traceback.print_exc(file=sys.stdout)
pass
def get_rpm(self):
"""computes rpm from delta"""
if self.delta == 0:
self.rpm = 0
else:
self.rpm = 60000./self.delta
return self.rpm
def get_revolutions(self):
return self.revolutions
def get_ride_time(self):
"""parses ride time from millisecond timestamp"""
return datetime.datetime.fromtimestamp(self.ridetime/1000.)
def get_speed(self):
"""calculate speed from rpm"""
# function determined empirically from stock meter
self.speed = self.get_rpm()/3.5
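        # Worked example with assumed figures: delta = 800 ms gives
        # rpm = 60000/800 = 75, so speed = 75/3.5 ~= 21.4 on the same
        # scale as the stock meter.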
# debug print
print "speed", self.speed
return self.speed
if __name__ == "__main__":
cycle = Cycle()
while True:
cycle.poll()
print cycle.get_speed()
time.sleep(0.2)
|
{
"content_hash": "fb95f8eed611b31bf6d7513e4245183c",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 81,
"avg_line_length": 27.35820895522388,
"alnum_prop": 0.5027277686852155,
"repo_name": "imbadatgit/cycleUI",
"id": "f1aee60b899269f1c82481a5d8af52bfcd38db84",
"size": "3713",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ui/readcycle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "1652"
},
{
"name": "Python",
"bytes": "17135"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import six
from bitfield import BitField
from collections import Sequence
from django.db import models
from sentry.db.models import ArrayField
class ApiScopes(Sequence):
project = (("project:read"), ("project:write"), ("project:admin"), ("project:releases"))
team = (("team:read"), ("team:write"), ("team:admin"))
event = (("event:read"), ("event:write"), ("event:admin"))
org = (("org:read"), ("org:write"), ("org:admin"))
member = (("member:read"), ("member:write"), ("member:admin"))
def __init__(self):
self.scopes = (
self.__class__.project
+ self.__class__.team
+ self.__class__.event
+ self.__class__.org
+ self.__class__.member
)
def to_bitfield(self):
return tuple((s, s) for s in self.scopes)
def __getitem__(self, value):
return self.scopes.__getitem__(value)
def __len__(self):
return len(self.scopes)
def __repr__(self):
return self.scopes.__repr__()
class HasApiScopes(models.Model):
"""
Mixin for models that hold a list of OAuth Scopes.
"""
class Meta:
abstract = True
# List of scopes in bit form
scopes = BitField(flags=ApiScopes().to_bitfield())
# Human readable list of scopes
scope_list = ArrayField(of=models.TextField)
def get_scopes(self):
if self.scope_list:
return self.scope_list
return [k for k, v in six.iteritems(self.scopes) if v]
def has_scope(self, scope):
return scope in self.get_scopes()
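# Illustrative (assumed) usage on a model that mixes in HasApiScopes,
# e.g. an API token instance:
#   token.get_scopes()           # -> ['project:read', 'org:read'] when only
#                                #    those scopes are set
#   token.has_scope('org:read')  # -> True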
|
{
"content_hash": "22b68a9dead673d4b128326bc95838c1",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 92,
"avg_line_length": 24.861538461538462,
"alnum_prop": 0.5860148514851485,
"repo_name": "mvaled/sentry",
"id": "416b19f21a00f89c2028ae02a4440153be252d76",
"size": "1616",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/sentry/models/apiscopes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "226439"
},
{
"name": "Dockerfile",
"bytes": "6431"
},
{
"name": "HTML",
"bytes": "173429"
},
{
"name": "JavaScript",
"bytes": "9314175"
},
{
"name": "Lua",
"bytes": "65885"
},
{
"name": "Makefile",
"bytes": "9225"
},
{
"name": "Python",
"bytes": "50385401"
},
{
"name": "Ruby",
"bytes": "168"
},
{
"name": "Shell",
"bytes": "5685"
},
{
"name": "TypeScript",
"bytes": "773664"
}
],
"symlink_target": ""
}
|
"""
Represent a set of variables in an Element.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import copy
from echomesh.expression import Expression
from echomesh.expression import UnitSettings
from echomesh.expression.UnitExpression import UnitExpression
from echomesh.expression.Envelope import Envelope
from echomesh.util import Log
from echomesh.util.registry import Registry
from echomesh.util.math import Interval
LOGGER = Log.logger(__name__)
REGISTRY = Registry.Registry('variable classes')
INFINITY = float('inf')
class _Counter(object):
def __init__(self, element, period, begin=None, end=None, count=None,
skip=1, repeat=INFINITY, **kwds):
parts = [
Expression.convert(x, element) for x in (count, begin, end, skip)]
self.count, self.begin, self.end, self.skip = Interval.interval(*parts)
self.element = element
self.period = Expression.convert(period, element)
self.repeat = repeat
if kwds:
LOGGER.error('Unknown keywords "%s" for counter', kwds)
def is_constant(self):
return self.count <= 1
def evaluate(self):
if self.is_constant():
return self.begin
count = int(UnitSettings.get('speed') * self.element.elapsed_time() //
self.period)
if self.count != INFINITY:
repeat = count // self.count
if repeat >= self.repeat:
return self.end
count -= repeat * self.count
value = self.begin + self.skip * count
return value
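  # Worked example with assumed figures: begin=0, skip=2, period=0.5 and
  # speed * elapsed_time() = 3.1 give count = int(3.1 // 0.5) = 6, so
  # evaluate() returns 0 + 2 * 6 = 12; once `repeat` full cycles of `count`
  # steps have passed it returns `end` instead.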
def _counter(description, element):
return _Counter(element, **description)
REGISTRY.register_all(
counter=_counter,
envelope=Envelope,
value=UnitExpression,
)
def variable(description, element):
if not isinstance(description, dict):
return UnitExpression(description, element)
description = copy.copy(description)
vtype = description.pop('type', None)
if not vtype:
raise Exception('No type in variable, description="%s".' % description)
return REGISTRY.function(vtype)(description, element)
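# Illustrative (assumed) inputs for the dispatcher above:
#   variable('10 seconds', element)  -> wrapped as a UnitExpression
#   variable({'type': 'counter', 'period': 1, 'count': 4}, element)
#       -> built by the registered counter factory
# A dict with no 'type' key raises the exception above.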
|
{
"content_hash": "88c64d7b8d461746c735e38660e098c3",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 82,
"avg_line_length": 31.28985507246377,
"alnum_prop": 0.6641963872163038,
"repo_name": "rec/echomesh",
"id": "0f91a45946d4456428ad1b6d5cd226b213d00de2",
"size": "2159",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/python/echomesh/expression/Variable.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5723427"
},
{
"name": "C++",
"bytes": "10326191"
},
{
"name": "CSS",
"bytes": "2048"
},
{
"name": "HTML",
"bytes": "22355"
},
{
"name": "Java",
"bytes": "25383"
},
{
"name": "M4",
"bytes": "32321"
},
{
"name": "Makefile",
"bytes": "215120"
},
{
"name": "Objective-C",
"bytes": "93003"
},
{
"name": "Objective-C++",
"bytes": "394207"
},
{
"name": "Python",
"bytes": "1117634"
},
{
"name": "Shell",
"bytes": "735767"
}
],
"symlink_target": ""
}
|
import fileinput
sum = 0
for line in fileinput.input():
sum = sum + int(line)
print(str(sum))
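# Minimal usage sketch (assumed invocation), summing one integer per line
# from stdin or from files named on the command line:
#   printf '1\n2\n3\n' | python reduce.py   # prints 6
# Note: the local name `sum` shadows the builtin, which is harmless here.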
|
{
"content_hash": "1bf3b3df2b6f7463ab8295451cc73bc6",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 30,
"avg_line_length": 12.625,
"alnum_prop": 0.6534653465346535,
"repo_name": "trozamon/hadoop-qa",
"id": "4faa7e8dfae1d900b9d79e5d1a8a604d6b40de64",
"size": "124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reduce.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "3124"
},
{
"name": "PigLatin",
"bytes": "240"
},
{
"name": "Python",
"bytes": "962"
},
{
"name": "Shell",
"bytes": "2018"
}
],
"symlink_target": ""
}
|