hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f73d9bbcd9b1bda9942dc3cce52dcb1c4b3fd9cc | 35,070 | py | Python | katdal/h5datav2.py | rubyvanrooyen/katdal | e90bca3c2cd6305492d03ddc9aa48e67c1800428 | [
"BSD-3-Clause"
] | null | null | null | katdal/h5datav2.py | rubyvanrooyen/katdal | e90bca3c2cd6305492d03ddc9aa48e67c1800428 | [
"BSD-3-Clause"
] | null | null | null | katdal/h5datav2.py | rubyvanrooyen/katdal | e90bca3c2cd6305492d03ddc9aa48e67c1800428 | [
"BSD-3-Clause"
] | null | null | null | ################################################################################
# Copyright (c) 2011-2021, National Research Foundation (SARAO)
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy
# of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""Data accessor class for HDF5 files produced by KAT-7 correlator."""
import logging
import pathlib
import secrets
import h5py
import katpoint
import numpy as np
from .categorical import CategoricalData, sensor_to_categorical
from .dataset import (DEFAULT_SENSOR_PROPS, DEFAULT_VIRTUAL_SENSORS,
BrokenFile, DataSet, Subarray, WrongVersion,
_robust_target, _selection_to_list)
from .flags import DESCRIPTIONS as FLAG_DESCRIPTIONS
from .flags import NAMES as FLAG_NAMES
from .lazy_indexer import LazyIndexer, LazyTransform
from .sensordata import RecordSensorGetter, SensorCache, to_str
from .spectral_window import SpectralWindow
logger = logging.getLogger(__name__)
# Simplify the scan activities to derive the basic state of the antenna (slewing, scanning, tracking, stopped)
SIMPLIFY_STATE = {'scan_ready': 'slew', 'scan': 'scan', 'scan_complete': 'scan', 'track': 'track', 'slew': 'slew'}
# Extend the default sensor properties with KAT-7 specific overrides
SENSOR_PROPS = dict(DEFAULT_SENSOR_PROPS)
SENSOR_PROPS.update({
    '*activity': {'greedy_values': ('slew', 'stop'), 'initial_value': 'slew',
                  'transform': lambda act: SIMPLIFY_STATE.get(act, 'stop')},
    '*target': {'initial_value': '', 'transform': _robust_target},
    # These float sensors are actually categorical by nature as they represent user settings
    'RFE/center-frequency-hz': {'categorical': True},
    'RFE/rfe7.lo1.frequency': {'categorical': True},
    '*attenuation': {'categorical': True},
    '*attenuator.horizontal': {'categorical': True},
    '*attenuator.vertical': {'categorical': True},
})
# Map friendly noise-diode names to the underlying RFE3 sensor names
SENSOR_ALIASES = {
    'nd_coupler': 'rfe3.rfe15.noise.coupler.on',
    'nd_pin': 'rfe3.rfe15.noise.pin.on',
}
def _calc_azel(cache, name, ant):
    """Calculate virtual (az, el) sensors from actual ones in sensor cache."""
    # Pick the underlying pointing sensor based on which coordinate is requested
    if name.endswith('az'):
        base_name = 'pos.actual-scan-azim'
    else:
        base_name = 'pos.actual-scan-elev'
    # Convert degrees to radians and store the result back in the cache
    sensor_data = katpoint.deg2rad(cache.get(f'Antennas/{ant}/{base_name}'))
    cache[name] = sensor_data
    return sensor_data
# Extend the default virtual sensors with per-antenna (az, el) calculations
VIRTUAL_SENSORS = dict(DEFAULT_VIRTUAL_SENSORS)
VIRTUAL_SENSORS.update({'Antennas/{ant}/az': _calc_azel, 'Antennas/{ant}/el': _calc_azel})
# The single weight type produced by the KAT-7 correlator, with its description
WEIGHT_NAMES = ('precision',)
WEIGHT_DESCRIPTIONS = ('visibility precision (inverse variance, i.e. 1 / sigma^2)',)
# -------------------------------------------------------------------------------------------------
# -- Utility functions
# -------------------------------------------------------------------------------------------------
def get_single_value(group, name):
    """Return single value from attribute or dataset with given name in group.

    If `name` is an attribute of the HDF5 group `group`, it is returned,
    otherwise it is interpreted as an HDF5 dataset of `group` and the last value
    of `name` is returned. This is meant to retrieve static configuration values
    that potentially get set more than once during capture initialisation, but
    then does not change during actual capturing.

    Parameters
    ----------
    group : :class:`h5py.Group` object
        HDF5 group to query
    name : string
        Name of HDF5 attribute or dataset to query

    Returns
    -------
    value : object
        Attribute or last value of dataset
    """
    # Prefer the attribute; fall back to the final element of the dataset
    if name in group.attrs:
        value = group.attrs[name]
    else:
        value = group[name][-1]
    return to_str(value)
def dummy_dataset(name, shape, dtype, value):
    """Dummy HDF5 dataset containing a single value.

    This creates a dummy HDF5 dataset in memory containing a single value. It
    can have virtually unlimited size as the dataset is highly compressed.

    Parameters
    ----------
    name : string
        Name of dataset
    shape : sequence of int
        Shape of dataset
    dtype : :class:`numpy.dtype` object or equivalent
        Type of data stored in dataset
    value : object
        All elements in the dataset will equal this value

    Returns
    -------
    dataset : :class:`h5py.Dataset` object
        Dummy HDF5 dataset
    """
    # It is important to randomise the filename as h5py does not allow two writable file objects with the same name
    # Without this randomness katdal can only open one file requiring a dummy dataset
    unique_name = f'{name}_{secrets.token_hex(8)}.h5'
    memfile = h5py.File(unique_name, 'x', driver='core', backing_store=False)
    return memfile.create_dataset(name, shape=shape, maxshape=shape, dtype=dtype,
                                  fillvalue=value, compression='gzip')
# -------------------------------------------------------------------------------------------------
# -- CLASS : H5DataV2
# -------------------------------------------------------------------------------------------------
class H5DataV2(DataSet):
    """Load HDF5 format version 2 file produced by KAT-7 correlator.

    For more information on attributes, see the :class:`DataSet` docstring.

    Parameters
    ----------
    filename : string
        Name of HDF5 file
    ref_ant : string, optional
        Name of reference antenna, used to partition data set into scans
        (default is first antenna in use)
    time_offset : float, optional
        Offset to add to all correlator timestamps, in seconds
    mode : string, optional
        HDF5 file opening mode (e.g. 'r+' to open file in write mode)
    quicklook : {False, True}
        True if synthesised timestamps should be used to partition data set even
        if real timestamps are irregular, thereby avoiding the slow loading of
        real timestamps at the cost of slightly inaccurate label borders
    keepdims : {False, True}, optional
        Force vis / weights / flags to be 3-dimensional, regardless of selection
    kwargs : dict, optional
        Extra keyword arguments, typically meant for other formats and ignored

    Attributes
    ----------
    file : :class:`h5py.File` object
        Underlying HDF5 file, exposed via :mod:`h5py` interface
    """

    def __init__(self, filename, ref_ant='', time_offset=0.0, mode='r',
                 quicklook=False, keepdims=False, **kwargs):
        # The closest thing to a capture block ID is the Unix timestamp in the original filename
        # There is only one (unnamed) output stream, so leave off the stream name
        cbid = pathlib.Path(filename).stem
        DataSet.__init__(self, cbid, ref_ant, time_offset, url=filename)

        # Load file
        self.file, self.version = H5DataV2._open(filename, mode)
        f = self.file

        # Load main HDF5 groups
        data_group, sensors_group, config_group = f['Data'], f['MetaData/Sensors'], f['MetaData/Configuration']
        markup_group = f['Markup']
        # Get observation script parameters, with defaults
        for k, v in config_group['Observation'].items():
            # For KAT-7 (v2.1) data, strip the 'script_' prefix from most parameters
            k = k if self.version > '2.1' or k in ('script_name', 'script_arguments') else k[7:]
            self.obs_params[str(k)] = to_str(v)
        self.observer = self.obs_params.get('observer', '')
        self.description = self.obs_params.get('description', '')
        self.experiment_id = self.obs_params.get('experiment_id', '')
        # Get script log from History group
        self.obs_script_log = f['History/script_log']['log'].tolist()

        # ------ Extract timestamps ------

        self.dump_period = get_single_value(config_group['Correlator'], 'int_time')
        # Obtain visibility data and timestamps
        self._vis = data_group['correlator_data']
        self._timestamps = data_group['timestamps']
        num_dumps = len(self._timestamps)
        if num_dumps != self._vis.shape[0]:
            raise BrokenFile(f'Number of timestamps received from k7_capture ({num_dumps}) '
                             f'differs from number of dumps in data ({self._vis.shape[0]})')
        # Discard the last sample if the timestamp is a duplicate (caused by stop packet in k7_capture)
        num_dumps = (num_dumps - 1) if num_dumps > 1 and (self._timestamps[-1] == self._timestamps[-2]) else num_dumps
        # Do quick test for uniform spacing of timestamps (necessary but not sufficient)
        expected_dumps = (self._timestamps[num_dumps - 1] - self._timestamps[0]) / self.dump_period + 1
        # The expected_dumps should always be an integer (like num_dumps), unless the timestamps and/or dump period
        # are messed up in the file, so the threshold of this test is a bit arbitrary (e.g. could use > 0.5)
        irregular = abs(expected_dumps - num_dumps) >= 0.01
        if irregular:
            # Warn the user, as this is anomalous
            logger.warning("Irregular timestamps detected in file '%s': expected %.3f dumps "
                           "based on dump period and start/end times, got %d instead",
                           filename, expected_dumps, num_dumps)
            if quicklook:
                logger.warning("Quicklook option selected - partitioning data based on synthesised timestamps instead")
        if not irregular or quicklook:
            # Estimate timestamps by assuming they are uniformly spaced (much quicker than loading them from file).
            # This is useful for the purpose of segmenting data set, where accurate timestamps are not that crucial.
            # The real timestamps are still loaded when the user explicitly asks for them.
            data_timestamps = self._timestamps[0] + self.dump_period * np.arange(num_dumps)
        else:
            # Load the real timestamps instead (could take several seconds on a large data set)
            data_timestamps = self._timestamps[:num_dumps]
        # Move timestamps from start of each dump to the middle of the dump
        data_timestamps += 0.5 * self.dump_period + self.time_offset
        if data_timestamps[0] < 1e9:
            logger.warning("File '%s' has invalid first correlator timestamp (%f)", filename, data_timestamps[0])
        # NOTE: np.bool was removed in NumPy 1.24 - use the builtin bool instead
        self._time_keep = np.ones(num_dumps, dtype=bool)
        self.start_time = katpoint.Timestamp(data_timestamps[0] - 0.5 * self.dump_period)
        self.end_time = katpoint.Timestamp(data_timestamps[-1] + 0.5 * self.dump_period)
        self._keepdims = keepdims

        # ------ Extract flags ------

        # Check if flag group is present, else use dummy flag data
        self._flags = markup_group['flags'] if 'flags' in markup_group else \
            dummy_dataset('dummy_flags', shape=self._vis.shape[:-1], dtype=np.uint8, value=0)
        # Obtain flag descriptions from file or recreate default flag description table
        self._flags_description = to_str(markup_group['flags_description'][:]) \
            if 'flags_description' in markup_group else np.array(list(zip(FLAG_NAMES, FLAG_DESCRIPTIONS)))
        self._flags_select = np.array([0], dtype=np.uint8)
        self._flags_keep = 'all'

        # ------ Extract weights ------

        # Check if weight group present, else use dummy weight data
        self._weights = markup_group['weights'] if 'weights' in markup_group else \
            dummy_dataset('dummy_weights', shape=self._vis.shape[:-1], dtype=np.float32, value=1.0)
        # Obtain weight descriptions from file or recreate default weight description table
        self._weights_description = to_str(markup_group['weights_description'][:]) \
            if 'weights_description' in markup_group else np.array(list(zip(WEIGHT_NAMES, WEIGHT_DESCRIPTIONS)))
        self._weights_select = []
        self._weights_keep = 'all'

        # ------ Extract sensors ------

        # Populate sensor cache with all HDF5 datasets below sensor group that fit the description of a sensor
        cache = {}

        def register_sensor(name, obj):
            """A sensor is defined as a non-empty dataset with expected dtype."""
            if isinstance(obj, h5py.Dataset) and obj.shape != () and \
               obj.dtype.names == ('timestamp', 'value', 'status'):
                # Rename pedestal sensors from the old regime to become sensors of the corresponding antenna
                name = ('Antennas/ant' + name[13:]) if name.startswith('Pedestals/ped') else name
                cache[name] = RecordSensorGetter(obj, name)
        sensors_group.visititems(register_sensor)
        # Use estimated data timestamps for now, to speed up data segmentation
        self.sensor = SensorCache(cache, data_timestamps, self.dump_period, keep=self._time_keep,
                                  props=SENSOR_PROPS, virtual=VIRTUAL_SENSORS, aliases=SENSOR_ALIASES)

        # ------ Extract subarrays ------

        # By default, only pick antennas that were in use by the script
        script_ants = to_str(config_group['Observation'].attrs['script_ants']).split(',')
        self.ref_ant = script_ants[0] if not ref_ant else ref_ant
        # Original list of correlation products as pairs of input labels
        corrprods = get_single_value(config_group['Correlator'], 'bls_ordering')
        if len(corrprods) != self._vis.shape[2]:
            # Apply k7_capture baseline mask after the fact, in the hope that it fixes correlation product mislabelling
            corrprods = np.array([cp for cp in corrprods if cp[0][:-1] in script_ants and cp[1][:-1] in script_ants])
            # If there is still a mismatch between labels and data shape, file is considered broken (maybe bad labels?)
            if len(corrprods) != self._vis.shape[2]:
                raise BrokenFile('Number of baseline labels (containing expected antenna names) '
                                 'received from correlator (%d) differs from number of baselines in data (%d)' %
                                 (len(corrprods), self._vis.shape[2]))
            else:
                logger.warning('Reapplied k7_capture baseline mask to fix unexpected number of baseline labels')
        # All antennas in configuration as katpoint Antenna objects
        ants = [katpoint.Antenna(to_str(config_group['Antennas'][name].attrs['description']))
                for name in config_group['Antennas']]
        self.subarrays = [Subarray(ants, corrprods)]
        self.sensor['Observation/subarray'] = CategoricalData(self.subarrays, [0, len(data_timestamps)])
        self.sensor['Observation/subarray_index'] = CategoricalData([0], [0, len(data_timestamps)])
        # Store antenna objects in sensor cache too, for use in virtual sensor calculations
        for ant in ants:
            self.sensor[f'Antennas/{ant.name}/antenna'] = CategoricalData([ant], [0, len(data_timestamps)])
        # Extract array reference from first antenna (first 5 fields of description)
        array_ant_fields = ['array'] + ants[0].description.split(',')[1:5]
        array_ant = katpoint.Antenna(','.join(array_ant_fields))
        self.sensor['Antennas/array/antenna'] = CategoricalData([array_ant], [0, len(data_timestamps)])

        # ------ Extract spectral windows / frequencies ------

        # Ideally we would like to use calculated center-frequency-hz sensor produced by k7_capture (better for nband)
        if self.version >= '2.1':
            centre_freq = self.sensor.get('RFE/center-frequency-hz')
        else:
            # Fall back to basic RFE7 LO frequency, as this supported multiple spectral windows before k7_capture did
            # This assumes WBC mode, though (NBC modes only fully supported since HDF5 v2.1)
            centre_freq = self.sensor.get('RFE/rfe7.lo1.frequency')
            centre_freq.unique_values = [freq - 4200e6 for freq in centre_freq.unique_values]
        num_chans = get_single_value(config_group['Correlator'], 'n_chans')
        if num_chans != self._vis.shape[1]:
            raise BrokenFile(f'Number of channels received from correlator ({num_chans}) '
                             f'differs from number of channels in data ({self._vis.shape[1]})')
        bandwidth = get_single_value(config_group['Correlator'], 'bandwidth')
        channel_width = bandwidth / num_chans
        try:
            mode = self.sensor.get('DBE/dbe.mode').unique_values[0]
        except (KeyError, IndexError):
            # Guess the mode for version 2.0 files that haven't been re-augmented
            mode = 'wbc' if num_chans <= 1024 else 'wbc8k' if bandwidth > 200e6 else 'nbc'
        self.spectral_windows = [SpectralWindow(spw_centre, channel_width, num_chans, mode)
                                 for spw_centre in centre_freq.unique_values]
        self.sensor['Observation/spw'] = CategoricalData([self.spectral_windows[idx] for idx in centre_freq.indices],
                                                         centre_freq.events)
        self.sensor['Observation/spw_index'] = CategoricalData(centre_freq.indices, centre_freq.events)

        # ------ Extract scans / compound scans / targets ------

        # Use the activity sensor of reference antenna to partition the data set into scans (and to set their states)
        scan = self.sensor.get(f'Antennas/{self.ref_ant}/activity')
        # If the antenna starts slewing on the second dump, incorporate the first dump into the slew too.
        # This scenario typically occurs when the first target is only set after the first dump is received.
        # The workaround avoids putting the first dump in a scan by itself, typically with an irrelevant target.
        if len(scan) > 1 and scan.events[1] == 1 and scan[1] == 'slew':
            scan.events, scan.indices = scan.events[1:], scan.indices[1:]
            scan.events[0] = 0
        # Use labels to partition the data set into compound scans
        label = sensor_to_categorical(markup_group['labels']['timestamp'], to_str(markup_group['labels']['label'][:]),
                                      data_timestamps, self.dump_period, **SENSOR_PROPS['Observation/label'])
        # Discard empty labels (typically found in raster scans, where first scan has proper label and rest are empty)
        # However, if all labels are empty, keep them, otherwise whole data set will be one pathological compscan...
        if len(label.unique_values) > 1:
            label.remove('')
        # Create duplicate scan events where labels are set during a scan (i.e. not at start of scan)
        # ASSUMPTION: Number of scans >= number of labels (i.e. each label should introduce a new scan)
        scan.add_unmatched(label.events)
        self.sensor['Observation/scan_state'] = scan
        self.sensor['Observation/scan_index'] = CategoricalData(list(range(len(scan))), scan.events)
        # Move proper label events onto the nearest scan start
        # ASSUMPTION: Number of labels <= number of scans (i.e. only a single label allowed per scan)
        label.align(scan.events)
        # If one or more scans at start of data set have no corresponding label, add a default label for them
        if label.events[0] > 0:
            label.add(0, '')
        self.sensor['Observation/label'] = label
        self.sensor['Observation/compscan_index'] = CategoricalData(list(range(len(label))), label.events)
        # Use the target sensor of reference antenna to set the target for each scan
        target = self.sensor.get(f'Antennas/{self.ref_ant}/target')
        # Move target events onto the nearest scan start
        # ASSUMPTION: Number of targets <= number of scans (i.e. only a single target allowed per scan)
        target.align(scan.events)
        self.sensor['Observation/target'] = target
        self.sensor['Observation/target_index'] = CategoricalData(target.indices, target.events)
        # Set up catalogue containing all targets in file, with reference antenna as default antenna
        self.catalogue.add(target.unique_values)
        self.catalogue.antenna = self.sensor[f'Antennas/{self.ref_ant}/antenna'][0]
        # Ensure that each target flux model spans all frequencies in data set if possible
        self._fix_flux_freq_range()

        # Avoid storing reference to self in transform closure below, as this hinders garbage collection
        dump_period, time_offset = self.dump_period, self.time_offset
        # Restore original (slow) timestamps so that subsequent sensors (e.g. pointing) will have accurate values
        extract_time = LazyTransform('extract_time', lambda t, keep: t + 0.5 * dump_period + time_offset)
        self.sensor.timestamps = LazyIndexer(self._timestamps, keep=slice(num_dumps), transforms=[extract_time])
        # Apply default selection and initialise all members that depend on selection in the process
        self.select(spw=0, subarray=0, ants=script_ants)

    @staticmethod
    def _open(filename, mode='r'):
        """Open file and do basic version and augmentation sanity check."""
        f = h5py.File(filename, mode)
        version = to_str(f.attrs.get('version', '1.x'))
        if not version.startswith('2.'):
            raise WrongVersion(f"Attempting to load version '{version}' file with version 2 loader")
        if 'augment_ts' not in f.attrs:
            raise BrokenFile('HDF5 file not augmented - please run '
                             'k7_augment.py (provided by katcapture package)')
        return f, version

    @staticmethod
    def _get_ants(filename):
        """Quick look function to get the list of antennas in a data file.

        This is intended to be called without creating a full katdal object.

        Parameters
        ----------
        filename : string
            Data file name

        Returns
        -------
        antennas : list of :class:'katpoint.Antenna' objects
        """
        f, version = H5DataV2._open(filename)
        config_group = f['MetaData/Configuration']
        all_ants = [ant for ant in config_group['Antennas']]
        script_ants = to_str(config_group['Observation'].attrs.get('script_ants'))
        script_ants = script_ants.split(',') if script_ants else all_ants
        return [katpoint.Antenna(to_str(config_group['Antennas'][ant].attrs['description']))
                for ant in script_ants if ant in all_ants]

    @staticmethod
    def _get_targets(filename):
        """Quick look function to get the list of targets in a data file.

        This is intended to be called without creating a full katdal object.

        Parameters
        ----------
        filename : string
            Data file name

        Returns
        -------
        targets : :class:'katpoint.Catalogue' object
            All targets in file
        """
        f, version = H5DataV2._open(filename)
        # Use the delay-tracking centre as the one and only target
        # Try two different sensors for the DBE target
        try:
            target_list = f['MetaData/Sensors/DBE/target']
        except Exception:
            # Since h5py errors have varied over the years, we need Exception
            target_list = f['MetaData/Sensors/Beams/Beam0/target']
        all_target_strings = [to_str(target_data[1]) for target_data in target_list]
        return katpoint.Catalogue(np.unique(all_target_strings))

    def __str__(self):
        """Verbose human-friendly string representation of data set."""
        descr = [super().__str__()]
        # append the process_log, if it exists, for non-concatenated h5 files
        if 'process_log' in self.file['History']:
            descr.append('-------------------------------------------------------------------------------')
            descr.append('Process log:')
            for proc in self.file['History']['process_log']:
                # proc has a structured dtype and to_str doesn't work on it, so
                # we have to to_str each element.
                param_list = f'{to_str(proc[0]):>15}:'
                for param in to_str(proc[1]).split(','):
                    param_list += f' {param}'
                descr.append(param_list)
        return '\n'.join(descr)

    @property
    def _weights_keep(self):
        known_weights = [row[0] for row in getattr(self, '_weights_description', [])]
        return [known_weights[ind] for ind in self._weights_select]

    @_weights_keep.setter
    def _weights_keep(self, names):
        known_weights = [row[0] for row in getattr(self, '_weights_description', [])]
        # Ensure a sequence of weight names
        names = _selection_to_list(names, all=known_weights)
        # Create index list for desired weights
        selection = []
        for name in names:
            try:
                selection.append(known_weights.index(name))
            except ValueError:
                logger.warning("%r is not a legitimate weight type for this file, "
                               "supported ones are %s", name, known_weights)
        if known_weights and not selection:
            logger.warning('No valid weights were selected - setting all weights to 1.0 by default')
        self._weights_select = selection

    @property
    def _flags_keep(self):
        if not hasattr(self, '_flags_description'):
            return []
        known_flags = [row[0] for row in self._flags_description]
        # The KAT-7 flagger uses the np.packbits convention (bit 0 = MSB) so don't flip
        selection = np.unpackbits(self._flags_select)
        assert len(known_flags) == len(selection), \
            f'Expected {len(selection)} flag types in file, got {self._flags_description}'
        return [name for name, bit in zip(known_flags, selection) if bit]

    @_flags_keep.setter
    def _flags_keep(self, names):
        if not hasattr(self, '_flags_description'):
            self._flags_select = np.array([0], dtype=np.uint8)
            return
        known_flags = [row[0] for row in self._flags_description]
        # Ensure `names` is a sequence of valid flag names (or an empty list)
        names = _selection_to_list(names, all=known_flags)
        # Create boolean list for desired flags
        selection = np.zeros(8, dtype=np.uint8)
        assert len(known_flags) == len(selection), \
            f'Expected {len(selection)} flag types in file, got {self._flags_description}'
        for name in names:
            try:
                selection[known_flags.index(name)] = 1
            except ValueError:
                logger.warning("%r is not a legitimate flag type for this file, "
                               "supported ones are %s", name, known_flags)
        # Pack index list into bit mask
        # The KAT-7 flagger uses the np.packbits convention (bit 0 = MSB) so don't flip
        flagmask = np.packbits(selection)
        # Use .any() to test the (single-element) array explicitly instead of relying on array truthiness
        if known_flags and not flagmask.any():
            logger.warning('No valid flags were selected - setting all flags to False by default')
        self._flags_select = flagmask

    @property
    def timestamps(self):
        """Visibility timestamps in UTC seconds since Unix epoch.

        The timestamps are returned as an array indexer of float64, shape (*T*,),
        with one timestamp per integration aligned with the integration
        *midpoint*. To get the data array itself from the indexer `x`, do `x[:]`
        or perform any other form of indexing on it.
        """
        # Avoid storing reference to self in transform closure below, as this hinders garbage collection
        dump_period, time_offset = self.dump_period, self.time_offset
        extract_time = LazyTransform('extract_time', lambda t, keep: t + 0.5 * dump_period + time_offset)
        return LazyIndexer(self._timestamps, keep=self._time_keep, transforms=[extract_time])

    def _vislike_indexer(self, dataset, extractor):
        """Lazy indexer for vis-like datasets (vis / weights / flags).

        This operates on datasets with shape (*T*, *F*, *B*) and potentially
        different dtypes. The data type conversions are all left to the provided
        extractor transform, while this method takes care of the common
        selection issues, such as preserving singleton dimensions and dealing
        with duplicate final dumps.

        Parameters
        ----------
        dataset : :class:`h5py.Dataset` object or equivalent
            Underlying vis-like dataset on which lazy indexing will be done
        extractor : function, signature ``data = f(data, keep)``
            Transform to apply to data (`keep` is user-provided 2nd-stage index)

        Returns
        -------
        indexer : :class:`LazyIndexer` object
            Lazy indexer with appropriate selectors and transforms included
        """
        # Create first-stage index from dataset selectors
        time_keep = self._time_keep
        # If there is a duplicate final dump, these lengths don't match -> ignore last dump in file
        if len(time_keep) == len(dataset) - 1:
            # NOTE: np.bool was removed in NumPy 1.24 - use the builtin bool instead
            time_keep = np.zeros(len(dataset), dtype=bool)
            time_keep[:len(self._time_keep)] = self._time_keep
        stage1 = (time_keep, self._freq_keep, self._corrprod_keep)

        def _force_3dim(data, keep):
            """Keep singleton dimensions in stage 2 (i.e. final) indexing."""
            # Ensure that keep tuple has length of 3 (truncate or pad with blanket slices as necessary)
            keep = keep[:3] + (slice(None),) * (3 - len(keep))
            # Final indexing ensures that returned data are always 3-dimensional (i.e. keep singleton dimensions)
            keep_singles = [(np.newaxis if np.isscalar(dim_keep) else slice(None))
                            for dim_keep in keep]
            return data[tuple(keep_singles)]
        force_3dim = LazyTransform('force_3dim', _force_3dim)
        transforms = [extractor, force_3dim] if self._keepdims else [extractor]
        return LazyIndexer(dataset, stage1, transforms)

    @property
    def vis(self):
        r"""Complex visibility data as a function of time, frequency and baseline.

        The visibility data are returned as an array indexer of complex64, shape
        (*T*, *F*, *B*), with time along the first dimension, frequency along the
        second dimension and correlation product ("baseline") index along the
        third dimension. The returned array always has all three dimensions,
        even for scalar (single) values. The number of integrations *T* matches
        the length of :meth:`timestamps`, the number of frequency channels *F*
        matches the length of :meth:`freqs` and the number of correlation
        products *B* matches the length of :meth:`corr_products`. To get the
        data array itself from the indexer `x`, do `x[:]` or perform any other
        form of indexing on it. Only then will data be loaded into memory.

        The sign convention of the imaginary part is consistent with an
        electric field of :math:`e^{i(\omega t - jz)}` i.e. phase that
        increases with time.
        """
        extract = LazyTransform('extract_vis',
                                # Discard the 4th / last dimension as this is subsumed in complex view
                                # The visibilities are conjugated due to using the lower sideband
                                lambda vis, keep: vis.view(np.complex64)[..., 0].conjugate(),
                                lambda shape: shape[:-1], np.complex64)
        return self._vislike_indexer(self._vis, extract)

    @property
    def weights(self):
        """Visibility weights as a function of time, frequency and baseline.

        The weights data are returned as an array indexer of float32, shape
        (*T*, *F*, *B*), with time along the first dimension, frequency along the
        second dimension and correlation product ("baseline") index along the
        third dimension. The number of integrations *T* matches the length of
        :meth:`timestamps`, the number of frequency channels *F* matches the
        length of :meth:`freqs` and the number of correlation products *B*
        matches the length of :meth:`corr_products`. To get the data array
        itself from the indexer `x`, do `x[:]` or perform any other form of
        indexing on it. Only then will data be loaded into memory.
        """
        # We currently only cater for a single weight type (i.e. either select it or fall back to 1.0)
        def transform(weights, keep):
            return weights.astype(np.float32) if self._weights_select else \
                np.ones_like(weights, dtype=np.float32)
        extract = LazyTransform('extract_weights', transform, dtype=np.float32)
        return self._vislike_indexer(self._weights, extract)

    @property
    def flags(self):
        """Flags as a function of time, frequency and baseline.

        The flags data are returned as an array indexer of bool, shape
        (*T*, *F*, *B*), with time along the first dimension, frequency along the
        second dimension and correlation product ("baseline") index along the
        third dimension. The number of integrations *T* matches the length of
        :meth:`timestamps`, the number of frequency channels *F* matches the
        length of :meth:`freqs` and the number of correlation products *B*
        matches the length of :meth:`corr_products`. To get the data array
        itself from the indexer `x`, do `x[:]` or perform any other form of
        indexing on it. Only then will data be loaded into memory.
        """
        def transform(flags, keep):
            """Use flagmask to blank out the flags we don't want."""
            # Then convert uint8 to bool -> if any flag bits set, flag is set
            return np.bool_(np.bitwise_and(self._flags_select, flags))
        # NOTE: np.bool was removed in NumPy 1.24 - use the builtin bool instead
        extract = LazyTransform('extract_flags', transform, dtype=bool)
        return self._vislike_indexer(self._flags, extract)

    @property
    def temperature(self):
        """Air temperature in degrees Celsius."""
        return self.sensor['Enviro/asc.air.temperature']

    @property
    def pressure(self):
        """Barometric pressure in millibars."""
        return self.sensor['Enviro/asc.air.pressure']

    @property
    def humidity(self):
        """Relative humidity as a percentage."""
        return self.sensor['Enviro/asc.air.relative-humidity']

    @property
    def wind_speed(self):
        """Wind speed in metres per second."""
        return self.sensor['Enviro/asc.wind.speed']

    @property
    def wind_direction(self):
        """Wind direction as an azimuth angle in degrees."""
        return self.sensor['Enviro/asc.wind.direction']
| 51.878698 | 119 | 0.643057 | st(zip(WEIGHT_NAMES, WEIGHT_DESCRIPTIONS)))
self._weights_select = []
self._weights_keep = 'all'
cache = {}
def register_sensor(name, obj):
if isinstance(obj, h5py.Dataset) and obj.shape != () and \
obj.dtype.names == ('timestamp', 'value', 'status'):
name = ('Antennas/ant' + name[13:]) if name.startswith('Pedestals/ped') else name
cache[name] = RecordSensorGetter(obj, name)
sensors_group.visititems(register_sensor)
self.sensor = SensorCache(cache, data_timestamps, self.dump_period, keep=self._time_keep,
props=SENSOR_PROPS, virtual=VIRTUAL_SENSORS, aliases=SENSOR_ALIASES)
script_ants = to_str(config_group['Observation'].attrs['script_ants']).split(',')
self.ref_ant = script_ants[0] if not ref_ant else ref_ant
corrprods = get_single_value(config_group['Correlator'], 'bls_ordering')
if len(corrprods) != self._vis.shape[2]:
corrprods = np.array([cp for cp in corrprods if cp[0][:-1] in script_ants and cp[1][:-1] in script_ants])
if len(corrprods) != self._vis.shape[2]:
raise BrokenFile('Number of baseline labels (containing expected antenna names) '
'received from correlator (%d) differs from number of baselines in data (%d)' %
(len(corrprods), self._vis.shape[2]))
else:
logger.warning('Reapplied k7_capture baseline mask to fix unexpected number of baseline labels')
ants = [katpoint.Antenna(to_str(config_group['Antennas'][name].attrs['description']))
for name in config_group['Antennas']]
self.subarrays = [Subarray(ants, corrprods)]
self.sensor['Observation/subarray'] = CategoricalData(self.subarrays, [0, len(data_timestamps)])
self.sensor['Observation/subarray_index'] = CategoricalData([0], [0, len(data_timestamps)])
for ant in ants:
self.sensor[f'Antennas/{ant.name}/antenna'] = CategoricalData([ant], [0, len(data_timestamps)])
array_ant_fields = ['array'] + ants[0].description.split(',')[1:5]
array_ant = katpoint.Antenna(','.join(array_ant_fields))
self.sensor['Antennas/array/antenna'] = CategoricalData([array_ant], [0, len(data_timestamps)])
if self.version >= '2.1':
centre_freq = self.sensor.get('RFE/center-frequency-hz')
else:
centre_freq = self.sensor.get('RFE/rfe7.lo1.frequency')
centre_freq.unique_values = [freq - 4200e6 for freq in centre_freq.unique_values]
num_chans = get_single_value(config_group['Correlator'], 'n_chans')
if num_chans != self._vis.shape[1]:
raise BrokenFile(f'Number of channels received from correlator ({num_chans}) '
f'differs from number of channels in data ({self._vis.shape[1]})')
bandwidth = get_single_value(config_group['Correlator'], 'bandwidth')
channel_width = bandwidth / num_chans
try:
mode = self.sensor.get('DBE/dbe.mode').unique_values[0]
except (KeyError, IndexError):
mode = 'wbc' if num_chans <= 1024 else 'wbc8k' if bandwidth > 200e6 else 'nbc'
self.spectral_windows = [SpectralWindow(spw_centre, channel_width, num_chans, mode)
for spw_centre in centre_freq.unique_values]
self.sensor['Observation/spw'] = CategoricalData([self.spectral_windows[idx] for idx in centre_freq.indices],
centre_freq.events)
self.sensor['Observation/spw_index'] = CategoricalData(centre_freq.indices, centre_freq.events)
# ------ Extract scans / compound scans / targets ------
# Use the activity sensor of reference antenna to partition the data set into scans (and to set their states)
scan = self.sensor.get(f'Antennas/{self.ref_ant}/activity')
# If the antenna starts slewing on the second dump, incorporate the first dump into the slew too.
# This scenario typically occurs when the first target is only set after the first dump is received.
# The workaround avoids putting the first dump in a scan by itself, typically with an irrelevant target.
if len(scan) > 1 and scan.events[1] == 1 and scan[1] == 'slew':
scan.events, scan.indices = scan.events[1:], scan.indices[1:]
scan.events[0] = 0
# Use labels to partition the data set into compound scans
label = sensor_to_categorical(markup_group['labels']['timestamp'], to_str(markup_group['labels']['label'][:]),
data_timestamps, self.dump_period, **SENSOR_PROPS['Observation/label'])
# Discard empty labels (typically found in raster scans, where first scan has proper label and rest are empty)
# However, if all labels are empty, keep them, otherwise whole data set will be one pathological compscan...
if len(label.unique_values) > 1:
label.remove('')
# Create duplicate scan events where labels are set during a scan (i.e. not at start of scan)
# ASSUMPTION: Number of scans >= number of labels (i.e. each label should introduce a new scan)
scan.add_unmatched(label.events)
self.sensor['Observation/scan_state'] = scan
self.sensor['Observation/scan_index'] = CategoricalData(list(range(len(scan))), scan.events)
# Move proper label events onto the nearest scan start
# ASSUMPTION: Number of labels <= number of scans (i.e. only a single label allowed per scan)
label.align(scan.events)
# If one or more scans at start of data set have no corresponding label, add a default label for them
if label.events[0] > 0:
label.add(0, '')
self.sensor['Observation/label'] = label
self.sensor['Observation/compscan_index'] = CategoricalData(list(range(len(label))), label.events)
# Use the target sensor of reference antenna to set the target for each scan
target = self.sensor.get(f'Antennas/{self.ref_ant}/target')
# Move target events onto the nearest scan start
# ASSUMPTION: Number of targets <= number of scans (i.e. only a single target allowed per scan)
target.align(scan.events)
self.sensor['Observation/target'] = target
self.sensor['Observation/target_index'] = CategoricalData(target.indices, target.events)
# Set up catalogue containing all targets in file, with reference antenna as default antenna
self.catalogue.add(target.unique_values)
self.catalogue.antenna = self.sensor[f'Antennas/{self.ref_ant}/antenna'][0]
# Ensure that each target flux model spans all frequencies in data set if possible
self._fix_flux_freq_range()
# Avoid storing reference to self in transform closure below, as this hinders garbage collection
dump_period, time_offset = self.dump_period, self.time_offset
# Restore original (slow) timestamps so that subsequent sensors (e.g. pointing) will have accurate values
extract_time = LazyTransform('extract_time', lambda t, keep: t + 0.5 * dump_period + time_offset)
self.sensor.timestamps = LazyIndexer(self._timestamps, keep=slice(num_dumps), transforms=[extract_time])
# Apply default selection and initialise all members that depend on selection in the process
self.select(spw=0, subarray=0, ants=script_ants)
@staticmethod
def _open(filename, mode='r'):
f = h5py.File(filename, mode)
version = to_str(f.attrs.get('version', '1.x'))
if not version.startswith('2.'):
raise WrongVersion(f"Attempting to load version '{version}' file with version 2 loader")
if 'augment_ts' not in f.attrs:
raise BrokenFile('HDF5 file not augmented - please run '
'k7_augment.py (provided by katcapture package)')
return f, version
@staticmethod
def _get_ants(filename):
f, version = H5DataV2._open(filename)
config_group = f['MetaData/Configuration']
all_ants = [ant for ant in config_group['Antennas']]
script_ants = to_str(config_group['Observation'].attrs.get('script_ants'))
script_ants = script_ants.split(',') if script_ants else all_ants
return [katpoint.Antenna(to_str(config_group['Antennas'][ant].attrs['description']))
for ant in script_ants if ant in all_ants]
@staticmethod
def _get_targets(filename):
f, version = H5DataV2._open(filename)
# Use the delay-tracking centre as the one and only target
# Try two different sensors for the DBE target
try:
target_list = f['MetaData/Sensors/DBE/target']
except Exception:
# Since h5py errors have varied over the years, we need Exception
target_list = f['MetaData/Sensors/Beams/Beam0/target']
all_target_strings = [to_str(target_data[1]) for target_data in target_list]
return katpoint.Catalogue(np.unique(all_target_strings))
def __str__(self):
descr = [super().__str__()]
# append the process_log, if it exists, for non-concatenated h5 files
if 'process_log' in self.file['History']:
descr.append('-------------------------------------------------------------------------------')
descr.append('Process log:')
for proc in self.file['History']['process_log']:
# proc has a structured dtype and to_str doesn't work on it, so
param_list = f'{to_str(proc[0]):>15}:'
for param in to_str(proc[1]).split(','):
param_list += f' {param}'
descr.append(param_list)
return '\n'.join(descr)
@property
def _weights_keep(self):
known_weights = [row[0] for row in getattr(self, '_weights_description', [])]
return [known_weights[ind] for ind in self._weights_select]
@_weights_keep.setter
def _weights_keep(self, names):
known_weights = [row[0] for row in getattr(self, '_weights_description', [])]
names = _selection_to_list(names, all=known_weights)
selection = []
for name in names:
try:
selection.append(known_weights.index(name))
except ValueError:
logger.warning("%r is not a legitimate weight type for this file, "
"supported ones are %s", name, known_weights)
if known_weights and not selection:
logger.warning('No valid weights were selected - setting all weights to 1.0 by default')
self._weights_select = selection
@property
def _flags_keep(self):
if not hasattr(self, '_flags_description'):
return []
known_flags = [row[0] for row in self._flags_description]
selection = np.unpackbits(self._flags_select)
assert len(known_flags) == len(selection), \
f'Expected {len(selection)} flag types in file, got {self._flags_description}'
return [name for name, bit in zip(known_flags, selection) if bit]
@_flags_keep.setter
def _flags_keep(self, names):
if not hasattr(self, '_flags_description'):
self._flags_select = np.array([0], dtype=np.uint8)
return
known_flags = [row[0] for row in self._flags_description]
# Ensure `names` is a sequence of valid flag names (or an empty list)
names = _selection_to_list(names, all=known_flags)
# Create boolean list for desired flags
selection = np.zeros(8, dtype=np.uint8)
assert len(known_flags) == len(selection), \
f'Expected {len(selection)} flag types in file, got {self._flags_description}'
for name in names:
try:
selection[known_flags.index(name)] = 1
except ValueError:
logger.warning("%r is not a legitimate flag type for this file, "
"supported ones are %s", name, known_flags)
# Pack index list into bit mask
# The KAT-7 flagger uses the np.packbits convention (bit 0 = MSB) so don't flip
flagmask = np.packbits(selection)
if known_flags and not flagmask:
logger.warning('No valid flags were selected - setting all flags to False by default')
self._flags_select = flagmask
@property
def timestamps(self):
dump_period, time_offset = self.dump_period, self.time_offset
extract_time = LazyTransform('extract_time', lambda t, keep: t + 0.5 * dump_period + time_offset)
return LazyIndexer(self._timestamps, keep=self._time_keep, transforms=[extract_time])
def _vislike_indexer(self, dataset, extractor):
time_keep = self._time_keep
if len(time_keep) == len(dataset) - 1:
time_keep = np.zeros(len(dataset), dtype=np.bool)
time_keep[:len(self._time_keep)] = self._time_keep
stage1 = (time_keep, self._freq_keep, self._corrprod_keep)
def _force_3dim(data, keep):
# Ensure that keep tuple has length of 3 (truncate or pad with blanket slices as necessary)
keep = keep[:3] + (slice(None),) * (3 - len(keep))
# Final indexing ensures that returned data are always 3-dimensional (i.e. keep singleton dimensions)
keep_singles = [(np.newaxis if np.isscalar(dim_keep) else slice(None))
for dim_keep in keep]
return data[tuple(keep_singles)]
force_3dim = LazyTransform('force_3dim', _force_3dim)
transforms = [extractor, force_3dim] if self._keepdims else [extractor]
return LazyIndexer(dataset, stage1, transforms)
@property
def vis(self):
extract = LazyTransform('extract_vis',
# Discard the 4th / last dimension as this is subsumed in complex view
# The visibilities are conjugated due to using the lower sideband
lambda vis, keep: vis.view(np.complex64)[..., 0].conjugate(),
lambda shape: shape[:-1], np.complex64)
return self._vislike_indexer(self._vis, extract)
@property
def weights(self):
# We currently only cater for a single weight type (i.e. either select it or fall back to 1.0)
def transform(weights, keep):
return weights.astype(np.float32) if self._weights_select else \
np.ones_like(weights, dtype=np.float32)
extract = LazyTransform('extract_weights', transform, dtype=np.float32)
return self._vislike_indexer(self._weights, extract)
@property
def flags(self):
def transform(flags, keep):
# Then convert uint8 to bool -> if any flag bits set, flag is set
return np.bool_(np.bitwise_and(self._flags_select, flags))
extract = LazyTransform('extract_flags', transform, dtype=np.bool)
return self._vislike_indexer(self._flags, extract)
@property
def temperature(self):
return self.sensor['Enviro/asc.air.temperature']
@property
def pressure(self):
return self.sensor['Enviro/asc.air.pressure']
@property
def humidity(self):
return self.sensor['Enviro/asc.air.relative-humidity']
@property
def wind_speed(self):
return self.sensor['Enviro/asc.wind.speed']
@property
def wind_direction(self):
return self.sensor['Enviro/asc.wind.direction']
| true | true |
f73d9c6b7fdc7a86f8577e78f0eaf9fbe8b79689 | 788 | py | Python | kingfisher_scrapy/items.py | open-contracting/kingfisher-collect | 2fbbd6361a0ec959e0603343a4b363f97fae3815 | [
"BSD-3-Clause"
] | 7 | 2020-07-24T13:15:37.000Z | 2021-12-11T22:40:07.000Z | kingfisher_scrapy/items.py | open-contracting/kingfisher-collect | 2fbbd6361a0ec959e0603343a4b363f97fae3815 | [
"BSD-3-Clause"
] | 418 | 2020-04-27T22:15:27.000Z | 2022-03-31T23:49:34.000Z | kingfisher_scrapy/items.py | open-contracting/kingfisher-collect | 2fbbd6361a0ec959e0603343a4b363f97fae3815 | [
"BSD-3-Clause"
] | 6 | 2020-05-28T16:06:53.000Z | 2021-03-16T02:54:15.000Z | # https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class Item(scrapy.Item):
file_name = scrapy.Field()
url = scrapy.Field()
validate = True
class File(Item):
data = scrapy.Field()
data_type = scrapy.Field()
# Added by the FilesStore extension, for the KingfisherProcessAPI extension to read the file.
path = scrapy.Field()
files_store = scrapy.Field()
class FileItem(Item):
number = scrapy.Field()
data = scrapy.Field()
data_type = scrapy.Field()
# Added by the FilesStore extension, for the KingfisherProcessAPI extension to read the file.
path = scrapy.Field()
files_store = scrapy.Field()
class FileError(Item):
errors = scrapy.Field()
class PluckedItem(scrapy.Item):
value = scrapy.Field()
| 21.297297 | 97 | 0.685279 |
import scrapy
class Item(scrapy.Item):
file_name = scrapy.Field()
url = scrapy.Field()
validate = True
class File(Item):
data = scrapy.Field()
data_type = scrapy.Field()
path = scrapy.Field()
files_store = scrapy.Field()
class FileItem(Item):
number = scrapy.Field()
data = scrapy.Field()
data_type = scrapy.Field()
path = scrapy.Field()
files_store = scrapy.Field()
class FileError(Item):
errors = scrapy.Field()
class PluckedItem(scrapy.Item):
value = scrapy.Field()
| true | true |
f73d9cb0127b034f466f748f81ab0ccff916ad23 | 3,808 | py | Python | doc/sphinxarg/parser.py | zifengqi123/robot_upstart | 987376f5c49864e9e250e5bb5b88ae13e0ed6973 | [
"BSD-3-Clause"
] | 158 | 2015-01-30T09:45:02.000Z | 2022-03-14T18:03:38.000Z | doc/sphinxarg/parser.py | zifengqi123/robot_upstart | 987376f5c49864e9e250e5bb5b88ae13e0ed6973 | [
"BSD-3-Clause"
] | 93 | 2015-01-09T08:27:17.000Z | 2022-02-22T07:52:39.000Z | doc/sphinxarg/parser.py | zifengqi123/robot_upstart | 987376f5c49864e9e250e5bb5b88ae13e0ed6973 | [
"BSD-3-Clause"
] | 96 | 2015-02-13T13:00:07.000Z | 2022-03-16T05:04:42.000Z | from argparse import _HelpAction, _SubParsersAction
import re
class NavigationException(Exception):
pass
def parser_navigate(parser_result, path, current_path=None):
if isinstance(path, str):
if path == '':
return parser_result
path = re.split(r'\s+', path)
current_path = current_path or []
if len(path) == 0:
return parser_result
if 'children' not in parser_result:
raise NavigationException(
'Current parser have no children elements. (path: %s)' %
' '.join(current_path))
next_hop = path.pop(0)
for child in parser_result['children']:
if child['name'] == next_hop:
current_path.append(next_hop)
return parser_navigate(child, path, current_path)
raise NavigationException(
'Current parser have no children element with name: %s (path: %s)' % (
next_hop, ' '.join(current_path)))
def _try_add_parser_attribute(data, parser, attribname):
attribval = getattr(parser, attribname, None)
if attribval is None:
return
if not isinstance(attribval, str):
return
if len(attribval) > 0:
data[attribname] = attribval
def _format_usage_without_prefix(parser):
"""
Use private argparse APIs to get the usage string without
the 'usage: ' prefix.
"""
fmt = parser._get_formatter()
fmt.add_usage(parser.usage, parser._actions,
parser._mutually_exclusive_groups, prefix='')
return fmt.format_help().strip()
def parse_parser(parser, data=None, **kwargs):
if data is None:
data = {
'name': '',
'usage': parser.format_usage().strip(),
'bare_usage': _format_usage_without_prefix(parser),
'prog': parser.prog,
}
_try_add_parser_attribute(data, parser, 'description')
_try_add_parser_attribute(data, parser, 'epilog')
for action in parser._get_positional_actions():
if isinstance(action, _HelpAction):
continue
if isinstance(action, _SubParsersAction):
helps = {}
for item in action._choices_actions:
helps[item.dest] = item.help
for name, subaction in action._name_parser_map.items():
subaction.prog = '%s %s' % (parser.prog, name)
subdata = {
'name': name,
'help': helps[name] if name in helps else '',
'usage': subaction.format_usage().strip(),
'bare_usage': _format_usage_without_prefix(subaction),
}
parse_parser(subaction, subdata, **kwargs)
if 'children' not in data:
data['children'] = []
data['children'].append(subdata)
continue
if 'args' not in data:
data['args'] = []
arg = {
'name': action.dest,
'help': action.help or '',
'metavar': action.metavar
}
if action.choices:
arg['choices'] = action.choices
data['args'].append(arg)
show_defaults = (
('skip_default_values' not in kwargs) or
(kwargs['skip_default_values'] is False))
for action in parser._get_optional_actions():
if isinstance(action, _HelpAction):
continue
if 'options' not in data:
data['options'] = []
option = {
'name': action.option_strings,
'default': action.default if show_defaults else '==SUPPRESS==',
'help': action.help or ''
}
if action.choices:
option['choices'] = action.choices
if "==SUPPRESS==" not in option['help']:
data['options'].append(option)
return data
| 34.618182 | 79 | 0.57563 | from argparse import _HelpAction, _SubParsersAction
import re
class NavigationException(Exception):
pass
def parser_navigate(parser_result, path, current_path=None):
if isinstance(path, str):
if path == '':
return parser_result
path = re.split(r'\s+', path)
current_path = current_path or []
if len(path) == 0:
return parser_result
if 'children' not in parser_result:
raise NavigationException(
'Current parser have no children elements. (path: %s)' %
' '.join(current_path))
next_hop = path.pop(0)
for child in parser_result['children']:
if child['name'] == next_hop:
current_path.append(next_hop)
return parser_navigate(child, path, current_path)
raise NavigationException(
'Current parser have no children element with name: %s (path: %s)' % (
next_hop, ' '.join(current_path)))
def _try_add_parser_attribute(data, parser, attribname):
attribval = getattr(parser, attribname, None)
if attribval is None:
return
if not isinstance(attribval, str):
return
if len(attribval) > 0:
data[attribname] = attribval
def _format_usage_without_prefix(parser):
fmt = parser._get_formatter()
fmt.add_usage(parser.usage, parser._actions,
parser._mutually_exclusive_groups, prefix='')
return fmt.format_help().strip()
def parse_parser(parser, data=None, **kwargs):
if data is None:
data = {
'name': '',
'usage': parser.format_usage().strip(),
'bare_usage': _format_usage_without_prefix(parser),
'prog': parser.prog,
}
_try_add_parser_attribute(data, parser, 'description')
_try_add_parser_attribute(data, parser, 'epilog')
for action in parser._get_positional_actions():
if isinstance(action, _HelpAction):
continue
if isinstance(action, _SubParsersAction):
helps = {}
for item in action._choices_actions:
helps[item.dest] = item.help
for name, subaction in action._name_parser_map.items():
subaction.prog = '%s %s' % (parser.prog, name)
subdata = {
'name': name,
'help': helps[name] if name in helps else '',
'usage': subaction.format_usage().strip(),
'bare_usage': _format_usage_without_prefix(subaction),
}
parse_parser(subaction, subdata, **kwargs)
if 'children' not in data:
data['children'] = []
data['children'].append(subdata)
continue
if 'args' not in data:
data['args'] = []
arg = {
'name': action.dest,
'help': action.help or '',
'metavar': action.metavar
}
if action.choices:
arg['choices'] = action.choices
data['args'].append(arg)
show_defaults = (
('skip_default_values' not in kwargs) or
(kwargs['skip_default_values'] is False))
for action in parser._get_optional_actions():
if isinstance(action, _HelpAction):
continue
if 'options' not in data:
data['options'] = []
option = {
'name': action.option_strings,
'default': action.default if show_defaults else '==SUPPRESS==',
'help': action.help or ''
}
if action.choices:
option['choices'] = action.choices
if "==SUPPRESS==" not in option['help']:
data['options'].append(option)
return data
| true | true |
f73d9cd2eb95d3f0d9a90211d763018289b675a6 | 2,317 | gyp | Python | Mozc-for-iOS/src/converter/converter_main.gyp | spanfish/JapaneseKeyboard | 84fa7ef799d145fb9897b6e86bc7bc50610ccb2b | [
"Apache-2.0"
] | 33 | 2015-01-21T09:50:21.000Z | 2022-02-12T15:18:25.000Z | Mozc-for-iOS/src/converter/converter_main.gyp | spanfish/JapaneseKeyboard | 84fa7ef799d145fb9897b6e86bc7bc50610ccb2b | [
"Apache-2.0"
] | 1 | 2019-03-08T08:07:14.000Z | 2019-03-08T08:07:14.000Z | Mozc-for-iOS/src/converter/converter_main.gyp | spanfish/JapaneseKeyboard | 84fa7ef799d145fb9897b6e86bc7bc50610ccb2b | [
"Apache-2.0"
] | 8 | 2015-06-08T15:57:25.000Z | 2019-05-15T08:52:58.000Z | # Copyright 2010-2014, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
{
'variables': {
'relative_mozc_dir': '',
'gen_out_mozc_dir': '<(SHARED_INTERMEDIATE_DIR)/<(relative_mozc_dir)',
},
'targets': [
{
'target_name': 'converter_main',
'type': 'executable',
'sources': [
'converter_main.cc',
],
'dependencies': [
'../composer/composer.gyp:composer',
'../config/config.gyp:config_protocol',
'../engine/engine.gyp:oss_engine_factory',
'../engine/engine.gyp:engine',
'../engine/engine.gyp:engine_factory',
'../engine/engine.gyp:mock_data_engine_factory',
'../session/session_base.gyp:session_protocol',
'converter.gyp:converter',
'converter_base.gyp:pos_id_printer',
'converter_base.gyp:segments',
],
},
],
}
| 40.649123 | 74 | 0.715149 |
{
'variables': {
'relative_mozc_dir': '',
'gen_out_mozc_dir': '<(SHARED_INTERMEDIATE_DIR)/<(relative_mozc_dir)',
},
'targets': [
{
'target_name': 'converter_main',
'type': 'executable',
'sources': [
'converter_main.cc',
],
'dependencies': [
'../composer/composer.gyp:composer',
'../config/config.gyp:config_protocol',
'../engine/engine.gyp:oss_engine_factory',
'../engine/engine.gyp:engine',
'../engine/engine.gyp:engine_factory',
'../engine/engine.gyp:mock_data_engine_factory',
'../session/session_base.gyp:session_protocol',
'converter.gyp:converter',
'converter_base.gyp:pos_id_printer',
'converter_base.gyp:segments',
],
},
],
}
| true | true |
f73d9ce52f85067f94782a29c44063bd133920b6 | 538 | py | Python | manage.py | Shekcon/Web_django | 63e1a4d6e4e5307b33c2caf9fb24ca68c531a501 | [
"MIT"
] | 1 | 2019-01-14T17:22:55.000Z | 2019-01-14T17:22:55.000Z | manage.py | Shekcon/Web_django | 63e1a4d6e4e5307b33c2caf9fb24ca68c531a501 | [
"MIT"
] | null | null | null | manage.py | Shekcon/Web_django | 63e1a4d6e4e5307b33c2caf9fb24ca68c531a501 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Houses.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| 33.625 | 73 | 0.685874 |
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Houses.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| true | true |
f73d9d036d7b740cd46b8185f60c619665f14626 | 773 | py | Python | test/timing_system_test-2.0.1.py | bopopescu/Lauecollect | 60ae2b05ea8596ba0decf426e37aeaca0bc8b6be | [
"MIT"
] | null | null | null | test/timing_system_test-2.0.1.py | bopopescu/Lauecollect | 60ae2b05ea8596ba0decf426e37aeaca0bc8b6be | [
"MIT"
] | 1 | 2019-10-22T21:28:31.000Z | 2019-10-22T21:39:12.000Z | test/timing_system_test-2.0.1.py | bopopescu/Lauecollect | 60ae2b05ea8596ba0decf426e37aeaca0bc8b6be | [
"MIT"
] | 2 | 2019-06-06T15:06:46.000Z | 2020-07-20T02:03:22.000Z | """Delay line linearity characterization
Friedrich Schotte, Jul 22, 2015 - Jul 23, 2015
Setup:
Ramsay-100B RF Generator, 351.93398 MHz +10 dBm -> FPGA RF IN
FPGA 1: X-scope trig -> CH1, DC50, 500 mV/div
FPGA 13: ps L oscill -> DC block -> 90-MHz low-pass -> CH2, DC50, 500 mV/div
Timebase 5 ns/div
Measurement P1 CH2, time@level, Percent, 50%, Slope Pos, Gate Start 4.5 div, Stop 5.5 div
FPGA Frequency: 41 Hz
"""
__version__ = "2.0.1"
from instrumentation import lxd,bcf,lecroy_scope
from scan import rscan
delay = lecroy_scope("pico21").measurement(1)
tmax = 5/bcf
def scan():
lxd.value = 0
global data
data = rscan([lxd,delay.gate.start,delay.gate.stop],0,[tmax,-tmax,-tmax],
640,delay,averaging_time=60.0,logfile="test/delay.log")
print('scan()')
| 32.208333 | 89 | 0.702458 | __version__ = "2.0.1"
from instrumentation import lxd,bcf,lecroy_scope
from scan import rscan
delay = lecroy_scope("pico21").measurement(1)
tmax = 5/bcf
def scan():
lxd.value = 0
global data
data = rscan([lxd,delay.gate.start,delay.gate.stop],0,[tmax,-tmax,-tmax],
640,delay,averaging_time=60.0,logfile="test/delay.log")
print('scan()')
| true | true |
f73d9d4077640cd984b300592a49749be06c379c | 6,674 | py | Python | freight/admin.py | buahaha/aa-freight | 69eb85188988d7cfaffc7c485d22ddb442a4a2b3 | [
"MIT"
] | null | null | null | freight/admin.py | buahaha/aa-freight | 69eb85188988d7cfaffc7c485d22ddb442a4a2b3 | [
"MIT"
] | null | null | null | freight/admin.py | buahaha/aa-freight | 69eb85188988d7cfaffc7c485d22ddb442a4a2b3 | [
"MIT"
] | null | null | null | from django.contrib import admin
from . import tasks
from .app_settings import FREIGHT_DEVELOPER_MODE
from .models import (
Contract,
ContractCustomerNotification,
ContractHandler,
EveEntity,
Location,
Pricing,
)
from .tasks import update_locations
@admin.register(Location)
class LocationAdmin(admin.ModelAdmin):
    """Admin view for Eve Online locations (stations and structures)."""

    list_display = ("id", "name", "_category", "_solar_system")
    list_filter = ("category_id",)
    search_fields = ["name"]
    list_select_related = True
    actions = ["update_location"]

    # Locations are synced from ESI, so hide the change links
    # outside developer mode
    if not FREIGHT_DEVELOPER_MODE:
        list_display_links = None

    def _category(self, obj):
        """Human readable category of the location."""
        return obj.get_category_id_display()

    _category.admin_order_field = "category_id"

    def _solar_system(self, obj):
        """Name of the solar system the location is in."""
        return obj.solar_system_name

    def has_add_permission(self, request):
        # locations may only be created manually in developer mode
        return bool(FREIGHT_DEVELOPER_MODE)

    def has_change_permission(self, request, obj=None):
        # FIX: Django calls this hook with an ``obj`` argument on change
        # views - the previous signature without ``obj=None`` raised a
        # TypeError in that case
        return bool(FREIGHT_DEVELOPER_MODE)

    def update_location(self, request, queryset):
        """Admin action: reload the selected locations from ESI via Celery."""
        location_ids = [obj.pk for obj in queryset]
        update_locations.delay(location_ids)
        self.message_user(
            request,
            "Started updating {} locations. "
            "This can take a short while to complete.".format(len(location_ids)),
        )

    update_location.short_description = "Update selected locations from ESI"
if FREIGHT_DEVELOPER_MODE:
    # Raw access to cached Eve entities (characters, corporations, ...);
    # only registered on the admin site when developer mode is enabled.
    @admin.register(EveEntity)
    class EveEntityAdmin(admin.ModelAdmin):
        list_display = ("name", "category")
        list_filter = ("category",)
@admin.register(Pricing)
class PricingAdmin(admin.ModelAdmin):
    """Admin view for pricing routes."""

    list_display = (
        "name", "start_location", "end_location",
        "_bidirectional", "_default", "_active",
    )
    list_filter = (
        "is_bidirectional",
        "is_active",
        ("start_location", admin.RelatedOnlyFieldListFilter),
        ("end_location", admin.RelatedOnlyFieldListFilter),
    )
    list_select_related = True

    # The wrappers below surface boolean model properties in the change
    # list; setting ``boolean = True`` renders them as on/off icons.

    def _bidirectional(self, obj):
        """Whether the route can be used in both directions."""
        return obj.is_bidirectional

    _bidirectional.boolean = True

    def _default(self, obj):
        """Whether this is the default pricing."""
        return obj.is_default

    _default.boolean = True

    def _active(self, obj):
        """Whether this pricing is currently active."""
        return obj.is_active

    _active.boolean = True
@admin.register(ContractHandler)
class ContractHandlerAdmin(admin.ModelAdmin):
    """Admin view for the handler that syncs contracts from Eve Online."""

    list_display = (
        "organization",
        "character",
        "operation_mode",
        "last_sync",
        "_is_sync_ok",
    )
    actions = ("start_sync", "send_notifications", "update_pricing")

    # outside developer mode these fields hold synced state and
    # must not be edited manually
    if not FREIGHT_DEVELOPER_MODE:
        readonly_fields = (
            "organization",
            "character",
            "operation_mode",
            "version_hash",
            "last_sync",
            "last_error",
        )

    def _is_sync_ok(self, obj):
        """Whether the last sync completed without errors."""
        return obj.is_sync_ok

    _is_sync_ok.boolean = True
    _is_sync_ok.short_description = "sync ok"

    def start_sync(self, request, queryset):
        """Admin action: force a contract sync from the Eve Online server."""
        for handler in queryset:
            tasks.run_contracts_sync.delay(force_sync=True, user_pk=request.user.pk)
            self.message_user(
                request,
                "Started syncing contracts for: {} "
                "You will receive a report once it is completed.".format(handler),
            )

    start_sync.short_description = "Fetch contracts from Eve Online server"

    def send_notifications(self, request, queryset):
        """Admin action: re-send notifications for outstanding contracts."""
        for handler in queryset:
            tasks.send_contract_notifications.delay(force_sent=True)
            self.message_user(
                request, "Started sending notifications for: {} ".format(handler)
            )

    send_notifications.short_description = (
        "Send notifications for outstanding contracts"
    )

    def update_pricing(self, request, queryset):
        """Admin action: refresh pricing relations for all contracts."""
        del queryset  # unused - the task always processes all contracts
        tasks.update_contracts_pricing.delay()
        self.message_user(
            request, "Started updating pricing relations for all contracts"
        )

    update_pricing.short_description = "Update pricing info for all contracts"

    def has_add_permission(self, request):
        # handlers are created through the app's setup flow, never via the admin
        return False
@admin.register(Contract)
class ContractAdmin(admin.ModelAdmin):
    """Admin view for courier contracts synced from the Eve Online server."""

    list_display = [
        "contract_id",
        "status",
        "date_issued",
        "issuer",
        "_pilots_notified",
        "_customer_notified",
    ]
    list_filter = (
        "status",
        ("issuer", admin.RelatedOnlyFieldListFilter),
    )
    # FIX: issuer is a ForeignKey - the admin search does an ``icontains``
    # lookup, which raises a FieldError on a relation itself. Search on the
    # related entity's name field instead (EveEntity has ``name``).
    search_fields = ["issuer__name"]
    list_select_related = True
    actions = ["send_pilots_notification", "send_customer_notification"]

    def get_queryset(self, request):
        qs = super().get_queryset(request)
        # avoid one query per row when rendering _customer_notified
        return qs.prefetch_related("customer_notifications")

    def _pilots_notified(self, contract):
        """Whether the pilots notification has been sent."""
        return contract.date_notified is not None

    _pilots_notified.boolean = True

    def _customer_notified(self, contract):
        """Statuses of all customer notifications sent for this contract."""
        return ", ".join(
            sorted(
                [x.status for x in contract.customer_notifications.all()],
                reverse=True,
            )
        )

    def send_pilots_notification(self, request, queryset):
        """Admin action: send the pilots notification for selected contracts."""
        for obj in queryset:
            obj.send_pilot_notification()
            self.message_user(
                request,
                "Sent pilots notification for contract {} to Discord".format(
                    obj.contract_id
                ),
            )

    send_pilots_notification.short_description = (
        "Sent pilots notification for selected contracts to Discord"
    )

    def send_customer_notification(self, request, queryset):
        """Admin action: send the customer notification for selected contracts."""
        for obj in queryset:
            obj.send_customer_notification(force_sent=True)
            self.message_user(
                request,
                "Sent customer notification for contract {} to Discord".format(
                    obj.contract_id
                ),
            )

    send_customer_notification.short_description = (
        "Sent customer notification for selected contracts to Discord"
    )

    def has_add_permission(self, request):
        # contracts are synced from ESI; manual creation only in developer mode
        return bool(FREIGHT_DEVELOPER_MODE)

    def has_change_permission(self, request, obj=None):
        return bool(FREIGHT_DEVELOPER_MODE)
if FREIGHT_DEVELOPER_MODE:
@admin.register(ContractCustomerNotification)
class ContractCustomerNotificationAdmin(admin.ModelAdmin):
pass
| 26.589641 | 84 | 0.630656 | from django.contrib import admin
from . import tasks
from .app_settings import FREIGHT_DEVELOPER_MODE
from .models import (
Contract,
ContractCustomerNotification,
ContractHandler,
EveEntity,
Location,
Pricing,
)
from .tasks import update_locations
@admin.register(Location)
class LocationAdmin(admin.ModelAdmin):
list_display = ("id", "name", "_category", "_solar_system")
list_filter = ("category_id",)
search_fields = ["name"]
list_select_related = True
actions = ["update_location"]
if not FREIGHT_DEVELOPER_MODE:
list_display_links = None
def _category(self, obj):
return obj.get_category_id_display()
_category.admin_order_field = "category_id"
def _solar_system(self, obj):
return obj.solar_system_name
def has_add_permission(self, request):
if FREIGHT_DEVELOPER_MODE:
return True
else:
return False
def has_change_permission(self, request):
if FREIGHT_DEVELOPER_MODE:
return True
else:
return False
def update_location(self, request, queryset):
location_ids = list()
for obj in queryset:
location_ids.append(obj.pk)
update_locations.delay(location_ids)
self.message_user(
request,
"Started updating {} locations. "
"This can take a short while to complete.".format(len(location_ids)),
)
update_location.short_description = "Update selected locations from ESI"
if FREIGHT_DEVELOPER_MODE:
@admin.register(EveEntity)
class EveEntityAdmin(admin.ModelAdmin):
list_display = ("name", "category")
list_filter = ("category",)
@admin.register(Pricing)
class PricingAdmin(admin.ModelAdmin):
list_display = (
"name",
"start_location",
"end_location",
"_bidirectional",
"_default",
"_active",
)
list_filter = (
"is_bidirectional",
"is_active",
("start_location", admin.RelatedOnlyFieldListFilter),
("end_location", admin.RelatedOnlyFieldListFilter),
)
list_select_related = True
def _bidirectional(self, obj):
return obj.is_bidirectional
_bidirectional.boolean = True
def _active(self, obj):
return obj.is_active
_active.boolean = True
def _default(self, obj):
return obj.is_default
_default.boolean = True
@admin.register(ContractHandler)
class ContractHandlerAdmin(admin.ModelAdmin):
list_display = (
"organization",
"character",
"operation_mode",
"last_sync",
"_is_sync_ok",
)
actions = ("start_sync", "send_notifications", "update_pricing")
if not FREIGHT_DEVELOPER_MODE:
readonly_fields = (
"organization",
"character",
"operation_mode",
"version_hash",
"last_sync",
"last_error",
)
def _is_sync_ok(self, obj):
return obj.is_sync_ok
_is_sync_ok.boolean = True
_is_sync_ok.short_description = "sync ok"
def start_sync(self, request, queryset):
for obj in queryset:
tasks.run_contracts_sync.delay(force_sync=True, user_pk=request.user.pk)
text = "Started syncing contracts for: {} ".format(obj)
text += "You will receive a report once it is completed."
self.message_user(request, text)
start_sync.short_description = "Fetch contracts from Eve Online server"
def send_notifications(self, request, queryset):
for obj in queryset:
tasks.send_contract_notifications.delay(force_sent=True)
text = "Started sending notifications for: {} ".format(obj)
self.message_user(request, text)
send_notifications.short_description = (
"Send notifications for outstanding contracts"
)
def update_pricing(self, request, queryset):
del queryset
tasks.update_contracts_pricing.delay()
self.message_user(
request, "Started updating pricing relations for all contracts"
)
update_pricing.short_description = "Update pricing info for all contracts"
def has_add_permission(self, request):
return False
@admin.register(Contract)
class ContractAdmin(admin.ModelAdmin):
list_display = [
"contract_id",
"status",
"date_issued",
"issuer",
"_pilots_notified",
"_customer_notified",
]
list_filter = (
"status",
("issuer", admin.RelatedOnlyFieldListFilter),
)
search_fields = ["issuer"]
list_select_related = True
actions = ["send_pilots_notification", "send_customer_notification"]
def get_queryset(self, request):
qs = super().get_queryset(request)
return qs.prefetch_related("customer_notifications")
def _pilots_notified(self, contract):
return contract.date_notified is not None
_pilots_notified.boolean = True
def _customer_notified(self, contract):
return ", ".join(
sorted(
[x.status for x in contract.customer_notifications.all()],
reverse=True,
)
)
def send_pilots_notification(self, request, queryset):
for obj in queryset:
obj.send_pilot_notification()
self.message_user(
request,
"Sent pilots notification for contract {} to Discord".format(
obj.contract_id
),
)
send_pilots_notification.short_description = (
"Sent pilots notification for selected contracts to Discord"
)
def send_customer_notification(self, request, queryset):
for obj in queryset:
obj.send_customer_notification(force_sent=True)
self.message_user(
request,
"Sent customer notification for contract {} to Discord".format(
obj.contract_id
),
)
send_customer_notification.short_description = (
"Sent customer notification for selected contracts to Discord"
)
def has_add_permission(self, request):
if FREIGHT_DEVELOPER_MODE:
return True
else:
return False
def has_change_permission(self, request, obj=None):
if FREIGHT_DEVELOPER_MODE:
return True
else:
return False
if FREIGHT_DEVELOPER_MODE:
@admin.register(ContractCustomerNotification)
class ContractCustomerNotificationAdmin(admin.ModelAdmin):
pass
| true | true |
f73d9db33588aaa977bf8161f86b487813b08884 | 2,685 | py | Python | cortado/cutcovfactor.py | Statfactory/ColdBrew | ee16eee73e8dc89646abd6ee3e19858e49c6ffb7 | [
"MIT"
] | 8 | 2020-04-26T09:42:29.000Z | 2021-04-27T21:55:49.000Z | cortado/cutcovfactor.py | pm390/cortado | ee16eee73e8dc89646abd6ee3e19858e49c6ffb7 | [
"MIT"
] | 2 | 2020-03-24T23:37:45.000Z | 2021-08-23T20:49:01.000Z | cortado/cutcovfactor.py | pm390/cortado | ee16eee73e8dc89646abd6ee3e19858e49c6ffb7 | [
"MIT"
] | 4 | 2020-06-10T06:49:43.000Z | 2021-09-17T14:04:20.000Z | from cortado.abstractfactor import AbstractFactor
import numpy as np
from cortado.seq import Seq
from cortado.funcslicer import FuncSlicer
from cortado.consts import HEADLENGTH, SLICELEN, MISSINGLEVEL
from numba import jit
from numba.typed import Dict
from numba import types
@jit(nopython=True, cache=False)
def g_leftclosed(slice, buf, cuts, k):
def f(x):
if np.isnan(x):
return 0
if x == np.PINF:
return k
else:
i = np.searchsorted(cuts, x, side='right')
return i
for i in range(len(slice)):
buf[i] = f(slice[i])
if len(buf) == len(slice):
return buf
else:
return buf[:len(slice)]
@jit(nopython=True, cache=False)
def g_rightclosed(slice, buf, cuts):
def f(x):
if np.isnan(x):
return 0
if x == np.NINF:
return 1
else:
i = np.searchsorted(cuts, x, side='left')
return i
for i in range(len(slice)):
buf[i] = f(slice[i])
if len(buf) == len(slice):
return buf
else:
return buf[:len(slice)]
class CutCovFactor(AbstractFactor):
def __init__(self, covariate, cuts, rightclosed = False):
self.covariate = covariate
self.cuts = cuts
assert cuts[0] == np.NINF and cuts[-1] == np.PINF
levelcount = len(cuts) - 1
if rightclosed:
levels = [MISSINGLEVEL] + ["{z}{x},{y}]".format(x=str(cuts[i]), y=str(cuts[i + 1]), z="[" if i == 0 else "(") for i in range(levelcount)]
else:
levels = [MISSINGLEVEL] + ["[{x},{y}{z}".format(x=str(cuts[i]), y=str(cuts[i + 1]), z="]" if i == (levelcount - 1) else ")") for i in range(levelcount)]
dtype = np.uint8 if levelcount <= 256 else np.uint16
def slicer(start, length, slicelen):
length = min(len(self) - start, length)
slicelen = min(length, slicelen)
buf = np.empty(slicelen, dtype = dtype)
if rightclosed:
return Seq.map((lambda s: g_rightclosed(s, buf, cuts)), covariate.slicer(start, length, slicelen))
else:
return Seq.map((lambda s: g_leftclosed(s, buf, cuts, levelcount - 1)), covariate.slicer(start, length, slicelen))
self._levels = levels
self._slicer = FuncSlicer(slicer, dtype)
@property
def name(self):
return self.covariate.name
def __len__(self):
return len(self.covariate)
@property
def isordinal(self):
return True
@property
def levels(self):
return self._levels
@property
def slicer(self):
return self._slicer | 29.833333 | 164 | 0.575047 | from cortado.abstractfactor import AbstractFactor
import numpy as np
from cortado.seq import Seq
from cortado.funcslicer import FuncSlicer
from cortado.consts import HEADLENGTH, SLICELEN, MISSINGLEVEL
from numba import jit
from numba.typed import Dict
from numba import types
@jit(nopython=True, cache=False)
def g_leftclosed(slice, buf, cuts, k):
def f(x):
if np.isnan(x):
return 0
if x == np.PINF:
return k
else:
i = np.searchsorted(cuts, x, side='right')
return i
for i in range(len(slice)):
buf[i] = f(slice[i])
if len(buf) == len(slice):
return buf
else:
return buf[:len(slice)]
@jit(nopython=True, cache=False)
def g_rightclosed(slice, buf, cuts):
def f(x):
if np.isnan(x):
return 0
if x == np.NINF:
return 1
else:
i = np.searchsorted(cuts, x, side='left')
return i
for i in range(len(slice)):
buf[i] = f(slice[i])
if len(buf) == len(slice):
return buf
else:
return buf[:len(slice)]
class CutCovFactor(AbstractFactor):
def __init__(self, covariate, cuts, rightclosed = False):
self.covariate = covariate
self.cuts = cuts
assert cuts[0] == np.NINF and cuts[-1] == np.PINF
levelcount = len(cuts) - 1
if rightclosed:
levels = [MISSINGLEVEL] + ["{z}{x},{y}]".format(x=str(cuts[i]), y=str(cuts[i + 1]), z="[" if i == 0 else "(") for i in range(levelcount)]
else:
levels = [MISSINGLEVEL] + ["[{x},{y}{z}".format(x=str(cuts[i]), y=str(cuts[i + 1]), z="]" if i == (levelcount - 1) else ")") for i in range(levelcount)]
dtype = np.uint8 if levelcount <= 256 else np.uint16
def slicer(start, length, slicelen):
length = min(len(self) - start, length)
slicelen = min(length, slicelen)
buf = np.empty(slicelen, dtype = dtype)
if rightclosed:
return Seq.map((lambda s: g_rightclosed(s, buf, cuts)), covariate.slicer(start, length, slicelen))
else:
return Seq.map((lambda s: g_leftclosed(s, buf, cuts, levelcount - 1)), covariate.slicer(start, length, slicelen))
self._levels = levels
self._slicer = FuncSlicer(slicer, dtype)
@property
def name(self):
return self.covariate.name
def __len__(self):
return len(self.covariate)
@property
def isordinal(self):
return True
@property
def levels(self):
return self._levels
@property
def slicer(self):
return self._slicer | true | true |
f73d9ddc42aa94f59ee3852e9e992473d92b432d | 891 | py | Python | oosc/oosc/absence/serializers.py | C4DLabOrg/da_api | 3d876576a189ce35c6b4b2f1c728f4b91e4b2ed0 | [
"MIT"
] | null | null | null | oosc/oosc/absence/serializers.py | C4DLabOrg/da_api | 3d876576a189ce35c6b4b2f1c728f4b91e4b2ed0 | [
"MIT"
] | null | null | null | oosc/oosc/absence/serializers.py | C4DLabOrg/da_api | 3d876576a189ce35c6b4b2f1c728f4b91e4b2ed0 | [
"MIT"
] | null | null | null |
from oosc.absence.models import Absence
from rest_framework import serializers
from oosc.students.serializers import SimpleStudentSerializer
from datetime import datetime
class AbsenceSerializer(serializers.ModelSerializer):
class Meta:
model=Absence
fields=('id','student','_class','reasons','date_from','date_to','status')
class DetailedAbsenceserializer(serializers.ModelSerializer):
student=serializers.SerializerMethodField()
days=serializers.SerializerMethodField()
class Meta:
model=Absence
fields=('id','student','_class','days','status','student','reasons','date_from','date_to')
def get_student(self,obj):
return SimpleStudentSerializer(obj.student).data
def get_days(self,obj):
if obj.date_to:
return (obj.date_to-obj.date_from).days
return (datetime.now().date()-obj.date_from).days | 35.64 | 98 | 0.725028 |
from oosc.absence.models import Absence
from rest_framework import serializers
from oosc.students.serializers import SimpleStudentSerializer
from datetime import datetime
class AbsenceSerializer(serializers.ModelSerializer):
class Meta:
model=Absence
fields=('id','student','_class','reasons','date_from','date_to','status')
class DetailedAbsenceserializer(serializers.ModelSerializer):
student=serializers.SerializerMethodField()
days=serializers.SerializerMethodField()
class Meta:
model=Absence
fields=('id','student','_class','days','status','student','reasons','date_from','date_to')
def get_student(self,obj):
return SimpleStudentSerializer(obj.student).data
def get_days(self,obj):
if obj.date_to:
return (obj.date_to-obj.date_from).days
return (datetime.now().date()-obj.date_from).days | true | true |
f73d9e918e4201fec3820bd47fa3fc21170831aa | 67,103 | py | Python | ultracart/models/coupon.py | UltraCart/rest_api_v2_sdk_python | d734ea13fabc7a57872ff68bac06861edb8fd882 | [
"Apache-2.0"
] | 1 | 2018-03-15T16:56:23.000Z | 2018-03-15T16:56:23.000Z | ultracart/models/coupon.py | UltraCart/rest_api_v2_sdk_python | d734ea13fabc7a57872ff68bac06861edb8fd882 | [
"Apache-2.0"
] | null | null | null | ultracart/models/coupon.py | UltraCart/rest_api_v2_sdk_python | d734ea13fabc7a57872ff68bac06861edb8fd882 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
UltraCart Rest API V2
UltraCart REST API Version 2 # noqa: E501
OpenAPI spec version: 2.0.0
Contact: support@ultracart.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Coupon(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'affiliate_oid': 'int',
'allow_multiple_one_time_codes': 'bool',
'amount_off_items': 'CouponAmountOffItems',
'amount_off_shipping': 'CouponAmountOffShipping',
'amount_off_shipping_with_items_purchase': 'CouponAmountOffShippingWithItemsPurchase',
'amount_off_subtotal': 'CouponAmountOffSubtotal',
'amount_off_subtotal_and_free_shipping': 'CouponAmountOffSubtotalFreeShippingWithPurchase',
'amount_off_subtotal_and_shipping': 'CouponAmountOffSubtotalAndShipping',
'amount_off_subtotal_with_block_purchase': 'CouponAmountOffSubtotalWithBlockPurchase',
'amount_off_subtotal_with_items_purchase': 'CouponAmountOffSubtotalWithItemsPurchase',
'amount_off_subtotal_with_purchase': 'CouponAmountOffSubtotalWithPurchase',
'automatically_apply_coupon_codes': 'CouponAutomaticallyApplyCouponCodes',
'buy_one_get_one': 'CouponBuyOneGetOneLimit',
'calculated_description': 'str',
'can_be_used_with_other_coupons': 'bool',
'coupon_oid': 'int',
'coupon_type': 'str',
'description': 'str',
'discount_item_with_item_purchase': 'CouponDiscountItemWithItemPurchase',
'discount_items': 'CouponDiscountItems',
'expiration_dts': 'str',
'free_item_and_shipping_with_subtotal': 'CouponFreeItemAndShippingWithSubtotal',
'free_item_with_item_purchase': 'CouponFreeItemWithItemPurchase',
'free_item_with_subtotal': 'CouponFreeItemWithSubtotal',
'free_items_with_item_purchase': 'CouponFreeItemsWithItemPurchase',
'free_items_with_mixmatch_purchase': 'CouponFreeItemsWithMixMatchPurchase',
'free_shipping': 'CouponFreeShipping',
'free_shipping_specific_items': 'CouponFreeShippingSpecificItems',
'free_shipping_with_items_purchase': 'CouponFreeShippingWithItemsPurchase',
'free_shipping_with_subtotal': 'CouponFreeShippingWithSubtotal',
'hide_from_customer': 'bool',
'merchant_code': 'str',
'merchant_notes': 'str',
'multiple_amounts_off_items': 'CouponMultipleAmountsOffItems',
'no_discount': 'CouponNoDiscount',
'percent_off_item_with_items_quantity_purchase': 'CouponPercentOffItemWithItemsQuantityPurchase',
'percent_off_items': 'CouponPercentOffItems',
'percent_off_items_and_free_shipping': 'CouponPercentOffItemsAndFreeShipping',
'percent_off_items_with_items_purchase': 'CouponPercentOffItemsWithItemsPurchase',
'percent_off_msrp_items': 'CouponPercentOffMsrpItems',
'percent_off_retail_price_items': 'CouponPercentOffRetailPriceItems',
'percent_off_shipping': 'CouponPercentOffShipping',
'percent_off_subtotal': 'CouponPercentOffSubtotal',
'percent_off_subtotal_and_free_shipping': 'CouponPercentOffSubtotalAndFreeShipping',
'percent_off_subtotal_limit': 'CouponPercentOffSubtotalLimit',
'percent_off_subtotal_with_items_purchase': 'CouponPercentOffSubtotalWithItemsPurchase',
'percent_off_subtotal_with_subtotal': 'CouponPercentOffSubtotalWithSubtotal',
'quickbooks_code': 'str',
'restrict_by_postal_codes': 'list[str]',
'restrict_by_screen_branding_theme_codes': 'list[CouponRestriction]',
'restrict_by_storefronts': 'list[CouponRestriction]',
'start_dts': 'str',
'super_coupon': 'bool',
'tiered_amount_off_items': 'CouponTieredAmountOffItems',
'tiered_amount_off_subtotal': 'CouponTieredAmountOffSubtotal',
'tiered_percent_off_items': 'CouponTieredPercentOffItems',
'tiered_percent_off_shipping': 'CouponTieredPercentOffShipping',
'tiered_percent_off_subtotal': 'CouponTieredPercentOffSubtotal',
'usable_by': 'str'
}
attribute_map = {
'affiliate_oid': 'affiliate_oid',
'allow_multiple_one_time_codes': 'allow_multiple_one_time_codes',
'amount_off_items': 'amount_off_items',
'amount_off_shipping': 'amount_off_shipping',
'amount_off_shipping_with_items_purchase': 'amount_off_shipping_with_items_purchase',
'amount_off_subtotal': 'amount_off_subtotal',
'amount_off_subtotal_and_free_shipping': 'amount_off_subtotal_and_free_shipping',
'amount_off_subtotal_and_shipping': 'amount_off_subtotal_and_shipping',
'amount_off_subtotal_with_block_purchase': 'amount_off_subtotal_with_block_purchase',
'amount_off_subtotal_with_items_purchase': 'amount_off_subtotal_with_items_purchase',
'amount_off_subtotal_with_purchase': 'amount_off_subtotal_with_purchase',
'automatically_apply_coupon_codes': 'automatically_apply_coupon_codes',
'buy_one_get_one': 'buy_one_get_one',
'calculated_description': 'calculated_description',
'can_be_used_with_other_coupons': 'can_be_used_with_other_coupons',
'coupon_oid': 'coupon_oid',
'coupon_type': 'coupon_type',
'description': 'description',
'discount_item_with_item_purchase': 'discount_item_with_item_purchase',
'discount_items': 'discount_items',
'expiration_dts': 'expiration_dts',
'free_item_and_shipping_with_subtotal': 'free_item_and_shipping_with_subtotal',
'free_item_with_item_purchase': 'free_item_with_item_purchase',
'free_item_with_subtotal': 'free_item_with_subtotal',
'free_items_with_item_purchase': 'free_items_with_item_purchase',
'free_items_with_mixmatch_purchase': 'free_items_with_mixmatch_purchase',
'free_shipping': 'free_shipping',
'free_shipping_specific_items': 'free_shipping_specific_items',
'free_shipping_with_items_purchase': 'free_shipping_with_items_purchase',
'free_shipping_with_subtotal': 'free_shipping_with_subtotal',
'hide_from_customer': 'hide_from_customer',
'merchant_code': 'merchant_code',
'merchant_notes': 'merchant_notes',
'multiple_amounts_off_items': 'multiple_amounts_off_items',
'no_discount': 'no_discount',
'percent_off_item_with_items_quantity_purchase': 'percent_off_item_with_items_quantity_purchase',
'percent_off_items': 'percent_off_items',
'percent_off_items_and_free_shipping': 'percent_off_items_and_free_shipping',
'percent_off_items_with_items_purchase': 'percent_off_items_with_items_purchase',
'percent_off_msrp_items': 'percent_off_msrp_items',
'percent_off_retail_price_items': 'percent_off_retail_price_items',
'percent_off_shipping': 'percent_off_shipping',
'percent_off_subtotal': 'percent_off_subtotal',
'percent_off_subtotal_and_free_shipping': 'percent_off_subtotal_and_free_shipping',
'percent_off_subtotal_limit': 'percent_off_subtotal_limit',
'percent_off_subtotal_with_items_purchase': 'percent_off_subtotal_with_items_purchase',
'percent_off_subtotal_with_subtotal': 'percent_off_subtotal_with_subtotal',
'quickbooks_code': 'quickbooks_code',
'restrict_by_postal_codes': 'restrict_by_postal_codes',
'restrict_by_screen_branding_theme_codes': 'restrict_by_screen_branding_theme_codes',
'restrict_by_storefronts': 'restrict_by_storefronts',
'start_dts': 'start_dts',
'super_coupon': 'super_coupon',
'tiered_amount_off_items': 'tiered_amount_off_items',
'tiered_amount_off_subtotal': 'tiered_amount_off_subtotal',
'tiered_percent_off_items': 'tiered_percent_off_items',
'tiered_percent_off_shipping': 'tiered_percent_off_shipping',
'tiered_percent_off_subtotal': 'tiered_percent_off_subtotal',
'usable_by': 'usable_by'
}
def __init__(self, affiliate_oid=None, allow_multiple_one_time_codes=None, amount_off_items=None, amount_off_shipping=None, amount_off_shipping_with_items_purchase=None, amount_off_subtotal=None, amount_off_subtotal_and_free_shipping=None, amount_off_subtotal_and_shipping=None, amount_off_subtotal_with_block_purchase=None, amount_off_subtotal_with_items_purchase=None, amount_off_subtotal_with_purchase=None, automatically_apply_coupon_codes=None, buy_one_get_one=None, calculated_description=None, can_be_used_with_other_coupons=None, coupon_oid=None, coupon_type=None, description=None, discount_item_with_item_purchase=None, discount_items=None, expiration_dts=None, free_item_and_shipping_with_subtotal=None, free_item_with_item_purchase=None, free_item_with_subtotal=None, free_items_with_item_purchase=None, free_items_with_mixmatch_purchase=None, free_shipping=None, free_shipping_specific_items=None, free_shipping_with_items_purchase=None, free_shipping_with_subtotal=None, hide_from_customer=None, merchant_code=None, merchant_notes=None, multiple_amounts_off_items=None, no_discount=None, percent_off_item_with_items_quantity_purchase=None, percent_off_items=None, percent_off_items_and_free_shipping=None, percent_off_items_with_items_purchase=None, percent_off_msrp_items=None, percent_off_retail_price_items=None, percent_off_shipping=None, percent_off_subtotal=None, percent_off_subtotal_and_free_shipping=None, percent_off_subtotal_limit=None, percent_off_subtotal_with_items_purchase=None, percent_off_subtotal_with_subtotal=None, quickbooks_code=None, restrict_by_postal_codes=None, restrict_by_screen_branding_theme_codes=None, restrict_by_storefronts=None, start_dts=None, super_coupon=None, tiered_amount_off_items=None, tiered_amount_off_subtotal=None, tiered_percent_off_items=None, tiered_percent_off_shipping=None, tiered_percent_off_subtotal=None, usable_by=None): # noqa: E501
"""Coupon - a model defined in Swagger""" # noqa: E501
self._affiliate_oid = None
self._allow_multiple_one_time_codes = None
self._amount_off_items = None
self._amount_off_shipping = None
self._amount_off_shipping_with_items_purchase = None
self._amount_off_subtotal = None
self._amount_off_subtotal_and_free_shipping = None
self._amount_off_subtotal_and_shipping = None
self._amount_off_subtotal_with_block_purchase = None
self._amount_off_subtotal_with_items_purchase = None
self._amount_off_subtotal_with_purchase = None
self._automatically_apply_coupon_codes = None
self._buy_one_get_one = None
self._calculated_description = None
self._can_be_used_with_other_coupons = None
self._coupon_oid = None
self._coupon_type = None
self._description = None
self._discount_item_with_item_purchase = None
self._discount_items = None
self._expiration_dts = None
self._free_item_and_shipping_with_subtotal = None
self._free_item_with_item_purchase = None
self._free_item_with_subtotal = None
self._free_items_with_item_purchase = None
self._free_items_with_mixmatch_purchase = None
self._free_shipping = None
self._free_shipping_specific_items = None
self._free_shipping_with_items_purchase = None
self._free_shipping_with_subtotal = None
self._hide_from_customer = None
self._merchant_code = None
self._merchant_notes = None
self._multiple_amounts_off_items = None
self._no_discount = None
self._percent_off_item_with_items_quantity_purchase = None
self._percent_off_items = None
self._percent_off_items_and_free_shipping = None
self._percent_off_items_with_items_purchase = None
self._percent_off_msrp_items = None
self._percent_off_retail_price_items = None
self._percent_off_shipping = None
self._percent_off_subtotal = None
self._percent_off_subtotal_and_free_shipping = None
self._percent_off_subtotal_limit = None
self._percent_off_subtotal_with_items_purchase = None
self._percent_off_subtotal_with_subtotal = None
self._quickbooks_code = None
self._restrict_by_postal_codes = None
self._restrict_by_screen_branding_theme_codes = None
self._restrict_by_storefronts = None
self._start_dts = None
self._super_coupon = None
self._tiered_amount_off_items = None
self._tiered_amount_off_subtotal = None
self._tiered_percent_off_items = None
self._tiered_percent_off_shipping = None
self._tiered_percent_off_subtotal = None
self._usable_by = None
self.discriminator = None
if affiliate_oid is not None:
self.affiliate_oid = affiliate_oid
if allow_multiple_one_time_codes is not None:
self.allow_multiple_one_time_codes = allow_multiple_one_time_codes
if amount_off_items is not None:
self.amount_off_items = amount_off_items
if amount_off_shipping is not None:
self.amount_off_shipping = amount_off_shipping
if amount_off_shipping_with_items_purchase is not None:
self.amount_off_shipping_with_items_purchase = amount_off_shipping_with_items_purchase
if amount_off_subtotal is not None:
self.amount_off_subtotal = amount_off_subtotal
if amount_off_subtotal_and_free_shipping is not None:
self.amount_off_subtotal_and_free_shipping = amount_off_subtotal_and_free_shipping
if amount_off_subtotal_and_shipping is not None:
self.amount_off_subtotal_and_shipping = amount_off_subtotal_and_shipping
if amount_off_subtotal_with_block_purchase is not None:
self.amount_off_subtotal_with_block_purchase = amount_off_subtotal_with_block_purchase
if amount_off_subtotal_with_items_purchase is not None:
self.amount_off_subtotal_with_items_purchase = amount_off_subtotal_with_items_purchase
if amount_off_subtotal_with_purchase is not None:
self.amount_off_subtotal_with_purchase = amount_off_subtotal_with_purchase
if automatically_apply_coupon_codes is not None:
self.automatically_apply_coupon_codes = automatically_apply_coupon_codes
if buy_one_get_one is not None:
self.buy_one_get_one = buy_one_get_one
if calculated_description is not None:
self.calculated_description = calculated_description
if can_be_used_with_other_coupons is not None:
self.can_be_used_with_other_coupons = can_be_used_with_other_coupons
if coupon_oid is not None:
self.coupon_oid = coupon_oid
if coupon_type is not None:
self.coupon_type = coupon_type
if description is not None:
self.description = description
if discount_item_with_item_purchase is not None:
self.discount_item_with_item_purchase = discount_item_with_item_purchase
if discount_items is not None:
self.discount_items = discount_items
if expiration_dts is not None:
self.expiration_dts = expiration_dts
if free_item_and_shipping_with_subtotal is not None:
self.free_item_and_shipping_with_subtotal = free_item_and_shipping_with_subtotal
if free_item_with_item_purchase is not None:
self.free_item_with_item_purchase = free_item_with_item_purchase
if free_item_with_subtotal is not None:
self.free_item_with_subtotal = free_item_with_subtotal
if free_items_with_item_purchase is not None:
self.free_items_with_item_purchase = free_items_with_item_purchase
if free_items_with_mixmatch_purchase is not None:
self.free_items_with_mixmatch_purchase = free_items_with_mixmatch_purchase
if free_shipping is not None:
self.free_shipping = free_shipping
if free_shipping_specific_items is not None:
self.free_shipping_specific_items = free_shipping_specific_items
if free_shipping_with_items_purchase is not None:
self.free_shipping_with_items_purchase = free_shipping_with_items_purchase
if free_shipping_with_subtotal is not None:
self.free_shipping_with_subtotal = free_shipping_with_subtotal
if hide_from_customer is not None:
self.hide_from_customer = hide_from_customer
if merchant_code is not None:
self.merchant_code = merchant_code
if merchant_notes is not None:
self.merchant_notes = merchant_notes
if multiple_amounts_off_items is not None:
self.multiple_amounts_off_items = multiple_amounts_off_items
if no_discount is not None:
self.no_discount = no_discount
if percent_off_item_with_items_quantity_purchase is not None:
self.percent_off_item_with_items_quantity_purchase = percent_off_item_with_items_quantity_purchase
if percent_off_items is not None:
self.percent_off_items = percent_off_items
if percent_off_items_and_free_shipping is not None:
self.percent_off_items_and_free_shipping = percent_off_items_and_free_shipping
if percent_off_items_with_items_purchase is not None:
self.percent_off_items_with_items_purchase = percent_off_items_with_items_purchase
if percent_off_msrp_items is not None:
self.percent_off_msrp_items = percent_off_msrp_items
if percent_off_retail_price_items is not None:
self.percent_off_retail_price_items = percent_off_retail_price_items
if percent_off_shipping is not None:
self.percent_off_shipping = percent_off_shipping
if percent_off_subtotal is not None:
self.percent_off_subtotal = percent_off_subtotal
if percent_off_subtotal_and_free_shipping is not None:
self.percent_off_subtotal_and_free_shipping = percent_off_subtotal_and_free_shipping
if percent_off_subtotal_limit is not None:
self.percent_off_subtotal_limit = percent_off_subtotal_limit
if percent_off_subtotal_with_items_purchase is not None:
self.percent_off_subtotal_with_items_purchase = percent_off_subtotal_with_items_purchase
if percent_off_subtotal_with_subtotal is not None:
self.percent_off_subtotal_with_subtotal = percent_off_subtotal_with_subtotal
if quickbooks_code is not None:
self.quickbooks_code = quickbooks_code
if restrict_by_postal_codes is not None:
self.restrict_by_postal_codes = restrict_by_postal_codes
if restrict_by_screen_branding_theme_codes is not None:
self.restrict_by_screen_branding_theme_codes = restrict_by_screen_branding_theme_codes
if restrict_by_storefronts is not None:
self.restrict_by_storefronts = restrict_by_storefronts
if start_dts is not None:
self.start_dts = start_dts
if super_coupon is not None:
self.super_coupon = super_coupon
if tiered_amount_off_items is not None:
self.tiered_amount_off_items = tiered_amount_off_items
if tiered_amount_off_subtotal is not None:
self.tiered_amount_off_subtotal = tiered_amount_off_subtotal
if tiered_percent_off_items is not None:
self.tiered_percent_off_items = tiered_percent_off_items
if tiered_percent_off_shipping is not None:
self.tiered_percent_off_shipping = tiered_percent_off_shipping
if tiered_percent_off_subtotal is not None:
self.tiered_percent_off_subtotal = tiered_percent_off_subtotal
if usable_by is not None:
self.usable_by = usable_by
@property
def affiliate_oid(self):
"""Gets the affiliate_oid of this Coupon. # noqa: E501
Associates an order with an affiliate when this value is set. # noqa: E501
:return: The affiliate_oid of this Coupon. # noqa: E501
:rtype: int
"""
return self._affiliate_oid
@affiliate_oid.setter
def affiliate_oid(self, affiliate_oid):
"""Sets the affiliate_oid of this Coupon.
Associates an order with an affiliate when this value is set. # noqa: E501
:param affiliate_oid: The affiliate_oid of this Coupon. # noqa: E501
:type: int
"""
self._affiliate_oid = affiliate_oid
@property
def allow_multiple_one_time_codes(self):
"""Gets the allow_multiple_one_time_codes of this Coupon. # noqa: E501
True if multiple one time codes for this coupon can be used on a cart at the same time. # noqa: E501
:return: The allow_multiple_one_time_codes of this Coupon. # noqa: E501
:rtype: bool
"""
return self._allow_multiple_one_time_codes
@allow_multiple_one_time_codes.setter
def allow_multiple_one_time_codes(self, allow_multiple_one_time_codes):
"""Sets the allow_multiple_one_time_codes of this Coupon.
True if multiple one time codes for this coupon can be used on a cart at the same time. # noqa: E501
:param allow_multiple_one_time_codes: The allow_multiple_one_time_codes of this Coupon. # noqa: E501
:type: bool
"""
self._allow_multiple_one_time_codes = allow_multiple_one_time_codes
    # NOTE(review): the accessors below are swagger-codegen generated property
    # wrappers. Each @property proxies a private `_<name>` attribute holding a
    # Coupon* sub-model instance (see the :rtype: lines); the setters store the
    # value as-is with no validation.
    @property
    def amount_off_items(self):
        """Gets the amount_off_items of this Coupon.  # noqa: E501
        :return: The amount_off_items of this Coupon.  # noqa: E501
        :rtype: CouponAmountOffItems
        """
        return self._amount_off_items
    @amount_off_items.setter
    def amount_off_items(self, amount_off_items):
        """Sets the amount_off_items of this Coupon.
        :param amount_off_items: The amount_off_items of this Coupon.  # noqa: E501
        :type: CouponAmountOffItems
        """
        self._amount_off_items = amount_off_items
    @property
    def amount_off_shipping(self):
        """Gets the amount_off_shipping of this Coupon.  # noqa: E501
        :return: The amount_off_shipping of this Coupon.  # noqa: E501
        :rtype: CouponAmountOffShipping
        """
        return self._amount_off_shipping
    @amount_off_shipping.setter
    def amount_off_shipping(self, amount_off_shipping):
        """Sets the amount_off_shipping of this Coupon.
        :param amount_off_shipping: The amount_off_shipping of this Coupon.  # noqa: E501
        :type: CouponAmountOffShipping
        """
        self._amount_off_shipping = amount_off_shipping
    @property
    def amount_off_shipping_with_items_purchase(self):
        """Gets the amount_off_shipping_with_items_purchase of this Coupon.  # noqa: E501
        :return: The amount_off_shipping_with_items_purchase of this Coupon.  # noqa: E501
        :rtype: CouponAmountOffShippingWithItemsPurchase
        """
        return self._amount_off_shipping_with_items_purchase
    @amount_off_shipping_with_items_purchase.setter
    def amount_off_shipping_with_items_purchase(self, amount_off_shipping_with_items_purchase):
        """Sets the amount_off_shipping_with_items_purchase of this Coupon.
        :param amount_off_shipping_with_items_purchase: The amount_off_shipping_with_items_purchase of this Coupon.  # noqa: E501
        :type: CouponAmountOffShippingWithItemsPurchase
        """
        self._amount_off_shipping_with_items_purchase = amount_off_shipping_with_items_purchase
    @property
    def amount_off_subtotal(self):
        """Gets the amount_off_subtotal of this Coupon.  # noqa: E501
        :return: The amount_off_subtotal of this Coupon.  # noqa: E501
        :rtype: CouponAmountOffSubtotal
        """
        return self._amount_off_subtotal
    @amount_off_subtotal.setter
    def amount_off_subtotal(self, amount_off_subtotal):
        """Sets the amount_off_subtotal of this Coupon.
        :param amount_off_subtotal: The amount_off_subtotal of this Coupon.  # noqa: E501
        :type: CouponAmountOffSubtotal
        """
        self._amount_off_subtotal = amount_off_subtotal
    @property
    def amount_off_subtotal_and_free_shipping(self):
        """Gets the amount_off_subtotal_and_free_shipping of this Coupon.  # noqa: E501
        :return: The amount_off_subtotal_and_free_shipping of this Coupon.  # noqa: E501
        :rtype: CouponAmountOffSubtotalFreeShippingWithPurchase
        """
        return self._amount_off_subtotal_and_free_shipping
    @amount_off_subtotal_and_free_shipping.setter
    def amount_off_subtotal_and_free_shipping(self, amount_off_subtotal_and_free_shipping):
        """Sets the amount_off_subtotal_and_free_shipping of this Coupon.
        :param amount_off_subtotal_and_free_shipping: The amount_off_subtotal_and_free_shipping of this Coupon.  # noqa: E501
        :type: CouponAmountOffSubtotalFreeShippingWithPurchase
        """
        self._amount_off_subtotal_and_free_shipping = amount_off_subtotal_and_free_shipping
    @property
    def amount_off_subtotal_and_shipping(self):
        """Gets the amount_off_subtotal_and_shipping of this Coupon.  # noqa: E501
        :return: The amount_off_subtotal_and_shipping of this Coupon.  # noqa: E501
        :rtype: CouponAmountOffSubtotalAndShipping
        """
        return self._amount_off_subtotal_and_shipping
    @amount_off_subtotal_and_shipping.setter
    def amount_off_subtotal_and_shipping(self, amount_off_subtotal_and_shipping):
        """Sets the amount_off_subtotal_and_shipping of this Coupon.
        :param amount_off_subtotal_and_shipping: The amount_off_subtotal_and_shipping of this Coupon.  # noqa: E501
        :type: CouponAmountOffSubtotalAndShipping
        """
        self._amount_off_subtotal_and_shipping = amount_off_subtotal_and_shipping
    @property
    def amount_off_subtotal_with_block_purchase(self):
        """Gets the amount_off_subtotal_with_block_purchase of this Coupon.  # noqa: E501
        :return: The amount_off_subtotal_with_block_purchase of this Coupon.  # noqa: E501
        :rtype: CouponAmountOffSubtotalWithBlockPurchase
        """
        return self._amount_off_subtotal_with_block_purchase
    @amount_off_subtotal_with_block_purchase.setter
    def amount_off_subtotal_with_block_purchase(self, amount_off_subtotal_with_block_purchase):
        """Sets the amount_off_subtotal_with_block_purchase of this Coupon.
        :param amount_off_subtotal_with_block_purchase: The amount_off_subtotal_with_block_purchase of this Coupon.  # noqa: E501
        :type: CouponAmountOffSubtotalWithBlockPurchase
        """
        self._amount_off_subtotal_with_block_purchase = amount_off_subtotal_with_block_purchase
    @property
    def amount_off_subtotal_with_items_purchase(self):
        """Gets the amount_off_subtotal_with_items_purchase of this Coupon.  # noqa: E501
        :return: The amount_off_subtotal_with_items_purchase of this Coupon.  # noqa: E501
        :rtype: CouponAmountOffSubtotalWithItemsPurchase
        """
        return self._amount_off_subtotal_with_items_purchase
    @amount_off_subtotal_with_items_purchase.setter
    def amount_off_subtotal_with_items_purchase(self, amount_off_subtotal_with_items_purchase):
        """Sets the amount_off_subtotal_with_items_purchase of this Coupon.
        :param amount_off_subtotal_with_items_purchase: The amount_off_subtotal_with_items_purchase of this Coupon.  # noqa: E501
        :type: CouponAmountOffSubtotalWithItemsPurchase
        """
        self._amount_off_subtotal_with_items_purchase = amount_off_subtotal_with_items_purchase
    @property
    def amount_off_subtotal_with_purchase(self):
        """Gets the amount_off_subtotal_with_purchase of this Coupon.  # noqa: E501
        :return: The amount_off_subtotal_with_purchase of this Coupon.  # noqa: E501
        :rtype: CouponAmountOffSubtotalWithPurchase
        """
        return self._amount_off_subtotal_with_purchase
    @amount_off_subtotal_with_purchase.setter
    def amount_off_subtotal_with_purchase(self, amount_off_subtotal_with_purchase):
        """Sets the amount_off_subtotal_with_purchase of this Coupon.
        :param amount_off_subtotal_with_purchase: The amount_off_subtotal_with_purchase of this Coupon.  # noqa: E501
        :type: CouponAmountOffSubtotalWithPurchase
        """
        self._amount_off_subtotal_with_purchase = amount_off_subtotal_with_purchase
    @property
    def automatically_apply_coupon_codes(self):
        """Gets the automatically_apply_coupon_codes of this Coupon.  # noqa: E501
        :return: The automatically_apply_coupon_codes of this Coupon.  # noqa: E501
        :rtype: CouponAutomaticallyApplyCouponCodes
        """
        return self._automatically_apply_coupon_codes
    @automatically_apply_coupon_codes.setter
    def automatically_apply_coupon_codes(self, automatically_apply_coupon_codes):
        """Sets the automatically_apply_coupon_codes of this Coupon.
        :param automatically_apply_coupon_codes: The automatically_apply_coupon_codes of this Coupon.  # noqa: E501
        :type: CouponAutomaticallyApplyCouponCodes
        """
        self._automatically_apply_coupon_codes = automatically_apply_coupon_codes
    @property
    def buy_one_get_one(self):
        """Gets the buy_one_get_one of this Coupon.  # noqa: E501
        :return: The buy_one_get_one of this Coupon.  # noqa: E501
        :rtype: CouponBuyOneGetOneLimit
        """
        return self._buy_one_get_one
    @buy_one_get_one.setter
    def buy_one_get_one(self, buy_one_get_one):
        """Sets the buy_one_get_one of this Coupon.
        :param buy_one_get_one: The buy_one_get_one of this Coupon.  # noqa: E501
        :type: CouponBuyOneGetOneLimit
        """
        self._buy_one_get_one = buy_one_get_one
@property
def calculated_description(self):
"""Gets the calculated_description of this Coupon. # noqa: E501
Calculated description displayed to the customer if no description is specified. # noqa: E501
:return: The calculated_description of this Coupon. # noqa: E501
:rtype: str
"""
return self._calculated_description
@calculated_description.setter
def calculated_description(self, calculated_description):
"""Sets the calculated_description of this Coupon.
Calculated description displayed to the customer if no description is specified. # noqa: E501
:param calculated_description: The calculated_description of this Coupon. # noqa: E501
:type: str
"""
self._calculated_description = calculated_description
@property
def can_be_used_with_other_coupons(self):
"""Gets the can_be_used_with_other_coupons of this Coupon. # noqa: E501
True if this coupon can be used with other coupons in a single order. # noqa: E501
:return: The can_be_used_with_other_coupons of this Coupon. # noqa: E501
:rtype: bool
"""
return self._can_be_used_with_other_coupons
@can_be_used_with_other_coupons.setter
def can_be_used_with_other_coupons(self, can_be_used_with_other_coupons):
"""Sets the can_be_used_with_other_coupons of this Coupon.
True if this coupon can be used with other coupons in a single order. # noqa: E501
:param can_be_used_with_other_coupons: The can_be_used_with_other_coupons of this Coupon. # noqa: E501
:type: bool
"""
self._can_be_used_with_other_coupons = can_be_used_with_other_coupons
@property
def coupon_oid(self):
"""Gets the coupon_oid of this Coupon. # noqa: E501
Coupon oid. # noqa: E501
:return: The coupon_oid of this Coupon. # noqa: E501
:rtype: int
"""
return self._coupon_oid
@coupon_oid.setter
def coupon_oid(self, coupon_oid):
"""Sets the coupon_oid of this Coupon.
Coupon oid. # noqa: E501
:param coupon_oid: The coupon_oid of this Coupon. # noqa: E501
:type: int
"""
self._coupon_oid = coupon_oid
@property
def coupon_type(self):
"""Gets the coupon_type of this Coupon. # noqa: E501
Coupon type. # noqa: E501
:return: The coupon_type of this Coupon. # noqa: E501
:rtype: str
"""
return self._coupon_type
@coupon_type.setter
def coupon_type(self, coupon_type):
"""Sets the coupon_type of this Coupon.
Coupon type. # noqa: E501
:param coupon_type: The coupon_type of this Coupon. # noqa: E501
:type: str
"""
if coupon_type is not None and len(coupon_type) > 65:
raise ValueError("Invalid value for `coupon_type`, length must be less than or equal to `65`") # noqa: E501
self._coupon_type = coupon_type
@property
def description(self):
"""Gets the description of this Coupon. # noqa: E501
Description of the coupon up to 50 characters. # noqa: E501
:return: The description of this Coupon. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this Coupon.
Description of the coupon up to 50 characters. # noqa: E501
:param description: The description of this Coupon. # noqa: E501
:type: str
"""
if description is not None and len(description) > 50:
raise ValueError("Invalid value for `description`, length must be less than or equal to `50`") # noqa: E501
self._description = description
    # NOTE(review): generated swagger-codegen accessors for the discount
    # sub-models; plain pass-throughs to private attributes, no validation.
    @property
    def discount_item_with_item_purchase(self):
        """Gets the discount_item_with_item_purchase of this Coupon.  # noqa: E501
        :return: The discount_item_with_item_purchase of this Coupon.  # noqa: E501
        :rtype: CouponDiscountItemWithItemPurchase
        """
        return self._discount_item_with_item_purchase
    @discount_item_with_item_purchase.setter
    def discount_item_with_item_purchase(self, discount_item_with_item_purchase):
        """Sets the discount_item_with_item_purchase of this Coupon.
        :param discount_item_with_item_purchase: The discount_item_with_item_purchase of this Coupon.  # noqa: E501
        :type: CouponDiscountItemWithItemPurchase
        """
        self._discount_item_with_item_purchase = discount_item_with_item_purchase
    @property
    def discount_items(self):
        """Gets the discount_items of this Coupon.  # noqa: E501
        :return: The discount_items of this Coupon.  # noqa: E501
        :rtype: CouponDiscountItems
        """
        return self._discount_items
    @discount_items.setter
    def discount_items(self, discount_items):
        """Sets the discount_items of this Coupon.
        :param discount_items: The discount_items of this Coupon.  # noqa: E501
        :type: CouponDiscountItems
        """
        self._discount_items = discount_items
@property
def expiration_dts(self):
"""Gets the expiration_dts of this Coupon. # noqa: E501
Date/time when coupon expires # noqa: E501
:return: The expiration_dts of this Coupon. # noqa: E501
:rtype: str
"""
return self._expiration_dts
@expiration_dts.setter
def expiration_dts(self, expiration_dts):
"""Sets the expiration_dts of this Coupon.
Date/time when coupon expires # noqa: E501
:param expiration_dts: The expiration_dts of this Coupon. # noqa: E501
:type: str
"""
self._expiration_dts = expiration_dts
    # NOTE(review): generated swagger-codegen accessors for the free-item and
    # free-shipping sub-models; each proxies a private `_<name>` attribute
    # (types per the :rtype: lines), with no validation in the setters.
    @property
    def free_item_and_shipping_with_subtotal(self):
        """Gets the free_item_and_shipping_with_subtotal of this Coupon.  # noqa: E501
        :return: The free_item_and_shipping_with_subtotal of this Coupon.  # noqa: E501
        :rtype: CouponFreeItemAndShippingWithSubtotal
        """
        return self._free_item_and_shipping_with_subtotal
    @free_item_and_shipping_with_subtotal.setter
    def free_item_and_shipping_with_subtotal(self, free_item_and_shipping_with_subtotal):
        """Sets the free_item_and_shipping_with_subtotal of this Coupon.
        :param free_item_and_shipping_with_subtotal: The free_item_and_shipping_with_subtotal of this Coupon.  # noqa: E501
        :type: CouponFreeItemAndShippingWithSubtotal
        """
        self._free_item_and_shipping_with_subtotal = free_item_and_shipping_with_subtotal
    @property
    def free_item_with_item_purchase(self):
        """Gets the free_item_with_item_purchase of this Coupon.  # noqa: E501
        :return: The free_item_with_item_purchase of this Coupon.  # noqa: E501
        :rtype: CouponFreeItemWithItemPurchase
        """
        return self._free_item_with_item_purchase
    @free_item_with_item_purchase.setter
    def free_item_with_item_purchase(self, free_item_with_item_purchase):
        """Sets the free_item_with_item_purchase of this Coupon.
        :param free_item_with_item_purchase: The free_item_with_item_purchase of this Coupon.  # noqa: E501
        :type: CouponFreeItemWithItemPurchase
        """
        self._free_item_with_item_purchase = free_item_with_item_purchase
    @property
    def free_item_with_subtotal(self):
        """Gets the free_item_with_subtotal of this Coupon.  # noqa: E501
        :return: The free_item_with_subtotal of this Coupon.  # noqa: E501
        :rtype: CouponFreeItemWithSubtotal
        """
        return self._free_item_with_subtotal
    @free_item_with_subtotal.setter
    def free_item_with_subtotal(self, free_item_with_subtotal):
        """Sets the free_item_with_subtotal of this Coupon.
        :param free_item_with_subtotal: The free_item_with_subtotal of this Coupon.  # noqa: E501
        :type: CouponFreeItemWithSubtotal
        """
        self._free_item_with_subtotal = free_item_with_subtotal
    @property
    def free_items_with_item_purchase(self):
        """Gets the free_items_with_item_purchase of this Coupon.  # noqa: E501
        :return: The free_items_with_item_purchase of this Coupon.  # noqa: E501
        :rtype: CouponFreeItemsWithItemPurchase
        """
        return self._free_items_with_item_purchase
    @free_items_with_item_purchase.setter
    def free_items_with_item_purchase(self, free_items_with_item_purchase):
        """Sets the free_items_with_item_purchase of this Coupon.
        :param free_items_with_item_purchase: The free_items_with_item_purchase of this Coupon.  # noqa: E501
        :type: CouponFreeItemsWithItemPurchase
        """
        self._free_items_with_item_purchase = free_items_with_item_purchase
    @property
    def free_items_with_mixmatch_purchase(self):
        """Gets the free_items_with_mixmatch_purchase of this Coupon.  # noqa: E501
        :return: The free_items_with_mixmatch_purchase of this Coupon.  # noqa: E501
        :rtype: CouponFreeItemsWithMixMatchPurchase
        """
        return self._free_items_with_mixmatch_purchase
    @free_items_with_mixmatch_purchase.setter
    def free_items_with_mixmatch_purchase(self, free_items_with_mixmatch_purchase):
        """Sets the free_items_with_mixmatch_purchase of this Coupon.
        :param free_items_with_mixmatch_purchase: The free_items_with_mixmatch_purchase of this Coupon.  # noqa: E501
        :type: CouponFreeItemsWithMixMatchPurchase
        """
        self._free_items_with_mixmatch_purchase = free_items_with_mixmatch_purchase
    @property
    def free_shipping(self):
        """Gets the free_shipping of this Coupon.  # noqa: E501
        :return: The free_shipping of this Coupon.  # noqa: E501
        :rtype: CouponFreeShipping
        """
        return self._free_shipping
    @free_shipping.setter
    def free_shipping(self, free_shipping):
        """Sets the free_shipping of this Coupon.
        :param free_shipping: The free_shipping of this Coupon.  # noqa: E501
        :type: CouponFreeShipping
        """
        self._free_shipping = free_shipping
    @property
    def free_shipping_specific_items(self):
        """Gets the free_shipping_specific_items of this Coupon.  # noqa: E501
        :return: The free_shipping_specific_items of this Coupon.  # noqa: E501
        :rtype: CouponFreeShippingSpecificItems
        """
        return self._free_shipping_specific_items
    @free_shipping_specific_items.setter
    def free_shipping_specific_items(self, free_shipping_specific_items):
        """Sets the free_shipping_specific_items of this Coupon.
        :param free_shipping_specific_items: The free_shipping_specific_items of this Coupon.  # noqa: E501
        :type: CouponFreeShippingSpecificItems
        """
        self._free_shipping_specific_items = free_shipping_specific_items
    @property
    def free_shipping_with_items_purchase(self):
        """Gets the free_shipping_with_items_purchase of this Coupon.  # noqa: E501
        :return: The free_shipping_with_items_purchase of this Coupon.  # noqa: E501
        :rtype: CouponFreeShippingWithItemsPurchase
        """
        return self._free_shipping_with_items_purchase
    @free_shipping_with_items_purchase.setter
    def free_shipping_with_items_purchase(self, free_shipping_with_items_purchase):
        """Sets the free_shipping_with_items_purchase of this Coupon.
        :param free_shipping_with_items_purchase: The free_shipping_with_items_purchase of this Coupon.  # noqa: E501
        :type: CouponFreeShippingWithItemsPurchase
        """
        self._free_shipping_with_items_purchase = free_shipping_with_items_purchase
    @property
    def free_shipping_with_subtotal(self):
        """Gets the free_shipping_with_subtotal of this Coupon.  # noqa: E501
        :return: The free_shipping_with_subtotal of this Coupon.  # noqa: E501
        :rtype: CouponFreeShippingWithSubtotal
        """
        return self._free_shipping_with_subtotal
    @free_shipping_with_subtotal.setter
    def free_shipping_with_subtotal(self, free_shipping_with_subtotal):
        """Sets the free_shipping_with_subtotal of this Coupon.
        :param free_shipping_with_subtotal: The free_shipping_with_subtotal of this Coupon.  # noqa: E501
        :type: CouponFreeShippingWithSubtotal
        """
        self._free_shipping_with_subtotal = free_shipping_with_subtotal
@property
def hide_from_customer(self):
"""Gets the hide_from_customer of this Coupon. # noqa: E501
Hide coupon from customer during checkout. Often used when coupons are automatic discounting mechanisms. # noqa: E501
:return: The hide_from_customer of this Coupon. # noqa: E501
:rtype: bool
"""
return self._hide_from_customer
@hide_from_customer.setter
def hide_from_customer(self, hide_from_customer):
"""Sets the hide_from_customer of this Coupon.
Hide coupon from customer during checkout. Often used when coupons are automatic discounting mechanisms. # noqa: E501
:param hide_from_customer: The hide_from_customer of this Coupon. # noqa: E501
:type: bool
"""
self._hide_from_customer = hide_from_customer
@property
def merchant_code(self):
"""Gets the merchant_code of this Coupon. # noqa: E501
Merchant code of coupon up to 20 characters. # noqa: E501
:return: The merchant_code of this Coupon. # noqa: E501
:rtype: str
"""
return self._merchant_code
@merchant_code.setter
def merchant_code(self, merchant_code):
"""Sets the merchant_code of this Coupon.
Merchant code of coupon up to 20 characters. # noqa: E501
:param merchant_code: The merchant_code of this Coupon. # noqa: E501
:type: str
"""
if merchant_code is not None and len(merchant_code) > 20:
raise ValueError("Invalid value for `merchant_code`, length must be less than or equal to `20`") # noqa: E501
self._merchant_code = merchant_code
@property
def merchant_notes(self):
"""Gets the merchant_notes of this Coupon. # noqa: E501
Internal notes about this coupon. These are not visible to customer. # noqa: E501
:return: The merchant_notes of this Coupon. # noqa: E501
:rtype: str
"""
return self._merchant_notes
@merchant_notes.setter
def merchant_notes(self, merchant_notes):
"""Sets the merchant_notes of this Coupon.
Internal notes about this coupon. These are not visible to customer. # noqa: E501
:param merchant_notes: The merchant_notes of this Coupon. # noqa: E501
:type: str
"""
if merchant_notes is not None and len(merchant_notes) > 250:
raise ValueError("Invalid value for `merchant_notes`, length must be less than or equal to `250`") # noqa: E501
self._merchant_notes = merchant_notes
    # NOTE(review): generated swagger-codegen accessors for the percent-off
    # (and related) sub-models; each proxies a private `_<name>` attribute
    # (types per the :rtype: lines), with no validation in the setters.
    @property
    def multiple_amounts_off_items(self):
        """Gets the multiple_amounts_off_items of this Coupon.  # noqa: E501
        :return: The multiple_amounts_off_items of this Coupon.  # noqa: E501
        :rtype: CouponMultipleAmountsOffItems
        """
        return self._multiple_amounts_off_items
    @multiple_amounts_off_items.setter
    def multiple_amounts_off_items(self, multiple_amounts_off_items):
        """Sets the multiple_amounts_off_items of this Coupon.
        :param multiple_amounts_off_items: The multiple_amounts_off_items of this Coupon.  # noqa: E501
        :type: CouponMultipleAmountsOffItems
        """
        self._multiple_amounts_off_items = multiple_amounts_off_items
    @property
    def no_discount(self):
        """Gets the no_discount of this Coupon.  # noqa: E501
        :return: The no_discount of this Coupon.  # noqa: E501
        :rtype: CouponNoDiscount
        """
        return self._no_discount
    @no_discount.setter
    def no_discount(self, no_discount):
        """Sets the no_discount of this Coupon.
        :param no_discount: The no_discount of this Coupon.  # noqa: E501
        :type: CouponNoDiscount
        """
        self._no_discount = no_discount
    @property
    def percent_off_item_with_items_quantity_purchase(self):
        """Gets the percent_off_item_with_items_quantity_purchase of this Coupon.  # noqa: E501
        :return: The percent_off_item_with_items_quantity_purchase of this Coupon.  # noqa: E501
        :rtype: CouponPercentOffItemWithItemsQuantityPurchase
        """
        return self._percent_off_item_with_items_quantity_purchase
    @percent_off_item_with_items_quantity_purchase.setter
    def percent_off_item_with_items_quantity_purchase(self, percent_off_item_with_items_quantity_purchase):
        """Sets the percent_off_item_with_items_quantity_purchase of this Coupon.
        :param percent_off_item_with_items_quantity_purchase: The percent_off_item_with_items_quantity_purchase of this Coupon.  # noqa: E501
        :type: CouponPercentOffItemWithItemsQuantityPurchase
        """
        self._percent_off_item_with_items_quantity_purchase = percent_off_item_with_items_quantity_purchase
    @property
    def percent_off_items(self):
        """Gets the percent_off_items of this Coupon.  # noqa: E501
        :return: The percent_off_items of this Coupon.  # noqa: E501
        :rtype: CouponPercentOffItems
        """
        return self._percent_off_items
    @percent_off_items.setter
    def percent_off_items(self, percent_off_items):
        """Sets the percent_off_items of this Coupon.
        :param percent_off_items: The percent_off_items of this Coupon.  # noqa: E501
        :type: CouponPercentOffItems
        """
        self._percent_off_items = percent_off_items
    @property
    def percent_off_items_and_free_shipping(self):
        """Gets the percent_off_items_and_free_shipping of this Coupon.  # noqa: E501
        :return: The percent_off_items_and_free_shipping of this Coupon.  # noqa: E501
        :rtype: CouponPercentOffItemsAndFreeShipping
        """
        return self._percent_off_items_and_free_shipping
    @percent_off_items_and_free_shipping.setter
    def percent_off_items_and_free_shipping(self, percent_off_items_and_free_shipping):
        """Sets the percent_off_items_and_free_shipping of this Coupon.
        :param percent_off_items_and_free_shipping: The percent_off_items_and_free_shipping of this Coupon.  # noqa: E501
        :type: CouponPercentOffItemsAndFreeShipping
        """
        self._percent_off_items_and_free_shipping = percent_off_items_and_free_shipping
    @property
    def percent_off_items_with_items_purchase(self):
        """Gets the percent_off_items_with_items_purchase of this Coupon.  # noqa: E501
        :return: The percent_off_items_with_items_purchase of this Coupon.  # noqa: E501
        :rtype: CouponPercentOffItemsWithItemsPurchase
        """
        return self._percent_off_items_with_items_purchase
    @percent_off_items_with_items_purchase.setter
    def percent_off_items_with_items_purchase(self, percent_off_items_with_items_purchase):
        """Sets the percent_off_items_with_items_purchase of this Coupon.
        :param percent_off_items_with_items_purchase: The percent_off_items_with_items_purchase of this Coupon.  # noqa: E501
        :type: CouponPercentOffItemsWithItemsPurchase
        """
        self._percent_off_items_with_items_purchase = percent_off_items_with_items_purchase
    @property
    def percent_off_msrp_items(self):
        """Gets the percent_off_msrp_items of this Coupon.  # noqa: E501
        :return: The percent_off_msrp_items of this Coupon.  # noqa: E501
        :rtype: CouponPercentOffMsrpItems
        """
        return self._percent_off_msrp_items
    @percent_off_msrp_items.setter
    def percent_off_msrp_items(self, percent_off_msrp_items):
        """Sets the percent_off_msrp_items of this Coupon.
        :param percent_off_msrp_items: The percent_off_msrp_items of this Coupon.  # noqa: E501
        :type: CouponPercentOffMsrpItems
        """
        self._percent_off_msrp_items = percent_off_msrp_items
    @property
    def percent_off_retail_price_items(self):
        """Gets the percent_off_retail_price_items of this Coupon.  # noqa: E501
        :return: The percent_off_retail_price_items of this Coupon.  # noqa: E501
        :rtype: CouponPercentOffRetailPriceItems
        """
        return self._percent_off_retail_price_items
    @percent_off_retail_price_items.setter
    def percent_off_retail_price_items(self, percent_off_retail_price_items):
        """Sets the percent_off_retail_price_items of this Coupon.
        :param percent_off_retail_price_items: The percent_off_retail_price_items of this Coupon.  # noqa: E501
        :type: CouponPercentOffRetailPriceItems
        """
        self._percent_off_retail_price_items = percent_off_retail_price_items
    @property
    def percent_off_shipping(self):
        """Gets the percent_off_shipping of this Coupon.  # noqa: E501
        :return: The percent_off_shipping of this Coupon.  # noqa: E501
        :rtype: CouponPercentOffShipping
        """
        return self._percent_off_shipping
    @percent_off_shipping.setter
    def percent_off_shipping(self, percent_off_shipping):
        """Sets the percent_off_shipping of this Coupon.
        :param percent_off_shipping: The percent_off_shipping of this Coupon.  # noqa: E501
        :type: CouponPercentOffShipping
        """
        self._percent_off_shipping = percent_off_shipping
    @property
    def percent_off_subtotal(self):
        """Gets the percent_off_subtotal of this Coupon.  # noqa: E501
        :return: The percent_off_subtotal of this Coupon.  # noqa: E501
        :rtype: CouponPercentOffSubtotal
        """
        return self._percent_off_subtotal
    @percent_off_subtotal.setter
    def percent_off_subtotal(self, percent_off_subtotal):
        """Sets the percent_off_subtotal of this Coupon.
        :param percent_off_subtotal: The percent_off_subtotal of this Coupon.  # noqa: E501
        :type: CouponPercentOffSubtotal
        """
        self._percent_off_subtotal = percent_off_subtotal
    @property
    def percent_off_subtotal_and_free_shipping(self):
        """Gets the percent_off_subtotal_and_free_shipping of this Coupon.  # noqa: E501
        :return: The percent_off_subtotal_and_free_shipping of this Coupon.  # noqa: E501
        :rtype: CouponPercentOffSubtotalAndFreeShipping
        """
        return self._percent_off_subtotal_and_free_shipping
    @percent_off_subtotal_and_free_shipping.setter
    def percent_off_subtotal_and_free_shipping(self, percent_off_subtotal_and_free_shipping):
        """Sets the percent_off_subtotal_and_free_shipping of this Coupon.
        :param percent_off_subtotal_and_free_shipping: The percent_off_subtotal_and_free_shipping of this Coupon.  # noqa: E501
        :type: CouponPercentOffSubtotalAndFreeShipping
        """
        self._percent_off_subtotal_and_free_shipping = percent_off_subtotal_and_free_shipping
    @property
    def percent_off_subtotal_limit(self):
        """Gets the percent_off_subtotal_limit of this Coupon.  # noqa: E501
        :return: The percent_off_subtotal_limit of this Coupon.  # noqa: E501
        :rtype: CouponPercentOffSubtotalLimit
        """
        return self._percent_off_subtotal_limit
    @percent_off_subtotal_limit.setter
    def percent_off_subtotal_limit(self, percent_off_subtotal_limit):
        """Sets the percent_off_subtotal_limit of this Coupon.
        :param percent_off_subtotal_limit: The percent_off_subtotal_limit of this Coupon.  # noqa: E501
        :type: CouponPercentOffSubtotalLimit
        """
        self._percent_off_subtotal_limit = percent_off_subtotal_limit
    @property
    def percent_off_subtotal_with_items_purchase(self):
        """Gets the percent_off_subtotal_with_items_purchase of this Coupon.  # noqa: E501
        :return: The percent_off_subtotal_with_items_purchase of this Coupon.  # noqa: E501
        :rtype: CouponPercentOffSubtotalWithItemsPurchase
        """
        return self._percent_off_subtotal_with_items_purchase
    @percent_off_subtotal_with_items_purchase.setter
    def percent_off_subtotal_with_items_purchase(self, percent_off_subtotal_with_items_purchase):
        """Sets the percent_off_subtotal_with_items_purchase of this Coupon.
        :param percent_off_subtotal_with_items_purchase: The percent_off_subtotal_with_items_purchase of this Coupon.  # noqa: E501
        :type: CouponPercentOffSubtotalWithItemsPurchase
        """
        self._percent_off_subtotal_with_items_purchase = percent_off_subtotal_with_items_purchase
    @property
    def percent_off_subtotal_with_subtotal(self):
        """Gets the percent_off_subtotal_with_subtotal of this Coupon.  # noqa: E501
        :return: The percent_off_subtotal_with_subtotal of this Coupon.  # noqa: E501
        :rtype: CouponPercentOffSubtotalWithSubtotal
        """
        return self._percent_off_subtotal_with_subtotal
    @percent_off_subtotal_with_subtotal.setter
    def percent_off_subtotal_with_subtotal(self, percent_off_subtotal_with_subtotal):
        """Sets the percent_off_subtotal_with_subtotal of this Coupon.
        :param percent_off_subtotal_with_subtotal: The percent_off_subtotal_with_subtotal of this Coupon.  # noqa: E501
        :type: CouponPercentOffSubtotalWithSubtotal
        """
        self._percent_off_subtotal_with_subtotal = percent_off_subtotal_with_subtotal
@property
def quickbooks_code(self):
"""Gets the quickbooks_code of this Coupon. # noqa: E501
Quickbooks accounting code. # noqa: E501
:return: The quickbooks_code of this Coupon. # noqa: E501
:rtype: str
"""
return self._quickbooks_code
@quickbooks_code.setter
def quickbooks_code(self, quickbooks_code):
"""Sets the quickbooks_code of this Coupon.
Quickbooks accounting code. # noqa: E501
:param quickbooks_code: The quickbooks_code of this Coupon. # noqa: E501
:type: str
"""
if quickbooks_code is not None and len(quickbooks_code) > 20:
raise ValueError("Invalid value for `quickbooks_code`, length must be less than or equal to `20`") # noqa: E501
self._quickbooks_code = quickbooks_code
    @property
    def restrict_by_postal_codes(self):
        """Optional list of postal codes which restrict a coupon to
        within these postal codes.

        :return: The restrict_by_postal_codes of this Coupon.
        :rtype: list[str]
        """
        return self._restrict_by_postal_codes
    @restrict_by_postal_codes.setter
    def restrict_by_postal_codes(self, restrict_by_postal_codes):
        """Set the optional list of postal codes restricting this coupon.

        :param restrict_by_postal_codes: new value.
        :type: list[str]
        """
        self._restrict_by_postal_codes = restrict_by_postal_codes
    @property
    def restrict_by_screen_branding_theme_codes(self):
        """Optional list of legacy screen branding theme codes that limit
        coupon use to only those themes.

        :return: The restrict_by_screen_branding_theme_codes of this Coupon.
        :rtype: list[CouponRestriction]
        """
        return self._restrict_by_screen_branding_theme_codes
    @restrict_by_screen_branding_theme_codes.setter
    def restrict_by_screen_branding_theme_codes(self, restrict_by_screen_branding_theme_codes):
        """Set the legacy screen branding theme code restrictions.

        :param restrict_by_screen_branding_theme_codes: new value.
        :type: list[CouponRestriction]
        """
        self._restrict_by_screen_branding_theme_codes = restrict_by_screen_branding_theme_codes
    @property
    def restrict_by_storefronts(self):
        """Optional list of storefronts that limit coupon use to only
        those storefronts.

        :return: The restrict_by_storefronts of this Coupon.
        :rtype: list[CouponRestriction]
        """
        return self._restrict_by_storefronts
    @restrict_by_storefronts.setter
    def restrict_by_storefronts(self, restrict_by_storefronts):
        """Set the storefront restrictions for this coupon.

        :param restrict_by_storefronts: new value.
        :type: list[CouponRestriction]
        """
        self._restrict_by_storefronts = restrict_by_storefronts
    @property
    def start_dts(self):
        """Date/time when the coupon becomes valid.

        :return: The start_dts of this Coupon.
        :rtype: str
        """
        return self._start_dts
    @start_dts.setter
    def start_dts(self, start_dts):
        """Set the date/time when the coupon becomes valid.

        :param start_dts: new value.
        :type: str
        """
        self._start_dts = start_dts
    @property
    def super_coupon(self):
        """If true, this coupon can be used with ANY other coupon
        regardless of the other coupon's configuration.

        :return: The super_coupon of this Coupon.
        :rtype: bool
        """
        return self._super_coupon
    @super_coupon.setter
    def super_coupon(self, super_coupon):
        """Set whether this coupon stacks with any other coupon.

        :param super_coupon: new value.
        :type: bool
        """
        self._super_coupon = super_coupon
    @property
    def tiered_amount_off_items(self):
        """The tiered_amount_off_items of this Coupon.

        :rtype: CouponTieredAmountOffItems
        """
        return self._tiered_amount_off_items
    @tiered_amount_off_items.setter
    def tiered_amount_off_items(self, tiered_amount_off_items):
        """Set the tiered_amount_off_items of this Coupon.

        :param tiered_amount_off_items: new value.
        :type: CouponTieredAmountOffItems
        """
        self._tiered_amount_off_items = tiered_amount_off_items
    @property
    def tiered_amount_off_subtotal(self):
        """The tiered_amount_off_subtotal of this Coupon.

        :rtype: CouponTieredAmountOffSubtotal
        """
        return self._tiered_amount_off_subtotal
    @tiered_amount_off_subtotal.setter
    def tiered_amount_off_subtotal(self, tiered_amount_off_subtotal):
        """Set the tiered_amount_off_subtotal of this Coupon.

        :param tiered_amount_off_subtotal: new value.
        :type: CouponTieredAmountOffSubtotal
        """
        self._tiered_amount_off_subtotal = tiered_amount_off_subtotal
    @property
    def tiered_percent_off_items(self):
        """The tiered_percent_off_items of this Coupon.

        :rtype: CouponTieredPercentOffItems
        """
        return self._tiered_percent_off_items
    @tiered_percent_off_items.setter
    def tiered_percent_off_items(self, tiered_percent_off_items):
        """Set the tiered_percent_off_items of this Coupon.

        :param tiered_percent_off_items: new value.
        :type: CouponTieredPercentOffItems
        """
        self._tiered_percent_off_items = tiered_percent_off_items
    @property
    def tiered_percent_off_shipping(self):
        """The tiered_percent_off_shipping of this Coupon.

        :rtype: CouponTieredPercentOffShipping
        """
        return self._tiered_percent_off_shipping
    @tiered_percent_off_shipping.setter
    def tiered_percent_off_shipping(self, tiered_percent_off_shipping):
        """Set the tiered_percent_off_shipping of this Coupon.

        :param tiered_percent_off_shipping: new value.
        :type: CouponTieredPercentOffShipping
        """
        self._tiered_percent_off_shipping = tiered_percent_off_shipping
    @property
    def tiered_percent_off_subtotal(self):
        """The tiered_percent_off_subtotal of this Coupon.

        :rtype: CouponTieredPercentOffSubtotal
        """
        return self._tiered_percent_off_subtotal
    @tiered_percent_off_subtotal.setter
    def tiered_percent_off_subtotal(self, tiered_percent_off_subtotal):
        """Set the tiered_percent_off_subtotal of this Coupon.

        :param tiered_percent_off_subtotal: new value.
        :type: CouponTieredPercentOffSubtotal
        """
        self._tiered_percent_off_subtotal = tiered_percent_off_subtotal
    @property
    def usable_by(self):
        """Who may use this coupon.

        :return: The usable_by of this Coupon.
        :rtype: str
        """
        return self._usable_by
    @usable_by.setter
    def usable_by(self, usable_by):
        """Set who may use this coupon.

        :param usable_by: new value; at most 50 characters.
        :type: str
        :raises ValueError: if the value exceeds 50 characters.
        """
        if usable_by is not None and len(usable_by) > 50:
            raise ValueError("Invalid value for `usable_by`, length must be less than or equal to `50`")  # noqa: E501
        self._usable_by = usable_by
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Coupon, dict):
for key, value in self.items():
result[key] = value
return result
    def to_str(self):
        """Return a pretty-printed string form of :meth:`to_dict`."""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """Debug representation used by `print` and `pprint`; delegates to to_str()."""
        return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Coupon):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 40.133373 | 1,910 | 0.718373 |
import pprint
import re
import six
class Coupon(object):
swagger_types = {
'affiliate_oid': 'int',
'allow_multiple_one_time_codes': 'bool',
'amount_off_items': 'CouponAmountOffItems',
'amount_off_shipping': 'CouponAmountOffShipping',
'amount_off_shipping_with_items_purchase': 'CouponAmountOffShippingWithItemsPurchase',
'amount_off_subtotal': 'CouponAmountOffSubtotal',
'amount_off_subtotal_and_free_shipping': 'CouponAmountOffSubtotalFreeShippingWithPurchase',
'amount_off_subtotal_and_shipping': 'CouponAmountOffSubtotalAndShipping',
'amount_off_subtotal_with_block_purchase': 'CouponAmountOffSubtotalWithBlockPurchase',
'amount_off_subtotal_with_items_purchase': 'CouponAmountOffSubtotalWithItemsPurchase',
'amount_off_subtotal_with_purchase': 'CouponAmountOffSubtotalWithPurchase',
'automatically_apply_coupon_codes': 'CouponAutomaticallyApplyCouponCodes',
'buy_one_get_one': 'CouponBuyOneGetOneLimit',
'calculated_description': 'str',
'can_be_used_with_other_coupons': 'bool',
'coupon_oid': 'int',
'coupon_type': 'str',
'description': 'str',
'discount_item_with_item_purchase': 'CouponDiscountItemWithItemPurchase',
'discount_items': 'CouponDiscountItems',
'expiration_dts': 'str',
'free_item_and_shipping_with_subtotal': 'CouponFreeItemAndShippingWithSubtotal',
'free_item_with_item_purchase': 'CouponFreeItemWithItemPurchase',
'free_item_with_subtotal': 'CouponFreeItemWithSubtotal',
'free_items_with_item_purchase': 'CouponFreeItemsWithItemPurchase',
'free_items_with_mixmatch_purchase': 'CouponFreeItemsWithMixMatchPurchase',
'free_shipping': 'CouponFreeShipping',
'free_shipping_specific_items': 'CouponFreeShippingSpecificItems',
'free_shipping_with_items_purchase': 'CouponFreeShippingWithItemsPurchase',
'free_shipping_with_subtotal': 'CouponFreeShippingWithSubtotal',
'hide_from_customer': 'bool',
'merchant_code': 'str',
'merchant_notes': 'str',
'multiple_amounts_off_items': 'CouponMultipleAmountsOffItems',
'no_discount': 'CouponNoDiscount',
'percent_off_item_with_items_quantity_purchase': 'CouponPercentOffItemWithItemsQuantityPurchase',
'percent_off_items': 'CouponPercentOffItems',
'percent_off_items_and_free_shipping': 'CouponPercentOffItemsAndFreeShipping',
'percent_off_items_with_items_purchase': 'CouponPercentOffItemsWithItemsPurchase',
'percent_off_msrp_items': 'CouponPercentOffMsrpItems',
'percent_off_retail_price_items': 'CouponPercentOffRetailPriceItems',
'percent_off_shipping': 'CouponPercentOffShipping',
'percent_off_subtotal': 'CouponPercentOffSubtotal',
'percent_off_subtotal_and_free_shipping': 'CouponPercentOffSubtotalAndFreeShipping',
'percent_off_subtotal_limit': 'CouponPercentOffSubtotalLimit',
'percent_off_subtotal_with_items_purchase': 'CouponPercentOffSubtotalWithItemsPurchase',
'percent_off_subtotal_with_subtotal': 'CouponPercentOffSubtotalWithSubtotal',
'quickbooks_code': 'str',
'restrict_by_postal_codes': 'list[str]',
'restrict_by_screen_branding_theme_codes': 'list[CouponRestriction]',
'restrict_by_storefronts': 'list[CouponRestriction]',
'start_dts': 'str',
'super_coupon': 'bool',
'tiered_amount_off_items': 'CouponTieredAmountOffItems',
'tiered_amount_off_subtotal': 'CouponTieredAmountOffSubtotal',
'tiered_percent_off_items': 'CouponTieredPercentOffItems',
'tiered_percent_off_shipping': 'CouponTieredPercentOffShipping',
'tiered_percent_off_subtotal': 'CouponTieredPercentOffSubtotal',
'usable_by': 'str'
}
attribute_map = {
'affiliate_oid': 'affiliate_oid',
'allow_multiple_one_time_codes': 'allow_multiple_one_time_codes',
'amount_off_items': 'amount_off_items',
'amount_off_shipping': 'amount_off_shipping',
'amount_off_shipping_with_items_purchase': 'amount_off_shipping_with_items_purchase',
'amount_off_subtotal': 'amount_off_subtotal',
'amount_off_subtotal_and_free_shipping': 'amount_off_subtotal_and_free_shipping',
'amount_off_subtotal_and_shipping': 'amount_off_subtotal_and_shipping',
'amount_off_subtotal_with_block_purchase': 'amount_off_subtotal_with_block_purchase',
'amount_off_subtotal_with_items_purchase': 'amount_off_subtotal_with_items_purchase',
'amount_off_subtotal_with_purchase': 'amount_off_subtotal_with_purchase',
'automatically_apply_coupon_codes': 'automatically_apply_coupon_codes',
'buy_one_get_one': 'buy_one_get_one',
'calculated_description': 'calculated_description',
'can_be_used_with_other_coupons': 'can_be_used_with_other_coupons',
'coupon_oid': 'coupon_oid',
'coupon_type': 'coupon_type',
'description': 'description',
'discount_item_with_item_purchase': 'discount_item_with_item_purchase',
'discount_items': 'discount_items',
'expiration_dts': 'expiration_dts',
'free_item_and_shipping_with_subtotal': 'free_item_and_shipping_with_subtotal',
'free_item_with_item_purchase': 'free_item_with_item_purchase',
'free_item_with_subtotal': 'free_item_with_subtotal',
'free_items_with_item_purchase': 'free_items_with_item_purchase',
'free_items_with_mixmatch_purchase': 'free_items_with_mixmatch_purchase',
'free_shipping': 'free_shipping',
'free_shipping_specific_items': 'free_shipping_specific_items',
'free_shipping_with_items_purchase': 'free_shipping_with_items_purchase',
'free_shipping_with_subtotal': 'free_shipping_with_subtotal',
'hide_from_customer': 'hide_from_customer',
'merchant_code': 'merchant_code',
'merchant_notes': 'merchant_notes',
'multiple_amounts_off_items': 'multiple_amounts_off_items',
'no_discount': 'no_discount',
'percent_off_item_with_items_quantity_purchase': 'percent_off_item_with_items_quantity_purchase',
'percent_off_items': 'percent_off_items',
'percent_off_items_and_free_shipping': 'percent_off_items_and_free_shipping',
'percent_off_items_with_items_purchase': 'percent_off_items_with_items_purchase',
'percent_off_msrp_items': 'percent_off_msrp_items',
'percent_off_retail_price_items': 'percent_off_retail_price_items',
'percent_off_shipping': 'percent_off_shipping',
'percent_off_subtotal': 'percent_off_subtotal',
'percent_off_subtotal_and_free_shipping': 'percent_off_subtotal_and_free_shipping',
'percent_off_subtotal_limit': 'percent_off_subtotal_limit',
'percent_off_subtotal_with_items_purchase': 'percent_off_subtotal_with_items_purchase',
'percent_off_subtotal_with_subtotal': 'percent_off_subtotal_with_subtotal',
'quickbooks_code': 'quickbooks_code',
'restrict_by_postal_codes': 'restrict_by_postal_codes',
'restrict_by_screen_branding_theme_codes': 'restrict_by_screen_branding_theme_codes',
'restrict_by_storefronts': 'restrict_by_storefronts',
'start_dts': 'start_dts',
'super_coupon': 'super_coupon',
'tiered_amount_off_items': 'tiered_amount_off_items',
'tiered_amount_off_subtotal': 'tiered_amount_off_subtotal',
'tiered_percent_off_items': 'tiered_percent_off_items',
'tiered_percent_off_shipping': 'tiered_percent_off_shipping',
'tiered_percent_off_subtotal': 'tiered_percent_off_subtotal',
'usable_by': 'usable_by'
}
def __init__(self, affiliate_oid=None, allow_multiple_one_time_codes=None, amount_off_items=None, amount_off_shipping=None, amount_off_shipping_with_items_purchase=None, amount_off_subtotal=None, amount_off_subtotal_and_free_shipping=None, amount_off_subtotal_and_shipping=None, amount_off_subtotal_with_block_purchase=None, amount_off_subtotal_with_items_purchase=None, amount_off_subtotal_with_purchase=None, automatically_apply_coupon_codes=None, buy_one_get_one=None, calculated_description=None, can_be_used_with_other_coupons=None, coupon_oid=None, coupon_type=None, description=None, discount_item_with_item_purchase=None, discount_items=None, expiration_dts=None, free_item_and_shipping_with_subtotal=None, free_item_with_item_purchase=None, free_item_with_subtotal=None, free_items_with_item_purchase=None, free_items_with_mixmatch_purchase=None, free_shipping=None, free_shipping_specific_items=None, free_shipping_with_items_purchase=None, free_shipping_with_subtotal=None, hide_from_customer=None, merchant_code=None, merchant_notes=None, multiple_amounts_off_items=None, no_discount=None, percent_off_item_with_items_quantity_purchase=None, percent_off_items=None, percent_off_items_and_free_shipping=None, percent_off_items_with_items_purchase=None, percent_off_msrp_items=None, percent_off_retail_price_items=None, percent_off_shipping=None, percent_off_subtotal=None, percent_off_subtotal_and_free_shipping=None, percent_off_subtotal_limit=None, percent_off_subtotal_with_items_purchase=None, percent_off_subtotal_with_subtotal=None, quickbooks_code=None, restrict_by_postal_codes=None, restrict_by_screen_branding_theme_codes=None, restrict_by_storefronts=None, start_dts=None, super_coupon=None, tiered_amount_off_items=None, tiered_amount_off_subtotal=None, tiered_percent_off_items=None, tiered_percent_off_shipping=None, tiered_percent_off_subtotal=None, usable_by=None):
self._affiliate_oid = None
self._allow_multiple_one_time_codes = None
self._amount_off_items = None
self._amount_off_shipping = None
self._amount_off_shipping_with_items_purchase = None
self._amount_off_subtotal = None
self._amount_off_subtotal_and_free_shipping = None
self._amount_off_subtotal_and_shipping = None
self._amount_off_subtotal_with_block_purchase = None
self._amount_off_subtotal_with_items_purchase = None
self._amount_off_subtotal_with_purchase = None
self._automatically_apply_coupon_codes = None
self._buy_one_get_one = None
self._calculated_description = None
self._can_be_used_with_other_coupons = None
self._coupon_oid = None
self._coupon_type = None
self._description = None
self._discount_item_with_item_purchase = None
self._discount_items = None
self._expiration_dts = None
self._free_item_and_shipping_with_subtotal = None
self._free_item_with_item_purchase = None
self._free_item_with_subtotal = None
self._free_items_with_item_purchase = None
self._free_items_with_mixmatch_purchase = None
self._free_shipping = None
self._free_shipping_specific_items = None
self._free_shipping_with_items_purchase = None
self._free_shipping_with_subtotal = None
self._hide_from_customer = None
self._merchant_code = None
self._merchant_notes = None
self._multiple_amounts_off_items = None
self._no_discount = None
self._percent_off_item_with_items_quantity_purchase = None
self._percent_off_items = None
self._percent_off_items_and_free_shipping = None
self._percent_off_items_with_items_purchase = None
self._percent_off_msrp_items = None
self._percent_off_retail_price_items = None
self._percent_off_shipping = None
self._percent_off_subtotal = None
self._percent_off_subtotal_and_free_shipping = None
self._percent_off_subtotal_limit = None
self._percent_off_subtotal_with_items_purchase = None
self._percent_off_subtotal_with_subtotal = None
self._quickbooks_code = None
self._restrict_by_postal_codes = None
self._restrict_by_screen_branding_theme_codes = None
self._restrict_by_storefronts = None
self._start_dts = None
self._super_coupon = None
self._tiered_amount_off_items = None
self._tiered_amount_off_subtotal = None
self._tiered_percent_off_items = None
self._tiered_percent_off_shipping = None
self._tiered_percent_off_subtotal = None
self._usable_by = None
self.discriminator = None
if affiliate_oid is not None:
self.affiliate_oid = affiliate_oid
if allow_multiple_one_time_codes is not None:
self.allow_multiple_one_time_codes = allow_multiple_one_time_codes
if amount_off_items is not None:
self.amount_off_items = amount_off_items
if amount_off_shipping is not None:
self.amount_off_shipping = amount_off_shipping
if amount_off_shipping_with_items_purchase is not None:
self.amount_off_shipping_with_items_purchase = amount_off_shipping_with_items_purchase
if amount_off_subtotal is not None:
self.amount_off_subtotal = amount_off_subtotal
if amount_off_subtotal_and_free_shipping is not None:
self.amount_off_subtotal_and_free_shipping = amount_off_subtotal_and_free_shipping
if amount_off_subtotal_and_shipping is not None:
self.amount_off_subtotal_and_shipping = amount_off_subtotal_and_shipping
if amount_off_subtotal_with_block_purchase is not None:
self.amount_off_subtotal_with_block_purchase = amount_off_subtotal_with_block_purchase
if amount_off_subtotal_with_items_purchase is not None:
self.amount_off_subtotal_with_items_purchase = amount_off_subtotal_with_items_purchase
if amount_off_subtotal_with_purchase is not None:
self.amount_off_subtotal_with_purchase = amount_off_subtotal_with_purchase
if automatically_apply_coupon_codes is not None:
self.automatically_apply_coupon_codes = automatically_apply_coupon_codes
if buy_one_get_one is not None:
self.buy_one_get_one = buy_one_get_one
if calculated_description is not None:
self.calculated_description = calculated_description
if can_be_used_with_other_coupons is not None:
self.can_be_used_with_other_coupons = can_be_used_with_other_coupons
if coupon_oid is not None:
self.coupon_oid = coupon_oid
if coupon_type is not None:
self.coupon_type = coupon_type
if description is not None:
self.description = description
if discount_item_with_item_purchase is not None:
self.discount_item_with_item_purchase = discount_item_with_item_purchase
if discount_items is not None:
self.discount_items = discount_items
if expiration_dts is not None:
self.expiration_dts = expiration_dts
if free_item_and_shipping_with_subtotal is not None:
self.free_item_and_shipping_with_subtotal = free_item_and_shipping_with_subtotal
if free_item_with_item_purchase is not None:
self.free_item_with_item_purchase = free_item_with_item_purchase
if free_item_with_subtotal is not None:
self.free_item_with_subtotal = free_item_with_subtotal
if free_items_with_item_purchase is not None:
self.free_items_with_item_purchase = free_items_with_item_purchase
if free_items_with_mixmatch_purchase is not None:
self.free_items_with_mixmatch_purchase = free_items_with_mixmatch_purchase
if free_shipping is not None:
self.free_shipping = free_shipping
if free_shipping_specific_items is not None:
self.free_shipping_specific_items = free_shipping_specific_items
if free_shipping_with_items_purchase is not None:
self.free_shipping_with_items_purchase = free_shipping_with_items_purchase
if free_shipping_with_subtotal is not None:
self.free_shipping_with_subtotal = free_shipping_with_subtotal
if hide_from_customer is not None:
self.hide_from_customer = hide_from_customer
if merchant_code is not None:
self.merchant_code = merchant_code
if merchant_notes is not None:
self.merchant_notes = merchant_notes
if multiple_amounts_off_items is not None:
self.multiple_amounts_off_items = multiple_amounts_off_items
if no_discount is not None:
self.no_discount = no_discount
if percent_off_item_with_items_quantity_purchase is not None:
self.percent_off_item_with_items_quantity_purchase = percent_off_item_with_items_quantity_purchase
if percent_off_items is not None:
self.percent_off_items = percent_off_items
if percent_off_items_and_free_shipping is not None:
self.percent_off_items_and_free_shipping = percent_off_items_and_free_shipping
if percent_off_items_with_items_purchase is not None:
self.percent_off_items_with_items_purchase = percent_off_items_with_items_purchase
if percent_off_msrp_items is not None:
self.percent_off_msrp_items = percent_off_msrp_items
if percent_off_retail_price_items is not None:
self.percent_off_retail_price_items = percent_off_retail_price_items
if percent_off_shipping is not None:
self.percent_off_shipping = percent_off_shipping
if percent_off_subtotal is not None:
self.percent_off_subtotal = percent_off_subtotal
if percent_off_subtotal_and_free_shipping is not None:
self.percent_off_subtotal_and_free_shipping = percent_off_subtotal_and_free_shipping
if percent_off_subtotal_limit is not None:
self.percent_off_subtotal_limit = percent_off_subtotal_limit
if percent_off_subtotal_with_items_purchase is not None:
self.percent_off_subtotal_with_items_purchase = percent_off_subtotal_with_items_purchase
if percent_off_subtotal_with_subtotal is not None:
self.percent_off_subtotal_with_subtotal = percent_off_subtotal_with_subtotal
if quickbooks_code is not None:
self.quickbooks_code = quickbooks_code
if restrict_by_postal_codes is not None:
self.restrict_by_postal_codes = restrict_by_postal_codes
if restrict_by_screen_branding_theme_codes is not None:
self.restrict_by_screen_branding_theme_codes = restrict_by_screen_branding_theme_codes
if restrict_by_storefronts is not None:
self.restrict_by_storefronts = restrict_by_storefronts
if start_dts is not None:
self.start_dts = start_dts
if super_coupon is not None:
self.super_coupon = super_coupon
if tiered_amount_off_items is not None:
self.tiered_amount_off_items = tiered_amount_off_items
if tiered_amount_off_subtotal is not None:
self.tiered_amount_off_subtotal = tiered_amount_off_subtotal
if tiered_percent_off_items is not None:
self.tiered_percent_off_items = tiered_percent_off_items
if tiered_percent_off_shipping is not None:
self.tiered_percent_off_shipping = tiered_percent_off_shipping
if tiered_percent_off_subtotal is not None:
self.tiered_percent_off_subtotal = tiered_percent_off_subtotal
if usable_by is not None:
self.usable_by = usable_by
@property
def affiliate_oid(self):
return self._affiliate_oid
@affiliate_oid.setter
def affiliate_oid(self, affiliate_oid):
self._affiliate_oid = affiliate_oid
@property
def allow_multiple_one_time_codes(self):
return self._allow_multiple_one_time_codes
@allow_multiple_one_time_codes.setter
def allow_multiple_one_time_codes(self, allow_multiple_one_time_codes):
self._allow_multiple_one_time_codes = allow_multiple_one_time_codes
@property
def amount_off_items(self):
return self._amount_off_items
@amount_off_items.setter
def amount_off_items(self, amount_off_items):
self._amount_off_items = amount_off_items
@property
def amount_off_shipping(self):
return self._amount_off_shipping
@amount_off_shipping.setter
def amount_off_shipping(self, amount_off_shipping):
self._amount_off_shipping = amount_off_shipping
@property
def amount_off_shipping_with_items_purchase(self):
return self._amount_off_shipping_with_items_purchase
@amount_off_shipping_with_items_purchase.setter
def amount_off_shipping_with_items_purchase(self, amount_off_shipping_with_items_purchase):
self._amount_off_shipping_with_items_purchase = amount_off_shipping_with_items_purchase
@property
def amount_off_subtotal(self):
return self._amount_off_subtotal
@amount_off_subtotal.setter
def amount_off_subtotal(self, amount_off_subtotal):
self._amount_off_subtotal = amount_off_subtotal
@property
def amount_off_subtotal_and_free_shipping(self):
return self._amount_off_subtotal_and_free_shipping
@amount_off_subtotal_and_free_shipping.setter
def amount_off_subtotal_and_free_shipping(self, amount_off_subtotal_and_free_shipping):
self._amount_off_subtotal_and_free_shipping = amount_off_subtotal_and_free_shipping
@property
def amount_off_subtotal_and_shipping(self):
return self._amount_off_subtotal_and_shipping
@amount_off_subtotal_and_shipping.setter
def amount_off_subtotal_and_shipping(self, amount_off_subtotal_and_shipping):
self._amount_off_subtotal_and_shipping = amount_off_subtotal_and_shipping
@property
def amount_off_subtotal_with_block_purchase(self):
return self._amount_off_subtotal_with_block_purchase
@amount_off_subtotal_with_block_purchase.setter
def amount_off_subtotal_with_block_purchase(self, amount_off_subtotal_with_block_purchase):
self._amount_off_subtotal_with_block_purchase = amount_off_subtotal_with_block_purchase
@property
def amount_off_subtotal_with_items_purchase(self):
return self._amount_off_subtotal_with_items_purchase
@amount_off_subtotal_with_items_purchase.setter
def amount_off_subtotal_with_items_purchase(self, amount_off_subtotal_with_items_purchase):
self._amount_off_subtotal_with_items_purchase = amount_off_subtotal_with_items_purchase
@property
def amount_off_subtotal_with_purchase(self):
return self._amount_off_subtotal_with_purchase
@amount_off_subtotal_with_purchase.setter
def amount_off_subtotal_with_purchase(self, amount_off_subtotal_with_purchase):
self._amount_off_subtotal_with_purchase = amount_off_subtotal_with_purchase
@property
def automatically_apply_coupon_codes(self):
return self._automatically_apply_coupon_codes
@automatically_apply_coupon_codes.setter
def automatically_apply_coupon_codes(self, automatically_apply_coupon_codes):
self._automatically_apply_coupon_codes = automatically_apply_coupon_codes
@property
def buy_one_get_one(self):
return self._buy_one_get_one
@buy_one_get_one.setter
def buy_one_get_one(self, buy_one_get_one):
self._buy_one_get_one = buy_one_get_one
@property
def calculated_description(self):
return self._calculated_description
@calculated_description.setter
def calculated_description(self, calculated_description):
self._calculated_description = calculated_description
@property
def can_be_used_with_other_coupons(self):
return self._can_be_used_with_other_coupons
@can_be_used_with_other_coupons.setter
def can_be_used_with_other_coupons(self, can_be_used_with_other_coupons):
self._can_be_used_with_other_coupons = can_be_used_with_other_coupons
@property
def coupon_oid(self):
return self._coupon_oid
@coupon_oid.setter
def coupon_oid(self, coupon_oid):
self._coupon_oid = coupon_oid
@property
def coupon_type(self):
return self._coupon_type
@coupon_type.setter
def coupon_type(self, coupon_type):
if coupon_type is not None and len(coupon_type) > 65:
raise ValueError("Invalid value for `coupon_type`, length must be less than or equal to `65`")
self._coupon_type = coupon_type
@property
def description(self):
return self._description
@description.setter
def description(self, description):
if description is not None and len(description) > 50:
raise ValueError("Invalid value for `description`, length must be less than or equal to `50`")
self._description = description
@property
def discount_item_with_item_purchase(self):
return self._discount_item_with_item_purchase
@discount_item_with_item_purchase.setter
def discount_item_with_item_purchase(self, discount_item_with_item_purchase):
self._discount_item_with_item_purchase = discount_item_with_item_purchase
@property
def discount_items(self):
return self._discount_items
@discount_items.setter
def discount_items(self, discount_items):
self._discount_items = discount_items
@property
def expiration_dts(self):
return self._expiration_dts
@expiration_dts.setter
def expiration_dts(self, expiration_dts):
self._expiration_dts = expiration_dts
@property
def free_item_and_shipping_with_subtotal(self):
return self._free_item_and_shipping_with_subtotal
@free_item_and_shipping_with_subtotal.setter
def free_item_and_shipping_with_subtotal(self, free_item_and_shipping_with_subtotal):
self._free_item_and_shipping_with_subtotal = free_item_and_shipping_with_subtotal
@property
def free_item_with_item_purchase(self):
return self._free_item_with_item_purchase
@free_item_with_item_purchase.setter
def free_item_with_item_purchase(self, free_item_with_item_purchase):
self._free_item_with_item_purchase = free_item_with_item_purchase
@property
def free_item_with_subtotal(self):
return self._free_item_with_subtotal
@free_item_with_subtotal.setter
def free_item_with_subtotal(self, free_item_with_subtotal):
self._free_item_with_subtotal = free_item_with_subtotal
@property
def free_items_with_item_purchase(self):
return self._free_items_with_item_purchase
@free_items_with_item_purchase.setter
def free_items_with_item_purchase(self, free_items_with_item_purchase):
self._free_items_with_item_purchase = free_items_with_item_purchase
@property
def free_items_with_mixmatch_purchase(self):
return self._free_items_with_mixmatch_purchase
@free_items_with_mixmatch_purchase.setter
def free_items_with_mixmatch_purchase(self, free_items_with_mixmatch_purchase):
self._free_items_with_mixmatch_purchase = free_items_with_mixmatch_purchase
@property
def free_shipping(self):
return self._free_shipping
@free_shipping.setter
def free_shipping(self, free_shipping):
self._free_shipping = free_shipping
@property
def free_shipping_specific_items(self):
return self._free_shipping_specific_items
@free_shipping_specific_items.setter
def free_shipping_specific_items(self, free_shipping_specific_items):
self._free_shipping_specific_items = free_shipping_specific_items
@property
def free_shipping_with_items_purchase(self):
return self._free_shipping_with_items_purchase
@free_shipping_with_items_purchase.setter
def free_shipping_with_items_purchase(self, free_shipping_with_items_purchase):
self._free_shipping_with_items_purchase = free_shipping_with_items_purchase
@property
def free_shipping_with_subtotal(self):
return self._free_shipping_with_subtotal
@free_shipping_with_subtotal.setter
def free_shipping_with_subtotal(self, free_shipping_with_subtotal):
self._free_shipping_with_subtotal = free_shipping_with_subtotal
@property
def hide_from_customer(self):
return self._hide_from_customer
@hide_from_customer.setter
def hide_from_customer(self, hide_from_customer):
self._hide_from_customer = hide_from_customer
@property
def merchant_code(self):
return self._merchant_code
@merchant_code.setter
def merchant_code(self, merchant_code):
if merchant_code is not None and len(merchant_code) > 20:
raise ValueError("Invalid value for `merchant_code`, length must be less than or equal to `20`")
self._merchant_code = merchant_code
@property
def merchant_notes(self):
return self._merchant_notes
@merchant_notes.setter
def merchant_notes(self, merchant_notes):
if merchant_notes is not None and len(merchant_notes) > 250:
raise ValueError("Invalid value for `merchant_notes`, length must be less than or equal to `250`")
self._merchant_notes = merchant_notes
@property
def multiple_amounts_off_items(self):
return self._multiple_amounts_off_items
@multiple_amounts_off_items.setter
def multiple_amounts_off_items(self, multiple_amounts_off_items):
self._multiple_amounts_off_items = multiple_amounts_off_items
@property
def no_discount(self):
return self._no_discount
@no_discount.setter
def no_discount(self, no_discount):
self._no_discount = no_discount
@property
def percent_off_item_with_items_quantity_purchase(self):
return self._percent_off_item_with_items_quantity_purchase
@percent_off_item_with_items_quantity_purchase.setter
def percent_off_item_with_items_quantity_purchase(self, percent_off_item_with_items_quantity_purchase):
self._percent_off_item_with_items_quantity_purchase = percent_off_item_with_items_quantity_purchase
@property
def percent_off_items(self):
return self._percent_off_items
@percent_off_items.setter
def percent_off_items(self, percent_off_items):
self._percent_off_items = percent_off_items
@property
def percent_off_items_and_free_shipping(self):
return self._percent_off_items_and_free_shipping
@percent_off_items_and_free_shipping.setter
def percent_off_items_and_free_shipping(self, percent_off_items_and_free_shipping):
self._percent_off_items_and_free_shipping = percent_off_items_and_free_shipping
@property
def percent_off_items_with_items_purchase(self):
return self._percent_off_items_with_items_purchase
@percent_off_items_with_items_purchase.setter
def percent_off_items_with_items_purchase(self, percent_off_items_with_items_purchase):
self._percent_off_items_with_items_purchase = percent_off_items_with_items_purchase
@property
def percent_off_msrp_items(self):
return self._percent_off_msrp_items
@percent_off_msrp_items.setter
def percent_off_msrp_items(self, percent_off_msrp_items):
self._percent_off_msrp_items = percent_off_msrp_items
@property
def percent_off_retail_price_items(self):
return self._percent_off_retail_price_items
@percent_off_retail_price_items.setter
def percent_off_retail_price_items(self, percent_off_retail_price_items):
self._percent_off_retail_price_items = percent_off_retail_price_items
@property
def percent_off_shipping(self):
return self._percent_off_shipping
@percent_off_shipping.setter
def percent_off_shipping(self, percent_off_shipping):
self._percent_off_shipping = percent_off_shipping
@property
def percent_off_subtotal(self):
return self._percent_off_subtotal
@percent_off_subtotal.setter
def percent_off_subtotal(self, percent_off_subtotal):
self._percent_off_subtotal = percent_off_subtotal
@property
def percent_off_subtotal_and_free_shipping(self):
return self._percent_off_subtotal_and_free_shipping
@percent_off_subtotal_and_free_shipping.setter
def percent_off_subtotal_and_free_shipping(self, percent_off_subtotal_and_free_shipping):
self._percent_off_subtotal_and_free_shipping = percent_off_subtotal_and_free_shipping
@property
def percent_off_subtotal_limit(self):
return self._percent_off_subtotal_limit
@percent_off_subtotal_limit.setter
def percent_off_subtotal_limit(self, percent_off_subtotal_limit):
self._percent_off_subtotal_limit = percent_off_subtotal_limit
@property
def percent_off_subtotal_with_items_purchase(self):
return self._percent_off_subtotal_with_items_purchase
@percent_off_subtotal_with_items_purchase.setter
def percent_off_subtotal_with_items_purchase(self, percent_off_subtotal_with_items_purchase):
self._percent_off_subtotal_with_items_purchase = percent_off_subtotal_with_items_purchase
@property
def percent_off_subtotal_with_subtotal(self):
return self._percent_off_subtotal_with_subtotal
@percent_off_subtotal_with_subtotal.setter
def percent_off_subtotal_with_subtotal(self, percent_off_subtotal_with_subtotal):
self._percent_off_subtotal_with_subtotal = percent_off_subtotal_with_subtotal
@property
def quickbooks_code(self):
return self._quickbooks_code
@quickbooks_code.setter
def quickbooks_code(self, quickbooks_code):
if quickbooks_code is not None and len(quickbooks_code) > 20:
raise ValueError("Invalid value for `quickbooks_code`, length must be less than or equal to `20`")
self._quickbooks_code = quickbooks_code
@property
def restrict_by_postal_codes(self):
return self._restrict_by_postal_codes
@restrict_by_postal_codes.setter
def restrict_by_postal_codes(self, restrict_by_postal_codes):
self._restrict_by_postal_codes = restrict_by_postal_codes
@property
def restrict_by_screen_branding_theme_codes(self):
return self._restrict_by_screen_branding_theme_codes
@restrict_by_screen_branding_theme_codes.setter
def restrict_by_screen_branding_theme_codes(self, restrict_by_screen_branding_theme_codes):
self._restrict_by_screen_branding_theme_codes = restrict_by_screen_branding_theme_codes
@property
def restrict_by_storefronts(self):
return self._restrict_by_storefronts
@restrict_by_storefronts.setter
def restrict_by_storefronts(self, restrict_by_storefronts):
self._restrict_by_storefronts = restrict_by_storefronts
@property
def start_dts(self):
return self._start_dts
@start_dts.setter
def start_dts(self, start_dts):
self._start_dts = start_dts
@property
def super_coupon(self):
return self._super_coupon
@super_coupon.setter
def super_coupon(self, super_coupon):
self._super_coupon = super_coupon
@property
def tiered_amount_off_items(self):
return self._tiered_amount_off_items
@tiered_amount_off_items.setter
def tiered_amount_off_items(self, tiered_amount_off_items):
self._tiered_amount_off_items = tiered_amount_off_items
@property
def tiered_amount_off_subtotal(self):
return self._tiered_amount_off_subtotal
@tiered_amount_off_subtotal.setter
def tiered_amount_off_subtotal(self, tiered_amount_off_subtotal):
self._tiered_amount_off_subtotal = tiered_amount_off_subtotal
@property
def tiered_percent_off_items(self):
return self._tiered_percent_off_items
@tiered_percent_off_items.setter
def tiered_percent_off_items(self, tiered_percent_off_items):
self._tiered_percent_off_items = tiered_percent_off_items
@property
def tiered_percent_off_shipping(self):
return self._tiered_percent_off_shipping
@tiered_percent_off_shipping.setter
def tiered_percent_off_shipping(self, tiered_percent_off_shipping):
self._tiered_percent_off_shipping = tiered_percent_off_shipping
@property
def tiered_percent_off_subtotal(self):
return self._tiered_percent_off_subtotal
@tiered_percent_off_subtotal.setter
def tiered_percent_off_subtotal(self, tiered_percent_off_subtotal):
self._tiered_percent_off_subtotal = tiered_percent_off_subtotal
@property
def usable_by(self):
return self._usable_by
@usable_by.setter
def usable_by(self, usable_by):
if usable_by is not None and len(usable_by) > 50:
raise ValueError("Invalid value for `usable_by`, length must be less than or equal to `50`")
self._usable_by = usable_by
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Coupon, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, Coupon):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f73d9e9e11ebdd211c93b619889a1739178c14b6 | 685 | py | Python | setup.py | dcutt/eo-flow | 9383be39cfa6fe623916cb17b77d307539adac5e | [
"MIT"
] | null | null | null | setup.py | dcutt/eo-flow | 9383be39cfa6fe623916cb17b77d307539adac5e | [
"MIT"
] | null | null | null | setup.py | dcutt/eo-flow | 9383be39cfa6fe623916cb17b77d307539adac5e | [
"MIT"
] | null | null | null | import os
from setuptools import setup, find_packages
def parse_requirements(file):
return sorted(set(
line.partition('#')[0].strip()
for line in open(os.path.join(os.path.dirname(__file__), file))
) - set(''))
setup(
name='eo-flow',
python_requires='>=3.5',
version='1.1.0',
description='Tensorflow wrapper built for prototyping and deploying earth observation deep models.',
author='Sinergise EO research team',
author_email='eoresearch@sinergise.com',
packages=find_packages(),
install_requires=parse_requirements('requirements.txt'),
extras_require={
'DEV': parse_requirements('requirements-dev.txt')
}
)
| 27.4 | 104 | 0.684672 | import os
from setuptools import setup, find_packages
def parse_requirements(file):
return sorted(set(
line.partition('#')[0].strip()
for line in open(os.path.join(os.path.dirname(__file__), file))
) - set(''))
setup(
name='eo-flow',
python_requires='>=3.5',
version='1.1.0',
description='Tensorflow wrapper built for prototyping and deploying earth observation deep models.',
author='Sinergise EO research team',
author_email='eoresearch@sinergise.com',
packages=find_packages(),
install_requires=parse_requirements('requirements.txt'),
extras_require={
'DEV': parse_requirements('requirements-dev.txt')
}
)
| true | true |
f73da007b7a7b3d23f55105514956d1d3fa81264 | 11,210 | py | Python | eve/utils.py | gregorynicholas/eve | 4562ec94d1d2ee9174faa244bd9fc3797106dfde | [
"BSD-3-Clause"
] | null | null | null | eve/utils.py | gregorynicholas/eve | 4562ec94d1d2ee9174faa244bd9fc3797106dfde | [
"BSD-3-Clause"
] | null | null | null | eve/utils.py | gregorynicholas/eve | 4562ec94d1d2ee9174faa244bd9fc3797106dfde | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
eve.utils
~~~~~~~~~
Utility functions and classes.
:copyright: (c) 2013 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
import eve
import hashlib
from flask import request
from flask import current_app as app
from datetime import datetime, timedelta
from bson.json_util import dumps
import werkzeug.exceptions
class Config(object):
""" Helper class used trorough the code to access configuration settings.
If the main flaskapp object is not instantiated yet, returns the default
setting in the eve __init__.py module, otherwise returns the flaskapp
config value (which value might override the static defaults).
"""
def __getattr__(self, name):
try:
# will return 'working outside of application context' if the
# current_app is not available yet
return app.config.get(name)
except:
# fallback to the module-level default value
return getattr(eve, name)
# makes an instance of the Config helper class available to all the modules
# importing eve.utils.
config = Config()
class ParsedRequest(object):
""" This class, by means of its attributes, describes a client request.
.. versonchanged:: 0.1.0
'embedded' keyword.
.. versionchanged:: 0.0.6
Projection queries ('?projection={"name": 1}')
"""
# `where` value of the query string (?where). Defaults to None.
where = None
# `projection` value of the query string (?projection). Defaults to None.
projection = None
# `sort` value of the query string (?sort). Defaults to None.
sort = None
# `page` value of the query string (?page). Defaults to 1.
page = 1
# `max_result` value of the query string (?max_results). Defaults to
# `PAGINATION_DEFAULT` unless pagination is disabled.
max_results = 0
# `If-Modified-Since` request header value. Defaults to None.
if_modified_since = None
# `If-None_match` request header value. Defaults to None.
if_none_match = None
# `If-Match` request header value. Default to None.
if_match = None
# `embedded` value of the query string (?embedded). Defaults to None.
embedded = None
def parse_request(resource):
""" Parses a client request, returning instance of :class:`ParsedRequest`
containing relevant request data.
:param resource: the resource currently being accessed by the client.
.. versionchagend:: 0.1.0
Support for embedded documents.
.. versionchanged:: 0.0.6
projection queries ('?projection={"name": 1}')
.. versionchanged: 0.0.5
Support for optional filters, sorting and pagination.
"""
args = request.args
headers = request.headers
r = ParsedRequest()
if config.DOMAIN[resource]['allowed_filters']:
r.where = args.get('where')
if config.DOMAIN[resource]['projection']:
r.projection = args.get('projection')
if config.DOMAIN[resource]['sorting']:
r.sort = args.get('sort')
if config.DOMAIN[resource]['embedding']:
r.embedded = args.get('embedded')
max_results_default = config.PAGINATION_DEFAULT if \
config.DOMAIN[resource]['pagination'] else 0
try:
r.max_results = int(float(args['max_results']))
assert r.max_results > 0
except (ValueError, werkzeug.exceptions.BadRequestKeyError,
AssertionError):
r.max_results = max_results_default
if config.DOMAIN[resource]['pagination']:
# TODO should probably return a 400 if 'page' is < 1 or non-numeric
if 'page' in args:
try:
r.page = abs(int(args.get('page'))) or 1
except ValueError:
pass
# TODO should probably return a 400 if 'max_results' < 1 or
# non-numeric
if r.max_results > config.PAGINATION_LIMIT:
r.max_results = config.PAGINATION_LIMIT
if headers:
r.if_modified_since = weak_date(headers.get('If-Modified-Since'))
# TODO if_none_match and if_match should probably be validated as
# valid etags, returning 400 on fail. Not sure however since
# we're just going to use these for string-type comparision
r.if_none_match = headers.get('If-None-Match')
r.if_match = headers.get('If-Match')
return r
def weak_date(date):
""" Returns a RFC-1123 string corresponding to a datetime value plus
a 1 second timedelta. This is needed because when saved, documents
LAST_UPDATED values have higher resolution than If-Modified-Since's, which
is limited to seconds.
:param date: the date to be adjusted.
"""
return str_to_date(date) + timedelta(seconds=1) if date else None
def str_to_date(string):
""" Converts a RFC-1123 string to the corresponding datetime value.
:param string: the RFC-1123 string to convert to datetime value.
"""
return datetime.strptime(string, config.DATE_FORMAT) if string else None
def date_to_str(date):
""" Converts a datetime value to the corresponding RFC-1123 string.
:param date: the datetime value to convert.
"""
return datetime.strftime(date, config.DATE_FORMAT) if date else None
def collection_link(resource):
""" Returns a link to a resource endpoint.
:param resource: the resource name.
.. versionchanged:: 0.2
Use new 'resource_title' setting for link title.
.. versionchanged:: 0.0.3
Now returning a JSON link
"""
return {'title': '%s' % config.DOMAIN[resource]['resource_title'],
'href': '%s' % resource_uri(resource)}
def document_link(resource, document_id):
""" Returns a link to a document endpoint.
:param resource: the resource name.
:param document_id: the document unique identifier.
.. versionchanged:: 0.1.0
No more trailing slashes in links.
.. versionchanged:: 0.0.3
Now returning a JSON link
"""
return {'title': '%s' % config.DOMAIN[resource]['item_title'],
'href': '%s/%s' % (resource_uri(resource), document_id)}
def home_link():
""" Returns a link to the API entry point/home page.
.. versionchanged:: 0.1.1
Handle the case of SERVER_NAME being None.
.. versionchanged:: 0.0.3
Now returning a JSON link.
"""
server_name = config.SERVER_NAME if config.SERVER_NAME else ''
return {'title': 'home',
'href': '%s%s' % (server_name, api_prefix())}
def resource_uri(resource):
""" Returns the absolute URI to a resource.
.. versionchanged:: 0.1.1
URL prefixes are now included in config.URLS items, no more need to
explicitly add them to resource links.
Handle the case of SERVER_NAME being None.
.. versionchanged:: 0.1.0
No more trailing slashes in links.
:param resource: the resource name.
"""
server_name = config.SERVER_NAME if config.SERVER_NAME else ''
return '%s/%s' % (server_name, config.URLS[resource])
def api_prefix(url_prefix=None, api_version=None):
""" Returns the prefix to API endpoints, according to the URL_PREFIX and
API_VERSION configuration settings.
:param url_prefix: the prefix string. If `None`, defaults to the current
:class:`~eve.flaskapp` configuration setting.
The class itself will call this function while
initializing. In that case, it will pass its settings
as arguments (as they are not externally available yet)
:param api_version: the api version string. If `None`, defaults to the
current :class:`~eve.flaskapp` configuration setting.
The class itself will call this function while
initializing. In that case, it will pass its settings
as arguments (as they are not externally available yet)
.. versionadded:: 0.0.3
"""
if url_prefix is None:
url_prefix = config.URL_PREFIX
if api_version is None:
api_version = config.API_VERSION
prefix = '/%s' % url_prefix if url_prefix else ''
version = '/%s' % api_version if api_version else ''
return prefix + version
def querydef(max_results=config.PAGINATION_DEFAULT, where=None, sort=None,
page=None):
""" Returns a valid query string.
:param max_results: `max_result` part of the query string. Defaults to
`PAGINATION_DEFAULT`
:param where: `where` part of the query string. Defaults to None.
:param sort: `sort` part of the query string. Defaults to None.
:param page: `page` parte of the query string. Defaults to None.
"""
where_part = '&where=%s' % where if where else ''
sort_part = '&sort=%s' % sort if sort else ''
page_part = '&page=%s' % page if page and page > 1 else ''
max_results_part = 'max_results=%s' % max_results \
if max_results != config.PAGINATION_DEFAULT else ''
return ('?' + ''.join([max_results_part, where_part, sort_part,
page_part]).lstrip('&')).rstrip('?')
def document_etag(value):
""" Computes and returns a valid ETag for the input value.
:param value: the value to compute the ETag with.
.. versionchanged:: 0.0.4
Using bson.json_util.dumps over str(value) to make etag computation
consistent between different runs and/or server instances (#16).
"""
h = hashlib.sha1()
h.update(dumps(value, sort_keys=True).encode('utf-8'))
return h.hexdigest()
def extract_key_values(key, d):
""" Extracts all values that match a key, even in nested dicts.
:param key: the lookup key.
:param d: the dict to scan.
.. versionadded: 0.0.7
"""
if key in d:
yield d[key]
for k in d:
if isinstance(d[k], dict):
for j in extract_key_values(key, d[k]):
yield j
def request_method():
""" Returns the proper request method, also taking into account the
possibile override requested by the client (via 'X-HTTP-Method-Override'
header).
.. versionchanged: 0.1.0
Supports overriding of any HTTP Method (#95).
.. versionadded: 0.0.7
"""
return request.headers.get('X-HTTP-Method-Override', request.method)
def debug_error_message(msg):
""" Returns the error message `msg` if config.DEBUG is True
otherwise returns `None` which will cause Werkzeug to provide
a generic error message
:param msg: The error message to return if config.DEBUG is True
.. versionadded: 0.0.9
"""
if getattr(config, 'DEBUG', False):
return msg
return None
def validate_filters(where, resource):
""" Report any filter which is not allowed by `allowed_filters`
:param where: the where clause, as a dict.
:param resource: the resource being inspected.
.. versionadded: 0.0.9
"""
allowed = config.DOMAIN[resource]['allowed_filters']
if '*' not in allowed:
for filt, _ in where.items():
if filt not in allowed:
return "filter on '%s' not allowed" % filt
return None
| 31.846591 | 79 | 0.648885 |
import eve
import hashlib
from flask import request
from flask import current_app as app
from datetime import datetime, timedelta
from bson.json_util import dumps
import werkzeug.exceptions
class Config(object):
def __getattr__(self, name):
try:
return app.config.get(name)
except:
return getattr(eve, name)
config = Config()
class ParsedRequest(object):
where = None
projection = None
sort = None
page = 1
max_results = 0
if_modified_since = None
if_none_match = None
if_match = None
embedded = None
def parse_request(resource):
args = request.args
headers = request.headers
r = ParsedRequest()
if config.DOMAIN[resource]['allowed_filters']:
r.where = args.get('where')
if config.DOMAIN[resource]['projection']:
r.projection = args.get('projection')
if config.DOMAIN[resource]['sorting']:
r.sort = args.get('sort')
if config.DOMAIN[resource]['embedding']:
r.embedded = args.get('embedded')
max_results_default = config.PAGINATION_DEFAULT if \
config.DOMAIN[resource]['pagination'] else 0
try:
r.max_results = int(float(args['max_results']))
assert r.max_results > 0
except (ValueError, werkzeug.exceptions.BadRequestKeyError,
AssertionError):
r.max_results = max_results_default
if config.DOMAIN[resource]['pagination']:
if 'page' in args:
try:
r.page = abs(int(args.get('page'))) or 1
except ValueError:
pass
if r.max_results > config.PAGINATION_LIMIT:
r.max_results = config.PAGINATION_LIMIT
if headers:
r.if_modified_since = weak_date(headers.get('If-Modified-Since'))
r.if_none_match = headers.get('If-None-Match')
r.if_match = headers.get('If-Match')
return r
def weak_date(date):
return str_to_date(date) + timedelta(seconds=1) if date else None
def str_to_date(string):
return datetime.strptime(string, config.DATE_FORMAT) if string else None
def date_to_str(date):
return datetime.strftime(date, config.DATE_FORMAT) if date else None
def collection_link(resource):
return {'title': '%s' % config.DOMAIN[resource]['resource_title'],
'href': '%s' % resource_uri(resource)}
def document_link(resource, document_id):
return {'title': '%s' % config.DOMAIN[resource]['item_title'],
'href': '%s/%s' % (resource_uri(resource), document_id)}
def home_link():
server_name = config.SERVER_NAME if config.SERVER_NAME else ''
return {'title': 'home',
'href': '%s%s' % (server_name, api_prefix())}
def resource_uri(resource):
server_name = config.SERVER_NAME if config.SERVER_NAME else ''
return '%s/%s' % (server_name, config.URLS[resource])
def api_prefix(url_prefix=None, api_version=None):
if url_prefix is None:
url_prefix = config.URL_PREFIX
if api_version is None:
api_version = config.API_VERSION
prefix = '/%s' % url_prefix if url_prefix else ''
version = '/%s' % api_version if api_version else ''
return prefix + version
def querydef(max_results=config.PAGINATION_DEFAULT, where=None, sort=None,
page=None):
where_part = '&where=%s' % where if where else ''
sort_part = '&sort=%s' % sort if sort else ''
page_part = '&page=%s' % page if page and page > 1 else ''
max_results_part = 'max_results=%s' % max_results \
if max_results != config.PAGINATION_DEFAULT else ''
return ('?' + ''.join([max_results_part, where_part, sort_part,
page_part]).lstrip('&')).rstrip('?')
def document_etag(value):
h = hashlib.sha1()
h.update(dumps(value, sort_keys=True).encode('utf-8'))
return h.hexdigest()
def extract_key_values(key, d):
if key in d:
yield d[key]
for k in d:
if isinstance(d[k], dict):
for j in extract_key_values(key, d[k]):
yield j
def request_method():
return request.headers.get('X-HTTP-Method-Override', request.method)
def debug_error_message(msg):
if getattr(config, 'DEBUG', False):
return msg
return None
def validate_filters(where, resource):
allowed = config.DOMAIN[resource]['allowed_filters']
if '*' not in allowed:
for filt, _ in where.items():
if filt not in allowed:
return "filter on '%s' not allowed" % filt
return None
| true | true |
f73da2181f1cfdd65a4c8bdd67e1ca34b902e63c | 45,812 | py | Python | main_old.py | primasanjaya/muat-github | 4603c6c960188643fb38d8dba82e0dcc1ba00b40 | [
"Apache-2.0"
] | null | null | null | main_old.py | primasanjaya/muat-github | 4603c6c960188643fb38d8dba82e0dcc1ba00b40 | [
"Apache-2.0"
] | null | null | null | main_old.py | primasanjaya/muat-github | 4603c6c960188643fb38d8dba82e0dcc1ba00b40 | [
"Apache-2.0"
] | 1 | 2022-03-22T15:35:57.000Z | 2022-03-22T15:35:57.000Z | # make deterministic
from mingpt.utils import set_seed
set_seed(42)
#frompc
import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F
import math
from torch.utils.data import Dataset
from mingpt.model import *
from mingpt.trainer import Trainer, TrainerConfig
from mingpt.utils import sample
import logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
import pdb
from dataset.tcga_dataset import TCGA
from dataset.tcga_conv_dataset import TCGAConv
from dataset.pcawg_conv_dataset import *
from dataset.pcawg_dataset import PCAWG
from dataset.pcawg_emb_dataset import PCAWGEmb
from dataset.pcawg_sepdataset import PCAWGSep
from dataset.pcawg_2stream import PCAWG2Stream
from dataset.tcgadisttabletoemb_dataset import TCGADist
from dataset.tcgamutdist_dataset import TCGAMutDist
from dataset.tcgamutdistasone_dataset import TCGAMutDistasOne
from dataset.tcgapcawg_dataset import TCGAPCAWG
from dataset.newtcgapcawg_dataset import NewTCGAPCAWG
from dataset.finaltcgapcawg_dataset import FinalTCGAPCAWG
from mingpt.bert import *
from preprocessing.dmm.dmm import *
from preprocessing.fromvcffiles import *
import argparse
import os
import pandas as pd
def translate_args(args):
cwd = os.getcwd()
args.cwd = cwd
args.mutation_coding = cwd + '/preprocessing/dmm/data/mutation_codes_sv.tsv'
args.input = args.data_dir
args.output = cwd + '/data/raw/out/00b9d0e6-69dc-4345-bffd-ce32880c8eef.consensus.20160830.somatic.snv_mnv.tsv.gz'
args.reference = '/csc/epitkane/data/ref_genomes/hs37d5_1000GP/hs37d5_1000GP.fa'
args.context = 8
args.sample_id = 'submitted_sample_id'
args.tmp = cwd + '/data/raw/tmp/'
args.verbose = 1
args.generate_negatives = 1
args.report_interval = 100000
return args
def get_args():
parser = argparse.ArgumentParser(description='TCGA / PEACOCK experiment')
# DATASET
parser.add_argument('--cwd', type=str,help='project dir')
parser.add_argument('--dataset', type=str, default='pcawg',
help='dataset')
# MODEL
parser.add_argument('--arch', type=str, default=None,
help='architecture')
# DIRECTORY
parser.add_argument('--data-dir', type=str, default=None,
help='data directory')
parser.add_argument('--crossdata-dir', type=str, default=None,
help='data directory')
parser.add_argument('--adddata-dir', type=str, default=None,
help='data directory')
parser.add_argument('--n-class', type=int, default=None,
help='number of class')
parser.add_argument('--batch-size', type=int, default=1,
help='batch size')
parser.add_argument('--block-size', type=int, default=1000,
help='block of sequence')
parser.add_argument('--context-length', type=int, default=256,
help='length of sequence')
parser.add_argument('--n-layer', type=int, default=1,
help='attention layer')
parser.add_argument('--n-head', type=int, default=8,
help='attention head')
parser.add_argument('--n-emb', type=int, default=128,
help='embedding dimension')
parser.add_argument('--n-vocab-type', type=int, default=1,
help='embedding dimension')
parser.add_argument('--tag', type=str, default='myexperiment',
help='dataset')
parser.add_argument('--train', action='store_true', default=False)
parser.add_argument('--predict', action='store_true', default=False)
parser.add_argument('--trainbp', action='store_true', default=False)
parser.add_argument('--vis-weight', action='store_true', default=False)
parser.add_argument('--top-weight', action='store_true', default=False)
parser.add_argument('--visval', action='store_true', default=False)
parser.add_argument('--single-predict', action='store_true', default=False)
parser.add_argument('--create-dataset', action='store_true', default=False)
parser.add_argument('--two-streams', action='store_true', default=False)
parser.add_argument('--three-streams', action='store_true', default=False)
parser.add_argument('--filter', action='store_true', default=False)
parser.add_argument('--bert', action='store_true', default=False)
parser.add_argument('--withclass', action='store_true', default=False)
parser.add_argument('--default', action='store_true', default=False)
parser.add_argument('--addposition', action='store_true', default=False)
parser.add_argument('--oneDhot', action='store_true', default=False)
parser.add_argument('--addorder', action='store_true', default=False)
parser.add_argument('--addtoken', action='store_true', default=False)
parser.add_argument('--addtriplet', action='store_true', default=False)
parser.add_argument('--addtriplettoken', action='store_true', default=False)
    # --- token / encoding options for the mutation-sequence model ---
    parser.add_argument('--addgestoken', action='store_true', default=False)
    parser.add_argument('--addrt', action='store_true', default=False)
    parser.add_argument('--addlongcontext', action='store_true', default=False)
    parser.add_argument('--tokenizedlongcontext', action='store_true', default=False)
    parser.add_argument('--ohlongcontext', action='store_true', default=False)
    parser.add_argument('--flattenohlongcontext', action='store_true', default=False)
    parser.add_argument('--addpostoken', action='store_true', default=False)
    parser.add_argument('--addrttoken', action='store_true', default=False)
    parser.add_argument('--balance', action='store_true', default=False)
    parser.add_argument('--l1', action='store_true', default=False)
    # NOTE(review): the help text below looks copy-pasted -- '--fold' is
    # used elsewhere as a cross-validation fold index, not a mutation count.
    parser.add_argument('--fold', type=int, default=1,
                        help='number of mutation')
    parser.add_argument('--output-mode', type=str, default='token',help='dataset')
    # --- run-mode flags (each maps to an `if args.<flag>` branch in main) ---
    parser.add_argument('--rbm', action='store_true', default=False)
    parser.add_argument('--newtraining', action='store_true', default=False)
    parser.add_argument('--newpredict', action='store_true', default=False)
    parser.add_argument('--newpredict2', action='store_true', default=False)
    parser.add_argument('--normal', action='store_true', default=False)
    parser.add_argument('--freezeemb', action='store_true', default=False)
    parser.add_argument('--predictvis', action='store_true', default=False)
    parser.add_argument('--crossdata', action='store_true', default=False)
    parser.add_argument('--nummut', type=int, default=0,
                        help='number of mutation')
    parser.add_argument('--frac', type=float, default=0,
                        help='frac')
    parser.add_argument('--mutratio', type=str, default='',
                        help='mutation ratio')
    parser.add_argument('--spectral', action='store_true', default=False)
    parser.add_argument('--finalpredict', action='store_true', default=False)
    parser.add_argument('--finalpredictnewdata', action='store_true', default=False)
    parser.add_argument('--single-pred-vcf', action='store_true', default=False)
    parser.add_argument('--vis-attention', action='store_true', default=False)
    # --- preprocessing (DMM) options, consumed by the --single-pred-vcf
    # pipeline; several are overwritten by translate_args() at runtime ---
    parser.add_argument('-v', '--verbose', type=int, help='Try to be more verbose')
    parser.add_argument('--mutation-coding', help='Mutation coding table ("ref alt code"/line) [{}]'.format(\
                        defaults['mutation_coding']), metavar='fn')
    parser.add_argument('--config', help='Read parameters from a JSON file')
    parser.add_argument('--data-config',
                        help='Column specification for --input, --validation and --aux-data [{}]'.format(\
                        defaults['data_config']))
    parser.add_argument('--random-seed', default=None, type=int, metavar='seed')
    parser.add_argument('--tmp')
    parser.add_argument('-i', '--input', action='append', metavar='dir(s)',
                        help='Either a directory with vcf/maf[.gz] files or a vcf/maf[.gz] file (-i may be given more than once)')
    parser.add_argument('-o', '--output', metavar='fn', help='Preprocessed mutation data')
    parser.add_argument('-r', '--reference', metavar='ref', help='Reference genome (fasta) [{}]'.format(\
                        defaults['reference']))
    parser.add_argument('-k', '--context', help='Sequence context length (power of 2) [{}]'.format(\
                        defaults['context']), metavar='bp', type=int,default=8)
    parser.add_argument('-e', '--errors', metavar='fn',
                        help='File where to log errors [{}]'.format(defaults['errors']))
    parser.add_argument('--no-ref-preload', help='Use samtools to read reference on demand (slow but fast startup) [false]',
                        action='store_true')
    parser.add_argument('--no-filter', help='Process all variants [default=only PASS/. variants]',
                        action='store_true')
    parser.add_argument('--sample-id', help='Sample identifier column name in MAF file')
    parser.add_argument('-n', '--generate_negatives', help='Ratio of negative to positive examples [{}]. Two passes on data are required for n>0.'.format(\
                        defaults['negative_ratio']), type=float)
    parser.add_argument('--median-variant-type-negatives', action='store_true',
                        help='Generate median number of each variant type as negative examples for each sample')
    parser.add_argument('--median-variant-type-file', help='Load median variant numbers from a file')
    parser.add_argument('--negative-generation-mode', help='[generate] output in one go (default), [augment] input files or [process] augmented files', default='generate')
    # NOTE(review): "write toutputo output" in the help string below looks
    # like a typo for "write to output" -- left unchanged here (runtime text).
    parser.add_argument('--info-column', help='Input column name to write toutputo output (MAF input only). May be specified more than once.', action='append')
    parser.add_argument('--report-interval', help='Interval to report number of variants processed',
                        type=int)
    parser.add_argument('--array-jobs', help='How many array jobs in total', type=int)
    parser.add_argument('--array-index', help='Index of this job', type=int)
    parser.add_argument('--nope', help='Only one variant per output sequence', action='store_true')
    parser.add_argument('--no-overwrite', help='Do not overwrite if output exists', action='store_true')
    args = parser.parse_args()
    return args
def get_dataloader(args,train_val,load):
    """Build the FinalTCGAPCAWG dataset for the requested split.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed command-line options; supplies dataset name, directories,
        fold, block size and the token-encoding flags.
    train_val : str
        Split name, forwarded as the dataset ``mode`` (e.g. 'training',
        'validation', 'testing').
    load : bool
        Kept for interface compatibility; the original code never forwarded
        it and always constructed the dataset with ``load=False``, so this
        implementation does the same.

    Returns
    -------
    FinalTCGAPCAWG

    Raises
    ------
    ValueError
        If ``args.dataset`` is not one of the supported names (the original
        code crashed with ``NameError`` in that case).

    Notes
    -----
    The original duplicated an identical constructor call four times (two
    dataset groups x two splits) and silently crashed for
    ``train_val='testing'`` even though the predict modes request it; all
    cases are collapsed into a single call here.
    """
    supported = ('finalpcawg', 'wgspcawg', 'finaltcga', 'westcga')
    if args.dataset not in supported:
        raise ValueError('unsupported dataset: {}'.format(args.dataset))
    return FinalTCGAPCAWG(dataset_name=args.dataset,
                          data_dir=args.data_dir,
                          mode=train_val,
                          curr_fold=args.fold,
                          block_size=args.block_size,
                          load=False,
                          mutratio=args.mutratio,
                          addtriplettoken=args.addtriplettoken,
                          addpostoken=args.addpostoken,
                          addgestoken=args.addgestoken,
                          addrt=args.addrt,
                          nummut=args.nummut,
                          frac=args.frac,
                          crossdata=args.crossdata,
                          crossdatadir=args.crossdata_dir,
                          adddatadir=args.adddata_dir)
def get_model(args,mconf):
    """Instantiate the architecture named by ``args.arch`` with config ``mconf``.

    Parameters
    ----------
    args : argparse.Namespace
        Only ``args.arch`` (the architecture class name) is consulted.
    mconf : GPTConfig or BertConfig
        Model configuration passed to the chosen class's constructor.

    Returns
    -------
    The instantiated model.

    Raises
    ------
    ValueError
        For an unrecognised ``args.arch`` (the original fell through and
        raised ``NameError: model``).

    Notes
    -----
    Fix: the original ``'Conv2DTransformer'`` branch assigned the bare class
    object (misspelled ``Conv2DTransform``) without calling it, so that arch
    returned an un-instantiated class; presumably ``Conv2DTransformer(mconf)``
    was intended -- confirm the class name against mingpt.model.
    The 43-way elif chain is replaced by a name->class dispatch table; names
    are resolved at call time, exactly as the elif chain did.
    """
    architectures = {
        'GPTConv': GPTConv,
        'GPTConvDeeper': GPTConvDeeper,
        'GPTNonPosition': GPTNonPosition,
        'CTransformer': CTransformer,
        'ConvTransformer': ConvTransformer,
        'Conv2DTransformer': Conv2DTransformer,
        'Transformer2Stream': Transformer2Stream,
        'CTransformerDNN': CTransformerDNN,
        'CTransformerMutDist': CTransformerMutDist,
        'SimpleAttention': SimpleAttention,
        'BertForSequenceClassification': BertForSequenceClassification,
        'BertwithPosition': BertwithPosition,
        'CTransformerWithPaddingIDX': CTransformerWithPaddingIDX,
        'Conv2DTransformerOnehot': Conv2DTransformerOnehot,
        'CTransformerWithPaddingIDXandfirstvec': CTransformerWithPaddingIDXandfirstvec,
        'Conv2DTransformerOnehotDeeper': Conv2DTransformerOnehotDeeper,
        'DNNTransformerOnehotDeeper': DNNTransformerOnehotDeeper,
        'CTransformerWithPosition': CTransformerWithPosition,
        'CTransformerWithPositionConcate': CTransformerWithPositionConcate,
        'DNNTransformerOnehotDeeperwithPosition': DNNTransformerOnehotDeeperwithPosition,
        'DNNTransformerOnehotDeeperwithPositionwithOrder': DNNTransformerOnehotDeeperwithPositionwithOrder,
        'CTransformerDNNWithPositionConcateToken': CTransformerDNNWithPositionConcateToken,
        'CTransformerDNNWithPositionConcateTokenSep': CTransformerDNNWithPositionConcateTokenSep,
        'CTransformerRBMWithPositionConcate': CTransformerRBMWithPositionConcate,
        'TripletPositionTokenandOnehot': TripletPositionTokenandOnehot,
        'PositionToken': PositionToken,
        'TripletPositionTokenandOnehotConcAfter': TripletPositionTokenandOnehotConcAfter,
        'TripletPositionRTToken': TripletPositionRTToken,
        'FullConvTransformer': FullConvTransformer,
        'TripletPositionTokenBest': TripletPositionTokenBest,
        'TripletPositionTokenRT': TripletPositionTokenRT,
        'EmbFC': EmbFC,
        'TripletPositionTokenOldBest': TripletPositionTokenOldBest,
        'CTransformerPCAWGtoTCGA_TPGES': CTransformerPCAWGtoTCGA_TPGES,
        'CTransformerPCAWGtoTCGA_T': CTransformerPCAWGtoTCGA_T,
        'TripletPosition': TripletPosition,
        'TripletPositionGES': TripletPositionGES,
        'TripletPositionGESRT': TripletPositionGESRT,
        'TripletPositionF': TripletPositionF,
        'TripletPositionGESF': TripletPositionGESF,
        'CTransformerF': CTransformerF,
        'EmbFCPos': EmbFCPos,
        'EmbFCPosGES': EmbFCPosGES,
    }
    try:
        model_cls = architectures[args.arch]
    except KeyError:
        raise ValueError('unknown architecture: {}'.format(args.arch))
    return model_cls(mconf)
def fold_split(args):
    """Assign every sample under ``args.data_dir`` to 10 cross-validation folds.

    Layout expectation: ``args.data_dir`` contains one sub-directory per class
    (names without a '.'; files with extensions are skipped) and each
    sub-directory contains one file per sample.  Classes with fewer than
    ``folds`` samples are dropped.

    Side effects (all written into ``args.data_dir``):
    - ``sample_info_<dataset>.csv``: kept classes with index and sample count.
    - ``train_split.csv`` / ``validation_split.csv`` / ``test_split.csv``:
      per-fold train / validation / test assignments.

    Fixes vs. original: ``DataFrame.append`` (removed in pandas 2.0) replaced
    with ``pd.concat``; slices are ``.copy()``-ed before the ``split`` column
    is overwritten, avoiding chained-assignment warnings.  Output content is
    unchanged.
    """
    folds = 10
    # Class directories = entries without a file extension, sorted for a
    # deterministic class index.
    class_name = sorted(i for i in os.listdir(args.data_dir) if len(i.split('.')) == 1)
    num_samples = [len(os.listdir(args.data_dir + i)) for i in class_name]
    pd_class_info = pd.DataFrame({'class_name': class_name, 'n_samples': num_samples})
    # Keep only classes with at least one sample per fold.
    class_used = pd_class_info.loc[pd_class_info['n_samples'] >= folds]
    class_used = class_used.rename_axis('class_index').reset_index()
    class_used.to_csv(args.data_dir + 'sample_info_' + args.dataset + '.csv', index=False)
    # Round-robin every sample of every kept class into splits 1..folds.
    tuple_list = []
    for nm_class in class_used['class_name']:
        num_sample = class_used.loc[class_used['class_name'] == nm_class]['n_samples'].values[0]
        class_idx = class_used.loc[class_used['class_name'] == nm_class]['class_index'].values[0]
        samples = os.listdir(args.data_dir + nm_class)
        count_split = 0
        for i in range(0, num_sample):
            count_split = count_split + 1
            if count_split > folds:
                count_split = 1
            tuple_list.append(tuple([nm_class, class_idx, samples[i], count_split]))
    all_split = pd.DataFrame(tuple_list, columns=['class_name', 'class_index', 'name_samples', 'split'])
    # NOTE(review): the loop below covers folds 1..folds-1 only, so split 10
    # is never used as a test fold -- this mirrors the original exactly;
    # confirm whether range(1, folds + 1) was intended.
    test_parts, validation_parts, train_parts = [], [], []
    for i in range(1, folds):
        test = all_split.loc[all_split['split'] == i]
        train = all_split.loc[all_split['split'] != i]
        # Validation fold = the next split after the test fold (wrapping).
        split_min = i + 1
        if split_min >= folds:
            split_min = 1
        validation = train.loc[train['split'] == split_min].copy()
        train = train.loc[train['split'] != split_min].copy()
        # Re-label both partitions with the fold index they belong to.
        train['split'] = i
        validation['split'] = i
        test_parts.append(test)
        validation_parts.append(validation)
        train_parts.append(train)
    pd.concat(train_parts).to_csv(args.data_dir + 'train_split.csv', index=False)
    pd.concat(validation_parts).to_csv(args.data_dir + 'validation_split.csv', index=False)
    pd.concat(test_parts).to_csv(args.data_dir + 'test_split.csv', index=False)
if __name__ == '__main__':
    # Script entry point.  Each "if args.<flag>" section below is an
    # independent run mode selected from the command line; several can fire
    # in one invocation if multiple flags are passed.
    best_accuracy=0
    args = get_args()
    if args.train:
        # Training mode: build train/validation dataloaders, then train a
        # BERT-style, RBM-style or plain model depending on further flags.
        #class_info = fold_split(args)
        block_size = args.block_size # spatial extent of the model for its context
        train_dataset = get_dataloader(args=args,train_val='training',load= not args.create_dataset)
        validation_dataset = get_dataloader(args=args,train_val='validation',load= not args.create_dataset)
        if args.bert:
            # BERT path: config chosen by --default / --addposition.
            if args.default:
                mconf = BertConfig(vocab_size_or_config_json_file = train_dataset.vocab_size,num_class=args.n_class)
            else:
                if args.addposition:
                    mconf = BertConfig(vocab_size_or_config_json_file = train_dataset.vocab_size,num_class=args.n_class,num_hidden_layers=args.n_layer,hidden_size=args.n_emb,num_attention_heads=args.n_head,type_vocab_size=args.n_vocab_type,position_size=train_dataset.position_size)
                else:
                    mconf = BertConfig(vocab_size_or_config_json_file = train_dataset.vocab_size,num_class=args.n_class,num_hidden_layers=args.n_layer,hidden_size=args.n_emb,num_attention_heads=args.n_head,type_vocab_size=args.n_vocab_type)
            model = get_model(args,mconf)
            string_logs = f"{args.tag}_{args.arch}_bs{args.block_size:.0f}_nl{args.n_layer:.0f}_nh{args.n_head:.0f}_ne{args.n_emb:.0f}_cl{args.context_length:.0f}/"
            tconf = TrainerConfig(max_epochs=150, batch_size=1, learning_rate=0.001,
                                lr_decay=True, warmup_tokens=1*150, final_tokens=150*len(train_dataset)*block_size,
                                num_workers=1,string_logs=string_logs, args=args)
            trainer = Trainer(model, train_dataset, validation_dataset, tconf)
            trainer.bert_train()
        # NOTE(review): the `else` below pairs with `if args.rbm`, so the
        # plain-training branch also runs right after BERT training whenever
        # --rbm is absent -- confirm that is intended.
        if args.rbm:
            num_class=args.n_class
            mconf = GPTConfig(vocab_size=train_dataset.vocab_size, block_size=block_size,num_class=num_class,
                        n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256)
            if args.addposition:
                mconf = GPTConfig(vocab_size=train_dataset.vocab_size, block_size=block_size,num_class=num_class,
                            n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,position_size=train_dataset.position_size)
            model = get_model(args,mconf)
            string_logs = f"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/"
            tconf = TrainerConfig(max_epochs=150, batch_size=1, learning_rate=6e-4,
                                lr_decay=True, warmup_tokens=1*150, final_tokens=150*len(train_dataset)*block_size,
                                num_workers=1,string_logs=string_logs, args=args)
            trainer = Trainer(model, train_dataset, validation_dataset, tconf)
            # Multi-token output modes (e.g. "token_pos") use the multi-stream
            # trainer; a single mode falls back to basic training.
            output_mode = args.output_mode.split("_")
            if len(output_mode)>1:
                trainer.multi_stream_rbm(len(output_mode))
            else:
                trainer.basic_train()
        else:
            num_class=args.n_class
            mconf = GPTConfig(vocab_size=train_dataset.vocab_size, block_size=block_size,num_class=num_class,
                        n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256)
            if args.addposition:
                mconf = GPTConfig(vocab_size=train_dataset.vocab_size, block_size=block_size,num_class=num_class,
                            n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,position_size=train_dataset.position_size)
            model = get_model(args,mconf)
            string_logs = f"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/"
            tconf = TrainerConfig(max_epochs=150, batch_size=1, learning_rate=6e-4,
                                lr_decay=True, warmup_tokens=1*150, final_tokens=150*len(train_dataset)*block_size,
                                num_workers=1,string_logs=string_logs, args=args)
            trainer = Trainer(model, train_dataset, validation_dataset, tconf)
            output_mode = args.output_mode.split("_")
            if len(output_mode)>1:
                trainer.multi_stream(len(output_mode))
            else:
                trainer.basic_train()
    if args.newtraining:
        # Newer training mode: single dynamic-stream trainer; the config is
        # progressively widened by the token flags (position, then GES).
        block_size = args.block_size # spatial extent of the model for its context
        train_dataset = get_dataloader(args=args,train_val='training',load= not args.create_dataset)
        validation_dataset = get_dataloader(args=args,train_val='validation',load= not args.create_dataset)
        mconf = GPTConfig(vocab_size=train_dataset.vocab_size, block_size=block_size,num_class=args.n_class,
                    n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256)
        if args.addpostoken:
            mconf = GPTConfig(vocab_size=train_dataset.vocab_size, block_size=block_size,num_class=args.n_class,
                        n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,position_size=train_dataset.position_size,rt_size = train_dataset.rt_size)
        if args.addgestoken:
            # GES config supersedes the position-token config when both flags are set.
            mconf = GPTConfig(vocab_size=train_dataset.vocab_size, block_size=block_size,num_class=args.n_class,
                        n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,position_size=train_dataset.position_size, ges_size = train_dataset.ges_size,rt_size = train_dataset.rt_size)
        model = get_model(args,mconf)
        string_logs = f"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/"
        tconf = TrainerConfig(max_epochs=150, batch_size=1, learning_rate=6e-4,
                            lr_decay=True, warmup_tokens=1*150, final_tokens=150*len(train_dataset)*block_size,
                            num_workers=1,string_logs=string_logs, args=args)
        trainer = Trainer(model, train_dataset, validation_dataset, tconf)
        output_mode = args.output_mode.split("_")  # NOTE(review): computed but unused here
        trainer.dynamic_stream()
    if args.predict:
        # Prediction mode: rebuild splits, load datasets, restore the model
        # config and run (multi-stream) prediction on the validation set.
        class_info = fold_split(args)
        block_size = args.block_size # spatial extent of the model for its context
        training_dataset = get_dataloader(args=args,train_val='training',load=True)
        validation_dataset = get_dataloader(args=args,train_val='validation',load=True)
        test_dataset = get_dataloader(args=args,train_val='testing',load=True)
        num_class=args.n_class
        mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,
                    n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256)
        if args.addposition:
            mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,
                        n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,position_size=validation_dataset.position_size)
        model = get_model(args,mconf)
        string_logs = f"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/"
        tconf = TrainerConfig(max_epochs=200, batch_size=1, learning_rate=6e-3,
                            lr_decay=True, warmup_tokens=1*200, final_tokens=200*len(validation_dataset)*block_size,
                            num_workers=20,string_logs=string_logs, args=args)
        # Only the validation set is handed to the trainer for prediction.
        trainer = Trainer(model, None,[validation_dataset], tconf)
        output_mode = args.output_mode.split("_")
        if len(output_mode)>1:
            trainer.predict_multi_stream(len(output_mode))
        else:
            trainer.predict()
    if args.newpredict:
        # Dynamic-stream prediction, optionally on cross-dataset data
        # (--crossdata) and/or with embedding visualisation (--visval).
        class_info = fold_split(args)
        block_size = args.block_size # spatial extent of the model for its context
        training_dataset = get_dataloader(args=args,train_val='training',load=True)
        validation_dataset = get_dataloader(args=args,train_val='validation',load=True)
        test_dataset = get_dataloader(args=args,train_val='testing',load=True)
        num_class=args.n_class
        mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,
                    n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,position_size=validation_dataset.position_size, rt_size = validation_dataset.rt_size)
        model = get_model(args,mconf)
        string_logs = f"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/"
        tconf = TrainerConfig(max_epochs=200, batch_size=1, learning_rate=6e-3,
                            lr_decay=True, warmup_tokens=1*200, final_tokens=200*len(validation_dataset)*block_size,
                            num_workers=20,string_logs=string_logs, args=args)
        trainer = Trainer(model, None,[validation_dataset], tconf)
        if args.visval:
            trainer.vis_embed()
        if args.crossdata:
            trainer.newpredict_dynamic_streamc(args.predictvis)
        else:
            trainer.newpredict_dynamic_stream(args.predictvis)
    if args.finalpredict:
        # Final prediction mode: full config (position + GES + RT sizes);
        # supports attention visualisation, embedding visualisation and
        # prediction with visualisation artifacts.
        class_info = fold_split(args)
        block_size = args.block_size # spatial extent of the model for its context
        validation_dataset = get_dataloader(args=args,train_val='validation',load=True)
        train_dataset = get_dataloader(args=args,train_val='training',load=True)
        num_class=args.n_class
        mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=args.n_class, n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,
                        position_size=validation_dataset.position_size, ges_size = validation_dataset.ges_size,rt_size = validation_dataset.rt_size)
        model = get_model(args,mconf)
        string_logs = f"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/"
        tconf = TrainerConfig(max_epochs=200, batch_size=1, learning_rate=6e-3,
                            lr_decay=True, warmup_tokens=1*200, final_tokens=200*len(validation_dataset)*block_size,
                            num_workers=20,string_logs=string_logs, args=args)
        trainer = Trainer(model, None,[validation_dataset], tconf)
        if args.vis_attention:
            # Attention visualisation gets both train and validation sets.
            trainer = Trainer(model, None,[train_dataset, validation_dataset], tconf)
            trainer.visualize_attention(args.vis_attention)
        else:
            if args.visval:
                trainer.vis_embed()
            if args.predictvis:
                trainer = Trainer(model, None,[train_dataset,validation_dataset], tconf)
            trainer.finalpredict_dynamic_stream(args.predictvis,args.adddata_dir)
    if args.finalpredictnewdata:
        # Like --finalpredict, but scores unseen data via
        # finalpredict_newdata using --adddata-dir as the source.
        class_info = fold_split(args)
        block_size = args.block_size # spatial extent of the model for its context
        validation_dataset = get_dataloader(args=args,train_val='validation',load=True)
        mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=args.n_class, n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,
                        position_size=validation_dataset.position_size, ges_size = validation_dataset.ges_size,rt_size = validation_dataset.rt_size)
        model = get_model(args,mconf)
        string_logs = f"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/"
        tconf = TrainerConfig(max_epochs=200, batch_size=1, learning_rate=6e-3,
                            lr_decay=True, warmup_tokens=1*200, final_tokens=200*len(validation_dataset)*block_size,
                            num_workers=20,string_logs=string_logs, args=args)
        trainer = Trainer(model, None,[validation_dataset], tconf)
        if args.vis_attention:
            trainer = Trainer(model, None,[validation_dataset], tconf)
            trainer.visualize_attention(args.vis_attention)
        else:
            if args.visval:
                trainer.vis_embed()
            if args.predictvis:
                trainer = Trainer(model, None,[validation_dataset], tconf)
            trainer.finalpredict_newdata(args.predictvis,args.adddata_dir)
    if args.newpredict2:
        # Variant of --newpredict with the GES-aware config and the
        # alternate embedding visualiser (vis_embed2).
        class_info = fold_split(args)
        block_size = args.block_size # spatial extent of the model for its context
        training_dataset = get_dataloader(args=args,train_val='training',load=True)
        validation_dataset = get_dataloader(args=args,train_val='validation',load=True)
        num_class=args.n_class
        mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,
                    n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,position_size=validation_dataset.position_size, ges_size = validation_dataset.ges_size, rt_size = validation_dataset.rt_size)
        model = get_model(args,mconf)
        string_logs = f"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/"
        tconf = TrainerConfig(max_epochs=200, batch_size=1, learning_rate=6e-3,
                            lr_decay=True, warmup_tokens=1*200, final_tokens=200*len(validation_dataset)*block_size,
                            num_workers=20,string_logs=string_logs, args=args)
        trainer = Trainer(model, None,[validation_dataset], tconf)
        if args.visval:
            trainer.vis_embed2()
        trainer.newpredict_dynamic_stream(args.predictvis)
    if args.single_predict:
        # Score a single pre-processed sample: the vocab comes from the
        # validation dataset, the input itself from SinglePrediction.
        block_size = args.block_size
        num_class=args.n_class
        validation_dataset = get_dataloader(args=args,train_val='validation',load=True)
        test_dataset = SinglePrediction(data_dir = args.data_dir)
        mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,
                    n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256)
        model = get_model(args,mconf)
        string_logs = f"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/"
        tconf = TrainerConfig(max_epochs=200, batch_size=1, learning_rate=6e-4,
                            lr_decay=True, warmup_tokens=1*200, final_tokens=200*len(validation_dataset)*block_size,
                            num_workers=20,string_logs=string_logs, args=args)
        trainer = Trainer(model, None,[test_dataset], tconf)
        trainer.single_predict()
    if args.vis_weight:
        # Attention/weight visualisation over validation + test sets;
        # falls back to plain prediction for single-output modes.
        class_info = fold_split(args)
        block_size = args.block_size # spatial extent of the model for its context
        training_dataset = get_dataloader(args=args,train_val='training',load=True)
        validation_dataset = get_dataloader(args=args,train_val='validation',load=True)
        test_dataset = get_dataloader(args=args,train_val='testing',load=True)
        num_class=args.n_class
        mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,
                    n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256)
        if args.addposition:
            mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,
                        n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,position_size=validation_dataset.position_size)
        model = get_model(args,mconf)
        string_logs = f"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/"
        tconf = TrainerConfig(max_epochs=200, batch_size=1, learning_rate=6e-3,
                            lr_decay=True, warmup_tokens=1*200, final_tokens=200*len(validation_dataset)*block_size,
                            num_workers=20,string_logs=string_logs, args=args)
        trainer = Trainer(model, None,[validation_dataset,test_dataset], tconf)
        output_mode = args.output_mode.split("_")
        if len(output_mode)>1:
            trainer.predict_vis(len(output_mode))
        else:
            trainer.predict()
    if args.top_weight:
        # Top-weight visualisation over train + validation + test sets;
        # single-output modes fall back to plain prediction.
        class_info = fold_split(args)
        block_size = args.block_size # spatial extent of the model for its context
        training_dataset = get_dataloader(args=args,train_val='training',load=True)
        validation_dataset = get_dataloader(args=args,train_val='validation',load=True)
        test_dataset = get_dataloader(args=args,train_val='testing',load=True)
        num_class=args.n_class
        mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,
                    n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256)
        if args.addposition:
            mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,
                        n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,position_size=validation_dataset.position_size)
        model = get_model(args,mconf)
        string_logs = f"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/"
        tconf = TrainerConfig(max_epochs=200, batch_size=1, learning_rate=6e-3,
                            lr_decay=True, warmup_tokens=1*200, final_tokens=200*len(validation_dataset)*block_size,
                            num_workers=20,string_logs=string_logs, args=args)
        trainer = Trainer(model, None,[training_dataset,validation_dataset,test_dataset], tconf)
        output_mode = args.output_mode.split("_")
        if len(output_mode)>1:
            trainer.topweight_vis(len(output_mode))
        else:
            trainer.predict()
if args.single_pred_vcf:
args = translate_args(args)
#cmd_preprocess(args)
preprocessing_fromdmm(args)
pdb.set_trace()
class_info = fold_split(args)
block_size = args.block_size # spatial extent of the model for its context
training_dataset = get_dataloader(args=args,train_val='training',load=True)
validation_dataset = get_dataloader(args=args,train_val='validation',load=True)
test_dataset = get_dataloader(args=args,train_val='testing',load=True)
num_class=args.n_class
mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,
n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256)
if args.addposition:
mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,
n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,position_size=validation_dataset.position_size)
model = get_model(args,mconf)
string_logs = f"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/"
tconf = TrainerConfig(max_epochs=200, batch_size=1, learning_rate=6e-3,
lr_decay=True, warmup_tokens=1*200, final_tokens=200*len(validation_dataset)*block_size,
num_workers=20,string_logs=string_logs, args=args)
trainer = Trainer(model, None,[training_dataset,validation_dataset,test_dataset], tconf)
output_mode = args.output_mode.split("_")
if len(output_mode)>1:
trainer.topweight_vis(len(output_mode))
else:
trainer.predict()
# | 52.296804 | 291 | 0.608683 |  (non-code artifact from dataset extraction; commented out so the file parses)
from mingpt.utils import set_seed
# Fix the global RNG seeds up front so runs are reproducible.
set_seed(42)
import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F
import math
from torch.utils.data import Dataset
from mingpt.model import *
from mingpt.trainer import Trainer, TrainerConfig
from mingpt.utils import sample
import logging
logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
)
import pdb
# Project dataset loaders; only FinalTCGAPCAWG (and SinglePrediction from the
# conv-dataset star import) are used in this script's run modes.
from dataset.tcga_dataset import TCGA
from dataset.tcga_conv_dataset import TCGAConv
from dataset.pcawg_conv_dataset import *
from dataset.pcawg_dataset import PCAWG
from dataset.pcawg_emb_dataset import PCAWGEmb
from dataset.pcawg_sepdataset import PCAWGSep
from dataset.pcawg_2stream import PCAWG2Stream
from dataset.tcgadisttabletoemb_dataset import TCGADist
from dataset.tcgamutdist_dataset import TCGAMutDist
from dataset.tcgamutdistasone_dataset import TCGAMutDistasOne
from dataset.tcgapcawg_dataset import TCGAPCAWG
from dataset.newtcgapcawg_dataset import NewTCGAPCAWG
from dataset.finaltcgapcawg_dataset import FinalTCGAPCAWG
from mingpt.bert import *
# DMM preprocessing utilities (supply `defaults`, `preprocessing_fromdmm`).
from preprocessing.dmm.dmm import *
from preprocessing.fromvcffiles import *
import argparse
import os
import pandas as pd
def translate_args(args):
    """Overwrite preprocessing-related attributes on ``args`` for the DMM
    pipeline (used by --single-pred-vcf) and return the same namespace.

    Paths are anchored at the current working directory; the reference
    genome path is a fixed cluster location.
    """
    cwd = os.getcwd()
    # Fixed settings the preprocessing step expects, regardless of what was
    # parsed from the command line.
    overrides = {
        'cwd': cwd,
        'mutation_coding': cwd + '/preprocessing/dmm/data/mutation_codes_sv.tsv',
        'input': args.data_dir,
        'output': cwd + '/data/raw/out/00b9d0e6-69dc-4345-bffd-ce32880c8eef.consensus.20160830.somatic.snv_mnv.tsv.gz',
        'reference': '/csc/epitkane/data/ref_genomes/hs37d5_1000GP/hs37d5_1000GP.fa',
        'context': 8,
        'sample_id': 'submitted_sample_id',
        'tmp': cwd + '/data/raw/tmp/',
        'verbose': 1,
        'generate_negatives': 1,
        'report_interval': 100000,
    }
    for key, value in overrides.items():
        setattr(args, key, value)
    return args
def get_args():
    """Build and parse the command-line arguments for the TCGA / PCAWG runs.

    Returns
    -------
    argparse.Namespace
        The parsed options.

    NOTE(review): several help strings below interpolate a ``defaults`` dict
    that is not defined in this module -- presumably it comes from
    ``from preprocessing.dmm.dmm import *``; confirm before changing imports.
    """
    parser = argparse.ArgumentParser(description='TCGA / PEACOCK experiment')

    # --- experiment / model hyper-parameters -------------------------------
    parser.add_argument('--cwd', type=str, help='project dir')
    parser.add_argument('--dataset', type=str, default='pcawg', help='dataset')
    parser.add_argument('--arch', type=str, default=None, help='architecture')
    parser.add_argument('--data-dir', type=str, default=None, help='data directory')
    parser.add_argument('--crossdata-dir', type=str, default=None, help='data directory')
    parser.add_argument('--adddata-dir', type=str, default=None, help='data directory')
    parser.add_argument('--n-class', type=int, default=None, help='number of class')
    parser.add_argument('--batch-size', type=int, default=1, help='batch size')
    parser.add_argument('--block-size', type=int, default=1000, help='block of sequence')
    parser.add_argument('--context-length', type=int, default=256, help='length of sequence')
    parser.add_argument('--n-layer', type=int, default=1, help='attention layer')
    parser.add_argument('--n-head', type=int, default=8, help='attention head')
    parser.add_argument('--n-emb', type=int, default=128, help='embedding dimension')
    parser.add_argument('--n-vocab-type', type=int, default=1, help='embedding dimension')
    parser.add_argument('--tag', type=str, default='myexperiment', help='dataset')

    # --- run-mode and feature flags ----------------------------------------
    parser.add_argument('--train', action='store_true', default=False)
    parser.add_argument('--predict', action='store_true', default=False)
    parser.add_argument('--trainbp', action='store_true', default=False)
    parser.add_argument('--vis-weight', action='store_true', default=False)
    parser.add_argument('--top-weight', action='store_true', default=False)
    parser.add_argument('--visval', action='store_true', default=False)
    parser.add_argument('--single-predict', action='store_true', default=False)
    parser.add_argument('--create-dataset', action='store_true', default=False)
    parser.add_argument('--two-streams', action='store_true', default=False)
    parser.add_argument('--three-streams', action='store_true', default=False)
    parser.add_argument('--filter', action='store_true', default=False)
    parser.add_argument('--bert', action='store_true', default=False)
    parser.add_argument('--withclass', action='store_true', default=False)
    parser.add_argument('--default', action='store_true', default=False)
    parser.add_argument('--addposition', action='store_true', default=False)
    parser.add_argument('--oneDhot', action='store_true', default=False)
    parser.add_argument('--addorder', action='store_true', default=False)
    parser.add_argument('--addtoken', action='store_true', default=False)
    parser.add_argument('--addtriplet', action='store_true', default=False)
    parser.add_argument('--addtriplettoken', action='store_true', default=False)
    parser.add_argument('--addgestoken', action='store_true', default=False)
    parser.add_argument('--addrt', action='store_true', default=False)
    parser.add_argument('--addlongcontext', action='store_true', default=False)
    parser.add_argument('--tokenizedlongcontext', action='store_true', default=False)
    parser.add_argument('--ohlongcontext', action='store_true', default=False)
    parser.add_argument('--flattenohlongcontext', action='store_true', default=False)
    parser.add_argument('--addpostoken', action='store_true', default=False)
    parser.add_argument('--addrttoken', action='store_true', default=False)
    parser.add_argument('--balance', action='store_true', default=False)
    parser.add_argument('--l1', action='store_true', default=False)
    parser.add_argument('--fold', type=int, default=1, help='number of mutation')
    parser.add_argument('--output-mode', type=str, default='token', help='dataset')
    parser.add_argument('--rbm', action='store_true', default=False)
    parser.add_argument('--newtraining', action='store_true', default=False)
    parser.add_argument('--newpredict', action='store_true', default=False)
    parser.add_argument('--newpredict2', action='store_true', default=False)
    parser.add_argument('--normal', action='store_true', default=False)
    parser.add_argument('--freezeemb', action='store_true', default=False)
    parser.add_argument('--predictvis', action='store_true', default=False)
    parser.add_argument('--crossdata', action='store_true', default=False)
    parser.add_argument('--nummut', type=int, default=0, help='number of mutation')
    parser.add_argument('--frac', type=float, default=0, help='frac')
    parser.add_argument('--mutratio', type=str, default='', help='mutation ratio')
    parser.add_argument('--spectral', action='store_true', default=False)
    parser.add_argument('--finalpredict', action='store_true', default=False)
    parser.add_argument('--finalpredictnewdata', action='store_true', default=False)
    parser.add_argument('--single-pred-vcf', action='store_true', default=False)
    parser.add_argument('--vis-attention', action='store_true', default=False)

    # --- DMM preprocessing options (shared with preprocessing/dmm) ---------
    parser.add_argument('-v', '--verbose', type=int, help='Try to be more verbose')
    parser.add_argument('--mutation-coding', help='Mutation coding table ("ref alt code"/line) [{}]'.format(\
                        defaults['mutation_coding']), metavar='fn')
    parser.add_argument('--config', help='Read parameters from a JSON file')
    parser.add_argument('--data-config',
                        help='Column specification for --input, --validation and --aux-data [{}]'.format(\
                        defaults['data_config']))
    parser.add_argument('--random-seed', default=None, type=int, metavar='seed')
    parser.add_argument('--tmp')
    parser.add_argument('-i', '--input', action='append', metavar='dir(s)',
                        help='Either a directory with vcf/maf[.gz] files or a vcf/maf[.gz] file (-i may be given more than once)')
    parser.add_argument('-o', '--output', metavar='fn', help='Preprocessed mutation data')
    parser.add_argument('-r', '--reference', metavar='ref', help='Reference genome (fasta) [{}]'.format(\
                        defaults['reference']))
    parser.add_argument('-k', '--context', help='Sequence context length (power of 2) [{}]'.format(\
                        defaults['context']), metavar='bp', type=int,default=8)
    parser.add_argument('-e', '--errors', metavar='fn',
                        help='File where to log errors [{}]'.format(defaults['errors']))
    parser.add_argument('--no-ref-preload', help='Use samtools to read reference on demand (slow but fast startup) [false]',
                        action='store_true')
    parser.add_argument('--no-filter', help='Process all variants [default=only PASS/. variants]',
                        action='store_true')
    parser.add_argument('--sample-id', help='Sample identifier column name in MAF file')
    parser.add_argument('-n', '--generate_negatives', help='Ratio of negative to positive examples [{}]. Two passes on data are required for n>0.'.format(\
                        defaults['negative_ratio']), type=float)
    parser.add_argument('--median-variant-type-negatives', action='store_true',
                        help='Generate median number of each variant type as negative examples for each sample')
    parser.add_argument('--median-variant-type-file', help='Load median variant numbers from a file')
    parser.add_argument('--negative-generation-mode', help='[generate] output in one go (default), [augment] input files or [process] augmented files', default='generate')
    # BUG FIX: help text previously read "write toutputo output".
    parser.add_argument('--info-column', help='Input column name to write to output (MAF input only). May be specified more than once.', action='append')
    parser.add_argument('--report-interval', help='Interval to report number of variants processed',
                        type=int)
    parser.add_argument('--array-jobs', help='How many array jobs in total', type=int)
    parser.add_argument('--array-index', help='Index of this job', type=int)
    parser.add_argument('--nope', help='Only one variant per output sequence', action='store_true')
    parser.add_argument('--no-overwrite', help='Do not overwrite if output exists', action='store_true')

    args = parser.parse_args()
    return args
def get_dataloader(args, train_val, load):
    """Construct the FinalTCGAPCAWG dataset for one data split.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed options; supplies the data directories, fold, block size,
        token-composition flags, etc.
    train_val : str
        Which split to build: ``'training'`` or ``'validation'``.
    load : bool
        Accepted for interface compatibility but currently unused: the
        dataset is always constructed with ``load=False``, exactly as the
        original four copy-pasted branches did.

    Returns
    -------
    FinalTCGAPCAWG

    Raises
    ------
    ValueError
        For an unsupported ``args.dataset`` or ``train_val`` value.  (The
        original code silently fell through and crashed with
        ``UnboundLocalError`` instead -- note the callers that pass
        ``train_val='testing'`` hit exactly that path.)
    """
    supported = ('finalpcawg', 'wgspcawg', 'finaltcga', 'westcga')
    if args.dataset not in supported:
        raise ValueError(f"unsupported dataset {args.dataset!r}; expected one of {supported}")
    if train_val not in ('training', 'validation'):
        raise ValueError(f"unsupported split {train_val!r}; expected 'training' or 'validation'")
    # The four original branches were byte-identical apart from `mode`,
    # so a single construction suffices.
    return FinalTCGAPCAWG(dataset_name=args.dataset,
                          data_dir=args.data_dir,
                          mode=train_val,
                          curr_fold=args.fold,
                          block_size=args.block_size,
                          load=False,  # mirrors original behaviour; `load` arg is ignored
                          mutratio=args.mutratio,
                          addtriplettoken=args.addtriplettoken,
                          addpostoken=args.addpostoken,
                          addgestoken=args.addgestoken,
                          addrt=args.addrt,
                          nummut=args.nummut,
                          frac=args.frac,
                          crossdata=args.crossdata,
                          crossdatadir=args.crossdata_dir,
                          adddatadir=args.adddata_dir)
def get_model(args, mconf):
    """Instantiate the model class selected by ``args.arch``.

    Parameters
    ----------
    args : argparse.Namespace
        Must provide ``arch``, the string name of the architecture.
    mconf : config object
        Model configuration (``GPTConfig`` / ``BertConfig``) forwarded to the
        selected constructor.

    Returns
    -------
    The constructed model instance.

    Raises
    ------
    ValueError
        If ``args.arch`` names no known architecture (previously this fell
        through and raised ``UnboundLocalError`` on ``return model``).
    """
    if args.arch == 'GPTConv':
        model = GPTConv(mconf)
    elif args.arch == 'GPTConvDeeper':
        model = GPTConvDeeper(mconf)
    elif args.arch == 'GPTNonPosition':
        model = GPTNonPosition(mconf)
    elif args.arch == 'CTransformer':
        model = CTransformer(mconf)
    elif args.arch == 'ConvTransformer':
        model = ConvTransformer(mconf)
    elif args.arch == 'Conv2DTransformer':
        # BUG FIX: previously `model = Conv2DTransform` assigned a
        # (misspelled) bare class object without instantiating it.
        model = Conv2DTransformer(mconf)
    elif args.arch == 'Transformer2Stream':
        model = Transformer2Stream(mconf)
    elif args.arch == 'CTransformerDNN':
        model = CTransformerDNN(mconf)
    elif args.arch == 'CTransformerMutDist':
        model = CTransformerMutDist(mconf)
    elif args.arch == 'SimpleAttention':
        model = SimpleAttention(mconf)
    elif args.arch == 'BertForSequenceClassification':
        model = BertForSequenceClassification(mconf)
    elif args.arch == 'BertwithPosition':
        model = BertwithPosition(mconf)
    elif args.arch == 'CTransformerWithPaddingIDX':
        model = CTransformerWithPaddingIDX(mconf)
    elif args.arch == 'Conv2DTransformerOnehot':
        model = Conv2DTransformerOnehot(mconf)
    elif args.arch == 'CTransformerWithPaddingIDXandfirstvec':
        model = CTransformerWithPaddingIDXandfirstvec(mconf)
    elif args.arch == 'Conv2DTransformerOnehotDeeper':
        model = Conv2DTransformerOnehotDeeper(mconf)
    elif args.arch == 'DNNTransformerOnehotDeeper':
        model = DNNTransformerOnehotDeeper(mconf)
    elif args.arch == 'CTransformerWithPosition':
        model = CTransformerWithPosition(mconf)
    elif args.arch == 'CTransformerWithPositionConcate':
        model = CTransformerWithPositionConcate(mconf)
    elif args.arch == 'DNNTransformerOnehotDeeperwithPosition':
        model = DNNTransformerOnehotDeeperwithPosition(mconf)
    elif args.arch == 'DNNTransformerOnehotDeeperwithPositionwithOrder':
        model = DNNTransformerOnehotDeeperwithPositionwithOrder(mconf)
    elif args.arch == 'CTransformerDNNWithPositionConcateToken':
        model = CTransformerDNNWithPositionConcateToken(mconf)
    elif args.arch == 'CTransformerDNNWithPositionConcateTokenSep':
        model = CTransformerDNNWithPositionConcateTokenSep(mconf)
    elif args.arch == 'CTransformerRBMWithPositionConcate':
        model = CTransformerRBMWithPositionConcate(mconf)
    elif args.arch == 'TripletPositionTokenandOnehot':
        model = TripletPositionTokenandOnehot(mconf)
    elif args.arch == 'PositionToken':
        model = PositionToken(mconf)
    elif args.arch == 'TripletPositionTokenandOnehotConcAfter':
        model = TripletPositionTokenandOnehotConcAfter(mconf)
    elif args.arch == 'TripletPositionRTToken':
        model = TripletPositionRTToken(mconf)
    elif args.arch == 'FullConvTransformer':
        model = FullConvTransformer(mconf)
    elif args.arch == 'TripletPositionTokenBest':
        model = TripletPositionTokenBest(mconf)
    elif args.arch == 'TripletPositionTokenRT':
        model = TripletPositionTokenRT(mconf)
    elif args.arch == 'EmbFC':
        model = EmbFC(mconf)
    elif args.arch == 'TripletPositionTokenOldBest':
        model = TripletPositionTokenOldBest(mconf)
    elif args.arch == 'CTransformerPCAWGtoTCGA_TPGES':
        model = CTransformerPCAWGtoTCGA_TPGES(mconf)
    elif args.arch == 'CTransformerPCAWGtoTCGA_T':
        model = CTransformerPCAWGtoTCGA_T(mconf)
    elif args.arch == 'TripletPosition':
        model = TripletPosition(mconf)
    elif args.arch == 'TripletPositionGES':
        model = TripletPositionGES(mconf)
    elif args.arch == 'TripletPositionGESRT':
        model = TripletPositionGESRT(mconf)
    elif args.arch == 'TripletPositionF':
        model = TripletPositionF(mconf)
    elif args.arch == 'TripletPositionGESF':
        model = TripletPositionGESF(mconf)
    elif args.arch == 'CTransformerF':
        model = CTransformerF(mconf)
    elif args.arch == 'EmbFCPos':
        model = EmbFCPos(mconf)
    elif args.arch == 'EmbFCPosGES':
        model = EmbFCPosGES(mconf)
    else:
        raise ValueError(f"unknown architecture: {args.arch!r}")
    return model
def fold_split(args):
    """Scan ``args.data_dir`` for per-class sample directories and write
    10-fold train/validation/test split CSVs next to the data.

    Side effects: writes ``sample_info_<dataset>.csv``, ``train_split.csv``,
    ``validation_split.csv`` and ``test_split.csv`` into ``args.data_dir``.
    Returns None (callers bind the result as ``class_info`` but never use it).

    NOTE(review): requires an older pandas -- ``DataFrame.append`` was removed
    in pandas 2.0.  Also note ``os.listdir`` ordering is OS-dependent, so the
    sample-to-fold assignment is not reproducible across machines -- confirm
    whether that matters for these experiments.
    """
    # Entries without a '.' in their name are treated as class directories
    # (files such as the emitted *.csv contain a dot and are skipped).
    num_class = os.listdir(args.data_dir)
    class_name = [i for i in num_class if len(i.split('.'))==1]
    class_name = sorted(class_name)
    # Count samples (files) per class directory.
    num_samples = []
    for i in class_name:
        ns = len(os.listdir(args.data_dir+i))
        num_samples.append(ns)
    d = {'class_name':class_name,'n_samples':num_samples}
    pd_class_info = pd.DataFrame(d)
    folds=10
    # Keep only classes with at least one sample per fold; the row index
    # becomes the stable integer class label ('class_index').
    class_used = pd_class_info.loc[pd_class_info['n_samples']>=folds]
    class_used = class_used.rename_axis('class_index').reset_index()
    class_used.to_csv(args.data_dir + 'sample_info_' + args.dataset + '.csv', index=False)
    num_class=len(class_used)  # NOTE(review): reassigned here but never used afterwards
    # Assign every sample a fold number 1..folds, round-robin per class.
    tuple_list = []
    for nm_class in class_used['class_name']:
        num_sample = class_used.loc[class_used['class_name']==nm_class]['n_samples'].values[0]
        class_idx = class_used.loc[class_used['class_name']==nm_class]['class_index'].values[0]
        samples = os.listdir(args.data_dir+nm_class)
        count_split = 0
        for i in range(0,num_sample):
            count_split = count_split+1
            if count_split > folds:
                count_split = 1
            tuple_onerow = tuple([nm_class,class_idx,samples[i],count_split])
            tuple_list.append(tuple_onerow)
    all_split = pd.DataFrame(tuple_list,columns = ['class_name','class_index','name_samples','split'])
    # Rotate fold i out as test, fold i+1 as validation, rest as train.
    # NOTE(review): range(1, folds) covers folds 1..9 only -- fold 10 is never
    # used as a test fold; confirm whether this off-by-one is intentional.
    test_split = pd.DataFrame(columns = all_split.columns)
    train_split = pd.DataFrame(columns = all_split.columns)
    validation_split = pd.DataFrame(columns = all_split.columns)
    for i in range(1,folds):
        test = all_split.loc[all_split['split']==i]
        train = all_split.loc[all_split['split']!=i]
        split_min = i + 1
        if split_min >= folds:
            split_min = 1
        validation = train.loc[train['split']==split_min]
        train = train.loc[train['split']!=split_min]
        # NOTE(review): assigning into a .loc slice may trigger pandas'
        # SettingWithCopyWarning here -- behaviour preserved as-is.
        train['split'] = i
        validation['split'] = i
        test_split = test_split.append(test)
        validation_split = validation_split.append(validation)
        train_split = train_split.append(train)
    train_split.to_csv(args.data_dir + 'train_split.csv', index=False)
    validation_split.to_csv(args.data_dir + 'validation_split.csv', index=False)
    test_split.to_csv(args.data_dir + 'test_split.csv', index=False)
# ---------------------------------------------------------------------------
# Script entry point.  Every `if args.<mode>` below is an independent run
# mode; more than one can fire in a single invocation if several flags are
# passed together.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    best_accuracy=0  # NOTE(review): assigned but never read in this file
    args = get_args()
    # ----- training mode (--train) -----------------------------------------
    if args.train:
        block_size = args.block_size
        # `load` is the inverse of --create-dataset; note get_dataloader
        # currently ignores its `load` argument.
        train_dataset = get_dataloader(args=args,train_val='training',load= not args.create_dataset)
        validation_dataset = get_dataloader(args=args,train_val='validation',load= not args.create_dataset)
        # BERT-style configuration/training path.
        if args.bert:
            if args.default:
                mconf = BertConfig(vocab_size_or_config_json_file = train_dataset.vocab_size,num_class=args.n_class)
            else:
                if args.addposition:
                    mconf = BertConfig(vocab_size_or_config_json_file = train_dataset.vocab_size,num_class=args.n_class,num_hidden_layers=args.n_layer,hidden_size=args.n_emb,num_attention_heads=args.n_head,type_vocab_size=args.n_vocab_type,position_size=train_dataset.position_size)
                else:
                    mconf = BertConfig(vocab_size_or_config_json_file = train_dataset.vocab_size,num_class=args.n_class,num_hidden_layers=args.n_layer,hidden_size=args.n_emb,num_attention_heads=args.n_head,type_vocab_size=args.n_vocab_type)
            model = get_model(args,mconf)
            # Run tag encoding the main hyper-parameters (used as a log dir).
            string_logs = f"{args.tag}_{args.arch}_bs{args.block_size:.0f}_nl{args.n_layer:.0f}_nh{args.n_head:.0f}_ne{args.n_emb:.0f}_cl{args.context_length:.0f}/"
            tconf = TrainerConfig(max_epochs=150, batch_size=1, learning_rate=0.001,
                        lr_decay=True, warmup_tokens=1*150, final_tokens=150*len(train_dataset)*block_size,
                        num_workers=1,string_logs=string_logs, args=args)
            trainer = Trainer(model, train_dataset, validation_dataset, tconf)
            trainer.bert_train()
        # NOTE(review): when --bert is set, execution still falls through to
        # the rbm/else branches below and trains a second model -- confirm.
        if args.rbm:
            num_class=args.n_class
            mconf = GPTConfig(vocab_size=train_dataset.vocab_size, block_size=block_size,num_class=num_class,
                      n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256)
            if args.addposition:
                mconf = GPTConfig(vocab_size=train_dataset.vocab_size, block_size=block_size,num_class=num_class,
                      n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,position_size=train_dataset.position_size)
            model = get_model(args,mconf)
            string_logs = f"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/"
            tconf = TrainerConfig(max_epochs=150, batch_size=1, learning_rate=6e-4,
                        lr_decay=True, warmup_tokens=1*150, final_tokens=150*len(train_dataset)*block_size,
                        num_workers=1,string_logs=string_logs, args=args)
            trainer = Trainer(model, train_dataset, validation_dataset, tconf)
            # Multi-stream when --output-mode is an underscore-joined list.
            output_mode = args.output_mode.split("_")
            if len(output_mode)>1:
                trainer.multi_stream_rbm(len(output_mode))
            else:
                trainer.basic_train()
        else:
            num_class=args.n_class
            mconf = GPTConfig(vocab_size=train_dataset.vocab_size, block_size=block_size,num_class=num_class,
                      n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256)
            if args.addposition:
                mconf = GPTConfig(vocab_size=train_dataset.vocab_size, block_size=block_size,num_class=num_class,
                      n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,position_size=train_dataset.position_size)
            model = get_model(args,mconf)
            string_logs = f"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/"
            tconf = TrainerConfig(max_epochs=150, batch_size=1, learning_rate=6e-4,
                        lr_decay=True, warmup_tokens=1*150, final_tokens=150*len(train_dataset)*block_size,
                        num_workers=1,string_logs=string_logs, args=args)
            trainer = Trainer(model, train_dataset, validation_dataset, tconf)
            output_mode = args.output_mode.split("_")
            if len(output_mode)>1:
                trainer.multi_stream(len(output_mode))
            else:
                trainer.basic_train()
    # ----- dynamic-stream training mode (--newtraining) ---------------------
    if args.newtraining:
        block_size = args.block_size
        train_dataset = get_dataloader(args=args,train_val='training',load= not args.create_dataset)
        validation_dataset = get_dataloader(args=args,train_val='validation',load= not args.create_dataset)
        # Base config; widened with position/GES/RT vocab sizes when the
        # corresponding token flags are set (later assignments win).
        mconf = GPTConfig(vocab_size=train_dataset.vocab_size, block_size=block_size,num_class=args.n_class,
                  n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256)
        if args.addpostoken:
            mconf = GPTConfig(vocab_size=train_dataset.vocab_size, block_size=block_size,num_class=args.n_class,
                  n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,position_size=train_dataset.position_size,rt_size = train_dataset.rt_size)
        if args.addgestoken:
            mconf = GPTConfig(vocab_size=train_dataset.vocab_size, block_size=block_size,num_class=args.n_class,
                  n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,position_size=train_dataset.position_size, ges_size = train_dataset.ges_size,rt_size = train_dataset.rt_size)
        model = get_model(args,mconf)
        string_logs = f"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/"
        tconf = TrainerConfig(max_epochs=150, batch_size=1, learning_rate=6e-4,
                    lr_decay=True, warmup_tokens=1*150, final_tokens=150*len(train_dataset)*block_size,
                    num_workers=1,string_logs=string_logs, args=args)
        trainer = Trainer(model, train_dataset, validation_dataset, tconf)
        output_mode = args.output_mode.split("_")  # NOTE(review): computed but unused here
        trainer.dynamic_stream()
    # ----- prediction mode (--predict) --------------------------------------
    if args.predict:
        class_info = fold_split(args)  # NOTE(review): fold_split returns None; called for its CSV side effects
        block_size = args.block_size
        training_dataset = get_dataloader(args=args,train_val='training',load=True)
        validation_dataset = get_dataloader(args=args,train_val='validation',load=True)
        # NOTE(review): get_dataloader has no 'testing' branch -- this call
        # cannot produce a dataset as written; confirm intended.
        test_dataset = get_dataloader(args=args,train_val='testing',load=True)
        num_class=args.n_class
        mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,
                  n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256)
        if args.addposition:
            mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,
                  n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,position_size=validation_dataset.position_size)
        model = get_model(args,mconf)
        string_logs = f"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/"
        tconf = TrainerConfig(max_epochs=200, batch_size=1, learning_rate=6e-3,
                    lr_decay=True, warmup_tokens=1*200, final_tokens=200*len(validation_dataset)*block_size,
                    num_workers=20,string_logs=string_logs, args=args)
        # No train dataset: the trainer only runs inference on the list given.
        trainer = Trainer(model, None,[validation_dataset], tconf)
        output_mode = args.output_mode.split("_")
        if len(output_mode)>1:
            trainer.predict_multi_stream(len(output_mode))
        else:
            trainer.predict()
    # ----- dynamic-stream prediction mode (--newpredict) --------------------
    if args.newpredict:
        class_info = fold_split(args)  # side effect: writes split CSVs; returns None
        block_size = args.block_size
        training_dataset = get_dataloader(args=args,train_val='training',load=True)
        validation_dataset = get_dataloader(args=args,train_val='validation',load=True)
        # NOTE(review): 'testing' is not handled by get_dataloader -- confirm.
        test_dataset = get_dataloader(args=args,train_val='testing',load=True)
        num_class=args.n_class
        mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,
                  n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,position_size=validation_dataset.position_size, rt_size = validation_dataset.rt_size)
        model = get_model(args,mconf)
        string_logs = f"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/"
        tconf = TrainerConfig(max_epochs=200, batch_size=1, learning_rate=6e-3,
                    lr_decay=True, warmup_tokens=1*200, final_tokens=200*len(validation_dataset)*block_size,
                    num_workers=20,string_logs=string_logs, args=args)
        trainer = Trainer(model, None,[validation_dataset], tconf)
        # Optional embedding visualisation before predicting.
        if args.visval:
            trainer.vis_embed()
        # Cross-dataset prediction uses the *c variant of the stream runner.
        if args.crossdata:
            trainer.newpredict_dynamic_streamc(args.predictvis)
        else:
            trainer.newpredict_dynamic_stream(args.predictvis)
    # ----- final prediction mode (--finalpredict) ---------------------------
    if args.finalpredict:
        class_info = fold_split(args)  # side effect: writes split CSVs; returns None
        block_size = args.block_size
        validation_dataset = get_dataloader(args=args,train_val='validation',load=True)
        train_dataset = get_dataloader(args=args,train_val='training',load=True)
        num_class=args.n_class
        mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=args.n_class, n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,
                          position_size=validation_dataset.position_size, ges_size = validation_dataset.ges_size,rt_size = validation_dataset.rt_size)
        model = get_model(args,mconf)
        string_logs = f"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/"
        tconf = TrainerConfig(max_epochs=200, batch_size=1, learning_rate=6e-3,
                    lr_decay=True, warmup_tokens=1*200, final_tokens=200*len(validation_dataset)*block_size,
                    num_workers=20,string_logs=string_logs, args=args)
        trainer = Trainer(model, None,[validation_dataset], tconf)
        # Attention visualisation replaces prediction when requested.
        if args.vis_attention:
            trainer = Trainer(model, None,[train_dataset, validation_dataset], tconf)
            trainer.visualize_attention(args.vis_attention)
        else:
            if args.visval:
                trainer.vis_embed()
            # With --predictvis the trainer is rebuilt over both splits.
            if args.predictvis:
                trainer = Trainer(model, None,[train_dataset,validation_dataset], tconf)
            trainer.finalpredict_dynamic_stream(args.predictvis,args.adddata_dir)
    # ----- final prediction on new/external data (--finalpredictnewdata) ----
    if args.finalpredictnewdata:
        class_info = fold_split(args)  # side effect: writes split CSVs; returns None
        block_size = args.block_size
        validation_dataset = get_dataloader(args=args,train_val='validation',load=True)
        mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=args.n_class, n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,
                          position_size=validation_dataset.position_size, ges_size = validation_dataset.ges_size,rt_size = validation_dataset.rt_size)
        model = get_model(args,mconf)
        string_logs = f"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/"
        tconf = TrainerConfig(max_epochs=200, batch_size=1, learning_rate=6e-3,
                    lr_decay=True, warmup_tokens=1*200, final_tokens=200*len(validation_dataset)*block_size,
                    num_workers=20,string_logs=string_logs, args=args)
        trainer = Trainer(model, None,[validation_dataset], tconf)
        if args.vis_attention:
            trainer = Trainer(model, None,[validation_dataset], tconf)
            trainer.visualize_attention(args.vis_attention)
        else:
            if args.visval:
                trainer.vis_embed()
            if args.predictvis:
                trainer = Trainer(model, None,[validation_dataset], tconf)
            # New data is read from --adddata-dir.
            trainer.finalpredict_newdata(args.predictvis,args.adddata_dir)
    # ----- second dynamic-stream prediction mode (--newpredict2) ------------
    if args.newpredict2:
        class_info = fold_split(args)  # side effect: writes split CSVs; returns None
        block_size = args.block_size
        training_dataset = get_dataloader(args=args,train_val='training',load=True)
        validation_dataset = get_dataloader(args=args,train_val='validation',load=True)
        num_class=args.n_class
        mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,
                  n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,position_size=validation_dataset.position_size, ges_size = validation_dataset.ges_size, rt_size = validation_dataset.rt_size)
        model = get_model(args,mconf)
        string_logs = f"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/"
        tconf = TrainerConfig(max_epochs=200, batch_size=1, learning_rate=6e-3,
                    lr_decay=True, warmup_tokens=1*200, final_tokens=200*len(validation_dataset)*block_size,
                    num_workers=20,string_logs=string_logs, args=args)
        trainer = Trainer(model, None,[validation_dataset], tconf)
        # Uses the second embedding visualiser, then always predicts.
        if args.visval:
            trainer.vis_embed2()
        trainer.newpredict_dynamic_stream(args.predictvis)
    # ----- single-sample prediction mode (--single-predict) -----------------
    if args.single_predict:
        block_size = args.block_size
        num_class=args.n_class
        # Validation split only supplies vocab_size / length for the config;
        # prediction itself runs on the SinglePrediction dataset below.
        validation_dataset = get_dataloader(args=args,train_val='validation',load=True)
        test_dataset = SinglePrediction(data_dir = args.data_dir)
        mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,
                  n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256)
        model = get_model(args,mconf)
        string_logs = f"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/"
        tconf = TrainerConfig(max_epochs=200, batch_size=1, learning_rate=6e-4,
                    lr_decay=True, warmup_tokens=1*200, final_tokens=200*len(validation_dataset)*block_size,
                    num_workers=20,string_logs=string_logs, args=args)
        trainer = Trainer(model, None,[test_dataset], tconf)
        trainer.single_predict()
    # ----- attention-weight visualisation mode (--vis-weight) ---------------
    if args.vis_weight:
        class_info = fold_split(args)  # side effect: writes split CSVs; returns None
        block_size = args.block_size
        training_dataset = get_dataloader(args=args,train_val='training',load=True)
        validation_dataset = get_dataloader(args=args,train_val='validation',load=True)
        # NOTE(review): 'testing' is not handled by get_dataloader -- confirm.
        test_dataset = get_dataloader(args=args,train_val='testing',load=True)
        num_class=args.n_class
        mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,
                  n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256)
        if args.addposition:
            mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,
                  n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,position_size=validation_dataset.position_size)
        model = get_model(args,mconf)
        string_logs = f"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/"
        tconf = TrainerConfig(max_epochs=200, batch_size=1, learning_rate=6e-3,
                    lr_decay=True, warmup_tokens=1*200, final_tokens=200*len(validation_dataset)*block_size,
                    num_workers=20,string_logs=string_logs, args=args)
        trainer = Trainer(model, None,[validation_dataset,test_dataset], tconf)
        output_mode = args.output_mode.split("_")
        if len(output_mode)>1:
            trainer.predict_vis(len(output_mode))
        else:
            trainer.predict()
    # ----- top-weight visualisation mode (--top-weight) ---------------------
    if args.top_weight:
        class_info = fold_split(args)  # side effect: writes split CSVs; returns None
        block_size = args.block_size
        training_dataset = get_dataloader(args=args,train_val='training',load=True)
        validation_dataset = get_dataloader(args=args,train_val='validation',load=True)
        # NOTE(review): 'testing' is not handled by get_dataloader -- confirm.
        test_dataset = get_dataloader(args=args,train_val='testing',load=True)
        num_class=args.n_class
        mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,
                  n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256)
        if args.addposition:
            mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,
                  n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,position_size=validation_dataset.position_size)
        model = get_model(args,mconf)
        string_logs = f"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/"
        tconf = TrainerConfig(max_epochs=200, batch_size=1, learning_rate=6e-3,
                    lr_decay=True, warmup_tokens=1*200, final_tokens=200*len(validation_dataset)*block_size,
                    num_workers=20,string_logs=string_logs, args=args)
        trainer = Trainer(model, None,[training_dataset,validation_dataset,test_dataset], tconf)
        output_mode = args.output_mode.split("_")
        if len(output_mode)>1:
            trainer.topweight_vis(len(output_mode))
        else:
            trainer.predict()
if args.single_pred_vcf:
args = translate_args(args)
preprocessing_fromdmm(args)
pdb.set_trace()
class_info = fold_split(args)
block_size = args.block_size
training_dataset = get_dataloader(args=args,train_val='training',load=True)
validation_dataset = get_dataloader(args=args,train_val='validation',load=True)
test_dataset = get_dataloader(args=args,train_val='testing',load=True)
num_class=args.n_class
mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,
n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256)
if args.addposition:
mconf = GPTConfig(vocab_size=validation_dataset.vocab_size, block_size=block_size,num_class=num_class,
n_layer=args.n_layer,n_head=args.n_head, n_embd=args.n_emb,context_length=args.context_length,conv_filter=256,position_size=validation_dataset.position_size)
model = get_model(args,mconf)
string_logs = f"{args.tag}_{args.arch}_bs{mconf.block_size:.0f}_nl{mconf.n_layer:.0f}_nh{mconf.n_head:.0f}_ne{mconf.n_embd:.0f}_cl{mconf.context_length:.0f}/"
tconf = TrainerConfig(max_epochs=200, batch_size=1, learning_rate=6e-3,
lr_decay=True, warmup_tokens=1*200, final_tokens=200*len(validation_dataset)*block_size,
num_workers=20,string_logs=string_logs, args=args)
trainer = Trainer(model, None,[training_dataset,validation_dataset,test_dataset], tconf)
output_mode = args.output_mode.split("_")
if len(output_mode)>1:
trainer.topweight_vis(len(output_mode))
else:
trainer.predict()
| true | true |
f73da25349e92dccf2eb30151609fa1c48c757f9 | 374 | py | Python | bike-sharing-demand/one_hot_encoder_transformer.py | Bartosz-D3V/ml-dataset-analysis | cb2458dcb7cecba01f52be5b12e816ca00ce7da4 | [
"MIT"
] | null | null | null | bike-sharing-demand/one_hot_encoder_transformer.py | Bartosz-D3V/ml-dataset-analysis | cb2458dcb7cecba01f52be5b12e816ca00ce7da4 | [
"MIT"
] | null | null | null | bike-sharing-demand/one_hot_encoder_transformer.py | Bartosz-D3V/ml-dataset-analysis | cb2458dcb7cecba01f52be5b12e816ca00ce7da4 | [
"MIT"
] | null | null | null | import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
class OneHotEncoderTransformer(BaseEstimator, TransformerMixin):
    """Sklearn-compatible transformer that one-hot encodes selected columns.

    Thin wrapper around :func:`pandas.get_dummies` so the encoding step can
    participate in an sklearn ``Pipeline``.
    """

    def __init__(self, columns) -> None:
        # Column labels to expand into indicator (dummy) columns.
        self.columns = columns

    def fit(self, X, y=None):
        # Stateless transformer: nothing is learned from the data.
        return self

    def transform(self, X, y=None):
        # Replace each listed column with its one-hot indicator columns.
        return pd.get_dummies(X, columns=self.columns)
| 23.375 | 64 | 0.684492 | import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
class OneHotEncoderTransformer(BaseEstimator, TransformerMixin):
def __init__(self, columns) -> None:
self.columns = columns
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
X = pd.get_dummies(X, columns=self.columns)
return X
| true | true |
f73da2d7eebd1de32958ef149f23f75b023f780b | 181 | py | Python | chapter_eleven/give_the_interface_a_native_feel_with_objects/test_flyweight.py | PacktPublishing/Speed-up-your-Python-with-Rust | 1fce5fb59ea966015768e7eca51c0e31d69531ec | [
"MIT"
] | 21 | 2021-09-10T12:46:26.000Z | 2022-03-23T02:50:39.000Z | chapter_eleven/give_the_interface_a_native_feel_with_objects/test_flyweight.py | PacktPublishing/Speed-up-your-Python-with-Rust | 1fce5fb59ea966015768e7eca51c0e31d69531ec | [
"MIT"
] | null | null | null | chapter_eleven/give_the_interface_a_native_feel_with_objects/test_flyweight.py | PacktPublishing/Speed-up-your-Python-with-Rust | 1fce5fb59ea966015768e7eca51c0e31d69531ec | [
"MIT"
] | 6 | 2021-09-02T08:32:37.000Z | 2022-03-17T21:15:25.000Z | from .calculate_coordinates import Particle
test = Particle(4, 6)
test_two = Particle(3, 8)
test_three = Particle(4, 6)
print(id(test))
print(id(test_three))
print(id(test_two))
| 16.454545 | 43 | 0.740331 | from .calculate_coordinates import Particle
test = Particle(4, 6)
test_two = Particle(3, 8)
test_three = Particle(4, 6)
print(id(test))
print(id(test_three))
print(id(test_two))
| true | true |
f73da378a893de05bf0a378ab7e152c97a9c621b | 1,315 | py | Python | practice_python/33_birthday_dictionaries.py | facmartoni/python_exercises | 7f05c7491a0eee05e32f04c7f07ddc1ba688b7a2 | [
"Apache-2.0"
] | null | null | null | practice_python/33_birthday_dictionaries.py | facmartoni/python_exercises | 7f05c7491a0eee05e32f04c7f07ddc1ba688b7a2 | [
"Apache-2.0"
] | null | null | null | practice_python/33_birthday_dictionaries.py | facmartoni/python_exercises | 7f05c7491a0eee05e32f04c7f07ddc1ba688b7a2 | [
"Apache-2.0"
] | 1 | 2021-10-11T00:25:14.000Z | 2021-10-11T00:25:14.000Z | import os
# This exercise is Part 1 of 4 of the birthday data exercise series.
# For this exercise, we will keep track of when our friend’s birthdays are, and be able to find that information based on their name.
# Create a dictionary (in your file) of names and birthdays.
# When you run your program it should ask the user to enter a name, and return the birthday of that person back to them.
# The interaction should look something like this:
# >>> Welcome to the birthday dictionary. We know the birthdays of:
# Albert Einstein
# Benjamin Franklin
# Ada Lovelace
# >>> Who's birthday do you want to look up?
# Benjamin Franklin
# >>> Benjamin Franklin's birthday is 01/17/1706.
# Known birthdays keyed by full name in title case; values are MM/DD/YYYY.
BIRTHDAYS = {
    'Gaston Costas': '01/01/0000',
    'Gaston Gonzalez': '02/01/0000',
    'Jose Diaz': '03/01/0000',
    'Marcio Riviere': '04/01/0000',
    'Ignacio Gonzalez Ley': '05/01/0000'
}


def birthday_message(name):
    """Return the message to display for *name*.

    Looks the (title-cased) name up in BIRTHDAYS; unknown names yield a
    friendly "don't know" message instead of raising KeyError.
    """
    try:
        return f"\n{name}'s birthday is {BIRTHDAYS[name]} 🎉"
    except KeyError:
        return "\nWe don't know that person 🙁"


def run():
    """Interactive entry point: list known people, prompt for a name and
    print that person's birthday (or a "don't know" message)."""
    # Bug fix: 'cls' exists only on Windows; POSIX shells need 'clear'.
    os.system('cls' if os.name == 'nt' else 'clear')
    print('Welcome to the birthday dictionary!! 🎈🎈🎈')
    print('\nWe know the birthdays of:\n')
    for key in BIRTHDAYS:
        print(key)
    # .title() normalises input so 'jose diaz' matches the dictionary key.
    name = input("\nWho's birthday do you want to look up?: ").title()
    print(birthday_message(name))


if __name__ == '__main__':
    run()
| 30.581395 | 133 | 0.670722 | import os
# Benjamin Franklin
# >>> Benjamin Franklin's birthday is 01/17/1706.
BIRTHDAYS = {
'Gaston Costas': '01/01/0000',
'Gaston Gonzalez': '02/01/0000',
'Jose Diaz': '03/01/0000',
'Marcio Riviere': '04/01/0000',
'Ignacio Gonzalez Ley': '05/01/0000'
}
def run():
os.system('cls')
print('Welcome to the birthday dictionary!! 🎈🎈🎈')
print(f'\nWe know the birthdays of:\n')
for key in BIRTHDAYS.keys():
print(key)
name = input("\nWho's birthday do you want to look up?: ").title()
try:
print(f"\n{name}'s birthday is {BIRTHDAYS[name]} 🎉")
except KeyError:
print("\nWe don't know that person 🙁")
if __name__ == '__main__':
run()
| true | true |
f73da3fbe1b53fab6068d70faf53d1acf12b8ea9 | 1,893 | py | Python | app_sme12/migrations/0012_auto_20200503_0207.py | konjing/django_sme_award | 840ed3685299c77be8516acf1e8a0123930dd63d | [
"MIT"
] | null | null | null | app_sme12/migrations/0012_auto_20200503_0207.py | konjing/django_sme_award | 840ed3685299c77be8516acf1e8a0123930dd63d | [
"MIT"
] | 5 | 2021-03-19T02:32:48.000Z | 2021-06-10T19:01:30.000Z | app_sme12/migrations/0012_auto_20200503_0207.py | konjing/django_sme_award | 840ed3685299c77be8516acf1e8a0123930dd63d | [
"MIT"
] | null | null | null | # Generated by Django 3.0.5 on 2020-05-03 02:07
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app_sme12', '0011_auto_20200502_0716'),
]
operations = [
migrations.CreateModel(
name='AuthorizeCapital',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, verbose_name='')),
('code', models.CharField(blank=True, max_length=20, null=True, verbose_name='รหัส')),
('active', models.BooleanField(default=True, verbose_name='สถานะการใช้งาน')),
],
),
migrations.CreateModel(
name='ProfitPrevious',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, verbose_name='')),
('code', models.CharField(blank=True, max_length=20, null=True, verbose_name='รหัส')),
('active', models.BooleanField(default=True, verbose_name='สถานะการใช้งาน')),
],
),
migrations.RemoveField(
model_name='formregister',
name='profit',
),
migrations.AddField(
model_name='formregister',
name='authorize_capital',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='app_sme12.AuthorizeCapital'),
),
migrations.AddField(
model_name='formregister',
name='profit_previous',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='app_sme12.ProfitPrevious'),
),
]
| 40.276596 | 138 | 0.602219 |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app_sme12', '0011_auto_20200502_0716'),
]
operations = [
migrations.CreateModel(
name='AuthorizeCapital',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, verbose_name='')),
('code', models.CharField(blank=True, max_length=20, null=True, verbose_name='รหัส')),
('active', models.BooleanField(default=True, verbose_name='สถานะการใช้งาน')),
],
),
migrations.CreateModel(
name='ProfitPrevious',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, verbose_name='')),
('code', models.CharField(blank=True, max_length=20, null=True, verbose_name='รหัส')),
('active', models.BooleanField(default=True, verbose_name='สถานะการใช้งาน')),
],
),
migrations.RemoveField(
model_name='formregister',
name='profit',
),
migrations.AddField(
model_name='formregister',
name='authorize_capital',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='app_sme12.AuthorizeCapital'),
),
migrations.AddField(
model_name='formregister',
name='profit_previous',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='app_sme12.ProfitPrevious'),
),
]
| true | true |
f73da43b89d671a9e31ca3905fec01ac0f2ef4ab | 890 | py | Python | doc/release/report_functions_without_rst_generated.py | jebogaert/networkx | 8563c3313223a53c548530f39c8cfb6e433539d3 | [
"BSD-3-Clause"
] | 10,024 | 2015-01-01T13:06:43.000Z | 2022-03-31T12:45:25.000Z | doc/release/report_functions_without_rst_generated.py | jebogaert/networkx | 8563c3313223a53c548530f39c8cfb6e433539d3 | [
"BSD-3-Clause"
] | 3,191 | 2015-01-01T18:13:11.000Z | 2022-03-31T22:06:00.000Z | doc/release/report_functions_without_rst_generated.py | jebogaert/networkx | 8563c3313223a53c548530f39c8cfb6e433539d3 | [
"BSD-3-Clause"
] | 3,272 | 2015-01-01T05:04:53.000Z | 2022-03-31T17:46:35.000Z | import os
import inspect
import networkx as nx
print("Run this script from the doc/ directory of the repository")
funcs = inspect.getmembers(nx, inspect.isfunction)
for n, f in funcs:
# print(n + ": "+str(f))
cmd = r"find . -name *\." + n + ".rst -print"
# print(cmd)
result = os.popen(cmd).read()
# print(result)
old_names = (
"find_cores",
"test",
"edge_betweenness",
"betweenness_centrality_source",
"write_graphml_lxml",
"write_graphml_xml",
"adj_matrix",
"project",
"fruchterman_reingold_layout",
"node_degree_xy",
"node_attribute_xy",
"find_cliques_recursive",
"recursive_simple_cycles",
)
if len(result) == 0 and n not in old_names:
print("Missing file from docs: ", n)
print("Done finding functions that are missing from the docs")
| 25.428571 | 66 | 0.608989 | import os
import inspect
import networkx as nx
print("Run this script from the doc/ directory of the repository")
funcs = inspect.getmembers(nx, inspect.isfunction)
for n, f in funcs:
cmd = r"find . -name *\." + n + ".rst -print"
result = os.popen(cmd).read()
old_names = (
"find_cores",
"test",
"edge_betweenness",
"betweenness_centrality_source",
"write_graphml_lxml",
"write_graphml_xml",
"adj_matrix",
"project",
"fruchterman_reingold_layout",
"node_degree_xy",
"node_attribute_xy",
"find_cliques_recursive",
"recursive_simple_cycles",
)
if len(result) == 0 and n not in old_names:
print("Missing file from docs: ", n)
print("Done finding functions that are missing from the docs")
| true | true |
f73da4a3f39a3addbf145c7510c3fd158e0128ed | 1,241 | py | Python | netforce_hr/setup.py | nfco/netforce | 35252eecd0a6633ab9d82162e9e3ff57d4da029a | [
"MIT"
] | 27 | 2015-09-30T23:53:30.000Z | 2021-06-07T04:56:25.000Z | netforce_hr/setup.py | nfco/netforce | 35252eecd0a6633ab9d82162e9e3ff57d4da029a | [
"MIT"
] | 191 | 2015-10-08T11:46:30.000Z | 2019-11-14T02:24:36.000Z | netforce_hr/setup.py | nfco/netforce | 35252eecd0a6633ab9d82162e9e3ff57d4da029a | [
"MIT"
] | 32 | 2015-10-01T03:59:43.000Z | 2022-01-13T07:31:05.000Z | #!/usr/bin/env python3
# Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from setuptools import setup
# Minimal distribution manifest for the netforce_hr add-on: only project
# metadata is declared here (no packages or package data).
setup(
    name="netforce_hr",
    version="3.1.0",
    description="HR module",
)
| 42.793103 | 80 | 0.763094 |
from setuptools import setup
setup(
name="netforce_hr",
version="3.1.0",
description="HR module",
)
| true | true |
f73da53b6092a031a07e14b133deb565dd0587b6 | 9,386 | py | Python | docs/conf.py | vidyasagar-r/django-simple-pagination | 8cb68b2b51b06cf8f72a13afa64564b1c49c3b41 | [
"MIT"
] | null | null | null | docs/conf.py | vidyasagar-r/django-simple-pagination | 8cb68b2b51b06cf8f72a13afa64564b1c49c3b41 | [
"MIT"
] | null | null | null | docs/conf.py | vidyasagar-r/django-simple-pagination | 8cb68b2b51b06cf8f72a13afa64564b1c49c3b41 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# pietrack documentation build configuration file, created by
# sphinx-quickstart on Wed Jul 29 18:10:32 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Sphinx extensions; autodoc pulls API documentation from docstrings.
extensions = [
    'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-simple-pagination'
copyright = u'2016, Micro Pyramid'
author = u'Micro Pyramid'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.3'
# The full version, including alpha/beta/rc tags.
# NOTE(review): version/release must be bumped together with the package's
# setup metadata on each release.
release = '1.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# Read the Docs theme; imported here (mid-file) purely to resolve its
# install location for html_theme_path below.
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
# NOTE(review): recent sphinx_rtd_theme versions register the theme with
# Sphinx automatically, making html_theme_path unnecessary -- verify before
# upgrading the theme package.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'simplepagination'
# -- Options for LaTeX output ---------------------------------------------
# All LaTeX output knobs are left at their Sphinx defaults; the entries
# below are kept commented out as a reminder of what can be tuned.
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
    # Latex figure (float) alignment
    #'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'simplepagination.tex', u'django simple pagination Documentation',
     u'Micro Pyramid', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'simplepagination', u'django simple pagination Documentation',
     [author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
# NOTE(review): the description below is still the sphinx-quickstart
# placeholder text ("One line description of project.").
texinfo_documents = [
    (master_doc, 'simplepagination', u'django simple pagination Documentation',
     author, 'simplepagination', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 32.254296 | 81 | 0.721394 |
import sys
import os
import shlex
extensions = [
'sphinx.ext.autodoc',
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'django-simple-pagination'
copyright = u'2016, Micro Pyramid'
author = u'Micro Pyramid'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.3'
# The full version, including alpha/beta/rc tags.
release = '1.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'simplepagination'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'simplepagination.tex', u'django simple pagination Documentation',
u'Micro Pyramid', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'simplepagination', u'django simple pagination Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'simplepagination', u'django simple pagination Documentation',
author, 'simplepagination', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
| true | true |
f73da557cb00e868a90c0d89edb360f9632ad256 | 8,869 | py | Python | tests/thread/stress_aes.py | sebastien-riou/micropython | 116c15842fd48ddb77b0bc016341d936a0756573 | [
"MIT"
] | 4,538 | 2017-10-20T05:19:03.000Z | 2022-03-30T02:29:30.000Z | tests/thread/stress_aes.py | sebastien-riou/micropython | 116c15842fd48ddb77b0bc016341d936a0756573 | [
"MIT"
] | 1,088 | 2017-10-21T07:57:22.000Z | 2022-03-31T08:15:49.000Z | tests/thread/stress_aes.py | sebastien-riou/micropython | 116c15842fd48ddb77b0bc016341d936a0756573 | [
"MIT"
] | 1,860 | 2017-10-20T05:22:35.000Z | 2022-03-27T10:54:14.000Z | # Stress test for threads using AES encryption routines.
#
# AES was chosen because it is integer based and inplace so doesn't use the
# heap. It is therefore a good test of raw performance and correctness of the
# VM/runtime. It can be used to measure threading performance (concurrency is
# in principle possible) and correctness (it's non trivial for the encryption/
# decryption to give the correct answer).
#
# The AES code comes first (code originates from a C version authored by D.P.George)
# and then the test harness at the bottom. It can be tuned to be more/less
# aggressive by changing the amount of data to encrypt, the number of loops and
# the number of threads.
#
# MIT license; Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
##################################################################
# discrete arithmetic routines, mostly from a precomputed table
# non-linear, invertible, substitution box
# This is the standard AES forward S-box from FIPS-197: entry i is the
# affine transform of the multiplicative inverse of i in GF(2^8).
# fmt: off
aes_s_box_table = bytes((
    0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5,0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76,
    0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0,0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0,
    0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc,0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15,
    0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a,0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75,
    0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0,0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84,
    0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b,0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf,
    0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85,0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8,
    0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5,0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2,
    0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17,0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73,
    0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88,0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb,
    0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c,0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79,
    0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9,0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08,
    0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6,0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a,
    0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e,0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e,
    0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94,0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf,
    0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68,0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16,
))
# fmt: on
# multiplication of polynomials modulo x^8 + x^4 + x^3 + x + 1 = 0x11b
def aes_gf8_mul_2(x):
    """Multiply x by 2 in GF(2^8), reducing modulo x^8+x^4+x^3+x+1 (0x11B)."""
    doubled = x << 1
    # Reduction is needed only when the shift carries out of the top bit.
    return doubled ^ 0x11B if x & 0x80 else doubled
def aes_gf8_mul_3(x):
    """Multiply x by 3 in GF(2^8): 3*x == x ^ (2*x), with the doubling inlined."""
    doubled = x << 1
    if x & 0x80:
        doubled ^= 0x11B
    return x ^ doubled
# non-linear, invertible, substitution box
def aes_s_box(a):
    """Look up the AES S-box substitution for the low byte of a."""
    return aes_s_box_table[a % 256]
# return 0x02^(a-1) in GF(2^8)
def aes_r_con(a):
    """Return the round constant 0x02**(a - 1) in GF(2^8)."""
    value = 1
    # Repeated doubling with reduction modulo 0x11B when the result
    # overflows eight bits; range() is empty for a <= 1, giving 1.
    for _ in range(a - 1):
        value <<= 1
        if value & 0x100:
            value ^= 0x11B
    return value
##################################################################
# basic AES algorithm; see FIPS-197
#
# Think of it as a pseudo random number generator, with each
# symbol in the sequence being a 16 byte block (the state). The
# key is a parameter of the algorithm and tells which particular
# sequence of random symbols you want. The initial vector, IV,
# sets the start of the sequence. The idea of a strong cipher
# is that it's very difficult to guess the key even if you know
# a large part of the sequence. The basic AES algorithm simply
# provides such a sequence. En/de-cryption is implemented here
# using OCB, where the sequence is xored against the plaintext.
# Care must be taken to (almost) always choose a different IV.
# all inputs must be size 16
def aes_add_round_key(state, w):
    """AddRoundKey step: XOR 16 round-key bytes from w into state, in place."""
    for pos in range(16):
        state[pos] = state[pos] ^ w[pos]
# combined sub_bytes, shift_rows, mix_columns, add_round_key
# all inputs must be size 16
def aes_sb_sr_mc_ark(state, w, w_idx, temp):
    """One full AES round in place: SubBytes, ShiftRows, MixColumns and
    AddRoundKey fused into a single pass.

    state -- 16-byte state, stored column-major (byte r + 4*c is row r of
             column c); replaced with the new round output
    w     -- expanded key schedule; the 16 bytes starting at w_idx are the
             round key XORed in
    temp  -- 16-byte scratch buffer; the result is built here first so the
             pre-shift state can still be read for later columns
    """
    temp_idx = 0
    for i in range(4):
        # SubBytes + ShiftRows: row r of output column i is the S-boxed
        # row-r byte of input column (i + r) & 3.
        x0 = aes_s_box_table[state[i * 4]]
        x1 = aes_s_box_table[state[1 + ((i + 1) & 3) * 4]]
        x2 = aes_s_box_table[state[2 + ((i + 2) & 3) * 4]]
        x3 = aes_s_box_table[state[3 + ((i + 3) & 3) * 4]]
        # MixColumns rows {02,03,01,01} rotated per output row, then XOR
        # the corresponding round-key byte (AddRoundKey).
        temp[temp_idx] = aes_gf8_mul_2(x0) ^ aes_gf8_mul_3(x1) ^ x2 ^ x3 ^ w[w_idx]
        temp[temp_idx + 1] = x0 ^ aes_gf8_mul_2(x1) ^ aes_gf8_mul_3(x2) ^ x3 ^ w[w_idx + 1]
        temp[temp_idx + 2] = x0 ^ x1 ^ aes_gf8_mul_2(x2) ^ aes_gf8_mul_3(x3) ^ w[w_idx + 2]
        temp[temp_idx + 3] = aes_gf8_mul_3(x0) ^ x1 ^ x2 ^ aes_gf8_mul_2(x3) ^ w[w_idx + 3]
        w_idx += 4
        temp_idx += 4
    # Commit the scratch buffer back into the caller's state.
    for i in range(16):
        state[i] = temp[i]
# combined sub_bytes, shift_rows, add_round_key
# all inputs must be size 16
def aes_sb_sr_ark(state, w, w_idx, temp):
    """Final AES round in place: SubBytes, ShiftRows and AddRoundKey
    (the last round omits MixColumns, per FIPS-197).

    Same buffer conventions as aes_sb_sr_mc_ark: all buffers are 16 bytes,
    w_idx is the byte offset of this round's key inside the schedule w,
    and temp is scratch space so the pre-shift state stays readable.
    """
    temp_idx = 0
    for i in range(4):
        # SubBytes + ShiftRows: row r of output column i comes from input
        # column (i + r) & 3, via the S-box.
        x0 = aes_s_box_table[state[i * 4]]
        x1 = aes_s_box_table[state[1 + ((i + 1) & 3) * 4]]
        x2 = aes_s_box_table[state[2 + ((i + 2) & 3) * 4]]
        x3 = aes_s_box_table[state[3 + ((i + 3) & 3) * 4]]
        temp[temp_idx] = x0 ^ w[w_idx]
        temp[temp_idx + 1] = x1 ^ w[w_idx + 1]
        temp[temp_idx + 2] = x2 ^ w[w_idx + 2]
        temp[temp_idx + 3] = x3 ^ w[w_idx + 3]
        w_idx += 4
        temp_idx += 4
    # Commit the scratch buffer back into the caller's state.
    for i in range(16):
        state[i] = temp[i]
# take state as input and change it to the next state in the sequence
# state and temp have size 16, w has size 16 * (Nr + 1), Nr >= 1
def aes_state(state, w, temp, nr):
    """Advance the 16-byte state by one AES block using the schedule w.

    Performs the initial AddRoundKey, nr - 1 full rounds, then the final
    round (no MixColumns).  Round r uses the key bytes at offset 16 * r.
    """
    aes_add_round_key(state, w)
    for rnd in range(1, nr):
        aes_sb_sr_mc_ark(state, w, 16 * rnd, temp)
    aes_sb_sr_ark(state, w, 16 * nr, temp)
# expand 'key' to 'w' for use with aes_state
# key has size 4 * Nk, w has size 16 * (Nr + 1), temp has size 16
def aes_key_expansion(key, w, temp, nk, nr):
    """Expand 'key' into the byte-array round-key schedule 'w' (FIPS-197).

    key  -- 4 * nk bytes of key material
    w    -- output schedule, 16 * (nr + 1) bytes
    temp -- 4-byte scratch buffer holding the transformed previous word
    nk   -- key length in 32-bit words (4, 6 or 8)
    nr   -- number of rounds
    """
    # The first nk words of the schedule are the key itself.
    for i in range(4 * nk):
        w[i] = key[i]
    # w_idx is the byte offset of word i - 1 while word i is being built.
    w_idx = 4 * nk - 4
    for i in range(nk, 4 * (nr + 1)):
        # t/t_idx select the 4 source bytes to mix in: either the
        # transformed copy in temp, or (untransformed) word i-1 inside w.
        t = temp
        t_idx = 0
        if i % nk == 0:
            # RotWord + SubWord + round constant on the previous word.
            t[0] = aes_s_box(w[w_idx + 1]) ^ aes_r_con(i // nk)
            for j in range(1, 4):
                t[j] = aes_s_box(w[w_idx + (j + 1) % 4])
        elif nk > 6 and i % nk == 4:
            # Extra SubWord step used only by 256-bit keys (nk == 8).
            for j in range(0, 4):
                t[j] = aes_s_box(w[w_idx + j])
        else:
            t = w
            t_idx = w_idx
        w_idx += 4
        # w[i] = w[i - nk] ^ t  (byte-wise over the 4-byte word).
        for j in range(4):
            w[w_idx + j] = w[w_idx + j - 4 * nk] ^ t[t_idx + j]
##################################################################
# simple use of AES algorithm, using output feedback (OFB) mode
class AES:
    """AES keystream generator used in output feedback (OFB) mode.

    set_key()/set_iv() initialise the cipher; apply_to() XORs keystream
    bytes into a buffer in place.  In OFB mode encryption and decryption
    are the same XOR operation, so apply_to() serves for both.
    """
    def __init__(self, keysize):
        # Key size selects Nk (key words) and Nr (rounds); see FIPS-197.
        if keysize == 128:
            self.nk = 4
            self.nr = 10
        elif keysize == 192:
            self.nk = 6
            self.nr = 12
        else:
            assert keysize == 256
            self.nk = 8
            self.nr = 14
        self.state = bytearray(16)
        self.w = bytearray(16 * (self.nr + 1))
        self.temp = bytearray(16)
        # state_pos == 16 means the current keystream block is exhausted.
        self.state_pos = 16
    def set_key(self, key):
        """Expand 'key' into the round-key schedule self.w."""
        aes_key_expansion(key, self.w, self.temp, self.nk, self.nr)
        self.state_pos = 16
    def set_iv(self, iv):
        """Load the 16-byte initial vector as the starting state."""
        for i in range(16):
            self.state[i] = iv[i]
        self.state_pos = 16
    def get_some_state(self, n_needed):
        """Ensure keystream bytes are available; return how many
        (at most n_needed) may be consumed from state_pos onward."""
        if self.state_pos >= 16:
            # Current block exhausted: generate the next keystream block.
            aes_state(self.state, self.w, self.temp, self.nr)
            self.state_pos = 0
        n = 16 - self.state_pos
        if n > n_needed:
            n = n_needed
        return n
    def apply_to(self, data):
        """XOR the keystream into 'data' in place (OFB en/decryption)."""
        idx = 0
        n = len(data)
        while n > 0:
            ln = self.get_some_state(n)
            n -= ln
            for i in range(ln):
                data[idx + i] ^= self.state[self.state_pos + i]
            idx += ln
            # Bug fix: advance by the number of keystream bytes actually
            # consumed (ln), not by the remaining count n.  The previous
            # "self.state_pos += n" desynchronised the keystream position
            # whenever a buffer was not a multiple of 16 bytes and reused
            # keystream blocks across calls.
            self.state_pos += ln
##################################################################
# test code
try:
import utime as time
except ImportError:
import time
import _thread
class LockedCounter:
    """Integer counter that is safe to update from multiple threads.

    All updates are serialised through a _thread lock so concurrent
    add() calls cannot lose increments.
    """
    def __init__(self):
        self.lock = _thread.allocate_lock()
        self.value = 0
    def add(self, val):
        """Atomically add 'val' to the counter.

        The lock is used as a context manager so it is released even if
        the update raises; the previous explicit acquire()/release()
        pair would have left the lock held on an exception.
        """
        with self.lock:
            self.value += val
# Shared counter: bumped by worker threads and polled by the main thread.
count = LockedCounter()
def thread_entry():
    """Worker thread: run five encrypt/decrypt round trips, then signal
    completion exactly once on the shared counter.

    Each iteration XORs eight keystream passes into an all-zero buffer
    ("encrypt"), repeats the identical passes ("decrypt" — OFB XOR is its
    own inverse) and checks the buffer is all zeros again.
    """
    global count
    aes = AES(256)
    key = bytearray(256 // 8)
    iv = bytearray(16)
    data = bytearray(128)
    # from now on we don't use the heap
    for loop in range(5):
        # encrypt
        aes.set_key(key)
        aes.set_iv(iv)
        for i in range(8):
            aes.apply_to(data)
        # decrypt
        aes.set_key(key)
        aes.set_iv(iv)
        for i in range(8):
            aes.apply_to(data)
        # verify the round trip restored the all-zero plaintext
        for i in range(len(data)):
            assert data[i] == 0
    # Bug fix: signal completion once per *thread*, after all iterations.
    # Previously count.add(1) was inside the loop, so each worker added 5
    # and the main thread's "count.value < n_thread" wait was satisfied
    # after only a fifth of the workers had finished, allowing the process
    # to exit while threads were still running.
    count.add(1)
if __name__ == "__main__":
    # Launch the worker threads, then poll the shared counter until it
    # reaches n_thread; the sleep keeps the main thread from busy-spinning.
    n_thread = 20
    for i in range(n_thread):
        _thread.start_new_thread(thread_entry, ())
    while count.value < n_thread:
        time.sleep(1)
| 32.487179 | 91 | 0.588567 |
# heap. It is therefore a good test of raw performance and correctness of the
# VM/runtime. It can be used to measure threading performance (concurrency is
# in principle possible) and correctness (it's non trivial for the encryption/
t_idx = 0
if i % nk == 0:
t[0] = aes_s_box(w[w_idx + 1]) ^ aes_r_con(i // nk)
for j in range(1, 4):
t[j] = aes_s_box(w[w_idx + (j + 1) % 4])
elif nk > 6 and i % nk == 4:
for j in range(0, 4):
t[j] = aes_s_box(w[w_idx + j])
else:
t = w
t_idx = w_idx
w_idx += 4
for j in range(4):
w[w_idx + j] = w[w_idx + j - 4 * nk] ^ t[t_idx + j]
##################################################################
# simple use of AES algorithm, using output feedback (OFB) mode
class AES:
def __init__(self, keysize):
if keysize == 128:
self.nk = 4
self.nr = 10
elif keysize == 192:
self.nk = 6
self.nr = 12
else:
assert keysize == 256
self.nk = 8
self.nr = 14
self.state = bytearray(16)
self.w = bytearray(16 * (self.nr + 1))
self.temp = bytearray(16)
self.state_pos = 16
def set_key(self, key):
aes_key_expansion(key, self.w, self.temp, self.nk, self.nr)
self.state_pos = 16
def set_iv(self, iv):
for i in range(16):
self.state[i] = iv[i]
self.state_pos = 16
def get_some_state(self, n_needed):
if self.state_pos >= 16:
aes_state(self.state, self.w, self.temp, self.nr)
self.state_pos = 0
n = 16 - self.state_pos
if n > n_needed:
n = n_needed
return n
def apply_to(self, data):
idx = 0
n = len(data)
while n > 0:
ln = self.get_some_state(n)
n -= ln
for i in range(ln):
data[idx + i] ^= self.state[self.state_pos + i]
idx += ln
self.state_pos += n
##################################################################
# test code
try:
import utime as time
except ImportError:
import time
import _thread
class LockedCounter:
def __init__(self):
self.lock = _thread.allocate_lock()
self.value = 0
def add(self, val):
self.lock.acquire()
self.value += val
self.lock.release()
count = LockedCounter()
def thread_entry():
global count
aes = AES(256)
key = bytearray(256 // 8)
iv = bytearray(16)
data = bytearray(128)
# from now on we don't use the heap
for loop in range(5):
aes.set_key(key)
aes.set_iv(iv)
for i in range(8):
aes.apply_to(data)
aes.set_key(key)
aes.set_iv(iv)
for i in range(8):
aes.apply_to(data)
for i in range(len(data)):
assert data[i] == 0
count.add(1)
if __name__ == "__main__":
n_thread = 20
for i in range(n_thread):
_thread.start_new_thread(thread_entry, ())
while count.value < n_thread:
time.sleep(1)
| true | true |
f73da55b24da55a3ee69d0f00e5ee751c2b39a47 | 32,856 | py | Python | venv/lib/python3.8/site-packages/pyobs/events.py | landrs-toolkit/PySOSA | 1993668bd75bc882286da818955a40dd01d2f7c6 | [
"Apache-2.0"
] | 1 | 2019-12-21T06:08:35.000Z | 2019-12-21T06:08:35.000Z | venv/lib/python3.8/site-packages/pyobs/events.py | landrs-toolkit/PySOSA | 1993668bd75bc882286da818955a40dd01d2f7c6 | [
"Apache-2.0"
] | 7 | 2020-06-23T15:07:12.000Z | 2020-07-14T13:50:50.000Z | venv/lib/python3.8/site-packages/pyobs/events.py | landrs-toolkit/PySOSA | 1993668bd75bc882286da818955a40dd01d2f7c6 | [
"Apache-2.0"
] | 2 | 2019-11-25T06:39:46.000Z | 2020-03-27T13:20:25.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# THIS FILE WAS GENERATED BY generate_classes.py - DO NOT EDIT #
# (Generated on 2019-09-11 10:14:02.142913) #
from .base_classes import BaseEvent
class SwitchScenes(BaseEvent):
"""
Indicates a scene change.
:Returns:
*scene_name*
type: String
The new scene.
*sources*
type: Array<SceneItem>
List of scene items in the new scene. Same specification as [`GetCurrentScene`](#getcurrentscene).
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'SwitchScenes'
self._returns['scene-name'] = None
self._returns['sources'] = None
@property
def scene_name(self):
return self._returns['scene-name']
@property
def sources(self):
return self._returns['sources']
class ScenesChanged(BaseEvent):
"""
The scene list has been modified.
Scenes have been added, removed, or renamed.
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'ScenesChanged'
class SceneCollectionChanged(BaseEvent):
"""
Triggered when switching to another scene collection or when renaming the current scene collection.
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'SceneCollectionChanged'
class SceneCollectionListChanged(BaseEvent):
"""
Triggered when a scene collection is created, added, renamed, or removed.
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'SceneCollectionListChanged'
class SwitchTransition(BaseEvent):
"""
The active transition has been changed.
:Returns:
*transition_name*
type: String
The name of the new active transition.
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'SwitchTransition'
self._returns['transition-name'] = None
@property
def transition_name(self):
return self._returns['transition-name']
class TransitionListChanged(BaseEvent):
"""
The list of available transitions has been modified.
Transitions have been added, removed, or renamed.
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'TransitionListChanged'
class TransitionDurationChanged(BaseEvent):
"""
The active transition duration has been changed.
:Returns:
*new_duration*
type: int
New transition duration.
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'TransitionDurationChanged'
self._returns['new-duration'] = None
@property
def new_duration(self):
return self._returns['new-duration']
class TransitionBegin(BaseEvent):
"""
A transition (other than "cut") has begun.
:Returns:
*name*
type: String
Transition name.
*duration*
type: int
Transition duration (in milliseconds).
*from_scene*
type: String
Source scene of the transition
*to_scene*
type: String
Destination scene of the transition
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'TransitionBegin'
self._returns['name'] = None
self._returns['duration'] = None
self._returns['from-scene'] = None
self._returns['to-scene'] = None
@property
def name(self):
return self._returns['name']
@property
def duration(self):
return self._returns['duration']
@property
def from_scene(self):
return self._returns['from-scene']
@property
def to_scene(self):
return self._returns['to-scene']
class ProfileChanged(BaseEvent):
"""
Triggered when switching to another profile or when renaming the current profile.
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'ProfileChanged'
class ProfileListChanged(BaseEvent):
"""
Triggered when a profile is created, added, renamed, or removed.
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'ProfileListChanged'
class StreamStarting(BaseEvent):
"""
A request to start streaming has been issued.
:Returns:
*preview_only*
type: boolean
Always false (retrocompatibility).
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'StreamStarting'
self._returns['preview-only'] = None
@property
def preview_only(self):
return self._returns['preview-only']
class StreamStarted(BaseEvent):
"""
Streaming started successfully.
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'StreamStarted'
class StreamStopping(BaseEvent):
"""
A request to stop streaming has been issued.
:Returns:
*preview_only*
type: boolean
Always false (retrocompatibility).
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'StreamStopping'
self._returns['preview-only'] = None
@property
def preview_only(self):
return self._returns['preview-only']
class StreamStopped(BaseEvent):
"""
Streaming stopped successfully.
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'StreamStopped'
class StreamStatus(BaseEvent):
"""
Emit every 2 seconds.
:Returns:
*streaming*
type: boolean
Current streaming state.
*recording*
type: boolean
Current recording state.
*replay_buffer_active*
type: boolean
Replay Buffer status
*bytes_per_sec*
type: int
Amount of data per second (in bytes) transmitted by the stream encoder.
*kbits_per_sec*
type: int
Amount of data per second (in kilobits) transmitted by the stream encoder.
*strain*
type: double
Percentage of dropped frames.
*total_stream_time*
type: int
Total time (in seconds) since the stream started.
*num_total_frames*
type: int
Total number of frames transmitted since the stream started.
*num_dropped_frames*
type: int
Number of frames dropped by the encoder since the stream started.
*fps*
type: double
Current framerate.
*render_total_frames*
type: int
Number of frames rendered
*render_missed_frames*
type: int
Number of frames missed due to rendering lag
*output_total_frames*
type: int
Number of frames outputted
*output_skipped_frames*
type: int
Number of frames skipped due to encoding lag
*average_frame_time*
type: double
Average frame time (in milliseconds)
*cpu_usage*
type: double
Current CPU usage (percentage)
*memory_usage*
type: double
Current RAM usage (in megabytes)
*free_disk_space*
type: double
Free recording disk space (in megabytes)
*preview_only*
type: boolean
Always false (retrocompatibility).
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'StreamStatus'
self._returns['streaming'] = None
self._returns['recording'] = None
self._returns['replay-buffer-active'] = None
self._returns['bytes-per-sec'] = None
self._returns['kbits-per-sec'] = None
self._returns['strain'] = None
self._returns['total-stream-time'] = None
self._returns['num-total-frames'] = None
self._returns['num-dropped-frames'] = None
self._returns['fps'] = None
self._returns['render-total-frames'] = None
self._returns['render-missed-frames'] = None
self._returns['output-total-frames'] = None
self._returns['output-skipped-frames'] = None
self._returns['average-frame-time'] = None
self._returns['cpu-usage'] = None
self._returns['memory-usage'] = None
self._returns['free-disk-space'] = None
self._returns['preview-only'] = None
@property
def streaming(self):
return self._returns['streaming']
@property
def recording(self):
return self._returns['recording']
@property
def replay_buffer_active(self):
return self._returns['replay-buffer-active']
@property
def bytes_per_sec(self):
return self._returns['bytes-per-sec']
@property
def kbits_per_sec(self):
return self._returns['kbits-per-sec']
@property
def strain(self):
return self._returns['strain']
@property
def total_stream_time(self):
return self._returns['total-stream-time']
@property
def num_total_frames(self):
return self._returns['num-total-frames']
@property
def num_dropped_frames(self):
return self._returns['num-dropped-frames']
@property
def fps(self):
return self._returns['fps']
@property
def render_total_frames(self):
return self._returns['render-total-frames']
@property
def render_missed_frames(self):
return self._returns['render-missed-frames']
@property
def output_total_frames(self):
return self._returns['output-total-frames']
@property
def output_skipped_frames(self):
return self._returns['output-skipped-frames']
@property
def average_frame_time(self):
return self._returns['average-frame-time']
@property
def cpu_usage(self):
return self._returns['cpu-usage']
@property
def memory_usage(self):
return self._returns['memory-usage']
@property
def free_disk_space(self):
return self._returns['free-disk-space']
@property
def preview_only(self):
return self._returns['preview-only']
class RecordingStarting(BaseEvent):
"""
A request to start recording has been issued.
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'RecordingStarting'
class RecordingStarted(BaseEvent):
"""
Recording started successfully.
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'RecordingStarted'
class RecordingStopping(BaseEvent):
"""
A request to stop recording has been issued.
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'RecordingStopping'
class RecordingStopped(BaseEvent):
"""
Recording stopped successfully.
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'RecordingStopped'
class RecordingPaused(BaseEvent):
"""
Current recording paused
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'RecordingPaused'
class RecordingResumed(BaseEvent):
"""
Current recording resumed
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'RecordingResumed'
class ReplayStarting(BaseEvent):
"""
A request to start the replay buffer has been issued.
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'ReplayStarting'
class ReplayStarted(BaseEvent):
"""
Replay Buffer started successfully
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'ReplayStarted'
class ReplayStopping(BaseEvent):
"""
A request to stop the replay buffer has been issued.
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'ReplayStopping'
class ReplayStopped(BaseEvent):
"""
Replay Buffer stopped successfully
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'ReplayStopped'
class Exiting(BaseEvent):
"""
OBS is exiting.
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'Exiting'
class Heartbeat(BaseEvent):
"""
Emitted every 2 seconds after enabling it by calling SetHeartbeat.
:Returns:
*pulse*
type: boolean
Toggles between every JSON message as an "I am alive" indicator.
*current_profile*
type: string (optional)
Current active profile.
*current_scene*
type: string (optional)
Current active scene.
*streaming*
type: boolean (optional)
Current streaming state.
*total_stream_time*
type: int (optional)
Total time (in seconds) since the stream started.
*total_stream_bytes*
type: int (optional)
Total bytes sent since the stream started.
*total_stream_frames*
type: int (optional)
Total frames streamed since the stream started.
*recording*
type: boolean (optional)
Current recording state.
*total_record_time*
type: int (optional)
Total time (in seconds) since recording started.
*total_record_bytes*
type: int (optional)
Total bytes recorded since the recording started.
*total_record_frames*
type: int (optional)
Total frames recorded since the recording started.
*stats*
type: OBSStats
OBS Stats
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'Heartbeat'
self._returns['pulse'] = None
self._returns['current-profile'] = None
self._returns['current-scene'] = None
self._returns['streaming'] = None
self._returns['total-stream-time'] = None
self._returns['total-stream-bytes'] = None
self._returns['total-stream-frames'] = None
self._returns['recording'] = None
self._returns['total-record-time'] = None
self._returns['total-record-bytes'] = None
self._returns['total-record-frames'] = None
self._returns['stats'] = None
@property
def pulse(self):
return self._returns['pulse']
@property
def current_profile(self):
return self._returns['current-profile']
@property
def current_scene(self):
return self._returns['current-scene']
@property
def streaming(self):
return self._returns['streaming']
@property
def total_stream_time(self):
return self._returns['total-stream-time']
@property
def total_stream_bytes(self):
return self._returns['total-stream-bytes']
@property
def total_stream_frames(self):
return self._returns['total-stream-frames']
@property
def recording(self):
return self._returns['recording']
@property
def total_record_time(self):
return self._returns['total-record-time']
@property
def total_record_bytes(self):
return self._returns['total-record-bytes']
@property
def total_record_frames(self):
return self._returns['total-record-frames']
@property
def stats(self):
return self._returns['stats']
class BroadcastCustomMessage(BaseEvent):
"""
A custom broadcast message was received
:Returns:
*realm*
type: String
Identifier provided by the sender
*data*
type: Object
User-defined data
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'BroadcastCustomMessage'
self._returns['realm'] = None
self._returns['data'] = None
@property
def realm(self):
return self._returns['realm']
@property
def data(self):
return self._returns['data']
class SourceCreated(BaseEvent):
"""
A source has been created. A source can be an input, a scene or a transition.
:Returns:
*source_name*
type: String
Source name
*source_type*
type: String
Source type. Can be "input", "scene", "transition" or "filter".
*source_kind*
type: String
Source kind.
*source_settings*
type: Object
Source settings
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'SourceCreated'
self._returns['sourceName'] = None
self._returns['sourceType'] = None
self._returns['sourceKind'] = None
self._returns['sourceSettings'] = None
@property
def source_name(self):
return self._returns['sourceName']
@property
def source_type(self):
return self._returns['sourceType']
@property
def source_kind(self):
return self._returns['sourceKind']
@property
def source_settings(self):
return self._returns['sourceSettings']
class SourceDestroyed(BaseEvent):
"""
A source has been destroyed/removed. A source can be an input, a scene or a transition.
:Returns:
*source_name*
type: String
Source name
*source_type*
type: String
Source type. Can be "input", "scene", "transition" or "filter".
*source_kind*
type: String
Source kind.
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'SourceDestroyed'
self._returns['sourceName'] = None
self._returns['sourceType'] = None
self._returns['sourceKind'] = None
@property
def source_name(self):
return self._returns['sourceName']
@property
def source_type(self):
return self._returns['sourceType']
@property
def source_kind(self):
return self._returns['sourceKind']
class SourceVolumeChanged(BaseEvent):
"""
The volume of a source has changed.
:Returns:
*source_name*
type: String
Source name
*volume*
type: float
Source volume
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'SourceVolumeChanged'
self._returns['sourceName'] = None
self._returns['volume'] = None
@property
def source_name(self):
return self._returns['sourceName']
@property
def volume(self):
return self._returns['volume']
class SourceMuteStateChanged(BaseEvent):
"""
A source has been muted or unmuted.
:Returns:
*source_name*
type: String
Source name
*muted*
type: boolean
Mute status of the source
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'SourceMuteStateChanged'
self._returns['sourceName'] = None
self._returns['muted'] = None
@property
def source_name(self):
return self._returns['sourceName']
@property
def muted(self):
return self._returns['muted']
class SourceAudioSyncOffsetChanged(BaseEvent):
"""
The audio sync offset of a source has changed.
:Returns:
*source_name*
type: String
Source name
*sync_offset*
type: int
Audio sync offset of the source (in nanoseconds)
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'SourceAudioSyncOffsetChanged'
self._returns['sourceName'] = None
self._returns['syncOffset'] = None
@property
def source_name(self):
return self._returns['sourceName']
@property
def sync_offset(self):
return self._returns['syncOffset']
class SourceAudioMixersChanged(BaseEvent):
"""
Audio mixer routing changed on a source.
:Returns:
*source_name*
type: String
Source name
*mixers*
type: Array<Object>
Routing status of the source for each audio mixer (array of 6 values)
*mixers.*.id*
type: int
Mixer number
*mixers.*.enabled*
type: boolean
Routing status
*hex_mixers_value*
type: String
Raw mixer flags (little-endian, one bit per mixer) as an hexadecimal value
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'SourceAudioMixersChanged'
self._returns['sourceName'] = None
self._returns['mixers'] = None
self._returns['hexMixersValue'] = None
@property
def source_name(self):
return self._returns['sourceName']
@property
def mixers(self):
return self._returns['mixers']
@property
def hex_mixers_value(self):
return self._returns['hexMixersValue']
class SourceRenamed(BaseEvent):
"""
A source has been renamed.
:Returns:
*previous_name*
type: String
Previous source name
*new_name*
type: String
New source name
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'SourceRenamed'
self._returns['previousName'] = None
self._returns['newName'] = None
@property
def previous_name(self):
return self._returns['previousName']
@property
def new_name(self):
return self._returns['newName']
class SourceFilterAdded(BaseEvent):
"""
A filter was added to a source.
:Returns:
*source_name*
type: String
Source name
*filter_name*
type: String
Filter name
*filter_type*
type: String
Filter type
*filter_settings*
type: Object
Filter settings
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'SourceFilterAdded'
self._returns['sourceName'] = None
self._returns['filterName'] = None
self._returns['filterType'] = None
self._returns['filterSettings'] = None
@property
def source_name(self):
return self._returns['sourceName']
@property
def filter_name(self):
return self._returns['filterName']
@property
def filter_type(self):
return self._returns['filterType']
@property
def filter_settings(self):
return self._returns['filterSettings']
class SourceFilterRemoved(BaseEvent):
"""
A filter was removed from a source.
:Returns:
*source_name*
type: String
Source name
*filter_name*
type: String
Filter name
*filter_type*
type: String
Filter type
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'SourceFilterRemoved'
self._returns['sourceName'] = None
self._returns['filterName'] = None
self._returns['filterType'] = None
@property
def source_name(self):
return self._returns['sourceName']
@property
def filter_name(self):
return self._returns['filterName']
@property
def filter_type(self):
return self._returns['filterType']
class SourceFiltersReordered(BaseEvent):
"""
Filters in a source have been reordered.
:Returns:
*source_name*
type: String
Source name
*filters*
type: Array<Object>
Ordered Filters list
*filters.*.name*
type: String
Filter name
*filters.*.type*
type: String
Filter type
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'SourceFiltersReordered'
self._returns['sourceName'] = None
self._returns['filters'] = None
@property
def source_name(self):
return self._returns['sourceName']
@property
def filters(self):
return self._returns['filters']
class SourceOrderChanged(BaseEvent):
"""
Scene items have been reordered.
:Returns:
*scene_name*
type: String
Name of the scene where items have been reordered.
*scene_items*
type: Array<Object>
Ordered list of scene items
*scene_items.*.source_name*
type: String
Item source name
*scene_items.*.item_id*
type: int
Scene item unique ID
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'SourceOrderChanged'
self._returns['scene-name'] = None
self._returns['scene-items'] = None
@property
def scene_name(self):
return self._returns['scene-name']
@property
def scene_items(self):
return self._returns['scene-items']
class SceneItemAdded(BaseEvent):
"""
An item has been added to the current scene.
:Returns:
*scene_name*
type: String
Name of the scene.
*item_name*
type: String
Name of the item added to the scene.
*item_id*
type: int
Scene item ID
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'SceneItemAdded'
self._returns['scene-name'] = None
self._returns['item-name'] = None
self._returns['item-id'] = None
@property
def scene_name(self):
return self._returns['scene-name']
@property
def item_name(self):
return self._returns['item-name']
@property
def item_id(self):
return self._returns['item-id']
class SceneItemRemoved(BaseEvent):
"""
An item has been removed from the current scene.
:Returns:
*scene_name*
type: String
Name of the scene.
*item_name*
type: String
Name of the item removed from the scene.
*item_id*
type: int
Scene item ID
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'SceneItemRemoved'
self._returns['scene-name'] = None
self._returns['item-name'] = None
self._returns['item-id'] = None
@property
def scene_name(self):
return self._returns['scene-name']
@property
def item_name(self):
return self._returns['item-name']
@property
def item_id(self):
return self._returns['item-id']
class SceneItemVisibilityChanged(BaseEvent):
"""
An item's visibility has been toggled.
:Returns:
*scene_name*
type: String
Name of the scene.
*item_name*
type: String
Name of the item in the scene.
*item_id*
type: int
Scene item ID
*item_visible*
type: boolean
New visibility state of the item.
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'SceneItemVisibilityChanged'
self._returns['scene-name'] = None
self._returns['item-name'] = None
self._returns['item-id'] = None
self._returns['item-visible'] = None
@property
def scene_name(self):
return self._returns['scene-name']
@property
def item_name(self):
return self._returns['item-name']
@property
def item_id(self):
return self._returns['item-id']
@property
def item_visible(self):
return self._returns['item-visible']
class SceneItemTransformChanged(BaseEvent):
"""
An item's transform has been changed.
:Returns:
*scene_name*
type: String
Name of the scene.
*item_name*
type: String
Name of the item in the scene.
*item_id*
type: int
Scene item ID
*transform*
type: SceneItemTransform
Scene item transform properties
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'SceneItemTransformChanged'
self._returns['scene-name'] = None
self._returns['item-name'] = None
self._returns['item-id'] = None
self._returns['transform'] = None
@property
def scene_name(self):
return self._returns['scene-name']
@property
def item_name(self):
return self._returns['item-name']
@property
def item_id(self):
return self._returns['item-id']
@property
def transform(self):
return self._returns['transform']
class SceneItemSelected(BaseEvent):
    """
    A scene item is selected.
    :Returns:
        *scene_name*
            type: String
            Name of the scene.
        *item_name*
            type: String
            Name of the item in the scene.
        *item_id*
            type: int
            Scene item ID
    """
    def __init__(self):
        BaseEvent.__init__(self)
        self._name = 'SceneItemSelected'
        self._returns['scene-name'] = None
        self._returns['item-name'] = None
        self._returns['item-id'] = None
    @property
    def scene_name(self):
        return self._returns['scene-name']
    @property
    def item_name(self):
        return self._returns['item-name']
    @property
    def item_id(self):
        return self._returns['item-id']
class SceneItemDeselected(BaseEvent):
    """
    A scene item is deselected.
    :Returns:
        *scene_name*
            type: String
            Name of the scene.
        *item_name*
            type: String
            Name of the item in the scene.
        *item_id*
            type: int
            Scene item ID
    """
    def __init__(self):
        BaseEvent.__init__(self)
        self._name = 'SceneItemDeselected'
        self._returns['scene-name'] = None
        self._returns['item-name'] = None
        self._returns['item-id'] = None
    @property
    def scene_name(self):
        return self._returns['scene-name']
    @property
    def item_name(self):
        return self._returns['item-name']
    @property
    def item_id(self):
        return self._returns['item-id']
class PreviewSceneChanged(BaseEvent):
"""
The selected preview scene has changed (only available in Studio Mode).
:Returns:
*scene_name*
type: String
Name of the scene being previewed.
*sources*
type: Array<SceneItem>
List of sources composing the scene. Same specification as [`GetCurrentScene`](#getcurrentscene).
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'PreviewSceneChanged'
self._returns['scene-name'] = None
self._returns['sources'] = None
@property
def scene_name(self):
return self._returns['scene-name']
@property
def sources(self):
return self._returns['sources']
class StudioModeSwitched(BaseEvent):
"""
Studio Mode has been enabled or disabled.
:Returns:
*new_state*
type: boolean
The new enabled state of Studio Mode.
"""
def __init__(self):
BaseEvent.__init__(self)
self._name = 'StudioModeSwitched'
self._returns['new-state'] = None
@property
def new_state(self):
return self._returns['new-state']
| 25.891253 | 111 | 0.572163 |
from .base_classes import BaseEvent
class SwitchScenes(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'SwitchScenes'
self._returns['scene-name'] = None
self._returns['sources'] = None
@property
def scene_name(self):
return self._returns['scene-name']
@property
def sources(self):
return self._returns['sources']
class ScenesChanged(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'ScenesChanged'
class SceneCollectionChanged(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'SceneCollectionChanged'
class SceneCollectionListChanged(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'SceneCollectionListChanged'
class SwitchTransition(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'SwitchTransition'
self._returns['transition-name'] = None
@property
def transition_name(self):
return self._returns['transition-name']
class TransitionListChanged(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'TransitionListChanged'
class TransitionDurationChanged(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'TransitionDurationChanged'
self._returns['new-duration'] = None
@property
def new_duration(self):
return self._returns['new-duration']
class TransitionBegin(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'TransitionBegin'
self._returns['name'] = None
self._returns['duration'] = None
self._returns['from-scene'] = None
self._returns['to-scene'] = None
@property
def name(self):
return self._returns['name']
@property
def duration(self):
return self._returns['duration']
@property
def from_scene(self):
return self._returns['from-scene']
@property
def to_scene(self):
return self._returns['to-scene']
class ProfileChanged(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'ProfileChanged'
class ProfileListChanged(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'ProfileListChanged'
class StreamStarting(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'StreamStarting'
self._returns['preview-only'] = None
@property
def preview_only(self):
return self._returns['preview-only']
class StreamStarted(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'StreamStarted'
class StreamStopping(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'StreamStopping'
self._returns['preview-only'] = None
@property
def preview_only(self):
return self._returns['preview-only']
class StreamStopped(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'StreamStopped'
class StreamStatus(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'StreamStatus'
self._returns['streaming'] = None
self._returns['recording'] = None
self._returns['replay-buffer-active'] = None
self._returns['bytes-per-sec'] = None
self._returns['kbits-per-sec'] = None
self._returns['strain'] = None
self._returns['total-stream-time'] = None
self._returns['num-total-frames'] = None
self._returns['num-dropped-frames'] = None
self._returns['fps'] = None
self._returns['render-total-frames'] = None
self._returns['render-missed-frames'] = None
self._returns['output-total-frames'] = None
self._returns['output-skipped-frames'] = None
self._returns['average-frame-time'] = None
self._returns['cpu-usage'] = None
self._returns['memory-usage'] = None
self._returns['free-disk-space'] = None
self._returns['preview-only'] = None
@property
def streaming(self):
return self._returns['streaming']
@property
def recording(self):
return self._returns['recording']
@property
def replay_buffer_active(self):
return self._returns['replay-buffer-active']
@property
def bytes_per_sec(self):
return self._returns['bytes-per-sec']
@property
def kbits_per_sec(self):
return self._returns['kbits-per-sec']
@property
def strain(self):
return self._returns['strain']
@property
def total_stream_time(self):
return self._returns['total-stream-time']
@property
def num_total_frames(self):
return self._returns['num-total-frames']
@property
def num_dropped_frames(self):
return self._returns['num-dropped-frames']
@property
def fps(self):
return self._returns['fps']
@property
def render_total_frames(self):
return self._returns['render-total-frames']
@property
def render_missed_frames(self):
return self._returns['render-missed-frames']
@property
def output_total_frames(self):
return self._returns['output-total-frames']
@property
def output_skipped_frames(self):
return self._returns['output-skipped-frames']
@property
def average_frame_time(self):
return self._returns['average-frame-time']
@property
def cpu_usage(self):
return self._returns['cpu-usage']
@property
def memory_usage(self):
return self._returns['memory-usage']
@property
def free_disk_space(self):
return self._returns['free-disk-space']
@property
def preview_only(self):
return self._returns['preview-only']
class RecordingStarting(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'RecordingStarting'
class RecordingStarted(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'RecordingStarted'
class RecordingStopping(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'RecordingStopping'
class RecordingStopped(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'RecordingStopped'
class RecordingPaused(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'RecordingPaused'
class RecordingResumed(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'RecordingResumed'
class ReplayStarting(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'ReplayStarting'
class ReplayStarted(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'ReplayStarted'
class ReplayStopping(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'ReplayStopping'
class ReplayStopped(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'ReplayStopped'
class Exiting(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'Exiting'
class Heartbeat(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'Heartbeat'
self._returns['pulse'] = None
self._returns['current-profile'] = None
self._returns['current-scene'] = None
self._returns['streaming'] = None
self._returns['total-stream-time'] = None
self._returns['total-stream-bytes'] = None
self._returns['total-stream-frames'] = None
self._returns['recording'] = None
self._returns['total-record-time'] = None
self._returns['total-record-bytes'] = None
self._returns['total-record-frames'] = None
self._returns['stats'] = None
@property
def pulse(self):
return self._returns['pulse']
@property
def current_profile(self):
return self._returns['current-profile']
@property
def current_scene(self):
return self._returns['current-scene']
@property
def streaming(self):
return self._returns['streaming']
@property
def total_stream_time(self):
return self._returns['total-stream-time']
@property
def total_stream_bytes(self):
return self._returns['total-stream-bytes']
@property
def total_stream_frames(self):
return self._returns['total-stream-frames']
@property
def recording(self):
return self._returns['recording']
@property
def total_record_time(self):
return self._returns['total-record-time']
@property
def total_record_bytes(self):
return self._returns['total-record-bytes']
@property
def total_record_frames(self):
return self._returns['total-record-frames']
@property
def stats(self):
return self._returns['stats']
class BroadcastCustomMessage(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'BroadcastCustomMessage'
self._returns['realm'] = None
self._returns['data'] = None
@property
def realm(self):
return self._returns['realm']
@property
def data(self):
return self._returns['data']
class SourceCreated(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'SourceCreated'
self._returns['sourceName'] = None
self._returns['sourceType'] = None
self._returns['sourceKind'] = None
self._returns['sourceSettings'] = None
@property
def source_name(self):
return self._returns['sourceName']
@property
def source_type(self):
return self._returns['sourceType']
@property
def source_kind(self):
return self._returns['sourceKind']
@property
def source_settings(self):
return self._returns['sourceSettings']
class SourceDestroyed(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'SourceDestroyed'
self._returns['sourceName'] = None
self._returns['sourceType'] = None
self._returns['sourceKind'] = None
@property
def source_name(self):
return self._returns['sourceName']
@property
def source_type(self):
return self._returns['sourceType']
@property
def source_kind(self):
return self._returns['sourceKind']
class SourceVolumeChanged(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'SourceVolumeChanged'
self._returns['sourceName'] = None
self._returns['volume'] = None
@property
def source_name(self):
return self._returns['sourceName']
@property
def volume(self):
return self._returns['volume']
class SourceMuteStateChanged(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'SourceMuteStateChanged'
self._returns['sourceName'] = None
self._returns['muted'] = None
@property
def source_name(self):
return self._returns['sourceName']
@property
def muted(self):
return self._returns['muted']
class SourceAudioSyncOffsetChanged(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'SourceAudioSyncOffsetChanged'
self._returns['sourceName'] = None
self._returns['syncOffset'] = None
@property
def source_name(self):
return self._returns['sourceName']
@property
def sync_offset(self):
return self._returns['syncOffset']
class SourceAudioMixersChanged(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'SourceAudioMixersChanged'
self._returns['sourceName'] = None
self._returns['mixers'] = None
self._returns['hexMixersValue'] = None
@property
def source_name(self):
return self._returns['sourceName']
@property
def mixers(self):
return self._returns['mixers']
@property
def hex_mixers_value(self):
return self._returns['hexMixersValue']
class SourceRenamed(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'SourceRenamed'
self._returns['previousName'] = None
self._returns['newName'] = None
@property
def previous_name(self):
return self._returns['previousName']
@property
def new_name(self):
return self._returns['newName']
class SourceFilterAdded(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'SourceFilterAdded'
self._returns['sourceName'] = None
self._returns['filterName'] = None
self._returns['filterType'] = None
self._returns['filterSettings'] = None
@property
def source_name(self):
return self._returns['sourceName']
@property
def filter_name(self):
return self._returns['filterName']
@property
def filter_type(self):
return self._returns['filterType']
@property
def filter_settings(self):
return self._returns['filterSettings']
class SourceFilterRemoved(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'SourceFilterRemoved'
self._returns['sourceName'] = None
self._returns['filterName'] = None
self._returns['filterType'] = None
@property
def source_name(self):
return self._returns['sourceName']
@property
def filter_name(self):
return self._returns['filterName']
@property
def filter_type(self):
return self._returns['filterType']
class SourceFiltersReordered(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'SourceFiltersReordered'
self._returns['sourceName'] = None
self._returns['filters'] = None
@property
def source_name(self):
return self._returns['sourceName']
@property
def filters(self):
return self._returns['filters']
class SourceOrderChanged(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'SourceOrderChanged'
self._returns['scene-name'] = None
self._returns['scene-items'] = None
@property
def scene_name(self):
return self._returns['scene-name']
@property
def scene_items(self):
return self._returns['scene-items']
class SceneItemAdded(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'SceneItemAdded'
self._returns['scene-name'] = None
self._returns['item-name'] = None
self._returns['item-id'] = None
@property
def scene_name(self):
return self._returns['scene-name']
@property
def item_name(self):
return self._returns['item-name']
@property
def item_id(self):
return self._returns['item-id']
class SceneItemRemoved(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'SceneItemRemoved'
self._returns['scene-name'] = None
self._returns['item-name'] = None
self._returns['item-id'] = None
@property
def scene_name(self):
return self._returns['scene-name']
@property
def item_name(self):
return self._returns['item-name']
@property
def item_id(self):
return self._returns['item-id']
class SceneItemVisibilityChanged(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'SceneItemVisibilityChanged'
self._returns['scene-name'] = None
self._returns['item-name'] = None
self._returns['item-id'] = None
self._returns['item-visible'] = None
@property
def scene_name(self):
return self._returns['scene-name']
@property
def item_name(self):
return self._returns['item-name']
@property
def item_id(self):
return self._returns['item-id']
@property
def item_visible(self):
return self._returns['item-visible']
class SceneItemTransformChanged(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'SceneItemTransformChanged'
self._returns['scene-name'] = None
self._returns['item-name'] = None
self._returns['item-id'] = None
self._returns['transform'] = None
@property
def scene_name(self):
return self._returns['scene-name']
@property
def item_name(self):
return self._returns['item-name']
@property
def item_id(self):
return self._returns['item-id']
@property
def transform(self):
return self._returns['transform']
class SceneItemSelected(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'SceneItemSelected'
self._returns['scene-name'] = None
self._returns['item-name'] = None
self._returns['item-id'] = None
@property
def scene_name(self):
return self._returns['scene-name']
@property
def item_name(self):
return self._returns['item-name']
@property
def item_id(self):
return self._returns['item-id']
class SceneItemDeselected(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'SceneItemDeselected'
self._returns['scene-name'] = None
self._returns['item-name'] = None
self._returns['item-id'] = None
@property
def scene_name(self):
return self._returns['scene-name']
@property
def item_name(self):
return self._returns['item-name']
@property
def item_id(self):
return self._returns['item-id']
class PreviewSceneChanged(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'PreviewSceneChanged'
self._returns['scene-name'] = None
self._returns['sources'] = None
@property
def scene_name(self):
return self._returns['scene-name']
@property
def sources(self):
return self._returns['sources']
class StudioModeSwitched(BaseEvent):
def __init__(self):
BaseEvent.__init__(self)
self._name = 'StudioModeSwitched'
self._returns['new-state'] = None
@property
def new_state(self):
return self._returns['new-state']
| true | true |
f73da7bb4672fcd42fa69782e781e23c094a5b01 | 452 | py | Python | idealista-scraper/auto/call.py | Kwsswart/scraper | 1e04df1ef097d0ad2301ad3fa0ae299402ec1d2f | [
"MIT"
] | null | null | null | idealista-scraper/auto/call.py | Kwsswart/scraper | 1e04df1ef097d0ad2301ad3fa0ae299402ec1d2f | [
"MIT"
] | null | null | null | idealista-scraper/auto/call.py | Kwsswart/scraper | 1e04df1ef097d0ad2301ad3fa0ae299402ec1d2f | [
"MIT"
] | null | null | null | from random import randint
from scraper import Scraper
from time import sleep
def main():
url = "https://www.idealista.com/alquiler-viviendas/santa-cruz-de-tenerife/centro-ifara/centro/"
a = Scraper(url)
for i in range(len(a.links)):
a.get_page()
print(len(a.links))
print(len(a.data))
sleep((randint(1,10) + 10) * 60)
a.to_json()
a.to_csv()
a.to_text()
if __name__ == "__main__":
main() | 21.52381 | 100 | 0.619469 | from random import randint
from scraper import Scraper
from time import sleep
def main():
url = "https://www.idealista.com/alquiler-viviendas/santa-cruz-de-tenerife/centro-ifara/centro/"
a = Scraper(url)
for i in range(len(a.links)):
a.get_page()
print(len(a.links))
print(len(a.data))
sleep((randint(1,10) + 10) * 60)
a.to_json()
a.to_csv()
a.to_text()
if __name__ == "__main__":
main() | true | true |
f73da99dfaed82dd63b5f9de566a5f192e4ff623 | 6,561 | py | Python | applications/tensorflow/detection/yolov3/log.py | payoto/graphcore_examples | 46d2b7687b829778369fc6328170a7b14761e5c6 | [
"MIT"
] | 260 | 2019-11-18T01:50:00.000Z | 2022-03-28T23:08:53.000Z | applications/tensorflow/detection/yolov3/log.py | payoto/graphcore_examples | 46d2b7687b829778369fc6328170a7b14761e5c6 | [
"MIT"
] | 27 | 2020-01-28T23:07:50.000Z | 2022-02-14T15:37:06.000Z | applications/tensorflow/detection/yolov3/log.py | payoto/graphcore_examples | 46d2b7687b829778369fc6328170a7b14761e5c6 | [
"MIT"
] | 56 | 2019-11-18T02:13:12.000Z | 2022-02-28T14:36:09.000Z | # Copyright (c) 2021 Graphcore Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Logging utilities.
"""
import csv
import datetime
import json
import logging
import os
import random
import subprocess
import numpy as np
import tensorflow as tf
from tensorflow import pywrap_tensorflow
# Set Python logger
# Match TensorFlow's default logging format.
logFormatter = logging.Formatter(
'%(asctime)s.%(msecs)06d: %(levelname)-1.1s %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
logger.addHandler(consoleHandler)
def get_logger():
return logger
def set_log_file_path(log_file_path):
global logger
fileHandler = logging.FileHandler(log_file_path)
fileHandler.setFormatter(logFormatter)
logger.addHandler(fileHandler)
def add_arguments(parser):
group = parser.add_argument_group('Logging')
group.add_argument('--log-dir', type=str, default="./logs/",
help="Log and weights save directory")
group.add_argument('--name-suffix', type=str,
help="Suffix added to name string")
group.add_argument('--steps-per-logs', type=int, default=1,
help="Logs per epoch (if number of epochs specified)")
group.add_argument('--steps-per-tensorboard', type=int, default=0,
help='Number of steps between saving statistics to TensorBoard. 0 to disable.')
return parser
def set_defaults(opts):
name = opts['name']
if opts["name_suffix"]:
name = name + "_" + opts["name_suffix"]
if opts.get("poplar_version"):
v = opts['poplar_version']
# name += "_v" + v[v.find("version ") + 8: v.rfind(' ')]
name += "_v" + v[v.find("version ") + 8: v.find(' (')]
# We want this to be random even if random seeds have been set so that we don't overwrite
# when re-running with the same seed
random_state = random.getstate()
random.seed()
random.setstate(random_state)
# System time with milliseconds
time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
name += "_{}".format(time)
if not os.path.isdir(opts["save_path"]):
os.makedirs(opts["save_path"], exist_ok=True)
opts["logs_path"] = os.path.join(opts["save_path"], name)
opts["checkpoint_path"] = os.path.join(opts["save_path"], name, 'ckpt')
if not os.path.isdir(opts["logs_path"]):
os.makedirs(opts["logs_path"], exist_ok=True)
set_log_file_path(os.path.join(opts['logs_path'], 'log.txt'))
with open(os.path.join(opts["logs_path"], 'arguments.json'), 'w') as fp:
json.dump(opts, fp, sort_keys=True, indent=4, separators=(',', ': '))
return opts
def write_to_csv(d, write_header, training, logs_path):
if logs_path:
filename = 'training.csv' if training else 'validation.csv'
with open(os.path.join(logs_path, filename), 'a+') as f:
w = csv.DictWriter(f, d.keys())
if write_header:
w.writeheader()
w.writerow(d)
def print_trainable_variables(logs_path):
logger.info('Trainable Variables:')
total_parameters = 0
for variable in tf.trainable_variables():
logger.info(variable)
variable_parameters = 1
for DIM in variable.get_shape():
variable_parameters *= DIM.value
total_parameters += variable_parameters
logger.info('Total Parameters:' + str(total_parameters) + '\n')
def make_histogram(values, bins=512):
# From https://gist.github.com/gyglim/1f8dfb1b5c82627ae3efcfbbadb9f514
# License: BSD License 2.0
# Author Michael Gygli
# Logs the histogram of a list/vector of values.
# Convert to a numpy array
values = np.array(values)
# Create histogram using numpy
counts, bin_edges = np.histogram(values, bins=bins)
# Fill fields of histogram proto
hist = tf.HistogramProto()
hist.min = float(np.min(values))
hist.max = float(np.max(values))
hist.num = int(np.prod(values.shape))
hist.sum = float(np.sum(values))
hist.sum_squares = float(np.sum(values**2))
# Requires equal number as bins, where the first goes from -DBL_MAX to bin_edges[1]
# See https://github.com/tensorflow/tensorflow/blob/r2.6/tensorflow/core/framework/summary.proto#L30
# Thus, we drop the start of the first bin
bin_edges = bin_edges[1:]
# Add bin edges and counts
for edge in bin_edges:
hist.bucket_limit.append(edge)
for c in counts:
hist.bucket.append(c)
# Create and write Summary
return hist
# return tf.Summary.Value(tag=tag, histo=hist)
def save_model_statistics(checkpoint_path, summary_writer, step=0):
initializers = load_initializers_from_checkpoint(checkpoint_path)
summary = tf.Summary()
for name, np_weight in initializers.items():
name = name.replace(":", "_")
tensor = np_weight.astype(np.float32)
if not np.any(np.isnan(tensor)):
summary.value.add(tag=name, histo=make_histogram(tensor))
summary.value.add(tag=f"L2/{name}", simple_value=np.linalg.norm(tensor))
summary_writer.add_summary(summary, step)
summary_writer.flush()
def load_initializers_from_checkpoint(checkpoint_path):
initializers = {}
reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)
var_to_map = reader.get_variable_to_shape_map()
for key, dim in var_to_map.items():
if key == 'global_step':
continue
# if reader.get_tensor(key).dtype.name == 'float16':
# int_data = np.asarray(reader.get_tensor(key), np.int32)
# np_weight = int_data.view(dtype=np.float16).reshape(dim)
# else:
np_weight = reader.get_tensor(key)
initializers[key] = np_weight
return initializers
def get_git_revision():
return subprocess.check_output(["git", "describe", "--always", "--dirty"]).strip().decode()
| 34.171875 | 104 | 0.673373 |
import csv
import datetime
import json
import logging
import os
import random
import subprocess
import numpy as np
import tensorflow as tf
from tensorflow import pywrap_tensorflow
logFormatter = logging.Formatter(
'%(asctime)s.%(msecs)06d: %(levelname)-1.1s %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
logger.addHandler(consoleHandler)
def get_logger():
return logger
def set_log_file_path(log_file_path):
global logger
fileHandler = logging.FileHandler(log_file_path)
fileHandler.setFormatter(logFormatter)
logger.addHandler(fileHandler)
def add_arguments(parser):
group = parser.add_argument_group('Logging')
group.add_argument('--log-dir', type=str, default="./logs/",
help="Log and weights save directory")
group.add_argument('--name-suffix', type=str,
help="Suffix added to name string")
group.add_argument('--steps-per-logs', type=int, default=1,
help="Logs per epoch (if number of epochs specified)")
group.add_argument('--steps-per-tensorboard', type=int, default=0,
help='Number of steps between saving statistics to TensorBoard. 0 to disable.')
return parser
def set_defaults(opts):
name = opts['name']
if opts["name_suffix"]:
name = name + "_" + opts["name_suffix"]
if opts.get("poplar_version"):
v = opts['poplar_version']
# name += "_v" + v[v.find("version ") + 8: v.rfind(' ')]
name += "_v" + v[v.find("version ") + 8: v.find(' (')]
# We want this to be random even if random seeds have been set so that we don't overwrite
random_state = random.getstate()
random.seed()
random.setstate(random_state)
time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
name += "_{}".format(time)
if not os.path.isdir(opts["save_path"]):
os.makedirs(opts["save_path"], exist_ok=True)
opts["logs_path"] = os.path.join(opts["save_path"], name)
opts["checkpoint_path"] = os.path.join(opts["save_path"], name, 'ckpt')
if not os.path.isdir(opts["logs_path"]):
os.makedirs(opts["logs_path"], exist_ok=True)
set_log_file_path(os.path.join(opts['logs_path'], 'log.txt'))
with open(os.path.join(opts["logs_path"], 'arguments.json'), 'w') as fp:
json.dump(opts, fp, sort_keys=True, indent=4, separators=(',', ': '))
return opts
def write_to_csv(d, write_header, training, logs_path):
if logs_path:
filename = 'training.csv' if training else 'validation.csv'
with open(os.path.join(logs_path, filename), 'a+') as f:
w = csv.DictWriter(f, d.keys())
if write_header:
w.writeheader()
w.writerow(d)
def print_trainable_variables(logs_path):
logger.info('Trainable Variables:')
total_parameters = 0
for variable in tf.trainable_variables():
logger.info(variable)
variable_parameters = 1
for DIM in variable.get_shape():
variable_parameters *= DIM.value
total_parameters += variable_parameters
logger.info('Total Parameters:' + str(total_parameters) + '\n')
def make_histogram(values, bins=512):
values = np.array(values)
counts, bin_edges = np.histogram(values, bins=bins)
hist = tf.HistogramProto()
hist.min = float(np.min(values))
hist.max = float(np.max(values))
hist.num = int(np.prod(values.shape))
hist.sum = float(np.sum(values))
hist.sum_squares = float(np.sum(values**2))
bin_edges = bin_edges[1:]
for edge in bin_edges:
hist.bucket_limit.append(edge)
for c in counts:
hist.bucket.append(c)
return hist
def save_model_statistics(checkpoint_path, summary_writer, step=0):
initializers = load_initializers_from_checkpoint(checkpoint_path)
summary = tf.Summary()
for name, np_weight in initializers.items():
name = name.replace(":", "_")
tensor = np_weight.astype(np.float32)
if not np.any(np.isnan(tensor)):
summary.value.add(tag=name, histo=make_histogram(tensor))
summary.value.add(tag=f"L2/{name}", simple_value=np.linalg.norm(tensor))
summary_writer.add_summary(summary, step)
summary_writer.flush()
def load_initializers_from_checkpoint(checkpoint_path):
initializers = {}
reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)
var_to_map = reader.get_variable_to_shape_map()
for key, dim in var_to_map.items():
if key == 'global_step':
continue
np_weight = reader.get_tensor(key)
initializers[key] = np_weight
return initializers
def get_git_revision():
return subprocess.check_output(["git", "describe", "--always", "--dirty"]).strip().decode()
| true | true |
f73da9bd3c45d828f774d8b42d5bd71f5c855faa | 8,575 | py | Python | docs/tools/website.py | KinderRiven/ClickHouse | 2edc03b3d4a950848720064db82f11581b2e8d8a | [
"Apache-2.0"
] | 8,629 | 2016-06-14T21:03:01.000Z | 2019-09-23T07:46:38.000Z | docs/tools/website.py | KinderRiven/ClickHouse | 2edc03b3d4a950848720064db82f11581b2e8d8a | [
"Apache-2.0"
] | 4,335 | 2016-06-15T12:58:31.000Z | 2019-09-23T11:18:43.000Z | docs/tools/website.py | KinderRiven/ClickHouse | 2edc03b3d4a950848720064db82f11581b2e8d8a | [
"Apache-2.0"
] | 1,700 | 2016-06-15T09:25:11.000Z | 2019-09-23T11:16:38.000Z | import hashlib
import json
import logging
import os
import shutil
import subprocess
import bs4
import util
def handle_iframe(iframe, soup):
    """Wrap an ``<iframe>`` in a Bootstrap responsive container, allowing
    only embeds from a whitelist of domains.

    The iframe is re-parented into a new ``<div class="embed-responsive
    embed-responsive-16by9">`` inserted at its original position, its fixed
    ``width``/``height`` attributes are dropped (the wrapper now controls
    sizing), and the attributes required for YouTube embeds are set.

    Args:
        iframe: the bs4 tag to wrap; its ``src`` must be on an allowed domain.
        soup: the BeautifulSoup document, used to create the wrapper tag.

    Raises:
        RuntimeError: if the iframe's ``src`` is missing or not on an
            allowed domain.
    """
    allowed_domains = ["https://www.youtube.com/"]
    # A missing src is treated like a disallowed one: .get() avoids a bare
    # KeyError and yields the explicit error below instead.
    iframe_src = iframe.attrs.get("src", "")
    if not any(iframe_src.startswith(domain) for domain in allowed_domains):
        raise RuntimeError(f"iframe from illegal domain: {iframe_src}")
    wrapper = soup.new_tag("div")
    wrapper.attrs["class"] = ["embed-responsive", "embed-responsive-16by9"]
    iframe.insert_before(wrapper)
    iframe.extract()
    wrapper.insert(0, iframe)
    # Sizing is delegated to the responsive wrapper.
    iframe.attrs.pop("width", None)
    iframe.attrs.pop("height", None)
    iframe.attrs[
        "allow"
    ] = "accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture"
    iframe.attrs["class"] = "embed-responsive-item"
    iframe.attrs["frameborder"] = "0"
    iframe.attrs["allowfullscreen"] = "1"
def adjust_markdown_html(content):
    """Post-process HTML rendered from Markdown so it matches the site's
    Bootstrap styling.

    Applied transformations:
    - header permalinks become a non-breaking space; external links open
      in a new tab,
    - ``code`` gets the ``syntax`` class, images get ``img-fluid``,
    - iframes (and ``img alt="iframe"`` placeholders) are wrapped via
      :func:`handle_iframe`,
    - ``summary`` tags are hoisted to the top of their ``details``,
    - ``dd`` is indented, and admonition divs are restyled as Bootstrap
      alerts with a severity-dependent color.

    Returns the adjusted HTML as a string.
    """
    soup = bs4.BeautifulSoup(content, features="html.parser")

    for anchor in soup.find_all("a"):
        if "headerlink" in (anchor.attrs.get("class") or []):
            anchor.string = "\xa0"  # keep the permalink target, hide the text
        href = anchor.attrs.get("href")
        if href and href.startswith("http"):
            anchor.attrs["target"] = "_blank"

    for code in soup.find_all("code"):
        existing = code.attrs.get("class")
        code.attrs["class"] = existing + ["syntax"] if existing else "syntax"

    for iframe in soup.find_all("iframe"):
        handle_iframe(iframe, soup)

    for img in soup.find_all("img"):
        if img.attrs.get("alt") == "iframe":
            # Markdown cannot express iframes directly; an image with
            # alt="iframe" is the placeholder that becomes a real one.
            img.name = "iframe"
            img.string = ""
            handle_iframe(img, soup)
        else:
            existing = img.attrs.get("class")
            img.attrs["class"] = existing + ["img-fluid"] if existing else "img-fluid"

    for details in soup.find_all("details"):
        for summary in details.find_all("summary"):
            # Hoist summaries found deeper in the tree to the top of the
            # details element.
            if summary.parent != details:
                summary.extract()
                details.insert(0, summary)

    for dd in soup.find_all("dd"):
        existing = dd.attrs.get("class")
        dd.attrs["class"] = existing + ["pl-3"] if existing else "pl-3"

    for div in soup.find_all("div"):
        div_class = div.attrs.get("class")
        if not (div_class and "admonition" in div_class):
            continue
        for anchor in div.find_all("a"):
            existing = anchor.attrs.get("class")
            anchor.attrs["class"] = (
                existing + ["alert-link"] if existing else "alert-link"
            )
        for paragraph in div.find_all("p"):
            p_class = paragraph.attrs.get("class")
            if p_class and "admonition-title" in p_class:
                paragraph.attrs["class"] = p_class + [
                    "alert-heading",
                    "display-4",
                    "text-reset",
                    "mb-2",
                ]
        div.attrs["role"] = "alert"
        # Map the admonition kind to a Bootstrap alert color.
        if ("info" in div_class) or ("note" in div_class):
            mode = "alert-primary"
        elif ("attention" in div_class) or ("warning" in div_class):
            mode = "alert-warning"
        elif "important" in div_class:
            mode = "alert-danger"
        elif "tip" in div_class:
            mode = "alert-info"
        else:
            mode = "alert-secondary"
        div.attrs["class"] = div_class + ["alert", "pb-0", "mb-4", mode]

    return str(soup)
def build_website(args):
logging.info("Building website")
env = util.init_jinja2_env(args)
shutil.copytree(
args.website_dir,
args.output_dir,
ignore=shutil.ignore_patterns(
"*.md",
"*.sh",
"*.css",
"*.json",
"js/*.js",
"build",
"docs",
"public",
"node_modules",
"src",
"templates",
"locale",
".gitkeep",
),
)
shutil.copytree(
os.path.join(args.website_dir, "images"),
os.path.join(args.output_dir, "docs", "images"),
)
# This file can be requested to check for available ClickHouse releases.
shutil.copy2(
os.path.join(args.src_dir, "utils", "list-versions", "version_date.tsv"),
os.path.join(args.output_dir, "data", "version_date.tsv"),
)
# This file can be requested to install ClickHouse.
shutil.copy2(
os.path.join(args.src_dir, "docs", "_includes", "install", "universal.sh"),
os.path.join(args.output_dir, "data", "install.sh"),
)
for root, _, filenames in os.walk(args.output_dir):
for filename in filenames:
if filename == "main.html":
continue
path = os.path.join(root, filename)
if not filename.endswith(".html"):
continue
logging.info("Processing %s", path)
with open(path, "rb") as f:
content = f.read().decode("utf-8")
template = env.from_string(content)
content = template.render(args.__dict__)
with open(path, "wb") as f:
f.write(content.encode("utf-8"))
def get_css_in(args):
return [
f"'{args.website_dir}/css/bootstrap.css'",
f"'{args.website_dir}/css/docsearch.css'",
f"'{args.website_dir}/css/base.css'",
f"'{args.website_dir}/css/blog.css'",
f"'{args.website_dir}/css/docs.css'",
f"'{args.website_dir}/css/highlight.css'",
f"'{args.website_dir}/css/main.css'",
]
def get_js_in(args):
return [
f"'{args.website_dir}/js/jquery.js'",
f"'{args.website_dir}/js/popper.js'",
f"'{args.website_dir}/js/bootstrap.js'",
f"'{args.website_dir}/js/sentry.js'",
f"'{args.website_dir}/js/base.js'",
f"'{args.website_dir}/js/index.js'",
f"'{args.website_dir}/js/docsearch.js'",
f"'{args.website_dir}/js/docs.js'",
f"'{args.website_dir}/js/main.js'",
]
def minify_website(args):
css_in = " ".join(get_css_in(args))
css_out = f"{args.output_dir}/docs/css/base.css"
os.makedirs(f"{args.output_dir}/docs/css")
command = f"cat {css_in}"
output = subprocess.check_output(command, shell=True)
with open(css_out, "wb+") as f:
f.write(output)
with open(css_out, "rb") as f:
css_digest = hashlib.sha3_224(f.read()).hexdigest()[0:8]
js_in = " ".join(get_js_in(args))
js_out = f"{args.output_dir}/docs/js/base.js"
os.makedirs(f"{args.output_dir}/docs/js")
command = f"cat {js_in}"
output = subprocess.check_output(command, shell=True)
with open(js_out, "wb+") as f:
f.write(output)
with open(js_out, "rb") as f:
js_digest = hashlib.sha3_224(f.read()).hexdigest()[0:8]
logging.info(js_digest)
def process_benchmark_results(args):
benchmark_root = os.path.join(args.website_dir, "benchmark")
required_keys = {
"dbms": ["result"],
"hardware": ["result", "system", "system_full", "kind"],
"versions": ["version", "system"],
}
for benchmark_kind in ["dbms", "hardware", "versions"]:
results = []
results_root = os.path.join(benchmark_root, benchmark_kind, "results")
for result in sorted(os.listdir(results_root)):
result_file = os.path.join(results_root, result)
logging.info(f"Reading benchmark result from {result_file}")
with open(result_file, "r") as f:
result = json.loads(f.read())
for item in result:
for required_key in required_keys[benchmark_kind]:
assert (
required_key in item
), f'No "{required_key}" in {result_file}'
results += result
results_js = os.path.join(
args.output_dir, "benchmark", benchmark_kind, "results.js"
)
with open(results_js, "w") as f:
data = json.dumps(results)
f.write(f"var results = {data};")
| 32.481061 | 83 | 0.552536 | import hashlib
import json
import logging
import os
import shutil
import subprocess
import bs4
import util
def handle_iframe(iframe, soup):
allowed_domains = ["https://www.youtube.com/"]
illegal_domain = True
iframe_src = iframe.attrs["src"]
for domain in allowed_domains:
if iframe_src.startswith(domain):
illegal_domain = False
break
if illegal_domain:
raise RuntimeError(f"iframe from illegal domain: {iframe_src}")
wrapper = soup.new_tag("div")
wrapper.attrs["class"] = ["embed-responsive", "embed-responsive-16by9"]
iframe.insert_before(wrapper)
iframe.extract()
wrapper.insert(0, iframe)
if "width" in iframe.attrs:
del iframe.attrs["width"]
if "height" in iframe.attrs:
del iframe.attrs["height"]
iframe.attrs[
"allow"
] = "accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture"
iframe.attrs["class"] = "embed-responsive-item"
iframe.attrs["frameborder"] = "0"
iframe.attrs["allowfullscreen"] = "1"
def adjust_markdown_html(content):
soup = bs4.BeautifulSoup(content, features="html.parser")
for a in soup.find_all("a"):
a_class = a.attrs.get("class")
a_href = a.attrs.get("href")
if a_class and "headerlink" in a_class:
a.string = "\xa0"
if a_href and a_href.startswith("http"):
a.attrs["target"] = "_blank"
for code in soup.find_all("code"):
code_class = code.attrs.get("class")
if code_class:
code.attrs["class"] = code_class + ["syntax"]
else:
code.attrs["class"] = "syntax"
for iframe in soup.find_all("iframe"):
handle_iframe(iframe, soup)
for img in soup.find_all("img"):
if img.attrs.get("alt") == "iframe":
img.name = "iframe"
img.string = ""
handle_iframe(img, soup)
continue
img_class = img.attrs.get("class")
if img_class:
img.attrs["class"] = img_class + ["img-fluid"]
else:
img.attrs["class"] = "img-fluid"
for details in soup.find_all("details"):
for summary in details.find_all("summary"):
if summary.parent != details:
summary.extract()
details.insert(0, summary)
for dd in soup.find_all("dd"):
dd_class = dd.attrs.get("class")
if dd_class:
dd.attrs["class"] = dd_class + ["pl-3"]
else:
dd.attrs["class"] = "pl-3"
for div in soup.find_all("div"):
div_class = div.attrs.get("class")
is_admonition = div_class and "admonition" in div.attrs.get("class")
if is_admonition:
for a in div.find_all("a"):
a_class = a.attrs.get("class")
if a_class:
a.attrs["class"] = a_class + ["alert-link"]
else:
a.attrs["class"] = "alert-link"
for p in div.find_all("p"):
p_class = p.attrs.get("class")
if is_admonition and p_class and ("admonition-title" in p_class):
p.attrs["class"] = p_class + [
"alert-heading",
"display-4",
"text-reset",
"mb-2",
]
if is_admonition:
div.attrs["role"] = "alert"
if ("info" in div_class) or ("note" in div_class):
mode = "alert-primary"
elif ("attention" in div_class) or ("warning" in div_class):
mode = "alert-warning"
elif "important" in div_class:
mode = "alert-danger"
elif "tip" in div_class:
mode = "alert-info"
else:
mode = "alert-secondary"
div.attrs["class"] = div_class + ["alert", "pb-0", "mb-4", mode]
return str(soup)
def build_website(args):
logging.info("Building website")
env = util.init_jinja2_env(args)
shutil.copytree(
args.website_dir,
args.output_dir,
ignore=shutil.ignore_patterns(
"*.md",
"*.sh",
"*.css",
"*.json",
"js/*.js",
"build",
"docs",
"public",
"node_modules",
"src",
"templates",
"locale",
".gitkeep",
),
)
shutil.copytree(
os.path.join(args.website_dir, "images"),
os.path.join(args.output_dir, "docs", "images"),
)
shutil.copy2(
os.path.join(args.src_dir, "utils", "list-versions", "version_date.tsv"),
os.path.join(args.output_dir, "data", "version_date.tsv"),
)
shutil.copy2(
os.path.join(args.src_dir, "docs", "_includes", "install", "universal.sh"),
os.path.join(args.output_dir, "data", "install.sh"),
)
for root, _, filenames in os.walk(args.output_dir):
for filename in filenames:
if filename == "main.html":
continue
path = os.path.join(root, filename)
if not filename.endswith(".html"):
continue
logging.info("Processing %s", path)
with open(path, "rb") as f:
content = f.read().decode("utf-8")
template = env.from_string(content)
content = template.render(args.__dict__)
with open(path, "wb") as f:
f.write(content.encode("utf-8"))
def get_css_in(args):
return [
f"'{args.website_dir}/css/bootstrap.css'",
f"'{args.website_dir}/css/docsearch.css'",
f"'{args.website_dir}/css/base.css'",
f"'{args.website_dir}/css/blog.css'",
f"'{args.website_dir}/css/docs.css'",
f"'{args.website_dir}/css/highlight.css'",
f"'{args.website_dir}/css/main.css'",
]
def get_js_in(args):
return [
f"'{args.website_dir}/js/jquery.js'",
f"'{args.website_dir}/js/popper.js'",
f"'{args.website_dir}/js/bootstrap.js'",
f"'{args.website_dir}/js/sentry.js'",
f"'{args.website_dir}/js/base.js'",
f"'{args.website_dir}/js/index.js'",
f"'{args.website_dir}/js/docsearch.js'",
f"'{args.website_dir}/js/docs.js'",
f"'{args.website_dir}/js/main.js'",
]
def minify_website(args):
css_in = " ".join(get_css_in(args))
css_out = f"{args.output_dir}/docs/css/base.css"
os.makedirs(f"{args.output_dir}/docs/css")
command = f"cat {css_in}"
output = subprocess.check_output(command, shell=True)
with open(css_out, "wb+") as f:
f.write(output)
with open(css_out, "rb") as f:
css_digest = hashlib.sha3_224(f.read()).hexdigest()[0:8]
js_in = " ".join(get_js_in(args))
js_out = f"{args.output_dir}/docs/js/base.js"
os.makedirs(f"{args.output_dir}/docs/js")
command = f"cat {js_in}"
output = subprocess.check_output(command, shell=True)
with open(js_out, "wb+") as f:
f.write(output)
with open(js_out, "rb") as f:
js_digest = hashlib.sha3_224(f.read()).hexdigest()[0:8]
logging.info(js_digest)
def process_benchmark_results(args):
benchmark_root = os.path.join(args.website_dir, "benchmark")
required_keys = {
"dbms": ["result"],
"hardware": ["result", "system", "system_full", "kind"],
"versions": ["version", "system"],
}
for benchmark_kind in ["dbms", "hardware", "versions"]:
results = []
results_root = os.path.join(benchmark_root, benchmark_kind, "results")
for result in sorted(os.listdir(results_root)):
result_file = os.path.join(results_root, result)
logging.info(f"Reading benchmark result from {result_file}")
with open(result_file, "r") as f:
result = json.loads(f.read())
for item in result:
for required_key in required_keys[benchmark_kind]:
assert (
required_key in item
), f'No "{required_key}" in {result_file}'
results += result
results_js = os.path.join(
args.output_dir, "benchmark", benchmark_kind, "results.js"
)
with open(results_js, "w") as f:
data = json.dumps(results)
f.write(f"var results = {data};")
| true | true |
f73daa3b803904b24609179625d3defeaf2a42cf | 1,596 | py | Python | test/test_geometry/test_line2d.py | alisianoi/algos-py | c99747b2ce6976f2509fd183bf71040e6f988b77 | [
"MIT"
] | 6 | 2018-12-31T19:46:58.000Z | 2020-11-17T11:32:27.000Z | test/test_geometry/test_line2d.py | algos-all/algos-py | b275da8d9f6cdf63d854a2712ada08a069421a74 | [
"MIT"
] | 152 | 2018-01-05T00:08:38.000Z | 2020-12-12T22:37:52.000Z | test/test_geometry/test_line2d.py | alisianoi/algos-py | c99747b2ce6976f2509fd183bf71040e6f988b77 | [
"MIT"
] | null | null | null | import pytest
from src.geometry.line2d import standard_line
def test_standard_line_0():
# One point is not enough to form a line
for i in range(-10, 11):
for j in range(-10, 11):
A, B, C = standard_line(i, j, i, j)
assert A == 0 and B == 0 and C == 0
@pytest.mark.parametrize("points", [
[(0, 0), (1, 1)], [(1, 1), (0, 0)],
[(1, 1), (2, 2)], [(2, 2), (1, 1)],
[(100, 100), (200, 200)], [(200, 200), (100, 100)]
])
def test_standard_line_1(points):
point0, point1 = points
A, B, C = standard_line(
point0[0], point0[1], point1[0], point1[1]
)
for i in range(-10, 10):
assert i * A + i * B + C == 0
@pytest.mark.parametrize("points", [
[(0, 0), (1, -1)], [(1, -1), (0, 0)],
[(1, -1), (2, -2)], [(2, -2), (1, -1)],
[(100, -100), (200, -200)], [(200, -200), (100, -100)]
])
def test_standard_line_2(points):
point0, point1 = points
A, B, C = standard_line(
point0[0], point0[1], point1[0], point1[1]
)
for i in range(-10, 10):
assert i * A + (-i) * B + C == 0
def test_standard_line_3():
A, B, C = standard_line(0, 0, 0, 1)
assert A * 0 + B * 2 + C == 0
assert A * 0 + B * 101 + C == 0
assert A * 0 + B * (-101) + C == 0
def test_standard_line_4():
A, B, C = standard_line(0, 0, 1, 0)
assert A * 0 + B * 0 + C == 0
assert A * 101 + B * 0 + C == 0
assert A * (-101) + B * 0 + C == 0
def test_standard_line_5():
A, B, C = standard_line(0, 2, 10, 2)
assert A * 101 + B * 2 + C == 0
assert A *(-101) + B * 2 + C == 0
| 25.741935 | 58 | 0.489348 | import pytest
from src.geometry.line2d import standard_line
def test_standard_line_0():
for i in range(-10, 11):
for j in range(-10, 11):
A, B, C = standard_line(i, j, i, j)
assert A == 0 and B == 0 and C == 0
@pytest.mark.parametrize("points", [
[(0, 0), (1, 1)], [(1, 1), (0, 0)],
[(1, 1), (2, 2)], [(2, 2), (1, 1)],
[(100, 100), (200, 200)], [(200, 200), (100, 100)]
])
def test_standard_line_1(points):
point0, point1 = points
A, B, C = standard_line(
point0[0], point0[1], point1[0], point1[1]
)
for i in range(-10, 10):
assert i * A + i * B + C == 0
@pytest.mark.parametrize("points", [
[(0, 0), (1, -1)], [(1, -1), (0, 0)],
[(1, -1), (2, -2)], [(2, -2), (1, -1)],
[(100, -100), (200, -200)], [(200, -200), (100, -100)]
])
def test_standard_line_2(points):
point0, point1 = points
A, B, C = standard_line(
point0[0], point0[1], point1[0], point1[1]
)
for i in range(-10, 10):
assert i * A + (-i) * B + C == 0
def test_standard_line_3():
A, B, C = standard_line(0, 0, 0, 1)
assert A * 0 + B * 2 + C == 0
assert A * 0 + B * 101 + C == 0
assert A * 0 + B * (-101) + C == 0
def test_standard_line_4():
A, B, C = standard_line(0, 0, 1, 0)
assert A * 0 + B * 0 + C == 0
assert A * 101 + B * 0 + C == 0
assert A * (-101) + B * 0 + C == 0
def test_standard_line_5():
A, B, C = standard_line(0, 2, 10, 2)
assert A * 101 + B * 2 + C == 0
assert A *(-101) + B * 2 + C == 0
| true | true |
f73dab42c4962c6e911448f41f8488b03e796eea | 1,609 | py | Python | surf/script_tab.py | githmy/vnpymy | f6a172629f0961bea13e9f10c8fc47de225094ec | [
"MIT"
] | 1 | 2021-04-09T06:46:35.000Z | 2021-04-09T06:46:35.000Z | surf/script_tab.py | githmy/vnpymy | f6a172629f0961bea13e9f10c8fc47de225094ec | [
"MIT"
] | 1 | 2021-12-31T02:38:36.000Z | 2021-12-31T02:38:36.000Z | surf/script_tab.py | githmy/vnpymy | f6a172629f0961bea13e9f10c8fc47de225094ec | [
"MIT"
] | 1 | 2021-06-27T12:13:47.000Z | 2021-06-27T12:13:47.000Z | #
sector = [
"stock",
"fund"
]
keytab = {
"功能": [
"项目设置",
"重启",
"中止",
"数据处理",
"训练拆分",
"序列特征",
"数据运算",
"数据合并",
"数据复制",
"数据提取",
"训练拟合",
"数据预测",
"回测分析",
"图形展示",
],
"项目设置": {
"位置": "",
"结束提示": False,
},
"重启": {
"功能名": "数据处理",
"预清空": 0,
"排序号": 1
},
"中止": {"": None},
"数据处理": {
"输入数据": [],
"处理方法": [],
"输出前缀": "",
},
"训练拆分": {
"输入数据": [],
"处理方法": [],
"输出前缀": "",
},
"序列特征": {
"输入数据": [],
"输出前缀": "",
"处理方法": ["均值n", "标准差n", "涨幅比n", "回撤n", "最涨n", "夏普n"],
# 输出文件名 用方法的值 替换方法名的n,加 _
},
"数据合并": {
"输入数据": [],
"输出性能": []
},
"数据复制": {
"输入数据": [],
"输入前缀": ""
},
"数据运算": {
"输入数据": [],
"处理方法": ["+", "-", "*", "**", "/"],
"输出前缀": "",
"输出性能": []
},
"数据提取": {
"输入数据": [],
"处理方法": ["profit_all_annualized", "profit_trade_annualized", "胜率"],
"输出前缀": "",
"输出性能": []
},
"训练拟合": {
"输入数据": [],
"处理方法": [],
"输出模型": "",
"输出性能": []
},
"数据预测": {
"输入数据": [],
"输入模型": "",
"标签文件": "",
"处理方法": [],
"输出前缀": "",
},
"回测分析": {
"输入数据": [],
"处理方法": [],
"输出前缀": "",
"输出性能": []
},
"图形展示": {
"静默模式": 0,
"处理方法": {"序列特征": [], "预测回测": [], "回测统计": []},
"输出后缀": "png",
},
}
| 17.117021 | 75 | 0.252331 |
sector = [
"stock",
"fund"
]
keytab = {
"功能": [
"项目设置",
"重启",
"中止",
"数据处理",
"训练拆分",
"序列特征",
"数据运算",
"数据合并",
"数据复制",
"数据提取",
"训练拟合",
"数据预测",
"回测分析",
"图形展示",
],
"项目设置": {
"位置": "",
"结束提示": False,
},
"重启": {
"功能名": "数据处理",
"预清空": 0,
"排序号": 1
},
"中止": {"": None},
"数据处理": {
"输入数据": [],
"处理方法": [],
"输出前缀": "",
},
"训练拆分": {
"输入数据": [],
"处理方法": [],
"输出前缀": "",
},
"序列特征": {
"输入数据": [],
"输出前缀": "",
"处理方法": ["均值n", "标准差n", "涨幅比n", "回撤n", "最涨n", "夏普n"],
},
"数据合并": {
"输入数据": [],
"输出性能": []
},
"数据复制": {
"输入数据": [],
"输入前缀": ""
},
"数据运算": {
"输入数据": [],
"处理方法": ["+", "-", "*", "**", "/"],
"输出前缀": "",
"输出性能": []
},
"数据提取": {
"输入数据": [],
"处理方法": ["profit_all_annualized", "profit_trade_annualized", "胜率"],
"输出前缀": "",
"输出性能": []
},
"训练拟合": {
"输入数据": [],
"处理方法": [],
"输出模型": "",
"输出性能": []
},
"数据预测": {
"输入数据": [],
"输入模型": "",
"标签文件": "",
"处理方法": [],
"输出前缀": "",
},
"回测分析": {
"输入数据": [],
"处理方法": [],
"输出前缀": "",
"输出性能": []
},
"图形展示": {
"静默模式": 0,
"处理方法": {"序列特征": [], "预测回测": [], "回测统计": []},
"输出后缀": "png",
},
}
| true | true |
f73dac9bb61cd767e7d6b014f56eb13521640cbc | 8,590 | py | Python | viper/modules/pdf.py | Mario-Kart-Felix/mal-scrap | bc396a15ea5b144eb1c0f05759d1f9419d6671df | [
"BSD-3-Clause"
] | 2 | 2015-12-17T20:25:09.000Z | 2017-10-08T19:14:57.000Z | viper/modules/pdf.py | Mario-Kart-Felix/mal-scrap | bc396a15ea5b144eb1c0f05759d1f9419d6671df | [
"BSD-3-Clause"
] | 1 | 2015-01-05T18:07:13.000Z | 2015-01-07T21:43:57.000Z | viper/modules/pdf.py | Mario-Kart-Felix/mal-scrap | bc396a15ea5b144eb1c0f05759d1f9419d6671df | [
"BSD-3-Clause"
] | 3 | 2017-10-18T00:56:53.000Z | 2020-05-24T09:38:54.000Z | # -*- coding: utf-8 -*-
# This file is part of Viper - https://github.com/viper-framework/viper
# See the file 'LICENSE' for copying permission.
import os
import json
import tempfile
from viper.common.abstracts import Module
from viper.core.session import __sessions__
from .pdftools.pdfid import PDFiD, PDFiD2JSON
from .pdftools import (cPDFParser, PDF_ELEMENT_COMMENT, PDF_ELEMENT_INDIRECT_OBJECT,
PDF_ELEMENT_XREF, PDF_ELEMENT_TRAILER, PDF_ELEMENT_STARTXREF,
PDF_ELEMENT_MALFORMED, FormatOutput)
class PDF(Module):
cmd = 'pdf'
description = 'Parse and analyze PDF documents'
authors = ['Kevin Breen', 'nex']
def __init__(self):
super(PDF, self).__init__()
subparsers = self.parser.add_subparsers(dest='subname')
subparsers.add_parser('id', help='Show general information on the PDF')
parser_streams = subparsers.add_parser('streams', help='Extract stream objects from PDF')
parser_streams.add_argument('-d', '--dump', help='Destination directory to store resource files in')
parser_streams.add_argument('-o', '--open', help='Open a session on the specified resource')
parser_streams.add_argument('-s', '--show', help='Show the content of the specified resource')
def pdf_id(self):
# Run the parser - Returns an XML DOM Instance.
pdf_data = PDFiD(__sessions__.current.file.path, False, True)
# This converts to string.
# pdf_string = PDFiD2String(pdf_data, True)
# This converts to JSON.
pdf_json = PDFiD2JSON(pdf_data, True)
# Convert from string.
pdf = json.loads(pdf_json)[0]
# Get general info and format.
info = [
['PDF Header', pdf['pdfid']['header']],
['Total Entropy', pdf['pdfid']['totalEntropy']],
['Entropy In Streams', pdf['pdfid']['streamEntropy']],
['Entropy Out Streams', pdf['pdfid']['nonStreamEntropy']],
['Count %% EOF', pdf['pdfid']['countEof']],
['Data After EOF', pdf['pdfid']['countChatAfterLastEof']]
]
# If there are date sections lets get them as well.
dates = pdf['pdfid']['dates']['date']
for date in dates:
info.append([date['name'], date['value']])
# Get streams, counts and format.
streams = []
for stream in pdf['pdfid']['keywords']['keyword']:
streams.append([stream['name'], stream['count']])
self.log('info', "General Info:")
self.log('table', dict(header=['Desc', 'Value'], rows=info))
self.log('info', "Streams & Count:")
self.log('table', dict(header=['Name', 'Count'], rows=streams))
def streams(self):
def get_streams():
# Initialize pdf parser.
parser = cPDFParser(__sessions__.current.file.path)
# Generate statistics.
results = []
objects = []
oid = 0
while True:
pdf_object = parser.GetObject()
if pdf_object is None:
break
oid += 1
objects.append(pdf_object)
obj_type = pdf_object.type
obj_id = '/'
if obj_type == PDF_ELEMENT_STARTXREF:
obj_content = pdf_object.index
obj_type = 'STARTXREF'
elif obj_type == PDF_ELEMENT_COMMENT:
obj_content = pdf_object.comment.encode()
obj_type = 'COMMENT'
elif obj_type in (PDF_ELEMENT_MALFORMED, PDF_ELEMENT_TRAILER, PDF_ELEMENT_XREF,
PDF_ELEMENT_INDIRECT_OBJECT):
obj_content = dump_content(pdf_object.content)
if obj_type == PDF_ELEMENT_MALFORMED:
obj_type = 'MALFORMED'
elif obj_type == PDF_ELEMENT_TRAILER:
obj_type = 'TRAILER'
elif obj_type == PDF_ELEMENT_XREF:
obj_type = 'XREF'
elif obj_type == PDF_ELEMENT_INDIRECT_OBJECT:
obj_id = pdf_object.id
obj_type = pdf_object.GetType()
else:
# Can it happen?
continue
if isinstance(obj_content, int):
obj_len = 0
else:
obj_len = len(obj_content)
result = [oid, obj_id, obj_len, obj_type]
# If the stream needs to be dumped or opened, we do it
# and expand the results with the path to the stream dump.
if arg_open or arg_dump:
# If was instructed to dump, we already have a base folder.
if arg_dump:
folder = arg_dump
# Otherwise we juts generate a temporary one.
else:
folder = tempfile.gettempdir()
# Confirm the dump path
if not os.path.exists(folder):
try:
os.makedirs(folder)
except Exception as e:
self.log('error', "Unable to create directory at {0}: {1}".format(folder, e))
return results
else:
if not os.path.isdir(folder):
self.log('error', "You need to specify a folder not a file")
return results
if obj_len == 0:
continue
# Dump stream to this path.
dump_path = '{0}/{1}_{2}_pdf_stream.bin'.format(folder, __sessions__.current.file.md5, oid)
with open(dump_path, 'wb') as handle:
handle.write(obj_content)
# Add dump path to the stream attributes.
result.append(dump_path)
elif arg_show and int(arg_show) == int(oid):
to_print = FormatOutput(obj_content, True)
if isinstance(to_print, int):
self.log('info', to_print)
else:
self.log('info', to_print.decode())
if pdf_object.type == PDF_ELEMENT_INDIRECT_OBJECT and pdf_object.ContainsStream():
self.log('Success', 'Stream content:')
self.log('info', FormatOutput(pdf_object.Stream(True), True).decode())
# Update list of streams.
results.append(result)
return sorted(results, key=lambda x: int(x[0]))
def dump_content(data):
if isinstance(data, list):
return b''.join([x[1].encode() for x in data])
else:
return data.encode()
arg_open = self.args.open
arg_dump = self.args.dump
arg_show = self.args.show
# Retrieve list of streams.
streams = get_streams()
if not arg_show:
# Show list of streams.
header = ['#', 'Object ID', 'Size', 'Type']
if arg_dump or arg_open:
header.append('Dumped To')
self.log('table', dict(header=header, rows=streams))
# If the user requested to open a specific stream, we open a new
# session on it.
if arg_open:
for stream in streams:
if int(arg_open) == int(stream[0]):
__sessions__.new(stream[4])
return
def run(self):
super(PDF, self).run()
if self.args is None:
return
if not __sessions__.is_set():
self.log('error', "No open session")
return False
if 'PDF' not in __sessions__.current.file.type:
# A file with '%PDF' signature inside first 1024 bytes is a valid
# PDF file. magic lib doesn't detect it if there is an offset
header = __sessions__.current.file.data[:1024]
if b'%PDF' not in header:
self.log('error', "The opened file doesn't appear to be a PDF document")
return
if self.args.subname == 'id':
self.pdf_id()
elif self.args.subname == 'streams':
self.streams()
else:
self.log('error', 'At least one of the parameters is required')
self.usage()
| 39.40367 | 111 | 0.526542 |
import os
import json
import tempfile
from viper.common.abstracts import Module
from viper.core.session import __sessions__
from .pdftools.pdfid import PDFiD, PDFiD2JSON
from .pdftools import (cPDFParser, PDF_ELEMENT_COMMENT, PDF_ELEMENT_INDIRECT_OBJECT,
PDF_ELEMENT_XREF, PDF_ELEMENT_TRAILER, PDF_ELEMENT_STARTXREF,
PDF_ELEMENT_MALFORMED, FormatOutput)
class PDF(Module):
cmd = 'pdf'
description = 'Parse and analyze PDF documents'
authors = ['Kevin Breen', 'nex']
def __init__(self):
super(PDF, self).__init__()
subparsers = self.parser.add_subparsers(dest='subname')
subparsers.add_parser('id', help='Show general information on the PDF')
parser_streams = subparsers.add_parser('streams', help='Extract stream objects from PDF')
parser_streams.add_argument('-d', '--dump', help='Destination directory to store resource files in')
parser_streams.add_argument('-o', '--open', help='Open a session on the specified resource')
parser_streams.add_argument('-s', '--show', help='Show the content of the specified resource')
def pdf_id(self):
pdf_data = PDFiD(__sessions__.current.file.path, False, True)
pdf_json = PDFiD2JSON(pdf_data, True)
pdf = json.loads(pdf_json)[0]
info = [
['PDF Header', pdf['pdfid']['header']],
['Total Entropy', pdf['pdfid']['totalEntropy']],
['Entropy In Streams', pdf['pdfid']['streamEntropy']],
['Entropy Out Streams', pdf['pdfid']['nonStreamEntropy']],
['Count %% EOF', pdf['pdfid']['countEof']],
['Data After EOF', pdf['pdfid']['countChatAfterLastEof']]
]
dates = pdf['pdfid']['dates']['date']
for date in dates:
info.append([date['name'], date['value']])
streams = []
for stream in pdf['pdfid']['keywords']['keyword']:
streams.append([stream['name'], stream['count']])
self.log('info', "General Info:")
self.log('table', dict(header=['Desc', 'Value'], rows=info))
self.log('info', "Streams & Count:")
self.log('table', dict(header=['Name', 'Count'], rows=streams))
def streams(self):
def get_streams():
parser = cPDFParser(__sessions__.current.file.path)
results = []
objects = []
oid = 0
while True:
pdf_object = parser.GetObject()
if pdf_object is None:
break
oid += 1
objects.append(pdf_object)
obj_type = pdf_object.type
obj_id = '/'
if obj_type == PDF_ELEMENT_STARTXREF:
obj_content = pdf_object.index
obj_type = 'STARTXREF'
elif obj_type == PDF_ELEMENT_COMMENT:
obj_content = pdf_object.comment.encode()
obj_type = 'COMMENT'
elif obj_type in (PDF_ELEMENT_MALFORMED, PDF_ELEMENT_TRAILER, PDF_ELEMENT_XREF,
PDF_ELEMENT_INDIRECT_OBJECT):
obj_content = dump_content(pdf_object.content)
if obj_type == PDF_ELEMENT_MALFORMED:
obj_type = 'MALFORMED'
elif obj_type == PDF_ELEMENT_TRAILER:
obj_type = 'TRAILER'
elif obj_type == PDF_ELEMENT_XREF:
obj_type = 'XREF'
elif obj_type == PDF_ELEMENT_INDIRECT_OBJECT:
obj_id = pdf_object.id
obj_type = pdf_object.GetType()
else:
continue
if isinstance(obj_content, int):
obj_len = 0
else:
obj_len = len(obj_content)
result = [oid, obj_id, obj_len, obj_type]
if arg_open or arg_dump:
if arg_dump:
folder = arg_dump
else:
folder = tempfile.gettempdir()
if not os.path.exists(folder):
try:
os.makedirs(folder)
except Exception as e:
self.log('error', "Unable to create directory at {0}: {1}".format(folder, e))
return results
else:
if not os.path.isdir(folder):
self.log('error', "You need to specify a folder not a file")
return results
if obj_len == 0:
continue
dump_path = '{0}/{1}_{2}_pdf_stream.bin'.format(folder, __sessions__.current.file.md5, oid)
with open(dump_path, 'wb') as handle:
handle.write(obj_content)
result.append(dump_path)
elif arg_show and int(arg_show) == int(oid):
to_print = FormatOutput(obj_content, True)
if isinstance(to_print, int):
self.log('info', to_print)
else:
self.log('info', to_print.decode())
if pdf_object.type == PDF_ELEMENT_INDIRECT_OBJECT and pdf_object.ContainsStream():
self.log('Success', 'Stream content:')
self.log('info', FormatOutput(pdf_object.Stream(True), True).decode())
results.append(result)
return sorted(results, key=lambda x: int(x[0]))
def dump_content(data):
if isinstance(data, list):
return b''.join([x[1].encode() for x in data])
else:
return data.encode()
arg_open = self.args.open
arg_dump = self.args.dump
arg_show = self.args.show
streams = get_streams()
if not arg_show:
header = ['#', 'Object ID', 'Size', 'Type']
if arg_dump or arg_open:
header.append('Dumped To')
self.log('table', dict(header=header, rows=streams))
if arg_open:
for stream in streams:
if int(arg_open) == int(stream[0]):
__sessions__.new(stream[4])
return
def run(self):
super(PDF, self).run()
if self.args is None:
return
if not __sessions__.is_set():
self.log('error', "No open session")
return False
if 'PDF' not in __sessions__.current.file.type:
header = __sessions__.current.file.data[:1024]
if b'%PDF' not in header:
self.log('error', "The opened file doesn't appear to be a PDF document")
return
if self.args.subname == 'id':
self.pdf_id()
elif self.args.subname == 'streams':
self.streams()
else:
self.log('error', 'At least one of the parameters is required')
self.usage()
| true | true |
f73daccb645210cdbd0b3f1890cb813ecec2676b | 18,855 | py | Python | mmseg/models/decode_heads/knet_head.py | rehohoho/mmsegmentation | a73ae7a421e07741fda62c9d81b335cbc4b7f7d6 | [
"Apache-2.0"
] | 1 | 2022-03-07T19:46:03.000Z | 2022-03-07T19:46:03.000Z | mmseg/models/decode_heads/knet_head.py | rehohoho/mmsegmentation | a73ae7a421e07741fda62c9d81b335cbc4b7f7d6 | [
"Apache-2.0"
] | 2 | 2022-02-25T03:07:23.000Z | 2022-03-08T12:54:05.000Z | mmseg/models/decode_heads/knet_head.py | rehohoho/mmsegmentation | a73ae7a421e07741fda62c9d81b335cbc4b7f7d6 | [
"Apache-2.0"
] | 1 | 2022-01-04T01:16:12.000Z | 2022-01-04T01:16:12.000Z | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, build_activation_layer, build_norm_layer
from mmcv.cnn.bricks.transformer import (FFN, TRANSFORMER_LAYER,
MultiheadAttention,
build_transformer_layer)
from mmseg.models.builder import HEADS, build_head
from mmseg.models.decode_heads.decode_head import BaseDecodeHead
from mmseg.utils import get_root_logger
@TRANSFORMER_LAYER.register_module()
class KernelUpdator(nn.Module):
"""Dynamic Kernel Updator in Kernel Update Head.
Args:
in_channels (int): The number of channels of input feature map.
Default: 256.
feat_channels (int): The number of middle-stage channels in
the kernel updator. Default: 64.
out_channels (int): The number of output channels.
gate_sigmoid (bool): Whether use sigmoid function in gate
mechanism. Default: True.
gate_norm_act (bool): Whether add normalization and activation
layer in gate mechanism. Default: False.
activate_out: Whether add activation after gate mechanism.
Default: False.
norm_cfg (dict | None): Config of norm layers.
Default: dict(type='LN').
act_cfg (dict): Config of activation layers.
Default: dict(type='ReLU').
"""
def __init__(
self,
in_channels=256,
feat_channels=64,
out_channels=None,
gate_sigmoid=True,
gate_norm_act=False,
activate_out=False,
norm_cfg=dict(type='LN'),
act_cfg=dict(type='ReLU', inplace=True),
):
super(KernelUpdator, self).__init__()
self.in_channels = in_channels
self.feat_channels = feat_channels
self.out_channels_raw = out_channels
self.gate_sigmoid = gate_sigmoid
self.gate_norm_act = gate_norm_act
self.activate_out = activate_out
self.act_cfg = act_cfg
self.norm_cfg = norm_cfg
self.out_channels = out_channels if out_channels else in_channels
self.num_params_in = self.feat_channels
self.num_params_out = self.feat_channels
self.dynamic_layer = nn.Linear(
self.in_channels, self.num_params_in + self.num_params_out)
self.input_layer = nn.Linear(self.in_channels,
self.num_params_in + self.num_params_out,
1)
self.input_gate = nn.Linear(self.in_channels, self.feat_channels, 1)
self.update_gate = nn.Linear(self.in_channels, self.feat_channels, 1)
if self.gate_norm_act:
self.gate_norm = build_norm_layer(norm_cfg, self.feat_channels)[1]
self.norm_in = build_norm_layer(norm_cfg, self.feat_channels)[1]
self.norm_out = build_norm_layer(norm_cfg, self.feat_channels)[1]
self.input_norm_in = build_norm_layer(norm_cfg, self.feat_channels)[1]
self.input_norm_out = build_norm_layer(norm_cfg, self.feat_channels)[1]
self.activation = build_activation_layer(act_cfg)
self.fc_layer = nn.Linear(self.feat_channels, self.out_channels, 1)
self.fc_norm = build_norm_layer(norm_cfg, self.out_channels)[1]
    def forward(self, update_feature, input_feature):
        """Forward function of KernelUpdator.

        Args:
            update_feature (torch.Tensor): Feature map assembled from
                each group. It would be reshaped with last dimension
                shape: `self.in_channels`.
            input_feature (torch.Tensor): Intermediate feature
                with shape: (N, num_classes, conv_kernel_size**2, channels).

        Returns:
            Tensor: The output tensor of shape (N*C1/C2, K*K, C2), where N is
            the number of classes, C1 and C2 are the feature map channels of
            KernelUpdateHead and KernelUpdator, respectively.
        """
        # Flatten all leading dims: one row per proposal/kernel.
        update_feature = update_feature.reshape(-1, self.in_channels)
        num_proposals = update_feature.size(0)
        # dynamic_layer works for
        # phi_1 and psi_3 in Eq.(4) and (5) of K-Net paper
        parameters = self.dynamic_layer(update_feature)
        param_in = parameters[:, :self.num_params_in].view(
            -1, self.feat_channels)
        param_out = parameters[:, -self.num_params_out:].view(
            -1, self.feat_channels)
        # input_layer works for
        # phi_2 and psi_4 in Eq.(4) and (5) of K-Net paper
        input_feats = self.input_layer(
            input_feature.reshape(num_proposals, -1, self.feat_channels))
        input_in = input_feats[..., :self.num_params_in]
        input_out = input_feats[..., -self.num_params_out:]
        # `gate_feats` is F^G in K-Net paper
        gate_feats = input_in * param_in.unsqueeze(-2)
        if self.gate_norm_act:
            gate_feats = self.activation(self.gate_norm(gate_feats))
        input_gate = self.input_norm_in(self.input_gate(gate_feats))
        update_gate = self.norm_in(self.update_gate(gate_feats))
        if self.gate_sigmoid:
            input_gate = input_gate.sigmoid()
            update_gate = update_gate.sigmoid()
        param_out = self.norm_out(param_out)
        input_out = self.input_norm_out(input_out)
        if self.activate_out:
            param_out = self.activation(param_out)
            input_out = self.activation(input_out)
        # Gate mechanism. Eq.(5) in original paper.
        # param_out has shape (batch_size, feat_channels, out_channels)
        features = update_gate * param_out.unsqueeze(
            -2) + input_gate * input_out
        # Project the fused feature to out_channels: Linear -> LN -> act.
        features = self.fc_layer(features)
        features = self.fc_norm(features)
        features = self.activation(features)
        return features
@HEADS.register_module()
class KernelUpdateHead(nn.Module):
    """Kernel Update Head in K-Net.

    Args:
        num_classes (int): Number of classes. Default: 150.
        num_ffn_fcs (int): The number of fully-connected layers in
            FFNs. Default: 2.
        num_heads (int): The number of parallel attention heads.
            Default: 8.
        num_mask_fcs (int): The number of fully connected layers for
            mask prediction. Default: 3.
        feedforward_channels (int): The hidden dimension of FFNs.
            Defaults: 2048.
        in_channels (int): The number of channels of input feature map.
            Default: 256.
        out_channels (int): The number of output channels.
            Default: 256.
        dropout (float): The Probability of an element to be
            zeroed in MultiheadAttention and FFN. Default 0.0.
        act_cfg (dict): Config of activation layers.
            Default: dict(type='ReLU').
        ffn_act_cfg (dict): Config of activation layers in FFN.
            Default: dict(type='ReLU').
        conv_kernel_size (int): The kernel size of convolution in
            Kernel Update Head for dynamic kernel updation.
            Default: 1.
        feat_transform_cfg (dict | None): Config of feature transform.
            Default: None.
        kernel_init (bool): Whether initiate mask kernel in mask head.
            Default: False.
        with_ffn (bool): Whether add FFN in kernel update head.
            Default: True.
        feat_gather_stride (int): Stride of convolution in feature transform.
            Default: 1.
        mask_transform_stride (int): Stride of mask transform.
            Default: 1.
        kernel_updator_cfg (dict): Config of kernel updator.
            Default: dict(
                type='DynamicConv',
                in_channels=256,
                feat_channels=64,
                out_channels=256,
                act_cfg=dict(type='ReLU', inplace=True),
                norm_cfg=dict(type='LN')).
    """

    def __init__(self,
                 num_classes=150,
                 num_ffn_fcs=2,
                 num_heads=8,
                 num_mask_fcs=3,
                 feedforward_channels=2048,
                 in_channels=256,
                 out_channels=256,
                 dropout=0.0,
                 act_cfg=dict(type='ReLU', inplace=True),
                 ffn_act_cfg=dict(type='ReLU', inplace=True),
                 conv_kernel_size=1,
                 feat_transform_cfg=None,
                 kernel_init=False,
                 with_ffn=True,
                 feat_gather_stride=1,
                 mask_transform_stride=1,
                 kernel_updator_cfg=dict(
                     type='DynamicConv',
                     in_channels=256,
                     feat_channels=64,
                     out_channels=256,
                     act_cfg=dict(type='ReLU', inplace=True),
                     norm_cfg=dict(type='LN'))):
        super(KernelUpdateHead, self).__init__()
        self.num_classes = num_classes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.fp16_enabled = False
        self.dropout = dropout
        self.num_heads = num_heads
        self.kernel_init = kernel_init
        self.with_ffn = with_ffn
        self.conv_kernel_size = conv_kernel_size
        self.feat_gather_stride = feat_gather_stride
        self.mask_transform_stride = mask_transform_stride

        # Self-attention across kernels; embedding dim is the flattened
        # kernel size C * K * K.
        self.attention = MultiheadAttention(in_channels * conv_kernel_size**2,
                                            num_heads, dropout)
        self.attention_norm = build_norm_layer(
            dict(type='LN'), in_channels * conv_kernel_size**2)[1]
        # Adaptive kernel update module (e.g. KernelUpdator above).
        self.kernel_update_conv = build_transformer_layer(kernel_updator_cfg)

        if feat_transform_cfg is not None:
            kernel_size = feat_transform_cfg.pop('kernel_size', 1)
            transform_channels = in_channels
            self.feat_transform = ConvModule(
                transform_channels,
                in_channels,
                kernel_size,
                stride=feat_gather_stride,
                padding=int(feat_gather_stride // 2),
                **feat_transform_cfg)
        else:
            self.feat_transform = None

        if self.with_ffn:
            self.ffn = FFN(
                in_channels,
                feedforward_channels,
                num_ffn_fcs,
                act_cfg=ffn_act_cfg,
                dropout=dropout)
            self.ffn_norm = build_norm_layer(dict(type='LN'), in_channels)[1]

        # Per-kernel MLP (Linear -> LN -> activation, repeated) that
        # produces the mask feature fed into fc_mask.
        self.mask_fcs = nn.ModuleList()
        for _ in range(num_mask_fcs):
            self.mask_fcs.append(
                nn.Linear(in_channels, in_channels, bias=False))
            self.mask_fcs.append(
                build_norm_layer(dict(type='LN'), in_channels)[1])
            self.mask_fcs.append(build_activation_layer(act_cfg))
        self.fc_mask = nn.Linear(in_channels, out_channels)

    def init_weights(self):
        """Use xavier initialization for all weight parameter and set
        classification head bias as a specific value when use focal loss."""
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)
            else:
                # adopt the default initialization for
                # the weight and bias of the layer norm
                pass
        if self.kernel_init:
            logger = get_root_logger()
            logger.info(
                'mask kernel in mask head is normal initialized by std 0.01')
            nn.init.normal_(self.fc_mask.weight, mean=0, std=0.01)

    def forward(self, x, proposal_feat, mask_preds, mask_shape=None):
        """Forward function of Dynamic Instance Interactive Head.

        Args:
            x (Tensor): Feature map from FPN with shape
                (batch_size, feature_dimensions, H , W).
            proposal_feat (Tensor): Intermediate feature get from
                diihead in last stage, has shape
                (batch_size, num_proposals, feature_dimensions)
            mask_preds (Tensor): mask prediction from the former stage in shape
                (batch_size, num_proposals, H, W).

        Returns:
            Tuple: The first tensor is predicted mask with shape
            (N, num_classes, H, W), the second tensor is dynamic kernel
            with shape (N, num_classes, channels, K, K).
        """
        N, num_proposals = proposal_feat.shape[:2]
        if self.feat_transform is not None:
            x = self.feat_transform(x)
        C, H, W = x.shape[-3:]

        # Resize the previous-stage masks to the feature resolution so they
        # can weight the feature assembling below.
        mask_h, mask_w = mask_preds.shape[-2:]
        if mask_h != H or mask_w != W:
            gather_mask = F.interpolate(
                mask_preds, (H, W), align_corners=False, mode='bilinear')
        else:
            gather_mask = mask_preds
        # NOTE(review): despite the name, a channel-wise softmax is applied
        # here (not a sigmoid) -- presumably the semantic-segmentation
        # variant of K-Net; confirm against upstream before changing.
        sigmoid_masks = gather_mask.softmax(dim=1)

        # Group Feature Assembling. Eq.(3) in original paper.
        # einsum is faster than bmm by 30%
        x_feat = torch.einsum('bnhw,bchw->bnc', sigmoid_masks, x)

        # obj_feat in shape [B, N, C, K, K] -> [B, N, C, K*K] -> [B, N, K*K, C]
        proposal_feat = proposal_feat.reshape(N, num_proposals,
                                              self.in_channels,
                                              -1).permute(0, 1, 3, 2)
        obj_feat = self.kernel_update_conv(x_feat, proposal_feat)

        # [B, N, K*K, C] -> [B, N, K*K*C] -> [N, B, K*K*C]
        obj_feat = obj_feat.reshape(N, num_proposals, -1).permute(1, 0, 2)
        obj_feat = self.attention_norm(self.attention(obj_feat))
        # [N, B, K*K*C] -> [B, N, K*K*C]
        obj_feat = obj_feat.permute(1, 0, 2)

        # obj_feat in shape [B, N, K*K*C] -> [B, N, K*K, C]
        obj_feat = obj_feat.reshape(N, num_proposals, -1, self.in_channels)

        # FFN
        if self.with_ffn:
            obj_feat = self.ffn_norm(self.ffn(obj_feat))

        mask_feat = obj_feat
        for reg_layer in self.mask_fcs:
            mask_feat = reg_layer(mask_feat)

        # [B, N, K*K, C] -> [B, N, C, K*K]
        mask_feat = self.fc_mask(mask_feat).permute(0, 1, 3, 2)

        if (self.mask_transform_stride == 2 and self.feat_gather_stride == 1):
            mask_x = F.interpolate(
                x, scale_factor=0.5, mode='bilinear', align_corners=False)
            H, W = mask_x.shape[-2:]
        else:
            mask_x = x
        # group conv is 5x faster than unfold and uses about 1/5 memory
        # Group conv vs. unfold vs. concat batch, 2.9ms :13.5ms :3.8ms
        # Group conv vs. unfold vs. concat batch, 278 : 1420 : 369
        # but in real training group conv is slower than concat batch
        # so we keep using concat batch.
        # fold_x = F.unfold(
        #     mask_x,
        #     self.conv_kernel_size,
        #     padding=int(self.conv_kernel_size // 2))
        # mask_feat = mask_feat.reshape(N, num_proposals, -1)
        # new_mask_preds = torch.einsum('bnc,bcl->bnl', mask_feat, fold_x)
        # [B, N, C, K*K] -> [B*N, C, K, K]
        mask_feat = mask_feat.reshape(N, num_proposals, C,
                                      self.conv_kernel_size,
                                      self.conv_kernel_size)
        # [B, C, H, W] -> [1, B*C, H, W]
        # Convolve each image's features with its own predicted kernels.
        new_mask_preds = []
        for i in range(N):
            new_mask_preds.append(
                F.conv2d(
                    mask_x[i:i + 1],
                    mask_feat[i],
                    padding=int(self.conv_kernel_size // 2)))

        new_mask_preds = torch.cat(new_mask_preds, dim=0)
        new_mask_preds = new_mask_preds.reshape(N, num_proposals, H, W)
        if self.mask_transform_stride == 2:
            new_mask_preds = F.interpolate(
                new_mask_preds,
                scale_factor=2,
                mode='bilinear',
                align_corners=False)

        if mask_shape is not None and mask_shape[0] != H:
            new_mask_preds = F.interpolate(
                new_mask_preds,
                mask_shape,
                align_corners=False,
                mode='bilinear')

        # Return refined masks plus the updated kernels reshaped back to
        # [B, N, C, K, K] for the next stage.
        return new_mask_preds, obj_feat.permute(0, 1, 3, 2).reshape(
            N, num_proposals, self.in_channels, self.conv_kernel_size,
            self.conv_kernel_size)
@HEADS.register_module()
class IterativeDecodeHead(BaseDecodeHead):
    """K-Net: Towards Unified Image Segmentation.

    This head is the implementation of
    `K-Net: <https://arxiv.org/abs/2106.14855>`_.

    Args:
        num_stages (int): The number of stages (kernel update heads)
            in IterativeDecodeHead. Default: 3.
        kernel_generate_head:(dict): Config of kernel generate head which
            generate mask predictions, dynamic kernels and class predictions
            for next kernel update heads.
        kernel_update_head (dict): Config of kernel update head which refine
            dynamic kernels and class predictions iteratively.
    """

    def __init__(self, num_stages, kernel_generate_head, kernel_update_head,
                 **kwargs):
        # ``super(BaseDecodeHead, self)`` deliberately skips
        # BaseDecodeHead.__init__ and calls the next class in the MRO
        # (nn.Module) directly; the attributes BaseDecodeHead would normally
        # set are mirrored from the generate head below instead.
        super(BaseDecodeHead, self).__init__(**kwargs)
        assert num_stages == len(kernel_update_head)
        self.num_stages = num_stages
        self.kernel_generate_head = build_head(kernel_generate_head)
        self.kernel_update_head = nn.ModuleList()
        # Mirror the generate head's decode-head attributes so this head
        # behaves like a regular BaseDecodeHead to the segmentor.
        self.align_corners = self.kernel_generate_head.align_corners
        self.num_classes = self.kernel_generate_head.num_classes
        self.input_transform = self.kernel_generate_head.input_transform
        self.ignore_index = self.kernel_generate_head.ignore_index
        for head_cfg in kernel_update_head:
            self.kernel_update_head.append(build_head(head_cfg))

    def forward(self, inputs):
        """Forward function."""
        feats = self.kernel_generate_head._forward_feature(inputs)
        sem_seg = self.kernel_generate_head.cls_seg(feats)
        # The conv_seg weights serve as the initial kernels; broadcast one
        # copy per image in the batch.
        seg_kernels = self.kernel_generate_head.conv_seg.weight.clone()
        seg_kernels = seg_kernels[None].expand(
            feats.size(0), *seg_kernels.size())
        stage_segs = [sem_seg]
        for i in range(self.num_stages):
            sem_seg, seg_kernels = self.kernel_update_head[i](feats,
                                                              seg_kernels,
                                                              sem_seg)
            stage_segs.append(sem_seg)
        if self.training:
            return stage_segs
        # only return the prediction of the last stage during testing
        return stage_segs[-1]

    def losses(self, seg_logit, seg_label):
        # One loss dict per stage, with keys suffixed '.s{i}' so stages are
        # distinguishable in the training log.
        losses = dict()
        for i, logit in enumerate(seg_logit):
            loss = self.kernel_generate_head.losses(logit, seg_label)
            for k, v in loss.items():
                losses[f'{k}.s{i}'] = v
        return losses
| 41.530837 | 79 | 0.596022 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, build_activation_layer, build_norm_layer
from mmcv.cnn.bricks.transformer import (FFN, TRANSFORMER_LAYER,
MultiheadAttention,
build_transformer_layer)
from mmseg.models.builder import HEADS, build_head
from mmseg.models.decode_heads.decode_head import BaseDecodeHead
from mmseg.utils import get_root_logger
@TRANSFORMER_LAYER.register_module()
class KernelUpdator(nn.Module):
    """Adaptive kernel updator used by K-Net style heads.

    Fuses a per-group assembled feature (``update_feature``) with the
    current kernel feature (``input_feature``) through a gated update.

    Args:
        in_channels (int): Channels of the input feature map. Default: 256.
        feat_channels (int): Internal channels of the updator. Default: 64.
        out_channels (int | None): Output channels; falls back to
            ``in_channels`` when falsy. Default: None.
        gate_sigmoid (bool): Apply sigmoid to the two gates. Default: True.
        gate_norm_act (bool): Normalize and activate the gate feature.
            Default: False.
        activate_out (bool): Activate param_out/input_out before gating.
            Default: False.
        norm_cfg (dict | None): Config of norm layers. Default: dict(type='LN').
        act_cfg (dict): Config of activation layers. Default: dict(type='ReLU').
    """

    def __init__(
        self,
        in_channels=256,
        feat_channels=64,
        out_channels=None,
        gate_sigmoid=True,
        gate_norm_act=False,
        activate_out=False,
        norm_cfg=dict(type='LN'),
        act_cfg=dict(type='ReLU', inplace=True),
    ):
        super(KernelUpdator, self).__init__()
        self.in_channels = in_channels
        self.feat_channels = feat_channels
        self.out_channels_raw = out_channels
        self.gate_sigmoid = gate_sigmoid
        self.gate_norm_act = gate_norm_act
        self.activate_out = activate_out
        self.act_cfg = act_cfg
        self.norm_cfg = norm_cfg
        self.out_channels = out_channels if out_channels else in_channels
        # Both parameter splits produced by dynamic_layer/input_layer have
        # feat_channels dimensions.
        self.num_params_in = self.feat_channels
        self.num_params_out = self.feat_channels
        self.dynamic_layer = nn.Linear(
            self.in_channels, self.num_params_in + self.num_params_out)
        # NOTE(review): the trailing ``1`` in the nn.Linear calls below is
        # passed as the (truthy) ``bias`` argument -- same as the default
        # bias=True, so harmless, but confusing to read.
        self.input_layer = nn.Linear(self.in_channels,
                                     self.num_params_in + self.num_params_out,
                                     1)
        self.input_gate = nn.Linear(self.in_channels, self.feat_channels, 1)
        self.update_gate = nn.Linear(self.in_channels, self.feat_channels, 1)
        if self.gate_norm_act:
            self.gate_norm = build_norm_layer(norm_cfg, self.feat_channels)[1]
        self.norm_in = build_norm_layer(norm_cfg, self.feat_channels)[1]
        self.norm_out = build_norm_layer(norm_cfg, self.feat_channels)[1]
        self.input_norm_in = build_norm_layer(norm_cfg, self.feat_channels)[1]
        self.input_norm_out = build_norm_layer(norm_cfg, self.feat_channels)[1]
        self.activation = build_activation_layer(act_cfg)
        # Final projection back to out_channels after the gated fusion.
        self.fc_layer = nn.Linear(self.feat_channels, self.out_channels, 1)
        self.fc_norm = build_norm_layer(norm_cfg, self.out_channels)[1]

    def forward(self, update_feature, input_feature):
        """Gated fusion of the assembled feature with the kernel feature.

        Args:
            update_feature (torch.Tensor): Assembled group feature; reshaped
                so the last dimension is ``self.in_channels``.
            input_feature (torch.Tensor): Current kernel feature with shape
                (N, num_kernels, K*K, channels).

        Returns:
            Tensor: Updated kernel feature of shape (rows, K*K, out_channels).
        """
        update_feature = update_feature.reshape(-1, self.in_channels)
        num_proposals = update_feature.size(0)
        # Parameters generated from the assembled feature, split into the
        # "in" (gating) and "out" (content) halves.
        parameters = self.dynamic_layer(update_feature)
        param_in = parameters[:, :self.num_params_in].view(
            -1, self.feat_channels)
        param_out = parameters[:, -self.num_params_out:].view(
            -1, self.feat_channels)
        # Parameters generated from the current kernel feature.
        input_feats = self.input_layer(
            input_feature.reshape(num_proposals, -1, self.feat_channels))
        input_in = input_feats[..., :self.num_params_in]
        input_out = input_feats[..., -self.num_params_out:]
        # Gate feature combining both sources.
        gate_feats = input_in * param_in.unsqueeze(-2)
        if self.gate_norm_act:
            gate_feats = self.activation(self.gate_norm(gate_feats))
        input_gate = self.input_norm_in(self.input_gate(gate_feats))
        update_gate = self.norm_in(self.update_gate(gate_feats))
        if self.gate_sigmoid:
            input_gate = input_gate.sigmoid()
            update_gate = update_gate.sigmoid()
        param_out = self.norm_out(param_out)
        input_out = self.input_norm_out(input_out)
        if self.activate_out:
            param_out = self.activation(param_out)
            input_out = self.activation(input_out)
        # Gated combination of the two content paths.
        features = update_gate * param_out.unsqueeze(
            -2) + input_gate * input_out
        features = self.fc_layer(features)
        features = self.fc_norm(features)
        features = self.activation(features)
        return features
@HEADS.register_module()
class KernelUpdateHead(nn.Module):
    """Kernel Update Head in K-Net.

    Refines dynamic kernels and their mask predictions for one stage:
    assembles per-kernel group features from the previous masks, updates
    the kernels (KernelUpdator + self-attention + FFN), then convolves the
    feature map with the refined kernels to produce new masks.

    Args:
        num_classes (int): Number of classes. Default: 150.
        num_ffn_fcs (int): Fully-connected layers in the FFN. Default: 2.
        num_heads (int): Parallel attention heads. Default: 8.
        num_mask_fcs (int): FC layers for mask prediction. Default: 3.
        feedforward_channels (int): Hidden dimension of the FFN.
            Default: 2048.
        in_channels (int): Channels of the input feature map. Default: 256.
        out_channels (int): Output channels. Default: 256.
        dropout (float): Dropout in MultiheadAttention and FFN. Default: 0.0.
        act_cfg (dict): Activation config. Default: dict(type='ReLU').
        ffn_act_cfg (dict): Activation config of the FFN.
            Default: dict(type='ReLU').
        conv_kernel_size (int): Kernel size K of the dynamic kernels.
            Default: 1.
        feat_transform_cfg (dict | None): Optional feature-transform conv
            config. Default: None.
        kernel_init (bool): Normal-init the mask kernel. Default: False.
        with_ffn (bool): Add an FFN after attention. Default: True.
        feat_gather_stride (int): Stride of the feature transform conv.
            Default: 1.
        mask_transform_stride (int): Stride of the mask transform.
            Default: 1.
        kernel_updator_cfg (dict): Config of the kernel updator.
    """

    def __init__(self,
                 num_classes=150,
                 num_ffn_fcs=2,
                 num_heads=8,
                 num_mask_fcs=3,
                 feedforward_channels=2048,
                 in_channels=256,
                 out_channels=256,
                 dropout=0.0,
                 act_cfg=dict(type='ReLU', inplace=True),
                 ffn_act_cfg=dict(type='ReLU', inplace=True),
                 conv_kernel_size=1,
                 feat_transform_cfg=None,
                 kernel_init=False,
                 with_ffn=True,
                 feat_gather_stride=1,
                 mask_transform_stride=1,
                 kernel_updator_cfg=dict(
                     type='DynamicConv',
                     in_channels=256,
                     feat_channels=64,
                     out_channels=256,
                     act_cfg=dict(type='ReLU', inplace=True),
                     norm_cfg=dict(type='LN'))):
        super(KernelUpdateHead, self).__init__()
        self.num_classes = num_classes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.fp16_enabled = False
        self.dropout = dropout
        self.num_heads = num_heads
        self.kernel_init = kernel_init
        self.with_ffn = with_ffn
        self.conv_kernel_size = conv_kernel_size
        self.feat_gather_stride = feat_gather_stride
        self.mask_transform_stride = mask_transform_stride

        # Self-attention over kernels; embedding dim = C * K * K.
        self.attention = MultiheadAttention(in_channels * conv_kernel_size**2,
                                            num_heads, dropout)
        self.attention_norm = build_norm_layer(
            dict(type='LN'), in_channels * conv_kernel_size**2)[1]
        self.kernel_update_conv = build_transformer_layer(kernel_updator_cfg)

        if feat_transform_cfg is not None:
            kernel_size = feat_transform_cfg.pop('kernel_size', 1)
            transform_channels = in_channels
            self.feat_transform = ConvModule(
                transform_channels,
                in_channels,
                kernel_size,
                stride=feat_gather_stride,
                padding=int(feat_gather_stride // 2),
                **feat_transform_cfg)
        else:
            self.feat_transform = None

        if self.with_ffn:
            self.ffn = FFN(
                in_channels,
                feedforward_channels,
                num_ffn_fcs,
                act_cfg=ffn_act_cfg,
                dropout=dropout)
            self.ffn_norm = build_norm_layer(dict(type='LN'), in_channels)[1]

        # Linear -> LN -> activation stack producing the mask feature.
        self.mask_fcs = nn.ModuleList()
        for _ in range(num_mask_fcs):
            self.mask_fcs.append(
                nn.Linear(in_channels, in_channels, bias=False))
            self.mask_fcs.append(
                build_norm_layer(dict(type='LN'), in_channels)[1])
            self.mask_fcs.append(build_activation_layer(act_cfg))
        self.fc_mask = nn.Linear(in_channels, out_channels)

    def init_weights(self):
        """Xavier-init all matrix weights; optionally normal-init fc_mask."""
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)
            else:
                # Keep the default init for 1-D params (e.g. LayerNorm).
                pass
        if self.kernel_init:
            logger = get_root_logger()
            logger.info(
                'mask kernel in mask head is normal initialized by std 0.01')
            nn.init.normal_(self.fc_mask.weight, mean=0, std=0.01)

    def forward(self, x, proposal_feat, mask_preds, mask_shape=None):
        """Refine kernels and mask predictions for one stage.

        Args:
            x (Tensor): Feature map, shape (B, C, H, W).
            proposal_feat (Tensor): Kernels from the previous stage, shape
                (B, num_proposals, C, K, K) before flattening.
            mask_preds (Tensor): Previous-stage mask logits, shape
                (B, num_proposals, H', W').
            mask_shape (tuple | None): Optional output (H, W) to resize to.

        Returns:
            tuple: (new mask predictions, updated kernels of shape
            (B, num_proposals, C, K, K)).
        """
        N, num_proposals = proposal_feat.shape[:2]
        if self.feat_transform is not None:
            x = self.feat_transform(x)
        C, H, W = x.shape[-3:]

        # Resize previous masks to the feature resolution.
        mask_h, mask_w = mask_preds.shape[-2:]
        if mask_h != H or mask_w != W:
            gather_mask = F.interpolate(
                mask_preds, (H, W), align_corners=False, mode='bilinear')
        else:
            gather_mask = mask_preds
        # NOTE(review): a channel-wise softmax is used despite the variable
        # name -- presumably the semantic-segmentation K-Net variant.
        sigmoid_masks = gather_mask.softmax(dim=1)

        # Group feature assembling: mask-weighted pooling of x per kernel.
        x_feat = torch.einsum('bnhw,bchw->bnc', sigmoid_masks, x)

        # [B, N, C, K, K] -> [B, N, K*K, C]
        proposal_feat = proposal_feat.reshape(N, num_proposals,
                                              self.in_channels,
                                              -1).permute(0, 1, 3, 2)
        obj_feat = self.kernel_update_conv(x_feat, proposal_feat)

        # [B, N, K*K, C] -> [N, B, K*K*C] for MultiheadAttention.
        obj_feat = obj_feat.reshape(N, num_proposals, -1).permute(1, 0, 2)
        obj_feat = self.attention_norm(self.attention(obj_feat))
        # Back to [B, N, K*K*C].
        obj_feat = obj_feat.permute(1, 0, 2)

        # [B, N, K*K*C] -> [B, N, K*K, C]
        obj_feat = obj_feat.reshape(N, num_proposals, -1, self.in_channels)

        if self.with_ffn:
            obj_feat = self.ffn_norm(self.ffn(obj_feat))

        mask_feat = obj_feat
        for reg_layer in self.mask_fcs:
            mask_feat = reg_layer(mask_feat)

        # [B, N, K*K, C] -> [B, N, C, K*K]
        mask_feat = self.fc_mask(mask_feat).permute(0, 1, 3, 2)

        if (self.mask_transform_stride == 2 and self.feat_gather_stride == 1):
            mask_x = F.interpolate(
                x, scale_factor=0.5, mode='bilinear', align_corners=False)
            H, W = mask_x.shape[-2:]
        else:
            mask_x = x
        # [B, N, C, K*K] -> [B, N, C, K, K]: one conv kernel per proposal.
        mask_feat = mask_feat.reshape(N, num_proposals, C,
                                      self.conv_kernel_size,
                                      self.conv_kernel_size)
        # Convolve each image's features with its own predicted kernels.
        new_mask_preds = []
        for i in range(N):
            new_mask_preds.append(
                F.conv2d(
                    mask_x[i:i + 1],
                    mask_feat[i],
                    padding=int(self.conv_kernel_size // 2)))
        new_mask_preds = torch.cat(new_mask_preds, dim=0)
        new_mask_preds = new_mask_preds.reshape(N, num_proposals, H, W)
        if self.mask_transform_stride == 2:
            new_mask_preds = F.interpolate(
                new_mask_preds,
                scale_factor=2,
                mode='bilinear',
                align_corners=False)

        if mask_shape is not None and mask_shape[0] != H:
            new_mask_preds = F.interpolate(
                new_mask_preds,
                mask_shape,
                align_corners=False,
                mode='bilinear')

        return new_mask_preds, obj_feat.permute(0, 1, 3, 2).reshape(
            N, num_proposals, self.in_channels, self.conv_kernel_size,
            self.conv_kernel_size)
@HEADS.register_module()
class IterativeDecodeHead(BaseDecodeHead):
    """K-Net iterative decode head.

    Chains a kernel-generate head (producing initial masks and kernels)
    with ``num_stages`` kernel update heads that iteratively refine them.

    Args:
        num_stages (int): Number of kernel update stages.
        kernel_generate_head (dict): Config of the head that generates the
            initial mask predictions and kernels.
        kernel_update_head (list[dict]): One config per update stage.
    """

    def __init__(self, num_stages, kernel_generate_head, kernel_update_head,
                 **kwargs):
        # ``super(BaseDecodeHead, self)`` deliberately skips
        # BaseDecodeHead.__init__ and calls nn.Module.__init__ directly;
        # the decode-head attributes are mirrored from the generate head.
        super(BaseDecodeHead, self).__init__(**kwargs)
        assert num_stages == len(kernel_update_head)
        self.num_stages = num_stages
        self.kernel_generate_head = build_head(kernel_generate_head)
        self.kernel_update_head = nn.ModuleList()
        self.align_corners = self.kernel_generate_head.align_corners
        self.num_classes = self.kernel_generate_head.num_classes
        self.input_transform = self.kernel_generate_head.input_transform
        self.ignore_index = self.kernel_generate_head.ignore_index
        for head_cfg in kernel_update_head:
            self.kernel_update_head.append(build_head(head_cfg))

    def forward(self, inputs):
        """Run the generate head, then every update stage in order."""
        feats = self.kernel_generate_head._forward_feature(inputs)
        sem_seg = self.kernel_generate_head.cls_seg(feats)
        # conv_seg weights serve as initial kernels, one copy per image.
        seg_kernels = self.kernel_generate_head.conv_seg.weight.clone()
        seg_kernels = seg_kernels[None].expand(
            feats.size(0), *seg_kernels.size())
        stage_segs = [sem_seg]
        for i in range(self.num_stages):
            sem_seg, seg_kernels = self.kernel_update_head[i](feats,
                                                              seg_kernels,
                                                              sem_seg)
            stage_segs.append(sem_seg)
        if self.training:
            return stage_segs
        # Only the final stage's prediction is used at test time.
        return stage_segs[-1]

    def losses(self, seg_logit, seg_label):
        """Compute per-stage losses; keys are suffixed with '.s{i}'."""
        losses = dict()
        for i, logit in enumerate(seg_logit):
            loss = self.kernel_generate_head.losses(logit, seg_label)
            for k, v in loss.items():
                losses[f'{k}.s{i}'] = v
        return losses
| true | true |
f73dae4b53ec2c161693f06b78677f806ac9f816 | 2,258 | py | Python | euler/tools/generate_euler_data.py | timpcfan/euler | c2a71faae59c1495b6dabcf6aec0acb4d93a7bb1 | [
"Apache-2.0"
] | 2,829 | 2019-01-12T09:16:03.000Z | 2022-03-29T14:00:58.000Z | euler/tools/generate_euler_data.py | timpcfan/euler | c2a71faae59c1495b6dabcf6aec0acb4d93a7bb1 | [
"Apache-2.0"
] | 331 | 2019-01-17T21:07:49.000Z | 2022-03-30T06:38:17.000Z | euler/tools/generate_euler_data.py | timpcfan/euler | c2a71faae59c1495b6dabcf6aec0acb4d93a7bb1 | [
"Apache-2.0"
] | 578 | 2019-01-16T10:48:53.000Z | 2022-03-21T13:41:34.000Z | # Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from euler.tools.json2meta import Generator
from euler.tools.json2partdat import Converter as DatConverter
from euler.tools.json2partindex import Converter as IndexConverter
import os
import sys
class EulerGenerator(object):
    """Drive the end-to-end conversion of a graph JSON dump into Euler's
    on-disk layout: meta directory, partitioned dat files and, when an
    index meta file is supplied, the partitioned index."""

    def __init__(self, graph_json, index_meta, output_dir, partition_num):
        # Resolve paths up front so the converters always see absolute
        # locations regardless of the caller's working directory.
        self.graph_json = os.path.realpath(graph_json)
        self.index_meta = index_meta
        self.output_dir = os.path.realpath(output_dir)
        self.partition_num = partition_num

    def do(self):
        """Run meta generation, dat conversion and the optional index build."""
        meta_dir = os.path.join(self.output_dir, 'euler.meta')
        # Step 1: graph meta.
        Generator([self.graph_json], meta_dir, self.partition_num).do()
        # Step 2: partitioned dat files derived from the meta.
        DatConverter(self.graph_json, meta_dir, self.output_dir,
                     self.partition_num).do()
        # Step 3: index, only when an index meta file was provided.
        if self.index_meta is not None:
            IndexConverter(self.index_meta, self.graph_json,
                           self.output_dir, self.partition_num).do()
if __name__ == '__main__':
    # CLI entry point.
    # Usage: python generate_euler_data.py graph.json output_dir
    #        partition_num [index.meta]
    if len(sys.argv) < 4:
        print("python generate_euler_data.py graph.json output_dir "
              "partition_num [index.meta]")
        # sys.exit is always available; the bare exit() builtin is injected
        # by the site module and may be absent (python -S, frozen apps).
        sys.exit(1)
    # The optional 4th positional argument is the index meta file.
    index_meta = sys.argv[4] if len(sys.argv) == 5 else None
    generator = EulerGenerator(sys.argv[1], index_meta, sys.argv[2],
                               int(sys.argv[3]))
    generator.do()
| 35.84127 | 80 | 0.634632 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from euler.tools.json2meta import Generator
from euler.tools.json2partdat import Converter as DatConverter
from euler.tools.json2partindex import Converter as IndexConverter
import os
import sys
class EulerGenerator(object):
    """Convert a graph JSON dump into Euler's on-disk layout (meta
    directory, partitioned dat files and an optional index)."""

    def __init__(self, graph_json, index_meta, output_dir, partition_num):
        # Resolve paths up front so converters get absolute locations.
        self.graph_json = os.path.realpath(graph_json)
        self.index_meta = index_meta
        self.output_dir = os.path.realpath(output_dir)
        self.partition_num = partition_num

    def do(self):
        """Run meta generation, dat conversion and optional index build."""
        meta_dir = os.path.join(self.output_dir, 'euler.meta')
        g = Generator([self.graph_json], meta_dir, self.partition_num)
        g.do()
        d = DatConverter(self.graph_json,
                         meta_dir,
                         self.output_dir,
                         self.partition_num)
        d.do()
        # Index is only built when an index meta file was supplied.
        if self.index_meta is not None:
            i = IndexConverter(self.index_meta,
                               self.graph_json,
                               self.output_dir,
                               self.partition_num)
            i.do()
if __name__ == '__main__':
    # Usage: python generate_euler_data.py graph.json output_dir
    #        partition_num [index.meta]
    if len(sys.argv) < 4:
        print("python generate_euler_data.py graph.json output_dir "
              "partition_num [index.meta]")
        # NOTE(review): prefer sys.exit(1); exit() is injected by the site
        # module and may be absent under ``python -S`` or in frozen apps.
        exit(1)
    index_meta = None
    if len(sys.argv) == 5:
        index_meta = sys.argv[4]
    g = EulerGenerator(sys.argv[1], index_meta, sys.argv[2], int(sys.argv[3]))
    g.do()
| true | true |
f73daed22d722fc401139e28b3873a3ec1a66e9e | 2,941 | py | Python | desktop/core/ext-py/tablib-0.10.0/tablib/packages/openpyxl/writer/strings.py | kokosing/hue | 2307f5379a35aae9be871e836432e6f45138b3d9 | [
"Apache-2.0"
] | 11 | 2019-03-20T07:38:35.000Z | 2021-06-18T09:42:46.000Z | desktop/core/ext-py/tablib-0.10.0/tablib/packages/openpyxl/writer/strings.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 80 | 2018-04-13T13:46:24.000Z | 2022-02-16T16:01:46.000Z | desktop/core/ext-py/tablib-0.10.0/tablib/packages/openpyxl/writer/strings.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 6 | 2018-06-06T19:55:32.000Z | 2021-09-30T15:16:40.000Z | # file openpyxl/writer/strings.py
# Copyright (c) 2010 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: Eric Gazoni
"""Write the shared string table."""
# Python stdlib imports
from ....compat import BytesIO as StringIO
# package imports
from ..shared.xmltools import start_tag, end_tag, tag, XMLGenerator
def create_string_table(workbook):
    """Compile the shared string table for a workbook.

    Collects every distinct non-None string value across all worksheets
    and returns a mapping from string to its table index.
    """
    unique_strings = {
        cell.value
        for sheet in workbook.worksheets
        for cell in sheet.get_cell_collection()
        if cell.data_type == cell.TYPE_STRING and cell._value is not None
    }
    return {string: index for index, string in enumerate(unique_strings)}
def write_string_table(string_table):
    """Serialize the shared string table to its ``sst`` XML document.

    Strings are emitted in index order; entries with leading or trailing
    whitespace are tagged ``xml:space="preserve"`` so it is not stripped.
    """
    output = StringIO()
    doc = XMLGenerator(output, 'utf-8')
    start_tag(doc, 'sst', {'xmlns':
            'http://schemas.openxmlformats.org/spreadsheetml/2006/main',
            'uniqueCount': '%d' % len(string_table)})
    for string, _index in sorted(string_table.items(),
                                 key=lambda item: item[1]):
        start_tag(doc, 'si')
        attr = {'xml:space': 'preserve'} if string != string.strip() else {}
        tag(doc, 't', attr, string)
        end_tag(doc, 'si')
    end_tag(doc, 'sst')
    xml_text = output.getvalue()
    output.close()
    return xml_text
class StringTableBuilder(object):
    """Incrementally assign consecutive indices to distinct strings."""

    def __init__(self):
        # Next index to hand out, and the string -> index mapping so far.
        self.counter = 0
        self.dct = {}

    def add(self, key):
        """Return the index for ``key`` (whitespace-stripped), allocating
        a fresh index for strings not seen before."""
        stripped = key.strip()
        if stripped not in self.dct:
            self.dct[stripped] = self.counter
            self.counter += 1
        return self.dct[stripped]

    def get_table(self):
        """Return the string -> index mapping built so far."""
        return self.dct
| 33.804598 | 79 | 0.67494 |
from ....compat import BytesIO as StringIO
from ..shared.xmltools import start_tag, end_tag, tag, XMLGenerator
def create_string_table(workbook):
    """Compile the shared string table for a workbook: every distinct
    non-None string value across all worksheets, mapped to its index."""
    strings = set()
    for sheet in workbook.worksheets:
        for cell in sheet.get_cell_collection():
            if cell.data_type == cell.TYPE_STRING and cell._value is not None:
                strings.add(cell.value)
    # Set iteration order is arbitrary; indices are assigned in that order.
    return dict((key, i) for i, key in enumerate(strings))
def write_string_table(string_table):
    """Serialize the shared string table to its ``sst`` XML document,
    emitting strings in index order."""
    temp_buffer = StringIO()
    doc = XMLGenerator(temp_buffer, 'utf-8')
    start_tag(doc, 'sst', {'xmlns':
            'http://schemas.openxmlformats.org/spreadsheetml/2006/main',
            'uniqueCount': '%d' % len(string_table)})
    strings_to_write = sorted(string_table.items(),
                              key=lambda pair: pair[1])
    for key in [pair[0] for pair in strings_to_write]:
        start_tag(doc, 'si')
        # Keep significant leading/trailing whitespace in the stored text.
        if key.strip() != key:
            attr = {'xml:space': 'preserve'}
        else:
            attr = {}
        tag(doc, 't', attr, key)
        end_tag(doc, 'si')
    end_tag(doc, 'sst')
    string_table_xml = temp_buffer.getvalue()
    temp_buffer.close()
    return string_table_xml
class StringTableBuilder(object):
    """Incrementally assign consecutive indices to distinct strings."""

    def __init__(self):
        # Next index to hand out, and the string -> index mapping so far.
        self.counter = 0
        self.dct = {}

    def add(self, key):
        """Return the index for ``key`` (whitespace-stripped), allocating
        a new index for strings not seen before."""
        key = key.strip()
        try:
            return self.dct[key]
        except KeyError:
            res = self.dct[key] = self.counter
            self.counter += 1
            return res

    def get_table(self):
        """Return the string -> index mapping built so far."""
        return self.dct
| true | true |
f73daeee5b60e7b1777cb98999b129b6a59c4f52 | 302 | py | Python | vaxthesat/python/vax_generator/vax_generator/__main__.py | cypher-me/HAS-Qualifier-Challenges | bb795303716155dad4a930880a58fecb5d9b50c5 | [
"MIT"
] | 75 | 2020-07-20T20:54:00.000Z | 2022-03-09T09:18:37.000Z | vaxthesat/python/vax_generator/vax_generator/__main__.py | cypher-me/HAS-Qualifier-Challenges | bb795303716155dad4a930880a58fecb5d9b50c5 | [
"MIT"
] | 3 | 2020-09-13T00:46:49.000Z | 2021-07-06T16:18:22.000Z | vaxthesat/python/vax_generator/vax_generator/__main__.py | cypher-me/HAS-Qualifier-Challenges | bb795303716155dad4a930880a58fecb5d9b50c5 | [
"MIT"
] | 14 | 2020-07-22T16:34:51.000Z | 2021-09-13T12:19:59.000Z | import logging
import sys
from vax_common.vax_config import get_config
from vax_generator.vax_generator import VaxGenerator
# Route INFO-level logs to stderr.
logging.basicConfig(stream=sys.stderr, level=logging.INFO)


def main():
    """Build a VaxGenerator from the shared configuration and run it."""
    generator = VaxGenerator(get_config())
    generator.run()


if __name__ == "__main__":
    main()
| 17.764706 | 58 | 0.764901 | import logging
import sys
from vax_common.vax_config import get_config
from vax_generator.vax_generator import VaxGenerator
# Route INFO-level logs to stderr.
logging.basicConfig(stream=sys.stderr, level=logging.INFO)


def main():
    """Build a VaxGenerator from the shared configuration and run it."""
    generator = VaxGenerator(get_config())
    generator.run()


if __name__ == "__main__":
    main()
| true | true |
f73dafb5471d095abbf2136a78f1f9ed0d71576d | 6,789 | py | Python | lib/services/vloadbalancer/ncloud_vloadbalancer/model/get_load_balancer_instance_list_response.py | NaverCloudPlatform/ncloud-sdk-python | 5976dfabd205c615fcf57ac2f0ab67313ee6953c | [
"MIT"
] | 12 | 2018-11-20T04:30:49.000Z | 2021-11-09T12:34:26.000Z | lib/services/vloadbalancer/ncloud_vloadbalancer/model/get_load_balancer_instance_list_response.py | NaverCloudPlatform/ncloud-sdk-python | 5976dfabd205c615fcf57ac2f0ab67313ee6953c | [
"MIT"
] | 1 | 2019-01-24T15:56:15.000Z | 2019-05-31T07:56:55.000Z | lib/services/vloadbalancer/ncloud_vloadbalancer/model/get_load_balancer_instance_list_response.py | NaverCloudPlatform/ncloud-sdk-python | 5976dfabd205c615fcf57ac2f0ab67313ee6953c | [
"MIT"
] | 6 | 2018-06-29T03:45:50.000Z | 2022-03-18T01:51:45.000Z | # coding: utf-8
"""
vloadbalancer
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from ncloud_vloadbalancer.model.load_balancer_instance import LoadBalancerInstance # noqa: F401,E501
class GetLoadBalancerInstanceListResponse(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Declared types of each model attribute.
    swagger_types = {
        'request_id': 'str',
        'return_code': 'str',
        'return_message': 'str',
        'total_rows': 'int',
        'load_balancer_instance_list': 'list[LoadBalancerInstance]'
    }

    # Maps pythonic attribute names to the JSON keys used on the wire.
    attribute_map = {
        'request_id': 'requestId',
        'return_code': 'returnCode',
        'return_message': 'returnMessage',
        'total_rows': 'totalRows',
        'load_balancer_instance_list': 'loadBalancerInstanceList'
    }
def __init__(self, request_id=None, return_code=None, return_message=None, total_rows=None, load_balancer_instance_list=None): # noqa: E501
"""GetLoadBalancerInstanceListResponse - a model defined in Swagger""" # noqa: E501
self._request_id = None
self._return_code = None
self._return_message = None
self._total_rows = None
self._load_balancer_instance_list = None
self.discriminator = None
if request_id is not None:
self.request_id = request_id
if return_code is not None:
self.return_code = return_code
if return_message is not None:
self.return_message = return_message
if total_rows is not None:
self.total_rows = total_rows
if load_balancer_instance_list is not None:
self.load_balancer_instance_list = load_balancer_instance_list
@property
def request_id(self):
"""Gets the request_id of this GetLoadBalancerInstanceListResponse. # noqa: E501
:return: The request_id of this GetLoadBalancerInstanceListResponse. # noqa: E501
:rtype: str
"""
return self._request_id
@request_id.setter
def request_id(self, request_id):
"""Sets the request_id of this GetLoadBalancerInstanceListResponse.
:param request_id: The request_id of this GetLoadBalancerInstanceListResponse. # noqa: E501
:type: str
"""
self._request_id = request_id
@property
def return_code(self):
"""Gets the return_code of this GetLoadBalancerInstanceListResponse. # noqa: E501
:return: The return_code of this GetLoadBalancerInstanceListResponse. # noqa: E501
:rtype: str
"""
return self._return_code
@return_code.setter
def return_code(self, return_code):
"""Sets the return_code of this GetLoadBalancerInstanceListResponse.
:param return_code: The return_code of this GetLoadBalancerInstanceListResponse. # noqa: E501
:type: str
"""
self._return_code = return_code
@property
def return_message(self):
"""Gets the return_message of this GetLoadBalancerInstanceListResponse. # noqa: E501
:return: The return_message of this GetLoadBalancerInstanceListResponse. # noqa: E501
:rtype: str
"""
return self._return_message
@return_message.setter
def return_message(self, return_message):
"""Sets the return_message of this GetLoadBalancerInstanceListResponse.
:param return_message: The return_message of this GetLoadBalancerInstanceListResponse. # noqa: E501
:type: str
"""
self._return_message = return_message
@property
def total_rows(self):
"""Gets the total_rows of this GetLoadBalancerInstanceListResponse. # noqa: E501
:return: The total_rows of this GetLoadBalancerInstanceListResponse. # noqa: E501
:rtype: int
"""
return self._total_rows
@total_rows.setter
def total_rows(self, total_rows):
"""Sets the total_rows of this GetLoadBalancerInstanceListResponse.
:param total_rows: The total_rows of this GetLoadBalancerInstanceListResponse. # noqa: E501
:type: int
"""
self._total_rows = total_rows
@property
def load_balancer_instance_list(self):
"""Gets the load_balancer_instance_list of this GetLoadBalancerInstanceListResponse. # noqa: E501
:return: The load_balancer_instance_list of this GetLoadBalancerInstanceListResponse. # noqa: E501
:rtype: list[LoadBalancerInstance]
"""
return self._load_balancer_instance_list
@load_balancer_instance_list.setter
def load_balancer_instance_list(self, load_balancer_instance_list):
"""Sets the load_balancer_instance_list of this GetLoadBalancerInstanceListResponse.
:param load_balancer_instance_list: The load_balancer_instance_list of this GetLoadBalancerInstanceListResponse. # noqa: E501
:type: list[LoadBalancerInstance]
"""
self._load_balancer_instance_list = load_balancer_instance_list
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, GetLoadBalancerInstanceListResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 31.430556 | 144 | 0.644278 |
import pprint
import re
import six
from ncloud_vloadbalancer.model.load_balancer_instance import LoadBalancerInstance
class GetLoadBalancerInstanceListResponse(object):
    """Auto-generated swagger model: load-balancer instance list response."""
    # Attribute name -> declared swagger type.
    swagger_types = {
        'request_id': 'str',
        'return_code': 'str',
        'return_message': 'str',
        'total_rows': 'int',
        'load_balancer_instance_list': 'list[LoadBalancerInstance]'
    }
    # Attribute name -> JSON key used in the API payload.
    attribute_map = {
        'request_id': 'requestId',
        'return_code': 'returnCode',
        'return_message': 'returnMessage',
        'total_rows': 'totalRows',
        'load_balancer_instance_list': 'loadBalancerInstanceList'
    }
    def __init__(self, request_id=None, return_code=None, return_message=None, total_rows=None, load_balancer_instance_list=None):
        """Store only keyword values that are not None; the rest stay None."""
        self._request_id = None
        self._return_code = None
        self._return_message = None
        self._total_rows = None
        self._load_balancer_instance_list = None
        self.discriminator = None
        if request_id is not None:
            self.request_id = request_id
        if return_code is not None:
            self.return_code = return_code
        if return_message is not None:
            self.return_message = return_message
        if total_rows is not None:
            self.total_rows = total_rows
        if load_balancer_instance_list is not None:
            self.load_balancer_instance_list = load_balancer_instance_list
    # Plain pass-through property accessors, one getter/setter pair per attribute.
    @property
    def request_id(self):
        """Gets the request_id of this response (str)."""
        return self._request_id
    @request_id.setter
    def request_id(self, request_id):
        self._request_id = request_id
    @property
    def return_code(self):
        """Gets the return_code of this response (str)."""
        return self._return_code
    @return_code.setter
    def return_code(self, return_code):
        self._return_code = return_code
    @property
    def return_message(self):
        """Gets the return_message of this response (str)."""
        return self._return_message
    @return_message.setter
    def return_message(self, return_message):
        self._return_message = return_message
    @property
    def total_rows(self):
        """Gets the total_rows of this response (int)."""
        return self._total_rows
    @total_rows.setter
    def total_rows(self, total_rows):
        self._total_rows = total_rows
    @property
    def load_balancer_instance_list(self):
        """Gets the load_balancer_instance_list of this response (list[LoadBalancerInstance])."""
        return self._load_balancer_instance_list
    @load_balancer_instance_list.setter
    def load_balancer_instance_list(self, load_balancer_instance_list):
        self._load_balancer_instance_list = load_balancer_instance_list
    def to_dict(self):
        """Return the model as a plain dict, recursing into nested models."""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Convert nested swagger models inside lists element-wise.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Convert nested swagger models stored as dict values.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()
    def __eq__(self, other):
        """Return True if both objects are equal."""
        if not isinstance(other, GetLoadBalancerInstanceListResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Return True if both objects are not equal."""
        return not self == other
| true | true |
f73db073fbc6af5b294207e83405f9a180fcc696 | 216 | py | Python | crediapp/crediapp/doctype/analisis_capital/test_analisis_capital.py | edwinorlando83/crediapp | 111e8472b7d13bc553a9ebf660b0f97c34e87164 | [
"MIT"
] | null | null | null | crediapp/crediapp/doctype/analisis_capital/test_analisis_capital.py | edwinorlando83/crediapp | 111e8472b7d13bc553a9ebf660b0f97c34e87164 | [
"MIT"
] | null | null | null | crediapp/crediapp/doctype/analisis_capital/test_analisis_capital.py | edwinorlando83/crediapp | 111e8472b7d13bc553a9ebf660b0f97c34e87164 | [
"MIT"
] | 2 | 2021-05-06T14:59:24.000Z | 2021-08-20T21:21:20.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2021, orlando and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class Testanalisis_capital(unittest.TestCase):
    """Placeholder test suite for the Analisis Capital DocType; no cases yet."""
    pass
| 19.636364 | 46 | 0.768519 |
from __future__ import unicode_literals
import unittest
class Testanalisis_capital(unittest.TestCase):
    """Placeholder test suite for the Analisis Capital DocType; no cases yet."""
    pass
| true | true |
f73db2266a5aeca052487d18b4c21435dc002cb0 | 19,155 | py | Python | .jupyter/jupyter_notebook_config.py | elsdrium/.unix_settings | 1c3cf9dfc9a4a465178d22c82f3a05f380cda926 | [
"MIT"
] | 5 | 2016-11-06T07:17:08.000Z | 2019-02-24T11:15:23.000Z | .jupyter/jupyter_notebook_config.py | elsdrium/.unix_settings | 1c3cf9dfc9a4a465178d22c82f3a05f380cda926 | [
"MIT"
] | null | null | null | .jupyter/jupyter_notebook_config.py | elsdrium/.unix_settings | 1c3cf9dfc9a4a465178d22c82f3a05f380cda926 | [
"MIT"
] | null | null | null | # Configuration file for jupyter-notebook.
#------------------------------------------------------------------------------
# Configurable configuration
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# SingletonConfigurable configuration
#------------------------------------------------------------------------------
# A configurable that only allows one instance.
#
# This class is for classes that should only have one instance of itself or
# *any* subclass. To create and retrieve such a class use the
# :meth:`SingletonConfigurable.instance` method.
#------------------------------------------------------------------------------
# Application configuration
#------------------------------------------------------------------------------
# This is an application.
# The date format used by logging formatters for %(asctime)s
# c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
# The Logging format template
# c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
# Set the log level by value or name.
# c.Application.log_level = 30
#------------------------------------------------------------------------------
# JupyterApp configuration
#------------------------------------------------------------------------------
# Base class for Jupyter applications
# Answer yes to any prompts.
# c.JupyterApp.answer_yes = False
# Generate default config file.
# c.JupyterApp.generate_config = False
# Full path of a config file.
# c.JupyterApp.config_file = ''
# Specify a config file to load.
# c.JupyterApp.config_file_name = ''
#------------------------------------------------------------------------------
# NotebookApp configuration
#------------------------------------------------------------------------------
# The notebook manager class to use.
# c.NotebookApp.contents_manager_class = <class 'notebook.services.contents.filemanager.FileContentsManager'>
# The login handler class to use.
# c.NotebookApp.login_handler_class = <class 'notebook.auth.login.LoginHandler'>
# The url for MathJax.js.
# c.NotebookApp.mathjax_url = ''
# extra paths to look for Javascript notebook extensions
# c.NotebookApp.extra_nbextensions_path = traitlets.Undefined
# The base URL for websockets, if it differs from the HTTP server (hint: it
# almost certainly doesn't).
#
# Should be in the form of an HTTP origin: ws[s]://hostname[:port]
# c.NotebookApp.websocket_url = ''
# DEPRECATED use base_url
# c.NotebookApp.base_project_url = '/'
# The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted, and will automatically be added.
# c.NotebookApp.base_url = '/'
# The default URL to redirect to from `/`
# c.NotebookApp.default_url = '/tree'
# Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library IPython uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
# c.NotebookApp.enable_mathjax = True
# The IP address the notebook server will listen on.
# NOTE(review): '*' binds every network interface, making the server
# reachable from other hosts; access is gated only by the TLS cert/key
# and the hashed password configured elsewhere in this file — confirm
# this exposure is intended for the machine running the server.
c.NotebookApp.ip = '*'
# The port the notebook server will listen on.
c.NotebookApp.port = 9999
# Set the Access-Control-Allow-Credentials: true header
# c.NotebookApp.allow_credentials = False
# Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
# c.NotebookApp.browser = ''
# The directory to use for notebooks and kernels.
# c.NotebookApp.notebook_dir = ''
# The full path to an SSL/TLS certificate file.
# Paired with NotebookApp.keyfile below so the notebook is served over
# HTTPS; the file must exist and be readable by the server process.
c.NotebookApp.certfile = u'/etc/jupyter/mycert.pem'
# The number of additional ports to try if the specified port is not available.
# c.NotebookApp.port_retries = 50
# The random bytes used to secure cookies. By default this is a new random
# number every time you start the Notebook. Set it to a value in a config file
# to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
# c.NotebookApp.cookie_secret = b''
#
# c.NotebookApp.file_to_run = ''
# Reraise exceptions encountered loading server extensions?
# c.NotebookApp.reraise_server_extension_failures = False
# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
# c.NotebookApp.tornado_settings = traitlets.Undefined
# Extra variables to supply to jinja templates when rendering.
# c.NotebookApp.jinja_template_vars = traitlets.Undefined
# The session manager class to use.
# c.NotebookApp.session_manager_class = <class 'notebook.services.sessions.sessionmanager.SessionManager'>
# Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
# c.NotebookApp.extra_static_paths = traitlets.Undefined
# The kernel spec manager class to use. Should be a subclass of
# `jupyter_client.kernelspec.KernelSpecManager`.
#
# The Api of KernelSpecManager is provisional and might change without warning
# between this version of IPython and the next stable one.
# c.NotebookApp.kernel_spec_manager_class = <class 'jupyter_client.kernelspec.KernelSpecManager'>
# The logout handler class to use.
# c.NotebookApp.logout_handler_class = <class 'notebook.auth.logout.LogoutHandler'>
# Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
# Disabled here — this config appears aimed at a remote/headless server
# (see ip = '*' above), where auto-launching a local browser is useless.
c.NotebookApp.open_browser = False
# Hashed password to use for web authentication.
#
#  To generate, type in a python/IPython shell:
#
#    from notebook.auth import passwd; passwd()
#
#  The string should be of the form type:salt:hashed-password.
# NOTE(review): this hash is committed to version control; SHA-1 password
# hashes can be brute-forced offline — consider rotating the password and
# keeping the hash out of the public repository.
c.NotebookApp.password = u'sha1:34021a6e32ab:f4f5416b2264624938a19c0de8ed091d82f291be'
# The full path to a private key file for usage with SSL/TLS.
# u-prefix added for consistency with certfile/password above so the
# value is a unicode string under Python 2 as well (no-op on Python 3).
c.NotebookApp.keyfile = u'/etc/jupyter/mykey.key'
# Supply extra arguments that will be passed to Jinja environment.
# c.NotebookApp.jinja_environment_options = traitlets.Undefined
# Use a regular expression for the Access-Control-Allow-Origin header
#
# Requests from an origin matching the expression will get replies with:
#
# Access-Control-Allow-Origin: origin
#
# where `origin` is the origin of the request.
#
# Ignored if allow_origin is set.
# c.NotebookApp.allow_origin_pat = ''
# Supply SSL options for the tornado HTTPServer. See the tornado docs for
# details.
# c.NotebookApp.ssl_options = traitlets.Undefined
# Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headerssent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
# c.NotebookApp.trust_xheaders = False
# DEPRECATED, use tornado_settings
# c.NotebookApp.webapp_settings = traitlets.Undefined
# Extra paths to search for serving jinja templates.
#
# Can be used to override templates from notebook.templates.
# c.NotebookApp.extra_template_paths = traitlets.Undefined
# The file where the cookie secret is stored.
# c.NotebookApp.cookie_secret_file = ''
# Set the Access-Control-Allow-Origin header
#
# Use '*' to allow any origin to access your server.
#
# Takes precedence over allow_origin_pat.
# c.NotebookApp.allow_origin = ''
# The kernel manager class to use.
# c.NotebookApp.kernel_manager_class = <class 'notebook.services.kernels.kernelmanager.MappingKernelManager'>
# DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
# c.NotebookApp.pylab = 'disabled'
# Python modules to load as notebook server extensions. This is an experimental
# API, and may change in future releases.
# c.NotebookApp.server_extensions = traitlets.Undefined
# The config manager class to use
# c.NotebookApp.config_manager_class = <class 'notebook.services.config.manager.ConfigManager'>
# ipyparallel
# c.NotebookApp.server_extensions.append('ipyparallel.nbextension')
#------------------------------------------------------------------------------
# LoggingConfigurable configuration
#------------------------------------------------------------------------------
# A parent class for Configurables that log.
#
# Subclasses have a log trait, and the default behavior is to get the logger
# from the currently running Application.
#------------------------------------------------------------------------------
# ConnectionFileMixin configuration
#------------------------------------------------------------------------------
# Mixin for configurable classes that work with connection files
# set the control (ROUTER) port [default: random]
# c.ConnectionFileMixin.control_port = 0
# set the iopub (PUB) port [default: random]
# c.ConnectionFileMixin.iopub_port = 0
#
# c.ConnectionFileMixin.transport = 'tcp'
# set the heartbeat port [default: random]
# c.ConnectionFileMixin.hb_port = 0
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.ConnectionFileMixin.connection_file = ''
# set the shell (ROUTER) port [default: random]
# c.ConnectionFileMixin.shell_port = 0
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.ConnectionFileMixin.ip = ''
# set the stdin (ROUTER) port [default: random]
# c.ConnectionFileMixin.stdin_port = 0
#------------------------------------------------------------------------------
# KernelManager configuration
#------------------------------------------------------------------------------
# Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
# DEPRECATED: Use kernel_name instead.
#
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, Jupyter does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it given on the Jupyter command
# line.
# c.KernelManager.kernel_cmd = traitlets.Undefined
# Should we autorestart the kernel if it dies.
# c.KernelManager.autorestart = False
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
# The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'
# Username for the Session. Default is your system username.
# c.Session.username = 'elsdrm'
# The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
# c.Session.item_threshold = 64
# Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
# c.Session.metadata = traitlets.Undefined
# Debug output in the Session
# c.Session.debug = False
# Threshold (in bytes) beyond which a buffer should be sent without copying.
# c.Session.copy_threshold = 65536
# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'
# path to file containing execution key.
# c.Session.keyfile = ''
# The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
# c.Session.digest_history_size = 65536
# execution key, for signing messages.
# c.Session.key = b''
# Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# c.Session.buffer_threshold = 1024
# The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
# c.Session.signature_scheme = 'hmac-sha256'
# The UUID identifying this session.
# c.Session.session = ''
#------------------------------------------------------------------------------
# MultiKernelManager configuration
#------------------------------------------------------------------------------
# A class for managing multiple kernels.
# The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
# c.MultiKernelManager.kernel_manager_class = 'jupyter_client.ioloop.IOLoopKernelManager'
# The name of the default kernel to start
# c.MultiKernelManager.default_kernel_name = 'python3'
#------------------------------------------------------------------------------
# MappingKernelManager configuration
#------------------------------------------------------------------------------
# A KernelManager that handles notebook mapping and HTTP error handling
#
# c.MappingKernelManager.root_dir = ''
#------------------------------------------------------------------------------
# ContentsManager configuration
#------------------------------------------------------------------------------
# Base class for serving files and directories.
#
# This serves any text or binary file, as well as directories, with special
# handling for JSON notebook documents.
#
# Most APIs take a path argument, which is always an API-style unicode path, and
# always refers to a directory.
#
# - unicode, not url-escaped
# - '/'-separated
# - leading and trailing '/' will be stripped
# - if unspecified, path defaults to '',
# indicating the root path.
#
# c.ContentsManager.checkpoints = traitlets.Undefined
# Glob patterns to hide in file and directory listings.
# c.ContentsManager.hide_globs = traitlets.Undefined
# Python callable or importstring thereof
#
# To be called on a contents model prior to save.
#
# This can be used to process the structure, such as removing notebook outputs
# or other side effects that should not be saved.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(path=path, model=model, contents_manager=self)
#
# - model: the model to be saved. Includes file contents.
# Modifying this dict will affect the file that is stored.
# - path: the API path of the save destination
# - contents_manager: this ContentsManager instance
# c.ContentsManager.pre_save_hook = None
#
# c.ContentsManager.checkpoints_class = <class 'notebook.services.contents.checkpoints.Checkpoints'>
# The base name used when creating untitled files.
# c.ContentsManager.untitled_file = 'untitled'
# The base name used when creating untitled directories.
# c.ContentsManager.untitled_directory = 'Untitled Folder'
# The base name used when creating untitled notebooks.
# c.ContentsManager.untitled_notebook = 'Untitled'
#
# c.ContentsManager.checkpoints_kwargs = traitlets.Undefined
#------------------------------------------------------------------------------
# FileContentsManager configuration
#------------------------------------------------------------------------------
# DEPRECATED, use post_save_hook
# c.FileContentsManager.save_script = False
#
# c.FileContentsManager.root_dir = ''
# Python callable or importstring thereof
#
# to be called on the path of a file just saved.
#
# This can be used to process the file on disk, such as converting the notebook
# to a script or HTML via nbconvert.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(os_path=os_path, model=model, contents_manager=instance)
#
# - path: the filesystem path to the file just written - model: the model
# representing the file - contents_manager: this ContentsManager instance
# c.FileContentsManager.post_save_hook = None
#------------------------------------------------------------------------------
# NotebookNotary configuration
#------------------------------------------------------------------------------
# A class for computing and verifying notebook signatures.
# The file where the secret key is stored.
# c.NotebookNotary.secret_file = ''
# The hashing algorithm used to sign notebooks.
# c.NotebookNotary.algorithm = 'sha256'
# The secret key with which notebooks are signed.
# c.NotebookNotary.secret = b''
# The sqlite file in which to store notebook signatures. By default, this will
# be in your Jupyter runtime directory. You can set it to ':memory:' to disable
# sqlite writing to the filesystem.
# c.NotebookNotary.db_file = ''
# The number of notebook signatures to cache. When the number of signatures
# exceeds this value, the oldest 25% of signatures will be culled.
# c.NotebookNotary.cache_size = 65535
#------------------------------------------------------------------------------
# KernelSpecManager configuration
#------------------------------------------------------------------------------
# Whitelist of allowed kernel names.
#
# By default, all installed kernels are allowed.
# c.KernelSpecManager.whitelist = traitlets.Undefined
| 36.836538 | 109 | 0.668755 |
#
# Should be in the form of an HTTP origin: ws[s]://hostname[:port]
# c.NotebookApp.websocket_url = ''
# DEPRECATED use base_url
# c.NotebookApp.base_project_url = '/'
# The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted, and will automatically be added.
# c.NotebookApp.base_url = '/'
# The default URL to redirect to from `/`
# c.NotebookApp.default_url = '/tree'
# Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library IPython uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
# c.NotebookApp.enable_mathjax = True
# The IP address the notebook server will listen on.
c.NotebookApp.ip = '*'
# The port the notebook server will listen on.
c.NotebookApp.port = 9999
# Set the Access-Control-Allow-Credentials: true header
# c.NotebookApp.allow_credentials = False
# Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
# c.NotebookApp.browser = ''
# The directory to use for notebooks and kernels.
# c.NotebookApp.notebook_dir = ''
# The full path to an SSL/TLS certificate file.
c.NotebookApp.certfile = u'/etc/jupyter/mycert.pem'
# The number of additional ports to try if the specified port is not available.
# c.NotebookApp.port_retries = 50
# The random bytes used to secure cookies. By default this is a new random
# number every time you start the Notebook. Set it to a value in a config file
# to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
# c.NotebookApp.cookie_secret = b''
#
# c.NotebookApp.file_to_run = ''
# Reraise exceptions encountered loading server extensions?
# c.NotebookApp.reraise_server_extension_failures = False
# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
# c.NotebookApp.tornado_settings = traitlets.Undefined
# Extra variables to supply to jinja templates when rendering.
# c.NotebookApp.jinja_template_vars = traitlets.Undefined
# The session manager class to use.
# c.NotebookApp.session_manager_class = <class 'notebook.services.sessions.sessionmanager.SessionManager'>
# Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
# c.NotebookApp.extra_static_paths = traitlets.Undefined
# The kernel spec manager class to use. Should be a subclass of
# `jupyter_client.kernelspec.KernelSpecManager`.
#
# The Api of KernelSpecManager is provisional and might change without warning
# between this version of IPython and the next stable one.
# c.NotebookApp.kernel_spec_manager_class = <class 'jupyter_client.kernelspec.KernelSpecManager'>
# The logout handler class to use.
# c.NotebookApp.logout_handler_class = <class 'notebook.auth.logout.LogoutHandler'>
# Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
c.NotebookApp.open_browser = False
# Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from notebook.auth import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
c.NotebookApp.password = u'sha1:34021a6e32ab:f4f5416b2264624938a19c0de8ed091d82f291be'
# The full path to a private key file for usage with SSL/TLS.
c.NotebookApp.keyfile = '/etc/jupyter/mykey.key'
# Supply extra arguments that will be passed to Jinja environment.
# c.NotebookApp.jinja_environment_options = traitlets.Undefined
# Use a regular expression for the Access-Control-Allow-Origin header
#
# Requests from an origin matching the expression will get replies with:
#
# Access-Control-Allow-Origin: origin
#
# where `origin` is the origin of the request.
#
# Ignored if allow_origin is set.
# c.NotebookApp.allow_origin_pat = ''
# Supply SSL options for the tornado HTTPServer. See the tornado docs for
# details.
# c.NotebookApp.ssl_options = traitlets.Undefined
# Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headerssent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
# c.NotebookApp.trust_xheaders = False
# DEPRECATED, use tornado_settings
# c.NotebookApp.webapp_settings = traitlets.Undefined
# Extra paths to search for serving jinja templates.
#
# Can be used to override templates from notebook.templates.
# c.NotebookApp.extra_template_paths = traitlets.Undefined
# The file where the cookie secret is stored.
# c.NotebookApp.cookie_secret_file = ''
# Set the Access-Control-Allow-Origin header
#
# Use '*' to allow any origin to access your server.
#
# Takes precedence over allow_origin_pat.
# c.NotebookApp.allow_origin = ''
# The kernel manager class to use.
# c.NotebookApp.kernel_manager_class = <class 'notebook.services.kernels.kernelmanager.MappingKernelManager'>
# DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
# c.NotebookApp.pylab = 'disabled'
# Python modules to load as notebook server extensions. This is an experimental
# API, and may change in future releases.
# c.NotebookApp.server_extensions = traitlets.Undefined
# The config manager class to use
# c.NotebookApp.config_manager_class = <class 'notebook.services.config.manager.ConfigManager'>
# ipyparallel
# c.NotebookApp.server_extensions.append('ipyparallel.nbextension')
#------------------------------------------------------------------------------
# LoggingConfigurable configuration
#------------------------------------------------------------------------------
# A parent class for Configurables that log.
#
# Subclasses have a log trait, and the default behavior is to get the logger
# from the currently running Application.
#------------------------------------------------------------------------------
# ConnectionFileMixin configuration
#------------------------------------------------------------------------------
# Mixin for configurable classes that work with connection files
# set the control (ROUTER) port [default: random]
# c.ConnectionFileMixin.control_port = 0
# set the iopub (PUB) port [default: random]
# c.ConnectionFileMixin.iopub_port = 0
#
# c.ConnectionFileMixin.transport = 'tcp'
# set the heartbeat port [default: random]
# c.ConnectionFileMixin.hb_port = 0
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.ConnectionFileMixin.connection_file = ''
# set the shell (ROUTER) port [default: random]
# c.ConnectionFileMixin.shell_port = 0
# Set the kernel's IP address [default localhost]. If the IP address is
# avoid pickling.
# c.Session.buffer_threshold = 1024
# The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
# c.Session.signature_scheme = 'hmac-sha256'
# The UUID identifying this session.
# c.Session.session = ''
#------------------------------------------------------------------------------
# MultiKernelManager configuration
#------------------------------------------------------------------------------
# A class for managing multiple kernels.
# The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
# c.MultiKernelManager.kernel_manager_class = 'jupyter_client.ioloop.IOLoopKernelManager'
# The name of the default kernel to start
# c.MultiKernelManager.default_kernel_name = 'python3'
#------------------------------------------------------------------------------
# MappingKernelManager configuration
#------------------------------------------------------------------------------
# A KernelManager that handles notebook mapping and HTTP error handling
#
# c.MappingKernelManager.root_dir = ''
#------------------------------------------------------------------------------
# ContentsManager configuration
#------------------------------------------------------------------------------
# Base class for serving files and directories.
#
# This serves any text or binary file, as well as directories, with special
# handling for JSON notebook documents.
#
# Most APIs take a path argument, which is always an API-style unicode path, and
# always refers to a directory.
#
# - unicode, not url-escaped
# - '/'-separated
# - leading and trailing '/' will be stripped
# - if unspecified, path defaults to '',
# indicating the root path.
#
# c.ContentsManager.checkpoints = traitlets.Undefined
# Glob patterns to hide in file and directory listings.
# c.ContentsManager.hide_globs = traitlets.Undefined
# Python callable or importstring thereof
#
# To be called on a contents model prior to save.
#
# This can be used to process the structure, such as removing notebook outputs
# or other side effects that should not be saved.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(path=path, model=model, contents_manager=self)
#
# - model: the model to be saved. Includes file contents.
# Modifying this dict will affect the file that is stored.
# - path: the API path of the save destination
# - contents_manager: this ContentsManager instance
# c.ContentsManager.pre_save_hook = None
#
# c.ContentsManager.checkpoints_class = <class 'notebook.services.contents.checkpoints.Checkpoints'>
# The base name used when creating untitled files.
# c.ContentsManager.untitled_file = 'untitled'
# The base name used when creating untitled directories.
# c.ContentsManager.untitled_directory = 'Untitled Folder'
# The base name used when creating untitled notebooks.
# c.ContentsManager.untitled_notebook = 'Untitled'
#
# c.ContentsManager.checkpoints_kwargs = traitlets.Undefined
#------------------------------------------------------------------------------
# FileContentsManager configuration
#------------------------------------------------------------------------------
# DEPRECATED, use post_save_hook
# c.FileContentsManager.save_script = False
#
# c.FileContentsManager.root_dir = ''
# Python callable or importstring thereof
#
# to be called on the path of a file just saved.
#
# This can be used to process the file on disk, such as converting the notebook
# to a script or HTML via nbconvert.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(os_path=os_path, model=model, contents_manager=instance)
#
# - path: the filesystem path to the file just written - model: the model
# representing the file - contents_manager: this ContentsManager instance
# c.FileContentsManager.post_save_hook = None
#------------------------------------------------------------------------------
# NotebookNotary configuration
#------------------------------------------------------------------------------
# A class for computing and verifying notebook signatures.
# The file where the secret key is stored.
# c.NotebookNotary.secret_file = ''
# The hashing algorithm used to sign notebooks.
# c.NotebookNotary.algorithm = 'sha256'
# The secret key with which notebooks are signed.
# c.NotebookNotary.secret = b''
# The sqlite file in which to store notebook signatures. By default, this will
# be in your Jupyter runtime directory. You can set it to ':memory:' to disable
# sqlite writing to the filesystem.
# c.NotebookNotary.db_file = ''
# The number of notebook signatures to cache. When the number of signatures
# exceeds this value, the oldest 25% of signatures will be culled.
# c.NotebookNotary.cache_size = 65535
#------------------------------------------------------------------------------
# KernelSpecManager configuration
#------------------------------------------------------------------------------
# Whitelist of allowed kernel names.
#
# By default, all installed kernels are allowed.
# c.KernelSpecManager.whitelist = traitlets.Undefined
| true | true |
f73db2b8c9c24c124f2df232ed47a7c74213aabe | 465 | py | Python | instagram/migrations/0006_comment_username.py | israelwangila/insta | 48653270edd60aabe7d4a42c24032709c2d86c10 | [
"MIT"
] | 4 | 2020-01-29T04:43:58.000Z | 2022-03-06T02:50:37.000Z | instagram/migrations/0006_comment_username.py | israelwangila/insta | 48653270edd60aabe7d4a42c24032709c2d86c10 | [
"MIT"
] | 4 | 2021-03-19T00:43:44.000Z | 2021-09-08T01:00:15.000Z | instagram/migrations/0006_comment_username.py | israelwangila/insta | 48653270edd60aabe7d4a42c24032709c2d86c10 | [
"MIT"
] | 7 | 2020-02-20T06:03:03.000Z | 2022-03-11T02:57:41.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-05-23 13:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: add a ``username`` field to ``comment``."""

    # Must run after the previous instagram app migration.
    dependencies = [
        ('instagram', '0005_auto_20190523_1540'),
    ]

    operations = [
        # blank=True makes the field optional in forms/validation; existing
        # rows get the implicit empty-string default for CharField.
        migrations.AddField(
            model_name='comment',
            name='username',
            field=models.CharField(blank=True, max_length=255),
        ),
    ]
| 22.142857 | 63 | 0.623656 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('instagram', '0005_auto_20190523_1540'),
]
operations = [
migrations.AddField(
model_name='comment',
name='username',
field=models.CharField(blank=True, max_length=255),
),
]
| true | true |
f73db3e4eff7bf7f250f3b85d9937d14e39434d1 | 14,859 | py | Python | codalab/worker/main.py | ana13S/codalab-worksheets | 3f839dbe92e505a94ddc627ffab9cfea621cb2d1 | [
"Apache-2.0"
] | null | null | null | codalab/worker/main.py | ana13S/codalab-worksheets | 3f839dbe92e505a94ddc627ffab9cfea621cb2d1 | [
"Apache-2.0"
] | null | null | null | codalab/worker/main.py | ana13S/codalab-worksheets | 3f839dbe92e505a94ddc627ffab9cfea621cb2d1 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# For information about the design of the worker, see design.pdf in the same
# directory as this file. For information about running a worker, see the
# tutorial on the CodaLab documentation.
import argparse
import getpass
import os
import logging
import signal
import socket
import stat
import sys
import psutil
import requests
from codalab.common import SingularityError
from codalab.lib.formatting import parse_size
from codalab.lib.telemetry_util import initialize_sentry, load_sentry_data, using_sentry
from .bundle_service_client import BundleServiceClient, BundleAuthException
from . import docker_utils
from .worker import Worker
from codalab.worker.dependency_manager import DependencyManager
from codalab.worker.docker_image_manager import DockerImageManager
from codalab.worker.singularity_image_manager import SingularityImageManager
logger = logging.getLogger(__name__)

# Sentinel meaning "effectively unlimited" for --exit-after-num-runs.
DEFAULT_EXIT_AFTER_NUM_RUNS = 999999999
def parse_args():
    """Build the CodaLab-worker command-line parser and parse ``sys.argv``.

    Returns:
        argparse.Namespace with all worker configuration options.

    Note: argparse also runs *string* defaults through the ``type=``
    converters, so the ``'ALL'`` defaults for --cpuset/--gpuset are parsed
    by parse_cpuset_args/parse_gpuset_args even when the flag is omitted.
    """
    parser = argparse.ArgumentParser(description='CodaLab worker.')
    # --- Identity and server connection ---
    parser.add_argument(
        '--tag',
        help='Tag (can only contain letters, numbers or hyphens) that allows for scheduling runs on specific workers.',
    )
    parser.add_argument(
        '--server',
        default='https://worksheets.codalab.org',
        help='URL of the CodaLab server, in the format '
        '<http|https>://<hostname>[:<port>] (e.g., https://worksheets.codalab.org)',
    )
    # --- Local storage and resource limits ---
    parser.add_argument(
        '--work-dir',
        default='codalab-worker-scratch',
        help='Directory where to store temporary bundle data, '
        'including dependencies and the data from run '
        'bundles.',
    )
    parser.add_argument(
        '--network-prefix', default='codalab_worker_network', help='Docker network name prefix'
    )
    parser.add_argument(
        '--cpuset',
        type=parse_cpuset_args,
        metavar='CPUSET_STR',
        default='ALL',
        help='Comma-separated list of CPUs in which to allow bundle execution, '
        '(e.g., \"0,2,3\", \"1\").',
    )
    parser.add_argument(
        '--gpuset',
        type=parse_gpuset_args,
        metavar='GPUSET_STR',
        default='ALL',
        help='Comma-separated list of GPUs in which to allow bundle execution. '
        'Each GPU can be specified by its index or UUID'
        '(e.g., \"0,1\", \"1\", \"GPU-62casdfasd-asfas...\"',
    )
    parser.add_argument(
        '--max-work-dir-size',
        type=parse_size,
        metavar='SIZE',
        default='10g',
        help='Maximum size of the temporary bundle data ' '(e.g., 3, 3k, 3m, 3g, 3t).',
    )
    parser.add_argument(
        '--max-image-cache-size',
        type=parse_size,
        metavar='SIZE',
        default=None,
        help='Limit the disk space used to cache Docker images '
        'for worker jobs to the specified amount (e.g. '
        '3, 3k, 3m, 3g, 3t). If the limit is exceeded, '
        'the least recently used images are removed first. '
        'Worker will not remove any images if this option '
        'is not specified.',
    )
    parser.add_argument(
        '--max-image-size',
        type=parse_size,
        metavar='SIZE',
        default=None,
        help='Limit the size of Docker images to download from the Docker Hub'
        '(e.g. 3, 3k, 3m, 3g, 3t). If the limit is exceeded, '
        'the requested image will not be downloaded. '
        'The bundle depends on this image will fail accordingly. '
        'If running an image on the singularity runtime, there is no size '
        'check because singularity hub does not support the querying of image size',
    )
    parser.add_argument(
        '--max-memory',
        type=parse_size,
        metavar='SIZE',
        default=None,
        help='Limit the amount of memory to a worker in bytes' '(e.g. 3, 3k, 3m, 3g, 3t).',
    )
    # --- Authentication ---
    parser.add_argument(
        '--password-file',
        help='Path to the file containing the username and '
        'password for logging into the bundle service, '
        'each on a separate line. If not specified, the '
        'password is read from standard input.',
    )
    # --- Lifecycle / behavior flags ---
    parser.add_argument(
        '--verbose', action='store_true', help='Whether to output verbose log messages.'
    )
    parser.add_argument(
        '--exit-when-idle',
        action='store_true',
        help='If specified the worker quits if it finds itself with no jobs after a checkin',
    )
    parser.add_argument(
        '--container-runtime',
        choices=['docker', 'singularity'],
        default='docker',
        help='The worker will run jobs on the specified backend. The options are docker (default) or singularity',
    )
    parser.add_argument(
        '--idle-seconds',
        help='Not running anything for this many seconds constitutes idle',
        type=int,
        default=0,
    )
    parser.add_argument(
        '--checkin-frequency-seconds',
        help='Number of seconds to wait between worker check-ins',
        type=int,
        default=5,
    )
    parser.add_argument(
        '--id',
        default='%s(%d)' % (socket.gethostname(), os.getpid()),
        help='Internal use: ID to use for the worker.',
    )
    parser.add_argument(
        '--shared-file-system',
        action='store_true',
        help='To be used when the server and the worker share the bundle store on their filesystems.',
    )
    parser.add_argument(
        '--group', default=None, help='Name of the group that can run jobs on this worker'
    )
    parser.add_argument(
        '--tag-exclusive',
        action='store_true',
        help='To be used when the worker should only run bundles that match the worker\'s tag.',
    )
    parser.add_argument(
        '--pass-down-termination',
        action='store_true',
        help='Terminate the worker and kill all the existing running bundles.',
    )
    parser.add_argument(
        '--delete-work-dir-on-exit',
        action='store_true',
        help="Delete the worker's working directory when the worker process exits.",
    )
    parser.add_argument(
        '--exit-after-num-runs',
        type=int,
        default=DEFAULT_EXIT_AFTER_NUM_RUNS,
        help='The worker quits after this many jobs assigned to this worker',
    )
    parser.add_argument(
        '--exit-on-exception',
        action='store_true',
        help="Exit the worker if it encounters an exception (rather than sleeping).",
    )
    parser.add_argument(
        '--download-dependencies-max-retries',
        type=int,
        default=3,
        help='The number of times to retry downloading dependencies after a failure (defaults to 3).',
    )
    parser.add_argument(
        '--shared-memory-size-gb',
        type=int,
        default=1,
        help='The shared memory size of the run container in GB (defaults to 1).',
    )
    return parser.parse_args()
def connect_to_codalab_server(server, password_file):
    """Log into the CodaLab bundle service and return an authenticated client.

    Credentials are read from ``password_file`` (username on the first line,
    password on the second) when given; otherwise from the CODALAB_USERNAME /
    CODALAB_PASSWORD environment variables, prompting interactively for any
    value still missing. Exits the process with status 1 when the password
    file is group/world accessible or when authentication fails.
    """
    logger.info('Connecting to %s' % server)
    if password_file:
        # Refuse credential files readable or writable by group/others.
        lax_bits = stat.S_IRWXG | stat.S_IRWXO
        if os.stat(password_file).st_mode & lax_bits:
            print(
                "Permissions on password file are too lax.\n\
            Only the user should be allowed to access the file.\n\
            On Linux, run:\n\
            chmod 600 %s"
                % password_file,
                file=sys.stderr,
            )
            sys.exit(1)
        with open(password_file) as f:
            username = f.readline().strip()
            password = f.readline().strip()
    else:
        env = os.environ
        username = env.get('CODALAB_USERNAME')
        if username is None:
            username = input('Username: ')
        password = env.get('CODALAB_PASSWORD')
        if password is None:
            password = getpass.getpass()
    try:
        return BundleServiceClient(server, username, password)
    except BundleAuthException as ex:
        logger.error(
            'Cannot log into the bundle service. Please check your worker credentials.\n'
            f'Username: "{username}" , server "{server}"\n'
        )
        logger.debug('Auth error: {}'.format(ex))
        sys.exit(1)
def main():
    """Entry point: parse CLI options, authenticate, and run the Worker loop."""
    args = parse_args()
    if args.tag and not args.tag.replace("-", "").isalnum():
        # NOTE(review): raising ArgumentTypeError outside of argparse surfaces
        # as a traceback rather than a usage message; parser.error() would be
        # cleaner -- confirm before changing, callers may rely on the exception.
        raise argparse.ArgumentTypeError(
            "Worker tag must only contain letters, numbers or hyphens."
        )

    # Configure logging
    log_format: str = '%(asctime)s %(message)s'
    if args.verbose:
        log_format += ' %(pathname)s %(lineno)d'
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    logging.basicConfig(format=log_format, level=log_level)
    logging.getLogger('urllib3').setLevel(logging.INFO)
    # Initialize sentry logging
    if using_sentry():
        initialize_sentry()
    # This quits if connection unsuccessful
    bundle_service = connect_to_codalab_server(args.server, args.password_file)
    # Load some data into sentry
    if using_sentry():
        load_sentry_data(username=bundle_service._username, **vars(args))
    if args.shared_file_system:
        # No need to store bundles locally if filesystems are shared
        local_bundles_dir = None
        # Also no need to download dependencies if they're on the filesystem already
        dependency_manager = None
    else:
        local_bundles_dir = os.path.join(args.work_dir, 'runs')
        dependency_manager = DependencyManager(
            os.path.join(args.work_dir, 'dependencies-state.json'),
            bundle_service,
            args.work_dir,
            args.max_work_dir_size,
            args.download_dependencies_max_retries,
        )
    # Choose the image manager to match the selected container runtime.
    if args.container_runtime == "singularity":
        singularity_folder = os.path.join(args.work_dir, 'codalab_singularity_images')
        if not os.path.exists(singularity_folder):
            logger.info(
                'Local singularity image location %s doesn\'t exist, creating.', singularity_folder,
            )
            os.makedirs(singularity_folder, 0o770)
        image_manager = SingularityImageManager(
            args.max_image_size, args.max_image_cache_size, singularity_folder,
        )
        # todo workers with singularity don't work because this is set to none -- handle this
        docker_runtime = None
    else:
        image_manager = DockerImageManager(
            os.path.join(args.work_dir, 'images-state.json'),
            args.max_image_cache_size,
            args.max_image_size,
        )
        docker_runtime = docker_utils.get_available_runtime()
    # Set up local directories
    if not os.path.exists(args.work_dir):
        logging.debug('Work dir %s doesn\'t exist, creating.', args.work_dir)
        os.makedirs(args.work_dir, 0o770)
    if local_bundles_dir and not os.path.exists(local_bundles_dir):
        logger.info('%s doesn\'t exist, creating.', local_bundles_dir)
        os.makedirs(local_bundles_dir, 0o770)
    worker = Worker(
        image_manager,
        dependency_manager,
        os.path.join(args.work_dir, 'worker-state.json'),
        args.cpuset,
        args.gpuset,
        args.max_memory,
        args.id,
        args.tag,
        args.work_dir,
        local_bundles_dir,
        args.exit_when_idle,
        args.exit_after_num_runs,
        args.idle_seconds,
        args.checkin_frequency_seconds,
        bundle_service,
        args.shared_file_system,
        args.tag_exclusive,
        args.group,
        docker_runtime=docker_runtime,
        docker_network_prefix=args.network_prefix,
        pass_down_termination=args.pass_down_termination,
        delete_work_dir_on_exit=args.delete_work_dir_on_exit,
        exit_on_exception=args.exit_on_exception,
        shared_memory_size_gb=args.shared_memory_size_gb,
    )
    # Register a signal handler to ensure safe shutdown.
    for sig in [signal.SIGTERM, signal.SIGINT, signal.SIGHUP]:
        signal.signal(sig, lambda signup, frame: worker.signal())
    # BEGIN: DO NOT CHANGE THIS LINE UNLESS YOU KNOW WHAT YOU ARE DOING
    # THIS IS HERE TO KEEP TEST-CLI FROM HANGING
    logger.info('Worker started!')
    # END
    worker.start()
def parse_cpuset_args(arg):
    """
    Parse given arg into a set of integers representing cpus

    Arguments:
        arg: comma separated string of ints, or "ALL" representing all available cpus

    Raises:
        argparse.ArgumentTypeError: if arg is malformed, contains duplicates,
            or names a CPU the process cannot use.

    Bug fix: the previous version returned os.sched_getaffinity(0)
    unconditionally, so on Linux the user-supplied --cpuset value was
    silently ignored (the parsing code below the early return was
    unreachable). The affinity set is now used as the pool of *available*
    CPUs that the requested cpuset is validated against.
    """
    try:
        # Set of cores this process may actually use. For instance, on Slurm
        # this can be a strict subset such as {2, 3, 20, 21}.
        available_cpus = os.sched_getaffinity(0)
    except AttributeError:
        # os.sched_getaffinity() isn't available on all platforms,
        # so fall back to assuming all physical cores are usable.
        available_cpus = set(range(psutil.cpu_count(logical=False)))

    if arg == 'ALL':
        return set(available_cpus)
    try:
        cpuset = [int(s) for s in arg.split(',')]
    except ValueError:
        raise argparse.ArgumentTypeError(
            "CPUSET_STR invalid format: must be a string of comma-separated integers"
        )
    if not len(cpuset) == len(set(cpuset)):
        raise argparse.ArgumentTypeError("CPUSET_STR invalid: CPUs not distinct values")
    if not all(cpu in available_cpus for cpu in cpuset):
        raise argparse.ArgumentTypeError("CPUSET_STR invalid: CPUs out of range")
    return set(cpuset)
def parse_gpuset_args(arg):
    """
    Parse given arg into a set of strings representing gpu UUIDs

    By default, we will try to start a Docker container with nvidia-smi to get the GPUs.
    If we get an exception that the Docker socket does not exist, which will be the case
    on Singularity workers, because they do not have root access, and therefore, access to
    the Docker socket, we should try to get the GPUs with Singularity.

    Arguments:
        arg: comma separated string of ints, or "ALL" representing all gpus
    """
    logger.info(f"GPUSET arg: {arg}")
    if arg in ('', 'NONE'):
        return set()

    # Discover the GPUs on this host: mapping of GPU index -> GPU UUID.
    all_gpus = {}
    try:
        all_gpus = docker_utils.get_nvidia_devices()
    except docker_utils.DockerException:
        pass
    except requests.exceptions.ConnectionError:
        # Docker socket can't be used; retry via Singularity.
        try:
            all_gpus = docker_utils.get_nvidia_devices(use_docker=False)
        except SingularityError:
            pass

    if arg == 'ALL':
        return set(all_gpus.values())
    requested = arg.split(',')
    # Accept either a GPU index or a GPU UUID for each entry.
    known = set(all_gpus) | set(all_gpus.values())
    if any(gpu not in known for gpu in requested):
        raise argparse.ArgumentTypeError("GPUSET_STR invalid: GPUs out of range")
    # Normalize indices to UUIDs; entries that already are UUIDs pass through.
    return {all_gpus.get(gpu, gpu) for gpu in requested}
# Script entry point: only run the worker when executed directly, not on import.
if __name__ == '__main__':
    main()
| 35.978208 | 119 | 0.63941 |
import argparse
import getpass
import os
import logging
import signal
import socket
import stat
import sys
import psutil
import requests
from codalab.common import SingularityError
from codalab.lib.formatting import parse_size
from codalab.lib.telemetry_util import initialize_sentry, load_sentry_data, using_sentry
from .bundle_service_client import BundleServiceClient, BundleAuthException
from . import docker_utils
from .worker import Worker
from codalab.worker.dependency_manager import DependencyManager
from codalab.worker.docker_image_manager import DockerImageManager
from codalab.worker.singularity_image_manager import SingularityImageManager
logger = logging.getLogger(__name__)

# Sentinel meaning "effectively unlimited" for --exit-after-num-runs.
DEFAULT_EXIT_AFTER_NUM_RUNS = 999999999
def parse_args():
    """Build the CodaLab-worker command-line parser and parse ``sys.argv``.

    Returns:
        argparse.Namespace with all worker configuration options.

    Note: argparse also runs *string* defaults through the ``type=``
    converters, so the ``'ALL'`` defaults for --cpuset/--gpuset are parsed
    by parse_cpuset_args/parse_gpuset_args even when the flag is omitted.
    """
    parser = argparse.ArgumentParser(description='CodaLab worker.')
    # --- Identity and server connection ---
    parser.add_argument(
        '--tag',
        help='Tag (can only contain letters, numbers or hyphens) that allows for scheduling runs on specific workers.',
    )
    parser.add_argument(
        '--server',
        default='https://worksheets.codalab.org',
        help='URL of the CodaLab server, in the format '
        '<http|https>://<hostname>[:<port>] (e.g., https://worksheets.codalab.org)',
    )
    # --- Local storage and resource limits ---
    parser.add_argument(
        '--work-dir',
        default='codalab-worker-scratch',
        help='Directory where to store temporary bundle data, '
        'including dependencies and the data from run '
        'bundles.',
    )
    parser.add_argument(
        '--network-prefix', default='codalab_worker_network', help='Docker network name prefix'
    )
    parser.add_argument(
        '--cpuset',
        type=parse_cpuset_args,
        metavar='CPUSET_STR',
        default='ALL',
        help='Comma-separated list of CPUs in which to allow bundle execution, '
        '(e.g., \"0,2,3\", \"1\").',
    )
    parser.add_argument(
        '--gpuset',
        type=parse_gpuset_args,
        metavar='GPUSET_STR',
        default='ALL',
        help='Comma-separated list of GPUs in which to allow bundle execution. '
        'Each GPU can be specified by its index or UUID'
        '(e.g., \"0,1\", \"1\", \"GPU-62casdfasd-asfas...\"',
    )
    parser.add_argument(
        '--max-work-dir-size',
        type=parse_size,
        metavar='SIZE',
        default='10g',
        help='Maximum size of the temporary bundle data ' '(e.g., 3, 3k, 3m, 3g, 3t).',
    )
    parser.add_argument(
        '--max-image-cache-size',
        type=parse_size,
        metavar='SIZE',
        default=None,
        help='Limit the disk space used to cache Docker images '
        'for worker jobs to the specified amount (e.g. '
        '3, 3k, 3m, 3g, 3t). If the limit is exceeded, '
        'the least recently used images are removed first. '
        'Worker will not remove any images if this option '
        'is not specified.',
    )
    parser.add_argument(
        '--max-image-size',
        type=parse_size,
        metavar='SIZE',
        default=None,
        help='Limit the size of Docker images to download from the Docker Hub'
        '(e.g. 3, 3k, 3m, 3g, 3t). If the limit is exceeded, '
        'the requested image will not be downloaded. '
        'The bundle depends on this image will fail accordingly. '
        'If running an image on the singularity runtime, there is no size '
        'check because singularity hub does not support the querying of image size',
    )
    parser.add_argument(
        '--max-memory',
        type=parse_size,
        metavar='SIZE',
        default=None,
        help='Limit the amount of memory to a worker in bytes' '(e.g. 3, 3k, 3m, 3g, 3t).',
    )
    # --- Authentication ---
    parser.add_argument(
        '--password-file',
        help='Path to the file containing the username and '
        'password for logging into the bundle service, '
        'each on a separate line. If not specified, the '
        'password is read from standard input.',
    )
    # --- Lifecycle / behavior flags ---
    parser.add_argument(
        '--verbose', action='store_true', help='Whether to output verbose log messages.'
    )
    parser.add_argument(
        '--exit-when-idle',
        action='store_true',
        help='If specified the worker quits if it finds itself with no jobs after a checkin',
    )
    parser.add_argument(
        '--container-runtime',
        choices=['docker', 'singularity'],
        default='docker',
        help='The worker will run jobs on the specified backend. The options are docker (default) or singularity',
    )
    parser.add_argument(
        '--idle-seconds',
        help='Not running anything for this many seconds constitutes idle',
        type=int,
        default=0,
    )
    parser.add_argument(
        '--checkin-frequency-seconds',
        help='Number of seconds to wait between worker check-ins',
        type=int,
        default=5,
    )
    parser.add_argument(
        '--id',
        default='%s(%d)' % (socket.gethostname(), os.getpid()),
        help='Internal use: ID to use for the worker.',
    )
    parser.add_argument(
        '--shared-file-system',
        action='store_true',
        help='To be used when the server and the worker share the bundle store on their filesystems.',
    )
    parser.add_argument(
        '--group', default=None, help='Name of the group that can run jobs on this worker'
    )
    parser.add_argument(
        '--tag-exclusive',
        action='store_true',
        help='To be used when the worker should only run bundles that match the worker\'s tag.',
    )
    parser.add_argument(
        '--pass-down-termination',
        action='store_true',
        help='Terminate the worker and kill all the existing running bundles.',
    )
    parser.add_argument(
        '--delete-work-dir-on-exit',
        action='store_true',
        help="Delete the worker's working directory when the worker process exits.",
    )
    parser.add_argument(
        '--exit-after-num-runs',
        type=int,
        default=DEFAULT_EXIT_AFTER_NUM_RUNS,
        help='The worker quits after this many jobs assigned to this worker',
    )
    parser.add_argument(
        '--exit-on-exception',
        action='store_true',
        help="Exit the worker if it encounters an exception (rather than sleeping).",
    )
    parser.add_argument(
        '--download-dependencies-max-retries',
        type=int,
        default=3,
        help='The number of times to retry downloading dependencies after a failure (defaults to 3).',
    )
    parser.add_argument(
        '--shared-memory-size-gb',
        type=int,
        default=1,
        help='The shared memory size of the run container in GB (defaults to 1).',
    )
    return parser.parse_args()
def connect_to_codalab_server(server, password_file):
    """Log into the CodaLab bundle service and return an authenticated client.

    Credentials are read from ``password_file`` (username on the first line,
    password on the second) when given; otherwise from the CODALAB_USERNAME /
    CODALAB_PASSWORD environment variables, prompting interactively for any
    value still missing. Exits the process with status 1 when the password
    file is group/world accessible or when authentication fails.
    """
    logger.info('Connecting to %s' % server)
    if password_file:
        # Refuse credential files readable or writable by group/others.
        if os.stat(password_file).st_mode & (stat.S_IRWXG | stat.S_IRWXO):
            print(
                "Permissions on password file are too lax.\n\
            Only the user should be allowed to access the file.\n\
            On Linux, run:\n\
            chmod 600 %s"
                % password_file,
                file=sys.stderr,
            )
            sys.exit(1)
        with open(password_file) as f:
            username = f.readline().strip()
            password = f.readline().strip()
    else:
        username = os.environ.get('CODALAB_USERNAME')
        if username is None:
            username = input('Username: ')
        password = os.environ.get('CODALAB_PASSWORD')
        if password is None:
            password = getpass.getpass()
    try:
        bundle_service = BundleServiceClient(server, username, password)
        return bundle_service
    except BundleAuthException as ex:
        logger.error(
            'Cannot log into the bundle service. Please check your worker credentials.\n'
            f'Username: "{username}" , server "{server}"\n'
        )
        logger.debug('Auth error: {}'.format(ex))
        sys.exit(1)
def main():
    """Entry point: parse CLI options, authenticate, and run the Worker loop."""
    args = parse_args()
    if args.tag and not args.tag.replace("-", "").isalnum():
        # NOTE(review): raising ArgumentTypeError outside of argparse surfaces
        # as a traceback rather than a usage message; parser.error() would be
        # cleaner -- confirm before changing, callers may rely on the exception.
        raise argparse.ArgumentTypeError(
            "Worker tag must only contain letters, numbers or hyphens."
        )
    # Configure logging; verbose mode adds the source location of each record.
    log_format: str = '%(asctime)s %(message)s'
    if args.verbose:
        log_format += ' %(pathname)s %(lineno)d'
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    logging.basicConfig(format=log_format, level=log_level)
    logging.getLogger('urllib3').setLevel(logging.INFO)
    # Optional Sentry error reporting.
    if using_sentry():
        initialize_sentry()
    # Exits the process if authentication fails.
    bundle_service = connect_to_codalab_server(args.server, args.password_file)
    if using_sentry():
        load_sentry_data(username=bundle_service._username, **vars(args))
    if args.shared_file_system:
        # Server and worker share the bundle store: nothing to stage locally
        # and no dependencies to download.
        local_bundles_dir = None
        dependency_manager = None
    else:
        local_bundles_dir = os.path.join(args.work_dir, 'runs')
        dependency_manager = DependencyManager(
            os.path.join(args.work_dir, 'dependencies-state.json'),
            bundle_service,
            args.work_dir,
            args.max_work_dir_size,
            args.download_dependencies_max_retries,
        )
    # Choose the image manager to match the selected container runtime.
    if args.container_runtime == "singularity":
        singularity_folder = os.path.join(args.work_dir, 'codalab_singularity_images')
        if not os.path.exists(singularity_folder):
            logger.info(
                'Local singularity image location %s doesn\'t exist, creating.', singularity_folder,
            )
            os.makedirs(singularity_folder, 0o770)
        image_manager = SingularityImageManager(
            args.max_image_size, args.max_image_cache_size, singularity_folder,
        )
        # NOTE(review): docker_runtime stays None on the singularity path --
        # confirm downstream Worker handles that before relying on it.
        docker_runtime = None
    else:
        image_manager = DockerImageManager(
            os.path.join(args.work_dir, 'images-state.json'),
            args.max_image_cache_size,
            args.max_image_size,
        )
        docker_runtime = docker_utils.get_available_runtime()
    # Set up local directories
    if not os.path.exists(args.work_dir):
        logging.debug('Work dir %s doesn\'t exist, creating.', args.work_dir)
        os.makedirs(args.work_dir, 0o770)
    if local_bundles_dir and not os.path.exists(local_bundles_dir):
        logger.info('%s doesn\'t exist, creating.', local_bundles_dir)
        os.makedirs(local_bundles_dir, 0o770)
    worker = Worker(
        image_manager,
        dependency_manager,
        os.path.join(args.work_dir, 'worker-state.json'),
        args.cpuset,
        args.gpuset,
        args.max_memory,
        args.id,
        args.tag,
        args.work_dir,
        local_bundles_dir,
        args.exit_when_idle,
        args.exit_after_num_runs,
        args.idle_seconds,
        args.checkin_frequency_seconds,
        bundle_service,
        args.shared_file_system,
        args.tag_exclusive,
        args.group,
        docker_runtime=docker_runtime,
        docker_network_prefix=args.network_prefix,
        pass_down_termination=args.pass_down_termination,
        delete_work_dir_on_exit=args.delete_work_dir_on_exit,
        exit_on_exception=args.exit_on_exception,
        shared_memory_size_gb=args.shared_memory_size_gb,
    )
    # Register a signal handler to ensure safe shutdown.
    for sig in [signal.SIGTERM, signal.SIGINT, signal.SIGHUP]:
        signal.signal(sig, lambda signup, frame: worker.signal())
    # BEGIN: DO NOT CHANGE THIS LINE UNLESS YOU KNOW WHAT YOU ARE DOING
    # THIS IS HERE TO KEEP TEST-CLI FROM HANGING
    logger.info('Worker started!')
    # END
    worker.start()
def parse_cpuset_args(arg):
    """Parse the CPUSET_STR argument into a set of usable CPU indices.

    Prefers the scheduler affinity mask when the platform exposes it (so a
    Slurm allocation of e.g. {2,3,20,21} is honoured); otherwise falls back
    to validating the user-supplied comma-separated list against the number
    of physical cores.
    """
    try:
        # Use the affinity mask of this process when the OS provides one.
        return os.sched_getaffinity(0)
    except AttributeError:
        # os.sched_getaffinity() isn't available on all platforms,
        cpu_count = psutil.cpu_count(logical=False)
        if arg == 'ALL':
            requested = list(range(cpu_count))
        else:
            try:
                requested = [int(piece) for piece in arg.split(',')]
            except ValueError:
                raise argparse.ArgumentTypeError(
                    "CPUSET_STR invalid format: must be a string of comma-separated integers"
                )
        if len(requested) != len(set(requested)):
            raise argparse.ArgumentTypeError("CPUSET_STR invalid: CPUs not distinct values")
        valid = range(cpu_count)
        if any(cpu not in valid for cpu in requested):
            raise argparse.ArgumentTypeError("CPUSET_STR invalid: CPUs out of range")
        return set(requested)
def parse_gpuset_args(arg):
    """Parse the GPUSET_STR argument into a set of GPU device identifiers.

    '' or 'NONE' selects no GPUs; 'ALL' selects every detected device;
    otherwise a comma-separated list of names or ids is validated against
    the devices reported by the container runtime.
    """
    logger.info(f"GPUSET arg: {arg}")
    if arg in ('', 'NONE'):
        return set()
    try:
        all_gpus = docker_utils.get_nvidia_devices()
    except docker_utils.DockerException:
        all_gpus = {}
    except requests.exceptions.ConnectionError:
        # Docker daemon unreachable; try querying the devices directly.
        try:
            all_gpus = docker_utils.get_nvidia_devices(use_docker=False)
        except SingularityError:
            all_gpus = {}
    if arg == 'ALL':
        return set(all_gpus.values())
    requested = arg.split(',')
    known = set(all_gpus) | set(all_gpus.values())
    if not all(gpu in known for gpu in requested):
        raise argparse.ArgumentTypeError("GPUSET_STR invalid: GPUs out of range")
    # Map names through to device ids where possible.
    return {all_gpus.get(gpu, gpu) for gpu in requested}
# Script entry point.
if __name__ == '__main__':
    main()
| true | true |
f73db49a5d63d3e71b4e8e7f71b35b7760e34700 | 15,116 | py | Python | captoolkit/readgla12.py | tsutterley/captoolkit | 314c4d34f49012c25286478c943b0ab13c893c62 | [
"Apache-2.0"
] | 37 | 2019-09-27T00:36:16.000Z | 2022-01-31T01:51:19.000Z | captoolkit/readgla12.py | tsutterley/captoolkit | 314c4d34f49012c25286478c943b0ab13c893c62 | [
"Apache-2.0"
] | 3 | 2020-02-27T21:22:50.000Z | 2020-10-14T01:31:26.000Z | captoolkit/readgla12.py | tsutterley/captoolkit | 314c4d34f49012c25286478c943b0ab13c893c62 | [
"Apache-2.0"
] | 15 | 2019-09-24T08:06:49.000Z | 2021-11-03T14:44:19.000Z | #!/usr/bin/env python
"""
Reads GLA12 Release 634 HDF5.
Reads several files in parallel if njobs > 1 is specified.
Extracts a subset of the data based on a mask.tif file.
Example:
python readgla.py /mnt/devon-r0/shared_data/icesat/GLAH12.034/ /mnt/devon-r0/shared_data/icesat/grounded/ /mnt/devon-r0/shared_data/masks/ANT_groundedice_240m.tif 3031 A 600 1
See full GLA12 parameters at:
http://nsidc.org/data/docs/daac/glas_altimetry/data-dictionary-glah12.html
Notes:
For previous releases the path of some fields have changed!
Corrections applied by default (i.e. data come corrected):
instrument corrections - was applied
atmospheric delays (wet/dry tropo) - was applied
tides and load - was applied
GC offset - was applied
saturation (d_satElevCorr) - was NOT applied [1]
inter-campaign bias - was NOT applied
[1] If it is invalid, then the elevation should not be used.
The saturation correction flag (sat_corr_flg) is an important
flag to understand the possible quality of the elevation data.
To REMOVE the tide and load cor, and APPLY saturation cor:
elev_retide = d_elev + d_ocElv + d_ldElv + d_satElevCorr
"""
import os
import sys
import h5py
import pyproj
import numpy as np
import matplotlib.pyplot as plt
from joblib import Parallel, delayed
from gdalconst import *
from osgeo import gdal, osr
from scipy.ndimage import map_coordinates
def geotiffread(ifile, metaData):
    """Read a raster file and return coordinate grids, values and projection.

    metaData selects the pixel convention: "A" shifts by half a pixel so
    coordinates refer to cell centres; "P" uses the raw corner coordinates.
    Returns (X, Y, Z, dx, dy, proj) with dx/dy as absolute pixel sizes.
    """
    dataset = gdal.Open(ifile, GA_ReadOnly)
    src = osr.SpatialReference()
    src.ImportFromWkt(dataset.GetProjection())
    proj = src.ExportToWkt()
    Nx = dataset.RasterXSize
    Ny = dataset.RasterYSize
    trans = dataset.GetGeoTransform()
    dx = trans[1]
    dy = trans[5]
    if metaData == "A":
        (Xp, Yp) = np.meshgrid(np.arange(Nx), np.arange(Ny))
        X = trans[0] + (Xp + 0.5) * trans[1] + (Yp + 0.5) * trans[2]  # FIXME: bottleneck!
        Y = trans[3] + (Xp + 0.5) * trans[4] + (Yp + 0.5) * trans[5]
    if metaData == "P":
        (Xp, Yp) = np.meshgrid(np.arange(Nx), np.arange(Ny))
        X = trans[0] + Xp * trans[1] + Yp * trans[2]  # FIXME: bottleneck!
        Y = trans[3] + Xp * trans[4] + Yp * trans[5]
    band = dataset.GetRasterBand(1)
    Z = band.ReadAsArray()
    return X, Y, Z, np.abs(dx), np.abs(dy), proj
def bilinear2d(xd, yd, data, xq, yq, **kwargs):
    """Interpolate gridded values at query points.

    Parameters
    ----------
    xd, yd : 2-D coordinate grids (as returned by geotiffread); yd is
        expected to decrease down the rows (north-up raster) and is flipped
        internally so the y-axis increases.
    data : 2-D array of grid values matching xd/yd.
    xq, yq : query coordinates; either may be a scalar when the other is an
        array (the scalar is broadcast).
    kwargs : forwarded to scipy.ndimage.map_coordinates (e.g. order=1).

    Returns
    -------
    zq : interpolated values at (xq, yq).
    """
    xd = np.flipud(xd)
    yd = np.flipud(yd)
    data = np.flipud(data)
    xd = xd[0, :]
    yd = yd[:, 0]
    nx, ny = xd.size, yd.size
    (x_step, y_step) = (xd[1] - xd[0]), (yd[1] - yd[0])
    assert (ny, nx) == data.shape
    assert (xd[-1] > xd[0]) and (yd[-1] > yd[0])
    if np.size(xq) == 1 and np.size(yq) > 1:
        # BUG FIX: 'ones' was unqualified (NameError at runtime); use
        # np.ones to broadcast the scalar query coordinate.
        xq = xq * np.ones(yq.size)
    elif np.size(yq) == 1 and np.size(xq) > 1:
        yq = yq * np.ones(xq.size)
    # Convert physical coordinates to fractional pixel indices.
    xp = (xq - xd[0]) * (nx - 1) / (xd[-1] - xd[0])
    yp = (yq - yd[0]) * (ny - 1) / (yd[-1] - yd[0])
    coord = np.vstack([yp, xp])
    zq = map_coordinates(data, coord, **kwargs)
    return zq
def wrap_to_180(lon):
    """Wrap longitudes to the [-180, 180] degree range (in place).

    Values are shifted by one full revolution, so inputs are assumed to lie
    within one wrap of the target range (e.g. [0, 360) or (-360, 180]).
    The input array is modified and also returned for convenience.
    """
    lon[lon > 180.] -= 360.
    # Generalization: also wrap values below -180, which the original
    # silently left out of range.
    lon[lon < -180.] += 360.
    return lon
def list_files(path, endswith='.h5'):
    """Recursively collect paths of files under *path* ending in *endswith*."""
    found = []
    for dirpath, _dirnames, filenames in os.walk(path):
        for fname in filenames:
            if fname.endswith(endswith):
                found.append(os.path.join(dirpath, fname))
    return found
def track_type(time, lat):
    """Split a record into ascending/descending boolean masks.

    The record is divided into two segments at the point of maximum
    absolute latitude; each segment is classified as ascending when its
    latitude increases with time, otherwise descending.

    Returns (i_asc, i_des), boolean arrays of the same shape as lat.
    """
    # Label everything before the |lat| maximum as segment 1, the rest 0.
    segments = np.zeros(lat.shape)
    segments[0:np.argmax(np.abs(lat))] = 1
    i_asc = np.zeros(segments.shape, dtype=bool)
    for seg_id in np.unique(segments):
        (members,) = np.where(segments == seg_id)
        if len(members) < 2:
            # Too short to establish a direction.
            continue
        t_first = time[members].argmin()
        t_last = time[members].argmax()
        # Ascending when latitude grows between the earliest and latest shot.
        if lat[members][t_last] - lat[members][t_first] > 0:
            i_asc[members] = True
    return i_asc, ~i_asc
# Command-line interface (positional arguments).
indir = sys.argv[1]       # input dir
outdir = sys.argv[2]      # output dir
fmask = sys.argv[3]       # geotiff file with mask ('None' disables masking)
proj = str(sys.argv[4])   # epsg number
meta = sys.argv[5]        # "A" or "P"
index = int(sys.argv[6])  # mission index
njobs = int(sys.argv[7])  # number of parallel jobs
# Generate file list
files = list_files(indir, endswith='.H5')
print(('input dir:', indir))
print(('output dir:', outdir))
print(('mask file:', fmask))
print(('epsg num:', proj))
print(('metadata:', meta))
print(('njobs:', njobs))
print(('# files:', len(files)))
# Projection - unprojected lat/lon
projGeo = pyproj.Proj("+init=EPSG:4326")
# Make pyproj format
projection = '+init=EPSG:' + proj
# Projection - prediction grid
projGrd = pyproj.Proj(projection)
# Global running track counter (NOTE: shadows the builtin 'iter'; not
# safe under parallel execution since joblib workers get separate copies).
iter = 1
# NOTE(review): this overrides the mission index parsed from argv above --
# confirm the hard-coded 600 is intentional.
index = 600
# Test for mask
if fmask != 'None':
    # Read in masking grid
    (Xm, Ym, Zm, dX, dY, Proj) = geotiffread(fmask, meta)
def main(fname):
    """Read one GLA12 HDF5 granule, filter/correct it, and write one output
    HDF5 file per ascending/descending track segment."""
    print(('readg:', fname, '...'))
    # Running track counter shared across files (module-level state).
    global iter
    f = h5py.File(fname)
    d = {} # Dictionary for input fields
    d['t_sec'] = f['Data_40HZ/Time/d_UTCTime_40'] # [secs since 2000-01-01 12:00:00 UTC]
    d['lat'] = f['Data_40HZ/Geolocation/d_lat'] # [deg]
    d['lon'] = f['Data_40HZ/Geolocation/d_lon'] # [deg]
    d['num_pk'] = f['Data_40HZ/Waveform/i_numPk'] # Num Peaks found in the Return
    d['gain'] = f['Data_40HZ/Waveform/i_gval_rcv'] # counts [unitless]
    d['rec_nrg'] = f['Data_40HZ/Reflectivity/d_RecNrgAll'] # [joules]
    d['tx_nrg'] = f['Data_40HZ/Transmit_Energy/d_TxNrg'] # [joules]
    d['h_sat'] = f['Data_40HZ/Elevation_Corrections/d_satElevCorr'] # saturation cor [m]
    d['h_gc'] = f['Data_40HZ/Elevation_Corrections/d_GmC'] # GC-offset cor [m]
    d['h_dry'] = f['Data_40HZ/Elevation_Corrections/d_dTrop'] # dry tropo [m]
    d['h_wet'] = f['Data_40HZ/Elevation_Corrections/d_wTrop'] # wet tropo [m]
    d['h_sol'] = f['Data_40HZ/Geophysical/d_erElv'] # solid tide [m]
    d['h_geo'] = f['Data_40HZ/Geophysical/d_poTide'] # geoc pole tide [m]
    d['h_equi'] = f['Data_40HZ/Geophysical/d_eqElv'] # equilib tide [m]
    d['h_ellip'] = f['Data_40HZ/Geophysical/d_deltaEllip'] # h_TP - h_WGS84 [m]
    d['h_tide'] = f['Data_40HZ/Geophysical/d_ocElv'] # ocean tide [m]
    d['h_load'] = f['Data_40HZ/Geophysical/d_ldElv'] # load tide [m]
    d['h_cor'] = f['Data_40HZ/Elevation_Surfaces/d_elev'] # corrected height [m]
    d['misfit'] = f['Data_40HZ/Elevation_Surfaces/d_IceSVar'] # gaussian misfit [volts] [2]
    d['rec_ndx'] = f['Data_40HZ/Time/i_rec_ndx'] # record index
    d['shot_count'] = f['Data_40HZ/Time/i_shot_count'] # shot index within record
    # Elevation quality flag: 0=valid, 1=not_valid
    d['use_flg'] = f['Data_40HZ/Quality/elev_use_flg']
    # Cloud contamination flag: 0=false, 1=true
    d['cloud_flg'] = f['Data_40HZ/Elevation_Flags/elv_cloud_flg']
    # Attitude quality flag: 0=good, 50=warning, 100=bad, 127=not_valid
    d['att_flg'] = f['Data_40HZ/Quality/sigma_att_flg']
    # Saturation Correction Flag:
    # 0=not_saturated, 1=inconsequential, 2=applicable 3=not_computed 4=not_applicable
    d['sat_flg'] = f['Data_40HZ/Quality/sat_corr_flg']
    # 1Hz Track
    track_01Hz = f['Data_1HZ/Geolocation/i_track'][:]
    # Get unique track numbers
    track_id = np.unique(track_01Hz)
    # Initialize vector
    track_40Hz = np.empty((0,1), dtype='int')
    # Construct 40 Hz track vector - IMPROVE! SLOW WAY OF DOING IT
    for i in range(len(track_01Hz)):
        # Create 40 Hz vector: each 1 Hz record expands to 40 shots
        track_40Hz = np.vstack((track_40Hz, np.ones((40,1),dtype='int') * track_01Hz[i]))
    # Construct cycle vector
    #cycle = int(fname[fname.rfind('/') + 1:].split('_')[3]) * np.ones(track_40Hz.shape)
    # Individual track identifier
    #d['orbit'] = np.char.add(cycle.astype('int').astype('str'), track_40Hz.astype('int').astype('str')).astype('int')
    '''
    [2] For postprocessing: The RMS error converged to about 0.25 m after
    removing the data with the 5% highest waveform misfits in each campaign, so we
    adopted that as a data-editing threshold, retaining 95% of the original data.
    Also, filter out cloud-contaminated points using the 'cloud_flg' param.
    '''
    # Wrap longitude to -180/180 degrees
    d['lon'] = wrap_to_180(d['lon'][:])
    # Reproject coordinates
    lon, lat = d['lon'][:], d['lat'][:]
    # Convert to Stereographical coordinates
    (x, y) = pyproj.transform(projGeo, projGrd, lon, lat)
    # Test for mask
    if fmask != 'None':
        # Interpolation of grid to points for masking
        Ii = bilinear2d(Xm, Ym, Zm, x.T, y.T, order=1)
        # Set all NaN's to zero
        Ii[np.isnan(Ii)] = 0
        # Convert to boolean
        mask = Ii == 1
    else:
        # Select all data
        mask = np.ones(lat.shape, dtype='bool')
    # Parameters for selecting valid pts
    h_cor = d['h_cor'][:]
    h_sat = d['h_sat'][:]
    use_flg = d['use_flg'][:]
    sat_flg = d['sat_flg'][:]
    att_flg = d['att_flg'][:]
    num_pk = d['num_pk'][:]
    # Get index of valid pts (inside mask, finite heights, sane coords,
    # valid/unsaturated/good-attitude single-peak returns)
    idx, = np.where(
        (mask == 1) &
        (np.abs(h_cor) < 1e10) &
        (np.abs(h_sat) < 1e10) &
        (np.abs(lat) <= 90) &
        (np.abs(lon) <= 180) &
        (use_flg == 0) &
        (sat_flg <= 2) &
        (att_flg == 0) &
        (num_pk == 1))
    # Check if no valid pts
    if len(idx) == 0:
        print(('no valid pts:', fname))
        return
    # Keep only valid pts (and load to memory)
    for k in list(d.keys()):
        # Edit all the fields
        d[k] = d[k][:][idx]
    # Unapply tides (retide)
    d['h_cor'] += d['h_tide'] + d['h_load']
    # Apply saturation cor
    d['h_cor'] += d['h_sat']
    # Convert ellipsoid: h_TP -> h_WGS84
    d['h_cor'] -= d['h_ellip']
    #FIXME: THIS IS NOT ORBIT NUMBER (ONE ID FOR EACH TRACK)!!!
    # Combine rec_ndx and shot_count to uniquely identify each GLAS laser shot
    #d['orbit'] = np.char.add(d['rec_ndx'].astype('str'),
    #                         d['shot_count'].astype('str')).astype('int')
    # Compute correct time - add back 'year 2000 + 12 hours' in secs
    d['t_sec'] += (2000 * 365.25 * 24 * 3600.) + (12 * 3600.)
    # Compute time in decimal years
    d['t_year'] = d['t_sec'] / (365.25 * 24 * 3600.)
    # Compute time since 1970 - remove year 1970 in secs
    d['t_sec'] -= 1970 * 365.25 * 24 * 3600.
    # Change path and/or name of read file
    name, ext = os.path.splitext(os.path.basename(fname))
    # Clip track vector
    tracks_40Hz = track_40Hz[idx]
    # Compute unique tracks
    tracks = np.unique(tracks_40Hz)
    # Create orbit array (filled per track below)
    d['orbit'] = np.ones(d['lat'][:].shape) * np.nan
    # Select fields to save
    out = ['orbit',
           't_sec',
           't_year',
           'lon',
           'lat',
           'h_cor',
           'h_dry',
           'h_ellip',
           'h_equi',
           'h_gc',
           'h_geo',
           'h_sat',
           'h_sol',
           'h_wet',
           'gain',
           'misfit',
           'tx_nrg',
           'rec_nrg',
           'cloud_flg',]
    # Loop through tracks
    for i in range(len(tracks)):
        # Get index of individual tracks
        ind = (tracks_40Hz == tracks[i]).reshape(d['lat'][:].shape)
        # Set track datum identifier for each track
        (dec,year)=np.modf(d['t_year'][ind][0])
        month = np.round(dec * 12, decimals=0)
        day = np.round(dec * 365.25, decimals=0)
        # Datum string
        date = str(int(year))+'_'+str(int(month)).zfill(2)+'_'+str(int(day)).zfill(3)
        # Separate tracks into ascending/descending parts
        (i_asc, i_des) = track_type(d['t_sec'][ind], d['lat'][ind])
        # Save ascending track
        if len(d['lat'][ind][i_asc]) > 0:
            # Pseudo orbit number generation (mission index + running counter)
            d['orbit'][ind] = np.char.add(str(index), str(iter)).astype('int')
            # Orbit type identifier
            str_orb = 'READ_A'
            # Track number string
            str_trknum = '_'+str(int(iter)).zfill(6)+'_'
            # Fullname of output file
            outfile = os.path.join(outdir, name[0:7] + date + str_trknum + str_orb + ext)
            # Save data
            with h5py.File(outfile, 'w') as fout:
                [fout.create_dataset(k, data=d[k][ind][i_asc]) for k in out]
            # Update counter
            iter += 1
            print(('output file:', outfile))
        # Save descending track
        if len(d['lat'][ind][i_des]) > 0:
            # Pseudo orbit number generation (mission index + running counter)
            d['orbit'][ind] = np.char.add(str(index), str(iter)).astype('int')
            # Orbit type identifier
            str_orb = 'READ_D'
            # Track number string
            str_trknum = '_'+str(int(iter)).zfill(6)+'_'
            # Fullname of output file
            outfile = os.path.join(outdir, name[0:7] + date + str_trknum + str_orb + ext)
            # Save data
            with h5py.File(outfile, 'w') as fout:
                [fout.create_dataset(k, data=d[k][ind][i_des]) for k in out]
            # Update counter
            iter += 1
            print(('output file:', outfile))
    f.close()
# Dispatch: run sequentially for a single job, otherwise fan out with joblib.
if njobs == 1:
    print('running sequential code ...')
    [main(f) for f in files]
else:
    print(('running parallel code (%d jobs) ...' % njobs))
    from joblib import Parallel, delayed
    Parallel(n_jobs=njobs, verbose=5)(delayed(main)(f) for f in files)
| 31.039014 | 180 | 0.546705 |
import os
import sys
import h5py
import pyproj
import numpy as np
import matplotlib.pyplot as plt
from joblib import Parallel, delayed
from gdalconst import *
from osgeo import gdal, osr
from scipy.ndimage import map_coordinates
def geotiffread(ifile,metaData):
file = gdal.Open(ifile, GA_ReadOnly)
projection = file.GetProjection()
src = osr.SpatialReference()
src.ImportFromWkt(projection)
proj = src.ExportToWkt()
Nx = file.RasterXSize
Ny = file.RasterYSize
trans = file.GetGeoTransform()
dx = trans[1]
dy = trans[5]
if metaData == "A":
xp = np.arange(Nx)
yp = np.arange(Ny)
(Xp, Yp) = np.meshgrid(xp,yp)
X = trans[0] + (Xp+0.5)*trans[1] + (Yp+0.5)*trans[2]
Y = trans[3] + (Xp+0.5)*trans[4] + (Yp+0.5)*trans[5]
if metaData == "P":
xp = np.arange(Nx)
yp = np.arange(Ny)
(Xp, Yp) = np.meshgrid(xp,yp)
X = trans[0] + Xp*trans[1] + Yp*trans[2]
Y = trans[3] + Xp*trans[4] + Yp*trans[5]
band = file.GetRasterBand(1)
Z = band.ReadAsArray()
dx = np.abs(dx)
dy = np.abs(dy)
return X, Y, Z, dx, dy, proj
def bilinear2d(xd,yd,data,xq,yq, **kwargs):
xd = np.flipud(xd)
yd = np.flipud(yd)
data = np.flipud(data)
xd = xd[0,:]
yd = yd[:,0]
nx, ny = xd.size, yd.size
(x_step, y_step) = (xd[1]-xd[0]), (yd[1]-yd[0])
assert (ny, nx) == data.shape
assert (xd[-1] > xd[0]) and (yd[-1] > yd[0])
if np.size(xq) == 1 and np.size(yq) > 1:
xq = xq*ones(yq.size)
elif np.size(yq) == 1 and np.size(xq) > 1:
yq = yq*ones(xq.size)
xp = (xq-xd[0])*(nx-1)/(xd[-1]-xd[0])
yp = (yq-yd[0])*(ny-1)/(yd[-1]-yd[0])
coord = np.vstack([yp,xp])
zq = map_coordinates(data, coord, **kwargs)
return zq
def wrap_to_180(lon):
lon[lon>180] -= 360.
return lon
def list_files(path, endswith='.h5'):
return [os.path.join(dpath, f)
for dpath, dnames, fnames in os.walk(path)
for f in fnames if f.endswith(endswith)]
def track_type(time, lat):
tracks = np.zeros(lat.shape)
tracks[0:np.argmax(np.abs(lat))] = 1
i_asc = np.zeros(tracks.shape, dtype=bool)
for track in np.unique(tracks):
i_track, = np.where(track == tracks)
if len(i_track) < 2:
continue
i_min = time[i_track].argmin()
i_max = time[i_track].argmax()
lat_diff = lat[i_track][i_max] - lat[i_track][i_min]
if lat_diff > 0:
i_asc[i_track] = True
return i_asc, np.invert(i_asc)
indir = sys.argv[1] # input dir
outdir = sys.argv[2] # output dir
fmask = sys.argv[3] # geotiff file with mask
proj = str(sys.argv[4]) # epsg number
meta = sys.argv[5] # "A" or "P"
index = int(sys.argv[6]) # mission index
njobs = int(sys.argv[7]) # number of parallel jobs
# Generate file list
files = list_files(indir, endswith='.H5')
print(('input dir:', indir))
print(('output dir:', outdir))
print(('mask file:', fmask))
print(('epsg num:', proj))
print(('metadata:', meta))
print(('njobs:', njobs))
print(('
# Projection - unprojected lat/lon
projGeo = pyproj.Proj("+init=EPSG:4326")
# Make pyproj format
projection = '+init=EPSG:' + proj
# Projection - prediction grid
projGrd = pyproj.Proj(projection)
iter = 1
index = 600
# Test for mask
if fmask != 'None':
# Read in masking grid
(Xm, Ym, Zm, dX, dY, Proj) = geotiffread(fmask, meta)
def main(fname):
print(('readg:', fname, '...'))
global iter
f = h5py.File(fname)
d = {} # Dictionary for input fields
d['t_sec'] = f['Data_40HZ/Time/d_UTCTime_40'] # [secs since 2000-01-01 12:00:00 UTC]
d['lat'] = f['Data_40HZ/Geolocation/d_lat'] # [deg]
d['lon'] = f['Data_40HZ/Geolocation/d_lon'] # [deg]
d['num_pk'] = f['Data_40HZ/Waveform/i_numPk'] # Num Peaks found in the Return
d['gain'] = f['Data_40HZ/Waveform/i_gval_rcv'] # counts [unitless]
d['rec_nrg'] = f['Data_40HZ/Reflectivity/d_RecNrgAll'] # [joules]
d['tx_nrg'] = f['Data_40HZ/Transmit_Energy/d_TxNrg'] # [joules]
d['h_sat'] = f['Data_40HZ/Elevation_Corrections/d_satElevCorr'] # saturation cor [m]
d['h_gc'] = f['Data_40HZ/Elevation_Corrections/d_GmC'] # GC-offset cor [m]
d['h_dry'] = f['Data_40HZ/Elevation_Corrections/d_dTrop'] # dry tropo [m]
d['h_wet'] = f['Data_40HZ/Elevation_Corrections/d_wTrop'] # wet tropo [m]
d['h_sol'] = f['Data_40HZ/Geophysical/d_erElv'] # solid tide [m]
d['h_geo'] = f['Data_40HZ/Geophysical/d_poTide'] # geoc pole tide [m]
d['h_equi'] = f['Data_40HZ/Geophysical/d_eqElv'] # equilib tide [m]
d['h_ellip'] = f['Data_40HZ/Geophysical/d_deltaEllip'] # h_TP - h_WGS84 [m]
d['h_tide'] = f['Data_40HZ/Geophysical/d_ocElv'] # ocean tide [m]
d['h_load'] = f['Data_40HZ/Geophysical/d_ldElv'] # load tide [m]
d['h_cor'] = f['Data_40HZ/Elevation_Surfaces/d_elev'] # corrected height [m]
d['misfit'] = f['Data_40HZ/Elevation_Surfaces/d_IceSVar'] # gaussian misfit [volts] [2]
d['rec_ndx'] = f['Data_40HZ/Time/i_rec_ndx'] # record index
d['shot_count'] = f['Data_40HZ/Time/i_shot_count'] # shot index within record
# Elevation quality flag: 0=valid, 1=not_valid
d['use_flg'] = f['Data_40HZ/Quality/elev_use_flg']
# Cloud contamination flag: 0=false, 1=true
d['cloud_flg'] = f['Data_40HZ/Elevation_Flags/elv_cloud_flg']
# Attitude quality flag: 0=good, 50=warning, 100=bad, 127=not_valid
d['att_flg'] = f['Data_40HZ/Quality/sigma_att_flg']
# Saturation Correction Flag:
# 0=not_saturated, 1=inconsequential, 2=applicable 3=not_computed 4=not_applicable
d['sat_flg'] = f['Data_40HZ/Quality/sat_corr_flg']
# 1Hz Track
track_01Hz = f['Data_1HZ/Geolocation/i_track'][:]
# Get unique track numbers
track_id = np.unique(track_01Hz)
# Initialize vector
track_40Hz = np.empty((0,1), dtype='int')
# Construct 40 Hz track vector - IMPROVE! SLOW WAY OF DOING IT
for i in range(len(track_01Hz)):
# Create 40 Hz vector
track_40Hz = np.vstack((track_40Hz, np.ones((40,1),dtype='int') * track_01Hz[i]))
# Construct cycle vector
#cycle = int(fname[fname.rfind('/') + 1:].split('_')[3]) * np.ones(track_40Hz.shape)
# Induvidual track identifier
#d['orbit'] = np.char.add(cycle.astype('int').astype('str'), track_40Hz.astype('int').astype('str')).astype('int')
# Wrap longitude to -180/180 degrees
d['lon'] = wrap_to_180(d['lon'][:])
# Reproject coordinates
lon, lat = d['lon'][:], d['lat'][:]
# Converte to Stereographical coordinates
(x, y) = pyproj.transform(projGeo, projGrd, lon, lat)
# Test for mask
if fmask != 'None':
# Interpolation of grid to points for masking
Ii = bilinear2d(Xm, Ym, Zm, x.T, y.T, order=1)
# Set all NaN's to zero
Ii[np.isnan(Ii)] = 0
mask = Ii == 1
else:
mask = np.ones(lat.shape, dtype='bool')
h_cor = d['h_cor'][:]
h_sat = d['h_sat'][:]
use_flg = d['use_flg'][:]
sat_flg = d['sat_flg'][:]
att_flg = d['att_flg'][:]
num_pk = d['num_pk'][:]
idx, = np.where(
(mask == 1) &
(np.abs(h_cor) < 1e10) &
(np.abs(h_sat) < 1e10) &
(np.abs(lat) <= 90) &
(np.abs(lon) <= 180) &
(use_flg == 0) &
(sat_flg <= 2) &
(att_flg == 0) &
(num_pk == 1))
if len(idx) == 0:
print(('no valid pts:', fname))
return
for k in list(d.keys()):
d[k] = d[k][:][idx]
d['h_cor'] += d['h_tide'] + d['h_load']
d['h_cor'] += d['h_sat']
d['h_cor'] -= d['h_ellip']
d['t_sec'] += (2000 * 365.25 * 24 * 3600.) + (12 * 3600.)
d['t_year'] = d['t_sec'] / (365.25 * 24 * 3600.)
d['t_sec'] -= 1970 * 365.25 * 24 * 3600.
name, ext = os.path.splitext(os.path.basename(fname))
tracks_40Hz = track_40Hz[idx]
tracks = np.unique(tracks_40Hz)
d['orbit'] = np.ones(d['lat'][:].shape) * np.nan
out = ['orbit',
't_sec',
't_year',
'lon',
'lat',
'h_cor',
'h_dry',
'h_ellip',
'h_equi',
'h_gc',
'h_geo',
'h_sat',
'h_sol',
'h_wet',
'gain',
'misfit',
'tx_nrg',
'rec_nrg',
'cloud_flg',]
for i in range(len(tracks)):
ind = (tracks_40Hz == tracks[i]).reshape(d['lat'][:].shape)
(dec,year)=np.modf(d['t_year'][ind][0])
month = np.round(dec * 12, decimals=0)
day = np.round(dec * 365.25, decimals=0)
date = str(int(year))+'_'+str(int(month)).zfill(2)+'_'+str(int(day)).zfill(3)
(i_asc, i_des) = track_type(d['t_sec'][ind], d['lat'][ind])
if len(d['lat'][ind][i_asc]) > 0:
d['orbit'][ind] = np.char.add(str(index), str(iter)).astype('int')
str_orb = 'READ_A'
str_trknum = '_'+str(int(iter)).zfill(6)+'_'
outfile = os.path.join(outdir, name[0:7] + date + str_trknum + str_orb + ext)
with h5py.File(outfile, 'w') as fout:
[fout.create_dataset(k, data=d[k][ind][i_asc]) for k in out]
iter += 1
print(('output file:', outfile))
if len(d['lat'][ind][i_des]) > 0:
d['orbit'][ind] = np.char.add(str(index), str(iter)).astype('int')
str_orb = 'READ_D'
str_trknum = '_'+str(int(iter)).zfill(6)+'_'
outfile = os.path.join(outdir, name[0:7] + date + str_trknum + str_orb + ext)
with h5py.File(outfile, 'w') as fout:
[fout.create_dataset(k, data=d[k][ind][i_des]) for k in out]
iter += 1
print(('output file:', outfile))
f.close()
if njobs == 1:
print('running sequential code ...')
[main(f) for f in files]
else:
print(('running parallel code (%d jobs) ...' % njobs))
from joblib import Parallel, delayed
Parallel(n_jobs=njobs, verbose=5)(delayed(main)(f) for f in files)
| true | true |
f73db4de47a20be629ea257ee658b17d1279217d | 4,289 | py | Python | src/interactive_conditional_samples.py | mathyouf/GPT-Games | bf6e558bf6ec92d1fba97770587610da0f3447eb | [
"MIT"
] | null | null | null | src/interactive_conditional_samples.py | mathyouf/GPT-Games | bf6e558bf6ec92d1fba97770587610da0f3447eb | [
"MIT"
] | null | null | null | src/interactive_conditional_samples.py | mathyouf/GPT-Games | bf6e558bf6ec92d1fba97770587610da0f3447eb | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import fire
import json
import os
import re
import numpy as np
import tensorflow as tf
import model, sample, encoder
def modify_raw_text(raw_text, interviewer, interviewee):
    """Format *raw_text* as a quoted line of dialogue and open the reply.

    Produces: <interviewer>: "<raw_text>" <interviewee>:"
    The trailing open quote prompts the model to continue as the interviewee.
    """
    return '{}: "{}" {}:"'.format(interviewer, raw_text, interviewee)
def interact_model(
    model_name='124M',
    seed=None,
    nsamples=1,
    batch_size=1,
    length=None,
    temperature=1,
    top_k=0,
    top_p=1,
    models_dir='models',
):
    """
    Interactively run the model as a two-speaker chat loop.
    :model_name=124M : String, which model to use
    :seed=None : Integer seed for random number generators, fix seed to reproduce
     results
    :nsamples=1 : Number of samples to return total
    :batch_size=1 : Number of batches (only affects speed/memory). Must divide nsamples.
    :length=None : Number of tokens in generated text, if None (default), is
     determined by model hyperparameters
    :temperature=1 : Float value controlling randomness in boltzmann
     distribution. Lower temperature results in less random completions. As the
     temperature approaches zero, the model will become deterministic and
     repetitive. Higher temperature results in more random completions.
    :top_k=0 : Integer value controlling diversity. 1 means only 1 word is
     considered for each step (token), resulting in deterministic completions,
     while 40 means 40 words are considered at each step. 0 (default) is a
     special setting meaning no restrictions. 40 generally is a good value.
    :top_p=1 : Float nucleus-sampling threshold; 1 (default) disables it.
    :models_dir : path to parent folder containing model subfolders
     (i.e. contains the <model_name> folder)
    """
    models_dir = os.path.expanduser(os.path.expandvars(models_dir))
    if batch_size is None:
        batch_size = 1
    assert nsamples % batch_size == 0
    enc = encoder.get_encoder(model_name, models_dir)
    hparams = model.default_hparams()
    with open(os.path.join(models_dir, model_name, 'hparams.json')) as f:
        hparams.override_from_dict(json.load(f))
    if length is None:
        # Default to half the model's context window.
        length = hparams.n_ctx // 2
    elif length > hparams.n_ctx:
        raise ValueError("Can't get samples longer than window size: %s" % hparams.n_ctx)
    with tf.Session(graph=tf.Graph()) as sess:
        context = tf.placeholder(tf.int32, [batch_size, None])
        np.random.seed(seed)
        tf.set_random_seed(seed)
        output = sample.sample_sequence(
            hparams=hparams, length=length,
            context=context,
            batch_size=batch_size,
            temperature=temperature, top_k=top_k, top_p=top_p
        )
        # Restore model weights from the latest checkpoint.
        saver = tf.train.Saver()
        ckpt = tf.train.latest_checkpoint(os.path.join(models_dir, model_name))
        saver.restore(sess, ckpt)
        interviewer = input("What is your name? ")
        interviewee = input("Who are you talking to? ")
        # Running transcript fed back to the model on every turn.
        previous_memory = ""
        while True:
            raw_text = input(interviewer+" >>> ")
            while not raw_text:
                print('Prompt should not be empty!')
                raw_text = input(interviewer+" >>> ")
            raw_text = modify_raw_text(raw_text, interviewer, interviewee)
            previous_memory += raw_text
            # Sentinel: the pattern cannot match the digit string, so this is
            # always None and forces at least one sampling pass below.
            response = re.match(r'(.*?)"', '495839045')
            while not response:
                context_tokens = enc.encode(previous_memory)
                generated = 0
                for _ in range(nsamples // batch_size):
                    out = sess.run(output, feed_dict={
                        context: [context_tokens for _ in range(batch_size)]
                    })[:, len(context_tokens):]
                    for i in range(batch_size):
                        generated += 1
                        text = enc.decode(out[i])
                        # NOTE(review): a sample is accepted only once it
                        # contains a double quote followed by an apostrophe --
                        # presumably an end-of-reply marker; confirm intended.
                        response = re.match(r'(.*?)"\'', text)
                        if response:
                            # Keep everything up to and including the closing quote.
                            match = re.match(r'(.*?)"', text).group(0)
                            # print("=" * 40 + " SAMPLE " + str(generated) + " " + "=" * 40)
                            # print("Raw Input:", previous_memory)
                            print(interviewee+" >>> ",match[:-1])
                            # print("=" * 80)
                            previous_memory += match + " "
# Expose interact_model's keyword arguments as CLI flags via Fire.
if __name__ == '__main__':
    fire.Fire(interact_model)
| 39.712963 | 92 | 0.600373 |
import fire
import json
import os
import re
import numpy as np
import tensorflow as tf
import model, sample, encoder
def modify_raw_text(raw_text, interviewer, interviewee):
return interviewer+": \"" + raw_text + "\" "+ interviewee +":\""
def interact_model(
model_name='124M',
seed=None,
nsamples=1,
batch_size=1,
length=None,
temperature=1,
top_k=0,
top_p=1,
models_dir='models',
):
models_dir = os.path.expanduser(os.path.expandvars(models_dir))
if batch_size is None:
batch_size = 1
assert nsamples % batch_size == 0
enc = encoder.get_encoder(model_name, models_dir)
hparams = model.default_hparams()
with open(os.path.join(models_dir, model_name, 'hparams.json')) as f:
hparams.override_from_dict(json.load(f))
if length is None:
length = hparams.n_ctx // 2
elif length > hparams.n_ctx:
raise ValueError("Can't get samples longer than window size: %s" % hparams.n_ctx)
with tf.Session(graph=tf.Graph()) as sess:
context = tf.placeholder(tf.int32, [batch_size, None])
np.random.seed(seed)
tf.set_random_seed(seed)
output = sample.sample_sequence(
hparams=hparams, length=length,
context=context,
batch_size=batch_size,
temperature=temperature, top_k=top_k, top_p=top_p
)
saver = tf.train.Saver()
ckpt = tf.train.latest_checkpoint(os.path.join(models_dir, model_name))
saver.restore(sess, ckpt)
interviewer = input("What is your name? ")
interviewee = input("Who are you talking to? ")
previous_memory = ""
while True:
raw_text = input(interviewer+" >>> ")
while not raw_text:
print('Prompt should not be empty!')
raw_text = input(interviewer+" >>> ")
raw_text = modify_raw_text(raw_text, interviewer, interviewee)
previous_memory += raw_text
response = re.match(r'(.*?)"', '495839045')
while not response:
context_tokens = enc.encode(previous_memory)
generated = 0
for _ in range(nsamples // batch_size):
out = sess.run(output, feed_dict={
context: [context_tokens for _ in range(batch_size)]
})[:, len(context_tokens):]
for i in range(batch_size):
generated += 1
text = enc.decode(out[i])
response = re.match(r'(.*?)"\'', text)
if response:
match = re.match(r'(.*?)"', text).group(0)
print(interviewee+" >>> ",match[:-1])
previous_memory += match + " "
if __name__ == '__main__':
fire.Fire(interact_model)
| true | true |
f73db598f3ff18207f79581af2c746e66ab19135 | 18,256 | py | Python | nova/objects/base.py | bopopescu/nova-38 | 93526327b4eb627e981eb7f24f8fac3dc3ed3fab | [
"Apache-2.0"
] | null | null | null | nova/objects/base.py | bopopescu/nova-38 | 93526327b4eb627e981eb7f24f8fac3dc3ed3fab | [
"Apache-2.0"
] | null | null | null | nova/objects/base.py | bopopescu/nova-38 | 93526327b4eb627e981eb7f24f8fac3dc3ed3fab | [
"Apache-2.0"
] | 1 | 2020-07-24T09:32:37.000Z | 2020-07-24T09:32:37.000Z | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Nova common internal object model"""
import collections
import functools

from nova import context
from nova import exception
from nova.objects import utils as obj_utils
from nova.openstack.common import log as logging
from nova.openstack.common.rpc import common as rpc_common
import nova.openstack.common.rpc.dispatcher
import nova.openstack.common.rpc.proxy
import nova.openstack.common.rpc.serializer
LOG = logging.getLogger('object')
def get_attrname(name):
    """Return the mangled name of the attribute's underlying storage."""
    return '_' + name
def make_class_properties(cls):
    """Install property accessors on *cls* for each declared field.

    The getter lazy-loads an attribute that has not been set yet via
    obj_load_attr(); the setter coerces the value through the field's
    type function and records the field in _changed_fields.
    """
    # NOTE(danms): Inherit NovaObject's base fields only
    cls.fields.update(NovaObject.fields)
    for name, typefn in cls.fields.iteritems():
        # 'name=name' / 'typefn=typefn' bind the loop variables at
        # definition time (avoids the late-binding closure pitfall).
        def getter(self, name=name):
            attrname = get_attrname(name)
            if not hasattr(self, attrname):
                # Not set yet: give the object a chance to lazy-load it.
                self.obj_load_attr(name)
            return getattr(self, attrname)
        def setter(self, value, name=name, typefn=typefn):
            self._changed_fields.add(name)
            try:
                return setattr(self, get_attrname(name), typefn(value))
            except Exception:
                attr = "%s.%s" % (self.obj_name(), name)
                LOG.exception(_('Error setting %(attr)s') %
                              {'attr': attr})
                raise
        setattr(cls, name, property(getter, setter))
class NovaObjectMetaclass(type):
    """Metaclass that allows tracking of object classes.

    Each NovaObject subclass is registered by its obj_name() in
    _obj_classes (a name -> list-of-classes mapping, allowing multiple
    versions of the same object) and gets property accessors for its
    fields installed via make_class_properties().
    """
    # NOTE(danms): This is what controls whether object operations are
    # remoted. If this is not None, use it to remote things over RPC.
    indirection_api = None
    def __init__(cls, names, bases, dict_):
        if not hasattr(cls, '_obj_classes'):
            # This will be set in the 'NovaObject' class.
            cls._obj_classes = collections.defaultdict(list)
        else:
            # Add the subclass to NovaObject._obj_classes
            make_class_properties(cls)
            cls._obj_classes[cls.obj_name()].append(cls)
# These are decorators that mark an object's method as remotable.
# If the metaclass is configured to forward object methods to an
# indirection service, these will result in making an RPC call
# instead of directly calling the implementation in the object. Instead,
# the object implementation on the remote end will perform the
# requested action and the result will be returned here.
def remotable_classmethod(fn):
    """Decorator for remotable classmethods.

    When an indirection_api is configured on NovaObject the call is
    forwarded over RPC; otherwise the local implementation runs.  Any
    object returned from the call has the request context stashed on it.
    """
    def wrapper(cls, context, *args, **kwargs):
        if not NovaObject.indirection_api:
            # No indirection service configured: execute locally.
            result = fn(cls, context, *args, **kwargs)
        else:
            # Forward the classmethod call to the remote implementation.
            result = NovaObject.indirection_api.object_class_action(
                context, cls.obj_name(), fn.__name__, cls.version,
                args, kwargs)
        if isinstance(result, NovaObject):
            result._context = context
        return result
    return classmethod(wrapper)
# See comment above for remotable_classmethod()
#
# Note that this will use either the provided context, or the one
# stashed in the object. If neither are present, the object is
# "orphaned" and remotable methods cannot be called.
def remotable(fn):
    """Decorator for remotable object methods.

    Uses either the context passed as the first positional argument or
    the one stashed in the object.  If neither is present the object is
    "orphaned" and the call fails.  When an indirection_api is
    configured on NovaObject, the call is forwarded over RPC and any
    attribute changes made remotely are copied back into this instance.
    """
    def wrapper(self, *args, **kwargs):
        ctxt = self._context
        try:
            # An explicit context as the first argument overrides the
            # stashed one and is stripped from the args passed on.
            if isinstance(args[0], (context.RequestContext,
                                    rpc_common.CommonRpcContext)):
                ctxt = args[0]
                args = args[1:]
        except IndexError:
            # No positional arguments at all -- keep the stashed context.
            pass
        if ctxt is None:
            raise exception.OrphanedObjectError(method=fn.__name__,
                                                objtype=self.obj_name())
        if NovaObject.indirection_api:
            # Remote the call; the service returns both the result and
            # the attribute updates it made to the object.
            updates, result = NovaObject.indirection_api.object_action(
                ctxt, self, fn.__name__, args, kwargs)
            for key, value in updates.iteritems():
                if key in self.fields:
                    self[key] = self._attr_from_primitive(key, value)
            # Mirror the remote side's notion of what is still dirty.
            self._changed_fields = set(updates.get('obj_what_changed', []))
            return result
        else:
            return fn(self, ctxt, *args, **kwargs)
    return wrapper
# Object versioning rules
#
# Each service has its set of objects, each with a version attached. When
# a client attempts to call an object method, the server checks to see if
# the version of that object matches (in a compatible way) its object
# implementation. If so, cool, and if not, fail.
def check_object_version(server, client):
    """Verify that a client object version is usable with the server's.

    Versions are 'major.minor' strings.  The major numbers must match
    exactly, and the client's minor may not be newer than the server's.
    Raises exception.IncompatibleObjectVersion on any mismatch or on a
    malformed version string.
    """
    try:
        server_major, _server_minor = server.split('.')
        client_major, _client_minor = client.split('.')
        server_minor = int(_server_minor)
        client_minor = int(_client_minor)
    except ValueError:
        raise exception.IncompatibleObjectVersion(
            _('Invalid version string'))
    if server_major != client_major:
        raise exception.IncompatibleObjectVersion(
            dict(client=client_major, server=server_major))
    if server_minor < client_minor:
        raise exception.IncompatibleObjectVersion(
            dict(client=client_minor, server=server_minor))
class NovaObject(object):
    """Base class and object factory.

    This forms the base of all objects that can be remoted or instantiated
    via RPC. Simply defining a class that inherits from this base class
    will make it remotely instantiatable. Objects should implement the
    necessary "get" classmethod routines as well as "save" object methods
    as appropriate.
    """
    __metaclass__ = NovaObjectMetaclass

    # Version of this object (see rules above check_object_version())
    version = '1.0'

    # The fields present in this object as key:typefn pairs. For example:
    #
    # fields = { 'foo': int,
    #            'bar': str,
    #            'baz': lambda x: str(x).ljust(8),
    #          }
    #
    # NOTE(danms): The base NovaObject class' fields will be inherited
    # by subclasses, but that is a special case. Objects inheriting from
    # other objects will not receive this merging of fields contents.
    fields = {
        'created_at': obj_utils.datetime_or_str_or_none,
        'updated_at': obj_utils.datetime_or_str_or_none,
        'deleted_at': obj_utils.datetime_or_str_or_none,
        'deleted': bool,
        }
    # Extra attribute names (beyond fields) exposed by iteritems().
    obj_extra_fields = []

    def __init__(self):
        self._changed_fields = set()
        self._context = None

    @classmethod
    def obj_name(cls):
        """Return a canonical name for this object which will be used over
        the wire for remote hydration.
        """
        return cls.__name__

    @classmethod
    def obj_class_from_name(cls, objname, objver):
        """Returns a class from the registry based on a name and version.

        Prefers an exact version match; otherwise falls back to the last
        registered class whose version is compatible (see
        check_object_version()).  Raises UnsupportedObjectError for an
        unknown name and IncompatibleObjectVersion if nothing matches.
        """
        if objname not in cls._obj_classes:
            LOG.error(_('Unable to instantiate unregistered object type '
                        '%(objtype)s') % dict(objtype=objname))
            raise exception.UnsupportedObjectError(objtype=objname)
        compatible_match = None
        for objclass in cls._obj_classes[objname]:
            if objclass.version == objver:
                return objclass
            try:
                check_object_version(objclass.version, objver)
                compatible_match = objclass
            except exception.IncompatibleObjectVersion:
                pass
        if compatible_match:
            return compatible_match
        raise exception.IncompatibleObjectVersion(objname=objname,
                                                  objver=objver)

    # Timestamp fields arrive as strings and need dedicated deserializers.
    _attr_created_at_from_primitive = obj_utils.dt_deserializer
    _attr_updated_at_from_primitive = obj_utils.dt_deserializer
    _attr_deleted_at_from_primitive = obj_utils.dt_deserializer

    def _attr_from_primitive(self, attribute, value):
        """Attribute deserialization dispatcher.

        This calls self._attr_foo_from_primitive(value) for an attribute
        foo with value, if it exists, otherwise it assumes the value
        is suitable for the attribute's setter method.
        """
        handler = '_attr_%s_from_primitive' % attribute
        if hasattr(self, handler):
            return getattr(self, handler)(value)
        return value

    @classmethod
    def obj_from_primitive(cls, primitive, context=None):
        """Simple base-case hydration.

        This calls self._attr_from_primitive() for each item in fields.
        """
        if primitive['nova_object.namespace'] != 'nova':
            # NOTE(danms): We don't do anything with this now, but it's
            # there for "the future"
            raise exception.UnsupportedObjectError(
                objtype='%s.%s' % (primitive['nova_object.namespace'],
                                   primitive['nova_object.name']))
        objname = primitive['nova_object.name']
        objver = primitive['nova_object.version']
        objdata = primitive['nova_object.data']
        objclass = cls.obj_class_from_name(objname, objver)
        self = objclass()
        self._context = context
        for name in self.fields:
            if name in objdata:
                setattr(self, name,
                        self._attr_from_primitive(name, objdata[name]))
        # Only track changes for names we actually know about.
        changes = primitive.get('nova_object.changes', [])
        self._changed_fields = set([x for x in changes if x in self.fields])
        return self

    # Timestamp fields are serialized through dedicated handlers.
    _attr_created_at_to_primitive = obj_utils.dt_serializer('created_at')
    _attr_updated_at_to_primitive = obj_utils.dt_serializer('updated_at')
    _attr_deleted_at_to_primitive = obj_utils.dt_serializer('deleted_at')

    def _attr_to_primitive(self, attribute):
        """Attribute serialization dispatcher.

        This calls self._attr_foo_to_primitive() for an attribute foo,
        if it exists, otherwise it assumes the attribute itself is
        primitive-enough to be sent over the RPC wire.
        """
        handler = '_attr_%s_to_primitive' % attribute
        if hasattr(self, handler):
            return getattr(self, handler)()
        else:
            return getattr(self, attribute)

    def obj_to_primitive(self):
        """Simple base-case dehydration.

        This calls self._attr_to_primitive() for each item in fields.
        Only attributes that have actually been set are included.
        """
        primitive = dict()
        for name in self.fields:
            if hasattr(self, get_attrname(name)):
                primitive[name] = self._attr_to_primitive(name)
        obj = {'nova_object.name': self.obj_name(),
               'nova_object.namespace': 'nova',
               'nova_object.version': self.version,
               'nova_object.data': primitive}
        if self.obj_what_changed():
            obj['nova_object.changes'] = list(self.obj_what_changed())
        return obj

    def obj_load_attr(self, attrname):
        """Load an additional attribute from the real object.

        This should use self._conductor, and cache any data that might
        be useful for future load operations.
        """
        raise NotImplementedError(
            _("Cannot load '%s' in the base class") % attrname)

    def save(self, context):
        """Save the changed fields back to the store.

        This is optional for subclasses, but is presented here in the base
        class for consistency among those that do.
        """
        raise NotImplementedError('Cannot save anything in the base class')

    def obj_what_changed(self):
        """Returns a set of fields that have been modified."""
        return self._changed_fields

    def obj_reset_changes(self, fields=None):
        """Reset the list of fields that have been changed.

        Note that this is NOT "revert to previous values"
        """
        if fields:
            self._changed_fields -= set(fields)
        else:
            self._changed_fields.clear()

    # dictish syntactic sugar
    def iteritems(self):
        """For backwards-compatibility with dict-based objects.

        NOTE(danms): May be removed in the future.
        """
        for name in self.fields.keys() + self.obj_extra_fields:
            if (hasattr(self, get_attrname(name)) or
                name in self.obj_extra_fields):
                yield name, getattr(self, name)

    items = lambda self: list(self.iteritems())

    def __getitem__(self, name):
        """For backwards-compatibility with dict-based objects.

        NOTE(danms): May be removed in the future.
        """
        return getattr(self, name)

    def __setitem__(self, name, value):
        """For backwards-compatibility with dict-based objects.

        NOTE(danms): May be removed in the future.
        """
        setattr(self, name, value)

    def __contains__(self, name):
        """For backwards-compatibility with dict-based objects.

        NOTE(danms): May be removed in the future.
        """
        return hasattr(self, get_attrname(name))

    def get(self, key, value=None):
        """For backwards-compatibility with dict-based objects.

        Behaves like dict.get(): returns *value* (default None) when the
        attribute has not been set, instead of attempting a lazy load.
        (Previously the *value* parameter was silently ignored and unset
        attributes triggered obj_load_attr().)
        NOTE(danms): May be removed in the future.
        """
        if key not in self:
            return value
        return self[key]

    def update(self, updates):
        """For backwards-compatibility with dict-base objects.

        NOTE(danms): May be removed in the future.
        """
        for key, value in updates.items():
            self[key] = value
class ObjectListBase(object):
    """Mixin class for lists of objects.

    Adds a single 'objects' field that stores the backing list and
    exposes the read-only list protocol on top of it.  Serialization of
    the contained objects is handled automatically.  Must be mixed in
    with a NovaObject subclass.
    """
    fields = {'objects': list}

    def __iter__(self):
        """Iterate over the contained objects."""
        return iter(self.objects)

    def __len__(self):
        """Number of contained objects."""
        return len(self.objects)

    def __getitem__(self, index):
        """Integer or slice indexing; a slice yields a new list object."""
        if not isinstance(index, slice):
            return self.objects[index]
        new_obj = self.__class__()
        new_obj.objects = self.objects[index]
        # NOTE(danms): We must be mixed in with a NovaObject!
        new_obj.obj_reset_changes()
        new_obj._context = self._context
        return new_obj

    def __contains__(self, value):
        """List membership test."""
        return value in self.objects

    def count(self, value):
        """Number of occurrences of value in the list."""
        return self.objects.count(value)

    def index(self, value):
        """Position of the first occurrence of value."""
        return self.objects.index(value)

    def _attr_objects_to_primitive(self):
        """Serialize each contained object."""
        return [member.obj_to_primitive() for member in self.objects]

    def _attr_objects_from_primitive(self, value):
        """Deserialize a list of primitives into hydrated objects."""
        return [NovaObject.obj_from_primitive(entity,
                                              context=self._context)
                for entity in value]
class NovaObjectSerializer(nova.openstack.common.rpc.serializer.Serializer):
    """A NovaObject-aware Serializer.

    Implements the Oslo Serializer interface so that NovaObject
    instances (and containers of them) can be accepted and returned by
    services.  Pass an instance of this to RpcProxy and RpcDispatcher.
    """

    def _process_iterable(self, context, action_fn, values):
        """Apply action_fn to every item of an iterable container.

        :param:context: Request context
        :param:action_fn: Action to take on each item in values
        :param:values: Iterable container of things to take action on
        :returns: A new container of the same type (except set) with
                  items from values having had action applied.
        """
        # NOTE(danms): A set can't hold an unhashable member such as a
        # dict, so sets become tuples -- we can't send sets over RPC
        # anyway.
        container = tuple if values.__class__ == set else values.__class__
        return container([action_fn(context, item) for item in values])

    def serialize_entity(self, context, entity):
        """Reduce entity (possibly nested in a container) to primitives."""
        if isinstance(entity, (tuple, list, set)):
            return self._process_iterable(context, self.serialize_entity,
                                          entity)
        if (hasattr(entity, 'obj_to_primitive') and
                callable(entity.obj_to_primitive)):
            return entity.obj_to_primitive()
        return entity

    def deserialize_entity(self, context, entity):
        """Rehydrate any NovaObjects found in entity or its containers."""
        if isinstance(entity, dict) and 'nova_object.name' in entity:
            return NovaObject.obj_from_primitive(entity, context=context)
        if isinstance(entity, (tuple, list, set)):
            return self._process_iterable(context, self.deserialize_entity,
                                          entity)
        return entity
def obj_to_primitive(obj):
    """Recursively turn an object into a python primitive.

    A NovaObject becomes a dict, anything implementing ObjectListBase
    becomes a list, and every other value passes through unchanged.
    """
    if isinstance(obj, ObjectListBase):
        return [obj_to_primitive(item) for item in obj]
    if isinstance(obj, NovaObject):
        return dict((key, obj_to_primitive(value))
                    for key, value in obj.iteritems())
    return obj
| 37.105691 | 79 | 0.637599 |
import collections
from nova import context
from nova import exception
from nova.objects import utils as obj_utils
from nova.openstack.common import log as logging
from nova.openstack.common.rpc import common as rpc_common
import nova.openstack.common.rpc.dispatcher
import nova.openstack.common.rpc.proxy
import nova.openstack.common.rpc.serializer
LOG = logging.getLogger('object')
def get_attrname(name):
return '_%s' % name
def make_class_properties(cls):
cls.fields.update(NovaObject.fields)
for name, typefn in cls.fields.iteritems():
def getter(self, name=name):
attrname = get_attrname(name)
if not hasattr(self, attrname):
self.obj_load_attr(name)
return getattr(self, attrname)
def setter(self, value, name=name, typefn=typefn):
self._changed_fields.add(name)
try:
return setattr(self, get_attrname(name), typefn(value))
except Exception:
attr = "%s.%s" % (self.obj_name(), name)
LOG.exception(_('Error setting %(attr)s') %
{'attr': attr})
raise
setattr(cls, name, property(getter, setter))
class NovaObjectMetaclass(type):
# NOTE(danms): This is what controls whether object operations are
# remoted. If this is not None, use it to remote things over RPC.
indirection_api = None
def __init__(cls, names, bases, dict_):
if not hasattr(cls, '_obj_classes'):
# This will be set in the 'NovaObject' class.
cls._obj_classes = collections.defaultdict(list)
else:
# Add the subclass to NovaObject._obj_classes
make_class_properties(cls)
cls._obj_classes[cls.obj_name()].append(cls)
# These are decorators that mark an object's method as remotable.
def remotable_classmethod(fn):
def wrapper(cls, context, *args, **kwargs):
if NovaObject.indirection_api:
result = NovaObject.indirection_api.object_class_action(
context, cls.obj_name(), fn.__name__, cls.version,
args, kwargs)
else:
result = fn(cls, context, *args, **kwargs)
if isinstance(result, NovaObject):
result._context = context
return result
return classmethod(wrapper)
def remotable(fn):
def wrapper(self, *args, **kwargs):
ctxt = self._context
try:
if isinstance(args[0], (context.RequestContext,
rpc_common.CommonRpcContext)):
ctxt = args[0]
args = args[1:]
except IndexError:
pass
if ctxt is None:
raise exception.OrphanedObjectError(method=fn.__name__,
objtype=self.obj_name())
if NovaObject.indirection_api:
updates, result = NovaObject.indirection_api.object_action(
ctxt, self, fn.__name__, args, kwargs)
for key, value in updates.iteritems():
if key in self.fields:
self[key] = self._attr_from_primitive(key, value)
self._changed_fields = set(updates.get('obj_what_changed', []))
return result
else:
return fn(self, ctxt, *args, **kwargs)
return wrapper
def check_object_version(server, client):
try:
client_major, _client_minor = client.split('.')
server_major, _server_minor = server.split('.')
client_minor = int(_client_minor)
server_minor = int(_server_minor)
except ValueError:
raise exception.IncompatibleObjectVersion(
_('Invalid version string'))
if client_major != server_major:
raise exception.IncompatibleObjectVersion(
dict(client=client_major, server=server_major))
if client_minor > server_minor:
raise exception.IncompatibleObjectVersion(
dict(client=client_minor, server=server_minor))
class NovaObject(object):
__metaclass__ = NovaObjectMetaclass
version = '1.0'
# by subclasses, but that is a special case. Objects inheriting from
# other objects will not receive this merging of fields contents.
fields = {
'created_at': obj_utils.datetime_or_str_or_none,
'updated_at': obj_utils.datetime_or_str_or_none,
'deleted_at': obj_utils.datetime_or_str_or_none,
'deleted': bool,
}
obj_extra_fields = []
def __init__(self):
self._changed_fields = set()
self._context = None
@classmethod
def obj_name(cls):
return cls.__name__
@classmethod
def obj_class_from_name(cls, objname, objver):
if objname not in cls._obj_classes:
LOG.error(_('Unable to instantiate unregistered object type '
'%(objtype)s') % dict(objtype=objname))
raise exception.UnsupportedObjectError(objtype=objname)
compatible_match = None
for objclass in cls._obj_classes[objname]:
if objclass.version == objver:
return objclass
try:
check_object_version(objclass.version, objver)
compatible_match = objclass
except exception.IncompatibleObjectVersion:
pass
if compatible_match:
return compatible_match
raise exception.IncompatibleObjectVersion(objname=objname,
objver=objver)
_attr_created_at_from_primitive = obj_utils.dt_deserializer
_attr_updated_at_from_primitive = obj_utils.dt_deserializer
_attr_deleted_at_from_primitive = obj_utils.dt_deserializer
def _attr_from_primitive(self, attribute, value):
handler = '_attr_%s_from_primitive' % attribute
if hasattr(self, handler):
return getattr(self, handler)(value)
return value
@classmethod
def obj_from_primitive(cls, primitive, context=None):
if primitive['nova_object.namespace'] != 'nova':
# NOTE(danms): We don't do anything with this now, but it's
# there for "the future"
raise exception.UnsupportedObjectError(
objtype='%s.%s' % (primitive['nova_object.namespace'],
primitive['nova_object.name']))
objname = primitive['nova_object.name']
objver = primitive['nova_object.version']
objdata = primitive['nova_object.data']
objclass = cls.obj_class_from_name(objname, objver)
self = objclass()
self._context = context
for name in self.fields:
if name in objdata:
setattr(self, name,
self._attr_from_primitive(name, objdata[name]))
changes = primitive.get('nova_object.changes', [])
self._changed_fields = set([x for x in changes if x in self.fields])
return self
_attr_created_at_to_primitive = obj_utils.dt_serializer('created_at')
_attr_updated_at_to_primitive = obj_utils.dt_serializer('updated_at')
_attr_deleted_at_to_primitive = obj_utils.dt_serializer('deleted_at')
def _attr_to_primitive(self, attribute):
handler = '_attr_%s_to_primitive' % attribute
if hasattr(self, handler):
return getattr(self, handler)()
else:
return getattr(self, attribute)
def obj_to_primitive(self):
primitive = dict()
for name in self.fields:
if hasattr(self, get_attrname(name)):
primitive[name] = self._attr_to_primitive(name)
obj = {'nova_object.name': self.obj_name(),
'nova_object.namespace': 'nova',
'nova_object.version': self.version,
'nova_object.data': primitive}
if self.obj_what_changed():
obj['nova_object.changes'] = list(self.obj_what_changed())
return obj
def obj_load_attr(self, attrname):
raise NotImplementedError(
_("Cannot load '%s' in the base class") % attrname)
def save(self, context):
raise NotImplementedError('Cannot save anything in the base class')
def obj_what_changed(self):
return self._changed_fields
def obj_reset_changes(self, fields=None):
if fields:
self._changed_fields -= set(fields)
else:
self._changed_fields.clear()
# dictish syntactic sugar
def iteritems(self):
for name in self.fields.keys() + self.obj_extra_fields:
if (hasattr(self, get_attrname(name)) or
name in self.obj_extra_fields):
yield name, getattr(self, name)
items = lambda self: list(self.iteritems())
def __getitem__(self, name):
return getattr(self, name)
def __setitem__(self, name, value):
setattr(self, name, value)
def __contains__(self, name):
return hasattr(self, get_attrname(name))
def get(self, key, value=None):
return self[key]
def update(self, updates):
for key, value in updates.items():
self[key] = value
class ObjectListBase(object):
fields = {
'objects': list,
}
def __iter__(self):
return iter(self.objects)
def __len__(self):
return len(self.objects)
def __getitem__(self, index):
if isinstance(index, slice):
new_obj = self.__class__()
new_obj.objects = self.objects[index]
# NOTE(danms): We must be mixed in with a NovaObject!
new_obj.obj_reset_changes()
new_obj._context = self._context
return new_obj
return self.objects[index]
def __contains__(self, value):
return value in self.objects
def count(self, value):
return self.objects.count(value)
def index(self, value):
return self.objects.index(value)
def _attr_objects_to_primitive(self):
return [x.obj_to_primitive() for x in self.objects]
def _attr_objects_from_primitive(self, value):
objects = []
for entity in value:
obj = NovaObject.obj_from_primitive(entity, context=self._context)
objects.append(obj)
return objects
class NovaObjectSerializer(nova.openstack.common.rpc.serializer.Serializer):
def _process_iterable(self, context, action_fn, values):
iterable = values.__class__
if iterable == set:
# NOTE(danms): A set can't have an unhashable value inside, such as
# send them over RPC anyway.
iterable = tuple
return iterable([action_fn(context, value) for value in values])
def serialize_entity(self, context, entity):
if isinstance(entity, (tuple, list, set)):
entity = self._process_iterable(context, self.serialize_entity,
entity)
elif (hasattr(entity, 'obj_to_primitive') and
callable(entity.obj_to_primitive)):
entity = entity.obj_to_primitive()
return entity
def deserialize_entity(self, context, entity):
if isinstance(entity, dict) and 'nova_object.name' in entity:
entity = NovaObject.obj_from_primitive(entity, context=context)
elif isinstance(entity, (tuple, list, set)):
entity = self._process_iterable(context, self.deserialize_entity,
entity)
return entity
def obj_to_primitive(obj):
if isinstance(obj, ObjectListBase):
return [obj_to_primitive(x) for x in obj]
elif isinstance(obj, NovaObject):
result = {}
for key, value in obj.iteritems():
result[key] = obj_to_primitive(value)
return result
else:
return obj
| true | true |
f73db5a98392c6ebea5629c45055e023ca7833f4 | 14,126 | py | Python | spike/TwitterMonitor/ConnectTwitterAPI.py | jian-frank-cao/spike | a02f6d26f8705c79104110b07263f69199325093 | [
"MIT"
] | null | null | null | spike/TwitterMonitor/ConnectTwitterAPI.py | jian-frank-cao/spike | a02f6d26f8705c79104110b07263f69199325093 | [
"MIT"
] | null | null | null | spike/TwitterMonitor/ConnectTwitterAPI.py | jian-frank-cao/spike | a02f6d26f8705c79104110b07263f69199325093 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 28 09:28:54 2021
@author: Jian Cao
Collect Tweets from Twitter API (Stream, REST, Lab-COVID19)
"""
## Set environment ------------------------------------------------------------
# import os
import time
import json
import requests
import uuid
import multiprocessing
from datetime import datetime, timedelta
from requests import HTTPError, ConnectionError
from TwitterAPI import TwitterAPI, TwitterConnectionError, TwitterRequestError
## Define class ---------------------------------------------------------------
class ConnectTwitterAPI:
    """Object that connects Twitter API (Stream, REST, Lab-COVID19)

    Collected tweets are passed from a producer process (API requests)
    through a multiprocessing Pipe to a consumer process (outlet).

    Functions:
        StartMonitor(input_dict, api_type, outlet_type)
    """

    def __init__(self, consumer_key, consumer_secret,
                 access_token_key, access_token_secret):
        """Store the OAuth credentials used by all later requests.

        Raises ValueError if any credential is missing/empty.
        """
        if (not consumer_key or not consumer_secret or not
            access_token_key or not access_token_secret):
            raise ValueError('CONSUMER KEY&SECRET, ACCESS KEY&SECRET are needed.')
        self.consumer_key = consumer_key
        self.consumer_secret = consumer_secret
        self.access_token_key = access_token_key
        self.access_token_secret = access_token_secret

    def GetBearerToken(self, key, secret):  # might not be necessary if use TwitterAPI
        """Exchange app credentials for an OAuth2 bearer token."""
        response = requests.post("https://api.twitter.com/oauth2/token",
                                 auth=(key, secret),
                                 data={'grant_type': 'client_credentials'},
                                 headers={"User-Agent": "BEBOBEBOP"})
        if response.status_code != 200:
            print(response.status_code)
            print(response.text)
            raise Exception("Bearer token error")
        body = response.json()
        print("Bearer token is ready.")
        return body['access_token']

    def _get_ready(self):
        """Validate inputs, authenticate, and build request/outlet state."""
        # check input
        if not any([x == self.api_type for
                    x in ['stream_v1', 'rest_v1', 'lab_covid19']]):  # add stream_v2, rest_v2
            raise Exception('API ' + self.api_type +
                            ' is not currently supported')
        if not any([x == self.outlet_type for
                    x in ['local', 'local_count']]):  # add pubsub, kinesis, oracle stream
            raise Exception('OUTLET ' + self.outlet_type +
                            ' is not currently supported')
        # authorization
        if self.api_type == 'stream_v1':
            self.twitter_api = TwitterAPI(self.consumer_key,
                                          self.consumer_secret,
                                          self.access_token_key,
                                          self.access_token_secret)
            print('oAuth1 is ready.')
        if any(x == self.api_type for x in ['rest_v1', 'lab_covid19']):
            self.twitter_api = TwitterAPI(self.consumer_key,
                                          self.consumer_secret,
                                          auth_type='oAuth2')
            print('oAuth2 is ready.')
        if any(x == self.api_type for x in ['stream_v2', 'rest_v2']):  # modify this to use TwitterAPI
            self.bearer_token = self.GetBearerToken(self.consumer_key,
                                                    self.consumer_secret)
        # prepare requests
        if self.api_type == 'stream_v1':
            if any(x not in self.input_dict for x in
                   ['keywords']):
                raise ValueError('KEYWORDS is needed.')
            self.resource = 'statuses/filter'
            self.params = {'track': self.input_dict['keywords']}  # add more rules in the params as needed
        if self.api_type == 'rest_v1':
            if any(x not in self.input_dict for x in
                   ['keywords', 'max_id', 'since_id']):
                raise ValueError('KEYWORDS, MAX_ID, and SINCE_ID are needed.')
            keywords = '(' + ') OR ('.join(self.input_dict['keywords']) + ')'
            self.resource = 'search/tweets'
            self.params = {'q': keywords,
                           'max_id': self.input_dict['max_id'],
                           'since_id': self.input_dict['since_id'],
                           'count': 100,
                           'tweet_mode': 'extended'}
            if 'tweets_per_qry' in self.input_dict:
                self.params['count'] = self.input_dict['tweets_per_qry']
            self.tweet_downloaded = 0
        if self.api_type == 'lab_covid19':
            if any(x not in self.input_dict for x in
                   ['partition']):
                raise ValueError('PARTITION is needed.')
            self.params = {'partition': self.input_dict['partition']}
            self.resource = 'labs/1/tweets/stream/covid19'
        # prepare outlet (Pipe connects producer and consumer processes)
        if not hasattr(self, 'pipe_in') or not hasattr(self, 'pipe_out'):
            self.pipe_in, self.pipe_out = multiprocessing.Pipe()
        if ('local' in self.outlet_type and
                not hasattr(self, 'tweets')):
            self.tweets = []
            self.tweet_count = 0
        if self.outlet_type == 'local_count':
            self.tweets_per_file = 15000
            if 'tweets_per_file' in self.input_dict:
                self.tweets_per_file = self.input_dict['tweets_per_file']
        if self.outlet_type == 'local':
            self.minutes_per_file = timedelta(minutes=15)
            if 'minutes_per_file' in self.input_dict:
                self.minutes_per_file = timedelta(
                    minutes=float(self.input_dict['minutes_per_file']))
        if any(x not in self.input_dict for x in
               ['file_prefix', 'download_path']):
            raise ValueError('FILE_PREFIX, DOWNLOAD_PATH are needed.')

    def _request_stream_v1(self):
        """Consume a streaming connection, pushing tweets into the pipe.

        Returns True when the caller should reconnect; raises on
        unrecoverable disconnect codes.
        """
        self.response = self.twitter_api.request(self.resource,
                                                 self.params)
        if 'stream_v1' in self.api_type:
            print('Connected to Stream API v1.1.')
            print('First 10 keywords: ' +
                  ', '.join(self.input_dict['keywords'][:10]) + '.')
        else:
            print('Connected to Lab API COVID19 partition ' +
                  str(self.input_dict['partition']))
        print('Collecting tweets...')
        for tweet in self.response:
            if 'text' in tweet:
                self.pipe_in.send(tweet)
            elif 'disconnect' in tweet:
                event = tweet['disconnect']
                if event['code'] in [2, 5, 6, 7]:
                    # something needs to be fixed before re-connecting
                    raise Exception(event['reason'])
                else:
                    print(('Disconnect Code: ' + event['code'] +
                           '. Reason: ' + event['reason']))
                    return True  # temporary interruption, re-try request
        return True  # stream stopped with no reason, re-try request

    def _request_rest_v1(self):
        """Fetch one page of search results; returns False when exhausted.

        Walks backwards through results by lowering max_id each call.
        """
        self.response = self.twitter_api.request(self.resource,
                                                 self.params)
        payload = self.response.json()  # parse once, reuse below
        if 'statuses' not in payload:
            raise TypeError('"statuses" not in response.json().')
        tweets = payload['statuses']
        n_tweets = len(tweets)
        for tweet in tweets:
            self.pipe_in.send(tweet)
        if n_tweets == 0:
            print('No more tweets found.')
            # Sentinel tells the consumer process to stop.
            self.pipe_in.send("FINISHED")
            return False
        self.tweet_downloaded += n_tweets
        print('Downloaded {} tweets.'.format(self.tweet_downloaded))
        self.input_dict['max_id'] = tweet['id'] - 1
        return True

    def _collect_tweets(self):
        """Producer loop: issue API requests and retry transient errors.

        Backoff doubles per consecutive error (capped at 30s) and the
        error count resets after 15 minutes without failures.
        """
        last_error = None
        go = True
        while go:
            retry = False
            try:
                if any(x in self.api_type for
                       x in ['stream_v1', 'lab_covid19']):
                    go = self._request_stream_v1()
                if 'rest_v1' in self.api_type:
                    go = self._request_rest_v1()
            except IOError as ioe:
                print('[Caught IOError]\n' + str(ioe))
                retry = True
            except HTTPError as he:
                print('[Caught HTTPError]\n' + str(he))
                retry = True
            except ConnectionError as ce:
                print('[Caught ConnectionError]\n' + str(ce))
                retry = True
            except TypeError as te:
                print('[Caught TypeError]\n' + str(te))
                retry = True
            except TwitterConnectionError as tce:
                print('[Caught TwitterConnectionError]\n' + str(tce))
                retry = True
            except TwitterRequestError as tre:
                print('[Caught TwitterRequestError]\n' + str(tre))
                retry = True
            # retry strategy
            if not retry:
                if 'rest_v1' in self.api_type:
                    # pace requests to stay under the search rate limit
                    time.sleep(2.1)
                continue
            print(self.response.headers)
            self.response.close()
            if not last_error:
                last_error = datetime.now()
                error_count = 0
            if datetime.now() - last_error > timedelta(seconds=900):
                error_count = 0
            wait = min(0.25 * 2**error_count, 30)
            last_error = datetime.now()
            error_count += 1
            print('Wait {} seconds before retrying...'.format(wait))
            time.sleep(wait)

    def _save_locally(self):
        """Consumer loop body: buffer tweets and flush them to TXT files.

        Returns False when the producer sends the FINISHED sentinel.
        """
        if self.outlet_type == 'local' and not hasattr(self, 'file_timer'):
            self.file_timer = datetime.now() + self.minutes_per_file
        print('Start saving tweets into local TXT files...')
        while True:
            tweet = self.pipe_out.recv()
            if tweet == "FINISHED":
                return False
            self.tweets.append(tweet)
            self.tweet_count += 1
            # determine if the file is ready
            if 'count' in self.outlet_type:
                file_is_ready = self.tweet_count >= self.tweets_per_file
            else:
                file_is_ready = datetime.now() >= self.file_timer
            # file is not ready, continue adding tweets
            if not file_is_ready:
                continue
            # save file
            if self.outlet_type == 'local':
                self.file_timer = datetime.now() + self.minutes_per_file
            tweet_time = self.tweets[-1]['created_at']
            time_format = '%a %b %d %H:%M:%S %z %Y'
            if 'v2' in self.api_type:
                # v2 timestamps are ISO-8601; drop the trailing '.000Z'
                tweet_time = tweet_time[:-5]
                time_format = '%Y-%m-%dT%H:%M:%S'
            file_time = datetime.strptime(tweet_time,
                                          time_format)
            file_name = (self.input_dict['file_prefix'] +
                         file_time.strftime("-%Y-%m-%d-%H-%M-%S-") +
                         str(uuid.uuid4()) +
                         '.txt')
            with open(self.input_dict['download_path'] +
                      file_name, 'w') as file:
                file.write(json.dumps(self.tweets))
            # confirmation message
            if 'count' in self.outlet_type:
                print(file_name + ' is saved.')
            else:
                print('{} ----- {} tweets'.format(str(file_time),
                                                  str(len(self.tweets))))
            # check stall warnings on the most recent message (the old
            # code tested self.tweets[0] but indexed tweet -- inconsistent)
            if ('warning' in tweet and
                    'percent_full' in tweet['warning']):
                if tweet['warning']['percent_full'] > 0:  # change threshold when debugging is done.
                    print('Warning: the queue is ' +
                          str(tweet['warning']['percent_full']) + '% full.')
            # clean self.tweets
            self.tweets = []
            self.tweet_count = 0

    def _tweet_outlet(self):
        """Consumer loop: drain the pipe into the configured outlet."""
        time.sleep(0.25)
        go = True
        while go:
            if 'local' in self.outlet_type:
                go = self._save_locally()

    def StartMonitor(self, input_dict,
                     api_type='stream_v1',
                     outlet_type='local'):
        """Start the monitor

        Parameters:
            input_dict (dict): dict of input parameters
                               (parameters, start_id, end_id, etc).
            api_type (str): type of API: stream, REST, lab-covid19
            outlet_type (str): type of outlet: local disk, pubsub,
                               kinesis, Oracle stream.

        Returns:
            None
        """
        if not input_dict or not api_type or not outlet_type:
            raise ValueError('INPUT_DICT, API_TYPE and ' +
                             'OUTLET_TYPE are needed.')
        # get ready
        self.input_dict = input_dict
        self.api_type = api_type.lower()
        self.outlet_type = outlet_type.lower()
        self._get_ready()
        # start producer/consumer processes
        self.tweets_in = multiprocessing.Process(target=self._collect_tweets,
                                                 args=())
        self.tweets_out = multiprocessing.Process(target=self._tweet_outlet,
                                                  args=())
        self.tweets_in.start()
        self.tweets_out.start()
        # finish up
        self.tweets_in.join()
        self.tweets_out.join()
        self.pipe_in.close()
        self.pipe_out.close()
| 40.944928 | 105 | 0.517698 |
rom datetime import datetime, timedelta
from requests import HTTPError, ConnectionError
from TwitterAPI import TwitterAPI, TwitterConnectionError, TwitterRequestError
ecret,
access_token_key, access_token_secret):
if (not consumer_key or not consumer_secret or not
access_token_key or not access_token_secret):
raise ValueError('COMSUMER KEY&SECRET, ACCESS KEY&SECRET are needed.')
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.access_token_key = access_token_key
self.access_token_secret = access_token_secret
def GetBearerToken(self, key, secret):
response = requests.post("https://api.twitter.com/oauth2/token",
auth=(key, secret),
data={'grant_type': 'client_credentials'},
headers={"User-Agent": "BEBOBEBOP"})
if response.status_code != 200:
print(response.status_code)
print(response.text)
raise Exception("Bearer token error")
body = response.json()
print("Bearer token is ready.")
return body['access_token']
def _get_ready(self):
if not any([x == self.api_type for
x in ['stream_v1', 'rest_v1', 'lab_covid19']]):
raise Exception('API ' + self.api_type +
' is not currently supported')
if not any([x == self.outlet_type for
x in ['local', 'local_count']]):
raise Exception('OUTLET ' + self.outlet_type +
' is not currently supported')
if self.api_type == 'stream_v1':
self.twitter_api = TwitterAPI(self.consumer_key,
self.consumer_secret,
self.access_token_key,
self.access_token_secret)
print('oAuth1 is ready.')
if any(x == self.api_type for x in ['rest_v1', 'lab_covid19']):
self.twitter_api = TwitterAPI(self.consumer_key,
self.consumer_secret,
auth_type='oAuth2')
print('oAuth2 is ready.')
if any(x == self.api_type for x in ['stream_v2', 'rest_v2']):
self.bearer_token = self.GetBearerToken(self.consumer_key,
self.consumer_secret)
if self.api_type == 'stream_v1':
if any(x not in self.input_dict for x in
['keywords']):
raise ValueError('KEYWORDS is needed.')
self.resource = 'statuses/filter'
self.params = {'track': self.input_dict['keywords']}
if self.api_type == 'rest_v1':
if any(x not in self.input_dict for x in
['keywords', 'max_id', 'since_id']):
raise ValueError('KEYWORDS, MAX_ID, and SINCE_ID are needed.')
keywords = '(' + ') OR ('.join(self.input_dict['keywords']) + ')'
self.resource = 'search/tweets'
self.params = {'q': keywords,
'max_id': self.input_dict['max_id'],
'since_id': self.input_dict['since_id'],
'count': 100,
'tweet_mode': 'extended'}
if 'tweets_per_qry' in self.input_dict:
self.params['count'] = self.input_dict['tweets_per_qry']
self.tweet_downloaded = 0
if self.api_type == 'lab_covid19':
if any(x not in self.input_dict for x in
['partition']):
raise ValueError('PARTITION is needed.')
self.params = {'partition': self.input_dict['partition']}
self.resource = 'labs/1/tweets/stream/covid19'
if not hasattr(self, 'pipe_in') or not hasattr(self, 'pipe_out'):
self.pipe_in, self.pipe_out = multiprocessing.Pipe()
if ('local' in self.outlet_type and
not hasattr(self, 'tweets')):
self.tweets = []
self.tweet_count = 0
if self.outlet_type == 'local_count':
self.tweets_per_file = 15000
if 'tweets_per_file' in self.input_dict:
self.tweets_per_file = self.input_dict['tweets_per_file']
if self.outlet_type == 'local':
self.minutes_per_file = timedelta(minutes = 15)
if 'minutes_per_file' in self.input_dict:
self.minutes_per_file = timedelta(
minutes = float(self.input_dict['minutes_per_file']))
if any(x not in self.input_dict for x in
['file_prefix', 'download_path']):
raise ValueError('FILE_PREFIX, DOWNLOAD_PATH are needed.')
def _request_stream_v1(self):
self.response = self.twitter_api.request(self.resource,
self.params)
if 'stream_v1' in self.api_type:
print('Connected to Stream API v1.1.')
print('First 10 keywords: ' +
', '.join(self.input_dict['keywords'][:10]) + '.')
else:
print(('Connected to Lab API COVID19 partition ' +
str(self.input_dict['partition'])))
print('Collecting tweets...')
for tweet in self.response:
if 'text' in tweet:
self.pipe_in.send(tweet)
elif 'disconnect' in tweet:
event = tweet['disconnect']
if event['code'] in [2,5,6,7]:
raise Exception(event['reason'])
else:
print(('Disconnect Code: ' + event['code'] +
'. Reason: ' + event['reason']))
return(True)
return(True)
def _request_rest_v1(self):
self.response = self.twitter_api.request(self.resource,
self.params)
if ('statuses' not in self.response.json()):
raise TypeError ('"statuses" not in response.json().')
tweets = self.response.json()['statuses']
n_tweets = len(tweets)
for tweet in tweets:
self.pipe_in.send(tweet)
if n_tweets == 0:
print('No more tweets found.')
self.pipe_in.send("FINISHED")
return(False)
self.tweet_downloaded += n_tweets
print('Downloaded {} tweets.'.format(self.tweet_downloaded))
self.input_dict['max_id'] = tweet['id'] - 1
return(True)
def _collect_tweets(self):
last_error = None
go = True
while go:
retry = False
try:
if any(x in self.api_type for
x in ['stream_v1', 'lab_covid19']):
go = self._request_stream_v1()
if 'rest_v1' in self.api_type:
go = self._request_rest_v1()
except IOError as ioe:
print('[Caught IOError]\n' + str(ioe))
retry = True
except HTTPError as he:
print('[Caught HTTPError]\n' + str(he))
retry = True
except ConnectionError as ce:
print('[Caught ConnectionError]\n' + str(ce))
retry = True
except TypeError as te:
print('[Caught TypeError]\n' + str(te))
retry = True
except TwitterConnectionError as tce:
print('[Caught TwitterConnectionError]\n' + str(tce))
retry = True
except TwitterRequestError as tre:
print('[Caught TwitterRequestError]\n' + str(tre))
retry = True
if not retry:
if 'rest_v1' in self.api_type:
time.sleep(2.1)
continue
print(self.response.headers)
self.response.close()
if not last_error:
last_error = datetime.now()
error_count = 0
if datetime.now() - last_error > timedelta(seconds = 900):
error_count = 0
wait = min(0.25 * 2**error_count, 30)
last_error = datetime.now()
error_count += 1
print('Wait {} seconds before retrying...'.format(wait))
time.sleep(wait)
def _save_locally(self):
if self.outlet_type == 'local' and not hasattr(self, 'file_timer'):
self.file_timer = datetime.now() + self.minutes_per_file
print('Start saving tweets into local TXT files...')
while True:
tweet = self.pipe_out.recv()
if tweet == "FINISHED":
return(False)
self.tweets.append(tweet)
self.tweet_count += 1
file_is_ready = False
if 'count' in self.outlet_type:
file_is_ready = self.tweet_count >= self.tweets_per_file
else:
file_is_ready = datetime.now() >= self.file_timer
if not file_is_ready:
continue
if self.outlet_type == 'local':
self.file_timer = datetime.now() + self.minutes_per_file
tweet_time = self.tweets[-1]['created_at']
time_format = '%a %b %d %H:%M:%S %z %Y'
if 'v2' in self.api_type:
tweet_time = tweet_time[:-5]
time_format = '%Y-%m-%dT%H:%M:%S'
file_time = datetime.strptime(tweet_time,
time_format)
file_name = (self.input_dict['file_prefix'] +
file_time.strftime("-%Y-%m-%d-%H-%M-%S-") +
str(uuid.uuid4()) +
'.txt')
with open(self.input_dict['download_path'] +
file_name, 'w') as file:
file.write(json.dumps(self.tweets))
if 'count' in self.outlet_type:
print(file_name + ' is saved.')
else:
print('{} ----- {} tweets'.format(str(file_time),
str(len(self.tweets))))
if ('warning' in self.tweets[0] and
'percent_full' in tweet['warning']):
if tweet['warning']['percent_full'] > 0:
print('Warning: the queue is ' +
str(tweet['warning']['percent_full']) + '% full.')
self.tweets = []
self.tweet_count = 0
return(True)
def _tweet_outlet(self):
time.sleep(0.25)
go = True
while go:
if 'local' in self.outlet_type:
go = self._save_locally()
def StartMonitor(self, input_dict,
api_type = 'stream_v1',
outlet_type = 'local'):
if not input_dict or not api_type or not outlet_type:
raise ValueError('INPUT_DICT, API_TYPE and ' +
'OUTLET_TYPE are needed.')
self.input_dict = input_dict
self.api_type = api_type.lower()
self.outlet_type = outlet_type.lower()
self._get_ready()
self.tweets_in = multiprocessing.Process(target = self._collect_tweets,
args=())
self.tweets_out = multiprocessing.Process(target = self._tweet_outlet,
args=())
self.tweets_in.start()
self.tweets_out.start()
self.tweets_in.join()
self.tweets_out.join()
self.pipe_in.close()
self.pipe_out.close()
| true | true |
f73db5c5f482730356afe4792b42e6a6560996bd | 588 | py | Python | cxroots/tests/test_deriv.py | rparini/cxroots | 037247fc47b29781b9cc66857a8395283e8ecc86 | [
"BSD-3-Clause"
] | 10 | 2017-10-28T18:15:35.000Z | 2022-02-10T01:44:34.000Z | cxroots/tests/test_deriv.py | rparini/cxroots | 037247fc47b29781b9cc66857a8395283e8ecc86 | [
"BSD-3-Clause"
] | 50 | 2019-04-11T19:15:07.000Z | 2022-02-14T20:25:08.000Z | cxroots/tests/test_deriv.py | rparini/cxroots | 037247fc47b29781b9cc66857a8395283e8ecc86 | [
"BSD-3-Clause"
] | 1 | 2019-04-11T12:38:29.000Z | 2019-04-11T12:38:29.000Z | import pytest
import numpy as np
from numpy import cos, sin
from cxroots import Circle, Rectangle
from cxroots import CxDerivative
@pytest.mark.parametrize('C', [
pytest.param(Circle(0, 2), id='circle'),
pytest.param(Rectangle([-1.5,1.5],[-2,2]), id='rect'),
pytest.param(None, id='default')
])
def test_CxDerivative(C):
f = lambda z: z**10 - 2*z**5 + sin(z)*cos(z/2)
df = lambda z: 10*(z**9 - z**4) + cos(z)*cos(z/2) - 0.5*sin(z)*sin(z/2)
z = np.array([-1.234, 0.3+1j, 0.1j, -0.9-0.5j])
assert CxDerivative(f, z, n=1, contour=C) == pytest.approx(df(z))
| 28 | 75 | 0.615646 | import pytest
import numpy as np
from numpy import cos, sin
from cxroots import Circle, Rectangle
from cxroots import CxDerivative
@pytest.mark.parametrize('C', [
pytest.param(Circle(0, 2), id='circle'),
pytest.param(Rectangle([-1.5,1.5],[-2,2]), id='rect'),
pytest.param(None, id='default')
])
def test_CxDerivative(C):
f = lambda z: z**10 - 2*z**5 + sin(z)*cos(z/2)
df = lambda z: 10*(z**9 - z**4) + cos(z)*cos(z/2) - 0.5*sin(z)*sin(z/2)
z = np.array([-1.234, 0.3+1j, 0.1j, -0.9-0.5j])
assert CxDerivative(f, z, n=1, contour=C) == pytest.approx(df(z))
| true | true |
f73db6bec758f45a41ffa8dda0c1f75354e82d9c | 9,043 | py | Python | fhirpathpy/engine/evaluators/__init__.py | beda-software/fhirpath-py | aaac1a4209e3e3cbce6f62246c0822c6cdaf5af5 | [
"MIT"
] | 9 | 2020-08-13T18:54:39.000Z | 2022-03-03T01:26:44.000Z | fhirpathpy/engine/evaluators/__init__.py | beda-software/fhirpath-py | aaac1a4209e3e3cbce6f62246c0822c6cdaf5af5 | [
"MIT"
] | 6 | 2020-05-05T06:19:30.000Z | 2021-12-02T07:31:21.000Z | fhirpathpy/engine/evaluators/__init__.py | beda-software/fhirpath-py | aaac1a4209e3e3cbce6f62246c0822c6cdaf5af5 | [
"MIT"
] | 3 | 2020-07-15T08:25:46.000Z | 2021-09-15T16:56:06.000Z | from functools import reduce
import re
import json
import fhirpathpy.engine as engine
import fhirpathpy.engine.util as util
import fhirpathpy.engine.nodes as nodes
def boolean_literal(ctx, parentData, node):
if node["text"] == "true":
return [True]
return [False]
def number_literal(ctx, parentData, node):
float_number = float(node["text"])
int_number = int(float_number)
return [int_number] if float_number == int_number else [float_number]
def identifier(ctx, parentData, node):
return [re.sub(r"(^\"|\"$)", "", node["text"])]
def invocation_term(ctx, parentData, node):
return engine.do_eval(ctx, parentData, node["children"][0])
def invocation_expression(ctx, parentData, node):
return list(
reduce(
lambda accumulator, children: engine.do_eval(ctx, accumulator, children),
node["children"],
parentData,
)
)
def param_list(ctx, parentData, node):
# we do not eval param list because sometimes it should be passed as
# lambda/macro (for example in case of where(...)
return node
def union_expression(ctx, parentData, node):
return engine.infix_invoke(ctx, "|", parentData, node["children"])
def this_invocation(ctx, parentData, node):
return util.arraify(ctx["currentData"])
def op_expression(ctx, parentData, node):
op = node["terminalNodeText"][0]
return engine.infix_invoke(ctx, op, parentData, node["children"])
def alias_op_expression(mapFn):
def func(ctx, parentData, node):
op = node["terminalNodeText"][0]
if not op in mapFn:
raise Exception(
"Do not know how to alias " + op + " by " + json.dumps(mapFn)
)
alias = mapFn[op]
return engine.infix_invoke(ctx, alias, parentData, node["children"])
return func
def term_expression(ctx, parentData, node):
return engine.do_eval(ctx, parentData, node["children"][0])
def null_literal(ctx, parentData, node):
return []
def parenthesized_term(ctx, parentData, node):
return engine.do_eval(ctx, parentData, node["children"][0])
def literal_term(ctx, parentData, node):
term = node["children"][0]
if term:
return engine.do_eval(ctx, parentData, term)
return [node["text"]]
# TODO
def external_constant_term(ctx, parent_data, node):
ext_constant = node["children"][0]
ext_identifier = ext_constant["children"][0]
varName = identifier(ctx, parent_data, ext_identifier)[0]
if not varName in ctx["vars"]:
return []
value = ctx["vars"][varName]
# For convenience, we all variable values to be passed in without their array
# wrapper. However, when evaluating, we need to put the array back in.
if not isinstance(value, list):
return [value]
return value
def string_literal(ctx, parentData, node):
# Remove the beginning and ending quotes.
rtn = re.sub(r"^['\"]|['\"]$", "", node["text"])
rtn = rtn.replace("\\'", "'")
rtn = rtn.replace('\\"', '"')
rtn = rtn.replace("\\r", "\r")
rtn = rtn.replace("\\n", "\n")
rtn = rtn.replace("\\t", "\t")
rtn = rtn.replace("\\f", "\f")
rtn = rtn.replace("\\\\", "\\")
# TODO
# rtn = rtn.replace(/\\(u\d{4}|.)/g, function(match, submatch) {
# if (submatch.length > 1)
# return String.fromCharCode('0x'+submatch.slice(1));
# else
# return submatch;
return [rtn]
def quantity_literal(ctx, parentData, node):
valueNode = node["children"][0]
value = float(valueNode["terminalNodeText"][0])
unitNode = valueNode["children"][0]
unit = unitNode.terminalNodeText[0]
# Sometimes the unit is in a child node of the child
if unit is not None and len(unitNode["children"]) > 0:
unit = unitNode["children"][0]["terminalNodeText"][0]
return [nodes.FP_Quantity(value, unit)]
def date_time_literal(ctx, parentData, node):
dateStr = node["text"][1:]
return [nodes.FP_DateTime(dateStr)]
def time_literal(ctx, parentData, node):
timeStr = node["text"][2:]
return [nodes.FP_Time(timeStr)]
def create_reduce_member_invocation(model, key):
def func(acc, res):
res = nodes.ResourceNode.create_node(res)
childPath = ""
if res.path is not None:
childPath = res.path + "." + key
if (
model is not None
and "pathsDefinedElsewhere" in model
and childPath in model["pathsDefinedElsewhere"]
):
childPath = model["pathsDefinedElsewhere"][childPath]
actualTypes = None
if (
model is not None
and "choiceTypePaths" in model
and childPath in model["choiceTypePaths"]
):
actualTypes = model["choiceTypePaths"][childPath]
toAdd = None
if isinstance(actualTypes, list):
# Use actualTypes to find the field's value
for actualType in actualTypes:
field = key + actualType
if isinstance(res.data, (dict, list)) and field in res.data:
toAdd = res.data[field]
childPath = actualType
break
else:
if isinstance(res.data, (dict, list)) and key in res.data:
toAdd = res.data[key]
if util.is_some(toAdd):
if isinstance(toAdd, list):
mapped = [nodes.ResourceNode.create_node(x, childPath) for x in toAdd]
acc = acc + mapped
else:
acc.append(nodes.ResourceNode.create_node(toAdd, childPath))
return acc
return acc
return func
def member_invocation(ctx, parentData, node):
key = engine.do_eval(ctx, parentData, node["children"][0])[0]
model = ctx["model"]
if isinstance(parentData, list):
if util.is_capitalized(key):
try:
filtered = [x for x in parentData if x["resourceType"] == key]
mapped = [nodes.ResourceNode.create_node(x, key) for x in filtered]
return mapped
except TypeError:
pass
return list(reduce(create_reduce_member_invocation(model, key), parentData, []))
return []
def indexer_expression(ctx, parentData, node):
coll_node = node["children"][0]
idx_node = node["children"][1]
coll = engine.do_eval(ctx, parentData, coll_node)
idx = engine.do_eval(ctx, parentData, idx_node)
if util.is_empty(idx):
return []
idxNum = int(idx[0])
if coll is not None and util.is_some(idxNum) and len(coll) > idxNum and idxNum >= 0:
return [coll[idxNum]]
return []
def functn(ctx, parentData, node):
return [engine.do_eval(ctx, parentData, x) for x in node["children"]]
def function_invocation(ctx, parentData, node):
args = engine.do_eval(ctx, parentData, node["children"][0])
fn_name = args[0]
args = args[1:]
raw_params = None
if isinstance(args, list) and len(args) > 0 and "children" in args[0]:
raw_params = args[0]["children"]
return engine.doInvoke(ctx, fn_name, parentData, raw_params)
def polarity_expression(ctx, parentData, node):
sign = node["terminalNodeText"][0]
rtn = engine.do_eval(ctx, parentData, node["children"][0])
if len(rtn) != 1: # not yet in spec, but per Bryn Rhodes
raise Exception(
"Unary " + sign + " can only be applied to an individual number."
)
if not util.is_number(rtn[0]):
raise Exception("Unary " + sign + " can only be applied to a number.")
if sign == "-":
rtn[0] = -rtn[0]
return rtn
evaluators = {
"Functn": functn,
"ParamList": param_list,
"Identifier": identifier,
# terms
"NullLiteral": null_literal,
"LiteralTerm": literal_term,
"NumberLiteral": number_literal,
"StringLiteral": string_literal,
"BooleanLiteral": boolean_literal,
"QuantityLiteral": quantity_literal,
"DateTimeLiteral": date_time_literal,
"TimeLiteral": time_literal,
"InvocationTerm": invocation_term,
"ParenthesizedTerm": parenthesized_term,
"ExternalConstantTerm": external_constant_term,
# Invocations
"ThisInvocation": this_invocation,
"MemberInvocation": member_invocation,
"FunctionInvocation": function_invocation,
# expressions
"PolarityExpression": polarity_expression,
"IndexerExpression": indexer_expression,
"MembershipExpression": alias_op_expression(
{"contains": "containsOp", "in": "inOp"}
),
"TermExpression": term_expression,
"UnionExpression": union_expression,
"InvocationExpression": invocation_expression,
"InequalityExpression": op_expression,
"AdditiveExpression": op_expression,
"MultiplicativeExpression": op_expression,
"EqualityExpression": op_expression,
"OrExpression": op_expression,
"ImpliesExpression": op_expression,
"AndExpression": op_expression,
"XorExpression": op_expression,
}
| 28.347962 | 88 | 0.630543 | from functools import reduce
import re
import json
import fhirpathpy.engine as engine
import fhirpathpy.engine.util as util
import fhirpathpy.engine.nodes as nodes
def boolean_literal(ctx, parentData, node):
if node["text"] == "true":
return [True]
return [False]
def number_literal(ctx, parentData, node):
float_number = float(node["text"])
int_number = int(float_number)
return [int_number] if float_number == int_number else [float_number]
def identifier(ctx, parentData, node):
return [re.sub(r"(^\"|\"$)", "", node["text"])]
def invocation_term(ctx, parentData, node):
return engine.do_eval(ctx, parentData, node["children"][0])
def invocation_expression(ctx, parentData, node):
return list(
reduce(
lambda accumulator, children: engine.do_eval(ctx, accumulator, children),
node["children"],
parentData,
)
)
def param_list(ctx, parentData, node):
return node
def union_expression(ctx, parentData, node):
return engine.infix_invoke(ctx, "|", parentData, node["children"])
def this_invocation(ctx, parentData, node):
return util.arraify(ctx["currentData"])
def op_expression(ctx, parentData, node):
op = node["terminalNodeText"][0]
return engine.infix_invoke(ctx, op, parentData, node["children"])
def alias_op_expression(mapFn):
def func(ctx, parentData, node):
op = node["terminalNodeText"][0]
if not op in mapFn:
raise Exception(
"Do not know how to alias " + op + " by " + json.dumps(mapFn)
)
alias = mapFn[op]
return engine.infix_invoke(ctx, alias, parentData, node["children"])
return func
def term_expression(ctx, parentData, node):
return engine.do_eval(ctx, parentData, node["children"][0])
def null_literal(ctx, parentData, node):
return []
def parenthesized_term(ctx, parentData, node):
return engine.do_eval(ctx, parentData, node["children"][0])
def literal_term(ctx, parentData, node):
term = node["children"][0]
if term:
return engine.do_eval(ctx, parentData, term)
return [node["text"]]
def external_constant_term(ctx, parent_data, node):
ext_constant = node["children"][0]
ext_identifier = ext_constant["children"][0]
varName = identifier(ctx, parent_data, ext_identifier)[0]
if not varName in ctx["vars"]:
return []
value = ctx["vars"][varName]
if not isinstance(value, list):
return [value]
return value
def string_literal(ctx, parentData, node):
rtn = re.sub(r"^['\"]|['\"]$", "", node["text"])
rtn = rtn.replace("\\'", "'")
rtn = rtn.replace('\\"', '"')
rtn = rtn.replace("\\r", "\r")
rtn = rtn.replace("\\n", "\n")
rtn = rtn.replace("\\t", "\t")
rtn = rtn.replace("\\f", "\f")
rtn = rtn.replace("\\\\", "\\")
return [rtn]
def quantity_literal(ctx, parentData, node):
valueNode = node["children"][0]
value = float(valueNode["terminalNodeText"][0])
unitNode = valueNode["children"][0]
unit = unitNode.terminalNodeText[0]
if unit is not None and len(unitNode["children"]) > 0:
unit = unitNode["children"][0]["terminalNodeText"][0]
return [nodes.FP_Quantity(value, unit)]
def date_time_literal(ctx, parentData, node):
dateStr = node["text"][1:]
return [nodes.FP_DateTime(dateStr)]
def time_literal(ctx, parentData, node):
timeStr = node["text"][2:]
return [nodes.FP_Time(timeStr)]
def create_reduce_member_invocation(model, key):
def func(acc, res):
res = nodes.ResourceNode.create_node(res)
childPath = ""
if res.path is not None:
childPath = res.path + "." + key
if (
model is not None
and "pathsDefinedElsewhere" in model
and childPath in model["pathsDefinedElsewhere"]
):
childPath = model["pathsDefinedElsewhere"][childPath]
actualTypes = None
if (
model is not None
and "choiceTypePaths" in model
and childPath in model["choiceTypePaths"]
):
actualTypes = model["choiceTypePaths"][childPath]
toAdd = None
if isinstance(actualTypes, list):
for actualType in actualTypes:
field = key + actualType
if isinstance(res.data, (dict, list)) and field in res.data:
toAdd = res.data[field]
childPath = actualType
break
else:
if isinstance(res.data, (dict, list)) and key in res.data:
toAdd = res.data[key]
if util.is_some(toAdd):
if isinstance(toAdd, list):
mapped = [nodes.ResourceNode.create_node(x, childPath) for x in toAdd]
acc = acc + mapped
else:
acc.append(nodes.ResourceNode.create_node(toAdd, childPath))
return acc
return acc
return func
def member_invocation(ctx, parentData, node):
key = engine.do_eval(ctx, parentData, node["children"][0])[0]
model = ctx["model"]
if isinstance(parentData, list):
if util.is_capitalized(key):
try:
filtered = [x for x in parentData if x["resourceType"] == key]
mapped = [nodes.ResourceNode.create_node(x, key) for x in filtered]
return mapped
except TypeError:
pass
return list(reduce(create_reduce_member_invocation(model, key), parentData, []))
return []
def indexer_expression(ctx, parentData, node):
coll_node = node["children"][0]
idx_node = node["children"][1]
coll = engine.do_eval(ctx, parentData, coll_node)
idx = engine.do_eval(ctx, parentData, idx_node)
if util.is_empty(idx):
return []
idxNum = int(idx[0])
if coll is not None and util.is_some(idxNum) and len(coll) > idxNum and idxNum >= 0:
return [coll[idxNum]]
return []
def functn(ctx, parentData, node):
return [engine.do_eval(ctx, parentData, x) for x in node["children"]]
def function_invocation(ctx, parentData, node):
args = engine.do_eval(ctx, parentData, node["children"][0])
fn_name = args[0]
args = args[1:]
raw_params = None
if isinstance(args, list) and len(args) > 0 and "children" in args[0]:
raw_params = args[0]["children"]
return engine.doInvoke(ctx, fn_name, parentData, raw_params)
def polarity_expression(ctx, parentData, node):
sign = node["terminalNodeText"][0]
rtn = engine.do_eval(ctx, parentData, node["children"][0])
if len(rtn) != 1: # not yet in spec, but per Bryn Rhodes
raise Exception(
"Unary " + sign + " can only be applied to an individual number."
)
if not util.is_number(rtn[0]):
raise Exception("Unary " + sign + " can only be applied to a number.")
if sign == "-":
rtn[0] = -rtn[0]
return rtn
evaluators = {
"Functn": functn,
"ParamList": param_list,
"Identifier": identifier,
# terms
"NullLiteral": null_literal,
"LiteralTerm": literal_term,
"NumberLiteral": number_literal,
"StringLiteral": string_literal,
"BooleanLiteral": boolean_literal,
"QuantityLiteral": quantity_literal,
"DateTimeLiteral": date_time_literal,
"TimeLiteral": time_literal,
"InvocationTerm": invocation_term,
"ParenthesizedTerm": parenthesized_term,
"ExternalConstantTerm": external_constant_term,
# Invocations
"ThisInvocation": this_invocation,
"MemberInvocation": member_invocation,
"FunctionInvocation": function_invocation,
# expressions
"PolarityExpression": polarity_expression,
"IndexerExpression": indexer_expression,
"MembershipExpression": alias_op_expression(
{"contains": "containsOp", "in": "inOp"}
),
"TermExpression": term_expression,
"UnionExpression": union_expression,
"InvocationExpression": invocation_expression,
"InequalityExpression": op_expression,
"AdditiveExpression": op_expression,
"MultiplicativeExpression": op_expression,
"EqualityExpression": op_expression,
"OrExpression": op_expression,
"ImpliesExpression": op_expression,
"AndExpression": op_expression,
"XorExpression": op_expression,
}
| true | true |
f73db6e05fc9765276202e6de527c63c0343eae1 | 13,178 | py | Python | tests/checks/mock/test_sysstat.py | Mattlk13/dd-agent | 167d0c0ed8d7b66a531dd0c21097f0fa2fba8960 | [
"BSD-3-Clause"
] | 1,172 | 2015-01-04T21:56:16.000Z | 2022-03-13T00:01:44.000Z | tests/checks/mock/test_sysstat.py | Mattlk13/dd-agent | 167d0c0ed8d7b66a531dd0c21097f0fa2fba8960 | [
"BSD-3-Clause"
] | 2,086 | 2015-01-02T16:33:21.000Z | 2022-03-15T10:01:47.000Z | tests/checks/mock/test_sysstat.py | Mattlk13/dd-agent | 167d0c0ed8d7b66a531dd0c21097f0fa2fba8960 | [
"BSD-3-Clause"
] | 972 | 2015-01-02T05:03:46.000Z | 2022-03-23T04:36:19.000Z | # stdlib
import logging
import sys
import unittest
import mock
# project
from checks.system.unix import (
IO,
Load,
Memory,
)
from checks.system.unix import System
from config import get_system_stats
from utils.platform import Platform
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__file__)
class TestSystem(unittest.TestCase):
def testUptime(self):
global logger
system = System(logger)
metrics = system.check({})
self.assertTrue("system.uptime" in metrics)
self.assertTrue(metrics["system.uptime"] > 0)
def testLoad(self):
global logger
load = Load(logger)
res = load.check({'system_stats': get_system_stats()})
assert 'system.load.1' in res
if Platform.is_linux():
cores = int(get_system_stats().get('cpuCores'))
assert 'system.load.norm.1' in res
assert abs(res['system.load.1'] - cores * res['system.load.norm.1']) <= 0.1, (res['system.load.1'], cores * res['system.load.norm.1'])
# same test but without cpu count, no normalized load sent.
res = load.check({})
assert 'system.load.1' in res
assert 'system.load.norm.1' not in res
def testMemory(self):
global logger
res = Memory(logger).check({})
if Platform.is_linux():
MEM_METRICS = ["swapTotal", "swapFree", "swapPctFree", "swapUsed", "physTotal", "physFree", "physUsed", "physBuffers", "physCached", "physUsable", "physPctUsable", "physShared"]
for k in MEM_METRICS:
# % metric is only here if total > 0
if k == 'swapPctFree' and res['swapTotal'] == 0:
continue
assert k in res, res
assert res["swapTotal"] == res["swapFree"] + res["swapUsed"]
assert res["physTotal"] == res["physFree"] + res["physUsed"]
elif sys.platform == 'darwin':
for k in ("swapFree", "swapUsed", "physFree", "physUsed"):
assert k in res, res
def testDiskLatency(self):
# example output from `iostat -d 1 2 -x -k` on
# debian testing x86_64, from Debian package
# sysstat@10.0.4-1
debian_iostat_output = """Linux 3.2.0-2-amd64 (fireflyvm) 05/29/2012 _x86_64_ (2 CPU)
Device: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz avgqu-sz await r_await w_await svctm %util
sda 0.44 2.58 5.79 2.84 105.53 639.03 172.57 0.17 19.38 1.82 55.26 0.66 0.57
Device: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz avgqu-sz await r_await w_await svctm %util
sda 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.01
"""
global logger
checker = IO(logger)
results = checker._parse_linux2(debian_iostat_output)
self.assertTrue('sda' in results)
for key in ('rrqm/s', 'wrqm/s', 'r/s', 'w/s', 'rkB/s', 'wkB/s',
'avgrq-sz', 'avgqu-sz', 'await', 'r_await',
'w_await', 'svctm', '%util'):
self.assertTrue(key in results['sda'], 'key %r not in results["sda"]' % key)
if key == r'%util':
expected = 0.01
else:
expected = '0.00'
self.assertEqual(results['sda'][key], expected)
# example output from `iostat -d 1 2 -x -k` on
# ubuntu 18.04 x86_64, from deb package
# sysstat@11.6.1-1; main breaking change is
# that header starts with `Device` instead of `Device:`.
newer_iostat_output = """Linux 4.9.60-linuxkit-aufs (f3cf72f6fb4d) 05/09/18 _x86_64_ (2 CPU)
Device r/s w/s rkB/s wkB/s rrqm/s wrqm/s %rrqm %wrqm r_await w_await aqu-sz rareq-sz wareq-sz svctm %util
sda 0.07 0.08 0.64 5.44 0.00 0.23 0.41 72.99 2.42 19.91 0.00 8.92 65.13 0.38 0.01
Device r/s w/s rkB/s wkB/s rrqm/s wrqm/s %rrqm %wrqm r_await w_await aqu-sz rareq-sz wareq-sz svctm %util
sda 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.01
"""
checker = IO(logger)
results = checker._parse_linux2(newer_iostat_output)
self.assertTrue('sda' in results)
for key in ('rrqm/s', 'wrqm/s', 'r/s', 'w/s', 'rkB/s', 'wkB/s',
'r_await', 'w_await', 'svctm', '%util'):
self.assertTrue(key in results['sda'], 'key %r not in results["sda"]' % key)
if key == r'%util':
expected = 0.01
else:
expected = '0.00'
self.assertEqual(results['sda'][key], expected)
# example output from `iostat -d 1 d -x -k` on
# centos 5.8 x86_64, from RPM package
# sysstat@7.0.2; it differs from the first one by
# not having split-out r_await and w_await fields
centos_iostat_output = """Linux 2.6.18-308.el5 (localhost.localdomain) 05/29/2012
Device: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz avgqu-sz await svctm %util
sda 9.44 7.56 16.76 4.40 322.05 47.75 34.96 0.01 0.59 0.35 0.74
Device: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz avgqu-sz await svctm %util
sda 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.01
"""
checker = IO(logger)
results = checker._parse_linux2(centos_iostat_output)
self.assertTrue('sda' in results)
for key in ('rrqm/s', 'wrqm/s', 'r/s', 'w/s', 'rkB/s', 'wkB/s',
'avgrq-sz', 'avgqu-sz', 'await', 'svctm', '%util'):
self.assertTrue(key in results['sda'], 'key %r not in results["sda"]' % key)
if key == r'%util':
expected = 0.01
else:
expected = '0.00'
self.assertEqual(results['sda'][key], expected)
# iostat -o -d -c 2 -w 1
# OS X 10.8.3 (internal SSD + USB flash attached)
darwin_iostat_output = """ disk0 disk1
KB/t tps MB/s KB/t tps MB/s
21.11 23 0.47 20.01 0 0.00
6.67 3 0.02 0.00 0 0.00
"""
checker = IO(logger)
results = checker._parse_darwin(darwin_iostat_output)
self.assertTrue("disk0" in results.keys())
self.assertTrue("disk1" in results.keys())
self.assertEqual(
results["disk0"],
{'system.io.bytes_per_s': float(0.02 * 2**20),}
)
self.assertEqual(
results["disk1"],
{'system.io.bytes_per_s': float(0),}
)
linux_output_dashes = """Linux 3.13.0-32-generic (ubuntu-1204) 05/20/2016 _x86_64_ (2 CPU)
Device: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz avgqu-sz await r_await w_await svctm %util
sda 5.77 8.20 7.79 30.08 320.67 219.91 28.55 0.05 1.32 1.53 1.27 0.32 1.20
dm-0 0.00 0.00 11.71 37.97 313.61 219.90 21.48 0.11 2.16 2.13 2.17 0.24 1.20
dm-1 0.00 0.00 0.08 0.00 0.32 0.00 8.00 0.00 1.68 1.68 0.00 1.07 0.01
Device: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz avgqu-sz await r_await w_await svctm %util
sda 0.00 0.00 0.00 1.00 0.00 4.00 8.00 0.00 0.00 0.00 0.00 0.00 0.00
dm-0 0.00 0.00 0.00 1.00 0.00 4.00 8.00 0.00 0.00 0.00 0.00 0.00 0.00
dm-1 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00
"""
results = checker._parse_linux2(linux_output_dashes)
self.assertTrue(sorted(results.keys()) == ['dm-0', 'dm-1', 'sda'])
    def testLinuxCapIostat(self):
        """%util readings above 100 must be capped (reset to 0) by the parser.

        Exercises three sysstat output formats; values <= 100 (e.g. 99.99)
        must pass through unchanged.
        """
        # example output from `iostat -d 1 2 -x -k` on
        # debian testing x86_64, from Debian package
        # sysstat@10.0.4-1
        debian_iostat_output = """Linux 3.2.0-2-amd64 (fireflyvm) 05/29/2012 _x86_64_ (2 CPU)
Device: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz avgqu-sz await r_await w_await svctm %util
sda 0.44 2.58 5.79 2.84 105.53 639.03 172.57 0.17 19.38 1.82 55.26 0.66 0.57
Device: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz avgqu-sz await r_await w_await svctm %util
sda 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 100.01
"""
        global logger
        checker = IO(logger)
        results = checker._parse_linux2(debian_iostat_output)
        self.assertTrue('sda' in results)
        # Ensure that value is capped and return to 0 if it surpasses 100
        expected = 0
        self.assertEqual(results['sda']['%util'], expected)
        # example output from `iostat -d 1 2 -x -k` on
        # ubuntu 18.04 x86_64, from deb package
        # sysstat@11.6.1-1; main breaking change is
        # that header starts with `Device` instead of `Device:`.
        newer_iostat_output = """Linux 4.9.60-linuxkit-aufs (f3cf72f6fb4d) 05/09/18 _x86_64_ (2 CPU)
Device r/s w/s rkB/s wkB/s rrqm/s wrqm/s %rrqm %wrqm r_await w_await aqu-sz rareq-sz wareq-sz svctm %util
sda 0.07 0.08 0.64 5.44 0.00 0.23 0.41 72.99 2.42 19.91 0.00 8.92 65.13 0.38 0.01
Device r/s w/s rkB/s wkB/s rrqm/s wrqm/s %rrqm %wrqm r_await w_await aqu-sz rareq-sz wareq-sz svctm %util
sda 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 99.99
"""
        checker = IO(logger)
        results = checker._parse_linux2(newer_iostat_output)
        self.assertTrue('sda' in results)
        # 99.99 is within [0, 100], so it must be reported unchanged.
        expected = 99.99
        self.assertEqual(results['sda']['%util'], expected)
        # example output from `iostat -d 1 d -x -k` on
        # centos 5.8 x86_64, from RPM package
        # sysstat@7.0.2; it differs from the first one by
        # not having split-out r_await and w_await fields
        centos_iostat_output = """Linux 2.6.18-308.el5 (localhost.localdomain) 05/29/2012
Device: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz avgqu-sz await svctm %util
sda 9.44 7.56 16.76 4.40 322.05 47.75 34.96 0.01 0.59 0.35 0.74
Device: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz avgqu-sz await svctm %util
sda 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 102.01
"""
        checker = IO(logger)
        results = checker._parse_linux2(centos_iostat_output)
        self.assertTrue('sda' in results)
        # %util value is over 100, and value is set to 0
        expected = 0
        self.assertEqual(results['sda']['%util'], expected)
    def sunos5_output(self, *args, **kwargs):
        """Canned Solaris `iostat` output, used as a mock side_effect for
        get_subprocess_output in testSunos5CapIostat.

        The first table is the since-boot summary; the second covers the
        last interval.  sd0's %b of 102 is deliberately out of range (the
        check is expected to cap it to 0), while sd1's 10 is legitimate.
        Returns an (output, 0, 0) tuple mimicking get_subprocess_output.
        """
        output = """extended device statistics <-- since boot
device r/s w/s kr/s kw/s wait actv svc_t %w %b
ramdisk1 0.0 0.0 0.1 0.1 0.0 0.0 0.0 0 0
sd0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0 0
sd1 79.9 149.9 1237.6 6737.9 0.0 0.5 2.3 0 11
extended device statistics <-- past second
device r/s w/s kr/s kw/s wait actv svc_t %w %b
ramdisk1 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0 0
sd0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0 102
sd1 0.0 139.0 0.0 1850.6 0.0 0.0 0.1 0 10
"""
        return output, 0, 0
    def freebsd_output(self, *args, **kwargs):
        """Canned FreeBSD `iostat` output, used as a mock side_effect for
        get_subprocess_output in testFreebsdCapIostat.

        The second sample reports %b == 102 for ad0, which is out of range
        and is expected to be capped to 0 by the check.
        Returns an (output, 0, 0) tuple mimicking get_subprocess_output.
        """
        output = """extended device statistics
device r/s w/s kr/s kw/s wait svc_t %b
ad0 3.1 1.3 49.9 18.8 0 0.7 0
extended device statistics
device r/s w/s kr/s kw/s wait svc_t %b
ad0 0.0 2.0 0.0 31.8 0 0.2 102
"""
        return output, 0, 0
    @mock.patch('checks.system.unix.sys.platform', 'sunos5')
    @mock.patch('checks.system.unix.get_subprocess_output', side_effect=sunos5_output)
    def testSunos5CapIostat(self, mock_subprocess):
        """On Solaris, out-of-range %util readings (sd0's 102) are reset to 0."""
        global logger
        checker = IO(logger)
        results = checker.check({})
        for res in results:
            if res == 'sd1':
                # sd1 reported %b == 10 in the last interval: kept as-is.
                expected = 10
            else:
                # ramdisk1 was 0; sd0's 102 is out of range -> capped to 0.
                expected = 0
            self.assertEqual(results[res]['%util'], expected)
    @mock.patch('checks.system.unix.sys.platform', 'freebsd')
    @mock.patch('checks.system.unix.get_subprocess_output', side_effect=freebsd_output)
    def testFreebsdCapIostat(self, mock_subprocess):
        """On FreeBSD, ad0's out-of-range %b of 102 must be capped to 0."""
        global logger
        checker = IO(logger)
        results = checker.check({})
        expected = 0
        for res in results:
            self.assertEqual(results[res]['%util'], expected)
| 45.916376 | 189 | 0.532934 |
import logging
import sys
import unittest
import mock
from checks.system.unix import (
IO,
Load,
Memory,
)
from checks.system.unix import System
from config import get_system_stats
from utils.platform import Platform
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__file__)
class TestSystem(unittest.TestCase):
def testUptime(self):
global logger
system = System(logger)
metrics = system.check({})
self.assertTrue("system.uptime" in metrics)
self.assertTrue(metrics["system.uptime"] > 0)
def testLoad(self):
global logger
load = Load(logger)
res = load.check({'system_stats': get_system_stats()})
assert 'system.load.1' in res
if Platform.is_linux():
cores = int(get_system_stats().get('cpuCores'))
assert 'system.load.norm.1' in res
assert abs(res['system.load.1'] - cores * res['system.load.norm.1']) <= 0.1, (res['system.load.1'], cores * res['system.load.norm.1'])
res = load.check({})
assert 'system.load.1' in res
assert 'system.load.norm.1' not in res
def testMemory(self):
global logger
res = Memory(logger).check({})
if Platform.is_linux():
MEM_METRICS = ["swapTotal", "swapFree", "swapPctFree", "swapUsed", "physTotal", "physFree", "physUsed", "physBuffers", "physCached", "physUsable", "physPctUsable", "physShared"]
for k in MEM_METRICS:
if k == 'swapPctFree' and res['swapTotal'] == 0:
continue
assert k in res, res
assert res["swapTotal"] == res["swapFree"] + res["swapUsed"]
assert res["physTotal"] == res["physFree"] + res["physUsed"]
elif sys.platform == 'darwin':
for k in ("swapFree", "swapUsed", "physFree", "physUsed"):
assert k in res, res
def testDiskLatency(self):
debian_iostat_output = """Linux 3.2.0-2-amd64 (fireflyvm) 05/29/2012 _x86_64_ (2 CPU)
Device: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz avgqu-sz await r_await w_await svctm %util
sda 0.44 2.58 5.79 2.84 105.53 639.03 172.57 0.17 19.38 1.82 55.26 0.66 0.57
Device: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz avgqu-sz await r_await w_await svctm %util
sda 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.01
"""
global logger
checker = IO(logger)
results = checker._parse_linux2(debian_iostat_output)
self.assertTrue('sda' in results)
for key in ('rrqm/s', 'wrqm/s', 'r/s', 'w/s', 'rkB/s', 'wkB/s',
'avgrq-sz', 'avgqu-sz', 'await', 'r_await',
'w_await', 'svctm', '%util'):
self.assertTrue(key in results['sda'], 'key %r not in results["sda"]' % key)
if key == r'%util':
expected = 0.01
else:
expected = '0.00'
self.assertEqual(results['sda'][key], expected)
newer_iostat_output = """Linux 4.9.60-linuxkit-aufs (f3cf72f6fb4d) 05/09/18 _x86_64_ (2 CPU)
Device r/s w/s rkB/s wkB/s rrqm/s wrqm/s %rrqm %wrqm r_await w_await aqu-sz rareq-sz wareq-sz svctm %util
sda 0.07 0.08 0.64 5.44 0.00 0.23 0.41 72.99 2.42 19.91 0.00 8.92 65.13 0.38 0.01
Device r/s w/s rkB/s wkB/s rrqm/s wrqm/s %rrqm %wrqm r_await w_await aqu-sz rareq-sz wareq-sz svctm %util
sda 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.01
"""
checker = IO(logger)
results = checker._parse_linux2(newer_iostat_output)
self.assertTrue('sda' in results)
for key in ('rrqm/s', 'wrqm/s', 'r/s', 'w/s', 'rkB/s', 'wkB/s',
'r_await', 'w_await', 'svctm', '%util'):
self.assertTrue(key in results['sda'], 'key %r not in results["sda"]' % key)
if key == r'%util':
expected = 0.01
else:
expected = '0.00'
self.assertEqual(results['sda'][key], expected)
centos_iostat_output = """Linux 2.6.18-308.el5 (localhost.localdomain) 05/29/2012
Device: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz avgqu-sz await svctm %util
sda 9.44 7.56 16.76 4.40 322.05 47.75 34.96 0.01 0.59 0.35 0.74
Device: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz avgqu-sz await svctm %util
sda 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.01
"""
checker = IO(logger)
results = checker._parse_linux2(centos_iostat_output)
self.assertTrue('sda' in results)
for key in ('rrqm/s', 'wrqm/s', 'r/s', 'w/s', 'rkB/s', 'wkB/s',
'avgrq-sz', 'avgqu-sz', 'await', 'svctm', '%util'):
self.assertTrue(key in results['sda'], 'key %r not in results["sda"]' % key)
if key == r'%util':
expected = 0.01
else:
expected = '0.00'
self.assertEqual(results['sda'][key], expected)
darwin_iostat_output = """ disk0 disk1
KB/t tps MB/s KB/t tps MB/s
21.11 23 0.47 20.01 0 0.00
6.67 3 0.02 0.00 0 0.00
"""
checker = IO(logger)
results = checker._parse_darwin(darwin_iostat_output)
self.assertTrue("disk0" in results.keys())
self.assertTrue("disk1" in results.keys())
self.assertEqual(
results["disk0"],
{'system.io.bytes_per_s': float(0.02 * 2**20),}
)
self.assertEqual(
results["disk1"],
{'system.io.bytes_per_s': float(0),}
)
linux_output_dashes = """Linux 3.13.0-32-generic (ubuntu-1204) 05/20/2016 _x86_64_ (2 CPU)
Device: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz avgqu-sz await r_await w_await svctm %util
sda 5.77 8.20 7.79 30.08 320.67 219.91 28.55 0.05 1.32 1.53 1.27 0.32 1.20
dm-0 0.00 0.00 11.71 37.97 313.61 219.90 21.48 0.11 2.16 2.13 2.17 0.24 1.20
dm-1 0.00 0.00 0.08 0.00 0.32 0.00 8.00 0.00 1.68 1.68 0.00 1.07 0.01
Device: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz avgqu-sz await r_await w_await svctm %util
sda 0.00 0.00 0.00 1.00 0.00 4.00 8.00 0.00 0.00 0.00 0.00 0.00 0.00
dm-0 0.00 0.00 0.00 1.00 0.00 4.00 8.00 0.00 0.00 0.00 0.00 0.00 0.00
dm-1 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00
"""
results = checker._parse_linux2(linux_output_dashes)
self.assertTrue(sorted(results.keys()) == ['dm-0', 'dm-1', 'sda'])
def testLinuxCapIostat(self):
debian_iostat_output = """Linux 3.2.0-2-amd64 (fireflyvm) 05/29/2012 _x86_64_ (2 CPU)
Device: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz avgqu-sz await r_await w_await svctm %util
sda 0.44 2.58 5.79 2.84 105.53 639.03 172.57 0.17 19.38 1.82 55.26 0.66 0.57
Device: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz avgqu-sz await r_await w_await svctm %util
sda 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 100.01
"""
global logger
checker = IO(logger)
results = checker._parse_linux2(debian_iostat_output)
self.assertTrue('sda' in results)
expected = 0
self.assertEqual(results['sda']['%util'], expected)
newer_iostat_output = """Linux 4.9.60-linuxkit-aufs (f3cf72f6fb4d) 05/09/18 _x86_64_ (2 CPU)
Device r/s w/s rkB/s wkB/s rrqm/s wrqm/s %rrqm %wrqm r_await w_await aqu-sz rareq-sz wareq-sz svctm %util
sda 0.07 0.08 0.64 5.44 0.00 0.23 0.41 72.99 2.42 19.91 0.00 8.92 65.13 0.38 0.01
Device r/s w/s rkB/s wkB/s rrqm/s wrqm/s %rrqm %wrqm r_await w_await aqu-sz rareq-sz wareq-sz svctm %util
sda 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 99.99
"""
checker = IO(logger)
results = checker._parse_linux2(newer_iostat_output)
self.assertTrue('sda' in results)
expected = 99.99
self.assertEqual(results['sda']['%util'], expected)
centos_iostat_output = """Linux 2.6.18-308.el5 (localhost.localdomain) 05/29/2012
Device: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz avgqu-sz await svctm %util
sda 9.44 7.56 16.76 4.40 322.05 47.75 34.96 0.01 0.59 0.35 0.74
Device: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz avgqu-sz await svctm %util
sda 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 102.01
"""
checker = IO(logger)
results = checker._parse_linux2(centos_iostat_output)
self.assertTrue('sda' in results)
expected = 0
self.assertEqual(results['sda']['%util'], expected)
def sunos5_output(self, *args, **kwargs):
output = """extended device statistics <-- since boot
device r/s w/s kr/s kw/s wait actv svc_t %w %b
ramdisk1 0.0 0.0 0.1 0.1 0.0 0.0 0.0 0 0
sd0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0 0
sd1 79.9 149.9 1237.6 6737.9 0.0 0.5 2.3 0 11
extended device statistics <-- past second
device r/s w/s kr/s kw/s wait actv svc_t %w %b
ramdisk1 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0 0
sd0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0 102
sd1 0.0 139.0 0.0 1850.6 0.0 0.0 0.1 0 10
"""
return output, 0, 0
def freebsd_output(self, *args, **kwargs):
output = """extended device statistics
device r/s w/s kr/s kw/s wait svc_t %b
ad0 3.1 1.3 49.9 18.8 0 0.7 0
extended device statistics
device r/s w/s kr/s kw/s wait svc_t %b
ad0 0.0 2.0 0.0 31.8 0 0.2 102
"""
return output, 0, 0
@mock.patch('checks.system.unix.sys.platform', 'sunos5')
@mock.patch('checks.system.unix.get_subprocess_output', side_effect=sunos5_output)
def testSunos5CapIostat(self, mock_subprocess):
global logger
checker = IO(logger)
results = checker.check({})
for res in results:
if res == 'sd1':
expected = 10
else:
expected = 0
self.assertEqual(results[res]['%util'], expected)
@mock.patch('checks.system.unix.sys.platform', 'freebsd')
@mock.patch('checks.system.unix.get_subprocess_output', side_effect=freebsd_output)
def testFreebsdCapIostat(self, mock_subprocess):
global logger
checker = IO(logger)
results = checker.check({})
expected = 0
for res in results:
self.assertEqual(results[res]['%util'], expected)
| true | true |
f73db710ced49724733324912f22c9b331229261 | 9,716 | py | Python | src/utils.py | nicholasphair/iot-inspector-client | 165a02b0210d7bf55513a9c205e0a61accf03888 | [
"MIT"
] | null | null | null | src/utils.py | nicholasphair/iot-inspector-client | 165a02b0210d7bf55513a9c205e0a61accf03888 | [
"MIT"
] | null | null | null | src/utils.py | nicholasphair/iot-inspector-client | 165a02b0210d7bf55513a9c205e0a61accf03888 | [
"MIT"
] | null | null | null | """
Misc functions.
"""
import ipaddress
import datetime
import hashlib
import json
import netaddr
import netifaces
import os
import re
import requests
import scapy.all as sc
import socket
import subprocess
import sys
import threading
import time
import traceback
import uuid
import webbrowser
import server_config
# Loose dotted-quad matcher.  Note: octets may be empty ({0,3}) and only the
# prefix is anchored -- see is_ipv4_addr() below.
IPv4_REGEX = re.compile(r'[0-9]{0,3}\.[0-9]{0,3}\.[0-9]{0,3}\.[0-9]{0,3}')
# Silence scapy's console output.
sc.conf.verb = 0
# If non-empty, only devices whose MAC OUI appears below will be
# inspected. Do not populate this list in production. For internal testing.
TEST_OUI_LIST = [
    # 'd83134', # Roku
    # '74f61c', # Danny's Pixel phone
]
# Make sure Inspector's home directory exists
home_dir = os.path.join(os.path.expanduser('~'), 'princeton-iot-inspector')
if not os.path.isdir(home_dir):
    os.mkdir(home_dir)
def is_ipv4_addr(value):
    """Returns a truthy match object if ``value`` begins with a dotted quad.

    NOTE(review): ``re.match`` anchors only at the start and the pattern
    allows empty octets, so strings such as ``'1.2.3.4.5'`` or ``'...'``
    also match -- confirm callers only need this loose prefix check.
    """
    return IPv4_REGEX.match(value)
def get_user_config():
    """Returns the user_config dict.

    Loads ``iot_inspector_config.json`` from Inspector's home directory if
    present; otherwise registers a new anonymous user with the server,
    persists ``{user_key, secret_salt}`` to that file, and returns it.
    Blocks (retrying once per second) until the server returns a valid key.
    """
    user_config_file = os.path.join(
        os.path.expanduser('~'),
        'princeton-iot-inspector',
        'iot_inspector_config.json'
    )
    try:
        with open(user_config_file) as fp:
            return json.load(fp)
    except Exception:
        # No config yet (or unreadable/corrupt) -- fall through and create one.
        pass
    while True:
        user_key = requests.get(server_config.NEW_USER_URL).text.strip()
        # Make sure we're not getting server's error messages
        # (a valid key is exactly 32 characters).
        if len(user_key) == 32:
            break
        time.sleep(1)
    user_key = user_key.replace('-', '')
    secret_salt = str(uuid.uuid4())
    with open(user_config_file, 'w') as fp:
        config_dict = {
            'user_key': user_key,
            'secret_salt': secret_salt
        }
        json.dump(config_dict, fp)
    return config_dict
class TimeoutError(Exception):
    """Module-local timeout exception (no raise sites in this file).

    NOTE(review): shadows the builtin ``TimeoutError`` (Python 3.3+);
    renaming would change the module's public interface, so it is kept.
    """
    pass
# Serializes stderr/log-file writes from safe_run() across threads.
_lock = threading.Lock()
def log(*args):
    """Appends a timestamped, space-joined line to Inspector's log file."""
    log_str = '[%s] ' % datetime.datetime.today()
    log_str += ' '.join([str(v) for v in args])
    log_file_path = os.path.join(
        os.path.expanduser('~'),
        'princeton-iot-inspector',
        'iot_inspector_logs.txt'
    )
    # Append mode: the file grows for the lifetime of the install.
    with open(log_file_path, 'a') as fp:
        fp.write(log_str + '\n')
def get_gateway_ip(timeout=10):
    """Returns the IP address of the gateway.

    ``timeout`` is retained for backward compatibility but is unused:
    ``get_default_route()`` takes no arguments, so forwarding the value
    (as the previous code did) raised ``TypeError`` on every call.
    """
    return get_default_route()[0]
def get_host_ip(timeout=10):
    """Returns the host's local IP (where IoT Inspector client runs).

    ``timeout`` is retained for backward compatibility but is unused:
    ``get_default_route()`` takes no arguments, so forwarding the value
    (as the previous code did) raised ``TypeError`` on every call.
    """
    return get_default_route()[2]
def _get_routes():
    """Polls scapy's routing table until it is non-empty, then returns it."""
    while True:
        # resync() re-reads the OS routing table into scapy.
        sc.conf.route.resync()
        routes = sc.conf.route.routes
        if routes:
            return routes
        time.sleep(1)
def get_default_route():
    """Returns (gateway_ip, iface, host_ip) for the preferred interface.

    Blocks until a matching route is found; exits the process when there
    is no network connectivity at all.  Side effect: sets ``sc.conf.iface``.
    """
    # Discover the active/preferred network interface
    # by connecting to Google's public DNS server
    # (a UDP connect() sends no packets; it only selects a source address).
    try:
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
            s.settimeout(2)
            s.connect(("8.8.8.8", 80))
            iface_ip = s.getsockname()[0]
    except socket.error:
        sys.stderr.write('IoT Inspector cannot run without network connectivity.\n')
        sys.exit(1)
    while True:
        routes = _get_routes()
        default_route = None
        for route in routes:
            # NOTE(review): assumes scapy route tuples are laid out as
            # (net, mask, gateway, iface, output_ip, [metric]) so that
            # route[4] is the local output IP -- confirm across scapy
            # versions.
            if route[4] == iface_ip:
                # Reassign scapy's default interface to the one we selected
                sc.conf.iface = route[3]
                default_route = route[2:5]
                break
        if default_route:
            break
        log('get_default_route: retrying')
        time.sleep(1)
    # If we are using windows, conf.route.routes table doesn't update.
    # We have to update routing table manually for packets
    # to pick the correct route.
    if sys.platform.startswith('win'):
        for i, route in enumerate(routes):
            # if we see our selected iface, update the metrics to 0
            if route[3] == default_route[1]:
                routes[i] = (*route[:-1], 0)
    return default_route
def get_network_ip_range_windows():
    """Returns the set of all IP addresses (as strings) in the local subnet
    on Windows, mirroring get_network_ip_range().

    Bug fixes: the previous version computed ``subnet`` but never added its
    addresses to ``ip_set`` (so it always returned an empty set), and it
    raised ``NameError`` when no address entry matched the interface IP
    (``netmask`` unbound).
    """
    default_iface = get_default_route()
    iface_filter = default_iface[1]
    ip_set = set()
    iface_ip = iface_filter.ip
    iface_guid = iface_filter.guid
    netmask = None
    for k, v in netifaces.ifaddresses(iface_guid).items():
        if v[0]['addr'] == iface_ip:
            netmask = v[0]['netmask']
            break
    if netmask is None:
        # Could not determine the subnet; return an empty range.
        return ip_set
    network = netaddr.IPAddress(iface_ip)
    cidr = netaddr.IPAddress(netmask).netmask_bits()
    subnet = netaddr.IPNetwork('{}/{}'.format(network, cidr))
    for ip in subnet:
        ip_set.add(str(ip))
    return ip_set
def check_ethernet_network():
    """
    Check presence of non-Ethernet network adapters (e.g., VPN).

    VPNs use TUN interfaces which don't have a hardware address.
    Returns True when the default interface exposes a MAC address.
    """
    default_iface = get_default_route()
    assert default_iface[1] == sc.conf.iface, "incorrect sc.conf.iface"
    iface_str = ''
    if sys.platform.startswith('win'):
        # On Windows sc.conf.iface is a NetworkInterface object; netifaces
        # addresses it by GUID rather than by name.
        iface_info = sc.conf.iface
        iface_str = iface_info.guid
    else:
        iface_str = sc.conf.iface
    ifaddresses = netifaces.ifaddresses(str(iface_str))
    try:
        iface_mac = ifaddresses[netifaces.AF_LINK][0]['addr']
    except KeyError:
        # No link-layer entry at all: not an Ethernet-style adapter.
        return False
    return iface_mac != ''
def get_network_ip_range():
    """
    Gets network IP range for the default interface.

    Returns the set of all IP addresses (as strings) in the local subnet,
    or an empty set when the netmask cannot be determined.
    """
    ip_set = set()
    default_route = get_default_route()
    assert default_route[1] == sc.conf.iface, "incorrect sc.conf.iface"
    iface_str = ''
    if sys.platform.startswith('win'):
        # netifaces addresses Windows interfaces by GUID.
        iface_info = sc.conf.iface
        iface_str = iface_info.guid
    else:
        iface_str = sc.conf.iface
    netmask = None
    for k, v in netifaces.ifaddresses(str(iface_str)).items():
        # Find the address family whose first address matches the host IP
        # (default_route[2]) and take its netmask.
        if v[0]['addr'] == default_route[2]:
            netmask = v[0]['netmask']
            break
    if netmask is None:
        return set()
    gateway_ip = netaddr.IPAddress(default_route[0])
    cidr = netaddr.IPAddress(netmask).netmask_bits()
    subnet = netaddr.IPNetwork('{}/{}'.format(gateway_ip, cidr))
    for ip in subnet:
        ip_set.add(str(ip))
    return ip_set
def get_my_mac():
    """Returns the MAC addr of the default route interface."""
    # Restrict get_my_mac_set to the single default-route interface; the
    # resulting set therefore has exactly one element.
    mac_set = get_my_mac_set(iface_filter=get_default_route()[1])
    return mac_set.pop()
def get_my_mac_set(iface_filter=None):
    """Returns a set of MAC addresses of the current host.

    When ``iface_filter`` is given, only that interface is considered.
    """
    out_set = set()
    if sys.platform.startswith("win"):
        # Deferred import: scapy.arch.windows only exists on Windows.
        from scapy.arch.windows import NetworkInterface
        if type(iface_filter) == NetworkInterface:
            out_set.add(iface_filter.mac)
    for iface in sc.get_if_list():
        if iface_filter is not None and iface != iface_filter:
            continue
        try:
            mac = sc.get_if_hwaddr(iface)
        except Exception as e:
            # Some pseudo-interfaces expose no hardware address; skip them.
            continue
        else:
            out_set.add(mac)
    return out_set
class _SafeRunError(object):
"""Used privately to denote error state in safe_run()."""
def __init__(self):
pass
def restart_upon_crash(func, args=None, kwargs=None):
    """Restarts func upon unexpected exception and logs stack trace.

    Retries ``func(*args, **kwargs)`` (via safe_run) once per second until
    it completes without raising, then returns its result.  The previous
    mutable default arguments ([] / {}) are replaced with None sentinels.
    """
    args = [] if args is None else args
    kwargs = {} if kwargs is None else kwargs
    while True:
        result = safe_run(func, args, kwargs)
        if isinstance(result, _SafeRunError):
            time.sleep(1)
            continue
        return result
def safe_run(func, args=None, kwargs=None):
    """Calls ``func(*args, **kwargs)``; returns _SafeRunError() upon failure
    and logs the stack trace.

    The previous mutable default arguments ([] / {}) are replaced with None
    sentinels; behavior is otherwise unchanged.
    """
    args = [] if args is None else args
    kwargs = {} if kwargs is None else kwargs
    try:
        return func(*args, **kwargs)
    except Exception as e:
        err_msg = '=' * 80 + '\n'
        err_msg += 'Time: %s\n' % datetime.datetime.today()
        err_msg += 'Function: %s %s %s\n' % (func, args, kwargs)
        err_msg += 'Exception: %s\n' % e
        err_msg += str(traceback.format_exc()) + '\n\n\n'
        # _lock serializes the stderr and log-file writes across threads.
        with _lock:
            sys.stderr.write(err_msg + '\n')
            log(err_msg)
        return _SafeRunError()
def get_device_id(device_mac, host_state):
    """Derives a stable pseudonymous device ID from a MAC address.

    The MAC is lower-cased and stripped of ':' separators, salted with
    ``host_state.secret_salt``, hashed with SHA-256, and the first 10 hex
    digits are returned with an 's' prefix.
    """
    normalized_mac = str(device_mac).lower().replace(':', '')
    salted = normalized_mac + str(host_state.secret_salt)
    digest = hashlib.sha256(salted.encode('utf-8')).hexdigest()
    return 's' + digest[:10]
def smart_max(v1, v2):
    """Returns the larger of ``v1`` and ``v2`` while tolerating ``None``.

    Python cannot compare None and int, so a None operand is treated as
    "missing" and the other value is returned (which may itself be None
    when both are).
    """
    if v1 is None:
        return v2
    if v2 is None:
        return v1
    return v1 if v1 >= v2 else v2
def smart_min(v1, v2):
    """Returns the smaller of ``v1`` and ``v2`` while tolerating ``None``.

    A None operand is treated as "missing" and the other value is
    returned (which may itself be None when both are).
    """
    if v1 is None:
        return v2
    if v2 is None:
        return v1
    return v1 if v1 <= v2 else v2
def get_min_max_tuple(min_max_tuple, value):
    """Folds ``value`` into a running ``(min, max)`` pair.

    Either slot may be None (meaning "no value seen yet").
    Example: ``get_min_max_tuple((2, 3), 4)`` returns ``(2, 4)``.
    """
    lo, hi = min_max_tuple
    return (smart_min(lo, value), smart_max(hi, value))
def get_oui(mac):
    """Returns the OUI (first 6 hex digits) of ``mac``, lower-cased and
    with ':' separators removed."""
    normalized = mac.lower().replace(':', '')
    return normalized[:6]
def get_os():
    """Returns 'mac', 'linux', or 'windows'. Raises RuntimeError otherwise."""
    # Check prefixes in the same order as before: darwin, linux, win.
    for prefix, label in (('darwin', 'mac'), ('linux', 'linux'), ('win', 'windows')):
        if sys.platform.startswith(prefix):
            return label
    raise RuntimeError('Unsupported operating system.')
def open_browser(url):
    """Best-effort: opens ``url`` in a new browser tab, preferring Chrome.

    Falls back to the system default browser, and swallows every error --
    failing to open a browser must never crash the client.
    """
    try:
        try:
            webbrowser.get('chrome').open(url, new=2)
        except webbrowser.Error:
            webbrowser.open(url, new=2)
    except Exception:
        pass
def test():
    """Ad-hoc manual smoke test for this module (run via __main__)."""
    # check_ethernet_network()
    print(get_default_route())
if __name__ == '__main__':
test() | 23.188544 | 84 | 0.616097 | import ipaddress
import datetime
import hashlib
import json
import netaddr
import netifaces
import os
import re
import requests
import scapy.all as sc
import socket
import subprocess
import sys
import threading
import time
import traceback
import uuid
import webbrowser
import server_config
IPv4_REGEX = re.compile(r'[0-9]{0,3}\.[0-9]{0,3}\.[0-9]{0,3}\.[0-9]{0,3}')
sc.conf.verb = 0
TEST_OUI_LIST = [
ctor's directory exits
home_dir = os.path.join(os.path.expanduser('~'), 'princeton-iot-inspector')
if not os.path.isdir(home_dir):
os.mkdir(home_dir)
def is_ipv4_addr(value):
return IPv4_REGEX.match(value)
def get_user_config():
user_config_file = os.path.join(
os.path.expanduser('~'),
'princeton-iot-inspector',
'iot_inspector_config.json'
)
try:
with open(user_config_file) as fp:
return json.load(fp)
except Exception:
pass
while True:
user_key = requests.get(server_config.NEW_USER_URL).text.strip()
if len(user_key) == 32:
break
time.sleep(1)
user_key = user_key.replace('-', '')
secret_salt = str(uuid.uuid4())
with open(user_config_file, 'w') as fp:
config_dict = {
'user_key': user_key,
'secret_salt': secret_salt
}
json.dump(config_dict, fp)
return config_dict
class TimeoutError(Exception):
pass
_lock = threading.Lock()
def log(*args):
log_str = '[%s] ' % datetime.datetime.today()
log_str += ' '.join([str(v) for v in args])
log_file_path = os.path.join(
os.path.expanduser('~'),
'princeton-iot-inspector',
'iot_inspector_logs.txt'
)
with open(log_file_path, 'a') as fp:
fp.write(log_str + '\n')
def get_gateway_ip(timeout=10):
return get_default_route(timeout)[0]
def get_host_ip(timeout=10):
return get_default_route(timeout)[2]
def _get_routes():
while True:
sc.conf.route.resync()
routes = sc.conf.route.routes
if routes:
return routes
time.sleep(1)
def get_default_route():
try:
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
s.settimeout(2)
s.connect(("8.8.8.8", 80))
iface_ip = s.getsockname()[0]
except socket.error:
sys.stderr.write('IoT Inspector cannot run without network connectivity.\n')
sys.exit(1)
while True:
routes = _get_routes()
default_route = None
for route in routes:
if route[4] == iface_ip:
# Reassign scapy's default interface to the one we selected
sc.conf.iface = route[3]
default_route = route[2:5]
break
if default_route:
break
log('get_default_route: retrying')
time.sleep(1)
# We have to update routing table manually for packets
# to pick the correct route.
if sys.platform.startswith('win'):
for i, route in enumerate(routes):
# if we see our selected iface, update the metrics to 0
if route[3] == default_route[1]:
routes[i] = (*route[:-1], 0)
return default_route
def get_network_ip_range_windows():
default_iface = get_default_route()
iface_filter = default_iface[1]
ip_set = set()
iface_ip = iface_filter.ip
iface_guid = iface_filter.guid
for k, v in netifaces.ifaddresses(iface_guid).items():
if v[0]['addr'] == iface_ip:
netmask = v[0]['netmask']
break
network = netaddr.IPAddress(iface_ip)
cidr = netaddr.IPAddress(netmask).netmask_bits()
subnet = netaddr.IPNetwork('{}/{}'.format(network, cidr))
return ip_set
def check_ethernet_network():
default_iface = get_default_route()
assert default_iface[1] == sc.conf.iface, "incorrect sc.conf.iface"
iface_str = ''
if sys.platform.startswith('win'):
iface_info = sc.conf.iface
iface_str = iface_info.guid
else:
iface_str = sc.conf.iface
ifaddresses = netifaces.ifaddresses(str(iface_str))
try:
iface_mac = ifaddresses[netifaces.AF_LINK][0]['addr']
except KeyError:
return False
return iface_mac != ''
def get_network_ip_range():
ip_set = set()
default_route = get_default_route()
assert default_route[1] == sc.conf.iface, "incorrect sc.conf.iface"
iface_str = ''
if sys.platform.startswith('win'):
iface_info = sc.conf.iface
iface_str = iface_info.guid
else:
iface_str = sc.conf.iface
netmask = None
for k, v in netifaces.ifaddresses(str(iface_str)).items():
if v[0]['addr'] == default_route[2]:
netmask = v[0]['netmask']
break
if netmask is None:
return set()
gateway_ip = netaddr.IPAddress(default_route[0])
cidr = netaddr.IPAddress(netmask).netmask_bits()
subnet = netaddr.IPNetwork('{}/{}'.format(gateway_ip, cidr))
for ip in subnet:
ip_set.add(str(ip))
return ip_set
def get_my_mac():
mac_set = get_my_mac_set(iface_filter=get_default_route()[1])
return mac_set.pop()
def get_my_mac_set(iface_filter=None):
out_set = set()
if sys.platform.startswith("win"):
from scapy.arch.windows import NetworkInterface
if type(iface_filter) == NetworkInterface:
out_set.add(iface_filter.mac)
for iface in sc.get_if_list():
if iface_filter is not None and iface != iface_filter:
continue
try:
mac = sc.get_if_hwaddr(iface)
except Exception as e:
continue
else:
out_set.add(mac)
return out_set
class _SafeRunError(object):
def __init__(self):
pass
def restart_upon_crash(func, args=[], kwargs={}):
while True:
result = safe_run(func, args, kwargs)
if isinstance(result, _SafeRunError):
time.sleep(1)
continue
return result
def safe_run(func, args=[], kwargs={}):
try:
return func(*args, **kwargs)
except Exception as e:
err_msg = '=' * 80 + '\n'
err_msg += 'Time: %s\n' % datetime.datetime.today()
err_msg += 'Function: %s %s %s\n' % (func, args, kwargs)
err_msg += 'Exception: %s\n' % e
err_msg += str(traceback.format_exc()) + '\n\n\n'
with _lock:
sys.stderr.write(err_msg + '\n')
log(err_msg)
return _SafeRunError()
def get_device_id(device_mac, host_state):
device_mac = str(device_mac).lower().replace(':', '')
s = device_mac + str(host_state.secret_salt)
return 's' + hashlib.sha256(s.encode('utf-8')).hexdigest()[0:10]
def smart_max(v1, v2):
if v1 is None:
return v2
if v2 is None:
return v1
return max(v1, v2)
def smart_min(v1, v2):
if v1 is None:
return v2
if v2 is None:
return v1
return min(v1, v2)
def get_min_max_tuple(min_max_tuple, value):
min_v, max_v = min_max_tuple
min_v = smart_min(min_v, value)
max_v = smart_max(max_v, value)
return (min_v, max_v)
def get_oui(mac):
return mac.replace(':', '').lower()[0:6]
def get_os():
os_platform = sys.platform
if os_platform.startswith('darwin'):
return 'mac'
if os_platform.startswith('linux'):
return 'linux'
if os_platform.startswith('win'):
return 'windows'
raise RuntimeError('Unsupported operating system.')
def open_browser(url):
try:
try:
webbrowser.get('chrome').open(url, new=2)
except webbrowser.Error:
webbrowser.open(url, new=2)
except Exception:
pass
def test():
# check_ethernet_network()
print(get_default_route())
if __name__ == '__main__':
test() | true | true |
f73db7538a0f6afa8557fa9495c4d50bd138351a | 3,539 | py | Python | scripts/models/01_prepare_and_save_models.py | kingagla/reviews_classification | 9bf9636035bf14fb3ce151d075a6c04f4cdbfde6 | [
"MIT"
] | 3 | 2021-04-07T04:05:33.000Z | 2021-12-09T12:57:43.000Z | scripts/models/01_prepare_and_save_models.py | kingagla/reviews_classification | 9bf9636035bf14fb3ce151d075a6c04f4cdbfde6 | [
"MIT"
] | 1 | 2021-07-30T20:43:34.000Z | 2021-07-30T20:43:34.000Z | scripts/models/01_prepare_and_save_models.py | kingagla/reviews_classification | 9bf9636035bf14fb3ce151d075a6c04f4cdbfde6 | [
"MIT"
] | null | null | null | import os
import pickle
import pandas as pd
from sklearn.cluster import DBSCAN
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.models import Sequential
from scripts.settings import *
from scripts.utils import create_dir
def prepare_for_learning(file_path, model_path, n_samples=5000, use_neutral=False):
    """Samples the pickled review vectors and fits a LabelEncoder on labels.

    Persists the sampled index to ``learning_index_path`` and the fitted
    encoder to ``model_path``; returns ``(rev_vec, X, y)`` where X holds
    the 'Vec*' feature columns and y the 'Information' labels.

    NOTE(review): the file objects handed to pickle.dump are never closed,
    and LabelEncoder.fit receives a 2-D column vector (reshape(-1, 1))
    although it expects 1-D input -- confirm against the pinned sklearn.
    """
    # load data
    rev_vec = pd.read_pickle(file_path)
    # remove neutral if not used
    if not use_neutral:
        rev_vec = rev_vec[rev_vec['Information'] != 'neu']
    # use only part of available data
    rev_vec = rev_vec.sample(n_samples)
    # save indices of training and validation set
    pickle.dump(rev_vec.index, open(learning_index_path, 'wb'))
    X, y = rev_vec[[col for col in rev_vec.columns if col.startswith('Vec')]], rev_vec['Information']
    le = LabelEncoder()
    le.fit(y.values.reshape(-1, 1))
    create_dir(os.path.dirname(model_path))
    pickle.dump(le, open(model_path, 'wb'))
    return rev_vec, X, y
def classification_report_to_excel(y_test, y_pred, excel_path):
    """Writes sklearn's per-class classification report to an Excel file.

    The report dict is rendered as a transposed DataFrame (one row per
    class/aggregate); the target directory is created when missing.
    """
    report_dict = classification_report(y_test, y_pred, output_dict=True)
    create_dir(os.path.dirname(excel_path))
    report_frame = pd.DataFrame(report_dict).transpose()
    report_frame.to_excel(excel_path)
def neural_network():
    """Builds and compiles the binary review classifier.

    A 1024-input MLP (256 -> 64 -> 16 -> 1) with ReLU hidden layers,
    dropout after the first two, and a sigmoid output, compiled with
    binary cross-entropy and the Adadelta optimizer.
    """
    layers = [
        Dense(256, input_dim=1024, activation='relu', use_bias=True,
              kernel_initializer='random_normal'),
        Dropout(0.5),
        Dense(64, activation='relu', use_bias=True, kernel_initializer='random_normal'),
        Dropout(0.5),
        Dense(16, activation='relu', use_bias=True, kernel_initializer='random_normal'),
        Dense(1, activation='sigmoid', use_bias=True, kernel_initializer='random_normal'),
    ]
    model = Sequential(layers)
    model.compile(loss='binary_crossentropy', optimizer='adadelta', metrics=['acc'])
    return model
def fit_and_save_model(X_train, y_train, model, model_path, network=False):
    """Fits ``model`` and persists it to ``model_path``.

    When ``network`` is True the model is a Keras network: ModelCheckpoint
    saves the best epoch's model to ``model_path`` during training.  Bug
    fix: the previous version then pickled the final-epoch model over that
    same file, clobbering the checkpointed best model (and leaking an open
    file handle).  Non-network models are pickled via a context manager.
    """
    # create directory for model
    create_dir(os.path.dirname(model_path))
    if network:
        checkpoint = ModelCheckpoint(model_path, monitor='val_acc', verbose=1, save_best_only=True)
        model.fit(X_train, y_train, epochs=150, batch_size=512, validation_split=0.2, callbacks=[checkpoint])
    else:
        model.fit(X_train, y_train)
        with open(model_path, 'wb') as fp:
            pickle.dump(model, fp)
def main():
    """Trains and persists all models: random forest, DBSCAN, neural net."""
    rev_vec, X, y = prepare_for_learning(rev_path,
                                         os.path.join(model_dir, label_encoder_file),
                                         n_samples=5000,
                                         use_neutral=False)
    # Reload the encoder that prepare_for_learning just saved and encode y.
    le_path = os.path.join(model_dir, label_encoder_file)
    le = pickle.load(open(le_path, 'rb'))
    y = le.transform(y)
    # learn random forest
    rf = RandomForestClassifier(n_estimators=100, max_depth=5,
                                min_samples_leaf=2,
                                class_weight='balanced', criterion='entropy')
    fit_and_save_model(X, y, rf, os.path.join(model_dir, random_forest_file), network=False)
    # use DBSCAN to find negative
    # NOTE(review): this DBSCAN instance is pickled without ever being fit
    # to any data -- verify this is intentional.
    dbs = DBSCAN(eps=0.01, min_samples=2)
    pickle.dump(dbs, open(os.path.join(model_dir, dbscan_file), 'wb'))
    # use neural network
    network = neural_network()
    fit_and_save_model(X, y, network, os.path.join(model_dir, network_file), network=True)
# Script entry point: train and persist all models.
if __name__ == '__main__':
    main()
| 39.322222 | 109 | 0.693134 | import os
import pickle
import pandas as pd
from sklearn.cluster import DBSCAN
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.models import Sequential
from scripts.settings import *
from scripts.utils import create_dir
def prepare_for_learning(file_path, model_path, n_samples=5000, use_neutral=False):
    """Load the pickled review-vector DataFrame, sample it and fit a LabelEncoder.

    Parameters:
        file_path (str): pickle file containing a DataFrame with 'Vec*'
            feature columns and an 'Information' label column.
        model_path (str): where the fitted LabelEncoder is pickled.
        n_samples (int): number of rows sampled for training.
        use_neutral (bool): when False, rows labelled 'neu' are dropped first.

    Returns:
        (rev_vec, X, y): the sampled DataFrame, its feature columns and labels.
    """
    rev_vec = pd.read_pickle(file_path)
    if not use_neutral:
        rev_vec = rev_vec[rev_vec['Information'] != 'neu']
    rev_vec = rev_vec.sample(n_samples)
    # Persist the sampled index; 'with' closes the handle (the original
    # pickle.dump(obj, open(...)) form leaked it).
    with open(learning_index_path, 'wb') as index_file:
        pickle.dump(rev_vec.index, index_file)
    X, y = rev_vec[[col for col in rev_vec.columns if col.startswith('Vec')]], rev_vec['Information']
    le = LabelEncoder()
    # NOTE(review): LabelEncoder expects a 1-D array; newer scikit-learn
    # rejects the (-1, 1) reshape — confirm the pinned sklearn version.
    le.fit(y.values.reshape(-1, 1))
    create_dir(os.path.dirname(model_path))
    with open(model_path, 'wb') as encoder_file:
        pickle.dump(le, encoder_file)
    return rev_vec, X, y
def classification_report_to_excel(y_test, y_pred, excel_path):
    """Write sklearn's classification report (transposed) to an Excel file.

    The destination directory is created if it does not exist.
    """
    cr = classification_report(y_test, y_pred, output_dict=True)
    create_dir(os.path.dirname(excel_path))
    pd.DataFrame(cr).T.to_excel(excel_path)
def neural_network():
    """Build a binary classifier MLP for 1024-dim input vectors.

    Architecture: 1024 -> 256 -> 64 -> 16 -> 1 (sigmoid), with dropout
    after the two widest hidden layers. Compiled with binary cross-entropy
    and Adadelta; the 'acc' metric name pairs with the 'val_acc' monitor
    used by fit_and_save_model's ModelCheckpoint.
    """
    model = Sequential()
    model.add(Dense(256, input_dim=1024, activation='relu', use_bias=True,
                    kernel_initializer='random_normal'))
    model.add(Dropout(0.5))
    model.add(Dense(64, activation='relu', use_bias=True, kernel_initializer='random_normal'))
    model.add(Dropout(0.5))
    model.add(Dense(16, activation='relu', use_bias=True, kernel_initializer='random_normal'))
    model.add(Dense(1, activation='sigmoid', use_bias=True, kernel_initializer='random_normal'))
    model.compile(loss='binary_crossentropy', optimizer='adadelta', metrics=['acc'])
    return model
def fit_and_save_model(X_train, y_train, model, model_path, network=False):
    """Fit *model* and persist it to *model_path*.

    Keras models (``network=True``) are saved by a ``ModelCheckpoint``
    keeping the best validation-accuracy epoch; scikit-learn estimators
    are fitted once and pickled.
    """
    create_dir(os.path.dirname(model_path))
    if network:
        checkpoint = ModelCheckpoint(model_path, monitor='val_acc', verbose=1, save_best_only=True)
        model.fit(X_train, y_train, epochs=150, batch_size=512, validation_split=0.2, callbacks=[checkpoint])
    else:
        model.fit(X_train, y_train)
        # Context manager closes the pickle file promptly (the original
        # left the handle open).
        with open(model_path, 'wb') as model_file:
            pickle.dump(model, model_file)
def main():
    """Train and persist the random forest, DBSCAN and neural-network models.

    Path constants (rev_path, model_dir, *_file) come from scripts.settings.
    """
    rev_vec, X, y = prepare_for_learning(rev_path,
                                         os.path.join(model_dir, label_encoder_file),
                                         n_samples=5000,
                                         use_neutral=False)
    le_path = os.path.join(model_dir, label_encoder_file)
    # Reload the encoder just pickled by prepare_for_learning.
    le = pickle.load(open(le_path, 'rb'))
    y = le.transform(y)
    rf = RandomForestClassifier(n_estimators=100, max_depth=5,
                                min_samples_leaf=2,
                                class_weight='balanced', criterion='entropy')
    fit_and_save_model(X, y, rf, os.path.join(model_dir, random_forest_file), network=False)
    # NOTE(review): DBSCAN is pickled unfitted — confirm downstream fits it.
    dbs = DBSCAN(eps=0.01, min_samples=2)
    pickle.dump(dbs, open(os.path.join(model_dir, dbscan_file), 'wb'))
    network = neural_network()
    fit_and_save_model(X, y, network, os.path.join(model_dir, network_file), network=True)
if __name__ == '__main__':
    main()
| true | true |
f73db7da7b2faa63f87c36a9e1cfdf5d6c9f380b | 17,646 | py | Python | plugin.video.vstream/resources/lib/runscript.py | akuala/REPO.KUALA | ea9a157025530d2ce8fa0d88431c46c5352e89d4 | [
"Apache-2.0"
] | 2 | 2018-11-02T19:55:30.000Z | 2020-08-14T02:22:20.000Z | plugin.video.vstream/resources/lib/runscript.py | akuala/REPO.KUALA | ea9a157025530d2ce8fa0d88431c46c5352e89d4 | [
"Apache-2.0"
] | null | null | null | plugin.video.vstream/resources/lib/runscript.py | akuala/REPO.KUALA | ea9a157025530d2ce8fa0d88431c46c5352e89d4 | [
"Apache-2.0"
] | 3 | 2019-12-17T20:47:00.000Z | 2021-02-11T19:03:59.000Z | # -*- coding: utf-8 -*-
# https://github.com/Kodi-vStream/venom-xbmc-addons
# Venom.
# vstream = xbmcaddon.Addon('plugin.video.vstream')
# sLibrary = xbmc.translatePath(vstream.getAddonInfo("path")).decode("utf-8")
# sys.path.append (sLibrary)
from resources.lib.comaddon import addon, dialog, VSlog, xbmc, xbmcgui, window
import xbmcvfs
import sys
import urllib
import urllib2
# from util import VStranslatePath
# from resources.lib.util import VStranslatePath
try:
from sqlite3 import dbapi2 as sqlite
VSlog('SQLITE 3 as DB engine')
except:
from pysqlite2 import dbapi2 as sqlite
VSlog('SQLITE 2 as DB engine')
try:
import json
except:
import simplejson as json
# Module identity strings; NOTE(review): presumably read by the add-on's
# plugin loader — confirm against the framework.
SITE_IDENTIFIER = 'runscript'
SITE_NAME = 'runscript'
class cClear:
    """One-shot maintenance helper for the vStream Kodi add-on.

    Instantiating the class immediately dispatches on ``sys.argv[1]``
    (see :meth:`main`) to run a single maintenance action: show the
    changelog, clear caches/databases, upload the log, toggle search
    plugins, backup/restore the database, etc.
    """
    DIALOG = dialog()
    ADDON = addon()
    def __init__(self):
        # Dispatch immediately on the action name Kodi passed on the CLI.
        self.main(sys.argv[1])
        # self.__sFunctionName = ''
    def main(self, env):
        """Run the maintenance action named by *env* and return."""
        if (env == 'urlresolver'):
            addon('script.module.urlresolver').openSettings()
            return
        elif (env == 'metahandler'):
            addon('script.module.metahandler').openSettings()
            return
        elif (env == 'changelog_old'):
            # Fetch the plain-text changelog from GitHub and display it.
            try:
                sUrl = 'https://raw.githubusercontent.com/Kodi-vStream/venom-xbmc-addons/master/plugin.video.vstream/changelog.txt'
                oRequest = urllib2.Request(sUrl)
                oResponse = urllib2.urlopen(oRequest)
                sContent = oResponse.read()
                self.TextBoxes('vStream Changelog', sContent)
            except:
                self.DIALOG.VSerror("%s, %s" % (self.ADDON.VSlang(30205), sUrl))
            return
        elif (env == 'changelog'):
            # Show the latest GitHub commits in a Kodi select dialog.
            class XMLDialog(xbmcgui.WindowXMLDialog):
                def __init__(self, *args, **kwargs):
                    xbmcgui.WindowXMLDialog.__init__(self)
                    pass
                def onInit(self):
                    self.container = self.getControl(6)
                    self.button = self.getControl(5)
                    self.getControl(3).setVisible(False)
                    self.getControl(1).setLabel('ChangeLog')
                    self.button.setLabel('OK')
                    sUrl = 'https://api.github.com/repos/Kodi-vStream/venom-xbmc-addons/commits'
                    oRequest = urllib2.Request(sUrl)
                    oResponse = urllib2.urlopen(oRequest)
                    sContent = oResponse.read()
                    result = json.loads(sContent)
                    listitems = []
                    for item in result:
                        # author info from the GitHub commit
                        icon = item['author']['avatar_url']
                        login = item['author']['login']
                        # commit message
                        try:
                            desc = item['commit']['message'].encode("utf-8")
                        except:
                            desc = 'None'
                        listitem = xbmcgui.ListItem(label = login, label2 = desc)
                        listitem.setArt({'icon': icon, 'thumb': icon})
                        listitems.append(listitem)
                    self.container.addItems(listitems)
                    self.setFocus(self.container)
                def onClick(self, controlId):
                    self.close()
                    return
                def onFocus(self, controlId):
                    self.controlId = controlId
                def _close_dialog(self):
                    self.close()
            # path = cConfig().getAddonPath()
            path = "special://home/addons/plugin.video.vstream"
            wd = XMLDialog('DialogSelect.xml', path, "Default")
            wd.doModal()
            del wd
            return
        elif (env == 'soutient'):
            # Fetch and display the support ("soutient") text from GitHub.
            try:
                sUrl = 'https://raw.githubusercontent.com/Kodi-vStream/venom-xbmc-addons/master/plugin.video.vstream/soutient.txt'
                oRequest = urllib2.Request(sUrl)
                oResponse = urllib2.urlopen(oRequest)
                sContent = oResponse.read()
                self.TextBoxes('vStream Soutient', sContent)
            except:
                self.DIALOG.VSerror("%s, %s" % (self.ADDON.VSlang(30205), sUrl))
            return
        elif (env == 'addon'):
            # Delete the add-on's video cache database after confirmation.
            if self.DIALOG.VSyesno(self.ADDON.VSlang(30456)):
                # cached_Cache = cConfig().getFileCache()
                # cached_Cache = xbmc.translatePath(cached_Cache).decode("utf-8")
                cached_Cache = "special://home/userdata/addon_data/plugin.video.vstream/video_cache.db"
                # self.ClearDir2(cached_Cache, True)
                try:
                    xbmcvfs.delete(cached_Cache)
                    self.DIALOG.VSinfo(self.ADDON.VSlang(30089))
                except:
                    self.DIALOG.VSerror(self.ADDON.VSlang(30087))
            return
        elif (env == 'clean'):
            # Drop one user-data table (history/resume/watched/favorite/download).
            liste = ['Historiques', 'Lecture en cours', 'Marqués vues', 'Marque-Pages', 'Téléchargements']
            ret = self.DIALOG.select(self.ADDON.VSlang(30110), liste)
            # cached_DB = cConfig().getFileDB()
            cached_DB = "special://home/userdata/addon_data/plugin.video.vstream/vstream.db"
            # important: only xbmcvfs can read special:// directly, so
            # translate to a real filesystem path for sqlite
            cached_DB = xbmc.translatePath(cached_DB).decode("utf-8")
            sql_drop = ""
            if ret > -1:
                if ret == 0:
                    sql_drop = "DROP TABLE history"
                elif ret == 1:
                    sql_drop = "DROP TABLE resume"
                elif ret == 2:
                    sql_drop = "DROP TABLE watched"
                elif ret == 3:
                    sql_drop = "DROP TABLE favorite"
                elif ret == 4:
                    sql_drop = "DROP TABLE download"
                try:
                    db = sqlite.connect(cached_DB)
                    dbcur = db.cursor()
                    dbcur.execute(sql_drop)
                    db.commit()
                    dbcur.close()
                    db.close()
                    self.DIALOG.VSok(self.ADDON.VSlang(30090))
                except:
                    self.DIALOG.VSerror(self.ADDON.VSlang(30091))
            return
        elif (env == 'xbmc'):
            # Clear Kodi's temp directory after confirmation.
            if self.DIALOG.VSyesno(self.ADDON.VSlang(30456)):
                # temp = xbmc.translatePath('special://temp/').decode("utf-8")
                path = "special://temp/"
                # self.ClearDir(temp,True)
                try:
                    xbmcvfs.rmdir(path, True)
                    self.DIALOG.VSok(self.ADDON.VSlang(30092))
                except:
                    self.DIALOG.VSerror(self.ADDON.VSlang(30093))
            return
        elif (env == 'fi'):
            # Clear the archive cache directory after confirmation.
            if self.DIALOG.VSyesno(self.ADDON.VSlang(30456)):
                # path = xbmc.translatePath('special://temp/').decode("utf-8")
                path = "special://temp/archive_cache/"
                try:
                    xbmcvfs.rmdir(path, True)
                    self.DIALOG.VSok(self.ADDON.VSlang(30095))
                except:
                    self.DIALOG.VSerror(self.ADDON.VSlang(30096))
                # filenames = next(os.walk(path))[2]
                # for i in filenames:
                # if ".fi" in i:
                # os.remove(os.path.join(path, i))
            return
        elif (env == 'uplog'):
            # Upload kodi.log to a pastebin and store the returned paste id.
            # NOTE(review): slexy.org may no longer be reachable — confirm.
            if self.DIALOG.VSyesno(self.ADDON.VSlang(30456)):
                # path = xbmc.translatePath('special://logpath/').decode("utf-8")
                path = "special://logpath/kodi.log"
                UA = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0'
                headers = {'User-Agent': UA}
                # filenames = next(os.walk(path))[2]
                # for i in filenames:
                if xbmcvfs.exists(path):
                    post_data = {}
                    cUrl = 'http://slexy.org/index.php/submit'
                    # logop = open(path + i, 'rb')
                    logop = xbmcvfs.File(path, 'rb')
                    result = logop.read()
                    logop.close()
                    post_data['raw_paste'] = result
                    post_data['author'] = 'kodi.log'
                    post_data['language'] = 'text'
                    post_data['permissions'] = 1  # private
                    post_data['expire'] = 259200  # 3 days
                    post_data['submit'] = 'Submit+Paste'
                    request = urllib2.Request(cUrl, urllib.urlencode(post_data), headers)
                    reponse = urllib2.urlopen(request)
                    code = reponse.geturl().replace('http://slexy.org/view/', '')
                    reponse.close()
                    self.ADDON.setSetting('service_log', code)
                    self.DIALOG.VSok(self.ADDON.VSlang(30097) + ' ' + code)
            return
        elif (env == 'search'):
            # Toggle which search plugins are enabled via a select dialog.
            from resources.lib.handler.pluginHandler import cPluginHandler
            valid = '[COLOR green][x][/COLOR]'
            class XMLDialog(xbmcgui.WindowXMLDialog):
                ADDON = addon()
                def __init__(self, *args, **kwargs):
                    xbmcgui.WindowXMLDialog.__init__(self)
                    pass
                def onInit(self):
                    self.container = self.getControl(6)
                    self.button = self.getControl(5)
                    self.getControl(3).setVisible(False)
                    self.getControl(1).setLabel(self.ADDON.VSlang(30094))
                    self.button.setLabel('OK')
                    listitems = []
                    oPluginHandler = cPluginHandler()
                    aPlugins = oPluginHandler.getAllPlugins()
                    for aPlugin in aPlugins:
                        # check whether this plugin is already enabled in settings
                        sPluginSettingsName = 'plugin_' + aPlugin[1]
                        bPlugin = self.ADDON.getSetting(sPluginSettingsName)
                        # icon = os.path.join(unicode(cConfig().getRootArt(), 'utf-8'), 'sites', aPlugin[1] + '.png')
                        icon = "special://home/addons/plugin.video.vstream/resources/art/sites/%s.png" % aPlugin[1]
                        stitle = aPlugin[0].replace('[COLOR violet]', '').replace('[COLOR orange]', '').replace('[/COLOR]', '').replace('[COLOR dodgerblue]', '').replace('[COLOR coral]', '')
                        if (bPlugin == 'true'):
                            stitle = ('%s %s') % (stitle, valid)
                        listitem = xbmcgui.ListItem(label = stitle, label2 = aPlugin[2])
                        listitem.setArt({'icon': icon, 'thumb': icon})
                        listitem.setProperty('Addon.Summary', aPlugin[2])
                        listitem.setProperty('sitename', aPlugin[1])
                        if (bPlugin == 'true'):
                            listitem.select(True)
                        listitems.append(listitem)
                    self.container.addItems(listitems)
                    self.setFocus(self.container)
                def onClick(self, controlId):
                    if controlId == 5:
                        self.close()
                        return
                    elif controlId == 99:
                        # NOTE(review): local name shadows the imported `window` helper.
                        window = xbmcgui.Window(xbmcgui.getCurrentWindowId())
                        del window
                        self.close()
                        return
                    elif controlId == 7:
                        window = xbmcgui.Window(xbmcgui.getCurrentWindowId())
                        del window
                        self.close()
                        return
                    elif controlId == 6:
                        # Toggle the selected plugin on/off and persist the setting.
                        item = self.container.getSelectedItem()
                        if item.isSelected() == True:
                            label = item.getLabel().replace(valid, '')
                            item.setLabel(label)
                            item.select(False)
                            sPluginSettingsName = ('plugin_%s') % (item.getProperty('sitename'))
                            self.ADDON.setSetting(sPluginSettingsName, str('false'))
                        else:
                            label = ('%s %s') % (item.getLabel(), valid)
                            item.setLabel(label)
                            item.select(True)
                            sPluginSettingsName = ('plugin_%s') % (item.getProperty('sitename'))
                            self.ADDON.setSetting(sPluginSettingsName, str('true'))
                        return
                def onFocus(self, controlId):
                    self.controlId = controlId
                def _close_dialog(self):
                    self.close()
                # def onAction(self, action):
                # if action.getId() in (9, 10, 92, 216, 247, 257, 275, 61467, 61448, ):
                # self.close()
            # path = cConfig().getAddonPath()
            path = "special://home/addons/plugin.video.vstream"
            wd = XMLDialog('DialogSelect.xml', path, "Default")
            wd.doModal()
            del wd
            return
        elif (env == 'thumb'):
            # Clear the thumbnail folder and the Textures*.db files.
            if self.DIALOG.VSyesno(self.ADDON.VSlang(30098)):
                text = False
                # path = xbmc.translatePath('special://userdata/Thumbnails/').decode("utf-8")
                path = "special://userdata/Thumbnails/"
                path_DB = "special://userdata/Database"
                try:
                    xbmcvfs.rmdir(path, True)
                    text = 'Clear Thumbnail Folder, Successful[CR]'
                except:
                    text = 'Clear Thumbnail Folder, Error[CR]'
                # for i in os.listdir(path):
                # folders = os.path.join(path, i).encode('utf-8')
                # if os.path.isdir(folders):
                # p = next(os.walk(folders))[2]
                # for x in p:
                # os.remove(os.path.join(folders, x).encode('utf-8'))
                # filenames = next(os.walk(path2))[2]
                folder, items = xbmcvfs.listdir(path_DB)
                items.sort()
                for sItemName in items:
                    # matches "Textures13.db" and similar
                    if "extures" in sItemName:
                        cached_Cache = "/".join([path_DB, sItemName])
                        try:
                            xbmcvfs.delete(cached_Cache)
                            text += 'Clear Thumbnail DB, Successful[CR]'
                        except:
                            text += 'Clear Thumbnail DB, Error[CR]'
                if text:
                    text = "%s (Important relancer Kodi)" % text
                    self.DIALOG.VSok(text)
                # for x in filenames:
                # if "exture" in x:
                # con = sqlite.connect(os.path.join(path2, x).encode('utf-8'))
                # cursor = con.cursor()
                # cursor.execute("DELETE FROM texture")
                # con.commit()
                # cursor.close()
                # con.close()
            return
        elif (env == 'sauv'):
            # Import/export the vstream.db user database.
            # dialog.select('Choose a playlist', ['Playlist #1', 'Playlist #2, 'Playlist #3'])
            select = self.DIALOG.VSselect(['Import', 'Export'])
            DB = "special://home/userdata/addon_data/plugin.video.vstream/vstream.db"
            if select >= 0:
                new = self.DIALOG.browse(3, 'vStream', "files")
                # NOTE(review): `new + 'vstream.db'` assumes the browsed path
                # ends with a separator — confirm browse() guarantees that.
                if new:
                    try:
                        if select == 0:
                            xbmcvfs.delete(DB)
                            # copy(source, destination)--copy file to destination, returns true/false.
                            xbmcvfs.copy(new + 'vstream.db', DB)
                        elif select == 1:
                            # copy(source, destination)--copy file to destination, returns true/false.
                            xbmcvfs.copy(DB, new + 'vstream.db')
                        self.DIALOG.VSinfo(self.ADDON.VSlang(30099))
                    except:
                        self.DIALOG.VSerror(self.ADDON.VSlang(30100))
                    return
                else:
                    return
        return
    # def ClearDir(self, dir, clearNested = False):
    # try:
    # dir = dir.decode("utf8")
    # except:
    # pass
    # for the_file in os.listdir(dir):
    # file_path = os.path.join(dir, the_file).encode('utf-8')
    # if clearNested and os.path.isdir(file_path):
    # self.ClearDir(file_path, clearNested)
    # try: os.rmdir(file_path)
    # except Exception, e: print str(e)
    # else:
    # try:os.unlink(file_path)
    # except Exception, e: print str(e)
    # def ClearDir2(self, dir, clearNested = False):
    # try:
    # dir = dir.decode("utf8")
    # except:
    # pass
    # try:os.unlink(dir)
    # except Exception, e: print str(e)
    def TextBoxes(self, heading, anounce):
        """Display *anounce* in Kodi's built-in text viewer with *heading*."""
        # activate the text viewer window
        xbmc.executebuiltin("ActivateWindow(%d)" % (10147))
        # get window
        win = window(10147)
        # win.show()
        # give window time to initialize
        xbmc.sleep(100)
        # set heading
        win.getControl(1).setLabel(heading)
        win.getControl(5).setText(anounce)
        return
# Run the requested maintenance action immediately on script invocation.
cClear()
| 40.104545 | 190 | 0.476652 |
from resources.lib.comaddon import addon, dialog, VSlog, xbmc, xbmcgui, window
import xbmcvfs
import sys
import urllib
import urllib2
try:
from sqlite3 import dbapi2 as sqlite
VSlog('SQLITE 3 as DB engine')
except:
from pysqlite2 import dbapi2 as sqlite
VSlog('SQLITE 2 as DB engine')
try:
import json
except:
import simplejson as json
SITE_IDENTIFIER = 'runscript'
SITE_NAME = 'runscript'
class cClear:
DIALOG = dialog()
ADDON = addon()
def __init__(self):
self.main(sys.argv[1])
def main(self, env):
if (env == 'urlresolver'):
addon('script.module.urlresolver').openSettings()
return
elif (env == 'metahandler'):
addon('script.module.metahandler').openSettings()
return
elif (env == 'changelog_old'):
try:
sUrl = 'https://raw.githubusercontent.com/Kodi-vStream/venom-xbmc-addons/master/plugin.video.vstream/changelog.txt'
oRequest = urllib2.Request(sUrl)
oResponse = urllib2.urlopen(oRequest)
sContent = oResponse.read()
self.TextBoxes('vStream Changelog', sContent)
except:
self.DIALOG.VSerror("%s, %s" % (self.ADDON.VSlang(30205), sUrl))
return
elif (env == 'changelog'):
class XMLDialog(xbmcgui.WindowXMLDialog):
def __init__(self, *args, **kwargs):
xbmcgui.WindowXMLDialog.__init__(self)
pass
def onInit(self):
self.container = self.getControl(6)
self.button = self.getControl(5)
self.getControl(3).setVisible(False)
self.getControl(1).setLabel('ChangeLog')
self.button.setLabel('OK')
sUrl = 'https://api.github.com/repos/Kodi-vStream/venom-xbmc-addons/commits'
oRequest = urllib2.Request(sUrl)
oResponse = urllib2.urlopen(oRequest)
sContent = oResponse.read()
result = json.loads(sContent)
listitems = []
for item in result:
icon = item['author']['avatar_url']
login = item['author']['login']
try:
desc = item['commit']['message'].encode("utf-8")
except:
desc = 'None'
listitem = xbmcgui.ListItem(label = login, label2 = desc)
listitem.setArt({'icon': icon, 'thumb': icon})
listitems.append(listitem)
self.container.addItems(listitems)
self.setFocus(self.container)
def onClick(self, controlId):
self.close()
return
def onFocus(self, controlId):
self.controlId = controlId
def _close_dialog(self):
self.close()
path = "special://home/addons/plugin.video.vstream"
wd = XMLDialog('DialogSelect.xml', path, "Default")
wd.doModal()
del wd
return
elif (env == 'soutient'):
try:
sUrl = 'https://raw.githubusercontent.com/Kodi-vStream/venom-xbmc-addons/master/plugin.video.vstream/soutient.txt'
oRequest = urllib2.Request(sUrl)
oResponse = urllib2.urlopen(oRequest)
sContent = oResponse.read()
self.TextBoxes('vStream Soutient', sContent)
except:
self.DIALOG.VSerror("%s, %s" % (self.ADDON.VSlang(30205), sUrl))
return
elif (env == 'addon'):
if self.DIALOG.VSyesno(self.ADDON.VSlang(30456)):
cached_Cache = "special://home/userdata/addon_data/plugin.video.vstream/video_cache.db"
try:
xbmcvfs.delete(cached_Cache)
self.DIALOG.VSinfo(self.ADDON.VSlang(30089))
except:
self.DIALOG.VSerror(self.ADDON.VSlang(30087))
return
elif (env == 'clean'):
liste = ['Historiques', 'Lecture en cours', 'Marqués vues', 'Marque-Pages', 'Téléchargements']
ret = self.DIALOG.select(self.ADDON.VSlang(30110), liste)
cached_DB = "special://home/userdata/addon_data/plugin.video.vstream/vstream.db"
cached_DB = xbmc.translatePath(cached_DB).decode("utf-8")
sql_drop = ""
if ret > -1:
if ret == 0:
sql_drop = "DROP TABLE history"
elif ret == 1:
sql_drop = "DROP TABLE resume"
elif ret == 2:
sql_drop = "DROP TABLE watched"
elif ret == 3:
sql_drop = "DROP TABLE favorite"
elif ret == 4:
sql_drop = "DROP TABLE download"
try:
db = sqlite.connect(cached_DB)
dbcur = db.cursor()
dbcur.execute(sql_drop)
db.commit()
dbcur.close()
db.close()
self.DIALOG.VSok(self.ADDON.VSlang(30090))
except:
self.DIALOG.VSerror(self.ADDON.VSlang(30091))
return
elif (env == 'xbmc'):
if self.DIALOG.VSyesno(self.ADDON.VSlang(30456)):
path = "special://temp/"
try:
xbmcvfs.rmdir(path, True)
self.DIALOG.VSok(self.ADDON.VSlang(30092))
except:
self.DIALOG.VSerror(self.ADDON.VSlang(30093))
return
elif (env == 'fi'):
if self.DIALOG.VSyesno(self.ADDON.VSlang(30456)):
path = "special://temp/archive_cache/"
try:
xbmcvfs.rmdir(path, True)
self.DIALOG.VSok(self.ADDON.VSlang(30095))
except:
self.DIALOG.VSerror(self.ADDON.VSlang(30096))
return
elif (env == 'uplog'):
if self.DIALOG.VSyesno(self.ADDON.VSlang(30456)):
path = "special://logpath/kodi.log"
UA = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0'
headers = {'User-Agent': UA}
if xbmcvfs.exists(path):
post_data = {}
cUrl = 'http://slexy.org/index.php/submit'
logop = xbmcvfs.File(path, 'rb')
result = logop.read()
logop.close()
post_data['raw_paste'] = result
post_data['author'] = 'kodi.log'
post_data['language'] = 'text'
post_data['permissions'] = 1
post_data['expire'] = 259200
post_data['submit'] = 'Submit+Paste'
request = urllib2.Request(cUrl, urllib.urlencode(post_data), headers)
reponse = urllib2.urlopen(request)
code = reponse.geturl().replace('http://slexy.org/view/', '')
reponse.close()
self.ADDON.setSetting('service_log', code)
self.DIALOG.VSok(self.ADDON.VSlang(30097) + ' ' + code)
return
elif (env == 'search'):
from resources.lib.handler.pluginHandler import cPluginHandler
valid = '[COLOR green][x][/COLOR]'
class XMLDialog(xbmcgui.WindowXMLDialog):
ADDON = addon()
def __init__(self, *args, **kwargs):
xbmcgui.WindowXMLDialog.__init__(self)
pass
def onInit(self):
self.container = self.getControl(6)
self.button = self.getControl(5)
self.getControl(3).setVisible(False)
self.getControl(1).setLabel(self.ADDON.VSlang(30094))
self.button.setLabel('OK')
listitems = []
oPluginHandler = cPluginHandler()
aPlugins = oPluginHandler.getAllPlugins()
for aPlugin in aPlugins:
sPluginSettingsName = 'plugin_' + aPlugin[1]
bPlugin = self.ADDON.getSetting(sPluginSettingsName)
icon = "special://home/addons/plugin.video.vstream/resources/art/sites/%s.png" % aPlugin[1]
stitle = aPlugin[0].replace('[COLOR violet]', '').replace('[COLOR orange]', '').replace('[/COLOR]', '').replace('[COLOR dodgerblue]', '').replace('[COLOR coral]', '')
if (bPlugin == 'true'):
stitle = ('%s %s') % (stitle, valid)
listitem = xbmcgui.ListItem(label = stitle, label2 = aPlugin[2])
listitem.setArt({'icon': icon, 'thumb': icon})
listitem.setProperty('Addon.Summary', aPlugin[2])
listitem.setProperty('sitename', aPlugin[1])
if (bPlugin == 'true'):
listitem.select(True)
listitems.append(listitem)
self.container.addItems(listitems)
self.setFocus(self.container)
def onClick(self, controlId):
if controlId == 5:
self.close()
return
elif controlId == 99:
window = xbmcgui.Window(xbmcgui.getCurrentWindowId())
del window
self.close()
return
elif controlId == 7:
window = xbmcgui.Window(xbmcgui.getCurrentWindowId())
del window
self.close()
return
elif controlId == 6:
item = self.container.getSelectedItem()
if item.isSelected() == True:
label = item.getLabel().replace(valid, '')
item.setLabel(label)
item.select(False)
sPluginSettingsName = ('plugin_%s') % (item.getProperty('sitename'))
self.ADDON.setSetting(sPluginSettingsName, str('false'))
else:
label = ('%s %s') % (item.getLabel(), valid)
item.setLabel(label)
item.select(True)
sPluginSettingsName = ('plugin_%s') % (item.getProperty('sitename'))
self.ADDON.setSetting(sPluginSettingsName, str('true'))
return
def onFocus(self, controlId):
self.controlId = controlId
def _close_dialog(self):
self.close()
path = "special://home/addons/plugin.video.vstream"
wd = XMLDialog('DialogSelect.xml', path, "Default")
wd.doModal()
del wd
return
elif (env == 'thumb'):
if self.DIALOG.VSyesno(self.ADDON.VSlang(30098)):
text = False
path = "special://userdata/Thumbnails/"
path_DB = "special://userdata/Database"
try:
xbmcvfs.rmdir(path, True)
text = 'Clear Thumbnail Folder, Successful[CR]'
except:
text = 'Clear Thumbnail Folder, Error[CR]'
folder, items = xbmcvfs.listdir(path_DB)
items.sort()
for sItemName in items:
if "extures" in sItemName:
cached_Cache = "/".join([path_DB, sItemName])
try:
xbmcvfs.delete(cached_Cache)
text += 'Clear Thumbnail DB, Successful[CR]'
except:
text += 'Clear Thumbnail DB, Error[CR]'
if text:
text = "%s (Important relancer Kodi)" % text
self.DIALOG.VSok(text)
return
elif (env == 'sauv'):
select = self.DIALOG.VSselect(['Import', 'Export'])
DB = "special://home/userdata/addon_data/plugin.video.vstream/vstream.db"
if select >= 0:
new = self.DIALOG.browse(3, 'vStream', "files")
if new:
try:
if select == 0:
xbmcvfs.delete(DB)
# copy(source, destination)--copy file to destination, returns true/false.
xbmcvfs.copy(new + 'vstream.db', DB)
elif select == 1:
# copy(source, destination)--copy file to destination, returns true/false.
xbmcvfs.copy(DB, new + 'vstream.db')
self.DIALOG.VSinfo(self.ADDON.VSlang(30099))
except:
self.DIALOG.VSerror(self.ADDON.VSlang(30100))
return
else:
return
return
# def ClearDir(self, dir, clearNested = False):
# try:
# dir = dir.decode("utf8")
# except:
# pass
# for the_file in os.listdir(dir):
# file_path = os.path.join(dir, the_file).encode('utf-8')
# if clearNested and os.path.isdir(file_path):
# self.ClearDir(file_path, clearNested)
# try: os.rmdir(file_path)
# except Exception, e: print str(e)
# else:
# try:os.unlink(file_path)
# except Exception, e: print str(e)
# def ClearDir2(self, dir, clearNested = False):
# try:
# dir = dir.decode("utf8")
# except:
# pass
# try:os.unlink(dir)
# except Exception, e: print str(e)
def TextBoxes(self, heading, anounce):
# activate the text viewer window
xbmc.executebuiltin("ActivateWindow(%d)" % (10147))
# get window
win = window(10147)
# win.show()
# give window time to initialize
xbmc.sleep(100)
# set heading
win.getControl(1).setLabel(heading)
win.getControl(5).setText(anounce)
return
cClear()
| true | true |
f73db876a0a318f78e2772b1e62008c40b7a6f23 | 45 | py | Python | __init__.py | dhimmel/serg-pycode | 075de0ba470e4fbda5e33dfc23cb9ecd86ec53b7 | [
"BSD-2-Clause-Patent"
] | null | null | null | __init__.py | dhimmel/serg-pycode | 075de0ba470e4fbda5e33dfc23cb9ecd86ec53b7 | [
"BSD-2-Clause-Patent"
] | null | null | null | __init__.py | dhimmel/serg-pycode | 075de0ba470e4fbda5e33dfc23cb9ecd86ec53b7 | [
"BSD-2-Clause-Patent"
] | null | null | null | """Add this directory to your python path"""
| 22.5 | 44 | 0.711111 | true | true | |
f73db8f68d92d442da81fc6a9ad218bece8ed0c1 | 7,718 | py | Python | emstore/create.py | suhasaggarwal/VectorStore | 07a970b26fce64cb069e63f1a656774322055fe8 | [
"MIT"
] | null | null | null | emstore/create.py | suhasaggarwal/VectorStore | 07a970b26fce64cb069e63f1a656774322055fe8 | [
"MIT"
] | null | null | null | emstore/create.py | suhasaggarwal/VectorStore | 07a970b26fce64cb069e63f1a656774322055fe8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import struct
from contextlib import contextmanager
from functools import partial
from io import BufferedReader, UnsupportedOperation
from subprocess import call
from zipfile import BadZipFile, ZipFile
from tqdm import tqdm
import time
from emstore.open import open_leveldb
import threading
import traceback
# Half-precision float: each vector component is stored in 2 bytes.
STRUCT_FORMAT = 'e'


class CustomVectorIO:
    """Serialize a (topic, vector) pair into leveldb key/value bytes.

    The key is the UTF-8 encoded topic; the value is the vector packed
    as ``vector_size`` half-precision floats (struct format 'e').
    """

    def __init__(self, topic, vector, vector_size):
        self.topic = topic
        self.vector = vector
        self.vector_size = vector_size
        # Eagerly build the packer (and do one conversion), matching the
        # original behaviour; callers still call vectortranslate() themselves.
        self.vectortranslate()

    def vectortranslate(self):
        """(Re)build the struct packer and return the serialized pair."""
        fmt = str(self.vector_size) + STRUCT_FORMAT
        self.pack = struct.Struct(fmt).pack
        return self.deriveKVs(self.topic, self.vector)

    def deriveKVs(self, topic, vector):
        """Return ``(utf-8 key bytes, packed half-float value bytes)``."""
        components = [float(component) for component in vector]
        return topic.encode('utf-8'), self.pack(*components)
class VecIOWrapper(BufferedReader):
    """Buffered reader over a word-embedding text file.

    Iterating yields ``(key_bytes, packed_vector_bytes)`` per line: the
    trailing ``vector_size`` space-separated tokens are packed as
    half-precision floats (struct format 'e'); the leading token(s) form
    the key. If ``vector_size`` is None it is sniffed from the first
    line(s), detecting the fasttext "count dim" header line.
    """
    def __init__(self, *args, vector_size=None, fasttext_format=False, **kwargs):
        super().__init__(*args, **kwargs)
        if vector_size is None:
            try:
                vector_size, fasttext_format = self.infer_vector_size()
            except UnsupportedOperation:
                # Stream is not seekable, so sniffing would consume data.
                raise Exception(
                    '''Unable to infer vector size without read loss.
                    Please specify vector size''')
        self.vector_size = vector_size
        self.pack = struct.Struct(str(vector_size) + STRUCT_FORMAT).pack
        if fasttext_format:
            # pass first line (fasttext files start with "count dim")
            super().__next__()
    def __next__(self):
        line = super().__next__()[:-1]  # read and drop newline char
        x = line.split(b' ')  # split by whitespace
        if len(x) > self.vector_size + 1:
            # Key itself contained spaces; rejoin the leading tokens.
            # NOTE(review): joins without the original spaces — confirm intended.
            k, v = b''.join(x[:-self.vector_size]), x[-self.vector_size:]
        else:
            k, v = x[0], x[1:]
        v = [float(f) for f in v]
        return k, self.pack(*v)
    def infer_vector_size(self):
        """Return ``(vector_size, fasttext_format)`` sniffed from the stream.

        Reads one line (two for a suspected fasttext header) and rewinds
        with ``seek(0)``; raises UnsupportedOperation if not seekable.
        """
        # sample 1 entry
        first_line = super().readline()
        first_line = first_line.split(b' ')
        fasttext_format = False
        if len(first_line) == 2:
            # could be a fasttext format file - read another line
            first_line = super().readline()
            first_line = first_line.split(b' ')
            fasttext_format = True
        self.seek(0)
        return len(first_line) - 1, fasttext_format
# Module-level lock serialising concurrent leveldb batch writes
# (used by populate_batch_buffer_leveldb).
lock = threading.Lock()
@contextmanager
def open_embeddings_file(path, vector_size=None, archive_file=None):
    """Universal context manager to open CSV-like files with word embeddings.

    Returns a file-like object (BufferedReader subclass).
    Accepts both compressed and uncompressed files.
    Infers vector size if not specified, and matches all vectors to that size.
    If path is an archive that contains multiple files,
    please specify archive_file.
    """
    # Default for the plain-file path. The original referenced
    # fasttext_format without defining it on the non-zip branch (and on the
    # zip branch when vector_size was supplied), raising NameError.
    fasttext_format = False
    archive = None
    try:
        try:
            archive = ZipFile(path)
        except BadZipFile:
            # Not an archive: open the raw file; VecIOWrapper will sniff
            # the vector size / fasttext header itself if needed.
            file = path
            open_f = partial(open, mode='rb')
        else:
            filenames = [f.filename for f in archive.filelist]
            if len(filenames) == 0:
                raise Exception('Empty archive.')
            elif archive_file is not None:
                file = archive_file
            elif len(filenames) == 1:
                file = filenames[0]
            else:
                raise Exception('\n'.join([
                    'Multiple files in archive.',
                    'Please specify the archive_file argument.', 'Available files:'
                ] + filenames))
            open_f = archive.open
            if vector_size is None:
                # Zip streams are not seekable, so sniff the size here
                # instead of letting VecIOWrapper do it.
                with open_f(file) as g:
                    # sample 1 entry
                    first_line = g.readline()
                    first_line = first_line.split(b' ')
                    if len(first_line) == 2:
                        # could be a fasttext format file - read another line
                        first_line = g.readline()
                        first_line = first_line.split(b' ')
                        fasttext_format = True
                    vector_size = len(first_line) - 1
        with open_f(file) as g:
            yield VecIOWrapper(g, vector_size=vector_size,
                               fasttext_format=fasttext_format)
    finally:
        # Close the archive handle (the original leaked it).
        if archive is not None:
            archive.close()
def create_embedding_database(embeddings_file,
                              path_to_database,
                              datasize=None,
                              overwrite=False):
    """Create embedding store in leveldb.
    Arguments:
        embeddings_file {str} -- path to the downloaded embeddings file
            (plain text or zip archive, as accepted by open_embeddings_file)
        path_to_database {str} -- destination directory for the leveldb
            database; created if missing
    Keyword Arguments:
        datasize {int} -- total number of lines, used only to size the
            progress bar (default: {None})
        overwrite {bool} -- delete any existing database first (default: {False})
    """
    if overwrite:
        if os.path.exists(path_to_database):
            # NOTE(review): shells out to `rm -rf`; shutil.rmtree would be
            # portable — confirm POSIX-only deployment.
            call(['rm', '-rf', path_to_database])
    if not os.path.exists(path_to_database):
        os.makedirs(path_to_database)
    with open_leveldb(
            path_to_database,
            create_if_missing=True,
            error_if_exists=not overwrite) as db:
        # Flush to leveldb every 256 entries to bound batch memory.
        leveldb_write_batch = 256
        i = 0
        batch = db.write_batch()
        with open_embeddings_file(embeddings_file) as a:
            for key, embedding in tqdm(a, total=datasize):
                i += 1
                batch.put(key, embedding)
                if i % leveldb_write_batch == 0:
                    batch.write()
                    batch = db.write_batch()
        # Flush any remaining entries from the final partial batch.
        batch.write()
def populate_batch_buffer_leveldb(keyList, vectorList, database):
    """Thread-safe wrapper: write a batch of (key, vector) pairs to leveldb.

    Copies the inputs into local buffers, then serialises the write to
    *database* (a leveldb path) through the module-level lock.
    """
    keyBuffer = []
    vectorBuffer = []
    # 'with' guarantees the lock is released even if the write raises;
    # the original bare acquire()/release() pair would leave the lock held
    # forever on error, deadlocking every other writer thread.
    with lock:
        keyBuffer.extend(keyList)
        vectorBuffer.extend(vectorList)
        create_custom_embedding_database(keyBuffer, vectorBuffer, database,
                                         overwrite=False)
        keyBuffer.clear()
        vectorBuffer.clear()
def create_custom_embedding_database(topicList,
                                     vectorList,
                                     path_to_database,
                                     overwrite=False):
    """Create custom embedding store in leveldb.
    Arguments:
        topicList -- keys to serialise (str topics)
        vectorList -- vectors to serialise, parallel to topicList
        path_to_database {str} -- destination directory for the leveldb
            database; created if missing
    Keyword Arguments:
        overwrite {bool} -- delete any existing database first (default: {False})
    """
    t0 = time.time()
    if overwrite:
        if os.path.exists(path_to_database):
            # NOTE(review): shells out to `rm -rf`; POSIX-only.
            call(['rm', '-rf', path_to_database])
    if not os.path.exists(path_to_database):
        os.makedirs(path_to_database)
    with open_leveldb(
            path_to_database,
            block_size=65536,
            lru_cache_size=200000,
            bloom_filter_bits=10,
            create_if_missing=True,
            error_if_exists=False) as db:
        # Flush every 200 serialized entries.
        leveldb_write_batch = 200
        i = 0
        batch = db.write_batch()
        for topic, vector in zip(topicList, vectorList):
            try:
                # Vector dimensions can be changed here - 400 dimensions for sample
                key, value = CustomVectorIO(topic, vector, 400).vectortranslate()
                i += 1
                batch.put(key, value)
            except Exception:
                # Best-effort: log and skip entries that fail to serialize.
                traceback.print_exc()
                pass
            # NOTE(review): while i == 0 (first item failed) this condition is
            # true every iteration and writes an empty batch — harmless but odd.
            if i % leveldb_write_batch == 0:
                batch.write()
                batch = db.write_batch()
        batch.write()
        # NOTE(review): explicit close inside the `with` block — confirm
        # open_leveldb's __exit__ tolerates an already-closed db.
        db.close()
    t1 = time.time()
    print("Vector Batch Write Time", t1 - t0)
| 35.242009 | 130 | 0.584478 |
import os
import struct
from contextlib import contextmanager
from functools import partial
from io import BufferedReader, UnsupportedOperation
from subprocess import call
from zipfile import BadZipFile, ZipFile
from tqdm import tqdm
import time
from emstore.open import open_leveldb
import threading
import traceback
STRUCT_FORMAT = 'e'
class CustomVectorIO:
def __init__(self, topic, vector, vector_size):
self.topic = topic
self.vector = vector
self.vector_size = vector_size
self.vectortranslate()
def vectortranslate(self):
self.pack = struct.Struct(str(self.vector_size) + STRUCT_FORMAT).pack
return self.deriveKVs(self.topic, self.vector)
def deriveKVs(self, topic, vector):
v = [float(f) for f in vector]
return bytes(topic, 'utf-8'), self.pack(*v)
class VecIOWrapper(BufferedReader):
    """Buffered reader over a word-vector text file that yields
    (key_bytes, packed_vector_bytes) pairs per line.

    If vector_size is not given it is sniffed from the first line(s);
    fastText files start with a "<count> <dim>" header line, which is
    detected and skipped.
    """

    def __init__(self, *args, vector_size=None, fasttext_format=False, **kwargs):
        super().__init__(*args, **kwargs)
        if vector_size is None:
            try:
                vector_size, fasttext_format = self.infer_vector_size()
            except UnsupportedOperation:
                # infer_vector_size() needs seek(); a non-seekable stream
                # (e.g. a zip member) would lose the consumed lines.
                raise Exception(
                    '''Unable to infer vector size without read loss.
                    Please specify vector size''')
        self.vector_size = vector_size
        # Pre-compile the struct packer: vector_size half-floats per line.
        self.pack = struct.Struct(str(vector_size) + STRUCT_FORMAT).pack
        if fasttext_format:
            # Skip the fastText "<count> <dim>" header line.
            super().__next__()

    def __next__(self):
        # Drop the trailing newline, then split into token + components.
        line = super().__next__()[:-1]
        x = line.split(b' ')
        if len(x) > self.vector_size + 1:
            # Key itself contained spaces: everything before the last
            # vector_size fields is the key (spaces are lost in the join —
            # NOTE(review): b''.join drops them; presumably acceptable here).
            k, v = b''.join(x[:-self.vector_size]), x[-self.vector_size:]
        else:
            k, v = x[0], x[1:]
        v = [float(f) for f in v]
        return k, self.pack(*v)

    def infer_vector_size(self):
        """Return (vector_size, is_fasttext) sniffed from the file head.

        A two-field first line is treated as a fastText header and the
        second line is used for the dimension count. Rewinds to offset 0.
        """
        first_line = super().readline()
        first_line = first_line.split(b' ')
        fasttext_format = False
        if len(first_line) == 2:
            first_line = super().readline()
            first_line = first_line.split(b' ')
            fasttext_format = True
        self.seek(0)
        return len(first_line) - 1, fasttext_format
lock = threading.Lock()
@contextmanager
def open_embeddings_file(path, vector_size=None, archive_file=None):
    """Context manager yielding a VecIOWrapper over a vector file.

    Accepts either a plain text file or a zip archive; for archives the
    member is chosen via archive_file (mandatory when several members
    exist). vector_size / fastText format are sniffed when not supplied.
    """
    # BUG FIX: fasttext_format was only assigned inside the zip +
    # vector_size-is-None branch, so a plain (non-zip) file — or a zip
    # opened with an explicit vector_size — raised NameError at yield time.
    fasttext_format = False
    try:
        archive = ZipFile(path)
        filenames = [f.filename for f in archive.filelist]
        if len(filenames) == 0:
            raise Exception('Empty archive.')
        elif archive_file is not None:
            file = archive_file
        elif len(filenames) == 1:
            file = filenames[0]
        elif len(filenames) > 1:
            raise Exception('\n'.join([
                'Multiple files in archive.',
                'Please specify the archive_file argument.', 'Available files:'
            ] + filenames))
        open_f = archive.open
        if vector_size is None:
            # Zip members are not seekable, so sniff the header here and
            # pass explicit values to VecIOWrapper.
            with open_f(file) as g:
                first_line = g.readline()
                first_line = first_line.split(b' ')
                if len(first_line) == 2:
                    first_line = g.readline()
                    first_line = first_line.split(b' ')
                    fasttext_format = True
                vector_size = len(first_line) - 1
    except BadZipFile:
        # Not a zip: treat as a plain binary file. VecIOWrapper sniffs
        # vector_size/fastText itself when vector_size is None.
        file = path
        open_f = partial(open, mode='rb')
    with open_f(file) as g:
        yield VecIOWrapper(g, vector_size=vector_size,
                           fasttext_format=fasttext_format)
def create_embedding_database(embeddings_file,
                              path_to_database,
                              datasize=None,
                              overwrite=False):
    """Build a leveldb embeddings store from a word-vector file.

    Arguments:
    embeddings_file -- path to the source vector file (plain or zip)
    path_to_database -- destination leveldb directory (created if absent)
    datasize -- optional row count, only used for the progress bar total
    overwrite -- remove any existing database at the path first
    """
    if overwrite and os.path.exists(path_to_database):
        call(['rm', '-rf', path_to_database])
    if not os.path.exists(path_to_database):
        os.makedirs(path_to_database)
    with open_leveldb(
            path_to_database,
            create_if_missing=True,
            error_if_exists=not overwrite) as db:
        flush_every = 256
        batch = db.write_batch()
        with open_embeddings_file(embeddings_file) as vectors:
            # Group puts into fixed-size write batches to amortise I/O.
            for count, (key, embedding) in enumerate(
                    tqdm(vectors, total=datasize), start=1):
                batch.put(key, embedding)
                if count % flush_every == 0:
                    batch.write()
                    batch = db.write_batch()
        # Flush the final, partially filled batch.
        batch.write()
def populate_batch_buffer_leveldb(keyList, vectorList, database):
    """Serialise a batch of (key, vector) pairs into the database under
    the module-level lock, so concurrent callers do not interleave writes.
    """
    global lock
    keyBuffer = []
    vectorBuffer = []
    # BUG FIX: the original acquire()/release() pair leaked the lock
    # forever if create_custom_embedding_database raised; 'with' releases
    # it on any exit path.
    with lock:
        keyBuffer.extend(keyList)
        vectorBuffer.extend(vectorList)
        create_custom_embedding_database(keyBuffer, vectorBuffer, database,
                                         overwrite=False)
        keyBuffer.clear()
        vectorBuffer.clear()
def create_custom_embedding_database(topicList,
                                     vectorList,
                                     path_to_database,
                                     overwrite=False):
    """Create a custom embedding store in leveldb from parallel lists of
    keys (topicList) and vectors (vectorList).
    """
    t0 = time.time()
    if overwrite:
        if os.path.exists(path_to_database):
            call(['rm', '-rf', path_to_database])
    if not os.path.exists(path_to_database):
        os.makedirs(path_to_database)
    with open_leveldb(
            path_to_database,
            block_size=65536,
            lru_cache_size=200000,
            bloom_filter_bits=10,
            create_if_missing=True,
            error_if_exists=False) as db:
        leveldb_write_batch = 200
        i = 0
        batch = db.write_batch()
        for topic, vector in zip(topicList, vectorList):
            try:
                # 400 = fixed vector dimensionality for this store.
                key, value = CustomVectorIO(topic, vector, 400).vectortranslate()
            except Exception:
                # Best-effort: log the bad entry and keep going without
                # touching the batch counter (the original ran the batch
                # rotation check even on the exception path).
                traceback.print_exc()
                continue
            i += 1
            batch.put(key, value)
            if i % leveldb_write_batch == 0:
                batch.write()
                batch = db.write_batch()
        # Flush the final, partially filled batch.
        batch.write()
        # No explicit db.close(): open_leveldb is a context manager and
        # closes on exit (the original closed twice).
    t1 = time.time()
    print("Vector Batch Write Time", t1 - t0)
| true | true |
f73dba845e5a27dd797442fe6a7b56c90fe5b684 | 7,615 | py | Python | kubernetes-the-hard-way/system/collections/ansible_collections/community/general/plugins/cache/memcached.py | jkroepke/homelab | ffdd849e39b52972870f5552e734fd74cb1254a1 | [
"Apache-2.0"
] | 5 | 2020-12-16T21:42:09.000Z | 2022-03-28T16:04:32.000Z | kubernetes-the-hard-way/system/collections/ansible_collections/community/general/plugins/cache/memcached.py | jkroepke/kubernetes-the-hard-way | 70fd096a04addec0777744c9731a4e3fbdc40c8f | [
"Apache-2.0"
] | null | null | null | kubernetes-the-hard-way/system/collections/ansible_collections/community/general/plugins/cache/memcached.py | jkroepke/kubernetes-the-hard-way | 70fd096a04addec0777744c9731a4e3fbdc40c8f | [
"Apache-2.0"
] | null | null | null | # (c) 2014, Brian Coca, Josh Drake, et al
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
author: Unknown (!UNKNOWN)
cache: memcached
short_description: Use memcached DB for cache
description:
- This cache uses JSON formatted, per host records saved in memcached.
requirements:
- memcache (python lib)
options:
_uri:
description:
- List of connection information for the memcached DBs
default: ['127.0.0.1:11211']
type: list
env:
- name: ANSIBLE_CACHE_PLUGIN_CONNECTION
ini:
- key: fact_caching_connection
section: defaults
_prefix:
description: User defined prefix to use when creating the DB entries
default: ansible_facts
env:
- name: ANSIBLE_CACHE_PLUGIN_PREFIX
ini:
- key: fact_caching_prefix
section: defaults
_timeout:
default: 86400
description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire
env:
- name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
ini:
- key: fact_caching_timeout
section: defaults
type: integer
'''
import collections
import os
import time
from multiprocessing import Lock
from itertools import chain
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils.common._collections_compat import MutableSet
from ansible.plugins.cache import BaseCacheModule
from ansible.utils.display import Display
try:
import memcache
except ImportError:
raise AnsibleError("python-memcached is required for the memcached fact cache")
display = Display()
class ProxyClientPool(object):
    """
    Memcached connection pooling for thread/fork safety. Inspired by py-redis
    connection pool.

    Available connections are maintained in a deque and released in a FIFO manner.
    """

    def __init__(self, *args, **kwargs):
        # All positional/keyword args (minus max_connections) are forwarded
        # verbatim to memcache.Client when a new connection is created.
        self.max_connections = kwargs.pop('max_connections', 1024)
        self.connection_args = args
        self.connection_kwargs = kwargs
        self.reset()

    def reset(self):
        """Re-initialise pool state, binding it to the current process."""
        self.pid = os.getpid()
        self._num_connections = 0
        self._available_connections = collections.deque(maxlen=self.max_connections)
        self._locked_connections = set()
        self._lock = Lock()

    def _check_safe(self):
        # After a fork the child must not reuse the parent's sockets:
        # detect the pid change, then double-check under the lock so only
        # one thread performs the disconnect/reset.
        if self.pid != os.getpid():
            with self._lock:
                if self.pid == os.getpid():
                    # bail out - another thread already acquired the lock
                    return
                self.disconnect_all()
                self.reset()

    def get_connection(self):
        """Return a pooled connection, creating one if the deque is empty."""
        self._check_safe()
        try:
            connection = self._available_connections.popleft()
        except IndexError:
            connection = self.create_connection()
        self._locked_connections.add(connection)
        return connection

    def create_connection(self):
        """Create a fresh memcache client; raises when the pool is full."""
        if self._num_connections >= self.max_connections:
            raise RuntimeError("Too many memcached connections")
        self._num_connections += 1
        return memcache.Client(*self.connection_args, **self.connection_kwargs)

    def release_connection(self, connection):
        """Return a connection taken via get_connection back to the pool."""
        self._check_safe()
        self._locked_connections.remove(connection)
        self._available_connections.append(connection)

    def disconnect_all(self):
        # Close every connection, in-use or not (used on fork detection).
        for conn in chain(self._available_connections, self._locked_connections):
            conn.disconnect_all()

    def __getattr__(self, name):
        # Any unknown attribute becomes a proxied call on a pooled client.
        def wrapped(*args, **kwargs):
            return self._proxy_client(name, *args, **kwargs)
        return wrapped

    def _proxy_client(self, name, *args, **kwargs):
        # Borrow a connection, invoke the named method, always return the
        # connection to the pool.
        conn = self.get_connection()

        try:
            return getattr(conn, name)(*args, **kwargs)
        finally:
            self.release_connection(conn)
class CacheModuleKeys(MutableSet):
    """
    A set subclass that keeps track of insertion time and persists
    the set in memcached.
    """
    PREFIX = 'ansible_cache_keys'

    def __init__(self, cache, *args, **kwargs):
        self._cache = cache
        self._keyset = dict(*args, **kwargs)  # key -> insertion timestamp

    def __contains__(self, key):
        return key in self._keyset

    def __iter__(self):
        return iter(self._keyset)

    def __len__(self):
        return len(self._keyset)

    def add(self, key):
        """Record the key with the current time and persist the index."""
        self._keyset[key] = time.time()
        self._cache.set(self.PREFIX, self._keyset)

    def discard(self, key):
        del self._keyset[key]
        self._cache.set(self.PREFIX, self._keyset)

    def remove_by_timerange(self, s_min, s_max):
        """Drop every key whose timestamp falls strictly inside (s_min, s_max)."""
        # BUG FIX: iterate over a snapshot of the keys — deleting from the
        # dict while iterating its live .keys() view raises
        # "RuntimeError: dictionary changed size during iteration" in Python 3.
        for k in list(self._keyset.keys()):
            t = self._keyset[k]
            if s_min < t < s_max:
                del self._keyset[k]
        self._cache.set(self.PREFIX, self._keyset)
class CacheModule(BaseCacheModule):
    """Memcached-backed fact cache with a per-process read-through layer
    (self._cache) and a persisted key index (self._keys)."""

    def __init__(self, *args, **kwargs):
        connection = ['127.0.0.1:11211']

        try:
            # New-style config resolution through the plugin option system.
            super(CacheModule, self).__init__(*args, **kwargs)
            if self.get_option('_uri'):
                connection = self.get_option('_uri')
            self._timeout = self.get_option('_timeout')
            self._prefix = self.get_option('_prefix')
        except KeyError:
            # Legacy direct-import path: fall back to global constants.
            display.deprecated('Rather than importing CacheModules directly, '
                               'use ansible.plugins.loader.cache_loader',
                               version='2.0.0', collection_name='community.general')  # was Ansible 2.12
            if C.CACHE_PLUGIN_CONNECTION:
                connection = C.CACHE_PLUGIN_CONNECTION.split(',')
            self._timeout = C.CACHE_PLUGIN_TIMEOUT
            self._prefix = C.CACHE_PLUGIN_PREFIX

        self._cache = {}
        self._db = ProxyClientPool(connection, debug=0)
        self._keys = CacheModuleKeys(self._db, self._db.get(CacheModuleKeys.PREFIX) or [])

    def _make_key(self, key):
        # Namespaced memcached key: prefix + logical key.
        return "{0}{1}".format(self._prefix, key)

    def _expire_keys(self):
        # Prune index entries older than the timeout (0 = never expire).
        if self._timeout > 0:
            expiry_age = time.time() - self._timeout
            self._keys.remove_by_timerange(0, expiry_age)

    def get(self, key):
        """Return the cached value, raising KeyError when absent/expired."""
        if key not in self._cache:
            value = self._db.get(self._make_key(key))
            # guard against the key not being removed from the keyset;
            # this could happen in cases where the timeout value is changed
            # between invocations
            if value is None:
                # NOTE(review): delete() does `del self._cache[key]`, which
                # itself raises KeyError here (key is not locally cached),
                # so the db/keyset cleanup below it never runs — confirm
                # whether that early KeyError is the intended behavior.
                self.delete(key)
                raise KeyError
            self._cache[key] = value

        return self._cache.get(key)

    def set(self, key, value):
        """Write through to memcached and record the key in the index."""
        self._db.set(self._make_key(key), value, time=self._timeout, min_compress_len=1)
        self._cache[key] = value
        self._keys.add(key)

    def keys(self):
        self._expire_keys()
        return list(iter(self._keys))

    def contains(self, key):
        self._expire_keys()
        return key in self._keys

    def delete(self, key):
        # NOTE(review): raises KeyError when key is not in the local layer,
        # skipping the memcached delete and index discard below.
        del self._cache[key]
        self._db.delete(self._make_key(key))
        self._keys.discard(key)

    def flush(self):
        """Delete every known key."""
        for key in self.keys():
            self.delete(key)

    def copy(self):
        return self._keys.copy()

    def __getstate__(self):
        # Connection objects are not picklable; serialise nothing and
        # rebuild from config on unpickle.
        return dict()

    def __setstate__(self, data):
        self.__init__()
| 31.081633 | 104 | 0.624819 |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
author: Unknown (!UNKNOWN)
cache: memcached
short_description: Use memcached DB for cache
description:
- This cache uses JSON formatted, per host records saved in memcached.
requirements:
- memcache (python lib)
options:
_uri:
description:
- List of connection information for the memcached DBs
default: ['127.0.0.1:11211']
type: list
env:
- name: ANSIBLE_CACHE_PLUGIN_CONNECTION
ini:
- key: fact_caching_connection
section: defaults
_prefix:
description: User defined prefix to use when creating the DB entries
default: ansible_facts
env:
- name: ANSIBLE_CACHE_PLUGIN_PREFIX
ini:
- key: fact_caching_prefix
section: defaults
_timeout:
default: 86400
description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire
env:
- name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
ini:
- key: fact_caching_timeout
section: defaults
type: integer
'''
import collections
import os
import time
from multiprocessing import Lock
from itertools import chain
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils.common._collections_compat import MutableSet
from ansible.plugins.cache import BaseCacheModule
from ansible.utils.display import Display
try:
import memcache
except ImportError:
raise AnsibleError("python-memcached is required for the memcached fact cache")
display = Display()
class ProxyClientPool(object):
    """Memcached connection pool for thread/fork safety; available
    connections live in a deque and are reused FIFO."""

    def __init__(self, *args, **kwargs):
        # Args (minus max_connections) are forwarded to memcache.Client.
        self.max_connections = kwargs.pop('max_connections', 1024)
        self.connection_args = args
        self.connection_kwargs = kwargs
        self.reset()

    def reset(self):
        # Bind pool state to the current process id.
        self.pid = os.getpid()
        self._num_connections = 0
        self._available_connections = collections.deque(maxlen=self.max_connections)
        self._locked_connections = set()
        self._lock = Lock()

    def _check_safe(self):
        # Fork detection with a double-check under the lock: only one
        # thread disconnects and resets after a pid change.
        if self.pid != os.getpid():
            with self._lock:
                if self.pid == os.getpid():
                    # Another thread already handled the reset.
                    return
                self.disconnect_all()
                self.reset()

    def get_connection(self):
        # Pop a pooled connection or create one on demand.
        self._check_safe()
        try:
            connection = self._available_connections.popleft()
        except IndexError:
            connection = self.create_connection()
        self._locked_connections.add(connection)
        return connection

    def create_connection(self):
        # Hard cap on total connections for this pool.
        if self._num_connections >= self.max_connections:
            raise RuntimeError("Too many memcached connections")
        self._num_connections += 1
        return memcache.Client(*self.connection_args, **self.connection_kwargs)

    def release_connection(self, connection):
        self._check_safe()
        self._locked_connections.remove(connection)
        self._available_connections.append(connection)

    def disconnect_all(self):
        for conn in chain(self._available_connections, self._locked_connections):
            conn.disconnect_all()

    def __getattr__(self, name):
        # Unknown attributes become proxied calls on a pooled client.
        def wrapped(*args, **kwargs):
            return self._proxy_client(name, *args, **kwargs)
        return wrapped

    def _proxy_client(self, name, *args, **kwargs):
        # Borrow, call, and always release the connection.
        conn = self.get_connection()
        try:
            return getattr(conn, name)(*args, **kwargs)
        finally:
            self.release_connection(conn)
class CacheModuleKeys(MutableSet):
    """Set of cache keys with insertion timestamps, persisted in memcached."""

    PREFIX = 'ansible_cache_keys'

    def __init__(self, cache, *args, **kwargs):
        self._cache = cache
        self._keyset = dict(*args, **kwargs)  # key -> insertion timestamp

    def __contains__(self, key):
        return key in self._keyset

    def __iter__(self):
        return iter(self._keyset)

    def __len__(self):
        return len(self._keyset)

    def add(self, key):
        self._keyset[key] = time.time()
        self._cache.set(self.PREFIX, self._keyset)

    def discard(self, key):
        del self._keyset[key]
        self._cache.set(self.PREFIX, self._keyset)

    def remove_by_timerange(self, s_min, s_max):
        # BUG FIX: iterate over a snapshot — deleting while iterating the
        # live .keys() view raises RuntimeError in Python 3.
        for k in list(self._keyset.keys()):
            t = self._keyset[k]
            if s_min < t < s_max:
                del self._keyset[k]
        self._cache.set(self.PREFIX, self._keyset)
class CacheModule(BaseCacheModule):
    """Memcached-backed fact cache with a local read-through dict
    (self._cache) and a persisted key index (self._keys)."""

    def __init__(self, *args, **kwargs):
        connection = ['127.0.0.1:11211']
        try:
            # New-style configuration via the plugin option system.
            super(CacheModule, self).__init__(*args, **kwargs)
            if self.get_option('_uri'):
                connection = self.get_option('_uri')
            self._timeout = self.get_option('_timeout')
            self._prefix = self.get_option('_prefix')
        except KeyError:
            # Legacy direct-import path: fall back to global constants.
            display.deprecated('Rather than importing CacheModules directly, '
                               'use ansible.plugins.loader.cache_loader',
                               version='2.0.0', collection_name='community.general')
            if C.CACHE_PLUGIN_CONNECTION:
                connection = C.CACHE_PLUGIN_CONNECTION.split(',')
            self._timeout = C.CACHE_PLUGIN_TIMEOUT
            self._prefix = C.CACHE_PLUGIN_PREFIX
        self._cache = {}
        self._db = ProxyClientPool(connection, debug=0)
        self._keys = CacheModuleKeys(self._db, self._db.get(CacheModuleKeys.PREFIX) or [])

    def _make_key(self, key):
        # Namespaced memcached key: prefix + logical key.
        return "{0}{1}".format(self._prefix, key)

    def _expire_keys(self):
        # Prune index entries older than the timeout (0 = never expire).
        if self._timeout > 0:
            expiry_age = time.time() - self._timeout
            self._keys.remove_by_timerange(0, expiry_age)

    def get(self, key):
        """Return the cached value, raising KeyError when absent."""
        if key not in self._cache:
            value = self._db.get(self._make_key(key))
            # NOTE(review): on a miss, delete() does `del self._cache[key]`
            # which raises KeyError itself here, so the db/index cleanup in
            # delete() never executes — confirm intended behavior.
            if value is None:
                self.delete(key)
                raise KeyError
            self._cache[key] = value
        return self._cache.get(key)

    def set(self, key, value):
        # Write through to memcached and record the key in the index.
        self._db.set(self._make_key(key), value, time=self._timeout, min_compress_len=1)
        self._cache[key] = value
        self._keys.add(key)

    def keys(self):
        self._expire_keys()
        return list(iter(self._keys))

    def contains(self, key):
        self._expire_keys()
        return key in self._keys

    def delete(self, key):
        # NOTE(review): raises KeyError if key is not locally cached,
        # skipping the memcached delete and index discard below.
        del self._cache[key]
        self._db.delete(self._make_key(key))
        self._keys.discard(key)

    def flush(self):
        for key in self.keys():
            self.delete(key)

    def copy(self):
        return self._keys.copy()

    def __getstate__(self):
        # Connections are not picklable; rebuild from config on unpickle.
        return dict()

    def __setstate__(self, data):
        self.__init__()
| true | true |
f73dbaa4f5a730e9a1cb1b2b9201a52c75fbf4cd | 2,169 | py | Python | kubernetes/test/test_v1alpha1_dns_endpoint_spec_endpoints.py | mariusgheorghies/python | 68ac7e168963d8b5a81dc493b1973d29e903a15b | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1alpha1_dns_endpoint_spec_endpoints.py | mariusgheorghies/python | 68ac7e168963d8b5a81dc493b1973d29e903a15b | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1alpha1_dns_endpoint_spec_endpoints.py | mariusgheorghies/python | 68ac7e168963d8b5a81dc493b1973d29e903a15b | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.20.7
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import kubernetes.client
from kubernetes.client.models.v1alpha1_dns_endpoint_spec_endpoints import V1alpha1DNSEndpointSpecEndpoints # noqa: E501
from kubernetes.client.rest import ApiException
class TestV1alpha1DNSEndpointSpecEndpoints(unittest.TestCase):
    """Unit test stubs for the V1alpha1DNSEndpointSpecEndpoints model."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def make_instance(self, include_optional):
        """Build a V1alpha1DNSEndpointSpecEndpoints instance.

        When include_optional is False only required parameters are set;
        when True the optional parameters are populated as well.
        """
        if not include_optional:
            return V1alpha1DNSEndpointSpecEndpoints(
            )
        return V1alpha1DNSEndpointSpecEndpoints(
            dns_name='0',
            labels={
                'key': '0'
            },
            provider_specific=[
                kubernetes.client.models.v1alpha1_dns_endpoint_spec_provider_specific.v1alpha1_DNSEndpoint_spec_providerSpecific(
                    name='0',
                    value='0', )
            ],
            record_ttl=56,
            record_type='0',
            set_identifier='0',
            targets=[
                '0'
            ]
        )

    def testV1alpha1DNSEndpointSpecEndpoints(self):
        """Instantiate the model with and without optional fields."""
        required_only = self.make_instance(include_optional=False)
        with_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| 32.373134 | 133 | 0.628861 |
from __future__ import absolute_import
import unittest
import datetime
import kubernetes.client
from kubernetes.client.models.v1alpha1_dns_endpoint_spec_endpoints import V1alpha1DNSEndpointSpecEndpoints
from kubernetes.client.rest import ApiException
class TestV1alpha1DNSEndpointSpecEndpoints(unittest.TestCase):
    """Unit test stubs for the V1alpha1DNSEndpointSpecEndpoints model."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def make_instance(self, include_optional):
        """Build a model instance; optional fields only when requested."""
        # BUG FIX: the 'if' keyword had been stripped, leaving the bare
        # statement "include_optional :" — a SyntaxError.
        if include_optional:
            return V1alpha1DNSEndpointSpecEndpoints(
                dns_name='0',
                labels={
                    'key': '0'
                },
                provider_specific=[
                    kubernetes.client.models.v1alpha1_dns_endpoint_spec_provider_specific.v1alpha1_DNSEndpoint_spec_providerSpecific(
                        name='0',
                        value='0', )
                ],
                record_ttl=56,
                record_type='0',
                set_identifier='0',
                targets=[
                    '0'
                ]
            )
        else:
            return V1alpha1DNSEndpointSpecEndpoints(
            )

    def testV1alpha1DNSEndpointSpecEndpoints(self):
        """Instantiate the model with and without optional fields."""
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| true | true |
f73dbb17fbff75070ae6a18c9c1c8883035f31de | 26 | py | Python | modules/wifi/uniflex_module_wifi/__init__.py | danieldUKIM/uniflex_wishrem | 44ca1cfaafc33a83e856dbf9eaf9c1b83d0a477b | [
"Apache-2.0"
] | null | null | null | modules/wifi/uniflex_module_wifi/__init__.py | danieldUKIM/uniflex_wishrem | 44ca1cfaafc33a83e856dbf9eaf9c1b83d0a477b | [
"Apache-2.0"
] | 2 | 2018-02-02T07:58:14.000Z | 2018-02-05T12:43:32.000Z | modules/wifi/uniflex_module_wifi/__init__.py | danieldUKIM/uniflex_wishrem | 44ca1cfaafc33a83e856dbf9eaf9c1b83d0a477b | [
"Apache-2.0"
] | 4 | 2017-04-11T13:22:22.000Z | 2019-11-02T20:27:15.000Z | from .module_wifi import * | 26 | 26 | 0.807692 | from .module_wifi import * | true | true |
f73dbc51ad772452908776a726c9826c6f05703e | 1,764 | py | Python | app.py | Junhua9981/WebProjectFinal | 8db619b4196fa3bc684202ddb24a725c15e06d78 | [
"MIT"
] | null | null | null | app.py | Junhua9981/WebProjectFinal | 8db619b4196fa3bc684202ddb24a725c15e06d78 | [
"MIT"
] | null | null | null | app.py | Junhua9981/WebProjectFinal | 8db619b4196fa3bc684202ddb24a725c15e06d78 | [
"MIT"
] | null | null | null | from fastapi import FastAPI, Depends
from fastapi.middleware.cors import CORSMiddleware
from auth.jwt_bearer import JWTBearer
# from routes.student import router as StudentRouter
# from routes.admin import router as AdminRouter
from routes.user import router as UserRouter
from routes.teacher import router as TeacherRouter
from routes.comments import router as CommentRouter
from routes.build_db import router as BuildDBRouter
from routes.view_teacher import router as ViewTeacherRouter
from decouple import config
# Application object and the JWT auth dependency shared by protected routes.
app = FastAPI()
token_listener = JWTBearer()

# NOTE(review): this list is defined but not passed to the CORS middleware
# below (allow_origins=['*'] is used instead) — confirm which is intended.
origins = [
    "http://localhost.tiangolo.com",
    "https://localhost.tiangolo.com",
    "http://localhost:5501",
    "http://localhost:8080",
    "http://127.0.0.1:5501",
    "https://webprojfrontend.herokuapp.com",
    "https://professorratingsystem.herokuapp.com/"
]

# CORS: currently wide open (any origin/method/header, credentials allowed).
app.add_middleware(CORSMiddleware,
                   allow_origins=['*'],
                   allow_credentials=True,
                   allow_methods=['*'],
                   allow_headers=['*']
                   )

# Secrets/connection strings come from the environment via python-decouple.
JWT_SECRET = config('secret')
MOGO = config('MONGO_DETAILS')
@app.get("/", tags=["Root"])
async def read_root():
    """Landing endpoint: confirms the API is up."""
    # Plain literal: the original used an f-string with no placeholders.
    return {"message": "Welcome to this fantastic app."}
# Mount the feature routers; /teacher requires a valid JWT (token_listener).
# app.include_router(AdminRouter, tags=["Administrator"], prefix="/admin")
app.include_router(UserRouter, tags=["Users"], prefix="/user")
app.include_router(BuildDBRouter, tags=["BuildDB"], prefix="/build_db")
# app.include_router(StudentRouter, tags=["Students"], prefix="/student")
app.include_router(CommentRouter, tags=["Comments"], prefix="/comment")
app.include_router(TeacherRouter, tags=["Teachers"], prefix="/teacher", dependencies=[Depends(token_listener)])
app.include_router(ViewTeacherRouter, tags=["ViewTeachers"], prefix="/view_teacher")
from fastapi.middleware.cors import CORSMiddleware
from auth.jwt_bearer import JWTBearer
from routes.user import router as UserRouter
from routes.teacher import router as TeacherRouter
from routes.comments import router as CommentRouter
from routes.build_db import router as BuildDBRouter
from routes.view_teacher import router as ViewTeacherRouter
from decouple import config
app = FastAPI()
token_listener = JWTBearer()
origins = [
"http://localhost.tiangolo.com",
"https://localhost.tiangolo.com",
"http://localhost:5501",
"http://localhost:8080",
"http://127.0.0.1:5501",
"https://webprojfrontend.herokuapp.com",
"https://professorratingsystem.herokuapp.com/"
]
app.add_middleware(CORSMiddleware,
allow_origins=['*'],
allow_credentials=True,
allow_methods=['*'],
allow_headers=['*']
)
JWT_SECRET = config('secret')
MOGO = config('MONGO_DETAILS')
@app.get("/", tags=["Root"])
async def read_root():
    """Landing endpoint: confirms the API is up."""
    # Plain literal: the original used an f-string with no placeholders.
    return {"message": "Welcome to this fantastic app."}
# Mount the feature routers; /teacher requires a valid JWT (token_listener).
app.include_router(UserRouter, tags=["Users"], prefix="/user")
app.include_router(BuildDBRouter, tags=["BuildDB"], prefix="/build_db")
app.include_router(CommentRouter, tags=["Comments"], prefix="/comment")
app.include_router(TeacherRouter, tags=["Teachers"], prefix="/teacher", dependencies=[Depends(token_listener)])
app.include_router(ViewTeacherRouter, tags=["ViewTeachers"], prefix="/view_teacher")
f73dbc65a6e1608b55a7281381ea2a4dde32b688 | 435 | py | Python | kamui/entrypoints/rest/topic/__init__.py | thepabloaguilar/kamui | b740d0fd4ff722a48e1e2e71c17aa5f1453cc57a | [
"Apache-2.0"
] | 5 | 2020-06-22T12:29:36.000Z | 2020-12-25T16:08:15.000Z | kamui/entrypoints/rest/topic/__init__.py | thepabloaguilar/kamui | b740d0fd4ff722a48e1e2e71c17aa5f1453cc57a | [
"Apache-2.0"
] | 280 | 2020-06-22T14:22:06.000Z | 2022-03-31T11:03:55.000Z | kamui/entrypoints/rest/topic/__init__.py | thepabloaguilar/kamui | b740d0fd4ff722a48e1e2e71c17aa5f1453cc57a | [
"Apache-2.0"
] | null | null | null | from flask import Blueprint
from flask_restful import Api
from .get_topic_schema import GetTopicSchemaResource
from .get_topic_names import GetTopicNamesResource
rest_topic_bp = Blueprint("rest_topic", __name__)
rest_topic_api = Api(rest_topic_bp, prefix="/api")
rest_topic_api.add_resource(GetTopicSchemaResource, GetTopicSchemaResource.API_PATH)
rest_topic_api.add_resource(GetTopicNamesResource, GetTopicNamesResource.API_PATH)
| 33.461538 | 84 | 0.866667 | from flask import Blueprint
from flask_restful import Api
from .get_topic_schema import GetTopicSchemaResource
from .get_topic_names import GetTopicNamesResource
rest_topic_bp = Blueprint("rest_topic", __name__)
rest_topic_api = Api(rest_topic_bp, prefix="/api")
rest_topic_api.add_resource(GetTopicSchemaResource, GetTopicSchemaResource.API_PATH)
rest_topic_api.add_resource(GetTopicNamesResource, GetTopicNamesResource.API_PATH)
| true | true |
f73dbd0f0c9773be3f22210c1746a840d0094f83 | 68,877 | py | Python | test/integration/test_resource_controller_v2.py | JonahFarc/platform-services-python-sdk | f5cee0d629ce81048680e19c81cea8448ecab217 | [
"Apache-2.0"
] | 10 | 2020-04-02T15:48:33.000Z | 2021-06-23T05:12:49.000Z | test/integration/test_resource_controller_v2.py | JonahFarc/platform-services-python-sdk | f5cee0d629ce81048680e19c81cea8448ecab217 | [
"Apache-2.0"
] | 151 | 2020-03-30T20:24:39.000Z | 2022-03-30T16:51:22.000Z | test/integration/test_resource_controller_v2.py | JonahFarc/platform-services-python-sdk | f5cee0d629ce81048680e19c81cea8448ecab217 | [
"Apache-2.0"
] | 25 | 2020-04-16T21:03:19.000Z | 2021-12-13T19:37:39.000Z | # coding: utf-8
# Copyright 2019, 2020 IBM All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This class contains an integration test for the Resource Controller service.
"""
import pytest
import unittest
import os
import os.path
import uuid
import time
from ibm_cloud_sdk_core import *
from ibm_platform_services.resource_controller_v2 import *
# Read config file
configFile = 'resource_controller.env'
results_per_page = 20
class TestResourceControllerV2(unittest.TestCase):
"""
Integration Test Class for ResourceControllerV2
"""
    @classmethod
    def setUpClass(cls):
        """Load external config, build the service client, and reset all
        per-run state; skips the whole suite when the env file is absent."""
        if os.path.exists(configFile):
            os.environ['IBM_CREDENTIALS_FILE'] = configFile
            cls.config = read_external_sources(
                ResourceControllerV2.DEFAULT_SERVICE_NAME)
            cls.testAccountId = cls.config['ACCOUNT_ID']
            cls.testResourceGroupGuid = cls.config['RESOURCE_GROUP']
            cls.testOrgGuid = cls.config['ORGANIZATION_GUID']
            cls.testSpaceGuid = cls.config['SPACE_GUID']
            cls.testAppGuid = cls.config['APPLICATION_GUID']
            cls.testPlanId1 = cls.config['PLAN_ID']
            cls.testPlanId2 = cls.config['RECLAMATION_PLAN_ID']
        else:
            raise unittest.SkipTest(
                'External configuration not available, skipping...')

        cls.service = ResourceControllerV2.new_instance()
        assert cls.service is not None

        # IDs captured by earlier tests and consumed by later ones; the
        # numeric test-name ordering enforces the dependency chain.
        cls.testInstanceCrn = ''
        cls.testInstanceGuid = ''
        cls.testAliasCrn = ''
        cls.testAliasGuid = ''
        cls.testBindingCrn = ''
        cls.testBindingGuid = ''
        cls.testInstanceKeyCrn = ''
        cls.testInstanceKeyGuid = ''
        cls.testAliasKeyCrn = ''
        cls.testAliasKeyGuid = ''
        cls.aliasTargetCrn = ''
        cls.bindTargetCrn = ''
        cls.testReclaimInstanceCrn = ''
        cls.testReclaimInstanceGuid = ''
        cls.testReclamationId1 = ''
        cls.testReclamationId2 = ''
        cls.testRegionId1 = 'global'
        cls.testRegionId2 = 'global'

        # Fixed resource names so pre/post cleanup can find leftovers.
        cls.reclaimInstanceName = 'RcSdkReclaimInstance1'
        cls.lockedInstanceNameUpdate = 'RcSdkLockedInstanceUpdate1'
        cls.instanceNames = {'name': 'RcSdkInstance1Python',
                             'update': 'RcSdkInstanceUpdate1Python'}
        cls.keyNames = {'name': 'RcSdkKey1Python', 'update': 'RcSdkKeyUpdate1Python',
                        'name2': 'RcSdkKey2Python', 'update2': 'RcSdkKeyUpdate2Python'}
        cls.bindingNames = {'name': 'RcSdkBinding1Python',
                            'update': 'RcSdkBindingUpdate1Python'}
        cls.aliasNames = {'name': 'RcSdkAlias1Python',
                          'update': 'RcSdkAliasUpdate1Python'}

        # One transaction id per run, embedded in every request header for
        # service-side tracing.
        cls.transactionId = str(uuid.uuid4())
        print('\nTransaction-Id for Test Run: ' + cls.transactionId)

        # Remove resources left behind by previous (failed) runs.
        print('\nBegin pre-test clean up by name.')
        cls.cleanupByName()
        print('\nPre-test cleanup done.')
        print('\nSetup complete.')
    @classmethod
    def tearDownClass(cls):
        """Best-effort removal of everything the suite created (tracked
        resources, the reclamation instance, then any leftovers by name)."""
        cls.cleanupResources()
        cls.cleanupReclamationInstance()
        cls.cleanupByName()
        print('\nClean up complete.')
    def test_00_create_resource_instance(self):
        """Create the primary test instance and capture its CRN/GUID for
        the later tests in the chain."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test00-" + \
            self.transactionId

        response = self.service.create_resource_instance(
            self.instanceNames['name'],
            self.testRegionId1,
            self.testResourceGroupGuid,
            self.testPlanId1,
            headers=customHeaders
        )
        assert response is not None
        assert response.get_status_code() == 201

        result = response.get_result()
        assert result is not None
        assert result.get('id') is not None
        assert result.get('guid') is not None
        assert result.get('crn') is not None
        assert result.get('id') == result.get('crn')
        assert result.get('name') == self.instanceNames['name']
        assert result.get('account_id') == self.testAccountId
        assert result.get('resource_group_id') == self.testResourceGroupGuid
        assert result.get('resource_plan_id') == self.testPlanId1
        assert result.get('state') == "active"
        assert not result.get('locked')
        assert result.get('last_operation').get('type') == "create"
        assert not result.get('last_operation').get('async')
        assert result.get('last_operation').get('state') == "succeeded"

        # Stash on the class so subsequent tests can reference the instance.
        self.__class__.testInstanceCrn = result.get('id')
        self.__class__.testInstanceGuid = result.get('guid')
        assert self.testInstanceCrn != ''
        assert self.testInstanceGuid != ''
    def test_01_get_resource_instance(self):
        """Fetch the instance created in test_00 and verify its fields."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test01-" + \
            self.transactionId

        response = self.service.get_resource_instance(
            self.testInstanceGuid, headers=customHeaders)
        assert response is not None
        assert response.get_status_code() == 200

        result = response.get_result()
        assert result is not None
        assert result.get('id') == self.testInstanceCrn
        assert result.get('guid') == self.testInstanceGuid
        assert result.get('crn') == self.testInstanceCrn
        assert result.get('name') == self.instanceNames['name']
        assert result.get('account_id') == self.testAccountId
        assert result.get('resource_group_id') == self.testResourceGroupGuid
        assert result.get('resource_plan_id') == self.testPlanId1
        assert result.get('state') == "active"
        assert not result.get('locked')
        assert result.get('last_operation').get('type') == "create"
        assert not result.get('last_operation').get('async')
        assert result.get('last_operation').get('state') == "succeeded"
    def test_02_update_resource_instance(self):
        """Rename the instance and attach a parameter; verify the update
        is reflected in last_operation."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test02-" + \
            self.transactionId

        params = {}
        params["hello"] = "bye"
        response = self.service.update_resource_instance(
            self.testInstanceGuid,
            name=self.instanceNames['update'],
            parameters=params,
            headers=customHeaders
        )
        assert response is not None
        assert response.get_status_code() == 200

        result = response.get_result()
        assert result is not None
        assert result.get('id') == self.testInstanceCrn
        assert result.get('name') == self.instanceNames['update']
        assert result.get('state') == "active"
        assert result.get('last_operation').get('type') == "update"
        assert result.get('last_operation').get('sub_type') == "config"
        assert not result.get('last_operation').get('async')
        assert result.get('last_operation').get('state') == "succeeded"
def test_03_list_resource_instances_no_filter(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test03-" + \
self.transactionId
start = None
while True:
response = self.service.list_resource_instances(
limit=results_per_page,
start=start,
headers=customHeaders)
assert response is not None
assert response.get_status_code() == 200
result = response.get_result()
assert result is not None
assert len(result.get('resources')) >= 1
assert len(result.get('resources')) <= results_per_page
assert result.get('rows_count') >= 1
assert result.get('rows_count') <= results_per_page
start = get_query_param(result.get('next_url'), 'start')
if start is None:
break
def test_04_list_resource_instances_by_guid(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test04-" + \
self.transactionId
response = self.service.list_resource_instances(
guid=self.testInstanceGuid, headers=customHeaders)
assert response is not None
assert response.get_status_code() == 200
result = response.get_result()
assert result is not None
assert len(result.get('resources')) == 1
assert result.get('rows_count') == 1
instance = result.get('resources')[0]
assert instance.get('id') == self.testInstanceCrn
assert instance.get('guid') == self.testInstanceGuid
assert instance.get('name') == self.instanceNames['update']
assert instance.get('state') == "active"
assert instance.get('last_operation').get('type') == "update"
assert instance.get('last_operation').get('sub_type') == "config"
assert not instance.get('last_operation').get('async')
assert instance.get('last_operation').get('state') == "succeeded"
def test_05_list_resource_instances_by_name(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test05-" + \
self.transactionId
response = self.service.list_resource_instances(
name=self.instanceNames['update'],
headers=customHeaders)
assert response is not None
assert response.get_status_code() == 200
result = response.get_result()
assert result is not None
assert len(result.get('resources')) == 1
assert result.get('rows_count') == 1
    def test_06_create_resource_alias(self):
        """Create an alias for the instance against a CF space and record its ids.

        Saves ``testAliasCrn``/``testAliasGuid`` (and the expected
        ``aliasTargetCrn``) on the class for the tests that follow.
        """
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test06-" + \
            self.transactionId
        # NOTE: the request target uses the ':bluemix:' service segment,
        # while the CRN expected back in 'target_crn' uses ':cf:' —
        # the two literals differ only in that one segment.
        target = "crn:v1:bluemix:public:bluemix:us-south:o/" + \
            self.testOrgGuid + "::cf-space:" + self.testSpaceGuid
        self.__class__.aliasTargetCrn = "crn:v1:bluemix:public:cf:us-south:o/" + \
            self.testOrgGuid + "::cf-space:" + self.testSpaceGuid
        assert self.aliasTargetCrn != ''
        response = self.service.create_resource_alias(
            self.aliasNames['name'],
            self.testInstanceGuid,
            target,
            headers=customHeaders
        )
        assert response is not None
        assert response.get_status_code() == 201
        result = response.get_result()
        assert result is not None
        assert result.get('id') is not None
        assert result.get('guid') is not None
        assert result.get('crn') is not None
        assert result.get('id') == result.get('crn')
        assert result.get('name') == self.aliasNames['name']
        assert result.get('account_id') == self.testAccountId
        assert result.get('resource_group_id') == self.testResourceGroupGuid
        assert result.get('target_crn') == self.aliasTargetCrn
        assert result.get('state') == "active"
        assert result.get('resource_instance_id') == self.testInstanceCrn
        # Stash the new alias identifiers for later tests in the sequence.
        self.__class__.testAliasCrn = result.get('id')
        self.__class__.testAliasGuid = result.get('guid')
        assert self.testAliasCrn != ''
        assert self.testAliasGuid != ''
def test_07_get_resource_alias(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test07-" + \
self.transactionId
response = self.service.get_resource_alias(
self.testAliasGuid, headers=customHeaders)
assert response is not None
assert response.get_status_code() == 200
result = response.get_result()
assert result is not None
assert result.get('id') == self.testAliasCrn
assert result.get('guid') == self.testAliasGuid
assert result.get('crn') == self.testAliasCrn
assert result.get('name') == self.aliasNames['name']
assert result.get('account_id') == self.testAccountId
assert result.get('resource_group_id') == self.testResourceGroupGuid
assert result.get('target_crn') == self.aliasTargetCrn
assert result.get('state') == "active"
assert result.get('resource_instance_id') == self.testInstanceCrn
def test_08_update_resource_alias(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test08-" + \
self.transactionId
response = self.service.update_resource_alias(
self.testAliasGuid,
name=self.aliasNames['update'],
headers=customHeaders
)
assert response is not None
assert response.get_status_code() == 200
result = response.get_result()
assert result is not None
assert result.get('id') == self.testAliasCrn
assert result.get('name') == self.aliasNames['update']
assert result.get('state') == "active"
def test_09_list_resource_aliases_no_filter(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test09-" + \
self.transactionId
start = None
while True:
response = self.service.list_resource_aliases(
limit=results_per_page,
start=start,
headers=customHeaders)
assert response is not None
assert response.get_status_code() == 200
result = response.get_result()
assert result is not None
assert len(result.get('resources')) >= 1
assert len(result.get('resources')) <= results_per_page
assert result.get('rows_count') >= 1
assert result.get('rows_count') <= results_per_page
start = get_query_param(result.get('next_url'), 'start')
if start is None:
break
def test_10_list_resource_aliases_by_guid(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test10-" + \
self.transactionId
response = self.service.list_resource_aliases(
guid=self.testAliasGuid, headers=customHeaders)
assert response is not None
assert response.get_status_code() == 200
result = response.get_result()
assert result is not None
assert len(result.get('resources')) == 1
assert result.get('rows_count') == 1
alias = result.get('resources')[0]
assert alias.get('id') == self.testAliasCrn
assert alias.get('guid') == self.testAliasGuid
assert alias.get('name') == self.aliasNames['update']
assert alias.get('resource_group_id') == self.testResourceGroupGuid
assert alias.get('target_crn') == self.aliasTargetCrn
assert alias.get('state') == "active"
assert alias.get('resource_instance_id') == self.testInstanceCrn
def test_11_list_resource_aliases_by_name(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test11-" + \
self.transactionId
response = self.service.list_resource_aliases(
name=self.aliasNames['update'],
headers=customHeaders)
assert response is not None
assert response.get_status_code() == 200
result = response.get_result()
assert result is not None
assert len(result.get('resources')) == 1
assert result.get('rows_count') == 1
def test_11a_list_resource_aliases_for_instance(self):
assert self.testInstanceGuid is not None
start = None
while True:
response = self.service.list_resource_aliases_for_instance(
id=self.testInstanceGuid,
limit=results_per_page,
start=start)
assert response is not None
assert response.get_status_code() == 200
result = response.get_result()
assert result is not None
assert len(result.get('resources')) == 1
assert result.get('rows_count') == 1
start = get_query_param(result.get('next_url'), 'start')
if start is None:
break
    def test_12_create_resource_binding(self):
        """Bind the alias to a CF application and record the binding's ids.

        Saves ``testBindingCrn``/``testBindingGuid`` (and the expected
        ``bindTargetCrn``) on the class for the tests that follow.
        """
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test12-" + \
            self.transactionId
        parameters = {
            'parameter1': 'value1',
            'parameter2': 'value2'
        }
        # NOTE: the request target uses the ':bluemix:' service segment,
        # while the CRN expected back in 'target_crn' uses ':cf:' —
        # the two literals differ only in that one segment.
        target = "crn:v1:staging:public:bluemix:us-south:s/" + \
            self.testSpaceGuid + "::cf-application:" + self.testAppGuid
        self.__class__.bindTargetCrn = "crn:v1:staging:public:cf:us-south:s/" + \
            self.testSpaceGuid + "::cf-application:" + self.testAppGuid
        assert self.bindTargetCrn != ''
        response = self.service.create_resource_binding(
            source=self.testAliasGuid,
            target=target,
            name=self.bindingNames['name'],
            parameters=parameters,
            headers=customHeaders
        )
        assert response is not None
        assert response.get_status_code() == 201
        result = response.get_result()
        assert result is not None
        assert result.get('id') is not None
        assert result.get('guid') is not None
        assert result.get('crn') is not None
        assert result.get('id') == result.get('crn')
        assert result.get('name') == self.bindingNames['name']
        assert result.get('account_id') == self.testAccountId
        assert result.get('resource_group_id') == self.testResourceGroupGuid
        assert result.get('source_crn') == self.testAliasCrn
        assert result.get('target_crn') == self.bindTargetCrn
        assert result.get('state') == "active"
        # Stash the new binding identifiers for later tests in the sequence.
        self.__class__.testBindingCrn = result.get('id')
        self.__class__.testBindingGuid = result.get('guid')
        assert self.testBindingCrn != ''
        assert self.testBindingGuid != ''
def test_13_get_resource_binding(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test13-" + \
self.transactionId
response = self.service.get_resource_binding(
self.testBindingGuid, headers=customHeaders)
assert response is not None
assert response.get_status_code() == 200
result = response.get_result()
assert result is not None
assert result.get('id') == self.testBindingCrn
assert result.get('guid') == self.testBindingGuid
assert result.get('crn') == self.testBindingCrn
assert result.get('name') == self.bindingNames['name']
assert result.get('account_id') == self.testAccountId
assert result.get('resource_group_id') == self.testResourceGroupGuid
assert result.get('source_crn') == self.testAliasCrn
assert result.get('target_crn') == self.bindTargetCrn
assert result.get('state') == "active"
def test_14_update_resource_binding(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test14-" + \
self.transactionId
response = self.service.update_resource_binding(
self.testBindingGuid,
self.bindingNames['update'],
headers=customHeaders)
assert response is not None
assert response.get_status_code() == 200
result = response.get_result()
assert result is not None
assert result.get('id') == self.testBindingCrn
assert result.get('name') == self.bindingNames['update']
assert result.get('state') == "active"
def test_15_list_resource_bindings_no_filter(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test15-" + \
self.transactionId
start = None
while True:
response = self.service.list_resource_bindings(
limit=results_per_page,
start=start,
headers=customHeaders)
assert response is not None
assert response.get_status_code() == 200
result = response.get_result()
assert result is not None
assert len(result.get('resources')) >= 1
assert len(result.get('resources')) <= results_per_page
assert result.get('rows_count') >= 1
assert result.get('rows_count') <= results_per_page
start = get_query_param(result.get('next_url'), 'start')
if start is None:
break
def test_16_list_resource_bindings_by_guid(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test16-" + \
self.transactionId
response = self.service.list_resource_bindings(
guid=self.testBindingGuid, headers=customHeaders)
assert response is not None
assert response.get_status_code() == 200
result = response.get_result()
assert result is not None
assert len(result.get('resources')) == 1
assert result.get('rows_count') == 1
binding = result.get('resources')[0]
assert binding.get('id') == self.testBindingCrn
assert binding.get('guid') == self.testBindingGuid
assert binding.get('name') == self.bindingNames['update']
assert binding.get('resource_group_id') == self.testResourceGroupGuid
assert binding.get('source_crn') == self.testAliasCrn
assert binding.get('target_crn') == self.bindTargetCrn
assert binding.get('state') == "active"
def test_17_list_resource_bindings_by_name(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test17-" + \
self.transactionId
response = self.service.list_resource_bindings(
name=self.bindingNames['update'],
headers=customHeaders)
assert response is not None
assert response.get_status_code() == 200
result = response.get_result()
assert result is not None
assert len(result.get('resources')) == 1
assert result.get('rows_count') == 1
def test_17a_list_resource_bindings_for_alias(self):
assert self.testAliasGuid is not None
start = None
while True:
response = self.service.list_resource_bindings_for_alias(
id=self.testAliasGuid,
limit=results_per_page,
start=start)
assert response is not None
assert response.get_status_code() == 200
result = response.get_result()
assert result is not None
assert len(result.get('resources')) == 1
assert result.get('rows_count') == 1
start = get_query_param(result.get('next_url'), 'start')
if start is None:
break
def test_18_create_resource_key_for_instance(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test18-" + \
self.transactionId
parameters = {
'parameter1': 'value1',
'parameter2': 'value2'
}
response = self.service.create_resource_key(
name=self.keyNames['name'],
source=self.testInstanceGuid,
parameters=parameters,
headers=customHeaders)
assert response is not None
assert response.get_status_code() == 201
result = response.get_result()
assert result is not None
assert result.get('id') is not None
assert result.get('guid') is not None
assert result.get('crn') is not None
assert result.get('id') == result.get('crn')
assert result.get('name') == self.keyNames['name']
assert result.get('account_id') == self.testAccountId
assert result.get('resource_group_id') == self.testResourceGroupGuid
assert result.get('source_crn') == self.testInstanceCrn
assert result.get('state') == "active"
self.__class__.testInstanceKeyCrn = result.get('id')
self.__class__.testInstanceKeyGuid = result.get('guid')
assert self.testInstanceKeyCrn != ''
assert self.testInstanceKeyGuid != ''
def test_19_get_resource_key(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test19-" + \
self.transactionId
response = self.service.get_resource_key(
self.testInstanceKeyGuid, headers=customHeaders)
assert response is not None
assert response.get_status_code() == 200
result = response.get_result()
assert result is not None
assert result.get('id') == self.testInstanceKeyCrn
assert result.get('guid') == self.testInstanceKeyGuid
assert result.get('crn') == self.testInstanceKeyCrn
assert result.get('name') == self.keyNames['name']
assert result.get('account_id') == self.testAccountId
assert result.get('resource_group_id') == self.testResourceGroupGuid
assert result.get('source_crn') == self.testInstanceCrn
assert result.get('state') == "active"
def test_20_update_resource_key(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test20-" + \
self.transactionId
response = self.service.update_resource_key(
self.testInstanceKeyGuid, self.keyNames['update'], headers=customHeaders)
assert response is not None
assert response.get_status_code() == 200
result = response.get_result()
assert result is not None
assert result.get('id') == self.testInstanceKeyCrn
assert result.get('name') == self.keyNames['update']
assert result.get('state') == "active"
def test_21_list_resource_keys_no_filter(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test21-" + \
self.transactionId
start = None
result_count = 0
while True:
response = self.service.list_resource_keys(
limit=results_per_page,
start=start,
headers=customHeaders)
assert response is not None
assert response.get_status_code() == 200
result = response.get_result()
assert result is not None
result_count+= result.get('rows_count')
start = get_query_param(result.get('next_url'), 'start')
if start is None:
break
assert result_count > 0
def test_22_list_resource_keys_by_guid(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test22-" + \
self.transactionId
response = self.service.list_resource_keys(
guid=self.testInstanceKeyGuid, headers=customHeaders)
assert response is not None
assert response.get_status_code() == 200
result = response.get_result()
assert result is not None
assert len(result.get('resources')) == 1
assert result.get('rows_count') == 1
key = result.get('resources')[0]
assert key.get('id') == self.testInstanceKeyCrn
assert key.get('guid') == self.testInstanceKeyGuid
assert key.get('name') == self.keyNames['update']
assert key.get('resource_group_id') == self.testResourceGroupGuid
assert key.get('source_crn') == self.testInstanceCrn
assert key.get('state') == "active"
def test_23_list_resource_keys_by_name(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test23-" + \
self.transactionId
response = self.service.list_resource_keys(
name=self.keyNames['update'], headers=customHeaders)
assert response is not None
assert response.get_status_code() == 200
result = response.get_result()
assert result is not None
assert len(result.get('resources')) == 1
assert result.get('rows_count') == 1
def test_23a_list_resource_keys_for_instance(self):
assert self.testInstanceGuid is not None
start = None
while True:
response = self.service.list_resource_keys_for_instance(
id=self.testInstanceGuid,
limit=results_per_page,
start=start)
assert response is not None
assert response.get_status_code() == 200
result = response.get_result()
assert result is not None
assert len(result.get('resources')) == 1
assert result.get('rows_count') == 1
start = get_query_param(result.get('next_url'), 'start')
if start is None:
break
def test_24_create_resource_key_for_alias(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test24-" + \
self.transactionId
response = self.service.create_resource_key(
self.keyNames['name2'],
self.testAliasGuid,
headers=customHeaders)
assert response is not None
assert response.get_status_code() == 201
result = response.get_result()
assert result is not None
assert result.get('id') is not None
assert result.get('guid') is not None
assert result.get('crn') is not None
assert result.get('id') == result.get('crn')
assert result.get('name') == self.keyNames['name2']
assert result.get('account_id') == self.testAccountId
assert result.get('resource_group_id') == self.testResourceGroupGuid
assert result.get('source_crn') == self.testAliasCrn
assert result.get('state') == "active"
self.__class__.testAliasKeyCrn = result.get('id')
self.__class__.testAliasKeyGuid = result.get('guid')
assert self.testAliasKeyCrn != ''
assert self.testAliasKeyCrn != ''
def test_25_get_resource_key(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test25-" + \
self.transactionId
response = self.service.get_resource_key(
self.testAliasKeyGuid, headers=customHeaders)
assert response is not None
assert response.get_status_code() == 200
result = response.get_result()
assert result is not None
assert result.get('id') == self.testAliasKeyCrn
assert result.get('guid') == self.testAliasKeyGuid
assert result.get('crn') == self.testAliasKeyCrn
assert result.get('name') == self.keyNames['name2']
assert result.get('account_id') == self.testAccountId
assert result.get('resource_group_id') == self.testResourceGroupGuid
assert result.get('source_crn') == self.testAliasCrn
assert result.get('state') == "active"
def test_26_update_resource_key(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test26-" + \
self.transactionId
response = self.service.update_resource_key(
self.testAliasKeyGuid, self.keyNames['update2'], headers=customHeaders)
assert response is not None
assert response.get_status_code() == 200
result = response.get_result()
assert result is not None
assert result.get('id') == self.testAliasKeyCrn
assert result.get('name') == self.keyNames['update2']
assert result.get('state') == "active"
def test_27_list_resource_keys_no_filter(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test27-" + \
self.transactionId
response = self.service.list_resource_keys(headers=customHeaders)
assert response is not None
assert response.get_status_code() == 200
result = response.get_result()
assert result is not None
assert len(result.get('resources')) >= 1
assert result.get('rows_count') >= 1
def test_28_list_resource_keys_by_guid(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test28-" + \
self.transactionId
response = self.service.list_resource_keys(
guid=self.testAliasKeyGuid, headers=customHeaders)
assert response is not None
assert response.get_status_code() == 200
result = response.get_result()
assert result is not None
assert len(result.get('resources')) == 1
assert result.get('rows_count') == 1
key = result.get('resources')[0]
assert key.get('id') == self.testAliasKeyCrn
assert key.get('guid') == self.testAliasKeyGuid
assert key.get('name') == self.keyNames['update2']
assert key.get('resource_group_id') == self.testResourceGroupGuid
assert key.get('source_crn') == self.testAliasCrn
assert key.get('state') == "active"
def test_29_list_resource_keys_by_name(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test29-" + \
self.transactionId
response = self.service.list_resource_keys(
name=self.keyNames['update2'], headers=customHeaders)
assert response is not None
assert response.get_status_code() == 200
result = response.get_result()
assert result is not None
assert len(result.get('resources')) == 1
assert result.get('rows_count') == 1
def test_30_delete_resource_alias_fail(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test30-" + \
self.transactionId
with pytest.raises(ApiException) as e:
response = self.service.delete_resource_alias(
self.testAliasGuid,
headers=customHeaders)
assert response is not None
assert response.get_status_code() == 400
def test_31_delete_resource_instance_fail(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test31-" + \
self.transactionId
with pytest.raises(ApiException) as e:
response = self.service.delete_resource_instance(
self.testInstanceGuid,
headers=customHeaders)
assert response is not None
assert response.get_status_code() == 400
def test_32_delete_resource_binding(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test32-" + \
self.transactionId
response = self.service.delete_resource_binding(
self.testBindingGuid,
headers=customHeaders)
assert response is not None
assert response.get_status_code() == 204
def test_33_verify_resource_binding_was_deleted(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test33-" + \
self.transactionId
response = self.service.get_resource_binding(
self.testBindingGuid,
headers=customHeaders)
assert response is not None
assert response.get_status_code() == 200
result = response.get_result()
assert result is not None
assert result.get('id') == self.testBindingCrn
assert result.get('state') == "removed"
def test_34_delete_resource_keys(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test34-" + \
self.transactionId
response = self.service.delete_resource_key(
self.testInstanceKeyGuid, headers=customHeaders)
assert response is not None
assert response.get_status_code() == 204
customHeaders2 = {}
customHeaders2["Transaction-Id"] = "rc-sdk-python-test34-" + \
self.transactionId
response2 = self.service.delete_resource_key(
self.testAliasKeyGuid, headers=customHeaders2)
assert response2 is not None
assert response2.get_status_code() == 204
def test_35_verify_resource_keys_were_deleted(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test35-" + \
self.transactionId
response = self.service.get_resource_key(
self.testInstanceKeyGuid,
headers=customHeaders)
assert response is not None
assert response.get_status_code() == 200
result = response.get_result()
assert result is not None
assert result.get('id') == self.testInstanceKeyCrn
assert result.get('state') == "removed"
customHeaders2 = {}
customHeaders2["Transaction-Id"] = "rc-sdk-python-test35-" + \
self.transactionId
response2 = self.service.get_resource_key(
self.testAliasKeyGuid, headers=customHeaders2)
assert response2 is not None
assert response2.get_status_code() == 200
result2 = response2.get_result()
assert result2 is not None
assert result2.get('id') == self.testAliasKeyCrn
assert result2.get('state') == "removed"
def test_36_delete_resource_alias(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test36-" + \
self.transactionId
response = self.service.delete_resource_alias(
self.testAliasGuid, headers=customHeaders)
assert response is not None
assert response.get_status_code() == 204
def test_37_verify_resource_alias_was_deleted(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test37-" + \
self.transactionId
response = self.service.get_resource_alias(
self.testAliasGuid, headers=customHeaders)
assert response is not None
assert response.get_status_code() == 200
result = response.get_result()
assert result is not None
assert result.get('id') == self.testAliasCrn
assert result.get('state') == "removed"
def test_38_lock_resource_instance(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test38-" + \
self.transactionId
response = self.service.lock_resource_instance(
self.testInstanceGuid, headers=customHeaders)
assert response is not None
assert response.get_status_code() == 200
result = response.get_result()
assert result is not None
assert result.get('id') == self.testInstanceCrn
assert result.get('locked')
assert result.get('last_operation').get('type') == "lock"
assert not result.get('last_operation').get('async')
assert result.get('last_operation').get('state') == "succeeded"
def test_39_update_locked_resource_instance_fail(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test39-" + \
self.transactionId
with pytest.raises(ApiException) as e:
response = self.service.update_resource_instance(
self.testInstanceGuid,
name=self.lockedInstanceNameUpdate,
headers=customHeaders
)
assert response is not None
assert response.get_status_code() == 400
def test_40_delete__locked_resource_instance_fail(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test40-" + \
self.transactionId
with pytest.raises(ApiException) as e:
response = self.service.delete_resource_instance(
self.testInstanceGuid, headers=customHeaders)
assert response is not None
assert response.get_status_code() == 400
def test_41_unlock_resource_instance(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test41-" + \
self.transactionId
response = self.service.unlock_resource_instance(
self.testInstanceGuid, headers=customHeaders)
assert response is not None
assert response.get_status_code() == 200
result = response.get_result()
assert result is not None
assert result.get('id') == self.testInstanceCrn
assert not result.get('locked')
assert result.get('last_operation').get('type') == "unlock"
assert not result.get('last_operation').get('async')
assert result.get('last_operation').get('state') == "succeeded"
def test_42_delete_resource_instance(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test42-" + \
self.transactionId
response = self.service.delete_resource_instance(
id=self.testInstanceGuid,
recursive=False,
headers=customHeaders)
assert response is not None
assert response.get_status_code() == 204
def test_43_verify_resource_instance_was_deleted(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test43-" + \
self.transactionId
response = self.service.get_resource_instance(
self.testInstanceGuid, headers=customHeaders)
assert response is not None
assert response.get_status_code() == 200
result = response.get_result()
assert result is not None
assert result.get('id') == self.testInstanceCrn
assert result.get('state') == "removed"
def test_44_create_resource_instance_for_reclamation_enabled_plan(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test44-" + \
self.transactionId
response = self.service.create_resource_instance(
self.reclaimInstanceName,
self.testRegionId2,
self.testResourceGroupGuid,
self.testPlanId2,
headers=customHeaders
)
assert response is not None
assert response.get_status_code() == 201
result = response.get_result()
assert result is not None
assert result.get('id') is not None
assert result.get('guid') is not None
assert result.get('crn') is not None
assert result.get('id') == result.get('crn')
assert result.get('name') == self.reclaimInstanceName
assert result.get('account_id') == self.testAccountId
assert result.get('resource_group_id') == self.testResourceGroupGuid
assert result.get('resource_plan_id') == self.testPlanId2
assert result.get('state') == "active"
assert not result.get('locked')
assert result.get('last_operation').get('type') == "create"
assert not result.get('last_operation').get('async')
assert result.get('last_operation').get('state') == "succeeded"
self.__class__.testReclaimInstanceCrn = result.get('id')
self.__class__.testReclaimInstanceGuid = result.get('guid')
assert self.testReclaimInstanceCrn != ''
assert self.testReclaimInstanceGuid != ''
def test_45_schedule_resource_instance_for_reclamation(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test45-" + \
self.transactionId
response = self.service.delete_resource_instance(
self.testReclaimInstanceGuid, headers=customHeaders)
assert response is not None
assert response.get_status_code() == 204
time.sleep(20)
# Commented because redis timeouts cause intermittent failure
# def test_46_verify_resource_instance_is_pending_reclamation(self):
# customHeaders = {}
# customHeaders["Transaction-Id"] = "rc-sdk-python-test46-" + self.transactionId
# response = self.service.get_resource_instance(self.testReclaimInstanceGuid, headers=customHeaders)
# assert response is not None
# assert response.get_status_code() == 200
# result = response.get_result()
# assert result is not None
# assert result.get('id') == self.testReclaimInstanceCrn
# assert result.get('state') == "pending_reclamation"
# assert result.get('last_operation').get('type') == "reclamation"
# assert result.get('last_operation').get('sub_type') == "pending"
# assert not result.get('last_operation').get('async')
# assert result.get('last_operation').get('state') == "succeeded"
def test_47_list_reclamation_for_account_id(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test47-" + \
self.transactionId
response = self.service.list_reclamations(
# account_id=self.testAccountId,
# checking reclamations with instance guid for more test reliability
resource_instance_id=self.testReclaimInstanceGuid,
headers=customHeaders
)
assert response is not None
assert response.get_status_code() == 200
result = response.get_result()
assert len(result.get('resources')) >= 1
foundReclamation = False
for res in result.get('resources'):
if res.get('resource_instance_id') == self.testReclaimInstanceGuid:
assert res.get(
'resource_instance_id') == self.testReclaimInstanceGuid
assert res.get('account_id') == self.testAccountId
assert res.get(
'resource_group_id') == self.testResourceGroupGuid
assert res.get('state') == 'SCHEDULED'
foundReclamation = True
self.__class__.testReclamationId1 = res.get('id')
assert foundReclamation
assert self.testReclamationId1 != ''
def test_48_restore_resource_instance(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test48-" + \
self.transactionId
response = self.service.run_reclamation_action(
self.testReclamationId1, 'restore', headers=customHeaders)
assert response is not None
assert response.get_status_code() == 200
result = response.get_result()
assert result.get('id') == self.testReclamationId1
assert result.get(
'resource_instance_id') == self.testReclaimInstanceGuid
assert result.get('account_id') == self.testAccountId
assert result.get('resource_group_id') == self.testResourceGroupGuid
assert result.get('state') == 'RESTORING'
time.sleep(20)
# Commented because redis timeouts cause intermittent failure
# def test_49_verify_resource_instance_is_restored(self):
# customHeaders = {}
# customHeaders["Transaction-Id"] = "rc-sdk-python-test49-" + self.transactionId
# response = self.service.get_resource_instance(self.testReclaimInstanceGuid, headers=customHeaders)
# assert response is not None
# assert response.get_status_code() == 200
# result = response.get_result()
# assert result is not None
# assert result.get('id') == self.testReclaimInstanceCrn
# assert result.get('state') == "active"
# assert result.get('last_operation').get('type') == "reclamation"
# assert result.get('last_operation').get('sub_type') == "restore"
# assert not result.get('last_operation').get('async')
# assert result.get('last_operation').get('state') == "succeeded"
    def test_50_schedule_resource_instance_for_reclamation2(self):
        """Delete the reclamation-enabled instance again to schedule a second reclamation."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test50-" + \
            self.transactionId
        response = self.service.delete_resource_instance(
            self.testReclaimInstanceGuid, headers=customHeaders)
        assert response is not None
        assert response.get_status_code() == 204
        # Pause so the reclamation record is created before test_51 lists it.
        time.sleep(20)
    def test_51_list_reclamation_for_account_id_and_resource_instance_id(self):
        """List reclamations filtered by account and instance; capture the reclamation id."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test51-" + \
            self.transactionId
        response = self.service.list_reclamations(
            account_id=self.testAccountId,
            resource_instance_id=self.testReclaimInstanceGuid,
            headers=customHeaders
        )
        assert response is not None
        assert response.get_status_code() == 200
        result = response.get_result()
        assert len(result.get('resources')) == 1
        res = result.get('resources')[0]
        assert res.get('resource_instance_id') == self.testReclaimInstanceGuid
        assert res.get('account_id') == self.testAccountId
        assert res.get('resource_group_id') == self.testResourceGroupGuid
        assert res.get('state') == 'SCHEDULED'
        # Save for test_52, which reclaims via this id.
        self.__class__.testReclamationId2 = res.get('id')
        assert self.testReclamationId2 != ''
    def test_52_reclaim_resource_instance(self):
        """Run the 'reclaim' action on the second reclamation and verify RECLAIMING state."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test52-" + \
            self.transactionId
        response = self.service.run_reclamation_action(
            self.testReclamationId2, 'reclaim', headers=customHeaders)
        assert response is not None
        assert response.get_status_code() == 200
        result = response.get_result()
        assert result.get('id') == self.testReclamationId2
        assert result.get(
            'resource_instance_id') == self.testReclaimInstanceGuid
        assert result.get('account_id') == self.testAccountId
        assert result.get('resource_group_id') == self.testResourceGroupGuid
        assert result.get('state') == 'RECLAIMING'
        # Pause so the (asynchronous) reclaim can complete before teardown.
        time.sleep(20)
# Commented because redis timeouts cause intermittent failure
# def test_53_verify_resource_instance_is_reclaimed(self):
# customHeaders = {}
# customHeaders["Transaction-Id"] = "rc-sdk-python-test53-" + self.transactionId
# response = self.service.get_resource_instance(self.testReclaimInstanceGuid, headers=customHeaders)
# assert response is not None
# assert response.get_status_code() == 200
# result = response.get_result()
# assert result is not None
# assert result.get('id') == self.testReclaimInstanceCrn
# assert result.get('state') == "removed"
# assert result.get('last_operation').get('type') == "reclamation"
# assert result.get('last_operation').get('sub_type') == "delete"
# assert not result.get('last_operation').get('async')
# assert result.get('last_operation').get('state') == "succeeded"
    @classmethod
    def cleanupResources(cls):
        """Delete the resources created by the tests, by saved GUID.

        Deletion order matters: keys and bindings first, then the alias,
        then the instance (via cleanupInstance). A 410 response means the
        resource was already removed by the tests and is not an error.
        """
        if cls.testInstanceKeyGuid != '':
            try:
                customHeaders = {}
                customHeaders["Transaction-Id"] = "rc-sdk-python-cleanup-" + \
                    cls.transactionId
                cls.service.delete_resource_key(
                    cls.testInstanceKeyGuid, headers=customHeaders)
                print('\nSuccessfully cleaned up key ' +
                      cls.testInstanceKeyGuid + '.')
            except ApiException as errResponse:
                if errResponse.code == 410:
                    print('\nKey ' + cls.testInstanceKeyGuid +
                          ' was already deleted by the tests.')
                else:
                    print('\nFailed to clean up key ' +
                          cls.testInstanceKeyGuid + '. Error: ' + errResponse.message)
        else:
            print('\nKey was not created. No cleanup needed.')
        if cls.testAliasKeyGuid != '':
            try:
                customHeaders = {}
                customHeaders["Transaction-Id"] = "rc-sdk-python-cleanup-" + \
                    cls.transactionId
                cls.service.delete_resource_key(
                    cls.testAliasKeyGuid, headers=customHeaders)
                print('\nSuccessfully cleaned up key ' +
                      cls.testAliasKeyGuid + '.')
            except ApiException as errResponse:
                if errResponse.code == 410:
                    print('\nKey ' + cls.testAliasKeyGuid +
                          ' was already deleted by the tests.')
                else:
                    print('\nFailed to clean up key ' +
                          cls.testAliasKeyGuid + '. Error: ' + errResponse.message)
        else:
            print('\nKey was not created. No cleanup needed.')
        if cls.testBindingGuid != '':
            try:
                customHeaders = {}
                customHeaders["Transaction-Id"] = "rc-sdk-python-cleanup-" + \
                    cls.transactionId
                cls.service.delete_resource_binding(
                    cls.testBindingGuid, headers=customHeaders)
                print('\nSuccessfully cleaned up binding ' +
                      cls.testBindingGuid + '.')
            except ApiException as errResponse:
                if errResponse.code == 410:
                    print('\nBinding ' + cls.testBindingGuid +
                          ' was already deleted by the tests.')
                else:
                    print('\nFailed to clean up binding ' +
                          cls.testBindingGuid + '. Error: ' + errResponse.message)
        else:
            print('\nBinding was not created. No cleanup needed.')
        if cls.testAliasGuid != '':
            try:
                customHeaders = {}
                customHeaders["Transaction-Id"] = "rc-sdk-python-cleanup-" + \
                    cls.transactionId
                cls.service.delete_resource_alias(
                    cls.testAliasGuid, headers=customHeaders)
                print('\nSuccessfully cleaned up alias ' +
                      cls.testAliasGuid + '.')
            except ApiException as errResponse:
                if errResponse.code == 410:
                    print('\nAlias ' + cls.testAliasGuid +
                          ' was already deleted by the tests.')
                else:
                    print('\nFailed to clean up alias ' +
                          cls.testAliasGuid + '. Error: ' + errResponse.message)
        else:
            print('\nAlias was not created. No cleanup needed.')
        if cls.testInstanceGuid != '':
            cls.cleanupInstance()
        else:
            print('\nInstance was not created. No cleanup needed.')
@classmethod
def cleanupByName(cls):
# Resource Keys
for resourceKeyName in cls.keyNames.values():
try:
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-cleanup-by-name-" + \
cls.transactionId
response = cls.service.list_resource_keys(
name=resourceKeyName, headers=customHeaders)
except ApiException as errResponse:
print('\nFailed to retrieve key with name' + resourceKeyName +
' for cleanup. Error: ' + errResponse.message)
else:
resources = response.get_result().get('resources')
if (len(resources) > 0):
for res in resources:
keyGuid = res.get('guid')
try:
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-cleanup-by-name-" + \
cls.transactionId
cls.service.delete_resource_key(
keyGuid, headers=customHeaders)
print('\nSuccessfully cleaned up key ' + keyGuid + '.')
except ApiException as errResponse:
if errResponse.code == 410:
print('\nKey ' + keyGuid +
' was already deleted by the tests.')
else:
print('\nFailed to clean up key ' +
keyGuid + '. Error: ' + errResponse.message)
else:
print('\nNo keys found for name ' + resourceKeyName)
# Resource Instances
for resourceInstanceName in cls.instanceNames.values():
try:
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-cleanup-by-name-" + \
cls.transactionId
response = cls.service.list_resource_instances(
name=resourceInstanceName, headers=customHeaders)
except ApiException as errResponse:
print('\nFailed to retrieve instance with name' +
resourceInstanceName + ' for cleanup. Error: ' + errResponse.message)
else:
resources = response.get_result().get('resources')
if (len(resources) > 0):
for res in resources:
instanceGuid = res.get('guid')
# unlock instance if it is locked
if res.get('state') == "active" and res.get('locked'):
try:
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-cleanup-by-name" + \
cls.transactionId
cls.service.unlock_resource_instance(
instanceGuid, headers=customHeaders)
print('\nSuccessfully unlocked instance ' +
instanceGuid + ' for cleanup.')
except ApiException as errResponse:
print('\nFailed to unlock instance ' + instanceGuid +
' for cleanup. Error: ' + errResponse.message)
try:
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-cleanup-by-name-" + \
cls.transactionId
cls.service.delete_resource_instance(
instanceGuid, headers=customHeaders)
print('\nSuccessfully cleaned up instance ' +
instanceGuid + '.')
except ApiException as errResponse:
if errResponse.code == 410:
print('\nInstance ' + instanceGuid +
' was already deleted by the tests.')
else:
print('\nFailed to clean up instance ' +
instanceGuid + '. Error: ' + errResponse.message)
else:
print('\nNo instances found for name ' + resourceInstanceName)
# Resource Bindings
for resourceBindingName in cls.bindingNames.values():
try:
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-cleanup-by-name-" + \
cls.transactionId
response = cls.service.list_resource_bindings(
name=resourceBindingName, headers=customHeaders)
except ApiException as errResponse:
print('\nFailed to retrieve binding with name' +
resourceBindingName + ' for cleanup. Error: ' + errResponse.message)
else:
resources = response.get_result().get('resources')
if (len(resources) > 0):
for res in resources:
bindingGuid = res.get('guid')
try:
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-cleanup-by-name-" + \
cls.transactionId
cls.service.delete_resource_key(
bindingGuid, headers=customHeaders)
print('\nSuccessfully cleaned up binding ' +
bindingGuid + '.')
except ApiException as errResponse:
if errResponse.code == 410:
print('\nBinding ' + bindingGuid +
' was already deleted by the tests.')
else:
print('\nFailed to clean up binding ' +
bindingGuid + '. Error: ' + errResponse.message)
else:
print('\nNo bindings found for name ' + resourceBindingName)
# Resource Aliases
for resourceAliasName in cls.aliasNames.values():
try:
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-cleanup-by-name-" + \
cls.transactionId
response = cls.service.list_resource_aliases(
name=resourceAliasName, headers=customHeaders)
except ApiException as errResponse:
print('\nFailed to retrieve alias with name' +
resourceAliasName + ' for cleanup. Error: ' + errResponse.message)
else:
resources = response.get_result().get('resources')
if (len(resources) > 0):
for res in resources:
aliasGuid = res.get('guid')
try:
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-cleanup-by-name-" + \
cls.transactionId
cls.service.delete_resource_alias(
aliasGuid, headers=customHeaders)
print('\nSuccessfully cleaned up alias ' +
aliasGuid + '.')
except ApiException as errResponse:
if errResponse.code == 410:
print('\nAlias ' + aliasGuid +
' was already deleted by the tests.')
else:
print('\nFailed to clean up alias ' +
aliasGuid + '. Error: ' + errResponse.message)
else:
print('\nNo aliases found for name ' + resourceAliasName)
    @classmethod
    def cleanupInstance(cls):
        """Delete the main test instance, unlocking it first if it is locked.

        A 410 on delete means the instance was already removed by the tests;
        any other error is logged and swallowed (best-effort cleanup).
        """
        try:
            customHeaders = {}
            customHeaders["Transaction-Id"] = "rc-sdk-python-cleanup-" + \
                cls.transactionId
            response = cls.service.get_resource_instance(
                cls.testInstanceGuid, headers=customHeaders)
        except ApiException as errResponse:
            print('\nFailed to retrieve instance ' + cls.testInstanceGuid +
                  ' for cleanup. Error: ' + errResponse.message)
        else:
            # A locked active instance cannot be deleted; unlock it first.
            if response.get_result().get('state') == "active" and response.get_result().get('locked'):
                try:
                    customHeaders = {}
                    customHeaders["Transaction-Id"] = "rc-sdk-python-cleanup-" + \
                        cls.transactionId
                    cls.service.unlock_resource_instance(
                        cls.testInstanceGuid, headers=customHeaders)
                    print('\nSuccessfully unlocked instance ' +
                          cls.testInstanceGuid + ' for cleanup.')
                except ApiException as errResponse:
                    print('\nFailed to unlock instance ' + cls.testInstanceGuid +
                          ' for cleanup. Error: ' + errResponse.message)
            try:
                customHeaders = {}
                customHeaders["Transaction-Id"] = "rc-sdk-python-cleanup-" + \
                    cls.transactionId
                cls.service.delete_resource_instance(
                    cls.testInstanceGuid, headers=customHeaders)
                print('\nSuccessfully cleaned up instance ' +
                      cls.testInstanceGuid + '.')
            except ApiException as errResponse:
                if errResponse.code == 410:
                    print('\nInstance ' + cls.testInstanceGuid +
                          ' was already deleted by the tests.')
                else:
                    print('\nFailed to clean up instance ' +
                          cls.testInstanceGuid + '. Error: ' + errResponse.message)
    @classmethod
    def cleanupReclamationInstance(cls):
        """Ensure the reclamation-test instance is fully reclaimed.

        Depending on the instance's current state: already 'removed' needs
        nothing; 'pending_reclamation' is reclaimed directly; any other state
        is first deleted (scheduling a reclamation) and then reclaimed.
        """
        if cls.testReclaimInstanceGuid != '':
            try:
                customHeaders = {}
                customHeaders["Transaction-Id"] = "rc-sdk-python-cleanup-" + \
                    cls.transactionId
                response = cls.service.get_resource_instance(
                    cls.testReclaimInstanceGuid, headers=customHeaders)
            except ApiException as errResponse:
                print('\nFailed to retrieve instance ' + cls.testReclaimInstanceGuid +
                      ' for cleanup. Error: ' + errResponse.message)
            else:
                if response.get_result().get('state') == "removed":
                    print('\nInstance ' + cls.testReclaimInstanceGuid +
                          ' was already reclaimed by the tests.')
                elif response.get_result().get('state') == "pending_reclamation":
                    cls.cleanupInstancePendingReclamation()
                else:
                    try:
                        customHeaders = {}
                        customHeaders["Transaction-Id"] = "rc-sdk-python-cleanup-" + \
                            cls.transactionId
                        cls.service.delete_resource_instance(
                            cls.testReclaimInstanceGuid, headers=customHeaders)
                        print('\nSuccessfully scheduled instance ' +
                              cls.testReclaimInstanceGuid + ' for reclamation.')
                    except ApiException as errResponse:
                        print('\nFailed to schedule instance ' + cls.testReclaimInstanceGuid +
                              ' for reclamation. Error: ' + errResponse.message)
                    else:
                        # Pause so the reclamation record exists before processing it.
                        time.sleep(20)
                        cls.cleanupInstancePendingReclamation()
        else:
            print('\nReclamation instance was not created. No cleanup needed.')
@classmethod
def cleanupInstancePendingReclamation(cls):
try:
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-cleanup-" + \
cls.transactionId
response = cls.service.list_reclamations(
account_id=cls.testAccountId,
resource_instance_id=cls.testReclaimInstanceGuid,
headers=customHeaders
)
except ApiException as errResponse:
print('\nFailed to retrieve reclamation to process to reclaim instance ' +
cls.testReclaimInstanceGuid + ' for cleanup. Error: ' + errResponse.message)
else:
res = response.get_result().get('resources')
if len(res) == 0:
print('\nNo reclamations for instance ' +
cls.testReclaimInstanceGuid + ' were returned.')
else:
reclamationId = res[0].get('id')
try:
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-cleanup-" + \
cls.transactionId
response = cls.service.run_reclamation_action(
reclamationId, 'reclaim', headers=customHeaders)
print('\nSuccessfully reclaimed instance ' +
cls.testReclaimInstanceGuid)
except ApiException as errResponse:
print('\nFailed to process reclamation ' + reclamationId +
' for instance ' + cls.testInstanceGuid + '. Error: ' + errResponse.message)
| 41.517179 | 108 | 0.592825 |
import pytest
import unittest
import os
import os.path
import uuid
import time
from ibm_cloud_sdk_core import *
from ibm_platform_services.resource_controller_v2 import *
# Local credentials file; tests are skipped if it does not exist.
configFile = 'resource_controller.env'
# Page size used by the paginated list tests.
results_per_page = 20
class TestResourceControllerV2(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        """Load external config, build the service client, and pre-clean leftovers.

        Skips the whole suite when the credentials file is absent. Shared
        state (GUIDs/CRNs captured by earlier tests for later ones) is
        initialised to empty strings here.
        """
        if os.path.exists(configFile):
            os.environ['IBM_CREDENTIALS_FILE'] = configFile
            cls.config = read_external_sources(
                ResourceControllerV2.DEFAULT_SERVICE_NAME)
            cls.testAccountId = cls.config['ACCOUNT_ID']
            cls.testResourceGroupGuid = cls.config['RESOURCE_GROUP']
            cls.testOrgGuid = cls.config['ORGANIZATION_GUID']
            cls.testSpaceGuid = cls.config['SPACE_GUID']
            cls.testAppGuid = cls.config['APPLICATION_GUID']
            cls.testPlanId1 = cls.config['PLAN_ID']
            cls.testPlanId2 = cls.config['RECLAMATION_PLAN_ID']
        else:
            raise unittest.SkipTest(
                'External configuration not available, skipping...')
        cls.service = ResourceControllerV2.new_instance()
        assert cls.service is not None
        # Cross-test state: populated by the create tests, consumed by later
        # get/update/delete tests and by the cleanup helpers.
        cls.testInstanceCrn = ''
        cls.testInstanceGuid = ''
        cls.testAliasCrn = ''
        cls.testAliasGuid = ''
        cls.testBindingCrn = ''
        cls.testBindingGuid = ''
        cls.testInstanceKeyCrn = ''
        cls.testInstanceKeyGuid = ''
        cls.testAliasKeyCrn = ''
        cls.testAliasKeyGuid = ''
        cls.aliasTargetCrn = ''
        cls.bindTargetCrn = ''
        cls.testReclaimInstanceCrn = ''
        cls.testReclaimInstanceGuid = ''
        cls.testReclamationId1 = ''
        cls.testReclamationId2 = ''
        cls.testRegionId1 = 'global'
        cls.testRegionId2 = 'global'
        cls.reclaimInstanceName = 'RcSdkReclaimInstance1'
        cls.lockedInstanceNameUpdate = 'RcSdkLockedInstanceUpdate1'
        # Well-known names; cleanupByName() deletes anything matching them.
        cls.instanceNames = {'name': 'RcSdkInstance1Python',
                             'update': 'RcSdkInstanceUpdate1Python'}
        cls.keyNames = {'name': 'RcSdkKey1Python', 'update': 'RcSdkKeyUpdate1Python',
                        'name2': 'RcSdkKey2Python', 'update2': 'RcSdkKeyUpdate2Python'}
        cls.bindingNames = {'name': 'RcSdkBinding1Python',
                            'update': 'RcSdkBindingUpdate1Python'}
        cls.aliasNames = {'name': 'RcSdkAlias1Python',
                          'update': 'RcSdkAliasUpdate1Python'}
        # One transaction id per run, embedded in every request header for tracing.
        cls.transactionId = str(uuid.uuid4())
        print('\nTransaction-Id for Test Run: ' + cls.transactionId)
        print('\nBegin pre-test clean up by name.')
        cls.cleanupByName()
        print('\nPre-test cleanup done.')
        print('\nSetup complete.')
    @classmethod
    def tearDownClass(cls):
        """Best-effort removal of everything the suite created."""
        cls.cleanupResources()
        cls.cleanupReclamationInstance()
        cls.cleanupByName()
        print('\nClean up complete.')
    def test_00_create_resource_instance(self):
        """Create the main test instance and capture its CRN/GUID for later tests."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test00-" + \
            self.transactionId
        response = self.service.create_resource_instance(
            self.instanceNames['name'],
            self.testRegionId1,
            self.testResourceGroupGuid,
            self.testPlanId1,
            headers=customHeaders
        )
        assert response is not None
        assert response.get_status_code() == 201
        result = response.get_result()
        assert result is not None
        assert result.get('id') is not None
        assert result.get('guid') is not None
        assert result.get('crn') is not None
        assert result.get('id') == result.get('crn')
        assert result.get('name') == self.instanceNames['name']
        assert result.get('account_id') == self.testAccountId
        assert result.get('resource_group_id') == self.testResourceGroupGuid
        assert result.get('resource_plan_id') == self.testPlanId1
        assert result.get('state') == "active"
        assert not result.get('locked')
        assert result.get('last_operation').get('type') == "create"
        assert not result.get('last_operation').get('async')
        assert result.get('last_operation').get('state') == "succeeded"
        self.__class__.testInstanceCrn = result.get('id')
        self.__class__.testInstanceGuid = result.get('guid')
        assert self.testInstanceCrn != ''
        assert self.testInstanceGuid != ''
    def test_01_get_resource_instance(self):
        """Fetch the instance created in test_00 and verify its fields."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test01-" + \
            self.transactionId
        response = self.service.get_resource_instance(
            self.testInstanceGuid, headers=customHeaders)
        assert response is not None
        assert response.get_status_code() == 200
        result = response.get_result()
        assert result is not None
        assert result.get('id') == self.testInstanceCrn
        assert result.get('guid') == self.testInstanceGuid
        assert result.get('crn') == self.testInstanceCrn
        assert result.get('name') == self.instanceNames['name']
        assert result.get('account_id') == self.testAccountId
        assert result.get('resource_group_id') == self.testResourceGroupGuid
        assert result.get('resource_plan_id') == self.testPlanId1
        assert result.get('state') == "active"
        assert not result.get('locked')
        assert result.get('last_operation').get('type') == "create"
        assert not result.get('last_operation').get('async')
        assert result.get('last_operation').get('state') == "succeeded"
    def test_02_update_resource_instance(self):
        """Rename the instance and set a parameter; verify the update succeeded."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test02-" + \
            self.transactionId
        params = {}
        params["hello"] = "bye"
        response = self.service.update_resource_instance(
            self.testInstanceGuid,
            name=self.instanceNames['update'],
            parameters=params,
            headers=customHeaders
        )
        assert response is not None
        assert response.get_status_code() == 200
        result = response.get_result()
        assert result is not None
        assert result.get('id') == self.testInstanceCrn
        assert result.get('name') == self.instanceNames['update']
        assert result.get('state') == "active"
        assert result.get('last_operation').get('type') == "update"
        assert result.get('last_operation').get('sub_type') == "config"
        assert not result.get('last_operation').get('async')
        assert result.get('last_operation').get('state') == "succeeded"
    def test_03_list_resource_instances_no_filter(self):
        """Page through all instances via the `start` token and sanity-check each page."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test03-" + \
            self.transactionId
        start = None
        while True:
            response = self.service.list_resource_instances(
                limit=results_per_page,
                start=start,
                headers=customHeaders)
            assert response is not None
            assert response.get_status_code() == 200
            result = response.get_result()
            assert result is not None
            assert len(result.get('resources')) >= 1
            assert len(result.get('resources')) <= results_per_page
            assert result.get('rows_count') >= 1
            assert result.get('rows_count') <= results_per_page
            # A missing 'start' query param in next_url means the last page.
            start = get_query_param(result.get('next_url'), 'start')
            if start is None:
                break
    def test_04_list_resource_instances_by_guid(self):
        """List instances filtered by GUID; expect exactly the updated instance."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test04-" + \
            self.transactionId
        response = self.service.list_resource_instances(
            guid=self.testInstanceGuid, headers=customHeaders)
        assert response is not None
        assert response.get_status_code() == 200
        result = response.get_result()
        assert result is not None
        assert len(result.get('resources')) == 1
        assert result.get('rows_count') == 1
        instance = result.get('resources')[0]
        assert instance.get('id') == self.testInstanceCrn
        assert instance.get('guid') == self.testInstanceGuid
        assert instance.get('name') == self.instanceNames['update']
        assert instance.get('state') == "active"
        assert instance.get('last_operation').get('type') == "update"
        assert instance.get('last_operation').get('sub_type') == "config"
        assert not instance.get('last_operation').get('async')
        assert instance.get('last_operation').get('state') == "succeeded"
    def test_05_list_resource_instances_by_name(self):
        """List instances filtered by the updated name; expect exactly one match."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test05-" + \
            self.transactionId
        response = self.service.list_resource_instances(
            name=self.instanceNames['update'],
            headers=customHeaders)
        assert response is not None
        assert response.get_status_code() == 200
        result = response.get_result()
        assert result is not None
        assert len(result.get('resources')) == 1
        assert result.get('rows_count') == 1
    def test_06_create_resource_alias(self):
        """Create an alias of the instance targeting a CF space; capture its CRN/GUID."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test06-" + \
            self.transactionId
        # Target is sent with the 'bluemix' service segment; the service
        # returns it normalized to 'cf' (aliasTargetCrn).
        target = "crn:v1:bluemix:public:bluemix:us-south:o/" + \
            self.testOrgGuid + "::cf-space:" + self.testSpaceGuid
        self.__class__.aliasTargetCrn = "crn:v1:bluemix:public:cf:us-south:o/" + \
            self.testOrgGuid + "::cf-space:" + self.testSpaceGuid
        assert self.aliasTargetCrn != ''
        response = self.service.create_resource_alias(
            self.aliasNames['name'],
            self.testInstanceGuid,
            target,
            headers=customHeaders
        )
        assert response is not None
        assert response.get_status_code() == 201
        result = response.get_result()
        assert result is not None
        assert result.get('id') is not None
        assert result.get('guid') is not None
        assert result.get('crn') is not None
        assert result.get('id') == result.get('crn')
        assert result.get('name') == self.aliasNames['name']
        assert result.get('account_id') == self.testAccountId
        assert result.get('resource_group_id') == self.testResourceGroupGuid
        assert result.get('target_crn') == self.aliasTargetCrn
        assert result.get('state') == "active"
        assert result.get('resource_instance_id') == self.testInstanceCrn
        self.__class__.testAliasCrn = result.get('id')
        self.__class__.testAliasGuid = result.get('guid')
        assert self.testAliasCrn != ''
        assert self.testAliasGuid != ''
    def test_07_get_resource_alias(self):
        """Fetch the alias created in test_06 and verify its fields."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test07-" + \
            self.transactionId
        response = self.service.get_resource_alias(
            self.testAliasGuid, headers=customHeaders)
        assert response is not None
        assert response.get_status_code() == 200
        result = response.get_result()
        assert result is not None
        assert result.get('id') == self.testAliasCrn
        assert result.get('guid') == self.testAliasGuid
        assert result.get('crn') == self.testAliasCrn
        assert result.get('name') == self.aliasNames['name']
        assert result.get('account_id') == self.testAccountId
        assert result.get('resource_group_id') == self.testResourceGroupGuid
        assert result.get('target_crn') == self.aliasTargetCrn
        assert result.get('state') == "active"
        assert result.get('resource_instance_id') == self.testInstanceCrn
    def test_08_update_resource_alias(self):
        """Rename the alias and verify the update succeeded."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test08-" + \
            self.transactionId
        response = self.service.update_resource_alias(
            self.testAliasGuid,
            name=self.aliasNames['update'],
            headers=customHeaders
        )
        assert response is not None
        assert response.get_status_code() == 200
        result = response.get_result()
        assert result is not None
        assert result.get('id') == self.testAliasCrn
        assert result.get('name') == self.aliasNames['update']
        assert result.get('state') == "active"
    def test_09_list_resource_aliases_no_filter(self):
        """Page through all aliases via the `start` token and sanity-check each page."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test09-" + \
            self.transactionId
        start = None
        while True:
            response = self.service.list_resource_aliases(
                limit=results_per_page,
                start=start,
                headers=customHeaders)
            assert response is not None
            assert response.get_status_code() == 200
            result = response.get_result()
            assert result is not None
            assert len(result.get('resources')) >= 1
            assert len(result.get('resources')) <= results_per_page
            assert result.get('rows_count') >= 1
            assert result.get('rows_count') <= results_per_page
            # A missing 'start' query param in next_url means the last page.
            start = get_query_param(result.get('next_url'), 'start')
            if start is None:
                break
    def test_10_list_resource_aliases_by_guid(self):
        """List aliases filtered by GUID; expect exactly the updated alias."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test10-" + \
            self.transactionId
        response = self.service.list_resource_aliases(
            guid=self.testAliasGuid, headers=customHeaders)
        assert response is not None
        assert response.get_status_code() == 200
        result = response.get_result()
        assert result is not None
        assert len(result.get('resources')) == 1
        assert result.get('rows_count') == 1
        alias = result.get('resources')[0]
        assert alias.get('id') == self.testAliasCrn
        assert alias.get('guid') == self.testAliasGuid
        assert alias.get('name') == self.aliasNames['update']
        assert alias.get('resource_group_id') == self.testResourceGroupGuid
        assert alias.get('target_crn') == self.aliasTargetCrn
        assert alias.get('state') == "active"
        assert alias.get('resource_instance_id') == self.testInstanceCrn
    def test_11_list_resource_aliases_by_name(self):
        """List aliases filtered by the updated name; expect exactly one match."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test11-" + \
            self.transactionId
        response = self.service.list_resource_aliases(
            name=self.aliasNames['update'],
            headers=customHeaders)
        assert response is not None
        assert response.get_status_code() == 200
        result = response.get_result()
        assert result is not None
        assert len(result.get('resources')) == 1
        assert result.get('rows_count') == 1
    def test_11a_list_resource_aliases_for_instance(self):
        """Page through the aliases of the test instance; expect exactly one."""
        assert self.testInstanceGuid is not None
        start = None
        while True:
            response = self.service.list_resource_aliases_for_instance(
                id=self.testInstanceGuid,
                limit=results_per_page,
                start=start)
            assert response is not None
            assert response.get_status_code() == 200
            result = response.get_result()
            assert result is not None
            assert len(result.get('resources')) == 1
            assert result.get('rows_count') == 1
            # A missing 'start' query param in next_url means the last page.
            start = get_query_param(result.get('next_url'), 'start')
            if start is None:
                break
    def test_12_create_resource_binding(self):
        """Bind the alias to a CF application; capture the binding's CRN/GUID."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test12-" + \
            self.transactionId
        parameters = {
            'parameter1': 'value1',
            'parameter2': 'value2'
        }
        # Target is sent with the 'bluemix' service segment; the service
        # returns it normalized to 'cf' (bindTargetCrn).
        target = "crn:v1:staging:public:bluemix:us-south:s/" + \
            self.testSpaceGuid + "::cf-application:" + self.testAppGuid
        self.__class__.bindTargetCrn = "crn:v1:staging:public:cf:us-south:s/" + \
            self.testSpaceGuid + "::cf-application:" + self.testAppGuid
        assert self.bindTargetCrn != ''
        response = self.service.create_resource_binding(
            source=self.testAliasGuid,
            target=target,
            name=self.bindingNames['name'],
            parameters=parameters,
            headers=customHeaders
        )
        assert response is not None
        assert response.get_status_code() == 201
        result = response.get_result()
        assert result is not None
        assert result.get('id') is not None
        assert result.get('guid') is not None
        assert result.get('crn') is not None
        assert result.get('id') == result.get('crn')
        assert result.get('name') == self.bindingNames['name']
        assert result.get('account_id') == self.testAccountId
        assert result.get('resource_group_id') == self.testResourceGroupGuid
        assert result.get('source_crn') == self.testAliasCrn
        assert result.get('target_crn') == self.bindTargetCrn
        assert result.get('state') == "active"
        self.__class__.testBindingCrn = result.get('id')
        self.__class__.testBindingGuid = result.get('guid')
        assert self.testBindingCrn != ''
        assert self.testBindingGuid != ''
    def test_13_get_resource_binding(self):
        """Fetch the binding created in test_12 and verify its fields."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test13-" + \
            self.transactionId
        response = self.service.get_resource_binding(
            self.testBindingGuid, headers=customHeaders)
        assert response is not None
        assert response.get_status_code() == 200
        result = response.get_result()
        assert result is not None
        assert result.get('id') == self.testBindingCrn
        assert result.get('guid') == self.testBindingGuid
        assert result.get('crn') == self.testBindingCrn
        assert result.get('name') == self.bindingNames['name']
        assert result.get('account_id') == self.testAccountId
        assert result.get('resource_group_id') == self.testResourceGroupGuid
        assert result.get('source_crn') == self.testAliasCrn
        assert result.get('target_crn') == self.bindTargetCrn
        assert result.get('state') == "active"
    def test_14_update_resource_binding(self):
        """Rename the binding and verify the update succeeded."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test14-" + \
            self.transactionId
        response = self.service.update_resource_binding(
            self.testBindingGuid,
            self.bindingNames['update'],
            headers=customHeaders)
        assert response is not None
        assert response.get_status_code() == 200
        result = response.get_result()
        assert result is not None
        assert result.get('id') == self.testBindingCrn
        assert result.get('name') == self.bindingNames['update']
        assert result.get('state') == "active"
    def test_15_list_resource_bindings_no_filter(self):
        """Page through all bindings via the `start` token and sanity-check each page."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test15-" + \
            self.transactionId
        start = None
        while True:
            response = self.service.list_resource_bindings(
                limit=results_per_page,
                start=start,
                headers=customHeaders)
            assert response is not None
            assert response.get_status_code() == 200
            result = response.get_result()
            assert result is not None
            assert len(result.get('resources')) >= 1
            assert len(result.get('resources')) <= results_per_page
            assert result.get('rows_count') >= 1
            assert result.get('rows_count') <= results_per_page
            # A missing 'start' query param in next_url means the last page.
            start = get_query_param(result.get('next_url'), 'start')
            if start is None:
                break
    def test_16_list_resource_bindings_by_guid(self):
        """List bindings filtered by GUID; expect exactly the updated binding."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test16-" + \
            self.transactionId
        response = self.service.list_resource_bindings(
            guid=self.testBindingGuid, headers=customHeaders)
        assert response is not None
        assert response.get_status_code() == 200
        result = response.get_result()
        assert result is not None
        assert len(result.get('resources')) == 1
        assert result.get('rows_count') == 1
        binding = result.get('resources')[0]
        assert binding.get('id') == self.testBindingCrn
        assert binding.get('guid') == self.testBindingGuid
        assert binding.get('name') == self.bindingNames['update']
        assert binding.get('resource_group_id') == self.testResourceGroupGuid
        assert binding.get('source_crn') == self.testAliasCrn
        assert binding.get('target_crn') == self.bindTargetCrn
        assert binding.get('state') == "active"
    def test_17_list_resource_bindings_by_name(self):
        """List bindings filtered by the updated name; expect exactly one match."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test17-" + \
            self.transactionId
        response = self.service.list_resource_bindings(
            name=self.bindingNames['update'],
            headers=customHeaders)
        assert response is not None
        assert response.get_status_code() == 200
        result = response.get_result()
        assert result is not None
        assert len(result.get('resources')) == 1
        assert result.get('rows_count') == 1
    def test_17a_list_resource_bindings_for_alias(self):
        """Page through the bindings of the test alias; expect exactly one."""
        assert self.testAliasGuid is not None
        start = None
        while True:
            response = self.service.list_resource_bindings_for_alias(
                id=self.testAliasGuid,
                limit=results_per_page,
                start=start)
            assert response is not None
            assert response.get_status_code() == 200
            result = response.get_result()
            assert result is not None
            assert len(result.get('resources')) == 1
            assert result.get('rows_count') == 1
            # A missing 'start' query param in next_url means the last page.
            start = get_query_param(result.get('next_url'), 'start')
            if start is None:
                break
    def test_18_create_resource_key_for_instance(self):
        """Create a credential key against the test instance and record its CRN/GUID."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test18-" + \
            self.transactionId
        parameters = {
            'parameter1': 'value1',
            'parameter2': 'value2'
        }
        response = self.service.create_resource_key(
            name=self.keyNames['name'],
            source=self.testInstanceGuid,
            parameters=parameters,
            headers=customHeaders)
        assert response is not None
        assert response.get_status_code() == 201
        result = response.get_result()
        assert result is not None
        assert result.get('id') is not None
        assert result.get('guid') is not None
        assert result.get('crn') is not None
        assert result.get('id') == result.get('crn')
        assert result.get('name') == self.keyNames['name']
        assert result.get('account_id') == self.testAccountId
        assert result.get('resource_group_id') == self.testResourceGroupGuid
        assert result.get('source_crn') == self.testInstanceCrn
        assert result.get('state') == "active"
        # stash identifiers on the class for follow-up tests and cleanup
        self.__class__.testInstanceKeyCrn = result.get('id')
        self.__class__.testInstanceKeyGuid = result.get('guid')
        assert self.testInstanceKeyCrn != ''
        assert self.testInstanceKeyGuid != ''
    def test_19_get_resource_key(self):
        """Fetch the instance key by GUID and verify its fields."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test19-" + \
            self.transactionId
        response = self.service.get_resource_key(
            self.testInstanceKeyGuid, headers=customHeaders)
        assert response is not None
        assert response.get_status_code() == 200
        result = response.get_result()
        assert result is not None
        assert result.get('id') == self.testInstanceKeyCrn
        assert result.get('guid') == self.testInstanceKeyGuid
        assert result.get('crn') == self.testInstanceKeyCrn
        assert result.get('name') == self.keyNames['name']
        assert result.get('account_id') == self.testAccountId
        assert result.get('resource_group_id') == self.testResourceGroupGuid
        assert result.get('source_crn') == self.testInstanceCrn
        assert result.get('state') == "active"
    def test_20_update_resource_key(self):
        """Rename the instance key and verify the update took effect."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test20-" + \
            self.transactionId
        response = self.service.update_resource_key(
            self.testInstanceKeyGuid, self.keyNames['update'], headers=customHeaders)
        assert response is not None
        assert response.get_status_code() == 200
        result = response.get_result()
        assert result is not None
        assert result.get('id') == self.testInstanceKeyCrn
        assert result.get('name') == self.keyNames['update']
        assert result.get('state') == "active"
def test_21_list_resource_keys_no_filter(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test21-" + \
self.transactionId
start = None
result_count = 0
while True:
response = self.service.list_resource_keys(
limit=results_per_page,
start=start,
headers=customHeaders)
assert response is not None
assert response.get_status_code() == 200
result = response.get_result()
assert result is not None
result_count+= result.get('rows_count')
start = get_query_param(result.get('next_url'), 'start')
if start is None:
break
assert result_count > 0
    def test_22_list_resource_keys_by_guid(self):
        """List keys filtered by GUID; verify the single returned key's fields."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test22-" + \
            self.transactionId
        response = self.service.list_resource_keys(
            guid=self.testInstanceKeyGuid, headers=customHeaders)
        assert response is not None
        assert response.get_status_code() == 200
        result = response.get_result()
        assert result is not None
        assert len(result.get('resources')) == 1
        assert result.get('rows_count') == 1
        key = result.get('resources')[0]
        assert key.get('id') == self.testInstanceKeyCrn
        assert key.get('guid') == self.testInstanceKeyGuid
        assert key.get('name') == self.keyNames['update']
        assert key.get('resource_group_id') == self.testResourceGroupGuid
        assert key.get('source_crn') == self.testInstanceCrn
        assert key.get('state') == "active"
    def test_23_list_resource_keys_by_name(self):
        """List keys filtered by the updated name; exactly one match is expected."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test23-" + \
            self.transactionId
        response = self.service.list_resource_keys(
            name=self.keyNames['update'], headers=customHeaders)
        assert response is not None
        assert response.get_status_code() == 200
        result = response.get_result()
        assert result is not None
        assert len(result.get('resources')) == 1
        assert result.get('rows_count') == 1
    def test_23a_list_resource_keys_for_instance(self):
        """Page through the keys of the test instance; each page holds one key."""
        assert self.testInstanceGuid is not None
        start = None
        while True:
            response = self.service.list_resource_keys_for_instance(
                id=self.testInstanceGuid,
                limit=results_per_page,
                start=start)
            assert response is not None
            assert response.get_status_code() == 200
            result = response.get_result()
            assert result is not None
            assert len(result.get('resources')) == 1
            assert result.get('rows_count') == 1
            # follow the pagination token until the last page
            start = get_query_param(result.get('next_url'), 'start')
            if start is None:
                break
def test_24_create_resource_key_for_alias(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test24-" + \
self.transactionId
response = self.service.create_resource_key(
self.keyNames['name2'],
self.testAliasGuid,
headers=customHeaders)
assert response is not None
assert response.get_status_code() == 201
result = response.get_result()
assert result is not None
assert result.get('id') is not None
assert result.get('guid') is not None
assert result.get('crn') is not None
assert result.get('id') == result.get('crn')
assert result.get('name') == self.keyNames['name2']
assert result.get('account_id') == self.testAccountId
assert result.get('resource_group_id') == self.testResourceGroupGuid
assert result.get('source_crn') == self.testAliasCrn
assert result.get('state') == "active"
self.__class__.testAliasKeyCrn = result.get('id')
self.__class__.testAliasKeyGuid = result.get('guid')
assert self.testAliasKeyCrn != ''
assert self.testAliasKeyCrn != ''
    def test_25_get_resource_key(self):
        """Fetch the alias key by GUID and verify its fields."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test25-" + \
            self.transactionId
        response = self.service.get_resource_key(
            self.testAliasKeyGuid, headers=customHeaders)
        assert response is not None
        assert response.get_status_code() == 200
        result = response.get_result()
        assert result is not None
        assert result.get('id') == self.testAliasKeyCrn
        assert result.get('guid') == self.testAliasKeyGuid
        assert result.get('crn') == self.testAliasKeyCrn
        assert result.get('name') == self.keyNames['name2']
        assert result.get('account_id') == self.testAccountId
        assert result.get('resource_group_id') == self.testResourceGroupGuid
        assert result.get('source_crn') == self.testAliasCrn
        assert result.get('state') == "active"
    def test_26_update_resource_key(self):
        """Rename the alias key and verify the update took effect."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test26-" + \
            self.transactionId
        response = self.service.update_resource_key(
            self.testAliasKeyGuid, self.keyNames['update2'], headers=customHeaders)
        assert response is not None
        assert response.get_status_code() == 200
        result = response.get_result()
        assert result is not None
        assert result.get('id') == self.testAliasKeyCrn
        assert result.get('name') == self.keyNames['update2']
        assert result.get('state') == "active"
def test_27_list_resource_keys_no_filter(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test27-" + \
self.transactionId
response = self.service.list_resource_keys(headers=customHeaders)
assert response is not None
assert response.get_status_code() == 200
result = response.get_result()
assert result is not None
assert len(result.get('resources')) >= 1
assert result.get('rows_count') >= 1
    def test_28_list_resource_keys_by_guid(self):
        """List keys filtered by the alias-key GUID; verify the returned key's fields."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test28-" + \
            self.transactionId
        response = self.service.list_resource_keys(
            guid=self.testAliasKeyGuid, headers=customHeaders)
        assert response is not None
        assert response.get_status_code() == 200
        result = response.get_result()
        assert result is not None
        assert len(result.get('resources')) == 1
        assert result.get('rows_count') == 1
        key = result.get('resources')[0]
        assert key.get('id') == self.testAliasKeyCrn
        assert key.get('guid') == self.testAliasKeyGuid
        assert key.get('name') == self.keyNames['update2']
        assert key.get('resource_group_id') == self.testResourceGroupGuid
        assert key.get('source_crn') == self.testAliasCrn
        assert key.get('state') == "active"
    def test_29_list_resource_keys_by_name(self):
        """List keys filtered by the updated alias-key name; exactly one match."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test29-" + \
            self.transactionId
        response = self.service.list_resource_keys(
            name=self.keyNames['update2'], headers=customHeaders)
        assert response is not None
        assert response.get_status_code() == 200
        result = response.get_result()
        assert result is not None
        assert len(result.get('resources')) == 1
        assert result.get('rows_count') == 1
def test_30_delete_resource_alias_fail(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test30-" + \
self.transactionId
with pytest.raises(ApiException) as e:
response = self.service.delete_resource_alias(
self.testAliasGuid,
headers=customHeaders)
assert response is not None
assert response.get_status_code() == 400
def test_31_delete_resource_instance_fail(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test31-" + \
self.transactionId
with pytest.raises(ApiException) as e:
response = self.service.delete_resource_instance(
self.testInstanceGuid,
headers=customHeaders)
assert response is not None
assert response.get_status_code() == 400
    def test_32_delete_resource_binding(self):
        """Delete the test binding; a 204 indicates success."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test32-" + \
            self.transactionId
        response = self.service.delete_resource_binding(
            self.testBindingGuid,
            headers=customHeaders)
        assert response is not None
        assert response.get_status_code() == 204
    def test_33_verify_resource_binding_was_deleted(self):
        """The deleted binding is still retrievable but reports state 'removed'."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test33-" + \
            self.transactionId
        response = self.service.get_resource_binding(
            self.testBindingGuid,
            headers=customHeaders)
        assert response is not None
        assert response.get_status_code() == 200
        result = response.get_result()
        assert result is not None
        assert result.get('id') == self.testBindingCrn
        assert result.get('state') == "removed"
    def test_34_delete_resource_keys(self):
        """Delete both credential keys (instance key and alias key)."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test34-" + \
            self.transactionId
        response = self.service.delete_resource_key(
            self.testInstanceKeyGuid, headers=customHeaders)
        assert response is not None
        assert response.get_status_code() == 204
        customHeaders2 = {}
        customHeaders2["Transaction-Id"] = "rc-sdk-python-test34-" + \
            self.transactionId
        response2 = self.service.delete_resource_key(
            self.testAliasKeyGuid, headers=customHeaders2)
        assert response2 is not None
        assert response2.get_status_code() == 204
    def test_35_verify_resource_keys_were_deleted(self):
        """Both deleted keys are still retrievable but report state 'removed'."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test35-" + \
            self.transactionId
        response = self.service.get_resource_key(
            self.testInstanceKeyGuid,
            headers=customHeaders)
        assert response is not None
        assert response.get_status_code() == 200
        result = response.get_result()
        assert result is not None
        assert result.get('id') == self.testInstanceKeyCrn
        assert result.get('state') == "removed"
        customHeaders2 = {}
        customHeaders2["Transaction-Id"] = "rc-sdk-python-test35-" + \
            self.transactionId
        response2 = self.service.get_resource_key(
            self.testAliasKeyGuid, headers=customHeaders2)
        assert response2 is not None
        assert response2.get_status_code() == 200
        result2 = response2.get_result()
        assert result2 is not None
        assert result2.get('id') == self.testAliasKeyCrn
        assert result2.get('state') == "removed"
    def test_36_delete_resource_alias(self):
        """Delete the test alias; a 204 indicates success."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test36-" + \
            self.transactionId
        response = self.service.delete_resource_alias(
            self.testAliasGuid, headers=customHeaders)
        assert response is not None
        assert response.get_status_code() == 204
    def test_37_verify_resource_alias_was_deleted(self):
        """The deleted alias is still retrievable but reports state 'removed'."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test37-" + \
            self.transactionId
        response = self.service.get_resource_alias(
            self.testAliasGuid, headers=customHeaders)
        assert response is not None
        assert response.get_status_code() == 200
        result = response.get_result()
        assert result is not None
        assert result.get('id') == self.testAliasCrn
        assert result.get('state') == "removed"
    def test_38_lock_resource_instance(self):
        """Lock the test instance and verify the lock operation succeeded."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test38-" + \
            self.transactionId
        response = self.service.lock_resource_instance(
            self.testInstanceGuid, headers=customHeaders)
        assert response is not None
        assert response.get_status_code() == 200
        result = response.get_result()
        assert result is not None
        assert result.get('id') == self.testInstanceCrn
        assert result.get('locked')
        assert result.get('last_operation').get('type') == "lock"
        assert not result.get('last_operation').get('async')
        assert result.get('last_operation').get('state') == "succeeded"
def test_39_update_locked_resource_instance_fail(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test39-" + \
self.transactionId
with pytest.raises(ApiException) as e:
response = self.service.update_resource_instance(
self.testInstanceGuid,
name=self.lockedInstanceNameUpdate,
headers=customHeaders
)
assert response is not None
assert response.get_status_code() == 400
def test_40_delete__locked_resource_instance_fail(self):
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-test40-" + \
self.transactionId
with pytest.raises(ApiException) as e:
response = self.service.delete_resource_instance(
self.testInstanceGuid, headers=customHeaders)
assert response is not None
assert response.get_status_code() == 400
    def test_41_unlock_resource_instance(self):
        """Unlock the test instance and verify the unlock operation succeeded."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test41-" + \
            self.transactionId
        response = self.service.unlock_resource_instance(
            self.testInstanceGuid, headers=customHeaders)
        assert response is not None
        assert response.get_status_code() == 200
        result = response.get_result()
        assert result is not None
        assert result.get('id') == self.testInstanceCrn
        assert not result.get('locked')
        assert result.get('last_operation').get('type') == "unlock"
        assert not result.get('last_operation').get('async')
        assert result.get('last_operation').get('state') == "succeeded"
    def test_42_delete_resource_instance(self):
        """Delete the (now unlocked) test instance; a 204 indicates success."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test42-" + \
            self.transactionId
        response = self.service.delete_resource_instance(
            id=self.testInstanceGuid,
            recursive=False,
            headers=customHeaders)
        assert response is not None
        assert response.get_status_code() == 204
    def test_43_verify_resource_instance_was_deleted(self):
        """The deleted instance is still retrievable but reports state 'removed'."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test43-" + \
            self.transactionId
        response = self.service.get_resource_instance(
            self.testInstanceGuid, headers=customHeaders)
        assert response is not None
        assert response.get_status_code() == 200
        result = response.get_result()
        assert result is not None
        assert result.get('id') == self.testInstanceCrn
        assert result.get('state') == "removed"
    def test_44_create_resource_instance_for_reclamation_enabled_plan(self):
        """Create an instance on a reclamation-enabled plan and record its CRN/GUID."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test44-" + \
            self.transactionId
        response = self.service.create_resource_instance(
            self.reclaimInstanceName,
            self.testRegionId2,
            self.testResourceGroupGuid,
            self.testPlanId2,
            headers=customHeaders
        )
        assert response is not None
        assert response.get_status_code() == 201
        result = response.get_result()
        assert result is not None
        assert result.get('id') is not None
        assert result.get('guid') is not None
        assert result.get('crn') is not None
        assert result.get('id') == result.get('crn')
        assert result.get('name') == self.reclaimInstanceName
        assert result.get('account_id') == self.testAccountId
        assert result.get('resource_group_id') == self.testResourceGroupGuid
        assert result.get('resource_plan_id') == self.testPlanId2
        assert result.get('state') == "active"
        assert not result.get('locked')
        assert result.get('last_operation').get('type') == "create"
        assert not result.get('last_operation').get('async')
        assert result.get('last_operation').get('state') == "succeeded"
        # stash identifiers on the class for the reclamation tests and cleanup
        self.__class__.testReclaimInstanceCrn = result.get('id')
        self.__class__.testReclaimInstanceGuid = result.get('guid')
        assert self.testReclaimInstanceCrn != ''
        assert self.testReclaimInstanceGuid != ''
    def test_45_schedule_resource_instance_for_reclamation(self):
        """Deleting the reclaim-plan instance schedules it for reclamation."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test45-" + \
            self.transactionId
        response = self.service.delete_resource_instance(
            self.testReclaimInstanceGuid, headers=customHeaders)
        assert response is not None
        assert response.get_status_code() == 204
        # give the backend time to create the reclamation record
        time.sleep(20)
    def test_47_list_reclamation_for_account_id(self):
        """Find the SCHEDULED reclamation created for the reclaim instance."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test47-" + \
            self.transactionId
        response = self.service.list_reclamations(
            resource_instance_id=self.testReclaimInstanceGuid,
            headers=customHeaders
        )
        assert response is not None
        assert response.get_status_code() == 200
        result = response.get_result()
        assert len(result.get('resources')) >= 1
        foundReclamation = False
        # scan the listing for the reclamation of our instance
        for res in result.get('resources'):
            if res.get('resource_instance_id') == self.testReclaimInstanceGuid:
                assert res.get(
                    'resource_instance_id') == self.testReclaimInstanceGuid
                assert res.get('account_id') == self.testAccountId
                assert res.get(
                    'resource_group_id') == self.testResourceGroupGuid
                assert res.get('state') == 'SCHEDULED'
                foundReclamation = True
                # remember the reclamation id for the restore test
                self.__class__.testReclamationId1 = res.get('id')
        assert foundReclamation
        assert self.testReclamationId1 != ''
    def test_48_restore_resource_instance(self):
        """Run the 'restore' reclamation action and verify the RESTORING state."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test48-" + \
            self.transactionId
        response = self.service.run_reclamation_action(
            self.testReclamationId1, 'restore', headers=customHeaders)
        assert response is not None
        assert response.get_status_code() == 200
        result = response.get_result()
        assert result.get('id') == self.testReclamationId1
        assert result.get(
            'resource_instance_id') == self.testReclaimInstanceGuid
        assert result.get('account_id') == self.testAccountId
        assert result.get('resource_group_id') == self.testResourceGroupGuid
        assert result.get('state') == 'RESTORING'
        # give the backend time to finish the restore
        time.sleep(20)
    def test_50_schedule_resource_instance_for_reclamation2(self):
        """Schedule the restored instance for reclamation a second time."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test50-" + \
            self.transactionId
        response = self.service.delete_resource_instance(
            self.testReclaimInstanceGuid, headers=customHeaders)
        assert response is not None
        assert response.get_status_code() == 204
        # give the backend time to create the reclamation record
        time.sleep(20)
    def test_51_list_reclamation_for_account_id_and_resource_instance_id(self):
        """List reclamations filtered by account AND instance; exactly one match."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test51-" + \
            self.transactionId
        response = self.service.list_reclamations(
            account_id=self.testAccountId,
            resource_instance_id=self.testReclaimInstanceGuid,
            headers=customHeaders
        )
        assert response is not None
        assert response.get_status_code() == 200
        result = response.get_result()
        assert len(result.get('resources')) == 1
        res = result.get('resources')[0]
        assert res.get('resource_instance_id') == self.testReclaimInstanceGuid
        assert res.get('account_id') == self.testAccountId
        assert res.get('resource_group_id') == self.testResourceGroupGuid
        assert res.get('state') == 'SCHEDULED'
        # remember the reclamation id for the reclaim test
        self.__class__.testReclamationId2 = res.get('id')
        assert self.testReclamationId2 != ''
    def test_52_reclaim_resource_instance(self):
        """Run the 'reclaim' reclamation action and verify the RECLAIMING state."""
        customHeaders = {}
        customHeaders["Transaction-Id"] = "rc-sdk-python-test52-" + \
            self.transactionId
        response = self.service.run_reclamation_action(
            self.testReclamationId2, 'reclaim', headers=customHeaders)
        assert response is not None
        assert response.get_status_code() == 200
        result = response.get_result()
        assert result.get('id') == self.testReclamationId2
        assert result.get(
            'resource_instance_id') == self.testReclaimInstanceGuid
        assert result.get('account_id') == self.testAccountId
        assert result.get('resource_group_id') == self.testResourceGroupGuid
        assert result.get('state') == 'RECLAIMING'
        # give the backend time to finish the reclamation
        time.sleep(20)
    @classmethod
    def cleanupResources(cls):
        """Best-effort teardown of the key/binding/alias/instance created by the tests.

        Each deletion tolerates a 410 (already deleted by a test) and logs,
        rather than raises, on any other API failure.
        """
        # instance key
        if cls.testInstanceKeyGuid != '':
            try:
                customHeaders = {}
                customHeaders["Transaction-Id"] = "rc-sdk-python-cleanup-" + \
                    cls.transactionId
                cls.service.delete_resource_key(
                    cls.testInstanceKeyGuid, headers=customHeaders)
                print('\nSuccessfully cleaned up key ' +
                      cls.testInstanceKeyGuid + '.')
            except ApiException as errResponse:
                if errResponse.code == 410:
                    print('\nKey ' + cls.testInstanceKeyGuid +
                          ' was already deleted by the tests.')
                else:
                    print('\nFailed to clean up key ' +
                          cls.testInstanceKeyGuid + '. Error: ' + errResponse.message)
        else:
            print('\nKey was not created. No cleanup needed.')
        # alias key
        if cls.testAliasKeyGuid != '':
            try:
                customHeaders = {}
                customHeaders["Transaction-Id"] = "rc-sdk-python-cleanup-" + \
                    cls.transactionId
                cls.service.delete_resource_key(
                    cls.testAliasKeyGuid, headers=customHeaders)
                print('\nSuccessfully cleaned up key ' +
                      cls.testAliasKeyGuid + '.')
            except ApiException as errResponse:
                if errResponse.code == 410:
                    print('\nKey ' + cls.testAliasKeyGuid +
                          ' was already deleted by the tests.')
                else:
                    print('\nFailed to clean up key ' +
                          cls.testAliasKeyGuid + '. Error: ' + errResponse.message)
        else:
            print('\nKey was not created. No cleanup needed.')
        # binding
        if cls.testBindingGuid != '':
            try:
                customHeaders = {}
                customHeaders["Transaction-Id"] = "rc-sdk-python-cleanup-" + \
                    cls.transactionId
                cls.service.delete_resource_binding(
                    cls.testBindingGuid, headers=customHeaders)
                print('\nSuccessfully cleaned up binding ' +
                      cls.testBindingGuid + '.')
            except ApiException as errResponse:
                if errResponse.code == 410:
                    print('\nBinding ' + cls.testBindingGuid +
                          ' was already deleted by the tests.')
                else:
                    print('\nFailed to clean up binding ' +
                          cls.testBindingGuid + '. Error: ' + errResponse.message)
        else:
            print('\nBinding was not created. No cleanup needed.')
        # alias
        if cls.testAliasGuid != '':
            try:
                customHeaders = {}
                customHeaders["Transaction-Id"] = "rc-sdk-python-cleanup-" + \
                    cls.transactionId
                cls.service.delete_resource_alias(
                    cls.testAliasGuid, headers=customHeaders)
                print('\nSuccessfully cleaned up alias ' +
                      cls.testAliasGuid + '.')
            except ApiException as errResponse:
                if errResponse.code == 410:
                    print('\nAlias ' + cls.testAliasGuid +
                          ' was already deleted by the tests.')
                else:
                    print('\nFailed to clean up alias ' +
                          cls.testAliasGuid + '. Error: ' + errResponse.message)
        else:
            print('\nAlias was not created. No cleanup needed.')
        # instance (may need unlocking first; delegated)
        if cls.testInstanceGuid != '':
            cls.cleanupInstance()
        else:
            print('\nInstance was not created. No cleanup needed.')
@classmethod
def cleanupByName(cls):
for resourceKeyName in cls.keyNames.values():
try:
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-cleanup-by-name-" + \
cls.transactionId
response = cls.service.list_resource_keys(
name=resourceKeyName, headers=customHeaders)
except ApiException as errResponse:
print('\nFailed to retrieve key with name' + resourceKeyName +
' for cleanup. Error: ' + errResponse.message)
else:
resources = response.get_result().get('resources')
if (len(resources) > 0):
for res in resources:
keyGuid = res.get('guid')
try:
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-cleanup-by-name-" + \
cls.transactionId
cls.service.delete_resource_key(
keyGuid, headers=customHeaders)
print('\nSuccessfully cleaned up key ' + keyGuid + '.')
except ApiException as errResponse:
if errResponse.code == 410:
print('\nKey ' + keyGuid +
' was already deleted by the tests.')
else:
print('\nFailed to clean up key ' +
keyGuid + '. Error: ' + errResponse.message)
else:
print('\nNo keys found for name ' + resourceKeyName)
for resourceInstanceName in cls.instanceNames.values():
try:
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-cleanup-by-name-" + \
cls.transactionId
response = cls.service.list_resource_instances(
name=resourceInstanceName, headers=customHeaders)
except ApiException as errResponse:
print('\nFailed to retrieve instance with name' +
resourceInstanceName + ' for cleanup. Error: ' + errResponse.message)
else:
resources = response.get_result().get('resources')
if (len(resources) > 0):
for res in resources:
instanceGuid = res.get('guid')
if res.get('state') == "active" and res.get('locked'):
try:
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-cleanup-by-name" + \
cls.transactionId
cls.service.unlock_resource_instance(
instanceGuid, headers=customHeaders)
print('\nSuccessfully unlocked instance ' +
instanceGuid + ' for cleanup.')
except ApiException as errResponse:
print('\nFailed to unlock instance ' + instanceGuid +
' for cleanup. Error: ' + errResponse.message)
try:
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-cleanup-by-name-" + \
cls.transactionId
cls.service.delete_resource_instance(
instanceGuid, headers=customHeaders)
print('\nSuccessfully cleaned up instance ' +
instanceGuid + '.')
except ApiException as errResponse:
if errResponse.code == 410:
print('\nInstance ' + instanceGuid +
' was already deleted by the tests.')
else:
print('\nFailed to clean up instance ' +
instanceGuid + '. Error: ' + errResponse.message)
else:
print('\nNo instances found for name ' + resourceInstanceName)
for resourceBindingName in cls.bindingNames.values():
try:
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-cleanup-by-name-" + \
cls.transactionId
response = cls.service.list_resource_bindings(
name=resourceBindingName, headers=customHeaders)
except ApiException as errResponse:
print('\nFailed to retrieve binding with name' +
resourceBindingName + ' for cleanup. Error: ' + errResponse.message)
else:
resources = response.get_result().get('resources')
if (len(resources) > 0):
for res in resources:
bindingGuid = res.get('guid')
try:
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-cleanup-by-name-" + \
cls.transactionId
cls.service.delete_resource_key(
bindingGuid, headers=customHeaders)
print('\nSuccessfully cleaned up binding ' +
bindingGuid + '.')
except ApiException as errResponse:
if errResponse.code == 410:
print('\nBinding ' + bindingGuid +
' was already deleted by the tests.')
else:
print('\nFailed to clean up binding ' +
bindingGuid + '. Error: ' + errResponse.message)
else:
print('\nNo bindings found for name ' + resourceBindingName)
for resourceAliasName in cls.aliasNames.values():
try:
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-cleanup-by-name-" + \
cls.transactionId
response = cls.service.list_resource_aliases(
name=resourceAliasName, headers=customHeaders)
except ApiException as errResponse:
print('\nFailed to retrieve alias with name' +
resourceAliasName + ' for cleanup. Error: ' + errResponse.message)
else:
resources = response.get_result().get('resources')
if (len(resources) > 0):
for res in resources:
aliasGuid = res.get('guid')
try:
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-cleanup-by-name-" + \
cls.transactionId
cls.service.delete_resource_alias(
aliasGuid, headers=customHeaders)
print('\nSuccessfully cleaned up alias ' +
aliasGuid + '.')
except ApiException as errResponse:
if errResponse.code == 410:
print('\nAlias ' + aliasGuid +
' was already deleted by the tests.')
else:
print('\nFailed to clean up alias ' +
aliasGuid + '. Error: ' + errResponse.message)
else:
print('\nNo aliases found for name ' + resourceAliasName)
    @classmethod
    def cleanupInstance(cls):
        """Unlock (if needed) and delete the main test instance; log failures."""
        try:
            customHeaders = {}
            customHeaders["Transaction-Id"] = "rc-sdk-python-cleanup-" + \
                cls.transactionId
            response = cls.service.get_resource_instance(
                cls.testInstanceGuid, headers=customHeaders)
        except ApiException as errResponse:
            print('\nFailed to retrieve instance ' + cls.testInstanceGuid +
                  ' for cleanup. Error: ' + errResponse.message)
        else:
            # a locked active instance must be unlocked before deletion
            if response.get_result().get('state') == "active" and response.get_result().get('locked'):
                try:
                    customHeaders = {}
                    customHeaders["Transaction-Id"] = "rc-sdk-python-cleanup-" + \
                        cls.transactionId
                    cls.service.unlock_resource_instance(
                        cls.testInstanceGuid, headers=customHeaders)
                    print('\nSuccessfully unlocked instance ' +
                          cls.testInstanceGuid + ' for cleanup.')
                except ApiException as errResponse:
                    print('\nFailed to unlock instance ' + cls.testInstanceGuid +
                          ' for cleanup. Error: ' + errResponse.message)
            try:
                customHeaders = {}
                customHeaders["Transaction-Id"] = "rc-sdk-python-cleanup-" + \
                    cls.transactionId
                cls.service.delete_resource_instance(
                    cls.testInstanceGuid, headers=customHeaders)
                print('\nSuccessfully cleaned up instance ' +
                      cls.testInstanceGuid + '.')
            except ApiException as errResponse:
                if errResponse.code == 410:
                    print('\nInstance ' + cls.testInstanceGuid +
                          ' was already deleted by the tests.')
                else:
                    print('\nFailed to clean up instance ' +
                          cls.testInstanceGuid + '. Error: ' + errResponse.message)
    @classmethod
    def cleanupReclamationInstance(cls):
        """Ensure the reclamation-test instance ends up fully reclaimed."""
        if cls.testReclaimInstanceGuid != '':
            try:
                customHeaders = {}
                customHeaders["Transaction-Id"] = "rc-sdk-python-cleanup-" + \
                    cls.transactionId
                response = cls.service.get_resource_instance(
                    cls.testReclaimInstanceGuid, headers=customHeaders)
            except ApiException as errResponse:
                print('\nFailed to retrieve instance ' + cls.testReclaimInstanceGuid +
                      ' for cleanup. Error: ' + errResponse.message)
            else:
                if response.get_result().get('state') == "removed":
                    print('\nInstance ' + cls.testReclaimInstanceGuid +
                          ' was already reclaimed by the tests.')
                elif response.get_result().get('state') == "pending_reclamation":
                    # reclamation already scheduled; just run the reclaim action
                    cls.cleanupInstancePendingReclamation()
                else:
                    # instance still live: deleting it schedules the reclamation
                    try:
                        customHeaders = {}
                        customHeaders["Transaction-Id"] = "rc-sdk-python-cleanup-" + \
                            cls.transactionId
                        cls.service.delete_resource_instance(
                            cls.testReclaimInstanceGuid, headers=customHeaders)
                        print('\nSuccessfully scheduled instance ' +
                              cls.testReclaimInstanceGuid + ' for reclamation.')
                    except ApiException as errResponse:
                        print('\nFailed to schedule instance ' + cls.testReclaimInstanceGuid +
                              ' for reclamation. Error: ' + errResponse.message)
                    else:
                        # give the backend time to create the reclamation record
                        time.sleep(20)
                        cls.cleanupInstancePendingReclamation()
        else:
            print('\nReclamation instance was not created. No cleanup needed.')
@classmethod
def cleanupInstancePendingReclamation(cls):
try:
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-cleanup-" + \
cls.transactionId
response = cls.service.list_reclamations(
account_id=cls.testAccountId,
resource_instance_id=cls.testReclaimInstanceGuid,
headers=customHeaders
)
except ApiException as errResponse:
print('\nFailed to retrieve reclamation to process to reclaim instance ' +
cls.testReclaimInstanceGuid + ' for cleanup. Error: ' + errResponse.message)
else:
res = response.get_result().get('resources')
if len(res) == 0:
print('\nNo reclamations for instance ' +
cls.testReclaimInstanceGuid + ' were returned.')
else:
reclamationId = res[0].get('id')
try:
customHeaders = {}
customHeaders["Transaction-Id"] = "rc-sdk-python-cleanup-" + \
cls.transactionId
response = cls.service.run_reclamation_action(
reclamationId, 'reclaim', headers=customHeaders)
print('\nSuccessfully reclaimed instance ' +
cls.testReclaimInstanceGuid)
except ApiException as errResponse:
print('\nFailed to process reclamation ' + reclamationId +
' for instance ' + cls.testInstanceGuid + '. Error: ' + errResponse.message)
| true | true |
from configparser import ConfigParser
import numpy as np
from sklearn.metrics import mean_squared_error, r2_score
import pygmo as pg
from tengp.individual import IndividualBuilder, NPIndividual
from tengp import Parameters, FunctionSet
from tengp_eval.coevolution import TrainersSet, GaPredictors
def fitness_function(individual, x, y):
    """Score *individual* on (x, y) via mean squared error.

    Returns a large penalty value when scoring fails with ValueError
    (e.g. invalid outputs from the transform).
    """
    predictions = individual.transform(x)
    try:
        return mean_squared_error(predictions, y)
    except ValueError:
        return 10e10
class cost_function:
    """pygmo-compatible problem wrapping CGP genotype evaluation.

    The attributes X and Y are read (and reassigned) by the coevolution
    driver, so their names are part of the public interface.
    """

    def __init__(self, X, Y, params, bounds):
        self.params = params
        self.bounds = bounds
        self.X = X
        self.Y = Y

    def fitness(self, x):
        """Decode the decision vector into an individual and score it."""
        candidate = NPIndividual(list(x), self.bounds, self.params)
        return [fitness_function(candidate, self.X, self.Y)]

    def get_bounds(self):
        """Return the decision-vector bounds expected by pygmo."""
        return self.bounds
def define_cgp_system(n_nodes, n_inputs, n_outputs, funset, max_back):
    """Build a real-valued CGP system description.

    Returns:
        A tuple of (IndividualBuilder, Parameters, bounds tuple).
    """
    params = Parameters(n_inputs, n_outputs, 1, n_nodes, funset,
                        real_valued=True, max_back=max_back)
    builder = IndividualBuilder(params)
    return builder, params, builder.create().bounds
def run_benchmark_coevolution(cp, x_train, y_train, funset):
    """Run the coevolutionary PSO benchmark.

    Evolves a CGP genotype with PSO while coevolving a set of fitness
    predictors (data subsets) that approximate the full-data fitness.
    Every GENS_STEP generations the champion is re-scored on the full
    training data and the active data subset is refreshed.

    Args:
        cp: ConfigParser with CGPPARAMS, OPTIMPARAMS and DEFAULT sections.
        x_train, y_train: full training data.
        funset: CGP function set.

    Returns:
        Fitness values from the PSO log, with the final champion's exact
        (full-data) fitness appended.
    """
    ib, params, bounds = define_cgp_system(
        cp.getint('CGPPARAMS', 'n_nodes'),
        x_train.shape[1] if len(x_train.shape) > 1 else 1,
        y_train.shape[1] if len(y_train.shape) > 1 else 1,
        funset,
        cp.getint('CGPPARAMS', 'max_back'))
    # setup the coevolution elements
    ts = TrainersSet(ib, 16, fitness_function, x_train, y_train)
    predictors = GaPredictors(x_train, y_train, 10, 24)
    predictors.evaluate_fitness(ts)
    x_reduced, y_reduced = predictors.best_predictors_data()
    GENS_STEP = 50
    # PSO initially optimizes against the reduced (predictor) data subset.
    cf = cost_function(x_reduced, y_reduced, params, bounds)
    prob = pg.problem(cf)
    algo = pg.algorithm(pg.pso(
        gen=GENS_STEP,
        omega=cp.getfloat('OPTIMPARAMS', 'omega'),
        eta1=cp.getfloat('OPTIMPARAMS', 'eta1'),
        eta2=cp.getfloat('OPTIMPARAMS', 'eta2'),
        memory=True))
    algo.set_verbosity(1)
    pop = pg.population(prob, cp.getint('DEFAULT', 'population_size'))
    n_gens = GENS_STEP
    while n_gens < 500:
        pop = algo.evolve(pop)
        # calculate exact fitness of champion and
        # add it to the trainers set
        champion = NPIndividual(pop.champion_x, cf.bounds, cf.params)
        try:
            champion.fitness = fitness_function(champion, x_train, y_train)
            ts.add_trainer(champion)
        except ValueError:
            print('unsuccessful adding of champion')
        # update random population
        ts.update_random_population()
        predictors.predictors_evolution_step(ts)
        print('changing the subset, best predictor: ', predictors.best_predictor.fitness)
        # Swap in the new best predictor's data subset for the next PSO steps.
        x_reduced, y_reduced = predictors.best_predictors_data()
        pop.problem.extract(object).X = x_reduced
        pop.problem.extract(object).Y = y_reduced
        n_gens += GENS_STEP
    uda = algo.extract(pg.pso)
    # Final champion re-scored on the full training data.
    champion = NPIndividual(pop.champion_x, cf.bounds, cf.params)
    champion.fitness = fitness_function(champion, x_train, y_train)
    fitnesses = [x[2] for x in uda.get_log()]
    fitnesses.append(champion.fitness)
    return fitnesses
def run_benchmark(cp, x_train, y_train, funset):
    """Run the plain simulated-annealing benchmark on the full data.

    Args:
        cp: ConfigParser with CGPPARAMS and OPTIMPARAMS sections.
        x_train, y_train: training data.
        funset: CGP function set.

    Returns:
        Best-fitness values from the annealer's verbosity log.
    """
    ib, params, bounds = define_cgp_system(
        cp.getint('CGPPARAMS', 'n_nodes'),
        x_train.shape[1] if len(x_train.shape) > 1 else 1,
        y_train.shape[1] if len(y_train.shape) > 1 else 1,
        funset,
        cp.getint('CGPPARAMS', 'max_back'))
    cf = cost_function(x_train, y_train, params, bounds)
    prob = pg.problem(cf)
    algo = pg.algorithm(pg.simulated_annealing(
        Ts=cp.getfloat('OPTIMPARAMS', 'Ts'),
        Tf=cp.getfloat('OPTIMPARAMS', 'Tf'),
        n_T_adj=cp.getint('OPTIMPARAMS', 'n_T_adj'),
        n_range_adj=cp.getint('OPTIMPARAMS', 'n_range_adj'),
        bin_size=cp.getint('OPTIMPARAMS', 'bin_size'),
        start_range=cp.getfloat('OPTIMPARAMS', 'start_range')))
    algo.set_verbosity(100)
    # Simulated annealing evolves a single-individual population.
    pop = pg.population(prob, 1)
    pop = algo.evolve(pop)
    uda = algo.extract(pg.simulated_annealing)
    return [x[2] for x in uda.get_log()]
# Benchmark entry points exposed to the experiment driver.
RUNNERS = [run_benchmark]
| 31.950704 | 101 | 0.65616 | from configparser import ConfigParser
import numpy as np
from sklearn.metrics import mean_squared_error, r2_score
import pygmo as pg
from tengp.individual import IndividualBuilder, NPIndividual
from tengp import Parameters, FunctionSet
from tengp_eval.coevolution import TrainersSet, GaPredictors
def fitness_function(individual, x, y):
    """Return the MSE of *individual* on (x, y); large penalty on ValueError."""
    output = individual.transform(x)
    try:
        return mean_squared_error(output, y)
    except ValueError:
        # Invalid outputs: return a large penalty value instead of raising.
        return 10e10
class cost_function:
    """pygmo-compatible problem wrapping CGP genotype evaluation."""
    def __init__(self, X, Y, params, bounds):
        self.params = params
        self.bounds = bounds
        self.X = X
        self.Y = Y
    def fitness(self, x):
        """Decode the decision vector into an individual and score it."""
        individual = NPIndividual(list(x), self.bounds, self.params)
        fitness = fitness_function(individual, self.X, self.Y)
        return [fitness]
    def get_bounds(self):
        """Return the decision-vector bounds expected by pygmo."""
        return self.bounds
def define_cgp_system(n_nodes, n_inputs, n_outputs, funset, max_back):
    """Define a real-valued CGP system; returns (builder, params, bounds)."""
    params = Parameters(n_inputs, n_outputs, 1, n_nodes, funset, real_valued=True, max_back=max_back)
    ib = IndividualBuilder(params)
    bounds = ib.create().bounds
    return ib, params, bounds
def run_benchmark_coevolution(cp, x_train, y_train, funset):
    """Run the coevolutionary PSO benchmark; returns logged fitness values."""
    ib, params, bounds = define_cgp_system(
        cp.getint('CGPPARAMS', 'n_nodes'),
        x_train.shape[1] if len(x_train.shape) > 1 else 1,
        y_train.shape[1] if len(y_train.shape) > 1 else 1,
        funset,
        cp.getint('CGPPARAMS', 'max_back'))
    ts = TrainersSet(ib, 16, fitness_function, x_train, y_train)
    predictors = GaPredictors(x_train, y_train, 10, 24)
    predictors.evaluate_fitness(ts)
    x_reduced, y_reduced = predictors.best_predictors_data()
    GENS_STEP = 50
    # PSO optimizes against the reduced (predictor) data subset.
    cf = cost_function(x_reduced, y_reduced, params, bounds)
    prob = pg.problem(cf)
    algo = pg.algorithm(pg.pso(
        gen=GENS_STEP,
        omega=cp.getfloat('OPTIMPARAMS', 'omega'),
        eta1=cp.getfloat('OPTIMPARAMS', 'eta1'),
        eta2=cp.getfloat('OPTIMPARAMS', 'eta2'),
        memory=True))
    algo.set_verbosity(1)
    pop = pg.population(prob, cp.getint('DEFAULT', 'population_size'))
    n_gens = GENS_STEP
    while n_gens < 500:
        pop = algo.evolve(pop)
        # Re-score the champion on the full data and feed the trainers set.
        champion = NPIndividual(pop.champion_x, cf.bounds, cf.params)
        try:
            champion.fitness = fitness_function(champion, x_train, y_train)
            ts.add_trainer(champion)
        except ValueError:
            print('unsuccessful adding of champion')
        ts.update_random_population()
        predictors.predictors_evolution_step(ts)
        print('changing the subset, best predictor: ', predictors.best_predictor.fitness)
        x_reduced, y_reduced = predictors.best_predictors_data()
        pop.problem.extract(object).X = x_reduced
        pop.problem.extract(object).Y = y_reduced
        n_gens += GENS_STEP
    uda = algo.extract(pg.pso)
    champion = NPIndividual(pop.champion_x, cf.bounds, cf.params)
    champion.fitness = fitness_function(champion, x_train, y_train)
    fitnesses = [x[2] for x in uda.get_log()]
    fitnesses.append(champion.fitness)
    return fitnesses
def run_benchmark(cp, x_train, y_train, funset):
    """Run the plain simulated-annealing benchmark on the full data."""
    ib, params, bounds = define_cgp_system(
        cp.getint('CGPPARAMS', 'n_nodes'),
        x_train.shape[1] if len(x_train.shape) > 1 else 1,
        y_train.shape[1] if len(y_train.shape) > 1 else 1,
        funset,
        cp.getint('CGPPARAMS', 'max_back'))
    cf = cost_function(x_train, y_train, params, bounds)
    prob = pg.problem(cf)
    algo = pg.algorithm(pg.simulated_annealing(
        Ts=cp.getfloat('OPTIMPARAMS', 'Ts'),
        Tf=cp.getfloat('OPTIMPARAMS', 'Tf'),
        n_T_adj=cp.getint('OPTIMPARAMS', 'n_T_adj'),
        n_range_adj=cp.getint('OPTIMPARAMS', 'n_range_adj'),
        bin_size=cp.getint('OPTIMPARAMS', 'bin_size'),
        start_range=cp.getfloat('OPTIMPARAMS', 'start_range')))
    algo.set_verbosity(100)
    pop = pg.population(prob, 1)
    pop = algo.evolve(pop)
    uda = algo.extract(pg.simulated_annealing)
    return [x[2] for x in uda.get_log()]
# Benchmark entry points exposed to the experiment driver.
RUNNERS = [run_benchmark]
| true | true |
f73dbda467ecdc2427f1f0af05f8e5e6550a9ea5 | 1,368 | py | Python | azure-mgmt-datamigration/azure/mgmt/datamigration/models/mongo_db_shard_key_field.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2019-05-10T19:58:09.000Z | 2019-05-10T19:58:09.000Z | azure-mgmt-datamigration/azure/mgmt/datamigration/models/mongo_db_shard_key_field.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | azure-mgmt-datamigration/azure/mgmt/datamigration/models/mongo_db_shard_key_field.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2021-07-28T14:50:54.000Z | 2021-07-28T14:50:54.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class MongoDbShardKeyField(Model):
    """Describes a field reference within a MongoDB shard key.
    All required parameters must be populated in order to send to Azure.
    :param name: Required. The name of the field
    :type name: str
    :param order: Required. The field ordering. Possible values include:
     'Forward', 'Reverse', 'Hashed'
    :type order: str or ~azure.mgmt.datamigration.models.MongoDbShardKeyOrder
    """
    # Required-field constraints consumed by the msrest Model machinery.
    _validation = {
        'name': {'required': True},
        'order': {'required': True},
    }
    # Attribute-name -> wire key/type mapping used for (de)serialization.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'order': {'key': 'order', 'type': 'str'},
    }
    def __init__(self, **kwargs):
        super(MongoDbShardKeyField, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.order = kwargs.get('order', None)
| 33.365854 | 77 | 0.589181 |
from msrest.serialization import Model
class MongoDbShardKeyField(Model):
    """Describes a field reference within a MongoDB shard key."""
    # Required-field constraints consumed by the msrest Model machinery.
    _validation = {
        'name': {'required': True},
        'order': {'required': True},
    }
    # Attribute-name -> wire key/type mapping used for (de)serialization.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'order': {'key': 'order', 'type': 'str'},
    }
    def __init__(self, **kwargs):
        super(MongoDbShardKeyField, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.order = kwargs.get('order', None)
| true | true |
f73dbdacb50a77a4c5613973e62bea13705ee353 | 8,838 | py | Python | admin.py | Borgli/Ohminator | 7aa12e3b0cf77b4c7da73e34fe5787554db41cc0 | [
"Apache-2.0"
] | 2 | 2018-01-13T15:27:04.000Z | 2018-03-25T20:41:30.000Z | admin.py | Borgli/Ohminator | 7aa12e3b0cf77b4c7da73e34fe5787554db41cc0 | [
"Apache-2.0"
] | 10 | 2018-01-13T16:55:03.000Z | 2019-11-12T09:09:27.000Z | admin.py | Borgli/Ohminator | 7aa12e3b0cf77b4c7da73e34fe5787554db41cc0 | [
"Apache-2.0"
] | null | null | null | import traceback
from utils import *
async def assign_default_role(client, member, role_name):
    """Give *member* the server role named *role_name*, if such a role exists."""
    matching = [role for role in member.server.roles if role.name == role_name]
    if not matching:
        return
    await client.add_roles(member, matching[0])
async def notify_of_leaving_person(client, member):
    """Announce in the server's bot channel that *member* left."""
    channel = get_server(member.server).bot_channel
    await client.send_message(
        channel, '**{}** just left {}. Bye, bye!'.format(member.name, member.server))
async def notify_of_joining_person(client, member):
    """Announce in the server's bot channel that *member* joined."""
    channel = get_server(member.server).bot_channel
    await client.send_message(
        channel, '**{}** just joined {}. Welcome!'.format(member.name, member.server))
# Used for broadcasting Ohminator announcements
@register_command("broadcast")
async def broadcast(message, bot_channel, client):
await client.delete_message(message)
if message.author.id != "159315181288030208":
await client.send_message(bot_channel,
"{}: Sorry, this command is only for the author of Ohminator!".format(
message.author.name))
return
split_message = message.content.split()
if len(split_message) > 2:
# If all is written instead of channel id, all bot-spam channels will be messaged
if split_message[1] == "all":
for channel in map(lambda s: s.bot_channel, server_list):
await client.send_message(channel, "**Announcement**: {}".format(" ".join(split_message[2:])))
else:
channel = client.get_channel(split_message[1])
if channel:
await client.send_message(channel, "**Announcement**: {}".format(" ".join(split_message[2:])))
else:
servers = list(filter(lambda s: s.name == split_message[1] or s.id == split_message[1], server_list))
if len(servers) > 0:
for server in servers:
await client.send_message(server.bot_channel,
"**Announcement**: {}".format(" ".join(split_message[2:])))
else:
await client.send_message(bot_channel,
"{}: No channel with the given ID or server with the given ID or name."
.format(message.author.name))
else:
await client.send_message(bot_channel,
"{}: Use: !broadcast [all/channel id/server name] [announcement]"
.format(message.author.name))
@register_command("move")
async def move(message, bot_channel, client):
await client.delete_message(message)
parameters = message.content.split()
if message.author.id == "184635136724303873" or message.author.id == "159315181288030208":
member = message.author.server.get_member("159315181288030208")
if member and message.author.voice_channel and member.voice_channel:
channel = message.author.voice_channel
if len(parameters) > 1:
try:
channel = message.author.server.get_channel(parameters[1])
except:
return
try:
await client.move_member(member=member, channel=channel)
except:
traceback.print_exc()
@register_command("settings")
async def settings(message, bot_channel, client):
await client.delete_message(message)
tokens = message.content.split()
if len(tokens) < 2:
await client.send_message(message.channel,
'{}: Usage !settings [client name or id] [([permission to change]'
' [value to change to])]'.format(message.author.name))
return
server = get_server(message.server)
if tokens[1] == message.server.id:
settings_source = server
else:
settings_source = server.get_channel(tokens[1])
if len(tokens) < 3:
# No other arguments -> list all settings for given channel
settings_str = "Settings for {} {}:".format("server" if settings_source == server else "channel", settings_source.name)
for key, val in settings_source.list_settings().items():
settings_str += "\n{}: {}".format(key, val)
await client.send_message(message.channel,
'{}: {}'.format(message.author.name, settings_str))
elif len(tokens) < 4:
await client.send_message(message.channel,
'{}: Usage !settings [client/server name or id] [([permission to change]'
' [value to change to])]'.format(message.author.name))
else:
if tokens[2] in settings_source.list_settings().keys():
settings_source.change_settings({tokens[2] : tokens[3]})
await client.send_message(message.channel,
'{}: The setting {} har been changed to {}.'.format(message.author.name, tokens[2], tokens[3]))
else:
await client.send_message(message.channel,
'{}: The setting {} does not exist.'.format(message.author.name, tokens[2]))
@register_command("getbotinvite", "gbi")
async def get_bot_invite(message, bot_channel, client):
await client.delete_message(message)
permissions = discord.Permissions.all()
await client.send_message(message.channel,
'{}: {}'.format(message.author.name,
discord.utils.oauth_url('176432800331857920', permissions=permissions)))
@register_command("suggest")
async def suggest(message, bot_channel, client):
suggestion = message.content[9:]
if len(suggestion) < 3:
await client.send_message(bot_channel,
"{}: Please suggest something proper.".format(message.author.mention))
return
server = get_server(message.server)
member = server.get_member(message.author.id)
suggestion_loc = 'suggestions.txt'.format(server.server_loc, member.member_loc)
with open(suggestion_loc, 'a') as f:
f.write("Suggestion from {} on server {}:\n{}\n".format(message.author, message.server, suggestion))
await client.send_message(bot_channel,
'{}: Your suggestion has been noted. Thank you!'.format(message.author.mention))
async def print_page(resource, message, bot_channel, client, prefix_user=True):
    """Send a documentation page (or the web-page ad) to the bot channel."""
    if resource == 'web-page-ad':
        text = "**Go to http://www.ohminator.com for a web version of the documentation.**"
    else:
        with open('resources/{}'.format(resource)) as f:
            text = f.read()
    header = "{}:\n".format(message.author.name) if prefix_user else ""
    await client.send_message(bot_channel, header + text)
@register_command("help", "commands", "command", "info")
async def help(message, bot_channel, client):
await client.delete_message(message)
async def print_help_page(help_resource, prefix_user=True):
return await print_page(help_resource, message, bot_channel, client, prefix_user)
if message.content.lower().startswith('!help audio'):
await print_help_page('help_audio.txt')
elif message.content.lower().startswith('!help intro'):
await print_help_page('help_intro.txt')
elif message.content.lower().startswith('!help util'):
await print_help_page('help_utils.txt')
elif message.content.lower().startswith('!help other'):
await print_help_page('help_others.txt')
elif message.content.lower().startswith('!help all'):
await print_help_page('help_all_1.txt')
await print_help_page('help_all_2.txt', False)
await print_help_page('help_all_3.txt', False)
elif message.content.lower().startswith('!help wow'):
await print_help_page('help_wow.txt')
else:
await print_help_page('web-page-ad')
await print_help_page('help.txt', False)
await print_help_page('summary.txt', False)
@register_command("summary")
async def summary(message, bot_channel, client):
await client.delete_message(message)
return await print_page('summary.txt', message, bot_channel, client)
@register_command("showtotalusers")
async def show_total_number_users(message, bot_channel, client):
await client.delete_message(message)
servers = sum(1 for _ in client.servers)
users = sum(1 for _ in client.get_all_members())
await client.send_message(bot_channel, "{}: Ohminator is currently serving {} server{}, {} user{}.".format(
message.author.name, servers, "s" if servers != 1 else "", users, "s" if users != 1 else ""))
| 48.032609 | 133 | 0.625368 | import traceback
from utils import *
async def assign_default_role(client, member, role_name):
    """Give *member* the server role named *role_name*, if it exists."""
    roles = list(filter(lambda k: k.name == role_name, member.server.roles))
    if len(roles) == 0:
        return
    await client.add_roles(member, roles[0])
async def notify_of_leaving_person(client, member):
    """Announce in the server's bot channel that *member* left."""
    bot_channel = get_server(member.server).bot_channel
    await client.send_message(bot_channel, '**{}** just left {}. Bye, bye!'.format(member.name, member.server))
async def notify_of_joining_person(client, member):
    """Announce in the server's bot channel that *member* joined."""
    bot_channel = get_server(member.server).bot_channel
    await client.send_message(bot_channel, '**{}** just joined {}. Welcome!'.format(member.name, member.server))
@register_command("broadcast")
async def broadcast(message, bot_channel, client):
await client.delete_message(message)
if message.author.id != "159315181288030208":
await client.send_message(bot_channel,
"{}: Sorry, this command is only for the author of Ohminator!".format(
message.author.name))
return
split_message = message.content.split()
if len(split_message) > 2:
if split_message[1] == "all":
for channel in map(lambda s: s.bot_channel, server_list):
await client.send_message(channel, "**Announcement**: {}".format(" ".join(split_message[2:])))
else:
channel = client.get_channel(split_message[1])
if channel:
await client.send_message(channel, "**Announcement**: {}".format(" ".join(split_message[2:])))
else:
servers = list(filter(lambda s: s.name == split_message[1] or s.id == split_message[1], server_list))
if len(servers) > 0:
for server in servers:
await client.send_message(server.bot_channel,
"**Announcement**: {}".format(" ".join(split_message[2:])))
else:
await client.send_message(bot_channel,
"{}: No channel with the given ID or server with the given ID or name."
.format(message.author.name))
else:
await client.send_message(bot_channel,
"{}: Use: !broadcast [all/channel id/server name] [announcement]"
.format(message.author.name))
@register_command("move")
async def move(message, bot_channel, client):
await client.delete_message(message)
parameters = message.content.split()
if message.author.id == "184635136724303873" or message.author.id == "159315181288030208":
member = message.author.server.get_member("159315181288030208")
if member and message.author.voice_channel and member.voice_channel:
channel = message.author.voice_channel
if len(parameters) > 1:
try:
channel = message.author.server.get_channel(parameters[1])
except:
return
try:
await client.move_member(member=member, channel=channel)
except:
traceback.print_exc()
@register_command("settings")
async def settings(message, bot_channel, client):
await client.delete_message(message)
tokens = message.content.split()
if len(tokens) < 2:
await client.send_message(message.channel,
'{}: Usage !settings [client name or id] [([permission to change]'
' [value to change to])]'.format(message.author.name))
return
server = get_server(message.server)
if tokens[1] == message.server.id:
settings_source = server
else:
settings_source = server.get_channel(tokens[1])
if len(tokens) < 3:
settings_str = "Settings for {} {}:".format("server" if settings_source == server else "channel", settings_source.name)
for key, val in settings_source.list_settings().items():
settings_str += "\n{}: {}".format(key, val)
await client.send_message(message.channel,
'{}: {}'.format(message.author.name, settings_str))
elif len(tokens) < 4:
await client.send_message(message.channel,
'{}: Usage !settings [client/server name or id] [([permission to change]'
' [value to change to])]'.format(message.author.name))
else:
if tokens[2] in settings_source.list_settings().keys():
settings_source.change_settings({tokens[2] : tokens[3]})
await client.send_message(message.channel,
'{}: The setting {} har been changed to {}.'.format(message.author.name, tokens[2], tokens[3]))
else:
await client.send_message(message.channel,
'{}: The setting {} does not exist.'.format(message.author.name, tokens[2]))
@register_command("getbotinvite", "gbi")
async def get_bot_invite(message, bot_channel, client):
await client.delete_message(message)
permissions = discord.Permissions.all()
await client.send_message(message.channel,
'{}: {}'.format(message.author.name,
discord.utils.oauth_url('176432800331857920', permissions=permissions)))
@register_command("suggest")
async def suggest(message, bot_channel, client):
suggestion = message.content[9:]
if len(suggestion) < 3:
await client.send_message(bot_channel,
"{}: Please suggest something proper.".format(message.author.mention))
return
server = get_server(message.server)
member = server.get_member(message.author.id)
suggestion_loc = 'suggestions.txt'.format(server.server_loc, member.member_loc)
with open(suggestion_loc, 'a') as f:
f.write("Suggestion from {} on server {}:\n{}\n".format(message.author, message.server, suggestion))
await client.send_message(bot_channel,
'{}: Your suggestion has been noted. Thank you!'.format(message.author.mention))
async def print_page(resource, message, bot_channel, client, prefix_user=True):
    """Send a documentation page (or the web-page ad) to the bot channel."""
    if resource == 'web-page-ad':
        content = "**Go to http://www.ohminator.com for a web version of the documentation.**"
    else:
        with open('resources/{}'.format(resource)) as f:
            content = f.read()
    help_page = "{}{}".format("{}:\n".format(message.author.name) if prefix_user else "", content)
    await client.send_message(bot_channel, help_page)
@register_command("help", "commands", "command", "info")
async def help(message, bot_channel, client):
    """Dispatch the '!help <topic>' documentation pages."""
    await client.delete_message(message)
    async def print_help_page(help_resource, prefix_user=True):
        return await print_page(help_resource, message, bot_channel, client, prefix_user)
    if message.content.lower().startswith('!help audio'):
        await print_help_page('help_audio.txt')
    elif message.content.lower().startswith('!help intro'):
        await print_help_page('help_intro.txt')
    elif message.content.lower().startswith('!help util'):
        await print_help_page('help_utils.txt')
    elif message.content.lower().startswith('!help other'):
        await print_help_page('help_others.txt')
    elif message.content.lower().startswith('!help all'):
        await print_help_page('help_all_1.txt')
        await print_help_page('help_all_2.txt', False)
        await print_help_page('help_all_3.txt', False)
    elif message.content.lower().startswith('!help wow'):
        await print_help_page('help_wow.txt')
    else:
        # No recognised topic: show the general documentation pages.
        await print_help_page('web-page-ad')
        await print_help_page('help.txt', False)
        await print_help_page('summary.txt', False)
@register_command("summary")
async def summary(message, bot_channel, client):
    """Delete the invoking message and post the command summary page."""
    await client.delete_message(message)
    return await print_page('summary.txt', message, bot_channel, client)
@register_command("showtotalusers")
async def show_total_number_users(message, bot_channel, client):
    """Report how many servers and users Ohminator is currently serving."""
    await client.delete_message(message)
    servers = sum(1 for _ in client.servers)
    users = sum(1 for _ in client.get_all_members())
    await client.send_message(bot_channel, "{}: Ohminator is currently serving {} server{}, {} user{}.".format(
        message.author.name, servers, "s" if servers != 1 else "", users, "s" if users != 1 else ""))
| true | true |
f73dbe2219095f4449bac4eb8025de0633123dec | 3,436 | py | Python | models/judge.py | Andrew-Talley/mock-trial-tab | 493b0b843d34c732dec724e8ab51f355835a3f46 | [
"MIT"
] | 1 | 2020-10-10T20:24:53.000Z | 2020-10-10T20:24:53.000Z | models/judge.py | Andrew-Talley/mock-trial-tab | 493b0b843d34c732dec724e8ab51f355835a3f46 | [
"MIT"
] | 1 | 2020-10-10T20:30:20.000Z | 2020-10-10T20:30:20.000Z | models/judge.py | Andrew-Talley/mock-trial-tab | 493b0b843d34c732dec724e8ab51f355835a3f46 | [
"MIT"
] | null | null | null | from models.connection import get_cnx, tables
judge_table = tables["judge"]
conflict_table = tables["conflict"]
ballots_table = tables["ballot"]
ballot_matchup_table = tables["ballot_matchup_info"]
class Judge:
    """Data-access helpers for judges, their conflicts and their ballots.

    NOTE(review): several queries accept a tournament_id argument but do not
    use it in their WHERE clause — presumably ids are globally unique; confirm.
    """
    @staticmethod
    def add_judge(tournament_id: int, name: str):
        """Insert a judge row and return its new id."""
        with get_cnx() as db:
            cursor = db.cursor()
            cursor.execute(
                f"INSERT INTO {judge_table} (tournament_id, name) VALUES (%s, %s)",
                (tournament_id, name),
            )
            db.commit()
            return cursor.lastrowid
    @staticmethod
    def get_judge(tournament_id: int, id: int):
        """Fetch a judge by id as a dict with id, name and tournament_id."""
        with get_cnx() as db:
            cursor = db.cursor()
            cursor.execute(
                f"SELECT tournament_id, id, name FROM {judge_table} WHERE id = %s",
                (id,),
            )
            tourn_id, judge_id, name = cursor.fetchone()
            return {"id": judge_id, "name": name, "tournament_id": tourn_id}
    @staticmethod
    def add_conflict(tournament_id: int, id: int, school: str):
        """Record that judge *id* is conflicted with *school*; returns row id."""
        with get_cnx() as db:
            cursor = db.cursor()
            cursor.execute(
                f"INSERT INTO {conflict_table} (tournament_id, judge_id, school_name) VALUES (%s, %s, %s)",
                (tournament_id, id, school),
            )
            db.commit()
            return cursor.lastrowid
    @staticmethod
    def get_conflicts(tournament_id: int, id: int):
        """Return the list of school names the judge is conflicted with."""
        with get_cnx() as db:
            cursor = db.cursor()
            cursor.execute(
                f"SELECT school_name FROM {conflict_table} WHERE judge_id = %s", (id,)
            )
            conflicts = [name for (name,) in cursor.fetchall()]
            return conflicts
    @staticmethod
    def get_ballots(tournament_id: int, judge_id: int):
        """Return all ballot ids assigned to the judge."""
        with get_cnx() as db:
            cursor = db.cursor()
            cursor.execute(
                f"SELECT id FROM {ballots_table} WHERE judge_id = %s", (judge_id,)
            )
            ballot_ids = [b_id for (b_id,) in cursor.fetchall()]
            return ballot_ids
    @staticmethod
    def get_ballot_for_round(tournament_id: int, judge_id: int, round_num: int):
        """Return the judge's ballot id for the round, or None if absent."""
        with get_cnx() as db:
            cursor = db.cursor()
            cursor.execute(
                f"SELECT ballot_id FROM {ballot_matchup_table} WHERE judge_id = %s AND round_num = %s",
                (judge_id, round_num),
            )
            ballot_ids = [b_id for (b_id,) in cursor.fetchall()]
            if len(ballot_ids) == 0:
                return None
            else:
                return ballot_ids[0]
    @staticmethod
    def set_email(judge_id: int, email: str):
        """Set (overwrite) the judge's email address."""
        with get_cnx() as db:
            cursor = db.cursor()
            cursor.execute(
                f"""
            UPDATE {judge_table}
            SET email = %s
            WHERE id = %s
            """,
                (email, judge_id),
            )
            db.commit()
    @staticmethod
    def get_email(judge_id: int):
        """Return the judge's stored email address."""
        with get_cnx() as db:
            cursor = db.cursor()
            cursor.execute(
                f"""
            SELECT email
            FROM {judge_table}
            WHERE id = %s
            """,
                (judge_id,),
            )
            (email,) = cursor.fetchone()
            return email
| 28.633333 | 107 | 0.508731 | from models.connection import get_cnx, tables
judge_table = tables["judge"]
conflict_table = tables["conflict"]
ballots_table = tables["ballot"]
ballot_matchup_table = tables["ballot_matchup_info"]
class Judge:
    """Data-access helpers for judges, their conflicts and their ballots."""
    @staticmethod
    def add_judge(tournament_id: int, name: str):
        """Insert a judge row and return its new id."""
        with get_cnx() as db:
            cursor = db.cursor()
            cursor.execute(
                f"INSERT INTO {judge_table} (tournament_id, name) VALUES (%s, %s)",
                (tournament_id, name),
            )
            db.commit()
            return cursor.lastrowid
    @staticmethod
    def get_judge(tournament_id: int, id: int):
        """Fetch a judge by id as a dict with id, name and tournament_id."""
        with get_cnx() as db:
            cursor = db.cursor()
            cursor.execute(
                f"SELECT tournament_id, id, name FROM {judge_table} WHERE id = %s",
                (id,),
            )
            tourn_id, judge_id, name = cursor.fetchone()
            return {"id": judge_id, "name": name, "tournament_id": tourn_id}
    @staticmethod
    def add_conflict(tournament_id: int, id: int, school: str):
        """Record that judge *id* is conflicted with *school*; returns row id."""
        with get_cnx() as db:
            cursor = db.cursor()
            cursor.execute(
                f"INSERT INTO {conflict_table} (tournament_id, judge_id, school_name) VALUES (%s, %s, %s)",
                (tournament_id, id, school),
            )
            db.commit()
            return cursor.lastrowid
    @staticmethod
    def get_conflicts(tournament_id: int, id: int):
        """Return the list of school names the judge is conflicted with."""
        with get_cnx() as db:
            cursor = db.cursor()
            cursor.execute(
                f"SELECT school_name FROM {conflict_table} WHERE judge_id = %s", (id,)
            )
            conflicts = [name for (name,) in cursor.fetchall()]
            return conflicts
    @staticmethod
    def get_ballots(tournament_id: int, judge_id: int):
        """Return all ballot ids assigned to the judge."""
        with get_cnx() as db:
            cursor = db.cursor()
            cursor.execute(
                f"SELECT id FROM {ballots_table} WHERE judge_id = %s", (judge_id,)
            )
            ballot_ids = [b_id for (b_id,) in cursor.fetchall()]
            return ballot_ids
    @staticmethod
    def get_ballot_for_round(tournament_id: int, judge_id: int, round_num: int):
        """Return the judge's ballot id for the round, or None if absent."""
        with get_cnx() as db:
            cursor = db.cursor()
            cursor.execute(
                f"SELECT ballot_id FROM {ballot_matchup_table} WHERE judge_id = %s AND round_num = %s",
                (judge_id, round_num),
            )
            ballot_ids = [b_id for (b_id,) in cursor.fetchall()]
            if len(ballot_ids) == 0:
                return None
            else:
                return ballot_ids[0]
    @staticmethod
    def set_email(judge_id: int, email: str):
        """Set (overwrite) the judge's email address."""
        with get_cnx() as db:
            cursor = db.cursor()
            cursor.execute(
                f"""
            UPDATE {judge_table}
            SET email = %s
            WHERE id = %s
            """,
                (email, judge_id),
            )
            db.commit()
    @staticmethod
    def get_email(judge_id: int):
        """Return the judge's stored email address."""
        with get_cnx() as db:
            cursor = db.cursor()
            cursor.execute(
                f"""
            SELECT email
            FROM {judge_table}
            WHERE id = %s
            """,
                (judge_id,),
            )
            (email,) = cursor.fetchone()
            return email
| true | true |
f73dbe56ab079673bfe206237e38707840caeeed | 6,808 | py | Python | tfx/tools/cli/kubeflow_v2/handler/kubeflow_v2_handler.py | suryaavala/tfx | c315e7cf75822088e974e15b43c96fab86746733 | [
"Apache-2.0"
] | 1 | 2021-05-10T10:41:06.000Z | 2021-05-10T10:41:06.000Z | tfx/tools/cli/kubeflow_v2/handler/kubeflow_v2_handler.py | suryaavala/tfx | c315e7cf75822088e974e15b43c96fab86746733 | [
"Apache-2.0"
] | null | null | null | tfx/tools/cli/kubeflow_v2/handler/kubeflow_v2_handler.py | suryaavala/tfx | c315e7cf75822088e974e15b43c96fab86746733 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handler for Kubeflow V2 runner."""
import functools
import os
import re
from typing import Any, Dict, Text
import click
from tfx.dsl.io import fileio
from tfx.tools.cli import labels
from tfx.tools.cli.handler import base_handler
from tfx.tools.cli.handler import kubeflow_handler
from tfx.tools.cli.kubeflow_v2.handler import kubeflow_v2_dag_runner_patcher
from tfx.utils import io_utils
_PIPELINE_ARG_FILE = 'pipeline_args.json'
_PIPELINE_SPEC_FILE = 'pipeline.json'
# Regex pattern used to capture the project id and the pipeline job name.
_FULL_JOB_NAME_PATTERN = r'projects/(\S+)/pipelineJobs/(\S+)'
# Prefix for the pipeline run detail page link.
_RUN_DETAIL_PREFIX = 'https://console.cloud.google.com/ai-platform/pipelines/runs/'
def _get_job_name(run: Dict[Text, Any]) -> Text:
"""Extracts the job name from its full name by regex.
Args:
run: JSON dict of a pipeline run object returned by Kubeflow pipelines REST
API.
Returns:
Job name extracted from the given JSON dict.
Raises:
RuntimeError: if cannot find valid job name from the response.
"""
full_name = run['name']
match_result = re.match(_FULL_JOB_NAME_PATTERN, full_name)
if not match_result:
raise RuntimeError('Invalid job name is received.')
return match_result.group(2)
def _get_job_link(job_name: Text, project_id: Text) -> Text:
"""Gets the link to the pipeline job UI according to job name and project."""
return _RUN_DETAIL_PREFIX + '{job_name}?project={project_id}'.format(
job_name=job_name, project_id=project_id)
class KubeflowV2Handler(base_handler.BaseHandler):
"""Helper methods for Kubeflow V2 Handler."""
def __init__(self, flags_dict: Dict[Text, Any]):
super().__init__(flags_dict)
# Only when the given command is `run` and an API key is specified shall we
# create a API client.
# TODO(b/169095387): re-implement run commands when the unified client
# becomes available.
pass
def create_pipeline(self, update: bool = False) -> None:
"""Creates or updates a pipeline to use in Kubeflow pipelines.
Args:
update: set as true to update pipeline.
"""
if self.flags_dict.get(labels.BUILD_IMAGE):
build_image_fn = functools.partial(
kubeflow_handler.create_container_image,
base_image=self.flags_dict.get(labels.BASE_IMAGE))
else:
build_image_fn = None
patcher = kubeflow_v2_dag_runner_patcher.KubeflowV2DagRunnerPatcher(
call_real_run=True,
build_image_fn=build_image_fn,
prepare_dir_fn=functools.partial(
self._prepare_pipeline_dir, required=update))
context = self.execute_dsl(patcher)
pipeline_name = context[patcher.PIPELINE_NAME]
if update:
click.echo('Pipeline "{}" updated successfully.'.format(pipeline_name))
else:
click.echo('Pipeline "{}" created successfully.'.format(pipeline_name))
def update_pipeline(self) -> None:
"""Updates pipeline in Kubeflow Pipelines."""
self.create_pipeline(update=True)
def list_pipelines(self) -> None:
"""List all the pipelines in the environment."""
# There is no managed storage for pipeline packages, so CLI consults
# local dir to list pipelines.
if not fileio.exists(self._handler_home_dir):
click.echo('No pipelines to display.')
return
pipelines_list = fileio.listdir(self._handler_home_dir)
# Print every pipeline name in a new line.
click.echo('-' * 30)
click.echo('\n'.join(pipelines_list))
click.echo('-' * 30)
def delete_pipeline(self) -> None:
"""Delete pipeline in the environment."""
pipeline_name = self.flags_dict[labels.PIPELINE_NAME]
self._check_pipeline_existence(pipeline_name)
io_utils.delete_dir(os.path.join(self._handler_home_dir, pipeline_name))
click.echo('Pipeline ' + pipeline_name + ' deleted successfully.')
def compile_pipeline(self) -> None:
"""Compiles pipeline into Kubeflow V2 Pipelines spec."""
patcher = kubeflow_v2_dag_runner_patcher.KubeflowV2DagRunnerPatcher(
call_real_run=True)
context = self.execute_dsl(patcher)
click.echo(f'Pipeline {context[patcher.PIPELINE_NAME]} compiled '
'successfully.')
def create_run(self) -> None:
"""Runs a pipeline in Kubeflow Pipelines."""
# TODO(b/169095387): re-implement run commands when the unified client
# becomes available.
raise NotImplementedError('Creating a run has not been implemented for '
'Kubeflow V2 runner yet.')
def terminate_run(self) -> None:
"""Stops a run."""
# TODO(b/155096168): implement this.
raise NotImplementedError('Terminating runs has not been implemented for '
'Kubeflow V2 runner yet.')
def list_runs(self) -> None:
"""Lists all runs of a pipeline."""
# TODO(b/169095387): re-implement run commands when the unified client
# becomes available.
raise NotImplementedError('Listing runs has not been implemented for '
'Kubeflow V2 runner yet.')
def get_run(self) -> None:
"""Checks run status."""
# TODO(b/169095387): re-implement run commands when the unified client
# becomes available.
raise NotImplementedError('Getting run status has not been implemented for '
'Kubeflow V2 runner yet.')
def delete_run(self) -> None:
"""Deletes a run."""
# TODO(b/155096168): implement this.
raise NotImplementedError('Deleting runs has not been implemented for '
'Kubeflow V2 runner yet.')
def _prepare_pipeline_dir(self, pipeline_name: str, required: bool) -> str:
"""Create a directory for pipeline definition in the handler directory."""
self._check_pipeline_existence(pipeline_name, required)
handler_pipeline_path = os.path.join(self._handler_home_dir, pipeline_name)
# If updating pipeline, first delete the pipeline directory.
if fileio.exists(handler_pipeline_path):
io_utils.delete_dir(handler_pipeline_path)
fileio.makedirs(handler_pipeline_path)
# pipeline.json will be stored in KubeflowV2DagRunner.run().
return handler_pipeline_path
| 36.406417 | 83 | 0.711663 |
import functools
import os
import re
from typing import Any, Dict, Text
import click
from tfx.dsl.io import fileio
from tfx.tools.cli import labels
from tfx.tools.cli.handler import base_handler
from tfx.tools.cli.handler import kubeflow_handler
from tfx.tools.cli.kubeflow_v2.handler import kubeflow_v2_dag_runner_patcher
from tfx.utils import io_utils
_PIPELINE_ARG_FILE = 'pipeline_args.json'
_PIPELINE_SPEC_FILE = 'pipeline.json'
_FULL_JOB_NAME_PATTERN = r'projects/(\S+)/pipelineJobs/(\S+)'
_RUN_DETAIL_PREFIX = 'https://console.cloud.google.com/ai-platform/pipelines/runs/'
def _get_job_name(run: Dict[Text, Any]) -> Text:
full_name = run['name']
match_result = re.match(_FULL_JOB_NAME_PATTERN, full_name)
if not match_result:
raise RuntimeError('Invalid job name is received.')
return match_result.group(2)
def _get_job_link(job_name: Text, project_id: Text) -> Text:
return _RUN_DETAIL_PREFIX + '{job_name}?project={project_id}'.format(
job_name=job_name, project_id=project_id)
class KubeflowV2Handler(base_handler.BaseHandler):
def __init__(self, flags_dict: Dict[Text, Any]):
super().__init__(flags_dict)
pass
def create_pipeline(self, update: bool = False) -> None:
if self.flags_dict.get(labels.BUILD_IMAGE):
build_image_fn = functools.partial(
kubeflow_handler.create_container_image,
base_image=self.flags_dict.get(labels.BASE_IMAGE))
else:
build_image_fn = None
patcher = kubeflow_v2_dag_runner_patcher.KubeflowV2DagRunnerPatcher(
call_real_run=True,
build_image_fn=build_image_fn,
prepare_dir_fn=functools.partial(
self._prepare_pipeline_dir, required=update))
context = self.execute_dsl(patcher)
pipeline_name = context[patcher.PIPELINE_NAME]
if update:
click.echo('Pipeline "{}" updated successfully.'.format(pipeline_name))
else:
click.echo('Pipeline "{}" created successfully.'.format(pipeline_name))
def update_pipeline(self) -> None:
self.create_pipeline(update=True)
def list_pipelines(self) -> None:
if not fileio.exists(self._handler_home_dir):
click.echo('No pipelines to display.')
return
pipelines_list = fileio.listdir(self._handler_home_dir)
click.echo('-' * 30)
click.echo('\n'.join(pipelines_list))
click.echo('-' * 30)
def delete_pipeline(self) -> None:
pipeline_name = self.flags_dict[labels.PIPELINE_NAME]
self._check_pipeline_existence(pipeline_name)
io_utils.delete_dir(os.path.join(self._handler_home_dir, pipeline_name))
click.echo('Pipeline ' + pipeline_name + ' deleted successfully.')
def compile_pipeline(self) -> None:
patcher = kubeflow_v2_dag_runner_patcher.KubeflowV2DagRunnerPatcher(
call_real_run=True)
context = self.execute_dsl(patcher)
click.echo(f'Pipeline {context[patcher.PIPELINE_NAME]} compiled '
'successfully.')
def create_run(self) -> None:
raise NotImplementedError('Creating a run has not been implemented for '
'Kubeflow V2 runner yet.')
def terminate_run(self) -> None:
raise NotImplementedError('Terminating runs has not been implemented for '
'Kubeflow V2 runner yet.')
def list_runs(self) -> None:
raise NotImplementedError('Listing runs has not been implemented for '
'Kubeflow V2 runner yet.')
def get_run(self) -> None:
raise NotImplementedError('Getting run status has not been implemented for '
'Kubeflow V2 runner yet.')
def delete_run(self) -> None:
raise NotImplementedError('Deleting runs has not been implemented for '
'Kubeflow V2 runner yet.')
def _prepare_pipeline_dir(self, pipeline_name: str, required: bool) -> str:
self._check_pipeline_existence(pipeline_name, required)
handler_pipeline_path = os.path.join(self._handler_home_dir, pipeline_name)
if fileio.exists(handler_pipeline_path):
io_utils.delete_dir(handler_pipeline_path)
fileio.makedirs(handler_pipeline_path)
return handler_pipeline_path
| true | true |
f73dbe635eb794dbc0e532893662fce61c44181c | 396 | py | Python | src/ihcWrappers/networkInterface.py | F9R/ihcpmslib-wrappers | 4a5e37ab2ecc8c8c1a8437992e45b9271ec18826 | [
"BSD-2-Clause"
] | 1 | 2022-02-09T06:41:20.000Z | 2022-02-09T06:41:20.000Z | src/ihcWrappers/networkInterface.py | F9R/ihcpmslib-wrappers | 4a5e37ab2ecc8c8c1a8437992e45b9271ec18826 | [
"BSD-2-Clause"
] | null | null | null | src/ihcWrappers/networkInterface.py | F9R/ihcpmslib-wrappers | 4a5e37ab2ecc8c8c1a8437992e45b9271ec18826 | [
"BSD-2-Clause"
] | null | null | null | import clr
clr.AddReference("System.Net")
class NetworkInterfaceWrapper:
def __init__(self, networkInterface) -> None:
self.__ni = networkInterface
@property
def Id(self) -> str:
return self.__ni.Id
@property
def Description(self) -> str:
return self.__ni.Description
@property
def Name(self) -> str:
return self.__ni.Name
| 19.8 | 49 | 0.631313 | import clr
clr.AddReference("System.Net")
class NetworkInterfaceWrapper:
def __init__(self, networkInterface) -> None:
self.__ni = networkInterface
@property
def Id(self) -> str:
return self.__ni.Id
@property
def Description(self) -> str:
return self.__ni.Description
@property
def Name(self) -> str:
return self.__ni.Name
| true | true |
f73dbf13213fc9f6c0d33da7ce99286db192dbf6 | 7,532 | py | Python | onadata/apps/logger/management/commands/clean_duplicated_submissions.py | rsakib15/kobocat | c5e2af1d5b68dc40807eb011cd5cde0be9fe5792 | [
"BSD-2-Clause"
] | null | null | null | onadata/apps/logger/management/commands/clean_duplicated_submissions.py | rsakib15/kobocat | c5e2af1d5b68dc40807eb011cd5cde0be9fe5792 | [
"BSD-2-Clause"
] | null | null | null | onadata/apps/logger/management/commands/clean_duplicated_submissions.py | rsakib15/kobocat | c5e2af1d5b68dc40807eb011cd5cde0be9fe5792 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from django.db.models import Sum
from django.db.models.aggregates import Count
from django.utils import timezone
from onadata.apps.logger.models.attachment import Attachment
from onadata.apps.logger.models.instance import Instance
from onadata.apps.viewer.models.parsed_instance import ParsedInstance
from onadata.apps.logger.models.xform import XForm
from onadata.libs.utils.common_tags import MONGO_STRFTIME
class Command(BaseCommand):
help = "Deletes duplicated submissions (i.e same `uuid` and same `xml`)"
def __init__(self, **kwargs):
super(Command, self).__init__(**kwargs)
self.__vaccuum = False
self.__users = set([])
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument(
"--user",
default=None,
help="Specify a username to clean up only their forms",
)
parser.add_argument(
"--xform",
default=None,
help="Specify a XForm's `id_string` to clean up only this form",
)
parser.add_argument(
"--purge",
action='store_true',
default=False,
help="Erase duplicate `Instance`s from the database entirely instead "
"of marking them as deleted using the `deleted_at` attribute. "
"Default is False",
)
def handle(self, *args, **options):
username = options['user']
xform_id_string = options['xform']
purge = options['purge']
# Retrieve all instances with the same `uuid`.
query = Instance.objects
if xform_id_string:
query = query.filter(xform__id_string=xform_id_string)
if username:
query = query.filter(xform__user__username=username)
# if we don't purge, we don't want to see instances
# that have been marked as deleted. However, if we do purge
# we do need these instances to be in the list in order
# to delete them permanently
if not purge:
query = query.filter(deleted_at=None)
query = query.values_list('uuid', flat=True)\
.annotate(count_uuid=Count('uuid'))\
.filter(count_uuid__gt=1)\
.distinct()
for uuid in query.all():
duplicated_query = Instance.objects.filter(uuid=uuid)
# if we don't purge, we don't want to see instances
# that have been marked as deleted. However, if we do purge
# we do need these instances to be in the list in order
# to delete them permanently
if not purge:
duplicated_query = duplicated_query.filter(deleted_at=None)
instances_with_same_uuid = duplicated_query.values_list('id',
'xml_hash')\
.order_by('xml_hash', 'date_created')
xml_hash_ref = None
instance_id_ref = None
duplicated_instance_ids = []
for instance_with_same_uuid in instances_with_same_uuid:
instance_id = instance_with_same_uuid[0]
instance_xml_hash = instance_with_same_uuid[1]
if instance_xml_hash != xml_hash_ref:
self.__clean_up(instance_id_ref,
duplicated_instance_ids,
purge)
xml_hash_ref = instance_xml_hash
instance_id_ref = instance_id
duplicated_instance_ids = []
continue
duplicated_instance_ids.append(instance_id)
self.__clean_up(instance_id_ref,
duplicated_instance_ids,
purge)
if not self.__vaccuum:
if purge:
self.stdout.write('No instances have been purged.')
else:
self.stdout.write('No instances have been marked as deleted.')
else:
# Update number of submissions for each user.
for user_ in list(self.__users):
result = XForm.objects.filter(user_id=user_.id)\
.aggregate(count=Sum('num_of_submissions'))
user_.profile.num_of_submissions = result['count']
self.stdout.write(
"\tUpdating `{}`'s number of submissions".format(
user_.username))
user_.profile.save(update_fields=['num_of_submissions'])
self.stdout.write(
'\t\tDone! New number: {}'.format(result['count']))
def __clean_up(self, instance_id_ref, duplicated_instance_ids, purge):
if instance_id_ref is not None and len(duplicated_instance_ids) > 0:
self.__vaccuum = True
with transaction.atomic():
self.stdout.write('Link attachments to instance #{}'.format(
instance_id_ref))
# Update attachments
Attachment.objects.select_for_update()\
.filter(instance_id__in=duplicated_instance_ids)\
.update(instance_id=instance_id_ref)
# Update Mongo
main_instance = Instance.objects.select_for_update()\
.get(id=instance_id_ref)
main_instance.parsed_instance.save()
if purge:
self.stdout.write('\tPurging instances: {}'.format(
duplicated_instance_ids))
Instance.objects.select_for_update()\
.filter(id__in=duplicated_instance_ids).delete()
ParsedInstance.objects.select_for_update()\
.filter(instance_id__in=duplicated_instance_ids).delete()
settings.MONGO_DB.instances.remove(
{'_id': {'$in': duplicated_instance_ids}}
)
else:
self.stdout.write('\tMarking instances as deleted: {}'.format(
duplicated_instance_ids))
# We could loop through instances and use `Instance.set_deleted()`
# but it would be way slower.
Instance.objects.select_for_update()\
.filter(id__in=duplicated_instance_ids)\
.update(deleted_at=timezone.now())
settings.MONGO_DB.instances.update_many(
{'_id': {'$in': duplicated_instance_ids}},
{'$set': {
'_deleted_at': timezone.now().strftime(MONGO_STRFTIME)
}}
)
# Update number of submissions
xform = main_instance.xform
self.stdout.write(
'\tUpdating number of submissions of XForm #{} ({})'.format(
xform.id, xform.id_string))
xform_submission_count = xform.submission_count(force_update=True)
self.stdout.write(
'\t\tDone! New number: {}'.format(xform_submission_count))
self.stdout.write('')
self.__users.add(xform.user)
| 41.384615 | 86 | 0.562799 |
from __future__ import unicode_literals
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from django.db.models import Sum
from django.db.models.aggregates import Count
from django.utils import timezone
from onadata.apps.logger.models.attachment import Attachment
from onadata.apps.logger.models.instance import Instance
from onadata.apps.viewer.models.parsed_instance import ParsedInstance
from onadata.apps.logger.models.xform import XForm
from onadata.libs.utils.common_tags import MONGO_STRFTIME
class Command(BaseCommand):
help = "Deletes duplicated submissions (i.e same `uuid` and same `xml`)"
def __init__(self, **kwargs):
super(Command, self).__init__(**kwargs)
self.__vaccuum = False
self.__users = set([])
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument(
"--user",
default=None,
help="Specify a username to clean up only their forms",
)
parser.add_argument(
"--xform",
default=None,
help="Specify a XForm's `id_string` to clean up only this form",
)
parser.add_argument(
"--purge",
action='store_true',
default=False,
help="Erase duplicate `Instance`s from the database entirely instead "
"of marking them as deleted using the `deleted_at` attribute. "
"Default is False",
)
def handle(self, *args, **options):
username = options['user']
xform_id_string = options['xform']
purge = options['purge']
# Retrieve all instances with the same `uuid`.
query = Instance.objects
if xform_id_string:
query = query.filter(xform__id_string=xform_id_string)
if username:
query = query.filter(xform__user__username=username)
# if we don't purge, we don't want to see instances
# that have been marked as deleted. However, if we do purge
# we do need these instances to be in the list in order
# to delete them permanently
if not purge:
query = query.filter(deleted_at=None)
query = query.values_list('uuid', flat=True)\
.annotate(count_uuid=Count('uuid'))\
.filter(count_uuid__gt=1)\
.distinct()
for uuid in query.all():
duplicated_query = Instance.objects.filter(uuid=uuid)
# if we don't purge, we don't want to see instances
# that have been marked as deleted. However, if we do purge
# we do need these instances to be in the list in order
# to delete them permanently
if not purge:
duplicated_query = duplicated_query.filter(deleted_at=None)
instances_with_same_uuid = duplicated_query.values_list('id',
'xml_hash')\
.order_by('xml_hash', 'date_created')
xml_hash_ref = None
instance_id_ref = None
duplicated_instance_ids = []
for instance_with_same_uuid in instances_with_same_uuid:
instance_id = instance_with_same_uuid[0]
instance_xml_hash = instance_with_same_uuid[1]
if instance_xml_hash != xml_hash_ref:
self.__clean_up(instance_id_ref,
duplicated_instance_ids,
purge)
xml_hash_ref = instance_xml_hash
instance_id_ref = instance_id
duplicated_instance_ids = []
continue
duplicated_instance_ids.append(instance_id)
self.__clean_up(instance_id_ref,
duplicated_instance_ids,
purge)
if not self.__vaccuum:
if purge:
self.stdout.write('No instances have been purged.')
else:
self.stdout.write('No instances have been marked as deleted.')
else:
# Update number of submissions for each user.
for user_ in list(self.__users):
result = XForm.objects.filter(user_id=user_.id)\
.aggregate(count=Sum('num_of_submissions'))
user_.profile.num_of_submissions = result['count']
self.stdout.write(
"\tUpdating `{}`'s number of submissions".format(
user_.username))
user_.profile.save(update_fields=['num_of_submissions'])
self.stdout.write(
'\t\tDone! New number: {}'.format(result['count']))
def __clean_up(self, instance_id_ref, duplicated_instance_ids, purge):
if instance_id_ref is not None and len(duplicated_instance_ids) > 0:
self.__vaccuum = True
with transaction.atomic():
self.stdout.write('Link attachments to instance #{}'.format(
instance_id_ref))
Attachment.objects.select_for_update()\
.filter(instance_id__in=duplicated_instance_ids)\
.update(instance_id=instance_id_ref)
main_instance = Instance.objects.select_for_update()\
.get(id=instance_id_ref)
main_instance.parsed_instance.save()
if purge:
self.stdout.write('\tPurging instances: {}'.format(
duplicated_instance_ids))
Instance.objects.select_for_update()\
.filter(id__in=duplicated_instance_ids).delete()
ParsedInstance.objects.select_for_update()\
.filter(instance_id__in=duplicated_instance_ids).delete()
settings.MONGO_DB.instances.remove(
{'_id': {'$in': duplicated_instance_ids}}
)
else:
self.stdout.write('\tMarking instances as deleted: {}'.format(
duplicated_instance_ids))
Instance.objects.select_for_update()\
.filter(id__in=duplicated_instance_ids)\
.update(deleted_at=timezone.now())
settings.MONGO_DB.instances.update_many(
{'_id': {'$in': duplicated_instance_ids}},
{'$set': {
'_deleted_at': timezone.now().strftime(MONGO_STRFTIME)
}}
)
xform = main_instance.xform
self.stdout.write(
'\tUpdating number of submissions of XForm #{} ({})'.format(
xform.id, xform.id_string))
xform_submission_count = xform.submission_count(force_update=True)
self.stdout.write(
'\t\tDone! New number: {}'.format(xform_submission_count))
self.stdout.write('')
self.__users.add(xform.user)
| true | true |
f73dbf6a3b501732739a362fc9c74ba5200314da | 881 | py | Python | bcMathSociety/flaskr/__init__.py | kennywang01/BCMS | 1d226ecda481ad324b26930d9a5cbf4df449214e | [
"MIT"
] | null | null | null | bcMathSociety/flaskr/__init__.py | kennywang01/BCMS | 1d226ecda481ad324b26930d9a5cbf4df449214e | [
"MIT"
] | null | null | null | bcMathSociety/flaskr/__init__.py | kennywang01/BCMS | 1d226ecda481ad324b26930d9a5cbf4df449214e | [
"MIT"
] | null | null | null | import os
from xml.etree.ElementInclude import include
from flask import Flask
from .db import mongo
def create_app(test_config=None):
# create and configure the app
app = Flask(__name__, instance_relative_config=True)
app.config.from_mapping(
SECRET_KEY='testing',
)
if test_config is None:
# load the instance config, if it exists, when not testing
app.config.from_pyfile('config.py', silent=True)
else:
# load the test config if passed in
app.config.from_mapping(test_config)
# ensure the instance folder exists
try:
os.makedirs(app.instance_path)
except OSError:
pass
# a simple page that says hello
@app.route('/hello')
def hello():
return 'Hello, World!'
mongo.init_app(app)
from . import auth
app.register_blueprint(auth.bp)
return app
| 22.025 | 66 | 0.664018 | import os
from xml.etree.ElementInclude import include
from flask import Flask
from .db import mongo
def create_app(test_config=None):
app = Flask(__name__, instance_relative_config=True)
app.config.from_mapping(
SECRET_KEY='testing',
)
if test_config is None:
app.config.from_pyfile('config.py', silent=True)
else:
app.config.from_mapping(test_config)
try:
os.makedirs(app.instance_path)
except OSError:
pass
@app.route('/hello')
def hello():
return 'Hello, World!'
mongo.init_app(app)
from . import auth
app.register_blueprint(auth.bp)
return app
| true | true |
f73dc04e73c6d5fd2b38b1c5b4f62630509cf49b | 7,886 | py | Python | tests/serializer/test_dumps.py | QueoLda/django-unicorn | 01573cd65282c467bfb0925542b180ffa9efba05 | [
"MIT"
] | null | null | null | tests/serializer/test_dumps.py | QueoLda/django-unicorn | 01573cd65282c467bfb0925542b180ffa9efba05 | [
"MIT"
] | null | null | null | tests/serializer/test_dumps.py | QueoLda/django-unicorn | 01573cd65282c467bfb0925542b180ffa9efba05 | [
"MIT"
] | null | null | null | import json
from decimal import Decimal
from django.db import models
from django.utils.timezone import now
import pytest
from django_unicorn import serializer
from django_unicorn.utils import dicts_equal
from example.coffee.models import Flavor
class SimpleTestModel(models.Model):
name = models.CharField(max_length=10)
class Meta:
app_label = "tests"
class ComplicatedTestModel(models.Model):
name = models.CharField(max_length=10)
parent = models.ForeignKey("self", blank=True, null=True, on_delete=models.SET_NULL)
class Meta:
app_label = "tests"
def test_int():
expected = '{"name":123}'
actual = serializer.dumps({"name": 123})
assert expected == actual
def test_decimal():
expected = '{"name":"123.1"}'
actual = serializer.dumps({"name": Decimal("123.1")})
assert expected == actual
def test_string():
expected = '{"name":"abc"}'
actual = serializer.dumps({"name": "abc"})
assert expected == actual
def test_list():
expected = '{"name":["abc","def"]}'
actual = serializer.dumps({"name": ["abc", "def",]})
assert expected == actual
def test_simple_model():
simple_test_model = SimpleTestModel(id=1, name="abc")
expected = '{"simple_test_model":{"name":"abc","pk":1}}'
actual = serializer.dumps({"simple_test_model": simple_test_model})
assert expected == actual
def test_model_with_datetime(db):
datetime = now()
flavor = Flavor(name="name1", datetime=datetime)
expected = {
"flavor": {
"name": "name1",
"label": "",
"parent": None,
"float_value": None,
"decimal_value": None,
"uuid": str(flavor.uuid),
"date": None,
"datetime": datetime.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3],
"time": None,
"duration": None,
"pk": None,
}
}
actual = serializer.dumps({"flavor": flavor})
assert dicts_equal(expected, json.loads(actual))
def test_model_with_datetime_as_string(db):
datetime = now().strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3]
flavor = Flavor(name="name1", datetime=datetime)
expected = {
"flavor": {
"name": "name1",
"label": "",
"parent": None,
"float_value": None,
"decimal_value": None,
"uuid": str(flavor.uuid),
"date": None,
"datetime": datetime,
"time": None,
"duration": None,
"pk": None,
}
}
actual = serializer.dumps({"flavor": flavor})
assert dicts_equal(expected, json.loads(actual))
def test_model_with_time_as_string(db):
time = now().strftime("%H:%M:%S.%f")[:-3]
flavor = Flavor(name="name1", time=time)
expected = {
"flavor": {
"name": "name1",
"label": "",
"parent": None,
"float_value": None,
"decimal_value": None,
"uuid": str(flavor.uuid),
"date": None,
"datetime": None,
"time": time,
"duration": None,
"pk": None,
}
}
actual = serializer.dumps({"flavor": flavor})
assert dicts_equal(expected, json.loads(actual))
def test_model_with_duration_as_string(db):
duration = "-1 day, 19:00:00"
flavor = Flavor(name="name1", duration=duration)
expected = {
"flavor": {
"name": "name1",
"label": "",
"parent": None,
"float_value": None,
"decimal_value": None,
"uuid": str(flavor.uuid),
"date": None,
"datetime": None,
"time": None,
"duration": "-1 19:00:00",
"pk": None,
}
}
actual = serializer.dumps({"flavor": flavor})
assert dicts_equal(expected, json.loads(actual))
def test_model_foreign_key():
test_model_one = ComplicatedTestModel(id=1, name="abc")
test_model_two = ComplicatedTestModel(id=2, name="def", parent=test_model_one)
expected = '{"test_model_two":{"name":"def","parent":1,"pk":2}}'
actual = serializer.dumps({"test_model_two": test_model_two})
assert expected == actual
def test_model_foreign_key_recursive_parent():
test_model_one = ComplicatedTestModel(id=1, name="abc")
test_model_two = ComplicatedTestModel(id=2, name="def", parent=test_model_one)
test_model_one.parent = test_model_two
expected = '{"test_model_two":{"name":"def","parent":1,"pk":2}}'
actual = serializer.dumps({"test_model_two": test_model_two})
assert expected == actual
@pytest.mark.django_db
def test_dumps_queryset(db):
flavor_one = Flavor(name="name1", label="label1")
flavor_one.save()
flavor_two = Flavor(name="name2", label="label2", parent=flavor_one)
flavor_two.save()
flavors = Flavor.objects.all()
expected_data = {
"flavors": [
{
"name": "name1",
"label": "label1",
"parent": None,
"float_value": None,
"decimal_value": None,
"uuid": str(flavor_one.uuid),
"date": None,
"datetime": None,
"time": None,
"duration": None,
"pk": 1,
},
{
"name": "name2",
"label": "label2",
"parent": 1,
"float_value": None,
"decimal_value": None,
"uuid": str(flavor_two.uuid),
"date": None,
"datetime": None,
"time": None,
"duration": None,
"pk": 2,
},
]
}
actual = serializer.dumps({"flavors": flavors})
assert expected_data == json.loads(actual)
def test_get_model_dict():
flavor_one = Flavor(name="name1", label="label1")
actual = serializer._get_model_dict(flavor_one)
expected = {
"pk": None,
"name": "name1",
"label": "label1",
"parent": None,
"decimal_value": None,
"float_value": None,
"uuid": str(flavor_one.uuid),
"date": None,
"datetime": None,
"time": None,
"duration": None,
}
assert expected == actual
def test_float():
expected = '{"name":"0.0"}'
actual = serializer.dumps({"name": 0.0})
assert expected == actual
def test_dict_float():
expected = '{"name":{"another":"0.0"}}'
actual = serializer.dumps({"name": {"another": 0.0}})
assert expected == actual
def test_list_float():
expected = '{"name":[1,2,"0.0"]}'
actual = serializer.dumps({"name": [1, 2, 0.0]})
assert expected == actual
def test_nested_list_float():
expected = '{"name":{"blob":[1,2,"0.0"]}}'
actual = serializer.dumps({"name": {"blob": [1, 2, 0.0]}})
assert expected == actual
def test_nested_list_float_complicated():
expected = '{"name":{"blob":[1,2,"0.0"]},"more":["1.9",2,5],"another":[{"great":"1.0","ok":["1.6","0.0",4]}]}'
actual = serializer.dumps(
{
"name": {"blob": [1, 2, 0.0]},
"more": [1.9, 2, 5],
"another": [{"great": 1.0, "ok": [1.6, 0.0, 4]}],
}
)
assert expected == actual
def test_nested_list_float_less_complicated():
expected = '{"another":[{"great":"1.0","ok":["1.6","0.0",4]}]}'
actual = serializer.dumps({"another": [{"great": 1.0, "ok": [1.6, 0.0, 4]}],})
assert expected == actual
def test_pydantic():
from pydantic import BaseModel
class Book(BaseModel):
title = "The Grapes of Wrath"
author = "John Steinbeck"
expected = '{"title":"The Grapes of Wrath","author":"John Steinbeck"}'
actual = serializer.dumps(Book())
assert expected == actual
| 25.603896 | 114 | 0.546031 | import json
from decimal import Decimal
from django.db import models
from django.utils.timezone import now
import pytest
from django_unicorn import serializer
from django_unicorn.utils import dicts_equal
from example.coffee.models import Flavor
class SimpleTestModel(models.Model):
name = models.CharField(max_length=10)
class Meta:
app_label = "tests"
class ComplicatedTestModel(models.Model):
name = models.CharField(max_length=10)
parent = models.ForeignKey("self", blank=True, null=True, on_delete=models.SET_NULL)
class Meta:
app_label = "tests"
def test_int():
expected = '{"name":123}'
actual = serializer.dumps({"name": 123})
assert expected == actual
def test_decimal():
expected = '{"name":"123.1"}'
actual = serializer.dumps({"name": Decimal("123.1")})
assert expected == actual
def test_string():
expected = '{"name":"abc"}'
actual = serializer.dumps({"name": "abc"})
assert expected == actual
def test_list():
expected = '{"name":["abc","def"]}'
actual = serializer.dumps({"name": ["abc", "def",]})
assert expected == actual
def test_simple_model():
simple_test_model = SimpleTestModel(id=1, name="abc")
expected = '{"simple_test_model":{"name":"abc","pk":1}}'
actual = serializer.dumps({"simple_test_model": simple_test_model})
assert expected == actual
def test_model_with_datetime(db):
datetime = now()
flavor = Flavor(name="name1", datetime=datetime)
expected = {
"flavor": {
"name": "name1",
"label": "",
"parent": None,
"float_value": None,
"decimal_value": None,
"uuid": str(flavor.uuid),
"date": None,
"datetime": datetime.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3],
"time": None,
"duration": None,
"pk": None,
}
}
actual = serializer.dumps({"flavor": flavor})
assert dicts_equal(expected, json.loads(actual))
def test_model_with_datetime_as_string(db):
datetime = now().strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3]
flavor = Flavor(name="name1", datetime=datetime)
expected = {
"flavor": {
"name": "name1",
"label": "",
"parent": None,
"float_value": None,
"decimal_value": None,
"uuid": str(flavor.uuid),
"date": None,
"datetime": datetime,
"time": None,
"duration": None,
"pk": None,
}
}
actual = serializer.dumps({"flavor": flavor})
assert dicts_equal(expected, json.loads(actual))
def test_model_with_time_as_string(db):
time = now().strftime("%H:%M:%S.%f")[:-3]
flavor = Flavor(name="name1", time=time)
expected = {
"flavor": {
"name": "name1",
"label": "",
"parent": None,
"float_value": None,
"decimal_value": None,
"uuid": str(flavor.uuid),
"date": None,
"datetime": None,
"time": time,
"duration": None,
"pk": None,
}
}
actual = serializer.dumps({"flavor": flavor})
assert dicts_equal(expected, json.loads(actual))
def test_model_with_duration_as_string(db):
duration = "-1 day, 19:00:00"
flavor = Flavor(name="name1", duration=duration)
expected = {
"flavor": {
"name": "name1",
"label": "",
"parent": None,
"float_value": None,
"decimal_value": None,
"uuid": str(flavor.uuid),
"date": None,
"datetime": None,
"time": None,
"duration": "-1 19:00:00",
"pk": None,
}
}
actual = serializer.dumps({"flavor": flavor})
assert dicts_equal(expected, json.loads(actual))
def test_model_foreign_key():
    """A foreign-key field renders as the parent's primary key."""
    parent = ComplicatedTestModel(id=1, name="abc")
    child = ComplicatedTestModel(id=2, name="def", parent=parent)
    actual = serializer.dumps({"test_model_two": child})
    assert actual == '{"test_model_two":{"name":"def","parent":1,"pk":2}}'
def test_model_foreign_key_recursive_parent():
    """A parent/child reference cycle must not recurse infinitely."""
    parent = ComplicatedTestModel(id=1, name="abc")
    child = ComplicatedTestModel(id=2, name="def", parent=parent)
    parent.parent = child  # introduce the cycle
    actual = serializer.dumps({"test_model_two": child})
    assert actual == '{"test_model_two":{"name":"def","parent":1,"pk":2}}'
@pytest.mark.django_db
def test_dumps_queryset(db):
    """An iterable QuerySet serializes to a JSON array of model dicts."""
    flavor_one = Flavor(name="name1", label="label1")
    flavor_one.save()
    flavor_two = Flavor(name="name2", label="label2", parent=flavor_one)
    flavor_two.save()
    flavors = Flavor.objects.all()
    # Saved models get sequential pks; flavor_two links back to flavor_one.
    expected_data = {
        "flavors": [
            {
                "name": "name1",
                "label": "label1",
                "parent": None,
                "float_value": None,
                "decimal_value": None,
                "uuid": str(flavor_one.uuid),
                "date": None,
                "datetime": None,
                "time": None,
                "duration": None,
                "pk": 1,
            },
            {
                "name": "name2",
                "label": "label2",
                "parent": 1,
                "float_value": None,
                "decimal_value": None,
                "uuid": str(flavor_two.uuid),
                "date": None,
                "datetime": None,
                "time": None,
                "duration": None,
                "pk": 2,
            },
        ]
    }
    actual = serializer.dumps({"flavors": flavors})
    assert expected_data == json.loads(actual)
def test_get_model_dict():
    """_get_model_dict flattens an unsaved model into field name/value pairs."""
    flavor = Flavor(name="name1", label="label1")
    expected = {
        "pk": None,
        "name": "name1",
        "label": "label1",
        "parent": None,
        "decimal_value": None,
        "float_value": None,
        "uuid": str(flavor.uuid),
        "date": None,
        "datetime": None,
        "time": None,
        "duration": None,
    }
    assert serializer._get_model_dict(flavor) == expected
def test_float():
    """Bare floats are serialized as quoted strings."""
    assert serializer.dumps({"name": 0.0}) == '{"name":"0.0"}'
def test_dict_float():
    """Floats nested inside dicts are stringified as well."""
    assert serializer.dumps({"name": {"another": 0.0}}) == '{"name":{"another":"0.0"}}'
def test_list_float():
    """Floats inside lists are stringified; integers are left as numbers."""
    assert serializer.dumps({"name": [1, 2, 0.0]}) == '{"name":[1,2,"0.0"]}'
def test_nested_list_float():
    """Floats in a list nested inside a dict are stringified."""
    assert serializer.dumps({"name": {"blob": [1, 2, 0.0]}}) == '{"name":{"blob":[1,2,"0.0"]}}'
def test_nested_list_float_complicated():
    """Float stringification applies at every nesting depth."""
    payload = {
        "name": {"blob": [1, 2, 0.0]},
        "more": [1.9, 2, 5],
        "another": [{"great": 1.0, "ok": [1.6, 0.0, 4]}],
    }
    expected = (
        '{"name":{"blob":[1,2,"0.0"]},'
        '"more":["1.9",2,5],'
        '"another":[{"great":"1.0","ok":["1.6","0.0",4]}]}'
    )
    assert serializer.dumps(payload) == expected
def test_nested_list_float_less_complicated():
    """Floats inside a dict inside a list are stringified."""
    payload = {"another": [{"great": 1.0, "ok": [1.6, 0.0, 4]}]}
    assert serializer.dumps(payload) == '{"another":[{"great":"1.0","ok":["1.6","0.0",4]}]}'
def test_pydantic():
    """Pydantic models serialize like plain attribute dictionaries."""
    from pydantic import BaseModel

    class Book(BaseModel):
        title = "The Grapes of Wrath"
        author = "John Steinbeck"

    serialized = serializer.dumps(Book())
    assert serialized == '{"title":"The Grapes of Wrath","author":"John Steinbeck"}'
| true | true |
f73dc226349990101cb313399b23748b50d9dbac | 3,585 | py | Python | SorryNotSorry/solution/solve-1.py | bahorn/covhacksoc-ctf-2020-11-17 | 6be1380da8860d0fc9822a8421b8ca9238197f95 | [
"MIT"
] | 1 | 2020-11-25T20:24:25.000Z | 2020-11-25T20:24:25.000Z | SorryNotSorry/solution/solve-1.py | bahorn/covhacksoc-ctf-2020-11-17 | 6be1380da8860d0fc9822a8421b8ca9238197f95 | [
"MIT"
] | null | null | null | SorryNotSorry/solution/solve-1.py | bahorn/covhacksoc-ctf-2020-11-17 | 6be1380da8860d0fc9822a8421b8ca9238197f95 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import base64
# NOTE(review): "magic" appears to be an obfuscated two-part payload:
# dots are filler, newlines are ignored, and the '_ _ _' marker splits a
# zlib-compressed base64 blob (the real code) from a decoy text.  The
# printed loader below shows the unpacking recipe — TODO confirm by
# decoding the literal passed to print().
magic = """
eJxlkMlOwzAQhu88xdBLHKiiEprSIvVCCUvpoqhARQ9Ijj1JTLPJdhDh6XHa0rLMxcvM9/8zwzGCqMq
JfXkEJtZYwxBCy5v47tj3nm+m41l/dTuaDebBJLgLXi7G/nT0dG9tqkvK1sgNYPE4OF3UfZfG63q8mA
5WdSebXt0s54/B6Vz4g9ky9h7e+h+T62BoHW1gbpwxZ7Iu.....9Rl5p7LdmO/aaEKirmQOYa1REQqv
EEJUSKBtcxE5fIryQNlbyVKKXJPWHaZpAY8JSjx..............u2T8z9xHURQWM5qAqxlCpqErTG
mLUoBMEVVSSIRSReQllqCKWNGvvoe86..............49ko......b6QzM39CVZKK0FEJdb0e2Xoq
ZCa1H9EwpnWqsNd1wl6XIys4..............ku0K7S2ROVXJqU.....Zi0L8/WhJJc15kTnM003Ta
7sm5a9uOsWikLKqY..............ENbvZRwMd1WZk+AHFzEqTf6j.....zq6tg9YXB7ikxA==_ _
_QWNjb3JkaW5n..........IHRvIGFsbCBrbm93biBsYXdzIG9mIGF2a.....WF0aW9uLCB0aGVyZSB
pcyBubyB3YXk.....gdGhhdCBhYmVlIHNob3VsZCBiZSBhYmxlIHRvIGZs.....eS4gSXRzIHdpbmdz
IGFyZSB0b28g....c21hbGwgdG8gZ2V0IGl0cyBmYXQgbGl0....dGxlIGJv.....ZHlvZmYgdGhlIG
dyb3VuZC4gV....GhlIGJlZSwgb2YgY291cnNlLCBmbGllcy.....Bhbnl3YXk.....uIEJlY2F1c2U
gYmVlcyBkb2....50IGNhcmUgd.............2hhdGh1bW.....FucyB0aGlua.....yBpcyBpbXB
vc3NpYmxlL....iBTRVEuID...................c1IC0gS.....U5UUk8gVE8gQ.....kFSUlkgS
U5ULiBCRU5...TT04gSE9......VU0UgLSBEQ......VlBTkdM....RSBPTjogU25lYW.....tlcnMg
b24gdGhlI....Gdyb3Vu.....ZC4gQ2FtZXJhIF.....BBTlMg.....VVAgdG8gcmV2ZWF.....sIEJ
BUlJZIEJF...TlNPTlN.....CRURS....T09NIEF....OR0xFIE.....9OOiBCYXJyeXMgaG....FuZ
CBmbGlwc....GluZyB.....0aHJvd.....WdoIGRpZmZlcmVudC.....Bzd2VhdGVycyBpbi....Boa
XNjbG9zZ...XQuIEJB.....UlJZIF.....llbGxvdyBibGFjay.......wgeWVsbG93IGJs....YWNr
LCB5ZWx....sb3cgYm.....xhY2ssI.....HllbGxvd..............yBibGFjaywgeWV....sbG9
3YmxhY....2ssIHllbG....xvdyBib............................GFjay4uLm9va....GgsIG
JsYWNr....IGFuZCB5Z.....Wxsb3cu..............Li4gQU5H.....TEUgT046IEJh...cnJ5IH
dlYXJ....pbmcgdGhlc3.....dlYXRlc......iBoZSBwaWNrZWQsI.....Gxvb2tpbmc....gaW4gd
GhlIG....1pcnJvci4gQ.....kFSUlkg.....KENPTlREKSBZZWFoL.....CBsZXRzIHN...oYWtlIG
l0IH....VwYSBsaXR0bGU.....uIEhlIH.....BpY2tzI....HRoZSB.....ibGFjayB....hbmQgeW
VsbG....93IG9uZS4gSGU.....gdGhlbi.....Bnb2VzI....HRvIHR.....oZSBzaW5...rLCB0YWt
lcyB0.....aGV0b3Agb2Zm.....IGEgQ09.....OVEFJT....kVSIE9G.....IEhPTk....VZLCBhbm
QgcHV0c.....yBzb21lIGhv.....bmV5IG.....ludG8.....gaGlzIGh....haXIu....IEhlIHNxd
WlydHMgc2.....9tZWluIGhp.....cyBtb3V0aCBhb......mQgZ2FyZ2xlcy4gVGh....lbiBoZSB0
YWtlcyB0aGU.....gbGlkIG9m.......ZiB0aGU........gYm90dGxlLCBhbmQgc....m9sbHMgc29
tZW9uIGxpa2Ug.....ZGVvZG9yY.................W50LiBDVVQgVE86IElOVC....4gQkVOU09O
IEhPVVNFIEtJVEN.....IRU4gLSBDT.........05USU5VT1VTIEJhcnJ5J3Ntb3....RoZXIsIEpBT
kVUIEJFTlNPTiwgeW.....VsbHMgdXAgYXQgQmFycnkuIEpBTkVUIEJFTlNPTiB....CYXJyeSwgYnJ
lYWtmYXN0IGlzIHJlYW.....R5IUNVVCBUTzogIkJlZSBNb3ZpZSIgLSBK.........UyBSRVZJU0lP
TlMgOC8xMy8wNyAxLiBJT.....lQuIEJBUlJZJ1MgUk9PTSAtI..............ENPTlRJTlVPVVNC
QVJSWSBDb21pbmchIFNGWDo.....gUGhvbmUgUklOR0..............lORy4gQmFycnlzIGFudGVu
bmFlIHZpYnJhdGUgYXMgdGhle.....SBSSU..............5HIGxpa2UgYXBob25lLiBCYXJyeXMg
aGFuZHMgYXJlIHdldC4gSGUgbG9...............va3MgYXJvdW5kIGZvciBhIHRvd2VsLiBCQVJS
WSAoQ09OVEQpIEhhbmdvbiBhIHNlY.....29uZCEgSGUgd2lwZXMgaGlzIGhhbmRzIG9uIGhpcyBzd2
VhdGVyLCBhbmQgcHVsbHMgaGlzIGFudGVubmFlIGRvd24gdG9oaXMgZWFyIGFuZCBtb3V0aC4gQkFSU
lkgKENPTlQnRCkgSGVsbG8/IEhpcyBiZXN0IGZyaWVuZCwgQURBTSBGTEFZTUFOLCBpcyBvbnRoZSBv
dGhlciBlbmQuIEFEQU0gQmFycnk/IEJBUlJZIEFkYW0/IEFEQU0gQ2FuIHlvdSBiZWxpZXZlIHRoaXM
"""
# Decode and display the hidden loader stage (it references `magic` above).
print(
    base64.b64decode("CmltcG9ydCByYW5kb20KaW1wb3J0IGhhc2hsaWIKaW1wb3J0IHpsaWIKZXhlYyh6bGliLmRlY29tcHJlc3MoYmFzZTY0LmI2NGRlY29kZSgnJy5qb2luKG1hZ2ljLnJlcGxhY2UoJy4nLCcnKS5zcGxpdCgnXG4nKSkuc3BsaXQoJ18gXyBfJylbMF0pKSkKZnVuKCkK").decode('ascii'))
| 70.294118 | 241 | 0.761785 |
import base64
magic = """
eJxlkMlOwzAQhu88xdBLHKiiEprSIvVCCUvpoqhARQ9Ijj1JTLPJdhDh6XHa0rLMxcvM9/8zwzGCqMq
JfXkEJtZYwxBCy5v47tj3nm+m41l/dTuaDebBJLgLXi7G/nT0dG9tqkvK1sgNYPE4OF3UfZfG63q8mA
5WdSebXt0s54/B6Vz4g9ky9h7e+h+T62BoHW1gbpwxZ7Iu.....9Rl5p7LdmO/aaEKirmQOYa1REQqv
EEJUSKBtcxE5fIryQNlbyVKKXJPWHaZpAY8JSjx..............u2T8z9xHURQWM5qAqxlCpqErTG
mLUoBMEVVSSIRSReQllqCKWNGvvoe86..............49ko......b6QzM39CVZKK0FEJdb0e2Xoq
ZCa1H9EwpnWqsNd1wl6XIys4..............ku0K7S2ROVXJqU.....Zi0L8/WhJJc15kTnM003Ta
7sm5a9uOsWikLKqY..............ENbvZRwMd1WZk+AHFzEqTf6j.....zq6tg9YXB7ikxA==_ _
_QWNjb3JkaW5n..........IHRvIGFsbCBrbm93biBsYXdzIG9mIGF2a.....WF0aW9uLCB0aGVyZSB
pcyBubyB3YXk.....gdGhhdCBhYmVlIHNob3VsZCBiZSBhYmxlIHRvIGZs.....eS4gSXRzIHdpbmdz
IGFyZSB0b28g....c21hbGwgdG8gZ2V0IGl0cyBmYXQgbGl0....dGxlIGJv.....ZHlvZmYgdGhlIG
dyb3VuZC4gV....GhlIGJlZSwgb2YgY291cnNlLCBmbGllcy.....Bhbnl3YXk.....uIEJlY2F1c2U
gYmVlcyBkb2....50IGNhcmUgd.............2hhdGh1bW.....FucyB0aGlua.....yBpcyBpbXB
vc3NpYmxlL....iBTRVEuID...................c1IC0gS.....U5UUk8gVE8gQ.....kFSUlkgS
U5ULiBCRU5...TT04gSE9......VU0UgLSBEQ......VlBTkdM....RSBPTjogU25lYW.....tlcnMg
b24gdGhlI....Gdyb3Vu.....ZC4gQ2FtZXJhIF.....BBTlMg.....VVAgdG8gcmV2ZWF.....sIEJ
BUlJZIEJF...TlNPTlN.....CRURS....T09NIEF....OR0xFIE.....9OOiBCYXJyeXMgaG....FuZ
CBmbGlwc....GluZyB.....0aHJvd.....WdoIGRpZmZlcmVudC.....Bzd2VhdGVycyBpbi....Boa
XNjbG9zZ...XQuIEJB.....UlJZIF.....llbGxvdyBibGFjay.......wgeWVsbG93IGJs....YWNr
LCB5ZWx....sb3cgYm.....xhY2ssI.....HllbGxvd..............yBibGFjaywgeWV....sbG9
3YmxhY....2ssIHllbG....xvdyBib............................GFjay4uLm9va....GgsIG
JsYWNr....IGFuZCB5Z.....Wxsb3cu..............Li4gQU5H.....TEUgT046IEJh...cnJ5IH
dlYXJ....pbmcgdGhlc3.....dlYXRlc......iBoZSBwaWNrZWQsI.....Gxvb2tpbmc....gaW4gd
GhlIG....1pcnJvci4gQ.....kFSUlkg.....KENPTlREKSBZZWFoL.....CBsZXRzIHN...oYWtlIG
l0IH....VwYSBsaXR0bGU.....uIEhlIH.....BpY2tzI....HRoZSB.....ibGFjayB....hbmQgeW
VsbG....93IG9uZS4gSGU.....gdGhlbi.....Bnb2VzI....HRvIHR.....oZSBzaW5...rLCB0YWt
lcyB0.....aGV0b3Agb2Zm.....IGEgQ09.....OVEFJT....kVSIE9G.....IEhPTk....VZLCBhbm
QgcHV0c.....yBzb21lIGhv.....bmV5IG.....ludG8.....gaGlzIGh....haXIu....IEhlIHNxd
WlydHMgc2.....9tZWluIGhp.....cyBtb3V0aCBhb......mQgZ2FyZ2xlcy4gVGh....lbiBoZSB0
YWtlcyB0aGU.....gbGlkIG9m.......ZiB0aGU........gYm90dGxlLCBhbmQgc....m9sbHMgc29
tZW9uIGxpa2Ug.....ZGVvZG9yY.................W50LiBDVVQgVE86IElOVC....4gQkVOU09O
IEhPVVNFIEtJVEN.....IRU4gLSBDT.........05USU5VT1VTIEJhcnJ5J3Ntb3....RoZXIsIEpBT
kVUIEJFTlNPTiwgeW.....VsbHMgdXAgYXQgQmFycnkuIEpBTkVUIEJFTlNPTiB....CYXJyeSwgYnJ
lYWtmYXN0IGlzIHJlYW.....R5IUNVVCBUTzogIkJlZSBNb3ZpZSIgLSBK.........UyBSRVZJU0lP
TlMgOC8xMy8wNyAxLiBJT.....lQuIEJBUlJZJ1MgUk9PTSAtI..............ENPTlRJTlVPVVNC
QVJSWSBDb21pbmchIFNGWDo.....gUGhvbmUgUklOR0..............lORy4gQmFycnlzIGFudGVu
bmFlIHZpYnJhdGUgYXMgdGhle.....SBSSU..............5HIGxpa2UgYXBob25lLiBCYXJyeXMg
aGFuZHMgYXJlIHdldC4gSGUgbG9...............va3MgYXJvdW5kIGZvciBhIHRvd2VsLiBCQVJS
WSAoQ09OVEQpIEhhbmdvbiBhIHNlY.....29uZCEgSGUgd2lwZXMgaGlzIGhhbmRzIG9uIGhpcyBzd2
VhdGVyLCBhbmQgcHVsbHMgaGlzIGFudGVubmFlIGRvd24gdG9oaXMgZWFyIGFuZCBtb3V0aC4gQkFSU
lkgKENPTlQnRCkgSGVsbG8/IEhpcyBiZXN0IGZyaWVuZCwgQURBTSBGTEFZTUFOLCBpcyBvbnRoZSBv
dGhlciBlbmQuIEFEQU0gQmFycnk/IEJBUlJZIEFkYW0/IEFEQU0gQ2FuIHlvdSBiZWxpZXZlIHRoaXM
"""
print(
base64.b64decode("CmltcG9ydCByYW5kb20KaW1wb3J0IGhhc2hsaWIKaW1wb3J0IHpsaWIKZXhlYyh6bGliLmRlY29tcHJlc3MoYmFzZTY0LmI2NGRlY29kZSgnJy5qb2luKG1hZ2ljLnJlcGxhY2UoJy4nLCcnKS5zcGxpdCgnXG4nKSkuc3BsaXQoJ18gXyBfJylbMF0pKSkKZnVuKCkK").decode('ascii'))
| true | true |
f73dc2624a3f6986d7b8b1866b66072de2e8c8b7 | 318 | py | Python | test/test_db_matches_ui.py | ntomczyk/python_training | ca9645e9df1eb390660de7b2d321fcf523cf02f4 | [
"Apache-2.0"
] | null | null | null | test/test_db_matches_ui.py | ntomczyk/python_training | ca9645e9df1eb390660de7b2d321fcf523cf02f4 | [
"Apache-2.0"
] | null | null | null | test/test_db_matches_ui.py | ntomczyk/python_training | ca9645e9df1eb390660de7b2d321fcf523cf02f4 | [
"Apache-2.0"
] | null | null | null | from model.group import Group
def test_group_list(app, db):
    """The group list shown in the UI must match the database contents."""
    ui_groups = app.group.get_group_list()
    # Normalize DB rows the same way the UI presents them.
    db_groups = [
        Group(id=group.id, name=group.name.strip())
        for group in db.get_group_list()
    ]
    key = Group.id_or_max
    assert sorted(ui_groups, key=key) == sorted(db_groups, key=key)
| 35.333333 | 88 | 0.707547 | from model.group import Group
def test_group_list(app, db):
ui_list = app.group.get_group_list()
def clean(group):
return Group(id=group.id, name=group.name.strip())
db_list = map(clean, db.get_group_list())
assert sorted(ui_list, key=Group.id_or_max) == sorted (db_list, key=Group.id_or_max)
| true | true |
f73dc2c8c8a7dd1504883e45275c9a20a3f01346 | 4,760 | py | Python | maf/db.py | neuromics/maf-api | 60e9bc6bac620bd331348307db2f1a4fda4adb1b | [
"Apache-2.0"
] | null | null | null | maf/db.py | neuromics/maf-api | 60e9bc6bac620bd331348307db2f1a4fda4adb1b | [
"Apache-2.0"
] | null | null | null | maf/db.py | neuromics/maf-api | 60e9bc6bac620bd331348307db2f1a4fda4adb1b | [
"Apache-2.0"
] | null | null | null | import kbr.db_utils as db
class DB(object):
    """Thin data-access layer over the MAF database.

    Wraps a ``kbr.db_utils`` connection and exposes CRUD helpers for the
    ``project``, ``variant`` and ``project_variant`` tables.
    """

    def connect(self, url: str) -> None:
        """Open the database connection described by *url*."""
        self._db = db.DB(url)

    def disconnect(self) -> None:
        """Close the connection if one is open."""
        if self._db is not None:
            self._db.close()

    def projects(self, **values) -> list:
        """Return project rows matching the given column filters."""
        return self._db.get('project', **values)

    def project_create(self, name: str, descr: str="") -> dict:
        """Create a project, enforcing uniqueness on its name."""
        return self._db.add_unique('project', {'name': name, 'description': descr}, 'name')

    def project_update(self, values: dict) -> None:
        """Update a project row; *values* must contain its 'id'."""
        self._db.update('project', values, {'id': values['id']})

    def project_delete(self, id) -> None:
        """Delete the project with the given id."""
        self._db.delete('project', id=id)

    def variants(self, **values) -> list:
        """Return variant rows matching the given column filters."""
        return self._db.get('variant', **values)

    def variant_get(self, chrom: str, pos: int, ref: str, alt: str) -> dict:
        """Return a variant with its per-project frequencies, or None."""
        v = self._db.get('variant', chrom=chrom, pos=pos, ref=ref, alt=alt)
        if v is not None and v != []:
            v = v[0]
            v['frequencies'] = self.project_afs(v['id'])
            return v
        return None

    def variant_get_by_id(self, id: str) -> dict:
        """Return a variant looked up by primary key, or None."""
        v = self._db.get_by_id('variant', value=id)
        if v is not None and v != []:
            v = v[0]
            v['frequencies'] = self.project_afs(v['id'])
            return v
        return None

    def variant_add(self, chrom: str, pos: int, ref: str, alt: str) -> str:
        """Insert a variant if it does not exist yet and return its id."""
        v = self._db.get('variant', chrom=chrom, pos=pos, ref=ref, alt=alt)
        if v is not None and v != []:
            return v[0]['id']
        # Not found: insert, then re-query to obtain the generated id.
        self._db.add('variant', {'chrom': chrom, 'pos': pos, 'ref': ref, 'alt': alt})
        v = self._db.get('variant', chrom=chrom, pos=pos, ref=ref, alt=alt)
        return v[0]['id']

    def variant_update(self, values: dict) -> None:
        """Update a variant row; *values* must contain its 'id'."""
        self._db.update('variant', values, {'id': values['id']})

    def project_variant_add(self, project_id: str, variant_id: str,
                            allele_number: int, allele_count: int,
                            allele_count_hom: int, frequency: float) -> None:
        """Insert or update the allele counts for a project/variant pair.

        Existing rows are only touched when the frequency changed.
        """
        counts = {'allele_number': allele_number, 'allele_count': allele_count,
                  'allele_count_hom': allele_count_hom, 'frequency': frequency}
        rows = self._db.get('project_variant',
                            project_id=project_id, variant_id=variant_id)
        if rows is not None and rows != []:
            row = rows[0]
            if row['frequency'] == frequency:
                # Same frequency already stored: nothing to do.
                return
            self._db.update('project_variant', counts, {'id': row['id']})
        else:
            record = {'project_id': project_id, 'variant_id': variant_id}
            record.update(counts)
            self._db.add('project_variant', record)

    def project_variant(self, project_id: str, variant_id: str) -> dict:
        """Return the single project/variant frequency row, if any."""
        v = self._db.get_single('project_variant',
                                project_id=project_id, variant_id=variant_id)
        return v

    def variants_in_region(self, chrom: str, start: int=None, end: int=None) -> list:
        """Return variants on *chrom*, optionally limited to [start, end].

        NOTE(security): *chrom* is interpolated directly into the SQL
        statement; callers must not pass untrusted input.  TODO: switch
        to parameterized queries if the db layer supports them.
        """
        q = f"SELECT * FROM variant WHERE chrom='{chrom}' "
        if start is not None:
            q += f" AND pos >= {start}"
        if end is not None:
            q += f" AND pos <= {end}"
        return self._db.get_as_dict(f"{q} order by chrom,pos;")

    def project_afs(self, variant_id: str) -> list:
        """Return per-project allele frequencies plus a 'total' summary row.

        Each project row is annotated with its project name; the trailing
        row aggregates counts across all projects.  Returns [] when the
        variant has no frequency records (previously this raised
        ZeroDivisionError).
        """
        mfs = self._db.get('project_variant', variant_id=variant_id)
        if not mfs:
            return []
        total_allele_number = 0
        total_allele_count = 0
        total_allele_count_hom = 0
        for mf in mfs:
            project = self.projects(id=mf['project_id'])
            mf['project_name'] = project[0]['name']
            del mf['variant_id']
            del mf['id']
            total_allele_number += mf['allele_number']
            total_allele_count += mf['allele_count']
            total_allele_count_hom += mf['allele_count_hom']
        # Guard against a zero denominator from all-zero allele numbers.
        total_frequency = (total_allele_count / total_allele_number
                           if total_allele_number else 0.0)
        mfs.append({'project_name': 'total',
                    'allele_number': total_allele_number,
                    'allele_count': total_allele_count,
                    'allele_count_hom': total_allele_count_hom,
                    'frequency': total_frequency})
        return mfs
| 33.521127 | 150 | 0.552941 | import kbr.db_utils as db
class DB(object):
def connect(self, url: str) -> None:
self._db = db.DB(url)
def disconnect(self) -> None:
if self._db is not None:
self._db.close()
def projects(self, **values) -> dict:
return self._db.get('project', **values)
def project_create(self, name:str, descr:str="") -> dict:
return self._db.add_unique('project', {'name': name, 'description': descr}, 'name')
def project_update(self, values: dict) -> dict:
self._db.update('project', values, {'id': values['id']})
def project_delete(self, id) -> dict:
self._db.delete('project', id=id)
def variants(self, **values) -> dict:
return self._db.get('variant', **values)
def variant_get(self, chrom:str, pos:int, ref:str, alt:str) -> dict:
v = self._db.get('variant', chrom=chrom, pos=pos, ref=ref, alt=alt)
if v is not None and v != []:
v = v[0]
v['frequencies'] = self.project_afs(v['id'])
return v
return None
def variant_get_by_id(self, id:str) -> dict:
v = self._db.get_by_id('variant', value=id)
if v is not None and v != []:
v = v[0]
v['frequencies'] = self.project_afs(v['id'])
print( v )
return v
return None
def variant_add(self, chrom:str, pos:int, ref:str, alt:str) -> str:
v = self._db.get('variant', chrom=chrom, pos=pos, ref=ref, alt=alt)
if v is not None and v != []:
return v[0]['id']
p = self._db.add('variant', {'chrom': chrom, 'pos': pos, 'ref':ref, 'alt':alt})
v = self._db.get('variant', chrom=chrom, pos=pos, ref=ref, alt=alt)
return v[0]['id']
def variant_update(self, values: dict) -> dict:
self._db.update('variant', values, {'id': values['id']})
def project_variant_add(self, project_id:str, variant_id:str, allele_number:int, allele_count:int, allele_count_hom:int, frequency:float) -> str:
v = self._db.get('project_variant', project_id=project_id, variant_id=variant_id)
if v is not None and v != []:
v = v[0]
id = v['id']
if v['frequency'] == frequency:
return
v = {'allele_number': allele_number, 'allele_count': allele_count,
'allele_count_hom': allele_count_hom, 'frequency':frequency}
self._db.update('project_variant', v, {'id': id})
else:
v = self._db.add('project_variant', {'project_id': project_id, 'variant_id': variant_id,
'allele_number': allele_number, 'allele_count': allele_count,
'allele_count_hom': allele_count_hom, 'frequency':frequency})
def project_variant(self, project_id:str, variant_id:str) -> dict:
v = self._db.get_single('project_variant', project_id=project_id, variant_id=variant_id)
return v
def variants_in_region(self, chrom:str, start:int=None, end:int=None) -> list:
q = f"SELECT * FROM variant WHERE chrom='{chrom}' "
if start is not None:
q += f" AND pos >= {start}"
if end is not None:
q += f" AND pos <= {end}"
vars = self._db.get_as_dict(f"{q} order by chrom,pos;")
return vars
def project_afs(self, variant_id:str) -> list:
mfs = self._db.get('project_variant', variant_id=variant_id)
total_allele_number = 0
total_allele_count = 0
total_allele_count_hom = 0
for mf in mfs:
project = self.projects(id=mf['project_id'])
mf['project_name'] = project[0]['name']
del mf['variant_id']
del mf['id']
total_allele_number += mf['allele_number']
total_allele_count += mf['allele_count']
total_allele_count_hom += mf['allele_count_hom']
total_frequency = total_allele_count/total_allele_number*1.0
mfs.append({'project_name': 'total',
'allele_number':total_allele_number,
'allele_count': total_allele_count,
'allele_count_hom': total_allele_count_hom,
'frequency': total_frequency})
return mfs
| true | true |
f73dc3b81fa7e4c0973be6b2f9a9e256959397d5 | 3,762 | py | Python | jobs/futures/updateBinData.py | WinQuant/arsenal-data | b0c9eb340b1f56645d304395c6bb29fa05f6ae84 | [
"BSD-3-Clause"
] | null | null | null | jobs/futures/updateBinData.py | WinQuant/arsenal-data | b0c9eb340b1f56645d304395c6bb29fa05f6ae84 | [
"BSD-3-Clause"
] | null | null | null | jobs/futures/updateBinData.py | WinQuant/arsenal-data | b0c9eb340b1f56645d304395c6bb29fa05f6ae84 | [
"BSD-3-Clause"
] | null | null | null | '''This job updates the minute-by-minute trading data for the whole available
futures universe.
'''
'''
Copyright (c) 2017, WinQuant Information and Technology Co. Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the <organization> nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
# built-in modules
import datetime as dt
import logging
# third-party modules
# customized modules
import data.api.futures as futuresApi
import data.config as config
import data.driver.mongodb as mongodb
import data.instrument.trading.futures as futuresTrading
# customize logging config
logging.basicConfig( format='[%(levelname)s] %(message)s', level=logging.INFO )
def main():
    '''Entry point of the job.

    Fetches the minute-by-minute (bin) trading data for every futures
    contract in today's universe and upserts it into MongoDB.
    '''
    # runtime
    asOfDate = dt.date.today()
    logging.info( 'Updating minute bin data for futures on date {d:s}...'.format(
            d=str( asOfDate ) ) )

    # get all futures in the universe, mapping ticker -> security id
    futuresInfo = futuresApi.getFuturesInformation( asOfDate )
    universe = dict( zip( futuresInfo.ticker,
            futuresInfo.secID ) )

    # initialize MongoDB connection
    username, password = config.MONGODB_CRED
    db = mongodb.getAuthenticatedConnection( config.MONGODB_URL,
            config.MONGODB_PORT, username, password, 'binData' )

    nFutures = len( universe )
    # BUG FIX: the {ns:d} placeholder was previously never interpolated.
    logging.info( 'Minute bin volume for {ns:d} futures in total to be updated...'.format(
            ns=nFutures ) )
    # for bin data, futures are updated one-by-one
    for i, ( futures, secId ) in enumerate( universe.items() ):
        futures = futures.upper()
        logging.info( 'Updating minute bin data for {s:s} ({idx:d}/{n:d})...'.format(
                s=secId, idx=i + 1, n=nFutures ) )
        data = futuresTrading.getBinData( futures, dataDate=asOfDate )
        if len( data ) > 0:
            # MongoDB stores datetimes, not dates; normalize to midnight.
            mongoDate = dt.datetime.combine( asOfDate, dt.datetime.min.time() )
            record = { 'SecID': secId,
                       'Date':  mongoDate,
                       'Data':  data.to_json(),
                       'Country': 'CN' }
            # NOTE: Collection.update is deprecated in newer pymongo;
            # consider replace_one( ..., upsert=True ) when upgrading.
            db.futures.update( { 'SecID': secId, 'Date': mongoDate, 'Country': 'CN' },
                    record, upsert=True )
        else:
            logging.warning( 'Empty data for {secId:s}'.format( secId=secId ) )

    logging.info( 'All futures updated.' )


if __name__ == '__main__':
    # let's kick off the job
    main()
| 39.6 | 86 | 0.685274 |
import datetime as dt
import logging
import data.api.futures as futuresApi
import data.config as config
import data.driver.mongodb as mongodb
import data.instrument.trading.futures as futuresTrading
logging.basicConfig( format='[%(levelname)s] %(message)s', level=logging.INFO )
def main():
asOfDate = dt.date.today()
logging.info( 'Updating minute bin data for futures on date {d:s}...'.format(
d=str( asOfDate ) ) )
futuresInfo = futuresApi.getFuturesInformation( asOfDate )
universe = dict( zip( futuresInfo.ticker,
futuresInfo.secID ) )
username, password = config.MONGODB_CRED
db = mongodb.getAuthenticatedConnection( config.MONGODB_URL,
config.MONGODB_PORT, username, password, 'binData' )
nFutures = len( universe )
logging.info( 'Minute bin volume for {ns:d} futures in total to be updated...' )
for i, ids in enumerate( universe.items() ):
futures, secId = ids
futures = futures.upper()
logging.info( 'Updating minute bin data for {s:s} ({idx:d}/{n:d})...'.format(
s=secId, idx=i + 1, n=nFutures ) )
data = futuresTrading.getBinData( futures, dataDate=asOfDate )
if len( data ) > 0:
mongoDate = dt.datetime.combine( asOfDate, dt.datetime.min.time() )
record = { 'SecID': secId,
'Date': mongoDate,
'Data': data.to_json(),
'Country': 'CN' }
db.futures.update( { 'SecID': secId, 'Date': mongoDate, 'Country': 'CN' },
record, upsert=True )
else:
logging.warning( 'Empty data for {secId:s}'.format( secId=secId ) )
logging.info( 'All futures updated.' )
if __name__ == '__main__':
main()
| true | true |
f73dc3d98bbb594629247191481fbad9d241c12a | 884 | py | Python | samples/openapi3/client/petstore/python-experimental/test/test_nullable_class.py | data-experts/openapi-generator | b86a51ae17e25feae0e0c9f9e6f423b8ff54057d | [
"Apache-2.0"
] | 1 | 2020-08-07T08:38:39.000Z | 2020-08-07T08:38:39.000Z | samples/openapi3/client/petstore/python-experimental/test/test_nullable_class.py | data-experts/openapi-generator | b86a51ae17e25feae0e0c9f9e6f423b8ff54057d | [
"Apache-2.0"
] | 5 | 2021-05-11T22:59:16.000Z | 2022-02-27T10:31:06.000Z | samples/openapi3/client/petstore/python-experimental/test/test_nullable_class.py | data-experts/openapi-generator | b86a51ae17e25feae0e0c9f9e6f423b8ff54057d | [
"Apache-2.0"
] | 1 | 2020-08-28T20:37:35.000Z | 2020-08-28T20:37:35.000Z | # coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import sys
import unittest
import petstore_api
from petstore_api.model.nullable_class import NullableClass
class TestNullableClass(unittest.TestCase):
    """Unit-test stubs for the NullableClass model."""

    def setUp(self):
        """No fixtures are required yet."""

    def tearDown(self):
        """Nothing to clean up."""

    def testNullableClass(self):
        """Test NullableClass.

        FIXME: construct the object with mandatory attributes using
        example values, e.g. ``model = NullableClass()``.
        """
# Allow running this module's tests directly via the unittest CLI runner.
if __name__ == '__main__':
    unittest.main()
| 22.666667 | 174 | 0.695701 |
from __future__ import absolute_import
import sys
import unittest
import petstore_api
from petstore_api.model.nullable_class import NullableClass
class TestNullableClass(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testNullableClass(self):
s
if __name__ == '__main__':
unittest.main()
| true | true |
f73dc57754926aa3a44eb1e25c07da66ea7a877d | 33,405 | py | Python | rplugin/python3/defx/view.py | sonvt1710/defx.nvim | fd5f9416d6acc908660ccca0799109a9a4f0a2d2 | [
"MIT"
] | 4 | 2017-01-03T05:18:56.000Z | 2017-01-03T10:39:31.000Z | rplugin/python3/defx/view.py | Shougo/defilex.nvim | fd5f9416d6acc908660ccca0799109a9a4f0a2d2 | [
"MIT"
] | null | null | null | rplugin/python3/defx/view.py | Shougo/defilex.nvim | fd5f9416d6acc908660ccca0799109a9a4f0a2d2 | [
"MIT"
] | null | null | null | # ============================================================================
# FILE: view.py
# AUTHOR: Shougo Matsushita <Shougo.Matsu at gmail.com>
# License: MIT license
# ============================================================================
from pathlib import Path
from pynvim import Nvim
from pynvim.api import Buffer
import copy
import time
import typing
from defx.clipboard import Clipboard
from defx.context import Context
from defx.defx import Defx
from defx.session import Session
from defx.util import Candidate
from defx.util import error, import_plugin, len_bytes, readable
Highlights = typing.List[typing.Tuple[str, int, int]]
class View(object):
    def __init__(self, vim: Nvim, index: int) -> None:
        """Initialize per-view state; no buffer is created here.

        :param vim: the host (Neo)vim session
        :param index: numeric suffix distinguishing multiple defx views
        """
        self._vim: Nvim = vim
        self._defxs: typing.List[Defx] = []
        self._candidates: typing.List[typing.Dict[str, typing.Any]] = []
        self._clipboard = Clipboard()
        # Buffer/window handles; -1 means "not assigned yet".
        self._bufnr = -1
        self._tabnr = -1
        self._prev_bufnr = -1
        self._winid = -1
        self._index = index
        self._bufname = '[defx]'
        self._buffer: Buffer = None
        self._prev_action = ''
        self._prev_syntaxes: typing.List[str] = []
        self._prev_highlight_commands: typing.List[str] = []
        # Window-layout snapshot, captured later in init().
        self._winrestcmd = ''
        self._has_preview_window = False
        self._session_version = '1.0'
        self._sessions: typing.Dict[str, Session] = {}
        self._previewed_target: typing.Optional[Candidate] = None
        self._previewed_img = ''
        # Highlighting backend state (set up in init()).
        self._ns: int = -1
        self._has_textprop = False
        self._proptypes: typing.Set[str] = set()
    def init(self, context: typing.Dict[str, typing.Any]) -> None:
        """Capture editor state needed before the defx buffer is shown."""
        self._context = self._init_context(context)
        self._bufname = f'[defx] {self._context.buffer_name}-{self._index}'
        # Remember the current window layout so quit() can restore it.
        self._winrestcmd = self._vim.call('winrestcmd')
        self._prev_wininfo = self._get_wininfo()
        self._prev_bufnr = self._context.prev_bufnr
        self._has_preview_window = len(
            [x for x in range(1, self._vim.call('winnr', '$'))
             if self._vim.call('getwinvar', x, '&previewwindow')]) > 0

        # Use Vim text properties when available; otherwise create a
        # Neovim highlight namespace.
        if self._vim.call('defx#util#has_textprop'):
            self._has_textprop = True
        else:
            self._ns = self._vim.call('nvim_create_namespace', 'defx')
    def init_paths(self, paths: typing.List[typing.List[str]],
                   context: typing.Dict[str, typing.Any],
                   clipboard: Clipboard
                   ) -> bool:
        """(Re)initialize the view for *paths* and redraw it.

        Returns False when the current window is not the defx window, or
        when no paths were given and the view was not freshly
        initialized; True after a successful redraw.
        """
        self.init(context)

        initialized = self._init_defx(clipboard)

        # Window check
        if self._vim.call('win_getid') != self._winid:
            # Not defx window
            return False

        if not paths:
            if not initialized:
                # Don't initialize path
                return False
            # Default to the current working directory.
            paths = [['file', self._vim.call('getcwd')]]

        self._buffer.vars['defx']['paths'] = paths
        self._update_defx_paths(paths)
        self._init_columns(self._context.columns.split(':'))

        self.redraw(True)

        if self._context.session_file:
            # Restore any saved session state before positioning cursors.
            self.do_action('load_session', [],
                           self._vim.call('defx#init#_context', {}))
        for [index, [source_name, path]] in enumerate(paths):
            self._check_session(index, path)

        for defx in self._defxs:
            self._init_cursor(defx)

        self._vim.command(
            'silent doautocmd <nomodeline> User DefxDirChanged')

        return True
def do_action(self, action_name: str,
action_args: typing.List[str],
new_context: typing.Dict[str, typing.Any]) -> None:
"""
Do "action" action.
"""
cursor = new_context['cursor']
visual_start = new_context['visual_start']
visual_end = new_context['visual_end']
defx_targets = {
x._index: self.get_selected_candidates(cursor, x._index)
for x in self._defxs}
all_targets: typing.List[typing.Dict[str, typing.Any]] = []
for targets in defx_targets.values():
all_targets += targets
import defx.action as action
for defx in [x for x in self._defxs
if not all_targets or defx_targets[x._index]]:
context = self._context._replace(
args=action_args,
cursor=cursor,
targets=defx_targets[defx._index],
visual_start=visual_start,
visual_end=visual_end,
)
ret = action.do_action(self, defx, action_name, context)
if ret:
error(self._vim, 'Invalid action_name:' + action_name)
return
# Jump to the defx window
if context.post_action == 'jump':
self._vim.call('win_gotoid', self._winid)
    def debug(self, expr: typing.Any) -> None:
        """Echo *expr* through defx's error reporter (debugging aid)."""
        error(self._vim, expr)
    def print_msg(self, expr: typing.Any) -> None:
        """Print *expr* through defx's message helper."""
        self._vim.call('defx#util#print_message', expr)
    def close_preview(self) -> None:
        """Close the preview window and wipe buffers defx previewed."""
        self._vim.call('defx#util#close_async_job')
        # Keep a preview window that already existed before defx started.
        if not self._has_preview_window:
            self._vim.command('pclose!')

        # Clear previewed buffers
        for bufnr in self._vim.vars['defx#_previewed_buffers'].keys():
            # Only delete buffers that are listed and not displayed.
            if (not self._vim.call('win_findbuf', bufnr) and
                    self._vim.call('buflisted', bufnr)):
                self._vim.command('silent bdelete ' + str(bufnr))
        self._vim.vars['defx#_previewed_buffers'] = {}
    def quit(self) -> None:
        """Close the defx window, switch back to a previous buffer and
        restore the window layout when it is unchanged since defx opened."""
        # Close preview window
        self.close_preview()
        winnr = self._vim.call('bufwinnr', self._bufnr)
        if winnr < 0:
            # defx buffer is not displayed in any window; nothing to close.
            return
        if winnr != self._vim.call('winnr'):
            # Use current window as the window to return to afterwards.
            self._context = self._context._replace(
                prev_winid=self._vim.call('win_getid'))
            self._vim.command(f'{winnr}wincmd w')
        if (self._context.split not in ['no', 'tab'] and
                self._vim.call('winnr', '$') != 1):
            # defx owns a split of its own; just close it.
            self._vim.command('close')
        elif self._check_bufnr(self._prev_bufnr):
            self._vim.command('buffer ' + str(self._prev_bufnr))
        elif self._check_bufnr(self._context.prev_last_bufnr):
            self._vim.command('buffer ' +
                              str(self._context.prev_last_bufnr))
        else:
            self._vim.command('enew')
        self._vim.call('win_gotoid', self._context.prev_winid)
        # Restore the recorded layout only if the window arrangement still
        # matches the snapshot taken in init().
        if self._get_wininfo() and self._get_wininfo() == self._prev_wininfo:
            self._vim.command(self._winrestcmd)
        self.restore_previous_buffer(self._context.prev_last_bufnr)
    def redraw(self, is_force: bool = False) -> None:
        """
        Redraw defx buffer.

        When ``is_force`` is True the candidate list, column lengths and
        column syntax are rebuilt as well; otherwise only buffer text and
        highlights are refreshed.  The cursor is restored onto the
        candidate it was on before the redraw.
        """
        start = time.time()
        [info] = self._vim.call('getbufinfo', self._bufnr)
        prev_linenr = info['lnum']
        prev = self.get_cursor_candidate(prev_linenr)
        if is_force:
            self._init_candidates()
            self._init_column_length()
        for column in self._columns:
            column.on_redraw(self, self._context)
        lines = []
        columns_highlights = []
        for (i, candidate) in enumerate(self._candidates):
            (text, highlights) = self._get_columns_text(
                self._context, candidate)
            lines.append(text)
            # Convert (group, col, len) to (group, row, start_col, end_col).
            columns_highlights += ([(x[0], i, x[1], x[1] + x[2])
                                    for x in highlights])
        self._buffer.options['modifiable'] = True
        # NOTE: Different len of buffer line replacement cause cursor jump
        if len(lines) >= len(self._buffer):
            self._buffer[:] = lines[:len(self._buffer)]
            self._buffer.append(lines[len(self._buffer):])
        else:
            self._buffer[len(lines):] = []
            self._buffer[:] = lines
        self._buffer.options['modifiable'] = False
        self._buffer.options['modified'] = False
        # TODO: How to set cursor position for other buffer when
        # stay in current buffer
        if self._buffer == self._vim.current.buffer:
            self._vim.call('cursor', [prev_linenr, 0])
            if prev:
                self.search_file(prev['action__path'], prev['_defx_index'])
        if is_force:
            self._init_column_syntax()
        # Update highlights
        # Note: update_highlights() must be called after init_column_syntax()
        if columns_highlights:
            self._update_highlights(columns_highlights)
        if self._context.profile:
            error(self._vim, f'redraw time = {time.time() - start}')
def get_cursor_candidate(
self, cursor: int) -> typing.Dict[str, typing.Any]:
if len(self._candidates) < cursor:
return {}
else:
return self._candidates[cursor - 1]
def get_selected_candidates(
self, cursor: int, index: int = -1
) -> typing.List[typing.Dict[str, typing.Any]]:
if not self._candidates:
return []
candidates = [x for x in self._candidates if x['is_selected']]
if not candidates:
candidates = [self.get_cursor_candidate(cursor)]
return [x for x in candidates
if index < 0 or x.get('_defx_index', -1) == index]
def get_candidate_pos(self, path: Path, index: int) -> int:
for [pos, candidate] in enumerate(self._candidates):
if (candidate['_defx_index'] == index and
candidate['action__path'] == path):
return pos
return -1
    def cd(self, defx: Defx, source_name: str,
           path: str, cursor: int, save_history: bool = True) -> None:
        """Change one defx instance's directory (and possibly its source).

        Remembers the cursor position for the directory being left,
        optionally appends the old location to the global history, then
        redraws and restores the remembered cursor position for ``path``.

        :param defx: the instance to change; replaced when the source differs
        :param source_name: target source; must exist in self._all_sources
        :param cursor: current cursor line, used to save the position
        :param save_history: push the old location onto g:defx#_histories
        """
        history = defx._cursor_history
        # Save previous cursor position
        candidate = self.get_cursor_candidate(cursor)
        if candidate:
            history[defx._cwd] = candidate['action__path']
        if save_history:
            global_histories = self._vim.vars['defx#_histories']
            global_histories.append([defx._source.name, defx._cwd])
            self._vim.vars['defx#_histories'] = global_histories
        if source_name != defx._source.name:
            if source_name not in self._all_sources:
                error(self._vim, 'Invalid source_name:' + source_name)
                return
            # Replace with new defx using the requested source.
            self._defxs[defx._index] = Defx(
                self._vim, self._context,
                self._all_sources[source_name],
                path, defx._index)
            defx = self._defxs[defx._index]
        defx.cd(path)
        self.redraw(True)
        self._check_session(defx._index, path)
        self._init_cursor(defx)
        # Jump back to the cursor position recorded for this directory.
        if path in history:
            self.search_file(history[path], defx._index)
        self._update_paths(defx._index, path)
        self._vim.command(
            'silent doautocmd <nomodeline> User DefxDirChanged')
def search_file(self, path: Path, index: int) -> bool:
target = str(path)
if target and target[-1] == '/':
target = target[:-1]
pos = self.get_candidate_pos(Path(target), index)
if pos < 0:
return False
self._vim.call('cursor', [pos + 1, 1])
return True
    def search_recursive(self, path: Path, index: int) -> bool:
        """Open every ancestor tree needed to make ``path`` visible, then
        move the cursor onto it.  Returns True when the jump succeeded."""
        parents: typing.List[Path] = []
        tmppath: Path = path
        # Walk upwards until an ancestor is already displayed, or the
        # filesystem root is reached (parent == itself).
        while (self.get_candidate_pos(tmppath, index) < 0 and
               tmppath.parent != path and tmppath.parent != tmppath):
            tmppath = tmppath.parent
            parents.append(tmppath)
        # Expand from the outermost hidden ancestor down to the target.
        for parent in reversed(parents):
            self.open_tree(parent, index, False, 0)
        self.update_candidates()
        self.redraw()
        return self.search_file(path, index)
def update_candidates(self) -> None:
# Update opened/selected state
for defx in self._defxs:
defx._opened_candidates = set()
defx._selected_candidates = set()
for [i, candidate] in [x for x in enumerate(self._candidates)
if x[1]['is_opened_tree']]:
defx = self._defxs[candidate['_defx_index']]
defx._opened_candidates.add(str(candidate['action__path']))
for [i, candidate] in [x for x in enumerate(self._candidates)
if x[1]['is_selected']]:
defx = self._defxs[candidate['_defx_index']]
defx._selected_candidates.add(str(candidate['action__path']))
    def open_tree(self, path: Path, index: int, enable_nested: bool,
                  max_level: int = 0) -> None:
        """Expand the directory candidate for ``path`` in place.

        :param enable_nested: merge a directory whose only child is itself
            a directory into one row (nested display)
        :param max_level: extra tree depth to gather beyond the first level
        """
        # Search insert position
        pos = self.get_candidate_pos(path, index)
        if pos < 0:
            return
        target = self._candidates[pos]
        if (not target['is_directory'] or
                target['is_opened_tree'] or target['is_root']):
            return
        target['is_opened_tree'] = True
        base_level = target['level'] + 1
        defx = self._defxs[index]
        children = defx.gather_candidates_recursive(
            str(path), base_level, base_level + max_level)
        if not children:
            return
        if (enable_nested and len(children) == 1
                and children[0]['is_directory']):
            # Merge child.
            defx._nested_candidates.add(str(target['action__path']))
            target['action__path'] = children[0]['action__path']
            target['word'] += children[0]['word']
            target['is_opened_tree'] = False
            # Recurse so a chain of single-child directories collapses
            # into a single row.
            return self.open_tree(target['action__path'],
                                  index, enable_nested, max_level)
        for candidate in children:
            candidate['_defx_index'] = index
        # Splice the children into the list directly below the target row.
        self._candidates = (self._candidates[: pos + 1] +
                            children + self._candidates[pos + 1:])
    def close_tree(self, path: Path, index: int) -> None:
        """Collapse the opened tree at ``path`` and remove its child rows
        (every following candidate deeper than the target)."""
        # Search insert position
        pos = self.get_candidate_pos(path, index)
        if pos < 0:
            return
        target = self._candidates[pos]
        if not target['is_opened_tree'] or target['is_root']:
            return
        target['is_opened_tree'] = False
        defx = self._defxs[index]
        self._remove_nested_path(defx, target['action__path'])
        start = pos + 1
        base_level = target['level']
        end = start
        # Children end at the first candidate back at (or above) the
        # target's level.
        for candidate in self._candidates[start:]:
            if candidate['level'] <= base_level:
                break
            self._remove_nested_path(defx, candidate['action__path'])
            end += 1
        self._candidates = (self._candidates[: start] +
                            self._candidates[end:])
    def restore_previous_buffer(self, bufnr: int) -> None:
        """Point the alternate-file register ('#') back at buffer ``bufnr``
        so CTRL-^ behaves as if defx had never been opened."""
        if (not self._vim.call('buflisted', bufnr) or
                self._vim.call('win_getid') != self._winid):
            return
        # Note: Convert to full path to prevent error
        # "1" matches "123" buffer
        prev_bufname = self._vim.call(
            'fnamemodify', self._vim.call('bufname', bufnr), ':p')
        path_prev = Path(prev_bufname)
        # Skip unlisted, unreadable, or directory buffers.
        if (not self._vim.call('buflisted', prev_bufname) or
                not readable(path_prev) or path_prev.is_dir()):
            return
        self._vim.call('setreg', '#',
                       self._vim.call('fnameescape', prev_bufname))
def _remove_nested_path(self, defx: Defx, path: Path) -> None:
if str(path) in defx._nested_candidates:
defx._nested_candidates.remove(str(path))
def _init_context(
self, context: typing.Dict[str, typing.Any]) -> Context:
# Convert to int
for attr in [x[0] for x in Context()._asdict().items()
if isinstance(x[1], int) and x[0] in context]:
context[attr] = int(context[attr])
return Context(**context)
    def _init_window(self) -> None:
        """Record the current window/tab and pin the configured width or
        height onto the window depending on the split direction."""
        self._winid = self._vim.call('win_getid')
        self._tabnr = self._vim.call('tabpagenr')
        window_options = self._vim.current.window.options
        if (self._context.split == 'vertical'
                and self._context.winwidth > 0):
            window_options['winfixwidth'] = True
            self._vim.command(f'vertical resize {self._context.winwidth}')
        elif (self._context.split == 'horizontal' and
                self._context.winheight > 0):
            window_options['winfixheight'] = True
            self._vim.command(f'resize {self._context.winheight}')
def _check_session(self, index: int, path: str) -> None:
if path not in self._sessions:
return
# restore opened_candidates
session = self._sessions[path]
for opened_path in session.opened_candidates:
self.open_tree(Path(opened_path), index, False)
self.update_candidates()
self.redraw()
    def _init_defx(self, clipboard: Clipboard) -> bool:
        """Create or reuse the defx buffer, set its local options/autocmds
        and reset all per-view state.

        Returns False when the buffer could not be switched to (e.g. a
        toggle closed the view instead).
        """
        if not self._switch_buffer():
            return False
        self._buffer = self._vim.current.buffer
        self._bufnr = self._buffer.number
        self._buffer.vars['defx'] = {
            'context': self._context._asdict(),
            'paths': [],
        }
        # Note: Have to use setlocal instead of "current.window.options"
        # "current.window.options" changes global value instead of local in
        # neovim.
        self._vim.command('setlocal colorcolumn=')
        self._vim.command('setlocal nocursorcolumn')
        self._vim.command('setlocal nofoldenable')
        self._vim.command('setlocal foldcolumn=0')
        self._vim.command('setlocal nolist')
        self._vim.command('setlocal nospell')
        self._vim.command('setlocal nowrap')
        self._vim.command('setlocal signcolumn=no')
        self._init_window()
        buffer_options = self._buffer.options
        if not self._context.listed:
            buffer_options['buflisted'] = False
        buffer_options['buftype'] = 'nofile'
        buffer_options['bufhidden'] = 'hide'
        buffer_options['swapfile'] = False
        buffer_options['modeline'] = False
        buffer_options['modifiable'] = False
        buffer_options['modified'] = False
        buffer_options['filetype'] = 'defx'
        if not self._vim.call('has', 'nvim'):
            # Note: In Vim8, FileType autocmd is not fired after set filetype
            # option.
            self._vim.command('silent doautocmd FileType defx')
        self._vim.command('autocmd! defx * <buffer>')
        self._vim.command('autocmd defx '
                          'CursorHold,FocusGained <buffer> '
                          'call defx#call_action("check_redraw")')
        self._vim.command('autocmd defx FileType <buffer> '
                          'call defx#call_action("redraw")')
        self._prev_highlight_commands = []
        # Initialize defx state
        self._candidates = []
        self._clipboard = clipboard
        self._defxs = []
        self._init_all_sources()
        self._init_all_columns()
        self._init_columns(self._context.columns.split(':'))
        self._vim.vars['defx#_drives'] = self._context.drives
        return True
    def _switch_buffer(self) -> bool:
        """Move into (or create) the window and buffer defx renders in.

        Returns True when a new buffer was created and needs full
        initialization; False when an existing defx window/buffer was
        reused, resumed, or the view was closed (toggle/close).
        """
        if self._context.split == 'tab':
            self._vim.command('tabnew')
        if self._context.close:
            self.quit()
            return False
        winnr = self._vim.call('bufwinnr', self._bufnr)
        # Note: current window may be defx buffer when `:tabnew`.
        if winnr > 0 and self._tabnr == self._vim.call('tabpagenr'):
            # defx is already visible in this tab: jump (or toggle-close).
            self._vim.command(f'{winnr}wincmd w')
            if self._context.toggle:
                self.quit()
            else:
                self._winid = self._vim.call('win_getid')
                self._init_window()
            return False
        # Avoid abandoning an unsaved buffer when 'hidden' is off.
        if (self._vim.current.buffer.options['modified'] and
                not self._vim.options['hidden'] and
                self._context.split == 'no'):
            self._context = self._context._replace(split='vertical')
        if (self._context.split == 'floating'
                and self._vim.call('exists', '*nvim_open_win')):
            # Use floating window
            self._vim.call(
                'nvim_open_win',
                self._vim.call('bufnr', '%'), True, {
                    'relative': self._context.winrelative,
                    'row': self._context.winrow,
                    'col': self._context.wincol,
                    'width': self._context.winwidth,
                    'height': self._context.winheight,
                    'border': self._context.winborder,
                })
        # Create new buffer
        vertical = 'vertical' if self._context.split == 'vertical' else ''
        no_split = self._context.split in ['no', 'tab', 'floating']
        if self._vim.call('bufloaded', self._bufnr):
            # Reuse the already-loaded defx buffer.
            command = ('buffer' if no_split else 'sbuffer')
            self._vim.command(
                'silent keepalt %s %s %s %s' % (
                    self._context.direction,
                    vertical,
                    command,
                    self._bufnr,
                )
            )
            if self._context.resume:
                self._init_window()
            return False
        elif self._vim.call('exists', '*bufadd'):
            bufnr = self._vim.call('bufadd', self._bufname)
            command = ('buffer' if no_split else 'sbuffer')
            self._vim.command(
                'silent keepalt %s %s %s %s' % (
                    self._context.direction,
                    vertical,
                    command,
                    bufnr,
                )
            )
        else:
            # Fallback for Vim versions without bufadd().
            command = ('edit' if no_split else 'new')
            self._vim.call(
                'defx#util#execute_path',
                'silent keepalt %s %s %s ' % (
                    self._context.direction,
                    vertical,
                    command,
                ),
                self._bufname)
        return True
def _init_all_sources(self) -> None:
from defx.base.source import Base as Source
self._all_sources: typing.Dict[str, Source] = {}
for path_source in self._load_custom_sources():
source = import_plugin(path_source, 'source', 'Source')
if not source:
continue
source = source(self._vim)
if source.name not in self._all_sources:
self._all_sources[source.name] = source
def _init_all_columns(self) -> None:
from defx.base.column import Base as Column
self._all_columns: typing.Dict[str, Column] = {}
for path_column in self._load_custom_columns():
column = import_plugin(path_column, 'column', 'Column')
if not column:
continue
column = column(self._vim)
if column.name not in self._all_columns:
self._all_columns[column.name] = column
    def _init_columns(self, columns: typing.List[str]) -> None:
        """Instantiate per-view copies of the named columns, apply user
        customization and run each column's on_init hook.

        Unknown column names are silently skipped.
        """
        from defx.base.column import Base as Column
        custom = self._vim.call('defx#custom#_get')['column']
        # copy.copy: each view gets its own column objects to mutate.
        self._columns: typing.List[Column] = [
            copy.copy(self._all_columns[x])
            for x in columns if x in self._all_columns
        ]
        for column in self._columns:
            if column.name in custom:
                column.vars.update(custom[column.name])
            column.on_init(self, self._context)
    def _init_column_length(self) -> None:
        """Compute start/end offsets and syntax names for every column.

        Columns between an ``is_start_variable`` column and an
        ``is_stop_variable`` column form a variable-width group; their
        combined length is charged to the stopping column, and they all
        share the stopping column's syntax name.
        """
        if not self._candidates:
            return
        from defx.base.column import Base as Column
        within_variable = False
        within_variable_columns: typing.List[Column] = []
        start = 1
        for [index, column] in enumerate(self._columns):
            column.syntax_name = f'Defx_{column.name}_{index}'
            column.highlight_name = f'Defx_{column.name}'
            if within_variable and not column.is_stop_variable:
                # Accumulate group members; their length is handled when
                # the stopping column is reached.
                within_variable_columns.append(column)
                continue
            # Calculate variable_length
            variable_length = 0
            if column.is_stop_variable:
                for variable_column in within_variable_columns:
                    variable_length += variable_column.length(
                        self._context._replace(targets=self._candidates))
                # Note: for "' '.join(variable_texts)" length
                if within_variable_columns:
                    variable_length += len(within_variable_columns) - 1
            length = column.length(
                self._context._replace(targets=self._candidates,
                                       variable_length=variable_length))
            column.start = start
            column.end = start + length
            if column.is_start_variable:
                within_variable = True
                within_variable_columns.append(column)
            else:
                column.is_within_variable = False
                start += length + 1
            if column.is_stop_variable:
                for variable_column in within_variable_columns:
                    # Overwrite syntax_name
                    variable_column.syntax_name = column.syntax_name
                    variable_column.is_within_variable = True
                within_variable = False
    def _init_column_syntax(self) -> None:
        """Re-apply column syntax/highlight commands, skipping the work
        when the combined highlight state is unchanged since last time."""
        commands: typing.List[str] = []
        # Clear syntaxes applied on the previous run first.
        for syntax in self._prev_syntaxes:
            commands.append(
                'silent! syntax clear ' + syntax)
        if self._proptypes:
            self._clear_prop_types()
        self._prev_syntaxes = []
        for column in self._columns:
            source_highlights = column.highlight_commands()
            if not source_highlights:
                continue
            commands += source_highlights
            self._prev_syntaxes += column.syntaxes()
        # Include the current syntax/highlight dumps in the cache key so
        # externally-changed highlighting also invalidates the cache.
        syntax_list = commands + [
            self._vim.call('execute', 'syntax list'),
            self._vim.call('execute', 'highlight'),
        ]
        if syntax_list == self._prev_highlight_commands:
            # Skip highlights
            return
        self._execute_commands(commands)
        # Re-dump after executing so the cache reflects the applied state.
        self._prev_highlight_commands = commands + [
            self._vim.call('execute', 'syntax list'),
            self._vim.call('execute', 'highlight'),
        ]
def _execute_commands(self, commands: typing.List[str]) -> None:
# Note: If commands are too huge, vim.command() will fail.
threshold = 15
cnt = 0
while cnt < len(commands):
self._vim.command(' | '.join(commands[cnt: cnt + threshold]))
cnt += threshold
    def _init_candidates(self) -> None:
        """Rebuild the flat candidate list (root row + tree rows) for
        every defx instance, tagging each row with its defx index."""
        self._candidates = []
        for defx in self._defxs:
            root = defx.get_root_candidate()
            root_path = root['action__path']
            # Remember the root directory's mtime (for change detection,
            # presumably — confirm against callers); -1 when unreadable.
            defx._mtime = (root_path.stat().st_mtime
                           if readable(root_path) and root_path.is_dir()
                           else -1)
            candidates = [root]
            candidates += defx.tree_candidates(
                defx._cwd, 0, self._context.auto_recursive_level)
            for candidate in candidates:
                candidate['_defx_index'] = defx._index
            self._candidates += candidates
    def _get_columns_text(self, context: Context, candidate: Candidate
                          ) -> typing.Tuple[str, Highlights]:
        """Render one candidate row through all columns.

        Returns the space-joined row text and highlight tuples of
        (group, start_col, length).  Texts from variable-width columns are
        buffered in ``variable_texts`` until the stopping column consumes
        them via get_with_variable_text().
        """
        texts: typing.List[str] = []
        variable_texts: typing.List[str] = []
        ret_highlights: typing.List[typing.Tuple[str, int, int]] = []
        start = 0
        for column in self._columns:
            column.start = start
            if column.is_stop_variable:
                (text, highlights) = column.get_with_variable_text(
                    context, ''.join(variable_texts), candidate)
                texts.append(text)
                ret_highlights += highlights
                variable_texts = []
            else:
                if column.has_get_with_highlights:
                    (text, highlights) = column.get_with_highlights(
                        context, candidate)
                    ret_highlights += highlights
                else:
                    # Note: For old columns compatibility
                    text = column.get(context, candidate)
                if column.is_start_variable or column.is_within_variable:
                    if text:
                        variable_texts.append(text)
                else:
                    texts.append(text)
            # Byte offset where the next column starts (used as its
            # highlight origin).
            start = len_bytes(' '.join(texts))
            if texts:
                start += 1
            if variable_texts:
                start += len_bytes(''.join(variable_texts))
        return (' '.join(texts), ret_highlights)
def _update_paths(self, index: int, path: str) -> None:
var_defx = self._buffer.vars['defx']
if len(var_defx['paths']) <= index:
var_defx['paths'].append(path)
else:
var_defx['paths'][index] = path
self._buffer.vars['defx'] = var_defx
    def _init_cursor(self, defx: Defx) -> None:
        """Place the cursor on the line just below the root row of
        ``defx``."""
        self.search_file(Path(defx._cwd), defx._index)
        # Move to next
        self._vim.call('cursor', [self._vim.call('line', '.') + 1, 1])
    def _get_wininfo(self) -> typing.List[str]:
        """Snapshot of the window layout (size, window id, tab buffers)
        used by quit() to decide whether winrestcmd may safely run."""
        return [
            self._vim.options['columns'], self._vim.options['lines'],
            self._vim.call('win_getid'), self._vim.call('tabpagebuflist')
        ]
def _load_custom_sources(self) -> typing.List[Path]:
result = []
result += self._vim.call('globpath',
self._vim.options['runtimepath'],
'rplugin/python3/defx/source/*.py', 1, 1)
result += self._vim.call('globpath',
self._vim.options['runtimepath'],
'rplugin/python3/defx/source/*/*.py', 1, 1)
return [Path(x) for x in result]
def _load_custom_columns(self) -> typing.List[Path]:
result = []
result += self._vim.call('globpath',
self._vim.options['runtimepath'],
'rplugin/python3/defx/column/*.py', 1, 1)
return [Path(x) for x in result]
    def _update_defx_paths(self,
                           paths: typing.List[typing.List[str]]) -> None:
        """Synchronize defx instances with ``paths`` ([source, path] pairs):
        truncate extras, cd existing instances, create the missing ones."""
        self._defxs = self._defxs[:len(paths)]
        for [index, [source_name, path]] in enumerate(paths):
            if source_name not in self._all_sources:
                error(self._vim, 'Invalid source_name:' + source_name)
                return
            if index >= len(self._defxs):
                self._defxs.append(
                    Defx(self._vim, self._context,
                         self._all_sources[source_name],
                         path, index))
            else:
                # Reuse the existing instance; cd() also handles a source
                # change and the redraw.
                defx = self._defxs[index]
                self.cd(defx, defx._source.name, path, self._context.cursor)
            self._update_paths(index, path)
def _check_bufnr(self, bufnr: int) -> bool:
return (bool(self._vim.call('bufexists', bufnr)) and
bufnr != self._vim.call('bufnr', '%') and
self._vim.call('getbufvar', bufnr, '&filetype') != 'defx')
    def _clear_prop_types(self) -> None:
        """Delete every Vim text-property type registered for this buffer
        in one atomic call, then reset the registry."""
        self._vim.call('defx#util#call_atomic', [
            ['prop_type_delete', [x, {'bufnr': self._bufnr}]]
            for x in self._proptypes
        ])
        self._proptypes = set()
    def _update_highlights(self, columns_highlights: typing.List[
            typing.Tuple[str, int, int, int]]) -> None:
        """Apply (group, row, start_col, end_col) highlights atomically,
        using Vim text properties when available, otherwise Neovim
        namespace highlights."""
        commands: typing.List[typing.Any] = []
        if self._has_textprop:
            # Vim path: drop old props, lazily register one prop type per
            # highlight group, then add the new props (1-based positions).
            for proptype in self._proptypes:
                commands.append(
                    ['prop_remove', [{'type': proptype, 'bufnr': self._bufnr}]]
                )
            for highlight in [x for x in columns_highlights if x[0] != '']:
                if highlight[0] not in self._proptypes:
                    commands.append(
                        ['prop_type_add',
                         [highlight[0],
                          {'highlight': highlight[0], 'bufnr': self._bufnr}]]
                    )
                    self._proptypes.add(highlight[0])
                commands.append(
                    ['prop_add',
                     [highlight[1] + 1, highlight[2] + 1,
                      {'end_col': highlight[3] + 1,
                       'type': highlight[0],
                       'bufnr': self._bufnr}]]
                )
        else:
            # Neovim path: clear and refill the dedicated namespace.
            commands.append(['nvim_buf_clear_namespace',
                             [self._bufnr, self._ns, 0, -1]])
            commands += [['nvim_buf_add_highlight',
                          [self._bufnr, self._ns, x[0], x[1], x[2], x[3]]]
                         for x in columns_highlights]
        self._vim.call('defx#util#call_atomic', commands)
        if self._has_textprop:
            # Note: redraw is needed for text props
            self._vim.command('redraw')
| 37.157953 | 79 | 0.553959 |
from pathlib import Path
from pynvim import Nvim
from pynvim.api import Buffer
import copy
import time
import typing
from defx.clipboard import Clipboard
from defx.context import Context
from defx.defx import Defx
from defx.session import Session
from defx.util import Candidate
from defx.util import error, import_plugin, len_bytes, readable
Highlights = typing.List[typing.Tuple[str, int, int]]
class View(object):
def __init__(self, vim: Nvim, index: int) -> None:
self._vim: Nvim = vim
self._defxs: typing.List[Defx] = []
self._candidates: typing.List[typing.Dict[str, typing.Any]] = []
self._clipboard = Clipboard()
self._bufnr = -1
self._tabnr = -1
self._prev_bufnr = -1
self._winid = -1
self._index = index
self._bufname = '[defx]'
self._buffer: Buffer = None
self._prev_action = ''
self._prev_syntaxes: typing.List[str] = []
self._prev_highlight_commands: typing.List[str] = []
self._winrestcmd = ''
self._has_preview_window = False
self._session_version = '1.0'
self._sessions: typing.Dict[str, Session] = {}
self._previewed_target: typing.Optional[Candidate] = None
self._previewed_img = ''
self._ns: int = -1
self._has_textprop = False
self._proptypes: typing.Set[str] = set()
def init(self, context: typing.Dict[str, typing.Any]) -> None:
self._context = self._init_context(context)
self._bufname = f'[defx] {self._context.buffer_name}-{self._index}'
self._winrestcmd = self._vim.call('winrestcmd')
self._prev_wininfo = self._get_wininfo()
self._prev_bufnr = self._context.prev_bufnr
self._has_preview_window = len(
[x for x in range(1, self._vim.call('winnr', '$'))
if self._vim.call('getwinvar', x, '&previewwindow')]) > 0
if self._vim.call('defx#util#has_textprop'):
self._has_textprop = True
else:
self._ns = self._vim.call('nvim_create_namespace', 'defx')
def init_paths(self, paths: typing.List[typing.List[str]],
context: typing.Dict[str, typing.Any],
clipboard: Clipboard
) -> bool:
self.init(context)
initialized = self._init_defx(clipboard)
if self._vim.call('win_getid') != self._winid:
return False
if not paths:
if not initialized:
return False
paths = [['file', self._vim.call('getcwd')]]
self._buffer.vars['defx']['paths'] = paths
self._update_defx_paths(paths)
self._init_columns(self._context.columns.split(':'))
self.redraw(True)
if self._context.session_file:
self.do_action('load_session', [],
self._vim.call('defx [index, [source_name, path]] in enumerate(paths):
self._check_session(index, path)
for defx in self._defxs:
self._init_cursor(defx)
self._vim.command(
'silent doautocmd <nomodeline> User DefxDirChanged')
return True
def do_action(self, action_name: str,
action_args: typing.List[str],
new_context: typing.Dict[str, typing.Any]) -> None:
cursor = new_context['cursor']
visual_start = new_context['visual_start']
visual_end = new_context['visual_end']
defx_targets = {
x._index: self.get_selected_candidates(cursor, x._index)
for x in self._defxs}
all_targets: typing.List[typing.Dict[str, typing.Any]] = []
for targets in defx_targets.values():
all_targets += targets
import defx.action as action
for defx in [x for x in self._defxs
if not all_targets or defx_targets[x._index]]:
context = self._context._replace(
args=action_args,
cursor=cursor,
targets=defx_targets[defx._index],
visual_start=visual_start,
visual_end=visual_end,
)
ret = action.do_action(self, defx, action_name, context)
if ret:
error(self._vim, 'Invalid action_name:' + action_name)
return
# Jump to the defx window
if context.post_action == 'jump':
self._vim.call('win_gotoid', self._winid)
def debug(self, expr: typing.Any) -> None:
error(self._vim, expr)
def print_msg(self, expr: typing.Any) -> None:
self._vim.call('defxw(self) -> None:
self._vim.call('defxelf._has_preview_window:
self._vim.command('pclose!')
# Clear previewed buffers
for bufnr in self._vim.vars['defx
if (not self._vim.call('win_findbuf', bufnr) and
self._vim.call('buflisted', bufnr)):
self._vim.command('silent bdelete ' + str(bufnr))
self._vim.vars['defx
def quit(self) -> None:
# Close preview window
self.close_preview()
winnr = self._vim.call('bufwinnr', self._bufnr)
if winnr < 0:
return
if winnr != self._vim.call('winnr'):
# Use current window
self._context = self._context._replace(
prev_winid=self._vim.call('win_getid'))
self._vim.command(f'{winnr}wincmd w')
if (self._context.split not in ['no', 'tab'] and
self._vim.call('winnr', '$') != 1):
self._vim.command('close')
elif self._check_bufnr(self._prev_bufnr):
self._vim.command('buffer ' + str(self._prev_bufnr))
elif self._check_bufnr(self._context.prev_last_bufnr):
self._vim.command('buffer ' +
str(self._context.prev_last_bufnr))
else:
self._vim.command('enew')
self._vim.call('win_gotoid', self._context.prev_winid)
if self._get_wininfo() and self._get_wininfo() == self._prev_wininfo:
self._vim.command(self._winrestcmd)
self.restore_previous_buffer(self._context.prev_last_bufnr)
def redraw(self, is_force: bool = False) -> None:
start = time.time()
[info] = self._vim.call('getbufinfo', self._bufnr)
prev_linenr = info['lnum']
prev = self.get_cursor_candidate(prev_linenr)
if is_force:
self._init_candidates()
self._init_column_length()
for column in self._columns:
column.on_redraw(self, self._context)
lines = []
columns_highlights = []
for (i, candidate) in enumerate(self._candidates):
(text, highlights) = self._get_columns_text(
self._context, candidate)
lines.append(text)
columns_highlights += ([(x[0], i, x[1], x[1] + x[2])
for x in highlights])
self._buffer.options['modifiable'] = True
# NOTE: Different len of buffer line replacement cause cursor jump
if len(lines) >= len(self._buffer):
self._buffer[:] = lines[:len(self._buffer)]
self._buffer.append(lines[len(self._buffer):])
else:
self._buffer[len(lines):] = []
self._buffer[:] = lines
self._buffer.options['modifiable'] = False
self._buffer.options['modified'] = False
# TODO: How to set cursor position for other buffer when
# stay in current buffer
if self._buffer == self._vim.current.buffer:
self._vim.call('cursor', [prev_linenr, 0])
if prev:
self.search_file(prev['action__path'], prev['_defx_index'])
if is_force:
self._init_column_syntax()
# Update highlights
# Note: update_highlights() must be called after init_column_syntax()
if columns_highlights:
self._update_highlights(columns_highlights)
if self._context.profile:
error(self._vim, f'redraw time = {time.time() - start}')
def get_cursor_candidate(
self, cursor: int) -> typing.Dict[str, typing.Any]:
if len(self._candidates) < cursor:
return {}
else:
return self._candidates[cursor - 1]
def get_selected_candidates(
self, cursor: int, index: int = -1
) -> typing.List[typing.Dict[str, typing.Any]]:
if not self._candidates:
return []
candidates = [x for x in self._candidates if x['is_selected']]
if not candidates:
candidates = [self.get_cursor_candidate(cursor)]
return [x for x in candidates
if index < 0 or x.get('_defx_index', -1) == index]
def get_candidate_pos(self, path: Path, index: int) -> int:
for [pos, candidate] in enumerate(self._candidates):
if (candidate['_defx_index'] == index and
candidate['action__path'] == path):
return pos
return -1
def cd(self, defx: Defx, source_name: str,
path: str, cursor: int, save_history: bool = True) -> None:
history = defx._cursor_history
# Save previous cursor position
candidate = self.get_cursor_candidate(cursor)
if candidate:
history[defx._cwd] = candidate['action__path']
if save_history:
global_histories = self._vim.vars['defx
global_histories.append([defx._source.name, defx._cwd])
self._vim.vars['defx
if source_name != defx._source.name:
if source_name not in self._all_sources:
error(self._vim, 'Invalid source_name:' + source_name)
return
# Replace with new defx
self._defxs[defx._index] = Defx(
self._vim, self._context,
self._all_sources[source_name],
path, defx._index)
defx = self._defxs[defx._index]
defx.cd(path)
self.redraw(True)
self._check_session(defx._index, path)
self._init_cursor(defx)
if path in history:
self.search_file(history[path], defx._index)
self._update_paths(defx._index, path)
self._vim.command(
'silent doautocmd <nomodeline> User DefxDirChanged')
def search_file(self, path: Path, index: int) -> bool:
target = str(path)
if target and target[-1] == '/':
target = target[:-1]
pos = self.get_candidate_pos(Path(target), index)
if pos < 0:
return False
self._vim.call('cursor', [pos + 1, 1])
return True
def search_recursive(self, path: Path, index: int) -> bool:
parents: typing.List[Path] = []
tmppath: Path = path
while (self.get_candidate_pos(tmppath, index) < 0 and
tmppath.parent != path and tmppath.parent != tmppath):
tmppath = tmppath.parent
parents.append(tmppath)
for parent in reversed(parents):
self.open_tree(parent, index, False, 0)
self.update_candidates()
self.redraw()
return self.search_file(path, index)
def update_candidates(self) -> None:
# Update opened/selected state
for defx in self._defxs:
defx._opened_candidates = set()
defx._selected_candidates = set()
for [i, candidate] in [x for x in enumerate(self._candidates)
if x[1]['is_opened_tree']]:
defx = self._defxs[candidate['_defx_index']]
defx._opened_candidates.add(str(candidate['action__path']))
for [i, candidate] in [x for x in enumerate(self._candidates)
if x[1]['is_selected']]:
defx = self._defxs[candidate['_defx_index']]
defx._selected_candidates.add(str(candidate['action__path']))
def open_tree(self, path: Path, index: int, enable_nested: bool,
max_level: int = 0) -> None:
# Search insert position
pos = self.get_candidate_pos(path, index)
if pos < 0:
return
target = self._candidates[pos]
if (not target['is_directory'] or
target['is_opened_tree'] or target['is_root']):
return
target['is_opened_tree'] = True
base_level = target['level'] + 1
defx = self._defxs[index]
children = defx.gather_candidates_recursive(
str(path), base_level, base_level + max_level)
if not children:
return
if (enable_nested and len(children) == 1
and children[0]['is_directory']):
# Merge child.
defx._nested_candidates.add(str(target['action__path']))
target['action__path'] = children[0]['action__path']
target['word'] += children[0]['word']
target['is_opened_tree'] = False
return self.open_tree(target['action__path'],
index, enable_nested, max_level)
for candidate in children:
candidate['_defx_index'] = index
self._candidates = (self._candidates[: pos + 1] +
children + self._candidates[pos + 1:])
def close_tree(self, path: Path, index: int) -> None:
# Search insert position
pos = self.get_candidate_pos(path, index)
if pos < 0:
return
target = self._candidates[pos]
if not target['is_opened_tree'] or target['is_root']:
return
target['is_opened_tree'] = False
defx = self._defxs[index]
self._remove_nested_path(defx, target['action__path'])
start = pos + 1
base_level = target['level']
end = start
for candidate in self._candidates[start:]:
if candidate['level'] <= base_level:
break
self._remove_nested_path(defx, candidate['action__path'])
end += 1
self._candidates = (self._candidates[: start] +
self._candidates[end:])
def restore_previous_buffer(self, bufnr: int) -> None:
if (not self._vim.call('buflisted', bufnr) or
self._vim.call('win_getid') != self._winid):
return
# Note: Convert to full path to prevent error
# "1" matches "123" buffer
prev_bufname = self._vim.call(
'fnamemodify', self._vim.call('bufname', bufnr), ':p')
path_prev = Path(prev_bufname)
if (not self._vim.call('buflisted', prev_bufname) or
not readable(path_prev) or path_prev.is_dir()):
return
self._vim.call('setreg', '
self._vim.call('fnameescape', prev_bufname))
def _remove_nested_path(self, defx: Defx, path: Path) -> None:
if str(path) in defx._nested_candidates:
defx._nested_candidates.remove(str(path))
def _init_context(
        self, context: typing.Dict[str, typing.Any]) -> Context:
    """Build a Context from a raw option dict, coercing int fields.

    Any key whose Context default is an int is converted with int()
    before the namedtuple is constructed.
    """
    for key, default in Context()._asdict().items():
        if key in context and isinstance(default, int):
            context[key] = int(context[key])
    return Context(**context)
def _init_window(self) -> None:
    """Record the window/tab identifiers and apply the configured size."""
    self._winid = self._vim.call('win_getid')
    self._tabnr = self._vim.call('tabpagenr')
    window_options = self._vim.current.window.options
    split = self._context.split
    if split == 'vertical' and self._context.winwidth > 0:
        # Pin the width so later window commands do not resize us.
        window_options['winfixwidth'] = True
        self._vim.command(f'vertical resize {self._context.winwidth}')
    elif split == 'horizontal' and self._context.winheight > 0:
        # Same for the height in a horizontal split.
        window_options['winfixheight'] = True
        self._vim.command(f'resize {self._context.winheight}')
def _check_session(self, index: int, path: str) -> None:
    """Restore the opened-tree state saved in the session for *path*."""
    session = self._sessions.get(path)
    if session is None:
        return
    # Re-open every directory that was expanded when the session was
    # saved, then refresh the view once at the end.
    for opened_path in session.opened_candidates:
        self.open_tree(Path(opened_path), index, False)
    self.update_candidates()
    self.redraw()
def _init_defx(self, clipboard: Clipboard) -> bool:
    """Create/configure the defx buffer and reset plugin state.

    Returns False when no new buffer was created (e.g. an existing
    defx window was reused or closed by :_switch_buffer).
    """
    if not self._switch_buffer():
        return False
    self._buffer = self._vim.current.buffer
    self._bufnr = self._buffer.number
    # Buffer-local state mirrored to Vim for use from Vim script.
    self._buffer.vars['defx'] = {
        'context': self._context._asdict(),
        'paths': [],
    }
    # Note: Have to use setlocal instead of "current.window.options"
    # "current.window.options" changes global value instead of local in
    # neovim.
    self._vim.command('setlocal colorcolumn=')
    self._vim.command('setlocal nocursorcolumn')
    self._vim.command('setlocal nofoldenable')
    self._vim.command('setlocal foldcolumn=0')
    self._vim.command('setlocal nolist')
    self._vim.command('setlocal nospell')
    self._vim.command('setlocal nowrap')
    self._vim.command('setlocal signcolumn=no')
    self._init_window()
    buffer_options = self._buffer.options
    if not self._context.listed:
        buffer_options['buflisted'] = False
    buffer_options['buftype'] = 'nofile'
    buffer_options['bufhidden'] = 'hide'
    buffer_options['swapfile'] = False
    buffer_options['modeline'] = False
    buffer_options['modifiable'] = False
    buffer_options['modified'] = False
    buffer_options['filetype'] = 'defx'
    if not self._vim.call('has', 'nvim'):
        # Note: In Vim8, FileType autocmd is not fired after set filetype
        # option.
        self._vim.command('silent doautocmd FileType defx')
    self._vim.command('autocmd! defx * <buffer>')
    # NOTE(review): the autocmd callback strings below are truncated in
    # this copy (they call defx#... Vim functions) — verify upstream.
    self._vim.command('autocmd defx '
                      'CursorHold,FocusGained <buffer> '
                      'call defx
    self._vim.command('autocmd defx FileType <buffer> '
                      'call defx
    self._prev_highlight_commands = []
    # Initialize defx state
    self._candidates = []
    self._clipboard = clipboard
    self._defxs = []
    self._init_all_sources()
    self._init_all_columns()
    self._init_columns(self._context.columns.split(':'))
    # NOTE(review): the global variable name below is truncated in this
    # copy — verify against the upstream source.
    self._vim.vars['defx
    return True
def _switch_buffer(self) -> bool:
    """Move to (or create) the window that should host the defx buffer.

    Returns True when a brand-new buffer must be initialized by the
    caller, False when an existing buffer/window was reused or closed.
    """
    if self._context.split == 'tab':
        self._vim.command('tabnew')
    if self._context.close:
        self.quit()
        return False
    winnr = self._vim.call('bufwinnr', self._bufnr)
    # Note: current window may be defx buffer when `:tabnew`.
    if winnr > 0 and self._tabnr == self._vim.call('tabpagenr'):
        # The defx buffer is already shown in this tab: jump to it.
        self._vim.command(f'{winnr}wincmd w')
        if self._context.toggle:
            self.quit()
        else:
            self._winid = self._vim.call('win_getid')
            self._init_window()
        return False
    # A modified buffer cannot be abandoned without 'hidden'; fall back
    # to a vertical split instead of replacing it in place.
    if (self._vim.current.buffer.options['modified'] and
            not self._vim.options['hidden'] and
            self._context.split == 'no'):
        self._context = self._context._replace(split='vertical')
    if (self._context.split == 'floating'
            and self._vim.call('exists', '*nvim_open_win')):
        # Use floating window
        self._vim.call(
            'nvim_open_win',
            self._vim.call('bufnr', '%'), True, {
                'relative': self._context.winrelative,
                'row': self._context.winrow,
                'col': self._context.wincol,
                'width': self._context.winwidth,
                'height': self._context.winheight,
                'border': self._context.winborder,
            })
    # Create new buffer
    vertical = 'vertical' if self._context.split == 'vertical' else ''
    no_split = self._context.split in ['no', 'tab', 'floating']
    if self._vim.call('bufloaded', self._bufnr):
        # Reuse the already-loaded defx buffer in a (new) window.
        command = ('buffer' if no_split else 'sbuffer')
        self._vim.command(
            'silent keepalt %s %s %s %s' % (
                self._context.direction,
                vertical,
                command,
                self._bufnr,
            )
        )
        if self._context.resume:
            self._init_window()
        return False
    elif self._vim.call('exists', '*bufadd'):
        # Vim 8.1+/nvim: allocate the buffer first, then display it.
        bufnr = self._vim.call('bufadd', self._bufname)
        command = ('buffer' if no_split else 'sbuffer')
        self._vim.command(
            'silent keepalt %s %s %s %s' % (
                self._context.direction,
                vertical,
                command,
                bufnr,
            )
        )
    else:
        # NOTE(review): the call below is corrupted in this copy — it
        # presumably invokes a defx#util helper to execute the edit/new
        # command with the buffer name; verify against upstream.
        command = ('edit' if no_split else 'new')
        self._vim.call(
            'defx 'silent keepalt %s %s %s ' % (
                self._context.direction,
                vertical,
                command,
            ),
            self._bufname)
    return True
def _init_all_sources(self) -> None:
    """Instantiate every Source plugin found on 'runtimepath'."""
    from defx.base.source import Base as Source
    self._all_sources: typing.Dict[str, Source] = {}
    for plugin_path in self._load_custom_sources():
        loaded = import_plugin(plugin_path, 'source', 'Source')
        if not loaded:
            continue
        instance = loaded(self._vim)
        # The first plugin defining a name wins; duplicates are ignored.
        self._all_sources.setdefault(instance.name, instance)
def _init_all_columns(self) -> None:
    """Instantiate every Column plugin found on 'runtimepath'."""
    from defx.base.column import Base as Column
    self._all_columns: typing.Dict[str, Column] = {}
    for plugin_path in self._load_custom_columns():
        loaded = import_plugin(plugin_path, 'column', 'Column')
        if not loaded:
            continue
        instance = loaded(self._vim)
        # The first plugin defining a name wins; duplicates are ignored.
        self._all_columns.setdefault(instance.name, instance)
def _init_columns(self, columns: typing.List[str]) -> None:
    """Select and initialize the active columns named in *columns*.

    Copies each known column so per-view state does not leak between
    defx instances, applies user overrides, and calls on_init().
    """
    from defx.base.column import Base as Column
    # NOTE(review): the next line is corrupted in this copy — it fuses
    # what were originally two lines: a call fetching user column
    # overrides (custom = self._vim.call('defx#...')) and the
    # declaration "self._columns: typing.List[Column] = [".
    # Verify against the upstream defx.nvim source.
    custom = self._vim.call('defxlumns: typing.List[Column] = [
        copy.copy(self._all_columns[x])
        for x in columns if x in self._all_columns
    ]
    for column in self._columns:
        if column.name in custom:
            # Apply user-specified variable overrides for this column.
            column.vars.update(custom[column.name])
        column.on_init(self, self._context)
def _init_column_length(self) -> None:
    """Compute start/end byte positions and syntax names per column.

    Columns between an "is_start_variable" column and the next
    "is_stop_variable" column share one variable-width region; the
    stop column's width accounts for the accumulated variable widths.
    """
    if not self._candidates:
        return
    from defx.base.column import Base as Column
    within_variable = False
    within_variable_columns: typing.List[Column] = []
    start = 1
    for [index, column] in enumerate(self._columns):
        column.syntax_name = f'Defx_{column.name}_{index}'
        column.highlight_name = f'Defx_{column.name}'
        if within_variable and not column.is_stop_variable:
            # Inside a variable region: widths are settled by the stop
            # column, so just collect the column for later.
            within_variable_columns.append(column)
            continue
        # Calculate variable_length
        variable_length = 0
        if column.is_stop_variable:
            for variable_column in within_variable_columns:
                variable_length += variable_column.length(
                    self._context._replace(targets=self._candidates))
            # Note: for "' '.join(variable_texts)" length
            if within_variable_columns:
                variable_length += len(within_variable_columns) - 1
        length = column.length(
            self._context._replace(targets=self._candidates,
                                   variable_length=variable_length))
        column.start = start
        column.end = start + length
        if column.is_start_variable:
            within_variable = True
            within_variable_columns.append(column)
        else:
            column.is_within_variable = False
            start += length + 1
        if column.is_stop_variable:
            for variable_column in within_variable_columns:
                # Overwrite syntax_name
                variable_column.syntax_name = column.syntax_name
                variable_column.is_within_variable = True
            within_variable = False
def _init_column_syntax(self) -> None:
    """(Re)install syntax/highlight commands for the active columns.

    Skips re-execution when the exact same command list (plus the
    current syntax/highlight dump) was applied last time.
    """
    commands: typing.List[str] = []
    # Clear everything installed on the previous pass first.
    for syntax in self._prev_syntaxes:
        commands.append(
            'silent! syntax clear ' + syntax)
    if self._proptypes:
        self._clear_prop_types()
    self._prev_syntaxes = []
    for column in self._columns:
        source_highlights = column.highlight_commands()
        if not source_highlights:
            continue
        commands += source_highlights
        self._prev_syntaxes += column.syntaxes()
    # Include the live syntax/highlight state so external changes
    # (e.g. a colorscheme switch) also invalidate the cache.
    syntax_list = commands + [
        self._vim.call('execute', 'syntax list'),
        self._vim.call('execute', 'highlight'),
    ]
    if syntax_list == self._prev_highlight_commands:
        # Skip highlights
        return
    self._execute_commands(commands)
    self._prev_highlight_commands = commands + [
        self._vim.call('execute', 'syntax list'),
        self._vim.call('execute', 'highlight'),
    ]
def _execute_commands(self, commands: typing.List[str]) -> None:
    """Run *commands* as '|'-joined Ex lines in small batches.

    Note: If commands are too huge, vim.command() will fail, so the
    list is split into chunks before execution.
    """
    batch_size = 15
    for offset in range(0, len(commands), batch_size):
        batch = commands[offset: offset + batch_size]
        self._vim.command(' | '.join(batch))
def _init_candidates(self) -> None:
    """Rebuild the flat candidate list from every defx root."""
    self._candidates = []
    for defx in self._defxs:
        root = defx.get_root_candidate()
        root_path = root['action__path']
        # Remember the root mtime for change detection; -1 marks an
        # unreadable or non-directory root.
        if readable(root_path) and root_path.is_dir():
            defx._mtime = root_path.stat().st_mtime
        else:
            defx._mtime = -1
        batch = [root]
        batch.extend(defx.tree_candidates(
            defx._cwd, 0, self._context.auto_recursive_level))
        # Tag every row with its owning defx so actions can route back.
        for row in batch:
            row['_defx_index'] = defx._index
        self._candidates += batch
def _get_columns_text(self, context: Context, candidate: Candidate
                      ) -> typing.Tuple[str, Highlights]:
    """Render one candidate row: joined column text plus highlights.

    Variable-width columns buffer their text until the stop column,
    which folds the buffered text into its own output. ``start`` tracks
    the current byte offset so highlight positions stay correct.
    """
    texts: typing.List[str] = []
    variable_texts: typing.List[str] = []
    ret_highlights: typing.List[typing.Tuple[str, int, int]] = []
    start = 0
    for column in self._columns:
        column.start = start
        if column.is_stop_variable:
            # The stop column consumes the buffered variable text.
            (text, highlights) = column.get_with_variable_text(
                context, ''.join(variable_texts), candidate)
            texts.append(text)
            ret_highlights += highlights
            variable_texts = []
        else:
            if column.has_get_with_highlights:
                (text, highlights) = column.get_with_highlights(
                    context, candidate)
                ret_highlights += highlights
            else:
                # Note: For old columns compatibility
                text = column.get(context, candidate)
            if column.is_start_variable or column.is_within_variable:
                if text:
                    variable_texts.append(text)
            else:
                texts.append(text)
        # Recompute the byte offset (separator included) for the next
        # column, accounting for any still-buffered variable text.
        start = len_bytes(' '.join(texts))
        if texts:
            start += 1
        if variable_texts:
            start += len_bytes(''.join(variable_texts))
    return (' '.join(texts), ret_highlights)
def _update_paths(self, index: int, path: str) -> None:
    """Store *path* at *index* in the buffer-local defx paths list."""
    state = self._buffer.vars['defx']
    paths = state['paths']
    if index < len(paths):
        paths[index] = path
    else:
        # Index beyond the current list: grow it by one entry.
        paths.append(path)
    # Write the whole dict back — buffer variables are not mutated
    # in place across the RPC boundary.
    self._buffer.vars['defx'] = state
def _init_cursor(self, defx: Defx) -> None:
    """Place the cursor on the first row below the defx root."""
    self.search_file(Path(defx._cwd), defx._index)
    # Move to next
    next_line = self._vim.call('line', '.') + 1
    self._vim.call('cursor', [next_line, 1])
def _get_wininfo(self) -> typing.List[str]:
    """Snapshot of layout state used to detect window/tab changes."""
    options = self._vim.options
    return [
        options['columns'],
        options['lines'],
        self._vim.call('win_getid'),
        self._vim.call('tabpagebuflist'),
    ]
def _load_custom_sources(self) -> typing.List[Path]:
    """Glob 'runtimepath' for source plugins (one nesting level deep)."""
    patterns = [
        'rplugin/python3/defx/source/*.py',
        'rplugin/python3/defx/source/*/*.py',
    ]
    found: typing.List[str] = []
    for pattern in patterns:
        found += self._vim.call(
            'globpath', self._vim.options['runtimepath'], pattern, 1, 1)
    return [Path(name) for name in found]
def _load_custom_columns(self) -> typing.List[Path]:
    """Glob 'runtimepath' for column plugins."""
    matches = self._vim.call(
        'globpath', self._vim.options['runtimepath'],
        'rplugin/python3/defx/column/*.py', 1, 1)
    return [Path(name) for name in matches]
def _update_defx_paths(self,
                       paths: typing.List[typing.List[str]]) -> None:
    """Synchronize the Defx instances with *paths*.

    Each entry is a ``[source_name, path]`` pair. Existing instances
    are re-targeted via cd(); missing ones are appended; surplus
    instances beyond len(paths) are dropped.
    """
    self._defxs = self._defxs[:len(paths)]
    for [index, [source_name, path]] in enumerate(paths):
        if source_name not in self._all_sources:
            error(self._vim, 'Invalid source_name:' + source_name)
            return
        if index >= len(self._defxs):
            # New slot: create a fresh Defx for this source/path.
            self._defxs.append(
                Defx(self._vim, self._context,
                     self._all_sources[source_name],
                     path, index))
        else:
            # Existing slot: navigate the current instance instead.
            defx = self._defxs[index]
            self.cd(defx, defx._source.name, path, self._context.cursor)
        self._update_paths(index, path)
def _check_bufnr(self, bufnr: int) -> bool:
    """Return True if *bufnr* exists, is not current, and is not defx."""
    if not self._vim.call('bufexists', bufnr):
        return False
    if bufnr == self._vim.call('bufnr', '%'):
        return False
    filetype = self._vim.call('getbufvar', bufnr, '&filetype')
    return filetype != 'defx'
def _clear_prop_types(self) -> None:
    """Delete every registered Vim text-property type for this buffer.

    NOTE(review): the call below is corrupted in this copy — it appears
    to have fused a defx#util batching helper with a list of
    ['prop_type_delete', ...] commands; verify against upstream.
    """
    self._vim.call('defxrop_type_delete', [x, {'bufnr': self._bufnr}]]
        for x in self._proptypes
    ])
    self._proptypes = set()
def _update_highlights(self, columns_highlights: typing.List[
        typing.Tuple[str, int, int, int]]) -> None:
    """Apply per-column highlights via textprops (Vim) or namespaces
    (Neovim).

    Each tuple is (highlight_group, row, start_col, end_col) with
    0-based positions; textprop positions are 1-based, hence the +1s.
    """
    commands: typing.List[typing.Any] = []
    if self._has_textprop:
        # Vim: clear old props, lazily register prop types, add props.
        for proptype in self._proptypes:
            commands.append(
                ['prop_remove', [{'type': proptype, 'bufnr': self._bufnr}]]
            )
        for highlight in [x for x in columns_highlights if x[0] != '']:
            if highlight[0] not in self._proptypes:
                # A prop type must exist before prop_add can use it.
                commands.append(
                    ['prop_type_add',
                     [highlight[0],
                      {'highlight': highlight[0], 'bufnr': self._bufnr}]]
                )
                self._proptypes.add(highlight[0])
            commands.append(
                ['prop_add',
                 [highlight[1] + 1, highlight[2] + 1,
                  {'end_col': highlight[3] + 1,
                   'type': highlight[0],
                   'bufnr': self._bufnr}]]
            )
    else:
        # Neovim: wipe the namespace and re-add all highlights.
        commands.append(['nvim_buf_clear_namespace',
                        [self._bufnr, self._ns, 0, -1]])
        commands += [['nvim_buf_add_highlight',
                      [self._bufnr, self._ns, x[0], x[1], x[2], x[3]]]
                     for x in columns_highlights]
    # NOTE(review): the dispatch call below is truncated in this copy —
    # presumably a defx#util#call_atomic-style batch executor taking
    # *commands*; verify against upstream.
    self._vim.call('defxextprop:
    # Note: redraw is needed for text props
    self._vim.command('redraw')
| true | true |
f73dc68fc2d1ada95b12f276219df22734d87ae5 | 17,339 | py | Python | sdk/python/pulumi_azure_native/network/v20170601/route_filter.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20170601/route_filter.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20170601/route_filter.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['RouteFilterInitArgs', 'RouteFilter']
@pulumi.input_type
class RouteFilterInitArgs:
    """Input properties for constructing a RouteFilter resource."""

    def __init__(__self__, *,
                 resource_group_name: pulumi.Input[str],
                 id: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 peerings: Optional[pulumi.Input[Sequence[pulumi.Input['ExpressRouteCircuitPeeringArgs']]]] = None,
                 route_filter_name: Optional[pulumi.Input[str]] = None,
                 rules: Optional[pulumi.Input[Sequence[pulumi.Input['RouteFilterRuleArgs']]]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a RouteFilter resource.
        :param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param pulumi.Input[str] id: Resource ID.
        :param pulumi.Input[str] location: Resource location.
        :param pulumi.Input[Sequence[pulumi.Input['ExpressRouteCircuitPeeringArgs']]] peerings: A collection of references to express route circuit peerings.
        :param pulumi.Input[str] route_filter_name: The name of the route filter.
        :param pulumi.Input[Sequence[pulumi.Input['RouteFilterRuleArgs']]] rules: Collection of RouteFilterRules contained within a route filter.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        """
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        # Optional arguments are only recorded when explicitly provided.
        for arg_name, arg_value in (
                ("id", id),
                ("location", location),
                ("peerings", peerings),
                ("route_filter_name", route_filter_name),
                ("rules", rules),
                ("tags", tags)):
            if arg_value is not None:
                pulumi.set(__self__, arg_name, arg_value)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """The name of the resource group."""
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """Resource ID."""
        return pulumi.get(self, "id")

    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)

    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """Resource location."""
        return pulumi.get(self, "location")

    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)

    @property
    @pulumi.getter
    def peerings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ExpressRouteCircuitPeeringArgs']]]]:
        """A collection of references to express route circuit peerings."""
        return pulumi.get(self, "peerings")

    @peerings.setter
    def peerings(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ExpressRouteCircuitPeeringArgs']]]]):
        pulumi.set(self, "peerings", value)

    @property
    @pulumi.getter(name="routeFilterName")
    def route_filter_name(self) -> Optional[pulumi.Input[str]]:
        """The name of the route filter."""
        return pulumi.get(self, "route_filter_name")

    @route_filter_name.setter
    def route_filter_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "route_filter_name", value)

    @property
    @pulumi.getter
    def rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RouteFilterRuleArgs']]]]:
        """Collection of RouteFilterRules contained within a route filter."""
        return pulumi.get(self, "rules")

    @rules.setter
    def rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RouteFilterRuleArgs']]]]):
        pulumi.set(self, "rules", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """Resource tags."""
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
class RouteFilter(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 peerings: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ExpressRouteCircuitPeeringArgs']]]]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 route_filter_name: Optional[pulumi.Input[str]] = None,
                 rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RouteFilterRuleArgs']]]]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None):
        """
        Route Filter Resource.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] id: Resource ID.
        :param pulumi.Input[str] location: Resource location.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ExpressRouteCircuitPeeringArgs']]]] peerings: A collection of references to express route circuit peerings.
        :param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param pulumi.Input[str] route_filter_name: The name of the route filter.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RouteFilterRuleArgs']]]] rules: Collection of RouteFilterRules contained within a route filter.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        """
        ...

    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: RouteFilterInitArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Route Filter Resource.

        :param str resource_name: The name of the resource.
        :param RouteFilterInitArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the (args-object) and (keyword) overloads.
        resource_args, opts = _utilities.get_resource_args_opts(RouteFilterInitArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 peerings: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ExpressRouteCircuitPeeringArgs']]]]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 route_filter_name: Optional[pulumi.Input[str]] = None,
                 rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RouteFilterRuleArgs']]]]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None):
        """Shared implementation backing both __init__ overloads."""
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__ = RouteFilterInitArgs.__new__(RouteFilterInitArgs)
            # Caller-supplied inputs.
            for prop_name, prop_value in (
                    ("id", id),
                    ("location", location),
                    ("peerings", peerings),
                    ("resource_group_name", resource_group_name),
                    ("route_filter_name", route_filter_name),
                    ("rules", rules),
                    ("tags", tags)):
                __props__.__dict__[prop_name] = prop_value
            # Output-only properties populated by the provider.
            for output_name in ("etag", "name", "provisioning_state", "type"):
                __props__.__dict__[output_name] = None
        # Register aliases for the unversioned type and every other API
        # version, in both the azure-native and azure-nextgen namespaces.
        api_versions = [
            '20161201', '20170301', '20170801', '20170901', '20171001',
            '20171101', '20180101', '20180201', '20180401', '20180601',
            '20180701', '20180801', '20181001', '20181101', '20181201',
            '20190201', '20190401', '20190601', '20190701', '20190801',
            '20190901', '20191101', '20191201', '20200301', '20200401',
            '20200501', '20200601', '20200701', '20200801', '20201101',
            '20210201', '20210301',
        ]
        alias_types = [
            'azure-nextgen:network/v20170601:RouteFilter',
            'azure-native:network:RouteFilter',
            'azure-nextgen:network:RouteFilter',
        ]
        for api_version in api_versions:
            alias_types.append(f'azure-native:network/v{api_version}:RouteFilter')
            alias_types.append(f'azure-nextgen:network/v{api_version}:RouteFilter')
        alias_opts = pulumi.ResourceOptions(
            aliases=[pulumi.Alias(type_=alias_type) for alias_type in alias_types])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(RouteFilter, __self__).__init__(
            'azure-native:network/v20170601:RouteFilter',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'RouteFilter':
        """
        Get an existing RouteFilter resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = RouteFilterInitArgs.__new__(RouteFilterInitArgs)
        # All properties start unset; the engine fills them on read.
        for prop_name in ("etag", "location", "name", "peerings",
                          "provisioning_state", "rules", "tags", "type"):
            __props__.__dict__[prop_name] = None
        return RouteFilter(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def etag(self) -> pulumi.Output[str]:
        """Gets a unique read-only string that changes whenever the resource is updated."""
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """Resource location."""
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """Resource name."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def peerings(self) -> pulumi.Output[Optional[Sequence['outputs.ExpressRouteCircuitPeeringResponse']]]:
        """A collection of references to express route circuit peerings."""
        return pulumi.get(self, "peerings")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """The provisioning state of the resource. Possible values are: 'Updating', 'Deleting', 'Succeeded' and 'Failed'."""
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def rules(self) -> pulumi.Output[Optional[Sequence['outputs.RouteFilterRuleResponse']]]:
        """Collection of RouteFilterRules contained within a route filter."""
        return pulumi.get(self, "rules")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """Resource tags."""
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """Resource type."""
        return pulumi.get(self, "type")
| 55.044444 | 4,489 | 0.683661 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['RouteFilterInitArgs', 'RouteFilter']
@pulumi.input_type
class RouteFilterInitArgs:
    """The set of arguments for constructing a RouteFilter resource."""

    def __init__(__self__, *,
                 resource_group_name: pulumi.Input[str],
                 id: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 peerings: Optional[pulumi.Input[Sequence[pulumi.Input['ExpressRouteCircuitPeeringArgs']]]] = None,
                 route_filter_name: Optional[pulumi.Input[str]] = None,
                 rules: Optional[pulumi.Input[Sequence[pulumi.Input['RouteFilterRuleArgs']]]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        :param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param pulumi.Input[str] id: Resource ID.
        :param pulumi.Input[str] location: Resource location.
        :param pulumi.Input[Sequence[pulumi.Input['ExpressRouteCircuitPeeringArgs']]] peerings: A collection of references to express route circuit peerings.
        :param pulumi.Input[str] route_filter_name: The name of the route filter.
        :param pulumi.Input[Sequence[pulumi.Input['RouteFilterRuleArgs']]] rules: Collection of RouteFilterRules contained within a route filter.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        """
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if peerings is not None:
            pulumi.set(__self__, "peerings", peerings)
        if route_filter_name is not None:
            pulumi.set(__self__, "route_filter_name", route_filter_name)
        if rules is not None:
            pulumi.set(__self__, "rules", rules)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """The name of the resource group."""
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """Resource ID."""
        return pulumi.get(self, "id")

    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)

    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """Resource location."""
        return pulumi.get(self, "location")

    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)

    @property
    @pulumi.getter
    def peerings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ExpressRouteCircuitPeeringArgs']]]]:
        """A collection of references to express route circuit peerings."""
        return pulumi.get(self, "peerings")

    @peerings.setter
    def peerings(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ExpressRouteCircuitPeeringArgs']]]]):
        pulumi.set(self, "peerings", value)

    @property
    @pulumi.getter(name="routeFilterName")
    def route_filter_name(self) -> Optional[pulumi.Input[str]]:
        """The name of the route filter."""
        return pulumi.get(self, "route_filter_name")

    @route_filter_name.setter
    def route_filter_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "route_filter_name", value)

    @property
    @pulumi.getter
    def rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RouteFilterRuleArgs']]]]:
        """Collection of RouteFilterRules contained within a route filter."""
        return pulumi.get(self, "rules")

    @rules.setter
    def rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RouteFilterRuleArgs']]]]):
        pulumi.set(self, "rules", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """Resource tags."""
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
class RouteFilter(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
peerings: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ExpressRouteCircuitPeeringArgs']]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
route_filter_name: Optional[pulumi.Input[str]] = None,
rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RouteFilterRuleArgs']]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
...
@overload
def __init__(__self__,
resource_name: str,
args: RouteFilterInitArgs,
opts: Optional[pulumi.ResourceOptions] = None):
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(RouteFilterInitArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
peerings: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ExpressRouteCircuitPeeringArgs']]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
route_filter_name: Optional[pulumi.Input[str]] = None,
rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RouteFilterRuleArgs']]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = RouteFilterInitArgs.__new__(RouteFilterInitArgs)
__props__.__dict__["id"] = id
__props__.__dict__["location"] = location
__props__.__dict__["peerings"] = peerings
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["route_filter_name"] = route_filter_name
__props__.__dict__["rules"] = rules
__props__.__dict__["tags"] = tags
__props__.__dict__["etag"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20170601:RouteFilter"), pulumi.Alias(type_="azure-native:network:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20161201:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20161201:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20170301:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20170301:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20170801:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20170801:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20170901:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20170901:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20171001:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20171001:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20171101:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20171101:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20180101:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20180101:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20180201:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20180201:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20180401:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20180401:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20180601:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20180601:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20180701:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20180701:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20180801:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20180801:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20181001:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20181001:RouteFilter"), 
pulumi.Alias(type_="azure-native:network/v20181101:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20181101:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20181201:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20181201:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20190201:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20190201:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20190401:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20190401:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20190601:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20190601:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20190701:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20190701:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20190801:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20190801:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20190901:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20190901:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20191101:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20191101:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20191201:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20191201:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20200301:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20200301:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20200401:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20200401:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20200501:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20200501:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20200601:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20200601:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20200701:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20200701:RouteFilter"), 
pulumi.Alias(type_="azure-native:network/v20200801:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20200801:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20201101:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20201101:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20210201:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20210201:RouteFilter"), pulumi.Alias(type_="azure-native:network/v20210301:RouteFilter"), pulumi.Alias(type_="azure-nextgen:network/v20210301:RouteFilter")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(RouteFilter, __self__).__init__(
'azure-native:network/v20170601:RouteFilter',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'RouteFilter':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = RouteFilterInitArgs.__new__(RouteFilterInitArgs)
__props__.__dict__["etag"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["peerings"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["rules"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return RouteFilter(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
return pulumi.get(self, "etag")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter
def peerings(self) -> pulumi.Output[Optional[Sequence['outputs.ExpressRouteCircuitPeeringResponse']]]:
return pulumi.get(self, "peerings")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def rules(self) -> pulumi.Output[Optional[Sequence['outputs.RouteFilterRuleResponse']]]:
return pulumi.get(self, "rules")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
return pulumi.get(self, "type")
| true | true |
f73dc78ee012fbeb96c05de21ede83d4b0678180 | 7,110 | py | Python | app/model/zygosity.py | evgeniyabrosin/anfisa | ac4aef1a816de05ee2a45aa5b220e2baf93574de | [
"Apache-2.0"
] | 8 | 2019-03-26T16:07:46.000Z | 2021-12-30T13:38:06.000Z | app/model/zygosity.py | evgeniyabrosin/anfisa | ac4aef1a816de05ee2a45aa5b220e2baf93574de | [
"Apache-2.0"
] | 13 | 2018-11-07T19:37:20.000Z | 2022-02-21T17:11:45.000Z | app/model/zygosity.py | evgeniyabrosin/anfisa | ac4aef1a816de05ee2a45aa5b220e2baf93574de | [
"Apache-2.0"
] | 15 | 2018-10-16T08:15:11.000Z | 2022-02-21T14:07:29.000Z | # Copyright (c) 2019. Partners HealthCare and other members of
# Forome Association
#
# Developed by Sergey Trifonov based on contributions by Joel Krier,
# Michael Bouzinier, Shamil Sunyaev and other members of Division of
# Genetics, Brigham and Women's Hospital
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from app.config.a_config import AnfisaConfig
from app.eval.condition import ZYG_BOUNDS_VAL
#===============================================
class ZygositySupport:
sMaxGeneCompCount = AnfisaConfig.configOption("max.gene.comp.count")
def __init__(self, ds_h):
self.mEvalSpace = ds_h.getEvalSpace()
self.mFamilyInfo = ds_h.getFamilyInfo()
self.mXCondition = None
self.mApproxInfo = []
self.mGeneUnits = dict()
def setupX(self, x_unit, x_values):
self.mXCondition = self.mEvalSpace.makeEnumCond(
self.mEvalSpace.getUnit(x_unit), x_values)
def regGeneApprox(self, approx_key, unit_name, approx_title):
self.mGeneUnits[approx_key] = self.mEvalSpace.getUnit(unit_name)
assert self.mGeneUnits[approx_key] is not None, (
"Bad gene unit: " + unit_name)
self.mApproxInfo.append([approx_key, approx_title])
def getFamilyInfo(self):
return self.mFamilyInfo
def getApproxInfo(self):
return self.mApproxInfo
def getAffectedGroup(self):
return self.mFamilyInfo.getAffectedGroup()
def getNames(self):
return self.mFamilyInfo.getNames()
def filter(self, p_group):
return self.mFamilyInfo.filter(p_group)
def getTrioSeq(self):
return self.mFamilyInfo.getTrioSeq()
def getGeneUnit(self, approx_mode):
return self.mGeneUnits[approx_mode]
def normalizeApprox(self, approx_mode):
if not approx_mode:
return self.mApproxInfo[0][0]
if approx_mode in self.mGeneUnits:
return approx_mode
return False
def hasXLinked(self):
return self.mFamilyInfo.groupHasMales()
#=========================
# Scenarios
#=========================
def conditionScenario(self, scenario):
seq = []
for zyg_bounds, seq_samples in scenario.items():
for idx in self.mFamilyInfo.names2idxset(seq_samples):
seq.append(self.mEvalSpace.makeNumericCond(
self.mEvalSpace.getZygUnit(idx), zyg_bounds = zyg_bounds))
return self.mEvalSpace.joinAnd(seq)
def conditionZHomoRecess(self, problem_group):
cond = self._conditionZHomoRecess(problem_group)
if self.mFamilyInfo.groupHasMales(problem_group):
return self.mXCondition.negative().addAnd(cond)
return cond
def _conditionZHomoRecess(self, problem_group):
return self.conditionScenario({
"2": problem_group,
"0-1": self.mFamilyInfo.complement(problem_group)})
def conditionZXLinked(self, problem_group):
if self.mFamilyInfo.groupHasMales(problem_group):
return self.mXCondition.addAnd(
self._conditionZHomoRecess(problem_group))
return self.mEvalSpace.getCondNone()
def conditionZDominant(self, problem_group):
return self.conditionScenario({
"1-2": problem_group,
"0": self.mFamilyInfo.complement(problem_group)})
def conditionZCompens(self, problem_group):
return self.conditionScenario({
"0": problem_group,
"1-2": self.mFamilyInfo.complement(problem_group)})
#=========================
# Compound requests
#=========================
def makeCompoundRequest(self, approx_mode,
actual_condition, c_rq, unit_name):
set_genes = None
cond_scenario_seq = []
for min_count, scenario in c_rq:
cond_scenario = self.conditionScenario(scenario)
if cond_scenario.getCondType() == "null":
continue
if min_count < 1:
continue
cond_scenario_seq.append(cond_scenario)
stat_info = self.mGeneUnits[approx_mode].makeStat(
actual_condition.addAnd(cond_scenario), None)
genes = set()
for info in stat_info["variants"]:
gene, count = info[:2]
if count >= min_count:
genes.add(gene)
if set_genes is not None:
set_genes &= genes
else:
set_genes = genes
if len(set_genes) == 0:
return self.mEvalSpace.getCondNone()
if set_genes is None:
return self.mEvalSpace.getCondNone()
if len(set_genes) >= self.sMaxGeneCompCount:
return None
logging.info("Eval compound genes for %s: %d" %
(unit_name, len(set_genes)))
return self.mEvalSpace.joinAnd([
actual_condition,
self.mEvalSpace.makeEnumCond(
self.mGeneUnits[approx_mode], sorted(set_genes)),
self.mEvalSpace.joinOr(cond_scenario_seq)])
@classmethod
def emptyRequest(cls, request):
for rq_var in request:
if rq_var[0] > 0:
for val in rq_var[1].values():
if val:
return False
return True
#=========================
# Validation
#=========================
@classmethod
def validateScenario(cls, scenario):
if not isinstance(scenario, dict):
return "Scenario expected in form of dict"
bad_keys = set(scenario.keys()) - set(ZYG_BOUNDS_VAL.keys())
if len(bad_keys) > 0:
return ("Improper keys in scenario: "
+ " ".join(sorted(bad_keys)))
for val in scenario.values():
if (not isinstance(val, list)
or not all(isinstance(v, str) for v in val)):
return ("Values in scenario dict "
+ "should be lists of identifiers")
return None
@classmethod
def validateRequest(cls, request):
if not isinstance(request, list):
return "Request expected in form of list"
for idx, rq_var in enumerate(request):
if (not isinstance(rq_var, list) or len(rq_var) != 2
or not isinstance(rq_var[0], int)):
return "Invalid request record no %d" % (idx + 1)
err_msg = cls.validateScenario(rq_var[1])
if err_msg:
return err_msg + (" in record no %d" % (idx + 1))
return None
| 36.839378 | 78 | 0.605907 |
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from app.config.a_config import AnfisaConfig
from app.eval.condition import ZYG_BOUNDS_VAL
#===============================================
class ZygositySupport:
sMaxGeneCompCount = AnfisaConfig.configOption("max.gene.comp.count")
def __init__(self, ds_h):
self.mEvalSpace = ds_h.getEvalSpace()
self.mFamilyInfo = ds_h.getFamilyInfo()
self.mXCondition = None
self.mApproxInfo = []
self.mGeneUnits = dict()
def setupX(self, x_unit, x_values):
self.mXCondition = self.mEvalSpace.makeEnumCond(
self.mEvalSpace.getUnit(x_unit), x_values)
def regGeneApprox(self, approx_key, unit_name, approx_title):
self.mGeneUnits[approx_key] = self.mEvalSpace.getUnit(unit_name)
assert self.mGeneUnits[approx_key] is not None, (
"Bad gene unit: " + unit_name)
self.mApproxInfo.append([approx_key, approx_title])
def getFamilyInfo(self):
return self.mFamilyInfo
def getApproxInfo(self):
return self.mApproxInfo
def getAffectedGroup(self):
return self.mFamilyInfo.getAffectedGroup()
def getNames(self):
return self.mFamilyInfo.getNames()
def filter(self, p_group):
return self.mFamilyInfo.filter(p_group)
def getTrioSeq(self):
return self.mFamilyInfo.getTrioSeq()
def getGeneUnit(self, approx_mode):
return self.mGeneUnits[approx_mode]
def normalizeApprox(self, approx_mode):
if not approx_mode:
return self.mApproxInfo[0][0]
if approx_mode in self.mGeneUnits:
return approx_mode
return False
def hasXLinked(self):
return self.mFamilyInfo.groupHasMales()
#=========================
# Scenarios
#=========================
def conditionScenario(self, scenario):
seq = []
for zyg_bounds, seq_samples in scenario.items():
for idx in self.mFamilyInfo.names2idxset(seq_samples):
seq.append(self.mEvalSpace.makeNumericCond(
self.mEvalSpace.getZygUnit(idx), zyg_bounds = zyg_bounds))
return self.mEvalSpace.joinAnd(seq)
def conditionZHomoRecess(self, problem_group):
cond = self._conditionZHomoRecess(problem_group)
if self.mFamilyInfo.groupHasMales(problem_group):
return self.mXCondition.negative().addAnd(cond)
return cond
def _conditionZHomoRecess(self, problem_group):
return self.conditionScenario({
"2": problem_group,
"0-1": self.mFamilyInfo.complement(problem_group)})
def conditionZXLinked(self, problem_group):
if self.mFamilyInfo.groupHasMales(problem_group):
return self.mXCondition.addAnd(
self._conditionZHomoRecess(problem_group))
return self.mEvalSpace.getCondNone()
def conditionZDominant(self, problem_group):
return self.conditionScenario({
"1-2": problem_group,
"0": self.mFamilyInfo.complement(problem_group)})
def conditionZCompens(self, problem_group):
return self.conditionScenario({
"0": problem_group,
"1-2": self.mFamilyInfo.complement(problem_group)})
#=========================
# Compound requests
#=========================
def makeCompoundRequest(self, approx_mode,
actual_condition, c_rq, unit_name):
set_genes = None
cond_scenario_seq = []
for min_count, scenario in c_rq:
cond_scenario = self.conditionScenario(scenario)
if cond_scenario.getCondType() == "null":
continue
if min_count < 1:
continue
cond_scenario_seq.append(cond_scenario)
stat_info = self.mGeneUnits[approx_mode].makeStat(
actual_condition.addAnd(cond_scenario), None)
genes = set()
for info in stat_info["variants"]:
gene, count = info[:2]
if count >= min_count:
genes.add(gene)
if set_genes is not None:
set_genes &= genes
else:
set_genes = genes
if len(set_genes) == 0:
return self.mEvalSpace.getCondNone()
if set_genes is None:
return self.mEvalSpace.getCondNone()
if len(set_genes) >= self.sMaxGeneCompCount:
return None
logging.info("Eval compound genes for %s: %d" %
(unit_name, len(set_genes)))
return self.mEvalSpace.joinAnd([
actual_condition,
self.mEvalSpace.makeEnumCond(
self.mGeneUnits[approx_mode], sorted(set_genes)),
self.mEvalSpace.joinOr(cond_scenario_seq)])
@classmethod
def emptyRequest(cls, request):
for rq_var in request:
if rq_var[0] > 0:
for val in rq_var[1].values():
if val:
return False
return True
#=========================
# Validation
#=========================
@classmethod
def validateScenario(cls, scenario):
if not isinstance(scenario, dict):
return "Scenario expected in form of dict"
bad_keys = set(scenario.keys()) - set(ZYG_BOUNDS_VAL.keys())
if len(bad_keys) > 0:
return ("Improper keys in scenario: "
+ " ".join(sorted(bad_keys)))
for val in scenario.values():
if (not isinstance(val, list)
or not all(isinstance(v, str) for v in val)):
return ("Values in scenario dict "
+ "should be lists of identifiers")
return None
@classmethod
def validateRequest(cls, request):
if not isinstance(request, list):
return "Request expected in form of list"
for idx, rq_var in enumerate(request):
if (not isinstance(rq_var, list) or len(rq_var) != 2
or not isinstance(rq_var[0], int)):
return "Invalid request record no %d" % (idx + 1)
err_msg = cls.validateScenario(rq_var[1])
if err_msg:
return err_msg + (" in record no %d" % (idx + 1))
return None
| true | true |
f73dc793dc595d0d972e0c483dbf269d153d71ac | 5,405 | py | Python | tests/test_setup.py | fanmolh/mysql-connector-python | 2c46b9ea8005a7902b274c893ba4a22f58fbaecd | [
"Artistic-1.0-Perl"
] | 1 | 2019-12-23T05:21:34.000Z | 2019-12-23T05:21:34.000Z | tests/test_setup.py | aliang2017/mysql-connector-python | e424cbf2ba6093caaa96bda1db5dbdfec2e60e1c | [
"Artistic-1.0-Perl"
] | 1 | 2019-10-24T02:06:24.000Z | 2019-10-24T02:06:24.000Z | tests/test_setup.py | aliang2017/mysql-connector-python | e424cbf2ba6093caaa96bda1db5dbdfec2e60e1c | [
"Artistic-1.0-Perl"
] | 1 | 2020-07-20T23:26:58.000Z | 2020-07-20T23:26:58.000Z | # Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2.0, as
# published by the Free Software Foundation.
#
# This program is also distributed with certain software (including
# but not limited to OpenSSL) that is licensed under separate terms,
# as designated in a particular file or component or in included license
# documentation. The authors of MySQL hereby grant you an
# additional permission to link the program and your derivative works
# with the separately licensed software that they have included with
# MySQL.
#
# Without limiting anything contained in the foregoing, this file,
# which is part of MySQL Connector/Python, is also subject to the
# Universal FOSS Exception, version 1.0, a copy of which can be found at
# http://oss.oracle.com/licenses/universal-foss-exception.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License, version 2.0, for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Unit tests for the setup script of Connector/Python
"""
import sys
import tests
import imp
import setupinfo
class VersionTests(tests.MySQLConnectorTests):
"""Testing the version of Connector/Python"""
def test_version(self):
"""Test validity of version"""
vs = setupinfo.VERSION
self.assertTrue(all(
[isinstance(vs[0], int),
isinstance(vs[1], int),
isinstance(vs[2], int),
isinstance(vs[3], str),
isinstance(vs[4], int)]))
def test___version__(self):
"""Test module __version__ and __version_info__"""
import mysql.connector
self.assertTrue(hasattr(mysql.connector, '__version__'))
self.assertTrue(hasattr(mysql.connector, '__version_info__'))
self.assertTrue(isinstance(mysql.connector.__version__, str))
self.assertTrue(isinstance(mysql.connector.__version_info__, tuple))
self.assertEqual(setupinfo.VERSION_TEXT, mysql.connector.__version__)
self.assertEqual(setupinfo.VERSION, mysql.connector.__version_info__)
class SetupInfoTests(tests.MySQLConnectorTests):
"""Testing meta setup information
We are importing the setupinfo module insite the unit tests
to be able to actually do tests.
"""
def setUp(self):
# we temper with version_info, play safe, keep copy
self._sys_version_info = sys.version_info
def tearDown(self):
# we temper with version_info, play safe, restore copy
sys.version_info = self._sys_version_info
def test_name(self):
"""Test the name of Connector/Python"""
import setupinfo
self.assertEqual('mysql-connector-python', setupinfo.name)
def test_dev_statuses(self):
"""Test the development statuses"""
import setupinfo
exp = {
'a': '3 - Alpha',
'b': '4 - Beta',
'rc': '4 - Beta',
'': '5 - Production/Stable'
}
self.assertEqual(exp, setupinfo.DEVELOPMENT_STATUSES)
def test_package_dir(self):
"""Test the package directory"""
import setupinfo
exp = {
'': 'lib',
}
self.assertEqual(exp, setupinfo.package_dir)
def test_unsupported_python(self):
"""Test if old Python version are unsupported"""
import setupinfo
tmp = sys.version_info
sys.version_info = (3, 0, 0, 'final', 0)
try:
imp.reload(setupinfo)
except RuntimeError:
pass
else:
self.fail("RuntimeError not raised with unsupported Python")
sys.version_info = tmp
def test_version(self):
"""Test the imported version information"""
import setupinfo
ver = setupinfo.VERSION
exp = '{0}.{1}.{2}'.format(*ver[0:3])
self.assertEqual(exp, setupinfo.version)
def test_misc_meta(self):
"""Test miscellaneous data such as URLs"""
import setupinfo
self.assertEqual(
'http://dev.mysql.com/doc/connector-python/en/index.html',
setupinfo.url)
self.assertEqual(
'http://dev.mysql.com/downloads/connector/python/',
setupinfo.download_url)
def test_classifiers(self):
"""Test Trove classifiers"""
import setupinfo
for clsfr in setupinfo.classifiers:
if 'Programming Language :: Python' in clsfr:
ver = clsfr.replace('Programming Language :: Python :: ', '')
if ver not in ('2.6', '2.7', '3', '3.1', '3.2', '3.3', '3.4',
'3.5', '3.6', '3.7', '3.8'):
self.fail('Unsupported version in classifiers')
if 'Development Status ::' in clsfr:
status = clsfr.replace('Development Status :: ', '')
self.assertEqual(
setupinfo.DEVELOPMENT_STATUSES[setupinfo.VERSION[3]],
status)
| 36.52027 | 79 | 0.642183 |
import sys
import tests
import imp
import setupinfo
class VersionTests(tests.MySQLConnectorTests):
def test_version(self):
vs = setupinfo.VERSION
self.assertTrue(all(
[isinstance(vs[0], int),
isinstance(vs[1], int),
isinstance(vs[2], int),
isinstance(vs[3], str),
isinstance(vs[4], int)]))
def test___version__(self):
import mysql.connector
self.assertTrue(hasattr(mysql.connector, '__version__'))
self.assertTrue(hasattr(mysql.connector, '__version_info__'))
self.assertTrue(isinstance(mysql.connector.__version__, str))
self.assertTrue(isinstance(mysql.connector.__version_info__, tuple))
self.assertEqual(setupinfo.VERSION_TEXT, mysql.connector.__version__)
self.assertEqual(setupinfo.VERSION, mysql.connector.__version_info__)
class SetupInfoTests(tests.MySQLConnectorTests):
def setUp(self):
self._sys_version_info = sys.version_info
def tearDown(self):
sys.version_info = self._sys_version_info
def test_name(self):
import setupinfo
self.assertEqual('mysql-connector-python', setupinfo.name)
def test_dev_statuses(self):
import setupinfo
exp = {
'a': '3 - Alpha',
'b': '4 - Beta',
'rc': '4 - Beta',
'': '5 - Production/Stable'
}
self.assertEqual(exp, setupinfo.DEVELOPMENT_STATUSES)
def test_package_dir(self):
import setupinfo
exp = {
'': 'lib',
}
self.assertEqual(exp, setupinfo.package_dir)
def test_unsupported_python(self):
import setupinfo
tmp = sys.version_info
sys.version_info = (3, 0, 0, 'final', 0)
try:
imp.reload(setupinfo)
except RuntimeError:
pass
else:
self.fail("RuntimeError not raised with unsupported Python")
sys.version_info = tmp
def test_version(self):
import setupinfo
ver = setupinfo.VERSION
exp = '{0}.{1}.{2}'.format(*ver[0:3])
self.assertEqual(exp, setupinfo.version)
def test_misc_meta(self):
import setupinfo
self.assertEqual(
'http://dev.mysql.com/doc/connector-python/en/index.html',
setupinfo.url)
self.assertEqual(
'http://dev.mysql.com/downloads/connector/python/',
setupinfo.download_url)
def test_classifiers(self):
import setupinfo
for clsfr in setupinfo.classifiers:
if 'Programming Language :: Python' in clsfr:
ver = clsfr.replace('Programming Language :: Python :: ', '')
if ver not in ('2.6', '2.7', '3', '3.1', '3.2', '3.3', '3.4',
'3.5', '3.6', '3.7', '3.8'):
self.fail('Unsupported version in classifiers')
if 'Development Status ::' in clsfr:
status = clsfr.replace('Development Status :: ', '')
self.assertEqual(
setupinfo.DEVELOPMENT_STATUSES[setupinfo.VERSION[3]],
status)
| true | true |
f73dc82bb993411b88f9d2bc84704c9cb8182d46 | 18,695 | py | Python | SDLabs/SDOM/PY/iclick.py | hippiejesus/SDOM | 271328e306343dbc2f76269317950993cf5d9b4e | [
"BSD-3-Clause"
] | null | null | null | SDLabs/SDOM/PY/iclick.py | hippiejesus/SDOM | 271328e306343dbc2f76269317950993cf5d9b4e | [
"BSD-3-Clause"
] | null | null | null | SDLabs/SDOM/PY/iclick.py | hippiejesus/SDOM | 271328e306343dbc2f76269317950993cf5d9b4e | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Widgets loaded from inventoryView.ui onto this dialog (via uic.loadUi):
#   self.listProduct -- the main QListWidget being browsed
#   self.label       -- the header label naming the currently viewed item
import os.path
import classes as cl
import datetime
import qdarkstyle
import re
import csv
import sys
import subprocess
from PyQt4 import QtGui, QtCore, uic
# Load the persisted inventory state before any UI is constructed.
cl.load()
# Maps each top-level category label shown in the list widget to the
# inventory collection it displays (consumed by SearchWindow.setContents
# and SearchWindow.doubleEvent).
options = {'Containers':cl.inv.listAllContainers,
           'Product':cl.inv.listAllFinishedProduct,
           'Unfinished Product':cl.inv.listAllUnfinishedProduct,
           'Trim':cl.inv.listAllTotes,
           'Runs':cl.inv.listAllRuns,
           'Shipments':cl.inv.listAllShipments,
           'Locations':cl.inv.listAllLocations,
           'Transactions':cl.inv.listAllTransactions}
class SearchWindow(QtGui.QDialog):
def __init__(self):
super(SearchWindow, self).__init__()
uic.loadUi(cl.UIstem+'inventoryView.ui', self)
self.currentItem = None
self.currentCategory = None
self.edit = dict()
self.currentLabel = None
self.listProduct.itemDoubleClicked.connect(self.doubleEvent)
self.pushBack.clicked.connect(self.back)
self.save.clicked.connect(self.saveData)
self.setContents()
self.center()
def center(self):
frameGm = self.frameGeometry()
screen = QtGui.QApplication.desktop().screenNumber(QtGui.QApplication.desktop().cursor().pos())
centerPoint = QtGui.QApplication.desktop().screenGeometry(screen).center()
frameGm.moveCenter(centerPoint)
self.move(frameGm.topLeft())
def saveData(self):
cl.save()
def back(self):
if self.currentItem != None: self.currentItem = None
elif self.currentCategory != None: self.currentCategory = None
self.setContents()
    def doubleEvent(self):
        """Handle a double-click on a row of ``listProduct``.

        Behaviour depends on the current browsing depth:

        * top level (no category yet): the clicked row's text becomes the
          current category;
        * category view (category set, no item): the clicked row is matched
          by ID against the category's collection and becomes the current
          item;
        * item view: the clicked row is one ``"field : value"`` line.
          Link-style fields (runs/trim/bags/shipment/container/product)
          open a chooser dialog and navigate to the chosen related object,
          ``location`` re-homes the item, and any other field pops a
          free-text dialog whose input is written back to the matching
          attribute.
        """
        if self.currentCategory != None:
            if self.currentItem != None:
                # --- item view: one "field : value" row was double-clicked ---
                current = self.listProduct.currentItem()
                text = str(current.text())
                # split[0] is the field label, split[1] (if present) its value.
                split = text.split(' : ')
                # Re-bind `current` to the displayed inventory object; the
                # attribute edits at the bottom of this method target it.
                current = self.currentItem
                # NOTE(review): each chooser below only `return`s when the
                # user picks a matching entry.  Cancelling (ok is False) or
                # an unmatched ID falls through to the generic "new value"
                # text dialog at the bottom -- confirm this is intended.
                if split[0] == 'runs included':
                    optionList = []
                    for i in self.currentItem.runsIncluded:
                        optionList.append(i.ID)
                    # inn is a QString under PyQt4 API v1; `i.ID == inn`
                    # relies on QString/str cross-comparison -- TODO confirm.
                    inn, ok = QtGui.QInputDialog.getItem(self,'Choose','Choose run to view:',optionList)
                    if ok:
                        for i in self.currentItem.runsIncluded:
                            if i.ID == inn:
                                self.currentItem = i
                                self.setContents()
                                return
                # NOTE(review): this is `if`, not `elif`, so after a cancelled
                # 'runs included' chooser this second chain is evaluated too.
                if split[0] == 'trim included':
                    optionList = []
                    for i in self.currentItem.trimIncluded:
                        optionList.append(i.ID)
                    inn, ok = QtGui.QInputDialog.getItem(self,'Choose','Choose trim to view:',optionList)
                    if ok:
                        for i in self.currentItem.trimIncluded:
                            if i.ID == inn:
                                self.currentItem = i
                                self.setContents()
                                return
                # 'recieved' [sic] must match the label text produced by
                # setContents(); do not correct the spelling here alone.
                elif split[0] == 'bags recieved':
                    optionList = []
                    for i in self.currentItem.bags:
                        optionList.append(str(i.ID))
                    inn, ok = QtGui.QInputDialog.getItem(self,'Choose','Choose bag to view:',optionList)
                    if ok:
                        # The chosen ID is looked up in the global bag list,
                        # not just this item's own bags.
                        for i in cl.inv.listAllBags:
                            if i.ID == inn:
                                self.currentItem = i
                                self.setContents()
                                return
                elif split[0] == 'location':
                    # Move the current item to a newly chosen location.
                    optionList = []
                    for i in cl.inv.listAllLocations:
                        optionList.append(i.ID)
                    inn, ok = QtGui.QInputDialog.getItem(self,'Choose','Choose location to move to:',optionList)
                    if ok:
                        for i in cl.inv.listAllLocations:
                            if i.ID == inn:
                                # Detach from the old location's item list;
                                # the bare except swallows the case where the
                                # item has no location yet (location is None
                                # or the item is absent from the list).
                                try:
                                    self.currentItem.location.items.pop(self.currentItem.location.items.index(self.currentItem))
                                except: pass
                                self.currentItem.location = i
                                i.items.append(self.currentItem)
                                self.setContents()
                                return
                elif split[0] == 'shipment':
                    # Navigate directly: split[1] is the displayed shipment ID.
                    for i in cl.inv.listAllShipments:
                        if i.ID == split[1]:
                            self.currentItem = i
                            self.setContents()
                            return
                elif split[0] == 'container':
                    for i in cl.inv.listAllContainers:
                        if i.ID == split[1]:
                            self.currentItem = i
                            self.setContents()
                            return
                elif split[0] == 'unfinished product included':
                    optionList = []
                    for i in self.currentItem.unfinishedProductIncluded:
                        optionList.append(str(i.ID))
                    inn, ok = QtGui.QInputDialog.getItem(self,'Choose','Choose unfinished product to view:',optionList)
                    if ok:
                        for i in self.currentItem.unfinishedProductIncluded:
                            if str(i.ID) == inn:
                                self.currentItem = i
                                self.setContents()
                                return
                elif split[0] == 'product included':
                    optionList = []
                    for i in self.currentItem.productIncluded:
                        optionList.append(str(i.ID))
                    inn, ok = QtGui.QInputDialog.getItem(self,'Choose','Choose finished product to view:',optionList)
                    if ok:
                        for i in self.currentItem.productIncluded:
                            if str(i.ID) == inn:
                                self.currentItem = i
                                self.setContents()
                                return
                # --- generic attribute edit for any other (or unhandled) field ---
                inn, ok = QtGui.QInputDialog.getText(self,'Choose','Choose a new value for this variable:')
                if ok:
                    print(split[0])
                    # Bare except: a failed float() conversion (or missing
                    # attribute) is reported on stdout and otherwise ignored.
                    try:
                        if split[0] == 'ID':
                            current.ID = str(inn)
                        elif split[0] == 'owner':
                            current.owner = str(inn)
                        elif split[0] == 'trim weight':
                            current.trimWeight = float(inn)
                        elif split[0] == 'weight':
                            current.weight = float(inn)
                        elif split[0] == 'original trim weight':
                            current.ogTrimWeight = float(inn)
                        elif split[0] == 'flavor':
                            current.flavor = str(inn)
                        elif split[0] == 'test results':
                            # Test results are not editable from this view.
                            pass
                    except:
                        print('double click error')
                # Refresh; note the method-level setContents() at the bottom
                # runs as well, so this path redraws twice.
                self.setContents()
            else:
                # --- category view: a row (item) of the category was clicked ---
                current = self.listProduct.currentItem()
                text = str(current.text())
                split = text.split(' : ')
                if options[self.currentCategory] == cl.inv.listAllTransactions:
                    # Transactions carry no ID; select by row position instead.
                    self.currentItem = options[self.currentCategory][self.listProduct.currentRow()]
                    self.setContents()
                    return
                for item in options[self.currentCategory]:
                    try:
                        if item.ID == split[0]:
                            self.currentItem = item
                    except:
                        # Entries without an .ID attribute: listAllRuns
                        # presumably stores (key, [run, ...]) pairs (i[1] is
                        # iterated below) -- verify against classes.py.
                        # Flatten all runs and match the clicked ID there.
                        runs = []
                        for i in cl.inv.listAllRuns:
                            for run in i[1]:
                                runs.append(run)
                        for run in runs:
                            if run.ID == split[0]:
                                self.currentItem = run
        else:
            # --- top level: the clicked row is a category name ---
            current = self.listProduct.currentItem()
            self.currentCategory = str(current.text())
        self.setContents()
def setContents(self):
self.listProduct.clear()
if self.currentItem == None and self.currentCategory == None:
self.label.hide()
for item in options.keys():
self.listProduct.addItem(str(item))
elif self.currentItem != None:
self.label.show()
item = self.currentItem
if isinstance(item,cl.transaction):
self.label.setText(str(item.time_stamp))
else: self.label.setText(str(self.currentItem.ID)+': '+str(self.currentItem.kind))
if isinstance(item,cl.trimTote):
self.listProduct.addItem('ID : '+str(item.ID))
self.listProduct.addItem('shipment : '+str(item.shipment.ID))
self.listProduct.addItem('owner : '+str(item.owner))
self.listProduct.addItem('trim weight : '+str(item.trimWeight))
self.listProduct.addItem('original trim weight : '+str(item.ogTrimWeight))
self.listProduct.addItem('flavor : '+str(item.flavor))
try:
self.listProduct.addItem('location : '+str(item.location.ID))
except:
self.listProduct.addItem('location : '+str(item.location))
self.listProduct.addItem('test results : '+str(item.testResults))
elif isinstance(item,cl.trimBag):
self.listProduct.addItem('ID : '+str(item.ID))
self.listProduct.addItem('shipment : '+str(item.shipment.ID))
self.listProduct.addItem('owner : '+str(item.owner))
self.listProduct.addItem('trim weight : '+str(item.trimWeight))
self.listProduct.addItem('original trim weight : '+str(item.ogTrimWeight))
self.listProduct.addItem('flavor : '+str(item.flavor))
elif isinstance(item,cl.unfinishedProduct):
self.listProduct.addItem('ID : '+str(item.ID))
runsIn = []
for i in item.runsIncluded:
runsIn.append(str(i.ID))
self.listProduct.addItem('runs included : '+str(runsIn))
self.listProduct.addItem('owner : '+str(item.owner))
self.listProduct.addItem('intended finish : '+str(item.intendedFinish))
self.listProduct.addItem('test results : '+str(item.testResults))
try:
self.listProduct.addItem('location : '+str(item.location.ID))
except:
self.listProduct.addItem('location : '+str(item.location))
elif isinstance(item,cl.finishedProduct):
self.listProduct.addItem('ID : '+str(item.ID))
uPIn = []
for i in item.unfinishedProductIncluded:
uPIn.append(str(i.ID))
self.listProduct.addItem('unfinished product included : '+str(uPIn))
self.listProduct.addItem('owner : '+str(item.owner))
self.listProduct.addItem('kind : '+str(item.kind))
self.listProduct.addItem('weight : '+str(item.weight))
self.listProduct.addItem('container : '+str(item.container.ID))
self.listProduct.addItem('test results : '+str(item.testResults))
elif isinstance(item,cl.transaction):
try:
self.listProduct.addItem('Recipient of Payment : '+str(item.recievingEntity.name))
except:
self.listProduct.addItem('Recipient of Payment : '+str(item.recievingEntity))
try:
self.listProduct.addItem('Payee : '+str(item.sendingEntity.name))
except:
self.listProduct.addItem('Payee : '+str(item.sendingEntity))
self.listProduct.addItem('Amount Paid : '+str(item.amountPayed))
self.listProduct.addItem('Amount Owed : '+str(item.amountToBePayed))
self.listProduct.addItem('Total Amount : '+str(item.amountPayed+item.amountToBePayed))
items = list()
for i in item.valuedEntity:
items.append(i.ID)
self.listProduct.addItem('Items Sold : '+str(items))
elif isinstance(item,cl.run):
self.listProduct.addItem('ID : '+str(item.ID))
trimIn = []
for i in item.trimIncluded:
trimIn.append(str(i.ID))
trimAm = []
for i in item.trimAmounts:
trimAm.append(i)
self.listProduct.addItem('trim included : '+str(trimIn))
self.listProduct.addItem('trim amounts : '+str(trimAm))
self.listProduct.addItem('start time : '+str(item.timeStart))
self.listProduct.addItem('owner : '+str(item.owner))
self.listProduct.addItem('blaster : '+str(item.blaster))
try:
self.listProduct.addItem('location : '+str(item.location.ID))
except:
self.listProduct.addItem('location : '+str(item.location))
elif isinstance(item,cl.shipment):
self.listProduct.addItem('ID : '+str(item.ID))
self.listProduct.addItem('source : '+str(item.source))
self.listProduct.addItem('flavor : '+str(item.flavor))
self.listProduct.addItem('date in : '+str(item.dateIn))
self.listProduct.addItem('test results : '+str(item.testResults))
bags = []
for i in item.bags:
bags.append(str(i.ID))
self.listProduct.addItem('bags recieved : '+str(bags))
self.listProduct.addItem('total weight : '+str(item.totalWeight))
self.listProduct.addItem('total price : '+str(item.totalPrice))
elif isinstance(item,cl.container):
self.listProduct.addItem('ID : '+str(item.ID))
self.listProduct.addItem('kind : '+str(item.kind))
self.listProduct.addItem('weight : '+str(item.weight))
self.listProduct.addItem('# of units : '+str(item.numberOfUnits))
self.listProduct.addItem('unit size : '+str(item.unitSize))
self.listProduct.addItem('history : '+str(item.history))
try:
self.listProduct.addItem('location : '+str(item.location.ID))
except:
self.listProduct.addItem('location : '+str(item.location))
prod = []
for i in item.productIncluded:
prod.append(str(i.ID))
self.listProduct.addItem('product included : '+str(prod))
elif isinstance(item,cl.location):
self.listProduct.addItem('ITEMS IN '+str(item.ID))
for i in item.items:
self.listProduct.addItem(str(i.kind)+' : '+str(i.ID))
elif self.currentCategory != None:
self.label.show()
self.label.setText(self.currentCategory)
for item in options[self.currentCategory]:
if self.currentCategory == 'Runs':
runs = []
for i in cl.inv.listAllRuns:
for run in i[1]:
runs.append(run)
for i in runs:
self.listProduct.addItem(str(i.ID)+' : '+str(i.owner))
if isinstance(item,cl.container):
self.listProduct.addItem(str(item.ID)+' : '+str(item.kind)+' : '+str(item.weight))
elif isinstance(item,cl.finishedProduct):
self.listProduct.addItem(str(item.ID)+' : '+str(item.kind)+' : '+str(item.weight))
elif isinstance(item,cl.trimTote):
self.listProduct.addItem(str(item.ID)+' : '+str(item.trimWeight)+' : '+str(item.owner))
elif isinstance(item,cl.unfinishedProduct):
self.listProduct.addItem(str(item.ID)+' : '+str(item.intendedFinish))
#elif isinstance(item,cl.run):
# self.listProduct.addItem(str(item.ID)+' : '+str(item.owner))
elif isinstance(item,cl.shipment):
self.listProduct.addItem(str(item.ID)+' : '+str(item.source)+' : '+str(item.dateIn))
elif isinstance(item,cl.location):
self.listProduct.addItem(str(item.ID)+' : '+str(item.description))
elif isinstance(item,cl.transaction):
try:
self.listProduct.addItem(str(item.recievingEntity)+' <-- '+str(item.sendingEntity.name)+' : '+str(item.amountPayed)+'/'+str(item.amountPayed+item.amountToBePayed))
except:
self.listProduct.addItem(str(item.recievingEntity.name)+' <-- '+str(item.sendingEntity)+' : '+str(item.amountPayed)+'/'+str(item.amountPayed+item.amountToBePayed))
def logClose():
app.quit()
#lg.write('Terminating Session...')
#lg.close()
import atexit
atexit.register(logClose)
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt())
#Create Base Windows
sw = SearchWindow()
#Display Start
sw.show()
sys.exit(app.exec_())
| 50.120643 | 187 | 0.484942 |
import os.path
import classes as cl
import datetime
import qdarkstyle
import re
import csv
import sys
import subprocess
from PyQt4 import QtGui, QtCore, uic
cl.load()
options = {'Containers':cl.inv.listAllContainers,
'Product':cl.inv.listAllFinishedProduct,
'Unfinished Product':cl.inv.listAllUnfinishedProduct,
'Trim':cl.inv.listAllTotes,
'Runs':cl.inv.listAllRuns,
'Shipments':cl.inv.listAllShipments,
'Locations':cl.inv.listAllLocations,
'Transactions':cl.inv.listAllTransactions}
class SearchWindow(QtGui.QDialog):
def __init__(self):
super(SearchWindow, self).__init__()
uic.loadUi(cl.UIstem+'inventoryView.ui', self)
self.currentItem = None
self.currentCategory = None
self.edit = dict()
self.currentLabel = None
self.listProduct.itemDoubleClicked.connect(self.doubleEvent)
self.pushBack.clicked.connect(self.back)
self.save.clicked.connect(self.saveData)
self.setContents()
self.center()
def center(self):
frameGm = self.frameGeometry()
screen = QtGui.QApplication.desktop().screenNumber(QtGui.QApplication.desktop().cursor().pos())
centerPoint = QtGui.QApplication.desktop().screenGeometry(screen).center()
frameGm.moveCenter(centerPoint)
self.move(frameGm.topLeft())
def saveData(self):
cl.save()
def back(self):
if self.currentItem != None: self.currentItem = None
elif self.currentCategory != None: self.currentCategory = None
self.setContents()
def doubleEvent(self):
if self.currentCategory != None:
if self.currentItem != None:
current = self.listProduct.currentItem()
text = str(current.text())
split = text.split(' : ')
current = self.currentItem
if split[0] == 'runs included':
optionList = []
for i in self.currentItem.runsIncluded:
optionList.append(i.ID)
inn, ok = QtGui.QInputDialog.getItem(self,'Choose','Choose run to view:',optionList)
if ok:
for i in self.currentItem.runsIncluded:
if i.ID == inn:
self.currentItem = i
self.setContents()
return
if split[0] == 'trim included':
optionList = []
for i in self.currentItem.trimIncluded:
optionList.append(i.ID)
inn, ok = QtGui.QInputDialog.getItem(self,'Choose','Choose trim to view:',optionList)
if ok:
for i in self.currentItem.trimIncluded:
if i.ID == inn:
self.currentItem = i
self.setContents()
return
elif split[0] == 'bags recieved':
optionList = []
for i in self.currentItem.bags:
optionList.append(str(i.ID))
inn, ok = QtGui.QInputDialog.getItem(self,'Choose','Choose bag to view:',optionList)
if ok:
for i in cl.inv.listAllBags:
if i.ID == inn:
self.currentItem = i
self.setContents()
return
elif split[0] == 'location':
optionList = []
for i in cl.inv.listAllLocations:
optionList.append(i.ID)
inn, ok = QtGui.QInputDialog.getItem(self,'Choose','Choose location to move to:',optionList)
if ok:
for i in cl.inv.listAllLocations:
if i.ID == inn:
try:
self.currentItem.location.items.pop(self.currentItem.location.items.index(self.currentItem))
except: pass
self.currentItem.location = i
i.items.append(self.currentItem)
self.setContents()
return
elif split[0] == 'shipment':
for i in cl.inv.listAllShipments:
if i.ID == split[1]:
self.currentItem = i
self.setContents()
return
elif split[0] == 'container':
for i in cl.inv.listAllContainers:
if i.ID == split[1]:
self.currentItem = i
self.setContents()
return
elif split[0] == 'unfinished product included':
optionList = []
for i in self.currentItem.unfinishedProductIncluded:
optionList.append(str(i.ID))
inn, ok = QtGui.QInputDialog.getItem(self,'Choose','Choose unfinished product to view:',optionList)
if ok:
for i in self.currentItem.unfinishedProductIncluded:
if str(i.ID) == inn:
self.currentItem = i
self.setContents()
return
elif split[0] == 'product included':
optionList = []
for i in self.currentItem.productIncluded:
optionList.append(str(i.ID))
inn, ok = QtGui.QInputDialog.getItem(self,'Choose','Choose finished product to view:',optionList)
if ok:
for i in self.currentItem.productIncluded:
if str(i.ID) == inn:
self.currentItem = i
self.setContents()
return
inn, ok = QtGui.QInputDialog.getText(self,'Choose','Choose a new value for this variable:')
if ok:
print(split[0])
try:
if split[0] == 'ID':
current.ID = str(inn)
elif split[0] == 'owner':
current.owner = str(inn)
elif split[0] == 'trim weight':
current.trimWeight = float(inn)
elif split[0] == 'weight':
current.weight = float(inn)
elif split[0] == 'original trim weight':
current.ogTrimWeight = float(inn)
elif split[0] == 'flavor':
current.flavor = str(inn)
elif split[0] == 'test results':
pass
except:
print('double click error')
self.setContents()
else:
current = self.listProduct.currentItem()
text = str(current.text())
split = text.split(' : ')
if options[self.currentCategory] == cl.inv.listAllTransactions:
self.currentItem = options[self.currentCategory][self.listProduct.currentRow()]
self.setContents()
return
for item in options[self.currentCategory]:
try:
if item.ID == split[0]:
self.currentItem = item
except:
runs = []
for i in cl.inv.listAllRuns:
for run in i[1]:
runs.append(run)
for run in runs:
if run.ID == split[0]:
self.currentItem = run
else:
current = self.listProduct.currentItem()
self.currentCategory = str(current.text())
self.setContents()
def setContents(self):
self.listProduct.clear()
if self.currentItem == None and self.currentCategory == None:
self.label.hide()
for item in options.keys():
self.listProduct.addItem(str(item))
elif self.currentItem != None:
self.label.show()
item = self.currentItem
if isinstance(item,cl.transaction):
self.label.setText(str(item.time_stamp))
else: self.label.setText(str(self.currentItem.ID)+': '+str(self.currentItem.kind))
if isinstance(item,cl.trimTote):
self.listProduct.addItem('ID : '+str(item.ID))
self.listProduct.addItem('shipment : '+str(item.shipment.ID))
self.listProduct.addItem('owner : '+str(item.owner))
self.listProduct.addItem('trim weight : '+str(item.trimWeight))
self.listProduct.addItem('original trim weight : '+str(item.ogTrimWeight))
self.listProduct.addItem('flavor : '+str(item.flavor))
try:
self.listProduct.addItem('location : '+str(item.location.ID))
except:
self.listProduct.addItem('location : '+str(item.location))
self.listProduct.addItem('test results : '+str(item.testResults))
elif isinstance(item,cl.trimBag):
self.listProduct.addItem('ID : '+str(item.ID))
self.listProduct.addItem('shipment : '+str(item.shipment.ID))
self.listProduct.addItem('owner : '+str(item.owner))
self.listProduct.addItem('trim weight : '+str(item.trimWeight))
self.listProduct.addItem('original trim weight : '+str(item.ogTrimWeight))
self.listProduct.addItem('flavor : '+str(item.flavor))
elif isinstance(item,cl.unfinishedProduct):
self.listProduct.addItem('ID : '+str(item.ID))
runsIn = []
for i in item.runsIncluded:
runsIn.append(str(i.ID))
self.listProduct.addItem('runs included : '+str(runsIn))
self.listProduct.addItem('owner : '+str(item.owner))
self.listProduct.addItem('intended finish : '+str(item.intendedFinish))
self.listProduct.addItem('test results : '+str(item.testResults))
try:
self.listProduct.addItem('location : '+str(item.location.ID))
except:
self.listProduct.addItem('location : '+str(item.location))
elif isinstance(item,cl.finishedProduct):
self.listProduct.addItem('ID : '+str(item.ID))
uPIn = []
for i in item.unfinishedProductIncluded:
uPIn.append(str(i.ID))
self.listProduct.addItem('unfinished product included : '+str(uPIn))
self.listProduct.addItem('owner : '+str(item.owner))
self.listProduct.addItem('kind : '+str(item.kind))
self.listProduct.addItem('weight : '+str(item.weight))
self.listProduct.addItem('container : '+str(item.container.ID))
self.listProduct.addItem('test results : '+str(item.testResults))
elif isinstance(item,cl.transaction):
try:
self.listProduct.addItem('Recipient of Payment : '+str(item.recievingEntity.name))
except:
self.listProduct.addItem('Recipient of Payment : '+str(item.recievingEntity))
try:
self.listProduct.addItem('Payee : '+str(item.sendingEntity.name))
except:
self.listProduct.addItem('Payee : '+str(item.sendingEntity))
self.listProduct.addItem('Amount Paid : '+str(item.amountPayed))
self.listProduct.addItem('Amount Owed : '+str(item.amountToBePayed))
self.listProduct.addItem('Total Amount : '+str(item.amountPayed+item.amountToBePayed))
items = list()
for i in item.valuedEntity:
items.append(i.ID)
self.listProduct.addItem('Items Sold : '+str(items))
elif isinstance(item,cl.run):
self.listProduct.addItem('ID : '+str(item.ID))
trimIn = []
for i in item.trimIncluded:
trimIn.append(str(i.ID))
trimAm = []
for i in item.trimAmounts:
trimAm.append(i)
self.listProduct.addItem('trim included : '+str(trimIn))
self.listProduct.addItem('trim amounts : '+str(trimAm))
self.listProduct.addItem('start time : '+str(item.timeStart))
self.listProduct.addItem('owner : '+str(item.owner))
self.listProduct.addItem('blaster : '+str(item.blaster))
try:
self.listProduct.addItem('location : '+str(item.location.ID))
except:
self.listProduct.addItem('location : '+str(item.location))
elif isinstance(item,cl.shipment):
self.listProduct.addItem('ID : '+str(item.ID))
self.listProduct.addItem('source : '+str(item.source))
self.listProduct.addItem('flavor : '+str(item.flavor))
self.listProduct.addItem('date in : '+str(item.dateIn))
self.listProduct.addItem('test results : '+str(item.testResults))
bags = []
for i in item.bags:
bags.append(str(i.ID))
self.listProduct.addItem('bags recieved : '+str(bags))
self.listProduct.addItem('total weight : '+str(item.totalWeight))
self.listProduct.addItem('total price : '+str(item.totalPrice))
elif isinstance(item,cl.container):
self.listProduct.addItem('ID : '+str(item.ID))
self.listProduct.addItem('kind : '+str(item.kind))
self.listProduct.addItem('weight : '+str(item.weight))
self.listProduct.addItem('# of units : '+str(item.numberOfUnits))
self.listProduct.addItem('unit size : '+str(item.unitSize))
self.listProduct.addItem('history : '+str(item.history))
try:
self.listProduct.addItem('location : '+str(item.location.ID))
except:
self.listProduct.addItem('location : '+str(item.location))
prod = []
for i in item.productIncluded:
prod.append(str(i.ID))
self.listProduct.addItem('product included : '+str(prod))
elif isinstance(item,cl.location):
self.listProduct.addItem('ITEMS IN '+str(item.ID))
for i in item.items:
self.listProduct.addItem(str(i.kind)+' : '+str(i.ID))
elif self.currentCategory != None:
self.label.show()
self.label.setText(self.currentCategory)
for item in options[self.currentCategory]:
if self.currentCategory == 'Runs':
runs = []
for i in cl.inv.listAllRuns:
for run in i[1]:
runs.append(run)
for i in runs:
self.listProduct.addItem(str(i.ID)+' : '+str(i.owner))
if isinstance(item,cl.container):
self.listProduct.addItem(str(item.ID)+' : '+str(item.kind)+' : '+str(item.weight))
elif isinstance(item,cl.finishedProduct):
self.listProduct.addItem(str(item.ID)+' : '+str(item.kind)+' : '+str(item.weight))
elif isinstance(item,cl.trimTote):
self.listProduct.addItem(str(item.ID)+' : '+str(item.trimWeight)+' : '+str(item.owner))
elif isinstance(item,cl.unfinishedProduct):
self.listProduct.addItem(str(item.ID)+' : '+str(item.intendedFinish))
elif isinstance(item,cl.shipment):
self.listProduct.addItem(str(item.ID)+' : '+str(item.source)+' : '+str(item.dateIn))
elif isinstance(item,cl.location):
self.listProduct.addItem(str(item.ID)+' : '+str(item.description))
elif isinstance(item,cl.transaction):
try:
self.listProduct.addItem(str(item.recievingEntity)+' <-- '+str(item.sendingEntity.name)+' : '+str(item.amountPayed)+'/'+str(item.amountPayed+item.amountToBePayed))
except:
self.listProduct.addItem(str(item.recievingEntity.name)+' <-- '+str(item.sendingEntity)+' : '+str(item.amountPayed)+'/'+str(item.amountPayed+item.amountToBePayed))
def logClose():
app.quit()
import atexit
atexit.register(logClose)
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt())
sw = SearchWindow()
sw.show()
sys.exit(app.exec_())
| true | true |
f73dc88693200625331d9f1cd25f9e2e5a0f8e7d | 2,315 | py | Python | ArduinoControl/arduino_control.py | lucascarrafa/FunUtils | e065fd499a155b4daa43716a75ac722a4474c659 | [
"MIT"
] | 48 | 2017-03-17T22:23:20.000Z | 2021-12-04T17:52:04.000Z | ArduinoControl/arduino_control.py | lucascarrafa/FunUtils | e065fd499a155b4daa43716a75ac722a4474c659 | [
"MIT"
] | 23 | 2017-10-13T04:06:11.000Z | 2021-06-02T15:01:34.000Z | ArduinoControl/arduino_control.py | lucascarrafa/FunUtils | e065fd499a155b4daa43716a75ac722a4474c659 | [
"MIT"
] | 83 | 2017-07-18T03:17:37.000Z | 2022-03-18T00:01:15.000Z | import serial
# serial communication params
SERIAL_PORT = "/dev/ttyUSB0"
DEFAULT_BAUD_RATE = 9600
class ArduinoControlService:
def __init__(self, port=SERIAL_PORT, baud_rate=DEFAULT_BAUD_RATE):
self._controller = serial.Serial(port, baud_rate)
self._state = 0
# public methods
def get_state(self):
"""
Returns output state.
:return: output state 0/1
"""
return self._state
def control(self, state):
"""
Control arduino writing through serial port. Output state is written as str.
:param state: value that determines output state - one of the following values (`switch`, `power off`,
`power on`) (str)
:return: void method
"""
print("Calling arduino control method with params: [state = {}]".format(state))
self._set_state(state)
self._controller.write(str(self._state).encode())
def dispose(self):
"""
Closes the serial port.
:return: void method
"""
self._controller.close()
# private methods
def _state_switch(self):
"""
Switches the output state.
:return: void method
"""
self._state = 1 - self._state
def _turn_on(self):
"""
Sets output state to high.
:return: void method
"""
self._state = 1
def _turn_off(self):
"""
Sets output state to low.
:return: void method
"""
self._state = 0
def _set_state(self, state):
"""
Sets output state based on state value.
:param state: value that determines output state - one of the following values (`switch`, `power off`,
`power on`) (str)
:return: void method
"""
if state == "switch":
self._state_switch()
elif state == "power off":
self._turn_off()
elif state == "power on":
self._turn_on()
else:
raise ValueError("Invalid state.")
print("Current relay state = {}".format(self.get_state()))
import time
ar_s = ArduinoControlService()
for i in range(6):
ar_s.control("switch")
print(ar_s.get_state())
time.sleep(3)
ar_s.control("power on")
ar_s.control("power off")
ar_s.dispose() | 26.011236 | 110 | 0.578834 | import serial
SERIAL_PORT = "/dev/ttyUSB0"
DEFAULT_BAUD_RATE = 9600
class ArduinoControlService:
def __init__(self, port=SERIAL_PORT, baud_rate=DEFAULT_BAUD_RATE):
self._controller = serial.Serial(port, baud_rate)
self._state = 0
def get_state(self):
return self._state
def control(self, state):
print("Calling arduino control method with params: [state = {}]".format(state))
self._set_state(state)
self._controller.write(str(self._state).encode())
def dispose(self):
self._controller.close()
def _state_switch(self):
self._state = 1 - self._state
def _turn_on(self):
self._state = 1
def _turn_off(self):
self._state = 0
def _set_state(self, state):
if state == "switch":
self._state_switch()
elif state == "power off":
self._turn_off()
elif state == "power on":
self._turn_on()
else:
raise ValueError("Invalid state.")
print("Current relay state = {}".format(self.get_state()))
import time
ar_s = ArduinoControlService()
for i in range(6):
ar_s.control("switch")
print(ar_s.get_state())
time.sleep(3)
ar_s.control("power on")
ar_s.control("power off")
ar_s.dispose() | true | true |
f73dc89ef331e90943dadea02fc7babf9074b3a4 | 13,705 | py | Python | testing/test_session.py | markshao/pytest | 611b579d21f7e62b4c8ed54ab70fbfee7c6f5f64 | [
"MIT"
] | 9,225 | 2015-06-15T21:56:14.000Z | 2022-03-31T20:47:38.000Z | testing/test_session.py | markshao/pytest | 611b579d21f7e62b4c8ed54ab70fbfee7c6f5f64 | [
"MIT"
] | 7,794 | 2015-06-15T21:06:34.000Z | 2022-03-31T10:56:54.000Z | testing/test_session.py | markshao/pytest | 611b579d21f7e62b4c8ed54ab70fbfee7c6f5f64 | [
"MIT"
] | 2,598 | 2015-06-15T21:42:39.000Z | 2022-03-29T13:48:22.000Z | import pytest
from _pytest.config import ExitCode
from _pytest.monkeypatch import MonkeyPatch
from _pytest.pytester import Pytester
class SessionTests:
    """Whole-session integration tests driven through ``Pytester`` inline runs."""

    def test_basic_testitem_events(self, pytester: Pytester) -> None:
        """Outcome counts and ``pytest_itemcollected`` calls match the collected tests."""
        tfile = pytester.makepyfile(
            """
            def test_one():
                pass
            def test_one_one():
                assert 0
            def test_other():
                raise ValueError(23)
            class TestClass(object):
                def test_two(self, someargs):
                    pass
        """
        )
        reprec = pytester.inline_run(tfile)
        passed, skipped, failed = reprec.listoutcomes()
        assert len(skipped) == 0
        assert len(passed) == 1
        # test_one_one and test_other fail; test_two presumably fails at setup
        # because fixture "someargs" does not exist.
        assert len(failed) == 3

        def end(x):
            # Last nodeid component, i.e. the bare test function name.
            return x.nodeid.split("::")[-1]

        assert end(failed[0]) == "test_one_one"
        assert end(failed[1]) == "test_other"
        itemstarted = reprec.getcalls("pytest_itemcollected")
        assert len(itemstarted) == 4
        # XXX check for failing funcarg setup
        # colreports = reprec.getcalls("pytest_collectreport")
        # assert len(colreports) == 4
        # assert colreports[1].report.failed

    def test_nested_import_error(self, pytester: Pytester) -> None:
        """An ImportError raised by an imported module shows up as a collection failure."""
        tfile = pytester.makepyfile(
            """
            import import_fails
            def test_this():
                assert import_fails.a == 1
        """,
            import_fails="""
            import does_not_work
            a = 1
        """,
        )
        reprec = pytester.inline_run(tfile)
        values = reprec.getfailedcollections()
        assert len(values) == 1
        out = str(values[0].longrepr)
        # The name of the module that failed to import must appear in the report.
        assert out.find("does_not_work") != -1

    def test_raises_output(self, pytester: Pytester) -> None:
        """pytest.raises that sees no exception fails with a DID NOT RAISE message."""
        reprec = pytester.inline_runsource(
            """
            import pytest
            def test_raises_doesnt():
                pytest.raises(ValueError, int, "3")
        """
        )
        passed, skipped, failed = reprec.listoutcomes()
        assert len(failed) == 1
        out = failed[0].longrepr.reprcrash.message  # type: ignore[union-attr]
        assert "DID NOT RAISE" in out

    def test_syntax_error_module(self, pytester: Pytester) -> None:
        """A module with invalid syntax fails collection and reports its source."""
        reprec = pytester.inline_runsource("this is really not python")
        values = reprec.getfailedcollections()
        assert len(values) == 1
        out = str(values[0].longrepr)
        assert out.find("not python") != -1

    def test_exit_first_problem(self, pytester: Pytester) -> None:
        """--exitfirst stops the session after the first failing test."""
        reprec = pytester.inline_runsource(
            """
            def test_one(): assert 0
            def test_two(): assert 0
        """,
            "--exitfirst",
        )
        passed, skipped, failed = reprec.countoutcomes()
        assert failed == 1
        assert passed == skipped == 0

    def test_maxfail(self, pytester: Pytester) -> None:
        """--maxfail=N stops the session after N failures."""
        reprec = pytester.inline_runsource(
            """
            def test_one(): assert 0
            def test_two(): assert 0
            def test_three(): assert 0
        """,
            "--maxfail=2",
        )
        passed, skipped, failed = reprec.countoutcomes()
        assert failed == 2
        assert passed == skipped == 0

    def test_broken_repr(self, pytester: Pytester) -> None:
        """An object whose __repr__ raises is rendered with a safe placeholder repr."""
        p = pytester.makepyfile(
            """
            import pytest

            class reprexc(BaseException):
                def __str__(self):
                    return "Ha Ha fooled you, I'm a broken repr()."

            class BrokenRepr1(object):
                foo=0
                def __repr__(self):
                    raise reprexc

            class TestBrokenClass(object):
                def test_explicit_bad_repr(self):
                    t = BrokenRepr1()
                    with pytest.raises(BaseException, match="broken repr"):
                        repr(t)

                def test_implicit_bad_repr1(self):
                    t = BrokenRepr1()
                    assert t.foo == 1
        """
        )
        reprec = pytester.inline_run(p)
        passed, skipped, failed = reprec.listoutcomes()
        assert (len(passed), len(skipped), len(failed)) == (1, 0, 1)
        out = failed[0].longrepr.reprcrash.message  # type: ignore[union-attr]
        # The failing assertion shows the "<[exc raised in repr()] ...>"
        # placeholder instead of crashing the reporting machinery.
        assert out.find("<[reprexc() raised in repr()] BrokenRepr1") != -1

    def test_broken_repr_with_showlocals_verbose(self, pytester: Pytester) -> None:
        """--showlocals also guards against a raising __repr__ in the locals listing."""
        p = pytester.makepyfile(
            """
            class ObjWithErrorInRepr:
                def __repr__(self):
                    raise NotImplementedError

            def test_repr_error():
                x = ObjWithErrorInRepr()
                assert x == "value"
        """
        )
        reprec = pytester.inline_run("--showlocals", "-vv", p)
        passed, skipped, failed = reprec.listoutcomes()
        assert (len(passed), len(skipped), len(failed)) == (0, 0, 1)
        entries = failed[0].longrepr.reprtraceback.reprentries  # type: ignore[union-attr]
        assert len(entries) == 1
        repr_locals = entries[0].reprlocals
        assert repr_locals.lines
        assert len(repr_locals.lines) == 1
        # The local "x" is shown with the safe placeholder repr.
        assert repr_locals.lines[0].startswith(
            "x = <[NotImplementedError() raised in repr()] ObjWithErrorInRepr"
        )

    def test_skip_file_by_conftest(self, pytester: Pytester) -> None:
        """pytest.skip() inside pytest_collect_file skips the file, not the session."""
        pytester.makepyfile(
            conftest="""
            import pytest
            def pytest_collect_file():
                pytest.skip("intentional")
        """,
            test_file="""
            def test_one(): pass
        """,
        )
        try:
            reprec = pytester.inline_run(pytester.path)
        except pytest.skip.Exception:  # pragma: no cover
            # The skip must be converted into a collect report, never propagate.
            pytest.fail("wrong skipped caught")
        reports = reprec.getreports("pytest_collectreport")
        assert len(reports) == 1
        assert reports[0].skipped
class TestNewSession(SessionTests):
    """All of SessionTests plus checks for execution order, --collect-only and -x/--maxfail."""

    def test_order_of_execution(self, pytester: Pytester) -> None:
        """Tests run in definition order: module-level functions, then class methods."""
        reprec = pytester.inline_runsource(
            """
            values = []
            def test_1():
                values.append(1)
            def test_2():
                values.append(2)
            def test_3():
                assert values == [1,2]
            class Testmygroup(object):
                reslist = values
                def test_1(self):
                    self.reslist.append(1)
                def test_2(self):
                    self.reslist.append(2)
                def test_3(self):
                    self.reslist.append(3)
                def test_4(self):
                    assert self.reslist == [1,2,1,2,3]
            """
        )
        passed, skipped, failed = reprec.countoutcomes()
        # The in-source asserts only pass if items execute in definition order.
        assert failed == skipped == 0
        assert passed == 7

    def test_collect_only_with_various_situations(self, pytester: Pytester) -> None:
        """--collect-only reports items and collection errors but executes nothing."""
        p = pytester.makepyfile(
            test_one="""
            def test_one():
                raise ValueError()

            class TestX(object):
                def test_method_one(self):
                    pass

            class TestY(TestX):
                pass
        """,
            test_three="xxxdsadsadsadsa",
            __init__="",
        )
        reprec = pytester.inline_run("--collect-only", p.parent)

        itemstarted = reprec.getcalls("pytest_itemcollected")
        # test_one, TestX.test_method_one and the inherited TestY.test_method_one.
        assert len(itemstarted) == 3
        # Nothing may actually run under --collect-only.
        assert not reprec.getreports("pytest_runtest_logreport")
        started = reprec.getcalls("pytest_collectstart")
        finished = reprec.getreports("pytest_collectreport")
        assert len(started) == len(finished)
        assert len(started) == 6
        # test_three is not importable and yields exactly one collect failure.
        colfail = [x for x in finished if x.failed]
        assert len(colfail) == 1

    def test_minus_x_import_error(self, pytester: Pytester) -> None:
        """-x stops collection after the first import (collection) error."""
        pytester.makepyfile(__init__="")
        pytester.makepyfile(test_one="xxxx", test_two="yyyy")
        reprec = pytester.inline_run("-x", pytester.path)
        finished = reprec.getreports("pytest_collectreport")
        colfail = [x for x in finished if x.failed]
        # Only the first broken module is reported; -x aborted before the second.
        assert len(colfail) == 1

    def test_minus_x_overridden_by_maxfail(self, pytester: Pytester) -> None:
        """When both are given, --maxfail=2 overrides -x and permits two failures."""
        pytester.makepyfile(__init__="")
        pytester.makepyfile(test_one="xxxx", test_two="yyyy", test_third="zzz")
        reprec = pytester.inline_run("-x", "--maxfail=2", pytester.path)
        finished = reprec.getreports("pytest_collectreport")
        colfail = [x for x in finished if x.failed]
        assert len(colfail) == 2
def test_plugin_specify(pytester: Pytester) -> None:
    """``-p`` naming a nonexistent plugin fails already at config-parse time.

    The import error must surface from ``parseconfig`` itself, before any
    configure step runs.
    """
    with pytest.raises(ImportError):
        pytester.parseconfig("-p", "nqweotexistent")
def test_plugin_already_exists(pytester: Pytester) -> None:
    """Explicitly requesting an always-loaded plugin (``-p terminal``) is harmless."""
    cfg = pytester.parseconfig("-p", "terminal")
    assert cfg.option.plugins == ["terminal"]
    # Configuring and unconfiguring must not blow up on the duplicate plugin.
    cfg._do_configure()
    cfg._ensure_unconfigure()
def test_exclude(pytester: Pytester) -> None:
    """``--ignore`` skips the named directories even though they contain broken files.

    Both directories hold modules with syntax errors; the run only succeeds if
    pytest never tries to collect them.
    """
    hellodir = pytester.mkdir("hello")
    # Explicit encoding: bare write_text() uses the locale encoding, which is
    # fragile on non-UTF-8 systems and raises EncodingWarning under PEP 597.
    hellodir.joinpath("test_hello.py").write_text("x y syntaxerror", encoding="utf-8")
    hello2dir = pytester.mkdir("hello2")
    hello2dir.joinpath("test_hello2.py").write_text("x y syntaxerror", encoding="utf-8")
    pytester.makepyfile(test_ok="def test_pass(): pass")
    result = pytester.runpytest("--ignore=hello", "--ignore=hello2")
    assert result.ret == 0
    result.stdout.fnmatch_lines(["*1 passed*"])
def test_exclude_glob(pytester: Pytester) -> None:
    """``--ignore-glob`` skips every directory matching the pattern.

    The glob ``*h[ea]llo*`` must match hello, hello2 and hallo3 (all broken)
    while the unmatched ``sub`` directory's broken file still... is matched too
    by the leading/trailing wildcards?  No: ``sub/test_hello4.py`` matches via
    its *file* name, so only test_ok.py remains collectable.
    """
    hellodir = pytester.mkdir("hello")
    # Explicit encoding: bare write_text() uses the locale encoding, which is
    # fragile on non-UTF-8 systems and raises EncodingWarning under PEP 597.
    hellodir.joinpath("test_hello.py").write_text("x y syntaxerror", encoding="utf-8")
    hello2dir = pytester.mkdir("hello2")
    hello2dir.joinpath("test_hello2.py").write_text("x y syntaxerror", encoding="utf-8")
    hello3dir = pytester.mkdir("hallo3")
    hello3dir.joinpath("test_hello3.py").write_text("x y syntaxerror", encoding="utf-8")
    subdir = pytester.mkdir("sub")
    subdir.joinpath("test_hello4.py").write_text("x y syntaxerror", encoding="utf-8")
    pytester.makepyfile(test_ok="def test_pass(): pass")
    result = pytester.runpytest("--ignore-glob=*h[ea]llo*")
    assert result.ret == 0
    result.stdout.fnmatch_lines(["*1 passed*"])
def test_deselect(pytester: Pytester) -> None:
    """--deselect removes individual items (including parametrized ids) from the run."""
    pytester.makepyfile(
        test_a="""
        import pytest
        def test_a1(): pass
        @pytest.mark.parametrize('b', range(3))
        def test_a2(b): pass
        class TestClass:
            def test_c1(self): pass
            def test_c2(self): pass
        """
    )
    run = pytester.runpytest(
        "-v",
        "--deselect=test_a.py::test_a2[1]",
        "--deselect=test_a.py::test_a2[2]",
        "--deselect=test_a.py::TestClass::test_c1",
    )
    assert run.ret == 0
    run.stdout.fnmatch_lines(["*3 passed, 3 deselected*"])
    # None of the deselected parametrized ids may show up in the verbose output.
    deselected_ids = ("test_a.py::test_a2[1]", "test_a.py::test_a2[2]")
    assert all(not line.startswith(deselected_ids) for line in run.stdout.lines)
def test_sessionfinish_with_start(pytester: Pytester) -> None:
    """pytest_sessionfinish sees the same cwd that pytest_sessionstart recorded.

    The conftest chdirs away during sessionstart; its sessionfinish hook then
    asserts the cwd matches the originally recorded one, implying the working
    directory was restored in between (verified inside the conftest itself —
    a failure there would change the exit code checked below).
    """
    pytester.makeconftest(
        """
        import os
        values = []
        def pytest_sessionstart():
            values.append(os.getcwd())
            os.chdir("..")

        def pytest_sessionfinish():
            assert values[0] == os.getcwd()
    """
    )
    res = pytester.runpytest("--collect-only")
    # No test files exist, so a clean run exits with NO_TESTS_COLLECTED.
    assert res.ret == ExitCode.NO_TESTS_COLLECTED
def test_collection_args_do_not_duplicate_modules(pytester: Pytester) -> None:
    """Test that when multiple collection args are specified on the command line
    for the same module, only a single Module collector is created.

    Regression test for #723, #3358.
    """
    pytester.makepyfile(
        **{
            "d/test_it": """
            def test_1(): pass
            def test_2(): pass
            """
        }
    )

    # Two nodeids inside the same module: exactly one <Module> collector.
    result = pytester.runpytest(
        "--collect-only",
        "d/test_it.py::test_1",
        "d/test_it.py::test_2",
    )
    result.stdout.fnmatch_lines(
        [
            "<Module d/test_it.py>",
            "  <Function test_1>",
            "  <Function test_2>",
        ],
        consecutive=True,
    )

    # Different, but related case: duplicated directory args with
    # --keep-duplicates still yield a single Module, with duplicated items.
    result = pytester.runpytest(
        "--collect-only",
        "--keep-duplicates",
        "d",
        "d",
    )
    result.stdout.fnmatch_lines(
        [
            "<Module d/test_it.py>",
            "  <Function test_1>",
            "  <Function test_2>",
            "  <Function test_1>",
            "  <Function test_2>",
        ],
        consecutive=True,
    )
@pytest.mark.parametrize("path", ["root", "{relative}/root", "{environment}/root"])
def test_rootdir_option_arg(
pytester: Pytester, monkeypatch: MonkeyPatch, path: str
) -> None:
monkeypatch.setenv("PY_ROOTDIR_PATH", str(pytester.path))
path = path.format(relative=str(pytester.path), environment="$PY_ROOTDIR_PATH")
rootdir = pytester.path / "root" / "tests"
rootdir.mkdir(parents=True)
pytester.makepyfile(
"""
import os
def test_one():
assert 1
"""
)
result = pytester.runpytest(f"--rootdir={path}")
result.stdout.fnmatch_lines(
[
f"*rootdir: {pytester.path}/root",
"root/test_rootdir_option_arg.py *",
"*1 passed*",
]
)
def test_rootdir_wrong_option_arg(pytester: Pytester) -> None:
result = pytester.runpytest("--rootdir=wrong_dir")
result.stderr.fnmatch_lines(
["*Directory *wrong_dir* not found. Check your '--rootdir' option.*"]
)
| 32.787081 | 90 | 0.568771 | import pytest
from _pytest.config import ExitCode
from _pytest.monkeypatch import MonkeyPatch
from _pytest.pytester import Pytester
class SessionTests:
def test_basic_testitem_events(self, pytester: Pytester) -> None:
tfile = pytester.makepyfile(
"""
def test_one():
pass
def test_one_one():
assert 0
def test_other():
raise ValueError(23)
class TestClass(object):
def test_two(self, someargs):
pass
"""
)
reprec = pytester.inline_run(tfile)
passed, skipped, failed = reprec.listoutcomes()
assert len(skipped) == 0
assert len(passed) == 1
assert len(failed) == 3
def end(x):
return x.nodeid.split("::")[-1]
assert end(failed[0]) == "test_one_one"
assert end(failed[1]) == "test_other"
itemstarted = reprec.getcalls("pytest_itemcollected")
assert len(itemstarted) == 4
def test_nested_import_error(self, pytester: Pytester) -> None:
tfile = pytester.makepyfile(
"""
import import_fails
def test_this():
assert import_fails.a == 1
""",
import_fails="""
import does_not_work
a = 1
""",
)
reprec = pytester.inline_run(tfile)
values = reprec.getfailedcollections()
assert len(values) == 1
out = str(values[0].longrepr)
assert out.find("does_not_work") != -1
def test_raises_output(self, pytester: Pytester) -> None:
reprec = pytester.inline_runsource(
"""
import pytest
def test_raises_doesnt():
pytest.raises(ValueError, int, "3")
"""
)
passed, skipped, failed = reprec.listoutcomes()
assert len(failed) == 1
out = failed[0].longrepr.reprcrash.message
assert "DID NOT RAISE" in out
def test_syntax_error_module(self, pytester: Pytester) -> None:
reprec = pytester.inline_runsource("this is really not python")
values = reprec.getfailedcollections()
assert len(values) == 1
out = str(values[0].longrepr)
assert out.find("not python") != -1
def test_exit_first_problem(self, pytester: Pytester) -> None:
reprec = pytester.inline_runsource(
"""
def test_one(): assert 0
def test_two(): assert 0
""",
"--exitfirst",
)
passed, skipped, failed = reprec.countoutcomes()
assert failed == 1
assert passed == skipped == 0
def test_maxfail(self, pytester: Pytester) -> None:
reprec = pytester.inline_runsource(
"""
def test_one(): assert 0
def test_two(): assert 0
def test_three(): assert 0
""",
"--maxfail=2",
)
passed, skipped, failed = reprec.countoutcomes()
assert failed == 2
assert passed == skipped == 0
def test_broken_repr(self, pytester: Pytester) -> None:
p = pytester.makepyfile(
"""
import pytest
class reprexc(BaseException):
def __str__(self):
return "Ha Ha fooled you, I'm a broken repr()."
class BrokenRepr1(object):
foo=0
def __repr__(self):
raise reprexc
class TestBrokenClass(object):
def test_explicit_bad_repr(self):
t = BrokenRepr1()
with pytest.raises(BaseException, match="broken repr"):
repr(t)
def test_implicit_bad_repr1(self):
t = BrokenRepr1()
assert t.foo == 1
"""
)
reprec = pytester.inline_run(p)
passed, skipped, failed = reprec.listoutcomes()
assert (len(passed), len(skipped), len(failed)) == (1, 0, 1)
out = failed[0].longrepr.reprcrash.message # type: ignore[union-attr]
assert out.find("<[reprexc() raised in repr()] BrokenRepr1") != -1
def test_broken_repr_with_showlocals_verbose(self, pytester: Pytester) -> None:
p = pytester.makepyfile(
"""
class ObjWithErrorInRepr:
def __repr__(self):
raise NotImplementedError
def test_repr_error():
x = ObjWithErrorInRepr()
assert x == "value"
"""
)
reprec = pytester.inline_run("--showlocals", "-vv", p)
passed, skipped, failed = reprec.listoutcomes()
assert (len(passed), len(skipped), len(failed)) == (0, 0, 1)
entries = failed[0].longrepr.reprtraceback.reprentries # type: ignore[union-attr]
assert len(entries) == 1
repr_locals = entries[0].reprlocals
assert repr_locals.lines
assert len(repr_locals.lines) == 1
assert repr_locals.lines[0].startswith(
"x = <[NotImplementedError() raised in repr()] ObjWithErrorInRepr"
)
def test_skip_file_by_conftest(self, pytester: Pytester) -> None:
pytester.makepyfile(
conftest="""
import pytest
def pytest_collect_file():
pytest.skip("intentional")
""",
test_file="""
def test_one(): pass
""",
)
try:
reprec = pytester.inline_run(pytester.path)
except pytest.skip.Exception: # pragma: no cover
pytest.fail("wrong skipped caught")
reports = reprec.getreports("pytest_collectreport")
assert len(reports) == 1
assert reports[0].skipped
class TestNewSession(SessionTests):
def test_order_of_execution(self, pytester: Pytester) -> None:
reprec = pytester.inline_runsource(
"""
values = []
def test_1():
values.append(1)
def test_2():
values.append(2)
def test_3():
assert values == [1,2]
class Testmygroup(object):
reslist = values
def test_1(self):
self.reslist.append(1)
def test_2(self):
self.reslist.append(2)
def test_3(self):
self.reslist.append(3)
def test_4(self):
assert self.reslist == [1,2,1,2,3]
"""
)
passed, skipped, failed = reprec.countoutcomes()
assert failed == skipped == 0
assert passed == 7
def test_collect_only_with_various_situations(self, pytester: Pytester) -> None:
p = pytester.makepyfile(
test_one="""
def test_one():
raise ValueError()
class TestX(object):
def test_method_one(self):
pass
class TestY(TestX):
pass
""",
test_three="xxxdsadsadsadsa",
__init__="",
)
reprec = pytester.inline_run("--collect-only", p.parent)
itemstarted = reprec.getcalls("pytest_itemcollected")
assert len(itemstarted) == 3
assert not reprec.getreports("pytest_runtest_logreport")
started = reprec.getcalls("pytest_collectstart")
finished = reprec.getreports("pytest_collectreport")
assert len(started) == len(finished)
assert len(started) == 6
colfail = [x for x in finished if x.failed]
assert len(colfail) == 1
def test_minus_x_import_error(self, pytester: Pytester) -> None:
pytester.makepyfile(__init__="")
pytester.makepyfile(test_one="xxxx", test_two="yyyy")
reprec = pytester.inline_run("-x", pytester.path)
finished = reprec.getreports("pytest_collectreport")
colfail = [x for x in finished if x.failed]
assert len(colfail) == 1
def test_minus_x_overridden_by_maxfail(self, pytester: Pytester) -> None:
pytester.makepyfile(__init__="")
pytester.makepyfile(test_one="xxxx", test_two="yyyy", test_third="zzz")
reprec = pytester.inline_run("-x", "--maxfail=2", pytester.path)
finished = reprec.getreports("pytest_collectreport")
colfail = [x for x in finished if x.failed]
assert len(colfail) == 2
def test_plugin_specify(pytester: Pytester) -> None:
with pytest.raises(ImportError):
pytester.parseconfig("-p", "nqweotexistent")
# pytest.raises(ImportError,
# "config.do_configure(config)"
# )
def test_plugin_already_exists(pytester: Pytester) -> None:
config = pytester.parseconfig("-p", "terminal")
assert config.option.plugins == ["terminal"]
config._do_configure()
config._ensure_unconfigure()
def test_exclude(pytester: Pytester) -> None:
hellodir = pytester.mkdir("hello")
hellodir.joinpath("test_hello.py").write_text("x y syntaxerror")
hello2dir = pytester.mkdir("hello2")
hello2dir.joinpath("test_hello2.py").write_text("x y syntaxerror")
pytester.makepyfile(test_ok="def test_pass(): pass")
result = pytester.runpytest("--ignore=hello", "--ignore=hello2")
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
def test_exclude_glob(pytester: Pytester) -> None:
hellodir = pytester.mkdir("hello")
hellodir.joinpath("test_hello.py").write_text("x y syntaxerror")
hello2dir = pytester.mkdir("hello2")
hello2dir.joinpath("test_hello2.py").write_text("x y syntaxerror")
hello3dir = pytester.mkdir("hallo3")
hello3dir.joinpath("test_hello3.py").write_text("x y syntaxerror")
subdir = pytester.mkdir("sub")
subdir.joinpath("test_hello4.py").write_text("x y syntaxerror")
pytester.makepyfile(test_ok="def test_pass(): pass")
result = pytester.runpytest("--ignore-glob=*h[ea]llo*")
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
def test_deselect(pytester: Pytester) -> None:
pytester.makepyfile(
test_a="""
import pytest
def test_a1(): pass
@pytest.mark.parametrize('b', range(3))
def test_a2(b): pass
class TestClass:
def test_c1(self): pass
def test_c2(self): pass
"""
)
result = pytester.runpytest(
"-v",
"--deselect=test_a.py::test_a2[1]",
"--deselect=test_a.py::test_a2[2]",
"--deselect=test_a.py::TestClass::test_c1",
)
assert result.ret == 0
result.stdout.fnmatch_lines(["*3 passed, 3 deselected*"])
for line in result.stdout.lines:
assert not line.startswith(("test_a.py::test_a2[1]", "test_a.py::test_a2[2]"))
def test_sessionfinish_with_start(pytester: Pytester) -> None:
pytester.makeconftest(
"""
import os
values = []
def pytest_sessionstart():
values.append(os.getcwd())
os.chdir("..")
def pytest_sessionfinish():
assert values[0] == os.getcwd()
"""
)
res = pytester.runpytest("--collect-only")
assert res.ret == ExitCode.NO_TESTS_COLLECTED
def test_collection_args_do_not_duplicate_modules(pytester: Pytester) -> None:
pytester.makepyfile(
**{
"d/test_it": """
def test_1(): pass
def test_2(): pass
"""
}
)
result = pytester.runpytest(
"--collect-only",
"d/test_it.py::test_1",
"d/test_it.py::test_2",
)
result.stdout.fnmatch_lines(
[
"<Module d/test_it.py>",
" <Function test_1>",
" <Function test_2>",
],
consecutive=True,
)
# Different, but related case.
result = pytester.runpytest(
"--collect-only",
"--keep-duplicates",
"d",
"d",
)
result.stdout.fnmatch_lines(
[
"<Module d/test_it.py>",
" <Function test_1>",
" <Function test_2>",
" <Function test_1>",
" <Function test_2>",
],
consecutive=True,
)
@pytest.mark.parametrize("path", ["root", "{relative}/root", "{environment}/root"])
def test_rootdir_option_arg(
pytester: Pytester, monkeypatch: MonkeyPatch, path: str
) -> None:
monkeypatch.setenv("PY_ROOTDIR_PATH", str(pytester.path))
path = path.format(relative=str(pytester.path), environment="$PY_ROOTDIR_PATH")
rootdir = pytester.path / "root" / "tests"
rootdir.mkdir(parents=True)
pytester.makepyfile(
"""
import os
def test_one():
assert 1
"""
)
result = pytester.runpytest(f"--rootdir={path}")
result.stdout.fnmatch_lines(
[
f"*rootdir: {pytester.path}/root",
"root/test_rootdir_option_arg.py *",
"*1 passed*",
]
)
def test_rootdir_wrong_option_arg(pytester: Pytester) -> None:
result = pytester.runpytest("--rootdir=wrong_dir")
result.stderr.fnmatch_lines(
["*Directory *wrong_dir* not found. Check your '--rootdir' option.*"]
)
| true | true |
f73dc9b8993bc2850bf810fe3a5b6a1c9216dd6f | 4,594 | py | Python | NLP/normalization.py | jmelm93/MLTS | 5004995c6355ba066abaeb3fc45cf48474a8463c | [
"Apache-2.0"
] | 117 | 2018-11-25T02:30:31.000Z | 2022-03-24T08:05:21.000Z | NLP/normalization.py | jmelm93/MLTS | 5004995c6355ba066abaeb3fc45cf48474a8463c | [
"Apache-2.0"
] | 4 | 2019-03-01T04:12:54.000Z | 2019-12-10T21:09:30.000Z | NLP/normalization.py | jmelm93/MLTS | 5004995c6355ba066abaeb3fc45cf48474a8463c | [
"Apache-2.0"
] | 43 | 2019-01-28T22:07:55.000Z | 2022-03-18T21:48:00.000Z | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 26 20:45:10 2016
@author: DIP
"""
from lib.contractions import CONTRACTION_MAP
import re
import nltk
import string
from nltk.stem import WordNetLemmatizer
from html.parser import HTMLParser
import unicodedata
stopword_list = nltk.corpus.stopwords.words('english')
wnl = WordNetLemmatizer()
html_parser = HTMLParser()
def tokenize_text(text):
tokens = nltk.word_tokenize(text)
tokens = [token.strip() for token in tokens]
return tokens
def expand_contractions(text, contraction_mapping):
contractions_pattern = re.compile('({})'.format('|'.join(contraction_mapping.keys())),
flags=re.IGNORECASE|re.DOTALL)
def expand_match(contraction):
match = contraction.group(0)
first_char = match[0]
expanded_contraction = contraction_mapping.get(match)\
if contraction_mapping.get(match)\
else contraction_mapping.get(match.lower())
expanded_contraction = first_char+expanded_contraction[1:]
return expanded_contraction
expanded_text = contractions_pattern.sub(expand_match, text)
expanded_text = re.sub("'", "", expanded_text)
return expanded_text
from nltk.corpus import wordnet as wn
import en_core_web_sm
nlp = en_core_web_sm.load()
# Annotate text tokens with POS tags
def pos_tag_text(text):
def penn_to_wn_tags(pos_tag):
if pos_tag.startswith('ADJ'):
return wn.ADJ
elif pos_tag.startswith('VERB'):
return wn.VERB
elif pos_tag.startswith('NOUN'):
return wn.NOUN
elif pos_tag.startswith('ADV'):
return wn.ADV
else:
return None
tagged_text = nlp(text)
tagged_lower_text = [(str(word).lower(), penn_to_wn_tags(word.pos_))
for word in
tagged_text]
return tagged_lower_text
# lemmatize text based on POS tags
def lemmatize_text(text):
pos_tagged_text = pos_tag_text(text)
lemmatized_tokens = [wnl.lemmatize(word, pos_tag) if pos_tag
else word
for word, pos_tag in pos_tagged_text]
lemmatized_text = ' '.join(lemmatized_tokens)
return lemmatized_text
def remove_special_characters(text):
tokens = tokenize_text(text)
pattern = re.compile('[{}]'.format(re.escape(string.punctuation)))
filtered_tokens = filter(None, [pattern.sub(' ', token) for token in tokens])
filtered_text = ' '.join(filtered_tokens)
return filtered_text
def remove_stopwords(text):
tokens = tokenize_text(text)
filtered_tokens = [token for token in tokens if token not in stopword_list]
filtered_text = ' '.join(filtered_tokens)
return filtered_text
def sort_terms(text):
tokens = tokenize_text(text)
tokens.sort()
filtered_text = ' '.join(tokens)
return filtered_text
def keep_text_characters(text):
filtered_tokens = []
tokens = tokenize_text(text)
for token in tokens:
if re.search('[a-zA-Z]', token):
filtered_tokens.append(token)
filtered_text = ' '.join(filtered_tokens)
return filtered_text
def unescape_html(parser, text):
return parser.unescape(text)
def normalize_corpus(corpus, lemmatize=True,
only_text_chars=False,
tokenize=False, sort_text=False):
normalized_corpus = []
for text in corpus:
text = html_parser.unescape(text)
text = expand_contractions(text, CONTRACTION_MAP)
if lemmatize:
text = lemmatize_text(text)
else:
text = text.lower()
text = remove_special_characters(text)
text = remove_stopwords(text)
if sort_text:
text = sort_terms(text)
if only_text_chars:
text = keep_text_characters(text)
if tokenize:
text = tokenize_text(text)
normalized_corpus.append(text)
else:
normalized_corpus.append(text)
return normalized_corpus
def parse_document(document):
document = re.sub('\n', ' ', document)
if isinstance(document, str):
document = document
elif isinstance(document, unicode):
return unicodedata.normalize('NFKD', document).encode('ascii', 'ignore')
else:
raise ValueError('Document is not string or unicode!')
document = document.strip()
sentences = nltk.sent_tokenize(document)
sentences = [sentence.strip() for sentence in sentences]
return sentences
| 30.026144 | 90 | 0.651937 |
from lib.contractions import CONTRACTION_MAP
import re
import nltk
import string
from nltk.stem import WordNetLemmatizer
from html.parser import HTMLParser
import unicodedata
stopword_list = nltk.corpus.stopwords.words('english')
wnl = WordNetLemmatizer()
html_parser = HTMLParser()
def tokenize_text(text):
tokens = nltk.word_tokenize(text)
tokens = [token.strip() for token in tokens]
return tokens
def expand_contractions(text, contraction_mapping):
contractions_pattern = re.compile('({})'.format('|'.join(contraction_mapping.keys())),
flags=re.IGNORECASE|re.DOTALL)
def expand_match(contraction):
match = contraction.group(0)
first_char = match[0]
expanded_contraction = contraction_mapping.get(match)\
if contraction_mapping.get(match)\
else contraction_mapping.get(match.lower())
expanded_contraction = first_char+expanded_contraction[1:]
return expanded_contraction
expanded_text = contractions_pattern.sub(expand_match, text)
expanded_text = re.sub("'", "", expanded_text)
return expanded_text
from nltk.corpus import wordnet as wn
import en_core_web_sm
nlp = en_core_web_sm.load()
# Annotate text tokens with POS tags
def pos_tag_text(text):
def penn_to_wn_tags(pos_tag):
if pos_tag.startswith('ADJ'):
return wn.ADJ
elif pos_tag.startswith('VERB'):
return wn.VERB
elif pos_tag.startswith('NOUN'):
return wn.NOUN
elif pos_tag.startswith('ADV'):
return wn.ADV
else:
return None
tagged_text = nlp(text)
tagged_lower_text = [(str(word).lower(), penn_to_wn_tags(word.pos_))
for word in
tagged_text]
return tagged_lower_text
# lemmatize text based on POS tags
def lemmatize_text(text):
pos_tagged_text = pos_tag_text(text)
lemmatized_tokens = [wnl.lemmatize(word, pos_tag) if pos_tag
else word
for word, pos_tag in pos_tagged_text]
lemmatized_text = ' '.join(lemmatized_tokens)
return lemmatized_text
def remove_special_characters(text):
tokens = tokenize_text(text)
pattern = re.compile('[{}]'.format(re.escape(string.punctuation)))
filtered_tokens = filter(None, [pattern.sub(' ', token) for token in tokens])
filtered_text = ' '.join(filtered_tokens)
return filtered_text
def remove_stopwords(text):
tokens = tokenize_text(text)
filtered_tokens = [token for token in tokens if token not in stopword_list]
filtered_text = ' '.join(filtered_tokens)
return filtered_text
def sort_terms(text):
tokens = tokenize_text(text)
tokens.sort()
filtered_text = ' '.join(tokens)
return filtered_text
def keep_text_characters(text):
filtered_tokens = []
tokens = tokenize_text(text)
for token in tokens:
if re.search('[a-zA-Z]', token):
filtered_tokens.append(token)
filtered_text = ' '.join(filtered_tokens)
return filtered_text
def unescape_html(parser, text):
return parser.unescape(text)
def normalize_corpus(corpus, lemmatize=True,
only_text_chars=False,
tokenize=False, sort_text=False):
normalized_corpus = []
for text in corpus:
text = html_parser.unescape(text)
text = expand_contractions(text, CONTRACTION_MAP)
if lemmatize:
text = lemmatize_text(text)
else:
text = text.lower()
text = remove_special_characters(text)
text = remove_stopwords(text)
if sort_text:
text = sort_terms(text)
if only_text_chars:
text = keep_text_characters(text)
if tokenize:
text = tokenize_text(text)
normalized_corpus.append(text)
else:
normalized_corpus.append(text)
return normalized_corpus
def parse_document(document):
document = re.sub('\n', ' ', document)
if isinstance(document, str):
document = document
elif isinstance(document, unicode):
return unicodedata.normalize('NFKD', document).encode('ascii', 'ignore')
else:
raise ValueError('Document is not string or unicode!')
document = document.strip()
sentences = nltk.sent_tokenize(document)
sentences = [sentence.strip() for sentence in sentences]
return sentences
| true | true |
f73dcaab37aa6c834598f92d22bd2bc7a51c87e6 | 171 | py | Python | setup.py | softwarefactory-project/rpmreq | b9b30cf6a184929db23ac86c8cc037592ee8b6be | [
"Apache-2.0"
] | null | null | null | setup.py | softwarefactory-project/rpmreq | b9b30cf6a184929db23ac86c8cc037592ee8b6be | [
"Apache-2.0"
] | null | null | null | setup.py | softwarefactory-project/rpmreq | b9b30cf6a184929db23ac86c8cc037592ee8b6be | [
"Apache-2.0"
] | 1 | 2019-03-10T10:07:04.000Z | 2019-03-10T10:07:04.000Z | #!/usr/bin/env python
import re
import setuptools
import sys
setuptools.setup(
setup_requires=['pbr', 'pytest-runner'],
tests_require=['pytest'],
pbr=True)
| 14.25 | 44 | 0.690058 |
import re
import setuptools
import sys
setuptools.setup(
setup_requires=['pbr', 'pytest-runner'],
tests_require=['pytest'],
pbr=True)
| true | true |
f73dcb9e0b920f37f92eabaa99983df378c7e367 | 879 | py | Python | src/spaceone/secret/manager/identity_manager.py | ku524/secret | c5dad49f40ab1cbbaa0b8f01222de10ae73d1fb1 | [
"Apache-2.0"
] | 7 | 2020-06-04T23:01:12.000Z | 2021-01-31T08:41:29.000Z | src/spaceone/secret/manager/identity_manager.py | ku524/secret | c5dad49f40ab1cbbaa0b8f01222de10ae73d1fb1 | [
"Apache-2.0"
] | 2 | 2020-08-05T13:31:53.000Z | 2021-03-07T15:15:14.000Z | src/spaceone/secret/manager/identity_manager.py | ku524/secret | c5dad49f40ab1cbbaa0b8f01222de10ae73d1fb1 | [
"Apache-2.0"
] | 6 | 2020-06-10T01:59:35.000Z | 2021-11-25T06:30:35.000Z | # -*- coding: utf-8 -*-
from spaceone.core.manager import BaseManager
from spaceone.secret.connector.identity_connector import IdentityConnector
class IdentityManager(BaseManager):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.identity_conn: IdentityConnector = self.locator.get_connector('IdentityConnector')
def get_service_account(self, service_account_id, domain_id):
return self.identity_conn.get_service_account(service_account_id, domain_id)
def list_service_accounts(self, query, domain_id):
return self.identity_conn.list_service_accounts(query, domain_id)
def get_project(self, project_id, domain_id):
return self.identity_conn.get_project(project_id, domain_id)
def list_projects(self, query, domain_id):
return self.identity_conn.list_projects(query, domain_id)
| 38.217391 | 95 | 0.759954 |
from spaceone.core.manager import BaseManager
from spaceone.secret.connector.identity_connector import IdentityConnector
class IdentityManager(BaseManager):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.identity_conn: IdentityConnector = self.locator.get_connector('IdentityConnector')
def get_service_account(self, service_account_id, domain_id):
return self.identity_conn.get_service_account(service_account_id, domain_id)
def list_service_accounts(self, query, domain_id):
return self.identity_conn.list_service_accounts(query, domain_id)
def get_project(self, project_id, domain_id):
return self.identity_conn.get_project(project_id, domain_id)
def list_projects(self, query, domain_id):
return self.identity_conn.list_projects(query, domain_id)
| true | true |
f73dcc30d9dec0a03cff19c93fb142a9a0f3ca37 | 313 | py | Python | hg_tweetfeeder/__init__.py | HaguDeGozaru/hagu-tweet-bot | 3e46551aaf5e9b63fafa4018bae5e8ca27a19f9c | [
"MIT"
] | null | null | null | hg_tweetfeeder/__init__.py | HaguDeGozaru/hagu-tweet-bot | 3e46551aaf5e9b63fafa4018bae5e8ca27a19f9c | [
"MIT"
] | null | null | null | hg_tweetfeeder/__init__.py | HaguDeGozaru/hagu-tweet-bot | 3e46551aaf5e9b63fafa4018bae5e8ca27a19f9c | [
"MIT"
] | null | null | null | ''' init for hg_tweetfeeder module '''
from hg_tweetfeeder.bot import TweetFeeder, BotFunctions, BotEvents
from hg_tweetfeeder.config import Config
from hg_tweetfeeder.file_io import LoadFromFile
from hg_tweetfeeder.flags import BotFunctions
__author__ = 'Ian M. <hagudegozaru@gmail.com>'
__version__ = '0.0.1'
| 31.3 | 67 | 0.811502 |
from hg_tweetfeeder.bot import TweetFeeder, BotFunctions, BotEvents
from hg_tweetfeeder.config import Config
from hg_tweetfeeder.file_io import LoadFromFile
from hg_tweetfeeder.flags import BotFunctions
__author__ = 'Ian M. <hagudegozaru@gmail.com>'
__version__ = '0.0.1'
| true | true |
f73dccc281d1bae392f9764d0472c5878d8027f0 | 155 | py | Python | src/fixed_income/util.py | Bocha84/fixed-income | 20489a43e17885045b7cfece221c49041b767ff3 | [
"Apache-2.0"
] | 7 | 2017-12-18T07:17:55.000Z | 2020-02-02T07:04:33.000Z | src/fixed_income/util.py | Bocha84/fixed-income | 20489a43e17885045b7cfece221c49041b767ff3 | [
"Apache-2.0"
] | null | null | null | src/fixed_income/util.py | Bocha84/fixed-income | 20489a43e17885045b7cfece221c49041b767ff3 | [
"Apache-2.0"
] | 5 | 2017-12-18T05:01:50.000Z | 2020-03-03T16:42:10.000Z | import re
def camel_to_snake(phrase):
s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", phrase)
return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()
| 22.142857 | 60 | 0.516129 | import re
def camel_to_snake(phrase):
s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", phrase)
return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()
| true | true |
f73dcd568d2ed4bd0144f958c062c778352b2d3c | 1,906 | py | Python | EISeg/eiseg/util/language.py | Amanda-Barbara/PaddleSeg | a7de36a5fae96011f5b188987670274101b8ede1 | [
"Apache-2.0"
] | 2 | 2021-11-26T09:02:58.000Z | 2021-12-10T08:35:37.000Z | EISeg/eiseg/util/language.py | Amanda-Barbara/PaddleSeg | a7de36a5fae96011f5b188987670274101b8ede1 | [
"Apache-2.0"
] | null | null | null | EISeg/eiseg/util/language.py | Amanda-Barbara/PaddleSeg | a7de36a5fae96011f5b188987670274101b8ede1 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path as osp
import re
from eiseg import pjpath
from collections import defaultdict
import json
from urllib import parse
import requests
class TransUI(object):
def __init__(self, is_trans=False):
super().__init__()
self.trans_dict = defaultdict(dict)
with open(
osp.join(pjpath, "config/zh_CN.EN"), "r",
encoding="utf-8") as f:
texts = f.readlines()
for txt in texts:
strs = txt.split("@")
self.trans_dict[strs[0].strip()] = strs[1].strip()
self.is_trans = is_trans
self.youdao_url = "http://fanyi.youdao.com/translate?&doctype=json&type=AUTO&i="
def put(self, zh_CN):
if self.is_trans == False:
return zh_CN
else:
try:
return str(self.trans_dict[zh_CN])
except:
return zh_CN
# 联网动态翻译
def tr(self, zh_CN):
try:
tr_url = self.youdao_url + parse.quote(zh_CN)
response = requests.get(tr_url)
js = json.loads(response.text)
result_EN = js["translateResult"][0][0]["tgt"]
return str(result_EN)
except:
return zh_CN
| 33.438596 | 89 | 0.603882 |
import os.path as osp
import re
from eiseg import pjpath
from collections import defaultdict
import json
from urllib import parse
import requests
class TransUI(object):
def __init__(self, is_trans=False):
super().__init__()
self.trans_dict = defaultdict(dict)
with open(
osp.join(pjpath, "config/zh_CN.EN"), "r",
encoding="utf-8") as f:
texts = f.readlines()
for txt in texts:
strs = txt.split("@")
self.trans_dict[strs[0].strip()] = strs[1].strip()
self.is_trans = is_trans
self.youdao_url = "http://fanyi.youdao.com/translate?&doctype=json&type=AUTO&i="
def put(self, zh_CN):
if self.is_trans == False:
return zh_CN
else:
try:
return str(self.trans_dict[zh_CN])
except:
return zh_CN
def tr(self, zh_CN):
try:
tr_url = self.youdao_url + parse.quote(zh_CN)
response = requests.get(tr_url)
js = json.loads(response.text)
result_EN = js["translateResult"][0][0]["tgt"]
return str(result_EN)
except:
return zh_CN
| true | true |
f73dcd747465e130f1fdbf7c3f64eefcbbd8024d | 11,580 | py | Python | mocasin/simulate/application.py | tud-ccc/mocasin | 6cf0a169e24d65d0fc859398f181dd500f928340 | [
"0BSD"
] | 1 | 2022-03-13T19:27:50.000Z | 2022-03-13T19:27:50.000Z | mocasin/simulate/application.py | tud-ccc/mocasin | 6cf0a169e24d65d0fc859398f181dd500f928340 | [
"0BSD"
] | null | null | null | mocasin/simulate/application.py | tud-ccc/mocasin | 6cf0a169e24d65d0fc859398f181dd500f928340 | [
"0BSD"
] | null | null | null | # Copyright (C) 2017 TU Dresden
# Licensed under the ISC license (see LICENSE.txt)
#
# Authors: Christian Menard
from mocasin.util import logging
from mocasin.simulate.channel import RuntimeChannel
from mocasin.simulate.process import RuntimeDataflowProcess
from mocasin.simulate.adapter import SimulateLoggerAdapter
log = logging.getLogger(__name__)
class RuntimeApplication(object):
"""Represents the runtime instance of an application.
Attributes:
name (str): the application name
system (System): the system the application is supposed to be
executed on
"""
def __init__(self, name, system):
"""Initialize a RuntimeApplication
Args:
name (str): the application name
system (System): the system the application is supposed to be
executed on
"""
self.name = name
self.system = system
@property
def env(self):
"""The simpy environment"""
return self.system.env
class RuntimeDataflowApplication(RuntimeApplication):
"""Represents the runtime instance of a dataflow application.
Attributes:
mapping (Mapping): a mapping object for this application
_pocesses (list of RuntimeProcess): a list of runtime processes that
belong to this application
_channeles (list of RuntimeChannel): a list of runtime channels that
belong to this application
Args:
name (str): the application name
graph (DataflowGraph): the graph denoting the dataflow application
mapping (Mapping): a mapping to the platform implemented by system
trace (DataflowTrace): the trace representing the execution
behavior of the application
system (System): the system the application is supposed to be
executed on
wait_for_initial_tokens (bool): If true, the application's processes
only start if initial tokens (first reads in the trace) are
available. Otherwise, they would start and immediately block.
"""
def __init__(
self,
name,
graph,
app_trace,
system,
wait_for_initial_tokens=False,
):
super().__init__(name, system)
self.graph = graph
self.trace = app_trace
self._is_new = True
self._is_running = False
self._is_paused = False
self._is_finished = False
# a dict mapping each process to a processor/scheduler
# leave it uninitialized for now, it will set by calling run()
self._process_mappings = None
log.debug("initialize new runtime application: %s", name)
# Instantiate all channels
self._channels = {}
for c in graph.channels():
self._channels[c.name] = RuntimeChannel(c.name, c.token_size, self)
# Instantiate all processes
self._processes = {}
for p in graph.processes():
proc = RuntimeDataflowProcess(p.name, self, wait_for_initial_tokens)
self._processes[p.name] = proc
for c in p.incoming_channels:
rc = self._channels[c.name]
proc.connect_to_incomming_channel(rc)
for c in p.outgoing_channels:
rc = self._channels[c.name]
proc.connect_to_outgoing_channel(rc)
self._log = SimulateLoggerAdapter(log, self.name, self.env)
def processes(self):
"""Get a list of all processes
:returns: a list of the application's processes
:rtype: list[RuntimeProcess]
"""
return self._processes.values()
def channels(self):
"""Get a list of all channels
:returns: a list of the application's channels
:rtype: list[RuntimeChannel]
"""
return self._channels.values()
def find_process(self, process_name):
"""Find a process by name"""
return self._processes[process_name]
def find_channel(self, channel_name):
"""Find a channel by name"""
return self._channels[channel_name]
    def run(self, mapping):
        """Start execution of this application

        Args:
            mapping (Mapping): the mapping used to deploy processes and
                channels; must refer to this application's graph and to
                the system's platform

        Raises:
            RuntimeError: if the mapping's graph or platform does not
                match this application

        Yields:
            ~simpy.events.Event: an event that is triggered when the
            application finishes execution.
        """
        # may only be called once, from the "new" state
        assert self.is_new()
        assert not self._process_mappings
        self._is_new = False
        self._is_running = True
        self._log.info(f"Application {self.name} starts")
        # map all processes and channels
        # first some sanity checks
        if mapping.graph != self.graph:
            raise RuntimeError("dataflow graph and mapping incompatible")
        if mapping.platform != self.system.platform:
            raise RuntimeError(
                f"Mapping {self.name} to an incompatible platform"
            )
        # map all channels:
        for channel in self.graph.channels():
            info = mapping.channel_info(channel)
            self.find_channel(channel.name).update_mapping_info(info)
        # map all processes
        self._process_mappings = {}
        for process in self.graph.processes():
            info = mapping.process_info(process)
            runtime_process = self.find_process(process.name)
            self._process_mappings[runtime_process] = info.affinity
        # start all the processes
        for process, processor in self._process_mappings.items():
            self.system.start_process(process, processor)
        # create an event that is triggered when all processes completed and
        # wait for this event
        finished = self.env.all_of([p.finished for p in self.processes()])
        finished.callbacks.append(self._app_finished_callback)
        yield finished
    def _app_finished_callback(self, event):
        """Mark the application as finished.

        Registered on the all_of event in run(); invoked once every
        process of the application has completed.

        :param event: the triggered event (unused)
        """
        self._log.info(f"Application {self.name} terminates")
        self._is_running = False
        self._is_finished = True
    def kill(self):
        """Stop execution of this application

        This method kills each running process of this application. The
        processes might not stop immediately as operations such as producing
        or consuming tokens are considered atomic and cannot be interrupted.
        The simpy process managing run will terminate as soon as all processes
        terminated.

        Examples:
            Usage::

                app_finished = env.process(app.run())
                yield env.timeout(1000000000)  # wait 1ms
                app.kill()
                # wait until the application stopped completely
                yield app_finished
        """
        for p in self.processes():
            p.kill()
def _is_state_valid(self):
"""Check that the application is exactly in one state."""
tup = (
self._is_new,
self._is_running,
self._is_paused,
self._is_finished,
)
return sum(tup) == 1
def is_new(self):
"""Check if the application has not yet started."""
assert self._is_state_valid()
return self._is_new
def is_running(self):
"""Check if the application is running."""
assert self._is_state_valid()
return self._is_running
def is_paused(self):
"""Check if the application is paused."""
assert self._is_state_valid()
return self._is_paused
def is_finished(self):
"""Check if the application is finished."""
assert self._is_state_valid()
return self._is_finished
    def update_mapping(self, mapping):
        """Update the mapping used by this application, causing a migration of
        processes.

        Only processes whose target processor actually changed are moved.
        May only be called while the application is running.

        Args:
            mapping (Mapping): an updated mapping to be used by the
                application
        """
        assert self.is_running()
        self._log.debug("Update mapping")
        # iterate over all processes
        for process in self._process_mappings.keys():
            current_processor = self._process_mappings[process]
            dataflow_process = self.graph.find_process(process.name)
            new_processor = mapping.process_info(dataflow_process).affinity
            # move the processes
            if current_processor != new_processor:
                self._log.debug(
                    f"Move process {process.full_name} from {current_processor}"
                    f" to {new_processor}"
                )
                self._process_mappings[process] = new_processor
                self.system.move_process(
                    process, current_processor, new_processor
                )
        # and also update the channel mappings
        self._update_channel_mappings(mapping)
    def _update_channel_mappings(self, mapping):
        """Propagate the channel info of *mapping* to every runtime channel.

        :param mapping: mapping providing channel_info() per graph channel
        """
        # iterate over all channels
        for name, channel in self._channels.items():
            dataflow_channel = self.graph.find_channel(name)
            mapping_info = mapping.channel_info(dataflow_channel)
            self._log.debug(
                f"Update channel of {channel.name} primitive to "
                f"{mapping_info.primitive.name}"
            )
            channel.update_mapping_info(mapping_info)
    def pause(self):
        """Pause the execution of this application

        The application can be resumed later by calling resume().
        May only be called while the application is running.
        """
        assert self.is_running()
        self._is_running = False
        self._is_paused = True
        self._log.debug("Pause")
        # simply pause all processes
        for process, current_processor in self._process_mappings.items():
            self.system.pause_process(process, current_processor)
    def resume(self, mapping=None):
        """Resume the execution of a paused application

        Args:
            mapping (Mapping, optional): an optional updated application
                mapping. If None, the application is resumed with its old
                mapping.
        """
        assert self.is_paused()
        self._is_paused = False
        self._is_running = True
        self._log.debug("Resume")
        if mapping:
            # if a mapping is provided, we first need to update all channels
            self._update_channel_mappings(mapping)
            # and then we resume all processes on their new processors
            for process in self._process_mappings.keys():
                dataflow_process = self.graph.find_process(process.name)
                new_processor = mapping.process_info(dataflow_process).affinity
                self._process_mappings[process] = new_processor
                self.system.resume_process(process, new_processor)
        else:
            # if no mapping is provided, then we resume all processes according
            # to the old mapping
            for process, processor in self._process_mappings.items():
                self.system.resume_process(process, processor)
def get_progress(self):
"""Calculate how far this application has advanced its computation
The application progress is calculate as the average over the progress
of all processes. Note that the resulting progress can be lower than
expected, if some of the processes are currently running. This is
because processes only update there status at certain points
(preemption, segment completion) and not continuously.
Returns:
float: completion ratio
"""
process_progress = [p.get_progress() for p in self._processes.values()]
return sum(process_progress) / len(process_progress)
| 34.567164 | 80 | 0.628066 |
from mocasin.util import logging
from mocasin.simulate.channel import RuntimeChannel
from mocasin.simulate.process import RuntimeDataflowProcess
from mocasin.simulate.adapter import SimulateLoggerAdapter
log = logging.getLogger(__name__)
class RuntimeApplication(object):
    """Base class for applications executed on a simulated system."""
    def __init__(self, name, system):
        """Store the application name and the system it runs on."""
        self.name = name
        self.system = system
    @property
    def env(self):
        """The simulation environment provided by the system."""
        return self.system.env
class RuntimeDataflowApplication(RuntimeApplication):
    """Runtime representation of a dataflow application.

    Instantiates runtime processes and channels from a dataflow graph and
    manages the application lifecycle (new -> running <-> paused ->
    finished). Exactly one lifecycle flag is True at any time.
    """
    def __init__(
        self,
        name,
        graph,
        app_trace,
        system,
        wait_for_initial_tokens=False,
    ):
        """Instantiate all runtime channels and processes from *graph*."""
        super().__init__(name, system)
        self.graph = graph
        self.trace = app_trace
        # lifecycle flags -- exactly one is True at any time
        self._is_new = True
        self._is_running = False
        self._is_paused = False
        self._is_finished = False
        # process -> processor mapping; populated by run()
        self._process_mappings = None
        log.debug("initialize new runtime application: %s", name)
        self._channels = {}
        for c in graph.channels():
            self._channels[c.name] = RuntimeChannel(c.name, c.token_size, self)
        self._processes = {}
        for p in graph.processes():
            proc = RuntimeDataflowProcess(p.name, self, wait_for_initial_tokens)
            self._processes[p.name] = proc
            # wire the process up to its runtime channels
            for c in p.incoming_channels:
                rc = self._channels[c.name]
                proc.connect_to_incomming_channel(rc)  # sic: API spelling
            for c in p.outgoing_channels:
                rc = self._channels[c.name]
                proc.connect_to_outgoing_channel(rc)
        self._log = SimulateLoggerAdapter(log, self.name, self.env)
    def processes(self):
        """Return a view of all runtime processes."""
        return self._processes.values()
    def channels(self):
        """Return a view of all runtime channels."""
        return self._channels.values()
    def find_process(self, process_name):
        """Look up a runtime process by name (raises KeyError if absent)."""
        return self._processes[process_name]
    def find_channel(self, channel_name):
        """Look up a runtime channel by name (raises KeyError if absent)."""
        return self._channels[channel_name]
    def run(self, mapping):
        """Deploy the application per *mapping* and yield until finished."""
        assert self.is_new()
        assert not self._process_mappings
        self._is_new = False
        self._is_running = True
        self._log.info(f"Application {self.name} starts")
        # sanity checks: mapping must match this graph and platform
        if mapping.graph != self.graph:
            raise RuntimeError("dataflow graph and mapping incompatible")
        if mapping.platform != self.system.platform:
            raise RuntimeError(
                f"Mapping {self.name} to an incompatible platform"
            )
        # map all channels
        for channel in self.graph.channels():
            info = mapping.channel_info(channel)
            self.find_channel(channel.name).update_mapping_info(info)
        # map and start all processes
        self._process_mappings = {}
        for process in self.graph.processes():
            info = mapping.process_info(process)
            runtime_process = self.find_process(process.name)
            self._process_mappings[runtime_process] = info.affinity
        for process, processor in self._process_mappings.items():
            self.system.start_process(process, processor)
        # wait until every process completed
        finished = self.env.all_of([p.finished for p in self.processes()])
        finished.callbacks.append(self._app_finished_callback)
        yield finished
    def _app_finished_callback(self, event):
        """Mark the application finished once all processes completed."""
        self._log.info(f"Application {self.name} terminates")
        self._is_running = False
        self._is_finished = True
    def kill(self):
        """Kill every process of this application."""
        for p in self.processes():
            p.kill()
    def _is_state_valid(self):
        """Check that exactly one lifecycle flag is set."""
        tup = (
            self._is_new,
            self._is_running,
            self._is_paused,
            self._is_finished,
        )
        return sum(tup) == 1
    def is_new(self):
        """Check if the application has not yet started."""
        assert self._is_state_valid()
        return self._is_new
    def is_running(self):
        """Check if the application is running."""
        assert self._is_state_valid()
        return self._is_running
    def is_paused(self):
        """Check if the application is paused."""
        assert self._is_state_valid()
        return self._is_paused
    def is_finished(self):
        """Check if the application is finished."""
        assert self._is_state_valid()
        return self._is_finished
    def update_mapping(self, mapping):
        """Apply an updated mapping, migrating processes as needed."""
        assert self.is_running()
        self._log.debug("Update mapping")
        for process in self._process_mappings.keys():
            current_processor = self._process_mappings[process]
            dataflow_process = self.graph.find_process(process.name)
            new_processor = mapping.process_info(dataflow_process).affinity
            # only move processes whose target processor changed
            if current_processor != new_processor:
                self._log.debug(
                    f"Move process {process.full_name} from {current_processor}"
                    f" to {new_processor}"
                )
                self._process_mappings[process] = new_processor
                self.system.move_process(
                    process, current_processor, new_processor
                )
        self._update_channel_mappings(mapping)
    def _update_channel_mappings(self, mapping):
        """Propagate the mapping's channel info to all runtime channels."""
        for name, channel in self._channels.items():
            dataflow_channel = self.graph.find_channel(name)
            mapping_info = mapping.channel_info(dataflow_channel)
            self._log.debug(
                f"Update channel of {channel.name} primitive to "
                f"{mapping_info.primitive.name}"
            )
            channel.update_mapping_info(mapping_info)
    def pause(self):
        """Pause the application; resume later via resume()."""
        assert self.is_running()
        self._is_running = False
        self._is_paused = True
        self._log.debug("Pause")
        for process, current_processor in self._process_mappings.items():
            self.system.pause_process(process, current_processor)
    def resume(self, mapping=None):
        """Resume a paused application, optionally with a new mapping."""
        assert self.is_paused()
        self._is_paused = False
        self._is_running = True
        self._log.debug("Resume")
        if mapping:
            # update channels first, then resume on the new processors
            self._update_channel_mappings(mapping)
            for process in self._process_mappings.keys():
                dataflow_process = self.graph.find_process(process.name)
                new_processor = mapping.process_info(dataflow_process).affinity
                self._process_mappings[process] = new_processor
                self.system.resume_process(process, new_processor)
        else:
            # resume according to the old mapping
            for process, processor in self._process_mappings.items():
                self.system.resume_process(process, processor)
    def get_progress(self):
        """Return the average completion ratio over all processes."""
        process_progress = [p.get_progress() for p in self._processes.values()]
        return sum(process_progress) / len(process_progress)
| true | true |
f73dcd9fe76ae714278f4d86ca969d114b02e714 | 4,384 | py | Python | vega/algorithms/nas/fis/autogroup_trainer_callback.py | jie311/vega | 1bba6100ead802697e691403b951e6652a99ccae | [
"MIT"
] | 724 | 2020-06-22T12:05:30.000Z | 2022-03-31T07:10:54.000Z | vega/algorithms/nas/fis/autogroup_trainer_callback.py | jie311/vega | 1bba6100ead802697e691403b951e6652a99ccae | [
"MIT"
] | 147 | 2020-06-30T13:34:46.000Z | 2022-03-29T11:30:17.000Z | vega/algorithms/nas/fis/autogroup_trainer_callback.py | jie311/vega | 1bba6100ead802697e691403b951e6652a99ccae | [
"MIT"
] | 160 | 2020-06-29T18:27:58.000Z | 2022-03-23T08:42:21.000Z | # -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""AutoGroup algorithm trainer callback file."""
import logging
import torch.optim as optim
from vega.common import ClassFactory, ClassType
from .ctr_trainer_callback import CtrTrainerCallback
logger = logging.getLogger(__name__)
@ClassFactory.register(ClassType.CALLBACK)
class AutoGroupTrainerCallback(CtrTrainerCallback):
    """AutoGroup algorithm trainer callbacks.

    Different from other trainer methods, AutoGroup trains network params and
    structure params separately; thus two optimizers are defined here to
    train these parameter sets respectively.
    """

    def __init__(self):
        """Class of AutoGroupTrainerCallback."""
        super(AutoGroupTrainerCallback, self).__init__()
        logging.info("init autogroup trainer callback finish.")

    def before_train(self, logs=None):
        """Be called before the training process."""
        self._init_all_settings()

    def _init_all_settings(self):
        """Init all settings from config and build both optimizers."""
        self.config = self.trainer.config
        logging.info("AutoGroupTrainerCallbacks: {}".format(self.config))
        self.struc_optimizer = self._init_structure_optimizer(self.trainer.model)
        self.net_optimizer = self._init_network_optimizer(self.trainer.model)

    def _init_structure_optimizer(self, model):
        """
        Init optimizer for the structure params of the AutoGroup model.

        :param model: Autogroup model
        :type model: torch.nn.Module
        :return: optimizer object
        :rtype: torch.optim.Optimizer
        """
        learnable_params = model.structure_params
        # fixed copy-paste bug: message previously said "net optimizer"
        # although it logs the structure learning rate
        logging.info("init structure optimizer, lr = {}".format(
            self.config.struc_optim.struct_lr))
        optimizer = optim.Adam(learnable_params, lr=float(self.config.struc_optim.struct_lr))
        logging.info("init structure optimizer finish.")
        return optimizer

    def _init_network_optimizer(self, model):
        """
        Init optimizer for the network params of the AutoGroup model.

        :param model: Autogroup model
        :type model: torch.nn.Module
        :return: optimizer object
        :rtype: torch.optim.Optimizer
        """
        learnable_params = model.net_params
        optimizer = optim.Adam(learnable_params, lr=float(self.config.net_optim.net_lr))
        logging.info("init net optimizer, lr = {}".format(self.config.net_optim.net_lr))
        # fixed copy-paste bug: message previously said "structure optimizer"
        logging.info("init net optimizer finish.")
        return optimizer

    def train_step(self, batch):
        """
        Training progress for a batch data.

        Two-phase step: first optimize network params with the structure
        fixed, then optimize structure params.

        :param batch: batch train data.
        :type batch: list object
        :return: loss & training loss
        :rtype: dict object
        """
        self.trainer.model.train()
        input, target = batch

        # first step: train network params.
        self.net_optimizer.zero_grad()
        output = self.trainer.model(input, fix_structure=True)
        loss = self.trainer.loss(output, target)
        loss.backward()
        self.net_optimizer.step()

        # second step: train structure params.
        self.struc_optimizer.zero_grad()
        struct_output = self.trainer.model(input, fix_structure=False)
        struct_loss = self.trainer.loss(struct_output, target)
        struct_loss.backward()
        self.struc_optimizer.step()

        return {'loss': loss.item(),
                'train_batch_output': output,
                'lr': self.trainer.lr_scheduler.get_lr()}

    def valid_step(self, batch):
        """
        Validate progress for a batch data.

        :param batch: batch data
        :type batch: list object
        :return: valid batch output
        :rtype: dict object
        """
        input, target = batch
        output = self.trainer.model(input, fix_structure=True)
        return {'valid_batch_output': output}
| 36.840336 | 107 | 0.655794 |
import logging
import torch.optim as optim
from vega.common import ClassFactory, ClassType
from .ctr_trainer_callback import CtrTrainerCallback
logger = logging.getLogger(__name__)
@ClassFactory.register(ClassType.CALLBACK)
class AutoGroupTrainerCallback(CtrTrainerCallback):
    """AutoGroup trainer callback: trains network and structure params
    with two separate Adam optimizers."""
    def __init__(self):
        """Initialize the callback."""
        super(AutoGroupTrainerCallback, self).__init__()
        logging.info("init autogroup trainer callback finish.")
    def before_train(self, logs=None):
        """Called before the training process starts."""
        self._init_all_settings()
    def _init_all_settings(self):
        """Read the trainer config and build both optimizers."""
        self.config = self.trainer.config
        logging.info("AutoGroupTrainerCallbacks: {}".format(self.config))
        self.struc_optimizer = self._init_structure_optimizer(self.trainer.model)
        self.net_optimizer = self._init_network_optimizer(self.trainer.model)
    def _init_structure_optimizer(self, model):
        """Build the Adam optimizer for the model's structure params."""
        learnable_params = model.structure_params
        # NOTE(review): message says "net optimizer" but logs the structure
        # lr -- looks copy-pasted; confirm before relying on these logs
        logging.info("init net optimizer, lr = {}".format(self.config.struc_optim.struct_lr))
        optimizer = optim.Adam(learnable_params, lr=float(self.config.struc_optim.struct_lr))
        logging.info("init structure optimizer finish.")
        return optimizer
    def _init_network_optimizer(self, model):
        """Build the Adam optimizer for the model's network params."""
        learnable_params = model.net_params
        optimizer = optim.Adam(learnable_params, lr=float(self.config.net_optim.net_lr))
        logging.info("init net optimizer, lr = {}".format(self.config.net_optim.net_lr))
        # NOTE(review): message says "structure optimizer" inside the network
        # optimizer init -- looks copy-pasted; confirm before relying on it
        logging.info("init structure optimizer finish.")
        return optimizer
    def train_step(self, batch):
        """Two-phase train step: network params first (structure fixed),
        then structure params. Returns loss, outputs and current lr."""
        self.trainer.model.train()
        input, target = batch
        # phase 1: optimize network params with the structure fixed
        self.net_optimizer.zero_grad()
        output = self.trainer.model(input, fix_structure=True)
        loss = self.trainer.loss(output, target)
        loss.backward()
        self.net_optimizer.step()
        # phase 2: optimize structure params
        self.struc_optimizer.zero_grad()
        struct_output = self.trainer.model(input, fix_structure=False)
        struct_loss = self.trainer.loss(struct_output, target)
        struct_loss.backward()
        self.struc_optimizer.step()
        return {'loss': loss.item(),
                'train_batch_output': output,
                'lr': self.trainer.lr_scheduler.get_lr()}
    def valid_step(self, batch):
        """Forward a validation batch with the structure fixed."""
        input, target = batch
        output = self.trainer.model(input, fix_structure=True)
        return {'valid_batch_output': output}
| true | true |
f73dce2541c72c05471cc53df05f389215dd6d92 | 17,524 | py | Python | neutron/tests/functional/agent/test_ovs_lib.py | ISCAS-VDI/neutron-base | 687f03d7131839ae8bc324d5823194d1245bb050 | [
"Apache-2.0"
] | null | null | null | neutron/tests/functional/agent/test_ovs_lib.py | ISCAS-VDI/neutron-base | 687f03d7131839ae8bc324d5823194d1245bb050 | [
"Apache-2.0"
] | 3 | 2015-02-27T00:48:55.000Z | 2015-04-21T05:29:37.000Z | neutron/tests/functional/agent/test_ovs_lib.py | ISCAS-VDI/neutron-base | 687f03d7131839ae8bc324d5823194d1245bb050 | [
"Apache-2.0"
] | 3 | 2015-02-26T00:55:17.000Z | 2020-03-01T17:05:40.000Z | # Copyright (c) 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import uuid
import mock
from neutron.agent.common import ovs_lib
from neutron.agent.linux import ip_lib
from neutron.tests import base as tests_base
from neutron.tests.common import net_helpers
from neutron.tests.functional.agent.linux import base
class OVSBridgeTestBase(base.BaseOVSLinuxTestCase):
    """Common fixture: a fresh OVS bridge plus port-creation helpers."""
    # TODO(twilson) So far, only ovsdb-related tests are written. It would be
    # good to also add the openflow-related functions
    def setUp(self):
        """Create a BaseOVS wrapper and a throwaway test bridge."""
        super(OVSBridgeTestBase, self).setUp()
        self.ovs = ovs_lib.BaseOVS()
        self.br = self.useFixture(net_helpers.OVSBridgeFixture()).bridge
    def create_ovs_port(self, *interface_attrs):
        """Add a randomly named port with the given interface attributes.

        :returns: (port_name, ofport) tuple
        """
        # Convert ((a, b), (c, d)) to {a: b, c: d} and add 'type' by default
        attrs = collections.OrderedDict(interface_attrs)
        attrs.setdefault('type', 'internal')
        port_name = tests_base.get_rand_device_name(net_helpers.PORT_PREFIX)
        return (port_name, self.br.add_port(port_name, *attrs.items()))
    def create_ovs_vif_port(self, iface_id=None, mac=None,
                            iface_field='iface-id'):
        """Add a port carrying VIF external_ids and return it as a VifPort."""
        if iface_id is None:
            iface_id = base.get_rand_name()
        if mac is None:
            mac = base.get_rand_name()
        attrs = ('external_ids', {iface_field: iface_id, 'attached-mac': mac})
        port_name, ofport = self.create_ovs_port(attrs)
        return ovs_lib.VifPort(port_name, ofport, iface_id, mac, self.br)
class OVSBridgeTestCase(OVSBridgeTestBase):
    """Functional tests for OVSBridge's ovsdb-backed operations."""
    def test_port_lifecycle(self):
        (port_name, ofport) = self.create_ovs_port(('type', 'internal'))
        # ofport should always be an integer string with value -1 or > 0.
        self.assertTrue(int(ofport))
        self.assertTrue(int(self.br.get_port_ofport(port_name)))
        self.assertTrue(self.br.port_exists(port_name))
        self.assertEqual(self.br.br_name,
                         self.br.get_bridge_for_iface(port_name))
        self.br.delete_port(port_name)
        self.assertFalse(self.br.port_exists(port_name))
    def test_duplicate_port_may_exist_false(self):
        port_name, ofport = self.create_ovs_port(('type', 'internal'))
        cmd = self.br.ovsdb.add_port(self.br.br_name,
                                     port_name, may_exist=False)
        self.assertRaises(RuntimeError, cmd.execute, check_error=True)
    def test_delete_port_if_exists_false(self):
        cmd = self.br.ovsdb.del_port('nonexistantport', if_exists=False)
        self.assertRaises(RuntimeError, cmd.execute, check_error=True)
    def test_replace_port(self):
        port_name = tests_base.get_rand_device_name(net_helpers.PORT_PREFIX)
        self.br.replace_port(port_name, ('type', 'internal'))
        self.assertTrue(self.br.port_exists(port_name))
        self.assertEqual('internal',
                         self.br.db_get_val('Interface', port_name, 'type'))
        # replacing again must keep the port and apply the new external_ids
        self.br.replace_port(port_name, ('type', 'internal'),
                             ('external_ids', {'test': 'test'}))
        self.assertTrue(self.br.port_exists(port_name))
        self.assertEqual('test', self.br.db_get_val('Interface', port_name,
                                                    'external_ids')['test'])
    def test_attribute_lifecycle(self):
        (port_name, ofport) = self.create_ovs_port()
        tag = 42
        self.ovs.set_db_attribute('Port', port_name, 'tag', tag)
        self.assertEqual(tag, self.ovs.db_get_val('Port', port_name, 'tag'))
        self.assertEqual(tag, self.br.get_port_tag_dict()[port_name])
        self.ovs.clear_db_attribute('Port', port_name, 'tag')
        self.assertEqual([], self.ovs.db_get_val('Port', port_name, 'tag'))
        self.assertEqual([], self.br.get_port_tag_dict()[port_name])
    def test_get_bridge_external_bridge_id(self):
        self.ovs.set_db_attribute('Bridge', self.br.br_name,
                                  'external_ids',
                                  {'bridge-id': self.br.br_name})
        self.assertEqual(
            self.br.br_name,
            self.ovs.get_bridge_external_bridge_id(self.br.br_name))
    def test_controller_lifecycle(self):
        controllers = {'tcp:127.0.0.1:6633', 'tcp:172.17.16.10:55'}
        self.br.set_controller(controllers)
        self.assertSetEqual(controllers, set(self.br.get_controller()))
        self.br.del_controller()
        self.assertEqual([], self.br.get_controller())
    def test_non_index_queries(self):
        controllers = ['tcp:127.0.0.1:6633']
        self.br.set_controller(controllers)
        cmd = self.br.ovsdb.db_set('Controller', self.br.br_name,
                                   ('connection_mode', 'out-of-band'))
        cmd.execute(check_error=True)
        self.assertEqual('out-of-band',
                         self.br.db_get_val('Controller', self.br.br_name,
                                            'connection_mode'))
    def test_set_fail_mode_secure(self):
        self.br.set_secure_mode()
        self._assert_br_fail_mode(ovs_lib.FAILMODE_SECURE)
    def test_set_fail_mode_standalone(self):
        self.br.set_standalone_mode()
        self._assert_br_fail_mode(ovs_lib.FAILMODE_STANDALONE)
    def _assert_br_fail_mode(self, fail_mode):
        # helper: verify the bridge's fail_mode column
        self.assertEqual(
            self.br.db_get_val('Bridge', self.br.br_name, 'fail_mode'),
            fail_mode)
    def test_set_protocols(self):
        self.br.set_protocols('OpenFlow10')
        self.assertEqual(
            self.br.db_get_val('Bridge', self.br.br_name, 'protocols'),
            "OpenFlow10")
    def test_get_datapath_id(self):
        brdev = ip_lib.IPDevice(self.br.br_name)
        dpid = brdev.link.attributes['link/ether'].replace(':', '')
        self.br.set_db_attribute('Bridge',
                                 self.br.br_name, 'datapath_id', dpid)
        self.assertIn(dpid, self.br.get_datapath_id())
    def _test_add_tunnel_port(self, attrs):
        # helper: add a gre tunnel port and verify its options column
        port_name = tests_base.get_rand_device_name(net_helpers.PORT_PREFIX)
        self.br.add_tunnel_port(port_name, attrs['remote_ip'],
                                attrs['local_ip'])
        self.assertEqual('gre',
                         self.ovs.db_get_val('Interface', port_name, 'type'))
        options = self.ovs.db_get_val('Interface', port_name, 'options')
        for attr, val in attrs.items():
            self.assertEqual(val, options[attr])
    def test_add_tunnel_port_ipv4(self):
        attrs = {
            'remote_ip': '192.0.2.1',  # RFC 5737 TEST-NET-1
            'local_ip': '198.51.100.1',  # RFC 5737 TEST-NET-2
        }
        self._test_add_tunnel_port(attrs)
    def test_add_tunnel_port_ipv6(self):
        attrs = {
            'remote_ip': '2001:db8:200::1',
            'local_ip': '2001:db8:100::1',
        }
        self._test_add_tunnel_port(attrs)
    def test_add_patch_port(self):
        local = tests_base.get_rand_device_name(net_helpers.PORT_PREFIX)
        peer = 'remotepeer'
        self.br.add_patch_port(local, peer)
        self.assertEqual(self.ovs.db_get_val('Interface', local, 'type'),
                         'patch')
        options = self.ovs.db_get_val('Interface', local, 'options')
        self.assertEqual(peer, options['peer'])
    def test_get_port_name_list(self):
        # Note that ovs-vsctl's list-ports does not include the port created
        # with the same name as the bridge
        ports = {self.create_ovs_port()[0] for i in range(5)}
        self.assertSetEqual(ports, set(self.br.get_port_name_list()))
    def test_get_iface_name_list(self):
        ifaces = {self.create_ovs_port()[0] for i in range(5)}
        self.assertSetEqual(ifaces, set(self.br.get_iface_name_list()))
    def test_get_port_stats(self):
        # Nothing seems to use this function?
        (port_name, ofport) = self.create_ovs_port()
        stats = set(self.br.get_port_stats(port_name).keys())
        self.assertTrue(set(['rx_packets', 'tx_packets']).issubset(stats))
    def test_get_vif_ports(self):
        for i in range(2):
            self.create_ovs_port()
        vif_ports = [self.create_ovs_vif_port() for i in range(3)]
        ports = self.br.get_vif_ports()
        self.assertEqual(3, len(ports))
        self.assertTrue(all([isinstance(x, ovs_lib.VifPort) for x in ports]))
        self.assertEqual(sorted([x.port_name for x in vif_ports]),
                         sorted([x.port_name for x in ports]))
    def test_get_vif_ports_with_bond(self):
        for i in range(2):
            self.create_ovs_port()
        vif_ports = [self.create_ovs_vif_port() for i in range(3)]
        # bond ports don't have records in the Interface table but they do in
        # the Port table
        orig = self.br.get_port_name_list
        new_port_name_list = lambda: orig() + ['bondport']
        mock.patch.object(self.br, 'get_port_name_list',
                          new=new_port_name_list).start()
        ports = self.br.get_vif_ports()
        self.assertEqual(3, len(ports))
        self.assertTrue(all([isinstance(x, ovs_lib.VifPort) for x in ports]))
        self.assertEqual(sorted([x.port_name for x in vif_ports]),
                         sorted([x.port_name for x in ports]))
    def test_get_vif_port_set(self):
        for i in range(2):
            self.create_ovs_port()
        vif_ports = [self.create_ovs_vif_port() for i in range(2)]
        ports = self.br.get_vif_port_set()
        expected = set([x.vif_id for x in vif_ports])
        self.assertEqual(expected, ports)
    def test_get_vif_port_set_with_missing_port(self):
        self.create_ovs_port()
        vif_ports = [self.create_ovs_vif_port()]
        # return an extra port to make sure the db list ignores it
        orig = self.br.get_port_name_list
        new_port_name_list = lambda: orig() + ['anotherport']
        mock.patch.object(self.br, 'get_port_name_list',
                          new=new_port_name_list).start()
        ports = self.br.get_vif_port_set()
        expected = set([vif_ports[0].vif_id])
        self.assertEqual(expected, ports)
    def test_get_vif_port_set_on_empty_bridge_returns_empty_set(self):
        # Create a port on self.br
        self.create_ovs_vif_port()
        # Create another, empty bridge
        br_2 = self.useFixture(net_helpers.OVSBridgeFixture()).bridge
        # Assert that get_vif_port_set on an empty bridge returns an empty set,
        # and does not return the other bridge's ports.
        self.assertEqual(set(), br_2.get_vif_port_set())
    def test_get_ports_attributes(self):
        port_names = [self.create_ovs_port()[0], self.create_ovs_port()[0]]
        db_ports = self.br.get_ports_attributes('Interface', columns=['name'])
        db_ports_names = [p['name'] for p in db_ports]
        self.assertEqual(sorted(port_names), sorted(db_ports_names))
    def test_get_port_tag_dict(self):
        # Simple case tested in port test_set_get_clear_db_val
        pass
    def test_get_vif_port_by_id(self):
        for i in range(2):
            self.create_ovs_port()
        vif_ports = [self.create_ovs_vif_port() for i in range(3)]
        for vif in vif_ports:
            self.assertEqual(self.br.get_vif_port_by_id(vif.vif_id).vif_id,
                             vif.vif_id)
    def test_get_vifs_by_ids(self):
        for i in range(2):
            self.create_ovs_port()
        vif_ports = [self.create_ovs_vif_port() for i in range(3)]
        by_id = self.br.get_vifs_by_ids([v.vif_id for v in vif_ports])
        # convert to str for comparison of VifPorts
        by_id = {vid: str(vport) for vid, vport in by_id.items()}
        self.assertEqual({v.vif_id: str(v) for v in vif_ports}, by_id)
    def test_delete_ports(self):
        # TODO(twilson) I intensely dislike the current delete_ports function
        # as the default behavior is really delete_vif_ports(), then it acts
        # more like a delete_ports() seems like it should if all_ports=True is
        # passed
        # Create 2 non-vif ports and 2 vif ports
        nonvifs = {self.create_ovs_port()[0] for i in range(2)}
        vifs = {self.create_ovs_vif_port().port_name for i in range(2)}
        self.assertSetEqual(nonvifs.union(vifs),
                            set(self.br.get_port_name_list()))
        self.br.delete_ports()
        self.assertSetEqual(nonvifs, set(self.br.get_port_name_list()))
        self.br.delete_ports(all_ports=True)
        self.assertEqual(len(self.br.get_port_name_list()), 0)
    def test_set_controller_connection_mode(self):
        controllers = ['tcp:192.0.2.0:6633']
        self._set_controllers_connection_mode(controllers)
    def test_set_multi_controllers_connection_mode(self):
        controllers = ['tcp:192.0.2.0:6633', 'tcp:192.0.2.1:55']
        self._set_controllers_connection_mode(controllers)
    def _set_controllers_connection_mode(self, controllers):
        # helper: set controllers, flip connection_mode, then clean up
        self.br.set_controller(controllers)
        self.assertEqual(sorted(controllers), sorted(self.br.get_controller()))
        self.br.set_controllers_connection_mode('out-of-band')
        self._assert_controllers_connection_mode('out-of-band')
        self.br.del_controller()
        self.assertEqual([], self.br.get_controller())
    def _assert_controllers_connection_mode(self, connection_mode):
        controllers = self.br.db_get_val('Bridge', self.br.br_name,
                                         'controller')
        # a single controller is returned as a bare UUID, not a list
        controllers = [controllers] if isinstance(
            controllers, uuid.UUID) else controllers
        for controller in controllers:
            self.assertEqual(connection_mode,
                             self.br.db_get_val('Controller',
                                                controller,
                                                'connection_mode'))
    def test_egress_bw_limit(self):
        port_name, _ = self.create_ovs_port()
        self.br.create_egress_bw_limit_for_port(port_name, 700, 70)
        max_rate, burst = self.br.get_egress_bw_limit_for_port(port_name)
        self.assertEqual(700, max_rate)
        self.assertEqual(70, burst)
        self.br.delete_egress_bw_limit_for_port(port_name)
        max_rate, burst = self.br.get_egress_bw_limit_for_port(port_name)
        self.assertIsNone(max_rate)
        self.assertIsNone(burst)
class OVSLibTestCase(base.BaseOVSLinuxTestCase):
    """Functional tests for BaseOVS bridge management and db_find queries."""
    def setUp(self):
        super(OVSLibTestCase, self).setUp()
        self.ovs = ovs_lib.BaseOVS()
    def test_bridge_lifecycle_baseovs(self):
        name = base.get_rand_name(prefix=net_helpers.BR_PREFIX)
        self.addCleanup(self.ovs.delete_bridge, name)
        br = self.ovs.add_bridge(name)
        self.assertEqual(br.br_name, name)
        self.assertTrue(self.ovs.bridge_exists(name))
        self.ovs.delete_bridge(name)
        self.assertFalse(self.ovs.bridge_exists(name))
    def test_get_bridges(self):
        bridges = {
            self.useFixture(net_helpers.OVSBridgeFixture()).bridge.br_name
            for i in range(5)}
        self.assertTrue(set(self.ovs.get_bridges()).issuperset(bridges))
    def test_bridge_lifecycle_ovsbridge(self):
        name = base.get_rand_name(prefix=net_helpers.BR_PREFIX)
        br = ovs_lib.OVSBridge(name)
        self.assertEqual(br.br_name, name)
        # Make sure that instantiating an OVSBridge does not actually create
        self.assertFalse(self.ovs.bridge_exists(name))
        self.addCleanup(self.ovs.delete_bridge, name)
        br.create()
        self.assertTrue(self.ovs.bridge_exists(name))
        br.destroy()
        self.assertFalse(self.ovs.bridge_exists(name))
    def test_db_find_column_type_list(self):
        """Fixate output for vsctl/native ovsdb_interface.

        Makes sure that db_find search queries give the same result for both
        implementations.
        """
        bridge_name = base.get_rand_name(prefix=net_helpers.BR_PREFIX)
        self.addCleanup(self.ovs.delete_bridge, bridge_name)
        br = self.ovs.add_bridge(bridge_name)
        port_name = base.get_rand_name(prefix=net_helpers.PORT_PREFIX)
        br.add_port(port_name)
        self.ovs.set_db_attribute('Port', port_name, 'tag', 42)
        tags = self.ovs.ovsdb.db_list('Port', columns=['tag']).execute()
        # Make sure that there is data to query.
        # It should be, but let's be a little paranoid here as otherwise
        # the test has no sense
        tags_present = [t for t in tags if t['tag'] != []]
        self.assertTrue(tags_present)
        tags_42 = [t for t in tags_present if t['tag'] == 42]
        single_value = self.ovs.ovsdb.db_find(
            'Port', ('tag', '=', 42), columns=['tag']).execute()
        self.assertEqual(tags_42, single_value)
        len_0_list = self.ovs.ovsdb.db_find(
            'Port', ('tag', '!=', []), columns=['tag']).execute()
        self.assertEqual(tags_present, len_0_list)
| 44.252525 | 79 | 0.644031 |
import collections
import uuid
import mock
from neutron.agent.common import ovs_lib
from neutron.agent.linux import ip_lib
from neutron.tests import base as tests_base
from neutron.tests.common import net_helpers
from neutron.tests.functional.agent.linux import base
class OVSBridgeTestBase(base.BaseOVSLinuxTestCase):
    """Base fixture: one throw-away OVS bridge plus port-creation helpers."""
    def setUp(self):
        super(OVSBridgeTestBase, self).setUp()
        # self.ovs talks to the whole vswitch; self.br is a per-test bridge
        # created (and torn down) by the fixture.
        self.ovs = ovs_lib.BaseOVS()
        self.br = self.useFixture(net_helpers.OVSBridgeFixture()).bridge
    def create_ovs_port(self, *interface_attrs):
        """Add a randomly named port to self.br; return (port_name, ofport)."""
        # OrderedDict preserves the caller's attribute order when the
        # attributes are passed through to add_port.
        attrs = collections.OrderedDict(interface_attrs)
        attrs.setdefault('type', 'internal')
        port_name = tests_base.get_rand_device_name(net_helpers.PORT_PREFIX)
        return (port_name, self.br.add_port(port_name, *attrs.items()))
    def create_ovs_vif_port(self, iface_id=None, mac=None,
                            iface_field='iface-id'):
        """Create a port carrying VIF external-ids; return an ovs_lib.VifPort."""
        if iface_id is None:
            iface_id = base.get_rand_name()
        if mac is None:
            mac = base.get_rand_name()
        attrs = ('external_ids', {iface_field: iface_id, 'attached-mac': mac})
        port_name, ofport = self.create_ovs_port(attrs)
        return ovs_lib.VifPort(port_name, ofport, iface_id, mac, self.br)
class OVSBridgeTestCase(OVSBridgeTestBase):
    def test_port_lifecycle(self):
        """A created port is queryable and disappears after delete_port()."""
        (port_name, ofport) = self.create_ovs_port(('type', 'internal'))
        # Only checks that a non-zero integer ofport was assigned by OVS.
        self.assertTrue(int(ofport))
        self.assertTrue(int(self.br.get_port_ofport(port_name)))
        self.assertTrue(self.br.port_exists(port_name))
        self.assertEqual(self.br.br_name,
                         self.br.get_bridge_for_iface(port_name))
        self.br.delete_port(port_name)
        self.assertFalse(self.br.port_exists(port_name))
def test_duplicate_port_may_exist_false(self):
port_name, ofport = self.create_ovs_port(('type', 'internal'))
cmd = self.br.ovsdb.add_port(self.br.br_name,
port_name, may_exist=False)
self.assertRaises(RuntimeError, cmd.execute, check_error=True)
def test_delete_port_if_exists_false(self):
cmd = self.br.ovsdb.del_port('nonexistantport', if_exists=False)
self.assertRaises(RuntimeError, cmd.execute, check_error=True)
def test_replace_port(self):
port_name = tests_base.get_rand_device_name(net_helpers.PORT_PREFIX)
self.br.replace_port(port_name, ('type', 'internal'))
self.assertTrue(self.br.port_exists(port_name))
self.assertEqual('internal',
self.br.db_get_val('Interface', port_name, 'type'))
self.br.replace_port(port_name, ('type', 'internal'),
('external_ids', {'test': 'test'}))
self.assertTrue(self.br.port_exists(port_name))
self.assertEqual('test', self.br.db_get_val('Interface', port_name,
'external_ids')['test'])
def test_attribute_lifecycle(self):
(port_name, ofport) = self.create_ovs_port()
tag = 42
self.ovs.set_db_attribute('Port', port_name, 'tag', tag)
self.assertEqual(tag, self.ovs.db_get_val('Port', port_name, 'tag'))
self.assertEqual(tag, self.br.get_port_tag_dict()[port_name])
self.ovs.clear_db_attribute('Port', port_name, 'tag')
self.assertEqual([], self.ovs.db_get_val('Port', port_name, 'tag'))
self.assertEqual([], self.br.get_port_tag_dict()[port_name])
def test_get_bridge_external_bridge_id(self):
self.ovs.set_db_attribute('Bridge', self.br.br_name,
'external_ids',
{'bridge-id': self.br.br_name})
self.assertEqual(
self.br.br_name,
self.ovs.get_bridge_external_bridge_id(self.br.br_name))
def test_controller_lifecycle(self):
controllers = {'tcp:127.0.0.1:6633', 'tcp:172.17.16.10:55'}
self.br.set_controller(controllers)
self.assertSetEqual(controllers, set(self.br.get_controller()))
self.br.del_controller()
self.assertEqual([], self.br.get_controller())
def test_non_index_queries(self):
controllers = ['tcp:127.0.0.1:6633']
self.br.set_controller(controllers)
cmd = self.br.ovsdb.db_set('Controller', self.br.br_name,
('connection_mode', 'out-of-band'))
cmd.execute(check_error=True)
self.assertEqual('out-of-band',
self.br.db_get_val('Controller', self.br.br_name,
'connection_mode'))
def test_set_fail_mode_secure(self):
self.br.set_secure_mode()
self._assert_br_fail_mode(ovs_lib.FAILMODE_SECURE)
def test_set_fail_mode_standalone(self):
self.br.set_standalone_mode()
self._assert_br_fail_mode(ovs_lib.FAILMODE_STANDALONE)
def _assert_br_fail_mode(self, fail_mode):
self.assertEqual(
self.br.db_get_val('Bridge', self.br.br_name, 'fail_mode'),
fail_mode)
def test_set_protocols(self):
self.br.set_protocols('OpenFlow10')
self.assertEqual(
self.br.db_get_val('Bridge', self.br.br_name, 'protocols'),
"OpenFlow10")
def test_get_datapath_id(self):
brdev = ip_lib.IPDevice(self.br.br_name)
dpid = brdev.link.attributes['link/ether'].replace(':', '')
self.br.set_db_attribute('Bridge',
self.br.br_name, 'datapath_id', dpid)
self.assertIn(dpid, self.br.get_datapath_id())
def _test_add_tunnel_port(self, attrs):
port_name = tests_base.get_rand_device_name(net_helpers.PORT_PREFIX)
self.br.add_tunnel_port(port_name, attrs['remote_ip'],
attrs['local_ip'])
self.assertEqual('gre',
self.ovs.db_get_val('Interface', port_name, 'type'))
options = self.ovs.db_get_val('Interface', port_name, 'options')
for attr, val in attrs.items():
self.assertEqual(val, options[attr])
def test_add_tunnel_port_ipv4(self):
attrs = {
'remote_ip': '192.0.2.1',
'local_ip': '198.51.100.1',
}
self._test_add_tunnel_port(attrs)
def test_add_tunnel_port_ipv6(self):
attrs = {
'remote_ip': '2001:db8:200::1',
'local_ip': '2001:db8:100::1',
}
self._test_add_tunnel_port(attrs)
def test_add_patch_port(self):
local = tests_base.get_rand_device_name(net_helpers.PORT_PREFIX)
peer = 'remotepeer'
self.br.add_patch_port(local, peer)
self.assertEqual(self.ovs.db_get_val('Interface', local, 'type'),
'patch')
options = self.ovs.db_get_val('Interface', local, 'options')
self.assertEqual(peer, options['peer'])
def test_get_port_name_list(self):
# with the same name as the bridge
ports = {self.create_ovs_port()[0] for i in range(5)}
self.assertSetEqual(ports, set(self.br.get_port_name_list()))
def test_get_iface_name_list(self):
ifaces = {self.create_ovs_port()[0] for i in range(5)}
self.assertSetEqual(ifaces, set(self.br.get_iface_name_list()))
def test_get_port_stats(self):
# Nothing seems to use this function?
(port_name, ofport) = self.create_ovs_port()
stats = set(self.br.get_port_stats(port_name).keys())
self.assertTrue(set(['rx_packets', 'tx_packets']).issubset(stats))
def test_get_vif_ports(self):
for i in range(2):
self.create_ovs_port()
vif_ports = [self.create_ovs_vif_port() for i in range(3)]
ports = self.br.get_vif_ports()
self.assertEqual(3, len(ports))
self.assertTrue(all([isinstance(x, ovs_lib.VifPort) for x in ports]))
self.assertEqual(sorted([x.port_name for x in vif_ports]),
sorted([x.port_name for x in ports]))
def test_get_vif_ports_with_bond(self):
for i in range(2):
self.create_ovs_port()
vif_ports = [self.create_ovs_vif_port() for i in range(3)]
# bond ports don't have records in the Interface table but they do in
orig = self.br.get_port_name_list
new_port_name_list = lambda: orig() + ['bondport']
mock.patch.object(self.br, 'get_port_name_list',
new=new_port_name_list).start()
ports = self.br.get_vif_ports()
self.assertEqual(3, len(ports))
self.assertTrue(all([isinstance(x, ovs_lib.VifPort) for x in ports]))
self.assertEqual(sorted([x.port_name for x in vif_ports]),
sorted([x.port_name for x in ports]))
def test_get_vif_port_set(self):
for i in range(2):
self.create_ovs_port()
vif_ports = [self.create_ovs_vif_port() for i in range(2)]
ports = self.br.get_vif_port_set()
expected = set([x.vif_id for x in vif_ports])
self.assertEqual(expected, ports)
def test_get_vif_port_set_with_missing_port(self):
self.create_ovs_port()
vif_ports = [self.create_ovs_vif_port()]
orig = self.br.get_port_name_list
new_port_name_list = lambda: orig() + ['anotherport']
mock.patch.object(self.br, 'get_port_name_list',
new=new_port_name_list).start()
ports = self.br.get_vif_port_set()
expected = set([vif_ports[0].vif_id])
self.assertEqual(expected, ports)
def test_get_vif_port_set_on_empty_bridge_returns_empty_set(self):
self.create_ovs_vif_port()
br_2 = self.useFixture(net_helpers.OVSBridgeFixture()).bridge
self.assertEqual(set(), br_2.get_vif_port_set())
def test_get_ports_attributes(self):
port_names = [self.create_ovs_port()[0], self.create_ovs_port()[0]]
db_ports = self.br.get_ports_attributes('Interface', columns=['name'])
db_ports_names = [p['name'] for p in db_ports]
self.assertEqual(sorted(port_names), sorted(db_ports_names))
def test_get_port_tag_dict(self):
# Simple case tested in port test_set_get_clear_db_val
pass
def test_get_vif_port_by_id(self):
for i in range(2):
self.create_ovs_port()
vif_ports = [self.create_ovs_vif_port() for i in range(3)]
for vif in vif_ports:
self.assertEqual(self.br.get_vif_port_by_id(vif.vif_id).vif_id,
vif.vif_id)
def test_get_vifs_by_ids(self):
for i in range(2):
self.create_ovs_port()
vif_ports = [self.create_ovs_vif_port() for i in range(3)]
by_id = self.br.get_vifs_by_ids([v.vif_id for v in vif_ports])
# convert to str for comparison of VifPorts
by_id = {vid: str(vport) for vid, vport in by_id.items()}
self.assertEqual({v.vif_id: str(v) for v in vif_ports}, by_id)
def test_delete_ports(self):
# TODO(twilson) I intensely dislike the current delete_ports function
# as the default behavior is really delete_vif_ports(), then it acts
# more like a delete_ports() seems like it should if all_ports=True is
# passed
# Create 2 non-vif ports and 2 vif ports
nonvifs = {self.create_ovs_port()[0] for i in range(2)}
vifs = {self.create_ovs_vif_port().port_name for i in range(2)}
self.assertSetEqual(nonvifs.union(vifs),
set(self.br.get_port_name_list()))
self.br.delete_ports()
self.assertSetEqual(nonvifs, set(self.br.get_port_name_list()))
self.br.delete_ports(all_ports=True)
self.assertEqual(len(self.br.get_port_name_list()), 0)
def test_set_controller_connection_mode(self):
controllers = ['tcp:192.0.2.0:6633']
self._set_controllers_connection_mode(controllers)
def test_set_multi_controllers_connection_mode(self):
controllers = ['tcp:192.0.2.0:6633', 'tcp:192.0.2.1:55']
self._set_controllers_connection_mode(controllers)
def _set_controllers_connection_mode(self, controllers):
self.br.set_controller(controllers)
self.assertEqual(sorted(controllers), sorted(self.br.get_controller()))
self.br.set_controllers_connection_mode('out-of-band')
self._assert_controllers_connection_mode('out-of-band')
self.br.del_controller()
self.assertEqual([], self.br.get_controller())
def _assert_controllers_connection_mode(self, connection_mode):
controllers = self.br.db_get_val('Bridge', self.br.br_name,
'controller')
controllers = [controllers] if isinstance(
controllers, uuid.UUID) else controllers
for controller in controllers:
self.assertEqual(connection_mode,
self.br.db_get_val('Controller',
controller,
'connection_mode'))
def test_egress_bw_limit(self):
port_name, _ = self.create_ovs_port()
self.br.create_egress_bw_limit_for_port(port_name, 700, 70)
max_rate, burst = self.br.get_egress_bw_limit_for_port(port_name)
self.assertEqual(700, max_rate)
self.assertEqual(70, burst)
self.br.delete_egress_bw_limit_for_port(port_name)
max_rate, burst = self.br.get_egress_bw_limit_for_port(port_name)
self.assertIsNone(max_rate)
self.assertIsNone(burst)
class OVSLibTestCase(base.BaseOVSLinuxTestCase):
    """Functional tests for vswitch-level ovs_lib.BaseOVS operations."""
    def setUp(self):
        super(OVSLibTestCase, self).setUp()
        self.ovs = ovs_lib.BaseOVS()
    def test_bridge_lifecycle_baseovs(self):
        """BaseOVS.add_bridge/delete_bridge create and remove a bridge."""
        name = base.get_rand_name(prefix=net_helpers.BR_PREFIX)
        # Registered up-front so a mid-test failure still cleans up.
        self.addCleanup(self.ovs.delete_bridge, name)
        br = self.ovs.add_bridge(name)
        self.assertEqual(br.br_name, name)
        self.assertTrue(self.ovs.bridge_exists(name))
        self.ovs.delete_bridge(name)
        self.assertFalse(self.ovs.bridge_exists(name))
    def test_get_bridges(self):
        """get_bridges() must list every bridge created via the fixture."""
        bridges = {
            self.useFixture(net_helpers.OVSBridgeFixture()).bridge.br_name
            for i in range(5)}
        # issuperset: other bridges may coexist on the same vswitch.
        self.assertTrue(set(self.ovs.get_bridges()).issuperset(bridges))
    def test_bridge_lifecycle_ovsbridge(self):
        """OVSBridge objects only touch the vswitch on create()/destroy()."""
        name = base.get_rand_name(prefix=net_helpers.BR_PREFIX)
        br = ovs_lib.OVSBridge(name)
        self.assertEqual(br.br_name, name)
        # Make sure that instantiating an OVSBridge does not actually create
        # the bridge on the vswitch.
        self.assertFalse(self.ovs.bridge_exists(name))
        self.addCleanup(self.ovs.delete_bridge, name)
        br.create()
        self.assertTrue(self.ovs.bridge_exists(name))
        br.destroy()
        self.assertFalse(self.ovs.bridge_exists(name))
    def test_db_find_column_type_list(self):
        """db_find must agree with db_list for set-typed 'tag' columns."""
        bridge_name = base.get_rand_name(prefix=net_helpers.BR_PREFIX)
        self.addCleanup(self.ovs.delete_bridge, bridge_name)
        br = self.ovs.add_bridge(bridge_name)
        port_name = base.get_rand_name(prefix=net_helpers.PORT_PREFIX)
        br.add_port(port_name)
        # An unset tag is reported as an empty list; set one concrete tag.
        self.ovs.set_db_attribute('Port', port_name, 'tag', 42)
        tags = self.ovs.ovsdb.db_list('Port', columns=['tag']).execute()
        # Make sure that there is data to query.
        # It should be, but let's be a little paranoid here as otherwise
        # the test has no sense.
        tags_present = [t for t in tags if t['tag'] != []]
        self.assertTrue(tags_present)
        tags_42 = [t for t in tags_present if t['tag'] == 42]
        # Equality search on an integer must match db_list's view.
        single_value = self.ovs.ovsdb.db_find(
            'Port', ('tag', '=', 42), columns=['tag']).execute()
        self.assertEqual(tags_42, single_value)
        # Inequality against the empty list must return every set tag.
        len_0_list = self.ovs.ovsdb.db_find(
            'Port', ('tag', '!=', []), columns=['tag']).execute()
        self.assertEqual(tags_present, len_0_list)
| true | true |
f73dcf08333c726afb9f64ef4b38e464a7a0c16e | 4,279 | py | Python | ipython_memory_usage.py | jni/ipython_memory_usage | d835451b625ad7047d404674e91866219f7d9454 | [
"BSD-2-Clause"
] | null | null | null | ipython_memory_usage.py | jni/ipython_memory_usage | d835451b625ad7047d404674e91866219f7d9454 | [
"BSD-2-Clause"
] | null | null | null | ipython_memory_usage.py | jni/ipython_memory_usage | d835451b625ad7047d404674e91866219f7d9454 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Profile mem usage envelope of IPython commands and report interactively"""
from __future__ import division # 1/2 == 0.5, as in Py3
from __future__ import absolute_import # avoid hiding global modules with locals
from __future__ import print_function # force use of print("hello")
from __future__ import unicode_literals # force unadorned strings "" to be unicode without prepending u""
import os
import time
import memory_profiler
from IPython import get_ipython
# To run: %run -i ipython_memory_usage.py
# keep a global accounting for the last known memory usage
# which is the reference point for the memory delta calculation
previous_call_memory_usage = memory_profiler.memory_usage()[0]
t1 = time.time() # will be set to current time later
keep_watching = True
peak_memory_usage = -1
watching_memory = True
input_cells = get_ipython().user_ns['In']
def start_watching_memory():
    """Hook the memory-usage reporters into the running IPython shell."""
    global watching_memory
    watching_memory = True
    shell = get_ipython()
    # Register the post-run reporter first, then the pre-run sampler hook,
    # exactly as the original registration order did.
    for event_name, callback in (("post_run_cell", watch_memory),
                                 ("pre_run_cell", pre_run_cell)):
        shell.events.register(event_name, callback)
def stop_watching_memory():
    """Detach the memory-usage reporters from the IPython shell."""
    global watching_memory
    watching_memory = False
    shell = get_ipython()
    for event_name, callback in (("post_run_cell", watch_memory),
                                 ("pre_run_cell", pre_run_cell)):
        try:
            shell.events.unregister(event_name, callback)
        except ValueError:
            # The callback was never registered (or already removed);
            # unhooking is best-effort, so just move on.
            pass
def watch_memory():
    """IPython ``post_run_cell`` hook: print a RAM/time summary for the cell.

    Consumes the globals maintained by ``pre_run_cell`` (``t1``) and by the
    background sampler thread (``peak_memory_usage``), and updates the
    baseline ``previous_call_memory_usage`` for the next cell.
    """
    # bring in the global memory usage value from the previous iteration
    global previous_call_memory_usage, peak_memory_usage, keep_watching, \
        watching_memory, input_cells
    new_memory_usage = memory_profiler.memory_usage()[0]
    memory_delta = new_memory_usage - previous_call_memory_usage
    # Signal the sampler thread started in pre_run_cell to stop looping.
    keep_watching = False
    # Peak above the current level; clamps to 0 if the sampler never saw a
    # value higher than the post-cell usage (peak_memory_usage starts at -1).
    peaked_memory_usage = max(0, peak_memory_usage - new_memory_usage)
    # calculate time delta using global t1 (from the pre-run event) and current
    # time
    time_delta_secs = time.time() - t1
    # In[0] is the empty bootstrap entry, hence the -1.
    num_commands = len(input_cells) - 1
    cmd = "In [{}]".format(num_commands)
    # convert the results into a pretty string
    output_template = ("{cmd} used {memory_delta:0.4f} MiB RAM in "
                       "{time_delta:0.2f}s, peaked {peaked_memory_usage:0.2f} "
                       "MiB above current, total RAM usage "
                       "{memory_usage:0.2f} MiB")
    output = output_template.format(time_delta=time_delta_secs,
                                    cmd=cmd,
                                    memory_delta=memory_delta,
                                    peaked_memory_usage=peaked_memory_usage,
                                    memory_usage=new_memory_usage)
    if watching_memory:
        print(str(output))
    # New baseline for the next cell's delta calculation.
    previous_call_memory_usage = new_memory_usage
def during_execution_memory_sampler():
    """Poll RAM usage in a background thread while a cell executes.

    Continuously updates the global ``peak_memory_usage`` until
    ``watch_memory`` clears ``keep_watching``, or until a safety cap of
    samples is reached.
    """
    # NOTE(review): imports are repeated locally even though they exist at
    # module level -- presumably defensive for the thread context; confirm.
    import time
    import memory_profiler
    global keep_watching, peak_memory_usage
    # Reset shared state at the start of each sampled cell.
    peak_memory_usage = -1
    keep_watching = True
    n = 0
    WAIT_BETWEEN_SAMPLES_SECS = 0.001
    # Cap corresponds to ~60s of pure sleep time (sampling overhead extra).
    MAX_ITERATIONS = 60.0 / WAIT_BETWEEN_SAMPLES_SECS
    while True:
        mem_usage = memory_profiler.memory_usage()[0]
        peak_memory_usage = max(mem_usage, peak_memory_usage)
        time.sleep(WAIT_BETWEEN_SAMPLES_SECS)
        if not keep_watching or n > MAX_ITERATIONS:
            # exit if we've been told our command has finished or if it has run
            # for more than a sane amount of time (e.g. maybe something crashed
            # and we don't want this to carry on running)
            if n > MAX_ITERATIONS:
                print("{} SOMETHING WEIRD HAPPENED AND THIS RAN FOR TOO LONG, THIS THREAD IS KILLING ITSELF".format(__file__))
            break
        n += 1
def pre_run_cell():
    """Record the cell's start time and launch the RAM sampler thread."""
    import time
    import threading
    global t1
    t1 = time.time()
    # Sample memory in the background until the post-run hook signals the
    # sampler (via the keep_watching global) to stop.
    sampler = threading.Thread(target=during_execution_memory_sampler)
    sampler.daemon = True
    sampler.start()
| 38.205357 | 126 | 0.691049 |
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import os
import time
import memory_profiler
from IPython import get_ipython
previous_call_memory_usage = memory_profiler.memory_usage()[0]
t1 = time.time()
keep_watching = True
peak_memory_usage = -1
watching_memory = True
input_cells = get_ipython().user_ns['In']
def start_watching_memory():
global watching_memory
watching_memory = True
ip = get_ipython()
ip.events.register("post_run_cell", watch_memory)
ip.events.register("pre_run_cell", pre_run_cell)
def stop_watching_memory():
global watching_memory
watching_memory = False
ip = get_ipython()
try:
ip.events.unregister("post_run_cell", watch_memory)
except ValueError:
pass
try:
ip.events.unregister("pre_run_cell", pre_run_cell)
except ValueError:
pass
def watch_memory():
global previous_call_memory_usage, peak_memory_usage, keep_watching, \
watching_memory, input_cells
new_memory_usage = memory_profiler.memory_usage()[0]
memory_delta = new_memory_usage - previous_call_memory_usage
keep_watching = False
peaked_memory_usage = max(0, peak_memory_usage - new_memory_usage)
time_delta_secs = time.time() - t1
num_commands = len(input_cells) - 1
cmd = "In [{}]".format(num_commands)
output_template = ("{cmd} used {memory_delta:0.4f} MiB RAM in "
"{time_delta:0.2f}s, peaked {peaked_memory_usage:0.2f} "
"MiB above current, total RAM usage "
"{memory_usage:0.2f} MiB")
output = output_template.format(time_delta=time_delta_secs,
cmd=cmd,
memory_delta=memory_delta,
peaked_memory_usage=peaked_memory_usage,
memory_usage=new_memory_usage)
if watching_memory:
print(str(output))
previous_call_memory_usage = new_memory_usage
def during_execution_memory_sampler():
import time
import memory_profiler
global keep_watching, peak_memory_usage
peak_memory_usage = -1
keep_watching = True
n = 0
WAIT_BETWEEN_SAMPLES_SECS = 0.001
MAX_ITERATIONS = 60.0 / WAIT_BETWEEN_SAMPLES_SECS
while True:
mem_usage = memory_profiler.memory_usage()[0]
peak_memory_usage = max(mem_usage, peak_memory_usage)
time.sleep(WAIT_BETWEEN_SAMPLES_SECS)
if not keep_watching or n > MAX_ITERATIONS:
# for more than a sane amount of time (e.g. maybe something crashed
# and we don't want this to carry on running)
if n > MAX_ITERATIONS:
print("{} SOMETHING WEIRD HAPPENED AND THIS RAN FOR TOO LONG, THIS THREAD IS KILLING ITSELF".format(__file__))
break
n += 1
def pre_run_cell():
import time
global t1
t1 = time.time()
import threading
ipython_memory_usage_thread = threading.Thread(target=during_execution_memory_sampler)
ipython_memory_usage_thread.daemon = True
ipython_memory_usage_thread.start()
| true | true |
f73dd16d9cd7a3ef584d923eb3ba51f32c287d7b | 920 | py | Python | tests/rules/test_git_remote_seturl_add.py | HiteshMah-Jan/thefuck | 132c62262246824470934c2c6f46919ef6f00203 | [
"MIT"
] | 75,504 | 2015-04-08T18:22:19.000Z | 2022-03-31T23:59:52.000Z | tests/rules/test_git_remote_seturl_add.py | HiteshMah-Jan/thefuck | 132c62262246824470934c2c6f46919ef6f00203 | [
"MIT"
] | 1,160 | 2015-04-17T18:47:12.000Z | 2022-03-30T20:42:26.000Z | tests/rules/test_git_remote_seturl_add.py | HiteshMah-Jan/thefuck | 132c62262246824470934c2c6f46919ef6f00203 | [
"MIT"
] | 4,399 | 2015-04-17T18:36:04.000Z | 2022-03-31T07:01:03.000Z | import pytest
from thefuck.rules.git_remote_seturl_add import match, get_new_command
from thefuck.types import Command
@pytest.mark.parametrize('command', [
    Command('git remote set-url origin url', "fatal: No such remote")])
def test_match(command):
    # The rule fires when `git remote set-url` fails with "No such remote".
    assert match(command)
@pytest.mark.parametrize('command', [
    Command('git remote set-url origin url', ""),
    Command('git remote add origin url', ''),
    Command('git remote remove origin', ''),
    Command('git remote prune origin', ''),
    Command('git remote set-branches origin branch', '')])
def test_not_match(command):
    # A successful set-url (empty output) and other `git remote`
    # subcommands must not trigger the rule.
    assert not match(command)
@pytest.mark.parametrize('command, new_command', [
    (Command('git remote set-url origin git@github.com:nvbn/thefuck.git', ''),
     'git remote add origin git@github.com:nvbn/thefuck.git')])
def test_get_new_command(command, new_command):
    # The suggested fix swaps `set-url` for `add`, keeping remote and URL.
    assert get_new_command(command) == new_command
| 34.074074 | 78 | 0.718478 | import pytest
from thefuck.rules.git_remote_seturl_add import match, get_new_command
from thefuck.types import Command
@pytest.mark.parametrize('command', [
Command('git remote set-url origin url', "fatal: No such remote")])
def test_match(command):
assert match(command)
@pytest.mark.parametrize('command', [
Command('git remote set-url origin url', ""),
Command('git remote add origin url', ''),
Command('git remote remove origin', ''),
Command('git remote prune origin', ''),
Command('git remote set-branches origin branch', '')])
def test_not_match(command):
assert not match(command)
@pytest.mark.parametrize('command, new_command', [
(Command('git remote set-url origin git@github.com:nvbn/thefuck.git', ''),
'git remote add origin git@github.com:nvbn/thefuck.git')])
def test_get_new_command(command, new_command):
assert get_new_command(command) == new_command
| true | true |
f73dd18186567c90c3c5db7807ea0ea6a87deded | 940 | py | Python | project2/contrib/sites/migrations/0002_set_site_domain_and_name.py | joannex/django_2- | b0e1fefc6a12d61def2bf31e01728ad9c718c225 | [
"BSD-3-Clause"
] | null | null | null | project2/contrib/sites/migrations/0002_set_site_domain_and_name.py | joannex/django_2- | b0e1fefc6a12d61def2bf31e01728ad9c718c225 | [
"BSD-3-Clause"
] | null | null | null | project2/contrib/sites/migrations/0002_set_site_domain_and_name.py | joannex/django_2- | b0e1fefc6a12d61def2bf31e01728ad9c718c225 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations
def update_site_forward(apps, schema_editor):
    """Point the default Site record at this project's domain and name."""
    # Use the historical model so the migration stays valid as the
    # schema evolves.
    site_model = apps.get_model("sites", "Site")
    new_values = {
        "domain": "example.com",
        "name": "project2",
    }
    site_model.objects.update_or_create(id=settings.SITE_ID,
                                        defaults=new_values)
def update_site_backward(apps, schema_editor):
    """Restore the stock example.com values on the default Site record."""
    # Use the historical model so the migration stays valid as the
    # schema evolves.
    site_model = apps.get_model("sites", "Site")
    stock_values = {
        "domain": "example.com",
        "name": "example.com",
    }
    site_model.objects.update_or_create(id=settings.SITE_ID,
                                        defaults=stock_values)
class Migration(migrations.Migration):
    # Must run after the sites app's initial migration so the Site table
    # exists before we write to it.
    dependencies = [
        ('sites', '0001_initial'),
    ]
    operations = [
        # Reversible data migration: forward sets this project's site
        # record, backward restores the stock example.com values.
        migrations.RunPython(update_site_forward, update_site_backward),
    ]
| 22.926829 | 72 | 0.61383 |
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations
def update_site_forward(apps, schema_editor):
Site = apps.get_model("sites", "Site")
Site.objects.update_or_create(
id=settings.SITE_ID,
defaults={
"domain": "example.com",
"name": "project2"
}
)
def update_site_backward(apps, schema_editor):
Site = apps.get_model("sites", "Site")
Site.objects.update_or_create(
id=settings.SITE_ID,
defaults={
"domain": "example.com",
"name": "example.com"
}
)
class Migration(migrations.Migration):
dependencies = [
('sites', '0001_initial'),
]
operations = [
migrations.RunPython(update_site_forward, update_site_backward),
]
| true | true |
f73dd1c7743d1ca808f9019ffdb5cabd81e791f2 | 2,697 | py | Python | apiritif/thread.py | undera/apiritif | 56f42a24604a06cbd390949223f2a840469b34d8 | [
"Apache-2.0"
] | null | null | null | apiritif/thread.py | undera/apiritif | 56f42a24604a06cbd390949223f2a840469b34d8 | [
"Apache-2.0"
] | null | null | null | apiritif/thread.py | undera/apiritif | 56f42a24604a06cbd390949223f2a840469b34d8 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2019 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from threading import local
from apiritif.action_plugins import BaseActionHandler
# Number of configured virtual users, shared across all worker threads.
_total = 1
# Per-thread storage for index/iteration counters and arbitrary args/kwargs.
_thread_local = local()
def set_total(total):
    """Set the total number of virtual users for this run."""
    global _total
    _total = total
def get_total():
    """Return the total number of virtual users (defaults to 1)."""
    # Read-only access: no `global` declaration needed.
    return _total
def set_index(index):
    """Set the current thread's virtual-user index."""
    _thread_local.index = index
def get_index():
    """Return this thread's virtual-user index, or 0 if never set."""
    return getattr(_thread_local, "index", 0)
def set_iteration(iteration):
    """Set the current thread's iteration counter."""
    _thread_local.iteration = iteration
def get_iteration():
    """Return this thread's iteration counter, or 0 if never set."""
    return getattr(_thread_local, "iteration", 0)
def put_into_thread_store(*args, **kwargs):
    """Store positional args (replacing previous ones) and merge keyword
    args into this thread's store.
    """
    if args:
        _thread_local.args = args
    if kwargs:
        # Merge into any existing mapping so earlier keys survive.
        current_kwargs = getattr(_thread_local, "kwargs", {})
        current_kwargs.update(kwargs)
        _thread_local.kwargs = current_kwargs
def get_from_thread_store(names=None):
    """Fetch values from this thread's store.

    If ``names`` is a string, return that single stored value; if it is a
    list, return the values in the same order (missing keys yield None).
    When ``names`` is falsy or no kwargs were stored, fall back to the
    stored positional args, or None if nothing was ever stored.
    """
    if names and hasattr(_thread_local, "kwargs"):
        only_one = False
        if isinstance(names, str):
            names = [names]
            only_one = True
        values = [_thread_local.kwargs.get(key) for key in names]
        return values[0] if only_one else values
    elif hasattr(_thread_local, "args"):
        return _thread_local.args
def get_transaction_handlers():
    """Return the transaction handlers stored for the current thread."""
    transaction_handlers = get_from_thread_store('transaction_handlers')
    return transaction_handlers
def clean_transaction_handlers():
    """Reset this thread's transaction handlers to empty enter/exit lists.

    NOTE(review): writes straight into ``_thread_local.kwargs``, which
    assumes ``put_into_thread_store`` already ran on this thread --
    otherwise this raises AttributeError. Confirm callers guarantee that.
    """
    handlers = {'enter': [], 'exit': []}
    _thread_local.kwargs["transaction_handlers"] = handlers
def set_transaction_handlers(handlers):
    """Store the given transaction handlers for the current thread."""
    put_into_thread_store(transaction_handlers=handlers)
def external_handler(session_id, action_type, action):
    """Dispatch an action event to every registered BaseActionHandler."""
    for handler in get_action_handlers():
        # Entries that are not BaseActionHandler instances are skipped.
        if isinstance(handler, BaseActionHandler):
            handler.handle(session_id, action_type, action)
def get_action_handlers():
    """Return the action handlers stored for the current thread."""
    return get_from_thread_store("action_handlers")
# Deprecated
def external_log(log_line):
    """Deprecated no-op; kept so old callers do not break."""
    pass
# Deprecated
def set_logging_handlers(handlers):
    """Deprecated no-op; kept so old callers do not break."""
    pass
# Deprecated
def get_logging_handlers():
    """Deprecated no-op; always returns None."""
    pass
# Deprecated
def add_logging_handlers(methods=None):
    """Deprecated no-op; kept so old callers do not break."""
    pass
| 22.475 | 72 | 0.718205 | from threading import local
from apiritif.action_plugins import BaseActionHandler
_total = 1
_thread_local = local()
def set_total(total):
global _total
_total = total
def get_total():
global _total
return _total
def set_index(index):
_thread_local.index = index
def get_index():
index = getattr(_thread_local, "index", 0)
return index
def set_iteration(iteration):
_thread_local.iteration = iteration
def get_iteration():
iteration = getattr(_thread_local, "iteration", 0)
return iteration
def put_into_thread_store(*args, **kwargs):
if args:
_thread_local.args = args
if kwargs:
current_kwargs = getattr(_thread_local, "kwargs", {})
current_kwargs.update(kwargs)
_thread_local.kwargs = current_kwargs
def get_from_thread_store(names=None):
if names and hasattr(_thread_local, "kwargs"):
only_one = False
if isinstance(names, str):
names = [names]
only_one = True
kwargs = [_thread_local.kwargs.get(key) for key in names]
if only_one:
return kwargs[0]
else:
return kwargs
elif hasattr(_thread_local, "args"):
return _thread_local.args
def get_transaction_handlers():
transaction_handlers = get_from_thread_store('transaction_handlers')
return transaction_handlers
def clean_transaction_handlers():
handlers = {'enter': [], 'exit': []}
_thread_local.kwargs["transaction_handlers"] = handlers
def set_transaction_handlers(handlers):
put_into_thread_store(transaction_handlers=handlers)
def external_handler(session_id, action_type, action):
for handler in get_action_handlers():
if isinstance(handler, BaseActionHandler):
handler.handle(session_id, action_type, action)
def get_action_handlers():
return get_from_thread_store("action_handlers")
def external_log(log_line):
pass
def set_logging_handlers(handlers):
pass
def get_logging_handlers():
pass
def add_logging_handlers(methods=None):
pass
| true | true |
f73dd3829ab3edcd30615a38fedbb215bebbe293 | 5,624 | py | Python | pypureclient/flasharray/FA_2_7/models/protection_group_snapshot_patch.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 14 | 2018-12-07T18:30:27.000Z | 2022-02-22T09:12:33.000Z | pypureclient/flasharray/FA_2_7/models/protection_group_snapshot_patch.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 28 | 2019-09-17T21:03:52.000Z | 2022-03-29T22:07:35.000Z | pypureclient/flasharray/FA_2_7/models/protection_group_snapshot_patch.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 15 | 2020-06-11T15:50:08.000Z | 2022-03-21T09:27:25.000Z | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.7
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_7 import models
class ProtectionGroupSnapshotPatch(object):
    """Swagger-generated model for PATCHing a protection group snapshot.

    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.

    NOTE: auto-generated by swagger-codegen; hand edits here may be
    clobbered on regeneration.
    """
    # Maps each model attribute to its declared Swagger type.
    swagger_types = {
        'name': 'str',
        'created': 'int',
        'destroyed': 'bool',
        'pod': 'FixedReference',
        'source': 'FixedReference',
        'space': 'Space',
        'suffix': 'str',
        'time_remaining': 'int'
    }
    # Maps each model attribute to its JSON key in the API payload.
    attribute_map = {
        'name': 'name',
        'created': 'created',
        'destroyed': 'destroyed',
        'pod': 'pod',
        'source': 'source',
        'space': 'space',
        'suffix': 'suffix',
        'time_remaining': 'time_remaining'
    }
    # No constructor arguments are required for a PATCH body.
    required_args = {
    }
    def __init__(
        self,
        name=None,  # type: str
        created=None,  # type: int
        destroyed=None,  # type: bool
        pod=None,  # type: models.FixedReference
        source=None,  # type: models.FixedReference
        space=None,  # type: models.Space
        suffix=None,  # type: str
        time_remaining=None,  # type: int
    ):
        """
        Keyword args:
            name (str): A user-specified name. The name must be locally unique and can be changed.
            created (int): The snapshot creation time of the original snapshot source. Measured in milliseconds since the UNIX epoch.
            destroyed (bool): Returns a value of `true` if the protection group snapshot has been destroyed and is pending eradication. The `time_remaining` value displays the amount of time left until the destroyed snapshot is permanently eradicated. Before the `time_remaining` period has elapsed, the destroyed snapshot can be recovered by setting `destroyed=false`. Once the `time_remaining` period has elapsed, the snapshot is permanently eradicated and can no longer be recovered.
            pod (FixedReference): The pod in which the protection group of the protection group snapshot resides.
            source (FixedReference): The original protection group from which this snapshot was taken.
            space (Space): Returns provisioned (virtual) size and physical storage consumption data for each protection group.
            suffix (str)
            time_remaining (int): The amount of time left until the destroyed snapshot is permanently eradicated. Measured in milliseconds. Before the `time_remaining` period has elapsed, the destroyed snapshot can be recovered by setting `destroyed=false`.
        """
        # Only explicitly supplied fields are set, so a PATCH body
        # contains exactly the attributes the caller wants to change.
        if name is not None:
            self.name = name
        if created is not None:
            self.created = created
        if destroyed is not None:
            self.destroyed = destroyed
        if pod is not None:
            self.pod = pod
        if source is not None:
            self.source = source
        if space is not None:
            self.space = space
        if suffix is not None:
            self.suffix = suffix
        if time_remaining is not None:
            self.time_remaining = time_remaining
    def __setattr__(self, key, value):
        # Reject attribute names that are not part of the declared model.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `ProtectionGroupSnapshotPatch`".format(key))
        self.__dict__[key] = value
    def __getattribute__(self, item):
        # A raw `Property` placeholder means the field was never set;
        # surface that as a missing attribute rather than leaking the sentinel.
        value = object.__getattribute__(self, item)
        if isinstance(value, Property):
            raise AttributeError
        else:
            return value
    def to_dict(self):
        """Returns the model properties as a dict."""
        result = {}
        # Recursively convert nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                if isinstance(value, list):
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        if issubclass(ProtectionGroupSnapshotPatch, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model."""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal."""
        if not isinstance(other, ProtectionGroupSnapshotPatch):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal."""
        return not self == other
| 36.519481 | 486 | 0.59175 |
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_7 import models
class ProtectionGroupSnapshotPatch(object):
swagger_types = {
'name': 'str',
'created': 'int',
'destroyed': 'bool',
'pod': 'FixedReference',
'source': 'FixedReference',
'space': 'Space',
'suffix': 'str',
'time_remaining': 'int'
}
attribute_map = {
'name': 'name',
'created': 'created',
'destroyed': 'destroyed',
'pod': 'pod',
'source': 'source',
'space': 'space',
'suffix': 'suffix',
'time_remaining': 'time_remaining'
}
required_args = {
}
def __init__(
self,
name=None,
created=None,
destroyed=None,
pod=None,
source=None,
space=None,
suffix=None,
time_remaining=None,
):
if name is not None:
self.name = name
if created is not None:
self.created = created
if destroyed is not None:
self.destroyed = destroyed
if pod is not None:
self.pod = pod
if source is not None:
self.source = source
if space is not None:
self.space = space
if suffix is not None:
self.suffix = suffix
if time_remaining is not None:
self.time_remaining = time_remaining
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ProtectionGroupSnapshotPatch`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ProtectionGroupSnapshotPatch, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, ProtectionGroupSnapshotPatch):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f73dd4052f08df8c7bd21f0ed0cd561e40a66502 | 6,205 | py | Python | mybooks/books/books_views.py | yanghao2008/imagetext | 82cbdb21829ccebe0039ad48eaca858d37381a6d | [
"MIT"
] | 1 | 2021-12-30T13:03:52.000Z | 2021-12-30T13:03:52.000Z | mybooks/books/books_views.py | yanghao2008/imagetext | 82cbdb21829ccebe0039ad48eaca858d37381a6d | [
"MIT"
] | null | null | null | mybooks/books/books_views.py | yanghao2008/imagetext | 82cbdb21829ccebe0039ad48eaca858d37381a6d | [
"MIT"
] | null | null | null | from django.shortcuts import render
from books import models # 导入models文件
from django.contrib.auth.decorators import login_required,permission_required
from mybooks import settings
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
import re
import os
from PIL import Image
import re
import base64
from io import BytesIO
def highlight(matched):
    """re.sub replacement callback: wrap the named group 'value' in a red <strong> tag."""
    return ''.join(('<strong style="background:red">', matched.group('value'), '</strong>'))
def abstract(text):
    """Return *text* trimmed to ~50 chars of context around the first opening
    and last closing highlight tag (tags themselves are preserved)."""
    open_tag = '<strong style="background:red">'
    close_tag = '</strong>'
    before, _, remainder = text.partition(open_tag)
    inner, _, after = remainder.rpartition(close_tag)
    return before[-50:] + open_tag + inner + close_tag + after[:50]
def getleginfo(BKname, page):
    """Build the purple citation line: author:《title》,place:publisher,year年,page."""
    parts = (
        '<font color="purple">',
        BKname.author, ':', '《', BKname.bookname, '》,',
        BKname.pubaddress, ':', BKname.publisher, ',',
        BKname.year, '年,', page,
        '</font>',
    )
    return ''.join(parts)
def search(request):
    """Full-text search view: regex-match the query against stored OCR text,
    paginate, then highlight hits and attach a citation line per result.

    POST params: query_str (search terms, space-separated), bookname
    (substring filter on book title), page (page number).
    """
    if request.method == 'POST':
        query_str = request.POST.get('query_str').strip()
        # Collapse runs of spaces so split(' ') below yields clean terms.
        query_str=re.sub(' +', ' ', query_str)
        bookname = request.POST.get('bookname')
        if query_str != '':
            # NOTE(review): query_str is interpolated into a regex unescaped,
            # so regex metacharacters from the user change (or break) the
            # query -- consider re.escape() if literal search is intended.
            if ' ' in query_str:
                # Multiple terms: match them in order with anything between.
                queryregex = query_str.replace(' ', '.*')
                imagetext = models.BooksImageText.objects.filter(text__regex=queryregex, book_id__bookname__contains=bookname).order_by('id')
            else:
                imagetext = models.BooksImageText.objects.filter(text__regex=query_str, book_id__bookname__contains=bookname).order_by('id')
            # Pagination
            paginator = Paginator(imagetext, 20) # items per page
            page = request.POST.get('page')
            try:
                pagesImagetext = paginator.page(page)
            except PageNotAnInteger:
                # If page is not an integer, deliver first page.
                pagesImagetext = paginator.page(1)
            except EmptyPage:
                # If page is out of range (e.g. 9999), deliver last page of results.
                pagesImagetext = paginator.page(paginator.num_pages)
            # Highlight hits and replace each row's text with a short excerpt
            # plus its citation line (template-ready HTML).
            if ' ' in query_str:
                for it in pagesImagetext:
                    sub_query_strs=query_str.split(' ')
                    for sqs in sub_query_strs:
                        it.text=re.sub('(?P<value>'+sqs+')', highlight, it.text)
                    BKname=models.Info.objects.get(book_id=it.book_id)
                    copyright=getleginfo(BKname,it.page)
                    it.text=abstract(it.text)+'<br>'+copyright
            else:
                for it in pagesImagetext:
                    BKname=models.Info.objects.get(book_id=it.book_id)
                    copyright=getleginfo(BKname,it.page)
                    it.text=abstract(re.sub('(?P<value>'+query_str+')', highlight, it.text))+'<br>'+copyright
            # NOTE(review): len(imagetext) evaluates the full queryset just
            # for the count -- imagetext.count() (or paginator.count) would
            # avoid materializing every row.
            return render(request, 'index.html', {'returninfo':'共在'+str(len(imagetext))+'个页面上找到检索信息。','imagetext': pagesImagetext,'query_str': query_str,'bookname': bookname})
        else:
            return render(request, 'index.html')
    else:
        return render(request, 'index.html')
def view(request):
    """Render one scanned page: its image (inlined as base64 PNG), the OCR
    text from the sidecar .txt file, and a citation line.

    POST param: key_Id -- primary key of the BooksImageText row.
    NOTE(review): a non-POST request falls through and returns None, which
    Django rejects -- confirm whether GET should redirect or 405 instead.
    """
    if request.method == 'POST':
        key_Id = request.POST.get('key_Id')
        imagetext = models.BooksImageText.objects.get(id=key_Id)
        with open(imagetext.txt,'r',encoding='UTF-8') as txtfile:
            textcontent=txtfile.read()
        # Re-encode the stored image as PNG in memory so the template can
        # embed it via a data: URI.
        # NOTE(review): the Image.open() handle is never closed explicitly.
        img = Image.open(imagetext.image)
        output_buffer = BytesIO()
        img.save(output_buffer, format='png')
        byte_data = output_buffer.getvalue()
        base64_data = base64.b64encode(byte_data)
        BKname=models.Info.objects.get(book_id=imagetext.book_id)
        copyright=getleginfo(BKname,imagetext.page)
        # 'before'/'next' assume consecutive integer ids -- TODO confirm ids
        # are contiguous, otherwise navigation can point at missing rows.
        return render(request, 'books_view.html', {'copyright':copyright,'base64_data':str(base64_data,'utf-8'),'imagetext': imagetext,'textcontent':textcontent,'before':int(key_Id)-1,'next':int(key_Id)+1})
def removeBom(file):
    """Strip a leading UTF-8 BOM from *file* in place.

    If the file's first three bytes are the UTF-8 BOM (EF BB BF), the file
    is rewritten without them; otherwise it is left untouched.

    Fixes over the previous version: the read handle was never closed (the
    close() call was commented out), leaking a descriptor on every call and
    rewriting the file while it was still open for reading.
    """
    BOM = b'\xef\xbb\xbf'
    with open(file, 'rb') as f:
        if f.read(3) != BOM:
            return  # no BOM -- leave the file as-is
        fbody = f.read()  # everything after the BOM
    # Read handle is closed before rewriting, so this is safe on all platforms.
    with open(file, 'wb') as f:
        f.write(fbody)
def edit(request):
    """Edit view for one scanned page's OCR text.

    With POST param 'textarea' present: save the edited text (both to the
    searchable DB field and to the sidecar .txt file), then fall through to
    re-render the editor. Without 'textarea': just load and render.

    POST params: key_Id (row id), textarea (edited HTML text, optional).
    """
    if request.method == 'POST':
        textarea = request.POST.get('textarea')
        key_Id = request.POST.get('key_Id')
        if textarea != None:
            # NOTE(review): assumes the previous page is id-1 -- raises
            # DoesNotExist for the first record or non-contiguous ids.
            formerImageText=models.BooksImageText.objects.get(id=str(int(key_Id)-1))
            formertext=''
            # Take the last 30 characters of the previous record's text so
            # phrases spanning a page break remain searchable.
            text=re.sub('<.+?>','',formerImageText.text)
            text=text.strip().replace('\t','').replace('\r','').replace('\n','').replace(' ','').replace(' ','')
            if len(text) > 30:
                formertext=text[-30:]
            else:
                formertext=text
            # Store both records' text, stripped of HTML tags and whitespace,
            # into the searchable 'text' field.
            current_text=re.sub('<.+?>','',textarea)
            current_text=current_text.strip().replace('\t','').replace('\r','').replace('\n','').replace(' ','').replace(' ','')
            models.BooksImageText.objects.filter(id=key_Id).update(text=formertext+current_text)
            # Update the underlying .txt file with the new textarea content,
            # dropping the extra newlines the textarea introduces.
            imagetext = models.BooksImageText.objects.get(id=key_Id)
            removeBom(imagetext.txt)
            with open(imagetext.txt,'w',encoding='UTF-8') as txtfile:
                txtfile.write('%s' % (textarea.replace('\n','')))
        # No textarea: this is a load, not a save -- fetch the current text.
        imagetext = models.BooksImageText.objects.get(id=key_Id)
        if os.path.exists(imagetext.txt):
            removeBom(imagetext.txt)
        with open(imagetext.txt,'r',encoding='UTF-8') as txtfile:
            textcontent=txtfile.read()
        # Inline the page image as base64 PNG for the template.
        img = Image.open(imagetext.image)
        output_buffer = BytesIO()
        img.save(output_buffer, format='png')
        byte_data = output_buffer.getvalue()
        base64_data = base64.b64encode(byte_data)
        BKname=models.Info.objects.get(book_id=imagetext.book_id)
        copyright=getleginfo(BKname,imagetext.page)
        return render(request, 'books_edit.html', {'copyright':copyright,'base64_data':str(base64_data,'utf-8'),'imagetext': imagetext,'textcontent':textcontent,'before':int(key_Id)-1,'next':int(key_Id)+1})
| 46.30597 | 202 | 0.613699 | from django.shortcuts import render
from books import models
from django.contrib.auth.decorators import login_required,permission_required
from mybooks import settings
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
import re
import os
from PIL import Image
import re
import base64
from io import BytesIO
def highlight(matched):
value = matched.group('value')
return '<strong style="background:red">'+value+'</strong>'
def abstract(text):
front=text.partition('<strong style="background:red">')
rear=front[2].rpartition('</strong>')
return front[0][-50:]+'<strong style="background:red">'+rear[0]+'</strong>'+rear[2][:50]
def getleginfo(BKname,page):
return '<font color="purple">'+BKname.author+':'+'《'+BKname.bookname+'》,'+BKname.pubaddress+':'+BKname.publisher+','+BKname.year+'年,'+page+'</font>'
def search(request):
if request.method == 'POST':
query_str = request.POST.get('query_str').strip()
query_str=re.sub(' +', ' ', query_str)
bookname = request.POST.get('bookname')
if query_str != '':
if ' ' in query_str:
queryregex = query_str.replace(' ', '.*')
imagetext = models.BooksImageText.objects.filter(text__regex=queryregex, book_id__bookname__contains=bookname).order_by('id')
else:
imagetext = models.BooksImageText.objects.filter(text__regex=query_str, book_id__bookname__contains=bookname).order_by('id')
paginator = Paginator(imagetext, 20)
page = request.POST.get('page')
try:
pagesImagetext = paginator.page(page)
except PageNotAnInteger:
pagesImagetext = paginator.page(1)
except EmptyPage:
pagesImagetext = paginator.page(paginator.num_pages)
if ' ' in query_str:
for it in pagesImagetext:
sub_query_strs=query_str.split(' ')
for sqs in sub_query_strs:
it.text=re.sub('(?P<value>'+sqs+')', highlight, it.text)
BKname=models.Info.objects.get(book_id=it.book_id)
copyright=getleginfo(BKname,it.page)
it.text=abstract(it.text)+'<br>'+copyright
else:
for it in pagesImagetext:
BKname=models.Info.objects.get(book_id=it.book_id)
copyright=getleginfo(BKname,it.page)
it.text=abstract(re.sub('(?P<value>'+query_str+')', highlight, it.text))+'<br>'+copyright
return render(request, 'index.html', {'returninfo':'共在'+str(len(imagetext))+'个页面上找到检索信息。','imagetext': pagesImagetext,'query_str': query_str,'bookname': bookname})
else:
return render(request, 'index.html')
else:
return render(request, 'index.html')
def view(request):
if request.method == 'POST':
key_Id = request.POST.get('key_Id')
imagetext = models.BooksImageText.objects.get(id=key_Id)
with open(imagetext.txt,'r',encoding='UTF-8') as txtfile:
textcontent=txtfile.read()
img = Image.open(imagetext.image)
output_buffer = BytesIO()
img.save(output_buffer, format='png')
byte_data = output_buffer.getvalue()
base64_data = base64.b64encode(byte_data)
BKname=models.Info.objects.get(book_id=imagetext.book_id)
copyright=getleginfo(BKname,imagetext.page)
return render(request, 'books_view.html', {'copyright':copyright,'base64_data':str(base64_data,'utf-8'),'imagetext': imagetext,'textcontent':textcontent,'before':int(key_Id)-1,'next':int(key_Id)+1})
def removeBom(file):
BOM = b'\xef\xbb\xbf'
existBom = lambda s: True if s == BOM else False
f = open(file, 'rb')
if existBom(f.read(3)):
fbody = f.read()
with open(file, 'wb') as f:
f.write(fbody)
def edit(request):
if request.method == 'POST':
textarea = request.POST.get('textarea')
key_Id = request.POST.get('key_Id')
if textarea != None:
formerImageText=models.BooksImageText.objects.get(id=str(int(key_Id)-1))
formertext=''
text=re.sub('<.+?>','',formerImageText.text)
text=text.strip().replace('\t','').replace('\r','').replace('\n','').replace(' ','').replace(' ','')
if len(text) > 30:
formertext=text[-30:]
else:
formertext=text
current_text=re.sub('<.+?>','',textarea)
current_text=current_text.strip().replace('\t','').replace('\r','').replace('\n','').replace(' ','').replace(' ','')
models.BooksImageText.objects.filter(id=key_Id).update(text=formertext+current_text)
imagetext = models.BooksImageText.objects.get(id=key_Id)
removeBom(imagetext.txt)
with open(imagetext.txt,'w',encoding='UTF-8') as txtfile:
txtfile.write('%s' % (textarea.replace('\n','')))
imagetext = models.BooksImageText.objects.get(id=key_Id)
if os.path.exists(imagetext.txt):
removeBom(imagetext.txt)
with open(imagetext.txt,'r',encoding='UTF-8') as txtfile:
textcontent=txtfile.read()
img = Image.open(imagetext.image)
output_buffer = BytesIO()
img.save(output_buffer, format='png')
byte_data = output_buffer.getvalue()
base64_data = base64.b64encode(byte_data)
BKname=models.Info.objects.get(book_id=imagetext.book_id)
copyright=getleginfo(BKname,imagetext.page)
return render(request, 'books_edit.html', {'copyright':copyright,'base64_data':str(base64_data,'utf-8'),'imagetext': imagetext,'textcontent':textcontent,'before':int(key_Id)-1,'next':int(key_Id)+1})
| true | true |
f73dd541ce5629d84704a46de8edb6c6f2b9dab8 | 6,484 | py | Python | Development_System/detection_opencv.py | grebtsew/Object-and-Facial-detection-in-python | 57c4bf8d934cc8d6dbaa0cfc56b2b343795ceef1 | [
"MIT"
] | 15 | 2018-08-21T12:17:16.000Z | 2022-02-13T13:09:49.000Z | Development_System/detection_opencv.py | grebtsew/Object-and-Facial-detection-in-python | 57c4bf8d934cc8d6dbaa0cfc56b2b343795ceef1 | [
"MIT"
] | 4 | 2020-01-28T17:07:01.000Z | 2021-05-23T14:27:32.000Z | Development_System/detection_opencv.py | grebtsew/Object-and-Facial-detection-in-python | 57c4bf8d934cc8d6dbaa0cfc56b2b343795ceef1 | [
"MIT"
] | 5 | 2018-11-16T19:19:00.000Z | 2022-02-13T13:09:51.000Z | # Detection thread
# Models are from http://alereimondo.no-ip.org/OpenCV/34/
# Check it out, there are plenty of them!
# Useful and fast
'''
If you wanna train your own models check this out!
https://docs.opencv.org/3.4/dc/d88/tutorial_traincascade.html
'''
# Code from https://docs.opencv.org/3.4/d7/d8b/tutorial_py_face_detection.html
# imports
import utils.logging_data as LOG
import cv2
import imutils
import os
import sys
import threading
import numpy as np
import re
import time
import datetime
#Detection
# Class that handle detection in own thread
class Detection(threading.Thread):
    """Face/landmark detection thread using OpenCV Haar cascades.

    Reads frames from shared_variables.frame[index], runs face and
    facial-feature detection, publishes the resulting boxes back into
    shared_variables, and runs a "flipp test" (rotating the frame in 90
    degree steps) when faces repeatedly fail to appear.
    """
    # Cascade placeholders (shadowed by locals loaded in run()).
    face_cascade = []
    facial_features_cascade = []
    # Flipp-test state: current rotation step and step size in degrees.
    flipp_test_nr = 1
    flipp_test_degree = 90
    do_flipp_test = False
    flipp_test_long_intervall = 12
    # Timestamps for measuring one detection pass.
    start_time = None
    end_time = None
    # Thread sleep times (seconds) between detection passes.
    sleep_time = 0.1
    LONG_SLEEP = 2
    SHORT_SLEEP = 0.5
    # Consecutive misses before entering the flipp-test / energy-save path.
    no_face_count = 0
    NO_FACE_MAX = 10
    # NOTE(review): Loaded_model is never read or written below.
    Loaded_model = False
    def __init__(self, name=None, shared_variables = None):
        """Create the detection thread.

        name: stringified camera index (also used as the thread name);
              must parse as int.
        shared_variables: shared state object holding frames, result boxes
              and control flags for all cameras.
        """
        threading.Thread.__init__(self)
        self.name = name
        self.shared_variables = shared_variables
        self.sleep_time = self.SHORT_SLEEP
        self.index = int(name)
        LOG.info("Create dlib detection" + str(self.index), "SYSTEM-"+self.shared_variables.name)
    def run(self):
        """Main detection loop; exits when shared_variables.system_running is False."""
        # Load Haar cascade models once, before the loop.
        LOG.info("Loading OPENCV model" + str(self.index),"SYSTEM-"+self.shared_variables.name)
        face_cascade = cv2.CascadeClassifier('utils/haarcascade_frontalface_default.xml')
        facial_features_cascade = cv2.CascadeClassifier('utils/haarcascade_facial_features.xml')
        LOG.info("Start opencv detections " + str(self.index),"SYSTEM-"+self.shared_variables.name)
        while self.shared_variables.system_running:
            self.start_time = datetime.datetime.now()
            frame = self.shared_variables.frame[self.index]
            # During a flipp test, rotate the frame by the current step.
            if self.do_flipp_test:
                frame = imutils.rotate(frame, self.flipp_test_degree*self.flipp_test_nr)
            if frame is not None :
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                landmarksAndFaces = []
                face_patches = face_cascade.detectMultiScale(gray, 1.3, 5)
                # Faces found: collect the first face box plus feature boxes
                # inside every detected face region.
                if len(face_patches) > 0:
                    landmarksAndFaces.append(face_patches[0].tolist())
                    for (x,y,w,h) in face_patches:
                        roi_gray = gray[y:y+h, x:x+w]
                        # Feature boxes are returned in face-local coords;
                        # translate them back to full-frame coords.
                        for (ex,ey,ew,eh) in facial_features_cascade.detectMultiScale(roi_gray):
                            landmarksAndFaces.append( [x + ex, y + ey, ew, eh] )
                    self.no_face_count = 0
                    self.shared_variables.face_found[self.index] = True
                    # Publish boxes to the shared state.
                    self.shared_variables.face_box[self.index] = landmarksAndFaces
                    self.shared_variables.set_detection_box(landmarksAndFaces, self.index)
                    # Flipp test succeeded: record the rotation that worked.
                    if self.shared_variables.flipp_test[self.index] and self.do_flipp_test:
                        degree = self.shared_variables.flipp_test_degree[self.index] + self.flipp_test_nr*self.flipp_test_degree
                        # NOTE(review): this does not normalize to [0, 360);
                        # `degree % 360` was probably intended.
                        degree = degree - (degree % 360)*360
                        self.shared_variables.flipp_test_degree[self.index] = degree
                        LOG.log("Flipp test successful add degree :" + str(self.flipp_test_nr*self.flipp_test_degree),self.shared_variables.name)
                        # End the flipp test.
                        self.do_flipp_test = False
                        self.flipp_test_nr = 1
                else:
                    # No face in this frame.
                    self.shared_variables.face_found[self.index] = False
                    if self.no_face_count >= self.NO_FACE_MAX :
                        # NOTE(review): this tests the whole flipp_test list
                        # (always truthy if non-empty); elsewhere the code
                        # indexes flipp_test[self.index] -- likely a bug.
                        if self.shared_variables.flipp_test:
                            if self.do_flipp_test:
                                # Advance to the next rotation step.
                                self.flipp_test_nr = self.flipp_test_nr + 1
                                # Full rotation tried without success: give up
                                # and slow down the loop.
                                if self.flipp_test_nr*self.flipp_test_degree >= 360:
                                    self.do_flipp_test = False
                                    self.flipp_test_nr = 1
                                    self.sleep_time = self.LONG_SLEEP
                            else:
                                self.do_flipp_test = True
                        else:
                            # Energy-save path intentionally disabled.
                            #self.sleep_time = self.LONG_SLEEP
                            #self.shared_variables.tracking_running = False
                            #LOG.log("Initiate energy save",self.shared_variables.name)
                            pass
                    else:
                        self.no_face_count = self.no_face_count + 1
                        # Periodically reset the miss counter while flipp
                        # testing is enabled for this camera.
                        if self.no_face_count >= self.flipp_test_long_intervall and self.shared_variables.flipp_test[self.index]:
                            self.no_face_count = 0
            self.end_time = datetime.datetime.now()
            # Log how long one detection pass took.
            if self.shared_variables.debug:
                LOG.debug('OPENCV Detection time:' + str(self.end_time - self.start_time),self.shared_variables.name)
            time.sleep(self.sleep_time) # throttle the loop
        LOG.info("Ending OPENCV detection " + str(self.index), "SYSTEM-"+self.shared_variables.name )
| 35.823204 | 153 | 0.550123 |
import utils.logging_data as LOG
import cv2
import imutils
import os
import sys
import threading
import numpy as np
import re
import time
import datetime
class Detection(threading.Thread):
face_cascade = []
facial_features_cascade = []
flipp_test_nr = 1
flipp_test_degree = 90
do_flipp_test = False
flipp_test_long_intervall = 12
start_time = None
end_time = None
sleep_time = 0.1
LONG_SLEEP = 2
SHORT_SLEEP = 0.5
no_face_count = 0
NO_FACE_MAX = 10
Loaded_model = False
def __init__(self, name=None, shared_variables = None):
threading.Thread.__init__(self)
self.name = name
self.shared_variables = shared_variables
self.sleep_time = self.SHORT_SLEEP
self.index = int(name)
LOG.info("Create dlib detection" + str(self.index), "SYSTEM-"+self.shared_variables.name)
def run(self):
LOG.info("Loading OPENCV model" + str(self.index),"SYSTEM-"+self.shared_variables.name)
face_cascade = cv2.CascadeClassifier('utils/haarcascade_frontalface_default.xml')
facial_features_cascade = cv2.CascadeClassifier('utils/haarcascade_facial_features.xml')
LOG.info("Start opencv detections " + str(self.index),"SYSTEM-"+self.shared_variables.name)
while self.shared_variables.system_running:
self.start_time = datetime.datetime.now()
frame = self.shared_variables.frame[self.index]
if self.do_flipp_test:
frame = imutils.rotate(frame, self.flipp_test_degree*self.flipp_test_nr)
if frame is not None :
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
landmarksAndFaces = []
face_patches = face_cascade.detectMultiScale(gray, 1.3, 5)
if len(face_patches) > 0:
landmarksAndFaces.append(face_patches[0].tolist())
for (x,y,w,h) in face_patches:
roi_gray = gray[y:y+h, x:x+w]
for (ex,ey,ew,eh) in facial_features_cascade.detectMultiScale(roi_gray):
landmarksAndFaces.append( [x + ex, y + ey, ew, eh] )
self.no_face_count = 0
self.shared_variables.face_found[self.index] = True
self.shared_variables.face_box[self.index] = landmarksAndFaces
self.shared_variables.set_detection_box(landmarksAndFaces, self.index)
if self.shared_variables.flipp_test[self.index] and self.do_flipp_test:
degree = self.shared_variables.flipp_test_degree[self.index] + self.flipp_test_nr*self.flipp_test_degree
degree = degree - (degree % 360)*360
self.shared_variables.flipp_test_degree[self.index] = degree
LOG.log("Flipp test successful add degree :" + str(self.flipp_test_nr*self.flipp_test_degree),self.shared_variables.name)
self.do_flipp_test = False
self.flipp_test_nr = 1
else:
self.shared_variables.face_found[self.index] = False
if self.no_face_count >= self.NO_FACE_MAX :
if self.shared_variables.flipp_test:
if self.do_flipp_test:
self.flipp_test_nr = self.flipp_test_nr + 1
if self.flipp_test_nr*self.flipp_test_degree >= 360:
self.do_flipp_test = False
self.flipp_test_nr = 1
self.sleep_time = self.LONG_SLEEP
else:
self.do_flipp_test = True
else:
pass
else:
self.no_face_count = self.no_face_count + 1
if self.no_face_count >= self.flipp_test_long_intervall and self.shared_variables.flipp_test[self.index]:
self.no_face_count = 0
self.end_time = datetime.datetime.now()
if self.shared_variables.debug:
LOG.debug('OPENCV Detection time:' + str(self.end_time - self.start_time),self.shared_variables.name)
time.sleep(self.sleep_time)
LOG.info("Ending OPENCV detection " + str(self.index), "SYSTEM-"+self.shared_variables.name )
| true | true |
f73dd5de8b21cd2983c194c5e5f78f65b0b7e354 | 3,379 | py | Python | core/dr_utils/dib_renderer_x/utils/sphericalcoord.py | THU-DA-6D-Pose-Group/Self6D-Diff-Renderer | 408330a9c7d7010a5af0a5b0b469f1ef695d18de | [
"Apache-2.0"
] | 90 | 2020-08-15T16:14:45.000Z | 2022-01-22T10:24:13.000Z | core/dr_utils/dib_renderer_x/utils/sphericalcoord.py | THU-DA-6D-Pose-Group/Self6D-Diff-Renderer | 408330a9c7d7010a5af0a5b0b469f1ef695d18de | [
"Apache-2.0"
] | 11 | 2020-09-07T17:31:18.000Z | 2021-11-25T12:07:30.000Z | core/dr_utils/dib_renderer_x/utils/sphericalcoord.py | THU-DA-6D-Pose-Group/Self6D-Diff-Renderer | 408330a9c7d7010a5af0a5b0b469f1ef695d18de | [
"Apache-2.0"
] | 13 | 2020-09-03T04:25:50.000Z | 2021-12-23T08:23:33.000Z | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import numpy as np
##################################################################
# symmetric over z axis
def get_spherical_coords_z(X):
# X is N x 3
rad = np.linalg.norm(X, axis=1)
# Inclination
theta = np.arccos(X[:, 2] / rad)
# Azimuth
phi = np.arctan2(X[:, 1], X[:, 0])
# Normalize both to be between [-1, 1]
vv = (theta / np.pi) * 2 - 1
uu = ((phi + np.pi) / (2 * np.pi)) * 2 - 1
# Return N x 2
return np.stack([uu, vv], 1)
# symmetric over x axis
def get_spherical_coords_x(X):
# X is N x 3
rad = np.linalg.norm(X, axis=1)
# Inclination
# y == 1
# cos = 0
# y == -1
# cos = pi
theta = np.arccos(X[:, 0] / rad)
# Azimuth
phi = np.arctan2(X[:, 2], X[:, 1])
# Normalize both to be between [-1, 1]
uu = (theta / np.pi) * 2 - 1
vv = ((phi + np.pi) / (2 * np.pi)) * 2 - 1
# Return N x 2
return np.stack([uu, vv], 1)
# symmetric spherical projection
def get_symmetric_spherical_tex_coords(vertex_pos, symmetry_axis=1, up_axis=2, front_axis=0):
# vertex_pos is N x 3
length = np.linalg.norm(vertex_pos, axis=1)
# Inclination
theta = np.arccos(vertex_pos[:, front_axis] / length)
# Azimuth
phi = np.abs(np.arctan2(vertex_pos[:, symmetry_axis], vertex_pos[:, up_axis]))
# Normalize both to be between [-1, 1]
uu = (theta / np.pi) * 2 - 1
# vv = ((phi + np.pi) / (2 * np.pi)) * 2 - 1
vv = (phi / np.pi) * 2 - 1
# Return N x 2
return np.stack([uu, vv], 1)
#########################################################################
if __name__ == "__main__":
from utils.utils_mesh import loadobj, savemeshtes
import cv2
p, f = loadobj("2.obj")
uv = get_spherical_coords_x(p)
uv[:, 0] = -uv[:, 0]
uv[:, 1] = -uv[:, 1]
uv = (uv + 1) / 2
savemeshtes(p, uv, f, "./2_x.obj")
tex = np.zeros(shape=(256, 512, 3), dtype=np.uint8)
font = cv2.FONT_HERSHEY_SIMPLEX
bottomLeftCornerOfText = (10, 200)
fontScale = 5
fontColor = (0, 255, 255)
lineType = 2
cv2.putText(tex, "Hello World!", bottomLeftCornerOfText, font, fontScale, fontColor, lineType)
cv2.imshow("", tex)
cv2.waitKey()
cv2.imwrite("2_x.png", np.transpose(tex, [1, 0, 2]))
| 32.805825 | 98 | 0.614087 |
import numpy as np
| true | true |
f73dd6346cd312d0a8e3b2bb3b9faabc68392075 | 2,538 | py | Python | blaze/__init__.py | thequackdaddy/blaze | 21ba90c17b6b807623bbc9996bfc838f13ee6ea1 | [
"BSD-3-Clause"
] | null | null | null | blaze/__init__.py | thequackdaddy/blaze | 21ba90c17b6b807623bbc9996bfc838f13ee6ea1 | [
"BSD-3-Clause"
] | null | null | null | blaze/__init__.py | thequackdaddy/blaze | 21ba90c17b6b807623bbc9996bfc838f13ee6ea1 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import, division, print_function
try:
import h5py # if we import h5py after tables we segfault
except ImportError:
pass
from pandas import DataFrame
from odo import odo, convert, append, drop, resource
from odo.backends.csv import CSV
from odo.backends.json import JSON, JSONLines
from multipledispatch import halt_ordering, restart_ordering
halt_ordering() # Turn off multipledispatch ordering
from datashape import dshape, discover
from .utils import ignoring
import warnings
from .expr import (
Symbol,
broadcast_collect,
by,
cast,
coalesce,
count,
count_values,
date,
datetime,
day,
distinct,
distinct, head,
head,
hour,
join,
label,
like,
mean,
merge,
microsecond,
millisecond,
month,
ndim,
nunique,
relabel,
sample,
second,
selection,
shape,
sort,
summary,
symbol,
time,
transform,
var,
year,
)
from .expr.arrays import (tensordot, transpose)
from .expr.functions import *
from .index import create_index
from .interactive import *
from .compute.pmap import set_default_pmap
from .compute.csv import *
from .compute.json import *
from .compute.python import *
from .compute.pandas import *
from .compute.numpy import *
from .compute.core import *
from .compute.core import compute
from .cached import CachedDataset
with ignoring(ImportError):
from .server import *
with ignoring(ImportError):
from .sql import *
from .compute.sql import *
with ignoring(ImportError):
from .compute.dask import *
with ignoring(ImportError, AttributeError):
from .compute.spark import *
with ignoring(ImportError, TypeError):
from .compute.sparksql import *
with ignoring(ImportError):
from .compute.h5py import *
with ignoring(ImportError):
from .compute.hdfstore import *
with ignoring(ImportError):
from .compute.pytables import *
with ignoring(ImportError):
from .compute.chunks import *
with ignoring(ImportError):
from .compute.bcolz import *
with ignoring(ImportError):
from .mongo import *
from .compute.mongo import *
with ignoring(ImportError):
from .pytables import *
from .compute.pytables import *
from .expr import concat # Some module re-export toolz.concat and * catches it.
restart_ordering() # Restart multipledispatch ordering and do ordering
inf = float('inf')
nan = float('nan')
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| 22.864865 | 80 | 0.72104 | from __future__ import absolute_import, division, print_function
try:
import h5py
except ImportError:
pass
from pandas import DataFrame
from odo import odo, convert, append, drop, resource
from odo.backends.csv import CSV
from odo.backends.json import JSON, JSONLines
from multipledispatch import halt_ordering, restart_ordering
halt_ordering()
from datashape import dshape, discover
from .utils import ignoring
import warnings
from .expr import (
Symbol,
broadcast_collect,
by,
cast,
coalesce,
count,
count_values,
date,
datetime,
day,
distinct,
distinct, head,
head,
hour,
join,
label,
like,
mean,
merge,
microsecond,
millisecond,
month,
ndim,
nunique,
relabel,
sample,
second,
selection,
shape,
sort,
summary,
symbol,
time,
transform,
var,
year,
)
from .expr.arrays import (tensordot, transpose)
from .expr.functions import *
from .index import create_index
from .interactive import *
from .compute.pmap import set_default_pmap
from .compute.csv import *
from .compute.json import *
from .compute.python import *
from .compute.pandas import *
from .compute.numpy import *
from .compute.core import *
from .compute.core import compute
from .cached import CachedDataset
with ignoring(ImportError):
from .server import *
with ignoring(ImportError):
from .sql import *
from .compute.sql import *
with ignoring(ImportError):
from .compute.dask import *
with ignoring(ImportError, AttributeError):
from .compute.spark import *
with ignoring(ImportError, TypeError):
from .compute.sparksql import *
with ignoring(ImportError):
from .compute.h5py import *
with ignoring(ImportError):
from .compute.hdfstore import *
with ignoring(ImportError):
from .compute.pytables import *
with ignoring(ImportError):
from .compute.chunks import *
with ignoring(ImportError):
from .compute.bcolz import *
with ignoring(ImportError):
from .mongo import *
from .compute.mongo import *
with ignoring(ImportError):
from .pytables import *
from .compute.pytables import *
from .expr import concat
restart_ordering()
inf = float('inf')
nan = float('nan')
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| true | true |
f73dd74210b3522d012f83b6d7a261b363f4f32b | 2,902 | py | Python | gridworld/lfl/mdp_utils.py | gioramponi/LOGEL | e862324816c57dd5d07691ee8583259a6a62116c | [
"MIT"
] | null | null | null | gridworld/lfl/mdp_utils.py | gioramponi/LOGEL | e862324816c57dd5d07691ee8583259a6a62116c | [
"MIT"
] | null | null | null | gridworld/lfl/mdp_utils.py | gioramponi/LOGEL | e862324816c57dd5d07691ee8583259a6a62116c | [
"MIT"
] | 1 | 2021-04-16T15:15:41.000Z | 2021-04-16T15:15:41.000Z | """utils for entropy-regularized discrete MDPs."""
from __future__ import print_function
import numpy as np
def softmax(x, tau=1.):
  """Return the softmax distribution of `x` with inverse temperature `tau`.

  Equivalent to exp(tau*x) / sum(exp(tau*x)), but computed with the
  max-shift trick so large logits do not overflow np.exp (the original
  formulation produced inf/nan for logits around 710 and above).
  """
  logits = np.asarray(x, dtype=float) * tau
  logits = logits - np.max(logits)  # largest exponent becomes 0 (stability)
  e = np.exp(logits)
  return e / np.sum(e)
def score_policy(pi, r, p, alpha, gamma):
  """Return the expected score J(pi) = v_pi(state 0) by soft policy evaluation.

  Iterates the entropy-regularized Bellman equations for 1000 fixed sweeps
  (a contraction for gamma < 1):
      v(s)   = sum_a pi(s,a) * (q(s,a) - alpha * log pi(s,a))
      q(s,a) = r(s,a) + gamma * sum_s' p(s,a,s') * v(s')

  Args:
    pi: (n_states, n_actions) policy; rows sum to 1, entries must be > 0.
    r: (n_states, n_actions) reward table.
    p: (n_states, n_actions, n_states) transition kernel.
    alpha: entropy-regularization temperature.
    gamma: discount factor in [0, 1).

  Returns:
    Scalar soft value of the start state (index 0).
  """
  # Deterministic init: the original `np.random.rand` init converges to the
  # same fixed point but made intermediate iterates nondeterministic.
  q_pi = np.zeros_like(r, dtype=float)
  entropy_bonus = -alpha * np.log(pi)  # hoisted loop invariant
  for _ in range(1000):
    # Soft state value under pi, vectorized over (state, action).
    v_pi = np.sum(pi * (q_pi + entropy_bonus), axis=1)
    # Bellman backup; p.dot(v_pi) contracts the successor-state axis.
    q_pi = r + gamma * p.dot(v_pi)
  # Recompute v so the returned score reflects the final q_pi.
  v_pi = np.sum(pi * (q_pi + entropy_bonus), axis=1)
  return v_pi[0]
def solve_entropy_regularized_mdp(r, p, alpha, gamma):
  """Return the optimal soft policy pi* and its score J(pi*) = v*(state 0).

  Soft value iteration (1000 sweeps):
      q(s,a) = r(s,a) + gamma * sum_s' p(s,a,s') * v(s')
      v(s)   = alpha * logsumexp(q(s,:) / alpha)
  with pi*(a|s) = softmax(q(s,:) / alpha).

  Args:
    r: (n_states, n_actions) reward table.
    p: (n_states, n_actions, n_states) transition kernel.
    alpha: entropy-regularization temperature.
    gamma: discount factor in [0, 1).

  Returns:
    (pi_star, j_pi_star): the (n_states, n_actions) policy and scalar score.
  """
  n_states, n_actions, _ = p.shape
  q = np.zeros((n_states, n_actions))
  v = alpha * np.log(np.sum(np.exp(q / alpha), 1))
  for _ in range(1000):
    # p.dot(v) contracts the successor-state axis of the kernel.
    q = r + gamma * p.dot(v)
    v = alpha * np.log(np.sum(np.exp(q / alpha), 1))
  # Stable row-wise softmax: pi*(a|s) is proportional to exp(q(s,a)/alpha).
  logits = q / alpha
  logits = logits - logits.max(axis=1, keepdims=True)
  pi_star = np.exp(logits)
  pi_star = pi_star / pi_star.sum(axis=1, keepdims=True)
  j_pi_star = v[0]
  return pi_star, j_pi_star
def sample_sa_trajectory(p, pi, length):
  """Sample `length` (state, action) pairs under policy `pi`.

  Starts in state 0; each step draws the successor from p[s, a, :] and the
  next action from pi[s', :]. Returns a list of (state, action) tuples.
  """
  n_states, n_actions, _ = p.shape
  states = range(n_states)
  actions = range(n_actions)
  pairs = []
  cur_state = 0
  cur_action = np.random.choice(actions, p=pi[cur_state, :])
  for _ in range(length):
    nxt_state = np.random.choice(states, p=p[cur_state, cur_action, :])
    nxt_action = np.random.choice(actions, p=pi[nxt_state, :])
    pairs.append((cur_state, cur_action))
    cur_state, cur_action = nxt_state, nxt_action
  return pairs
def sample_sar_trajectory(p, pi, r, length):
  """Sample `length` (state, action, reward) triples under policy `pi`.

  Starts in state 0; rewards are read from r[s, a]. Returns a list of
  (state, action, reward) tuples.
  """
  n_states, n_actions, _ = p.shape
  states = range(n_states)
  actions = range(n_actions)
  triples = []
  cur_state = 0
  cur_action = np.random.choice(actions, p=pi[cur_state, :])
  for _ in range(length):
    nxt_state = np.random.choice(states, p=p[cur_state, cur_action, :])
    nxt_action = np.random.choice(actions, p=pi[nxt_state, :])
    triples.append((cur_state, cur_action, r[cur_state, cur_action]))
    cur_state, cur_action = nxt_state, nxt_action
  return triples
from __future__ import print_function
import numpy as np
def softmax(x, tau=1.):
  """Return softmax probabilities of `x` with inverse temperature `tau`."""
  # NOTE(review): np.exp(x * tau) can overflow for large logits — there is
  # no max-shift here; confirm inputs stay in a safe range.
  e = np.exp(x * tau)
  # z is the negative log-normalizer, so exp(x*tau + z) sums to 1.
  z = -np.log(sum(e))
  return np.exp(x * tau + z)
def score_policy(pi, r, p, alpha, gamma):
  """Return J(pi) = v_pi(state 0) via soft (entropy-regularized) policy evaluation.

  pi: (n_states, n_actions) policy with strictly positive rows summing to 1.
  r: (n_states, n_actions) reward. p: (n_states, n_actions, n_states) kernel.
  alpha: entropy temperature. gamma: discount factor.
  """
  n_states, n_actions, _ = p.shape
  # NOTE(review): random init makes intermediate iterates nondeterministic;
  # after 1000 fixed-point sweeps the result is converged for gamma < 1.
  q_pi = np.random.rand(n_states, n_actions)
  v_pi = np.zeros(n_states)
  for _ in range(1000):
    # v(s) = sum_a pi(s,a) * (q(s,a) - alpha*log pi(s,a))  (soft state value)
    v_pi = np.zeros(n_states)
    for state in range(n_states):
      for action_ in range(n_actions):
        v_pi[state] += pi[state, action_] * \
            (q_pi[state, action_] - alpha * np.log(pi[state, action_]))
    # q(s,a) = r(s,a) + gamma * sum_s' p(s,a,s') * v(s')  (Bellman backup)
    q_pi *= 0
    for state in range(n_states):
      for action in range(n_actions):
        q_pi[state, action] = r[state, action]
        for state_ in range(n_states):
          q_pi[state, action] += gamma * p[state, action, state_] * v_pi[state_]
  j_pi = v_pi[0]
  return j_pi
def solve_entropy_regularized_mdp(r, p, alpha, gamma):
  """Return the optimal soft policy pi* and its score J(pi*) = v*(state 0).

  Soft value iteration (1000 sweeps):
      q(s,a) = r(s,a) + gamma * sum_s' p(s,a,s') * v(s')
      v(s)   = alpha * logsumexp(q(s,:) / alpha)
  with pi*(a|s) = softmax(q(s,:) / alpha).

  Args:
    r: (n_states, n_actions) reward table.
    p: (n_states, n_actions, n_states) transition kernel.
    alpha: entropy-regularization temperature.
    gamma: discount factor in [0, 1).

  Returns:
    (pi_star, j_pi_star): the (n_states, n_actions) policy and scalar score.
  """
  n_states, n_actions, _ = p.shape
  q = np.zeros((n_states, n_actions))
  # Debug print of r/p shapes removed from the solver's hot path.
  v = alpha * np.log(np.sum(np.exp(q / alpha), 1))
  for _ in range(1000):
    # p.dot(v) contracts the successor-state axis of the kernel.
    q = r + gamma * p.dot(v)
    v = alpha * np.log(np.sum(np.exp(q / alpha), 1))
  # Stable row-wise softmax: pi*(a|s) is proportional to exp(q(s,a)/alpha).
  logits = q / alpha
  logits = logits - logits.max(axis=1, keepdims=True)
  pi_star = np.exp(logits)
  pi_star = pi_star / pi_star.sum(axis=1, keepdims=True)
  j_pi_star = v[0]
  return pi_star, j_pi_star
def sample_sa_trajectory(p, pi, length):
  """Sample `length` (state, action) pairs from policy `pi`, starting in state 0."""
  n_states, n_actions, _ = p.shape
  trajectory = []
  state = 0
  action = np.random.choice(range(n_actions), p=pi[state, :])
  for _ in range(length):
    # Draw the successor state from the kernel, then the next action from pi.
    new_state = np.random.choice(range(n_states), p=p[state, action, :])
    new_action = np.random.choice(range(n_actions), p=pi[new_state, :])
    trajectory.append((state, action))
    state = new_state
    action = new_action
  return trajectory
def sample_sar_trajectory(p, pi, r, length):
  """Sample `length` (state, action, reward) triples from `pi`, starting in state 0."""
  n_states, n_actions, _ = p.shape
  trajectory = []
  state = 0
  action = np.random.choice(range(n_actions), p=pi[state, :])
  for _ in range(length):
    # Draw the successor state from the kernel, then the next action from pi.
    new_state = np.random.choice(range(n_states), p=p[state, action, :])
    new_action = np.random.choice(range(n_actions), p=pi[new_state, :])
    # Reward is read from the table for the *current* (state, action) pair.
    trajectory.append((state, action, r[state, action]))
    state = new_state
    action = new_action
  return trajectory
f73dd7c37600d0457452fc4d4695e6e8e1302aa7 | 2,983 | py | Python | cities_light/models.py | suquant/django-cities-light | 786852c8372f24e6f05c9c9b2e03e12873a222b8 | [
"MIT"
] | null | null | null | cities_light/models.py | suquant/django-cities-light | 786852c8372f24e6f05c9c9b2e03e12873a222b8 | [
"MIT"
] | null | null | null | cities_light/models.py | suquant/django-cities-light | 786852c8372f24e6f05c9c9b2e03e12873a222b8 | [
"MIT"
] | null | null | null | """
By default, all models are taken from this package.
But it is possible to customise these models to add some fields.
For such purpose cities_light models are defined as abstract (without
customisation they all inherit abstract versions automatically
without changes).
Steps to customise cities_light models
======================================
- Define **all** of cities abstract models in your app:
.. code:: python
# yourapp/models.py
from cities_light.abstract_models import (AbstractCity, AbstractRegion,
AbstractCountry)
from cities_light.receivers import connect_default_signals
class Country(AbstractCountry):
pass
connect_default_signals(Country)
class Region(AbstractRegion):
pass
connect_default_signals(Region)
class City(AbstractCity):
timezone = models.CharField(max_length=40)
connect_default_signals(City)
- Add post import processing to you model *[optional]*:
.. code:: python
import cities_light
from cities_light.settings import ICity
def set_city_fields(sender, instance, items, **kwargs):
instance.timezone = items[ICity.timezone]
cities_light.signals.city_items_post_import.connect(set_city_fields)
- Define settings.py:
.. code:: python
INSTALLED_APPS = [
# ...
'cities_light',
'yourapp',
]
CITIES_LIGHT_APP_NAME = 'yourapp'
- Create tables:
.. code::
python manage.py syncdb
That's all!
**Notes**:
- model names can't be modified, i.e. you have to use exactly
City, Country, Region names and not MyCity, MyCountry, MyRegion.
- Connect default signals for every custom model by calling
``connect_default_signals`` (or not, if you don't want to trigger
default signals).
- if in further versions of cities_light abstract models will be
updated (some fields will be added/removed), you have to deal with
migrations by yourself, as models are on your own now.
"""
# some imports are present for backwards compatibility and migration process
from .abstract_models import (AbstractCountry, AbstractRegion, AbstractCity,
ToSearchTextField, CONTINENT_CHOICES, to_search, to_ascii)
from .signals import *
from .receivers import *
from .settings import *
__all__ = ['CONTINENT_CHOICES', 'to_search', 'to_ascii', 'filter_non_cities',
'filter_non_included_countries_country',
'filter_non_included_countries_region',
'filter_non_included_countries_city']
# Define the default concrete models only when the project has not pointed
# CITIES_LIGHT_APP_NAME at its own app with customised models (in which case
# that app is expected to define Country/Region/City itself).
if CITIES_LIGHT_APP_NAME == DEFAULT_APP_NAME:
    class Country(AbstractCountry):
        pass
    # Wire the default import-time signal handlers and export the model.
    connect_default_signals(Country)
    __all__.append('Country')
    class Region(AbstractRegion):
        pass
    connect_default_signals(Region)
    __all__.append('Region')
    class City(AbstractCity):
        pass
    connect_default_signals(City)
    __all__.append('City')
| 28.961165 | 79 | 0.690245 |
from .abstract_models import (AbstractCountry, AbstractRegion, AbstractCity,
ToSearchTextField, CONTINENT_CHOICES, to_search, to_ascii)
from .signals import *
from .receivers import *
from .settings import *
__all__ = ['CONTINENT_CHOICES', 'to_search', 'to_ascii', 'filter_non_cities',
'filter_non_included_countries_country',
'filter_non_included_countries_region',
'filter_non_included_countries_city']
# Default concrete models, used only when the project did not override them
# via the CITIES_LIGHT_APP_NAME setting.
if CITIES_LIGHT_APP_NAME == DEFAULT_APP_NAME:
    class Country(AbstractCountry):
        pass
    # Attach import-time signal handlers and export the model name.
    connect_default_signals(Country)
    __all__.append('Country')
    class Region(AbstractRegion):
        pass
    connect_default_signals(Region)
    __all__.append('Region')
    class City(AbstractCity):
        pass
    connect_default_signals(City)
    __all__.append('City')
| true | true |
f73dd820da9361186f0802d125d95d75e9cd7758 | 549 | py | Python | tests/test_gingerit.py | memahesh/gingerit | 95aeb581fe0abd6ee68f2d97a10d0e1d57d99136 | [
"MIT"
] | null | null | null | tests/test_gingerit.py | memahesh/gingerit | 95aeb581fe0abd6ee68f2d97a10d0e1d57d99136 | [
"MIT"
] | null | null | null | tests/test_gingerit.py | memahesh/gingerit | 95aeb581fe0abd6ee68f2d97a10d0e1d57d99136 | [
"MIT"
] | null | null | null | import pytest
from gingerit.gingerit import GingerIt
@pytest.mark.parametrize("text,expected", [
    ("The smelt of fliwers bring back memories.",
     "The smell of flowers brings back memories."),
    ("Edwards will be sck yesterday",
     "Edwards was sick yesterday"),
    ("Edwards was sick yesterday.",
     "Edwards was sick yesterday."),
    ("", ""),
])
def test_gingerit(text, expected):
    """Each input sentence should be corrected to the expected text."""
    outcome = GingerIt().parse(text)
    assert outcome["result"] == expected
| 20.333333 | 52 | 0.581056 | import pytest
from gingerit.gingerit import GingerIt
@pytest.mark.parametrize("text,expected", [
    (
        "The smelt of fliwers bring back memories.",
        "The smell of flowers brings back memories."
    ),
    (
        "Edwards will be sck yesterday",
        "Edwards was sick yesterday"
    ),
    (
        "Edwards was sick yesterday.",
        "Edwards was sick yesterday."
    ),
    (
        "",
        ""
    )
])
def test_gingerit(text, expected):
    """GingerIt should correct each input sentence to the expected text.

    NOTE(review): presumably this exercises the live Ginger web API — confirm
    whether the suite is expected to run offline.
    """
    parser = GingerIt()
    assert parser.parse(text)["result"] == expected
| true | true |
f73ddb28a2909316710e5a6997f57b1319ea7eec | 19,354 | py | Python | pyleecan/Classes/OutStruct.py | EmileDvs/pyleecan | ad2f5f25c089a981f373557a198da51c62407928 | [
"Apache-2.0"
] | 95 | 2019-01-23T04:19:45.000Z | 2022-03-17T18:22:10.000Z | pyleecan/Classes/OutStruct.py | EmileDvs/pyleecan | ad2f5f25c089a981f373557a198da51c62407928 | [
"Apache-2.0"
] | 366 | 2019-02-20T07:15:08.000Z | 2022-03-31T13:37:23.000Z | pyleecan/Classes/OutStruct.py | EmileDvs/pyleecan | ad2f5f25c089a981f373557a198da51c62407928 | [
"Apache-2.0"
] | 74 | 2019-01-24T01:47:31.000Z | 2022-02-25T05:44:42.000Z | # -*- coding: utf-8 -*-
# File generated according to Generator/ClassesRef/Output/OutStruct.csv
# WARNING! All changes made in this file will be lost!
"""Method code available at https://github.com/Eomys/pyleecan/tree/master/pyleecan/Methods/Output/OutStruct
"""
from os import linesep
from sys import getsizeof
from logging import getLogger
from ._check import check_var, raise_
from ..Functions.get_logger import get_logger
from ..Functions.save import save
from ..Functions.copy import copy
from ..Functions.load import load_init_dict
from ..Functions.Load.import_class import import_class
from ._frozen import FrozenClass
from ._check import InitUnKnowClassError
from .MeshSolution import MeshSolution
class OutStruct(FrozenClass):
"""Gather the structural module outputs"""
VERSION = 1
# save and copy methods are available in all object
save = save
copy = copy
# get_logger method is available in all object
get_logger = get_logger
def __init__(
self,
Time=None,
Angle=None,
Nt_tot=None,
Na_tot=None,
logger_name="Pyleecan.Structural",
Yr=None,
Vr=None,
Ar=None,
meshsolution=-1,
FEA_dict=None,
init_dict=None,
init_str=None,
):
"""Constructor of the class. Can be use in three ways :
- __init__ (arg1 = 1, arg3 = 5) every parameters have name and default values
for pyleecan type, -1 will call the default constructor
- __init__ (init_dict = d) d must be a dictionary with property names as keys
- __init__ (init_str = s) s must be a string
s is the file path to load
ndarray or list can be given for Vector and Matrix
object or dict can be given for pyleecan Object"""
if init_str is not None: # Load from a file
init_dict = load_init_dict(init_str)[1]
if init_dict is not None: # Initialisation by dict
assert type(init_dict) is dict
# Overwrite default value with init_dict content
if "Time" in list(init_dict.keys()):
Time = init_dict["Time"]
if "Angle" in list(init_dict.keys()):
Angle = init_dict["Angle"]
if "Nt_tot" in list(init_dict.keys()):
Nt_tot = init_dict["Nt_tot"]
if "Na_tot" in list(init_dict.keys()):
Na_tot = init_dict["Na_tot"]
if "logger_name" in list(init_dict.keys()):
logger_name = init_dict["logger_name"]
if "Yr" in list(init_dict.keys()):
Yr = init_dict["Yr"]
if "Vr" in list(init_dict.keys()):
Vr = init_dict["Vr"]
if "Ar" in list(init_dict.keys()):
Ar = init_dict["Ar"]
if "meshsolution" in list(init_dict.keys()):
meshsolution = init_dict["meshsolution"]
if "FEA_dict" in list(init_dict.keys()):
FEA_dict = init_dict["FEA_dict"]
# Set the properties (value check and convertion are done in setter)
self.parent = None
self.Time = Time
self.Angle = Angle
self.Nt_tot = Nt_tot
self.Na_tot = Na_tot
self.logger_name = logger_name
self.Yr = Yr
self.Vr = Vr
self.Ar = Ar
self.meshsolution = meshsolution
self.FEA_dict = FEA_dict
# The class is frozen, for now it's impossible to add new properties
self._freeze()
def __str__(self):
"""Convert this object in a readeable string (for print)"""
OutStruct_str = ""
if self.parent is None:
OutStruct_str += "parent = None " + linesep
else:
OutStruct_str += "parent = " + str(type(self.parent)) + " object" + linesep
OutStruct_str += "Time = " + str(self.Time) + linesep + linesep
OutStruct_str += "Angle = " + str(self.Angle) + linesep + linesep
OutStruct_str += "Nt_tot = " + str(self.Nt_tot) + linesep
OutStruct_str += "Na_tot = " + str(self.Na_tot) + linesep
OutStruct_str += 'logger_name = "' + str(self.logger_name) + '"' + linesep
OutStruct_str += "Yr = " + str(self.Yr) + linesep + linesep
OutStruct_str += "Vr = " + str(self.Vr) + linesep + linesep
OutStruct_str += "Ar = " + str(self.Ar) + linesep + linesep
if self.meshsolution is not None:
tmp = (
self.meshsolution.__str__()
.replace(linesep, linesep + "\t")
.rstrip("\t")
)
OutStruct_str += "meshsolution = " + tmp
else:
OutStruct_str += "meshsolution = None" + linesep + linesep
OutStruct_str += "FEA_dict = " + str(self.FEA_dict) + linesep
return OutStruct_str
def __eq__(self, other):
"""Compare two objects (skip parent)"""
if type(other) != type(self):
return False
if other.Time != self.Time:
return False
if other.Angle != self.Angle:
return False
if other.Nt_tot != self.Nt_tot:
return False
if other.Na_tot != self.Na_tot:
return False
if other.logger_name != self.logger_name:
return False
if other.Yr != self.Yr:
return False
if other.Vr != self.Vr:
return False
if other.Ar != self.Ar:
return False
if other.meshsolution != self.meshsolution:
return False
if other.FEA_dict != self.FEA_dict:
return False
return True
def compare(self, other, name="self", ignore_list=None):
"""Compare two objects and return list of differences"""
if ignore_list is None:
ignore_list = list()
if type(other) != type(self):
return ["type(" + name + ")"]
diff_list = list()
if (other.Time is None and self.Time is not None) or (
other.Time is not None and self.Time is None
):
diff_list.append(name + ".Time None mismatch")
elif self.Time is not None:
diff_list.extend(self.Time.compare(other.Time, name=name + ".Time"))
if (other.Angle is None and self.Angle is not None) or (
other.Angle is not None and self.Angle is None
):
diff_list.append(name + ".Angle None mismatch")
elif self.Angle is not None:
diff_list.extend(self.Angle.compare(other.Angle, name=name + ".Angle"))
if other._Nt_tot != self._Nt_tot:
diff_list.append(name + ".Nt_tot")
if other._Na_tot != self._Na_tot:
diff_list.append(name + ".Na_tot")
if other._logger_name != self._logger_name:
diff_list.append(name + ".logger_name")
if (other.Yr is None and self.Yr is not None) or (
other.Yr is not None and self.Yr is None
):
diff_list.append(name + ".Yr None mismatch")
elif self.Yr is not None:
diff_list.extend(self.Yr.compare(other.Yr, name=name + ".Yr"))
if (other.Vr is None and self.Vr is not None) or (
other.Vr is not None and self.Vr is None
):
diff_list.append(name + ".Vr None mismatch")
elif self.Vr is not None:
diff_list.extend(self.Vr.compare(other.Vr, name=name + ".Vr"))
if (other.Ar is None and self.Ar is not None) or (
other.Ar is not None and self.Ar is None
):
diff_list.append(name + ".Ar None mismatch")
elif self.Ar is not None:
diff_list.extend(self.Ar.compare(other.Ar, name=name + ".Ar"))
if (other.meshsolution is None and self.meshsolution is not None) or (
other.meshsolution is not None and self.meshsolution is None
):
diff_list.append(name + ".meshsolution None mismatch")
elif self.meshsolution is not None:
diff_list.extend(
self.meshsolution.compare(
other.meshsolution, name=name + ".meshsolution"
)
)
if other._FEA_dict != self._FEA_dict:
diff_list.append(name + ".FEA_dict")
# Filter ignore differences
diff_list = list(filter(lambda x: x not in ignore_list, diff_list))
return diff_list
    def __sizeof__(self):
        """Return the size in memory of the object (including all subobject)"""
        # NOTE(review): getsizeof is shallow for built-in containers; the
        # per-attribute totals rely on each subobject implementing its own
        # recursive __sizeof__ — confirm for the SciDataTool Data objects.
        S = 0  # Full size of the object
        S += getsizeof(self.Time)
        S += getsizeof(self.Angle)
        S += getsizeof(self.Nt_tot)
        S += getsizeof(self.Na_tot)
        S += getsizeof(self.logger_name)
        S += getsizeof(self.Yr)
        S += getsizeof(self.Vr)
        S += getsizeof(self.Ar)
        S += getsizeof(self.meshsolution)
        if self.FEA_dict is not None:
            # Count both keys and values of the FEA parameter dict.
            for key, value in self.FEA_dict.items():
                S += getsizeof(value) + getsizeof(key)
        return S
def as_dict(self, type_handle_ndarray=0, keep_function=False, **kwargs):
"""
Convert this object in a json serializable dict (can be use in __init__).
type_handle_ndarray: int
How to handle ndarray (0: tolist, 1: copy, 2: nothing)
keep_function : bool
True to keep the function object, else return str
Optional keyword input parameter is for internal use only
and may prevent json serializability.
"""
OutStruct_dict = dict()
if self.Time is None:
OutStruct_dict["Time"] = None
else:
OutStruct_dict["Time"] = self.Time.as_dict(
type_handle_ndarray=type_handle_ndarray,
keep_function=keep_function,
**kwargs
)
if self.Angle is None:
OutStruct_dict["Angle"] = None
else:
OutStruct_dict["Angle"] = self.Angle.as_dict(
type_handle_ndarray=type_handle_ndarray,
keep_function=keep_function,
**kwargs
)
OutStruct_dict["Nt_tot"] = self.Nt_tot
OutStruct_dict["Na_tot"] = self.Na_tot
OutStruct_dict["logger_name"] = self.logger_name
if self.Yr is None:
OutStruct_dict["Yr"] = None
else:
OutStruct_dict["Yr"] = self.Yr.as_dict(
type_handle_ndarray=type_handle_ndarray,
keep_function=keep_function,
**kwargs
)
if self.Vr is None:
OutStruct_dict["Vr"] = None
else:
OutStruct_dict["Vr"] = self.Vr.as_dict(
type_handle_ndarray=type_handle_ndarray,
keep_function=keep_function,
**kwargs
)
if self.Ar is None:
OutStruct_dict["Ar"] = None
else:
OutStruct_dict["Ar"] = self.Ar.as_dict(
type_handle_ndarray=type_handle_ndarray,
keep_function=keep_function,
**kwargs
)
if self.meshsolution is None:
OutStruct_dict["meshsolution"] = None
else:
OutStruct_dict["meshsolution"] = self.meshsolution.as_dict(
type_handle_ndarray=type_handle_ndarray,
keep_function=keep_function,
**kwargs
)
OutStruct_dict["FEA_dict"] = (
self.FEA_dict.copy() if self.FEA_dict is not None else None
)
# The class name is added to the dict for deserialisation purpose
OutStruct_dict["__class__"] = "OutStruct"
return OutStruct_dict
    def _set_None(self):
        """Set all the properties to None (except pyleecan object)"""
        # Each assignment goes through the corresponding property setter;
        # assumes check_var accepts None for these properties — confirm.
        self.Time = None
        self.Angle = None
        self.Nt_tot = None
        self.Na_tot = None
        self.logger_name = None
        self.Yr = None
        self.Vr = None
        self.Ar = None
        # Pyleecan sub-objects are cleared recursively rather than replaced,
        # so the parent/child links stay intact.
        if self.meshsolution is not None:
            self.meshsolution._set_None()
        self.FEA_dict = None
def _get_Time(self):
"""getter of Time"""
return self._Time
    def _set_Time(self, value):
        """setter of Time

        Accepts a file path (str), an init dict carrying a "__class__" key,
        -1 (build a default object) or an already-built Data instance.
        """
        if isinstance(value, str):  # Load from file
            value = load_init_dict(value)[1]
        if isinstance(value, dict) and "__class__" in value:
            # Rebuild the SciDataTool object named by the dict's "__class__" key.
            class_obj = import_class(
                "SciDataTool.Classes", value.get("__class__"), "Time"
            )
            value = class_obj(init_dict=value)
        elif type(value) is int and value == -1:  # Default constructor
            # NOTE(review): `Data` is not imported anywhere in this module, so
            # reaching this branch would raise NameError — confirm against the
            # pyleecan code generator / SciDataTool import conventions.
            value = Data()
        check_var("Time", value, "Data")
        self._Time = value
Time = property(
fget=_get_Time,
fset=_set_Time,
doc=u"""Structural time Data object
:Type: SciDataTool.Classes.DataND.Data
""",
)
def _get_Angle(self):
"""getter of Angle"""
return self._Angle
def _set_Angle(self, value):
"""setter of Angle"""
if isinstance(value, str): # Load from file
value = load_init_dict(value)[1]
if isinstance(value, dict) and "__class__" in value:
class_obj = import_class(
"SciDataTool.Classes", value.get("__class__"), "Angle"
)
value = class_obj(init_dict=value)
elif type(value) is int and value == -1: # Default constructor
value = Data()
check_var("Angle", value, "Data")
self._Angle = value
Angle = property(
fget=_get_Angle,
fset=_set_Angle,
doc=u"""Structural position Data object
:Type: SciDataTool.Classes.DataND.Data
""",
)
def _get_Nt_tot(self):
"""getter of Nt_tot"""
return self._Nt_tot
def _set_Nt_tot(self, value):
"""setter of Nt_tot"""
check_var("Nt_tot", value, "int")
self._Nt_tot = value
Nt_tot = property(
fget=_get_Nt_tot,
fset=_set_Nt_tot,
doc=u"""Length of the time vector
:Type: int
""",
)
def _get_Na_tot(self):
"""getter of Na_tot"""
return self._Na_tot
def _set_Na_tot(self, value):
"""setter of Na_tot"""
check_var("Na_tot", value, "int")
self._Na_tot = value
Na_tot = property(
fget=_get_Na_tot,
fset=_set_Na_tot,
doc=u"""Length of the angle vector
:Type: int
""",
)
def _get_logger_name(self):
"""getter of logger_name"""
return self._logger_name
def _set_logger_name(self, value):
"""setter of logger_name"""
check_var("logger_name", value, "str")
self._logger_name = value
logger_name = property(
fget=_get_logger_name,
fset=_set_logger_name,
doc=u"""Name of the logger to use
:Type: str
""",
)
def _get_Yr(self):
"""getter of Yr"""
return self._Yr
def _set_Yr(self, value):
"""setter of Yr"""
if isinstance(value, str): # Load from file
value = load_init_dict(value)[1]
if isinstance(value, dict) and "__class__" in value:
class_obj = import_class(
"SciDataTool.Classes", value.get("__class__"), "Yr"
)
value = class_obj(init_dict=value)
elif type(value) is int and value == -1: # Default constructor
value = DataND()
check_var("Yr", value, "DataND")
self._Yr = value
Yr = property(
fget=_get_Yr,
fset=_set_Yr,
doc=u"""Displacement output
:Type: SciDataTool.Classes.DataND.DataND
""",
)
def _get_Vr(self):
"""getter of Vr"""
return self._Vr
def _set_Vr(self, value):
"""setter of Vr"""
if isinstance(value, str): # Load from file
value = load_init_dict(value)[1]
if isinstance(value, dict) and "__class__" in value:
class_obj = import_class(
"SciDataTool.Classes", value.get("__class__"), "Vr"
)
value = class_obj(init_dict=value)
elif type(value) is int and value == -1: # Default constructor
value = DataND()
check_var("Vr", value, "DataND")
self._Vr = value
Vr = property(
fget=_get_Vr,
fset=_set_Vr,
doc=u"""Velocity output
:Type: SciDataTool.Classes.DataND.DataND
""",
)
def _get_Ar(self):
"""getter of Ar"""
return self._Ar
def _set_Ar(self, value):
"""setter of Ar"""
if isinstance(value, str): # Load from file
value = load_init_dict(value)[1]
if isinstance(value, dict) and "__class__" in value:
class_obj = import_class(
"SciDataTool.Classes", value.get("__class__"), "Ar"
)
value = class_obj(init_dict=value)
elif type(value) is int and value == -1: # Default constructor
value = DataND()
check_var("Ar", value, "DataND")
self._Ar = value
Ar = property(
fget=_get_Ar,
fset=_set_Ar,
doc=u"""Acceleration output
:Type: SciDataTool.Classes.DataND.DataND
""",
)
def _get_meshsolution(self):
"""getter of meshsolution"""
return self._meshsolution
def _set_meshsolution(self, value):
"""setter of meshsolution"""
if isinstance(value, str): # Load from file
value = load_init_dict(value)[1]
if isinstance(value, dict) and "__class__" in value:
class_obj = import_class(
"pyleecan.Classes", value.get("__class__"), "meshsolution"
)
value = class_obj(init_dict=value)
elif type(value) is int and value == -1: # Default constructor
value = MeshSolution()
check_var("meshsolution", value, "MeshSolution")
self._meshsolution = value
if self._meshsolution is not None:
self._meshsolution.parent = self
meshsolution = property(
fget=_get_meshsolution,
fset=_set_meshsolution,
doc=u"""FEA software mesh and solution
:Type: MeshSolution
""",
)
def _get_FEA_dict(self):
"""getter of FEA_dict"""
return self._FEA_dict
def _set_FEA_dict(self, value):
"""setter of FEA_dict"""
if type(value) is int and value == -1:
value = dict()
check_var("FEA_dict", value, "dict")
self._FEA_dict = value
FEA_dict = property(
fget=_get_FEA_dict,
fset=_set_FEA_dict,
doc=u"""dictionary containing the main FEA parameter
:Type: dict
""",
)
| 34.872072 | 108 | 0.551566 |
from os import linesep
from sys import getsizeof
from logging import getLogger
from ._check import check_var, raise_
from ..Functions.get_logger import get_logger
from ..Functions.save import save
from ..Functions.copy import copy
from ..Functions.load import load_init_dict
from ..Functions.Load.import_class import import_class
from ._frozen import FrozenClass
from ._check import InitUnKnowClassError
from .MeshSolution import MeshSolution
class OutStruct(FrozenClass):
VERSION = 1
save = save
copy = copy
get_logger = get_logger
def __init__(
self,
Time=None,
Angle=None,
Nt_tot=None,
Na_tot=None,
logger_name="Pyleecan.Structural",
Yr=None,
Vr=None,
Ar=None,
meshsolution=-1,
FEA_dict=None,
init_dict=None,
init_str=None,
):
if init_str is not None:
init_dict = load_init_dict(init_str)[1]
if init_dict is not None:
assert type(init_dict) is dict
if "Time" in list(init_dict.keys()):
Time = init_dict["Time"]
if "Angle" in list(init_dict.keys()):
Angle = init_dict["Angle"]
if "Nt_tot" in list(init_dict.keys()):
Nt_tot = init_dict["Nt_tot"]
if "Na_tot" in list(init_dict.keys()):
Na_tot = init_dict["Na_tot"]
if "logger_name" in list(init_dict.keys()):
logger_name = init_dict["logger_name"]
if "Yr" in list(init_dict.keys()):
Yr = init_dict["Yr"]
if "Vr" in list(init_dict.keys()):
Vr = init_dict["Vr"]
if "Ar" in list(init_dict.keys()):
Ar = init_dict["Ar"]
if "meshsolution" in list(init_dict.keys()):
meshsolution = init_dict["meshsolution"]
if "FEA_dict" in list(init_dict.keys()):
FEA_dict = init_dict["FEA_dict"]
self.parent = None
self.Time = Time
self.Angle = Angle
self.Nt_tot = Nt_tot
self.Na_tot = Na_tot
self.logger_name = logger_name
self.Yr = Yr
self.Vr = Vr
self.Ar = Ar
self.meshsolution = meshsolution
self.FEA_dict = FEA_dict
self._freeze()
def __str__(self):
OutStruct_str = ""
if self.parent is None:
OutStruct_str += "parent = None " + linesep
else:
OutStruct_str += "parent = " + str(type(self.parent)) + " object" + linesep
OutStruct_str += "Time = " + str(self.Time) + linesep + linesep
OutStruct_str += "Angle = " + str(self.Angle) + linesep + linesep
OutStruct_str += "Nt_tot = " + str(self.Nt_tot) + linesep
OutStruct_str += "Na_tot = " + str(self.Na_tot) + linesep
OutStruct_str += 'logger_name = "' + str(self.logger_name) + '"' + linesep
OutStruct_str += "Yr = " + str(self.Yr) + linesep + linesep
OutStruct_str += "Vr = " + str(self.Vr) + linesep + linesep
OutStruct_str += "Ar = " + str(self.Ar) + linesep + linesep
if self.meshsolution is not None:
tmp = (
self.meshsolution.__str__()
.replace(linesep, linesep + "\t")
.rstrip("\t")
)
OutStruct_str += "meshsolution = " + tmp
else:
OutStruct_str += "meshsolution = None" + linesep + linesep
OutStruct_str += "FEA_dict = " + str(self.FEA_dict) + linesep
return OutStruct_str
def __eq__(self, other):
if type(other) != type(self):
return False
if other.Time != self.Time:
return False
if other.Angle != self.Angle:
return False
if other.Nt_tot != self.Nt_tot:
return False
if other.Na_tot != self.Na_tot:
return False
if other.logger_name != self.logger_name:
return False
if other.Yr != self.Yr:
return False
if other.Vr != self.Vr:
return False
if other.Ar != self.Ar:
return False
if other.meshsolution != self.meshsolution:
return False
if other.FEA_dict != self.FEA_dict:
return False
return True
def compare(self, other, name="self", ignore_list=None):
if ignore_list is None:
ignore_list = list()
if type(other) != type(self):
return ["type(" + name + ")"]
diff_list = list()
if (other.Time is None and self.Time is not None) or (
other.Time is not None and self.Time is None
):
diff_list.append(name + ".Time None mismatch")
elif self.Time is not None:
diff_list.extend(self.Time.compare(other.Time, name=name + ".Time"))
if (other.Angle is None and self.Angle is not None) or (
other.Angle is not None and self.Angle is None
):
diff_list.append(name + ".Angle None mismatch")
elif self.Angle is not None:
diff_list.extend(self.Angle.compare(other.Angle, name=name + ".Angle"))
if other._Nt_tot != self._Nt_tot:
diff_list.append(name + ".Nt_tot")
if other._Na_tot != self._Na_tot:
diff_list.append(name + ".Na_tot")
if other._logger_name != self._logger_name:
diff_list.append(name + ".logger_name")
if (other.Yr is None and self.Yr is not None) or (
other.Yr is not None and self.Yr is None
):
diff_list.append(name + ".Yr None mismatch")
elif self.Yr is not None:
diff_list.extend(self.Yr.compare(other.Yr, name=name + ".Yr"))
if (other.Vr is None and self.Vr is not None) or (
other.Vr is not None and self.Vr is None
):
diff_list.append(name + ".Vr None mismatch")
elif self.Vr is not None:
diff_list.extend(self.Vr.compare(other.Vr, name=name + ".Vr"))
if (other.Ar is None and self.Ar is not None) or (
other.Ar is not None and self.Ar is None
):
diff_list.append(name + ".Ar None mismatch")
elif self.Ar is not None:
diff_list.extend(self.Ar.compare(other.Ar, name=name + ".Ar"))
if (other.meshsolution is None and self.meshsolution is not None) or (
other.meshsolution is not None and self.meshsolution is None
):
diff_list.append(name + ".meshsolution None mismatch")
elif self.meshsolution is not None:
diff_list.extend(
self.meshsolution.compare(
other.meshsolution, name=name + ".meshsolution"
)
)
if other._FEA_dict != self._FEA_dict:
diff_list.append(name + ".FEA_dict")
# Filter ignore differences
diff_list = list(filter(lambda x: x not in ignore_list, diff_list))
return diff_list
def __sizeof__(self):
S = 0 # Full size of the object
S += getsizeof(self.Time)
S += getsizeof(self.Angle)
S += getsizeof(self.Nt_tot)
S += getsizeof(self.Na_tot)
S += getsizeof(self.logger_name)
S += getsizeof(self.Yr)
S += getsizeof(self.Vr)
S += getsizeof(self.Ar)
S += getsizeof(self.meshsolution)
if self.FEA_dict is not None:
for key, value in self.FEA_dict.items():
S += getsizeof(value) + getsizeof(key)
return S
def as_dict(self, type_handle_ndarray=0, keep_function=False, **kwargs):
OutStruct_dict = dict()
if self.Time is None:
OutStruct_dict["Time"] = None
else:
OutStruct_dict["Time"] = self.Time.as_dict(
type_handle_ndarray=type_handle_ndarray,
keep_function=keep_function,
**kwargs
)
if self.Angle is None:
OutStruct_dict["Angle"] = None
else:
OutStruct_dict["Angle"] = self.Angle.as_dict(
type_handle_ndarray=type_handle_ndarray,
keep_function=keep_function,
**kwargs
)
OutStruct_dict["Nt_tot"] = self.Nt_tot
OutStruct_dict["Na_tot"] = self.Na_tot
OutStruct_dict["logger_name"] = self.logger_name
if self.Yr is None:
OutStruct_dict["Yr"] = None
else:
OutStruct_dict["Yr"] = self.Yr.as_dict(
type_handle_ndarray=type_handle_ndarray,
keep_function=keep_function,
**kwargs
)
if self.Vr is None:
OutStruct_dict["Vr"] = None
else:
OutStruct_dict["Vr"] = self.Vr.as_dict(
type_handle_ndarray=type_handle_ndarray,
keep_function=keep_function,
**kwargs
)
if self.Ar is None:
OutStruct_dict["Ar"] = None
else:
OutStruct_dict["Ar"] = self.Ar.as_dict(
type_handle_ndarray=type_handle_ndarray,
keep_function=keep_function,
**kwargs
)
if self.meshsolution is None:
OutStruct_dict["meshsolution"] = None
else:
OutStruct_dict["meshsolution"] = self.meshsolution.as_dict(
type_handle_ndarray=type_handle_ndarray,
keep_function=keep_function,
**kwargs
)
OutStruct_dict["FEA_dict"] = (
self.FEA_dict.copy() if self.FEA_dict is not None else None
)
# The class name is added to the dict for deserialisation purpose
OutStruct_dict["__class__"] = "OutStruct"
return OutStruct_dict
def _set_None(self):
self.Time = None
self.Angle = None
self.Nt_tot = None
self.Na_tot = None
self.logger_name = None
self.Yr = None
self.Vr = None
self.Ar = None
if self.meshsolution is not None:
self.meshsolution._set_None()
self.FEA_dict = None
    def _get_Time(self):
        """Getter of Time."""
        return self._Time

    def _set_Time(self, value):
        """Setter of Time.

        Accepts a file path (str), an init dict carrying a "__class__" key,
        -1 for a default Data, or a Data instance; the value is type-checked
        before being stored.
        """
        if isinstance(value, str):  # Load from file
            value = load_init_dict(value)[1]
        if isinstance(value, dict) and "__class__" in value:
            # Dynamic import so any Data subclass can be rebuilt from its dict
            class_obj = import_class(
                "SciDataTool.Classes", value.get("__class__"), "Time"
            )
            value = class_obj(init_dict=value)
        elif type(value) is int and value == -1:  # Default constructor
            value = Data()
        check_var("Time", value, "Data")
        self._Time = value

    Time = property(
        fget=_get_Time,
        fset=_set_Time,
        doc=u"""Structural time Data object

        :Type: SciDataTool.Classes.DataND.Data
        """,
    )
    def _get_Angle(self):
        """Getter of Angle."""
        return self._Angle

    def _set_Angle(self, value):
        """Setter of Angle.

        Accepts a file path (str), an init dict carrying a "__class__" key,
        -1 for a default Data, or a Data instance; the value is type-checked
        before being stored.
        """
        if isinstance(value, str):  # Load from file
            value = load_init_dict(value)[1]
        if isinstance(value, dict) and "__class__" in value:
            # Dynamic import so any Data subclass can be rebuilt from its dict
            class_obj = import_class(
                "SciDataTool.Classes", value.get("__class__"), "Angle"
            )
            value = class_obj(init_dict=value)
        elif type(value) is int and value == -1:  # Default constructor
            value = Data()
        check_var("Angle", value, "Data")
        self._Angle = value

    Angle = property(
        fget=_get_Angle,
        fset=_set_Angle,
        doc=u"""Structural position Data object

        :Type: SciDataTool.Classes.DataND.Data
        """,
    )
def _get_Nt_tot(self):
return self._Nt_tot
def _set_Nt_tot(self, value):
check_var("Nt_tot", value, "int")
self._Nt_tot = value
Nt_tot = property(
fget=_get_Nt_tot,
fset=_set_Nt_tot,
doc=u"""Length of the time vector
:Type: int
""",
)
def _get_Na_tot(self):
return self._Na_tot
def _set_Na_tot(self, value):
check_var("Na_tot", value, "int")
self._Na_tot = value
Na_tot = property(
fget=_get_Na_tot,
fset=_set_Na_tot,
doc=u"""Length of the angle vector
:Type: int
""",
)
def _get_logger_name(self):
return self._logger_name
def _set_logger_name(self, value):
check_var("logger_name", value, "str")
self._logger_name = value
logger_name = property(
fget=_get_logger_name,
fset=_set_logger_name,
doc=u"""Name of the logger to use
:Type: str
""",
)
    def _get_Yr(self):
        """Getter of Yr."""
        return self._Yr

    def _set_Yr(self, value):
        """Setter of Yr.

        Accepts a file path (str), an init dict carrying a "__class__" key,
        -1 for a default DataND, or a DataND instance; the value is
        type-checked before being stored.
        """
        if isinstance(value, str):  # Load from file
            value = load_init_dict(value)[1]
        if isinstance(value, dict) and "__class__" in value:
            # Rebuild the proper DataND subclass from its serialized dict
            class_obj = import_class(
                "SciDataTool.Classes", value.get("__class__"), "Yr"
            )
            value = class_obj(init_dict=value)
        elif type(value) is int and value == -1:  # Default constructor
            value = DataND()
        check_var("Yr", value, "DataND")
        self._Yr = value

    Yr = property(
        fget=_get_Yr,
        fset=_set_Yr,
        doc=u"""Displacement output

        :Type: SciDataTool.Classes.DataND.DataND
        """,
    )
    def _get_Vr(self):
        """Getter of Vr."""
        return self._Vr

    def _set_Vr(self, value):
        """Setter of Vr.

        Accepts a file path (str), an init dict carrying a "__class__" key,
        -1 for a default DataND, or a DataND instance; the value is
        type-checked before being stored.
        """
        if isinstance(value, str):  # Load from file
            value = load_init_dict(value)[1]
        if isinstance(value, dict) and "__class__" in value:
            # Rebuild the proper DataND subclass from its serialized dict
            class_obj = import_class(
                "SciDataTool.Classes", value.get("__class__"), "Vr"
            )
            value = class_obj(init_dict=value)
        elif type(value) is int and value == -1:  # Default constructor
            value = DataND()
        check_var("Vr", value, "DataND")
        self._Vr = value

    Vr = property(
        fget=_get_Vr,
        fset=_set_Vr,
        doc=u"""Velocity output

        :Type: SciDataTool.Classes.DataND.DataND
        """,
    )
    def _get_Ar(self):
        """Getter of Ar."""
        return self._Ar

    def _set_Ar(self, value):
        """Setter of Ar.

        Accepts a file path (str), an init dict carrying a "__class__" key,
        -1 for a default DataND, or a DataND instance; the value is
        type-checked before being stored.
        """
        if isinstance(value, str):  # Load from file
            value = load_init_dict(value)[1]
        if isinstance(value, dict) and "__class__" in value:
            # Rebuild the proper DataND subclass from its serialized dict
            class_obj = import_class(
                "SciDataTool.Classes", value.get("__class__"), "Ar"
            )
            value = class_obj(init_dict=value)
        elif type(value) is int and value == -1:  # Default constructor
            value = DataND()
        check_var("Ar", value, "DataND")
        self._Ar = value

    Ar = property(
        fget=_get_Ar,
        fset=_set_Ar,
        doc=u"""Acceleration output

        :Type: SciDataTool.Classes.DataND.DataND
        """,
    )
    def _get_meshsolution(self):
        """Getter of meshsolution."""
        return self._meshsolution

    def _set_meshsolution(self, value):
        """Setter of meshsolution.

        Accepts a file path (str), an init dict carrying a "__class__" key,
        -1 for a default MeshSolution, or a MeshSolution instance.  As a side
        effect the new child's ``parent`` link is pointed back at this object.
        """
        if isinstance(value, str):  # Load from file
            value = load_init_dict(value)[1]
        if isinstance(value, dict) and "__class__" in value:
            # Rebuild the proper MeshSolution subclass from its serialized dict
            class_obj = import_class(
                "pyleecan.Classes", value.get("__class__"), "meshsolution"
            )
            value = class_obj(init_dict=value)
        elif type(value) is int and value == -1:  # Default constructor
            value = MeshSolution()
        check_var("meshsolution", value, "MeshSolution")
        self._meshsolution = value
        if self._meshsolution is not None:
            # Keep the parent link consistent in the object tree
            self._meshsolution.parent = self

    meshsolution = property(
        fget=_get_meshsolution,
        fset=_set_meshsolution,
        doc=u"""FEA software mesh and solution

        :Type: MeshSolution
        """,
    )
def _get_FEA_dict(self):
return self._FEA_dict
def _set_FEA_dict(self, value):
if type(value) is int and value == -1:
value = dict()
check_var("FEA_dict", value, "dict")
self._FEA_dict = value
FEA_dict = property(
fget=_get_FEA_dict,
fset=_set_FEA_dict,
doc=u"""dictionary containing the main FEA parameter
:Type: dict
""",
)
| true | true |
f73ddb833ead2d28797d765cc7aee4ff041ee001 | 13,441 | py | Python | _unittest/test_21_Circuit.py | pyansys/PyAEDT | 312d2d2a6c091dbae4272b6ce3ff489cdd21aa21 | [
"MIT"
] | 12 | 2021-07-01T06:35:12.000Z | 2021-09-22T15:53:07.000Z | _unittest/test_21_Circuit.py | pyansys/PyAEDT | 312d2d2a6c091dbae4272b6ce3ff489cdd21aa21 | [
"MIT"
] | 111 | 2021-07-01T16:02:36.000Z | 2021-09-29T12:36:44.000Z | _unittest/test_21_Circuit.py | pyansys/PyAEDT | 312d2d2a6c091dbae4272b6ce3ff489cdd21aa21 | [
"MIT"
] | 5 | 2021-07-09T14:24:59.000Z | 2021-09-07T12:42:03.000Z | import gc
import os
import time
# Import required modules
from pyaedt import Circuit
from pyaedt.generic.filesystem import Scratch
from pyaedt.generic.TouchstoneParser import read_touchstone
# Setup paths for module imports
from _unittest.conftest import local_path, scratch_path, config
try:
import pytest # noqa: F401
except ImportError:
import _unittest_ironpython.conf_unittest as pytest # noqa: F401
# Names of the example projects / input files that TestClass.setup_class
# stages into the scratch area below.
original_project_name = "Galileo_t21"
test_project_name = "Galileo_t21"
netlist1 = "netlist_small.cir"
netlist2 = "Schematic1.qcv"
touchstone = "SSN_ssn.s6p"
touchstone2 = "Galileo_V3P3S0.ts"
ami_project = "AMI_Example"
class TestClass:
    """End-to-end tests for the pyaedt ``Circuit`` (Nexxim) application.

    A single AEDT ``Circuit`` session (``self.aedtapp``) is shared by all
    tests and operates on scratch copies of the Galileo / AMI example
    projects.  NOTE(review): several tests depend on designs, setups and
    components created by earlier tests, so the numbered order matters.
    """

    def setup_class(self):
        # Stage every example artifact in a scratch folder so the AEDT
        # session never modifies the originals.
        with Scratch(scratch_path) as self.local_scratch:
            time.sleep(2)
            example_project = os.path.join(local_path, "example_models", original_project_name + ".aedt")
            netlist_file1 = os.path.join(local_path, "example_models", netlist1)
            netlist_file2 = os.path.join(local_path, "example_models", netlist2)
            touchstone_file = os.path.join(local_path, "example_models", touchstone)
            touchstone_file2 = os.path.join(local_path, "example_models", touchstone2)
            self.test_project = self.local_scratch.copyfile(
                example_project, os.path.join(self.local_scratch.path, test_project_name + ".aedt")
            )
            self.local_scratch.copyfile(netlist_file1)
            self.local_scratch.copyfile(netlist_file2)
            self.local_scratch.copyfile(touchstone_file)
            self.local_scratch.copyfile(touchstone_file2)
            self.local_scratch.copyfolder(
                os.path.join(local_path, "example_models", original_project_name + ".aedb"),
                os.path.join(self.local_scratch.path, test_project_name + ".aedb"),
            )
            ami_example_project = os.path.join(local_path, "example_models", ami_project + ".aedt")
            self.ami_example_project = self.local_scratch.copyfile(ami_example_project)
            self.local_scratch.copyfolder(
                os.path.join(local_path, "example_models", ami_project + ".aedb"),
                os.path.join(self.local_scratch.path, ami_project + ".aedb"),
            )
            self.aedtapp = Circuit(self.test_project)

    def teardown_class(self):
        # Best-effort cleanup: close every open project, then drop the scratch dir.
        self.aedtapp._desktop.ClearMessages("", "", 3)
        for proj in self.aedtapp.project_list:
            try:
                self.aedtapp.close_project(proj, saveproject=False)
            except:  # noqa: E722 - deliberately swallow per-project close failures
                pass
        self.local_scratch.remove()
        gc.collect()

    def test_01_create_inductor(self):
        myind = self.aedtapp.modeler.schematic.create_inductor(value=1e-9, location=[0.2, 0.2])
        assert type(myind.id) is int
        assert myind.parameters["L"] == "1e-09"

    def test_02_create_resistor(self):
        myres = self.aedtapp.modeler.schematic.create_resistor(value=50, location=[0.4, 0.2])
        assert type(myres.id) is int
        assert myres.parameters["R"] == "50"

    def test_03_create_capacitor(self):
        mycap = self.aedtapp.modeler.schematic.create_capacitor(value=1e-12, location=[0.6, 0.2])
        assert type(mycap.id) is int
        assert mycap.parameters["C"] == "1e-12"

    def test_04_getpin_names(self):
        # Pins can be queried by object, numeric id, or composed name.
        mycap2 = self.aedtapp.modeler.schematic.create_capacitor(value=1e-12)
        pinnames = self.aedtapp.modeler.schematic.get_pins(mycap2)
        pinnames2 = self.aedtapp.modeler.schematic.get_pins(mycap2.id)
        pinnames3 = self.aedtapp.modeler.schematic.get_pins(mycap2.composed_name)
        assert pinnames2 == pinnames3
        assert type(pinnames) is list
        assert len(pinnames) == 2

    def test_05_getpin_location(self):
        for el in self.aedtapp.modeler.schematic.components:
            pinnames = self.aedtapp.modeler.schematic.get_pins(el)
            for pinname in pinnames:
                pinlocation = self.aedtapp.modeler.schematic.get_pin_location(el, pinname)
                assert len(pinlocation) == 2

    def test_06_add_3dlayout_component(self):
        myedb = self.aedtapp.modeler.schematic.add_subcircuit_3dlayout("Galileo_G87173_204")
        assert type(myedb.id) is int

    def test_07_add_hfss_component(self):
        my_model, myname = self.aedtapp.modeler.schematic.create_field_model(
            "uUSB", "Setup1 : Sweep", ["usb_N_conn", "usb_N_pcb", "usb_P_conn", "usb_P_pcb"]
        )
        assert type(my_model) is int

    def test_07a_push_excitation(self):
        setup_name = "LNA"
        LNA_setup = self.aedtapp.create_setup(setup_name)
        assert LNA_setup
        # Both Thevenin and non-Thevenin push must succeed.
        assert self.aedtapp.push_excitations(instance_name="U1", setup_name="LNA", thevenin_calculation=False)
        assert self.aedtapp.push_excitations(instance_name="U1", setup_name="LNA", thevenin_calculation=True)

    def test_08_import_mentor_netlist(self):
        self.aedtapp.insert_design("MentorSchematicImport")
        assert self.aedtapp.create_schematic_from_mentor_netlist(os.path.join(self.local_scratch.path, netlist2))
        pass

    def test_09_import_netlist(self):
        self.aedtapp.insert_design("SchematicImport")
        assert self.aedtapp.create_schematic_from_netlist(os.path.join(self.local_scratch.path, netlist1))

    def test_10_import_touchstone(self):
        self.aedtapp.insert_design("Touchstone_import")
        ports = self.aedtapp.import_touchstone_solution(os.path.join(self.local_scratch.path, touchstone))
        ports2 = self.aedtapp.import_touchstone_solution(os.path.join(self.local_scratch.path, touchstone2))
        numports = len(ports)
        assert numports == 6
        numports2 = len(ports2)
        assert numports2 == 3
        # First half of the ports are treated as TX, second half as RX.
        tx = ports[: int(numports / 2)]
        rx = ports[int(numports / 2) :]
        insertions = ["dB(S({},{}))".format(i, j) for i, j in zip(tx, rx)]
        assert self.aedtapp.create_touchstone_report("Insertion Losses", insertions)
        touchstone_data = self.aedtapp.get_touchstone_data(insertions)
        assert touchstone_data

    def test_11_export_fullwave(self):
        output = self.aedtapp.export_fullwave_spice(
            os.path.join(self.local_scratch.path, touchstone), is_solution_file=True
        )
        assert output

    def test_12_connect_components(self):
        # Build a simple port -> L -> R -> C -> GND chain and verify each link.
        myind = self.aedtapp.modeler.schematic.create_inductor("L100", 1e-9)
        myres = self.aedtapp.modeler.schematic.create_resistor("R100", 50)
        mycap = self.aedtapp.modeler.schematic.create_capacitor("C100", 1e-12)
        portname = self.aedtapp.modeler.schematic.create_interface_port("Port1")
        assert "Port1" in portname.name
        assert myind.pins[0].connect_to_component(portname.pins[0])
        assert myind.pins[1].connect_to_component(myres.pins[1])
        assert self.aedtapp.modeler.connect_schematic_components(myres.id, mycap.id, pinnum_first=1)
        gnd = self.aedtapp.modeler.schematic.create_gnd()
        assert mycap.pins[1].connect_to_component(gnd.pins[0])
        # create_interface_port
        # NOTE(review): the mapping below is built but never asserted on.
        L1_pins = myind.pins
        L1_pin2location = {}
        for pin in L1_pins:
            L1_pin2location[pin.name] = pin.location

    def test_13_properties(self):
        assert self.aedtapp.modeler.model_units

    def test_14_move(self):
        # Move accepts both default units and an explicit unit string.
        assert self.aedtapp.modeler.move("L100", [0, -0.00508])
        assert self.aedtapp.modeler.move("L100", [0, 200], "mil")

    def test_15_rotate(self):
        assert self.aedtapp.modeler.rotate("Port1")

    def test_16_read_touchstone(self):
        data = read_touchstone(os.path.join(self.local_scratch.path, touchstone))
        assert len(data.expressions) > 0
        assert data.data_real()
        assert data.data_imag()
        assert data.data_db()

    def test_17_create_setup(self):
        setup_name = "Dom_LNA"
        LNA_setup = self.aedtapp.create_setup(setup_name)
        LNA_setup.SweepDefinition = [
            ("Variable", "Freq"),
            ("Data", "LIN 1GHz 5GHz 1001"),
            ("OffsetF1", False),
            ("Synchronize", 0),
        ]
        assert LNA_setup.update()

    @pytest.mark.skipif(os.name == "posix", reason="To be investigated on linux.")
    def test_18_export_touchstone(self):
        # Uses the "Dom_LNA" setup created in test_17.
        assert self.aedtapp.analyze_nominal()
        time.sleep(30)
        assert self.aedtapp.export_touchstone("Dom_LNA", "Dom_LNA", os.path.join(self.local_scratch.path, "new.s2p"))

    def test_19A_create_sweeps(self):
        setup_name = "Sweep_LNA"
        LNA_setup = self.aedtapp.create_setup(setup_name)
        # Step sweep overwrites, point sweep appends to the same definition.
        LNA_setup.add_sweep_step("Freq", 1, 2, 0.01, "GHz", override_existing_sweep=True)
        assert LNA_setup.props["SweepDefinition"]["Data"] == "LIN 1GHz 2GHz 0.01GHz"
        LNA_setup.add_sweep_points("Freq", [11, 12, 13.4], "GHz", override_existing_sweep=False)
        assert "13.4GHz" in LNA_setup.props["SweepDefinition"]["Data"]
        assert "LIN 1GHz 2GHz 0.01GHz" in LNA_setup.props["SweepDefinition"]["Data"]
        # Adding a sweep on a second variable turns SweepDefinition into a list.
        LNA_setup.add_sweep_count("Temp", 20, 100, 81, "cel", count_type="Decade", override_existing_sweep=True)
        assert isinstance(LNA_setup.props["SweepDefinition"], list)
        assert LNA_setup.props["SweepDefinition"][1]["Variable"] == "Temp"
        assert LNA_setup.props["SweepDefinition"][1]["Data"] == "DEC 20cel 100cel 81"

    def test_19B_create_EyE_setups(self):
        setup_name = "Dom_Verify"
        assert self.aedtapp.create_setup(setup_name, "NexximVerifEye")
        setup_name = "Dom_Quick"
        assert self.aedtapp.create_setup(setup_name, "NexximQuickEye")
        setup_name = "Dom_AMI"
        assert self.aedtapp.create_setup(setup_name, "NexximAMI")

    def test_20_create_AMI_plots(self):
        # Switches the session to the AMI example project copied in setup_class.
        self.aedtapp.load_project(self.ami_example_project, close_active_proj=True)
        report_name = "MyReport"
        assert (
            self.aedtapp.post.create_ami_initial_response_plot(
                "AMIAnalysis",
                "b_input_15",
                self.aedtapp.available_variations.nominal,
                plot_type="Rectangular Stacked Plot",
                plot_final_response=True,
                plot_intermediate_response=True,
                plotname=report_name,
            )
            == report_name
        )
        setup_name = "Dom_Verify"
        assert self.aedtapp.create_setup(setup_name, "NexximVerifEye")
        setup_name = "Dom_Quick"
        assert self.aedtapp.create_setup(setup_name, "NexximQuickEye")
        assert (
            self.aedtapp.post.create_ami_statistical_eye_plot(
                "AMIAnalysis", "b_output4_14", self.aedtapp.available_variations.nominal, plotname="MyReport1"
            )
            == "MyReport1"
        )
        assert (
            self.aedtapp.post.create_statistical_eye_plot(
                "Dom_Quick",
                "b_input_15.int_ami_rx.eye_probe",
                self.aedtapp.available_variations.nominal,
                plotname="MyReportQ",
            )
            == "MyReportQ"
        )

    @pytest.mark.skipif(config["desktopVersion"] > "2021.2", reason="Skipped on versions higher than 2021.2")
    def test_20B_create_AMI_plots(self):
        assert (
            self.aedtapp.post.create_statistical_eye_plot(
                "Dom_Verify",
                "b_input_15.int_ami_rx.eye_probe",
                self.aedtapp.available_variations.nominal,
                plotname="MyReportV",
            )
            == "MyReportV"
        )

    def test_21_assign_voltage_sinusoidal_excitation_to_ports(self):
        settings = ["123 V", "10deg", "", "", "0V", "15GHz", "0s", "0", "0deg", ""]
        ports_list = ["P1_1", "P2_2"]
        assert self.aedtapp.assign_voltage_sinusoidal_excitation_to_ports(ports_list, settings)

    def test_22_assign_current_sinusoidal_excitation_to_ports(self):
        settings = ["", "", "20A", "50A", "4A", "", "0s", "0", "0deg", "1", "20Hz"]
        ports_list = ["P1_1"]
        assert self.aedtapp.assign_current_sinusoidal_excitation_to_ports(ports_list, settings)

    def test_23_assign_power_sinusoidal_excitation_to_ports(self):
        settings = ["", "", "", "", "20W", "14GHz", "0s", "0", "0deg", "0Hz"]
        ports_list = ["P2_2"]
        assert self.aedtapp.assign_power_sinusoidal_excitation_to_ports(ports_list, settings)

    def test_24_new_connect_components(self):
        # Components can be referenced by object, composed name, or id.
        self.aedtapp.insert_design("Components")
        myind = self.aedtapp.modeler.schematic.create_inductor("L100", 1e-9)
        myres = self.aedtapp.modeler.components.create_resistor("R100", 50)
        mycap = self.aedtapp.modeler.components.create_capacitor("C100", 1e-12)
        myind2 = self.aedtapp.modeler.components.create_inductor("L101", 1e-9)
        port = self.aedtapp.modeler.components.create_interface_port("Port1")
        assert self.aedtapp.modeler.schematic.connect_components_in_series([myind, myres.composed_name])
        assert self.aedtapp.modeler.schematic.connect_components_in_parallel([mycap, port, myind2.id])

    def test_25_import_model(self):
        self.aedtapp.insert_design("Touch_import")
        touch = os.path.join(local_path, "example_models", "SSN_ssn.s6p")
        # NOTE(review): "touchsthone" [sic] matches the actual pyaedt API name.
        t1 = self.aedtapp.modeler.schematic.create_touchsthone_component(touch)
        assert t1
        assert len(t1.pins) == 6
        t2 = self.aedtapp.modeler.schematic.create_touchsthone_component(touch)
        assert t2
| 45.255892 | 117 | 0.666543 | import gc
import os
import time
from pyaedt import Circuit
from pyaedt.generic.filesystem import Scratch
from pyaedt.generic.TouchstoneParser import read_touchstone
from _unittest.conftest import local_path, scratch_path, config
try:
import pytest
except ImportError:
import _unittest_ironpython.conf_unittest as pytest
original_project_name = "Galileo_t21"
test_project_name = "Galileo_t21"
netlist1 = "netlist_small.cir"
netlist2 = "Schematic1.qcv"
touchstone = "SSN_ssn.s6p"
touchstone2 = "Galileo_V3P3S0.ts"
ami_project = "AMI_Example"
class TestClass:
def setup_class(self):
with Scratch(scratch_path) as self.local_scratch:
time.sleep(2)
example_project = os.path.join(local_path, "example_models", original_project_name + ".aedt")
netlist_file1 = os.path.join(local_path, "example_models", netlist1)
netlist_file2 = os.path.join(local_path, "example_models", netlist2)
touchstone_file = os.path.join(local_path, "example_models", touchstone)
touchstone_file2 = os.path.join(local_path, "example_models", touchstone2)
self.test_project = self.local_scratch.copyfile(
example_project, os.path.join(self.local_scratch.path, test_project_name + ".aedt")
)
self.local_scratch.copyfile(netlist_file1)
self.local_scratch.copyfile(netlist_file2)
self.local_scratch.copyfile(touchstone_file)
self.local_scratch.copyfile(touchstone_file2)
self.local_scratch.copyfolder(
os.path.join(local_path, "example_models", original_project_name + ".aedb"),
os.path.join(self.local_scratch.path, test_project_name + ".aedb"),
)
ami_example_project = os.path.join(local_path, "example_models", ami_project + ".aedt")
self.ami_example_project = self.local_scratch.copyfile(ami_example_project)
self.local_scratch.copyfolder(
os.path.join(local_path, "example_models", ami_project + ".aedb"),
os.path.join(self.local_scratch.path, ami_project + ".aedb"),
)
self.aedtapp = Circuit(self.test_project)
def teardown_class(self):
self.aedtapp._desktop.ClearMessages("", "", 3)
for proj in self.aedtapp.project_list:
try:
self.aedtapp.close_project(proj, saveproject=False)
except:
pass
self.local_scratch.remove()
gc.collect()
def test_01_create_inductor(self):
myind = self.aedtapp.modeler.schematic.create_inductor(value=1e-9, location=[0.2, 0.2])
assert type(myind.id) is int
assert myind.parameters["L"] == "1e-09"
def test_02_create_resistor(self):
myres = self.aedtapp.modeler.schematic.create_resistor(value=50, location=[0.4, 0.2])
assert type(myres.id) is int
assert myres.parameters["R"] == "50"
def test_03_create_capacitor(self):
mycap = self.aedtapp.modeler.schematic.create_capacitor(value=1e-12, location=[0.6, 0.2])
assert type(mycap.id) is int
assert mycap.parameters["C"] == "1e-12"
def test_04_getpin_names(self):
mycap2 = self.aedtapp.modeler.schematic.create_capacitor(value=1e-12)
pinnames = self.aedtapp.modeler.schematic.get_pins(mycap2)
pinnames2 = self.aedtapp.modeler.schematic.get_pins(mycap2.id)
pinnames3 = self.aedtapp.modeler.schematic.get_pins(mycap2.composed_name)
assert pinnames2 == pinnames3
assert type(pinnames) is list
assert len(pinnames) == 2
def test_05_getpin_location(self):
for el in self.aedtapp.modeler.schematic.components:
pinnames = self.aedtapp.modeler.schematic.get_pins(el)
for pinname in pinnames:
pinlocation = self.aedtapp.modeler.schematic.get_pin_location(el, pinname)
assert len(pinlocation) == 2
def test_06_add_3dlayout_component(self):
myedb = self.aedtapp.modeler.schematic.add_subcircuit_3dlayout("Galileo_G87173_204")
assert type(myedb.id) is int
def test_07_add_hfss_component(self):
my_model, myname = self.aedtapp.modeler.schematic.create_field_model(
"uUSB", "Setup1 : Sweep", ["usb_N_conn", "usb_N_pcb", "usb_P_conn", "usb_P_pcb"]
)
assert type(my_model) is int
def test_07a_push_excitation(self):
setup_name = "LNA"
LNA_setup = self.aedtapp.create_setup(setup_name)
assert LNA_setup
assert self.aedtapp.push_excitations(instance_name="U1", setup_name="LNA", thevenin_calculation=False)
assert self.aedtapp.push_excitations(instance_name="U1", setup_name="LNA", thevenin_calculation=True)
def test_08_import_mentor_netlist(self):
self.aedtapp.insert_design("MentorSchematicImport")
assert self.aedtapp.create_schematic_from_mentor_netlist(os.path.join(self.local_scratch.path, netlist2))
pass
def test_09_import_netlist(self):
self.aedtapp.insert_design("SchematicImport")
assert self.aedtapp.create_schematic_from_netlist(os.path.join(self.local_scratch.path, netlist1))
def test_10_import_touchstone(self):
self.aedtapp.insert_design("Touchstone_import")
ports = self.aedtapp.import_touchstone_solution(os.path.join(self.local_scratch.path, touchstone))
ports2 = self.aedtapp.import_touchstone_solution(os.path.join(self.local_scratch.path, touchstone2))
numports = len(ports)
assert numports == 6
numports2 = len(ports2)
assert numports2 == 3
tx = ports[: int(numports / 2)]
rx = ports[int(numports / 2) :]
insertions = ["dB(S({},{}))".format(i, j) for i, j in zip(tx, rx)]
assert self.aedtapp.create_touchstone_report("Insertion Losses", insertions)
touchstone_data = self.aedtapp.get_touchstone_data(insertions)
assert touchstone_data
def test_11_export_fullwave(self):
output = self.aedtapp.export_fullwave_spice(
os.path.join(self.local_scratch.path, touchstone), is_solution_file=True
)
assert output
def test_12_connect_components(self):
myind = self.aedtapp.modeler.schematic.create_inductor("L100", 1e-9)
myres = self.aedtapp.modeler.schematic.create_resistor("R100", 50)
mycap = self.aedtapp.modeler.schematic.create_capacitor("C100", 1e-12)
portname = self.aedtapp.modeler.schematic.create_interface_port("Port1")
assert "Port1" in portname.name
assert myind.pins[0].connect_to_component(portname.pins[0])
assert myind.pins[1].connect_to_component(myres.pins[1])
assert self.aedtapp.modeler.connect_schematic_components(myres.id, mycap.id, pinnum_first=1)
gnd = self.aedtapp.modeler.schematic.create_gnd()
assert mycap.pins[1].connect_to_component(gnd.pins[0])
L1_pins = myind.pins
L1_pin2location = {}
for pin in L1_pins:
L1_pin2location[pin.name] = pin.location
def test_13_properties(self):
assert self.aedtapp.modeler.model_units
def test_14_move(self):
assert self.aedtapp.modeler.move("L100", [0, -0.00508])
assert self.aedtapp.modeler.move("L100", [0, 200], "mil")
def test_15_rotate(self):
assert self.aedtapp.modeler.rotate("Port1")
def test_16_read_touchstone(self):
data = read_touchstone(os.path.join(self.local_scratch.path, touchstone))
assert len(data.expressions) > 0
assert data.data_real()
assert data.data_imag()
assert data.data_db()
def test_17_create_setup(self):
setup_name = "Dom_LNA"
LNA_setup = self.aedtapp.create_setup(setup_name)
LNA_setup.SweepDefinition = [
("Variable", "Freq"),
("Data", "LIN 1GHz 5GHz 1001"),
("OffsetF1", False),
("Synchronize", 0),
]
assert LNA_setup.update()
@pytest.mark.skipif(os.name == "posix", reason="To be investigated on linux.")
def test_18_export_touchstone(self):
assert self.aedtapp.analyze_nominal()
time.sleep(30)
assert self.aedtapp.export_touchstone("Dom_LNA", "Dom_LNA", os.path.join(self.local_scratch.path, "new.s2p"))
def test_19A_create_sweeps(self):
setup_name = "Sweep_LNA"
LNA_setup = self.aedtapp.create_setup(setup_name)
LNA_setup.add_sweep_step("Freq", 1, 2, 0.01, "GHz", override_existing_sweep=True)
assert LNA_setup.props["SweepDefinition"]["Data"] == "LIN 1GHz 2GHz 0.01GHz"
LNA_setup.add_sweep_points("Freq", [11, 12, 13.4], "GHz", override_existing_sweep=False)
assert "13.4GHz" in LNA_setup.props["SweepDefinition"]["Data"]
assert "LIN 1GHz 2GHz 0.01GHz" in LNA_setup.props["SweepDefinition"]["Data"]
LNA_setup.add_sweep_count("Temp", 20, 100, 81, "cel", count_type="Decade", override_existing_sweep=True)
assert isinstance(LNA_setup.props["SweepDefinition"], list)
assert LNA_setup.props["SweepDefinition"][1]["Variable"] == "Temp"
assert LNA_setup.props["SweepDefinition"][1]["Data"] == "DEC 20cel 100cel 81"
def test_19B_create_EyE_setups(self):
setup_name = "Dom_Verify"
assert self.aedtapp.create_setup(setup_name, "NexximVerifEye")
setup_name = "Dom_Quick"
assert self.aedtapp.create_setup(setup_name, "NexximQuickEye")
setup_name = "Dom_AMI"
assert self.aedtapp.create_setup(setup_name, "NexximAMI")
def test_20_create_AMI_plots(self):
self.aedtapp.load_project(self.ami_example_project, close_active_proj=True)
report_name = "MyReport"
assert (
self.aedtapp.post.create_ami_initial_response_plot(
"AMIAnalysis",
"b_input_15",
self.aedtapp.available_variations.nominal,
plot_type="Rectangular Stacked Plot",
plot_final_response=True,
plot_intermediate_response=True,
plotname=report_name,
)
== report_name
)
setup_name = "Dom_Verify"
assert self.aedtapp.create_setup(setup_name, "NexximVerifEye")
setup_name = "Dom_Quick"
assert self.aedtapp.create_setup(setup_name, "NexximQuickEye")
assert (
self.aedtapp.post.create_ami_statistical_eye_plot(
"AMIAnalysis", "b_output4_14", self.aedtapp.available_variations.nominal, plotname="MyReport1"
)
== "MyReport1"
)
assert (
self.aedtapp.post.create_statistical_eye_plot(
"Dom_Quick",
"b_input_15.int_ami_rx.eye_probe",
self.aedtapp.available_variations.nominal,
plotname="MyReportQ",
)
== "MyReportQ"
)
@pytest.mark.skipif(config["desktopVersion"] > "2021.2", reason="Skipped on versions higher than 2021.2")
def test_20B_create_AMI_plots(self):
assert (
self.aedtapp.post.create_statistical_eye_plot(
"Dom_Verify",
"b_input_15.int_ami_rx.eye_probe",
self.aedtapp.available_variations.nominal,
plotname="MyReportV",
)
== "MyReportV"
)
def test_21_assign_voltage_sinusoidal_excitation_to_ports(self):
settings = ["123 V", "10deg", "", "", "0V", "15GHz", "0s", "0", "0deg", ""]
ports_list = ["P1_1", "P2_2"]
assert self.aedtapp.assign_voltage_sinusoidal_excitation_to_ports(ports_list, settings)
def test_22_assign_current_sinusoidal_excitation_to_ports(self):
settings = ["", "", "20A", "50A", "4A", "", "0s", "0", "0deg", "1", "20Hz"]
ports_list = ["P1_1"]
assert self.aedtapp.assign_current_sinusoidal_excitation_to_ports(ports_list, settings)
def test_23_assign_power_sinusoidal_excitation_to_ports(self):
settings = ["", "", "", "", "20W", "14GHz", "0s", "0", "0deg", "0Hz"]
ports_list = ["P2_2"]
assert self.aedtapp.assign_power_sinusoidal_excitation_to_ports(ports_list, settings)
def test_24_new_connect_components(self):
self.aedtapp.insert_design("Components")
myind = self.aedtapp.modeler.schematic.create_inductor("L100", 1e-9)
myres = self.aedtapp.modeler.components.create_resistor("R100", 50)
mycap = self.aedtapp.modeler.components.create_capacitor("C100", 1e-12)
myind2 = self.aedtapp.modeler.components.create_inductor("L101", 1e-9)
port = self.aedtapp.modeler.components.create_interface_port("Port1")
assert self.aedtapp.modeler.schematic.connect_components_in_series([myind, myres.composed_name])
assert self.aedtapp.modeler.schematic.connect_components_in_parallel([mycap, port, myind2.id])
def test_25_import_model(self):
self.aedtapp.insert_design("Touch_import")
touch = os.path.join(local_path, "example_models", "SSN_ssn.s6p")
t1 = self.aedtapp.modeler.schematic.create_touchsthone_component(touch)
assert t1
assert len(t1.pins) == 6
t2 = self.aedtapp.modeler.schematic.create_touchsthone_component(touch)
assert t2
| true | true |
f73ddbe5ca259d15bf50fa56bc52d738e9c181c7 | 1,680 | py | Python | network_model/tools/bn_tools_t.py | b3ttin4/network_simulation_and_analysis | 56ec3fd497ad95eee6eec00042d332133495288e | [
"MIT"
] | null | null | null | network_model/tools/bn_tools_t.py | b3ttin4/network_simulation_and_analysis | 56ec3fd497ad95eee6eec00042d332133495288e | [
"MIT"
] | null | null | null | network_model/tools/bn_tools_t.py | b3ttin4/network_simulation_and_analysis | 56ec3fd497ad95eee6eec00042d332133495288e | [
"MIT"
] | null | null | null | import numpy as np
# Nonlinearity functions (Numpy implementation)
nl_linear = lambda x: x
nl_tanh = lambda x: np.tanh(x)
nl_sigmoid = lambda x: 1./(1+np.exp(-x))
nl_rect = lambda x: np.clip(x, 0, np.inf)
#nl_rect = lambda x: np.clip(x, -np.inf, np.inf)
nl_shallow_rect = lambda x: np.clip(0.1*x, 0, np.inf)
nl_clip = lambda x: np.clip(x, 0, 1)
nl_softplus = lambda x: np.log(1. + np.exp(x)) #
#'''
# Nonlinearity functions (Theano implementation)
import numpy, theano
import numpy.distutils
import numpy.distutils.__config__
import theano.tensor as T
# Symbolic (Theano) counterparts of the numpy nonlinearities above.
def nl_linear_t(x):
    """Identity."""
    return x


def nl_tanh_t(x):
    """Hyperbolic tangent."""
    return T.tanh(x)


def nl_sigmoid_t(x):
    """Logistic sigmoid."""
    return T.nnet.sigmoid(x)


def nl_fermi_t(x):
    """Steep (Fermi-like) sigmoid with gain 50."""
    return T.nnet.sigmoid(x * 50)


def nl_clip_t(x):
    """Clip to [0, 1]."""
    return T.clip(x, 0., 1.)


def nl_rect_t(x):
    """Rectifier max(x, 0)."""
    return T.maximum(x, 0.)


def nl_rect_squared_t(x):
    """Squared rectifier max(x**2, 0)."""
    return T.maximum(x ** 2, 0.)


def nl_shallow_rect_t(x):
    """Rectifier with slope 0.1."""
    return T.maximum(0.1 * x, 0.)
#'''
def convert_input_const_to_time(inp, num_frames):
    """Broadcast a constant (single-frame) input along the time axis.

    Parameters
    ----------
    inp : np.ndarray
        3-d array of shape (1, n_frames, ...); only the first frame is kept.
        (Last axis is presumably units/channels -- TODO confirm with callers.)
    num_frames : int
        Number of time frames to tile the first frame over.

    Returns
    -------
    np.ndarray
        Array of shape (1, num_frames, ...).

    Raises
    ------
    ValueError
        If the first axis of ``inp`` does not have length 1.
    """
    if inp.shape[0] != 1:
        # ValueError (subclass of Exception) keeps existing `except Exception` callers working.
        raise ValueError("First axis of inp has to be 1-dim.")
    if inp.shape[1] != 1:
        # Keep only the first frame before broadcasting.
        inp = inp[:, 0:1, :]
        print('WARNING (bn_tools): Input has more than one frame. Only first frame will be broadcast.')
    return np.tile(inp, (1, num_frames, 1))
def check_nonlinearities():
    """Plot selected Theano nonlinearities over [-5, 5) for visual inspection."""
    import matplotlib.pyplot as plt

    xs = np.arange(-5, 5, 0.1).astype('float32')
    shared_x = theano.shared(xs)
    for nonlin in (nl_clip_t, nl_sigmoid_t):
        # Compile the symbolic expression into a callable and plot its values.
        evaluate = theano.function([], nonlin(shared_x))
        plt.plot(xs, evaluate())
    plt.show()
# Manual smoke test: plots the nonlinearities when run as a script.
if __name__=='__main__':
    check_nonlinearities()
| 32.307692 | 103 | 0.657143 | import numpy as np
nl_linear = lambda x: x
nl_tanh = lambda x: np.tanh(x)
nl_sigmoid = lambda x: 1./(1+np.exp(-x))
nl_rect = lambda x: np.clip(x, 0, np.inf)
nl_shallow_rect = lambda x: np.clip(0.1*x, 0, np.inf)
nl_clip = lambda x: np.clip(x, 0, 1)
nl_softplus = lambda x: np.log(1. + np.exp(x))
# Nonlinearity functions (Theano implementation)
import numpy, theano
import numpy.distutils
import numpy.distutils.__config__
import theano.tensor as T
nl_linear_t = lambda x: x
nl_tanh_t = lambda x: T.tanh(x)
nl_sigmoid_t = lambda x: T.nnet.sigmoid(x)
nl_fermi_t = lambda x: T.nnet.sigmoid(x*50)
nl_clip_t = lambda x: T.clip(x, 0., 1.)
nl_rect_t = lambda x: T.maximum(x, 0.)
nl_rect_squared_t = lambda x: T.maximum(x**2, 0.)
nl_shallow_rect_t = lambda x: T.maximum(0.1*x, 0.)
#'''
def convert_input_const_to_time(inp, num_frames):
if inp.shape[0] != 1:
raise Exception("First axis of inp has to be 1-dim.")
if inp.shape[1] != 1:
inp = inp[:, 0:1, :]
print('WARNING (bn_tools): Input has more than one frame. Only first frame will be broadcast.')
inp = np.tile(inp, (1, num_frames, 1))
return inp
def check_nonlinearities():
import matplotlib.pyplot as plt
x_np=np.arange(-5,5,0.1).astype('float32')
x=theano.shared(x_np)
for fkt in [nl_clip_t,nl_sigmoid_t]:
y= fkt(x)
tf = theano.function([],y)
plt.plot(x_np, tf())
plt.show()
if __name__=='__main__':
check_nonlinearities()
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.