text string | size int64 | token_count int64 |
|---|---|---|
# Severity levels for status reporting.
LEVELS = {
    'ok': 0,
    'warn': 1,
    'error': 2,
}
# Token definitions for the simple delimited map-file format.
OPERATORS = {
    'map': '=>',
    'delimiter': ',',
    'comment': '#',
}
| 107 | 54 |
import json
import os
import pickle
import subprocess
from inspect import getframeinfo, stack
import langid
# Global runtime configuration for the pipeline.
PARAMS = {
    'TRACE': 1,
    'TARGET_LANG': 'en',  # tried zh,fr,sp,de,hu,ro,ar,el,la,it,ru,ja
    'RANKER': 'betweenness',
    'UPLOAD_DIRECTORY': 'uploads/',
    'OUTPUT_DIRECTORY': 'out/',
    'k_count': 7,
    's_count': 5,
    'translation': True,
    'pics': False,
    'CACHING': 0,
}
def out_dirs():
    """Return output artifact paths rooted at PARAMS['OUTPUT_DIRECTORY']."""
    base = PARAMS['OUTPUT_DIRECTORY']
    names = ('overview.txt', 'pdftexts/', 'sums/', 'keys/')
    return [base + name for name in names]
def pdf2txt(pdf, txt):
    """Convert a PDF to plain text via the external ``pdftotext`` tool.

    :param pdf: path to the input PDF file
    :param txt: path where the extracted text is written
    :return: True if a non-trivial text file (> 32 bytes) was produced,
        otherwise the stub output (if any) is removed and False is returned
    """
    subprocess.run(["pdftotext", "-q", pdf, txt])
    # pdftotext runs quietly (-q) and may fail without creating the output
    # file at all; guard so os.path.getsize cannot raise FileNotFoundError.
    if os.path.exists(txt):
        if os.path.getsize(txt) > 32:
            return True
        os.remove(txt)
    return False
def detect_lang(text):
    """Return the language code of the most likely language of *text* (via langid)."""
    return langid.classify(text)[0]
def to_json(obj, fname, indent=1):
    """
    Serializes an object to a JSON file.

    Assumes the object is made of (nested) arrays and dicts.

    :param obj: object to serialize
    :param fname: output file path
    :param indent: JSON indentation level
    """
    # Explicit UTF-8: ensure_ascii=False emits raw non-ASCII characters,
    # which would fail under a non-UTF-8 platform default encoding.
    with open(fname, "w", encoding="utf8") as outf:
        json.dump(obj, outf, indent=indent, ensure_ascii=False)
def from_json(fname):
    """
    Deserializes an object from a JSON file.

    :param fname: input file path
    :return: the deserialized object
    """
    # Read as UTF-8 explicitly, mirroring the writer's encoding; the
    # platform default may differ (e.g. cp1252 on Windows).
    with open(fname, "r", encoding="utf8") as inf:
        return json.load(inf)
def exists_file(fname):
    """Report whether *fname* exists, as either a file or a directory."""
    present = os.path.exists(fname)
    return present
def home_dir():
    """Return the current user's home directory as a plain string."""
    from pathlib import Path
    home = Path.home()
    return str(home)
def ensure_path(fname):
    """Create the parent directory of *fname* if it does not exist yet.

    :param fname: a file path; only its directory component is created
    """
    folder, _ = os.path.split(fname)
    # A bare filename has an empty directory part; os.makedirs('') would
    # raise FileNotFoundError, so only create when there is a folder.
    if folder:
        os.makedirs(folder, exist_ok=True)
def to_pickle(obj, fname='./arxiv.pickle'):
    """
    Serialize *obj* into a .pickle file at *fname*, creating parent
    directories as needed.
    """
    ensure_path(fname)
    with open(fname, "wb") as sink:
        pickle.dump(obj, sink)
def from_pickle(fname):
    """
    Deserialize and return the object stored in the pickle file *fname*.
    """
    with open(fname, "rb") as source:
        obj = pickle.load(source)
    return obj
def load_delimited(fname, delimiter):
    """Yield the fields of each line of *fname*, split on *delimiter*.

    The trailing newline (if present) is stripped from the last field.

    :param fname: path of the text file to read
    :param delimiter: field separator string
    """
    with open(fname, mode="rt") as f:
        for line in f:
            # Strip only the newline before splitting: the old code sliced
            # off the last character unconditionally, which corrupted the
            # final line when the file lacks a terminating newline.
            yield line.rstrip("\n").split(delimiter)
def take(n, gen):
    """Yield at most the first *n* items of the iterable *gen*."""
    seen = 0
    for item in gen:
        if seen >= n:
            break
        yield item
        seen += 1
def pp(gen, n=10):
    """Print up to *n* items of *gen*; dicts are printed as (key, value) pairs."""
    items = gen.items() if isinstance(gen, dict) else gen
    for item in take(n, items):
        print(item)
def ppp(*args, **kwargs):
    """
    Logging mechanism with possible DEBUG extras.

    When PARAMS['TRACE'] >= 1, prefixes the message with the file name and
    line number the printed message originates from, then prints it;
    otherwise the call is a no-op.
    """
    # Single guard: the original checked TRACE twice (< 1 return, then >= 1),
    # which is redundant — one early return covers both.
    if PARAMS["TRACE"] < 1:
        return
    # Inspect the call stack to find where the message originates from.
    caller = getframeinfo(stack()[1][0])
    print('DEBUG:',
          caller.filename.split('/')[-1],
          '->', caller.lineno, end=': ')
    print(*args, **kwargs)
"""
def force_quiet(fun,*args,**kwargs) :
sout=sys.stdout
serr = sys.stderr
f = open(os.devnull, 'w')
sys.stdout = f
sys.stderr = f
result=fun(*args,**kwargs)
sys.stdout = sout
sys.stderr = serr
return result
"""
| 2,956 | 1,076 |
"""Contains the n dimensional inverted pendulum environment."""
import warnings
from typing import Optional
import matplotlib.pyplot as plt
import numpy as np
from numpy import ndarray
from polytope import polytope
from scipy.integrate import ode
from scipy.spatial.qhull import ConvexHull
from ..utils import assert_shape
from .environments import Environment
class NDPendulum(Environment):
    """N dimensional inverted pendulum environment.
    The pendulum is represented using hyperspherical coordinates, with a fixed value for r. Thus the state is (n-1)
    angles, along with their associated velocities:
    0. d_theta1
    ...
    n-1-1. d_theta(n-1)
    n-1. theta1
    ...
    (n-1)*2-1. theta(n-1)
    There are (n-1) actions, each exerting torque in the plane of one of the angles.
    """
    # NOTE(review): plant_noise and target use mutable ndarray defaults shared
    # across instances; the 4-element plant_noise default only matches n=3.
    def __init__(self, name: str = "NDPendulum", n: int = 3, l: float = .5, m: float = .15, g: float = 9.82,
                 b: float = 0., dt: float = .05, init_m: Optional[float] = None, init_std: Optional[float] = None,
                 plant_noise: ndarray = np.array([0.01, 0.01, 0.01, 0.01]) ** 2, u_min: float = -1., u_max: float = 1,
                 target: ndarray = np.array([0.0, 0.0]), verbosity: int = 1, norm_x=None, norm_u=None):
        """
        :param name: name of the system
        :param n: number of dimensions, >=3
        :param l: length of the pendulum
        :param m: mass of the pendulum
        :param g: gravitation constant
        :param b: friction coefficient of the system
        :param init_m: [(n-1)*2 x 0] initial state mean
        :param init_std: standard deviation of the start state sample distribution. Note: This is not(!) the uncertainty
                         of the state but merely allows for variation in the initial (deterministic) state.
        :param u_min: maximum negative torque applied to the system in any dimension
        :param u_max: maximum positive torque applied to the system in any dimension
        :param target: [(n-1)*2 x 0] target state
        """
        assert b == 0., 'Friction is not supported.'
        # We have n-1 angles, and for each a position and velocity.
        state_dimen = (n - 1) * 2
        # We can exert torque in the plane of each of the angles.
        action_dimen = (n - 1)
        num_angles = n - 1
        # Broadcast the scalar torque limits to one bound per angle.
        u_min = np.array([u_min] * num_angles)
        u_max = np.array([u_max] * num_angles)
        p_origin = np.array([0.0] * state_dimen)
        init_m = init_m if init_m is not None else np.array([0., ] * state_dimen)
        init_std = init_std if init_std is not None else np.array([0.01, ] * state_dimen)
        super().__init__(name, state_dimen, action_dimen, dt, init_m, init_std, plant_noise, u_min, u_max, target,
                         verbosity, p_origin)
        # ODE integrator driven by _dynamics; initial value set in _reset().
        self.odesolver = ode(self._dynamics)
        self.l = l
        self.m = m
        self.g = g
        self.b = b
        self.target = target
        self.target_ilqr = init_m
        self.n = n
        self.num_angles = num_angles
        warnings.warn("Normalization turned off for now. Need to look into it")
        max_deg = 30
        # Identity normalization (no-op); the commented expressions show the
        # intended physical scaling once normalization is re-enabled.
        if norm_x is None:
            norm_x = np.array([1.] * state_dimen)  # norm_x = np.array([np.sqrt(g/l), np.deg2rad(max_deg)])
        if norm_u is None:
            norm_u = np.array([1.] * action_dimen)  # norm_u = np.array([g*m*l*np.sin(np.deg2rad(max_deg))])
        self.norm = [norm_x, norm_u]
        self.inv_norm = [arr ** -1 for arr in self.norm]
        self._init_safety_constraints()
        # NOTE(review): the constructor unconditionally raises, so this class
        # can never be instantiated as-is.
        raise NotImplementedError('NDPendulum doesn\'t work properly yet!')
    @property
    def l_mu(self) -> ndarray:
        # Lipschitz constants (mean): one entry per velocity, then per angle.
        return np.array(([0.05] * self.num_angles) + ([.02] * self.num_angles))
    @property
    def l_sigm(self) -> ndarray:
        # Lipschitz constants (std); same values as l_mu.
        return np.array(([0.05] * self.num_angles) + ([.02] * self.num_angles))
    def _reset(self):
        # Restart the ODE solver from the current state at t=0.
        self.odesolver.set_initial_value(self.current_state, 0.0)
    def _check_current_state(self, state=None):
        """Return (failed, status_code): failed is True when the state is
        outside the safe polytope A*x <= b."""
        if state is None:
            state = self.current_state
        # Check if the state lies inside the safe polytope i.e. A * x <= b.
        res = np.matmul(self.h_mat_safe, state) - self.h_safe.T
        satisfied = not (res > 0).any()
        # We don't use the status code.
        status_code = 0
        return not satisfied, status_code
    def _dynamics(self, t, state, action):
        """ Evaluate the system dynamics
        Parameters
        ----------
        t: float
            Input Parameter required for the odesolver for time-dependent
            odes. Has no influence in this system.
        state: n_sx1 array[float]
            The current state of the system
        action: n_ux1 array[float]
            The action to be applied at the current time step
        Returns
        -------
        dz: n_sx1 array[float]
            The ode evaluated at the given inputs.
        """
        assert_shape(state, (self.n_s,))
        assert_shape(action, (self.n_u,))
        # State layout: velocities first, then angles (see class docstring).
        velocity = state[:self.num_angles]
        position = state[self.num_angles:]
        gravity_proj = np.zeros_like(position)
        # NOTE(review): gravity is applied only to the first angle; the other
        # angles receive no gravitational term — confirm this is intended.
        gravity_proj[0] = self.g / self.l * np.sin(position[0])
        inertia = self.m * self.l ** 2
        dvelocity = gravity_proj + action / inertia  # - b / inertia * state[0]
        dposition = velocity
        return np.concatenate((dvelocity.flat, dposition.flat))
    def _jac_dynamics(self):
        """ Evaluate the jacobians of the system dynamics
        Returns
        -------
        jac: (n_s) x (n_s+n_u) array[float]
            The jacobian of the dynamics w.r.t. the state and action
        """
        # Linearization around the zero (upright) state.
        state = np.zeros((self.n_s,))
        position = state[self.num_angles:]
        theta1 = position[0]
        inertia = self.m * self.l ** 2
        # NOTE(review): the 6-column rows below hard-code n=3 (n_s=4, n_u=2);
        # this does not generalize to other n — confirm before reuse.
        jac_acl = np.array([[0., 0., self.g / self.l * np.cos(theta1), 0., 1/inertia, 0.],  #
                            [0., 0., 0., 0., 0., 1/inertia]])
        jac_vel = np.eye(self.num_angles, self.n_s + self.n_u)
        return np.vstack((jac_acl, jac_vel))
    def state_to_obs(self, state=None, add_noise=False):
        """ Transform the dynamics state to the state to be observed
        Parameters
        ----------
        state: n_sx0 1darray[float]
            The internal state of the system.
        add_noise: bool, optional
            If this is set to TRUE, a noisy observation is returned
        Returns
        -------
        state: 2x0 1darray[float]
            The state as is observed by the agent.
            In the case of the inverted pendulum, this is the same.
        """
        if state is None:
            state = self.current_state
        noise = 0
        if add_noise:
            noise += np.random.randn(self.n_s) * np.sqrt(self.plant_noise)
        state_noise = state + noise
        # Apply the (currently identity) state normalization.
        state_norm = state_noise * self.inv_norm[0]
        return state_norm
    def random_action(self) -> ndarray:
        """Sample a uniformly random action, scaled to half the action range."""
        c = 0.5
        return c * (np.random.rand(self.n_u) * (self.u_max - self.u_min) + self.u_min)
    def _init_safety_constraints(self):
        """ Get state and safety constraints
        We define the state constraints as:
        x_0 - 3*x_1 <= 1
        x_0 - 3*x_1 >= -1
        x_1 <= max_rad
        x_1 >= -max_rad
        """
        max_dx = 2.0
        max_theta1_deg = 20
        max_dtheta1 = 1.2
        max_dtheta1_at_vertical = 0.8
        max_theta1_rad = np.deg2rad(max_theta1_deg)
        # -max_dtheta <dtheta <= max_dtheta
        h_0_mat = np.asarray([[1., 0.], [-1., 0.]])
        h_0_vec = np.array([max_dtheta1, max_dtheta1])[:, None]
        # (1/.4)*dtheta + (2/.26)*theta <= 1
        # 2*max_dtheta + c*max_rad <= 1
        # => c = (1+2*max_dtheta) / max_rad
        # for max_deg = 30, max_dtheta = 1.5 => c \approx 7.62
        # Corner points of the safe region in (dtheta1, dtheta2, theta1,
        # theta2) space; their convex hull defines the safe polytope.
        corners_polygon = np.array([[max_dtheta1, max_dtheta1, -max_theta1_rad, -max_theta1_rad],  #
                                    [max_dtheta1, -max_dtheta1, -max_theta1_rad, max_theta1_rad],  #
                                    [max_dtheta1, max_dtheta1_at_vertical, -max_theta1_rad, 0.],  #
                                    [max_dtheta1, -max_dtheta1_at_vertical, -max_theta1_rad, 0.],  #
                                    [-max_dtheta1, max_dtheta1, max_theta1_rad, -max_theta1_rad],  #
                                    [-max_dtheta1, -max_dtheta1, max_theta1_rad, max_theta1_rad],  #
                                    [-max_dtheta1, max_dtheta1_at_vertical, max_theta1_rad, 0.],  #
                                    [-max_dtheta1, -max_dtheta1_at_vertical, max_theta1_rad, 0.],  #
                                    [max_dtheta1_at_vertical, max_dtheta1, 0., -max_theta1_rad],  #
                                    [max_dtheta1_at_vertical, -max_dtheta1, 0., max_theta1_rad],  #
                                    [max_dtheta1_at_vertical, max_dtheta1_at_vertical, 0., 0.],  #
                                    [max_dtheta1_at_vertical, -max_dtheta1_at_vertical, 0., 0.],  #
                                    [-max_dtheta1_at_vertical, max_dtheta1, 0., -max_theta1_rad],  #
                                    [-max_dtheta1_at_vertical, -max_dtheta1, 0., max_theta1_rad],  #
                                    [-max_dtheta1_at_vertical, max_dtheta1_at_vertical, 0., 0.],  #
                                    [-max_dtheta1_at_vertical, -max_dtheta1_at_vertical, 0., 0.]])
        ch = ConvexHull(corners_polygon)
        # returns the equation for the convex hull of the corner points s.t. eq = [H,h]
        # with Hx <= -h
        eq = ch.equations
        h_mat_safe = eq[:, :self.n_s]
        h_safe = -eq[:, self.n_s:]  # We want the form Ax <= b , hence A = H, b = -h
        p = polytope.qhull(corners_polygon)
        # normalize safety bounds
        self.h_mat_safe = h_mat_safe
        self.h_safe = h_safe
        self.h_mat_obs = None  # p.asarray([[0.,1.],[0.,-1.]])
        self.h_obs = None  # np.array([.6,.6]).reshape(2,1)
        # arrange the corner points such that it can be ploted via a line plot
        self.corners_polygon = corners_polygon
        self.ch_safety_bounds = ch
    def get_safety_constraints(self, normalize=True):
        """ Return the safe constraints
        Parameters
        ----------
        normalize: boolean, optional
            If TRUE: Returns normalized constraints
        """
        if normalize:
            # Rescale the constraint matrix by the state normalization.
            m_x = np.diag(self.norm[0])
            h_mat_safe = np.dot(self.h_mat_safe, m_x)
        else:
            h_mat_safe = self.h_mat_safe
        return h_mat_safe, self.h_safe, self.h_mat_obs, self.h_obs
    def _render_env(self, screen, axis: [float], display_width: int, display_height: int):
        """Render the pendulum with matplotlib (two 3D views); the pygame
        arguments are currently unused (see the commented code below)."""
        # Convert the two angles to a point on the unit sphere.
        theta = self.current_state[2]
        phi = self.current_state[3]
        x = np.sin(theta) * np.cos(phi)
        y = np.sin(theta) * np.sin(phi)
        z = np.cos(theta)
        fig = plt.figure(figsize=plt.figaspect(0.5))
        # Top view.
        ax1 = fig.add_subplot(1, 2, 1, projection='3d')
        ax1.elev = 90
        ax1.azim = 90
        ax1.set_xlim(-1, 1)
        ax1.set_ylim(-1, 1)
        ax1.set_zlim(-1, 1)
        ax1.plot([0, 1], [0, 0], [0, 0], color='grey')
        ax1.plot([0, 0], [0, 1], [0, 0], color='grey')
        ax1.plot([0, 0], [0, 0], [0, 1], color='grey')
        ax1.plot([0, x], [0, y], [0, z])
        ax1.scatter([x], [y], [z])
        # Side view.
        ax2 = fig.add_subplot(1, 2, 2, projection='3d')
        ax2.elev = 0
        ax2.azim = 90
        ax2.set_xlim(-1, 1)
        ax2.set_ylim(-1, 1)
        ax2.set_zlim(-1, 1)
        ax2.plot([0, 1], [0, 0], [0, 0], color='grey')
        ax2.plot([0, 0], [0, 1], [0, 0], color='grey')
        ax2.plot([0, 0], [0, 0], [0, 1], color='grey')
        ax2.plot([0, x], [0, y], [0, z])
        ax2.scatter([x], [y], [z])
        plt.show()
        # # Clear screen to black.
        # screen.fill((0, 0, 0))
        #
        # center_x = display_width / 2
        # center_y = display_height / 2
        #
        # length = min(display_width, display_height) / 3
        #
        # theta = self.current_state[1]
        # end_x = center_x - length * math.sin(theta)
        # end_y = center_y - length * math.cos(theta)
        #
        # pygame.draw.circle(screen, (255, 255, 255), (center_x, center_y), 10)
        # pygame.draw.line(screen, (255, 255, 255), (center_x, center_y), (end_x, end_y), width=3)
    def plot_ellipsoid_trajectory(self, p, q, vis_safety_bounds=True):
        # Not implemented for this environment.
        raise NotImplementedError
| 12,589 | 4,505 |
# -*- coding: utf-8 -*-
import guzzle_sphinx_theme
from recommonmark.parser import CommonMarkParser
# -- General configuration ------------------------------------------------
# Sphinx configuration for the Keycard documentation.
project = u'Keycard'
copyright = u'2018, Regents of the University of Michigan'
author = u'Noah Botimer'
version = u'0.2.4'
release = u'0.2.4'
extensions = ['guzzle_sphinx_theme']
templates_path = ['_templates']
master_doc = 'index'
# NOTE(review): source_parsers was deprecated in Sphinx 1.8 in favor of
# registering recommonmark as an extension — confirm the pinned Sphinx
# version before upgrading.
source_parsers = {
    '.md': CommonMarkParser,
}
source_suffix = ['.rst', '.md']
# NOTE(review): language = None triggers a warning on newer Sphinx; 'en' is
# the modern equivalent.
language = None
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
pygments_style = 'sphinx'
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
html_theme_path = guzzle_sphinx_theme.html_theme_path()
html_theme = 'guzzle_sphinx_theme'
html_static_path = ['_static']
# Guzzle theme options (see theme.conf for more information)
html_theme_options = {
    "project_nav_name": "Keycard",
}
html_sidebars = {
    '**': [
        'logo-text.html',
        'globaltoc.html',
        'searchbox.html',
    ]
}
| 1,062 | 374 |
from webtest import TestApp
import bottle
from bottle import route, template
import bugsnag
from bugsnag.wsgi.middleware import BugsnagMiddleware
from tests.utils import IntegrationTest
class TestBottle(IntegrationTest):
    """Integration tests for the Bugsnag Bottle WSGI middleware."""

    def setUp(self):
        super(TestBottle, self).setUp()
        # Synchronous delivery to the local test server so assertions can
        # inspect self.server.received immediately.
        bugsnag.configure(endpoint=self.server.url,
                          session_endpoint=self.server.url,
                          auto_capture_sessions=False,
                          api_key='3874876376238728937',
                          asynchronous=False)

    def test_routing_error(self):
        """An exception raised inside a route is reported as unhandled."""
        @route('/beans')
        def index():
            raise Exception('oh no!')
        app = bottle.app()
        app.catchall = False
        app = TestApp(BugsnagMiddleware(app))
        self.assertRaises(Exception, lambda: app.get('/beans'))
        self.assertEqual(1, len(self.server.received))
        payload = self.server.received[0]['json_body']
        event = payload['events'][0]
        self.assertTrue(event['unhandled'])
        self.assertEqual(event['context'], 'GET /beans')
        self.assertEqual(event['exceptions'][0]['errorClass'], 'Exception')
        self.assertEqual(event['exceptions'][0]['message'], 'oh no!')
        runtime_versions = event['device']['runtimeVersions']
        # Compare against the installed version instead of a hard-coded
        # '0.12.18', consistent with test_template_error below; the old
        # assertion broke on every bottle upgrade.
        self.assertEqual(runtime_versions['bottle'], bottle.__version__)
        assert 'environment' not in event['metaData']

    def test_enable_environment(self):
        """With send_environment=True the WSGI environ is attached as metadata."""
        bugsnag.configure(send_environment=True)

        @route('/beans')
        def index():
            raise Exception('oh no!')
        app = bottle.app()
        app.catchall = False
        app = TestApp(BugsnagMiddleware(app))
        self.assertRaises(Exception, lambda: app.get('/beans'))
        self.assertEqual(1, len(self.server.received))
        payload = self.server.received[0]['json_body']
        metadata = payload['events'][0]['metaData']
        self.assertEqual(metadata['environment']['PATH_INFO'], '/beans')

    def test_template_error(self):
        """A NameError raised during template rendering is reported."""
        @route('/berries/<variety>')
        def index(variety):
            # type2 is deliberately left undefined to trigger a NameError.
            return template('{{type1}} {{type2}}', type1=variety)
        app = bottle.app()
        app.catchall = False
        app = TestApp(BugsnagMiddleware(app))
        self.assertRaises(Exception, lambda: app.get('/berries/red'))
        self.assertEqual(1, len(self.server.received))
        payload = self.server.received[0]['json_body']
        event = payload['events'][0]
        self.assertTrue(event['unhandled'])
        self.assertEqual(event['context'], 'GET /berries/red')
        self.assertEqual(event['exceptions'][0]['errorClass'], 'NameError')
        self.assertEqual(event['exceptions'][0]['message'],
                         "name 'type2' is not defined")
        assert 'environment' not in event['metaData']
        runtime_versions = event['device']['runtimeVersions']
        self.assertEqual(runtime_versions['bottle'], bottle.__version__)
| 2,970 | 880 |
# -*- coding: utf-8 -*-
"""Implementation of the ``somatic_variant_filtration`` step
=====================
Default Configuration
=====================
The default configuration is as follows.
.. include:: DEFAULT_CONFIG_somatic_variant_filtration.rst
=========
Important
=========
Because the EB Filter step is so time consuming, the data going
can be heavily prefiltered! (e.g. using Jannovar with the offExome flag).
TODO: document filter, for now see the eb_filter wrapper!
=======
Concept
=======
All variants are annotated with the dkfz-bias-filter to remove sequencing
and PCR artifacts. The variants annotated with EBFilter are variable, i.e.
only variants that have the PASS flag set because we assume only those will
be kept.
We borrowed the general workflow from variant_filtration, i.e. working with
pre-defined filter sets and exon/region lists.
========
Workflow
========
* 1. Do the filtering genome wide (this file needs to be there, always)
- dkfz-ebfilter-filterset1-genomewide
* 2. optionally, subset to regions defined in bed file, which return
- dkfz-ebfilter-filterset1-regions1
and so on for filterset1 to n
filterset1:
filter bPcr, bSeq flags from dkfz-bias-filter
filterset2:
additionally filter variants with EBscore < x, x is configurable
"""
from collections import OrderedDict
import os
import random
import sys
from biomedsheets.shortcuts import CancerCaseSheet, CancerCaseSheetOptions, is_not_background
from snakemake.io import expand
from snappy_pipeline.utils import dictify, listify
from snappy_pipeline.workflows.abstract import BaseStep, BaseStepPart, LinkOutStepPart
from snappy_pipeline.workflows.ngs_mapping import NgsMappingWorkflow
from snappy_pipeline.workflows.somatic_variant_annotation import SomaticVariantAnnotationWorkflow
from snappy_pipeline.workflows.somatic_variant_calling import (
SOMATIC_VARIANT_CALLERS_MATCHED,
SomaticVariantCallingWorkflow,
)
__author__ = "Manuel Holtgrewe <manuel.holtgrewe@bihealth.de>"
#: Extensions of files to create as main payload
EXT_VALUES = (".vcf.gz", ".vcf.gz.tbi", ".vcf.gz.md5", ".vcf.gz.tbi.md5")
#: Names of the files to create for the extension
EXT_NAMES = ("vcf", "tbi", "vcf_md5", "tbi_md5")
#: Default configuration for the somatic_variant_calling step
DEFAULT_CONFIG = r"""
# Default configuration variant_annotation
step_config:
somatic_variant_filtration:
drmaa_snippet: '' # default, you can override by step below
path_somatic_variant_annotation: ../somatic_variant_annotation
path_ngs_mapping: ../ngs_mapping
tools_ngs_mapping: null
tools_somatic_variant_calling: null
filter_sets:
# no_filter: no_filters # implicit, always defined
dkfz_only: '' # empty
dkfz_and_ebfilter:
ebfilter_threshold: 2.4
dkfz_and_ebfilter_and_oxog:
vaf_threshold: 0.08
coverage_threshold: 5
dkfz_and_oxog:
vaf_threshold: 0.08
coverage_threshold: 5
exon_lists: {}
# genome_wide: null # implicit, always defined
# ensembl74: path/to/ensembl47.bed
eb_filter:
shuffle_seed: 1
panel_of_normals_size: 25
min_mapq: 20
min_baseq: 15
# Parallelization configuration
drmaa_snippet: '' # value to pass in as additional DRMAA arguments
window_length: 10000000 # split input into windows of this size, each triggers a job
num_jobs: 500 # number of windows to process in parallel
use_drmaa: true # use drmaa for parallel processing
restart_times: 5 # number of times to re-launch jobs in case of failure
max_jobs_per_second: 2 # throttling of job creation
max_status_checks_per_second: 10 # throttling of status checks
debug_trunc_tokens: 0 # truncation to first N tokens (0 for none)
keep_tmpdir: never # keep temporary directory, {always, never, onerror}
job_mult_memory: 1 # memory multiplier
job_mult_time: 1 # running time multiplier
merge_mult_memory: 1 # memory multiplier for merging
merge_mult_time: 1 # running time multiplier for merging
ignore_chroms: # patterns of chromosome names to ignore
- NC_007605 # herpes virus
- hs37d5 # GRCh37 decoy
- chrEBV # Eppstein-Barr Virus
- '*_decoy' # decoy contig
- 'HLA-*' # HLA genes
- 'GL000220.*' # Contig with problematic, repetitive DNA in GRCh37
"""
class SomaticVariantFiltrationStepPart(BaseStepPart):
    """Shared code for all tools in somatic_variant_filtration"""
    def __init__(self, parent):
        super().__init__(parent)
        # Snakemake wildcard pattern for the log file of this step part.
        self.log_path = (
            r"work/{mapper}.{var_caller}.jannovar_annotate_somatic_vcf."
            r"dkfz_bias_filter.{tumor_library,[^\.]+}/log/snakemake.dkfz_bias_filter.log"
        )
        # Build shortcut from cancer bio sample name to matched cancer sample
        self.tumor_ngs_library_to_sample_pair = OrderedDict()
        for sheet in self.parent.shortcut_sheets:
            self.tumor_ngs_library_to_sample_pair.update(
                sheet.all_sample_pairs_by_tumor_dna_ngs_library
            )
        # Build mapping from donor name to donor.
        self.donors = OrderedDict()
        for sheet in self.parent.shortcut_sheets:
            for donor in sheet.donors:
                self.donors[donor.name] = donor
    @dictify
    def _get_log_file(self, action):
        """Return path to log file for the given action"""
        assert action in self.actions, "Invalid action"
        if action == "write_panel":
            # NOTE(review): this function is a generator (it yields below), so
            # this `return <str>` only stops iteration and the string is
            # discarded unless @dictify explicitly consumes the generator's
            # return value — confirm against the dictify implementation.
            return (
                "work/{mapper}.eb_filter.panel_of_normals/log/"
                "{mapper}.eb_filter.panel_of_normals.log"
            )
        else:
            name_pattern = self.token
            key_ext = (
                ("log", ".log"),
                ("conda_info", ".conda_info.txt"),
                ("conda_list", ".conda_list.txt"),
            )
            for key, ext in key_ext:
                yield key, os.path.join("work", name_pattern, "log", name_pattern + ext)
    def get_normal_lib_name(self, wildcards):
        """Return name of normal (non-cancer) library"""
        pair = self.tumor_ngs_library_to_sample_pair[wildcards.tumor_library]
        return pair.normal_sample.dna_ngs_library.name
    def get_params(self, action):
        """Return arguments to pass down."""
        _ = action
        def params_function(wildcards):
            # NOTE(review): the condition looks inverted — library names are
            # resolved only when tumor_library is NOT a known donor name;
            # confirm whether donors keys are donor names vs library names.
            if wildcards.tumor_library not in self.donors:
                return {
                    "tumor_library": wildcards.tumor_library,
                    "normal_library": self.get_normal_lib_name(wildcards),
                }
            else:
                return {}
        return params_function
class DkfzBiasFilterStepPart(SomaticVariantFiltrationStepPart):
    """Flag variants with the DKFZ bias filter"""
    #: Step part name used for rule registration.
    name = "dkfz_bias_filter"
    def __init__(self, parent):
        super().__init__(parent)
        # Only a single "run" action is supported.
        self.actions = ("run",)
        # Name token used to build work/log paths for this step part.
        self.token = (
            "{mapper}.{var_caller}.jannovar_annotate_somatic_vcf."
            "dkfz_bias_filter.{tumor_library}"
        )
    @dictify
    def get_input_files(self, action):
        """Return path to jannovar-annotated vcf input file"""
        assert action == "run"
        # VCF file and index
        tpl = (
            "output/{mapper}.{var_caller}.jannovar_annotate_somatic_vcf.{tumor_library}/out/"
            "{mapper}.{var_caller}.jannovar_annotate_somatic_vcf.{tumor_library}"
        )
        key_ext = {"vcf": ".vcf.gz", "tbi": ".vcf.gz.tbi"}
        variant_annotation = self.parent.sub_workflows["somatic_variant_annotation"]
        for key, ext in key_ext.items():
            yield key, variant_annotation(tpl + ext)
        # BAM file and index
        tpl = "output/{mapper}.{tumor_library}/out/{mapper}.{tumor_library}"
        key_ext = {"bam": ".bam", "bai": ".bam.bai"}
        ngs_mapping = self.parent.sub_workflows["ngs_mapping"]
        for key, ext in key_ext.items():
            yield key, ngs_mapping(tpl + ext)
    @dictify
    def get_output_files(self, action):
        """Return output files for the filtration"""
        assert action == "run"
        prefix = (
            r"work/{mapper}.{var_caller}.jannovar_annotate_somatic_vcf."
            r"dkfz_bias_filter.{tumor_library,[^\.]+}/out/{mapper}.{var_caller}."
            r"jannovar_annotate_somatic_vcf.dkfz_bias_filter.{tumor_library}"
        )
        key_ext = {
            "vcf": ".vcf.gz",
            "tbi": ".vcf.gz.tbi",
            "vcf_md5": ".vcf.gz.md5",
            "tbi_md5": ".vcf.gz.tbi.md5",
        }
        for key, ext in key_ext.items():
            yield key, prefix + ext
    @classmethod
    def update_cluster_config(cls, cluster_config):
        """Update cluster configuration with resource requirements"""
        cluster_config["somatic_variant_filtration_dkfz_bias_filter_run"] = {
            "mem": 3 * 1024,
            "time": "72:00",
            "ntasks": 1,
        }
class EbFilterStepPart(SomaticVariantFiltrationStepPart):
    """Flag variants with EBFilter"""
    #: Step part name used for rule registration.
    name = "eb_filter"
    def __init__(self, parent):
        super().__init__(parent)
        # "run" performs the filtration; "write_panel" writes the
        # panel-of-normals BAM list consumed by the run action.
        self.actions = ("run", "write_panel")
        self.token = (
            "{mapper}.{var_caller}.jannovar_annotate_somatic_vcf."
            "dkfz_bias_filter.eb_filter.{tumor_library}"
        )
    def get_input_files(self, action):
        """Dispatch to the per-action input function."""
        assert action in self.actions
        return getattr(self, "_get_input_files_{}".format(action))
    @dictify
    def _get_input_files_run(self, wildcards):
        """Input files for the run action: VCF+index, BAM+index, PoN list."""
        # VCF file and index
        tpl = (
            "work/{mapper}.{var_caller}.jannovar_annotate_somatic_vcf."
            "dkfz_bias_filter.{tumor_library}/out/{mapper}.{var_caller}."
            "jannovar_annotate_somatic_vcf.dkfz_bias_filter."
            "{tumor_library}"
        )
        key_ext = {"vcf": ".vcf.gz", "tbi": ".vcf.gz.tbi"}
        for key, ext in key_ext.items():
            yield key, tpl.format(**wildcards) + ext
        # BAM file and index
        tpl = "output/{mapper}.{tumor_library}/out/{mapper}.{tumor_library}"
        key_ext = {"bam": ".bam", "bai": ".bam.bai"}
        ngs_mapping = self.parent.sub_workflows["ngs_mapping"]
        for key, ext in key_ext.items():
            yield key, ngs_mapping(tpl.format(**wildcards) + ext)
        # Panel of normals TXT file
        yield "txt", self._get_output_files_write_panel()["txt"].format(**wildcards)
    def _get_input_files_write_panel(self, wildcards):
        """Input files for writing the panel-of-normals list: the BAMs + BAIs."""
        bam_paths = self._get_panel_of_normal_bams(wildcards)
        return {"bam": bam_paths, "bai": [p + ".bai" for p in bam_paths]}
    def get_output_files(self, action):
        """Return output files for the filtration"""
        assert action in self.actions
        return getattr(self, "_get_output_files_{}".format(action))()
    @dictify
    def _get_output_files_run(self):
        prefix = (
            r"work/{mapper}.{var_caller}.jannovar_annotate_somatic_vcf."
            r"dkfz_bias_filter.eb_filter.{tumor_library,[^\.]+}/out/"
            r"{mapper}.{var_caller}.jannovar_annotate_somatic_vcf."
            r"dkfz_bias_filter.eb_filter.{tumor_library}"
        )
        key_ext = {
            "vcf": ".vcf.gz",
            "tbi": ".vcf.gz.tbi",
            "vcf_md5": ".vcf.gz.md5",
            "tbi_md5": ".vcf.gz.tbi.md5",
        }
        for key, ext in key_ext.items():
            yield key, prefix + ext
    @dictify
    def _get_output_files_write_panel(self):
        # TODO: add the actual normal sample here?!
        yield "txt", (
            "work/{mapper}.eb_filter.panel_of_normals/out/{mapper}.eb_filter."
            "panel_of_normals.txt"
        )
    def write_panel_of_normals_file(self, wildcards):
        """Write out file with paths to panels-of-normal"""
        output_path = self.get_output_files("write_panel")["txt"].format(**wildcards)
        with open(output_path, "wt") as outf:
            for bam_path in self._get_panel_of_normal_bams(wildcards):
                print(bam_path, file=outf)
    @listify
    def _get_panel_of_normal_bams(self, wildcards):
        """Return list of "panel of normal" BAM files."""
        libraries = []
        for sheet in self.parent.shortcut_sheets:
            for donor in sheet.donors:
                for bio_sample in donor.bio_samples.values():
                    if not bio_sample.extra_infos["isTumor"]:
                        libraries.append(bio_sample.dna_ngs_library.name)
        # Sort before seeding/shuffling so the subset is deterministic for a
        # given shuffle_seed regardless of sheet iteration order.
        libraries.sort()
        random.seed(self.config["eb_filter"]["shuffle_seed"])
        lib_count = self.config["eb_filter"]["panel_of_normals_size"]
        random.shuffle(libraries)
        ngs_mapping = self.parent.sub_workflows["ngs_mapping"]
        tpl = "output/{mapper}.{normal_library}/out/{mapper}.{normal_library}"
        for library in libraries[:lib_count]:
            yield ngs_mapping(tpl.format(normal_library=library, **wildcards) + ".bam")
    @staticmethod
    def update_cluster_config(cluster_config):
        """Update cluster configuration with resource requirements"""
        cluster_config["somatic_variant_filtration_eb_filter_run"] = {
            "mem": 8 * 1024,
            "time": "144:00",
            "ntasks": 1,
        }
class ApplyFiltersStepPartBase(SomaticVariantFiltrationStepPart):
    """Base class for the different filters."""
    #: Overridden by concrete subclasses.
    name = None
    def __init__(self, parent):
        super().__init__(parent)
        # NOTE(review): subclasses call .replace("{step}", ...) /
        # .format(step=...) on these paths, but this pattern contains no
        # "{step}" placeholder — confirm whether that replacement is a no-op.
        name_pattern = (
            "{mapper}.{var_caller}.jannovar_annotate_somatic_vcf."
            "dkfz_bias_filter.eb_filter.{tumor_library}.{filter_set}.{exon_list}"
        )
        self.base_path_out = os.path.join("work", name_pattern, "out", name_pattern + "{ext}")
        self.path_log = os.path.join("work", name_pattern, "log", name_pattern + ".log")
    def update_cluster_config(self, cluster_config):
        """Register resource requirements for this filter's run rule."""
        cluster_config["variant_filtration_{}_run".format(self.name)] = {
            "mem": int(3.75 * 1024 * 2),
            "time": "01:00",
            "ntasks": 2,
        }
class ApplyFiltersStepPart(ApplyFiltersStepPartBase):
    """Apply the configured filters."""
    name = "apply_filters"
    def get_args(self, action):
        """Return a wildcard function resolving normal/tumor sample names."""
        def args_function(wildcards):
            result = {
                "normal_sample": self.get_normal_lib_name(wildcards),
                "tumor_sample": wildcards.tumor_library,
            }
            return result
        assert action == "run"
        return args_function
    @dictify
    def get_input_files(self, action):
        """Return the EBFilter output VCF (+index) as input."""
        assert action == "run", "Unsupported actions"
        tpl = (
            "work/{mapper}.{var_caller}.jannovar_annotate_somatic_vcf."
            "dkfz_bias_filter.eb_filter.{tumor_library}/out/{mapper}.{var_caller}."
            "jannovar_annotate_somatic_vcf.dkfz_bias_filter.eb_filter."
            "{tumor_library}"
        )
        key_ext = {"vcf": ".vcf.gz", "tbi": ".vcf.gz.tbi"}
        for key, ext in key_ext.items():
            yield key, tpl + ext
    @dictify
    def get_output_files(self, action):
        """Genome-wide outputs; exon subsetting happens in FilterToExonsStepPart."""
        assert action == "run"
        for key, ext in zip(EXT_NAMES, EXT_VALUES):
            yield key, self.base_path_out.replace("{step}", self.name).replace(
                "{exon_list}", "genome_wide"
            ).replace("{ext}", ext)
    def get_log_file(self, action):
        """Return the genome-wide log file path for the run action."""
        assert action == "run"
        return self.path_log.replace("{step}", self.name).replace("{exon_list}", "genome_wide")
class FilterToExonsStepPart(ApplyFiltersStepPartBase):
    """Apply the configured filters."""
    name = "filter_to_exons"
    def get_input_files(self, action):
        """Return the genome-wide apply_filters outputs as input."""
        @dictify
        def input_function(wildcards):
            for key, ext in zip(EXT_NAMES, EXT_VALUES):
                # NOTE(review): base_path_out also contains {tumor_library},
                # which is not passed here — str.format would raise KeyError
                # unless the framework pre-substitutes it; confirm.
                yield key, self.base_path_out.format(
                    step="apply_filters",
                    mapper=wildcards.mapper,
                    var_caller=wildcards.var_caller,
                    filter_set=wildcards.filter_set,
                    exon_list="genome_wide",
                    ext=ext,
                )
        assert action == "run", "Unsupported actions"
        return input_function
    @dictify
    def get_output_files(self, action):
        """Outputs keep the {exon_list} wildcard for per-region subsetting."""
        assert action == "run"
        for key, ext in zip(EXT_NAMES, EXT_VALUES):
            yield key, self.base_path_out.replace("{step}", "filter_to_exons").replace("{ext}", ext)
    def get_log_file(self, action):
        """Return the log file path for the run action."""
        assert action == "run"
        return self.path_log.replace("{step}", self.name)
class SomaticVariantFiltrationWorkflow(BaseStep):
    """Perform somatic variant filtration"""
    #: Step name, used for configuration lookup.
    name = "somatic_variant_filtration"
    #: Sample sheet shortcut class; tolerate missing normal/tumor samples.
    sheet_shortcut_class = CancerCaseSheet
    sheet_shortcut_kwargs = {
        "options": CancerCaseSheetOptions(allow_missing_normal=True, allow_missing_tumor=True)
    }
    @classmethod
    def default_config_yaml(cls):
        """Return default config YAML, to be overwritten by project-specific one."""
        return DEFAULT_CONFIG
    def __init__(
        self, workflow, config, cluster_config, config_lookup_paths, config_paths, workdir
    ):
        super().__init__(
            workflow,
            config,
            cluster_config,
            config_lookup_paths,
            config_paths,
            workdir,
            (SomaticVariantAnnotationWorkflow, SomaticVariantCallingWorkflow, NgsMappingWorkflow),
        )
        # Register sub step classes so the sub steps are available
        self.register_sub_step_classes(
            (
                DkfzBiasFilterStepPart,
                EbFilterStepPart,
                ApplyFiltersStepPart,
                FilterToExonsStepPart,
                LinkOutStepPart,
            )
        )
        # Register sub workflows
        self.register_sub_workflow(
            "somatic_variant_annotation", self.config["path_somatic_variant_annotation"]
        )
        self.register_sub_workflow("ngs_mapping", self.config["path_ngs_mapping"])
        # Copy over "tools" setting from somatic_variant_calling/ngs_mapping if not set here
        if not self.config["tools_ngs_mapping"]:
            self.config["tools_ngs_mapping"] = self.w_config["step_config"]["ngs_mapping"]["tools"][
                "dna"
            ]
        if not self.config["tools_somatic_variant_calling"]:
            self.config["tools_somatic_variant_calling"] = self.w_config["step_config"][
                "somatic_variant_calling"
            ]["tools"]
    @listify
    def get_result_files(self):
        """Return list of result files
        Process all primary DNA libraries and perform pairwise calling for tumor/normal pairs
        """
        callers = set(self.config["tools_somatic_variant_calling"])
        name_pattern = (
            "{mapper}.{caller}.jannovar_annotate_somatic_vcf."
            "dkfz_bias_filter.eb_filter.{tumor_library.name}."
            "{filter_set}.{exon_list}"
        )
        # "no_filter" and "genome_wide" are implicit and always present.
        filter_sets = ["no_filter"]
        filter_sets += self.config["filter_sets"].keys()
        exon_lists = ["genome_wide"]
        exon_lists += list(self.config["exon_lists"].keys())
        yield from self._yield_result_files_matched(
            os.path.join("output", name_pattern, "out", name_pattern + "{ext}"),
            mapper=self.config["tools_ngs_mapping"],
            caller=callers & set(SOMATIC_VARIANT_CALLERS_MATCHED),
            filter_set=filter_sets,
            exon_list=exon_lists,
            ext=EXT_VALUES,
        )
        # TODO: filtration for joint calling not implemented yet
    def _yield_result_files_matched(self, tpl, **kwargs):
        """Build output paths from path template and extension list.

        This function returns the results from the matched somatic variant callers such as
        Mutect.
        """
        for sheet in filter(is_not_background, self.shortcut_sheets):
            for sample_pair in sheet.all_sample_pairs:
                if (
                    not sample_pair.tumor_sample.dna_ngs_library
                    or not sample_pair.normal_sample.dna_ngs_library
                ):
                    # Fixed message: the old text read "has is missing
                    # primary""normal ..." which rendered as "primarynormal".
                    msg = (
                        "INFO: sample pair for cancer bio sample {} is missing primary "
                        "normal or primary cancer NGS library"
                    )
                    print(msg.format(sample_pair.tumor_sample.name), file=sys.stderr)
                    continue
                yield from expand(
                    tpl, tumor_library=[sample_pair.tumor_sample.dna_ngs_library], **kwargs
                )
    def check_config(self):
        """Check that the path to the NGS mapping is present"""
        self.ensure_w_config(
            ("step_config", "somatic_variant_filtration", "path_somatic_variant_annotation"),
            "Path to variant calling not configured but required for somatic variant annotation",
        )
| 21,087 | 6,752 |
__version__ = "0.0.1"
import asyncio
import logging
from contextvars import ContextVar
from pathlib import Path
from typing import Any, Awaitable, Union, cast
from .context import Context
logger = logging.getLogger(__name__)
class wrapper:
    """Stateless proxy that forwards attribute access to the active Context.

    The module-level ``context`` object is an instance of this class, so
    reads and writes on it always target the Context stored in ``_context``
    for the current execution context.
    """

    def __getattr__(self, name: str) -> Any:
        # Delegate reads to the Context currently held by the ContextVar.
        current = _context.get()
        return getattr(current, name)

    def __setattr__(self, name: str, value: Any) -> None:
        # Delegate writes as well, so the proxy itself never stores state.
        current = _context.get()
        setattr(current, name, value)
# Per-execution-context storage for the active Context instance.
# NOTE(review): default=Context() creates a single shared fallback instance
# across all execution contexts — confirm that sharing is intended.
_context: ContextVar[Context] = ContextVar("var", default=Context())
# Module-level facade; attribute access is forwarded to _context's value.
context: Context = cast(Context, wrapper())
def init_context(
    cache_dir: Union[Path, str, None] = None,
) -> None:
    """Open the cache for the currently active Context.

    :param cache_dir: directory for the cache, or ``None`` for the default.
    """
    logger.debug("start context %s", cache_dir)
    active = _context.get()
    active.open_cache(cache_dir)
def run(
    func: Awaitable[Any],
) -> Any:
    """Drive *func* to completion on a fresh event loop.

    The active context is always closed, even when *func* raises.

    :param func: awaitable to execute.
    :returns: whatever *func* resolves to.
    """

    async def _runner() -> Any:
        try:
            return await func
        finally:
            # Close the context regardless of success or failure.
            await context.close()

    result = asyncio.run(_runner())
    logger.debug("finished")
    return result
| 983 | 309 |
import os
import random
import time
import json
import datetime
from random import randint
from pyfiglet import figlet_format
from flask import Flask, g, session, redirect, request, url_for, jsonify
from requests_oauthlib import OAuth2Session
# NOTE(review): OAuth client ID/secret are hardcoded in source; prefer the
# commented-out environment-variable lookups and rotate these credentials.
OAUTH2_CLIENT_ID = '456608429843283998' #os.environ['OAUTH2_CLIENT_ID']
OAUTH2_CLIENT_SECRET = '03D26-iZchBxx5ncJxN6fjxJkP6k0x-g' #os.environ['OAUTH2_CLIENT_SECRET']
# NOTE(review): '128.1932.254.226' is not a valid IPv4 address (octet 1932 >
# 255) — likely a typo for the intended host; confirm before deploying.
OAUTH2_REDIRECT_URI = 'http://128.1932.254.226:5000/callback'
API_BASE_URL = os.environ.get('API_BASE_URL', 'https://discordapp.com/api')
AUTHORIZATION_BASE_URL = API_BASE_URL + '/oauth2/authorize'
TOKEN_URL = API_BASE_URL + '/oauth2/token'
app = Flask(__name__)
# NOTE(review): debug=True and reusing the OAuth secret as Flask's
# SECRET_KEY are both unsafe for production.
app.debug = True
app.config['SECRET_KEY'] = OAUTH2_CLIENT_SECRET
# Quotes served at random by the _guidance command.
quotes = [
    '"The death of one man is a tragedy. The death of millions is a statistic."',
    '"It is enough that the people know there was an election. The people who cast the votes decide nothing. The people who count the votes decide everything."',
    # BUG FIX: a missing trailing comma here previously merged this quote
    # with the next one via implicit string concatenation.
    '"Death is the solution to all problems. No man - no problem."',
    '"The only real power comes out of a long rifle."',
    '"Education is a weapon, whose effect depends on who holds it in his hands and at whom it is aimed."',
    '"In the Soviet army it takes more courage to retreat than advance."',
    '"Gaiety is the most outstanding feature of the Soviet Union."',
    '"I trust no one, not even myself."',
    '"The Pope! How many divisions has _he_ got?"',
    '"BENIS"'
]
# Words/phrases that trigger the "Expand your vocabulary." reply.
expandList = [
    'cunt',
    'fuck',
    'goddamn',
    'bitch',
    'whore',
    'slut',
    'fortnight',
    'fortnut',
    'fortnite',
    'mixed reality',
    'microsoft',
    'emac',
    # BUG FIX: a missing comma previously merged 'ruby' and 'webscale'
    # into the single entry 'rubywebscale'.
    'ruby',
    'webscale',
    'web scale',
    'windows',
    'dick'
]
import discord
# NOTE(review): bot token hardcoded in source — rotate it and load from the
# environment instead.
TOKEN = 'NDU2NjA4NDI5ODQzMjgzOTk4.DgNcRw.EviOEVoX7Lwtb1oHcOp3RGzg5L8'
# --- Resistance game state (module-level; reset by gameEnd()) ---
# 0 = none, 1 = lobby phase, 2 = in progress
gameStatus = 0
host = None  # player who opened the lobby
players = []  # everyone currently in the game
spies = []  # secretly assigned spies
regulars = []  # non-spy players
# BUG FIX: 'agents' (the picked mission team) is referenced throughout
# on_message but was never defined, causing a NameError at runtime.
agents = []
missionsAttempted = 0
missionsFailed = 0
missionsPassed = 0
leader = None  # current mission leader
team = []
votes = []  # one slot per player; -1 = not voted, 1 = approve/pass, 0 = reject/fail
teamStatus = 0
rejects = 0
# Number of spies for 5..10 players (indexed by len(players) - 5).
spiesPerPlayers = [2, 2, 3, 3, 3, 4]
# Agents required per mission: rows are missions 1..5, columns are
# player-count buckets (see index computation in on_message).
playersPerMission = [
    [2, 2, 2, 3],
    [3, 3, 3, 4],
    [2, 4, 3, 4],
    [3, 3, 4, 5],
    [3, 4, 4, 5]
]
client = discord.Client()  # global discord client used by all handlers below
async def say(text, channel):
    """Send *text* to *channel* via the global discord client.

    The ``global client`` declaration was removed: it is unnecessary for
    read-only access, since module globals are visible without it.
    """
    await client.send_message(channel, text)
def gameEnd():
    """Reset every module-level game state variable to its idle default.

    BUG FIX: the original omitted ``global players``, so ``players = []``
    only created a local variable and the player list leaked into the next
    game.
    """
    global gameStatus, host, players, spies, regulars
    global missionsAttempted, missionsFailed, missionsPassed
    global leader, team, votes, teamStatus, rejects
    gameStatus = 0  # 0 = none, 1 = lobby phase, 2 = in progress
    host = None
    players = []
    spies = []
    regulars = []
    missionsAttempted = 0
    missionsFailed = 0
    missionsPassed = 0
    leader = None
    team = []
    votes = []
    teamStatus = 0
    rejects = 0
async def gameBegin():
    """Start a game: pick a leader, assign spies, and DM every player.

    NOTE(review): gameStatus is declared global but never advanced to 2
    ("in progress") here or in the caller — confirm whether that is a bug.
    """
    global gameStatus
    global host
    global spies
    global regulars
    global missionsAttempted
    global missionsFailed
    global missionsPassed
    global leader
    global team
    global votes
    global teamStatus
    global rejects
    # Pick a random starting mission leader.
    randLeader = randint(0,len(players)-1)
    leader = players[randLeader]
    for p in players:
        regulars.append(p)
    # Spy count scales with player count (lobby requires >= 5 players).
    totalSpies = spiesPerPlayers[len(players)-5]
    for x in range(0, totalSpies):
        # Move a random player from regulars to spies.
        randSpy = randint(0,len(regulars)-1)
        print(str(randSpy))
        spy = regulars[randSpy]
        print(str(spy))
        regulars.remove(spy)
        spies.append(spy)
    # NOTE(review): spy names are joined with no separator, so multiple
    # spies read as one run-together string — confirm intended.
    spyMessage = ''.join(str(e) for e in spies)
    for p in players:
        if p in spies:
            await say('You are a spy! Your partner(s) in crime are: ' + spyMessage, p)
            #client.send_message(p, 'You are a spy! Your partner(s) in crime are: ' + spyMessage)
        else:
            await say('You are part of the resistance!', p)
            #client.send_message(p, 'You are part of the resistance!')
def checkVotes():
    """Tally votes and advance the game state.

    NOTE(review): this is an empty stub — it declares globals but contains
    no logic, yet on_message calls it after every approve/reject/pass/fail.
    TODO: implement vote tallying.
    """
    global gameStatus
    global host
    global spies
    global regulars
    global missionsAttempted
    global missionsFailed
    global missionsPassed
    global leader
    global team
    global votes
    global teamStatus
    global rejects
@client.event
async def on_message(message):
    """Top-level command dispatcher for every incoming Discord message.

    Handles chat commands (_help, _roll, _big, _soviet, _pig, _avatar,
    _guidance) and the Resistance game commands (_resist ...), mutating the
    module-level game state.
    """
    global gameStatus
    global host
    global spies
    global regulars
    global missionsAttempted
    global missionsFailed
    global missionsPassed
    global leader
    global team
    global votes
    global teamStatus
    global rejects
    # we do not want the bot to reply to itself
    if message.author == client.user:
        return
    print(message.content)
    #text = message.content.split(' ')[1]
    # Everything after the command word, original casing preserved.
    text = ' '.join(message.content.split()[1:])
    #text2 = ' '.join(message.content.split()[2:])
    message.content = message.content.lower()
    # NOTE(review): log file handle is opened per message and never closed or
    # flushed — use a with-block to avoid leaking file descriptors.
    f = open('log.txt', 'a')
    curTime = datetime.datetime.utcnow().isoformat() + '|' + '{:30.30}'.format(message.author.name) + '|' + message.author.id + '|' + message.content + '\n'
    f.write(curTime)
    # Ignore these two channels entirely (after logging).
    if(message.channel.id == '360125095043268608'):
        return
    if(message.channel.id == '364919001434030101'):
        return
    # Vocabulary nag: reply once per message at most.
    for word in expandList:
        if word in message.content:
            msg = 'Expand your vocabulary.'
            await client.send_message(message.channel, msg)
            break
    if message.content.startswith('_help'):
        msg = 'Commands: _help, _guidance, _big, _pig, _soviet, _avatar'
        await client.send_message(message.channel, msg)
    if message.content.startswith('_roll'):
        print('text: ' + text)
        # NOTE(review): int(text) raises ValueError for missing/non-numeric
        # input — consider validating before converting.
        sides = int(text)
        if sides:
            num = randint(1, sides)
            comment = ''
            if(num == 1 and sides != 1):
                comment = 'The universe has deathed you, have fun kiddo.'
            elif(num == sides):
                comment = 'Hot diggety dice-eyes, nice roll partner!'
            elif(num > (sides/2)):
                comment = 'Not bad.'
            elif(num <= (sides/2)):
                comment = 'Could be better.'
            msg = str(sides) + ' sided die result: `' + str(num) + '`\n' + comment
            await client.send_message(message.channel, msg)
    if message.content.startswith('_resist host'):
        if message.channel.type != discord.ChannelType.text:
            await client.send_message(message.channel, 'This must be in a guild channel.')
            # NOTE(review): ChannelType values may not have an .id attribute —
            # confirm this debug print does not raise.
            print(message.channel.type.id)
            return
        if gameStatus != 0:
            await client.send_message(message.channel, 'A game is currently in progress.')
            return
        host = message.author
        players.append(message.author)
        votes.append(-1)
        gameStatus = 1
        await client.send_message(message.channel, 'A resistance lobby is now being hosted.')
    if message.content.startswith('_resist start'):
        if message.channel.type != discord.ChannelType.text:
            await client.send_message(message.channel, 'This must be in a guild channel.')
            return
        if gameStatus != 1:
            await client.send_message(message.channel, 'You are not hosting a lobby')
            return
        if message.author != host:
            await client.send_message(message.channel, 'You are not the host')
            return
        if len(players) < 5:
            await client.send_message(message.channel, 'You need at least 5 players to play.')
            return
        await client.send_message(message.channel, 'The game has begun')
        await gameBegin()
    if message.content.startswith('_resist close'):
        if message.channel.type != discord.ChannelType.text:
            await client.send_message(message.channel, 'This must be in a guild channel.')
            return
        if message.author != host:
            await client.send_message(message.channel, 'You are not the host.')
            return
        gameEnd()
    if message.content.startswith('_resist pick'):
        if message.channel.type != discord.ChannelType.private:
            await client.send_message(message.channel, 'This must be in a DM.')
            return
        if message.author not in players:
            await say('You are not playing', message.channel)
            return
        if message.author != leader:
            await say('You are not the mission leader.', message.channel)
            return
        # BUG: 'agents' is never defined at module level -> NameError here.
        if len(agents) != 0:
            await say('Agents have already been assigned.', message.channel)
            return
        # Bucket player counts above 8 into the last column.
        index = len(players) - 5
        if index > 2:
            # NOTE(review): stray semicolon; harmless but non-idiomatic.
            index = 3;
        if len(message.mentions) != playersPerMission[missionsAttempted][index]:
            # BUG: playersPerMission[...][...] is an int — concatenating it to
            # a str raises TypeError; wrap in str().
            await say('You must assign exactly ' + playersPerMission[missionsAttempted][index] + ' agents.', message.channel)
            return
        for agent in message.mentions:
            agents.append(agent)
    if message.content.startswith('_resist approve'):
        if message.channel.type != discord.ChannelType.private:
            await client.send_message(message.channel, 'This must be in a DM.')
            return
        if message.author not in players:
            await say('You are not playing.', message.channel)
            return
        if teamStatus != 0:
            await say('Team must be forming.', message.channel)
            return
        index = players.index(message.author)
        votes[index] = 1
        checkVotes()
    if message.content.startswith('_resist reject'):
        if message.channel.type != discord.ChannelType.private:
            await client.send_message(message.channel, 'This must be in a DM.')
            return
        if message.author not in players:
            await say('You are not playing.', message.channel)
            return
        if teamStatus != 0:
            await say('Team must be forming.', message.channel)
            return
        index = players.index(message.author)
        votes[index] = 0
        checkVotes()
    if message.content.startswith('_resist pass'):
        # BUG: 'priate' typo -> AttributeError; should be ChannelType.private.
        if message.channel.type != discord.ChannelType.priate:
            await client.send_message(message.channel, 'This must be in a DM.')
            return
        if message.author not in players:
            await say('You are not playing.', message.channel)
            return
        if teamStatus != 1:
            await say('Team must be approved.', message.channel)
            return
        # BUG: missing 'return' after the message below — a non-team member's
        # vote is still recorded.
        if message.author not in agents:
            await say('You are not on the team.', message.channel)
        index = players.index(message.author)
        votes[index] = 1
        checkVotes()
    if message.content.startswith('_resist fail'):
        if message.channel.type != discord.ChannelType.private:
            await client.send_message(message.channel, 'This must be in a DM.')
            return
        if message.author not in players:
            await say('You are not playing.', message.channel)
            return
        if teamStatus != 1:
            await say('Team must be approved.', message.channel)
            return
        # BUG: missing 'return' here as well (see _resist pass).
        if message.author not in agents:
            await say('You are not on the team.', message.channel)
        index = players.index(message.author)
        votes[index] = 0
        checkVotes()
    if message.content.startswith('_resist join'):
        if message.channel.type != discord.ChannelType.text:
            await client.send_message(message.channel, 'This must be in a guild channel.')
            return
        if len(players) >= 10:
            # BUG: 'end_message' typo -> AttributeError; should be send_message.
            await client.end_message(message.channel, 'Game full.')
            return
        if message.author in players:
            await client.send_message(message.channel, 'You are already in this game.')
            return
        players.append(message.author)
        votes.append(-1)
    if message.content.startswith('_resist leave'):
        if message.channel.type != discord.ChannelType.text:
            await client.send_message(message.channel, 'This must be in a guild channel.')
            return
        if message.author not in players:
            await client.send_message(message.channel, 'You cannot leave a game you aren\'t in.')
            return
        if message.author == host:
            gameEnd()
        elif gameStatus == 1:
            players.remove(message.author)
            # BUG: pops the first vote slot instead of the leaving player's —
            # should pop the player's index (captured before removal).
            votes.pop(0)
        elif gameStatus == 2:
            gameEnd()
    if message.content.startswith('_resist players'):
        if message.channel.type != discord.ChannelType.text:
            await client.send_message(message.channel, 'This must be in a guild channel.')
            return
        # NOTE(review): names joined with no separator — see gameBegin.
        msg = ''.join(str(e) for e in players)
        await client.send_message(message.channel, msg)
    if message.content.startswith('_guidance'):
        #msg = 'Hello {0.author.mention}'.format(message)
        msg = random.choice(quotes)
        await client.send_message(message.channel, msg)
    '''
    if message.content.startswith('_death'):
        target = message.mentions
        if target:
            if target[0]:
                targ = target[0]
                currentTime = datetime.datetime.utcnow()
                delta
                with open('death.json', 'r') as read_file:
                    data = json.load(read_file)
                entry = data[message.author.id]
                allow = True
                if entry:
                    death = entry['death']
                    life = entry['life']
                    if death:
                        delta = (currentTime - death).days
                    else:
                        delta = (currentTime - life).days
                if(delta < 1):
                    msg = 'You have to wait a whole day'
                else:
                    msg = '{0.author.mention}'.format(message) + ' has ***DEATHED*** ' + targ.mention
                    data = {}
                    with open('death.json', 'w') as write_file:
                        json.dump(data, write_file)
        await client.send_message(message.channel, msg)
    '''
    if message.content.startswith('_big'):
        msg = '```' + figlet_format(text, width=160) + '```'
        await client.send_message(message.channel, msg)
    if message.content.startswith('_soviet'):
        #msg = '`This message is from the capitalist pigs at OSU:`\n\n' + text
        msg = '`Capitalist pig` <' + message.author.name + '> ' + text
        await client.send_message(client.get_channel('456665509555994624'), msg)
    if message.content.startswith('_pig'):
        #msg = '`This message is from the soviet scum in the Clubhaus:`\n\n' + text
        msg = '`Soviet scum` <' + message.author.name + '> ' + text
        await client.send_message(client.get_channel('456716843407638529'), msg)
    if message.content.startswith('_avatar'):
        # NOTE(review): IndexError when the message contains no mentions.
        msg = message.mentions[0].avatar_url
        await client.send_message(message.channel, msg)
    if client.user in message.mentions:
        msg = 'Da'
        await client.send_message(message.channel, msg)
@client.event
async def on_ready():
    """Log the bot identity once connected and set the playing status."""
    print('vvvv')
    print('Logged in as')
    print(client.user.name)
    print(client.user.id)
    # discord.py 0.16-era API: change_presence takes game=Game(...).
    await client.change_presence(game=discord.Game(name='with capitalist pigs'))
    print('^^^^')
# NOTE(review): client.run() blocks until the bot disconnects, so app.run()
# below is unreachable in normal operation; it also starts the bot on import,
# not only when executed as a script — confirm intended.
client.run(TOKEN)
if __name__ == '__main__':
    app.run()
| 15,739 | 4,735 |
import unittest
import pysal
from pysal.core.IOHandlers.gwt import GwtIO
import tempfile
import os
import warnings
class test_GwtIO(unittest.TestCase):
    """Read/seek/write round-trip tests for the GWT weights file handler."""

    def setUp(self):
        self.test_file = test_file = pysal.examples.get_path('juvenile.gwt')
        self.obj = GwtIO(test_file, 'r')

    def test_close(self):
        f = self.obj
        f.close()
        # assertRaises replaces the long-deprecated failUnlessRaises alias
        # (removed in modern unittest).
        self.assertRaises(ValueError, f.read)

    def test_read(self):
        w = self.obj.read()
        self.assertEqual(168, w.n)
        self.assertEqual(16.678571428571427, w.mean_neighbors)
        w.transform = 'B'
        # list(...) makes this Python 3 safe: dict.values() returns a view,
        # which never compares equal to a list.
        self.assertEqual([1.0], list(w[1].values()))

    def test_seek(self):
        self.test_read()
        self.assertRaises(StopIteration, self.obj.read)
        self.obj.seek(0)
        self.test_read()

    # Commented out by CRS, GWT 'w' mode removed until we can find a good
    # solution for retaining distances. See issue #153.
    # Added back by CRS,
    def test_write(self):
        w = self.obj.read()
        f = tempfile.NamedTemporaryFile(
            suffix='.gwt', dir=pysal.examples.get_path(''))
        fname = f.name
        f.close()
        o = pysal.open(fname, 'w')
        # Copy the shapefile and ID variable names from the old gwt; this is
        # only available after the read() method has been called.
        #o.shpName = self.obj.shpName
        #o.varName = self.obj.varName
        o.write(w)
        o.close()
        wnew = pysal.open(fname, 'r').read()
        self.assertEqual(wnew.pct_nonzero, w.pct_nonzero)
        os.remove(fname)
# Run the suite when executed as a script.
if __name__ == '__main__':
    unittest.main()
| 1,614 | 585 |
"""
Filters cells based on gene number, total counts, and % mitochondrial
From sc-rna-tools package
Created on Mon Jan 10 15:57:46 2022
@author: joe germino (joe.germino@ucsf.edu)
"""
# external imports
from anndata import AnnData
from typing import Tuple, Optional
# sc-rna-tools package imports
from .._configs import configs
from .._utils import debug
from ..plotting import qc_plotting
# Logger named after this module with the text up to the first underscore
# stripped (e.g. the leading "_" of the private package name).
# NOTE(review): assumes __name__ contains at least one underscore — confirm.
logger = configs.create_logger(__name__.split('_', 1)[1])
# -------------------------------------------------------function----------------------------------------------------- #
@debug(logger, configs)
def filter_cells(
    adata: AnnData,
    gene_thresholds: Tuple[int, int],
    count_thresholds: Tuple[int, int],
    mt_threshold: int = 10,
    save_path: Optional[str] = None,
    file_type: str = "png",
    *args, **kwargs
) -> AnnData:
    """Filters cells based on gene number, total counts, and % mitochondrial

    Parameters
    ----------
    adata
        The AnnData with the data to filter
    gene_thresholds
        A Tuple of thresholds for the number of genes per cell with 'gene_thresholds[0]' being the lower bound and
        'gene_thresholds[1]' being the upper bound (both exclusive).
    count_thresholds
        A Tuple of thresholds for the number of total counts cell with 'count_thresholds[0]' being the lower bound and
        'count_thresholds[1]' being the upper bound (both exclusive).
    mt_threshold
        The maximum percent mitochondrial reads per cell. Default 10%.
    args
        Arguments to pass on to qc_plotting function calls
    kwargs
        Keyword arguments to pass on to qc_plotting function calls
    save_path
        The path and file name prefix to save QC plots to ('_qc_plots' or '_filtered_qc_plots' and the file type
        provided with 'file_type' will be appended; pass None to not save the figures
    file_type
        The file type for the figures to be saved

    Returns
    -------
    An AnnData object with cells that don't pass the thresholds filtered out
    """
    # BUG FIX: when save_path is None the f-string used to produce the
    # literal prefix "None_..."; only build file names when saving is wanted.
    pre_path = f"{save_path}_qc_plots.{file_type}" if save_path else None
    post_path = f"{save_path}_filtered_qc_plots.{file_type}" if save_path else None
    qc_plotting(
        adata,
        counts_thresholds=count_thresholds,
        genes_thresholds=gene_thresholds,
        save_path=pre_path,
        *args, **kwargs
    )
    logger.info(f"Number of cells before QC filtering: {len(adata.obs)}")
    # Apply all thresholds as a single boolean mask (all bounds exclusive);
    # this avoids chaining views before the final copy.
    keep = (
        (adata.obs.pct_counts_mt < mt_threshold)
        & (adata.obs.total_counts > count_thresholds[0])
        & (adata.obs.total_counts < count_thresholds[1])
        & (adata.obs.n_genes_by_counts > gene_thresholds[0])
        & (adata.obs.n_genes_by_counts < gene_thresholds[1])
    )
    filtered_adata = adata[keep].copy()
    logger.info(f"Number of cells after QC filtering: {len(filtered_adata.obs)}")
    qc_plotting(
        filtered_adata,
        show_thresholds=False,
        save_path=post_path,
        *args, **kwargs
    )
    return filtered_adata
| 3,036 | 961 |
from django.db import models
# Create your models here.
# well hello
| 65 | 19 |
import unittest
import sys
import itertools
sys.path.append('./')
solutions = __import__('solutions.046_permutations', fromlist='*')
class Test046(unittest.TestCase):
def test_permute(self):
s = solutions.Solution()
l = [1]
self.assertEqual(s.permute(l), self._permutaions(l))
l = [1, 2]
self.assertEqual(s.permute(l), self._permutaions(l))
l = [1, 2, 3]
self.assertEqual(s.permute(l), self._permutaions(l))
l = [1, 2, 3, 4]
self.assertEqual(s.permute(l), self._permutaions(l))
# for i in xrange(9):
# l = [ n for n in xrange(i+1) ]
# self.assertEqual(s.permute(l), self._permutaions(l))
def _permutaions(self, l):
return sorted([ list(pair) for pair in itertools.permutations(l) ])
if __name__ == '__main__':
unittest.main()
| 870 | 337 |
# coding: utf-8
"""
KubeVirt API
This is KubeVirt API an add-on for Kubernetes.
OpenAPI spec version: 1.0.0
Contact: kubevirt-dev@googlegroups.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1VirtualMachineInstanceGuestAgentInfo(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'api_version': 'str',
        'fs_info': 'V1VirtualMachineInstanceFileSystemInfo',
        'guest_agent_version': 'str',
        'hostname': 'str',
        'kind': 'str',
        'os': 'V1VirtualMachineInstanceGuestOSInfo',
        'timezone': 'str',
        'user_list': 'list[V1VirtualMachineInstanceGuestOSUser]'
    }
    attribute_map = {
        'api_version': 'apiVersion',
        'fs_info': 'fsInfo',
        'guest_agent_version': 'guestAgentVersion',
        'hostname': 'hostname',
        'kind': 'kind',
        'os': 'os',
        'timezone': 'timezone',
        'user_list': 'userList'
    }
    def __init__(self, api_version=None, fs_info=None, guest_agent_version=None, hostname=None, kind=None, os=None, timezone=None, user_list=None):
        """
        V1VirtualMachineInstanceGuestAgentInfo - a model defined in Swagger
        """
        # All attributes default to None; only explicitly supplied values are
        # assigned through the property setters below.
        self._api_version = None
        self._fs_info = None
        self._guest_agent_version = None
        self._hostname = None
        self._kind = None
        self._os = None
        self._timezone = None
        self._user_list = None
        if api_version is not None:
          self.api_version = api_version
        if fs_info is not None:
          self.fs_info = fs_info
        if guest_agent_version is not None:
          self.guest_agent_version = guest_agent_version
        if hostname is not None:
          self.hostname = hostname
        if kind is not None:
          self.kind = kind
        if os is not None:
          self.os = os
        if timezone is not None:
          self.timezone = timezone
        if user_list is not None:
          self.user_list = user_list
    @property
    def api_version(self):
        """
        Gets the api_version of this V1VirtualMachineInstanceGuestAgentInfo.
        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources

        :return: The api_version of this V1VirtualMachineInstanceGuestAgentInfo.
        :rtype: str
        """
        return self._api_version
    @api_version.setter
    def api_version(self, api_version):
        """
        Sets the api_version of this V1VirtualMachineInstanceGuestAgentInfo.
        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources

        :param api_version: The api_version of this V1VirtualMachineInstanceGuestAgentInfo.
        :type: str
        """
        self._api_version = api_version
    @property
    def fs_info(self):
        """
        Gets the fs_info of this V1VirtualMachineInstanceGuestAgentInfo.
        FSInfo is a guest os filesystem information containing the disk mapping and disk mounts with usage

        :return: The fs_info of this V1VirtualMachineInstanceGuestAgentInfo.
        :rtype: V1VirtualMachineInstanceFileSystemInfo
        """
        return self._fs_info
    @fs_info.setter
    def fs_info(self, fs_info):
        """
        Sets the fs_info of this V1VirtualMachineInstanceGuestAgentInfo.
        FSInfo is a guest os filesystem information containing the disk mapping and disk mounts with usage

        :param fs_info: The fs_info of this V1VirtualMachineInstanceGuestAgentInfo.
        :type: V1VirtualMachineInstanceFileSystemInfo
        """
        self._fs_info = fs_info
    @property
    def guest_agent_version(self):
        """
        Gets the guest_agent_version of this V1VirtualMachineInstanceGuestAgentInfo.
        GAVersion is a version of currently installed guest agent

        :return: The guest_agent_version of this V1VirtualMachineInstanceGuestAgentInfo.
        :rtype: str
        """
        return self._guest_agent_version
    @guest_agent_version.setter
    def guest_agent_version(self, guest_agent_version):
        """
        Sets the guest_agent_version of this V1VirtualMachineInstanceGuestAgentInfo.
        GAVersion is a version of currently installed guest agent

        :param guest_agent_version: The guest_agent_version of this V1VirtualMachineInstanceGuestAgentInfo.
        :type: str
        """
        self._guest_agent_version = guest_agent_version
    @property
    def hostname(self):
        """
        Gets the hostname of this V1VirtualMachineInstanceGuestAgentInfo.
        Hostname represents FQDN of a guest

        :return: The hostname of this V1VirtualMachineInstanceGuestAgentInfo.
        :rtype: str
        """
        return self._hostname
    @hostname.setter
    def hostname(self, hostname):
        """
        Sets the hostname of this V1VirtualMachineInstanceGuestAgentInfo.
        Hostname represents FQDN of a guest

        :param hostname: The hostname of this V1VirtualMachineInstanceGuestAgentInfo.
        :type: str
        """
        self._hostname = hostname
    @property
    def kind(self):
        """
        Gets the kind of this V1VirtualMachineInstanceGuestAgentInfo.
        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds

        :return: The kind of this V1VirtualMachineInstanceGuestAgentInfo.
        :rtype: str
        """
        return self._kind
    @kind.setter
    def kind(self, kind):
        """
        Sets the kind of this V1VirtualMachineInstanceGuestAgentInfo.
        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds

        :param kind: The kind of this V1VirtualMachineInstanceGuestAgentInfo.
        :type: str
        """
        self._kind = kind
    @property
    def os(self):
        """
        Gets the os of this V1VirtualMachineInstanceGuestAgentInfo.
        OS contains the guest operating system information

        :return: The os of this V1VirtualMachineInstanceGuestAgentInfo.
        :rtype: V1VirtualMachineInstanceGuestOSInfo
        """
        return self._os
    @os.setter
    def os(self, os):
        """
        Sets the os of this V1VirtualMachineInstanceGuestAgentInfo.
        OS contains the guest operating system information

        :param os: The os of this V1VirtualMachineInstanceGuestAgentInfo.
        :type: V1VirtualMachineInstanceGuestOSInfo
        """
        self._os = os
    @property
    def timezone(self):
        """
        Gets the timezone of this V1VirtualMachineInstanceGuestAgentInfo.
        Timezone is guest os current timezone

        :return: The timezone of this V1VirtualMachineInstanceGuestAgentInfo.
        :rtype: str
        """
        return self._timezone
    @timezone.setter
    def timezone(self, timezone):
        """
        Sets the timezone of this V1VirtualMachineInstanceGuestAgentInfo.
        Timezone is guest os current timezone

        :param timezone: The timezone of this V1VirtualMachineInstanceGuestAgentInfo.
        :type: str
        """
        self._timezone = timezone
    @property
    def user_list(self):
        """
        Gets the user_list of this V1VirtualMachineInstanceGuestAgentInfo.
        UserList is a list of active guest OS users

        :return: The user_list of this V1VirtualMachineInstanceGuestAgentInfo.
        :rtype: list[V1VirtualMachineInstanceGuestOSUser]
        """
        return self._user_list
    @user_list.setter
    def user_list(self, user_list):
        """
        Sets the user_list of this V1VirtualMachineInstanceGuestAgentInfo.
        UserList is a list of active guest OS users

        :param user_list: The user_list of this V1VirtualMachineInstanceGuestAgentInfo.
        :type: list[V1VirtualMachineInstanceGuestOSUser]
        """
        self._user_list = user_list
    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        # Recursively convert nested models, lists of models, and dicts of
        # models via their own to_dict(); plain values pass through.
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # Attribute-wise comparison via the private __dict__ of both models.
        if not isinstance(other, V1VirtualMachineInstanceGuestAgentInfo):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| 10,618 | 2,915 |
# -*- coding: utf-8 -*-
"""SOG infile processor.
Do various operations on input files for the the SOG bio-physical
model of deep estuaries. Most notably, convert new YAML infiles into
the old Fortran-style infiles that SOG reads.
This module provides services to the SOG command processor.
:Author: Doug Latornell <djl@douglatornell.ca>
:License: Apache License, Version 2.0
Copyright 2010-2014 Doug Latornell and The University of British Columbia
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import pprint
import sys
from tempfile import NamedTemporaryFile
import colander
import yaml
from . import SOG_infile
from .SOG_infile_schema import (
SOG_Infile,
SOG_KEYS,
SOG_EXTRA_KEYS,
SOG_AVG_HIST_FORCING_KEYS,
)
from .SOG_YAML_schema import (
YAML_Infile,
yaml_to_infile,
)
__all__ = ['create_infile', 'read_infile']
def create_infile(yaml_infile, edit_files):
    """Create a SOG Fortran-style infile for SOG to read from
    `yaml_infile`.

    :arg yaml_infile: Path/name of a SOG YAML infile.
    :type yaml_infile: str

    :arg edit_files: Paths/names of YAML infile snippets to be merged
                     into `yaml_infile`.
    :type edit_files: list

    :returns infile_name: Path/name of the SOG Fortran-style temporary
                          infile that is created.
    :rtype: str
    """
    data = _read_yaml_infile(yaml_infile)
    YAML = YAML_Infile()
    # Base infile must be complete: edit_mode defaults to False here.
    yaml_struct = _deserialize_yaml(data, YAML, yaml_infile)
    # Edit files may be partial; merge each one over the base in order.
    for edit_file in edit_files:
        edit_data = _read_yaml_infile(edit_file)
        edit_struct = _deserialize_yaml(
            edit_data, YAML, edit_file, edit_mode=True)
        _merge_yaml_structs(edit_struct, yaml_struct, YAML)
    # Map the merged YAML structure onto the Fortran-style schema and dump it.
    infile_struct = yaml_to_infile(YAML, yaml_struct)
    SOG = SOG_Infile()
    data = SOG.serialize(infile_struct)
    # delete=False: SOG reads the file after this function returns; the
    # caller is responsible for cleaning it up.
    with NamedTemporaryFile(mode='wt', suffix='.infile', delete=False) as f:
        SOG_infile.dump(
            data, SOG_KEYS, SOG_EXTRA_KEYS, SOG_AVG_HIST_FORCING_KEYS, f)
        infile_name = f.name
    return infile_name
def read_infile(yaml_infile, edit_files, key):
    """Return value for specified infile key.

    :arg yaml_infile: Path/name of a SOG YAML infile.
    :type yaml_infile: str

    :arg edit_files: Paths/names of YAML infile snippets to be merged
                     into `yaml_infile`.
    :type edit_files: list

    :arg key: Infile key to return value for.
              Key must be "fully qualified";
              i.e. a dotted name path through the nested YAML mappings,
              like :kbd:`initial_conditions.init_datetime`.
    :type key: str

    :returns value: Infile value associated with key
    :rtype: str
    """
    data = _read_yaml_infile(yaml_infile)
    YAML = YAML_Infile()
    # NOTE(review): edit_mode=True for the *base* infile here, unlike
    # create_infile which uses the strict default — confirm this asymmetry
    # (allowing a partial base file for read-only access) is intentional.
    yaml_struct = _deserialize_yaml(data, YAML, yaml_infile, edit_mode=True)
    for edit_file in edit_files:
        edit_data = _read_yaml_infile(edit_file)
        edit_struct = _deserialize_yaml(
            edit_data, YAML, edit_file, edit_mode=True)
        _merge_yaml_structs(edit_struct, yaml_struct, YAML)
    try:
        value = YAML.get_value(yaml_struct, key)['value']
    except KeyError:
        # Unknown key: report it and exit with a CLI error status.
        print('KeyError: {0}'.format(key), file=sys.stderr)
        sys.exit(2)
    return value
def _read_yaml_infile(yaml_infile):
    """Read `yaml_infile` and return the resulting Python dict.

    :arg yaml_infile: Path/name of a SOG YAML infile.
    :type yaml_infile: str

    :returns data: Content of `yaml_infile` as a Python dict.
    :rtype: dict
    """
    with open(yaml_infile, 'rt') as f:
        try:
            data = yaml.safe_load(f)
        except yaml.YAMLError:
            # BUG FIX: catching only yaml.scanner.ScannerError let parser and
            # composer errors escape; YAMLError is the common base class, so
            # every "this is not valid YAML" failure is handled uniformly.
            print('Unable to parse {0}: Are you sure that it is YAML?'
                  .format(yaml_infile), file=sys.stderr)
            sys.exit(2)
    return data
def _deserialize_yaml(data, yaml_schema, yaml_infile, edit_mode=False):
    """Deserialize `data` according to `yaml_schema` and return the
    resulting YAML schema data structure.

    :arg data: Content of `yaml_infile` as a Python dict.
    :type data: dict

    :arg yaml_schema: SOG YAML infile schema instance
    :type yaml_schema: :class:`YAML_Infile` instance

    :arg yaml_infile: Path/name of a SOG YAML infile.
    :type yaml_infile: str

    :arg edit_mode: Turn edit mode on/off for schema binding;
                    defaults to False.
                    True means that elements can be missing from schema
                    block mappings; used to deserialize edit files.
                    False means that missing elements aren't allowed;
                    used to deserialize the base infile.
    :type edit_mode: boolean

    :returns yaml_struct: SOG YAML infile data structure
    :rtype: nested dicts
    """
    bound_schema = yaml_schema.bind(allow_missing=edit_mode)
    try:
        return bound_schema.deserialize(data)
    except colander.Invalid as e:
        # Report every offending node, then bail out.
        print('Invalid SOG YAML in {0}. '
              'The following parameters are missing or misspelled:'
              .format(yaml_infile), file=sys.stderr)
        pprint.pprint(e.asdict(), sys.stderr)
        sys.exit(2)
def _merge_yaml_structs(edit_struct, yaml_struct, schema):
"""Merge non-None values in `edit_struct` into `yaml_struct`.
:arg edit_struct: Edit file data structure to be merged into `yaml_struct`.
:type edit_struct: dict
:arg yaml_struct: SOG YAML infile data structure to receive merge from
`edit_struct`.
:type yaml_struct: dict
:arg schema: SOG YAML infile schema instance.
:type schema: :class:`YAML_Infile` instance
"""
for key in schema.flatten(yaml_struct):
try:
value = schema.get_value(edit_struct, key)
if value is not None:
schema.set_value(yaml_struct, key, value)
except TypeError:
# Ignore empty block mappings
pass
| 6,560 | 2,139 |
# -*- coding: utf-8 -*-
"""
Epic Games Store API Wrapper
~~~~~~~~~~~~~~~~~~~
An API wrapper for Epic Games Store
:copyright: (c) 2020 SD4RK
:license: MIT, see LICENSE for more details.
"""
from .api import *
from .models import *
from .exc import EGSException, EGSNotFound
| 278 | 106 |
import sys
import pkg_resources
from cStringIO import StringIO
from textwrap import TextWrapper
from zopeskel.base import wrap_help_paras
from paste.script.command import get_commands
from zopeskel.ui import list_sorted_templates
# Short usage banner; '%s' is filled in with the printable template list
# produced by list_printable_templates().
USAGE = """
Usage:
zopeskel <template> <output-name> [var1=value] ... [varN=value]
zopeskel --help Full help
zopeskel --list List template verbosely, with details
zopeskel --make-config-file Output .zopeskel prefs file
zopeskel --version Print installed version
%s
Warning: use of the --svn-repository argument is not allowed with this script
For further help information, please invoke this script with the
option "--help".
"""
# Long-form help text shown by `zopeskel --help`.
DESCRIPTION = """
This script allows you to create basic skeletons for plone and zope
products and buildouts based on best-practice templates.
It is a wrapper around PasteScript ("paster"), providing an easier
syntax for invoking and better help.
Invoking this script
--------------------
Basic usage::
zopeskel <template>
(To get a list of the templates, run the script without any arguments;
for a verbose list with full descriptions, run the ``zopeskel --list``)
For example::
zopeskel archetypes
To create an Archetypes-based product for Plone. This will prompt you
for the name of your product, and for other information about it.
If you to specify your output name (resulting product, egg, or buildout,
depending on the template being used), you can also do so::
zopeskel <template> <output-name>
For example::
zopeskel archetypes Products.Example
In addition, you can pass variables to this that would be requested
by that template, and these will then be used. This is an advanced
feature mostly useful for scripted use of this::
zopeskel archetypes Products.Example author_email=joel@joelburton.com
(You can specify as many of these as you want, in name=value pairs.
To get the list of variables that a template expects, you can ask for
this with ``paster create -t <template-name> --list-variables``).
Interactive Help
----------------
While being prompted on each question, you can enter with a single
question mark to receive interactive help for that question.
For example::
Description (One-line description of the project) ['']: ?
| This should be a single-line description of your project. It will
| be used in the egg's setup.py, and, for Zope/Plone projects, may be
| used in the GenericSetup profile description.
Providing defaults
------------------
It is also possible to set up default values to be used for any template by
creating a file called ``.zopeskel`` in your home directory. This file
should be in INI format.
For example, our ``$HOME/.zopeskel`` could contain::
[DEFAULT]
author_email = joel@joelburton.com
license_name = GPL
master_keywords = my common keywords here
[plone3_theme]
empty_styles = False
license_name = BSD
keywords = %(master_keywords)s additional keywords
You can generate a starter .zopeskel file by running this script with
the --make-config-file option. This output can be redirected into
your ``.zopeskel`` file::
bin/zopeskel --make-config-file > /path/to/home/.zopeskel
Notes:
1) "empty_styles" applies only to themes; we can make this setting
in the template-specific section of this file. This setting will
not be used for other templates.
2) For a common setting, like our email address, we can set this in
a section called DEFAULT; settings made in this section are used
for all templates.
3) We can make a setting in DEFAULT and then override it for a
particular template. In this example, we might generally prefer the GPL,
but issue our themes under the BSD license.
4) You can refer to variables from the same section or from the
DEFAULT section using Python string formatting. In this example,
we have a common set of keywords set in DEFAULT and extend it
for the theming template by referring to the master list.
Differences from the 'paster create' command
--------------------------------------------
1) The --svn-repository argument that can be provided to 'paster create' is not
allowed when using the zopeskel script. It will raise an error. The reasons
for this are discussed at length in the zopeskel mailing list and in the
zopeskel issue tracker:
http://plone.org/products/zopeskel/issues/34
http://plone.org/products/zopeskel/issues/35
If this argument is desired, the user should revert to calling 'paster create'
directly. However, be warned that buildout templates will not work with the
argument due to assumptions in the base paster code.
Questions
---------
If you have further questions about the usage of bin/zopeskel, please feel
free to post your questions to the zopeskel mailing list or jump onto the
plone IRC channel (#plone) at irc.freenode.net.
To see the templates supported, run this script without any options.
For a verbose listing with help, use ``zopeskel --list``.
"""
# Guidance text keyed by a template's expected dot count (its 'ndots'
# attribute); shown when prompting interactively for a project name.
DOT_HELP = {
    0: """
This template expects a project name with no dots in it (a simple
Python package name, like 'foo').
""",
    1: """
This template expects a project name with 1 dot in it (a 'basic
namespace', like 'foo.bar').
""",
    2: """
This template expects a project name with 2 dots in it (a 'nested
namespace', like 'foo.bar.baz').
"""
}
def checkdots(template, name):
    """Check if project name appears legal, given template requirements.

    Templates can declare how many namespace levels they expect via an
    ``ndots`` attribute (number of dots in the project name).  Verify
    that *name* contains exactly that many dots and that every dotted
    part is a legal Python identifier.  Raises ValueError otherwise.
    """
    expected = getattr(template, 'ndots', None)
    if expected is None:
        return  # Template imposes no constraint; nothing to validate.
    actual = name.count(".")
    if expected != actual:
        raise ValueError(
            "Project name expected %i dots, supplied '%s' has %i dots" % (
                expected, name, actual))
    for part in name.split("."):
        # Identifier check via the __slots__ trick,
        # http://code.activestate.com/recipes/413487/
        try:
            class test(object): __slots__ = [part]
        except TypeError:
            raise ValueError(
                "Not a valid Python dotted name: %s ('%s' is not an identifier)" % (name, part))
def usage():
    """Print the short usage summary including the template list."""
    templates = list_printable_templates()
    # Equivalent to the Python 2 statement `print USAGE % templates`.
    sys.stdout.write((USAGE % templates) + "\n")
def show_help():
    """Print the long descriptive help text."""
    # Equivalent to the Python 2 statement `print DESCRIPTION`.
    sys.stdout.write(DESCRIPTION + "\n")
def show_version():
    """Print the installed zopeskel version, if it can be determined."""
    try:
        dist = pkg_resources.get_distribution('zopeskel')
    except pkg_resources.DistributionNotFound:
        sys.stdout.write('unable to identify zopeskel version' + "\n")
    else:
        sys.stdout.write('%s\n' % dist.version)
def list_verbose():
    """List templates verbosely, with full help."""
    # Wrapper used to indent each template's help paragraphs.
    textwrapper = TextWrapper(
        initial_indent=" ", subsequent_indent=" ")
    cats = list_sorted_templates()
    # One section per category: title, underline, then every template
    # with its summary and (optionally) wrapped help text.
    for title, items in cats.items():
        print "\n"+ title
        print "-" * len(title)
        for temp in items:
            print "\n%s: %s\n" % (temp['name'], temp['summary'])
            if temp['help']:
                wrap_help_paras(textwrapper, temp['help'])
    # Trailing blank line after the whole listing.
    print
def list_printable_templates():
    """
    Printable list of all templates, sorted into two categories.

    Returns the listing as a single string (it is interpolated into
    USAGE rather than printed directly).
    """
    s = StringIO()
    cats = list_sorted_templates()
    templates = sum(cats.values(), []) # flatten into single list
    # Longest template name, used to align the summary column.
    max_name = max([len(x['name']) for x in templates])
    for title, items in cats.items():
        print >>s, "\n%s\n" % title
        for entry in items:
            # NB: the trailing comma suppresses the newline after each row.
            print >>s, "| %s:%s %s\n" % (
                entry['name'],
                ' '*(max_name-len(entry['name'])),
                entry['summary']),
    s.seek(0)
    return s.read()
def generate_dotzopeskel():
    """Make an example .zopeskel file for user.

    Writes an INI-style starter config to stdout: a [DEFAULT] section
    followed by one commented-out section per available template.
    """
    cats = list_sorted_templates()
    print """
# This file can contain preferences for zopeskel.
# To do so, uncomment the lines that look like:
# variable_name = Default Value
[DEFAULT]
"""
    for temp in sum(cats.values(), []):
        print "\n[%(name)s]\n" % temp
        # Load the template's entry point to enumerate its variables.
        tempc = temp['entry'].load()
        for var in tempc.vars:
            if hasattr(var, 'pretty_description'):
                print "# %s" % var.pretty_description()
            print "# %s = %s\n" % ( var.name, var.default )
def process_args():
    """ return a tuple of template_name, output_name and everything else

    everything else will be returned as a dictionary of key/value pairs

    Raises SyntaxError when no template name is given, when a positional
    argument appears after the output name, or when the forbidden
    --svn-repository argument is supplied.
    """
    args = sys.argv[1:]
    try:
        template_name = args.pop(0)
    except IndexError:
        raise SyntaxError('No template name provided')
    output_name = None
    others = {}
    for arg in args:
        eq_index = arg.find('=')
        if eq_index == -1 and not output_name:
            # First bare argument is the output (project) name.
            output_name = arg
        elif eq_index > 0:
            # BUG FIX: split on the first '=' only, so values may
            # themselves contain '=' (e.g. description="a=b").  The
            # previous arg.split('=') raised ValueError on such input.
            key, val = arg.split('=', 1)
            # the --svn-repository argument to paster does some things that cause
            # it to be pretty much incompatible with zopeskel. See the following
            # zopeskel issues:
            #     http://plone.org/products/zopeskel/issues/35
            #     http://plone.org/products/zopeskel/issues/34
            # For this reason, we are going to disallow using the --svn-repository
            # argument when using the zopeskel wrapper.  Those who wish to use it
            # can still do so by going back to paster, with the caveat that there
            # are some templates (particularly the buildout ones) for which the
            # argument will always throw errors (at least until the problems are
            # fixed upstream in paster itself).
            if 'svn-repository' in key:
                msg = 'for a number of reasons, the --svn-repository argument '
                msg += 'is not allowed with the zopeskel script. '
                msg += "Try --help for more information"
                raise SyntaxError(msg)
            others[key] = val
        else:
            raise SyntaxError(arg)
    return template_name, output_name, others
def run():
    """Entry point: dispatch on command-line flags, validate arguments
    and hand off project creation to paster's 'create' command."""
    # Global flags: each one short-circuits the normal creation flow.
    if "--help" in sys.argv:
        show_help()
        return
    if "--make-config-file" in sys.argv:
        generate_dotzopeskel()
        return
    if "--list" in sys.argv:
        list_verbose()
        return
    if "--version" in sys.argv:
        show_version()
        return
    if len(sys.argv) == 1:
        usage()
        return
    try:
        template_name, output_name, opts = process_args()
    except SyntaxError, e:
        usage()
        print "ERROR: There was a problem with your arguments: %s\n" % e
        return
    # Resolve the requested template among registered paster entry points.
    rez = pkg_resources.iter_entry_points(
        'paste.paster_create_template',
        template_name)
    rez = list(rez)
    if not rez:
        usage()
        print "ERROR: No such template: %s\n" % template_name
        return
    template = rez[0].load()
    print "\n%s: %s" % (template_name, template.summary)
    help = getattr(template, 'help', None)
    if help:
        print template.help
    create = get_commands()['create'].load()
    command = create('create')
    if output_name:
        # Name given on the command line: validate it once, fail hard.
        try:
            checkdots(template, output_name)
        except ValueError, e:
            print "ERROR: %s\n" % e
            return
    else:
        # No project name supplied: prompt until a valid one is entered,
        # showing dot-count guidance appropriate for this template.
        ndots = getattr(template, 'ndots', None)
        help = DOT_HELP.get(ndots)
        while True:
            if help: print help
            try:
                output_name = command.challenge("Enter project name")
                checkdots(template, output_name)
            except ValueError, e:
                print "\nERROR: %s" % e
            else:
                break
    print """
If at any point, you need additional help for a question, you can enter
'?' and press RETURN.
"""
    # Re-pack the key=value options and run paster 'create' in quiet mode.
    optslist = [ '%s=%s' % (k,v) for k, v in opts.items() ]
    if output_name is not None:
        optslist.insert(0, output_name)
    command.run( [ '-q', '-t', template_name ] + optslist )
| 12,127 | 3,487 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library module for TPU Embedding mid level API test."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.ops import init_ops_v2
from tensorflow.python.platform import test
from tensorflow.python.tpu import tpu_embedding_v2_utils
class EmbeddingTestBase(test.TestCase):
    """Base embedding test class for use on CPU and TPU."""

    def _create_initial_data(self):
        """Create the common test data used by both TPU and CPU.

        Sets up two embedding tables ('video' and 'user'), three feature
        configs pointing at them, and fixed sparse-style input data
        (indices / values / row lengths) for one global batch.
        """
        # 32 consecutive floats; both tables slice their rows out of this
        # shared constant initializer.
        self.embedding_values = np.array(list(range(32)), dtype=np.float64)
        self.initializer = init_ops_v2.Constant(self.embedding_values)
        # Embedding for video initialized to
        # 0 1 2 3
        # 4 5 6 7
        # ...
        self.table_video = tpu_embedding_v2_utils.TableConfig(
            vocabulary_size=8,
            dim=4,
            initializer=self.initializer,
            combiner='sum',
            name='video')
        # Embedding for user initialized to
        # 0 1
        # 2 3
        # 4 5
        # 6 7
        # ...
        self.table_user = tpu_embedding_v2_utils.TableConfig(
            vocabulary_size=16,
            dim=2,
            initializer=self.initializer,
            combiner='mean',
            name='user')
        # 'watched' and 'favorited' share the video table; 'friends' uses
        # the user table.
        self.feature_config = (
            tpu_embedding_v2_utils.FeatureConfig(
                table=self.table_video, name='watched'),
            tpu_embedding_v2_utils.FeatureConfig(
                table=self.table_video, name='favorited'),
            tpu_embedding_v2_utils.FeatureConfig(
                table=self.table_user, name='friends'))
        self.batch_size = 2
        self.data_batch_size = 4
        # One (global) batch of inputs
        # sparse tensor for watched:
        # row 0: 0
        # row 1: 0, 1
        # row 2: 0, 1
        # row 3: 1
        self.feature_watched_indices = [[0, 0], [1, 0], [1, 1],
                                        [2, 0], [2, 1], [3, 0]]
        self.feature_watched_values = [0, 0, 1, 0, 1, 1]
        self.feature_watched_row_lengths = [1, 2, 2, 1]
        # sparse tensor for favorited:
        # row 0: 0, 1
        # row 1: 1
        # row 2: 0
        # row 3: 0, 1
        self.feature_favorited_indices = [[0, 0], [0, 1], [1, 0],
                                          [2, 0], [3, 0], [3, 1]]
        self.feature_favorited_values = [0, 1, 1, 0, 0, 1]
        self.feature_favorited_row_lengths = [2, 1, 1, 2]
        # sparse tensor for friends:
        # row 0: 3
        # row 1: 0, 1, 2
        # row 2: 3
        # row 3: 0, 1, 2
        self.feature_friends_indices = [[0, 0], [1, 0], [1, 1], [1, 2],
                                        [2, 0], [3, 0], [3, 1], [3, 2]]
        self.feature_friends_values = [3, 0, 1, 2, 3, 0, 1, 2]
        self.feature_friends_row_lengths = [1, 3, 1, 3]
| 3,330 | 1,234 |
# Mapping tables from LIMS process-type ids to process names, grouped by
# workflow stage.  The 'Description' key in each dict documents the group
# itself.  NOTE(review): the historical misspellings ('INITALQC...',
# 'aggrigation') are preserved verbatim -- external code and data refer
# to these names; do not "fix" them here.
INITALQCFINISHEDLIB = {'Description':'All processes included in the initial qc protocol for finished libraries, except the aggregation step.',
    '24' : 'Customer Gel QC',
    '62' : 'qPCR QC (Library Validation) 4.0',
    '64' : 'Quant-iT QC (Library Validation) 4.0',
    '67' : 'Qubit QC (Library Validation) 4.0',
    '20' : 'CaliperGX QC (DNA)',
    '17' : 'Bioanalyzer QC (Library Validation) 4.0'}
INITALQC ={'Description':'All processes included in the initial qc protocol, except the aggrigation step.',
    '63' : 'Quant-iT QC (DNA) 4.0',
    '65' : 'Quant-iT QC (RNA) 4.0',
    '66' : 'Qubit QC (DNA) 4.0',
    '68' : 'Qubit QC (RNA) 4.0',
    '24' : 'Customer Gel QC',
    '20' : 'CaliperGX QC (DNA)',
    '16' : 'Bioanalyzer QC (DNA) 4.0',
    '18' : 'Bioanalyzer QC (RNA) 4.0',
    '116' : 'CaliperGX QC (RNA)',
    '504' : 'Volume Measurement QC'}
AGRINITQC = {'Description':'Aggregation step of the initial qc protocol',
    '7' : 'Aggregate QC (DNA) 4.0',
    '9' : 'Aggregate QC (RNA) 4.0'}
PREPREPSTART = {'Description':'Process/processes that can be defined as a start of the library preparation protocol. If the work flow involves two library preparation protocols, such as for exome captue, only the steps of the first protocol should be given here.',
    '304' : "Ligate 3' adapters (TruSeq small RNA) 1.0"}
POOLING = {'Description':'To identify the reagent labels (indexes) of each sample. If multiple pooling steps, the first pooling step after indexing should be specified',
    '42': "Library Pooling (Illumina SBS) 4.0",
    '43': "Library Pooling (MiSeq) 4.0",
    '44': "Library Pooling (TruSeq Amplicon) 4.0",
    '45': "Library Pooling (TruSeq Exome) 4.0",
    '58': "Pooling For Multiplexed Sequencing (SS XT) 4.0",
    '255': "Library Pooling (Finished Libraries) 4.0",
    '308': "Library Pooling (TruSeq Small RNA) 1.0",
    '404': "Pre-Pooling (Illumina SBS) 4.0",
    '506': "Pre-Pooling (MiSeq) 4.0",
    '508': "Applications Pre-Pooling",
    '716': 'Library Pooling (HiSeq X) 1.0'}
PREPSTART = {'Description':'Process/processes that can be defined as a start of the library preparation protocol. The first one of these that are run in lims is used to set the prep start date. If the work flow involves two library preparation protocols, such as for exome capture, the prep start step of the second protocol should be given here. ',
    '10' : 'Aliquot Libraries for Hybridization (SS XT)',
    '47' : 'mRNA Purification, Fragmentation & cDNA synthesis (TruSeq RNA) 4.0',
    '33' : 'Fragment DNA (TruSeq DNA) 4.0',
    '407' : 'Fragment DNA (ThruPlex)',
    '308': 'Library Pooling (TruSeq Small RNA) 1.0',
    '117' : 'Applications Generic Process',
    '454' : 'ThruPlex template preparation and synthesis',
    '405' : 'RiboZero depletion'}
PREPEND = {'Description':'Process that can be defined as a end of the library preparation. If more than one library preparation protocol is included in the work flow, only the prep end step of the second protocol should be given here. Used to set the prep finished date.',
    '157': 'Applications Finish Prep',
    '109' : 'CA Purification',
    '456' : 'Purification (ThruPlex)',
    '111' : 'Amplify Captured Libraries to Add Index Tags (SS XT) 4.0',
    '406' : 'End repair, size selection, A-tailing and adapter ligation (TruSeq PCR-free DNA) 4.0',
    '311': 'Sample Placement (Size Selection)'}
LIBVAL = {'Description':'All processes included in the library validation protocol, except the aggregation step. If the work flow involves two library preparation protocols, such as for exome capture, only the steps of the second protocol should be given here.',
    '62' : 'qPCR QC (Library Validation) 4.0',
    '64' : 'Quant-iT QC (Library Validation) 4.0',
    '67' : 'Qubit QC (Library Validation) 4.0',
    '20' : 'CaliperGX QC (DNA)',
    '17' : 'Bioanalyzer QC (Library Validation) 4.0'}
LIBVALFINISHEDLIB = {'Description':'',
    '62' : 'qPCR QC (Library Validation) 4.0',
    '64' : 'Quant-iT QC (Library Validation) 4.0',
    '67' : 'Qubit QC (Library Validation) 4.0',
    '20' : 'CaliperGX QC (DNA)',
    '17' : 'Bioanalyzer QC (Library Validation) 4.0',
    '24' : 'Customer Gel QC'}
AGRLIBVAL = {'Description':'The aggregation step of the library validation protocol',
    '8': 'Aggregate QC (Library Validation) 4.0'}
SEQSTART = {'Description':'These processes are used to set the sequencing_start_date',
    '23':'Cluster Generation (Illumina SBS) 4.0',
    '26':'Denature, Dilute and Load Sample (MiSeq) 4.0',
    '710':'Cluster Generation (HiSeq X) 1.0'}
DILSTART = {'Description':'These processes are used to set the dilution_and_pooling_start_date',
    '40' : 'Library Normalization (MiSeq) 4.0',
    '39' : 'Library Normalization (Illumina SBS) 4.0',
    '715': 'Library Normalization (HiSeq X) 1.0'}
SEQUENCING = {'Description':'Sequencing',
    '38' : 'Illumina Sequencing (Illumina SBS) 4.0',
    '46' : 'MiSeq Run (MiSeq) 4.0',
    '714': 'Illumina Sequencing (HiSeq X) 1.0'}
WORKSET = {'Description':'To identify the work sets on which the samples has been run. The process used to define a workset for the protocol. ',
    '204' : 'Setup Workset/Plate'}
SUMMARY = {'Description':'',
    '356' : 'Project Summary 1.3'}
DEMULTIPLEX={'Description':'',
    '13' : 'Bcl Conversion & Demultiplexing (Illumina SBS) 4.0'}
CALIPER = {'Description':'',
    '20' : 'CaliperGX QC (DNA)',
    '116' : 'CaliperGX QC (RNA)'}
# udf/sample-type constants used elsewhere for filtering.
FINLIB = ['Finished library', 'Amplicon']
PROJ_UDF_EXCEPTIONS = ['customer_reference','uppnex_id','reference_genome','application']
SAMP_UDF_EXCEPTIONS = ['customer_name','reads_requested_(millions)','min_reads',
    'm_reads','dup_rm','status_auto','status_manual','average_size_bp','incoming_qc_status']
# Master index: category name -> category dict, for lookup by name.
PROCESSCATEGORIES = {'INITALQCFINISHEDLIB' : INITALQCFINISHEDLIB,
    'INITALQC':INITALQC,
    'AGRINITQC':AGRINITQC,
    'PREPREPSTART':PREPREPSTART,
    'POOLING':POOLING,
    'PREPSTART':PREPSTART,
    'PREPEND':PREPEND,
    'LIBVAL':LIBVAL,
    'LIBVALFINISHEDLIB':LIBVALFINISHEDLIB,
    'AGRLIBVAL':AGRLIBVAL,
    'SEQSTART':SEQSTART,
    'DILSTART':DILSTART,
    'SEQUENCING':SEQUENCING,
    'WORKSET':WORKSET,
    'SUMMARY':SUMMARY,
    'DEMULTIPLEX':DEMULTIPLEX,
    'CALIPER':CALIPER}
| 6,505 | 2,448 |
"""
Implement a basic calculator to evaluate a simple expression string.
The expression string contains only non-negative integers, +, -, *, / operators and empty spaces.
The integer division should truncate toward zero.
You may assume that the given expression is always valid.
Some examples:
"3+2*2" = 7
" 3/2 " = 1
" 3+5 / 2 " = 5
"""
class Solution:
    """Evaluate simple arithmetic expression strings.

    Supported input: non-negative integer literals, the binary operators
    + - * /, parentheses, and spaces.  Integer division truncates toward
    zero, per the problem statement (" 3/2 " == 1, "(2-3)/2" == 0).
    """

    # @param {string} s
    # @return {integer}
    def calculate(self, s):
        """Evaluate expression *s*; an empty string evaluates to 0."""
        # empty case
        if len(s) == 0:
            return 0
        # Convert to postfix, then evaluate with an operand stack.
        stack = []
        postfix = self.transform_postfix(s)
        for c in postfix:
            if str(c) in "+-/*":  # operator
                b = stack.pop()
                a = stack.pop()
                if c == '+':
                    result = a + b
                elif c == '-':
                    result = a - b
                elif c == '*':
                    result = a * b
                else:  # c == '/'
                    # BUG FIX: plain a / b floor-divides in Python 2
                    # (wrong for negative intermediates, e.g. (2-3)/2
                    # must be 0, not -1) and yields a float in Python 3.
                    # Do explicit truncation toward zero instead.
                    result = abs(a) // abs(b)
                    if (a < 0) != (b < 0):
                        result = -result
                stack.append(result)
            else:  # number
                stack.append(c)
        return stack.pop()

    def rank(self, operator):
        """Operator precedence: 2 for * /, 1 for + -, 0 for '('."""
        if operator == '*' or operator == '/':
            return 2
        elif operator == '+' or operator == '-':
            return 1
        else:  # '('
            return 0

    def transform_postfix(self, s):
        """Shunting-yard conversion of infix string *s* into a postfix
        token list of ints and single-character operator strings."""
        # will hold the transformation
        postfix = []
        # operator stack; may hold + - * / (
        operators = []
        # accumulator for multi-digit numbers
        num = 0
        prev_was_number = False
        for c in s:
            # escape empty chars
            if c == ' ':
                continue
            # number or part of it
            if c >= '0' and c <= '9':
                num = num * 10 + int(c)
                prev_was_number = True
            else:
                # flush any number being buffered
                if prev_was_number:
                    postfix.append(num)
                    num = 0
                    prev_was_number = False
                if c == '(':
                    operators.append(c)
                # pop till matching '('
                elif c == ')':
                    while operators[-1] != '(':
                        postfix.append(operators.pop())
                    operators.pop()  # popping '('
                # operator: pop equal/higher precedence first
                elif c in "+-/*":
                    while operators and self.rank(operators[-1]) >= self.rank(c):
                        postfix.append(operators.pop())
                    operators.append(c)
        # check if the last operand was number
        if prev_was_number:
            postfix.append(num)
        # pop remaining operators
        while operators:
            postfix.append(operators.pop())
        return postfix
# Manual smoke test (Python 2 print statement); expected output: 5
s = Solution()
print s.calculate("3+5 / 2 ")
import contextlib
import unittest
from test import support
from itertools import permutations, product
from random import randrange, sample, choice
import warnings
import sys, array, io
from decimal import Decimal
from fractions import Fraction
try:
from _testbuffer import *
except ImportError:
ndarray = None
try:
import struct
except ImportError:
struct = None
try:
import ctypes
except ImportError:
ctypes = None
try:
with warnings.catch_warnings():
from numpy import ndarray as numpy_array
except ImportError:
numpy_array = None
# When True, some generated test sets are reduced in size.
SHORT_TEST = True
# Native single-character formats; the 0 placeholders are replaced with
# real (min, max) ranges below via native_type_range() when struct exists.
NATIVE = {'?': 0, 'c': 0, 'b': 0, 'B': 0, 'h': 0, 'H': 0, 'i': 0, 'I': 0,
    'l': 0, 'L': 0, 'n': 0, 'N': 0, 'f': 0, 'd': 0, 'P': 0}
# numpy does not support the 'n'/'N' (ssize_t/size_t) codes.
if numpy_array:
    del NATIVE['n']
    del NATIVE['N']
if struct:
    try:
        # 'q'/'Q' are available only if the platform packs 64-bit ints.
        struct.pack('Q', 2 ** 64 - 1)
        NATIVE['q'] = 0
        NATIVE['Q'] = 0
    except struct.error:
        pass
# Standard (machine-independent) formats mapped to (min, max) value ranges.
STANDARD = {'?': (0, 2), 'c': (0, 1 << 8), 'b': (-(1 << 7), 1 << 7), 'B': (
    0, 1 << 8), 'h': (-(1 << 15), 1 << 15), 'H': (0, 1 << 16), 'i': (-(1 <<
    31), 1 << 31), 'I': (0, 1 << 32), 'l': (-(1 << 31), 1 << 31), 'L': (0,
    1 << 32), 'q': (-(1 << 63), 1 << 63), 'Q': (0, 1 << 64), 'f': (-(1 <<
    63), 1 << 63), 'd': (-(1 << 1023), 1 << 1023)}
def native_type_range(fmt):
    """Return range of a native type."""
    if fmt == 'c':
        return 0, 256
    if fmt == '?':
        return 0, 2
    if fmt == 'f':
        return -(1 << 63), 1 << 63
    if fmt == 'd':
        return -(1 << 1023), 1 << 1023
    # Probe progressively smaller bit widths until struct accepts the
    # maximum value; odd exponents mean signed, even mean unsigned.
    for exp in (128, 127, 64, 63, 32, 31, 16, 15, 8, 7):
        try:
            struct.pack(fmt, (1 << exp) - 1)
            break
        except struct.error:
            pass
    return (-(1 << exp), 1 << exp) if exp & 1 else (0, 1 << exp)
# Map each mode prefix to the format table that applies to it.
fmtdict = {'': NATIVE, '@': NATIVE, '<': STANDARD, '>': STANDARD, '=':
    STANDARD, '!': STANDARD}
if struct:
    # Replace the 0 placeholders with real ranges for native formats.
    for fmt in fmtdict['@']:
        fmtdict['@'][fmt] = native_type_range(fmt)
# Format subsets supported by each concrete buffer-providing type.
MEMORYVIEW = NATIVE.copy()
ARRAY = NATIVE.copy()
for k in NATIVE:
    # array.array only supports these type codes.
    if not k in 'bBhHiIlLfd':
        del ARRAY[k]
BYTEFMT = NATIVE.copy()
for k in NATIVE:
    # bytes/bytearray-compatible codes only.
    if not k in 'Bbc':
        del BYTEFMT[k]
# Pseudo modes: 'm' memoryview, 'a' array, 'b' bytes.
fmtdict['m'] = MEMORYVIEW
fmtdict['@m'] = MEMORYVIEW
fmtdict['a'] = ARRAY
fmtdict['b'] = BYTEFMT
fmtdict['@b'] = BYTEFMT
# Indices into the capability tuples below.
MODE = 0
MULT = 1
# Per test-object capabilities: (supported modes, supported multipliers).
cap = {'ndarray': (['', '@', '<', '>', '=', '!'], ['', '1', '2', '3']),
    'array': (['a'], ['']), 'numpy': ([''], ['']), 'memoryview': (['@m',
    'm'], ['']), 'bytefmt': (['@b', 'b'], [''])}
def randrange_fmt(mode, char, obj):
    """Return random item for a type specified by a mode and a single
    format character.

    Draws a value from the (min, max) range registered in the
    module-level `fmtdict` for this mode/char pair, then coerces it to
    the appropriate Python type.
    """
    x = randrange(*fmtdict[mode][char])
    if char == 'c':
        x = bytes([x])
        # numpy rejects NUL here; substitute \x01 -- assumed workaround
        # for the numpy build under test, TODO confirm.
        if obj == 'numpy' and x == b'\x00':
            x = b'\x01'
    if char == '?':
        x = bool(x)
    if char == 'f' or char == 'd':
        # Round-trip through struct so the float is exactly representable
        # at this precision.
        x = struct.pack(char, x)
        x = struct.unpack(char, x)[0]
    return x
def gen_item(fmt, obj):
    """Return single random item."""
    mode, chars = fmt.split('#')
    values = [randrange_fmt(mode, ch, obj) for ch in chars]
    # A single format char yields a scalar; several yield a struct tuple.
    return values[0] if len(values) == 1 else tuple(values)
def gen_items(n, fmt, obj):
    """Return a list of random items (or a scalar)."""
    if n == 0:
        # n == 0 means "scalar": hand back a single item, not a list.
        return gen_item(fmt, obj)
    return [gen_item(fmt, obj) for _ in range(n)]
def struct_items(n, obj):
    """Return (fmt, items, item) for a randomly built compound (struct)
    format with 2-9 members."""
    mode = choice(cap[obj][MODE])
    xfmt = mode + '#'        # internal format: mode, '#', expanded chars
    fmt = mode.strip('amb')  # external format with pseudo modes removed
    nmemb = randrange(2, 10)  # number of struct members
    for _ in range(nmemb):
        char = choice(tuple(fmtdict[mode]))
        multiplier = choice(cap[obj][MULT])
        # Internally repeat the char; externally keep the count prefix.
        xfmt += char * int(multiplier if multiplier else 1)
        fmt += multiplier + char
    items = gen_items(n, xfmt, obj)
    item = gen_item(xfmt, obj)
    return fmt, items, item
def randitems(n, obj='ndarray', mode=None, char=None):
    """Return random format, items, item.

    `mode` and `char` are chosen randomly from the object's capability
    table unless given explicitly.
    """
    if mode is None:
        mode = choice(cap[obj][MODE])
    if char is None:
        char = choice(tuple(fmtdict[mode]))
    multiplier = choice(cap[obj][MULT])
    # Internal format repeats the char; empty multiplier means 1.
    fmt = mode + '#' + char * int(multiplier if multiplier else 1)
    items = gen_items(n, fmt, obj)
    item = gen_item(fmt, obj)
    # External format: drop the pseudo modes 'a'/'m'/'b'.
    fmt = mode.strip('amb') + multiplier + char
    return fmt, items, item
def iter_mode(n, obj='ndarray'):
    """Iterate through supported mode/char combinations."""
    for supported_mode in cap[obj][MODE]:
        for fmt_char in fmtdict[supported_mode]:
            yield randitems(n, obj, supported_mode, fmt_char)
def iter_format(nitems, testobj='ndarray'):
    """Yield (format, items, item) for all possible modes and format
    characters plus one random compound format string."""
    yield from iter_mode(nitems, testobj)
    # Only ndarray supports compound (struct) formats.
    if testobj == 'ndarray':
        yield struct_items(nitems, testobj)
def is_byte_format(fmt):
    """Return True if *fmt* contains any byte-sized format character."""
    return any(ch in fmt for ch in 'cbB')
def is_memoryview_format(fmt):
    """format suitable for memoryview"""
    n = len(fmt)
    # Accept a single char, or '@' plus a single char.
    if n != 1 and not (n == 2 and fmt[0] == '@'):
        return False
    return fmt[n - 1] in MEMORYVIEW
# Native single-char formats that are NOT byte-sized (used when the test
# needs to avoid byte/char-specific behavior).
NON_BYTE_FORMAT = [c for c in fmtdict['@'] if not is_byte_format(c)]
def atomp(lst):
    """Tuple items (representing structs) are regarded as atoms."""
    if isinstance(lst, list):
        return False
    return True
def listp(lst):
    """Return True when *lst* is a genuine (possibly nested) list."""
    result = isinstance(lst, list)
    return result
def prod(lst):
    """Product of list elements (0 for an empty list).

    NB: the empty list deliberately yields 0, not the mathematical
    convention of 1 -- callers such as carray()/farray() rely on it.
    """
    if not lst:
        return 0
    result = 1
    for factor in lst:
        result *= factor
    return result
def strides_from_shape(ndim, shape, itemsize, layout):
    """Calculate strides of a contiguous array. Layout is 'C' or
    'F' (Fortran)."""
    if ndim == 0:
        return ()
    if layout == 'C':
        # Row-major: rightmost stride is itemsize; accumulate leftwards.
        strides = list(shape[1:]) + [itemsize]
        for i in reversed(range(ndim - 1)):
            strides[i] *= strides[i + 1]
    else:
        # Column-major: leftmost stride is itemsize; accumulate rightwards.
        strides = [itemsize] + list(shape[:-1])
        for i in range(1, ndim):
            strides[i] *= strides[i - 1]
    return strides
def _ca(items, s):
    """Convert flat item list to the nested list representation of a
    multidimensional C array with shape 's'."""
    if atomp(items):
        return items
    if not s:
        # Zero remaining dimensions: the single element itself.
        return items[0]
    step = len(items) // s[0] if s[0] else 0
    # Row-major: contiguous chunks become successive sub-arrays.
    return [_ca(items[i * step:(i + 1) * step], s[1:]) for i in range(s[0])]
def _fa(items, s):
    """Convert flat item list to the nested list representation of a
    multidimensional Fortran array with shape 's'."""
    if atomp(items):
        return items
    if not s:
        # Zero remaining dimensions: the single element itself.
        return items[0]
    step = s[0]
    # Column-major: interleaved strided slices become sub-arrays.
    return [_fa(items[i::step], s[1:]) for i in range(s[0])]
def carray(items, shape):
    """Build the nested C-order (row-major) representation of *items*."""
    # A real item list must contain exactly prod(shape) entries; skip the
    # check when any dimension is zero.
    if listp(items) and 0 not in shape and prod(shape) != len(items):
        raise ValueError('prod(shape) != len(items)')
    return _ca(items, shape)
def farray(items, shape):
    """Build the nested Fortran-order (column-major) representation."""
    # Same length check as carray(); zero-sized dimensions are exempt.
    if listp(items) and 0 not in shape and prod(shape) != len(items):
        raise ValueError('prod(shape) != len(items)')
    return _fa(items, shape)
def indices(shape):
    """Generate all possible tuples of indices."""
    return product(*[range(extent) for extent in shape])
def getindex(ndim, ind, strides):
    """Convert multi-dimensional index to the position in the flat list."""
    # Dot product of the first ndim strides with the first ndim indices.
    return sum(s * i for s, i in zip(strides[:ndim], ind[:ndim]))
def transpose(src, shape):
    """Transpose flat item list that is regarded as a multi-dimensional
    matrix defined by shape: dest...[k][j][i] = src[i][j][k]... """
    if not shape:
        return src
    ndim = len(shape)
    # Unit-itemsize C strides for the source and the reversed-shape dest.
    src_strides = strides_from_shape(ndim, shape, 1, 'C')
    dst_strides = strides_from_shape(ndim, shape[::-1], 1, 'C')
    dest = [0] * len(src)
    for idx in indices(shape):
        src_pos = getindex(ndim, idx, src_strides)
        dst_pos = getindex(ndim, idx[::-1], dst_strides)
        dest[dst_pos] = src[src_pos]
    return dest
def _flatten(lst):
    """flatten list"""
    if lst == []:
        return lst
    if atomp(lst):
        # Atoms (including struct tuples) are wrapped, not descended into.
        return [lst]
    out = []
    for elem in lst:
        out.extend(_flatten(elem))
    return out
def flatten(lst):
    """flatten list or return scalar"""
    # Scalars/atoms pass through unchanged; lists are fully flattened.
    return lst if atomp(lst) else _flatten(lst)
def slice_shape(lst, slices):
    """Get the shape of lst after slicing: slices is a list of slice
    objects."""
    if atomp(lst):
        return []
    # Outer extent from the first slice, inner extents recursively.
    outer = len(lst[slices[0]])
    return [outer] + slice_shape(lst[0], slices[1:])
def multislice(lst, slices):
    """Multi-dimensional slicing: slices is a list of slice objects."""
    if atomp(lst):
        return lst
    head, rest = slices[0], slices[1:]
    return [multislice(sub, rest) for sub in lst[head]]
def m_assign(llst, rlst, lslices, rslices):
    """Multi-dimensional slice assignment: llst and rlst are the operands,
    lslices and rslices are lists of slice objects. llst and rlst must
    have the same structure.

    For a two-dimensional example, this is not implemented in Python:

       llst[0:3:2, 0:3:2] = rlst[1:3:1, 1:3:1]

    Instead we write:

       lslices = [slice(0,3,2), slice(0,3,2)]
       rslices = [slice(1,3,1), slice(1,3,1)]
       multislice_assign(llst, rlst, lslices, rslices)
    """
    # Atoms (including struct tuples) are returned unchanged.
    if atomp(rlst):
        return rlst
    # Recurse one dimension at a time: pair the selected sub-lists of
    # both operands, assign recursively, then write the rebuilt sub-lists
    # back through the left-hand slice (in-place mutation of llst).
    rlst = [m_assign(l, r, lslices[1:], rslices[1:]) for l, r in zip(llst[
        lslices[0]], rlst[rslices[0]])]
    llst[lslices[0]] = rlst
    return llst
def cmp_structure(llst, rlst, lslices, rslices):
    """Compare the structure of llst[lslices] and rlst[rslices].

    Returns 0 when the sliced shapes match, -1 otherwise.
    """
    lshape = slice_shape(llst, lslices)
    rshape = slice_shape(rlst, rslices)
    if len(lshape) != len(rshape):
        return -1
    for l_extent, r_extent in zip(lshape, rshape):
        if l_extent != r_extent:
            return -1
        if l_extent == 0:
            # An empty dimension makes everything below it vacuously equal.
            return 0
    return 0
def multislice_assign(llst, rlst, lslices, rslices):
    """Return llst after assigning: llst[lslices] = rlst[rslices]"""
    # Shapes must agree before the in-place assignment is attempted.
    if cmp_structure(llst, rlst, lslices, rslices) < 0:
        raise ValueError('lvalue and rvalue have different structures')
    return m_assign(llst, rlst, lslices, rslices)
def verify_structure(memlen, itemsize, ndim, shape, strides, offset):
    """Verify that the parameters represent a valid array within
    the bounds of the allocated memory:
        memlen: length of the physical memory block
        offset: (char *)buf - mem, i.e. distance of the buffer start
                from the start of the block
    Returns a truthy value when valid, falsy otherwise.
    """
    # Offset and every stride must be aligned to the item size.
    if offset % itemsize != 0:
        return False
    # The first item must lie entirely inside the block.
    if not (0 <= offset and offset + itemsize <= memlen):
        return False
    if any(s % itemsize for s in strides):
        return False
    if ndim <= 0:
        # Only the genuine zero-dimensional layout is acceptable here.
        return ndim == 0 and not shape and not strides
    if 0 in shape:
        # An empty array touches no memory, so any layout is valid.
        return True
    # Lowest (<= 0 strides) and highest (> 0 strides) byte displacements
    # reachable from the buffer start.
    low = high = 0
    for j in range(ndim):
        extent = strides[j] * (shape[j] - 1)
        if strides[j] <= 0:
            low += extent
        else:
            high += extent
    return 0 <= offset + low and offset + high + itemsize <= memlen
def get_item(lst, indices):
    """Return the element of nested list *lst* at the index path *indices*."""
    item = lst
    for idx in indices:
        item = item[idx]
    return item
def memory_index(indices, t):
    """Location of an item in the underlying memory for structure *t*
    (a (memlen, itemsize, ndim, shape, strides, offset) tuple)."""
    _, _, ndim, _, strides, offset = t
    return offset + sum(strides[i] * indices[i] for i in range(ndim))
def is_overlapping(t):
    """Return True if at least one memory location is visited twice while
    iterating through all possible index tuples of structure 't'."""
    memlen, _, _, shape, _, _ = t
    # Seed the bitmask with a bit beyond every valid location so the
    # accumulator is never zero.
    seen = 1 << memlen
    for ind in indices(shape):
        loc_bit = 1 << memory_index(ind, t)
        if seen & loc_bit:
            return True
        seen |= loc_bit
    return False
def rand_structure(itemsize, valid, maxdim=5, maxshape=16, shape=()):
    """Return random structure:
    (memlen, itemsize, ndim, shape, strides, offset)
    If 'valid' is true, the returned structure is valid, otherwise invalid.
    If 'shape' is given, use that instead of creating a random shape.
    """
    if not shape:
        ndim = randrange(maxdim + 1)
        if ndim == 0:
            # Zero-dimensional case: an invalid structure is produced by
            # pushing the offset out of the allocated block.
            if valid:
                return itemsize, itemsize, ndim, (), (), 0
            else:
                nitems = randrange(1, 16 + 1)
                memlen = nitems * itemsize
                offset = -itemsize if randrange(2) == 0 else memlen
                return memlen, itemsize, ndim, (), (), offset
        # Occasionally allow empty (0) or length-1 dimensions.
        minshape = 2
        n = randrange(100)
        if n >= 95 and valid:
            minshape = 0
        elif n >= 90:
            minshape = 1
        shape = [0] * ndim
        for i in range(ndim):
            shape[i] = randrange(minshape, maxshape + 1)
    else:
        ndim = len(shape)
    # Pick per-dimension strides, innermost first; zero strides are only
    # permitted on the rare zero_stride path.
    maxstride = 5
    n = randrange(100)
    zero_stride = True if n >= 95 and n & 1 else False
    strides = [0] * ndim
    strides[ndim - 1] = itemsize * randrange(-maxstride, maxstride + 1)
    if not zero_stride and strides[ndim - 1] == 0:
        strides[ndim - 1] = itemsize
    for i in range(ndim - 2, -1, -1):
        maxstride *= shape[i + 1] if shape[i + 1] else 1
        if zero_stride:
            strides[i] = itemsize * randrange(-maxstride, maxstride + 1)
        else:
            strides[i] = (1, -1)[randrange(2)] * itemsize * randrange(1,
                maxstride + 1)
    # Extreme displacements reachable from the buffer start (0 for empty
    # shapes).  NOTE: nitems below is computed but not used afterwards.
    imin = imax = 0
    if not 0 in shape:
        imin = sum(strides[j] * (shape[j] - 1) for j in range(ndim) if strides[
            j] <= 0)
        imax = sum(strides[j] * (shape[j] - 1) for j in range(ndim) if strides[
            j] > 0)
    nitems = imax - imin
    if valid:
        offset = -imin * itemsize
        memlen = offset + (imax + 1) * itemsize
    else:
        # Invalid: the block is too small and/or the offset lands outside.
        memlen = (-imin + imax) * itemsize
        offset = -imin - itemsize if randrange(2) == 0 else memlen
    return memlen, itemsize, ndim, shape, strides, offset
def randslice_from_slicelen(slicelen, listlen):
    """Return a random slice with exactly *slicelen* elements that fits
    into a list of length *listlen*."""
    start = randrange(listlen - slicelen + 1)
    if slicelen:
        maxstep = (listlen - start) // slicelen
    else:
        maxstep = 1
    step = randrange(1, maxstep + 1)
    result = slice(start, start + slicelen * step, step)
    # Sanity check: the constructed slice must have the requested length.
    _, _, _, control = slice_indices(result, listlen)
    if control != slicelen:
        raise RuntimeError
    return result
def randslice_from_shape(ndim, shape):
    """Create two sets of slices for an array x with shape 'shape'
    such that shapeof(x[lslices]) == shapeof(x[rslices])."""
    lslices = []
    rslices = []
    for n in range(ndim):
        dim = shape[n]
        # Both sides get a slice of the same (random, non-zero if
        # possible) length so the sliced shapes match.
        count = randrange(1, dim + 1) if dim > 0 else 0
        lslices.append(randslice_from_slicelen(count, dim))
        rslices.append(randslice_from_slicelen(count, dim))
    return tuple(lslices), tuple(rslices)
def rand_aligned_slices(maxdim=5, maxshape=16):
    """Create (lshape, rshape, tuple(lslices), tuple(rslices)) such that
    shapeof(x[lslices]) == shapeof(y[rslices]), where x is an array
    with shape 'lshape' and y is an array with shape 'rshape'."""
    ndim = randrange(1, maxdim + 1)
    # Occasionally allow empty (0) or length-1 dimensions.
    minshape = 2
    n = randrange(100)
    if n >= 95:
        minshape = 0
    elif n >= 90:
        minshape = 1
    all_random = True if randrange(100) >= 80 else False
    lshape = [0] * ndim
    rshape = [0] * ndim
    lslices = [0] * ndim
    rslices = [0] * ndim
    for n in range(ndim):
        small = randrange(minshape, maxshape + 1)
        big = randrange(minshape, maxshape + 1)
        if big < small:
            big, small = small, big
        if all_random:
            # Fully random slice for the smaller dimension; its effective
            # length is derived afterwards via slice_indices().
            start = randrange(-small, small + 1)
            stop = randrange(-small, small + 1)
            step = (1, -1)[randrange(2)] * randrange(1, small + 2)
            s_small = slice(start, stop, step)
            _, _, _, slicelen = slice_indices(s_small, small)
        else:
            slicelen = randrange(1, small + 1) if small > 0 else 0
            s_small = randslice_from_slicelen(slicelen, small)
        # A slice of the same length for the bigger dimension.
        s_big = randslice_from_slicelen(slicelen, big)
        # Randomly decide which side receives the bigger dimension.
        if randrange(2) == 0:
            rshape[n], lshape[n] = big, small
            rslices[n], lslices[n] = s_big, s_small
        else:
            rshape[n], lshape[n] = small, big
            rslices[n], lslices[n] = s_small, s_big
    return lshape, rshape, tuple(lslices), tuple(rslices)
def randitems_from_structure(fmt, t):
    """Return a list of random items that fill the memory of structure
    't', using format character 'fmt'."""
    memlen, itemsize = t[0], t[1]
    return gen_items(memlen // itemsize, '#' + fmt, 'numpy')
def ndarray_from_structure(items, fmt, t, flags=0):
    """Build an ndarray from the tuple returned by rand_structure()."""
    _, _, _, shape, strides, offset = t
    return ndarray(items, shape=shape, strides=strides, format=fmt,
                   offset=offset, flags=ND_WRITABLE | flags)
def numpy_array_from_structure(items, fmt, t):
    """Build a numpy_array from the tuple returned by rand_structure()."""
    memlen, itemsize, _, shape, strides, offset = t
    buf = bytearray(memlen)
    # Pack the items contiguously; the strides/offset reinterpret them.
    pos = 0
    for value in items:
        struct.pack_into(fmt, buf, pos, value)
        pos += itemsize
    return numpy_array(buffer=buf, shape=shape, strides=strides, dtype=fmt,
                       offset=offset)
def cast_items(exporter, fmt, itemsize, shape=None):
    """Interpret the raw memory of 'exporter' as a list of items with
    size 'itemsize'. If shape=None, the new structure is assumed to
    be 1-D with n * itemsize = bytelen. If shape is given, the usual
    constraint for contiguous arrays prod(shape) * itemsize = bytelen
    applies. On success, return (items, shape). If the constraints
    cannot be met, return (None, None). If a chunk of bytes is interpreted
    as NaN as a result of float conversion, return ('nan', None)."""
    bytelen = exporter.nbytes
    if shape:
        # Known multi-item shape: total size must match exactly.
        if prod(shape) * itemsize != bytelen:
            return None, shape
    elif shape == []:
        # Zero-dimensional target: exactly one item must fill the buffer.
        if exporter.ndim == 0 or itemsize != bytelen:
            return None, shape
    else:
        # No shape given: derive 1-D; itemsize must divide the length.
        n, r = divmod(bytelen, itemsize)
        shape = [n]
        if r != 0:
            return None, shape
    mem = exporter.tobytes()
    byteitems = [mem[i:i + itemsize] for i in range(0, len(mem), itemsize)]
    items = []
    for v in byteitems:
        item = struct.unpack(fmt, v)[0]
        # NaN is the only value unequal to itself.
        if item != item:
            return 'nan', shape
        items.append(item)
    return (items, shape) if shape != [] else (items[0], shape)
def gencastshapes():
    """Generate shapes to test casting: every 1-D shape [0]..[31], then
    one random higher-dimensional shape and one random low-dimensional
    shape (dimensions mostly >= 2, occasionally 1)."""
    for length in range(32):
        yield [length]
    for lo, hi in ((4, 6), (2, 4)):
        dims = randrange(lo, hi)
        least = 1 if randrange(100) > 80 else 2
        yield [randrange(least, 5) for _ in range(dims)]
def genslices(n):
    """Generate all (start, stop, step) triples for a single dimension of
    length n, each component ranging over [-n, n]."""
    span = range(-n, n + 1)
    return product(span, span, span)
def genslices_ndim(ndim, shape):
    """Generate every combination of per-dimension slice triples for
    'shape'."""
    per_dim = (genslices(shape[i]) for i in range(ndim))
    return product(*per_dim)
def rslice(n, allow_empty=False):
    """Return a random slice for a single dimension of length n.

    The slice is non-empty unless allow_empty is true (or n == 0).
    """
    lo = 0 if allow_empty or n == 0 else 1
    return randslice_from_slicelen(randrange(lo, n + 1), n)
def rslices(n, allow_empty=False):
    """Yield five random slices for a single dimension of length n."""
    return (rslice(n, allow_empty) for _ in range(5))
def rslices_ndim(ndim, shape, iterations=5):
    """Yield random slice tuples for 'shape': first non-empty ones, then
    possibly-empty ones, and finally a zero-step (invalid) tuple."""
    for allow_empty in (False, True):
        for _ in range(iterations):
            yield tuple(rslice(shape[n], allow_empty=allow_empty)
                        for n in range(ndim))
    yield tuple(slice(0, 1, 0) for _ in range(ndim))
def rpermutation(iterable, r=None):
    """Yield a single random r-length sample of *iterable* (the random
    stand-in for itertools.permutations in short test runs)."""
    pool = tuple(iterable)
    if r is None:
        r = len(pool)
    yield tuple(sample(pool, r))
def ndarray_print(nd):
    """Print ndarray for debugging."""
    try:
        data = nd.tolist()
    except (TypeError, NotImplementedError):
        data = nd.tobytes()
    # offset/flags only exist on the test ndarray type, not memoryview.
    if isinstance(nd, ndarray):
        offset, flags = nd.offset, nd.flags
    else:
        offset = flags = 'unknown'
    print(
        "ndarray(%s, shape=%s, strides=%s, suboffsets=%s, offset=%s, format='%s', itemsize=%s, flags=%s)"
        % (data, nd.shape, nd.strides, nd.suboffsets, offset, nd.format,
           nd.itemsize, flags))
    sys.stdout.flush()
# Number of random iterations and limits on generated array dimensions
# and shapes; scaled down when SHORT_TEST is set.
ITERATIONS = 100
MAXDIM = 5
MAXSHAPE = 10
if SHORT_TEST:
    ITERATIONS = 10
    MAXDIM = 3
    MAXSHAPE = 4
    # In short runs, replace the exhaustive generators with their
    # random-sampling counterparts defined above.
    genslices = rslices
    genslices_ndim = rslices_ndim
    permutations = rpermutation
@unittest.skipUnless(struct, 'struct module required for this test.')
@unittest.skipUnless(ndarray, 'ndarray object required for this test')
class TestBufferProtocol(unittest.TestCase):
    def setUp(self):
        # Cache the size of a C void pointer; verify() uses it to predict
        # the converted first stride of suboffset (PIL-style) buffers.
        self.sizeof_void_p = get_sizeof_void_p()
    def verify(self, result, obj=-1, itemsize={1}, fmt=-1, readonly={1},
        ndim={1}, shape=-1, strides=-1, lst=-1, sliced=False, cast=False):
        """Check that buffer/exporter 'result' has the expected metadata
        (obj, itemsize, fmt, readonly, ndim, shape, strides) and the
        expected logical contents 'lst'.

        NOTE(review): the defaults (-1 and the set literal {1}) look like
        machine-generated sentinels; they are never mutated here, so the
        shared mutable default is harmless, but callers are expected to
        pass every keyword explicitly.
        """
        # Expected total length in bytes.
        if shape:
            expected_len = prod(shape) * itemsize
        elif not fmt:
            expected_len = len(lst)
        else:
            expected_len = itemsize
        # If the exporter uses suboffsets, compute the expected suboffsets
        # and replace the first stride with (+/-) the pointer size.
        suboffsets = ()
        if result.suboffsets:
            self.assertGreater(ndim, 0)
            suboffset0 = 0
            for n in range(1, ndim):
                if shape[n] == 0:
                    break
                if strides[n] <= 0:
                    suboffset0 += -strides[n] * (shape[n] - 1)
            suboffsets = [suboffset0] + [(-1) for v in range(ndim - 1)]
            stride0 = self.sizeof_void_p
            if strides[0] < 0:
                stride0 = -stride0
            strides = [stride0] + list(strides[1:])
        # Metadata checks.
        self.assertIs(result.obj, obj)
        self.assertEqual(result.nbytes, expected_len)
        self.assertEqual(result.itemsize, itemsize)
        self.assertEqual(result.format, fmt)
        self.assertEqual(result.readonly, readonly)
        self.assertEqual(result.ndim, ndim)
        self.assertEqual(result.shape, tuple(shape))
        if not (sliced and suboffsets):
            self.assertEqual(result.strides, tuple(strides))
        self.assertEqual(result.suboffsets, tuple(suboffsets))
        if isinstance(result, ndarray) or is_memoryview_format(fmt):
            rep = result.tolist() if fmt else result.tobytes()
            self.assertEqual(rep, lst)
        if not fmt:
            return
        if not cast:
            # Compare item by item via the C-level item pointer, while
            # rebuilding the expected packed byte representation in b.
            b = bytearray()
            buf_err = None
            for ind in indices(shape):
                try:
                    item1 = get_pointer(result, ind)
                    item2 = get_item(lst, ind)
                    if isinstance(item2, tuple):
                        x = struct.pack(fmt, *item2)
                    else:
                        x = struct.pack(fmt, item2)
                    b.extend(x)
                except BufferError:
                    buf_err = True
                    break
                self.assertEqual(item1, item2)
            if not buf_err:
                self.assertEqual(result.tobytes(), b)
                m = memoryview(result)
                h = ''.join('%02x' % c for c in b)
                self.assertEqual(m.hex(), h)
                # Exercise the contiguity conversions for all orders.
                ff = fmt if fmt else 'B'
                flattened = flatten(lst)
                for order in ['C', 'F', 'A']:
                    expected = result
                    if order == 'F':
                        if not is_contiguous(result, 'A') or is_contiguous(
                            result, 'C'):
                            trans = transpose(flattened, shape)
                            expected = ndarray(trans, shape=shape, format=
                                ff, flags=ND_FORTRAN)
                    elif not is_contiguous(result, 'A') or is_contiguous(result
                        , 'F') and order == 'C':
                        expected = ndarray(flattened, shape=shape, format=ff)
                    contig = get_contiguous(result, PyBUF_READ, order)
                    self.assertEqual(contig.tobytes(), b)
                    self.assertTrue(cmp_contig(contig, expected))
                    if ndim == 0:
                        continue
                    # Round-trip through py_buffer_to_contiguous() for
                    # 'C', 'F' and 'A' and compare via memoryview.
                    nmemb = len(flattened)
                    ro = 0 if readonly else ND_WRITABLE
                    contig = py_buffer_to_contiguous(result, 'C', PyBUF_FULL_RO
                        )
                    self.assertEqual(len(contig), nmemb * itemsize)
                    initlst = [struct.unpack_from(fmt, contig, n * itemsize
                        ) for n in range(nmemb)]
                    if len(initlst[0]) == 1:
                        initlst = [v[0] for v in initlst]
                    y = ndarray(initlst, shape=shape, flags=ro, format=fmt)
                    self.assertEqual(memoryview(y), memoryview(result))
                    contig = py_buffer_to_contiguous(result, 'F', PyBUF_FULL_RO
                        )
                    self.assertEqual(len(contig), nmemb * itemsize)
                    initlst = [struct.unpack_from(fmt, contig, n * itemsize
                        ) for n in range(nmemb)]
                    if len(initlst[0]) == 1:
                        initlst = [v[0] for v in initlst]
                    y = ndarray(initlst, shape=shape, flags=ro | ND_FORTRAN,
                        format=fmt)
                    self.assertEqual(memoryview(y), memoryview(result))
                    contig = py_buffer_to_contiguous(result, 'A', PyBUF_FULL_RO
                        )
                    self.assertEqual(len(contig), nmemb * itemsize)
                    initlst = [struct.unpack_from(fmt, contig, n * itemsize
                        ) for n in range(nmemb)]
                    if len(initlst[0]) == 1:
                        initlst = [v[0] for v in initlst]
                    f = ND_FORTRAN if is_contiguous(result, 'F') else 0
                    y = ndarray(initlst, shape=shape, flags=f | ro, format=fmt)
                    self.assertEqual(memoryview(y), memoryview(result))
        if is_memoryview_format(fmt):
            # A memoryview of the result must expose the same metadata
            # and contents.
            try:
                m = memoryview(result)
            except BufferError:
                return
            ex = result.obj if isinstance(result, memoryview) else result
            self.assertIs(m.obj, ex)
            self.assertEqual(m.nbytes, expected_len)
            self.assertEqual(m.itemsize, itemsize)
            self.assertEqual(m.format, fmt)
            self.assertEqual(m.readonly, readonly)
            self.assertEqual(m.ndim, ndim)
            self.assertEqual(m.shape, tuple(shape))
            if not (sliced and suboffsets):
                self.assertEqual(m.strides, tuple(strides))
            self.assertEqual(m.suboffsets, tuple(suboffsets))
            n = 1 if ndim == 0 else len(lst)
            self.assertEqual(len(m), n)
            rep = result.tolist() if fmt else result.tobytes()
            self.assertEqual(rep, lst)
            self.assertEqual(m, result)
    def verify_getbuf(self, orig_ex, ex, req, sliced=False):
        """Issue getbuffer request 'req' against exporter 'ex' and verify
        the result.

        orig_ex is the original exporter (ex may be a memoryview of it).
        Requests the exporter cannot satisfy must raise BufferError.
        """
        def simple_fmt(ex):
            return ex.format == '' or ex.format == 'B'
        def match(req, flag):
            return req & flag == flag
        # Every request/exporter combination that is invalid: writable on
        # readonly, contiguity the exporter lacks, suboffsets without
        # PyBUF_INDIRECT, strides required but absent, FORMAT without ND.
        if ex.readonly and match(req, PyBUF_WRITABLE) or match(req,
            PyBUF_C_CONTIGUOUS) and not ex.c_contiguous or match(req,
            PyBUF_F_CONTIGUOUS) and not ex.f_contiguous or match(req,
            PyBUF_ANY_CONTIGUOUS) and not ex.contiguous or not match(req,
            PyBUF_INDIRECT) and ex.suboffsets or not match(req, PyBUF_STRIDES
            ) and not ex.c_contiguous or not match(req, PyBUF_ND) and match(req
            , PyBUF_FORMAT):
            self.assertRaises(BufferError, ndarray, ex, getbuf=req)
            return
        if isinstance(ex, ndarray) or is_memoryview_format(ex.format):
            lst = ex.tolist()
        else:
            nd = ndarray(ex, getbuf=PyBUF_FULL_RO)
            lst = nd.tolist()
        # Expected values default to the exporter's; reduced requests
        # reset format/ndim/shape/strides accordingly.
        ro = 0 if match(req, PyBUF_WRITABLE) else ex.readonly
        fmt = ex.format
        itemsize = ex.itemsize
        ndim = ex.ndim
        if not match(req, PyBUF_FORMAT):
            fmt = ''
            lst = orig_ex.tobytes()
        if not match(req, PyBUF_ND):
            ndim = 1
        shape = orig_ex.shape if match(req, PyBUF_ND) else ()
        strides = orig_ex.strides if match(req, PyBUF_STRIDES) else ()
        nd = ndarray(ex, getbuf=req)
        self.verify(nd, obj=ex, itemsize=itemsize, fmt=fmt, readonly=ro,
            ndim=ndim, shape=shape, strides=strides, lst=lst, sliced=sliced)
    def test_ndarray_getbuf(self):
        """Exhaustively exercise getbuffer requests against ndarray
        exporters with varied formats, shapes, strides, offsets and
        flags, directly and through memoryviews."""
        requests = (PyBUF_INDIRECT, PyBUF_STRIDES, PyBUF_ND, PyBUF_SIMPLE,
            PyBUF_C_CONTIGUOUS, PyBUF_F_CONTIGUOUS, PyBUF_ANY_CONTIGUOUS,
            PyBUF_FULL, PyBUF_FULL_RO, PyBUF_RECORDS, PyBUF_RECORDS_RO,
            PyBUF_STRIDED, PyBUF_STRIDED_RO, PyBUF_CONTIG, PyBUF_CONTIG_RO)
        # (items, format) pairs: bool, signed/unsigned byte, signed long.
        items_fmt = ([(True if x % 2 else False) for x in range(12)], '?'), ([
            1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], 'b'), ([1, 2, 3, 4, 5,
            6, 7, 8, 9, 10, 11, 12], 'B'), ([(2 ** 31 - x if x % 2 else -2 **
            31 + x) for x in range(12)], 'l')
        # (shape, strides, offset) triples, expressed in items.
        structure = ([], [], 0), ([1, 3, 1], [], 0), ([12], [], 0), ([12],
            [-1], 11), ([6], [2], 0), ([6], [-2], 11), ([3, 4], [], 0), ([3,
            4], [-4, -1], 11), ([2, 2], [4, 1], 4), ([2, 2], [-4, -1], 8)
        ndflags = (0, ND_WRITABLE, ND_FORTRAN, ND_FORTRAN | ND_WRITABLE,
            ND_PIL, ND_PIL | ND_WRITABLE)
        real_flags = (0, PyBUF_WRITABLE, PyBUF_FORMAT, PyBUF_WRITABLE |
            PyBUF_FORMAT)
        for items, fmt in items_fmt:
            itemsize = struct.calcsize(fmt)
            for shape, strides, offset in structure:
                # Convert item units to bytes.
                strides = [(v * itemsize) for v in strides]
                offset *= itemsize
                for flags in ndflags:
                    if strides and flags & ND_FORTRAN:
                        continue
                    if not shape and flags & ND_PIL:
                        continue
                    _items = items if shape else items[0]
                    ex1 = ndarray(_items, format=fmt, flags=flags, shape=
                        shape, strides=strides, offset=offset)
                    ex2 = ex1[::-2] if shape else None
                    m1 = memoryview(ex1)
                    if ex2:
                        m2 = memoryview(ex2)
                    if ex1.ndim == 0 or ex1.ndim == 1 and shape and strides:
                        self.assertEqual(m1, ex1)
                    if ex2 and ex2.ndim == 1 and shape and strides:
                        self.assertEqual(m2, ex2)
                    for req in requests:
                        for bits in real_flags:
                            self.verify_getbuf(ex1, ex1, req | bits)
                            self.verify_getbuf(ex1, m1, req | bits)
                            if ex2:
                                self.verify_getbuf(ex2, ex2, req | bits,
                                    sliced=True)
                                self.verify_getbuf(ex2, m2, req | bits,
                                    sliced=True)
        items = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
        # An exporter whose getbuffer is forced to fail.
        ex = ndarray(items, shape=[12], flags=ND_GETBUF_FAIL)
        self.assertRaises(BufferError, ndarray, ex)
        # A PyBUF_SIMPLE re-exporter cannot satisfy richer requests.
        base = ndarray([9], [1])
        ex = ndarray(base, getbuf=PyBUF_SIMPLE)
        self.assertRaises(BufferError, ndarray, ex, getbuf=PyBUF_WRITABLE)
        self.assertRaises(BufferError, ndarray, ex, getbuf=PyBUF_ND)
        self.assertRaises(BufferError, ndarray, ex, getbuf=PyBUF_STRIDES)
        self.assertRaises(BufferError, ndarray, ex, getbuf=PyBUF_C_CONTIGUOUS)
        self.assertRaises(BufferError, ndarray, ex, getbuf=PyBUF_F_CONTIGUOUS)
        self.assertRaises(BufferError, ndarray, ex, getbuf=PyBUF_ANY_CONTIGUOUS
            )
        nd = ndarray(ex, getbuf=PyBUF_SIMPLE)
        # Shapes containing a 1 or a 0 are both C- and F-contiguous.
        for shape in ([1, 12, 1], [7, 0, 7]):
            for order in (0, ND_FORTRAN):
                ex = ndarray(items, shape=shape, flags=order | ND_WRITABLE)
                self.assertTrue(is_contiguous(ex, 'F'))
                self.assertTrue(is_contiguous(ex, 'C'))
                for flags in requests:
                    nd = ndarray(ex, getbuf=flags)
                    self.assertTrue(is_contiguous(nd, 'F'))
                    self.assertTrue(is_contiguous(nd, 'C'))
    def test_ndarray_exceptions(self):
        """Invalid constructor/push arguments and invalid helper calls
        must raise the documented exception types."""
        nd = ndarray([9], [1])
        ndm = ndarray([9], [1], flags=ND_VAREXPORT)
        # Argument validation, identical for the constructor and push().
        for c in (ndarray, nd.push, ndm.push):
            # wrong argument types
            self.assertRaises(TypeError, c, {1, 2, 3})
            self.assertRaises(TypeError, c, [1, 2, '3'])
            self.assertRaises(TypeError, c, [1, 2, (3, 4)])
            self.assertRaises(TypeError, c, [1, 2, 3], shape={3})
            self.assertRaises(TypeError, c, [1, 2, 3], shape=[3], strides={1})
            self.assertRaises(TypeError, c, [1, 2, 3], shape=[3], offset=[])
            self.assertRaises(TypeError, c, [1], shape=[1], format={})
            self.assertRaises(TypeError, c, [1], shape=[1], flags={})
            self.assertRaises(TypeError, c, [1], shape=[1], getbuf={})
            self.assertRaises(TypeError, c, [1], shape=[1], strides=[1],
                flags=ND_FORTRAN)
            self.assertRaises(TypeError, c, [1], shape=[], flags=ND_PIL)
            # items/shape/format mismatches
            self.assertRaises(ValueError, c, [], shape=[1])
            self.assertRaises(ValueError, c, ['XXX'], shape=[1], format='L')
            self.assertRaises(struct.error, c, [1000], shape=[1], format='B')
            self.assertRaises(ValueError, c, [1, (2, 3)], shape=[2], format='B'
                )
            self.assertRaises(ValueError, c, [1, 2, 3], shape=[3], format='QL')
            # shape limits and invalid shape values
            n = ND_MAX_NDIM + 1
            self.assertRaises(ValueError, c, [1] * n, shape=[1] * n)
            self.assertRaises(ValueError, c, [1], shape=[-1])
            self.assertRaises(ValueError, c, [1, 2, 3], shape=['3'])
            self.assertRaises(OverflowError, c, [1], shape=[2 ** 128])
            self.assertRaises(ValueError, c, [1, 2, 3, 4, 5], shape=[2, 2],
                offset=3)
            # invalid strides/offset combinations
            self.assertRaises(ValueError, c, [1, 2, 3], shape=[3], strides=
                ['1'])
            self.assertRaises(OverflowError, c, [1], shape=[1], strides=[2 **
                128])
            self.assertRaises(ValueError, c, [1, 2], shape=[2, 1], strides=[1])
            self.assertRaises(ValueError, c, [1, 2, 3, 4], shape=[2],
                strides=[3], format='L')
            self.assertRaises(ValueError, c, [1, 2, 3], shape=[3], offset=4)
            self.assertRaises(ValueError, c, [1, 2, 3], shape=[1], offset=3,
                format='L')
            # invalid formats
            self.assertRaises(ValueError, c, [1, 2, 3], shape=[3], format='')
            self.assertRaises(struct.error, c, [(1, 2, 3)], shape=[1],
                format='@#$')
            # striding out of the memory bounds
            items = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
            self.assertRaises(ValueError, c, items, shape=[2, 3], strides=[
                -3, -2], offset=5)
            self.assertRaises(TypeError, c, bytearray(), format='Q')
            self.assertRaises(TypeError, c, [1], shape=[1], getbuf=PyBUF_FULL)
            self.assertRaises(TypeError, c, [1])
        # Requesting a writable buffer from a readonly exporter.
        self.assertRaises(BufferError, ndarray, b'123', getbuf=PyBUF_WRITABLE)
        # push/pop state errors.
        nd = ndarray([9], [1], flags=ND_VAREXPORT)
        self.assertRaises(ValueError, nd.push, [1], [1], flags=ND_VAREXPORT)
        nd = ndarray(b'123')
        self.assertRaises(BufferError, nd.push, [1], [1])
        self.assertRaises(BufferError, nd.pop)
        nd = ndarray([9], [1])
        nd.push([1], [1])
        m = memoryview(nd)
        self.assertRaises(BufferError, nd.push, [1], [1])
        self.assertRaises(BufferError, nd.pop)
        m.release()
        nd.pop()
        self.assertRaises(BufferError, nd.pop)
        del nd
        # get_pointer() argument validation.
        self.assertRaises(TypeError, get_pointer, {}, [1, 2, 3])
        self.assertRaises(TypeError, get_pointer, b'123', {})
        nd = ndarray(list(range(100)), shape=[1] * 100)
        self.assertRaises(ValueError, get_pointer, nd, [5])
        nd = ndarray(list(range(12)), shape=[3, 4])
        self.assertRaises(ValueError, get_pointer, nd, [2, 3, 4])
        self.assertRaises(ValueError, get_pointer, nd, [3, 3])
        self.assertRaises(ValueError, get_pointer, nd, [-3, 3])
        self.assertRaises(OverflowError, get_pointer, nd, [1 << 64, 3])
        # tolist()/memoryview_from_buffer() restrictions.
        ex = ndarray([1, 2, 3], shape=[3], format='L')
        nd = ndarray(ex, getbuf=PyBUF_SIMPLE)
        self.assertRaises(ValueError, nd.tolist)
        ex1 = ndarray([1, 2, 3], shape=[3], format='L')
        ex2 = ndarray(ex1)
        nd = ndarray(ex2)
        self.assertRaises(TypeError, nd.memoryview_from_buffer)
        nd = ndarray([(1,) * 200], shape=[1], format='L' * 200)
        self.assertRaises(TypeError, nd.memoryview_from_buffer)
        n = ND_MAX_NDIM
        nd = ndarray(list(range(n)), shape=[1] * n)
        self.assertRaises(ValueError, nd.memoryview_from_buffer)
        # get_contiguous() argument validation.
        nd = ndarray([1], shape=[1])
        self.assertRaises(TypeError, get_contiguous, 1, 2, 3, 4, 5)
        self.assertRaises(TypeError, get_contiguous, nd, 'xyz', 'C')
        self.assertRaises(OverflowError, get_contiguous, nd, 2 ** 64, 'C')
        self.assertRaises(TypeError, get_contiguous, nd, PyBUF_READ, 961)
        self.assertRaises(UnicodeEncodeError, get_contiguous, nd,
            PyBUF_READ, '\u2007')
        self.assertRaises(ValueError, get_contiguous, nd, PyBUF_READ, 'Z')
        self.assertRaises(ValueError, get_contiguous, nd, 255, 'A')
        # cmp_contig() / is_contiguous() argument validation.
        nd = ndarray([1], shape=[1])
        self.assertRaises(TypeError, cmp_contig, 1, 2, 3, 4, 5)
        self.assertRaises(TypeError, cmp_contig, {}, nd)
        self.assertRaises(TypeError, cmp_contig, nd, {})
        nd = ndarray([1], shape=[1])
        self.assertRaises(TypeError, is_contiguous, 1, 2, 3, 4, 5)
        self.assertRaises(TypeError, is_contiguous, {}, 'A')
        self.assertRaises(TypeError, is_contiguous, nd, 201)
    def test_ndarray_linked_list(self):
        """Build a chain of five pushed buffers, take a memoryview of each,
        and release the views in every possible order; pop() must still
        raise BufferError afterwards."""
        for perm in permutations(range(5)):
            m = [0] * 5
            nd = ndarray([1, 2, 3], shape=[3], flags=ND_VAREXPORT)
            m[0] = memoryview(nd)
            for i in range(1, 5):
                nd.push([1, 2, 3], shape=[3])
                m[i] = memoryview(nd)
            # Release in the order given by this permutation.
            for i in range(5):
                m[perm[i]].release()
            self.assertRaises(BufferError, nd.pop)
            del nd
    def test_ndarray_format_scalar(self):
        """Zero-dimensional (scalar) arrays for every supported format."""
        for fmt, scalar, _ in iter_format(0):
            itemsize = struct.calcsize(fmt)
            nd = ndarray(scalar, shape=(), format=fmt)
            self.verify(nd, obj=None, itemsize=itemsize, fmt=fmt, readonly=
                1, ndim=0, shape=(), strides=(), lst=scalar)
    def test_ndarray_format_shape(self):
        """One-dimensional arrays, flat and PIL-style, for every format."""
        nitems = randrange(1, 10)
        for fmt, items, _ in iter_format(nitems):
            itemsize = struct.calcsize(fmt)
            for flags in (0, ND_PIL):
                nd = ndarray(items, shape=[nitems], format=fmt, flags=flags)
                self.verify(nd, obj=None, itemsize=itemsize, fmt=fmt,
                    readonly=1, ndim=1, shape=(nitems,), strides=(itemsize,
                    ), lst=items)
    def test_ndarray_format_strides(self):
        """One-dimensional arrays with positive and negative strides."""
        nitems = randrange(1, 30)
        for fmt, items, _ in iter_format(nitems):
            itemsize = struct.calcsize(fmt)
            for step in range(-5, 5):
                if step == 0:
                    continue
                shape = [len(items[::step])]
                strides = [step * itemsize]
                # Negative steps start at the last item.
                offset = itemsize * (nitems - 1) if step < 0 else 0
                for flags in (0, ND_PIL):
                    nd = ndarray(items, shape=shape, strides=strides,
                        format=fmt, offset=offset, flags=flags)
                    self.verify(nd, obj=None, itemsize=itemsize, fmt=fmt,
                        readonly=1, ndim=1, shape=shape, strides=strides,
                        lst=items[::step])
    def test_ndarray_fortran(self):
        """A PyBUF_F_CONTIGUOUS request on a column-major exporter yields
        the items in Fortran order."""
        items = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
        ex = ndarray(items, shape=(3, 4), strides=(1, 3))
        nd = ndarray(ex, getbuf=PyBUF_F_CONTIGUOUS | PyBUF_FORMAT)
        self.assertEqual(nd.tolist(), farray(items, (3, 4)))
    def test_ndarray_multidim(self):
        """Multi-dimensional arrays (0-4 dims) in C and Fortran order,
        flat and PIL-style, with random formats and shape permutations."""
        for ndim in range(5):
            shape_t = [randrange(2, 10) for _ in range(ndim)]
            nitems = prod(shape_t)
            for shape in permutations(shape_t):
                fmt, items, _ = randitems(nitems)
                itemsize = struct.calcsize(fmt)
                for flags in (0, ND_PIL):
                    if ndim == 0 and flags == ND_PIL:
                        continue
                    # C (row-major) array.
                    nd = ndarray(items, shape=shape, format=fmt, flags=flags)
                    strides = strides_from_shape(ndim, shape, itemsize, 'C')
                    lst = carray(items, shape)
                    self.verify(nd, obj=None, itemsize=itemsize, fmt=fmt,
                        readonly=1, ndim=ndim, shape=shape, strides=strides,
                        lst=lst)
                    if is_memoryview_format(fmt):
                        # A contiguous re-export drops the strides.
                        ex = ndarray(items, shape=shape, format=fmt)
                        nd = ndarray(ex, getbuf=PyBUF_CONTIG_RO | PyBUF_FORMAT)
                        self.assertTrue(nd.strides == ())
                        mv = nd.memoryview_from_buffer()
                        self.verify(mv, obj=None, itemsize=itemsize, fmt=
                            fmt, readonly=1, ndim=ndim, shape=shape,
                            strides=strides, lst=lst)
                    # Fortran (column-major) array.
                    nd = ndarray(items, shape=shape, format=fmt, flags=
                        flags | ND_FORTRAN)
                    strides = strides_from_shape(ndim, shape, itemsize, 'F')
                    lst = farray(items, shape)
                    self.verify(nd, obj=None, itemsize=itemsize, fmt=fmt,
                        readonly=1, ndim=ndim, shape=shape, strides=strides,
                        lst=lst)
    def test_ndarray_index_invalid(self):
        """Indexed writes to readonly buffers, item deletion, huge
        indices, and values that do not fit the format must raise."""
        # not writable
        nd = ndarray([1], shape=[1])
        self.assertRaises(TypeError, nd.__setitem__, 1, 8)
        mv = memoryview(nd)
        self.assertEqual(mv, nd)
        self.assertRaises(TypeError, mv.__setitem__, 1, 8)
        # cannot delete items
        nd = ndarray([1], shape=[1], flags=ND_WRITABLE)
        self.assertRaises(TypeError, nd.__delitem__, 1)
        mv = memoryview(nd)
        self.assertEqual(mv, nd)
        self.assertRaises(TypeError, mv.__delitem__, 1)
        # index far beyond any native integer range
        nd = ndarray([1], shape=[1], flags=ND_WRITABLE)
        self.assertRaises(OverflowError, nd.__getitem__, 1 << 64)
        self.assertRaises(OverflowError, nd.__setitem__, 1 << 64, 8)
        mv = memoryview(nd)
        self.assertEqual(mv, nd)
        self.assertRaises(IndexError, mv.__getitem__, 1 << 64)
        self.assertRaises(IndexError, mv.__setitem__, 1 << 64, 8)
        # values that do not fit a single-byte format
        items = [1, 2, 3, 4, 5, 6, 7, 8]
        nd = ndarray(items, shape=[len(items)], format='B', flags=ND_WRITABLE)
        self.assertRaises(struct.error, nd.__setitem__, 2, 300)
        self.assertRaises(ValueError, nd.__setitem__, 1, (100, 200))
        mv = memoryview(nd)
        self.assertEqual(mv, nd)
        self.assertRaises(ValueError, mv.__setitem__, 2, 300)
        self.assertRaises(TypeError, mv.__setitem__, 1, (100, 200))
        # mismatched tuple assignment for a multi-item format
        items = [(1, 2), (3, 4), (5, 6)]
        nd = ndarray(items, shape=[len(items)], format='LQ', flags=ND_WRITABLE)
        self.assertRaises(ValueError, nd.__setitem__, 2, 300)
        self.assertRaises(struct.error, nd.__setitem__, 1, (b'\x001', 200))
    def test_ndarray_index_scalar(self):
        """Zero-dimensional arrays are indexed with () or ...; integer
        indexing is a TypeError."""
        nd = ndarray(1, shape=(), flags=ND_WRITABLE)
        mv = memoryview(nd)
        self.assertEqual(mv, nd)
        # () yields the item itself; ... yields a view of the whole array.
        x = nd[()]
        self.assertEqual(x, 1)
        x = nd[...]
        self.assertEqual(x.tolist(), nd.tolist())
        x = mv[()]
        self.assertEqual(x, 1)
        x = mv[...]
        self.assertEqual(x.tolist(), nd.tolist())
        self.assertRaises(TypeError, nd.__getitem__, 0)
        self.assertRaises(TypeError, mv.__getitem__, 0)
        self.assertRaises(TypeError, nd.__setitem__, 0, 8)
        self.assertRaises(TypeError, mv.__setitem__, 0, 8)
        self.assertEqual(nd.tolist(), 1)
        self.assertEqual(mv.tolist(), 1)
        # Scalar assignment through () and ...
        nd[()] = 9
        self.assertEqual(nd.tolist(), 9)
        mv[()] = 9
        self.assertEqual(mv.tolist(), 9)
        nd[...] = 5
        self.assertEqual(nd.tolist(), 5)
        mv[...] = 5
        self.assertEqual(mv.tolist(), 5)
    def test_ndarray_index_null_strides(self):
        """Indexing or slicing a PyBUF_CONTIG re-export raises BufferError
        (the re-export carries no stride information to navigate with)."""
        ex = ndarray(list(range(2 * 4)), shape=[2, 4], flags=ND_WRITABLE)
        nd = ndarray(ex, getbuf=PyBUF_CONTIG)
        self.assertRaises(BufferError, nd.__getitem__, 1)
        self.assertRaises(BufferError, nd.__getitem__, slice(3, 5, 1))
    def test_ndarray_index_getitem_single(self):
        """Single-item __getitem__ for ndarray, memoryview, and
        re-exported buffers, including negative indices."""
        # direct exporter
        for fmt, items, _ in iter_format(5):
            nd = ndarray(items, shape=[5], format=fmt)
            for i in range(-5, 5):
                self.assertEqual(nd[i], items[i])
            self.assertRaises(IndexError, nd.__getitem__, -6)
            self.assertRaises(IndexError, nd.__getitem__, 5)
            if is_memoryview_format(fmt):
                mv = memoryview(nd)
                self.assertEqual(mv, nd)
                for i in range(-5, 5):
                    self.assertEqual(mv[i], items[i])
                self.assertRaises(IndexError, mv.__getitem__, -6)
                self.assertRaises(IndexError, mv.__getitem__, 5)
        # re-exporter
        for fmt, items, _ in iter_format(5):
            ex = ndarray(items, shape=[5], flags=ND_WRITABLE, format=fmt)
            nd = ndarray(ex, getbuf=PyBUF_CONTIG | PyBUF_FORMAT)
            for i in range(-5, 5):
                self.assertEqual(nd[i], items[i])
            if is_memoryview_format(fmt):
                mv = nd.memoryview_from_buffer()
                self.assertIs(mv.__eq__(nd), NotImplemented)
                for i in range(-5, 5):
                    self.assertEqual(mv[i], items[i])
        # readonly and simple re-exports
        items = [1, 2, 3, 4, 5]
        ex = ndarray(items, shape=[5])
        nd = ndarray(ex, getbuf=PyBUF_CONTIG_RO)
        for i in range(-5, 5):
            self.assertEqual(nd[i], items[i])
        items = [1, 2, 3, 4, 5]
        ex = ndarray(items, shape=[5])
        nd = ndarray(ex, getbuf=PyBUF_SIMPLE)
        for i in range(-5, 5):
            self.assertEqual(nd[i], items[i])
    def test_ndarray_index_setitem_single(self):
        """Single-item __setitem__ for ndarray and memoryview, including
        self-assignment from a neighbouring element."""
        for fmt, items, single_item in iter_format(5):
            nd = ndarray(items, shape=[5], format=fmt, flags=ND_WRITABLE)
            for i in range(5):
                items[i] = single_item
                nd[i] = single_item
            self.assertEqual(nd.tolist(), items)
            self.assertRaises(IndexError, nd.__setitem__, -6, single_item)
            self.assertRaises(IndexError, nd.__setitem__, 5, single_item)
            if not is_memoryview_format(fmt):
                continue
            nd = ndarray(items, shape=[5], format=fmt, flags=ND_WRITABLE)
            mv = memoryview(nd)
            self.assertEqual(mv, nd)
            for i in range(5):
                items[i] = single_item
                mv[i] = single_item
            self.assertEqual(mv.tolist(), items)
            self.assertRaises(IndexError, mv.__setitem__, -6, single_item)
            self.assertRaises(IndexError, mv.__setitem__, 5, single_item)
        # assignment from the same buffer (nd[i] = nd[i+1]), with negative
        # indices; the plain list mirrors the expected result.
        for fmt, items, single_item in iter_format(5):
            nd = ndarray(items, shape=[5], format=fmt, flags=ND_WRITABLE)
            for i in range(-5, 4):
                items[i] = items[i + 1]
                nd[i] = nd[i + 1]
            self.assertEqual(nd.tolist(), items)
            if not is_memoryview_format(fmt):
                continue
            nd = ndarray(items, shape=[5], format=fmt, flags=ND_WRITABLE)
            mv = memoryview(nd)
            self.assertEqual(mv, nd)
            for i in range(-5, 4):
                items[i] = items[i + 1]
                mv[i] = mv[i + 1]
            self.assertEqual(mv.tolist(), items)
def test_ndarray_index_getitem_multidim(self):
shape_t = 2, 3, 5
nitems = prod(shape_t)
for shape in permutations(shape_t):
fmt, items, _ = randitems(nitems)
for flags in (0, ND_PIL):
nd = ndarray(items, shape=shape, format=fmt, flags=flags)
lst = carray(items, shape)
for i in range(-shape[0], shape[0]):
self.assertEqual(lst[i], nd[i].tolist())
for j in range(-shape[1], shape[1]):
self.assertEqual(lst[i][j], nd[i][j].tolist())
for k in range(-shape[2], shape[2]):
self.assertEqual(lst[i][j][k], nd[i][j][k])
nd = ndarray(items, shape=shape, format=fmt, flags=flags |
ND_FORTRAN)
lst = farray(items, shape)
for i in range(-shape[0], shape[0]):
self.assertEqual(lst[i], nd[i].tolist())
for j in range(-shape[1], shape[1]):
self.assertEqual(lst[i][j], nd[i][j].tolist())
for k in range(shape[2], shape[2]):
self.assertEqual(lst[i][j][k], nd[i][j][k])
    def test_ndarray_sequence(self):
        """Iteration and membership ('in') tests; zero-dimensional arrays
        do not support membership and raise TypeError."""
        nd = ndarray(1, shape=())
        self.assertRaises(TypeError, eval, '1 in nd', locals())
        mv = memoryview(nd)
        self.assertEqual(mv, nd)
        self.assertRaises(TypeError, eval, '1 in mv', locals())
        for fmt, items, _ in iter_format(5):
            nd = ndarray(items, shape=[5], format=fmt)
            for i, v in enumerate(nd):
                self.assertEqual(v, items[i])
                self.assertTrue(v in nd)
            if is_memoryview_format(fmt):
                mv = memoryview(nd)
                for i, v in enumerate(mv):
                    self.assertEqual(v, items[i])
                    self.assertTrue(v in mv)
def test_ndarray_slice_invalid(self):
items = [1, 2, 3, 4, 5, 6, 7, 8]
xl = ndarray(items, shape=[8], flags=ND_WRITABLE)
ml = memoryview(xl)
self.assertRaises(TypeError, xl.__setitem__, slice(0, 8, 1), items)
self.assertRaises(TypeError, ml.__setitem__, slice(0, 8, 1), items)
xl = ndarray(items, shape=[8], flags=ND_WRITABLE)
ex = ndarray(items, shape=[8], flags=ND_WRITABLE)
xr = ndarray(ex, getbuf=PyBUF_ND)
self.assertRaises(BufferError, xl.__setitem__, slice(0, 8, 1), xr)
nd = ndarray(items, shape=[8], format='L', flags=ND_WRITABLE)
mv = memoryview(nd)
self.assertRaises(ValueError, nd.__getitem__, slice(0, 1, 0))
self.assertRaises(ValueError, mv.__getitem__, slice(0, 1, 0))
nd = ndarray(items, shape=[2, 4], format='L', flags=ND_WRITABLE)
mv = memoryview(nd)
self.assertRaises(ValueError, nd.__getitem__, (slice(0, 1, 1),
slice(0, 1, 0)))
self.assertRaises(ValueError, nd.__getitem__, (slice(0, 1, 0),
slice(0, 1, 1)))
self.assertRaises(TypeError, nd.__getitem__, '@%$')
self.assertRaises(TypeError, nd.__getitem__, ('@%$', slice(0, 1, 1)))
self.assertRaises(TypeError, nd.__getitem__, (slice(0, 1, 1), {}))
self.assertRaises(NotImplementedError, mv.__getitem__, (slice(0, 1,
1), slice(0, 1, 0)))
self.assertRaises(TypeError, mv.__getitem__, '@%$')
xl = ndarray(items, shape=[8], format='B', flags=ND_WRITABLE)
xr = ndarray(items, shape=[8], format='b')
ml = memoryview(xl)
mr = memoryview(xr)
self.assertRaises(ValueError, xl.__setitem__, slice(0, 1, 1), xr[7:8])
self.assertEqual(xl.tolist(), items)
self.assertRaises(ValueError, ml.__setitem__, slice(0, 1, 1), mr[7:8])
self.assertEqual(ml.tolist(), items)
xl = ndarray(items, shape=[8], format='B', flags=ND_WRITABLE)
yr = ndarray(items, shape=[8], format='L')
ml = memoryview(xl)
mr = memoryview(xr)
self.assertRaises(ValueError, xl.__setitem__, slice(0, 1, 1), xr[7:8])
self.assertEqual(xl.tolist(), items)
self.assertRaises(ValueError, ml.__setitem__, slice(0, 1, 1), mr[7:8])
self.assertEqual(ml.tolist(), items)
xl = ndarray(items, shape=[2, 4], format='b', flags=ND_WRITABLE)
xr = ndarray(items, shape=[8], format='b')
ml = memoryview(xl)
mr = memoryview(xr)
self.assertRaises(ValueError, xl.__setitem__, slice(0, 1, 1), xr[7:8])
self.assertEqual(xl.tolist(), [[1, 2, 3, 4], [5, 6, 7, 8]])
self.assertRaises(NotImplementedError, ml.__setitem__, slice(0, 1,
1), mr[7:8])
xl = ndarray(items, shape=[8], format='b', flags=ND_WRITABLE)
xr = ndarray(items, shape=[8], format='b')
ml = memoryview(xl)
mr = memoryview(xr)
self.assertRaises(ValueError, xl.__setitem__, slice(0, 2, 1), xr[7:8])
self.assertEqual(xl.tolist(), items)
self.assertRaises(ValueError, ml.__setitem__, slice(0, 2, 1), mr[7:8])
self.assertEqual(ml.tolist(), items)
self.assertRaises(TypeError, slice_indices, slice(0, 1, 2), {})
self.assertRaises(TypeError, slice_indices, '###########', 1)
self.assertRaises(ValueError, slice_indices, slice(0, 1, 0), 4)
x = ndarray(items, shape=[8], format='b', flags=ND_PIL)
self.assertRaises(TypeError, x.add_suboffsets)
ex = ndarray(items, shape=[8], format='B')
x = ndarray(ex, getbuf=PyBUF_SIMPLE)
self.assertRaises(TypeError, x.add_suboffsets)
def test_ndarray_slice_zero_shape(self):
items = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
x = ndarray(items, shape=[12], format='L', flags=ND_WRITABLE)
y = ndarray(items, shape=[12], format='L')
x[4:4] = y[9:9]
self.assertEqual(x.tolist(), items)
ml = memoryview(x)
mr = memoryview(y)
self.assertEqual(ml, x)
self.assertEqual(ml, y)
ml[4:4] = mr[9:9]
self.assertEqual(ml.tolist(), items)
x = ndarray(items, shape=[3, 4], format='L', flags=ND_WRITABLE)
y = ndarray(items, shape=[4, 3], format='L')
x[1:2, 2:2] = y[1:2, 3:3]
self.assertEqual(x.tolist(), carray(items, [3, 4]))
def test_ndarray_slice_multidim(self):
shape_t = 2, 3, 5
ndim = len(shape_t)
nitems = prod(shape_t)
for shape in permutations(shape_t):
fmt, items, _ = randitems(nitems)
itemsize = struct.calcsize(fmt)
for flags in (0, ND_PIL):
nd = ndarray(items, shape=shape, format=fmt, flags=flags)
lst = carray(items, shape)
for slices in rslices_ndim(ndim, shape):
listerr = None
try:
sliced = multislice(lst, slices)
except Exception as e:
listerr = e.__class__
nderr = None
try:
ndsliced = nd[slices]
except Exception as e:
nderr = e.__class__
if nderr or listerr:
self.assertIs(nderr, listerr)
else:
self.assertEqual(ndsliced.tolist(), sliced)
def test_ndarray_slice_redundant_suboffsets(self):
shape_t = 2, 3, 5, 2
ndim = len(shape_t)
nitems = prod(shape_t)
for shape in permutations(shape_t):
fmt, items, _ = randitems(nitems)
itemsize = struct.calcsize(fmt)
nd = ndarray(items, shape=shape, format=fmt)
nd.add_suboffsets()
ex = ndarray(items, shape=shape, format=fmt)
ex.add_suboffsets()
mv = memoryview(ex)
lst = carray(items, shape)
for slices in rslices_ndim(ndim, shape):
listerr = None
try:
sliced = multislice(lst, slices)
except Exception as e:
listerr = e.__class__
nderr = None
try:
ndsliced = nd[slices]
except Exception as e:
nderr = e.__class__
if nderr or listerr:
self.assertIs(nderr, listerr)
else:
self.assertEqual(ndsliced.tolist(), sliced)
    def test_ndarray_slice_assign_single(self):
        """For every (format, lslice, rslice) combination, compare slice
        assignment on ndarray/memoryview against plain-list behavior:
        same resulting items or the same exception class."""
        for fmt, items, _ in iter_format(5):
            for lslice in genslices(5):
                for rslice in genslices(5):
                    for flags in (0, ND_PIL):
                        f = flags | ND_WRITABLE
                        nd = ndarray(items, shape=[5], format=fmt, flags=f)
                        ex = ndarray(items, shape=[5], format=fmt, flags=f)
                        mv = memoryview(ex)
                        lsterr = None
                        diff_structure = None
                        lst = items[:]
                        try:
                            lval = lst[lslice]
                            rval = lst[rslice]
                            lst[lslice] = lst[rslice]
                            # Lists may change length on slice assignment;
                            # fixed-size buffers cannot, so record it.
                            diff_structure = len(lval) != len(rval)
                        except Exception as e:
                            lsterr = e.__class__
                        nderr = None
                        try:
                            nd[lslice] = nd[rslice]
                        except Exception as e:
                            nderr = e.__class__
                        if diff_structure:
                            # The buffer must reject a length-changing
                            # assignment that a list would accept.
                            self.assertIs(nderr, ValueError)
                        else:
                            self.assertEqual(nd.tolist(), lst)
                            self.assertIs(nderr, lsterr)
                        if not is_memoryview_format(fmt):
                            continue
                        # Repeat the same comparison through a memoryview.
                        mverr = None
                        try:
                            mv[lslice] = mv[rslice]
                        except Exception as e:
                            mverr = e.__class__
                        if diff_structure:
                            self.assertIs(mverr, ValueError)
                        else:
                            self.assertEqual(mv.tolist(), lst)
                            self.assertEqual(mv, nd)
                            self.assertIs(mverr, lsterr)
                        self.verify(mv, obj=ex, itemsize=nd.itemsize,
                            fmt=fmt, readonly=0, ndim=nd.ndim, shape=nd
                            .shape, strides=nd.strides, lst=nd.tolist())
    def test_ndarray_slice_assign_multidim(self):
        """Random compatible multidimensional slice pairs: ndarray slice
        assignment must match nested-list reference assignment (same
        result or same exception class)."""
        shape_t = 2, 3, 5
        ndim = len(shape_t)
        nitems = prod(shape_t)
        for shape in permutations(shape_t):
            fmt, items, _ = randitems(nitems)
            for flags in (0, ND_PIL):
                for _ in range(ITERATIONS):
                    lslices, rslices = randslice_from_shape(ndim, shape)
                    nd = ndarray(items, shape=shape, format=fmt, flags=
                        flags | ND_WRITABLE)
                    lst = carray(items, shape)
                    # Reference: nested-list slice assignment.
                    listerr = None
                    try:
                        result = multislice_assign(lst, lst, lslices, rslices)
                    except Exception as e:
                        listerr = e.__class__
                    nderr = None
                    try:
                        nd[lslices] = nd[rslices]
                    except Exception as e:
                        nderr = e.__class__
                    if nderr or listerr:
                        self.assertIs(nderr, listerr)
                    else:
                        self.assertEqual(nd.tolist(), result)
    def test_ndarray_random(self):
        """Build ndarrays from random valid buffer structures and check
        tolist()/memoryview round-trips; cross-check against numpy when
        it is available."""
        for _ in range(ITERATIONS):
            for fmt in fmtdict['@']:
                itemsize = struct.calcsize(fmt)
                t = rand_structure(itemsize, True, maxdim=MAXDIM, maxshape=
                    MAXSHAPE)
                self.assertTrue(verify_structure(*t))
                items = randitems_from_structure(fmt, t)
                x = ndarray_from_structure(items, fmt, t)
                xlist = x.tolist()
                mv = memoryview(x)
                if is_memoryview_format(fmt):
                    mvlist = mv.tolist()
                    self.assertEqual(mvlist, xlist)
                # NOTE(review): t[2] appears to be the structure's ndim —
                # the PIL variant is only built for ndim > 0; confirm
                # against rand_structure().
                if t[2] > 0:
                    y = ndarray_from_structure(items, fmt, t, flags=ND_PIL)
                    ylist = y.tolist()
                    self.assertEqual(xlist, ylist)
                    mv = memoryview(y)
                    if is_memoryview_format(fmt):
                        self.assertEqual(mv, y)
                        mvlist = mv.tolist()
                        self.assertEqual(mvlist, ylist)
                if numpy_array:
                    shape = t[3]
                    if 0 in shape:
                        continue
                    # Cross-check the full buffer contract against numpy.
                    z = numpy_array_from_structure(items, fmt, t)
                    self.verify(x, obj=None, itemsize=z.itemsize, fmt=fmt,
                        readonly=0, ndim=z.ndim, shape=z.shape, strides=z.
                        strides, lst=z.tolist())
    def test_ndarray_random_invalid(self):
        """Random invalid buffer structures must be rejected by ndarray
        construction."""
        for _ in range(ITERATIONS):
            for fmt in fmtdict['@']:
                itemsize = struct.calcsize(fmt)
                t = rand_structure(itemsize, False, maxdim=MAXDIM, maxshape
                    =MAXSHAPE)
                self.assertFalse(verify_structure(*t))
                items = randitems_from_structure(fmt, t)
                nderr = False
                try:
                    x = ndarray_from_structure(items, fmt, t)
                except Exception as e:
                    nderr = e.__class__
                # Some exception must have been raised.
                self.assertTrue(nderr)
                if numpy_array:
                    numpy_err = False
                    try:
                        y = numpy_array_from_structure(items, fmt, t)
                    except Exception as e:
                        numpy_err = e.__class__
                    # NOTE(review): deliberately disabled ('if 0') —
                    # numpy apparently does not reliably reject these
                    # structures; confirm before ever enabling.
                    if 0:
                        self.assertTrue(numpy_err)
    def test_ndarray_random_slice_assign(self):
        """Random aligned slice pairs on random valid structures: slice
        assignment results must agree between plain, PIL-style and (when
        available) numpy arrays."""
        for _ in range(ITERATIONS):
            for fmt in fmtdict['@']:
                itemsize = struct.calcsize(fmt)
                lshape, rshape, lslices, rslices = rand_aligned_slices(maxdim
                    =MAXDIM, maxshape=MAXSHAPE)
                tl = rand_structure(itemsize, True, shape=lshape)
                tr = rand_structure(itemsize, True, shape=rshape)
                self.assertTrue(verify_structure(*tl))
                self.assertTrue(verify_structure(*tr))
                litems = randitems_from_structure(fmt, tl)
                ritems = randitems_from_structure(fmt, tr)
                xl = ndarray_from_structure(litems, fmt, tl)
                xr = ndarray_from_structure(ritems, fmt, tr)
                xl[lslices] = xr[rslices]
                xllist = xl.tolist()
                xrlist = xr.tolist()
                ml = memoryview(xl)
                mr = memoryview(xr)
                self.assertEqual(ml.tolist(), xllist)
                self.assertEqual(mr.tolist(), xrlist)
                # NOTE(review): tl[2]/tr[2] appear to be ndim — the PIL
                # variant needs at least one dimension; confirm against
                # rand_structure().
                if tl[2] > 0 and tr[2] > 0:
                    yl = ndarray_from_structure(litems, fmt, tl, flags=ND_PIL)
                    yr = ndarray_from_structure(ritems, fmt, tr, flags=ND_PIL)
                    yl[lslices] = yr[rslices]
                    yllist = yl.tolist()
                    yrlist = yr.tolist()
                    self.assertEqual(xllist, yllist)
                    self.assertEqual(xrlist, yrlist)
                    ml = memoryview(yl)
                    mr = memoryview(yr)
                    self.assertEqual(ml.tolist(), yllist)
                    self.assertEqual(mr.tolist(), yrlist)
                if numpy_array:
                    if 0 in lshape or 0 in rshape:
                        continue
                    zl = numpy_array_from_structure(litems, fmt, tl)
                    zr = numpy_array_from_structure(ritems, fmt, tr)
                    zl[lslices] = zr[rslices]
                    # Overlapping buffers have unspecified copy order, so
                    # only non-overlapping results are compared.
                    if not is_overlapping(tl) and not is_overlapping(tr):
                        self.verify(xl, obj=None, itemsize=zl.itemsize, fmt
                            =fmt, readonly=0, ndim=zl.ndim, shape=zl.shape,
                            strides=zl.strides, lst=zl.tolist())
                        self.verify(xr, obj=None, itemsize=zr.itemsize, fmt=fmt,
                            readonly=0, ndim=zr.ndim, shape=zr.shape, strides=
                            zr.strides, lst=zr.tolist())
def test_ndarray_re_export(self):
items = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
nd = ndarray(items, shape=[3, 4], flags=ND_PIL)
ex = ndarray(nd)
self.assertTrue(ex.flags & ND_PIL)
self.assertIs(ex.obj, nd)
self.assertEqual(ex.suboffsets, (0, -1))
self.assertFalse(ex.c_contiguous)
self.assertFalse(ex.f_contiguous)
self.assertFalse(ex.contiguous)
def test_ndarray_zero_shape(self):
for flags in (0, ND_PIL):
nd = ndarray([1, 2, 3], shape=[0], flags=flags)
mv = memoryview(nd)
self.assertEqual(mv, nd)
self.assertEqual(nd.tolist(), [])
self.assertEqual(mv.tolist(), [])
nd = ndarray([1, 2, 3], shape=[0, 3, 3], flags=flags)
self.assertEqual(nd.tolist(), [])
nd = ndarray([1, 2, 3], shape=[3, 0, 3], flags=flags)
self.assertEqual(nd.tolist(), [[], [], []])
nd = ndarray([1, 2, 3], shape=[3, 3, 0], flags=flags)
self.assertEqual(nd.tolist(), [[[], [], []], [[], [], []], [[],
[], []]])
def test_ndarray_zero_strides(self):
for flags in (0, ND_PIL):
nd = ndarray([1], shape=[5], strides=[0], flags=flags)
mv = memoryview(nd)
self.assertEqual(mv, nd)
self.assertEqual(nd.tolist(), [1, 1, 1, 1, 1])
self.assertEqual(mv.tolist(), [1, 1, 1, 1, 1])
def test_ndarray_offset(self):
nd = ndarray(list(range(20)), shape=[3], offset=7)
self.assertEqual(nd.offset, 7)
self.assertEqual(nd.tolist(), [7, 8, 9])
def test_ndarray_memoryview_from_buffer(self):
for flags in (0, ND_PIL):
nd = ndarray(list(range(3)), shape=[3], flags=flags)
m = nd.memoryview_from_buffer()
self.assertEqual(m, nd)
def test_ndarray_get_pointer(self):
for flags in (0, ND_PIL):
nd = ndarray(list(range(3)), shape=[3], flags=flags)
for i in range(3):
self.assertEqual(nd[i], get_pointer(nd, [i]))
def test_ndarray_tolist_null_strides(self):
ex = ndarray(list(range(20)), shape=[2, 2, 5])
nd = ndarray(ex, getbuf=PyBUF_ND | PyBUF_FORMAT)
self.assertEqual(nd.tolist(), ex.tolist())
m = memoryview(ex)
self.assertEqual(m.tolist(), ex.tolist())
def test_ndarray_cmp_contig(self):
self.assertFalse(cmp_contig(b'123', b'456'))
x = ndarray(list(range(12)), shape=[3, 4])
y = ndarray(list(range(12)), shape=[4, 3])
self.assertFalse(cmp_contig(x, y))
x = ndarray([1], shape=[1], format='B')
self.assertTrue(cmp_contig(x, b'\x01'))
self.assertTrue(cmp_contig(b'\x01', x))
    def test_ndarray_hash(self):
        """hash() of a read-only exporter equals the hash of its bytes,
        regardless of shape/order; writable exporters are unhashable."""
        # Writable exporter: hashing must fail.
        a = array.array('L', [1, 2, 3])
        nd = ndarray(a)
        self.assertRaises(ValueError, hash, nd)
        # Same bytes, different shapes: same hash.
        b = bytes(list(range(12)))
        nd = ndarray(list(range(12)), shape=[12])
        self.assertEqual(hash(nd), hash(b))
        nd = ndarray(list(range(12)), shape=[3, 4])
        self.assertEqual(hash(nd), hash(b))
        nd = ndarray(list(range(12)), shape=[3, 2, 2])
        self.assertEqual(hash(nd), hash(b))
        # Fortran order: hash equals the transposed byte sequence.
        b = bytes(transpose(list(range(12)), shape=[4, 3]))
        nd = ndarray(list(range(12)), shape=[3, 4], flags=ND_FORTRAN)
        self.assertEqual(hash(nd), hash(b))
        b = bytes(transpose(list(range(12)), shape=[2, 3, 2]))
        nd = ndarray(list(range(12)), shape=[2, 3, 2], flags=ND_FORTRAN)
        self.assertEqual(hash(nd), hash(b))
        # PIL-style and multi-byte formats hash their tobytes() form.
        b = bytes(list(range(12)))
        nd = ndarray(list(range(12)), shape=[2, 2, 3], flags=ND_PIL)
        self.assertEqual(hash(nd), hash(b))
        nd = ndarray(list(range(12)), shape=[2, 2, 3], format='L')
        self.assertEqual(hash(nd), hash(nd.tobytes()))
    def test_py_buffer_to_contiguous(self):
        """Exercise py_buffer_to_contiguous() (PyBuffer_ToContiguous):
        scalar, empty, 1-D, strided and multidimensional exporters, for
        'C', 'F' and 'A' (any) order, across many getbuffer requests."""
        requests = (PyBUF_INDIRECT, PyBUF_STRIDES, PyBUF_ND, PyBUF_SIMPLE,
            PyBUF_FULL, PyBUF_FULL_RO, PyBUF_RECORDS, PyBUF_RECORDS_RO,
            PyBUF_STRIDED, PyBUF_STRIDED_RO, PyBUF_CONTIG, PyBUF_CONTIG_RO)
        # Not a buffer provider at all.
        self.assertRaises(TypeError, py_buffer_to_contiguous, {}, 'F',
            PyBUF_FULL_RO)
        # Scalar (0-d) exporter: all orders yield tobytes().
        nd = ndarray(9, shape=(), format='L', flags=ND_WRITABLE)
        for order in ['C', 'F', 'A']:
            for request in requests:
                b = py_buffer_to_contiguous(nd, order, request)
                self.assertEqual(b, nd.tobytes())
        # Empty shapes (zero total items) yield b''.
        nd = ndarray([1], shape=[0], format='L', flags=ND_WRITABLE)
        for order in ['C', 'F', 'A']:
            for request in requests:
                b = py_buffer_to_contiguous(nd, order, request)
                self.assertEqual(b, b'')
        nd = ndarray(list(range(8)), shape=[2, 0, 7], format='L', flags=
            ND_WRITABLE)
        for order in ['C', 'F', 'A']:
            for request in requests:
                b = py_buffer_to_contiguous(nd, order, request)
                self.assertEqual(b, b'')
        # 1-D contiguous exporters: order makes no difference.
        for f in [0, ND_FORTRAN]:
            nd = ndarray([1], shape=[1], format='h', flags=f | ND_WRITABLE)
            ndbytes = nd.tobytes()
            for order in ['C', 'F', 'A']:
                for request in requests:
                    b = py_buffer_to_contiguous(nd, order, request)
                    self.assertEqual(b, ndbytes)
            nd = ndarray([1, 2, 3], shape=[3], format='b', flags=f |
                ND_WRITABLE)
            ndbytes = nd.tobytes()
            for order in ['C', 'F', 'A']:
                for request in requests:
                    b = py_buffer_to_contiguous(nd, order, request)
                    self.assertEqual(b, ndbytes)
        # 1-D exporter with explicit strides.
        nd = ndarray([1, 2, 3], shape=[2], strides=[2], flags=ND_WRITABLE)
        ndbytes = nd.tobytes()
        for order in ['C', 'F', 'A']:
            for request in [PyBUF_STRIDES, PyBUF_FULL]:
                b = py_buffer_to_contiguous(nd, order, request)
                self.assertEqual(b, ndbytes)
        # Reversed (negative-stride) view: requests that cannot express
        # the view raise BufferError and are skipped.
        nd = nd[::-1]
        ndbytes = nd.tobytes()
        for order in ['C', 'F', 'A']:
            for request in requests:
                try:
                    b = py_buffer_to_contiguous(nd, order, request)
                except BufferError:
                    continue
                self.assertEqual(b, ndbytes)
        # Multidimensional: compare against explicit transposes and,
        # when available, numpy output for the same memory order.
        lst = list(range(12))
        for f in [0, ND_FORTRAN]:
            nd = ndarray(lst, shape=[3, 4], flags=f | ND_WRITABLE)
            if numpy_array:
                na = numpy_array(buffer=bytearray(lst), shape=[3, 4], dtype
                    ='B', order='C' if f == 0 else 'F')
            # 'C' order request.
            if f == ND_FORTRAN:
                # Expected C bytes of a Fortran array == transposed data.
                x = ndarray(transpose(lst, [4, 3]), shape=[3, 4], flags=
                    ND_WRITABLE)
                expected = x.tobytes()
            else:
                expected = nd.tobytes()
            for request in requests:
                try:
                    b = py_buffer_to_contiguous(nd, 'C', request)
                except BufferError:
                    continue
                self.assertEqual(b, expected)
                # The output must reconstruct a logically equal array.
                y = ndarray([v for v in b], shape=[3, 4], flags=ND_WRITABLE)
                self.assertEqual(memoryview(y), memoryview(nd))
                if numpy_array:
                    # NOTE(review): ndarray.tostring() is deprecated and
                    # removed in NumPy >= 1.23; tobytes() is the modern
                    # spelling — this only runs when numpy is importable.
                    self.assertEqual(b, na.tostring(order='C'))
            # 'F' order request.
            if f == 0:
                x = ndarray(transpose(lst, [3, 4]), shape=[4, 3], flags=
                    ND_WRITABLE)
            else:
                x = ndarray(lst, shape=[3, 4], flags=ND_WRITABLE)
            expected = x.tobytes()
            for request in [PyBUF_FULL, PyBUF_FULL_RO, PyBUF_INDIRECT,
                PyBUF_STRIDES, PyBUF_ND]:
                try:
                    b = py_buffer_to_contiguous(nd, 'F', request)
                except BufferError:
                    continue
                self.assertEqual(b, expected)
                y = ndarray([v for v in b], shape=[3, 4], flags=ND_FORTRAN |
                    ND_WRITABLE)
                self.assertEqual(memoryview(y), memoryview(nd))
                if numpy_array:
                    self.assertEqual(b, na.tostring(order='F'))
            # 'A' (any) order request: keep the exporter's own order.
            if f == ND_FORTRAN:
                x = ndarray(lst, shape=[3, 4], flags=ND_WRITABLE)
                expected = x.tobytes()
            else:
                expected = nd.tobytes()
            for request in [PyBUF_FULL, PyBUF_FULL_RO, PyBUF_INDIRECT,
                PyBUF_STRIDES, PyBUF_ND]:
                try:
                    b = py_buffer_to_contiguous(nd, 'A', request)
                except BufferError:
                    continue
                self.assertEqual(b, expected)
                y = ndarray([v for v in b], shape=[3, 4], flags=f | ND_WRITABLE
                    )
                self.assertEqual(memoryview(y), memoryview(nd))
                if numpy_array:
                    self.assertEqual(b, na.tostring(order='A'))
        # PIL-style (suboffset) exporter; reuses 'lst' from above.
        nd = ndarray(list(range(12)), shape=[3, 4], flags=ND_WRITABLE | ND_PIL)
        b = py_buffer_to_contiguous(nd, 'C', PyBUF_FULL_RO)
        self.assertEqual(b, nd.tobytes())
        y = ndarray([v for v in b], shape=[3, 4], flags=ND_WRITABLE)
        self.assertEqual(memoryview(y), memoryview(nd))
        b = py_buffer_to_contiguous(nd, 'F', PyBUF_FULL_RO)
        x = ndarray(transpose(lst, [3, 4]), shape=[4, 3], flags=ND_WRITABLE)
        self.assertEqual(b, x.tobytes())
        y = ndarray([v for v in b], shape=[3, 4], flags=ND_FORTRAN |
            ND_WRITABLE)
        self.assertEqual(memoryview(y), memoryview(nd))
        b = py_buffer_to_contiguous(nd, 'A', PyBUF_FULL_RO)
        self.assertEqual(b, nd.tobytes())
        y = ndarray([v for v in b], shape=[3, 4], flags=ND_WRITABLE)
        self.assertEqual(memoryview(y), memoryview(nd))
    def test_memoryview_construction(self):
        """memoryview construction from C-, Fortran- and PIL-style
        exporters, plus re-export and the invalid cases."""
        items_shape = [(9, []), ([1, 2, 3], [3]), (list(range(2 * 3 * 5)),
            [2, 3, 5])]
        # C-contiguous exporters.
        for items, shape in items_shape:
            ex = ndarray(items, shape=shape)
            m = memoryview(ex)
            self.assertTrue(m.c_contiguous)
            self.assertTrue(m.contiguous)
            ndim = len(shape)
            strides = strides_from_shape(ndim, shape, 1, 'C')
            lst = carray(items, shape)
            self.verify(m, obj=ex, itemsize=1, fmt='B', readonly=1, ndim=
                ndim, shape=shape, strides=strides, lst=lst)
            # memoryview of a memoryview still reports the original base.
            m2 = memoryview(m)
            self.verify(m2, obj=ex, itemsize=1, fmt='B', readonly=1, ndim=
                ndim, shape=shape, strides=strides, lst=lst)
            # memoryview built from a C-level buffer without strides.
            nd = ndarray(ex, getbuf=PyBUF_CONTIG_RO | PyBUF_FORMAT)
            self.assertEqual(nd.strides, ())
            m = nd.memoryview_from_buffer()
            self.verify(m, obj=None, itemsize=1, fmt='B', readonly=1, ndim=
                ndim, shape=shape, strides=strides, lst=lst)
            # memoryview from a PyBUF_SIMPLE buffer: flattens to 1-D bytes.
            nd = ndarray(ex, getbuf=PyBUF_SIMPLE)
            self.assertEqual(nd.format, '')
            self.assertEqual(nd.shape, ())
            self.assertEqual(nd.strides, ())
            m = nd.memoryview_from_buffer()
            lst = [items] if ndim == 0 else items
            self.verify(m, obj=None, itemsize=1, fmt='B', readonly=1, ndim=
                1, shape=[ex.nbytes], strides=(1,), lst=lst)
        # Fortran-contiguous exporters.
        for items, shape in items_shape:
            ex = ndarray(items, shape=shape, flags=ND_FORTRAN)
            m = memoryview(ex)
            self.assertTrue(m.f_contiguous)
            self.assertTrue(m.contiguous)
            ndim = len(shape)
            strides = strides_from_shape(ndim, shape, 1, 'F')
            lst = farray(items, shape)
            self.verify(m, obj=ex, itemsize=1, fmt='B', readonly=1, ndim=
                ndim, shape=shape, strides=strides, lst=lst)
            m2 = memoryview(m)
            self.verify(m2, obj=ex, itemsize=1, fmt='B', readonly=1, ndim=
                ndim, shape=shape, strides=strides, lst=lst)
        # PIL-style (suboffset) exporters; the 0-d case is skipped.
        for items, shape in items_shape[1:]:
            ex = ndarray(items, shape=shape, flags=ND_PIL)
            m = memoryview(ex)
            ndim = len(shape)
            lst = carray(items, shape)
            self.verify(m, obj=ex, itemsize=1, fmt='B', readonly=1, ndim=
                ndim, shape=shape, strides=ex.strides, lst=lst)
            m2 = memoryview(m)
            self.verify(m2, obj=ex, itemsize=1, fmt='B', readonly=1, ndim=
                ndim, shape=shape, strides=ex.strides, lst=lst)
        # Invalid constructions.
        self.assertRaises(TypeError, memoryview, b'9', 'x')
        self.assertRaises(TypeError, memoryview, {})
        # Exporters lacking format/shape information are rejected.
        ex = ndarray([1, 2, 3], shape=[3])
        nd = ndarray(ex, getbuf=PyBUF_SIMPLE)
        self.assertRaises(BufferError, memoryview, nd)
        nd = ndarray(ex, getbuf=PyBUF_CONTIG_RO | PyBUF_FORMAT)
        self.assertRaises(BufferError, memoryview, nd)
        # A 128-dimensional exporter is rejected everywhere.
        nd = ndarray([1] * 128, shape=[1] * 128, format='L')
        self.assertRaises(ValueError, memoryview, nd)
        self.assertRaises(ValueError, nd.memoryview_from_buffer)
        self.assertRaises(ValueError, get_contiguous, nd, PyBUF_READ, 'C')
        self.assertRaises(ValueError, get_contiguous, nd, PyBUF_READ, 'F')
        self.assertRaises(ValueError, get_contiguous, nd[::-1], PyBUF_READ, 'C'
            )
def test_memoryview_cast_zero_shape(self):
items = [1, 2, 3]
for shape in ([0, 3, 3], [3, 0, 3], [0, 3, 3]):
ex = ndarray(items, shape=shape)
self.assertTrue(ex.c_contiguous)
msrc = memoryview(ex)
self.assertRaises(TypeError, msrc.cast, 'c')
for fmt, _, _ in iter_format(1, 'memoryview'):
msrc = memoryview(b'')
m = msrc.cast(fmt)
self.assertEqual(m.tobytes(), b'')
self.assertEqual(m.tolist(), [])
check_sizeof = support.check_sizeof
    def test_memoryview_sizeof(self):
        """sys.getsizeof(memoryview) grows by 3 Py_ssize_t per dimension."""
        check = self.check_sizeof
        vsize = support.calcvobjsize
        # NOTE(review): these struct-style layout strings presumably
        # mirror memoryview's C object layout — confirm against
        # Objects/memoryobject.c if this test starts failing.
        base_struct = 'Pnin 2P2n2i5P P'
        per_dim = '3n'
        items = list(range(8))
        check(memoryview(b''), vsize(base_struct + 1 * per_dim))
        a = ndarray(items, shape=[2, 4], format='b')
        check(memoryview(a), vsize(base_struct + 2 * per_dim))
        a = ndarray(items, shape=[2, 2, 2], format='b')
        check(memoryview(a), vsize(base_struct + 3 * per_dim))
    def test_memoryview_struct_module(self):
        """memoryview item assignment must agree with struct.pack_into():
        for each candidate value either both accept it or both reject it."""
        # Helper types exposing only __int__ vs. only __index__.
        class INT(object):
            def __init__(self, val):
                self.val = val
            def __int__(self):
                return self.val
        class IDX(object):
            def __init__(self, val):
                self.val = val
            def __index__(self):
                return self.val
        def f():
            return 7
        # A grab-bag of values, most invalid for most formats.
        values = [INT(9), IDX(9), 2.2 + 3j, Decimal('-21.1'), 12.2,
            Fraction(5, 2), [1, 2, 3], {4, 5, 6}, {(7): 8}, (), (9,), True,
            False, None, NotImplemented, b'a', b'abc', bytearray(b'a'),
            bytearray(b'abc'), 'a', 'abc', 'a', 'abc', f, lambda x: x]
        for fmt, items, item in iter_format(10, 'memoryview'):
            ex = ndarray(items, shape=[10], format=fmt, flags=ND_WRITABLE)
            nd = ndarray(items, shape=[10], format=fmt, flags=ND_WRITABLE)
            m = memoryview(ex)
            # Sanity: packing the canonical item matches item assignment.
            struct.pack_into(fmt, nd, 0, item)
            m[0] = item
            self.assertEqual(m[0], nd[0])
            itemsize = struct.calcsize(fmt)
            if 'P' in fmt:
                # Pointer formats are skipped for the value sweep.
                continue
            for v in values:
                struct_err = None
                try:
                    struct.pack_into(fmt, nd, itemsize, v)
                except struct.error:
                    struct_err = struct.error
                mv_err = None
                try:
                    m[1] = v
                except (TypeError, ValueError) as e:
                    mv_err = e.__class__
                if struct_err or mv_err:
                    # Both must reject, though the exception types may
                    # differ between struct and memoryview.
                    self.assertIsNot(struct_err, None)
                    self.assertIsNot(mv_err, None)
                else:
                    self.assertEqual(m[1], nd[1])
def test_memoryview_cast_zero_strides(self):
ex = ndarray([1, 2, 3], shape=[3], strides=[0])
self.assertFalse(ex.c_contiguous)
msrc = memoryview(ex)
self.assertRaises(TypeError, msrc.cast, 'c')
    def test_memoryview_cast_invalid(self):
        """Invalid cast() calls: bad formats, bad shapes, non-byte
        source/destination pairs, multi-dimensional sources, and
        overflowing dimension products."""
        # Non-byte source cast to a non-byte destination: TypeError.
        for sfmt in NON_BYTE_FORMAT:
            sformat = '@' + sfmt if randrange(2) else sfmt
            ssize = struct.calcsize(sformat)
            for dfmt in NON_BYTE_FORMAT:
                dformat = '@' + dfmt if randrange(2) else dfmt
                dsize = struct.calcsize(dformat)
                ex = ndarray(list(range(32)), shape=[32 // ssize], format=
                    sformat)
                msrc = memoryview(ex)
                self.assertRaises(TypeError, msrc.cast, dfmt, [32 // dsize])
        # Destination format outside the memoryview subset, or neither
        # side a byte format.
        for sfmt, sitems, _ in iter_format(1):
            ex = ndarray(sitems, shape=[1], format=sfmt)
            msrc = memoryview(ex)
            for dfmt, _, _ in iter_format(1):
                # NOTE(review): 'dsize' here is left over from the loop
                # above (stale), so the requested shape is arbitrary —
                # confirm whether that is intentional.
                if not is_memoryview_format(dfmt):
                    self.assertRaises(ValueError, msrc.cast, dfmt, [32 //
                        dsize])
                elif not is_byte_format(sfmt) and not is_byte_format(dfmt):
                    self.assertRaises(TypeError, msrc.cast, dfmt, [32 // dsize]
                        )
        # Multi-dimensional source cannot be cast at all.
        size_h = struct.calcsize('h')
        size_d = struct.calcsize('d')
        ex = ndarray(list(range(2 * 2 * size_d)), shape=[2, 2, size_d],
            format='h')
        msrc = memoryview(ex)
        self.assertRaises(TypeError, msrc.cast, shape=[2, 2, size_h],
            format='d')
        # Invalid argument and format-string variants.
        ex = ndarray(list(range(120)), shape=[1, 2, 3, 4, 5])
        m = memoryview(ex)
        self.assertRaises(TypeError, m.cast)
        self.assertRaises(TypeError, m.cast, 1, 2, 3)
        self.assertRaises(TypeError, m.cast, {})
        self.assertRaises(ValueError, m.cast, 'X')
        self.assertRaises(ValueError, m.cast, '@X')
        self.assertRaises(ValueError, m.cast, '@XY')
        self.assertRaises(ValueError, m.cast, '=B')
        self.assertRaises(ValueError, m.cast, '!L')
        self.assertRaises(ValueError, m.cast, '<P')
        self.assertRaises(ValueError, m.cast, '>l')
        self.assertRaises(ValueError, m.cast, 'BI')
        self.assertRaises(ValueError, m.cast, 'xBI')
        # Compound struct formats are not implemented for item access.
        ex = ndarray([(1, 2), (3, 4)], shape=[2], format='II')
        m = memoryview(ex)
        self.assertRaises(NotImplementedError, m.__getitem__, 0)
        self.assertRaises(NotImplementedError, m.__setitem__, 0, 8)
        self.assertRaises(NotImplementedError, m.tolist)
        # Invalid shape arguments.
        ex = ndarray(list(range(120)), shape=[1, 2, 3, 4, 5])
        m = memoryview(ex)
        self.assertRaises(TypeError, m.cast, 'B', shape={})
        ex = ndarray(list(range(120)), shape=[2 * 3 * 4 * 5])
        m = memoryview(ex)
        self.assertRaises(OverflowError, m.cast, 'B', shape=[2 ** 64])
        self.assertRaises(ValueError, m.cast, 'B', shape=[-1])
        self.assertRaises(ValueError, m.cast, 'B', shape=[2, 3, 4, 5, 6, 7, -1]
            )
        self.assertRaises(ValueError, m.cast, 'B', shape=[2, 3, 4, 5, 6, 7, 0])
        self.assertRaises(TypeError, m.cast, 'B', shape=[2, 3, 4, 5, 6, 7, 'x']
            )
        # Shape product / byte length mismatches.
        ex = ndarray(list([(9) for _ in range(3 * 5 * 7 * 11)]), shape=[3,
            5, 7, 11])
        m = memoryview(ex)
        self.assertRaises(TypeError, m.cast, 'I', shape=[2, 3, 4, 5])
        nd = ndarray(list(range(128)), shape=[128], format='I')
        m = memoryview(nd)
        self.assertRaises(ValueError, m.cast, 'I', [1] * 128)
        ex = ndarray(list([(9) for _ in range(3 * 5 * 7 * 11)]), shape=[3 *
            5 * 7 * 11])
        m = memoryview(ex)
        self.assertRaises(TypeError, m.cast, 'I', shape=[2, 3, 4, 5])
        ex = ndarray(list([(9) for _ in range(3 * 5 * 7 * 11)]), shape=[3 *
            5 * 7 * 11])
        m = memoryview(ex)
        self.assertRaises(TypeError, m.cast, 'B', shape=[2, 3, 4, 5])
        # Overflow in length computations; branch on platform word size.
        nd = ndarray(list(range(128)), shape=[128], format='I')
        m1 = memoryview(nd)
        nd = ndarray(list(range(128)), shape=[128], format='B')
        m2 = memoryview(nd)
        if sys.maxsize == 2 ** 63 - 1:
            self.assertRaises(TypeError, m1.cast, 'B', [7, 7, 73, 127, 337,
                92737, 649657])
            self.assertRaises(ValueError, m1.cast, 'B', [2 ** 20, 2 ** 20,
                2 ** 10, 2 ** 10, 2 ** 3])
            self.assertRaises(ValueError, m2.cast, 'I', [2 ** 20, 2 ** 20,
                2 ** 10, 2 ** 10, 2 ** 1])
        else:
            self.assertRaises(TypeError, m1.cast, 'B', [1, 2147483647])
            self.assertRaises(ValueError, m1.cast, 'B', [2 ** 10, 2 ** 10,
                2 ** 5, 2 ** 5, 2 ** 1])
            self.assertRaises(ValueError, m2.cast, 'I', [2 ** 10, 2 ** 10,
                2 ** 5, 2 ** 3, 2 ** 1])
    def test_memoryview_cast(self):
        """Round-trip casts: any memoryview format -> byte view ('B', 'b'
        or 'c') -> back to the original format."""
        # (byte format, converter from exporter to the expected byte list)
        bytespec = ('B', lambda ex: list(ex.tobytes())), ('b', lambda ex: [
            (x - 256 if x > 127 else x) for x in list(ex.tobytes())]), ('c',
            lambda ex: [bytes(chr(x), 'latin-1') for x in list(ex.tobytes())])
        def iter_roundtrip(ex, m, items, fmt):
            # Cast to each byte representation and back again, verifying
            # the full buffer contract in each direction.
            srcsize = struct.calcsize(fmt)
            for bytefmt, to_bytelist in bytespec:
                m2 = m.cast(bytefmt)
                lst = to_bytelist(ex)
                self.verify(m2, obj=ex, itemsize=1, fmt=bytefmt, readonly=0,
                    ndim=1, shape=[31 * srcsize], strides=(1,), lst=lst,
                    cast=True)
                m3 = m2.cast(fmt)
                self.assertEqual(m3, ex)
                lst = ex.tolist()
                self.verify(m3, obj=ex, itemsize=srcsize, fmt=fmt, readonly
                    =0, ndim=1, shape=[31], strides=(srcsize,), lst=lst,
                    cast=True)
        # 0-d exporter -> 1-D byte view.
        srcsize = struct.calcsize('I')
        ex = ndarray(9, shape=[], format='I')
        destitems, destshape = cast_items(ex, 'B', 1)
        m = memoryview(ex)
        m2 = m.cast('B')
        self.verify(m2, obj=ex, itemsize=1, fmt='B', readonly=1, ndim=1,
            shape=destshape, strides=(1,), lst=destitems, cast=True)
        # 1-D byte exporter -> 0-d view.
        destsize = struct.calcsize('I')
        ex = ndarray([9] * destsize, shape=[destsize], format='B')
        destitems, destshape = cast_items(ex, 'I', destsize, shape=[])
        m = memoryview(ex)
        m2 = m.cast('I', shape=[])
        self.verify(m2, obj=ex, itemsize=destsize, fmt='I', readonly=1,
            ndim=0, shape=(), strides=(), lst=destitems, cast=True)
        # Round-trips from array.array and from ndarray exporters.
        for fmt, items, _ in iter_format(31, 'array'):
            ex = array.array(fmt, items)
            m = memoryview(ex)
            iter_roundtrip(ex, m, items, fmt)
        for fmt, items, _ in iter_format(31, 'memoryview'):
            ex = ndarray(items, shape=[31], format=fmt, flags=ND_WRITABLE)
            m = memoryview(ex)
            iter_roundtrip(ex, m, items, fmt)
    def test_memoryview_cast_1D_ND(self):
        """Cast 1-D views to random N-D shapes and back, verifying the
        result against a reference ndarray built from cast_items();
        also cover ctypes-backed exporters when ctypes is available."""
        for _tshape in gencastshapes():
            for char in fmtdict['@']:
                # Randomly alternate plain vs '@'-prefixed target formats.
                tfmt = ('', '@')[randrange(2)] + char
                tsize = struct.calcsize(tfmt)
                n = prod(_tshape) * tsize
                obj = 'memoryview' if is_byte_format(tfmt) else 'bytefmt'
                for fmt, items, _ in iter_format(n, obj):
                    size = struct.calcsize(fmt)
                    shape = [n] if n > 0 else []
                    tshape = _tshape + [size]
                    ex = ndarray(items, shape=shape, format=fmt)
                    m = memoryview(ex)
                    titems, tshape = cast_items(ex, tfmt, tsize, shape=tshape)
                    if titems is None:
                        # cast_items() says this combination is invalid.
                        self.assertRaises(TypeError, m.cast, tfmt, tshape)
                        continue
                    if titems == 'nan':
                        # NOTE(review): 'nan' sentinel from cast_items() —
                        # the case is skipped rather than compared.
                        continue
                    nd = ndarray(titems, shape=tshape, format=tfmt)
                    m2 = m.cast(tfmt, shape=tshape)
                    ndim = len(tshape)
                    strides = nd.strides
                    lst = nd.tolist()
                    self.verify(m2, obj=ex, itemsize=tsize, fmt=tfmt,
                        readonly=1, ndim=ndim, shape=tshape, strides=
                        strides, lst=lst, cast=True)
                    # Cast back to the original 1-D shape (implicitly
                    # and with an explicit shape argument).
                    m3 = m2.cast(fmt)
                    m4 = m2.cast(fmt, shape=shape)
                    ndim = len(shape)
                    strides = ex.strides
                    lst = ex.tolist()
                    self.verify(m3, obj=ex, itemsize=size, fmt=fmt,
                        readonly=1, ndim=ndim, shape=shape, strides=strides,
                        lst=lst, cast=True)
                    self.verify(m4, obj=ex, itemsize=size, fmt=fmt,
                        readonly=1, ndim=ndim, shape=shape, strides=strides,
                        lst=lst, cast=True)
        if ctypes:
            # ctypes exporters: a big-endian struct and a plain c_double.
            class BEPoint(ctypes.BigEndianStructure):
                _fields_ = [('x', ctypes.c_long), ('y', ctypes.c_double)]
            point = BEPoint(100, 200.1)
            m1 = memoryview(point)
            m2 = m1.cast('B')
            self.assertEqual(m2.obj, point)
            self.assertEqual(m2.itemsize, 1)
            self.assertEqual(m2.readonly, 0)
            self.assertEqual(m2.ndim, 1)
            self.assertEqual(m2.shape, (m2.nbytes,))
            self.assertEqual(m2.strides, (1,))
            self.assertEqual(m2.suboffsets, ())
            x = ctypes.c_double(1.2)
            m1 = memoryview(x)
            m2 = m1.cast('c')
            self.assertEqual(m2.obj, x)
            self.assertEqual(m2.itemsize, 1)
            self.assertEqual(m2.readonly, 0)
            self.assertEqual(m2.ndim, 1)
            self.assertEqual(m2.shape, (m2.nbytes,))
            self.assertEqual(m2.strides, (1,))
            self.assertEqual(m2.suboffsets, ())
def test_memoryview_tolist(self):
a = array.array('h', list(range(-6, 6)))
m = memoryview(a)
self.assertEqual(m, a)
self.assertEqual(m.tolist(), a.tolist())
a = a[2::3]
m = m[2::3]
self.assertEqual(m, a)
self.assertEqual(m.tolist(), a.tolist())
ex = ndarray(list(range(2 * 3 * 5 * 7 * 11)), shape=[11, 2, 7, 3, 5
], format='L')
m = memoryview(ex)
self.assertEqual(m.tolist(), ex.tolist())
ex = ndarray([(2, 5), (7, 11)], shape=[2], format='lh')
m = memoryview(ex)
self.assertRaises(NotImplementedError, m.tolist)
ex = ndarray([b'12345'], shape=[1], format='s')
m = memoryview(ex)
self.assertRaises(NotImplementedError, m.tolist)
ex = ndarray([b'a', b'b', b'c', b'd', b'e', b'f'], shape=[2, 3],
format='s')
m = memoryview(ex)
self.assertRaises(NotImplementedError, m.tolist)
def test_memoryview_repr(self):
m = memoryview(bytearray(9))
r = m.__repr__()
self.assertTrue(r.startswith('<memory'))
m.release()
r = m.__repr__()
self.assertTrue(r.startswith('<released'))
def test_memoryview_sequence(self):
for fmt in ('d', 'f'):
inf = float(1e1000)
ex = array.array(fmt, [1.0, inf, 3.0])
m = memoryview(ex)
self.assertIn(1.0, m)
self.assertIn(1e1000, m)
self.assertIn(3.0, m)
ex = ndarray(9.0, [], format='f')
m = memoryview(ex)
self.assertRaises(TypeError, eval, '9.0 in m', locals())
@contextlib.contextmanager
def assert_out_of_bounds_error(self, dim):
with self.assertRaises(IndexError) as cm:
yield
self.assertEqual(str(cm.exception),
'index out of bounds on dimension %d' % (dim,))
    def test_memoryview_index(self):
        """__getitem__ with integers and tuples: 0-d, 1-D and 2-D cases
        plus the error conditions."""
        # 0-d: only () and ... are valid indices.
        ex = ndarray(12.5, shape=[], format='d')
        m = memoryview(ex)
        self.assertEqual(m[()], 12.5)
        self.assertEqual(m[...], m)
        self.assertEqual(m[...], ex)
        self.assertRaises(TypeError, m.__getitem__, 0)
        # Multi-item struct format: indexing not implemented.
        ex = ndarray((1, 2, 3), shape=[], format='iii')
        m = memoryview(ex)
        self.assertRaises(NotImplementedError, m.__getitem__, ())
        # 1-D bounds and index-type errors.
        ex = ndarray(list(range(7)), shape=[7], flags=ND_WRITABLE)
        m = memoryview(ex)
        self.assertRaises(IndexError, m.__getitem__, 2 ** 64)
        self.assertRaises(TypeError, m.__getitem__, 2.0)
        self.assertRaises(TypeError, m.__getitem__, 0.0)
        self.assertRaises(IndexError, m.__getitem__, -8)
        self.assertRaises(IndexError, m.__getitem__, 8)
        # 2-D tuple indexing, including negative indices.
        ex = ndarray(list(range(12)), shape=[3, 4], flags=ND_WRITABLE)
        m = memoryview(ex)
        self.assertEqual(m[0, 0], 0)
        self.assertEqual(m[2, 0], 8)
        self.assertEqual(m[2, 3], 11)
        self.assertEqual(m[-1, -1], 11)
        self.assertEqual(m[-3, -4], 0)
        # Out-of-bounds errors must name the offending dimension.
        for index in (3, -4):
            with self.assert_out_of_bounds_error(dim=1):
                m[index, 0]
        for index in (4, -5):
            with self.assert_out_of_bounds_error(dim=2):
                m[0, index]
        self.assertRaises(IndexError, m.__getitem__, (2 ** 64, 0))
        self.assertRaises(IndexError, m.__getitem__, (0, 2 ** 64))
        self.assertRaises(TypeError, m.__getitem__, (0, 0, 0))
        self.assertRaises(TypeError, m.__getitem__, (0.0, 0.0))
        # Multi-dimensional views: () and scalar indexing not implemented.
        self.assertRaises(NotImplementedError, m.__getitem__, ())
        self.assertRaises(NotImplementedError, m.__getitem__, 0)
def test_memoryview_assign(self):
    """Item assignment through a memoryview: 0-d views, read-only
    buffers, every native format, casts, and the error cases."""
    # 0-d writable view: () and ... assign the single item
    ex = ndarray(12.5, shape=[], format='f', flags=ND_WRITABLE)
    m = memoryview(ex)
    m[()] = 22.5
    self.assertEqual(m[()], 22.5)
    m[...] = 23.5
    self.assertEqual(m[()], 23.5)
    self.assertRaises(TypeError, m.__setitem__, 0, 24.7)
    # read-only exporter: assignment raises TypeError
    ex = ndarray(list(range(7)), shape=[7])
    m = memoryview(ex)
    self.assertRaises(TypeError, m.__setitem__, 2, 10)
    # bad index types / out-of-range indices
    ex = ndarray(list(range(7)), shape=[7], flags=ND_WRITABLE)
    m = memoryview(ex)
    self.assertRaises(IndexError, m.__setitem__, 2 ** 64, 9)
    self.assertRaises(TypeError, m.__setitem__, 2.0, 10)
    self.assertRaises(TypeError, m.__setitem__, 0.0, 11)
    self.assertRaises(IndexError, m.__setitem__, -8, 20)
    self.assertRaises(IndexError, m.__setitem__, 8, 25)
    # every single-character native format round-trips an assignment
    for fmt in fmtdict['@']:
        if fmt == 'c' or fmt == '?':
            continue  # bytes/bool need special values; handled below
        ex = ndarray([1, 2, 3], shape=[3], format=fmt, flags=ND_WRITABLE)
        m = memoryview(ex)
        i = randrange(-3, 3)  # random (possibly negative) index
        m[i] = 8
        self.assertEqual(m[i], 8)
        self.assertEqual(m[i], ex[i])
    # 'c': assigning a length-1 bytes object
    ex = ndarray([b'1', b'2', b'3'], shape=[3], format='c', flags=ND_WRITABLE)
    m = memoryview(ex)
    m[2] = b'9'
    self.assertEqual(m[2], b'9')
    # '?': assigning a bool
    ex = ndarray([True, False, True], shape=[3], format='?', flags=ND_WRITABLE)
    m = memoryview(ex)
    m[1] = True
    self.assertEqual(m[1], True)
    # 'c' requires bytes, not an int
    nd = ndarray([b'x'], shape=[1], format='c', flags=ND_WRITABLE)
    m = memoryview(nd)
    self.assertRaises(TypeError, m.__setitem__, 0, 100)
    # casts: values outside the target format's range are rejected
    ex = ndarray(list(range(120)), shape=[1, 2, 3, 4, 5], flags=ND_WRITABLE)
    m1 = memoryview(ex)
    for fmt, _range in fmtdict['@'].items():
        if fmt == '?':
            continue  # bool cast not exercised here
        if fmt == 'c':
            continue  # bytes cast exercised separately below
        m2 = m1.cast(fmt)
        lo, hi = _range
        if fmt == 'd' or fmt == 'f':
            # floats: use values beyond the double range
            lo, hi = -2 ** 1024, 2 ** 1024
        if fmt != 'P':
            self.assertRaises(ValueError, m2.__setitem__, 0, lo - 1)
            self.assertRaises(TypeError, m2.__setitem__, 0, 'xyz')
        self.assertRaises(ValueError, m2.__setitem__, 0, hi)
    # 'c' cast: only single bytes are accepted
    m2 = m1.cast('c')
    self.assertRaises(ValueError, m2.__setitem__, 0, b'\xff\xff')
    # padded / array struct formats: item assignment not implemented
    ex = ndarray(list(range(1)), shape=[1], format='xL', flags=ND_WRITABLE)
    m = memoryview(ex)
    self.assertRaises(NotImplementedError, m.__setitem__, 0, 1)
    ex = ndarray([b'12345'], shape=[1], format='s', flags=ND_WRITABLE)
    m = memoryview(ex)
    self.assertRaises(NotImplementedError, m.__setitem__, 0, 1)
    # multi-dimensional assignment with tuple indices
    ex = ndarray(list(range(12)), shape=[3, 4], flags=ND_WRITABLE)
    m = memoryview(ex)
    m[0, 1] = 42
    self.assertEqual(ex[0][1], 42)  # write is visible in the exporter
    m[-1, -1] = 43
    self.assertEqual(ex[2][3], 43)
    for index in (3, -4):
        with self.assert_out_of_bounds_error(dim=1):
            m[index, 0] = 0
    for index in (4, -5):
        with self.assert_out_of_bounds_error(dim=2):
            m[0, index] = 0
    self.assertRaises(IndexError, m.__setitem__, (2 ** 64, 0), 0)
    self.assertRaises(IndexError, m.__setitem__, (0, 2 ** 64), 0)
    self.assertRaises(TypeError, m.__setitem__, (0, 0, 0), 0)
    self.assertRaises(TypeError, m.__setitem__, (0.0, 0.0), 0)
    # partial (sub-array) assignment is not implemented
    self.assertRaises(NotImplementedError, m.__setitem__, 0, [2, 3])
def test_memoryview_slice(self):
    """Slicing a memoryview: invalid steps, multi-dimensional slice
    errors, and slice assignment between non-contiguous views."""
    ex = ndarray(list(range(12)), shape=[12], flags=ND_WRITABLE)
    m = memoryview(ex)
    # zero step is invalid
    self.assertRaises(ValueError, m.__getitem__, slice(0, 2, 0))
    self.assertRaises(ValueError, m.__setitem__, slice(0, 2, 0),
                      bytearray([1, 2]))
    # empty tuple on a 1-D view
    self.assertRaises(NotImplementedError, m.__getitem__, ())
    # multi-dimensional slicing is not implemented
    ex = ndarray(list(range(12)), shape=[12], flags=ND_WRITABLE)
    m = memoryview(ex)
    self.assertRaises(NotImplementedError, m.__getitem__,
                      (slice(0, 2, 1), slice(0, 2, 1)))
    self.assertRaises(NotImplementedError, m.__setitem__,
                      (slice(0, 2, 1), slice(0, 2, 1)), bytearray([1, 2]))
    # invalid slice tuple members
    self.assertRaises(TypeError, m.__getitem__, (slice(0, 2, 1), {}))
    self.assertRaises(TypeError, m.__setitem__, (slice(0, 2, 1), {}),
                      bytearray([1, 2]))
    # rvalue must support the buffer interface
    self.assertRaises(TypeError, m.__setitem__, slice(0, 1, 1), [1])
    # slice assignment on reversed / strided exporters, plain and
    # PIL-style (suboffsets); writes through m1 are visible in ex1
    # because m1 views ex1's buffer
    for flags in (0, ND_PIL):
        ex1 = ndarray(list(range(12)), shape=[12], strides=[-1],
                      offset=11, flags=ND_WRITABLE | flags)
        ex2 = ndarray(list(range(24)), shape=[12], strides=[2], flags=flags)
        m1 = memoryview(ex1)
        m2 = memoryview(ex2)
        ex1[2:5] = ex1[2:5]
        m1[2:5] = m2[2:5]
        self.assertEqual(m1, ex1)
        self.assertEqual(m2, ex2)
        # assignment through a slice of a slice
        ex1[1:3][::-1] = ex2[0:2][::1]
        m1[1:3][::-1] = m2[0:2][::1]
        self.assertEqual(m1, ex1)
        self.assertEqual(m2, ex2)
        # overlapping source and destination within the same buffer
        ex1[4:1:-2][::-1] = ex1[1:4:2][::1]
        m1[4:1:-2][::-1] = m1[1:4:2][::1]
        self.assertEqual(m1, ex1)
        self.assertEqual(m2, ex2)
def test_memoryview_array(self):
    """Cross-check memoryview behavior against array.array for every
    array format, including slice assignment that would resize."""
    def cmptest(testcase, a, b, m, singleitem):
        # item-by-item: writing through the array and through the view
        # of its twin must stay in sync
        for i, _ in enumerate(a):
            ai = a[i]
            mi = m[i]
            testcase.assertEqual(ai, mi)
            a[i] = singleitem
            if singleitem != ai:
                # a now differs from its twin b and from the view m
                testcase.assertNotEqual(a, m)
                testcase.assertNotEqual(a, b)
            else:
                testcase.assertEqual(a, m)
                testcase.assertEqual(a, b)
            m[i] = singleitem
            testcase.assertEqual(a, m)
            testcase.assertEqual(b, m)
            # restore the original values for the next iteration
            a[i] = ai
            m[i] = mi
    for n in range(1, 5):
        for fmt, items, singleitem in iter_format(n, 'array'):
            for lslice in genslices(n):
                for rslice in genslices(n):
                    # a is the reference; m views b's buffer
                    a = array.array(fmt, items)
                    b = array.array(fmt, items)
                    m = memoryview(b)
                    self.assertEqual(m, a)
                    self.assertEqual(m.tolist(), a.tolist())
                    self.assertEqual(m.tobytes(), a.tobytes())
                    self.assertEqual(len(m), len(a))
                    cmptest(self, a, b, m, singleitem)
                    # slice assignment: record how array.array behaves
                    array_err = None
                    have_resize = None
                    try:
                        al = a[lslice]
                        ar = a[rslice]
                        a[lslice] = a[rslice]
                        have_resize = len(al) != len(ar)
                    except Exception as e:
                        array_err = e.__class__
                    # ... then perform the same assignment on the view
                    m_err = None
                    try:
                        m[lslice] = m[rslice]
                    except Exception as e:
                        m_err = e.__class__
                    if have_resize:
                        # a memoryview can never change the buffer size
                        self.assertIs(m_err, ValueError)
                    elif m_err or array_err:
                        # both paths must fail with the same exception
                        self.assertIs(m_err, array_err)
                    else:
                        self.assertEqual(m, a)
                        self.assertEqual(m.tolist(), a.tolist())
                        self.assertEqual(m.tobytes(), a.tobytes())
                        cmptest(self, a, b, m, singleitem)
def test_memoryview_compare_special_cases(self):
    """Comparison corner cases: ordering, released views, NaN,
    'u'-typecode arrays and non-native ctypes layouts."""
    a = array.array('L', [1, 2, 3])
    b = array.array('L', [1, 2, 7])
    # ordering comparisons are not implemented for memoryview
    v = memoryview(a)
    w = memoryview(b)
    for attr in ('__lt__', '__le__', '__gt__', '__ge__'):
        self.assertIs(getattr(v, attr)(w), NotImplemented)
        self.assertIs(getattr(a, attr)(v), NotImplemented)
    # a released view is equal to itself, unequal to everything else
    v = memoryview(a)
    v.release()
    self.assertEqual(v, v)
    self.assertNotEqual(v, a)
    self.assertNotEqual(a, v)
    v = memoryview(a)
    w = memoryview(a)
    w.release()
    self.assertNotEqual(v, w)
    self.assertNotEqual(w, v)
    # no fallback comparison against arbitrary sequences
    v = memoryview(a)
    self.assertNotEqual(v, [1, 2, 3])
    # NaN compares unequal to itself, so the views are unequal
    nd = ndarray([(0, 0)], shape=[1], format='l x d x', flags=ND_WRITABLE)
    nd[0] = -1, float('nan')
    self.assertNotEqual(memoryview(nd), nd)
    # 'u'-typecode arrays compare unequal to their own views
    a = array.array('u', 'xyz')
    v = memoryview(a)
    self.assertNotEqual(a, v)
    self.assertNotEqual(v, a)
    if ctypes:
        # big-endian ctypes structs expose a format the memoryview
        # comparison (and tolist) cannot interpret
        class BEPoint(ctypes.BigEndianStructure):
            _fields_ = [('x', ctypes.c_long), ('y', ctypes.c_long)]
        point = BEPoint(100, 200)
        a = memoryview(point)
        b = memoryview(point)
        self.assertNotEqual(a, b)
        self.assertNotEqual(a, point)
        self.assertNotEqual(point, a)
        self.assertRaises(NotImplementedError, a.tolist)
def test_memoryview_compare_ndim_zero(self):
    """Equality of zero-dimensional (scalar) memoryviews."""
    # equal scalar values, compatible formats
    nd1 = ndarray(1729, shape=[], format='@L')
    nd2 = ndarray(1729, shape=[], format='L', flags=ND_WRITABLE)
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, w)
    self.assertEqual(w, v)
    self.assertEqual(v, nd2)
    self.assertEqual(nd2, v)
    self.assertEqual(w, nd1)
    self.assertEqual(nd1, w)
    self.assertFalse(v.__ne__(w))
    self.assertFalse(w.__ne__(v))
    # mutate one side: every comparison flips
    w[()] = 1728
    self.assertNotEqual(v, w)
    self.assertNotEqual(w, v)
    self.assertNotEqual(v, nd2)
    self.assertNotEqual(nd2, v)
    self.assertNotEqual(w, nd1)
    self.assertNotEqual(nd1, w)
    self.assertFalse(v.__eq__(w))
    self.assertFalse(w.__eq__(v))
    # 1-D PIL-style arrays: equal until one element is changed
    nd = ndarray(list(range(12)), shape=[12], flags=ND_WRITABLE | ND_PIL)
    ex = ndarray(list(range(12)), shape=[12], flags=ND_WRITABLE | ND_PIL)
    m = memoryview(ex)
    self.assertEqual(m, nd)
    m[9] = 100
    self.assertNotEqual(m, nd)
    # 0-d struct scalars: same values, different int widths -> equal
    nd1 = ndarray((1729, 1.2, b'12345'), shape=[], format='Lf5s')
    nd2 = ndarray((1729, 1.2, b'12345'), shape=[], format='hf5s',
                  flags=ND_WRITABLE)
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, w)
    self.assertEqual(w, v)
    self.assertEqual(v, nd2)
    self.assertEqual(nd2, v)
    self.assertEqual(w, nd1)
    self.assertEqual(nd1, w)
    # 0-d struct scalars: differing first component -> unequal
    nd1 = ndarray((1729, 1.2, b'12345'), shape=[], format='Lf5s')
    nd2 = ndarray((-1729, 1.2, b'12345'), shape=[], format='hf5s',
                  flags=ND_WRITABLE)
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertNotEqual(v, w)
    self.assertNotEqual(w, v)
    self.assertNotEqual(v, nd2)
    self.assertNotEqual(nd2, v)
    self.assertNotEqual(w, nd1)
    self.assertNotEqual(nd1, w)
    # each view still equals its own exporter
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
def test_memoryview_compare_ndim_one(self):
    """Equality of one-dimensional memoryviews."""
    # same format, last element differs: unequal
    nd1 = ndarray([-529, 576, -625, 676, -729], shape=[5], format='@h')
    nd2 = ndarray([-529, 576, -625, 676, 729], shape=[5], format='@h')
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertNotEqual(v, nd2)
    self.assertNotEqual(w, nd1)
    self.assertNotEqual(v, w)
    # different byte order and item size, still differing values
    nd1 = ndarray([-529, 576, -625, 676, -729], shape=[5], format='<i')
    nd2 = ndarray([-529, 576, -625, 676, 729], shape=[5], format='>h')
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertNotEqual(v, nd2)
    self.assertNotEqual(w, nd1)
    self.assertNotEqual(v, w)
    # a step-2 slice of the longer array equals the shorter one
    nd1 = ndarray([-529, -625, -729], shape=[3], format='@h')
    nd2 = ndarray([-529, 576, -625, 676, -729], shape=[5], format='@h')
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd2[::2])
    self.assertEqual(w[::2], nd1)
    self.assertEqual(v, w[::2])
    self.assertEqual(v[::-1], w[::-2])
    # same, mixing byte order and item size
    nd1 = ndarray([-529, -625, -729], shape=[3], format='!h')
    nd2 = ndarray([-529, 576, -625, 676, -729], shape=[5], format='<l')
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd2[::2])
    self.assertEqual(w[::2], nd1)
    self.assertEqual(v, w[::2])
    self.assertEqual(v[::-1], w[::-2])
    # same, with a PIL-style (suboffsets) exporter
    nd1 = ndarray([-529, -625, -729], shape=[3], format='@h')
    nd2 = ndarray([-529, 576, -625, 676, -729], shape=[5], format='@h',
                  flags=ND_PIL)
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd2[::2])
    self.assertEqual(w[::2], nd1)
    self.assertEqual(v, w[::2])
    self.assertEqual(v[::-1], w[::-2])
    # same, with a zero-repeat pad in one format
    nd1 = ndarray([-529, -625, -729], shape=[3], format='h 0c')
    nd2 = ndarray([-529, 576, -625, 676, -729], shape=[5], format='> h',
                  flags=ND_PIL)
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd2[::2])
    self.assertEqual(w[::2], nd1)
    self.assertEqual(v, w[::2])
    self.assertEqual(v[::-1], w[::-2])
def test_memoryview_compare_zero_shape(self):
    """Arrays with shape [0] are empty, so they compare equal to each
    other regardless of the (unreachable) initializer values or of the
    formats involved."""
    cases = (
        (([900, 961], '@h'), ([-900, -961], '@h')),
        (([900, 961], '= h0c'), ([-900, -961], '@ i')),
    )
    for (items1, fmt1), (items2, fmt2) in cases:
        first = ndarray(items1, shape=[0], format=fmt1)
        second = ndarray(items2, shape=[0], format=fmt2)
        v = memoryview(first)
        w = memoryview(second)
        # each view equals its own exporter ...
        self.assertEqual(v, first)
        self.assertEqual(w, second)
        # ... and, being empty, everything cross-compares equal
        self.assertEqual(v, second)
        self.assertEqual(w, first)
        self.assertEqual(v, w)
def test_memoryview_compare_zero_strides(self):
    """A contiguous array compares equal to a strides-[0] array (one
    item repeated across the shape) holding the same logical values."""
    cases = (
        ([900] * 4, '@L', [900], 'L'),
        ([(900, 900)] * 4, '@ Li', [(900, 900)], '!L h'),
    )
    for full_items, full_fmt, rep_items, rep_fmt in cases:
        full = ndarray(full_items, shape=[4], format=full_fmt)
        repeated = ndarray(rep_items, shape=[4], strides=[0], format=rep_fmt)
        v = memoryview(full)
        w = memoryview(repeated)
        # each view equals its own exporter ...
        self.assertEqual(v, full)
        self.assertEqual(w, repeated)
        # ... and comparison is by value, so all cross-compare equal
        self.assertEqual(v, repeated)
        self.assertEqual(w, full)
        self.assertEqual(v, w)
def test_memoryview_compare_random_formats(self):
    """Views of randomly generated items/formats must equal their
    exporters, both contiguous and strided, plain and PIL-style."""
    n = 10

    def check(fmt, items):
        for flags in (0, ND_PIL):
            exporter = ndarray(items, shape=[n], format=fmt, flags=flags)
            view = memoryview(exporter)
            self.assertEqual(view, exporter)
            # a negative-step slice must also compare equal to its view
            sliced = exporter[::-3]
            view = memoryview(sliced)
            self.assertEqual(view, sliced)

    # every native memoryview-supported format character ...
    for char in fmtdict['@m']:
        fmt, items, singleitem = randitems(n, 'memoryview', '@', char)
        check(fmt, items)
    # ... plus 100 fully random formats
    for _ in range(100):
        fmt, items, singleitem = randitems(n)
        check(fmt, items)
def test_memoryview_compare_multidim_c(self):
    """Equality of multi-dimensional C-contiguous arrays."""
    # different values: unequal
    nd1 = ndarray(list(range(-15, 15)), shape=[3, 2, 5], format='@h')
    nd2 = ndarray(list(range(0, 30)), shape=[3, 2, 5], format='@h')
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertNotEqual(v, nd2)
    self.assertNotEqual(w, nd1)
    self.assertNotEqual(v, w)
    # struct formats, first component differs: unequal
    nd1 = ndarray([(0, 1, 2)] * 30, shape=[3, 2, 5], format='=f q xxL')
    nd2 = ndarray([(-1.2, 1, 2)] * 30, shape=[3, 2, 5], format='< f 2Q')
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertNotEqual(v, nd2)
    self.assertNotEqual(w, nd1)
    self.assertNotEqual(v, w)
    # same values, different shape: unequal
    nd1 = ndarray(list(range(30)), shape=[2, 3, 5], format='L')
    nd2 = ndarray(list(range(30)), shape=[3, 2, 5], format='L')
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertNotEqual(v, nd2)
    self.assertNotEqual(w, nd1)
    self.assertNotEqual(v, w)
    # struct formats, same values, different shape: unequal
    nd1 = ndarray([(0, 1, 2)] * 21, shape=[3, 7], format='! b B xL')
    nd2 = ndarray([(0, 1, 2)] * 21, shape=[7, 3], format='= Qx l xxL')
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertNotEqual(v, nd2)
    self.assertNotEqual(w, nd1)
    self.assertNotEqual(v, w)
    # same values and shape, formats differ only in signedness: equal
    nd1 = ndarray(list(range(30)), shape=[2, 3, 5], format='L')
    nd2 = ndarray(list(range(30)), shape=[2, 3, 5], format='l')
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertEqual(v, nd2)
    self.assertEqual(w, nd1)
    self.assertEqual(v, w)
def test_memoryview_compare_multidim_fortran(self):
    """Equality of multi-dimensional Fortran-ordered arrays."""
    # different values: unequal
    nd1 = ndarray(list(range(-15, 15)), shape=[5, 2, 3], format='@h',
                  flags=ND_FORTRAN)
    nd2 = ndarray(list(range(0, 30)), shape=[5, 2, 3], format='@h',
                  flags=ND_FORTRAN)
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertNotEqual(v, nd2)
    self.assertNotEqual(w, nd1)
    self.assertNotEqual(v, w)
    # struct formats with the two components swapped: unequal
    nd1 = ndarray([(2 ** 64 - 1, -1)] * 6, shape=[2, 3], format='=Qq',
                  flags=ND_FORTRAN)
    nd2 = ndarray([(-1, 2 ** 64 - 1)] * 6, shape=[2, 3], format='=qQ',
                  flags=ND_FORTRAN)
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertNotEqual(v, nd2)
    self.assertNotEqual(w, nd1)
    self.assertNotEqual(v, w)
    # same values, different shape: unequal
    nd1 = ndarray(list(range(-15, 15)), shape=[2, 3, 5], format='l',
                  flags=ND_FORTRAN)
    nd2 = ndarray(list(range(-15, 15)), shape=[3, 2, 5], format='l',
                  flags=ND_FORTRAN)
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertNotEqual(v, nd2)
    self.assertNotEqual(w, nd1)
    self.assertNotEqual(v, w)
    # zero-repeat prefix in the format, different shape: unequal
    nd1 = ndarray(list(range(-15, 15)), shape=[2, 3, 5], format='0ll',
                  flags=ND_FORTRAN)
    nd2 = ndarray(list(range(-15, 15)), shape=[3, 2, 5], format='l',
                  flags=ND_FORTRAN)
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertNotEqual(v, nd2)
    self.assertNotEqual(w, nd1)
    self.assertNotEqual(v, w)
    # same values and shape, different item sizes: equal
    nd1 = ndarray(list(range(30)), shape=[5, 2, 3], format='@h',
                  flags=ND_FORTRAN)
    nd2 = ndarray(list(range(30)), shape=[5, 2, 3], format='@b',
                  flags=ND_FORTRAN)
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertEqual(v, nd2)
    self.assertEqual(w, nd1)
    self.assertEqual(v, w)
def test_memoryview_compare_multidim_mixed(self):
    """Comparisons mixing C order, Fortran order, slices and struct
    formats."""
    # same values, C vs Fortran layout: equal
    lst1 = list(range(-15, 15))
    lst2 = transpose(lst1, [3, 2, 5])
    nd1 = ndarray(lst1, shape=[3, 2, 5], format='@l')
    nd2 = ndarray(lst2, shape=[3, 2, 5], format='l', flags=ND_FORTRAN)
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertEqual(v, w)
    # struct formats with different int widths, same values: equal
    lst1 = [(-3.3, -22, b'x')] * 30
    lst1[5] = -2.2, -22, b'x'
    lst2 = transpose(lst1, [3, 2, 5])
    nd1 = ndarray(lst1, shape=[3, 2, 5], format='d b c')
    nd2 = ndarray(lst2, shape=[3, 2, 5], format='d h c', flags=ND_FORTRAN)
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertEqual(v, w)
    # different slices of equal-valued arrays: unequal
    ex1 = ndarray(list(range(40)), shape=[5, 8], format='@I')
    nd1 = ex1[3:1:-1, ::-2]
    ex2 = ndarray(list(range(40)), shape=[5, 8], format='I')
    nd2 = ex2[1:3:1, ::-2]
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertNotEqual(v, nd2)
    self.assertNotEqual(w, nd1)
    self.assertNotEqual(v, w)
    # equivalent slices, different byte order, same values: equal
    ex1 = ndarray([(2 ** 31 - 1, -2 ** 31)] * 22, shape=[11, 2],
                  format='=ii')
    nd1 = ex1[3:1:-1, ::-2]
    ex2 = ndarray([(2 ** 31 - 1, -2 ** 31)] * 22, shape=[11, 2],
                  format='>ii')
    nd2 = ex2[1:3:1, ::-2]
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertEqual(v, nd2)
    self.assertEqual(w, nd1)
    self.assertEqual(v, w)
    # different shapes: unequal.
    # BUGFIX: the original bound the freshly created array to ``nd2``
    # and then immediately overwrote ``nd2`` with a slice of the stale
    # ``ex2`` from the block above, so the new array was never used.
    # Bind it to ``ex2`` so the slice below actually refers to it; the
    # shapes still differ, so the assertions are unchanged.
    ex1 = ndarray(list(range(30)), shape=[2, 3, 5], format='b')
    nd1 = ex1[1:3, ::-2]
    ex2 = ndarray(list(range(30)), shape=[3, 2, 5], format='b')
    nd2 = ex2[1:3, ::-2]
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertNotEqual(v, nd2)
    self.assertNotEqual(w, nd1)
    self.assertNotEqual(v, w)
    # different shapes and signedness: unequal (same BUGFIX as above)
    ex1 = ndarray(list(range(30)), shape=[2, 3, 5], format='B')
    nd1 = ex1[1:3, ::-2]
    ex2 = ndarray(list(range(30)), shape=[3, 2, 5], format='b')
    nd2 = ex2[1:3, ::-2]
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertNotEqual(v, nd2)
    self.assertNotEqual(w, nd1)
    self.assertNotEqual(v, w)
    # NOTE(review): left exactly as in the original.  Rebinding the new
    # array to ``ex2`` here would give nd1 and nd2 the same shape and
    # values ('b3s' vs 'i3s' compare by value, cf. the 'd b c'/'d h c'
    # case above), contradicting the assertNotEqual checks below, so
    # the intended operand is unclear -- confirm upstream.
    ex1 = ndarray([(2, b'123')] * 30, shape=[5, 3, 2], format='b3s')
    nd1 = ex1[1:3, ::-2]
    nd2 = ndarray([(2, b'123')] * 30, shape=[5, 3, 2], format='i3s')
    nd2 = ex2[1:3, ::-2]
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertNotEqual(v, nd2)
    self.assertNotEqual(w, nd1)
    self.assertNotEqual(v, w)
def test_memoryview_compare_multidim_zero_shape(self):
    """Arrays with a zero in the shape are empty, but shape still
    participates in equality: differing shapes are unequal."""
    nd1 = ndarray(list(range(30)), shape=[0, 3, 2], format='i')
    nd2 = ndarray(list(range(30)), shape=[5, 0, 2], format='@i')
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertNotEqual(v, nd2)
    self.assertNotEqual(w, nd1)
    self.assertNotEqual(v, w)
    # NOTE(review): this second block is an exact duplicate of the one
    # above; it was presumably meant to exercise a different (e.g.
    # struct) format -- confirm against the upstream version.
    nd1 = ndarray(list(range(30)), shape=[0, 3, 2], format='i')
    nd2 = ndarray(list(range(30)), shape=[5, 0, 2], format='@i')
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertNotEqual(v, nd2)
    self.assertNotEqual(w, nd1)
    self.assertNotEqual(v, w)
def test_memoryview_compare_multidim_zero_strides(self):
    """Multi-dimensional arrays with all-zero strides (a single item
    repeated over the whole shape) compare equal to their fully
    materialized counterparts."""
    full = ndarray([900] * 80, shape=[4, 5, 4], format='@L')
    repeated = ndarray([900], shape=[4, 5, 4], strides=[0, 0, 0],
                       format='L')
    v = memoryview(full)
    w = memoryview(repeated)
    self.assertEqual(v, full)
    self.assertEqual(w, repeated)
    self.assertEqual(v, repeated)
    self.assertEqual(w, full)
    self.assertEqual(v, w)
    # tolist() materializes the repeated view identically
    self.assertEqual(v.tolist(), w.tolist())
    # the same with a two-component struct format and mixed byte order
    full = ndarray([(1, 2)] * 10, shape=[2, 5], format='=lQ')
    repeated = ndarray([(1, 2)], shape=[2, 5], strides=[0, 0],
                       format='<lQ')
    v = memoryview(full)
    w = memoryview(repeated)
    self.assertEqual(v, full)
    self.assertEqual(w, repeated)
    self.assertEqual(v, repeated)
    self.assertEqual(w, full)
    self.assertEqual(v, w)
def test_memoryview_compare_multidim_suboffsets(self):
    """Comparisons involving PIL-style (suboffset) arrays."""
    # different slices of equal-valued arrays: unequal
    ex1 = ndarray(list(range(40)), shape=[5, 8], format='@I')
    nd1 = ex1[3:1:-1, ::-2]
    ex2 = ndarray(list(range(40)), shape=[5, 8], format='I', flags=ND_PIL)
    nd2 = ex2[1:3:1, ::-2]
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertNotEqual(v, nd2)
    self.assertNotEqual(w, nd1)
    self.assertNotEqual(v, w)
    # equivalent slices, opposite byte order, same values: equal
    ex1 = ndarray([(2 ** 64 - 1, -1)] * 40, shape=[5, 8], format='=Qq',
                  flags=ND_WRITABLE)
    ex1[2][7] = 1, -2
    nd1 = ex1[3:1:-1, ::-2]
    ex2 = ndarray([(2 ** 64 - 1, -1)] * 40, shape=[5, 8], format='>Qq',
                  flags=ND_PIL | ND_WRITABLE)
    ex2[2][7] = 1, -2
    nd2 = ex2[1:3:1, ::-2]
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertEqual(v, nd2)
    self.assertEqual(w, nd1)
    self.assertEqual(v, w)
    # different shapes: unequal.
    # BUGFIX: the original bound the freshly created array to ``nd2``
    # and then immediately overwrote ``nd2`` with a slice of the stale
    # ``ex2`` above, discarding the new array.  Bind it to ``ex2``
    # instead; the shapes still differ, so the assertions are
    # unchanged.
    ex1 = ndarray(list(range(30)), shape=[2, 3, 5], format='b',
                  flags=ND_PIL)
    nd1 = ex1[1:3, ::-2]
    ex2 = ndarray(list(range(30)), shape=[3, 2, 5], format='b')
    nd2 = ex2[1:3, ::-2]
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertNotEqual(v, nd2)
    self.assertNotEqual(w, nd1)
    self.assertNotEqual(v, w)
    # different shapes, struct format: unequal
    ex1 = ndarray([(2 ** 8 - 1, -1)] * 40, shape=[2, 3, 5], format='Bb',
                  flags=ND_PIL | ND_WRITABLE)
    nd1 = ex1[1:2, ::-2]
    ex2 = ndarray([(2 ** 8 - 1, -1)] * 40, shape=[3, 2, 5], format='Bb')
    nd2 = ex2[1:2, ::-2]
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertNotEqual(v, nd2)
    self.assertNotEqual(w, nd1)
    self.assertNotEqual(v, w)
    # same values, formats differing only in signedness: equal
    ex1 = ndarray(list(range(30)), shape=[5, 3, 2], format='i',
                  flags=ND_PIL)
    nd1 = ex1[1:3, ::-2]
    ex2 = ndarray(list(range(30)), shape=[5, 3, 2], format='@I',
                  flags=ND_PIL)
    nd2 = ex2[1:3, ::-2]
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertEqual(v, nd2)
    self.assertEqual(w, nd1)
    self.assertEqual(v, w)
    # NOTE(review): the second assignment below modifies ``ex1`` again,
    # leaving ``ex2`` untouched.  That is consistent with the
    # assertNotEqual checks that follow (only nd1 holds the b'sushi'
    # element), but it may be a typo for ``ex2`` -- confirm the
    # intended behavior upstream.  Left unchanged.
    ex1 = ndarray([(b'hello', b'', 1)] * 27, shape=[3, 3, 3],
                  format='5s0sP', flags=ND_PIL | ND_WRITABLE)
    ex1[1][2][2] = b'sushi', b'', 1
    nd1 = ex1[1:3, ::-2]
    ex2 = ndarray([(b'hello', b'', 1)] * 27, shape=[3, 3, 3],
                  format='5s0sP', flags=ND_PIL | ND_WRITABLE)
    ex1[1][2][2] = b'sushi', b'', 1
    nd2 = ex2[1:3, ::-2]
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertNotEqual(v, nd2)
    self.assertNotEqual(w, nd1)
    self.assertNotEqual(v, w)
    # C vs Fortran layout, both PIL-style, same values: equal
    lst1 = list(range(-15, 15))
    lst2 = transpose(lst1, [3, 2, 5])
    nd1 = ndarray(lst1, shape=[3, 2, 5], format='@l', flags=ND_PIL)
    nd2 = ndarray(lst2, shape=[3, 2, 5], format='l',
                  flags=ND_FORTRAN | ND_PIL)
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertEqual(v, w)
    # Pascal strings + double, opposite byte order, same values: equal
    lst1 = [(b'sashimi', b'sliced', 20.05)] * 30
    lst1[11] = b'ramen', b'spicy', 9.45
    lst2 = transpose(lst1, [3, 2, 5])
    nd1 = ndarray(lst1, shape=[3, 2, 5], format='< 10p 9p d',
                  flags=ND_PIL)
    nd2 = ndarray(lst2, shape=[3, 2, 5], format='> 10p 9p d',
                  flags=ND_FORTRAN | ND_PIL)
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertEqual(v, w)
def test_memoryview_compare_not_equal(self):
    """Inequality across all byte orders for single-value and struct
    formats, with one element flipped in a Fortran-ordered copy."""
    for byteorder in ['=', '<', '>', '!']:
        # single 'Q' format; y differs from x in exactly one element
        x = ndarray([2 ** 63] * 120, shape=[3, 5, 2, 2, 2],
                    format=byteorder + 'Q')
        y = ndarray([2 ** 63] * 120, shape=[3, 5, 2, 2, 2],
                    format=byteorder + 'Q', flags=ND_WRITABLE | ND_FORTRAN)
        y[2][3][1][1][1] = 1
        a = memoryview(x)
        b = memoryview(y)
        self.assertEqual(a, x)
        self.assertEqual(b, y)
        self.assertNotEqual(a, b)
        self.assertNotEqual(a, y)
        self.assertNotEqual(b, x)
        # same, with a three-component struct format
        x = ndarray([(2 ** 63, 2 ** 31, 2 ** 15)] * 120,
                    shape=[3, 5, 2, 2, 2], format=byteorder + 'QLH')
        y = ndarray([(2 ** 63, 2 ** 31, 2 ** 15)] * 120,
                    shape=[3, 5, 2, 2, 2], format=byteorder + 'QLH',
                    flags=ND_WRITABLE | ND_FORTRAN)
        y[2][3][1][1][1] = 1, 1, 1
        a = memoryview(x)
        b = memoryview(y)
        self.assertEqual(a, x)
        self.assertEqual(b, y)
        self.assertNotEqual(a, b)
        self.assertNotEqual(a, y)
        self.assertNotEqual(b, x)
def test_memoryview_check_released(self):
    """Every operation on a released memoryview raises ValueError."""
    a = array.array('d', [1.1, 2.2, 3.3])
    m = memoryview(a)
    m.release()
    # re-exporting, casting, converting and indexing all fail
    self.assertRaises(ValueError, memoryview, m)
    self.assertRaises(ValueError, m.cast, 'c')
    self.assertRaises(ValueError, ndarray, m)
    self.assertRaises(ValueError, m.tolist)
    self.assertRaises(ValueError, m.tobytes)
    self.assertRaises(ValueError, eval, '1.0 in m', locals())
    self.assertRaises(ValueError, m.__getitem__, 0)
    self.assertRaises(ValueError, m.__setitem__, 0, 1)
    # all informational attributes become inaccessible as well
    for attr in ('obj', 'nbytes', 'readonly', 'itemsize', 'format',
                 'ndim', 'shape', 'strides', 'suboffsets', 'c_contiguous',
                 'f_contiguous', 'contiguous'):
        self.assertRaises(ValueError, m.__getattribute__, attr)
    # equality: a released view equals only itself
    b = array.array('d', [1.1, 2.2, 3.3])
    m1 = memoryview(a)
    m2 = memoryview(b)
    self.assertEqual(m1, m2)
    m1.release()
    self.assertNotEqual(m1, m2)
    self.assertNotEqual(m1, a)
    self.assertEqual(m1, m1)
def test_memoryview_tobytes(self):
    """memoryview.tobytes() must agree with the exporter's tobytes()
    for 1-D, struct and multi-dimensional formats."""
    t = -529, 576, -625, 676, -729
    # 1-D native shorts
    nd = ndarray(t, shape=[5], format='@h')
    m = memoryview(nd)
    self.assertEqual(m, nd)
    self.assertEqual(m.tobytes(), nd.tobytes())
    # big-endian struct format
    nd = ndarray([t], shape=[1], format='>hQiLl')
    m = memoryview(nd)
    self.assertEqual(m, nd)
    self.assertEqual(m.tobytes(), nd.tobytes())
    # multi-dimensional, native and little-endian struct formats
    nd = ndarray([t for _ in range(12)], shape=[2, 2, 3], format='=hQiLl')
    m = memoryview(nd)
    self.assertEqual(m, nd)
    self.assertEqual(m.tobytes(), nd.tobytes())
    nd = ndarray([t for _ in range(120)], shape=[5, 2, 2, 3, 2],
                 format='<hQiLl')
    m = memoryview(nd)
    self.assertEqual(m, nd)
    self.assertEqual(m.tobytes(), nd.tobytes())
    if ctypes:
        # ctypes big-endian struct: tobytes() matches bytes(obj)
        class BEPoint(ctypes.BigEndianStructure):
            _fields_ = [('x', ctypes.c_long), ('y', ctypes.c_long)]
        point = BEPoint(100, 200)
        a = memoryview(point)
        self.assertEqual(a.tobytes(), bytes(point))
def test_memoryview_get_contiguous(self):
    """get_contiguous(): argument validation, 0-d and empty arrays,
    pass-through for contiguous buffers, read-only snapshots of
    non-contiguous ones."""
    # invalid arguments: non-buffer object, write request on read-only
    # or non-contiguous buffers
    self.assertRaises(TypeError, get_contiguous, {}, PyBUF_READ, 'F')
    self.assertRaises(BufferError, get_contiguous, b'x', PyBUF_WRITE, 'C')
    nd = ndarray([1, 2, 3], shape=[2], strides=[2])
    self.assertRaises(BufferError, get_contiguous, nd, PyBUF_WRITE, 'A')
    # 0-d scalar, read-only exporter
    nd = ndarray(9, shape=(), format='L')
    for order in ['C', 'F', 'A']:
        m = get_contiguous(nd, PyBUF_READ, order)
        self.assertEqual(m, nd)
        self.assertEqual(m[()], 9)
    # 0-d scalar, writable exporter, read request
    nd = ndarray(9, shape=(), format='L', flags=ND_WRITABLE)
    for order in ['C', 'F', 'A']:
        m = get_contiguous(nd, PyBUF_READ, order)
        self.assertEqual(m, nd)
        self.assertEqual(m[()], 9)
    # 0-d scalar, write request: the result is a writable view, so
    # writes propagate back to the exporter (reset to 9 each pass)
    for order in ['C', 'F', 'A']:
        nd[()] = 9
        m = get_contiguous(nd, PyBUF_WRITE, order)
        self.assertEqual(m, nd)
        self.assertEqual(m[()], 9)
        m[()] = 10
        self.assertEqual(m[()], 10)
        self.assertEqual(nd[()], 10)
    # empty 1-D array
    nd = ndarray([1], shape=[0], format='L', flags=ND_WRITABLE)
    for order in ['C', 'F', 'A']:
        m = get_contiguous(nd, PyBUF_READ, order)
        self.assertRaises(IndexError, m.__getitem__, 0)
        self.assertEqual(m, nd)
        self.assertEqual(m.tolist(), [])
    # multi-dimensional array with a zero in the shape
    nd = ndarray(list(range(8)), shape=[2, 0, 7], format='L',
                 flags=ND_WRITABLE)
    for order in ['C', 'F', 'A']:
        m = get_contiguous(nd, PyBUF_READ, order)
        self.assertEqual(ndarray(m).tolist(), [[], []])
    # already-contiguous 1-D arrays: writable pass-through
    nd = ndarray([1], shape=[1], format='h', flags=ND_WRITABLE)
    for order in ['C', 'F', 'A']:
        m = get_contiguous(nd, PyBUF_WRITE, order)
        self.assertEqual(m, nd)
        self.assertEqual(m.tolist(), nd.tolist())
    nd = ndarray([1, 2, 3], shape=[3], format='b', flags=ND_WRITABLE)
    for order in ['C', 'F', 'A']:
        m = get_contiguous(nd, PyBUF_WRITE, order)
        self.assertEqual(m, nd)
        self.assertEqual(m.tolist(), nd.tolist())
    # non-contiguous: a read request yields a read-only result
    nd = ndarray([1, 2, 3], shape=[2], strides=[2], flags=ND_WRITABLE)
    for order in ['C', 'F', 'A']:
        m = get_contiguous(nd, PyBUF_READ, order)
        self.assertEqual(m, nd)
        self.assertEqual(m.tolist(), nd.tolist())
        self.assertRaises(TypeError, m.__setitem__, 1, 20)
        self.assertEqual(m[1], 3)
        self.assertEqual(nd[1], 3)
    # same through a reversed slice
    nd = nd[::-1]
    for order in ['C', 'F', 'A']:
        m = get_contiguous(nd, PyBUF_READ, order)
        self.assertEqual(m, nd)
        self.assertEqual(m.tolist(), nd.tolist())
        self.assertRaises(TypeError, m.__setitem__, 1, 20)
        self.assertEqual(m[1], 1)
        self.assertEqual(nd[1], 1)
    # C-contiguous 2-D array: writable only for 'C'/'A' requests
    nd = ndarray(list(range(12)), shape=[3, 4], flags=ND_WRITABLE)
    for order in ['C', 'A']:
        m = get_contiguous(nd, PyBUF_WRITE, order)
        self.assertEqual(ndarray(m).tolist(), nd.tolist())
    self.assertRaises(BufferError, get_contiguous, nd, PyBUF_WRITE, 'F')
    # NOTE: deliberately reuses the last loop value of ``order``
    m = get_contiguous(nd, PyBUF_READ, order)
    self.assertEqual(ndarray(m).tolist(), nd.tolist())
    # Fortran-contiguous 2-D array: writable only for 'F'/'A' requests
    nd = ndarray(list(range(12)), shape=[3, 4],
                 flags=ND_WRITABLE | ND_FORTRAN)
    for order in ['F', 'A']:
        m = get_contiguous(nd, PyBUF_WRITE, order)
        self.assertEqual(ndarray(m).tolist(), nd.tolist())
    self.assertRaises(BufferError, get_contiguous, nd, PyBUF_WRITE, 'C')
    # NOTE: deliberately reuses the last loop value of ``order``
    m = get_contiguous(nd, PyBUF_READ, order)
    self.assertEqual(ndarray(m).tolist(), nd.tolist())
    # PIL-style (suboffsets): never contiguous, so writes always fail
    nd = ndarray(list(range(12)), shape=[3, 4],
                 flags=ND_WRITABLE | ND_PIL)
    for order in ['C', 'F', 'A']:
        self.assertRaises(BufferError, get_contiguous, nd, PyBUF_WRITE,
                          order)
        m = get_contiguous(nd, PyBUF_READ, order)
        self.assertEqual(ndarray(m).tolist(), nd.tolist())
    # the result of a 'C' request reports c_contiguous
    nd = ndarray([1, 2, 3, 4, 5], shape=[3], strides=[2])
    m = get_contiguous(nd, PyBUF_READ, 'C')
    self.assertTrue(m.c_contiguous)
def test_memoryview_serializing(self):
    """io.BytesIO initialized from a memoryview must read back exactly
    the bytes of the view, for both array.array and ndarray exporters."""
    exporters = (
        array.array('i', [1, 2, 3, 4, 5]),
        ndarray(list(range(12)), shape=[2, 3, 2], format='L'),
    )
    for exporter in exporters:
        view = memoryview(exporter)
        stream = io.BytesIO(view)
        # read the whole stream into an equally sized buffer
        out = bytearray(view.nbytes)
        stream.readinto(out)
        self.assertEqual(view.tobytes(), out)
def test_memoryview_hash(self):
    """Hashing: read-only byte views hash like the equivalent bytes
    object; non-byte formats are unhashable."""
    # bytes exporter
    b = bytes(list(range(12)))
    m = memoryview(b)
    self.assertEqual(hash(b), hash(m))
    # a cast to a multi-dimensional 'c' view hashes the same
    mc = m.cast('c', shape=[3, 4])
    self.assertEqual(hash(mc), hash(b))
    # a sliced view hashes like the equally sliced bytes
    mx = m[::-2]
    b = bytes(list(range(12))[::-2])
    self.assertEqual(hash(mx), hash(b))
    # Fortran, strided and PIL-style byte arrays hash like the exporter
    nd = ndarray(list(range(30)), shape=[3, 2, 5], flags=ND_FORTRAN)
    m = memoryview(nd)
    self.assertEqual(hash(m), hash(nd))
    nd = ndarray(list(range(30)), shape=[3, 2, 5])
    x = nd[::2, :, ::-1]
    m = memoryview(x)
    self.assertEqual(hash(m), hash(x))
    nd = ndarray(list(range(30)), shape=[2, 5, 3], flags=ND_PIL)
    x = nd[::2, :, ::-1]
    m = memoryview(x)
    self.assertEqual(hash(m), hash(x))
    # equal views with formats 'B' and 'b' hash equal
    x = ndarray(list(range(12)), shape=[12], format='B')
    a = memoryview(x)
    y = ndarray(list(range(12)), shape=[12], format='b')
    b = memoryview(y)
    self.assertEqual(a, b)
    self.assertEqual(hash(a), hash(b))
    # non-byte formats cannot be hashed
    nd = ndarray(list(range(12)), shape=[2, 2, 3], format='L')
    m = memoryview(nd)
    self.assertRaises(ValueError, m.__hash__)
    nd = ndarray(list(range(-6, 6)), shape=[2, 2, 3], format='h')
    m = memoryview(nd)
    self.assertRaises(ValueError, m.__hash__)
    nd = ndarray(list(range(12)), shape=[2, 2, 3], format='= L')
    m = memoryview(nd)
    self.assertRaises(ValueError, m.__hash__)
    nd = ndarray(list(range(-6, 6)), shape=[2, 2, 3], format='< h')
    m = memoryview(nd)
    self.assertRaises(ValueError, m.__hash__)
def test_memoryview_release(self):
    """Release semantics: exported buffers block release(), redirection
    chains, nested/stacked views, and the context-manager form."""
    # release() fails while an ndarray exports from the view
    a = bytearray([1, 2, 3])
    m = memoryview(a)
    nd = ndarray(m)
    self.assertRaises(BufferError, m.release)
    del nd
    m.release()
    # the same through a redirection chain
    a = bytearray([1, 2, 3])
    m = memoryview(a)
    nd1 = ndarray(m, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
    nd2 = ndarray(nd1, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
    self.assertIs(nd2.obj, m)
    self.assertRaises(BufferError, m.release)
    del nd1, nd2
    m.release()
    # chained views: releasing m1 does not invalidate m2, but m2
    # cannot be released while nd exports from it
    a = bytearray([1, 2, 3])
    m1 = memoryview(a)
    m2 = memoryview(m1)
    nd = ndarray(m2)
    m1.release()
    self.assertRaises(BufferError, m2.release)
    del nd
    m2.release()
    # the same with a redirection chain on top of m2
    a = bytearray([1, 2, 3])
    m1 = memoryview(a)
    m2 = memoryview(m1)
    nd1 = ndarray(m2, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
    nd2 = ndarray(nd1, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
    self.assertIs(nd2.obj, m2)
    m1.release()
    self.assertRaises(BufferError, m2.release)
    del nd1, nd2
    m2.release()
    # variable-export object: each view tracks the layout it was
    # created against, even across push/pop
    nd = ndarray([1, 2, 3], shape=[3], flags=ND_VAREXPORT)
    m1 = memoryview(nd)
    nd.push([4, 5, 6, 7, 8], shape=[5])
    m2 = memoryview(nd)
    x = memoryview(m1)
    self.assertEqual(x.tolist(), m1.tolist())
    y = memoryview(m2)
    self.assertEqual(y.tolist(), m2.tolist())
    self.assertEqual(y.tolist(), nd.tolist())
    m2.release()
    y.release()
    nd.pop()
    self.assertEqual(x.tolist(), nd.tolist())
    del nd
    m1.release()
    x.release()
    # context-manager form; a short-lived nested view must not
    # invalidate the outer one
    def catch22(b):
        with memoryview(b) as m2:
            pass
    x = bytearray(b'123')
    with memoryview(x) as m1:
        catch22(m1)
        self.assertEqual(m1[0], ord(b'1'))
    x = ndarray(list(range(12)), shape=[2, 2, 3], format='l')
    y = ndarray(x, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
    z = ndarray(y, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
    self.assertIs(z.obj, x)
    with memoryview(z) as m:
        catch22(m)
        self.assertEqual(m[0:1].tolist(), [[[0, 1, 2], [3, 4, 5]]])
    # stacked views stay usable after every base object is deleted
    for flags in (0, ND_REDIRECT):
        x = bytearray(b'123')
        with memoryview(x) as m1:
            del x
            y = ndarray(m1, getbuf=PyBUF_FULL_RO, flags=flags)
            with memoryview(y) as m2:
                del y
                z = ndarray(m2, getbuf=PyBUF_FULL_RO, flags=flags)
                with memoryview(z) as m3:
                    del z
                    catch22(m3)
                    catch22(m2)
                    catch22(m1)
                    self.assertEqual(m1[0], ord(b'1'))
                    self.assertEqual(m2[1], ord(b'2'))
                    self.assertEqual(m3[2], ord(b'3'))
        del m3
        del m2
        del m1
        # same again, exercising catch22 in the opposite order
        x = bytearray(b'123')
        with memoryview(x) as m1:
            del x
            y = ndarray(m1, getbuf=PyBUF_FULL_RO, flags=flags)
            with memoryview(y) as m2:
                del y
                z = ndarray(m2, getbuf=PyBUF_FULL_RO, flags=flags)
                with memoryview(z) as m3:
                    del z
                    catch22(m1)
                    catch22(m2)
                    catch22(m3)
                    self.assertEqual(m1[0], ord(b'1'))
                    self.assertEqual(m2[1], ord(b'2'))
                    self.assertEqual(m3[2], ord(b'3'))
        del m1, m2, m3
    # leaving the with block cannot release: ndarray still exports
    x = bytearray(b'123')
    with self.assertRaises(BufferError):
        with memoryview(x) as m:
            ex = ndarray(m)  # keeps an export alive past the with
            # BUGFIX: the original wrote ``m[0] == ord(b'1')`` -- a
            # comparison whose result was silently discarded.  Assert
            # the value instead.
            self.assertEqual(m[0], ord(b'1'))
def test_memoryview_redirect(self):
    # With ND_REDIRECT every wrapper in an exporter chain reports the
    # *root* exporter as its .obj, so views stay valid after the
    # intermediate wrappers are deleted.
    nd = ndarray([(1.0 * x) for x in range(12)], shape=[12], format='d')
    a = array.array('d', [(1.0 * x) for x in range(12)])
    for x in (nd, a):
        # Two-level redirect chain on top of x.
        y = ndarray(x, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
        z = ndarray(y, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
        m = memoryview(z)
        # All redirecting wrappers and the view expose x directly.
        self.assertIs(y.obj, x)
        self.assertIs(z.obj, x)
        self.assertIs(m.obj, x)
        # Contents are identical at every level of the chain.
        self.assertEqual(m, x)
        self.assertEqual(m, y)
        self.assertEqual(m, z)
        self.assertEqual(m[1:3], x[1:3])
        self.assertEqual(m[1:3], y[1:3])
        self.assertEqual(m[1:3], z[1:3])
        del y, z
        # The view references x directly, so it survives deletion of
        # the intermediates.
        self.assertEqual(m[1:3], x[1:3])
def test_memoryview_from_static_exporter(self):
    # Exercise memoryview/ndarray chains over staticarray, an exporter
    # backed by a static C-level Py_buffer (from _testbuffer).
    fmt = 'B'
    lst = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]

    # The constructor accepts no positional arguments.
    self.assertRaises(TypeError, staticarray, 1, 2, 3)

    # Plain view: view.obj is the exporter itself.
    x = staticarray()
    y = memoryview(x)
    self.verify(y, obj=x, itemsize=1, fmt=fmt, readonly=1, ndim=1,
                shape=[12], strides=[1], lst=lst)
    for i in range(12):
        self.assertEqual(y[i], i)
    del x
    del y

    # Deallocation order: view released before and after its exporter.
    x = staticarray()
    y = memoryview(x)
    del y
    del x

    # Non-redirecting chain: each wrapper reports its immediate source.
    x = staticarray()
    y = ndarray(x, getbuf=PyBUF_FULL_RO)
    z = ndarray(y, getbuf=PyBUF_FULL_RO)
    m = memoryview(z)
    self.assertIs(y.obj, x)
    self.assertIs(m.obj, z)
    self.verify(m, obj=z, itemsize=1, fmt=fmt, readonly=1, ndim=1,
                shape=[12], strides=[1], lst=lst)
    del x, y, z, m

    # Redirecting chain: every level reports the root exporter x.
    x = staticarray()
    y = ndarray(x, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
    z = ndarray(y, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
    m = memoryview(z)
    self.assertIs(y.obj, x)
    self.assertIs(z.obj, x)
    self.assertIs(m.obj, x)
    self.verify(m, obj=x, itemsize=1, fmt=fmt, readonly=1, ndim=1,
                shape=[12], strides=[1], lst=lst)
    del x, y, z, m

    # legacy_mode exporter: views report obj=None.
    x = staticarray(legacy_mode=True)
    y = memoryview(x)
    self.verify(y, obj=None, itemsize=1, fmt=fmt, readonly=1, ndim=1,
                shape=[12], strides=[1], lst=lst)
    for i in range(12):
        self.assertEqual(y[i], i)
    del x
    del y

    x = staticarray(legacy_mode=True)
    y = memoryview(x)
    del y
    del x

    x = staticarray(legacy_mode=True)
    y = ndarray(x, getbuf=PyBUF_FULL_RO)
    z = ndarray(y, getbuf=PyBUF_FULL_RO)
    m = memoryview(z)
    self.assertIs(y.obj, None)
    self.assertIs(m.obj, z)
    self.verify(m, obj=z, itemsize=1, fmt=fmt, readonly=1, ndim=1,
                shape=[12], strides=[1], lst=lst)
    del x, y, z, m

    # legacy_mode + redirect: with obj None at the root, the redirect
    # chain stops at y, the first wrapper.
    x = staticarray(legacy_mode=True)
    y = ndarray(x, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
    z = ndarray(y, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
    m = memoryview(z)
    self.assertIs(y.obj, None)
    self.assertIs(z.obj, y)
    self.assertIs(m.obj, y)
    self.verify(m, obj=y, itemsize=1, fmt=fmt, readonly=1, ndim=1,
                shape=[12], strides=[1], lst=lst)
    del x, y, z, m
def test_memoryview_getbuffer_undefined(self):
    """A failing getbuffer with undefined flags must raise BufferError."""
    exporter = ndarray([1, 2, 3], [3],
                       flags=ND_GETBUF_FAIL | ND_GETBUF_UNDEFINED)
    self.assertRaises(BufferError, memoryview, exporter)
def test_issue_7385(self):
    """Regression test: memoryview() over a failing exporter raises."""
    failing = ndarray([1, 2, 3], shape=[3], flags=ND_GETBUF_FAIL)
    self.assertRaises(BufferError, memoryview, failing)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| 141,004 | 51,016 |
def setNextPin(self, source):
    """Connect *source* to the first free input pin of this gate.

    pinA is filled first, then pinB.

    Raises:
        RuntimeError: if both input pins are already connected.
    """
    # `is None` is the correct identity test (was `== None`); the nested
    # else/if pyramid is flattened into a guard chain.
    if self.pinA is None:
        self.pinA = source
    elif self.pinB is None:
        self.pinB = source
    else:
        raise RuntimeError("Error: NO EMPTY PINS")
| 253 | 73 |
# ----------------------------------------------------------------------
# Copyright (c) 2014 Rafael Gonzalez.
#
# See the LICENSE file for details
# ----------------------------------------------------------------------
#--------------------
# System wide imports
# -------------------
from __future__ import division, absolute_import
import sys
# ---------------
# Twisted imports
# ---------------
#--------------
# local imports
# -------------
from ._version import get_versions
# ----------------
# Module constants
# ----------------
# True when running under a Python 2.x interpreter.
PY2 = sys.version_info[0] == 2
# -----------------------
# Module global variables
# -----------------------
# Version string obtained from ._version.get_versions(); the helper name
# is deleted afterwards to keep the module namespace clean.
__version__ = get_versions()['version']
del get_versions
| 721 | 187 |
from getratings.models.ratings import Ratings
# Auto-generated placeholder models: one empty Ratings subclass per
# opposing champion for Jinx played Mid in the NA region.  Each class
# name encodes (region, champion, role, opponent); all fields and
# behaviour live in the shared Ratings base class.  Do not edit by hand.
class NA_Jinx_Mid_Aatrox(Ratings):
    pass
class NA_Jinx_Mid_Ahri(Ratings):
    pass
class NA_Jinx_Mid_Akali(Ratings):
    pass
class NA_Jinx_Mid_Alistar(Ratings):
    pass
class NA_Jinx_Mid_Amumu(Ratings):
    pass
class NA_Jinx_Mid_Anivia(Ratings):
    pass
class NA_Jinx_Mid_Annie(Ratings):
    pass
class NA_Jinx_Mid_Ashe(Ratings):
    pass
class NA_Jinx_Mid_AurelionSol(Ratings):
    pass
class NA_Jinx_Mid_Azir(Ratings):
    pass
class NA_Jinx_Mid_Bard(Ratings):
    pass
class NA_Jinx_Mid_Blitzcrank(Ratings):
    pass
class NA_Jinx_Mid_Brand(Ratings):
    pass
class NA_Jinx_Mid_Braum(Ratings):
    pass
class NA_Jinx_Mid_Caitlyn(Ratings):
    pass
class NA_Jinx_Mid_Camille(Ratings):
    pass
class NA_Jinx_Mid_Cassiopeia(Ratings):
    pass
class NA_Jinx_Mid_Chogath(Ratings):
    pass
class NA_Jinx_Mid_Corki(Ratings):
    pass
class NA_Jinx_Mid_Darius(Ratings):
    pass
class NA_Jinx_Mid_Diana(Ratings):
    pass
class NA_Jinx_Mid_Draven(Ratings):
    pass
class NA_Jinx_Mid_DrMundo(Ratings):
    pass
class NA_Jinx_Mid_Ekko(Ratings):
    pass
class NA_Jinx_Mid_Elise(Ratings):
    pass
class NA_Jinx_Mid_Evelynn(Ratings):
    pass
class NA_Jinx_Mid_Ezreal(Ratings):
    pass
class NA_Jinx_Mid_Fiddlesticks(Ratings):
    pass
class NA_Jinx_Mid_Fiora(Ratings):
    pass
class NA_Jinx_Mid_Fizz(Ratings):
    pass
class NA_Jinx_Mid_Galio(Ratings):
    pass
class NA_Jinx_Mid_Gangplank(Ratings):
    pass
class NA_Jinx_Mid_Garen(Ratings):
    pass
class NA_Jinx_Mid_Gnar(Ratings):
    pass
class NA_Jinx_Mid_Gragas(Ratings):
    pass
class NA_Jinx_Mid_Graves(Ratings):
    pass
class NA_Jinx_Mid_Hecarim(Ratings):
    pass
class NA_Jinx_Mid_Heimerdinger(Ratings):
    pass
class NA_Jinx_Mid_Illaoi(Ratings):
    pass
class NA_Jinx_Mid_Irelia(Ratings):
    pass
class NA_Jinx_Mid_Ivern(Ratings):
    pass
class NA_Jinx_Mid_Janna(Ratings):
    pass
class NA_Jinx_Mid_JarvanIV(Ratings):
    pass
class NA_Jinx_Mid_Jax(Ratings):
    pass
class NA_Jinx_Mid_Jayce(Ratings):
    pass
class NA_Jinx_Mid_Jhin(Ratings):
    pass
class NA_Jinx_Mid_Jinx(Ratings):
    pass
class NA_Jinx_Mid_Kalista(Ratings):
    pass
class NA_Jinx_Mid_Karma(Ratings):
    pass
class NA_Jinx_Mid_Karthus(Ratings):
    pass
class NA_Jinx_Mid_Kassadin(Ratings):
    pass
class NA_Jinx_Mid_Katarina(Ratings):
    pass
class NA_Jinx_Mid_Kayle(Ratings):
    pass
class NA_Jinx_Mid_Kayn(Ratings):
    pass
class NA_Jinx_Mid_Kennen(Ratings):
    pass
class NA_Jinx_Mid_Khazix(Ratings):
    pass
class NA_Jinx_Mid_Kindred(Ratings):
    pass
class NA_Jinx_Mid_Kled(Ratings):
    pass
class NA_Jinx_Mid_KogMaw(Ratings):
    pass
class NA_Jinx_Mid_Leblanc(Ratings):
    pass
class NA_Jinx_Mid_LeeSin(Ratings):
    pass
class NA_Jinx_Mid_Leona(Ratings):
    pass
class NA_Jinx_Mid_Lissandra(Ratings):
    pass
class NA_Jinx_Mid_Lucian(Ratings):
    pass
class NA_Jinx_Mid_Lulu(Ratings):
    pass
class NA_Jinx_Mid_Lux(Ratings):
    pass
class NA_Jinx_Mid_Malphite(Ratings):
    pass
class NA_Jinx_Mid_Malzahar(Ratings):
    pass
class NA_Jinx_Mid_Maokai(Ratings):
    pass
class NA_Jinx_Mid_MasterYi(Ratings):
    pass
class NA_Jinx_Mid_MissFortune(Ratings):
    pass
class NA_Jinx_Mid_MonkeyKing(Ratings):
    pass
class NA_Jinx_Mid_Mordekaiser(Ratings):
    pass
class NA_Jinx_Mid_Morgana(Ratings):
    pass
class NA_Jinx_Mid_Nami(Ratings):
    pass
class NA_Jinx_Mid_Nasus(Ratings):
    pass
class NA_Jinx_Mid_Nautilus(Ratings):
    pass
class NA_Jinx_Mid_Nidalee(Ratings):
    pass
class NA_Jinx_Mid_Nocturne(Ratings):
    pass
class NA_Jinx_Mid_Nunu(Ratings):
    pass
class NA_Jinx_Mid_Olaf(Ratings):
    pass
class NA_Jinx_Mid_Orianna(Ratings):
    pass
class NA_Jinx_Mid_Ornn(Ratings):
    pass
class NA_Jinx_Mid_Pantheon(Ratings):
    pass
class NA_Jinx_Mid_Poppy(Ratings):
    pass
class NA_Jinx_Mid_Quinn(Ratings):
    pass
class NA_Jinx_Mid_Rakan(Ratings):
    pass
class NA_Jinx_Mid_Rammus(Ratings):
    pass
class NA_Jinx_Mid_RekSai(Ratings):
    pass
class NA_Jinx_Mid_Renekton(Ratings):
    pass
class NA_Jinx_Mid_Rengar(Ratings):
    pass
class NA_Jinx_Mid_Riven(Ratings):
    pass
class NA_Jinx_Mid_Rumble(Ratings):
    pass
class NA_Jinx_Mid_Ryze(Ratings):
    pass
class NA_Jinx_Mid_Sejuani(Ratings):
    pass
class NA_Jinx_Mid_Shaco(Ratings):
    pass
class NA_Jinx_Mid_Shen(Ratings):
    pass
class NA_Jinx_Mid_Shyvana(Ratings):
    pass
class NA_Jinx_Mid_Singed(Ratings):
    pass
class NA_Jinx_Mid_Sion(Ratings):
    pass
class NA_Jinx_Mid_Sivir(Ratings):
    pass
class NA_Jinx_Mid_Skarner(Ratings):
    pass
class NA_Jinx_Mid_Sona(Ratings):
    pass
class NA_Jinx_Mid_Soraka(Ratings):
    pass
class NA_Jinx_Mid_Swain(Ratings):
    pass
class NA_Jinx_Mid_Syndra(Ratings):
    pass
class NA_Jinx_Mid_TahmKench(Ratings):
    pass
class NA_Jinx_Mid_Taliyah(Ratings):
    pass
class NA_Jinx_Mid_Talon(Ratings):
    pass
class NA_Jinx_Mid_Taric(Ratings):
    pass
class NA_Jinx_Mid_Teemo(Ratings):
    pass
class NA_Jinx_Mid_Thresh(Ratings):
    pass
class NA_Jinx_Mid_Tristana(Ratings):
    pass
class NA_Jinx_Mid_Trundle(Ratings):
    pass
class NA_Jinx_Mid_Tryndamere(Ratings):
    pass
class NA_Jinx_Mid_TwistedFate(Ratings):
    pass
class NA_Jinx_Mid_Twitch(Ratings):
    pass
class NA_Jinx_Mid_Udyr(Ratings):
    pass
class NA_Jinx_Mid_Urgot(Ratings):
    pass
class NA_Jinx_Mid_Varus(Ratings):
    pass
class NA_Jinx_Mid_Vayne(Ratings):
    pass
class NA_Jinx_Mid_Veigar(Ratings):
    pass
class NA_Jinx_Mid_Velkoz(Ratings):
    pass
class NA_Jinx_Mid_Vi(Ratings):
    pass
class NA_Jinx_Mid_Viktor(Ratings):
    pass
class NA_Jinx_Mid_Vladimir(Ratings):
    pass
class NA_Jinx_Mid_Volibear(Ratings):
    pass
class NA_Jinx_Mid_Warwick(Ratings):
    pass
class NA_Jinx_Mid_Xayah(Ratings):
    pass
class NA_Jinx_Mid_Xerath(Ratings):
    pass
class NA_Jinx_Mid_XinZhao(Ratings):
    pass
class NA_Jinx_Mid_Yasuo(Ratings):
    pass
class NA_Jinx_Mid_Yorick(Ratings):
    pass
class NA_Jinx_Mid_Zac(Ratings):
    pass
class NA_Jinx_Mid_Zed(Ratings):
    pass
class NA_Jinx_Mid_Ziggs(Ratings):
    pass
class NA_Jinx_Mid_Zilean(Ratings):
    pass
class NA_Jinx_Mid_Zyra(Ratings):
    pass
| 6,269 | 3,457 |
from django.db import models
from django.core.validators import MinValueValidator, MaxValueValidator
# Create your models here.
class Genomafoodies(models.Model):
    """A food venue entry with a 0-5 rating and a visited flag."""
    title = models.CharField(max_length=50)
    # Location/address of the venue ("ubicación").
    ubication = models.TextField()
    food_type = models.TextField()
    # Score constrained to the inclusive range 0..5.
    score = models.IntegerField(
        validators=[MinValueValidator(0), MaxValueValidator(5)])
    visited = models.BooleanField(default=False)

    def __str__(self):
        # BUG FIX: the method was misspelled `_str_`, which Python and
        # Django never call; `__str__` makes the admin show the title.
        return self.title
| 475 | 135 |
"""Find or apply coordinates to stitch an image
"""
#import matplotlib.pyplot as plt
#plt_available = True
plt_available = False
import sklearn.feature_extraction.image as sklim
import numpy as np
import skimage.transform as smtf
import h5py
import logging
import time
import os
# Own imports
from . import inout
from . import pairwisesingle as ps
from .MicroscopeData import MicroscopeData
from .GlobalOptimization import GlobalOptimization
from . import tilejoining
from .. import utils
# Logger
logger = logging.getLogger(__name__)
#################### Initial stitching functions #######################
def get_pairwise_input(ImageProperties, folder, tile_file, hyb_nr, gene = 'Nuclei',
                       pre_proc_level = 'FilteredData',
                       est_overlap = 0.1, y_flip = False, nr_dim = 2):
    """Get the information necessary to do the pairwise allignment

    Find the pairwise pars for an unknown stitching.
    Works best with a folder containing
    image with nuclei (DAPI staining)

    Parameters:
    -----------

    ImageProperties: dict
        NOTE(review): this parameter is immediately overwritten by the
        metadata unpacking below, so the passed-in value is never used —
        confirm whether it can be removed from the signature.
    folder: str
        String representing the path of the folder containing
        the tile file and the yaml metadata file. Needs a
        trailing slash ('/').
    tile_file: pointer
        HDF5 file handle. Reference to the opened file containing the tiles.
    hyb_nr: int
        The number of the hybridization we are going to
        stitch. This will be used to navigate tile_file and find
        the correct tiles.
    gene: str
        The name of the gene we are going to stitch.
        This will be used to navigate tile_file and find the
        correct tiles. (Default: 'Nuclei')
    pre_proc_level: str
        The name of the pre processing group of
        the tiles we are going to stitch.
        This will be used to navigate tile_file and find the
        correct tiles. (Default: 'FilteredData')
    est_overlap: float
        The fraction of two neighbours that should
        overlap, this is used to estimate the shape of the
        tile set and then overwritten by the actual average
        overlap according to the microscope coordinates.
        (default: 0.1)
    y_flip: bool
        The y_flip variable is designed for the cases where the
        microscope sequence is inverted in the y-direction. When
        set to True the y-coordinates will also be inverted
        before determining the tile set. (Default: False)
    nr_dim: int
        If 3, the code will assume three dimensional data
        for the tile, where z is the first dimension and y and x
        the second and third. For any other value 2-dimensional data
        is assumed. (Default: 2)

    Returns:
    --------

    tiles: list
        List of references to the the tiles in the hdf5 file tile_file.
    contig_tuples: list
        List of tuples. Each tuple is a tile pair.
        Tuples contain two tile indexes denoting these
        tiles are contingent to each other.
    nr_pixels: int
        Height and length of the tile in pixels, tile is assumed to be square.
    z_count: int
        The number of layers in one tile (size of the z-axis). Is 1 when nr_dim is not 3.
    micData: object
        MicroscopeData object. Contains coordinates of the tile corners as taken from the microscope.

    Raises:
    -------
    KeyError
        When a required entry is missing from the experimental metadata
        file (re-raised after logging instructions for the user).
    """
    logger.info("Getting files from folder: {}".format(folder))
    # Load the information from the metadata file.
    ExperimentInfos, ImageProperties, HybridizationsInfos, \
        Converted_Positions, MicroscopeParameters = \
        utils.experimental_metadata_parser(folder)
    # Get coordinate data for this hybridization
    coord_data = Converted_Positions['Hybridization' + str(hyb_nr)]
    # Read the number of pixels, z-count and pixel size from the yaml
    # file.
    try:
        nr_pixels = ImageProperties['HybImageSize']['rows']
    except KeyError as err:
        logger.info(("Number of pixels not found in experimental "
                     + "metadata file.\nPlease add "
                     + "the number of pixels in an image "
                     + "to the experimental "
                     + "metadata file under ImageProperties "
                     + "--> HybImageSize --> rows.\n"
                     + "KeyError: {}").format(err))
        raise
    if nr_dim == 2:
        z_count = 1
    else:
        try:
            z_count = ImageProperties['HybImageSize']['zcount']
        except KeyError as err:
            logger.info(("Number of pixels not found in experimental "
                         + "metadata file.\nPlease add "
                         + "the number of slices in the z-stack "
                         + "to the experimental "
                         + "metadata file under ImageProperties "
                         + "--> HybImageSize --> zcount.\n"
                         + "KeyError: {}")
                        .format(err))
            raise
    try:
        pixel_size = ImageProperties['PixelSize']
    except KeyError as err:
        logger.info(("ImageProperties['PixelSize'] not found in "
                     + "experimental metadata file.\nPlease add the "
                     + "size of a pixel in um in the experimental "
                     + "metadata file under ImageProperties "
                     + "--> PixelSize.\nKeyError: {}").format(err))
        raise
    # Estimate the overlap in pixels with the overlap that the user
    # provided, default is 10%
    est_x_tol = nr_pixels * (1 - est_overlap)
    logger.info("Estimating overlap at {}%, that is {} pixels"
                .format(est_overlap * 100, est_x_tol))
    logger.debug("Number of pixels: {}".format(nr_pixels))
    logger.debug("Number of slices in z-stack: {}".format(z_count))
    # Organize the microscope data and determine tile set
    micData = MicroscopeData(coord_data, y_flip, nr_dim)
    micData.normalize_coords(pixel_size)
    micData.make_tile_set(est_x_tol, nr_pixels = nr_pixels)
    # Make a list of image numbers, matching with the numbers in the
    # image files.  Negative indexes mark empty grid positions and are
    # masked out below.
    flat_tile_set = micData.tile_set.flat[:]
    image_list = [micData.tile_nr[ind] if ind >= 0 else -1 for ind in flat_tile_set]
    image_list = np.ma.masked_equal(image_list, -1)
    logger.info("Getting references for: {}".format(image_list))
    # Make a list of the image names
    tiles = inout.get_image_names(tile_file, image_list = image_list,
                                  hyb_nr = hyb_nr, gene = gene,
                                  pre_proc_level = pre_proc_level)
    logger.info("Size tiles: {} Number of pixels: {} z count: {}"
                .format(len(tiles), nr_pixels, z_count))
    # Produce an undirected graph of the tiles, tiles that are
    # neighbours to each other are connected in this graph.
    # noinspection PyPep8Naming
    C = np.asarray(sklim.grid_to_graph(*micData.tile_set.shape).todense())
    np.fill_diagonal(C, 0)
    # Keep only the upper triangle so every neighbour pair appears once.
    # noinspection PyPep8Naming
    C = np.triu( C )
    # Extract the neighbour pairs from the graph
    contig_tuples = list(zip( *np.where( C ) ))
    logger.info(("Length contingency tuples: {} \n"
                 + "Contingency tuples: {}")
                .format(len(contig_tuples), contig_tuples))
    # Plotting tiles:
    #inout.display_tiles(tiles, micData.tile_set, fig_nr = 2, block = False)
    #plt.show(block = True)
    return (tiles, contig_tuples, nr_pixels, z_count, micData)
def get_pairwise_alignments(tiles, tile_file, contig_tuples,
                            micData, nr_peaks = 8,
                            nr_slices = None,
                            nr_dim = 2):
    """Compute the pairwise offset for every neighbouring tile pair.

    Serial (non-MPI) driver: for each tuple in ``contig_tuples`` the
    pairwise translation and its covariance are obtained from
    ``pairwisesingle.align_single_pair`` and gathered into flat arrays.

    Parameters:
    -----------

    tiles: list
        References to the tiles inside tile_file.
    tile_file: pointer
        HDF5 file handle. Reference to the opened file containing the tiles.
    contig_tuples: list
        Tuples of two tile indexes marking tiles that are
        contingent to each other.
    micData: object
        MicroscopeData object with the microscope corner coordinates.
    nr_peaks: int
        Number of peaks extracted from the PCM. (Default: 8)
    nr_slices: int
        For 3D data with the 'compres pic' method in
        pairwisesingle.py: number of z-slices compressed together;
        None compresses the whole stack. (Default: None)
    nr_dim: int
        3 means (z, y, x) tiles; any other value means 2D data.
        (Default: 2)

    Returns:
    --------

    : dict
        Key 'P': flat int array of pairwise y/x (and z) offsets,
        nr_dim entries per neighbouring pair.
        Key 'covs': one covariance value per pair.
    """
    logger.info("Getting pairwise alignments...")
    nr_pairs = len(contig_tuples)
    offsets = np.empty((nr_pairs, nr_dim), dtype = int)
    covariances = np.empty(nr_pairs)
    for pair_nr in range(nr_pairs):
        # align_single_pair reports the index it actually processed, so
        # results are stored at that index rather than at pair_nr.
        shift, pair_cov, stored_at = ps.align_single_pair(
            tiles, tile_file, contig_tuples, pair_nr, micData, nr_peaks,
            nr_slices = nr_slices, nr_dim = nr_dim)
        offsets[stored_at, :] = shift
        covariances[stored_at] = pair_cov
    logger.info("Raw P: {}".format(offsets))
    # The global optimization expects a flat coordinate vector.
    offsets = np.array(offsets).flat[:]
    logger.info("flat P: {}".format(offsets))
    return {'P': offsets, 'covs': covariances}
############################# Apply ####################################
def get_place_tile_input_apply(folder, tile_file, hyb_nr, data_name,
                               gene = 'Nuclei',
                               pre_proc_level = 'Filtered',
                               nr_dim = 2, check_pairwise = False):
    """Get the data needed to apply stitching to another gene

    Parameters:
    -----------

    folder: str
        String representing the path of the folder containing
        the tile file, the stitching data file the yaml metadata
        file. Needs a trailing slash ('/').
    tile_file: pointer
        HDF5 file handle. Reference to the opened file
        containing the tiles.
    hyb_nr: int
        The number of the hybridization we are going to
        stitch. This will be used to navigate tile_file and find
        the correct tiles.
    data_name: str
        Name of the file containing the pickled stitching data.
    gene: str
        The name of the gene we are going to stitch.
        This will be used to navigate tile_file and find the
        correct tiles. (Default: 'Nuclei')
    pre_proc_level: str
        The name of the pre processing group of
        the tiles we are going to stitch.
        This will be used to navigate tile_file and find the
        correct tiles. (Default: 'Filtered')
    nr_dim: int
        If 3, the code will assume three dimensional data
        for the tile, where z is the first dimension and y and x
        the second and third. For any other value 2-dimensional data
        is assumed. (Default: 2)
    check_pairwise: bool
        If True the contig_tuples array is assumed
        to be in the pickled data file and will be returned.
        (Default: False)

    Returns:
    --------

    joining: dict
        Taken from the stitching data file.
        Contains keys corner_list and final_image_shape.
        Corner_list is a list of list, each list is a pair
        of an image number (int) and it's coordinates (numpy
        array containing floats).
        Final_image_shape is a tuple of size 2 or 3
        depending on the numer of dimensions and contains
        ints.
    tiles: list
        List of references to the the tiles in the hdf5 file tile_file.
    nr_pixels: int
        Height and length of the tile in pixels, tile is assumed to be square.
    z_count: int
        The number of layers in one tile (size of
        the z-axis). Is 1 when nr_dim is not 3.
    micData: object
        MicroscopeData object. Taken from the pickled
        stitching data.
        Contains coordinates of the tile corners as taken
        from the microscope.
    contig_tuples: list
        Only returned if check_pairwise == True.
        list of tuples. Taken from the pickled
        stitching data. Each tuple is a tile pair.
        Tuples contain two tile indexes denoting these
        tiles are contingent to each other.
    """
    logger.info("Getting data to apply stitching from file...")
    # Load image list and old joining data
    stitching_coord_dict = inout.load_stitching_coord(folder + data_name)
    # noinspection PyPep8Naming
    micData = stitching_coord_dict['micData']
    joining = stitching_coord_dict['joining']
    logger.info("Joining object and image list loaded from file")
    # Make a list of image numbers; negative indexes mark empty grid
    # positions and are masked out.
    flat_tile_set = micData.tile_set.flat[:]
    image_list = [micData.tile_nr[ind] if ind >= 0 else -1 for ind in flat_tile_set]
    image_list = np.ma.masked_equal(image_list, -1)
    logger.info("Tile set size: {}".format(micData.tile_set.shape))
    logger.info("Placing folowing image references in tiles: {}"
                .format(image_list))
    # Make a list of the tile references
    tiles = inout.get_image_names(tile_file, image_list = image_list,
                                  hyb_nr = hyb_nr, gene = gene, pre_proc_level = pre_proc_level)
    # Load the data from the metadata file
    ExperimentInfos, ImageProperties, HybridizationsInfos, \
        Converted_Positions, MicroscopeParameters = \
        utils.experimental_metadata_parser(folder)
    # Read the number of pixels and z-count from the yaml
    # file.
    try:
        nr_pixels = ImageProperties['HybImageSize']['rows']
    except KeyError as err:
        logger.info(("Number of pixels not found in experimental "
                     + "metadata file.\nPlease add "
                     + "the number of pixels in an image "
                     + "to the experimental "
                     + "metadata file under ImageProperties "
                     + "--> HybImageSize --> rows.\n"
                     + "KeyError: {}").format(err))
        raise
    if nr_dim == 2:
        z_count = 1
    else:
        try:
            z_count = ImageProperties['HybImageSize']['zcount']
        except KeyError as err:
            logger.info(
                ("Number of pixels not found in experimental "
                 + "metadata file.\nPlease add "
                 + "the number of slices in the z-stack "
                 + "to the experimental "
                 + "metadata file under ImageProperties "
                 + "--> HybImageSize --> zcount.\n"
                 + "KeyError: {}")
                .format(err))
            raise
    logger.info("Size tiles: {} Number of pixels: {} z count: {}"
                .format(len( tiles), nr_pixels, z_count))
    # Check pairwise overlap in signal
    if check_pairwise:
        contig_tuples = stitching_coord_dict['contig_tuples']
        logger.info(
            "Length contingency tuples: {} Contingency tuples: {}"
            .format(len(contig_tuples), contig_tuples))
        return (joining, tiles, nr_pixels, z_count, micData,
                contig_tuples)
    else:
        return (joining, tiles, nr_pixels, z_count, micData)
############################# Refine ###################################
def get_refine_pairwise_input(folder, tile_file, hyb_nr, data_name,
                              gene = 'Nuclei', pre_proc_level = 'Filtered',
                              nr_dim = 2):
    """Get the data needed to refine stitching with another gene

    Parameters:
    -----------

    folder: str
        String representing the path of the folder containing
        the tile file, the stitching data file the yaml metadata
        file. Needs a trailing slash ('/').
    tile_file: pointer
        HDF5 file handle. Reference to the opened file
        containing the tiles.
    hyb_nr: int
        The number of the hybridization we are going to
        stitch. This will be used to navigate tile_file and find
        the correct tiles.
    data_name: str
        Name of the file containing the pickled stitching data.
    gene: str
        The name of the gene we are going to stitch.
        This will be used to navigate tile_file and find the
        correct tiles. (Default: 'Nuclei')
    pre_proc_level: str
        The name of the pre processing group of
        the tiles we are going to stitch.
        This will be used to navigate tile_file and find the
        correct tiles. (Default: 'Filtered')
    nr_dim: int
        If 3, the code will assume three dimensional data
        for the tile, where z is the first dimension and y and x
        the second and third. For any other value 2-dimensional data
        is assumed. (Default: 2)

    Returns:
    --------

    tiles: list
        List of references to the the tiles in the hdf5 file tile_file.
    contig_tuples: list
        List of tuples. Each tuple is a tile pair.
        Tuples contain two tile indexes denoting these
        tiles are contingent to each other.
    nr_pixels: int
        Height and length of the tile in pixels, tile
        is assumed to be square.
    z_count: int
        The number of layers in one tile (size of the z-axis). Is 1 when nr_dim is not 3.
    micData: object
        MicroscopeData object. Contains coordinates of
        the tile corners as taken from the microscope.
    C: np.array
        Upper-triangular adjacency matrix of the tile grid; non-zero
        entries mark neighbouring tile pairs.
    alignment_old: np.array
        The 'P' array of the previous alignment, loaded from the
        pickled stitching data.
    """
    logger.info("Aplying stitching from file")
    # Load image list and old joining data
    stitching_coord_dict = inout.load_stitching_coord(folder + data_name)
    # noinspection PyPep8Naming
    micData = stitching_coord_dict['micData']
    logger.info("Joining object and image list loaded from file")
    # Negative indexes mark empty grid positions and are masked out.
    flat_tile_set = micData.tile_set.flat[:]
    image_list = [micData.tile_nr[ind] if ind >= 0 else -1 for ind in flat_tile_set]
    image_list = np.ma.masked_equal(image_list, -1)
    logger.info("Tile set size: {}".format(micData.tile_set.shape))
    logger.info("Loading images: {}".format(image_list))
    # Make a list of the image names
    tiles = inout.get_image_names(tile_file, image_list = image_list,
                                  hyb_nr = hyb_nr, gene = gene, pre_proc_level = pre_proc_level)
    contig_tuples = stitching_coord_dict['contig_tuples']
    # Note: only the 'P' array of the saved alignment is kept here.
    alignment_old = stitching_coord_dict['alignment']['P']
    # Load the data from the metadata file
    ExperimentInfos, ImageProperties, HybridizationsInfos, \
        Converted_Positions, MicroscopeParameters = \
        utils.experimental_metadata_parser(folder)
    # Read the number of pixels and z-count from the yaml
    # file.
    try:
        nr_pixels = ImageProperties['HybImageSize']['rows']
    except KeyError as err:
        logger.info(("Number of pixels not found in experimental "
                     + "metadata file.\nPlease add "
                     + "the number of pixels in an image "
                     + "to the experimental "
                     + "metadata file under ImageProperties "
                     + "--> HybImageSize --> rows.\n"
                     + "KeyError: {}").format(err))
        raise
    if nr_dim == 2:
        z_count = 1
    else:
        try:
            z_count = ImageProperties['HybImageSize']['zcount']
        except KeyError as err:
            logger.info(
                ("Number of pixels not found in experimental "
                 + "metadata file.\nPlease add "
                 + "the number of slices in the z-stack "
                 + "to the experimental "
                 + "metadata file under ImageProperties "
                 + "--> HybImageSize --> zcount.\n"
                 + "KeyError: {}")
                .format(err))
            raise
    # Recalculate C: upper triangle of the tile-grid adjacency matrix.
    C = sklim.grid_to_graph(*micData.tile_set.shape).todense()
    np.fill_diagonal(C, 0)
    C = np.triu( C )
    return (tiles, contig_tuples, nr_pixels, z_count, micData, C, alignment_old)
def refine_pairwise_alignments(tiles, tile_file, contig_tuples, alignment_old,
                               micData = None, nr_peaks = 8,
                               nr_dim = 2):
    """Calculate the pairwise transition

    Calculates pairwise transition for each neighbouring pair of
    tiles, refining a previously computed alignment.

    Parameters:
    -----------

    tiles: np.array
        Array of tiles, a tile should be a 2d np.array
        representing a picture
    tile_file: pointer
        HDF5 file handle. Reference to the opened file containing the tiles.
    contig_tuples: list
        List of tuples denoting which tiles are contingent to each other.
    alignment_old: dict
        Previous alignment; its 'P' key is passed on to
        pairwisesingle.refine_single_pair.
        NOTE(review): get_refine_pairwise_input returns the already
        extracted 'P' array, which would make the ['P'] lookup below
        fail — confirm which shape callers actually pass.
    micData: object
        MicroscopeData object containing coordinates (default None)
    nr_peaks: int
        nr of peaks to be extracted from the PCM (default 8)
    nr_dim: int
        If 3, the code will assume three dimensional data
        for the tile, where z is the first dimension and y and x
        the second and third. For any other value 2-dimensional data
        is assumed. (default: 2)

    Returns:
    --------

    : dict
        Contains key 'P' with a 1D numpy array
        containing pairwise alignment y and x coordinates
        (and z-coordinates when applicable) for each
        neighbouring pair of tiles, array will be
        2 * len(contig_typles) for 2D data
        or 3 * len(contig_typles) for 3D data.
        Also contains key 'covs' with a 1D numpy array
        containing covariance for each pairwise alignment in
        'P', 'covs' will be len(contig_typles).
    """
    # Make a new P and cov list for the refine pairwise alignments
    P_ref = np.empty((len(contig_tuples), nr_dim), dtype = int)
    covs_ref = np.empty(len(contig_tuples))
    for i in range(len(contig_tuples)):
        # refine_single_pair reports the index it processed; store the
        # result at that index.
        P_single, cov, contig_index = ps.refine_single_pair(tiles,
                                                            tile_file,
                                                            contig_tuples, i, micData,
                                                            alignment_old['P'], nr_peaks,
                                                            nr_dim = nr_dim)
        P_ref[contig_index, :] = P_single
        covs_ref[contig_index] = cov
    logger.info("Raw P: {}".format(P_ref))
    # Flatten P for the global optimization.
    P_ref = np.array(P_ref).flat[:]
    logger.info("flat P: {}".format(P_ref))
    return {'P': P_ref, 'covs': covs_ref}
######################### General functions ############################
def get_place_tile_input(folder, tiles, contig_tuples,
                         micData, nr_pixels, z_count, alignment,
                         data_name, nr_dim = 2, save_alignment = True):
    """Do the global alignment and get the shifted corner coordinates.

    Calculates a shift in global coordinates for each tile (global
    alignment) and then applies these shifts to the corner coordinates
    of each tile and returns and saves these shifted corner coordinates.
    This function produces a file with stitching data in folder
    called data_name, this file includes the corner coordinates which
    can be used to apply the stitching to another gene.

    Parameters:
    -----------

    folder: str
        String representing the path of the folder containing
        the tile file and the yaml metadata file. Needs a
        trailing slash ('/').
    tiles: list
        List of strings. List of references to the the
        tiles in the hdf5 file tile_file.
    contig_tuples: list
        List of tuples. Each tuple is a tile pair.
        Tuples contain two tile indexes denoting these
        tiles are contingent to each other.
    micData: object
        MicroscopeData object. Contains coordinates of
        the tile corners as taken from the microscope.
    nr_pixels: int
        Height and length of the tile in pixels, tile is assumed to be square.
    z_count: int
        The number of layers in one tile (size of the z-axis). Is 1 when nr_dim is not 3.
    alignment: dict
        Contains key 'P' with a 1D numpy array
        containing pairwise alignment y and x coordinates
        (and z-coordinates when applicable) for each
        neighbouring pair of tiles, array will be
        2 * len(contig_typles) for 2D data
        or 3 * len(contig_typles) for 3D data.
        Also contains key 'covs' with a 1D numpy array
        containing covariance for each pairwise alignment in
        'P', 'covs' will be len(contig_typles).
    data_name: str
        Name of the file containing the pickled stitching data.
    nr_dim: int
        If 3, the code will assume three dimensional data
        for the tile, where z is the first dimension and y and x
        the second and third. For any other value 2-dimensional data
        is assumed. (default: 2)
    save_alignment: bool
        When False only the stitching
        coordinates and microscope data will be saved. When
        True also the contigency tuples and pairwise
        alignment will be saved (this is necessary if we
        want to refine the stitching later). (Default: True)

    Returns:
    --------

    joining: dict
        Contains keys corner_list and
        final_image_shape.
        Corner_list is a list of list, each list is a pair
        of a tile index (int) and it's tile's shifted
        coordinates in the final image (numpy array
        containing floats).
        Final_image_shape is a tuple of size 2 or 3
        depending on the numer of dimensions and contains
        ints.
    """
    # Perform global optimization
    logger.debug("Initializing global optimization")
    optimization = GlobalOptimization()
    logger.debug("Starting optimization, micData")
    optimization.performOptimization(micData.tile_set, contig_tuples,
                                     alignment['P'], alignment['covs'],
                                     len(tiles), nr_dim)
    # Stitch everything back together
    # Determine global corners
    joining = tilejoining.calc_corners_coord(tiles,
                                             optimization.global_trans, micData, nr_pixels, z_count)
    # Save the data to do the stitching in "data_name":
    if joining:
        if save_alignment:
            # Full save: coordinates plus the data needed to refine later.
            inout.save_to_file(folder + data_name,
                               joining = joining,
                               contig_tuples = contig_tuples,
                               alignment = alignment,
                               micData = micData)
        else:
            inout.save_to_file(folder + data_name,
                               micData = micData,
                               joining = joining)
    else:
        logger.warning("No results found to save: joining is empty")
    return joining
def assess_performance(micData, alignment, joining,
                       cov_signal, xcov_list, folder,
                       use_IJ_corners = False):
    """Assess the performance of the stitching.

    Writes a timestamped plain-text report into
    ``folder + 'performance/'`` (the directory is created on first use).
    The report contains, when available: a per-tile comparison against
    ImageJ corners, the average cross covariance of the final overlaps,
    the tile set, the pairwise alignment and the aligned corners.

    Parameters:
    -----------
    micData: object
        MicroscopeData object. Contains coordinates of
        the tile corners as taken from the microscope
        and contains the tile set. May be None.
    alignment: dict
        Contains key 'P' with a 1D numpy array of pairwise alignment
        y and x (and z when applicable) coordinates for each
        neighbouring tile pair, and key 'covs' with a 1D numpy array
        containing the covariance for each pairwise alignment in 'P'.
        May be None.
    joining: dict
        Contains keys corner_list and final_image_shape.
        Corner_list is a list of [tile index (int), shifted tile
        coordinates (numpy float array)] pairs. May be None.
    cov_signal: np.array
        The covariance of each neighbouring tile pair in the part of
        the tiles that overlap in the final stitched signal image.
        May be None.
    xcov_list: list
        List of cross covariance of the overlap of the tiles in the
        final stitched image, as returned by tilejoining.assess_overlap.
        May be None.
    folder: str
        String representing a path. The folder where the performance
        report should be saved. Needs a trailing slash ('/').
    use_IJ_corners: bool
        If True compare our corners to ImageJ corners found in a file
        in folder; the file name should contain: TileConfiguration.
        (default: False)
    """
    ############################# Gather data ##########################
    report_string = ""
    if use_IJ_corners:
        compare_corners = inout.read_IJ_corners(folder)
        # Make a list of image numbers, matching with the numbers in the
        # image files (-1 marks an empty tile position).
        flat_tile_set = micData.tile_set.flat[:]
        image_list = [micData.tile_nr[ind] if ind >= 0
                      else -1 for ind in flat_tile_set]
        image_list = np.ma.masked_equal(image_list, -1)
        # Compare our corners to the corners the ImageJ plugin found.
        # Keep only the tiles we actually used; the coordinate pair is
        # swapped here (item[1][1], item[1][0]) to match our (y, x) order
        # -- presumably ImageJ stores (x, y); TODO confirm.
        compare_corners_new = [[item[0], np.array([item[1][1], item[1][0]])]
                               for item in compare_corners
                               if item[0] in image_list]
        # Replace the indexes with numbering of the images:
        logger.debug('My corners {}'.format(joining['corner_list']))
        my_corners_new = [[image_list[i], item[1]]
                          for i, item in enumerate(joining['corner_list'])]
        logger.debug('My corners new {}'.format(my_corners_new))
        logger.debug('Compare corners new {}'.format(compare_corners_new))
        # Normalize both corner sets to their first corner so the two
        # coordinate systems share an origin before comparing.
        compare_origin = compare_corners_new[0][1]
        my_origin = my_corners_new[0][1]
        logger.debug("origins: {}, {}".format(compare_origin, my_origin))
        cum_diff = np.zeros((1, 2))
        for i in range(len(my_corners_new)):
            my_cur = (my_corners_new[i][1] - my_origin)
            compare_cur = (compare_corners_new[i][1] - compare_origin)
            diff = abs(my_cur - compare_cur)
            cum_diff += diff
            report_string += ("My tile: {}, {}, compare tile: {}, {}; difference: {}\n"
                              .format(my_corners_new[i][0], my_cur,
                                      compare_corners_new[i][0], compare_cur,
                                      diff))
        report_string += "\nAverage: {}\n".format(cum_diff / len(my_corners_new))
    # Calculate average cross covariances of the overlaps in the final image:
    if xcov_list is not None:
        av_xcov = np.mean(xcov_list)
    else:
        logger.info('No cross covariance data available')
        av_xcov = None
    report_string += "\nAverage cross covariance of final overlap: {}\n".format(av_xcov)
    report_string += "Cross covariance list of final overlap: {}\n".format(xcov_list)
    ###################### Save the performance data ###################
    perf_path = folder + 'performance/'
    # Create the report directory on first use only, so a pre-existing
    # directory keeps its permissions.  (Replaces a bare except around
    # os.stat that silently swallowed every error.)
    if not os.path.exists(perf_path):
        os.mkdir(perf_path)
        os.chmod(perf_path, 0o777)
    dateTag = time.strftime("%y%m%d_%H_%M_%S")
    with open(perf_path + dateTag + '-performance' + '.txt', 'w') as f:
        f.write(report_string)
        if micData is not None:
            # If available print the tile layout:
            f.write(("\nTile set: \n{} \n"
                     "Tile numbers: {}\n")
                    .format(micData.tile_set, micData.tile_nr))
        if alignment is not None:
            # If available print the alignment results:
            f.write(("Pairwise Alignment: {}\n"
                     "Covariances: {}\n"
                     "Average covariance: {}\n")
                    .format(alignment['P'], alignment['covs'],
                            np.nanmean(alignment['covs'])))
        if joining is not None:
            f.write(("Corners after alignment: \n{}\n")
                    .format(joining['corner_list']))
        if cov_signal is not None:
            f.write(("\nAverage pairwise covariance of the signal: {}\n"
                     "Pairwise covariance of the signal:\n{}\n")
                    .format(np.nanmean(cov_signal), cov_signal))
        # (The explicit f.close() was redundant inside the with-block
        # and has been removed.)
############################# Visualization ############################
def save_as_tiff(data_file, hyb_nr, gene, location_image,
                 pre_proc_level = 'StitchedImage', mode = 'both'):
    """Save the stitching results as tiff image(s) for visual inspection.

    Parameters:
    -----------
    data_file: pointer
        HDF5 file handle. HDF5 file containing the final image.
    hyb_nr: int
        The number of the hybridization we have stitched. Used to
        navigate data_file and find the correct final picture.
    gene: str
        The name of the gene we stitched. Used to navigate data_file
        and find the correct final picture.
    location_image: str
        Full path to the file where the tiff file will be saved
        (extension not necessary).
    pre_proc_level: str
        The name of the pre processing group of the tiles we are going
        to stitch. Normally 'StitchedImage', but may be changed when
        the final image lives in another datagroup.
        (Default: 'StitchedImage')
    mode: str
        Selects which image(s) are written. 'save_ubyte' writes only
        the low-quality 8-bit copy, 'save_float' only the full-quality
        image, 'save_rgb' only the RGB version. Any other value writes
        both the 8-bit copy and the full-quality image.
        (Default: 'both')
    """
    # Map each mode onto the (dataset name, filename suffix) pairs to save.
    known_modes = {
        'save_ubyte': [('final_image_ubyte', '_byte')],
        'save_float': [('final_image', '')],
        'save_rgb':   [('final_image_rgb', '_rgb')],
    }
    # Unrecognised modes (including the default 'both') save the 8-bit
    # copy as well as the full-quality image.
    default_tasks = [('final_image_ubyte', '_byte'), ('final_image', '')]
    for dataset_name, suffix in known_modes.get(mode, default_tasks):
        inout.save_image(data_file, hyb_nr, gene, pre_proc_level,
                         dataset_name, location_image + suffix)
def plot_final_image(im_file_name, joining, hyb_nr = 1,
                     gene = 'Nuclei', fig_name = "final image",
                     shrink_image = False, block = True):
    """Displays the high quality final image in a plot window.

    Takes a lot of working memory for full sized images.
    When plt_available is false this function does nothing and returns
    None. Plots the final image first and then the temporary mask.

    Parameters:
    -----------
    im_file_name: str or h5py handle
        Filename of the hdf5 file containing the final image, or an
        already open HDF5 handle navigated via gene / hybridization.
    joining: dict
        Stitching result; key 'final_image_shape' is used to size the
        shrunken display.
    hyb_nr: int
        Number of the hybridization to display. (default: 1)
    gene: str
        Gene name used to navigate an open HDF5 handle.
        (default: 'Nuclei')
    fig_name: str
        Name of the plotting window (default: "final image").
    shrink_image: bool
        Turn on shrink_image to reduce display quality and memory
        usage. (Default: False)
    block: bool
        Plot blocks the running program untill the plotting window is
        closed if true. Turn off block to make the code continue untill
        the next call of plt.show(block=True) before displaying the
        image. (default: True)
    """
    if not plt_available:
        return None
    # ----------------------- Final image -----------------------------
    if isinstance(im_file_name, str):
        # Load the image from file.
        # NOTE(review): the file handle is deliberately left open while
        # the dataset is displayed; the HDF5 dataset is read lazily.
        im_file = h5py.File(im_file_name + '_Hybridization' +
                            str(hyb_nr) + '.sf.hdf5', 'r')
        for_display = im_file['final_image']
    else:
        # Navigate an already open handle
        for_display = im_file_name[gene] \
            ['StitchedImage']['final_image']
    # Shrink the image if necessary
    if shrink_image:
        display_size = np.array(joining['final_image_shape'],
                                dtype = int)/10
        logger.debug("display size pixels: {}".format(display_size))
        for_display = smtf.resize(for_display, tuple(display_size))
    # Plot the image
    if for_display.ndim == 3:
        inout.plot_3D(for_display)
    else:
        plt.figure(fig_name)
        plt.imshow(for_display, 'gray', interpolation = 'none')
        plt.show(block = False)
    # ------------------------- Mask ----------------------------------
    if isinstance(im_file_name, str):
        # Load the mask from file
        im_file = h5py.File(im_file_name + '.hdf5', 'r')
        for_display = im_file['temp_mask']
    else:
        for_display = im_file_name['Hybridization' + str(hyb_nr)][gene] \
            ['StitchedImage']['temp_mask']
    # Shrink the image if necessary
    if shrink_image:
        # BUG FIX: joining is a dict (see the first shrink branch and the
        # docstring); the original used attribute access
        # (joining.final_image_shape), which raises AttributeError here.
        display_size = np.array(joining['final_image_shape'], dtype=int) / 10
        logger.debug("display size pixels: {}".format(display_size))
        for_display = smtf.resize(for_display, tuple(display_size))
    # Plot the mask
    plt.figure(fig_name + ' mask')
    plt.imshow(for_display, 'gray', interpolation='none')
    plt.show(block = block)
def get_pairwise_input_npy(image_properties,converted_positions, hybridization,
                           est_overlap, y_flip = False, nr_dim = 2):
    """Collect everything needed to run the pairwise alignment.

    Variant of get_pairwise_input that works on .npy files instead of
    hdf5. Determines the tile set and the neighbouring tile pairs for
    an unknown stitching.

    Parameters:
    -----------
    image_properties: dict
        Image details parsed from the Experimental_metadata.yaml file.
    converted_positions: dict
        Coordinates of the images for all hybridizations; each entry
        is a list of floats.
    hybridization: str
        Hybridization that will be processed (Ex. Hybridization2).
    est_overlap: float
        Fraction of two neighbours that should overlap. Used to
        estimate the tile set shape; later overwritten by the actual
        average overlap according to the microscope coordinates.
        (default: 0.1)
    y_flip: bool
        Set to True when the microscope sequence is inverted in the
        y-direction; the y-coordinates are then inverted before the
        tile set is determined. (Default: False)
    nr_dim: int
        If 3, tiles are assumed three dimensional with z as the first
        dimension and y and x as the second and third. Any other value
        means 2-dimensional data. (Default: 2)

    Returns:
    --------
    tiles: np.array
        Array of int with the tile numbers; -1 indicates an empty tile.
    contig_tuples: list
        List of tuples, each holding two tile indexes that are
        contingent to each other.
    nr_pixels: int
        Height and length of the (square) tile in pixels.
    z_count: int
        Number of layers in one tile (size of the z-axis); 1 when
        nr_dim is not 3.
    micData: object
        MicroscopeData object with the microscope tile corner
        coordinates.
    """
    # Microscope coordinates for the requested hybridization.
    coord_data = converted_positions[hybridization]
    # Required metadata: pixel count, z-count and pixel size. A missing
    # key is logged with instructions before the KeyError propagates.
    try:
        nr_pixels = image_properties['HybImageSize']['rows']
    except KeyError as err:
        logger.info(("Number of pixels not found in experimental "
                     "metadata file.\nPlease add "
                     "the number of pixels in an image "
                     "to the experimental "
                     "metadata file under ImageProperties "
                     "--> HybImageSize --> rows.\n"
                     "KeyError: {}").format(err))
        raise
    z_count = 1
    if nr_dim != 2:
        try:
            z_count = image_properties['HybImageSize']['zcount']
        except KeyError as err:
            logger.info(("Number of pixels not found in experimental "
                         "metadata file.\nPlease add "
                         "the number of slices in the z-stack "
                         "to the experimental "
                         "metadata file under ImageProperties "
                         "--> HybImageSize --> zcount.\n"
                         "KeyError: {}")
                        .format(err))
            raise
    try:
        pixel_size = image_properties['PixelSize']
    except KeyError as err:
        logger.info(("ImageProperties['PixelSize'] not found in "
                     "experimental metadata file.\nPlease add the "
                     "size of a pixel in um in the experimental "
                     "metadata file under ImageProperties "
                     "--> PixelSize.\nKeyError: {}").format(err))
        raise
    # Translate the user-provided overlap estimate (default 10%) into a
    # pixel tolerance for the tile-set estimation.
    est_x_tol = nr_pixels * (1 - est_overlap)
    logger.info("Estimating overlap at {}%, that is {} pixels"
                .format(est_overlap * 100, est_x_tol))
    logger.debug("Number of pixels: {}".format(nr_pixels))
    logger.debug("Number of slices in z-stack: {}".format(z_count))
    # Organize the microscope data and determine the tile set.
    micData = MicroscopeData(coord_data, y_flip, nr_dim)
    micData.normalize_coords(pixel_size)
    micData.make_tile_set(est_x_tol, nr_pixels = nr_pixels)
    # Tile numbers in reading order; -1 marks a missing tile.
    image_list = np.ma.masked_equal(
        [micData.tile_nr[ind] if ind >= 0 else -1
         for ind in micData.tile_set.flat[:]], -1)
    logger.info("Getting references for: {}".format(image_list))
    tiles = image_list.data
    # Build an undirected grid-adjacency graph of the tiles; zero the
    # diagonal and keep the upper triangle so each pair appears once.
    # noinspection PyPep8Naming
    adjacency = np.asarray(
        sklim.grid_to_graph(*micData.tile_set.shape).todense())
    np.fill_diagonal(adjacency, 0)
    adjacency = np.triu(adjacency)
    contig_tuples = list(zip(*np.where(adjacency)))
    logger.info(("Length contingency tuples: {} \n"
                 "Contingency tuples: {}")
                .format(len(contig_tuples), contig_tuples))
    return (tiles, contig_tuples, nr_pixels, z_count, micData)
def get_place_tile_input_apply_npy(hyb_dir,stitched_reference_files_dir,data_name,image_properties,nr_dim=2):
    """Load the data needed to apply an existing stitching to another gene.

    Variant of get_place_tile_input_apply that works on .npy files.

    Parameters:
    -----------
    hyb_dir: str
        Path of the folder containing the tile file, the stitching
        data file and the yaml metadata file. (Not referenced in this
        function; kept for interface compatibility.)
    stitched_reference_files_dir: str
        Path of the folder containing the registered data.
    data_name: str
        Name of the file containing the pickled stitching data.
    image_properties: dict
        Image details parsed from the Experimental_metadata.yaml file.
    nr_dim: int
        If 3, tiles are assumed three dimensional with z as the first
        dimension and y and x as the second and third. Any other value
        means 2-dimensional data. (Default: 2)

    Returns:
    --------
    joining: dict
        Taken from the stitching data file. Contains keys corner_list
        (list of [image number, coordinate array] pairs) and
        final_image_shape (tuple of 2 or 3 ints).
    tiles: list
        References to the tiles; -1 marks a missing tile.
    nr_pixels: int
        Height and length of the (square) tile in pixels.
    z_count: int
        Number of layers in one tile (size of the z-axis); 1 when
        nr_dim is not 3.
    micData: object
        MicroscopeData object taken from the pickled stitching data,
        with the microscope tile corner coordinates.
    """
    logger.info("Getting data to apply stitching from file...")
    # Previously pickled stitching results: tile layout and corners.
    stored = inout.load_stitching_coord(stitched_reference_files_dir + data_name)
    # noinspection PyPep8Naming
    micData = stored['micData']
    joining = stored['joining']
    logger.info("Joining object and image list loaded from file")
    # Tile numbers in reading order; -1 marks a missing tile.
    image_list = np.ma.masked_equal(
        [micData.tile_nr[ind] if ind >= 0 else -1
         for ind in micData.tile_set.flat[:]], -1)
    logger.info("Tile set size: {}".format(micData.tile_set.shape))
    logger.info("Placing folowing image references in tiles: {}"
                .format(image_list))
    tiles = image_list.data
    # Required metadata: pixel count and (for 3D) z-count. A missing key
    # is logged with instructions before the KeyError propagates.
    try:
        nr_pixels = image_properties['HybImageSize']['rows']
    except KeyError as err:
        logger.info(("Number of pixels not found in experimental "
                     "metadata file.\nPlease add "
                     "the number of pixels in an image "
                     "to the experimental "
                     "metadata file under ImageProperties "
                     "--> HybImageSize --> rows.\n"
                     "KeyError: {}").format(err))
        raise
    z_count = 1
    if nr_dim != 2:
        try:
            z_count = image_properties['HybImageSize']['zcount']
        except KeyError as err:
            logger.info(("Number of pixels not found in experimental "
                         "metadata file.\nPlease add "
                         "the number of slices in the z-stack "
                         "to the experimental "
                         "metadata file under ImageProperties "
                         "--> HybImageSize --> zcount.\n"
                         "KeyError: {}")
                        .format(err))
            raise
    logger.info("Size tiles: {} Number of pixels: {} z count: {}"
                .format(len(tiles), nr_pixels, z_count))
    return (joining, tiles, nr_pixels, z_count, micData)
| 48,157 | 13,704 |
from ode_explorer.integrators.integrator_loops import (
constant_h_loop,
adaptive_h_loop
)
from ode_explorer.integrators.integrator import Integrator
from ode_explorer.integrators.loop_factory import loop_factory
| 221 | 71 |
import torch
import torch.nn as nn
from gan.spectral_normalization import SpectralNorm
class Discriminator(nn.Module):
def __init__(self, input_channels=3):
super(Discriminator, self).__init__()
#Hint: Hint: Apply spectral normalization to convolutional layers. Input to SpectralNorm should be your conv nn module
####################################
# YOUR CODE HERE #
####################################
self.conv1 = SpectralNorm(nn.Conv2d(3, 128, 4, stride=2, padding=1))
self.conv2 = SpectralNorm(nn.Conv2d(128, 256, 4, stride=2, padding=1))
self.bn1 = nn.BatchNorm2d(256)
self.conv3 = SpectralNorm(nn.Conv2d(256, 512, 4, stride=2, padding=1))
self.bn2 = nn.BatchNorm2d(512)
self.conv4 = SpectralNorm(nn.Conv2d(512, 1024, 4, stride=2, padding=1))
self.bn3 = nn.BatchNorm2d(1024)
self.conv5 = SpectralNorm(nn.Conv2d(1024, 1, 4, stride=1, padding=0))
self.leakyrelu = nn.LeakyReLU(0.2)
self.conv1 = nn.Conv2d(3, 128, 4, stride=2, padding=1)
self.conv2 = nn.Conv2d(128, 256, 4, stride=2, padding=1)
self.bn1 = nn.BatchNorm2d(256)
self.conv3 = nn.Conv2d(256, 512, 4, stride=2, padding=1)
self.bn2 = nn.BatchNorm2d(512)
self.conv4 = nn.Conv2d(512, 1024, 4, stride=2, padding=1)
self.bn3 = nn.BatchNorm2d(1024)
self.conv5 = nn.Conv2d(1024, 1, 4, stride=1, padding=0)
self.leakyrelu = nn.LeakyReLU(0.2)
########## END ##########
def forward(self, x):
####################################
# YOUR CODE HERE #
####################################
x = self.leakyrelu(self.conv1(x))
x = self.leakyrelu(self.conv2(x))
x = self.bn1(x)
x = self.leakyrelu(self.conv3(x))
x = self.bn2(x)
x = self.leakyrelu(self.conv4(x))
x = self.bn3(x)
x = self.conv5(x)
# x = self.leakyrelu(x)
batch_size = x.shape[0]
x = x.view(batch_size,1)
########## END ##########
return x
class Generator(nn.Module):
def __init__(self, noise_dim, output_channels=3):
super(Generator, self).__init__()
self.noise_dim = noise_dim
####################################
# YOUR CODE HERE #
####################################
self.model = nn.Sequential(
nn.ConvTranspose2d(self.noise_dim, 1024, 4, stride = 1),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.ConvTranspose2d(1024, 512, 4, stride = 2, padding=1),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.ConvTranspose2d(512, 256, 4, stride = 2, padding=1),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.ConvTranspose2d(256, 128, 4, stride = 2, padding=1),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.ConvTranspose2d(128, 3, 4, stride = 2, padding=1),
nn.Tanh()
)
########## END ##########
def forward(self, x):
####################################
# YOUR CODE HERE #
####################################
x = self.model(x)
########## END ##########
return x
| 3,374 | 1,346 |
import pandas as pd
from .utils import logging, filter_config, DATABASE
logger = logging.getLogger(__name__)
con = f'postgresql:///{DATABASE}'
def get_ids(level):
result = []
for l in range(level, -1, -1):
result += [f'adm{l}_id', f'adm{l}_name']
return ','.join(result)
def rename_id(df, level):
for l in range(level, -1, -1):
df[f'adm{l}_src'] = df[f'adm{l}_id']
df[f'adm{l}_name1'] = None
df[f'adm{l}_name2'] = None
return df
def get_max_pad(df, level):
col = f'adm{level}_id'
col_higher = f'adm{level-1}_id'
prev_id = None
higher_id = None
id_num = None
id_max = 0
for _, row in df.iterrows():
if row[col_higher] != higher_id:
id_num = 1
elif row[col] != prev_id:
id_num = id_num + 1
higher_id = row[col_higher]
prev_id = row[col]
id_max = max(id_max, id_num)
return len(str(id_max))
def create_ids(df, name, level, date):
df['adm0_id'] = f"{name.upper()}-{date.strftime('%Y%m%d')}"
for l in range(1, level+1):
col = f'adm{l}_id'
col_higher = f'adm{l-1}_id'
prev_id = None
higher_id = None
id_num = None
id_max = get_max_pad(df, l)
for _, row in df.iterrows():
if row[col_higher] != higher_id:
id_num = 1
elif row[col] != prev_id:
id_num = id_num + 1
higher_id = row[col_higher]
prev_id = row[col]
new_val = f'{higher_id}-{str(id_num).zfill(id_max)}'
row[f'adm{l}_id'] = new_val
return df
def order_ids(level):
result = []
for l in range(level, -1, -1):
result += [f'adm{l}_id', f'adm{l}_src',
f'adm{l}_name', f'adm{l}_name1', f'adm{l}_name2']
return result
def add_meta(df, row):
meta_1 = ['src_lvl']
for m in meta_1:
df[m] = row[m]
df['src_lang'] = 'en'
df['src_lang1'] = None
df['src_lang2'] = None
meta_2 = ['src_date', 'src_update', 'src_name',
'src_name1', 'src_lic', 'src_url']
for m in meta_2:
df[m] = row[m]
df['src_date'] = pd.to_datetime(df['src_date'])
df['src_update'] = pd.to_datetime(df['src_update'])
df['src_grp'] = 'geoBoundaries'
return df
def handle_filter(df, level, config):
col = f"adm{config['adm']}_src"
for name, (switch, *args) in config['layers'].items():
if switch == '==':
df1 = df[df[col].isin(args)]
elif switch == '!=':
df1 = df[~df[col].isin(args)]
df1.to_sql(f'{name}_adm{level}_01', con,
if_exists='replace', index=False, method='multi')
def main(_, name, level, row):
query = f'SELECT {get_ids(level)} FROM {name}_adm{level}_00'
df = pd.read_sql_query(query, con)
cols = list(map(lambda x: [f'adm{x}_name', f'adm{x}_id'], range(level+1)))
cols = [i for l in cols for i in l]
df = df.sort_values(by=cols)
df = rename_id(df, level)
df = create_ids(df, name, level, row['src_update'])
df = df[order_ids(level)]
df = add_meta(df, row)
if name in filter_config.keys():
handle_filter(df, level, filter_config[name])
else:
df.to_sql(f'{name}_adm{level}_01', con,
if_exists='replace', index=False, method='multi')
logger.info(f'{name}_adm{level}')
| 3,388 | 1,326 |
from app.db import DTOBase
class Vehicle(DTOBase):
__id_field__ = "vin"
__display__ = "display_name"
def __init__(
self,
make: str,
model: str,
year: int,
state: str,
policy_id: int = -1,
vin: str = None,
):
self.make = make
self.model = model
self.year = year
self.state = state
self.policy_id = policy_id
self.vin = vin
@property
def display_name(self):
return f"{self.vin} - {self.make} {self.model}"
class Driver(DTOBase):
__id_field__ = "license"
__display__ = "full_name"
def __init__(
self, fname: str, mname: str, lname: str, birthdate: str, license: str = None
):
self.fname = fname
self.mname = mname
self.lname = lname
self.birthdate = birthdate
self.license = license
@property
def full_name(self):
return f"{self.fname} {self.lname}"
class VehicleDriver:
def __init__(self, license: str = None, vin: str = None):
self.license = license
self.vin = vin
| 1,112 | 376 |
from setuptools import setup, find_packages
with open('requirements.txt') as f:
install_requires = f.read().strip().split('\n')
# get version from __version__ variable in apptest/__init__.py
from apptest import __version__ as version
setup(
name='apptest',
version=version,
description='prueba',
author='orlando Cholota',
author_email='edwin_orlando83@hotmail.com',
packages=find_packages(),
zip_safe=False,
include_package_data=True,
install_requires=install_requires
)
| 484 | 166 |
# !/usr/bin/env python
# coding: utf-8
from flask import Flask
import os
from flask_helper.exception import InvalidHookClass
from flask_helper.flask_hook import FlaskHook
from flask_helper.hooks.cors_hook import CorsHook
from flask_helper.hooks.handle_30x_hook import Handle30xHook
from flask_helper.hooks.real_ip_hook import RealIPHook
from flask_helper.hooks.user_agent_hook import UserAgentHook
from flask_helper.view import View
from flask_helper.utils.loader import load_classes_from_directory
from flask_helper.utils.loader import load_objects_from_directory
from flask_helper.utils.log import DummyLog
__author__ = 'zhouhenglc'
class _HookFlask(object):
def __init__(self, log=None):
self.hooks = []
self._hook_log = log if log else DummyLog()
def before_request_hook(self):
for hook in self.hooks:
resp = hook.before_request()
if resp is not None:
return resp
def after_request_hook(self, response):
for hook in reversed(self.hooks):
response = hook.after_request(response)
return response
def add_hook(self, hook_cls, *args, **kwargs):
if not issubclass(hook_cls, FlaskHook):
raise InvalidHookClass()
hook_obj = hook_cls(self, *args, **kwargs)
insert_i = len(self.hooks)
for i in range(len(self.hooks) - 1, -1, -1):
if type(self.hooks[i]) == hook_cls:
return self.hooks[i]
if hook_obj.priority < self.hooks[i].priority:
insert_i = i
self._hook_log.info('add hook %s priority is %s',
hook_obj.__class__.__name__,
hook_obj.priority)
self.hooks.insert(insert_i, hook_obj)
return hook_obj
class PredefinedHookFlask(_HookFlask):
def cross_domain(self, **kwargs):
hook_obj = self.add_hook(CorsHook, **kwargs)
return hook_obj
def filter_user_agent(self, *args, **kwargs):
hook_obj = self.add_hook(UserAgentHook, *args, **kwargs)
return hook_obj
def handle_30x(self, **kwargs):
hook_obj = self.add_hook(Handle30xHook, **kwargs)
return hook_obj
def real_ip(self, trust_proxy=None):
if trust_proxy is None:
trust_proxy = ["127.0.0.1"]
hook_obj = self.add_hook(RealIPHook, trust_proxy=trust_proxy)
return hook_obj
class FlaskHelper(Flask, PredefinedHookFlask):
def __init__(self, import_name, *args, **kwargs):
self.log = kwargs.pop('log', DummyLog())
Flask.__init__(self, import_name, *args, **kwargs)
PredefinedHookFlask.__init__(self, self.log)
self.before_request_funcs.setdefault(None, [])
self.after_request_funcs.setdefault(None, [])
self.before_request_funcs[None].append(self.before_request_hook)
self.after_request_funcs[None].append(self.after_request_hook)
self.hooks_folders = set()
default_hooks_folder = os.path.join(self.root_path, 'hooks')
if os.path.exists(default_hooks_folder):
self.register_hooks(default_hooks_folder)
self.views_folders = set()
self._views = set()
default_views_folder = os.path.join(self.root_path, 'views')
if os.path.exists(default_views_folder):
self.register_views(default_views_folder)
def register_blueprint(self, blueprint, **options):
if isinstance(blueprint, View):
self.jinja_env.globals.update(blueprint.jinja_env)
self.log.info('register blueprint %s', blueprint.name)
Flask.register_blueprint(self, blueprint, **options)
def register_views(self, views_folder):
self.log.info('register views from %s', views_folder)
views_folder = os.path.abspath(views_folder)
if views_folder in self.views_folders:
return
self.views_folders.add(views_folder)
module_prefix = 'flask_helper.views_%s' % len(self.hooks_folders)
v_objects = load_objects_from_directory(views_folder, module_prefix,
View)
for v_obj in v_objects:
if v_obj.name in self._views:
self.log.warning('%s blueprint name exist', v_obj.name)
return
self.register_blueprint(v_obj)
self._views.add(v_obj.name)
def register_hooks(self, hooks_folder):
self.log.info('register hooks from %s', hooks_folder)
hooks_folder = os.path.abspath(hooks_folder)
if hooks_folder in self.hooks_folders:
return
self.hooks_folders.add(hooks_folder)
module_prefix = 'flask_helper.hooks_%s' % len(self.hooks_folders)
h_classes = load_classes_from_directory(hooks_folder, module_prefix,
FlaskHook)
for h_class in h_classes:
self.add_hook(h_class)
def run(self, host=None, port=None, **options):
log = options.pop('log', None)
try:
import eventlet
from eventlet import wsgi
# eventlet.monkey_patch()
if host is None:
host = '0.0.0.0'
if port is None:
port = 5000
listen = eventlet.listen((host, port))
wsgi.server(listen, self, log=log, **options)
except ImportError:
Flask.run(host, port, **options)
| 5,448 | 1,713 |
###########################
#
# #217 Balanced Numbers - Project Euler
# https://projecteuler.net/problem=217
#
# Code by Kevin Marciniak
#
###########################
| 167 | 53 |
import base64
import boto3
import datetime
import gspread
import json
import logging
from oauth2client.service_account import ServiceAccountCredentials
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def get_client_secret(filename):
"""Get the client secret from encrypted file. Returns decrypted json object"""
with open(filename) as file:
json_file = json.load(file)
cyphertext = json_file['CiphertextBlob']
blob = base64.b64decode(cyphertext)
client = boto3.client('kms')
secret = client.decrypt(CiphertextBlob=blob)['Plaintext']
s = secret.decode('ascii')
return json.loads(s)
def connect(filename):
# use creds to create a client to interact with the Google Drive API
scope = ['https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive']
keyfile_dict = get_client_secret(filename)
creds = ServiceAccountCredentials.from_json_keyfile_dict(keyfile_dict=keyfile_dict, scopes=scope)
client = gspread.authorize(creds)
return client
def get_dinner_titles(workbook):
"""gets the list of available dinners from the workbook by worksheet titles"""
worksheets = workbook.worksheets()
worksheet_titles = [i.title for i in worksheets]
# Exclude system titles
worksheet_titles.remove('History')
worksheet_titles.remove('Settings')
return worksheet_titles
def get_next_dinner_title(workbook):
"""gets the title of the next dinner"""
# Check for override in the settings tab
settings_worksheet = workbook.worksheet('Settings')
override_cell = settings_worksheet.find('Override')
override_value = settings_worksheet.cell(override_cell.row, 2).value
next_dinner = override_value
# Checking dinner dates in the History tab
worksheet_titles = get_dinner_titles(workbook)
if override_value not in worksheet_titles:
# Get next week's dinner template by selecting the oldest date from the history table
history_worksheet = workbook.worksheet('History')
history_values = history_worksheet.get_all_values()
oldest_date = datetime.datetime.now()
next_dinner = ''
for h in history_values:
dinner_date = datetime.datetime.strptime(h[1], '%m/%d/%Y %H:%M:%S')
print(h[0], dinner_date)
if dinner_date <= oldest_date:
next_dinner = h[0]
oldest_date = dinner_date
else:
# Clear the override so next week will be back on the regular rotation
settings_worksheet.update_cell(override_cell.row, override_cell.col+1, '')
return next_dinner
def get_next_dinner(workbook):
next_dinner_title = get_next_dinner_title(workbook)
print(next_dinner_title)
return workbook.worksheet(next_dinner_title)
def get_dinner_items(worksheet):
"""gets the list of items from the worksheet dinner template"""
dinner_items = worksheet.col_values(1)
dinner_items = [d.strip(' ') for d in dinner_items]
return dinner_items
def reset_spreadsheet(worksheet, theme_location, fooditem_range):
"""clears last week's items from the spreadsheet"""
# Clear dinner theme
worksheet.update_acell(theme_location, '')
# Clear dinner items
range_of_cells = worksheet.range(fooditem_range)
for cell in range_of_cells:
cell.value = ''
worksheet.update_cells(range_of_cells)
def insert_new_dinner(dinner_worksheet, template_worksheet, theme_location, fooditem_range):
dinner_items = get_dinner_items(template_worksheet)
dinner_theme = template_worksheet.title
# Adding new dinner theme
dinner_worksheet.update_acell(theme_location, dinner_theme)
# Adding new dinner items
fooditem_range_start = fooditem_range.split(':')[0]
fooditem_cell = dinner_worksheet.acell(fooditem_range_start)
for idx, item in enumerate(dinner_items):
update_row = fooditem_cell.row + idx
update_col = fooditem_cell.col
dinner_worksheet.update_cell(update_row, update_col, item)
def set_history_date(workbook, dinner_theme):
    """Stamp the History sheet with the current time next to the given theme."""
    history = workbook.worksheet('History')
    theme_cell = history.find(dinner_theme)
    stamp = datetime.datetime.now().strftime('%m/%d/%Y %H:%M:%S')
    # The timestamp column sits immediately to the right of the theme column.
    history.update_cell(theme_cell.row, theme_cell.col + 1, stamp)
    print(stamp)
def notify_users(bot_id, msg):
    """Invoke the NotifyUsers Lambda synchronously to broadcast `msg`.

    Args:
        bot_id: bot identifier forwarded to the notifier Lambda.
        msg: message text to deliver.

    Raises:
        Exception: any boto3/invoke failure is printed and re-raised.
    """
    lam = boto3.client('lambda')
    payload = {'Bot_ID': bot_id, 'Message': msg}
    try:
        # Return value was never used, so the invoke result is not bound.
        lam.invoke(FunctionName='NotifyUsers',
                   InvocationType='RequestResponse',
                   Payload=json.dumps(payload))
    except Exception as e:
        print(e)
        # Bare `raise` preserves the original traceback; `raise e` would
        # restart it at this frame.
        raise
def lambda_handler(event, context):
    """Weekly entry point: rotate in the next dinner template and notify users.

    Expects `event` keys: Templates_Workbook, Dinner_Workbook,
    Dinner_Worksheet, Bot_ID.
    """
    logger.info(event)
    # Variables: fixed sheet layout — theme cell and the food-item sign-up range.
    theme_location = 'B1'
    fooditem_range = 'A4:B50'
    # Obtain client connection
    client = connect('client_secret_encrypted.json')
    # Gather workbooks/worksheets
    workbook_templates_name = event['Templates_Workbook']
    workbook_dinner_name = event['Dinner_Workbook']
    worksheet_dinner_name = event['Dinner_Worksheet']
    workbook_template = client.open(workbook_templates_name)
    workbook_dinner = client.open(workbook_dinner_name)
    dinner_worksheet = workbook_dinner.worksheet(worksheet_dinner_name)
    dinner_template_worksheet = get_next_dinner(workbook_template)
    dinner_theme = dinner_template_worksheet.title
    # Clear out last week's dinner
    reset_spreadsheet(dinner_worksheet, theme_location, fooditem_range)
    # Insert new dinner
    insert_new_dinner(dinner_worksheet, dinner_template_worksheet, theme_location, fooditem_range)
    # Set the timestamp for the new dinner in the history sheet
    set_history_date(workbook_template, dinner_theme)
    # Notify Users the new spreadsheet is up
    spreadsheet_url = 'https://docs.google.com/spreadsheets/d/{}/edit?usp=sharing'.format(workbook_dinner.id)
    # BUGFIX: the original wrote week''s (two adjacent strings), which Python
    # concatenates into "weeks"; the escaped apostrophe restores "week's".
    msg = 'Community Bot here *Bleep* *Bloop*\nThe new spreadsheet is up! Next week\'s theme is {}.\nPlease sign-up for a few items to share!\n{}'.format(dinner_theme, spreadsheet_url)
    notify_users(event['Bot_ID'], msg)
| 5,802 | 1,998 |
from __future__ import absolute_import
import unittest
import numpy as np
from nac.plotter import LogPlotter
class TestLogPlotter(unittest.TestCase):
    """Placeholder test suite for nac.plotter.LogPlotter."""
    def test_plot(self):
        # TODO: exercise LogPlotter's plotting; stub keeps the suite importable.
        pass
| 191 | 58 |
"""Constants that will be used to define default
behaviour. Can be overridden by setting env vars
"""
import os
import requests
import logging
# Map level names ("CRITICAL", "ERROR", ...) back to their numeric values so
# the PHY_LOG environment variable can be looked up by name.
log_levels = {
    logging.getLevelName(numeric): numeric
    for numeric in (
        logging.CRITICAL,
        logging.ERROR,
        logging.WARNING,
        logging.INFO,
        logging.DEBUG,
    )
}
logger = logging.getLogger("pyhystrix")
# An unset or unrecognised PHY_LOG falls back to WARNING.
level = os.environ.get("PHY_LOG")
logger.setLevel(log_levels.get(level, logging.WARNING))
logger.addHandler(logging.StreamHandler())
class Config(object):
    """Default pyhystrix settings, each overridable via an environment variable.

    NOTE(review): the env-var prefixes are inconsistent (PYH_ vs PHY_);
    documented as-is because renaming them would break existing deployments.
    """
    @staticmethod
    def connect_timeout():
        # Seconds allowed for establishing a connection.
        return int(os.environ.get("PYH_CONNECT_TIMEOUT", 5))
    @staticmethod
    def read_timeout():
        # Seconds allowed for reading a response.
        return int(os.environ.get("PYH_READ_TIMEOUT", 5))
    @staticmethod
    def max_tries():
        # Total attempts per request (note the PHY_ prefix here).
        return int(os.environ.get("PHY_MAX_TRIES", 3))
    @staticmethod
    def backoff_factor():
        # Multiplier for exponential retry backoff.
        return float(os.environ.get("PHY_BACKOFF_FACTOR", 0.5))
    @staticmethod
    def retriable_exceptions():
        # Exception types that justify a retry.
        return (requests.exceptions.ConnectionError,)
    @staticmethod
    def method_whitelist():
        # Only idempotent methods are retried.
        return ['HEAD', 'GET']
    @staticmethod
    def status_forcelist():
        # HTTP statuses that trigger a retry.
        return [500]
    @staticmethod
    def cb_fail_threshold():
        # Consecutive failures before the circuit breaker opens.
        return int(os.environ.get("PYH_CIRCUIT_FAIL_THRESHOLD", 5))
    @staticmethod
    def cb_alive_threshold():
        # Successes required to fully close the circuit again.
        return int(os.environ.get("PYH_CIRCUIT_ALIVE_THRESHOLD", 20))
    @staticmethod
    def cb_delay():
        # Seconds an open circuit waits before allowing a probe request.
        return int(os.environ.get("PYH_CIRCUIT_DELAY", 5))
| 1,644 | 565 |
# Helper Functions
try:
    from src.utils import load_json
except ImportError as error:
    # Running outside the project root: report which local module is missing
    # instead of aborting import of this helper module.
    print(f"Error: {error}; Local modules not found")
except Exception as exception:
    # Any other import-time failure is reported but deliberately not fatal.
    print(exception)
def load_params_1():
    """Load package-1 parameters (image source path and number of exposures)
    from its JSON config file.
    """
    return load_json("config/pkg_1_config.json")
def load_params_2():
    """Load package-2 parameters (image source path and number of exposures)
    from its JSON config file.
    """
    return load_json("config/pkg_2_config.json")
| 543 | 183 |
#!/usr/bin/env python
import os
import sys
modules = ['common',
           'agent',
           'dbus',
           'client',
           'shell',
           'utils']
# Forward this script's CLI arguments verbatim to each sub-setup.py.
# Joined once here instead of rebuilding the string on every loop iteration.
forwarded_args = " ".join(sys.argv[1:])
for module in modules:
    os.system("cd %s && ./setup.py %s" % (module, forwarded_args))
# Plugins live in nested directories: run any setup.py found under plugins/.
for root, dirs, files in os.walk("plugins"):
    if "setup.py" in files:
        os.system("cd %s && ./setup.py %s" % (root, forwarded_args))
| 380 | 147 |
from kanagata.builder import RestrictionBuilder # NOQA
from kanagata.builder import Module # NOQA
Builder = RestrictionBuilder
| 130 | 38 |
"""
__name__
When run, every Python file has a __name__ variable.
If the file is the main file being run/executed, its value is "__main__".
Otherwise, its value is the file name.
"""
# To ignore executable code on import
if __name__ == "__main__":
# this code will only run if the file is the main file
pass | 318 | 96 |
SECRET_KEY = "TOP_SECRET"
EXPIRATION = 1000
| 44 | 27 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Thomas AMORIM"
__credits__ = ["Thomas Amorim", "Pierre-François Bonnefoi" "Scapy"]
__license__ = "MIT"
__version__ = "1.1"
__status__ = "OK"
from scapy.all import *
import sys
def force_arp(ipslash):
    """Broadcast a forged ARP request targeting every host in `ipslash` (CIDR).

    Uses a hard-coded spoofed source MAC and source IP 192.168.1.0 —
    lab/teaching use only.
    """
    p = Ether(dst="ff:ff:ff:ff:ff:ff", src="00:03:24:45:11:34")/ARP(hwsrc="00:03:24:45:11:34",psrc="192.168.1.0",pdst=ipslash)
    p.show2()  # print the fully assembled packet for inspection before sending
    send(p)
if __name__ == "__main__":
defaut_ip = "192.168.1.0/24"
if len(sys.argv)!=1:
ip = sys.argv[1] or defaut_ip
force_arp("192.168.1.0/24") | 546 | 295 |
"""
Create a CRUD of admin users
"""
import graphene
from django.contrib.auth import get_user_model
from graphql_jwt.decorators import superuser_required, login_required
from accounts.models import Admin
from accounts.types import UserType
class CreateAdmin(graphene.Mutation):
    """Mutation from graphene for creating admin"""
    # Returned fields: the new admin's email and the user who created it.
    email = graphene.String()
    creator = graphene.Field(UserType)
    class Arguments:
        """Mutation arguments for create a admin"""
        email = graphene.String()
        password = graphene.String()
    @superuser_required
    def mutate(self, info, email, password):
        """Create a superuser account and record who created it.

        Only callable by a superuser (enforced by the decorator).
        """
        admin = get_user_model().objects.create_superuser(
            email=email,
            password=password,
        )
        # NOTE(review): create_superuser typically hashes the password already;
        # the explicit set_password/save below re-hashes it — confirm intended.
        admin.set_password(password)
        admin.save()
        creator = info.context.user
        # Track the creator relationship in the Admin join model.
        management = Admin(
            admin=admin,
            creator=creator
        )
        management.save()
        return CreateAdmin(
            email=email,
            creator=creator
        )
class DeleteAdmin(graphene.Mutation):
    """Mutation from graphene for deleting admin"""
    # Returned field: the email of the deleted admin.
    email = graphene.String()
    class Arguments:
        """Mutation arguments for delete a admin"""
        email = graphene.String(required=True)
    @superuser_required
    def mutate(self, info, email):
        """Delete the admin identified by `email`.

        Allowed only for the admin themself or the user recorded as their
        creator; any other superuser is rejected.
        """
        user = info.context.user
        admin = get_user_model().objects.get(email=email)
        # Look up who created this admin (may be absent for bootstrap accounts).
        creator = Admin.objects.filter(admin=admin).first()
        if creator:
            creator = creator.creator
        if user != admin and user != creator:
            raise Exception('Logged in user is not related')
        admin.delete()
        return DeleteAdmin(
            email=email
        )
| 1,877 | 508 |
# Resources
from Trajectory import Point_Lander_Drag
from Optimisation import HSS
from PyGMO import *
from numpy import *
import multiprocessing
def main(n):
    ''' -------------------------------------------------
    Problem : SpaceX Dragon 2 Martian Soft Landing
    Dynamics : 2-Dimensional Variable Mass Point
    Transcription : Hermite Simpson Seperated (HSS)
    Produces : Database of fuel-optimal trajectories
    >>> python Generate.py
    ------------------------------------------------- '''
    print("Beginning with dataset " + str(n))
    si_list = load('Data/Point_Lander_Mars_Initial_States_' + str(n) + '.npy')
    # Define the algorithms to use
    algo_local = algorithm.scipy_slsqp(max_iter=5000, screen_output=True)
    # Load initial guess
    print("Loading initial guess..")
    z = load('Data/HSS_10_Mars_Base.npy')
    # Allocate space for the solutions (one row per feasible trajectory)
    n_traj = len(si_list)
    sols = zeros((0, len(z)))
    for i in range(n_traj):
        si = si_list[i]
        print("Trajectory " + str(i))
        print("State: " + str(si))
        # Initialise the model at that state
        model = Point_Lander_Drag(si)
        # Initialise the HSS problem
        prob = HSS(model, nsegs=10)
        # Create empty population
        pop = population(prob)
        # Guess the previous solution (warm start from the last feasible z)
        pop.push_back(z)
        # Optimise from that solution
        print("Beginning optimisation...")
        pop = algo_local.evolve(pop)
        # Save the solution only if successful; infeasible champions are
        # discarded and the previous warm start is kept.
        if prob.feasibility_x(pop.champion.x):
            z = array(pop.champion.x)
            # Update the solution array and persist after every success
            sols = vstack((sols, z))
            save("Data/Mars/HSS_10_Alpha_" + str(n), sols)
if __name__ == "__main__":
jobs = [1,2,3,4]
for n in jobs:
p = multiprocessing.Process(target=main, args=(n,))
p.start()
| 1,887 | 602 |
import functools
from datetime import datetime, timedelta
import pytest
from django.utils.timezone import make_aware
from ephios.core.models import AbstractParticipation, Shift
from ephios.core.signup import LocalParticipation, SignupStats, get_conflicting_participations
from ephios.plugins.basesignup.signup.instant import InstantConfirmationSignupMethod
def test_signup_stats_addition(django_app):
    """SignupStats addition sums fields pairwise; a None operand in a field
    makes that field None in the result (None = unbounded)."""
    a = SignupStats(4, 2, 3, None, 5, None)
    b = SignupStats(5, 2, 3, 5, 5, 7)
    c = SignupStats(3, 2, 0, 2, None, 4)
    assert a + b == SignupStats(9, 4, 6, None, 10, None)
    assert b + c == SignupStats(8, 4, 3, 7, 5, 11)
def test_cannot_sign_up_for_conflicting_shifts(django_app, volunteer, event, conflicting_event):
    """A volunteer with a participation overlapping `conflicting_event`'s shift
    must be rejected by its signup method (fixtures set up the overlap)."""
    assert not conflicting_event.shifts.first().signup_method.can_sign_up(
        volunteer.as_participant()
    )
@pytest.mark.parametrize(
    "a_times,b_times,conflict_expected",
    [
        # b ends exactly when a starts: touching intervals do not conflict
        (
            (datetime(2099, 1, 1, 8), datetime(2099, 1, 1, 12)),
            (datetime(2099, 1, 1, 6), datetime(2099, 1, 1, 8)),
            False,
        ),
        # partial overlap at the start of a
        (
            (datetime(2099, 1, 1, 8), datetime(2099, 1, 1, 12)),
            (datetime(2099, 1, 1, 6), datetime(2099, 1, 1, 10)),
            True,
        ),
        # b fully covers up to a's end
        (
            (datetime(2099, 1, 1, 8), datetime(2099, 1, 1, 12)),
            (datetime(2099, 1, 1, 6), datetime(2099, 1, 1, 12)),
            True,
        ),
        # partial overlap at the end of a
        (
            (datetime(2099, 1, 1, 8), datetime(2099, 1, 1, 12)),
            (datetime(2099, 1, 1, 10), datetime(2099, 1, 1, 18)),
            True,
        ),
        # one-minute overlap still counts
        (
            (datetime(2099, 1, 1, 8), datetime(2099, 1, 1, 12)),
            (datetime(2099, 1, 1, 11, 59), datetime(2099, 1, 1, 12)),
            True,
        ),
        # b starts exactly when a ends: no conflict
        (
            (datetime(2099, 1, 1, 8), datetime(2099, 1, 1, 12)),
            (datetime(2099, 1, 1, 12), datetime(2099, 1, 1, 18)),
            False,
        ),
    ],
)
def test_get_conflicting_shifts(tz, a_times, b_times, conflict_expected, event, volunteer):
    """A confirmed participation in shift `a` conflicts with shift `b` exactly
    when their [start, end) intervals overlap (shared boundaries are fine)."""
    common = dict(signup_method_slug=InstantConfirmationSignupMethod.slug, event=event)
    aware = functools.partial(make_aware, timezone=tz)
    a = Shift.objects.create(
        start_time=aware(a_times[0]),
        end_time=aware(a_times[1]),
        meeting_time=aware(a_times[0]) - timedelta(minutes=15),
        **common
    )
    b = Shift.objects.create(
        start_time=aware(b_times[0]),
        end_time=aware(b_times[1]),
        meeting_time=aware(b_times[0]) - timedelta(minutes=15),
        **common
    )
    a_participation = LocalParticipation.objects.create(
        shift=a, user=volunteer, state=AbstractParticipation.States.CONFIRMED
    )
    expected = {a_participation} if conflict_expected else set()
    assert set(get_conflicting_participations(b, volunteer.as_participant())) == expected
| 2,870 | 1,176 |
import numpy as np
import torch
def covariance(features):
    """Sample covariance of a 2-D (samples x features) matrix, computed via
    the Gram matrix: (X'X - (1'X)'(1'X)/n) / (n - 1)."""
    assert len(features.size()) == 2, "TODO: multi-dimensional feature map covariance"
    num_samples = features.shape[0]
    col_sums = torch.ones((1, num_samples), device=features.device) @ features
    gram = features.t() @ features
    return (gram - (col_sums.t() @ col_sums) / num_samples) / (num_samples - 1)
def coral(cs, ct):
    """CORAL loss: squared Frobenius distance between the two covariance
    matrices, scaled by 1 / (4 d^2)."""
    dim = cs.shape[0]
    diff = cs - ct
    return diff.pow(2).sum() / (4. * dim ** 2)
def linear_mmd(ms, mt):
    """Linear MMD: mean squared difference between the two mean embeddings."""
    return (ms - mt).pow(2).mean()
| 504 | 197 |
# from io_scene_gltf2
# Copyright 2018-2019 The glTF-Blender-IO authors.
# Apache 2.0
#
# Script reloading (if the user calls 'Reload Scripts' from Blender)
#
def reload_package(module_dict_main):
    """Recursively reload every already-imported submodule of this package.

    Walks the package directory; .py files whose stem appears in the given
    module dict are reloaded, and subpackages are descended into via their
    module's __dict__.
    """
    import importlib
    from pathlib import Path

    def _reload_dir(directory, module_dict):
        for entry in directory.iterdir():
            # Skip package markers and anything not tracked in the module dict.
            if "__init__" in str(entry) or entry.stem not in module_dict:
                continue
            if entry.is_file() and entry.suffix == ".py":
                importlib.reload(module_dict[entry.stem])
            elif entry.is_dir():
                _reload_dir(entry, module_dict[entry.stem].__dict__)

    _reload_dir(Path(__file__).parent, module_dict_main)
| 747 | 244 |
#!/usr/bin/env python
# DEPRECATED -- model didn't work. Was trying to constrain rewards differently.
# Beware -- lack of documentation. Refer to nnAvicaching_find_rewards.py for
# support
from __future__ import print_function
import torch, torch.nn as nn, torch.nn.functional as torchfun, torch.optim as optim
from torch.autograd import Variable
import numpy as np, argparse, time, os, sys
import avicaching_data as ad
# =============================================================================
# options
# =============================================================================
parser = argparse.ArgumentParser(description="NN Avicaching model for finding rewards")
parser.add_argument("--lr", type=float, default=0.01, metavar="LR",
help="inputs learning rate of the network (default=0.01)")
parser.add_argument("--momentum", type=float, default=1.0, metavar="M",
help="inputs SGD momentum (default=1.0)")
parser.add_argument("--no-cuda", action="store_true", default=False,
help="disables CUDA training")
parser.add_argument("--epochs", type=int, default=10, metavar="E",
help="inputs the number of epochs to train for")
parser.add_argument("--locations", type=int, default=116, metavar="J",
help="inputs the number of locations (default=116)")
parser.add_argument("--time", type=int, default=173, metavar="T",
help="inputs total time of data collection; number of weeks (default=173)")
parser.add_argument("--eta", type=float, default=10.0, metavar="F",
help="inputs parameter eta in the model (default=10.0)")
parser.add_argument("--rewards", type=float, default=1000.0, metavar="R",
help="inputs the total budget of rewards to be distributed (default=1000.0)")
parser.add_argument("--weights-file", type=str,
default="./stats/weights/normalizedR_gpu, origXYR_epochs=1000, train= 80%, time=98.6947 sec.txt",
metavar="f", help="inputs the location of the file to use weights from")
parser.add_argument("--log-interval", type=int, default=1, metavar="I",
help="prints training information at I epoch intervals (default=1)")
parser.add_argument("--expand-R", action="store_true", default=False,
help="expands the reward vectors into matrices with distributed rewards")
parser.add_argument("--lambda-loss", type=float, default=10.0,
help="inputs the lambda for penalizing rewards amounting to greater than total rewards (default=10.0)")
parser.add_argument("--lambda-update", type=float, default=0.01,
help="inputs the learning rate for lambda (default=0.01)")
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
args = parser.parse_args()
# assigning cuda check and test check to single variables
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
np.random.seed(seed=args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
# =============================================================================
# parameters and constants
# =============================================================================
# Print floats with 3 decimal places in numpy output.
np.set_printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)})
J, T, weights_file_name = args.locations, args.time, args.weights_file
totalR, l = args.rewards, args.lambda_loss
# Populated in read_set_data(); module-level so MyNet/train can reach them.
X, W_for_r, F_DIST, numFeatures = [], [], [], 0
F_DIST_weighted = []
# Alias for the default float tensor constructor.
torchten = torch.FloatTensor
# =============================================================================
# data input
# =============================================================================
def read_set_data():
    """Load features, distances, weights and visit densities from disk and
    derive the module-level globals (X, W_for_r, F_DIST, F_DIST_weighted,
    numFeatures) consumed by MyNet and train().
    """
    global X, W, F_DIST, numFeatures, F_DIST_weighted, W_for_r
    # read f and dist datasets from file, operate on them
    F = ad.read_F_file("./data/loc_feature_with_avicaching_combined.csv", J)
    DIST = ad.read_dist_file("./data/site_distances_km_drastic_price_histlong_0327_0813_combined.txt", J)
    # read W and X
    W = ad.read_weights_file(weights_file_name, J)
    X, _, _ = ad.read_XYR_file("./data/density_shift_histlong_as_previous_loc_classical_drastic_price_0327_0813.txt", J, T)
    # process data for the NN
    F, DIST = ad.normalize(F, along_dim=0, using_max=True), ad.normalize(DIST, using_max=True) # normalize using max
    numFeatures = len(F[0]) + 1 # distance included
    F_DIST = torchten(ad.combine_DIST_F(F, DIST, J, numFeatures))
    numFeatures += 1 # for rewards later
    # split W and join the multiply the fdist portion with F_DIST
    W = np.expand_dims(W, axis=2)
    W_for_fdist, W_for_r = ad.split_along_dim(W, numFeatures - 1, dim=1)
    # Pre-multiply the static feature/distance weights once so the training
    # loop only has to combine them with the reward term.
    F_DIST_weighted = Variable(torch.bmm(F_DIST, torchten(W_for_fdist)).squeeze(dim=2), requires_grad=False)
    # condense X along T into a single vector and normalize
    X = ad.normalize(X.sum(axis=0), using_max=False)
    W_for_r, X = Variable(torchten(W_for_r), requires_grad=False), Variable(torchten(X), requires_grad=False)
# =============================================================================
# MyNet class
# =============================================================================
class MyNet(nn.Module):
    """Network whose only trainable parameter is the (normalized) reward
    vector R distributed over J locations."""
    def __init__(self, J, totalR, eta):
        super(MyNet, self).__init__()
        self.J, self.totalR, self.eta = J, totalR, eta
        # initiate R: a random multinomial split of the total budget,
        # normalized so the parameter starts with sum 1
        self.r = np.random.multinomial(self.totalR, [1 / float(J)] * J, size=1)
        normalizedR = ad.normalize(self.r, using_max=False)
        self.R = nn.Parameter(torchten(normalizedR))
        print("random rewards:\n", self.r)
    def forward(self, inp):
        # NOTE(review): the incoming `inp` is overwritten immediately below,
        # so the argument's value is effectively unused — confirm intended.
        repeatedR = self.R.repeat(J, 1).unsqueeze(dim=2)
        inp = torch.bmm(repeatedR, W_for_r).view(-1, J) + F_DIST_weighted
        inp = torchfun.relu(inp)
        # add eta to inp[u][u]
        # eta_matrix = Variable(self.eta * torch.eye(J).type(torchten))
        # if args.cuda:
        #     eta_matrix = eta_matrix.cuda()
        # inp += eta_matrix
        # softmax without an explicit dim relies on the legacy default
        # (deprecated in newer PyTorch releases)
        return torchfun.softmax(inp)
def train(net, optimizer):
    """Run one optimization step; returns (elapsed_seconds, loss_value,
    current reward sum). Also updates the lagrange multiplier `l` in place."""
    global W_for_r, l, totalR
    start_time = time.time()
    # build input
    if args.cuda:
        W_for_r = W_for_r.cuda()
    # feed in data
    P = net(W_for_r).t() # P is now weighted -> softmax
    # calculate loss: variance-like spread of predicted visits plus a penalty
    # when the (normalized) rewards exceed the unit budget
    Y = torch.mv(P, X)
    loss = torch.norm(Y - torch.mean(Y).expand_as(Y)).pow(2) / J + \
        l * torchfun.relu(torch.sum(net.R) - 1.0)
    # backpropagate
    optimizer.zero_grad()
    loss.backward()
    # update the rewards and constrain them
    optimizer.step()
    # NOTE(review): project_to_min is not defined in the visible portion of
    # this file — confirm it is provided elsewhere before running.
    net.R.data = project_to_min(net.R.data, 0.0)
    l += (args.lambda_update * (torch.sum(net.R.data) - 1.0)) # update lambda
    end_time = time.time()
    # loss.data[0] is legacy (pre-0.4) PyTorch scalar indexing
    return (end_time - start_time, loss.data[0], net.R.data.sum())
# =============================================================================
# utility functions for training and testing routines
# =============================================================================
def build_input(rt):
    """
    Builds the final input for the NN. Joins F_DIST and expanded R
    """
    # Tile the reward tensor across locations; --expand-R rewards are already
    # matrices, so they need an extra leading repeat dimension.
    tiled_r = rt.repeat(J, 1, 1) if args.expand_R else rt.repeat(J, 1)
    return torch.cat([F_DIST, tiled_r], dim=2)
# =============================================================================
# main program
# =============================================================================
if __name__ == "__main__":
read_set_data()
net = MyNet(J, totalR, args.eta)
if args.cuda:
net.cuda()
optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=args.momentum, nesterov=True)
total_time = 0
for e in xrange(1, args.epochs + 1):
train_res = train(net, optimizer)
total_time += train_res[0]
if e % 20 == 0:
print("epoch=%5d, loss=%.10f, budget=%.10f" % (e, train_res[1], train_res[2]), train_res[0], l)
print("determined rewards:\n", net.R.data.cpu().numpy() * 1000)
print("total time: %.5f" % total_time) | 7,938 | 2,711 |
# Copyright 2019-2020 SURF.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from typing import Callable, Iterable, List, Optional, Sequence, Set, TypeVar, Union
import more_itertools
import structlog
logger = structlog.get_logger(__name__)
def first_available_or_next(values: Iterable[int], start: int = 0) -> int:
    """Return first available value or the next logical value.

    >>> first_available_or_next([0, 1, 3])
    2
    >>> first_available_or_next([0, 1, 2, 3])
    4
    >>> first_available_or_next([1, 2, 3])
    0
    >>> first_available_or_next([])
    0
    >>> first_available_or_next([0, 1, 3], start=11)
    11
    >>> first_available_or_next([0, 1, 3], start=4)
    4
    >>> first_available_or_next([], 22)
    22
    >>> first_available_or_next([1, 100, 101], 33)
    33
    >>> first_available_or_next([11, 22, 33, 44, 55], 33)
    34

    Args:
        values: an iterable of integer values.
        start: set starting value.

    Returns:
        First available value or next logical one.
    """
    # Materialize once: the original iterated `values` twice (max() then
    # set()), which silently misbehaved for one-shot iterators/generators.
    taken = set(values)
    candidate = start
    # Walk upward from `start` to the first integer not already taken; `taken`
    # is finite, so this terminates.
    while candidate in taken:
        candidate += 1
    return candidate
def orig(func: Callable) -> Callable:
    """Return the function wrapped by one or more decorators.

    Follows the ``__wrapped__`` chain (set by ``functools.wraps``) down to
    the innermost callable.

    Args:
        func: step function

    Returns:
        Undecorated step function for testing purposes.
    """
    unwrapped = func
    while hasattr(unwrapped, "__wrapped__"):
        unwrapped = unwrapped.__wrapped__  # type:ignore
    return unwrapped
def join_cs(*args: Union[Iterable[str], str]) -> str:
    """Return comma separated string from one or more comma separated strings or iterables of strings.

    It deals with empty strings and properly inserting comma's.
    See: `test_join_cs` for examples.

    Args:
        args: One or more comma separated strings or iterables that should be joined.

    Returns:
        A comma separated string.
    """
    parts: List[str] = []
    for arg in args:
        if isinstance(arg, str):
            # Split on commas, dropping empty fragments from "", "a,,b", etc.
            parts.extend(piece for piece in arg.split(",") if piece)
        else:
            parts.extend(arg)
    return ",".join(parts)
def expand_ranges(ranges: Sequence[Sequence[int]], inclusive: bool = False) -> List[int]:
    """Expand sequence of range definitions into sorted and deduplicated list of individual values.

    A range definition is either a:

    * one element sequence -> an individual value.
    * two element sequence -> a range of values (either inclusive or exclusive).

    >>> expand_ranges([[1], [2], [10, 12]])
    [1, 2, 10, 11]
    >>> expand_ranges([[1], [2], [10, 12]], inclusive=True)
    [1, 2, 10, 11, 12]
    >>> expand_ranges([[]])
    Traceback (most recent call last):
    ...
    ValueError: Expected 1 or 2 element list for range definition. Got 0 element list instead.

    Resulting list is sorted::

        >>> expand_ranges([[100], [1, 4]], inclusive=True)
        [1, 2, 3, 4, 100]

    Args:
        ranges: sequence of range definitions
        inclusive: are the stop values of the range definition inclusive or exclusive.

    Returns:
        Sorted deduplicated list of individual values.

    Raises:
        ValueError: if range definition is not a one or two element sequence.
    """
    values: Set[int] = set()
    for r in ranges:
        if len(r) == 2:
            values.update(range(r[0], r[1] + (1 if inclusive else 0)))
        elif len(r) == 1:
            values.add(r[0])
        else:
            # BUGFIX: the original message had a stray 'f' before the
            # placeholder ("Got f0 element list"); doctest updated to match.
            raise ValueError(f"Expected 1 or 2 element list for range definition. Got {len(r)} element list instead.")
    return sorted(values)
T = TypeVar("T")
def as_t(value: Optional[T]) -> T:
"""Cast `value` to non-Optional.
One often needs to assign a value that was typed as being `Optional` to a variable that is typed non-Optional. MyPy
rightfully takes issue with these assignments (strict Optional checking is default since MyPy 0.600) unless we
have explicitely determined these values to be not `None`. The most succinct way to do that is using an `assert`
statement::
x: Optional[int] = 7
assert x is not None
y: int = x
However that gets tedious pretty fast. One might be inclined to turn off strict Optional checking. However that
would be a bad decision; None values will percolate through data structures and cause issue at locations far from
where they originally came from. A better solution would be to fail right where the issue occurred but using a
somewhat more convenient syntax.
Some languages such as Kotlin provide the `as` operator:
.. code-block:: kotlin
val x: Int? = 7 // ? declaring the Int to be nullable
val y: Int = x as Int
That is the inspiration for this function. `t` referring to the type being wrapped in an `Optional`. Hence `as_t`
meaning `as the non-Optional type`.
The above Python example now becomes::
x: Optional[int] = 7
y: int = as_t(x)
`as_t` checks whether te value passed to it is not `None`, satisfying MyPy. If it happens to be `None` it raises a
`ValueError`, satisfying our requirement to fail at the location where we require the value to be not None and not
somewhere far down the code path.
Args:
value: `Optional` value to be casted to non-Optional
Returns:
non-Optional value.
Raises:
ValueError: in case `value` is `None`
"""
if value is None:
raise ValueError("Trying to cast a value to non-Optional type failed due to value being None.")
return value
def ireplace(iterable: Iterable[T], old: T, new: T) -> Iterable[T]:
    """Replace one or more occurrences of a specific value in an iterable with another value.

    The 'i' prefix indicates 'iterable' and is there to distinguish it from other similar functions.

    >>> list(ireplace(["1-10", "", "22"], "", "0"))
    ['1-10', '0', '22']

    Args:
        iterable: The iterable that needs to have a specific value replaced for all its occurrences.
        old: The value in the iterable to replace.
        new: The value to replace `old` with.

    Returns:
        A new iterable with `old` values replaced by `new` values
    """
    # A plain generator expression performs the same elementwise swap as
    # more_itertools.replace(..., window_size=1), without the extra dependency.
    yield from (new if item == old else item for item in iterable)
def to_ranges(i: Iterable[int]) -> Iterable[range]:
    """Convert a sorted iterable of ints to an iterable of range objects.

    IMPORTANT: the iterable passed in should be sorted and not contain duplicate elements.

    Examples::

        >>> list(to_ranges([2, 3, 4, 5, 7, 8, 9, 45, 46, 47, 49, 51, 53, 54, 55, 56, 57, 58, 59, 60, 61]))
        [range(2, 6), range(7, 10), range(45, 48), range(49, 50), range(51, 52), range(53, 62)]

    Args:
        i: sorted iterable

    Yields:
        range object for each consecutive set of integers
    """
    # Key trick: consecutive values keep a constant (value - index) difference,
    # so groupby on that difference clusters each run of consecutive integers.
    for _, run in itertools.groupby(enumerate(i), lambda pair: pair[1] - pair[0]):
        members = [value for _, value in run]
        yield range(members[0], members[-1] + 1)
| 8,094 | 2,534 |
__version__ = "1.4.14"
| 23 | 14 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = [
'GetSnapshotResult',
'AwaitableGetSnapshotResult',
'get_snapshot',
]
@pulumi.output_type
class GetSnapshotResult:
    """
    A collection of values returned by getSnapshot.
    """
    # NOTE: tfgen-generated code — type-check-and-set pattern per field is
    # machine-produced; regenerate rather than restructuring by hand.
    def __init__(__self__, completed_at=None, cron_timing=None, hostname=None, id=None, instance_id=None, name=None, next_execution=None, region=None, requested_at=None, safe=None, size_gb=None, state=None, template_id=None):
        if completed_at and not isinstance(completed_at, str):
            raise TypeError("Expected argument 'completed_at' to be a str")
        pulumi.set(__self__, "completed_at", completed_at)
        if cron_timing and not isinstance(cron_timing, str):
            raise TypeError("Expected argument 'cron_timing' to be a str")
        pulumi.set(__self__, "cron_timing", cron_timing)
        if hostname and not isinstance(hostname, str):
            raise TypeError("Expected argument 'hostname' to be a str")
        pulumi.set(__self__, "hostname", hostname)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if instance_id and not isinstance(instance_id, str):
            raise TypeError("Expected argument 'instance_id' to be a str")
        pulumi.set(__self__, "instance_id", instance_id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if next_execution and not isinstance(next_execution, str):
            raise TypeError("Expected argument 'next_execution' to be a str")
        pulumi.set(__self__, "next_execution", next_execution)
        if region and not isinstance(region, str):
            raise TypeError("Expected argument 'region' to be a str")
        pulumi.set(__self__, "region", region)
        if requested_at and not isinstance(requested_at, str):
            raise TypeError("Expected argument 'requested_at' to be a str")
        pulumi.set(__self__, "requested_at", requested_at)
        if safe and not isinstance(safe, bool):
            raise TypeError("Expected argument 'safe' to be a bool")
        pulumi.set(__self__, "safe", safe)
        if size_gb and not isinstance(size_gb, int):
            raise TypeError("Expected argument 'size_gb' to be a int")
        pulumi.set(__self__, "size_gb", size_gb)
        if state and not isinstance(state, str):
            raise TypeError("Expected argument 'state' to be a str")
        pulumi.set(__self__, "state", state)
        if template_id and not isinstance(template_id, str):
            raise TypeError("Expected argument 'template_id' to be a str")
        pulumi.set(__self__, "template_id", template_id)
    @property
    @pulumi.getter(name="completedAt")
    def completed_at(self) -> str:
        """
        The date when the snapshot was completed.
        """
        return pulumi.get(self, "completed_at")
    @property
    @pulumi.getter(name="cronTiming")
    def cron_timing(self) -> str:
        """
        A string in cron format describing the snapshot schedule.
        """
        return pulumi.get(self, "cron_timing")
    @property
    @pulumi.getter
    def hostname(self) -> str:
        """
        The hostname of the instance.
        """
        return pulumi.get(self, "hostname")
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="instanceId")
    def instance_id(self) -> str:
        """
        The ID of the Instance from which the snapshot was taken.
        """
        return pulumi.get(self, "instance_id")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        The name of the snapshot.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="nextExecution")
    def next_execution(self) -> str:
        """
        If cron timing is defined, the date of the next scheduled execution.
        """
        return pulumi.get(self, "next_execution")
    @property
    @pulumi.getter
    def region(self) -> str:
        """
        The region where the snapshot was taken.
        """
        return pulumi.get(self, "region")
    @property
    @pulumi.getter(name="requestedAt")
    def requested_at(self) -> str:
        """
        The date when the snapshot was requested.
        """
        return pulumi.get(self, "requested_at")
    @property
    @pulumi.getter
    def safe(self) -> bool:
        """
        If `true`, the instance was shut down during the snapshot; if `false`, it was not.
        """
        return pulumi.get(self, "safe")
    @property
    @pulumi.getter(name="sizeGb")
    def size_gb(self) -> int:
        """
        The size of the snapshot in GB.
        """
        return pulumi.get(self, "size_gb")
    @property
    @pulumi.getter
    def state(self) -> str:
        """
        The status of the snapshot.
        """
        return pulumi.get(self, "state")
    @property
    @pulumi.getter(name="templateId")
    def template_id(self) -> str:
        """
        The template id.
        """
        return pulumi.get(self, "template_id")
class AwaitableGetSnapshotResult(GetSnapshotResult):
    """Awaitable variant of GetSnapshotResult; `await` resolves immediately
    to a plain GetSnapshotResult."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The dead `yield` makes __await__ a generator, which is what the
        # await protocol requires; it never actually suspends.
        if False:
            yield self
        return GetSnapshotResult(
            completed_at=self.completed_at,
            cron_timing=self.cron_timing,
            hostname=self.hostname,
            id=self.id,
            instance_id=self.instance_id,
            name=self.name,
            next_execution=self.next_execution,
            region=self.region,
            requested_at=self.requested_at,
            safe=self.safe,
            size_gb=self.size_gb,
            state=self.state,
            template_id=self.template_id)
def get_snapshot(id: Optional[str] = None,
                 name: Optional[str] = None,
                 opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSnapshotResult:
    """
    Snapshots are saved instances of a block storage volume. Use this data
    source to retrieve the ID of a Civo snapshot for use in other
    resources.

    ## Example Usage

    Get the snapshot:

    ```python
    import pulumi
    import pulumi_civo as civo

    mysql_vm = civo.get_snapshot(name="mysql-vm")
    ```

    :param str id: The ID of the snapshot.
    :param str name: The name of the snapshot.
    """
    __args__ = dict()
    __args__['id'] = id
    __args__['name'] = name
    # Default the invoke options and pin the provider SDK version when the
    # caller did not supply one.
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('civo:index/getSnapshot:getSnapshot', __args__, opts=opts, typ=GetSnapshotResult).value

    return AwaitableGetSnapshotResult(
        completed_at=__ret__.completed_at,
        cron_timing=__ret__.cron_timing,
        hostname=__ret__.hostname,
        id=__ret__.id,
        instance_id=__ret__.instance_id,
        name=__ret__.name,
        next_execution=__ret__.next_execution,
        region=__ret__.region,
        requested_at=__ret__.requested_at,
        safe=__ret__.safe,
        size_gb=__ret__.size_gb,
        state=__ret__.state,
        template_id=__ret__.template_id)
| 7,680 | 2,312 |
from unittest import TestCase, main
from unittest.mock import patch, MagicMock
from airflow import DAG
from sync_documents_to_kernel import (
list_documents,
delete_documents,
register_update_documents,
)
class TestListDocuments(TestCase):
    """Tests for the ``list_documents`` task callable."""

    _LIST_DOCS = (
        "sync_documents_to_kernel."
        "sync_documents_to_kernel_operations.list_documents"
    )

    @patch(_LIST_DOCS)
    def test_list_document_gets_sps_package_from_dag_run_conf(self, mk_list_documents):
        dag_run = MagicMock()
        list_documents(ti=MagicMock(), dag_run=dag_run)
        dag_run.conf.get.assert_called_once_with("sps_package")

    @patch(_LIST_DOCS)
    def test_list_document_calls_list_documents_operation(self, mk_list_documents):
        dag_run = MagicMock()
        dag_run.conf.get.return_value = "path_to_sps_package/package.zip"
        list_documents(ti=MagicMock(), dag_run=dag_run)
        mk_list_documents.assert_called_once_with("path_to_sps_package/package.zip")

    @patch(_LIST_DOCS)
    def test_list_document_pushes_xmls_from_packages(self, mk_list_documents):
        expected = [
            "1806-907X-rba-53-01-1-8.xml",
            "1806-907X-rba-53-01-9-18.xml",
            "1806-907X-rba-53-01-19-25.xml",
        ]
        dag_run = MagicMock()
        dag_run.conf.get.return_value = "path_to_sps_package/package.zip"
        task_instance = MagicMock()
        mk_list_documents.return_value = expected
        list_documents(ti=task_instance, dag_run=dag_run)
        task_instance.xcom_push.assert_called_once_with(
            key="xmls_filenames", value=expected
        )

    @patch(_LIST_DOCS)
    def test_list_document_doesnt_call_ti_xcom_push_if_no_xml_files(
        self, mk_list_documents
    ):
        dag_run = MagicMock()
        dag_run.conf.get.return_value = "path_to_sps_package/package.zip"
        task_instance = MagicMock()
        mk_list_documents.return_value = []
        list_documents(ti=task_instance, dag_run=dag_run)
        task_instance.xcom_push.assert_not_called()
class TestDeleteDocuments(TestCase):
    """Tests for the ``delete_documents`` task callable."""

    _DELETE_DOCS = (
        "sync_documents_to_kernel."
        "sync_documents_to_kernel_operations.delete_documents"
    )

    @patch(_DELETE_DOCS)
    def test_delete_documents_gets_sps_package_from_dag_run_conf(
        self, mk_delete_documents
    ):
        dag_run = MagicMock()
        delete_documents(ti=MagicMock(), dag_run=dag_run)
        dag_run.conf.get.assert_called_once_with("sps_package")

    @patch(_DELETE_DOCS)
    def test_delete_documents_gets_ti_xcom_info(self, mk_delete_documents):
        task_instance = MagicMock()
        delete_documents(ti=task_instance, dag_run=MagicMock())
        task_instance.xcom_pull.assert_called_once_with(
            key="xmls_filenames", task_ids="list_docs_task_id"
        )

    @patch(_DELETE_DOCS)
    def test_delete_documents_empty_ti_xcom_info(self, mk_delete_documents):
        task_instance = MagicMock()
        task_instance.xcom_pull.return_value = None
        delete_documents(ti=task_instance, dag_run=MagicMock())
        mk_delete_documents.assert_not_called()
        task_instance.xcom_push.assert_not_called()

    @patch(_DELETE_DOCS)
    def test_delete_documents_calls_delete_documents_operation(
        self, mk_delete_documents
    ):
        xmls_filenames = [
            "1806-907X-rba-53-01-1-8.xml",
            "1806-907X-rba-53-01-9-18.xml",
            "1806-907X-rba-53-01-19-25.xml",
        ]
        dag_run = MagicMock()
        dag_run.conf.get.return_value = "path_to_sps_package/package.zip"
        task_instance = MagicMock()
        task_instance.xcom_pull.return_value = xmls_filenames
        delete_documents(ti=task_instance, dag_run=dag_run)
        mk_delete_documents.assert_called_once_with(
            "path_to_sps_package/package.zip", xmls_filenames
        )

    @patch(_DELETE_DOCS)
    def test_delete_documents_pushes_xmls_to_preserve(self, mk_delete_documents):
        xmls_filenames = [
            "1806-907X-rba-53-01-1-8.xml",
            "1806-907X-rba-53-01-9-18.xml",
            "1806-907X-rba-53-01-19-25.xml",
        ]
        # The operation reports everything except the first XML as preserved.
        xmls_to_preserve = xmls_filenames[1:]
        dag_run = MagicMock()
        dag_run.conf.get.return_value = "path_to_sps_package/package.zip"
        task_instance = MagicMock()
        task_instance.xcom_pull.return_value = xmls_filenames
        mk_delete_documents.return_value = xmls_to_preserve
        delete_documents(ti=task_instance, dag_run=dag_run)
        task_instance.xcom_push.assert_called_once_with(
            key="xmls_to_preserve", value=xmls_to_preserve
        )
class TestRegisterUpdateDocuments(TestCase):
    """Tests for the ``register_update_documents`` task callable."""

    _REGISTER_DOCS = (
        "sync_documents_to_kernel."
        "sync_documents_to_kernel_operations.register_update_documents"
    )

    @patch(_REGISTER_DOCS)
    def test_register_update_documents_gets_sps_package_from_dag_run_conf(
        self, mk_register_update_documents
    ):
        dag_run = MagicMock()
        register_update_documents(ti=MagicMock(), dag_run=dag_run)
        dag_run.conf.get.assert_called_once_with("sps_package")

    @patch(_REGISTER_DOCS)
    def test_register_update_documents_gets_ti_xcom_info(self, mk_register_update_documents):
        task_instance = MagicMock()
        register_update_documents(ti=task_instance, dag_run=MagicMock())
        task_instance.xcom_pull.assert_called_once_with(
            key="xmls_to_preserve", task_ids="delete_docs_task_id"
        )

    @patch(_REGISTER_DOCS)
    def test_register_update_documents_empty_ti_xcom_info(self, mk_register_update_documents):
        task_instance = MagicMock()
        task_instance.xcom_pull.return_value = None
        register_update_documents(ti=task_instance, dag_run=MagicMock())
        mk_register_update_documents.assert_not_called()
        task_instance.xcom_push.assert_not_called()

    @patch(_REGISTER_DOCS)
    def test_register_update_documents_calls_register_update_documents_operation(
        self, mk_register_update_documents
    ):
        xmls_filenames = [
            "1806-907X-rba-53-01-1-8.xml",
            "1806-907X-rba-53-01-9-18.xml",
            "1806-907X-rba-53-01-19-25.xml",
        ]
        dag_run = MagicMock()
        dag_run.conf.get.return_value = "path_to_sps_package/package.zip"
        task_instance = MagicMock()
        task_instance.xcom_pull.return_value = xmls_filenames
        register_update_documents(ti=task_instance, dag_run=dag_run)
        mk_register_update_documents.assert_called_once_with(
            "path_to_sps_package/package.zip", xmls_filenames
        )

    @patch(_REGISTER_DOCS)
    def test_register_update_documents_does_not_push_if_no_documents_into_kernel(self, mk_register_update_documents):
        xmls_filenames = [
            "1806-907X-rba-53-01-1-8.xml",
            "1806-907X-rba-53-01-9-18.xml",
            "1806-907X-rba-53-01-19-25.xml",
        ]
        dag_run = MagicMock()
        dag_run.conf.get.return_value = "path_to_sps_package/package.zip"
        task_instance = MagicMock()
        task_instance.xcom_pull.return_value = xmls_filenames
        mk_register_update_documents.return_value = []
        register_update_documents(ti=task_instance, dag_run=dag_run)
        task_instance.xcom_push.assert_not_called()

    @patch(_REGISTER_DOCS)
    def test_register_update_documents_pushes_documents(self, mk_register_update_documents):
        xmls_filenames = [
            "1806-907X-rba-53-01-1-8.xml",
            "1806-907X-rba-53-01-9-18.xml",
            "1806-907X-rba-53-01-19-25.xml",
        ]
        documents = xmls_filenames[1:]
        dag_run = MagicMock()
        dag_run.conf.get.return_value = "path_to_sps_package/package.zip"
        task_instance = MagicMock()
        task_instance.xcom_pull.return_value = xmls_filenames
        mk_register_update_documents.return_value = documents
        register_update_documents(ti=task_instance, dag_run=dag_run)
        task_instance.xcom_push.assert_called_once_with(
            key="documents", value=documents
        )
if __name__ == "__main__":
    main()  # run the unittest CLI when this module is executed directly
| 9,267 | 3,468 |
import itertools
import os
import reframe as rfm
import reframe.utility.sanity as sn
class GromacsBaseCheck(rfm.RunOnlyRegressionTest):
    """Common setup shared by all GROMACS regression checks.

    Extracts the final total energy and the performance figure from the
    given mdrun log file and wires up the sanity/performance patterns used
    by the GPU and CPU variants.
    """
    def __init__(self, output_file):
        super().__init__()
        self.valid_prog_environs = ['PrgEnv-gnu']
        self.executable = 'gmx_mpi'
        # Reset sources dir relative to the SCS apps prefix
        self.sourcesdir = os.path.join(self.current_system.resourcesdir,
                                       'Gromacs')
        self.keep_files = [output_file]
        # Total energy from the last energy table in the log: the regex
        # anchors on the header row and captures the "Total Energy" column
        # of the following data row; item=-1 keeps the final occurrence.
        energy = sn.extractsingle(r'\s+Potential\s+Kinetic En\.\s+Total Energy'
                                  r'\s+Conserved En\.\s+Temperature\n'
                                  r'(\s+\S+){2}\s+(?P<energy>\S+)(\s+\S+){2}\n'
                                  r'\s+Pressure \(bar\)\s+Constr\. rmsd',
                                  output_file, 'energy', float, item=-1)
        energy_reference = -3270799.9
        self.sanity_patterns = sn.all([
            sn.assert_found('Finished mdrun', output_file),
            # Energy must match the reference within 0.1% in either direction.
            sn.assert_reference(energy, energy_reference, -0.001, 0.001)
        ])
        self.perf_patterns = {
            'perf': sn.extractsingle(r'Performance:\s+(?P<perf>\S+)',
                                     output_file, 'perf', float)
        }
        self.modules = ['GROMACS']
        self.maintainers = ['VH']
        self.strict_check = False
        self.use_multithreading = False
        self.extra_resources = {
            'switches': {
                'num_switches': 1
            }
        }
class GromacsGPUCheck(GromacsBaseCheck):
    """GPU variant of the GROMACS check (daint/dom GPU partitions)."""
    def __init__(self, variant):
        super().__init__('md.log')
        self.valid_systems = ['daint:gpu', 'dom:gpu']
        self.descr = 'GROMACS GPU check'
        self.name = 'gromacs_gpu_%s_check' % variant
        self.executable_opts = ('mdrun -dlb yes -ntomp 1 -npme 0 '
                                '-s herflat.tpr ').split()
        self.variables = {'CRAY_CUDA_MPS': '1'}
        self.tags = {'scs'}
        self.num_gpus_per_node = 1
        # dom is a smaller machine, so run with fewer total ranks there;
        # both systems use 12 MPI ranks per node.
        if self.current_system.name == 'dom':
            self.num_tasks = 72
            self.num_tasks_per_node = 12
        else:
            self.num_tasks = 192
            self.num_tasks_per_node = 12
@rfm.simple_test
class GromacsGPUMaintCheck(GromacsGPUCheck):
    """GPU check variant run during maintenance sessions."""
    def __init__(self):
        super().__init__('maint')
        self.tags |= {'maintenance'}
        # Reference tuples follow ReFrame's (value, lower, upper) relative
        # threshold convention; None means no upper bound.
        self.reference = {
            'dom:gpu': {
                'perf': (29.3, -0.05, None)
            },
            'daint:gpu': {
                'perf': (42.0, -0.10, None)
            },
        }
@rfm.simple_test
class GromacsGPUProdCheck(GromacsGPUCheck):
    """GPU check variant run in production; daint tolerates a wider (-20%)
    performance drop than the maintenance variant."""
    def __init__(self):
        super().__init__('prod')
        self.tags |= {'production'}
        self.reference = {
            'dom:gpu': {
                'perf': (29.3, -0.05, None)
            },
            'daint:gpu': {
                'perf': (42.0, -0.20, None)
            },
        }
class GromacsCPUCheck(GromacsBaseCheck):
    """CPU-only variant of the GROMACS check (daint/dom multicore)."""
    def __init__(self, variant):
        super().__init__('md.log')
        self.valid_systems = ['daint:mc', 'dom:mc']
        self.descr = 'GROMACS CPU check'
        self.name = 'gromacs_cpu_%s_check' % variant
        # -nb cpu forces the non-bonded kernels onto the CPU.
        self.executable_opts = ('mdrun -dlb yes -ntomp 1 -npme -1 '
                                '-nb cpu -s herflat.tpr ').split()
        # 36 ranks per node; dom gets 6 nodes, daint 16.
        if self.current_system.name == 'dom':
            self.num_tasks = 216
            self.num_tasks_per_node = 36
        else:
            self.num_tasks = 576
            self.num_tasks_per_node = 36
@rfm.simple_test
class GromacsCPUProdCheck(GromacsCPUCheck):
    """CPU check variant run in production."""
    def __init__(self):
        super().__init__('prod')
        self.tags |= {'production'}
        self.reference = {
            'dom:mc': {
                'perf': (42.7, -0.05, None)
            },
            'daint:mc': {
                'perf': (70.4, -0.20, None)
            },
        }
# FIXME: This test is obsolete; it is kept only for reference.
@rfm.parameterized_test([1], [2], [4], [6], [8])
class GromacsCPUMonchAcceptance(GromacsBaseCheck):
    """Scaling acceptance check on monch, parameterised over node counts."""
    def __init__(self, num_nodes):
        super().__init__('md.log')
        self.valid_systems = ['monch:compute']
        self.descr = 'GROMACS %d-node CPU check on monch' % num_nodes
        self.name = 'gromacs_cpu_monch_%d_node_check' % num_nodes
        self.executable_opts = ('mdrun -dlb yes -ntomp 1 -npme -1 '
                                '-nsteps 5000 -nb cpu -s herflat.tpr ').split()
        self.tags = {'monch_acceptance'}
        self.num_tasks_per_node = 20
        self.num_tasks = num_nodes * self.num_tasks_per_node
        # Per-node-count performance baselines, allowing a 15% drop.
        reference_by_nodes = {1: 2.6, 2: 5.1, 4: 11.1, 6: 15.8, 8: 20.6}
        self.reference = {
            'monch:compute': {
                'perf': (reference_by_nodes[num_nodes], -0.15, None)
            }
        }
| 4,884 | 1,721 |
import unittest
import enum
from typing import NamedTuple
import numpy as np
from numpy.testing import assert_equal
from scipy.interpolate import PPoly, CubicSpline, BPoly
from being.serialization import (
ENUM_LOOKUP, EOT, NAMED_TUPLE_LOOKUP, FlyByDecoder, dumps, enum_from_dict,
enum_to_dict, loads, named_tuple_as_dict, named_tuple_from_dict,
register_enum, register_named_tuple, _enum_type_qualname,
)
class TestSerialization(unittest.TestCase):
    """Round-trip tests for the being.serialization dumps/loads helpers."""

    def assert_splines_equal(self, a, b):
        """Assert that splines *a* and *b* share breakpoints, coefficients,
        extrapolation mode and axis.
        """
        # Bug fix: the original compared a.x with a.x and a.c with a.c
        # (a against itself), so breakpoint/coefficient mismatches between
        # the two splines were never detected.
        assert_equal(a.x, b.x)
        assert_equal(a.c, b.c)
        self.assertEqual(a.extrapolate, b.extrapolate)
        self.assertEqual(a.axis, b.axis)

    def test_splines(self):
        spline = CubicSpline([0, 1, 2, 4,], [[0, 1], [1, 0], [2, 1], [3, 0],])
        splineCpy = loads(dumps(spline))
        self.assert_splines_equal(spline, splineCpy)

    def test_that_we_end_up_with_the_correct_spline_types(self):
        spline = CubicSpline([0, 1, 3, 6], [0, 1, 0, -1])
        ppoly = PPoly(spline.c, spline.x)
        bpoly = BPoly.from_power_basis(spline)
        self.assert_splines_equal(loads(dumps(spline)), ppoly)
        self.assert_splines_equal(loads(dumps(ppoly)), ppoly)
        self.assert_splines_equal(loads(dumps(bpoly)), bpoly)

    def test_numpy_array(self):
        # Scalars, float arrays of several ranks, and a non-default dtype.
        arrays = [
            np.array(1),
            np.array(1.234),
            np.random.random(10),
            np.random.random((10, 2, 3)),
            (255 * np.random.random((10, 2, 3))).astype(np.uint8),
        ]
        for arr in arrays:
            arrCpy = loads(dumps(arr))
            assert_equal(arrCpy, arr)

    def test_with_new_named_tuple(self):
        Foo = NamedTuple('Foo', name=str, id=int)
        foo = Foo('Calimero', 42)
        dct = named_tuple_as_dict(foo)
        self.assertEqual(dct, {
            'type': 'Foo',
            'name': 'Calimero',
            'id': 42,
        })
        # Unregistered named tuples must be rejected...
        with self.assertRaises(RuntimeError):
            named_tuple_from_dict(dct)

        # ...and round-trip fine once registered.
        register_named_tuple(Foo)
        foo2 = named_tuple_from_dict(dct)
        self.assertEqual(foo, foo2)
        self.assertEqual(foo, loads(dumps(foo)))
        # Clean up the global registry so other tests are unaffected.
        NAMED_TUPLE_LOOKUP.pop('Foo')

    def test_with_enum(self):
        Foo = enum.Enum('Foo', 'FIRST SECOND THIRD')
        foo = Foo.SECOND
        dct = enum_to_dict(foo)
        self.assertEqual(dct, {
            'type': _enum_type_qualname(Foo),
            'members': list(Foo.__members__),
            'value': foo.value,
        })
        # Unregistered enums must be rejected...
        with self.assertRaises(RuntimeError):
            enum_from_dict(dct)

        # ...and round-trip fine once registered.
        register_enum(Foo)
        foo2 = enum_from_dict(dct)
        self.assertEqual(foo, foo2)
        self.assertEqual(foo, loads(dumps(foo)))
        ENUM_LOOKUP.pop(_enum_type_qualname(Foo))

    def test_a_set_mapps_back_to_itself(self):
        x = {1, 2, 'Hello, world!'}
        y = loads(dumps(x))
        self.assertEqual(x, y)
class TestFlyByDecoder(unittest.TestCase):
    """Tests for incremental decoding of EOT-delimited JSON streams."""
    def test_doc_example(self):
        dec = FlyByDecoder()
        # Values may straddle snippet boundaries: '1.23' + '4' -> 1.234 and
        # the dict is split across snippets 2 and 3; \x04 is the EOT marker.
        snippets = [
            '"Hello, World!"\x041.23',
            '4\x04[1, 2, 3, 4]\x04{"a":',
            ' 1, "b": 2}\x04'
        ]
        self.assertEqual(list(dec.decode_more(snippets[0])), ['Hello, World!'])
        self.assertEqual(list(dec.decode_more(snippets[1])), [1.234, [1, 2, 3, 4]])
        self.assertEqual(list(dec.decode_more(snippets[2])), [{'a': 1, 'b': 2}])
if __name__ == '__main__':
    unittest.main()  # allow running this test module directly
| 3,490 | 1,338 |
import os
# Project-relative directory layout; PROJECT_PATH is two levels above this
# module's location.
PROJECT_PATH = os.path.dirname(os.path.dirname(__file__))
DATA_PATH = os.path.join(PROJECT_PATH,'Datasets')
MODELS_PATH = os.path.join(PROJECT_PATH,'Models')
RESULT_FILE_PATH = os.path.join(PROJECT_PATH,'Results')
# NOTE(review): calibrated models share the 'Models' folder with MODELS_PATH
# — presumably intentional; confirm before relying on the distinction.
CALIBRATED_MODELS_PATH = os.path.join(PROJECT_PATH,'Models')
SAVED_SEG_MAPS = os.path.join(PROJECT_PATH,'Segmentation_outputs')
"""
Define a class named Shape and its subclass Square. The Square class has an init function which takes a length as argument. Both classes have a area function which can print the area of the shape where Shape's area is 0 by default.
"""
"""Define a class named Shape and its subclass Square. The Square class has an init function which takes a length as argument. Both classes have a area function which can print the area of the shape where Shape's area is 0 by default.
Hints:
To override a method in super class, we can define a method with the same name in the super class.
""" | 586 | 145 |
def func(a, b):
    """Combine two numbers with special-casing around zero and negatives.

    Raises ValueError when both are negative; returns 0 when exactly one is
    negative or exactly one is zero; returns 1 when both are zero; otherwise
    returns the sum.
    """
    if a < 0:
        if b < 0:
            raise ValueError
        return 0
    if b < 0:
        return 0
    if a == 0:
        return 1 if b == 0 else 0
    if b == 0:
        return 0
    return a + b
from flask import Flask, render_template, request
import audd_test
# Flask application serving the landing page and the rickroll-check endpoint.
app = Flask(__name__)
@app.route('/')
def index():
    """Render the landing page with an empty status message."""
    blank_message = ''
    return render_template('index.html', text=blank_message)
@app.route('/rr_check', methods=["POST"])
def rr_check():
    """Check the submitted URL and report whether it was a rickroll."""
    submitted_url = request.form['url']
    if audd_test.url_to_audio(submitted_url):
        message = 'You would have been rickrolled. It\'s OK. We saved you. You\'re welcome.'
    else:
        message = 'You lucked out this time.'
    return render_template('index.html', text=message)
if __name__ == '__main__':
    app.run(port=8080)  # development server only; use a WSGI server in production
| 580 | 200 |
# Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Solver utilities."""
import os
import tensorflow as tf
from absl import logging
def get_checkpoint_dir(config):
    """Return the checkpoint directory, ``<model_path>/model``."""
    saver_conf = config['solver']['saver']
    return os.path.join(saver_conf['model_path'], "model")
def get_ckpt_state(config):
    """Return the TensorFlow checkpoint state for the configured model dir."""
    return tf.train.get_checkpoint_state(get_checkpoint_dir(config))
def get_session_conf(config):
    """Build the tf.ConfigProto for a session from the solver run_config."""
    run_cfg = config['solver']['run_config']
    gpu_opts = tf.GPUOptions(allow_growth=run_cfg['allow_growth'])
    return tf.ConfigProto(
        allow_soft_placement=run_cfg['allow_soft_placement'],
        log_device_placement=run_cfg['log_device_placement'],
        intra_op_parallelism_threads=run_cfg['intra_op_parallelism_threads'],
        inter_op_parallelism_threads=run_cfg['inter_op_parallelism_threads'],
        gpu_options=gpu_opts)
def to_saved_model(config, sess, inputs: dict, outputs: dict):
    """Save model to tensorflow SavedModel.

    Args:
        config: nested config dict; export root and version come from
            config["solver"]["service"].
        sess: live TF session holding the variables to export.
        inputs: name -> input tensor map for the serving signature.
        outputs: name -> output tensor map for the serving signature.
    """
    export_path_base = config["solver"]["service"]["model_path"]
    model_version = config["solver"]["service"]["model_version"]
    # Export layout: <model_path>/<model_version>/
    export_path = os.path.join(
        tf.compat.as_bytes(export_path_base), tf.compat.as_bytes(model_version))
    export_path = os.path.abspath(export_path)
    logging.info('Exporting model to: {}'.format(export_path))
    builder = tf.saved_model.builder.SavedModelBuilder(export_path)
    # Build the signature_def_map.
    signature_def = tf.saved_model.predict_signature_def(inputs, outputs)
    builder.add_meta_graph_and_variables(
        sess, [tf.saved_model.tag_constants.SERVING],
        signature_def_map={'infer': signature_def},
        strip_default_attrs=True)
    builder.save(as_text=True)
    logging.info('Done exporting!')
def save_infer_res(config, logits, preds):
    """Save the result of inference.

    Writes one line per sample to config["data"]["infer"]["res"]: the logit
    vector with three decimals, space separated, then a tab and the
    prediction.

    Args:
        config: nested config dict; output path at config["data"]["infer"]["res"].
        logits: iterable of per-sample logit sequences.
        preds: iterable of per-sample predictions, parallel to ``logits``.
    """
    res_file = config["data"]["infer"]["res"]
    res_dir = os.path.dirname(res_file)
    # exist_ok avoids the check-then-create race of the original code, and
    # the guard handles a bare filename (dirname '' would make makedirs raise).
    if res_dir:
        os.makedirs(res_dir, exist_ok=True)
    logging.info("Save inference result to: {}".format(res_file))
    # The handle was misleadingly named ``in_f`` before; it is the output file.
    with open(res_file, "w") as out_f:
        for logit, pred in zip(logits, preds):
            out_f.write(" ".join(["{:.3f}".format(num) for num in logit]) +
                        "\t{}\n".format(pred))
| 3,056 | 1,002 |
import os;
def createStringCommand(String):
    """Prefix *String* with ``echo `` to form a shell command."""
    prefix = "echo "
    return prefix + String
def shellexec(command):
    """Run *command* through the system shell and return its exit status."""
    status = os.system(command)
    return status
def checkFileExist(pathToFile):
    """Return True if *pathToFile* exists and is a regular file.

    Note: despite the name, this checks for a file specifically;
    directories return False.
    """
    # os.path.isfile already returns a bool; the original if/else was redundant.
    return os.path.isfile(pathToFile)
def getCurrentDirectory():
    """Return the process's current working directory."""
    cwd = os.getcwd()
    return cwd
# Target host credentials for the scp step.
host = "dev.pi";
username = "pi";
encrypted = "";
# If encrypted.txt already exists, just echo a notice; otherwise build the
# openssl command that encrypts plaintext.txt into encrypted.txt.
if(checkFileExist(getCurrentDirectory()+"/encrypted.txt")==True):
    encrypted = createStringCommand("File Is Exist");
else :
    encrypted = "openssl aes-256-cbc -salt -a -e -in plaintext.txt -out encrypted.txt"
# Copy the encrypted file into the remote user's home directory.
scpCommand = "scp encrypted.txt "+username+"@"+host+":/home/"+username;
Command = [
    createStringCommand("Check File Existences"),
    encrypted,
    # NOTE(review): createStringCommand already prefixes "echo", so this
    # yields "echo echo Prepare To Send" — confirm the double echo is intended.
    createStringCommand("echo Prepare To Send"),
    scpCommand
];
# Run each prepared command in sequence through the shell.
for execute in Command:
    shellexec(execute);
from collections import defaultdict
def adjacent(y, x):
    """Yield the 3x3 neighbourhood of (y, x) — including (y, x) itself —
    in row-major order (the order matters for the 9-bit lookup)."""
    offsets = (-1, 0, 1)
    for row_off in offsets:
        for col_off in offsets:
            yield y + row_off, x + col_off
def enhance(image, enhancement):
    """Run one image-enhancement pass.

    Every pixel adjacent to a known pixel is recomputed by reading its 3x3
    neighbourhood as a 9-bit index into *enhancement*; pixels absent from
    *image* are read as dark (0).

    NOTE(review): unknown pixels always default to dark, so this appears
    correct only when enhancement[0] == '.' (a '#' there would flip the
    infinite background each step) — confirm against the puzzle input.
    """
    result = defaultdict(lambda: 0)
    for cell in image:
        for target in adjacent(*cell):
            if target in result:
                continue  # already recomputed via an earlier neighbour
            bits = []
            for neighbour in adjacent(*target):
                bits.append(str(image[neighbour]) if neighbour in image else '0')
            lookup = int(''.join(bits), 2)
            result[target] = 1 if enhancement[lookup] == '#' else 0
    return result
def print_image(img, y_range=(-5, 40), x_range=(-5, 40)):
    """Render *img* to stdout: '█' for lit pixels, space for dark.

    Generalized from the previously hard-coded -5..40 window; the defaults
    preserve the original behavior.

    Args:
        img: mapping from (y, x) to 0/1 that tolerates missing keys
            (callers pass a defaultdict).
        y_range: half-open (start, stop) row range to draw.
        x_range: half-open (start, stop) column range to draw.
    """
    print('print image')
    for i in range(*y_range):
        for j in range(*x_range):
            print('█' if img[(i, j)] == 1 else ' ', end="")
        print()
# Sample puzzle input (AoC 2021 day 20 example): first line is the 512-char
# enhancement string, then a blank line, then the initial 5x5 image.
test = """..#.#..#####.#.#.#.###.##.....###.##.#..###.####..#####..#....#..#..##..###..######.###...####..#..#####..##..#.#####...##.#.#..#.##..#.#......#.###.######.###.####...#.##.##..#..#..#####.....#.#....###..#.##......#.....#..#..#..##..#...##.######.####.####.#.#...#.......#..#.#.#...####.##.#......#..#...##.#.##..#...##.#.##..###.#......#.#.......#.#.#.####.###.##...#.....####.#..#..#.##.#....##..#.####....##...##..#...#......#.#.......#.......##..####..#...#.#.#...##..#.#..###..#####........#..####......#..#
#..#.
#....
##..#
..#..
..###"""
if __name__ == "__main__":
    # Puzzle input: line 0 is the enhancement string, line 1 is blank, and
    # the rest is the initial image grid.
    with open('input/day20.txt') as f:
        content = f.read().split('\n')[:-1]
    #content = test.split('\n')
    enhancement = content[0]
    image = defaultdict(lambda: 0)
    for y, line in enumerate(content[2:]):
        for x, value in enumerate(line):
            image[(y, x)] = 1 if value == '#' else 0
    # Two enhancement rounds (part 1), printing the image before each pass.
    for step in range(2):
        print_image(image)
        image = enhance(image, enhancement)
    #print_image(image)
    print(sum(image.values())) # 5658 < x < 5980
    # x != 5933
##########################################################################
#
# OpenMP code generator
#
# This routine is called by op2 which parses the input files
#
# It produces a file xxx_kernel.cpp for each kernel,
# plus a master kernel file
#
##########################################################################
import re
import datetime
import os
import op2_gen_common
# Enable per-thread timing instrumentation in the generated kernels when the
# OP_TIME_THREADS environment variable is set.
# NOTE(review): os.getenv returns the raw string, so ANY non-empty value
# (including "0") is truthy here — confirm that is intended.
insert_thread_timers = os.getenv('OP_TIME_THREADS', False);
def comm(line):
    """Append *line* to the generated output as a comment.

    Uses '! ' comments for Fortran and '//' (indented to the current depth)
    for C++; an empty *line* emits a blank line.
    """
    global file_text, FORTRAN, CPP
    global depth
    prefix = ' '*depth
    if len(line) == 0:
        file_text +='\n'
    elif FORTRAN:
        file_text +='! '+line+'\n'
    elif CPP:
        file_text +=prefix+'//'+line.rstrip()+'\n'
def rep(line,m):
global dims, idxs, typs, indtyps, inddims
if m < len(inddims):
line = re.sub('<INDDIM>',str(inddims[m]),line)
line = re.sub('<INDTYP>',str(indtyps[m]),line)
line = re.sub('<INDARG>','ind_arg'+str(m),line)
line = re.sub('<DIM>',str(dims[m]),line)
line = re.sub('<ARG>','arg'+str(m),line)
line = re.sub('<TYP>',typs[m],line)
line = re.sub('<IDX>',str(int(idxs[m])),line)
return line
def code(text):
    """Append one line of generated code to the output.

    The line is indented to the current depth (blank lines excepted) after
    expanding the <ARG>/<TYP>/... templates for the current argument g_m.
    """
    global file_text, FORTRAN, CPP, g_m
    global depth
    if text == '':
        prefix = ''
    else:
        prefix = ' '*depth
    file_text += prefix+rep(text,g_m).rstrip()+'\n'
def FOR(i, start, finish):
    """Emit a loop header iterating *i* over [start, finish) and indent."""
    global file_text, FORTRAN, CPP, g_m
    global depth
    if FORTRAN:
        code('do %s = %s, %s-1' % (i, start, finish))
    elif CPP:
        code('for ( int %s=%s; %s<%s; %s++ ){' % (i, start, i, finish, i))
    depth += 2
def ENDFOR():
    """Close the innermost emitted loop and dedent."""
    global file_text, FORTRAN, CPP, g_m
    global depth
    depth -= 2
    if FORTRAN:
        code('enddo')
        return
    if CPP:
        code('}')
def IF(line):
    """Emit a conditional header testing *line* and indent."""
    global file_text, FORTRAN, CPP, g_m
    global depth
    if FORTRAN:
        code('if (%s) then' % line)
    elif CPP:
        code('if (%s) {' % line)
    depth += 2
def ENDIF():
    """Close the innermost emitted conditional and dedent."""
    global file_text, FORTRAN, CPP, g_m
    global depth
    depth -= 2
    if FORTRAN:
        code('endif')
        return
    if CPP:
        code('}')
def op2_gen_openmp_simple(master, date, consts, kernels):
global dims, idxs, typs, indtyps, inddims
global FORTRAN, CPP, g_m, file_text, depth
OP_ID = 1; OP_GBL = 2; OP_MAP = 3;
OP_READ = 1; OP_WRITE = 2; OP_RW = 3;
OP_INC = 4; OP_MAX = 5; OP_MIN = 6;
accsstring = ['OP_READ','OP_WRITE','OP_RW','OP_INC','OP_MAX','OP_MIN' ]
any_soa = 0
for nk in range (0,len(kernels)):
any_soa = any_soa or sum(kernels[nk]['soaflags'])
##########################################################################
# create new kernel file
##########################################################################
for nk in range (0,len(kernels)):
name, nargs, dims, maps, var, typs, accs, idxs, inds, soaflags, optflags, decl_filepath, \
ninds, inddims, indaccs, indtyps, invinds, mapnames, invmapinds, mapinds, nmaps, nargs_novec, \
unique_args, vectorised, cumulative_indirect_index = op2_gen_common.create_kernel_info(kernels[nk])
optidxs = [0]*nargs
indopts = [-1]*nargs
nopts = 0
for i in range(0,nargs):
if optflags[i] == 1 and maps[i] == OP_ID:
optidxs[i] = nopts
nopts = nopts+1
elif optflags[i] == 1 and maps[i] == OP_MAP:
if i == invinds[inds[i]-1]: #i.e. I am the first occurence of this dat+map combination
optidxs[i] = nopts
indopts[inds[i]-1] = i
nopts = nopts+1
else:
optidxs[i] = optidxs[invinds[inds[i]-1]]
#
# set two logicals
#
j = -1
for i in range(0,nargs):
if maps[i] == OP_MAP and accs[i] == OP_INC:
j = i
ind_inc = j >= 0
j = -1
for i in range(0,nargs):
if maps[i] == OP_GBL and accs[i] <> OP_READ and accs[i] <> OP_WRITE:
j = i
reduct = j >= 0
##########################################################################
# start with the user kernel function
##########################################################################
FORTRAN = 0;
CPP = 1;
g_m = 0;
file_text = ''
depth = 0
comm('user function')
if FORTRAN:
code('include '+name+'.inc')
elif CPP:
code('#include "../'+decl_filepath+'"')
##########################################################################
# then C++ stub function
##########################################################################
code('')
comm(' host stub function')
code('void op_par_loop_'+name+'(char const *name, op_set set,')
depth += 2
for m in unique_args:
g_m = m - 1
if m == unique_args[len(unique_args)-1]:
code('op_arg <ARG>){');
code('')
else:
code('op_arg <ARG>,')
for g_m in range (0,nargs):
if maps[g_m]==OP_GBL and accs[g_m] <> OP_READ:
code('<TYP>*<ARG>h = (<TYP> *)<ARG>.data;')
code('int nargs = '+str(nargs)+';')
code('op_arg args['+str(nargs)+'];')
code('')
for g_m in range (0,nargs):
u = [i for i in range(0,len(unique_args)) if unique_args[i]-1 == g_m]
if len(u) > 0 and vectorised[g_m] > 0:
code('<ARG>.idx = 0;')
code('args['+str(g_m)+'] = <ARG>;')
v = [int(vectorised[i] == vectorised[g_m]) for i in range(0,len(vectorised))]
first = [i for i in range(0,len(v)) if v[i] == 1]
first = first[0]
if (optflags[g_m] == 1):
argtyp = 'op_opt_arg_dat(arg'+str(first)+'.opt, '
else:
argtyp = 'op_arg_dat('
FOR('v','1',str(sum(v)))
code('args['+str(g_m)+' + v] = '+argtyp+'arg'+str(first)+'.dat, v, arg'+\
str(first)+'.map, <DIM>, "<TYP>", '+accsstring[accs[g_m]-1]+');')
ENDFOR()
code('')
elif vectorised[g_m]>0:
pass
else:
code('args['+str(g_m)+'] = <ARG>;')
#
# start timing
#
code('')
comm(' initialise timers')
code('double cpu_t1, cpu_t2, wall_t1, wall_t2;')
if insert_thread_timers:
code("op_timing_realloc_manytime({0}, {1});".format(str(nk), "omp_get_max_threads()"))
else:
code('op_timing_realloc('+str(nk)+');')
code('op_timers_core(&cpu_t1, &wall_t1);')
if insert_thread_timers:
code('double non_thread_walltime = 0.0;')
code('')
#
# indirect bits
#
if ninds>0:
code('int ninds = '+str(ninds)+';')
line = 'int inds['+str(nargs)+'] = {'
for m in range(0,nargs):
line += str(inds[m]-1)+','
code(line[:-1]+'};')
code('')
IF('OP_diags>2')
code('printf(" kernel routine with indirection: '+name+'\\n");')
ENDIF()
code('')
comm(' get plan')
code('#ifdef OP_PART_SIZE_'+ str(nk))
code(' int part_size = OP_PART_SIZE_'+str(nk)+';')
code('#else')
code(' int part_size = OP_part_size;')
code('#endif')
code('')
code('int set_size = op_mpi_halo_exchanges(set, nargs, args);')
#
# direct bit
#
else:
code('')
IF('OP_diags>2')
code('printf(" kernel routine w/o indirection: '+ name + '");')
ENDIF()
code('')
code('op_mpi_halo_exchanges(set, nargs, args);')
#
# set number of threads in x86 execution and create arrays for reduction
#
if reduct or ninds==0:
comm(' set number of threads')
code('#ifdef _OPENMP')
code(' int nthreads = omp_get_max_threads();')
code('#else')
code(' int nthreads = 1;')
code('#endif')
if reduct:
code('')
comm(' allocate and initialise arrays for global reduction')
for g_m in range(0,nargs):
if maps[g_m]==OP_GBL and accs[g_m]<>OP_READ and accs[g_m] <> OP_WRITE:
code('<TYP> <ARG>_l[nthreads*64];')
FOR('thr','0','nthreads')
if accs[g_m]==OP_INC:
FOR('d','0','<DIM>')
code('<ARG>_l[d+thr*64]=ZERO_<TYP>;')
ENDFOR()
else:
FOR('d','0','<DIM>')
code('<ARG>_l[d+thr*64]=<ARG>h[d];')
ENDFOR()
ENDFOR()
code('')
IF('set->size >0')
code('')
#
# kernel call for indirect version
#
if ninds>0:
code('op_plan *Plan = op_plan_get_stage_upload(name,set,part_size,nargs,args,ninds,inds,OP_STAGE_ALL,0);')
code('')
comm(' execute plan')
code('int block_offset = 0;')
FOR('col','0','Plan->ncolors')
IF('col==Plan->ncolors_core')
code('op_mpi_wait_all(nargs, args);')
ENDIF()
code('int nblocks = Plan->ncolblk[col];')
code('')
if insert_thread_timers:
# Pause process timing and switch to per-thread timing:
code('// Pause process timing and switch to per-thread timing:')
code('op_timers_core(&cpu_t2, &wall_t2);')
code('non_thread_walltime += wall_t2 - wall_t1;')
code('#pragma omp parallel')
code('{')
depth += 2
code('double thr_wall_t1, thr_wall_t2, thr_cpu_t1, thr_cpu_t2;')
code('op_timers_core(&thr_cpu_t1, &thr_wall_t1);')
code('')
code('int nthreads = omp_get_num_threads();')
code('int thr = omp_get_thread_num();')
code('int thr_start = (nblocks * thr) / nthreads;')
code('int thr_end = (nblocks * (thr+1)) / nthreads;')
code('if (thr_end > nblocks) thr_end = nblocks;')
FOR('blockIdx','thr_start','thr_end')
else:
code('#pragma omp parallel for')
FOR('blockIdx','0','nblocks')
code('int blockId = Plan->blkmap[blockIdx + block_offset];')
code('int nelem = Plan->nelems[blockId];')
code('int offset_b = Plan->offset[blockId];')
FOR('n','offset_b','offset_b+nelem')
if nmaps > 0:
k = []
for g_m in range(0,nargs):
if maps[g_m] == OP_MAP and (not mapinds[g_m] in k):
k = k + [mapinds[g_m]]
code('int map'+str(mapinds[g_m])+'idx = arg'+str(invmapinds[inds[g_m]-1])+\
'.map_data[n * arg'+str(invmapinds[inds[g_m]-1])+'.map->dim + '+str(idxs[g_m])+'];')
code('')
for g_m in range (0,nargs):
u = [i for i in range(0,len(unique_args)) if unique_args[i]-1 == g_m]
if len(u) > 0 and vectorised[g_m] > 0:
if accs[g_m] == OP_READ:
line = 'const <TYP>* <ARG>_vec[] = {\n'
else:
line = '<TYP>* <ARG>_vec[] = {\n'
v = [int(vectorised[i] == vectorised[g_m]) for i in range(0,len(vectorised))]
first = [i for i in range(0,len(v)) if v[i] == 1]
first = first[0]
indent = ' '*(depth+2)
for k in range(0,sum(v)):
line = line + indent + ' &((<TYP>*)arg'+str(first)+'.data)[<DIM> * map'+str(mapinds[g_m+k])+'idx],\n'
line = line[:-2]+'};'
code(line)
code('')
line = name+'('
indent = '\n'+' '*(depth+2)
for g_m in range(0,nargs):
if maps[g_m] == OP_ID:
line = line + indent + '&(('+typs[g_m]+'*)arg'+str(g_m)+'.data)['+str(dims[g_m])+' * n]'
if maps[g_m] == OP_MAP:
if vectorised[g_m]:
if g_m+1 in unique_args:
line = line + indent + 'arg'+str(g_m)+'_vec'
else:
line = line + indent + '&(('+typs[g_m]+'*)arg'+str(invinds[inds[g_m]-1])+'.data)['+str(dims[g_m])+' * map'+str(mapinds[g_m])+'idx]'
if maps[g_m] == OP_GBL:
if accs[g_m] <> OP_READ and accs[g_m] <> OP_WRITE:
line = line + indent +'&arg'+str(g_m)+'_l[64*omp_get_thread_num()]'
else:
line = line + indent +'('+typs[g_m]+'*)arg'+str(g_m)+'.data'
if g_m < nargs-1:
if g_m+1 in unique_args and not g_m+1 == unique_args[-1]:
line = line +','
else:
line = line +');'
code(line)
ENDFOR()
if insert_thread_timers:
depth -= 2
code('}')
code('')
code('op_timers_core(&thr_cpu_t2, &thr_wall_t2);')
code('OP_kernels[' +str(nk)+ '].times[thr] += thr_wall_t2 - thr_wall_t1;')
ENDFOR()
code('')
if reduct:
comm(' combine reduction data')
IF('col == Plan->ncolors_owned-1')
for m in range(0,nargs):
if maps[m] == OP_GBL and accs[m] <> OP_READ:
FOR('thr','0','nthreads')
if accs[m]==OP_INC:
FOR('d','0','<DIM>')
code('<ARG>h[d] += <ARG>_l[d+thr*64];')
ENDFOR()
elif accs[m]==OP_MIN:
FOR('d','0','<DIM>')
code('<ARG>h[d] = MIN(<ARG>h[d],<ARG>_l[d+thr*64]);')
ENDFOR()
elif accs[m]==OP_MAX:
FOR('d','0','<DIM>')
code('<ARG>h[d] = MAX(<ARG>h[d],<ARG>_l[d+thr*64]);')
ENDFOR()
else:
error('internal error: invalid reduction option')
ENDFOR()
ENDIF()
if insert_thread_timers:
code('// Revert to process-level timing:')
code('op_timers_core(&cpu_t1, &wall_t1);')
code('')
code('block_offset += nblocks;');
ENDIF()
#
# kernel call for direct version
#
else:
comm(' execute plan')
if insert_thread_timers:
# Pause process timing, and switch to per-thread timing:
code('// Pause process timing, and switch to per-thread timing:')
code('op_timers_core(&cpu_t2, &wall_t2);')
code('non_thread_walltime += wall_t2 - wall_t1;')
code('#pragma omp parallel for')
FOR('thr','0','nthreads')
if insert_thread_timers:
code('double thr_wall_t1, thr_wall_t2, thr_cpu_t1, thr_cpu_t2;')
code('op_timers_core(&thr_cpu_t1, &thr_wall_t1);')
code('int start = (set->size* thr)/nthreads;')
code('int finish = (set->size*(thr+1))/nthreads;')
FOR('n','start','finish')
line = name+'('
indent = '\n'+' '*(depth+2)
for g_m in range(0,nargs):
if maps[g_m] == OP_ID:
line = line + indent + '&(('+typs[g_m]+'*)arg'+str(g_m)+'.data)['+str(dims[g_m])+'*n]'
if maps[g_m] == OP_GBL:
if accs[g_m] <> OP_READ and accs[g_m] <> OP_WRITE:
line = line + indent +'&arg'+str(g_m)+'_l[64*omp_get_thread_num()]'
else:
line = line + indent +'('+typs[g_m]+'*)arg'+str(g_m)+'.data'
if g_m < nargs-1:
line = line +','
else:
line = line +');'
code(line)
ENDFOR()
if insert_thread_timers:
code('op_timers_core(&thr_cpu_t2, &thr_wall_t2);')
code('OP_kernels['+str(nk)+'].times[thr] += thr_wall_t2 - thr_wall_t1;')
ENDFOR()
if insert_thread_timers:
# OpenMP block complete, so switch back to process timing:
code('// OpenMP block complete, so switch back to process timing:')
code('op_timers_core(&cpu_t1, &wall_t1);')
if ninds>0:
code('OP_kernels['+str(nk)+'].transfer += Plan->transfer;')
code('OP_kernels['+str(nk)+'].transfer2 += Plan->transfer2;')
ENDIF()
code('')
#zero set size issues
if ninds>0:
IF('set_size == 0 || set_size == set->core_size')
code('op_mpi_wait_all(nargs, args);')
ENDIF()
#
# combine reduction data from multiple OpenMP threads, direct version
#
comm(' combine reduction data')
for g_m in range(0,nargs):
if maps[g_m]==OP_GBL and accs[g_m]<>OP_READ and accs[g_m] <> OP_WRITE and ninds==0:
FOR('thr','0','nthreads')
if accs[g_m]==OP_INC:
FOR('d','0','<DIM>')
code('<ARG>h[d] += <ARG>_l[d+thr*64];')
ENDFOR()
elif accs[g_m]==OP_MIN:
FOR('d','0','<DIM>')
code('<ARG>h[d] = MIN(<ARG>h[d],<ARG>_l[d+thr*64]);')
ENDFOR()
elif accs[g_m]==OP_MAX:
FOR('d','0','<DIM>')
code('<ARG>h[d] = MAX(<ARG>h[d],<ARG>_l[d+thr*64]);')
ENDFOR()
else:
print 'internal error: invalid reduction option'
ENDFOR()
if maps[g_m]==OP_GBL and accs[g_m]<>OP_READ:
code('op_mpi_reduce(&<ARG>,<ARG>h);')
code('op_mpi_set_dirtybit(nargs, args);')
code('')
#
# update kernel record
#
comm(' update kernel record')
code('op_timers_core(&cpu_t2, &wall_t2);')
if insert_thread_timers:
code('non_thread_walltime += wall_t2 - wall_t1;')
code('OP_kernels[' +str(nk)+ '].name = name;')
code('OP_kernels[' +str(nk)+ '].count += 1;')
if insert_thread_timers:
code('OP_kernels[' +str(nk)+ '].times[0] += non_thread_walltime;')
else:
code('OP_kernels[' +str(nk)+ '].time += wall_t2 - wall_t1;')
if ninds == 0:
line = 'OP_kernels['+str(nk)+'].transfer += (float)set->size *'
for g_m in range (0,nargs):
if optflags[g_m]==1:
IF('<ARG>.opt')
if maps[g_m]<>OP_GBL:
if accs[g_m]==OP_READ:
code(line+' <ARG>.size;')
else:
code(line+' <ARG>.size * 2.0f;')
if optflags[g_m]==1:
ENDIF()
depth -= 2
code('}')
##########################################################################
# output individual kernel file
##########################################################################
if not os.path.exists('openmp'):
os.makedirs('openmp')
fid = open('openmp/'+name+'_kernel.cpp','w')
date = datetime.datetime.now()
fid.write('//\n// auto-generated by op2.py\n//\n\n')
fid.write(file_text)
fid.close()
# end of main kernel call loop
##########################################################################
# output one master kernel file
##########################################################################
file_text =''
code('#ifdef _OPENMP')
code(' #include <omp.h>')
code('#endif')
code('')
comm(' global constants ')
for nc in range (0,len(consts)):
if not consts[nc]['user_declared']:
if consts[nc]['dim']==1:
code('extern '+consts[nc]['type'][1:-1]+' '+consts[nc]['name']+';')
else:
if consts[nc]['dim'] > 0:
num = str(consts[nc]['dim'])
else:
num = 'MAX_CONST_SIZE'
code('extern '+consts[nc]['type'][1:-1]+' '+consts[nc]['name']+'['+num+'];')
code('')
comm(' header ')
if os.path.exists('./user_types.h'):
code('#include "../user_types.h"')
code('#include "op_lib_cpp.h" ')
code('')
comm(' user kernel files')
for nk in range(0,len(kernels)):
code('#include "'+kernels[nk]['name']+'_kernel.cpp"')
master = master.split('.')[0]
fid = open('openmp/'+master.split('.')[0]+'_kernels.cpp','w')
fid.write('//\n// auto-generated by op2.py\n//\n\n')
fid.write(file_text)
fid.close()
| 18,543 | 7,119 |
class ActualValue:
    """Wraps a value under test.

    Every ``should*`` method returns the empty string on success and a
    human-readable failure message otherwise (no exceptions are raised).
    """

    def __init__(self, actual_value):
        self.__value = actual_value

    def should(self, predicate):
        """Return '' if `predicate` holds for the wrapped value, else a failure message."""
        return '' if predicate(self.__value) else 'Predicate not true of %s' % self.__value

    def should_equal(self, expected_value):
        """Return '' if the wrapped value equals `expected_value`, else a failure message."""
        if self.__value != expected_value:
            return '%s does not equal %s' % (self.__value, expected_value)
        return ''

    should_be = should_equal  # alias, picked up by the `It` matcher loop below

    def should_not_equal(self, expected_value):
        """Return '' if the wrapped value differs from `expected_value`, else a failure message."""
        if self.__value == expected_value:
            return '%s does equal %s' % (self.__value, expected_value)
        return ''

    should_not_be = should_not_equal  # alias
class Expect:
    """Fluent front-end: ``Expect(v).to_equal(x)`` / ``Expect(v).not_to_equal(x)``
    delegate to the corresponding ``should*`` / ``should_not*`` method of
    :class:`ActualValue`.
    """

    def __init__(self, actual_value):
        self.__wrapped = ActualValue(actual_value)

    def __getattr__(self, name):
        # Translate the fluent prefix into ActualValue's naming scheme.
        # 'to'/'not_to' prefixes are disjoint, so check order does not matter.
        if name.startswith('not_to'):
            return getattr(self.__wrapped, 'should_not' + name[6:])
        if name.startswith('to'):
            return getattr(self.__wrapped, 'should' + name[2:])
        raise AttributeError
# `It` is a self-returning placeholder object used to build matcher callables:
# It.should_equal(x) returns a function f such that f(actual_value) invokes
# actual_value.should_equal(x).
It = lambda: It
# Mirror every `should*` method of ActualValue onto `It` as a curried matcher.
for method_name in dir(ActualValue):
    if method_name.startswith('should'):
        # The immediately-invoked outer lambda binds method_name by value,
        # avoiding the classic late-binding-closure-in-a-loop pitfall.
        (lambda method_name:
            setattr(It, method_name,
                lambda expected:
                    lambda actual_value: getattr(actual_value, method_name)(expected))
        )(method_name)
| 1,529 | 462 |
#!/usr/bin/env python
import imp
import logging
import os
import sys

import ply.yacc
# Directory (relative to the working directory) where parse logs are written.
LOGDIR = 'logs'
# Lower the root logger threshold so per-parser loggers can emit DEBUG records.
logging.getLogger().setLevel(logging.DEBUG)
class SPLParser(object):
    """Represents a parser object that can parse SPL queries.

    You should not need to create one of these. Simply import
    parse from the splparser module.
    """

    def __init__(self, lexermod, parsetab_name, parsetab_dir, logname, rulesmod, optimize=True):
        """Creates an SPLParser object.

        :param lexermod: The corresponding lexer module
        :type lexermod: module
        :param parsetab_name: The name of the toplevel parse table file
        :type parsetab_name: str
        :param parsetab_dir: The directory in which to store parse tables
        :type parsetab_dir: str
        :param logname: The name of the log
        :type logname: str
        :param rulesmod: The module containing the top-level rules
        :type rulesmod: module
        :param optimize: Whether or not to skip writing a debug log (logging
            slows performance and can exhaust file handles -- one per parsed
            command, as each gets a separate log file)
        :type optimize: bool
        :rtype: SPLParser
        """
        self.lexer = lexermod.lex()
        modname, dirname = self.setup_parsetab(parsetab_name, parsetab_dir)
        self.parsetab_name = modname
        self.parsetab_dir = dirname
        self.rules = rulesmod
        self.optimize = optimize
        if not optimize:  # TODO: Why is this conditional necessary?
            # Debug mode: attach a dedicated log and ask PLY for debug output.
            self.log = self.setup_log(logname)
            self.parser = ply.yacc.yacc(module=self.rules,
                                        debug=True,
                                        debuglog=self.log,
                                        tabmodule=self.parsetab_name,
                                        outputdir=self.parsetab_dir,
                                        optimize=optimize)
        else:
            self.parser = ply.yacc.yacc(module=self.rules,
                                        tabmodule=self.parsetab_name,
                                        outputdir=self.parsetab_dir,
                                        optimize=optimize)

    def setup_parsetab(self, parsetab_name, parsetab_dir):
        """Locate an existing parse-table module, or prepare a directory for
        PLY to write one into.

        Checked in order: the installed package location, then the current
        directory. If neither has the module, `parsetab_dir` is created so
        PLY can generate the tables there.

        :returns: (module name, directory) suitable for ply.yacc.yacc
        :raises OSError: if `parsetab_dir` is missing and cannot be created
        """
        install_location = os.path.dirname(__file__)
        install_parsetab_dir = os.path.join(install_location, parsetab_dir)
        try:  # check for parsetabs in current installation
            path_to_parsetab = os.path.join(install_parsetab_dir, parsetab_name + '.py')
            imp.load_source(parsetab_name, path_to_parsetab)
            return parsetab_name, install_parsetab_dir
        except IOError:  # parsetab module does not exist in install location
            pass
        try:  # check for parsetabs in current directory
            path_to_parsetab = os.path.join(parsetab_dir, parsetab_name + '.py')
            imp.load_source(parsetab_name, path_to_parsetab)
            return parsetab_name, parsetab_dir
        except IOError:  # parsetab module does not exist in current directory
            pass
        # Neither location has the tables: ensure the directory exists so PLY
        # can write them. (Was a bare `except:`; os.stat raises OSError when
        # the path is missing.)
        try:
            os.stat(parsetab_dir)
        except OSError:
            try:
                os.makedirs(parsetab_dir)
            except OSError:
                msg = "ERROR: Need permission to write to ./%s\n" % parsetab_dir
                sys.stderr.write(msg)
                raise
        return parsetab_name, parsetab_dir

    def setup_log(self, name):
        """Set up the log so that parsing info can be written out.

        :param name: The name of the log file
        :type name: str
        :rtype: logging.Logger
        """
        try:
            os.stat(LOGDIR)
        except OSError:  # was a bare `except:`; a missing dir raises OSError
            try:
                os.makedirs(LOGDIR)
            except OSError:
                sys.stderr.write("WARNING: Can't write logs to ./" + LOGDIR + "\n")
        logger = logging.getLogger(name)
        logger.setLevel(logging.DEBUG)
        filehandler = logging.FileHandler(LOGDIR + "/" + str(name) + ".log")
        filehandler.setLevel(logging.DEBUG)
        logger.addHandler(filehandler)
        return logger

    def parse(self, data):
        """Parse the given string.

        :param data: The string to parse
        :type data: str
        :rtype: ParseTreeNode
        """
        # The previous try/except here re-raised every exception unchanged
        # (both handlers were bare `raise`), so it was dead code; removed.
        if not self.optimize:
            return self.parser.parse(data, lexer=self.lexer, debug=self.log)
        return self.parser.parse(data, lexer=self.lexer)
| 5,255 | 1,437 |
# Autogenerated file. ANY CHANGES WILL BE OVERWRITTEN
from to_python.core.types import FunctionType, \
FunctionArgument, \
FunctionArgumentValues, \
FunctionReturnTypes, \
FunctionSignature, \
FunctionDoc, \
EventData, \
CompoundEventData
DUMP_PARTIAL = [
CompoundEventData(
server=[
],
client=[
EventData(
name='onClientSoundBeat',
docs=FunctionDoc(
description='This event is triggered when a sound beats.' ,
arguments={
"theTime": """: the position in the song of the beat """
},
result='' ,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theTime',
argument_type=FunctionType(
names=['double'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
)
],
),
CompoundEventData(
server=[
],
client=[
EventData(
name='onClientSoundChangedMeta',
docs=FunctionDoc(
description='This event is triggered when a sounds meta tags have been modified.' ,
arguments={
"streamTitle": """: The title of a specific stream """
},
result='' ,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='streamTitle',
argument_type=FunctionType(
names=['string'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
)
],
),
CompoundEventData(
server=[
],
client=[
EventData(
name='onClientSoundFinishedDownload',
docs=FunctionDoc(
description='This event is triggered when a sound has just finished downloading. This means the complete sound file is now loaded in the players RAM, and can be played completely from start to end. Unlike onClientSoundStream, this event only triggers for file streams, not for live ones since live streams never actually end.' ,
arguments={
"length": """: The length of the stream in milliseconds """
},
result='' ,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='length',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
)
],
),
CompoundEventData(
server=[
],
client=[
EventData(
name='onClientSoundStarted',
docs=FunctionDoc(
description='This event is triggered when a sound is started.' ,
arguments={
"reason": """: the reason the sound was started, can be play, resumed or enabled. """
},
result='' ,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='reason',
argument_type=FunctionType(
names=['string'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
)
],
),
CompoundEventData(
server=[
],
client=[
EventData(
name='onClientSoundStopped',
docs=FunctionDoc(
description='This event is triggered when a sound is stopped.' ,
arguments={
"reason": """: the reason the sound was stopped, can be finished, paused, destroyed or disabled. """
},
result='' ,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='reason',
argument_type=FunctionType(
names=['string'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
)
],
),
CompoundEventData(
server=[
],
client=[
EventData(
name='onClientSoundStream',
docs=FunctionDoc(
description='This event is triggered when a sound has just finished initial streaming. For file streams, this means the sound will now start playing, but isnt done downloading yet. For live streams, this just means the stream will start playing. This event will also trigger when, for some reason, the streaming failed.' ,
arguments={
"success": """: A boolean indicating whether the stream was a success or not """,
"length": """: The length of the stream in seconds. Always returns 0 for a live stream """,
"streamName": """: The name of the stream. Note that this isnt the filename. Also note that this isnt always provided """,
"errorMessage": """: A string containing the error message or an empty string if there was no error """
},
result='' ,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='success',
argument_type=FunctionType(
names=['bool'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='length',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='streamName',
argument_type=FunctionType(
names=['string'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='errorMessage',
argument_type=FunctionType(
names=['string'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
)
],
)
]
| 8,884 | 1,679 |
import os
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import torch
import random
import numpy as np
import torch
import os
from collections import OrderedDict
# 4-layer
# Per-layer scaling ratios for the CGES penalties (presumably glayerwise
# scales the group-sparsity term and elayerwise the exclusive-sparsity term
# in apply_cges below) — TODO confirm against the paper/training config.
glayerwise = [1.,1.0, 1./15, 1./144]
elayerwise = [1.,0.5, 15., 144.]
# # 5-layer
# glayerwise = [1.,1.0, 1./15, 1./144, 1./144]
# elayerwise = [1.,0.5, 15., 144., 144.]
def apply_cges(args, model, optimizer):
    """Applies proximal GD update rule on the weights.

    Computes a combined group-sparsity (GS) + exclusive-sparsity (ES)
    proximal step on every '*weight*' tensor of `model`, scaled per layer by
    the module-level `glayerwise` / `elayerwise` ratios and by the current
    learning rate.

    NOTE(review): results are written into the dict returned by
    model.state_dict(), but no model.load_state_dict(state_dict) appears in
    this chunk — verify the sparsified weights are actually written back to
    the model. TODO confirm against the full file.
    """
    global glayerwise
    global elayerwise
    learning_rate = optimizer.param_groups[0]['lr']
    # NOTE(review): the two self-assignments below are no-ops given the
    # `global` declarations above.
    glayerwise = glayerwise
    elayerwise = elayerwise
    # Collect only the weight tensors (biases etc. are left untouched).
    S_vars = OrderedDict()
    for key, value in model.state_dict().items():
        if 'weight' in key:
            S_vars[key] = value
    if len(S_vars) > len(glayerwise) or len(S_vars) > len(elayerwise):
        raise Exception("S_vars(length: %d) and layerwise ratios(length: %d / %d) lengths do not match!" %
                        (len(S_vars), len(glayerwise), len(elayerwise)))
    state_dict = model.state_dict()
    for vind, (key, var) in enumerate(S_vars.items()):
        # GS: group-sparsity shrinkage (groups formed by summing over dim 0).
        group_sum = torch.sum(torch.square(var), 0)
        g_param = learning_rate * args.lamb * (args.mu - vind * args.chvar)
        gl_comp = 1. - g_param * glayerwise[vind] * torch.rsqrt(group_sum)
        # (x > 0) * x is the positive part, i.e. the soft-threshold max(x, 0).
        gl_plus = (gl_comp > 0).type(torch.float32) * gl_comp
        gl_stack = torch.stack([gl_plus for _ in range(var.shape[0])], 0)
        gl_op = gl_stack * var
        # ES: exclusive-sparsity shrinkage on the GS result.
        e_param = learning_rate * args.lamb * ((1. - args.mu) + vind * args.chvar)
        W_sum = e_param * elayerwise[vind] * torch.sum(torch.abs(gl_op), 0) #Equation 8 of the paper
        W_sum_stack = torch.stack([W_sum for _ in range(gl_op.shape[0])], 0)
        el_comp = torch.abs(gl_op) - W_sum_stack
        el_plus = (el_comp > 0).type(torch.float32) * el_comp
        state_dict[key] = el_plus * torch.sign(gl_op)
"""
Cache implementation.
Currently only two types of cache are allowed:
* "none" cache switched off
* "redis" use redis for cache
Configuration parameters:
cache.type = redis | none
cache.redis.db
cache.redis.host
cache.redis.port
"""
import os
import json
from config import CONFIG
# Lazily create the Redis client only when the config selects redis, so the
# `redis` package is not required when cache.type == "none".
_REDIS = None
if CONFIG['cache.type'] == 'redis':
    import redis
    _REDIS = redis.Redis(
        host=CONFIG['cache.redis.host'],
        port=CONFIG['cache.redis.port'],
        db=CONFIG['cache.redis.db'])
# Optional key namespace: when configured, every cache key is prefixed
# with "<prefix>:".
_REDIS_PREFIX = ''
if CONFIG.get("cache.redis.prefix", ""):
    _REDIS_PREFIX = CONFIG["cache.redis.prefix"] + ":"
def put(key, value):
    """
    Save `value` with `key`, JSON-encoding dicts and lists before storage.
    """
    if _REDIS_PREFIX:
        key = _REDIS_PREFIX + key
    if CONFIG["cache.type"] == "redis" and _REDIS:
        payload = json.dumps(value) if isinstance(value, (dict, list)) else value
        _REDIS.set(key, payload)
def get(key):
    """
    Read the value stored under `key`, JSON-decoding it when possible;
    returns None when no redis backend is configured.
    """
    if _REDIS_PREFIX:
        key = _REDIS_PREFIX + key
    if CONFIG["cache.type"] == "redis" and _REDIS:
        raw = _REDIS.get(key)
        try:
            return json.loads(raw)
        except (ValueError, TypeError):
            # Not JSON (or a missing key returned None): hand back as-is.
            return raw
    return None
def delete(key):
    """
    Remove `key` (prefixed if configured) from the cache backend.
    """
    if _REDIS:
        full_key = _REDIS_PREFIX + key if _REDIS_PREFIX else key
        _REDIS.delete(full_key)
    return None
| 1,529 | 516 |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
from Bio import SeqIO
import argparse
import sys
from argparse import RawTextHelpFormatter
import numpy as np
from collections import namedtuple
# One detected difference: (1-based position, reference base, consensus base).
MySNP = namedtuple("MySNP", "pos prev new")
# Module-level accumulator: filled by consensus(), written out by print_vnc().
snps=[]
def print_vnc(reference, snps, out):
    """Write `snps` to `<out>.vcf` as a minimal VCF v4.1 file.

    :param reference: reference file path (currently unused; the chromosome
        column is the placeholder "xxx" since only one sequence is handled)
    :param snps: iterable of MySNP(pos, prev, new) records
    :param out: output path without the ".vcf" extension
    """
    # `with` guarantees the handle is closed even if a write fails.
    with open(out + ".vcf", "w") as f:
        f.write("##fileformat=VCFv4.1\n")
        f.write("##source=s-aligner\n")
        # The VCF spec requires the column header to be tab-delimited
        # (it was previously space-separated, producing an invalid file).
        f.write("#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\n")
        for snp in snps:
            f.write("xxx\t%s\t.\t%s\t%s\t.\tPASS\n" % (snp.pos, snp.prev, snp.new))
def consensus(fasta, reference, out):
    """Build a majority-vote consensus from aligned FASTA records.

    Tallies per-column base/gap counts over the records in `fasta`, writes
    the consensus sequence to `out`, and records differences between the
    consensus and the last record as SNPs (emitted via print_vnc()).

    NOTE(review): only `prevseq` is tallied on each iteration, so the final
    record's bases are never counted — it is instead kept as `lastseq`, the
    sequence the consensus is compared against. Verify this is intentional.
    """
    # Keep the last (typically only) sequence from the reference file.
    for record in SeqIO.parse(reference, "fasta"):
        referenceseq = record.seq
    print(len(referenceseq))
    # Per-column counters, sized 2x the reference length to leave room for
    # alignment columns beyond the reference (insertions).
    c_columns = np.array([0]*len(referenceseq)*2)
    g_columns = np.array([0]*len(referenceseq)*2)
    t_columns = np.array([0]*len(referenceseq)*2)
    a_columns = np.array([0]*len(referenceseq)*2)
    gap_columns = np.array([0]*len(referenceseq)*2)
    prevseq=''
    lastseq=''
    maxlen=0
    for record in SeqIO.parse(fasta, "fasta"):
        recordseq = record.seq
        if(len(prevseq)>0):
            found_first_char=False
            pos=0
            for c in prevseq:
                if( c!='-'):
                    if(not found_first_char):
                        found_first_char = True
                    if(c=='A'):
                        a_columns[pos] +=1
                    elif(c=='C'):
                        c_columns[pos] +=1
                    elif(c=='G'):
                        g_columns[pos] +=1
                    elif(c=='T'):
                        t_columns[pos] +=1
                elif (found_first_char):
                    # Only count gaps after the first real base, i.e. skip
                    # leading alignment padding.
                    gap_columns[pos] += 1
                pos += 1
        if(len(recordseq) > maxlen):
            maxlen=len(recordseq)
        lastseq=recordseq
        prevseq=recordseq
    consensus= ""
    posreal=1
    for i in range(maxlen):
        # Majority vote per column; ties resolved in the order A, C, G, T, gap.
        x = max(a_columns[i],c_columns[i],g_columns[i],t_columns[i],gap_columns[i])
        c='ñ'
        if(a_columns[i] == x):
            c='A'
        elif(c_columns[i] == x):
            c='C'
        elif(g_columns[i] == x):
            c='G'
        elif(t_columns[i] == x):
            c='T'
        elif(gap_columns[i] == x):
            c='-'
        if(x==0):
            # Column never observed in any tallied sequence: call it N.
            c='N'
        if(c!=lastseq[i] and c!='ñ'):
            # NOTE(review): lastseq may be shorter than maxlen for ragged
            # inputs, making lastseq[i] raise IndexError — TODO confirm all
            # input records are equal-length alignments.
            snp=MySNP(posreal,lastseq[i],c)
            snps.append(snp)
            #print(str(posreal)+":: "+str(lastseq[i])+" -> "+str(c))
        if(c!='-'):
            # Only non-gap calls advance the real (reference) coordinate.
            consensus = "".join((consensus, c))
            posreal+=1
    #print("consensus:"+consensus)
    f = open(out, "w")
    f.write(">consensus")
    f.write("\n")
    f.write(consensus)
    f.write("\n")
    f.close()
    print_vnc(reference,snps,out)
    #print(snps)
if __name__ == '__main__':
    # NOTE(review): this prog/usage/description banner was copied from
    # BinSanity's `simplify-fasta` script and does not describe this
    # consensus tool — left unchanged here, but it should be rewritten.
    parser = argparse.ArgumentParser(prog='simplify-fasta', usage='%(prog)s -i inputFasta -o outputFasta', description="""
    *****************************************************************************
    *********************************BinSanity***********************************
    ** The `simplify-fasta` script is built to simplify fasta headers so as **
    ** not to run into errors when running BinSanity. Simplified headers **
    ** means that every contig id is only made up of a single word. This **
    ** will rename your fasta ids as `>contig_1`, `>contig_2`, and so on. **
    *****************************************************************************""", formatter_class=RawTextHelpFormatter)
    parser.add_argument("-i", metavar="", dest="inputFASTA",
                        help="Specify the name of the input file")
    parser.add_argument("-r", metavar="", dest="inputREF",
                        help="Specify the name of the reference file")
    parser.add_argument("-o", metavar="", dest="inputOUT",
                        help="Specify the name for the output file")
    args = parser.parse_args()
    if len(sys.argv) < 2:
        # print_help() writes to stdout itself; wrapping it in print() used
        # to emit a stray "None" line after the help text.
        parser.print_help()
    if args.inputFASTA is None and args.inputOUT is None:
        print("You haven't specified an input or output silly")
    elif args.inputFASTA is None:
        print("You can't give an output without an input")
    elif args.inputOUT is None:
        print("Provide an output file")  # fixed typo: "and" -> "an"
    else:
        consensus(args.inputFASTA, args.inputREF, args.inputOUT)
import requests
import json
import argparse
import xml.etree.ElementTree as ET
#----------------------------------------
# example ---> python cost_metadata.py -s MOBS
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-s',dest='station',default='',help="station, 4 digits, bala")
args = parser.parse_args()
# Station metadata accumulator, populated in place by getStationSetUp()
# and getStationLog() below.
mdat = {}
#----------------------------------------
def getStationSetUp(station_ID,archive_url='https://gws.geodesy.ga.gov.au/setups/search/findCurrentByFourCharacterId?id='):
    """Fetch the current equipment set-up for a station into the module dict `mdat`.

    Populates mdat['rcvT'/'rcvN'] (receiver type/serial) and
    mdat['antT'/'antN'/'antdU'/'antdN'/'antdE'] (antenna type/serial and
    marker->ARP eccentricities) from the GA geodesy web service.

    Example of how to get the latest meta data from the GA archive:
    curl 'https://gws.geodesy.ga.gov.au/setups/search/findCurrentByFourCharacterId?id=MOBS'

    :param station_ID: four-character station id, e.g. 'MOBS'
    :param archive_url: service endpoint; station_ID is appended
    :returns: 1 on success
    :raises requests.HTTPError: if the service responds with an error status
    """
    archive_url = archive_url + station_ID
    request = requests.get(archive_url)
    request.raise_for_status()
    # Use the public .json() accessor instead of the private ._content
    # attribute (an implementation detail of requests).
    jdat = request.json()
    for dd in jdat['equipmentInUse']:
        equipment_id = dd['content']['id']
        if equipment_id['equipmentType'] == "gnss receiver":
            mdat['rcvT'] = equipment_id['type']
            mdat['rcvN'] = equipment_id['serialNumber']
        elif equipment_id['equipmentType'] == "gnss antenna":
            mdat['antT'] = equipment_id['type']
            mdat['antN'] = equipment_id['serialNumber']
            configuration = dd['content']['configuration']
            mdat['antdU'] = configuration['markerArpUpEccentricity']
            mdat['antdN'] = configuration['markerArpNorthEccentricity']
            mdat['antdE'] = configuration['markerArpEastEccentricity']
    return 1
def getStationLog(station_ID,archive_url='https://gws.geodesy.ga.gov.au/siteLogs/search/findByFourCharacterId?id='):
    """Fetch the full GeodesyML site log for a station into the module dict `mdat`.

    Populates mdat['domes'] (IERS DOMES number) and mdat['lat'/'long'/'height']
    (geodetic position strings).

    To get the full site log you can run:
    curl 'https://gws.geodesy.ga.gov.au/siteLogs/search/findByFourCharacterId?id=MOBS&format=geodesyml' -i

    :param station_ID: four-character station id, e.g. 'MOBS'
    :param archive_url: service endpoint; station_ID and format are appended
    :raises requests.HTTPError: if the service responds with an error status
    """
    ns = {
        'geo': 'urn:xml-gov-au:icsm:egeodesy:0.4',
        'gml': 'http://www.opengis.net/gml/3.2',
        'xlink': 'http://www.w3.org/1999/xlink',
        'gmd': 'http://www.isotc211.org/2005/gmd',
        'gmx': 'http://www.isotc211.org/2005/gmx',
        'om': 'http://www.opengis.net/om/2.0',
        # Fixed: the gco namespace URI was missing the leading 'h' ('ttp://...').
        'gco': 'http://www.isotc211.org/2005/gco',
        'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
    }
    archive_url = archive_url + station_ID + '&format=geodesyml'
    request = requests.get(archive_url)
    request.raise_for_status()
    # .content is the public accessor for the raw response bytes
    # (previously the private ._content attribute was used).
    root = ET.fromstring(request.content)
    mdat['domes'] = root.findall('.//geo:siteIdentification//geo:iersDOMESNumber', ns)[0].text
    pos = root.findall('.//geo:siteLocation//geo:geodeticPosition//gml:pos', ns)[0].text.split()
    mdat['lat'] = pos[0]
    mdat['long'] = pos[1]
    mdat['height'] = pos[2]
# Populate `mdat` from the two GA web services, then emit one CSV line of
# station metadata. (Note: mdat['antN'] is collected but not printed.)
getStationSetUp(args.station)
getStationLog(args.station)
print('{0:},{1:},{2:},{3:},{4:},{5:},{6:},{7:},{8:},{9:},{10:}'.format(args.station,mdat['domes'],mdat['lat'],mdat['long'],mdat['height'],mdat['rcvT'],mdat['rcvN'],mdat['antT'],mdat['antdU'],mdat['antdN'],mdat['antdE']))
| 3,287 | 1,168 |
"""
Utilies for beacons
"""
import copy
def remove_hidden_options(config, whitelist):
    """
    Remove any hidden options not whitelisted.

    An entry (dict) is removed from `config` (a list) if it contains any key
    starting with "_" that is not in `whitelist`.

    :param config: list of option dicts; modified in place
    :param whitelist: hidden option names that are allowed to stay
    :returns: the (mutated) config list
    """
    # Iterate over a shallow copy so removal does not skip entries.
    for entry in copy.copy(config):
        for func in entry:
            if func.startswith("_") and func not in whitelist:
                config.remove(entry)
                # Stop after removing: a second hidden key would otherwise
                # trigger a second remove() and raise ValueError.
                break
    return config
| 332 | 97 |
import random
import math
from Crypto.Util.number import *
def isPrime(p):
    """Return True iff `p` is prime (trial division up to sqrt(p)).

    Fixed: values below 2 (including 0 and 1) previously returned True
    because the trial-division loop never ran.
    """
    if p < 2:
        return False
    for i in range(2, math.isqrt(p) + 1):
        if p % i == 0:
            return False
    return True
# Challenge setup: read the flag as one big integer.
flag = bytes_to_long(open('flag.txt','rb').read())
p = int(input('Enter a prime: '))
# The user-chosen modulus must be a prime strictly between 10 and 250.
assert 10 < p, 'Prime too small'
assert p < 250, 'Prime too big'
assert isPrime(p), 'Number not prime'
# 1000 random 128-bit coefficients for the secret polynomial.
coeffs = [random.getrandbits(128) for _ in range(1000)]
# Adjust coeffs[0] so that the sum of every (p-1)-th coefficient equals the flag.
k = sum(coeffs[i] for i in range(0, len(coeffs), p-1))
coeffs[0] += flag - k
def poly(coeffs, n, p):
    """Evaluate sum_i coeffs[i] * (n**i mod p), reduced mod p.

    Note each power is reduced mod p *before* the multiplication, so this is
    not a plain polynomial evaluation mod p.
    """
    total = 0
    for exponent, coefficient in enumerate(coeffs):
        total += coefficient * pow(n, exponent, p)
    return total % p
n = int(input('Enter a number: '))
# Forbid n in {0, 1, p-1} whose power cycles would trivially leak structure.
assert 1 < n < p - 1, "We're feeling sneaky today, hmm?"
# Sum the polynomial over the powers n^1..n^(p-1) mod p and reveal the total mod p.
op = 0
for i in range(1, p):
    op += poly(coeffs, pow(n, i, p), p)
print(op % p)
from gym import utils
from wtm_envs.mujoco import ant_env
from wtm_envs.mujoco.hook_env_pddl import PDDLHookEnv
import numpy as np
class AntFourRoomsEnv(ant_env.AntEnv, utils.EzPickle):
    """Ant locomotion task in a four-rooms layout, configured for hierarchical
    RL with separate end-goal and subgoal spaces (HAC-style)."""

    def __init__(self, reward_type='sparse'):
        """Configure state/goal/subgoal spaces and thresholds, then init AntEnv.

        :param reward_type: reward scheme passed through to AntEnv
            ('sparse' by default).
        """
        name = "ant_four_rooms.xml"
        # Provide initial state space consisting of the ranges for all joint angles and velocities.
        # In the Ant Reacher task, we use a random initial torso position and use fixed values for the remainder.
        initial_joint_pos = np.array([0, 0, 0.55, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, -1.0, 0.0, -1.0, 0.0, 1.0])
        initial_joint_pos = np.reshape(initial_joint_pos, (len(initial_joint_pos), 1))
        # Duplicate the column so each joint gets an identical [low, high] range.
        initial_joint_ranges = np.concatenate((initial_joint_pos, initial_joint_pos), 1)
        # Randomize only the torso (x, y) start position.
        initial_joint_ranges[0] = np.array([-6, 6])
        initial_joint_ranges[1] = np.array([-6, 6])
        # Concatenate velocity ranges (all zero, i.e. the ant starts at rest).
        initial_state_space = np.concatenate(
            (initial_joint_ranges, np.zeros((len(initial_joint_ranges) - 1, 2))), 0)
        # Provide end goal space.
        max_range = 6
        goal_space_train = [[-max_range, max_range], [-max_range, max_range], [0.45, 0.55]]
        goal_space_test = [[-max_range, max_range], [-max_range, max_range], [0.45, 0.55]]
        # Provide a function that maps from the state space to the end goal space.
        # This is used to
        # (i) determine whether the agent should be given the sparse reward and
        # (ii) for Hindsight Experience Replay to determine which end goal was achieved after a sequence of actions.
        project_state_to_end_goal = lambda sim, state: state[:3]
        # Set end goal achievement thresholds. If the agent is within the threshold for each dimension,
        # the end goal has been achieved and the reward of 0 is granted.
        # For the Ant Reacher task, the end goal will be the desired (x,y) position of the torso
        len_threshold = 0.4
        height_threshold = 0.2
        end_goal_thresholds = np.array([len_threshold, len_threshold, height_threshold])
        # Provide range for each dimension of subgoal space in order to configure subgoal actor networks.
        # Subgoal space can be the same as the state space or some other projection out of the state space.
        # The subgoal space in the Ant Reacher task is the desired (x,y,z) position and (x,y,z) translational velocity of the torso
        cage_max_dim = 8
        max_height = 1
        max_velo = 3
        subgoal_bounds = np.array(
            [[-cage_max_dim, cage_max_dim], [-cage_max_dim, cage_max_dim], [0, max_height], [-max_velo, max_velo],
             [-max_velo, max_velo]])
        # Provide state to subgoal projection function: clamp torso height to
        # max_height and the first two velocity components to +/- max_velo.
        # a = np.concatenate((sim.data.qpos[:2], np.array([4 if sim.data.qvel[i] > 4 else -4 if sim.data.qvel[i] < -4 else sim.data.qvel[i] for i in range(3)])))
        project_state_to_subgoal = lambda sim, state: np.concatenate((sim.data.qpos[:2], np.array(
            [1 if sim.data.qpos[2] > 1 else sim.data.qpos[2]]), np.array(
            [3 if sim.data.qvel[i] > 3 else -3 if sim.data.qvel[i] < -3 else sim.data.qvel[i] for i in range(2)])))
        # Set subgoal achievement thresholds
        velo_threshold = 0.8
        # NOTE(review): quat_threshold is only used by the commented-out
        # 10-dimensional threshold variant below.
        quat_threshold = 0.5
        # subgoal_thresholds = np.array([len_threshold, len_threshold, height_threshold, quat_threshold, quat_threshold, quat_threshold, quat_threshold, velo_threshold, velo_threshold, velo_threshold])
        subgoal_thresholds = np.array([len_threshold, len_threshold, height_threshold, velo_threshold, velo_threshold])
        ant_env.AntEnv.__init__(
            self, 'ant_four_rooms/environment.xml', n_substeps=15,
            reward_type=reward_type, name=name, goal_space_train=goal_space_train, goal_space_test=goal_space_test,
            project_state_to_end_goal=project_state_to_end_goal, project_state_to_subgoal=project_state_to_subgoal,
            end_goal_thresholds=end_goal_thresholds, initial_state_space=initial_state_space,
            subgoal_bounds=subgoal_bounds, subgoal_thresholds=subgoal_thresholds)
        utils.EzPickle.__init__(self)
| 4,170 | 1,441 |
# coding=utf-8
from OTLMOW.OTLModel.BaseClasses.AttributeInfo import AttributeInfo
from OTLMOW.OTLModel.BaseClasses.OTLAttribuut import OTLAttribuut
from OTLMOW.OTLModel.Datatypes.ComplexField import ComplexField
from OTLMOW.OTLModel.Datatypes.KlAanplantingswijzeSierbeplanting import KlAanplantingswijzeSierbeplanting
from OTLMOW.OTLModel.Datatypes.KlSierbeplContainer import KlSierbeplContainer
from OTLMOW.OTLModel.Datatypes.KlSierbeplPlantmaat import KlSierbeplPlantmaat
from OTLMOW.OTLModel.Datatypes.KlVegetatiePlantverband import KlVegetatiePlantverband
from OTLMOW.OTLModel.Datatypes.NonNegIntegerField import NonNegIntegerField
# Generated with OTLComplexDatatypeCreator. To modify: extend, do not edit
class DtcSierbeplAanlegWaarden(AttributeInfo):
def __init__(self, parent=None):
AttributeInfo.__init__(self, parent)
self._aanplantingswijze = OTLAttribuut(field=KlAanplantingswijzeSierbeplanting,
naam='aanplantingswijze',
label='aanplantingswijze',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#DtcSierbeplAanleg.aanplantingswijze',
definition='Manier van aanplanten.',
owner=self)
self._containermaat = OTLAttribuut(field=KlSierbeplContainer,
naam='containermaat',
label='containermaat',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#DtcSierbeplAanleg.containermaat',
definition='De grootte van de pot of container waarin de plant wordt geleverd. De P staat voor pot, de C voor container. Het getal geeft de grootte weer in centimeter.',
owner=self)
self._plantdichtheid = OTLAttribuut(field=NonNegIntegerField,
naam='plantdichtheid',
label='plantdichtheid',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#DtcSierbeplAanleg.plantdichtheid',
definition='Aantal planten per vierkante meter.',
owner=self)
self._plantmaat = OTLAttribuut(field=KlSierbeplPlantmaat,
naam='plantmaat',
label='plantmaat',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#DtcSierbeplAanleg.plantmaat',
definition='De hoogte van de plant in cm gemeten tussen een minimum en maximum waarde.',
owner=self)
self._plantverband = OTLAttribuut(field=KlVegetatiePlantverband,
naam='plantverband',
label='plantverband',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#DtcSierbeplAanleg.plantverband',
definition='De wijze waarop de planten zijn geschikt.',
owner=self)
@property
def aanplantingswijze(self):
"""Manier van aanplanten."""
return self._aanplantingswijze.get_waarde()
@aanplantingswijze.setter
def aanplantingswijze(self, value):
self._aanplantingswijze.set_waarde(value, owner=self._parent)
@property
def containermaat(self):
"""De grootte van de pot of container waarin de plant wordt geleverd. De P staat voor pot, de C voor container. Het getal geeft de grootte weer in centimeter."""
return self._containermaat.get_waarde()
@containermaat.setter
def containermaat(self, value):
self._containermaat.set_waarde(value, owner=self._parent)
@property
def plantdichtheid(self):
"""Aantal planten per vierkante meter."""
return self._plantdichtheid.get_waarde()
@plantdichtheid.setter
def plantdichtheid(self, value):
self._plantdichtheid.set_waarde(value, owner=self._parent)
@property
def plantmaat(self):
"""De hoogte van de plant in cm gemeten tussen een minimum en maximum waarde."""
return self._plantmaat.get_waarde()
@plantmaat.setter
def plantmaat(self, value):
self._plantmaat.set_waarde(value, owner=self._parent)
@property
def plantverband(self):
"""De wijze waarop de planten zijn geschikt."""
return self._plantverband.get_waarde()
@plantverband.setter
def plantverband(self, value):
self._plantverband.set_waarde(value, owner=self._parent)
# Generated with OTLComplexDatatypeCreator. To modify: extend, do not edit
class DtcSierbeplAanleg(ComplexField, AttributeInfo):
"""Complex datatype voor dat de aanleg van sierbeplanting beschrijft."""
naam = 'DtcSierbeplAanleg'
label = 'Sierbeplanting aanleg'
objectUri = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#DtcSierbeplAanleg'
definition = 'Complex datatype voor dat de aanleg van sierbeplanting beschrijft.'
waardeObject = DtcSierbeplAanlegWaarden
def __str__(self):
return ComplexField.__str__(self)
| 5,546 | 1,602 |
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
# Firestore collection holding the room documents to migrate.
ROOM_COLLECTION = "rooms"
# NOTE(review): the service-account key is referenced by a relative path, so
# the script must be run from the expected working directory.
cred = credentials.Certificate("../onlinetown-401f0-firebase-adminsdk-gab3z-1a7a54c2da.json");
firebase_admin.initialize_app(cred)
db = firestore.client()
# Migration table: old serverURL value -> replacement value.
SERVER_MAP = {
    "BLANK": "BLANK",
}
if __name__ == '__main__':
    # Walk every room document and rewrite its serverURL through SERVER_MAP.
    docs = db.collection(ROOM_COLLECTION).stream()
    for doc in docs:
        # map = doc.to_dict()['map']
        # print("{} map: {}".format(doc.id, map))
        # if (map == 100 or map == 101):
        #     print("Changing {}".format(doc.id))
        #     db.collection(ROOM_COLLECTION).document(doc.id).update({'map': 140})
        newServer = ""
        try:
            server = doc.to_dict()['serverURL']
            try:
                # Only documents whose serverURL has an entry in SERVER_MAP
                # are updated in Firestore.
                newServer = SERVER_MAP[server]
                db.collection(ROOM_COLLECTION).document(doc.id).update({'serverURL': newServer})
            except KeyError:
                # serverURL not in the migration map; leave the document as-is.
                newServer = server
        except KeyError:
            # Document has no serverURL field at all.
            server = "KeyError"
            newServer = ""
        # One line per document: old value, new value.
        print(server, newServer)
| 1,033 | 362 |
#!/usr/bin/env python
#===============================================================================
# Copyright (c) 2014 Geoscience Australia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither Geoscience Australia nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
'''
Created on 21/02/2013
@author: u76345
'''
import os
import sys
import logging
from datetime import datetime, time
from agdc import Stacker
from eotools.utils import log_multiline
# Set top level standard output
# Console handler: INFO-level, bare-message output to stdout.
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(logging.INFO)
console_formatter = logging.Formatter('%(message)s')
console_handler.setFormatter(console_formatter)
# Module logger; only configure it if nothing else has set a level yet,
# so an embedding application keeps control of logging.
logger = logging.getLogger(__name__)
if not logger.level:
    logger.setLevel(logging.DEBUG) # Default logging level for all modules
logger.addHandler(console_handler)
class PQAStacker(Stacker):
    """Stacker subclass for producing temporal stacks of derived PQA datasets.

    Acts as a template: override derive_datasets() to produce custom
    derived outputs for each temporal slice.
    """
    def derive_datasets(self, input_dataset_info, stack_output_info, tile_type_info):
        """Create one or more GDAL-supported, single-band output datasets
        for a single temporal slice.

        Overrides the abstract function in the Stacker class; called from
        Stacker.stack_derived() for every input tile.

        Arguments:
            input_dataset_info: dict keyed by processing level (e.g. ORTHO,
                NBAR, PQA, DEM), each value holding the tile's metadata:
                band name/tag, start/end datetimes, start/end rows, level
                name, nodata value, path, satellite/sensor tags, tile layer
                and tile pathname.
            stack_output_info: dict with stack output details obtained from
                the stacker object: x_index, y_index, stack_output_dir, and
                optional start_datetime/end_datetime/satellite/sensor
                filters (each may be None).
            tile_type_info: dict describing the tile type, obtained from
                the stacker object (stacker.tile_type_info): crs,
                file_extension, file_format, format_options,
                tile_directory, tile_type_id/name, unit, x/y origins,
                pixel sizes, pixel counts and sizes.

        Returns:
            Dict keyed by stack filename, containing filenames of the
            GDAL-supported output datasets created by this function. Each
            key is used as the output filename for the VRT temporal stack,
            and every created dataset must contain exactly one band.
        """
        # Placeholder: delegate to the default implementation. Replace this
        # with custom derivation logic, using Stacker.derive_datasets() as a
        # template (Stacker.get_pqa_mask() is a useful helper).
        return Stacker.derive_datasets(self, input_dataset_info, stack_output_info, tile_type_info)
if __name__ == '__main__':
    def date2datetime(input_date, time_offset=time.min):
        """Combine a date with a time-of-day; pass falsy input through as None."""
        return datetime.combine(input_date, time_offset) if input_date else None
    # The Stacker base class parses command line parameters on construction.
    pqa_stacker = PQAStacker()
    if pqa_stacker.debug:
        console_handler.setLevel(logging.DEBUG)
    # Required command line parameters.
    assert pqa_stacker.x_index, 'Tile X-index not specified (-x or --x_index)'
    assert pqa_stacker.y_index, 'Tile Y-index not specified (-y or --y_index)'
    assert pqa_stacker.output_dir, 'Output directory not specified (-o or --output)'
    # Build the temporal stack; dates become datetimes spanning whole days.
    stack_info_dict = pqa_stacker.stack_derived(
        x_index=pqa_stacker.x_index,
        y_index=pqa_stacker.y_index,
        stack_output_dir=pqa_stacker.output_dir,
        start_datetime=date2datetime(pqa_stacker.start_date, time.min),
        end_datetime=date2datetime(pqa_stacker.end_date, time.max),
        satellite=pqa_stacker.satellite,
        sensor=pqa_stacker.sensor)
    log_multiline(logger.debug, stack_info_dict, 'stack_info_dict', '\t')
    logger.info('Finished creating %d temporal stack files in %s.',
                len(stack_info_dict), pqa_stacker.output_dir)
| 8,542 | 3,129 |
"""/**
* @author [Jai Miles]
* @email [jaimiles23@gmail.com]
* @create date 2020-05-21 11:55:58
* @modify date 2020-06-16 23:33:58
* @desc [
SC_Difficulty class with methods to set speed challenge difficulty:
- Ask for difficulties
- Acknowledge difficulty message.
]
*/
"""
##########
# Imports
##########
from statistics import mean
import random
from logs import log_func_name, logger
from aux_utils.create_tuple_message_clauses import get_linear_nlg
from pause.pauser import Pauser
import speed_challenge.data
##########
# Imports
##########
class SC_Difficulty(object):
    """Speed-challenge difficulty handling: builds the prompts asking the
    player for a difficulty and the messages acknowledging their choice."""
    ##########
    # Ask for Difficulty
    ##########
    @staticmethod
    @log_func_name
    def get_q_sc_difficulty(player_object, ) -> str:
        """Master method to return what difficulty prompt."""
        speech_list = []
        # Players with fewer than two plays also hear the list of
        # difficulty options and how to ask for help.
        if player_object.sc_plays < 2:
            speech_list += Pauser.make_ms_pause_level_list(
                SC_Difficulty.get_ms_difficulty_list(), 2.1,
                SC_Difficulty.h_get_ms_can_ask_help(), 1.75)
        speech_list.append(SC_Difficulty.h_get_ms_what_difficulty())
        return ' '.join(speech_list)
    @staticmethod
    @log_func_name
    def h_get_ms_what_difficulty() -> str:
        """Helper method returns prompt asking what difficulty."""
        return get_linear_nlg(speed_challenge.data.MMT_WHAT_DIFFICULTY)
    @staticmethod
    @log_func_name
    def get_ms_difficulty_list() -> str:
        """Returns message of list of difficulties user can select."""
        return get_linear_nlg(speed_challenge.data.MMT_CAN_USE_DIFF)
    @staticmethod
    @log_func_name
    def h_get_ms_can_ask_help() -> str:
        """Tells the user to ask for help to hear about the difficulties."""
        return speed_challenge.data.MS_GET_DIFF_HELP
    @staticmethod
    @log_func_name
    def get_ms_not_register() -> str:
        """Returns message that did not register user's input."""
        return get_linear_nlg(speed_challenge.data.MTT_TRY_AGAIN)
    ##########
    # Acknowledge Difficulty
    ##########
    @staticmethod
    @log_func_name
    def get_ms_using_difficulty(difficulty: str) -> str:
        """Returns message that will use difficulty."""
        chosen = random.choice(speed_challenge.data.MT_USE)
        formatted = speed_challenge.data.MS_DIFFICULTY_FORMAT.format(difficulty)
        return ' '.join((chosen, formatted))
| 2,645 | 920 |
"""
pyTailor Example 7
This example introduces the following NEW concepts:
- Use BranchTask to "branch out" a DAG
- For BranchTask definitions:
- Use *branch_files* to specify which files to use for branching
*branch_files* is given as one or more file tags.
"""
from pytailor import (
PythonTask,
BranchTask,
DAG,
Workflow,
Project,
FileSet,
Files,
Outputs,
)
### workflow definition ###
files = Files()
outputs = Outputs()
with DAG(name="dag") as dag:
    # The BranchTask duplicates its inner sub-dag once per branch, driven
    # by the *testfiles* file tag given in branch_data/branch_files.
    with BranchTask(
        name="branch",
        branch_data=[files.testfiles],
        branch_files=[files.testfiles],
    ):
        with DAG(name="sub-dag") as sub_dag:
            # Download the branch's files, then recursively glob for .txt.
            t1 = PythonTask(
                function="glob.glob",
                name="task 2",
                args=["**/*.txt"],
                kwargs={"recursive": True},
                download=files.testfiles,
                output_to=outputs.glob_res,
            )
            # Print the branch's files and the glob result after t1 is done.
            PythonTask(
                function="builtins.print",
                name="task 3",
                args=[files.testfiles, outputs.glob_res],
                parents=t1,
            )
### workflow run ###
# open a project
prj = Project.from_name("Test")
# create a fileset and upload files
# NOTE(review): assumes the two files exist under ./testfiles — confirm
# before running.
fileset = FileSet(prj)
fileset.upload(testfiles=["testfiles/testfile_01.txt", "testfiles/testfile_02.txt"])
# create a workflow:
wf = Workflow(project=prj, dag=dag, name="branch workflow 2", fileset=fileset)
# run the workflow
wf.run()
# check the status of the workflow
print("The workflow finished with state:")
print(wf.state)
| 1,600 | 487 |
from drf_spectacular.validation import validate_schema
from rest_framework.test import APIRequestFactory
from grandchallenge.api.urls import SchemaView
def test_schema_is_valid():
    """The generated OpenAPI schema must pass drf-spectacular validation."""
    view = SchemaView()
    # Hitting the view once forces the url conf to be initialized.
    factory = APIRequestFactory()
    view.get(factory.get("/"))
    generator = view.generator_class(
        urlconf=view.urlconf, api_version=view.api_version
    )
    schema = generator.get_schema(request=None, public=view.serve_public)
    validate_schema(schema)
# TODO: fix the warnings from types that could not be inferred
# from drf_spectacular.drainage import GENERATOR_STATS
# assert not GENERATOR_STATS
| 716 | 221 |
import socket
# Minimal TCP client: connect, print one message from the server, disconnect.
s = socket.socket()
#host=socket.gethostname()
#print(host)
host = ''  # '' is treated as the local host by connect()
port = 112
s.connect((host, port))
# BUG FIX: socket.recv() requires a buffer-size argument; calling it with
# no arguments raises TypeError.
print(s.recv(1024))
s.close()
| 141 | 63 |
from SimConnect import AircraftRequests, SimConnect
import csv
import os
import time
class Telemetry():
    """Polls aircraft telemetry via SimConnect and appends samples to a CSV log."""
    def __init__(self, keys):
        # keys: the SimConnect request names to sample on each poll.
        self.requests = self._make_connection()
        self.keys = keys
    def _make_connection(self):
        """Open a SimConnect session and return its request helper."""
        return AircraftRequests(SimConnect(), _time=0)
    def get_data(self):
        """Snapshot the tracked keys, prefixed with a 'time' timestamp."""
        sample = {'time': time.time()}
        sample.update((key, self.requests.get(key)) for key in self.keys)
        return sample
    def write_log(self, data, path):
        """Append one row to the CSV at *path*, writing a header for new files."""
        is_new_file = not os.path.exists(path)
        with open(path, 'a', newline='') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=list(data))
            if is_new_file:
                writer.writeheader()
            writer.writerow(data)
    def listen(self, path, interval=1.0):
        """Poll forever, logging one sample every *interval* seconds."""
        print(f'Listening for FS events with {interval} second delay')
        while True:
            sample = self.get_data()
            self.write_log(sample, path)
            print(f'Logged event at {sample["time"]}')
            time.sleep(interval)
#!/usr/bin/python
def run_test():
    """Demonstrate str.translate: map vowels to digits, then digits back."""
    vowels = 'aeiou'
    digits = '54321'
    sample = 'this is string example....wow!!!'
    print(sample)
    # Forward mapping: each vowel becomes its corresponding digit.
    print(sample.translate(str.maketrans(vowels, digits)))
    # Reverse mapping applied to the *original* string (which contains no
    # digits, so it prints unchanged).
    print(sample.translate(str.maketrans(digits, vowels)))
# Run the demo only when executed as a script.
if __name__ == '__main__':
    run_test()
# -*- coding: utf-8 -*-
from autoselenium.driver import Driver
from autoselenium.download import download_driver, download_default_driver, get_version
__author__ = 'saizk'
# Public API of the autoselenium package.
__all__ = [
    'Driver',
    'download_driver',
    'download_default_driver',
    'get_version'
]
| 276 | 94 |
import pandas as pd
import requests
class hhs_extract:
    """Extracts county-level hospitalization data from the Socrata HHS API."""
    # initialize with api_key
    def __init__(self, bearer_token):
        self.api_key = bearer_token
    # extract HHS data
    def extract_data(self) -> pd.DataFrame:
        """Page through the HHS endpoint and return all rows as one DataFrame.

        Raises requests.HTTPError on a non-2xx response.
        """
        # read data from Socrata HHS API for county level hospitalizations
        # at CMS registered hospitals, 50000 rows per page.
        page_size = 50000
        offset = 0
        frames = []
        while True:
            payload = {'$$app_token': self.api_key, '$limit': page_size, '$offset': offset}
            resp = requests.get(
                "https://healthdata.gov/resource/anag-cw7u.json",
                params=payload,
                timeout=60)  # BUG FIX: requests has no default timeout; never hang forever
            resp.raise_for_status()  # fail loudly instead of parsing an error payload
            page = pd.DataFrame.from_records(resp.json())
            frames.append(page)
            if len(page) == 0:
                # Empty page: all rows retrieved.
                break
            offset += len(page)
        return pd.concat(frames)
class cdc_extract:
    """Extracts county-level vaccination data from the Socrata CDC API."""
    # initialize with api_key
    def __init__(self, bearer_token):
        self.api_key = bearer_token
    # extract cdc data
    def extract_data(self) -> pd.DataFrame:
        """Page through the CDC endpoint and return all rows as one DataFrame.

        Raises requests.HTTPError on a non-2xx response.
        """
        # read data from Socrata CDC API for county level vaccinations
        # across all dates, 50000 rows per page.
        page_size = 50000
        offset = 0
        frames = []
        while True:
            payload = {'$$app_token': self.api_key, '$limit': page_size, '$offset': offset}
            resp = requests.get(
                "https://data.cdc.gov/resource/8xkx-amqh.json",
                params=payload,
                timeout=60)  # BUG FIX: requests has no default timeout; never hang forever
            resp.raise_for_status()  # fail loudly instead of parsing an error payload
            page = pd.DataFrame.from_records(resp.json())
            frames.append(page)
            if len(page) == 0:
                # Empty page: all rows retrieved.
                break
            offset += len(page)
        return pd.concat(frames)
import socket
import sys
# Simple sequential TCP port scanner for the host given as argv[1].
for port in range(1, 65536):  # BUG FIX: include port 65535 (was off by one)
    skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    skt.settimeout(0.5)  # avoid hanging on filtered ports
    try:
        # BUG FIX: connect() returns None, so the original `connect(...) == 0`
        # test could never be true and no open port was ever reported.
        # connect_ex() returns an errno value: 0 means the connection succeeded.
        if skt.connect_ex((sys.argv[1], port)) == 0:
            print("Port: {0} OPEN".format(port))
    except OSError:
        continue
    finally:
        # Close the socket on every path, not only on success.
        skt.close()
| 263 | 102 |
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
def pyunit_unique():
    """Test H2OFrame.unique(): species counts, time-column type retention,
    NA handling vs pandas, and domain recalculation after filtering."""
    # iris species column has exactly 3 distinct values.
    iris = h2o.import_file(pyunit_utils.locate("smalldata/iris/iris.csv"))
    uniques = iris[4].unique()
    rows, cols = uniques.dim
    assert rows == 3 and cols == 1, "Expected 3 rows and 1 column, but got {0} rows and {1} column".format(rows,cols)
    assert "Iris-setosa" in uniques[0], "Expected Iris-setosa to be in the set of unique species, but it wasn't"
    assert "Iris-virginica" in uniques[0], "Expected Iris-virginica to be in the set of unique species, but it wasn't"
    assert "Iris-versicolor" in uniques[0], "Expected Iris-versicolor to be in the set of unique species, but it wasn't"
    # unique() must preserve the "time" column type, also after refresh().
    fr = h2o.create_frame(rows=5, cols=1, time_fraction=1)
    assert fr.type(0) == "time"
    uf = fr.unique()
    assert uf.type(0) == "time"
    uf.refresh()
    assert uf.type(0) == "time"
    # include_nas=True must count NA as one extra unique level compared to
    # pandas' unique() (which includes NaN by default).
    prostate = h2o.import_file(pyunit_utils.locate("smalldata/parser/csv2orc/prostate_NA.csv"))
    prostate["GLEASON"] = prostate["GLEASON"].asfactor()
    uniques = prostate["GLEASON"].unique(include_nas=True)
    uniques_without_nas = prostate["GLEASON"].unique()
    prostate_pandas = prostate.as_data_frame()
    uniques_pandas = prostate_pandas["GLEASON"].unique()
    assert uniques.nrows == len(uniques_pandas)
    assert uniques_without_nas.nrows == len(uniques_pandas) - 1
    # make sure domains are recalculated with each temp assign
    df_example = h2o.H2OFrame({'time': ['M','M','M','D','D','M','M','D'],
                               'amount': [1,4,5,0,0,1,3,0]})
    df_example['amount'] = df_example['amount'].asfactor()
    filtered = df_example[df_example['time']=='D', 'amount']
    uniques = filtered['amount'].unique()
    assert len(uniques) == 1
    assert uniques.as_data_frame().iat[0,0] == 0
# Standalone: run through the pyunit harness; imported: run directly.
if __name__ == "__main__":
    pyunit_utils.standalone_test(pyunit_unique)
else:
    pyunit_unique()
| 1,933 | 717 |
# Program that reads integers from 1 to 10 from the keyboard and tells the
# user whether each guess is higher or lower than a random target number;
# the program finishes when the user guesses correctly.
# BUG FIX: the module is `random`, not `randon`, and the original script
# ended in a dangling `if` (syntax error) — the game loop is completed here.
from random import randint
aleatorio = randint(1, 10)
while True:
    chute = int(input('Guess a number from 1 to 10: '))
    if chute == aleatorio:
        print('You got it!')
        break
    elif chute > aleatorio:
        print('Too high, try again.')
    else:
        print('Too low, try again.')
# init for externals package
from . import argparse
| 52 | 14 |
import socket
# Minimal TCP ping server: accept connections and answer 'ping' with 'pong'.
s = socket.socket()
host = socket.gethostname()
port = 12346
s.bind((host, port))
s.listen(5)
while True:
    c, addr = s.accept()
    print("connection from", addr)
    rec = c.recv(1024).decode("utf-8")
    print(rec)
    if rec == 'ping':
        # BUG FIX: sendto() with an explicit address is invalid on a
        # connected TCP socket; sendall() is the correct call for the
        # accepted connection.
        c.sendall('pong'.encode())
    c.close()
from datetime import datetime, timezone
import requests
from schemas import Contest
from spider.utils import update_platform
def main():
    """Fetch Luogu's current contest list and push it to the platform."""
    headers = {
        "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.109 "
                      "Safari/537.36 "
    }
    resp = requests.get(
        "https://www.luogu.org/contest/list?page=1&_contentOnly=1",
        headers=headers,
        timeout=30,  # BUG FIX: requests has no default timeout; don't hang the spider
    )
    resp.raise_for_status()  # surface HTTP errors before attempting to parse JSON
    contests = resp.json()["currentData"]["contests"]["result"]
    data = []
    tz = timezone.utc
    for contest in contests:
        link = f'https://www.luogu.org/contest/{contest["id"]}'
        # The API reports epoch seconds; normalise to timezone-aware UTC.
        start_time = datetime.fromtimestamp(contest["startTime"], tz=tz)
        end_time = datetime.fromtimestamp(contest["endTime"], tz=tz)
        data.append(
            Contest(
                name=contest["name"],
                link=link,
                start_time=start_time,
                end_time=end_time,
                contest_id=link.split("/")[-1],
            )
        )
    update_platform("洛谷", data)
# Script entry point.
if __name__ == "__main__":
    main()
| 1,085 | 374 |
from processing import *
from decodingModel import *
# Using Transfer learning
# Using Transfer learning
# Frozen VGG network used as the feature extractor.
feature_extract_model = VggModel()
#Decoding model
# NOTE(review): `model()` presumably builds the colorization decoder —
# confirm against decodingModel.
colorize = model()
#prepare data in hard disk
# Pre-extract features for the dataset and cache them under save_file.
PrepareData(datapath = "/content/data" , save_file = "/content/processed/" ,
target_size = (224 , 224) , batch_size = 32 , feature_extract_model = feature_extract_model)
training_dir = "/content/processed"
num_train_samples = 1000
batch_size = 32
steps_per_epoch = np.floor(num_train_samples/batch_size)
epochs = 200
# Train one epoch at a time so a checkpoint can be saved every 10 epochs.
for i in range(epochs):
    generator = data_generator_baseline(training_dir, num_train_samples, batch_size)
    fit_history = colorize.fit_generator(generator, epochs=1, steps_per_epoch=steps_per_epoch, verbose=1)
    if i % 10 == 0:
        colorize.save('model_merge_' + str(i) + '.h5')
# Colorize a batch of test images and display the results.
X = test_images(path = "/content/oldes" , shape = (224 , 224) , batch_size = 2 ,
feature_extract_model = feature_extract_model , model = colorize )
show_images(X , width = 20 , hight = 20 , columns = 2 , rows = 1)
| 1,054 | 399 |
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############
import os
import json
import types
import shutil
import pkgutil
import getpass
import tempfile
import itertools
from base64 import urlsafe_b64encode
import yaml
import requests
from cloudify_rest_client import CloudifyClient
from cloudify_rest_client.client import HTTPClient
from cloudify.cluster_status import CloudifyNodeType
from cloudify_rest_client.utils import is_kerberos_env
from cloudify_rest_client.exceptions import CloudifyClientError
from . import constants
from .exceptions import CloudifyCliError
_ENV_NAME = 'manager'
# Default CLI log file lives in a per-user temp directory.
DEFAULT_LOG_FILE = os.path.expanduser(
    '{0}/cloudify-{1}/cloudify-cli.log'.format(
        tempfile.gettempdir(), getpass.getuser()))
# Working directory: $CFY_WORKDIR (or the home dir) + the Cloudify base dir.
CLOUDIFY_WORKDIR = os.path.join(
    os.environ.get('CFY_WORKDIR', os.path.expanduser('~')),
    constants.CLOUDIFY_BASE_DIRECTORY_NAME)
# One subdirectory per profile; the active profile's name is stored in a file.
PROFILES_DIR = os.path.join(CLOUDIFY_WORKDIR, 'profiles')
ACTIVE_PROFILE = os.path.join(CLOUDIFY_WORKDIR, 'active.profile')
# Seconds between retries when talking to a cluster.
CLUSTER_RETRY_INTERVAL = 5
def delete_profile(profile_name):
    """Remove the named profile's directory; raise if it does not exist."""
    if not is_profile_exists(profile_name):
        raise CloudifyCliError(
            'Profile {0} does not exist'.format(profile_name))
    shutil.rmtree(get_profile_dir(profile_name))
def is_profile_exists(profile_name):
    """Whether a context file exists for the named profile."""
    try:
        context_path = get_context_path(profile_name)
    except CloudifyCliError:
        # Profile directory missing entirely.
        return False
    return os.path.isfile(context_path)
def assert_profile_exists(profile_name):
    """Raise CloudifyCliError unless the named profile exists."""
    if is_profile_exists(profile_name):
        return
    raise CloudifyCliError(
        'Profile {0} does not exist. You can run `cfy init {0}` to '
        'create the profile.'.format(profile_name))
def set_active_profile(profile_name):
    """Persist *profile_name* as the active profile and reload the global."""
    global profile
    with open(ACTIVE_PROFILE, 'w+') as fh:
        fh.write(profile_name)
    profile = get_profile_context(profile_name, suppress_error=True)
def get_active_profile():
    """Name of the active profile, or None when no profile is active."""
    if not os.path.isfile(ACTIVE_PROFILE):
        # We return None explicitly as no profile is active.
        return None
    with open(ACTIVE_PROFILE) as fh:
        return fh.read().strip()
def set_target_manager(manager_host):
    """Record the manager host targeted by subsequent cluster requests."""
    global target_manager
    target_manager = manager_host
def get_target_manager():
    """Return the manager host recorded by set_target_manager()."""
    return target_manager
def get_profile_names():
    """Names of stored profiles, skipping hidden entries and 'local'."""
    # TODO: This is too.. ambiguous. We should change it so there are
    # no exclusions.
    excluded = ['local']
    return [name for name in os.listdir(PROFILES_DIR)
            if name not in excluded and not name.startswith('.')]
def assert_manager_active():
    """Raise a CloudifyCliError unless a manager profile is in use."""
    if is_manager_active():
        return
    raise CloudifyCliError(
        'This command is only available when using a manager. '
        'Please use the `cfy profiles use` command to connect '
        'to a Cloudify Manager.')
def assert_local_active():
    """Raise a CloudifyCliError when a manager profile is in use."""
    if not is_manager_active():
        return
    raise CloudifyCliError(
        'This command is not available when using a manager. '
        'You can run `cfy profiles use local` to stop using a manager.')
def assert_credentials_set():
    """Raise a CloudifyCliError unless the credentials required for
    talking to a manager (username/password outside kerberos, plus the
    tenant) are available from the profile or environment."""
    error_msg = (
        'Manager {0} must be set in order to use a manager.\n'
        'You can set it in the profile by running '
        '`cfy profiles set {1}`, or you can set the `CLOUDIFY_{2}` '
        'environment variable.')
    if not get_kerberos_env():
        # Username and password are only mandatory outside kerberos.
        if not get_username():
            raise CloudifyCliError(
                error_msg.format('Username', '--manager-username', 'USERNAME'))
        if not get_password():
            raise CloudifyCliError(
                error_msg.format('Password', '--manager-password', 'PASSWORD'))
    if not get_tenant_name():
        raise CloudifyCliError(
            error_msg.format('Tenant', '--manager-tenant', 'TENANT'))
def is_manager_active():
    """True when the active profile points at an actual manager."""
    active = get_active_profile()
    if not active or active == 'local':
        return False
    context = get_profile_context(active, suppress_error=True)
    return bool(context and context.manager_ip)
def get_profile_context(profile_name=None, suppress_error=False):
    """Load the ProfileContext of *profile_name* (default: active profile).

    Attributes missing from the stored context are backfilled with fresh
    defaults. Raises CloudifyCliError for a missing profile (or the
    'local' pseudo-profile) unless *suppress_error* is set, in which case
    a default context is returned instead.
    """
    # empty profile with nothing but default values
    default = ProfileContext()
    profile_name = profile_name or get_active_profile()
    if profile_name == 'local':
        if suppress_error:
            return default
        raise CloudifyCliError('Local profile does not have context')
    try:
        path = get_context_path(profile_name)
        with open(path) as f:
            # BUG FIX: yaml.load() without an explicit Loader is deprecated
            # (and raises TypeError on PyYAML >= 6). yaml.Loader matches
            # ProfileContext.yaml_loader, so the !CloudifyProfileContext
            # tag is resolved correctly.
            context = yaml.load(f.read(), Loader=yaml.Loader)
        # fill the default with values from existing profile (the default is
        # used as base because some of the attributes may not be in the
        # existing profile file)
        for key, value in context.__dict__.items():
            setattr(default, key, value)
    except CloudifyCliError:
        if not suppress_error:
            raise
    return default
def config_initialized_with_logging():
    """
    This is for the Windows agent: plugin URLs from
    import_resolver are written to config.yaml during installation, so we can
    have a scenario where config exists but has no logger paths defined.
    """
    config_path = os.path.join(CLOUDIFY_WORKDIR, 'config.yaml')
    if not os.path.isfile(config_path):
        return False
    with open(config_path, 'r') as f:
        return 'logging' in f.read()
def is_initialized(profile_name=None):
    """Whether a profile (when *profile_name* is given) or the general
    work environment is initialized."""
    if profile_name:
        return get_profile_dir(profile_name) is not None
    return config_initialized_with_logging()
def get_context_path(profile_name, suppress_error=False):
    """Path of the profile's context file, or None when the profile
    directory is missing and *suppress_error* is set."""
    base_dir = get_profile_dir(profile_name, suppress_error)
    if base_dir:
        return os.path.join(
            base_dir, constants.CLOUDIFY_PROFILE_CONTEXT_FILE_NAME)
    return None
def get_profile_dir(profile_name=None, suppress_error=False):
    """Directory of the named (or active) profile; None or CloudifyCliError
    (per *suppress_error*) when it does not exist."""
    selected = profile_name or get_active_profile()
    if selected:
        candidate = os.path.join(PROFILES_DIR, selected)
        if os.path.isdir(candidate):
            return candidate
    if suppress_error:
        return None
    raise CloudifyCliError('Profile directory does not exist')
def raise_uninitialized():
    """Raise the standard 'environment not initialized' CLI error."""
    error = CloudifyCliError('Cloudify environment is not initialized')
    error.possible_solutions = ["Run 'cfy init'"]
    raise error
def is_cluster(client_profile=None):
    """Truthy when the profile's cluster dict contains manager nodes.

    Older profiles stored the cluster as a list; those are not clusters
    in the current sense.
    """
    if client_profile is None:
        client_profile = profile
    cluster = client_profile.cluster
    return (not isinstance(cluster, list) and
            cluster.get(CloudifyNodeType.MANAGER))
def get_rest_client(client_profile=None,
                    rest_host=None,
                    rest_port=None,
                    rest_protocol=None,
                    rest_cert=None,
                    username=None,
                    password=None,
                    tenant_name=None,
                    trust_all=False,
                    cluster=None,
                    kerberos_env=None):
    """Build a REST client for the given (or active) profile.

    Explicit arguments override profile/environment values; anything left
    as None falls back to the profile. Returns a CloudifyClusterClient
    when the profile describes a cluster, otherwise a CloudifyClient.
    Raises CloudifyCliError when username/password are required (non
    kerberos environments) but missing.
    """
    if client_profile is None:
        client_profile = profile
    # Fall back to profile values for anything not passed explicitly.
    rest_host = rest_host or client_profile.manager_ip
    rest_port = rest_port or client_profile.rest_port
    rest_protocol = rest_protocol or client_profile.rest_protocol
    rest_cert = rest_cert or get_ssl_cert(client_profile)
    username = username or get_username(client_profile)
    password = password or get_password(client_profile)
    tenant_name = tenant_name or get_tenant_name(client_profile)
    trust_all = trust_all or get_ssl_trust_all()
    headers = get_auth_header(username, password)
    headers[constants.CLOUDIFY_TENANT_HEADER] = tenant_name
    cluster = cluster or is_cluster(client_profile)
    kerberos_env = kerberos_env \
        if kerberos_env is not None else client_profile.kerberos_env
    # Credentials are mandatory unless kerberos authentication applies.
    if kerberos_env is False \
            or (kerberos_env is None and not is_kerberos_env()):
        if not username:
            raise CloudifyCliError('Command failed: Missing Username')
        if not password:
            raise CloudifyCliError('Command failed: Missing password')
    if cluster:
        client = CloudifyClusterClient(host=rest_host,
                                       port=rest_port,
                                       protocol=rest_protocol,
                                       headers=headers,
                                       cert=rest_cert,
                                       trust_all=trust_all,
                                       profile=client_profile,
                                       kerberos_env=kerberos_env)
    else:
        client = CloudifyClient(host=rest_host,
                                port=rest_port,
                                protocol=rest_protocol,
                                headers=headers,
                                cert=rest_cert,
                                trust_all=trust_all,
                                kerberos_env=kerberos_env)
    return client
def build_manager_host_string(ssh_user='', ip=''):
    """SSH host string (user@ip) for the current profile's manager."""
    return build_host_string(ip or profile.manager_ip, ssh_user)
def build_host_string(ip, ssh_user=''):
    """Return 'user@ip', defaulting the user from the active profile."""
    ssh_user = ssh_user or profile.ssh_user
    if not ssh_user:
        raise CloudifyCliError('`ssh_user` is not set in the current '
                               'profile. Please run '
                               '`cfy profiles set --ssh-user <ssh-user>`.')
    return '%s@%s' % (ssh_user, ip)
def get_default_rest_cert_local_path():
    """Default on-disk location of the manager's public REST certificate."""
    base = get_profile_dir(suppress_error=True) or CLOUDIFY_WORKDIR
    return os.path.join(base, constants.PUBLIC_REST_CERT)
def get_username(from_profile=None):
    """Manager username from the env var or profile; error when set in both."""
    if from_profile is None:
        from_profile = profile
    username = os.environ.get(constants.CLOUDIFY_USERNAME_ENV)
    if username and from_profile.manager_username:
        raise CloudifyCliError('Manager Username is set in profile *and* in '
                               'the `CLOUDIFY_USERNAME` env variable. Resolve '
                               'the conflict before continuing.\n'
                               'Either unset the env variable, or run '
                               '`cfy profiles unset --manager-username`')
    # Env var wins when only one source is set.
    return username or from_profile.manager_username
def get_password(from_profile=None):
    """Manager password from the env var or profile; error when set in both."""
    if from_profile is None:
        from_profile = profile
    password = os.environ.get(constants.CLOUDIFY_PASSWORD_ENV)
    if password and from_profile.manager_password:
        raise CloudifyCliError('Manager Password is set in profile *and* in '
                               'the `CLOUDIFY_PASSWORD` env variable. Resolve '
                               'the conflict before continuing.\n'
                               'Either unset the env variable, or run '
                               '`cfy profiles unset --manager-password`')
    # Env var wins when only one source is set.
    return password or from_profile.manager_password
def get_tenant_name(from_profile=None):
    """Manager tenant from the env var or profile; error when set in both."""
    if from_profile is None:
        from_profile = profile
    tenant = os.environ.get(constants.CLOUDIFY_TENANT_ENV)
    if tenant and from_profile.manager_tenant:
        raise CloudifyCliError('Manager Tenant is set in profile *and* in '
                               'the `CLOUDIFY_TENANT` env variable. Resolve '
                               'the conflict before continuing.\n'
                               'Either unset the env variable, or run '
                               '`cfy profiles unset --manager-tenant`')
    # Env var wins when only one source is set.
    return tenant or from_profile.manager_tenant
def get_kerberos_env(from_profile=None):
    """Whether the given (or active) profile uses kerberos authentication."""
    if from_profile is None:
        from_profile = profile
    return from_profile.kerberos_env
def get_ssl_cert(from_profile=None):
    """Return the path to a local copy of the manager's public certificate.
    :return: If the LOCAL_REST_CERT_FILE env var was set by the user *or* if
    `rest_certificate` is set in the profile - use it,
    If it wasn't set, check if the certificate file is found in its default
    location. If so - use it, otherwise - return None
    Note that if it is set in both profile and env var - an error will be
    raised
    """
    if from_profile is None:
        from_profile = profile
    cert = os.environ.get(constants.LOCAL_REST_CERT_FILE)
    if cert and from_profile.rest_certificate:
        raise CloudifyCliError('Rest Certificate is set in profile *and* in '
                               'the `LOCAL_REST_CERT_FILE` env variable. '
                               'Resolve the conflict before continuing.\n'
                               'Either unset the env variable, or run '
                               '`cfy profiles unset --rest_certificate`')
    # Env var or profile value, if exactly one is set.
    if cert or from_profile.rest_certificate:
        return cert or from_profile.rest_certificate
    # Neither set: fall back to the default location, if the file exists.
    default_cert_file = get_default_rest_cert_local_path()
    return default_cert_file if os.path.isfile(default_cert_file) else None
def get_ssl_trust_all():
    """True when the trust-all env var is set to a non-empty value."""
    return bool(os.environ.get(constants.CLOUDIFY_SSL_TRUST_ALL))
def get_version_data():
    """Parse the CLI's bundled VERSION resource as JSON."""
    return json.loads(pkgutil.get_data('cloudify_cli', 'VERSION'))
def get_manager_version_data(rest_client=None):
    """Version info of the connected manager, or None when unavailable."""
    if not rest_client:
        # No client supplied: build one from the active profile, bailing
        # out quietly when no usable profile exists.
        if not get_profile_context(suppress_error=True):
            return None
        try:
            rest_client = get_rest_client()
        except CloudifyCliError:
            return None
    try:
        version_data = rest_client.manager.get_version()
    except CloudifyClientError:
        return None
    version_data['ip'] = profile.manager_ip
    return version_data
class ProfileContext(yaml.YAMLObject):
    """Serializable context of a single CLI profile.

    Instances round-trip through the profile context file via the
    !CloudifyProfileContext YAML tag.
    """
    yaml_tag = u'!CloudifyProfileContext'
    yaml_loader = yaml.Loader
    def __init__(self, profile_name=None):
        # Note that __init__ is not called when loading from yaml.
        # When adding a new ProfileContext attribute, make sure that
        # all methods handle the case when the attribute is missing
        self._profile_name = profile_name
        self.manager_ip = None
        self.ssh_key = None
        self._ssh_port = None
        self.ssh_user = None
        self.provider_context = dict()
        self.manager_username = None
        self.manager_password = None
        self.manager_tenant = None
        self.rest_port = constants.DEFAULT_REST_PORT
        self.rest_protocol = constants.DEFAULT_REST_PROTOCOL
        self.rest_certificate = None
        self.kerberos_env = False
        self._cluster = dict()
    def to_dict(self):
        """Plain-dict view of the profile.

        Note: manager_password is deliberately not included.
        """
        return dict(
            name=self.profile_name,
            manager_ip=self.manager_ip,
            ssh_key_path=self.ssh_key,
            ssh_port=self.ssh_port,
            ssh_user=self.ssh_user,
            provider_context=self.provider_context,
            manager_username=self.manager_username,
            manager_tenant=self.manager_tenant,
            rest_port=self.rest_port,
            rest_protocol=self.rest_protocol,
            rest_certificate=self.rest_certificate,
            kerberos_env=self.kerberos_env,
            cluster=self.cluster
        )
    @property
    def ssh_port(self):
        """SSH port as a string (or None)."""
        return self._ssh_port
    @ssh_port.setter
    def ssh_port(self, ssh_port):
        # If the port is int, we want to change it to a string. Otherwise,
        # leave None as is
        ssh_port = str(ssh_port) if ssh_port else None
        self._ssh_port = ssh_port
    @property
    def profile_name(self):
        """Explicit profile name, falling back to the manager IP."""
        return getattr(self, '_profile_name', None) \
            or getattr(self, 'manager_ip', None)
    @property
    def cluster(self):
        # default the ._cluster attribute here, so that all callers can use it
        # as just ._cluster, even if it's not present in the source yaml
        if not hasattr(self, '_cluster'):
            self._cluster = dict()
        return self._cluster
    @cluster.setter
    def cluster(self, cluster):
        self._cluster = cluster
    @profile_name.setter
    def profile_name(self, profile_name):
        self._profile_name = profile_name
    def _get_context_path(self):
        # Context file path inside this profile's directory.
        init_path = get_profile_dir(self.profile_name)
        context_path = os.path.join(
            init_path,
            constants.CLOUDIFY_PROFILE_CONTEXT_FILE_NAME)
        return context_path
    @property
    def workdir(self):
        """This profile's directory under PROFILES_DIR."""
        return os.path.join(PROFILES_DIR, self.profile_name)
    def save(self, destination=None):
        """Serialize this context to YAML in *destination* (default: workdir)."""
        if not self.profile_name:
            raise CloudifyCliError('No profile name or Manager IP set')
        workdir = destination or self.workdir
        # Create a new file
        if not os.path.exists(workdir):
            os.makedirs(workdir)
        target_file_path = os.path.join(
            workdir,
            constants.CLOUDIFY_PROFILE_CONTEXT_FILE_NAME)
        with open(target_file_path, 'w') as f:
            f.write(yaml.dump(self))
def get_auth_header(username, password):
    """Build a Basic-auth HTTP header dict for the given credentials.

    Returns an empty dict when either credential is missing.
    """
    header = {}
    if username and password:
        credentials = '{0}:{1}'.format(username, password)
        # Bug fix: base64 operates on bytes in Python 3 — passing the str
        # directly raised TypeError. Encode first, then decode the result so
        # the header value is a str again.
        encoded_credentials = urlsafe_b64encode(
            credentials.encode('utf-8')).decode('ascii')
        header = {
            constants.CLOUDIFY_AUTHENTICATION_HEADER:
                constants.BASIC_AUTH_PREFIX + ' ' + encoded_credentials}
    return header
# attributes that can differ for each node in a cluster. Those will be updated
# in the profile when we switch to a new master.
# Dicts with these keys live in profile.cluster, and are added there during
# either `cfy cluster update-profile` (in which case some of them might be
# missing, eg. ssh_*), or during a `cfy cluster join`.
# If a value is missing, we will use the value from the last active manager.
# Only the IP is required.
# Note that not all attributes are allowed - username/password will be
# the same for every node in the cluster.
# Per-node connection attributes that may be copied from a cluster node dict
# onto the profile when switching masters (see _update_profile below).
CLUSTER_NODE_ATTRS = ['host_ip', 'host_type', 'rest_port', 'rest_protocol',
                      'ssh_port', 'ssh_user', 'ssh_key']
class ClusterHTTPClient(HTTPClient):
    """HTTP client that retries each request against every cluster manager.

    Nodes come from ``profile.cluster[CloudifyNodeType.MANAGER]``; the first
    responsive manager wins, and it is promoted to the front of the cluster
    list in the profile so it is tried first next time.
    """

    def __init__(self, *args, **kwargs):
        # 'profile' is consumed here so the base HTTPClient never sees it.
        profile = kwargs.pop('profile')
        super(ClusterHTTPClient, self).__init__(*args, **kwargs)
        if not profile.cluster:
            raise ValueError('Cluster client invoked for an empty cluster!')
        self._cluster = list(profile.cluster.get(CloudifyNodeType.MANAGER))
        self._profile = profile
        # Seed TLS settings from the first node; fall back to base defaults.
        first_node = self._cluster[0]
        self.cert = first_node.get('cert') or self.cert
        self.trust_all = first_node.get('trust_all') or self.trust_all
        # Default timeout tuple — presumably requests-style
        # (connect=5s, read=unbounded); TODO confirm against HTTPClient.
        self.default_timeout_sec = self.default_timeout_sec or (5, None)

    def do_request(self, *args, **kwargs):
        """Send a request, failing over across cluster managers.

        :raises CloudifyClientError: when every cluster node is offline.
        """
        # this request can be retried for each manager - if the data is
        # a generator, we need to copy it, so we can send it more than once
        copied_data = None
        if isinstance(kwargs.get('data'), types.GeneratorType):
            copied_data = itertools.tee(kwargs.pop('data'),
                                        len(self._cluster) + 1)
        if kwargs.get('timeout') is None:
            kwargs['timeout'] = self.default_timeout_sec
        if copied_data is not None:
            kwargs['data'] = copied_data[-1]
        # An explicitly targeted manager bypasses the failover loop entirely.
        manager_host = get_target_manager()
        if manager_host:
            self.host = manager_host
            return self._try_do_request(*args, **kwargs)
        # First try with the main manager ip given when creating the profile
        # with `cfy profiles use`
        self.host = self._profile.manager_ip
        response = self._try_do_request(*args, **kwargs)
        if response:
            return response
        for node_index, node in list(enumerate(
                self._profile.cluster[CloudifyNodeType.MANAGER])):
            # Skip the node already tried as the profile's main manager.
            if self._profile.manager_ip in [node['host_ip'], node['hostname']]:
                continue
            self._use_node(node)
            if copied_data is not None:
                kwargs['data'] = copied_data[node_index]
            response = self._try_do_request(*args, **kwargs)
            if response:
                return response
        raise CloudifyClientError('All cluster nodes are offline')

    def _try_do_request(self, *args, **kwargs):
        # Returns None (rather than raising) on connection errors and 502
        # responses, signalling the caller to try the next node; any other
        # CloudifyClientError is re-raised.
        try:
            return super(ClusterHTTPClient, self).do_request(*args,
                                                             **kwargs)
        except (requests.exceptions.ConnectionError,
                CloudifyClientError) as e:
            if isinstance(e, CloudifyClientError) and e.status_code != 502:
                raise

    def _use_node(self, node):
        # Point this client at `node`, adopt any per-node connection
        # settings it carries, and persist the switch to the profile.
        if node['host_ip'] == self.host:
            return
        self.host = node['host_ip']
        for attr in ['rest_port', 'rest_protocol', 'trust_all', 'cert']:
            new_value = node.get(attr)
            if new_value:
                setattr(self, attr, new_value)
        self._update_profile(node)

    def _update_profile(self, node):
        """
        Put the node at the start of the cluster list in profile.
        The client tries nodes in the order of the cluster list, so putting
        the node first will make the client try it first next time. This makes
        the client always try the last-known-active-manager first.
        """
        self._profile.cluster[CloudifyNodeType.MANAGER].remove(node)
        self._profile.cluster[CloudifyNodeType.MANAGER] = (
            [node] + self._profile.cluster[CloudifyNodeType.MANAGER])
        for node_attr in CLUSTER_NODE_ATTRS:
            if node_attr in node:
                setattr(self._profile, node_attr, node[node_attr])
        self._profile.save()
class CloudifyClusterClient(CloudifyClient):
    """A CloudifyClient that retries requests across the whole cluster.

    On a connection error it keeps trying every node in the cluster until
    it finds an active manager; once found, the profile is updated with
    that manager's address.
    """

    def __init__(self, profile, *args, **kwargs):
        # Store the profile before base initialization so client_class
        # (below) can read it while the client is being constructed.
        self._profile = profile
        super(CloudifyClusterClient, self).__init__(*args, **kwargs)

    def client_class(self, *args, **kwargs):
        # Inject the profile unless the caller supplied one explicitly.
        if 'profile' not in kwargs:
            kwargs['profile'] = self._profile
        return ClusterHTTPClient(*args, **kwargs)
# Module-level CLI state: the active profile (suppress_error presumably
# avoids raising when no profile has been initialized yet — TODO confirm),
# and an optional manager override, apparently read via get_target_manager().
profile = get_profile_context(suppress_error=True)
target_manager = None
| 23,434 | 6,720 |
import FWCore.ParameterSet.Config as cms
# output block for alcastream HCAL Isotrk
# output module
# module alcastreamHcalIsotrkOutput = PoolOutputModule
# Event-content PSet for the HCAL isolated-track AlCa stream: drop every
# product, then keep only the collections listed below.
alcastreamHcalIsotrkOutput = cms.PSet(
    outputCommands = cms.untracked.vstring('drop *',
        'keep *_offlineBeamSpot_*_*',
        'keep edmTriggerResults_*_*_*',
        'keep triggerTriggerEvent_*_*_*',
        'keep *_gtStage2Digis_*_*',
        'keep HcalNoiseSummary_hcalnoise_*_*',
        'keep *_hbhereco_*_*',
        'keep recoTracks_generalTracks_*_*',
        'keep recoTrackExtras_generalTracks_*_*',
        'keep *_IsoProd_*_*',
    )
)
| 972 | 240 |
#Python: String Representations of Objects
#Implement two vehicle classes:
#Car:
#The constructor for Car must take two arguments. The first of them is its maximum speed, and the second one is a string that denotes the units in which the speed is given: either "km/h" or "mph".
#The class must be implemented to return a string based on the arguments. For example, if car is an object of class Car with a maximum speed of 120, and the unit is "km/h", then printing car prints the following string: "Car with the maximum speed of 120 km/h", without quotes. If the maximum speed is 94 and the unit is "mph", then printing car prints the following string: "Car with the maximum speed of 94 mph", without quotes.
#Boat:
#The constructor for Boat must take a single argument denoting its maximum speed in knots.
#The class must be implemented to return a string based on the argument. For example, if boat is an object of class Boat with a maximum speed of 82, then printing boat prints the following string: "Boat with the maximum speed of 82 knots", without quotes.
#The implementations of the classes will be tested by a provided code stub on several input files. Each input file contains several queries, and each query constructs an object of one of the classes. It then prints the string representation of the object to the standard output.
#Constraints
#1 ≤ the number of queries in one test file ≤ 100
#The lengths of each of the words is at most 10.
#Sample Case 0
#Sample Input
#STDIN Function
#----- -------
#2 → number of queries, q = 2
#car 151 km/h → query parameters = ["car 151 km/h", "boat 77"]
#boat 77
#Sample Output
#Car with the maximum speed of 151 km/h
#Boat with the maximum speed of 77 knots
#Explanation
#There are 2 queries. In the first of them, an object of class Car with the maximum speed of 151 in km/h is constructed, and then its string representation is printed to the output. In the second query, an object of class Boat is constructed with the maximum speed of 77 knots, and then its string representation is printed to the output.
#Solution:
#!/bin/python3
import math
import os
import random
import re
import sys
class Car:
    """A vehicle whose string form reports its top speed and speed unit."""

    def __init__(self, maxspeed, speed_unit):
        # speed_unit is "km/h" or "mph" per the problem statement.
        self.maxspeed = maxspeed
        self.speed_unit = speed_unit

    def __str__(self):
        return "Car with the maximum speed of {} {}".format(
            self.maxspeed, self.speed_unit)
class Boat:
    """A vehicle whose string form reports its top speed in knots."""

    def __init__(self, maxspeed):
        self.maxspeed = maxspeed

    def __str__(self):
        return "Boat with the maximum speed of {} knots".format(self.maxspeed)
if __name__ == '__main__':
    # Read q query lines ("car <speed> <unit>" or "boat <speed>") from stdin
    # and write each vehicle's string form to the OUTPUT_PATH file.
    # Fixes: the output file is now closed via a context manager even when a
    # malformed query raises, and the never-used `queries` list is removed.
    with open(os.environ['OUTPUT_PATH'], 'w') as fptr:
        q = int(input())
        for _ in range(q):
            args = input().split()
            vehicle_type, params = args[0], args[1:]
            if vehicle_type == "car":
                max_speed, speed_unit = int(params[0]), params[1]
                vehicle = Car(max_speed, speed_unit)
            elif vehicle_type == "boat":
                max_speed = int(params[0])
                vehicle = Boat(max_speed)
            else:
                raise ValueError("invalid vehicle type")
            fptr.write("%s\n" % vehicle)
| 3,256 | 942 |
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Unit tests for the trip module.
from io import StringIO
from tests import util
import transitfeed
class DuplicateStopSequenceTestCase(util.TestCase):
    """Loading a feed with a duplicated stop_sequence reports InvalidValue."""

    def runTest(self):
        accumulator = util.RecordingProblemAccumulator(
            self, ("ExpirationDate", "NoServiceExceptions"))
        problems = transitfeed.ProblemReporter(accumulator)
        schedule = transitfeed.Schedule(problem_reporter=problems)
        schedule.load(util.data_path('duplicate_stop_sequence'),
                      extra_validation=True)
        e = accumulator.pop_exception('InvalidValue')
        self.assertEqual('stop_sequence', e.column_name)
        self.assertEqual(10, e.value)
        accumulator.assert_no_more_exceptions()
class MissingEndpointTimesTestCase(util.TestCase):
    """First/last stops missing arrival/departure times raise InvalidValue."""

    def runTest(self):
        accumulator = util.RecordingProblemAccumulator(
            self, ('ExpirationDate', 'NoServiceExceptions'))
        problems = transitfeed.ProblemReporter(accumulator)
        schedule = transitfeed.Schedule(problem_reporter=problems)
        schedule.load(util.data_path('missing_endpoint_times'),
                      extra_validation=True)
        e = accumulator.pop_invalid_value('arrival_time')
        self.assertEqual('', e.value)
        e = accumulator.pop_invalid_value('departure_time')
        self.assertEqual('', e.value)
class TripMemoryZipTestCase(util.MemoryZipTestCase):
    """Round-trips extra (non-spec) trip columns through an in-memory feed."""

    def assertLoadAndCheckExtraValues(self, schedule_file):
        """Load file-like schedule_file and check for extra trip columns."""
        load_problems = util.get_test_failure_problem_reporter(
            self, ("ExpirationDate", "UnrecognizedColumn"))
        loaded_schedule = transitfeed.Loader(schedule_file,
                                             loader_problems=load_problems,
                                             extra_validation=True).load()
        self.assertEqual("foo", loaded_schedule.get_trip("AB1")["t_foo"])
        self.assertEqual("", loaded_schedule.get_trip("AB2")["t_foo"])
        self.assertEqual("", loaded_schedule.get_trip("AB1")["n_foo"])
        self.assertEqual("bar", loaded_schedule.get_trip("AB2")["n_foo"])
        # Uncomment the following lines to print the string in testExtraFileColumn
        # print repr(zipfile.ZipFile(schedule_file).read("trips.txt"))
        # self.fail()

    def testExtraObjectAttribute(self):
        """Extra columns added to an object are preserved when writing."""
        schedule = self.MakeLoaderAndLoad()
        # Add an attribute to an existing trip
        trip1 = schedule.get_trip("AB1")
        trip1.t_foo = "foo"
        # Make a copy of trip_id=AB1 and add an attribute before AddTripObject
        trip2 = transitfeed.Trip(field_dict=trip1)
        trip2.trip_id = "AB2"
        trip2.t_foo = ""
        trip2.n_foo = "bar"
        schedule.add_trip_object(trip2)
        trip2.add_stop_time(stop=schedule.get_stop("BULLFROG"), stop_time="09:00:00")
        trip2.add_stop_time(stop=schedule.get_stop("STAGECOACH"), stop_time="09:30:00")
        saved_schedule_file = StringIO()
        schedule.write_google_transit_feed(saved_schedule_file)
        self.accumulator.assert_no_more_exceptions()
        self.assertLoadAndCheckExtraValues(saved_schedule_file)

    def testExtraFileColumn(self):
        """Extra columns loaded from a file are preserved when writing."""
        # Uncomment the code in assertLoadAndCheckExtraValues to generate this
        # string.
        self.SetArchiveContents(
            "trips.txt",
            "route_id,service_id,trip_id,t_foo,n_foo\n"
            "AB,FULLW,AB1,foo,\n"
            "AB,FULLW,AB2,,bar\n")
        self.AppendToArchiveContents(
            "stop_times.txt",
            "AB2,09:00:00,09:00:00,BULLFROG,1\n"
            "AB2,09:30:00,09:30:00,STAGECOACH,2\n")
        load1_problems = util.get_test_failure_problem_reporter(
            self, ("ExpirationDate", "UnrecognizedColumn"))
        schedule = self.MakeLoaderAndLoad(loader_problems=load1_problems)
        saved_schedule_file = StringIO()
        schedule.write_google_transit_feed(saved_schedule_file)
        self.assertLoadAndCheckExtraValues(saved_schedule_file)
class TripValidationTestCase(util.ValidationTestCase):
    """Trip.validate: required IDs, enum field ranges, and frequency overlap."""

    def runTest(self):
        trip = transitfeed.Trip()
        repr(trip)  # shouldn't crash
        schedule = self.SimpleSchedule()
        trip = transitfeed.Trip()
        repr(trip)  # shouldn't crash
        trip = transitfeed.Trip()
        trip.trip_headsign = '\xBA\xDF\x0D'  # Not valid ascii or utf8
        repr(trip)  # shouldn't crash
        # A fully populated, valid trip should produce no problems.
        trip.route_id = '054C'
        trip.service_id = 'WEEK'
        trip.trip_id = '054C-00'
        trip.trip_headsign = 'via Polish Hill'
        trip.trip_short_name = 'X12'
        trip.direction_id = '0'
        trip.block_id = None
        trip.shape_id = None
        trip.bikes_allowed = '1'
        trip.wheelchair_accessible = '2'
        trip.validate(self.problems)
        self.accumulator.assert_no_more_exceptions()
        repr(trip)  # shouldn't crash
        # missing route ID
        trip.route_id = None
        self.ValidateAndExpectMissingValue(trip, 'route_id')
        trip.route_id = '054C'
        # missing service ID
        trip.service_id = None
        self.ValidateAndExpectMissingValue(trip, 'service_id')
        trip.service_id = 'WEEK'
        # missing trip ID
        trip.trip_id = None
        self.ValidateAndExpectMissingValue(trip, 'trip_id')
        trip.trip_id = '054C-00'
        # invalid direction ID
        trip.direction_id = 'NORTH'
        self.ValidateAndExpectInvalidValue(trip, 'direction_id')
        trip.direction_id = '0'
        # invalid bikes_allowed
        trip.bikes_allowed = '3'
        self.ValidateAndExpectInvalidValue(trip, 'bikes_allowed')
        trip.bikes_allowed = None
        # invalid wheelchair_accessible
        trip.wheelchair_accessible = '3'
        self.ValidateAndExpectInvalidValue(trip, 'wheelchair_accessible')
        trip.wheelchair_accessible = None
        # AddTripObject validates that route_id, service_id, .... are found in the
        # schedule. The Validate calls made by self.Expect... above can't make this
        # check because trip is not in a schedule.
        trip.route_id = '054C-notfound'
        schedule.add_trip_object(trip, self.problems, True)
        e = self.accumulator.pop_exception('InvalidValue')
        self.assertEqual('route_id', e.column_name)
        self.accumulator.assert_no_more_exceptions()
        trip.route_id = '054C'
        # Make sure calling Trip.Validate validates that route_id and service_id
        # are found in the schedule.
        trip.service_id = 'WEEK-notfound'
        trip.validate(self.problems)
        e = self.accumulator.pop_exception('InvalidValue')
        self.assertEqual('service_id', e.column_name)
        self.accumulator.assert_no_more_exceptions()
        trip.service_id = 'WEEK'
        trip.validate(self.problems)
        self.accumulator.assert_no_more_exceptions()
        # expect no problems for non-overlapping periods
        trip.add_frequency("06:00:00", "12:00:00", 600)
        trip.add_frequency("01:00:00", "02:00:00", 1200)
        trip.add_frequency("04:00:00", "05:00:00", 1000)
        trip.add_frequency("12:00:00", "19:00:00", 700)
        trip.validate(self.problems)
        self.accumulator.assert_no_more_exceptions()
        trip.clear_frequencies()
        # overlapping headway periods
        trip.add_frequency("00:00:00", "12:00:00", 600)
        trip.add_frequency("06:00:00", "18:00:00", 1200)
        self.ValidateAndExpectOtherProblem(trip)
        trip.clear_frequencies()
        trip.add_frequency("12:00:00", "20:00:00", 600)
        trip.add_frequency("06:00:00", "18:00:00", 1200)
        self.ValidateAndExpectOtherProblem(trip)
        trip.clear_frequencies()
        trip.add_frequency("06:00:00", "12:00:00", 600)
        trip.add_frequency("00:00:00", "25:00:00", 1200)
        self.ValidateAndExpectOtherProblem(trip)
        trip.clear_frequencies()
        trip.add_frequency("00:00:00", "20:00:00", 600)
        trip.add_frequency("06:00:00", "18:00:00", 1200)
        self.ValidateAndExpectOtherProblem(trip)
        trip.clear_frequencies()
        self.accumulator.assert_no_more_exceptions()
class TripSequenceValidationTestCase(util.ValidationTestCase):
    """Stop times that go backwards in time trigger a 'Timetravel' problem."""

    def runTest(self):
        schedule = self.SimpleSchedule()
        # Make a new trip without any stop times
        trip = schedule.get_route("054C").add_trip(trip_id="054C-00")
        stop1 = schedule.get_stop('stop1')
        stop2 = schedule.get_stop('stop2')
        stop3 = schedule.get_stop('stop3')
        # stoptime2 is earlier than stoptime1 despite a later stop_sequence.
        stoptime1 = transitfeed.StopTime(self.problems, stop1,
                                         stop_time='12:00:00', stop_sequence=1)
        stoptime2 = transitfeed.StopTime(self.problems, stop2,
                                         stop_time='11:30:00', stop_sequence=2)
        stoptime3 = transitfeed.StopTime(self.problems, stop3,
                                         stop_time='12:15:00', stop_sequence=3)
        trip._add_stop_time_object_unordered(stoptime1, schedule)
        trip._add_stop_time_object_unordered(stoptime2, schedule)
        trip._add_stop_time_object_unordered(stoptime3, schedule)
        trip.validate(self.problems)
        e = self.accumulator.pop_exception('OtherProblem')
        self.assertTrue(e.format_problem().find('Timetravel detected') != -1)
        self.assertTrue(e.format_problem().find('number 2 in trip 054C-00') != -1)
        self.accumulator.assert_no_more_exceptions()
class TripServiceIDValidationTestCase(util.ValidationTestCase):
    """Adding a trip with an unknown service_id reports InvalidValue."""

    def runTest(self):
        schedule = self.SimpleSchedule()
        trip1 = transitfeed.Trip()
        trip1.route_id = "054C"
        trip1.service_id = "WEEKDAY"  # not defined in SimpleSchedule
        trip1.trip_id = "054C_WEEK"
        self.ExpectInvalidValueInClosure(column_name="service_id",
                                         value="WEEKDAY",
                                         c=lambda: schedule.add_trip_object(
                                             trip1, validate=True))
class TripDistanceFromStopToShapeValidationTestCase(util.ValidationTestCase):
    """shape_dist_traveled checks against a shape; a missing stop_lat must
    not crash validation."""

    def runTest(self):
        schedule = self.SimpleSchedule()
        stop1 = schedule.stops["stop1"]
        stop2 = schedule.stops["stop2"]
        stop3 = schedule.stops["stop3"]
        # Set shape_dist_traveled
        trip = schedule.trips["CITY1"]
        trip.clear_stop_times()
        trip.add_stop_time(stop1, stop_time="12:00:00", shape_dist_traveled=0)
        trip.add_stop_time(stop2, stop_time="12:00:45", shape_dist_traveled=500)
        trip.add_stop_time(stop3, stop_time="12:02:30", shape_dist_traveled=1500)
        trip.shape_id = "shape1"
        # Add a valid shape for the trip to the current schedule.
        shape = transitfeed.Shape("shape1")
        shape.add_point(48.2, 1.00, 0)
        shape.add_point(48.2, 1.01, 500)
        shape.add_point(48.2, 1.03, 1500)
        shape.max_distance = 1500
        schedule.add_shape_object(shape)
        # The schedule should validate with no problems.
        self.ExpectNoProblems(schedule)
        # Delete a stop latitude. This should not crash validation.
        stop1.stop_lat = None
        self.ValidateAndExpectMissingValue(schedule, "stop_lat")
class TripHasStopTimeValidationTestCase(util.ValidationTestCase):
    """A trip needs stop times; frequencies without stops are a TYPE_ERROR."""

    def runTest(self):
        schedule = self.SimpleSchedule()
        trip = schedule.get_route("054C").add_trip(trip_id="054C-00")
        # We should get an OtherProblem here because the trip has no stops.
        self.ValidateAndExpectOtherProblem(schedule)
        # It should trigger a TYPE_ERROR if there are frequencies for the trip
        # but no stops
        trip.add_frequency("01:00:00", "12:00:00", 600)
        schedule.validate(self.problems)
        self.accumulator.pop_exception('OtherProblem')  # pop first warning
        e = self.accumulator.pop_exception('OtherProblem')  # pop frequency error
        self.assertTrue(e.format_problem().find('Frequencies defined, but') != -1)
        self.assertTrue(e.format_problem().find('given in trip 054C-00') != -1)
        # Fixed: assertEquals is a deprecated alias removed in Python 3.12;
        # use assertEqual.
        self.assertEqual(transitfeed.TYPE_ERROR, e.type)
        self.accumulator.assert_no_more_exceptions()
        trip.clear_frequencies()
        # Add a stop, but with only one stop passengers have nowhere to exit!
        stop = transitfeed.Stop(36.425288, -117.133162, "Demo Stop 1", "STOP1")
        schedule.add_stop_object(stop)
        trip.add_stop_time(stop, arrival_time="5:11:00", departure_time="5:12:00")
        self.ValidateAndExpectOtherProblem(schedule)
        # Add another stop, and then validation should be happy.
        stop = transitfeed.Stop(36.424288, -117.133142, "Demo Stop 2", "STOP2")
        schedule.add_stop_object(stop)
        trip.add_stop_time(stop, arrival_time="5:15:00", departure_time="5:16:00")
        schedule.validate(self.problems)
        trip.add_stop_time(stop, stop_time="05:20:00")
        trip.add_stop_time(stop, stop_time="05:22:00")
        # Last stop must always have a time
        trip.add_stop_time(stop, arrival_secs=None, departure_secs=None)
        self.ExpectInvalidValueInClosure(
            'arrival_time',
            c=lambda: trip.get_end_time(loader_problems=self.problems))
class ShapeDistTraveledOfStopTimeValidationTestCase(util.ValidationTestCase):
    """shape_dist_traveled must not exceed the shape length, must not
    decrease (error), and a repeated distance is only a warning."""

    def runTest(self):
        schedule = self.SimpleSchedule()
        shape = transitfeed.Shape("shape_1")
        shape.add_point(36.425288, -117.133162, 0)
        shape.add_point(36.424288, -117.133142, 1)
        schedule.add_shape_object(shape)
        trip = schedule.get_route("054C").add_trip(trip_id="054C-00")
        trip.shape_id = "shape_1"
        stop = transitfeed.Stop(36.425288, -117.133162, "Demo Stop 1", "STOP1")
        schedule.add_stop_object(stop)
        trip.add_stop_time(stop, arrival_time="5:11:00", departure_time="5:12:00",
                           stop_sequence=0, shape_dist_traveled=0)
        stop = transitfeed.Stop(36.424288, -117.133142, "Demo Stop 2", "STOP2")
        schedule.add_stop_object(stop)
        trip.add_stop_time(stop, arrival_time="5:15:00", departure_time="5:16:00",
                           stop_sequence=1, shape_dist_traveled=1)
        # Third stop claims distance 2, past the end of the 1-unit shape.
        stop = transitfeed.Stop(36.423288, -117.133122, "Demo Stop 3", "STOP3")
        schedule.add_stop_object(stop)
        trip.add_stop_time(stop, arrival_time="5:18:00", departure_time="5:19:00",
                           stop_sequence=2, shape_dist_traveled=2)
        self.accumulator.assert_no_more_exceptions()
        schedule.validate(self.problems)
        e = self.accumulator.pop_exception('OtherProblem')
        self.assertMatchesRegex('shape_dist_traveled=2', e.format_problem())
        self.accumulator.assert_no_more_exceptions()
        # Error if the distance decreases.
        shape.add_point(36.421288, -117.133132, 2)
        stop = transitfeed.Stop(36.421288, -117.133122, "Demo Stop 4", "STOP4")
        schedule.add_stop_object(stop)
        stoptime = transitfeed.StopTime(self.problems, stop,
                                        arrival_time="5:29:00",
                                        departure_time="5:29:00", stop_sequence=3,
                                        shape_dist_traveled=1.7)
        trip.add_stop_time_object(stoptime, schedule=schedule)
        self.accumulator.assert_no_more_exceptions()
        schedule.validate(self.problems)
        e = self.accumulator.pop_exception('InvalidValue')
        self.assertMatchesRegex('stop STOP4 has', e.format_problem())
        self.assertMatchesRegex('shape_dist_traveled=1.7', e.format_problem())
        self.assertMatchesRegex('distance was 2.0.', e.format_problem())
        self.assertEqual(e.type, transitfeed.TYPE_ERROR)
        self.accumulator.assert_no_more_exceptions()
        # Warning if distance remains the same between two stop_times
        stoptime.shape_dist_traveled = 2.0
        trip.replace_stop_time_object(stoptime, schedule=schedule)
        schedule.validate(self.problems)
        e = self.accumulator.pop_exception('InvalidValue')
        self.assertMatchesRegex('stop STOP4 has', e.format_problem())
        self.assertMatchesRegex('shape_dist_traveled=2.0', e.format_problem())
        self.assertMatchesRegex('distance was 2.0.', e.format_problem())
        self.assertEqual(e.type, transitfeed.TYPE_WARNING)
        self.accumulator.assert_no_more_exceptions()
class StopMatchWithShapeTestCase(util.ValidationTestCase):
    """Stops more than 1000 m from the trip's shape are flagged."""

    def runTest(self):
        schedule = self.SimpleSchedule()
        shape = transitfeed.Shape("shape_1")
        shape.add_point(36.425288, -117.133162, 0)
        shape.add_point(36.424288, -117.143142, 1)
        schedule.add_shape_object(shape)
        trip = schedule.get_route("054C").add_trip(trip_id="054C-00")
        trip.shape_id = "shape_1"
        # Stop 1 is only 600 meters away from shape, which is allowed.
        stop = transitfeed.Stop(36.425288, -117.139162, "Demo Stop 1", "STOP1")
        schedule.add_stop_object(stop)
        trip.add_stop_time(stop, arrival_time="5:11:00", departure_time="5:12:00",
                           stop_sequence=0, shape_dist_traveled=0)
        # Stop 2 is more than 1000 meters away from shape, which is not allowed.
        stop = transitfeed.Stop(36.424288, -117.158142, "Demo Stop 2", "STOP2")
        schedule.add_stop_object(stop)
        trip.add_stop_time(stop, arrival_time="5:15:00", departure_time="5:16:00",
                           stop_sequence=1, shape_dist_traveled=1)
        schedule.validate(self.problems)
        e = self.accumulator.pop_exception('StopTooFarFromShapeWithDistTraveled')
        self.assertTrue(e.format_problem().find('Demo Stop 2') != -1)
        self.assertTrue(e.format_problem().find('1344 meters away') != -1)
        self.accumulator.assert_no_more_exceptions()
class TripAddStopTimeObjectTestCase(util.ValidationTestCase):
    """add_stop_time_object rejects times that move backwards along a trip."""

    def runTest(self):
        schedule = transitfeed.Schedule(problem_reporter=self.problems)
        schedule.add_agency("\xc8\x8b Fly Agency", "http://iflyagency.com",
                            "America/Los_Angeles")
        schedule.get_default_service_period().set_date_has_service('20070101')
        stop1 = schedule.add_stop(lng=140, lat=48.2, name="Stop 1")
        stop2 = schedule.add_stop(lng=140.001, lat=48.201, name="Stop 2")
        route = schedule.add_route("B", "Beta", "Bus")
        trip = route.add_trip(schedule, "bus trip")
        trip.add_stop_time_object(transitfeed.StopTime(self.problems, stop1,
                                                       arrival_secs=10,
                                                       departure_secs=10),
                                  schedule=schedule, loader_problems=self.problems)
        trip.add_stop_time_object(transitfeed.StopTime(self.problems, stop2,
                                                       arrival_secs=20,
                                                       departure_secs=20),
                                  schedule=schedule, loader_problems=self.problems)
        # TODO: Factor out checks or use mock problems object
        # secs=15 is earlier than the last stop (secs=20): expect a problem.
        self.ExpectOtherProblemInClosure(lambda:
            trip.add_stop_time_object(transitfeed.StopTime(self.problems, stop1,
                                                           arrival_secs=15,
                                                           departure_secs=15),
                                      schedule=schedule,
                                      loader_problems=self.problems))
        # A StopTime without times is allowed at an intermediate position.
        trip.add_stop_time_object(transitfeed.StopTime(self.problems, stop1),
                                  schedule=schedule, loader_problems=self.problems)
        self.ExpectOtherProblemInClosure(lambda:
            trip.add_stop_time_object(transitfeed.StopTime(self.problems, stop1,
                                                           arrival_secs=15,
                                                           departure_secs=15),
                                      schedule=schedule,
                                      loader_problems=self.problems))
        trip.add_stop_time_object(transitfeed.StopTime(self.problems, stop1,
                                                       arrival_secs=30,
                                                       departure_secs=30),
                                  schedule=schedule, loader_problems=self.problems)
        self.accumulator.assert_no_more_exceptions()
class TripReplaceStopTimeObjectTestCase(util.TestCase):
    """replace_stop_time_object swaps in an updated StopTime for a trip."""

    def runTest(self):
        schedule = transitfeed.Schedule()
        schedule.add_agency("\xc8\x8b Fly Agency", "http://iflyagency.com",
                            "America/Los_Angeles")
        schedule.get_default_service_period().set_date_has_service('20070101')
        stop1 = schedule.add_stop(lng=140, lat=48.2, name="Stop 1")
        route = schedule.add_route("B", "Beta", "Bus")
        trip = route.add_trip(schedule, "bus trip")
        stoptime = transitfeed.StopTime(transitfeed.default_problem_reporter,
                                        stop1,
                                        arrival_secs=10,
                                        departure_secs=10)
        trip.add_stop_time_object(stoptime, schedule=schedule)
        stoptime.departure_secs = 20
        trip.replace_stop_time_object(stoptime, schedule=schedule)
        stoptimes = trip.get_stop_times()
        self.assertEqual(len(stoptimes), 1)
        self.assertEqual(stoptimes[0].departure_secs, 20)
        unknown_stop = schedule.add_stop(lng=140, lat=48.2, name="unknown")
        unknown_stoptime = transitfeed.StopTime(
            transitfeed.default_problem_reporter, unknown_stop,
            arrival_secs=10,
            departure_secs=10)
        unknown_stoptime.stop_sequence = 5
        # Attempting to replace a non-existent StopTime raises an error.
        # Fixed: use the snake_case replace_stop_time_object (as above); the
        # old CamelCase trip.ReplaceStopTimeObject no longer exists and would
        # raise AttributeError while assertRaises' arguments were evaluated.
        self.assertRaises(transitfeed.Error, trip.replace_stop_time_object,
                          unknown_stoptime, schedule=schedule)
class SingleTripTestCase(util.TestCase):
    """Shared fixture for trip tests: one route, one trip, two stops."""

    def setUp(self):
        schedule = transitfeed.Schedule(
            problem_reporter=util.ExceptionProblemReporterNoExpiration())
        schedule.new_default_agency(agency_name="Test Agency",
                                    agency_url="http://example.com",
                                    agency_timezone="America/Los_Angeles")
        route = schedule.add_route(short_name="54C", long_name="Polish Hill",
                                   route_type=3)
        service_period = schedule.get_default_service_period()
        service_period.set_date_has_service("20070101")
        trip = route.add_trip(schedule, 'via Polish Hill')
        stop1 = schedule.add_stop(36.425288, -117.133162, "Demo Stop 1")
        stop2 = schedule.add_stop(36.424288, -117.133142, "Demo Stop 2")
        # Expose the fixture objects to subclasses' runTest methods.
        self.schedule = schedule
        self.trip = trip
        self.stop1 = stop1
        self.stop2 = stop2
class TripStopTimeAccessorsTestCase(SingleTripTestCase):
    """get_start_time/get_end_time follow the first and last stop times."""

    def runTest(self):
        self.trip.add_stop_time(
            self.stop1, arrival_time="5:11:00", departure_time="5:12:00")
        self.trip.add_stop_time(
            self.stop2, arrival_time="5:15:00", departure_time="5:16:00")
        # Add some more stop times and test GetEndTime does the correct thing
        self.assertEqual(transitfeed.format_seconds_since_midnight(
            self.trip.get_start_time()), "05:11:00")
        self.assertEqual(transitfeed.format_seconds_since_midnight(
            self.trip.get_end_time()), "05:16:00")
        self.trip.add_stop_time(self.stop1, stop_time="05:20:00")
        self.assertEqual(
            transitfeed.format_seconds_since_midnight(self.trip.get_end_time()),
            "05:20:00")
        self.trip.add_stop_time(self.stop2, stop_time="05:22:00")
        self.assertEqual(
            transitfeed.format_seconds_since_midnight(self.trip.get_end_time()),
            "05:22:00")
class TripGetStopTimesTestCase(SingleTripTestCase):
    """get_stop_times/get_stop_times_tuples expose all StopTime fields.

    Fixed: assertEquals is a deprecated alias removed in Python 3.12;
    replaced throughout with assertEqual.
    """

    def runTest(self):
        self.trip.add_stop_time(
            self.stop1,
            arrival_time="5:11:00",
            departure_time="5:12:00",
            stop_headsign='Stop Headsign',
            pickup_type=1,
            drop_off_type=2,
            shape_dist_traveled=100,
            timepoint=1)
        self.trip.add_stop_time(
            self.stop2, arrival_time="5:15:00", departure_time="5:16:00")
        stop_times = self.trip.get_stop_times()
        self.assertEqual(2, len(stop_times))
        st = stop_times[0]
        self.assertEqual(self.stop1.stop_id, st.stop_id)
        self.assertEqual('05:11:00', st.arrival_time)
        self.assertEqual('05:12:00', st.departure_time)
        self.assertEqual(u'Stop Headsign', st.stop_headsign)
        self.assertEqual(1, st.pickup_type)
        self.assertEqual(2, st.drop_off_type)
        self.assertEqual(100.0, st.shape_dist_traveled)
        self.assertEqual(1, st.timepoint)
        st = stop_times[1]
        self.assertEqual(self.stop2.stop_id, st.stop_id)
        self.assertEqual('05:15:00', st.arrival_time)
        self.assertEqual('05:16:00', st.departure_time)
        tuples = self.trip.get_stop_times_tuples()
        self.assertEqual(2, len(tuples))
        self.assertEqual(
            (self.trip.trip_id, "05:11:00", "05:12:00", self.stop1.stop_id,
             1, u'Stop Headsign', 1, 2, 100.0, 1),
            tuples[0])
        self.assertEqual(
            (self.trip.trip_id, "05:15:00", "05:16:00", self.stop2.stop_id,
             2, '', '', '', '', ''),
            tuples[1])
class TripClearStopTimesTestCase(util.TestCase):
    """clear_stop_times empties a trip; re-adding old StopTimes restores it."""

    def runTest(self):
        schedule = transitfeed.Schedule(
            problem_reporter=util.ExceptionProblemReporterNoExpiration())
        schedule.new_default_agency(agency_name="Test Agency",
                                    agency_timezone="America/Los_Angeles")
        route = schedule.add_route(short_name="54C", long_name="Hill", route_type=3)
        schedule.get_default_service_period().set_date_has_service("20070101")
        stop1 = schedule.add_stop(36, -117.1, "Demo Stop 1")
        stop2 = schedule.add_stop(36, -117.2, "Demo Stop 2")
        stop3 = schedule.add_stop(36, -117.3, "Demo Stop 3")
        trip = route.add_trip(schedule, "via Polish Hill")
        trip.clear_stop_times()
        self.assertFalse(trip.get_stop_times())
        trip.add_stop_time(stop1, stop_time="5:11:00")
        self.assertTrue(trip.get_stop_times())
        trip.clear_stop_times()
        self.assertFalse(trip.get_stop_times())
        trip.add_stop_time(stop3, stop_time="4:00:00")  # Can insert earlier time
        trip.add_stop_time(stop2, stop_time="4:15:00")
        trip.add_stop_time(stop1, stop_time="4:21:00")
        old_stop_times = trip.get_stop_times()
        self.assertTrue(old_stop_times)
        trip.clear_stop_times()
        self.assertFalse(trip.get_stop_times())
        # Re-adding the saved StopTime objects restores the original window.
        for st in old_stop_times:
            trip.add_stop_time_object(st)
        self.assertEqual(trip.get_start_time(), 4 * 3600)
        self.assertEqual(trip.get_end_time(), 4 * 3600 + 21 * 60)
class InvalidRouteAgencyTestCase(util.LoadTestCase):
    """Feed with bad agency/route references yields two InvalidValue problems."""

    def runTest(self):
        self.load('invalid_route_agency')
        self.accumulator.pop_invalid_value("agency_id", "routes.txt")
        self.accumulator.pop_invalid_value("route_id", "trips.txt")
        self.accumulator.assert_no_more_exceptions()
class InvalidAgencyIdsTestCase(util.LoadTestCase):
    """Feed with invalid agency ids yields a single OtherProblem."""

    def runTest(self):
        self.load('invalid_agency_ids')
        self.accumulator.pop_exception('OtherProblem')
        self.accumulator.assert_no_more_exceptions()
class AddStopTimeParametersTestCase(util.TestCase):
    """add_stop_time accepts secs-based, empty, and string-based time args."""

    def runTest(self):
        problem_reporter = util.get_test_failure_problem_reporter(self)
        schedule = transitfeed.Schedule(problem_reporter=problem_reporter)
        route = schedule.add_route(short_name="10", long_name="", route_type="Bus")
        stop = schedule.add_stop(40, -128, "My stop")
        # Stop must be added to schedule so that the call
        # AddStopTime -> AddStopTimeObject -> GetStopTimes -> GetStop can work
        trip = transitfeed.Trip()
        trip.route_id = route.route_id
        trip.service_id = schedule.get_default_service_period().service_id
        trip.trip_id = "SAMPLE_TRIP"
        schedule.add_trip_object(trip)
        # First stop must have time
        trip.add_stop_time(stop, arrival_secs=300, departure_secs=360)
        trip.add_stop_time(stop)
        trip.add_stop_time(stop, arrival_time="00:07:00", departure_time="00:07:30")
        trip.validate(problem_reporter)
class AddFrequencyValidationTestCase(util.ValidationTestCase):
    """add_frequency argument validation: valid inputs, then each bad field."""

    def ExpectInvalidValue(self, start_time, end_time, headway,
                           column_name, value):
        # Expect add_frequency to raise InvalidValue for the given column,
        # leaving the trip with no frequencies.
        try:
            trip = transitfeed.Trip()
            trip.add_frequency(start_time, end_time, headway)
            self.fail("Expected InvalidValue error on %s" % column_name)
        except transitfeed.InvalidValue as e:
            self.assertEqual(column_name, e.column_name)
            self.assertEqual(value, e.value)
            self.assertEqual(0, len(trip.get_frequency_tuples()))

    def ExpectMissingValue(self, start_time, end_time, headway, column_name):
        # Expect add_frequency to raise MissingValue for the given column.
        trip = transitfeed.Trip()
        try:
            trip.add_frequency(start_time, end_time, headway)
            self.fail("Expected MissingValue error on %s" % column_name)
        except transitfeed.MissingValue as e:
            self.assertEqual(column_name, e.column_name)
            self.assertEqual(0, len(trip.get_frequency_tuples()))

    def runTest(self):
        # these should work fine
        trip = transitfeed.Trip()
        trip.trip_id = "SAMPLE_ID"
        trip.add_frequency(0, 50, 1200)
        trip.add_frequency("01:00:00", "02:00:00", "600")
        trip.add_frequency(u"02:00:00", u"03:00:00", u"1800")
        headways = trip.get_frequency_tuples()
        self.assertEqual(3, len(headways))
        self.assertEqual((0, 50, 1200, 0), headways[0])
        self.assertEqual((3600, 7200, 600, 0), headways[1])
        self.assertEqual((7200, 10800, 1800, 0), headways[2])
        self.assertEqual([("SAMPLE_ID", "00:00:00", "00:00:50", "1200", "0"),
                          ("SAMPLE_ID", "01:00:00", "02:00:00", "600", "0"),
                          ("SAMPLE_ID", "02:00:00", "03:00:00", "1800", "0")],
                         trip.get_frequency_output_tuples())
        # now test invalid input
        self.ExpectMissingValue(None, 50, 1200, "start_time")
        self.ExpectMissingValue("", 50, 1200, "start_time")
        self.ExpectInvalidValue("midnight", 50, 1200, "start_time",
                                "midnight")
        self.ExpectInvalidValue(-50, 50, 1200, "start_time", -50)
        self.ExpectMissingValue(0, None, 1200, "end_time")
        self.ExpectMissingValue(0, "", 1200, "end_time")
        self.ExpectInvalidValue(0, "noon", 1200, "end_time", "noon")
        self.ExpectInvalidValue(0, -50, 1200, "end_time", -50)
        self.ExpectMissingValue(0, 600, 0, "headway_secs")
        self.ExpectMissingValue(0, 600, None, "headway_secs")
        self.ExpectMissingValue(0, 600, "", "headway_secs")
        self.ExpectInvalidValue(0, 600, "test", "headway_secs", "test")
        self.ExpectInvalidValue(0, 600, -60, "headway_secs", -60)
        self.ExpectInvalidValue(0, 0, 1200, "end_time", 0)
        self.ExpectInvalidValue("12:00:00", "06:00:00", 1200, "end_time",
                                21600)
class GetTripTimeTestCase(util.TestCase):
    """Test for GetStopTimeTrips and GetTimeInterpolatedStops.

    Builds one route with: trip1 (timed endpoints, untimed middle stops,
    including a loop back to stop2), trip2 (fully timed, visits stop3
    twice), and trip3 (empty).

    Fixed leftover CamelCase calls (AddStopTime / GetTimeInterpolatedStops)
    from the CamelCase -> snake_case API rename; the rest of this file uses
    the snake_case names (add_stop_time, get_time_interpolated_stops), so
    the CamelCase calls would raise AttributeError.
    """

    def setUp(self):
        problems = util.get_test_failure_problem_reporter(self)
        schedule = transitfeed.Schedule(problem_reporter=problems)
        self.schedule = schedule
        schedule.add_agency("Agency", "http://iflyagency.com",
                            "America/Los_Angeles")
        service_period = schedule.get_default_service_period()
        service_period.set_date_has_service('20070101')
        self.stop1 = schedule.add_stop(lng=140.01, lat=0, name="140.01,0")
        self.stop2 = schedule.add_stop(lng=140.02, lat=0, name="140.02,0")
        self.stop3 = schedule.add_stop(lng=140.03, lat=0, name="140.03,0")
        self.stop4 = schedule.add_stop(lng=140.04, lat=0, name="140.04,0")
        self.stop5 = schedule.add_stop(lng=140.05, lat=0, name="140.05,0")
        self.route1 = schedule.add_route("1", "One", "Bus")
        self.trip1 = self.route1.add_trip(schedule, "trip 1", trip_id='trip1')
        self.trip1.add_stop_time(self.stop1, schedule=schedule, departure_secs=100,
                                 arrival_secs=100)
        self.trip1.add_stop_time(self.stop2, schedule=schedule)
        self.trip1.add_stop_time(self.stop3, schedule=schedule)
        # loop back to stop2 to test that interpolated stops work ok even when
        # a stop between timepoints is further from the timepoint than the
        # preceding
        self.trip1.add_stop_time(self.stop2, schedule=schedule)
        self.trip1.add_stop_time(self.stop4, schedule=schedule, departure_secs=400,
                                 arrival_secs=400)
        self.trip2 = self.route1.add_trip(schedule, "trip 2", trip_id='trip2')
        self.trip2.add_stop_time(self.stop2, schedule=schedule, departure_secs=500,
                                 arrival_secs=500)
        self.trip2.add_stop_time(self.stop3, schedule=schedule, departure_secs=600,
                                 arrival_secs=600)
        self.trip2.add_stop_time(self.stop4, schedule=schedule, departure_secs=700,
                                 arrival_secs=700)
        self.trip2.add_stop_time(self.stop3, schedule=schedule, departure_secs=800,
                                 arrival_secs=800)
        self.trip3 = self.route1.add_trip(schedule, "trip 3", trip_id='trip3')

    def testGetTimeInterpolatedStops(self):
        # trip1: 100s at stop1 and 400s at stop4 are timepoints; the three
        # untimed stops in between get interpolated (160, 220, 280).
        rv = self.trip1.get_time_interpolated_stops()
        self.assertEqual(5, len(rv))
        (secs, stoptimes, istimepoints) = tuple(zip(*rv))
        self.assertEqual((100, 160, 220, 280, 400), secs)
        self.assertEqual(("140.01,0", "140.02,0", "140.03,0", "140.02,0", "140.04,0"),
                         tuple([st.stop.stop_name for st in stoptimes]))
        self.assertEqual((True, False, False, False, True), istimepoints)
        # A trip with no stop times interpolates to an empty list.
        self.assertEqual([], self.trip3.get_time_interpolated_stops())

    def testGetTimeInterpolatedStopsUntimedEnd(self):
        # A trailing untimed stop has no following timepoint to interpolate to.
        self.trip2.add_stop_time(self.stop3, schedule=self.schedule)
        self.assertRaises(ValueError, self.trip2.get_time_interpolated_stops)

    def testGetTimeInterpolatedStopsUntimedStart(self):
        # Temporarily replace the problem reporter so that adding the first
        # StopTime without a time doesn't throw an exception.
        old_problems = self.schedule.problem_reporter
        self.schedule.problem_reporter = util.get_test_failure_problem_reporter(
            self, ("OtherProblem",))
        self.trip3.add_stop_time(self.stop3, schedule=self.schedule)
        self.schedule.problem_reporter = old_problems
        self.trip3.add_stop_time(self.stop2, schedule=self.schedule,
                                 departure_secs=500, arrival_secs=500)
        self.assertRaises(ValueError, self.trip3.get_time_interpolated_stops)

    def testGetTimeInterpolatedStopsSingleStopTime(self):
        # A single timed stop is trivially its own timepoint.
        self.trip3.add_stop_time(self.stop3, schedule=self.schedule,
                                 departure_secs=500, arrival_secs=500)
        rv = self.trip3.get_time_interpolated_stops()
        self.assertEqual(1, len(rv))
        self.assertEqual(500, rv[0][0])
        self.assertEqual(True, rv[0][2])

    def testGetStopTimeTrips(self):
        # stop3 is visited once by trip1 (interpolated) and twice by trip2.
        stopa = self.schedule.get_nearest_stops(lon=140.03, lat=0)[0]
        self.assertEqual("140.03,0", stopa.stop_name)  # Got stop3?
        rv = stopa.get_stop_time_trips(self.schedule)
        self.assertEqual(3, len(rv))
        (secs, trip_index, istimepoints) = tuple(zip(*rv))
        self.assertEqual((220, 600, 800), secs)
        self.assertEqual(("trip1", "trip2", "trip2"), tuple([ti[0].trip_id for ti in trip_index]))
        self.assertEqual((2, 1, 3), tuple([ti[1] for ti in trip_index]))
        self.assertEqual((False, True, True), istimepoints)

    def testStopTripIndex(self):
        # trip_index pairs each visiting trip with the stop's sequence position.
        trip_index = self.stop3.trip_index
        trip_ids = [t.trip_id for t, i in trip_index]
        self.assertEqual(["trip1", "trip2", "trip2"], trip_ids)
        self.assertEqual([2, 1, 3], [i for t, i in trip_index])

    def testGetTrips(self):
        # Each stop reports exactly the set of trips that visit it.
        self.assertEqual(
            set([t.trip_id for t in self.stop1.get_trips(self.schedule)]),
            {self.trip1.trip_id})
        self.assertEqual(
            set([t.trip_id for t in self.stop2.get_trips(self.schedule)]),
            {self.trip1.trip_id, self.trip2.trip_id})
        self.assertEqual(
            set([t.trip_id for t in self.stop3.get_trips(self.schedule)]),
            {self.trip1.trip_id, self.trip2.trip_id})
        self.assertEqual(
            set([t.trip_id for t in self.stop4.get_trips(self.schedule)]),
            {self.trip1.trip_id, self.trip2.trip_id})
        self.assertEqual(
            set([t.trip_id for t in self.stop5.get_trips(self.schedule)]),
            set())
class GetFrequencyTimesTestCase(util.TestCase):
    """Test for GetFrequencyStartTimes and GetFrequencyStopTimes.

    Fixes in this revision:
    * Leftover CamelCase AddStopTime calls from the snake_case API rename
      are now add_stop_time, matching the rest of this file.
    * In testGetFrequencyStopTimes the second get_frequency_stop_times()
      call's result was discarded and the SAME list re-iterated, making the
      subsequent equality assertions vacuous; the second fetch is now
      captured and compared.
    """

    def setUp(self):
        # One weekday-service trip with two frequency entries: every 30 min
        # from 16:00, then every 45 min from 18:00.
        problems = util.get_test_failure_problem_reporter(self)
        schedule = transitfeed.Schedule(problem_reporter=problems)
        self.schedule = schedule
        schedule.add_agency("Agency", "http://iflyagency.com",
                            "America/Los_Angeles")
        service_period = schedule.get_default_service_period()
        service_period.set_start_date("20080101")
        service_period.set_end_date("20090101")
        service_period.set_weekday_service(True)
        self.stop1 = schedule.add_stop(lng=140.01, lat=0, name="140.01,0")
        self.stop2 = schedule.add_stop(lng=140.02, lat=0, name="140.02,0")
        self.stop3 = schedule.add_stop(lng=140.03, lat=0, name="140.03,0")
        self.stop4 = schedule.add_stop(lng=140.04, lat=0, name="140.04,0")
        self.stop5 = schedule.add_stop(lng=140.05, lat=0, name="140.05,0")
        self.route1 = schedule.add_route("1", "One", "Bus")
        self.trip1 = self.route1.add_trip(schedule, "trip 1", trip_id="trip1")
        # add different types of stop times
        self.trip1.add_stop_time(self.stop1, arrival_time="17:00:00",
                                 departure_time="17:01:00")  # both arrival and departure time
        self.trip1.add_stop_time(self.stop2, schedule=schedule)  # non timed
        self.trip1.add_stop_time(self.stop3, stop_time="17:45:00")  # only stop_time
        # add headways starting before the trip
        self.trip1.add_frequency("16:00:00", "18:00:00", 1800)  # each 30 min
        self.trip1.add_frequency("18:00:00", "20:00:00", 2700)  # each 45 min

    def testGetFrequencyStartTimes(self):
        start_times = self.trip1.get_frequency_start_times()
        self.assertEqual(
            ["16:00:00", "16:30:00", "17:00:00", "17:30:00",
             "18:00:00", "18:45:00", "19:30:00"],
            [transitfeed.format_seconds_since_midnight(secs) for secs in start_times])
        # A second fetch must produce the same values.
        # NOTE(review): the original comment says this should exercise the
        # deprecated GetHeadwayStartTimes alias; confirm whether a
        # get_headway_start_times alias survived the snake_case rename and,
        # if so, call it here instead.
        self.assertEqual(start_times,
                         self.trip1.get_frequency_start_times())

    def testGetFrequencyStopTimes(self):
        stoptimes_list = self.trip1.get_frequency_stop_times()
        arrival_secs = []
        departure_secs = []
        for stoptimes in stoptimes_list:
            arrival_secs.append([st.arrival_secs for st in stoptimes])
            departure_secs.append([st.departure_secs for st in stoptimes])
        # StopTimes are instantiated as they're read from the DB so they can't
        # be compared directly, but checking {arrival,departure}_secs should be
        # enough to catch most errors. Capture the SECOND fetch (previously
        # discarded) so the comparison below is meaningful.
        # NOTE(review): originally intended to exercise the deprecated
        # GetHeadwayStopTimes alias — confirm whether get_headway_stop_times
        # exists post-rename and substitute it if so.
        refetched_list = self.trip1.get_frequency_stop_times()
        headway_arrival_secs = []
        headway_departure_secs = []
        for stoptimes in refetched_list:
            headway_arrival_secs.append([st.arrival_secs for st in stoptimes])
            headway_departure_secs.append([st.departure_secs for st in stoptimes])
        self.assertEqual(arrival_secs, headway_arrival_secs)
        self.assertEqual(departure_secs, headway_departure_secs)
        self.assertEqual(([57600, None, 60300], [59400, None, 62100], [61200, None, 63900],
                          [63000, None, 65700], [64800, None, 67500], [67500, None, 70200],
                          [70200, None, 72900]),
                         tuple(arrival_secs))
        self.assertEqual(([57660, None, 60300], [59460, None, 62100], [61260, None, 63900],
                          [63060, None, 65700], [64860, None, 67500], [67560, None, 70200],
                          [70260, None, 72900]),
                         tuple(departure_secs))
        # test if stoptimes are created with same parameters than the ones from the original trip
        stoptimes = self.trip1.get_stop_times()
        for stoptimes_clone in stoptimes_list:
            self.assertEqual(len(stoptimes_clone), len(stoptimes))
            for st_clone, st in zip(stoptimes_clone, stoptimes):
                for name in st.__slots__:
                    if name not in ('arrival_secs', 'departure_secs'):
                        self.assertEqual(getattr(st, name), getattr(st_clone, name))
| 42,807 | 14,875 |
import micawber
import logging
from django.db import models
from django_postgres_unlimited_varchar import UnlimitedCharField
from django.urls import reverse
from django.utils import timezone
from django.contrib.postgres.fields import JSONField
log = logging.getLogger(__name__)
class Conference(models.Model):
    """A conference at which one or more Presentations were given."""
    title = UnlimitedCharField()
    # Conference homepage; optional.
    link = models.URLField(blank=True)
    # Dates are optional so a conference can be recorded before they're known.
    start_date = models.DateField(blank=True, null=True)
    end_date = models.DateField(blank=True, null=True)

    def __str__(self):
        return self.title
# (stored value, human-readable label) pairs for Presentation.type.
PRESENTATION_TYPE_CHOICES = [
    ("keynote", "Keynote"),
    ("talk", "Talk"),
    ("tutorial", "Tutorial"),
    ("panel", "Panel"),
]
class Presentation(models.Model):
    """A single talk/keynote/tutorial/panel given at a Conference."""
    title = UnlimitedCharField()
    # Used by get_absolute_url below.
    slug = models.SlugField()
    date = models.DateField()
    description = models.TextField(blank=True)
    # NOTE: `type` shadows the builtin; renaming would require a migration.
    type = UnlimitedCharField(choices=PRESENTATION_TYPE_CHOICES, default="talk")
    conference = models.ForeignKey(
        Conference, related_name="presentations", on_delete=models.CASCADE
    )
    # hm... not sure I like introducing the dependency on blog
    # so leaving this out for now.
    # but maybe I need that dep anyway -- search?
    # tags = models.ManyToManyField(Tag, blank=True)

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        return reverse("speaking_portfolio_detail", args=[self.slug])

    @property
    def is_future(self):
        # Strictly after "today" in the server's current timezone.
        return self.date > timezone.now().date()

    @property
    def is_past(self):
        # Complement of is_future: today counts as past.
        return self.date <= timezone.now().date()
# (stored value, human-readable label) pairs for Coverage.type.
COVERAGE_TYPE_CHOICES = [
    ("video", "Video"),
    ("slides", "Slides"),
    ("link", "Link"),
    ("notes", "Notes"),
    ("write-up", "Write-Up"),
]

# Font Awesome CSS class shown for each coverage type (see Coverage.icon_class).
FA_ICON_MAP = {
    "video": "fab fa-youtube",
    "slides": "fab fa-slideshare",
    "link": "fas fa-link",
    "notes": "fas fa-clipboard",
    "write-up": "fas fa-file-alt",
}
class Coverage(models.Model):
    """A piece of coverage (video, slides, link, ...) for a Presentation."""
    presentation = models.ForeignKey(
        Presentation, related_name="coverage", on_delete=models.CASCADE
    )
    type = UnlimitedCharField(choices=COVERAGE_TYPE_CHOICES)
    url = models.URLField()
    # oEmbed metadata fetched from the URL's provider; {} when unavailable.
    oembed = JSONField(blank=True, null=True)

    def __str__(self):
        return f"{self.presentation} - {self.type}"

    class Meta:
        verbose_name_plural = "coverage"

    def save(self, *args, **kwargs):
        # Refresh oembed from the provider on every save; fall back to {}
        # when no provider matches the URL.
        providers = micawber.bootstrap_basic()
        try:
            self.oembed = providers.request(self.url)
        except micawber.ProviderException as e:
            # Fixed: Logger.warn is a deprecated alias of warning(); use
            # lazy %-style args so formatting only happens when emitted.
            log.warning("error saving oembed for %s: %s", self, e)
            self.oembed = {}
        super().save(*args, **kwargs)

    @property
    def icon_class(self):
        # KeyError here means `type` holds a value outside COVERAGE_TYPE_CHOICES.
        return FA_ICON_MAP[self.type]
| 2,668 | 862 |
from preprocessing.sentence_processor import *
from preprocessing.coreference import *
from preprocessing.to_statement import *
from utils.macro import *
class preprocessor:
    """Front-end of the preprocessing pipeline.

    Depending on config, either reloads a previously saved result from disk,
    or runs the full chain: input loading -> coreference resolution ->
    sentence processing -> statement conversion. The final result is exposed
    as ``self.output`` either way.
    """

    def __init__(self, config, demo_json):
        # config: nested dict; keys used here: config['preprocessing']['load'],
        # config['preprocessing']['output_path'], config['mode'].
        # demo_json: single input document, used only when mode == 'demo'.
        self.input = []
        self.config = config
        if config['preprocessing']['load']:
            # Cached result available: load it and skip the whole pipeline.
            self.output = jsonload(self.config['preprocessing']['output_path'])
            return
        elif config['mode'] == 'subtitle':
            self.subtitle_loader()
        elif config['mode'] == 'demo':
            self.demo_loader(demo_json)
        # NOTE(review): any other config['mode'] leaves self.input empty and
        # still runs the pipeline — confirm that is intended.
        # Each stage consumes the previous stage's .output attribute.
        self.coref = coreference(config, self.input)
        self.sentence_processor = sentence_processor(config, self.coref.output)
        self.to_stmt = to_statement(config, self.sentence_processor.output)
        self.output = self.to_stmt.output

    def demo_loader(self, demo_json):
        # Wrap the single demo document as the pipeline's input list.
        self.input.append(demo_json)
        return

    def subtitle_loader(self):
        # NOTE(review): the config key 'substitle_file' looks like a typo of
        # 'subtitle_file', but it is a runtime key and must match the actual
        # config files — do not rename here without updating them.
        subtitle_path = self.config['preprocessing']['substitle_file']
        # for path in diriter(subtitle_path):
        # self.input.append(jsonload(path))
        self.input.append(jsonload(subtitle_path))
        return

    def save_output(self):
        # Nothing to save when the output was itself loaded from disk.
        if self.config['preprocessing']['load']:
            return
        jsondump(self.output, self.config['preprocessing']['output_path'])
        return
| 1,365 | 385 |
from enum import Enum, unique
import socket
from pyspark.sql.types import StringType, StructType, ArrayType
from spardaqus import globals
from spardaqus.core.utils import mergedicts
from spardaqus.core.exceptions import \
SpardaqusMissingSparkSQLStructMetadata, \
SpardaqusSparkSQLStructParseError, \
SpardaqusSparkSQLStructMissingRequiredElementsError
class SpardaqusMessage:
    """Describes the Spardaqus message envelope as a Spark SQL StructType.

    From that one schema definition two artifacts are derived:

    * ``json_envelope`` -- a nested dict skeleton of the message, built from
      each string field's ``"json"`` metadata entry; and
    * ``query_fragment`` -- an integration-specific query string (only
      "splunk" is handled here), built from each field's metadata entry for
      that integration name.
    """

    def _parse(self, r, x, i):
        # Recursively fold schema node `x` into accumulator `r`.
        #   r: dict when i == "json", otherwise a string being appended to
        #   x: a node of StructType.jsonValue() -- a dict or a list of dicts
        #   i: metadata key to extract ("json" or an integration name)
        # Raises SpardaqusMissingSparkSQLStructMetadata when a string field
        # lacks the required "json" metadata, and
        # SpardaqusSparkSQLStructParseError on any other malformed node.
        if type(x) is list:
            # A list of field definitions: fold each one in turn.
            for y in x:
                r = self._parse(r, y, i)
        else:
            n = x["name"] if "name" in x.keys() else None
            f = x["fields"] if "fields" in x.keys() else None
            t = x["type"] if "type" in x.keys() else None
            if t:
                if type(t) is str:
                    if t == "struct":
                        # Descend into the struct's first field entry.
                        r = self._parse(r, f[0], i)
                    elif t == "string":
                        m = x["metadata"]
                        if i in m.keys():
                            if i == "json":
                                # JSON mode: field name -> metadata value.
                                r = mergedicts(r, {n: m[i]})
                            elif m[i]:
                                # Query mode: append "name = value" text
                                # (fields with empty metadata are skipped).
                                r = "%s %s = %s" % (r, n, m[i])
                        elif i == "json":
                            raise SpardaqusMissingSparkSQLStructMetadata
                        else:
                            raise SpardaqusSparkSQLStructParseError
                    else:
                        # Only "struct" and "string" scalar types are valid.
                        raise SpardaqusSparkSQLStructParseError
                elif type(t) is dict:
                    # Complex type: an array (has "elementType") or a nested
                    # struct object.
                    a = False
                    if "elementType" in t.keys():
                        t = t["elementType"]
                        a = True
                    if i == "json":
                        # Arrays become single-element lists in the skeleton.
                        z = {n: [self._parse({}, t["fields"], i)]} if a else {n: self._parse({}, t["fields"], i)}
                        r = mergedicts(r, z)
                    else:
                        r = self._parse(r, t["fields"], i)
                else:
                    raise SpardaqusSparkSQLStructParseError
        return r

    def _struct2json(self, sparksqlstruct):
        # Build the nested-dict JSON skeleton from the schema.
        return self._parse({}, sparksqlstruct.jsonValue(), "json")

    def _struct2queryfragment(self, sparksqlstruct, integration):
        # Build the integration-specific query text from the schema.
        return self._parse("", sparksqlstruct.jsonValue(), integration)

    def __init__(self, integration):
        # Per-event fields. The "splunk" metadata holds the SPL expression
        # that produces the field (some contain %-placeholders filled in
        # elsewhere); "json" holds the field's JSON skeleton value.
        # .add("spdqdata", StringType(), nullable=False, metadata={"splunk": "_raw,", "json": ""}) \
        event = StructType() \
            .add("spdqid", StringType(), nullable=False, metadata={"splunk": "substr(sha512(host + \"::\" + _raw), 1, %d),", "json": ""}) \
            .add("spdqbkt", StringType(), nullable=False, metadata={"splunk": "\"%s\",", "json": ""}) \
            .add("spdqidxn", StringType(), nullable=False, metadata={"splunk": "_index,", "json": ""}) \
            .add("spdqephn", StringType(), nullable=False, metadata={"splunk": "host,", "json": ""}) \
            .add("spdqsrc", StringType(), nullable=False, metadata={"splunk": "source,", "json": ""}) \
            .add("spdqstyp", StringType(), nullable=False, metadata={"splunk": "sourcetype,", "json": ""}) \
            .add("spdqtskey", StringType(), nullable=False, metadata={"splunk": "strftime(_time, \"%%Y%%m%%d%%H%%M%%S\"),", "json": ""}) \
            .add("spdqtstxt", StringType(), nullable=False, metadata={"splunk": "strftime(%s, \"%s\"),", "json": ""}) \
            .add("spdqtssrc", StringType(), nullable=False, metadata={"splunk": "_time", "json": ""})
        # Sending host identity, captured at construction time.
        spdqh = StructType() \
            .add("name", StringType(), nullable=False, metadata={"splunk": "", "json": socket.gethostname()}) \
            .add("fqdn", StringType(), nullable=False, metadata={"splunk": "", "json": socket.getfqdn()}) \
            .add("ips", StringType(), nullable=False, metadata={"splunk": "", "json": ",".join(socket.gethostbyname_ex(socket.gethostname())[-1])})
        # Message metadata: send timestamp placeholder, version, host info.
        meta = StructType() \
            .add("sent", StringType(), nullable=False, metadata={"splunk": "", "json": "%s"}) \
            .add("spdqv", StringType(), nullable=False, metadata={"splunk": "", "json": globals.__VERSION__}) \
            .add("spdqh", spdqh)
        # Top-level envelope: meta plus an array of events.
        spdq = StructType(). \
            add("meta", meta). \
            add("data", ArrayType(event))
        self.spark_sql_struct = StructType().add("spdq", spdq)
        self.json_envelope = self._struct2json(self.spark_sql_struct)
        # required fields check
        if "spdq" not in self.json_envelope.keys():
            raise SpardaqusSparkSQLStructMissingRequiredElementsError
        if "data" not in self.json_envelope["spdq"].keys():
            raise SpardaqusSparkSQLStructMissingRequiredElementsError
        if type(self.json_envelope["spdq"]["data"]) is not list:
            raise SpardaqusSparkSQLStructMissingRequiredElementsError
        for check in self.json_envelope["spdq"]["data"]:
            if "spdqtskey" not in check.keys() or "spdqid" not in check.keys():  # or "spdqdata" not in check.keys():
                raise SpardaqusSparkSQLStructMissingRequiredElementsError
        # Only the Splunk integration has a query representation.
        if integration == "splunk":
            self.query_fragment = self._struct2queryfragment(self.spark_sql_struct, integration)
        else:
            self.query_fragment = None
@unique
class SpardaqusTransportStatus(Enum):
    """Status values reported by a Spardaqus transport.

    Exact semantics are defined by this enum's consumers (not visible in
    this module); the names suggest a polling lifecycle.
    """
    STARTING = 0
    EMPTY = 1
    WAITEXPIRED = 2
    PROCESSING = 3
| 5,400 | 1,656 |
import udp_options
import udp_usrreq
def callback(pcb, data=None, options=None, error=None):
    """Receive callback handed to udp_usrreq: just prints the PCB for now.

    data/options/error are accepted (and currently ignored) so the signature
    matches however udp_usrreq invokes the callback.
    """
    print(pcb)


if __name__ == "__main__":
    print("starting")  # fixed typo: was "startings"
    # Bind on all interfaces, UDP port 5005, then enter the event loop.
    udp_usrreq.bindaddr('0.0.0.0', 5005, callback)
    udp_usrreq.run_loop()
| 245 | 100 |
import json
from unittest import mock
from django.http import JsonResponse
from django.test import TestCase
from django.urls import reverse
from slack_utils import signals
class EventsViewTestCase(TestCase):
    """Integration tests for the slack-events-api endpoint."""

    def test_verification(self):
        # Every POST must pass through slack_utils' request verification.
        with mock.patch('slack_utils.decorators.verify_request') as verify_mock:
            resp = self.client.post(reverse('slack-events-api'), "{}", content_type='application/json')
            self.assertTrue(verify_mock.called)

    def test_url_verification_handshake(self):
        # Slack's url_verification handshake must be answered with 200 and
        # a JSON body echoing the challenge back.
        with mock.patch('slack_utils.decorators.verify_request', return_value=True):
            resp = self.client.post(reverse('slack-events-api'), json.dumps({
                "token": "Jhj5dZrVaK7ZwHHjRyZWjbDl",
                "challenge": "3eZbrw1aBm2rZgRNFdxV2595E9CY3gmdALWMmHkvFXO7tYXAYM8P",
                "type": "url_verification"
            }), content_type='application/json')
            self.assertEqual(resp.status_code, 200)
            # Compare rendered bytes against a reference JsonResponse so the
            # serialization format matches exactly.
            self.assertEqual(resp.content,
                             JsonResponse({"challenge": "3eZbrw1aBm2rZgRNFdxV2595E9CY3gmdALWMmHkvFXO7tYXAYM8P"}).content)

    def test_app_rate_limited(self):
        # app_rate_limited notifications are simply acknowledged with 200.
        with mock.patch('slack_utils.decorators.verify_request', return_value=True):
            resp = self.client.post(reverse('slack-events-api'), json.dumps({
                "token": "Jhj5dZrVaK7ZwHHjRyZWjbDl",
                "type": "app_rate_limited",
                "team_id": "T123456",
                "minute_rate_limited": 1518467820,
                "api_app_id": "A123456"
            }), content_type='application/json')
            self.assertEqual(resp.status_code, 200)

    def test_event(self):
        # An event_callback payload must fire the event_received signal with
        # the event type, the inner event dict, and the remaining envelope
        # fields as kwargs.
        def handler(sender, event_type, event_data, signal, **kwargs):
            # Record everything the signal delivered for later assertions.
            handler.signal_was_called = True
            handler.event_type = event_type
            handler.event_data = event_data
            handler.kwargs = kwargs
        signals.event_received.connect(handler)
        try:
            with mock.patch('slack_utils.decorators.verify_request', return_value=True):
                resp = self.client.post(reverse('slack-events-api'), json.dumps({
                    "token": "z26uFbvR1xHJEdHE1OQiO6t8",
                    "team_id": "T061EG9RZ",
                    "api_app_id": "A0FFV41KK",
                    "event": {
                        "type": "reaction_added",
                        "user": "U061F1EUR",
                        "item": {
                            "type": "message",
                            "channel": "C061EG9SL",
                            "ts": "1464196127.000002"
                        },
                        "reaction": "slightly_smiling_face",
                        "item_user": "U0M4RL1NY",
                        "event_ts": "1465244570.336841"
                    },
                    "type": "event_callback",
                    "authed_users": [
                        "U061F7AUR"
                    ],
                    "event_id": "Ev9UQ52YNA",
                    "event_time": 1234567890
                }), content_type='application/json')
        finally:
            # Always disconnect so the handler can't leak into other tests.
            signals.event_received.disconnect(handler)
        self.assertEqual(resp.status_code, 200)
        self.assertTrue(handler.signal_was_called)
        self.assertEqual(handler.event_type, 'reaction_added')
        # The inner "event" dict is delivered minus its "type" key.
        self.assertDictEqual(handler.event_data, {
            "user": "U061F1EUR",
            "item": {
                "type": "message",
                "channel": "C061EG9SL",
                "ts": "1464196127.000002"
            },
            "reaction": "slightly_smiling_face",
            "item_user": "U0M4RL1NY",
            "event_ts": "1465244570.336841"
        })
        # Envelope fields (minus "event" and "type") arrive as kwargs.
        self.assertDictEqual(handler.kwargs, {
            "token": "z26uFbvR1xHJEdHE1OQiO6t8",
            "team_id": "T061EG9RZ",
            "api_app_id": "A0FFV41KK",
            "authed_users": [
                "U061F7AUR"
            ],
            "event_id": "Ev9UQ52YNA",
            "event_time": 1234567890
        })
""" Cisco_IOS_XR_ipv4_autorp_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR ipv4\-autorp package operational data.
This module contains definitions
for the following management objects\:
auto\-rp\: AutoRP operational data
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
# NOTE(review): this module is an auto-generated YANG binding (ydk);
# hand edits are normally overwritten on regeneration — confirm before
# modifying anything beyond comments.
class AutorpProtocolModeEnum(Enum):
    """
    AutorpProtocolModeEnum

    Autorp protocol mode

    .. data:: sparse = 0

        sparse

    .. data:: bidirectional = 1

        bidirectional

    """

    sparse = 0

    bidirectional = 1

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_autorp_oper as meta
        return meta._meta_table['AutorpProtocolModeEnum']
class AutoRp(object):
"""
AutoRP operational data
.. attribute:: active
Active Process
**type**\: :py:class:`Active <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_autorp_oper.AutoRp.Active>`
.. attribute:: standby
Standby Process
**type**\: :py:class:`Standby <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_autorp_oper.AutoRp.Standby>`
"""
_prefix = 'ipv4-autorp-oper'
_revision = '2015-11-09'
def __init__(self):
self.active = AutoRp.Active()
self.active.parent = self
self.standby = AutoRp.Standby()
self.standby.parent = self
class Standby(object):
"""
Standby Process
.. attribute:: candidate_rps
AutoRP Candidate RP Table
**type**\: :py:class:`CandidateRps <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_autorp_oper.AutoRp.Standby.CandidateRps>`
.. attribute:: mapping_agent
AutoRP Mapping Agent Table
**type**\: :py:class:`MappingAgent <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_autorp_oper.AutoRp.Standby.MappingAgent>`
"""
_prefix = 'ipv4-autorp-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.candidate_rps = AutoRp.Standby.CandidateRps()
self.candidate_rps.parent = self
self.mapping_agent = AutoRp.Standby.MappingAgent()
self.mapping_agent.parent = self
class CandidateRps(object):
"""
AutoRP Candidate RP Table
.. attribute:: candidate_rp
AutoRP Candidate RP Entry
**type**\: list of :py:class:`CandidateRp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_autorp_oper.AutoRp.Standby.CandidateRps.CandidateRp>`
"""
_prefix = 'ipv4-autorp-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.candidate_rp = YList()
self.candidate_rp.parent = self
self.candidate_rp.name = 'candidate_rp'
class CandidateRp(object):
"""
AutoRP Candidate RP Entry
.. attribute:: access_list_name
ACL Name
**type**\: str
.. attribute:: announce_period
Announce Period
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: candidate_rp_address
Candidate RP IP Address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: interface_name
Interface Name
**type**\: str
**pattern:** (([a\-zA\-Z0\-9\_]\*\\d+/){3,4}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){3,4}\\d+\\.\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]\*\\d+))\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]+))\|([a\-zA\-Z0\-9\_\-]\*\\d+)\|([a\-zA\-Z0\-9\_\-]\*\\d+\\.\\d+)\|(mpls)\|(dwdm)
.. attribute:: protocol_mode
Protocol Mode
**type**\: :py:class:`AutoRpProtocolModeEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_autorp_datatypes.AutoRpProtocolModeEnum>`
.. attribute:: protocol_mode_xr
Protocol Mode
**type**\: :py:class:`AutorpProtocolModeEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_autorp_oper.AutorpProtocolModeEnum>`
.. attribute:: ttl
TTL
**type**\: int
**range:** \-2147483648..2147483647
"""
_prefix = 'ipv4-autorp-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.access_list_name = None
self.announce_period = None
self.candidate_rp_address = None
self.interface_name = None
self.protocol_mode = None
self.protocol_mode_xr = None
self.ttl = None
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv4-autorp-oper:auto-rp/Cisco-IOS-XR-ipv4-autorp-oper:standby/Cisco-IOS-XR-ipv4-autorp-oper:candidate-rps/Cisco-IOS-XR-ipv4-autorp-oper:candidate-rp'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.access_list_name is not None:
return True
if self.announce_period is not None:
return True
if self.candidate_rp_address is not None:
return True
if self.interface_name is not None:
return True
if self.protocol_mode is not None:
return True
if self.protocol_mode_xr is not None:
return True
if self.ttl is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_autorp_oper as meta
return meta._meta_table['AutoRp.Standby.CandidateRps.CandidateRp']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv4-autorp-oper:auto-rp/Cisco-IOS-XR-ipv4-autorp-oper:standby/Cisco-IOS-XR-ipv4-autorp-oper:candidate-rps'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.candidate_rp is not None:
for child_ref in self.candidate_rp:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_autorp_oper as meta
return meta._meta_table['AutoRp.Standby.CandidateRps']['meta_info']
class MappingAgent(object):
"""
AutoRP Mapping Agent Table
.. attribute:: rp_addresses
AutoRP Mapping Agent Table Entries
**type**\: :py:class:`RpAddresses <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_autorp_oper.AutoRp.Standby.MappingAgent.RpAddresses>`
.. attribute:: summary
AutoRP Mapping Agent Summary Information
**type**\: :py:class:`Summary <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_autorp_oper.AutoRp.Standby.MappingAgent.Summary>`
"""
_prefix = 'ipv4-autorp-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.rp_addresses = AutoRp.Standby.MappingAgent.RpAddresses()
self.rp_addresses.parent = self
self.summary = AutoRp.Standby.MappingAgent.Summary()
self.summary.parent = self
class RpAddresses(object):
"""
AutoRP Mapping Agent Table Entries
.. attribute:: rp_address
AutoRP Mapping Agent Entry
**type**\: list of :py:class:`RpAddress <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_autorp_oper.AutoRp.Standby.MappingAgent.RpAddresses.RpAddress>`
"""
_prefix = 'ipv4-autorp-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.rp_address = YList()
self.rp_address.parent = self
self.rp_address.name = 'rp_address'
class RpAddress(object):
"""
AutoRP Mapping Agent Entry
.. attribute:: rp_address <key>
RP Address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: expiry_time
Time for expiration in seconds
**type**\: int
**range:** 0..18446744073709551615
**units**\: second
.. attribute:: pim_version
PIM version of the CRP
**type**\: int
**range:** 0..255
.. attribute:: range
Array of ranges
**type**\: list of :py:class:`Range <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_autorp_oper.AutoRp.Standby.MappingAgent.RpAddresses.RpAddress.Range>`
.. attribute:: rp_address_xr
Candidate\-RP address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
"""
_prefix = 'ipv4-autorp-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.rp_address = None
self.expiry_time = None
self.pim_version = None
self.range = YList()
self.range.parent = self
self.range.name = 'range'
self.rp_address_xr = None
class Range(object):
"""
Array of ranges
.. attribute:: check_point_object_id
Checkpoint object id
**type**\: int
**range:** 0..4294967295
.. attribute:: create_type
Source of the entry
**type**\: int
**range:** 0..255
.. attribute:: is_advertised
Is this entry advertised ?
**type**\: bool
.. attribute:: prefix
Prefix of the range
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: prefix_length
Prefix length of the range
**type**\: int
**range:** 0..255
.. attribute:: protocol_mode
Protocol Mode
**type**\: :py:class:`AutorpProtocolModeEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_autorp_oper.AutorpProtocolModeEnum>`
.. attribute:: uptime
Uptime in seconds
**type**\: int
**range:** 0..18446744073709551615
**units**\: second
"""
_prefix = 'ipv4-autorp-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.check_point_object_id = None
self.create_type = None
self.is_advertised = None
self.prefix = None
self.prefix_length = None
self.protocol_mode = None
self.uptime = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-autorp-oper:range'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.check_point_object_id is not None:
return True
if self.create_type is not None:
return True
if self.is_advertised is not None:
return True
if self.prefix is not None:
return True
if self.prefix_length is not None:
return True
if self.protocol_mode is not None:
return True
if self.uptime is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_autorp_oper as meta
return meta._meta_table['AutoRp.Standby.MappingAgent.RpAddresses.RpAddress.Range']['meta_info']
@property
def _common_path(self):
if self.rp_address is None:
raise YPYModelError('Key property rp_address is None')
return '/Cisco-IOS-XR-ipv4-autorp-oper:auto-rp/Cisco-IOS-XR-ipv4-autorp-oper:standby/Cisco-IOS-XR-ipv4-autorp-oper:mapping-agent/Cisco-IOS-XR-ipv4-autorp-oper:rp-addresses/Cisco-IOS-XR-ipv4-autorp-oper:rp-address[Cisco-IOS-XR-ipv4-autorp-oper:rp-address = ' + str(self.rp_address) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.rp_address is not None:
return True
if self.expiry_time is not None:
return True
if self.pim_version is not None:
return True
if self.range is not None:
for child_ref in self.range:
if child_ref._has_data():
return True
if self.rp_address_xr is not None:
return True
return False
@staticmethod
def _meta_info():
    """Look up the generated meta entry for this node."""
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_autorp_oper as meta
    entry = meta._meta_table['AutoRp.Standby.MappingAgent.RpAddresses.RpAddress']
    return entry['meta_info']
@property
def _common_path(self):
    """Fixed absolute schema path of the standby rp-addresses container."""
    return ('/Cisco-IOS-XR-ipv4-autorp-oper:auto-rp'
            '/Cisco-IOS-XR-ipv4-autorp-oper:standby'
            '/Cisco-IOS-XR-ipv4-autorp-oper:mapping-agent'
            '/Cisco-IOS-XR-ipv4-autorp-oper:rp-addresses')
def is_config(self):
    """Whether this node holds configuration data (always operational here)."""
    return False
def _has_data(self):
    """Report whether any child rp-address entry carries data.

    is_config() is False for operational nodes, so the guard short-circuits.
    """
    if not self.is_config():
        return False
    entries = self.rp_address
    if entries is None:
        return False
    return any(entry._has_data() for entry in entries)
@staticmethod
def _meta_info():
    """Look up the generated meta entry for this node."""
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_autorp_oper as meta
    entry = meta._meta_table['AutoRp.Standby.MappingAgent.RpAddresses']
    return entry['meta_info']
class Summary(object):
    """Operational summary of the standby AutoRP mapping agent.

    Leaves (all None until decoded from the device):
      cache_count          -- group-to-RP mapping entries currently cached (uint32)
      cache_limit          -- maximum mapping entries allowed (uint32)
      is_maximum_disabled  -- True when maximum enforcement is disabled (bool)
    """
    _prefix = 'ipv4-autorp-oper'
    _revision = '2015-11-09'

    def __init__(self):
        """Create an empty summary node; every leaf starts unset."""
        self.parent = None
        self.cache_count = self.cache_limit = None
        self.is_maximum_disabled = None

    @property
    def _common_path(self):
        """Fixed absolute schema path of the summary container."""
        return ('/Cisco-IOS-XR-ipv4-autorp-oper:auto-rp'
                '/Cisco-IOS-XR-ipv4-autorp-oper:standby'
                '/Cisco-IOS-XR-ipv4-autorp-oper:mapping-agent'
                '/Cisco-IOS-XR-ipv4-autorp-oper:summary')

    def is_config(self):
        """Operational data only; never configuration."""
        return False

    def _has_data(self):
        """Report whether any summary leaf carries a value.

        is_config() is False for operational nodes, so the guard
        short-circuits; the leaf scan mirrors the generated layout.
        """
        if not self.is_config():
            return False
        leaves = (self.cache_count, self.cache_limit, self.is_maximum_disabled)
        return any(leaf is not None for leaf in leaves)

    @staticmethod
    def _meta_info():
        """Look up the generated meta entry for this node."""
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_autorp_oper as meta
        return meta._meta_table['AutoRp.Standby.MappingAgent.Summary']['meta_info']
@property
def _common_path(self):
    """Fixed absolute schema path of the standby mapping-agent container."""
    return ('/Cisco-IOS-XR-ipv4-autorp-oper:auto-rp'
            '/Cisco-IOS-XR-ipv4-autorp-oper:standby'
            '/Cisco-IOS-XR-ipv4-autorp-oper:mapping-agent')
def is_config(self):
    """Whether this node holds configuration data (always operational here)."""
    return False
def _has_data(self):
    """Report whether either child container carries data.

    is_config() is False for operational nodes, so the guard short-circuits.
    """
    if not self.is_config():
        return False
    for child in (self.rp_addresses, self.summary):
        if child is not None and child._has_data():
            return True
    return False
@staticmethod
def _meta_info():
    """Look up the generated meta entry for this node."""
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_autorp_oper as meta
    entry = meta._meta_table['AutoRp.Standby.MappingAgent']
    return entry['meta_info']
@property
def _common_path(self):
    """Fixed absolute schema path of the standby process container."""
    return ('/Cisco-IOS-XR-ipv4-autorp-oper:auto-rp'
            '/Cisco-IOS-XR-ipv4-autorp-oper:standby')
def is_config(self):
    """Whether this node holds configuration data (always operational here)."""
    return False
def _has_data(self):
    """Report whether either child table carries data.

    is_config() is False for operational nodes, so the guard short-circuits.
    """
    if not self.is_config():
        return False
    for child in (self.candidate_rps, self.mapping_agent):
        if child is not None and child._has_data():
            return True
    return False
@staticmethod
def _meta_info():
    """Look up the generated meta entry for this node."""
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_autorp_oper as meta
    entry = meta._meta_table['AutoRp.Standby']
    return entry['meta_info']
class Active(object):
    """Operational data for the active AutoRP process.

    Children:
      candidate_rps -- AutoRp.Active.CandidateRps (candidate-RP table)
      mapping_agent -- AutoRp.Active.MappingAgent (mapping-agent table)

    Note: every node in this subtree is operational, so is_config() is
    False throughout and the _has_data() leaf scans short-circuit; they
    are kept for structural parity with the generated config classes.
    """
    _prefix = 'ipv4-autorp-oper'
    _revision = '2015-11-09'

    def __init__(self):
        """Build the child containers and back-link them to this node."""
        self.parent = None
        self.candidate_rps = AutoRp.Active.CandidateRps()
        self.candidate_rps.parent = self
        self.mapping_agent = AutoRp.Active.MappingAgent()
        self.mapping_agent.parent = self

    class CandidateRps(object):
        """AutoRP candidate-RP table of the active process.

        candidate_rp -- YList of CandidateRp entries.
        """
        _prefix = 'ipv4-autorp-oper'
        _revision = '2015-11-09'

        def __init__(self):
            """Create an empty keyed list of candidate-RP entries."""
            self.parent = None
            self.candidate_rp = YList()
            self.candidate_rp.parent = self
            self.candidate_rp.name = 'candidate_rp'

        class CandidateRp(object):
            """One AutoRP candidate-RP entry; leaves are None until decoded.

            access_list_name     -- ACL name (str)
            announce_period      -- announce period (int32)
            candidate_rp_address -- candidate RP IPv4 address (str)
            interface_name       -- interface name (str)
            protocol_mode        -- AutoRpProtocolModeEnum
            protocol_mode_xr     -- AutorpProtocolModeEnum
            ttl                  -- TTL (int32)
            """
            _prefix = 'ipv4-autorp-oper'
            _revision = '2015-11-09'

            def __init__(self):
                """Create an empty candidate-RP entry."""
                self.parent = None
                self.access_list_name = self.announce_period = None
                self.candidate_rp_address = self.interface_name = None
                self.protocol_mode = self.protocol_mode_xr = None
                self.ttl = None

            @property
            def _common_path(self):
                """Fixed absolute schema path of a candidate-rp entry."""
                return ('/Cisco-IOS-XR-ipv4-autorp-oper:auto-rp'
                        '/Cisco-IOS-XR-ipv4-autorp-oper:active'
                        '/Cisco-IOS-XR-ipv4-autorp-oper:candidate-rps'
                        '/Cisco-IOS-XR-ipv4-autorp-oper:candidate-rp')

            def is_config(self):
                """Operational data only; never configuration."""
                return False

            def _has_data(self):
                """Report whether any leaf carries a value (guarded by is_config)."""
                if not self.is_config():
                    return False
                leaves = (self.access_list_name, self.announce_period,
                          self.candidate_rp_address, self.interface_name,
                          self.protocol_mode, self.protocol_mode_xr, self.ttl)
                return any(leaf is not None for leaf in leaves)

            @staticmethod
            def _meta_info():
                """Look up the generated meta entry for this node."""
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_autorp_oper as meta
                return meta._meta_table['AutoRp.Active.CandidateRps.CandidateRp']['meta_info']

        @property
        def _common_path(self):
            """Fixed absolute schema path of the candidate-rps container."""
            return ('/Cisco-IOS-XR-ipv4-autorp-oper:auto-rp'
                    '/Cisco-IOS-XR-ipv4-autorp-oper:active'
                    '/Cisco-IOS-XR-ipv4-autorp-oper:candidate-rps')

        def is_config(self):
            """Operational data only; never configuration."""
            return False

        def _has_data(self):
            """Report whether any child entry carries data (guarded by is_config)."""
            if not self.is_config():
                return False
            if self.candidate_rp is None:
                return False
            return any(entry._has_data() for entry in self.candidate_rp)

        @staticmethod
        def _meta_info():
            """Look up the generated meta entry for this node."""
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_autorp_oper as meta
            return meta._meta_table['AutoRp.Active.CandidateRps']['meta_info']

    class MappingAgent(object):
        """AutoRP mapping-agent table of the active process.

        rp_addresses -- RpAddresses container (table entries)
        summary      -- Summary container (cache counters)
        """
        _prefix = 'ipv4-autorp-oper'
        _revision = '2015-11-09'

        def __init__(self):
            """Build the child containers and back-link them to this node."""
            self.parent = None
            self.rp_addresses = AutoRp.Active.MappingAgent.RpAddresses()
            self.rp_addresses.parent = self
            self.summary = AutoRp.Active.MappingAgent.Summary()
            self.summary.parent = self

        class RpAddresses(object):
            """Mapping-agent table; rp_address is a YList of RpAddress entries."""
            _prefix = 'ipv4-autorp-oper'
            _revision = '2015-11-09'

            def __init__(self):
                """Create an empty keyed list of mapping-agent entries."""
                self.parent = None
                self.rp_address = YList()
                self.rp_address.parent = self
                self.rp_address.name = 'rp_address'

            class RpAddress(object):
                """One mapping-agent entry, keyed by rp_address.

                rp_address    -- list key, RP IPv4 address (str)
                expiry_time   -- seconds until expiration (uint64)
                pim_version   -- PIM version of the CRP (uint8)
                range         -- YList of Range entries
                rp_address_xr -- candidate-RP address (str)
                """
                _prefix = 'ipv4-autorp-oper'
                _revision = '2015-11-09'

                def __init__(self):
                    """Create an empty entry with an empty child range list."""
                    self.parent = None
                    self.rp_address = self.expiry_time = self.pim_version = None
                    self.range = YList()
                    self.range.parent = self
                    self.range.name = 'range'
                    self.rp_address_xr = None

                class Range(object):
                    """One group range advertised by a candidate RP.

                    check_point_object_id -- checkpoint object id (uint32)
                    create_type           -- source of the entry (uint8)
                    is_advertised         -- advertised flag (bool)
                    prefix                -- range prefix (IPv4 str)
                    prefix_length         -- range prefix length (uint8)
                    protocol_mode         -- AutorpProtocolModeEnum
                    uptime                -- uptime in seconds (uint64)
                    """
                    _prefix = 'ipv4-autorp-oper'
                    _revision = '2015-11-09'

                    def __init__(self):
                        """Create an empty range entry."""
                        self.parent = None
                        self.check_point_object_id = self.create_type = None
                        self.is_advertised = self.prefix = None
                        self.prefix_length = self.protocol_mode = None
                        self.uptime = None

                    @property
                    def _common_path(self):
                        """Schema path relative to the parent entry; parent must be linked."""
                        parent = self.parent
                        if parent is None:
                            raise YPYModelError('parent is not set . Cannot derive path.')
                        return parent._common_path + '/Cisco-IOS-XR-ipv4-autorp-oper:range'

                    def is_config(self):
                        """Operational data only; never configuration."""
                        return False

                    def _has_data(self):
                        """Report whether any leaf carries a value (guarded by is_config)."""
                        if not self.is_config():
                            return False
                        leaves = (self.check_point_object_id, self.create_type,
                                  self.is_advertised, self.prefix,
                                  self.prefix_length, self.protocol_mode,
                                  self.uptime)
                        return any(leaf is not None for leaf in leaves)

                    @staticmethod
                    def _meta_info():
                        """Look up the generated meta entry for this node."""
                        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_autorp_oper as meta
                        return meta._meta_table['AutoRp.Active.MappingAgent.RpAddresses.RpAddress.Range']['meta_info']

                @property
                def _common_path(self):
                    """Absolute keyed schema path; the rp_address key must be set."""
                    if self.rp_address is None:
                        raise YPYModelError('Key property rp_address is None')
                    key = str(self.rp_address)
                    return ('/Cisco-IOS-XR-ipv4-autorp-oper:auto-rp'
                            '/Cisco-IOS-XR-ipv4-autorp-oper:active'
                            '/Cisco-IOS-XR-ipv4-autorp-oper:mapping-agent'
                            '/Cisco-IOS-XR-ipv4-autorp-oper:rp-addresses'
                            '/Cisco-IOS-XR-ipv4-autorp-oper:rp-address'
                            '[Cisco-IOS-XR-ipv4-autorp-oper:rp-address = ' + key + ']')

                def is_config(self):
                    """Operational data only; never configuration."""
                    return False

                def _has_data(self):
                    """Report whether this entry or any child range carries data."""
                    if not self.is_config():
                        return False
                    simple = (self.rp_address, self.expiry_time,
                              self.pim_version, self.rp_address_xr)
                    if any(leaf is not None for leaf in simple):
                        return True
                    if self.range is not None:
                        return any(child._has_data() for child in self.range)
                    return False

                @staticmethod
                def _meta_info():
                    """Look up the generated meta entry for this node."""
                    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_autorp_oper as meta
                    return meta._meta_table['AutoRp.Active.MappingAgent.RpAddresses.RpAddress']['meta_info']

            @property
            def _common_path(self):
                """Fixed absolute schema path of the rp-addresses container."""
                return ('/Cisco-IOS-XR-ipv4-autorp-oper:auto-rp'
                        '/Cisco-IOS-XR-ipv4-autorp-oper:active'
                        '/Cisco-IOS-XR-ipv4-autorp-oper:mapping-agent'
                        '/Cisco-IOS-XR-ipv4-autorp-oper:rp-addresses')

            def is_config(self):
                """Operational data only; never configuration."""
                return False

            def _has_data(self):
                """Report whether any child entry carries data (guarded by is_config)."""
                if not self.is_config():
                    return False
                if self.rp_address is None:
                    return False
                return any(entry._has_data() for entry in self.rp_address)

            @staticmethod
            def _meta_info():
                """Look up the generated meta entry for this node."""
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_autorp_oper as meta
                return meta._meta_table['AutoRp.Active.MappingAgent.RpAddresses']['meta_info']

        class Summary(object):
            """Mapping-agent cache summary of the active process.

            cache_count          -- mapping entries currently cached (uint32)
            cache_limit          -- maximum mapping entries allowed (uint32)
            is_maximum_disabled  -- True when maximum enforcement is disabled (bool)
            """
            _prefix = 'ipv4-autorp-oper'
            _revision = '2015-11-09'

            def __init__(self):
                """Create an empty summary node."""
                self.parent = None
                self.cache_count = self.cache_limit = None
                self.is_maximum_disabled = None

            @property
            def _common_path(self):
                """Fixed absolute schema path of the summary container."""
                return ('/Cisco-IOS-XR-ipv4-autorp-oper:auto-rp'
                        '/Cisco-IOS-XR-ipv4-autorp-oper:active'
                        '/Cisco-IOS-XR-ipv4-autorp-oper:mapping-agent'
                        '/Cisco-IOS-XR-ipv4-autorp-oper:summary')

            def is_config(self):
                """Operational data only; never configuration."""
                return False

            def _has_data(self):
                """Report whether any summary leaf carries a value."""
                if not self.is_config():
                    return False
                leaves = (self.cache_count, self.cache_limit, self.is_maximum_disabled)
                return any(leaf is not None for leaf in leaves)

            @staticmethod
            def _meta_info():
                """Look up the generated meta entry for this node."""
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_autorp_oper as meta
                return meta._meta_table['AutoRp.Active.MappingAgent.Summary']['meta_info']

        @property
        def _common_path(self):
            """Fixed absolute schema path of the mapping-agent container."""
            return ('/Cisco-IOS-XR-ipv4-autorp-oper:auto-rp'
                    '/Cisco-IOS-XR-ipv4-autorp-oper:active'
                    '/Cisco-IOS-XR-ipv4-autorp-oper:mapping-agent')

        def is_config(self):
            """Operational data only; never configuration."""
            return False

        def _has_data(self):
            """Report whether either child container carries data."""
            if not self.is_config():
                return False
            for child in (self.rp_addresses, self.summary):
                if child is not None and child._has_data():
                    return True
            return False

        @staticmethod
        def _meta_info():
            """Look up the generated meta entry for this node."""
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_autorp_oper as meta
            return meta._meta_table['AutoRp.Active.MappingAgent']['meta_info']

    @property
    def _common_path(self):
        """Fixed absolute schema path of the active process container."""
        return ('/Cisco-IOS-XR-ipv4-autorp-oper:auto-rp'
                '/Cisco-IOS-XR-ipv4-autorp-oper:active')

    def is_config(self):
        """Operational data only; never configuration."""
        return False

    def _has_data(self):
        """Report whether either child table carries data."""
        if not self.is_config():
            return False
        for child in (self.candidate_rps, self.mapping_agent):
            if child is not None and child._has_data():
                return True
        return False

    @staticmethod
    def _meta_info():
        """Look up the generated meta entry for this node."""
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_autorp_oper as meta
        return meta._meta_table['AutoRp.Active']['meta_info']
@property
def _common_path(self):
    """Root schema path of the AutoRP operational tree."""
    path = '/Cisco-IOS-XR-ipv4-autorp-oper:auto-rp'
    return path
def is_config(self):
    """Whether this node holds configuration data (always operational here)."""
    return False
def _has_data(self):
    """Report whether either process container carries data.

    is_config() is False for operational nodes, so the guard short-circuits.
    """
    if not self.is_config():
        return False
    for child in (self.active, self.standby):
        if child is not None and child._has_data():
            return True
    return False
@staticmethod
def _meta_info():
    """Look up the generated meta entry for this node."""
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_autorp_oper as meta
    entry = meta._meta_table['AutoRp']
    return entry['meta_info']
| 46,378 | 13,418 |
import sys
from osgeo import gdal
from osgeo.gdalconst import *
from osgeo import osr
import netCDFreader as ncr
import datetime
import numpy as np
class Goes17Reader:
    """Reads a GOES-17 ABI L1b radiance NetCDF file and exports it as GeoTIFF.

    On construction, the radiance grid, the geostationary projection, the
    fixed-grid extent, the scan timestamp, and the band metadata are all
    read from the file and cached on the instance.
    """

    def __init__(self, nc):
        """Open NetCDF file *nc* and cache everything needed for export.

        :param nc: path to a GOES-17 ABI L1b radiance NetCDF file
        """
        self.dataset = ncr.open(nc, 'NETCDF4')
        self._read_rad()
        self._read_projection()
        self._extension()
        self._read_datetime()
        self._read_band_info()

    def _extension(self):
        """Set the full-disk fixed-grid extent in projection metres.

        Hard-coded as [xmin, ymin, xmax, ymax] for the ABI full-disk grid;
        NOTE(review): assumes every input is a full-disk product — confirm
        before feeding CONUS/mesoscale sector files.
        """
        self.ext = [-5434894.885056, -5434894.885056, 5434894.885056, 5434894.885056]

    def _read_rad(self):
        """Load the 'Rad' variable (ABI L1b radiances) plus its fill/units.

        The array is mirrored and rotated 180 degrees so that it lines up
        with the geotransform written by export_geotiff(). Sets self.rad,
        self.dx/self.dy (grid size), self.nodata and self.rad_units.
        """
        rad = ncr.readVar(self.dataset, 'Rad')
        self.rad = rad[:]
        # Re-orient to match the GeoTIFF geotransform below.
        self.rad = np.flip(self.rad, 1)
        self.rad = np.rot90(self.rad, 2)
        self.dy = self.rad.shape[0]
        self.dx = self.rad.shape[1]
        self.nodata = rad._FillValue
        self.rad_units = str(rad.units)

    def _read_projection(self):
        """Build a proj4 string from the 'goes_imager_projection' variable.

        Uses the geostationary (+proj=geos) projection with the satellite
        height, sub-satellite longitude and ellipsoid axes from the file.
        """
        proj = ncr.readVar(self.dataset, 'goes_imager_projection')
        lon_0 = proj.longitude_of_projection_origin
        height = proj.perspective_point_height
        semi_major = proj.semi_major_axis  # renamed from 'majior_ax' (typo)
        semi_minor = proj.semi_minor_axis
        self.proj4 = "+proj=geos +lon_0=%s +h=%s +x_0=0 +y_0=0 +a=%s +b=%s +units=m" % (
            lon_0, height, semi_major, semi_minor)

    def _read_datetime(self):
        """Derive the mid-scan timestamp as an ISO-like string.

        The 't' variable holds seconds since the epoch named in its units
        attribute (e.g. 'seconds since 2000-01-01 12:00:00'); the epoch is
        parsed from that attribute rather than hard-coded. Fractional
        seconds are truncated.
        """
        t = ncr.readVar(self.dataset, 't')
        tsplit = t.units.split(' ')
        date = tsplit[2]
        time = tsplit[3]
        seconds = int(t[:])  # truncates sub-second precision
        epoch = datetime.datetime.strptime(str(date + 'T' + time), '%Y-%m-%dT%H:%M:%S')
        self.datetime = str(epoch + datetime.timedelta(seconds=seconds))

    def _read_band_info(self):
        """Record the ABI band id and central wavelength (value + units)."""
        self.band_id = str(ncr.readVar(self.dataset, 'band_id')[:][0])
        band_var = ncr.readVar(self.dataset, 'band_wavelength')
        self.band_wavelength = str(band_var[:][0]) + str(band_var.units)

    def export_geotiff(self, output_file):
        """Write the radiance grid to *output_file* as a GeoTIFF.

        Produces a single-band Float32 raster with the geostationary
        projection, geotransform, nodata value and scan/band metadata.

        :param output_file: destination GeoTIFF path
        """
        driver_name = "GTiff"  # was 'format'; renamed to avoid shadowing the builtin
        driver = gdal.GetDriverByName(driver_name)
        dst_ds = driver.Create(output_file, self.dx, self.dy, 1, gdal.GDT_Float32)
        # (origin_x, pixel_w, 0, origin_y, 0, pixel_h) over the fixed-grid extent.
        # NOTE(review): origin is min-y with a positive pixel height, which is
        # south-up unless the flip/rot90 in _read_rad compensates — confirm.
        adfGeoTransform = [
            self.ext[0],
            (self.ext[2] - self.ext[0]) / float(self.dx),
            0.0,
            self.ext[1],
            0.0,
            (self.ext[3] - self.ext[1]) / float(self.dy)
        ]
        dst_ds.SetGeoTransform(adfGeoTransform)
        dst_ds.SetMetadataItem('TIFFTAG_DATETIME', self.datetime, '')
        dst_ds.SetMetadataItem('RAD_UNITS', self.rad_units, '')
        dst_ds.SetMetadataItem('BAND_ID', self.band_id, '')
        dst_ds.SetMetadataItem('BAND_WAVELENGTH', self.band_wavelength, '')
        srs = osr.SpatialReference()
        srs.ImportFromProj4(self.proj4)
        dst_ds.SetProjection(srs.ExportToWkt())
        dst_ds.GetRasterBand(1).SetNoDataValue(float(self.nodata))
        dst_ds.GetRasterBand(1).WriteArray(self.rad)
        # Dereferencing closes the dataset and flushes it to disk.
        dst_ds = None
| 4,922 | 1,751 |