content stringlengths 27 928k | path stringlengths 4 230 | size int64 27 928k | nl_text stringlengths 21 396k | nl_size int64 21 396k | nl_language stringlengths 2 3 | nl_language_score float64 0.04 1 |
|---|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
from .amount import Amount
from .instance import BlockchainInstance
from graphenecommon.account import (
Account as GrapheneAccount,
AccountUpdate as GrapheneAccountUpdate,
)
from bitsharesbase import operations
@BlockchainInstance.inject
class Account(GrapheneAccount):
    """This class allows to easily access Account data.

    :param str account_name: Name of the account
    :param bitshares.bitshares.BitShares blockchain_instance: BitShares
        instance
    :param bool full: Obtain all account data including orders, positions,
        etc.
    :param bool lazy: Use lazy loading
    :returns: Account data
    :rtype: dictionary
    :raises bitshares.exceptions.AccountDoesNotExistsException: if account
        does not exist

    Instances of this class are dictionaries that come with additional
    methods (see below) that allow dealing with an account and its
    corresponding functions.

    .. code-block:: python

        from bitshares.account import Account
        account = Account("init0")
        print(account)

    .. note:: This class comes with its own caching function to reduce the
              load on the API server. Instances of this class can be
              refreshed with ``Account.refresh()``.
    """

    def define_classes(self):
        # Graphene object type id 2 denotes account objects (ids "1.2.x")
        self.type_id = 2
        self.amount_class = Amount
        self.operations = operations

    @property
    def call_positions(self):
        """Alias for :func:bitshares.account.Account.callpositions."""
        # ``callpositions`` is itself a property; the previous implementation
        # additionally *called* its return value (``self.callpositions()``),
        # which raised TypeError. Return the property value directly.
        return self.callpositions

    @property
    def callpositions(self):
        """List call positions (collateralized positions :doc:`mpa`)"""
        self.ensure_full()
        # Imported locally to avoid a circular import at module load time
        from .dex import Dex

        dex = Dex(blockchain_instance=self.blockchain)
        return dex.list_debt_positions(self)

    @property
    def openorders(self):
        """Returns open Orders."""
        from .price import Order

        self.ensure_full()
        return [
            Order(o, blockchain_instance=self.blockchain) for o in self["limit_orders"]
        ]
@BlockchainInstance.inject
class AccountUpdate(GrapheneAccountUpdate):
    """Track account updates pushed through by
    :class:`bitshares.notify.Notify`.

    Instances of this class are dictionaries taking the form:

    .. code-block: js

        {'id': '2.6.29',
         'lifetime_fees_paid': '44261516129',
         'most_recent_op': '2.9.0',
         'owner': '1.2.29',
         'pending_fees': 0,
         'pending_vested_fees': 16310,
         'total_core_in_orders': '6788845277634',
         'total_ops': 0}
    """

    def define_classes(self):
        # Resolve the owner field of updates into full Account objects
        self.account_class = Account
| bitshares/account.py | 2,854 | This class allows to easily access Account data.
:param str account_name: Name of the account
:param bitshares.bitshares.BitShares blockchain_instance: BitShares
instance
:param bool full: Obtain all account data including orders, positions, etc.
:param bool lazy: Use lazy loading
:param bool full: Obtain all account data including orders, positions,
etc.
:returns: Account data
:rtype: dictionary
:raises bitshares.exceptions.AccountDoesNotExistsException: if account
does not exist
Instances of this class are dictionaries that come with additional
methods (see below) that allow dealing with an account and it's
corresponding functions.
.. code-block:: python
from bitshares.account import Account
account = Account("init0")
print(account)
.. note:: This class comes with its own caching function to reduce the
load on the API server. Instances of this class can be
refreshed with ``Account.refresh()``.
This purpose of this class is to keep track of account updates as they are pushed
through by :class:`bitshares.notify.Notify`.
Instances of this class are dictionaries and take the following
form:
.. code-block: js
{'id': '2.6.29',
'lifetime_fees_paid': '44261516129',
'most_recent_op': '2.9.0',
'owner': '1.2.29',
'pending_fees': 0,
'pending_vested_fees': 16310,
'total_core_in_orders': '6788845277634',
'total_ops': 0}
Alias for :func:bitshares.account.Account.callpositions.
List call positions (collateralized positions :doc:`mpa`)
Returns open Orders.
-*- coding: utf-8 -*- | 1,590 | en | 0.720109 |
"""BGEN reader implementation (using bgen_reader)"""
import logging
import tempfile
import time
from pathlib import Path
from typing import (
Any,
Dict,
Hashable,
List,
Mapping,
MutableMapping,
Optional,
Tuple,
Union,
)
import dask
import dask.array as da
import dask.dataframe as dd
import numpy as np
import pandas as pd
import xarray as xr
import zarr
from cbgen import bgen_file, bgen_metafile
from rechunker import api as rechunker_api
from xarray import Dataset
from sgkit import create_genotype_dosage_dataset
from sgkit.io.utils import dataframe_to_dict, encode_contigs
from sgkit.typing import ArrayLike, DType, NDArray, PathType
logger = logging.getLogger(__name__)
GT_DATA_VARS = [
"call_genotype_probability",
"call_genotype_probability_mask",
"call_dosage",
"call_dosage_mask",
]
METAFILE_DTYPE = dict(
[
("id", "S"),
("rsid", "S"),
("chrom", "S"),
("pos", "int32"),
("a1", "S"),
("a2", "S"),
("offset", "int64"),
]
)
class BgenReader:
    """Array-like adapter exposing a BGEN file's genotype probabilities.

    Provides ``shape``, ``dtype``, ``ndim`` and ``__getitem__`` so the reader
    can be wrapped by ``dask.array.from_array``. The virtual array has shape
    (variants, samples, 3), where the trailing axis holds the three biallelic
    genotype probabilities.
    """

    # Label used for the dask array name created from this reader
    name = "bgen_reader"

    def __init__(
        self,
        path: PathType,
        metafile_path: Optional[PathType] = None,
        dtype: DType = "float32",
    ) -> None:
        self.path = Path(path)
        # Default metafile location is the BGEN path with a .metafile suffix
        self.metafile_path = (
            Path(metafile_path) if metafile_path else self.path.with_suffix(".metafile")
        )
        with bgen_file(self.path) as bgen:
            self.n_variants = bgen.nvariants
            self.n_samples = bgen.nsamples
            # The metafile (per-variant byte offsets) is required for random
            # access; generate it once on first read, which can be slow.
            if not self.metafile_path.exists():
                start = time.time()
                logger.info(
                    f"Generating BGEN metafile for '{self.path}' (this may take a while)"
                )
                bgen.create_metafile(self.metafile_path, verbose=False)
                stop = time.time()
                logger.info(
                    f"BGEN metafile generation complete ({stop - start:.0f} seconds)"
                )
        with bgen_metafile(self.metafile_path) as mf:
            assert self.n_variants == mf.nvariants
            self.npartitions = mf.npartitions
            self.partition_size = mf.partition_size
        # Trailing dimension is fixed at 3 genotype probabilities
        # (biallelic, diploid data only)
        self.shape = (self.n_variants, self.n_samples, 3)
        self.dtype = np.dtype(dtype)
        # cbgen reads at 32- or 64-bit precision; match the requested dtype
        self.precision = 64 if self.dtype.itemsize >= 8 else 32
        self.ndim = 3

    def __getitem__(self, idx: Any) -> NDArray:
        """Read a (variants, samples, genotypes) selection from the file.

        ``idx`` must be a 3-tuple of ints and/or slices; integer indexers
        behave like numpy's (the indexed dimension is dropped in the result).
        """
        if not isinstance(idx, tuple):
            raise IndexError(f"Indexer must be tuple (received {type(idx)})")
        if len(idx) != self.ndim:
            raise IndexError(
                f"Indexer must have {self.ndim} items (received {len(idx)} slices)"
            )
        if not all(isinstance(i, slice) or isinstance(i, int) for i in idx):
            raise IndexError(
                f"Indexer must contain only slices or ints (received types {[type(i) for i in idx]})"
            )
        # Determine which dims should have unit size in result
        squeeze_dims = tuple(i for i in range(len(idx)) if isinstance(idx[i], int))
        # Convert all indexers to slices
        idx = tuple(slice(i, i + 1) if isinstance(i, int) else i for i in idx)
        # Empty variant selection short-circuits to an empty array
        if idx[0].start == idx[0].stop:
            return np.empty((0,) * self.ndim, dtype=self.dtype)
        # Determine start and end partitions that correspond to the
        # given variant dimension indexer
        start_partition = idx[0].start // self.partition_size
        start_partition_offset = idx[0].start % self.partition_size
        end_partition = (idx[0].stop - 1) // self.partition_size
        end_partition_offset = (idx[0].stop - 1) % self.partition_size
        # Create a list of all offsets into the underlying file at which
        # data for each variant begins
        all_vaddr = []
        with bgen_metafile(self.metafile_path) as mf:
            for i in range(start_partition, end_partition + 1):
                partition = mf.read_partition(i)
                # Clip the first/last partitions to the requested variant range
                start_offset = start_partition_offset if i == start_partition else 0
                end_offset = (
                    end_partition_offset + 1
                    if i == end_partition
                    else self.partition_size
                )
                vaddr = partition.variants.offset
                all_vaddr.extend(vaddr[start_offset:end_offset].tolist())
        # Read the probabilities for each variant, apply indexer for
        # samples dimension to give probabilities for all genotypes,
        # and then apply final genotype dimension indexer
        with bgen_file(self.path) as bgen:
            res = None
            for i, vaddr in enumerate(all_vaddr):
                probs = bgen.read_probability(vaddr, precision=self.precision)[idx[1]]
                assert len(probs.shape) == 2 and probs.shape[1] == 3
                if res is None:
                    # Allocate once the post-indexing sample count is known
                    res = np.zeros((len(all_vaddr), len(probs), 3), dtype=self.dtype)
                res[i] = probs
            res = res[..., idx[2]]  # type: ignore[index]
            return np.squeeze(res, axis=squeeze_dims)
def _split_alleles(allele_ids: bytes) -> List[bytes]:
alleles = allele_ids.split(b",")
if len(alleles) != 2:
raise NotImplementedError(
f"Bgen reads only supported for biallelic variants (found non-biallelic variant '{str(allele_ids)}')"
)
return alleles
def _read_metafile_partition(path: Path, partition: int) -> pd.DataFrame:
    """Load one partition of variant metadata from a cbgen metafile.

    Returns a DataFrame with the METAFILE_DTYPE columns for every variant
    in the given partition.
    """
    with bgen_metafile(path) as mf:
        variants = mf.read_partition(partition).variants
        # Each allele_ids entry is b"a1,a2"; split into two columns
        alleles = np.array([_split_alleles(aid) for aid in variants.allele_ids])
        frame = pd.DataFrame(
            {
                "id": variants.id,
                "rsid": variants.rsid,
                "chrom": variants.chromosome,
                "pos": variants.position,
                "a1": alleles[:, 0],
                "a2": alleles[:, 1],
                "offset": variants.offset,
            }
        )
    return frame.astype(METAFILE_DTYPE)
def read_metafile(path: PathType) -> dd.DataFrame:
    """Read cbgen metafile containing partitioned variant info"""
    with bgen_metafile(path) as mf:
        n_partitions = mf.npartitions
        # Dask divisions: first row index of each partition plus the last index
        divisions = [mf.partition_size * i for i in range(n_partitions)]
        divisions.append(mf.nvariants - 1)
    delayed_parts = [
        dask.delayed(_read_metafile_partition)(path, i) for i in range(n_partitions)
    ]
    meta = dd.utils.make_meta(METAFILE_DTYPE)
    return dd.from_delayed(delayed_parts, meta=meta, divisions=divisions)
def read_samples(path: PathType) -> pd.DataFrame:
    """Read BGEN .sample file.

    Loads only the first (identifier) column; the second header row of the
    .sample format (the column-type row) is skipped.
    """
    first_column = pd.read_csv(path, sep=" ", skiprows=[1], usecols=[0])
    return first_column.set_axis(["sample_id"], axis="columns")
def read_bgen(
    path: PathType,
    metafile_path: Optional[PathType] = None,
    sample_path: Optional[PathType] = None,
    chunks: Union[str, int, Tuple[int, int, int]] = "auto",
    lock: bool = False,
    persist: bool = True,
    contig_dtype: DType = "str",
    gp_dtype: DType = "float32",
) -> Dataset:
    """Read BGEN dataset.

    Loads a single BGEN dataset as dask arrays within a Dataset
    from a ``.bgen`` file.

    Parameters
    ----------
    path
        Path to BGEN file.
    metafile_path
        Path to companion index file used to determine BGEN byte offsets.
        Defaults to ``path`` + ".metafile" if not provided.
        This file is necessary for reading BGEN genotype probabilities and it will be
        generated the first time the file is read if it does not already exist.
        If it needs to be created, it can make the first call to this function
        much slower than subsequent calls.
    sample_path
        Path to ``.sample`` file, by default None. This is used to fetch sample identifiers
        and when provided it is preferred over sample identifiers embedded in the ``.bgen`` file.
    chunks
        Chunk size for genotype probability data (3 dimensions),
        by default "auto".
    lock
        Whether or not to synchronize concurrent reads of
        file blocks, by default False. This is passed through to
        [dask.array.from_array](https://docs.dask.org/en/latest/array-api.html#dask.array.from_array).
    persist
        Whether or not to persist variant information in memory, by default True.
        This is an important performance consideration as the metadata file for this data will
        be read multiple times when False.
    contig_dtype
        Data type for contig names, by default "str".
        This may also be an integer type (e.g. "int"), but will fail if any of the contig names
        cannot be converted to integers.
    gp_dtype
        Data type for genotype probabilities, by default "float32".

    Warnings
    --------
    Only bi-allelic, diploid BGEN files are currently supported.

    Returns
    -------
    A dataset containing the following variables:

    - :data:`sgkit.variables.variant_id_spec` (variants)
    - :data:`sgkit.variables.variant_contig_spec` (variants)
    - :data:`sgkit.variables.variant_position_spec` (variants)
    - :data:`sgkit.variables.variant_allele_spec` (variants)
    - :data:`sgkit.variables.sample_id_spec` (samples)
    - :data:`sgkit.variables.call_dosage_spec` (variants, samples)
    - :data:`sgkit.variables.call_dosage_mask_spec` (variants, samples)
    - :data:`sgkit.variables.call_genotype_probability_spec` (variants, samples, genotypes)
    - :data:`sgkit.variables.call_genotype_probability_mask_spec` (variants, samples, genotypes)
    """
    # Validate arguments up front, before any (potentially slow) file access
    if isinstance(chunks, tuple) and len(chunks) != 3:
        raise ValueError(f"`chunks` must be tuple with 3 items, not {chunks}")
    if not np.issubdtype(gp_dtype, np.floating):
        raise ValueError(
            f"`gp_dtype` must be a floating point data type, not {gp_dtype}"
        )
    if not np.issubdtype(contig_dtype, np.integer) and np.dtype(
        contig_dtype
    ).kind not in {"U", "S"}:
        raise ValueError(
            f"`contig_dtype` must be of string or int type, not {contig_dtype}"
        )
    path = Path(path)
    # Prefer sample ids from a .sample file (explicit or alongside the BGEN);
    # otherwise fall back to ids embedded in the BGEN itself
    sample_path = Path(sample_path) if sample_path else path.with_suffix(".sample")
    if sample_path.exists():
        sample_id = read_samples(sample_path).sample_id.values.astype("U")
    else:
        sample_id = _default_sample_ids(path)
    # Constructing the reader creates the metafile on first use (can be slow)
    bgen_reader = BgenReader(path, metafile_path=metafile_path, dtype=gp_dtype)
    df = read_metafile(bgen_reader.metafile_path)
    if persist:
        df = df.persist()
    arrs = dataframe_to_dict(df, METAFILE_DTYPE)
    variant_id = arrs["id"]
    variant_contig: ArrayLike = arrs["chrom"].astype(contig_dtype)
    variant_contig, variant_contig_names = encode_contigs(variant_contig)
    variant_contig_names = list(variant_contig_names)
    variant_position = arrs["pos"]
    # Stack the two allele columns into a (variants, 2) array
    variant_allele = da.hstack((arrs["a1"][:, np.newaxis], arrs["a2"][:, np.newaxis]))
    # Lazily wrap the BgenReader; actual file reads happen per dask chunk
    call_genotype_probability = da.from_array(
        bgen_reader,
        chunks=chunks,
        lock=lock,
        fancy=False,
        asarray=False,
        name=f"{bgen_reader.name}:read_bgen:{path}",
    )
    call_dosage = _to_dosage(call_genotype_probability)
    ds: Dataset = create_genotype_dosage_dataset(
        variant_contig_names=variant_contig_names,
        variant_contig=variant_contig,
        variant_position=variant_position,
        variant_allele=variant_allele,
        sample_id=sample_id,
        call_dosage=call_dosage,
        call_genotype_probability=call_genotype_probability,
        variant_id=variant_id,
    )
    return ds
def _default_sample_ids(path: PathType) -> ArrayLike:
    """Fetch sample ids embedded in the BGEN, or synthesize "sample_<i>" names."""
    with bgen_file(path) as bgen:
        if not bgen.contain_samples:
            # No embedded ids: generate b"sample_0", b"sample_1", ...
            return np.char.add(b"sample_", np.arange(bgen.nsamples).astype("S"))  # type: ignore[no-untyped-call]
        return bgen.read_samples()
def _to_dosage(probs: ArrayLike) -> ArrayLike:
"""Calculate the dosage from genotype likelihoods (probabilities)"""
assert (
probs.shape[-1] == 3
), f"Expecting genotype (trailing) dimension of size 3, got array of shape {probs.shape}"
return probs[..., 1] + 2 * probs[..., 2]
########################
# Rechunking Functions #
########################
def encode_variables(
    ds: Dataset,
    chunk_length: int,
    chunk_width: int,
    compressor: Optional[Any] = zarr.Blosc(cname="zstd", clevel=7, shuffle=2),
    probability_dtype: Optional[Any] = "uint8",
) -> Dict[Hashable, Dict[str, Any]]:
    """Build per-variable Zarr encoding settings for a BGEN dataset.

    Genotype data variables get explicit chunking; genotype probabilities may
    additionally be quantized to a small unsigned integer dtype via xarray's
    scale/offset encoding.
    """
    encoding: Dict[Hashable, Dict[str, Any]] = {}
    for name in ds:
        settings: Dict[str, Any] = {}
        if compressor is not None:
            settings["compressor"] = compressor
        if name in GT_DATA_VARS:
            settings["chunks"] = (chunk_length, chunk_width) + ds[name].shape[2:]
        if probability_dtype is not None and name == "call_genotype_probability":
            dtype = np.dtype(probability_dtype)
            # Xarray will decode into float32 so any int greater than
            # 16 bits will cause overflow/underflow
            # See https://en.wikipedia.org/wiki/Floating-point_arithmetic#Internal_representation
            # *bits precision column for single precision floats
            if dtype not in [np.uint8, np.uint16]:  # type: ignore[comparison-overlap]
                raise ValueError(
                    "Probability integer dtype invalid, must "
                    f"be uint8 or uint16 not {probability_dtype}"
                )
            # Reserve 0 as the fill value; map (0, 1] onto the remaining codes
            divisor = np.iinfo(dtype).max - 1
            settings["dtype"] = probability_dtype
            settings["add_offset"] = -1.0 / divisor
            settings["scale_factor"] = 1.0 / divisor
            settings["_FillValue"] = 0
        if settings:
            encoding[name] = settings
    return encoding
def pack_variables(ds: Dataset) -> Dataset:
    """Reduce a BGEN dataset to a minimal representation prior to storage.

    Drops dosage (recomputed from encoded probabilities on read), drops the
    homozygous-reference probability (recoverable since the three sum to 1),
    and collapses the probability mask over the genotypes dimension.
    """
    # Remove dosage as it is unnecessary and should be redefined
    # based on encoded probabilities later (w/ reduced precision)
    ds = ds.drop_vars(["call_dosage", "call_dosage_mask"], errors="ignore")
    # Remove homozygous reference GP and redefine mask
    packed_gp = ds["call_genotype_probability"][..., 1:]
    packed_mask = ds["call_genotype_probability_mask"].any(dim="genotypes")
    return ds.drop_vars(
        ["call_genotype_probability", "call_genotype_probability_mask"]
    ).assign(
        call_genotype_probability=packed_gp,
        call_genotype_probability_mask=packed_mask,
    )
def unpack_variables(ds: Dataset, dtype: DType = "float32") -> Dataset:
    """Invert :func:`pack_variables`: restore the full genotype probability
    array (3 genotypes), the dosage variables, and the 3-D probability mask.
    """
    # Restore homozygous reference GP
    gp = ds["call_genotype_probability"].astype(dtype)
    if gp.sizes["genotypes"] != 2:
        raise ValueError(
            "Expecting variable 'call_genotype_probability' to have genotypes "
            f"dimension of size 2 (received sizes = {dict(gp.sizes)})"
        )
    ds = ds.drop_vars("call_genotype_probability")
    # P(hom-ref) = 1 - P(het) - P(hom-alt); skipna=False keeps missing calls missing
    ds["call_genotype_probability"] = xr.concat(
        [1 - gp.sum(dim="genotypes", skipna=False), gp], dim="genotypes"
    )
    # Restore dosage
    # Here gp[..., 0] is P(het) and gp[..., 1] is P(hom-alt) in packed form
    ds["call_dosage"] = gp[..., 0] + 2 * gp[..., 1]
    ds["call_dosage_mask"] = ds["call_genotype_probability_mask"]
    # Expand the collapsed 2-D mask back over the genotypes dimension
    ds["call_genotype_probability_mask"] = ds[
        "call_genotype_probability_mask"
    ].broadcast_like(ds["call_genotype_probability"])
    return ds
def rechunk_bgen(
    ds: Dataset,
    output: Union[PathType, MutableMapping[str, bytes]],
    *,
    chunk_length: int = 10_000,
    chunk_width: int = 1_000,
    compressor: Optional[Any] = zarr.Blosc(cname="zstd", clevel=7, shuffle=2),
    probability_dtype: Optional[DType] = "uint8",
    max_mem: str = "4GB",
    pack: bool = True,
    tempdir: Optional[PathType] = None,
) -> Dataset:
    """Rechunk BGEN dataset as Zarr.

    This function will use the algorithm https://rechunker.readthedocs.io/en/latest/
    to rechunk certain fields in a provided Dataset for better downstream performance.
    Depending on the system memory available (and the `max_mem` setting) this
    rechunking may occur without the need of any intermediate data store. Otherwise,
    approximately as much disk space is required as was needed to store the original
    BGEN data. Experiments show that this Zarr representation is ~20% larger even
    with all available optimizations and fairly aggressive compression (i.e. the
    default `clevel` 7).

    Note that this function is not evaluated lazily. The rechunking algorithm
    will run inline so calls to it may be slow. The resulting Dataset is
    generated based on the final, serialized Zarr data.

    Parameters
    ----------
    ds
        Dataset to rechunk, typically the result from `read_bgen`.
    output
        Zarr store or path to directory in file system.
    chunk_length
        Length (number of variants) of chunks in which data are stored, by default 10_000.
    chunk_width
        Width (number of samples) to use when storing chunks in output, by default 1_000.
    compressor
        Zarr compressor, no compression is used when set as None.
    probability_dtype
        Data type used to encode genotype probabilities, must be either uint8 or uint16.
        Setting this parameter results in a loss of precision. If None, probabilities
        will not be altered when stored.
    max_mem
        The amount of memory (in bytes) that workers are allowed to use. A string
        (e.g. 100MB) can also be used.
    pack
        Whether or not to optimize variable representations by removing unnecessary
        dimensions and elements. This includes storing 2 genotypes instead of 3, omitting
        dosage and collapsing the genotype probability mask to 2 dimensions. All of
        the above are restored in the resulting Dataset at the expense of extra
        computations on read.
    tempdir
        Temporary directory where intermediate files are stored. The default None means
        use the system default temporary directory.

    Warnings
    --------
    This functional is only applicable to diploid, bi-allelic BGEN datasets.

    Returns
    -------
    Dataset
        The rechunked dataset.
    """
    if isinstance(output, Path):
        output = str(output)
    # Never request chunks larger than the data itself
    chunk_length = min(chunk_length, ds.dims["variants"])
    chunk_width = min(chunk_width, ds.dims["samples"])
    if pack:
        ds = pack_variables(ds)
    encoding = encode_variables(
        ds,
        chunk_length=chunk_length,
        chunk_width=chunk_width,
        compressor=compressor,
        probability_dtype=probability_dtype,
    )
    # rechunker takes chunk layout separately from the other encoding options
    target_chunks = {
        var: encoding[var]["chunks"] for var in encoding if "chunks" in encoding[var]
    }
    target_options = {
        var: {k: v for k, v in encoding[var].items() if k != "chunks"}
        for var in encoding
    }
    with tempfile.TemporaryDirectory(
        prefix="bgen_to_zarr_", suffix=".zarr", dir=tempdir
    ) as tmpdir:
        rechunked = rechunker_api.rechunk(
            ds,
            max_mem=max_mem,
            target_chunks=target_chunks,
            target_store=output,
            target_options=target_options,
            temp_store=tmpdir,
            executor="dask",
        )
        # Not lazy: this runs the whole rechunking pipeline inline
        rechunked.execute()
    # Consolidate metadata so the store can be reopened efficiently
    zarr.consolidate_metadata(output)
    # Re-open the serialized store; this (not the input) is the canonical result
    ds: Dataset = xr.open_zarr(output, concat_characters=False)  # type: ignore[no-untyped-call]
    if pack:
        ds = unpack_variables(ds)
    return ds
def bgen_to_zarr(
    input: PathType,
    output: Union[PathType, MutableMapping[str, bytes]],
    region: Optional[Mapping[Hashable, Any]] = None,
    chunk_length: int = 10_000,
    chunk_width: int = 1_000,
    temp_chunk_length: int = 100,
    compressor: Optional[Any] = zarr.Blosc(cname="zstd", clevel=7, shuffle=2),
    probability_dtype: Optional[DType] = "uint8",
    max_mem: str = "4GB",
    pack: bool = True,
    tempdir: Optional[PathType] = None,
) -> Dataset:
    """Convert a BGEN file to a Zarr on-disk store.

    Convenience wrapper calling :func:`read_bgen` followed by
    :func:`rechunk_bgen`.

    Parameters
    ----------
    input
        Path to local BGEN dataset.
    output
        Zarr store or path to directory in file system.
    region
        Indexers on dataset dimensions selecting a subset of data to convert;
        passed directly to `Dataset.isel`, so it has the same semantics.
    chunk_length
        Length (number of variants) of chunks in the output, by default 10_000.
    chunk_width
        Width (number of samples) of chunks in the output, by default 1_000.
    temp_chunk_length
        Variant-dimension chunking used for the raw BGEN read, by default 100.
        There is no sample-dimension chunking at that stage, so this value
        should be much smaller than the target `chunk_length`.
    compressor
        Zarr compressor, by default Blosc + zstd at compression level 7.
        No compression is used when set as None.
    probability_dtype
        Integer dtype (uint8 or uint16) used to encode genotype probabilities,
        at a loss of precision; None stores probabilities unaltered.
    max_mem
        The amount of memory (in bytes) that workers are allowed to use.
        A string (e.g. 100MB) can also be used.
    pack
        Whether to optimize variable representations (2 genotypes instead of
        3, no dosage, 2-D probability mask); everything is restored in the
        resulting Dataset at the expense of extra computations on read.
    tempdir
        Directory for intermediate files; None means the system default
        temporary directory.

    Warnings
    --------
    This functional is only applicable to diploid, bi-allelic BGEN datasets.

    Returns
    -------
    Dataset
        The rechunked dataset.
    """
    dataset = read_bgen(input, chunks=(temp_chunk_length, -1, -1))
    if region is not None:
        dataset = dataset.isel(indexers=region)
    return rechunk_bgen(
        dataset,
        output,
        chunk_length=chunk_length,
        chunk_width=chunk_width,
        compressor=compressor,
        probability_dtype=probability_dtype,
        max_mem=max_mem,
        pack=pack,
        tempdir=tempdir,
    )
| sgkit/io/bgen/bgen_reader.py | 22,582 | Fetch or generate sample ids
Calculate the dosage from genotype likelihoods (probabilities)
Convert a BGEN file to a Zarr on-disk store.
This function is a convenience for calling :func:`read_bgen` followed by
:func:`rechunk_bgen`.
Parameters
----------
input
Path to local BGEN dataset.
output
Zarr store or path to directory in file system.
region
Indexers on dataset dimensions used to define a subset of data to convert.
Must be None or a dict with keys matching dimension names and values
equal to integers or slice objects. This is passed directly to `Dataset.isel`
so it has the same semantics.
chunk_length
Length (number of variants) of chunks in which data are stored, by default 10_000.
chunk_width
Width (number of samples) to use when storing chunks in output, by default 1_000.
temp_chunk_length
Length of chunks used in raw BGEN read, by default 100. This defines the vertical
chunking (i.e. in the variants dimension) used when reading the raw data and because
there is no horizontal chunking at this phase (i.e. in the samples dimension), this
value should be much smaller than the target `chunk_length`.
compressor
Zarr compressor, by default Blosc + zstd with compression level 7. No compression
is used when set as None.
probability_dtype
Data type used to encode genotype probabilities, must be either uint8 or uint16.
Setting this parameter results in a loss of precision. If None, probabilities
will not be altered when stored.
max_mem
The amount of memory (in bytes) that workers are allowed to use. A string
(e.g. 100MB) can also be used.
pack
Whether or not to optimize variable representations by removing unnecessary
dimensions and elements. This includes storing 2 genotypes instead of 3, omitting
dosage and collapsing the genotype probability mask to 2 dimensions. All of
the above are restored in the resulting Dataset at the expense of extra
computations on read.
tempdir
Temporary directory where intermediate files are stored. The default None means
use the system default temporary directory.
Warnings
--------
This functional is only applicable to diploid, bi-allelic BGEN datasets.
Returns
-------
Dataset
The rechunked dataset.
Read BGEN dataset.
Loads a single BGEN dataset as dask arrays within a Dataset
from a ``.bgen`` file.
Parameters
----------
path
Path to BGEN file.
metafile_path
Path to companion index file used to determine BGEN byte offsets.
Defaults to ``path`` + ".metafile" if not provided.
This file is necessary for reading BGEN genotype probabilities and it will be
generated the first time the file is read if it does not already exist.
If it needs to be created, it can make the first call to this function
much slower than subsequent calls.
sample_path
Path to ``.sample`` file, by default None. This is used to fetch sample identifiers
and when provided it is preferred over sample identifiers embedded in the ``.bgen`` file.
chunks
Chunk size for genotype probability data (3 dimensions),
by default "auto".
lock
Whether or not to synchronize concurrent reads of
file blocks, by default False. This is passed through to
[dask.array.from_array](https://docs.dask.org/en/latest/array-api.html#dask.array.from_array).
persist
Whether or not to persist variant information in memory, by default True.
This is an important performance consideration as the metadata file for this data will
be read multiple times when False.
contig_dtype
Data type for contig names, by default "str".
This may also be an integer type (e.g. "int"), but will fail if any of the contig names
cannot be converted to integers.
gp_dtype
Data type for genotype probabilities, by default "float32".
Warnings
--------
Only bi-allelic, diploid BGEN files are currently supported.
Returns
-------
A dataset containing the following variables:
- :data:`sgkit.variables.variant_id_spec` (variants)
- :data:`sgkit.variables.variant_contig_spec` (variants)
- :data:`sgkit.variables.variant_position_spec` (variants)
- :data:`sgkit.variables.variant_allele_spec` (variants)
- :data:`sgkit.variables.sample_id_spec` (samples)
- :data:`sgkit.variables.call_dosage_spec` (variants, samples)
- :data:`sgkit.variables.call_dosage_mask_spec` (variants, samples)
- :data:`sgkit.variables.call_genotype_probability_spec` (variants, samples, genotypes)
- :data:`sgkit.variables.call_genotype_probability_mask_spec` (variants, samples, genotypes)
Read cbgen metafile containing partitioned variant info
Read BGEN .sample file
Rechunk BGEN dataset as Zarr.
This function will use the algorithm https://rechunker.readthedocs.io/en/latest/
to rechunk certain fields in a provided Dataset for better downstream performance.
Depending on the system memory available (and the `max_mem` setting) this
rechunking may occur without the need of any intermediate data store. Otherwise,
approximately as much disk space is required as was needed to store the original
BGEN data. Experiments show that this Zarr representation is ~20% larger even
with all available optimizations and fairly aggressive compression (i.e. the
default `clevel` 7).
Note that this function is not evaluated lazily. The rechunking algorithm
will run inline so calls to it may be slow. The resulting Dataset is
generated based on the final, serialized Zarr data.
Parameters
----------
ds
Dataset to rechunk, typically the result from `read_bgen`.
output
Zarr store or path to directory in file system.
chunk_length
Length (number of variants) of chunks in which data are stored, by default 10_000.
chunk_width
Width (number of samples) to use when storing chunks in output, by default 1_000.
compressor
Zarr compressor, no compression is used when set as None.
probability_dtype
Data type used to encode genotype probabilities, must be either uint8 or uint16.
Setting this parameter results in a loss of precision. If None, probabilities
will not be altered when stored.
max_mem
The amount of memory (in bytes) that workers are allowed to use. A string
(e.g. 100MB) can also be used.
pack
Whether or not to optimize variable representations by removing unnecessary
dimensions and elements. This includes storing 2 genotypes instead of 3, omitting
dosage and collapsing the genotype probability mask to 2 dimensions. All of
the above are restored in the resulting Dataset at the expense of extra
computations on read.
tempdir
Temporary directory where intermediate files are stored. The default None means
use the system default temporary directory.
Warnings
--------
This functional is only applicable to diploid, bi-allelic BGEN datasets.
Returns
-------
Dataset
The rechunked dataset.
BGEN reader implementation (using bgen_reader)
Determine which dims should have unit size in result Convert all indexers to slices Determine start and end partitions that correspond to the given variant dimension indexer Create a list of all offsets into the underlying file at which data for each variant begins Read the probabilities for each variant, apply indexer for samples dimension to give probabilities for all genotypes, and then apply final genotype dimension indexer type: ignore[index] type: ignore[no-untyped-call] Rechunking Functions Xarray will decode into float32 so any int greater than 16 bits will cause overflow/underflow See https://en.wikipedia.org/wiki/Floating-point_arithmeticInternal_representation *bits precision column for single precision floats type: ignore[comparison-overlap] Remove dosage as it is unnecessary and should be redefined based on encoded probabilities later (w/ reduced precision) Remove homozygous reference GP and redefine mask Restore homozygous reference GP Restore dosage type: ignore[no-untyped-call] | 7,902 | en | 0.7659 |
"""Clipboard daemon: watches the clipboard and replaces any copied URL
with a bitly short link. Requires the BITLY_TOKEN environment variable."""
import os
import time

import pyperclip
import requests

# bitly v3 shorten endpoint; longUrl is passed via `params` so requests
# performs the percent-encoding that urllib.parse.quote did previously.
BITLY_ENDPOINT = "https://api-ssl.bitly.com/v3/shorten"
# polling interval while waiting for the clipboard to change
POLL_SECONDS = 0.1


def _shorten(long_url, token):
    """Return the bitly short URL for ``long_url``, or None on failure.

    Any requests-level error (e.g. the clipboard text is not a link) is
    treated as "nothing to do"; other exceptions propagate.
    """
    try:
        payload = requests.get(
            BITLY_ENDPOINT,
            params={"access_token": token, "longUrl": long_url},
        ).json()
    except requests.exceptions.RequestException:
        # Base class of every requests error; replaces the original
        # inspect.getmembers() scan over requests.exceptions.
        return None
    if payload.get("status_txt") == "OK":
        return payload["data"]["url"]
    return None


def main():
    """Run the clipboard-watching daemon loop."""
    token = os.environ.get("BITLY_TOKEN")
    if not token:
        # Fail fast with a clear message instead of a TypeError when the
        # missing token is concatenated into the request URL.
        raise SystemExit("BITLY_TOKEN environment variable is not set.")
    while True:
        clipboard = pyperclip.paste()
        short_url = _shorten(clipboard, token)
        if short_url is not None:
            pyperclip.copy(short_url)
        # wait until the clipboard changes
        while pyperclip.paste() == clipboard:
            time.sleep(POLL_SECONDS)


if __name__ == "__main__":
    main()
| autoshort.py | 1,294 | a list of the request error classes main daemon loop get clipboard value percent encode the clipboard value bitly API access token URL that will make the API call get the json return from the API call if everything went as planned if something went wrong with the request, i.e. not a link wait until the clipboard changes | 321 | en | 0.800042 |
# source ./venv/bin/activate
# ===============================================================
# =============================COOL==============================
# ===============================================================
import sys
from general import errors
# import os
# basedir = os.path.abspath(os.path.dirname(__file__))
# ===============================================================
def main():
    """Driver for the COOL compiler.

    Usage: coolc [-o fileout.asm] file1.cl [file2.cl ...]

    Reads and concatenates all given .cl sources (in order), then runs the
    pipeline lexer -> parser -> semantic analysis -> CIL -> MIPS and writes
    the resulting assembly to `fileout` (default: first input name with the
    .cl suffix replaced by .asm). All failures are reported through
    errors.throw_error, which is expected not to return.
    """
    # TAKE THE INPUT
    programs = sys.argv[1:]
    # CHECK IF AT LEAST ONE FILE IS GIVEN
    if len(programs) == 0:
        errors.throw_error(errors.CompilerError(text="No file is given to coolc compiler."))
    # CHECK IF FILEOUT IS GIVEN
    if programs[0] == '-o':
        if len(programs) == 1:
            errors.throw_error(errors.CompilerError(text="No fileout is given to coolc compiler."))
        fileout = programs[1]
        if not str(fileout).endswith(".asm"):
            errors.throw_error(errors.CompilerError(text="Fileout must end with .asm extension."))
        if len(programs) == 2:
            errors.throw_error(errors.CompilerError(text="No file is given to coolc compiler."))
        programs = programs[2:]
    else:
        # No -o flag: derive the output name from the first source file.
        fileout = programs[0].split(".cl")[0] + ".asm"
    # Check all programs have the *.cl extension.
    for program in programs:
        if not str(program).endswith(".cl"):
            errors.throw_error(errors.CompilerError(text="Cool program files must end with a .cl extension."))
    code = ""
    # Read all program source codes (concatenated in the order given).
    for program in programs:
        try:
            with open(program, encoding="utf-8") as file:
                code += file.read() + '\n'
        except (IOError, FileNotFoundError):
            errors.throw_error(errors.CompilerError(text=f'File "{program}" was not found.'))
        except Exception:
            errors.throw_error(errors.CompilerError(text="An unexpected error occurred!"))
    print(f"Compiling file '{fileout}'...")
    # ===============================================================
    # =====================LEXICAL-ANALYSIS==========================
    # ===============================================================
    from lexicography.lexer_rules import CoolLex
    # BUILD THE LEXER
    lexer = CoolLex()
    lexer.build()
    # ===============================================================
    # ===============================================================
    # ====================SYNTACTIC-ANALYSIS=========================
    # ===============================================================
    from lexicography.grammar_rules import CoolParse
    # BUILD THE PARSER
    parser = CoolParse(lexer)
    parser.build()
    program_ast = parser.parse(code)
    # ===============================================================
    # ===============================================================
    # =====================SEMANTIC-ANALYSIS=========================
    # ===============================================================
    from semantic.type_collector import TypeCollectorVisitor
    from semantic.type_builder import TypeBuilderVisitor
    from semantic.type_checker import TypeCheckerVisitor
    # from semantic.ast_types_painter import Painter
    # First pass: collect all declared class names.
    typeCollector = TypeCollectorVisitor()
    typeCollector.visit(program_ast)
    # Second pass: fill in attributes and methods for each collected type.
    typeBuilder = TypeBuilderVisitor(typeCollector.enviroment)
    typeBuilder.visit(program_ast)
    ## CHECK SEMANTIC ERRORS IN THE ENVIRONMENT (check_main, cycles and inheritance rules)
    final_enviroment = typeBuilder.enviroment
    final_enviroment.build_types_graph()
    type_checker = TypeCheckerVisitor()
    type_checker.visit(program_ast, typeBuilder.enviroment)
    typed_ast = program_ast
    # ast_painter = Painter()
    # print(ast_painter.visit(typed_ast, 0))
    # ===============================================================
    # ===============================================================
    # ========================CODE-GENERATION========================
    # ===============================================================
    # COOL --> CIL
    from generation.cil.cil_generator import CilGeneratorVisitor
    # from general.cil_hierarchy import get_formatter
    cil_code_generator = CilGeneratorVisitor(typed_ast, typeBuilder.enviroment)
    ast_cil = cil_code_generator.generate_code()
    # cil_painter = get_formatter()
    # print(cil_painter(ast_cil))
    # CIL --> MIPS
    from generation.mips.mips_writer import MIPSWriterVisitor
    from operator import itemgetter
    # Build the parent-pointer table: hierarchy[type_id] == parent_type_id
    # (index 0, the root, keeps parent 0).
    types_ids = typeBuilder.enviroment.types_dict
    hierarchy = [0]*len(types_ids)
    for _type in typeBuilder.enviroment.types_list[1:]:
        hierarchy[types_ids[_type.name]] = types_ids[_type.parent]
    # tag_names = sorted(types_ids.items(), key=itemgetter(1))
    ast_cil.typesHierarchy = hierarchy
    # ast_cil.tag_names = tag_names
    mips_code_generator = MIPSWriterVisitor(ast_cil, fileout)
    mips_code_generator.generate_Mips()
if __name__ == '__main__':
    main()
| src/coolc.py | 5,198 | source ./venv/bin/activate =============================================================== =============================COOL============================== =============================================================== import os basedir = os.path.abspath(os.path.dirname(__file__)) =============================================================== TAKE THE INPUT CHECK IF AT LEAST ONE FILE IS GIVEN CHECK IF FILEOUT IS GIVEN Check all programs have the *.cl extension. Read all program source codes. =============================================================== ==================ANALISIS-LEXICOGRAFICO======================= =============================================================== BUILD THE LEXER =============================================================== =============================================================== =====================ANALISIS-SINTACTICO======================= =============================================================== BUILD THE PARSER =============================================================== =============================================================== ======================ANALISIS-SEMANTICO======================= =============================================================== from semantic.ast_types_painter import Painter CHECK SEMANTIC ERRORS IN THE ENVIROMENT(check_main, cycles and inheritance rules) ast_painter = Painter() print(ast_painter.visit(typed_ast, 0)) =============================================================== =============================================================== ========================CODE-GENERATION======================== =============================================================== COOL --> CIL from general.cil_hierarchy import get_formatter cil_painter = get_formatter() print(cil_painter(ast_cil)) CIL --> MIPS tag_names = sorted(types_ids.items(), key=itemgetter(1)) ast_cil.tag_names = tag_names | 1,901 | en | 0.389898 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import (
Any,
AsyncIterable,
Awaitable,
Callable,
Iterable,
Sequence,
Tuple,
Optional,
)
from google.cloud.compute_v1.types import compute
class AggregatedListPager:
    """Pager over ``AggregatedList`` responses for target instances.

    Wraps an initial
    :class:`google.cloud.compute_v1.types.TargetInstanceAggregatedList` and
    exposes ``__iter__`` over its ``items`` mapping, transparently issuing
    further ``AggregatedList`` requests whenever a ``next_page_token`` is
    present. Attribute access is delegated to the most recent response,
    which is the only one retained.
    """

    def __init__(
        self,
        method: Callable[..., compute.TargetInstanceAggregatedList],
        request: compute.AggregatedListTargetInstancesRequest,
        response: compute.TargetInstanceAggregatedList,
        *,
        metadata: Sequence[Tuple[str, str]] = ()
    ):
        """Instantiate the pager.

        Args:
            method (Callable): The API-call method this pager re-invokes
                for subsequent pages.
            request (google.cloud.compute_v1.types.AggregatedListTargetInstancesRequest):
                The initial request object.
            response (google.cloud.compute_v1.types.TargetInstanceAggregatedList):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings sent along with
                every request as metadata.
        """
        self._method = method
        self._request = compute.AggregatedListTargetInstancesRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attributes to the latest response object.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterable[compute.TargetInstanceAggregatedList]:
        """Yield each response page, fetching the next page on demand."""
        response = self._response
        yield response
        while response.next_page_token:
            self._request.page_token = response.next_page_token
            response = self._method(self._request, metadata=self._metadata)
            self._response = response
            yield response

    def __iter__(self) -> Iterable[Tuple[str, compute.TargetInstancesScopedList]]:
        for page in self.pages:
            yield from page.items.items()

    def get(self, key: str) -> Optional[compute.TargetInstancesScopedList]:
        """Look up a scoped list by zone/region key on the latest response."""
        return self._response.items.get(key)

    def __repr__(self) -> str:
        return f"{type(self).__name__}<{self._response!r}>"
class ListPager:
    """Pager over ``List`` responses for target instances.

    Wraps an initial
    :class:`google.cloud.compute_v1.types.TargetInstanceList` and exposes
    ``__iter__`` over its ``items`` field, transparently issuing further
    ``List`` requests whenever a ``next_page_token`` is present. Attribute
    access is delegated to the most recent response, which is the only one
    retained.
    """

    def __init__(
        self,
        method: Callable[..., compute.TargetInstanceList],
        request: compute.ListTargetInstancesRequest,
        response: compute.TargetInstanceList,
        *,
        metadata: Sequence[Tuple[str, str]] = ()
    ):
        """Instantiate the pager.

        Args:
            method (Callable): The API-call method this pager re-invokes
                for subsequent pages.
            request (google.cloud.compute_v1.types.ListTargetInstancesRequest):
                The initial request object.
            response (google.cloud.compute_v1.types.TargetInstanceList):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings sent along with
                every request as metadata.
        """
        self._method = method
        self._request = compute.ListTargetInstancesRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attributes to the latest response object.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterable[compute.TargetInstanceList]:
        """Yield each response page, fetching the next page on demand."""
        response = self._response
        yield response
        while response.next_page_token:
            self._request.page_token = response.next_page_token
            response = self._method(self._request, metadata=self._metadata)
            self._response = response
            yield response

    def __iter__(self) -> Iterable[compute.TargetInstance]:
        for page in self.pages:
            yield from page.items

    def __repr__(self) -> str:
        return f"{type(self).__name__}<{self._response!r}>"
| google/cloud/compute_v1/services/target_instances/pagers.py | 5,740 | A pager for iterating through ``aggregated_list`` requests.
This class thinly wraps an initial
:class:`google.cloud.compute_v1.types.TargetInstanceAggregatedList` object, and
provides an ``__iter__`` method to iterate through its
``items`` field.
If there are more pages, the ``__iter__`` method will make additional
``AggregatedList`` requests and continue to iterate
through the ``items`` field on the
corresponding responses.
All the usual :class:`google.cloud.compute_v1.types.TargetInstanceAggregatedList`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
A pager for iterating through ``list`` requests.
This class thinly wraps an initial
:class:`google.cloud.compute_v1.types.TargetInstanceList` object, and
provides an ``__iter__`` method to iterate through its
``items`` field.
If there are more pages, the ``__iter__`` method will make additional
``List`` requests and continue to iterate
through the ``items`` field on the
corresponding responses.
All the usual :class:`google.cloud.compute_v1.types.TargetInstanceList`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.compute_v1.types.AggregatedListTargetInstancesRequest):
The initial request object.
response (google.cloud.compute_v1.types.TargetInstanceAggregatedList):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.compute_v1.types.ListTargetInstancesRequest):
The initial request object.
response (google.cloud.compute_v1.types.TargetInstanceList):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
-*- coding: utf-8 -*- Copyright 2020 Google LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 2,796 | en | 0.829502 |
import os
import subprocess
from tempfile import NamedTemporaryFile
from torch.distributed import get_rank
from torch.distributed import get_world_size
from torch.utils.data.sampler import Sampler
import librosa
import numpy as np
import scipy.signal
import torch
from scipy.io.wavfile import read
import math
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
from .spec_augment import spec_augment
from hangul_utils import split_syllable_char, split_syllables, join_jamos
# Maps window-name strings (from the audio config) to the scipy window
# functions used when computing the STFT.
windows = {'hamming': scipy.signal.hamming, 'hann': scipy.signal.hann, 'blackman': scipy.signal.blackman,
           'bartlett': scipy.signal.bartlett}
def load_audio(path):
    """Load an audio file as a mono, peak-normalized float32 waveform.

    The file is resampled to 16 kHz by librosa. Multi-channel audio is
    collapsed to a single channel by averaging.

    :param path: Path to the audio file on disk.
    :return: 1-D numpy float32 array of samples in [-1, 1].
    """
    sound, _ = librosa.load(path, sr=16000)
    sound = librosa.util.normalize(sound)  # peak-normalize to [-1, 1]
    sound = sound.astype('float32')
    # Defensive mono conversion; librosa.load already returns mono by default.
    if len(sound.shape) > 1:
        if sound.shape[1] == 1:
            sound = sound.squeeze()
        else:
            sound = sound.mean(axis=1)  # multiple channels, average
    return sound
class AudioParser(object):
    """Abstract base for objects that turn manifest entries into model input."""

    def parse_transcript(self, transcript_path):
        """Convert a transcript file into training/testing format.

        :param transcript_path: Path where transcript is stored from the
            manifest file.
        :return: Transcript in training/testing format.
        """
        raise NotImplementedError

    def parse_audio(self, audio_path):
        """Convert an audio file into training/testing format.

        :param audio_path: Path where audio is stored from the manifest file.
        :return: Audio in training/testing format.
        """
        raise NotImplementedError
class NoiseInjection(object):
    def __init__(self,
                 path=None,
                 sample_rate=16000,
                 noise_levels=(0, 0.5)):
        """
        Adds noise to an input signal with specific SNR. Higher the noise level, the more noise added.
        Modified code from https://github.com/willfrey/audio/blob/master/torchaudio/transforms.py

        :param path: Directory containing noise audio files.
        :param sample_rate: Sample rate the input data is assumed to use.
        :param noise_levels: (min, max) range the per-call noise level is
            drawn from uniformly.
        """
        if path is not None and not os.path.exists(path):
            print("Directory doesn't exist: {}".format(path))
            raise IOError
        # NOTE(review): when path is None this leaves self.paths == False
        # rather than a list, and inject_noise would fail on it; callers are
        # expected to construct this class only with a real noise directory.
        self.paths = path is not None and librosa.util.find_files(path)
        self.sample_rate = sample_rate
        self.noise_levels = noise_levels

    def inject_noise(self, data):
        # Pick a random noise file and a random level, then mix it in.
        noise_path = np.random.choice(self.paths)
        noise_level = np.random.uniform(*self.noise_levels)
        return self.inject_noise_sample(data, noise_path, noise_level)

    def inject_noise_sample(self, data, noise_path, noise_level):
        # Crop a random window of the noise file whose duration matches
        # `data`, then add it scaled by the ratio of signal energy to noise
        # energy times `noise_level`. Mutates and returns `data`.
        noise_len = get_audio_length(noise_path)
        data_len = len(data) / self.sample_rate
        noise_start = np.random.rand() * (noise_len - data_len)
        noise_end = noise_start + data_len
        noise_dst = audio_with_sox(noise_path, self.sample_rate, noise_start, noise_end)
        assert len(data) == len(noise_dst)
        noise_energy = np.sqrt(noise_dst.dot(noise_dst) / noise_dst.size)
        data_energy = np.sqrt(data.dot(data) / data.size)
        data += noise_level * noise_dst * data_energy / noise_energy
        return data
class SpectrogramParser(AudioParser):
    def __init__(self, audio_conf, normalize=False, speed_volume_perturb=False, spec_augment=False):
        """
        Parses audio file into spectrogram with optional normalization and various augmentations
        :param audio_conf: Dictionary containing the sample rate, window and the window length/stride in seconds
        :param normalize(default False): Apply standard mean and deviation normalization to audio tensor
        :param speed_volume_perturb(default False): Apply random tempo and gain perturbations
        :param spec_augment(default False): Apply simple spectral augmentation to mel spectograms
        """
        super(SpectrogramParser, self).__init__()
        self.window_stride = audio_conf['window_stride']
        self.window_size = audio_conf['window_size']
        self.sample_rate = audio_conf['sample_rate']
        # Fall back to a Hamming window when the configured name is unknown.
        self.window = windows.get(audio_conf['window'], windows['hamming'])
        self.normalize = normalize
        self.speed_volume_perturb = speed_volume_perturb
        self.spec_augment = spec_augment
        # Noise injection is enabled only when a noise directory is configured.
        self.noiseInjector = NoiseInjection(audio_conf['noise_dir'], self.sample_rate,
                                            audio_conf['noise_levels']) if audio_conf.get(
            'noise_dir') is not None else None
        self.noise_prob = audio_conf.get('noise_prob')

    def parse_audio(self, audio_path, audio=None, change_speed=None):
        """Compute a log-magnitude STFT spectrogram for one utterance.

        :param audio_path: Path to the audio file (unused if `audio` given).
        :param audio: Optional pre-loaded waveform used instead of reading
            from disk.
        :param change_speed: Optional tempo factor applied with
            librosa.effects.time_stretch.
        :return: FloatTensor of shape (freq, time).
        """
        if audio is not None:
            y = audio
        elif self.speed_volume_perturb:
            y = load_randomly_augmented_audio(audio_path, self.sample_rate)
        else:
            y = load_audio(audio_path)
        # Optionally change the audio speed before any other processing.
        if change_speed is not None:
            y = librosa.effects.time_stretch(y, change_speed)
        if self.noiseInjector:
            # Inject noise with probability noise_prob.
            add_noise = np.random.binomial(1, self.noise_prob)
            if add_noise:
                y = self.noiseInjector.inject_noise(y)
        n_fft = int(self.sample_rate * self.window_size)
        win_length = n_fft
        hop_length = int(self.sample_rate * self.window_stride)
        # Short-time Fourier transform.
        D = librosa.stft(y, n_fft=n_fft, hop_length=hop_length,
                         win_length=win_length, window=self.window)
        spect, phase = librosa.magphase(D)
        # S = log(S + 1) compresses the dynamic range.
        spect = np.log1p(spect)
        spect = torch.FloatTensor(spect)
        if self.normalize:
            # Per-utterance mean/variance normalization.
            mean = spect.mean()
            std = spect.std()
            spect.add_(-mean)
            spect.div_(std)
        if self.spec_augment:
            spect = spec_augment(spect)
        return spect

    def parse_transcript(self, transcript_path):
        # Implemented by SpectrogramDataset.
        raise NotImplementedError
class SpectrogramDataset(Dataset, SpectrogramParser):
    def __init__(self, audio_conf, manifest_filepath, labels, normalize=False, speed_volume_perturb=False, spec_augment=False):
        """
        Dataset that loads tensors via a csv containing file paths to audio files and transcripts separated by
        a comma. Each new line is a different sample. Example below:

        /path/to/audio.wav,/path/to/audio.txt
        ...

        :param audio_conf: Dictionary containing the sample rate, window and the window length/stride in seconds
        :param manifest_filepath: Path to manifest csv as describe above
        :param labels: String containing all the possible characters to map to
        :param normalize: Apply standard mean and deviation normalization to audio tensor
        :param speed_volume_perturb(default False): Apply random tempo and gain perturbations
        :param spec_augment(default False): Apply simple spectral augmentation to mel spectograms
        """
        with open(manifest_filepath) as f:
            ids = f.readlines()
        ids = [x.strip().split(',') for x in ids]
        self.ids = ids
        self.size = len(ids)
        # Map each output symbol to its integer index.
        self.labels_map = dict([(labels[i], i) for i in range(len(labels))])
        # `use_jamo` is optional in the config; default to False instead of
        # the original bare try/except, which silently hid genuine errors.
        self.use_jamo = audio_conf.get('use_jamo', False)
        super(SpectrogramDataset, self).__init__(audio_conf, normalize, speed_volume_perturb, spec_augment)

    def __getitem__(self, index):
        sample = self.ids[index]
        audio_path, transcript_path = sample[0], sample[1]
        spect = self.parse_audio(audio_path)
        transcript = self.parse_transcript(transcript_path)
        return spect, transcript

    def parse_transcript(self, transcript_path):
        """Read a transcript file and encode it as a list of label indices.

        Characters missing from the label map are dropped.
        """
        with open(transcript_path, 'r', encoding='utf8') as transcript_file:
            transcript = transcript_file.read().replace('\n', '')
        if self.use_jamo:
            # Decompose Hangul syllables into jamo before mapping to labels.
            transcript = split_syllables(transcript)
        # NOTE(review): filter(None, ...) drops not only unmapped characters
        # but also the label with index 0; preserved as-is from the original.
        transcript = list(filter(None, [self.labels_map.get(x) for x in list(transcript)]))
        return transcript

    def __len__(self):
        return self.size
def _collate_fn(batch):
def func(p):
return p[0].size(1)
batch = sorted(batch, key=lambda sample: sample[0].size(1), reverse=True)
longest_sample = max(batch, key=func)[0]
freq_size = longest_sample.size(0)
minibatch_size = len(batch)
max_seqlength = longest_sample.size(1)
inputs = torch.zeros(minibatch_size, 1, freq_size, max_seqlength)
input_percentages = torch.FloatTensor(minibatch_size)
target_sizes = torch.IntTensor(minibatch_size)
targets = []
for x in range(minibatch_size):
sample = batch[x]
tensor = sample[0]
target = sample[1]
seq_length = tensor.size(1)
inputs[x][0].narrow(1, 0, seq_length).copy_(tensor)
input_percentages[x] = seq_length / float(max_seqlength)
target_sizes[x] = len(target)
targets.extend(target)
targets = torch.IntTensor(targets)
return inputs, targets, input_percentages, target_sizes
class AudioDataLoader(DataLoader):
    """DataLoader that pads variable-length audio batches via `_collate_fn`."""

    def __init__(self, *args, **kwargs):
        """Create a data loader for AudioDatasets; every argument is
        forwarded unchanged to torch.utils.data.DataLoader."""
        super().__init__(*args, **kwargs)
        self.collate_fn = _collate_fn
class BucketingSampler(Sampler):
    """Yields contiguous batches of dataset indices.

    Assumes the data source is ordered by sample size, so each batch groups
    similarly sized samples together.
    """

    def __init__(self, data_source, batch_size=1):
        super(BucketingSampler, self).__init__(data_source)
        self.data_source = data_source
        indices = list(range(len(data_source)))
        # Chop the index list into consecutive batch-sized bins.
        self.bins = [indices[start:start + batch_size]
                     for start in range(0, len(indices), batch_size)]

    def __iter__(self):
        for batch_ids in self.bins:
            # Shuffle within each bin so sample order varies between passes.
            np.random.shuffle(batch_ids)
            yield batch_ids

    def __len__(self):
        return len(self.bins)

    def shuffle(self, epoch):
        # Reorder whole bins; the contents of each bin stay together.
        np.random.shuffle(self.bins)
class DistributedBucketingSampler(Sampler):
    """Bucketing sampler that deals whole bins out across distributed ranks.

    Assumes the data source is ordered by sample size, so each batch groups
    similarly sized samples together.
    """

    def __init__(self, data_source, batch_size=1, num_replicas=None, rank=None):
        super(DistributedBucketingSampler, self).__init__(data_source)
        # Fall back to the process-group topology when not given explicitly.
        if num_replicas is None:
            num_replicas = get_world_size()
        if rank is None:
            rank = get_rank()
        self.data_source = data_source
        self.ids = list(range(0, len(data_source)))
        self.batch_size = batch_size
        self.bins = [self.ids[i:i + batch_size] for i in range(0, len(self.ids), batch_size)]
        self.num_replicas = num_replicas
        self.rank = rank
        # Every replica receives the same number of bins (rounded up).
        self.num_samples = int(math.ceil(len(self.bins) * 1.0 / self.num_replicas))
        self.total_size = self.num_samples * self.num_replicas

    def __iter__(self):
        # Pad with leading bins so the total divides evenly, then deal every
        # num_replicas-th bin to this rank.
        padded = self.bins + self.bins[:(self.total_size - len(self.bins))]
        assert len(padded) == self.total_size
        return iter(padded[self.rank::self.num_replicas])

    def __len__(self):
        return self.num_samples

    def shuffle(self, epoch):
        # Deterministically permute bin order from the epoch number so every
        # replica agrees on the same permutation.
        g = torch.Generator()
        g.manual_seed(epoch)
        bin_ids = list(torch.randperm(len(self.bins), generator=g))
        self.bins = [self.bins[i] for i in bin_ids]
def get_audio_length(path):
    """Return the duration of an audio file in seconds via sox's `soxi -D`.

    Uses an argument list (shell=False) so filenames containing quotes or
    shell metacharacters cannot break the command or be interpreted by a
    shell, unlike the previous shell-string invocation.

    :param path: Path to the audio file; surrounding whitespace is stripped.
    :return: Duration in seconds as a float.
    """
    output = subprocess.check_output(['soxi', '-D', path.strip()])
    return float(output)
def audio_with_sox(path, sample_rate, start_time, end_time):
    """
    Crop and resample the recording with sox and load it.

    :param path: Source audio file.
    :param sample_rate: Target sample rate for the cropped output.
    :param start_time: Crop start, in seconds.
    :param end_time: Crop end, in seconds.
    :return: The cropped waveform as returned by load_audio.
    """
    with NamedTemporaryFile(suffix=".wav") as tar_file:
        tar_filename = tar_file.name
        # Use an argument list instead of the previous shell string so paths
        # with spaces/quotes are safe; sox output is discarded as before.
        sox_args = ['sox', path, '-r', str(sample_rate), '-c', '1', '-b', '16',
                    '-e', 'si', tar_filename,
                    'trim', str(start_time), '={}'.format(end_time)]
        subprocess.run(sox_args, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        y = load_audio(tar_filename)
        return y
def augment_audio_with_sox(path, sample_rate, tempo, gain):
    """
    Changes tempo and gain of the recording with sox and loads it.

    :param path: Source audio file.
    :param sample_rate: Sample rate for the augmented output.
    :param tempo: Tempo factor passed to sox (1.0 keeps the original speed).
    :param gain: Gain in dB applied by sox.
    :return: The augmented waveform as returned by load_audio.
    """
    with NamedTemporaryFile(suffix=".wav") as augmented_file:
        augmented_filename = augmented_file.name
        # Use an argument list instead of the previous shell string so paths
        # with spaces/quotes are safe; sox output is discarded as before.
        sox_args = ['sox', path, '-r', str(sample_rate), '-c', '1', '-b', '16',
                    '-e', 'si', augmented_filename,
                    'tempo', '{:.3f}'.format(tempo), 'gain', '{:.3f}'.format(gain)]
        subprocess.run(sox_args, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        y = load_audio(augmented_filename)
        return y
# original tempo_range=(0.85,1.15)
# original gain_range=(-6,8)
def load_randomly_augmented_audio(path, sample_rate=16000, tempo_range=(0.85, 1.15),
                                  gain_range=(-6, 8)):
    """
    Picks tempo and gain uniformly, applies it to the utterance by using sox utility.
    Returns the augmented utterance.
    """
    # Draw tempo first, then gain, preserving the RNG consumption order.
    tempo_value = np.random.uniform(low=tempo_range[0], high=tempo_range[1])
    gain_value = np.random.uniform(low=gain_range[0], high=gain_range[1])
    return augment_audio_with_sox(path=path, sample_rate=sample_rate,
                                  tempo=tempo_value, gain=gain_value)
| data/data_loader.py | 14,446 | Adds noise to an input signal with specific SNR. Higher the noise level, the more noise added.
Modified code from https://github.com/willfrey/audio/blob/master/torchaudio/transforms.py
Parses audio file into spectrogram with optional normalization and various augmentations
:param audio_conf: Dictionary containing the sample rate, window and the window length/stride in seconds
:param normalize(default False): Apply standard mean and deviation normalization to audio tensor
:param speed_volume_perturb(default False): Apply random tempo and gain perturbations
:param spec_augment(default False): Apply simple spectral augmentation to mel spectograms
Dataset that loads tensors via a csv containing file paths to audio files and transcripts separated by
a comma. Each new line is a different sample. Example below:
/path/to/audio.wav,/path/to/audio.txt
...
:param audio_conf: Dictionary containing the sample rate, window and the window length/stride in seconds
:param manifest_filepath: Path to manifest csv as describe above
:param labels: String containing all the possible characters to map to
:param normalize: Apply standard mean and deviation normalization to audio tensor
:param speed_volume_perturb(default False): Apply random tempo and gain perturbations
:param spec_augment(default False): Apply simple spectral augmentation to mel spectograms
Creates a data loader for AudioDatasets.
Samples batches assuming they are in order of size to batch similarly sized samples together.
Samples batches assuming they are in order of size to batch similarly sized samples together.
crop and resample the recording with sox and loads it.
Changes tempo and gain of the recording with sox and loads it.
Picks tempo and gain uniformly, applies it to the utterance by using sox utility.
Returns the augmented utterance.
:param audio_path: Path where audio is stored from the manifest file
:return: Audio in training/testing format
:param transcript_path: Path where transcript is stored from the manifest file
:return: Transcript in training/testing format
sample_rate, sound = read(path) librosa.output.write_wav('org.wav', sound, sr) print('save 1') sound = sound.astype('float32') / 32767 normalize audio normalize audio librosa.output.write_wav('norm.wav', sound, sr) print('save 2') multiple channels, average librosa.output.write_wav('test.wav', y, sr=16000, norm=False) print('test') librosa.output.write_wav('y1.wav', y, sr=16000) print('save@@@@@@@@@@@@') change audio speed librosa.output.write_wav('y2.wav', y, sr=16000) print('save@@@@@@@@@@@@') import sys sys.exit() STFT S = log(S+1) with open(transcript_path, 'r', encoding='utf-16') as transcript_file: add extra samples to make it evenly divisible Get every Nth bin, starting from rank deterministically shuffle based on epoch original tempo_range=(0.85,1.15) original gain_range=(-6,8) | 2,970 | en | 0.705064 |
# -*- coding: utf8 -*-
def filter_event(event, happening_before):
    """Check whether an event is upcoming, public, unannounced, and starts
    before `happening_before`.

    The 'status', 'visibility' and 'self' keys only show up when using the
    API; when fetching from the iCal, JSON, or RSS feeds those checks pass
    trivially and only the event time is compared.
    """
    if event.get('status', 'upcoming') != 'upcoming':
        return False
    if event.get('visibility', 'public') != 'public':
        return False
    if 'self' in event and 'announce' in event['self']['actions']:
        return False
    return event['time'] < happening_before
| app/Meetup/Filter.py | 651 | Check if the following keys are present. These
keys only show up when using the API. If fetching
from the iCal, JSON, or RSS feeds it will just compare
the dates
-*- coding: utf8 -*- | 184 | en | 0.75818 |
import csv
from pathlib import Path
import torch
import pandas
import numpy as np
from utils import peek, load_json, dump_json
from .module import ContrastiveModule
from mps import distributed as du
from save import format_rows
def get_penultimates(keys):
    """Select the deepest layer key per view.

    Keys look like '<view>_<layer_name>' (view is everything before the
    first underscore). For each view, keep the lexicographically greatest
    layer name, and return the winning full keys ordered by view name.
    """
    best_layer = {}
    for key in keys:
        sep = key.find('_')
        view, layer_name = key[:sep], key[sep + 1:]
        if view not in best_layer or layer_name > best_layer[view]:
            best_layer[view] = layer_name
    return [view + '_' + best_layer[view] for view in sorted(best_layer)]
def get_optimizer(params, lr=1e-3):
    """Build an AdamW optimizer with this project's fixed hyperparameters.

    :param params: Iterable of parameters (or parameter groups) to optimize.
    :param lr: Learning rate.
    :return: A configured torch.optim.AdamW instance.
    """
    return torch.optim.AdamW(
        params,
        lr=lr,
        betas=(0.9, 0.999),
        eps=1e-6,
        amsgrad=True,
    )
def set_lr(optimizer, lr):
    """Set the learning rate of every param group and return the optimizer."""
    for group in optimizer.param_groups:
        group['lr'] = lr
    return optimizer
def lr_func_linear(current_step, num_training_steps, num_warmup_steps=3):
    """Linear warmup followed by linear decay, as a multiplier in [0, 1].

    Ramps from 0 toward 1 over `num_warmup_steps`, then decays linearly to
    0 at `num_training_steps` (clamped below at 0).
    """
    if current_step < num_warmup_steps:
        return current_step / max(1, num_warmup_steps)
    remaining = num_training_steps - current_step
    decay_span = max(1, num_training_steps - num_warmup_steps)
    return max(0.0, remaining / decay_span)
def update_lr(optimizer, epoch, num_epochs, base_lr=1e-3, num_warmup_steps=3):
    """Apply the linear warmup/decay schedule for `epoch`.

    Returns the (mutated) optimizer together with the learning rate that
    was just installed.
    """
    lr = base_lr * lr_func_linear(epoch + 1, num_epochs + 1, num_warmup_steps)
    return set_lr(optimizer, lr), lr
class Contrastive:
    """Contrastive video/audio model wrapper.

    Couples a ContrastiveModule over SlowFast video features and VGGish audio
    features with training, inference, and on-disk caching of model state and
    inference results.  Supports optional distributed execution via ``du``.
    """
    def __init__(self, num_epochs=1, device='cpu', base_lr=1e-4,
                 num_warmup_steps=3, distributed=False):
        # Training hyperparameters and runtime configuration.
        self.num_epochs = num_epochs
        self.device = device
        self.base_lr = base_lr
        self.num_warmup_steps = num_warmup_steps
        self.distributed = distributed
        # Last completed epoch; advanced by train() and restored by load_cache().
        self.epoch = 0
        # sizes = self.get_sizes(train)
        sizes = self.default_sizes
        self.model = ContrastiveModule(*sizes, use_global_batch=distributed)
        self.model = self.model.to(self.device)
    def init(self, clustering_combinations, candidates):
        # Intentionally a no-op; presumably satisfies a shared measure
        # interface -- TODO confirm against sibling measure classes.
        pass
    @property
    def default_sizes(self):
        # video (slowfast) : 2304, audio (VGGish) : 128
        return [2304, 128]
    def get_sizes(self, train):
        """Infer per-modality feature dims from one training row (unused by default)."""
        class_data = peek(train)
        row = class_data[0]
        penultimates = get_penultimates(list(row['features'].keys()))
        return [row['features'][k].shape[-1] for k in penultimates]
    def get_feature_names(self, train):
        """Return the sorted feature keys of one peeked training row."""
        class_data = peek(train)
        row = peek(class_data)
        return sorted(list(row.keys()))
    def train_batch(self, batch, optimizer):
        """Run one optimization step; returns (loss, accuracy) as Python floats.

        NOTE(review): optimizer.zero_grad() is never called here or in train(),
        so gradients accumulate across batches -- confirm this is intentional.
        """
        moved = []
        for feature in batch:
            moved.append(feature.to(self.device))
        loss, acc = self.model(*moved)
        loss.backward()
        if self.distributed:
            # Synchronize gradients across workers before stepping.
            self.model.average_gradient()
        optimizer.step()
        return loss.item(), acc.item()
    def _get_features(self, batch):
        """Deduplicate samples in a collated batch and extract the two modalities.

        Returns ``(metas, [video_features, audio_features])`` where metas holds
        one ``{'id', 'filename', 'shard_name'}`` dict per unique sample.
        """
        # Positions of the first occurrence of each sample id in the batch.
        unique_ids = pandas.Series(batch['idx']).drop_duplicates().index.tolist()
        filenames = [batch['filename'][idx] for idx in unique_ids]
        ids = [batch['idx'][idx] for idx in unique_ids]
        shard_names = [batch['shard_name'][idx] for idx in unique_ids]
        metas = [{'id': idx, 'filename': filename, 'shard_name': shard_name}
                 for idx, filename, shard_name in zip(ids, filenames, shard_names)]
        video_features = batch['SLOWFAST_8x8_R50/kinetics-400']['layer_4']
        audio_features = batch['VGGish/YouTube-8M']['layer_4']
        unique_ids = torch.Tensor(unique_ids).long()
        # Keep only the rows of the unique samples, in first-seen order.
        video_features = video_features.index_select(dim=0, index=unique_ids)
        audio_features = audio_features.index_select(dim=0, index=unique_ids)
        return metas, [video_features, audio_features]
    def get_features(self, batch):
        """Like _get_features, but round-robin shards rows across ranks when distributed."""
        metas, [video_features, audio_features] = self._get_features(batch)
        if self.distributed:
            i = du.get_rank()
            total = du.get_world_size()
            # Each rank keeps a disjoint strided slice of the batch.
            metas = metas[i::total]
            video_features = video_features[i::total]
            audio_features = audio_features[i::total]
        return metas, [video_features, audio_features]
    def train(self, args, path, dataloader, log_every=1, verbose=True):
        """Train from self.epoch up to num_epochs, saving a cache after each epoch."""
        self.model.train()
        optimizer = get_optimizer(self.model.parameters(), self.base_lr)
        for epoch in range(self.epoch, self.num_epochs):
            # Linear warmup/decay schedule, applied once per epoch.
            optimizer, lr = update_lr(optimizer, epoch, self.num_epochs, self.base_lr,
                                      self.num_warmup_steps)
            epoch_loss = []
            epoch_acc = []
            pbar = dataloader
            for count, batch in enumerate(pbar):
                _, features = self.get_features(batch)
                loss, acc = self.train_batch(features, optimizer)
                epoch_loss.append(loss)
                epoch_acc.append(acc)
                if verbose and count % log_every == 0:
                    print("(node {}) training epoch ({}/{}) iter ({}/{}) (lr: {:04f}, loss: {:04f}, acc: {:04f})".format(
                        du.get_rank(), epoch, self.num_epochs, count, len(dataloader), lr, loss, acc))
            epoch_loss = np.array(epoch_loss).mean()
            epoch_acc = np.array(epoch_acc).mean()
            if verbose:
                print("(node {}) epoch ({}/{}) done (lr: {:04f}, loss: {:04f}, acc: {:04f})".format(
                    du.get_rank(), epoch, self.num_epochs, lr, epoch_loss, epoch_acc))
            self.epoch = epoch
            self.save_cache(args, path, epoch, verbose)
        return
    def get_cache_path_run(self, args, epoch):
        """Build this run's unique .pkl/.json cache paths (epoch/pid/rank/chunk scoped)."""
        cache_dir = args.data.output.path.parent / 'caches'
        cache_dir.mkdir(parents=True, exist_ok=True)
        pid = args.parent_pid
        rank = args.node_rank
        i = args.chunk_num
        name = "contrastive_model_cache_epoch_{}_{}_{}_{}.pkl".format(epoch, pid, rank, i)
        path = str(cache_dir / name)
        key_name = "contrastive_model_cache_epoch_{}_{}_{}_{}.json".format(epoch, pid, rank, i)
        key_path = str(cache_dir / key_name)
        return path, key_path
    def get_cache_path_load(self, args, path, epoch):
        """Find a saved cache for *epoch* whose key set covers all given chunk paths.

        :returns: Path of the best-matching .pkl (largest overlap), or None.
        """
        cache_dir = args.data.output.path.parent / 'caches'
        cache_dir.mkdir(parents=True, exist_ok=True)
        keys = list(cache_dir.glob("contrastive_model_cache_epoch_{}_*.json".format(epoch)))
        if len(keys) == 0:
            return None
        keys = {p.stem: set(load_json(p)) for p in keys}
        path = set([Path(p).stem for p in path])
        # Keep only caches containing every requested chunk; rank by overlap size.
        intersections = [(k, len(v & path)) for k, v in keys.items() if len(path - v) == 0]
        if len(intersections) == 0:
            return None
        key = max(intersections, key=lambda x: x[1])[0]
        path = cache_dir / key
        path = path.parent / (path.stem + '.pkl')
        return path
    def save_cache(self, args, chunks, epoch, verbose=True):
        """Persist model state plus the chunk key list describing its training data."""
        path, key_path = self.get_cache_path_run(args, epoch)
        dt = {
            'epoch': self.epoch,
            'base_lr': self.base_lr,
            'model': self.model.state_dict()
        }
        if verbose:
            print("saved cache file: {}".format(Path(path).stem))
        torch.save(dt, path)
        # Record which chunks this checkpoint covers, for get_cache_path_load().
        keys = [Path(p).stem for p in chunks]
        dump_json(keys, key_path)
    def load_cache(self, args, path, epoch):
        """Restore model/epoch/base_lr from a matching cache; asserts one exists."""
        path = self.get_cache_path_load(args, path, epoch)
        assert path is not None, 'no cache file'
        dt = torch.load(path)
        self.epoch = dt['epoch']
        self.base_lr = dt['base_lr']
        self.model.load_state_dict(dt['model'])
    def infer_batch(self, batch):
        """Forward one batch through the model's infer path; returns logits on CPU."""
        moved = []
        for feature in batch:
            moved.append(feature.to(self.device))
        logits = self.model.infer(*moved)
        return logits.detach().cpu()
    def infer(self, args, dataloader, json_metas, subset_size, log_every=1, verbose=True):
        """Run inference and return (scores, ids, filename_ids) for the top logits."""
        self.model.eval()
        with torch.no_grad():
            logits, filename_ids = self._infer(args, dataloader, json_metas, log_every, verbose)
        # Clamp subset_size to the number of rows actually returned.
        if subset_size > logits.shape[0]:
            subset_size = logits.shape[0]
        scores, ids = logits.topk(subset_size, sorted=True)
        return scores, ids, filename_ids
    def _infer(self, args, dataloader, json_metas, log_every=1, verbose=True):
        """Inference loop that flushes results to CSV caches every log_every iters.

        NOTE(review): only results accumulated since the last flush are
        returned; if everything was flushed, ``logits`` is an empty list (not a
        tensor), and an empty dataloader leaves ``count`` unbound at the final
        print -- confirm callers handle these cases.
        """
        logits = []
        pbar = dataloader
        metas = []
        for count, batch in enumerate(pbar):
            batch_metas, features = self.get_features(batch)
            logit = self.infer_batch(features)
            logits.append(logit)
            metas.extend(batch_metas)
            if verbose and count % log_every == 0:
                print("inference iter ({}/{}) saving caches".format(count, len(dataloader)))
                logits = torch.cat(logits, dim=0)
                self.save_inference(args, logits, metas, json_metas)
                logits = []
                metas = []
        if len(metas) > 0:
            # Flush any remaining, not-yet-saved results.
            logits = torch.cat(logits, dim=0)
            self.save_inference(args, logits, metas, json_metas)
        print("done: inference iter ({}/{}) saving caches".format(count, len(dataloader)))
        return logits, metas
    def save_inference(self, args, logits, metas, json_metas):
        """Append scored rows to this rank's inference CSV cache."""
        cache_dir = args.data.output.path.parent / 'caches'
        cache_dir.mkdir(parents=True, exist_ok=True)
        pid = args.parent_pid
        local_rank = du.get_rank()
        output_name = Path(args.data.output.path).stem
        name = "{}_contrastive_inferred_cache_{}_{}.csv".format(output_name, pid, local_rank)
        scores = logits.numpy().tolist()
        rows = [{'score': score, **v} for score, v in zip(scores, metas)]
        lines = format_rows(rows, json_metas, sharded_meta=True,
                            headers=['score', 'shard_name', 'filename', 'id', 'segment'])
        print("saving cache to {}".format(cache_dir / name))
        # Append mode: multiple flushes from _infer() accumulate into one file.
        with open(cache_dir / name, 'a+') as f:
            writer = csv.writer(f)
            for line in lines:
                writer.writerow(line)
| subset_selection/code/measures/contrastive/contrastive.py | 10,201 | get dataset+model name sizes = self.get_sizes(train) video (slowfast) : 2304, audio (VGGish) : 128 | 98 | en | 0.505707 |
#!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test pricecoind with different proxy configuration.
Test plan:
- Start pricecoind's with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on pricecoind side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side,
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create pricecoinds that connect to them
- Manipulate the pricecoinds using addnode (onetry) and observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
"""
import socket
import os
from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
PORT_MIN,
PORT_RANGE,
assert_equal,
)
from test_framework.netutil import test_ipv6_local
# Base port for the SOCKS5 proxy servers this test spawns.
RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE  # Start after p2p and rpc ports
class ProxyTest(BitcoinTestFramework):
    """Exercise pricecoind's -proxy/-onion/-proxyrandomize configurations."""
    def set_test_params(self):
        self.num_nodes = 4
    def setup_nodes(self):
        """Start the SOCKS5 proxy threads and four nodes with differing proxy args."""
        self.have_ipv6 = test_ipv6_local()
        # Create two proxies on different ports
        # ... one unauthenticated
        self.conf1 = Socks5Configuration()
        self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000))
        self.conf1.unauth = True
        self.conf1.auth = False
        # ... one supporting authenticated and unauthenticated (Tor)
        self.conf2 = Socks5Configuration()
        self.conf2.addr = ('127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000))
        self.conf2.unauth = True
        self.conf2.auth = True
        if self.have_ipv6:
            # ... one on IPv6 with similar configuration
            self.conf3 = Socks5Configuration()
            self.conf3.af = socket.AF_INET6
            self.conf3.addr = ('::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000))
            self.conf3.unauth = True
            self.conf3.auth = True
        else:
            self.log.warning("Testing without local IPv6 support")
        self.serv1 = Socks5Server(self.conf1)
        self.serv1.start()
        self.serv2 = Socks5Server(self.conf2)
        self.serv2.start()
        if self.have_ipv6:
            self.serv3 = Socks5Server(self.conf3)
            self.serv3.start()
        # Note: proxies are not used to connect to local nodes
        # this is because the proxy to use is based on CService.GetNetwork(), which return NET_UNROUTABLE for localhost
        args = [
            ['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
            ['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
            ['-listen', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
            []
        ]
        if self.have_ipv6:
            # Node 3 only runs when IPv6 is available: proxy on IPv6 localhost.
            args[3] = ['-listen', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion']
        self.add_nodes(self.num_nodes, extra_args=args)
        self.start_nodes()
    def node_test(self, node, proxies, auth, test_onion=True):
        """Issue addnode connections through *node* and check each proxy in
        *proxies* receives the expected SOCKS5 command.

        :param proxies: proxy server expected to serve [IPv4, IPv6, onion, DNS]
        :param auth: whether the proxy is expected to receive credentials
        :returns: the observed Socks5Command objects
        """
        rv = []
        # Test: outgoing IPv4 connection through node
        node.addnode("15.61.23.23:1234", "onetry")
        cmd = proxies[0].queue.get()
        assert(isinstance(cmd, Socks5Command))
        # Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
        assert_equal(cmd.atyp, AddressType.DOMAINNAME)
        assert_equal(cmd.addr, b"15.61.23.23")
        assert_equal(cmd.port, 1234)
        if not auth:
            assert_equal(cmd.username, None)
            assert_equal(cmd.password, None)
        rv.append(cmd)
        if self.have_ipv6:
            # Test: outgoing IPv6 connection through node
            node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
            cmd = proxies[1].queue.get()
            assert(isinstance(cmd, Socks5Command))
            # Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
            assert_equal(cmd.atyp, AddressType.DOMAINNAME)
            assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534")
            assert_equal(cmd.port, 5443)
            if not auth:
                assert_equal(cmd.username, None)
                assert_equal(cmd.password, None)
            rv.append(cmd)
        if test_onion:
            # Test: outgoing onion connection through node
            node.addnode("bitcoinostk4e4re.onion:9333", "onetry")
            cmd = proxies[2].queue.get()
            assert(isinstance(cmd, Socks5Command))
            assert_equal(cmd.atyp, AddressType.DOMAINNAME)
            assert_equal(cmd.addr, b"bitcoinostk4e4re.onion")
            assert_equal(cmd.port, 9333)
            if not auth:
                assert_equal(cmd.username, None)
                assert_equal(cmd.password, None)
            rv.append(cmd)
        # Test: outgoing DNS name connection through node
        node.addnode("node.noumenon:9333", "onetry")
        cmd = proxies[3].queue.get()
        assert(isinstance(cmd, Socks5Command))
        assert_equal(cmd.atyp, AddressType.DOMAINNAME)
        assert_equal(cmd.addr, b"node.noumenon")
        assert_equal(cmd.port, 9333)
        if not auth:
            assert_equal(cmd.username, None)
            assert_equal(cmd.password, None)
        rv.append(cmd)
        return rv
    def run_test(self):
        """Run connection tests per node, then verify getnetworkinfo reporting."""
        # basic -proxy
        self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
        # -proxy plus -onion
        self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
        # -proxy plus -onion, -proxyrandomize
        rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
        # Check that credentials as used for -proxyrandomize connections are unique
        credentials = set((x.username,x.password) for x in rv)
        assert_equal(len(credentials), len(rv))
        if self.have_ipv6:
            # proxy on IPv6 localhost
            self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False)
        def networks_dict(d):
            # Index the getnetworkinfo 'networks' list by network name.
            r = {}
            for x in d['networks']:
                r[x['name']] = x
            return r
        # test RPC getnetworkinfo
        n0 = networks_dict(self.nodes[0].getnetworkinfo())
        for net in ['ipv4','ipv6','onion']:
            assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr))
            assert_equal(n0[net]['proxy_randomize_credentials'], True)
        assert_equal(n0['onion']['reachable'], True)
        n1 = networks_dict(self.nodes[1].getnetworkinfo())
        for net in ['ipv4','ipv6']:
            assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr))
            assert_equal(n1[net]['proxy_randomize_credentials'], False)
        assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr))
        assert_equal(n1['onion']['proxy_randomize_credentials'], False)
        assert_equal(n1['onion']['reachable'], True)
        n2 = networks_dict(self.nodes[2].getnetworkinfo())
        for net in ['ipv4','ipv6','onion']:
            assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr))
            assert_equal(n2[net]['proxy_randomize_credentials'], True)
        assert_equal(n2['onion']['reachable'], True)
        if self.have_ipv6:
            n3 = networks_dict(self.nodes[3].getnetworkinfo())
            for net in ['ipv4','ipv6']:
                assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr))
                assert_equal(n3[net]['proxy_randomize_credentials'], False)
            # Node 3 was started with -noonion, so onion must be unreachable.
            assert_equal(n3['onion']['reachable'], False)
if __name__ == '__main__':
    # Run the functional test when this file is executed directly.
    ProxyTest().main()
| test/functional/feature_proxy.py | 8,356 | Test pricecoind with different proxy configuration.
Test plan:
- Start pricecoind's with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on pricecoind side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side,
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create pricecoinds that connect to them
- Manipulate the pricecoinds using addnode (onetry) an observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
!/usr/bin/env python3 Copyright (c) 2015-2017 The Bitcoin Core developers Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. Start after p2p and rpc ports Create two proxies on different ports ... one unauthenticated ... one supporting authenticated and unauthenticated (Tor) ... one on IPv6 with similar configuration Note: proxies are not used to connect to local nodes this is because the proxy to use is based on CService.GetNetwork(), which return NET_UNROUTABLE for localhost Test: outgoing IPv4 connection through node Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6 Test: outgoing IPv6 connection through node Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6 Test: outgoing onion connection through node Test: outgoing DNS name connection through node basic -proxy -proxy plus -onion -proxy plus -onion, -proxyrandomize Check that credentials as used for -proxyrandomize connections are unique proxy on IPv6 localhost test RPC getnetworkinfo | 1,997 | en | 0.755281 |
# -*- coding: utf-8 -*-
"""
Copyright (c) 2021 Showa Denko Materials co., Ltd. All rights reserved.
This software is for non-profit use only.
THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THIS SOFTWARE OR THE USE OR OTHER DEALINGS IN THIS SOFTWARE.
"""
import time
import numpy as np
from GPyOpt.core.task.objective import Objective
class MultiObjective(Objective):
    """
    Objective wrapper for problems with several objective functions.

    param func: objective function; returns n_obj values per input row.
    param n_obj: number of objective functions
    param num_cores: number of cores to use in the process of evaluating the objective (default, 1).
    param objective_name: name of the objective function.
    param batch_type: Type of batch used. Only 'synchronous' evaluations are possible at the moment.
    param space: Not in use.
    """
    def __init__(self, func, n_obj, num_cores=1, objective_name='no_name', batch_type='synchronous', space=None):
        # Store configuration; evaluation is sequential regardless of num_cores.
        self.func = func
        self.n_procs = num_cores
        self.num_evaluations = 0
        self.space = space
        self.objective_name = objective_name
        self.n_obj = n_obj
    def evaluate(self, x):
        """
        Performs the evaluation of the objective at x.

        Returns (f_evals, cost_evals): the stacked objective values and the
        wall-clock time of each row's evaluation.
        """
        return self._eval_func(x)
    def _eval_func(self, x):
        """
        Performs sequential evaluations of the function at x (single location
        or batch), timing every call.
        """
        cost_evals = []
        results = []
        for row in range(x.shape[0]):
            started = time.time()
            results.append(self.func(np.atleast_2d(x[row])))
            cost_evals.append(time.time() - started)
        # Stack below a (0, n_obj) seed so an empty batch keeps its shape.
        f_evals = np.vstack([np.empty(shape=[0, self.n_obj])] + results)
        return f_evals, cost_evals
| Samples/codes/matopt_review/add_objective.py | 2,232 | Class to handle problems with multiple objective functions.
param func: objective function.
param n_obj: number of objective functions
param num_cores: number of cores to use in the process of evaluating the objective (default, 1).
param objective_name: name of the objective function.
param batch_type: Type of batch used. Only 'synchronous' evaluations are possible at the moment.
param space: Not in use.
Performs sequential evaluations of the function at x (single location or batch). The computing time of each
evaluation is also provided.
Performs the evaluation of the objective at x.
Copyright (c) 2021 Showa Denko Materials co., Ltd. All rights reserved.
This software is for non-profit use only.
THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THIS SOFTWARE OR THE USE OR OTHER DEALINGS IN THIS SOFTWARE.
-*- coding: utf-8 -*- | 1,196 | en | 0.842729 |
"""
Given a rod of length n inches and an array of prices
that includes prices of all pieces of size smaller than n.
Determine the maximum value obtainable by cutting up the rod and
selling the pieces. For example, if the length of the rod is 8
and the values of different pieces are given as the following,
then the maximum obtainable value is 22 (by cutting in two pieces of lengths 2 and 6)
length | 1 2 3 4 5 6 7 8
--------------------------------------------
price | 1 5 8 9 10 17 17 20
In unbounded knapsack there is only one change compared to 0/1 knapsack, i.e.
****dp[i][j-wt[n-1]]****
wt arr => len arr
val arr => price arr
W => L
"""
def RodCutting(larr, parr, L):
    """Return the maximum obtainable price for a rod of length ``L``.

    Unbounded-knapsack DP: each piece length may be reused any number of times.

    :param larr: available piece lengths
    :param parr: price of the piece with the corresponding length in ``larr``
    :param L: total rod length
    :returns: maximum total price obtainable by cutting the rod
    """
    n = len(larr)
    # dp[i][j] = best price using the first i piece types on a rod of length j.
    dp = [[0] * (L + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for j in range(1, L + 1):
            if larr[i - 1] <= j:
                # Either take piece i again (stay on row i: unbounded) or skip it.
                dp[i][j] = max(parr[i - 1] + dp[i][j - larr[i - 1]], dp[i - 1][j])
            else:
                dp[i][j] = dp[i - 1][j]
    # Removed leftover debug print(dp) -- it spammed stdout on every call.
    return dp[n][L]
# Demo matching the module docstring's example (prices 1 5 8 9 10 17 17 20);
# the first price was mistyped as 9, which printed 72 instead of the documented 22.
print(RodCutting([1, 2, 3, 4, 5, 6, 7, 8], [1, 5, 8, 9, 10, 17, 17, 20], 8))
| DynamicProgramming/UnBoundedKnapSack/RodCutting.py | 1,126 | Given a rod of length n inches and an array of prices
that includes prices of all pieces of size smaller than n.
Determine the maximum value obtainable by cutting up the rod and
selling the pieces. For example, if the length of the rod is 8
and the values of different pieces are given as the following,
then the maximum obtainable value is 22 (by cutting in two pieces of lengths 2 and 6)
length | 1 2 3 4 5 6 7 8
--------------------------------------------
price | 1 5 8 9 10 17 17 20
In unbounded knapsack their is only one change compared to 0/1 knapsack i.e
****dp[i][j-wt[n-1]]****
wt arr => len arr
val arr => price arr
W => L | 677 | en | 0.852099 |
"""Classes for validating data passed to the annotations API."""
import copy
import colander
from dateutil.parser import parse
from pyramid import i18n
from h.schemas.base import JSONSchema, ValidationError
from h.search.query import LIMIT_DEFAULT, LIMIT_MAX, OFFSET_MAX
from h.search.util import wildcard_uri_is_valid
from h.util import document_claims
_ = i18n.TranslationStringFactory(__package__)
def _validate_wildcard_uri(node, value):
    """Reject any URI whose domain portion contains a wildcard character."""
    for uri in value:
        if wildcard_uri_is_valid(uri):
            continue
        # Stop at the first offending URI, mirroring colander validator style.
        raise colander.Invalid(
            node,
            """Wildcards (_ and *) are not permitted within the
                domain of wildcard_uri""",
        )
class AnnotationSchema(JSONSchema):
    """Validate an annotation object."""
    # JSON Schema for the client-posted annotation payload.  All properties
    # are optional at this layer; CreateAnnotationSchema/UpdateAnnotationSchema
    # below decide what is required and strip server-managed fields.
    schema = {
        "type": "object",
        "properties": {
            # Metadata about the annotated document (Dublin Core, HighWire,
            # and generic link claims).
            "document": {
                "type": "object",
                "properties": {
                    "dc": {
                        "type": "object",
                        "properties": {
                            "identifier": {"type": "array", "items": {"type": "string"}}
                        },
                    },
                    "highwire": {
                        "type": "object",
                        "properties": {
                            "doi": {"type": "array", "items": {"type": "string"}},
                            "pdf_url": {"type": "array", "items": {"type": "string"}},
                        },
                    },
                    "link": {
                        "type": "array",
                        "items": {
                            "type": "object",
                            "properties": {
                                "href": {"type": "string"},
                                "type": {"type": "string"},
                            },
                            "required": ["href"],
                        },
                    },
                },
            },
            "group": {"type": "string"},
            # ACL entries keyed by action; principals are acct: or group: strings.
            "permissions": {
                "title": "Permissions",
                "description": "Annotation action access control list",
                "type": "object",
                "patternProperties": {
                    "^(admin|delete|read|update)$": {
                        "type": "array",
                        "items": {"type": "string", "pattern": "^(acct:|group:).+$"},
                    }
                },
                "required": ["read"],
            },
            # IDs of parent annotations when this annotation is a reply.
            "references": {"type": "array", "items": {"type": "string"}},
            "tags": {"type": "array", "items": {"type": "string"}},
            # Anchoring targets; each selector only needs a "type" here.
            "target": {
                "type": "array",
                "items": {
                    "type": "object",
                    "properties": {
                        "selector": {
                            "type": "array",
                            "items": {
                                "type": "object",
                                "properties": {"type": {"type": "string"}},
                                "required": ["type"],
                            },
                        }
                    },
                },
            },
            "text": {"type": "string"},
            "uri": {"type": "string"},
        },
    }
class CreateAnnotationSchema:
    """Validate the POSTed data of a create annotation request."""
    def __init__(self, request):
        # AnnotationSchema performs the raw JSON Schema validation pass.
        self.structure = AnnotationSchema()
        self.request = request
    def validate(self, data):
        """Validate *data* and return it translated to internal field names.

        :raises ValidationError: if the payload fails JSON Schema validation
            or lacks a non-empty 'uri'.
        """
        appstruct = self.structure.validate(data)
        new_appstruct = {}
        _remove_protected_fields(appstruct)
        # The creator is always the authenticated user, never client-supplied.
        new_appstruct["userid"] = self.request.authenticated_userid
        uri = appstruct.pop("uri", "").strip()
        if not uri:
            raise ValidationError("uri: " + _("'uri' is a required property"))
        new_appstruct["target_uri"] = uri
        new_appstruct["text"] = appstruct.pop("text", "")
        new_appstruct["tags"] = appstruct.pop("tags", [])
        new_appstruct["groupid"] = appstruct.pop("group", "__world__")
        new_appstruct["references"] = appstruct.pop("references", [])
        if "permissions" in appstruct:
            # Collapse the client's permissions dict to a shared boolean.
            new_appstruct["shared"] = _shared(
                appstruct.pop("permissions"), new_appstruct["groupid"]
            )
        else:
            new_appstruct["shared"] = False
        if "target" in appstruct:
            new_appstruct["target_selectors"] = _target_selectors(
                appstruct.pop("target")
            )
        # Replies always get the same groupid as their parent. The parent's
        # groupid is added to the reply annotation later by the storage code.
        # Here we just delete any group sent by the client from replies.
        if new_appstruct["references"] and "groupid" in new_appstruct:
            del new_appstruct["groupid"]
        new_appstruct["document"] = _document(
            appstruct.pop("document", {}), new_appstruct["target_uri"]
        )
        # Whatever the client sent that we did not consume is kept as extra.
        new_appstruct["extra"] = appstruct
        return new_appstruct
class UpdateAnnotationSchema:
    """Validate the POSTed data of an update annotation request."""
    def __init__(self, request, existing_target_uri, groupid):
        self.request = request
        # Used to re-derive document data when the update omits a new uri.
        self.existing_target_uri = existing_target_uri
        # Group of the stored annotation; updates may not move groups.
        self.groupid = groupid
        self.structure = AnnotationSchema()
    def validate(self, data):
        """Validate *data* and return only the fields an update may change,
        translated to internal names.

        :raises ValidationError: on JSON Schema failure or an empty 'uri'.
        """
        appstruct = self.structure.validate(data)
        new_appstruct = {}
        _remove_protected_fields(appstruct)
        # Some fields are not allowed to be changed in annotation updates.
        for key in ["group", "groupid", "userid", "references"]:
            appstruct.pop(key, "")
        # Fields that are allowed to be updated and that have a different name
        # internally than in the public API.
        if "uri" in appstruct:
            new_uri = appstruct.pop("uri").strip()
            if not new_uri:
                raise ValidationError("uri: " + _("'uri' is a required property"))
            new_appstruct["target_uri"] = new_uri
        if "permissions" in appstruct:
            new_appstruct["shared"] = _shared(
                appstruct.pop("permissions"), self.groupid
            )
        if "target" in appstruct:
            new_appstruct["target_selectors"] = _target_selectors(
                appstruct.pop("target")
            )
        # Fields that are allowed to be updated and that have the same internal
        # and external name.
        for key in ["text", "tags"]:
            if key in appstruct:
                new_appstruct[key] = appstruct.pop(key)
        if "document" in appstruct:
            new_appstruct["document"] = _document(
                appstruct.pop("document"),
                new_appstruct.get("target_uri", self.existing_target_uri),
            )
        # Remaining client-sent fields are preserved verbatim as extra.
        new_appstruct["extra"] = appstruct
        return new_appstruct
def _document(document, claimant):
    """
    Return document meta and document URI data from the given document dict.

    Reshapes the "document" dict that the client posts into the form used to
    create DocumentURI and DocumentMeta objects later.
    """
    document = document or {}
    # Each extractor gets its own deep copy, presumably because the helpers
    # mutate their input -- see document_claims.
    return {
        "document_uri_dicts": document_claims.document_uris_from_data(
            copy.deepcopy(document), claimant=claimant
        ),
        "document_meta_dicts": document_claims.document_metas_from_data(
            copy.deepcopy(document), claimant=claimant
        ),
    }
def _format_jsonschema_error(error):
"""Format a :py:class:`jsonschema.ValidationError` as a string."""
if error.path:
dotted_path = ".".join([str(c) for c in error.path])
return "{path}: {message}".format(path=dotted_path, message=error.message)
return error.message
def _remove_protected_fields(appstruct):
# Some fields are not to be set by the user, ignore them.
for field in [
"created",
"updated",
"user",
"id",
"links",
"flagged",
"hidden",
"moderation",
"user_info",
]:
appstruct.pop(field, None)
def _shared(permissions, groupid):
"""
Return True if the given permissions object represents shared permissions.
Return False otherwise.
Reduces the client's complex permissions dict to a simple shared boolean.
:param permissions: the permissions dict sent by the client in an
annotation create or update request
:type permissions: dict
:param groupid: the groupid of the annotation that the permissions dict
applies to
:type groupid: unicode
"""
return permissions["read"] == ["group:{id}".format(id=groupid)]
def _target_selectors(targets):
"""
Return the target selectors from the given target list.
Transforms the target lists that the client sends in annotation create and
update requests into our internal target_selectors format.
"""
# Any targets other than the first in the list are discarded.
# Any fields of the target other than 'selector' are discarded.
if targets and "selector" in targets[0]:
return targets[0]["selector"]
return []
class SearchParamsSchema(colander.Schema):
    """Colander schema for the annotation search API's query parameters."""
    _separate_replies = colander.SchemaNode(
        colander.Boolean(),
        missing=False,
        description="Return a separate set of annotations and their replies.",
    )
    sort = colander.SchemaNode(
        colander.String(),
        validator=colander.OneOf(["created", "updated", "group", "id", "user"]),
        missing="updated",
        description="The field by which annotations should be sorted.",
    )
    search_after = colander.SchemaNode(
        colander.String(),
        missing=colander.drop,
        description="""Returns results after the annotation who's sort field
                    has this value. If specifying a date use the format
                    yyyy-MM-dd'T'HH:mm:ss.SSX or time in miliseconds since the
                    epoch. This is used for iteration through large collections
                    of results.""",
    )
    limit = colander.SchemaNode(
        colander.Integer(),
        validator=colander.Range(min=0, max=LIMIT_MAX),
        missing=LIMIT_DEFAULT,
        description="The maximum number of annotations to return.",
    )
    order = colander.SchemaNode(
        colander.String(),
        validator=colander.OneOf(["asc", "desc"]),
        missing="desc",
        description="The direction of sort.",
    )
    offset = colander.SchemaNode(
        colander.Integer(),
        validator=colander.Range(min=0, max=OFFSET_MAX),
        missing=0,
        description="""The number of initial annotations to skip. This is
                       used for pagination. Not suitable for paging through
                       thousands of annotations-search_after should be used
                       instead.""",
    )
    group = colander.SchemaNode(
        colander.String(),
        missing=colander.drop,
        description="Limit the results to this group of annotations.",
    )
    quote = colander.SchemaNode(
        colander.Sequence(),
        colander.SchemaNode(colander.String()),
        missing=colander.drop,
        description="""Limit the results to annotations that contain this text inside
                        the text that was annotated.""",
    )
    references = colander.SchemaNode(
        colander.Sequence(),
        colander.SchemaNode(colander.String()),
        missing=colander.drop,
        description="""Returns annotations that are replies to this parent annotation id.""",
    )
    tag = colander.SchemaNode(
        colander.Sequence(),
        colander.SchemaNode(colander.String()),
        missing=colander.drop,
        description="Limit the results to annotations tagged with the specified value.",
    )
    tags = colander.SchemaNode(
        colander.Sequence(),
        colander.SchemaNode(colander.String()),
        missing=colander.drop,
        description="Alias of tag.",
    )
    text = colander.SchemaNode(
        colander.Sequence(),
        colander.SchemaNode(colander.String()),
        missing=colander.drop,
        description="Limit the results to annotations that contain this text in their textual body.",
    )
    uri = colander.SchemaNode(
        colander.Sequence(),
        colander.SchemaNode(colander.String()),
        missing=colander.drop,
        description="""Limit the results to annotations matching the specific URI
                       or equivalent URIs. URI can be a URL (a web page address) or
                       a URN representing another kind of resource such as DOI
                       (Digital Object Identifier) or a PDF fingerprint.""",
    )
    uri_parts = colander.SchemaNode(
        colander.Sequence(),
        colander.SchemaNode(colander.String()),
        name="uri.parts",
        missing=colander.drop,
        description="""Limit the results to annotations with the given keyword
                       appearing in the URL.""",
    )
    url = colander.SchemaNode(
        colander.Sequence(),
        colander.SchemaNode(colander.String()),
        missing=colander.drop,
        description="Alias of uri.",
    )
    wildcard_uri = colander.SchemaNode(
        colander.Sequence(),
        colander.SchemaNode(colander.String()),
        validator=_validate_wildcard_uri,
        missing=colander.drop,
        description="""
            Limit the results to annotations matching the wildcard URI.
            URI can be a URL (a web page address) or a URN representing another
            kind of resource such as DOI (Digital Object Identifier) or a
            PDF fingerprint.

            `*` will match any character sequence (including an empty one),
            and a `_` will match any single character. Wildcards are only permitted
            within the path and query parts of the URI.

            Escaping wildcards is not supported.

            Examples of valid uris":" `http://foo.com/*` `urn:x-pdf:*` `file://localhost/_bc.pdf`
            Examples of invalid uris":" `*foo.com` `u_n:*` `file://*` `http://foo.com*`
            """,
    )
    any = colander.SchemaNode(
        colander.Sequence(),
        colander.SchemaNode(colander.String()),
        missing=colander.drop,
        description="""Limit the results to annotations whose quote, tags,
                       text or url fields contain this keyword.""",
    )
    user = colander.SchemaNode(
        colander.String(),
        missing=colander.drop,
        description="Limit the results to annotations made by the specified user.",
    )

    def validator(self, node, cstruct):
        """Cross-field validation: check search_after parsability and force offset."""
        sort = cstruct["sort"]
        search_after = cstruct.get("search_after", None)
        if search_after:
            # Date-sorted cursors must themselves be parsable dates/epoch ms.
            if sort in ["updated", "created"] and not self._date_is_parsable(
                search_after
            ):
                raise colander.Invalid(
                    node,
                    """search_after must be a parsable date in the form
                    yyyy-MM-dd'T'HH:mm:ss.SSX
                    or time in miliseconds since the epoch.""",
                )
            # offset must be set to 0 if search_after is specified.
            cstruct["offset"] = 0

    def _date_is_parsable(self, value):
        """Return True if date is parsable and False otherwise."""
        # Dates like "2017" can also be cast as floats so if a number is less
        # than 9999 it is assumed to be a year and not ms since the epoch.
        try:
            if float(value) < 9999:
                raise ValueError("This is not in the form ms since the epoch.")
        except ValueError:
            try:
                parse(value)
            except ValueError:
                return False
        return True
| h/schemas/annotation.py | 16,107 | Validate an annotation object.
Validate the POSTed data of a create annotation request.
Validate the POSTed data of an update annotation request.
Return True if date is parsable and False otherwise.
Return document meta and document URI data from the given document dict.
Transforms the "document" dict that the client posts into a convenient
format for creating DocumentURI and DocumentMeta objects later.
Format a :py:class:`jsonschema.ValidationError` as a string.
Return True if the given permissions object represents shared permissions.
Return False otherwise.
Reduces the client's complex permissions dict to a simple shared boolean.
:param permissions: the permissions dict sent by the client in an
annotation create or update request
:type permissions: dict
:param groupid: the groupid of the annotation that the permissions dict
applies to
:type groupid: unicode
Return the target selectors from the given target list.
Transforms the target lists that the client sends in annotation create and
update requests into our internal target_selectors format.
Raise if wildcards are within the domain of the uri.
Classes for validating data passed to the annotations API.
Replies always get the same groupid as their parent. The parent's groupid is added to the reply annotation later by the storage code. Here we just delete any group sent by the client from replies. Some fields are not allowed to be changed in annotation updates. Fields that are allowed to be updated and that have a different name internally than in the public API. Fields that are allowed to be updated and that have the same internal and external name. Some fields are not to be set by the user, ignore them. Any targets other than the first in the list are discarded. Any fields of the target other than 'selector' are discarded. offset must be set to 0 if search_after is specified. Dates like "2017" can also be cast as floats so if a number is less than 9999 it is assumed to be a year and not ms since the epoch. | 2,010 | en | 0.853505 |
import tensorflow as tf
import tensorflow_zero_out
import numpy as np
import os
# Create a model using low-level tf.* APIs
class ZeroOut(tf.Module):
    """tf.Module wrapping the custom zero_out op so it can be exported."""
    # A fixed input signature (1-D int32 tensor) lets a concrete function be
    # traced for the TFLite conversion below.
    @tf.function(input_signature=[tf.TensorSpec(shape=[None], dtype=tf.int32)])
    def __call__(self, x):
        return tensorflow_zero_out.zero_out(x)
model = ZeroOut()
# (to run your model) result = model(tf.constant([1, 2, 3], dtype=tf.int32))
# (to generate a SavedModel) tf.saved_model.save(model, "saved_model_tf_dir")
concrete_func = model.__call__.get_concrete_function()
# Convert the model.
# Note: for versions earlier than TensorFlow 2.7, the from_concrete_functions
# API accepted only the first argument:
# > converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func],
                                                            )
tflite_model = converter.convert()
# Save the model.
with open('model.tflite', 'wb') as f:
f.write(tflite_model) | tensorflow_zero_out/python/ops/convert_to_tflite.py | 1,026 | Create a model using low-level tf.* APIs (ro run your model) result = Squared(5.0) This prints "25.0" (to generate a SavedModel) tf.saved_model.save(model, "saved_model_tf_dir") Convert the model. Notes that for the versions earlier than TensorFlow 2.7, the from_concrete_functions API is able to work when there is only the first argument given: > converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func]) Save the model. | 442 | en | 0.612686 |
# -*- coding: utf-8 -*-
#
# Author: oldj
# Email: oldj.wu@gmail.com
# Blog: http://oldj.net
#
import os
import re
import StringIO
from PIL import Image
from PIL import ImageDraw
import pygame
# Folder containing this script; fonts are bundled alongside it.
g_script_folder = os.path.dirname(os.path.abspath(__file__))
g_fonts_folder = os.path.join(g_script_folder, "fonts")
# Regex matching the next "word" unit for line wrapping: a token optionally
# wrapped in opening/closing punctuation, bare punctuation runs, or a
# percentage.  The character classes cover ASCII and full-width CJK forms.
g_re_first_word = re.compile((u""
    + u"(%(prefix)s+\S%(postfix)s+)" # punctuation around a token
    + u"|(%(prefix)s*\w+%(postfix)s*)" # word
    + u"|(%(prefix)s+\S)|(\S%(postfix)s+)" # leading/trailing punctuation
    + u"|(\d+%%)" # percentage
) % {
    "prefix": u"['\"\(<\[\{‘“(《「『]",
    "postfix": u"[:'\"\)>\]\}:’”)》」』,;\.\?!,、;。?!]",
})
# pygame is used only for font rendering; initialize once at import time.
pygame.init()
def getFontForPyGame(font_name="wqy-zenhei.ttc", font_size=14):
    """Load *font_name* from the bundled fonts folder at *font_size* px."""
    font_path = os.path.join(g_fonts_folder, font_name)
    return pygame.font.Font(font_path, font_size)
def makeConfig(cfg=None):
    """Return a complete rendering-config dict.

    Entries in *cfg* override the defaults below; non-dict input is ignored.
    All sizes are pixels; padding is (top, right, bottom, left).

    Fix: use ``isinstance`` instead of ``type(cfg) != dict`` so dict
    subclasses (e.g. OrderedDict) are accepted as overrides too.
    """
    if not isinstance(cfg, dict):
        cfg = {}

    default_cfg = {
        "width": 440, # px
        "padding": (15, 18, 20, 18),
        "line-height": 20, #px
        "title-line-height": 32, #px
        "font-size": 14, # px
        "title-font-size": 24, # px
        "font-family": "wqy-zenhei.ttc",
        # "font-family": "msyh.ttf",
        "font-color": (0, 0, 0),
        "font-antialiasing": True, # anti-alias the rendered text
        "background-color": (255, 255, 255),
        "border-size": 1,
        "border-color": (192, 192, 192),
        "copyright": u"本图文由 txt2.im 自动生成,但不代表 txt2.im 赞同其内容或立场。",
        "copyright-center": False, # center the copyright line; left-aligned if False
        "first-line-as-title": True,
        "break-word": False,
    }

    default_cfg.update(cfg)

    return default_cfg
def makeLineToWordsList(line, break_word=False):
    u"""Split one line of text into a list of word tokens.

    With break_word=True every character becomes its own token; otherwise
    tokens are peeled off the front of the line using g_re_first_word.
    """
    if break_word:
        return list(line)

    words = []
    rest = line
    while rest:
        match = g_re_first_word.match(rest)
        cut = match.end() if match else 1
        words.append(rest[:cut])
        rest = rest[cut:]
    return words
def makeLongLineToLines(long_line, start_x, start_y, width, line_height, font, cn_char_width=0):
    u"""Wrap one long line of text into a list of renderable line dicts.

    Each entry is {"x", "y", "text", "font"}; an empty input line yields
    [None] so callers still advance by one line height.

    Fix: use floor division for the characters-per-line estimate -- the
    result is used as a list slice bound, so it must stay an integer under
    Python 3 as well (`//` is identical to `/` for ints on Python 2).

    :param long_line: paragraph text to wrap
    :param start_x: pixel x origin of every wrapped line
    :param start_y: pixel y origin of the first wrapped line
    :param width: available pixel width
    :param line_height: vertical advance per wrapped line
    :param font: pygame font used to measure rendered text
    :param cn_char_width: optional pre-measured width of one CJK character
    """
    txt = long_line
    if not txt:
        return [None]

    words = makeLineToWordsList(txt)
    lines = []
    if not cn_char_width:
        cn_char_width, h = font.size(u"汉")
    avg_char_per_line = width // cn_char_width
    if avg_char_per_line <= 1:
        avg_char_per_line = 1

    line_x = start_x
    line_y = start_y

    while words:
        # Start from the average-width guess, then grow/shrink until the
        # measured pixel width fits.
        tmp_words = words[:avg_char_per_line]
        tmp_ln = "".join(tmp_words)
        w, h = font.size(tmp_ln)
        wc = len(tmp_words)
        while w < width and wc < len(words):
            wc += 1
            tmp_words = words[:wc]
            tmp_ln = "".join(tmp_words)
            w, h = font.size(tmp_ln)
        while w > width and len(tmp_words) > 1:
            tmp_words = tmp_words[:-1]
            tmp_ln = "".join(tmp_words)
            w, h = font.size(tmp_ln)

        if w > width and len(tmp_words) == 1:
            # A single word/number wider than the line: hard-break it.
            line_y = makeLongWordToLines(
                tmp_words[0], line_x, line_y, width, line_height, font, lines
            )
            words = words[len(tmp_words):]
            continue

        line = {
            "x": line_x,
            "y": line_y,
            "text": tmp_ln,
            "font": font,
        }
        line_y += line_height
        words = words[len(tmp_words):]
        lines.append(line)

        if len(lines) >= 1:
            # Drop leading whitespace tokens at the start of continuation
            # lines (everything after the first wrapped line).
            while len(words) > 0 and not words[0].strip():
                words = words[1:]

    return lines
def makeLongWordToLines(long_word, line_x, line_y, width, line_height, font, lines):
    """Hard-break one over-wide word into fixed-width line dicts.

    Appends {"x", "y", "text", "font"} entries to *lines* in place and
    returns the y coordinate following the last emitted line.

    Fix: use floor division for the characters-per-line estimate -- it is
    used as a string slice bound, so it must stay an integer under Python 3
    (`//` is identical to `/` for ints on Python 2).
    """
    if not long_word:
        return line_y

    c = long_word[0]
    char_width, char_height = font.size(c)
    default_char_num_per_line = width // char_width

    while long_word:
        # Grow/shrink around the estimate until the measured width fits.
        tmp_ln = long_word[:default_char_num_per_line]
        w, h = font.size(tmp_ln)
        l = len(tmp_ln)
        while w < width and l < len(long_word):
            l += 1
            tmp_ln = long_word[:l]
            w, h = font.size(tmp_ln)
        while w > width and len(tmp_ln) > 1:
            tmp_ln = tmp_ln[:-1]
            w, h = font.size(tmp_ln)

        l = len(tmp_ln)
        long_word = long_word[l:]

        line = {
            "x": line_x,
            "y": line_y,
            "text": tmp_ln,
            "font": font,
        }
        line_y += line_height
        lines.append(line)

    return line_y
def makeMatrix(txt, font, title_font, cfg):
    """Lay out *txt* into positioned line dicts plus the overall height."""
    page_width = cfg["width"]
    data = {
        "width": page_width,
        "height": 0,
        "lines": [],
    }

    cur_x = cfg["padding"][3]
    cur_y = cfg["padding"][0]
    cn_char_width, h = font.size(u"汉")

    for ln_idx, raw_line in enumerate(txt.split("\n")):
        raw_line = raw_line.rstrip()
        # The first line may be rendered as a title with its own font/leading.
        if ln_idx == 0 and cfg["first-line-as-title"]:
            use_font = title_font
            line_height = cfg["title-line-height"]
        else:
            use_font = font
            line_height = cfg["line-height"]

        avail_width = page_width - cur_x - cfg["padding"][1]
        wrapped = makeLongLineToLines(
            raw_line, cur_x, cur_y, avail_width, line_height, use_font,
            cn_char_width=cn_char_width,
        )
        cur_y += line_height * len(wrapped)
        data["lines"].extend(wrapped)

    data["height"] = cur_y + cfg["padding"][2]
    return data
def makeImage(data, cfg):
    u"""Render the laid-out line dicts in *data* onto a new PIL image."""
    width = data["width"]
    height = data["height"]
    if cfg["copyright"]:
        height += 48  # extra strip at the bottom for the copyright notice

    im = Image.new("RGB", (width, height), cfg["background-color"])
    dr = ImageDraw.Draw(im)

    for line in data["lines"]:
        __makeLine(im, line, cfg)

    drawBorder(im, dr, cfg)
    drawCopyright(im, dr, cfg)

    return im
def drawCopyright(im, dr, cfg):
    u"""Draw the copyright notice near the bottom edge of the image.

    Draws a light separator rule, then pastes the pygame-rendered text
    below it.  No-op when cfg["copyright"] is falsy.
    """
    if not cfg["copyright"]:
        return

    font = getFontForPyGame(font_name=cfg["font-family"], font_size=12)
    # Render with pygame, then round-trip through an in-memory buffer so the
    # result can be opened as a PIL image.
    rtext = font.render(cfg["copyright"],
        cfg["font-antialiasing"], (128, 128, 128), cfg["background-color"]
    )
    sio = StringIO.StringIO()
    pygame.image.save(rtext, sio)
    sio.seek(0)
    copyright_im = Image.open(sio)

    iw, ih = im.size
    cw, ch = rtext.get_size()
    padding = cfg["padding"]
    offset_y = ih - 32 - padding[2]
    if cfg["copyright-center"]:
        # NOTE(review): Python 2 integer division; under Python 3 this yields
        # a float and Image.paste requires ints -- confirm before porting.
        cx = (iw - cw) / 2
    else:
        cx = cfg["padding"][3]
    cy = offset_y + 12

    # Separator rule above the copyright text.
    dr.line([(padding[3], offset_y), (iw - padding[1], offset_y)], width=1, fill=(192, 192, 192))
    im.paste(copyright_im, (cx, cy))
def drawBorder(im, dr, cfg):
    u"""Draw a rectangular border around the whole image (if configured)."""
    if not cfg["border-size"]:
        return
    w, h = im.size
    right = w - 1
    bottom = h - 1
    outline = [(0, 0), (right, 0), (right, bottom), (0, bottom), (0, 0)]
    dr.line(outline, width=cfg["border-size"], fill=cfg["border-color"])
def __makeLine(im, line, cfg):
    """Paste one rendered text line onto the PIL image *im*.

    *line* is a dict with "x", "y", "text", "font" -- or None for an empty
    source line, which is skipped.
    """
    if not line:
        return

    # Render with pygame, then round-trip through an in-memory buffer so the
    # result can be opened and pasted as a PIL image.
    sio = StringIO.StringIO()
    x, y = line["x"], line["y"]
    text = line["text"]
    font = line["font"]
    rtext = font.render(text, cfg["font-antialiasing"], cfg["font-color"], cfg["background-color"])
    pygame.image.save(rtext, sio)
    sio.seek(0)
    ln_im = Image.open(sio)
    im.paste(ln_im, (x, y))
def txt2im(txt, outfn, cfg=None, show=False):
    """Render *txt* to an image saved at *outfn*.

    *cfg* is merged over the defaults from makeConfig().  When *show* is
    True and running on Windows, the result is also opened in a viewer.
    """
    cfg = makeConfig(cfg)
    body_font = getFontForPyGame(cfg["font-family"], cfg["font-size"])
    heading_font = getFontForPyGame(cfg["font-family"], cfg["title-font-size"])
    layout = makeMatrix(txt, body_font, heading_font, cfg)
    image = makeImage(layout, cfg)
    image.save(outfn)
    if show and os.name == "nt":
        image.show()
def test():
    """Smoke test: render test.txt next to this script into test.png."""
    # Fix: use a context manager so the file handle is closed promptly (the
    # original left it open until garbage collection).
    with open("test.txt", "rb") as fp:
        content = fp.read().decode("utf-8")
    txt2im(content, "test.png", show=True)
if __name__ == "__main__":
test()
| hard-gists/9c4d012d6fff059ccea7/snippet.py | 8,249 | 绘制边框
绘制版权信息
将一行文本转为单词列表
将一个长行分成多个可显示的短行
-*- coding: utf-8 -*- Author: oldj Email: oldj.wu@gmail.com Blog: http://oldj.net 标点 单词 标点 百分数 pxpxpx px px "font-family": "msyh.ttf", 字体是否反锯齿 版权信息居中显示,如为 False 则居左显示 txt = u"测试汉字abc123" txt = txt.decode("utf-8") 处理一个长单词或长数字 去掉长行的第二行开始的行首的空白字符 dr.text((line["x"], line["y"]), line["text"], font=font, fill=cfg["font-color"]) 缩放 im = im.resize((width / 2, height / 2), Image.ANTIALIAS) print(cfg) print(cfg) | 482 | zh | 0.539499 |
###
# (C) Copyright [2019-2020] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from simplivity.ovc_client import OVC
from simplivity.exceptions import HPESimpliVityException
import pprint
# Pretty-printer used for all diagnostic output below.
pp = pprint.PrettyPrinter(indent=4)
# Connection settings: fill in the OVC address and credentials before running.
config = {
    "ip": "<ovc_ip>",
    "credentials": {
        "username": "<username>",
        "password": "<password>"
    }
}
# Client handle plus the resource collections used throughout the example.
ovc = OVC(config)
policies = ovc.policies
hosts = ovc.hosts
clusters = ovc.omnistack_clusters
cluster_groups = ovc.cluster_groups
print("\n\nget_all with default params")
all_policies = policies.get_all()
count = len(all_policies)
for policy in all_policies:
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
print(f"Total number of policies : {count}")
policy_object = all_policies[0]
print("\n\nget_all with filters")
all_policies = policies.get_all(filters={'name': policy_object.data["name"]})
count = len(all_policies)
for policy in all_policies:
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
print(f"Total number of policies : {count}")
print("\n\nget_all with pagination")
pagination = policies.get_all(limit=105, pagination=True, page_size=50)
end = False
while not end:
data = pagination.data
print("Page size:", len(data["resources"]))
print(f"{pp.pformat(data)}")
try:
pagination.next_page()
except HPESimpliVityException:
end = True
print("\n\nget_by_id")
policy = policies.get_by_id(policy_object.data["id"])
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
print("\n\nget_by_name")
policy = policies.get_by_name(policy_object.data["name"])
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
print("\n\nget_all VMs using this policy")
vms = policy.get_vms()
print(policy.data)
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
print(f"{pp.pformat(vms)} \n")
print("\n\ncreate policy")
policy_name = "fixed_frequency_retention_policy"
policy = policies.create(policy_name)
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
multiple_rules = [
{
"start_time": "14:30",
"end_time": "15:30",
"application_consistent": False,
"frequency": 3,
"retention": 5
},
{
"frequency": 5,
"retention": 6
}
]
print("\n\nadd rules to policy")
policy.create_rules(multiple_rules)
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
single_rule = {
"frequency": 10,
"retention": 12
}
policy.create_rules(single_rule)
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
print("\n\nget rule")
all_rules = policy.data["rules"]
for rule in all_rules:
rule_obj = policy.get_rule(rule.get('id'))
print(f"{pp.pformat(rule_obj)} \n")
print("\n\ndelete rule")
rule_id = policy.data["rules"][0]['id']
policy.delete_rule(rule_id)
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
print("\n\nsuspend policy on host")
host = hosts.get_all()[0]
policies.suspend(host)
print("\n\nsuspend policy on omnistack_cluster")
cluster = clusters.get_all()[0]
policies.suspend(cluster)
""" cluster_group options works only with setup having MVA, please use below code for setup with MVA
cluster_group = cluster_groups.get_all()[0]
print(f"{cluster_group}")
print(f"{pp.pformat(cluster_group.data)} \n")
policies.suspend(cluster_group)
"""
""" federation options works only with setup NOT having MVA, please use below code for setup without MVA
print("\n\nsuspend policy on federation")
policies.suspend()
"""
print("\n\nrename policy")
policy.rename(f"renamed_{policy.data['name']}")
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
print("\n\ndelete policy")
policy.delete()
| examples/policies.py | 4,181 | (C) Copyright [2019-2020] Hewlett Packard Enterprise Development LP Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 587 | en | 0.852934 |
# Copyright 2014-2016 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tornado compatibility layer for Motor, an asynchronous MongoDB driver.
See "Frameworks" in the Developer Guide.
"""
import functools
import os
import tornado.process
import warnings
from concurrent.futures import ThreadPoolExecutor
from tornado import concurrent, gen, ioloop, version as tornado_version
from tornado.gen import chain_future, coroutine # For framework interface.
from .. import DummySession as Session
try:
    # contextvars exists on Python 3.7+ only; when available it is used to
    # propagate the caller's context into executor threads (run_on_executor).
    import contextvars
except ImportError:
    contextvars = None
# Prefix prepended to wrapped class names by the Motor framework layer.
CLASS_PREFIX = ''
def get_event_loop():
    """Return the Tornado IOLoop for the current thread."""
    current_loop = ioloop.IOLoop.current()
    return current_loop
def is_event_loop(loop):
    """Return True if *loop* is a Tornado IOLoop instance."""
    loop_is_ioloop = isinstance(loop, ioloop.IOLoop)
    return loop_is_ioloop
def check_event_loop(loop):
    """Raise TypeError unless *loop* is a Tornado IOLoop."""
    if is_event_loop(loop):
        return
    raise TypeError(
        "io_loop must be instance of IOLoop, not %r" % loop)
def get_future(loop):
    """Return a new Future; *loop* is accepted for framework-interface parity."""
    future = concurrent.Future()
    return future
# Size the shared thread pool from MOTOR_MAX_WORKERS when set; otherwise
# default to five threads per CPU.
if 'MOTOR_MAX_WORKERS' in os.environ:
    max_workers = int(os.environ['MOTOR_MAX_WORKERS'])
else:
    max_workers = tornado.process.cpu_count() * 5
# Module-wide executor used by run_on_executor for blocking driver calls.
_EXECUTOR = ThreadPoolExecutor(max_workers=max_workers)
def run_on_executor(loop, fn, *args, **kwargs):
    """Run ``fn(*args, **kwargs)`` on the shared thread pool via *loop*.

    When contextvars is available, the caller's context is captured so the
    function observes the same context variables in the worker thread.
    """
    target = fn
    if contextvars:
        context = contextvars.copy_context()
        target = functools.partial(context.run, target)
    call = functools.partial(target, *args, **kwargs)
    return loop.run_in_executor(_EXECUTOR, call)
def chain_return_value(future, loop, return_value):
    """Compatible way to return a value in all Pythons.

    PEP 479, raise StopIteration(value) from a coroutine won't work forever,
    but "return value" doesn't work in Python 2. Instead, Motor methods that
    return values resolve a Future with it, and are implemented with callbacks
    rather than a coroutine internally.
    """
    proxy = concurrent.Future()

    def _propagate(done_future):
        # A finished proxy means the task was cancelled; nothing to copy.
        if proxy.done():
            return
        error = done_future.exception()
        if error is not None:
            proxy.set_exception(error)
        else:
            proxy.set_result(return_value)

    future.add_done_callback(functools.partial(loop.add_callback, _propagate))
    return proxy
def is_future(f):
    """Return True if *f* is a tornado/concurrent Future."""
    f_is_future = isinstance(f, concurrent.Future)
    return f_is_future
def call_soon(loop, callback, *args, **kwargs):
    """Schedule *callback* to run on *loop* as soon as possible."""
    if not (args or kwargs):
        loop.add_callback(callback)
    else:
        # Bind the arguments now; IOLoop.add_callback takes a no-arg callable.
        loop.add_callback(functools.partial(callback, *args, **kwargs))
def add_future(loop, future, callback, *args):
    """Attach callback(*args, <future>) to *future* via loop.add_future."""
    bound = functools.partial(callback, *args)
    loop.add_future(future, bound)
def pymongo_class_wrapper(f, pymongo_class):
    """Executes the coroutine f and wraps its result in a Motor class.

    See WrapAsync.
    """
    @functools.wraps(f)
    async def _wrapper(self, *args, **kwargs):
        raw = await f(self, *args, **kwargs)
        # Exact class comparison on purpose -- subclasses are not wrapped.
        if raw.__class__ != pymongo_class:
            return raw
        # Delegate to the current object to wrap the result.
        return self.wrap(raw)
    return _wrapper
def yieldable(future):
    """Deprecated pass-through kept for API compatibility; returns *future*."""
    message = ("The yieldable function is deprecated and will be removed in "
               "Motor 3.0")
    warnings.warn(message, DeprecationWarning, stacklevel=2)
    return future
def platform_info():
    """Return the framework/version string reported in driver handshakes."""
    label = 'Tornado %s' % (tornado_version,)
    return label
| motor/frameworks/tornado/__init__.py | 3,854 | Compatible way to return a value in all Pythons.
PEP 479, raise StopIteration(value) from a coroutine won't work forever,
but "return value" doesn't work in Python 2. Instead, Motor methods that
return values resolve a Future with it, and are implemented with callbacks
rather than a coroutine internally.
Executes the coroutine f and wraps its result in a Motor class.
See WrapAsync.
Tornado compatibility layer for Motor, an asynchronous MongoDB driver.
See "Frameworks" in the Developer Guide.
Copyright 2014-2016 MongoDB, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. For framework interface. Return early if the task was cancelled. Don't call isinstance(), not checking subclasses. Delegate to the current object to wrap the result. | 1,219 | en | 0.848029 |
# -*- coding: utf-8 -*-
"""Collection of useful http error for the Api"""
class JsonApiException(Exception):
    """Base class for errors rendered as JSON:API error objects."""

    title = "Unknown error"
    status = "500"
    source = None

    def __init__(
        self,
        detail,
        source=None,
        title=None,
        status=None,
        code=None,
        id_=None,
        links=None,
        meta=None,
    ):
        """Initialize a jsonapi exception.

        :param str detail: the detail of the error
        :param dict source: the source of the error
        """
        self.detail = detail
        self.source = source
        self.code = code
        self.id = id_
        self.links = links or {}
        self.meta = meta or {}
        # Only override the class-level defaults when explicitly provided.
        if title is not None:
            self.title = title
        if status is not None:
            self.status = status

    def to_dict(self):
        """Serialize the non-empty error fields as a plain dict."""
        fields = ("status", "source", "title", "detail",
                  "id", "code", "links", "meta")
        return {name: getattr(self, name)
                for name in fields
                if getattr(self, name, None)}
class BadRequest(JsonApiException):
    """Generic malformed-request error (HTTP 400)."""
    title = "Bad request"
    status = "400"
class InvalidField(BadRequest):
    """Raised when a field named in the ``fields`` querystring parameter is
    not part of the requested resource schema."""
    title = "Invalid fields querystring parameter."
    source = {"parameter": "fields"}
class InvalidInclude(BadRequest):
    """Raised when a field named in the ``include`` querystring parameter is
    not a relationship of the requested resource schema.
    """
    title = "Invalid include querystring parameter."
    source = {"parameter": "include"}
class InvalidFilters(BadRequest):
    """Raised when the ``filters`` querystring parameter contains errors."""
    title = "Invalid filters querystring parameter."
    source = {"parameter": "filters"}
class InvalidSort(BadRequest):
    """Raised when a field named in the ``sort`` querystring parameter is not
    part of the requested resource schema."""
    title = "Invalid sort querystring parameter."
    source = {"parameter": "sort"}
class ObjectNotFound(JsonApiException):
    """Raised when the requested object does not exist in the database (HTTP 404)."""
    title = "Object not found"
    status = "404"
class RelatedObjectNotFound(ObjectNotFound):
    """Raised when a related object is not found."""
    title = "Related object not found"
class RelationNotFound(JsonApiException):
    """Raised when the named relationship is not defined on the model."""
    title = "Relation not found"
class InvalidType(JsonApiException):
    """Raised on a conflict between resource types (HTTP 409)."""
    title = "Invalid type"
    status = "409"
class AccessDenied(JsonApiException):
    """Raised when the requested resource owner doesn't match the user of the ticket (HTTP 403)."""
    title = "Access denied"
    status = "403"
class InvalidContentType(JsonApiException):
    """Raised when the request uses a content type the API doesn't understand (HTTP 415)."""
    title = "Bad request"
    status = "415"
class InvalidAcceptType(JsonApiException):
    """Raised when the request expects a content type the API doesn't support (HTTP 406)."""
    title = "Bad request"
    status = "406"
| flapison/exceptions.py | 3,516 | Throw this error when requested resource owner doesn't match the user of the ticket
BadRequest error
When the request expects a content type that the API doesn't support
When the request uses a content type the API doesn't understand
Error to warn that a field specified in fields querystring is not in the requested resource schema
Error to warn that a specified filters in querystring parameter contains errors
Error to warn that a field specified in include querystring parameter is not a relationship of the requested
resource schema
Error to warn that a field specified in sort querystring parameter is not in the requested resource schema
Error to warn that there is a conflict between resource types
Base exception class for unknown errors
Error to warn that an object is not found in a database
Error to warn that a related object is not found
Error to warn that a relationship is not found on a model
Initialize a jsonapi exception
:param dict source: the source of the error
:param str detail: the detail of the error
Return values of each fields of an jsonapi error
Collection of useful http error for the Api
-*- coding: utf-8 -*- | 1,144 | en | 0.766823 |
import argparse
import logging
import json
import os
import tempfile
import sys
import re
import flywheel
from .supporting_files import bidsify_flywheel, utils, templates
from .supporting_files.project_tree import get_project_tree
# Module-level logger; basicConfig here so CLI invocations emit INFO output.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('curate-bids')
def clear_meta_info(context, template):
    """Strip the template namespace (e.g. 'BIDS') from a container's info."""
    if 'info' not in context:
        return
    info = context['info']
    if template.namespace in info:
        del info[template.namespace]
def format_validation_error(err):
    """Render a validation error as '<path> <message>' (path omitted at root)."""
    location = '/'.join(err.path)
    if not location:
        return err.message
    return location + ' ' + err.message
def validate_meta_info(container, template):
    """Validate a container's BIDS metadata in place.

    Containers that never matched a template get ``info[namespace] = 'NA'``.
    Matched containers get ``valid`` and ``error_message`` keys describing
    whether their metadata satisfies the matched template definition.
    """
    namespace = template.namespace

    # Ensure the info dict and the namespace entry exist; anything that never
    # matched a template is marked 'NA' and skipped.
    info = container.setdefault('info', {})
    bids_info = info.setdefault(namespace, 'NA')
    if bids_info == 'NA':
        return

    is_valid = True
    error_message = ''

    template_name = bids_info.get('template')
    if template_name:
        template_def = template.definitions.get(template_name)
        if template_def:
            errors = template.validate(template_def, bids_info)
            if errors:
                is_valid = False
                error_message = '\n'.join(
                    format_validation_error(err) for err in errors)
        else:
            is_valid = False
            error_message += 'Unknown template: %s. ' % template_name

    # Record the outcome so users can see why curation failed.
    bids_info['valid'] = is_valid
    bids_info['error_message'] = error_message
def update_meta_info(fw, context):
    """Push a context's modified info dict back to the Flywheel server.

    Dispatches on the context's container type; unknown types are logged
    and otherwise ignored.
    """
    ctype = context['container_type']
    if ctype == 'file':
        parent = context['parent_container_type']
        if parent == 'acquisition':
            fw.set_acquisition_file_info(
                context['acquisition']['id'],
                context['file']['name'],
                context['file']['info'])
        elif parent == 'project':
            fw.set_project_file_info(
                context['project']['id'],
                context['file']['name'],
                context['file']['info'])
        elif parent == 'session':
            fw.set_session_file_info(
                context['session']['id'],
                context['file']['name'],
                context['file']['info'])
        else:
            logger.info('Cannot determine file parent container type: ' + parent)
    elif ctype == 'project':
        fw.replace_project_info(context['project']['id'], context['project']['info'])
    elif ctype == 'session':
        fw.replace_session_info(context['session']['id'], context['session']['info'])
    elif ctype == 'acquisition':
        fw.replace_acquisition_info(context['acquisition']['id'], context['acquisition']['info'])
    else:
        logger.info('Cannot determine container type: ' + ctype)
def curate_bids_dir(fw, project_id, session_id=None, reset=False, template_file=None, session_only=False):
    """Fetch a project tree and curate it for BIDS.

    :param fw: Flywheel client
    :param project_id: id of the project to curate
    :param session_id: optional session id to curate
    :param reset: reset BIDS info before curation
    :param template_file: template file to use
    :param session_only: if True, curate only the provided session
    """
    project_tree = get_project_tree(
        fw, project_id, session_id=session_id, session_only=session_only)
    curate_bids_tree(fw, project_tree, reset, template_file, True)
def curate_bids_tree(fw, project, reset=False, template_file=None, update=True):
    """Curate a (possibly partial) project tree for BIDS.

    Three passes: (1) match containers/files against the BIDS template and
    fill in metadata, (2) resolve template rules that depend on other
    containers, (3) push modified metadata back to the server.

    Fix: removed the unused local ``session = None`` before pass 2.

    :param fw: Flywheel client
    :param project: project tree from get_project_tree()
    :param reset: clear existing BIDS metadata before matching
    :param template_file: explicit template path; otherwise a project-attached
        custom template is used when present, else the default template
    :param update: when True, send changed metadata to the server
    """
    project_files = project.get('files', [])

    # Start from the default template; a project-level custom template (or an
    # explicit template_file) overrides it below.
    template = templates.DEFAULT_TEMPLATE

    if not template_file:
        template_filename = utils.find_custom_template(project_files)
        if template_filename:
            fd, path = tempfile.mkstemp('.json')
            os.close(fd)
            logger.info('Using project template: {0}'.format(template_filename))
            fw.download_file_from_project(project['id'], template_filename, path)
            template_file = path

    if template_file:
        template = templates.loadTemplate(template_file)

    # 1. Do initial template matching and updating
    for context in project.context_iter():
        ctype = context['container_type']
        parent_ctype = context['parent_container_type']
        if reset:
            clear_meta_info(context[ctype], template)
        elif context[ctype].get('info',{}).get('BIDS') == 'NA':
            # 'NA' marks containers that previously failed to match; skip.
            continue

        if ctype == 'project':
            bidsify_flywheel.process_matching_templates(context, template)
            # TODO: Improve the validator to understand what is valid for
            # the dataset_description file...
            # validate_meta_info(context['project'])
        elif ctype == 'session':
            bidsify_flywheel.process_matching_templates(context, template)
            # Run counters give files matched under this session unique
            # run indices.
            context['run_counters'] = utils.RunCounterMap()
        elif ctype == 'acquisition':
            bidsify_flywheel.process_matching_templates(context, template)
        elif ctype == 'file':
            # NOTE(review): PROJECT_TEMPLATE_FILE_NAME_REGEX does not appear
            # to be defined in this module -- confirm it is defined/imported
            # elsewhere before relying on this branch.
            if parent_ctype == 'project' and PROJECT_TEMPLATE_FILE_NAME_REGEX.search(context['file']['name']):
                # Don't BIDSIFY the project's own template file
                continue

            context['file'] = bidsify_flywheel.process_matching_templates(context, template)
            validate_meta_info(context['file'], template)

    # 2. Perform any path resolutions
    for context in project.context_iter():
        bidsify_flywheel.process_resolvers(context, template)

    # 3. Send updates to server
    if update:
        for context in project.context_iter():
            ctype = context['container_type']
            node = context[ctype]
            if node.is_dirty():
                update_meta_info(fw, context)
def main_with_args(api_key, session_id, reset, session_only):
    """Programmatic entry point: curate the project owning *session_id*."""
    # Building the client validates the API key (raises if invalid).
    fw = flywheel.Flywheel(api_key)
    if not session_id:
        print('Session id is required!')
        sys.exit(1)
    project_id = utils.get_project_id_from_session_id(fw, session_id)
    curate_bids_dir(fw, project_id, session_id, reset=reset, session_only=session_only)
def main():
    """Command-line entry point for BIDS curation."""
    ### Read in arguments
    parser = argparse.ArgumentParser(description='BIDS Curation')
    parser.add_argument('--api-key', dest='api_key', action='store',
                        required=True, help='API key')
    parser.add_argument('-p', dest='project_label', action='store',
                        required=False, default=None, help='Project Label on Flywheel instance')
    parser.add_argument('--session', dest='session_id', action='store',
                        required=False, default=None, help='Session ID, used to look up project if project label is not readily available')
    parser.add_argument('--reset', dest='reset', action='store_true',
                        default=False, help='Reset BIDS data before running')
    parser.add_argument('--session-only', dest='session_only', action='store_true',
                        default=False, help='Only curate the session identified by --session')
    parser.add_argument('--template-file', dest='template_file', action='store',
                        default=None, help='Template file to use')
    args = parser.parse_args()
    ### Prep
    # Check API key - raises Error if key is invalid
    fw = flywheel.Flywheel(args.api_key)
    # Resolve the project id from the label, or indirectly via a session id.
    if args.project_label:
        project_id = utils.validate_project_label(fw, args.project_label)
    elif args.session_id:
        project_id = utils.get_project_id_from_session_id(fw, args.session_id)
    else:
        print('Either project label or session id is required!')
        sys.exit(1)
    ### Curate BIDS project
    curate_bids_dir(fw, project_id, args.session_id, reset=args.reset, template_file=args.template_file, session_only=args.session_only)
# Script entry point.
if __name__ == '__main__':
    main()
| flywheel_bids/curate_bids.py | 9,721 | fw: Flywheel client
project_id: project id of project to curate
session_id: The optional session id to curate
reset: Whether or not to reset bids info before curation
template_file: The template file to use
session_only: If true, then only curate the provided session
Update file information
Validate meta information
Adds 'BIDS.NA' if no BIDS info present
Adds 'BIDS.valid' and 'BIDS.error_message'
to communicate to user if values are valid
Currently, validation is only checking if
mandatory properties are non-empty strings
Could add the following checks:
Are the values alpha numeric?
Get namespace If 'info' is NOT in container, then must not have matched to a template, create 'info' field with object {'BIDS': 'NA'} if the namespace ('BIDS') is NOT in 'info', then must not have matched to a template, add {'BIDS': 'NA'} to the meta info If already assigned BIDS 'NA', then break Otherwise, iterate over keys within container Find template Assign 'valid' and 'error_message' values Modify file Modify acquisition file Modify project file Modify session file Modify project Modify session Modify acquisition Cannot determine container type Get project Get template (for now, just use default) Check for project file Curation is now a 3-pass process 1. Do initial template matching and updating 2. Perform any path resolutions 3. Send updates to server 1. Do initial template matching and updating Validate meta information TODO: Improve the validator to understand what is valid for dataset_description file... validate_meta_info(context['project']) Add run_counter Don't BIDSIFY project template Process matching Validate meta information 2. Perform any path resolutions Resolution 3. Send updates to server Prep Check API key - raises Error if key is invalid Curate BIDS project Read in arguments Prep Check API key - raises Error if key is invalid Get project id from label Curate BIDS project | 1,934 | en | 0.546357 |
class PlayerResourceHand:
    """Counts of each resource card a player holds, plus a cached total."""

    def __init__(self):
        # One counter per resource type.
        self.brick = 0
        self.grain = 0
        self.lumber = 0
        self.ore = 0
        self.wool = 0
        # Cached sum of all counters; refreshed by update().
        self.totalResources = 0

    def update(self):
        """Recompute the cached total from the individual counters."""
        self.totalResources = sum(
            (self.brick, self.grain, self.lumber, self.ore, self.wool))
class PlayerDevelopmentHand:
    """Counts of each development card a player holds, plus a cached total."""

    def __init__(self):
        # One counter per development-card kind.
        self.knights = 0
        self.roadBuildings = 0
        self.yearOfPlenty = 0
        self.monopolies = 0
        self.victoryPoints = 0
        # Cached sum of all counters; refreshed by update().
        self.totalDevelopments = 0

    def update(self):
        """Recompute the cached total from the individual counters."""
        counts = (self.knights, self.roadBuildings, self.yearOfPlenty,
                  self.monopolies, self.victoryPoints)
        self.totalDevelopments = sum(counts)
class EnemyPlayer:
    """Snapshot of an opponent as visible to the local player.

    Constructor abbreviations: nR/nS/nC = road/settlement/city counts,
    lR/lA = longest-road / largest-army values, hS/dS = resource-hand /
    development-hand sizes, vVP = visible victory points.
    """

    def __init__(self, turnOrder, name, color, nR, nS, nC, lR, lA, hS, dS, vVP):
        # Identity.
        self.turnOrder = turnOrder
        self.name = name
        self.color = color
        # Hidden-hand sizes and publicly visible score.
        self.handSize = hS
        self.developmentSize = dS
        self.visibleVictoryPoints = vVP
        # Piece counts.
        self.numRoads = nR
        self.numSettlements = nS
        self.numCities = nC
        # Special awards.
        self.longestRoad = lR
        self.largestArmy = lA
class Player:
    """The local player's mutable game state: pieces, hands, and score."""

    def __init__(self, name, color, turnOrder):
        self.color = color
        self.name = name
        self.turnOrder = turnOrder
        # Initial piece stock.
        self.numRoads = 15
        self.numSettlements = 5
        self.numCities = 4
        self.longestRoad = 0
        self.largestArmy = 0
        self.victoryPoints = 0
        self.resourceHand = PlayerResourceHand()
        self.developmentHand = PlayerDevelopmentHand()
        self.ownedRoads = list()
        self.ownedNodes = list()

    def getNumResources(self):
        """Return the cached total number of resource cards held."""
        return self.resourceHand.totalResources

    def getNumDevelopment(self):
        """Return the cached total number of development cards held."""
        return self.developmentHand.totalDevelopments

    def getSendToEnemies(self):
        """Serialize the publicly visible state as a comma-separated string.

        Bug fix: str.join() only accepts strings, so the numeric fields
        (turnOrder, piece counts, awards) must be converted with str();
        the previous code raised TypeError.
        """
        toSend = ','.join(str(field) for field in
                          [self.turnOrder, self.name, self.color,
                           self.numRoads, self.numSettlements, self.numCities,
                           self.longestRoad, self.largestArmy])
        return toSend

    def acquireRoad(self, road):
        """Record ownership of a road."""
        self.ownedRoads.append(road)

    def acquireNode(self, node):
        """Record ownership of a board node."""
        self.ownedNodes.append(node)

    def addResources(self, array):
        """Add resources in the order brick, grain, lumber, ore, wool.

        Assumes len(array) >= 5 — TODO confirm against callers.
        """
        self.resourceHand.brick += array[0]
        self.resourceHand.grain += array[1]
        self.resourceHand.lumber += array[2]
        self.resourceHand.ore += array[3]
        self.resourceHand.wool += array[4]
        # Keep the cached total consistent with the five counters above.
        self.resourceHand.totalResources += array[0] + array[1] + array[2] + array[3] + array[4]
| src/Player.py | 2,781 | toSend = EnemyPlayer(self.turnOrder, self.name, self.color, self.numRoads, self.numSettlements, self.numCities, self.longestRoad, self.largestArmy) | 189 | en | 0.412251 |
from typing import Optional
import pandas as pd
import pytest
from evidently.analyzers.regression_performance_analyzer import RegressionPerformanceAnalyzer
from evidently.model.widget import BaseWidgetInfo
from evidently.options import OptionsProvider
from evidently.pipeline.column_mapping import ColumnMapping
from evidently.dashboard.widgets.reg_error_normality_widget import RegErrorNormalityWidget
@pytest.fixture
def widget() -> RegErrorNormalityWidget:
    """Provide a RegErrorNormalityWidget wired to a fresh options provider."""
    test_widget = RegErrorNormalityWidget("test_widget")
    test_widget.options_provider = OptionsProvider()
    return test_widget
def test_reg_error_normality_widget_analyzer_list(widget: RegErrorNormalityWidget) -> None:
    # The widget must declare exactly the regression-performance analyzer.
    assert widget.analyzers() == [RegressionPerformanceAnalyzer]
@pytest.mark.parametrize(
    "reference_data, current_data, data_mapping, dataset, expected_result",
    (
        # Case 1: reference data only, no dataset override.
        (
            pd.DataFrame({"target": [1, 2, 3, 4], "prediction": [1, 2, 3, 4]}),
            None,
            ColumnMapping(),
            None,
            BaseWidgetInfo(type="big_graph", title="test_widget", size=1),
        ),
        # Case 2: reference + current data, widget pinned to "reference".
        (
            pd.DataFrame({"target": [1, 2, 3, 4], "prediction": [1, 2, 3, 4]}),
            pd.DataFrame({"target": [1, 2, 3, 4], "prediction": [1, 2, 3, 4]}),
            ColumnMapping(),
            "reference",
            BaseWidgetInfo(type="big_graph", title="test_widget", size=1),
        ),
    ),
)
def test_reg_error_normality_widget_simple_case(
    widget: RegErrorNormalityWidget,
    reference_data: pd.DataFrame,
    current_data: pd.DataFrame,
    data_mapping: ColumnMapping,
    dataset: Optional[str],
    expected_result: BaseWidgetInfo,
) -> None:
    """Run the analyzer and check the widget's rendered metadata."""
    if dataset is not None:
        widget.dataset = dataset
    analyzer = RegressionPerformanceAnalyzer()
    analyzer.options_provider = widget.options_provider
    analyzer_results = analyzer.calculate(reference_data, current_data, data_mapping)
    result = widget.calculate(
        reference_data, current_data, data_mapping, {RegressionPerformanceAnalyzer: analyzer_results}
    )
    if expected_result is not None:
        # we have some widget for visualization
        assert result.type == expected_result.type
        assert result.title == expected_result.title
        assert result.size == expected_result.size
        assert result.params is not None
    else:
        # no widget data, show nothing
        assert result is None
| tests/dashboard/widgets/test_reg_error_normality_widget.py | 2,465 | we have some widget for visualization no widget data, show nothing | 66 | en | 0.780228 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Build the tangible deed template for the shared_hq_s05 faction-perk HQ.

    Autogenerated (see the file header); hand edits belong only between
    the BEGIN/END MODIFICATIONS markers.
    """
    result = Tangible()
    result.template = "object/tangible/deed/faction_perk/hq/shared_hq_s05.iff"
    result.attribute_template_id = 2
    result.stfName("deed","hq_s05")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return result
# -*- coding: utf-8 -*-
import unittest.mock
import pytest
import pycamunda.incident
from tests.mock import raise_requests_exception_mock, not_ok_response_mock
def test_get_params(engine_url):
    """Get targets /incident/<id> and carries no query or body parameters."""
    request = pycamunda.incident.Get(url=engine_url, id_='anId')
    expected_url = engine_url + '/incident/anId'
    assert request.url == expected_url
    assert request.query_parameters() == {}
    assert request.body_parameters() == {}
@unittest.mock.patch('pycamunda.incident.Incident.load', unittest.mock.MagicMock())
@unittest.mock.patch('requests.Session.request')
def test_get_calls_requests(mock, engine_url):
    # Invoking the Get object must issue exactly a GET via requests.Session.
    get_incident = pycamunda.incident.Get(url=engine_url, id_='anId')
    get_incident()
    assert mock.called
    assert mock.call_args[1]['method'].upper() == 'GET'
@unittest.mock.patch('requests.Session.request', raise_requests_exception_mock)
def test_get_raises_pycamunda_exception(engine_url):
    # A requests-level failure must surface as a PyCamundaException.
    get_incident = pycamunda.incident.Get(url=engine_url, id_='anId')
    with pytest.raises(pycamunda.PyCamundaException):
        get_incident()
@unittest.mock.patch('requests.Session.request', not_ok_response_mock)
@unittest.mock.patch('pycamunda.incident.Incident', unittest.mock.MagicMock())
@unittest.mock.patch('pycamunda.base._raise_for_status')
def test_get_raises_for_status(mock, engine_url):
    # A non-ok HTTP response must be routed through _raise_for_status.
    get_incident = pycamunda.incident.Get(url=engine_url, id_='anId')
    get_incident()
    assert mock.called
@unittest.mock.patch('requests.Session.request', unittest.mock.MagicMock())
@unittest.mock.patch('pycamunda.base.from_isoformat', unittest.mock.MagicMock())
@unittest.mock.patch('pycamunda.incident.IncidentType', unittest.mock.MagicMock())
def test_get_returns_incident(engine_url):
    # A successful call deserializes the response into an Incident instance.
    get_incident = pycamunda.incident.Get(url=engine_url, id_='anId')
    incident = get_incident()
    assert isinstance(incident, pycamunda.incident.Incident)
| tests/incident/test_get.py | 1,879 | -*- coding: utf-8 -*- | 21 | en | 0.767281 |
import uuid
from datetime import datetime, timedelta
import pytest
import simplejson as json
from django.db.models import Q
from mock import Mock, patch
from treeherder.config.settings import IS_WINDOWS
from treeherder.perf.auto_perf_sheriffing.secretary_tool import SecretaryTool
from treeherder.model.models import Push, Job
from treeherder.perf.models import BackfillRecord, BackfillReport, PerformanceSettings
from treeherder.perf.auto_perf_sheriffing.outcome_checker import OutcomeChecker, OutcomeStatus
# we're testing against this (automatically provided by fixtures)
JOB_TYPE_ID = 1
def get_middle_index(successful_jobs):
    """Return the middle index so the chosen job's push lies inside the range."""
    # Integer midpoint (rounding up); equivalent to int((len + 1) / 2).
    return (len(successful_jobs) + 1) // 2
@pytest.fixture
def record_backfilled(test_perf_alert, record_context_sample):
    """A BackfillRecord in BACKFILLED state with its context preloaded."""
    report = BackfillReport.objects.create(summary=test_perf_alert.summary)
    record = BackfillRecord.objects.create(
        alert=test_perf_alert,
        report=report,
        status=BackfillRecord.BACKFILLED,
    )
    # Persist the sample push-range context on the record.
    record.set_context(record_context_sample)
    record.save()
    return record
@pytest.fixture
def range_dates(record_context_sample):
    """Key timestamps around the sample context's first/last push times."""
    start = datetime.fromisoformat(record_context_sample[0]['push_timestamp'])
    end = datetime.fromisoformat(record_context_sample[-1]['push_timestamp'])
    return {
        'before_date': start - timedelta(days=5),
        'from_date': start,
        'in_range_date': start + timedelta(hours=13),
        'to_date': end,
        'after_date': end + timedelta(days=3),
    }
@pytest.fixture
def outcome_checking_pushes(
    create_push, range_dates, record_context_sample, test_repository, test_repository_2
):
    """Pushes before, inside, and after the record's from->to time window.

    The boundary pushes reuse the explicit push ids from the record
    context sample so the record's stored range matches real rows.
    """
    from_push_id = record_context_sample[0]['push_id']
    to_push_id = record_context_sample[-1]['push_id']
    pushes = [
        create_push(test_repository, revision=uuid.uuid4(), time=range_dates['before_date']),
        create_push(
            test_repository,
            revision=uuid.uuid4(),
            time=range_dates['from_date'],
            explicit_id=from_push_id,
        ),
        create_push(test_repository, revision=uuid.uuid4(), time=range_dates['in_range_date']),
        create_push(test_repository, revision=uuid.uuid4(), time=range_dates['in_range_date']),
        create_push(test_repository, revision=uuid.uuid4(), time=range_dates['in_range_date']),
        create_push(test_repository, revision=uuid.uuid4(), time=range_dates['in_range_date']),
        create_push(
            test_repository,
            revision=uuid.uuid4(),
            time=range_dates['to_date'],
            explicit_id=to_push_id,
        ),
        create_push(test_repository, revision=uuid.uuid4(), time=range_dates['after_date']),
    ]
    return pushes
@pytest.fixture
def successful_jobs(outcome_checking_pushes, eleven_jobs_stored):
    """Attach one successful job of the tested type to each push."""
    updated_jobs = []
    for push, job in zip(outcome_checking_pushes, Job.objects.all()):
        job.push = push
        job.result = 'success'
        job.job_type_id = JOB_TYPE_ID
        job.save()
        updated_jobs.append(job)
    return updated_jobs
@pytest.fixture
def jobs_with_one_failed(successful_jobs):
    """Flip the middle job's result to 'testfailed'."""
    failing_job = successful_jobs[get_middle_index(successful_jobs)]
    failing_job.result = 'testfailed'
    failing_job.save()
@pytest.fixture
def jobs_with_one_pending(successful_jobs):
    """Flip the middle job's result to 'unknown' (still pending)."""
    pending_job = successful_jobs[get_middle_index(successful_jobs)]
    pending_job.result = 'unknown'
    pending_job.save()
@pytest.fixture
def get_outcome_checker_mock():
    """Factory for a stub outcome checker whose check() returns a fixed status."""
    def factory(outcome: OutcomeStatus):
        # Anonymous class whose check() ignores its arguments.
        methods = {'check': lambda *params: outcome}
        return type('', (), methods)
    return factory
@pytest.mark.skipif(IS_WINDOWS, reason="datetime logic does not work when OS not on GMT")
def test_secretary_tool_updates_only_matured_reports(
    test_perf_alert, test_perf_alert_2, create_record
):
    """Only the record created in the past is promoted out of PRELIMINARY."""
    # create new report with records
    create_record(test_perf_alert)
    # create mature report with records (timezone.now patched 5 hours back)
    date_past = datetime.utcnow() - timedelta(hours=5)
    with patch('django.utils.timezone.now', Mock(return_value=date_past)):
        create_record(test_perf_alert_2)
    assert BackfillRecord.objects.count() == 2
    assert BackfillRecord.objects.filter(status=BackfillRecord.PRELIMINARY).count() == 2
    SecretaryTool.mark_reports_for_backfill()
    # The fresh record stays PRELIMINARY; the older one was promoted.
    assert BackfillRecord.objects.filter(status=BackfillRecord.PRELIMINARY).count() == 1
def test_secretary_tool_uses_existing_settings(performance_settings):
    """validate_settings() keeps an unexpired settings row untouched."""
    assert PerformanceSettings.objects.count() == 1
    original_reset_date = json.loads(performance_settings.settings)["last_reset_date"]
    SecretaryTool.validate_settings()
    assert PerformanceSettings.objects.count() == 1
    refreshed = PerformanceSettings.objects.filter(name="perf_sheriff_bot").first()
    assert json.loads(refreshed.settings)["last_reset_date"] == original_reset_date
def test_secretary_tool_resets_settings_if_expired(expired_performance_settings):
    """validate_settings() refreshes the reset date of expired settings."""
    assert PerformanceSettings.objects.count() == 1
    stale_reset_date = json.loads(expired_performance_settings.settings)["last_reset_date"]
    SecretaryTool.validate_settings()
    assert PerformanceSettings.objects.count() == 1
    refreshed = PerformanceSettings.objects.filter(name="perf_sheriff_bot").first()
    assert json.loads(refreshed.settings)["last_reset_date"] != stale_reset_date
def test_secretary_tool_creates_new_settings_if_none_exist(db):
    """validate_settings() bootstraps a settings row when the table is empty."""
    assert PerformanceSettings.objects.count() == 0
    SecretaryTool.validate_settings()
    assert PerformanceSettings.objects.count() == 1
def test_check_outcome_after_success(get_outcome_checker_mock, record_backfilled):
    """A SUCCESSFUL outcome moves the record from BACKFILLED to SUCCESSFUL."""
    secretary = SecretaryTool(get_outcome_checker_mock(OutcomeStatus.SUCCESSFUL))

    def count(status):
        return BackfillRecord.objects.filter(status=status).count()

    assert count(BackfillRecord.BACKFILLED) == 1
    assert count(BackfillRecord.SUCCESSFUL) == 0
    secretary.check_outcome()
    assert count(BackfillRecord.BACKFILLED) == 0
    assert count(BackfillRecord.SUCCESSFUL) == 1
def test_check_outcome_after_fail(get_outcome_checker_mock, record_backfilled):
    """A FAILED outcome moves the record from BACKFILLED to FAILED."""
    secretary = SecretaryTool(get_outcome_checker_mock(OutcomeStatus.FAILED))

    def count(status):
        return BackfillRecord.objects.filter(status=status).count()

    assert count(BackfillRecord.BACKFILLED) == 1
    assert count(BackfillRecord.FAILED) == 0
    secretary.check_outcome()
    assert count(BackfillRecord.BACKFILLED) == 0
    assert count(BackfillRecord.FAILED) == 1
def test_no_action_when_in_progress(get_outcome_checker_mock, record_backfilled):
    """An IN_PROGRESS outcome leaves the record in BACKFILLED state."""
    secretary = SecretaryTool(get_outcome_checker_mock(OutcomeStatus.IN_PROGRESS))
    # QuerySets are lazy, so each count() re-evaluates against the DB.
    backfilled = BackfillRecord.objects.filter(status=BackfillRecord.BACKFILLED)
    assert backfilled.count() == 1
    secretary.check_outcome()
    assert backfilled.count() == 1
def test_outcome_checker_identifies_pushes_in_range(
    record_backfilled, test_repository, test_repository_2, range_dates, outcome_checking_pushes
):
    """_get_pushes_in_range must filter by both time window and repository."""
    # TODO: retarget this test to BackfillRecord.get_pushes_in_range()
    outcome_checker = OutcomeChecker()
    total_pushes = Push.objects.count()
    from_time = range_dates['from_date']
    to_time = range_dates['to_date']
    # Pushes of the tested repository that fall outside [from_time, to_time].
    total_outside_pushes = Push.objects.filter(
        Q(repository=test_repository) & (Q(time__lt=from_time) | Q(time__gt=to_time))
    ).count()
    pushes_in_range = outcome_checker._get_pushes_in_range(from_time, to_time, test_repository.id)
    assert len(pushes_in_range) == total_pushes - total_outside_pushes
    # change repository for the first 2 pushes in range
    assert test_repository.id != test_repository_2.id
    total_changed_pushes = 2
    for push in pushes_in_range[:total_changed_pushes]:
        push.repository = test_repository_2
        push.save()
    total_other_repo_pushes = Push.objects.filter(repository=test_repository_2).count()
    assert total_other_repo_pushes == total_changed_pushes
    # Re-querying must now exclude the pushes moved to the other repository.
    updated_pushes_in_range = outcome_checker._get_pushes_in_range(
        from_time, to_time, test_repository.id
    )
    assert len(updated_pushes_in_range) == len(pushes_in_range) - total_other_repo_pushes
class TestOutcomeChecker:
    """OutcomeChecker.check() maps the jobs' results to an OutcomeStatus."""

    @patch('treeherder.perf.auto_perf_sheriffing.outcome_checker.get_job_type')
    def test_successful_jobs_mean_successful_outcome(
        self, mock_get_job_type, record_backfilled, outcome_checking_pushes, successful_jobs
    ):
        # TODO: remove job type mock after soft launch lands
        mock_get_job_type.return_value = JOB_TYPE_ID
        outcome_checker = OutcomeChecker()
        response = outcome_checker.check(record_backfilled)
        assert response == OutcomeStatus.SUCCESSFUL

    @patch('treeherder.perf.auto_perf_sheriffing.outcome_checker.get_job_type')
    def test_failed_job_means_failed_outcome(
        self, mock_get_job_type, record_backfilled, outcome_checking_pushes, jobs_with_one_failed
    ):
        # A single 'testfailed' job in the range is enough to fail the outcome.
        mock_get_job_type.return_value = JOB_TYPE_ID
        outcome_checker = OutcomeChecker()
        response = outcome_checker.check(record_backfilled)
        assert response == OutcomeStatus.FAILED

    @patch('treeherder.perf.auto_perf_sheriffing.outcome_checker.get_job_type')
    def test_pending_job_means_in_progress_outcome(
        self, mock_get_job_type, record_backfilled, outcome_checking_pushes, jobs_with_one_pending
    ):
        # A single pending ('unknown') job keeps the outcome in progress.
        mock_get_job_type.return_value = JOB_TYPE_ID
        outcome_checker = OutcomeChecker()
        response = outcome_checker.check(record_backfilled)
        assert response == OutcomeStatus.IN_PROGRESS
| tests/perfalert/test_auto_perf_sheriffing/test_secretary_tool.py | 10,199 | we're testing against this (automatically provided by fixtures) get middle index to make sure the push is in range create new report with records create mature report with records TODO: retarget this test to BackfillRecord.get_pushes_in_range() change repository for the first 2 pushes in range TODO: remove job type mock after soft launch lands | 345 | en | 0.843468 |
#!/usr/bin/env python3
# This scripts attempts to generate massive design of experiment runscripts.
# and save it into a "runMassive.sh" and "doe.log".
#-------------------------------------------------------------------------------
import os, sys
import os.path
import re
import itertools
import glob
# Platforms shipped with the public flow (platform dir lives under ./platforms/).
PUBLIC = ['nangate45', 'sky130hd', 'sky130hs', 'asap7']
# The number of generated config files into designs/{platform}/{design}/chunks/chunk{number} directory.
NumFilesPerChunk = 50000
## Original SDC file name
OriginalSDC = 'constraint_doe.sdc'
##################################
# define input parameters
##################################
# for generated .sh file name
ShellName = 'runMassive'
##################
# Design
##################
## Define platform-design pairs as '<platform>-<design>'. (string)
PLATFORM_DESIGN = [ \
    #'sky130hd-gcd' \
    'sky130hd-ibex', \
    #'sky130hd-aes', \
    #'sky130hd-jpeg', \
    #'sky130hs-gcd', \
    #'sky130hs-ibex', \
    #'sky130hs-aes', \
    #'sky130hs-jpeg', \
    #'nangate45-gcd', \
    #'nangate45-ibex', \
    #'nangate45-aes', \
    #'nangate45-jpeg', \
    #'asap7-gcd', \
    #'asap7-ibex', \
    #'asap7-aes', \
    #'asap7-jpeg', \
    ]
## Target Clock Period (float)
CLK_PERIOD = []
## SDC uncertainty and IO delay.
## TODO: Currently, it is only supported when 'set uncertainty' and 'set io_delay'
## are defined in the constraint.sdc file.
UNCERTAINTY = []
IO_DELAY = []
##################
# Synthesis
##################
## Clock period for Yosys (for synthesis)
## The unit should follow each design (ns, ps) (float)
ABC_CLOCK_PERIOD = []
## Hierarchical synthesis. 0 = hierarchical, 1 = flatten, empty = flatten (default) (int)
FLATTEN = []
##################
# Floorplan
##################
## Utilization. e.g, 45 -> 45% of core util. (int)
#CORE_UTIL = [20, 40, 55]
CORE_UTIL = [20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50]
## Aspect ratio. It REQUIRES 'CORE_UTIL' values (float)
ASPECT_RATIO = [0.5, 0.75, 1.0, 1.25, 1.5]
## Core-to-die gap distance (um). It REQUIRES 'CORE_UTIL' values (int)
CORE_DIE_MARGIN = [10]
## Pin Distance
#PINS_DISTANCE = [2]
PINS_DISTANCE = []
##################
# Placement
##################
## Global Placement Padding for std cells (int)
GP_PAD = [4]
## Detailed Placement Padding for std cells (int)
DP_PAD = [2]
## Global Placement target bin density (select only one option) (.2 float)
## option 1) PLACE_DENSITY uses the values in the list as it is.
## option 2) PLACE_DENSITY_LB_ADDON adds the values in the list to the lower boundary of the PLACE_DENSITY
## For example, PLACE_DENSITY_LB_ADDON = [0, 0.02, 0.04] means PLACE_DENSITY = [LB, LB+0.02, LB+0.04]
## LB of the place density == (total instance area + padding) / total die area
PLACE_DENSITY = []
PLACE_DENSITY_LB_ADDON = [0, 0.04, 0.08]
##################
# CTS
##################
## CTS clustering size and diameter (um) (int)
CTS_CLUSTER_SIZE = []
CTS_CLUSTER_DIAMETER = []
##################
# Global Routing
##################
## Set global routing layer capacity adjustment
## e.g.) 0.2 -> 20% usage for global routing
## Set for all layers.
## Each layer's layer adjustment will be overwritten with below per-layer values. (float)
LAYER_ADJUST = [0.5]
LAYER_ADJUST_M1 = [0, 0.2, 0.4, 0.6]
LAYER_ADJUST_M2 = [0, 0.2, 0.4, 0.6]
LAYER_ADJUST_M3 = [0, 0.2, 0.4, 0.6]
LAYER_ADJUST_M4 = [0, 0.2, 0.4, 0.6]
LAYER_ADJUST_M5 = []
LAYER_ADJUST_M6 = []
LAYER_ADJUST_M7 = []
LAYER_ADJUST_M8 = []
LAYER_ADJUST_M9 = []
## Set global routing random seed. (int)
GR_SEED = []
## Set allow global routing overflow. 0 = no, 1 = yes, empty = no (default) (int)
# TODO: currently it does not work. Leave this as 0 as it is.
GR_OVERFLOW = [0]
##################
# Detailed Routing
##################
## Set detailed routing random seed. (int)
DR_SEED = []
# Knob short-name -> value-list map; non-'empty' knob names/values become
# fragments of each generated FLOW_VARIANT name in writeConfigs().
SweepingAttributes = { "PLATFORM_DESIGN": PLATFORM_DESIGN,
                       "CP": CLK_PERIOD,
                       "ABC_CP": ABC_CLOCK_PERIOD,
                       "FLATTEN": FLATTEN,
                       "UNCERTAINTY": UNCERTAINTY,
                       "IO_DELAY": IO_DELAY,
                       "UTIL": CORE_UTIL,
                       "AR": ASPECT_RATIO,
                       "GAP": CORE_DIE_MARGIN,
                       "PINS_DISTANCE": PINS_DISTANCE,
                       "GP_PAD": GP_PAD,
                       "DP_PAD": DP_PAD,
                       "PD": PLACE_DENSITY,
                       "PD_LB_ADD": PLACE_DENSITY_LB_ADDON,
                       "CTS_CLUSTER_SIZE": CTS_CLUSTER_SIZE,
                       "CTS_CLUSTER_DIAMETER": CTS_CLUSTER_DIAMETER,
                       "LAYER_ADJUST": LAYER_ADJUST,
                       "M1": LAYER_ADJUST_M1,
                       "M2": LAYER_ADJUST_M2,
                       "M3": LAYER_ADJUST_M3,
                       "M4": LAYER_ADJUST_M4,
                       "M5": LAYER_ADJUST_M5,
                       "M6": LAYER_ADJUST_M6,
                       "M7": LAYER_ADJUST_M7,
                       "M8": LAYER_ADJUST_M8,
                       "M9": LAYER_ADJUST_M9,
                       "GR_SEED": GR_SEED,
                       "GR_OVERFLOW": GR_OVERFLOW,
                       "DR_SEED": DR_SEED }
def assignEmptyAttrs(dicts):
    """Return a copy of *dicts* where empty value lists become ['empty'].

    itertools.product() yields nothing when any input iterable is empty,
    so every unused knob must contribute the single placeholder 'empty'.
    """
    return {name: values if len(values) else ['empty']
            for name, values in dicts.items()}
def writeDoeLog(dicts, ProductDicts):
    """Write a DOE summary to ./doe.log and echo the counts to stdout.

    Args:
        dicts: knob name -> list of sweep values (possibly empty lists).
        ProductDicts: expanded cross-product of all knob settings — one
            {knob: value} dict per run, as produced by productDict().

    Bug fix: the run list previously iterated the module-level global
    ``ProductAttrs`` instead of the ``ProductDicts`` parameter, so the
    function only worked when called from this script's main section.
    """
    with open('./doe.log', 'w') as fo:
        # Total run count is the product of all non-empty knob list sizes.
        numRuns = 1
        for knob, values in dicts.items():
            if len(values) > 0:
                print('%s has %s number of values' % (knob, len(values)))
                fo.write('%s has %s number of values\n' % (knob, len(values)))
                numRuns = numRuns * len(values)
        fo.write('\nTotal Number of Runs = %s\n\n' % numRuns)
        print('\nTotal Number of Runs = %s\n\n' % numRuns)
        # One row of knob names (header) then one row of values per run;
        # 'empty' placeholder knobs are omitted.
        knobValuesList = []
        knobNamesList = []
        for CurAttrs in ProductDicts:
            knobValues = []
            knobNames = []
            for k, v in CurAttrs.items():
                if v == 'empty':
                    continue
                knobNames.append(str(k))
                knobValues.append(str(v))
            knobValuesList.append(knobValues)
            knobNamesList.append(knobNames)
        # Guard against an empty product (previously an IndexError).
        if knobNamesList:
            fo.write(str(knobNamesList[0]) + '\n')
        for knobSet in knobValuesList:
            fo.write(str(knobSet) + '\n')
def productDict(dicts):
    """Yield every combination of knob values as a {knob: value} dict."""
    keys = list(dicts)
    for combo in itertools.product(*dicts.values()):
        yield dict(zip(keys, combo))
def adjustFastRoute(filedata, adjSet, GrOverflow):
    """Rewrite a fastroute.tcl body with the requested layer adjustments.

    adjSet[0] is the all-layer adjustment (or 'empty'); adjSet[1:] hold
    per-layer overrides appended after each existing
    set_global_routing_layer_adjustment line.  GrOverflow == 1 adds
    -allow_overflow to the global_route call.
    """
    global_adj = adjSet[0]
    if global_adj != 'empty':
        filedata = re.sub("(set_global_routing_layer_adjustment .* )[0-9\.]+",
                          "\g<1>{:.2f}".format(float(global_adj)), filedata)
    ## TODO: Currently, only supports for SKY130HD and SKY130HS.
    ## TODO: user should manually change the layer name to match techLEF.
    per_layer_cmds = "".join(
        "set_global_routing_layer_adjustment " + ('met%s' % idx) + " {:.2f}\n".format(float(value))
        for idx, value in enumerate(adjSet)
        if idx != 0 and value != 'empty')
    filedata = re.sub("set_global_routing_layer_adjustment.*\n", "\g<0>" + per_layer_cmds, filedata)
    if int(GrOverflow) == 1:
        filedata = re.sub("(global_route.*(\n\s+.*)*)", "\g<1> \\\n -allow_overflow", filedata)
    return filedata
#def setPlaceDensity(DESIGN, Util, GpPad):
# if DESIGN == "ibex":
# LB = (Util/100) + (GpPad * (0.4*(Util/100)-0.01))+0.01
# elif DESIGN == "aes":
# LB = (Util/100) + (GpPad * (0.5*(Util/100)-0.005))+0.02
# else:
# LB = (Util/100) + (GpPad * (0.4*(Util/100)-0.01))+0.01
# return LB
def writeConfigs(CurAttrs, CurChunkNum):
    """Emit one DOE variant: a config .mk plus optional SDC/fastroute files.

    Files go into designs/<platform>/<design>/chunks/chunk<CurChunkNum>/,
    and matching make / metrics-collect commands are appended to the
    <ShellName>.sh and <ShellName>_metrics_collect.sh run scripts.

    Args:
        CurAttrs: one knob-name -> value dict from the sweep cross-product
            (unused knobs hold the placeholder string 'empty').
        CurChunkNum: index of the chunk directory to write into.

    NOTE(review): depends on the module-level ``MakeArg`` global; when
    MakeArg == 'clean' only previously generated files are deleted.
    """
    CurPlatform, CurDesign = CurAttrs.get('PLATFORM_DESIGN').split('-')
    CurClkPeriod = CurAttrs.get('CP')
    CurAbcClkPeriod = CurAttrs.get('ABC_CP')
    CurFlatten = CurAttrs.get('FLATTEN')
    CurUncertainty = CurAttrs.get('UNCERTAINTY')
    CurIoDelay = CurAttrs.get('IO_DELAY')
    CurCoreUtil = CurAttrs.get('UTIL')
    CurAspectRatio = CurAttrs.get('AR')
    CurCoreDieMargin = CurAttrs.get('GAP')
    CurPinsDistance = CurAttrs.get('PINS_DISTANCE')
    CurGpPad = CurAttrs.get('GP_PAD')
    CurDpPad = CurAttrs.get('DP_PAD')
    CurPlaceDensity = CurAttrs.get('PD')
    CurPlaceDensityLbAddon = CurAttrs.get('PD_LB_ADD')
    CurCtsClusterSize = CurAttrs.get('CTS_CLUSTER_SIZE')
    CurCtsClusterDiameter = CurAttrs.get('CTS_CLUSTER_DIAMETER')
    CurLayerAdjust = CurAttrs.get('LAYER_ADJUST')
    CurLayerAdjustM1 = CurAttrs.get('M1')
    CurLayerAdjustM2 = CurAttrs.get('M2')
    CurLayerAdjustM3 = CurAttrs.get('M3')
    CurLayerAdjustM4 = CurAttrs.get('M4')
    CurLayerAdjustM5 = CurAttrs.get('M5')
    CurLayerAdjustM6 = CurAttrs.get('M6')
    CurLayerAdjustM7 = CurAttrs.get('M7')
    CurLayerAdjustM8 = CurAttrs.get('M8')
    CurLayerAdjustM9 = CurAttrs.get('M9')
    CurGrSeed = CurAttrs.get('GR_SEED')
    CurGrOverflow = CurAttrs.get('GR_OVERFLOW')
    CurDrSeed = CurAttrs.get('DR_SEED')
    # Ensure the chunk directory exists.
    if not os.path.isdir('./designs/%s/%s/chunks'%(CurPlatform,CurDesign)):
        os.mkdir('./designs/%s/%s/chunks'%(CurPlatform,CurDesign))
    CurDesignDir = './designs/%s/%s'%(CurPlatform,CurDesign)
    CurChunkDir = './designs/%s/%s/chunks/chunk%s'%(CurPlatform,CurDesign,CurChunkNum)
    if not os.path.isdir(CurChunkDir):
        os.mkdir(CurChunkDir)
    #print(CurChunkNum)
    # 'clean' mode: delete previously generated DoE files and stop.
    if MakeArg=='clean':
        fileList = glob.glob('%s/*-DoE-*'%(CurChunkDir))
        if fileList is not None:
            for file in fileList:
                os.remove(file)
        return
    #print(CurPlatform, CurDesign)
    #print(CurClkPeriod, CurAbcClkPeriod, CurFlatten, CurCoreUtil)
    #print(CurAspectRatio, CurCoreDieMargin, CurGpPad, CurDpPad)
    #print(CurCtsClusterSize, CurCtsClusterDiameter, CurLayerAdjust)
    #print(CurLayerAdjustM1, CurLayerAdjustM2, CurLayerAdjustM3)
    #print(CurLayerAdjustM4, CurLayerAdjustM5, CurLayerAdjustM6)
    #print(CurLayerAdjustM7, CurLayerAdjustM8, CurLayerAdjustM9)
    #print(CurGrOverflow)
    #print(CurAttrs.items())
    # Build the variant name from every non-'empty' knob: "KNOB_value-...".
    variantName = ''
    for k, v in CurAttrs.items():
        if v!='empty' and k!='PLATFORM_DESIGN':
            variantName = variantName + '-' + str(k) + '_' + str(v)
    variantName = variantName[1:]
    #fileName = 'config-%s-%s-'%(CurPlatform, CurDesign)+variantName + '.mk'
    fileName = 'config-DoE-'+variantName + '.mk'
    fo = open('%s/%s'%(CurChunkDir,fileName), 'w')
    fo.write('include $(realpath $(dir $(DESIGN_CONFIG))../../)/config.mk\n')
    fo.write('\n')
    fo.write('FLOW_VARIANT = %s\n'%(variantName))
    fo.write('\n')
    # Rewrite the design's SDC only when a timing knob is being swept.
    if CurClkPeriod != 'empty' or CurUncertainty != 'empty' or CurIoDelay != 'empty':
        fOrigSdc = open('%s/%s'%(CurDesignDir,OriginalSDC),'r')
        filedata = fOrigSdc.read()
        fOrigSdc.close()
        if CurClkPeriod != 'empty':
            filedata = re.sub("-period [0-9\.]+", "-period " + str(CurClkPeriod), filedata)
            #filedata = re.sub("-waveform [{}\s0-9\.]+$}", "\n", filedata)
            filedata = re.sub("-waveform [{}\s0-9\.]+[\s|\n]", "", filedata)
        if CurUncertainty != 'empty':
            filedata = re.sub("set uncertainty [0-9\.]+", "set uncertainty " + str(CurUncertainty), filedata)
        if CurIoDelay != 'empty':
            filedata = re.sub("set io_delay [0-9\.]+", "set io_delay " + str(CurIoDelay), filedata)
        #fOutSdc = open('./designs/%s/%s/constraint-%s-%s-'%(CurPlatform,CurDesign,CurPlatform,CurDesign)+variantName+'.sdc','w')
        fOutSdc = open('%s/constraint-DoE-'%(CurChunkDir)+variantName+'.sdc','w')
        fOutSdc.write(filedata)
        fOutSdc.close()
        fo.write('export SDC_FILE = $(dir $(DESIGN_CONFIG))/constraint-DoE-%s.sdc\n'%variantName)
    # One make-variable export per active knob.
    if CurAbcClkPeriod != 'empty':
        fo.write('export ABC_CLOCK_PERIOD_IN_PS = %s\n'%CurAbcClkPeriod)
    if CurFlatten != 'empty':
        if CurFlatten == 0:
            fo.write('export SYNTH_ARGS = \n')
    if CurCoreUtil != 'empty':
        fo.write('export CORE_UTILIZATION = %s\n'%CurCoreUtil)
    if CurPlaceDensity != 'empty':
        fo.write('export PLACE_DENSITY = %.2f\n'%CurPlaceDensity)
    if CurPlaceDensityLbAddon != 'empty':
        fo.write('export PLACE_DENSITY_LB_ADDON = %.2f\n'%CurPlaceDensityLbAddon)
    if CurAspectRatio != 'empty':
        fo.write('export CORE_ASPECT_RATIO = %s\n'%CurAspectRatio)
    if CurCoreDieMargin != 'empty':
        fo.write('export CORE_MARGIN = %s\n'%CurCoreDieMargin)
    if CurPinsDistance != 'empty':
        fo.write('export PLACE_PINS_ARGS = -min_distance %s\n'%CurPinsDistance)
    if CurGpPad != 'empty':
        fo.write('export CELL_PAD_IN_SITES_GLOBAL_PLACEMENT = %s\n'%CurGpPad)
    if CurDpPad != 'empty':
        fo.write('export CELL_PAD_IN_SITES_DETAIL_PLACEMENT = %s\n'%CurDpPad)
    if CurCtsClusterSize != 'empty':
        fo.write('export CTS_CLUSTER_SIZE = %s\n'%CurCtsClusterSize)
    if CurCtsClusterDiameter != 'empty':
        fo.write('export CTS_CLUSTER_DIAMETER = %s\n'%CurCtsClusterDiameter)
    if CurDrSeed != 'empty':
        fo.write('export OR_K = 1.0\n')
        fo.write('export OR_SEED = %s\n'%CurDrSeed)
    # Any routing knob requires a variant-specific fastroute.tcl.
    if CurLayerAdjust != 'empty' or \
       CurLayerAdjustM1 != 'empty' or \
       CurLayerAdjustM2 != 'empty' or \
       CurLayerAdjustM3 != 'empty' or \
       CurLayerAdjustM4 != 'empty' or \
       CurLayerAdjustM5 != 'empty' or \
       CurLayerAdjustM6 != 'empty' or \
       CurLayerAdjustM7 != 'empty' or \
       CurLayerAdjustM8 != 'empty' or \
       CurLayerAdjustM9 != 'empty' or \
       CurGrSeed != 'empty':
        fo.write('export FASTROUTE_TCL = $(dir $(DESIGN_CONFIG))/fastroute-DoE-%s.tcl'%variantName)
        # Public platforms live under ./platforms; others are siblings.
        if CurPlatform in PUBLIC:
            PLATFORM_DIR = './platforms/%s'%CurPlatform
        else:
            PLATFORM_DIR = '../../%s'%CurPlatform
        fFrIn = open('%s/fastroute.tcl'%PLATFORM_DIR,'r')
        filedata = fFrIn.read()
        fFrIn.close()
        CurLayerAdjustSet = [CurLayerAdjust, \
                             CurLayerAdjustM1, \
                             CurLayerAdjustM2, \
                             CurLayerAdjustM3, \
                             CurLayerAdjustM4, \
                             CurLayerAdjustM5, \
                             CurLayerAdjustM6, \
                             CurLayerAdjustM7, \
                             CurLayerAdjustM8, \
                             CurLayerAdjustM9 ]
        filedata = adjustFastRoute(filedata, CurLayerAdjustSet, CurGrOverflow)
        FrName = 'fastroute-DoE-'+variantName+'.tcl'
        fOutFr = open('%s/%s'%(CurChunkDir,FrName),'w')
        fOutFr.write(filedata)
        if CurGrSeed != 'empty':
            fOutFr.write('set_global_routing_random -seed %s'%CurGrSeed)
        fOutFr.close()
    fo.close()
    # Append this variant's run and metrics-collection commands.
    frun = open('./%s.sh'%ShellName, 'a')
    RunName = 'DESIGN_CONFIG=%s/%s make\n'%(CurChunkDir,fileName)
    frun.write(RunName)
    frun.close()
    fcollect = open('./%s_metrics_collect.sh'%ShellName, 'a')
    CollectName = 'python util/genMetrics.py -x -p %s -d %s -v %s -o metrics_%s/%s.json\n'%(CurPlatform, CurDesign, variantName, ShellName, variantName)
    fcollect.write(CollectName)
    fcollect.close()
# --- Script driver ---------------------------------------------------------
# First CLI argument selects the mode ('clean' deletes generated files).
MakeArg = sys.argv[1]
if not os.path.isdir('./metrics_%s'%ShellName):
    os.mkdir('./metrics_%s'%ShellName)
# Expand the sweep: placeholder-fill empty knobs, take the cross-product,
# and log the run plan to ./doe.log.
knobs = assignEmptyAttrs(SweepingAttributes)
ProductAttrs = list(productDict(knobs))
writeDoeLog(SweepingAttributes, ProductAttrs)
# Start the run scripts fresh (writeConfigs appends to them).
if os.path.isfile('./%s.sh'%ShellName):
    os.remove('./%s.sh'%ShellName)
if os.path.isfile('./%s_metrics_collect.sh'%ShellName):
    os.remove('./%s_metrics_collect.sh'%ShellName)
# Emit one config per run, advancing the chunk index every NumFilesPerChunk.
CurChunkNum = 0
for i, CurAttrs in enumerate(ProductAttrs, 1):
    if i % NumFilesPerChunk == 0:
        writeConfigs(CurAttrs, CurChunkNum)
        CurChunkNum = CurChunkNum+1
    else:
        writeConfigs(CurAttrs, CurChunkNum)
# with open('file.txt') as data:
# line = data.readlines()
#
#for line in lines:
# with open('file.txt') as data:
# for line in file_data:
| genMassive.py | 14,774 | !/usr/bin/env python3 This scripts attempts to generate massive design of experiment runscripts. and save it into a "runMassive.sh" and "doe.log". ------------------------------------------------------------------------------- The number of generated config files into designs/{platform}/{design}/chunks/chuck{number} directory. Orignal SDC file name define input parameters for generated .sh file name Design Define platform-design. User should remove ',' for the last item in the list. (string)'sky130hd-gcd' \'sky130hd-aes', \'sky130hd-jpeg', \'sky130hs-gcd', \'sky130hs-ibex', \'sky130hs-aes', \'sky130hs-jpeg', \'nangate45-gcd', \'nangate45-ibex', \'nangate45-aes', \'nangate45-jpeg', \'asap7-gcd', \'asap7-ibex', \'asap7-aes', \'asap7-jpeg', \ Target Clock Period (float) SDC uncertainty and IO delay. TODO: Currently, it only support when 'set uncertainty' and 'set io_delay' are defined in the constraint.sdc file. Synthesis Clock period for Yosys (for synthesis) The unit should follow each design (ns, ps) (float) Hierarchical Synthsis. 0 = hierarchical, 1 = flatten, empty = flatten (default) (int) Floorplan Utilization. e.g, 45 -> 45% of core util. (int)CORE_UTIL = [20, 40, 55] Aspect ratio. It REQUIRES 'CORE_UTIL' values (float) Core-to-die gap distance (um). It REQUIRES 'CORE_UTIL' values (int) Pin DistancePINS_DISTANCE = [2] Placement Global Placement Padding for std cells (int) Detailed Placement Padding for std cells (int) Global Placement target bin density (select only one option) (.2 float) option 1) PLACE_DENSITY uses the values in the list as it is. 
option 2) PLACE_DENSITY_LB_ADDON adds the values in the list to the lower boundary of the PLACE_DENSITY For eaxmple, PLACE_DENSITY_LB_ADDON = [0, 0.02, 0.04] means PLACE_DENSITY = [LB, LB+0.02, LB+0.04] LB of the place density == (total instance area + padding) / total die area CTS CTS clustering size and diameter (um) (int) Global Routing Set global routing layer capacity adjustment e.g.) 0.2 -> 20% usage for global routing Set for all layers. Each layer's layer adjustment will be overwritten with below per-layer values. (float) Set global routing random seed. (int) Set allow global routing overflow. 0 = no, 1 = yes, empty = no (default) (int) TODO: currently it does not work. Let this as 0 as it is. Detailed Routing Set global routing random seed. (int) TODO: Currently, only supports for SKY130HD and SKY130HS. TODO: user should manually change the layer name to match techLEF.def setPlaceDensity(DESIGN, Util, GpPad): if DESIGN == "ibex": LB = (Util/100) + (GpPad * (0.4*(Util/100)-0.01))+0.01 elif DESIGN == "aes": LB = (Util/100) + (GpPad * (0.5*(Util/100)-0.005))+0.02 else: LB = (Util/100) + (GpPad * (0.4*(Util/100)-0.01))+0.01 return LBprint(CurChunkNum)print(CurPlatform, CurDesign)print(CurClkPeriod, CurAbcClkPeriod, CurFlatten, CurCoreUtil)print(CurAspectRatio, CurCoreDieMargin, CurGpPad, CurDpPad)print(CurCtsClusterSize, CurCtsClusterDiameter, CurLayerAdjust)print(CurLayerAdjustM1, CurLayerAdjustM2, CurLayerAdjustM3)print(CurLayerAdjustM4, CurLayerAdjustM5, CurLayerAdjustM6)print(CurLayerAdjustM7, CurLayerAdjustM8, CurLayerAdjustM9)print(CurGrOverflow)print(CurAttrs.items())fileName = 'config-%s-%s-'%(CurPlatform, CurDesign)+variantName + '.mk'filedata = re.sub("-waveform [{}\s0-9\.]+$}", "\n", filedata)fOutSdc = open('./designs/%s/%s/constraint-%s-%s-'%(CurPlatform,CurDesign,CurPlatform,CurDesign)+variantName+'.sdc','w') with open('file.txt') as data: line = data.readlines()for line in lines: with open('file.txt') as data: for line in file_data: | 3,595 | en 
| 0.56749 |
from integration.helpers.base_test import BaseTest
class TestBasicLayerVersion(BaseTest):
    """
    Basic AWS::Serverless::StateMachine tests
    """

    def test_basic_state_machine_inline_definition(self):
        """
        Creates a State Machine from inline definition
        """
        self.create_and_verify_stack("basic_state_machine_inline_definition")

    def test_basic_state_machine_with_tags(self):
        """
        Creates a State Machine with tags
        """
        self.create_and_verify_stack("basic_state_machine_with_tags")

        tags = self.get_stack_tags("MyStateMachineArn")
        self.assertIsNotNone(tags)
        self._verify_tag_presence(tags, "stateMachine:createdBy", "SAM")
        self._verify_tag_presence(tags, "TagOne", "ValueOne")
        self._verify_tag_presence(tags, "TagTwo", "ValueTwo")

    def _verify_tag_presence(self, tags, key, value):
        """
        Verifies the presence of a tag and its value

        Parameters
        ----------
        tags : List of dict
            List of tag objects
        key : string
            Tag key
        value : string
            Tag value
        """
        # BUG FIX: supply a default of None so that a missing tag fails the
        # assertIsNotNone below with a clear test failure, instead of raising
        # an unhandled StopIteration out of next().
        tag = next((tag for tag in tags if tag["key"] == key), None)
        self.assertIsNotNone(tag)
        self.assertEqual(tag["value"], value)
| integration/single/test_basic_state_machine.py | 1,305 | Basic AWS::Serverless::StateMachine tests
Verifies the presence of a tag and its value
Parameters
----------
tags : List of dict
List of tag objects
key : string
Tag key
value : string
Tag value
Creates a State Machine from inline definition
Creates a State Machine with tags | 288 | en | 0.417277 |
from __future__ import absolute_import
from .context import *
from .base_verbs import *
from .model import OpenShiftPythonException
from .model import Model, Missing
from .selector import *
from .apiobject import *
from . import naming
from . import status
from . import config
from .ansible import ansible
# Single source for module version
__VERSION__ = '1.0.12'
null = None # Allow scripts to specify null in object definitions
# Allows modules to trigger errors
def error(msg, **kwargs):
    """Raise an OpenShiftPythonException carrying *msg* plus any extra keyword context."""
    raise OpenShiftPythonException(msg, **kwargs)
# Convenience method for accessing the module version
def get_module_version():
    """Return the module version string declared in __VERSION__."""
    return __VERSION__
| packages/openshift/__init__.py | 653 | Single source for module version Allow scripts to specify null in object definitions Allows modules to trigger errors Convenience method for accessing the module version | 169 | en | 0.372833 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test attention
"""
import unittest
import torch
from torch import tensor
from torch import nn
from function_GAT_attention import SpGraphAttentionLayer, ODEFuncAtt
from torch_geometric.utils import softmax, to_dense_adj
from data import get_dataset
class AttentionTests(unittest.TestCase):
    """Unit tests for SpGraphAttentionLayer and the attention-based ODE function."""

    def setUp(self):
        # Tiny hand-built graph: edge[0] holds source nodes, edge[1] target nodes.
        self.edge = tensor([[0, 2, 2, 1], [1, 0, 1, 2]])
        self.x = tensor([[1., 2.], [3., 2.], [4., 5.]], dtype=torch.float)
        # Weight matrix and attention vector for the manual GAT computation in test().
        self.W = tensor([[2, 1], [3, 2]], dtype=torch.float)
        self.alpha = tensor([[1, 2, 3, 4]], dtype=torch.float)
        # Fully connected 3-node graph (no self loops) with constant features —
        # attention over it should be uniform (see test_symetric_attention).
        self.edge1 = tensor([[0, 0, 1, 1, 2, 2], [1, 2, 0, 2, 0, 1]])
        self.x1 = torch.ones((3, 2), dtype=torch.float)
        self.leakyrelu = nn.LeakyReLU(0.2)
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        # Minimal option dict expected by the layers under test.
        self.opt = {'dataset': 'Cora', 'self_loop_weight': 1, 'leaky_relu_slope': 0.2, 'beta_dim': 'vc', 'heads': 2,
                    'K': 10,
                    'attention_norm_idx': 0, 'add_source': False, 'max_nfe': 1000, 'mix_features': False,
                    'attention_dim': 32,
                    'mixed_block': False, 'rewiring': None, 'no_alpha_sigmoid': False, 'reweight_attention': False,
                    'kinetic_energy': None, 'jacobian_norm2': None, 'total_deriv': None, 'directional_penalty': None}

    def tearDown(self) -> None:
        pass

    def test(self):
        """Reproduce the GAT attention computation by hand on the toy graph."""
        h = torch.mm(self.x, self.W)
        # Concatenate source and target embeddings for every edge.
        edge_h = torch.cat((h[self.edge[0, :], :], h[self.edge[1, :], :]), dim=1)
        self.assertTrue(edge_h.shape == torch.Size([self.edge.shape[1], 2 * 2]))
        ah = self.alpha.mm(edge_h.t()).t()
        self.assertTrue(ah.shape == torch.Size([self.edge.shape[1], 1]))
        edge_e = self.leakyrelu(ah)
        # Normalise edge scores per destination node.
        attention = softmax(edge_e, self.edge[1])
        print(attention)

    def test_function(self):
        """Attention weights must be a valid distribution on toy and Cora graphs."""
        in_features = self.x.shape[1]
        out_features = self.x.shape[1]

        def get_round_sum(tens, n_digits=3):
            # Sum along the normalisation axis, rounded to n_digits to
            # tolerate floating-point error before comparing to 1.
            val = torch.sum(tens, dim=int(not self.opt['attention_norm_idx']))
            return (val * 10 ** n_digits).round() / (10 ** n_digits)

        att_layer = SpGraphAttentionLayer(in_features, out_features, self.opt, self.device, concat=True)
        attention, _ = att_layer(self.x, self.edge)  # should be n_edges x n_heads
        self.assertTrue(attention.shape == (self.edge.shape[1], self.opt['heads']))
        dense_attention1 = to_dense_adj(self.edge, edge_attr=attention[:, 0]).squeeze()
        dense_attention2 = to_dense_adj(self.edge, edge_attr=attention[:, 1]).squeeze()
        # Each head's attention must sum to 1 per node and lie in (0, 1].
        self.assertTrue(torch.all(torch.eq(get_round_sum(dense_attention1), 1.)))
        self.assertTrue(torch.all(torch.eq(get_round_sum(dense_attention2), 1.)))
        self.assertTrue(torch.all(attention > 0.))
        self.assertTrue(torch.all(attention <= 1.))
        # Repeat the same checks on a real dataset.
        dataset = get_dataset(self.opt, '../data', False)
        data = dataset.data
        in_features = data.x.shape[1]
        out_features = data.x.shape[1]
        att_layer = SpGraphAttentionLayer(in_features, out_features, self.opt, self.device, concat=True)
        attention, _ = att_layer(data.x, data.edge_index)  # should be n_edges x n_heads
        self.assertTrue(attention.shape == (data.edge_index.shape[1], self.opt['heads']))
        dense_attention1 = to_dense_adj(data.edge_index, edge_attr=attention[:, 0]).squeeze()
        dense_attention2 = to_dense_adj(data.edge_index, edge_attr=attention[:, 1]).squeeze()
        self.assertTrue(torch.all(torch.eq(get_round_sum(dense_attention1), 1.)))
        self.assertTrue(torch.all(torch.eq(get_round_sum(dense_attention2), 1.)))
        self.assertTrue(torch.all(attention > 0.))
        self.assertTrue(torch.all(attention <= 1.))

    def test_symetric_attention(self):
        """Constant features on a symmetric graph should give uniform attention."""
        in_features = self.x1.shape[1]
        out_features = self.x1.shape[1]
        att_layer = SpGraphAttentionLayer(in_features, out_features, self.opt, self.device, concat=True)
        attention, _ = att_layer(self.x1, self.edge1)  # should be n_edges x n_heads
        # Every node has two equally-weighted neighbours, so each weight is 0.5.
        self.assertTrue(torch.all(torch.eq(attention, 0.5 * torch.ones((self.edge1.shape[1], self.x1.shape[1])))))

    def test_module(self):
        """ODEFuncAtt must map node features to an output of the expected shape."""
        dataset = get_dataset(self.opt, '../data', False)
        t = 1
        out_dim = 6
        func = ODEFuncAtt(dataset.data.num_features, out_dim, self.opt, dataset.data, self.device)
        out = func(t, dataset.data.x)
        print(out.shape)
        self.assertTrue(out.shape == (dataset.data.num_nodes, dataset.num_features))
| test/test_attention.py | 4,344 | Test attention
!/usr/bin/env python -*- coding: utf-8 -*- should be n_edges x n_heads should be n_edges x n_heads should be n_edges x n_heads | 142 | en | 0.761891 |
"""Platform for Husqvarna Automower device tracker integration."""
from homeassistant.components.device_tracker import SOURCE_TYPE_GPS
from homeassistant.components.device_tracker.config_entry import TrackerEntity
from homeassistant.helpers.entity import DeviceInfo
from .const import DOMAIN
async def async_setup_entry(hass, entry, async_add_devices) -> None:
    """Set up a device-tracker entity for every mower in the config entry."""
    session = hass.data[DOMAIN][entry.entry_id]
    trackers = [
        AutomowerTracker(session, index)
        for index, _mower in enumerate(session.data["data"])
    ]
    async_add_devices(trackers)
class AutomowerTracker(TrackerEntity):
    """GPS device tracker for a single Husqvarna Automower."""

    def __init__(self, session, idx) -> None:
        self.session = session
        self.idx = idx
        self.mower = self.session.data["data"][self.idx]
        attributes = self._attributes()
        self.mower_id = self.mower["id"]
        self.mower_name = attributes["system"]["name"]
        self.model = attributes["system"]["model"]
        # Re-render this entity whenever the shared session pushes fresh data.
        self.session.register_cb(
            lambda _: self.async_write_ha_state(), schedule_immediately=True
        )

    def _attributes(self) -> dict:
        """Return the current attribute payload for this mower."""
        return self.session.data["data"][self.idx]["attributes"]

    @property
    def device_info(self) -> DeviceInfo:
        """Link this entity to the mower device in the device registry."""
        return DeviceInfo(identifiers={(DOMAIN, self.mower_id)})

    @property
    def name(self) -> str:
        """Return the name of the entity."""
        return self.mower_name

    @property
    def unique_id(self) -> str:
        """Return a unique identifier for this entity."""
        return f"{self.mower_id}_dt"

    @property
    def source_type(self) -> str:
        """Return the source type, eg gps or router, of the device."""
        return SOURCE_TYPE_GPS

    @property
    def latitude(self) -> float:
        """Return latitude value of the device."""
        return self._attributes()["positions"][0]["latitude"]

    @property
    def longitude(self) -> float:
        """Return longitude value of the device."""
        return self._attributes()["positions"][0]["longitude"]
| custom_components/husqvarna_automower/device_tracker.py | 2,238 | Defining the Device Tracker Entity.
Return latitude value of the device.
Return longitude value of the device.
Return the name of the entity.
Return the source type, eg gps or router, of the device.
Return a unique identifier for this entity.
Platform for Husqvarna Automower device tracker integration. | 303 | en | 0.557116 |
# _*_ coding: utf-8 _*_
__author__ = 'Di Meng'
__date__ = '1/3/2018 9:26 PM'

from tutorial.feature_functions import *
import pandas as pd
import plotly as py
import json
from plotly import tools
import plotly.graph_objs as go

# Loading our data.
df = pd.read_csv('EURUSD_hours.csv')
df.columns = ['date', 'open', 'high', 'low', 'close', 'volume']
df.date = pd.to_datetime(df.date, format='%d.%m.%Y %H:%M:%S.%f')
df = df.set_index(df.date)
df = df[['open', 'high', 'low', 'close', 'volume']]
# BUG FIX: drop_duplicates() is not in-place, it returns a new DataFrame;
# the original discarded the result, so duplicates were never removed.
df = df.drop_duplicates(keep=False)
df = df.iloc[:500]

# Moving average of the close price.
ma = df.close.rolling(center=False, window=30).mean()

# detrended = detrend(df, method='difference')
# f = fourier(df, [10, 15],method='difference')

# HA
# HAresults = candles(df, [1])
# HA = HAresults.candles[1]

# WADL with a 15-period window — presumably Williams accumulation/distribution;
# confirm against tutorial.feature_functions.
results = wadl(df, [15])
line = results.wadl[15]
print(line['close'])

# Draw graphs.
trace = go.Ohlc(x=df.index, open=df.open, high=df.high, low=df.low, close=df.close, name='Currency Quote')
trace1 = go.Scatter(x=df.index, y=ma)
# BUG FIX: plot the WADL close series itself. The original passed
# line.close.to_json(), i.e. a single JSON *string*, which is not valid y data.
trace2 = go.Scatter(x=df.index, y=line.close)

# linear detrand plot
# trace2 = go.Scatter(x=df.index, y=detrended)
# difference detrand plot
# trace2 = go.Scatter(x=df.index, y=detrended)

data = [trace, trace1, trace2]

fig = tools.make_subplots(rows=2, cols=1, shared_xaxes=True)
fig.append_trace(trace, 1, 1)
fig.append_trace(trace1, 1, 1)
fig.append_trace(trace2, 2, 1)

py.offline.plot(fig, filename="test.html")
#!/usr/bin/env python3
"""Run AFL repeatedly with externally supplied generated packet from STDIN."""
import logging
import sys
from ryu.controller import dpset
from faucet import faucet
from faucet import faucet_experimental_api
import afl
import fake_packet
ROUNDS = 1
logging.disable(logging.CRITICAL)
def main():
    """Run AFL repeatedly with externally supplied generated packet from STDIN."""
    application = faucet.Faucet(
        dpset=dpset.DPSet(),
        faucet_experimental_api=faucet_experimental_api.FaucetExperimentalAPI())
    application.start()

    # make sure dps are running
    if application.valves_manager is not None:
        for valve in list(application.valves_manager.valves.values()):
            # dyn_finalized guards the DP config against mutation; lift it just
            # long enough to mark the DP running, then restore the prior state.
            state = valve.dp.dyn_finalized
            valve.dp.dyn_finalized = False
            valve.dp.running = True
            valve.dp.dyn_finalized = state

    while afl.loop(ROUNDS):
        # receive input from afl
        rcv = sys.stdin.read()
        data = None
        try:
            data = bytearray.fromhex(rcv)  # pytype: disable=missing-parameter
        except (ValueError, TypeError):
            # Non-hex fuzz input: skip this round instead of crashing the harness.
            continue
        # create fake packet
        _dp = fake_packet.Datapath(1)
        msg = fake_packet.Message(datapath=_dp, cookie=15243729, port=1, data=data, in_port=1)
        pkt = fake_packet.RyuEvent(msg)
        # send fake packet to faucet
        application.packet_in_handler(pkt)


if __name__ == "__main__":
    main()
| tests/fuzzer/fuzz_packet.py | 1,477 | Run AFL repeatedly with externally supplied generated packet from STDIN.
Run AFL repeatedly with externally supplied generated packet from STDIN.
!/usr/bin/env python3 make sure dps are running receive input from afl pytype: disable=missing-parameter create fake packet send fake packet to faucet | 297 | en | 0.864777 |
import logging
import warnings
lcb_min_version_baseline = (2, 9, 0)
def get_lcb_min_version():
    """Return the minimum required libcouchbase version as an int tuple.

    Starts from the hard-coded baseline and raises it to the version named by
    the ``libcouchbase_version`` substitution in README.rst when that is
    higher. Any failure while reading or parsing the README is non-fatal:
    a warning is emitted and the baseline (or best value so far) is returned.
    """
    result = lcb_min_version_baseline
    try:
        # check the version listed in README.rst isn't greater than lcb_min_version
        # bump it up to the specified version if it is
        import docutils.parsers.rst
        import docutils.utils
        import docutils.frontend

        parser = docutils.parsers.rst.Parser()
        with open("README.rst") as README:
            settings = docutils.frontend.OptionParser().get_default_values()
            # Values.update(dict, option_parser): apply quiet parse settings.
            settings.update(
                dict(tab_width=4, report_level=1, pep_references=False, rfc_references=False, syntax_highlight=False),
                docutils.frontend.OptionParser())
            document = docutils.utils.new_document(README.name, settings=settings)
            parser.parse(README.read(), document)
            # The README declares the version via a substitution definition.
            readme_min_version = tuple(
                map(int, document.substitution_defs.get("libcouchbase_version").astext().split('.')))
            result = max(result, readme_min_version)
            logging.info("min version is {}".format(result))
    except Exception as e:
        # Deliberate best-effort: never fail the build over README parsing.
        warnings.warn("problem: {}".format(e))
    return result
# Generated by Django 2.2.5 on 2019-11-10 02:46
from django.db import migrations
import django.db.models.deletion
import smart_selects.db_fields
class Migration(migrations.Migration):
    # Auto-generated by Django 2.2.5. Both operations switch the ``genus``
    # foreign keys to smart_selects ChainedForeignKey fields chained on the
    # specimen's ``family`` selection. Do not hand-edit applied migrations.

    dependencies = [
        ('main_site', '0014_auto_20191109_2038'),
    ]

    operations = [
        migrations.AlterField(
            model_name='mushroomspecimen',
            name='genus',
            field=smart_selects.db_fields.ChainedForeignKey(chained_field='family', chained_model_field='family', default=0, on_delete=django.db.models.deletion.CASCADE, to='main_site.Genus', verbose_name='Género'),
        ),
        migrations.AlterField(
            model_name='plantspecimen',
            name='genus',
            field=smart_selects.db_fields.ChainedForeignKey(chained_field='family', chained_model_field='family', default=0, on_delete=django.db.models.deletion.CASCADE, to='main_site.Genus', verbose_name='Género'),
        ),
    ]
| plants_api/main_site/migrations/0015_auto_20191109_2046.py | 944 | Generated by Django 2.2.5 on 2019-11-10 02:46 | 45 | en | 0.569301 |
import requests
import json
from datetime import datetime, timezone
from . utils import _extract_videos_necessary_details, _save_video_detils_in_db
from .models import ApiKeys
from . import config
def _get_api_key():  # getting different key w.r.t last used every time cron job starts.(load balanced)
    """Return the least-recently-used API key and stamp it as used now.

    Selecting by ascending ``last_used`` rotates the keys so quota usage is
    balanced across them between cron runs.
    """
    new_key = ApiKeys.objects.all().order_by('last_used').first()
    # Record the usage time; the update() row count is not needed (the
    # original bound it to a dead, misspelled local ``_reponse``).
    ApiKeys.objects.filter(
        api_key=new_key.api_key).update(last_used=datetime.now(timezone.utc))
    return new_key.api_key
def get_recent_youtube_videos_details():
    """Fetch recent video details from the YouTube search API.

    Merges the configured query parameters with a rotated API key, calls the
    search endpoint, extracts the per-video details, persists them when any
    were found, and returns the extracted list.
    """
    params = {**config.params, 'key': _get_api_key()}
    # BUG FIX: log label was misspelled ("Prameters").
    # NOTE(review): this print exposes the API key in logs — consider redacting.
    print('Parameters: ', params)
    youtube_api_response = requests.get(
        config.YOUTUBE_SEARCH_URL, params=params)
    print('Youtube API Response: ', youtube_api_response.text)
    youtube_api_response = json.loads(youtube_api_response.text)
    videos_details = _extract_videos_necessary_details(
        youtube_api_response.get('items', []))
    if videos_details:
        # Persist; the DB status is intentionally ignored (the original bound
        # it to an unused local ``_response``).
        _save_video_detils_in_db(videos_details)
    return videos_details
| youtubeDataApi/searchApi/cron.py | 1,093 | getting different key w.r.t last used every time cron job starts.(load balanced) | 80 | en | 0.791013 |
"""
Structured information on a coordinate point.
"""
# this file was auto-generated
from datetime import date, datetime
from fairgraph.base_v3 import EmbeddedMetadata, IRI
from fairgraph.fields import Field
class CoordinatePoint(EmbeddedMetadata):
    """
    Structured information on a coordinate point.
    """
    # openMINDS schema type(s) instances of this class serialize to.
    type = ["https://openminds.ebrains.eu/sands/CoordinatePoint"]
    # JSON-LD context prefixes used when (de)serializing instances.
    context = {
        "schema": "http://schema.org/",
        "kg": "https://kg.ebrains.eu/api/instances/",
        "vocab": "https://openminds.ebrains.eu/vocab/",
        "terms": "https://openminds.ebrains.eu/controlledTerms/",
        "core": "https://openminds.ebrains.eu/core/"
    }
    # Declarative field list (auto-generated); both fields are required.
    fields = [
        Field("coordinates", "openminds.core.QuantitativeValue", "vocab:coordinates", multiple=True, required=True,
              doc="Pair or triplet of numbers defining a location in a given coordinate space."),
        Field("coordinate_space", ["openminds.sands.CommonCoordinateSpace", "openminds.sands.CustomCoordinateSpace"], "vocab:coordinateSpace", multiple=False, required=True,
              doc="Two or three dimensional geometric setting."),
    ]
| fairgraph/openminds/sands/miscellaneous/coordinate_point.py | 1,163 | Structured information on a coordinate point.
Structured information on a coordinate point.
this file was auto-generated | 122 | en | 0.88305 |
# coding=utf-8
from pyecharts.chart import Chart
def kline_tooltip_formatter(params):
    """Build the candlestick tooltip text (open/close/lowest/highest).

    NOTE(review): pyecharts passes formatter functions like this through a
    Python-to-JavaScript translation for the browser-side callback, so the
    body should stay simple/translatable. Presumably params[0].data holds
    [index, open, close, lowest, highest] — confirm against ECharts docs.
    """
    text = (
        params[0].seriesName
        + "<br/>"
        + "- open:"
        + params[0].data[1]
        + "<br/>"
        + "- close:"
        + params[0].data[2]
        + "<br/>"
        + "- lowest:"
        + params[0].data[3]
        + "<br/>"
        + "- highest:"
        + params[0].data[4]
    )
    return text
class Kline(Chart):
    """
    <<< Kline / Candlestick Chart >>>

    Red candles mark rising prices, blue candles mark falling prices.
    """

    def __init__(self, title="", subtitle="", **kwargs):
        super(Kline, self).__init__(title, subtitle, **kwargs)

    def add(self, *args, **kwargs):
        # Public entry point; delegates to the private __add and returns self
        # so calls can be chained.
        self.__add(*args, **kwargs)
        return self

    def __add(self, name, x_axis, y_axis, **kwargs):
        """
        :param name:
            Series name, used for tooltip display and legend filtering.
        :param x_axis:
            Data for the x axis.
        :param y_axis:
            Data for the y axis. Each row is one data item; each column is one
            dimension. A data item is [open, close, lowest, highest].
        :param kwargs:
        """
        kwargs.update(type="candlestick", x_axis=x_axis)
        # Fall back to the module-level tooltip defaults unless the caller
        # supplied their own.
        if "tooltip_formatter" not in kwargs:
            kwargs["tooltip_formatter"] = kline_tooltip_formatter
        if "tooltip_trigger" not in kwargs:
            kwargs["tooltip_trigger"] = "axis"

        chart = self._get_all_options(**kwargs)
        xaxis, yaxis = chart["xy_axis"]
        self._option.update(xAxis=xaxis, yAxis=yaxis)
        self._option.get("xAxis")[0]["scale"] = True
        self._option.get("yAxis")[0]["scale"] = True
        # splitArea stripes make the price bands easier to read.
        self._option.get("yAxis")[0]["splitArea"] = {"show": True}
        self._option.get("legend")[0].get("data").append(name)
        self._option.get("series").append(
            {
                "type": "candlestick",
                "name": name,
                "data": y_axis,
                "markPoint": chart["mark_point"],
                "markLine": chart["mark_line"],
                "seriesId": self._option.get("series_id"),
            }
        )
        self._config_components(**kwargs)
| venv/lib/python3.7/site-packages/pyecharts/charts/kline.py | 2,347 | <<< K 线图 >>>
红涨蓝跌
:param name:
系列名称,用于 tooltip 的显示,legend 的图例筛选。
:param x_axis:
x 坐标轴数据。
:param y_axis:
y 坐标轴数据。数据中,每一行是一个『数据项』,每一列属于一个『维度』。
数据项具体为 [open, close, lowest, highest] (即:[开盘值, 收盘值,
最低值, 最高值])。
:param kwargs:
coding=utf-8 | 256 | zh | 0.96005 |
class FabricSheetType(ElementType,IDisposable):
""" Represents a fabric sheet type,used in the generation of fabric wires. """
@staticmethod
def CreateDefaultFabricSheetType(ADoc):
"""
CreateDefaultFabricSheetType(ADoc: Document) -> ElementId
Creates a new FabricSheetType object with a default name.
ADoc: The document.
Returns: The newly created type id.
"""
pass
def Dispose(self):
""" Dispose(self: Element,A_0: bool) """
pass
def getBoundingBox(self,*args):
""" getBoundingBox(self: Element,view: View) -> BoundingBoxXYZ """
pass
def GetReinforcementRoundingManager(self):
"""
GetReinforcementRoundingManager(self: FabricSheetType) -> FabricRoundingManager
Returns an object for managing reinforcement rounding override settings.
Returns: The rounding manager.
"""
pass
def GetWireItem(self,wireIndex,direction):
"""
GetWireItem(self: FabricSheetType,wireIndex: int,direction: WireDistributionDirection) -> FabricWireItem
Gets the Wire stored in the FabricSheetType at the associated index.
wireIndex: Item index in the Fabric Sheet
direction: Wire distribution direction of the inquired item
Returns: Fabric wire Item
"""
pass
def IsCustom(self):
"""
IsCustom(self: FabricSheetType) -> bool
Verifies if the type is Custom Fabric Sheet
Returns: True if Layout is set on Custom and if the wireArr is not null
"""
pass
def IsValidMajorLapSplice(self,majorLapSplice):
"""
IsValidMajorLapSplice(self: FabricSheetType,majorLapSplice: float) -> bool
Identifies if the input value is valid to be applied as the major lap splice
value for this FabricSheetType.
"""
pass
def IsValidMinorLapSplice(self,minorLapSplice):
"""
IsValidMinorLapSplice(self: FabricSheetType,minorLapSplice: float) -> bool
Identifies if the input value is valid to be applied as the minor lap splice
value for this FabricSheetType.
"""
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: Element,disposing: bool) """
pass
def setElementType(self,*args):
""" setElementType(self: Element,type: ElementType,incompatibleExceptionMessage: str) """
pass
def SetLayoutAsCustomPattern(self,minorStartOverhang,minorEndOverhang,majorStartOverhang,majorEndOverhang,minorFabricWireItems,majorFabricWireItems):
""" SetLayoutAsCustomPattern(self: FabricSheetType,minorStartOverhang: float,minorEndOverhang: float,majorStartOverhang: float,majorEndOverhang: float,minorFabricWireItems: IList[FabricWireItem],majorFabricWireItems: IList[FabricWireItem]) """
pass
def SetMajorLayoutAsActualSpacing(self,overallWidth,minorStartOverhang,spacing):
"""
SetMajorLayoutAsActualSpacing(self: FabricSheetType,overallWidth: float,minorStartOverhang: float,spacing: float)
Sets the major layout pattern as ActualSpacing,while specifying the needed
parameters for this pattern.
overallWidth: The entire width of the wire sheet in the minor direction.
minorStartOverhang: The distance from the edge of the sheet to the first wire in the minor
direction.
spacing: The distance between the wires in the major direction.
"""
pass
def SetMajorLayoutAsFixedNumber(self,overallWidth,minorStartOverhang,minorEndOverhang,numberOfWires):
"""
SetMajorLayoutAsFixedNumber(self: FabricSheetType,overallWidth: float,minorStartOverhang: float,minorEndOverhang: float,numberOfWires: int)
Sets the major layout pattern as FixedNumber,while specifying the needed
parameters for this pattern.
overallWidth: The entire width of the wire sheet in the minor direction.
minorStartOverhang: The distance from the edge of the sheet to the first wire in the minor
direction.
minorEndOverhang: The distance from the last wire to the edge of the sheet in the minor direction.
numberOfWires: The number of the wires to set in the major direction.
"""
pass
def SetMajorLayoutAsMaximumSpacing(self,overallWidth,minorStartOverhang,minorEndOverhang,spacing):
"""
SetMajorLayoutAsMaximumSpacing(self: FabricSheetType,overallWidth: float,minorStartOverhang: float,minorEndOverhang: float,spacing: float)
Sets the major layout pattern as MaximumSpacing,while specifying the needed
parameters for this pattern.
overallWidth: The entire width of the wire sheet in the minor direction.
minorStartOverhang: The distance from the edge of the sheet to the first wire in the minor
direction.
minorEndOverhang: The distance from the last wire to the edge of the sheet in the minor direction.
spacing: The distance between the wires in the major direction.
"""
pass
def SetMajorLayoutAsNumberWithSpacing(self,overallWidth,minorStartOverhang,numberOfWires,spacing):
"""
SetMajorLayoutAsNumberWithSpacing(self: FabricSheetType,overallWidth: float,minorStartOverhang: float,numberOfWires: int,spacing: float)
Sets the major layout pattern as NumberWithSpacing,while specifying the needed
parameters for this pattern.
overallWidth: The entire width of the wire sheet in the minor direction.
minorStartOverhang: The distance from the edge of the sheet to the first wire in the minor
direction.
numberOfWires: The number of the wires to set in the major direction.
spacing: The distance between the wires in the major direction.
"""
pass
def SetMinorLayoutAsActualSpacing(self,overallLength,majorStartOverhang,spacing):
"""
SetMinorLayoutAsActualSpacing(self: FabricSheetType,overallLength: float,majorStartOverhang: float,spacing: float)
Sets the minor layout pattern as ActualSpacing,while specifying the needed
parameters for this pattern.
overallLength: The entire length of the wire sheet in the major direction.
majorStartOverhang: The distance from the edge of the sheet to the first wire in the major
direction.
spacing: The distance between the wires in the minor direction.
"""
pass
def SetMinorLayoutAsFixedNumber(self,overallLength,majorStartOverhang,majorEndOverhang,numberOfWires):
"""
SetMinorLayoutAsFixedNumber(self: FabricSheetType,overallLength: float,majorStartOverhang: float,majorEndOverhang: float,numberOfWires: int)
Sets the major layout pattern as FixedNumber,while specifying the needed
parameters for this pattern.
overallLength: The entire length of the wire sheet in the major direction.
majorStartOverhang: The distance from the edge of the sheet to the first wire in the major
direction.
majorEndOverhang: The distance from the last wire to the edge of the sheet in the major direction.
numberOfWires: The number of the wires to set in the minor direction.
"""
pass
def SetMinorLayoutAsMaximumSpacing(self,overallLength,majorStartOverhang,majorEndOverhang,spacing):
"""
SetMinorLayoutAsMaximumSpacing(self: FabricSheetType,overallLength: float,majorStartOverhang: float,majorEndOverhang: float,spacing: float)
Sets the major layout pattern as MaximumSpacing,while specifying the needed
parameters for this pattern.
overallLength: The entire length of the wire sheet in the major direction.
majorStartOverhang: The distance from the edge of the sheet to the first wire in the major
direction.
majorEndOverhang: The distance from the last wire to the edge of the sheet in the major direction.
spacing: The distance between the wires in the minor direction.
"""
pass
def SetMinorLayoutAsNumberWithSpacing(self,overallLength,majorStartOverhang,numberOfWires,spacing):
"""
SetMinorLayoutAsNumberWithSpacing(self: FabricSheetType,overallLength: float,majorStartOverhang: float,numberOfWires: int,spacing: float)
Sets the major layout pattern as NumberWithSpacing,while specifying the needed
parameters for this pattern.
overallLength: The entire length of the wire sheet in the major direction.
majorStartOverhang: The distance from the edge of the sheet to the first wire in the major
direction.
numberOfWires: The number of wires in the minor direction.
spacing: The distance between the wires in the minor direction.
"""
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self, *args):
    """x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
    # Stub constructor; construction happens inside the native Revit API.
    pass
# Auto-generated Revit API property stubs for FabricSheetType: each exposes
# placeholder get/set/delete accessors; the trailing string documents the
# underlying API member.
MajorDirectionWireType = property(
    lambda self: object(), lambda self, v: None, lambda self: None)
"""The id of the FabricWireType to be used in the major direction.

Get: MajorDirectionWireType(self: FabricSheetType) -> ElementId
Set: MajorDirectionWireType(self: FabricSheetType)=value
"""

MajorEndOverhang = property(
    lambda self: object(), lambda self, v: None, lambda self: None)
"""The distance from the edge of the sheet to the last wire (measured in the major direction).

Get: MajorEndOverhang(self: FabricSheetType) -> float
"""

MajorLapSpliceLength = property(
    lambda self: object(), lambda self, v: None, lambda self: None)
"""The lap splice length in the major direction.

Get: MajorLapSpliceLength(self: FabricSheetType) -> float
Set: MajorLapSpliceLength(self: FabricSheetType)=value
"""

MajorLayoutPattern = property(
    lambda self: object(), lambda self, v: None, lambda self: None)
"""The layout pattern in the major direction.

Get: MajorLayoutPattern(self: FabricSheetType) -> FabricSheetLayoutPattern
"""

MajorNumberOfWires = property(
    lambda self: object(), lambda self, v: None, lambda self: None)
"""The number of wires used in the major direction (includes the first and last wires).

Get: MajorNumberOfWires(self: FabricSheetType) -> int
"""

MajorReinforcementArea = property(
    lambda self: object(), lambda self, v: None, lambda self: None)
"""The area of fabric divided by the spacing of the wire in the major direction.

Get: MajorReinforcementArea(self: FabricSheetType) -> float
"""

MajorSpacing = property(
    lambda self: object(), lambda self, v: None, lambda self: None)
"""The spacing between the wires in the major direction (not including the overhangs).

Get: MajorSpacing(self: FabricSheetType) -> float
"""

MajorStartOverhang = property(
    lambda self: object(), lambda self, v: None, lambda self: None)
"""The distance from the edge of the sheet to the first wire (measured in the major direction).

Get: MajorStartOverhang(self: FabricSheetType) -> float
"""

Material = property(
    lambda self: object(), lambda self, v: None, lambda self: None)
"""The id of the material assigned to wires.

Get: Material(self: FabricSheetType) -> ElementId
Set: Material(self: FabricSheetType)=value
"""

MinorDirectionWireType = property(
    lambda self: object(), lambda self, v: None, lambda self: None)
"""The id of the FabricWireType to be used in the minor direction.

Get: MinorDirectionWireType(self: FabricSheetType) -> ElementId
Set: MinorDirectionWireType(self: FabricSheetType)=value
"""

MinorEndOverhang = property(
    lambda self: object(), lambda self, v: None, lambda self: None)
"""The distance from the edge of the sheet to the last wire (measured in the minor direction).

Get: MinorEndOverhang(self: FabricSheetType) -> float
"""

MinorLapSpliceLength = property(
    lambda self: object(), lambda self, v: None, lambda self: None)
"""The lap splice length in the minor direction.

Get: MinorLapSpliceLength(self: FabricSheetType) -> float
Set: MinorLapSpliceLength(self: FabricSheetType)=value
"""

MinorLayoutPattern = property(
    lambda self: object(), lambda self, v: None, lambda self: None)
"""The layout pattern in the minor direction.

Get: MinorLayoutPattern(self: FabricSheetType) -> FabricSheetLayoutPattern
"""

MinorNumberOfWires = property(
    lambda self: object(), lambda self, v: None, lambda self: None)
"""The number of wires used in the minor direction (includes the 1st and last wires).

Get: MinorNumberOfWires(self: FabricSheetType) -> int
"""

MinorReinforcementArea = property(
    lambda self: object(), lambda self, v: None, lambda self: None)
"""The area of fabric divided by the spacing of the wire in the minor direction.

Get: MinorReinforcementArea(self: FabricSheetType) -> float
"""

MinorSpacing = property(
    lambda self: object(), lambda self, v: None, lambda self: None)
"""The spacing between the wires in the minor direction (not including the overhangs).

Get: MinorSpacing(self: FabricSheetType) -> float
"""

MinorStartOverhang = property(
    lambda self: object(), lambda self, v: None, lambda self: None)
"""The distance from the edge of the sheet to the first wire (measured in the minor direction).

Get: MinorStartOverhang(self: FabricSheetType) -> float
"""

OverallLength = property(
    lambda self: object(), lambda self, v: None, lambda self: None)
"""The length of the wire sheet (including overhangs) in the major direction.

Get: OverallLength(self: FabricSheetType) -> float
"""

OverallWidth = property(
    lambda self: object(), lambda self, v: None, lambda self: None)
"""The length of the wire sheet (including overhangs) in the minor direction.

Get: OverallWidth(self: FabricSheetType) -> float
"""

SheetMass = property(
    lambda self: object(), lambda self, v: None, lambda self: None)
"""The sheet mass.

Get: SheetMass(self: FabricSheetType) -> float
Set: SheetMass(self: FabricSheetType)=value
"""

SheetMassUnit = property(
    lambda self: object(), lambda self, v: None, lambda self: None)
"""The sheet mass per area unit.

Get: SheetMassUnit(self: FabricSheetType) -> float
"""
| release/stubs.min/Autodesk/Revit/DB/Structure/__init___parts/FabricSheetType.py | 14,378 | Represents a fabric sheet type,used in the generation of fabric wires.
CreateDefaultFabricSheetType(ADoc: Document) -> ElementId
Creates a new FabricSheetType object with a default name.
ADoc: The document.
Returns: The newly created type id.
Dispose(self: Element,A_0: bool)
GetReinforcementRoundingManager(self: FabricSheetType) -> FabricRoundingManager
Returns an object for managing reinforcement rounding override settings.
Returns: The rounding manager.
GetWireItem(self: FabricSheetType,wireIndex: int,direction: WireDistributionDirection) -> FabricWireItem
Gets the Wire stored in the FabricSheetType at the associated index.
wireIndex: Item index in the Fabric Sheet
direction: Wire distribution direction of the inquired item
Returns: Fabric wire Item
IsCustom(self: FabricSheetType) -> bool
Verifies if the type is Custom Fabric Sheet
Returns: True if Layout is set on Custom and if the wireArr is not null
IsValidMajorLapSplice(self: FabricSheetType,majorLapSplice: float) -> bool
Identifies if the input value is valid to be applied as the major lap splice
value for this FabricSheetType.
IsValidMinorLapSplice(self: FabricSheetType,minorLapSplice: float) -> bool
Identifies if the input value is valid to be applied as the minor lap splice
value for this FabricSheetType.
ReleaseUnmanagedResources(self: Element,disposing: bool)
SetLayoutAsCustomPattern(self: FabricSheetType,minorStartOverhang: float,minorEndOverhang: float,majorStartOverhang: float,majorEndOverhang: float,minorFabricWireItems: IList[FabricWireItem],majorFabricWireItems: IList[FabricWireItem])
SetMajorLayoutAsActualSpacing(self: FabricSheetType,overallWidth: float,minorStartOverhang: float,spacing: float)
Sets the major layout pattern as ActualSpacing,while specifying the needed
parameters for this pattern.
overallWidth: The entire width of the wire sheet in the minor direction.
minorStartOverhang: The distance from the edge of the sheet to the first wire in the minor
direction.
spacing: The distance between the wires in the major direction.
SetMajorLayoutAsFixedNumber(self: FabricSheetType,overallWidth: float,minorStartOverhang: float,minorEndOverhang: float,numberOfWires: int)
Sets the major layout pattern as FixedNumber,while specifying the needed
parameters for this pattern.
overallWidth: The entire width of the wire sheet in the minor direction.
minorStartOverhang: The distance from the edge of the sheet to the first wire in the minor
direction.
minorEndOverhang: The distance from the last wire to the edge of the sheet in the minor direction.
numberOfWires: The number of the wires to set in the major direction.
SetMajorLayoutAsMaximumSpacing(self: FabricSheetType,overallWidth: float,minorStartOverhang: float,minorEndOverhang: float,spacing: float)
Sets the major layout pattern as MaximumSpacing,while specifying the needed
parameters for this pattern.
overallWidth: The entire width of the wire sheet in the minor direction.
minorStartOverhang: The distance from the edge of the sheet to the first wire in the minor
direction.
minorEndOverhang: The distance from the last wire to the edge of the sheet in the minor direction.
spacing: The distance between the wires in the major direction.
SetMajorLayoutAsNumberWithSpacing(self: FabricSheetType,overallWidth: float,minorStartOverhang: float,numberOfWires: int,spacing: float)
Sets the major layout pattern as NumberWithSpacing,while specifying the needed
parameters for this pattern.
overallWidth: The entire width of the wire sheet in the minor direction.
minorStartOverhang: The distance from the edge of the sheet to the first wire in the minor
direction.
numberOfWires: The number of the wires to set in the major direction.
spacing: The distance between the wires in the major direction.
SetMinorLayoutAsActualSpacing(self: FabricSheetType,overallLength: float,majorStartOverhang: float,spacing: float)
Sets the minor layout pattern as ActualSpacing,while specifying the needed
parameters for this pattern.
overallLength: The entire length of the wire sheet in the major direction.
majorStartOverhang: The distance from the edge of the sheet to the first wire in the major
direction.
spacing: The distance between the wires in the minor direction.
SetMinorLayoutAsFixedNumber(self: FabricSheetType,overallLength: float,majorStartOverhang: float,majorEndOverhang: float,numberOfWires: int)
Sets the minor layout pattern as FixedNumber, while specifying the needed
parameters for this pattern.
overallLength: The entire length of the wire sheet in the major direction.
majorStartOverhang: The distance from the edge of the sheet to the first wire in the major
direction.
majorEndOverhang: The distance from the last wire to the edge of the sheet in the major direction.
numberOfWires: The number of the wires to set in the minor direction.
SetMinorLayoutAsMaximumSpacing(self: FabricSheetType,overallLength: float,majorStartOverhang: float,majorEndOverhang: float,spacing: float)
Sets the minor layout pattern as MaximumSpacing, while specifying the needed
parameters for this pattern.
overallLength: The entire length of the wire sheet in the major direction.
majorStartOverhang: The distance from the edge of the sheet to the first wire in the major
direction.
majorEndOverhang: The distance from the last wire to the edge of the sheet in the major direction.
spacing: The distance between the wires in the minor direction.
SetMinorLayoutAsNumberWithSpacing(self: FabricSheetType,overallLength: float,majorStartOverhang: float,numberOfWires: int,spacing: float)
Sets the minor layout pattern as NumberWithSpacing, while specifying the needed
parameters for this pattern.
overallLength: The entire length of the wire sheet in the major direction.
majorStartOverhang: The distance from the edge of the sheet to the first wire in the major
direction.
numberOfWires: The number of wires in the minor direction.
spacing: The distance between the wires in the minor direction.
__enter__(self: IDisposable) -> object
__exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object)
x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature
getBoundingBox(self: Element,view: View) -> BoundingBoxXYZ
setElementType(self: Element,type: ElementType,incompatibleExceptionMessage: str) | 6,656 | en | 0.621795 |
from __future__ import unicode_literals
from xml.dom import minidom
from django.contrib.syndication import views
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from django.utils import tzinfo
from django.utils.feedgenerator import rfc2822_date, rfc3339_date
from .models import Entry
class FeedTestCase(TestCase):
    """Base test case providing XML-assertion helpers for feed tests."""
    fixtures = ['feeddata.json']

    def assertChildNodes(self, elem, expected):
        """Assert that the names of elem's child nodes equal `expected`."""
        self.assertEqual({node.nodeName for node in elem.childNodes},
                         set(expected))

    def assertChildNodeContent(self, elem, expected):
        """Assert that each named child of elem carries the given text."""
        for tag, text in expected.items():
            self.assertEqual(
                elem.getElementsByTagName(tag)[0].firstChild.wholeText, text)

    def assertCategories(self, elem, expected):
        """Assert that elem's 'category' children hold exactly `expected`."""
        actual = {child.firstChild.wholeText
                  for child in elem.childNodes if child.nodeName == 'category'}
        self.assertEqual(actual, set(expected))
######################################
# Feed view
######################################
class SyndicationFeedTest(FeedTestCase):
    """
    Tests for the high-level syndication feed framework.
    """
    urls = 'syndication.urls'

    def test_rss2_feed(self):
        """
        Test the structure and content of feeds generated by Rss201rev2Feed.
        """
        response = self.client.get('/syndication/rss2/')
        doc = minidom.parseString(response.content)

        # Making sure there's only 1 `rss` element and that the correct
        # RSS version was specified.
        feed_elem = doc.getElementsByTagName('rss')
        self.assertEqual(len(feed_elem), 1)
        feed = feed_elem[0]
        self.assertEqual(feed.getAttribute('version'), '2.0')

        # Making sure there's only one `channel` element w/in the
        # `rss` element.
        chan_elem = feed.getElementsByTagName('channel')
        self.assertEqual(len(chan_elem), 1)
        chan = chan_elem[0]

        # Find the last build date
        d = Entry.objects.latest('published').published
        ltz = tzinfo.LocalTimezone(d)
        last_build_date = rfc2822_date(d.replace(tzinfo=ltz))

        self.assertChildNodes(chan, ['title', 'link', 'description', 'language', 'lastBuildDate', 'item', 'atom:link', 'ttl', 'copyright', 'category'])
        self.assertChildNodeContent(chan, {
            'title': 'My blog',
            'description': 'A more thorough description of my blog.',
            'link': 'http://example.com/blog/',
            'language': 'en',
            'lastBuildDate': last_build_date,
            #'atom:link': '',
            'ttl': '600',
            'copyright': 'Copyright (c) 2007, Sally Smith',
        })
        self.assertCategories(chan, ['python', 'django'])

        # Ensure the content of the channel is correct
        self.assertChildNodeContent(chan, {
            'title': 'My blog',
            'link': 'http://example.com/blog/',
        })

        # Check feed_url is passed
        self.assertEqual(
            chan.getElementsByTagName('atom:link')[0].getAttribute('href'),
            'http://example.com/syndication/rss2/'
        )

        # Find the pubdate of the first feed item
        d = Entry.objects.get(pk=1).published
        ltz = tzinfo.LocalTimezone(d)
        pub_date = rfc2822_date(d.replace(tzinfo=ltz))

        items = chan.getElementsByTagName('item')
        self.assertEqual(len(items), Entry.objects.count())
        self.assertChildNodeContent(items[0], {
            'title': 'My first entry',
            'description': 'Overridden description: My first entry',
            'link': 'http://example.com/blog/1/',
            'guid': 'http://example.com/blog/1/',
            'pubDate': pub_date,
            'author': 'test@example.com (Sally Smith)',
        })
        self.assertCategories(items[0], ['python', 'testing'])
        for item in items:
            self.assertChildNodes(item, ['title', 'link', 'description', 'guid', 'category', 'pubDate', 'author'])
            # Assert that <guid> does not have any 'isPermaLink' attribute
            self.assertIsNone(item.getElementsByTagName(
                'guid')[0].attributes.get('isPermaLink'))

    def test_rss2_feed_guid_permalink_false(self):
        """
        Test if the 'isPermaLink' attribute of <guid> element of an item
        in the RSS feed is 'false'.
        """
        response = self.client.get(
            '/syndication/rss2/guid_ispermalink_false/')
        doc = minidom.parseString(response.content)
        chan = doc.getElementsByTagName(
            'rss')[0].getElementsByTagName('channel')[0]
        items = chan.getElementsByTagName('item')
        for item in items:
            self.assertEqual(
                item.getElementsByTagName('guid')[0].attributes.get(
                    'isPermaLink').value, "false")

    def test_rss2_feed_guid_permalink_true(self):
        """
        Test if the 'isPermaLink' attribute of <guid> element of an item
        in the RSS feed is 'true'.
        """
        response = self.client.get(
            '/syndication/rss2/guid_ispermalink_true/')
        doc = minidom.parseString(response.content)
        chan = doc.getElementsByTagName(
            'rss')[0].getElementsByTagName('channel')[0]
        items = chan.getElementsByTagName('item')
        for item in items:
            self.assertEqual(
                item.getElementsByTagName('guid')[0].attributes.get(
                    'isPermaLink').value, "true")

    def test_rss091_feed(self):
        """
        Test the structure and content of feeds generated by RssUserland091Feed.
        """
        response = self.client.get('/syndication/rss091/')
        doc = minidom.parseString(response.content)

        # Making sure there's only 1 `rss` element and that the correct
        # RSS version was specified.
        feed_elem = doc.getElementsByTagName('rss')
        self.assertEqual(len(feed_elem), 1)
        feed = feed_elem[0]
        self.assertEqual(feed.getAttribute('version'), '0.91')

        # Making sure there's only one `channel` element w/in the
        # `rss` element.
        chan_elem = feed.getElementsByTagName('channel')
        self.assertEqual(len(chan_elem), 1)
        chan = chan_elem[0]
        self.assertChildNodes(chan, ['title', 'link', 'description', 'language', 'lastBuildDate', 'item', 'atom:link', 'ttl', 'copyright', 'category'])

        # Ensure the content of the channel is correct
        self.assertChildNodeContent(chan, {
            'title': 'My blog',
            'link': 'http://example.com/blog/',
        })
        self.assertCategories(chan, ['python', 'django'])

        # Check feed_url is passed
        self.assertEqual(
            chan.getElementsByTagName('atom:link')[0].getAttribute('href'),
            'http://example.com/syndication/rss091/'
        )

        items = chan.getElementsByTagName('item')
        self.assertEqual(len(items), Entry.objects.count())
        self.assertChildNodeContent(items[0], {
            'title': 'My first entry',
            'description': 'Overridden description: My first entry',
            'link': 'http://example.com/blog/1/',
        })
        for item in items:
            self.assertChildNodes(item, ['title', 'link', 'description'])
            self.assertCategories(item, [])

    def test_atom_feed(self):
        """
        Test the structure and content of feeds generated by Atom1Feed.
        """
        response = self.client.get('/syndication/atom/')
        feed = minidom.parseString(response.content).firstChild

        self.assertEqual(feed.nodeName, 'feed')
        self.assertEqual(feed.getAttribute('xmlns'), 'http://www.w3.org/2005/Atom')
        self.assertChildNodes(feed, ['title', 'subtitle', 'link', 'id', 'updated', 'entry', 'rights', 'category', 'author'])
        for link in feed.getElementsByTagName('link'):
            if link.getAttribute('rel') == 'self':
                self.assertEqual(link.getAttribute('href'), 'http://example.com/syndication/atom/')

        entries = feed.getElementsByTagName('entry')
        self.assertEqual(len(entries), Entry.objects.count())
        for entry in entries:
            self.assertChildNodes(entry, [
                'title',
                'link',
                'id',
                'summary',
                'category',
                'updated',
                'published',
                'rights',
                'author',
            ])
            summary = entry.getElementsByTagName('summary')[0]
            self.assertEqual(summary.getAttribute('type'), 'html')

    def test_atom_feed_published_and_updated_elements(self):
        """
        Test that the published and updated elements are not
        the same and now adhere to RFC 4287.
        """
        response = self.client.get('/syndication/atom/')
        feed = minidom.parseString(response.content).firstChild
        entries = feed.getElementsByTagName('entry')

        published = entries[0].getElementsByTagName('published')[0].firstChild.wholeText
        updated = entries[0].getElementsByTagName('updated')[0].firstChild.wholeText

        self.assertNotEqual(published, updated)

    def test_latest_post_date(self):
        """
        Test that both the published and updated dates are
        considered when determining the latest post date.
        """
        # this feed has a `published` element with the latest date
        response = self.client.get('/syndication/atom/')
        feed = minidom.parseString(response.content).firstChild
        updated = feed.getElementsByTagName('updated')[0].firstChild.wholeText

        d = Entry.objects.latest('published').published
        ltz = tzinfo.LocalTimezone(d)
        latest_published = rfc3339_date(d.replace(tzinfo=ltz))

        self.assertEqual(updated, latest_published)

        # this feed has an `updated` element with the latest date
        response = self.client.get('/syndication/latest/')
        feed = minidom.parseString(response.content).firstChild
        updated = feed.getElementsByTagName('updated')[0].firstChild.wholeText

        d = Entry.objects.exclude(pk=5).latest('updated').updated
        ltz = tzinfo.LocalTimezone(d)
        latest_updated = rfc3339_date(d.replace(tzinfo=ltz))

        self.assertEqual(updated, latest_updated)

    def test_custom_feed_generator(self):
        """
        Test feeds generated by a custom feed generator class (extra
        attributes and elements must come through).
        """
        response = self.client.get('/syndication/custom/')
        feed = minidom.parseString(response.content).firstChild

        self.assertEqual(feed.nodeName, 'feed')
        self.assertEqual(feed.getAttribute('django'), 'rocks')
        self.assertChildNodes(feed, ['title', 'subtitle', 'link', 'id', 'updated', 'entry', 'spam', 'rights', 'category', 'author'])

        entries = feed.getElementsByTagName('entry')
        self.assertEqual(len(entries), Entry.objects.count())
        for entry in entries:
            self.assertEqual(entry.getAttribute('bacon'), 'yum')
            self.assertChildNodes(entry, [
                'title',
                'link',
                'id',
                'summary',
                'ministry',
                'rights',
                'author',
                'updated',
                'published',
                'category',
            ])
            summary = entry.getElementsByTagName('summary')[0]
            self.assertEqual(summary.getAttribute('type'), 'html')

    def test_title_escaping(self):
        """
        Tests that titles are escaped correctly in RSS feeds.
        """
        response = self.client.get('/syndication/rss2/')
        doc = minidom.parseString(response.content)
        for item in doc.getElementsByTagName('item'):
            link = item.getElementsByTagName('link')[0]
            if link.firstChild.wholeText == 'http://example.com/blog/4/':
                title = item.getElementsByTagName('title')[0]
                self.assertEqual(title.firstChild.wholeText, 'A & B < C > D')

    def test_naive_datetime_conversion(self):
        """
        Test that datetimes are correctly converted to the local time zone.
        """
        # Naive date times passed in get converted to the local time zone, so
        # check the received zone offset against the local offset.
        response = self.client.get('/syndication/naive-dates/')
        doc = minidom.parseString(response.content)
        updated = doc.getElementsByTagName('updated')[0].firstChild.wholeText

        d = Entry.objects.latest('published').published
        ltz = tzinfo.LocalTimezone(d)
        latest = rfc3339_date(d.replace(tzinfo=ltz))

        self.assertEqual(updated, latest)

    def test_aware_datetime_conversion(self):
        """
        Test that datetimes with timezones don't get trodden on.
        """
        response = self.client.get('/syndication/aware-dates/')
        doc = minidom.parseString(response.content)
        published = doc.getElementsByTagName('published')[0].firstChild.wholeText
        self.assertEqual(published[-6:], '+00:42')

    def test_feed_last_modified_time(self):
        """
        Test that the Last-Modified header tracks the newest item's pubdate,
        and is omitted when no item pubdate is available.
        """
        response = self.client.get('/syndication/naive-dates/')
        self.assertEqual(response['Last-Modified'], 'Tue, 26 Mar 2013 01:00:00 GMT')

        # No last-modified when feed has no item_pubdate
        response = self.client.get('/syndication/no_pubdate/')
        self.assertFalse(response.has_header('Last-Modified'))

    def test_feed_url(self):
        """
        Test that the feed_url can be overridden.
        """
        response = self.client.get('/syndication/feedurl/')
        doc = minidom.parseString(response.content)
        for link in doc.getElementsByTagName('link'):
            if link.getAttribute('rel') == 'self':
                self.assertEqual(link.getAttribute('href'), 'http://example.com/customfeedurl/')

    def test_secure_urls(self):
        """
        Test URLs are prefixed with https:// when feed is requested over HTTPS.
        """
        response = self.client.get('/syndication/rss2/', **{
            'wsgi.url_scheme': 'https',
        })
        doc = minidom.parseString(response.content)
        chan = doc.getElementsByTagName('channel')[0]
        self.assertEqual(
            chan.getElementsByTagName('link')[0].firstChild.wholeText[0:5],
            'https'
        )
        atom_link = chan.getElementsByTagName('atom:link')[0]
        self.assertEqual(atom_link.getAttribute('href')[0:5], 'https')
        for link in doc.getElementsByTagName('link'):
            if link.getAttribute('rel') == 'self':
                self.assertEqual(link.getAttribute('href')[0:5], 'https')

    def test_item_link_error(self):
        """
        Test that an ImproperlyConfigured is raised if no link could be found
        for the item(s).
        """
        self.assertRaises(ImproperlyConfigured,
                          self.client.get,
                          '/syndication/articles/')

    def test_template_feed(self):
        """
        Test that the item title and description can be overridden with
        templates.
        """
        response = self.client.get('/syndication/template/')
        doc = minidom.parseString(response.content)
        feed = doc.getElementsByTagName('rss')[0]
        chan = feed.getElementsByTagName('channel')[0]
        items = chan.getElementsByTagName('item')

        self.assertChildNodeContent(items[0], {
            'title': 'Title in your templates: My first entry',
            'description': 'Description in your templates: My first entry',
            'link': 'http://example.com/blog/1/',
        })

    def test_template_context_feed(self):
        """
        Test that custom context data can be passed to templates for title
        and description.
        """
        response = self.client.get('/syndication/template_context/')
        doc = minidom.parseString(response.content)
        feed = doc.getElementsByTagName('rss')[0]
        chan = feed.getElementsByTagName('channel')[0]
        items = chan.getElementsByTagName('item')

        self.assertChildNodeContent(items[0], {
            'title': 'My first entry (foo is bar)',
            'description': 'My first entry (foo is bar)',
        })

    def test_add_domain(self):
        """
        Test add_domain() prefixes domains onto the correct URLs.
        """
        self.assertEqual(
            views.add_domain('example.com', '/foo/?arg=value'),
            'http://example.com/foo/?arg=value'
        )
        self.assertEqual(
            views.add_domain('example.com', '/foo/?arg=value', True),
            'https://example.com/foo/?arg=value'
        )
        self.assertEqual(
            views.add_domain('example.com', 'http://djangoproject.com/doc/'),
            'http://djangoproject.com/doc/'
        )
        self.assertEqual(
            views.add_domain('example.com', 'https://djangoproject.com/doc/'),
            'https://djangoproject.com/doc/'
        )
        self.assertEqual(
            views.add_domain('example.com', 'mailto:uhoh@djangoproject.com'),
            'mailto:uhoh@djangoproject.com'
        )
        self.assertEqual(
            views.add_domain('example.com', '//example.com/foo/?arg=value'),
            'http://example.com/foo/?arg=value'
        )
| tests/syndication/tests.py | 17,283 | Tests for the high-level syndication feed framework.
Test add_domain() prefixes domains onto the correct URLs.
Test the structure and content of feeds generated by Atom1Feed.
Test that the published and updated elements are not
the same and now adhere to RFC 4287.
Test that datetimes with timezones don't get trodden on.
Test that the feed_url can be overridden.
Test that an ImproperlyConfigured is raised if no link could be found
for the item(s).
Test that both the published and updated dates are
considered when determining the latest post date.
Test that datetimes are correctly converted to the local time zone.
Test the structure and content of feeds generated by RssUserland091Feed.
Test the structure and content of feeds generated by Rss201rev2Feed.
Test if the 'isPermaLink' attribute of <guid> element of an item
in the RSS feed is 'false'.
Test if the 'isPermaLink' attribute of <guid> element of an item
in the RSS feed is 'true'.
Test URLs are prefixed with https:// when feed is requested over HTTPS.
Test that custom context data can be passed to templates for title
and description.
Test that the item title and description can be overridden with
templates.
Tests that titles are escaped correctly in RSS feeds.
Feed view Making sure there's only 1 `rss` element and that the correct RSS version was specified. Making sure there's only one `channel` element w/in the `rss` element. Find the last build date'atom:link': '', Ensure the content of the channel is correct Check feed_url is passed Find the pubdate of the first feed item Assert that <guid> does not have any 'isPermaLink' attribute Making sure there's only 1 `rss` element and that the correct RSS version was specified. Making sure there's only one `channel` element w/in the `rss` element. Ensure the content of the channel is correct Check feed_url is passed this feed has a `published` element with the latest date this feed has an `updated` element with the latest date Naive date times passed in get converted to the local time zone, so check the received zone offset against the local offset. No last-modified when feed has no item_pubdate
# Copyright (c) 2011 The Chromium Embedded Framework Authors. All rights
# reserved. Use of this source code is governed by a BSD-style license that
# can be found in the LICENSE file.
from cef_parser import *
def make_function_body_block(cls):
    """Return declaration lines for all virtual methods of a single class."""
    # Client-side wrappers use lowercase 'override'; library-side uses the macro.
    suffix = ' override;\n' if cls.is_client_side() else ' OVERRIDE;\n'
    lines = [' // ' + cls.get_name() + ' methods.\n']
    for func in cls.get_virtual_funcs():
        lines.append(' ' + func.get_cpp_proto() + suffix)
    return ''.join(lines)
def make_function_body(header, cls):
    """Return method declarations for cls plus every non-base ancestor class."""
    impl = make_function_body_block(cls)

    # Walk up the inheritance chain, appending one block per ancestor,
    # until the base class is reached.
    cur_cls = cls
    while True:
        parent_name = cur_cls.get_parent_name()
        if is_base_class(parent_name):
            return impl
        parent_cls = header.get_class(parent_name)
        if parent_cls is None:
            raise Exception('Class does not exist: ' + parent_name)
        # Separate each class's section with a blank line.
        if impl:
            impl += '\n'
        impl += make_function_body_block(parent_cls)
        cur_cls = header.get_class(parent_name)
def make_ctocpp_header(header, clsname):
    """Generate the full text of the libcef_dll CToCpp wrapper header for clsname."""
    cls = header.get_class(clsname)
    if cls is None:
        raise Exception('Class does not exist: ' + clsname)

    clientside = cls.is_client_side()

    # Build the include-guard macro name from the optional directory + class name.
    directory = cls.get_file_directory()
    defname = ''
    if not directory is None:
        defname += directory + '_'
    defname += get_capi_name(clsname[3:], False)
    defname = defname.upper()

    capiname = cls.get_capi_name()

    result = get_copyright()

    result += '#ifndef CEF_LIBCEF_DLL_CTOCPP_'+defname+'_CTOCPP_H_\n'+ \
              '#define CEF_LIBCEF_DLL_CTOCPP_'+defname+'_CTOCPP_H_\n' + \
              '#pragma once\n'

    # Guard against including the header on the wrong side of the DLL boundary.
    if clientside:
        result += """
#if !defined(BUILDING_CEF_SHARED)
#error This file can be included DLL-side only
#endif
"""
    else:
        result += """
#if !defined(WRAPPING_CEF_SHARED)
#error This file can be included wrapper-side only
#endif
"""

    # build the function body
    func_body = make_function_body(header, cls)

    # include standard headers
    if func_body.find('std::map') > 0 or func_body.find('std::multimap') > 0:
        result += '\n#include <map>'
    if func_body.find('std::vector') > 0:
        result += '\n#include <vector>'

    # include the headers for this class
    result += '\n#include "include/'+cls.get_file_name()+'"'+ \
              '\n#include "include/capi/'+cls.get_capi_file_name()+'"\n'

    # include headers for any forward declared classes that are not in the same file
    declares = cls.get_forward_declares()
    for declare in declares:
        dcls = header.get_class(declare)
        if dcls.get_file_name() != cls.get_file_name():
            result += '#include "include/'+dcls.get_file_name()+'"\n' \
                      '#include "include/capi/'+dcls.get_capi_file_name()+'"\n'

    # Scoped (non-ref-counted) classes use a different CToCpp template.
    base_class_name = header.get_base_class_name(clsname)
    base_scoped = True if base_class_name == 'CefBaseScoped' else False
    if base_scoped:
        template_file = 'ctocpp_scoped.h'
        template_class = 'CefCToCppScoped'
    else:
        template_file = 'ctocpp_ref_counted.h'
        template_class = 'CefCToCppRefCounted'

    result += '#include "libcef_dll/ctocpp/' + template_file + '"'

    result += '\n\n// Wrap a C structure with a C++ class.\n'
    if clientside:
        result += '// This class may be instantiated and accessed DLL-side only.\n'
    else:
        result += '// This class may be instantiated and accessed wrapper-side only.\n'

    # Emit the class declaration with the accumulated method prototypes.
    result += 'class '+clsname+'CToCpp\n'+ \
              ' : public ' + template_class + '<'+clsname+'CToCpp, '+clsname+', '+capiname+'> {\n'+ \
              ' public:\n'+ \
              ' '+clsname+'CToCpp();\n\n'

    result += func_body
    result += '};\n\n'

    result += '#endif // CEF_LIBCEF_DLL_CTOCPP_' + defname + '_CTOCPP_H_'
    return result
def write_ctocpp_header(header, clsname, dir):
    """Return a (path, contents) pair for the generated CToCpp header of clsname."""
    # give the output file the same directory offset as the input file
    cls = header.get_class(clsname)
    target_dir = os.path.dirname(os.path.join(dir, cls.get_file_name()))
    out_file = os.path.join(target_dir,
                            get_capi_name(clsname[3:], False) + '_ctocpp.h')
    return (out_file, make_ctocpp_header(header, clsname))
# test the module: generate a single header from the command line
if __name__ == "__main__":
    import sys

    # verify that the correct number of command-line arguments are provided
    if len(sys.argv) < 3:
        sys.stderr.write('Usage: ' + sys.argv[0] + ' <infile> <classname>')
        sys.exit()

    # create the header object
    header = obj_header()
    header.add_file(sys.argv[1])

    # dump the result to stdout
    sys.stdout.write(make_ctocpp_header(header, sys.argv[2]))
| tools/make_ctocpp_header.py | 4,505 | Copyright (c) 2011 The Chromium Embedded Framework Authors. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the LICENSE file. build the function body include standard headers include the headers for this class include headers for any forward declared classes that are not in the same file give the output file the same directory offset as the input file test the module verify that the correct number of command-line arguments are provided create the header object dump the result to stdout | 543 | en | 0.8557 |
# The MIT License (MIT)
#
# Copyright (c) 2015, Nicolas Sebrecht & contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import sys
def testingPath():
return os.path.join(
os.path.abspath(sys.modules['imapfw'].__path__[0]),
'testing')
| imapfw/testing/libcore.py | 1,284 | The MIT License (MIT) Copyright (c) 2015, Nicolas Sebrecht & contributors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | 1,094 | en | 0.851754 |
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 07 14:08:31 2016
@author: Mic
"""
from __future__ import division
from wiselib2.must import *
import numpy as np
import wiselib2.Rayman as rm
Gauss1d = lambda x ,y : None
from scipy import interpolate as interpolate
from matplotlib import pyplot as plt
class PsdFuns:
    '''
    Collection of callable Power Spectral Density (PSD) models.

    Each static method evaluates a PSD on the abscissa array ``x``.
    The most commonly used ones are
        PsdFuns.PowerLaw(x, a, b)
        PsdFuns.Interp(x, xData, yData)
    '''

    @staticmethod
    def Flat(x, *args):
        # Constant (white) PSD: a (1, N) array of ones, with N = len(x).
        return np.zeros([1, len(x)]) + 1

    @staticmethod
    def PowerLaw(x, a, b):
        # PSD(x) = a * x**b
        return a * x**b

    @staticmethod
    def Gaussian(x, sigma, x0=0):
        # Unnormalised Gaussian of width ``sigma`` centred on ``x0``.
        return np.exp(-0.5 * (x - x0)**2 / sigma**2)

    @staticmethod
    def Interp(x, xData, yData):
        # Linear interpolation of the tabulated (xData, yData) onto x.
        return interpolate.interp1d(xData, yData)(x)
def PsdFun2Noise_1d(N,dx, PsdFun, PsdArgs):
    '''
    Generates a noise pattern based on the Power Spectral Density returned
    by PsdFun.
    '''
    # Positive half-axis of spatial frequencies (step dx, up to N//2).
    x = np.arange(0,N//2+1, dx)
    yHalf = PsdFun(x, *PsdArgs)
    # NOTE(review): Psd2NoisePattern_1d is not defined anywhere in this
    # module (only Psd2Noise_1d exists, and it takes an extra mandatory N
    # argument), so this call raises NameError at runtime — confirm the
    # intended target before using this function.
    y = Psd2NoisePattern_1d(yHalf, Semiaxis = True )
    return x,y
#============================================================================
# FUN: PsdArray2Noise_1d_v2
#============================================================================
def PsdArray2Noise_1d_v2(f_in, Psd_in, L_mm,N):
    '''
    Build a roughness profile (in meters) from a tabulated PSD
    (f_in, Psd_in) by spline-resampling the PSD, attaching random phases
    and inverse-transforming.

    Side effects: draws diagnostic plots with matplotlib and prints
    intermediate statistics — this looks like debug/experimental code.
    '''
    from scipy import interpolate
    # Local shorthands for the numpy routines used below.
    log=np.log
    fft = np.fft.fft
    fftshift = np.fft.fftshift
    ff = f_in
    yy = Psd_in
    L = L_mm
    N = int(N)
    N2 = int(N//2)
    # NOTE(review): this hard-coded value overwrites the L_mm argument
    # assigned two lines above, so the caller's length is ignored —
    # almost certainly leftover debug code; confirm before relying on it.
    L =300 # (mm)
    L_um = L*1e3
    L_nm = L*1e6
    fMin = 1/L_um
    ## old line:
    ##fSpline = (np.array(range(N2))+1)/L_um # um^-1
    # Evenly spaced frequency axis spanning the tabulated range.
    fSpline = np.arange(N2)/N2 * (max(ff) - min(ff)) + min(ff)
    # Smoothing spline fitted in log-log space, then evaluated and
    # exponentiated back to linear PSD values.
    fun = interpolate.splrep(log(ff), log(yy), s=2)
    yPsd_log = interpolate.splev(log(fSpline), fun)
    ySpline = np.exp(yPsd_log)
    yPsd = ySpline
    # Clamp values below the tabulated range.
    # NOTE(review): the magic constant 200 is not explained anywhere.
    yPsd[fSpline<ff[0]] = 200
    n = len(yPsd)
    # Diagnostic plot: spline-resampled PSD vs original data.
    plt.plot(fSpline, yPsd,'-')
    plt.plot(ff, yy,'x')
    plt.legend(['ySpline','Data'])
    ax = plt.axes()
    #ax.set_yscale('log')
    #ax.set_xscale('log')
    #% RMS check by integrating yPsd
    import scipy.integrate as integrate
    RMS = np.sqrt(integrate.trapz(yPsd, fSpline/1000))
    #% "Manfredda-style" normalisation
    #yPsdNorm = np.sqrt(yPsd/L_um/1000)
    #yPsdNorm_reverse = yPsdNorm[::-1]
    yPsd_reverse = yPsd[::-1]
    ell= 1/(fSpline[1] - fSpline[0])
    # Mirror the PSD onto negative frequencies (DC sample set to 0).
    if N%2 == 0:
        yPsd2 = np.hstack((yPsd_reverse ,0,yPsd[0:-1]))
    else:
        yPsd2 = np.hstack((yPsd_reverse ,0,yPsd))
    ##yPsd2Norm = np.sqrt(yPsd2/ell/1000/2)
    yPsd2Norm = np.sqrt(yPsd2/ell/1000)
    n_ = len(yPsd2)
    print('len(yPsd2) = %0.2d' % len(yPsd2Norm))
    # Uniform random phases -> one random realization of the profile.
    phi = 2*np.pi * np.random.rand(n_)
    r = np.exp(1j*phi)
    yPsd2Norm_ = fftshift(yPsd2Norm)
    #yPsd2Norm_[len(yPsd2Norm_)//2] = 0
    yRaf = np.fft.fft(r*yPsd2Norm_)
    yRaf = np.real(yRaf)
    print('Rms = %0.2e nm' % np.std(yRaf))
    plt.plot(yPsd2Norm_)
    print('max yPsd_ = %d nm' % max(yPsd2))
    print('max yPsd2Norm = %0.4f nm' % max(yPsd2Norm))
    print('Rms yRaf2 = %0.2e nm' % np.std(yRaf))
    # Values are computed in nm; convert to meters.
    return yRaf * 1e-9
#============================================================================
# FUN: Psd2Noise
#============================================================================
def PsdArray2Noise_1d(PsdArray, N, Semiaxis = True, Real = True):
    '''
    Generates a noise pattern whose Power Spectral Density is given by
    PsdArray.

    Parameters
    ---------------------
    PsdArray : 1d array
        Contains the numeric Psd (treated as evenly spaced array)
    N : int
        Number of output samples.
    Semiaxis : bool
        If True (default), PsdArray is the positive-frequency half of the
        spectrum and is mirrored onto the negative frequencies (the DC
        sample is not duplicated). If False, PsdArray must already cover
        the full frequency axis and is used as-is.
    Real : boolean
        If True, the real part of the output is returned (default)

    Returns:
    ---------------------
    An array of length N (real if Real is True, complex otherwise).
    '''
    if Semiaxis == True:
        yHalf = PsdArray
        # Mirror the positive half onto negative frequencies.
        PsdArrayNew = np.hstack((yHalf[-1:0:-1], yHalf))
        idelta = len(PsdArrayNew) - N
        if idelta == 1:  # one sample too long: drop the last one
            PsdArrayNew = PsdArrayNew[0:-1]
        elif idelta == 0:  # length already matches
            pass
        else:
            print('Error! len(PsdArrayNew) - len(PsdArray) = %0d' % idelta)
    else:
        # Fix: the original code left PsdArrayNew undefined on this branch,
        # so Semiaxis=False always raised NameError. Per the docstring
        # ("0 : does nothing"), the full-axis input is used unchanged.
        PsdArrayNew = PsdArray
    y = np.fft.fftshift(PsdArrayNew)
    # Uniform random phases produce one realization of the noise whose
    # amplitude spectrum follows the PSD.
    r = 2*np.pi * np.random.rand(len(PsdArrayNew))
    f = np.fft.ifft(y * np.exp(1j*r))
    if Real:
        return np.real(f)
    else:
        return f

Psd2Noise_1d = PsdArray2Noise_1d
#============================================================================
# FUN: NoNoise_1d
#============================================================================
def NoNoise_1d(N, *args):
    """Return a (1, N) array of zeros: the 'no roughness' profile.

    Extra positional arguments are accepted (and ignored) so the function
    matches the calling convention of the other *_1d noise generators.
    """
    flat = np.zeros([1, N])
    return flat
#============================================================================
# FUN: GaussianNoise_1d
#============================================================================
def GaussianNoise_1d(N,dx, Sigma):
    '''
    Noise whose PSD is Gaussian: PSD(f) = exp(-0.5 * f**2 / Sigma**2).

    Parameters
    ----------
    N : int
        Number of output samples.
    dx : float
        Grid spacing of the (frequency) axis.
    Sigma : float
        Width of the Gaussian PSD.

    Returns
    -------
    1d array of N real samples.
    '''
    # Symmetric axis centred on 0.
    # Fix: the original upper bound was ``N//2-1*dx`` which, by operator
    # precedence, computes N//2 - dx instead of (N//2 - 1)*dx.
    x = np.linspace(-(N // 2) * dx, (N // 2 - 1) * dx, N)
    y = np.exp(-0.5 * x**2 / Sigma**2)
    # Fix: the original called the undefined Psd2NoisePattern_1d (NameError).
    # The synthesis is done inline: random phases on the full-axis PSD,
    # then an inverse FFT; the real part is the noise realization.
    phase = 2 * np.pi * np.random.rand(N)
    return np.real(np.fft.ifft(np.fft.fftshift(y) * np.exp(1j * phase)))
#============================================================================
# FUN: PowerLawNoise_1d
#============================================================================
def PowerLawNoise_1d(N, dx, a, b):
    '''
    Noise whose PSD follows a power law: PSD(x) = a * x**b.

    Parameters
    ----------
    N : int
        Number of output samples.
    dx : float
        Grid spacing of the (frequency) axis.
    a, b : float
        Power-law coefficient and exponent.

    Returns
    -------
    1d array of N real samples.
    '''
    # Positive-frequency half axis; Psd2Noise_1d mirrors it onto the
    # negative frequencies.
    x = np.arange(0,N//2+1, dx)
    yHalf = a * x**b
    # Fix: the original returned Psd2NoisePattern_1d(y, ...) where both the
    # function and the variable ``y`` are undefined (the mirroring line was
    # commented out), so every call raised NameError. Psd2Noise_1d performs
    # exactly that mirroring when Semiaxis=True.
    return Psd2Noise_1d(yHalf, N, Semiaxis = True)
#============================================================================
# FUN: CustomNoise_1d
#============================================================================
def CustomNoise_1d(N, dx, xPsd, yPsd):
    # Resample the tabulated PSD (xPsd, yPsd) onto N points.
    xPsd_, yPsd_ = rm.FastResample1d(xPsd, yPsd,N)
    # NOTE(review): Psd2NoisePattern_1d is not defined in this module (only
    # Psd2Noise_1d exists, with a different signature), so this call raises
    # NameError at runtime — confirm the intended target.
    return Psd2NoisePattern_1d(yPsd_, Semiaxis = True)
#============================================================================
# CLASS: NoiseGenerator
#============================================================================
class PsdGenerator:
    # Namespace grouping the module-level noise generators under short
    # names; each attribute is one of the *_1d functions above.
    NoNoise = staticmethod(NoNoise_1d)
    Gauss = staticmethod(GaussianNoise_1d)
    PowerLaw = staticmethod(PowerLawNoise_1d)
    NumericArray = staticmethod(CustomNoise_1d)
#============================================================================
# FUN: FitPowerLaw
#============================================================================
def FitPowerLaw(x,y):
    '''
    Least-squares fit of the data to the model  y = a * x**b.

    Returns
    -------
    (a, b) : the fitted coefficient and exponent.
    '''
    import scipy.optimize as optimize

    def residuals(p, xv, yv):
        # Difference between the data and the power-law model a * x**b.
        return yv - p[0] * xv ** p[1]

    # Start from the largest y value and a decaying exponent.
    guess = [max(y), -1.0]
    fit = optimize.leastsq(residuals, guess, args=(x, y), full_output=1)
    solution = fit[0]
    return solution[0], solution[1]
#==============================================================================
# CLASS: RoughnessMaker
#==============================================================================
class RoughnessMaker(object):
    '''
    Generates 1d roughness profiles from a Power Spectral Density (PSD).

    The PSD model is selected via .PsdType (one of the PsdFuns callables)
    together with .PsdParams; tabulated PSDs are loaded with
    NumericPsdLoadXY(), which switches .PsdType to PsdFuns.Interp.
    '''
    class Options():
        # Behaviour switches consulted by the methods below.
        FIT_NUMERIC_DATA_WITH_POWER_LAW = True
        AUTO_ZERO_MEAN_FOR_NUMERIC_DATA = True
        AUTO_FILL_NUMERIC_DATA_WITH_ZERO = True
        AUTO_RESET_CUTOFF_ON_PSDTYPE_CHANGE = True
    def __init__(self):
        self.PsdType = PsdFuns.PowerLaw
        self.PsdParams = np.array([1,1])
        # None until NumericPsdLoadXY is called.
        self._IsNumericPsdInFreq = None
        self.CutoffLowHigh = [None, None]
        # Scale factor applied to the output of MakeProfile() only.
        self.ProfileScaling = 1
        return None
    @property
    def PsdType(self):
        return self._PsdType
    @PsdType.setter
    def PsdType(self, Val):
        '''
        Note: each time the property value is set, self.PsdCutoffLowHigh is
        reset, if so specified by Options.
        '''
        self. _PsdType = Val
        if self.Options.AUTO_RESET_CUTOFF_ON_PSDTYPE_CHANGE == True:
            self.PsdCutoffLowHigh = [None, None]
    #======================================================================
    # 	FUN: PdfEval
    #======================================================================
    def PsdEval(self, N, df, CutoffLowHigh = [None, None]):
        '''
        Evals the PSD in the range [0 - N*df].
        It's good custom to have PSD[0] = 0, so that the noise pattern is
        zero-mean.

        Parameters:
        ----------------------
        N : int
            # of samples
        df : float
            spacing of spatial frequencies (df=1/TotalLength)
        CutoffLowHigh : [LowCutoff, HighCutoff]
            if >0, then Psd(f<Cutoff) is set to 0.
            if None, then LowCutoff = min()

        Returns : fAll, yPsdAll
        ----------------------
        fAll : 1d array
            contains the spatial frequencies
        yPsd : 1d array
            contains the Psd
        '''
        '''
        The Psd is evaluated only within LowCutoff and HighCutoff.
        If the Psd is PsdFuns.Interp, then LowCutoff and HighCutoff are
        automatically set to min and max values of the experimental data.
        '''
        # NOTE(review): the [None, None] default is a shared mutable list.
        # It is not mutated here, but a None default would be safer.
        StrMessage = ''
        def GetInRange(fAll, LowCutoff, HighCutoff):
            # Boolean mask and values of fAll inside [LowCutoff, HighCutoff].
            _tmpa = fAll >= LowCutoff
            _tmpb = fAll <= HighCutoff
            fMid_Pos = np.all([_tmpa, _tmpb],0)
            fMid = fAll[fMid_Pos]
            return fMid_Pos, fMid
        LowCutoff, HighCutoff = CutoffLowHigh
        fMin = 0
        fMax = (N-1)*df
        fAll = np.linspace(0, fMax, N)
        yPsdAll = fAll* 0 # init
        LowCutoff = 0 if LowCutoff is None else LowCutoff
        HighCutoff = N*df if HighCutoff is None else HighCutoff
        # Numeric PSD
        # Note: by default returned yPsd is always 0 outside the input data range
        if self.PsdType == PsdFuns.Interp:
            # Use Auto-Fit + PowerLaw
            if self.Options.FIT_NUMERIC_DATA_WITH_POWER_LAW == True:
                xFreq,y = self.NumericPsdGetXY()
                p = FitPowerLaw(1/xFreq,y)
                _PsdParams = p[0], -p[1]
                LowCutoff = np.amin(self._PsdNumericX)
                # NOTE(review): HighCutoff is set with np.amin as well —
                # np.amax(self._PsdNumericX) was very likely intended (as
                # in the branch below); with amin the in-range window
                # collapses to a single frequency. Confirm.
                HighCutoff = np.amin(self._PsdNumericX)
                fMid_Pos, fMid = GetInRange(fAll, LowCutoff, HighCutoff)
                yPsd = PsdFuns.PowerLaw(fMid, *_PsdParams )
            # Use Interpolation
            else:
                # check Cutoff: clamp to the tabulated data range
                LowVal = np.amin(self._PsdNumericX)
                HighVal = np.amax(self._PsdNumericX)
                LowCutoff = LowVal if LowCutoff <= LowVal else LowCutoff
                HighCutoff = HighVal if HighCutoff >= HighVal else HighCutoff
                # Get the list of good frequency values (fMid) and their positions
                # (fMid_Pos)
                fMid_Pos, fMid = GetInRange(fAll, LowCutoff, HighCutoff)
                ##yPsd = self.PsdType(fMid, *self.PsdParams)
                ## did not work, replaced by the direct call below
                yPsd = PsdFuns.Interp(fMid, self._PsdNumericX, self._PsdNumericY)
        # Analytical Psd
        else:
            fMid_Pos, fMid = GetInRange(fAll, LowCutoff, HighCutoff)
            yPsd = self.PsdType(fMid, *self.PsdParams)
        # copying array subset
        yPsdAll[fMid_Pos] = yPsd
        return fAll, yPsdAll
    #======================================================================
    # 	FUN: _FitNumericPsdWithPowerLaw
    #======================================================================
    # unused / deprecated
    def _FitNumericPsdWithPowerLaw(self):
        # Fit the stored numeric PSD with a power law; the exponent sign is
        # flipped when the stored abscissa is a spatial frequency (the fit
        # is then done against 1/x).
        x,y = self.NumericPsdGetXY()
        if self._IsNumericPsdInFreq == True:
            p = FitPowerLaw(1/x,y)
            self.PsdParams = p[0], -p[1]
        else:
            p = FitPowerLaw(x,y)
            self.PsdParams = p[0], p[1]
    #======================================================================
    # 	FUN: MakeProfile
    #======================================================================
    def MakeProfile(self, L,N):
        '''
        Evaluates the psd according to .PsdType, .PsdParams and .Options directives
        Returns an evenly-spaced array.
        If PsdType = NumericArray, linear interpolation is performed.
        :PARAM: L: total length (m) — note: the docstring below mentions dx,
            but the actual signature takes (L, N).
        :PARAM: N: # of samples
        returns:
            1d arr
        '''
        if self.PsdType == PsdFuns.Interp:
            # Call the ad-hoc numeric-PSD code (works in mm internally).
            L_mm = L*1e3
            yRoughness = PsdArray2Noise_1d_v2(self._PsdNumericX, self._PsdNumericY, L_mm, N)
        else:
            # NOTE(review): on this branch yRoughness is never assigned, so
            # the return below raises NameError right after this print.
            print('Irreversible error. The code was not completed to handle this instance')
        return yRoughness * self.ProfileScaling
        # f, yPsd = self.PsdEval(N//2 + 1,df)
        # Special case
        # if self.Options.FIT_NUMERIC_DATA_WITH_POWER_LAW == True:
        #     self.PsdParams = list(FitPowerLaw(*self.NumericPsdGetXY()))
        #     yPsd = PsdFuns.PowerLaw(x, *self.PsdParams)
        # else: # general calse
        #     yPsd = self.PsdType(x, *self.PsdParams)
        # yRoughness = Psd2Noise_1d(yPsd, N, Semiaxis = True)
        # x = np.linspace(0, N*dx,N)
        # # Special case
        # if self.Options.FIT_NUMERIC_DATA_WITH_POWER_LAW == True:
        #     self.PsdParams = list(FitPowerLaw(*self.NumericPsdGetXY()))
        #     y = PowerLawNoise_1d(N, dx, *self.PsdParams)
        # else: # general calse
        #     y = self.PsdType(N,dx, *self.PsdParams)
        # return y
    # NOTE(review): this alias is silently replaced by the ``def Generate``
    # defined further below (the later definition wins at class-body
    # execution time).
    Generate = MakeProfile
    #======================================================================
    # 	FUN: NumericPsdSetXY
    #======================================================================
    def NumericPsdSetXY(self,x,y):
        # Store the tabulated PSD used by PsdFuns.Interp.
        self._PsdNumericX = x
        self._PsdNumericY = y
    #======================================================================
    # 	FUN: NumericPsdGetXY
    #======================================================================
    def NumericPsdGetXY(self):
        # Returns the stored tabulated PSD, or prints an error (and
        # implicitly returns None) when no data was loaded.
        try:
            return self._PsdNumericX, self._PsdNumericY
        except:
            print('Error in RoughnessMaker.NumericPsdGetXY. Maybe the data file was not properly loaded')
    #======================================================================
    # 	FUN: NumericPsdLoadXY
    #======================================================================
    def NumericPsdLoadXY(self, FilePath, xScaling = 1, yScaling = 1 , xIsSpatialFreq = True):
        ''' @TODO: specify file formats and types

        Parameters
        ----------------------------
        xIsSpatialFreq : bool
            True if the first column (Read_x_values) contains spatial
            frequencies. False if it contains lengths. Default = True
        xScaling, yScaling: floats
            Read_x_values => Read_x_values * xScaling
            Read_y_values => Read_y_values * yScaling
            Sometimes, properly setting the x and y scaling values may be confusing (although just matter of high-school considerations). On this purpose, the property .RoughnessMaker.ProfileScaling property can be used also..ProfileScaling is the scale factor that acts on the output of MakeProfile() function only.
        '''
        try:
            self._IsNumericPsdInFreq = xIsSpatialFreq
            s = np.loadtxt(FilePath)
            x = s[:,0]
            y = s[:,1]
            x = x * xScaling
            y = y * yScaling
            # inversion of x-axis if not spatial frequencies
            if xIsSpatialFreq == False:
                f = 1/x
            else:
                f = x
            # array sorting
            i = np.argsort(f)
            f = f[i]
            y = y[i]
            # I set the Cutoff value of the class according to available data
            # NOTE(review): np.amin is stored here as a *function object*,
            # not called — np.amin(f) was almost certainly intended.
            self.PsdCutoffLowHigh = [np.amin, np.amax(f)]
            # I set class operating variables
            self.PsdType = PsdFuns.Interp
            self.PsdParams = [f,y]
            # Auto-set
            # fill 0-value (DC Component)
            # if self.Options.AUTO_FILL_NUMERIC_DATA_WITH_ZERO == True:
            #     if np.amin(x >0):
            #         x = np.insert(x,0,0)
            #         y = np.insert(y,0,0) # 0 in psd => 0-mean value in the noise pattern
            # sync other class values
            self.NumericPsdSetXY(f, y)
        except:
            # NOTE(review): bare except silently swallows every error
            # (missing file, malformed data) — callers get no signal that
            # the load failed.
            pass
    def Generate(self, N = None, dx = None, CutoffLowHigh = [None, None]):
        '''
        Parameters
            N: # of output samples
            dx: step of the x axis
        Note: generates an evenly spaced array
        '''
        # NOTE(review): this definition replaces the earlier
        # ``Generate = MakeProfile`` alias declared above. The mutable
        # [None, None] default is shared across calls (not mutated here).
        L = dx * N
        df = 1/L
        fPsd, yPsd = self.PsdEval(N//2 +1 , df = df,
            CutoffLowHigh = CutoffLowHigh )
        # NOTE(review): Psd2Noise_1d requires N as its second positional
        # argument; this call omits it and therefore raises TypeError.
        h = Psd2Noise_1d(yPsd, Semiaxis = True)
        return h
    #======================================================================
    # 	FUN: NumericPsdCheck
    #======================================================================
    def NumericPsdCheck(self, N, L):
        # Plot the PSD as evaluated by PsdEval against the stored numeric
        # data and print the frequency ranges of both; returns an (empty)
        # status string.
        df = 1/L
        # Stored data
        ff,yy = self.NumericPsdGetXY()
        # Evaluated data
        fPsd, yPsd = self.PsdEval(N, df)
        plt.plot(fPsd, np.log10(yPsd),'x')
        plt.plot(ff, np.log10(yy),'.r')
        plt.legend(['Evaluated data', 'Stored data'])
        plt.suptitle('Usage of stored data (PSD)')
        fMax = df*(N//2)
        fMin = df
        StrMsg = ''
        _max = np.max(ff)
        _min = np.min(ff)
        print('fMax query = %0.1e m^-1' % fMax )
        print('fMax data= %0.1e m^-1 = %0.2e um^-1' % (_max, (_max * 1e6) ))
        print('fMin query= %0.1e m^-1' % fMin )
        print('fMin data= %0.1e m^-1 = %0.2e um^-1' % (_min, (_min * 1e6) ))
        return StrMsg
Each element is a callable Psd.
Most used are
PsdFuns.PowerLaw(x,a,b)
PsdFuns.Interp(x, xData, yData)
Fits the input data in the form
y = a*x^b
returns a,b
PSD(f) = np.exp(-0.5^f/Sigma^2)
Parameters
N: # of output samples
dx: step of the x axis
Note: generates an evenly spaced array
Evaluates the psd according to .PsdType, .PsdParams and .Options directives
Returns an evenly-spaced array.
If PsdType = NumericArray, linear interpolation is performed.
:PARAM: N: # of samples
:PARAM: dx: grid spacing (spatial frequency)
returns:
1d arr
@TODO: specificare formati e tipi di file
Parameters
----------------------------
xIsSpatialFreq : bool
true If the first column (Read_x_values) contains spatial
frequencies. False if it contains lenghts. Default = True
xScaling, yScaling: floats
Read_x_values => Read_x_values * xScaling
Read_y_values => Read_y_values * yScaling
Sometimes, properly setting the x and y scaling values may be confusing (although just matter of high-school considerations). On this purpose, the property .RoughnessMaker.ProfileScaling property can be used also..ProfileScaling is the scale factor that acts on the output of MakeProfile() function only.
remarks
--------
pippo
PSD(x) = a*x^b
Generates a noise pattern whose Power Spectral density is given by Psd.
Parameters
---------------------
Psd : 1d array
Contains the numeric Psd (treated as evenly spaced array)
Semiaxis :
0 : does nothing
1 : halvens Pds, then replicates the halven part for left frequencies,
producing an output as long as Psd
2 : replicates all Pds for lef frequencies as well, producing an output
twice as long as Psd
Real : boolean
If True, the real part of the output is returned (default)
Returns:
---------------------
An array of the same length of Psd
Returns meters
Evals the PSD in the range [0 - N*df]
It's good custom to have PSD[0] = 0, so that the noise pattern is
zero-mean.
Parameters:
----------------------
N : int
#of samples
df : float
spacing of spatial frequencies (df=1/TotalLength)
CutoffLowHigh : [LowCutoff, HighCutoff]
if >0, then Psd(f<Cutoff) is set to 0.
if None, then LowCutoff = min()
Returns : fAll, yPsdAll
----------------------
fAll : 1d array
contains the spatial frequencies
yPsd : 1d array
contains the Psd
Generates a noise pattern based an the Power spectral density returned
by PsdFun
Note: each time that the Property value is set, self.CutoffLowHigh is
reset, is specified by options
Created on Thu Jul 07 14:08:31 2016
@author: Mic
-*- coding: utf-8 -*-============================================================================ FUN: PsdArray2Noise_1d_v2============================================================================ (mm)vecchia rigafSpline = (np.array(range(N2))+1)/L_um um^-1 tolgoax.set_yscale('log')ax.set_xscale('log')% controllo RMS integrando la yPsd% Modo Manfredda styleyPsdNorm = np.sqrt(yPsd/L_um/1000)yPsdNorm_reverse = yPsdNorm[::-1]yPsd2Norm = np.sqrt(yPsd2/ell/1000/2)yPsd2Norm_[len(yPsd2Norm_)//2] = 0============================================================================ FUN: Psd2Noise============================================================================ piu lungo uguale============================================================================ FUN: NoNoise_1d======================================================================================================================================================== FUN: GaussianNoise_1d======================================================================================================================================================== FUN: PowerLawNoise_1d============================================================================ y = np.hstack((yHalf[-1:0:-1], 0, yHalf[1:-1]))============================================================================ FUN: CustomNoise_1d======================================================================================================================================================== CLASS: NoiseGenerator======================================================================================================================================================== FUN: FitPowerLaw============================================================================ indexErr = np.np.sqrt( covar[0][0] ) ampErr = np.np.sqrt( covar[1][1] ) * amp============================================================================== CLASS: 
RoughnessMaker==================================================================================================================================================== FUN: PdfEval====================================================================== init Numeric PSD Note: by default returned yPsd is always 0 outside the input data range Use Auto-Fit + PowerLaw Use Interpolation check Cutoff Get the list of good frequency values (fMid) and their positions (fMid_Pos)yPsd = self.PsdType(fMid, *self.PsdParams) non funziona, rimpiazzo a mano Analytical Psd copying array subset====================================================================== FUN: _FitNumericPsdWithPowerLaw====================================================================== in disusos====================================================================== FUN: MakeProfile====================================================================== chiama codice ad hoc f, yPsd = self.PsdEval(N//2 + 1,df) Special case if self.Options.FIT_NUMERIC_DATA_WITH_POWER_LAW == True: self.PsdParams = list(FitPowerLaw(*self.NumericPsdGetXY())) yPsd = PsdFuns.PowerLaw(x, *self.PsdParams) else: general calse yPsd = self.PsdType(x, *self.PsdParams) yRoughness = Psd2Noise_1d(yPsd, N, Semiaxis = True) x = np.linspace(0, N*dx,N) Special case if self.Options.FIT_NUMERIC_DATA_WITH_POWER_LAW == True: self.PsdParams = list(FitPowerLaw(*self.NumericPsdGetXY())) y = PowerLawNoise_1d(N, dx, *self.PsdParams) else: general calse y = self.PsdType(N,dx, *self.PsdParams) return y====================================================================== FUN: NumericPsdSetXY============================================================================================================================================ FUN: NumericPsdGetXY============================================================================================================================================ FUN: 
NumericPsdLoadXY====================================================================== inversion of x-axis if not spatial frequencies array sorting I set the Cutoff value of the class according to available data I set class operating variables Auto-set fill 0-value (DC Component) if self.Options.AUTO_FILL_NUMERIC_DATA_WITH_ZERO == True: if np.amin(x >0): x = np.insert(x,0,0) y = np.insert(y,0,0) 0 in psd => 0-mean value in the noise pattern sync other class values====================================================================== FUN: NumericPsdCheck====================================================================== Stored data Evaluated data | 7,592 | en | 0.437735 |
#!/Users/yaroten/Library/Mobile Documents/com~apple~CloudDocs/git/crawling_scraping/crawling_scraping/bin/python3
# $Id: rst2odt_prepstyles.py 5839 2009-01-07 19:09:28Z dkuhlman $
# Author: Dave Kuhlman <dkuhlman@rexx.com>
# Copyright: This module has been placed in the public domain.
"""
Fix a word-processor-generated styles.odt for odtwriter use: Drop page size
specifications from styles.xml in STYLE_FILE.odt.
"""
#
# Author: Michael Schutte <michi@uiae.at>
from lxml import etree
import sys
import zipfile
from tempfile import mkstemp
import shutil
import os
# XML namespace URIs used when querying styles.xml: ODF style elements and
# the XSL-FO-compatible attributes (page geometry) that get stripped.
NAMESPACES = {
    "style": "urn:oasis:names:tc:opendocument:xmlns:style:1.0",
    "fo": "urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0"
}
def prepstyle(filename):
    """Strip page-size (fo:*) attributes from styles.xml inside an .odt.

    The archive is rewritten to a temporary file which then replaces the
    original in place.
    """
    zin = zipfile.ZipFile(filename)
    styles = zin.read("styles.xml")
    root = etree.fromstring(styles)

    for el in root.xpath("//style:page-layout-properties",
                         namespaces=NAMESPACES):
        # Fix: snapshot the attribute names with list() — deleting from
        # el.attrib while iterating it raises RuntimeError on Python 3.
        for attr in list(el.attrib):
            if attr.startswith("{%s}" % NAMESPACES["fo"]):
                del el.attrib[attr]

    tempname = mkstemp()
    # Fix: the temp file must be opened in binary mode ("wb"); zipfile
    # writes bytes and fails on a text-mode file object in Python 3.
    zout = zipfile.ZipFile(os.fdopen(tempname[0], "wb"), "w",
                           zipfile.ZIP_DEFLATED)
    for item in zin.infolist():
        if item.filename == "styles.xml":
            # Replace the cleaned styles document, keep the zip metadata.
            zout.writestr(item, etree.tostring(root))
        else:
            zout.writestr(item, zin.read(item.filename))
    zout.close()
    zin.close()
    shutil.move(tempname[1], filename)
def main():
    """Command-line entry point: expects exactly one STYLE_FILE.odt argument."""
    args = sys.argv[1:]
    if len(args) != 1:
        # Fix: the original used Python 2 ``print >> sys.stderr`` syntax,
        # which is a SyntaxError under the Python 3 interpreter named in
        # the shebang.
        print(__doc__, file=sys.stderr)
        print("Usage: %s STYLE_FILE.odt\n" % sys.argv[0], file=sys.stderr)
        sys.exit(1)
    filename = args[0]
    prepstyle(filename)


if __name__ == '__main__':
    main()
# vim:tw=78:sw=4:sts=4:et:
| crawling_scraping/bin/rst2odt_prepstyles.py | 1,793 | Fix a word-processor-generated styles.odt for odtwriter use: Drop page size
specifications from styles.xml in STYLE_FILE.odt.
!/Users/yaroten/Library/Mobile Documents/com~apple~CloudDocs/git/crawling_scraping/crawling_scraping/bin/python3 $Id: rst2odt_prepstyles.py 5839 2009-01-07 19:09:28Z dkuhlman $ Author: Dave Kuhlman <dkuhlman@rexx.com> Copyright: This module has been placed in the public domain. Author: Michael Schutte <michi@uiae.at> vim:tw=78:sw=4:sts=4:et: | 470 | en | 0.488051 |
# -*- coding: utf-8 -*-
# Package initialiser: metadata plus the public API re-exported at top level.
__author__ = """Adam Geitgey"""
__email__ = 'ageitgey@gmail.com'
__version__ = '0.1.0'
# Re-export the public API so callers can use face_recognition.<name>.
from .api import load_image_file, face_locations, face_landmarks, face_encodings, compare_faces, face_distance
| face_recognition/face_recognition/__init__.py | 224 | -*- coding: utf-8 -*- | 21 | en | 0.767281 |
import json
import pytest
from great_expectations.core import ExpectationConfiguration, ExpectationSuite
from .test_expectation_suite import baseline_suite, exp1, exp2, exp3, exp4
@pytest.fixture
def empty_suite():
    # A suite with no expectations, for exercising empty-suite behaviour.
    return ExpectationSuite(
        expectation_suite_name="warning",
        expectations=[],
        meta={"notes": "This is an expectation suite."},
    )
@pytest.fixture
def exp5():
    # A not-null expectation on column "a": a different expectation_type
    # from the in-set expectations in baseline_suite, so type-based
    # lookups can discriminate between them.
    return ExpectationConfiguration(
        expectation_type="expect_column_values_to_not_be_null",
        kwargs={"column": "a",},
        meta={},
    )
def test_append_expectation(empty_suite, exp1, exp2):
    """append_expectation grows the suite by one each call, duplicates included."""
    assert len(empty_suite.expectations) == 0
    empty_suite.append_expectation(exp1)
    assert len(empty_suite.expectations) == 1
    # Adding the same expectation again *does* add duplicates.
    empty_suite.append_expectation(exp1)
    assert len(empty_suite.expectations) == 2
    empty_suite.append_expectation(exp2)
    assert len(empty_suite.expectations) == 3
    # Turn this on once we're ready to enforce strict typing.
    # with pytest.raises(TypeError):
    #     empty_suite.append_expectation("not an expectation")
    # Turn this on once we're ready to enforce strict typing.
    # with pytest.raises(TypeError):
    #     empty_suite.append_expectation(exp1.to_json_dict())
def test_find_expectation_indexes(baseline_suite, exp5):
    """find_expectation_indexes filters by column, type, and kwargs.

    Note: the suite is mutated mid-test (exp5 is appended), so the
    expected index lists change after that point.
    """
    # Passing no parameters "finds" all Expectations
    assert baseline_suite.find_expectation_indexes() == [0, 1]
    # Match on single columns
    assert baseline_suite.find_expectation_indexes(column="a") == [0]
    assert baseline_suite.find_expectation_indexes(column="b") == [1]
    # Non-existent column returns no matches
    assert baseline_suite.find_expectation_indexes(column="z") == []
    # It can return multiple expectation_type matches
    assert baseline_suite.find_expectation_indexes(
        expectation_type="expect_column_values_to_be_in_set"
    ) == [0, 1]
    # It can return multiple column matches
    baseline_suite.append_expectation(exp5)
    assert baseline_suite.find_expectation_indexes(column="a") == [0, 2]
    # It can match a single expectation_type
    assert baseline_suite.find_expectation_indexes(
        expectation_type="expect_column_values_to_not_be_null"
    ) == [2]
    # expectation_kwargs can match full kwargs
    assert baseline_suite.find_expectation_indexes(
        expectation_kwargs={
            "column": "b",
            "value_set": [-1, -2, -3],
            "result_format": "BASIC",
        }
    ) == [1]
    # expectation_kwargs can match partial kwargs
    assert baseline_suite.find_expectation_indexes(
        expectation_kwargs={"column": "a"}
    ) == [0, 2]
    # expectation_type and expectation_kwargs work in conjunction
    assert baseline_suite.find_expectation_indexes(
        expectation_type="expect_column_values_to_not_be_null",
        expectation_kwargs={"column": "a"},
    ) == [2]
    # column and expectation_kwargs work in conjunction
    assert baseline_suite.find_expectation_indexes(
        column="a", expectation_kwargs={"result_format": "BASIC"}
    ) == [0]
    # column and expectation_type work in conjunction
    assert baseline_suite.find_expectation_indexes(
        column="a", expectation_type="expect_column_values_to_not_be_null",
    ) == [2]
    assert (
        baseline_suite.find_expectation_indexes(
            column="a", expectation_type="expect_column_values_to_be_between",
        )
        == []
    )
    assert (
        baseline_suite.find_expectation_indexes(
            column="zzz", expectation_type="expect_column_values_to_be_between",
        )
        == []
    )
    # A column= filter that contradicts expectation_kwargs["column"] is an error.
    with pytest.raises(ValueError):
        assert (
            baseline_suite.find_expectation_indexes(
                column="a", expectation_kwargs={"column": "b"}
            )
            == []
        )
def test_find_expectation_indexes_on_empty_suite(empty_suite):
    """An empty suite yields no matches regardless of the filter used."""
    no_match_queries = [
        {"expectation_type": "expect_column_values_to_not_be_null"},
        {"column": "x"},
        {"expectation_kwargs": {}},
    ]
    for query in no_match_queries:
        assert empty_suite.find_expectation_indexes(**query) == []
def test_find_expectations(baseline_suite, exp1, exp2):
    """find_expectations returns cleaned copies of matching configurations.

    By default the discard_* flags strip `result_format`, `include_config`
    and `catch_exceptions` from the returned kwargs; disabling them keeps
    those kwargs intact.
    """
    # Note: most of the logic in this method is based on
    # find_expectation_indexes and _copy_and_clean_up_expectations_from_indexes
    # These tests do not thoroughly cover that logic.
    # Instead, they focus on the behavior of the discard_* methods
    assert (
        baseline_suite.find_expectations(
            column="a", expectation_type="expect_column_values_to_be_between",
        )
        == []
    )
    result = baseline_suite.find_expectations(
        column="a", expectation_type="expect_column_values_to_be_in_set",
    )
    assert len(result) == 1
    # The returned copy has `result_format` stripped by default.
    assert result[0] == ExpectationConfiguration(
        expectation_type="expect_column_values_to_be_in_set",
        kwargs={
            "column": "a",
            "value_set": [1, 2, 3],
            # "result_format": "BASIC"
        },
        meta={"notes": "This is an expectation."},
    )
    exp_with_all_the_params = ExpectationConfiguration(
        expectation_type="expect_column_values_to_not_be_null",
        kwargs={
            "column": "a",
            "result_format": "BASIC",
            "include_config": True,
            "catch_exceptions": True,
        },
        meta={},
    )
    baseline_suite.append_expectation(exp_with_all_the_params)
    # All three discard_* flags default to True, so all three kwargs go away.
    assert baseline_suite.find_expectations(
        column="a", expectation_type="expect_column_values_to_not_be_null",
    )[0] == ExpectationConfiguration(
        expectation_type="expect_column_values_to_not_be_null",
        kwargs={"column": "a",},
        meta={},
    )
    # With every discard_* flag disabled the original config comes back whole.
    assert (
        baseline_suite.find_expectations(
            column="a",
            expectation_type="expect_column_values_to_not_be_null",
            discard_result_format_kwargs=False,
            discard_include_config_kwargs=False,
            discard_catch_exceptions_kwargs=False,
        )[0]
        == exp_with_all_the_params
    )
    # Flags act independently: only `include_config` is discarded here.
    assert baseline_suite.find_expectations(
        column="a",
        expectation_type="expect_column_values_to_not_be_null",
        discard_result_format_kwargs=False,
        discard_catch_exceptions_kwargs=False,
    )[0] == ExpectationConfiguration(
        expectation_type="expect_column_values_to_not_be_null",
        kwargs={"column": "a", "result_format": "BASIC", "catch_exceptions": True,},
        meta={},
    )
def test_remove_expectation(baseline_suite):
    """remove_expectation removes exactly one matching expectation.

    ValueError is raised both when nothing matches and when the criteria
    are ambiguous (match more than one expectation); in either case the
    suite is left untouched.
    """
    # ValueError: Multiple expectations matched arguments. No expectations removed.
    with pytest.raises(ValueError):
        baseline_suite.remove_expectation()
    # ValueError: No matching expectation found.
    with pytest.raises(ValueError):
        baseline_suite.remove_expectation(column="does_not_exist")
    # ValueError: Multiple expectations matched arguments. No expectations removed.
    with pytest.raises(ValueError):
        baseline_suite.remove_expectation(
            expectation_type="expect_column_values_to_be_in_set"
        )
    assert len(baseline_suite.expectations) == 2
    # A successful removal returns None; compare with `is` per PEP 8.
    assert baseline_suite.remove_expectation(column="a") is None
    assert len(baseline_suite.expectations) == 1
    baseline_suite.remove_expectation(
        expectation_type="expect_column_values_to_be_in_set"
    )
    assert len(baseline_suite.expectations) == 0
    # ValueError: No matching expectation found.
    with pytest.raises(ValueError):
        baseline_suite.remove_expectation(
            expectation_type="expect_column_values_to_be_in_set"
        )
| tests/core/test_expectation_suite_crud_methods.py | 7,766 | Adding the same expectation again *does* add duplicates. Turn this on once we're ready to enforce strict typing. with pytest.raises(TypeError): empty_suite.append_expectation("not an expectation") Turn this on once we're ready to enforce strict typing. with pytest.raises(TypeError): empty_suite.append_expectation(exp1.to_json_dict()) Passing no parameters "finds" all Expectations Match on single columns Non-existent column returns no matches It can return multiple expectation_type matches It can return multiple column matches It can match a single expectation_type expectation_kwargs can match full kwargs expectation_kwargs can match partial kwargs expectation_type and expectation_kwargs work in conjunction column and expectation_kwargs work in conjunction column and expectation_type work in conjunction Note: most of the logic in this method is based on find_expectation_indexes and _copy_and_clean_up_expectations_from_indexes These tests do not thoroughly cover that logic. Instead, they focus on the behavior of the discard_* methods "result_format": "BASIC" ValueError: Multiple expectations matched arguments. No expectations removed. ValueError: No matching expectation found. ValueError: Multiple expectations matched arguments. No expectations removed. ValueError: No matching expectation found. | 1,322 | en | 0.643928 |
from collections import defaultdict
from typing import DefaultDict
from .. import utils
from .. import data
'''
A collection of functions to index faculty data.
No function in this module reads data from the data files; it only works logic
on them. This helps keep the program modular, by separating the data sources
from the data indexing.
'''
'''
Maps faculty to the sections they teach.
This function works by taking several arguments:
- faculty, from [FacultyReader.get_faculty]
- sectionTeachers, from [SectionReader.get_section_faculty_ids]
These are kept as parameters instead of calling the functions by itself
in order to keep the data and logic layers separate.
'''
def get_faculty_sections(faculty, section_teachers):
    """Group section ids by the faculty member who teaches them.

    ``faculty`` maps a faculty id to its faculty record; ``section_teachers``
    maps a section id to the id of the teacher of that section.  Returns a
    ``defaultdict(set)`` keyed by faculty record.  Teacher ids that have no
    record in ``faculty`` are collected and logged as a single warning.
    """
    sections_by_teacher = defaultdict(set)
    unknown_teachers = set()
    for section_id, faculty_id in section_teachers.items():
        try:
            teacher = faculty[faculty_id]
        except KeyError:
            # Teaches a class but doesn't have basic faculty data.
            unknown_teachers.add(faculty_id)
        else:
            sections_by_teacher[teacher].add(section_id)
    if unknown_teachers:
        utils.logger.warning(f"Missing emails for {unknown_teachers}")
    return sections_by_teacher
'''
Returns complete [User] objects.
This function returns [User] objects with more properties than before.
See [User.addSchedule] for which properties are added.
This function works by taking several arguments:
- faculty_sections from [get_faculty_sections]
- section_periods from [student_reader.get_periods]
These are kept as parameters instead of calling the functions by itself
in order to keep the data and logic layers separate.
'''
def get_faculty_with_schedule(faculty_sections, section_periods):
    """Attach a full schedule and homeroom to every faculty member.

    ``faculty_sections`` maps a faculty object to the set of section ids
    they teach (see ``get_faculty_sections``); ``section_periods`` maps a
    section id to the periods at which that section meets.  Faculty objects
    are mutated in place (``homeroom``, ``homeroom_location``, ``schedule``)
    and returned as a list.
    """
    # The schedule (flat list of periods) for each teacher.
    schedules = {}
    # Section IDs which are taught but never meet.
    missing_periods = set()
    # Faculty missing a homeroom.
    #
    # This will be logged at the debug level.
    missing_homerooms = set()
    # Loop over teacher sections and gather their periods.
    for teacher, section_ids in faculty_sections.items():
        periods = []
        for section_id in section_ids:
            if section_id in section_periods:
                # Accumulate periods across ALL sections taught.  The
                # original assigned here, discarding the periods of every
                # previously seen section.
                periods.extend(section_periods[section_id])
            elif section_id.startswith("UADV"):
                # "UADV" sections denote the homeroom, not a scheduled class.
                teacher.homeroom = section_id
                teacher.homeroom_location = "Unavailable"
            else:
                missing_periods.add(section_id)
        # Still couldn't find any homeroom: fall back to the default.
        # NOTE(review): assumes the faculty object initializes `homeroom`
        # to None — confirm against the User model.
        if teacher.homeroom is None:
            missing_homerooms.add(teacher)
            teacher.homeroom = "SENIOR_HOMEROOM"
            teacher.homeroom_location = "Unavailable"
        schedules[teacher] = periods
    # Log what was collected.  The original logged missing_homerooms only
    # when missing_periods was empty, and passed it as an extra argument
    # with no format placeholder (which breaks %-style log formatting).
    if missing_periods:
        utils.logger.debug(f"Missing periods for sections {missing_periods}")
    if missing_homerooms:
        utils.logger.debug(f"Missing homerooms for {missing_homerooms}")
    # Compile each teacher's list of periods into a full day-by-day schedule.
    result = []
    for teacher, periods in schedules.items():
        schedule = data.DayDefaultDict()
        for period in periods:
            # Periods are 1-based; schedule slots are 0-based.
            schedule[period.day][period.period - 1] = period
        schedule.populate(utils.constants.day_names)
        teacher.schedule = schedule
        result.append(teacher)
    return result
"""
* GTDynamics Copyright 2021, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* See LICENSE for the license information
*
* @file test_print.py
* @brief Test printing with DynamicsSymbol.
* @author Gerry Chen
"""
import unittest
from io import StringIO
from unittest.mock import patch
import gtdynamics as gtd
import gtsam
class TestPrint(unittest.TestCase):
    """Test printing of keys."""
    def test_values(self):
        """Checks that printing Values uses the GTDKeyFormatter instead of gtsam's default"""
        v = gtd.Values()
        gtd.InsertJointAngle(v, 0, 1, 2)
        # 'q(0)1' is the GTDynamics-formatted key (joint angle, id 0, time 1);
        # gtsam's default formatter would render the raw integer key instead.
        self.assertTrue('q(0)1' in v.__repr__())
    def test_nonlinear_factor_graph(self):
        """Checks that printing NonlinearFactorGraph uses the GTDKeyFormatter"""
        fg = gtd.NonlinearFactorGraph()
        fg.push_back(
            gtd.MinTorqueFactor(
                gtd.TorqueKey(0, 0).key(),
                gtsam.noiseModel.Unit.Create(1)))
        # 'T(0)0' is the formatted torque key (id 0, time 0).
        self.assertTrue('T(0)0' in fg.__repr__())
    def test_key_formatter(self):
        """Tests print method with various key formatters"""
        torqueKey = gtd.TorqueKey(0, 0).key()
        factor = gtd.MinTorqueFactor(torqueKey,
                                     gtsam.noiseModel.Unit.Create(1))
        # Explicitly passing the GTD formatter renders the symbolic key name.
        with patch('sys.stdout', new=StringIO()) as fake_out:
            factor.print('factor: ', gtd.GTDKeyFormatter)
            self.assertTrue('factor: min torque factor' in fake_out.getvalue())
            self.assertTrue('keys = { T(0)0 }' in fake_out.getvalue())
        # An arbitrary Python callable can also be used as the key formatter.
        def myKeyFormatter(key):
            return 'this is my key formatter {}'.format(key)
        with patch('sys.stdout', new=StringIO()) as fake_out:
            factor.print('factor: ', myKeyFormatter)
            self.assertTrue('factor: min torque factor' in fake_out.getvalue())
            self.assertTrue('keys = {{ this is my key formatter {} }}'.format(
                torqueKey) in fake_out.getvalue())
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| python/tests/test_print.py | 2,047 | Test printing of keys.
Tests print method with various key formatters
Checks that printing NonlinearFactorGraph uses the GTDKeyFormatter
Checks that printing Values uses the GTDKeyFormatter instead of gtsam's default
* GTDynamics Copyright 2021, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* See LICENSE for the license information
*
* @file test_print.py
* @brief Test printing with DynamicsSymbol.
* @author Gerry Chen | 464 | en | 0.632565 |
from typing import Optional, Union, Tuple, Mapping, List
from torch import Tensor
from torch_geometric.data.storage import recursive_apply
from torch_geometric.typing import Adj
from torch_sparse import SparseTensor
from tsl.ops.connectivity import convert_torch_connectivity
from tsl.typing import DataArray, SparseTensArray, ScipySparseMatrix
from . import utils
class DataParsingMixin:
    """Parsing/validation helpers shared by spatiotemporal data containers.

    Each parser copies its input into a :class:`torch.Tensor`, rearranges it
    to a canonical layout, casts it to the host's ``precision``, and checks
    its dimensions against the host object, which is expected to expose
    ``n_steps``, ``n_nodes``, ``n_channels``, ``precision``, ``name`` and
    ``keys``.
    """

    def _parse_data(self, obj: DataArray) -> Tensor:
        """Parse the (mandatory) main data into a (steps, nodes, channels) tensor."""
        assert obj is not None
        obj = utils.copy_to_tensor(obj)
        obj = utils.to_steps_nodes_channels(obj)
        obj = utils.cast_tensor(obj, self.precision)
        return obj
    def _parse_mask(self, mask: Optional[DataArray]) -> Optional[Tensor]:
        """Parse an optional mask; steps/nodes must match the host's sizes."""
        if mask is None:
            return None
        mask = utils.copy_to_tensor(mask)
        mask = utils.to_steps_nodes_channels(mask)
        self._check_same_dim(mask.size(0), 'n_steps', 'mask')
        self._check_same_dim(mask.size(1), 'n_nodes', 'mask')
        # A single channel is allowed (broadcastable); otherwise the channel
        # dimension must match the data's.
        if mask.size(-1) > 1:
            self._check_same_dim(mask.size(-1), 'n_channels', 'mask')
        mask = utils.cast_tensor(mask)
        return mask
    def _parse_exogenous(self, obj: DataArray, name: str,
                         node_level: bool) -> Tensor:
        """Parse an exogenous variable, laid out per-node or globally (time only)."""
        obj = utils.copy_to_tensor(obj)
        if node_level:
            obj = utils.to_steps_nodes_channels(obj)
            self._check_same_dim(obj.shape[1], 'n_nodes', name)
        else:
            obj = utils.to_steps_channels(obj)
            self._check_same_dim(obj.shape[0], 'n_steps', name)
        obj = utils.cast_tensor(obj, self.precision)
        return obj
    def _parse_attribute(self, obj: DataArray, name: str,
                         node_level: bool) -> Tensor:
        """Parse a static attribute (no time dimension)."""
        obj = utils.copy_to_tensor(obj)
        if node_level:
            obj = utils.to_nodes_channels(obj)
            self._check_same_dim(obj.shape[0], 'n_nodes', name)
        obj = utils.cast_tensor(obj, self.precision)
        return obj
    def _parse_adj(self, connectivity: Union[SparseTensArray, Tuple[DataArray]],
                   target_layout: Optional[str] = None
                   ) -> Tuple[Optional[Adj], Optional[Tensor]]:
        """Normalize a connectivity structure to an (edge_index, edge_weight) pair."""
        # format in [sparse, edge_index, None], where None means keep as input
        if connectivity is None:
            return None, None
        # Convert to torch
        # from np.ndarray, pd.DataFrame or torch.Tensor
        if isinstance(connectivity, DataArray.__args__):
            connectivity = utils.copy_to_tensor(connectivity)
        elif isinstance(connectivity, (list, tuple)):
            connectivity = recursive_apply(connectivity, utils.copy_to_tensor)
        # from scipy sparse matrix
        elif isinstance(connectivity, ScipySparseMatrix):
            connectivity = SparseTensor.from_scipy(connectivity)
        elif not isinstance(connectivity, SparseTensor):
            raise TypeError("`connectivity` must be a dense matrix or in "
                            "COO format (i.e., an `edge_index`).")
        if target_layout is not None:
            connectivity = convert_torch_connectivity(connectivity,
                                                      target_layout,
                                                      num_nodes=self.n_nodes)
        # A (list, tuple) at this point is an (edge_index, edge_weight) pair;
        # anything else is a single structure with no separate weights.
        if isinstance(connectivity, (list, tuple)):
            edge_index, edge_weight = connectivity
            if edge_weight is not None:
                edge_weight = utils.cast_tensor(edge_weight, self.precision)
        else:
            edge_index, edge_weight = connectivity, None
        self._check_same_dim(edge_index.size(0), 'n_nodes', 'connectivity')
        return edge_index, edge_weight
    def _check_same_dim(self, dim: int, attr: str, name: str):
        """Raise ValueError if ``dim`` differs from the host's ``attr`` size."""
        dim_data = getattr(self, attr)
        if dim != dim_data:
            raise ValueError("Cannot assign {0} with {1}={2}: data has {1}={3}"
                             .format(name, attr, dim, dim_data))
    def _check_name(self, name: str):
        """Reject attribute names that are reserved or already in use."""
        if name.startswith('edge_'):
            raise ValueError(f"Cannot set attribute with name '{name}' in this "
                             f"way, consider adding edge attributes as "
                             f"{self.name}.{name} = value.")
        # name cannot be an attribute of self, nor a key in get
        invalid_names = set(dir(self)).union(self.keys)
        if name in invalid_names:
            raise ValueError(f"Cannot set attribute with name '{name}', there "
                             f"is already an attribute named '{name}' in the "
                             "dataset.")
    def _value_to_kwargs(self, value: Union[DataArray, List, Tuple, Mapping],
                         keys: Optional[Union[List, Tuple]] = None):
        """Coerce ``value`` to a kwargs dict: arrays → {'value': ...},
        sequences → zipped with ``keys``, mappings → returned as-is."""
        if isinstance(value, DataArray.__args__):
            return dict(value=value)
        if isinstance(value, (list, tuple)):
            return dict(zip(keys, value))
        elif isinstance(value, Mapping):
            return value
        else:
            raise TypeError('Invalid type for value "{}"'.format(type(value)))
    def _exog_value_to_kwargs(self,
                              value: Union[DataArray, List, Tuple, Mapping]):
        """Kwargs coercion for exogenous variables (positional key order)."""
        keys = ['value', 'node_level', 'add_to_input_map', 'synch_mode',
                'preprocess']
        return self._value_to_kwargs(value, keys)
    def _attr_value_to_kwargs(self,
                              value: Union[DataArray, List, Tuple, Mapping]):
        """Kwargs coercion for static attributes (positional key order)."""
        keys = ['value', 'node_level', 'add_to_batch']
        return self._value_to_kwargs(value, keys)
| tsl/data/mixin.py | 5,576 | format in [sparse, edge_index, None], where None means keep as input Convert to torch from np.ndarray, pd.DataFrame or torch.Tensor from scipy sparse matrix name cannot be an attribute of self, nor a key in get | 210 | en | 0.728147 |
"""
Write a function that takes in an array of integers and returns a sorted version of that array. Use the QuickSort algorithm to sort the array.
"""
def quick_sort(array):
    """Sort ``array`` in place using QuickSort and return the same list."""
    if len(array) > 1:
        _rec_helper(array, 0, len(array) - 1)
    return array


def _rec_helper(array, start, end):
    """Recursively partition ``array[start:end + 1]`` around its first element."""
    if start >= end:
        # Zero or one element: nothing to sort.
        return
    pivot = start
    lo, hi = start + 1, end
    while lo <= hi:
        # Both pointers sit on elements belonging to the other side: swap.
        if array[lo] > array[pivot] and array[hi] < array[pivot]:
            _swap(array, lo, hi)
        if array[pivot] >= array[lo]:
            lo += 1
        if array[pivot] <= array[hi]:
            hi -= 1
    # `hi` now marks the pivot's final sorted position.
    _swap(array, pivot, hi)
    left_size, right_size = hi - start, end - hi
    if left_size > right_size:
        _rec_helper(array, start, hi - 1)
        _rec_helper(array, hi + 1, end)
    else:
        _rec_helper(array, hi + 1, end)
        _rec_helper(array, start, hi - 1)


def _swap(array, i, j):
    """Exchange the elements at indices ``i`` and ``j``."""
    array[i], array[j] = array[j], array[i]


# Smoke test: the in-place sort must agree with the built-in.
array = [3, 4, 7, 1, 1, 2, 5, 1, 3, 8, 4]
assert quick_sort(array) == sorted(array)
print('OK')
| solutions/quick_sort.py | 1,134 | Write a function that takes in an array of integers and returns a sorted version of that array. Use the QuickSort algorithm to sort the array.
base casetest | 158 | en | 0.776222 |
# -*- coding: utf-8 -*-
"""Example for a list question type.
Run example by typing `python -m examples.list` in your console."""
from pprint import pprint
import questionary
from examples import custom_style_dope
from questionary import Separator, Choice, prompt
def ask_pystyle(**kwargs):
    """Ask the question via questionary's Python API; returns the chosen answer."""
    # create the question object
    question = questionary.select(
        'What do you want to do?',
        qmark='😃',
        choices=[
            'Order a pizza',
            'Make a reservation',
            Separator(),
            'Ask for opening hours',
            Choice('Contact support', disabled='Unavailable at this time'),
            'Talk to the receptionist'],
        style=custom_style_dope,
        **kwargs)
    # prompt the user for an answer
    return question.ask()
def ask_dictstyle(**kwargs):
    """Ask the same question defined as a PyInquirer-style dict; returns the answers dict."""
    questions = [
        {
            'type': 'select',
            'name': 'theme',
            'message': 'What do you want to do?',
            'choices': [
                'Order a pizza',
                'Make a reservation',
                Separator(),
                'Ask for opening hours',
                {
                    'name': 'Contact support',
                    'disabled': 'Unavailable at this time'
                },
                'Talk to the receptionist'
            ]
        }
    ]
    return prompt(questions, style=custom_style_dope, **kwargs)
# Demo entry point: run the Python-style prompt and pretty-print the answer.
if __name__ == '__main__':
    pprint(ask_pystyle())
| examples/select.py | 1,463 | Example for a list question type.
Run example by typing `python -m examples.list` in your console.
-*- coding: utf-8 -*- create the question object prompt the user for an answer | 180 | en | 0.76063 |
# -*- coding: utf-8 -*-
from zappa_boilerplate.database import db_session
from flask_wtf import Form
from wtforms import StringField, PasswordField
from wtforms.validators import DataRequired, Email, EqualTo, Length
from .models import User
class RegisterForm(Form):
    """Registration form with uniqueness checks for username and email."""

    username = StringField('Username',
                           validators=[DataRequired(), Length(min=3, max=25)])
    email = StringField('Email',
                        validators=[DataRequired(), Email(), Length(min=6, max=40)])
    password = PasswordField('Password',
                             validators=[DataRequired(), Length(min=6, max=40)])
    confirm = PasswordField('Verify password',
                            [DataRequired(), EqualTo('password', message='Passwords must match')])

    def __init__(self, *args, **kwargs):
        """Create the form; ``self.user`` is reserved for a matched user record."""
        super(RegisterForm, self).__init__(*args, **kwargs)
        self.user = None

    def validate(self):
        """Run the field validators, then enforce username/email uniqueness.

        Returns True only when every field validator passes and neither
        the username nor the email is already registered; on failure the
        offending field gets an error message appended.
        """
        initial_validation = super(RegisterForm, self).validate()
        if not initial_validation:
            return False
        # Query through db_session for both checks; the original mixed
        # db_session.query(...) with the User.query session property.
        user = db_session.query(User).filter_by(username=self.username.data).first()
        if user:
            self.username.errors.append("Username already registered")
            return False
        user = db_session.query(User).filter_by(email=self.email.data).first()
        if user:
            self.email.errors.append("Email already registered")
            return False
        return True
# An old version of OpenAI Gym's multi_discrete.py. (Was getting affected by Gym updates)
# (https://github.com/openai/gym/blob/1fb81d4e3fb780ccf77fec731287ba07da35eb84/gym/spaces/multi_discrete.py)
import numpy as np
import gym
class MultiDiscrete(gym.Space):
    """
    - The multi-discrete action space consists of a series of discrete action spaces with different parameters
    - It can be adapted to both a Discrete action space or a continuous (Box) action space
    - It is useful to represent game controllers or keyboards where each key can be represented as a discrete action space
    - It is parametrized by passing an array of arrays containing [min, max] for each discrete action space
       where the discrete action space can take any integers from `min` to `max` (both inclusive)
    Note: A value of 0 always need to represent the NOOP action.
    e.g. Nintendo Game Controller
    - Can be conceptualized as 3 discrete action spaces:
        1) Arrow Keys: Discrete 5  - NOOP[0], UP[1], RIGHT[2], DOWN[3], LEFT[4]  - params: min: 0, max: 4
        2) Button A:   Discrete 2  - NOOP[0], Pressed[1] - params: min: 0, max: 1
        3) Button B:   Discrete 2  - NOOP[0], Pressed[1] - params: min: 0, max: 1
    - Can be initialized as
        MultiDiscrete([ [0,4], [0,1], [0,1] ])
    """
    def __init__(self, array_of_param_array):
        # Split the per-dimension [min, max] pairs into parallel bound arrays.
        self.low = np.array([x[0] for x in array_of_param_array])
        self.high = np.array([x[1] for x in array_of_param_array])
        self.num_discrete_space = self.low.shape[0]
    def sample(self):
        """ Returns a array with one sample from each discrete action space """
        # For each row: round(random .* (max - min) + min, 0)
        # NOTE(review): a fresh RandomState is created on every call, so
        # sampling cannot be seeded through this space — confirm whether
        # reproducibility matters before relying on it.
        np_random = np.random.RandomState()
        random_array = np_random.rand(self.num_discrete_space)
        return [int(x) for x in np.floor(np.multiply((self.high - self.low + 1.), random_array) + self.low)]
    def contains(self, x):
        # Valid iff x has one entry per sub-space and each entry lies within
        # its inclusive [low, high] bounds.
        return len(x) == self.num_discrete_space and (np.array(x) >= self.low).all() and (np.array(x) <= self.high).all()
    @property
    def shape(self):
        # NOTE(review): returns the number of sub-spaces as an int, not a
        # tuple as modern gym spaces do — kept for this old-API snapshot.
        return self.num_discrete_space
    def __repr__(self):
        return "MultiDiscrete" + str(self.num_discrete_space)
    def __eq__(self, other):
        # Two spaces are equal when their bound arrays match element-wise.
        return np.array_equal(self.low, other.low) and np.array_equal(self.high, other.high)
- It can be adapted to both a Discrete action space or a continuous (Box) action space
- It is useful to represent game controllers or keyboards where each key can be represented as a discrete action space
- It is parametrized by passing an array of arrays containing [min, max] for each discrete action space
where the discrete action space can take any integers from `min` to `max` (both inclusive)
Note: A value of 0 always need to represent the NOOP action.
e.g. Nintendo Game Controller
- Can be conceptualized as 3 discrete action spaces:
1) Arrow Keys: Discrete 5 - NOOP[0], UP[1], RIGHT[2], DOWN[3], LEFT[4] - params: min: 0, max: 4
2) Button A: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1
3) Button B: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1
- Can be initialized as
MultiDiscrete([ [0,4], [0,1], [0,1] ])
Returns a array with one sample from each discrete action space
An old version of OpenAI Gym's multi_discrete.py. (Was getting affected by Gym updates) (https://github.com/openai/gym/blob/1fb81d4e3fb780ccf77fec731287ba07da35eb84/gym/spaces/multi_discrete.py) For each row: round(random .* (max - min) + min, 0) | 1,293 | en | 0.773221 |
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
class VmallPipeline(object):
    """Default (pass-through) item pipeline for the vmall spider."""

    def process_item(self, item, spider):
        # No processing yet: return the item unchanged so later pipelines run.
        return item
| vmall/pipelines.py | 286 | -*- coding: utf-8 -*- Define your item pipelines here Don't forget to add your pipeline to the ITEM_PIPELINES setting See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html | 181 | en | 0.714533 |
"""
app.py - Flask-based server.
@author Thomas J. Daley, J.D.
@version: 0.0.1
Copyright (c) 2019 by Thomas J. Daley, J.D.
"""
import argparse
import random
from flask import Flask, render_template, request, flash, redirect, url_for, session, jsonify
from wtforms import Form, StringField, TextAreaField, PasswordField, validators
from functools import wraps
from views.decorators import is_admin_user, is_logged_in, is_case_set
from webservice import WebService
from util.database import Database
from views.admin.admin_routes import admin_routes
from views.cases.case_routes import case_routes
from views.discovery.discovery_routes import discovery_routes
from views.drivers.driver_routes import driver_routes
from views.info.info_routes import info_routes
from views.login.login import login
from views.objections.objection_routes import objection_routes
from views.real_property.real_property_routes import rp_routes
from views.responses.response_routes import response_routes
from views.vehicles.vehicle_routes import vehicle_routes
from views.decorators import is_admin_user, is_case_set, is_logged_in
WEBSERVICE = None
DATABASE = Database()
DATABASE.connect()
app = Flask(__name__)
app.register_blueprint(admin_routes)
app.register_blueprint(case_routes)
app.register_blueprint(discovery_routes)
app.register_blueprint(driver_routes)
app.register_blueprint(info_routes)
app.register_blueprint(login)
app.register_blueprint(objection_routes)
app.register_blueprint(rp_routes)
app.register_blueprint(response_routes)
app.register_blueprint(vehicle_routes)
# Helper to create Public Data credentials from session variables
# Helper to create Public Data credentials from session variables
def pd_credentials(mysession) -> dict:
    """Build Public Data credentials from the given session mapping.

    Reads the *mysession* argument rather than the global Flask ``session``
    (the original ignored its parameter entirely), so the helper works with
    any mapping that carries ``pd_username`` / ``pd_password``.
    """
    return {
        "username": mysession["pd_username"],
        "password": mysession["pd_password"]
    }
@app.route('/', methods=['GET'])
def index():
    """Render the public home page."""
    return render_template('home.html')
@app.route('/attorney/find/<string:bar_number>', methods=['POST'])
@is_logged_in
def find_attorney(bar_number: str):
    """Look up an attorney by bar number; returns JSON with a success flag."""
    attorney = DATABASE.attorney(bar_number)
    if attorney:
        # Found: annotate the record itself with the success flag.
        attorney['success'] = True
        return jsonify(attorney)
    # Not found: success=False plus a human-readable message.
    return jsonify(
        {
            'success': False,
            'message': "Unable to find attorney having Bar Number {}"
                .format(bar_number)
        }
    )
if __name__ == "__main__":
    # Command-line entry point: parse options, build the web service and
    # start the Flask development server.
    parser = argparse.ArgumentParser(description="Webservice for DiscoveryBot")
    parser.add_argument(
        "--debug",
        help="Run server in debug mode",
        action='store_true'
    )
    parser.add_argument(
        "--port",
        help="TCP port to listen on",
        type=int,
        default=5001
    )
    parser.add_argument(
        "--zillowid",
        "-z",
        help="Zillow API credential from https://www.zillow.com/howto/api/APIOverview.htm" # NOQA
    )
    args = parser.parse_args()
    WEBSERVICE = WebService(args.zillowid)
    # NOTE(review): hard-coded secret key — should be loaded from config/env,
    # not committed to source.
    app.secret_key = "SDFIIUWER*HGjdf8*"
    app.run(debug=args.debug, port=args.port)
| app/app.py | 2,992 | app.py - Flask-based server.
@author Thomas J. Daley, J.D.
@version: 0.0.1
Copyright (c) 2019 by Thomas J. Daley, J.D.
Helper to create Public Data credentials from session variables NOQA | 190 | en | 0.685337 |
"""
A NumPy sub-namespace that conforms to the Python array API standard.
This submodule accompanies NEP 47, which proposes its inclusion in NumPy. It
is still considered experimental, and will issue a warning when imported.
This is a proof-of-concept namespace that wraps the corresponding NumPy
functions to give a conforming implementation of the Python array API standard
(https://data-apis.github.io/array-api/latest/). The standard is currently in
an RFC phase and comments on it are both welcome and encouraged. Comments
should be made either at https://github.com/data-apis/array-api or at
https://github.com/data-apis/consortium-feedback/discussions.
NumPy already follows the proposed spec for the most part, so this module
serves mostly as a thin wrapper around it. However, NumPy also implements a
lot of behavior that is not included in the spec, so this serves as a
restricted subset of the API. Only those functions that are part of the spec
are included in this namespace, and all functions are given with the exact
signature given in the spec, including the use of position-only arguments, and
omitting any extra keyword arguments implemented by NumPy but not part of the
spec. The behavior of some functions is also modified from the NumPy behavior
to conform to the standard. Note that the underlying array object itself is
wrapped in a wrapper Array() class, but is otherwise unchanged. This submodule
is implemented in pure Python with no C extensions.
The array API spec is designed as a "minimal API subset" and explicitly allows
libraries to include behaviors not specified by it. But users of this module
that intend to write portable code should be aware that only those behaviors
that are listed in the spec are guaranteed to be implemented across libraries.
Consequently, the NumPy implementation was chosen to be both conforming and
minimal, so that users can use this implementation of the array API namespace
and be sure that behaviors that it defines will be available in conforming
namespaces from other libraries.
A few notes about the current state of this submodule:
- There is a test suite that tests modules against the array API standard at
https://github.com/data-apis/array-api-tests. The test suite is still a work
in progress, but the existing tests pass on this module, with a few
exceptions:
- DLPack support (see https://github.com/data-apis/array-api/pull/106) is
not included here, as it requires a full implementation in NumPy proper
first.
The test suite is not yet complete, and even the tests that exist are not
guaranteed to give a comprehensive coverage of the spec. Therefore, when
reviewing and using this submodule, you should refer to the standard
documents themselves. There are some tests in numpy.array_api.tests, but
they primarily focus on things that are not tested by the official array API
test suite.
- There is a custom array object, numpy.array_api.Array, which is returned by
all functions in this module. All functions in the array API namespace
implicitly assume that they will only receive this object as input. The only
way to create instances of this object is to use one of the array creation
functions. It does not have a public constructor on the object itself. The
object is a small wrapper class around numpy.ndarray. The main purpose of it
is to restrict the namespace of the array object to only those dtypes and
only those methods that are required by the spec, as well as to limit/change
certain behavior that differs in the spec. In particular:
- The array API namespace does not have scalar objects, only 0-D arrays.
Operations on Array that would create a scalar in NumPy create a 0-D
array.
- Indexing: Only a subset of indices supported by NumPy are required by the
spec. The Array object restricts indexing to only allow those types of
indices that are required by the spec. See the docstring of the
numpy.array_api.Array._validate_indices helper function for more
information.
- Type promotion: Some type promotion rules are different in the spec. In
particular, the spec does not have any value-based casting. The spec also
does not require cross-kind casting, like integer -> floating-point. Only
those promotions that are explicitly required by the array API
specification are allowed in this module. See NEP 47 for more info.
- Functions do not automatically call asarray() on their input, and will not
work if the input type is not Array. The exception is array creation
functions, and Python operators on the Array object, which accept Python
scalars of the same type as the array dtype.
- All functions include type annotations, corresponding to those given in the
spec (see _typing.py for definitions of some custom types). These do not
currently fully pass mypy due to some limitations in mypy.
- Dtype objects are just the NumPy dtype objects, e.g., float64 =
np.dtype('float64'). The spec does not require any behavior on these dtype
objects other than that they be accessible by name and be comparable by
equality, but it was considered too much extra complexity to create custom
objects to represent dtypes.
- All places where the implementations in this submodule are known to deviate
from their corresponding functions in NumPy are marked with "# Note:"
comments.
Still TODO in this module are:
- DLPack support for numpy.ndarray is still in progress. See
https://github.com/numpy/numpy/pull/19083.
- The copy=False keyword argument to asarray() is not yet implemented. This
requires support in numpy.asarray() first.
- Some functions are not yet fully tested in the array API test suite, and may
require updates that are not yet known until the tests are written.
- The spec is still in an RFC phase and may still have minor updates, which
will need to be reflected here.
- The linear algebra extension in the spec will be added in a future pull
request.
- Complex number support in array API spec is planned but not yet finalized,
as are the fft extension and certain linear algebra functions such as eig
that require complex dtypes.
"""
# Emit the experimental-status warning at import time, before any re-exports,
# so consumers see the NEP 47 caveat as soon as they import the namespace.
# stacklevel=2 attributes the warning to the importing module, not this file.
import warnings
warnings.warn(
    "The numpy.array_api submodule is still experimental. See NEP 47.", stacklevel=2
)
# __all__ is assembled incrementally, one section per spec category, so each
# group of re-exported names stays adjacent to the import that provides it.
__all__ = []
# Constants (spec: constants section).
from ._constants import e, inf, nan, pi
__all__ += ["e", "inf", "nan", "pi"]
# Array creation functions (spec: creation functions section).
from ._creation_functions import (
    asarray,
    arange,
    empty,
    empty_like,
    eye,
    from_dlpack,
    full,
    full_like,
    linspace,
    meshgrid,
    ones,
    ones_like,
    zeros,
    zeros_like,
)
__all__ += [
    "asarray",
    "arange",
    "empty",
    "empty_like",
    "eye",
    "from_dlpack",
    "full",
    "full_like",
    "linspace",
    "meshgrid",
    "ones",
    "ones_like",
    "zeros",
    "zeros_like",
]
# Data type functions (spec: data type functions section).
from ._data_type_functions import (
    broadcast_arrays,
    broadcast_to,
    can_cast,
    finfo,
    iinfo,
    result_type,
)
__all__ += [
    "broadcast_arrays",
    "broadcast_to",
    "can_cast",
    "finfo",
    "iinfo",
    "result_type",
]
# Dtype objects. Note that "bool" deliberately shadows the builtin inside
# this namespace, matching the dtype naming required by the spec.
from ._dtypes import (
    int8,
    int16,
    int32,
    int64,
    uint8,
    uint16,
    uint32,
    uint64,
    float32,
    float64,
    bool,
)
__all__ += [
    "int8",
    "int16",
    "int32",
    "int64",
    "uint8",
    "uint16",
    "uint32",
    "uint64",
    "float32",
    "float64",
    "bool",
]
# Element-wise functions. Several names (abs, pow, round) shadow builtins by
# design, per the spec's required naming.
from ._elementwise_functions import (
    abs,
    acos,
    acosh,
    add,
    asin,
    asinh,
    atan,
    atan2,
    atanh,
    bitwise_and,
    bitwise_left_shift,
    bitwise_invert,
    bitwise_or,
    bitwise_right_shift,
    bitwise_xor,
    ceil,
    cos,
    cosh,
    divide,
    equal,
    exp,
    expm1,
    floor,
    floor_divide,
    greater,
    greater_equal,
    isfinite,
    isinf,
    isnan,
    less,
    less_equal,
    log,
    log1p,
    log2,
    log10,
    logaddexp,
    logical_and,
    logical_not,
    logical_or,
    logical_xor,
    multiply,
    negative,
    not_equal,
    positive,
    pow,
    remainder,
    round,
    sign,
    sin,
    sinh,
    square,
    sqrt,
    subtract,
    tan,
    tanh,
    trunc,
)
__all__ += [
    "abs",
    "acos",
    "acosh",
    "add",
    "asin",
    "asinh",
    "atan",
    "atan2",
    "atanh",
    "bitwise_and",
    "bitwise_left_shift",
    "bitwise_invert",
    "bitwise_or",
    "bitwise_right_shift",
    "bitwise_xor",
    "ceil",
    "cos",
    "cosh",
    "divide",
    "equal",
    "exp",
    "expm1",
    "floor",
    "floor_divide",
    "greater",
    "greater_equal",
    "isfinite",
    "isinf",
    "isnan",
    "less",
    "less_equal",
    "log",
    "log1p",
    "log2",
    "log10",
    "logaddexp",
    "logical_and",
    "logical_not",
    "logical_or",
    "logical_xor",
    "multiply",
    "negative",
    "not_equal",
    "positive",
    "pow",
    "remainder",
    "round",
    "sign",
    "sin",
    "sinh",
    "square",
    "sqrt",
    "subtract",
    "tan",
    "tanh",
    "trunc",
]
# einsum is not yet implemented in the array API spec.
# from ._linear_algebra_functions import einsum
# __all__ += ['einsum']
# Linear algebra functions (partial; the full extension is TODO, see the
# module docstring).
from ._linear_algebra_functions import matmul, tensordot, transpose, vecdot
__all__ += ["matmul", "tensordot", "transpose", "vecdot"]
# Manipulation functions (spec: manipulation functions section).
from ._manipulation_functions import (
    concat,
    expand_dims,
    flip,
    reshape,
    roll,
    squeeze,
    stack,
)
__all__ += ["concat", "expand_dims", "flip", "reshape", "roll", "squeeze", "stack"]
# Searching, set, and sorting functions.
from ._searching_functions import argmax, argmin, nonzero, where
__all__ += ["argmax", "argmin", "nonzero", "where"]
from ._set_functions import unique
__all__ += ["unique"]
from ._sorting_functions import argsort, sort
__all__ += ["argsort", "sort"]
# Statistical and utility functions. max, min, sum, all, and any shadow the
# corresponding builtins inside this namespace, per the spec.
from ._statistical_functions import max, mean, min, prod, std, sum, var
__all__ += ["max", "mean", "min", "prod", "std", "sum", "var"]
from ._utility_functions import all, any
__all__ += ["all", "any"]
(from numpy/array_api/__init__.py, 9,976 bytes) A NumPy sub-namespace that conforms to the Python array API standard.
This submodule accompanies NEP 47, which proposes its inclusion in NumPy. It
is still considered experimental, and will issue a warning when imported.
This is a proof-of-concept namespace that wraps the corresponding NumPy
functions to give a conforming implementation of the Python array API standard
(https://data-apis.github.io/array-api/latest/). The standard is currently in
an RFC phase and comments on it are both welcome and encouraged. Comments
should be made either at https://github.com/data-apis/array-api or at
https://github.com/data-apis/consortium-feedback/discussions.
NumPy already follows the proposed spec for the most part, so this module
serves mostly as a thin wrapper around it. However, NumPy also implements a
lot of behavior that is not included in the spec, so this serves as a
restricted subset of the API. Only those functions that are part of the spec
are included in this namespace, and all functions are given with the exact
signature given in the spec, including the use of position-only arguments, and
omitting any extra keyword arguments implemented by NumPy but not part of the
spec. The behavior of some functions is also modified from the NumPy behavior
to conform to the standard. Note that the underlying array object itself is
wrapped in a wrapper Array() class, but is otherwise unchanged. This submodule
is implemented in pure Python with no C extensions.
The array API spec is designed as a "minimal API subset" and explicitly allows
libraries to include behaviors not specified by it. But users of this module
that intend to write portable code should be aware that only those behaviors
that are listed in the spec are guaranteed to be implemented across libraries.
Consequently, the NumPy implementation was chosen to be both conforming and
minimal, so that users can use this implementation of the array API namespace
and be sure that behaviors that it defines will be available in conforming
namespaces from other libraries.
A few notes about the current state of this submodule:
- There is a test suite that tests modules against the array API standard at
https://github.com/data-apis/array-api-tests. The test suite is still a work
in progress, but the existing tests pass on this module, with a few
exceptions:
- DLPack support (see https://github.com/data-apis/array-api/pull/106) is
not included here, as it requires a full implementation in NumPy proper
first.
The test suite is not yet complete, and even the tests that exist are not
guaranteed to give a comprehensive coverage of the spec. Therefore, when
reviewing and using this submodule, you should refer to the standard
documents themselves. There are some tests in numpy.array_api.tests, but
they primarily focus on things that are not tested by the official array API
test suite.
- There is a custom array object, numpy.array_api.Array, which is returned by
all functions in this module. All functions in the array API namespace
implicitly assume that they will only receive this object as input. The only
way to create instances of this object is to use one of the array creation
functions. It does not have a public constructor on the object itself. The
object is a small wrapper class around numpy.ndarray. The main purpose of it
is to restrict the namespace of the array object to only those dtypes and
only those methods that are required by the spec, as well as to limit/change
certain behavior that differs in the spec. In particular:
- The array API namespace does not have scalar objects, only 0-D arrays.
Operations on Array that would create a scalar in NumPy create a 0-D
array.
- Indexing: Only a subset of indices supported by NumPy are required by the
spec. The Array object restricts indexing to only allow those types of
indices that are required by the spec. See the docstring of the
numpy.array_api.Array._validate_indices helper function for more
information.
- Type promotion: Some type promotion rules are different in the spec. In
particular, the spec does not have any value-based casting. The spec also
does not require cross-kind casting, like integer -> floating-point. Only
those promotions that are explicitly required by the array API
specification are allowed in this module. See NEP 47 for more info.
- Functions do not automatically call asarray() on their input, and will not
work if the input type is not Array. The exception is array creation
functions, and Python operators on the Array object, which accept Python
scalars of the same type as the array dtype.
- All functions include type annotations, corresponding to those given in the
spec (see _typing.py for definitions of some custom types). These do not
currently fully pass mypy due to some limitations in mypy.
- Dtype objects are just the NumPy dtype objects, e.g., float64 =
np.dtype('float64'). The spec does not require any behavior on these dtype
objects other than that they be accessible by name and be comparable by
equality, but it was considered too much extra complexity to create custom
objects to represent dtypes.
- All places where the implementations in this submodule are known to deviate
from their corresponding functions in NumPy are marked with "# Note:"
comments.
Still TODO in this module are:
- DLPack support for numpy.ndarray is still in progress. See
https://github.com/numpy/numpy/pull/19083.
- The copy=False keyword argument to asarray() is not yet implemented. This
requires support in numpy.asarray() first.
- Some functions are not yet fully tested in the array API test suite, and may
require updates that are not yet known until the tests are written.
- The spec is still in an RFC phase and may still have minor updates, which
will need to be reflected here.
- The linear algebra extension in the spec will be added in a future pull
request.
- Complex number support in array API spec is planned but not yet finalized,
as are the fft extension and certain linear algebra functions such as eig
that require complex dtypes.
einsum is not yet implemented in the array API spec, so its import (`from ._linear_algebra_functions import einsum`) and the matching `__all__ += ['einsum']` entry remain commented out.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_utils import uuidutils
from oslo_utils import versionutils
from nova import availability_zones
from nova import context as nova_context
from nova.db import api as db
from nova import exception
from nova.notifications.objects import base as notification
from nova.notifications.objects import service as service_notification
from nova import objects
from nova.objects import base
from nova.objects import fields
LOG = logging.getLogger(__name__)
# NOTE(danms): This is the global service version counter
SERVICE_VERSION = 35
# NOTE(danms): This is our SERVICE_VERSION history. The idea is that any
# time we bump the version, we will put an entry here to record the change,
# along with any pertinent data. For things that we can programmatically
# detect that need a bump, we put something in _collect_things() below to
# assemble a dict of things we can check. For example, we pretty much always
# want to consider the compute RPC API version a thing that requires a service
# bump so that we can drive version pins from it. We could include other
# service RPC versions at some point, minimum object versions, etc.
#
# The TestServiceVersion test will fail if the calculated set of
# things differs from the value in the last item of the list below,
# indicating that a version bump is needed.
#
# Also note that there are other reasons we may want to bump this,
# which will not be caught by the test. An example of this would be
# triggering (or disabling) an online data migration once all services
# in the cluster are at the same level.
#
# If a version bump is required for something mechanical, just document
# that generic thing here (like compute RPC version bumps). No need to
# replicate the details from compute/rpcapi.py here. However, for more
# complex service interactions, extra detail should be provided
SERVICE_VERSION_HISTORY = (
    # Version 0: Pre-history
    {'compute_rpc': '4.0'},
    # Version 1: Introduction of SERVICE_VERSION
    {'compute_rpc': '4.4'},
    # Version 2: Compute RPC version 4.5
    {'compute_rpc': '4.5'},
    # Version 3: Compute RPC version 4.6
    {'compute_rpc': '4.6'},
    # Version 4: Add PciDevice.parent_addr (data migration needed)
    {'compute_rpc': '4.6'},
    # Version 5: Compute RPC version 4.7
    {'compute_rpc': '4.7'},
    # Version 6: Compute RPC version 4.8
    {'compute_rpc': '4.8'},
    # Version 7: Compute RPC version 4.9
    {'compute_rpc': '4.9'},
    # Version 8: Compute RPC version 4.10
    {'compute_rpc': '4.10'},
    # Version 9: Compute RPC version 4.11
    {'compute_rpc': '4.11'},
    # Version 10: Compute node conversion to Inventories
    {'compute_rpc': '4.11'},
    # Version 11: Compute RPC version 4.12
    {'compute_rpc': '4.12'},
    # Version 12: The network APIs and compute manager support a NetworkRequest
    # object where the network_id value is 'auto' or 'none'. BuildRequest
    # objects are populated by nova-api during instance boot.
    {'compute_rpc': '4.12'},
    # Version 13: Compute RPC version 4.13
    {'compute_rpc': '4.13'},
    # Version 14: The compute manager supports setting device tags.
    {'compute_rpc': '4.13'},
    # Version 15: Indicate that nova-conductor will stop a boot if BuildRequest
    # is deleted before RPC to nova-compute.
    {'compute_rpc': '4.13'},
    # Version 16: Indicate that nova-compute will refuse to start if it doesn't
    # have a placement section configured.
    {'compute_rpc': '4.13'},
    # Version 17: Add 'reserve_volume' to the boot from volume flow and
    # remove 'check_attach'. The service version bump is needed to fall back to
    # the old check in the API as the old computes fail if the volume is moved
    # to 'attaching' state by reserve.
    {'compute_rpc': '4.13'},
    # Version 18: Compute RPC version 4.14
    {'compute_rpc': '4.14'},
    # Version 19: Compute RPC version 4.15
    {'compute_rpc': '4.15'},
    # Version 20: Compute RPC version 4.16
    {'compute_rpc': '4.16'},
    # Version 21: Compute RPC version 4.17
    {'compute_rpc': '4.17'},
    # Version 22: A marker for the behaviour change of auto-healing code on the
    # compute host regarding allocations against an instance
    {'compute_rpc': '4.17'},
    # Version 23: Compute hosts allow pre-creation of the migration object
    # for cold migration.
    {'compute_rpc': '4.18'},
    # Version 24: Add support for Cinder v3 attach/detach API.
    {'compute_rpc': '4.18'},
    # Version 25: Compute hosts allow migration-based allocations
    # for live migration.
    {'compute_rpc': '4.18'},
    # Version 26: Adds a 'host_list' parameter to build_and_run_instance()
    {'compute_rpc': '4.19'},
    # Version 27: Compute RPC version 4.20; adds multiattach argument to
    # reserve_block_device_name().
    {'compute_rpc': '4.20'},
    # Version 28: Adds a 'host_list' parameter to prep_resize()
    {'compute_rpc': '4.21'},
    # Version 29: Compute RPC version 4.22
    {'compute_rpc': '4.22'},
    # Version 30: Compute RPC version 5.0
    {'compute_rpc': '5.0'},
    # Version 31: The compute manager checks if 'trusted_certs' are supported
    {'compute_rpc': '5.0'},
    # Version 32: Add 'file_backed_memory' support. The service version bump is
    # needed to allow the destination of a live migration to reject the
    # migration if 'file_backed_memory' is enabled and the source does not
    # support 'file_backed_memory'
    {'compute_rpc': '5.0'},
    # Version 33: Add support for check on the server group with
    # 'max_server_per_host' rules
    {'compute_rpc': '5.0'},
    # Version 34: Adds support to abort queued/preparing live migrations.
    {'compute_rpc': '5.0'},
    # Version 35: Indicates that nova-compute supports live migration with
    # ports bound early on the destination host using VIFMigrateData.
    {'compute_rpc': '5.0'},
)
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class Service(base.NovaPersistentObject, base.NovaObject,
              base.NovaObjectDictCompat):
    """Versioned object representing a nova service record.

    Wraps a row of the ``services`` table (e.g. a nova-compute or
    nova-conductor process on a host). Instances always carry the current
    global SERVICE_VERSION in their ``version`` field (see __init__), which
    is how minimum-version calculations across the deployment are driven.
    """
    # Version 1.0: Initial version
    # Version 1.1: Added compute_node nested object
    # Version 1.2: String attributes updated to support unicode
    # Version 1.3: ComputeNode version 1.5
    # Version 1.4: Added use_slave to get_by_compute_host
    # Version 1.5: ComputeNode version 1.6
    # Version 1.6: ComputeNode version 1.7
    # Version 1.7: ComputeNode version 1.8
    # Version 1.8: ComputeNode version 1.9
    # Version 1.9: ComputeNode version 1.10
    # Version 1.10: Changes behaviour of loading compute_node
    # Version 1.11: Added get_by_host_and_binary
    # Version 1.12: ComputeNode version 1.11
    # Version 1.13: Added last_seen_up
    # Version 1.14: Added forced_down
    # Version 1.15: ComputeNode version 1.12
    # Version 1.16: Added version
    # Version 1.17: ComputeNode version 1.13
    # Version 1.18: ComputeNode version 1.14
    # Version 1.19: Added get_minimum_version()
    # Version 1.20: Added get_minimum_version_multi()
    # Version 1.21: Added uuid
    # Version 1.22: Added get_by_uuid()
    VERSION = '1.22'
    fields = {
        'id': fields.IntegerField(read_only=True),
        'uuid': fields.UUIDField(),
        'host': fields.StringField(nullable=True),
        'binary': fields.StringField(nullable=True),
        'topic': fields.StringField(nullable=True),
        'report_count': fields.IntegerField(),
        'disabled': fields.BooleanField(),
        'disabled_reason': fields.StringField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),
        'compute_node': fields.ObjectField('ComputeNode'),
        'last_seen_up': fields.DateTimeField(nullable=True),
        'forced_down': fields.BooleanField(),
        'version': fields.IntegerField(),
    }
    # Class-level cache of binary -> minimum version, consulted by
    # get_minimum_version_multi() when caching is enabled.
    _MIN_VERSION_CACHE = {}
    # Caching is opt-in; see enable_min_version_cache().
    _SERVICE_VERSION_CACHING = False
    def __init__(self, *args, **kwargs):
        # NOTE(danms): We're going against the rules here and overriding
        # init. The reason is that we want to *ensure* that we're always
        # setting the current service version on our objects, overriding
        # whatever else might be set in the database, or otherwise (which
        # is the normal reason not to override init).
        #
        # We also need to do this here so that it's set on the client side
        # all the time, such that create() and save() operations will
        # include the current service version.
        if 'version' in kwargs:
            raise exception.ObjectActionError(
                action='init',
                reason='Version field is immutable')
        super(Service, self).__init__(*args, **kwargs)
        self.version = SERVICE_VERSION
    def obj_make_compatible_from_manifest(self, primitive, target_version,
                                          version_manifest):
        """Downgrade a primitive for consumers that speak an older version.

        Strips fields that did not exist before the requested target
        version, and for pre-1.10 targets eagerly embeds compute_node
        (which was not lazy-loaded back then).
        """
        super(Service, self).obj_make_compatible_from_manifest(
            primitive, target_version, version_manifest)
        _target_version = versionutils.convert_version_to_tuple(target_version)
        if _target_version < (1, 21) and 'uuid' in primitive:
            del primitive['uuid']
        if _target_version < (1, 16) and 'version' in primitive:
            del primitive['version']
        if _target_version < (1, 14) and 'forced_down' in primitive:
            del primitive['forced_down']
        if _target_version < (1, 13) and 'last_seen_up' in primitive:
            del primitive['last_seen_up']
        if _target_version < (1, 10):
            # service.compute_node was not lazy-loaded, we need to provide it
            # when called
            self._do_compute_node(self._context, primitive,
                                  version_manifest)
    def _do_compute_node(self, context, primitive, version_manifest):
        """Embed a backlevel compute_node primitive for pre-1.10 consumers.

        Best-effort: any failure (no nodes, missing manifest entry) leaves
        the primitive untouched.
        """
        try:
            target_version = version_manifest['ComputeNode']
            # NOTE(sbauza): Ironic deployments can have multiple
            # nodes for the same service, but for keeping same behaviour,
            # returning only the first elem of the list
            compute = objects.ComputeNodeList.get_all_by_host(
                context, primitive['host'])[0]
        except Exception:
            return
        primitive['compute_node'] = compute.obj_to_primitive(
            target_version=target_version,
            version_manifest=version_manifest)
    @staticmethod
    def _from_db_object(context, service, db_service):
        """Populate a Service object from a database row.

        Note that this may write back to the database: services that
        predate the uuid column get one generated and saved here.
        """
        allow_missing = ('availability_zone',)
        for key in service.fields:
            if key in allow_missing and key not in db_service:
                continue
            if key == 'compute_node':
                # NOTE(sbauza); We want to only lazy-load compute_node
                continue
            elif key == 'version':
                # NOTE(danms): Special handling of the version field, since
                # it is read_only and set in our init.
                setattr(service, base.get_attrname(key), db_service[key])
            elif key == 'uuid' and not db_service.get(key):
                # Leave uuid off the object if undefined in the database
                # so that it will be generated below.
                continue
            else:
                service[key] = db_service[key]
        service._context = context
        service.obj_reset_changes()
        # TODO(dpeschman): Drop this once all services have uuids in database
        if 'uuid' not in service:
            service.uuid = uuidutils.generate_uuid()
            LOG.debug('Generated UUID %(uuid)s for service %(id)i',
                      dict(uuid=service.uuid, id=service.id))
            service.save()
        return service
    def obj_load_attr(self, attrname):
        """Lazy-load an attribute; only compute_node is supported.

        :raises: OrphanedObjectError if the object has no context,
                 ObjectActionError for any attribute other than
                 compute_node, ServiceNotFound for non-compute services.
        """
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())
        LOG.debug("Lazy-loading '%(attr)s' on %(name)s id %(id)s",
                  {'attr': attrname,
                   'name': self.obj_name(),
                   'id': self.id,
                   })
        if attrname != 'compute_node':
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason='attribute %s not lazy-loadable' % attrname)
        if self.binary == 'nova-compute':
            # Only n-cpu services have attached compute_node(s)
            compute_nodes = objects.ComputeNodeList.get_all_by_host(
                self._context, self.host)
        else:
            # NOTE(sbauza); Previous behaviour was raising a ServiceNotFound,
            # we keep it for backwards compatibility
            raise exception.ServiceNotFound(service_id=self.id)
        # NOTE(sbauza): Ironic deployments can have multiple nodes
        # for the same service, but for keeping same behaviour, returning only
        # the first elem of the list
        self.compute_node = compute_nodes[0]
    @base.remotable_classmethod
    def get_by_id(cls, context, service_id):
        """Return the service with the given database id."""
        db_service = db.service_get(context, service_id)
        return cls._from_db_object(context, cls(), db_service)
    @base.remotable_classmethod
    def get_by_uuid(cls, context, service_uuid):
        """Return the service with the given uuid."""
        db_service = db.service_get_by_uuid(context, service_uuid)
        return cls._from_db_object(context, cls(), db_service)
    @base.remotable_classmethod
    def get_by_host_and_topic(cls, context, host, topic):
        """Return the service on *host* registered under *topic*."""
        db_service = db.service_get_by_host_and_topic(context, host, topic)
        return cls._from_db_object(context, cls(), db_service)
    @base.remotable_classmethod
    def get_by_host_and_binary(cls, context, host, binary):
        """Return the service on *host* running *binary*, or None."""
        try:
            db_service = db.service_get_by_host_and_binary(context,
                                                           host, binary)
        except exception.HostBinaryNotFound:
            return
        return cls._from_db_object(context, cls(), db_service)
    @staticmethod
    @db.select_db_reader_mode
    def _db_service_get_by_compute_host(context, host, use_slave=False):
        # use_slave is consumed by the select_db_reader_mode decorator.
        return db.service_get_by_compute_host(context, host)
    @base.remotable_classmethod
    def get_by_compute_host(cls, context, host, use_slave=False):
        """Return the nova-compute service for *host*."""
        db_service = cls._db_service_get_by_compute_host(context, host,
                                                         use_slave=use_slave)
        return cls._from_db_object(context, cls(), db_service)
    # NOTE(ndipanov): This is deprecated and should be removed on the next
    # major version bump
    @base.remotable_classmethod
    def get_by_args(cls, context, host, binary):
        """Deprecated: use get_by_host_and_binary() instead."""
        db_service = db.service_get_by_host_and_binary(context, host, binary)
        return cls._from_db_object(context, cls(), db_service)
    def _check_minimum_version(self):
        """Enforce that we are not older than the minimum version.
        This is a loose check to avoid creating or updating our service
        record if we would do so with a version that is older than the current
        minimum of all services. This could happen if we were started with
        older code by accident, either due to a rollback or an old and
        un-updated node suddenly coming back onto the network.
        There is technically a race here between the check and the update,
        but since the minimum version should always roll forward and never
        backwards, we don't need to worry about doing it atomically. Further,
        the consequence for getting this wrong is minor, in that we'll just
        fail to send messages that other services understand.
        """
        if not self.obj_attr_is_set('version'):
            return
        if not self.obj_attr_is_set('binary'):
            return
        minver = self.get_minimum_version(self._context, self.binary)
        if minver > self.version:
            raise exception.ServiceTooOld(thisver=self.version,
                                          minver=minver)
    @base.remotable
    def create(self):
        """Persist this service record, generating a uuid if needed.

        :raises: ObjectActionError if already created, ServiceTooOld if our
                 version is below the deployment minimum.
        """
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        self._check_minimum_version()
        updates = self.obj_get_changes()
        if 'uuid' not in updates:
            updates['uuid'] = uuidutils.generate_uuid()
            self.uuid = updates['uuid']
        db_service = db.service_create(self._context, updates)
        self._from_db_object(self._context, self, db_service)
        self._send_notification(fields.NotificationAction.CREATE)
    @base.remotable
    def save(self):
        """Persist changed fields (id is never updated)."""
        updates = self.obj_get_changes()
        updates.pop('id', None)
        self._check_minimum_version()
        db_service = db.service_update(self._context, self.id, updates)
        self._from_db_object(self._context, self, db_service)
        self._send_status_update_notification(updates)
    def _send_status_update_notification(self, updates):
        # Note(gibi): We do not trigger notification on version as that field
        # is always dirty, which would cause that nova sends notification on
        # every other field change. See the comment in save() too.
        if set(updates.keys()).intersection(
                {'disabled', 'disabled_reason', 'forced_down'}):
            self._send_notification(fields.NotificationAction.UPDATE)
    def _send_notification(self, action):
        """Emit a versioned service.status notification for *action*."""
        payload = service_notification.ServiceStatusPayload(self)
        service_notification.ServiceStatusNotification(
            publisher=notification.NotificationPublisher.from_service_obj(
                self),
            event_type=notification.EventType(
                object='service',
                action=action),
            priority=fields.NotificationPriority.INFO,
            payload=payload).emit(self._context)
    @base.remotable
    def destroy(self):
        """Delete this service record and emit a DELETE notification."""
        db.service_destroy(self._context, self.id)
        self._send_notification(fields.NotificationAction.DELETE)
    @classmethod
    def enable_min_version_cache(cls):
        """Turn on minimum-version caching (clearing any stale entries)."""
        cls.clear_min_version_cache()
        cls._SERVICE_VERSION_CACHING = True
    @classmethod
    def clear_min_version_cache(cls):
        """Drop all cached minimum-version results."""
        cls._MIN_VERSION_CACHE = {}
    @staticmethod
    @db.select_db_reader_mode
    def _db_service_get_minimum_version(context, binaries, use_slave=False):
        # use_slave is consumed by the select_db_reader_mode decorator.
        return db.service_get_minimum_version(context, binaries)
    @base.remotable_classmethod
    def get_minimum_version_multi(cls, context, binaries, use_slave=False):
        """Return the lowest service version across the given binaries.

        Results are cached per binary when caching is enabled via
        enable_min_version_cache(). Missing versions count as 0.

        :raises: ObjectActionError if any binary lacks the 'nova-' prefix.
        """
        if not all(binary.startswith('nova-') for binary in binaries):
            LOG.warning('get_minimum_version called with likely-incorrect '
                        'binaries `%s\'', ','.join(binaries))
            raise exception.ObjectActionError(action='get_minimum_version',
                                              reason='Invalid binary prefix')
        if (not cls._SERVICE_VERSION_CACHING or
              any(binary not in cls._MIN_VERSION_CACHE
                  for binary in binaries)):
            min_versions = cls._db_service_get_minimum_version(
                context, binaries, use_slave=use_slave)
            if min_versions:
                min_versions = {binary: version or 0
                                for binary, version in
                                min_versions.items()}
                cls._MIN_VERSION_CACHE.update(min_versions)
        else:
            min_versions = {binary: cls._MIN_VERSION_CACHE[binary]
                            for binary in binaries}
        if min_versions:
            version = min(min_versions.values())
        else:
            version = 0
        # NOTE(danms): Since our return value is not controlled by object
        # schema, be explicit here.
        version = int(version)
        return version
    @base.remotable_classmethod
    def get_minimum_version(cls, context, binary, use_slave=False):
        """Return the lowest service version for a single binary."""
        return cls.get_minimum_version_multi(context, [binary],
                                             use_slave=use_slave)
def get_minimum_version_all_cells(context, binaries, require_all=False):
    """Get the minimum service version, checking all cells.
    This attempts to calculate the minimum service version for a set
    of binaries across all the cells in the system. If require_all
    is False, then any cells that fail to report a version will be
    ignored (assuming they won't be candidates for scheduling and thus
    excluding them from the minimum version calculation is reasonable).
    If require_all is True, then a failing cell will cause this to raise
    exception.CellTimeout, as would be appropriate for gating some
    data migration until everything is new enough.
    Note that services that do not report a positive version are excluded
    from this, as it crosses all cells which will naturally not have all
    services.
    """
    if not all(binary.startswith('nova-') for binary in binaries):
        LOG.warning('get_minimum_version_all_cells called with '
                    'likely-incorrect binaries `%s\'', ','.join(binaries))
        raise exception.ObjectActionError(
            action='get_minimum_version_all_cells',
            reason='Invalid binary prefix')
    # NOTE(danms): Instead of using Service.get_minimum_version_multi(), we
    # replicate the call directly to the underlying DB method here because
    # we want to defeat the caching and we need to filter non-present
    # services differently from the single-cell method.
    results = nova_context.scatter_gather_all_cells(
        context,
        Service._db_service_get_minimum_version,
        binaries)
    # Collect one minimum per responsive cell, then reduce at the end.
    per_cell_minimums = []
    for cell_uuid, result in results.items():
        if result is nova_context.did_not_respond_sentinel:
            LOG.warning('Cell %s did not respond when getting minimum '
                        'service version', cell_uuid)
            if require_all:
                raise exception.CellTimeout()
            continue
        if result is nova_context.raised_exception_sentinel:
            LOG.warning('Failed to get minimum service version for cell %s',
                        cell_uuid)
            if require_all:
                # NOTE(danms): Okay, this isn't necessarily a timeout, but
                # it's functionally the same from the caller's perspective
                # and we logged the fact that it was actually a failure
                # for the forensic investigator during the scatter/gather
                # routine.
                raise exception.CellTimeout()
            continue
        # NOTE(danms): Don't consider a zero or None result as the minimum
        # since we're crossing cells and will likely not have all the
        # services being probed.
        reported = [version for version in result.values() if version]
        if reported:
            per_cell_minimums.append(min(reported))
    # NOTE(danms): If we got no matches at all (such as at first startup)
    # then report that as zero to be consistent with the other such
    # methods.
    if not per_cell_minimums:
        return 0
    return min(per_cell_minimums)
@base.NovaObjectRegistry.register
class ServiceList(base.ObjectListBase, base.NovaObject):
    """A versioned-object list of Service objects."""
    # Version 1.0: Initial version
    #              Service <= version 1.2
    # Version 1.1 Service version 1.3
    # Version 1.2: Service version 1.4
    # Version 1.3: Service version 1.5
    # Version 1.4: Service version 1.6
    # Version 1.5: Service version 1.7
    # Version 1.6: Service version 1.8
    # Version 1.7: Service version 1.9
    # Version 1.8: Service version 1.10
    # Version 1.9: Added get_by_binary() and Service version 1.11
    # Version 1.10: Service version 1.12
    # Version 1.11: Service version 1.13
    # Version 1.12: Service version 1.14
    # Version 1.13: Service version 1.15
    # Version 1.14: Service version 1.16
    # Version 1.15: Service version 1.17
    # Version 1.16: Service version 1.18
    # Version 1.17: Service version 1.19
    # Version 1.18: Added include_disabled parameter to get_by_binary()
    # Version 1.19: Added get_all_computes_by_hv_type()
    VERSION = '1.19'
    fields = {
        'objects': fields.ListOfObjectsField('Service'),
    }
    @base.remotable_classmethod
    def get_by_topic(cls, context, topic):
        """List all services registered under the given topic."""
        return base.obj_make_list(
            context, cls(context), objects.Service,
            db.service_get_all_by_topic(context, topic))
    # NOTE(paul-carlton2): In v2.0 of the object the include_disabled flag
    # will be removed so both enabled and disabled hosts are returned
    @base.remotable_classmethod
    def get_by_binary(cls, context, binary, include_disabled=False):
        """List all services running the given binary."""
        records = db.service_get_all_by_binary(
            context, binary, include_disabled=include_disabled)
        return base.obj_make_list(context, cls(context), objects.Service,
                                  records)
    @base.remotable_classmethod
    def get_by_host(cls, context, host):
        """List all services on the given host."""
        return base.obj_make_list(
            context, cls(context), objects.Service,
            db.service_get_all_by_host(context, host))
    @base.remotable_classmethod
    def get_all(cls, context, disabled=None, set_zones=False):
        """List every service, optionally filtered by disabled state.

        When set_zones is True, availability zone information is attached
        to each record before conversion.
        """
        records = db.service_get_all(context, disabled=disabled)
        if set_zones:
            records = availability_zones.set_availability_zones(
                context, records)
        return base.obj_make_list(context, cls(context), objects.Service,
                                  records)
    @base.remotable_classmethod
    def get_all_computes_by_hv_type(cls, context, hv_type):
        """List all enabled compute services with the given hypervisor type."""
        records = db.service_get_all_computes_by_hv_type(
            context, hv_type, include_disabled=False)
        return base.obj_make_list(context, cls(context), objects.Service,
                                  records)
(from nova/objects/service.py, 26,827 bytes) Enforce that we are not older than the minimum version.
This is a loose check to avoid creating or updating our service
record if we would do so with a version that is older than the current
minimum of all services. This could happen if we were started with
older code by accident, either due to a rollback or an old and
un-updated node suddenly coming back onto the network.
There is technically a race here between the check and the update,
but since the minimum version should always roll forward and never
backwards, we don't need to worry about doing it atomically. Further,
the consequence for getting this wrong is minor, in that we'll just
fail to send messages that other services understand.
Get the minimum service version, checking all cells.
This attempts to calculate the minimum service version for a set
of binaries across all the cells in the system. If require_all
is False, then any cells that fail to report a version will be
ignored (assuming they won't be candidates for scheduling and thus
excluding them from the minimum version calculation is reasonable).
If require_all is True, then a failing cell will cause this to raise
exception.CellTimeout, as would be appropriate for gating some
data migration until everything is new enough.
Note that services that do not report a positive version are excluded
from this, as it crosses all cells which will naturally not have all
services.
Copyright 2013 IBM Corp. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. NOTE(danms): This is the global service version counter NOTE(danms): This is our SERVICE_VERSION history. The idea is that any time we bump the version, we will put an entry here to record the change, along with any pertinent data. For things that we can programatically detect that need a bump, we put something in _collect_things() below to assemble a dict of things we can check. For example, we pretty much always want to consider the compute RPC API version a thing that requires a service bump so that we can drive version pins from it. We could include other service RPC versions at some point, minimum object versions, etc. The TestServiceVersion test will fail if the calculated set of things differs from the value in the last item of the list below, indicating that a version bump is needed. Also note that there are other reasons we may want to bump this, which will not be caught by the test. An example of this would be triggering (or disabling) an online data migration once all services in the cluster are at the same level. If a version bump is required for something mechanical, just document that generic thing here (like compute RPC version bumps). No need to replicate the details from compute/rpcapi.py here. 
However, for more complex service interactions, extra detail should be provided Version 0: Pre-history Version 1: Introduction of SERVICE_VERSION Version 2: Compute RPC version 4.5 Version 3: Compute RPC version 4.6 Version 4: Add PciDevice.parent_addr (data migration needed) Version 5: Compute RPC version 4.7 Version 6: Compute RPC version 4.8 Version 7: Compute RPC version 4.9 Version 8: Compute RPC version 4.10 Version 9: Compute RPC version 4.11 Version 10: Compute node conversion to Inventories Version 11: Compute RPC version 4.12 Version 12: The network APIs and compute manager support a NetworkRequest object where the network_id value is 'auto' or 'none'. BuildRequest objects are populated by nova-api during instance boot. Version 13: Compute RPC version 4.13 Version 14: The compute manager supports setting device tags. Version 15: Indicate that nova-conductor will stop a boot if BuildRequest is deleted before RPC to nova-compute. Version 16: Indicate that nova-compute will refuse to start if it doesn't have a placement section configured. Version 17: Add 'reserve_volume' to the boot from volume flow and remove 'check_attach'. The service version bump is needed to fall back to the old check in the API as the old computes fail if the volume is moved to 'attaching' state by reserve. Version 18: Compute RPC version 4.14 Version 19: Compute RPC version 4.15 Version 20: Compute RPC version 4.16 Version 21: Compute RPC version 4.17 Version 22: A marker for the behaviour change of auto-healing code on the compute host regarding allocations against an instance Version 23: Compute hosts allow pre-creation of the migration object for cold migration. Version 24: Add support for Cinder v3 attach/detach API. Version 25: Compute hosts allow migration-based allocations for live migration. Version 26: Adds a 'host_list' parameter to build_and_run_instance() Version 27: Compute RPC version 4.20; adds multiattach argument to reserve_block_device_name(). 
Version 28: Adds a 'host_list' parameter to prep_resize() Version 29: Compute RPC version 4.22 Version 30: Compute RPC version 5.0 Version 31: The compute manager checks if 'trusted_certs' are supported Version 32: Add 'file_backed_memory' support. The service version bump is needed to allow the destination of a live migration to reject the migration if 'file_backed_memory' is enabled and the source does not support 'file_backed_memory' Version 33: Add support for check on the server group with 'max_server_per_host' rules Version 34: Adds support to abort queued/preparing live migrations. Version 35: Indicates that nova-compute supports live migration with ports bound early on the destination host using VIFMigrateData. TODO(berrange): Remove NovaObjectDictCompat Version 1.0: Initial version Version 1.1: Added compute_node nested object Version 1.2: String attributes updated to support unicode Version 1.3: ComputeNode version 1.5 Version 1.4: Added use_slave to get_by_compute_host Version 1.5: ComputeNode version 1.6 Version 1.6: ComputeNode version 1.7 Version 1.7: ComputeNode version 1.8 Version 1.8: ComputeNode version 1.9 Version 1.9: ComputeNode version 1.10 Version 1.10: Changes behaviour of loading compute_node Version 1.11: Added get_by_host_and_binary Version 1.12: ComputeNode version 1.11 Version 1.13: Added last_seen_up Version 1.14: Added forced_down Version 1.15: ComputeNode version 1.12 Version 1.16: Added version Version 1.17: ComputeNode version 1.13 Version 1.18: ComputeNode version 1.14 Version 1.19: Added get_minimum_version() Version 1.20: Added get_minimum_version_multi() Version 1.21: Added uuid Version 1.22: Added get_by_uuid() NOTE(danms): We're going against the rules here and overriding init. The reason is that we want to *ensure* that we're always setting the current service version on our objects, overriding whatever else might be set in the database, or otherwise (which is the normal reason not to override init). 
We also need to do this here so that it's set on the client side all the time, such that create() and save() operations will include the current service version. service.compute_node was not lazy-loaded, we need to provide it when called NOTE(sbauza): Ironic deployments can have multiple nodes for the same service, but for keeping same behaviour, returning only the first elem of the list NOTE(sbauza); We want to only lazy-load compute_node NOTE(danms): Special handling of the version field, since it is read_only and set in our init. Leave uuid off the object if undefined in the database so that it will be generated below. TODO(dpeschman): Drop this once all services have uuids in database Only n-cpu services have attached compute_node(s) NOTE(sbauza); Previous behaviour was raising a ServiceNotFound, we keep it for backwards compatibility NOTE(sbauza): Ironic deployments can have multiple nodes for the same service, but for keeping same behaviour, returning only the first elem of the list NOTE(ndipanov): This is deprecated and should be removed on the next major version bump Note(gibi): We do not trigger notification on version as that field is always dirty, which would cause that nova sends notification on every other field change. See the comment in save() too. NOTE(danms): Since our return value is not controlled by object schema, be explicit here. NOTE(danms): Instead of using Service.get_minimum_version_multi(), we replicate the call directly to the underlying DB method here because we want to defeat the caching and we need to filter non-present services differently from the single-cell method. NOTE(danms): Okay, this isn't necessarily a timeout, but it's functionally the same from the caller's perspective and we logged the fact that it was actually a failure for the forensic investigator during the scatter/gather routine. 
NOTE(danms): Don't consider a zero or None result as the minimum since we're crossing cells and will likely not have all the services being probed. NOTE(danms): If we got no matches at all (such as at first startup) then report that as zero to be consistent with the other such methods. Version 1.0: Initial version Service <= version 1.2 Version 1.1 Service version 1.3 Version 1.2: Service version 1.4 Version 1.3: Service version 1.5 Version 1.4: Service version 1.6 Version 1.5: Service version 1.7 Version 1.6: Service version 1.8 Version 1.7: Service version 1.9 Version 1.8: Service version 1.10 Version 1.9: Added get_by_binary() and Service version 1.11 Version 1.10: Service version 1.12 Version 1.11: Service version 1.13 Version 1.12: Service version 1.14 Version 1.13: Service version 1.15 Version 1.14: Service version 1.16 Version 1.15: Service version 1.17 Version 1.16: Service version 1.18 Version 1.17: Service version 1.19 Version 1.18: Added include_disabled parameter to get_by_binary() Version 1.19: Added get_all_computes_by_hv_type() NOTE(paul-carlton2): In v2.0 of the object the include_disabled flag will be removed so both enabled and disabled hosts are returned | 10,242 | en | 0.874712 |
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for the album art fetchers."""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
import os
import shutil
import responses
from mock import patch
from test import _common
from test._common import unittest
from beetsplug import fetchart
from beets.autotag import AlbumInfo, AlbumMatch
from beets import library
from beets import importer
from beets import config
from beets import logging
from beets import util
from beets.util.artresizer import ArtResizer, WEBPROXY
logger = logging.getLogger('beets.test_art')
class UseThePlugin(_common.TestCase):
    """Base test case that instantiates the fetchart plugin for each test."""

    def setUp(self):
        super(UseThePlugin, self).setUp()
        self.plugin = fetchart.FetchArtPlugin()
class FetchImageTest(UseThePlugin):
    """Check how ``_fetch_image`` reacts to remote content types."""

    @responses.activate
    def run(self, *args, **kwargs):
        # Activate the `responses` HTTP mock for every test in this class.
        super(FetchImageTest, self).run(*args, **kwargs)

    def mock_response(self, content_type):
        responses.add(responses.GET, 'http://example.com',
                      content_type=content_type)

    def test_invalid_type_returns_none(self):
        self.mock_response('image/watercolour')
        path = self.plugin._fetch_image('http://example.com')
        self.assertEqual(path, None)

    def test_jpeg_type_returns_path(self):
        self.mock_response('image/jpeg')
        path = self.plugin._fetch_image('http://example.com')
        self.assertNotEqual(path, None)
class FSArtTest(UseThePlugin):
    """Tests for the local filesystem art source."""

    def setUp(self):
        super(FSArtTest, self).setUp()
        self.dpath = os.path.join(self.temp_dir, 'arttest')
        os.mkdir(self.dpath)
        self.source = fetchart.FileSystem(logger, self.plugin.config)

    def _lookup(self, cover_names, cautious):
        # Small wrapper so each test reads as a one-liner.
        return self.source.get(self.dpath, cover_names, cautious)

    def test_finds_jpg_in_directory(self):
        _common.touch(os.path.join(self.dpath, 'a.jpg'))
        self.assertEqual(self._lookup(('art',), False),
                         os.path.join(self.dpath, 'a.jpg'))

    def test_appropriately_named_file_takes_precedence(self):
        _common.touch(os.path.join(self.dpath, 'a.jpg'))
        _common.touch(os.path.join(self.dpath, 'art.jpg'))
        self.assertEqual(self._lookup(('art',), False),
                         os.path.join(self.dpath, 'art.jpg'))

    def test_non_image_file_not_identified(self):
        _common.touch(os.path.join(self.dpath, 'a.txt'))
        self.assertEqual(self._lookup(('art',), False), None)

    def test_cautious_skips_fallback(self):
        # In cautious mode a generic image must not be used as a fallback.
        _common.touch(os.path.join(self.dpath, 'a.jpg'))
        self.assertEqual(self._lookup(('art',), True), None)

    def test_empty_dir(self):
        self.assertEqual(self._lookup(('art',), True), None)

    def test_precedence_amongst_correct_files(self):
        _common.touch(os.path.join(self.dpath, 'back.jpg'))
        _common.touch(os.path.join(self.dpath, 'front.jpg'))
        _common.touch(os.path.join(self.dpath, 'front-cover.jpg'))
        self.assertEqual(self._lookup(('cover', 'front', 'back'), False),
                         os.path.join(self.dpath, 'front-cover.jpg'))
class CombinedTest(UseThePlugin):
    """Exercise ``art_for_album``, which combines the filesystem source
    with the remote sources (Amazon, AlbumArt.org, Cover Art Archive).

    HTTP traffic is intercepted with ``responses``; a mocked response
    whose content type is ``image/*`` counts as a successful fetch.
    """
    ASIN = 'xxxx'
    MBID = 'releaseid'
    AMAZON_URL = 'http://images.amazon.com/images/P/{0}.01.LZZZZZZZ.jpg' \
        .format(ASIN)
    AAO_URL = 'http://www.albumart.org/index_detail.php?asin={0}' \
        .format(ASIN)
    CAA_URL = 'http://coverartarchive.org/release/{0}/front' \
        .format(MBID)

    def setUp(self):
        super(CombinedTest, self).setUp()
        # Empty directory that serves as the local art search path.
        self.dpath = os.path.join(self.temp_dir, 'arttest')
        os.mkdir(self.dpath)

    @responses.activate
    def run(self, *args, **kwargs):
        # Activate the `responses` HTTP mock for every test in this class.
        super(CombinedTest, self).run(*args, **kwargs)

    def mock_response(self, url, content_type='image/jpeg'):
        # Register a GET handler; image/* content types are accepted as art.
        responses.add(responses.GET, url, content_type=content_type)

    def test_main_interface_returns_amazon_art(self):
        self.mock_response(self.AMAZON_URL)
        album = _common.Bag(asin=self.ASIN)
        artpath = self.plugin.art_for_album(album, None)
        self.assertNotEqual(artpath, None)

    def test_main_interface_returns_none_for_missing_asin_and_path(self):
        # No ASIN, no MBID and no local path: nothing can be fetched.
        album = _common.Bag()
        artpath = self.plugin.art_for_album(album, None)
        self.assertEqual(artpath, None)

    def test_main_interface_gives_precedence_to_fs_art(self):
        # A local file wins over a (mocked) remote source.
        _common.touch(os.path.join(self.dpath, 'art.jpg'))
        self.mock_response(self.AMAZON_URL)
        album = _common.Bag(asin=self.ASIN)
        artpath = self.plugin.art_for_album(album, [self.dpath])
        self.assertEqual(artpath, os.path.join(self.dpath, 'art.jpg'))

    def test_main_interface_falls_back_to_amazon(self):
        # With no local art, the remote source is used instead.
        self.mock_response(self.AMAZON_URL)
        album = _common.Bag(asin=self.ASIN)
        artpath = self.plugin.art_for_album(album, [self.dpath])
        self.assertNotEqual(artpath, None)
        self.assertFalse(artpath.startswith(self.dpath))

    def test_main_interface_tries_amazon_before_aao(self):
        # Source ordering: exactly one request, and it goes to Amazon.
        self.mock_response(self.AMAZON_URL)
        album = _common.Bag(asin=self.ASIN)
        self.plugin.art_for_album(album, [self.dpath])
        self.assertEqual(len(responses.calls), 1)
        self.assertEqual(responses.calls[0].request.url, self.AMAZON_URL)

    def test_main_interface_falls_back_to_aao(self):
        # A non-image Amazon response makes the plugin try AlbumArt.org.
        self.mock_response(self.AMAZON_URL, content_type='text/html')
        album = _common.Bag(asin=self.ASIN)
        self.plugin.art_for_album(album, [self.dpath])
        self.assertEqual(responses.calls[-1].request.url, self.AAO_URL)

    def test_main_interface_uses_caa_when_mbid_available(self):
        # With a MusicBrainz ID, the Cover Art Archive is queried first.
        self.mock_response(self.CAA_URL)
        album = _common.Bag(mb_albumid=self.MBID, asin=self.ASIN)
        artpath = self.plugin.art_for_album(album, None)
        self.assertNotEqual(artpath, None)
        self.assertEqual(len(responses.calls), 1)
        self.assertEqual(responses.calls[0].request.url, self.CAA_URL)

    def test_local_only_does_not_access_network(self):
        album = _common.Bag(mb_albumid=self.MBID, asin=self.ASIN)
        artpath = self.plugin.art_for_album(album, [self.dpath],
                                            local_only=True)
        self.assertEqual(artpath, None)
        # local_only must never hit the network.
        self.assertEqual(len(responses.calls), 0)

    def test_local_only_gets_fs_image(self):
        _common.touch(os.path.join(self.dpath, 'art.jpg'))
        album = _common.Bag(mb_albumid=self.MBID, asin=self.ASIN)
        artpath = self.plugin.art_for_album(album, [self.dpath],
                                            local_only=True)
        self.assertEqual(artpath, os.path.join(self.dpath, 'art.jpg'))
        self.assertEqual(len(responses.calls), 0)
class AAOTest(UseThePlugin):
    """Tests for the AlbumArt.org scraper source."""
    ASIN = 'xxxx'
    AAO_URL = 'http://www.albumart.org/index_detail.php?asin={0}'.format(ASIN)

    def setUp(self):
        super(AAOTest, self).setUp()
        self.source = fetchart.AlbumArtOrg(logger, self.plugin.config)

    @responses.activate
    def run(self, *args, **kwargs):
        # Activate the `responses` HTTP mock for every test in this class.
        super(AAOTest, self).run(*args, **kwargs)

    def mock_response(self, url, body):
        responses.add(responses.GET, url, body=body, content_type='text/html',
                      match_querystring=True)

    def test_aao_scraper_finds_image(self):
        # A snippet of a real AlbumArt.org page; the scraper should pull
        # the href out of the "View larger image" anchor.
        body = b"""
        <br />
        <a href=\"TARGET_URL\" title=\"View larger image\"
        class=\"thickbox\" style=\"color: #7E9DA2; text-decoration:none;\">
        <img src=\"http://www.albumart.org/images/zoom-icon.jpg\"
        alt=\"View larger image\" width=\"17\" height=\"15\" border=\"0\"/></a>
        """
        self.mock_response(self.AAO_URL, body)
        album = _common.Bag(asin=self.ASIN)
        candidates = list(self.source.get(album))
        self.assertEqual(candidates[0], 'TARGET_URL')

    def test_aao_scraper_returns_no_result_when_no_image_present(self):
        self.mock_response(self.AAO_URL, b'blah blah')
        album = _common.Bag(asin=self.ASIN)
        candidates = list(self.source.get(album))
        self.assertEqual(candidates, [])
class GoogleImageTest(UseThePlugin):
    """Tests for the Google custom-search art source."""

    def setUp(self):
        super(GoogleImageTest, self).setUp()
        self.source = fetchart.GoogleImages(logger, self.plugin.config)

    @responses.activate
    def run(self, *args, **kwargs):
        # Activate the `responses` HTTP mock for every test in this class.
        super(GoogleImageTest, self).run(*args, **kwargs)

    def mock_response(self, url, payload):
        responses.add(responses.GET, url, body=payload,
                      content_type='application/json')

    def test_google_art_finds_image(self):
        album = _common.Bag(albumartist="some artist", album="some album")
        payload = b'{"items": [{"link": "url_to_the_image"}]}'
        self.mock_response(fetchart.GoogleImages.URL, payload)
        urls = list(self.source.get(album))
        self.assertEqual(urls[0], 'url_to_the_image')

    def test_google_art_returns_no_result_when_error_received(self):
        album = _common.Bag(albumartist="some artist", album="some album")
        payload = b'{"error": {"errors": [{"reason": "some reason"}]}}'
        self.mock_response(fetchart.GoogleImages.URL, payload)
        urls = list(self.source.get(album))
        self.assertEqual(urls, [])

    def test_google_art_returns_no_result_with_malformed_response(self):
        album = _common.Bag(albumartist="some artist", album="some album")
        payload = b"""bla blup"""
        self.mock_response(fetchart.GoogleImages.URL, payload)
        urls = list(self.source.get(album))
        self.assertEqual(urls, [])
@_common.slow_test()
class ArtImporterTest(UseThePlugin):
    """End-to-end tests for the importer hooks (`fetch_art`/`assign_art`),
    using a stubbed ``art_for_album`` so no real art lookup happens.
    """

    def setUp(self):
        super(ArtImporterTest, self).setUp()

        # Mock the album art fetcher to always return our test file.
        self.art_file = os.path.join(self.temp_dir, 'tmpcover.jpg')
        _common.touch(self.art_file)
        self.old_afa = self.plugin.art_for_album
        # Tests change this attribute to control the stub's return value.
        self.afa_response = self.art_file

        def art_for_album(i, p, local_only=False):
            return self.afa_response

        self.plugin.art_for_album = art_for_album

        # Test library.
        self.libpath = os.path.join(self.temp_dir, 'tmplib.blb')
        self.libdir = os.path.join(self.temp_dir, 'tmplib')
        os.mkdir(self.libdir)
        os.mkdir(os.path.join(self.libdir, 'album'))
        itempath = os.path.join(self.libdir, 'album', 'test.mp3')
        shutil.copyfile(os.path.join(_common.RSRC, 'full.mp3'), itempath)
        self.lib = library.Library(self.libpath)
        self.i = _common.item()
        self.i.path = itempath
        self.album = self.lib.add_album([self.i])
        self.lib._connection().commit()

        # The import configuration.
        self.session = _common.import_session(self.lib)

        # Import task for the coroutine.
        self.task = importer.ImportTask(None, None, [self.i])
        self.task.is_album = True
        self.task.album = self.album
        info = AlbumInfo(
            album='some album',
            album_id='albumid',
            artist='some artist',
            artist_id='artistid',
            tracks=[],
        )
        self.task.set_choice(AlbumMatch(0, info, {}, set(), set()))

    def tearDown(self):
        self.lib._connection().close()
        super(ArtImporterTest, self).tearDown()
        # Restore the real fetcher patched out in setUp.
        self.plugin.art_for_album = self.old_afa

    def _fetch_art(self, should_exist):
        """Execute the fetch_art coroutine for the task and return the
        album's resulting artpath. ``should_exist`` specifies whether to
        assert that art path was set (to the correct value) or that
        the path was not set.
        """
        # Execute the two relevant parts of the importer.
        self.plugin.fetch_art(self.session, self.task)
        self.plugin.assign_art(self.session, self.task)

        artpath = self.lib.albums()[0].artpath
        if should_exist:
            self.assertEqual(
                artpath,
                os.path.join(os.path.dirname(self.i.path), 'cover.jpg')
            )
            self.assertExists(artpath)
        else:
            self.assertEqual(artpath, None)
        return artpath

    def test_fetch_art(self):
        assert not self.lib.albums()[0].artpath
        self._fetch_art(True)

    def test_art_not_found(self):
        self.afa_response = None
        self._fetch_art(False)

    def test_no_art_for_singleton(self):
        # Singleton (non-album) tasks must not trigger album art fetching.
        self.task.is_album = False
        self._fetch_art(False)

    def test_leave_original_file_in_place(self):
        self._fetch_art(True)
        self.assertExists(self.art_file)

    def test_delete_original_file(self):
        config['import']['delete'] = True
        self._fetch_art(True)
        self.assertNotExists(self.art_file)

    def test_move_original_file(self):
        config['import']['move'] = True
        self._fetch_art(True)
        self.assertNotExists(self.art_file)

    def test_do_not_delete_original_if_already_in_place(self):
        artdest = os.path.join(os.path.dirname(self.i.path), 'cover.jpg')
        shutil.copyfile(self.art_file, artdest)
        self.afa_response = artdest
        self._fetch_art(True)

    def test_fetch_art_if_imported_file_deleted(self):
        # See #1126. Test the following scenario:
        # - Album art imported, `album.artpath` set.
        # - Imported album art file subsequently deleted (by user or other
        #   program).
        # `fetchart` should import album art again instead of printing the
        # message "<album> has album art".
        self._fetch_art(True)
        util.remove(self.album.artpath)
        self.plugin.batch_fetch_art(self.lib, self.lib.albums(), force=False)
        self.assertExists(self.album.artpath)
class ArtForAlbumTest(UseThePlugin):
    """ Tests that fetchart.art_for_album respects the size
    configuration (e.g., minwidth, enforce_ratio)
    """
    # Fixture images named by their pixel dimensions (width x height).
    IMG_225x225 = os.path.join(_common.RSRC, 'abbey.jpg')
    IMG_348x348 = os.path.join(_common.RSRC, 'abbey-different.jpg')
    IMG_500x490 = os.path.join(_common.RSRC, 'abbey-similar.jpg')

    def setUp(self):
        super(ArtForAlbumTest, self).setUp()

        # Patch the filesystem and remote lookups so that every source
        # resolves to whatever self.image_file currently points at.
        self.old_fs_source_get = self.plugin.fs_source.get
        self.old_fetch_img = self.plugin._fetch_image
        self.old_source_urls = self.plugin._source_urls

        def fs_source_get(*_):
            return self.image_file

        def source_urls(_):
            return ['']

        def fetch_img(_):
            return self.image_file

        self.plugin.fs_source.get = fs_source_get
        self.plugin._source_urls = source_urls
        self.plugin._fetch_image = fetch_img

    def tearDown(self):
        # Restore the real implementations patched out in setUp.
        self.plugin.fs_source.get = self.old_fs_source_get
        self.plugin._source_urls = self.old_source_urls
        self.plugin._fetch_image = self.old_fetch_img
        super(ArtForAlbumTest, self).tearDown()

    def _assertImageIsValidArt(self, image_file, should_exist):
        # The local and remote code paths must agree on whether the image
        # passes the configured size checks.
        self.assertExists(image_file)
        self.image_file = image_file

        local_artpath = self.plugin.art_for_album(None, [''], True)
        remote_artpath = self.plugin.art_for_album(None, [], False)

        self.assertEqual(local_artpath, remote_artpath)

        if should_exist:
            self.assertEqual(local_artpath, self.image_file)
            self.assertExists(local_artpath)
            return local_artpath
        else:
            self.assertIsNone(local_artpath)

    def _assertImageResized(self, image_file, should_resize):
        # Check whether art_for_album invoked the resizer for this image.
        self.image_file = image_file
        with patch.object(ArtResizer.shared, 'resize') as mock_resize:
            self.plugin.art_for_album(None, [''], True)
            self.assertEqual(mock_resize.called, should_resize)

    def _require_backend(self):
        """Skip the test if the art resizer doesn't have ImageMagick or
        PIL (so comparisons and measurements are unavailable).
        """
        if ArtResizer.shared.method[0] == WEBPROXY:
            self.skipTest("ArtResizer has no local imaging backend available")

    def test_respect_minwidth(self):
        self._require_backend()
        self.plugin.minwidth = 300
        self._assertImageIsValidArt(self.IMG_225x225, False)
        self._assertImageIsValidArt(self.IMG_348x348, True)

    def test_respect_enforce_ratio_yes(self):
        self._require_backend()
        self.plugin.enforce_ratio = True
        self._assertImageIsValidArt(self.IMG_500x490, False)
        self._assertImageIsValidArt(self.IMG_225x225, True)

    def test_respect_enforce_ratio_no(self):
        self.plugin.enforce_ratio = False
        self._assertImageIsValidArt(self.IMG_500x490, True)

    def test_resize_if_necessary(self):
        self._require_backend()
        self.plugin.maxwidth = 300
        self._assertImageResized(self.IMG_225x225, False)
        self._assertImageResized(self.IMG_348x348, True)
def suite():
    """Collect every test in this module for the test runner."""
    return unittest.defaultTestLoader.loadTestsFromName(__name__)
if __name__ == '__main__':
    # NOTE: this must be a native string literal. Comparing against
    # b'__main__' is always False on Python 3 (str never equals bytes),
    # which silently prevented running this module directly. A plain
    # '__main__' compares equal on both Python 2 (ASCII str/unicode
    # comparison) and Python 3.
    unittest.main(defaultTest='suite')
| test/test_art.py | 17,735 | Tests that fetchart.art_for_album respects the size
configuration (e.g., minwidth, enforce_ratio)
Execute the fetch_art coroutine for the task and return the
album's resulting artpath. ``should_exist`` specifies whether to
assert that art path was set (to the correct value) or that
the path was not set.
Skip the test if the art resizer doesn't have ImageMagick or
PIL (so comparisons and measurements are unavailable).
Tests for the album art fetchers.
-*- coding: utf-8 -*- This file is part of beets. Copyright 2016, Adrian Sampson. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. Mock the album art fetcher to always return our test file. Test library. The import configuration. Import task for the coroutine. Execute the two relevant parts of the importer. See 1126. Test the following scenario: - Album art imported, `album.artpath` set. - Imported album art file subsequently deleted (by user or other program). `fetchart` should import album art again instead of printing the message "<album> has album art". | 1,542 | en | 0.868647 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
import json
import logging
import math
import os
import tempfile
import unittest
from builtins import range
from typing import List
import sys
# patches unittest.TestCase to be python3 compatible
import future.tests.base # pylint: disable=unused-import
import hamcrest as hc
import avro
import avro.datafile
from avro.datafile import DataFileWriter
from avro.io import DatumWriter
from fastavro.schema import parse_schema
from fastavro import writer
# pylint: disable=wrong-import-order, wrong-import-position, ungrouped-imports
try:
from avro.schema import Parse # avro-python3 library for python3
except ImportError:
from avro.schema import parse as Parse # avro library for python2
# pylint: enable=wrong-import-order, wrong-import-position, ungrouped-imports
import apache_beam as beam
from apache_beam import Create
from apache_beam.io import avroio
from apache_beam.io import filebasedsource
from apache_beam.io import iobase
from apache_beam.io import source_test_utils
from apache_beam.io.avroio import _create_avro_sink # For testing
from apache_beam.io.avroio import _create_avro_source # For testing
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from apache_beam.transforms.display import DisplayData
from apache_beam.transforms.display_test import DisplayDataItemMatcher
# Import snappy optionally; some tests will be skipped when import fails.
try:
import snappy # pylint: disable=import-error
except ImportError:
snappy = None # pylint: disable=invalid-name
logging.warning('python-snappy is not installed; some tests will be skipped.')
# Fixture records matching the "User" schema used throughout these tests.
RECORDS = [
    {'name': 'Thomas', 'favorite_number': 1, 'favorite_color': 'blue'},
    {'name': 'Henry', 'favorite_number': 3, 'favorite_color': 'green'},
    {'name': 'Toby', 'favorite_number': 7, 'favorite_color': 'brown'},
    {'name': 'Gordon', 'favorite_number': 4, 'favorite_color': 'blue'},
    {'name': 'Emily', 'favorite_number': -1, 'favorite_color': 'Red'},
    {'name': 'Percy', 'favorite_number': 6, 'favorite_color': 'Green'},
]
class AvroBase(object):
_temp_files = [] # type: List[str]
  def __init__(self, methodName='runTest'):
    """Attach the shared fixture data: test records and the Avro schema.

    Subclasses (avro vs. fastavro variants) set ``use_fastavro`` and
    provide ``SCHEMA`` by parsing ``SCHEMA_STRING``.
    """
    super(AvroBase, self).__init__(methodName)
    self.RECORDS = RECORDS
    # Writer schema for the "User" records above.
    self.SCHEMA_STRING = '''
          {"namespace": "example.avro",
           "type": "record",
           "name": "User",
           "fields": [
               {"name": "name", "type": "string"},
               {"name": "favorite_number",  "type": ["int", "null"]},
               {"name": "favorite_color", "type": ["string", "null"]}
           ]
          }
          '''
@classmethod
def setUpClass(cls):
# Method has been renamed in Python 3
if sys.version_info[0] < 3:
cls.assertCountEqual = cls.assertItemsEqual
  def setUp(self):
    """Limit the size-estimation thread pool for constrained environments."""
    # Reducing the size of thread pools. Without this test execution may fail in
    # environments with limited amount of resources.
    filebasedsource.MAX_NUM_THREADS_FOR_SIZE_ESTIMATION = 2
def tearDown(self):
for path in self._temp_files:
if os.path.exists(path):
os.remove(path)
self._temp_files = []
  def _write_data(self, directory, prefix, codec, count, sync_interval):
    """Write test records to a temp avro file and return its path.

    Abstract: implemented by the avro/fastavro subclasses. NOTE(review):
    callers invoke this with keyword subsets and no arguments at all, so
    overrides presumably supply defaults for every parameter — confirm in
    the subclasses.
    """
    raise NotImplementedError
def _write_pattern(self, num_files):
assert num_files > 0
temp_dir = tempfile.mkdtemp()
file_name = None
for _ in range(num_files):
file_name = self._write_data(directory=temp_dir, prefix='mytemp')
assert file_name
file_name_prefix = file_name[:file_name.rfind(os.path.sep)]
return file_name_prefix + os.path.sep + 'mytemp*'
  def _run_avro_test(
      self, pattern, desired_bundle_size, perform_splitting, expected_result):
    """Read ``pattern`` through an avro source and verify the records.

    With ``perform_splitting`` the source is split into bundles of
    ``desired_bundle_size`` bytes and each split is checked against the
    unsplit source as a reference; otherwise the records read are compared
    directly to ``expected_result``.
    """
    source = _create_avro_source(pattern, use_fastavro=self.use_fastavro)
    if perform_splitting:
      assert desired_bundle_size
      splits = [
          split
          for split in source.split(desired_bundle_size=desired_bundle_size)
      ]
      # A single split would make the reference comparison meaningless.
      if len(splits) < 2:
        raise ValueError(
            'Test is trivial. Please adjust it so that at least '
            'two splits get generated')

      sources_info = [(split.source, split.start_position, split.stop_position)
                      for split in splits]
      source_test_utils.assert_sources_equal_reference_source(
          (source, None, None), sources_info)
    else:
      read_records = source_test_utils.read_from_source(source, None, None)
      self.assertCountEqual(expected_result, read_records)
def test_read_without_splitting(self):
file_name = self._write_data()
expected_result = self.RECORDS
self._run_avro_test(file_name, None, False, expected_result)
def test_read_with_splitting(self):
file_name = self._write_data()
expected_result = self.RECORDS
self._run_avro_test(file_name, 100, True, expected_result)
def test_source_display_data(self):
file_name = 'some_avro_source'
source = \
_create_avro_source(
file_name,
validate=False,
use_fastavro=self.use_fastavro
)
dd = DisplayData.create_from(source)
# No extra avro parameters for AvroSource.
expected_items = [
DisplayDataItemMatcher('compression', 'auto'),
DisplayDataItemMatcher('file_pattern', file_name)
]
hc.assert_that(dd.items, hc.contains_inanyorder(*expected_items))
def test_read_display_data(self):
file_name = 'some_avro_source'
read = \
avroio.ReadFromAvro(
file_name,
validate=False,
use_fastavro=self.use_fastavro)
dd = DisplayData.create_from(read)
# No extra avro parameters for AvroSource.
expected_items = [
DisplayDataItemMatcher('compression', 'auto'),
DisplayDataItemMatcher('file_pattern', file_name)
]
hc.assert_that(dd.items, hc.contains_inanyorder(*expected_items))
def test_sink_display_data(self):
file_name = 'some_avro_sink'
sink = _create_avro_sink(
file_name,
self.SCHEMA,
'null',
'.end',
0,
None,
'application/x-avro',
use_fastavro=self.use_fastavro)
dd = DisplayData.create_from(sink)
expected_items = [
DisplayDataItemMatcher('schema', str(self.SCHEMA)),
DisplayDataItemMatcher(
'file_pattern',
'some_avro_sink-%(shard_num)05d-of-%(num_shards)05d.end'),
DisplayDataItemMatcher('codec', 'null'),
DisplayDataItemMatcher('compression', 'uncompressed')
]
hc.assert_that(dd.items, hc.contains_inanyorder(*expected_items))
def test_write_display_data(self):
file_name = 'some_avro_sink'
write = avroio.WriteToAvro(
file_name, self.SCHEMA, use_fastavro=self.use_fastavro)
dd = DisplayData.create_from(write)
expected_items = [
DisplayDataItemMatcher('schema', str(self.SCHEMA)),
DisplayDataItemMatcher(
'file_pattern',
'some_avro_sink-%(shard_num)05d-of-%(num_shards)05d'),
DisplayDataItemMatcher('codec', 'deflate'),
DisplayDataItemMatcher('compression', 'uncompressed')
]
hc.assert_that(dd.items, hc.contains_inanyorder(*expected_items))
def test_read_reentrant_without_splitting(self):
file_name = self._write_data()
source = _create_avro_source(file_name, use_fastavro=self.use_fastavro)
source_test_utils.assert_reentrant_reads_succeed((source, None, None))
  def test_read_reantrant_with_splitting(self):
    """Reentrant reads must succeed on the single split of a small file."""
    # NOTE(review): method name has a typo ("reantrant" -> "reentrant").
    # Renaming would change the test id seen by runners, so it is only
    # flagged here rather than fixed.
    file_name = self._write_data()
    source = _create_avro_source(file_name, use_fastavro=self.use_fastavro)
    splits = [split for split in source.split(desired_bundle_size=100000)]
    # A small file should not be split further.
    assert len(splits) == 1
    source_test_utils.assert_reentrant_reads_succeed(
        (splits[0].source, splits[0].start_position, splits[0].stop_position))
def test_read_without_splitting_multiple_blocks(self):
file_name = self._write_data(count=12000)
expected_result = self.RECORDS * 2000
self._run_avro_test(file_name, None, False, expected_result)
def test_read_with_splitting_multiple_blocks(self):
file_name = self._write_data(count=12000)
expected_result = self.RECORDS * 2000
self._run_avro_test(file_name, 10000, True, expected_result)
  def test_split_points(self):
    """Verifies range_tracker.split_points() reporting across block boundaries."""
    num_records = 12000
    sync_interval = 16000
    file_name = self._write_data(count=num_records, sync_interval=sync_interval)
    source = _create_avro_source(file_name, use_fastavro=self.use_fastavro)
    splits = [split for split in source.split(desired_bundle_size=float('inf'))]
    assert len(splits) == 1
    range_tracker = splits[0].source.get_range_tracker(
        splits[0].start_position, splits[0].stop_position)
    split_points_report = []
    # Record what the tracker reports after each record is read.
    for _ in splits[0].source.read(range_tracker):
      split_points_report.append(range_tracker.split_points())
    # There will be a total of num_blocks in the generated test file,
    # proportional to number of records in the file divided by syncronization
    # interval used by avro during write. Each block has more than 10 records.
    # NOTE(review): 14.5 looks like an empirical bytes-per-record estimate for
    # this schema -- confirm against the writer if the fixture ever changes.
    num_blocks = int(math.ceil(14.5 * num_records / sync_interval))
    assert num_blocks > 1
    # When reading records of the first block, range_tracker.split_points()
    # should return (0, iobase.RangeTracker.SPLIT_POINTS_UNKNOWN)
    self.assertEqual(
        split_points_report[:10],
        [(0, iobase.RangeTracker.SPLIT_POINTS_UNKNOWN)] * 10)
    # When reading records of last block, range_tracker.split_points() should
    # return (num_blocks - 1, 1)
    self.assertEqual(split_points_report[-10:], [(num_blocks - 1, 1)] * 10)
def test_read_without_splitting_compressed_deflate(self):
file_name = self._write_data(codec='deflate')
expected_result = self.RECORDS
self._run_avro_test(file_name, None, False, expected_result)
def test_read_with_splitting_compressed_deflate(self):
file_name = self._write_data(codec='deflate')
expected_result = self.RECORDS
self._run_avro_test(file_name, 100, True, expected_result)
@unittest.skipIf(snappy is None, 'python-snappy not installed.')
def test_read_without_splitting_compressed_snappy(self):
file_name = self._write_data(codec='snappy')
expected_result = self.RECORDS
self._run_avro_test(file_name, None, False, expected_result)
@unittest.skipIf(snappy is None, 'python-snappy not installed.')
def test_read_with_splitting_compressed_snappy(self):
file_name = self._write_data(codec='snappy')
expected_result = self.RECORDS
self._run_avro_test(file_name, 100, True, expected_result)
def test_read_without_splitting_pattern(self):
pattern = self._write_pattern(3)
expected_result = self.RECORDS * 3
self._run_avro_test(pattern, None, False, expected_result)
def test_read_with_splitting_pattern(self):
pattern = self._write_pattern(3)
expected_result = self.RECORDS * 3
self._run_avro_test(pattern, 100, True, expected_result)
  def test_dynamic_work_rebalancing_exhaustive(self):
    """Exhaustively checks split-at-fraction behavior on a tiny file."""
    def compare_split_points(file_name):
      # The whole file must come back as a single split when the desired
      # bundle size is unbounded.
      source = _create_avro_source(file_name, use_fastavro=self.use_fastavro)
      splits = [
          split for split in source.split(desired_bundle_size=float('inf'))
      ]
      assert len(splits) == 1
      source_test_utils.assert_split_at_fraction_exhaustive(splits[0].source)
    # Adjusting block size so that we can perform a exhaustive dynamic
    # work rebalancing test that completes within an acceptable amount of time.
    file_name = self._write_data(count=5, sync_interval=2)
    compare_split_points(file_name)
  def test_corrupted_file(self):
    """Reading a file with a damaged sync marker raises a ValueError."""
    file_name = self._write_data()
    with open(file_name, 'rb') as f:
      data = f.read()
    # Corrupt the last character of the file which is also the last character of
    # the last sync_marker.
    # https://avro.apache.org/docs/current/spec.html#Object+Container+Files
    corrupted_data = bytearray(data)
    corrupted_data[-1] = (corrupted_data[-1] + 1) % 256
    with tempfile.NamedTemporaryFile(delete=False,
                                     prefix=tempfile.template) as f:
      f.write(corrupted_data)
      corrupted_file_name = f.name
    source = _create_avro_source(
        corrupted_file_name, use_fastavro=self.use_fastavro)
    with self.assertRaisesRegex(ValueError, r'expected sync marker'):
      source_test_utils.read_from_source(source, None, None)
def test_read_from_avro(self):
path = self._write_data()
with TestPipeline() as p:
assert_that(
p | avroio.ReadFromAvro(path, use_fastavro=self.use_fastavro),
equal_to(self.RECORDS))
def test_read_all_from_avro_single_file(self):
path = self._write_data()
with TestPipeline() as p:
assert_that(
p \
| Create([path]) \
| avroio.ReadAllFromAvro(use_fastavro=self.use_fastavro),
equal_to(self.RECORDS))
def test_read_all_from_avro_many_single_files(self):
path1 = self._write_data()
path2 = self._write_data()
path3 = self._write_data()
with TestPipeline() as p:
assert_that(
p \
| Create([path1, path2, path3]) \
| avroio.ReadAllFromAvro(use_fastavro=self.use_fastavro),
equal_to(self.RECORDS * 3))
def test_read_all_from_avro_file_pattern(self):
file_pattern = self._write_pattern(5)
with TestPipeline() as p:
assert_that(
p \
| Create([file_pattern]) \
| avroio.ReadAllFromAvro(use_fastavro=self.use_fastavro),
equal_to(self.RECORDS * 5))
def test_read_all_from_avro_many_file_patterns(self):
file_pattern1 = self._write_pattern(5)
file_pattern2 = self._write_pattern(2)
file_pattern3 = self._write_pattern(3)
with TestPipeline() as p:
assert_that(
p \
| Create([file_pattern1, file_pattern2, file_pattern3]) \
| avroio.ReadAllFromAvro(use_fastavro=self.use_fastavro),
equal_to(self.RECORDS * 10))
  def test_sink_transform(self):
    """Round-trip: WriteToAvro followed by ReadFromAvro reproduces the records."""
    with tempfile.NamedTemporaryFile() as dst:
      path = dst.name
      with TestPipeline() as p:
        # pylint: disable=expression-not-assigned
        p \
        | beam.Create(self.RECORDS) \
        | avroio.WriteToAvro(path, self.SCHEMA, use_fastavro=self.use_fastavro)
      with TestPipeline() as p:
        # json used for stable sortability
        readback = \
            p \
            | avroio.ReadFromAvro(path + '*', use_fastavro=self.use_fastavro) \
            | beam.Map(json.dumps)
        assert_that(readback, equal_to([json.dumps(r) for r in self.RECORDS]))
  @unittest.skipIf(snappy is None, 'python-snappy not installed.')
  def test_sink_transform_snappy(self):
    """Round-trip with the snappy codec reproduces the written records."""
    with tempfile.NamedTemporaryFile() as dst:
      path = dst.name
      with TestPipeline() as p:
        # pylint: disable=expression-not-assigned
        p \
        | beam.Create(self.RECORDS) \
        | avroio.WriteToAvro(
            path,
            self.SCHEMA,
            codec='snappy',
            use_fastavro=self.use_fastavro)
      with TestPipeline() as p:
        # json used for stable sortability
        readback = \
            p \
            | avroio.ReadFromAvro(path + '*', use_fastavro=self.use_fastavro) \
            | beam.Map(json.dumps)
        assert_that(readback, equal_to([json.dumps(r) for r in self.RECORDS]))
@unittest.skipIf(
    sys.version_info[0] == 3 and os.environ.get('RUN_SKIPPED_PY3_TESTS') != '1',
    'This test still needs to be fixed on Python 3. '
    'TODO: BEAM-6522.')
class TestAvro(AvroBase, unittest.TestCase):
  """Runs the AvroBase test battery against the avro library implementation."""

  def __init__(self, methodName='runTest'):
    super(TestAvro, self).__init__(methodName)
    self.use_fastavro = False
    self.SCHEMA = Parse(self.SCHEMA_STRING)

  def _write_data(
      self,
      directory=None,
      prefix=tempfile.template,
      codec='null',
      count=len(RECORDS),
      sync_interval=avro.datafile.SYNC_INTERVAL):
    """Writes *count* records to a temp avro file and returns its name.

    Temporarily patches the module-global avro.datafile.SYNC_INTERVAL so
    tests can control the block layout of the generated file.
    """
    old_sync_interval = avro.datafile.SYNC_INTERVAL
    try:
      avro.datafile.SYNC_INTERVAL = sync_interval
      with tempfile.NamedTemporaryFile(delete=False,
                                       dir=directory,
                                       prefix=prefix) as f:
        writer = DataFileWriter(f, DatumWriter(), self.SCHEMA, codec=codec)
        len_records = len(self.RECORDS)
        # Cycle through RECORDS until `count` records have been written.
        for i in range(count):
          writer.append(self.RECORDS[i % len_records])
        writer.close()
        self._temp_files.append(f.name)
        return f.name
    finally:
      # Always restore the global, even if writing failed.
      avro.datafile.SYNC_INTERVAL = old_sync_interval
class TestFastAvro(AvroBase, unittest.TestCase):
  """Runs the AvroBase test battery against the fastavro implementation."""

  def __init__(self, methodName='runTest'):
    super(TestFastAvro, self).__init__(methodName)
    self.use_fastavro = True
    self.SCHEMA = parse_schema(json.loads(self.SCHEMA_STRING))

  def _write_data(self,
                  directory=None,
                  prefix=tempfile.template,
                  codec='null',
                  count=len(RECORDS),
                  **kwargs):
    """Writes *count* records (cycling through RECORDS) and returns the path."""
    full_cycles, remainder = divmod(count, len(self.RECORDS))
    all_records = self.RECORDS * full_cycles + self.RECORDS[:remainder]
    with tempfile.NamedTemporaryFile(delete=False,
                                     dir=directory,
                                     prefix=prefix,
                                     mode='w+b') as f:
      writer(f, self.SCHEMA, all_records, codec=codec, **kwargs)
      self._temp_files.append(f.name)
      return f.name
# Standalone entry point: raise logging verbosity and run the test suite.
if __name__ == '__main__':
  logging.getLogger().setLevel(logging.INFO)
  unittest.main()
| sdks/python/apache_beam/io/avroio_test.py | 18,450 | Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. pytype: skip-file patches unittest.TestCase to be python3 compatible pylint: disable=unused-import pylint: disable=wrong-import-order, wrong-import-position, ungrouped-imports avro-python3 library for python3 avro library for python2 pylint: enable=wrong-import-order, wrong-import-position, ungrouped-imports For testing For testing Import snappy optionally; some tests will be skipped when import fails. pylint: disable=import-error pylint: disable=invalid-name type: List[str] Method has been renamed in Python 3 Reducing the size of thread pools. Without this test execution may fail in environments with limited amount of resources. No extra avro parameters for AvroSource. No extra avro parameters for AvroSource. There will be a total of num_blocks in the generated test file, proportional to number of records in the file divided by syncronization interval used by avro during write. Each block has more than 10 records. 
When reading records of the first block, range_tracker.split_points() should return (0, iobase.RangeTracker.SPLIT_POINTS_UNKNOWN) When reading records of last block, range_tracker.split_points() should return (num_blocks - 1, 1) Adjusting block size so that we can perform a exhaustive dynamic work rebalancing test that completes within an acceptable amount of time. Corrupt the last character of the file which is also the last character of the last sync_marker. https://avro.apache.org/docs/current/spec.htmlObject+Container+Files pylint: disable=expression-not-assigned json used for stable sortability pylint: disable=expression-not-assigned json used for stable sortability | 2,360 | en | 0.823692 |
import os
from hisim import hisim_main
from hisim.simulationparameters import SimulationParameters
import shutil
import random
from hisim import log
from hisim.utils import PostProcessingOptions
import matplotlib.pyplot as plt
from hisim import utils
@utils.measure_execution_time
def test_basic_household():
    """Smoke-tests the basic_household example over one simulated day."""
    example_path = "../examples/basic_household.py"
    setup_function = "basic_household_explicit"
    sim_params = SimulationParameters.one_day_only(year=2019, seconds_per_timestep=60)
    hisim_main.main(example_path, setup_function, sim_params)
    log.information(os.getcwd())
@utils.measure_execution_time
def test_basic_household_with_default_connections():
    """Smoke-tests the default-connections variant over one simulated day."""
    example_path = "../examples/basic_household.py"
    setup_function = "basic_household_with_default_connections"
    sim_params = SimulationParameters.one_day_only(year=2019, seconds_per_timestep=60)
    hisim_main.main(example_path, setup_function, sim_params)
    log.information(os.getcwd())
@utils.measure_execution_time
def test_basic_household_with_all_resultfiles():
    """Runs the basic household example with every post-processing option on."""
    sim_params = SimulationParameters.one_day_only(year=2019, seconds_per_timestep=60)
    # Enable every available post-processing output.
    sim_params.post_processing_options.extend(PostProcessingOptions)
    hisim_main.main("../examples/basic_household.py", "basic_household_explicit", sim_params)
    log.information(os.getcwd())
#
# def test_basic_household_with_all_resultfiles_full_year():
# if os.path.isdir("../hisim/inputs/cache"):
# shutil.rmtree("../hisim/inputs/cache")
# path = "../examples/basic_household.py"
# func = "basic_household_explicit"
# mysimpar = SimulationParameters.full_year(year=2019, seconds_per_timestep=60)
# for option in PostProcessingOptions:
# mysimpar.post_processing_options.append(option)
# log.information(option)
# hisim_main.main(path, func,mysimpar)
# log.information(os.getcwd())
# def test_basic_household_boiler():
# path = "../examples/basic_household_boiler.py"
# func = "basic_household_boiler_explicit"
# mysimpar = SimulationParameters.one_day_only(year=2019, seconds_per_timestep=60)
# hisim_main.main(path, func, mysimpar)
# def test_basic_household_districtheating():
# path = "../examples/basic_household_Districtheating.py"
# func = "basic_household_Districtheating_explicit"
# mysimpar = SimulationParameters.one_day_only(year=2019, seconds_per_timestep=60)
# hisim_main.main(path, func, mysimpar)
# def test_basic_household_oilheater():
# path = "../examples/basic_household_Oilheater.py"
# func = "basic_household_Oilheater_explicit"
# mysimpar = SimulationParameters.one_day_only(year=2019, seconds_per_timestep=60)
# hisim_main.main(path, func, mysimpar)
@utils.measure_execution_time
def test_modular_household_configurations():
    """Runs the modular household example with one randomly drawn system config.

    A single configuration is sampled per run (instead of iterating the full
    option grid) to keep the test duration acceptable.
    """
    sim_params = SimulationParameters.one_day_only(year=2019, seconds_per_timestep=60)
    predictive = True
    pv_included = random.choice([True, False])
    smart_devices_included = random.choice([True, False])
    boiler_included = random.choice(['electricity', 'hydrogen', None])
    heating_device_included = random.choice(['heat_pump', 'oil_heater', 'district_heating'])
    sim_params.reset_system_config(
        predictive=predictive,
        pv_included=pv_included,
        smart_devices_included=smart_devices_included,
        boiler_included=boiler_included,
        heating_device_included=heating_device_included,
    )
    hisim_main.main("../examples/modular_household.py", "modular_household_explicit", sim_params)
@utils.measure_execution_time
def test_first_example():
    """Smoke-tests the first tutorial example over one simulated day."""
    sim_params = SimulationParameters.one_day_only(year=2019, seconds_per_timestep=60)
    hisim_main.main("../examples/examples.py", "first_example", sim_params)
@utils.measure_execution_time
def test_second_example():
    """Smoke-tests the second tutorial example over one simulated day."""
    sim_params = SimulationParameters.one_day_only(year=2019, seconds_per_timestep=60)
    hisim_main.main("../examples/examples.py", "second_example", sim_params)
| tests/test_examples.py | 4,725 | if os.path.isdir("../hisim/inputs/cache"): shutil.rmtree("../hisim/inputs/cache") if os.path.isdir("../hisim/inputs/cache"): shutil.rmtree("../hisim/inputs/cache") if os.path.isdir("../hisim/inputs/cache"): shutil.rmtree("../hisim/inputs/cache") def test_basic_household_with_all_resultfiles_full_year(): if os.path.isdir("../hisim/inputs/cache"): shutil.rmtree("../hisim/inputs/cache") path = "../examples/basic_household.py" func = "basic_household_explicit" mysimpar = SimulationParameters.full_year(year=2019, seconds_per_timestep=60) for option in PostProcessingOptions: mysimpar.post_processing_options.append(option) log.information(option) hisim_main.main(path, func,mysimpar) log.information(os.getcwd()) def test_basic_household_boiler(): path = "../examples/basic_household_boiler.py" func = "basic_household_boiler_explicit" mysimpar = SimulationParameters.one_day_only(year=2019, seconds_per_timestep=60) hisim_main.main(path, func, mysimpar) def test_basic_household_districtheating(): path = "../examples/basic_household_Districtheating.py" func = "basic_household_Districtheating_explicit" mysimpar = SimulationParameters.one_day_only(year=2019, seconds_per_timestep=60) hisim_main.main(path, func, mysimpar) def test_basic_household_oilheater(): path = "../examples/basic_household_Oilheater.py" func = "basic_household_Oilheater_explicit" mysimpar = SimulationParameters.one_day_only(year=2019, seconds_per_timestep=60) hisim_main.main(path, func, mysimpar) for pv_included in [ True, False ]: for smart_devices_included in [ True, False ]: for boiler_included in [ 'electricity', 'hydrogen', None ]: for heating_device_included in [ 'heat_pump', 'oil_heater', 'district_heating' ]: | 1,836 | en | 0.456785 |
'''
RenameBot
This file is a part of mrvishal2k2 rename repo
Dont kang !!!
© Mrvishal2k2
'''
import pyrogram
from pyrogram import Client, filters
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup
import logging
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
log = logging.getLogger(__name__)
@Client.on_message(filters.document | filters.video | filters.audio | filters.voice | filters.video_note | filters.animation)
async def rename_filter(c, m):
    """Reply to an incoming media message with rename/convert options.

    Builds an inline keyboard whose buttons depend on the media type:
    videos additionally get "rename as video" and conversion buttons.
    """
    media = m.document or m.video or m.audio or m.voice or m.video_note or m.animation
    ## couldn't add photo bcoz i want all photos to use as thumb..
    text = ""
    button = []
    try:
        filename = media.file_name
        # BUG FIX: the filename was not interpolated into the f-string.
        text += f"FileName:\n{filename}\n"
    except AttributeError:
        # some files dont gib name ..
        filename = None
    text += "Select the desired Option"
    button.append([InlineKeyboardButton("Rename as File", callback_data="rename_file")])
    # Thanks to albert for mime_type suggestion
    if media.mime_type.startswith("video/"):
        ## how the f the other formats can be uploaded as video
        button.append([InlineKeyboardButton("Rename as Video", callback_data="rename_video")])
        button.append([InlineKeyboardButton("Convert as File", callback_data="convert_file")])
        button.append([InlineKeyboardButton("Convert as Video", callback_data="convert_video")])
    button.append([InlineKeyboardButton("Cancel ❌", callback_data="cancel")])
    markup = InlineKeyboardMarkup(button)
    try:
        await c.send_chat_action(m.chat.id, "typing")
        await m.reply_text(text, quote=True, reply_markup=markup, parse_mode="markdown", disable_web_page_preview=True)
    except Exception as e:
        # Replying can fail (e.g. bot was blocked); just log and move on.
        log.info(str(e))
| root/plugins/main_filter.py | 1,757 | RenameBot
This file is a part of mrvishal2k2 rename repo
Dont kang !!!
© Mrvishal2k2
couldn't add photo bcoz i want all photos to use as thumb.. some files dont gib name .. Thanks to albert for mime_type suggestion how the f the other formats can be uploaded as video | 271 | en | 0.911089 |
"""
builtin_bracket.py
"""
from __future__ import print_function
from _devbuild.gen.id_kind_asdl import Id
from _devbuild.gen.runtime_asdl import value
from _devbuild.gen.syntax_asdl import (
word, word_e, word_t, word__String, bool_expr,
)
from _devbuild.gen.types_asdl import lex_mode_e
from asdl import runtime
from core import error
from core.pyerror import e_usage, p_die, log
from core import vm
from frontend import match
from osh import sh_expr_eval
from osh import bool_parse
from osh import word_parse
from osh import word_eval
_ = log
from typing import cast, TYPE_CHECKING
if TYPE_CHECKING:
from _devbuild.gen.runtime_asdl import cmd_value__Argv, value__Str
from _devbuild.gen.syntax_asdl import word__String, bool_expr_t
from _devbuild.gen.types_asdl import lex_mode_t
from core.ui import ErrorFormatter
from core import optview
from core import state
class _StringWordEmitter(word_parse.WordEmitter):
  """For test/[, we need a word parser that returns String.
  The BoolParser calls word_.BoolId(w), and deals with Kind.BoolUnary,
  Kind.BoolBinary, etc. This is instead of Compound/Token (as in the
  [[ case.
  """
  def __init__(self, cmd_val):
    # type: (cmd_value__Argv) -> None
    self.cmd_val = cmd_val
    self.i = 0  # index of the next argv word to emit
    self.n = len(cmd_val.argv)
  def ReadWord(self, unused_lex_mode):
    # type: (lex_mode_t) -> word__String
    """Interface for bool_parse.py.
    TODO: This should probably be word_t
    """
    if self.i == self.n:
      # Does it make sense to define Eof_Argv or something?
      # TODO: Add a way to show this location. Show 1 char past the right-most
      # spid of the last word? But we only have the left-most spid.
      w = word.String(Id.Eof_Real, '', runtime.NO_SPID)
      return w
    #log('ARGV %s i %d', self.argv, self.i)
    s = self.cmd_val.argv[self.i]
    left_spid = self.cmd_val.arg_spids[self.i]
    self.i += 1
    # default is an operand word
    # Classify the token: unary op, then binary op, then punctuation,
    # falling through to a plain operand word.
    id_ = match.BracketUnary(s)
    if id_ == Id.Undefined_Tok:
      id_ = match.BracketBinary(s)
    if id_ == Id.Undefined_Tok:
      id_ = match.BracketOther(s)
    if id_ == Id.Undefined_Tok:
      id_ = Id.Word_Compound
    # NOTE: We only have the left spid now. It might be useful to add the
    # right one.
    w = word.String(id_, s, left_spid)
    return w
  def Read(self):
    # type: () -> word__String
    """Interface used for special cases below."""
    return self.ReadWord(lex_mode_e.ShCommand)
  def Peek(self, offset):
    # type: (int) -> str
    """For special cases."""
    return self.cmd_val.argv[self.i + offset]
  def Rewind(self, offset):
    # type: (int) -> None
    """For special cases."""
    self.i -= offset
class _WordEvaluator(word_eval.StringWordEvaluator):
  """Trivial evaluator: test/[ words are already-evaluated argv strings."""
  def __init__(self):
    # type: () -> None
    word_eval.StringWordEvaluator.__init__(self)
  def EvalWordToString(self, w, eval_flags=0):
    # type: (word_t, int) -> value__Str
    # do_fnmatch: for the [[ == ]] semantics which we don't have!
    # I think I need another type of node
    # Maybe it should be BuiltinEqual and BuiltinDEqual? Parse it into a
    # different tree.
    assert w.tag_() == word_e.String
    string_word = cast(word__String, w)
    return value.Str(string_word.s)
def _TwoArgs(w_parser):
  # type: (_StringWordEmitter) -> bool_expr_t
  """Returns an expression tree to be evaluated.

  Handles the 2-argument forms: '! WORD' and 'UNARY_OP WORD'.
  """
  w0 = w_parser.Read()
  w1 = w_parser.Read()
  s0 = w0.s
  if s0 == '!':
    return bool_expr.LogicalNot(bool_expr.WordTest(w1))
  unary_id = Id.Undefined_Tok
  # Oil's preferred long flags
  # (spelled --dir/--exists/--file/--symlink instead of -d/-e/-f/-L)
  if w0.s.startswith('--'):
    if s0 == '--dir':
      unary_id = Id.BoolUnary_d
    elif s0 == '--exists':
      unary_id = Id.BoolUnary_e
    elif s0 == '--file':
      unary_id = Id.BoolUnary_f
    elif s0 == '--symlink':
      unary_id = Id.BoolUnary_L
  if unary_id == Id.Undefined_Tok:
    unary_id = match.BracketUnary(w0.s)
  if unary_id == Id.Undefined_Tok:
    p_die('Expected unary operator, got %r (2 args)', w0.s, word=w0)
  return bool_expr.Unary(unary_id, w1)
def _ThreeArgs(w_parser):
  # type: (_StringWordEmitter) -> bool_expr_t
  """Returns an expression tree to be evaluated.

  Handles the 3-argument forms: 'A op B', 'A -a B', 'A -o B',
  '! <2-arg form>', and '( WORD )'.
  """
  w0 = w_parser.Read()
  w1 = w_parser.Read()
  w2 = w_parser.Read()
  # NOTE: Order is important here.
  binary_id = match.BracketBinary(w1.s)
  if binary_id != Id.Undefined_Tok:
    return bool_expr.Binary(binary_id, w0, w2)
  if w1.s == '-a':
    return bool_expr.LogicalAnd(bool_expr.WordTest(w0), bool_expr.WordTest(w2))
  if w1.s == '-o':
    return bool_expr.LogicalOr(bool_expr.WordTest(w0), bool_expr.WordTest(w2))
  if w0.s == '!':
    # '!' negating a two-argument expression: re-read the last two words.
    w_parser.Rewind(2)
    child = _TwoArgs(w_parser)
    return bool_expr.LogicalNot(child)
  if w0.s == '(' and w2.s == ')':
    return bool_expr.WordTest(w1)
  p_die('Expected binary operator, got %r (3 args)', w1.s, word=w1)
class Test(vm._Builtin):
  """Implementation of the 'test' and '[' builtins."""
  def __init__(self, need_right_bracket, exec_opts, mem, errfmt):
    # type: (bool, optview.Exec, state.Mem, ErrorFormatter) -> None
    self.need_right_bracket = need_right_bracket  # True when invoked as '['
    self.exec_opts = exec_opts
    self.mem = mem
    self.errfmt = errfmt
  def Run(self, cmd_val):
    # type: (cmd_value__Argv) -> int
    """The test/[ builtin.
    The only difference between test and [ is that [ needs a matching ].
    """
    if self.need_right_bracket: # Preprocess right bracket
      if self.exec_opts.simple_test_builtin():
        e_usage("should be invoked as 'test' (simple_test_builtin)")
      strs = cmd_val.argv
      if not strs or strs[-1] != ']':
        self.errfmt.Print_('missing closing ]', span_id=cmd_val.arg_spids[0])
        return 2
      # Remove the right bracket
      cmd_val.argv.pop()
      cmd_val.arg_spids.pop()
    w_parser = _StringWordEmitter(cmd_val)
    w_parser.Read() # dummy: advance past argv[0]
    b_parser = bool_parse.BoolParser(w_parser)
    # There is a fundamental ambiguity due to poor language design, in cases like:
    # [ -z ]
    # [ -z -a ]
    # [ -z -a ] ]
    #
    # See posixtest() in bash's test.c:
    # "This is an implementation of a Posix.2 proposal by David Korn."
    # It dispatches on expressions of length 0, 1, 2, 3, 4, and N args. We do
    # the same here.
    #
    # Another ambiguity:
    # -a is both a unary prefix operator and an infix operator. How to fix this
    # ambiguity?
    bool_node = None # type: bool_expr_t
    n = len(cmd_val.argv) - 1
    if self.exec_opts.simple_test_builtin() and n > 3:
      e_usage("should only have 3 arguments or fewer (simple_test_builtin)")
    try:
      if n == 0:
        return 1 # [ ] is False
      elif n == 1:
        w = w_parser.Read()
        bool_node = bool_expr.WordTest(w)
      elif n == 2:
        bool_node = _TwoArgs(w_parser)
      elif n == 3:
        bool_node = _ThreeArgs(w_parser)
      # The 4-arg special cases below only apply when none of the branches
      # above ran, so 'if' (not 'elif') is correct here.
      if n == 4:
        a0 = w_parser.Peek(0)
        if a0 == '!':
          w_parser.Read() # skip !
          child = _ThreeArgs(w_parser)
          bool_node = bool_expr.LogicalNot(child)
        elif a0 == '(' and w_parser.Peek(3) == ')':
          w_parser.Read() # skip ')'
          bool_node = _TwoArgs(w_parser)
        else:
          pass # fallthrough
      # General case: hand the remaining words to the full boolean parser.
      if bool_node is None:
        bool_node = b_parser.ParseForBuiltin()
    except error.Parse as e:
      self.errfmt.PrettyPrintError(e, prefix='(test) ')
      return 2
    # We technically don't need mem because we don't support BASH_REMATCH here.
    word_ev = _WordEvaluator()
    bool_ev = sh_expr_eval.BoolEvaluator(self.mem, self.exec_opts, None,
                                         self.errfmt)
    # We want [ a -eq a ] to always be an error, unlike [[ a -eq a ]]. This is a
    # weird case of [[ being less strict.
    bool_ev.Init_AlwaysStrict()
    bool_ev.word_ev = word_ev
    bool_ev.CheckCircularDeps()
    try:
      b = bool_ev.EvalB(bool_node)
    except error._ErrorWithLocation as e:
      # We want to catch e_die() and e_strict(). Those are both FatalRuntime
      # errors now, but it might not make sense later.
      # NOTE: This doesn't seem to happen. We have location info for all
      # errors that arise out of [.
      #if not e.HasLocation():
      # raise
      self.errfmt.PrettyPrintError(e, prefix='(test) ')
      return 2 # 1 means 'false', and this usage error is like a parse error.
    status = 0 if b else 1
    return status
| osh/builtin_bracket.py | 8,388 | For test/[, we need a word parser that returns String.
The BoolParser calls word_.BoolId(w), and deals with Kind.BoolUnary,
Kind.BoolBinary, etc. This is instead of Compound/Token (as in the
[[ case.
For special cases.
Interface used for special cases below.
Interface for bool_parse.py.
TODO: This should probably be word_t
For special cases.
The test/[ builtin.
The only difference between test and [ is that [ needs a matching ].
Returns an expression tree to be evaluated.
Returns an expression tree to be evaluated.
builtin_bracket.py
type: (cmd_value__Argv) -> None type: (lex_mode_t) -> word__String Does it make sense to define Eof_Argv or something? TODO: Add a way to show this location. Show 1 char past the right-most spid of the last word? But we only have the left-most spid.log('ARGV %s i %d', self.argv, self.i) default is an operand word NOTE: We only have the left spid now. It might be useful to add the right one. type: () -> word__String type: (int) -> str type: (int) -> None type: () -> None type: (word_t, int) -> value__Str do_fnmatch: for the [[ == ]] semantics which we don't have! I think I need another type of node Maybe it should be BuiltinEqual and BuiltinDEqual? Parse it into a different tree. type: (_StringWordEmitter) -> bool_expr_t Oil's preferred long flags type: (_StringWordEmitter) -> bool_expr_t NOTE: Order is important here. type: (bool, optview.Exec, state.Mem, ErrorFormatter) -> None type: (cmd_value__Argv) -> int Preprocess right bracket Remove the right bracket dummy: advance past argv[0] There is a fundamental ambiguity due to poor language design, in cases like: [ -z ] [ -z -a ] [ -z -a ] ] See posixtest() in bash's test.c: "This is an implementation of a Posix.2 proposal by David Korn." It dispatches on expressions of length 0, 1, 2, 3, 4, and N args. We do the same here. Another ambiguity: -a is both a unary prefix operator and an infix operator. How to fix this ambiguity? type: bool_expr_t [ ] is False skip ! skip ')' fallthrough We technically don't need mem because we don't support BASH_REMATCH here. We want [ a -eq a ] to always be an error, unlike [[ a -eq a ]]. This is a weird case of [[ being less strict. We want to catch e_die() and e_strict(). Those are both FatalRuntime errors now, but it might not make sense later. NOTE: This doesn't seem to happen. We have location info for all errors that arise out of [.if not e.HasLocation(): raise 1 means 'false', and this usage error is like a parse error. | 2,495 | en | 0.841459 |
import glob
import shutil
import subprocess
import os
import sys
import argparse
# Read and save metadata from file
def exiftool_metadata(path):
    """Run exifTool.exe on *path* and return its output as a tag->value dict."""
    exif_tool = 'exifTool.exe'
    # Merge stderr into stdout and decode to text so we can iterate lines.
    process = subprocess.Popen(
        [exif_tool, path],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        universal_newlines=True,
    )
    metadata = {}
    # Each output line looks like "Tag Name : value".
    for raw in process.stdout:
        raw = raw.strip()
        sep = raw.find(':')
        metadata[raw[:sep].strip()] = raw[sep + 1:].strip()
    return metadata
class File:
    """A media file's exiftool metadata plus date-sorted copy logic."""

    def __init__(self, path):
        # Shells out to exifTool once per file.
        self.metadata = exiftool_metadata(path)

    def _get_file_metadata(self, key, no=''):
        """Return the metadata value for *key*, or *no* when the tag is absent."""
        return self.metadata.get(key, no)

    def copyCore(self, source, dst_dir: str, copy_duplicate=False):
        """Copy *source* into dst_dir/<media-type>/<year>/<month>/.

        Returns a list of human-readable log lines describing what happened.
        When a file with the same name already exists, a "_D<i>" suffix is
        probed; same-size duplicates are skipped unless copy_duplicate is True.
        """
        logs = []
        # Folder-name placeholder used when a metadata value is missing.
        no_metadata = 'none'
        date = self._get_file_metadata('Date/Time Original')
        if date == '':
            date = self._get_file_metadata('Create Date', no_metadata)
        mime_type = self._get_file_metadata('MIME Type', no_metadata)
        dst_dir += f'''/{mime_type[:mime_type.find('/')]}/{date[:4]}/{date[5:7]}'''
        filename = self._get_file_metadata('File Name')
        f_name = filename
        dst = dst_dir + '/' + filename
        # File with the same name exists in dst. If source and dst have the same
        # size then 'copy_duplicate' decides whether to copy anyway.
        if os.path.isfile(dst):
            i = 0
            f_pth = File(dst)
            if_same_size: bool = f_pth._get_file_metadata("File Size") == self._get_file_metadata('File Size')
            if (not if_same_size) or copy_duplicate:
                # Probe name_D0, name_D1, ... until an unused name is found.
                while os.path.isfile(dst):
                    filename = f'''{f_name[:f_name.find('.')]}_D{str(i)}.{self._get_file_metadata('File Type Extension')}'''
                    # BUG FIX: was a literal placeholder instead of the newly
                    # generated filename, so the loop never advanced the path.
                    dst = f'''{dst_dir}/{filename}'''
                    i = i + 1
                if if_same_size:
                    logs.append(f"Warning: file already exists but I must copy all files"
                                f" [copy_duplicate={copy_duplicate}], so I try do it ...")
                else:
                    logs.append(f"Warning: file already exists but have other size, so I try copy it ...")
            else:
                logs.append(f"Warning: file already duplicate [copy_exists={copy_duplicate}]."
                            f"\nCopy aboard: {source} -> {dst}")
                return logs
        try:
            if not os.path.isdir(dst_dir):
                os.makedirs(dst_dir)
                logs.append(f"New directory created: {dst_dir}")
            shutil.copy(source, dst)
            logs.append(f'''Copy done: {source} -> {dst}''')
        except Exception as e:
            logs.append(f'''Copy error [{e}]: {source} -> {dst}''')
        return logs
def main():
    """Command-line entry point: walk the source tree recursively and copy
    every file into the destination tree, printing numbered log lines."""
    # Arguments from console
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', help="Obligatory: source directory path")
    parser.add_argument('-d', help="Obligatory: destination folder path")
    parser.add_argument('-e', help="Obligatory: copy duplicate files (T/True/F/False)")
    cli = parser.parse_args(sys.argv[1:])

    # Setup variables from the parsed options.
    source_dir = cli.s
    dst_dir = cli.d
    flag_map = {
        "T": True,
        "TRUE": True,
        "F": False,
        "FALSE": False
    }
    try:
        copy_duplicate = flag_map.get(cli.e.upper(), False)
    except AttributeError:
        # -e was omitted entirely (cli.e is None).
        copy_duplicate = False
        print(f"app.py: error: unrecognized arguments. Use -h or --help to see options")
        exit(1)

    log_no = 0  # running number of emitted log lines
    for f_inx, source in enumerate(glob.glob(source_dir + '/**/*.*', recursive=True)):
        try:
            f = File(source)
            print("----------")
            for log in f.copyCore(source, dst_dir, copy_duplicate):
                log_no += 1
                print(f"{log_no}.{f_inx + 1}) {log}")
        except Exception as e:
            print(f'Copy error [{e}]: {source}')
if __name__ == '__main__':
main()
| app.py | 4,422 | Read and save metadata from file if value of metadata not exists - folder name File with the same name exists in dst. If source and dst have same size then determines 'copy_exists' Arguments from console Setup variable Number of log source_dir = 'C:/Users' dst_dir = 'C:/Users' copy_duplicate = False | 300 | en | 0.557558 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ReplicationStatus(Model):
    """This is the replication status of the gallery Image Version.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar aggregated_state: This is the aggregated replication status based on
     all the regional replication status flags. Possible values include:
     'Unknown', 'InProgress', 'Completed', 'Failed'
    :vartype aggregated_state: str or
     ~azure.mgmt.compute.v2018_06_01.models.AggregatedReplicationState
    :ivar summary: This is a summary of replication status for each region.
    :vartype summary:
     list[~azure.mgmt.compute.v2018_06_01.models.RegionalReplicationStatus]
    """

    # Both fields are server-populated, hence marked read-only.
    _validation = {
        'aggregated_state': {'readonly': True},
        'summary': {'readonly': True},
    }

    # Maps Python attribute names to their wire (JSON) keys and types.
    _attribute_map = {
        'aggregated_state': {'key': 'aggregatedState', 'type': 'str'},
        'summary': {'key': 'summary', 'type': '[RegionalReplicationStatus]'},
    }

    def __init__(self, **kwargs) -> None:
        super(ReplicationStatus, self).__init__(**kwargs)
        # Filled in by the service during deserialization.
        self.summary = None
        self.aggregated_state = None
| azure-mgmt-compute/azure/mgmt/compute/v2018_06_01/models/replication_status_py3.py | 1,667 | This is the replication status of the gallery Image Version.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar aggregated_state: This is the aggregated replication status based on
all the regional replication status flags. Possible values include:
'Unknown', 'InProgress', 'Completed', 'Failed'
:vartype aggregated_state: str or
~azure.mgmt.compute.v2018_06_01.models.AggregatedReplicationState
:ivar summary: This is a summary of replication status for each region.
:vartype summary:
list[~azure.mgmt.compute.v2018_06_01.models.RegionalReplicationStatus]
coding=utf-8 -------------------------------------------------------------------------- Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. See License.txt in the project root for license information. Code generated by Microsoft (R) AutoRest Code Generator. Changes may cause incorrect behavior and will be lost if the code is regenerated. -------------------------------------------------------------------------- | 1,060 | en | 0.70023 |
#!/usr/bin/env python
import SimpleHTTPServer
import SocketServer
import sys
import urllib
import logging
from optparse import OptionParser
class ResultsProvider(object):
    '''Base class used to fetch data from server for forwarding.

    Keeps one configured requests.Session and retries failed requests
    forever with a fixed delay between attempts.
    '''
    # Imported at class level so subclasses reach them via self.<module>.
    import requests
    import socket
    import time

    def __init__(self, **kwargs):
        '''Constructor with sensible requests defaults'''
        self.session = self.requests.Session()
        self.wait = kwargs.get('wait', 2.0)  # seconds between retries
        self.session.verify = kwargs.get('verify', False)
        self.session.timeout = kwargs.get('timeout', 5)
        self.session.stream = kwargs.get('stream', False)
        self.session.proxies = kwargs.get('proxies', {})
        self.session.headers = kwargs.get('headers', {})
        self.session.allow_redirects = kwargs.get('allow_redirects', True)
        self.session.cookies = self.requests.utils.cookiejar_from_dict(kwargs.get('cookies', {}))
        self.url = kwargs.get('url', None)

    def doRequest(self, verb, url, **kwargs):
        '''Makes web request with timeout support using requests session.

        Retries indefinitely on socket/requests errors, sleeping self.wait
        seconds between attempts.
        '''
        while True:
            try:
                # 'in' replaces the deprecated dict.has_key() (removed in
                # Python 3); behavior is identical on Python 2.
                body = kwargs.pop('body') if 'body' in kwargs else None
                rargs = {}
                for a in ['data', 'json', 'params', 'headers']:
                    if a in kwargs:
                        rargs[a] = kwargs.pop(a)
                req = self.requests.Request(verb, url, **rargs)  # data, headers, params, json
                prepped = req.prepare()
                if body:
                    # Allow callers to override the prepared body verbatim.
                    prepped.body = body
                response = self.session.send(prepped, **kwargs)  # other params here
                break
            except (self.socket.error, self.requests.exceptions.RequestException):
                logging.exception('Retrying request in %.2f seconds...', self.wait)
                self.time.sleep(self.wait)
                continue
        return response

    def nextResult(self):
        '''Redefine me to make the request and return the response.text'''
        raise NotImplementedError
class ResultsProviderImpl(ResultsProvider):
    '''Implementation for forwarding arbitrary requests to another server.'''

    def __init__(self, **kwargs):
        super(ResultsProviderImpl, self).__init__(**kwargs)
        # Upstream target configuration.
        self.hostname = kwargs.get('hostname')
        self.protocol = kwargs.get('protocol', 'http')
        self.port = kwargs.get('port')

    def nextResult(self, verb, path, **kwargs):
        '''Forward *verb* *path* to the configured upstream and return the response.'''
        target = '%s://%s:%s%s' % (self.protocol, self.hostname, self.port, path)
        return self.doRequest(verb, target, **kwargs)
class ThreadedTCPServer(SocketServer.ThreadingTCPServer):
    '''Simple Threaded TCP server'''
    # Each incoming connection is handled in its own thread; no behavior is
    # added beyond what ThreadingTCPServer provides.
    pass
class ServerHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    '''Simple http server request handler'''
    import datetime

    # Running count of debug sections printed by print_debug.
    counter=0
    # Headers that must not be blindly forwarded: the body is re-sent with
    # our own length/connection handling, and requests has already decoded
    # any transfer/content encoding.
    skip_headers = ['content-length', 'transfer-encoding', 'content-encoding', 'connection']

    def print_debug(self, title, data):
        # Print a timestamped, numbered, '='-separated debug section.
        sep = '=' * 40 + '\n'
        dt = self.datetime.datetime.now()
        dts = dt.strftime('%d/%m/%Y %H:%M:%S')
        self.counter+=1
        print sep + title + ' - ' + str(self.counter) + ' - ' + dts + '\n' + sep + data + '\n'

    def send_response(self, code, message=None):
        '''Redefine from original to get rid of extra headers'''
        # Unlike the base class, Server/Date headers are NOT emitted here,
        # so the upstream server's own headers pass through unmodified.
        self.log_request(code)
        if message is None:
            if code in self.responses:
                message = self.responses[code][0]
            else:
                message = ''
        if self.request_version != 'HTTP/0.9':
            self.wfile.write("%s %d %s\r\n" %
                             (self.protocol_version, code, message))
        #self.send_header('Server', self.version_string())
        #self.send_header('Date', self.date_time_string())

    def do(self, verb, data=None):
        # Forward the incoming request upstream, then relay the upstream
        # response (status, filtered headers, body) back to the client.
        args = {'headers' : self.headers.dict}
        if data:
            args['data'] = data
        response = self.server.resultsProvider.nextResult(verb, self.path, **args)
        if self.server.debug:
            self.print_debug('HTTP Request Received', self.raw_requestline + str(self.headers) + '\r\n' + (data if data else ''))
        self.send_response(response.status_code, response.reason)
        for header in response.headers.iteritems():
            if header[0].lower() not in self.skip_headers:
                self.send_header(header[0], header[1])
        # Send the decoded body's actual length and close the connection.
        self.send_header('Content-Length', int(len(response.content)))
        self.send_header('Connection', 'close')
        self.wfile.write('\r\n')
        self.wfile.write(response.content)
        if self.server.debug:
            http_version = '.'.join([a for a in str(response.raw.version)])
            version_line = 'HTTP/%s %s %s' %(http_version, response.status_code, response.reason)
            headers = '\r\n'.join([ '%s : %s' %(a[0],a[1]) for a in response.headers.items()])
            self.print_debug('HTTP Response Received', '\r\n'.join([version_line, headers, '\r\n' + response.content]))
        self.wfile.flush()
        self.wfile.close()

    def do_GET(self):
        self.do('GET')

    def do_HEAD(self):
        self.do('HEAD')

    def do_POST(self):
        # Read the request body only when a Content-Length was supplied.
        data = self.rfile.read(int(self.headers['Content-Length'])) if \
            self.headers.has_key('Content-Length') else ''
        self.do('POST', data=data)
def match_url(input):
    '''True when *input* looks like protocol://host[:port]/ with a host of
    more than four characters and exactly one trailing slash.'''
    parts = input.split('/')
    has_scheme = input.startswith(('http://', 'https://'))
    return has_scheme and input.endswith('/') and len(parts) == 4 and len(parts[2]) > 4
if __name__ == '__main__':
    # CLI front end: parse options, validate the remote url and optional
    # proxy, then start the threaded forwarding HTTP server.
    parser = OptionParser(usage='%prog -u [url] [options]')
    parser.add_option('-d', '--debug', dest='debug', action='store_true', help='show debugging messages')
    parser.add_option('-u', '--url', dest='remoteurl', type='string', help='remote base url')
    parser.add_option('-p', '--port', dest='port', type='int', default=8000, help='local listen port')
    parser.add_option('-a', '--address', dest='address', type='string', default='0.0.0.0', help='local listen address')
    parser.add_option('-x', '--proxy', dest='proxy', type='string', help='optional proxy to use in format http://address:port/')
    opts, args = parser.parse_args()
    if opts.remoteurl == None:
        print 'Please provide a remote url using the -u --url option'
        sys.exit()
    elif not match_url(opts.remoteurl):
        print 'Please enter remote url in format protocol://host[:port]/'
        sys.exit()
    try:
        # Split protocol://host[:port]/ into its pieces; default the port
        # from the scheme when it is not given explicitly.
        [protocol, _, host_port, _] = opts.remoteurl.split('/')
        protocol = protocol.rstrip(':')
        hostparts = host_port.split(':')
        hostname = hostparts[0]
        rport = int(hostparts[1]) if len(hostparts) > 1 else {'http' : 80, 'https' : 443}[protocol]
    except:
        print 'Please enter remote url in format protocol://host[:port]/'
        sys.exit()
    if opts.proxy:
        if not match_url(opts.proxy) and not opts.proxy.startswith('https'):
            print 'Please enter proxy in format http://host:port/'
            sys.exit()
        if opts.debug:
            print 'Using proxy ' + opts.proxy
        proxies = {protocol : opts.proxy}
    else:
        proxies = {}
    httpd = ThreadedTCPServer((opts.address, opts.port), ServerHandler)
    httpd.debug = opts.debug or False
    # add the custom resultsprovider implementation
    httpd.resultsProvider = ResultsProviderImpl(hostname=hostname, protocol=protocol, port=rport, proxies=proxies)
    print "Serving at: http://%s:%s/, forwarding requests to %s" % (opts.address, str(opts.port), opts.remoteurl)
    httpd.serve_forever()
| helper_servers/http_forwarder.py | 8,010 | !/usr/bin/env python data, headers, params, json other params herereturn self.doRequest(url='http://site/whatever/' + str(calculated_value)).text print (self.protocol_version, code, message)self.send_header('Server', self.version_string())self.send_header('Date', self.date_time_string())self.print_debug('Header Sent', ' :'.join([header[0], header[1]]))self.print_debug('Length of response', str(int(len(response.content)))) add the custom resultsprovider implementation | 471 | en | 0.158466 |
import spacy
from spacy.tokens import Doc, Span, Token
import urllib
import xml.etree.ElementTree as ET
import re
from SpacyHu.BaseSpacyHuComponent import BaseSpacyHuComponent
class HuLemmaMorph(BaseSpacyHuComponent):
    """spaCy pipeline component that annotates each token with Hungarian
    morphological analyses ('morph') and lemma candidates ('lemma'),
    fetched from a remote GATE processing service."""

    def __init__(self,
                 nlp,
                 label='Morph',
                 url='http://hlt.bme.hu/chatbot/gate/process?run='):
        necessary_modules = ['QT', 'HFSTLemm']
        super().__init__(nlp, label, url, necessary_modules)
        # Custom token attributes filled in by __call__.
        Token.set_extension('morph', default='')
        Token.set_extension('lemma', default='')

    def get_word_from_annotation(self, annotation):
        """Return the surface form stored in the annotation's 'string' feature."""
        # Iterating an Element yields its children; Element.getchildren()
        # was deprecated and removed in Python 3.9.
        for feature in annotation:
            if feature.find('Name').text == 'string':
                return feature.find('Value').text

    def get_token_by_idx(self, idx, doc):
        """Return the token whose character offset equals *idx* (None if absent)."""
        for token in doc:
            if token.idx == idx:
                return token

    def get_lemma_from_morph(self, morph):
        """Extract the set of lemma= values from an 'anas' analysis string."""
        return set(re.findall(r'(?<=lemma=).*?(?=\})', morph))

    def __call__(self, doc):
        text = urllib.parse.quote_plus(doc.text)
        result = urllib.request.urlopen(self.url + text).read()
        annotationset = ET.fromstring(result).find('AnnotationSet')
        for annotation in annotationset:
            if annotation.get('Type') != 'Token':
                continue
            word_index = int(annotation.get('StartNode'))
            word = self.get_word_from_annotation(annotation)  # surface form (currently unused)
            for feature in annotation:
                if feature.find('Name').text == 'anas':
                    token = self.get_token_by_idx(word_index, doc)
                    anas = (feature.find('Value').text
                            if feature.find('Value').text is not None
                            else '')
                    token._.morph = set(anas.split(';'))
                    token._.lemma = self.get_lemma_from_morph(anas)
                    break
        return doc
# Manual smoke test: tokenize one Hungarian sentence, run the morph
# component, and print each token's lemma candidates and analyses.
if __name__ == "__main__":
    from Tokenizer import HuTokenizer
    debug_text = 'Jó, hogy ez az alma piros, mert az olyan almákat szeretem.'
    # debug_text = 'megszentségteleníthetetlenségeitekért meghalnak'
    remote_url = 'http://hlt.bme.hu/chatbot/gate/process?run='
    nlp = spacy.blank("en")
    nlp.tokenizer = HuTokenizer(nlp.vocab, url=remote_url)
    morph_analyzer = HuLemmaMorph(nlp, url=remote_url)
    nlp.add_pipe(morph_analyzer, last=True)
    doc = nlp(debug_text)
    for token in doc:
        print('Token is: ' + token.text)
        print(token._.lemma)
        print(token._.morph)
        print()
| SpacyHu/SpacyHu/LemmatizerMorphAnalyzer.py | 2,620 | debug_text = 'megszentségteleníthetetlenségeitekért meghalnak' | 62 | hu | 0.981844 |
__version__ = '0.3.3'
import os
import sys
import logging
import argparse
from .core import WebCrawler
from .helpers import color_logging
def main():
    """ parse command line options and run commands.
    """
    parser = argparse.ArgumentParser(
        description='A web crawler for testing website links validation.')
    parser.add_argument(
        '-V', '--version', dest='version', action='store_true',
        help="show version")
    parser.add_argument(
        '--log-level', default='INFO',
        help="Specify logging level, default is INFO.")
    parser.add_argument(
        '--config-file', help="Specify config file path.")
    parser.add_argument(
        '--seeds', default='http://debugtalk.com',
        help="Specify crawl seed url(s), several urls can be specified with pipe; \
        if auth needed, seeds can be specified like user1:pwd1@url1|user2:pwd2@url2")
    parser.add_argument(
        '--include-hosts', help="Specify extra hosts to be crawled.")
    parser.add_argument(
        '--cookies', help="Specify cookies, several cookies can be joined by '|'. \
        e.g. 'lang:en,country:us|lang:zh,country:cn'")
    parser.add_argument(
        '--crawl-mode', default='BFS', help="Specify crawl mode, BFS or DFS.")
    parser.add_argument(
        '--max-depth', default=5, type=int, help="Specify max crawl depth.")
    parser.add_argument(
        '--concurrency', help="Specify concurrent workers number.")
    parser.add_argument(
        '--save-results', default='NO', help="Specify if save results, default is NO.")
    parser.add_argument("--grey-user-agent",
                        help="Specify grey environment header User-Agent.")
    parser.add_argument("--grey-traceid",
                        help="Specify grey environment cookie traceid.")
    parser.add_argument("--grey-view-grey",
                        help="Specify grey environment cookie view_gray.")

    # jenkins_mail_py is optional; when present, MailgunHelper registers its
    # own extra options (e.g. jenkins build number) on the same parser.
    try:
        from jenkins_mail_py import MailgunHelper
        mailer = MailgunHelper(parser)
    except ImportError:
        mailer = None

    args = parser.parse_args()
    if args.version:
        print("WebCrawler version: {}".format(__version__))
        exit(0)

    log_level = getattr(logging, args.log_level.upper())
    logging.basicConfig(level=log_level)
    color_logging("args: %s" % args)

    main_crawler(args, mailer)
def main_crawler(args, mailer=None):
    """Run the crawler with parsed CLI *args*; optionally email a report.

    Crawls once per cookie set in --cookies, then prints (and optionally
    saves) the results even when interrupted with Ctrl-C.
    """
    include_hosts = args.include_hosts.split(',') if args.include_hosts else []
    cookies_list = args.cookies.split('|') if args.cookies else ['']
    # Bug fix: the jenkins_build_number option is only added to the parser
    # by MailgunHelper; fall back to 0 so running without jenkins_mail_py
    # does not crash with AttributeError.
    jenkins_build_number = getattr(args, 'jenkins_build_number', 0)
    logs_folder = os.path.join(os.getcwd(), "logs", '{}'.format(jenkins_build_number))
    web_crawler = WebCrawler(args.seeds, include_hosts, logs_folder, args.config_file)

    # set grey environment
    if args.grey_user_agent and args.grey_traceid and args.grey_view_grey:
        web_crawler.set_grey_env(args.grey_user_agent, args.grey_traceid, args.grey_view_grey)

    canceled = False
    try:
        for cookies_str in cookies_list:
            cookies = {}
            for cookie_str in cookies_str.split(','):
                if ':' not in cookie_str:
                    continue
                # maxsplit=1 tolerates ':' inside the cookie value.
                key, value = cookie_str.split(':', 1)
                cookies[key.strip()] = value.strip()
            web_crawler.start(
                cookies,
                args.crawl_mode,
                args.max_depth,
                args.concurrency
            )
        if mailer and mailer.config_ready:
            subject = "%s" % args.seeds
            mail_content_ordered_dict, flag_code = web_crawler.get_mail_content_ordered_dict()
            mailer.send_mail(subject, mail_content_ordered_dict, flag_code)
    except KeyboardInterrupt:
        canceled = True
        color_logging("Canceling...", color='red')
    finally:
        save_results = False if args.save_results.upper() == "NO" else True
        web_crawler.print_result(canceled, save_results)
| webcrawler/__init__.py | 4,038 | parse command line options and run commands.
set grey environment | 72 | en | 0.841679 |
#!/usr/bin/env python
import bottle
import os, json
from .utils import distance, neighbours, direction
from .defensive import find_my_tail, trouble, find_enemy_tail, eat_food, find_my_tail_emergency
from .snake import Snake
from .gameboard import GameBoard
SAFTEY = 0
SNAKE = 1
FOOD = 3
DANGER = 5
def move_response(move):
    """Return a 200 JSON HTTP response whose body is {"move": <move>}."""
    assert move in ['up', 'down', 'left', 'right'], \
        "Move must be one of [up, down, left, right]"
    response_body = json.dumps({"move": move})
    return bottle.HTTPResponse(
        status=200,
        headers={"Content-Type": "application/json"},
        body=response_body)
def init(data):
    """
    Initialize the grids and update cell values from a /move payload.

    Returns (game_id, grid, food, charlie, enemies, check_food):
    grid holds cell values; food is sorted by distance to our head;
    charlie is our snake; enemies are all opposing snakes; check_food is a
    second grid used to look ahead when eating food.
    """
    board = data['board']
    grid = GameBoard(board['height'], board['width'])
    check_food = GameBoard(board['height'], board['width'])
    charlie = Snake(data['you'])

    food = []
    for item in board['food']:
        pos = [item['x'], item['y']]
        food.append(pos)
        grid.set_cell(pos, FOOD)
        check_food.set_cell(pos, FOOD)

    enemies = []
    for snake_data in board['snakes']:
        snake = Snake(snake_data)
        for coord in snake.coords:
            grid.set_cell(coord, SNAKE)
            check_food.set_cell(coord, SNAKE)
        # A tail cell frees up next turn unless the snake just ate.
        if snake.health < 100 and snake.length > 2 and data['turn'] >= 3:
            grid.set_cell(snake.tail, SAFTEY)
            check_food.set_cell(snake.tail, SAFTEY)
        if snake.id != charlie.id:
            # Cells next to a bigger enemy's head are dangerous.
            for neighbour in neighbours(snake.head, grid, 0, snake.coords, [1]):
                if snake.length >= charlie.length:
                    grid.set_cell(neighbour, DANGER)
                    check_food.set_cell(neighbour, DANGER)
            enemies.append(snake)

    food = sorted(food, key=lambda p: distance(p, charlie.head))
    game_id = data['game']['id']
    return game_id, grid, food, charlie, enemies, check_food
@bottle.post('/ping')
def ping():
    """Health-check endpoint: reply 200 with an empty JSON object."""
    return bottle.HTTPResponse(
        status=200,
        headers={"Content-Type": "application/json"},
        body=json.dumps({}))
@bottle.post('/start')
def start():
    """Game-start endpoint: return our snake's appearance settings."""
    appearance = {
        "color": '#002080',
        'headType': 'pixel',
        'tailType': 'pixel'
    }
    return bottle.HTTPResponse(
        status=200,
        headers={"Content-Type": "application/json"},
        body=json.dumps(appearance))
@bottle.post('/move')
def move():
    """Turn handler: choose a direction by trying, in order, eating food,
    chasing our own tail, chasing an enemy tail, then any free space."""
    data = bottle.request.json
    game_id, grid, food, charlie, enemies, check_food = init(data)
    # grid.display_game(game_id)
    # Prioritise food while the board is crowded, we are short, or hungry.
    if len(enemies) > 2 or charlie.length <= 25 or charlie.health <= 60:
        path = eat_food(charlie, grid, food, check_food)
        if path:
            return move_response(direction(path[0], path[1]))
    # NOTE(review): when the branch above is skipped, charlie.length > 25,
    # so the branch below always runs and 'path' is always assigned.
    if charlie.length >= 3:
        path = find_my_tail(charlie, grid)
        if path:
            return move_response(direction(path[0], path[1]))
    if not path:
        path = find_enemy_tail(charlie, enemies, grid)
        if path:
            return move_response(direction(path[0], path[1]))
    # # if our length is greater than threshold and no other path was available
    if charlie.length >= 3:
        path = find_my_tail_emergency(charlie, grid)
        if path:
            return move_response(direction(path[0], path[1]))
    # Choose a random free space if no available enemy tail
    if not path:
        path = trouble(charlie, grid)
        if path:
            return move_response(direction(path[0], path[1]))
    # NOTE(review): if no strategy found a path this falls through and
    # returns None (bottle sends an empty body) — confirm the game server
    # tolerates that.
@bottle.post('/end')
def end():
    """Game-over endpoint: acknowledge with an empty JSON object."""
    return bottle.HTTPResponse(
        status=200,
        headers={"Content-Type": "application/json"},
        body=json.dumps({}))
application = bottle.default_app()
if __name__ == '__main__':
bottle.run(application, host=os.getenv('IP', '0.0.0.0'), port=os.getenv('PORT', '8080'), quiet = True) | app/main.py | 4,662 | Initialize grid and update cell values
@param data -> Json response from bottle
@return game_id -> Game id for debuggin purposes when displaying grid
@return grid -> Grid with updated cell values
@return food -> Sorted array of food by closest to charlie
@return charlie -> My snake
@return enemies -> Array of all enemy snakes
@return check_food -> Secondary grid to look ahead when eating food
!/usr/bin/env python print("turn is {}".format(data['turn'])) grid.display_game(game_id) print('eat path {}'.format(path)) print('find my tail path {}'.format(path)) print('find enemy tail path {}'.format(path)) if our length is greater than threshold and no other path was available print('find my tail emergency path {}'.format(path)) Choose a random free space if no available enemy tail print('trouble path {}'.format(path)) | 834 | en | 0.596676 |
#!/usr/bin/python3
# -*- coding: utf8 -*-
# Copyright (c) 2020 Baidu, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Procedure Params
"""
class ProcedureParams:
    """
    The procedure params dict
    """

    def __init__(self):
        """
        The constructor of the ProcedureParams class
        """
        self.paramsDict = {}  # index -> ProcedureParamStorage

    def __getitem__(self, index):
        """
        Get the procedure params according to the index.
        Create the register when it does not exist.

        :param index:
        :return: ProcedureParamStorage
        """
        existing = self.paramsDict.get(index)
        if existing is None:
            # Lazily create and cache the storage for this index.
            existing = ProcedureParamStorage(index)
            self.paramsDict[index] = existing
        return existing
class ProcedureParamStorage:
    """
    The storage for procedure param
    """

    def __init__(self, index):
        """
        The quantum param object needs to know its index.

        :param index: the quantum register index
        """
        # Position of this parameter within the procedure; assigned once.
        self.index = index
The procedure params dict
Get the procedure params according to the index.
Create the register when it does not exist.
:param index:
:return: ProcedureParamStorage
The constructor of the ProcedureParams class
The quantum param object needs to know its index.
:param index: the quantum register index
Procedure Params
!/usr/bin/python3 -*- coding: utf8 -*- Copyright (c) 2020 Baidu, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. the inner data for procedure params dict | 1,005 | en | 0.809563 |
#!/usr/bin/env python
# coding=utf-8
class PyPIPackageProject:
pass
| asgi_webdav/core.py | 74 | !/usr/bin/env python coding=utf-8 | 33 | en | 0.221043 |
#!/usr/bin/env python
# encoding: utf-8
"""
@Author: yangwenhao
@Contact: 874681044@qq.com
@Software: PyCharm
@File: Cosine.py
@Time: 19-6-26 下午9:43
@Overview: Implement Cosine Score for speaker identification!
Enrollment set files will be in the 'Data/enroll_set.npy' and the classes-to-index file is 'Data/enroll_classes.npy'
Test set files are in the 'Data/test_set.npy' and the utterances-to-index file is 'Data/test_classes.npy'
"""
import numpy as np
import torch.nn as nn
ENROLL_FILE = "Data/xvector/enroll/extract_adagrad-lr0.1-wd0.0-embed512-alpha10.npy"
ENROLL_CLASS = "Data/enroll_classes.npy"
TEST_FILE = "Data/xvector/test/extract_adagrad-lr0.1-wd0.0-embed512-alpha10.npy"
TEST_CLASS = "Data/test_classes.npy"
# test_vec = np.array([1,2,3,4])
# refe_vec = np.array([8,3,3,4])
def normalize(narray, order=2, axis=1):
    """Return *narray* normalized to unit norm along *axis*.

    Bug fix: the float32 epsilon now goes into the denominator so all-zero
    vectors do not divide by zero; previously it was added to the quotient,
    which still divided by zero and shifted every normalized value by eps.
    """
    norm = np.linalg.norm(narray, ord=order, axis=axis, keepdims=True)
    return narray / (norm + np.finfo(np.float32).eps)
def cos_dis(test_vec, refe_vec):
    """Cosine-similarity matrix between the normalized vectors."""
    normalized_test = normalize(test_vec, axis=0)
    normalized_ref = normalize(refe_vec, axis=0)
    return np.matmul(normalized_test, normalized_ref.T)
# Load enrollment/test embeddings and their label mappings from disk.
enroll_features = np.load(ENROLL_FILE, allow_pickle=True)
enroll_classes = np.load(ENROLL_CLASS, allow_pickle=True).item()
test_features = np.load(TEST_FILE, allow_pickle=True)
test_classes = np.load(TEST_CLASS, allow_pickle=True)

# Average all enrollment embeddings per speaker into one 512-dim centroid.
enroll_dict = dict()
for item in enroll_classes:
    num=0
    feat = np.zeros([512], dtype=float)
    for (label, feature) in enroll_features:
        if label==enroll_classes[item]:
            feat += feature.detach().numpy()
            num += 1
    enroll_dict[item] = feat/num

# Score every test utterance against every enrolled speaker.
# NOTE(review): despite the module name, this computes the Euclidean
# distance to each centroid, not a cosine score — confirm intent.
similarity = {}
for (label, feature) in test_features:
    utt = {}
    for item in enroll_dict:
        utt[item] = np.linalg.norm(feature.detach().numpy()-enroll_dict[item])
    # Map the numeric label back to the utterance id.
    for utterance in test_classes:
        if int(utterance[1])==label.item():
            test_id = utterance[0]
    similarity[test_id]=utt
print(similarity)
# cos_dis(test_vec, refe_vec)
| Score/Cosine_Score.py | 2,006 | @Author: yangwenhao
@Contact: 874681044@qq.com
@Software: PyCharm
@File: Cosine.py
@Time: 19-6-26 下午9:43
@Overview: Implement Cosine Score for speaker identification!
Enrollment set files will be in the 'Data/enroll_set.npy' and the classes-to-index file is 'Data/enroll_classes.npy'
Test set files are in the 'Data/test_set.npy' and the utterances-to-index file is 'Data/test_classes.npy'
!/usr/bin/env python encoding: utf-8 test_vec = np.array([1,2,3,4]) refe_vec = np.array([8,3,3,4]) cos_dis(test_vec, refe_vec) | 517 | en | 0.520443 |
"""
Copyright 2013 The Trustees of Princeton University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import google
from google.appengine.ext import ndb
import google.appengine.api.memcache as google_memcache
import google.appengine.ext.deferred as google_deferred
from google.appengine.datastore.datastore_query import Cursor as GoogleCursor
def raise_(ex):
    # Statement-as-expression helper: lets a lambda raise an exception
    # (used by concurrent_return below).
    raise ex
class FutureWrapper( ndb.Future ):
    """Pre-resolved future: wraps an already-computed value so plain results
    can flow through code that expects an ndb-style future."""
    state = ndb.Future.FINISHING
    _done = True

    def __init__( self, result ):
        # The wrapped value; returned as-is by get_result().
        self.result = result

    def get_result( self ):
        return self.result

    def done( self ):
        # Always finished by construction.
        return True

    def wait( self ):
        pass

    def check_success( self ):
        return None

    def get_exception( self ):
        return None

    def get_traceback( self ):
        return None
# TODO: wrap query for one item into a future
class FutureQueryWrapper( object ):
    """Adapts a query future that yields a list into a future-like object
    that yields the first element (or None when the query returned nothing)."""

    def __init__(self, query_fut):
        self.query_fut = query_fut

    def get_result( self ):
        """Return the first row of the query result, or None if empty."""
        res = self.query_fut.get_result()
        # 'is not None' avoids invoking a custom __eq__ on result objects.
        if res is not None and len(res) > 0:
            return res[0]
        else:
            return None

    def done( self ):
        return self.query_fut.done()

    def wait( self):
        return self.query_fut.wait()

    def check_success( self ):
        return self.query_fut.check_success()

    def get_exception( self ):
        return self.query_fut.get_exception()

    def get_traceback( self ):
        return self.query_fut.get_traceback()
# aliases for types: expose the ndb property classes under short names so
# model code does not depend on the google datastore module directly.
Model = ndb.Model
Integer = ndb.IntegerProperty
Float = ndb.FloatProperty
String = ndb.StringProperty
Text = ndb.TextProperty
Key = ndb.KeyProperty
Boolean = ndb.BooleanProperty
Json = ndb.JsonProperty
Blob = ndb.BlobProperty
Computed = ndb.ComputedProperty
Pickled = ndb.PickleProperty
Cursor = GoogleCursor
# aliases for keys
make_key = ndb.Key
def wait_futures( future_list ):
    """
    Wait for all of a list of futures to finish.
    Works with FutureWrapper.
    """
    # Split the inputs: plain values get wrapped so callers can uniformly
    # call get_result(); real futures are waited on collectively. None
    # entries are dropped.
    wrapped = []
    real_futures = []
    for fut in future_list:
        if fut is None:
            continue
        if isinstance(fut, (ndb.Future, FutureWrapper)):
            # a future or something compatible
            real_futures.append(fut)
        else:
            # definitely not a future
            wrapped.append(FutureWrapper(fut))
    ndb.Future.wait_all(real_futures)
    return real_futures + wrapped
# deferred task queue and tasklet-based concurrency primitives
deferred = google_deferred
concurrent = ndb.tasklet
# tasklets signal their return value by raising ndb.Return
concurrent_return = (lambda x: (raise_(ndb.Return( x ))))
# asynchronous operations
get_multi_async = ndb.get_multi_async
put_multi_async = ndb.put_multi_async
# synchronous operations
get_multi = ndb.get_multi
put_multi = ndb.put_multi
delete_multi = ndb.delete_multi
# aliases for memcache
memcache = google_memcache
# aliases for transaction
transaction = ndb.transaction
transaction_async = ndb.transaction_async
transactional = ndb.transactional
# aliases for query predicates
opAND = ndb.AND
opOR = ndb.OR
# aliases for top-level asynchronous loop
toplevel = ndb.toplevel
# aliases for common exceptions
RequestDeadlineExceededError = google.appengine.runtime.DeadlineExceededError
APIRequestDeadlineExceededError = google.appengine.runtime.apiproxy_errors.DeadlineExceededError
URLRequestDeadlineExceededError = google.appengine.api.urlfetch_errors.DeadlineExceededError
TransactionFailedError = google.appengine.ext.db.TransactionFailedError
| ms/storage/backends/google_appengine.py | 4,034 | Wait for all of a list of futures to finish.
Works with FutureWrapper.
Copyright 2013 The Trustees of Princeton University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
TODO: wrap query for one item into a future aliases for types aliases for keys see if any of these are NOT futures...then just wrap them into a future object that implements a get_result() definitely not a future a future or something compatible asynchronous operations synchronous operations aliases for memcache aliases for transaction alises for query predicates aliases for top-level asynchronous loop aliases for common exceptions | 1,086 | en | 0.807119 |
import sys
import os
import math
import imageio
from moviepy.editor import *
import time
def read_video(video_name):
    """Load 'testset/<video_name>' as a moviepy VideoFileClip."""
    return VideoFileClip('testset/' + video_name)
def video2frame(video_name):
    """Dump every frame of the video as JPEG files under ``testset/<video_name>/``.

    File names are zero-padded (``frame_000.jpg`` ...) so they sort correctly.
    """
    clip = read_video(video_name)
    total_frames = int(clip.duration * clip.fps)  # duration [s] * frames per second
    pad_width = math.ceil(math.log(total_frames, 10))  # e.g. 720 frames -> width 3

    out_dir = 'testset/' + video_name
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    for frame_idx in range(total_frames):
        out_path = out_dir + '/frame_' + str(frame_idx).zfill(pad_width) + '.jpg'
        clip.save_frame(out_path, frame_idx / clip.fps)
def video2poseframe(video_name):
    """Run multi-person pose estimation on every frame of a video and save
    annotated JPEG frames under ``testset/<video_name>/``.

    A skeleton is only drawn when more than 5 of its 17 keypoints are valid,
    which filters out spurious detections.
    """
    import numpy as np

    sys.path.append(os.path.dirname(__file__) + "/../")

    from scipy.misc import imread, imsave

    from config import load_config
    from dataset.factory import create as create_dataset
    from nnet import predict
    from util import visualize
    from dataset.pose_dataset import data_to_input

    from multiperson.detections import extract_detections
    from multiperson.predict import SpatialModel, eval_graph, get_person_conf_multicut
    from multiperson.visualize import PersonDraw, visualize_detections

    import matplotlib.pyplot as plt

    from PIL import Image, ImageDraw
    import random

    cfg = load_config("demo/pose_cfg_multi.yaml")
    dataset = create_dataset(cfg)
    sm = SpatialModel(cfg)
    sm.load()

    # Load and setup CNN part detector
    sess, inputs, outputs = predict.setup_pose_prediction(cfg)

    video = read_video(video_name)
    video_frame_number = int(video.duration * video.fps)  # duration [s] * fps
    video_frame_ciphers = math.ceil(math.log(video_frame_number, 10))  # zero-pad width, e.g. 720 -> 3

    if not os.path.exists('testset/' + video_name):
        os.makedirs('testset/' + video_name)

    for i in range(0, video_frame_number):
        image = video.get_frame(i / video.fps)

        image_batch = data_to_input(image)

        # Compute prediction with the CNN
        outputs_np = sess.run(outputs, feed_dict={inputs: image_batch})
        scmap, locref, pairwise_diff = predict.extract_cnn_output(outputs_np, cfg, dataset.pairwise_stats)

        detections = extract_detections(cfg, scmap, locref, pairwise_diff)
        unLab, pos_array, unary_array, pwidx_array, pw_array = eval_graph(sm, detections)
        person_conf_multi = get_person_conf_multicut(sm, unLab, unary_array, pos_array)

        print('person_conf_multi: ')
        print(type(person_conf_multi))
        print(person_conf_multi)

        # Draw detected keypoints on top of the frame.
        image_img = Image.fromarray(image)
        draw = ImageDraw.Draw(image_img)

        point_num = 17  # keypoints per person in this model
        print('person_conf_multi.size: ')
        print(person_conf_multi.size)
        # size = people * points * 2 coordinates
        people_num = int(person_conf_multi.size / (point_num * 2))
        print('people_num: ')
        print(people_num)

        point_r = 5  # radius of drawn points
        people_real_num = 0

        for people_i in range(0, people_num):
            # One random colour per person.
            point_color = (
                random.randrange(0, 256),
                random.randrange(0, 256),
                random.randrange(0, 256),
                255,
            )
            # A (0, 0) coordinate means the keypoint was not detected.
            point_count = sum(
                1
                for point_i in range(0, point_num)
                if person_conf_multi[people_i][point_i][0] + person_conf_multi[people_i][point_i][1] != 0
            )
            if point_count > 5:  # more than 5 valid points -> treat as a real person
                people_real_num = people_real_num + 1
                for point_i in range(0, point_num):
                    draw.ellipse(
                        (
                            person_conf_multi[people_i][point_i][0] - point_r,
                            person_conf_multi[people_i][point_i][1] - point_r,
                            person_conf_multi[people_i][point_i][0] + point_r,
                            person_conf_multi[people_i][point_i][1] + point_r,
                        ),
                        fill=point_color,
                    )

        print('people_real_num: ')
        print(people_real_num)

        video_name_result = 'testset/' + video_name + '/frame_pose_' + str(i).zfill(video_frame_ciphers) + '.jpg'
        # BUG FIX: Pillow's format identifier is "JPEG"; passing "JPG" raises
        # KeyError in Image.save().
        image_img.save(video_name_result, "JPEG")
def video2posevideo(video_name):
    """Run multi-person pose estimation on a video and write an annotated
    ``testset/<video_name>_pose.mp4`` with skeleton overlays and counters.

    Per-frame overlay: keypoints and limb lines per person, a bounding
    rectangle for persons with >= point_min valid keypoints, plus text with
    running people counts, the frame index and elapsed time.
    """
    # NOTE(review): time.clock() was removed in Python 3.8 — switch to
    # time.perf_counter() when upgrading.
    time_start = time.clock()

    import numpy as np

    sys.path.append(os.path.dirname(__file__) + "/../")

    from scipy.misc import imread, imsave

    from config import load_config
    from dataset.factory import create as create_dataset
    from nnet import predict
    from util import visualize
    from dataset.pose_dataset import data_to_input

    from multiperson.detections import extract_detections
    from multiperson.predict import SpatialModel, eval_graph, get_person_conf_multicut
    from multiperson.visualize import PersonDraw, visualize_detections

    import matplotlib.pyplot as plt

    from PIL import Image, ImageDraw, ImageFont
    font = ImageFont.truetype("./font/NotoSans-Bold.ttf", 24)
    import random

    cfg = load_config("demo/pose_cfg_multi.yaml")
    dataset = create_dataset(cfg)
    sm = SpatialModel(cfg)
    sm.load()
    draw_multi = PersonDraw()

    # Load and setup CNN part detector
    sess, inputs, outputs = predict.setup_pose_prediction(cfg)

    video = read_video(video_name)
    video_frame_number = int(video.duration * video.fps)  ## duration: second / fps: frame per second
    video_frame_ciphers = math.ceil(math.log(video_frame_number, 10))  ## ex. 720 -> 3

    pose_frame_list = []  # annotated frames, assembled into the output clip at the end

    point_r = 3  # radius of points
    point_min = 10  # threshold of points - If there are more than point_min points in person, we define he/she is REAL PERSON
    part_min = 3  # threshold of parts - If there are more than part_min parts in person, we define he/she is REAL PERSON / part means head, arm and leg
    point_num = 17  # There are 17 points in 1 person

    def ellipse_set(person_conf_multi, people_i, point_i):
        # Bounding box of the dot drawn for one keypoint.
        return (person_conf_multi[people_i][point_i][0] - point_r, person_conf_multi[people_i][point_i][1] - point_r, person_conf_multi[people_i][point_i][0] + point_r, person_conf_multi[people_i][point_i][1] + point_r)

    def line_set(person_conf_multi, people_i, point_i, point_j):
        # End points of the limb line between two keypoints.
        return (person_conf_multi[people_i][point_i][0], person_conf_multi[people_i][point_i][1], person_conf_multi[people_i][point_j][0], person_conf_multi[people_i][point_j][1])

    def draw_ellipse_and_line(draw, person_conf_multi, people_i, a, b, c, point_color):
        # Draw one limb: three joints (a, b, c) and the two connecting segments.
        draw.ellipse(ellipse_set(person_conf_multi, people_i, a), fill=point_color)
        draw.ellipse(ellipse_set(person_conf_multi, people_i, b), fill=point_color)
        draw.ellipse(ellipse_set(person_conf_multi, people_i, c), fill=point_color)
        draw.line(line_set(person_conf_multi, people_i, a, b), fill=point_color, width=5)
        draw.line(line_set(person_conf_multi, people_i, b, c), fill=point_color, width=5)

    for i in range(0, video_frame_number):
        image = video.get_frame(i/video.fps)

        image_batch = data_to_input(image)

        # Compute prediction with the CNN
        outputs_np = sess.run(outputs, feed_dict={inputs: image_batch})
        scmap, locref, pairwise_diff = predict.extract_cnn_output(outputs_np, cfg, dataset.pairwise_stats)

        detections = extract_detections(cfg, scmap, locref, pairwise_diff)
        unLab, pos_array, unary_array, pwidx_array, pw_array = eval_graph(sm, detections)
        person_conf_multi = get_person_conf_multicut(sm, unLab, unary_array, pos_array)

        # print('person_conf_multi: ')
        # print(type(person_conf_multi))
        # print(person_conf_multi)

        # Add library to save image
        image_img = Image.fromarray(image)

        # Save image with points of pose
        draw = ImageDraw.Draw(image_img)

        people_num = 0
        people_real_num = 0  # persons passing the keypoint-count threshold
        people_part_num = 0  # persons passing the limb-count threshold

        # size = people * points * 2 coordinates
        people_num = person_conf_multi.size / (point_num * 2)
        people_num = int(people_num)
        print('people_num: ' + str(people_num))

        for people_i in range(0, people_num):
            point_color_r = random.randrange(0, 256)
            point_color_g = random.randrange(0, 256)
            point_color_b = random.randrange(0, 256)
            point_color = (point_color_r, point_color_g, point_color_b, 255)
            point_list = []
            point_count = 0
            point_i = 0  # index of points
            part_count = 0  # count of parts in THAT person

            # To find rectangle which include that people - list of points x, y coordinates
            people_x = []
            people_y = []

            for point_i in range(0, point_num):
                if person_conf_multi[people_i][point_i][0] + person_conf_multi[people_i][point_i][1] != 0:  # If coordinates of point is (0, 0) == meaningless data
                    point_count = point_count + 1
                    point_list.append(point_i)

            # Draw each parts
            if (5 in point_list) and (7 in point_list) and (9 in point_list):  # Draw left arm
                draw_ellipse_and_line(draw, person_conf_multi, people_i, 5, 7, 9, point_color)
                part_count = part_count + 1
            if (6 in point_list) and (8 in point_list) and (10 in point_list):  # Draw right arm
                draw_ellipse_and_line(draw, person_conf_multi, people_i, 6, 8, 10, point_color)
                part_count = part_count + 1
            if (11 in point_list) and (13 in point_list) and (15 in point_list):  # Draw left leg
                draw_ellipse_and_line(draw, person_conf_multi, people_i, 11, 13, 15, point_color)
                part_count = part_count + 1
            if (12 in point_list) and (14 in point_list) and (16 in point_list):  # Draw right leg
                draw_ellipse_and_line(draw, person_conf_multi, people_i, 12, 14, 16, point_color)
                part_count = part_count + 1

            if point_count >= point_min:
                people_real_num = people_real_num + 1
                for point_i in range(0, point_num):
                    if person_conf_multi[people_i][point_i][0] + person_conf_multi[people_i][point_i][1] != 0:  # If coordinates of point is (0, 0) == meaningless data
                        draw.ellipse(ellipse_set(person_conf_multi, people_i, point_i), fill=point_color)
                        people_x.append(person_conf_multi[people_i][point_i][0])
                        people_y.append(person_conf_multi[people_i][point_i][1])
                # Draw rectangle which include that people
                draw.rectangle([min(people_x), min(people_y), max(people_x), max(people_y)], fill=point_color, outline=5)

            if part_count >= part_min:
                people_part_num = people_part_num + 1

        # Overlay the running statistics on the frame.
        draw.text((0, 0), 'People(by point): ' + str(people_real_num) + ' (threshold = ' + str(point_min) + ')', (0,0,0), font=font)
        draw.text((0, 32), 'People(by line): ' + str(people_part_num) + ' (threshold = ' + str(part_min) + ')', (0,0,0), font=font)
        draw.text((0, 64), 'Frame: ' + str(i) + '/' + str(video_frame_number), (0,0,0), font=font)
        draw.text((0, 96), 'Total time required: ' + str(round(time.clock() - time_start, 1)) + 'sec', (0,0,0))

        print('people_real_num: ' + str(people_real_num))
        print('people_part_num: ' + str(people_part_num))
        print('frame: ' + str(i))

        image_img_numpy = np.asarray(image_img)
        pose_frame_list.append(image_img_numpy)

    video_pose = ImageSequenceClip(pose_frame_list, fps=video.fps)
    video_pose.write_videofile("testset/" + video_name + "_pose.mp4", fps=video.fps)
    print("Time(s): " + str(time.clock() - time_start))
| video_pose_ed.py | 12,134 | Read video from file duration: second / fps: frame per second ex. 720 -> 3 Load and setup CNN part detector duration: second / fps: frame per second ex. 720 -> 3 Compute prediction with the CNN Add library to save image Save image with points of pose index of points radius of points If coordinates of point is (0, 0) == meaningless data If there are more than 5 point in person, we define he/she is REAL PERSON Load and setup CNN part detector duration: second / fps: frame per second ex. 720 -> 3 radius of points threshold of points - If there are more than point_min points in person, we define he/she is REAL PERSON threshold of parts - If there are more than part_min parts in person, we define he/she is REAL PERSON / part means head, arm and leg There are 17 points in 1 person Compute prediction with the CNN print('person_conf_multi: ') print(type(person_conf_multi)) print(person_conf_multi) Add library to save image Save image with points of pose index of points count of parts in THAT person To find rectangle which include that people - list of points x, y coordinates If coordinates of point is (0, 0) == meaningless data Draw each parts Draw left arm Draw right arm Draw left leg Draw right leg If coordinates of point is (0, 0) == meaningless data Draw rectangle which include that people | 1,306 | en | 0.843226 |
# coding=utf-8
from pub.tables.resources import *
from pub.tables.user import *
import pub.client.login as login
from pub.permission.user import is_logged,is_owner
def is_valid_key(key, r_type):
    """Return True when *key* is not yet used by any resource record.

    Checks the shared tables first, then (unless r_type == -1) the table for
    the specific resource type.  Unknown types are never valid.
    """
    # Shared tables: a hit in either one means the key is taken.
    for shared_model in (resource_type, resource_info):
        try:
            shared_model.objects.get(key=key)
            return False
        except:  # no row for this key -> keep checking
            pass

    if (r_type == -1):
        return True

    try:
        per_type_model = {
            s.RESOURCE_TYPE_CUSTOMED: resource_customed,
            s.RESOURCE_TYPE_TEMPLATED: resource_templated,
            s.RESOURCE_TYPE_RESTFUL_API: resource_restful,
            s.RESOURCE_TYPE_IFRAME: resource_iframe,
            s.RESOURCE_TYPE_SHORT_LINK: resource_link,
        }.get(r_type)
        if per_type_model is None:
            # Unknown resource type: mirror the original behaviour (invalid).
            return False
        per_type_model.objects.get(key=key)
        return False
    except:  # lookup failed -> key is free for this type
        return True
def set_permission(key, readable, writeable, modifiable, token=''):
    """Replace any existing permission row for *key* with the given flags.

    :param key: resource key the permission applies to
    :param readable/writeable/modifiable: accessibility levels (s.ACCESSIBILITY_*)
    :param token: optional access token granting token-based access
    """
    # Fix: the original raised a dummy Exception after delete() just to fall
    # into the except block that did the create() — plain sequential code now.
    try:
        resource_permission.objects.get(key=key).delete()
    except Exception:
        pass  # no existing row for this key
    resource_permission.objects.create(key=key, readable=readable, writeable=writeable, modifiable=modifiable, token=token)
def can_read(request, key, token=''):
    """True when *request* may read resource *key*; False on any failure."""
    try:
        perms = __get_resource_permission(key)
        # perms = (readable, writeable, modifiable, token)
        return __accessibility_verfy(perms[0], request, key, token, perms[3])
    except:
        return False
def can_write(request, key, token=''):
    """True when *request* may write resource *key*; False on any failure."""
    try:
        perms = __get_resource_permission(key)
        # perms = (readable, writeable, modifiable, token)
        return __accessibility_verfy(perms[1], request, key, token, perms[3])
    except:
        return False
def can_modify(request, key, token=''):
    """True when *request* may modify resource *key*; False on any failure."""
    try:
        perms = __get_resource_permission(key)
        # perms = (readable, writeable, modifiable, token)
        return __accessibility_verfy(perms[2], request, key, token, perms[3])
    except:
        return False
def can_create(request, r_type):
    """Only logged-in users may create resources.

    A per-user quota check (user_permission.volume for *r_type*) used to live
    here but is currently disabled; r_type is therefore unused.
    """
    return bool(is_logged(request))
def did_create(request, r_type):
    """Decrement the creator's remaining quota for *r_type* (no-op if not logged in)."""
    if not is_logged(request):
        return
    user = login.get_user_by_session(request, request.session.get(s.SESSION_LOGIN))
    quota = user_permission.objects.get(user_id=user, type=r_type)
    quota.volume -= 1
    quota.save()
def __get_resource_permission(key):
    """Return (readable, writeable, modifiable, token) for *key*.

    Raises if no permission row exists — callers wrap this in try/except.
    """
    perm = resource_permission.objects.get(key=key)
    return perm.readable, perm.writeable, perm.modifiable, perm.token
def __accessibility_verfy(accessibility, request, key, token, verify_token):
    """Check one accessibility level against the request.

    Fix: the original fell off the end (returning None) for failed TOKEN /
    LOGIN checks and unknown levels; every path now returns an explicit bool
    (backward compatible — None and False are both falsy).
    The misspelled name ("verfy") is kept for existing callers.

    :param accessibility: one of the s.ACCESSIBILITY_* constants
    :param token: token supplied by the caller ('' means none)
    :param verify_token: token stored on the resource's permission row
    """
    if accessibility == s.ACCESSIBILITY_PUBLIC:
        return True
    if accessibility == s.ACCESSIBILITY_LOGIN or accessibility == s.ACCESSIBILITY_LOGIN_OR_TOKEN:
        # Logged-in users pass; otherwise a matching non-empty token works
        # (the token fallback applies to both LOGIN modes, as before).
        if is_logged(request):
            return True
        return token != '' and token == verify_token
    if accessibility == s.ACCESSIBILITY_PRIVATE:
        # Only the logged-in owner of the resource.
        return bool(is_logged(request) and is_owner(request, key))
    if accessibility == s.ACCESSIBILITY_TOKEN:
        return token != '' and token == verify_token
    return False
| pub/permission/resource.py | 3,773 | coding=utf-8 try: user = login.get_user_by_session(request,request.session.get(s.SESSION_LOGIN)) except: return False p = user_permission.objects.get(user_id=user, type=r_type).volume if p>0: return True return False | 228 | en | 0.177714 |
import urllib2
from zope.interface import implements
from plone.portlets.interfaces import IPortletDataProvider
from plone.app.portlets.portlets import base
from Products.CMFCore.utils import getToolByName
from zope import schema
from zope.formlib import form
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from wad.blog.utils import find_portlet_assignment_context
from wad.blog.blogentry import IBlogEntry
from wad.blog import MessageFactory as _
class IBlogCategoriesPortlet(IPortletDataProvider):
    """Schema for the blog-categories portlet.

    It inherits from IPortletDataProvider because for this portlet, the
    data that is being rendered and the portlet assignment itself are the
    same.
    """

    # Name of the browser view that category links point at.
    archive_view = schema.TextLine(
        title=_(u"Archive view"),
        description=_(u"The name of the archive view"),
        default=u'blog-view',
        required=True
    )
class Assignment(base.Assignment):
    """Portlet assignment.

    This is what is actually managed through the portlets UI and associated
    with columns.  It persists the configured archive view name.
    """

    implements(IBlogCategoriesPortlet)

    def __init__(self, archive_view=u'blog-view'):
        # archive_view: name of the view used when a category link is followed.
        self.archive_view = archive_view

    @property
    def title(self):
        """Title of the portlet shown on the "manage portlets" screen."""
        return _("Categories")
class Renderer(base.Renderer):
    """Portlet renderer, registered in configure.zcml.

    The referenced page template is rendered with this instance bound to the
    implicit ``view`` variable; the methods below are called from the template.
    """

    render = ViewPageTemplateFile('categories.pt')

    def keywords(self):
        """All Subject keywords known to the catalog, decoded to unicode."""
        catalog = getToolByName(self.context, 'portal_catalog')
        return [unicode(kw, 'utf-8') for kw in catalog.uniqueValuesFor('Subject')]

    def archive_url(self, subject):
        """URL of the configured archive view filtered on *subject*."""
        # The blog is wherever the portlet assignment was created.
        assignment_context = find_portlet_assignment_context(self.data,
                                                             self.context)
        if assignment_context is None:
            assignment_context = self.context
        self.folder_url = assignment_context.absolute_url()
        quoted_subject = urllib2.quote(subject.encode('utf-8'))
        return '%s/%s?category=%s' % (self.folder_url,
                                      self.data.archive_view,
                                      quoted_subject)

    def blog_url(self):
        """URL of the blog (the portlet assignment context)."""
        blog_context = find_portlet_assignment_context(self.data,
                                                       self.context)
        if blog_context is None:
            blog_context = self.context
        return blog_context.absolute_url()

    def count_entries(self, subject):
        """Number of blog entries tagged with *subject*."""
        catalog = getToolByName(self.context, 'portal_catalog')
        return len(catalog(object_provides=IBlogEntry.__identifier__,
                           Subject=subject.encode('utf-8')))

    def count_all_entries(self):
        """Total number of blog entries in the catalog."""
        catalog = getToolByName(self.context, 'portal_catalog')
        return len(catalog(object_provides=IBlogEntry.__identifier__))
class AddForm(base.AddForm):
    """Portlet add form.

    This is registered in configure.zcml. The form_fields variable tells
    zope.formlib which fields to display. The create() method actually
    constructs the assignment that is being added.
    """
    form_fields = form.Fields(IBlogCategoriesPortlet)

    def create(self, data):
        # Validated form data maps 1:1 onto the Assignment constructor.
        return Assignment(**data)
class EditForm(base.EditForm):
    """Portlet edit form.

    This is registered with configure.zcml. The form_fields variable tells
    zope.formlib which fields to display.
    """
    form_fields = form.Fields(IBlogCategoriesPortlet)
| src/wad.blog/wad/blog/portlets/categories.py | 3,956 | Portlet add form.
This is registered in configure.zcml. The form_fields variable tells
zope.formlib which fields to display. The create() method actually
constructs the assignment that is being added.
Portlet assignment.
This is what is actually managed through the portlets UI and associated
with columns.
Portlet edit form.
This is registered with configure.zcml. The form_fields variable tells
zope.formlib which fields to display.
A portlet
It inherits from IPortletDataProvider because for this portlet, the
data that is being rendered and the portlet assignment itself are the
same.
Portlet renderer.
This is registered in configure.zcml. The referenced page template is
rendered, and the implicit variable 'view' will refer to an instance
of this class. Other methods can be added and referenced in the template.
This property is used to give the title of the portlet in the
"manage portlets" screen.
Get the path of where the portlet is created. That's the blog. | 977 | en | 0.926877 |
from configparser import ConfigParser
import feedparser
import re
import requests
import tweepy
def get_id(xkcd_link: str) -> int:
    """Extract the numeric comic id from an xkcd link, or 0 if none is found."""
    digits = re.search(r"\d+", xkcd_link)
    return int(digits.group()) if digits else 0
def get_xkcd_rss_entries(url: str):
    """Fetch and parse the XKCD RSS feed at *url*, returning its entry list."""
    return feedparser.parse(url).get("entries")
def get_latest_rss_entry(entries: list):
    """Return ``(comic_id, entry)`` for the newest item in *entries*.

    entries[0] is taken as the latest item — presumably the feed is
    newest-first.
    """
    latest = entries[0]
    return get_id(xkcd_link=latest.get("id")), latest
def downdload_comic(entry: dict, filename: str) -> None:
    """Download the comic PNG referenced in *entry*'s summary to *filename*.

    Does nothing when the summary contains no PNG ``src`` attribute.
    (The misspelled name is kept — it is the public interface.)
    """
    img_match = re.search(r'src="(.*png)"', entry["summary"])
    if not img_match:
        return None
    response = requests.get(img_match.group(1))
    response.raise_for_status()
    with open(filename, "wb") as fh:
        fh.write(response.content)
    return None
def initialize_twitter_api(config: ConfigParser):
    """Build an authenticated tweepy API client from the [twitter] section
    of *config* (consumer key/secret + access token/secret)."""
    section = config["twitter"]
    auth = tweepy.OAuthHandler(
        section.get("consumer_key"), section.get("consumer_secret")
    )
    auth.set_access_token(
        section.get("access_token"), section.get("access_secret")
    )
    return tweepy.API(auth)
def send_twitter_post(entry: dict, api: tweepy.API, img_fname: str) -> None:
    """Tweet the comic image with its title and link.

    When no title can be parsed from the entry summary, a placeholder is used
    (and no link is appended — matching the original behaviour).
    """
    title_match = re.search("title=(.*)/>", entry["summary"])
    if title_match:
        status_text = title_match.groups()[0] + f"\n {entry['link']}"
    else:
        status_text = "-- No Title --"
    api.update_with_media(status=status_text, filename=img_fname)
    return None
| xkcd_feed/src/utils.py | 1,951 | Download latest image and store it in
current working directory
Extract comic id from xkcd link
Extract latest entry from XKCD RSS feed and
parse the ID
Load latest XKCD RSS feed and extract latest entry
Do authentication and return read-to-use
twitter api object
Post tweet on twitter
get latest rss feed | 308 | en | 0.616991 |
"""api_gw_test"""
# Remove warnings when using pytest fixtures
# pylint: disable=redefined-outer-name
import json
from test.conftest import ENDPOINT_URL
# warning disabled, this is used as a pylint fixture
from test.elasticsearch_test import ( # pylint: disable=unused-import
es_client,
populate_es_test_case_1,
)
from urllib.parse import urlencode
import boto3
import pytest
import requests
def to_localstack_url(api_id: str, url: str):
    """Rewrite a deployed API GW invoke URL into its localstack form.

    Inserts ``restapis/<api_id>`` after the port and the ``_user_request_``
    segment after the stage name.
    """
    with_api = url.replace("4566", f"4566/restapis/{api_id}")
    return with_api.replace("dev", "dev/_user_request_")
def api_gw_lambda_integrate_deploy(
    api_client,
    api: dict,
    api_resource: dict,
    lambda_func: dict,
    http_method: str = "GET",
) -> str:
    """Wire *lambda_func* into the API GW method, deploy the ``dev`` stage
    and return the localstack invocation URL."""
    integration_uri = (
        "arn:aws:apigateway:us-east-1:lambda:path/2015-03-31/functions/"
        f"{lambda_func['FunctionArn']}/invocations"
    )
    api_client.put_integration(
        restApiId=api["id"],
        resourceId=api_resource["id"],
        httpMethod=http_method,
        type="AWS",
        integrationHttpMethod="POST",
        uri=integration_uri,
    )
    api_client.create_deployment(restApiId=api["id"], stageName="dev")
    invoke_url = (
        f"http://localhost:4566/restapis/{api['id']}"
        f"/dev/_user_request_{api_resource['path']}"
    )
    return invoke_url
@pytest.fixture
def api_gw_method(request):
    """Create a rest API with a single ``/test`` resource and one method.

    Method parameters come from the ``api_gw_method_args`` marker on the
    requesting test.  Returns ``(api_client, api, api_resource)`` and deletes
    the rest API on teardown.
    """
    marker = request.node.get_closest_marker("api_gw_method_args")
    put_method_args = marker.args[0]["put_method_args"]
    put_method_response_args = marker.args[0]["put_method_response_args"]
    api = None

    def fin():
        """fixture finalizer: drop the rest API if it was created"""
        if api:
            api_client.delete_rest_api(restApiId=api["id"])

    # Hook teardown (finalizer) code
    request.addfinalizer(fin)

    api_client = boto3.client("apigateway", endpoint_url=ENDPOINT_URL)
    api = api_client.create_rest_api(name="testapi")
    # Attach the /test resource under the API's root resource.
    root_resource_id = api_client.get_resources(restApiId=api["id"])["items"][0]["id"]
    api_resource = api_client.create_resource(
        restApiId=api["id"], parentId=root_resource_id, pathPart="test"
    )
    api_client.put_method(
        restApiId=api["id"],
        resourceId=api_resource["id"],
        authorizationType="NONE",
        **put_method_args,
    )
    api_client.put_method_response(
        restApiId=api["id"],
        resourceId=api_resource["id"],
        statusCode="200",
        **put_method_response_args,
    )
    return api_client, api, api_resource
@pytest.mark.api_gw_method_args(
    {
        "put_method_args": {"httpMethod": "GET",},
        "put_method_response_args": {"httpMethod": "GET",},
    }
)
@pytest.mark.lambda_function_args(
    {
        "name": "stac_endpoint",
        "handler": "code.handler",
        "environment": {"CBERS_STAC_BUCKET": "bucket",},
        "timeout": 30,
        "layers": (
            {
                "output_dir": "./test",
                "layer_dir": "./cbers2stac/layers/common",
                "tag": "common",
            },
        ),
    }
)
def test_root(api_gw_method, lambda_function):
    """
    The STAC root endpoint deployed behind API GW answers GET with HTTP 200.
    """
    # Based on
    # https://stackoverflow.com/questions/58859917/creating-aws-lambda-integrated-api-gateway-resource-with-boto3
    api_client, api, api_resource = api_gw_method
    lambda_client, lambda_func = lambda_function  # pylint: disable=unused-variable

    url = api_gw_lambda_integrate_deploy(api_client, api, api_resource, lambda_func)
    req = requests.get(url)
    assert req.status_code == 200
@pytest.mark.api_gw_method_args(
    {
        "put_method_args": {"httpMethod": "GET",},
        "put_method_response_args": {"httpMethod": "GET",},
    }
)
@pytest.mark.lambda_function_args(
    {
        "name": "elasticsearch",
        "handler": "es.stac_search_endpoint_handler",
        "environment": {},
        "timeout": 30,
        "layers": (
            {
                "output_dir": "./test",
                "layer_dir": "./cbers2stac/layers/common",
                "tag": "common",
            },
        ),
    }
)
def test_item_search_get(
    api_gw_method, lambda_function, es_client
):  # pylint: disable=too-many-locals,too-many-statements
    """
    GET item search: collection filters, paging links, ids and the query
    extension, against a 2-item test index.
    """
    api_client, api, api_resource = api_gw_method
    lambda_client, lambda_func = lambda_function  # pylint: disable=unused-variable

    # ES_ENDPOINT is set by lambda_function
    lambda_client.update_function_configuration(
        FunctionName=lambda_func["FunctionName"],
        Environment={"Variables": {"ES_PORT": "4571", "ES_SSL": "NO",}},
    )
    populate_es_test_case_1(es_client)

    # Empty GET, return all 2 items
    original_url = api_gw_lambda_integrate_deploy(
        api_client, api, api_resource, lambda_func
    )
    req = requests.get(original_url)
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert len(fcol["features"]) == 2

    # Single collection, return single item
    url = f"{original_url}?collections=CBERS4-MUX"
    req = requests.get(url)
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert len(fcol["features"]) == 1
    assert fcol["features"][0]["collection"] == "CBERS4-MUX"

    # Two collections, return all items
    url = f"{original_url}?collections=CBERS4-MUX,CBERS4-AWFI"
    req = requests.get(url)
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert len(fcol["features"]) == 2

    # Paging, no next case
    url = f"{original_url}"
    req = requests.get(url)
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert "links" not in fcol.keys()

    # Paging, next page (limit=1 forces a "next" link)
    url = f"{original_url}?limit=1"
    req = requests.get(url)
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert "links" in fcol.keys()
    assert len(fcol["links"]) == 1
    next_href = to_localstack_url(api["id"], fcol["links"][0]["href"])
    req = requests.get(next_href)
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert "links" not in fcol.keys()
    assert fcol["features"][0]["id"] == "CBERS_4_MUX_20170528_090_084_L2"

    # ids
    url = f"{original_url}?ids=CBERS_4_MUX_20170528_090_084_L2"
    req = requests.get(url)
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert len(fcol["features"]) == 1
    assert fcol["features"][0]["id"] == "CBERS_4_MUX_20170528_090_084_L2"

    # query extension
    url = f"{original_url}?"
    url += urlencode({"query": '{"cbers:data_type": {"eq":"L4"}}'})
    req = requests.get(url)
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert len(fcol["features"]) == 1
    assert fcol["features"][0]["id"] == "CBERS_4_AWFI_20170409_167_123_L4"
@pytest.mark.api_gw_method_args(
    {
        "put_method_args": {"httpMethod": "POST",},
        "put_method_response_args": {"httpMethod": "POST",},
    }
)
@pytest.mark.lambda_function_args(
    {
        "name": "elasticsearch",
        "handler": "es.stac_search_endpoint_handler",
        "environment": {},
        "timeout": 30,
        "layers": (
            {
                "output_dir": "./test",
                "layer_dir": "./cbers2stac/layers/common",
                "tag": "common",
            },
        ),
    }
)
def test_item_search_post(
    api_gw_method, lambda_function, es_client
):  # pylint: disable=too-many-locals
    """
    POST item search: bbox validation, paging with request bodies and ids,
    against a 2-item test index.
    """
    api_client, api, api_resource = api_gw_method
    lambda_client, lambda_func = lambda_function  # pylint: disable=unused-variable

    # ES_ENDPOINT is set by lambda_function
    lambda_client.update_function_configuration(
        FunctionName=lambda_func["FunctionName"],
        Environment={"Variables": {"ES_PORT": "4571", "ES_SSL": "NO",}},
    )
    populate_es_test_case_1(es_client)

    url = api_gw_lambda_integrate_deploy(
        api_client, api, api_resource, lambda_func, http_method="POST"
    )

    # POST with invalid bbox order, check error status code and message
    req = requests.post(
        url,
        data=json.dumps(
            {
                "collections": ["mycollection"],
                "bbox": [160.6, -55.95, -170, -25.89],
                "limit": 100,
                "datetime": "2019-01-01T00:00:00Z/2019-01-01T23:59:59Z",
            }
        ),
    )
    assert req.status_code == 400, req.text
    assert "First lon corner is not western" in req.text

    # Same as above with fixed bbox
    req = requests.post(
        url,
        data=json.dumps(
            {
                "collections": ["mycollection"],
                "bbox": [-170, -25.89, 160.6, -55.95],
                "limit": 100,
                "datetime": "2019-01-01T00:00:00Z/2019-01-01T23:59:59Z",
            }
        ),
    )
    assert req.status_code == 200, req.text

    # Paging, no next case
    req = requests.post(url)
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert "links" not in fcol.keys()

    # Paging, next page (limit=1 forces a "next" link with a request body)
    body = {"limit": 1}
    req = requests.post(url, data=json.dumps(body))
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert "links" in fcol.keys()
    assert len(fcol["links"]) == 1
    next_href = to_localstack_url(api["id"], fcol["links"][0]["href"])
    req = requests.post(
        next_href, data=json.dumps({**body, **fcol["links"][0]["body"]})
    )
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert "links" not in fcol.keys()
    assert fcol["features"][0]["id"] == "CBERS_4_MUX_20170528_090_084_L2"

    # ids
    body = {"ids": ["CBERS_4_MUX_20170528_090_084_L2"]}
    req = requests.post(url, data=json.dumps(body))
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert len(fcol["features"]) == 1
    assert fcol["features"][0]["id"] == "CBERS_4_MUX_20170528_090_084_L2"
| test/api_gw_test.py | 10,176 | Integrate lambda with api gw method and deploy api.
Return the invocation URL
api gw for testing
fixture finalizer
test_item_search_get
test_item_search_post
test_root_endpoint
Converts a API GW url to localstack
api_gw_test
Remove warnings when using pytest fixtures pylint: disable=redefined-outer-name warning disabled, this is used as a pylint fixture pylint: disable=unused-import Hook teardown (finalizer) code Based on https://stackoverflow.com/questions/58859917/creating-aws-lambda-integrated-api-gateway-resource-with-boto3 pylint: disable=unused-variable pylint: disable=too-many-locals,too-many-statements pylint: disable=unused-variable ES_ENDPOINT is set by lambda_function Empty GET, return all 2 items Single collection, return single item Two collections, return all items Paging, no next case Paging, next page ids query extension pylint: disable=too-many-locals pylint: disable=unused-variable ES_ENDPOINT is set by lambda_function POST with invalid bbox order, check error status code and message Same as above with fixed bbox Paging, no next case Paging, next page ids | 1,091 | en | 0.673344 |
# MS-PIE StyleGAN2 training config (FFHQ, 256-512 multi-scale).
_base_ = [
    '../_base_/datasets/ffhq_flip.py',
    '../_base_/models/stylegan/stylegan2_base.py',
    '../_base_/default_runtime.py'
]

# Generator uses a 'CSG' positional-encoding head on a 4x4 base and bilinear
# up-sampling between conv blocks; discriminator pools adaptively so it can
# take inputs at multiple scales.
model = dict(
    type='MSPIEStyleGAN2',
    generator=dict(
        type='MSStyleGANv2Generator',
        head_pos_encoding=dict(type='CSG'),
        deconv2conv=True,
        up_after_conv=True,
        head_pos_size=(4, 4),
        up_config=dict(scale_factor=2, mode='bilinear', align_corners=True),
        out_size=256),
    discriminator=dict(
        type='MSStyleGAN2Discriminator', in_size=256, with_adaptive_pool=True))

# Multi-scale training: each iteration draws an input scale from
# multi_input_scales with the matching probability.
train_cfg = dict(
    num_upblocks=6,
    multi_input_scales=[0, 2, 4],
    multi_scale_probability=[0.5, 0.25, 0.25])

data = dict(
    samples_per_gpu=3,
    train=dict(dataset=dict(imgs_root='./data/ffhq/ffhq_imgs/ffhq_512')))

# Exponential moving average of the generator weights, half-life in kimg.
ema_half_life = 10.

custom_hooks = [
    dict(
        type='VisualizeUnconditionalSamples',
        output_dir='training_samples',
        interval=5000),
    dict(
        type='ExponentialMovingAverageHook',
        module_keys=('generator_ema', ),
        interval=1,
        interp_cfg=dict(momentum=0.5**(32. / (ema_half_life * 1000.))),
        priority='VERY_HIGH')
]

checkpoint_config = dict(interval=10000, by_epoch=False, max_keep_ckpts=40)
lr_config = None

log_config = dict(
    interval=100,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])

cudnn_benchmark = False
total_iters = 1100002

# Evaluation: FID on 50k samples against a precomputed inception pkl,
# precision/recall on 10k samples with k=3.
metrics = dict(
    fid50k=dict(
        type='FID',
        num_images=50000,
        inception_pkl='work_dirs/inception_pkl/ffhq-256-50k-rgb.pkl',
        bgr2rgb=True),
    pr10k3=dict(type='PR', num_images=10000, k=3))
| configs/positional_encoding_in_gans/mspie-stylegan2_c2_config-d_ffhq_256-512_b3x8_1100k.py | 1,683 | dict(type='TensorboardLoggerHook'), | 35 | en | 0.30399 |
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======

# Python 2 script: connects to the local daemon's JSON-RPC port, embedding the
# credentials in the URL when both are configured above.
if rpcpass == "":
	access = ServiceProxy("http://127.0.0.1:25176")
else:
	access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:25176")
# The first command-line argument selects the RPC command (case-insensitive).
cmd = sys.argv[1].lower()
# Interactive dispatch: each branch prompts for the command's arguments, calls
# the matching RPC method on `access`, and prints the result.  Commands with
# optional arguments use a nested try/except — the call is retried with fewer
# arguments if the first attempt raises.  All handlers deliberately swallow
# errors (bare except) and print a generic message.
if cmd == "backupwallet":
	try:
		path = raw_input("Enter destination path/filename: ")
		print access.backupwallet(path)
	except:
		print "\n---An error occurred---\n"

elif cmd == "getaccount":
	try:
		addr = raw_input("Enter a Bitmea address: ")
		print access.getaccount(addr)
	except:
		print "\n---An error occurred---\n"

elif cmd == "getaccountaddress":
	try:
		acct = raw_input("Enter an account name: ")
		print access.getaccountaddress(acct)
	except:
		print "\n---An error occurred---\n"

elif cmd == "getaddressesbyaccount":
	try:
		acct = raw_input("Enter an account name: ")
		print access.getaddressesbyaccount(acct)
	except:
		print "\n---An error occurred---\n"

elif cmd == "getbalance":
	try:
		acct = raw_input("Enter an account (optional): ")
		mc = raw_input("Minimum confirmations (optional): ")
		try:
			print access.getbalance(acct, mc)
		except:
			print access.getbalance()
	except:
		print "\n---An error occurred---\n"

elif cmd == "getblockbycount":
	try:
		height = raw_input("Height: ")
		print access.getblockbycount(height)
	except:
		print "\n---An error occurred---\n"

elif cmd == "getblockcount":
	try:
		print access.getblockcount()
	except:
		print "\n---An error occurred---\n"

elif cmd == "getblocknumber":
	try:
		print access.getblocknumber()
	except:
		print "\n---An error occurred---\n"

elif cmd == "getconnectioncount":
	try:
		print access.getconnectioncount()
	except:
		print "\n---An error occurred---\n"

elif cmd == "getdifficulty":
	try:
		print access.getdifficulty()
	except:
		print "\n---An error occurred---\n"

elif cmd == "getgenerate":
	try:
		print access.getgenerate()
	except:
		print "\n---An error occurred---\n"

elif cmd == "gethashespersec":
	try:
		print access.gethashespersec()
	except:
		print "\n---An error occurred---\n"

elif cmd == "getinfo":
	try:
		print access.getinfo()
	except:
		print "\n---An error occurred---\n"

elif cmd == "getnewaddress":
	try:
		acct = raw_input("Enter an account name: ")
		try:
			print access.getnewaddress(acct)
		except:
			print access.getnewaddress()
	except:
		print "\n---An error occurred---\n"

elif cmd == "getreceivedbyaccount":
	try:
		acct = raw_input("Enter an account (optional): ")
		mc = raw_input("Minimum confirmations (optional): ")
		try:
			print access.getreceivedbyaccount(acct, mc)
		except:
			print access.getreceivedbyaccount()
	except:
		print "\n---An error occurred---\n"

elif cmd == "getreceivedbyaddress":
	try:
		addr = raw_input("Enter a Bitmea address (optional): ")
		mc = raw_input("Minimum confirmations (optional): ")
		try:
			print access.getreceivedbyaddress(addr, mc)
		except:
			print access.getreceivedbyaddress()
	except:
		print "\n---An error occurred---\n"

elif cmd == "gettransaction":
	try:
		txid = raw_input("Enter a transaction ID: ")
		print access.gettransaction(txid)
	except:
		print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
# Remaining commands follow the same pattern: prompt, call, print.  Branches
# with optional arguments retry the RPC with fewer arguments on failure.
elif cmd == "help":
	try:
		cmd = raw_input("Command (optional): ")
		try:
			print access.help(cmd)
		except:
			print access.help()
	except:
		print "\n---An error occurred---\n"

elif cmd == "listaccounts":
	try:
		mc = raw_input("Minimum confirmations (optional): ")
		try:
			print access.listaccounts(mc)
		except:
			print access.listaccounts()
	except:
		print "\n---An error occurred---\n"

elif cmd == "listreceivedbyaccount":
	try:
		mc = raw_input("Minimum confirmations (optional): ")
		incemp = raw_input("Include empty? (true/false, optional): ")
		try:
			print access.listreceivedbyaccount(mc, incemp)
		except:
			print access.listreceivedbyaccount()
	except:
		print "\n---An error occurred---\n"

elif cmd == "listreceivedbyaddress":
	try:
		mc = raw_input("Minimum confirmations (optional): ")
		incemp = raw_input("Include empty? (true/false, optional): ")
		try:
			print access.listreceivedbyaddress(mc, incemp)
		except:
			print access.listreceivedbyaddress()
	except:
		print "\n---An error occurred---\n"

elif cmd == "listtransactions":
	try:
		acct = raw_input("Account (optional): ")
		count = raw_input("Number of transactions (optional): ")
		frm = raw_input("Skip (optional):")
		try:
			print access.listtransactions(acct, count, frm)
		except:
			print access.listtransactions()
	except:
		print "\n---An error occurred---\n"

elif cmd == "move":
	try:
		frm = raw_input("From: ")
		to = raw_input("To: ")
		amt = raw_input("Amount:")
		mc = raw_input("Minimum confirmations (optional): ")
		comment = raw_input("Comment (optional): ")
		try:
			print access.move(frm, to, amt, mc, comment)
		except:
			print access.move(frm, to, amt)
	except:
		print "\n---An error occurred---\n"

elif cmd == "sendfrom":
	try:
		frm = raw_input("From: ")
		to = raw_input("To: ")
		amt = raw_input("Amount:")
		mc = raw_input("Minimum confirmations (optional): ")
		comment = raw_input("Comment (optional): ")
		commentto = raw_input("Comment-to (optional): ")
		try:
			print access.sendfrom(frm, to, amt, mc, comment, commentto)
		except:
			print access.sendfrom(frm, to, amt)
	except:
		print "\n---An error occurred---\n"

elif cmd == "sendmany":
	try:
		frm = raw_input("From: ")
		to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
		mc = raw_input("Minimum confirmations (optional): ")
		comment = raw_input("Comment (optional): ")
		try:
			print access.sendmany(frm,to,mc,comment)
		except:
			print access.sendmany(frm,to)
	except:
		print "\n---An error occurred---\n"

elif cmd == "sendtoaddress":
	try:
		to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
		amt = raw_input("Amount:")
		comment = raw_input("Comment (optional): ")
		commentto = raw_input("Comment-to (optional): ")
		try:
			print access.sendtoaddress(to,amt,comment,commentto)
		except:
			print access.sendtoaddress(to,amt)
	except:
		print "\n---An error occurred---\n"

elif cmd == "setaccount":
	try:
		addr = raw_input("Address: ")
		acct = raw_input("Account:")
		print access.setaccount(addr,acct)
	except:
		print "\n---An error occurred---\n"

elif cmd == "setgenerate":
	try:
		gen= raw_input("Generate? (true/false): ")
		cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
		try:
			print access.setgenerate(gen, cpus)
		except:
			print access.setgenerate(gen)
	except:
		print "\n---An error occurred---\n"

elif cmd == "settxfee":
	try:
		amt = raw_input("Amount:")
		print access.settxfee(amt)
	except:
		print "\n---An error occurred---\n"

elif cmd == "stop":
	try:
		print access.stop()
	except:
		print "\n---An error occurred---\n"

elif cmd == "validateaddress":
	try:
		addr = raw_input("Address: ")
		print access.validateaddress(addr)
	except:
		print "\n---An error occurred---\n"

elif cmd == "walletpassphrase":
	try:
		pwd = raw_input("Enter wallet passphrase: ")
		# Unlocks the wallet for a fixed 60 seconds.
		access.walletpassphrase(pwd, 60)
		print "\n---Wallet unlocked---\n"
	except:
		print "\n---An error occurred---\n"

elif cmd == "walletpassphrasechange":
	try:
		pwd = raw_input("Enter old wallet passphrase: ")
		pwd2 = raw_input("Enter new wallet passphrase: ")
		access.walletpassphrasechange(pwd, pwd2)
		print
		print "\n---Passphrase changed---\n"
	except:
		print
		print "\n---An error occurred---\n"
	print

else:
	print "Command not found or not supported"
| contrib/bitrpc/bitrpc.py | 7,836 | ===== BEGIN USER SETTINGS ===== if you do not set these you will be prompted for a password for every command ====== END USER SETTINGS ====== | 141 | en | 0.820638 |
#!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2020 EntySec
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import sys
import tty
import termios
from Head.d3crypt import ghost
class typer:
    """Types text on the remote device by issuing ``input text`` shell
    commands through a ghost session."""

    def __init__(self):
        # Bug fix: the original assigned `self.d3crypt = d3crypt()`, but
        # `d3crypt` is not a name this module imports (the import is
        # `from Head.d3crypt import ghost`), and `send_char` reads
        # `self.ghost`, which was never set.  Instantiate the imported
        # `ghost` class under the attribute name that is actually used.
        self.ghost = ghost()

    def get_char(self):
        """Read a single raw character from stdin (no echo, no line
        buffering) and return it, restoring the terminal settings after."""
        fd = sys.stdin.fileno()
        old = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            return sys.stdin.read(1)
        finally:
            # Always restore cooked mode, even if the read raises.
            termios.tcsetattr(fd, termios.TCSADRAIN, old)

    def send_char(self, char):
        """Send one character to the device via the `input text` shell
        command over the ghost session."""
        self.ghost.send_command("shell", "input text " + char, False, False)
| Head/typer.py | 1,625 | !/usr/bin/env python3 MIT License Copyright (c) 2020 EntySec Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | 1,081 | en | 0.851831 |
"""
DeepChEmbed (DCE) Models
"""
from dimreducer import DeepAutoEncoder
from cluster import KMeansLayer
from cluster import KMeans
from keras import Model
from keras import optimizers
from keras.utils import normalize
import numpy as np
class DCE():
    """
    The class to build a deep chemical embedding model.

    Attributes:
        autoencoder_dims: a list of dimensions for the encoder; the first
                          element is the input dimension and the last one
                          the hidden (embedding) layer dimension.
        n_clusters:      int, number of clusters for the clustering layer.
        alpha:           float, parameter for soft label assignment.
        update_interval: int, every `update_interval` epochs the hardened
                         labels are updated and/or the convergence criteria
                         are examined.
        max_iteration:   int, maximum iteration for the combined training.
        clustering_tol:  float, convergence criterion for the clustering
                         layer.
        model:           keras Model variable.
        HARDENING_FUNCS: smoothstep hardening functions for unsupervised DCE
                         training, up to 9th order.
    """
    # Odd-order smoothstep polynomials used to sharpen ("harden") soft
    # cluster assignments during unsupervised training.
    HARDENING_FUNCS = {
        1: lambda x: x,
        3: lambda x: (-2*x + 3) * x**2,
        5: lambda x: ((6*x - 15)*x + 10) * x**3,
        7: lambda x: (((-20*x + 70)*x - 84)*x + 35) * x**4,
        9: lambda x: ((((70*x - 315)*x + 540)*x - 420)*x + 126) * x**5}

    def __init__(self, autoencoder_dims, n_clusters, update_interval=50,
                 max_iteration=1e4, clustering_tol=1e-4, alpha=1.0):
        """Constructor of DCE."""
        self.autoencoder_dims = autoencoder_dims
        self.n_clusters = n_clusters
        self.alpha = alpha
        self.update_interval = update_interval
        self.max_iteration = max_iteration
        self.clustering_tol = clustering_tol
        self.model = None
        return

    def build_model(self, norm=True, act='relu'):
        """Build DCE using the initialized attributes.

        Args:
            norm: boolean, whether to add a normalization layer at the
                  beginning of the autoencoder
            act:  string, keras activation function name for the autoencoder
        """
        autoencoder = DeepAutoEncoder(self.autoencoder_dims, act)
        autoencoder.build_model(norm=norm)
        embeding = autoencoder.model.get_layer(name='embedding_layer').output
        clustering = KMeansLayer(self.n_clusters, alpha=self.alpha,
                                 name='clustering')(embeding)
        self.model = Model(inputs=autoencoder.model.input,
                           outputs=[clustering, autoencoder.model.output])
        return

    def train_model(self, data_train,
                    labels_train=None, data_test=None, labels_test=None,
                    verbose=1,
                    compiled=False, clustering_loss='kld',
                    decoder_loss='mse', clustering_loss_weight=0.5,
                    hardening_order=1, hardening_strength=2.0,
                    optimizer='adam', lr=0.001, decay=0.0):
        """Train the DCE Model.

        If labels_train are not present, train the DCE model in an
        unsupervised learning process; otherwise, train the DCE model in a
        supervised learning process.

        Bug fix: the original signature declared ``compiled=False`` twice,
        which is a SyntaxError in Python; the duplicate has been removed.

        Args:
            data_train: input training data
            labels_train: true labels of training data
            data_test: input test data
            labels_test: true labels of testing data
            verbose: 0, turn off the screen prints
            compiled: boolean, indicating if the model is compiled or not
            clustering_loss: string, clustering layer loss function
            decoder_loss: string, decoder loss function
            clustering_loss_weight: float in [0,1], w_c
            hardening_order: odd int, the order of the hardening function
            hardening_strength: float >= 1.0, the strength of the hardening
            optimizer: string, keras optimizer name ('adam' or 'sgd')
            lr: learning rate
            decay: learning rate decay

        Returns:
            train_loss: training loss
            test_loss: only if data_test and labels_test are not None in a
                       supervised learning process

        Raises:
            Exception: if `optimizer` is neither 'adam' nor 'sgd'.
        """
        if (not compiled):
            assert clustering_loss_weight <= 1 and clustering_loss_weight >= 0

            if optimizer == 'adam':
                dce_optimizer = optimizers.Adam(lr=lr, decay=decay)
            elif optimizer == 'sgd':
                # Bug fix: keras exposes the SGD optimizer class as
                # `optimizers.SGD`; `optimizers.sgd` is not defined.
                dce_optimizer = optimizers.SGD(lr=lr, decay=decay)
            else:
                raise Exception('Input optimizer was not found')

            self.model.compile(loss={'clustering': clustering_loss,
                                     'decoder_output': decoder_loss},
                               loss_weights=[clustering_loss_weight,
                                             1 - clustering_loss_weight],
                               optimizer=dce_optimizer)

        if (labels_train is not None):
            supervised_learning = True
            if verbose >= 1: print('Starting supervised learning')
        else:
            supervised_learning = False
            if verbose >= 1: print('Starting unsupervised learning')

        # Initialize the clustering layer using sklearn-KMeans centroids on
        # the current embedding as the initial guess.
        kmeans_init = KMeans(n_clusters=self.n_clusters)
        kmeans_init.build_model()
        encoder = Model(inputs=self.model.input,
                        outputs=self.model.get_layer(\
                                name='embedding_layer').output)
        kmeans_init.model.fit(encoder.predict(data_train))
        y_pred_last = kmeans_init.model.labels_
        self.model.get_layer(name='clustering').\
            set_weights([kmeans_init.model.cluster_centers_])

        # Prepare training: choose how the target distribution p is built.
        if not supervised_learning:
            # Unsupervised: p is a hardened version of the soft assignment q.
            assert hardening_order in DCE.HARDENING_FUNCS.keys()
            assert hardening_strength >= 1.0
            h_func = DCE.HARDENING_FUNCS[hardening_order]
        else:
            # Supervised: p is the one-hot encoding of the true labels.
            assert len(labels_train) == len(data_train)
            assert len(np.unique(labels_train)) == self.n_clusters
            p = np.zeros(shape=(len(labels_train), self.n_clusters))
            for i in range(len(labels_train)):
                p[i][labels_train[i]] = 1.0

            if data_test is not None:
                assert len(labels_test) == len(data_test)
                assert len(np.unique(labels_test)) == self.n_clusters
                p_test = np.zeros(shape=(len(labels_test), self.n_clusters))
                for i in range(len(labels_test)):
                    p_test[i][labels_test[i]] = 1.0

                validation_loss = []

        # Training loop.
        loss = []

        for iteration in range(int(self.max_iteration)):

            if iteration % self.update_interval == 0:
                # Refresh q and, for unsupervised learning, the target p.
                q, _ = self.model.predict(data_train)
                if not supervised_learning:
                    p = DCE.hardening(q, h_func, hardening_strength)

                # Fraction of samples whose hard assignment changed.
                y_pred = q.argmax(1)
                delta_label_i = np.sum(y_pred != y_pred_last).\
                    astype(np.float32) / y_pred.shape[0]
                y_pred_last = y_pred

                # Convergence check on label stability.
                if iteration > 0 and delta_label_i < self.clustering_tol:
                    print(str(delta_label_i) + ' < ' + str(self.clustering_tol))
                    print('Reached tolerance threshold. Stopping training.')
                    break

            loss.append(self.model.train_on_batch(x=data_train,
                                                  y=[p, data_train]))
            if supervised_learning and data_test is not None:
                validation_loss.append(self.model.test_on_batch(
                    x=data_test, y=[p_test, data_test]))

            if verbose > 0 and iteration % self.update_interval == 0:
                print('Epoch: ' + str(iteration))
                if verbose == 1:
                    print('  Total_loss = ' + str(loss[iteration][0]) +
                          ';Delta_label = ' + str(delta_label_i))
                    print('  Clustering_loss = ' + str(loss[iteration][1]) +
                          '; Decoder_loss = ' + str(loss[iteration][2]))

        if iteration == self.max_iteration - 1:
            print('Reached maximum iteration. Stopping training.')

        if data_test is None:
            return np.array(loss).T
        else:
            return [np.array(loss).T, np.array(validation_loss).T]

    @staticmethod
    def hardening(q, h_func, stength):
        """Harden distribution q and return the normalized target p.

        Args:
            q: input distributions (n_samples x n_clusters).
            h_func: hardening (smoothstep) function.
            stength: hardening strength exponent (sic; name kept for
                     backward compatibility with keyword callers).

        Returns:
            p: hardened and normalized distributions.
        """
        q = h_func(q)
        weight = q ** stength / q.sum(0)
        return (weight.T / weight.sum(1)).T
| deepchembed/dce.py | 9,241 | The class to build a deep chemical embedding model.
Attributes:
autoencoder_dims: a list of dimensions for encoder, the first
element as input dimension, and the last one as
hidden layer dimension.
n_clusters: int, number of clusters for clustering layer.
alpha: float, parameters for soft label assigning.
update_interval: int, indicating every number of epoches, the harhened
labels will be upadated and/or convergence cretia will
be examed.
max_iteration: int, maximum iteration for the combined training
clustering_tol: float, convergence cretia for clustering layer
model: keras Model variable
HARDENING_FUNCS: smoothsetp hardening functions for unsupervised DCE
training, up to 9th order
Constructor of DCE.
Build DCE using the initialized attributes
Args:
norm: boolean, whether to add a normalization layer at the beginning
of the autoencoder
act: string, keras activation function name for autoencoder
hardening distribution P and return Q
Args:
q: input distributions.
h_func: input harderning function.
strength: hardening strength.
returns:
p: hardened and normalized distributions.
Train DCE Model:
If labels_train are not present, train DCE model in a unsupervised
learning process; otherwise, train DCE model in a supervised learning
process.
Args:
data_train: input training data
labels_train: true labels of traning data
data_test: input test data
labels_test: true lables of testing data
verbose: 0, turn off the screen prints
clustering_loss: string, clustering layer loss function
decoder_loss:, string, decoder loss function
clustering_loss_weight: float in [0,1], w_c,
harderning_order: odd int, the order of hardening function
harderning_strength: float >=1.0, the streng of the harderning
compiled: boolean, indicating if the model is compiled or not
optmizer: string, keras optimizers
lr: learning rate
dacay: learning rate dacay
Returns:
train_loss: training loss
test_loss: only if data_test and labels_test are not None in
supervised learning process
DeepChEmbed (DCE) Models
initializing model by using sklean-Kmeans as guess Prepare training: p disctribution methods Unsupervised Learning Supervised Learning training start: updating p for unsupervised learning process get label change i exam convergence | 2,500 | en | 0.662529 |
import datetime
from . import status
from .errors import InvalidAuthRequest, ProtocolVersionUnsupported, NoMutualAuthType
from .signing import Key
from .response import AuthResponse
class AuthPrincipal:
    """A principal authenticated (or previously authenticated) by the WLS.

    Holds the user identifier, the authentication methods used, any
    principal tags, and an optional session expiry timestamp.
    """

    def __init__(self, userid, auth_methods, ptags=None, session_expiry=None):
        self.userid = userid
        self.auth_methods = auth_methods
        # Default to a fresh empty list per instance when no tags are given.
        self.ptags = [] if ptags is None else ptags
        self.session_expiry = session_expiry
class LoginService:
    """High-level interface to implement a web login service (WLS).

    This class provides a convenient interface for implementing a WLS with any
    authentication backend. It is intended to be instantiated with a single
    private key, which is used to sign the responses it generates.

    Mechanisms deemed useful for WLS implementation are provided:

      - storing the list of supported authentication methods, and checking
        whether the WLS and a WAA's request have a method in common
      - checking whether the protocol version specified in the WAA request is
        supported by `ucam_wls`

    These mechanisms can optionally be turned off.

    Attributes:
        key (ucam_wls.signing.Key): a private key to be used to sign responses
        auth_methods (list): a list of supported authentication methods
    """
    def __init__(self, key, auth_methods):
        if not isinstance(key, Key):
            raise TypeError("key must be a ucam_wls.signing.Key instance")
        self.key = key
        self.auth_methods = auth_methods

    def have_mutual_auth_type(self, request):
        """Return whether the request's acceptable auth methods overlap ours.

        A request that lists no acceptable methods accepts any method, so it
        is always considered compatible.
        """
        if request.aauth and any(request.aauth):
            return set(request.aauth) & set(self.auth_methods) != set()
        else:
            return True

    def _pre_response(self, request, skip_handling_check, check_auth_types=True):
        # Shared sanity checks performed before building any response.
        if not skip_handling_check:
            if not request.data_valid:
                raise InvalidAuthRequest
            if check_auth_types and not self.have_mutual_auth_type(request):
                raise NoMutualAuthType(
                    "WLS supports %s; WAA wants one of %s" % (
                        self.auth_methods, request.aauth
                    )
                )
            if not request.version_supported:
                raise ProtocolVersionUnsupported(request.ver)

    def _finish_response(self, response, sign=True, force_signature=False):
        # Sign when requested, when the response type requires it, or when
        # a signature is explicitly forced (avoids double-signing otherwise).
        if sign or response.requires_signature:
            if not response.is_signed or force_signature:
                self.key.sign(response)
        return response

    def authenticate_active(self, request, principal, auth, life=None,
            sign=True, skip_handling_check=False, *args, **kwargs):
        """Generate a WLS 'success' response based on interaction with the user

        This function creates a WLS response specifying that the principal was
        authenticated based on 'fresh' interaction with the user (e.g. input of
        a username and password).

        Args:
            request (AuthRequest): the original WAA request
            principal (AuthPrincipal): the principal authenticated by the WLS
            auth (str): the authentication method used by the principal.
            life (int): if specified, the validity (in seconds) of the
                principal's session with the WLS.
            sign (bool): whether to sign the response or not. Recommended to
                leave this at the default value of `True` (see warning below).
            *args: passed to `AuthResponse.respond_to_request`
            **kwargs: passed to `AuthResponse.respond_to_request`

        Returns:
            An `AuthResponse` instance matching the given arguments.

        Warning:
            Responses indicating successful authentication *MUST* be signed by
            the WLS. It is recommended that you leave `sign` set to `True`, or
            make sure to sign the response manually afterwards.
        """
        self._pre_response(request, skip_handling_check)

        if request.iact == False:
            raise ValueError("WAA demanded passive authentication (iact == 'no')")

        # Derive the session lifetime from the principal's expiry when the
        # caller did not specify one explicitly.
        if life is None and principal.session_expiry is not None:
            life = int((principal.session_expiry - datetime.datetime.utcnow()).total_seconds())

        response = AuthResponse.respond_to_request(
            request=request, code=status.SUCCESS, principal=principal.userid,
            auth=auth, ptags=principal.ptags, life=life, *args, **kwargs
        )
        return self._finish_response(response=response, sign=sign)

    def authenticate_passive(self, request, principal, sso=None, sign=True,
                             skip_handling_check=False, *args, **kwargs):
        """Generate a WLS 'success' response based on a pre-existing identity

        This function creates a WLS response specifying that the principal was
        authenticated based on previous successful authentication (e.g. an
        existing WLS session cookie).

        Args:
            request (AuthRequest): the original WAA request
            principal (AuthPrincipal): the principal authenticated by the WLS
            sso (list): a list of strings indicating the authentication methods
                previously used for authentication by the principal. If an
                empty list (or None, the default) is passed,
                `principal.auth_methods` will be used.
            sign (bool): whether to sign the response or not. Recommended to
                leave this at the default value of `True` (see warning below).
            *args: passed to `AuthResponse.respond_to_request`
            **kwargs: passed to `AuthResponse.respond_to_request`

        Returns:
            An `AuthResponse` instance matching the given arguments.

        Warning:
            Responses indicating successful authentication *MUST* be signed by
            the WLS. It is recommended that you leave `sign` set to `True`, or
            make sure to sign the response manually afterwards.
        """
        self._pre_response(request, skip_handling_check)

        if request.iact == True:
            raise ValueError("WAA demanded active authentication (iact == 'yes')")

        # `None` replaces the former mutable default `sso=[]`; behaviour is
        # identical (an empty value falls back to the principal's methods).
        if sso is None:
            sso = []
        if len(sso) == 0:
            sso = principal.auth_methods
            if len(sso) == 0:
                raise ValueError("no authentication methods specified for `sso`")

        if principal.session_expiry is not None:
            life = int((principal.session_expiry - datetime.datetime.utcnow()).total_seconds())
        else:
            life = None

        response = AuthResponse.respond_to_request(
            request=request, code=status.SUCCESS, principal=principal.userid,
            sso=sso, ptags=principal.ptags, life=life, *args, **kwargs
        )
        return self._finish_response(response=response, sign=sign)

    def generate_failure(self, code, request, msg='', sign=True,
                         skip_handling_check=False, *args, **kwargs):
        """Generate a response indicating failure.

        This is to be used in all cases where the outcome of user interaction
        is not success. This function will refuse to handle a request where
        the 'fail' parameter is 'yes' (in which case the WLS must not redirect
        back to the WAA).

        Args:
            code (int): the response status code. Values specified in the
                protocol are available as constants under `ucam_wls.status`.
            request (AuthRequest): the original WAA request
            msg (str): an optional message that could be shown to the end user
                by the WAA
            sign (bool): whether to sign the response or not.
            *args: passed to `AuthResponse.respond_to_request`
            **kwargs: passed to `AuthResponse.respond_to_request`

        Returns:
            An `AuthResponse` instance matching the given arguments.

        Note:
            Signatures on WLS responses indicating a non-success can optionally
            be signed. In the interests of security, the default in this
            function is to go ahead and sign anyway, but this can be turned off
            if really desired.
        """
        self._pre_response(request, skip_handling_check, check_auth_types=False)

        if request.fail:
            raise ValueError("WAA specified that WLS must not redirect "
                             "back to it on failure")
        if code == status.SUCCESS:
            raise ValueError("Failure responses must not have success status")

        # Bug fix: `msg` was accepted and documented but never forwarded to
        # the response; pass it through so the WAA can display it.
        response = AuthResponse.respond_to_request(
            request=request, code=code, msg=msg, *args, **kwargs
        )
        return self._finish_response(response=response, sign=sign)
| ucam_wls/context.py | 8,756 | High-level interface to implement a web login service (WLS).
This class provides a convenient interface for implementing a WLS with any
authentication backend. It is intended to be instantiated with a single
private key, which is used to sign the responses it generates.
Mechanisms deemed useful for WLS implementation are provided:
- storing the list of supported authentication methods, and checking
whether the WLS and a WAA's request have an method in common
- checking whether the protocol version specified in the WAA request is
supported by `ucam_wls`
These mechanisms can optionally be turned off.
Attributes:
key (ucam_wls.signing.Key): a private key to be used to sign responses
auth_methods (list): a list of supported authentication methods
Generate a WLS 'success' response based on interaction with the user
This function creates a WLS response specifying that the principal was
authenticated based on 'fresh' interaction with the user (e.g. input of
a username and password).
Args:
request (AuthRequest): the original WAA request
principal (AuthPrincipal): the principal authenticated by the WLS
auth (str): the authentication method used by the principal.
life (int): if specified, the validity (in seconds) of the
principal's session with the WLS.
sign (bool): whether to sign the response or not. Recommended to
leave this at the default value of `True` (see warning below).
*args: passed to `AuthResponse.respond_to_request`
**kwargs: passed to `AuthResponse.respond_to_request`
Returns:
An `AuthResponse` instance matching the given arguments.
Warning:
Responses indicating successful authentication *MUST* be signed by
the WLS. It is recommended that you leave `sign` set to `True`, or
make sure to sign the response manually afterwards.
Generate a WLS 'success' response based on a pre-existing identity
This function creates a WLS response specifying that the principal was
authenticated based on previous successful authentication (e.g. an
existing WLS session cookie).
Args:
request (AuthRequest): the original WAA request
principal (AuthPrincipal): the principal authenticated by the WLS
sso (list): a list of strings indicating the authentication methods
previously used for authentication by the principal. If an
empty list is passed, `principal.auth_methods` will be used.
sign (bool): whether to sign the response or not. Recommended to
leave this at the default value of `True` (see warning below).
*args: passed to `AuthResponse.respond_to_request`
**kwargs: passed to `AuthResponse.respond_to_request`
Returns:
An `AuthResponse` instance matching the given arguments.
Warning:
Responses indicating successful authentication *MUST* be signed by
the WLS. It is recommended that you leave `sign` set to `True`, or
make sure to sign the response manually afterwards.
Generate a response indicating failure.
This is to be used in all cases where the outcome of user interaction
is not success. This function will refuse to handle a request where
the 'fail' parameter is 'yes' (in which case the WLS must not redirect
back to the WAA).
Args:
code (int): the response status code. Values specified in the
protocol are available as constants under `ucam_wls.status`.
request (AuthRequest): the original WAA request
msg (str): an optional message that could be shown to the end user
by the WAA
sign (bool): whether to sign the response or not.
*args: passed to `AuthResponse.respond_to_request`
**kwargs: passed to `AuthResponse.respond_to_request`
Returns:
An `AuthResponse` instance matching the given arguments.
Note:
Signatures on WLS responses indicating a non-success can optionally
be signed. In the interests of security, the default in this
function is to go ahead and sign anyway, but this can be turned off
if really desired. | 4,001 | en | 0.818075 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'TableMagneticStoreWriteProperties',
'TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation',
'TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration',
'TableRetentionProperties',
]
@pulumi.output_type
# tfgen-generated output type for a Timestream table's
# `magnetic_store_write_properties` block; dict-backed with snake_case
# property getters (do not edit by hand).
class TableMagneticStoreWriteProperties(dict):
    @staticmethod
    def __key_warning(key: str):
        # Warn when a camelCase wire key is used for dict-style access and
        # suggest the snake_case property getter instead.
        suggest = None
        if key == "enableMagneticStoreWrites":
            suggest = "enable_magnetic_store_writes"
        elif key == "magneticStoreRejectedDataLocation":
            suggest = "magnetic_store_rejected_data_location"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in TableMagneticStoreWriteProperties. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Dict access goes through the key warning first.
        TableMagneticStoreWriteProperties.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        TableMagneticStoreWriteProperties.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 enable_magnetic_store_writes: Optional[bool] = None,
                 magnetic_store_rejected_data_location: Optional['outputs.TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation'] = None):
        """
        :param bool enable_magnetic_store_writes: A flag to enable magnetic store writes.
        :param 'TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationArgs' magnetic_store_rejected_data_location: The location to write error reports for records rejected asynchronously during magnetic store writes. See Magnetic Store Rejected Data Location below for more details.
        """
        # Only set keys for values that were actually provided.
        if enable_magnetic_store_writes is not None:
            pulumi.set(__self__, "enable_magnetic_store_writes", enable_magnetic_store_writes)
        if magnetic_store_rejected_data_location is not None:
            pulumi.set(__self__, "magnetic_store_rejected_data_location", magnetic_store_rejected_data_location)

    @property
    @pulumi.getter(name="enableMagneticStoreWrites")
    def enable_magnetic_store_writes(self) -> Optional[bool]:
        """
        A flag to enable magnetic store writes.
        """
        return pulumi.get(self, "enable_magnetic_store_writes")

    @property
    @pulumi.getter(name="magneticStoreRejectedDataLocation")
    def magnetic_store_rejected_data_location(self) -> Optional['outputs.TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation']:
        """
        The location to write error reports for records rejected asynchronously during magnetic store writes. See Magnetic Store Rejected Data Location below for more details.
        """
        return pulumi.get(self, "magnetic_store_rejected_data_location")
@pulumi.output_type
class TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation(dict):
    """Output view of the rejected-data location for magnetic store writes."""

    @staticmethod
    def __key_warning(key: str):
        # Map the camelCase wire key to the snake_case property name and warn.
        aliases = {"s3Configuration": "s3_configuration"}
        suggest = aliases.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 s3_configuration: Optional['outputs.TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration'] = None):
        """
        :param 'TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3ConfigurationArgs' s3_configuration: Configuration of an S3 location to write error reports for records rejected, asynchronously, during magnetic store writes. See S3 Configuration below for more details.
        """
        # The S3 location is optional; skip setting it when absent.
        if s3_configuration is not None:
            pulumi.set(__self__, "s3_configuration", s3_configuration)

    @property
    @pulumi.getter(name="s3Configuration")
    def s3_configuration(self) -> Optional['outputs.TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration']:
        """
        Configuration of an S3 location to write error reports for records rejected, asynchronously, during magnetic store writes. See S3 Configuration below for more details.
        """
        return pulumi.get(self, "s3_configuration")
@pulumi.output_type
class TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration(dict):
    """Output view of the S3 location that receives rejected-record reports."""

    @staticmethod
    def __key_warning(key: str):
        # Map the camelCase wire key to the snake_case property name and warn.
        aliases = {
            "bucketName": "bucket_name",
            "encryptionOption": "encryption_option",
            "kmsKeyId": "kms_key_id",
            "objectKeyPrefix": "object_key_prefix",
        }
        suggest = aliases.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 bucket_name: Optional[str] = None,
                 encryption_option: Optional[str] = None,
                 kms_key_id: Optional[str] = None,
                 object_key_prefix: Optional[str] = None):
        """
        :param str bucket_name: Bucket name of the customer S3 bucket.
        :param str encryption_option: Encryption option for the customer s3 location. Options are S3 server side encryption with an S3-managed key or KMS managed key. Valid values are `SSE_KMS` and `SSE_S3`.
        :param str kms_key_id: KMS key arn for the customer s3 location when encrypting with a KMS managed key.
        :param str object_key_prefix: Object key prefix for the customer S3 location.
        """
        # Only set the attributes that were actually provided.
        optional_fields = (
            ("bucket_name", bucket_name),
            ("encryption_option", encryption_option),
            ("kms_key_id", kms_key_id),
            ("object_key_prefix", object_key_prefix),
        )
        for attr, value in optional_fields:
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="bucketName")
    def bucket_name(self) -> Optional[str]:
        """
        Bucket name of the customer S3 bucket.
        """
        return pulumi.get(self, "bucket_name")

    @property
    @pulumi.getter(name="encryptionOption")
    def encryption_option(self) -> Optional[str]:
        """
        Encryption option for the customer s3 location. Options are S3 server side encryption with an S3-managed key or KMS managed key. Valid values are `SSE_KMS` and `SSE_S3`.
        """
        return pulumi.get(self, "encryption_option")

    @property
    @pulumi.getter(name="kmsKeyId")
    def kms_key_id(self) -> Optional[str]:
        """
        KMS key arn for the customer s3 location when encrypting with a KMS managed key.
        """
        return pulumi.get(self, "kms_key_id")

    @property
    @pulumi.getter(name="objectKeyPrefix")
    def object_key_prefix(self) -> Optional[str]:
        """
        Object key prefix for the customer S3 location.
        """
        return pulumi.get(self, "object_key_prefix")
@pulumi.output_type
class TableRetentionProperties(dict):
    """Output view of a Timestream table's retention configuration."""

    @staticmethod
    def __key_warning(key: str):
        # Map the camelCase wire key to the snake_case property name and warn.
        aliases = {
            "magneticStoreRetentionPeriodInDays": "magnetic_store_retention_period_in_days",
            "memoryStoreRetentionPeriodInHours": "memory_store_retention_period_in_hours",
        }
        suggest = aliases.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in TableRetentionProperties. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        TableRetentionProperties.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        TableRetentionProperties.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 magnetic_store_retention_period_in_days: int,
                 memory_store_retention_period_in_hours: int):
        """
        :param int magnetic_store_retention_period_in_days: The duration for which data must be stored in the magnetic store. Minimum value of 1. Maximum value of 73000.
        :param int memory_store_retention_period_in_hours: The duration for which data must be stored in the memory store. Minimum value of 1. Maximum value of 8766.
        """
        # Both retention periods are required, so set them unconditionally.
        for attr, value in (
            ("magnetic_store_retention_period_in_days", magnetic_store_retention_period_in_days),
            ("memory_store_retention_period_in_hours", memory_store_retention_period_in_hours),
        ):
            pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="magneticStoreRetentionPeriodInDays")
    def magnetic_store_retention_period_in_days(self) -> int:
        """
        The duration for which data must be stored in the magnetic store. Minimum value of 1. Maximum value of 73000.
        """
        return pulumi.get(self, "magnetic_store_retention_period_in_days")

    @property
    @pulumi.getter(name="memoryStoreRetentionPeriodInHours")
    def memory_store_retention_period_in_hours(self) -> int:
        """
        The duration for which data must be stored in the memory store. Minimum value of 1. Maximum value of 8766.
        """
        return pulumi.get(self, "memory_store_retention_period_in_hours")
| sdk/python/pulumi_aws/timestreamwrite/outputs.py | 10,690 | :param bool enable_magnetic_store_writes: A flag to enable magnetic store writes.
:param 'TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationArgs' magnetic_store_rejected_data_location: The location to write error reports for records rejected asynchronously during magnetic store writes. See Magnetic Store Rejected Data Location below for more details.
:param 'TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3ConfigurationArgs' s3_configuration: Configuration of an S3 location to write error reports for records rejected, asynchronously, during magnetic store writes. See S3 Configuration below for more details.
:param str bucket_name: Bucket name of the customer S3 bucket.
:param str encryption_option: Encryption option for the customer s3 location. Options are S3 server side encryption with an S3-managed key or KMS managed key. Valid values are `SSE_KMS` and `SSE_S3`.
:param str kms_key_id: KMS key arn for the customer s3 location when encrypting with a KMS managed key.
:param str object_key_prefix: Object key prefix for the customer S3 location.
:param int magnetic_store_retention_period_in_days: The duration for which data must be stored in the magnetic store. Minimum value of 1. Maximum value of 73000.
:param int memory_store_retention_period_in_hours: The duration for which data must be stored in the memory store. Minimum value of 1. Maximum value of 8766.
Bucket name of the customer S3 bucket.
A flag to enable magnetic store writes.
Encryption option for the customer s3 location. Options are S3 server side encryption with an S3-managed key or KMS managed key. Valid values are `SSE_KMS` and `SSE_S3`.
KMS key arn for the customer s3 location when encrypting with a KMS managed key.
The location to write error reports for records rejected asynchronously during magnetic store writes. See Magnetic Store Rejected Data Location below for more details.
The duration for which data must be stored in the magnetic store. Minimum value of 1. Maximum value of 73000.
The duration for which data must be stored in the memory store. Minimum value of 1. Maximum value of 8766.
Object key prefix for the customer S3 location.
Configuration of an S3 location to write error reports for records rejected, asynchronously, during magnetic store writes. See S3 Configuration below for more details.
coding=utf-8 *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** *** Do not edit by hand unless you're certain you know what you are doing! *** | 2,525 | en | 0.801816 |
#
# PySNMP MIB module Nortel-MsCarrier-MscPassport-ExtensionsMIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Nortel-MsCarrier-MscPassport-ExtensionsMIB
# Produced by pysmi-0.3.4 at Wed May 1 14:29:54 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE(review): this module is pysmi-generated and is executed by a pysnmp MIB
# builder that injects `mibBuilder` into the module namespace before exec; it
# is not importable on its own — confirm it is only loaded via the builder.
# --- Symbols imported from prerequisite MIB modules ---
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsUnion")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
RowPointer, = mibBuilder.importSymbols("Nortel-MsCarrier-MscPassport-StandardTextualConventionsMIB", "RowPointer")
mscPassportMIBs, mscComponents = mibBuilder.importSymbols("Nortel-MsCarrier-MscPassport-UsefulDefinitionsMIB", "mscPassportMIBs", "mscComponents")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Unsigned32, MibIdentifier, iso, ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, NotificationType, Counter32, Bits, Gauge32, IpAddress, TimeTicks, Integer32, Counter64 = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "MibIdentifier", "iso", "ObjectIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "NotificationType", "Counter32", "Bits", "Gauge32", "IpAddress", "TimeTicks", "Integer32", "Counter64")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
# --- MIB object identifier tree (tuples are OID arcs under 1.3.6.1.4.1.562, the Nortel enterprise arc) ---
extensionsMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 5))
mscExtensions = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 4))
# Enterprise extension table keyed by the standard ifIndex.
mscExtensionIfTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 4, 1), )
if mibBuilder.loadTexts: mscExtensionIfTable.setStatus('mandatory')
if mibBuilder.loadTexts: mscExtensionIfTable.setDescription('A table which provides enterprise extensions to the standard ifTable.')
mscExtensionIfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 4, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: mscExtensionIfEntry.setStatus('mandatory')
if mibBuilder.loadTexts: mscExtensionIfEntry.setDescription(' An entry containing enterprise extensions to the standard ifEntry.')
# Read-only column pointing at the RowStatus variable of the underlying component.
mscIfRowPointer = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 4, 1, 1, 1), RowPointer()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscIfRowPointer.setStatus('mandatory')
if mibBuilder.loadTexts: mscIfRowPointer.setDescription('A pointer to the RowStatus variable for the component represented by the ifTable entry.')
# --- Conformance / capability identifier arcs ---
extensionsGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 5, 1))
extensionsGroupCA = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 5, 1, 1))
extensionsGroupCA01 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 5, 1, 1, 2))
extensionsGroupCA01A = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 5, 1, 1, 2, 2))
extensionsCapabilities = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 5, 3))
extensionsCapabilitiesCA = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 5, 3, 1))
extensionsCapabilitiesCA01 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 5, 3, 1, 2))
extensionsCapabilitiesCA01A = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 5, 3, 1, 2, 2))
# Publish every defined symbol so other MIB modules can import them by name.
mibBuilder.exportSymbols("Nortel-MsCarrier-MscPassport-ExtensionsMIB", extensionsGroup=extensionsGroup, extensionsGroupCA01=extensionsGroupCA01, extensionsCapabilitiesCA=extensionsCapabilitiesCA, extensionsGroupCA=extensionsGroupCA, extensionsMIB=extensionsMIB, mscIfRowPointer=mscIfRowPointer, extensionsCapabilitiesCA01A=extensionsCapabilitiesCA01A, extensionsGroupCA01A=extensionsGroupCA01A, extensionsCapabilities=extensionsCapabilities, extensionsCapabilitiesCA01=extensionsCapabilitiesCA01, mscExtensions=mscExtensions, mscExtensionIfTable=mscExtensionIfTable, mscExtensionIfEntry=mscExtensionIfEntry)
| pysnmp-with-texts/Nortel-MsCarrier-MscPassport-ExtensionsMIB.py | 4,227 | PySNMP MIB module Nortel-MsCarrier-MscPassport-ExtensionsMIB (http://snmplabs.com/pysmi) ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Nortel-MsCarrier-MscPassport-ExtensionsMIB Produced by pysmi-0.3.4 at Wed May 1 14:29:54 2019 On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) | 378 | en | 0.374487 |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Simple check list from AllenNLP repo: https://github.com/allenai/allennlp/blob/master/setup.py
To create the package for pypi.
1. Change the version in __init__.py, setup.py as well as docs/source/conf.py. Remove the master from the links in
the new models of the README:
(https://huggingface.co/transformers/master/model_doc/ -> https://huggingface.co/transformers/model_doc/)
then run `make fix-copies` to fix the index of the documentation.
2. Unpin specific versions from setup.py that use a git install.
3. Commit these changes with the message: "Release: VERSION"
4. Add a tag in git to mark the release: "git tag VERSION -m 'Adds tag VERSION for pypi' "
   Push the tag to git: git push --tags origin master
5. Build both the sources and the wheel. Do not change anything in setup.py between
   creating the wheel and the source distribution (obviously).
   For the wheel, run: "python setup.py bdist_wheel" in the top level directory.
   (this will build a wheel for the python version you use to build it).
   For the sources, run: "python setup.py sdist"
   You should now have a /dist directory with both .whl and .tar.gz source versions.
6. Check that everything looks correct by uploading the package to the pypi test server:
   twine upload dist/* -r pypitest
   (pypi suggests using twine as other methods upload files via plaintext.)
   You may have to specify the repository url, use the following command then:
   twine upload dist/* -r pypitest --repository-url=https://test.pypi.org/legacy/
   Check that you can install it in a virtualenv by running:
   pip install -i https://testpypi.python.org/pypi transformers
7. Upload the final version to actual pypi:
   twine upload dist/* -r pypi
8. Copy the release notes from RELEASE.md to the tag in github once everything is looking hunky-dory.
9. Add the release version to docs/source/_static/js/custom.js and .circleci/deploy.sh
10. Update README.md to redirect to correct documentation.
11. Update the version in __init__.py, setup.py to the new version "-dev" and push to master.
"""
import os
import re
import shutil
from distutils.core import Command
from pathlib import Path
from setuptools import find_packages, setup
# Remove stale transformers.egg-info directory to avoid https://github.com/pypa/pip/issues/5466
stale_egg_info = Path(__file__).parent / "transformers.egg-info"
if stale_egg_info.exists():
    # Tell the user why the directory is being deleted before removing it.
    warning_text = (
        "Warning: {} exists.\n\n"
        "If you recently updated transformers to 3.0 or later, this is expected,\n"
        "but it may prevent transformers from installing in editable mode.\n\n"
        "This directory is automatically generated by Python's packaging tools.\n"
        "I will remove it now.\n\n"
        "See https://github.com/pypa/pip/issues/5466 for details.\n"
    ).format(stale_egg_info)
    print(warning_text)
    shutil.rmtree(stale_egg_info)
# IMPORTANT:
# 1. all dependencies should be listed here with their version requirements if any
# 2. once modified, run: `make deps_table_update` to update src/transformers/dependency_versions_table.py
_deps = [
"black>=20.8b1",
"cookiecutter==1.7.2",
"dataclasses",
"datasets",
"faiss-cpu",
"fastapi",
"filelock",
"flake8>=3.8.3",
"flax>=0.2.2",
"fugashi>=1.0",
"importlib_metadata",
"ipadic>=1.0.0,<2.0",
"isort>=5.5.4",
"jax>=0.2.8",
"jaxlib>=0.1.59",
"keras2onnx",
"numpy>=1.17",
"onnxconverter-common",
"onnxruntime-tools>=1.4.2",
"onnxruntime>=1.4.0",
"packaging",
"parameterized",
"protobuf",
"psutil",
"pydantic",
"pytest",
"pytest-xdist",
"python>=3.6.0",
"recommonmark",
"regex!=2019.12.17",
"requests",
"sacremoses",
"scikit-learn",
"sentencepiece==0.1.91",
"soundfile",
"sphinx-copybutton",
"sphinx-markdown-tables",
"sphinx-rtd-theme==0.4.3", # sphinx-rtd-theme==0.5.0 introduced big changes in the style.
"sphinx==3.2.1",
"starlette",
"tensorflow-cpu>=2.3",
"tensorflow>=2.3",
"timeout-decorator",
"tokenizers>=0.10.1,<0.11",
"torch>=1.0",
"torchaudio",
"tqdm>=4.27",
"unidic>=1.0.2",
"unidic_lite>=1.0.7",
"uvicorn",
]
# this is a lookup table with items like:
#
# tokenizers: "tokenizers==0.9.4"
# packaging: "packaging"
#
# some of the values are versioned whereas others aren't.
deps = {b: a for a, b in (re.findall(r"^(([^!=<>]+)(?:[!=<>].*)?$)", x)[0] for x in _deps)}
# since we save this data in src/transformers/dependency_versions_table.py it can be easily accessed from
# anywhere. If you need to quickly access the data from this table in a shell, you can do so easily with:
#
# python -c 'import sys; from transformers.dependency_versions_table import deps; \
# print(" ".join([ deps[x] for x in sys.argv[1:]]))' tokenizers datasets
#
# Just pass the desired package names to that script as it's shown with 2 packages above.
#
# If transformers is not yet installed and the work is done from the cloned repo remember to add `PYTHONPATH=src` to the script above
#
# You can then feed this for example to `pip`:
#
# pip install -U $(python -c 'import sys; from transformers.dependency_versions_table import deps; \
# print(" ".join([ deps[x] for x in sys.argv[1:]]))' tokenizers datasets)
#
def deps_list(*pkgs):
return [deps[pkg] for pkg in pkgs]
class DepsTableUpdateCommand(Command):
    """
    A custom distutils command that regenerates the pinned dependency table.

    usage: python setup.py deps_table_update
    """

    description = "build runtime dependency table"
    user_options = [
        # format: (long option, short option, description).
        ("dep-table-update", None, "updates src/transformers/dependency_versions_table.py"),
    ]

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        # Render the module-level `deps` mapping as a Python dict literal and
        # write it to the autogenerated table module.
        entries = "\n".join(f' "{k}": "{v}",' for k, v in deps.items())
        lines = [
            "# THIS FILE HAS BEEN AUTOGENERATED. To update:",
            "# 1. modify the `_deps` dict in setup.py",
            "# 2. run `make deps_table_update``",
            "deps = {",
            entries,
            "}",
            "",
        ]
        target = "src/transformers/dependency_versions_table.py"
        print(f"updating {target}")
        with open(target, "w", encoding="utf-8", newline="\n") as f:
            f.write("\n".join(lines))
# Optional dependency groups, installable as e.g. `pip install transformers[tf]`.
extras = {}
extras["ja"] = deps_list("fugashi", "ipadic", "unidic_lite", "unidic")
extras["sklearn"] = deps_list("scikit-learn")
extras["tf"] = deps_list("tensorflow", "onnxconverter-common", "keras2onnx")
extras["tf-cpu"] = deps_list("tensorflow-cpu", "onnxconverter-common", "keras2onnx")
extras["torch"] = deps_list("torch")
if os.name == "nt": # windows
    extras["retrieval"] = deps_list("datasets") # faiss is not supported on windows
    extras["flax"] = [] # jax is not supported on windows
else:
    extras["retrieval"] = deps_list("faiss-cpu", "datasets")
    extras["flax"] = deps_list("jax", "jaxlib", "flax")
extras["tokenizers"] = deps_list("tokenizers")
extras["onnxruntime"] = deps_list("onnxruntime", "onnxruntime-tools")
extras["modelcreation"] = deps_list("cookiecutter")
extras["serving"] = deps_list("pydantic", "uvicorn", "fastapi", "starlette")
extras["speech"] = deps_list("soundfile", "torchaudio")
extras["sentencepiece"] = deps_list("sentencepiece", "protobuf")
# Test tooling plus the groups the test-suite exercises.
extras["testing"] = (
    deps_list("pytest", "pytest-xdist", "timeout-decorator", "parameterized", "psutil", "datasets")
    + extras["retrieval"]
    + extras["modelcreation"]
)
extras["docs"] = deps_list("recommonmark", "sphinx", "sphinx-markdown-tables", "sphinx-rtd-theme", "sphinx-copybutton")
extras["quality"] = deps_list("black", "isort", "flake8")
# "all" covers every supported model backend and tokenization stack.
extras["all"] = extras["tf"] + extras["torch"] + extras["flax"] + extras["sentencepiece"] + extras["tokenizers"]
# "dev" is the full contributor environment.
extras["dev"] = (
    extras["all"]
    + extras["testing"]
    + extras["quality"]
    + extras["ja"]
    + extras["docs"]
    + extras["sklearn"]
    + extras["modelcreation"]
)
# Minimal set needed to load models through torch.hub.
extras["torchhub"] = deps_list(
    "filelock",
    "importlib_metadata",
    "numpy",
    "packaging",
    "protobuf",
    "regex",
    "requests",
    "sacremoses",
    "sentencepiece",
    "torch",
    "tokenizers",
    "tqdm",
)
# when modifying the following list, make sure to update src/transformers/dependency_versions_check.py
install_requires = [
    deps["dataclasses"] + ";python_version<'3.7'", # dataclasses for Python versions that don't have it
    deps["importlib_metadata"] + ";python_version<'3.8'", # importlib_metadata for Python versions that don't have it
    deps["filelock"], # filesystem locks, e.g., to prevent parallel downloads
    deps["numpy"],
    deps["packaging"], # utilities from PyPA to e.g., compare versions
    deps["regex"], # for OpenAI GPT
    deps["requests"], # for downloading models over HTTPS
    deps["sacremoses"], # for XLM
    deps["tokenizers"],
    deps["tqdm"], # progress bars in model download and training scripts
]
# Package metadata and entry points; `extras` and `install_requires` are built above.
setup(
    name="transformers",
    version="4.4.0.dev0",  # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots)
    author="Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Sam Shleifer, Patrick von Platen, Sylvain Gugger, Google AI Language Team Authors, Open AI team Authors, Facebook AI Authors, Carnegie Mellon University Authors",
    author_email="thomas@huggingface.co",
    description="State-of-the-art Natural Language Processing for TensorFlow 2.0 and PyTorch",
    long_description=open("README.md", "r", encoding="utf-8").read(),
    long_description_content_type="text/markdown",
    keywords="NLP deep learning transformer pytorch tensorflow BERT GPT GPT-2 google openai CMU",
    license="Apache",
    url="https://github.com/huggingface/transformers",
    # The importable package lives under src/ (src-layout).
    package_dir={"": "src"},
    packages=find_packages("src"),
    extras_require=extras,
    entry_points={"console_scripts": ["transformers-cli=transformers.commands.transformers_cli:main"]},
    python_requires=">=3.6.0",
    install_requires=install_requires,
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Intended Audience :: Education",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
    ],
    # Registers `python setup.py deps_table_update` (see DepsTableUpdateCommand above).
    cmdclass={"deps_table_update": DepsTableUpdateCommand},
)
| setup.py | 11,408 | A custom distutils command that updates the dependency table.
usage: python setup.py deps_table_update
Simple check list from AllenNLP repo: https://github.com/allenai/allennlp/blob/master/setup.py
To create the package for pypi.
1. Change the version in __init__.py, setup.py as well as docs/source/conf.py. Remove the master from the links in
the new models of the README:
(https://huggingface.co/transformers/master/model_doc/ -> https://huggingface.co/transformers/model_doc/)
then run `make fix-copies` to fix the index of the documentation.
2. Unpin specific versions from setup.py that use a git install.
2. Commit these changes with the message: "Release: VERSION"
3. Add a tag in git to mark the release: "git tag VERSION -m 'Adds tag VERSION for pypi' "
Push the tag to git: git push --tags origin master
4. Build both the sources and the wheel. Do not change anything in setup.py between
creating the wheel and the source distribution (obviously).
For the wheel, run: "python setup.py bdist_wheel" in the top level directory.
(this will build a wheel for the python version you use to build it).
For the sources, run: "python setup.py sdist"
You should now have a /dist directory with both .whl and .tar.gz source versions.
5. Check that everything looks correct by uploading the package to the pypi test server:
twine upload dist/* -r pypitest
(pypi suggest using twine as other methods upload files via plaintext.)
You may have to specify the repository url, use the following command then:
twine upload dist/* -r pypitest --repository-url=https://test.pypi.org/legacy/
Check that you can install it in a virtualenv by running:
pip install -i https://testpypi.python.org/pypi transformers
6. Upload the final version to actual pypi:
twine upload dist/* -r pypi
7. Copy the release notes from RELEASE.md to the tag in github once everything is looking hunky-dory.
8. Add the release version to docs/source/_static/js/custom.js and .circleci/deploy.sh
9. Update README.md to redirect to correct documentation.
10. Update the version in __init__.py, setup.py to the new version "-dev" and push to master.
Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Remove stale transformers.egg-info directory to avoid https://github.com/pypa/pip/issues/5466 IMPORTANT: 1. all dependencies should be listed here with their version requirements if any 2. once modified, run: `make deps_table_update` to update src/transformers/dependency_versions_table.py sphinx-rtd-theme==0.5.0 introduced big changes in the style. this is a lookup table with items like: tokenizers: "tokenizers==0.9.4" packaging: "packaging" some of the values are versioned whereas others aren't. since we save this data in src/transformers/dependency_versions_table.py it can be easily accessed from anywhere. If you need to quickly access the data from this table in a shell, you can do so easily with: python -c 'import sys; from transformers.dependency_versions_table import deps; \ print(" ".join([ deps[x] for x in sys.argv[1:]]))' tokenizers datasets Just pass the desired package names to that script as it's shown with 2 packages above. If transformers is not yet installed and the work is done from the cloned repo remember to add `PYTHONPATH=src` to the script above You can then feed this for example to `pip`: pip install -U $(python -c 'import sys; from transformers.dependency_versions_table import deps; \ print(" ".join([ deps[x] for x in sys.argv[1:]]))' tokenizers datasets) format: (long option, short option, description). 
windows faiss is not supported on windows jax is not supported on windows when modifying the following list, make sure to update src/transformers/dependency_versions_check.py dataclasses for Python versions that don't have it importlib_metadata for Python versions that don't have it filesystem locks, e.g., to prevent parallel downloads utilities from PyPA to e.g., compare versions for OpenAI GPT for downloading models over HTTPS for XLM progress bars in model download and training scripts expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots) | 4,691 | en | 0.798846 |
from django.db import models
# Create your models here.
class BaseView(models.Model):
    # Single display-title field shared by all the portNView siblings below.
    title = models.CharField(max_length=256)
    def __unicode__(self):
        # NOTE(review): __unicode__ is only honoured on Python 2 / old Django;
        # on Python 3 Django uses __str__ — confirm the targeted versions.
        return self.title
class port1View(models.Model):
    # Fix: every sibling model (BaseView, port2View..port6View) declares a
    # `title` field, but it was missing here even though __unicode__ reads
    # self.title — which raised AttributeError for instances without the
    # attribute. Add the field to restore consistency.
    title = models.CharField(max_length=256)

    def __unicode__(self):
        # NOTE(review): __unicode__ is only honoured on Python 2 / old Django;
        # on Python 3 Django uses __str__ — confirm the targeted versions.
        return self.title
class port2View(models.Model):
    # Display title for this view entry.
    title = models.CharField(max_length=256)
    def __unicode__(self):
        # NOTE(review): Python-2-era hook; Python 3 Django uses __str__.
        return self.title
class port3View(models.Model):
    # Display title for this view entry.
    title = models.CharField(max_length=256)
    def __unicode__(self):
        # NOTE(review): Python-2-era hook; Python 3 Django uses __str__.
        return self.title
class port4View(models.Model):
    # Display title for this view entry.
    title = models.CharField(max_length=256)
    def __unicode__(self):
        # NOTE(review): Python-2-era hook; Python 3 Django uses __str__.
        return self.title
class port5View(models.Model):
    # Display title for this view entry.
    title = models.CharField(max_length=256)
    def __unicode__(self):
        # NOTE(review): Python-2-era hook; Python 3 Django uses __str__.
        return self.title
class port6View(models.Model):
    # Display title for this view entry.
    title = models.CharField(max_length=256)
    def __unicode__(self):
        # NOTE(review): Python-2-era hook; Python 3 Django uses __str__.
        return self.title
| mainsite/models.py | 849 | Create your models here. | 24 | en | 0.920486 |
# import the necessary packages
import sys
import cv2
import numpy as np
import pandas as pd
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Conv2DTranspose
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Reshape
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import Input
from tensorflow.keras import Model
from tensorflow.keras import backend as K
from tensorflow.keras.models import load_model
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
class CNNProcessData:
    def __init__(self):
        # Stateless helper class; nothing to initialize.
        pass
def get_imagedatagenerator(self):
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
#rotation_range=20,
#width_shift_range=0.05,
#height_shift_range=0.05,
#horizontal_flip=True,
# vertical_flip=True,
#brightness_range=[0.8,1.2]
)
return datagen
def generate_croppings(self, testX, testY, image_size, number):
if number != 11:
raise Exception("Only implemented for number = 11 right now")
augmented_testX_1 = []
augmented_testX_2 = []
augmented_testX_3 = []
augmented_testX_4 = []
augmented_testX_5 = []
augmented_testX_6 = []
augmented_testX_7 = []
augmented_testX_8 = []
augmented_testX_9 = []
augmented_testX_10 = []
augmented_testX_11 = []
mid_image_size = int(round(image_size/2))
for img in testX:
height = img.shape[0]
small_height = int(round(height*0.1))
mid_height = int(round(height/2))
width = img.shape[1]
mid_width = int(round(width/2))
crop_img1 = img[height-image_size:height, 0:image_size]
crop_img2 = img[height-image_size:height, width-image_size:width]
crop_img3 = img[0:image_size, width-image_size:width]
crop_img4 = img[0:image_size, 0:image_size]
crop_img5 = img[mid_height-mid_image_size:mid_height+mid_image_size, mid_width-mid_image_size:mid_width+mid_image_size]
crop_img6 = img[mid_height-mid_image_size:mid_height+mid_image_size, 0:image_size]
crop_img7 = img[mid_height-mid_image_size:mid_height+mid_image_size, width-image_size:width]
crop_img8 = img[mid_height+small_height-mid_image_size:mid_height+small_height+mid_image_size, 0:image_size]
crop_img9 = img[mid_height+small_height-mid_image_size:mid_height+small_height+mid_image_size, width-image_size:width]
crop_img10 = img[mid_height-small_height-mid_image_size:mid_height-small_height+mid_image_size, 0:image_size]
crop_img11 = img[mid_height-small_height-mid_image_size:mid_height-small_height+mid_image_size, width-image_size:width]
augmented_testX_1.append(crop_img1)
augmented_testX_2.append(crop_img2)
augmented_testX_3.append(crop_img3)
augmented_testX_4.append(crop_img4)
augmented_testX_5.append(crop_img5)
augmented_testX_6.append(crop_img6)
augmented_testX_7.append(crop_img7)
augmented_testX_8.append(crop_img8)
augmented_testX_9.append(crop_img9)
augmented_testX_10.append(crop_img10)
augmented_testX_11.append(crop_img11)
augmented_testX_1 = np.array(augmented_testX_1)
augmented_testX_2 = np.array(augmented_testX_2)
augmented_testX_3 = np.array(augmented_testX_3)
augmented_testX_4 = np.array(augmented_testX_4)
augmented_testX_5 = np.array(augmented_testX_5)
augmented_testX_6 = np.array(augmented_testX_6)
augmented_testX_7 = np.array(augmented_testX_7)
augmented_testX_8 = np.array(augmented_testX_8)
augmented_testX_9 = np.array(augmented_testX_9)
augmented_testX_10 = np.array(augmented_testX_10)
augmented_testX_11 = np.array(augmented_testX_11)
testX = np.concatenate((augmented_testX_1, augmented_testX_2, augmented_testX_3, augmented_testX_4, augmented_testX_5, augmented_testX_6, augmented_testX_7, augmented_testX_8, augmented_testX_9, augmented_testX_10, augmented_testX_11))
# testXflipped = []
# for img in testX:
# horizontal_flip = cv2.flip( img, 0 )
# testXflipped.append(horizontal_flip)
# testXflipped = np.array(testXflipped)
# testX = np.concatenate((testX, testXflipped))
testY = np.repeat(testY, number)
return (testX, testY)
def create_montages(self, images, montage_image_number, image_size, full_montage_image_size):
output = []
if montage_image_number == 4:
data = images.reshape(int(len(images)/montage_image_number), montage_image_number, image_size, image_size, 3)
for iter in range(len(data)):
img_set = data[iter]
outputImage = np.zeros((full_montage_image_size, full_montage_image_size, 3))
outputImage[0:image_size, 0:image_size, :] = img_set[0]
outputImage[0:image_size, image_size:2*image_size, :] = img_set[1]
outputImage[image_size:2*image_size, 0:image_size, :] = img_set[2]
outputImage[image_size:2*image_size, image_size:2*image_size, :] = img_set[3]
# cv2.imshow("Result", outputImage)
# cv2.waitKey(0)
# raise Exception('Exit')
output.append(outputImage)
else:
raise Exception('Only implemented to montage 4 images into one image')
return np.array(output)
def process_cnn_data(self, images, aux_data, num_unique_stock_ids, num_unique_image_types, num_unique_time_days, image_size, keras_model_type, data_augmentation, data_augmentation_test, montage_image_number, full_montage_image_size, output_autoencoder_model_file_path, log_file_path):
if log_file_path is not None:
sys.stderr = open(log_file_path, 'a')
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
trainX = []
testX = []
trainY = []
testY = []
datagen = self.get_imagedatagenerator()
datagen.fit(images)
images = datagen.standardize(images)
aux_data["value"] = aux_data["value"].astype(float)
output_image_file = aux_data["output_image_file"].tolist()
# LSTM models group images by time, but are still ties to a single label e.g. X, Y = [img_t1, img_t2, img_t3], y1.
if keras_model_type == 'densenet121_lstm_imagenet':
images = images.reshape(num_unique_stock_ids * num_unique_image_types, num_unique_time_days, input_image_size, input_image_size, 3)
(train_aux_data, test_aux_data, train_images, test_images) = train_test_split(aux_data, images, test_size=0.2)
trainX_length = len(train_images)
testX_length = len(test_images)
train_images = train_images.reshape(trainX_length * num_unique_time_days, input_image_size, input_image_size, 3)
test_images = test_images.reshape(testX_length * num_unique_time_days, input_image_size, input_image_size, 3)
trainX_length_flat = len(train_images)
test_images = datagen.standardize(test_images)
# (testX, testY) = self.generate_croppings(testX, testY, image_size, data_augmentation_test)
testX_resized = []
for img in test_images:
testX_resized.append(cv2.resize(img, (image_size, image_size)))
test_images = np.array(testX_resized)
test_images = test_images.reshape(data_augmentation_test * testX_length, num_unique_time_days, image_size, image_size, 3)
# trainX_aug = []
# trainY_aug = []
# augmented = datagen.flow(train_images, train_aux_data, batch_size=trainX_length_flat)
# for i in range(0, data_augmentation):
# X, y = augmented.next()
# if len(trainX_aug) == 0:
# trainX_aug = X
# trainY_aug = y
# else:
# trainX_aug = np.concatenate((trainX_aug, X))
# trainY_aug = np.concatenate((trainY_aug, y))
#
# trainX = trainX_aug
# trainY = trainY_aug
trainX_resized = []
for img in train_images:
trainX_resized.append(cv2.resize(img, (image_size, image_size)))
train_images = np.array(trainX_resized)
train_images = train_images.reshape(data_augmentation * trainX_length, num_unique_time_days, image_size, image_size, 3)
else:
images = self.create_montages(images, montage_image_number, image_size, full_montage_image_size)
(encoder, decoder, autoencoder) = self.build_autoencoder(full_montage_image_size, full_montage_image_size, 3)
opt = Adam(lr=1e-3)
autoencoder.compile(loss="mse", optimizer=opt)
(train_aux_data, test_aux_data, train_images, test_images) = train_test_split(aux_data, images, test_size=0.2)
checkpoint = ModelCheckpoint(filepath=output_autoencoder_model_file_path, monitor='loss', verbose=1, save_best_only=True, mode='min', save_frequency=1, save_weights_only=False)
callbacks_list = [checkpoint]
# train the convolutional autoencoder
H = autoencoder.fit(
train_images, train_images,
validation_data=(test_images, test_images),
epochs=25,
batch_size=32,
callbacks=callbacks_list
)
decoded = autoencoder.predict(images)
output_image_counter = 0
for image in decoded:
cv2.imwrite(output_image_file[output_image_counter], image*255)
output_image_counter += 1
(train_aux_data, test_aux_data, train_images, test_images) = train_test_split(aux_data, decoded, test_size=0.2)
# testY_length = len(testY)
# (testX, testY) = self.generate_croppings(testX, testY, image_size, data_augmentation_test)
# testY = testY.reshape(data_augmentation_test * testY_length, 1)
# augmented = datagen.flow(trainX, trainY, batch_size=len(trainX))
# for i in range(0, data_augmentation):
# X, y = augmented.next()
stock_id_binarizer = LabelBinarizer().fit(aux_data["stock_id"])
train_stock_id_categorical = stock_id_binarizer.transform(train_aux_data["stock_id"])
test_stock_id_categorical = stock_id_binarizer.transform(test_aux_data["stock_id"])
accession_id_binarizer = LabelBinarizer().fit(aux_data["accession_id"])
train_accession_id_categorical = accession_id_binarizer.transform(train_aux_data["accession_id"])
test_accession_id_categorical = accession_id_binarizer.transform(test_aux_data["accession_id"])
female_id_binarizer = LabelBinarizer().fit(aux_data["female_id"])
train_female_id_categorical = female_id_binarizer.transform(train_aux_data["female_id"])
test_female_id_categorical = female_id_binarizer.transform(test_aux_data["female_id"])
male_id_binarizer = LabelBinarizer().fit(aux_data["male_id"])
train_male_id_categorical = male_id_binarizer.transform(train_aux_data["male_id"])
test_male_id_categorical = male_id_binarizer.transform(test_aux_data["male_id"])
continuous = [col for col in aux_data.columns if 'aux_trait_' in col]
cs = MinMaxScaler()
if len(continuous) > 0:
trainContinuous = cs.fit_transform(train_aux_data[continuous])
testContinuous = cs.transform(test_aux_data[continuous])
#trainX = np.hstack((train_stock_id_categorical, train_accession_id_categorical, train_female_id_categorical, train_male_id_categorical, trainContinuous))
#testX = np.hstack((test_stock_id_categorical, test_accession_id_categorical, test_female_id_categorical, test_male_id_categorical, testContinuous))
trainX = trainContinuous
testX = testContinuous
else:
trainX = []
testX = []
trainx = np.array(trainX)
testx = np.array(testX)
max_label = aux_data["value"].max()
trainY = train_aux_data["value"]/max_label
testY = test_aux_data["value"]/max_label
train_genotype_files = train_aux_data["genotype_file"].tolist()
test_genotype_files = test_aux_data["genotype_file"].tolist()
train_genotype_data = []
for f in train_genotype_files:
if log_file_path is not None:
eprint(f)
else:
print(f)
if pd.isna(f) is False:
geno_data = pd.read_csv(f, sep="\t", header=None, na_values="NA")
train_genotype_data.append(np.array(geno_data.iloc[:,0]))
test_genotype_data = []
for f in test_genotype_files:
if log_file_path is not None:
eprint(f)
else:
print(f)
if pd.isna(f) is False:
geno_data = pd.read_csv(f, sep="\t", header=None, na_values="NA")
test_genotype_data.append(np.array(geno_data.iloc[:,0]))
train_genotype_data = np.array(train_genotype_data)
test_genotype_data = np.array(test_genotype_data)
eprint(train_genotype_data)
eprint(testX)
eprint(trainX)
return (test_images, np.array(testX), testY.to_numpy(), test_genotype_data, train_images, np.array(trainX), trainY.to_numpy(), train_genotype_data)
def process_cnn_data_predictions(self, data, aux_data, num_unique_stock_ids, num_unique_image_types, num_unique_time_days, image_size, keras_model_type, input_autoencoder_model_file_path, training_data, data_augmentation_test, montage_image_number, full_montage_image_size):
trainX = []
testX = []
trainY = []
testY = []
datagen = self.get_imagedatagenerator()
datagen.fit(training_data)
data = datagen.standardize(data)
output_image_file = aux_data["output_image_file"].tolist()
data = self.create_montages(data, montage_image_number, image_size, full_montage_image_size)
autoencoder_model = load_model(input_autoencoder_model_file_path)
data = autoencoder_model.predict(data)
#ret = self.generate_croppings(data, None, image_size, data_augmentation_test)
#augmented_data = ret[0]
# LSTM models group images by time, but are still ties to a single label e.g. X, Y = [img_t1, img_t2, img_t3], y1.
if keras_model_type == 'KerasCNNLSTMDenseNet121ImageNetWeights':
data = data.reshape(data_augmentation_test * num_unique_stock_ids * num_unique_image_types, num_unique_time_days, image_size, image_size, 3)
output_image_counter = 0
for image in data:
cv2.imwrite(output_image_file[output_image_counter], image*255)
output_image_counter += 1
stock_id_binarizer = LabelBinarizer().fit(aux_data["stock_id"])
stock_id_categorical = stock_id_binarizer.transform(aux_data["stock_id"])
accession_id_binarizer = LabelBinarizer().fit(aux_data["accession_id"])
accession_id_categorical = accession_id_binarizer.transform(aux_data["accession_id"])
female_id_binarizer = LabelBinarizer().fit(aux_data["female_id"])
female_id_categorical = female_id_binarizer.transform(aux_data["female_id"])
male_id_binarizer = LabelBinarizer().fit(aux_data["male_id"])
male_id_categorical = male_id_binarizer.transform(aux_data["male_id"])
continuous = [col for col in aux_data.columns if 'aux_trait_' in col]
cs = MinMaxScaler()
if len(continuous) > 0:
fitContinuous = cs.fit_transform(aux_data[continuous])
# fitX = np.hstack([stock_id_categorical, accession_id_categorical, female_id_categorical, male_id_categorical, fitContinuous])
fitX = fitContinuous
else:
# fitX = np.hstack([stock_id_categorical, accession_id_categorical, female_id_categorical, male_id_categorical])
fitX = []
fitX = np.array(fitX)
max_label = aux_data["value"].max()
fitY = aux_data["value"]/max_label
genotype_files = aux_data["genotype_file"].tolist()
genotype_data = []
for f in genotype_files:
if pd.isna(f) is False:
geno_data = pd.read_csv(f, sep="\t", header=None, na_values="NA")
genotype_data.append(np.array(geno_data.iloc[:,0]))
genotype_data = np.array(genotype_data)
return (data, fitX, genotype_data, fitY.to_numpy())
def build_autoencoder(self, width, height, depth, filters=(32, 64), latentDim=16):
inputShape = (height, width, depth)
chanDim = -1
# define the input to the encoder
inputs = Input(shape=inputShape)
x = inputs
# loop over the number of filters
for f in filters:
# apply a CONV => RELU => BN operation
x = Conv2D(f, (3, 3), strides=2, padding="same")(x)
x = LeakyReLU(alpha=0.2)(x)
x = BatchNormalization(axis=chanDim)(x)
# flatten the network and then construct our latent vector
volumeSize = K.int_shape(x)
x = Flatten()(x)
latent = Dense(latentDim)(x)
# build the encoder model
encoder = Model(inputs, latent, name="encoder")
# start building the decoder model which will accept the
# output of the encoder as its inputs
latentInputs = Input(shape=(latentDim,))
x = Dense(np.prod(volumeSize[1:]))(latentInputs)
x = Reshape((volumeSize[1], volumeSize[2], volumeSize[3]))(x)
# loop over our number of filters again, but this time in
# reverse order
for f in filters[::-1]:
# apply a CONV_TRANSPOSE => RELU => BN operation
x = Conv2DTranspose(f, (3, 3), strides=2, padding="same")(x)
x = LeakyReLU(alpha=0.2)(x)
x = BatchNormalization(axis=chanDim)(x)
# apply a single CONV_TRANSPOSE layer used to recover the
# original depth of the image
x = Conv2DTranspose(depth, (3, 3), padding="same")(x)
outputs = Activation("sigmoid")(x)
# build the decoder model
decoder = Model(latentInputs, outputs, name="decoder")
# our autoencoder is the encoder + decoder
autoencoder = Model(inputs, decoder(encoder(inputs)), name="autoencoder")
# return a 3-tuple of the encoder, decoder, and autoencoder
return (encoder, decoder, autoencoder)
| CNN/CNNProcessData.py | 19,386 | import the necessary packagesrotation_range=20,width_shift_range=0.05,height_shift_range=0.05,horizontal_flip=True, vertical_flip=True,brightness_range=[0.8,1.2] testXflipped = [] for img in testX: horizontal_flip = cv2.flip( img, 0 ) testXflipped.append(horizontal_flip) testXflipped = np.array(testXflipped) testX = np.concatenate((testX, testXflipped)) cv2.imshow("Result", outputImage) cv2.waitKey(0) raise Exception('Exit') LSTM models group images by time, but are still ties to a single label e.g. X, Y = [img_t1, img_t2, img_t3], y1. (testX, testY) = self.generate_croppings(testX, testY, image_size, data_augmentation_test) trainX_aug = [] trainY_aug = [] augmented = datagen.flow(train_images, train_aux_data, batch_size=trainX_length_flat) for i in range(0, data_augmentation): X, y = augmented.next() if len(trainX_aug) == 0: trainX_aug = X trainY_aug = y else: trainX_aug = np.concatenate((trainX_aug, X)) trainY_aug = np.concatenate((trainY_aug, y)) trainX = trainX_aug trainY = trainY_aug train the convolutional autoencoder testY_length = len(testY) (testX, testY) = self.generate_croppings(testX, testY, image_size, data_augmentation_test) testY = testY.reshape(data_augmentation_test * testY_length, 1) augmented = datagen.flow(trainX, trainY, batch_size=len(trainX)) for i in range(0, data_augmentation): X, y = augmented.next()trainX = np.hstack((train_stock_id_categorical, train_accession_id_categorical, train_female_id_categorical, train_male_id_categorical, trainContinuous))testX = np.hstack((test_stock_id_categorical, test_accession_id_categorical, test_female_id_categorical, test_male_id_categorical, testContinuous))ret = self.generate_croppings(data, None, image_size, data_augmentation_test)augmented_data = ret[0] LSTM models group images by time, but are still ties to a single label e.g. X, Y = [img_t1, img_t2, img_t3], y1. 
fitX = np.hstack([stock_id_categorical, accession_id_categorical, female_id_categorical, male_id_categorical, fitContinuous]) fitX = np.hstack([stock_id_categorical, accession_id_categorical, female_id_categorical, male_id_categorical]) define the input to the encoder loop over the number of filters apply a CONV => RELU => BN operation flatten the network and then construct our latent vector build the encoder model start building the decoder model which will accept the output of the encoder as its inputs loop over our number of filters again, but this time in reverse order apply a CONV_TRANSPOSE => RELU => BN operation apply a single CONV_TRANSPOSE layer used to recover the original depth of the image build the decoder model our autoencoder is the encoder + decoder return a 3-tuple of the encoder, decoder, and autoencoder | 2,751 | en | 0.473604 |
from numbers import Number
import yaml
from .color_tools import hex2rgb
def __default_grid__(ax):
    """Apply the default grid styling to *ax* (temporary helper).

    Draws thin black major and fainter minor grid lines and switches minor
    ticks on (tick marks only, no labels).
    """
    grid_specs = (
        ("major", 0.2, 0.5),
        ("minor", 0.1, 0.25),
    )
    for which, alpha, line_width in grid_specs:
        ax.grid(b=True, which=which, color='#000000', alpha=alpha, linestyle='-', linewidth=line_width)
    ax.minorticks_on()
class FigStyle:
    """Figure style settings loaded from a YAML "figstyle" config file.

    All style attributes are exposed read-only through properties and are
    populated by :meth:`read_config_file`, which ``__init__`` calls.
    Mandatory fields: ``width``, ``ratio`` (two numbers), ``hspace``.
    Optional fields: ``colors``, ``linestyles``, ``markers``, ``main_color``.
    """

    def __init__(self, config_file):
        self.__width = None
        self.__ratio = None
        self.__hspace = None
        self.__colors = [None]
        self.__linestyles = [None]
        self.__markers = [None]
        self.__grid = __default_grid__  # default grid styling (not configurable yet)
        self.__main_color = None
        self.read_config_file(config_file)  # this is what actually initializes the values

    @property
    def colors(self):
        return self.__colors

    @property
    def width(self):
        return self.__width

    @property
    def ratio(self):
        return self.__ratio

    @property
    def hspace(self):
        return self.__hspace

    @property
    def grid(self):
        return self.__grid

    @property
    def linestyles(self):
        return self.__linestyles

    @property
    def markers(self):
        return self.__markers

    @property
    def main_color(self):
        return self.__main_color

    def read_config_file(self, filename):
        """Parse *filename* (YAML) and populate the style attributes.

        Raises ValueError for a non-string path or missing/invalid mandatory
        fields, and re-raises yaml.YAMLError on a malformed file.
        """
        if not isinstance(filename, str):
            raise ValueError('"file_name" must be a string')
        with open(filename, 'r') as stream:
            try:
                # BUGFIX/security: safe_load instead of yaml.load — the latter
                # without an explicit Loader can construct arbitrary Python
                # objects and is deprecated.
                data = yaml.safe_load(stream)
            except yaml.YAMLError as exc:
                print(exc)
                # BUGFIX: previously execution fell through with `data`
                # undefined, producing a confusing NameError below.
                raise
        if 'width' not in data:
            raise ValueError('The "figstyle" file must have a "width" field')
        self.__width = float(data['width'])
        if 'ratio' not in data:
            raise ValueError('The "figstyle" file must have a "ratio" field')
        if isinstance(data['ratio'], list) and len(data['ratio']) == 2 and isinstance(data['ratio'][0], Number) and isinstance(data['ratio'][1], Number):
            self.__ratio = data['ratio']
        else:
            raise ValueError('Error reading "' + filename + '": ratio must be a list of two numbers [x_ratio, y_ratio]')
        if 'hspace' not in data:
            raise ValueError('The "figstyle" file must have a "hspace" field')
        self.__hspace = float(data['hspace'])
        # BUGFIX: guard against a missing optional "colors" key (was an
        # unconditional data['colors'] lookup → KeyError).
        if isinstance(data.get('colors'), list):
            self.__colors = [hex2rgb(c) for c in data['colors']]
        if 'linestyles' in data and isinstance(data['linestyles'], list):
            self.__linestyles = data['linestyles']
        if 'markers' in data and isinstance(data['markers'], list):
            self.__markers = data['markers']
        if 'main_color' in data and isinstance(data['main_color'], str):
            self.__main_color = hex2rgb(data['main_color'])
| nicenquickplotlib/config_types.py | 2,622 | This is a temporary function
Enables minor ticks without text, only the ticks. This is what actually initializes the values. | 126 | en | 0.766983 |
from machine import Pin, Map, PWM # include Pin, Map and PWM functions from machine module
import time # include time module
# create PWM on WIO BUZZER at 1000 Hz with duty cycle 250
# (comment previously claimed 2000 Hz; the code sets freq=1000)
BUZZER = PWM(Pin(Map.WIO_BUZZER), freq=1000, duty=250)
| Classroom 4/Buzzer_PWM.py | 259 | include Pin, Map and PWM functions from machine module include time module create PWM on WIO BUZZER with 2000Hz frequency and 250 duty cycle | 142 | en | 0.812334 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial migration creating the GoogleMap CMS plugin model.

    NOTE(review): auto-generated by Django's makemigrations — do not hand-edit
    field definitions here; schema changes belong in a new migration.
    """

    dependencies = [
        ('cms', '__first__'),
    ]

    operations = [
        migrations.CreateModel(
            name='GoogleMap',
            fields=[
                ('cmsplugin_ptr', models.OneToOneField(serialize=False, parent_link=True, auto_created=True, to='cms.CMSPlugin', primary_key=True)),
                ('title', models.CharField(verbose_name='map title', blank=True, null=True, max_length=100)),
                ('address', models.CharField(verbose_name='address', max_length=150)),
                ('zipcode', models.CharField(verbose_name='zip code', max_length=30)),
                ('city', models.CharField(verbose_name='city', max_length=100)),
                ('content', models.CharField(help_text='Displayed under address in the bubble.', blank=True, max_length=255, verbose_name='additional content')),
                ('zoom', models.PositiveSmallIntegerField(verbose_name='zoom level', default=13, choices=[(0, '0'), (1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6'), (7, '7'), (8, '8'), (9, '9'), (10, '10'), (11, '11'), (12, '12'), (13, '13'), (14, '14'), (15, '15'), (16, '16'), (17, '17'), (18, '18'), (19, '19'), (20, '20'), (21, '21')])),
                ('lat', models.DecimalField(help_text='Use latitude & longitude to fine tune the map position.', blank=True, max_digits=10, verbose_name='latitude', null=True, decimal_places=6)),
                ('lng', models.DecimalField(max_digits=10, verbose_name='longitude', blank=True, null=True, decimal_places=6)),
                ('route_planer_title', models.CharField(verbose_name='route planer title', blank=True, null=True, max_length=150, default='Calculate your fastest way to here')),
                ('route_planer', models.BooleanField(verbose_name='route planer', default=False)),
                ('width', models.CharField(help_text='Plugin width (in pixels or percent).', default='100%', max_length=6, verbose_name='width')),
                ('height', models.CharField(help_text='Plugin height (in pixels).', default='400px', max_length=6, verbose_name='height')),
                ('info_window', models.BooleanField(help_text='Show textbox over marker', default=True, verbose_name='info window')),
                ('scrollwheel', models.BooleanField(help_text='Enable scrollwheel zooming on the map', default=True, verbose_name='scrollwheel')),
                ('double_click_zoom', models.BooleanField(verbose_name='double click zoom', default=True)),
                ('draggable', models.BooleanField(verbose_name='draggable', default=True)),
                ('keyboard_shortcuts', models.BooleanField(verbose_name='keyboard shortcuts', default=True)),
                ('pan_control', models.BooleanField(verbose_name='Pan control', default=True)),
                ('zoom_control', models.BooleanField(verbose_name='zoom control', default=True)),
                ('street_view_control', models.BooleanField(verbose_name='Street View control', default=True)),
            ],
            options={
                'abstract': False,
            },
            bases=('cms.cmsplugin',),
        ),
    ]
| env/lib/python2.7/site-packages/djangocms_googlemap/migrations_django/0001_initial.py | 3,264 | -*- coding: utf-8 -*- | 21 | en | 0.767281 |
import os.path
import IMLearn.learners.regressors.linear_regression
from IMLearn.learners.regressors import PolynomialFitting
from IMLearn.utils import split_train_test
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.io as pio
pio.templates.default = "simple_white"  # minimal white plotly theme for every figure
from IMLearn.metrics.loss_functions import mean_square_error
# Dataset location, relative to this exercises/ directory.
CITY_TEMPERATURE_DATA_PATH = os.path.join(os.path.curdir, "..", "datasets", "City_Temperature.csv")
def load_data(filename: str) -> pd.DataFrame:
    """
    Load the city daily temperature dataset and preprocess it.

    Preprocessing steps:
      * drop exact duplicate rows,
      * drop rows whose ``Temp`` is below -70 (presumably the dataset's
        invalid-measurement sentinel values — confirm against the raw data),
      * add a ``DayOfYear`` column (1-366) derived from ``Date``.

    Parameters
    ----------
    filename: str
        Path to the city temperature CSV dataset

    Returns
    -------
    Design matrix and response vector (Temp)
    """
    # BUGFIX(doc): the docstring previously said "house prices dataset" —
    # copy-paste leftover from another exercise.
    data = pd.read_csv(filename, parse_dates=["Date"]).drop_duplicates()
    data = data.drop(data[data["Temp"] < -70].index)  # invalid Temp
    data["DayOfYear"] = data['Date'].dt.dayofyear
    return data
def question_2(data):
    """ Exploring data specifically in Israel """
    # Work on a copy so the caller's DataFrame is not mutated.
    data = data.copy()
    data = data[data["Country"] == "Israel"]
    # Cast Year to str so plotly uses a discrete per-year color scale rather
    # than a continuous one.
    data["Year"] = data["Year"].astype(str)
    fig = px.scatter(data, x="DayOfYear", y="Temp", color="Year", width=1500, height=700,
                     labels={"DayOfYear": "Day of Year", "Temp": "Temperature"},
                     title="Q2(1) The relation between the day in the year and the temperature in Israel")
    fig.update_xaxes(range=[0, 365], tick0=0, dtick=20)
    fig.show()
    # Per-month standard deviation of the daily temperatures.
    std_by_month = data.groupby("Month").std().reset_index()
    fig = px.bar(std_by_month, x="Month", y="Temp", width=1500, height=700,
                 labels={"Temp": "Std of the daily temperatures"},
                 title="Q2(2) The Standard Deviation of the Daily Temperatures Per Month in Israel")
    # Annotate each bar with the rounded std value.
    fig.data[-1].text = np.round(std_by_month["Temp"], 3)
    fig.update_xaxes(tick0=1, dtick=1)
    fig.update_traces(textposition='outside')
    fig.show()
def question_3(data):
    """Compare the average monthly temperature across countries (Q3).

    Plots one line per country with error bars given by the per-month
    standard deviation.
    """
    by_country_month = data.groupby(["Country", "Month"])
    monthly_mean = by_country_month.mean().reset_index()
    monthly_std = by_country_month.std().reset_index()
    fig = px.line(
        monthly_mean,
        x="Month",
        y="Temp",
        color="Country",
        error_y=monthly_std["Temp"],
        width=1500,
        height=700,
        labels={"Temp": "Averaged Temperature"},
        title="Q3 The Average Monthly Temperatures in Different Countries",
    )
    fig.update_xaxes(tick0=1, dtick=1)
    fig.show()
def question_4(data):
    """ Fitting model for different values of `k` """
    # Restrict to Israel and split day-of-year -> temperature into train/test.
    data = data[data["Country"] == "Israel"]
    train_X, train_y, test_X, test_y = split_train_test(data["DayOfYear"], data["Temp"])
    losses = np.array([])
    # Fit a polynomial model for each degree k in 1..10 and record test MSE.
    for k in range(1, 11):
        poly_fit = PolynomialFitting(k)
        poly_fit.fit(train_X.to_numpy(), train_y.to_numpy())
        loss = poly_fit.loss(test_X.to_numpy(), test_y.to_numpy())
        losses = np.append(losses, round(loss, 2))
        # NOTE(review): prints the unrounded loss while the chart shows the
        # rounded value.
        print(k, loss)
    fig = px.bar(x=range(1, 11), y=losses, width=1500, height=700,
                 labels={"x": "Polynomials Degrees (k)", "y": "Test Error (MSE)"},
                 title="Q4 Test Errors for Different Polynomials Degrees (k)")
    # Annotate each bar with its rounded loss.
    fig.data[-1].text = losses
    fig.update_xaxes(tick0=1, dtick=1)
    fig.update_traces(textposition="outside")
    fig.show()
def question_5(data):
    """Evaluate a degree-5 model fitted on Israel against other countries (Q5).

    Fits PolynomialFitting(k=5) on the Israeli data and plots the MSE it
    attains on Jordan, South Africa and The Netherlands.
    """
    israel = data[data["Country"] == "Israel"]
    model = PolynomialFitting(k=5)
    model.fit(israel["DayOfYear"], israel["Temp"])

    other_countries = ["Jordan", "South Africa", "The Netherlands"]
    per_country_losses = []
    for country in other_countries:
        subset = data[data["Country"] == country]
        per_country_losses.append(model.loss(subset["DayOfYear"], subset["Temp"]))
    losses = np.array(per_country_losses)

    fig = px.bar(x=np.array(other_countries), y=losses, width=700, height=700,
                 labels={"x": "Country", "y": "Losses (MSE)"}, title="Q5 Losses (MSE) per Country With k=5")
    fig.data[-1].text = np.round(losses, 3)
    fig.update_traces(textposition="outside")
    fig.show()
if __name__ == '__main__':
    np.random.seed(0)  # fixed seed so the train/test splits are reproducible
    # Question 1 - Load and preprocessing of city temperature dataset
    data = load_data(CITY_TEMPERATURE_DATA_PATH)

    # Question 2 - Exploring data for specific country
    question_2(data)

    # Question 3 - Exploring differences between countries
    question_3(data)

    # Question 4 - Fitting model for different values of `k`
    question_4(data)

    # Question 5 - Evaluating fitted model on different countries
    question_5(data)
| exercises/city_temperature_prediction.py | 4,714 | Load city daily temperature dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (Temp)
Exploring data specifically in Israel
Exploring differences between countries
Fitting model for different values of `k`
Evaluating fitted model on different countries
invalid Temp Question 1 - Load and preprocessing of city temperature dataset Question 2 - Exploring data for specific country Question 3 - Exploring differences between countries Question 4 - Fitting model for different values of `k` Question 5 - Evaluating fitted model on different countries | 649 | en | 0.777614 |
from tests.testmodels import Event, IntFields, MinRelation, Node, Reporter, Team, Tournament, Tree
from tortoise import Tortoise
from tortoise.contrib import test
from tortoise.exceptions import (
DoesNotExist,
FieldError,
IntegrityError,
MultipleObjectsReturned,
ParamsError,
)
from tortoise.expressions import F, RawSQL, Subquery
# TODO: Test the many exceptions in QuerySet
# TODO: .filter(intnum_null=None) does not work as expected
class TestQueryset(test.TestCase):
    async def asyncSetUp(self):
        """Create 30 IntFields rows (intnum = 10, 13, ..., 97) shared by every test."""
        await super().asyncSetUp()
        # Build large dataset
        self.intfields = [await IntFields.create(intnum=val) for val in range(10, 100, 3)]
        self.db = Tortoise.get_connection("models")
    async def test_all_count(self):
        """count() over the full table and over an empty filter."""
        self.assertEqual(await IntFields.all().count(), 30)
        self.assertEqual(await IntFields.filter(intnum_null=80).count(), 0)
    async def test_exists(self):
        """exists() for matching, non-matching and range filters (dataset min is 10)."""
        ret = await IntFields.filter(intnum=0).exists()
        self.assertFalse(ret)

        ret = await IntFields.filter(intnum=10).exists()
        self.assertTrue(ret)

        ret = await IntFields.filter(intnum__gt=10).exists()
        self.assertTrue(ret)

        ret = await IntFields.filter(intnum__lt=10).exists()
        self.assertFalse(ret)
    async def test_limit_count(self):
        """limit() caps the row count."""
        self.assertEqual(await IntFields.all().limit(10).count(), 10)
    async def test_limit_negative(self):
        """A negative limit raises ParamsError."""
        with self.assertRaisesRegex(ParamsError, "Limit should be non-negative number"):
            await IntFields.all().limit(-10)
    async def test_offset_count(self):
        """offset() skips rows before counting (30 total - 10 skipped = 20)."""
        self.assertEqual(await IntFields.all().offset(10).count(), 20)
    async def test_offset_negative(self):
        """A negative offset raises ParamsError."""
        with self.assertRaisesRegex(ParamsError, "Offset should be non-negative number"):
            await IntFields.all().offset(-10)
    async def test_join_count(self):
        """count() through a foreign-key join filter."""
        tour = await Tournament.create(name="moo")
        await MinRelation.create(tournament=tour)

        self.assertEqual(await MinRelation.all().count(), 1)
        self.assertEqual(await MinRelation.filter(tournament__id=tour.id).count(), 1)
    async def test_modify_dataset(self):
        """Bulk update() returns the affected-row count and is visible to filters."""
        # Modify dataset
        rows_affected = await IntFields.filter(intnum__gte=70).update(intnum_null=80)
        self.assertEqual(rows_affected, 10)
        self.assertEqual(await IntFields.filter(intnum_null=80).count(), 10)
        self.assertEqual(await IntFields.filter(intnum_null__isnull=True).count(), 20)
        await IntFields.filter(intnum_null__isnull=True).update(intnum_null=-1)
        self.assertEqual(await IntFields.filter(intnum_null=None).count(), 0)
        self.assertEqual(await IntFields.filter(intnum_null=-1).count(), 20)
    async def test_distinct(self):
        """distinct() deduplicates values()/values_list() output."""
        # Test distinct
        await IntFields.filter(intnum__gte=70).update(intnum_null=80)
        await IntFields.filter(intnum_null__isnull=True).update(intnum_null=-1)

        self.assertEqual(
            await IntFields.all()
            .order_by("intnum_null")
            .distinct()
            .values_list("intnum_null", flat=True),
            [-1, 80],
        )

        self.assertEqual(
            await IntFields.all().order_by("intnum_null").distinct().values("intnum_null"),
            [{"intnum_null": -1}, {"intnum_null": 80}],
        )
    async def test_limit_offset_values_list(self):
        """limit/offset/order_by combinations with values_list (flat)."""
        # Test limit/offset/ordering values_list
        self.assertEqual(
            await IntFields.all().order_by("intnum").limit(10).values_list("intnum", flat=True),
            [10, 13, 16, 19, 22, 25, 28, 31, 34, 37],
        )

        self.assertEqual(
            await IntFields.all()
            .order_by("intnum")
            .limit(10)
            .offset(10)
            .values_list("intnum", flat=True),
            [40, 43, 46, 49, 52, 55, 58, 61, 64, 67],
        )

        self.assertEqual(
            await IntFields.all()
            .order_by("intnum")
            .limit(10)
            .offset(20)
            .values_list("intnum", flat=True),
            [70, 73, 76, 79, 82, 85, 88, 91, 94, 97],
        )

        # Offset past the end of the table yields an empty result.
        self.assertEqual(
            await IntFields.all()
            .order_by("intnum")
            .limit(10)
            .offset(30)
            .values_list("intnum", flat=True),
            [],
        )

        self.assertEqual(
            await IntFields.all().order_by("-intnum").limit(10).values_list("intnum", flat=True),
            [97, 94, 91, 88, 85, 82, 79, 76, 73, 70],
        )

        self.assertEqual(
            await IntFields.all()
            .order_by("intnum")
            .limit(10)
            .filter(intnum__gte=40)
            .values_list("intnum", flat=True),
            [40, 43, 46, 49, 52, 55, 58, 61, 64, 67],
        )
async def test_limit_offset_values(self):
# Test limit/offset/ordering values
self.assertEqual(
await IntFields.all().order_by("intnum").limit(5).values("intnum"),
[{"intnum": 10}, {"intnum": 13}, {"intnum": 16}, {"intnum": 19}, {"intnum": 22}],
)
self.assertEqual(
await IntFields.all().order_by("intnum").limit(5).offset(10).values("intnum"),
[{"intnum": 40}, {"intnum": 43}, {"intnum": 46}, {"intnum": 49}, {"intnum": 52}],
)
self.assertEqual(
await IntFields.all().order_by("intnum").limit(5).offset(30).values("intnum"), []
)
self.assertEqual(
await IntFields.all().order_by("-intnum").limit(5).values("intnum"),
[{"intnum": 97}, {"intnum": 94}, {"intnum": 91}, {"intnum": 88}, {"intnum": 85}],
)
self.assertEqual(
await IntFields.all()
.order_by("intnum")
.limit(5)
.filter(intnum__gte=40)
.values("intnum"),
[{"intnum": 40}, {"intnum": 43}, {"intnum": 46}, {"intnum": 49}, {"intnum": 52}],
)
async def test_in_bulk(self):
    """in_bulk() maps the requested PKs to objects, preserving input order."""
    id_list = [item.pk for item in await IntFields.all().only("id").limit(2)]
    ret = await IntFields.in_bulk(id_list=id_list)
    self.assertEqual(list(ret.keys()), id_list)

async def test_first(self):
    """first() returns the first row (or None), including via values()/values_list()."""
    # Test first
    self.assertEqual(
        (await IntFields.all().order_by("intnum").filter(intnum__gte=40).first()).intnum, 40
    )
    self.assertEqual(
        (await IntFields.all().order_by("intnum").filter(intnum__gte=40).first().values())[
            "intnum"
        ],
        40,
    )
    # values_list() row is (id, intnum, intnum_null); index 1 is intnum.
    self.assertEqual(
        (await IntFields.all().order_by("intnum").filter(intnum__gte=40).first().values_list())[
            1
        ],
        40,
    )
    # No match: first() resolves to None instead of raising.
    self.assertEqual(
        await IntFields.all().order_by("intnum").filter(intnum__gte=400).first(), None
    )
    self.assertEqual(
        await IntFields.all().order_by("intnum").filter(intnum__gte=400).first().values(), None
    )
    self.assertEqual(
        await IntFields.all().order_by("intnum").filter(intnum__gte=400).first().values_list(),
        None,
    )

async def test_get_or_none(self):
    """get_or_none() returns None when missing, but still raises on multiple rows."""
    self.assertEqual((await IntFields.all().get_or_none(intnum=40)).intnum, 40)
    self.assertEqual((await IntFields.all().get_or_none(intnum=40).values())["intnum"], 40)
    self.assertEqual((await IntFields.all().get_or_none(intnum=40).values_list())[1], 40)
    self.assertEqual(
        await IntFields.all().order_by("intnum").get_or_none(intnum__gte=400), None
    )
    self.assertEqual(
        await IntFields.all().order_by("intnum").get_or_none(intnum__gte=400).values(), None
    )
    self.assertEqual(
        await IntFields.all().order_by("intnum").get_or_none(intnum__gte=400).values_list(),
        None,
    )
    with self.assertRaises(MultipleObjectsReturned):
        await IntFields.all().order_by("intnum").get_or_none(intnum__gte=40)
    with self.assertRaises(MultipleObjectsReturned):
        await IntFields.all().order_by("intnum").get_or_none(intnum__gte=40).values()
    with self.assertRaises(MultipleObjectsReturned):
        await IntFields.all().order_by("intnum").get_or_none(intnum__gte=40).values_list()
async def test_get(self):
    """get() returns exactly one row; raises DoesNotExist / MultipleObjectsReturned."""
    await IntFields.filter(intnum__gte=70).update(intnum_null=80)
    # Test get
    self.assertEqual((await IntFields.all().get(intnum=40)).intnum, 40)
    self.assertEqual((await IntFields.all().get(intnum=40).values())["intnum"], 40)
    self.assertEqual((await IntFields.all().get(intnum=40).values_list())[1], 40)
    # Chained .all() calls are idempotent.
    self.assertEqual((await IntFields.all().all().all().all().all().get(intnum=40)).intnum, 40)
    self.assertEqual(
        (await IntFields.all().all().all().all().all().get(intnum=40).values())["intnum"], 40
    )
    self.assertEqual(
        (await IntFields.all().all().all().all().all().get(intnum=40).values_list())[1], 40
    )
    self.assertEqual((await IntFields.get(intnum=40)).intnum, 40)
    self.assertEqual((await IntFields.get(intnum=40).values())["intnum"], 40)
    self.assertEqual((await IntFields.get(intnum=40).values_list())[1], 40)
    with self.assertRaises(DoesNotExist):
        await IntFields.all().get(intnum=41)
    with self.assertRaises(DoesNotExist):
        await IntFields.all().get(intnum=41).values()
    with self.assertRaises(DoesNotExist):
        await IntFields.all().get(intnum=41).values_list()
    with self.assertRaises(DoesNotExist):
        await IntFields.get(intnum=41)
    with self.assertRaises(DoesNotExist):
        await IntFields.get(intnum=41).values()
    with self.assertRaises(DoesNotExist):
        await IntFields.get(intnum=41).values_list()
    with self.assertRaises(MultipleObjectsReturned):
        await IntFields.all().get(intnum_null=80)
    with self.assertRaises(MultipleObjectsReturned):
        await IntFields.all().get(intnum_null=80).values()
    with self.assertRaises(MultipleObjectsReturned):
        await IntFields.all().get(intnum_null=80).values_list()
    with self.assertRaises(MultipleObjectsReturned):
        await IntFields.get(intnum_null=80)
    with self.assertRaises(MultipleObjectsReturned):
        await IntFields.get(intnum_null=80).values()
    with self.assertRaises(MultipleObjectsReturned):
        await IntFields.get(intnum_null=80).values_list()

async def test_delete(self):
    """Row deletion via instance .delete() and bulk queryset .delete()."""
    # Test delete
    await (await IntFields.get(intnum=40)).delete()
    with self.assertRaises(DoesNotExist):
        await IntFields.get(intnum=40)
    self.assertEqual(await IntFields.all().count(), 29)
    rows_affected = (
        await IntFields.all().order_by("intnum").limit(10).filter(intnum__gte=70).delete()
    )
    self.assertEqual(rows_affected, 10)
    self.assertEqual(await IntFields.all().count(), 19)

@test.requireCapability(support_update_limit_order_by=True)
async def test_delete_limit(self):
    """DELETE honouring LIMIT (backend capability gated)."""
    await IntFields.all().limit(1).delete()
    self.assertEqual(await IntFields.all().count(), 29)

@test.requireCapability(support_update_limit_order_by=True)
async def test_delete_limit_order_by(self):
    """DELETE with LIMIT + ORDER BY removes the highest-intnum row (97)."""
    await IntFields.all().limit(1).order_by("-id").delete()
    self.assertEqual(await IntFields.all().count(), 29)
    with self.assertRaises(DoesNotExist):
        await IntFields.get(intnum=97)

async def test_async_iter(self):
    """QuerySets support `async for` iteration over all rows."""
    counter = 0
    async for _ in IntFields.all():
        counter += 1
    self.assertEqual(await IntFields.all().count(), counter)
async def test_update_basic(self):
    """update() changes only the named fields; others stay untouched."""
    obj0 = await IntFields.create(intnum=2147483647)
    await IntFields.filter(id=obj0.id).update(intnum=2147483646)
    obj = await IntFields.get(id=obj0.id)
    self.assertEqual(obj.intnum, 2147483646)
    self.assertEqual(obj.intnum_null, None)

async def test_update_f_expression(self):
    """update() accepts F-expressions evaluated server-side."""
    obj0 = await IntFields.create(intnum=2147483647)
    await IntFields.filter(id=obj0.id).update(intnum=F("intnum") - 1)
    obj = await IntFields.get(id=obj0.id)
    self.assertEqual(obj.intnum, 2147483646)

async def test_update_badparam(self):
    """Unknown field names passed to update() raise FieldError."""
    obj0 = await IntFields.create(intnum=2147483647)
    with self.assertRaisesRegex(FieldError, "Unknown keyword argument"):
        await IntFields.filter(id=obj0.id).update(badparam=1)

async def test_update_pk(self):
    """Primary keys cannot be rewritten through update()."""
    obj0 = await IntFields.create(intnum=2147483647)
    with self.assertRaisesRegex(IntegrityError, "is PK and can not be updated"):
        await IntFields.filter(id=obj0.id).update(id=1)

async def test_update_virtual(self):
    """Virtual (relational) fields cannot be updated via update()."""
    tour = await Tournament.create(name="moo")
    obj0 = await MinRelation.create(tournament=tour)
    with self.assertRaisesRegex(FieldError, "is virtual and can not be updated"):
        await MinRelation.filter(id=obj0.id).update(participants=[])

async def test_bad_ordering(self):
    """order_by() on an unknown field raises FieldError."""
    with self.assertRaisesRegex(FieldError, "Unknown field moo1fip for model IntFields"):
        await IntFields.all().order_by("moo1fip")

async def test_duplicate_values(self):
    """values() rejects duplicate keys."""
    with self.assertRaisesRegex(FieldError, "Duplicate key intnum"):
        await IntFields.all().values("intnum", "intnum")

async def test_duplicate_values_list(self):
    # Duplicates are legal for values_list(): columns are positional, not keyed.
    await IntFields.all().values_list("intnum", "intnum")

async def test_duplicate_values_kw(self):
    """A kwarg alias colliding with a positional field is a duplicate key."""
    with self.assertRaisesRegex(FieldError, "Duplicate key intnum"):
        await IntFields.all().values("intnum", intnum="intnum_null")

async def test_duplicate_values_kw_badmap(self):
    """A kwarg alias pointing at a nonexistent field raises FieldError."""
    with self.assertRaisesRegex(FieldError, 'Unknown field "intnum2" for model "IntFields"'):
        await IntFields.all().values(intnum="intnum2")

async def test_bad_values(self):
    """values() with an unknown field raises FieldError."""
    with self.assertRaisesRegex(FieldError, 'Unknown field "int2num" for model "IntFields"'):
        await IntFields.all().values("int2num")

async def test_bad_values_list(self):
    """values_list() with an unknown field raises FieldError."""
    with self.assertRaisesRegex(FieldError, 'Unknown field "int2num" for model "IntFields"'):
        await IntFields.all().values_list("int2num")

async def test_many_flat_values_list(self):
    """flat=True is only valid for a single-column values_list()."""
    with self.assertRaisesRegex(
        TypeError, "You can flat value_list only if contains one field"
    ):
        await IntFields.all().values_list("intnum", "intnum_null", flat=True)

async def test_all_flat_values_list(self):
    """flat=True with no explicit fields (all columns) is also rejected."""
    with self.assertRaisesRegex(
        TypeError, "You can flat value_list only if contains one field"
    ):
        await IntFields.all().values_list(flat=True)

async def test_all_values_list(self):
    """values_list() with no fields returns full rows as tuples."""
    data = await IntFields.all().order_by("id").values_list()
    self.assertEqual(data[2], (self.intfields[2].id, 16, None))

async def test_all_values(self):
    """values() with no fields returns full rows as dicts."""
    data = await IntFields.all().order_by("id").values()
    self.assertEqual(data[2], {"id": self.intfields[2].id, "intnum": 16, "intnum_null": None})

async def test_order_by_bad_value(self):
    """order_by() error surfaces even when chained with values_list()."""
    with self.assertRaisesRegex(FieldError, "Unknown field badid for model IntFields"):
        await IntFields.all().order_by("badid").values_list()
async def test_annotate_order_expression(self):
    """Annotations can drive order_by and be selected via values_list."""
    data = (
        await IntFields.annotate(idp=F("id") + 1)
        .order_by("-idp")
        .first()
        .values_list("id", "idp")
    )
    self.assertEqual(data[0] + 1, data[1])

async def test_annotate_expression_filter(self):
    """Filtering applies to the annotated expression that shadows the field."""
    count = await IntFields.annotate(intnum=F("intnum") + 1).filter(intnum__gt=30).count()
    self.assertEqual(count, 23)

async def test_get_raw_sql(self):
    """sql() exposes the generated SELECT statement as a string."""
    sql = IntFields.all().sql()
    self.assertRegex(sql, r"^SELECT.+FROM.+")

@test.requireCapability(support_index_hint=True)
async def test_force_index(self):
    """FORCE INDEX hint appears in the SQL; repeated sql() calls are stable."""
    sql = IntFields.filter(pk=1).only("id").force_index("index_name").sql()
    self.assertEqual(
        sql,
        "SELECT `id` `id` FROM `intfields` FORCE INDEX (`index_name`) WHERE `id`=1",
    )
    # Building the SQL twice must not mutate the query (regression guard).
    sql_again = IntFields.filter(pk=1).only("id").force_index("index_name").sql()
    self.assertEqual(
        sql_again,
        "SELECT `id` `id` FROM `intfields` FORCE INDEX (`index_name`) WHERE `id`=1",
    )

@test.requireCapability(support_index_hint=True)
async def test_force_index_avaiable_in_more_query(self):
    """FORCE INDEX propagates into values/values_list/count/exists queries."""
    # NOTE(review): "avaiable" in the test name is a typo ("available");
    # renaming would change the test id, so it is left as-is.
    sql_ValuesQuery = IntFields.filter(pk=1).force_index("index_name").values("id").sql()
    self.assertEqual(
        sql_ValuesQuery,
        "SELECT `id` `id` FROM `intfields` FORCE INDEX (`index_name`) WHERE `id`=1",
    )
    sql_ValuesListQuery = (
        IntFields.filter(pk=1).force_index("index_name").values_list("id").sql()
    )
    self.assertEqual(
        sql_ValuesListQuery,
        "SELECT `id` `0` FROM `intfields` FORCE INDEX (`index_name`) WHERE `id`=1",
    )
    sql_CountQuery = IntFields.filter(pk=1).force_index("index_name").count().sql()
    self.assertEqual(
        sql_CountQuery,
        "SELECT COUNT(*) FROM `intfields` FORCE INDEX (`index_name`) WHERE `id`=1",
    )
    sql_ExistsQuery = IntFields.filter(pk=1).force_index("index_name").exists().sql()
    self.assertEqual(
        sql_ExistsQuery,
        "SELECT 1 FROM `intfields` FORCE INDEX (`index_name`) WHERE `id`=1 LIMIT 1",
    )

@test.requireCapability(support_index_hint=True)
async def test_use_index(self):
    """USE INDEX hint appears in the SQL; repeated sql() calls are stable."""
    sql = IntFields.filter(pk=1).only("id").use_index("index_name").sql()
    self.assertEqual(
        sql,
        "SELECT `id` `id` FROM `intfields` USE INDEX (`index_name`) WHERE `id`=1",
    )
    sql_again = IntFields.filter(pk=1).only("id").use_index("index_name").sql()
    self.assertEqual(
        sql_again,
        "SELECT `id` `id` FROM `intfields` USE INDEX (`index_name`) WHERE `id`=1",
    )

@test.requireCapability(support_index_hint=True)
async def test_use_index_avaiable_in_more_query(self):
    """USE INDEX propagates into values/values_list/count/exists queries."""
    sql_ValuesQuery = IntFields.filter(pk=1).use_index("index_name").values("id").sql()
    self.assertEqual(
        sql_ValuesQuery,
        "SELECT `id` `id` FROM `intfields` USE INDEX (`index_name`) WHERE `id`=1",
    )
    sql_ValuesListQuery = IntFields.filter(pk=1).use_index("index_name").values_list("id").sql()
    self.assertEqual(
        sql_ValuesListQuery,
        "SELECT `id` `0` FROM `intfields` USE INDEX (`index_name`) WHERE `id`=1",
    )
    sql_CountQuery = IntFields.filter(pk=1).use_index("index_name").count().sql()
    self.assertEqual(
        sql_CountQuery,
        "SELECT COUNT(*) FROM `intfields` USE INDEX (`index_name`) WHERE `id`=1",
    )
    sql_ExistsQuery = IntFields.filter(pk=1).use_index("index_name").exists().sql()
    self.assertEqual(
        sql_ExistsQuery,
        "SELECT 1 FROM `intfields` USE INDEX (`index_name`) WHERE `id`=1 LIMIT 1",
    )
@test.requireCapability(support_for_update=True)
async def test_select_for_update(self):
    """FOR UPDATE row-locking clauses, checked per dialect (postgres vs mysql)."""
    sql1 = IntFields.filter(pk=1).only("id").select_for_update().sql()
    sql2 = IntFields.filter(pk=1).only("id").select_for_update(nowait=True).sql()
    sql3 = IntFields.filter(pk=1).only("id").select_for_update(skip_locked=True).sql()
    sql4 = IntFields.filter(pk=1).only("id").select_for_update(of=("intfields",)).sql()
    dialect = self.db.schema_generator.DIALECT
    if dialect == "postgres":
        self.assertEqual(
            sql1,
            'SELECT "id" "id" FROM "intfields" WHERE "id"=1 FOR UPDATE',
        )
        self.assertEqual(
            sql2,
            'SELECT "id" "id" FROM "intfields" WHERE "id"=1 FOR UPDATE NOWAIT',
        )
        self.assertEqual(
            sql3,
            'SELECT "id" "id" FROM "intfields" WHERE "id"=1 FOR UPDATE SKIP LOCKED',
        )
        self.assertEqual(
            sql4,
            'SELECT "id" "id" FROM "intfields" WHERE "id"=1 FOR UPDATE OF "intfields"',
        )
    elif dialect == "mysql":
        self.assertEqual(
            sql1,
            "SELECT `id` `id` FROM `intfields` WHERE `id`=1 FOR UPDATE",
        )
        self.assertEqual(
            sql2,
            "SELECT `id` `id` FROM `intfields` WHERE `id`=1 FOR UPDATE NOWAIT",
        )
        self.assertEqual(
            sql3,
            "SELECT `id` `id` FROM `intfields` WHERE `id`=1 FOR UPDATE SKIP LOCKED",
        )
        self.assertEqual(
            sql4,
            "SELECT `id` `id` FROM `intfields` WHERE `id`=1 FOR UPDATE OF `intfields`",
        )

async def test_select_related(self):
    """select_related() eagerly loads FK targets in a single query."""
    tournament = await Tournament.create(name="1")
    reporter = await Reporter.create(name="Reporter")
    event = await Event.create(name="1", tournament=tournament, reporter=reporter)
    event = await Event.all().select_related("tournament", "reporter").get(pk=event.pk)
    self.assertEqual(event.tournament.pk, tournament.pk)
    self.assertEqual(event.reporter.pk, reporter.pk)

async def test_select_related_with_two_same_models(self):
    """Two FKs to the same model must each resolve to the correct row."""
    parent_node = await Node.create(name="1")
    child_node = await Node.create(name="2")
    tree = await Tree.create(parent=parent_node, child=child_node)
    tree = await Tree.all().select_related("parent", "child").get(pk=tree.pk)
    self.assertEqual(tree.parent.pk, parent_node.pk)
    self.assertEqual(tree.parent.name, parent_node.name)
    self.assertEqual(tree.child.pk, child_node.pk)
    self.assertEqual(tree.child.name, child_node.name)

@test.requireCapability(dialect="postgres")
async def test_postgres_search(self):
    """Postgres-only full-text `__search` lookup."""
    name = "hello world"
    await Tournament.create(name=name)
    ret = await Tournament.filter(name__search="hello").first()
    self.assertEqual(ret.name, name)
async def test_subquery_select(self):
    """Subquery() usable as an annotated, selectable expression."""
    t1 = await Tournament.create(name="1")
    ret = (
        await Tournament.filter(pk=t1.pk)
        .annotate(ids=Subquery(Tournament.filter(pk=t1.pk).values("id")))
        .values("ids", "id")
    )
    self.assertEqual(ret, [{"id": t1.pk, "ids": t1.pk}])

async def test_subquery_access(self):
    """This test ensures that accessing a query does not modify it (#780)"""
    tournament_1 = await Tournament.create(name="1")
    event_1 = await Event.create(event_id=1, name="event 1", tournament=tournament_1)
    event_2 = await Event.create(event_id=2, name="event 2", tournament=tournament_1)
    team_1 = await Team.create(id=1, name="team 1")
    team_2 = await Team.create(id=2, name="team 2")
    await event_1.participants.add(team_1)
    await event_2.participants.add(team_1, team_2)
    self.assertEqual(await event_1.participants.all(), [team_1])
    self.assertEqual(await event_2.participants.all(), [team_1, team_2])
    sub_query_team_1 = Subquery(Event.filter(participants__id=1).values("event_id"))
    sub_query_team_2 = Subquery(Event.filter(participants__id=2).values("event_id"))
    query = Event.filter(pk__in=sub_query_team_1)  # should select event 1 and event 2
    query = query.filter(pk__in=sub_query_team_2)  # should select only event 2
    # Each accessor is called twice: sql()/count() must be side-effect free.
    self.assertEqual(query.sql(), query.sql())
    self.assertEqual(await query.count(), await query.count())
    self.assertEqual(await query.count(), 1)
    self.assertEqual(await query.all(), [event_2])

async def test_subquery_filter(self):
    """Subquery() usable directly as a filter value."""
    t1 = await Tournament.create(name="1")
    ret = await Tournament.filter(pk=Subquery(Tournament.filter(pk=t1.pk).values("id"))).first()
    self.assertEqual(ret, t1)

async def test_raw_sql_count(self):
    """RawSQL() as an annotation selected through values()."""
    t1 = await Tournament.create(name="1")
    ret = await Tournament.filter(pk=t1.pk).annotate(count=RawSQL("count(*)")).values("count")
    self.assertEqual(ret, [{"count": 1}])

async def test_raw_sql_select(self):
    """RawSQL() annotations can be filtered on and selected."""
    t1 = await Tournament.create(id=1, name="1")
    ret = (
        await Tournament.filter(pk=t1.pk)
        .annotate(idp=RawSQL("id + 1"))
        .filter(idp=2)
        .values("idp")
    )
    self.assertEqual(ret, [{"idp": 2}])

async def test_raw_sql_filter(self):
    """RawSQL() as a filter value (id == id + 1 matches nothing)."""
    ret = await Tournament.filter(pk=RawSQL("id + 1"))
    self.assertEqual(ret, [])

async def test_annotation_field_priorior_to_model_field(self):
    # Sometimes, field name in annotates also exist in model field sets
    # and may need lift the former's priority in select query construction.
    # NOTE(review): "priorior" in the name is a typo ("prior"); renaming
    # would change the test id, so it is left as-is.
    t1 = await Tournament.create(name="1")
    ret = await Tournament.filter(pk=t1.pk).annotate(id=RawSQL("id + 1")).values("id")
    self.assertEqual(ret, [{"id": t1.pk + 1}])
| tests/test_queryset.py | 25,423 | TODO: Test the many exceptions in QuerySet TODO: .filter(intnum_null=None) does not work as expected Build large dataset Modify dataset Test distinct Test limit/offset/ordering values_list Test limit/offset/ordering values Test first Test get Test delete should select event 1 and event 2 should select only event 2 Sometimes, field name in annotates also exist in model field sets and may need lift the former's priority in select query construction. | 451 | en | 0.839766 |
"""
Output demo
^^^^^^^^^^^^^^
Demonstrate various output usage supported by PyWebIO
:demo_host:`Demo </?pywebio_api=output_usage>` `Source code <https://github.com/wang0618/PyWebIO/blob/dev/demos/output_usage.py>`_
"""
from pywebio import start_server
from pywebio.output import *
from pywebio.session import hold, get_info
from functools import partial
def t(eng, chinese):
    """Pick the English or Chinese text based on the browser's reported language."""
    if 'zh' in get_info().user_language:
        return chinese
    return eng
def code_block(code, strip_indent=4):
    """Render *code* as a highlighted block with a "Run" button that exec()s it.

    strip_indent: number of leading spaces removed from every line, so callers
    can keep the snippet indented inside the enclosing Python source.
    """
    if strip_indent:
        lines = (
            i[strip_indent:] if (i[:strip_indent] == ' ' * strip_indent) else i
            for i in code.splitlines()
        )
        code = '\n'.join(lines)
    code = code.strip('\n')

    def run_code(code, scope):
        # exec() of demo-authored snippets only -- never user-supplied input.
        with use_scope(scope):
            exec(code, globals())

    with use_scope() as scope:
        put_code(code, 'python')
        # onclick is given as a LIST, so PyWebIO invokes each callback with
        # no arguments -- the partial() therefore receives only its kwargs.
        put_buttons([{'label': t('Run', '运行'), 'value': '', 'color': 'success'}],
                    onclick=[partial(run_code, code=code, scope=scope)], small=True)
async def main():
    """PyWebIO Output demo

    Demonstrate various output usage supported by PyWebIO.
    演示PyWebIO输出模块的使用
    """
    # Every markdown blob is passed through t() -- English variant first,
    # Chinese second. strip_indent=4 removes the source-level indentation.
    # NOTE(review): the English intro lacks the "### Basic output" heading
    # that the Chinese variant ends with -- presumably an upstream omission.
    put_markdown(t("""# PyWebIO Output demo

    You can get the source code of this demo in [here](https://github.com/wang0618/PyWebIO/blob/dev/demos/output_usage.py)

    This demo only introduces part of the functions of the PyWebIO output module. For the complete features, please refer to the [User Guide](https://pywebio.readthedocs.io/zh_CN/latest/guide.html).

    The output functions are all defined in the `pywebio.output` module and can be imported using `from pywebio.output import *`.

    """, """# PyWebIO 输出演示

    在[这里](https://github.com/wang0618/PyWebIO/blob/dev/demos/output_usage.py)可以获取本Demo的源码。

    本Demo仅提供了PyWebIO输出模块的部分功能的演示,完整特性请参阅[用户指南](https://pywebio.readthedocs.io/zh_CN/latest/guide.html)。

    PyWebIO的输出函数都定义在 `pywebio.output` 模块中,可以使用 `from pywebio.output import *` 引入。

    ### 基本输出
    PyWebIO提供了一些便捷函数来输出表格、链接等格式:
    """), strip_indent=4)

    code_block(t(r"""
    # Text Output
    put_text("Hello world!")

    # Table Output
    put_table([
        ['Commodity', 'Price'],
        ['Apple', '5.5'],
        ['Banana', '7'],
    ])

    # Markdown Output
    put_markdown('~~Strikethrough~~')

    # File Output
    put_file('hello_word.txt', b'hello word!')
    """, r"""
    # 文本输出
    put_text("Hello world!")

    # 表格输出
    put_table([
        ['商品', '价格'],
        ['苹果', '5.5'],
        ['香蕉', '7'],
    ])

    # Markdown输出
    put_markdown('~~删除线~~')

    # 文件输出
    put_file('hello_word.txt', b'hello word!')
    """))

    put_markdown(t(r"""For all output functions provided by PyWebIO, please refer to the document.

    ### Combined Output
    The output functions whose name starts with put_ can be combined with some output functions as part of the final output:

    You can pass `put_xxx()` calls to `put_table()` as cell content:
    """, r"""PyWebIO提供的全部输出函数请参考PyWebIO文档

    ### 组合输出
    函数名以 `put_` 开始的输出函数,可以与一些输出函数组合使用,作为最终输出的一部分。

    比如`put_table()`支持以`put_xxx()`调用作为单元格内容:
    """), strip_indent=4)

    code_block(r"""
    put_table([
        ['Type', 'Content'],
        ['html', put_html('X<sup>2</sup>')],
        ['text', '<hr/>'], # equal to ['text', put_text('<hr/>')]
        ['buttons', put_buttons(['A', 'B'], onclick=toast)],
        ['markdown', put_markdown('`Awesome PyWebIO!`')],
        ['file', put_file('hello.text', b'hello world')],
        ['table', put_table([['A', 'B'], ['C', 'D']])]
    ])
    """)

    put_markdown(t(r"Similarly, you can pass `put_xxx()` calls to `popup()` as the popup content:",
                   r"类似地,`popup()`也可以将`put_xxx()`调用作为弹窗内容:"), strip_indent=4)

    code_block(r"""
    popup('Popup title', [
        put_html('<h3>Popup Content</h3>'),
        'plain html: <br/>', # equal to put_text('plain html: <br/>')
        put_table([['A', 'B'], ['C', 'D']]),
        put_buttons(['close_popup()'], onclick=lambda _: close_popup())
    ])
    """)

    put_markdown(t(r"For more output functions that accept `put_xxx()` calls as parameters, please refer to corresponding function documentation.",
                   r"更多接受`put_xxx()`作为参数的输出函数请参考函数文档。"))

    # Callback section: the %s placeholder in both language variants is
    # substituted with the same shared code sample below.
    # NOTE(review): the sample string says "click`%s` button ar row" --
    # upstream typos ("click `%s`", "at row"); kept byte-identical here.
    put_markdown(t(r"""### Callback
    PyWebIO allows you to output some buttons, and the provided callback function will be executed when the button is clicked.

    This is an example:%s
    The call to `put_table()` will not block. When user clicks a button, the corresponding callback function will be invoked:
    """, r"""### 事件回调
    PyWebIO允许你输出一些控件,当控件被点击时执行提供的回调函数,就像编写GUI程序一样。

    下面是一个例子:%s
    `put_table()`的调用不会阻塞。当用户点击了某行中的按钮时,PyWebIO会自动调用相应的回调函数:
    """) % """
    ```python
    from functools import partial

    def edit_row(choice, row):
        put_markdown("> You click`%s` button ar row `%s`" % (choice, row))

    put_table([
        ['Idx', 'Actions'],
        [1, put_buttons(['edit', 'delete'], onclick=partial(edit_row, row=1))],
        [2, put_buttons(['edit', 'delete'], onclick=partial(edit_row, row=2))],
        [3, put_buttons(['edit', 'delete'], onclick=partial(edit_row, row=3))],
    ])
    ```
    """, strip_indent=4)

    from functools import partial

    # Live version of the sample above; outputs land in the pinned scope.
    @use_scope('table-callback')
    def edit_row(choice, row):
        put_markdown("> You click `%s` button ar row `%s`" % (choice, row))

    put_table([
        ['Idx', 'Actions'],
        [1, put_buttons(['edit', 'delete'], onclick=partial(edit_row, row=1))],
        [2, put_buttons(['edit', 'delete'], onclick=partial(edit_row, row=2))],
        [3, put_buttons(['edit', 'delete'], onclick=partial(edit_row, row=3))],
    ])
    set_scope('table-callback')

    put_markdown(t("Of course, PyWebIO also supports outputting individual button:", "当然,PyWebIO还支持单独的按钮控件:") + r"""
    ```python
    def btn_click(btn_val):
        put_markdown("> You click `%s` button" % btn_val)

    put_buttons(['A', 'B', 'C'], onclick=btn_click)
    ```
    """, strip_indent=4)

    @use_scope('button-callback')
    def btn_click(btn_val):
        put_markdown("> You click `%s` button" % btn_val)

    put_buttons(['A', 'B', 'C'], onclick=btn_click)
    set_scope('button-callback')

    # Scope section.
    put_markdown(t(r"""### Output Scope
    PyWebIO uses the scope model to give more control to the location of content output. The output area of PyWebIO can be divided into different output domains. The output domain is called Scope in PyWebIO.

    The output domain is a container of output content, and each output domain is arranged vertically, and the output domains can also be nested.

    Each output function (function name like `put_xxx()`) will output its content to a scope, the default is "current scope". "current scope" is determined by the runtime context. The output function can also manually specify the scope to output. The scope name is unique within the session.

    You can use `use_scope()` to open and enter a new output scope, or enter an existing output scope: %s
    The above code will generate the following Scope layout:
    """, r"""### 输出域Scope
    PyWebIO使用Scope模型来对内容输出的位置进行灵活地控制,PyWebIO的内容输出区可以划分出不同的输出域,PyWebIO将输出域称作`Scope`。

    输出域为输出内容的容器,各个输出域之间上下排列,输出域也可以进行嵌套。

    每个输出函数(函数名形如 `put_xxx()` )都会将内容输出到一个Scope,默认为”当前Scope”,”当前Scope”由运行时上下文确定,输出函数也可以手动指定输出到的Scope。Scope名在会话内唯一。

    可以使用 `use_scope()` 开启并进入一个新的输出域,或进入一个已经存在的输出域: %s
    以上代码将会产生如下Scope布局:
    """) % """
    ```python
    with use_scope('A'):
        put_text('Text in scope A')

        with use_scope('B'):
            put_text('Text in scope B')

    with use_scope('C'):
        put_text('Text in scope C')
    ```
    """, strip_indent=4)

    # Live version: scope B is nested inside A; C is a sibling of A.
    with use_scope('A'):
        put_text('Text in scope A')

        with use_scope('B'):
            put_text('Text in scope B')

    with use_scope('C'):
        put_text('Text in scope C')

    # Outline the three scopes so the nesting is visible on the page.
    put_html("""<style>
    #pywebio-scope-A {border: 1px solid red;}
    #pywebio-scope-B {border: 1px solid blue;margin:2px}
    #pywebio-scope-C {border: 1px solid green;margin-top:2px}
    </style><br/>""")

    put_markdown(t(r"""The output function (function name like `put_xxx()`) will output the content to the "current scope" by default, and the "current scope" of the runtime context can be set by `use_scope()`.

    In addition, you can use the `scope` parameter of the output function to specify the destination scope to output:
    """, r"""
    输出函数(函数名形如 `put_xxx()` )在默认情况下,会将内容输出到”当前Scope”,可以通过 `use_scope()` 设置运行时上下文的”当前Scope”。

    此外,也可以通过输出函数的 scope 参数指定输出的目的Scope:
    """), strip_indent=4)

    put_grid([
        [put_code("put_text('A', scope='A')", 'python'), None, put_buttons([t('Run', '运行')], [lambda: put_text('A', scope='A')])],
        [put_code("put_text('B', scope='B')", 'python'), None, put_buttons([t('Run', '运行')], [lambda: put_text('B', scope='B')])],
        [put_code("put_text('C', scope='C')", 'python'), None, put_buttons([t('Run', '运行')], [lambda: put_text('C', scope='C')])],
    ], cell_widths='1fr 10px auto')

    put_markdown(t("The output content can be inserted into any positions of the target scope by using the `position` parameter of the output function.", "输出函数可以使用`position`参数指定内容在Scope中输出的位置") + """
    ```python
    put_text(now(), scope='A', position=...)
    ```
    """, strip_indent=4)

    import datetime
    # Buttons demonstrating positive (from start) / negative (from end) positions.
    put_buttons([('position=%s' % i, i) for i in [1, 2, 3, -1, -2, -3]],
                lambda i: put_text(datetime.datetime.now(), position=i, scope='A'), small=True)

    put_markdown(t(r"In addition to `use_scope()`, PyWebIO also provides the following scope control functions:",
                   r"除了 `use_scope()` , PyWebIO同样提供了以下scope控制函数: "))

    put_grid([
        [put_code("clear('B') # Clear content of Scope B", 'python'), None, put_buttons(['运行'], [lambda: clear('B')])],
        [put_code("remove('C') # Remove Scope C", 'python'), None, put_buttons(['运行'], [lambda: remove('C')])],
        [put_code("scroll_to('A') # Scroll the page to position of Scope A", 'python'), None, put_buttons(['运行'], [lambda: scroll_to('A')])],
    ], cell_widths='1fr 10px auto')

    # Layout section.
    put_markdown(t(r"""### Layout
    In general, using the various output functions introduced above is enough to output what you want, but these outputs are arranged vertically. If you want to make a more complex layout (such as displaying a code block on the left side of the page and an image on the right), you need to use layout functions.

    The `pywebio.output` module provides 3 layout functions, and you can create complex layouts by combining them:

    - `put_row()` : Use row layout to output content. The content is arranged horizontally
    - `put_column()` : Use column layout to output content. The content is arranged vertically
    - `put_grid()` : Output content using grid layout

    Here is an example by combining `put_row()` and `put_column()`:
    """, r"""### 布局
    一般情况下,使用上文介绍的各种输出函数足以完成各种内容的展示,但直接调用输出函数产生的输出之间都是竖直排列的,如果想实现更复杂的布局(比如在页 面左侧显示一个代码块,在右侧显示一个图像),就需要借助布局函数。

    `pywebio.output` 模块提供了3个布局函数,通过对他们进行组合可以完成各种复杂的布局:

    - `put_row()` : 使用行布局输出内容. 内容在水平方向上排列
    - `put_column()` : 使用列布局输出内容. 内容在竖直方向上排列
    - `put_grid()` : 使用网格布局输出内容

    比如,通过通过组合 `put_row()` 和 `put_column()` 实现的布局:
    """), strip_indent=4)

    code_block(r"""
    put_row([
        put_column([
            put_code('A'),
            put_row([
                put_code('B1'), None, # %s
                put_code('B2'), None,
                put_code('B3'),
            ]),
            put_code('C'),
        ]), None,
        put_code('D'), None,
        put_code('E')
    ])
    """ % t('None represents the space between the output', 'None 表示输出之间的空白'))

    # Style section.
    put_markdown(t(r"""### Style
    If you are familiar with CSS styles, you can use the `style()` function to set a custom style for the output.

    You can set the CSS style for a single `put_xxx()` output:
    """, r"""### 样式
    如果你熟悉 CSS样式 ,你还可以使用 `style()` 函数给输出设定自定义样式。

    可以给单个的 `put_xxx()` 输出设定CSS样式,也可以配合组合输出使用:
    """), strip_indent=4)

    code_block(r"""
    style(put_text('Red'), 'color: red')

    put_table([
        ['A', 'B'],
        ['C', style(put_text('Red'), 'color: red')],
    ])
    """, strip_indent=4)

    put_markdown(t(r"`style()` also accepts a list of output calls:", r"`style()` 也接受列表作为输入:"))

    code_block(r"""
    style([
        put_text('Red'),
        put_markdown('~~del~~')
    ], 'color: red')

    put_collapse('title', style([
        put_text('text'),
        put_markdown('~~del~~'),
    ], 'margin-left: 20px'))
    """, strip_indent=4)

    put_markdown(t("""----
    For more information about output of PyWebIO, please visit PyWebIO [User Guide](https://pywebio.readthedocs.io/zh_CN/latest/guide.html) and [output module documentation](https://pywebio.readthedocs.io/zh_CN/latest/output.html).
    """, """----
    PyWebIO的输出演示到这里就结束了,更多内容请访问PyWebIO[用户指南](https://pywebio.readthedocs.io/zh_CN/latest/guide.html)和[output模块文档](https://pywebio.readthedocs.io/zh_CN/latest/output.html)。
    """), lstrip=True)

    # Keep the session (and its callbacks) alive until the client disconnects.
    await hold()
if __name__ == '__main__':
    # cdn=False serves the front-end assets locally instead of from a CDN.
    start_server(main, debug=True, port=8080, cdn=False)
| demos/output_usage.py | 15,342 | return English or Chinese text according to the user's browser language
Output demo
^^^^^^^^^^^^^^
Demonstrate various output usage supported by PyWebIO
:demo_host:`Demo </?pywebio_api=output_usage>` `Source code <https://github.com/wang0618/PyWebIO/blob/dev/demos/output_usage.py>`_ | 285 | en | 0.414437 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-12-30 03:21
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: replaces the separate Subcategory model
    # with a self-referential Category.parent_category, and drops the custom
    # User model in favour of settings.AUTH_USER_MODEL.

    dependencies = [
        ('core', '0001_initial'),
    ]

    operations = [
        migrations.DeleteModel(
            name='Subcategory',
        ),
        migrations.AddField(
            model_name='category',
            name='parent_category',
            # Nullable/blank so root categories have no parent; CASCADE removes
            # child categories together with their parent.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Category'),
        ),
        migrations.AlterField(
            model_name='salepost',
            name='poster',
            # Repoint SalePost.poster at the configured auth user model.
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.DeleteModel(
            name='User',
        ),
    ]
| core/migrations/0002_auto_20161229_2221.py | 943 | -*- coding: utf-8 -*- Generated by Django 1.9.4 on 2016-12-30 03:21 | 67 | en | 0.742361 |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import flash
from flash.core.data.utils import download_data
from flash.image import ObjectDetectionData, ObjectDetector
# 1. Create the DataModule
# Dataset Credit: https://www.kaggle.com/ultralytics/coco128
download_data("https://github.com/zhiqwang/yolov5-rt-stack/releases/download/v0.3.0/coco128.zip", "data/")

datamodule = ObjectDetectionData.from_coco(
    train_folder="data/coco128/images/train2017/",
    train_ann_file="data/coco128/annotations/instances_train2017.json",
    # Hold out 10% of the training annotations for validation.
    val_split=0.1,
    batch_size=2,
)

# 2. Build the task
# num_classes is derived from the COCO annotations loaded above.
model = ObjectDetector(model="retinanet", num_classes=datamodule.num_classes)

# 3. Create the trainer and finetune the model
# gpus=torch.cuda.device_count() uses every available GPU (0 falls back to CPU).
trainer = flash.Trainer(max_epochs=3, gpus=torch.cuda.device_count())
trainer.finetune(model, datamodule=datamodule)

# 4. Detect objects in a few images!
predictions = model.predict(
    [
        "data/coco128/images/train2017/000000000625.jpg",
        "data/coco128/images/train2017/000000000626.jpg",
        "data/coco128/images/train2017/000000000629.jpg",
    ]
)
print(predictions)

# 5. Save the model!
trainer.save_checkpoint("object_detection_model.pt")
| flash_examples/object_detection.py | 1,737 | Copyright The PyTorch Lightning team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 1. Create the DataModule Dataset Credit: https://www.kaggle.com/ultralytics/coco128 2. Build the task 3. Create the trainer and finetune the model 4. Detect objects in a few images! 5. Save the model! | 760 | en | 0.809583 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras initializers for TF 2.
"""
# pylint: disable=g-classes-have-attributes
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from keras import backend
from tensorflow.python.ops import init_ops_v2
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.initializers.Initializer')
class Initializer(object):
  """Base class for Keras initializers; all initializers derive from it.

  Subclasses must provide a `__call__` method with the following signature:

  ```python
  def __call__(self, shape, dtype=None, **kwargs):
    # returns a tensor of shape `shape` and dtype `dtype`
    # containing values drawn from a distribution of your choice.
  ```

  Optionally, you can also implement the method `get_config` and the class
  method `from_config` in order to support serialization -- just like with
  any Keras object.

  Here's a simple example: a random normal initializer.

  ```python
  import tensorflow as tf

  class ExampleRandomNormal(tf.keras.initializers.Initializer):

    def __init__(self, mean, stddev):
      self.mean = mean
      self.stddev = stddev

    def __call__(self, shape, dtype=None, **kwargs):
      return tf.random.normal(
          shape, mean=self.mean, stddev=self.stddev, dtype=dtype)

    def get_config(self):  # To support serialization
      return {"mean": self.mean, "stddev": self.stddev}
  ```

  Note that `from_config` need not be implemented in the example above,
  because the constructor arguments of the class and the keys of the config
  returned by `get_config` are the same. In that case, the default
  `from_config` works fine.
  """

  def __call__(self, shape, dtype=None, **kwargs):
    """Returns a tensor object initialized as specified by the initializer.

    Args:
      shape: Shape of the tensor.
      dtype: Optional dtype of the tensor.
      **kwargs: Additional keyword arguments.
    """
    raise NotImplementedError

  def get_config(self):
    """Returns the initializer's configuration as a JSON-serializable dict.

    Returns:
      A JSON-serializable Python dict.
    """
    return {}

  @classmethod
  def from_config(cls, config):
    """Instantiates an initializer from a configuration dictionary.

    Example:

    ```python
    initializer = RandomUniform(-1, 1)
    config = initializer.get_config()
    initializer = RandomUniform.from_config(config)
    ```

    Args:
      config: A Python dictionary, the output of `get_config`.

    Returns:
      A `tf.keras.initializers.Initializer` instance.
    """
    # A `dtype` key may be present in configs saved by other code paths but
    # is not a constructor argument here, so drop it before instantiating.
    config.pop('dtype', None)
    return cls(**config)
@keras_export('keras.initializers.Zeros', 'keras.initializers.zeros', v1=[])
class Zeros(tf.zeros_initializer, Initializer):
  """Initializer that produces tensors filled with 0.

  Also available via the shortcut function `tf.keras.initializers.zeros`.

  Examples:

  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.Zeros()
  >>> values = initializer(shape=(2, 2))

  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.Zeros()
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
  """

  def __call__(self, shape, dtype=None, **kwargs):
    """Returns an all-zeros tensor of the requested shape.

    Args:
      shape: Shape of the tensor.
      dtype: Optional dtype of the tensor. Only numeric or boolean dtypes
        are supported. When omitted, `tf.keras.backend.floatx()` is used,
        which defaults to `float32` unless configured otherwise (via
        `tf.keras.backend.set_floatx(float_dtype)`).
      **kwargs: Additional keyword arguments.
    """
    resolved_dtype = _get_dtype(dtype)
    return super(Zeros, self).__call__(shape, dtype=resolved_dtype, **kwargs)
@keras_export('keras.initializers.Ones', 'keras.initializers.ones', v1=[])
class Ones(tf.ones_initializer, Initializer):
  """Initializer that produces tensors filled with 1.

  Also available via the shortcut function `tf.keras.initializers.ones`.

  Examples:

  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.Ones()
  >>> values = initializer(shape=(2, 2))

  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.Ones()
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
  """

  def __call__(self, shape, dtype=None, **kwargs):
    """Returns an all-ones tensor of the requested shape.

    Args:
      shape: Shape of the tensor.
      dtype: Optional dtype of the tensor. Only numeric or boolean dtypes
        are supported. When omitted, `tf.keras.backend.floatx()` is used,
        which defaults to `float32` unless configured otherwise (via
        `tf.keras.backend.set_floatx(float_dtype)`).
      **kwargs: Additional keyword arguments.
    """
    resolved_dtype = _get_dtype(dtype)
    return super(Ones, self).__call__(shape, dtype=resolved_dtype, **kwargs)
@keras_export('keras.initializers.Constant',
              'keras.initializers.constant',
              v1=[])
class Constant(Initializer):
  """Initializer that fills tensors with a single constant value.

  Also available via the shortcut function `tf.keras.initializers.constant`.

  Only scalar values are allowed. The constant value provided must be
  convertible to the dtype requested when calling the initializer.

  Examples:

  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.Constant(3.)
  >>> values = initializer(shape=(2, 2))

  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.Constant(3.)
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)

  Args:
    value: A Python scalar.
  """

  def __init__(self, value=0):
    self.value = value

  def __call__(self, shape, dtype=None, **kwargs):
    """Returns a tensor of the given shape filled with `self.value`.

    Args:
      shape: Shape of the tensor.
      dtype: Optional dtype of the tensor. When omitted,
        `tf.keras.backend.floatx()` is used, which defaults to `float32`
        unless configured otherwise (via
        `tf.keras.backend.set_floatx(float_dtype)`).
      **kwargs: Additional keyword arguments (accepted for call-signature
        compatibility; not used).
    """
    del kwargs  # Unused.
    return tf.constant(self.value, shape=shape, dtype=_get_dtype(dtype))

  def get_config(self):
    """Returns the configuration of the initializer."""
    return {'value': self.value}
@keras_export('keras.initializers.RandomUniform',
              'keras.initializers.random_uniform',
              v1=[])
class RandomUniform(tf.random_uniform_initializer, Initializer):
  """Initializer that draws tensor values from a uniform distribution.

  Also available via the shortcut function
  `tf.keras.initializers.random_uniform`.

  Examples:

  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.RandomUniform(minval=0., maxval=1.)
  >>> values = initializer(shape=(2, 2))

  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.RandomUniform(minval=0., maxval=1.)
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)

  Args:
    minval: A python scalar or a scalar tensor. Lower bound of the range of
      random values to generate (inclusive).
    maxval: A python scalar or a scalar tensor. Upper bound of the range of
      random values to generate (exclusive).
    seed: A Python integer. An initializer created with a given seed will
      always produce the same random tensor for a given shape and dtype.
  """

  def __call__(self, shape, dtype=None, **kwargs):
    """Returns a tensor of uniformly distributed random values.

    Args:
      shape: Shape of the tensor.
      dtype: Optional dtype of the tensor. Only floating point and integer
        types are supported. When omitted, `tf.keras.backend.floatx()` is
        used, which defaults to `float32` unless configured otherwise (via
        `tf.keras.backend.set_floatx(float_dtype)`).
      **kwargs: Additional keyword arguments.
    """
    resolved_dtype = _get_dtype(dtype)
    return super(RandomUniform, self).__call__(
        shape, dtype=resolved_dtype, **kwargs)
@keras_export('keras.initializers.RandomNormal',
              'keras.initializers.random_normal',
              v1=[])
class RandomNormal(tf.random_normal_initializer, Initializer):
  """Initializer that draws tensor values from a normal distribution.

  Also available via the shortcut function
  `tf.keras.initializers.random_normal`.

  Examples:

  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.RandomNormal(mean=0., stddev=1.)
  >>> values = initializer(shape=(2, 2))

  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.RandomNormal(mean=0., stddev=1.)
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)

  Args:
    mean: a python scalar or a scalar tensor. Mean of the random values to
      generate.
    stddev: a python scalar or a scalar tensor. Standard deviation of the
      random values to generate.
    seed: A Python integer. An initializer created with a given seed will
      always produce the same random tensor for a given shape and dtype.
  """

  def __call__(self, shape, dtype=None, **kwargs):
    """Returns a tensor of normally distributed random values.

    Args:
      shape: Shape of the tensor.
      dtype: Optional dtype of the tensor. Only floating point types are
        supported. When omitted, `tf.keras.backend.floatx()` is used, which
        defaults to `float32` unless configured otherwise (via
        `tf.keras.backend.set_floatx(float_dtype)`).
      **kwargs: Additional keyword arguments.
    """
    resolved_dtype = _get_dtype(dtype)
    return super(RandomNormal, self).__call__(
        shape, dtype=resolved_dtype, **kwargs)
@keras_export('keras.initializers.TruncatedNormal',
              'keras.initializers.truncated_normal',
              v1=[])
class TruncatedNormal(init_ops_v2.TruncatedNormal, Initializer):
  """Initializer that draws values from a truncated normal distribution.

  Also available via the shortcut function
  `tf.keras.initializers.truncated_normal`.

  The generated values are similar to those from a
  `tf.keras.initializers.RandomNormal` initializer, except that values more
  than two standard deviations from the mean are discarded and re-drawn.

  Examples:

  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.TruncatedNormal(mean=0., stddev=1.)
  >>> values = initializer(shape=(2, 2))

  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.TruncatedNormal(mean=0., stddev=1.)
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)

  Args:
    mean: a python scalar or a scalar tensor. Mean of the random values
      to generate.
    stddev: a python scalar or a scalar tensor. Standard deviation of the
      random values to generate.
    seed: A Python integer. An initializer created with a given seed will
      always produce the same random tensor for a given shape and dtype.
  """

  def __call__(self, shape, dtype=None, **kwargs):
    """Returns a tensor of truncated-normal random values.

    Args:
      shape: Shape of the tensor.
      dtype: Optional dtype of the tensor. Only floating point types are
        supported. When omitted, `tf.keras.backend.floatx()` is used, which
        defaults to `float32` unless configured otherwise (via
        `tf.keras.backend.set_floatx(float_dtype)`).
      **kwargs: Additional keyword arguments.
    """
    resolved_dtype = _get_dtype(dtype)
    return super(TruncatedNormal, self).__call__(
        shape, dtype=resolved_dtype, **kwargs)
@keras_export('keras.initializers.VarianceScaling',
              'keras.initializers.variance_scaling',
              v1=[])
class VarianceScaling(init_ops_v2.VarianceScaling, Initializer):
  """Initializer that adapts its scale to the shape of the weights tensor.

  Also available via the shortcut function
  `tf.keras.initializers.variance_scaling`.

  With `distribution="truncated_normal" or "untruncated_normal"`, samples are
  drawn from a truncated/untruncated normal distribution with a mean of zero
  and a standard deviation (after truncation, if used)
  `stddev = sqrt(scale / n)`, where `n` is:

  - number of input units in the weight tensor, if `mode="fan_in"`
  - number of output units, if `mode="fan_out"`
  - average of the numbers of input and output units, if `mode="fan_avg"`

  With `distribution="uniform"`, samples are drawn from a uniform distribution
  within `[-limit, limit]`, where `limit = sqrt(3 * scale / n)`.

  Examples:

  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.VarianceScaling(
  ...     scale=0.1, mode='fan_in', distribution='uniform')
  >>> values = initializer(shape=(2, 2))

  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.VarianceScaling(
  ...     scale=0.1, mode='fan_in', distribution='uniform')
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)

  Args:
    scale: Scaling factor (positive float).
    mode: One of "fan_in", "fan_out", "fan_avg".
    distribution: Random distribution to use. One of "truncated_normal",
      "untruncated_normal" and "uniform".
    seed: A Python integer. An initializer created with a given seed will
      always produce the same random tensor for a given shape and dtype.
  """

  def __call__(self, shape, dtype=None, **kwargs):
    """Returns a tensor initialized as specified by the initializer.

    Args:
      shape: Shape of the tensor.
      dtype: Optional dtype of the tensor. Only floating point types are
        supported. When omitted, `tf.keras.backend.floatx()` is used, which
        defaults to `float32` unless configured otherwise (via
        `tf.keras.backend.set_floatx(float_dtype)`).
      **kwargs: Additional keyword arguments.
    """
    resolved_dtype = _get_dtype(dtype)
    return super(VarianceScaling, self).__call__(
        shape, dtype=resolved_dtype, **kwargs)
@keras_export('keras.initializers.Orthogonal',
              'keras.initializers.orthogonal',
              v1=[])
class Orthogonal(init_ops_v2.Orthogonal, Initializer):
  """Initializer that generates an orthogonal matrix.

  Also available via the shortcut function `tf.keras.initializers.orthogonal`.

  If the shape of the tensor to initialize is two-dimensional, it is
  initialized with an orthogonal matrix obtained from the QR decomposition of
  a matrix of random numbers drawn from a normal distribution. If the matrix
  has fewer rows than columns then the output will have orthogonal rows;
  otherwise, the output will have orthogonal columns.

  If the shape of the tensor to initialize has more than two dimensions,
  a matrix of shape `(shape[0] * ... * shape[n - 2], shape[n - 1])`
  is initialized, where `n` is the length of the shape vector.
  The matrix is subsequently reshaped to give a tensor of the desired shape.

  Examples:

  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.Orthogonal()
  >>> values = initializer(shape=(2, 2))

  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.Orthogonal()
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)

  Args:
    gain: multiplicative factor to apply to the orthogonal matrix
    seed: A Python integer. An initializer created with a given seed will
      always produce the same random tensor for a given shape and dtype.

  References:
      [Saxe et al., 2014](https://openreview.net/forum?id=_wzZwKpTDF_9C)
      ([pdf](https://arxiv.org/pdf/1312.6120.pdf))
  """

  def __call__(self, shape, dtype=None, **kwargs):
    """Returns a tensor initialized to an orthogonal matrix.

    Args:
      shape: Shape of the tensor.
      dtype: Optional dtype of the tensor. Only floating point types are
        supported. When omitted, `tf.keras.backend.floatx()` is used, which
        defaults to `float32` unless configured otherwise (via
        `tf.keras.backend.set_floatx(float_dtype)`).
      **kwargs: Additional keyword arguments.
    """
    resolved_dtype = _get_dtype(dtype)
    return super(Orthogonal, self).__call__(
        shape, dtype=resolved_dtype, **kwargs)
@keras_export('keras.initializers.Identity',
              'keras.initializers.identity',
              v1=[])
class Identity(init_ops_v2.Identity, Initializer):
  """Initializer that generates the identity matrix.

  Also available via the shortcut function `tf.keras.initializers.identity`.

  Only usable for generating 2D matrices.

  Examples:

  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.Identity()
  >>> values = initializer(shape=(2, 2))

  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.Identity()
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)

  Args:
    gain: Multiplicative factor to apply to the identity matrix.
  """

  def __call__(self, shape, dtype=None, **kwargs):
    """Returns a tensor initialized to a 2D identity matrix.

    Args:
      shape: Shape of the tensor. It should have exactly rank 2.
      dtype: Optional dtype of the tensor. Only floating point types are
        supported. When omitted, `tf.keras.backend.floatx()` is used, which
        defaults to `float32` unless configured otherwise (via
        `tf.keras.backend.set_floatx(float_dtype)`).
      **kwargs: Additional keyword arguments.
    """
    resolved_dtype = _get_dtype(dtype)
    return super(Identity, self).__call__(
        shape, dtype=resolved_dtype, **kwargs)
@keras_export('keras.initializers.GlorotUniform',
              'keras.initializers.glorot_uniform',
              v1=[])
class GlorotUniform(VarianceScaling):
  """The Glorot uniform initializer, also called Xavier uniform initializer.

  Also available via the shortcut function
  `tf.keras.initializers.glorot_uniform`.

  Draws samples from a uniform distribution within `[-limit, limit]`, where
  `limit = sqrt(6 / (fan_in + fan_out))` (`fan_in` is the number of input
  units in the weight tensor and `fan_out` is the number of output units).

  Examples:

  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.GlorotUniform()
  >>> values = initializer(shape=(2, 2))

  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.GlorotUniform()
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)

  Args:
    seed: A Python integer. An initializer created with a given seed will
      always produce the same random tensor for a given shape and dtype.

  References:
      [Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html)
      ([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf))
  """

  def __init__(self, seed=None):
    # Fixed variance-scaling configuration that yields the Glorot/Xavier
    # uniform scheme.
    super(GlorotUniform, self).__init__(
        scale=1.0, mode='fan_avg', distribution='uniform', seed=seed)

  def get_config(self):
    """Returns the configuration of the initializer."""
    return {'seed': self.seed}
@keras_export('keras.initializers.GlorotNormal',
              'keras.initializers.glorot_normal',
              v1=[])
class GlorotNormal(VarianceScaling):
  """The Glorot normal initializer, also called Xavier normal initializer.

  Also available via the shortcut function
  `tf.keras.initializers.glorot_normal`.

  Draws samples from a truncated normal distribution centered on 0 with
  `stddev = sqrt(2 / (fan_in + fan_out))` where `fan_in` is the number of
  input units in the weight tensor and `fan_out` is the number of output
  units in the weight tensor.

  Examples:

  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.GlorotNormal()
  >>> values = initializer(shape=(2, 2))

  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.GlorotNormal()
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)

  Args:
    seed: A Python integer. An initializer created with a given seed will
      always produce the same random tensor for a given shape and dtype.

  References:
      [Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html)
      ([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf))
  """

  def __init__(self, seed=None):
    # Fixed variance-scaling configuration that yields the Glorot/Xavier
    # truncated-normal scheme.
    super(GlorotNormal, self).__init__(
        scale=1.0, mode='fan_avg', distribution='truncated_normal', seed=seed)

  def get_config(self):
    """Returns the configuration of the initializer."""
    return {'seed': self.seed}
@keras_export('keras.initializers.LecunNormal',
              'keras.initializers.lecun_normal',
              v1=[])
class LecunNormal(VarianceScaling):
  """Lecun normal initializer.

  Also available via the shortcut function
  `tf.keras.initializers.lecun_normal`.

  Initializers allow you to pre-specify an initialization strategy, encoded
  in the Initializer object, without knowing the shape and dtype of the
  variable being initialized.

  Draws samples from a truncated normal distribution centered on 0 with
  `stddev = sqrt(1 / fan_in)` where `fan_in` is the number of input units in
  the weight tensor.

  Examples:

  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.LecunNormal()
  >>> values = initializer(shape=(2, 2))

  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.LecunNormal()
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)

  Args:
    seed: A Python integer. Used to seed the random generator.

  References:
      - Self-Normalizing Neural Networks,
      [Klambauer et al., 2017]
      (https://papers.nips.cc/paper/6698-self-normalizing-neural-networks)
      ([pdf]
      (https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf))
      - Efficient Backprop,
      [Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)
  """

  def __init__(self, seed=None):
    # Fixed variance-scaling configuration that yields the LeCun
    # truncated-normal scheme.
    super(LecunNormal, self).__init__(
        scale=1., mode='fan_in', distribution='truncated_normal', seed=seed)

  def get_config(self):
    """Returns the configuration of the initializer."""
    return {'seed': self.seed}
@keras_export('keras.initializers.LecunUniform',
              'keras.initializers.lecun_uniform',
              v1=[])
class LecunUniform(VarianceScaling):
  """Lecun uniform initializer.

  Also available via the shortcut function
  `tf.keras.initializers.lecun_uniform`.

  Draws samples from a uniform distribution within `[-limit, limit]`, where
  `limit = sqrt(3 / fan_in)` (`fan_in` is the number of input units in the
  weight tensor).

  Examples:

  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.LecunUniform()
  >>> values = initializer(shape=(2, 2))

  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.LecunUniform()
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)

  Args:
    seed: A Python integer. An initializer created with a given seed will
      always produce the same random tensor for a given shape and dtype.

  References:
      - Self-Normalizing Neural Networks,
      [Klambauer et al., 2017](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks)  # pylint: disable=line-too-long
      ([pdf](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf))
      - Efficient Backprop,
      [Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)
  """

  def __init__(self, seed=None):
    # Fixed variance-scaling configuration that yields the LeCun uniform
    # scheme.
    super(LecunUniform, self).__init__(
        scale=1., mode='fan_in', distribution='uniform', seed=seed)

  def get_config(self):
    """Returns the configuration of the initializer."""
    return {'seed': self.seed}
@keras_export('keras.initializers.HeNormal',
              'keras.initializers.he_normal',
              v1=[])
class HeNormal(VarianceScaling):
  """He normal initializer.

  Also available via the shortcut function
  `tf.keras.initializers.he_normal`.

  It draws samples from a truncated normal distribution centered on 0 with
  `stddev = sqrt(2 / fan_in)` where `fan_in` is the number of input units in
  the weight tensor.

  Examples:

  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.HeNormal()
  >>> values = initializer(shape=(2, 2))

  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.HeNormal()
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)

  Args:
    seed: A Python integer. An initializer created with a given seed will
      always produce the same random tensor for a given shape and dtype.

  References:
      [He et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html)  # pylint: disable=line-too-long
      ([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf))
  """

  def __init__(self, seed=None):
    # Fixed variance-scaling configuration that yields the He
    # truncated-normal scheme.
    super(HeNormal, self).__init__(
        scale=2., mode='fan_in', distribution='truncated_normal', seed=seed)

  def get_config(self):
    """Returns the configuration of the initializer."""
    return {'seed': self.seed}
@keras_export('keras.initializers.HeUniform',
              'keras.initializers.he_uniform',
              v1=[])
class HeUniform(VarianceScaling):
  """He uniform variance scaling initializer.

  Also available via the shortcut function
  `tf.keras.initializers.he_uniform`.

  Draws samples from a uniform distribution within `[-limit, limit]`, where
  `limit = sqrt(6 / fan_in)` (`fan_in` is the number of input units in the
  weight tensor).

  Examples:

  >>> # Standalone usage:
  >>> initializer = tf.keras.initializers.HeUniform()
  >>> values = initializer(shape=(2, 2))

  >>> # Usage in a Keras layer:
  >>> initializer = tf.keras.initializers.HeUniform()
  >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)

  Args:
    seed: A Python integer. An initializer created with a given seed will
      always produce the same random tensor for a given shape and dtype.

  References:
      [He et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html)  # pylint: disable=line-too-long
      ([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf))
  """

  def __init__(self, seed=None):
    # Fixed variance-scaling configuration that yields the He uniform scheme.
    super(HeUniform, self).__init__(
        scale=2., mode='fan_in', distribution='uniform', seed=seed)

  def get_config(self):
    """Returns the configuration of the initializer."""
    return {'seed': self.seed}
def _get_dtype(dtype):
  """Canonicalizes `dtype` to a `tf.DType`, defaulting to Keras' floatx.

  Args:
    dtype: A dtype-convertible value, or `None` to use
      `tf.keras.backend.floatx()`.

  Returns:
    A `tf.DType` instance.
  """
  return tf.as_dtype(backend.floatx() if dtype is None else dtype)
| keras/initializers/initializers_v2.py | 26,877 | Initializer that generates tensors with constant values.
Also available via the shortcut function `tf.keras.initializers.constant`.
Only scalar values are allowed.
The constant value provided must be convertible to the dtype requested
when calling the initializer.
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.Constant(3.)
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.Constant(3.)
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Args:
value: A Python scalar.
The Glorot normal initializer, also called Xavier normal initializer.
Also available via the shortcut function
`tf.keras.initializers.glorot_normal`.
Draws samples from a truncated normal distribution centered on 0 with `stddev
= sqrt(2 / (fan_in + fan_out))` where `fan_in` is the number of input units in
the weight tensor and `fan_out` is the number of output units in the weight
tensor.
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.GlorotNormal()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.GlorotNormal()
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Args:
seed: A Python integer. An initializer created with a given seed will
always produce the same random tensor for a given shape and dtype.
References:
[Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html)
([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf))
The Glorot uniform initializer, also called Xavier uniform initializer.
Also available via the shortcut function
`tf.keras.initializers.glorot_uniform`.
Draws samples from a uniform distribution within `[-limit, limit]`, where
`limit = sqrt(6 / (fan_in + fan_out))` (`fan_in` is the number of input units
in the weight tensor and `fan_out` is the number of output units).
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.GlorotUniform()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.GlorotUniform()
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Args:
seed: A Python integer. An initializer created with a given seed will
always produce the same random tensor for a given shape and dtype.
References:
[Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html)
([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf))
He normal initializer.
Also available via the shortcut function
`tf.keras.initializers.he_normal`.
It draws samples from a truncated normal distribution centered on 0 with
`stddev = sqrt(2 / fan_in)` where `fan_in` is the number of input units in the
weight tensor.
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.HeNormal()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.HeNormal()
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Arguments:
seed: A Python integer. An initializer created with a given seed will
always produce the same random tensor for a given shape and dtype.
References:
[He et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html) # pylint: disable=line-too-long
([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf))
He uniform variance scaling initializer.
Also available via the shortcut function
`tf.keras.initializers.he_uniform`.
Draws samples from a uniform distribution within `[-limit, limit]`, where
`limit = sqrt(6 / fan_in)` (`fan_in` is the number of input units in the
weight tensor).
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.HeUniform()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.HeUniform()
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Arguments:
seed: A Python integer. An initializer created with a given seed will
always produce the same random tensor for a given shape and dtype.
References:
[He et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html) # pylint: disable=line-too-long
([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf))
Initializer that generates the identity matrix.
Also available via the shortcut function `tf.keras.initializers.identity`.
Only usable for generating 2D matrices.
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.Identity()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.Identity()
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Args:
gain: Multiplicative factor to apply to the identity matrix.
Initializer base class: all Keras initializers inherit from this class.
Initializers should implement a `__call__` method with the following
signature:
```python
def __call__(self, shape, dtype=None, **kwargs):
# returns a tensor of shape `shape` and dtype `dtype`
# containing values drawn from a distribution of your choice.
```
Optionally, you can also implement the method `get_config` and the class
method `from_config` in order to support serialization -- just like with
any Keras object.
Here's a simple example: a random normal initializer.
```python
import tensorflow as tf
class ExampleRandomNormal(tf.keras.initializers.Initializer):
def __init__(self, mean, stddev):
self.mean = mean
self.stddev = stddev
def __call__(self, shape, dtype=None, **kwargs):
return tf.random.normal(
shape, mean=self.mean, stddev=self.stddev, dtype=dtype)
def get_config(self): # To support serialization
return {"mean": self.mean, "stddev": self.stddev}
```
Note that we don't have to implement `from_config` in the example above since
the constructor arguments of the class and the keys in the config returned by
`get_config` are the same. In this case, the default `from_config`
works fine.
Lecun normal initializer.
Also available via the shortcut function
`tf.keras.initializers.lecun_normal`.
Initializers allow you to pre-specify an initialization strategy, encoded in
the Initializer object, without knowing the shape and dtype of the variable
being initialized.
Draws samples from a truncated normal distribution centered on 0 with `stddev
= sqrt(1 / fan_in)` where `fan_in` is the number of input units in the weight
tensor.
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.LecunNormal()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.LecunNormal()
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Arguments:
seed: A Python integer. Used to seed the random generator.
References:
- Self-Normalizing Neural Networks,
[Klambauer et al., 2017]
(https://papers.nips.cc/paper/6698-self-normalizing-neural-networks)
([pdf]
(https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf))
- Efficient Backprop,
[Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)
Lecun uniform initializer.
Also available via the shortcut function
`tf.keras.initializers.lecun_uniform`.
Draws samples from a uniform distribution within `[-limit, limit]`,
where `limit = sqrt(3 / fan_in)` (`fan_in` is the number of input units in the
weight tensor).
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.LecunUniform()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.LecunUniform()
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Arguments:
seed: A Python integer. An initializer created with a given seed will
always produce the same random tensor for a given shape and dtype.
References:
- Self-Normalizing Neural Networks,
[Klambauer et al., 2017](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks) # pylint: disable=line-too-long
([pdf](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf))
- Efficient Backprop,
[Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)
Initializer that generates tensors initialized to 1.
Also available via the shortcut function `tf.keras.initializers.ones`.
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.Ones()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.Ones()
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Initializer that generates an orthogonal matrix.
Also available via the shortcut function `tf.keras.initializers.orthogonal`.
If the shape of the tensor to initialize is two-dimensional, it is initialized
with an orthogonal matrix obtained from the QR decomposition of a matrix of
random numbers drawn from a normal distribution.
If the matrix has fewer rows than columns then the output will have orthogonal
rows. Otherwise, the output will have orthogonal columns.
If the shape of the tensor to initialize is more than two-dimensional,
a matrix of shape `(shape[0] * ... * shape[n - 2], shape[n - 1])`
is initialized, where `n` is the length of the shape vector.
The matrix is subsequently reshaped to give a tensor of the desired shape.
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.Orthogonal()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.Orthogonal()
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Args:
gain: multiplicative factor to apply to the orthogonal matrix
seed: A Python integer. An initializer created with a given seed will
always produce the same random tensor for a given shape and dtype.
References:
[Saxe et al., 2014](https://openreview.net/forum?id=_wzZwKpTDF_9C)
([pdf](https://arxiv.org/pdf/1312.6120.pdf))
Initializer that generates tensors with a normal distribution.
Also available via the shortcut function
`tf.keras.initializers.random_normal`.
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.RandomNormal(mean=0., stddev=1.)
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.RandomNormal(mean=0., stddev=1.)
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Args:
mean: a python scalar or a scalar tensor. Mean of the random values to
generate.
stddev: a python scalar or a scalar tensor. Standard deviation of the random
values to generate.
seed: A Python integer. An initializer created with a given seed will
always produce the same random tensor for a given shape and dtype.
Initializer that generates tensors with a uniform distribution.
Also available via the shortcut function
`tf.keras.initializers.random_uniform`.
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.RandomUniform(minval=0., maxval=1.)
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.RandomUniform(minval=0., maxval=1.)
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Args:
minval: A python scalar or a scalar tensor. Lower bound of the range of
random values to generate (inclusive).
maxval: A python scalar or a scalar tensor. Upper bound of the range of
random values to generate (exclusive).
seed: A Python integer. An initializer created with a given seed will
always produce the same random tensor for a given shape and dtype.
Initializer that generates a truncated normal distribution.
Also available via the shortcut function
`tf.keras.initializers.truncated_normal`.
The values generated are similar to values from a
`tf.keras.initializers.RandomNormal` initializer except that values more
than two standard deviations from the mean are
discarded and re-drawn.
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.TruncatedNormal(mean=0., stddev=1.)
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.TruncatedNormal(mean=0., stddev=1.)
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Args:
mean: a python scalar or a scalar tensor. Mean of the random values
to generate.
stddev: a python scalar or a scalar tensor. Standard deviation of the
random values to generate.
seed: A Python integer. An initializer created with a given seed will
always produce the same random tensor for a given shape and dtype.
Initializer capable of adapting its scale to the shape of weights tensors.
Also available via the shortcut function
`tf.keras.initializers.variance_scaling`.
With `distribution="truncated_normal" or "untruncated_normal"`, samples are
drawn from a truncated/untruncated normal distribution with a mean of zero and
a standard deviation (after truncation, if used) `stddev = sqrt(scale / n)`,
where `n` is:
- number of input units in the weight tensor, if `mode="fan_in"`
- number of output units, if `mode="fan_out"`
- average of the numbers of input and output units, if `mode="fan_avg"`
With `distribution="uniform"`, samples are drawn from a uniform distribution
within `[-limit, limit]`, where `limit = sqrt(3 * scale / n)`.
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.VarianceScaling(
... scale=0.1, mode='fan_in', distribution='uniform')
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.VarianceScaling(
... scale=0.1, mode='fan_in', distribution='uniform')
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Args:
scale: Scaling factor (positive float).
mode: One of "fan_in", "fan_out", "fan_avg".
distribution: Random distribution to use. One of "truncated_normal",
"untruncated_normal" and "uniform".
seed: A Python integer. An initializer created with a given seed will
always produce the same random tensor for a given shape and dtype.
Initializer that generates tensors initialized to 0.
Also available via the shortcut function `tf.keras.initializers.zeros`.
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.Zeros()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.Zeros()
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor.
**kwargs: Additional keyword arguments.
Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only numeric or boolean dtypes are
supported. If not specified, `tf.keras.backend.floatx()` is used,
which default to `float32` unless you configured it otherwise
(via `tf.keras.backend.set_floatx(float_dtype)`).
**kwargs: Additional keyword arguments.
Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only numeric or boolean dtypes are
supported. If not specified, `tf.keras.backend.floatx()` is used,
which default to `float32` unless you configured it otherwise
(via `tf.keras.backend.set_floatx(float_dtype)`).
**kwargs: Additional keyword arguments.
Returns a tensor object initialized to `self.value`.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. If not specified,
`tf.keras.backend.floatx()` is used,
which default to `float32` unless you configured it otherwise
(via `tf.keras.backend.set_floatx(float_dtype)`).
**kwargs: Additional keyword arguments.
Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point and integer
types are supported. If not specified,
`tf.keras.backend.floatx()` is used,
which default to `float32` unless you configured it otherwise
(via `tf.keras.backend.set_floatx(float_dtype)`).
**kwargs: Additional keyword arguments.
Returns a tensor object initialized to random normal values.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point types are
supported. If not specified, `tf.keras.backend.floatx()` is used, which
default to `float32` unless you configured it otherwise (via
`tf.keras.backend.set_floatx(float_dtype)`)
**kwargs: Additional keyword arguments.
Returns a tensor object initialized to random normal values (truncated).
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point types are
supported. If not specified, `tf.keras.backend.floatx()` is used, which
default to `float32` unless you configured it otherwise (via
`tf.keras.backend.set_floatx(float_dtype)`)
**kwargs: Additional keyword arguments.
Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point types are
supported. If not specified, `tf.keras.backend.floatx()` is used, which
default to `float32` unless you configured it otherwise (via
`tf.keras.backend.set_floatx(float_dtype)`)
**kwargs: Additional keyword arguments.
Returns a tensor object initialized to an orthogonal matrix.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point types are
supported. If not specified, `tf.keras.backend.floatx()` is used,
which default to `float32` unless you configured it otherwise
(via `tf.keras.backend.set_floatx(float_dtype)`)
**kwargs: Additional keyword arguments.
Returns a tensor object initialized to a 2D identity matrix.
Args:
shape: Shape of the tensor. It should have exactly rank 2.
dtype: Optional dtype of the tensor. Only floating point types are
supported. If not specified, `tf.keras.backend.floatx()` is used,
which default to `float32` unless you configured it otherwise
(via `tf.keras.backend.set_floatx(float_dtype)`)
**kwargs: Additional keyword arguments.
Instantiates an initializer from a configuration dictionary.
Example:
```python
initializer = RandomUniform(-1, 1)
config = initializer.get_config()
initializer = RandomUniform.from_config(config)
```
Args:
config: A Python dictionary, the output of `get_config`.
Returns:
A `tf.keras.initializers.Initializer` instance.
Returns the configuration of the initializer as a JSON-serializable dict.
Returns:
A JSON-serializable Python dict.
Keras initializers for TF 2.
Copyright 2020 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== pylint: disable=g-classes-have-attributes | 19,817 | en | 0.567713 |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import vlan
class access(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module brocade-interface - based on the path /interface/hundredgigabitethernet/switchport/access-mac-group-rspan-vlan-classification/access. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.

  YANG Description: The access layer characteristics of this interface.

  NOTE(review): generated Python 2 code (uses ``__builtin__``) - do not
  edit by hand; regenerate from the YANG source instead.
  """
  # Restrict instances to pyangbind's bookkeeping attributes plus the one
  # child element of this container ('__vlan', name-mangled to '_access__vlan').
  __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__vlan',)
  _yang_name = 'access'
  _rest_name = 'access'
  _pybind_generated_by = 'container'
  def __init__(self, *args, **kwargs):
    # Path-helper resolution: an explicit keyword argument wins, then the
    # parent's helper is inherited, otherwise XPath registration is disabled.
    path_helper_ = kwargs.pop("path_helper", None)
    if path_helper_ is False:
      self._path_helper = False
    elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
      self._path_helper = path_helper_
    elif hasattr(self, "_parent"):
      path_helper_ = getattr(self._parent, "_path_helper", False)
      self._path_helper = path_helper_
    else:
      self._path_helper = False
    # Extension-method resolution mirrors the path-helper logic above.
    extmethods = kwargs.pop("extmethods", None)
    if extmethods is False:
      self._extmethods = False
    elif extmethods is not None and isinstance(extmethods, dict):
      self._extmethods = extmethods
    elif hasattr(self, "_parent"):
      extmethods = getattr(self._parent, "_extmethods", None)
      self._extmethods = extmethods
    else:
      self._extmethods = False
    # Default (empty) value for the 'vlan' YANG list (REST name 'rspan-vlan'),
    # keyed by 'access-vlan-id access-mac-group'.
    self.__vlan = YANGDynClass(base=YANGListType("access_vlan_id access_mac_group",vlan.vlan, yang_name="vlan", rest_name="rspan-vlan", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='access-vlan-id access-mac-group', extensions={u'tailf-common': {u'callpoint': u'rspan-mac-group-vlan-classification-config-phy', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'alt-name': u'rspan-vlan'}}), is_container='list', yang_name="vlan", rest_name="rspan-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'rspan-mac-group-vlan-classification-config-phy', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'alt-name': u'rspan-vlan'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='list', is_config=True)
    load = kwargs.pop("load", None)
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      # Copy-constructor path: the supplied object must expose every
      # pyangbind element; only values marked as changed are copied across.
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)
  def _path(self):
    # YANG data path: derived from the parent when attached to a tree,
    # otherwise the static schema path of this container.
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return [u'interface', u'hundredgigabitethernet', u'switchport', u'access-mac-group-rspan-vlan-classification', u'access']
  def _rest_path(self):
    # REST path: like _path() but uses the REST (alt) names and skips
    # levels that have no REST name of their own.
    if hasattr(self, "_parent"):
      if self._rest_name:
        return self._parent._rest_path()+[self._rest_name]
      else:
        return self._parent._rest_path()
    else:
      return [u'interface', u'HundredGigabitEthernet', u'switchport', u'access']
  def _get_vlan(self):
    """
    Getter method for vlan, mapped from YANG variable /interface/hundredgigabitethernet/switchport/access_mac_group_rspan_vlan_classification/access/vlan (list)
    """
    return self.__vlan
  def _set_vlan(self, v, load=False):
    """
    Setter method for vlan, mapped from YANG variable /interface/hundredgigabitethernet/switchport/access_mac_group_rspan_vlan_classification/access/vlan (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_vlan is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_vlan() directly.
    """
    # Coerce through the value's own unified type first, if it has one.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=YANGListType("access_vlan_id access_mac_group",vlan.vlan, yang_name="vlan", rest_name="rspan-vlan", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='access-vlan-id access-mac-group', extensions={u'tailf-common': {u'callpoint': u'rspan-mac-group-vlan-classification-config-phy', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'alt-name': u'rspan-vlan'}}), is_container='list', yang_name="vlan", rest_name="rspan-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'rspan-mac-group-vlan-classification-config-phy', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'alt-name': u'rspan-vlan'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='list', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """vlan must be of a type compatible with list""",
          'defined-type': "list",
          'generated-type': """YANGDynClass(base=YANGListType("access_vlan_id access_mac_group",vlan.vlan, yang_name="vlan", rest_name="rspan-vlan", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='access-vlan-id access-mac-group', extensions={u'tailf-common': {u'callpoint': u'rspan-mac-group-vlan-classification-config-phy', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'alt-name': u'rspan-vlan'}}), is_container='list', yang_name="vlan", rest_name="rspan-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'rspan-mac-group-vlan-classification-config-phy', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'alt-name': u'rspan-vlan'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='list', is_config=True)""",
        })
    self.__vlan = t
    # Re-register paths if the dynamic class provides a _set hook.
    if hasattr(self, '_set'):
      self._set()
  def _unset_vlan(self):
    # Restore the default (empty) list value.
    self.__vlan = YANGDynClass(base=YANGListType("access_vlan_id access_mac_group",vlan.vlan, yang_name="vlan", rest_name="rspan-vlan", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='access-vlan-id access-mac-group', extensions={u'tailf-common': {u'callpoint': u'rspan-mac-group-vlan-classification-config-phy', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'alt-name': u'rspan-vlan'}}), is_container='list', yang_name="vlan", rest_name="rspan-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'rspan-mac-group-vlan-classification-config-phy', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'alt-name': u'rspan-vlan'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='list', is_config=True)
  # Public property exposing the 'vlan' list via the generated accessors.
  vlan = __builtin__.property(_get_vlan, _set_vlan)
  _pyangbind_elements = {'vlan': vlan, }
| pybind/nos/v6_0_2f/interface/hundredgigabitethernet/switchport/access_mac_group_rspan_vlan_classification/access/__init__.py | 8,168 | This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-interface - based on the path /interface/hundredgigabitethernet/switchport/access-mac-group-rspan-vlan-classification/access. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: The access layer characteristics of this interface.
Getter method for vlan, mapped from YANG variable /interface/hundredgigabitethernet/switchport/access_mac_group_rspan_vlan_classification/access/vlan (list)
Setter method for vlan, mapped from YANG variable /interface/hundredgigabitethernet/switchport/access_mac_group_rspan_vlan_classification/access/vlan (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_vlan is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_vlan() directly. | 921 | en | 0.685498 |
# Generated by Django 2.0.5 on 2018-06-07 10:31
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated by Django 2.0.5 on 2018-06-07.

    Redefines ``Purchase.manager_approval`` as an optional choice field
    ("Not Decided" / "Yes" / "No", max length 500).
    """

    dependencies = [
        # Must be applied after the previous auto-generated migration.
        ('it_purchase_app', '0030_auto_20180607_1020'),
    ]

    operations = [
        migrations.AlterField(
            model_name='purchase',
            name='manager_approval',
            # blank/null are permitted so the approval may be left undecided.
            field=models.CharField(blank=True, choices=[('Not Decided', 'Not Decided'), ('Yes', 'Yes'), ('No', 'No')], max_length=500, null=True),
        ),
    ]
| it_purchase_project/it_purchase_app/migrations/0031_auto_20180607_1031.py | 502 | Generated by Django 2.0.5 on 2018-06-07 10:31 | 45 | en | 0.564677 |
#!/usr/bin/env python
# encoding: utf-8
from django.db import models
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django_extensions.db.fields import AutoSlugField
from v1.recipe.models import Recipe
class GroceryList(models.Model):
    """A user-owned shopping list; the core model of the list app.

    Fields:
        title: display name of the GroceryList.
        slug: URL-safe name, auto-derived from ``title``.
        author: the ``User`` who created the list; deleting the user
            cascades to the list.
        pub_date: creation timestamp, set automatically on insert.
    """
    title = models.CharField(_("grocery list title"), max_length=250)
    slug = AutoSlugField(_('slug'), populate_from='title')
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    pub_date = models.DateTimeField(auto_now_add=True)

    class Meta:
        # Oldest lists first.
        ordering = ['pub_date']

    def __str__(self):
        """Return the list title for display/debugging."""
        return '%s' % self.title

    def item_count(self):
        """Return the number of ``GroceryItem`` rows belonging to this list."""
        return GroceryItem.objects.filter(list=self).count()
class GroceryItem(models.Model):
    """A single entry on a ``GroceryList``.

    Fields:
        list: owning ``GroceryList``; deleting the list deletes its items.
        title: text of the item.
        completed: whether the item has been purchased or added to the
            user's shopping cart in the supermarket.
        order: position of the item within its list.
    """
    list = models.ForeignKey(GroceryList, on_delete=models.CASCADE, related_name='items')
    title = models.CharField(_("title"), max_length=550)
    completed = models.BooleanField(_("completed"), default=False)
    order = models.IntegerField(_("order"), default=0)

    class Meta:
        # Group by list, then explicit order, then insertion order (pk).
        ordering = ['list_id', 'order', 'pk']

    def __str__(self):
        """Return the item title for display/debugging."""
        return '%s' % self.title
class GroceryShared(models.Model):
    """Share record granting another user access to a ``GroceryList``.

    Shared lists allow other users to add/delete/edit the GroceryList.

    Fields:
        list: the ``GroceryList`` being shared.
        shared_by: the ``User`` who shared the list.
        shared_to: the ``User`` who is given access to the list.
    """
    list = models.ForeignKey(GroceryList, on_delete=models.CASCADE)
    shared_by = models.ForeignKey(User, on_delete=models.CASCADE, related_name="shared_by")
    shared_to = models.ForeignKey(User, on_delete=models.CASCADE, related_name="shared_to")

    def __str__(self):
        """Return the shared list's title."""
        return '%s' % self.list.title
| v1/list/models.py | 2,531 | The GroceryItem is an item on a GroceryList.
list = The GroceryList that owns the GroceryItem.
title = The name of the GroceryItem.
completed = Whether or not the GroceryItem has been purchased or
added to the users shopping cart in the supermarket.
order = The order of the item in the GroceryList.
The GroceryList is the core of list app.
It offers a home to many GroceryItems.
title = The name of the GroceryList.
slug = The HTML safe name of the GroceryList.
author = The User who created the GroceryList.
pub_date = The date that the GroceryList was created on.
Determines whether or not a GroceryList is shared to another user.
Shared lists allow other uses to add/delete/edit the GroceryList.
list = The GroceryList to be shared.
shared_by = The User that shared the List.
shared_to = The User that is given access to a GroceryList.
get the number of items in the list
!/usr/bin/env python encoding: utf-8 | 925 | en | 0.915098 |
from pandas import read_csv
from IPython.display import display
import numpy as np
import sys
import math
###############################
####Maria Eugenia Lopez #####
###############################
def fully_grown_depuration(number_to_remove=0.50, df=None):
    """Keep only the fully grown plants.

    Returns the rows whose ``height_m`` column is strictly greater than
    the given threshold; the input frame is not modified.

    :param number_to_remove: height threshold in metres; rows at or
        below this height are dropped (default 0.50).
    :param df: DataFrame to filter.  Defaults to the module-level
        ``plants`` table for backward compatibility with existing callers.
    :return: the filtered DataFrame.
    """
    if df is None:
        df = plants  # fall back to the module-level survey table
    return df.loc[df.height_m > number_to_remove]
def convert_GPS_lat_long(df):
    """Convert GPS coordinates from decimal degrees to metres, in place.

    Latitude degrees are scaled by the meridional circumference
    (40,008,000 m / 360 deg).  Longitude degrees are scaled by the
    equatorial circumference (40,075,160 m / 360 deg) and multiplied by
    cos(latitude) because meridians converge toward the poles.  The
    ``GPS_lat`` and ``GPS_lon`` columns are overwritten with the metre
    values.

    Vectorized replacement for the former per-row ``iterrows`` loop:
    same arithmetic, one pass over each column instead of repeated
    ``df.loc`` scalar writes.

    :param df: DataFrame with ``GPS_lat`` and ``GPS_lon`` columns in
        decimal degrees; modified in place, nothing is returned.
    """
    # Capture the original latitudes (in radians) before overwriting the
    # column: the longitude correction uses the *degree* latitude.
    lat_radians = np.radians(df["GPS_lat"].astype(float))
    df["GPS_lat"] = 40008000 * df["GPS_lat"] / 360
    df["GPS_lon"] = 40075160 * df["GPS_lon"] / 360 * np.cos(lat_radians)
##----------------------------------------
##Part A Assembling a Data Set
##----------------------------------------
##----------------------------------------
##Input and Output: Data Frames
# Load the 2017 plant survey, then drop the "tree" records: part A
# only analyses ground plants.
plants = read_csv('environmental_survey/plants2017.csv',
                  index_col=0)
plants.reset_index(level=0, inplace=True)
plants.drop(plants.index[plants.Plant == 'tree'], inplace=True)
plants.reset_index(drop=True, inplace=True)
##----------------------------------------
##Functions
# Replace the degree coordinates with metre values (in place) and
# rename the columns to reflect the new unit.
convert_GPS_lat_long(plants)
plants.rename(columns={'GPS_lon': 'Meters_lon',
                       'GPS_lat': 'Meters_lat'}, inplace=True)
##----------------------------------------
##Functions and Data Structures: Boolean Indexing
# Ask the user for a height threshold; an empty answer falls back to
# 0.5 m.  (Fixed the 'heiht' typo and the redundant double float().)
height_set_by_user = float(input("Set the height that you want: ") or "0.5")
plants = fully_grown_depuration(height_set_by_user)
# Re-index after the rows at or below the threshold were removed.
plants.reset_index(drop=True, inplace=True)
display(plants)
| Assignment/Environmental_Project/part_A.py | 1,650 | Maria Eugenia Lopez res= div*0.001to convert to Klmres = res*0.001----------------------------------------Part A Assembling a Data Set--------------------------------------------------------------------------------Input and Output: Data Framesdisplay(plants.head(n=50))----------------------------------------Functions----------------------------------------Functions and Data Structures: Boolean Indexingreseting the index after the depuration | 445 | en | 0.285517 |
#
# PySNMP MIB module EdgeSwitch-IPV6-TUNNEL-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/EdgeSwitch-IPV6-TUNNEL-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 18:56:15 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# Auto-generated by pysmi from the EdgeSwitch-IPV6-TUNNEL-MIB ASN.1 source.
# `mibBuilder` is injected by the pysnmp MIB loader that executes this
# module, which is why there are no ordinary Python imports here.
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsUnion, ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsUnion", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection")
fastPath, = mibBuilder.importSymbols("EdgeSwitch-REF-MIB", "fastPath")
InetAddressPrefixLength, InetAddressIPv4 = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddressPrefixLength", "InetAddressIPv4")
Ipv6Address, Ipv6IfIndex, Ipv6AddressPrefix = mibBuilder.importSymbols("IPV6-TC", "Ipv6Address", "Ipv6IfIndex", "Ipv6AddressPrefix")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Unsigned32, ModuleIdentity, Bits, Gauge32, Integer32, NotificationType, ObjectIdentity, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, iso, Counter64, Counter32, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "ModuleIdentity", "Bits", "Gauge32", "Integer32", "NotificationType", "ObjectIdentity", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "iso", "Counter64", "Counter32", "TimeTicks")
RowStatus, DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "DisplayString", "TextualConvention")
# Module identity under the enterprise OID 1.3.6.1.4.1.4413.1.1.27.
fastPathIpv6Tunnel = ModuleIdentity((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27))
fastPathIpv6Tunnel.setRevisions(('2011-01-26 00:00', '2007-05-23 00:00',))
if mibBuilder.loadTexts: fastPathIpv6Tunnel.setLastUpdated('201101260000Z')
if mibBuilder.loadTexts: fastPathIpv6Tunnel.setOrganization('Broadcom Inc')
agentTunnelIPV6Group = MibIdentifier((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1))
# agentTunnelIPV6Table: one row per IPv6 tunnel, indexed by agentTunnelID.
agentTunnelIPV6Table = MibTable((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 1), )
if mibBuilder.loadTexts: agentTunnelIPV6Table.setStatus('current')
agentTunnelIPV6Entry = MibTableRow((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 1, 1), ).setIndexNames((0, "EdgeSwitch-IPV6-TUNNEL-MIB", "agentTunnelID"))
if mibBuilder.loadTexts: agentTunnelIPV6Entry.setStatus('current')
agentTunnelID = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: agentTunnelID.setStatus('current')
agentTunnelIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentTunnelIfIndex.setStatus('current')
# Tunnel encapsulation mode: undefined(1), ip6over4(2) or ip6to4(3).
agentTunnelMode = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("undefined", 1), ("ip6over4", 2), ("ip6to4", 3))).clone('undefined')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: agentTunnelMode.setStatus('current')
agentTunnelLocalIP4Addr = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 1, 1, 4), InetAddressIPv4()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentTunnelLocalIP4Addr.setStatus('current')
agentTunnelRemoteIP4Addr = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 1, 1, 5), InetAddressIPv4()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentTunnelRemoteIP4Addr.setStatus('current')
agentTunnelLocalIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 1, 1, 6), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentTunnelLocalIfIndex.setStatus('current')
# RowStatus column used to create/destroy tunnel rows via SNMP SET.
agentTunnelStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 1, 1, 7), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: agentTunnelStatus.setStatus('current')
agentTunnelIcmpUnreachableMode = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: agentTunnelIcmpUnreachableMode.setStatus('current')
# agentTunnelIPV6PrefixTable: IPv6 prefixes configured on each tunnel,
# indexed by tunnel ID + prefix + prefix length.
agentTunnelIPV6PrefixTable = MibTable((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 2), )
if mibBuilder.loadTexts: agentTunnelIPV6PrefixTable.setStatus('current')
agentTunnelIPV6PrefixEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 2, 1), ).setIndexNames((0, "EdgeSwitch-IPV6-TUNNEL-MIB", "agentTunnelID"), (0, "EdgeSwitch-IPV6-TUNNEL-MIB", "agentTunnelIPV6PrefixPrefix"), (0, "EdgeSwitch-IPV6-TUNNEL-MIB", "agentTunnelIPV6PrefixPrefixLen"))
if mibBuilder.loadTexts: agentTunnelIPV6PrefixEntry.setStatus('current')
agentTunnelIPV6PrefixPrefix = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 2, 1, 1), Ipv6AddressPrefix())
if mibBuilder.loadTexts: agentTunnelIPV6PrefixPrefix.setStatus('current')
agentTunnelIPV6PrefixPrefixLen = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 2, 1, 2), InetAddressPrefixLength())
if mibBuilder.loadTexts: agentTunnelIPV6PrefixPrefixLen.setStatus('current')
agentTunnelIPV6PrefixStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4413, 1, 1, 27, 1, 2, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: agentTunnelIPV6PrefixStatus.setStatus('current')
# Export every symbol so other generated MIB modules can import them.
mibBuilder.exportSymbols("EdgeSwitch-IPV6-TUNNEL-MIB", agentTunnelIPV6PrefixStatus=agentTunnelIPV6PrefixStatus, agentTunnelIPV6Entry=agentTunnelIPV6Entry, agentTunnelIPV6Table=agentTunnelIPV6Table, agentTunnelIPV6PrefixEntry=agentTunnelIPV6PrefixEntry, agentTunnelLocalIP4Addr=agentTunnelLocalIP4Addr, fastPathIpv6Tunnel=fastPathIpv6Tunnel, agentTunnelID=agentTunnelID, agentTunnelIPV6PrefixPrefix=agentTunnelIPV6PrefixPrefix, agentTunnelIPV6PrefixPrefixLen=agentTunnelIPV6PrefixPrefixLen, agentTunnelIPV6PrefixTable=agentTunnelIPV6PrefixTable, agentTunnelStatus=agentTunnelStatus, agentTunnelIPV6Group=agentTunnelIPV6Group, agentTunnelRemoteIP4Addr=agentTunnelRemoteIP4Addr, agentTunnelLocalIfIndex=agentTunnelLocalIfIndex, agentTunnelMode=agentTunnelMode, PYSNMP_MODULE_ID=fastPathIpv6Tunnel, agentTunnelIcmpUnreachableMode=agentTunnelIcmpUnreachableMode, agentTunnelIfIndex=agentTunnelIfIndex)
| pysnmp/EdgeSwitch-IPV6-TUNNEL-MIB.py | 6,528 | PySNMP MIB module EdgeSwitch-IPV6-TUNNEL-MIB (http://snmplabs.com/pysmi) ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/EdgeSwitch-IPV6-TUNNEL-MIB Produced by pysmi-0.3.4 at Mon Apr 29 18:56:15 2019 On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) | 346 | en | 0.360628 |
"""Test cases for the pypfilt.io module."""
import datetime
import numpy as np
import os
from pypfilt.io import read_table, date_column
def test_read_datetime():
    """Check that read_table parses date and integer columns correctly.

    Writes a small whitespace-separated table whose ``count`` column is
    the Fibonacci sequence, reads it back with ``read_table``, and checks
    the parsed types and values row by row.
    """
    # Test data: sequential dates with Fibonacci sequence.
    content = """
    date count
    2020-01-01 1
    2020-01-02 1
    2020-01-03 2
    2020-01-04 3
    2020-01-05 5
    2020-01-06 8
    2020-01-07 13
    2020-01-08 21
    2020-01-09 34
    """
    expect_rows = 9
    expect_count = [1, 1]
    for i in range(expect_rows - 2):
        expect_count.append(expect_count[i] + expect_count[i + 1])
    # Save this data to a temporary data file; remove it even when
    # read_table raises, so a failing test leaves no file behind.
    path = "test_read_datetime.ssv"
    try:
        with open(path, encoding='utf-8', mode='w') as f:
            f.write(content)
        columns = [
            date_column('date'),
            ('count', np.int_),
        ]
        df = read_table(path, columns)
    finally:
        if os.path.exists(path):
            os.remove(path)
    # Check that we received the expected number of rows.
    assert len(df) == expect_rows
    # Check that each row has the expected content.
    for ix, row in enumerate(df):
        assert isinstance(row['date'], datetime.datetime)
        assert row['date'].year == 2020
        assert row['date'].month == 1
        assert row['date'].day == ix + 1
        assert row['count'] == expect_count[ix]
| local_pypfilt/tests/test_io.py | 1,326 | Test cases for the pypfilt.io module.
Test data: sequential dates with Fibonacci sequence. Save this data to a temporary data file. Read the data and then remove the data file. Check that we received the expected number of rows. Check that each row has the expected content. | 276 | en | 0.885202 |
#coding:utf8
# Authors: yqq
# Tornado request handlers exposing BSV (Bitcoin SV) wallet RPC operations.
import logging
import json
from utils import decimal_default,get_linenumber
from base_handler import BaseHandler
from .proxy import AuthServiceProxy
from cashaddress import convert
import traceback
# Set the Decimal context precision (8 significant digits) for BTC amounts.
from decimal import Decimal
from decimal import getcontext
getcontext().prec = 8
from constants import BSV_RPC_URL as RPC_URL
# Name of the DB table holding exchange-user deposit addresses.
STR_ADDRESS_TABLE = "t_btc_address"
class BTC_ListAccounts(BaseHandler):
    """Handler returning every exchange-user BTC address stored in the DB."""

    @staticmethod
    def addresses():
        """Fetch all rows of the address table and return their addresses."""
        from sql import run
        # TODO: cache the address list in redis once the table grows large.
        rows = run("""select * from {};""".format(STR_ADDRESS_TABLE))
        return [row['address'] for row in rows]

    def get(self):
        try:
            payload = BaseHandler.success_ret_with_data(BTC_ListAccounts.addresses())
            self.write(json.dumps(payload, default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_ListAccounts error:{0} in {1}".format(e,get_linenumber()))
g_exUserAddrs = BTC_ListAccounts.addresses() # Module-level cache of exchange-user BTC addresses, loaded once at import. 2019-06-01
class BTC_GetAccount(BaseHandler):
    """Handler resolving the wallet account label for a given address."""

    def get(self):
        rpc = AuthServiceProxy(RPC_URL)
        try:
            result = rpc.batch_([["getaccount", self.get_argument("address")]])
            self.write(json.dumps(BaseHandler.success_ret_with_data(result), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_GetAccount error:{0} in {1}".format(e,get_linenumber()))
class BTC_GetAccountAddress(BaseHandler):
    """Handler returning the receiving address for a wallet account.

    Calls the node's ``getaccountaddress`` RPC with the ``account``
    query argument and writes the JSON-wrapped result.
    """
    def get(self):
        btc_rpc_connection = AuthServiceProxy(RPC_URL)
        try:
            commands = [["getaccountaddress",self.get_argument("account")]]
            data = btc_rpc_connection.batch_(commands)
            self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            # Fixed log tag: was misspelled "BTC_GetAccoutAddress".
            logging.error("BTC_GetAccountAddress error:{0} in {1}".format(e,get_linenumber()))
class BTC_GetAccountBalance(BaseHandler):
    """Handler returning the wallet balance of a named account.

    Validates that the ``account`` argument is non-empty, then calls the
    node's ``getbalance`` RPC.
    """
    def get(self):
        btc_rpc_connection = AuthServiceProxy(RPC_URL)
        try:
            # Tornado's get_argument() already returns a decoded str; the old
            # .decode("utf-8") call raised AttributeError under Python 3.
            account = self.get_argument("account")
            if account is None or len(account) == 0:
                self.write(json.dumps(BaseHandler.error_ret()))
                return
            commands = [["getbalance", account]]
            data = btc_rpc_connection.batch_(commands)
            self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_GetAccountBalance error:{0} in {1}".format(e,get_linenumber()))
class BTC_GetBalance(BaseHandler):
    """Handler computing an address balance by summing its UTXOs."""

    def get(self):
        rpc = AuthServiceProxy(RPC_URL)
        try:
            utxos = BTC_ListUTXO.utxo(rpc, self.get_argument("address"))
            if not utxos:
                self.write(json.dumps(BaseHandler.error_ret_with_data("0")))
                return
            from utils import accumulate
            total = '%.8f' % accumulate(utxos)
            self.write(json.dumps(BaseHandler.success_ret_with_data(total), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_GetBalance error:{0} in {1}".format(e,get_linenumber()))
class BTC_ListUTXO(BaseHandler):
    """Handler listing unspent outputs (UTXOs) for one or more addresses."""

    @staticmethod
    def utxo(rpcconn, addrs, minconf=1, maxconf=9999999, opt=None):
        """Return the node's UTXOs for *addrs* with legacy-format addresses.

        Each entry keeps the node's cash-format address under
        ``cashaddress`` and replaces ``address`` with its legacy form.
        """
        addr_list = addrs if isinstance(addrs, list) else [addrs]
        command = ["listunspent", minconf, maxconf, addr_list, True]
        if opt is not None:
            command.append(opt)
        utxos = rpcconn.batch_([command])[0]
        # Convert cash-format addresses to legacy format, keeping both forms.
        for entry in utxos:
            cash_addr = entry['address']
            entry['address'] = convert.to_legacy_address(cash_addr)
            entry['cashaddress'] = cash_addr
        return utxos

    def post(self):
        rpc = AuthServiceProxy(RPC_URL)
        try:
            raw_min = self.get_argument("minconf")
            raw_max = self.get_argument("maxconf")
            minconf = 1 if raw_min == "" else int(raw_min)
            maxconf = 9999999 if raw_max == "" else int(raw_max)
            result = BTC_ListUTXO.utxo(rpc, self.get_argument("address"), minconf, maxconf)
            self.write(json.dumps(BaseHandler.success_ret_with_data(result), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_GetUTXO error:{0} in {1}".format(e,get_linenumber()))
class BTC_EstimateSmartFee(BaseHandler):
    """Handler estimating the fee for a typical transaction.

    BSV derives fee estimates from preceding blocks (unlike bch/btc/ltc),
    so a fixed feerate is used instead of the node's estimator.
    """

    @staticmethod
    def process(rpcconn, nConfTarget=2, strEstimateMode='ECONOMICAL'):
        """Return the feerate in satoshi per byte (fixed for BSV)."""
        return 20

    @staticmethod
    def calcFee(rpcconn, nIn=1, nOut=2):
        """Estimate the fee in BTC for a tx with *nIn* inputs and *nOut* outputs."""
        from decimal import Decimal, getcontext
        getcontext().prec = 8
        sat_per_byte = BTC_EstimateSmartFee.process(rpcconn)
        btc_per_byte = Decimal("%.8f" % (sat_per_byte / Decimal(100000000.0)))
        # Approximate tx size: 148 bytes per input, 34 per output, 10 overhead.
        tx_bytes = 148 * nIn + 34 * nOut + 10
        return Decimal(str(tx_bytes)) * btc_per_byte

    def get(self):
        try:
            rpcconn = AuthServiceProxy(RPC_URL)
            fee = '%.8f' % BTC_EstimateSmartFee.calcFee(rpcconn)
            self.write(json.dumps(BaseHandler.success_ret_with_data(fee), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s" % e)))
            logging.error("BTC_EstimateSmartFee error:{0} in {1}".format(e, get_linenumber()))
class BTC_CreateRawTransaction(BaseHandler):
    """Handler building an unsigned raw transaction from one address to another."""
    @staticmethod
    def process(rpcconn,from_addr,to_addr,amount):
        """Select UTXOs of *from_addr* and build a raw tx paying *amount* to *to_addr*.

        Returns ``(True, {"hex", "utxos", "txout"})`` on success or
        ``(False, reason)`` when the balance cannot cover amount + fee.
        """
        # Gather the spendable outputs of the source address.
        utxos = BTC_ListUTXO.utxo(rpcconn, from_addr)
        def UtxoFilter(utxos, amount):
            """Greedily pick confirmed, non-dust UTXOs until they cover amount + fee."""
            selected = []
            from decimal import Decimal
            nSum = Decimal('0')
            # Minimum usable UTXO value: roughly 148 * rate, where rate is the
            # BTC cost per 1000 bytes (dust-sized inputs are skipped below).
            nFee = Decimal('0.0')
            for utxo in [item for item in utxos if int(item["confirmations"]) >= 1 and float(item["amount"]) > 0.0003 ]:
                selected.append(utxo)
                nSum += Decimal(str((utxo["amount"])))
                if nSum > Decimal(str(amount)):
                    # Re-estimate the fee for the inputs picked so far plus 2 outputs.
                    nFee = BTC_EstimateSmartFee.calcFee(rpcconn, len(selected), 2)
                    if nSum > nFee + amount:
                        break
            return selected, nSum, nFee
        selected, nSum , fee = UtxoFilter(utxos, amount)
        # Check whether the selected inputs cover the payment plus the fee.
        if not isinstance(amount, Decimal):
            amount = Decimal(str(amount))
        if nSum < fee + amount:
            return False,"budget not enough"
        # Strip each UTXO down to the txid/vout pair the RPC expects.
        from utils import filtered
        param_in = [filtered(item,["txid","vout"]) for item in selected]
        # Pay the target; return the change (inputs - amount - fee) to the sender.
        param_out = {to_addr:amount, from_addr: nSum - amount - fee}
        # Create the unsigned raw transaction on the node.
        commands = [["createrawtransaction",param_in,param_out]]
        return True, {"hex":rpcconn.batch_(commands), "utxos":selected, "txout":param_out}
    def post(self):
        btc_rpc_connection = AuthServiceProxy(RPC_URL)
        try:
            from_addr = self.get_argument("from")
            to_addr = self.get_argument("to")
            # Amounts are parsed as Decimal (never float) to avoid rounding errors.
            from decimal import Decimal
            amount = Decimal(str(self.get_argument("amount")))
            ret, rsp = BTC_CreateRawTransaction.process(btc_rpc_connection,from_addr,to_addr,amount)
            if not ret:
                self.write(json.dumps(BaseHandler.error_ret_with_data(rsp)))
                return
            self.write(json.dumps(BaseHandler.success_ret_with_data(rsp), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_CreatRawTransaction error:{0} in {1}".format(e,get_linenumber()))
class BTC_SendRawTransaction(BaseHandler):
    """Handler broadcasting a signed raw transaction to the network."""

    def post(self):
        rpc = AuthServiceProxy(RPC_URL)
        try:
            raw_hex = self.get_argument("rawdata")
            if not raw_hex:
                return
            result = rpc.batch_([["sendrawtransaction", raw_hex]])
            self.write(json.dumps(BaseHandler.success_ret_with_data(result), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_SendRawTransaction error:{0} in {1}".format(e,get_linenumber()))
class BTC_CreateRawTransactionEx(BaseHandler):
    """Handler building a raw tx from multiple source addresses to multiple outputs."""
    @staticmethod
    def genearateInParam(rpcconn, src, dest):
        """Accumulate UTXOs from *src* addresses until they cover outputs + fee.

        Returns ``(enough, utxos, change)`` where change is the surplus
        left after the output amounts and the estimated fee.
        """
        utxos,gross,amount = [],Decimal('0'),sum(dest.values())
        redundant = 0
        for addr in src:
            # Spendable outputs of this source address.
            all = BTC_ListUTXO.utxo(rpcconn,addr)
            # Pick a recommended subset large enough for the target amount.
            from utils import recommended
            selected,aggregate = recommended(all,amount)
            # Accumulate the selection and its total value.
            utxos += selected
            gross += aggregate
            # Stop as soon as the gathered value covers amount + estimated fee
            # (fee assumes one extra change output beyond the destinations).
            redundant = gross - BTC_EstimateSmartFee.calcFee(rpcconn, len(utxos), len(dest.keys())+1) - amount
            if redundant > 0:
                return True,utxos,redundant
        return False,utxos,redundant
    @staticmethod
    def generateOutParam(dest):
        """Normalize output amounts to Decimal, keyed by destination address."""
        param_out = {}
        for key,value in dest.items():
            param_out[key] = Decimal(value) if isinstance(value, str) else Decimal(str(value))
        return param_out
    @staticmethod
    def process(rpcconn, src, dest ):
        """Build the raw transaction; change goes back to the first source address."""
        # Normalize outputs, then gather enough inputs to fund them.
        param_out = BTC_CreateRawTransactionEx.generateOutParam(dest)
        ret,utxos,redundant = BTC_CreateRawTransactionEx.genearateInParam(rpcconn,src,param_out)
        if not ret: return False, "budget not enough"
        # Route the change to src[0], merging if it is also a payee.
        param_out[src[0]] = redundant if src[0] not in param_out.keys() else param_out[src[0]] + redundant
        # Strip each UTXO down to the txid/vout pair the RPC expects.
        from utils import filtered
        param_in = [filtered(item,["txid","vout"]) for item in utxos]
        return True, {"hex":rpcconn.batch_([["createrawtransaction",param_in,param_out]]),"utxos":utxos, "txout":param_out}
    def get_argument_ex(self, str):
        """Read one key from the JSON request body; False when absent."""
        from utils import json2dict
        str2dict = json2dict(self.request.body)
        return str2dict[str] if str in str2dict.keys() else False
    def post(self):
        btc_rpc_connection = AuthServiceProxy(RPC_URL)
        try:
            src = self.get_argument_ex("src")
            dest = self.get_argument_ex("dest")
            if not isinstance(src, list):
                self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s" % ("src must be json list"))))
                return
            if not isinstance(dest, dict):
                self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s" % ("dest must be json object"))))
                return
            ret, rsp = BTC_CreateRawTransactionEx.process(btc_rpc_connection, src, dest)
            if not ret:
                self.write(json.dumps(BaseHandler.error_ret_with_data(rsp)))
                return
            self.write(json.dumps(BaseHandler.success_ret_with_data(rsp), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_CreateRawTransactionEx error:{0} in {1}".format(e,get_linenumber()))
class BTC_CreateRawTransactionEx_Collection(BaseHandler):
    """Handler sweeping (collecting) funds from user addresses into one destination."""
    @staticmethod
    def makeParams( rpcconn, lstSrc, lstDest):
        """Gather all collectable UTXOs belonging to the source addresses.

        A single "*" source means every known exchange-user address.
        Returns ``(ok, utxos, net_amount, fee)``; ok is False when the net
        amount after fees is too small to be worth sweeping.
        """
        if len(lstSrc) == 1 and lstSrc[0].strip() == "*":
            lstSrcAddrs = g_exUserAddrs
        else:
            lstSrcAddrs = lstSrc
        utxos, nSum = [], Decimal('0')
        txAmount, fTxFee = 0, 0
        if isinstance(lstSrc, list):
            # BSV's listunspent lacks the filter-options argument supported by
            # other chains, so all UTXOs are fetched and dust-sized ones
            # (< 0.0003) are filtered out client-side.
            lstUtxos = BTC_ListUTXO.utxo(rpcconn, [ ], 1, 9999999)
            for utxo in lstUtxos:
                if Decimal(utxo['amount']) < 0.0003: continue
                if utxo['address'].strip() in lstSrcAddrs:
                    utxos.append(utxo)
                    nSum += Decimal(str((utxo["amount"])))
            fTxFee = BTC_EstimateSmartFee.calcFee(rpcconn, len(utxos), len(lstDest))
            txAmount = nSum - fTxFee # net amount actually transferred
            if txAmount <= 0.0003: # net amount too small to be worth sweeping
                return False, None, 0, 0
        return True, utxos, txAmount , fTxFee
    @staticmethod
    def process(rpcconn, lstSrc, lstDest):
        """Build the sweep transaction sending everything to lstDest[0]."""
        bRet, utxos, txAmount, fTxFee = BTC_CreateRawTransactionEx_Collection.makeParams(rpcconn, lstSrc, lstDest)
        if not bRet:
            return False, "collection amount is too small!"
        strDst = lstDest[0]
        vout = {strDst : txAmount}
        # Strip each UTXO down to the txid/vout pair the RPC expects.
        from utils import filtered
        vin = [filtered(item,["txid","vout"]) for item in utxos]
        strHex = rpcconn.batch_([["createrawtransaction", vin, vout]])
        return True, {"hex": strHex, "utxos":utxos, "txout":vout, "txFee":fTxFee}
    def get_argument_ex(self, str):
        """Read one key from the JSON request body; False when absent."""
        from utils import json2dict
        str2dict = json2dict(self.request.body)
        return str2dict[str] if str in str2dict.keys() else False
    def post(self):
        btc_rpc_connection = AuthServiceProxy(RPC_URL)
        try:
            src = self.get_argument_ex("src")
            dest = self.get_argument_ex("dest")
            if not isinstance(src, list):
                self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s" % ("src must be json list"))))
                return
            if not isinstance(dest, list):
                self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s" % ("dest must be json list"))))
                return
            ret, rsp = BTC_CreateRawTransactionEx_Collection.process(btc_rpc_connection, src, dest)
            if not ret:
                self.write(json.dumps(BaseHandler.error_ret_with_data(rsp)))
                return
            self.write(json.dumps(BaseHandler.success_ret_with_data(rsp), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_CreateRawTransactionEx error:{0} in {1}".format(e,get_linenumber()))
# Query the balances of the addresses that need collection (sweeping).
class BTC_CollectionQuery(BaseHandler):
    """Handler reporting, per exchange-user address, the sum of its UTXOs."""

    def get(self):
        rpc = AuthServiceProxy(RPC_URL)
        try:
            balances = {}
            for utxo in BTC_ListUTXO.utxo(rpc, [], 1, 9999999):
                addr = utxo['address'].strip()
                # Skip dust outputs and addresses we do not manage.
                if Decimal(utxo['amount']) < 0.0003:
                    continue
                if addr not in g_exUserAddrs:
                    continue
                prev = Decimal(balances.get(addr, "0.0"))
                balances[addr] = str(utxo['amount'] + prev)
            self.write(json.dumps(BaseHandler.success_ret_with_data(balances), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_CollectionQuery error:{0} in {1}".format(e, get_linenumber()))
class BTC_ListTransactions(BaseHandler):
    """Handler listing confirmed incoming wallet transactions."""

    @staticmethod
    def blktimes(rpc_connection, account="*", tx_counts=10):
        """Return block timestamps (newest first) of confirmed transactions."""
        data = rpc_connection.batch_([["listtransactions", account, tx_counts]])
        if len(data) == 0:
            return []
        # Only transactions already written into a block carry 'blocktime'.
        times = [tx['blocktime'] for tx in data[0] if "blocktime" in tx]
        return times[::-1]

    @staticmethod
    def process(rpc_connection, account="*", tx_counts=10, skips=0, include_watchonly=True):
        """Return confirmed 'receive' transactions, newest first.

        include_watchonly also covers addresses whose private keys were
        never imported into the wallet.
        """
        data = rpc_connection.batch_([["listtransactions", account, tx_counts, skips, include_watchonly]])
        if len(data) == 0:
            return []
        # Keep only confirmed incoming transactions, trimmed to useful fields.
        confirmed = [tx for tx in data[0] if "blocktime" in tx and tx["category"] == "receive"]
        from utils import filtered
        keep = ["address", "category", "amount", "confirmations", "txid", "blocktime"]
        return [filtered(tx, keep) for tx in confirmed][::-1]

    def get(self):
        rpc = AuthServiceProxy(RPC_URL)
        try:
            account = self.get_argument("account") or "*"
            tx_counts = int(self.get_argument("count") or 10)
            skips = int(self.get_argument("skips") or 0)
            result = BTC_ListTransactions.process(rpc, account, tx_counts, skips)
            self.write(json.dumps(BaseHandler.success_ret_with_data(result), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_ListTransActions error:{0} in {1}".format(e,get_linenumber()))
class BTC_CrawlTxData(BaseHandler):
    """Handler collecting confirmed deposits to exchange-user addresses."""

    @staticmethod
    def process(rpc_connection, nblktime):
        """Return receive txs at or after *nblktime* for known user addresses."""
        if len(g_exUserAddrs) == 0:
            return []
        matched = []
        for tx in BTC_ListTransactions.process(rpc_connection, '*', 100000000):
            # Normalize to legacy address format before matching against the cache.
            tx["address"] = convert.to_legacy_address(tx["address"].strip()).strip()
            if int(str(tx['blocktime'])) >= nblktime and tx["address"].strip() in g_exUserAddrs:
                matched.append(tx)
        return matched

    def post(self):
        rpc_connection = AuthServiceProxy(RPC_URL)
        try:
            last_blktime = int(str(self.get_argument("blocktime")))
            txs = BTC_CrawlTxData.process(rpc_connection, last_blktime)
            for tx in txs:
                # Stringify amounts to avoid Decimal serialization issues.
                tx["amount"] = str(tx["amount"])
            self.write(json.dumps(BaseHandler.success_ret_with_data(txs), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_CrawlTxData error:{0} in {1}".format(e,get_linenumber()))
class BTC_GetBlockCount(BaseHandler):
    """Handler returning the node's current block height."""

    @staticmethod
    def process(rpcconn):
        """Return the current block count as an int.

        batch_() returns a list of results, so the first element must be
        extracted before conversion; the old ``int(rpcconn.batch_(commands))``
        called int() on the list itself and raised TypeError.
        """
        commands = [["getblockcount"]]
        return int(rpcconn.batch_(commands)[0])
    def get(self):
        btc_rpc_connection = AuthServiceProxy(RPC_URL)
        try:
            blknumber = BTC_GetBlockCount.process(btc_rpc_connection)
            self.write(json.dumps(BaseHandler.success_ret_with_data(blknumber), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_GetBlockCount error:{0} in {1}".format(e,get_linenumber()))
class BTC_GetBlockHash(BaseHandler):
    """Handler returning the block hash at a given height."""

    @staticmethod
    def process(rpcconn,blknumber):
        """Call getblockhash for *blknumber*, coercing it to int.

        Query arguments arrive as strings, but the RPC requires an
        integer height; the old code forwarded the raw string.
        """
        commands = [["getblockhash",int(blknumber)]]
        return rpcconn.batch_(commands)
    def get(self):
        btc_rpc_connection = AuthServiceProxy(RPC_URL)
        try:
            # Default to the current tip height when no height is supplied.
            blknumber = self.get_argument("blknumber") if self.get_argument("blknumber") else BTC_GetBlockCount.process(btc_rpc_connection)
            data = BTC_GetBlockHash.process(btc_rpc_connection,blknumber)
            self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_GetBlockHash error:{0} in {1}".format(e,get_linenumber()))
class BTC_DecodeRawTransaction(BaseHandler):
    """Handler decoding a raw transaction hex string into JSON."""

    def post(self):
        btc_rpc_connection = AuthServiceProxy(RPC_URL)
        try:
            commands = [["decoderawtransaction",self.get_argument("rawdata")]]
            data = btc_rpc_connection.batch_(commands)
            self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            # Fixed log tag: was the copy-pasted "BTC_GetTransaction".
            logging.error("BTC_DecodeRawTransaction error:{0} in {1}".format(e,get_linenumber()))
class BTC_GetRawTransaction(BaseHandler):
    """Handler fetching a transaction by txid in decoded (verbose) form."""

    def get(self):
        btc_rpc_connection = AuthServiceProxy(RPC_URL)
        try:
            # True requests the decoded JSON form rather than raw hex.
            commands = [["getrawtransaction",self.get_argument("txid"),True]]
            data = btc_rpc_connection.batch_(commands)
            self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            # Fixed log tag: was the copy-pasted "BTC_GetTransaction".
            logging.error("BTC_GetRawTransaction error:{0} in {1}".format(e,get_linenumber()))
class BTC_GetBlock(BaseHandler):
    """Handler returning block data for a given block hash.

    Fixes two defects in the original: the hash was computed but never
    passed to ``getblock`` (the RPC always failed with a missing
    argument), and the fallback when ``blkhash`` was absent used the
    block *count* where a block *hash* is required.
    """
    def get(self):
        btc_rpc_connection = AuthServiceProxy(RPC_URL)
        try:
            blkhash = self.get_argument("blkhash")
            if not blkhash:
                # Default to the chain tip: resolve the current height to its hash.
                height = BTC_GetBlockCount.process(btc_rpc_connection)
                blkhash = BTC_GetBlockHash.process(btc_rpc_connection, height)[0]
            commands = [["getblock", blkhash]]
            data = btc_rpc_connection.batch_(commands)
            self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            # Fixed log tag: was the copy-pasted "BTC_GetBlockHash".
            logging.error("BTC_GetBlock error:{0} in {1}".format(e,get_linenumber()))
| Python3/Tornado/apps/ExchangeWalletApi/ExWallet/bsv/handler.py | 24,277 | coding:utf8authors : yqq设置精度 TODO:后期数据量大的时候, 使用redis进行缓存地址使用全局变量保存交易所用户BTC地址 2019-06-01要进行地址格式的转换 commands = [["estimatesmartfee", nConfTarget, strEstimateMode ]] commands = [["estimatefee", nConfTarget]] bsv 需要根据前面的区块来计算, 和 bch, btc , ltc 不一样 data = rpcconn.batch_(commands) nFeeRate = data[0] if len(data) > 0 else Decimal(0.00001) return nFeeRate * 100000000 / 1000 satoshi/Byte 即 in satoshis per byte if len(data) > 0: return data[0]['feerate'] * 100000000 / 1000 satoshi/Byte 即 in satoshis per byteutxosprint(utxos)最小输入utxo金额 : 148 * rate 其中rate是 1000字节 所需的btc数量 check if enough from utils import calcFee fee = BTC_EstimateSmartFee.calcFee(rpcconn, len(selected), 2)return False,0 需测试!!!print("--------------param_out-------------")print("fee" + str(fee))print(param_in)print(param_out)print("--------------param_out-------------") create raw transactionamount = float(self.get_argument("amount")) utxos recommend process check if enough preprocess param_out refinementprint(param_out) param_in refinementprint(param_in)for addr in lstSrc: bitcoin-cli -conf=/root/.bitcoin/bitcoin-test.conf listunspent 0 9999999 '[]' true '{ "minimumAmount": 0.005 }' commands = [["listunspent", 0, 99999999, [], True, {'minimumAmount':0.0003}]] lstUtxos = rpcconn.batch_(commands)[0] BSV 不支持 option操作 opt = {'minimumAmount':0.0003} print(len(lstUtxos))实际转账金额实际转账金额太小 lstSrcAddrs = [] traceback.print_exc()查询需要归集的地址余额 commands = [["listunspent", 0, 99999999, [], True, {'minimumAmount':0.0003}]] lstUtxos = rpcconn.batch_(commands)[0] opt = {'minimumAmount': 0.0003}fix bug:only return those txs which be writen into blockchain @yqq 2019-03-21 add 'include_watchonly' to include those address's transactions which not import private key into the wallet. yqq 2019-03-26fix bug:only return those txs which be writen into blockchain @yqq 2019-03-21 print(tx)convert to str to avoid bug | 1,905 | en | 0.32 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.