Dataset schema (column name, dtype, observed value range):

| column | dtype | values |
|---|---|---|
| hexsha | string | lengths 40 to 40 |
| size | int64 | 4 to 996k |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 4 to 245 |
| max_stars_repo_name | string | lengths 6 to 130 |
| max_stars_repo_head_hexsha | string | lengths 40 to 40 |
| max_stars_repo_licenses | list | lengths 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | lengths 24 to 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | lengths 24 to 24, nullable |
| max_issues_repo_path | string | lengths 4 to 245 |
| max_issues_repo_name | string | lengths 6 to 130 |
| max_issues_repo_head_hexsha | string | lengths 40 to 40 |
| max_issues_repo_licenses | list | lengths 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | lengths 24 to 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | lengths 24 to 24, nullable |
| max_forks_repo_path | string | lengths 4 to 245 |
| max_forks_repo_name | string | lengths 6 to 130 |
| max_forks_repo_head_hexsha | string | lengths 40 to 40 |
| max_forks_repo_licenses | list | lengths 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | lengths 24 to 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | lengths 24 to 24, nullable |
| content | string | lengths 4 to 996k |
| avg_line_length | float64 | 1.33 to 58.2k |
| max_line_length | int64 | 2 to 323k |
| alphanum_fraction | float64 | 0 to 0.97 |
| content_no_comment | string | lengths 0 to 946k |
| is_comment_constant_removed | bool | 2 classes |
| is_sharp_comment_removed | bool | 1 class |
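The last few columns are derived from `content`. A minimal sketch of how `avg_line_length`, `max_line_length`, and `alphanum_fraction` are presumably computed; the exact definitions are an assumption, and the helper name below is illustrative only:

```python
# Hedged sketch: plausible definitions for the derived columns above; the
# dataset's exact implementation may differ (e.g. in how blank lines or the
# trailing newline are counted).
def derived_stats(content: str) -> dict:
    lines = content.splitlines()
    return {
        "avg_line_length": len(content) / max(len(lines), 1),             # mean characters per line
        "max_line_length": max((len(line) for line in lines), default=0),  # longest single line
        "alphanum_fraction": sum(ch.isalnum() for ch in content) / max(len(content), 1),
    }

# Tiny usage example; values for the real rows below would of course differ.
print(derived_stats("import warnings\nwarnings.warn('deprecated')\n"))
```

For the first row below, 1,210 characters over 30 lines give roughly the listed avg_line_length of 40.33, which is consistent with this reading.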
Row 1
hexsha: 790480b08ad84d4d056648aa9b8a63969feccabf | size: 1,210 | ext: py | lang: Python
max_stars: path airflow/contrib/operators/emr_add_steps_operator.py, repo dorranh/airflow, head 1a9a2cadcf8606cfcb729d1323dd33dfacc64633, licenses ["Apache-2.0"], count 5, events 2020-07-17T07:33:58.000Z to 2022-03-02T06:23:47.000Z
max_issues: path airflow/contrib/operators/emr_add_steps_operator.py, repo dorranh/airflow, head 1a9a2cadcf8606cfcb729d1323dd33dfacc64633, licenses ["Apache-2.0"], count 7, events 2020-06-03T14:55:17.000Z to 2021-12-30T00:01:50.000Z
max_forks: path airflow/contrib/operators/emr_add_steps_operator.py, repo dorranh/airflow, head 1a9a2cadcf8606cfcb729d1323dd33dfacc64633, licenses ["Apache-2.0"], count 12, events 2020-01-09T14:02:39.000Z to 2022-01-24T07:18:51.000Z
content:
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.amazon.aws.operators.emr_add_steps`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.amazon.aws.operators.emr_add_steps import EmrAddStepsOperator # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.amazon.aws.operators.emr_add_steps`.",
DeprecationWarning, stacklevel=2
)
avg_line_length: 40.333333 | max_line_length: 100 | alphanum_fraction: 0.771074
content_no_comment:
import warnings
from airflow.providers.amazon.aws.operators.emr_add_steps import EmrAddStepsOperator
warnings.warn(
"This module is deprecated. Please use `airflow.providers.amazon.aws.operators.emr_add_steps`.",
DeprecationWarning, stacklevel=2
)
is_comment_constant_removed: true | is_sharp_comment_removed: true
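The file in this row is a thin deprecation shim: it re-exports `EmrAddStepsOperator` from the provider package and warns on import. A minimal usage sketch, not part of the dataset row, assuming an Airflow installation with the Amazon provider package available:

```python
import warnings

# Importing the deprecated path should re-export the operator and emit a
# DeprecationWarning pointing at the provider module, as the shim above does.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    from airflow.contrib.operators.emr_add_steps_operator import EmrAddStepsOperator  # noqa: F401
assert any(issubclass(w.category, DeprecationWarning) for w in caught)

# Preferred import going forward:
from airflow.providers.amazon.aws.operators.emr_add_steps import EmrAddStepsOperator  # noqa: F401
```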
Row 2
hexsha: 790481128cb623ac28c14ca0b8a50b9b7f8bc3c8 | size: 17,980 | ext: py | lang: Python
max_stars: path test/test_formats_geotiff.py, repo Scartography/mapchete, head f7d1a74acb4021adfd3053501416d2b974c40af9, licenses ["MIT"], count null, events null to null
max_issues: path test/test_formats_geotiff.py, repo Scartography/mapchete, head f7d1a74acb4021adfd3053501416d2b974c40af9, licenses ["MIT"], count null, events null to null
max_forks: path test/test_formats_geotiff.py, repo Scartography/mapchete, head f7d1a74acb4021adfd3053501416d2b974c40af9, licenses ["MIT"], count null, events null to null
content:
"""Test GeoTIFF as process output."""
import numpy as np
import numpy.ma as ma
import os
import pytest
import rasterio
from rasterio.io import MemoryFile
from rio_cogeo.cogeo import cog_validate
import shutil
from tilematrix import Bounds
import warnings
import mapchete
from mapchete.errors import MapcheteConfigError
from mapchete.io import path_exists
from mapchete.formats.default import gtiff
from mapchete.tile import BufferedTilePyramid
def test_output_data(mp_tmpdir):
"""Check GeoTIFF as output data."""
output_params = dict(
grid="geodetic",
format="GeoTIFF",
path=mp_tmpdir,
pixelbuffer=0,
metatiling=1,
bands=1,
dtype="int16",
delimiters=dict(
bounds=Bounds(-180.0, -90.0, 180.0, 90.0),
effective_bounds=Bounds(-180.439453125, -90.0, 180.439453125, 90.0),
zoom=[5],
process_bounds=Bounds(-180.0, -90.0, 180.0, 90.0),
),
)
output = gtiff.OutputDataWriter(output_params)
assert output.path == mp_tmpdir
assert output.file_extension == ".tif"
tp = BufferedTilePyramid("geodetic")
tile = tp.tile(5, 5, 5)
# get_path
assert output.get_path(tile) == os.path.join(*[mp_tmpdir, "5", "5", "5" + ".tif"])
# prepare_path
try:
temp_dir = os.path.join(*[mp_tmpdir, "5", "5"])
output.prepare_path(tile)
assert os.path.isdir(temp_dir)
finally:
shutil.rmtree(temp_dir, ignore_errors=True)
# profile
assert isinstance(output.profile(tile), dict)
# write
try:
data = np.ones((1,) + tile.shape) * 128
output.write(tile, data)
# tiles_exist
assert output.tiles_exist(tile)
# read
data = output.read(tile)
assert isinstance(data, np.ndarray)
assert not data[0].mask.any()
finally:
shutil.rmtree(mp_tmpdir, ignore_errors=True)
# read empty
try:
data = output.read(tile)
assert isinstance(data, np.ndarray)
assert data[0].mask.all()
finally:
shutil.rmtree(mp_tmpdir, ignore_errors=True)
# empty
try:
empty = output.empty(tile)
assert isinstance(empty, ma.MaskedArray)
assert not empty.any()
finally:
shutil.rmtree(mp_tmpdir, ignore_errors=True)
# deflate with predictor
try:
# with pytest.deprecated_call():
output_params.update(compress="deflate", predictor=2)
output = gtiff.OutputDataWriter(output_params)
assert output.profile(tile)["compress"] == "deflate"
assert output.profile(tile)["predictor"] == 2
finally:
shutil.rmtree(mp_tmpdir, ignore_errors=True)
# using deprecated "compression" property
try:
with pytest.deprecated_call():
output_params.update(compression="deflate", predictor=2)
output = gtiff.OutputDataWriter(output_params)
assert output.profile(tile)["compress"] == "deflate"
assert output.profile(tile)["predictor"] == 2
finally:
shutil.rmtree(mp_tmpdir, ignore_errors=True)
def test_for_web(client, mp_tmpdir):
"""Send GTiff via flask."""
tile_base_url = "/wmts_simple/1.0.0/cleantopo_br/default/WGS84/"
for url in ["/"]:
response = client.get(url)
assert response.status_code == 200
for url in [
tile_base_url + "5/30/62.tif",
tile_base_url + "5/30/63.tif",
tile_base_url + "5/31/62.tif",
tile_base_url + "5/31/63.tif",
]:
response = client.get(url)
assert response.status_code == 200
img = response.data
with warnings.catch_warnings():
warnings.simplefilter("ignore")
with MemoryFile(img) as memfile:
with memfile.open() as dataset:
assert dataset.read().any()
def test_input_data(mp_tmpdir, cleantopo_br):
"""Check GeoTIFF proces output as input data."""
with mapchete.open(cleantopo_br.path) as mp:
tp = BufferedTilePyramid("geodetic")
# TODO tile with existing but empty data
tile = tp.tile(5, 5, 5)
output_params = dict(
grid="geodetic",
format="GeoTIFF",
path=mp_tmpdir,
pixelbuffer=0,
metatiling=1,
bands=2,
dtype="int16",
delimiters=dict(
bounds=Bounds(-180.0, -90.0, 180.0, 90.0),
effective_bounds=Bounds(-180.439453125, -90.0, 180.439453125, 90.0),
zoom=[5],
process_bounds=Bounds(-180.0, -90.0, 180.0, 90.0),
),
)
output = gtiff.OutputDataWriter(output_params)
with output.open(tile, mp) as input_tile:
for data in [
input_tile.read(),
input_tile.read(1),
input_tile.read([1]),
# TODO assert valid indexes are passed input_tile.read([1, 2])
]:
assert isinstance(data, ma.masked_array)
assert input_tile.is_empty()
# open without resampling
with output.open(tile, mp) as input_tile:
pass
def test_write_geotiff_tags(mp_tmpdir, cleantopo_br, write_rasterfile_tags_py):
"""Pass on metadata tags from user process to rasterio."""
conf = dict(**cleantopo_br.dict)
conf.update(process=write_rasterfile_tags_py)
with mapchete.open(conf) as mp:
for tile in mp.get_process_tiles():
data, tags = mp.execute(tile)
assert data.any()
assert isinstance(tags, dict)
mp.write(process_tile=tile, data=(data, tags))
# read data
out_path = mp.config.output.get_path(tile)
with rasterio.open(out_path) as src:
assert "filewide_tag" in src.tags()
assert src.tags()["filewide_tag"] == "value"
assert "band_tag" in src.tags(1)
assert src.tags(1)["band_tag"] == "True"
@pytest.mark.remote
def test_s3_write_output_data(gtiff_s3, s3_example_tile, mp_s3_tmpdir):
"""Write and read output."""
with mapchete.open(gtiff_s3.dict) as mp:
process_tile = mp.config.process_pyramid.tile(*s3_example_tile)
# basic functions
assert mp.config.output.profile()
assert mp.config.output.empty(process_tile).mask.all()
assert mp.config.output.get_path(process_tile)
# check if tile exists
assert not mp.config.output.tiles_exist(process_tile)
# write
mp.batch_process(tile=process_tile.id)
# check if tile exists
assert mp.config.output.tiles_exist(process_tile)
# read again, this time with data
data = mp.config.output.read(process_tile)
assert isinstance(data, np.ndarray)
assert not data[0].mask.all()
def test_output_single_gtiff(output_single_gtiff):
tile_id = (5, 3, 7)
with mapchete.open(output_single_gtiff.path) as mp:
process_tile = mp.config.process_pyramid.tile(*tile_id)
# basic functions
assert mp.config.output.profile()
assert mp.config.output.empty(process_tile).mask.all()
assert mp.config.output.get_path(process_tile)
# check if tile exists
assert not mp.config.output.tiles_exist(process_tile)
# write
mp.batch_process(multi=2)
# check if tile exists
assert mp.config.output.tiles_exist(process_tile)
# read again, this time with data
data = mp.config.output.read(process_tile)
assert isinstance(data, np.ndarray)
assert not data[0].mask.all()
# write empty array
data = ma.masked_array(
data=np.ones(process_tile.shape),
mask=np.ones(process_tile.shape),
)
mp.config.output.write(process_tile, data)
assert os.path.isfile(mp.config.output.path)
# error on existing file
with pytest.raises(MapcheteConfigError):
mapchete.open(output_single_gtiff.path)
# overwrite existing file
with mapchete.open(output_single_gtiff.path, mode="overwrite") as mp:
process_tile = mp.config.process_pyramid.tile(*tile_id)
assert not mp.config.output.tiles_exist(process_tile)
# write
mp.batch_process(tile=process_tile.id)
# check if tile exists
assert mp.config.output.tiles_exist(process_tile)
assert mp.config.output.tiles_exist(
output_tile=mp.config.output_pyramid.intersecting(process_tile)[0]
)
# read again, this time with data
data = mp.config.output.read(process_tile)
assert isinstance(data, np.ndarray)
assert not data[0].mask.all()
def test_output_single_gtiff_errors(output_single_gtiff):
# single gtiff does not work on multiple zoom levels
with pytest.raises(ValueError):
mapchete.open(dict(output_single_gtiff.dict, zoom_levels=[5, 6]))
# provide either process_tile or output_tile
with mapchete.open(output_single_gtiff.path) as mp:
tile = mp.config.process_pyramid.tile(5, 3, 7)
with pytest.raises(ValueError):
mp.config.output.tiles_exist(process_tile=tile, output_tile=tile)
def test_output_single_gtiff_pixelbuffer(output_single_gtiff):
tile_id = (5, 3, 7)
with mapchete.open(
dict(
output_single_gtiff.dict,
output=dict(output_single_gtiff.dict["output"], pixelbuffer=5),
),
) as mp:
process_tile = mp.config.process_pyramid.tile(*tile_id)
# basic functions
assert mp.config.output.profile()
assert mp.config.output.empty(process_tile).mask.all()
assert mp.config.output.get_path(process_tile)
# check if tile exists
assert not mp.config.output.tiles_exist(process_tile)
# write
mp.batch_process(tile=process_tile.id)
# check if tile exists
assert mp.config.output.tiles_exist(process_tile)
# read again, this time with data
data = mp.config.output.read(process_tile)
assert isinstance(data, np.ndarray)
assert not data[0].mask.all()
def test_output_single_gtiff_compression(output_single_gtiff):
tile_id = (5, 3, 7)
with mapchete.open(
dict(
output_single_gtiff.dict,
output=dict(output_single_gtiff.dict["output"], compress="deflate"),
),
) as mp:
process_tile = mp.config.process_pyramid.tile(*tile_id)
assert "compress" in mp.config.output.profile()
assert mp.config.output.profile()["compress"] == "deflate"
mp.batch_process(tile=process_tile.id)
with rasterio.open(mp.config.output.path) as src:
assert src.profile["compress"] == "deflate"
def test_output_single_gtiff_overviews(output_single_gtiff):
# overwrite existing file
with mapchete.open(
dict(
output_single_gtiff.dict,
output=dict(
output_single_gtiff.dict["output"],
overviews=True,
overviews_resampling="bilinear",
),
),
) as mp:
tile_id = (5, 3, 7)
process_tile = mp.config.process_pyramid.tile(*tile_id)
mp.batch_process(tile=process_tile.id)
with rasterio.open(mp.config.output.path) as src:
assert src.overviews(1)
assert src.tags().get("OVR_RESAMPLING_ALG").lower() == "bilinear"
for o in [1, 2, 4, 8]:
a = src.read(
masked=True, out_shape=(1, int(src.height / o), int(src.width / o))
)
assert not a.mask.all()
@pytest.mark.remote
def test_output_single_gtiff_s3(output_single_gtiff, mp_s3_tmpdir):
tile_id = (5, 3, 7)
with mapchete.open(
dict(
output_single_gtiff.dict,
output=dict(
output_single_gtiff.dict["output"],
path=os.path.join(mp_s3_tmpdir, "temp.tif"),
),
)
) as mp:
process_tile = mp.config.process_pyramid.tile(*tile_id)
# basic functions
assert mp.config.output.profile()
assert mp.config.output.empty(process_tile).mask.all()
assert mp.config.output.get_path(process_tile)
# check if tile exists
assert not mp.config.output.tiles_exist(process_tile)
# write
mp.batch_process(multi=2)
# check if tile exists
assert mp.config.output.tiles_exist(process_tile)
# read again, this time with data
data = mp.config.output.read(process_tile)
assert isinstance(data, np.ndarray)
assert not data[0].mask.all()
# write empty array
data = ma.masked_array(
data=np.ones(process_tile.shape),
mask=np.ones(process_tile.shape),
)
mp.config.output.write(process_tile, data)
assert path_exists(mp.config.output.path)
@pytest.mark.remote
def test_output_single_gtiff_s3_tempfile(output_single_gtiff, mp_s3_tmpdir):
tile_id = (5, 3, 7)
with mapchete.open(
dict(
output_single_gtiff.dict,
output=dict(
output_single_gtiff.dict["output"],
path=os.path.join(mp_s3_tmpdir, "temp.tif"),
in_memory=False,
),
)
) as mp:
process_tile = mp.config.process_pyramid.tile(*tile_id)
# basic functions
assert mp.config.output.profile()
assert mp.config.output.empty(process_tile).mask.all()
assert mp.config.output.get_path(process_tile)
# check if tile exists
assert not mp.config.output.tiles_exist(process_tile)
# write
mp.batch_process(multi=2)
# check if tile exists
assert mp.config.output.tiles_exist(process_tile)
# read again, this time with data
data = mp.config.output.read(process_tile)
assert isinstance(data, np.ndarray)
assert not data[0].mask.all()
# write empty array
data = ma.masked_array(
data=np.ones(process_tile.shape),
mask=np.ones(process_tile.shape),
)
mp.config.output.write(process_tile, data)
assert path_exists(mp.config.output.path)
def test_output_single_gtiff_cog(output_single_gtiff_cog):
tile_id = (5, 3, 7)
with mapchete.open(output_single_gtiff_cog.dict) as mp:
process_tile = mp.config.process_pyramid.tile(*tile_id)
# basic functions
assert mp.config.output.profile()
assert mp.config.output.empty(process_tile).mask.all()
assert mp.config.output.get_path(process_tile)
# check if tile exists
assert not mp.config.output.tiles_exist(process_tile)
# write
mp.batch_process(multi=2)
# check if tile exists
assert mp.config.output.tiles_exist(process_tile)
# read again, this time with data
data = mp.config.output.read(process_tile)
assert isinstance(data, np.ndarray)
assert not data[0].mask.all()
# write empty array
data = ma.masked_array(
data=np.ones(process_tile.shape),
mask=np.ones(process_tile.shape),
)
mp.config.output.write(process_tile, data)
assert path_exists(mp.config.output.path)
assert cog_validate(mp.config.output.path, strict=True)
def test_output_single_gtiff_cog_tempfile(output_single_gtiff_cog):
tile_id = (5, 3, 7)
with mapchete.open(
dict(
output_single_gtiff_cog.dict,
output=dict(output_single_gtiff_cog.dict["output"], in_memory=False),
)
) as mp:
process_tile = mp.config.process_pyramid.tile(*tile_id)
# basic functions
assert mp.config.output.profile()
assert mp.config.output.empty(process_tile).mask.all()
assert mp.config.output.get_path(process_tile)
# check if tile exists
assert not mp.config.output.tiles_exist(process_tile)
# write
mp.batch_process(multi=2)
# check if tile exists
assert mp.config.output.tiles_exist(process_tile)
# read again, this time with data
data = mp.config.output.read(process_tile)
assert isinstance(data, np.ndarray)
assert not data[0].mask.all()
# write empty array
data = ma.masked_array(
data=np.ones(process_tile.shape),
mask=np.ones(process_tile.shape),
)
mp.config.output.write(process_tile, data)
assert path_exists(mp.config.output.path)
assert cog_validate(mp.config.output.path, strict=True)
@pytest.mark.remote
def test_output_single_gtiff_cog_s3(output_single_gtiff_cog, mp_s3_tmpdir):
tile_id = (5, 3, 7)
with mapchete.open(
dict(
output_single_gtiff_cog.dict,
output=dict(
output_single_gtiff_cog.dict["output"],
path=os.path.join(mp_s3_tmpdir, "cog.tif"),
),
)
) as mp:
process_tile = mp.config.process_pyramid.tile(*tile_id)
# basic functions
assert mp.config.output.profile()
assert mp.config.output.empty(process_tile).mask.all()
assert mp.config.output.get_path(process_tile)
# check if tile exists
assert not mp.config.output.tiles_exist(process_tile)
# write
mp.batch_process(multi=2)
# check if tile exists
assert mp.config.output.tiles_exist(process_tile)
# read again, this time with data
data = mp.config.output.read(process_tile)
assert isinstance(data, np.ndarray)
assert not data[0].mask.all()
# write empty array
data = ma.masked_array(
data=np.ones(process_tile.shape),
mask=np.ones(process_tile.shape),
)
mp.config.output.write(process_tile, data)
assert path_exists(mp.config.output.path)
assert cog_validate(mp.config.output.path, strict=True)
avg_line_length: 36.396761 | max_line_length: 86 | alphanum_fraction: 0.628699
content_no_comment:
import numpy as np
import numpy.ma as ma
import os
import pytest
import rasterio
from rasterio.io import MemoryFile
from rio_cogeo.cogeo import cog_validate
import shutil
from tilematrix import Bounds
import warnings
import mapchete
from mapchete.errors import MapcheteConfigError
from mapchete.io import path_exists
from mapchete.formats.default import gtiff
from mapchete.tile import BufferedTilePyramid
def test_output_data(mp_tmpdir):
output_params = dict(
grid="geodetic",
format="GeoTIFF",
path=mp_tmpdir,
pixelbuffer=0,
metatiling=1,
bands=1,
dtype="int16",
delimiters=dict(
bounds=Bounds(-180.0, -90.0, 180.0, 90.0),
effective_bounds=Bounds(-180.439453125, -90.0, 180.439453125, 90.0),
zoom=[5],
process_bounds=Bounds(-180.0, -90.0, 180.0, 90.0),
),
)
output = gtiff.OutputDataWriter(output_params)
assert output.path == mp_tmpdir
assert output.file_extension == ".tif"
tp = BufferedTilePyramid("geodetic")
tile = tp.tile(5, 5, 5)
assert output.get_path(tile) == os.path.join(*[mp_tmpdir, "5", "5", "5" + ".tif"])
try:
temp_dir = os.path.join(*[mp_tmpdir, "5", "5"])
output.prepare_path(tile)
assert os.path.isdir(temp_dir)
finally:
shutil.rmtree(temp_dir, ignore_errors=True)
assert isinstance(output.profile(tile), dict)
try:
data = np.ones((1,) + tile.shape) * 128
output.write(tile, data)
assert output.tiles_exist(tile)
data = output.read(tile)
assert isinstance(data, np.ndarray)
assert not data[0].mask.any()
finally:
shutil.rmtree(mp_tmpdir, ignore_errors=True)
try:
data = output.read(tile)
assert isinstance(data, np.ndarray)
assert data[0].mask.all()
finally:
shutil.rmtree(mp_tmpdir, ignore_errors=True)
try:
empty = output.empty(tile)
assert isinstance(empty, ma.MaskedArray)
assert not empty.any()
finally:
shutil.rmtree(mp_tmpdir, ignore_errors=True)
try:
output_params.update(compress="deflate", predictor=2)
output = gtiff.OutputDataWriter(output_params)
assert output.profile(tile)["compress"] == "deflate"
assert output.profile(tile)["predictor"] == 2
finally:
shutil.rmtree(mp_tmpdir, ignore_errors=True)
try:
with pytest.deprecated_call():
output_params.update(compression="deflate", predictor=2)
output = gtiff.OutputDataWriter(output_params)
assert output.profile(tile)["compress"] == "deflate"
assert output.profile(tile)["predictor"] == 2
finally:
shutil.rmtree(mp_tmpdir, ignore_errors=True)
def test_for_web(client, mp_tmpdir):
tile_base_url = "/wmts_simple/1.0.0/cleantopo_br/default/WGS84/"
for url in ["/"]:
response = client.get(url)
assert response.status_code == 200
for url in [
tile_base_url + "5/30/62.tif",
tile_base_url + "5/30/63.tif",
tile_base_url + "5/31/62.tif",
tile_base_url + "5/31/63.tif",
]:
response = client.get(url)
assert response.status_code == 200
img = response.data
with warnings.catch_warnings():
warnings.simplefilter("ignore")
with MemoryFile(img) as memfile:
with memfile.open() as dataset:
assert dataset.read().any()
def test_input_data(mp_tmpdir, cleantopo_br):
with mapchete.open(cleantopo_br.path) as mp:
tp = BufferedTilePyramid("geodetic")
tile = tp.tile(5, 5, 5)
output_params = dict(
grid="geodetic",
format="GeoTIFF",
path=mp_tmpdir,
pixelbuffer=0,
metatiling=1,
bands=2,
dtype="int16",
delimiters=dict(
bounds=Bounds(-180.0, -90.0, 180.0, 90.0),
effective_bounds=Bounds(-180.439453125, -90.0, 180.439453125, 90.0),
zoom=[5],
process_bounds=Bounds(-180.0, -90.0, 180.0, 90.0),
),
)
output = gtiff.OutputDataWriter(output_params)
with output.open(tile, mp) as input_tile:
for data in [
input_tile.read(),
input_tile.read(1),
input_tile.read([1]),
]:
assert isinstance(data, ma.masked_array)
assert input_tile.is_empty()
with output.open(tile, mp) as input_tile:
pass
def test_write_geotiff_tags(mp_tmpdir, cleantopo_br, write_rasterfile_tags_py):
conf = dict(**cleantopo_br.dict)
conf.update(process=write_rasterfile_tags_py)
with mapchete.open(conf) as mp:
for tile in mp.get_process_tiles():
data, tags = mp.execute(tile)
assert data.any()
assert isinstance(tags, dict)
mp.write(process_tile=tile, data=(data, tags))
out_path = mp.config.output.get_path(tile)
with rasterio.open(out_path) as src:
assert "filewide_tag" in src.tags()
assert src.tags()["filewide_tag"] == "value"
assert "band_tag" in src.tags(1)
assert src.tags(1)["band_tag"] == "True"
@pytest.mark.remote
def test_s3_write_output_data(gtiff_s3, s3_example_tile, mp_s3_tmpdir):
with mapchete.open(gtiff_s3.dict) as mp:
process_tile = mp.config.process_pyramid.tile(*s3_example_tile)
assert mp.config.output.profile()
assert mp.config.output.empty(process_tile).mask.all()
assert mp.config.output.get_path(process_tile)
assert not mp.config.output.tiles_exist(process_tile)
mp.batch_process(tile=process_tile.id)
assert mp.config.output.tiles_exist(process_tile)
data = mp.config.output.read(process_tile)
assert isinstance(data, np.ndarray)
assert not data[0].mask.all()
def test_output_single_gtiff(output_single_gtiff):
tile_id = (5, 3, 7)
with mapchete.open(output_single_gtiff.path) as mp:
process_tile = mp.config.process_pyramid.tile(*tile_id)
assert mp.config.output.profile()
assert mp.config.output.empty(process_tile).mask.all()
assert mp.config.output.get_path(process_tile)
assert not mp.config.output.tiles_exist(process_tile)
mp.batch_process(multi=2)
assert mp.config.output.tiles_exist(process_tile)
data = mp.config.output.read(process_tile)
assert isinstance(data, np.ndarray)
assert not data[0].mask.all()
data = ma.masked_array(
data=np.ones(process_tile.shape),
mask=np.ones(process_tile.shape),
)
mp.config.output.write(process_tile, data)
assert os.path.isfile(mp.config.output.path)
with pytest.raises(MapcheteConfigError):
mapchete.open(output_single_gtiff.path)
with mapchete.open(output_single_gtiff.path, mode="overwrite") as mp:
process_tile = mp.config.process_pyramid.tile(*tile_id)
assert not mp.config.output.tiles_exist(process_tile)
mp.batch_process(tile=process_tile.id)
assert mp.config.output.tiles_exist(process_tile)
assert mp.config.output.tiles_exist(
output_tile=mp.config.output_pyramid.intersecting(process_tile)[0]
)
data = mp.config.output.read(process_tile)
assert isinstance(data, np.ndarray)
assert not data[0].mask.all()
def test_output_single_gtiff_errors(output_single_gtiff):
with pytest.raises(ValueError):
mapchete.open(dict(output_single_gtiff.dict, zoom_levels=[5, 6]))
with mapchete.open(output_single_gtiff.path) as mp:
tile = mp.config.process_pyramid.tile(5, 3, 7)
with pytest.raises(ValueError):
mp.config.output.tiles_exist(process_tile=tile, output_tile=tile)
def test_output_single_gtiff_pixelbuffer(output_single_gtiff):
tile_id = (5, 3, 7)
with mapchete.open(
dict(
output_single_gtiff.dict,
output=dict(output_single_gtiff.dict["output"], pixelbuffer=5),
),
) as mp:
process_tile = mp.config.process_pyramid.tile(*tile_id)
assert mp.config.output.profile()
assert mp.config.output.empty(process_tile).mask.all()
assert mp.config.output.get_path(process_tile)
assert not mp.config.output.tiles_exist(process_tile)
mp.batch_process(tile=process_tile.id)
assert mp.config.output.tiles_exist(process_tile)
data = mp.config.output.read(process_tile)
assert isinstance(data, np.ndarray)
assert not data[0].mask.all()
def test_output_single_gtiff_compression(output_single_gtiff):
tile_id = (5, 3, 7)
with mapchete.open(
dict(
output_single_gtiff.dict,
output=dict(output_single_gtiff.dict["output"], compress="deflate"),
),
) as mp:
process_tile = mp.config.process_pyramid.tile(*tile_id)
assert "compress" in mp.config.output.profile()
assert mp.config.output.profile()["compress"] == "deflate"
mp.batch_process(tile=process_tile.id)
with rasterio.open(mp.config.output.path) as src:
assert src.profile["compress"] == "deflate"
def test_output_single_gtiff_overviews(output_single_gtiff):
with mapchete.open(
dict(
output_single_gtiff.dict,
output=dict(
output_single_gtiff.dict["output"],
overviews=True,
overviews_resampling="bilinear",
),
),
) as mp:
tile_id = (5, 3, 7)
process_tile = mp.config.process_pyramid.tile(*tile_id)
mp.batch_process(tile=process_tile.id)
with rasterio.open(mp.config.output.path) as src:
assert src.overviews(1)
assert src.tags().get("OVR_RESAMPLING_ALG").lower() == "bilinear"
for o in [1, 2, 4, 8]:
a = src.read(
masked=True, out_shape=(1, int(src.height / o), int(src.width / o))
)
assert not a.mask.all()
@pytest.mark.remote
def test_output_single_gtiff_s3(output_single_gtiff, mp_s3_tmpdir):
tile_id = (5, 3, 7)
with mapchete.open(
dict(
output_single_gtiff.dict,
output=dict(
output_single_gtiff.dict["output"],
path=os.path.join(mp_s3_tmpdir, "temp.tif"),
),
)
) as mp:
process_tile = mp.config.process_pyramid.tile(*tile_id)
assert mp.config.output.profile()
assert mp.config.output.empty(process_tile).mask.all()
assert mp.config.output.get_path(process_tile)
assert not mp.config.output.tiles_exist(process_tile)
mp.batch_process(multi=2)
assert mp.config.output.tiles_exist(process_tile)
data = mp.config.output.read(process_tile)
assert isinstance(data, np.ndarray)
assert not data[0].mask.all()
data = ma.masked_array(
data=np.ones(process_tile.shape),
mask=np.ones(process_tile.shape),
)
mp.config.output.write(process_tile, data)
assert path_exists(mp.config.output.path)
@pytest.mark.remote
def test_output_single_gtiff_s3_tempfile(output_single_gtiff, mp_s3_tmpdir):
tile_id = (5, 3, 7)
with mapchete.open(
dict(
output_single_gtiff.dict,
output=dict(
output_single_gtiff.dict["output"],
path=os.path.join(mp_s3_tmpdir, "temp.tif"),
in_memory=False,
),
)
) as mp:
process_tile = mp.config.process_pyramid.tile(*tile_id)
assert mp.config.output.profile()
assert mp.config.output.empty(process_tile).mask.all()
assert mp.config.output.get_path(process_tile)
assert not mp.config.output.tiles_exist(process_tile)
mp.batch_process(multi=2)
assert mp.config.output.tiles_exist(process_tile)
data = mp.config.output.read(process_tile)
assert isinstance(data, np.ndarray)
assert not data[0].mask.all()
data = ma.masked_array(
data=np.ones(process_tile.shape),
mask=np.ones(process_tile.shape),
)
mp.config.output.write(process_tile, data)
assert path_exists(mp.config.output.path)
def test_output_single_gtiff_cog(output_single_gtiff_cog):
tile_id = (5, 3, 7)
with mapchete.open(output_single_gtiff_cog.dict) as mp:
process_tile = mp.config.process_pyramid.tile(*tile_id)
assert mp.config.output.profile()
assert mp.config.output.empty(process_tile).mask.all()
assert mp.config.output.get_path(process_tile)
assert not mp.config.output.tiles_exist(process_tile)
mp.batch_process(multi=2)
assert mp.config.output.tiles_exist(process_tile)
data = mp.config.output.read(process_tile)
assert isinstance(data, np.ndarray)
assert not data[0].mask.all()
data = ma.masked_array(
data=np.ones(process_tile.shape),
mask=np.ones(process_tile.shape),
)
mp.config.output.write(process_tile, data)
assert path_exists(mp.config.output.path)
assert cog_validate(mp.config.output.path, strict=True)
def test_output_single_gtiff_cog_tempfile(output_single_gtiff_cog):
tile_id = (5, 3, 7)
with mapchete.open(
dict(
output_single_gtiff_cog.dict,
output=dict(output_single_gtiff_cog.dict["output"], in_memory=False),
)
) as mp:
process_tile = mp.config.process_pyramid.tile(*tile_id)
assert mp.config.output.profile()
assert mp.config.output.empty(process_tile).mask.all()
assert mp.config.output.get_path(process_tile)
assert not mp.config.output.tiles_exist(process_tile)
mp.batch_process(multi=2)
assert mp.config.output.tiles_exist(process_tile)
data = mp.config.output.read(process_tile)
assert isinstance(data, np.ndarray)
assert not data[0].mask.all()
data = ma.masked_array(
data=np.ones(process_tile.shape),
mask=np.ones(process_tile.shape),
)
mp.config.output.write(process_tile, data)
assert path_exists(mp.config.output.path)
assert cog_validate(mp.config.output.path, strict=True)
@pytest.mark.remote
def test_output_single_gtiff_cog_s3(output_single_gtiff_cog, mp_s3_tmpdir):
tile_id = (5, 3, 7)
with mapchete.open(
dict(
output_single_gtiff_cog.dict,
output=dict(
output_single_gtiff_cog.dict["output"],
path=os.path.join(mp_s3_tmpdir, "cog.tif"),
),
)
) as mp:
process_tile = mp.config.process_pyramid.tile(*tile_id)
assert mp.config.output.profile()
assert mp.config.output.empty(process_tile).mask.all()
assert mp.config.output.get_path(process_tile)
assert not mp.config.output.tiles_exist(process_tile)
mp.batch_process(multi=2)
assert mp.config.output.tiles_exist(process_tile)
data = mp.config.output.read(process_tile)
assert isinstance(data, np.ndarray)
assert not data[0].mask.all()
data = ma.masked_array(
data=np.ones(process_tile.shape),
mask=np.ones(process_tile.shape),
)
mp.config.output.write(process_tile, data)
assert path_exists(mp.config.output.path)
assert cog_validate(mp.config.output.path, strict=True)
is_comment_constant_removed: true | is_sharp_comment_removed: true
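A recurring pattern in this row's tests is writing a fully masked array to represent an empty tile. A small self-contained sketch of that idea; the (256, 256) shape is only an illustrative assumption, and mapchete itself is not needed:

```python
import numpy as np
import numpy.ma as ma

# A fully masked array stands in for "no data": every element is masked, so a
# reader treats the tile as empty, mirroring the empty-write asserts above.
shape = (256, 256)
empty_tile = ma.masked_array(data=np.ones(shape), mask=np.ones(shape, dtype=bool))

assert empty_tile.mask.all()      # everything is masked ...
assert empty_tile.count() == 0    # ... so no valid (unmasked) values remain
```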
Row 3
hexsha: 790481acff9e1f94ef5f8e798b0cdd6ecbc4e28a | size: 5,863 | ext: py | lang: Python
max_stars: path docs/conf.py, repo C-Pauli/cob, head 88b9c4f9206f09dec446885485a73cdf2b366379, licenses ["MIT"], count 2, events 2016-09-28T15:21:04.000Z to 2017-02-21T19:56:47.000Z
max_issues: path docs/conf.py, repo C-Pauli/cob, head 88b9c4f9206f09dec446885485a73cdf2b366379, licenses ["MIT"], count 67, events 2016-05-17T16:30:14.000Z to 2017-08-06T23:11:51.000Z
max_forks: path docs/conf.py, repo C-Pauli/cob, head 88b9c4f9206f09dec446885485a73cdf2b366379, licenses ["MIT"], count 5, events 2018-09-28T21:45:10.000Z to 2019-08-16T03:20:16.000Z
content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# cob documentation build configuration file, created by
# sphinx-quickstart on Sun Jan 7 18:09:10 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
#from recommonmark.parser import CommonMarkParser
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinxcontrib.programoutput',
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.napoleon',
'IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_parsers = {
'.md': 'recommonmark.parser.CommonMarkParser',
}
source_suffix = ['.rst', '.md']
#source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'COB'
copyright = '2019, Joseph Jeffers, Rob Schaefer'
author = 'Joseph Jeffers, Rob Schaefer'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import cob
version = cob.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'globaltoc.html',
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'cobdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'cob.tex', 'cob Documentation',
'Joseph Jeffers, Rob Schaefer', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'cob', 'cob Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'cob', 'cob Documentation',
author, 'cob', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
avg_line_length: 30.378238 | max_line_length: 79 | alphanum_fraction: 0.683268
content_no_comment:
extensions = [
'sphinxcontrib.programoutput',
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.napoleon',
'IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive'
]
templates_path = ['_templates']
source_parsers = {
'.md': 'recommonmark.parser.CommonMarkParser',
}
source_suffix = ['.rst', '.md']
master_doc = 'index'
project = 'COB'
copyright = '2019, Joseph Jeffers, Rob Schaefer'
author = 'Joseph Jeffers, Rob Schaefer'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import cob
version = cob.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'globaltoc.html',
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'cobdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'cob.tex', 'cob Documentation',
'Joseph Jeffers, Rob Schaefer', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'cob', 'cob Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'cob', 'cob Documentation',
author, 'cob', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
is_comment_constant_removed: true | is_sharp_comment_removed: true
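The conf.py in this row imports the package (`import cob`) to read `__version__` at build time. A hedged alternative sketch that reads the installed distribution metadata instead; the distribution name "cob" and the fallback value are assumptions:

```python
# Sketch only: resolve the docs version without importing the package itself.
from importlib.metadata import PackageNotFoundError, version as dist_version

try:
    release = dist_version("cob")           # full version string, e.g. "1.2.3rc1"
except PackageNotFoundError:
    release = "0.0.0.dev0"                  # placeholder when building from an uninstalled checkout
version = ".".join(release.split(".")[:2])  # short X.Y form Sphinx conventionally uses
```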
Row 4
hexsha: 7904834425721460d9dbad64646007bec03538a2 | size: 14,823 | ext: py | lang: Python
max_stars: path generator_labeler/paper_results/custom_plots.py, repo researchuser7/QWAugmenter, head eb70fa27ddb4b90d72c2eae6db2ff65086c3fb69, licenses ["MIT"], count null, events null to null
max_issues: path generator_labeler/paper_results/custom_plots.py, repo researchuser7/QWAugmenter, head eb70fa27ddb4b90d72c2eae6db2ff65086c3fb69, licenses ["MIT"], count null, events null to null
max_forks: path generator_labeler/paper_results/custom_plots.py, repo researchuser7/QWAugmenter, head eb70fa27ddb4b90d72c2eae6db2ff65086c3fb69, licenses ["MIT"], count 1, events 2022-02-28T04:45:16.000Z to 2022-02-28T04:45:16.000Z
content:
import numpy as np
from sklearn.metrics import r2_score
np.random.seed(42)
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
figsize = (8, 4)
def show_r2(results):
data_size = results["data_size"]
test_scores = results["test_scores"]
test_scores_exp = results["test_scores_exp"]
fig, ax = plt.subplots(figsize=(8, 4))
ax.plot(list(map(lambda x: x["r2"], test_scores)), marker="o", label="Log(Exec. time)", color="#777777")
ax.plot(list(map(lambda x: x["r2"], test_scores_exp)), marker="o", label="Exec. time", color="#111111")
ax.set_xticks(list(range(data_size.__len__())))
ax.set_xticklabels(data_size, rotation=60)
ax.set_ylim((0, 1))
ax.set_yticks(np.arange(0, 1, 0.1))
ax.set_xlabel("# Executed Jobs")
ax.set_ylabel("$R^2$ Score")
ax.legend()
return ax
def compare_r2(results, results_real_card, results_random_sampling=None, exp=True):
data_size = results["data_size"]
if exp:
test_scores_real = results_real_card["test_scores_exp"]
test_scores = results["test_scores_exp"]
else:
test_scores_real = results_real_card["test_scores"]
test_scores = results["test_scores"]
fig, ax = plt.subplots(figsize=(8, 2))
if results_random_sampling:
if exp:
test_scores_random = results_random_sampling["test_scores_exp"]
else:
test_scores_random = results_random_sampling["test_scores"]
ax.plot(list(map(lambda x: x["r2"], test_scores_random)), marker="^", linestyle="dotted",
label="Rand. samples - Estimated out card. (Baseline)",
color=sns.color_palette()[-4])
ax.plot(list(map(lambda x: x["r2"], test_scores)), marker="o", label="Active labeling - Estimated out card.",
color="#111111")
ax.plot(list(map(lambda x: x["r2"], test_scores_real)), linestyle="--", marker="s",
label="Active labeling - Real out card. (Top-line)",
color=sns.color_palette()[-3], alpha=0.85)
ax.set_xticks(list(range(data_size.__len__())))
ax.set_xticklabels(data_size, rotation=60)
ax.set_ylim((0, 1))
ax.set_yticks(np.arange(0, 1, 0.2))
ax.set_xlabel("# Cumulated Executed Jobs")
ax.set_ylabel("$R^2$ of pred.\nExec. Time")
ax.legend()
return ax
def show_uncertainty(results, show_errors=False):
data_size = results["data_size"]
IQRs_RMSE = results["model_uncertainty"]
IQRs_RMSE = np.array([np.mean(np.exp(I["uncertainty_high"]) - np.exp(I["uncertainty_low"])) for I in results["iterations_results"]])
IQRs_std = np.array([np.std(np.exp(I["uncertainty_high"]) - np.exp(I["uncertainty_low"])) for I in
results["iterations_results"]])
fig, ax = plt.subplots(figsize=(8, 2))
if show_errors:
ax.errorbar(np.arange(len(IQRs_RMSE)),
IQRs_RMSE,
yerr=IQRs_std, fmt='o', label="Uncertainty")
else:
ax.plot(IQRs_RMSE, marker="o", label="Uncertainty")
ax.set_xticks(list(range(data_size.__len__())))
ax.set_xticklabels(data_size, rotation=60)
ax.set_xlabel("# Cumulated Executed Jobs")
ax.set_ylabel("Model\nUncertainty [ms]")
final_th = 0.1
count = 0
min_u = IQRs_RMSE[0]
min_local_u = IQRs_RMSE[0]
stops = []
for i in range(1, len(data_size)):
#print(i, " -> min_local_u", min_local_u)
r = IQRs_RMSE[i] / min_local_u
#print(r)
if (r > 1) or (IQRs_RMSE[i]>min_u):
pass
elif (1-r) < final_th:
pass
else:
print(i, data_size[i], "-> STOP!")
count += 1
stops.append({"iteration": i, "data_size": data_size[i],
"uncertainty": IQRs_RMSE[i],
"uncertainty_std": IQRs_std[i],
"cost": np.sum(np.exp(results["iterations_results"][i]["train_labels"]))
})
print("--------------------------------")
min_u = min(IQRs_RMSE[:i+1])
min_local_u = min(IQRs_RMSE[i-1:i+1])
#min_cost_id = np.argwhere(IQRs_RMSE == min_cost)
if len(stops) == 0:
stops.append({"iteration": len(data_size)-1, "data_size": data_size[len(data_size)-1], "cost": np.sum(np.exp(results["iterations_results"][len(data_size)-1]["train_labels"])) })
ax.errorbar([s["iteration"] for s in stops], [s["uncertainty"] for s in stops], color="red", label="Early stop", linewidth=0, marker="o" )
ax.legend()
print(pd.DataFrame(stops))
return ax
def show_iteration(results, iteration_to_show, exp=False, drop_outliers=False):
y_test = results["iterations_results"][iteration_to_show]["test_labels"]
y_pred = results["iterations_results"][iteration_to_show]["pred_labels"]
y_pred_lower = results["iterations_results"][iteration_to_show]["uncertainty_low"]
y_pred_upper = results["iterations_results"][iteration_to_show]["uncertainty_high"]
p = y_test.argsort()
if drop_outliers:
q = np.quantile(y_test, 0.97)
print(q)
out_mask = y_test < q
print(out_mask.shape)
y_test = y_test[out_mask]
y_pred = y_pred[out_mask]
y_pred_lower = y_pred_lower[out_mask]
y_pred_upper = y_pred_upper[out_mask]
p = y_test.argsort()
fig, ax = plt.subplots(figsize=(6, 3))
if exp:
y_test = np.exp(y_test)
y_pred = np.exp(y_pred)
y_pred_lower = np.exp(y_pred_lower)
y_pred_upper = np.exp(y_pred_upper)
if drop_outliers:
new_r2 = r2_score(y_test, y_pred)
print("NEW R2 without outliers:", new_r2)
ax.plot(y_test[p], marker=".", linewidth=1, label="Real", color="#777777", alpha=0.5)
ax.errorbar(np.arange(len(y_pred)),y_pred[p], yerr=np.array([y_pred[p] - y_pred_lower[p], y_pred_upper[p] - y_pred[p]]), linewidth=0.5, fmt='.', color="#ff7f0e", label="Pred. + Interval", alpha=0.5)
#ax.plot(np.arange(len(y_pred)), (y_pred_lower[p]+y_pred_upper[p])/2, marker=".", linewidth=0, label="smooth", color="green")
ax.set_ylabel("Exec. Time [ms]")
# ax.ticklabel_format(axis='y', style='sci', scilimits=(0, 3))
#ax.set_yscale("log")
ax.set_xlabel("Non-executed Jobs")
ax.legend()
print(results["test_scores_exp"][iteration_to_show])
else:
ax.plot(y_test[p], marker=".", linewidth=1, label="Real", color="#777777", alpha=0.5)
ax.errorbar(np.arange(len(y_pred)), y_pred[p], yerr=np.array([y_pred[p] - y_pred_lower[p], y_pred_upper[p] - y_pred[p]]), linewidth=0.5, fmt='.', color="#ff7f0e", label="Pred. + Interval", alpha=0.5)
ax.set_ylabel("Log(Exec. Time)")
ax.set_xlabel("Non-executed Jobs")
ax.legend()
print(results["test_scores"][iteration_to_show])
return ax
def show_iteration_2(results, iteration_to_show, drop_outliers=False):
y_test = results["iterations_results"][iteration_to_show]["test_labels"]
y_pred = results["iterations_results"][iteration_to_show]["pred_labels"]
y_pred_lower = results["iterations_results"][iteration_to_show]["uncertainty_low"]
y_pred_upper = results["iterations_results"][iteration_to_show]["uncertainty_high"]
p = y_test.argsort()
new_r2 = r2_score(y_test, y_pred)
print("NEW R2 log with outliers:", new_r2)
if drop_outliers:
q = np.quantile(y_test, 0.97)
print(q)
out_mask = y_test < q
print(out_mask.shape)
y_test = y_test[out_mask]
y_pred = y_pred[out_mask]
y_pred_lower = y_pred_lower[out_mask]
y_pred_upper = y_pred_upper[out_mask]
p = y_test.argsort()
fig, ax = plt.subplots(figsize=(6, 6))
y_test = np.exp(y_test)
y_pred = np.exp(y_pred)
y_pred_lower = np.exp(y_pred_lower)
y_pred_upper = np.exp(y_pred_upper)
if drop_outliers:
new_r2 = r2_score(y_test, y_pred)
print("NEW R2 without outliers:", new_r2)
ax.plot(y_test[p], y_test[p], marker=".", linewidth=1, label="Real", color="#777777", alpha=0.5)
ax.errorbar(y_test[p],y_pred[p], yerr=np.array([y_pred[p] - y_pred_lower[p], y_pred_upper[p] - y_pred[p]]), linewidth=0.5, fmt='.', color="#ff7f0e", label="Pred. + Interval", alpha=0.5)
ax.set_ylabel("Forecasted Exec. Time [ms] (Log scale)")
ax.set_yscale("log")
ax.set_xlabel("Real Exec. Time [ms] (Log scale)")
ax.set_xscale("log")
ax.legend()
return ax
def show_td_gen(results, iteration_to_show):
y_test = results[list(results.keys())[iteration_to_show]]["test_labels"]
y_pred = results[list(results.keys())[iteration_to_show]]["pred_labels"]
from sklearn.metrics import r2_score
score = r2_score(y_test, y_pred)
print("R2 score:", score)
p = y_test.argsort()
fig, ax = plt.subplots(figsize=(6, 3))
ax.plot(y_test[p], y_test[p], marker=".", linewidth=1, label="Real", color="#777777", alpha=0.5)
ax.plot(y_test[p], y_pred[p], marker=".", linewidth=0, label="TDGen Pred.", color=sns.color_palette()[4], alpha=0.5)
ax.set_ylabel("Forecasted Exec. Time [ms] (Log scale)")
ax.set_yscale("log")
ax.set_xlabel("Real Exec. Time [ms] (Log scale)")
ax.set_xscale("log")
ax.legend()
return ax
def show_our_and_td_gen(our_results, td_gen_results, iteration_to_show):
our_y_test = np.exp(our_results["iterations_results"][iteration_to_show]["test_labels"])
our_y_pred = np.exp(our_results["iterations_results"][iteration_to_show]["pred_labels"])
y_test = td_gen_results[list(td_gen_results.keys())[iteration_to_show]]["test_labels"]
y_pred = td_gen_results[list(td_gen_results.keys())[iteration_to_show]]["pred_labels"]
from sklearn.metrics import r2_score
score = r2_score(y_test, y_pred)
print("R2 score:", score)
p = y_test.argsort()
our_p = our_y_test.argsort()
fig, ax = plt.subplots(figsize=(6, 6))
ax.plot(y_test[p], y_test[p], marker="", linewidth=1, label="Real", color="#777777", alpha=0.5)
ax.plot(our_y_test[our_p], our_y_pred[our_p], marker=".", linewidth=0, label="Our solution", color=sns.color_palette()[1], alpha=0.2)
ax.plot(y_test[p], y_pred[p], marker=".", linewidth=0, label="TDGen Pred.", color=sns.color_palette()[4], alpha=0.2)
ax.set_ylabel("Forecasted Exec. Time [ms] (Log scale)")
ax.set_yscale("log")
ax.set_xlabel("Real Exec. Time [ms] (Log scale)")
ax.set_xscale("log")
ax.legend()
return ax
def compare_td_gen_r2(results, results_td_gen):
data_size = results["data_size"]
test_scores = results["test_scores_exp"]
from sklearn.metrics import r2_score
td_gen_scores = []
x = []
for k, v in results_td_gen.items():
y_test = v["test_labels"]
y_pred = v["pred_labels"]
score = r2_score(y_test, y_pred)
print(k ,"R2 score:", score)
td_gen_scores.append(score)
x.append(k)
fig, ax = plt.subplots(figsize=(8, 2))
ax.plot(td_gen_scores, linestyle="--", marker="o", label="TDGen",
color=sns.color_palette()[4])
ax.plot(list(map(lambda x: x["r2"], test_scores)), marker="o", label="Our solution",
color="#111111")
ax.set_xticks(list(range(data_size.__len__())))
ax.set_xticklabels(data_size, rotation=60)
print(np.array(list(map(lambda x: x["r2"], test_scores)))/np.array(td_gen_scores))
#ax.set_ylim((0, 1))
#ax.set_yticks(np.arange(0, 1, 0.1))
ax.set_xlabel("# Cumulated Executed Jobs")
ax.set_ylabel("$R^2$ of pred. Exec. Time")
ax.legend()
return ax
def show_centerd_uncertainty(data, iteration, exp=False):
print(data["iterations_results"][iteration].keys())
if exp:
preds = np.exp(np.array(data["iterations_results"][iteration]["pred_labels"]))
upper = np.exp(np.array(data["iterations_results"][iteration]["uncertainty_high"]))
lower = np.exp(np.array(data["iterations_results"][iteration]["uncertainty_low"]))
else:
preds = np.array(data["iterations_results"][iteration]["pred_labels"])
upper = np.array(data["iterations_results"][iteration]["uncertainty_high"])
lower = np.array(data["iterations_results"][iteration]["uncertainty_low"])
IQR_interval = upper - lower
sort_ind = np.argsort(IQR_interval)
# y_true_all = y_true_all[sort_ind]
preds = preds[sort_ind]
upper = upper[sort_ind]
lower = lower[sort_ind]
mean = (upper + lower) / 2
std = np.std((upper + lower))
# Center such that the mean of the prediction interval is at 0.0
# y_true_all_centered = y_true_all.copy()
upper_centered = upper.copy()
lower_centered = lower.copy()
preds_centered = preds.copy()
# y_true_all_centered -= mean
upper_centered = (upper_centered - mean) # /std
lower_centered = (lower_centered - mean) # /std
preds_centered = (preds_centered - mean) # /std
IRQ_th = np.quantile(IQR_interval, 0.95)
print(IRQ_th)
x_idx = np.arange(len(upper_centered))
cut = x_idx[IQR_interval[sort_ind] > IRQ_th]
print(cut)
fig, ax = plt.subplots(1, 1, figsize=(8, 4))
# ax.plot(y_true_all_centered, "ro", markersize=1)
ax.plot(preds_centered, marker=".", color="#ff7f0e", linewidth=0)
ax.fill_between(
np.arange(len(upper_centered)), lower_centered, upper_centered, alpha=0.2, color="#ff7f0e",
label="Pred. interval (centerd)")
ax.axvline(cut[0], color="red", linestyle="--", label="Threshold $\eta$")
ax.set_xlabel("Non-executed jobs sorted by uncertainty.")
ax.set_ylabel("Predicted values (centered)")
ax.legend()
# ax.set_yscale("symlog")
# ax.set_ylim([-1.5, 1.5])
def compute_stats_on_pred_errors(results, iteration_to_show):
y_train = results["iterations_results"][iteration_to_show]["train_labels"]
y_test = results["iterations_results"][iteration_to_show]["test_labels"]
y_pred = results["iterations_results"][iteration_to_show]["pred_labels"]
y_pred_lower = results["iterations_results"][iteration_to_show]["uncertainty_low"]
y_pred_upper = results["iterations_results"][iteration_to_show]["uncertainty_high"]
y_train = np.exp(y_train)
y_test = np.exp(y_test)
y_pred = np.exp(y_pred)
y_pred_lower = np.exp(y_pred_lower)
y_pred_upper = np.exp(y_pred_upper)
print("Real values")
print(pd.Series(np.hstack((y_train, y_test)) / 1000).describe())
print("highest 5:", np.sort(np.hstack((y_train, y_test)))[-5:]/1000)
print()
print("\nAverage Prediction Error")
print(pd.Series(np.abs(y_test - y_pred) / 1000).describe())
# count_true = (y_test <= y_pred_upper) & (y_test >= y_pred_lower)
# print(len(count_true),len(count_true[count_true==True]))
avg_line_length: 38.203608 | max_line_length: 208 | alphanum_fraction: 0.644134
content_no_comment:
import numpy as np
from sklearn.metrics import r2_score
np.random.seed(42)
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
figsize = (8, 4)
def show_r2(results):
data_size = results["data_size"]
test_scores = results["test_scores"]
test_scores_exp = results["test_scores_exp"]
fig, ax = plt.subplots(figsize=(8, 4))
ax.plot(list(map(lambda x: x["r2"], test_scores)), marker="o", label="Log(Exec. time)", color="#777777")
ax.plot(list(map(lambda x: x["r2"], test_scores_exp)), marker="o", label="Exec. time", color="#111111")
ax.set_xticks(list(range(data_size.__len__())))
ax.set_xticklabels(data_size, rotation=60)
ax.set_ylim((0, 1))
ax.set_yticks(np.arange(0, 1, 0.1))
ax.set_xlabel("# Executed Jobs")
ax.set_ylabel("$R^2$ Score")
ax.legend()
return ax
def compare_r2(results, results_real_card, results_random_sampling=None, exp=True):
data_size = results["data_size"]
if exp:
test_scores_real = results_real_card["test_scores_exp"]
test_scores = results["test_scores_exp"]
else:
test_scores_real = results_real_card["test_scores"]
test_scores = results["test_scores"]
fig, ax = plt.subplots(figsize=(8, 2))
if results_random_sampling:
if exp:
test_scores_random = results_random_sampling["test_scores_exp"]
else:
test_scores_random = results_random_sampling["test_scores"]
ax.plot(list(map(lambda x: x["r2"], test_scores_random)), marker="^", linestyle="dotted",
label="Rand. samples - Estimated out card. (Baseline)",
color=sns.color_palette()[-4])
ax.plot(list(map(lambda x: x["r2"], test_scores)), marker="o", label="Active labeling - Estimated out card.",
color="#111111")
ax.plot(list(map(lambda x: x["r2"], test_scores_real)), linestyle="--", marker="s",
label="Active labeling - Real out card. (Top-line)",
color=sns.color_palette()[-3], alpha=0.85)
ax.set_xticks(list(range(data_size.__len__())))
ax.set_xticklabels(data_size, rotation=60)
ax.set_ylim((0, 1))
ax.set_yticks(np.arange(0, 1, 0.2))
ax.set_xlabel("# Cumulated Executed Jobs")
ax.set_ylabel("$R^2$ of pred.\nExec. Time")
ax.legend()
return ax
def show_uncertainty(results, show_errors=False):
data_size = results["data_size"]
IQRs_RMSE = results["model_uncertainty"]
IQRs_RMSE = np.array([np.mean(np.exp(I["uncertainty_high"]) - np.exp(I["uncertainty_low"])) for I in results["iterations_results"]])
IQRs_std = np.array([np.std(np.exp(I["uncertainty_high"]) - np.exp(I["uncertainty_low"])) for I in
results["iterations_results"]])
fig, ax = plt.subplots(figsize=(8, 2))
if show_errors:
ax.errorbar(np.arange(len(IQRs_RMSE)),
IQRs_RMSE,
yerr=IQRs_std, fmt='o', label="Uncertainty")
else:
ax.plot(IQRs_RMSE, marker="o", label="Uncertainty")
ax.set_xticks(list(range(data_size.__len__())))
ax.set_xticklabels(data_size, rotation=60)
ax.set_xlabel("# Cumulated Executed Jobs")
ax.set_ylabel("Model\nUncertainty [ms]")
final_th = 0.1
count = 0
min_u = IQRs_RMSE[0]
min_local_u = IQRs_RMSE[0]
stops = []
for i in range(1, len(data_size)):
r = IQRs_RMSE[i] / min_local_u
if (r > 1) or (IQRs_RMSE[i]>min_u):
pass
elif (1-r) < final_th:
pass
else:
print(i, data_size[i], "-> STOP!")
count += 1
stops.append({"iteration": i, "data_size": data_size[i],
"uncertainty": IQRs_RMSE[i],
"uncertainty_std": IQRs_std[i],
"cost": np.sum(np.exp(results["iterations_results"][i]["train_labels"]))
})
print("--------------------------------")
min_u = min(IQRs_RMSE[:i+1])
min_local_u = min(IQRs_RMSE[i-1:i+1])
if len(stops) == 0:
stops.append({"iteration": len(data_size)-1, "data_size": data_size[len(data_size)-1], "cost": np.sum(np.exp(results["iterations_results"][len(data_size)-1]["train_labels"])) })
ax.errorbar([s["iteration"] for s in stops], [s["uncertainty"] for s in stops], color="red", label="Early stop", linewidth=0, marker="o" )
ax.legend()
print(pd.DataFrame(stops))
return ax
def show_iteration(results, iteration_to_show, exp=False, drop_outliers=False):
y_test = results["iterations_results"][iteration_to_show]["test_labels"]
y_pred = results["iterations_results"][iteration_to_show]["pred_labels"]
y_pred_lower = results["iterations_results"][iteration_to_show]["uncertainty_low"]
y_pred_upper = results["iterations_results"][iteration_to_show]["uncertainty_high"]
p = y_test.argsort()
if drop_outliers:
q = np.quantile(y_test, 0.97)
print(q)
out_mask = y_test < q
print(out_mask.shape)
y_test = y_test[out_mask]
y_pred = y_pred[out_mask]
y_pred_lower = y_pred_lower[out_mask]
y_pred_upper = y_pred_upper[out_mask]
p = y_test.argsort()
fig, ax = plt.subplots(figsize=(6, 3))
if exp:
y_test = np.exp(y_test)
y_pred = np.exp(y_pred)
y_pred_lower = np.exp(y_pred_lower)
y_pred_upper = np.exp(y_pred_upper)
if drop_outliers:
new_r2 = r2_score(y_test, y_pred)
print("NEW R2 without outliers:", new_r2)
ax.plot(y_test[p], marker=".", linewidth=1, label="Real", color="#777777", alpha=0.5)
ax.errorbar(np.arange(len(y_pred)),y_pred[p], yerr=np.array([y_pred[p] - y_pred_lower[p], y_pred_upper[p] - y_pred[p]]), linewidth=0.5, fmt='.', color="#ff7f0e", label="Pred. + Interval", alpha=0.5)
ax.set_ylabel("Exec. Time [ms]")
ax.set_xlabel("Non-executed Jobs")
ax.legend()
print(results["test_scores_exp"][iteration_to_show])
else:
ax.plot(y_test[p], marker=".", linewidth=1, label="Real", color="#777777", alpha=0.5)
ax.errorbar(np.arange(len(y_pred)), y_pred[p], yerr=np.array([y_pred[p] - y_pred_lower[p], y_pred_upper[p] - y_pred[p]]), linewidth=0.5, fmt='.', color="#ff7f0e", label="Pred. + Interval", alpha=0.5)
ax.set_ylabel("Log(Exec. Time)")
ax.set_xlabel("Non-executed Jobs")
ax.legend()
print(results["test_scores"][iteration_to_show])
return ax
def show_iteration_2(results, iteration_to_show, drop_outliers=False):
y_test = results["iterations_results"][iteration_to_show]["test_labels"]
y_pred = results["iterations_results"][iteration_to_show]["pred_labels"]
y_pred_lower = results["iterations_results"][iteration_to_show]["uncertainty_low"]
y_pred_upper = results["iterations_results"][iteration_to_show]["uncertainty_high"]
p = y_test.argsort()
new_r2 = r2_score(y_test, y_pred)
print("NEW R2 log with outliers:", new_r2)
if drop_outliers:
q = np.quantile(y_test, 0.97)
print(q)
out_mask = y_test < q
print(out_mask.shape)
y_test = y_test[out_mask]
y_pred = y_pred[out_mask]
y_pred_lower = y_pred_lower[out_mask]
y_pred_upper = y_pred_upper[out_mask]
p = y_test.argsort()
fig, ax = plt.subplots(figsize=(6, 6))
y_test = np.exp(y_test)
y_pred = np.exp(y_pred)
y_pred_lower = np.exp(y_pred_lower)
y_pred_upper = np.exp(y_pred_upper)
if drop_outliers:
new_r2 = r2_score(y_test, y_pred)
print("NEW R2 without outliers:", new_r2)
ax.plot(y_test[p], y_test[p], marker=".", linewidth=1, label="Real", color="#777777", alpha=0.5)
ax.errorbar(y_test[p],y_pred[p], yerr=np.array([y_pred[p] - y_pred_lower[p], y_pred_upper[p] - y_pred[p]]), linewidth=0.5, fmt='.', color="#ff7f0e", label="Pred. + Interval", alpha=0.5)
ax.set_ylabel("Forecasted Exec. Time [ms] (Log scale)")
ax.set_yscale("log")
ax.set_xlabel("Real Exec. Time [ms] (Log scale)")
ax.set_xscale("log")
ax.legend()
return ax
def show_td_gen(results, iteration_to_show):
y_test = results[list(results.keys())[iteration_to_show]]["test_labels"]
y_pred = results[list(results.keys())[iteration_to_show]]["pred_labels"]
from sklearn.metrics import r2_score
score = r2_score(y_test, y_pred)
print("R2 score:", score)
p = y_test.argsort()
fig, ax = plt.subplots(figsize=(6, 3))
ax.plot(y_test[p], y_test[p], marker=".", linewidth=1, label="Real", color="#777777", alpha=0.5)
ax.plot(y_test[p], y_pred[p], marker=".", linewidth=0, label="TDGen Pred.", color=sns.color_palette()[4], alpha=0.5)
ax.set_ylabel("Forecasted Exec. Time [ms] (Log scale)")
ax.set_yscale("log")
ax.set_xlabel("Real Exec. Time [ms] (Log scale)")
ax.set_xscale("log")
ax.legend()
return ax
def show_our_and_td_gen(our_results, td_gen_results, iteration_to_show):
our_y_test = np.exp(our_results["iterations_results"][iteration_to_show]["test_labels"])
our_y_pred = np.exp(our_results["iterations_results"][iteration_to_show]["pred_labels"])
y_test = td_gen_results[list(td_gen_results.keys())[iteration_to_show]]["test_labels"]
y_pred = td_gen_results[list(td_gen_results.keys())[iteration_to_show]]["pred_labels"]
from sklearn.metrics import r2_score
score = r2_score(y_test, y_pred)
print("R2 score:", score)
p = y_test.argsort()
our_p = our_y_test.argsort()
fig, ax = plt.subplots(figsize=(6, 6))
ax.plot(y_test[p], y_test[p], marker="", linewidth=1, label="Real", color="#777777", alpha=0.5)
ax.plot(our_y_test[our_p], our_y_pred[our_p], marker=".", linewidth=0, label="Our solution", color=sns.color_palette()[1], alpha=0.2)
ax.plot(y_test[p], y_pred[p], marker=".", linewidth=0, label="TDGen Pred.", color=sns.color_palette()[4], alpha=0.2)
ax.set_ylabel("Forecasted Exec. Time [ms] (Log scale)")
ax.set_yscale("log")
ax.set_xlabel("Real Exec. Time [ms] (Log scale)")
ax.set_xscale("log")
ax.legend()
return ax
def compare_td_gen_r2(results, results_td_gen):
data_size = results["data_size"]
test_scores = results["test_scores_exp"]
from sklearn.metrics import r2_score
td_gen_scores = []
x = []
for k, v in results_td_gen.items():
y_test = v["test_labels"]
y_pred = v["pred_labels"]
score = r2_score(y_test, y_pred)
print(k ,"R2 score:", score)
td_gen_scores.append(score)
x.append(k)
fig, ax = plt.subplots(figsize=(8, 2))
ax.plot(td_gen_scores, linestyle="--", marker="o", label="TDGen",
color=sns.color_palette()[4])
ax.plot(list(map(lambda x: x["r2"], test_scores)), marker="o", label="Our solution",
color="#111111")
ax.set_xticks(list(range(data_size.__len__())))
ax.set_xticklabels(data_size, rotation=60)
print(np.array(list(map(lambda x: x["r2"], test_scores)))/np.array(td_gen_scores))
ax.set_xlabel("# Cumulated Executed Jobs")
ax.set_ylabel("$R^2$ of pred. Exec. Time")
ax.legend()
return ax
def show_centerd_uncertainty(data, iteration, exp=False):
print(data["iterations_results"][iteration].keys())
if exp:
preds = np.exp(np.array(data["iterations_results"][iteration]["pred_labels"]))
upper = np.exp(np.array(data["iterations_results"][iteration]["uncertainty_high"]))
lower = np.exp(np.array(data["iterations_results"][iteration]["uncertainty_low"]))
else:
preds = np.array(data["iterations_results"][iteration]["pred_labels"])
upper = np.array(data["iterations_results"][iteration]["uncertainty_high"])
lower = np.array(data["iterations_results"][iteration]["uncertainty_low"])
IQR_interval = upper - lower
sort_ind = np.argsort(IQR_interval)
preds = preds[sort_ind]
upper = upper[sort_ind]
lower = lower[sort_ind]
mean = (upper + lower) / 2
std = np.std((upper + lower))
upper_centered = upper.copy()
lower_centered = lower.copy()
preds_centered = preds.copy()
upper_centered = (upper_centered - mean)
lower_centered = (lower_centered - mean)
preds_centered = (preds_centered - mean)
IRQ_th = np.quantile(IQR_interval, 0.95)
print(IRQ_th)
x_idx = np.arange(len(upper_centered))
cut = x_idx[IQR_interval[sort_ind] > IRQ_th]
print(cut)
fig, ax = plt.subplots(1, 1, figsize=(8, 4))
ax.plot(preds_centered, marker=".", color="#ff7f0e", linewidth=0)
ax.fill_between(
np.arange(len(upper_centered)), lower_centered, upper_centered, alpha=0.2, color="#ff7f0e",
label="Pred. interval (centerd)")
ax.axvline(cut[0], color="red", linestyle="--", label="Threshold $\eta$")
ax.set_xlabel("Non-executed jobs sorted by uncertainty.")
ax.set_ylabel("Predicted values (centered)")
ax.legend()
def compute_stats_on_pred_errors(results, iteration_to_show):
y_train = results["iterations_results"][iteration_to_show]["train_labels"]
y_test = results["iterations_results"][iteration_to_show]["test_labels"]
y_pred = results["iterations_results"][iteration_to_show]["pred_labels"]
y_pred_lower = results["iterations_results"][iteration_to_show]["uncertainty_low"]
y_pred_upper = results["iterations_results"][iteration_to_show]["uncertainty_high"]
y_train = np.exp(y_train)
y_test = np.exp(y_test)
y_pred = np.exp(y_pred)
y_pred_lower = np.exp(y_pred_lower)
y_pred_upper = np.exp(y_pred_upper)
print("Real values")
print(pd.Series(np.hstack((y_train, y_test)) / 1000).describe())
print("highest 5:", np.sort(np.hstack((y_train, y_test)))[-5:]/1000)
print()
print("\nAverage Prediction Error")
print(pd.Series(np.abs(y_test - y_pred) / 1000).describe())
| true
| true
|
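To make the expected input of the plotting helpers above concrete, here is a minimal usage sketch. The synthetic `results` dictionary is an assumption that only mirrors the keys those functions read (data_size, test_scores, test_scores_exp, iterations_results with per-iteration label and uncertainty arrays); it is not real experiment output, and the helpers themselves are assumed to be available in the current namespace.

import numpy as np
import matplotlib.pyplot as plt

def make_fake_results(n_iterations=5, n_jobs=60, seed=0):
    # Assumption: this only mimics the structure read by show_r2 / show_uncertainty /
    # show_centerd_uncertainty; values are synthetic log execution times.
    rng = np.random.default_rng(seed)
    preds = rng.normal(8.0, 1.0, n_jobs)          # shared predictions in log space
    iterations, sizes, scores = [], [], []
    for it in range(n_iterations):
        half_width = 0.4 * (0.5 ** it) * rng.uniform(0.5, 1.5, n_jobs)  # shrinking intervals
        iterations.append({
            "train_labels": rng.normal(8.0, 1.0, n_jobs),
            "test_labels": preds + rng.normal(0.0, 0.2, n_jobs),
            "pred_labels": preds,
            "uncertainty_low": preds - half_width,
            "uncertainty_high": preds + half_width,
        })
        sizes.append(10 * (it + 1))
        scores.append({"r2": 0.5 + 0.4 * it / max(1, n_iterations - 1)})
    return {
        "data_size": sizes,
        "test_scores": scores,
        "test_scores_exp": scores,
        "model_uncertainty": None,   # recomputed inside show_uncertainty
        "iterations_results": iterations,
    }

results = make_fake_results()
show_r2(results)                                   # R^2 per labeling iteration
show_uncertainty(results)                          # interval width + early-stop markers
show_centerd_uncertainty(results, iteration=0, exp=True)
plt.show()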
790483b9c92a0a5f9653f2bd70aead4cdd719e0e
| 1,292
|
py
|
Python
|
src/azure-cli/azure/cli/command_modules/iotcentral/commands.py
|
psignoret/azure-cli
|
1a4a043750315f9a7f2894b4287126089978b615
|
[
"MIT"
] | 1
|
2020-12-14T15:30:11.000Z
|
2020-12-14T15:30:11.000Z
|
src/azure-cli/azure/cli/command_modules/iotcentral/commands.py
|
psignoret/azure-cli
|
1a4a043750315f9a7f2894b4287126089978b615
|
[
"MIT"
] | 4
|
2018-08-08T20:01:17.000Z
|
2018-09-17T15:20:06.000Z
|
src/azure-cli/azure/cli/command_modules/iotcentral/commands.py
|
psignoret/azure-cli
|
1a4a043750315f9a7f2894b4287126089978b615
|
[
"MIT"
] | 1
|
2020-12-22T00:28:33.000Z
|
2020-12-22T00:28:33.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
from ._client_factory import iotcentral_service_factory
def load_command_table(self, _):
from azure.cli.core.commands import CliCommandType
iotcentral_sdk = CliCommandType(
operations_tmpl='azure.mgmt.iotcentral.operations#IoTCentaralOperations.{}'
)
update_custom_util = CliCommandType(
operations_tmpl='azure.cli.command_modules.iotcentral.custom#{}')
with self.command_group('iotcentral app', iotcentral_sdk, client_factory=iotcentral_service_factory) as g:
g.custom_command('create', 'iotcentral_app_create')
g.custom_command('list', 'iotcentral_app_list')
g.custom_command('show', 'iotcentral_app_get')
g.generic_update_command('update', getter_name='iotcentral_app_get',
setter_name='iotcentral_app_update', command_type=update_custom_util)
g.custom_command('delete', 'iotcentral_app_delete')
| 46.142857
| 110
| 0.632353
|
from ._client_factory import iotcentral_service_factory
def load_command_table(self, _):
from azure.cli.core.commands import CliCommandType
iotcentral_sdk = CliCommandType(
operations_tmpl='azure.mgmt.iotcentral.operations#IoTCentaralOperations.{}'
)
update_custom_util = CliCommandType(
operations_tmpl='azure.cli.command_modules.iotcentral.custom#{}')
with self.command_group('iotcentral app', iotcentral_sdk, client_factory=iotcentral_service_factory) as g:
g.custom_command('create', 'iotcentral_app_create')
g.custom_command('list', 'iotcentral_app_list')
g.custom_command('show', 'iotcentral_app_get')
g.generic_update_command('update', getter_name='iotcentral_app_get',
setter_name='iotcentral_app_update', command_type=update_custom_util)
g.custom_command('delete', 'iotcentral_app_delete')
| true
| true
|
790484439b56f027dc2766ba8658b9d6b786dec3
| 8,045
|
py
|
Python
|
pyzoo/test/zoo/chronos/model/forecast/test_lstm_forecaster.py
|
DiegoCao/analytics-zoo
|
31a7c8acee38053b6bb20ccb4dc02f06d1d58900
|
[
"Apache-2.0"
] | null | null | null |
pyzoo/test/zoo/chronos/model/forecast/test_lstm_forecaster.py
|
DiegoCao/analytics-zoo
|
31a7c8acee38053b6bb20ccb4dc02f06d1d58900
|
[
"Apache-2.0"
] | null | null | null |
pyzoo/test/zoo/chronos/model/forecast/test_lstm_forecaster.py
|
DiegoCao/analytics-zoo
|
31a7c8acee38053b6bb20ccb4dc02f06d1d58900
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import tempfile
import os
import torch
from zoo.chronos.model.forecast.lstm_forecaster import LSTMForecaster
from zoo.orca import init_orca_context, stop_orca_context
from unittest import TestCase
import pytest
def create_data():
num_train_samples = 1000
num_val_samples = 400
num_test_samples = 400
input_time_steps = 24
input_feature_dim = 2
output_time_steps = 1
output_feature_dim = 2
def get_x_y(num_samples):
x = np.random.rand(num_samples, input_time_steps, input_feature_dim).astype(np.float32)
y = x[:, -output_time_steps:, :]*2 + \
np.random.rand(num_samples, output_time_steps, output_feature_dim).astype(np.float32)
return x, y
train_data = get_x_y(num_train_samples)
val_data = get_x_y(num_val_samples)
test_data = get_x_y(num_test_samples)
return train_data, val_data, test_data
class TestChronosModelLSTMForecaster(TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_tcn_forecaster_fit_eva_pred(self):
train_data, val_data, test_data = create_data()
forecaster = LSTMForecaster(past_seq_len=24,
input_feature_num=2,
output_feature_num=2,
loss="mae",
lr=0.01)
train_loss = forecaster.fit(train_data, epochs=2)
test_pred = forecaster.predict(test_data[0])
assert test_pred.shape == test_data[1].shape
test_mse = forecaster.evaluate(test_data)
def test_tcn_forecaster_onnx_methods(self):
train_data, val_data, test_data = create_data()
forecaster = LSTMForecaster(past_seq_len=24,
input_feature_num=2,
output_feature_num=2,
loss="mae",
lr=0.01)
forecaster.fit(train_data, epochs=2)
try:
import onnx
import onnxruntime
pred = forecaster.predict(test_data[0])
pred_onnx = forecaster.predict_with_onnx(test_data[0])
np.testing.assert_almost_equal(pred, pred_onnx, decimal=5)
mse = forecaster.evaluate(test_data, multioutput="raw_values")
mse_onnx = forecaster.evaluate_with_onnx(test_data,
multioutput="raw_values")
np.testing.assert_almost_equal(mse, mse_onnx, decimal=5)
mse = forecaster.evaluate(test_data)
mse_onnx = forecaster.evaluate_with_onnx(test_data)
np.testing.assert_almost_equal(mse, mse_onnx, decimal=5)
except ImportError:
pass
def test_tcn_forecaster_save_load(self):
train_data, val_data, test_data = create_data()
forecaster = LSTMForecaster(past_seq_len=24,
input_feature_num=2,
output_feature_num=2,
loss="mae",
lr=0.01)
train_mse = forecaster.fit(train_data, epochs=2)
with tempfile.TemporaryDirectory() as tmp_dir_name:
ckpt_name = os.path.join(tmp_dir_name, "ckpt")
test_pred_save = forecaster.predict(test_data[0])
forecaster.save(ckpt_name)
forecaster.load(ckpt_name)
test_pred_load = forecaster.predict(test_data[0])
np.testing.assert_almost_equal(test_pred_save, test_pred_load)
def test_tcn_forecaster_runtime_error(self):
train_data, val_data, test_data = create_data()
forecaster = LSTMForecaster(past_seq_len=24,
input_feature_num=2,
output_feature_num=2,
loss="mae",
lr=0.01)
with pytest.raises(RuntimeError):
with tempfile.TemporaryDirectory() as tmp_dir_name:
ckpt_name = os.path.join(tmp_dir_name, "ckpt")
forecaster.save(ckpt_name)
with pytest.raises(RuntimeError):
forecaster.predict(test_data[0])
with pytest.raises(RuntimeError):
forecaster.evaluate(test_data)
def test_tcn_forecaster_shape_error(self):
train_data, val_data, test_data = create_data()
forecaster = LSTMForecaster(past_seq_len=24,
input_feature_num=2,
output_feature_num=1,
loss="mae",
lr=0.01)
with pytest.raises(AssertionError):
forecaster.fit(train_data, epochs=2)
def test_tcn_forecaster_xshard_input(self):
train_data, val_data, test_data = create_data()
print("original", train_data[0].dtype)
init_orca_context(cores=4, memory="2g")
from zoo.orca.data import XShards
def transform_to_dict(data):
return {'x': data[0], 'y': data[1]}
def transform_to_dict_x(data):
return {'x': data[0]}
train_data = XShards.partition(train_data).transform_shard(transform_to_dict)
val_data = XShards.partition(val_data).transform_shard(transform_to_dict)
test_data = XShards.partition(test_data).transform_shard(transform_to_dict_x)
for distributed in [True, False]:
forecaster = LSTMForecaster(past_seq_len=24,
input_feature_num=2,
output_feature_num=2,
loss="mae",
lr=0.01,
distributed=distributed)
forecaster.fit(train_data, epochs=2)
distributed_pred = forecaster.predict(test_data)
distributed_eval = forecaster.evaluate(val_data)
stop_orca_context()
def test_tcn_forecaster_distributed(self):
train_data, val_data, test_data = create_data()
init_orca_context(cores=4, memory="2g")
forecaster = LSTMForecaster(past_seq_len=24,
input_feature_num=2,
output_feature_num=2,
loss="mae",
lr=0.01,
distributed=True)
forecaster.fit(train_data, epochs=2)
distributed_pred = forecaster.predict(test_data[0])
distributed_eval = forecaster.evaluate(val_data)
model = forecaster.get_model()
assert isinstance(model, torch.nn.Module)
forecaster.to_local()
local_pred = forecaster.predict(test_data[0])
local_eval = forecaster.evaluate(val_data)
np.testing.assert_almost_equal(distributed_pred, local_pred, decimal=5)
try:
import onnx
import onnxruntime
local_pred_onnx = forecaster.predict_with_onnx(test_data[0])
local_eval_onnx = forecaster.evaluate_with_onnx(val_data)
np.testing.assert_almost_equal(distributed_pred, local_pred_onnx, decimal=5)
except ImportError:
pass
model = forecaster.get_model()
assert isinstance(model, torch.nn.Module)
stop_orca_context()
| 40.631313
| 97
| 0.595649
|
import numpy as np
import tempfile
import os
import torch
from zoo.chronos.model.forecast.lstm_forecaster import LSTMForecaster
from zoo.orca import init_orca_context, stop_orca_context
from unittest import TestCase
import pytest
def create_data():
num_train_samples = 1000
num_val_samples = 400
num_test_samples = 400
input_time_steps = 24
input_feature_dim = 2
output_time_steps = 1
output_feature_dim = 2
def get_x_y(num_samples):
x = np.random.rand(num_samples, input_time_steps, input_feature_dim).astype(np.float32)
y = x[:, -output_time_steps:, :]*2 + \
np.random.rand(num_samples, output_time_steps, output_feature_dim).astype(np.float32)
return x, y
train_data = get_x_y(num_train_samples)
val_data = get_x_y(num_val_samples)
test_data = get_x_y(num_test_samples)
return train_data, val_data, test_data
class TestChronosModelLSTMForecaster(TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_tcn_forecaster_fit_eva_pred(self):
train_data, val_data, test_data = create_data()
forecaster = LSTMForecaster(past_seq_len=24,
input_feature_num=2,
output_feature_num=2,
loss="mae",
lr=0.01)
train_loss = forecaster.fit(train_data, epochs=2)
test_pred = forecaster.predict(test_data[0])
assert test_pred.shape == test_data[1].shape
test_mse = forecaster.evaluate(test_data)
def test_tcn_forecaster_onnx_methods(self):
train_data, val_data, test_data = create_data()
forecaster = LSTMForecaster(past_seq_len=24,
input_feature_num=2,
output_feature_num=2,
loss="mae",
lr=0.01)
forecaster.fit(train_data, epochs=2)
try:
import onnx
import onnxruntime
pred = forecaster.predict(test_data[0])
pred_onnx = forecaster.predict_with_onnx(test_data[0])
np.testing.assert_almost_equal(pred, pred_onnx, decimal=5)
mse = forecaster.evaluate(test_data, multioutput="raw_values")
mse_onnx = forecaster.evaluate_with_onnx(test_data,
multioutput="raw_values")
np.testing.assert_almost_equal(mse, mse_onnx, decimal=5)
mse = forecaster.evaluate(test_data)
mse_onnx = forecaster.evaluate_with_onnx(test_data)
np.testing.assert_almost_equal(mse, mse_onnx, decimal=5)
except ImportError:
pass
def test_tcn_forecaster_save_load(self):
train_data, val_data, test_data = create_data()
forecaster = LSTMForecaster(past_seq_len=24,
input_feature_num=2,
output_feature_num=2,
loss="mae",
lr=0.01)
train_mse = forecaster.fit(train_data, epochs=2)
with tempfile.TemporaryDirectory() as tmp_dir_name:
ckpt_name = os.path.join(tmp_dir_name, "ckpt")
test_pred_save = forecaster.predict(test_data[0])
forecaster.save(ckpt_name)
forecaster.load(ckpt_name)
test_pred_load = forecaster.predict(test_data[0])
np.testing.assert_almost_equal(test_pred_save, test_pred_load)
def test_tcn_forecaster_runtime_error(self):
train_data, val_data, test_data = create_data()
forecaster = LSTMForecaster(past_seq_len=24,
input_feature_num=2,
output_feature_num=2,
loss="mae",
lr=0.01)
with pytest.raises(RuntimeError):
with tempfile.TemporaryDirectory() as tmp_dir_name:
ckpt_name = os.path.join(tmp_dir_name, "ckpt")
forecaster.save(ckpt_name)
with pytest.raises(RuntimeError):
forecaster.predict(test_data[0])
with pytest.raises(RuntimeError):
forecaster.evaluate(test_data)
def test_tcn_forecaster_shape_error(self):
train_data, val_data, test_data = create_data()
forecaster = LSTMForecaster(past_seq_len=24,
input_feature_num=2,
output_feature_num=1,
loss="mae",
lr=0.01)
with pytest.raises(AssertionError):
forecaster.fit(train_data, epochs=2)
def test_tcn_forecaster_xshard_input(self):
train_data, val_data, test_data = create_data()
print("original", train_data[0].dtype)
init_orca_context(cores=4, memory="2g")
from zoo.orca.data import XShards
def transform_to_dict(data):
return {'x': data[0], 'y': data[1]}
def transform_to_dict_x(data):
return {'x': data[0]}
train_data = XShards.partition(train_data).transform_shard(transform_to_dict)
val_data = XShards.partition(val_data).transform_shard(transform_to_dict)
test_data = XShards.partition(test_data).transform_shard(transform_to_dict_x)
for distributed in [True, False]:
forecaster = LSTMForecaster(past_seq_len=24,
input_feature_num=2,
output_feature_num=2,
loss="mae",
lr=0.01,
distributed=distributed)
forecaster.fit(train_data, epochs=2)
distributed_pred = forecaster.predict(test_data)
distributed_eval = forecaster.evaluate(val_data)
stop_orca_context()
def test_tcn_forecaster_distributed(self):
train_data, val_data, test_data = create_data()
init_orca_context(cores=4, memory="2g")
forecaster = LSTMForecaster(past_seq_len=24,
input_feature_num=2,
output_feature_num=2,
loss="mae",
lr=0.01,
distributed=True)
forecaster.fit(train_data, epochs=2)
distributed_pred = forecaster.predict(test_data[0])
distributed_eval = forecaster.evaluate(val_data)
model = forecaster.get_model()
assert isinstance(model, torch.nn.Module)
forecaster.to_local()
local_pred = forecaster.predict(test_data[0])
local_eval = forecaster.evaluate(val_data)
np.testing.assert_almost_equal(distributed_pred, local_pred, decimal=5)
try:
import onnx
import onnxruntime
local_pred_onnx = forecaster.predict_with_onnx(test_data[0])
local_eval_onnx = forecaster.evaluate_with_onnx(val_data)
np.testing.assert_almost_equal(distributed_pred, local_pred_onnx, decimal=5)
except ImportError:
pass
model = forecaster.get_model()
assert isinstance(model, torch.nn.Module)
stop_orca_context()
| true
| true
|
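Distilled from the tests above, a minimal end-to-end usage sketch of the forecaster (assumption: analytics-zoo with zoo.chronos is installed; create_data is the synthetic generator defined in the test module):

from zoo.chronos.model.forecast.lstm_forecaster import LSTMForecaster

train_data, val_data, test_data = create_data()   # (x, y) numpy tuples
forecaster = LSTMForecaster(past_seq_len=24,      # lookback window length
                            input_feature_num=2,  # features per time step
                            output_feature_num=2, # forecasted features
                            loss="mae",
                            lr=0.01)
forecaster.fit(train_data, epochs=2)
pred = forecaster.predict(test_data[0])           # shape (400, 1, 2), matches test_data[1]
mse = forecaster.evaluate(val_data)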
79048490556fde1b61605b3bb2be4bfa21cfe9d0
| 2,046
|
py
|
Python
|
src/spaceone/inventory/api/v1/network_type.py
|
choonho/inventory
|
cc89757490d28fecb7ffccdfd6f89d4c0aa40da5
|
[
"Apache-2.0"
] | null | null | null |
src/spaceone/inventory/api/v1/network_type.py
|
choonho/inventory
|
cc89757490d28fecb7ffccdfd6f89d4c0aa40da5
|
[
"Apache-2.0"
] | null | null | null |
src/spaceone/inventory/api/v1/network_type.py
|
choonho/inventory
|
cc89757490d28fecb7ffccdfd6f89d4c0aa40da5
|
[
"Apache-2.0"
] | null | null | null |
from spaceone.api.inventory.v1 import network_type_pb2, network_type_pb2_grpc
from spaceone.core.pygrpc import BaseAPI
class NetworkType(BaseAPI, network_type_pb2_grpc.NetworkTypeServicer):
pb2 = network_type_pb2
pb2_grpc = network_type_pb2_grpc
def create(self, request, context):
params, metadata = self.parse_request(request, context)
with self.locator.get_service('NetworkTypeService', metadata) as ntype_service:
return self.locator.get_info('NetworkTypeInfo', ntype_service.create(params))
def update(self, request, context):
params, metadata = self.parse_request(request, context)
with self.locator.get_service('NetworkTypeService', metadata) as ntype_service:
return self.locator.get_info('NetworkTypeInfo', ntype_service.update(params))
def delete(self, request, context):
params, metadata = self.parse_request(request, context)
with self.locator.get_service('NetworkTypeService', metadata) as ntype_service:
ntype_service.delete(params)
return self.locator.get_info('EmptyInfo')
def get(self, request, context):
params, metadata = self.parse_request(request, context)
with self.locator.get_service('NetworkTypeService', metadata) as ntype_service:
return self.locator.get_info('NetworkTypeInfo', ntype_service.get(params))
def list(self, request, context):
params, metadata = self.parse_request(request, context)
with self.locator.get_service('NetworkTypeService', metadata) as ntype_service:
ntype_vos, total_count = ntype_service.list(params)
return self.locator.get_info('NetworkTypesInfo', ntype_vos, total_count, minimal=self.get_minimal(params))
def stat(self, request, context):
params, metadata = self.parse_request(request, context)
with self.locator.get_service('NetworkTypeService', metadata) as ntype_service:
return self.locator.get_info('StatisticsInfo', ntype_service.stat(params))
| 43.531915
| 118
| 0.726784
|
from spaceone.api.inventory.v1 import network_type_pb2, network_type_pb2_grpc
from spaceone.core.pygrpc import BaseAPI
class NetworkType(BaseAPI, network_type_pb2_grpc.NetworkTypeServicer):
pb2 = network_type_pb2
pb2_grpc = network_type_pb2_grpc
def create(self, request, context):
params, metadata = self.parse_request(request, context)
with self.locator.get_service('NetworkTypeService', metadata) as ntype_service:
return self.locator.get_info('NetworkTypeInfo', ntype_service.create(params))
def update(self, request, context):
params, metadata = self.parse_request(request, context)
with self.locator.get_service('NetworkTypeService', metadata) as ntype_service:
return self.locator.get_info('NetworkTypeInfo', ntype_service.update(params))
def delete(self, request, context):
params, metadata = self.parse_request(request, context)
with self.locator.get_service('NetworkTypeService', metadata) as ntype_service:
ntype_service.delete(params)
return self.locator.get_info('EmptyInfo')
def get(self, request, context):
params, metadata = self.parse_request(request, context)
with self.locator.get_service('NetworkTypeService', metadata) as ntype_service:
return self.locator.get_info('NetworkTypeInfo', ntype_service.get(params))
def list(self, request, context):
params, metadata = self.parse_request(request, context)
with self.locator.get_service('NetworkTypeService', metadata) as ntype_service:
ntype_vos, total_count = ntype_service.list(params)
return self.locator.get_info('NetworkTypesInfo', ntype_vos, total_count, minimal=self.get_minimal(params))
def stat(self, request, context):
params, metadata = self.parse_request(request, context)
with self.locator.get_service('NetworkTypeService', metadata) as ntype_service:
return self.locator.get_info('StatisticsInfo', ntype_service.stat(params))
| true
| true
|
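For orientation only, the sketch below shows the generic grpcio wiring pattern for such a servicer. This is an assumption: SpaceONE actually bootstraps its APIs through spaceone.core.pygrpc, and the BaseAPI subclass may need framework configuration before it can serve requests; add_NetworkTypeServicer_to_server is the protoc-generated registration helper.

from concurrent import futures
import grpc
from spaceone.api.inventory.v1 import network_type_pb2_grpc

# Hypothetical stand-alone wiring, not the project's real bootstrap path.
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
network_type_pb2_grpc.add_NetworkTypeServicer_to_server(NetworkType(), server)
server.add_insecure_port("[::]:50051")
server.start()
server.wait_for_termination()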
790488091f13f4b2ff427e7b9bda7aa18b0d732c
| 1,391
|
py
|
Python
|
misc/style/check-include-guard-convention.py
|
nitinkaveriappa/downward
|
5c9a1b5111d667bb96f94da61ca2a45b1b70bb83
|
[
"MIT"
] | 4
|
2019-04-23T10:41:35.000Z
|
2019-10-27T05:14:42.000Z
|
misc/style/check-include-guard-convention.py
|
nitinkaveriappa/downward
|
5c9a1b5111d667bb96f94da61ca2a45b1b70bb83
|
[
"MIT"
] | null | null | null |
misc/style/check-include-guard-convention.py
|
nitinkaveriappa/downward
|
5c9a1b5111d667bb96f94da61ca2a45b1b70bb83
|
[
"MIT"
] | 4
|
2018-01-16T00:00:22.000Z
|
2019-11-01T23:35:01.000Z
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import glob
import os.path
import sys
DIR = os.path.dirname(os.path.abspath(__file__))
REPO = os.path.dirname(os.path.dirname(DIR))
SRC_DIR = os.path.join(REPO, "src")
def check_header_files(component):
component_dir = os.path.join(SRC_DIR, component)
header_files = (glob.glob(os.path.join(component_dir, "*.h")) +
glob.glob(os.path.join(component_dir, "*", "*.h")))
assert header_files
errors = []
for filename in header_files:
assert filename.endswith(".h"), filename
rel_filename = os.path.relpath(filename, start=component_dir)
guard = rel_filename.replace(".", "_").replace("/", "_").replace("-", "_").upper()
expected = "#ifndef " + guard
for line in open(filename):
line = line.rstrip("\n")
if line.startswith("#ifndef"):
if line != expected:
errors.append('%s uses guard "%s" but should use "%s"' %
(filename, line, expected))
break
return errors
def main():
errors = []
errors.extend(check_header_files("preprocess"))
errors.extend(check_header_files("search"))
for error in errors:
print(error)
if errors:
sys.exit(1)
if __name__ == "__main__":
main()
| 28.979167
| 90
| 0.591661
|
from __future__ import print_function
import glob
import os.path
import sys
DIR = os.path.dirname(os.path.abspath(__file__))
REPO = os.path.dirname(os.path.dirname(DIR))
SRC_DIR = os.path.join(REPO, "src")
def check_header_files(component):
component_dir = os.path.join(SRC_DIR, component)
header_files = (glob.glob(os.path.join(component_dir, "*.h")) +
glob.glob(os.path.join(component_dir, "*", "*.h")))
assert header_files
errors = []
for filename in header_files:
assert filename.endswith(".h"), filename
rel_filename = os.path.relpath(filename, start=component_dir)
guard = rel_filename.replace(".", "_").replace("/", "_").replace("-", "_").upper()
expected = "#ifndef " + guard
for line in open(filename):
line = line.rstrip("\n")
if line.startswith("#ifndef"):
if line != expected:
errors.append('%s uses guard "%s" but should use "%s"' %
(filename, line, expected))
break
return errors
def main():
errors = []
errors.extend(check_header_files("preprocess"))
errors.extend(check_header_files("search"))
for error in errors:
print(error)
if errors:
sys.exit(1)
if __name__ == "__main__":
main()
| true
| true
|
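A worked example of the convention the script enforces, using a hypothetical header path relative to the component directory:

# landmarks/landmark_graph.h is a made-up example path; the transformation below
# mirrors the one in check_header_files.
rel_filename = "landmarks/landmark_graph.h"
guard = rel_filename.replace(".", "_").replace("/", "_").replace("-", "_").upper()
print("#ifndef " + guard)   # -> #ifndef LANDMARKS_LANDMARK_GRAPH_H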
7904885b0ba56ba64b7a2d57ff185fb7cd178af8
| 827
|
py
|
Python
|
backend/unikernel/osv/__init__.py
|
ShengliangD/Cunik-engine
|
1951d20629fc3cbbe0047ee04b438bbe91adc44c
|
[
"MIT"
] | 31
|
2018-05-17T01:54:46.000Z
|
2019-08-22T02:55:58.000Z
|
backend/unikernel/osv/__init__.py
|
ShengliangD/Cunik-engine
|
1951d20629fc3cbbe0047ee04b438bbe91adc44c
|
[
"MIT"
] | 1
|
2018-07-06T11:33:31.000Z
|
2018-07-17T10:08:15.000Z
|
backend/unikernel/osv/__init__.py
|
ShengliangD/Cunik-engine
|
1951d20629fc3cbbe0047ee04b438bbe91adc44c
|
[
"MIT"
] | 7
|
2018-06-08T08:35:11.000Z
|
2018-07-07T09:16:32.000Z
|
"""Implements interface for OSv unikernels."""
from backend.vm import VMConfig
from os import path
from .imgedit import set_cmdline
class OSv:
cmdline_template = "--ip=eth0,{ipv4_addr},255.255.255.0 --nameserver=10.0.125.0 {extra_cmdline}"
@staticmethod
def configure(image, config, nic_name):
cmdline = OSv.cmdline_template.format(
ipv4_addr=config.ipv4_addr,
extra_cmdline=config.cmdline if config.cmdline else image.default_cmdline,
)
set_cmdline(path.join(image.root, 'system.qemu'), cmdline)
vmc = VMConfig(
name=config.name,
nic_name=nic_name,
num_cpus=4,
vdisk_path=path.join(image.root, 'system.qemu'),
vdisk_format='qcow2',
memory_size=1024000
)
return vmc
| 29.535714
| 100
| 0.634825
|
from backend.vm import VMConfig
from os import path
from .imgedit import set_cmdline
class OSv:
cmdline_template = "--ip=eth0,{ipv4_addr},255.255.255.0 --nameserver=10.0.125.0 {extra_cmdline}"
@staticmethod
def configure(image, config, nic_name):
cmdline = OSv.cmdline_template.format(
ipv4_addr=config.ipv4_addr,
extra_cmdline=config.cmdline if config.cmdline else image.default_cmdline,
)
set_cmdline(path.join(image.root, 'system.qemu'), cmdline)
vmc = VMConfig(
name=config.name,
nic_name=nic_name,
num_cpus=4,
vdisk_path=path.join(image.root, 'system.qemu'),
vdisk_format='qcow2',
memory_size=1024000
)
return vmc
| true
| true
|
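A sketch of how OSv.configure might be called. The SimpleNamespace stand-ins are assumptions for illustration; in Cunik-engine the image and config objects come from its registries, and set_cmdline edits <image.root>/system.qemu in place, so that disk image must actually exist for the call to succeed.

from types import SimpleNamespace

image = SimpleNamespace(root="/var/lib/cunik/images/nginx-osv",   # hypothetical path
                        default_cmdline="/nginx.so")
config = SimpleNamespace(name="nginx-demo",
                         ipv4_addr="10.0.125.2",
                         cmdline=None)            # falls back to default_cmdline
vm_config = OSv.configure(image, config, nic_name="tap0")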
790489aa109a3810fa6f0d208b39f83eb3d71525
| 1,688
|
py
|
Python
|
kite/venv/lib/python3.7/site-packages/bs4/tests/test_htmlparser.py
|
pxuanqui/Edge-Assisted-Cart
|
2edd1f7023ab0b02f5733e2e9204bac4623eeeac
|
[
"BSD-3-Clause"
] | 27
|
2019-10-28T05:03:18.000Z
|
2021-06-09T00:16:22.000Z
|
kite/venv/lib/python3.7/site-packages/bs4/tests/test_htmlparser.py
|
pxuanqui/Edge-Assisted-Cart
|
2edd1f7023ab0b02f5733e2e9204bac4623eeeac
|
[
"BSD-3-Clause"
] | 47
|
2018-11-16T19:18:01.000Z
|
2021-12-01T19:40:44.000Z
|
virtual/lib/python3.6/site-packages/bs4/tests/test_htmlparser.py
|
catherine244/Reviews
|
30138f5ad09a39c1b6866c8bacf3fd0c89abbd00
|
[
"MIT"
] | 9
|
2019-11-02T06:44:18.000Z
|
2021-11-08T11:46:19.000Z
|
"""Tests to ensure that the html.parser tree builder generates good
trees."""
from pdb import set_trace
import pickle
from bs4.testing import SoupTest, HTMLTreeBuilderSmokeTest
from bs4.builder import HTMLParserTreeBuilder
from bs4.builder._htmlparser import BeautifulSoupHTMLParser
class HTMLParserTreeBuilderSmokeTest(SoupTest, HTMLTreeBuilderSmokeTest):
default_builder = HTMLParserTreeBuilder
def test_namespaced_system_doctype(self):
# html.parser can't handle namespaced doctypes, so skip this one.
pass
def test_namespaced_public_doctype(self):
# html.parser can't handle namespaced doctypes, so skip this one.
pass
def test_builder_is_pickled(self):
"""Unlike most tree builders, HTMLParserTreeBuilder and will
be restored after pickling.
"""
tree = self.soup("<a><b>foo</a>")
dumped = pickle.dumps(tree, 2)
loaded = pickle.loads(dumped)
self.assertTrue(isinstance(loaded.builder, type(tree.builder)))
def test_redundant_empty_element_closing_tags(self):
self.assertSoupEquals('<br></br><br></br><br></br>', "<br/><br/><br/>")
self.assertSoupEquals('</br></br></br>', "")
def test_empty_element(self):
# This verifies that any buffered data present when the parser
# finishes working is handled.
self.assertSoupEquals("foo &# bar", "foo &# bar")
class TestHTMLParserSubclass(SoupTest):
def test_error(self):
"""Verify that our HTMLParser subclass implements error() in a way
that doesn't cause a crash.
"""
parser = BeautifulSoupHTMLParser()
parser.error("don't crash")
| 35.166667
| 79
| 0.690758
|
from pdb import set_trace
import pickle
from bs4.testing import SoupTest, HTMLTreeBuilderSmokeTest
from bs4.builder import HTMLParserTreeBuilder
from bs4.builder._htmlparser import BeautifulSoupHTMLParser
class HTMLParserTreeBuilderSmokeTest(SoupTest, HTMLTreeBuilderSmokeTest):
default_builder = HTMLParserTreeBuilder
def test_namespaced_system_doctype(self):
pass
def test_namespaced_public_doctype(self):
# html.parser can't handle namespaced doctypes, so skip this one.
pass
def test_builder_is_pickled(self):
tree = self.soup("<a><b>foo</a>")
dumped = pickle.dumps(tree, 2)
loaded = pickle.loads(dumped)
self.assertTrue(isinstance(loaded.builder, type(tree.builder)))
def test_redundant_empty_element_closing_tags(self):
self.assertSoupEquals('<br></br><br></br><br></br>', "<br/><br/><br/>")
self.assertSoupEquals('</br></br></br>', "")
def test_empty_element(self):
self.assertSoupEquals("foo &# bar", "foo &# bar")
class TestHTMLParserSubclass(SoupTest):
def test_error(self):
parser = BeautifulSoupHTMLParser()
parser.error("don't crash")
| true
| true
|
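As a quick illustration of the behavior the redundant-closing-tag test asserts (assumption: the beautifulsoup4 package is installed):

from bs4 import BeautifulSoup

soup = BeautifulSoup("<br></br><br></br><br></br>", "html.parser")
print(soup)   # -> <br/><br/><br/>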
79048bb09489ee4abba8f5a07e9432d46f3ca509
| 9,832
|
py
|
Python
|
apps/Graph4KG/utils.py
|
LemonNoel/PGL
|
c12357b66a105b10dd5a1f034fa21008f053d0f0
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
apps/Graph4KG/utils.py
|
LemonNoel/PGL
|
c12357b66a105b10dd5a1f034fa21008f053d0f0
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
apps/Graph4KG/utils.py
|
LemonNoel/PGL
|
c12357b66a105b10dd5a1f034fa21008f053d0f0
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import csv
import math
import json
import time
import random
import logging
import functools
import traceback
from collections import defaultdict
from _thread import start_new_thread
from multiprocessing import Queue, Process
import numpy as np
from tqdm import tqdm
import paddle
import paddle.distributed as dist
def set_seed(seed):
"""Set seed for reproduction.
"""
seed = seed + dist.get_rank()
random.seed(seed)
np.random.seed(seed)
paddle.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
def set_logger(args):
"""Write logs to console and log file.
"""
log_file = os.path.join(args.save_path, 'train.log')
logging.basicConfig(
format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S',
filename=log_file,
filemode='a+')
if args.print_on_screen:
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
for arg in vars(args):
logging.info('{:20}:{}'.format(arg, getattr(args, arg)))
def print_log(step, interval, log, timer, time_sum):
"""Print log to logger.
"""
logging.info(
'[GPU %d] step: %d, loss: %.5f, reg: %.4e, speed: %.2f steps/s, time: %.2f s' %
(dist.get_rank(), step, log['loss'] / interval, log['reg'] / interval,
interval / time_sum, time_sum))
logging.info('sample: %f, forward: %f, backward: %f, update: %f' % (
timer['sample'], timer['forward'], timer['backward'], timer['update']))
def uniform(low, high, size, dtype=np.float32, seed=0):
"""Memory efficient uniform implementation.
"""
rng = np.random.default_rng(seed)
out = (high - low) * rng.random(size, dtype=dtype) + low
return out
def timer_wrapper(name):
"""Time counter wrapper.
"""
def decorate(func):
"""decorate func
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
"""wrapper func
"""
logging.info(f'[{name}] start...')
ts = time.time()
result = func(*args, **kwargs)
te = time.time()
costs = te - ts
if costs < 1e-4:
cost_str = '%f sec' % costs
elif costs > 3600:
cost_str = '%.4f sec (%.4f hours)' % (costs, costs / 3600.)
else:
cost_str = '%.4f sec' % costs
            logging.info(f'[{name}] finished! It takes {cost_str}')
return result
return wrapper
return decorate
def calculate_metrics(scores, corr_idxs, filter_list):
"""Calculate metrics according to scores.
"""
logs = []
for i in range(scores.shape[0]):
rank = (scores[i] > scores[i][corr_idxs[i]]).astype('float32')
if filter_list is not None:
mask = paddle.ones(rank.shape, dtype='float32')
mask[filter_list[i]] = 0.
rank = rank * mask
rank = paddle.sum(rank) + 1
logs.append({
'MRR': 1.0 / rank,
'MR': float(rank),
'HITS@1': 1.0 if rank <= 1 else 0.0,
'HITS@3': 1.0 if rank <= 3 else 0.0,
'HITS@10': 1.0 if rank <= 10 else 0.0,
})
return logs
def evaluate_wikikg2(model, loader, mode, save_path):
from ogb.linkproppred import Evaluator
evaluator = Evaluator(name='ogbl-wikikg2')
model.eval()
with paddle.no_grad():
y_pred_pos = []
y_pred_neg = []
for h, r, t, neg_h, neg_t in tqdm(loader):
pos_h = model._get_ent_embedding(h)
pos_r = model._get_rel_embedding(r)
pos_t = model._get_ent_embedding(t)
y_pred_pos.append(model(pos_h, pos_r, pos_t).numpy())
y_neg_head = model.predict(t, r, neg_h, mode='head').numpy()
y_neg_tail = model.predict(h, r, neg_t, mode='tail').numpy()
y_pred_neg.append(np.concatenate([y_neg_head, y_neg_tail], axis=1))
y_pred_pos = np.concatenate(y_pred_pos, axis=0)
y_pred_neg = np.concatenate(y_pred_neg, axis=0)
input_dict = {'y_pred_pos': y_pred_pos, 'y_pred_neg': y_pred_neg}
result = evaluator.eval(input_dict)
logging.info('-- %s results ------------' % mode)
logging.info(' ' + ' '.join(
['{}: {}'.format(k, v.mean()) for k, v in result.items()]))
def evaluate_wikikg90m(model, loader, mode, save_path):
from ogb.lsc import WikiKG90MEvaluator
evaluator = WikiKG90MEvaluator()
model.eval()
with paddle.no_grad():
top_tens = []
corr_idx = []
for h, r, t_idx, cand_t in tqdm(loader):
score = model.predict(h, r, cand_t)
rank = paddle.argsort(score, axis=1, descending=True)
top_tens.append(rank[:, :10].numpy())
corr_idx.append(t_idx.numpy())
t_pred_top10 = np.concatenate(top_tens, axis=0)
t_correct_index = np.concatenate(corr_idx, axis=0)
input_dict = {}
if mode == 'valid':
input_dict['h,r->t'] = {
't_pred_top10': t_pred_top10,
't_correct_index': t_correct_index
}
result = evaluator.eval(input_dict)
logging.info('-- %s results -------------' % mode)
logging.info(' '.join(
['{}: {}'.format(k, v) for k, v in result.items()]))
else:
input_dict['h,r->t'] = {'t_pred_top10': t_pred_top10}
evaluator.save_test_submission(
input_dict=input_dict, dir_path=save_path)
@timer_wrapper('evaluation')
def evaluate(model,
loader,
evaluate_mode='test',
filter_dict=None,
save_path='./tmp/',
data_mode='hrt'):
"""Evaluate given KGE model.
"""
if data_mode == 'wikikg2':
evaluate_wikikg2(model, loader, evaluate_mode, save_path)
elif data_mode == 'wikikg90m':
evaluate_wikikg90m(model, loader, evaluate_mode, save_path)
else:
model.eval()
with paddle.no_grad():
h_metrics = []
t_metrics = []
output = {'h,r->t': {}, 't,r->h': {}, 'average': {}}
for h, r, t in tqdm(loader):
t_score = model.predict(h, r, mode='tail')
h_score = model.predict(t, r, mode='head')
if filter_dict is not None:
h_filter_list = [
filter_dict['head'][(ti, ri)]
for ti, ri in zip(t.numpy(), r.numpy())
]
t_filter_list = [
filter_dict['tail'][(hi, ri)]
for hi, ri in zip(h.numpy(), r.numpy())
]
else:
h_filter_list = None
t_filter_list = None
h_metrics += calculate_metrics(h_score, h, h_filter_list)
t_metrics += calculate_metrics(t_score, t, t_filter_list)
for metric in h_metrics[0].keys():
output['t,r->h'][metric] = np.mean(
[x[metric] for x in h_metrics])
output['h,r->t'][metric] = np.mean(
[x[metric] for x in t_metrics])
output['average'][metric] = (
output['t,r->h'][metric] + output['h,r->t'][metric]) / 2
logging.info('-------------- %s result --------------' %
evaluate_mode)
logging.info('t,r->h |' + ' '.join(
['{}: {}'.format(k, v) for k, v in output['t,r->h'].items()]))
logging.info('h,r->t |' + ' '.join(
['{}: {}'.format(k, v) for k, v in output['h,r->t'].items()]))
logging.info('average |' + ' '.join(
['{}: {}'.format(k, v) for k, v in output['average'].items()]))
logging.info('-----------------------------------------')
def gram_schimidt_process(embeds, num_elem, use_scale):
""" Orthogonalize embeddings.
"""
num_embed = embeds.shape[0]
assert embeds.shape[1] == num_elem
assert embeds.shape[2] == (num_elem + int(use_scale))
if use_scale:
scales = embeds[:, :, -1]
embeds = embeds[:, :, :num_elem]
u = [embeds[:, 0]]
uu = [0] * num_elem
uu[0] = (u[0] * u[0]).sum(axis=-1)
u_d = embeds[:, 1:]
ushape = (num_embed, 1, -1)
for i in range(1, num_elem):
tmp_a = (embeds[:, i:] * u[i - 1].reshape(ushape)).sum(axis=-1)
tmp_b = uu[i - 1].reshape((num_embed, -1))
tmp_u = (tmp_a / tmp_b).reshape((num_embed, -1, 1))
u_d = u_d - u[-1].reshape(ushape) * tmp_u
u_i = u_d[:, 0]
if u_d.shape[1] > 1:
u_d = u_d[:, 1:]
uu[i] = (u_i * u_i).sum(axis=-1)
u.append(u_i)
u = np.stack(u, axis=1)
u_norm = np.linalg.norm(u, axis=-1, keepdims=True)
u = u / u_norm
if use_scale:
u = np.concatenate([u, scales.reshape((num_embed, -1, 1))], axis=-1)
return u
| 34.989324
| 87
| 0.54465
|
import os
import csv
import math
import json
import time
import random
import logging
import functools
import traceback
from collections import defaultdict
from _thread import start_new_thread
from multiprocessing import Queue, Process
import numpy as np
from tqdm import tqdm
import paddle
import paddle.distributed as dist
def set_seed(seed):
seed = seed + dist.get_rank()
random.seed(seed)
np.random.seed(seed)
paddle.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
def set_logger(args):
log_file = os.path.join(args.save_path, 'train.log')
logging.basicConfig(
format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S',
filename=log_file,
filemode='a+')
if args.print_on_screen:
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
for arg in vars(args):
logging.info('{:20}:{}'.format(arg, getattr(args, arg)))
def print_log(step, interval, log, timer, time_sum):
logging.info(
'[GPU %d] step: %d, loss: %.5f, reg: %.4e, speed: %.2f steps/s, time: %.2f s' %
(dist.get_rank(), step, log['loss'] / interval, log['reg'] / interval,
interval / time_sum, time_sum))
logging.info('sample: %f, forward: %f, backward: %f, update: %f' % (
timer['sample'], timer['forward'], timer['backward'], timer['update']))
def uniform(low, high, size, dtype=np.float32, seed=0):
rng = np.random.default_rng(seed)
out = (high - low) * rng.random(size, dtype=dtype) + low
return out
def timer_wrapper(name):
def decorate(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
logging.info(f'[{name}] start...')
ts = time.time()
result = func(*args, **kwargs)
te = time.time()
costs = te - ts
if costs < 1e-4:
cost_str = '%f sec' % costs
elif costs > 3600:
cost_str = '%.4f sec (%.4f hours)' % (costs, costs / 3600.)
else:
cost_str = '%.4f sec' % costs
            logging.info(f'[{name}] finished! It takes {cost_str}')
return result
return wrapper
return decorate
def calculate_metrics(scores, corr_idxs, filter_list):
logs = []
for i in range(scores.shape[0]):
rank = (scores[i] > scores[i][corr_idxs[i]]).astype('float32')
if filter_list is not None:
mask = paddle.ones(rank.shape, dtype='float32')
mask[filter_list[i]] = 0.
rank = rank * mask
rank = paddle.sum(rank) + 1
logs.append({
'MRR': 1.0 / rank,
'MR': float(rank),
'HITS@1': 1.0 if rank <= 1 else 0.0,
'HITS@3': 1.0 if rank <= 3 else 0.0,
'HITS@10': 1.0 if rank <= 10 else 0.0,
})
return logs
def evaluate_wikikg2(model, loader, mode, save_path):
from ogb.linkproppred import Evaluator
evaluator = Evaluator(name='ogbl-wikikg2')
model.eval()
with paddle.no_grad():
y_pred_pos = []
y_pred_neg = []
for h, r, t, neg_h, neg_t in tqdm(loader):
pos_h = model._get_ent_embedding(h)
pos_r = model._get_rel_embedding(r)
pos_t = model._get_ent_embedding(t)
y_pred_pos.append(model(pos_h, pos_r, pos_t).numpy())
y_neg_head = model.predict(t, r, neg_h, mode='head').numpy()
y_neg_tail = model.predict(h, r, neg_t, mode='tail').numpy()
y_pred_neg.append(np.concatenate([y_neg_head, y_neg_tail], axis=1))
y_pred_pos = np.concatenate(y_pred_pos, axis=0)
y_pred_neg = np.concatenate(y_pred_neg, axis=0)
input_dict = {'y_pred_pos': y_pred_pos, 'y_pred_neg': y_pred_neg}
result = evaluator.eval(input_dict)
logging.info('-- %s results ------------' % mode)
logging.info(' ' + ' '.join(
['{}: {}'.format(k, v.mean()) for k, v in result.items()]))
def evaluate_wikikg90m(model, loader, mode, save_path):
from ogb.lsc import WikiKG90MEvaluator
evaluator = WikiKG90MEvaluator()
model.eval()
with paddle.no_grad():
top_tens = []
corr_idx = []
for h, r, t_idx, cand_t in tqdm(loader):
score = model.predict(h, r, cand_t)
rank = paddle.argsort(score, axis=1, descending=True)
top_tens.append(rank[:, :10].numpy())
corr_idx.append(t_idx.numpy())
t_pred_top10 = np.concatenate(top_tens, axis=0)
t_correct_index = np.concatenate(corr_idx, axis=0)
input_dict = {}
if mode == 'valid':
input_dict['h,r->t'] = {
't_pred_top10': t_pred_top10,
't_correct_index': t_correct_index
}
result = evaluator.eval(input_dict)
logging.info('-- %s results -------------' % mode)
logging.info(' '.join(
['{}: {}'.format(k, v) for k, v in result.items()]))
else:
input_dict['h,r->t'] = {'t_pred_top10': t_pred_top10}
evaluator.save_test_submission(
input_dict=input_dict, dir_path=save_path)
@timer_wrapper('evaluation')
def evaluate(model,
loader,
evaluate_mode='test',
filter_dict=None,
save_path='./tmp/',
data_mode='hrt'):
if data_mode == 'wikikg2':
evaluate_wikikg2(model, loader, evaluate_mode, save_path)
elif data_mode == 'wikikg90m':
evaluate_wikikg90m(model, loader, evaluate_mode, save_path)
else:
model.eval()
with paddle.no_grad():
h_metrics = []
t_metrics = []
output = {'h,r->t': {}, 't,r->h': {}, 'average': {}}
for h, r, t in tqdm(loader):
t_score = model.predict(h, r, mode='tail')
h_score = model.predict(t, r, mode='head')
if filter_dict is not None:
h_filter_list = [
filter_dict['head'][(ti, ri)]
for ti, ri in zip(t.numpy(), r.numpy())
]
t_filter_list = [
filter_dict['tail'][(hi, ri)]
for hi, ri in zip(h.numpy(), r.numpy())
]
else:
h_filter_list = None
t_filter_list = None
h_metrics += calculate_metrics(h_score, h, h_filter_list)
t_metrics += calculate_metrics(t_score, t, t_filter_list)
for metric in h_metrics[0].keys():
output['t,r->h'][metric] = np.mean(
[x[metric] for x in h_metrics])
output['h,r->t'][metric] = np.mean(
[x[metric] for x in t_metrics])
output['average'][metric] = (
output['t,r->h'][metric] + output['h,r->t'][metric]) / 2
logging.info('-------------- %s result --------------' %
evaluate_mode)
logging.info('t,r->h |' + ' '.join(
['{}: {}'.format(k, v) for k, v in output['t,r->h'].items()]))
logging.info('h,r->t |' + ' '.join(
['{}: {}'.format(k, v) for k, v in output['h,r->t'].items()]))
logging.info('average |' + ' '.join(
['{}: {}'.format(k, v) for k, v in output['average'].items()]))
logging.info('-----------------------------------------')
def gram_schimidt_process(embeds, num_elem, use_scale):
num_embed = embeds.shape[0]
assert embeds.shape[1] == num_elem
assert embeds.shape[2] == (num_elem + int(use_scale))
if use_scale:
scales = embeds[:, :, -1]
embeds = embeds[:, :, :num_elem]
u = [embeds[:, 0]]
uu = [0] * num_elem
uu[0] = (u[0] * u[0]).sum(axis=-1)
u_d = embeds[:, 1:]
ushape = (num_embed, 1, -1)
for i in range(1, num_elem):
tmp_a = (embeds[:, i:] * u[i - 1].reshape(ushape)).sum(axis=-1)
tmp_b = uu[i - 1].reshape((num_embed, -1))
tmp_u = (tmp_a / tmp_b).reshape((num_embed, -1, 1))
u_d = u_d - u[-1].reshape(ushape) * tmp_u
u_i = u_d[:, 0]
if u_d.shape[1] > 1:
u_d = u_d[:, 1:]
uu[i] = (u_i * u_i).sum(axis=-1)
u.append(u_i)
u = np.stack(u, axis=1)
u_norm = np.linalg.norm(u, axis=-1, keepdims=True)
u = u / u_norm
if use_scale:
u = np.concatenate([u, scales.reshape((num_embed, -1, 1))], axis=-1)
return u
| true
| true
|
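A small self-check of the Gram-Schmidt helper above (assumption: gram_schimidt_process is importable; the input shape follows its asserts, (num_embed, num_elem, num_elem + int(use_scale))):

import numpy as np

num_embed, num_elem = 4, 3
embeds = np.random.default_rng(0).random((num_embed, num_elem, num_elem)).astype(np.float32)
ortho = gram_schimidt_process(embeds, num_elem, use_scale=False)

# After orthogonalization each embedding's rows should be orthonormal: U @ U.T ~ I.
for u_mat in ortho:
    assert np.allclose(u_mat @ u_mat.T, np.eye(num_elem), atol=1e-4)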
79048c6c7c8173928958eb3dfb4ea531ee1fa52d
| 54
|
py
|
Python
|
script.py
|
Delightkc/fosslab
|
19eaf6f00623a54a09b51f3c31e8e6a9dfb3dbe7
|
[
"MIT"
] | null | null | null |
script.py
|
Delightkc/fosslab
|
19eaf6f00623a54a09b51f3c31e8e6a9dfb3dbe7
|
[
"MIT"
] | null | null | null |
script.py
|
Delightkc/fosslab
|
19eaf6f00623a54a09b51f3c31e8e6a9dfb3dbe7
|
[
"MIT"
] | 1
|
2020-10-17T09:48:19.000Z
|
2020-10-17T09:48:19.000Z
|
n=input("My Name is Delight Kurian Chandy")
print(n)
| 13.5
| 43
| 0.722222
|
n=input("My Name is Delight Kurian Chandy")
print(n)
| true
| true
|
79048c75af6e117359e7d8fac15f4339dc33aadb
| 32,423
|
py
|
Python
|
admin.py
|
SpatialStrout/ago-tools
|
6dd3726792d390fff5fa7fe7556a29305c3055e9
|
[
"Apache-2.0"
] | null | null | null |
admin.py
|
SpatialStrout/ago-tools
|
6dd3726792d390fff5fa7fe7556a29305c3055e9
|
[
"Apache-2.0"
] | null | null | null |
admin.py
|
SpatialStrout/ago-tools
|
6dd3726792d390fff5fa7fe7556a29305c3055e9
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import urllib,urllib2
import json
import csv
import time
from datetime import date, timedelta
class Admin:
'''A class of tools for administering AGO Orgs or Portals'''
def __init__(self, username, portal=None, password=None):
from . import User
self.user = User(username, portal, password)
def __users__(self, start=0):
'''Retrieve a single page of users.'''
parameters = urllib.urlencode({'token' : self.user.token,
'f' : 'json',
'start' : start,
'num' : 100})
portalId = self.user.__portalId__()
response = urllib.urlopen(self.user.portalUrl + '/sharing/rest/portals/' + portalId + '/users?' + parameters).read()
users = json.loads(response)
return users
def __roles__(self,start=0):
parameters = urllib.urlencode({'token' : self.user.token,
'f' : 'json',
'start' : start,
'num' : 100})
portalId = self.user.__portalId__()
response = urllib.urlopen(self.user.portalUrl + '/sharing/rest/portals/' + portalId + '/roles?' + parameters).read()
roles = json.loads(response)
return roles
def __groups__(self,start=0):
parameters = urllib.urlencode({'token' : self.user.token,
'q':'orgid:'+ self._getOrgID(),
'f' : 'json',
'start' : start,
'num' : 100})
portalId = self.user.__portalId__()
response = urllib.urlopen(self.user.portalUrl + '/sharing/rest/community/groups?' + parameters).read()
groups = json.loads(response)
return groups
def getRoles(self):
'''
Returns a list of roles defined in the organization.
This is helpful for custom roles because the User's role property simply returns the ID of the role.
THIS DOES NOT INCLUDE THE STANDARD ARCGIS ONLINE ROLES OF ['org_admin', 'org_publisher', 'org_author', 'org_viewer']
'''
allRoles = []
roles = self.__roles__()
for role in roles['roles']:
allRoles.append(role)
while roles['nextStart'] > 0:
roles=self.__roles__(roles['nextStart'])
for role in roles['roles']:
allRoles.append(role)
return allRoles
def getGroups(self):
'''
Returns a list of groups defined in the organization.
'''
allGroups = []
groups = self.__groups__()
for group in groups['results']:
allGroups.append(group)
        while groups['nextStart'] > 0:
            groups = self.__groups__(groups['nextStart'])
            for group in groups['results']:
                allGroups.append(group)
return allGroups
def findGroup(self,title):
'''
Gets a group object by its title.
'''
parameters = urllib.urlencode({'token' : self.user.token,
'q':'title:'+title,
'f' : 'json'})
portalId = self.user.__portalId__()
response = urllib.urlopen(self.user.portalUrl + '/sharing/rest/community/groups?' + parameters).read()
groupUsers = json.loads(response)
if "results" in groupUsers and len(groupUsers["results"]) > 0:
return groupUsers["results"][0]
else:
return None
def getUsersInGroup(self,groupID):
'''
Returns a list of users in a group
'''
parameters = urllib.urlencode({'token' : self.user.token,
'f' : 'json'})
portalId = self.user.__portalId__()
response = urllib.urlopen(self.user.portalUrl + '/sharing/rest/community/groups/'+groupID+'/users?' + parameters).read()
groupUsers = json.loads(response)
return groupUsers
def getUsers(self, roles=None, daysToCheck=10000):
'''
Returns a list of all users in the organization (requires admin access).
Optionally provide a list of roles to filter the results (e.g. ['org_publisher']).
Optionally provide a number to include only accounts created in the last x number of days.
'''
#if not roles:
# roles = ['org_admin', 'org_publisher', 'org_user']
#roles = ['org_admin', 'org_publisher', 'org_author', 'org_viewer'] # new roles to support Dec 2013 update
#the role property of a user is either one of the standard roles or a custom role ID. Loop through and build a list of ids from the queried roles.
if roles:
standardRoles = ['org_admin', 'org_publisher', 'org_author', 'org_viewer']
queryRoleIDs=[]
#if it's a standard role, go ahead and add it.
for roleName in roles:
if roleName in standardRoles:
queryRoleIDs.append(roleName)
#if it's not a standard role, we'll have to look it up to return the ID.
allRoles = self.getRoles()
for role in allRoles:
for roleName in roles:
if roleName == role["name"]:
queryRoleIDs.append(role["id"])
allUsers = []
users = self.__users__()
for user in users['users']:
if roles:
if not user['role'] in queryRoleIDs:
continue
if date.fromtimestamp(float(user['created'])/1000) > date.today()-timedelta(days=daysToCheck):
allUsers.append(user)
while users['nextStart'] > 0:
users = self.__users__(users['nextStart'])
for user in users['users']:
if roles:
if not user['role'] in queryRoleIDs:
continue
if date.fromtimestamp(float(user['created'])/1000) > date.today()-timedelta(days=daysToCheck):
allUsers.append(user)
return allUsers
def createGroup(self,title,snippet=None,description=None,tags=None,access="org",isViewOnly=False,viewOnly=False,inviteOnly=True,thumbnail=None):
'''
Creates a new group
'''
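# Example (hypothetical values):
#   admin.createGroup('Field Crews', snippet='Mobile editing crews', tags='field,editing', access='private')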
portalId = self.user.__portalId__()
uri = self.user.portalUrl + '/sharing/rest/community/createGroup'
parameters ={'token' : self.user.token,
'f' : 'json',
'title' : title,
'description':description,
'snippet':snippet,
'tags':tags,
'access':access,
'isInvitationOnly':inviteOnly,
'isViewOnly':viewOnly,
'thumbnail':thumbnail}
parameters = urllib.urlencode(parameters)
req = urllib2.Request(uri,parameters)
response = urllib2.urlopen(req)
result = response.read()
return json.loads(result)
def createUser(self,username,password,firstName,lastName,email,description,role,provider):
'''
Creates a new user WITHOUT sending an invitation
'''
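# Example (hypothetical account details; 'arcgis' assumed as the built-in identity provider):
#   admin.createUser('jsmith', 'Chang3Me!', 'Jane', 'Smith', 'jsmith@example.com', '', 'org_user', 'arcgis')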
invitations = [{"username":str(username),
"password":str(password),
"firstname":str(firstName),
"lastname":str(lastName),
"fullname":str(firstName) + " " + str(lastName),
"email":str(email),
"role":str(role)}]
parameters ={'token' : self.user.token,
'f' : 'json',
'subject':'Welcome to the portal',
'html':"blah",
'invitationList':{'invitations':invitations}}
parameters = urllib.urlencode(parameters)
portalId = self.user.__portalId__()
uri = self.user.portalUrl + '/sharing/rest/portals/' + portalId + '/invite'
req = urllib2.Request(uri,parameters)
response = urllib2.urlopen(req)
result = response.read()
return json.loads(result)
def addUsersToGroups(self, users, groups):
'''
REQUIRES ADMIN ACCESS
Add organization users to multiple groups and return a list of the status
'''
# Provide one or more usernames in a list.
# e.g. ['user_1', 'user_2']
# Provide one or more group IDs in a list.
# e.g. ['d93aabd856f8459a8905a5bd434d4d4a', 'f84c841a3dfc4591b1ff83281ea5025f']
toolSummary = []
# Assign users to the specified group(s).
parameters = urllib.urlencode({'token': self.user.token, 'f': 'json'})
for group in groups:
# Add Users - REQUIRES POST method (undocumented operation as of 2013-11-12).
response = urllib.urlopen(self.user.portalUrl + '/sharing/rest/community/groups/' + group + '/addUsers?', 'users=' + ','.join(users) + "&" + parameters).read()
# Users not added will be reported back with each group.
toolSummary.append({group: json.loads(response)})
return toolSummary
def reassignAllUser1ItemsToUser2(self, userFrom, userTo):
'''
REQUIRES ADMIN ACCESS
Transfers ownership of all items in userFrom/User1's account to userTo/User2's account, keeping same folder names.
- Does not check for existing folders in userTo's account.
- Does not delete content from userFrom's account.
'''
# request user content for userFrom
# response contains list of items in root folder and list of all folders
parameters = urllib.urlencode({'token': self.user.token, 'f': 'json'})
request = self.user.portalUrl + '/sharing/rest/content/users/' + userFrom + '?' + parameters
userContent = json.loads(urllib.urlopen(request).read())
# create same folders in userTo's account like those in userFrom's account
for folder in userContent['folders']:
parameters2 = urllib.urlencode({'title' : folder['title'], 'token': self.user.token, 'f': 'json'})
request2 = self.user.portalUrl + '/sharing/rest/content/users/' + userTo + '/createFolder?'
response2 = urllib.urlopen(request2, parameters2).read() # requires POST
# keep track of items and folders
numberOfItems = 0
numberOfFolders = 1
# change ownership of items in ROOT folder
for item in userContent['items']:
parameters3 = urllib.urlencode({'targetUsername' : userTo, 'targetFoldername' : '/', 'token': self.user.token, 'f': 'json'})
request3 = self.user.portalUrl + '/sharing/rest/content/users/' + userFrom + '/items/' + item['id'] + '/reassign?'
response3 = urllib.urlopen(request3, parameters3).read() # requires POST
if 'success' in response3:
numberOfItems += 1
### change ownership of items in SUBFOLDERS (nested loop)
# request content in current folder
for folder in userContent['folders']:
parameters4 = urllib.urlencode({'token': self.user.token, 'f': 'json'})
request4 = self.user.portalUrl + '/sharing/rest/content/users/' + userFrom + '/' + folder['id'] + '?' + parameters4
folderContent = json.loads(urllib.urlopen(request4).read())
numberOfFolders += 1
# change ownership of items in CURRENT folder to userTo and put in correct folder
for item in folderContent['items']:
parameters5 = urllib.urlencode({'targetUsername' : userTo, 'targetFoldername' : folder['title'], 'token': self.user.token, 'f': 'pjson'})
request5 = self.user.portalUrl + '/sharing/rest/content/users/' + userFrom + '/' + folder['id'] + '/items/' + item['id'] + '/reassign?'
response5 = urllib.urlopen(request5, parameters5).read() # requires POST
numberOfItems += 1
# summarize results
print ' ' + str(numberOfItems) + ' ITEMS in ' + str(numberOfFolders) + ' FOLDERS (incl. Home folder) copied'
print ' from USER ' + userFrom + ' to USER ' + userTo
return
def reassignGroupOwnership(self,groupId,userTo):
parameters ={'token' : self.user.token,
'f' : 'json',
'targetUsername':userTo}
parameters = urllib.urlencode(parameters)
portalId = self.user.__portalId__()
uri = self.user.portalUrl + '/sharing/rest/community/groups/'+groupId+'/reassign'
req = urllib2.Request(uri,parameters)
response = urllib2.urlopen(req)
result = response.read()
return json.loads(result)
def reassignAllGroupOwnership(self, userFrom, userTo):
'''
REQUIRES ADMIN ACCESS
Reassigns ownership of all groups between a pair of accounts.
'''
groups = 0
groupsReassigned = 0
# Get list of userFrom's groups
print 'Requesting ' + userFrom + "'s group info from ArcGIS Online...",
parameters = urllib.urlencode({'token': self.user.token, 'f': 'pjson'})
request = self.user.portalUrl + '/sharing/rest/community/users/' + userFrom + '?' + parameters
response = urllib.urlopen(request).read()
userFromContent = json.loads(response)
print 'RECEIVED!'
# Determine if userFrom is group owner and, if so, transfer ownership to userTo
print 'Checking groups...',
for group in userFromContent['groups']:
print '.',
groups += 1
if group['owner'] == userFrom:
parameters = urllib.urlencode({'targetUsername' : userTo, 'token': self.user.token, 'f': 'pjson'})
request = self.user.portalUrl + '/sharing/rest/community/groups/' + group['id'] + '/reassign?'
response = urllib.urlopen(request, parameters).read() # requires POST
if 'success' in response:
groupsReassigned += 1
# Report results
print
print ' CHECKED ' + str(groups) + ' groups ASSOCIATED with ' + userFrom + '.'
print ' REASSIGNED ' + str(groupsReassigned) + ' groups OWNED by ' + userFrom + ' to ' + userTo + '.'
return
def addUser2ToAllUser1Groups(self, userFrom, userTo):
'''
REQUIRES ADMIN ACCESS
Adds userTo/User2 to all groups of which userFrom/User1 is a member
'''
groups = 0
groupsOwned = 0
groupsAdded = 0
# Get list of userFrom's groups
parameters = urllib.urlencode({'token': self.user.token, 'f': 'pjson'})
request = self.user.portalUrl + '/sharing/rest/community/users/' + userFrom + '?' + parameters
response = urllib.urlopen(request).read()
userFromContent = json.loads(response)
# Add userTo to each group of which userFrom is a member, but not an owner
for group in userFromContent['groups']:
groups += 1
if group['owner'] == userFrom:
groupsOwned += 1
else:
parameters = urllib.urlencode({'users' : userTo, 'token': self.user.token, 'f': 'pjson'})
request = self.user.portalUrl + '/sharing/rest/community/groups/' + group['id'] + '/addUsers?'
response = urllib.urlopen(request, parameters).read() # requires POST
if '[]' in response: # This currently undocumented operation does not correctly return "success"
groupsAdded += 1
print ' CHECKED ' + str(groups) + ' groups associated with ' + userFrom + ':'
print ' ' + userFrom + ' OWNS ' + str(groupsOwned) + ' groups (' + userTo + ' NOT added).'
print ' ' + userTo + ' is already a MEMBER of ' + str(groups-groupsOwned-groupsAdded) + ' groups.'
print ' ' + userTo + ' was ADDED to ' + str(groupsAdded) + ' groups.'
return
def migrateAccount(self, userFrom, userTo):
'''
REQUIRES ADMIN ACCESS
Reassigns ownership of all content items and groups from userFrom to userTo.
Also adds userTo to all groups of which userFrom is a member.
'''
print 'Copying all items from ' + userFrom + ' to ' + userTo + '...'
self.reassignAllUser1ItemsToUser2(userFrom, userTo)
print
print 'Reassigning groups owned by ' + userFrom + ' to ' + userTo + '...'
self.reassignAllGroupOwnership(userFrom, userTo)
print
print 'Adding ' + userTo + ' as a member of ' + userFrom + "'s groups..."
self.addUser2ToAllUser1Groups(userFrom, userTo)
return
def migrateAccounts(self, pathUserMappingCSV):
'''
REQUIRES ADMIN ACCESS
Reassigns ownership of all content items and groups between pairs of accounts specified in a CSV file.
Also adds userTo to all groups of which userFrom is a member.
This function batches migrateAccount using a CSV to feed in the accounts to migrate from/to;
the CSV should have two columns (no column headers/labels): col1=userFrom, col2=userTo
'''
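# Example CSV contents (hypothetical usernames), one userFrom,userTo pair per row:
#   jdoe_old,jdoe_new
#   asmith_old,asmith_new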
with open(pathUserMappingCSV, 'rb') as userMappingCSV:
userMapping = csv.reader(userMappingCSV)
for user in userMapping:
userFrom = user[0]
userTo = user[1]
print '=========='
print 'Copying all items from ' + userFrom + ' to ' + userTo + '...'
self.reassignAllUser1ItemsToUser2(userFrom, userTo)
print
print 'Reassigning groups owned by ' + userFrom + ' to ' + userTo + '...'
self.reassignAllGroupOwnership(userFrom, userTo)
print
print 'Adding ' + userTo + ' as a member of ' + userFrom + "'s groups..."
self.addUser2ToAllUser1Groups(userFrom, userTo)
print '=========='
return
def updateServiceItemsThumbnail(self, folder=None):
'''
Fetches catalog of items in portal. If there is no thumbnail, assigns the default.
'''
if(folder!=None):
catalog = self.AGOLUserCatalog(folder,False)
else:
catalog=self.AGOLCatalog(None)
for r in catalog:
if(r.thumbnail==None):
parameters = urllib.urlencode({'thumbnailURL' : 'http://static.arcgis.com/images/desktopapp.png', 'token' : self.user.token, 'f' : 'json'})
requestToUpdate = self.user.portalUrl + '/sharing/rest/content/users/' + self.user.username + '/items/' +r.id + '/update'
try:
print ("updating " + r.title + " with thumbnail.")
response = urllib.urlopen(requestToUpdate, parameters ).read()
jresult = json.loads(response)
except:
pass # skip items whose thumbnail update fails and continue with the rest of the catalog
return None
def registerItems (self, mapservices, folder=''):
'''
Given a set of AGOL items, register them to the portal,
optionally to a specific folder.
'''
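# Example (hypothetical service; MapServices wraps a list of plain attribute dicts):
#   services = MapServices([{'url': 'https://example.com/arcgis/rest/services/Roads/MapServer',
#                            'title': 'Roads', 'type': 'Map Service', 'tags': 'transport',
#                            'thumbnail': None, 'id': None, 'description': None, 'snippet': None,
#                            'extent': None, 'spatialReference': None, 'accessInformation': None,
#                            'licenseInfo': None, 'culture': None}])
#   admin.registerItems(services, folder='/' + admin.getFolderID('Imports'))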
self.servicesToRegister=mapservices
if folder==None:
folder=''
icount=0
i=0
for ms in self.servicesToRegister.service_list:
i = i +1
sURL=ms.url
sTitle=ms.title
if ms.thumbnail==None:
sThumbnail ='http://static.arcgis.com/images/desktopapp.png'
elif ms.id !=None:
sThumbnail ="http://www.arcgis.com/sharing/content/items/" + ms.id + "/info/" + ms.thumbnail
else:
sThumbnail='http://static.arcgis.com/images/desktopapp.png'
#todo, handle map service exports
sTags = 'mapping' if ms.tags==None else ms.tags
sType= 'Map Service' if ms.type==None else ms.type
sDescription = '' if ms.description==None else ms.description
sSnippet = '' if ms.snippet ==None else ms.snippet
sExtent = '' if ms.extent==None else ms.extent
sSpatialReference='' if ms.spatialReference==None else ms.spatialReference
sAccessInfo='' if ms.accessInformation==None else ms.accessInformation
sLicenseInfo='' if ms.licenseInfo==None else ms.licenseInfo
sCulture='' if ms.culture == None else ms.culture
parameters = urllib.urlencode({'URL' : sURL,
'title' : sTitle,
'thumbnailURL' : sThumbnail,
'tags' : sTags,
'description' : sDescription,
'snippet': sSnippet,
'extent':sExtent,
'spatialReference':sSpatialReference,
'accessInformation': sAccessInfo,
'licenseInfo': sLicenseInfo,
'culture': sCulture,
'type' : sType,
'token' : self.user.token,
'f' : 'json'})
#todo- use export map on map service items for thumbnail
requestToAdd = self.user.portalUrl + '/sharing/rest/content/users/' + self.user.username + folder + '/addItem'
try:
if(sType.find('Service')>=0 or sType.find('Web Mapping Application')>=0):
response = urllib.urlopen(requestToAdd, parameters ).read()
jresult = json.loads(response)
print str(i) + ") " + ms.title + ": success= " + str(jresult["success"]) + "," + ms.url + ", " + "(" + jresult["id"] + ")"
if jresult["success"]:
icount=icount+1
except:
print str(i) + ") " + ms.title + ':error!'
print str(icount) + " item(s) added."
def getFolderID(self, folderName):
'''
Return the ID of the folder with the given name.
'''
folders = self._getUserFolders()
for f in folders:
if str(f['title']) == folderName:
return str(f['id'])
return ''
def _getUserFolders(self):
'''
Return all folder objects.
'''
requestToAdd = self.user.portalUrl + '/sharing/rest/content/users/' + self.user.username + '?f=json&token=' + self.user.token;
response = urllib.urlopen(requestToAdd).read()
jresult = json.loads(response)
return jresult["folders"]
def deleteGroup(self,groupid):
'''
Deletes group
'''
portalId = self.user.__portalId__()
uri = self.user.portalUrl + '/sharing/rest/community/groups/'+groupid+'/delete'
parameters ={'token' : self.user.token,
'f' : 'json'}
parameters = urllib.urlencode(parameters)
req = urllib2.Request(uri,parameters)
response = urllib2.urlopen(req)
result = response.read()
return json.loads(result)
def clearGroup(self, groupid):
'''
Unshare all content from the specified group.
CAUTION
'''
groupcatalog = self.AGOLGroupCatalog(groupid)
sItems=''
for f in groupcatalog:
requestToDelete = self.user.portalUrl + '/sharing/rest/content/items/' + f.id + "/unshare?groups=" + groupid
parameters = urllib.urlencode({
'token' : self.user.token,
'f' : 'json'})
print "Unsharing " + f.title
response = urllib.urlopen(requestToDelete,parameters).read()
jresult = json.loads(response)
print "Complete."
return None
def clearFolder(self, folderid):
'''
Delete all content from the specified folder.
CAUTION
'''
foldercatalog = self.AGOLUserCatalog(folderid)
sItems=''
for f in foldercatalog:
sItems+= f.id + ","
if len(sItems)>0: sItems=sItems[:-1]
requestToDelete = self.user.portalUrl + '/sharing/rest/content/users/' + self.user.username + "/deleteItems"
parameters = urllib.urlencode({'items':sItems,
'token' : self.user.token,
'f' : 'json'})
print "Deleting " + str(len(foldercatalog)) + " items..."
response = urllib.urlopen(requestToDelete,parameters).read()
jresult = json.loads(response)
print "Complete."
return None
def AGOLGroupCatalog(self, groupid):
'''
Return the catalog of items in the designated group.
'''
sCatalogURL=self.user.portalUrl + "/sharing/rest/search?q=%20group%3A" + groupid + "%20-type:%22Code%20Attachment%22%20-type:%22Featured%20Items%22%20-type:%22Symbol%20Set%22%20-type:%22Color%20Set%22%20-type:%22Windows%20Viewer%20Add%20In%22%20-type:%22Windows%20Viewer%20Configuration%22%20%20-type:%22Code%20Attachment%22%20-type:%22Featured%20Items%22%20-type:%22Symbol%20Set%22%20-type:%22Color%20Set%22%20-type:%22Windows%20Viewer%20Add%20In%22%20-type:%22Windows%20Viewer%20Configuration%22%20&num=100&sortField=title&sortOrder=asc"
return self.AGOLCatalog(None,None,sCatalogURL)
def AGOLUserCatalog(self, folder, includeSize=False):
'''
Return the catalog of CURRENT USER's items from portal, optionally from only a folder.
'''
sCatalogURL = self.user.portalUrl + "/sharing/rest/content/users/" + self.user.username + folder
return self.AGOLCatalog(None,None,sCatalogURL)
def AGOLCatalog(self, query=None, includeSize=False, sCatalogURL=None):
'''
Return all items from all users in a portal, optionally matching a
specified query.
optionally make the additional requests for SIZE.
sCatalogURL can be specified to use a specific folder
'''
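# Example (hypothetical query): list every feature service in the org and fetch item sizes
#   items = admin.AGOLCatalog(query='type:"Feature Service"', includeSize=True)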
resultCount = 0
searchURL = ""
viewURL = ""
orgID = ""
self.sFullSearch = ""
self.bIncludeSize=includeSize
self.orgID = self._getOrgID()
self.catalogURL=sCatalogURL #for cataloging folders
if self.user.portalUrl != None:
self.searchURL = self.user.portalUrl + "/sharing/rest"
self.viewURL = self.user.portalUrl + "/home/item.html?id="
self.query = query
pList=[]
allResults = []
sQuery=self._getCatalogQuery(1,100)#get first batch
print("fetching records 1-100...")
response = urllib.urlopen(sQuery).read()
jresult=json.loads(response)
nextRecord = jresult['nextStart']
totalRecords = jresult['total']
num = jresult['num']
start =jresult['start']
#if this is a folder catalog, use items, not results
sItemsProperty = 'results'
if self.catalogURL!=None and str(self.catalogURL).find("/sharing/rest/content/users/")>0: sItemsProperty='items'
pList = AGOLItems( jresult[sItemsProperty])
for r in pList.AGOLItems_list:
r.itemURL = self.viewURL + r.id
r.created = time.strftime("%Y-%m-%d",time.gmtime(r.created/1000))
r.modified = time.strftime("%Y-%m-%d",time.gmtime(r.modified/1000))
if r.size== -1:
r.size=0
r.size = self._getSize(r)
r.myRowID = len(allResults) + 1;
allResults.append(r)
if (nextRecord>0):
while(nextRecord>0):
sQuery = self._getCatalogQuery(nextRecord, 100)
print("fetching records " + str(nextRecord) + "-" + str(nextRecord+100) + "...")
response = urllib.urlopen(sQuery).read()
jresult=json.loads(response)
nextRecord = jresult['nextStart']
totalRecords = jresult['total']
num = jresult['num']
start =jresult['start']
pList = AGOLItems( jresult[sItemsProperty])
for r in pList.AGOLItems_list:
r.itemURL = self.viewURL + r.id
r.created = time.strftime("%Y-%m-%d",time.gmtime(r.created/1000))
r.modified = time.strftime("%Y-%m-%d",time.gmtime(r.modified/1000))
if r.size== -1:
r.size=0
r.size = self._getSize(r)
r.myRowID = len(allResults) + 1;
allResults.append(r)
return allResults
def _getSize(self, r):
'''
Issue query for item size.
'''
if(self.bIncludeSize != True):
return 0
print ("fetching size for " + r.title + " (" + r.type + ")")
result=0
sURL = self.searchURL + "/content/items/" + str(r.id) + "?f=json&token=" + self.user.token;
response = urllib.urlopen(sURL).read()
result = json.loads(response)['size']
if(result>0):
result = result/1024
else:
result=0
return result
def _getOrgID(self):
'''
Return the organization's ID.
'''
sURL = self.user.portalUrl + "/sharing/rest/portals/self?f=json&token=" + self.user.token
response = urllib.urlopen(sURL).read()
return str(json.loads(response)['id'])
def _getCatalogQuery(self, start, num):
'''
Format a content query from specified start and number of records.
'''
sQuery=None
if self.query != None:
sQuery = self.query
else:
sQuery = self.sFullSearch
if(self.catalogURL==None):
sCatalogQuery = self.searchURL + "/search?q=" + sQuery
if self.orgID != None:
sCatalogQuery += " orgid:" + self.orgID
else:
#check to ensure ? vs &
if(str(self.catalogURL).find('?')<0):
char="?"
else:
char="&"
sCatalogQuery = self.catalogURL + char + "ts=1"
sCatalogQuery += "&f=json&num="+ str(num) + "&start=" + str(start)
sCatalogQuery += "&token=" + self.user.token
return sCatalogQuery
def updateUserRoles(self, users):
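# Example (hypothetical usernames): promote two accounts to publisher
#   admin.updateUserRoles(UsersAttributes([{'Username': 'jdoe', 'Role': 'org_publisher'},
#                                           {'Username': 'asmith', 'Role': 'org_publisher'}]))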
self.usersToUpdate=users
requestToUpdate= self.user.portalUrl + '/sharing/rest/portals/self/updateuserrole'
for u in self.usersToUpdate.user_list:
parameters = urllib.urlencode({'user':u.Username,
'role':u.Role,
'token' : self.user.token,
'f' : 'json'})
print "Updating Role for " + u.Username + " to " + u.Role + "..."
response = urllib.urlopen(requestToUpdate,parameters).read()
jresult = json.loads(response)
success= str(jresult["success"])
print "Success: " + success
print "Complete."
return None
#collection of AGOLItem
class AGOLItems:
def __init__ (self, item_list):
self.AGOLItems_list=[]
for item in item_list:
self.AGOLItems_list.append(AGOLItem(item))
#AGOL item
class AGOLItem:
def __init__(self, item_attributes):
for k, v in item_attributes.items():
setattr(self, k, v)
#collection of Map Services
class MapServices:
def __init__ (self, import_list):
self.service_list=[]
for service in import_list:
self.service_list.append(MapService(service))
#Map Service
class MapService:
def __init__(self, service_attributes):
for k, v in service_attributes.items():
setattr(self, k, v)
#Collection of Usernames and roles
class UsersAttributes:
def __init__ (self, import_list):
self.user_list=[]
for user in import_list:
self.user_list.append(UserAttributes(user))
class UserAttributes:
def __init__(self, user_attributes):
for k, v in user_attributes.items():
setattr(self, k, v)
| 41.250636
| 547
| 0.559603
|
import urllib,urllib2
import json
import csv
import time
from datetime import date, timedelta
class Admin:
'''A class of tools for administering AGO Orgs or Portals'''
def __init__(self, username, portal=None, password=None):
from . import User
self.user = User(username, portal, password)
def __users__(self, start=0):
'''Retrieve a single page of users.'''
parameters = urllib.urlencode({'token' : self.user.token,
'f' : 'json',
'start' : start,
'num' : 100})
portalId = self.user.__portalId__()
response = urllib.urlopen(self.user.portalUrl + '/sharing/rest/portals/' + portalId + '/users?' + parameters).read()
users = json.loads(response)
return users
def __roles__(self,start=0):
parameters = urllib.urlencode({'token' : self.user.token,
'f' : 'json',
'start' : start,
'num' : 100})
portalId = self.user.__portalId__()
response = urllib.urlopen(self.user.portalUrl + '/sharing/rest/portals/' + portalId + '/roles?' + parameters).read()
roles = json.loads(response)
return roles
def __groups__(self,start=0):
parameters = urllib.urlencode({'token' : self.user.token,
'q':'orgid:'+ self._getOrgID(),
'f' : 'json',
'start' : start,
'num' : 100})
portalId = self.user.__portalId__()
response = urllib.urlopen(self.user.portalUrl + '/sharing/rest/community/groups?' + parameters).read()
groups = json.loads(response)
return groups
def getRoles(self):
'''
Returns a list of roles defined in the organization.
This is helpful for custom roles because the User's role property simply returns the ID of the role.
THIS DOES NOT INCLUDE THE STANDARD ARCGIS ONLINE ROLES OF ['org_admin', 'org_publisher', 'org_author', 'org_viewer']
'''
allRoles = []
roles = self.__roles__()
for role in roles['roles']:
allRoles.append(role)
while roles['nextStart'] > 0:
roles=self.__roles__(roles['nextStart'])
for role in roles['roles']:
allRoles.append(role)
return allRoles
def getGroups(self):
'''
Returns a list of groups defined in the organization.
'''
allGroups = []
groups = self.__groups__()
for group in groups['results']:
allGroups.append(group)
while groups['nextStart'] > 0:
groups = self.__groups__(groups['nextStart'])
for group in groups['results']:
allGroups.append(group)
return allGroups
def findGroup(self,title):
'''
Gets a group object by its title.
'''
parameters = urllib.urlencode({'token' : self.user.token,
'q':'title:'+title,
'f' : 'json'})
portalId = self.user.__portalId__()
response = urllib.urlopen(self.user.portalUrl + '/sharing/rest/community/groups?' + parameters).read()
groupUsers = json.loads(response)
if "results" in groupUsers and len(groupUsers["results"]) > 0:
return groupUsers["results"][0]
else:
return None
def getUsersInGroup(self,groupID):
'''
Returns a list of users in a group
'''
parameters = urllib.urlencode({'token' : self.user.token,
'f' : 'json'})
portalId = self.user.__portalId__()
response = urllib.urlopen(self.user.portalUrl + '/sharing/rest/community/groups/'+groupID+'/users?' + parameters).read()
groupUsers = json.loads(response)
return groupUsers
def getUsers(self, roles=None, daysToCheck=10000):
'''
Returns a list of all users in the organization (requires admin access).
Optionally provide a list of roles to filter the results (e.g. ['org_publisher']).
Optionally provide a number to include only accounts created in the last x number of days.
'''
#if not roles:
# roles = ['org_admin', 'org_publisher', 'org_user']
#roles = ['org_admin', 'org_publisher', 'org_author', 'org_viewer'] # new roles to support Dec 2013 update
#the role property of a user is either one of the standard roles or a custom role ID. Loop through and build a list of ids from the queried roles.
if roles:
standardRoles = ['org_admin', 'org_publisher', 'org_author', 'org_viewer']
queryRoleIDs=[]
#if it's a standard role, go ahead and add it.
for roleName in roles:
if roleName in standardRoles:
queryRoleIDs.append(roleName)
allRoles = self.getRoles()
for role in allRoles:
for roleName in roles:
if roleName == role["name"]:
queryRoleIDs.append(role["id"])
allUsers = []
users = self.__users__()
for user in users['users']:
if roles:
if not user['role'] in queryRoleIDs:
continue
if date.fromtimestamp(float(user['created'])/1000) > date.today()-timedelta(days=daysToCheck):
allUsers.append(user)
while users['nextStart'] > 0:
users = self.__users__(users['nextStart'])
for user in users['users']:
if roles:
if not user['role'] in queryRoleIDs:
continue
if date.fromtimestamp(float(user['created'])/1000) > date.today()-timedelta(days=daysToCheck):
allUsers.append(user)
return allUsers
def createGroup(self,title,snippet=None,description=None,tags=None,access="org",isViewOnly=False,viewOnly=False,inviteOnly=True,thumbnail=None):
'''
Creates a new group
'''
portalId = self.user.__portalId__()
uri = self.user.portalUrl + '/sharing/rest/community/createGroup'
parameters ={'token' : self.user.token,
'f' : 'json',
'title' : title,
'description':description,
'snippet':snippet,
'tags':tags,
'access':access,
'isInvitationOnly':inviteOnly,
'isViewOnly':viewOnly,
'thumbnail':thumbnail}
parameters = urllib.urlencode(parameters)
req = urllib2.Request(uri,parameters)
response = urllib2.urlopen(req)
result = response.read()
return json.loads(result)
def createUser(self,username,password,firstName,lastName,email,description,role,provider):
'''
Creates a new user WITHOUT sending an invitation
'''
invitations = [{"username":str(username),
"password":str(password),
"firstname":str(firstName),
"lastname":str(lastName),
"fullname":str(firstName) + " " + str(lastName),
"email":str(email),
"role":str(role)}]
parameters ={'token' : self.user.token,
'f' : 'json',
'subject':'Welcome to the portal',
'html':"blah",
'invitationList':{'invitations':invitations}}
parameters = urllib.urlencode(parameters)
portalId = self.user.__portalId__()
uri = self.user.portalUrl + '/sharing/rest/portals/' + portalId + '/invite'
req = urllib2.Request(uri,parameters)
response = urllib2.urlopen(req)
result = response.read()
return json.loads(result)
def addUsersToGroups(self, users, groups):
'''
REQUIRES ADMIN ACCESS
Add organization users to multiple groups and return a list of the status
'''
toolSummary = []
parameters = urllib.urlencode({'token': self.user.token, 'f': 'json'})
for group in groups:
response = urllib.urlopen(self.user.portalUrl + '/sharing/rest/community/groups/' + group + '/addUsers?', 'users=' + ','.join(users) + "&" + parameters).read()
toolSummary.append({group: json.loads(response)})
return toolSummary
def reassignAllUser1ItemsToUser2(self, userFrom, userTo):
'''
REQUIRES ADMIN ACCESS
Transfers ownership of all items in userFrom/User1's account to userTo/User2's account, keeping same folder names.
- Does not check for existing folders in userTo's account.
- Does not delete content from userFrom's account.
'''
parameters = urllib.urlencode({'token': self.user.token, 'f': 'json'})
request = self.user.portalUrl + '/sharing/rest/content/users/' + userFrom + '?' + parameters
userContent = json.loads(urllib.urlopen(request).read())
for folder in userContent['folders']:
parameters2 = urllib.urlencode({'title' : folder['title'], 'token': self.user.token, 'f': 'json'})
request2 = self.user.portalUrl + '/sharing/rest/content/users/' + userTo + '/createFolder?'
response2 = urllib.urlopen(request2, parameters2).read()
numberOfItems = 0
numberOfFolders = 1
for item in userContent['items']:
parameters3 = urllib.urlencode({'targetUsername' : userTo, 'targetFoldername' : '/', 'token': self.user.token, 'f': 'json'})
request3 = self.user.portalUrl + '/sharing/rest/content/users/' + userFrom + '/items/' + item['id'] + '/reassign?'
response3 = urllib.urlopen(request3, parameters3).read()
if 'success' in response3:
numberOfItems += 1
for folder in userContent['folders']:
parameters4 = urllib.urlencode({'token': self.user.token, 'f': 'json'})
request4 = self.user.portalUrl + '/sharing/rest/content/users/' + userFrom + '/' + folder['id'] + '?' + parameters4
folderContent = json.loads(urllib.urlopen(request4).read())
numberOfFolders += 1
for item in folderContent['items']:
parameters5 = urllib.urlencode({'targetUsername' : userTo, 'targetFoldername' : folder['title'], 'token': self.user.token, 'f': 'pjson'})
request5 = self.user.portalUrl + '/sharing/rest/content/users/' + userFrom + '/' + folder['id'] + '/items/' + item['id'] + '/reassign?'
response5 = urllib.urlopen(request5, parameters5).read()
numberOfItems += 1
print ' ' + str(numberOfItems) + ' ITEMS in ' + str(numberOfFolders) + ' FOLDERS (incl. Home folder) copied'
print ' from USER ' + userFrom + ' to USER ' + userTo
return
def reassignGroupOwnership(self,groupId,userTo):
parameters ={'token' : self.user.token,
'f' : 'json',
'targetUsername':userTo}
parameters = urllib.urlencode(parameters)
portalId = self.user.__portalId__()
uri = self.user.portalUrl + '/sharing/rest/community/groups/'+groupId+'/reassign'
req = urllib2.Request(uri,parameters)
response = urllib2.urlopen(req)
result = response.read()
return json.loads(result)
def reassignAllGroupOwnership(self, userFrom, userTo):
'''
REQUIRES ADMIN ACCESS
Reassigns ownership of all groups between a pair of accounts.
'''
groups = 0
groupsReassigned = 0
print 'Requesting ' + userFrom + "'s group info from ArcGIS Online...",
parameters = urllib.urlencode({'token': self.user.token, 'f': 'pjson'})
request = self.user.portalUrl + '/sharing/rest/community/users/' + userFrom + '?' + parameters
response = urllib.urlopen(request).read()
userFromContent = json.loads(response)
print 'RECEIVED!'
print 'Checking groups...',
for group in userFromContent['groups']:
print '.',
groups += 1
if group['owner'] == userFrom:
parameters = urllib.urlencode({'targetUsername' : userTo, 'token': self.user.token, 'f': 'pjson'})
request = self.user.portalUrl + '/sharing/rest/community/groups/' + group['id'] + '/reassign?'
response = urllib.urlopen(request, parameters).read()
if 'success' in response:
groupsReassigned += 1
print
print ' CHECKED ' + str(groups) + ' groups ASSOCIATED with ' + userFrom + '.'
print ' REASSIGNED ' + str(groupsReassigned) + ' groups OWNED by ' + userFrom + ' to ' + userTo + '.'
return
def addUser2ToAllUser1Groups(self, userFrom, userTo):
'''
REQUIRES ADMIN ACCESS
Adds userTo/User2 to all groups of which userFrom/User1 is a member
'''
groups = 0
groupsOwned = 0
groupsAdded = 0
parameters = urllib.urlencode({'token': self.user.token, 'f': 'pjson'})
request = self.user.portalUrl + '/sharing/rest/community/users/' + userFrom + '?' + parameters
response = urllib.urlopen(request).read()
userFromContent = json.loads(response)
# Add userTo to each group of which userFrom is a member, but not an owner
for group in userFromContent['groups']:
groups += 1
if group['owner'] == userFrom:
groupsOwned += 1
else:
parameters = urllib.urlencode({'users' : userTo, 'token': self.user.token, 'f': 'pjson'})
request = self.user.portalUrl + '/sharing/rest/community/groups/' + group['id'] + '/addUsers?'
response = urllib.urlopen(request, parameters).read()
if '[]' in response:
groupsAdded += 1
print ' CHECKED ' + str(groups) + ' groups associated with ' + userFrom + ':'
print ' ' + userFrom + ' OWNS ' + str(groupsOwned) + ' groups (' + userTo + ' NOT added).'
print ' ' + userTo + ' is already a MEMBER of ' + str(groups-groupsOwned-groupsAdded) + ' groups.'
print ' ' + userTo + ' was ADDED to ' + str(groupsAdded) + ' groups.'
return
def migrateAccount(self, userFrom, userTo):
'''
REQUIRES ADMIN ACCESS
Reassigns ownership of all content items and groups from userFrom to userTo.
Also adds userTo to all groups of which userFrom is a member.
'''
print 'Copying all items from ' + userFrom + ' to ' + userTo + '...'
self.reassignAllUser1ItemsToUser2(userFrom, userTo)
print
print 'Reassigning groups owned by ' + userFrom + ' to ' + userTo + '...'
self.reassignAllGroupOwnership(userFrom, userTo)
print
print 'Adding ' + userTo + ' as a member of ' + userFrom + "'s groups..."
self.addUser2ToAllUser1Groups(userFrom, userTo)
return
def migrateAccounts(self, pathUserMappingCSV):
'''
REQUIRES ADMIN ACCESS
Reassigns ownership of all content items and groups between pairs of accounts specified in a CSV file.
Also adds userTo to all groups of which userFrom is a member.
This function batches migrateAccount using a CSV to feed in the accounts to migrate from/to;
the CSV should have two columns (no column headers/labels): col1=userFrom, col2=userTo
'''
with open(pathUserMappingCSV, 'rb') as userMappingCSV:
userMapping = csv.reader(userMappingCSV)
for user in userMapping:
userFrom = user[0]
userTo = user[1]
print '=========='
print 'Copying all items from ' + userFrom + ' to ' + userTo + '...'
self.reassignAllUser1ItemsToUser2(userFrom, userTo)
print
print 'Reassigning groups owned by ' + userFrom + ' to ' + userTo + '...'
self.reassignAllGroupOwnership(userFrom, userTo)
print
print 'Adding ' + userTo + ' as a member of ' + userFrom + "'s groups..."
self.addUser2ToAllUser1Groups(userFrom, userTo)
print '=========='
return
def updateServiceItemsThumbnail(self, folder=None):
'''
Fetches catalog of items in portal. If there is no thumbnail, assigns the default.
'''
if(folder!=None):
catalog = self.AGOLUserCatalog(folder,False)
else:
catalog=self.AGOLCatalog(None)
for r in catalog:
if(r.thumbnail==None):
parameters = urllib.urlencode({'thumbnailURL' : 'http://static.arcgis.com/images/desktopapp.png', 'token' : self.user.token, 'f' : 'json'})
requestToUpdate = self.user.portalUrl + '/sharing/rest/content/users/' + self.user.username + '/items/' +r.id + '/update'
try:
print ("updating " + r.title + " with thumbnail.")
response = urllib.urlopen(requestToUpdate, parameters ).read()
jresult = json.loads(response)
except:
e=1
return None
def registerItems (self, mapservices, folder=''):
'''
Given a set of AGOL items, register them to the portal,
optionally to a specific folder.
'''
self.servicesToRegister=mapservices
if folder==None:
folder=''
icount=0
i=0
for ms in self.servicesToRegister.service_list:
i = i +1
sURL=ms.url
sTitle=ms.title
if ms.thumbnail==None:
sThumbnail ='http://static.arcgis.com/images/desktopapp.png'
elif ms.id !=None:
sThumbnail ="http://www.arcgis.com/sharing/content/items/" + ms.id + "/info/" + ms.thumbnail
else:
sThumbnail='http://static.arcgis.com/images/desktopapp.png'
sTags = 'mapping' if ms.tags==None else ms.tags
sType= 'Map Service' if ms.type==None else ms.type
sDescription = '' if ms.description==None else ms.description
sSnippet = '' if ms.snippet ==None else ms.snippet
sExtent = '' if ms.extent==None else ms.extent
sSpatialReference='' if ms.spatialReference==None else ms.spatialReference
sAccessInfo='' if ms.accessInformation==None else ms.accessInformation
sLicenseInfo='' if ms.licenseInfo==None else ms.licenseInfo
sCulture='' if ms.culture == None else ms.culture
parameters = urllib.urlencode({'URL' : sURL,
'title' : sTitle,
'thumbnailURL' : sThumbnail,
'tags' : sTags,
'description' : sDescription,
'snippet': sSnippet,
'extent':sExtent,
'spatialReference':sSpatialReference,
'accessInformation': sAccessInfo,
'licenseInfo': sLicenseInfo,
'culture': sCulture,
'type' : sType,
'token' : self.user.token,
'f' : 'json'})
requestToAdd = self.user.portalUrl + '/sharing/rest/content/users/' + self.user.username + folder + '/addItem'
try:
if(sType.find('Service')>=0 or sType.find('Web Mapping Application')>=0):
response = urllib.urlopen(requestToAdd, parameters ).read()
jresult = json.loads(response)
print str(i) + ") " + ms.title + ": success= " + str(jresult["success"]) + "," + ms.url + ", " + "(" + jresult["id"] + ")"
if jresult["success"]:
icount=icount+1
except:
print str(i) + ") " + ms.title + ':error!'
print str(icount) + " item(s) added."
def getFolderID(self, folderName):
'''
Return the ID of the folder with the given name.
'''
folders = self._getUserFolders()
for f in folders:
if str(f['title']) == folderName:
return str(f['id'])
return ''
def _getUserFolders(self):
'''
Return all folder objects.
'''
requestToAdd = self.user.portalUrl + '/sharing/rest/content/users/' + self.user.username + '?f=json&token=' + self.user.token;
response = urllib.urlopen(requestToAdd).read()
jresult = json.loads(response)
return jresult["folders"]
def deleteGroup(self,groupid):
'''
Deletes group
'''
portalId = self.user.__portalId__()
uri = self.user.portalUrl + '/sharing/rest/community/groups/'+groupid+'/delete'
parameters ={'token' : self.user.token,
'f' : 'json'}
parameters = urllib.urlencode(parameters)
req = urllib2.Request(uri,parameters)
response = urllib2.urlopen(req)
result = response.read()
return json.loads(result)
def clearGroup(self, groupid):
'''
Unshare all content from the specified group.
CAUTION
'''
groupcatalog = self.AGOLGroupCatalog(groupid)
sItems=''
for f in groupcatalog:
requestToDelete = self.user.portalUrl + '/sharing/rest/content/items/' + f.id + "/unshare?groups=" + groupid
parameters = urllib.urlencode({
'token' : self.user.token,
'f' : 'json'})
print "Unsharing " + f.title
response = urllib.urlopen(requestToDelete,parameters).read()
jresult = json.loads(response)
print "Complete."
return None
def clearFolder(self, folderid):
'''
Delete all content from the specified folder.
CAUTION
'''
foldercatalog = self.AGOLUserCatalog(folderid)
sItems=''
for f in foldercatalog:
sItems+= f.id + ","
if len(sItems)>0: sItems=sItems[:-1]
requestToDelete = self.user.portalUrl + '/sharing/rest/content/users/' + self.user.username + "/deleteItems"
parameters = urllib.urlencode({'items':sItems,
'token' : self.user.token,
'f' : 'json'})
print "Deleting " + str(len(foldercatalog)) + " items..."
response = urllib.urlopen(requestToDelete,parameters).read()
jresult = json.loads(response)
print "Complete."
return None
def AGOLGroupCatalog(self, groupid):
'''
Return the catalog of items in the designated group.
'''
sCatalogURL=self.user.portalUrl + "/sharing/rest/search?q=%20group%3A" + groupid + "%20-type:%22Code%20Attachment%22%20-type:%22Featured%20Items%22%20-type:%22Symbol%20Set%22%20-type:%22Color%20Set%22%20-type:%22Windows%20Viewer%20Add%20In%22%20-type:%22Windows%20Viewer%20Configuration%22%20%20-type:%22Code%20Attachment%22%20-type:%22Featured%20Items%22%20-type:%22Symbol%20Set%22%20-type:%22Color%20Set%22%20-type:%22Windows%20Viewer%20Add%20In%22%20-type:%22Windows%20Viewer%20Configuration%22%20&num=100&sortField=title&sortOrder=asc"
return self.AGOLCatalog(None,None,sCatalogURL)
def AGOLUserCatalog(self, folder, includeSize=False):
'''
Return the catalog of CURRENT USER's items from portal, optionally from only a folder.
'''
sCatalogURL = self.user.portalUrl + "/sharing/rest/content/users/" + self.user.username + folder
return self.AGOLCatalog(None,None,sCatalogURL)
def AGOLCatalog(self, query=None, includeSize=False, sCatalogURL=None):
'''
Return all items from all users in a portal, optionally matching a
specified query.
optionally make the additional requests for SIZE.
sCatalogURL can be specified to use a specific folder
'''
resultCount = 0
searchURL = ""
viewURL = ""
orgID = ""
self.sFullSearch = ""
self.bIncludeSize=includeSize
self.orgID = self._getOrgID()
self.catalogURL=sCatalogURL #for cataloging folders
if self.user.portalUrl != None:
self.searchURL = self.user.portalUrl + "/sharing/rest"
self.viewURL = self.user.portalUrl + "/home/item.html?id="
self.query = query
pList=[]
allResults = []
sQuery=self._getCatalogQuery(1,100)#get first batch
print("fetching records 1-100...")
response = urllib.urlopen(sQuery).read()
jresult=json.loads(response)
nextRecord = jresult['nextStart']
totalRecords = jresult['total']
num = jresult['num']
start =jresult['start']
#if this is a folder catalog, use items, not results
sItemsProperty = 'results'
if self.catalogURL!=None and str(self.catalogURL).find("/sharing/rest/content/users/")>0: sItemsProperty='items'
pList = AGOLItems( jresult[sItemsProperty])
for r in pList.AGOLItems_list:
r.itemURL = self.viewURL + r.id
r.created = time.strftime("%Y-%m-%d",time.gmtime(r.created/1000))
r.modified = time.strftime("%Y-%m-%d",time.gmtime(r.modified/1000))
if r.size== -1:
r.size=0
r.size = self._getSize(r)
r.myRowID = len(allResults) + 1;
allResults.append(r)
if (nextRecord>0):
while(nextRecord>0):
sQuery = self._getCatalogQuery(nextRecord, 100)
print("fetching records " + str(nextRecord) + "-" + str(nextRecord+100) + "...")
response = urllib.urlopen(sQuery).read()
jresult=json.loads(response)
nextRecord = jresult['nextStart']
totalRecords = jresult['total']
num = jresult['num']
start =jresult['start']
pList = AGOLItems( jresult[sItemsProperty])
for r in pList.AGOLItems_list:
r.itemURL = self.viewURL + r.id
r.created = time.strftime("%Y-%m-%d",time.gmtime(r.created/1000))
r.modified = time.strftime("%Y-%m-%d",time.gmtime(r.modified/1000))
if r.size== -1:
r.size=0
r.size = self._getSize(r)
r.myRowID = len(allResults) + 1;
allResults.append(r)
return allResults
def _getSize(self, r):
'''
Issue query for item size.
'''
if(self.bIncludeSize != True):
return 0
print ("fetching size for " + r.title + " (" + r.type + ")")
result=0
sURL = self.searchURL + "/content/items/" + str(r.id) + "?f=json&token=" + self.user.token;
response = urllib.urlopen(sURL).read()
result = json.loads(response)['size']
if(result>0):
result = result/1024
else:
result=0
return result
def _getOrgID(self):
'''
Return the organization's ID.
'''
sURL = self.user.portalUrl + "/sharing/rest/portals/self?f=json&token=" + self.user.token
response = urllib.urlopen(sURL).read()
return str(json.loads(response)['id'])
def _getCatalogQuery(self, start, num):
'''
Format a content query from specified start and number of records.
'''
sQuery=None
if self.query != None:
sQuery = self.query
else:
sQuery = self.sFullSearch
if(self.catalogURL==None):
sCatalogQuery = self.searchURL + "/search?q=" + sQuery
if self.orgID != None:
sCatalogQuery += " orgid:" + self.orgID
else:
if(str(self.catalogURL).find('?')<0):
char="?"
else:
char="&"
sCatalogQuery = self.catalogURL + char + "ts=1"
sCatalogQuery += "&f=json&num="+ str(num) + "&start=" + str(start)
sCatalogQuery += "&token=" + self.user.token
return sCatalogQuery
def updateUserRoles(self, users):
self.usersToUpdate=users
requestToUpdate= self.user.portalUrl + '/sharing/rest/portals/self/updateuserrole'
for u in self.usersToUpdate.user_list:
parameters = urllib.urlencode({'user':u.Username,
'role':u.Role,
'token' : self.user.token,
'f' : 'json'})
print "Updating Role for " + u.Username + " to " + u.Role + "..."
response = urllib.urlopen(requestToUpdate,parameters).read()
jresult = json.loads(response)
success= str(jresult["success"])
print "Success: " + success
print "Complete."
return None
class AGOLItems:
def __init__ (self, item_list):
self.AGOLItems_list=[]
for item in item_list:
self.AGOLItems_list.append(AGOLItem(item))
class AGOLItem:
def __init__(self, item_attributes):
for k, v in item_attributes.items():
setattr(self, k, v)
class MapServices:
def __init__ (self, import_list):
self.service_list=[]
for service in import_list:
self.service_list.append(MapService(service))
class MapService:
def __init__(self, service_attributes):
for k, v in service_attributes.items():
setattr(self, k, v)
class UsersAttributes:
def __init__ (self, import_list):
self.user_list=[]
for user in import_list:
self.user_list.append(UserAttributes(user))
class UserAttributes:
def __init__(self, user_attributes):
for k, v in user_attributes.items():
setattr(self, k, v)
| false
| true
|
79048c941ac0e16854fa97bd02fbab176bf74c74
| 171
|
py
|
Python
|
apps/course/apps.py
|
wyftddev/MXOline
|
b0353d57fd91851088486e7caf18d9db706c113c
|
[
"Apache-2.0"
] | null | null | null |
apps/course/apps.py
|
wyftddev/MXOline
|
b0353d57fd91851088486e7caf18d9db706c113c
|
[
"Apache-2.0"
] | null | null | null |
apps/course/apps.py
|
wyftddev/MXOline
|
b0353d57fd91851088486e7caf18d9db706c113c
|
[
"Apache-2.0"
] | null | null | null |
#encoding=utf-8
from __future__ import unicode_literals
from django.apps import AppConfig
class CourseConfig(AppConfig):
name = 'course'
verbose_name = u"课程管理"
| 17.1
| 39
| 0.760234
|
from __future__ import unicode_literals
from django.apps import AppConfig
class CourseConfig(AppConfig):
name = 'course'
verbose_name = u"课程管理"
| true
| true
|
79048cff42ce750f3a33344f76f2a01c5367ca07
| 485
|
py
|
Python
|
wargame/designpatterns/pythonic_orcfighter.py
|
jeantardelli/wargameRepo
|
1e11ae40281f7eafa65ea6e40e045304b20e3824
|
[
"MIT"
] | 1
|
2020-12-01T20:30:27.000Z
|
2020-12-01T20:30:27.000Z
|
wargame/designpatterns/pythonic_orcfighter.py
|
jeantardelli/wargameRepo
|
1e11ae40281f7eafa65ea6e40e045304b20e3824
|
[
"MIT"
] | null | null | null |
wargame/designpatterns/pythonic_orcfighter.py
|
jeantardelli/wargameRepo
|
1e11ae40281f7eafa65ea6e40e045304b20e3824
|
[
"MIT"
] | null | null | null |
"""pythonic_orcfighter
This is one of the different GameUnits that are used in the design patterns examples.
:copyright: 2020, Jean Tardelli
:license: The MIT license (MIT). See LICENSE file for further details.
"""
from pythonic_abstractgameunit import AbstractGameUnit
class OrcFighter(AbstractGameUnit):
"""Create a OrcFighter instance"""
def info(self):
"""Print info about this unit, overrides superclass method."""
print("Grrr, I am the Orc Figher!")
| 30.3125
| 85
| 0.736082
|
from pythonic_abstractgameunit import AbstractGameUnit
class OrcFighter(AbstractGameUnit):
def info(self):
print("Grrr, I am the Orc Figher!")
| true
| true
|
79048ec7a7c5aab851a0a8ff50a5e9a9d1fabda0
| 10,620
|
py
|
Python
|
python_modules/dagster/dagster_tests/daemon_tests/test_queued_run_coordinator_daemon.py
|
keypointt/dagster
|
45683a29cbe2429d4e538254fac9498198f53879
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster/dagster_tests/daemon_tests/test_queued_run_coordinator_daemon.py
|
keypointt/dagster
|
45683a29cbe2429d4e538254fac9498198f53879
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster/dagster_tests/daemon_tests/test_queued_run_coordinator_daemon.py
|
keypointt/dagster
|
45683a29cbe2429d4e538254fac9498198f53879
|
[
"Apache-2.0"
] | null | null | null |
# pylint: disable=redefined-outer-name
import pytest
from dagster.core.code_pointer import ModuleCodePointer
from dagster.core.definitions.reconstructable import ReconstructableRepository
from dagster.core.host_representation.grpc_server_registry import ProcessGrpcServerRegistry
from dagster.core.host_representation.handle import GrpcServerRepositoryLocationHandle
from dagster.core.host_representation.origin import (
ExternalPipelineOrigin,
ExternalRepositoryOrigin,
InProcessRepositoryLocationOrigin,
)
from dagster.core.storage.pipeline_run import IN_PROGRESS_RUN_STATUSES, PipelineRunStatus
from dagster.core.storage.tags import PRIORITY_TAG
from dagster.core.test_utils import create_run_for_test, instance_for_test
from dagster.daemon.run_coordinator.queued_run_coordinator_daemon import QueuedRunCoordinatorDaemon
from dagster_tests.api_tests.utils import get_foo_pipeline_handle
@pytest.fixture()
def instance():
overrides = {
"run_launcher": {"module": "dagster.core.test_utils", "class": "MockedRunLauncher"},
}
with instance_for_test(overrides=overrides) as inst:
yield inst
@pytest.fixture()
def grpc_server_registry(instance): # pylint: disable=unused-argument
with ProcessGrpcServerRegistry(wait_for_processes_on_exit=True) as registry:
yield registry
def create_run(instance, **kwargs):
with get_foo_pipeline_handle() as pipeline_handle:
create_run_for_test(
instance,
external_pipeline_origin=pipeline_handle.get_external_origin(),
pipeline_name="foo",
**kwargs,
)
def create_invalid_run(instance, **kwargs):
create_run_for_test(
instance,
external_pipeline_origin=ExternalPipelineOrigin(
ExternalRepositoryOrigin(
InProcessRepositoryLocationOrigin(
ReconstructableRepository(ModuleCodePointer("fake", "fake"))
),
"foo",
),
"wrong-pipeline",
),
pipeline_name="wrong-pipeline",
**kwargs,
)
def get_run_ids(runs_queue):
return [run.run_id for run in runs_queue]
def test_attempt_to_launch_runs_filter(instance, grpc_server_registry):
create_run(
instance,
run_id="queued-run",
status=PipelineRunStatus.QUEUED,
)
create_run(
instance,
run_id="non-queued-run",
status=PipelineRunStatus.NOT_STARTED,
)
coordinator = QueuedRunCoordinatorDaemon(
interval_seconds=5,
max_concurrent_runs=10,
)
list(coordinator.run_iteration(instance, grpc_server_registry))
assert get_run_ids(instance.run_launcher.queue()) == ["queued-run"]
def test_attempt_to_launch_runs_no_queued(instance, grpc_server_registry):
create_run(
instance,
run_id="queued-run",
status=PipelineRunStatus.STARTED,
)
create_run(
instance,
run_id="non-queued-run",
status=PipelineRunStatus.NOT_STARTED,
)
coordinator = QueuedRunCoordinatorDaemon(
interval_seconds=5,
max_concurrent_runs=10,
)
list(coordinator.run_iteration(instance, grpc_server_registry))
assert instance.run_launcher.queue() == []
@pytest.mark.parametrize(
"num_in_progress_runs",
[0, 1, 3, 4, 5],
)
def test_get_queued_runs_max_runs(instance, num_in_progress_runs, grpc_server_registry):
max_runs = 4
# fill run store with ongoing runs
in_progress_run_ids = ["in_progress-run-{}".format(i) for i in range(num_in_progress_runs)]
for i, run_id in enumerate(in_progress_run_ids):
# get a selection of all in progress statuses
status = IN_PROGRESS_RUN_STATUSES[i % len(IN_PROGRESS_RUN_STATUSES)]
create_run(
instance,
run_id=run_id,
status=status,
)
# add more queued runs than should be launched
queued_run_ids = ["queued-run-{}".format(i) for i in range(max_runs + 1)]
for run_id in queued_run_ids:
create_run(
instance,
run_id=run_id,
status=PipelineRunStatus.QUEUED,
)
coordinator = QueuedRunCoordinatorDaemon(
interval_seconds=5,
max_concurrent_runs=max_runs,
)
list(coordinator.run_iteration(instance, grpc_server_registry))
assert len(instance.run_launcher.queue()) == max(0, max_runs - num_in_progress_runs)
def test_priority(instance, grpc_server_registry):
create_run(instance, run_id="default-pri-run", status=PipelineRunStatus.QUEUED)
create_run(
instance,
run_id="low-pri-run",
status=PipelineRunStatus.QUEUED,
tags={PRIORITY_TAG: "-1"},
)
create_run(
instance,
run_id="hi-pri-run",
status=PipelineRunStatus.QUEUED,
tags={PRIORITY_TAG: "3"},
)
coordinator = QueuedRunCoordinatorDaemon(
interval_seconds=5,
max_concurrent_runs=10,
)
list(coordinator.run_iteration(instance, grpc_server_registry))
assert get_run_ids(instance.run_launcher.queue()) == [
"hi-pri-run",
"default-pri-run",
"low-pri-run",
]
def test_priority_on_malformed_tag(instance, grpc_server_registry):
create_run(
instance,
run_id="bad-pri-run",
status=PipelineRunStatus.QUEUED,
tags={PRIORITY_TAG: "foobar"},
)
coordinator = QueuedRunCoordinatorDaemon(
interval_seconds=5,
max_concurrent_runs=10,
)
list(coordinator.run_iteration(instance, grpc_server_registry))
assert get_run_ids(instance.run_launcher.queue()) == ["bad-pri-run"]
def test_tag_limits(instance, grpc_server_registry):
create_run(
instance,
run_id="tiny-1",
status=PipelineRunStatus.QUEUED,
tags={"database": "tiny"},
)
create_run(
instance,
run_id="tiny-2",
status=PipelineRunStatus.QUEUED,
tags={"database": "tiny"},
)
create_run(
instance,
run_id="large-1",
status=PipelineRunStatus.QUEUED,
tags={"database": "large"},
)
coordinator = QueuedRunCoordinatorDaemon(
interval_seconds=5,
max_concurrent_runs=10,
tag_concurrency_limits=[{"key": "database", "value": "tiny", "limit": 1}],
)
list(coordinator.run_iteration(instance, grpc_server_registry))
assert get_run_ids(instance.run_launcher.queue()) == ["tiny-1", "large-1"]
def test_multiple_tag_limits(instance, grpc_server_registry):
create_run(
instance,
run_id="run-1",
status=PipelineRunStatus.QUEUED,
tags={"database": "tiny", "user": "johann"},
)
create_run(
instance,
run_id="run-2",
status=PipelineRunStatus.QUEUED,
tags={"database": "tiny"},
)
create_run(
instance,
run_id="run-3",
status=PipelineRunStatus.QUEUED,
tags={"user": "johann"},
)
create_run(
instance,
run_id="run-4",
status=PipelineRunStatus.QUEUED,
tags={"user": "johann"},
)
coordinator = QueuedRunCoordinatorDaemon(
interval_seconds=5,
max_concurrent_runs=10,
tag_concurrency_limits=[
{"key": "database", "value": "tiny", "limit": 1},
{"key": "user", "value": "johann", "limit": 2},
],
)
list(coordinator.run_iteration(instance, grpc_server_registry))
assert get_run_ids(instance.run_launcher.queue()) == ["run-1", "run-3"]
def test_overlapping_tag_limits(instance, grpc_server_registry):
create_run(
instance,
run_id="run-1",
status=PipelineRunStatus.QUEUED,
tags={"foo": "bar"},
)
create_run(
instance,
run_id="run-2",
status=PipelineRunStatus.QUEUED,
tags={"foo": "bar"},
)
create_run(
instance,
run_id="run-3",
status=PipelineRunStatus.QUEUED,
tags={"foo": "other"},
)
create_run(
instance,
run_id="run-4",
status=PipelineRunStatus.QUEUED,
tags={"foo": "other"},
)
coordinator = QueuedRunCoordinatorDaemon(
interval_seconds=5,
max_concurrent_runs=10,
tag_concurrency_limits=[
{"key": "foo", "limit": 2},
{"key": "foo", "value": "bar", "limit": 1},
],
)
list(coordinator.run_iteration(instance, grpc_server_registry))
assert get_run_ids(instance.run_launcher.queue()) == ["run-1", "run-3"]
def test_location_handles_reused(instance, monkeypatch, grpc_server_registry):
"""
verifies that only one repository location is created when two queued runs from the same
location are dequeued in the same iteration
"""
create_run(
instance,
run_id="queued-run",
status=PipelineRunStatus.QUEUED,
)
create_run(
instance,
run_id="queued-run-2",
status=PipelineRunStatus.QUEUED,
)
original_method = GrpcServerRepositoryLocationHandle.__init__
method_calls = []
def mocked_handle_init(
self,
origin,
host=None,
port=None,
socket=None,
server_id=None,
heartbeat=False,
watch_server=True,
):
method_calls.append(origin)
return original_method(self, origin, host, port, socket, server_id, heartbeat, watch_server)
monkeypatch.setattr(
GrpcServerRepositoryLocationHandle,
"__init__",
mocked_handle_init,
)
coordinator = QueuedRunCoordinatorDaemon(
interval_seconds=5,
max_concurrent_runs=10,
)
list(coordinator.run_iteration(instance, grpc_server_registry))
assert get_run_ids(instance.run_launcher.queue()) == ["queued-run", "queued-run-2"]
assert len(method_calls) == 1
def test_skip_error_runs(instance, grpc_server_registry):
create_invalid_run(
instance,
run_id="bad-run",
status=PipelineRunStatus.QUEUED,
)
create_run(
instance,
run_id="good-run",
status=PipelineRunStatus.QUEUED,
)
coordinator = QueuedRunCoordinatorDaemon(
interval_seconds=5,
max_concurrent_runs=10,
)
errors = [
error for error in list(coordinator.run_iteration(instance, grpc_server_registry)) if error
]
assert len(errors) == 1
assert "ModuleNotFoundError" in errors[0].message
assert get_run_ids(instance.run_launcher.queue()) == ["good-run"]
assert instance.get_run_by_id("bad-run").status == PipelineRunStatus.FAILURE
| 28.32
| 100
| 0.655932
|
import pytest
from dagster.core.code_pointer import ModuleCodePointer
from dagster.core.definitions.reconstructable import ReconstructableRepository
from dagster.core.host_representation.grpc_server_registry import ProcessGrpcServerRegistry
from dagster.core.host_representation.handle import GrpcServerRepositoryLocationHandle
from dagster.core.host_representation.origin import (
ExternalPipelineOrigin,
ExternalRepositoryOrigin,
InProcessRepositoryLocationOrigin,
)
from dagster.core.storage.pipeline_run import IN_PROGRESS_RUN_STATUSES, PipelineRunStatus
from dagster.core.storage.tags import PRIORITY_TAG
from dagster.core.test_utils import create_run_for_test, instance_for_test
from dagster.daemon.run_coordinator.queued_run_coordinator_daemon import QueuedRunCoordinatorDaemon
from dagster_tests.api_tests.utils import get_foo_pipeline_handle
@pytest.fixture()
def instance():
overrides = {
"run_launcher": {"module": "dagster.core.test_utils", "class": "MockedRunLauncher"},
}
with instance_for_test(overrides=overrides) as inst:
yield inst
@pytest.fixture()
def grpc_server_registry(instance):
with ProcessGrpcServerRegistry(wait_for_processes_on_exit=True) as registry:
yield registry
def create_run(instance, **kwargs):
with get_foo_pipeline_handle() as pipeline_handle:
create_run_for_test(
instance,
external_pipeline_origin=pipeline_handle.get_external_origin(),
pipeline_name="foo",
**kwargs,
)
def create_invalid_run(instance, **kwargs):
create_run_for_test(
instance,
external_pipeline_origin=ExternalPipelineOrigin(
ExternalRepositoryOrigin(
InProcessRepositoryLocationOrigin(
ReconstructableRepository(ModuleCodePointer("fake", "fake"))
),
"foo",
),
"wrong-pipeline",
),
pipeline_name="wrong-pipeline",
**kwargs,
)
def get_run_ids(runs_queue):
return [run.run_id for run in runs_queue]
def test_attempt_to_launch_runs_filter(instance, grpc_server_registry):
create_run(
instance,
run_id="queued-run",
status=PipelineRunStatus.QUEUED,
)
create_run(
instance,
run_id="non-queued-run",
status=PipelineRunStatus.NOT_STARTED,
)
coordinator = QueuedRunCoordinatorDaemon(
interval_seconds=5,
max_concurrent_runs=10,
)
list(coordinator.run_iteration(instance, grpc_server_registry))
assert get_run_ids(instance.run_launcher.queue()) == ["queued-run"]
def test_attempt_to_launch_runs_no_queued(instance, grpc_server_registry):
create_run(
instance,
run_id="queued-run",
status=PipelineRunStatus.STARTED,
)
create_run(
instance,
run_id="non-queued-run",
status=PipelineRunStatus.NOT_STARTED,
)
coordinator = QueuedRunCoordinatorDaemon(
interval_seconds=5,
max_concurrent_runs=10,
)
list(coordinator.run_iteration(instance, grpc_server_registry))
assert instance.run_launcher.queue() == []
@pytest.mark.parametrize(
"num_in_progress_runs",
[0, 1, 3, 4, 5],
)
def test_get_queued_runs_max_runs(instance, num_in_progress_runs, grpc_server_registry):
max_runs = 4
in_progress_run_ids = ["in_progress-run-{}".format(i) for i in range(num_in_progress_runs)]
for i, run_id in enumerate(in_progress_run_ids):
status = IN_PROGRESS_RUN_STATUSES[i % len(IN_PROGRESS_RUN_STATUSES)]
create_run(
instance,
run_id=run_id,
status=status,
)
queued_run_ids = ["queued-run-{}".format(i) for i in range(max_runs + 1)]
for run_id in queued_run_ids:
create_run(
instance,
run_id=run_id,
status=PipelineRunStatus.QUEUED,
)
coordinator = QueuedRunCoordinatorDaemon(
interval_seconds=5,
max_concurrent_runs=max_runs,
)
list(coordinator.run_iteration(instance, grpc_server_registry))
assert len(instance.run_launcher.queue()) == max(0, max_runs - num_in_progress_runs)
def test_priority(instance, grpc_server_registry):
create_run(instance, run_id="default-pri-run", status=PipelineRunStatus.QUEUED)
create_run(
instance,
run_id="low-pri-run",
status=PipelineRunStatus.QUEUED,
tags={PRIORITY_TAG: "-1"},
)
create_run(
instance,
run_id="hi-pri-run",
status=PipelineRunStatus.QUEUED,
tags={PRIORITY_TAG: "3"},
)
coordinator = QueuedRunCoordinatorDaemon(
interval_seconds=5,
max_concurrent_runs=10,
)
list(coordinator.run_iteration(instance, grpc_server_registry))
assert get_run_ids(instance.run_launcher.queue()) == [
"hi-pri-run",
"default-pri-run",
"low-pri-run",
]
def test_priority_on_malformed_tag(instance, grpc_server_registry):
create_run(
instance,
run_id="bad-pri-run",
status=PipelineRunStatus.QUEUED,
tags={PRIORITY_TAG: "foobar"},
)
coordinator = QueuedRunCoordinatorDaemon(
interval_seconds=5,
max_concurrent_runs=10,
)
list(coordinator.run_iteration(instance, grpc_server_registry))
assert get_run_ids(instance.run_launcher.queue()) == ["bad-pri-run"]
def test_tag_limits(instance, grpc_server_registry):
create_run(
instance,
run_id="tiny-1",
status=PipelineRunStatus.QUEUED,
tags={"database": "tiny"},
)
create_run(
instance,
run_id="tiny-2",
status=PipelineRunStatus.QUEUED,
tags={"database": "tiny"},
)
create_run(
instance,
run_id="large-1",
status=PipelineRunStatus.QUEUED,
tags={"database": "large"},
)
coordinator = QueuedRunCoordinatorDaemon(
interval_seconds=5,
max_concurrent_runs=10,
tag_concurrency_limits=[{"key": "database", "value": "tiny", "limit": 1}],
)
list(coordinator.run_iteration(instance, grpc_server_registry))
assert get_run_ids(instance.run_launcher.queue()) == ["tiny-1", "large-1"]
def test_multiple_tag_limits(instance, grpc_server_registry):
create_run(
instance,
run_id="run-1",
status=PipelineRunStatus.QUEUED,
tags={"database": "tiny", "user": "johann"},
)
create_run(
instance,
run_id="run-2",
status=PipelineRunStatus.QUEUED,
tags={"database": "tiny"},
)
create_run(
instance,
run_id="run-3",
status=PipelineRunStatus.QUEUED,
tags={"user": "johann"},
)
create_run(
instance,
run_id="run-4",
status=PipelineRunStatus.QUEUED,
tags={"user": "johann"},
)
coordinator = QueuedRunCoordinatorDaemon(
interval_seconds=5,
max_concurrent_runs=10,
tag_concurrency_limits=[
{"key": "database", "value": "tiny", "limit": 1},
{"key": "user", "value": "johann", "limit": 2},
],
)
list(coordinator.run_iteration(instance, grpc_server_registry))
assert get_run_ids(instance.run_launcher.queue()) == ["run-1", "run-3"]
def test_overlapping_tag_limits(instance, grpc_server_registry):
create_run(
instance,
run_id="run-1",
status=PipelineRunStatus.QUEUED,
tags={"foo": "bar"},
)
create_run(
instance,
run_id="run-2",
status=PipelineRunStatus.QUEUED,
tags={"foo": "bar"},
)
create_run(
instance,
run_id="run-3",
status=PipelineRunStatus.QUEUED,
tags={"foo": "other"},
)
create_run(
instance,
run_id="run-4",
status=PipelineRunStatus.QUEUED,
tags={"foo": "other"},
)
coordinator = QueuedRunCoordinatorDaemon(
interval_seconds=5,
max_concurrent_runs=10,
tag_concurrency_limits=[
{"key": "foo", "limit": 2},
{"key": "foo", "value": "bar", "limit": 1},
],
)
list(coordinator.run_iteration(instance, grpc_server_registry))
assert get_run_ids(instance.run_launcher.queue()) == ["run-1", "run-3"]
def test_location_handles_reused(instance, monkeypatch, grpc_server_registry):
create_run(
instance,
run_id="queued-run",
status=PipelineRunStatus.QUEUED,
)
create_run(
instance,
run_id="queued-run-2",
status=PipelineRunStatus.QUEUED,
)
original_method = GrpcServerRepositoryLocationHandle.__init__
method_calls = []
def mocked_handle_init(
self,
origin,
host=None,
port=None,
socket=None,
server_id=None,
heartbeat=False,
watch_server=True,
):
method_calls.append(origin)
return original_method(self, origin, host, port, socket, server_id, heartbeat, watch_server)
monkeypatch.setattr(
GrpcServerRepositoryLocationHandle,
"__init__",
mocked_handle_init,
)
coordinator = QueuedRunCoordinatorDaemon(
interval_seconds=5,
max_concurrent_runs=10,
)
list(coordinator.run_iteration(instance, grpc_server_registry))
assert get_run_ids(instance.run_launcher.queue()) == ["queued-run", "queued-run-2"]
assert len(method_calls) == 1
def test_skip_error_runs(instance, grpc_server_registry):
create_invalid_run(
instance,
run_id="bad-run",
status=PipelineRunStatus.QUEUED,
)
create_run(
instance,
run_id="good-run",
status=PipelineRunStatus.QUEUED,
)
coordinator = QueuedRunCoordinatorDaemon(
interval_seconds=5,
max_concurrent_runs=10,
)
errors = [
error for error in list(coordinator.run_iteration(instance, grpc_server_registry)) if error
]
assert len(errors) == 1
assert "ModuleNotFoundError" in errors[0].message
assert get_run_ids(instance.run_launcher.queue()) == ["good-run"]
assert instance.get_run_by_id("bad-run").status == PipelineRunStatus.FAILURE
| true
| true
|
79048f6cdfe5cc3626aeccb151685edee36e7c84
| 11,127
|
py
|
Python
|
src/canmatrix/tests/test_sym.py
|
tainnok/canmatrix
|
4c785a405c9713cd0f6709c2d1634eee5cebfde8
|
[
"BSD-2-Clause"
] | 1
|
2020-12-07T13:16:47.000Z
|
2020-12-07T13:16:47.000Z
|
src/canmatrix/tests/test_sym.py
|
motorctl/canmatrix
|
5b2b43b472c8d8304ea7c09fe497cc0cdd109db3
|
[
"BSD-2-Clause"
] | null | null | null |
src/canmatrix/tests/test_sym.py
|
motorctl/canmatrix
|
5b2b43b472c8d8304ea7c09fe497cc0cdd109db3
|
[
"BSD-2-Clause"
] | 1
|
2020-11-18T00:05:43.000Z
|
2020-11-18T00:05:43.000Z
|
# -*- coding: utf-8 -*-
import io
import sys
import textwrap
from itertools import chain
from pprint import pprint
import pytest
import canmatrix.canmatrix
import canmatrix.formats.sym
def test_colliding_mux_values():
f = io.BytesIO(
textwrap.dedent(
'''\
FormatVersion=5.0 // Do not edit this line!
Title="a file"
{SEND}
[MuxedId]
ID=0h
Mux=TheMux 0,1 0h
Var=Signal unsigned 1,1
[MuxedId]
Mux=FirstMux 0,1 1h
Var=Signal unsigned 1,1
[MuxedId]
Mux=SecondMux 0,1 1h
Var=Signal unsigned 1,1
''',
).encode('utf-8'),
)
matrix = canmatrix.formats.sym.load(f)
error, = matrix.load_errors
line_number = 16
assert len(matrix.load_errors) == 1
assert isinstance(error, canmatrix.formats.sym.DuplicateMuxIdError)
assert error.line_number == line_number
error_string = str(error)
assert error_string.startswith(
'line {line_number}: '.format(line_number=line_number),
)
assert 'FirstMux' in error_string
assert 'SecondMux' in error_string
def test_parse_longname_with_colon():
f = io.BytesIO(
textwrap.dedent(
'''\
FormatVersion=5.0 // Do not edit this line!
Title="a file"
{SEND}
[pass]
DLC=8
Var=Password unsigned 16,16 /ln:"Access Level : Password"
''',
).encode('utf-8'),
)
matrix = canmatrix.formats.sym.load(f)
frame = matrix.frames[0]
signal = frame.signals[0]
assert signal.attributes['LongName'] == 'Access Level : Password'
@pytest.mark.parametrize(
'is_float, value, expected',
(
(False, '37', '37'),
(True, '37.1', '37.1'),
),
)
def test_export_default_decimal_places(is_float, value, expected):
matrix = canmatrix.canmatrix.CanMatrix()
frame = canmatrix.canmatrix.Frame()
matrix.add_frame(frame)
signal = canmatrix.canmatrix.Signal(
size=32,
is_float=is_float,
is_signed=False,
initial_value=value,
)
frame.add_signal(signal)
s = canmatrix.formats.sym.create_signal(db=matrix, signal=signal)
start = '/d:'
d, = (
segment
for segment in s.split()
if segment.startswith(start)
)
d = d[len(start):]
assert d == expected
@pytest.mark.parametrize(
'variable_type, bit_length',
(
('float', 32),
('double', 64),
)
)
def test_parse_float(variable_type, bit_length):
f = io.BytesIO(
textwrap.dedent(
'''\
FormatVersion=5.0 // Do not edit this line!
Title="Untitled"
{{SENDRECEIVE}}
[Symbol1]
ID=000h
DLC=8
Var=a_signal {variable_type} 0,{bit_length}
'''.format(
variable_type=variable_type,
bit_length=bit_length,
),
).encode('utf-8'),
)
matrix = canmatrix.formats.sym.load(f)
assert matrix.load_errors == []
frame = matrix.frames[0]
signal = frame.signals[0]
assert signal.is_float
def test_unterminated_enum():
f = io.BytesIO(
textwrap.dedent(
'''\
FormatVersion=5.0 // Do not edit this line!
Title="Untitled
{ENUMS}
enum Categories(0="Animal", 1="Vegetable", 3="Mineral"
{SENDRECEIVE}
[Symbol1]
ID=000h
DLC=8
Var=Signal unsigned 0,16
'''
).encode('utf-8'),
)
# Missing ')' at the end of enum used to cause infinite loop
matrix = canmatrix.formats.sym.load(f)
assert len(matrix.load_errors) == 1
if sys.version_info > (3, 0):
assert isinstance(matrix.load_errors[0], EOFError)
else:
assert isinstance(matrix.load_errors[0], StopIteration)
def test_title_read_and_write():
f = io.BytesIO(
textwrap.dedent(
'''\
FormatVersion=5.0 // Do not edit this line!
Title="An Example Title"
'''
).encode('utf-8'),
)
matrix = canmatrix.formats.sym.load(f)
assert matrix.attribute("Title") == "An Example Title"
f_out = io.BytesIO()
canmatrix.formats.sym.dump(matrix, f_out)
assert f_out.getvalue().decode('utf-8').splitlines()[1] == 'Title="An Example Title"'
@pytest.mark.parametrize(
'enum_str, enum_dict, enum_label',
(
('enum Animal(0="Dog", 1="Cat", 2="Fox")', {"Animal": {0: "Dog", 1: "Cat", 2: "Fox"}}, "Simple enum"),
('''\
enum Animal(0="Dog", //A Comment
1="Cat",
2="Fox")''',
{"Animal": {0: "Dog", 1: "Cat", 2: "Fox"}}, "Multiline enum"),
('enum Animal(0="Dog",1="Cat",2="Fox")', {"Animal": {0: "Dog", 1: "Cat", 2: "Fox"}}, "No Space in Separator"),
)
)
def test_enums_read(enum_str, enum_dict, enum_label):
f = io.BytesIO('''\
FormatVersion=5.0 // Do not edit this line!
Title="An Example Title"
{{ENUMS}}
{}
'''.format(enum_str).encode('utf-8'),
)
matrix = canmatrix.formats.sym.load(f)
assert matrix.load_errors == [], "Failed to load canmatrix, when testing enum case : '{}'".format(enum_label)
assert matrix.value_tables == enum_dict, "Enum not parsed correctly : '{}'".format(enum_label)
def test_enums_export():
f = io.BytesIO('''\
FormatVersion=5.0 // Do not edit this line!
Title="An Example Title"
{ENUMS}
enum Animal(0="Dog",1="Cat",2="Fox")
{SENDRECEIVE}
[Frame1]
ID=000h
DLC=8
Var=Signal1 unsigned 0,16
'''.encode('utf-8'),
)
matrix = canmatrix.formats.sym.load(f)
assert matrix.load_errors == [], "Failed to load canmatrix"
# Add an enum to Signal1
matrix.frame_by_name("Frame1").signal_by_name("Signal1").enumeration = "Plants"
matrix.frame_by_name("Frame1").signal_by_name("Signal1").values = {0: "Grass", 1: "Flower", 2: "Tree"}
# Export and reimport
f_out = io.BytesIO()
canmatrix.formats.sym.dump(matrix, f_out)
f_in = io.BytesIO(f_out.getvalue())
new_matrix = canmatrix.formats.sym.load(f_in)
# Check that Enums from Enums table exported and reimported correctly
assert new_matrix.value_tables["Animal"] == {0: "Dog", 1: "Cat", 2: "Fox"}
# Check that Enums from a Signal.Values property exported and reimported correctly
assert new_matrix.value_tables["Plants"] == {0: "Grass", 1: "Flower", 2: "Tree"}
def test_types_read():
f = io.BytesIO('''\
FormatVersion=5.0 // Do not edit this line!
Title="Types Test"
{ENUMS}
enum EnumAnimals(0="Cat", // An enum value for cats
1="Dog", // An enum value for dogs
2="Horse", 3="Monkey",
4="Lion")// An enum with a comment for the final value
{SENDRECEIVE}
[SymbolLengths]
ID=000h
DLC=8
Var="1Bit" unsigned 0,1
Var="3Bits" unsigned 1,3
Var="4Bits" unsigned 4,4
Var="21Bits" unsigned 8,21
Var="6Bits" unsigned 29,6
Var="29Bits" unsigned 35,29
[SymbolTypes]
ID=001h
DLC=8
Var=Bit bit 0,1
Var=Char char 1,8
Var=String string 16,16
Var=Signed signed 32,4
Var=Unsigned unsigned 36,4
Var=Enum EnumAnimals 40,4
Var=Raw raw 48,16
[SymbolDouble]
ID=002h
DLC=8
Var=Double double 0,64 // Must be 8 Bytes according to PCAN Symbol Editor V5
[SymbolFloat]
ID=003h
DLC=4
Var=Float float 0,32 // Must be 4 Bytes according to PCAN Symbol Editor V5
'''.encode('utf-8'),
)
matrix = canmatrix.formats.sym.load(f)
# Check no errors loading the matrix
assert matrix.load_errors == []
f_out = io.BytesIO()
canmatrix.formats.sym.dump(matrix, f_out)
f_out_bytes = f_out.getvalue()
f_out_string = f_out_bytes.decode("utf-8")
# Check that types are preserved when saving back to .SYM format
assert "Var=Bit bit" in f_out_string
assert "Var=Char char" in f_out_string
assert "Var=String string" in f_out_string
assert "Var=Signed signed" in f_out_string
assert 'Var="21Bits" unsigned' in f_out_string
assert 'Var=Float float' in f_out_string
assert 'Var=Double double' in f_out_string
# Read matrix back in to check all symbols/frames preserved
f_in = io.BytesIO(f_out_bytes)
new_matrix = canmatrix.formats.sym.load(f_in)
# Check no errors loading the matrix
assert new_matrix.load_errors == []
# Check that both matrices have the same Frames
frames = [f.name for f in matrix.frames]
new_frames = [f.name for f in new_matrix.frames]
assert sorted(frames) == sorted(new_frames)
# Check that both matrices have the same signals, and that all the expected signals are present
signals = chain(*[[s.name for s in frame.signals] for frame in matrix.frames])
new_signals = chain(*[[s.name for s in frame.signals] for frame in new_matrix.frames])
assert sorted(signals) == sorted(new_signals) == sorted([
"1Bit",
"3Bits",
"4Bits",
"21Bits",
"6Bits",
"29Bits",
"Bit",
"Char",
"String",
"Signed",
"Unsigned",
"Enum",
"Raw",
"Double",
"Float", ])
@pytest.mark.parametrize(
'var_name,data,raw_value',
(
('VarMux1', bytearray([1, 12, 0, 0, 0, 0, 0, 0]), 12),
('VarMux2', bytearray([2, 0, 0, 0, 23, 0, 0, 0]), 23),
('VarMux200', bytearray([200, 0, 0, 0, 0, 0, 34, 0]), 34),
)
)
def test_mux_decode(var_name,data,raw_value):
f = io.BytesIO('''\
FormatVersion=5.0 // Do not edit this line!
Title="Types Test"
FormatVersion=5.0 // Do not edit this line!
Title="Test Symbols File"
{SENDRECEIVE}
[MuxTestFrame]
ID=002h
DLC=8
Mux=Mux1 0,8 1
Var=VarMux1 unsigned 8,8
[MuxTestFrame]
DLC=8
Mux=Mux2 0,8 2
Var=VarMux2 unsigned 32,8
[MuxTestFrame]
DLC=8
Mux=Mux200 0,8 C8h
Var=VarMux200 unsigned 48,8
'''.encode('utf-8'),
)
matrix = canmatrix.formats.sym.load(f)
# Check no errors loading the matrix
assert matrix.load_errors == []
frame = matrix.frame_by_name("MuxTestFrame")
r = frame.decode(data)
    assert var_name in r, "Signal {} not decoded. Only decoded: {}".format(var_name, ','.join(r.keys()))
assert r[var_name].raw_value == raw_value
| 27.8175
| 119
| 0.560528
|
import io
import sys
import textwrap
from itertools import chain
from pprint import pprint
import pytest
import canmatrix.canmatrix
import canmatrix.formats.sym
def test_colliding_mux_values():
f = io.BytesIO(
textwrap.dedent(
'''\
FormatVersion=5.0 // Do not edit this line!
Title="a file"
{SEND}
[MuxedId]
ID=0h
Mux=TheMux 0,1 0h
Var=Signal unsigned 1,1
[MuxedId]
Mux=FirstMux 0,1 1h
Var=Signal unsigned 1,1
[MuxedId]
Mux=SecondMux 0,1 1h
Var=Signal unsigned 1,1
''',
).encode('utf-8'),
)
matrix = canmatrix.formats.sym.load(f)
error, = matrix.load_errors
line_number = 16
assert len(matrix.load_errors) == 1
assert isinstance(error, canmatrix.formats.sym.DuplicateMuxIdError)
assert error.line_number == line_number
error_string = str(error)
assert error_string.startswith(
'line {line_number}: '.format(line_number=line_number),
)
assert 'FirstMux' in error_string
assert 'SecondMux' in error_string
def test_parse_longname_with_colon():
f = io.BytesIO(
textwrap.dedent(
'''\
FormatVersion=5.0 // Do not edit this line!
Title="a file"
{SEND}
[pass]
DLC=8
Var=Password unsigned 16,16 /ln:"Access Level : Password"
''',
).encode('utf-8'),
)
matrix = canmatrix.formats.sym.load(f)
frame = matrix.frames[0]
signal = frame.signals[0]
assert signal.attributes['LongName'] == 'Access Level : Password'
@pytest.mark.parametrize(
'is_float, value, expected',
(
(False, '37', '37'),
(True, '37.1', '37.1'),
),
)
def test_export_default_decimal_places(is_float, value, expected):
matrix = canmatrix.canmatrix.CanMatrix()
frame = canmatrix.canmatrix.Frame()
matrix.add_frame(frame)
signal = canmatrix.canmatrix.Signal(
size=32,
is_float=is_float,
is_signed=False,
initial_value=value,
)
frame.add_signal(signal)
s = canmatrix.formats.sym.create_signal(db=matrix, signal=signal)
start = '/d:'
d, = (
segment
for segment in s.split()
if segment.startswith(start)
)
d = d[len(start):]
assert d == expected
@pytest.mark.parametrize(
'variable_type, bit_length',
(
('float', 32),
('double', 64),
)
)
def test_parse_float(variable_type, bit_length):
f = io.BytesIO(
textwrap.dedent(
'''\
FormatVersion=5.0 // Do not edit this line!
Title="Untitled"
{{SENDRECEIVE}}
[Symbol1]
ID=000h
DLC=8
Var=a_signal {variable_type} 0,{bit_length}
'''.format(
variable_type=variable_type,
bit_length=bit_length,
),
).encode('utf-8'),
)
matrix = canmatrix.formats.sym.load(f)
assert matrix.load_errors == []
frame = matrix.frames[0]
signal = frame.signals[0]
assert signal.is_float
def test_unterminated_enum():
f = io.BytesIO(
textwrap.dedent(
'''\
FormatVersion=5.0 // Do not edit this line!
Title="Untitled
{ENUMS}
enum Categories(0="Animal", 1="Vegetable", 3="Mineral"
{SENDRECEIVE}
[Symbol1]
ID=000h
DLC=8
Var=Signal unsigned 0,16
'''
).encode('utf-8'),
)
# Missing ')' at the end of enum used to cause infinite loop
matrix = canmatrix.formats.sym.load(f)
assert len(matrix.load_errors) == 1
if sys.version_info > (3, 0):
assert isinstance(matrix.load_errors[0], EOFError)
else:
assert isinstance(matrix.load_errors[0], StopIteration)
def test_title_read_and_write():
f = io.BytesIO(
textwrap.dedent(
'''\
FormatVersion=5.0 // Do not edit this line!
Title="An Example Title"
'''
).encode('utf-8'),
)
matrix = canmatrix.formats.sym.load(f)
assert matrix.attribute("Title") == "An Example Title"
f_out = io.BytesIO()
canmatrix.formats.sym.dump(matrix, f_out)
assert f_out.getvalue().decode('utf-8').splitlines()[1] == 'Title="An Example Title"'
@pytest.mark.parametrize(
'enum_str, enum_dict, enum_label',
(
('enum Animal(0="Dog", 1="Cat", 2="Fox")', {"Animal": {0: "Dog", 1: "Cat", 2: "Fox"}}, "Simple enum"),
('''\
enum Animal(0="Dog", //A Comment
1="Cat",
2="Fox")''',
{"Animal": {0: "Dog", 1: "Cat", 2: "Fox"}}, "Multiline enum"),
('enum Animal(0="Dog",1="Cat",2="Fox")', {"Animal": {0: "Dog", 1: "Cat", 2: "Fox"}}, "No Space in Separator"),
)
)
def test_enums_read(enum_str, enum_dict, enum_label):
f = io.BytesIO('''\
FormatVersion=5.0 // Do not edit this line!
Title="An Example Title"
{{ENUMS}}
{}
'''.format(enum_str).encode('utf-8'),
)
matrix = canmatrix.formats.sym.load(f)
assert matrix.load_errors == [], "Failed to load canmatrix, when testing enum case : '{}'".format(enum_label)
assert matrix.value_tables == enum_dict, "Enum not parsed correctly : '{}'".format(enum_label)
def test_enums_export():
f = io.BytesIO('''\
FormatVersion=5.0 // Do not edit this line!
Title="An Example Title"
{ENUMS}
enum Animal(0="Dog",1="Cat",2="Fox")
{SENDRECEIVE}
[Frame1]
ID=000h
DLC=8
Var=Signal1 unsigned 0,16
'''.encode('utf-8'),
)
matrix = canmatrix.formats.sym.load(f)
assert matrix.load_errors == [], "Failed to load canmatrix"
# Add an enum to Signal1
matrix.frame_by_name("Frame1").signal_by_name("Signal1").enumeration = "Plants"
matrix.frame_by_name("Frame1").signal_by_name("Signal1").values = {0: "Grass", 1: "Flower", 2: "Tree"}
# Export and reimport
f_out = io.BytesIO()
canmatrix.formats.sym.dump(matrix, f_out)
f_in = io.BytesIO(f_out.getvalue())
new_matrix = canmatrix.formats.sym.load(f_in)
# Check that Enums from Enums table exported and reimported correctly
assert new_matrix.value_tables["Animal"] == {0: "Dog", 1: "Cat", 2: "Fox"}
# Check that Enums from a Signal.Values property exported and reimported correctly
assert new_matrix.value_tables["Plants"] == {0: "Grass", 1: "Flower", 2: "Tree"}
def test_types_read():
f = io.BytesIO('''\
FormatVersion=5.0 // Do not edit this line!
Title="Types Test"
{ENUMS}
enum EnumAnimals(0="Cat", // An enum value for cats
1="Dog", // An enum value for dogs
2="Horse", 3="Monkey",
4="Lion")// An enum with a comment for the final value
{SENDRECEIVE}
[SymbolLengths]
ID=000h
DLC=8
Var="1Bit" unsigned 0,1
Var="3Bits" unsigned 1,3
Var="4Bits" unsigned 4,4
Var="21Bits" unsigned 8,21
Var="6Bits" unsigned 29,6
Var="29Bits" unsigned 35,29
[SymbolTypes]
ID=001h
DLC=8
Var=Bit bit 0,1
Var=Char char 1,8
Var=String string 16,16
Var=Signed signed 32,4
Var=Unsigned unsigned 36,4
Var=Enum EnumAnimals 40,4
Var=Raw raw 48,16
[SymbolDouble]
ID=002h
DLC=8
Var=Double double 0,64 // Must be 8 Bytes according to PCAN Symbol Editor V5
[SymbolFloat]
ID=003h
DLC=4
Var=Float float 0,32 // Must be 4 Bytes according to PCAN Symbol Editor V5
'''.encode('utf-8'),
)
matrix = canmatrix.formats.sym.load(f)
# Check no errors loading the matrix
assert matrix.load_errors == []
f_out = io.BytesIO()
canmatrix.formats.sym.dump(matrix, f_out)
f_out_bytes = f_out.getvalue()
f_out_string = f_out_bytes.decode("utf-8")
# Check that types are preserved when saving back to .SYM format
assert "Var=Bit bit" in f_out_string
assert "Var=Char char" in f_out_string
assert "Var=String string" in f_out_string
assert "Var=Signed signed" in f_out_string
assert 'Var="21Bits" unsigned' in f_out_string
assert 'Var=Float float' in f_out_string
assert 'Var=Double double' in f_out_string
# Read matrix back in to check all symbols/frames preserved
f_in = io.BytesIO(f_out_bytes)
new_matrix = canmatrix.formats.sym.load(f_in)
# Check no errors loading the matrix
assert new_matrix.load_errors == []
# Check that both matrices have the same Frames
frames = [f.name for f in matrix.frames]
new_frames = [f.name for f in new_matrix.frames]
assert sorted(frames) == sorted(new_frames)
# Check that both matrices have the same signals, and that all the expected signals are present
signals = chain(*[[s.name for s in frame.signals] for frame in matrix.frames])
new_signals = chain(*[[s.name for s in frame.signals] for frame in new_matrix.frames])
assert sorted(signals) == sorted(new_signals) == sorted([
"1Bit",
"3Bits",
"4Bits",
"21Bits",
"6Bits",
"29Bits",
"Bit",
"Char",
"String",
"Signed",
"Unsigned",
"Enum",
"Raw",
"Double",
"Float", ])
@pytest.mark.parametrize(
'var_name,data,raw_value',
(
('VarMux1', bytearray([1, 12, 0, 0, 0, 0, 0, 0]), 12),
('VarMux2', bytearray([2, 0, 0, 0, 23, 0, 0, 0]), 23),
('VarMux200', bytearray([200, 0, 0, 0, 0, 0, 34, 0]), 34),
)
)
def test_mux_decode(var_name,data,raw_value):
f = io.BytesIO('''\
FormatVersion=5.0 // Do not edit this line!
Title="Types Test"
FormatVersion=5.0 // Do not edit this line!
Title="Test Symbols File"
{SENDRECEIVE}
[MuxTestFrame]
ID=002h
DLC=8
Mux=Mux1 0,8 1
Var=VarMux1 unsigned 8,8
[MuxTestFrame]
DLC=8
Mux=Mux2 0,8 2
Var=VarMux2 unsigned 32,8
[MuxTestFrame]
DLC=8
Mux=Mux200 0,8 C8h
Var=VarMux200 unsigned 48,8
'''.encode('utf-8'),
)
matrix = canmatrix.formats.sym.load(f)
# Check no errors loading the matrix
assert matrix.load_errors == []
frame = matrix.frame_by_name("MuxTestFrame")
r = frame.decode(data)
    assert var_name in r, "Signal {} not decoded. Only decoded: {}".format(var_name, ','.join(r.keys()))
assert r[var_name].raw_value == raw_value
| true
| true
|
79048f8e29eab4293238d092bc3249ac9d44c7ce
| 52
|
py
|
Python
|
__init__.py
|
rahulk90/vae_sparse
|
102b3cf72abae8d66718b945df365edd4a23a62d
|
[
"MIT"
] | 11
|
2017-11-16T13:01:47.000Z
|
2021-12-26T20:07:24.000Z
|
__init__.py
|
rahulk90/inference_introspection
|
102b3cf72abae8d66718b945df365edd4a23a62d
|
[
"MIT"
] | null | null | null |
__init__.py
|
rahulk90/inference_introspection
|
102b3cf72abae8d66718b945df365edd4a23a62d
|
[
"MIT"
] | null | null | null |
all=['optvaedatasets','optvaemodels','optvaeutils']
| 26
| 51
| 0.769231
|
all=['optvaedatasets','optvaemodels','optvaeutils']
| true
| true
|
79048f9e76a4e94fce44343cd6e4dadc399df71d
| 689
|
py
|
Python
|
lnbits/extensions/satspay/migrations.py
|
lightningames/lnbits
|
63d7431898f9ab79522765dbb29c8a2fd874820a
|
[
"MIT"
] | null | null | null |
lnbits/extensions/satspay/migrations.py
|
lightningames/lnbits
|
63d7431898f9ab79522765dbb29c8a2fd874820a
|
[
"MIT"
] | null | null | null |
lnbits/extensions/satspay/migrations.py
|
lightningames/lnbits
|
63d7431898f9ab79522765dbb29c8a2fd874820a
|
[
"MIT"
] | null | null | null |
async def m001_initial(db):
"""
Initial wallet table.
"""
await db.execute(
"""
CREATE TABLE IF NOT EXISTS charges (
id TEXT NOT NULL PRIMARY KEY,
user TEXT,
description TEXT,
onchainwallet TEXT,
onchainaddress TEXT,
lnbitswallet TEXT,
payment_request TEXT,
payment_hash TEXT,
webhook TEXT,
completelink TEXT,
completelinktext TEXT,
time INTEGER,
amount INTEGER,
balance INTEGER DEFAULT 0,
timestamp TIMESTAMP NOT NULL DEFAULT (strftime('%s', 'now'))
);
"""
)
| 25.518519
| 72
| 0.510885
|
async def m001_initial(db):
await db.execute(
"""
CREATE TABLE IF NOT EXISTS charges (
id TEXT NOT NULL PRIMARY KEY,
user TEXT,
description TEXT,
onchainwallet TEXT,
onchainaddress TEXT,
lnbitswallet TEXT,
payment_request TEXT,
payment_hash TEXT,
webhook TEXT,
completelink TEXT,
completelinktext TEXT,
time INTEGER,
amount INTEGER,
balance INTEGER DEFAULT 0,
timestamp TIMESTAMP NOT NULL DEFAULT (strftime('%s', 'now'))
);
"""
)
| true
| true
|
79048fa362c16ca7f8ab347da84da6f744e9c7a6
| 1,070
|
py
|
Python
|
regulations/tests/apps_tests.py
|
PhilR8/regulations-site
|
19e2eafbba960f02e3a10d37aa288898f2614ee9
|
[
"CC0-1.0"
] | 6
|
2020-10-05T20:19:25.000Z
|
2022-03-17T18:34:59.000Z
|
regulations/tests/apps_tests.py
|
PhilR8/regulations-site
|
19e2eafbba960f02e3a10d37aa288898f2614ee9
|
[
"CC0-1.0"
] | 95
|
2020-10-22T15:00:46.000Z
|
2022-03-31T19:10:20.000Z
|
regulations/tests/apps_tests.py
|
PhilR8/regulations-site
|
19e2eafbba960f02e3a10d37aa288898f2614ee9
|
[
"CC0-1.0"
] | 7
|
2020-10-08T14:10:49.000Z
|
2022-01-24T18:36:13.000Z
|
import os
import shutil
import tempfile
from unittest import TestCase
from mock import patch
from regulations.apps import RegulationsConfig
class RegulationsConfigTests(TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdir)
@patch('regulations.apps.get_app_template_dirs')
def test_precompute_custom_templates(self, get_app_template_dirs):
"""Verify that custom templates are found"""
get_app_template_dirs.return_value = [self.tmpdir]
open(os.path.join(self.tmpdir, '123-45-a.html'), 'w').close()
open(os.path.join(self.tmpdir, 'other.html'), 'w').close()
RegulationsConfig.precompute_custom_templates()
self.assertEqual(RegulationsConfig.custom_tpls['123-45-a'],
'regulations/custom_nodes/123-45-a.html')
self.assertEqual(RegulationsConfig.custom_tpls['other'],
'regulations/custom_nodes/other.html')
self.assertFalse('another' in RegulationsConfig.custom_tpls)
| 34.516129
| 70
| 0.695327
|
import os
import shutil
import tempfile
from unittest import TestCase
from mock import patch
from regulations.apps import RegulationsConfig
class RegulationsConfigTests(TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdir)
@patch('regulations.apps.get_app_template_dirs')
def test_precompute_custom_templates(self, get_app_template_dirs):
get_app_template_dirs.return_value = [self.tmpdir]
open(os.path.join(self.tmpdir, '123-45-a.html'), 'w').close()
open(os.path.join(self.tmpdir, 'other.html'), 'w').close()
RegulationsConfig.precompute_custom_templates()
self.assertEqual(RegulationsConfig.custom_tpls['123-45-a'],
'regulations/custom_nodes/123-45-a.html')
self.assertEqual(RegulationsConfig.custom_tpls['other'],
'regulations/custom_nodes/other.html')
self.assertFalse('another' in RegulationsConfig.custom_tpls)
| true
| true
|
7904900c65e8be12c71fba1a74ba06b9f5cb497e
| 1,155
|
py
|
Python
|
main.py
|
BraffordHunter/03-Text-Adventure-2
|
a967f1bfafcbc44a027c88c07d30f2e386d29774
|
[
"MIT"
] | null | null | null |
main.py
|
BraffordHunter/03-Text-Adventure-2
|
a967f1bfafcbc44a027c88c07d30f2e386d29774
|
[
"MIT"
] | null | null | null |
main.py
|
BraffordHunter/03-Text-Adventure-2
|
a967f1bfafcbc44a027c88c07d30f2e386d29774
|
[
"MIT"
] | 1
|
2019-09-26T20:10:47.000Z
|
2019-09-26T20:10:47.000Z
|
import sys, os, json
version = (3,7)
assert sys.version_info >= version, "This script requires at least Python {0}.{1}".format(version[0],version[1])
# Game loop functions
def render(game,current):
''' Displays the current room '''
print('You are in the ' + game['rooms'][current]['name'])
print(game['rooms'][current]['desc'])
def getInput():
''' Asks the user for input and returns a stripped, uppercase version of what they typed '''
response = input('What would you like to do? ').strip().upper()
return response
def update(response,game,current):
''' Process the input and update the state of the world '''
for e in game['rooms'][current]['exits']:
if response == e['verb']:
current = e['target']
return current
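# Editor's sketch (illustrative, not part of the original script): a minimal
# 'house.json' structure that render() and update() above can consume. The
# room names, verbs and descriptions are invented for illustration; only the
# keys ('rooms', 'name', 'desc', 'exits', 'verb', 'target') and the 'START'
# room id come directly from the code above.
EXAMPLE_HOUSE = {
    'rooms': {
        'START': {
            'name': 'front porch',
            'desc': 'The front door stands open. Type QUIT to stop playing.',
            'exits': [{'verb': 'ENTER', 'target': 'HALL'}],
        },
        'HALL': {
            'name': 'hallway',
            'desc': 'A quiet hallway.',
            'exits': [{'verb': 'LEAVE', 'target': 'START'}],
        },
    },
}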
def main():
game = {}
with open('house.json') as json_file:
game = json.load(json_file)
current = 'START'
quit = False
while not quit:
render(game,current)
response = getInput()
current = update(response,game,current)
if response == 'QUIT':
quit = True
if __name__ == '__main__':
main()
| 21
| 112
| 0.61039
|
import sys, os, json
version = (3,7)
assert sys.version_info >= version, "This script requires at least Python {0}.{1}".format(version[0],version[1])
def render(game,current):
print('You are in the ' + game['rooms'][current]['name'])
print(game['rooms'][current]['desc'])
def getInput():
response = input('What would you like to do? ').strip().upper()
return response
def update(response,game,current):
for e in game['rooms'][current]['exits']:
if response == e['verb']:
current = e['target']
return current
def main():
game = {}
with open('house.json') as json_file:
game = json.load(json_file)
current = 'START'
quit = False
while not quit:
render(game,current)
response = getInput()
current = update(response,game,current)
if response == 'QUIT':
quit = True
if __name__ == '__main__':
main()
| true
| true
|
7904908fc5c6e9037185991e24be7e0abcfd456e
| 174,180
|
py
|
Python
|
picamera/camera.py
|
RobertLucian/picamera
|
eae031080d016753deed1fe78ca878110a818401
|
[
"BSD-3-Clause"
] | null | null | null |
picamera/camera.py
|
RobertLucian/picamera
|
eae031080d016753deed1fe78ca878110a818401
|
[
"BSD-3-Clause"
] | null | null | null |
picamera/camera.py
|
RobertLucian/picamera
|
eae031080d016753deed1fe78ca878110a818401
|
[
"BSD-3-Clause"
] | 1
|
2020-04-21T02:40:37.000Z
|
2020-04-21T02:40:37.000Z
|
# vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Python camera library for the Rasperry-Pi camera module
# Copyright (c) 2013-2017 Dave Jones <dave@waveform.org.uk>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
print_function,
division,
absolute_import,
)
# Make Py2's str equivalent to Py3's
str = type('')
import warnings
import datetime
import mimetypes
import ctypes as ct
import threading
from fractions import Fraction
from operator import itemgetter
from collections import namedtuple
from . import bcm_host, mmal, mmalobj as mo
from .exc import (
PiCameraError,
PiCameraValueError,
PiCameraRuntimeError,
PiCameraClosed,
PiCameraNotRecording,
PiCameraAlreadyRecording,
PiCameraMMALError,
PiCameraDeprecated,
PiCameraFallback,
)
from .encoders import (
PiVideoFrame,
PiVideoEncoder,
PiRawVideoEncoder,
PiCookedVideoEncoder,
PiRawOneImageEncoder,
PiRawMultiImageEncoder,
PiCookedOneImageEncoder,
PiCookedMultiImageEncoder,
)
from .renderers import (
PiPreviewRenderer,
PiOverlayRenderer,
PiNullSink,
)
from .color import Color
try:
from RPi import GPIO
except ImportError:
# Can't find RPi.GPIO so just null-out the reference
GPIO = None
def docstring_values(values, indent=8):
"""
Formats a dictionary of values for inclusion in a docstring.
"""
return ('\n' + ' ' * indent).join(
"* ``'%s'``" % k
for (k, v) in
sorted(values.items(), key=itemgetter(1)))
class PiCameraMaxResolution(object):
"""
Singleton representing the maximum resolution of the camera module.
"""
PiCameraMaxResolution = PiCameraMaxResolution()
class PiCameraMaxFramerate(object):
"""
Singleton representing the maximum framerate of the camera module.
"""
PiCameraMaxFramerate = PiCameraMaxFramerate()
class PiCamera(object):
"""
Provides a pure Python interface to the Raspberry Pi's camera module.
Upon construction, this class initializes the camera. The *camera_num*
parameter (which defaults to 0) selects the camera module that the instance
will represent. Only the Raspberry Pi compute module currently supports
more than one camera.
The *sensor_mode*, *resolution*, *framerate*, *framerate_range*, and
*clock_mode* parameters provide initial values for the :attr:`sensor_mode`,
:attr:`resolution`, :attr:`framerate`, :attr:`framerate_range`, and
:attr:`clock_mode` attributes of the class (these attributes are all
relatively expensive to set individually, hence setting them all upon
construction is a speed optimization). Please refer to the attribute
documentation for more information and default values.
    The *stereo_mode* and *stereo_decimate* parameters configure dual cameras
    on a compute module for stereoscopic mode. These parameters can only be set
at construction time; they cannot be altered later without closing the
:class:`PiCamera` instance and recreating it. The *stereo_mode* parameter
defaults to ``'none'`` (no stereoscopic mode) but can be set to
``'side-by-side'`` or ``'top-bottom'`` to activate a stereoscopic mode. If
the *stereo_decimate* parameter is ``True``, the resolution of the two
cameras will be halved so that the resulting image has the same dimensions
as if stereoscopic mode were not being used.
The *led_pin* parameter can be used to specify the GPIO pin which should be
used to control the camera's LED via the :attr:`led` attribute. If this is
not specified, it should default to the correct value for your Pi platform.
You should only need to specify this parameter if you are using a custom
DeviceTree blob (this is only typical on the `Compute Module`_ platform).
No preview or recording is started automatically upon construction. Use
the :meth:`capture` method to capture images, the :meth:`start_recording`
method to begin recording video, or the :meth:`start_preview` method to
start live display of the camera's input.
Several attributes are provided to adjust the camera's configuration. Some
of these can be adjusted while a recording is running, like
:attr:`brightness`. Others, like :attr:`resolution`, can only be adjusted
when the camera is idle.
When you are finished with the camera, you should ensure you call the
:meth:`close` method to release the camera resources::
camera = PiCamera()
try:
# do something with the camera
pass
finally:
camera.close()
The class supports the context manager protocol to make this particularly
easy (upon exiting the :keyword:`with` statement, the :meth:`close` method
is automatically called)::
with PiCamera() as camera:
# do something with the camera
pass
.. versionchanged:: 1.8
Added *stereo_mode* and *stereo_decimate* parameters.
.. versionchanged:: 1.9
Added *resolution*, *framerate*, and *sensor_mode* parameters.
.. versionchanged:: 1.10
Added *led_pin* parameter.
.. versionchanged:: 1.11
Added *clock_mode* parameter, and permitted setting of resolution as
appropriately formatted string.
.. versionchanged:: 1.13
Added *framerate_range* parameter.
.. _Compute Module: https://www.raspberrypi.org/documentation/hardware/computemodule/cmio-camera.md
"""
CAMERA_PREVIEW_PORT = 0
CAMERA_VIDEO_PORT = 1
CAMERA_CAPTURE_PORT = 2
MAX_RESOLUTION = PiCameraMaxResolution # modified by PiCamera.__init__
MAX_FRAMERATE = PiCameraMaxFramerate # modified by PiCamera.__init__
DEFAULT_ANNOTATE_SIZE = 32
CAPTURE_TIMEOUT = 60
METER_MODES = {
'average': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_AVERAGE,
'spot': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_SPOT,
'backlit': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_BACKLIT,
'matrix': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_MATRIX,
}
EXPOSURE_MODES = {
'off': mmal.MMAL_PARAM_EXPOSUREMODE_OFF,
'auto': mmal.MMAL_PARAM_EXPOSUREMODE_AUTO,
'night': mmal.MMAL_PARAM_EXPOSUREMODE_NIGHT,
'nightpreview': mmal.MMAL_PARAM_EXPOSUREMODE_NIGHTPREVIEW,
'backlight': mmal.MMAL_PARAM_EXPOSUREMODE_BACKLIGHT,
'spotlight': mmal.MMAL_PARAM_EXPOSUREMODE_SPOTLIGHT,
'sports': mmal.MMAL_PARAM_EXPOSUREMODE_SPORTS,
'snow': mmal.MMAL_PARAM_EXPOSUREMODE_SNOW,
'beach': mmal.MMAL_PARAM_EXPOSUREMODE_BEACH,
'verylong': mmal.MMAL_PARAM_EXPOSUREMODE_VERYLONG,
'fixedfps': mmal.MMAL_PARAM_EXPOSUREMODE_FIXEDFPS,
'antishake': mmal.MMAL_PARAM_EXPOSUREMODE_ANTISHAKE,
'fireworks': mmal.MMAL_PARAM_EXPOSUREMODE_FIREWORKS,
}
FLASH_MODES = {
'off': mmal.MMAL_PARAM_FLASH_OFF,
'auto': mmal.MMAL_PARAM_FLASH_AUTO,
'on': mmal.MMAL_PARAM_FLASH_ON,
'redeye': mmal.MMAL_PARAM_FLASH_REDEYE,
'fillin': mmal.MMAL_PARAM_FLASH_FILLIN,
'torch': mmal.MMAL_PARAM_FLASH_TORCH,
}
AWB_MODES = {
'off': mmal.MMAL_PARAM_AWBMODE_OFF,
'auto': mmal.MMAL_PARAM_AWBMODE_AUTO,
'sunlight': mmal.MMAL_PARAM_AWBMODE_SUNLIGHT,
'cloudy': mmal.MMAL_PARAM_AWBMODE_CLOUDY,
'shade': mmal.MMAL_PARAM_AWBMODE_SHADE,
'tungsten': mmal.MMAL_PARAM_AWBMODE_TUNGSTEN,
'fluorescent': mmal.MMAL_PARAM_AWBMODE_FLUORESCENT,
'incandescent': mmal.MMAL_PARAM_AWBMODE_INCANDESCENT,
'flash': mmal.MMAL_PARAM_AWBMODE_FLASH,
'horizon': mmal.MMAL_PARAM_AWBMODE_HORIZON,
}
IMAGE_EFFECTS = {
'none': mmal.MMAL_PARAM_IMAGEFX_NONE,
'negative': mmal.MMAL_PARAM_IMAGEFX_NEGATIVE,
'solarize': mmal.MMAL_PARAM_IMAGEFX_SOLARIZE,
# The following don't work
#'posterize': mmal.MMAL_PARAM_IMAGEFX_POSTERIZE,
#'whiteboard': mmal.MMAL_PARAM_IMAGEFX_WHITEBOARD,
#'blackboard': mmal.MMAL_PARAM_IMAGEFX_BLACKBOARD,
'sketch': mmal.MMAL_PARAM_IMAGEFX_SKETCH,
'denoise': mmal.MMAL_PARAM_IMAGEFX_DENOISE,
'emboss': mmal.MMAL_PARAM_IMAGEFX_EMBOSS,
'oilpaint': mmal.MMAL_PARAM_IMAGEFX_OILPAINT,
'hatch': mmal.MMAL_PARAM_IMAGEFX_HATCH,
'gpen': mmal.MMAL_PARAM_IMAGEFX_GPEN,
'pastel': mmal.MMAL_PARAM_IMAGEFX_PASTEL,
'watercolor': mmal.MMAL_PARAM_IMAGEFX_WATERCOLOUR,
'film': mmal.MMAL_PARAM_IMAGEFX_FILM,
'blur': mmal.MMAL_PARAM_IMAGEFX_BLUR,
'saturation': mmal.MMAL_PARAM_IMAGEFX_SATURATION,
'colorswap': mmal.MMAL_PARAM_IMAGEFX_COLOURSWAP,
'washedout': mmal.MMAL_PARAM_IMAGEFX_WASHEDOUT,
'posterise': mmal.MMAL_PARAM_IMAGEFX_POSTERISE,
'colorpoint': mmal.MMAL_PARAM_IMAGEFX_COLOURPOINT,
'colorbalance': mmal.MMAL_PARAM_IMAGEFX_COLOURBALANCE,
'cartoon': mmal.MMAL_PARAM_IMAGEFX_CARTOON,
'deinterlace1': mmal.MMAL_PARAM_IMAGEFX_DEINTERLACE_DOUBLE,
'deinterlace2': mmal.MMAL_PARAM_IMAGEFX_DEINTERLACE_ADV,
}
DRC_STRENGTHS = {
'off': mmal.MMAL_PARAMETER_DRC_STRENGTH_OFF,
'low': mmal.MMAL_PARAMETER_DRC_STRENGTH_LOW,
'medium': mmal.MMAL_PARAMETER_DRC_STRENGTH_MEDIUM,
'high': mmal.MMAL_PARAMETER_DRC_STRENGTH_HIGH,
}
RAW_FORMATS = {
'yuv',
'rgb',
'rgba',
'bgr',
'bgra',
}
STEREO_MODES = {
'none': mmal.MMAL_STEREOSCOPIC_MODE_NONE,
'side-by-side': mmal.MMAL_STEREOSCOPIC_MODE_SIDE_BY_SIDE,
'top-bottom': mmal.MMAL_STEREOSCOPIC_MODE_BOTTOM,
}
CLOCK_MODES = {
'reset': mmal.MMAL_PARAM_TIMESTAMP_MODE_RESET_STC,
'raw': mmal.MMAL_PARAM_TIMESTAMP_MODE_RAW_STC,
}
_METER_MODES_R = {v: k for (k, v) in METER_MODES.items()}
_EXPOSURE_MODES_R = {v: k for (k, v) in EXPOSURE_MODES.items()}
_FLASH_MODES_R = {v: k for (k, v) in FLASH_MODES.items()}
_AWB_MODES_R = {v: k for (k, v) in AWB_MODES.items()}
_IMAGE_EFFECTS_R = {v: k for (k, v) in IMAGE_EFFECTS.items()}
_DRC_STRENGTHS_R = {v: k for (k, v) in DRC_STRENGTHS.items()}
_STEREO_MODES_R = {v: k for (k, v) in STEREO_MODES.items()}
_CLOCK_MODES_R = {v: k for (k, v) in CLOCK_MODES.items()}
__slots__ = (
'_used_led',
'_led_pin',
'_camera',
'_camera_config',
'_camera_exception',
'_revision',
'_preview',
'_preview_alpha',
'_preview_layer',
'_preview_fullscreen',
'_preview_window',
'_splitter',
'_splitter_connection',
'_encoders_lock',
'_encoders',
'_overlays',
'_raw_format',
'_image_effect_params',
'_exif_tags',
)
def __init__(
self, camera_num=0, stereo_mode='none', stereo_decimate=False,
resolution=None, framerate=None, sensor_mode=0, led_pin=None,
clock_mode='reset', framerate_range=None):
bcm_host.bcm_host_init()
mimetypes.add_type('application/h264', '.h264', False)
mimetypes.add_type('application/mjpeg', '.mjpg', False)
mimetypes.add_type('application/mjpeg', '.mjpeg', False)
self._used_led = False
if GPIO and led_pin is None:
try:
led_pin = {
(0, 0): 2, # compute module (default for cam 0)
(0, 1): 30, # compute module (default for cam 1)
(1, 0): 5, # Pi 1 model B rev 1
(2, 0): 5, # Pi 1 model B rev 2 or model A
(3, 0): 32, # Pi 1 model B+ or Pi 2 model B
}[(GPIO.RPI_REVISION, camera_num)]
except KeyError:
raise PiCameraError(
'Unable to determine default GPIO LED pin for RPi '
'revision %d and camera num %d' % (
GPIO.RPI_REVISION, camera_num))
self._led_pin = led_pin
self._camera = None
self._camera_config = None
self._camera_exception = None
self._preview = None
self._preview_alpha = 255
self._preview_layer = 2
self._preview_fullscreen = True
self._preview_window = None
self._splitter = None
self._splitter_connection = None
self._encoders_lock = threading.Lock()
self._encoders = {}
self._overlays = []
self._raw_format = 'yuv'
self._image_effect_params = None
with mo.MMALCameraInfo() as camera_info:
info = camera_info.control.params[mmal.MMAL_PARAMETER_CAMERA_INFO]
self._revision = 'ov5647'
if camera_info.info_rev > 1:
self._revision = info.cameras[camera_num].camera_name.decode('ascii')
self._exif_tags = {
'IFD0.Model': 'RP_%s' % self._revision,
'IFD0.Make': 'RaspberryPi',
}
if PiCamera.MAX_RESOLUTION is PiCameraMaxResolution:
PiCamera.MAX_RESOLUTION = mo.PiResolution(
info.cameras[camera_num].max_width,
info.cameras[camera_num].max_height,
)
if PiCamera.MAX_FRAMERATE is PiCameraMaxFramerate:
if self._revision.upper() == 'OV5647':
PiCamera.MAX_FRAMERATE = 90
else:
PiCamera.MAX_FRAMERATE = 120
if resolution is None:
# Get screen resolution
w = ct.c_uint32()
h = ct.c_uint32()
if bcm_host.graphics_get_display_size(0, w, h) == -1:
w = 1280
h = 720
else:
w = int(w.value)
h = int(h.value)
resolution = mo.PiResolution(w, h)
elif resolution is PiCameraMaxResolution:
resolution = PiCamera.MAX_RESOLUTION
else:
resolution = mo.to_resolution(resolution)
if framerate_range is None:
if framerate is None:
framerate = 30
elif framerate is PiCameraMaxFramerate:
framerate = PiCamera.MAX_FRAMERATE
else:
framerate = mo.to_fraction(framerate)
elif framerate is not None:
raise PiCameraValueError(
"Can't specify framerate and framerate_range")
else:
try:
low, high = framerate_range
except TypeError:
raise PiCameraValueError(
"framerate_range must have (low, high) values")
if low is PiCameraMaxFramerate:
low = PiCamera.MAX_FRAMERATE
if high is PiCameraMaxFramerate:
high = PiCamera.MAX_FRAMERATE
framerate = (mo.to_fraction(low), mo.to_fraction(high))
try:
stereo_mode = self.STEREO_MODES[stereo_mode]
except KeyError:
raise PiCameraValueError('Invalid stereo mode: %s' % stereo_mode)
try:
clock_mode = self.CLOCK_MODES[clock_mode]
except KeyError:
raise PiCameraValueError('Invalid clock mode: %s' % clock_mode)
try:
self._init_camera(camera_num, stereo_mode, stereo_decimate)
self._configure_camera(sensor_mode, framerate, resolution, clock_mode)
self._init_preview()
self._init_splitter()
self._camera.enable()
self._init_defaults()
except:
self.close()
raise
def _init_led(self):
global GPIO
if GPIO:
try:
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(self._led_pin, GPIO.OUT, initial=GPIO.LOW)
self._used_led = True
except RuntimeError:
# We're probably not running as root. In this case, forget the
# GPIO reference so we don't try anything further
GPIO = None
def _init_camera(self, num, stereo_mode, stereo_decimate):
try:
self._camera = mo.MMALCamera()
except PiCameraMMALError as e:
if e.status == mmal.MMAL_ENOMEM:
raise PiCameraError(
"Camera is not enabled. Try running 'sudo raspi-config' "
"and ensure that the camera has been enabled.")
else:
raise
self._camera_config = self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG]
# Don't attempt to set this if stereo mode isn't requested as it'll
# break compatibility on older firmwares
if stereo_mode != mmal.MMAL_STEREOSCOPIC_MODE_NONE:
for p in self._camera.outputs:
mp = mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE,
ct.sizeof(mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE_T),
),
mode=stereo_mode,
decimate=stereo_decimate,
swap_eyes=False,
)
p.params[mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE] = mp
# Must be done *after* stereo-scopic setting
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_NUM] = num
def _init_defaults(self):
self.sharpness = 0
self.contrast = 0
self.brightness = 50
self.saturation = 0
self.iso = 0 # auto
self.video_stabilization = False
self.exposure_compensation = 0
self.exposure_mode = 'auto'
self.meter_mode = 'average'
self.awb_mode = 'auto'
self.image_effect = 'none'
self.color_effects = None
self.rotation = 0
self.hflip = self.vflip = False
self.zoom = (0.0, 0.0, 1.0, 1.0)
def _init_splitter(self):
# Create a splitter component for the video port. This is to permit
# video recordings and captures where use_video_port=True to occur
# simultaneously (#26)
self._splitter = mo.MMALSplitter()
self._splitter.inputs[0].connect(
self._camera.outputs[self.CAMERA_VIDEO_PORT]).enable()
def _init_preview(self):
# Create a null-sink component, enable it and connect it to the
# camera's preview port. If nothing is connected to the preview port,
# the camera doesn't measure exposure and captured images gradually
# fade to black (issue #22)
self._preview = PiNullSink(
self, self._camera.outputs[self.CAMERA_PREVIEW_PORT])
def _start_capture(self, port):
# Only enable capture if the port is the camera's still port, or if
# there's a single active encoder on the video splitter
if (
port == self._camera.outputs[self.CAMERA_CAPTURE_PORT] or
len([e for e in self._encoders.values() if e.active]) == 1):
port.params[mmal.MMAL_PARAMETER_CAPTURE] = True
def _stop_capture(self, port):
# Only disable capture if the port is the camera's still port, or if
# there's a single active encoder on the video splitter
if (
port == self._camera.outputs[self.CAMERA_CAPTURE_PORT] or
len([e for e in self._encoders.values() if e.active]) == 1):
port.params[mmal.MMAL_PARAMETER_CAPTURE] = False
def _check_camera_open(self):
"""
Raise an exception if the camera is already closed, or if the camera
has encountered a fatal error.
"""
exc, self._camera_exception = self._camera_exception, None
if exc:
raise exc
if self.closed:
raise PiCameraClosed("Camera is closed")
def _check_recording_stopped(self):
"""
Raise an exception if the camera is currently recording.
"""
if self.recording:
raise PiCameraRuntimeError("Recording is currently running")
def _get_ports(self, from_video_port, splitter_port):
"""
Determine the camera and output ports for given capture options.
See :ref:`camera_hardware` for more information on picamera's usage of
camera, splitter, and encoder ports. The general idea here is that the
capture (still) port operates on its own, while the video port is
always connected to a splitter component, so requests for a video port
also have to specify which splitter port they want to use.
"""
self._check_camera_open()
if from_video_port and (splitter_port in self._encoders):
raise PiCameraAlreadyRecording(
'The camera is already using port %d ' % splitter_port)
camera_port = (
self._camera.outputs[self.CAMERA_VIDEO_PORT]
if from_video_port else
self._camera.outputs[self.CAMERA_CAPTURE_PORT]
)
output_port = (
self._splitter.outputs[splitter_port]
if from_video_port else
camera_port
)
return (camera_port, output_port)
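    # Editor's note (illustrative, not from the original source): for a still
    # capture both returned ports resolve to the camera's dedicated still port,
    # while a video-port capture or recording with e.g. splitter_port=2 reads
    # from self._splitter.outputs[2], which is fed by the shared video port.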
def _get_output_format(self, output):
"""
Given an output object, attempt to determine the requested format.
We attempt to determine the filename of the *output* object and derive
a MIME type from the extension. If *output* has no filename, an error
is raised.
"""
if isinstance(output, bytes):
filename = output.decode('utf-8')
elif isinstance(output, str):
filename = output
else:
try:
filename = output.name
except AttributeError:
raise PiCameraValueError(
'Format must be specified when output has no filename')
(type, encoding) = mimetypes.guess_type(filename, strict=False)
if not type:
raise PiCameraValueError(
'Unable to determine type from filename %s' % filename)
return type
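    # Editor's sketch (illustrative, not from the original source): the
    # extension-based detection above relies on the standard mimetypes module,
    # so for a hypothetical filename it behaves roughly like:
    #
    #   >>> import mimetypes
    #   >>> mimetypes.guess_type('capture.jpg', strict=False)[0]
    #   'image/jpeg'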
def _get_image_format(self, output, format=None):
"""
Given an output object and an optional format, attempt to determine the
requested image format.
This method is used by all capture methods to determine the requested
output format. If *format* is specified as a MIME-type the "image/"
prefix is stripped. If *format* is not specified, then
:meth:`_get_output_format` will be called to attempt to determine
format from the *output* object.
"""
if isinstance(format, bytes):
format = format.decode('utf-8')
format = format or self._get_output_format(output)
format = (
format[6:] if format.startswith('image/') else
format)
if format == 'x-ms-bmp':
format = 'bmp'
if format == 'raw':
format = self.raw_format
return format
def _get_video_format(self, output, format=None):
"""
Given an output object and an optional format, attempt to determine the
requested video format.
This method is used by all recording methods to determine the requested
output format. If *format* is specified as a MIME-type the "video/" or
"application/" prefix will be stripped. If *format* is not specified,
then :meth:`_get_output_format` will be called to attempt to determine
format from the *output* object.
"""
if isinstance(format, bytes):
format = format.decode('utf-8')
format = format or self._get_output_format(output)
format = (
format[6:] if format.startswith('video/') else
format[12:] if format.startswith('application/') else
format)
return format
def _get_image_encoder(
self, camera_port, output_port, format, resize, **options):
"""
Construct an image encoder for the requested parameters.
This method is called by :meth:`capture` and :meth:`capture_continuous`
to construct an image encoder. The *camera_port* parameter gives the
MMAL camera port that should be enabled for capture by the encoder. The
*output_port* parameter gives the MMAL port that the encoder should
read output from (this may be the same as the camera port, but may be
different if other component(s) like a splitter have been placed in the
pipeline). The *format* parameter indicates the image format and will
be one of:
* ``'jpeg'``
* ``'png'``
* ``'gif'``
* ``'bmp'``
* ``'yuv'``
* ``'rgb'``
* ``'rgba'``
* ``'bgr'``
* ``'bgra'``
The *resize* parameter indicates the size that the encoder should
resize the output to (presumably by including a resizer in the
pipeline). Finally, *options* includes extra keyword arguments that
should be passed verbatim to the encoder.
"""
encoder_class = (
PiRawOneImageEncoder if format in self.RAW_FORMATS else
PiCookedOneImageEncoder)
return encoder_class(
self, camera_port, output_port, format, resize, **options)
def _get_images_encoder(
self, camera_port, output_port, format, resize, **options):
"""
Construct a multi-image encoder for the requested parameters.
This method is largely equivalent to :meth:`_get_image_encoder` with
the exception that the encoder returned should expect to be passed an
iterable of outputs to its :meth:`~PiEncoder.start` method, rather than
a single output object. This method is called by the
:meth:`capture_sequence` method.
All parameters are the same as in :meth:`_get_image_encoder`. Please
refer to the documentation for that method for further information.
"""
encoder_class = (
PiRawMultiImageEncoder if format in self.RAW_FORMATS else
PiCookedMultiImageEncoder)
return encoder_class(
self, camera_port, output_port, format, resize, **options)
def _get_video_encoder(
self, camera_port, output_port, format, resize, **options):
"""
Construct a video encoder for the requested parameters.
This method is called by :meth:`start_recording` and
:meth:`record_sequence` to construct a video encoder. The
*camera_port* parameter gives the MMAL camera port that should be
enabled for capture by the encoder. The *output_port* parameter gives
the MMAL port that the encoder should read output from (this may be the
same as the camera port, but may be different if other component(s)
like a splitter have been placed in the pipeline). The *format*
parameter indicates the video format and will be one of:
* ``'h264'``
* ``'mjpeg'``
The *resize* parameter indicates the size that the encoder should
resize the output to (presumably by including a resizer in the
pipeline). Finally, *options* includes extra keyword arguments that
should be passed verbatim to the encoder.
"""
encoder_class = (
PiRawVideoEncoder if format in self.RAW_FORMATS else
PiCookedVideoEncoder)
return encoder_class(
self, camera_port, output_port, format, resize, **options)
def close(self):
"""
Finalizes the state of the camera.
After successfully constructing a :class:`PiCamera` object, you should
ensure you call the :meth:`close` method once you are finished with the
camera (e.g. in the ``finally`` section of a ``try..finally`` block).
This method stops all recording and preview activities and releases all
resources associated with the camera; this is necessary to prevent GPU
memory leaks.
"""
for port in list(self._encoders):
self.stop_recording(splitter_port=port)
assert not self.recording
for overlay in list(self._overlays):
self.remove_overlay(overlay)
if self._preview:
self._preview.close()
self._preview = None
if self._splitter:
self._splitter.close()
self._splitter = None
if self._camera:
self._camera.close()
self._camera = None
exc, self._camera_exception = self._camera_exception, None
if exc:
raise exc
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
def start_preview(self, **options):
"""
Displays the preview overlay.
This method starts a camera preview as an overlay on the Pi's primary
display (HDMI or composite). A :class:`PiRenderer` instance (more
specifically, a :class:`PiPreviewRenderer`) is constructed with the
keyword arguments captured in *options*, and is returned from the
method (this instance is also accessible from the :attr:`preview`
attribute for as long as the renderer remains active). By default, the
renderer will be opaque and fullscreen.
This means the default preview overrides whatever is currently visible
on the display. More specifically, the preview does not rely on a
graphical environment like X-Windows (it can run quite happily from a
TTY console); it is simply an overlay on the Pi's video output. To stop
the preview and reveal the display again, call :meth:`stop_preview`.
The preview can be started and stopped multiple times during the
lifetime of the :class:`PiCamera` object.
All other camera properties can be modified "live" while the preview is
running (e.g. :attr:`brightness`).
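For example, as a rough sketch (the ten second delay is purely
illustrative)::

    import time
    import picamera

    with picamera.PiCamera() as camera:
        camera.start_preview()
        # Show the preview for a while, then restore the display
        time.sleep(10)
        camera.stop_preview()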
.. note::
Because the default preview typically obscures the screen, ensure
you have a means of stopping a preview before starting one. If the
preview obscures your interactive console you won't be able to
Alt+Tab back to it as the preview isn't in a window. If you are in
an interactive Python session, simply pressing Ctrl+D usually
suffices to terminate the environment, including the camera and its
associated preview.
"""
self._check_camera_open()
self._preview.close()
options.setdefault('layer', self._preview_layer)
options.setdefault('alpha', self._preview_alpha)
options.setdefault('fullscreen', self._preview_fullscreen)
options.setdefault('window', self._preview_window)
renderer = PiPreviewRenderer(
self, self._camera.outputs[self.CAMERA_PREVIEW_PORT], **options)
self._preview = renderer
return renderer
def stop_preview(self):
"""
Hides the preview overlay.
If :meth:`start_preview` has previously been called, this method shuts
down the preview display which generally results in the underlying
display becoming visible again. If a preview is not currently running,
no exception is raised - the method will simply do nothing.
"""
self._check_camera_open()
self._preview.close()
self._preview = PiNullSink(
self, self._camera.outputs[self.CAMERA_PREVIEW_PORT])
def add_overlay(self, source, size=None, format=None, **options):
"""
Adds a static overlay to the preview output.
This method creates a new static overlay using the same rendering
mechanism as the preview. Overlays will appear on the Pi's video
output, but will not appear in captures or video recordings. Multiple
overlays can exist; each call to :meth:`add_overlay` returns a new
:class:`PiOverlayRenderer` instance representing the overlay.
The *source* must be an object that supports the :ref:`buffer protocol
<bufferobjects>` in one of the supported unencoded formats: ``'yuv'``,
``'rgb'``, ``'rgba'``, ``'bgr'``, or ``'bgra'``. The format can be
specified explicitly with the optional *format* parameter. If not
specified, the method will attempt to guess the format based on the
length of *source* and the *size* (assuming 3 bytes per pixel for RGB,
and 4 bytes for RGBA).
The optional *size* parameter specifies the size of the source image as
a ``(width, height)`` tuple. If this is omitted or ``None`` then the
size is assumed to be the same as the camera's current
:attr:`resolution`.
The length of *source* must take into account that widths are rounded
up to the nearest multiple of 32, and heights to the nearest multiple
of 16. For example, if *size* is ``(1280, 720)``, and *format* is
``'rgb'``, then *source* must be a buffer with length 1280 × 720 × 3
bytes, or 2,764,800 bytes (because 1280 is a multiple of 32, and 720 is
a multiple of 16, no extra rounding is required). However, if *size* is
``(97, 57)``, and *format* is ``'rgb'`` then *source* must be a buffer
with length 128 × 64 × 3 bytes, or 24,576 bytes (pixels beyond column
97 and row 57 in the source will be ignored).
New overlays default to *layer* 0, whilst the preview defaults to layer
2. Higher numbered layers obscure lower numbered layers, hence new
overlays will be invisible (if the preview is running) by default. You
can make the new overlay visible either by making any existing preview
transparent (with the :attr:`~PiRenderer.alpha` property) or by moving
the overlay into a layer higher than the preview (with the
:attr:`~PiRenderer.layer` property).
All keyword arguments captured in *options* are passed onto the
:class:`PiRenderer` constructor. All camera properties except
:attr:`resolution` and :attr:`framerate` can be modified while overlays
exist. The reason for these exceptions is that the overlay has a static
resolution and changing the camera's mode would require resizing of the
source.
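For example, the following sketch displays a small solid red overlay
above the default preview layer (the size, colour, and layer used here
are purely illustrative; 320 is a multiple of 32 and 240 a multiple of
16, so no padding of the buffer is needed)::

    import time
    import picamera

    with picamera.PiCamera() as camera:
        camera.start_preview()
        # 320x240 pixels of solid red in 24-bit RGB (3 bytes per pixel)
        buf = bytes(bytearray((255, 0, 0)) * (320 * 240))
        overlay = camera.add_overlay(
            buf, size=(320, 240), format='rgb', layer=3)
        time.sleep(10)
        camera.remove_overlay(overlay)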
.. warning::
If too many overlays are added, the display output will be disabled
and a reboot will generally be required to restore the display.
Overlays are composited "on the fly". Hence, a real-time constraint
exists wherein for each horizontal line of HDMI output, the content
of all source layers must be fetched, resized, converted, and
blended to produce the output pixels.
If enough overlays exist (where "enough" is a number dependent on
overlay size, display resolution, bus frequency, and several other
factors making it unrealistic to calculate in advance), this
process breaks down and video output fails. One solution is to add
``dispmanx_offline=1`` to ``/boot/config.txt`` to force the use of
an off-screen buffer. Be aware that this requires more GPU memory
and may reduce the update rate.
.. _RGB: https://en.wikipedia.org/wiki/RGB
.. _RGBA: https://en.wikipedia.org/wiki/RGBA_color_space
.. versionadded:: 1.8
.. versionchanged:: 1.13
Added *format* parameter
"""
self._check_camera_open()
renderer = PiOverlayRenderer(self, source, size, format, **options)
self._overlays.append(renderer)
return renderer
def remove_overlay(self, overlay):
"""
Removes a static overlay from the preview output.
This method removes an overlay which was previously created by
:meth:`add_overlay`. The *overlay* parameter specifies the
:class:`PiRenderer` instance that was returned by :meth:`add_overlay`.
.. versionadded:: 1.8
"""
if overlay not in self._overlays:
raise PiCameraValueError(
"The specified overlay is not owned by this instance of "
"PiCamera")
overlay.close()
self._overlays.remove(overlay)
def start_recording(
self, output, format=None, resize=None, splitter_port=1, **options):
"""
Start recording video from the camera, storing it in *output*.
If *output* is a string, it will be treated as a filename for a new
file which the video will be written to. If *output* is not a string,
but is an object with a ``write`` method, it is assumed to be a
file-like object and the video data is appended to it (the
implementation only assumes the object has a ``write()`` method - no
other methods are required but ``flush`` will be called at the end of
recording if it is present). If *output* is not a string, and has no
``write`` method it is assumed to be a writeable object implementing
the buffer protocol. In this case, the video frames will be written
sequentially to the underlying buffer (which must be large enough to
accept all frame data).
If *format* is ``None`` (the default), the method will attempt to guess
the required video format from the extension of *output* (if it's a
string), or from the *name* attribute of *output* (if it has one). In
the case that the format cannot be determined, a
:exc:`PiCameraValueError` will be raised.
If *format* is not ``None``, it must be a string specifying the format
that you want the video output in. The format can be a MIME-type or
one of the following strings:
* ``'h264'`` - Write an H.264 video stream
* ``'mjpeg'`` - Write an M-JPEG video stream
* ``'yuv'`` - Write the raw video data to a file in YUV420 format
* ``'rgb'`` - Write the raw video data to a file in 24-bit RGB format
* ``'rgba'`` - Write the raw video data to a file in 32-bit RGBA format
* ``'bgr'`` - Write the raw video data to a file in 24-bit BGR format
* ``'bgra'`` - Write the raw video data to a file in 32-bit BGRA format
If *resize* is not ``None`` (the default), it must be a two-element
tuple specifying the width and height that the video recording should
be resized to. This is particularly useful for recording video using
the full resolution of the camera sensor (which is not possible in
H.264 without down-sizing the output).
The *splitter_port* parameter specifies the port of the built-in
splitter that the video encoder will be attached to. This defaults to
``1`` and most users will have no need to specify anything different.
If you wish to record multiple (presumably resized) streams
simultaneously, specify a value between ``0`` and ``3`` inclusive for
this parameter, ensuring that you do not specify a port that is
currently in use.
Certain formats accept additional options which can be specified
as keyword arguments. The ``'h264'`` format accepts the following
additional options:
* *profile* - The H.264 profile to use for encoding. Defaults to
'high', but can be one of 'baseline', 'main', 'extended', 'high', or
'constrained'.
* *level* - The `H.264 level`_ to use for encoding. Defaults to '4',
but can be any H.264 level up to '4.2'.
* *intra_period* - The key frame rate (the rate at which I-frames are
inserted in the output). Defaults to ``None``, but can be any 32-bit
integer value representing the number of frames between successive
I-frames. The special value 0 causes the encoder to produce a single
initial I-frame, and then only P-frames subsequently. Note that
:meth:`split_recording` will fail in this mode.
* *intra_refresh* - The key frame format (the way in which I-frames
will be inserted into the output stream). Defaults to ``None``, but
can be one of 'cyclic', 'adaptive', 'both', or 'cyclicrows'.
* *inline_headers* - When ``True``, specifies that the encoder should
output SPS/PPS headers within the stream to ensure GOPs (groups of
pictures) are self describing. This is important for streaming
applications where the client may wish to seek within the stream, and
enables the use of :meth:`split_recording`. Defaults to ``True`` if
not specified.
* *sei* - When ``True``, specifies the encoder should include
"Supplemental Enhancement Information" within the output stream.
Defaults to ``False`` if not specified.
* *sps_timing* - When ``True`` the encoder includes the camera's
framerate in the SPS header. Defaults to ``False`` if not specified.
* *motion_output* - Indicates the output destination for motion vector
estimation data. When ``None`` (the default), motion data is not
output. Otherwise, this can be a filename string, a file-like object,
or a writeable buffer object (as with the *output* parameter).
All encoded formats accept the following additional options:
* *bitrate* - The bitrate at which video will be encoded. Defaults to
17000000 (17Mbps) if not specified. The maximum value depends on the
selected `H.264 level`_ and profile. Bitrate 0 indicates the encoder
should not use bitrate control (the encoder is limited by the quality
only).
* *quality* - Specifies the quality that the encoder should attempt
to maintain. For the ``'h264'`` format, use values between 10 and 40
where 10 is extremely high quality, and 40 is extremely low (20-25 is
usually a reasonable range for H.264 encoding). For the ``'mjpeg'``
format, use JPEG quality values between 1 and 100 (where higher
values are higher quality). Quality 0 is special and seems to be
a "reasonable quality" default.
* *quantization* - Deprecated alias for *quality*.
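For example, as a rough sketch (the filename, duration, and quality
value here are purely illustrative)::

    import picamera

    with picamera.PiCamera() as camera:
        # Record roughly ten seconds of H.264 video to a file
        camera.start_recording('video.h264', quality=23)
        camera.wait_recording(10)
        camera.stop_recording()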
.. versionchanged:: 1.0
The *resize* parameter was added, and ``'mjpeg'`` was added as a
recording format
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.5
The *quantization* parameter was deprecated in favor of *quality*,
and the *motion_output* parameter was added.
.. versionchanged:: 1.11
Support for buffer outputs was added.
.. _H.264 level: https://en.wikipedia.org/wiki/H.264/MPEG-4_AVC#Levels
"""
if 'quantization' in options:
warnings.warn(
PiCameraDeprecated(
'The quantization option is deprecated; please use '
'quality instead (same value)'))
with self._encoders_lock:
camera_port, output_port = self._get_ports(True, splitter_port)
format = self._get_video_format(output, format)
encoder = self._get_video_encoder(
camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
try:
encoder.start(output, options.get('motion_output'))
except Exception:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
raise
def split_recording(self, output, splitter_port=1, **options):
"""
Continue the recording in the specified output; close existing output.
When called, the video encoder will wait for the next appropriate
split point (an inline SPS header), then will cease writing to the
current output (and close it, if it was specified as a filename), and
continue writing to the newly specified *output*.
The *output* parameter is treated as in the :meth:`start_recording`
method (it can be a string, a file-like object, or a writeable
buffer object).
The *motion_output* parameter can be used to redirect the output of the
motion vector data in the same fashion as *output*. If *motion_output*
is ``None`` (the default) then motion vector data will not be
redirected and will continue being written to the output specified by
the *motion_output* parameter given to :meth:`start_recording`.
Alternatively, if you only wish to redirect motion vector data, you can
set *output* to ``None`` and give a new value for *motion_output*.
The *splitter_port* parameter specifies which port of the video
splitter the encoder you wish to change outputs is attached to. This
defaults to ``1`` and most users will have no need to specify anything
different. Valid values are between ``0`` and ``3`` inclusive.
Note that unlike :meth:`start_recording`, you cannot specify format or
other options as these cannot be changed in the middle of recording.
Only the new *output* (and *motion_output*) can be specified.
Furthermore, the format of the recording is currently limited to H264,
and *inline_headers* must be ``True`` when :meth:`start_recording` is
called (this is the default).
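For example, the following sketch records a continuous stream as a
series of roughly five-second clips (the filenames and clip length are
purely illustrative)::

    import picamera

    with picamera.PiCamera() as camera:
        camera.start_recording('clip-000.h264')
        for i in range(1, 5):
            # Wait five seconds, then continue in the next file
            camera.wait_recording(5)
            camera.split_recording('clip-%03d.h264' % i)
        camera.wait_recording(5)
        camera.stop_recording()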
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.5
The *motion_output* parameter was added
.. versionchanged:: 1.11
Support for buffer outputs was added.
"""
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
encoder.split(output, options.get('motion_output'))
def request_key_frame(self, splitter_port=1):
"""
Request the encoder generate a key-frame as soon as possible.
When called, the video encoder running on the specified *splitter_port*
will attempt to produce a key-frame (full-image frame) as soon as
possible. The *splitter_port* defaults to ``1``. Valid values are
between ``0`` and ``3`` inclusive.
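For example, as a rough sketch (the filename and the one-second
interval are purely illustrative)::

    import picamera

    with picamera.PiCamera() as camera:
        camera.start_recording('stream.h264')
        for i in range(10):
            camera.wait_recording(1)
            # Ask the encoder for a fresh I-frame, e.g. so that a
            # late-joining client can start decoding promptly
            camera.request_key_frame()
        camera.stop_recording()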
.. note::
This method is only meaningful for recordings encoded in the H264
format as MJPEG produces full frames for every frame recorded.
Furthermore, there's no guarantee that the *next* frame will be
a key-frame; this is simply a request to produce one as soon as
possible after the call.
.. versionadded:: 1.11
"""
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
encoder.request_key_frame()
def wait_recording(self, timeout=0, splitter_port=1):
"""
Wait on the video encoder for *timeout* seconds.
It is recommended that this method be called while recording to check
for exceptions. If an error occurs during recording (for example out of
disk space) the recording will stop, but an exception will only be
raised when the :meth:`wait_recording` or :meth:`stop_recording`
methods are called.
If ``timeout`` is 0 (the default) the function will immediately return
(or raise an exception if an error has occurred).
The *splitter_port* parameter specifies which port of the video
splitter the encoder you wish to wait on is attached to. This
defaults to ``1`` and most users will have no need to specify anything
different. Valid values are between ``0`` and ``3`` inclusive.
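For example, a typical recording loop checks for encoder errors once a
second rather than simply sleeping (a rough sketch; the filename and
duration are purely illustrative)::

    import picamera

    with picamera.PiCamera() as camera:
        camera.start_recording('video.h264')
        try:
            # Check for recording errors once a second for a minute
            for i in range(60):
                camera.wait_recording(1)
        finally:
            camera.stop_recording()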
.. versionchanged:: 1.3
The *splitter_port* parameter was added
"""
assert timeout is not None
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
encoder.wait(timeout)
def stop_recording(self, splitter_port=1):
"""
Stop recording video from the camera.
After calling this method the video encoder will be shut down and
output will stop being written to the file-like object specified with
:meth:`start_recording`. If an error occurred during recording and
:meth:`wait_recording` has not been called since the error then this
method will raise the exception.
The *splitter_port* parameter specifies which port of the video
splitter the encoder you wish to stop is attached to. This defaults to
``1`` and most users will have no need to specify anything different.
Valid values are between ``0`` and ``3`` inclusive.
.. versionchanged:: 1.3
The *splitter_port* parameter was added
"""
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
try:
self.wait_recording(0, splitter_port)
finally:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
def record_sequence(
self, outputs, format='h264', resize=None, splitter_port=1, **options):
"""
Record a sequence of video clips from the camera.
This method accepts a sequence or iterator of *outputs* each of which
must either be a string specifying a filename for output, or a
file-like object with a ``write`` method.
The method acts as an iterator itself, yielding each item of the
sequence in turn. In this way, the caller can control how long to
record to each item by only permitting the loop to continue when ready
to switch to the next output.
The *format*, *splitter_port*, *resize*, and *options* parameters are
the same as in :meth:`start_recording`, but *format* defaults to
``'h264'``. The format is **not** derived from the filenames in
*outputs* by this method.
For example, to record 3 consecutive 10-second video clips, writing the
output to a series of H.264 files named clip01.h264, clip02.h264, and
clip03.h264 one could use the following::
import picamera
with picamera.PiCamera() as camera:
for filename in camera.record_sequence([
'clip01.h264',
'clip02.h264',
'clip03.h264']):
print('Recording to %s' % filename)
camera.wait_recording(10)
Alternatively, a more flexible method of writing the previous example
(which is easier to expand to a large number of output files) is by
using a generator expression as the input sequence::
import picamera
with picamera.PiCamera() as camera:
for filename in camera.record_sequence(
'clip%02d.h264' % i for i in range(3)):
print('Recording to %s' % filename)
camera.wait_recording(10)
More advanced techniques are also possible by utilising infinite
sequences, such as those generated by :func:`itertools.cycle`. In the
following example, recording is switched between two in-memory streams.
Whilst one stream is recording, the other is being analysed. The script
only stops recording when a video recording meets some criteria defined
by the ``process`` function::
import io
import itertools
import picamera
with picamera.PiCamera() as camera:
analyse = None
for stream in camera.record_sequence(
itertools.cycle((io.BytesIO(), io.BytesIO()))):
if analyse is not None:
if process(analyse):
break
analyse.seek(0)
analyse.truncate()
camera.wait_recording(5)
analyse = stream
.. versionadded:: 1.3
"""
with self._encoders_lock:
camera_port, output_port = self._get_ports(True, splitter_port)
format = self._get_video_format('', format)
encoder = self._get_video_encoder(
camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
try:
start = True
for output in outputs:
if start:
start = False
encoder.start(output, options.get('motion_output'))
else:
encoder.split(output)
yield output
finally:
try:
encoder.wait(0)
finally:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
def capture(
self, output, format=None, use_video_port=False, resize=None,
splitter_port=0, bayer=False, **options):
"""
Capture an image from the camera, storing it in *output*.
If *output* is a string, it will be treated as a filename for a new
file which the image will be written to. If *output* is not a string,
but is an object with a ``write`` method, it is assumed to be a
file-like object and the image data is appended to it (the
implementation only assumes the object has a ``write`` method - no
other methods are required but ``flush`` will be called at the end of
capture if it is present). If *output* is not a string, and has no
``write`` method it is assumed to be a writeable object implementing
the buffer protocol. In this case, the image data will be written
directly to the underlying buffer (which must be large enough to accept
the image data).
If *format* is ``None`` (the default), the method will attempt to guess
the required image format from the extension of *output* (if it's a
string), or from the *name* attribute of *output* (if it has one). In
the case that the format cannot be determined, a
:exc:`PiCameraValueError` will be raised.
If *format* is not ``None``, it must be a string specifying the format
that you want the image output in. The format can be a MIME-type or
one of the following strings:
* ``'jpeg'`` - Write a JPEG file
* ``'png'`` - Write a PNG file
* ``'gif'`` - Write a GIF file
* ``'bmp'`` - Write a Windows bitmap file
* ``'yuv'`` - Write the raw image data to a file in YUV420 format
* ``'rgb'`` - Write the raw image data to a file in 24-bit RGB format
* ``'rgba'`` - Write the raw image data to a file in 32-bit RGBA format
* ``'bgr'`` - Write the raw image data to a file in 24-bit BGR format
* ``'bgra'`` - Write the raw image data to a file in 32-bit BGRA format
* ``'raw'`` - Deprecated option for raw captures; the format is taken
from the deprecated :attr:`raw_format` attribute
The *use_video_port* parameter controls whether the camera's image or
video port is used to capture images. It defaults to ``False`` which
means that the camera's image port is used. This port is slow but
produces better quality pictures. If you need rapid capture up to the
rate of video frames, set this to ``True``.
When *use_video_port* is ``True``, the *splitter_port* parameter
specifies the port of the video splitter that the image encoder will be
attached to. This defaults to ``0`` and most users will have no need to
specify anything different. This parameter is ignored when
*use_video_port* is ``False``. See :ref:`mmal` for more information
about the video splitter.
If *resize* is not ``None`` (the default), it must be a two-element
tuple specifying the width and height that the image should be resized
to.
.. warning::
If *resize* is specified, or *use_video_port* is ``True``, Exif
metadata will **not** be included in JPEG output. This is due to an
underlying firmware limitation.
Certain file formats accept additional options which can be specified
as keyword arguments. Currently, only the ``'jpeg'`` encoder accepts
additional options, which are:
* *quality* - Defines the quality of the JPEG encoder as an integer
ranging from 1 to 100. Defaults to 85. Please note that JPEG quality
is not a percentage and `definitions of quality`_ vary widely.
* *restart* - Defines the restart interval for the JPEG encoder as a
number of JPEG MCUs. The actual restart interval used will be a
multiple of the number of MCUs per row in the resulting image.
* *thumbnail* - Defines the size and quality of the thumbnail to embed
in the Exif metadata. Specifying ``None`` disables thumbnail
generation. Otherwise, specify a tuple of ``(width, height,
quality)``. Defaults to ``(64, 48, 35)``.
* *bayer* - If ``True``, the raw bayer data from the camera's sensor
is included in the Exif metadata.
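For example, as a rough sketch (the filename, resize dimensions, and
quality are purely illustrative)::

    import time
    import picamera

    with picamera.PiCamera() as camera:
        camera.start_preview()
        # Give the camera's auto-exposure and white balance a couple of
        # seconds to settle before capturing
        time.sleep(2)
        camera.capture('thumb.jpg', resize=(320, 240), quality=80)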
.. note::
The so-called "raw" formats listed above (``'yuv'``, ``'rgb'``,
etc.) do not represent the raw bayer data from the camera's sensor.
Rather they provide access to the image data after GPU processing,
but before format encoding (JPEG, PNG, etc). Currently, the only
method of accessing the raw bayer data is via the *bayer* parameter
described above.
.. versionchanged:: 1.0
The *resize* parameter was added, and raw capture formats can now
be specified directly
.. versionchanged:: 1.3
The *splitter_port* parameter was added, and *bayer* was added as
an option for the ``'jpeg'`` format
.. versionchanged:: 1.11
Support for buffer outputs was added.
.. _definitions of quality: http://photo.net/learn/jpeg/#qual
"""
if format == 'raw':
warnings.warn(
PiCameraDeprecated(
'The "raw" format option is deprecated; specify the '
'required format directly instead ("yuv", "rgb", etc.)'))
if use_video_port and bayer:
raise PiCameraValueError(
'bayer is only valid with still port captures')
if 'burst' in options:
raise PiCameraValueError(
'burst is only valid with capture_sequence or capture_continuous')
with self._encoders_lock:
camera_port, output_port = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format(output, format)
encoder = self._get_image_encoder(
camera_port, output_port, format, resize, **options)
if use_video_port:
self._encoders[splitter_port] = encoder
try:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
# Wait for the callback to set the event indicating the end of
# image capture
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port]
def capture_sequence(
self, outputs, format='jpeg', use_video_port=False, resize=None,
splitter_port=0, burst=False, bayer=False, **options):
"""
Capture a sequence of consecutive images from the camera.
This method accepts a sequence or iterator of *outputs* each of which
must either be a string specifying a filename for output, or a
file-like object with a ``write`` method, or a writeable buffer object.
For each item in the sequence or iterator of outputs, the camera
captures a single image as fast as it can.
The *format*, *use_video_port*, *splitter_port*, *resize*, and
*options* parameters are the same as in :meth:`capture`, but *format*
defaults to ``'jpeg'``. The format is **not** derived from the
filenames in *outputs* by this method.
If *use_video_port* is ``False`` (the default), the *burst* parameter
can be used to make still port captures faster. Specifically, this
prevents the preview from switching resolutions between captures which
significantly speeds up consecutive captures from the still port. The
downside is that this mode currently has several bugs; the major
issue is that if captures are performed too quickly some frames will
come back severely underexposed. It is recommended that users avoid the
*burst* parameter unless they absolutely require it and are prepared to
work around such issues.
For example, to capture 3 consecutive images::
import time
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
time.sleep(2)
camera.capture_sequence([
'image1.jpg',
'image2.jpg',
'image3.jpg',
])
camera.stop_preview()
If you wish to capture a large number of images, a list comprehension
or generator expression can be used to construct the list of filenames
to use::
import time
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
time.sleep(2)
camera.capture_sequence([
'image%02d.jpg' % i
for i in range(100)
])
camera.stop_preview()
More complex effects can be obtained by using a generator function to
provide the filenames or output objects.
.. versionchanged:: 1.0
The *resize* parameter was added, and raw capture formats can now
be specified directly
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.11
Support for buffer outputs was added.
"""
if use_video_port:
if burst:
raise PiCameraValueError(
'burst is only valid with still port captures')
if bayer:
raise PiCameraValueError(
'bayer is only valid with still port captures')
with self._encoders_lock:
camera_port, output_port = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format('', format)
if use_video_port:
encoder = self._get_images_encoder(
camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
else:
encoder = self._get_image_encoder(
camera_port, output_port, format, resize, **options)
try:
if use_video_port:
encoder.start(outputs)
encoder.wait()
else:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = True
try:
for output in outputs:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
finally:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = False
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port]
def capture_continuous(
self, output, format=None, use_video_port=False, resize=None,
splitter_port=0, burst=False, bayer=False, **options):
"""
Capture images continuously from the camera as an infinite iterator.
This method returns an infinite iterator of images captured
continuously from the camera. If *output* is a string, each captured
image is stored in a file named after *output* after substitution of
two values with the :meth:`~str.format` method. Those two values are:
* ``{counter}`` - a simple incrementor that starts at 1 and increases
by 1 for each image taken
* ``{timestamp}`` - a :class:`~datetime.datetime` instance
The table below contains several example values of *output* and the
sequence of filenames those values could produce:
.. tabularcolumns:: |p{80mm}|p{40mm}|p{10mm}|
+--------------------------------------------+--------------------------------------------+-------+
| *output* Value | Filenames | Notes |
+============================================+============================================+=======+
| ``'image{counter}.jpg'`` | image1.jpg, image2.jpg, image3.jpg, ... | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'image{counter:02d}.jpg'`` | image01.jpg, image02.jpg, image03.jpg, ... | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'image{timestamp}.jpg'`` | image2013-10-05 12:07:12.346743.jpg, | (1) |
| | image2013-10-05 12:07:32.498539, ... | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'image{timestamp:%H-%M-%S-%f}.jpg'`` | image12-10-02-561527.jpg, | |
| | image12-10-14-905398.jpg | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'{timestamp:%H%M%S}-{counter:03d}.jpg'`` | 121002-001.jpg, 121013-002.jpg, | (2) |
| | 121014-003.jpg, ... | |
+--------------------------------------------+--------------------------------------------+-------+
1. Note that because timestamp's default output includes colons (:),
the resulting filenames are not suitable for use on Windows. For
this reason (and the fact that the default contains spaces) it is
strongly recommended that you always specify a format when using
``{timestamp}``.
2. You can use both ``{timestamp}`` and ``{counter}`` in a single
format string (multiple times too!) although this tends to be
redundant.
If *output* is not a string, but has a ``write`` method, it is assumed
to be a file-like object and each image is simply written to this
object sequentially. In this case you will likely either want to write
something to the object between the images to distinguish them, or
clear the object between iterations. If *output* is not a string, and
has no ``write`` method, it is assumed to be a writeable object
supporting the buffer protocol; each image is simply written to the
buffer sequentially.
The *format*, *use_video_port*, *splitter_port*, *resize*, and
*options* parameters are the same as in :meth:`capture`.
If *use_video_port* is ``False`` (the default), the *burst* parameter
can be used to make still port captures faster. Specifically, this
prevents the preview from switching resolutions between captures which
significantly speeds up consecutive captures from the still port. The
downside is that this mode currently has several bugs; the major
issue is that if captures are performed too quickly some frames will
come back severely underexposed. It is recommended that users avoid the
*burst* parameter unless they absolutely require it and are prepared to
work around such issues.
For example, to capture 60 images with a one second delay between them,
writing the output to a series of JPEG files named image01.jpg,
image02.jpg, etc. one could do the following::
import time
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
try:
for i, filename in enumerate(
camera.capture_continuous('image{counter:02d}.jpg')):
print(filename)
time.sleep(1)
if i == 59:
break
finally:
camera.stop_preview()
Alternatively, to capture JPEG frames as fast as possible into an
in-memory stream, performing some processing on each stream until
some condition is satisfied::
import io
import time
import picamera
with picamera.PiCamera() as camera:
stream = io.BytesIO()
for foo in camera.capture_continuous(stream, format='jpeg'):
# Truncate the stream to the current position (in case
# prior iterations output a longer image)
stream.truncate()
stream.seek(0)
if process(stream):
break
.. versionchanged:: 1.0
The *resize* parameter was added, and raw capture formats can now
be specified directly
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.11
Support for buffer outputs was added.
"""
if use_video_port:
if burst:
raise PiCameraValueError(
'burst is only valid with still port captures')
if bayer:
raise PiCameraValueError(
'bayer is only valid with still port captures')
with self._encoders_lock:
camera_port, output_port = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format(output, format)
encoder = self._get_image_encoder(
camera_port, output_port, format, resize, **options)
if use_video_port:
self._encoders[splitter_port] = encoder
try:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = True
try:
if isinstance(output, bytes):
# If we're fed a bytes string, assume it's UTF-8 encoded
# and convert it to Unicode. Technically this is wrong
# (file-systems use all sorts of encodings), but UTF-8 is a
# reasonable default and this keeps compatibility with
# Python 2 simple although it breaks the edge cases of
# non-UTF-8 encoded bytes strings with non-UTF-8 encoded
# file-systems
output = output.decode('utf-8')
if isinstance(output, str):
counter = 1
while True:
filename = output.format(
counter=counter,
timestamp=datetime.datetime.now(),
)
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(filename)
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
yield filename
counter += 1
else:
while True:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
yield output
finally:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = False
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port]
@property
def closed(self):
"""
Returns ``True`` if the :meth:`close` method has been called.
"""
return not self._camera
@property
def recording(self):
"""
Returns ``True`` if the :meth:`start_recording` method has been called,
and no :meth:`stop_recording` call has been made yet.
"""
return any(
isinstance(e, PiVideoEncoder) and e.active
for e in self._encoders.values()
)
@property
def previewing(self):
"""
Returns ``True`` if the :meth:`start_preview` method has been called,
and no :meth:`stop_preview` call has been made yet.
.. deprecated:: 1.8
Test whether :attr:`preview` is ``None`` instead.
"""
warnings.warn(
PiCameraDeprecated(
'PiCamera.previewing is deprecated; test PiCamera.preview '
'is not None instead'))
return isinstance(self._preview, PiPreviewRenderer)
@property
def revision(self):
"""
Returns a string representing the revision of the Pi's camera module.
At the time of writing, the string returned is 'ov5647' for the V1
module, and 'imx219' for the V2 module.
"""
return self._revision
@property
def exif_tags(self):
"""
Holds a mapping of the Exif tags to apply to captured images.
.. note::
Please note that Exif tagging is only supported with the ``jpeg``
format.
By default several Exif tags are automatically applied to any images
taken with the :meth:`capture` method: ``IFD0.Make`` (which is set to
``RaspberryPi``), ``IFD0.Model`` (which is set to ``RP_OV5647``), and
three timestamp tags: ``IFD0.DateTime``, ``EXIF.DateTimeOriginal``, and
``EXIF.DateTimeDigitized`` which are all set to the current date and
time just before the picture is taken.
If you wish to set additional Exif tags, or override any of the
aforementioned tags, simply add entries to the exif_tags map before
calling :meth:`capture`. For example::
camera.exif_tags['IFD0.Copyright'] = 'Copyright (c) 2013 Foo Industries'
The Exif standard mandates ASCII encoding for all textual values, hence
strings containing non-ASCII characters will cause an encoding error to
be raised when :meth:`capture` is called. If you wish to set binary
values, use a :func:`bytes` value::
camera.exif_tags['EXIF.UserComment'] = b'Something containing\\x00NULL characters'
.. warning::
Binary Exif values are currently ignored; this appears to be a
libmmal or firmware bug.
You may also specify datetime, integer, or float values, all of
which will be converted to appropriate ASCII strings (datetime values
are formatted as ``YYYY:MM:DD HH:MM:SS`` in accordance with the Exif
standard).
The currently supported Exif tags are:
+-------+-------------------------------------------------------------+
| Group | Tags |
+=======+=============================================================+
| IFD0, | ImageWidth, ImageLength, BitsPerSample, Compression, |
| IFD1 | PhotometricInterpretation, ImageDescription, Make, Model, |
| | StripOffsets, Orientation, SamplesPerPixel, RowsPerStrip, |
| | StripByteCounts, Xresolution, Yresolution, |
| | PlanarConfiguration, ResolutionUnit, TransferFunction, |
| | Software, DateTime, Artist, WhitePoint, |
| | PrimaryChromaticities, JPEGInterchangeFormat, |
| | JPEGInterchangeFormatLength, YcbCrCoefficients, |
| | YcbCrSubSampling, YcbCrPositioning, ReferenceBlackWhite, |
| | Copyright |
+-------+-------------------------------------------------------------+
| EXIF | ExposureTime, FNumber, ExposureProgram, |
| | SpectralSensitivity, ISOSpeedRatings, OECF, ExifVersion, |
| | DateTimeOriginal, DateTimeDigitized, |
| | ComponentsConfiguration, CompressedBitsPerPixel, |
| | ShutterSpeedValue, ApertureValue, BrightnessValue, |
| | ExposureBiasValue, MaxApertureValue, SubjectDistance, |
| | MeteringMode, LightSource, Flash, FocalLength, SubjectArea, |
| | MakerNote, UserComment, SubSecTime, SubSecTimeOriginal, |
| | SubSecTimeDigitized, FlashpixVersion, ColorSpace, |
| | PixelXDimension, PixelYDimension, RelatedSoundFile, |
| | FlashEnergy, SpacialFrequencyResponse, |
| | FocalPlaneXResolution, FocalPlaneYResolution, |
| | FocalPlaneResolutionUnit, SubjectLocation, ExposureIndex, |
| | SensingMethod, FileSource, SceneType, CFAPattern, |
| | CustomRendered, ExposureMode, WhiteBalance, |
| | DigitalZoomRatio, FocalLengthIn35mmFilm, SceneCaptureType, |
| | GainControl, Contrast, Saturation, Sharpness, |
| | DeviceSettingDescription, SubjectDistanceRange, |
| | ImageUniqueID |
+-------+-------------------------------------------------------------+
| GPS | GPSVersionID, GPSLatitudeRef, GPSLatitude, GPSLongitudeRef, |
| | GPSLongitude, GPSAltitudeRef, GPSAltitude, GPSTimeStamp, |
| | GPSSatellites, GPSStatus, GPSMeasureMode, GPSDOP, |
| | GPSSpeedRef, GPSSpeed, GPSTrackRef, GPSTrack, |
| | GPSImgDirectionRef, GPSImgDirection, GPSMapDatum, |
| | GPSDestLatitudeRef, GPSDestLatitude, GPSDestLongitudeRef, |
| | GPSDestLongitude, GPSDestBearingRef, GPSDestBearing, |
| | GPSDestDistanceRef, GPSDestDistance, GPSProcessingMethod, |
| | GPSAreaInformation, GPSDateStamp, GPSDifferential |
+-------+-------------------------------------------------------------+
| EINT | InteroperabilityIndex, InteroperabilityVersion, |
| | RelatedImageFileFormat, RelatedImageWidth, |
| | RelatedImageLength |
+-------+-------------------------------------------------------------+
"""
return self._exif_tags
def _set_led(self, value):
if not self._used_led:
self._init_led()
if not GPIO:
raise PiCameraRuntimeError(
"GPIO library not found, or not accessible; please install "
"RPi.GPIO and run the script as root")
GPIO.output(self._led_pin, bool(value))
led = property(None, _set_led, doc="""
Sets the state of the camera's LED via GPIO.
If a GPIO library is available (only RPi.GPIO is currently supported),
and if the python process has the necessary privileges (typically this
means running as root via sudo), this property can be used to set the
state of the camera's LED as a boolean value (``True`` is on, ``False``
is off).
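For example, as a rough sketch (requires RPi.GPIO and sufficient
privileges; the filename is purely illustrative)::

    import picamera

    with picamera.PiCamera() as camera:
        # Turn the camera's LED off before capturing
        camera.led = False
        camera.capture('dark.jpg')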
.. note::
This is a write-only property. While it can be used to control the
camera's LED, you cannot query the state of the camera's LED using
this property.
.. note::
At present, the camera's LED cannot be controlled on the Pi 3
(the GPIOs used to control the camera LED were re-routed to GPIO
expander on the Pi 3).
.. warning::
There are circumstances in which the camera firmware may override
an existing LED setting. For example, in the case that the firmware
resets the camera (as can happen with a CSI-2 timeout), the LED may
also be reset. If you wish to guarantee that the LED remain off at
all times, you may prefer to use the ``disable_camera_led`` option
in `config.txt`_ (this has the added advantage that sudo privileges
and GPIO access are not required, at least for LED control).
.. _config.txt: https://www.raspberrypi.org/documentation/configuration/config-txt.md
""")
def _get_raw_format(self):
warnings.warn(
PiCameraDeprecated(
'PiCamera.raw_format is deprecated; use required format '
'directly with capture methods instead'))
return self._raw_format
def _set_raw_format(self, value):
warnings.warn(
PiCameraDeprecated(
'PiCamera.raw_format is deprecated; use required format '
'directly with capture methods instead'))
if value not in self.RAW_FORMATS:
raise PiCameraValueError("Invalid raw format: %s" % value)
self._raw_format = value
raw_format = property(_get_raw_format, _set_raw_format, doc="""
Retrieves or sets the raw format of the camera's ports.
.. deprecated:: 1.0
Please use ``'yuv'`` or ``'rgb'`` directly as a format in the
various capture methods instead.
""")
def _get_timestamp(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_SYSTEM_TIME]
timestamp = property(_get_timestamp, doc="""
Retrieves the system time according to the camera firmware.
The camera's timestamp is a 64-bit integer representing the number of
microseconds since the last system boot. When the camera's
:attr:`clock_mode` is ``'raw'`` the values returned by this attribute
are comparable to those from the :attr:`frame`
:attr:`~PiVideoFrame.timestamp` attribute.
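For example, as a rough sketch (the filename and duration are purely
illustrative)::

    import picamera

    with picamera.PiCamera(clock_mode='raw') as camera:
        camera.start_recording('video.h264')
        camera.wait_recording(2)
        f = camera.frame
        if f is not None:
            # Both values are microseconds since the last system boot
            print('%d %d' % (camera.timestamp, f.timestamp))
        camera.stop_recording()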
""")
def _get_frame(self):
self._check_camera_open()
for e in self._encoders.values():
try:
return e.frame
except AttributeError:
pass
raise PiCameraRuntimeError(
"Cannot query frame information when camera is not recording")
frame = property(_get_frame, doc="""
Retrieves information about the current frame recorded from the camera.
When video recording is active (after a call to
:meth:`start_recording`), this attribute will return a
:class:`PiVideoFrame` tuple containing information about the current
frame that the camera is recording.
If multiple video recordings are currently in progress (after multiple
calls to :meth:`start_recording` with different values for the
``splitter_port`` parameter), which encoder's frame information is
returned is arbitrary. If you require information from a specific
encoder, you will need to extract it from :attr:`_encoders` explicitly.
Querying this property when the camera is not recording will result in
an exception.
.. note::
There is a small window of time when querying this attribute will
return ``None`` after calling :meth:`start_recording`. If this
attribute returns ``None``, this means that the video encoder has
been initialized, but the camera has not yet returned any frames.
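For example, the following sketch polls the frame information while
recording (the filename and the 100 frame limit are purely
illustrative; ``None`` values are skipped as noted above)::

    import picamera

    with picamera.PiCamera() as camera:
        camera.start_recording('video.h264')
        while True:
            camera.wait_recording(1)
            f = camera.frame
            if f is not None and f.index >= 100:
                break
        camera.stop_recording()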
""")
def _disable_camera(self):
"""
An internal method for disabling the camera, e.g. for re-configuration.
This disables the splitter and preview connections (if they exist).
"""
self._splitter.connection.disable()
self._preview.renderer.connection.disable()
self._camera.disable()
def _enable_camera(self):
"""
An internal method for enabling the camera after re-configuration.
This ensures the splitter configuration is consistent, then re-enables
the camera along with the splitter and preview connections.
"""
self._camera.enable()
self._preview.renderer.connection.enable()
self._splitter.connection.enable()
def _configure_splitter(self):
"""
Ensures all splitter output ports have a sensible format (I420) and
buffer sizes.
This method is used to ensure the splitter configuration is sane,
typically after :meth:`_configure_camera` is called.
"""
self._splitter.inputs[0].copy_from(self._camera.outputs[self.CAMERA_VIDEO_PORT])
self._splitter.inputs[0].commit()
def _control_callback(self, port, buf):
try:
if buf.command == mmal.MMAL_EVENT_ERROR:
raise PiCameraRuntimeError(
"No data recevied from sensor. Check all connections, "
"including the SUNNY chip on the camera board")
elif buf.command != mmal.MMAL_EVENT_PARAMETER_CHANGED:
raise PiCameraRuntimeError(
"Received unexpected camera control callback event, 0x%08x" % buf[0].cmd)
except Exception as exc:
# Pass the exception to the main thread; next time
# _check_camera_open() is called, this will get raised
self._camera_exception = exc
def _configure_camera(
self, sensor_mode, framerate, resolution, clock_mode,
old_sensor_mode=0):
"""
An internal method for setting a new camera mode, framerate,
resolution, and/or clock_mode.
This method is used by the setters of the :attr:`resolution`,
:attr:`framerate`, and :attr:`sensor_mode` properties. It assumes the
camera is currently disabled. The *old_sensor_mode* and *sensor_mode*
arguments are required to ensure correct operation on older firmwares
(specifically that we don't try to set the sensor mode when both old
and new modes are 0 or automatic).
"""
old_cc = mmal.MMAL_PARAMETER_CAMERA_CONFIG_T.from_buffer_copy(self._camera_config)
old_ports = [
(port.framesize, port.framerate, port.params[mmal.MMAL_PARAMETER_FPS_RANGE])
for port in self._camera.outputs
]
if old_sensor_mode != 0 or sensor_mode != 0:
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG] = sensor_mode
if not self._camera.control.enabled:
# Initial setup
self._camera.control.enable(self._control_callback)
preview_resolution = resolution
elif (
self._camera.outputs[self.CAMERA_PREVIEW_PORT].framesize ==
self._camera.outputs[self.CAMERA_VIDEO_PORT].framesize
):
preview_resolution = resolution
else:
preview_resolution = self._camera.outputs[self.CAMERA_PREVIEW_PORT].framesize
try:
try:
fps_low, fps_high = framerate
except TypeError:
fps_low = fps_high = framerate
else:
framerate = 0
fps_range = mmal.MMAL_PARAMETER_FPS_RANGE_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_FPS_RANGE,
ct.sizeof(mmal.MMAL_PARAMETER_FPS_RANGE_T)
),
fps_low=mo.to_rational(fps_low),
fps_high=mo.to_rational(fps_high),
)
cc = self._camera_config
cc.max_stills_w = resolution.width
cc.max_stills_h = resolution.height
cc.stills_yuv422 = 0
cc.one_shot_stills = 1
cc.max_preview_video_w = resolution.width
cc.max_preview_video_h = resolution.height
cc.num_preview_video_frames = max(3, fps_high // 10)
cc.stills_capture_circular_buffer_height = 0
cc.fast_preview_resume = 0
cc.use_stc_timestamp = clock_mode
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG] = cc
# Clamp preview resolution to camera's resolution
if (
preview_resolution.width > resolution.width or
preview_resolution.height > resolution.height
):
preview_resolution = resolution
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_FPS_RANGE] = fps_range
if port.index == self.CAMERA_PREVIEW_PORT:
port.framesize = preview_resolution
else:
port.framesize = resolution
port.framerate = framerate
port.commit()
except:
# If anything goes wrong, restore original resolution and
# framerate, otherwise the camera can be left in unusual states
# (camera config not matching ports, etc).
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG] = old_cc
self._camera_config = old_cc
for port, (res, fps, fps_range) in zip(self._camera.outputs, old_ports):
port.framesize = res
port.framerate = fps
port.params[mmal.MMAL_PARAMETER_FPS_RANGE] = fps_range
port.commit()
raise
def _get_framerate(self):
self._check_camera_open()
port_num = (
self.CAMERA_VIDEO_PORT
if self._encoders else
self.CAMERA_PREVIEW_PORT
)
return mo.PiCameraFraction(self._camera.outputs[port_num].framerate)
def _set_framerate(self, value):
self._check_camera_open()
self._check_recording_stopped()
value = mo.to_fraction(value, den_limit=256)
if not (0 < value <= self.MAX_FRAMERATE):
raise PiCameraValueError("Invalid framerate: %.2ffps" % value)
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
resolution = self.resolution
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=value, resolution=resolution,
clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
framerate = property(_get_framerate, _set_framerate, doc="""\
Retrieves or sets the framerate at which video-port based image
captures, video recordings, and previews will run.
When queried, the :attr:`framerate` property returns the rate at which
the camera's video and preview ports will operate as a
:class:`~fractions.Fraction` instance (which can be easily converted to
an :class:`int` or :class:`float`). If :attr:`framerate_range` has been
set, then :attr:`framerate` will be 0 which indicates that a dynamic
range of framerates is being used.
.. note::
For backwards compatibility, a derivative of the
:class:`~fractions.Fraction` class is actually used which permits
the value to be treated as a tuple of ``(numerator, denominator)``.
Setting and retrieving framerate as a ``(numerator, denominator)``
tuple is deprecated and will be removed in 2.0. Please use a
:class:`~fractions.Fraction` instance instead (which is just as
accurate and also permits direct use with math operators).
When set, the property configures the camera so that the next call to
recording and previewing methods will use the new framerate. Setting
this property implicitly sets :attr:`framerate_range` so that the low
and high values are equal to the new framerate. The framerate can be
specified as an :ref:`int <typesnumeric>`, :ref:`float <typesnumeric>`,
:class:`~fractions.Fraction`, or a ``(numerator, denominator)`` tuple.
For example, the following definitions are all equivalent::
from fractions import Fraction
camera.framerate = 30
camera.framerate = 30 / 1
camera.framerate = Fraction(30, 1)
camera.framerate = (30, 1) # deprecated
The camera must not be closed, and no recording must be active when the
property is set.
.. note::
This attribute, in combination with :attr:`resolution`, determines
the mode that the camera operates in. The actual sensor framerate
and resolution used by the camera are influenced, but not directly
set, by this property. See :attr:`sensor_mode` for more
information.
The initial value of this property can be specified with the
*framerate* parameter in the :class:`PiCamera` constructor, and will
default to 30 if not specified.
""")
def _get_sensor_mode(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG]
def _set_sensor_mode(self, value):
self._check_camera_open()
self._check_recording_stopped()
try:
if not (0 <= value <= 7):
raise PiCameraValueError(
"Invalid sensor mode: %d (valid range 0..7)" % value)
except TypeError:
raise PiCameraValueError("Invalid sensor mode: %s" % value)
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
resolution = self.resolution
framerate = Fraction(self.framerate)
if framerate == 0:
framerate = self.framerate_range
self._disable_camera()
self._configure_camera(
old_sensor_mode=sensor_mode, sensor_mode=value,
framerate=framerate, resolution=resolution,
clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
sensor_mode = property(_get_sensor_mode, _set_sensor_mode, doc="""\
Retrieves or sets the input mode of the camera's sensor.
This is an advanced property which can be used to control the camera's
sensor mode. By default, mode 0 is used which allows the camera to
automatically select an input mode based on the requested
:attr:`resolution` and :attr:`framerate`. Valid values are currently
between 0 and 7. The set of valid sensor modes (along with the
heuristic used to select one automatically) are detailed in the
:ref:`camera_modes` section of the documentation.
.. note::
At the time of writing, setting this property does nothing unless
the camera has been initialized with a sensor mode other than 0.
Furthermore, some mode transitions appear to require setting the
property twice (in a row). This appears to be a firmware
limitation.
The initial value of this property can be specified with the
*sensor_mode* parameter in the :class:`PiCamera` constructor, and will
default to 0 if not specified.
.. versionadded:: 1.9
""")
def _get_clock_mode(self):
self._check_camera_open()
return self._CLOCK_MODES_R[self._camera_config.use_stc_timestamp]
def _set_clock_mode(self, value):
self._check_camera_open()
self._check_recording_stopped()
try:
clock_mode = self.CLOCK_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid clock mode %s" % value)
sensor_mode = self.sensor_mode
framerate = Fraction(self.framerate)
if framerate == 0:
framerate = self.framerate_range
resolution = self.resolution
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=framerate,
resolution=resolution, clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
clock_mode = property(_get_clock_mode, _set_clock_mode, doc="""\
Retrieves or sets the mode of the camera's clock.
This is an advanced property which can be used to control the nature of
the frame timestamps available from the :attr:`frame` property. When
this is "reset" (the default) each frame's timestamp will be relative
to the start of the recording. When this is "raw", each frame's
timestamp will be relative to the last initialization of the camera.
The initial value of this property can be specified with the
*clock_mode* parameter in the :class:`PiCamera` constructor, and will
default to "reset" if not specified.
.. versionadded:: 1.11
""")
def _get_resolution(self):
self._check_camera_open()
return mo.PiResolution(
int(self._camera_config.max_stills_w),
int(self._camera_config.max_stills_h)
)
def _set_resolution(self, value):
self._check_camera_open()
self._check_recording_stopped()
value = mo.to_resolution(value)
if not (
(0 < value.width <= self.MAX_RESOLUTION.width) and
(0 < value.height <= self.MAX_RESOLUTION.height)):
raise PiCameraValueError(
"Invalid resolution requested: %r" % (value,))
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
framerate = Fraction(self.framerate)
if framerate == 0:
framerate = self.framerate_range
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=framerate,
resolution=value, clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
resolution = property(_get_resolution, _set_resolution, doc="""
Retrieves or sets the resolution at which image captures, video
recordings, and previews will be captured.
When queried, the :attr:`resolution` property returns the resolution at
which the camera will operate as a tuple of ``(width, height)``
measured in pixels. This is the resolution that the :meth:`capture`
method will produce images at, and the resolution that
:meth:`start_recording` will produce videos at.
When set, the property configures the camera so that the next call to
these methods will use the new resolution. The resolution can be
specified as a ``(width, height)`` tuple, as a string formatted
``'WIDTHxHEIGHT'``, or as a string containing a commonly recognized
`display resolution`_ name (e.g. "VGA", "HD", "1080p", etc). For
example, the following definitions are all equivalent::
camera.resolution = (1280, 720)
camera.resolution = '1280x720'
camera.resolution = '1280 x 720'
camera.resolution = 'HD'
camera.resolution = '720p'
The camera must not be closed, and no recording must be active when the
property is set.
.. note::
This attribute, in combination with :attr:`framerate`, determines
the mode that the camera operates in. The actual sensor framerate
and resolution used by the camera is influenced, but not directly
set, by this property. See :attr:`sensor_mode` for more
information.
The initial value of this property can be specified with the
*resolution* parameter in the :class:`PiCamera` constructor, and will
default to the display's resolution or 1280x720 if the display has
been disabled (with ``tvservice -o``).
.. versionchanged:: 1.11
Resolution permitted to be set as a string. Preview resolution
added as separate property.
.. _display resolution: https://en.wikipedia.org/wiki/Graphics_display_resolution
""")
def _get_framerate_range(self):
self._check_camera_open()
port_num = (
self.CAMERA_VIDEO_PORT
if self._encoders else
self.CAMERA_PREVIEW_PORT
)
mp = self._camera.outputs[port_num].params[mmal.MMAL_PARAMETER_FPS_RANGE]
return mo.PiFramerateRange(
mo.to_fraction(mp.fps_low), mo.to_fraction(mp.fps_high))
def _set_framerate_range(self, value):
self._check_camera_open()
self._check_recording_stopped()
low, high = value
low = mo.to_fraction(low, den_limit=256)
high = mo.to_fraction(high, den_limit=256)
if not (0 < low <= self.MAX_FRAMERATE):
raise PiCameraValueError("Invalid low framerate: %.2ffps" % low)
if not (0 < high <= self.MAX_FRAMERATE):
raise PiCameraValueError("Invalid high framerate: %.2ffps" % high)
if high < low:
raise PiCameraValueError("framerate_range is backwards")
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
resolution = self.resolution
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=(low, high),
resolution=resolution, clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
framerate_range = property(_get_framerate_range, _set_framerate_range, doc="""\
Retrieves or sets a range between which the camera's framerate is
allowed to float.
When queried, the :attr:`framerate_range` property returns a
:func:`~collections.namedtuple` derivative with ``low`` and ``high``
components (index 0 and 1 respectively) which specify the limits of the
permitted framerate range.
When set, the property configures the camera so that the next call to
recording and previewing methods will use the new framerate range.
Setting this property will implicitly set the :attr:`framerate`
property to 0 (indicating that a dynamic range of framerates is in use
by the camera).
.. note::
Use of this property prevents use of :attr:`framerate_delta` (there
would be little point in making fractional adjustments to the
framerate when the framerate itself is variable).
The low and high framerates can be specified as :ref:`int
<typesnumeric>`, :ref:`float <typesnumeric>`, or
:class:`~fractions.Fraction` values. For example, the following
definitions are all equivalent::
from fractions import Fraction
camera.framerate_range = (0.16666, 30)
camera.framerate_range = (Fraction(1, 6), 30 / 1)
camera.framerate_range = (Fraction(1, 6), Fraction(30, 1))
The camera must not be closed, and no recording must be active when the
property is set.
.. note::
This attribute, like :attr:`framerate`, determines the mode that
the camera operates in. The actual sensor framerate and resolution
used by the camera are influenced, but not directly set, by this
property. See :attr:`sensor_mode` for more information.
.. versionadded:: 1.13
""")
def _get_framerate_delta(self):
self._check_camera_open()
if self.framerate == 0:
raise PiCameraValueError(
'framerate_delta cannot be used with framerate_range')
port_num = (
self.CAMERA_VIDEO_PORT
if self._encoders else
self.CAMERA_PREVIEW_PORT
)
return self._camera.outputs[port_num].params[mmal.MMAL_PARAMETER_FRAME_RATE] - self.framerate
def _set_framerate_delta(self, value):
self._check_camera_open()
if self.framerate == 0:
raise PiCameraValueError(
'framerate_delta cannot be used with framerate_range')
value = mo.to_fraction(self.framerate + value, den_limit=256)
self._camera.outputs[self.CAMERA_PREVIEW_PORT].params[mmal.MMAL_PARAMETER_FRAME_RATE] = value
self._camera.outputs[self.CAMERA_VIDEO_PORT].params[mmal.MMAL_PARAMETER_FRAME_RATE] = value
framerate_delta = property(_get_framerate_delta, _set_framerate_delta, doc="""\
Retrieves or sets a fractional amount that is added to the camera's
framerate for the purpose of minor framerate adjustments.
When queried, the :attr:`framerate_delta` property returns the amount
that the camera's :attr:`framerate` has been adjusted. This defaults
to 0 (so the camera's framerate is the actual framerate used).
When set, the property adjusts the camera's framerate on the fly. The
property can be set while recordings or previews are in progress. Thus
the framerate used by the camera is actually :attr:`framerate` +
:attr:`framerate_delta`.
.. note::
Framerate deltas can be fractional with adjustments as small as
1/256th of an fps possible (finer adjustments will be rounded).
With an appropriately tuned PID controller, this can be used to
achieve synchronization between the camera framerate and other
devices.
If the new framerate demands a mode switch (such as moving between a
low framerate and a high framerate mode), currently active recordings
may drop a frame. This should only happen when specifying quite large
deltas, or when framerate is at the boundary of a sensor mode (e.g.
49fps).
The framerate delta can be specified as an :ref:`int <typesnumeric>`,
:ref:`float <typesnumeric>`, :class:`~fractions.Fraction` or a
``(numerator, denominator)`` tuple. For example, the following
definitions are all equivalent::
from fractions import Fraction
camera.framerate_delta = 0.5
camera.framerate_delta = 1 / 2 # in python 3
camera.framerate_delta = Fraction(1, 2)
camera.framerate_delta = (1, 2) # deprecated
.. note::
This property is implicitly reset to 0 when :attr:`framerate` or
:attr:`framerate_range` is set. When :attr:`framerate` is 0
(indicating that :attr:`framerate_range` is set), this property
cannot be used (there would be little point in making fractional
adjustments to the framerate when the framerate itself is
variable).
.. versionadded:: 1.11
""")
def _get_still_stats(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_CAPTURE_STATS_PASS]
def _set_still_stats(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_CAPTURE_STATS_PASS] = value
still_stats = property(_get_still_stats, _set_still_stats, doc="""\
Retrieves or sets whether statistics will be calculated from still
frames or the prior preview frame.
When queried, the :attr:`still_stats` property returns a boolean value
indicating when scene statistics will be calculated for still captures
(that is, captures where the *use_video_port* parameter of
:meth:`capture` is ``False``). When this property is ``False`` (the
default), statistics will be calculated from the preceding preview
frame (this also applies when the preview is not visible). When
``True``, statistics will be calculated from the captured image itself.
When set, the property controls when scene statistics will be
calculated for still captures. The property can be set while recordings
or previews are in progress. The default value is ``False``.
The advantages to calculating scene statistics from the captured image
are that time between startup and capture is reduced as only the AGC
(automatic gain control) has to converge. The downside is that
processing time for captures increases and that white balance and gain
won't necessarily match the preview.
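For example, a brief sketch enabling the statistics pass before a still
capture (``camera`` is assumed to be an open :class:`PiCamera` instance
and the filename is illustrative)::
camera.still_stats = True
camera.capture('stats-pass.jpg')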
.. warning::
Enabling the still statistics pass will `override fixed white
balance`_ gains (set via :attr:`awb_gains` and :attr:`awb_mode`).
.. _override fixed white balance: https://www.raspberrypi.org/forums/viewtopic.php?p=875772&sid=92fa4ea70d1fe24590a4cdfb4a10c489#p875772
.. versionadded:: 1.9
""")
def _get_saturation(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_SATURATION] * 100)
def _set_saturation(self, value):
self._check_camera_open()
if not (-100 <= value <= 100):
raise PiCameraValueError(
"Invalid saturation value: %d (valid range -100..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_SATURATION] = Fraction(value, 100)
saturation = property(_get_saturation, _set_saturation, doc="""\
Retrieves or sets the saturation setting of the camera.
When queried, the :attr:`saturation` property returns the color
saturation of the camera as an integer between -100 and 100. When set,
the property adjusts the saturation of the camera. Saturation can be
adjusted while previews or recordings are in progress. The default
value is 0.
""")
def _get_sharpness(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_SHARPNESS] * 100)
def _set_sharpness(self, value):
self._check_camera_open()
if not (-100 <= value <= 100):
raise PiCameraValueError(
"Invalid sharpness value: %d (valid range -100..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_SHARPNESS] = Fraction(value, 100)
sharpness = property(_get_sharpness, _set_sharpness, doc="""\
Retrieves or sets the sharpness setting of the camera.
When queried, the :attr:`sharpness` property returns the sharpness
level of the camera (a measure of the amount of post-processing to
reduce or increase image sharpness) as an integer between -100 and 100.
When set, the property adjusts the sharpness of the camera. Sharpness
can be adjusted while previews or recordings are in progress. The
default value is 0.
""")
def _get_contrast(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_CONTRAST] * 100)
def _set_contrast(self, value):
self._check_camera_open()
if not (-100 <= value <= 100):
raise PiCameraValueError(
"Invalid contrast value: %d (valid range -100..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_CONTRAST] = Fraction(value, 100)
contrast = property(_get_contrast, _set_contrast, doc="""\
Retrieves or sets the contrast setting of the camera.
When queried, the :attr:`contrast` property returns the contrast level
of the camera as an integer between -100 and 100. When set, the
property adjusts the contrast of the camera. Contrast can be adjusted
while previews or recordings are in progress. The default value is 0.
""")
def _get_brightness(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_BRIGHTNESS] * 100)
def _set_brightness(self, value):
self._check_camera_open()
if not (0 <= value <= 100):
raise PiCameraValueError(
"Invalid brightness value: %d (valid range 0..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_BRIGHTNESS] = Fraction(value, 100)
brightness = property(_get_brightness, _set_brightness, doc="""\
Retrieves or sets the brightness setting of the camera.
When queried, the :attr:`brightness` property returns the brightness
level of the camera as an integer between 0 and 100. When set, the
property adjusts the brightness of the camera. Brightness can be
adjusted while previews or recordings are in progress. The default
value is 50.
""")
def _get_shutter_speed(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_SHUTTER_SPEED])
def _set_shutter_speed(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_SHUTTER_SPEED] = value
shutter_speed = property(_get_shutter_speed, _set_shutter_speed, doc="""\
Retrieves or sets the shutter speed of the camera in microseconds.
When queried, the :attr:`shutter_speed` property returns the shutter
speed of the camera in microseconds, or 0 which indicates that the
speed will be automatically determined by the auto-exposure algorithm.
Faster shutter times naturally require greater amounts of illumination
and vice versa.
When set, the property adjusts the shutter speed of the camera, which
most obviously affects the illumination of subsequently captured
images. Shutter speed can be adjusted while previews or recordings are
running. The default value is 0 (auto).
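For example, a minimal sketch fixing a 10ms exposure (the values are
illustrative; ``camera`` is assumed to be an open :class:`PiCamera`
instance)::
camera.framerate = 30
camera.shutter_speed = 10000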
.. note::
You can query the :attr:`exposure_speed` attribute to determine the
actual shutter speed being used when this attribute is set to 0.
Please note that this capability requires an up to date firmware
(#692 or later).
.. note::
In later firmwares, this attribute is limited by the value of the
:attr:`framerate` attribute. For example, if framerate is set to
30fps, the shutter speed cannot be slower than 33,333µs (1/fps).
""")
def _get_exposure_speed(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].exposure
exposure_speed = property(_get_exposure_speed, doc="""\
Retrieves the current shutter speed of the camera.
When queried, this property returns the shutter speed currently being
used by the camera. If you have set :attr:`shutter_speed` to a non-zero
value, then :attr:`exposure_speed` and :attr:`shutter_speed` should be
equal. However, if :attr:`shutter_speed` is set to 0 (auto), then you
can read the actual shutter speed being used from this attribute. The
value is returned as an integer representing a number of microseconds.
This is a read-only property.
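For example, a small sketch reporting the automatically selected speed
(assuming ``camera`` is an open :class:`PiCamera` instance with
:attr:`shutter_speed` left at 0)::
print('Exposure: %d us' % camera.exposure_speed)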
.. versionadded:: 1.6
""")
def _get_analog_gain(self):
self._check_camera_open()
return mo.to_fraction(
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].analog_gain)
analog_gain = property(_get_analog_gain, doc="""\
Retrieves the current analog gain of the camera.
When queried, this property returns the analog gain currently being
used by the camera. The value represents the analog gain of the sensor
prior to digital conversion. The value is returned as a
:class:`~fractions.Fraction` instance.
.. versionadded:: 1.6
""")
def _get_digital_gain(self):
self._check_camera_open()
return mo.to_fraction(
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].digital_gain)
digital_gain = property(_get_digital_gain, doc="""\
Retrieves the current digital gain of the camera.
When queried, this property returns the digital gain currently being
used by the camera. The value represents the digital gain the camera
applies after conversion of the sensor's analog output. The value is
returned as a :class:`~fractions.Fraction` instance.
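For example, a brief sketch printing both gains (assuming ``camera`` is
an open :class:`PiCamera` instance; the gains take a moment to settle
after the camera is initialized)::
print(float(camera.analog_gain), float(camera.digital_gain))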
.. versionadded:: 1.6
""")
def _get_video_denoise(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_DENOISE]
def _set_video_denoise(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_DENOISE] = value
video_denoise = property(_get_video_denoise, _set_video_denoise, doc="""\
Retrieves or sets whether denoise will be applied to video recordings.
When queried, the :attr:`video_denoise` property returns a boolean
value indicating whether or not the camera software will apply a
denoise algorithm to video recordings.
When set, the property activates or deactivates the denoise algorithm
for video recordings. The property can be set while recordings or
previews are in progress. The default value is ``True``.
.. versionadded:: 1.7
""")
def _get_image_denoise(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_STILLS_DENOISE]
def _set_image_denoise(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_STILLS_DENOISE] = value
image_denoise = property(_get_image_denoise, _set_image_denoise, doc="""\
Retrieves or sets whether denoise will be applied to image captures.
When queried, the :attr:`image_denoise` property returns a boolean
value indicating whether or not the camera software will apply a
denoise algorithm to image captures.
When set, the property activates or deactivates the denoise algorithm
for image captures. The property can be set while recordings or
previews are in progress. The default value is ``True``.
.. versionadded:: 1.7
""")
def _get_drc_strength(self):
self._check_camera_open()
return self._DRC_STRENGTHS_R[
self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION].strength
]
def _set_drc_strength(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION]
mp.strength = self.DRC_STRENGTHS[value]
except KeyError:
raise PiCameraValueError(
"Invalid dynamic range compression strength: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION] = mp
drc_strength = property(_get_drc_strength, _set_drc_strength, doc="""\
Retrieves or sets the dynamic range compression strength of the camera.
When queried, the :attr:`drc_strength` property returns a string
indicating the amount of `dynamic range compression`_ the camera
applies to images.
When set, the attribute adjusts the strength of the dynamic range
compression applied to the camera's output. Valid values are given
in the list below:
{values}
The default value is ``'off'``. All possible values for the attribute
can be obtained from the ``PiCamera.DRC_STRENGTHS`` attribute.
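For example, a one-line sketch (``'high'`` is simply one of the values
listed in ``PiCamera.DRC_STRENGTHS``)::
camera.drc_strength = 'high'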
.. warning::
Enabling DRC will `override fixed white balance`_ gains (set via
:attr:`awb_gains` and :attr:`awb_mode`).
.. _dynamic range compression: https://en.wikipedia.org/wiki/Gain_compression
.. _override fixed white balance: https://www.raspberrypi.org/forums/viewtopic.php?p=875772&sid=92fa4ea70d1fe24590a4cdfb4a10c489#p875772
.. versionadded:: 1.6
""".format(values=docstring_values(DRC_STRENGTHS)))
def _get_ISO(self):
warnings.warn(
PiCameraDeprecated(
'PiCamera.ISO is deprecated; use PiCamera.iso instead'))
return self.iso
def _set_ISO(self, value):
warnings.warn(
PiCameraDeprecated(
'PiCamera.ISO is deprecated; use PiCamera.iso instead'))
self.iso = value
ISO = property(_get_ISO, _set_ISO, doc="""
Retrieves or sets the apparent ISO setting of the camera.
.. deprecated:: 1.8
Please use the :attr:`iso` attribute instead.
""")
def _get_iso(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_ISO]
def _set_iso(self, value):
self._check_camera_open()
try:
if not (0 <= value <= 1600):
raise PiCameraValueError(
"Invalid iso value: %d (valid range 0..800)" % value)
except TypeError:
raise PiCameraValueError("Invalid iso value: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_ISO] = value
iso = property(_get_iso, _set_iso, doc="""\
Retrieves or sets the apparent ISO setting of the camera.
When queried, the :attr:`iso` property returns the ISO setting of the
camera, a value which represents the `sensitivity of the camera to
light`_. Lower values (e.g. 100) imply less sensitivity than higher
values (e.g. 400 or 800). Lower sensitivities tend to produce less
"noisy" (smoother) images, but operate poorly in low light conditions.
When set, the property adjusts the sensitivity of the camera (by
adjusting the :attr:`analog_gain` and :attr:`digital_gain`). Valid
values are between 0 (auto) and 1600. The actual value used when iso is
explicitly set will be one of the following values (whichever is
closest): 100, 200, 320, 400, 500, 640, 800.
On the V1 camera module, non-zero ISO values attempt to fix overall
gain at various levels. For example, ISO 100 attempts to provide an
overall gain of 1.0, ISO 200 attempts to provide overall gain of 2.0,
etc. The algorithm prefers analog gain over digital gain to reduce
noise.
On the V2 camera module, ISO 100 attempts to produce overall gain of
~1.84, and ISO 800 attempts to produce overall gain of ~14.72 (the V2
camera module was calibrated against the `ISO film speed`_ standard).
The attribute can be adjusted while previews or recordings are in
progress. The default value is 0 which means automatically determine a
value according to image-taking conditions.
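For example, a minimal sketch requesting a low sensitivity for bright,
steady lighting (the value is illustrative; ``camera`` is assumed to be
an open :class:`PiCamera` instance)::
camera.iso = 100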
.. note::
Some users on the Pi camera forum have noted that higher ISO values
than 800 (specifically up to 1600) can be achieved in certain
conditions with :attr:`exposure_mode` set to ``'sports'`` and
:attr:`iso` set to 0. It doesn't appear to be possible to manually
request an ISO setting higher than 800, but the picamera library
will permit settings up to 1600 in case the underlying firmware
permits such settings in particular circumstances.
.. note::
Certain :attr:`exposure_mode` values override the ISO setting. For
example, ``'off'`` fixes :attr:`analog_gain` and
:attr:`digital_gain` entirely, preventing this property from
adjusting them when set.
.. _sensitivity of the camera to light: https://en.wikipedia.org/wiki/Film_speed#Digital
.. _ISO film speed: https://en.wikipedia.org/wiki/Film_speed#Current_system:_ISO
""")
def _get_meter_mode(self):
self._check_camera_open()
return self._METER_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE].value
]
def _set_meter_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE]
mp.value = self.METER_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid metering mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE] = mp
meter_mode = property(_get_meter_mode, _set_meter_mode, doc="""\
Retrieves or sets the metering mode of the camera.
When queried, the :attr:`meter_mode` property returns the method by
which the camera `determines the exposure`_ as one of the following
strings:
{values}
When set, the property adjusts the camera's metering mode. All modes
set up two regions: a center region, and an outer region. The major
`difference between each mode`_ is the size of the center region. The
``'backlit'`` mode has the largest central region (30% of the width),
while ``'spot'`` has the smallest (10% of the width).
The property can be set while recordings or previews are in progress.
The default value is ``'average'``. All possible values for the
attribute can be obtained from the ``PiCamera.METER_MODES`` attribute.
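For example, a one-line sketch biasing exposure towards the centre of
the frame (``'spot'`` is simply one of the values listed in
``PiCamera.METER_MODES``)::
camera.meter_mode = 'spot'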
.. _determines the exposure: https://en.wikipedia.org/wiki/Metering_mode
.. _difference between each mode: https://www.raspberrypi.org/forums/viewtopic.php?p=565644#p565644
""".format(values=docstring_values(METER_MODES)))
def _get_video_stabilization(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_STABILISATION]
def _set_video_stabilization(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_STABILISATION] = value
video_stabilization = property(
_get_video_stabilization, _set_video_stabilization, doc="""\
Retrieves or sets the video stabilization mode of the camera.
When queried, the :attr:`video_stabilization` property returns a
boolean value indicating whether or not the camera attempts to
compensate for motion.
When set, the property activates or deactivates video stabilization.
The property can be set while recordings or previews are in progress.
The default value is ``False``.
.. note::
The built-in video stabilization only accounts for `vertical and
horizontal motion`_, not rotation.
.. _vertical and horizontal motion: https://www.raspberrypi.org/forums/viewtopic.php?p=342667&sid=ec7d95e887ab74a90ffaab87888c48cd#p342667
""")
def _get_exposure_compensation(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_COMP]
def _set_exposure_compensation(self, value):
self._check_camera_open()
try:
if not (-25 <= value <= 25):
raise PiCameraValueError(
"Invalid exposure compensation value: "
"%d (valid range -25..25)" % value)
except TypeError:
raise PiCameraValueError(
"Invalid exposure compensation value: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_COMP] = value
exposure_compensation = property(
_get_exposure_compensation, _set_exposure_compensation, doc="""\
Retrieves or sets the exposure compensation level of the camera.
When queried, the :attr:`exposure_compensation` property returns an
integer value between -25 and 25 indicating the exposure level of the
camera. Larger values result in brighter images.
When set, the property adjusts the camera's exposure compensation
level. Each increment represents 1/6th of a stop. Hence setting the
attribute to 6 increases exposure by 1 stop. The property can be set
while recordings or previews are in progress. The default value is 0.
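For example, a one-line sketch brightening the image by one stop
(assuming ``camera`` is an open :class:`PiCamera` instance)::
camera.exposure_compensation = 6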
""")
def _get_exposure_mode(self):
self._check_camera_open()
return self._EXPOSURE_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE].value
]
def _set_exposure_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE]
mp.value = self.EXPOSURE_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid exposure mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE] = mp
exposure_mode = property(_get_exposure_mode, _set_exposure_mode, doc="""\
Retrieves or sets the exposure mode of the camera.
When queried, the :attr:`exposure_mode` property returns a string
representing the exposure setting of the camera. The possible values
can be obtained from the ``PiCamera.EXPOSURE_MODES`` attribute, and
are as follows:
{values}
When set, the property adjusts the camera's exposure mode. The
property can be set while recordings or previews are in progress. The
default value is ``'auto'``.
.. note::
Exposure mode ``'off'`` is special: this disables the camera's
automatic gain control, fixing the values of :attr:`digital_gain`
and :attr:`analog_gain`.
Please note that these properties are not directly settable
(although they can be influenced by setting :attr:`iso` *prior* to
fixing the gains), and default to low values when the camera is
first initialized. Therefore it is important to let them settle on
higher values before disabling automatic gain control otherwise all
frames captured will appear black.
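For example, a sketch along the lines of the consistent-capture recipe,
letting the gains settle before fixing them (the timings and values are
illustrative only)::
from time import sleep
import picamera
with picamera.PiCamera(framerate=30) as camera:
    camera.iso = 100
    sleep(2)
    camera.shutter_speed = camera.exposure_speed
    camera.exposure_mode = 'off'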
""".format(values=docstring_values(EXPOSURE_MODES)))
def _get_flash_mode(self):
self._check_camera_open()
return self._FLASH_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_FLASH].value
]
def _set_flash_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_FLASH]
mp.value = self.FLASH_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid flash mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_FLASH] = mp
flash_mode = property(_get_flash_mode, _set_flash_mode, doc="""\
Retrieves or sets the flash mode of the camera.
When queried, the :attr:`flash_mode` property returns a string
representing the flash setting of the camera. The possible values can
be obtained from the ``PiCamera.FLASH_MODES`` attribute, and are as
follows:
{values}
When set, the property adjusts the camera's flash mode. The property
can be set while recordings or previews are in progress. The default
value is ``'off'``.
.. note::
You must define which GPIO pins the camera is to use for flash and
privacy indicators. This is done within the `Device Tree
configuration`_ which is considered an advanced topic.
Specifically, you need to define pins ``FLASH_0_ENABLE`` and
optionally ``FLASH_0_INDICATOR`` (for the privacy indicator). More
information can be found in this :ref:`recipe
<flash_configuration>`.
.. _Device Tree configuration: https://www.raspberrypi.org/documentation/configuration/pin-configuration.md
.. versionadded:: 1.10
""".format(values=docstring_values(FLASH_MODES)))
def _get_awb_mode(self):
self._check_camera_open()
return self._AWB_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE].value
]
def _set_awb_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE]
mp.value = self.AWB_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid auto-white-balance mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE] = mp
awb_mode = property(_get_awb_mode, _set_awb_mode, doc="""\
Retrieves or sets the auto-white-balance mode of the camera.
When queried, the :attr:`awb_mode` property returns a string
representing the auto white balance setting of the camera. The possible
values can be obtained from the ``PiCamera.AWB_MODES`` attribute, and
are as follows:
{values}
When set, the property adjusts the camera's auto-white-balance mode.
The property can be set while recordings or previews are in progress.
The default value is ``'auto'``.
.. note::
AWB mode ``'off'`` is special: this disables the camera's automatic
white balance permitting manual control of the white balance via
the :attr:`awb_gains` property. However, even with AWB disabled,
some attributes (specifically :attr:`still_stats` and
:attr:`drc_strength`) can cause AWB re-calculations.
""".format(values=docstring_values(AWB_MODES)))
def _get_awb_gains(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS]
return (
mo.to_fraction(mp.awb_red_gain),
mo.to_fraction(mp.awb_blue_gain),
)
def _set_awb_gains(self, value):
self._check_camera_open()
try:
red_gain, blue_gain = value
except (ValueError, TypeError):
red_gain = blue_gain = value
if not (0.0 <= red_gain <= 8.0 and 0.0 <= blue_gain <= 8.0):
raise PiCameraValueError(
"Invalid gain(s) in (%f, %f) (valid range: 0.0-8.0)" % (
red_gain, blue_gain))
mp = mmal.MMAL_PARAMETER_AWB_GAINS_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_CUSTOM_AWB_GAINS,
ct.sizeof(mmal.MMAL_PARAMETER_AWB_GAINS_T)
),
mo.to_rational(red_gain),
mo.to_rational(blue_gain),
)
self._camera.control.params[mmal.MMAL_PARAMETER_CUSTOM_AWB_GAINS] = mp
awb_gains = property(_get_awb_gains, _set_awb_gains, doc="""\
Gets or sets the auto-white-balance gains of the camera.
When queried, this attribute returns a tuple of values representing
the ``(red, blue)`` balance of the camera. The ``red`` and ``blue``
values are returned as :class:`~fractions.Fraction` instances. The values will
be between 0.0 and 8.0.
When set, this attribute adjusts the camera's auto-white-balance gains.
The property can be specified as a single value in which case both red
and blue gains will be adjusted equally, or as a ``(red, blue)`` tuple.
Values can be specified as an :ref:`int <typesnumeric>`, :ref:`float
<typesnumeric>` or :class:`~fractions.Fraction` and each gain must be
between 0.0 and 8.0. Typical values for the gains are between 0.9 and
1.9. The property can be set while recordings or previews are in
progress.
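For example, a hedged sketch fixing the white balance manually (the gain
values are illustrative, not calibrated; ``camera`` is assumed to be an
open :class:`PiCamera` instance)::
camera.awb_mode = 'off'
camera.awb_gains = (1.5, 1.3)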
.. note::
This attribute only has an effect when :attr:`awb_mode` is set to
``'off'``. Also note that even with AWB disabled, some attributes
(specifically :attr:`still_stats` and :attr:`drc_strength`) can
cause AWB re-calculations.
.. versionchanged:: 1.6
Prior to version 1.6, this attribute was write-only.
""")
def _get_image_effect(self):
self._check_camera_open()
return self._IMAGE_EFFECTS_R[
self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT].value
]
def _set_image_effect(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT]
mp.value = self.IMAGE_EFFECTS[value]
self._image_effect_params = None
except KeyError:
raise PiCameraValueError("Invalid image effect: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT] = mp
image_effect = property(_get_image_effect, _set_image_effect, doc="""\
Retrieves or sets the current image effect applied by the camera.
When queried, the :attr:`image_effect` property returns a string
representing the effect the camera will apply to captured video. The
possible values can be obtained from the ``PiCamera.IMAGE_EFFECTS``
attribute, and are as follows:
{values}
When set, the property changes the effect applied by the camera. The
property can be set while recordings or previews are in progress, but
only certain effects work while recording video (notably ``'negative'``
and ``'solarize'``). The default value is ``'none'``.
""".format(values=docstring_values(IMAGE_EFFECTS)))
def _get_image_effect_params(self):
self._check_camera_open()
return self._image_effect_params
def _set_image_effect_params(self, value):
self._check_camera_open()
to_int = lambda x: int(x)
to_byte = lambda x: max(0, min(255, int(x)))
to_bool = lambda x: (0, 1)[bool(x)]
to_8dot8 = lambda x: int(x * 256)
valid_transforms = {
'solarize': [
(to_bool, to_byte, to_byte, to_byte, to_byte),
(to_byte, to_byte, to_byte, to_byte),
(to_bool,),
],
'colorpoint': [
(lambda x: max(0, min(3, int(x))),),
],
'colorbalance': [
(to_8dot8, to_8dot8, to_8dot8, to_8dot8, to_int, to_int),
(to_8dot8, to_8dot8, to_8dot8, to_8dot8),
(to_8dot8, to_8dot8, to_8dot8),
],
'colorswap': [
(to_bool,),
],
'posterise': [
(lambda x: max(2, min(31, int(x))),),
],
'blur': [
(lambda x: max(1, min(2, int(x))),),
],
'film': [
(to_byte, to_byte, to_byte),
],
'watercolor': [
(),
(to_byte, to_byte),
]
}
# Ensure params is a tuple
try:
params = tuple(i for i in value)
except TypeError:
params = (value,)
# Find the parameter combination for the current effect
effect = self.image_effect
param_transforms = [
transforms for transforms in valid_transforms.get(effect, [])
if len(transforms) == len(params)
]
if not param_transforms:
raise PiCameraValueError(
'invalid set of parameters for effect "%s"' % effect)
param_transforms = param_transforms[0]
params = tuple(
transform(p)
for (transform, p) in zip(param_transforms, params)
)
mp = mmal.MMAL_PARAMETER_IMAGEFX_PARAMETERS_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS,
ct.sizeof(mmal.MMAL_PARAMETER_IMAGEFX_PARAMETERS_T)
),
effect=self.IMAGE_EFFECTS[effect],
num_effect_params=len(params),
effect_parameter=params,
)
self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS] = mp
self._image_effect_params = value
image_effect_params = property(
_get_image_effect_params, _set_image_effect_params, doc="""\
Retrieves or sets the parameters for the current :attr:`effect
<image_effect>`.
When queried, the :attr:`image_effect_params` property either returns
``None`` (for effects which have no configurable parameters, or if no
parameters have been configured), or a tuple of numeric values up to
six elements long.
When set, the property changes the parameters of the current
:attr:`effect <image_effect>` as a sequence of numbers, or a single
number. Attempting to set parameters on an effect which does not
support parameters, or providing an incompatible set of parameters for
an effect will raise a :exc:`PiCameraValueError` exception.
The effects which have parameters, and what combinations those
parameters can take is as follows:
.. tabularcolumns:: |p{30mm}|p{25mm}|p{75mm}|
+--------------------+----------------+-----------------------------------------+
| Effect | Parameters | Description |
+====================+================+=========================================+
| ``'solarize'`` | *yuv*, | *yuv* controls whether data is |
|                    | *x0*, *y0*,    | processed as RGB (0) or YUV(1). Input   |
|                    | *y1*, *y2*     | values from 0 to *x0* - 1 are remapped  |
| | | linearly onto the range 0 to *y0*. |
| | | Values from *x0* to 255 are remapped |
| | | linearly onto the range *y1* to *y2*. |
| +----------------+-----------------------------------------+
| | *x0*, *y0*, | Same as above, but *yuv* defaults to |
| | *y1*, *y2* | 0 (process as RGB). |
| +----------------+-----------------------------------------+
| | *yuv* | Same as above, but *x0*, *y0*, *y1*, |
| | | *y2* default to 128, 128, 128, 0 |
| | | respectively. |
+--------------------+----------------+-----------------------------------------+
| ``'colorpoint'`` | *quadrant* | *quadrant* specifies which quadrant |
| | | of the U/V space to retain chroma |
| | | from: 0=green, 1=red/yellow, 2=blue, |
| | | 3=purple. There is no default; this |
| | | effect does nothing until parameters |
| | | are set. |
+--------------------+----------------+-----------------------------------------+
| ``'colorbalance'`` | *lens*, | *lens* specifies the lens shading |
| | *r*, *g*, *b*, | strength (0.0 to 256.0, where 0.0 |
| | *u*, *v* | indicates lens shading has no effect). |
| | | *r*, *g*, *b* are multipliers for their |
| | | respective color channels (0.0 to |
| | | 256.0). *u* and *v* are offsets added |
| | | to the U/V plane (0 to 255). |
| +----------------+-----------------------------------------+
|                    | *lens*,        | Same as above but *u* and *v* default   |
|                    | *r*, *g*, *b*  | to 0.                                   |
| +----------------+-----------------------------------------+
| | *lens*, | Same as above but *g* also defaults to |
|                    | *r*, *b*       | 1.0.                                    |
+--------------------+----------------+-----------------------------------------+
| ``'colorswap'`` | *dir* | If *dir* is 0, swap RGB to BGR. If |
| | | *dir* is 1, swap RGB to BRG. |
+--------------------+----------------+-----------------------------------------+
| ``'posterise'`` | *steps* | Control the quantization steps for the |
| | | image. Valid values are 2 to 32, and |
| | | the default is 4. |
+--------------------+----------------+-----------------------------------------+
| ``'blur'`` | *size* | Specifies the size of the kernel. Valid |
| | | values are 1 or 2. |
+--------------------+----------------+-----------------------------------------+
| ``'film'`` | *strength*, | *strength* specifies the strength of |
| | *u*, *v* | effect. *u* and *v* are offsets added |
| | | to the U/V plane (0 to 255). |
+--------------------+----------------+-----------------------------------------+
| ``'watercolor'`` | *u*, *v* | *u* and *v* specify offsets to add to |
| | | the U/V plane (0 to 255). |
| +----------------+-----------------------------------------+
| | | No parameters indicates no U/V effect. |
+--------------------+----------------+-----------------------------------------+
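For example, a brief sketch applying the ``'solarize'`` effect with an
explicit five-value parameter set (the numbers are illustrative only;
``camera`` is assumed to be an open :class:`PiCamera` instance)::
camera.image_effect = 'solarize'
camera.image_effect_params = (0, 128, 128, 128, 0)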
.. versionadded:: 1.8
""")
def _get_color_effects(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_COLOUR_EFFECT]
if mp.enable != mmal.MMAL_FALSE:
return (mp.u, mp.v)
else:
return None
def _set_color_effects(self, value):
self._check_camera_open()
if value is None:
enable = mmal.MMAL_FALSE
u = v = 128
else:
enable = mmal.MMAL_TRUE
try:
u, v = value
except (TypeError, ValueError) as e:
raise PiCameraValueError(
"Invalid color effect (u, v) tuple: %s" % value)
if not ((0 <= u <= 255) and (0 <= v <= 255)):
raise PiCameraValueError(
"(u, v) values must be between 0 and 255")
mp = mmal.MMAL_PARAMETER_COLOURFX_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_COLOUR_EFFECT,
ct.sizeof(mmal.MMAL_PARAMETER_COLOURFX_T)
),
enable, u, v
)
self._camera.control.params[mmal.MMAL_PARAMETER_COLOUR_EFFECT] = mp
color_effects = property(_get_color_effects, _set_color_effects, doc="""\
Retrieves or sets the current color effect applied by the camera.
When queried, the :attr:`color_effects` property either returns
``None`` which indicates that the camera is using normal color
settings, or a ``(u, v)`` tuple where ``u`` and ``v`` are integer
values between 0 and 255.
When set, the property changes the color effect applied by the camera.
The property can be set while recordings or previews are in progress.
For example, to make the image black and white set the value to ``(128,
128)``. The default value is ``None``.
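For example, a minimal black and white sketch (assuming ``camera`` is an
open :class:`PiCamera` instance)::
camera.color_effects = (128, 128)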
""")
def _get_rotation(self):
self._check_camera_open()
return self._camera.outputs[0].params[mmal.MMAL_PARAMETER_ROTATION]
def _set_rotation(self, value):
self._check_camera_open()
try:
value = ((int(value) % 360) // 90) * 90
except ValueError:
raise PiCameraValueError("Invalid rotation angle: %s" % value)
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_ROTATION] = value
rotation = property(_get_rotation, _set_rotation, doc="""\
Retrieves or sets the current rotation of the camera's image.
When queried, the :attr:`rotation` property returns the rotation
applied to the image. Valid values are 0, 90, 180, and 270.
When set, the property changes the rotation applied to the camera's
input. The property can be set while recordings or previews are in
progress. The default value is ``0``.
""")
def _get_vflip(self):
self._check_camera_open()
return self._camera.outputs[0].params[mmal.MMAL_PARAMETER_MIRROR] in (
mmal.MMAL_PARAM_MIRROR_VERTICAL, mmal.MMAL_PARAM_MIRROR_BOTH)
def _set_vflip(self, value):
self._check_camera_open()
value = {
(False, False): mmal.MMAL_PARAM_MIRROR_NONE,
(True, False): mmal.MMAL_PARAM_MIRROR_VERTICAL,
(False, True): mmal.MMAL_PARAM_MIRROR_HORIZONTAL,
(True, True): mmal.MMAL_PARAM_MIRROR_BOTH,
}[(bool(value), self.hflip)]
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_MIRROR] = value
vflip = property(_get_vflip, _set_vflip, doc="""\
Retrieves or sets whether the camera's output is vertically flipped.
When queried, the :attr:`vflip` property returns a boolean indicating
whether or not the camera's output is vertically flipped. The property
can be set while recordings or previews are in progress. The default
value is ``False``.
""")
def _get_hflip(self):
self._check_camera_open()
return self._camera.outputs[0].params[mmal.MMAL_PARAMETER_MIRROR] in (
mmal.MMAL_PARAM_MIRROR_HORIZONTAL, mmal.MMAL_PARAM_MIRROR_BOTH)
def _set_hflip(self, value):
self._check_camera_open()
value = {
(False, False): mmal.MMAL_PARAM_MIRROR_NONE,
(True, False): mmal.MMAL_PARAM_MIRROR_VERTICAL,
(False, True): mmal.MMAL_PARAM_MIRROR_HORIZONTAL,
(True, True): mmal.MMAL_PARAM_MIRROR_BOTH,
}[(self.vflip, bool(value))]
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_MIRROR] = value
hflip = property(_get_hflip, _set_hflip, doc="""\
Retrieves or sets whether the camera's output is horizontally flipped.
When queried, the :attr:`hflip` property returns a boolean indicating
whether or not the camera's output is horizontally flipped. The
property can be set while recordings or previews are in progress. The
default value is ``False``.
""")
def _get_zoom(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_INPUT_CROP]
return (
mp.rect.x / 65535.0,
mp.rect.y / 65535.0,
mp.rect.width / 65535.0,
mp.rect.height / 65535.0,
)
def _set_zoom(self, value):
self._check_camera_open()
try:
x, y, w, h = value
except (TypeError, ValueError) as e:
raise PiCameraValueError(
"Invalid zoom rectangle (x, y, w, h) tuple: %s" % value)
mp = mmal.MMAL_PARAMETER_INPUT_CROP_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_INPUT_CROP,
ct.sizeof(mmal.MMAL_PARAMETER_INPUT_CROP_T)
),
mmal.MMAL_RECT_T(
max(0, min(65535, int(65535 * x))),
max(0, min(65535, int(65535 * y))),
max(0, min(65535, int(65535 * w))),
max(0, min(65535, int(65535 * h))),
),
)
self._camera.control.params[mmal.MMAL_PARAMETER_INPUT_CROP] = mp
zoom = property(_get_zoom, _set_zoom, doc="""\
Retrieves or sets the zoom applied to the camera's input.
When queried, the :attr:`zoom` property returns a ``(x, y, w, h)``
tuple of floating point values ranging from 0.0 to 1.0, indicating the
proportion of the image to include in the output (this is also known as
the "Region of Interest" or ROI). The default value is ``(0.0, 0.0,
1.0, 1.0)`` which indicates that everything should be included. The
property can be set while recordings or previews are in progress.
The zoom is applied to the processed image, after rotation and rescaling.
If rotation has been applied, the zoom is interpreted as ``(y, x, h, w)``
instead. The values ``w`` and ``h`` can alter the aspect ratio of the
image: use equal values for ``w`` and ``h`` to preserve the aspect ratio.
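For example, a short sketch selecting the central quarter of the frame
(the values are illustrative; ``camera`` is assumed to be an open
:class:`PiCamera` instance)::
camera.zoom = (0.25, 0.25, 0.5, 0.5)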
""")
def _get_crop(self):
warnings.warn(
PiCameraDeprecated(
'PiCamera.crop is deprecated; use PiCamera.zoom instead'))
return self.zoom
def _set_crop(self, value):
warnings.warn(
PiCameraDeprecated(
'PiCamera.crop is deprecated; use PiCamera.zoom instead'))
self.zoom = value
crop = property(_get_crop, _set_crop, doc="""
Retrieves or sets the zoom applied to the camera's input.
.. deprecated:: 1.8
Please use the :attr:`zoom` attribute instead.
""")
def _get_overlays(self):
self._check_camera_open()
return self._overlays
overlays = property(_get_overlays, doc="""\
Retrieves all active :class:`PiRenderer` overlays.
If no overlays are currently active, :attr:`overlays` will return an
empty iterable. Otherwise, it will return an iterable of
:class:`PiRenderer` instances which are currently acting as overlays.
Note that the preview renderer is an exception to this: it is *not*
included as an overlay despite being derived from :class:`PiRenderer`.
.. versionadded:: 1.8
""")
def _get_preview(self):
self._check_camera_open()
if isinstance(self._preview, PiPreviewRenderer):
return self._preview
preview = property(_get_preview, doc="""\
Retrieves the :class:`PiRenderer` displaying the camera preview.
If no preview is currently active, :attr:`preview` will return
``None``. Otherwise, it will return the instance of
:class:`PiRenderer` which is currently connected to the camera's
preview port for rendering what the camera sees. You can use the
attributes of the :class:`PiRenderer` class to configure the appearance
of the preview. For example, to make the preview semi-transparent::
import picamera
with picamera.PiCamera() as camera:
    camera.start_preview()
    camera.preview.alpha = 128
.. versionadded:: 1.8
""")
def _get_preview_alpha(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_alpha is deprecated; use '
'PiCamera.preview.alpha instead'))
if self.preview:
return self.preview.alpha
else:
return self._preview_alpha
def _set_preview_alpha(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_alpha is deprecated; use '
'PiCamera.preview.alpha instead'))
if self.preview:
self.preview.alpha = value
else:
self._preview_alpha = value
preview_alpha = property(_get_preview_alpha, _set_preview_alpha, doc="""\
Retrieves or sets the opacity of the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.alpha` attribute of the
:attr:`preview` object instead.
""")
def _get_preview_layer(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_layer is deprecated; '
'use PiCamera.preview.layer instead'))
if self.preview:
return self.preview.layer
else:
return self._preview_layer
def _set_preview_layer(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_layer is deprecated; '
'use PiCamera.preview.layer instead'))
if self.preview:
self.preview.layer = value
else:
self._preview_layer = value
preview_layer = property(_get_preview_layer, _set_preview_layer, doc="""\
Retrieves or sets the layer of the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.layer` attribute of the
:attr:`preview` object instead.
""")
def _get_preview_fullscreen(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_fullscreen is deprecated; '
'use PiCamera.preview.fullscreen instead'))
if self.preview:
return self.preview.fullscreen
else:
return self._preview_fullscreen
def _set_preview_fullscreen(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_fullscreen is deprecated; '
'use PiCamera.preview.fullscreen instead'))
if self.preview:
self.preview.fullscreen = value
else:
self._preview_fullscreen = value
preview_fullscreen = property(
_get_preview_fullscreen, _set_preview_fullscreen, doc="""\
Retrieves or sets full-screen for the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.fullscreen` attribute of the
:attr:`preview` object instead.
""")
def _get_preview_window(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_window is deprecated; '
'use PiCamera.preview.window instead'))
if self.preview:
return self.preview.window
else:
return self._preview_window
def _set_preview_window(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_window is deprecated; '
'use PiCamera.preview.window instead'))
if self.preview:
self.preview.window = value
else:
self._preview_window = value
preview_window = property(
_get_preview_window, _set_preview_window, doc="""\
Retrieves or sets the size of the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.window` attribute of the
:attr:`preview` object instead.
""")
def _get_annotate_text(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if mp.enable:
return mp.text.decode('ascii')
else:
return ''
def _set_annotate_text(self, value):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.enable = bool(value or mp.show_frame_num)
if mp.enable:
try:
mp.text = value.encode('ascii')
except ValueError as e:
raise PiCameraValueError(str(e))
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_text = property(_get_annotate_text, _set_annotate_text, doc="""\
Retrieves or sets a text annotation for all output.
When queried, the :attr:`annotate_text` property returns the current
annotation (if no annotation has been set, this is simply a blank
string).
When set, the property immediately applies the annotation to the
preview (if it is running) and to any future captures or video
recording. Strings longer than 255 characters, or strings containing
non-ASCII characters will raise a :exc:`PiCameraValueError`. The
default value is ``''``.
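For example, a minimal sketch stamping a caption on all subsequent
output (the text itself is illustrative)::
camera.annotate_text = 'Hello world!'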
.. versionchanged:: 1.8
Text annotations can now be 255 characters long. The prior limit
was 32 characters.
""")
def _get_annotate_frame_num(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
return mp.show_frame_num.value != mmal.MMAL_FALSE
def _set_annotate_frame_num(self, value):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.enable = bool(value or mp.text)
mp.show_frame_num = bool(value)
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_frame_num = property(
_get_annotate_frame_num, _set_annotate_frame_num, doc="""\
Controls whether the current frame number is drawn as an annotation.
The :attr:`annotate_frame_num` attribute is a bool indicating whether
or not the current frame number is rendered as an annotation, similar
to :attr:`annotate_text`. The default is ``False``.
.. versionadded:: 1.8
""")
def _get_annotate_text_size(self):
self._check_camera_open()
if self._camera.annotate_rev == 3:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
return mp.text_size or self.DEFAULT_ANNOTATE_SIZE
else:
return self.DEFAULT_ANNOTATE_SIZE
def _set_annotate_text_size(self, value):
self._check_camera_open()
if not (6 <= value <= 160):
raise PiCameraValueError(
"Invalid annotation text size: %d (valid range 6-160)" % value)
if self._camera.annotate_rev == 3:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.text_size = value
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
elif value != self.DEFAULT_ANNOTATE_SIZE:
warnings.warn(
PiCameraFallback(
"Firmware does not support setting annotation text "
"size; using default (%d) instead" % self.DEFAULT_ANNOTATE_SIZE))
annotate_text_size = property(
_get_annotate_text_size, _set_annotate_text_size, doc="""\
Controls the size of the annotation text.
The :attr:`annotate_text_size` attribute is an int which determines how
large the annotation text will appear on the display. Valid values are
in the range 6 to 160, inclusive. The default is {size}.
.. versionadded:: 1.10
""".format(size=DEFAULT_ANNOTATE_SIZE))
def _get_annotate_foreground(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if self._camera.annotate_rev == 3 and mp.custom_text_color:
return Color.from_yuv_bytes(
mp.custom_text_Y,
mp.custom_text_U,
mp.custom_text_V)
else:
return Color('white')
def _set_annotate_foreground(self, value):
self._check_camera_open()
if not isinstance(value, Color):
raise PiCameraValueError(
'annotate_foreground must be a Color')
elif self._camera.annotate_rev < 3:
if value.rgb_bytes != (255, 255, 255):
warnings.warn(
PiCameraFallback(
"Firmware does not support setting a custom foreground "
"annotation color; using white instead"))
return
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.custom_text_color = True
(
mp.custom_text_Y,
mp.custom_text_U,
mp.custom_text_V,
) = value.yuv_bytes
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_foreground = property(
_get_annotate_foreground, _set_annotate_foreground, doc="""\
Controls the color of the annotation text.
The :attr:`annotate_foreground` attribute specifies, partially, the
color of the annotation text. The value is specified as a
:class:`Color`. The default is white.
.. note::
The underlying firmware does not directly support setting all
components of the text color, only the Y' component of a `Y'UV`_
tuple. This is roughly (but not precisely) analogous to the
"brightness" of a color, so you may choose to think of this as
setting how bright the annotation text will be relative to its
background. In order to specify just the Y' component when setting
this attribute, you may choose to construct the
:class:`Color` instance as follows::
camera.annotate_foreground = picamera.Color(y=0.2, u=0, v=0)
.. _Y'UV: https://en.wikipedia.org/wiki/YUV
.. versionadded:: 1.10
""")
def _get_annotate_background(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if self._camera.annotate_rev == 3:
if mp.enable_text_background:
if mp.custom_background_color:
return Color.from_yuv_bytes(
mp.custom_background_Y,
mp.custom_background_U,
mp.custom_background_V)
else:
return Color('black')
else:
return None
else:
if mp.black_text_background:
return Color('black')
else:
return None
def _set_annotate_background(self, value):
self._check_camera_open()
if value is True:
warnings.warn(
PiCameraDeprecated(
'Setting PiCamera.annotate_background to True is '
'deprecated; use PiCamera.color.Color("black") instead'))
value = Color('black')
elif value is False:
warnings.warn(
PiCameraDeprecated(
'Setting PiCamera.annotate_background to False is '
'deprecated; use None instead'))
value = None
elif value is None:
pass
elif not isinstance(value, Color):
raise PiCameraValueError(
'annotate_background must be a Color or None')
elif self._camera.annotate_rev < 3 and value.rgb_bytes != (0, 0, 0):
warnings.warn(
PiCameraFallback(
"Firmware does not support setting a custom background "
"annotation color; using black instead"))
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if self._camera.annotate_rev == 3:
if value is None:
mp.enable_text_background = False
else:
mp.enable_text_background = True
mp.custom_background_color = True
(
mp.custom_background_Y,
mp.custom_background_U,
mp.custom_background_V,
) = value.yuv_bytes
else:
if value is None:
mp.black_text_background = False
else:
mp.black_text_background = True
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_background = property(
_get_annotate_background, _set_annotate_background, doc="""\
Controls what background is drawn behind the annotation.
The :attr:`annotate_background` attribute specifies if a background
will be drawn behind the :attr:`annotation text <annotate_text>` and,
if so, what color it will be. The value is specified as a
:class:`Color` or ``None`` if no background should be drawn. The
default is ``None``.
.. note::
For backward compatibility purposes, the value ``False`` will be
treated as ``None``, and the value ``True`` will be treated as the
color black. The "truthiness" of the values returned by the
attribute are backward compatible although the values themselves
are not.
.. versionadded:: 1.8
.. versionchanged:: 1.10
In prior versions this was a bool value with ``True`` representing
a black background.
""")
| 45.465936
| 146
| 0.609215
|
from __future__ import (
unicode_literals,
print_function,
division,
absolute_import,
)
str = type('')
import warnings
import datetime
import mimetypes
import ctypes as ct
import threading
from fractions import Fraction
from operator import itemgetter
from collections import namedtuple
from . import bcm_host, mmal, mmalobj as mo
from .exc import (
PiCameraError,
PiCameraValueError,
PiCameraRuntimeError,
PiCameraClosed,
PiCameraNotRecording,
PiCameraAlreadyRecording,
PiCameraMMALError,
PiCameraDeprecated,
PiCameraFallback,
)
from .encoders import (
PiVideoFrame,
PiVideoEncoder,
PiRawVideoEncoder,
PiCookedVideoEncoder,
PiRawOneImageEncoder,
PiRawMultiImageEncoder,
PiCookedOneImageEncoder,
PiCookedMultiImageEncoder,
)
from .renderers import (
PiPreviewRenderer,
PiOverlayRenderer,
PiNullSink,
)
from .color import Color
try:
from RPi import GPIO
except ImportError:
GPIO = None
def docstring_values(values, indent=8):
return ('\n' + ' ' * indent).join(
"* ``'%s'``" % k
for (k, v) in
sorted(values.items(), key=itemgetter(1)))
class PiCameraMaxResolution(object):
    # Sentinel singleton used to request the camera module's maximum
    # resolution (see PiCamera.MAX_RESOLUTION)
    pass
PiCameraMaxResolution = PiCameraMaxResolution()
class PiCameraMaxFramerate(object):
    # Sentinel singleton used to request the camera module's maximum
    # framerate (see PiCamera.MAX_FRAMERATE)
    pass
PiCameraMaxFramerate = PiCameraMaxFramerate()
class PiCamera(object):
CAMERA_PREVIEW_PORT = 0
CAMERA_VIDEO_PORT = 1
CAMERA_CAPTURE_PORT = 2
MAX_RESOLUTION = PiCameraMaxResolution # modified by PiCamera.__init__
MAX_FRAMERATE = PiCameraMaxFramerate # modified by PiCamera.__init__
DEFAULT_ANNOTATE_SIZE = 32
CAPTURE_TIMEOUT = 60
METER_MODES = {
'average': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_AVERAGE,
'spot': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_SPOT,
'backlit': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_BACKLIT,
'matrix': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_MATRIX,
}
EXPOSURE_MODES = {
'off': mmal.MMAL_PARAM_EXPOSUREMODE_OFF,
'auto': mmal.MMAL_PARAM_EXPOSUREMODE_AUTO,
'night': mmal.MMAL_PARAM_EXPOSUREMODE_NIGHT,
'nightpreview': mmal.MMAL_PARAM_EXPOSUREMODE_NIGHTPREVIEW,
'backlight': mmal.MMAL_PARAM_EXPOSUREMODE_BACKLIGHT,
'spotlight': mmal.MMAL_PARAM_EXPOSUREMODE_SPOTLIGHT,
'sports': mmal.MMAL_PARAM_EXPOSUREMODE_SPORTS,
'snow': mmal.MMAL_PARAM_EXPOSUREMODE_SNOW,
'beach': mmal.MMAL_PARAM_EXPOSUREMODE_BEACH,
'verylong': mmal.MMAL_PARAM_EXPOSUREMODE_VERYLONG,
'fixedfps': mmal.MMAL_PARAM_EXPOSUREMODE_FIXEDFPS,
'antishake': mmal.MMAL_PARAM_EXPOSUREMODE_ANTISHAKE,
'fireworks': mmal.MMAL_PARAM_EXPOSUREMODE_FIREWORKS,
}
FLASH_MODES = {
'off': mmal.MMAL_PARAM_FLASH_OFF,
'auto': mmal.MMAL_PARAM_FLASH_AUTO,
'on': mmal.MMAL_PARAM_FLASH_ON,
'redeye': mmal.MMAL_PARAM_FLASH_REDEYE,
'fillin': mmal.MMAL_PARAM_FLASH_FILLIN,
'torch': mmal.MMAL_PARAM_FLASH_TORCH,
}
AWB_MODES = {
'off': mmal.MMAL_PARAM_AWBMODE_OFF,
'auto': mmal.MMAL_PARAM_AWBMODE_AUTO,
'sunlight': mmal.MMAL_PARAM_AWBMODE_SUNLIGHT,
'cloudy': mmal.MMAL_PARAM_AWBMODE_CLOUDY,
'shade': mmal.MMAL_PARAM_AWBMODE_SHADE,
'tungsten': mmal.MMAL_PARAM_AWBMODE_TUNGSTEN,
'fluorescent': mmal.MMAL_PARAM_AWBMODE_FLUORESCENT,
'incandescent': mmal.MMAL_PARAM_AWBMODE_INCANDESCENT,
'flash': mmal.MMAL_PARAM_AWBMODE_FLASH,
'horizon': mmal.MMAL_PARAM_AWBMODE_HORIZON,
}
IMAGE_EFFECTS = {
'none': mmal.MMAL_PARAM_IMAGEFX_NONE,
'negative': mmal.MMAL_PARAM_IMAGEFX_NEGATIVE,
'solarize': mmal.MMAL_PARAM_IMAGEFX_SOLARIZE,
        # (A few effects that don't work are omitted from this mapping)
'sketch': mmal.MMAL_PARAM_IMAGEFX_SKETCH,
'denoise': mmal.MMAL_PARAM_IMAGEFX_DENOISE,
'emboss': mmal.MMAL_PARAM_IMAGEFX_EMBOSS,
'oilpaint': mmal.MMAL_PARAM_IMAGEFX_OILPAINT,
'hatch': mmal.MMAL_PARAM_IMAGEFX_HATCH,
'gpen': mmal.MMAL_PARAM_IMAGEFX_GPEN,
'pastel': mmal.MMAL_PARAM_IMAGEFX_PASTEL,
'watercolor': mmal.MMAL_PARAM_IMAGEFX_WATERCOLOUR,
'film': mmal.MMAL_PARAM_IMAGEFX_FILM,
'blur': mmal.MMAL_PARAM_IMAGEFX_BLUR,
'saturation': mmal.MMAL_PARAM_IMAGEFX_SATURATION,
'colorswap': mmal.MMAL_PARAM_IMAGEFX_COLOURSWAP,
'washedout': mmal.MMAL_PARAM_IMAGEFX_WASHEDOUT,
'posterise': mmal.MMAL_PARAM_IMAGEFX_POSTERISE,
'colorpoint': mmal.MMAL_PARAM_IMAGEFX_COLOURPOINT,
'colorbalance': mmal.MMAL_PARAM_IMAGEFX_COLOURBALANCE,
'cartoon': mmal.MMAL_PARAM_IMAGEFX_CARTOON,
'deinterlace1': mmal.MMAL_PARAM_IMAGEFX_DEINTERLACE_DOUBLE,
'deinterlace2': mmal.MMAL_PARAM_IMAGEFX_DEINTERLACE_ADV,
}
DRC_STRENGTHS = {
'off': mmal.MMAL_PARAMETER_DRC_STRENGTH_OFF,
'low': mmal.MMAL_PARAMETER_DRC_STRENGTH_LOW,
'medium': mmal.MMAL_PARAMETER_DRC_STRENGTH_MEDIUM,
'high': mmal.MMAL_PARAMETER_DRC_STRENGTH_HIGH,
}
RAW_FORMATS = {
'yuv',
'rgb',
'rgba',
'bgr',
'bgra',
}
STEREO_MODES = {
'none': mmal.MMAL_STEREOSCOPIC_MODE_NONE,
'side-by-side': mmal.MMAL_STEREOSCOPIC_MODE_SIDE_BY_SIDE,
        'top-bottom': mmal.MMAL_STEREOSCOPIC_MODE_TOP_BOTTOM,
}
CLOCK_MODES = {
'reset': mmal.MMAL_PARAM_TIMESTAMP_MODE_RESET_STC,
'raw': mmal.MMAL_PARAM_TIMESTAMP_MODE_RAW_STC,
}
_METER_MODES_R = {v: k for (k, v) in METER_MODES.items()}
_EXPOSURE_MODES_R = {v: k for (k, v) in EXPOSURE_MODES.items()}
_FLASH_MODES_R = {v: k for (k, v) in FLASH_MODES.items()}
_AWB_MODES_R = {v: k for (k, v) in AWB_MODES.items()}
_IMAGE_EFFECTS_R = {v: k for (k, v) in IMAGE_EFFECTS.items()}
_DRC_STRENGTHS_R = {v: k for (k, v) in DRC_STRENGTHS.items()}
_STEREO_MODES_R = {v: k for (k, v) in STEREO_MODES.items()}
_CLOCK_MODES_R = {v: k for (k, v) in CLOCK_MODES.items()}
__slots__ = (
'_used_led',
'_led_pin',
'_camera',
'_camera_config',
'_camera_exception',
'_revision',
'_preview',
'_preview_alpha',
'_preview_layer',
'_preview_fullscreen',
'_preview_window',
'_splitter',
'_splitter_connection',
'_encoders_lock',
'_encoders',
'_overlays',
'_raw_format',
'_image_effect_params',
'_exif_tags',
)
def __init__(
self, camera_num=0, stereo_mode='none', stereo_decimate=False,
resolution=None, framerate=None, sensor_mode=0, led_pin=None,
clock_mode='reset', framerate_range=None):
bcm_host.bcm_host_init()
mimetypes.add_type('application/h264', '.h264', False)
mimetypes.add_type('application/mjpeg', '.mjpg', False)
mimetypes.add_type('application/mjpeg', '.mjpeg', False)
self._used_led = False
if GPIO and led_pin is None:
try:
led_pin = {
(0, 0): 2,
(0, 1): 30,
(1, 0): 5,
(2, 0): 5,
(3, 0): 32,
}[(GPIO.RPI_REVISION, camera_num)]
except KeyError:
raise PiCameraError(
'Unable to determine default GPIO LED pin for RPi '
'revision %d and camera num %d' % (
GPIO.RPI_REVISION, camera_num))
self._led_pin = led_pin
self._camera = None
self._camera_config = None
self._camera_exception = None
self._preview = None
self._preview_alpha = 255
self._preview_layer = 2
self._preview_fullscreen = True
self._preview_window = None
self._splitter = None
self._splitter_connection = None
self._encoders_lock = threading.Lock()
self._encoders = {}
self._overlays = []
self._raw_format = 'yuv'
self._image_effect_params = None
with mo.MMALCameraInfo() as camera_info:
info = camera_info.control.params[mmal.MMAL_PARAMETER_CAMERA_INFO]
self._revision = 'ov5647'
if camera_info.info_rev > 1:
self._revision = info.cameras[camera_num].camera_name.decode('ascii')
self._exif_tags = {
'IFD0.Model': 'RP_%s' % self._revision,
'IFD0.Make': 'RaspberryPi',
}
if PiCamera.MAX_RESOLUTION is PiCameraMaxResolution:
PiCamera.MAX_RESOLUTION = mo.PiResolution(
info.cameras[camera_num].max_width,
info.cameras[camera_num].max_height,
)
if PiCamera.MAX_FRAMERATE is PiCameraMaxFramerate:
if self._revision.upper() == 'OV5647':
PiCamera.MAX_FRAMERATE = 90
else:
PiCamera.MAX_FRAMERATE = 120
if resolution is None:
w = ct.c_uint32()
h = ct.c_uint32()
if bcm_host.graphics_get_display_size(0, w, h) == -1:
w = 1280
h = 720
else:
w = int(w.value)
h = int(h.value)
resolution = mo.PiResolution(w, h)
elif resolution is PiCameraMaxResolution:
resolution = PiCamera.MAX_RESOLUTION
else:
resolution = mo.to_resolution(resolution)
if framerate_range is None:
if framerate is None:
framerate = 30
elif framerate is PiCameraMaxFramerate:
framerate = PiCamera.MAX_FRAMERATE
else:
framerate = mo.to_fraction(framerate)
elif framerate is not None:
raise PiCameraValueError(
"Can't specify framerate and framerate_range")
else:
try:
low, high = framerate_range
except TypeError:
raise PiCameraValueError(
"framerate_range must have (low, high) values")
if low is PiCameraMaxFramerate:
low = PiCamera.MAX_FRAMERATE
if high is PiCameraMaxFramerate:
high = PiCamera.MAX_FRAMERATE
framerate = (mo.to_fraction(low), mo.to_fraction(high))
try:
stereo_mode = self.STEREO_MODES[stereo_mode]
except KeyError:
raise PiCameraValueError('Invalid stereo mode: %s' % stereo_mode)
try:
clock_mode = self.CLOCK_MODES[clock_mode]
except KeyError:
raise PiCameraValueError('Invalid clock mode: %s' % clock_mode)
try:
self._init_camera(camera_num, stereo_mode, stereo_decimate)
self._configure_camera(sensor_mode, framerate, resolution, clock_mode)
self._init_preview()
self._init_splitter()
self._camera.enable()
self._init_defaults()
except:
self.close()
raise
def _init_led(self):
global GPIO
if GPIO:
try:
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(self._led_pin, GPIO.OUT, initial=GPIO.LOW)
self._used_led = True
except RuntimeError:
                # We're probably not running as root. In this case, forget
                # the GPIO module so no further LED control is attempted
GPIO = None
def _init_camera(self, num, stereo_mode, stereo_decimate):
try:
self._camera = mo.MMALCamera()
except PiCameraMMALError as e:
if e.status == mmal.MMAL_ENOMEM:
raise PiCameraError(
"Camera is not enabled. Try running 'sudo raspi-config' "
"and ensure that the camera has been enabled.")
else:
raise
self._camera_config = self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG]
        # Don't attempt to set this if stereo mode isn't requested, as doing
        # so can break compatibility on older firmwares
if stereo_mode != mmal.MMAL_STEREOSCOPIC_MODE_NONE:
for p in self._camera.outputs:
mp = mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE,
ct.sizeof(mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE_T),
),
mode=stereo_mode,
decimate=stereo_decimate,
swap_eyes=False,
)
p.params[mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE] = mp
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_NUM] = num
def _init_defaults(self):
self.sharpness = 0
self.contrast = 0
self.brightness = 50
self.saturation = 0
self.iso = 0
self.video_stabilization = False
self.exposure_compensation = 0
self.exposure_mode = 'auto'
self.meter_mode = 'average'
self.awb_mode = 'auto'
self.image_effect = 'none'
self.color_effects = None
self.rotation = 0
self.hflip = self.vflip = False
self.zoom = (0.0, 0.0, 1.0, 1.0)
def _init_splitter(self):
self._splitter = mo.MMALSplitter()
self._splitter.inputs[0].connect(
self._camera.outputs[self.CAMERA_VIDEO_PORT]).enable()
def _init_preview(self):
        # Connect a null sink to the preview port even though no preview is
        # required; without an active preview connection the camera doesn't
        # measure exposure and captured images gradually fade to black
self._preview = PiNullSink(
self, self._camera.outputs[self.CAMERA_PREVIEW_PORT])
def _start_capture(self, port):
        # Only enable capture if the port is the camera's still port, or if
        # there's a single active encoder on the video splitter
if (
port == self._camera.outputs[self.CAMERA_CAPTURE_PORT] or
len([e for e in self._encoders.values() if e.active]) == 1):
port.params[mmal.MMAL_PARAMETER_CAPTURE] = True
def _stop_capture(self, port):
        # Only disable capture if the port is the camera's still port, or if
        # there's a single active encoder on the video splitter
if (
port == self._camera.outputs[self.CAMERA_CAPTURE_PORT] or
len([e for e in self._encoders.values() if e.active]) == 1):
port.params[mmal.MMAL_PARAMETER_CAPTURE] = False
def _check_camera_open(self):
exc, self._camera_exception = self._camera_exception, None
if exc:
raise exc
if self.closed:
raise PiCameraClosed("Camera is closed")
def _check_recording_stopped(self):
if self.recording:
raise PiCameraRuntimeError("Recording is currently running")
def _get_ports(self, from_video_port, splitter_port):
self._check_camera_open()
if from_video_port and (splitter_port in self._encoders):
raise PiCameraAlreadyRecording(
'The camera is already using port %d ' % splitter_port)
camera_port = (
self._camera.outputs[self.CAMERA_VIDEO_PORT]
if from_video_port else
self._camera.outputs[self.CAMERA_CAPTURE_PORT]
)
output_port = (
self._splitter.outputs[splitter_port]
if from_video_port else
camera_port
)
return (camera_port, output_port)
def _get_output_format(self, output):
if isinstance(output, bytes):
filename = output.decode('utf-8')
elif isinstance(output, str):
filename = output
else:
try:
filename = output.name
except AttributeError:
raise PiCameraValueError(
'Format must be specified when output has no filename')
(type, encoding) = mimetypes.guess_type(filename, strict=False)
if not type:
raise PiCameraValueError(
'Unable to determine type from filename %s' % filename)
return type
def _get_image_format(self, output, format=None):
if isinstance(format, bytes):
format = format.decode('utf-8')
format = format or self._get_output_format(output)
format = (
format[6:] if format.startswith('image/') else
format)
if format == 'x-ms-bmp':
format = 'bmp'
if format == 'raw':
format = self.raw_format
return format
def _get_video_format(self, output, format=None):
if isinstance(format, bytes):
format = format.decode('utf-8')
format = format or self._get_output_format(output)
format = (
format[6:] if format.startswith('video/') else
format[12:] if format.startswith('application/') else
format)
return format
def _get_image_encoder(
self, camera_port, output_port, format, resize, **options):
encoder_class = (
PiRawOneImageEncoder if format in self.RAW_FORMATS else
PiCookedOneImageEncoder)
return encoder_class(
self, camera_port, output_port, format, resize, **options)
def _get_images_encoder(
self, camera_port, output_port, format, resize, **options):
encoder_class = (
PiRawMultiImageEncoder if format in self.RAW_FORMATS else
PiCookedMultiImageEncoder)
return encoder_class(
self, camera_port, output_port, format, resize, **options)
def _get_video_encoder(
self, camera_port, output_port, format, resize, **options):
encoder_class = (
PiRawVideoEncoder if format in self.RAW_FORMATS else
PiCookedVideoEncoder)
return encoder_class(
self, camera_port, output_port, format, resize, **options)
def close(self):
for port in list(self._encoders):
self.stop_recording(splitter_port=port)
assert not self.recording
for overlay in list(self._overlays):
self.remove_overlay(overlay)
if self._preview:
self._preview.close()
self._preview = None
if self._splitter:
self._splitter.close()
self._splitter = None
if self._camera:
self._camera.close()
self._camera = None
exc, self._camera_exception = self._camera_exception, None
if exc:
raise exc
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
def start_preview(self, **options):
self._check_camera_open()
self._preview.close()
options.setdefault('layer', self._preview_layer)
options.setdefault('alpha', self._preview_alpha)
options.setdefault('fullscreen', self._preview_fullscreen)
options.setdefault('window', self._preview_window)
renderer = PiPreviewRenderer(
self, self._camera.outputs[self.CAMERA_PREVIEW_PORT], **options)
self._preview = renderer
return renderer
def stop_preview(self):
self._check_camera_open()
self._preview.close()
self._preview = PiNullSink(
self, self._camera.outputs[self.CAMERA_PREVIEW_PORT])
def add_overlay(self, source, size=None, format=None, **options):
self._check_camera_open()
renderer = PiOverlayRenderer(self, source, size, format, **options)
self._overlays.append(renderer)
return renderer
def remove_overlay(self, overlay):
if not overlay in self._overlays:
raise PiCameraValueError(
"The specified overlay is not owned by this instance of "
"PiCamera")
overlay.close()
self._overlays.remove(overlay)
def start_recording(
self, output, format=None, resize=None, splitter_port=1, **options):
if 'quantization' in options:
warnings.warn(
PiCameraDeprecated(
'The quantization option is deprecated; please use '
'quality instead (same value)'))
with self._encoders_lock:
camera_port, output_port = self._get_ports(True, splitter_port)
format = self._get_video_format(output, format)
encoder = self._get_video_encoder(
camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
try:
encoder.start(output, options.get('motion_output'))
except Exception as e:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
raise
def split_recording(self, output, splitter_port=1, **options):
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
encoder.split(output, options.get('motion_output'))
def request_key_frame(self, splitter_port=1):
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
encoder.request_key_frame()
def wait_recording(self, timeout=0, splitter_port=1):
assert timeout is not None
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
encoder.wait(timeout)
def stop_recording(self, splitter_port=1):
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
try:
self.wait_recording(0, splitter_port)
finally:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
def record_sequence(
self, outputs, format='h264', resize=None, splitter_port=1, **options):
with self._encoders_lock:
camera_port, output_port = self._get_ports(True, splitter_port)
format = self._get_video_format('', format)
encoder = self._get_video_encoder(
camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
try:
start = True
for output in outputs:
if start:
start = False
encoder.start(output, options.get('motion_output'))
else:
encoder.split(output)
yield output
finally:
try:
encoder.wait(0)
finally:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
def capture(
self, output, format=None, use_video_port=False, resize=None,
splitter_port=0, bayer=False, **options):
if format == 'raw':
warnings.warn(
PiCameraDeprecated(
'The "raw" format option is deprecated; specify the '
'required format directly instead ("yuv", "rgb", etc.)'))
if use_video_port and bayer:
raise PiCameraValueError(
'bayer is only valid with still port captures')
if 'burst' in options:
raise PiCameraValueError(
'burst is only valid with capture_sequence or capture_continuous')
with self._encoders_lock:
camera_port, output_port = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format(output, format)
encoder = self._get_image_encoder(
camera_port, output_port, format, resize, **options)
if use_video_port:
self._encoders[splitter_port] = encoder
try:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port]
def capture_sequence(
self, outputs, format='jpeg', use_video_port=False, resize=None,
splitter_port=0, burst=False, bayer=False, **options):
if use_video_port:
if burst:
raise PiCameraValueError(
'burst is only valid with still port captures')
if bayer:
raise PiCameraValueError(
'bayer is only valid with still port captures')
with self._encoders_lock:
camera_port, output_port = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format('', format)
if use_video_port:
encoder = self._get_images_encoder(
camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
else:
encoder = self._get_image_encoder(
camera_port, output_port, format, resize, **options)
try:
if use_video_port:
encoder.start(outputs)
encoder.wait()
else:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = True
try:
for output in outputs:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
finally:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = False
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port]
def capture_continuous(
self, output, format=None, use_video_port=False, resize=None,
splitter_port=0, burst=False, bayer=False, **options):
if use_video_port:
if burst:
raise PiCameraValueError(
'burst is only valid with still port captures')
if bayer:
raise PiCameraValueError(
'bayer is only valid with still port captures')
with self._encoders_lock:
camera_port, output_port = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format(output, format)
encoder = self._get_image_encoder(
camera_port, output_port, format, resize, **options)
if use_video_port:
self._encoders[splitter_port] = encoder
try:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = True
try:
if isinstance(output, bytes):
output = output.decode('utf-8')
if isinstance(output, str):
counter = 1
while True:
filename = output.format(
counter=counter,
timestamp=datetime.datetime.now(),
)
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(filename)
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
yield filename
counter += 1
else:
while True:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
yield output
finally:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = False
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port]
@property
def closed(self):
return not self._camera
@property
def recording(self):
return any(
isinstance(e, PiVideoEncoder) and e.active
for e in self._encoders.values()
)
@property
def previewing(self):
warnings.warn(
PiCameraDeprecated(
'PiCamera.previewing is deprecated; test PiCamera.preview '
'is not None instead'))
return isinstance(self._preview, PiPreviewRenderer)
@property
def revision(self):
return self._revision
@property
def exif_tags(self):
return self._exif_tags
def _set_led(self, value):
if not self._used_led:
self._init_led()
if not GPIO:
raise PiCameraRuntimeError(
"GPIO library not found, or not accessible; please install "
"RPi.GPIO and run the script as root")
GPIO.output(self._led_pin, bool(value))
led = property(None, _set_led, doc="""
Sets the state of the camera's LED via GPIO.
If a GPIO library is available (only RPi.GPIO is currently supported),
and if the python process has the necessary privileges (typically this
means running as root via sudo), this property can be used to set the
state of the camera's LED as a boolean value (``True`` is on, ``False``
is off).
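        For example, assuming RPi.GPIO is installed and the script runs with
        sufficient privileges, the LED could be switched off as follows (an
        illustrative sketch)::
            import picamera
            camera = picamera.PiCamera()
            camera.led = False  # turn the camera's LED off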
.. note::
This is a write-only property. While it can be used to control the
camera's LED, you cannot query the state of the camera's LED using
this property.
.. note::
At present, the camera's LED cannot be controlled on the Pi 3
(the GPIOs used to control the camera LED were re-routed to GPIO
expander on the Pi 3).
.. warning::
There are circumstances in which the camera firmware may override
an existing LED setting. For example, in the case that the firmware
resets the camera (as can happen with a CSI-2 timeout), the LED may
also be reset. If you wish to guarantee that the LED remain off at
all times, you may prefer to use the ``disable_camera_led`` option
in `config.txt`_ (this has the added advantage that sudo privileges
and GPIO access are not required, at least for LED control).
.. _config.txt: https://www.raspberrypi.org/documentation/configuration/config-txt.md
""")
def _get_raw_format(self):
warnings.warn(
PiCameraDeprecated(
'PiCamera.raw_format is deprecated; use required format '
'directly with capture methods instead'))
return self._raw_format
def _set_raw_format(self, value):
warnings.warn(
PiCameraDeprecated(
'PiCamera.raw_format is deprecated; use required format '
'directly with capture methods instead'))
if value not in self.RAW_FORMATS:
raise PiCameraValueError("Invalid raw format: %s" % value)
self._raw_format = value
raw_format = property(_get_raw_format, _set_raw_format, doc="""
Retrieves or sets the raw format of the camera's ports.
.. deprecated:: 1.0
Please use ``'yuv'`` or ``'rgb'`` directly as a format in the
various capture methods instead.
""")
def _get_timestamp(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_SYSTEM_TIME]
timestamp = property(_get_timestamp, doc="""
Retrieves the system time according to the camera firmware.
The camera's timestamp is a 64-bit integer representing the number of
microseconds since the last system boot. When the camera's
:attr:`clock_mode` is ``'raw'`` the values returned by this attribute
are comparable to those from the :attr:`frame`
:attr:`~PiVideoFrame.timestamp` attribute.
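        For example, assuming ``camera`` is an open :class:`PiCamera`
        instance, the firmware clock could be read as follows (an
        illustrative sketch)::
            st = camera.timestamp
            print('Camera clock: %.3f seconds since boot' % (st / 1000000.0))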
""")
def _get_frame(self):
self._check_camera_open()
for e in self._encoders.values():
try:
return e.frame
except AttributeError:
pass
raise PiCameraRuntimeError(
"Cannot query frame information when camera is not recording")
frame = property(_get_frame, doc="""
Retrieves information about the current frame recorded from the camera.
When video recording is active (after a call to
:meth:`start_recording`), this attribute will return a
:class:`PiVideoFrame` tuple containing information about the current
frame that the camera is recording.
If multiple video recordings are currently in progress (after multiple
calls to :meth:`start_recording` with different values for the
``splitter_port`` parameter), which encoder's frame information is
returned is arbitrary. If you require information from a specific
encoder, you will need to extract it from :attr:`_encoders` explicitly.
Querying this property when the camera is not recording will result in
an exception.
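        For example, assuming ``camera`` is a configured :class:`PiCamera`
        instance, the current frame index could be inspected during a
        recording as follows (an illustrative sketch)::
            camera.start_recording('video.h264')
            camera.wait_recording(5)
            f = camera.frame
            if f is not None:
                print('Current frame index: %d' % f.index)
            camera.stop_recording()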
.. note::
There is a small window of time when querying this attribute will
return ``None`` after calling :meth:`start_recording`. If this
attribute returns ``None``, this means that the video encoder has
been initialized, but the camera has not yet returned any frames.
""")
def _disable_camera(self):
self._splitter.connection.disable()
self._preview.renderer.connection.disable()
self._camera.disable()
def _enable_camera(self):
self._camera.enable()
self._preview.renderer.connection.enable()
self._splitter.connection.enable()
def _configure_splitter(self):
self._splitter.inputs[0].copy_from(self._camera.outputs[self.CAMERA_VIDEO_PORT])
self._splitter.inputs[0].commit()
def _control_callback(self, port, buf):
try:
if buf.command == mmal.MMAL_EVENT_ERROR:
raise PiCameraRuntimeError(
"No data recevied from sensor. Check all connections, "
"including the SUNNY chip on the camera board")
elif buf.command != mmal.MMAL_EVENT_PARAMETER_CHANGED:
raise PiCameraRuntimeError(
"Received unexpected camera control callback event, 0x%08x" % buf[0].cmd)
except Exception as exc:
# Pass the exception to the main thread; next time
# check_camera_open() is called this will get raised
self._camera_exception = exc
def _configure_camera(
self, sensor_mode, framerate, resolution, clock_mode,
old_sensor_mode=0):
old_cc = mmal.MMAL_PARAMETER_CAMERA_CONFIG_T.from_buffer_copy(self._camera_config)
old_ports = [
(port.framesize, port.framerate, port.params[mmal.MMAL_PARAMETER_FPS_RANGE])
for port in self._camera.outputs
]
if old_sensor_mode != 0 or sensor_mode != 0:
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG] = sensor_mode
if not self._camera.control.enabled:
# Initial setup
self._camera.control.enable(self._control_callback)
preview_resolution = resolution
elif (
self._camera.outputs[self.CAMERA_PREVIEW_PORT].framesize ==
self._camera.outputs[self.CAMERA_VIDEO_PORT].framesize
):
preview_resolution = resolution
else:
preview_resolution = self._camera.outputs[self.CAMERA_PREVIEW_PORT].framesize
try:
try:
fps_low, fps_high = framerate
except TypeError:
fps_low = fps_high = framerate
else:
framerate = 0
fps_range = mmal.MMAL_PARAMETER_FPS_RANGE_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_FPS_RANGE,
ct.sizeof(mmal.MMAL_PARAMETER_FPS_RANGE_T)
),
fps_low=mo.to_rational(fps_low),
fps_high=mo.to_rational(fps_high),
)
cc = self._camera_config
cc.max_stills_w = resolution.width
cc.max_stills_h = resolution.height
cc.stills_yuv422 = 0
cc.one_shot_stills = 1
cc.max_preview_video_w = resolution.width
cc.max_preview_video_h = resolution.height
cc.num_preview_video_frames = max(3, fps_high // 10)
cc.stills_capture_circular_buffer_height = 0
cc.fast_preview_resume = 0
cc.use_stc_timestamp = clock_mode
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG] = cc
# Clamp preview resolution to camera's resolution
if (
preview_resolution.width > resolution.width or
preview_resolution.height > resolution.height
):
preview_resolution = resolution
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_FPS_RANGE] = fps_range
if port.index == self.CAMERA_PREVIEW_PORT:
port.framesize = preview_resolution
else:
port.framesize = resolution
port.framerate = framerate
port.commit()
except:
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG] = old_cc
self._camera_config = old_cc
for port, (res, fps, fps_range) in zip(self._camera.outputs, old_ports):
port.framesize = res
port.framerate = fps
port.params[mmal.MMAL_PARAMETER_FPS_RANGE] = fps_range
port.commit()
raise
def _get_framerate(self):
self._check_camera_open()
port_num = (
self.CAMERA_VIDEO_PORT
if self._encoders else
self.CAMERA_PREVIEW_PORT
)
return mo.PiCameraFraction(self._camera.outputs[port_num].framerate)
def _set_framerate(self, value):
self._check_camera_open()
self._check_recording_stopped()
value = mo.to_fraction(value, den_limit=256)
if not (0 < value <= self.MAX_FRAMERATE):
raise PiCameraValueError("Invalid framerate: %.2ffps" % value)
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
resolution = self.resolution
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=value, resolution=resolution,
clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
framerate = property(_get_framerate, _set_framerate, doc="""\
Retrieves or sets the framerate at which video-port based image
captures, video recordings, and previews will run.
When queried, the :attr:`framerate` property returns the rate at which
the camera's video and preview ports will operate as a
:class:`~fractions.Fraction` instance (which can be easily converted to
an :class:`int` or :class:`float`). If :attr:`framerate_range` has been
set, then :attr:`framerate` will be 0 which indicates that a dynamic
range of framerates is being used.
.. note::
For backwards compatibility, a derivative of the
:class:`~fractions.Fraction` class is actually used which permits
the value to be treated as a tuple of ``(numerator, denominator)``.
Setting and retrieving framerate as a ``(numerator, denominator)``
tuple is deprecated and will be removed in 2.0. Please use a
:class:`~fractions.Fraction` instance instead (which is just as
accurate and also permits direct use with math operators).
When set, the property configures the camera so that the next call to
recording and previewing methods will use the new framerate. Setting
this property implicitly sets :attr:`framerate_range` so that the low
and high values are equal to the new framerate. The framerate can be
specified as an :ref:`int <typesnumeric>`, :ref:`float <typesnumeric>`,
:class:`~fractions.Fraction`, or a ``(numerator, denominator)`` tuple.
For example, the following definitions are all equivalent::
from fractions import Fraction
camera.framerate = 30
camera.framerate = 30 / 1
camera.framerate = Fraction(30, 1)
camera.framerate = (30, 1) # deprecated
The camera must not be closed, and no recording must be active when the
property is set.
.. note::
This attribute, in combination with :attr:`resolution`, determines
the mode that the camera operates in. The actual sensor framerate
        and resolution used by the camera are influenced, but not directly
set, by this property. See :attr:`sensor_mode` for more
information.
The initial value of this property can be specified with the
*framerate* parameter in the :class:`PiCamera` constructor, and will
default to 30 if not specified.
""")
def _get_sensor_mode(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG]
def _set_sensor_mode(self, value):
self._check_camera_open()
self._check_recording_stopped()
try:
if not (0 <= value <= 7):
raise PiCameraValueError(
"Invalid sensor mode: %d (valid range 0..7)" % value)
except TypeError:
raise PiCameraValueError("Invalid sensor mode: %s" % value)
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
resolution = self.resolution
framerate = Fraction(self.framerate)
if framerate == 0:
framerate = self.framerate_range
self._disable_camera()
self._configure_camera(
old_sensor_mode=sensor_mode, sensor_mode=value,
framerate=framerate, resolution=resolution,
clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
sensor_mode = property(_get_sensor_mode, _set_sensor_mode, doc="""\
Retrieves or sets the input mode of the camera's sensor.
This is an advanced property which can be used to control the camera's
sensor mode. By default, mode 0 is used which allows the camera to
automatically select an input mode based on the requested
:attr:`resolution` and :attr:`framerate`. Valid values are currently
between 0 and 7. The set of valid sensor modes (along with the
heuristic used to select one automatically) are detailed in the
:ref:`camera_modes` section of the documentation.
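        For example, assuming ``picamera`` has been imported and a V2 camera
        module is attached, a specific mode could be requested at
        construction time and changed later as follows (an illustrative
        sketch)::
            camera = picamera.PiCamera(sensor_mode=2)
            camera.sensor_mode = 5
            camera.sensor_mode = 5  # some transitions require setting twice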
.. note::
At the time of writing, setting this property does nothing unless
the camera has been initialized with a sensor mode other than 0.
Furthermore, some mode transitions appear to require setting the
property twice (in a row). This appears to be a firmware
limitation.
The initial value of this property can be specified with the
*sensor_mode* parameter in the :class:`PiCamera` constructor, and will
default to 0 if not specified.
.. versionadded:: 1.9
""")
def _get_clock_mode(self):
self._check_camera_open()
return self._CLOCK_MODES_R[self._camera_config.use_stc_timestamp]
def _set_clock_mode(self, value):
self._check_camera_open()
self._check_recording_stopped()
try:
clock_mode = self.CLOCK_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid clock mode %s" % value)
sensor_mode = self.sensor_mode
framerate = Fraction(self.framerate)
if framerate == 0:
framerate = self.framerate_range
resolution = self.resolution
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=framerate,
resolution=resolution, clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
clock_mode = property(_get_clock_mode, _set_clock_mode, doc="""\
Retrieves or sets the mode of the camera's clock.
This is an advanced property which can be used to control the nature of
the frame timestamps available from the :attr:`frame` property. When
this is "reset" (the default) each frame's timestamp will be relative
to the start of the recording. When this is "raw", each frame's
timestamp will be relative to the last initialization of the camera.
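        For example, assuming ``picamera`` has been imported, raw timestamps
        could be requested as follows (an illustrative sketch)::
            camera = picamera.PiCamera(clock_mode='raw')
            camera.start_recording('video.h264')
            # camera.frame.timestamp is now comparable to camera.timestamp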
The initial value of this property can be specified with the
*clock_mode* parameter in the :class:`PiCamera` constructor, and will
default to "reset" if not specified.
.. versionadded:: 1.11
""")
def _get_resolution(self):
self._check_camera_open()
return mo.PiResolution(
int(self._camera_config.max_stills_w),
int(self._camera_config.max_stills_h)
)
def _set_resolution(self, value):
self._check_camera_open()
self._check_recording_stopped()
value = mo.to_resolution(value)
if not (
(0 < value.width <= self.MAX_RESOLUTION.width) and
(0 < value.height <= self.MAX_RESOLUTION.height)):
raise PiCameraValueError(
"Invalid resolution requested: %r" % (value,))
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
framerate = Fraction(self.framerate)
if framerate == 0:
framerate = self.framerate_range
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=framerate,
resolution=value, clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
resolution = property(_get_resolution, _set_resolution, doc="""
Retrieves or sets the resolution at which image captures, video
recordings, and previews will be captured.
When queried, the :attr:`resolution` property returns the resolution at
which the camera will operate as a tuple of ``(width, height)``
measured in pixels. This is the resolution that the :meth:`capture`
method will produce images at, and the resolution that
:meth:`start_recording` will produce videos at.
When set, the property configures the camera so that the next call to
these methods will use the new resolution. The resolution can be
specified as a ``(width, height)`` tuple, as a string formatted
``'WIDTHxHEIGHT'``, or as a string containing a commonly recognized
`display resolution`_ name (e.g. "VGA", "HD", "1080p", etc). For
example, the following definitions are all equivalent::
camera.resolution = (1280, 720)
camera.resolution = '1280x720'
camera.resolution = '1280 x 720'
camera.resolution = 'HD'
camera.resolution = '720p'
The camera must not be closed, and no recording must be active when the
property is set.
.. note::
This attribute, in combination with :attr:`framerate`, determines
the mode that the camera operates in. The actual sensor framerate
        and resolution used by the camera are influenced, but not directly
set, by this property. See :attr:`sensor_mode` for more
information.
The initial value of this property can be specified with the
*resolution* parameter in the :class:`PiCamera` constructor, and will
default to the display's resolution or 1280x720 if the display has
been disabled (with ``tvservice -o``).
.. versionchanged:: 1.11
Resolution permitted to be set as a string. Preview resolution
added as separate property.
.. _display resolution: https://en.wikipedia.org/wiki/Graphics_display_resolution
""")
def _get_framerate_range(self):
self._check_camera_open()
port_num = (
self.CAMERA_VIDEO_PORT
if self._encoders else
self.CAMERA_PREVIEW_PORT
)
mp = self._camera.outputs[port_num].params[mmal.MMAL_PARAMETER_FPS_RANGE]
return mo.PiFramerateRange(
mo.to_fraction(mp.fps_low), mo.to_fraction(mp.fps_high))
def _set_framerate_range(self, value):
self._check_camera_open()
self._check_recording_stopped()
low, high = value
low = mo.to_fraction(low, den_limit=256)
high = mo.to_fraction(high, den_limit=256)
if not (0 < low <= self.MAX_FRAMERATE):
raise PiCameraValueError("Invalid low framerate: %.2ffps" % low)
if not (0 < high <= self.MAX_FRAMERATE):
raise PiCameraValueError("Invalid high framerate: %.2ffps" % high)
if high < low:
raise PiCameraValueError("framerate_range is backwards")
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
resolution = self.resolution
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=(low, high),
resolution=resolution, clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
framerate_range = property(_get_framerate_range, _set_framerate_range, doc="""\
Retrieves or sets a range between which the camera's framerate is
allowed to float.
When queried, the :attr:`framerate_range` property returns a
:func:`~collections.namedtuple` derivative with ``low`` and ``high``
components (index 0 and 1 respectively) which specify the limits of the
permitted framerate range.
When set, the property configures the camera so that the next call to
recording and previewing methods will use the new framerate range.
Setting this property will implicitly set the :attr:`framerate`
property to 0 (indicating that a dynamic range of framerates is in use
by the camera).
.. note::
Use of this property prevents use of :attr:`framerate_delta` (there
would be little point in making fractional adjustments to the
framerate when the framerate itself is variable).
The low and high framerates can be specified as :ref:`int
<typesnumeric>`, :ref:`float <typesnumeric>`, or
:class:`~fractions.Fraction` values. For example, the following
definitions are all equivalent::
from fractions import Fraction
camera.framerate_range = (0.16666, 30)
camera.framerate_range = (Fraction(1, 6), 30 / 1)
camera.framerate_range = (Fraction(1, 6), Fraction(30, 1))
The camera must not be closed, and no recording must be active when the
property is set.
.. note::
This attribute, like :attr:`framerate`, determines the mode that
the camera operates in. The actual sensor framerate and resolution
        used by the camera are influenced, but not directly set, by this
property. See :attr:`sensor_mode` for more information.
.. versionadded:: 1.13
""")
def _get_framerate_delta(self):
self._check_camera_open()
if self.framerate == 0:
raise PiCameraValueError(
'framerate_delta cannot be used with framerate_range')
port_num = (
self.CAMERA_VIDEO_PORT
if self._encoders else
self.CAMERA_PREVIEW_PORT
)
return self._camera.outputs[port_num].params[mmal.MMAL_PARAMETER_FRAME_RATE] - self.framerate
def _set_framerate_delta(self, value):
self._check_camera_open()
if self.framerate == 0:
raise PiCameraValueError(
'framerate_delta cannot be used with framerate_range')
value = mo.to_fraction(self.framerate + value, den_limit=256)
self._camera.outputs[self.CAMERA_PREVIEW_PORT].params[mmal.MMAL_PARAMETER_FRAME_RATE] = value
self._camera.outputs[self.CAMERA_VIDEO_PORT].params[mmal.MMAL_PARAMETER_FRAME_RATE] = value
framerate_delta = property(_get_framerate_delta, _set_framerate_delta, doc="""\
Retrieves or sets a fractional amount that is added to the camera's
framerate for the purpose of minor framerate adjustments.
When queried, the :attr:`framerate_delta` property returns the amount
that the camera's :attr:`framerate` has been adjusted. This defaults
to 0 (so the camera's framerate is the actual framerate used).
When set, the property adjusts the camera's framerate on the fly. The
property can be set while recordings or previews are in progress. Thus
the framerate used by the camera is actually :attr:`framerate` +
:attr:`framerate_delta`.
.. note::
Framerates deltas can be fractional with adjustments as small as
1/256th of an fps possible (finer adjustments will be rounded).
With an appropriately tuned PID controller, this can be used to
achieve synchronization between the camera framerate and other
devices.
If the new framerate demands a mode switch (such as moving between a
low framerate and a high framerate mode), currently active recordings
may drop a frame. This should only happen when specifying quite large
deltas, or when framerate is at the boundary of a sensor mode (e.g.
49fps).
The framerate delta can be specified as an :ref:`int <typesnumeric>`,
:ref:`float <typesnumeric>`, :class:`~fractions.Fraction` or a
``(numerator, denominator)`` tuple. For example, the following
definitions are all equivalent::
from fractions import Fraction
camera.framerate_delta = 0.5
camera.framerate_delta = 1 / 2 # in python 3
camera.framerate_delta = Fraction(1, 2)
camera.framerate_delta = (1, 2) # deprecated
.. note::
This property is implicitly reset to 0 when :attr:`framerate` or
:attr:`framerate_range` is set. When :attr:`framerate` is 0
(indicating that :attr:`framerate_range` is set), this property
        cannot be used (there would be little point in making fractional
        adjustments to the framerate when the framerate itself is
        variable).
.. versionadded:: 1.11
""")
def _get_still_stats(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_CAPTURE_STATS_PASS]
def _set_still_stats(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_CAPTURE_STATS_PASS] = value
still_stats = property(_get_still_stats, _set_still_stats, doc="""\
Retrieves or sets whether statistics will be calculated from still
frames or the prior preview frame.
When queried, the :attr:`still_stats` property returns a boolean value
indicating when scene statistics will be calculated for still captures
(that is, captures where the *use_video_port* parameter of
:meth:`capture` is ``False``). When this property is ``False`` (the
default), statistics will be calculated from the preceding preview
        frame (this also applies when the preview is not visible). When
        ``True``, statistics will be calculated from the captured image itself.
        When set, the property controls when scene statistics will be
calculated for still captures. The property can be set while recordings
or previews are in progress. The default value is ``False``.
The advantages to calculating scene statistics from the captured image
are that time between startup and capture is reduced as only the AGC
(automatic gain control) has to converge. The downside is that
processing time for captures increases and that white balance and gain
won't necessarily match the preview.
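        For example, assuming ``camera`` is an open :class:`PiCamera`
        instance, statistics could be taken from the captured frame itself
        as follows (an illustrative sketch)::
            camera.still_stats = True
            camera.capture('stats_from_capture.jpg')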
.. warning::
Enabling the still statistics pass will `override fixed white
balance`_ gains (set via :attr:`awb_gains` and :attr:`awb_mode`).
.. _override fixed white balance: https://www.raspberrypi.org/forums/viewtopic.php?p=875772&sid=92fa4ea70d1fe24590a4cdfb4a10c489#p875772
.. versionadded:: 1.9
""")
def _get_saturation(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_SATURATION] * 100)
def _set_saturation(self, value):
self._check_camera_open()
if not (-100 <= value <= 100):
raise PiCameraValueError(
"Invalid saturation value: %d (valid range -100..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_SATURATION] = Fraction(value, 100)
saturation = property(_get_saturation, _set_saturation, doc="""\
Retrieves or sets the saturation setting of the camera.
When queried, the :attr:`saturation` property returns the color
saturation of the camera as an integer between -100 and 100. When set,
the property adjusts the saturation of the camera. Saturation can be
adjusted while previews or recordings are in progress. The default
value is 0.
""")
def _get_sharpness(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_SHARPNESS] * 100)
def _set_sharpness(self, value):
self._check_camera_open()
if not (-100 <= value <= 100):
raise PiCameraValueError(
"Invalid sharpness value: %d (valid range -100..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_SHARPNESS] = Fraction(value, 100)
sharpness = property(_get_sharpness, _set_sharpness, doc="""\
Retrieves or sets the sharpness setting of the camera.
When queried, the :attr:`sharpness` property returns the sharpness
level of the camera (a measure of the amount of post-processing to
reduce or increase image sharpness) as an integer between -100 and 100.
When set, the property adjusts the sharpness of the camera. Sharpness
can be adjusted while previews or recordings are in progress. The
default value is 0.
""")
def _get_contrast(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_CONTRAST] * 100)
def _set_contrast(self, value):
self._check_camera_open()
if not (-100 <= value <= 100):
raise PiCameraValueError(
"Invalid contrast value: %d (valid range -100..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_CONTRAST] = Fraction(value, 100)
contrast = property(_get_contrast, _set_contrast, doc="""\
Retrieves or sets the contrast setting of the camera.
When queried, the :attr:`contrast` property returns the contrast level
of the camera as an integer between -100 and 100. When set, the
property adjusts the contrast of the camera. Contrast can be adjusted
while previews or recordings are in progress. The default value is 0.
""")
def _get_brightness(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_BRIGHTNESS] * 100)
def _set_brightness(self, value):
self._check_camera_open()
if not (0 <= value <= 100):
raise PiCameraValueError(
"Invalid brightness value: %d (valid range 0..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_BRIGHTNESS] = Fraction(value, 100)
brightness = property(_get_brightness, _set_brightness, doc="""\
Retrieves or sets the brightness setting of the camera.
When queried, the :attr:`brightness` property returns the brightness
level of the camera as an integer between 0 and 100. When set, the
property adjusts the brightness of the camera. Brightness can be
adjusted while previews or recordings are in progress. The default
value is 50.
""")
def _get_shutter_speed(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_SHUTTER_SPEED])
def _set_shutter_speed(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_SHUTTER_SPEED] = value
shutter_speed = property(_get_shutter_speed, _set_shutter_speed, doc="""\
Retrieves or sets the shutter speed of the camera in microseconds.
When queried, the :attr:`shutter_speed` property returns the shutter
speed of the camera in microseconds, or 0 which indicates that the
speed will be automatically determined by the auto-exposure algorithm.
Faster shutter times naturally require greater amounts of illumination
and vice versa.
When set, the property adjusts the shutter speed of the camera, which
most obviously affects the illumination of subsequently captured
images. Shutter speed can be adjusted while previews or recordings are
running. The default value is 0 (auto).
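        For example, assuming ``camera`` is an open :class:`PiCamera`
        instance, a fixed 10ms exposure could be requested as follows (an
        illustrative sketch)::
            camera.framerate = 30
            camera.shutter_speed = 10000  # microseconds
            camera.capture('fixed_exposure.jpg')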
.. note::
You can query the :attr:`exposure_speed` attribute to determine the
actual shutter speed being used when this attribute is set to 0.
Please note that this capability requires an up to date firmware
(#692 or later).
.. note::
In later firmwares, this attribute is limited by the value of the
:attr:`framerate` attribute. For example, if framerate is set to
30fps, the shutter speed cannot be slower than 33,333µs (1/fps).
""")
def _get_exposure_speed(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].exposure
exposure_speed = property(_get_exposure_speed, doc="""\
Retrieves the current shutter speed of the camera.
When queried, this property returns the shutter speed currently being
used by the camera. If you have set :attr:`shutter_speed` to a non-zero
value, then :attr:`exposure_speed` and :attr:`shutter_speed` should be
equal. However, if :attr:`shutter_speed` is set to 0 (auto), then you
can read the actual shutter speed being used from this attribute. The
value is returned as an integer representing a number of microseconds.
This is a read-only property.
.. versionadded:: 1.6
""")
def _get_analog_gain(self):
self._check_camera_open()
return mo.to_fraction(
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].analog_gain)
analog_gain = property(_get_analog_gain, doc="""\
Retrieves the current analog gain of the camera.
When queried, this property returns the analog gain currently being
used by the camera. The value represents the analog gain of the sensor
prior to digital conversion. The value is returned as a
:class:`~fractions.Fraction` instance.
.. versionadded:: 1.6
""")
def _get_digital_gain(self):
self._check_camera_open()
return mo.to_fraction(
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].digital_gain)
digital_gain = property(_get_digital_gain, doc="""\
Retrieves the current digital gain of the camera.
When queried, this property returns the digital gain currently being
used by the camera. The value represents the digital gain the camera
applies after conversion of the sensor's analog output. The value is
returned as a :class:`~fractions.Fraction` instance.
.. versionadded:: 1.6
""")
def _get_video_denoise(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_DENOISE]
def _set_video_denoise(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_DENOISE] = value
video_denoise = property(_get_video_denoise, _set_video_denoise, doc="""\
Retrieves or sets whether denoise will be applied to video recordings.
When queried, the :attr:`video_denoise` property returns a boolean
value indicating whether or not the camera software will apply a
denoise algorithm to video recordings.
When set, the property activates or deactivates the denoise algorithm
for video recordings. The property can be set while recordings or
previews are in progress. The default value is ``True``.
.. versionadded:: 1.7
""")
def _get_image_denoise(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_STILLS_DENOISE]
def _set_image_denoise(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_STILLS_DENOISE] = value
image_denoise = property(_get_image_denoise, _set_image_denoise, doc="""\
Retrieves or sets whether denoise will be applied to image captures.
When queried, the :attr:`image_denoise` property returns a boolean
value indicating whether or not the camera software will apply a
denoise algorithm to image captures.
When set, the property activates or deactivates the denoise algorithm
for image captures. The property can be set while recordings or
previews are in progress. The default value is ``True``.
.. versionadded:: 1.7
""")
def _get_drc_strength(self):
self._check_camera_open()
return self._DRC_STRENGTHS_R[
self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION].strength
]
def _set_drc_strength(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION]
mp.strength = self.DRC_STRENGTHS[value]
except KeyError:
raise PiCameraValueError(
"Invalid dynamic range compression strength: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION] = mp
drc_strength = property(_get_drc_strength, _set_drc_strength, doc="""\
Retrieves or sets the dynamic range compression strength of the camera.
When queried, the :attr:`drc_strength` property returns a string
indicating the amount of `dynamic range compression`_ the camera
applies to images.
        When set, the attribute adjusts the strength of the dynamic range
compression applied to the camera's output. Valid values are given
in the list below:
{values}
The default value is ``'off'``. All possible values for the attribute
can be obtained from the ``PiCamera.DRC_STRENGTHS`` attribute.
.. warning::
Enabling DRC will `override fixed white balance`_ gains (set via
:attr:`awb_gains` and :attr:`awb_mode`).
.. _dynamic range compression: https://en.wikipedia.org/wiki/Gain_compression
.. _override fixed white balance: https://www.raspberrypi.org/forums/viewtopic.php?p=875772&sid=92fa4ea70d1fe24590a4cdfb4a10c489#p875772
.. versionadded:: 1.6
""".format(values=docstring_values(DRC_STRENGTHS)))
def _get_ISO(self):
warnings.warn(
PiCameraDeprecated(
'PiCamera.ISO is deprecated; use PiCamera.iso instead'))
return self.iso
def _set_ISO(self, value):
warnings.warn(
PiCameraDeprecated(
'PiCamera.ISO is deprecated; use PiCamera.iso instead'))
self.iso = value
ISO = property(_get_ISO, _set_ISO, doc="""
Retrieves or sets the apparent ISO setting of the camera.
.. deprecated:: 1.8
Please use the :attr:`iso` attribute instead.
""")
def _get_iso(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_ISO]
def _set_iso(self, value):
self._check_camera_open()
try:
if not (0 <= value <= 1600):
raise PiCameraValueError(
"Invalid iso value: %d (valid range 0..800)" % value)
except TypeError:
raise PiCameraValueError("Invalid iso value: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_ISO] = value
iso = property(_get_iso, _set_iso, doc="""\
Retrieves or sets the apparent ISO setting of the camera.
When queried, the :attr:`iso` property returns the ISO setting of the
camera, a value which represents the `sensitivity of the camera to
light`_. Lower values (e.g. 100) imply less sensitivity than higher
values (e.g. 400 or 800). Lower sensitivities tend to produce less
"noisy" (smoother) images, but operate poorly in low light conditions.
When set, the property adjusts the sensitivity of the camera (by
adjusting the :attr:`analog_gain` and :attr:`digital_gain`). Valid
values are between 0 (auto) and 1600. The actual value used when iso is
explicitly set will be one of the following values (whichever is
closest): 100, 200, 320, 400, 500, 640, 800.
On the V1 camera module, non-zero ISO values attempt to fix overall
gain at various levels. For example, ISO 100 attempts to provide an
overall gain of 1.0, ISO 200 attempts to provide overall gain of 2.0,
etc. The algorithm prefers analog gain over digital gain to reduce
noise.
On the V2 camera module, ISO 100 attempts to produce overall gain of
~1.84, and ISO 800 attempts to produce overall gain of ~14.72 (the V2
camera module was calibrated against the `ISO film speed`_ standard).
The attribute can be adjusted while previews or recordings are in
progress. The default value is 0 which means automatically determine a
value according to image-taking conditions.
.. note::
Some users on the Pi camera forum have noted that higher ISO values
than 800 (specifically up to 1600) can be achieved in certain
conditions with :attr:`exposure_mode` set to ``'sports'`` and
:attr:`iso` set to 0. It doesn't appear to be possible to manually
request an ISO setting higher than 800, but the picamera library
will permit settings up to 1600 in case the underlying firmware
permits such settings in particular circumstances.
.. note::
Certain :attr:`exposure_mode` values override the ISO setting. For
example, ``'off'`` fixes :attr:`analog_gain` and
:attr:`digital_gain` entirely, preventing this property from
adjusting them when set.
.. _sensitivity of the camera to light: https://en.wikipedia.org/wiki/Film_speed#Digital
.. _ISO film speed: https://en.wikipedia.org/wiki/Film_speed#Current_system:_ISO
""")
def _get_meter_mode(self):
self._check_camera_open()
return self._METER_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE].value
]
def _set_meter_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE]
mp.value = self.METER_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid metering mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE] = mp
meter_mode = property(_get_meter_mode, _set_meter_mode, doc="""\
Retrieves or sets the metering mode of the camera.
When queried, the :attr:`meter_mode` property returns the method by
which the camera `determines the exposure`_ as one of the following
strings:
{values}
When set, the property adjusts the camera's metering mode. All modes
set up two regions: a center region, and an outer region. The major
`difference between each mode`_ is the size of the center region. The
``'backlit'`` mode has the largest central region (30% of the width),
while ``'spot'`` has the smallest (10% of the width).
The property can be set while recordings or previews are in progress.
The default value is ``'average'``. All possible values for the
attribute can be obtained from the ``PiCamera.METER_MODES`` attribute.
.. _determines the exposure: https://en.wikipedia.org/wiki/Metering_mode
.. _difference between each mode: https://www.raspberrypi.org/forums/viewtopic.php?p=565644#p565644
""".format(values=docstring_values(METER_MODES)))
def _get_video_stabilization(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_STABILISATION]
def _set_video_stabilization(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_STABILISATION] = value
video_stabilization = property(
_get_video_stabilization, _set_video_stabilization, doc="""\
Retrieves or sets the video stabilization mode of the camera.
When queried, the :attr:`video_stabilization` property returns a
boolean value indicating whether or not the camera attempts to
compensate for motion.
When set, the property activates or deactivates video stabilization.
The property can be set while recordings or previews are in progress.
The default value is ``False``.
.. note::
The built-in video stabilization only accounts for `vertical and
horizontal motion`_, not rotation.
.. _vertical and horizontal motion: https://www.raspberrypi.org/forums/viewtopic.php?p=342667&sid=ec7d95e887ab74a90ffaab87888c48cd#p342667
""")
def _get_exposure_compensation(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_COMP]
def _set_exposure_compensation(self, value):
self._check_camera_open()
try:
if not (-25 <= value <= 25):
raise PiCameraValueError(
"Invalid exposure compensation value: "
"%d (valid range -25..25)" % value)
except TypeError:
raise PiCameraValueError(
"Invalid exposure compensation value: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_COMP] = value
exposure_compensation = property(
_get_exposure_compensation, _set_exposure_compensation, doc="""\
Retrieves or sets the exposure compensation level of the camera.
When queried, the :attr:`exposure_compensation` property returns an
integer value between -25 and 25 indicating the exposure level of the
camera. Larger values result in brighter images.
When set, the property adjusts the camera's exposure compensation
level. Each increment represents 1/6th of a stop. Hence setting the
attribute to 6 increases exposure by 1 stop. The property can be set
while recordings or previews are in progress. The default value is 0.
""")
def _get_exposure_mode(self):
self._check_camera_open()
return self._EXPOSURE_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE].value
]
def _set_exposure_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE]
mp.value = self.EXPOSURE_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid exposure mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE] = mp
exposure_mode = property(_get_exposure_mode, _set_exposure_mode, doc="""\
Retrieves or sets the exposure mode of the camera.
When queried, the :attr:`exposure_mode` property returns a string
representing the exposure setting of the camera. The possible values
can be obtained from the ``PiCamera.EXPOSURE_MODES`` attribute, and
are as follows:
{values}
When set, the property adjusts the camera's exposure mode. The
property can be set while recordings or previews are in progress. The
default value is ``'auto'``.
.. note::
Exposure mode ``'off'`` is special: this disables the camera's
automatic gain control, fixing the values of :attr:`digital_gain`
and :attr:`analog_gain`.
Please note that these properties are not directly settable
(although they can be influenced by setting :attr:`iso` *prior* to
fixing the gains), and default to low values when the camera is
first initialized. Therefore it is important to let them settle on
            higher values before disabling automatic gain control, otherwise all
frames captured will appear black.
""".format(values=docstring_values(EXPOSURE_MODES)))
def _get_flash_mode(self):
self._check_camera_open()
return self._FLASH_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_FLASH].value
]
def _set_flash_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_FLASH]
mp.value = self.FLASH_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid flash mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_FLASH] = mp
flash_mode = property(_get_flash_mode, _set_flash_mode, doc="""\
Retrieves or sets the flash mode of the camera.
When queried, the :attr:`flash_mode` property returns a string
representing the flash setting of the camera. The possible values can
be obtained from the ``PiCamera.FLASH_MODES`` attribute, and are as
follows:
{values}
When set, the property adjusts the camera's flash mode. The property
can be set while recordings or previews are in progress. The default
value is ``'off'``.
.. note::
You must define which GPIO pins the camera is to use for flash and
privacy indicators. This is done within the `Device Tree
configuration`_ which is considered an advanced topic.
Specifically, you need to define pins ``FLASH_0_ENABLE`` and
optionally ``FLASH_0_INDICATOR`` (for the privacy indicator). More
information can be found in this :ref:`recipe
<flash_configuration>`.
.. _Device Tree configuration: https://www.raspberrypi.org/documentation/configuration/pin-configuration.md
.. versionadded:: 1.10
""".format(values=docstring_values(FLASH_MODES)))
def _get_awb_mode(self):
self._check_camera_open()
return self._AWB_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE].value
]
def _set_awb_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE]
mp.value = self.AWB_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid auto-white-balance mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE] = mp
awb_mode = property(_get_awb_mode, _set_awb_mode, doc="""\
Retrieves or sets the auto-white-balance mode of the camera.
When queried, the :attr:`awb_mode` property returns a string
representing the auto white balance setting of the camera. The possible
values can be obtained from the ``PiCamera.AWB_MODES`` attribute, and
are as follows:
{values}
When set, the property adjusts the camera's auto-white-balance mode.
The property can be set while recordings or previews are in progress.
The default value is ``'auto'``.
.. note::
AWB mode ``'off'`` is special: this disables the camera's automatic
white balance permitting manual control of the white balance via
the :attr:`awb_gains` property. However, even with AWB disabled,
some attributes (specifically :attr:`still_stats` and
:attr:`drc_strength`) can cause AWB re-calculations.
""".format(values=docstring_values(AWB_MODES)))
def _get_awb_gains(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS]
return (
mo.to_fraction(mp.awb_red_gain),
mo.to_fraction(mp.awb_blue_gain),
)
def _set_awb_gains(self, value):
self._check_camera_open()
try:
red_gain, blue_gain = value
except (ValueError, TypeError):
red_gain = blue_gain = value
if not (0.0 <= red_gain <= 8.0 and 0.0 <= blue_gain <= 8.0):
raise PiCameraValueError(
"Invalid gain(s) in (%f, %f) (valid range: 0.0-8.0)" % (
red_gain, blue_gain))
mp = mmal.MMAL_PARAMETER_AWB_GAINS_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_CUSTOM_AWB_GAINS,
ct.sizeof(mmal.MMAL_PARAMETER_AWB_GAINS_T)
),
mo.to_rational(red_gain),
mo.to_rational(blue_gain),
)
self._camera.control.params[mmal.MMAL_PARAMETER_CUSTOM_AWB_GAINS] = mp
awb_gains = property(_get_awb_gains, _set_awb_gains, doc="""\
Gets or sets the auto-white-balance gains of the camera.
When queried, this attribute returns a tuple of values representing
the `(red, blue)` balance of the camera. The `red` and `blue` values
are returned :class:`~fractions.Fraction` instances. The values will
be between 0.0 and 8.0.
When set, this attribute adjusts the camera's auto-white-balance gains.
The property can be specified as a single value in which case both red
and blue gains will be adjusted equally, or as a `(red, blue)` tuple.
Values can be specified as an :ref:`int <typesnumeric>`, :ref:`float
<typesnumeric>` or :class:`~fractions.Fraction` and each gain must be
between 0.0 and 8.0. Typical values for the gains are between 0.9 and
1.9. The property can be set while recordings or previews are in
progress.
.. note::
This attribute only has an effect when :attr:`awb_mode` is set to
``'off'``. Also note that even with AWB disabled, some attributes
(specifically :attr:`still_stats` and :attr:`drc_strength`) can
cause AWB re-calculations.
.. versionchanged:: 1.6
Prior to version 1.6, this attribute was write-only.
""")
def _get_image_effect(self):
self._check_camera_open()
return self._IMAGE_EFFECTS_R[
self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT].value
]
def _set_image_effect(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT]
mp.value = self.IMAGE_EFFECTS[value]
self._image_effect_params = None
except KeyError:
raise PiCameraValueError("Invalid image effect: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT] = mp
image_effect = property(_get_image_effect, _set_image_effect, doc="""\
Retrieves or sets the current image effect applied by the camera.
When queried, the :attr:`image_effect` property returns a string
representing the effect the camera will apply to captured video. The
possible values can be obtained from the ``PiCamera.IMAGE_EFFECTS``
attribute, and are as follows:
{values}
When set, the property changes the effect applied by the camera. The
property can be set while recordings or previews are in progress, but
only certain effects work while recording video (notably ``'negative'``
and ``'solarize'``). The default value is ``'none'``.
""".format(values=docstring_values(IMAGE_EFFECTS)))
def _get_image_effect_params(self):
self._check_camera_open()
return self._image_effect_params
def _set_image_effect_params(self, value):
self._check_camera_open()
to_int = lambda x: int(x)
to_byte = lambda x: max(0, min(255, int(x)))
to_bool = lambda x: (0, 1)[bool(x)]
to_8dot8 = lambda x: int(x * 256)
valid_transforms = {
'solarize': [
(to_bool, to_byte, to_byte, to_byte, to_byte),
(to_byte, to_byte, to_byte, to_byte),
(to_bool,),
],
'colorpoint': [
(lambda x: max(0, min(3, int(x))),),
],
'colorbalance': [
(to_8dot8, to_8dot8, to_8dot8, to_8dot8, to_int, to_int),
(to_8dot8, to_8dot8, to_8dot8, to_8dot8),
(to_8dot8, to_8dot8, to_8dot8),
],
'colorswap': [
(to_bool,),
],
'posterise': [
(lambda x: max(2, min(31, int(x))),),
],
'blur': [
(lambda x: max(1, min(2, int(x))),),
],
'film': [
(to_byte, to_byte, to_byte),
],
'watercolor': [
(),
(to_byte, to_byte),
]
}
try:
params = tuple(i for i in value)
except TypeError:
params = (value,)
effect = self.image_effect
param_transforms = [
transforms for transforms in valid_transforms.get(effect, [])
if len(transforms) == len(params)
]
if not param_transforms:
raise PiCameraValueError(
'invalid set of parameters for effect "%s"' % effect)
param_transforms = param_transforms[0]
params = tuple(
transform(p)
for (transform, p) in zip(param_transforms, params)
)
mp = mmal.MMAL_PARAMETER_IMAGEFX_PARAMETERS_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS,
ct.sizeof(mmal.MMAL_PARAMETER_IMAGEFX_PARAMETERS_T)
),
effect=self.IMAGE_EFFECTS[effect],
num_effect_params=len(params),
effect_parameter=params,
)
self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS] = mp
self._image_effect_params = value
image_effect_params = property(
_get_image_effect_params, _set_image_effect_params, doc="""\
Retrieves or sets the parameters for the current :attr:`effect
<image_effect>`.
When queried, the :attr:`image_effect_params` property either returns
``None`` (for effects which have no configurable parameters, or if no
parameters have been configured), or a tuple of numeric values up to
six elements long.
When set, the property changes the parameters of the current
:attr:`effect <image_effect>` as a sequence of numbers, or a single
number. Attempting to set parameters on an effect which does not
support parameters, or providing an incompatible set of parameters for
an effect will raise a :exc:`PiCameraValueError` exception.
The effects which have parameters, and what combinations those
parameters can take is as follows:
.. tabularcolumns:: |p{30mm}|p{25mm}|p{75mm}|
+--------------------+----------------+-----------------------------------------+
| Effect | Parameters | Description |
+====================+================+=========================================+
| ``'solarize'`` | *yuv*, | *yuv* controls whether data is |
        |                    | *x0*, *y0*,    | processed as RGB (0) or YUV (1). Input  |
        |                    | *y1*, *y2*     | values from 0 to *x0* - 1 are remapped  |
| | | linearly onto the range 0 to *y0*. |
| | | Values from *x0* to 255 are remapped |
| | | linearly onto the range *y1* to *y2*. |
| +----------------+-----------------------------------------+
| | *x0*, *y0*, | Same as above, but *yuv* defaults to |
| | *y1*, *y2* | 0 (process as RGB). |
| +----------------+-----------------------------------------+
| | *yuv* | Same as above, but *x0*, *y0*, *y1*, |
| | | *y2* default to 128, 128, 128, 0 |
| | | respectively. |
+--------------------+----------------+-----------------------------------------+
| ``'colorpoint'`` | *quadrant* | *quadrant* specifies which quadrant |
| | | of the U/V space to retain chroma |
| | | from: 0=green, 1=red/yellow, 2=blue, |
| | | 3=purple. There is no default; this |
| | | effect does nothing until parameters |
| | | are set. |
+--------------------+----------------+-----------------------------------------+
| ``'colorbalance'`` | *lens*, | *lens* specifies the lens shading |
| | *r*, *g*, *b*, | strength (0.0 to 256.0, where 0.0 |
| | *u*, *v* | indicates lens shading has no effect). |
| | | *r*, *g*, *b* are multipliers for their |
| | | respective color channels (0.0 to |
| | | 256.0). *u* and *v* are offsets added |
| | | to the U/V plane (0 to 255). |
| +----------------+-----------------------------------------+
        |                    | *lens*,        | Same as above, but *u* and *v* default  |
| | *r*, *g*, *b* | to 0. |
| +----------------+-----------------------------------------+
| | *lens*, | Same as above but *g* also defaults to |
        |                    | *r*, *b*       | 1.0.                                    |
+--------------------+----------------+-----------------------------------------+
| ``'colorswap'`` | *dir* | If *dir* is 0, swap RGB to BGR. If |
| | | *dir* is 1, swap RGB to BRG. |
+--------------------+----------------+-----------------------------------------+
| ``'posterise'`` | *steps* | Control the quantization steps for the |
| | | image. Valid values are 2 to 32, and |
| | | the default is 4. |
+--------------------+----------------+-----------------------------------------+
| ``'blur'`` | *size* | Specifies the size of the kernel. Valid |
| | | values are 1 or 2. |
+--------------------+----------------+-----------------------------------------+
| ``'film'`` | *strength*, | *strength* specifies the strength of |
| | *u*, *v* | effect. *u* and *v* are offsets added |
| | | to the U/V plane (0 to 255). |
+--------------------+----------------+-----------------------------------------+
| ``'watercolor'`` | *u*, *v* | *u* and *v* specify offsets to add to |
| | | the U/V plane (0 to 255). |
| +----------------+-----------------------------------------+
| | | No parameters indicates no U/V effect. |
+--------------------+----------------+-----------------------------------------+
.. versionadded:: 1.8
""")
def _get_color_effects(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_COLOUR_EFFECT]
if mp.enable != mmal.MMAL_FALSE:
return (mp.u, mp.v)
else:
return None
def _set_color_effects(self, value):
self._check_camera_open()
if value is None:
enable = mmal.MMAL_FALSE
u = v = 128
else:
enable = mmal.MMAL_TRUE
try:
u, v = value
except (TypeError, ValueError) as e:
raise PiCameraValueError(
"Invalid color effect (u, v) tuple: %s" % value)
if not ((0 <= u <= 255) and (0 <= v <= 255)):
raise PiCameraValueError(
"(u, v) values must be between 0 and 255")
mp = mmal.MMAL_PARAMETER_COLOURFX_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_COLOUR_EFFECT,
ct.sizeof(mmal.MMAL_PARAMETER_COLOURFX_T)
),
enable, u, v
)
self._camera.control.params[mmal.MMAL_PARAMETER_COLOUR_EFFECT] = mp
color_effects = property(_get_color_effects, _set_color_effects, doc="""\
Retrieves or sets the current color effect applied by the camera.
When queried, the :attr:`color_effects` property either returns
``None`` which indicates that the camera is using normal color
settings, or a ``(u, v)`` tuple where ``u`` and ``v`` are integer
values between 0 and 255.
When set, the property changes the color effect applied by the camera.
The property can be set while recordings or previews are in progress.
For example, to make the image black and white set the value to ``(128,
128)``. The default value is ``None``.
""")
def _get_rotation(self):
self._check_camera_open()
return self._camera.outputs[0].params[mmal.MMAL_PARAMETER_ROTATION]
def _set_rotation(self, value):
self._check_camera_open()
try:
value = ((int(value) % 360) // 90) * 90
except ValueError:
raise PiCameraValueError("Invalid rotation angle: %s" % value)
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_ROTATION] = value
rotation = property(_get_rotation, _set_rotation, doc="""\
Retrieves or sets the current rotation of the camera's image.
When queried, the :attr:`rotation` property returns the rotation
applied to the image. Valid values are 0, 90, 180, and 270.
When set, the property changes the rotation applied to the camera's
input. The property can be set while recordings or previews are in
progress. The default value is ``0``.
""")
def _get_vflip(self):
self._check_camera_open()
return self._camera.outputs[0].params[mmal.MMAL_PARAMETER_MIRROR] in (
mmal.MMAL_PARAM_MIRROR_VERTICAL, mmal.MMAL_PARAM_MIRROR_BOTH)
def _set_vflip(self, value):
self._check_camera_open()
value = {
(False, False): mmal.MMAL_PARAM_MIRROR_NONE,
(True, False): mmal.MMAL_PARAM_MIRROR_VERTICAL,
(False, True): mmal.MMAL_PARAM_MIRROR_HORIZONTAL,
(True, True): mmal.MMAL_PARAM_MIRROR_BOTH,
}[(bool(value), self.hflip)]
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_MIRROR] = value
vflip = property(_get_vflip, _set_vflip, doc="""\
Retrieves or sets whether the camera's output is vertically flipped.
When queried, the :attr:`vflip` property returns a boolean indicating
whether or not the camera's output is vertically flipped. The property
can be set while recordings or previews are in progress. The default
value is ``False``.
""")
def _get_hflip(self):
self._check_camera_open()
return self._camera.outputs[0].params[mmal.MMAL_PARAMETER_MIRROR] in (
mmal.MMAL_PARAM_MIRROR_HORIZONTAL, mmal.MMAL_PARAM_MIRROR_BOTH)
def _set_hflip(self, value):
self._check_camera_open()
value = {
(False, False): mmal.MMAL_PARAM_MIRROR_NONE,
(True, False): mmal.MMAL_PARAM_MIRROR_VERTICAL,
(False, True): mmal.MMAL_PARAM_MIRROR_HORIZONTAL,
(True, True): mmal.MMAL_PARAM_MIRROR_BOTH,
}[(self.vflip, bool(value))]
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_MIRROR] = value
hflip = property(_get_hflip, _set_hflip, doc="""\
Retrieves or sets whether the camera's output is horizontally flipped.
When queried, the :attr:`hflip` property returns a boolean indicating
whether or not the camera's output is horizontally flipped. The
property can be set while recordings or previews are in progress. The
default value is ``False``.
""")
def _get_zoom(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_INPUT_CROP]
return (
mp.rect.x / 65535.0,
mp.rect.y / 65535.0,
mp.rect.width / 65535.0,
mp.rect.height / 65535.0,
)
def _set_zoom(self, value):
self._check_camera_open()
try:
x, y, w, h = value
except (TypeError, ValueError) as e:
raise PiCameraValueError(
"Invalid zoom rectangle (x, y, w, h) tuple: %s" % value)
mp = mmal.MMAL_PARAMETER_INPUT_CROP_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_INPUT_CROP,
ct.sizeof(mmal.MMAL_PARAMETER_INPUT_CROP_T)
),
mmal.MMAL_RECT_T(
max(0, min(65535, int(65535 * x))),
max(0, min(65535, int(65535 * y))),
max(0, min(65535, int(65535 * w))),
max(0, min(65535, int(65535 * h))),
),
)
self._camera.control.params[mmal.MMAL_PARAMETER_INPUT_CROP] = mp
zoom = property(_get_zoom, _set_zoom, doc="""\
Retrieves or sets the zoom applied to the camera's input.
When queried, the :attr:`zoom` property returns a ``(x, y, w, h)``
tuple of floating point values ranging from 0.0 to 1.0, indicating the
proportion of the image to include in the output (this is also known as
the "Region of Interest" or ROI). The default value is ``(0.0, 0.0,
1.0, 1.0)`` which indicates that everything should be included. The
property can be set while recordings or previews are in progress.
        The `zoom` is applied to the processed image, after rotation and
        rescaling. If rotation has been used, the zoom is composed of
        ``(y, x, h, w)`` instead. The values `w` and `h` can modify the
        aspect ratio of the image: use equal values for `w` and `h` if you
        want to keep the same aspect ratio.
""")
def _get_crop(self):
warnings.warn(
PiCameraDeprecated(
'PiCamera.crop is deprecated; use PiCamera.zoom instead'))
return self.zoom
def _set_crop(self, value):
warnings.warn(
PiCameraDeprecated(
'PiCamera.crop is deprecated; use PiCamera.zoom instead'))
self.zoom = value
crop = property(_get_crop, _set_crop, doc="""
Retrieves or sets the zoom applied to the camera's input.
.. deprecated:: 1.8
Please use the :attr:`zoom` attribute instead.
""")
def _get_overlays(self):
self._check_camera_open()
return self._overlays
overlays = property(_get_overlays, doc="""\
Retrieves all active :class:`PiRenderer` overlays.
        If no overlays are currently active, :attr:`overlays` will return an
empty iterable. Otherwise, it will return an iterable of
:class:`PiRenderer` instances which are currently acting as overlays.
Note that the preview renderer is an exception to this: it is *not*
included as an overlay despite being derived from :class:`PiRenderer`.
.. versionadded:: 1.8
""")
def _get_preview(self):
self._check_camera_open()
if isinstance(self._preview, PiPreviewRenderer):
return self._preview
preview = property(_get_preview, doc="""\
Retrieves the :class:`PiRenderer` displaying the camera preview.
If no preview is currently active, :attr:`preview` will return
``None``. Otherwise, it will return the instance of
:class:`PiRenderer` which is currently connected to the camera's
preview port for rendering what the camera sees. You can use the
attributes of the :class:`PiRenderer` class to configure the appearance
of the preview. For example, to make the preview semi-transparent::
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
camera.preview.alpha = 128
.. versionadded:: 1.8
""")
def _get_preview_alpha(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_alpha is deprecated; use '
'PiCamera.preview.alpha instead'))
if self.preview:
return self.preview.alpha
else:
return self._preview_alpha
def _set_preview_alpha(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_alpha is deprecated; use '
'PiCamera.preview.alpha instead'))
if self.preview:
self.preview.alpha = value
else:
self._preview_alpha = value
preview_alpha = property(_get_preview_alpha, _set_preview_alpha, doc="""\
Retrieves or sets the opacity of the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.alpha` attribute of the
:attr:`preview` object instead.
""")
def _get_preview_layer(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_layer is deprecated; '
'use PiCamera.preview.layer instead'))
if self.preview:
return self.preview.layer
else:
return self._preview_layer
def _set_preview_layer(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_layer is deprecated; '
'use PiCamera.preview.layer instead'))
if self.preview:
self.preview.layer = value
else:
self._preview_layer = value
preview_layer = property(_get_preview_layer, _set_preview_layer, doc="""\
Retrieves or sets the layer of the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.layer` attribute of the
:attr:`preview` object instead.
""")
def _get_preview_fullscreen(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_fullscreen is deprecated; '
'use PiCamera.preview.fullscreen instead'))
if self.preview:
return self.preview.fullscreen
else:
return self._preview_fullscreen
def _set_preview_fullscreen(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_fullscreen is deprecated; '
'use PiCamera.preview.fullscreen instead'))
if self.preview:
self.preview.fullscreen = value
else:
self._preview_fullscreen = value
preview_fullscreen = property(
_get_preview_fullscreen, _set_preview_fullscreen, doc="""\
Retrieves or sets full-screen for the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.fullscreen` attribute of the
:attr:`preview` object instead.
""")
def _get_preview_window(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_window is deprecated; '
'use PiCamera.preview.window instead'))
if self.preview:
return self.preview.window
else:
return self._preview_window
def _set_preview_window(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_window is deprecated; '
'use PiCamera.preview.window instead'))
if self.preview:
self.preview.window = value
else:
self._preview_window = value
preview_window = property(
_get_preview_window, _set_preview_window, doc="""\
Retrieves or sets the size of the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.window` attribute of the
:attr:`preview` object instead.
""")
def _get_annotate_text(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if mp.enable:
return mp.text.decode('ascii')
else:
return ''
def _set_annotate_text(self, value):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.enable = bool(value or mp.show_frame_num)
if mp.enable:
try:
mp.text = value.encode('ascii')
except ValueError as e:
raise PiCameraValueError(str(e))
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_text = property(_get_annotate_text, _set_annotate_text, doc="""\
Retrieves or sets a text annotation for all output.
When queried, the :attr:`annotate_text` property returns the current
annotation (if no annotation has been set, this is simply a blank
string).
When set, the property immediately applies the annotation to the
preview (if it is running) and to any future captures or video
recording. Strings longer than 255 characters, or strings containing
non-ASCII characters will raise a :exc:`PiCameraValueError`. The
default value is ``''``.
.. versionchanged:: 1.8
Text annotations can now be 255 characters long. The prior limit
was 32 characters.
""")
def _get_annotate_frame_num(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
return mp.show_frame_num.value != mmal.MMAL_FALSE
def _set_annotate_frame_num(self, value):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.enable = bool(value or mp.text)
mp.show_frame_num = bool(value)
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_frame_num = property(
_get_annotate_frame_num, _set_annotate_frame_num, doc="""\
Controls whether the current frame number is drawn as an annotation.
The :attr:`annotate_frame_num` attribute is a bool indicating whether
or not the current frame number is rendered as an annotation, similar
to :attr:`annotate_text`. The default is ``False``.
.. versionadded:: 1.8
""")
def _get_annotate_text_size(self):
self._check_camera_open()
if self._camera.annotate_rev == 3:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
return mp.text_size or self.DEFAULT_ANNOTATE_SIZE
else:
return self.DEFAULT_ANNOTATE_SIZE
def _set_annotate_text_size(self, value):
self._check_camera_open()
if not (6 <= value <= 160):
raise PiCameraValueError(
"Invalid annotation text size: %d (valid range 6-160)" % value)
if self._camera.annotate_rev == 3:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.text_size = value
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
elif value != self.DEFAULT_ANNOTATE_SIZE:
warnings.warn(
PiCameraFallback(
"Firmware does not support setting annotation text "
"size; using default (%d) instead" % self.DEFAULT_ANNOTATE_SIZE))
annotate_text_size = property(
_get_annotate_text_size, _set_annotate_text_size, doc="""\
Controls the size of the annotation text.
The :attr:`annotate_text_size` attribute is an int which determines how
large the annotation text will appear on the display. Valid values are
in the range 6 to 160, inclusive. The default is {size}.
.. versionadded:: 1.10
""".format(size=DEFAULT_ANNOTATE_SIZE))
def _get_annotate_foreground(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if self._camera.annotate_rev == 3 and mp.custom_text_color:
return Color.from_yuv_bytes(
mp.custom_text_Y,
mp.custom_text_U,
mp.custom_text_V)
else:
return Color('white')
def _set_annotate_foreground(self, value):
self._check_camera_open()
if not isinstance(value, Color):
raise PiCameraValueError(
'annotate_foreground must be a Color')
elif self._camera.annotate_rev < 3:
if value.rgb_bytes != (255, 255, 255):
warnings.warn(
PiCameraFallback(
"Firmware does not support setting a custom foreground "
"annotation color; using white instead"))
return
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.custom_text_color = True
(
mp.custom_text_Y,
mp.custom_text_U,
mp.custom_text_V,
) = value.yuv_bytes
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_foreground = property(
_get_annotate_foreground, _set_annotate_foreground, doc="""\
Controls the color of the annotation text.
The :attr:`annotate_foreground` attribute specifies, partially, the
color of the annotation text. The value is specified as a
:class:`Color`. The default is white.
.. note::
The underlying firmware does not directly support setting all
components of the text color, only the Y' component of a `Y'UV`_
tuple. This is roughly (but not precisely) analogous to the
"brightness" of a color, so you may choose to think of this as
setting how bright the annotation text will be relative to its
background. In order to specify just the Y' component when setting
this attribute, you may choose to construct the
:class:`Color` instance as follows::
camera.annotate_foreground = picamera.Color(y=0.2, u=0, v=0)
.. _Y'UV: https://en.wikipedia.org/wiki/YUV
.. versionadded:: 1.10
""")
def _get_annotate_background(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if self._camera.annotate_rev == 3:
if mp.enable_text_background:
if mp.custom_background_color:
return Color.from_yuv_bytes(
mp.custom_background_Y,
mp.custom_background_U,
mp.custom_background_V)
else:
return Color('black')
else:
return None
else:
if mp.black_text_background:
return Color('black')
else:
return None
def _set_annotate_background(self, value):
self._check_camera_open()
if value is True:
warnings.warn(
PiCameraDeprecated(
'Setting PiCamera.annotate_background to True is '
'deprecated; use PiCamera.color.Color("black") instead'))
value = Color('black')
elif value is False:
warnings.warn(
PiCameraDeprecated(
'Setting PiCamera.annotate_background to False is '
'deprecated; use None instead'))
value = None
elif value is None:
pass
elif not isinstance(value, Color):
raise PiCameraValueError(
'annotate_background must be a Color or None')
elif self._camera.annotate_rev < 3 and value.rgb_bytes != (0, 0, 0):
warnings.warn(
PiCameraFallback(
"Firmware does not support setting a custom background "
"annotation color; using black instead"))
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if self._camera.annotate_rev == 3:
if value is None:
mp.enable_text_background = False
else:
mp.enable_text_background = True
mp.custom_background_color = True
(
mp.custom_background_Y,
mp.custom_background_U,
mp.custom_background_V,
) = value.yuv_bytes
else:
if value is None:
mp.black_text_background = False
else:
mp.black_text_background = True
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_background = property(
_get_annotate_background, _set_annotate_background, doc="""\
Controls what background is drawn behind the annotation.
The :attr:`annotate_background` attribute specifies if a background
will be drawn behind the :attr:`annotation text <annotate_text>` and,
if so, what color it will be. The value is specified as a
:class:`Color` or ``None`` if no background should be drawn. The
default is ``None``.
.. note::
For backward compatibility purposes, the value ``False`` will be
treated as ``None``, and the value ``True`` will be treated as the
color black. The "truthiness" of the values returned by the
            attribute is backward compatible, although the values themselves
are not.
.. versionadded:: 1.8
.. versionchanged:: 1.10
In prior versions this was a bool value with ``True`` representing
a black background.
""")
| true
| true
|
790490a2fe105f55f3b011637612348a41355cec
| 628
|
py
|
Python
|
modules/dbnd-airflow/src/dbnd_airflow_contrib/credentials_helper_azure.py
|
ipattarapong/dbnd
|
7bd65621c46c73e078eb628f994127ad4c7dbd1a
|
[
"Apache-2.0"
] | 224
|
2020-01-02T10:46:37.000Z
|
2022-03-02T13:54:08.000Z
|
modules/dbnd-airflow/src/dbnd_airflow_contrib/credentials_helper_azure.py
|
ipattarapong/dbnd
|
7bd65621c46c73e078eb628f994127ad4c7dbd1a
|
[
"Apache-2.0"
] | 16
|
2020-03-11T09:37:58.000Z
|
2022-01-26T10:22:08.000Z
|
modules/dbnd-airflow/src/dbnd_airflow_contrib/credentials_helper_azure.py
|
ipattarapong/dbnd
|
7bd65621c46c73e078eb628f994127ad4c7dbd1a
|
[
"Apache-2.0"
] | 24
|
2020-03-24T13:53:50.000Z
|
2022-03-22T11:55:18.000Z
|
from airflow.hooks.base_hook import BaseHook
class AzureBlobStorageCredentials(BaseHook):
def __init__(self, conn_id="azure_blob_storage_default"):
self.conn_id = conn_id
def get_credentials(self):
connection_object = self.get_connection(self.conn_id)
extras = connection_object.extra_dejson
credentials = dict()
if connection_object.login:
credentials["account_name"] = connection_object.login
if connection_object.password:
credentials["account_key"] = connection_object.password
credentials.update(extras)
return credentials
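
# Editor's hedged usage sketch: resolving the credentials dict from an Airflow
# connection. The conn_id below is the class default; a connection with that id
# must already exist in Airflow's metadata database for this to work.
if __name__ == "__main__":
    helper = AzureBlobStorageCredentials(conn_id="azure_blob_storage_default")
    credentials = helper.get_credentials()
    print(sorted(credentials.keys()))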
| 33.052632
| 67
| 0.710191
|
from airflow.hooks.base_hook import BaseHook
class AzureBlobStorageCredentials(BaseHook):
def __init__(self, conn_id="azure_blob_storage_default"):
self.conn_id = conn_id
def get_credentials(self):
connection_object = self.get_connection(self.conn_id)
extras = connection_object.extra_dejson
credentials = dict()
if connection_object.login:
credentials["account_name"] = connection_object.login
if connection_object.password:
credentials["account_key"] = connection_object.password
credentials.update(extras)
return credentials
| true
| true
|
790490e89602a87c8a556e59d31dc0f19b50cac5
| 44,483
|
py
|
Python
|
flopy/utils/util_list.py
|
aleaf/flopy
|
a5777a4d4a745e473110a167c69603ac4ad3106c
|
[
"CC0-1.0",
"BSD-3-Clause"
] | null | null | null |
flopy/utils/util_list.py
|
aleaf/flopy
|
a5777a4d4a745e473110a167c69603ac4ad3106c
|
[
"CC0-1.0",
"BSD-3-Clause"
] | null | null | null |
flopy/utils/util_list.py
|
aleaf/flopy
|
a5777a4d4a745e473110a167c69603ac4ad3106c
|
[
"CC0-1.0",
"BSD-3-Clause"
] | null | null | null |
"""
util_list module. Contains the mflist class.
This classes encapsulates modflow-style list inputs away
from the individual packages. The end-user should not need to
instantiate this class directly.
some more info
"""
from __future__ import division, print_function
import os
import warnings
import numpy as np
from ..datbase import DataInterface, DataListInterface, DataType
from ..utils.recarray_utils import create_empty_recarray
try:
from numpy.lib import NumpyVersion
numpy114 = NumpyVersion(np.__version__) >= "1.14.0"
except ImportError:
numpy114 = False
class MfList(DataInterface, DataListInterface):
"""
a generic object for handling transient boundary condition lists
Parameters
----------
package : package object
The package object (of type :class:`flopy.pakbase.Package`) to which
this MfList will be added.
data : varies
the data of the transient list (optional). (the default is None)
Attributes
----------
mxact : int
the max number of active bc for any stress period
Methods
-------
add_record(kper,index,value) : None
add a record to stress period kper at index location
write_transient(f) : None
write the transient sequence to the model input file f
check_kij() : None
checks for boundaries outside of model domain - issues warnings only
See Also
--------
Notes
-----
Examples
--------
"""
def __init__(
self,
package,
data=None,
dtype=None,
model=None,
list_free_format=None,
binary=False,
):
if isinstance(data, MfList):
for attr in data.__dict__.items():
setattr(self, attr[0], attr[1])
if model is None:
self._model = package.parent
else:
self._model = model
self._package = package
return
self._package = package
if model is None:
self._model = package.parent
else:
self._model = model
if dtype is None:
assert isinstance(self.package.dtype, np.dtype)
self.__dtype = self.package.dtype
else:
self.__dtype = dtype
self.__binary = binary
self.__vtype = {}
self.__data = {}
if data is not None:
self.__cast_data(data)
self.__df = None
if list_free_format is None:
if package.parent.version == "mf2k":
list_free_format = False
self.list_free_format = list_free_format
return
@property
def name(self):
return self.package.name
@property
def mg(self):
return self._model.modelgrid
@property
def sr(self):
return self.mg.sr
@property
def model(self):
return self._model
@property
def package(self):
return self._package
@property
def data_type(self):
return DataType.transientlist
@property
def plotable(self):
return True
def get_empty(self, ncell=0):
d = create_empty_recarray(ncell, self.dtype, default_value=-1.0e10)
return d
def export(self, f, **kwargs):
from flopy import export
return export.utils.mflist_export(f, self, **kwargs)
def append(self, other):
""" append the recarrays from one MfList to another
Parameters
----------
        other: variable: an item that can be cast into an MfList
that corresponds with self
Returns
-------
dict of {kper:recarray}
"""
if not isinstance(other, MfList):
other = MfList(
self.package,
data=other,
dtype=self.dtype,
model=self._model,
list_free_format=self.list_free_format,
)
msg = (
"MfList.append(): other arg must be "
+ "MfList or dict, not {0}".format(type(other))
)
assert isinstance(other, MfList), msg
other_kpers = list(other.data.keys())
other_kpers.sort()
self_kpers = list(self.data.keys())
self_kpers.sort()
new_dict = {}
for kper in range(self._model.nper):
other_data = other[kper].copy()
self_data = self[kper].copy()
other_len = other_data.shape[0]
self_len = self_data.shape[0]
if (other_len == 0 and self_len == 0) or (
kper not in self_kpers and kper not in other_kpers
):
continue
elif self_len == 0:
new_dict[kper] = other_data
elif other_len == 0:
new_dict[kper] = self_data
else:
new_len = other_data.shape[0] + self_data.shape[0]
new_data = np.recarray(new_len, dtype=self.dtype)
new_data[:self_len] = self_data
new_data[self_len : self_len + other_len] = other_data
new_dict[kper] = new_data
return new_dict
def drop(self, fields):
"""drop fields from an MfList
Parameters
----------
fields : list or set of field names to drop
Returns
-------
dropped : MfList without the dropped fields
"""
if not isinstance(fields, list):
fields = [fields]
names = [n for n in self.dtype.names if n not in fields]
dtype = np.dtype(
[(k, d) for k, d in self.dtype.descr if k not in fields]
)
spd = {}
for k, v in self.data.items():
# because np 1.9 doesn't support indexing by list of columns
newarr = np.array([self.data[k][n] for n in names]).transpose()
newarr = np.array(list(map(tuple, newarr)), dtype=dtype).view(
np.recarray
)
for n in dtype.names:
newarr[n] = self.data[k][n]
spd[k] = newarr
return MfList(self.package, spd, dtype=dtype)
@property
def data(self):
return self.__data
@property
def df(self):
if self.__df is None:
self.__df = self.get_dataframe()
return self.__df
@property
def vtype(self):
return self.__vtype
@property
def dtype(self):
return self.__dtype
# Get the itmp for a given kper
def get_itmp(self, kper):
if kper not in list(self.__data.keys()):
return None
if self.__vtype[kper] is None:
return -1
# If an external file, have to load it
if self.__vtype[kper] == str:
return self.__fromfile(self.__data[kper]).shape[0]
if self.__vtype[kper] == np.recarray:
return self.__data[kper].shape[0]
# If not any of the above, it must be an int
return self.__data[kper]
@property
def mxact(self):
mxact = 0
for kper in list(self.__data.keys()):
mxact = max(mxact, self.get_itmp(kper))
return mxact
@property
def fmt_string(self):
"""Returns a C-style fmt string for numpy savetxt that corresponds to
the dtype"""
if self.list_free_format is not None:
use_free = self.list_free_format
else:
use_free = True
if self.package.parent.has_package("bas6"):
use_free = self.package.parent.bas6.ifrefm
# mt3d list data is fixed format
if "mt3d" in self.package.parent.version.lower():
use_free = False
fmts = []
for field in self.dtype.descr:
vtype = field[1][1].lower()
if vtype in ("i", "b"):
if use_free:
fmts.append("%9d")
else:
fmts.append("%10d")
elif vtype == "f":
if use_free:
if numpy114:
# Use numpy's floating-point formatter (Dragon4)
fmts.append("%15s")
else:
fmts.append("%15.7E")
else:
fmts.append("%10G")
elif vtype == "o":
if use_free:
fmts.append("%9s")
else:
fmts.append("%10s")
elif vtype == "s":
msg = (
"MfList.fmt_string error: 'str' type found in dtype. "
"This gives unpredictable results when "
"recarray to file - change to 'object' type"
)
raise TypeError(msg)
else:
raise TypeError(
"MfList.fmt_string error: unknown vtype in "
"field: {}".format(field)
)
if use_free:
fmt_string = " " + " ".join(fmts)
else:
fmt_string = "".join(fmts)
return fmt_string
# Private method to cast the data argument
# Should only be called by the constructor
def __cast_data(self, data):
# If data is a list, then all we can do is try to cast it to
# an ndarray, then cast again to a recarray
if isinstance(data, list):
# warnings.warn("MfList casting list to array")
try:
data = np.array(data)
except Exception as e:
raise Exception(
"MfList error: casting list to ndarray: " + str(e)
)
# If data is a dict, the we have to assume it is keyed on kper
if isinstance(data, dict):
if not list(data.keys()):
raise Exception("MfList error: data dict is empty")
for kper, d in data.items():
try:
kper = int(kper)
except Exception as e:
raise Exception(
"MfList error: data dict key "
+ "{0:s} not integer: ".format(kper)
+ str(type(kper))
+ "\n"
+ str(e)
)
# Same as before, just try...
if isinstance(d, list):
# warnings.warn("MfList: casting list to array at " +\
# "kper {0:d}".format(kper))
try:
d = np.array(d)
except Exception as e:
raise Exception(
"MfList error: casting list "
+ "to ndarray: "
+ str(e)
)
# super hack - sick of recarrays already
# if (isinstance(d,np.ndarray) and len(d.dtype.fields) > 1):
# d = d.view(np.recarray)
if isinstance(d, np.recarray):
self.__cast_recarray(kper, d)
elif isinstance(d, np.ndarray):
self.__cast_ndarray(kper, d)
elif isinstance(d, int):
self.__cast_int(kper, d)
elif isinstance(d, str):
self.__cast_str(kper, d)
elif d is None:
self.__data[kper] = -1
self.__vtype[kper] = None
else:
raise Exception(
"MfList error: unsupported data type: "
+ str(type(d))
+ " at kper "
+ "{0:d}".format(kper)
)
# A single recarray - same MfList for all stress periods
elif isinstance(data, np.recarray):
self.__cast_recarray(0, data)
# A single ndarray
elif isinstance(data, np.ndarray):
self.__cast_ndarray(0, data)
# A single filename
elif isinstance(data, str):
self.__cast_str(0, data)
else:
raise Exception(
"MfList error: unsupported data type: " + str(type(data))
)
def __cast_str(self, kper, d):
# If d is a string, assume it is a filename and check that it exists
assert os.path.exists(d), (
"MfList error: dict filename (string) '"
+ d
+ "' value for "
+ "kper {0:d} not found".format(kper)
)
self.__data[kper] = d
self.__vtype[kper] = str
def __cast_int(self, kper, d):
# If d is an integer, then it must be 0 or -1
if d > 0:
raise Exception(
"MfList error: dict integer value for "
"kper {0:10d} must be 0 or -1, "
"not {1:10d}".format(kper, d)
)
if d == 0:
self.__data[kper] = 0
self.__vtype[kper] = None
else:
self.__data[kper] = -1
self.__vtype[kper] = None
def __cast_recarray(self, kper, d):
assert d.dtype == self.__dtype, (
"MfList error: recarray dtype: "
+ str(d.dtype)
+ " doesn't match "
+ "self dtype: "
+ str(self.dtype)
)
self.__data[kper] = d
self.__vtype[kper] = np.recarray
def __cast_ndarray(self, kper, d):
d = np.atleast_2d(d)
if d.dtype != self.__dtype:
assert d.shape[1] == len(self.dtype), (
"MfList error: ndarray "
+ "shape "
+ str(d.shape)
+ " doesn't match dtype "
+ "len: "
+ str(len(self.dtype))
)
# warnings.warn("MfList: ndarray dtype does not match self " +\
# "dtype, trying to cast")
try:
self.__data[kper] = np.core.records.fromarrays(
d.transpose(), dtype=self.dtype
)
except Exception as e:
raise Exception(
"MfList error: casting ndarray to recarray: " + str(e)
)
self.__vtype[kper] = np.recarray
def get_dataframe(self, squeeze=True):
"""
Cast recarrays for stress periods into single
dataframe containing all stress periods.
Parameters
----------
squeeze : bool
Reduce number of columns in dataframe to only include
stress periods where a variable changes.
Returns
-------
df : dataframe
Dataframe of shape nrow = ncells, ncol = nvar x nper. If
the squeeze option is chosen, nper is the number of
            stress periods where at least one cell is different,
otherwise it is equal to the number of keys in MfList.data.
Notes
-----
Requires pandas.
"""
try:
import pandas as pd
except Exception as e:
msg = "MfList.get_dataframe() requires pandas"
raise ImportError(msg)
# make a dataframe of all data for all stress periods
names = ["k", "i", "j"]
if "MNW2" in self.package.name:
names += ["wellid"]
# find relevant variable names
# may have to iterate over the first stress period
for per in range(self._model.nper):
if hasattr(self.data[per], "dtype"):
varnames = list(
[n for n in self.data[per].dtype.names if n not in names]
)
break
# create list of dataframes for each stress period
# each with index of k, i, j
dfs = []
for per in self.data.keys():
recs = self.data[per]
if recs is None or len(recs) == 0:
# add an empty dataframe if a stress period is
# empty (e.g. no pumping during a predevelopment
# period)
columns = names + list(
["{}{}".format(c, per) for c in varnames]
)
dfi = pd.DataFrame(data=None, columns=columns)
dfi = dfi.set_index(names)
else:
dfi = pd.DataFrame.from_records(recs)
dfg = dfi.groupby(names)
count = dfg[varnames[0]].count().rename("n")
if (count > 1).values.any():
print(
"Duplicated list entry locations aggregated "
"for kper {}".format(per)
)
for kij in count[count > 1].index.values:
print(" (k,i,j) {}".format(kij))
dfi = dfg.sum() # aggregate
dfi.columns = list(["{}{}".format(c, per) for c in varnames])
dfs.append(dfi)
df = pd.concat(dfs, axis=1)
if squeeze:
keep = []
for var in varnames:
diffcols = list([n for n in df.columns if var in n])
diff = df[diffcols].fillna(0).diff(axis=1)
diff[
"{}0".format(var)
] = 1 # always return the first stress period
changed = diff.sum(axis=0) != 0
keep.append(df.loc[:, changed.index[changed]])
df = pd.concat(keep, axis=1)
df = df.reset_index()
df.insert(len(names), "node", df.i * self._model.ncol + df.j)
return df
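    # Editor's hedged sketch: flattening all stress periods into a single
    # pandas DataFrame, continuing the illustrative `wel` package from the
    # sketch near the top of the class. Requires pandas.
    #
    #     df = wel.stress_period_data.get_dataframe(squeeze=True)
    #     print(df.head())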
def add_record(self, kper, index, values):
# Add a record to possible already set list for a given kper
# index is a list of k,i,j or nodes.
# values is a list of floats.
# The length of index + values must be equal to the number of names
# in dtype
assert len(index) + len(values) == len(self.dtype), (
"MfList.add_record() error: length of index arg +"
+ "length of value arg != length of self dtype"
)
# If we already have something for this kper, then add to it
if kper in list(self.__data.keys()):
if self.vtype[kper] == int:
# If a 0 or -1, reset
self.__data[kper] = self.get_empty(1)
self.__vtype[kper] = np.recarray
elif self.vtype[kper] == str:
# If filename, load into recarray
d = self.__fromfile(self.data[kper])
d.resize(d.shape[0], d.shape[1])
self.__data[kper] = d
self.__vtype[kper] = np.recarray
elif self.vtype[kper] == np.recarray:
# Extend the recarray
self.__data[kper] = np.append(
self.__data[kper], self.get_empty(1)
)
else:
self.__data[kper] = self.get_empty(1)
self.__vtype[kper] = np.recarray
rec = list(index)
rec.extend(list(values))
try:
self.__data[kper][-1] = tuple(rec)
except Exception as e:
raise Exception(
"MfList.add_record() error: adding record to "
+ "recarray: "
+ str(e)
)
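    # Editor's hedged sketch: appending one boundary cell to stress period 1
    # of the illustrative `wel` package above; index is (k, i, j) and the
    # single value is the flux expected by the WEL dtype.
    #
    #     wel.stress_period_data.add_record(kper=1, index=[0, 2, 3],
    #                                       values=[-50.0])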
def __getitem__(self, kper):
# Get the recarray for a given kper
# If the data entry for kper is a string,
# return the corresponding recarray,
# but don't reset the value in the data dict
# assert kper in list(self.data.keys()), "MfList.__getitem__() kper " + \
# str(kper) + " not in data.keys()"
try:
kper = int(kper)
except Exception as e:
raise Exception(
"MfList error: _getitem__() passed invalid kper index:"
+ str(kper)
)
if kper not in list(self.data.keys()):
if kper == 0:
return self.get_empty()
else:
return self.data[self.__find_last_kper(kper)]
if self.vtype[kper] == int:
if self.data[kper] == 0:
return self.get_empty()
else:
return self.data[self.__find_last_kper(kper)]
if self.vtype[kper] == str:
return self.__fromfile(self.data[kper])
if self.vtype[kper] == np.recarray:
return self.data[kper]
def __setitem__(self, kper, data):
if kper in list(self.__data.keys()):
if self._model.verbose:
print("removing existing data for kper={}".format(kper))
self.data.pop(kper)
# If data is a list, then all we can do is try to cast it to
# an ndarray, then cast again to a recarray
if isinstance(data, list):
# warnings.warn("MfList casting list to array")
try:
data = np.array(data)
except Exception as e:
raise Exception(
"MfList error: casting list to ndarray: " + str(e)
)
# cast data
if isinstance(data, int):
self.__cast_int(kper, data)
elif isinstance(data, np.recarray):
self.__cast_recarray(kper, data)
# A single ndarray
elif isinstance(data, np.ndarray):
self.__cast_ndarray(kper, data)
# A single filename
elif isinstance(data, str):
self.__cast_str(kper, data)
else:
raise Exception(
"MfList error: unsupported data type: " + str(type(data))
)
# raise NotImplementedError("MfList.__setitem__() not implemented")
def __fromfile(self, f):
# d = np.fromfile(f,dtype=self.dtype,count=count)
try:
d = np.genfromtxt(f, dtype=self.dtype)
except Exception as e:
raise Exception(
"MfList.__fromfile() error reading recarray "
+ "from file "
+ str(e)
)
return d
def get_filenames(self):
kpers = list(self.data.keys())
kpers.sort()
filenames = []
first = kpers[0]
for kper in list(range(0, max(self._model.nper, max(kpers) + 1))):
# Fill missing early kpers with 0
if kper < first:
itmp = 0
kper_vtype = int
elif kper in kpers:
kper_vtype = self.__vtype[kper]
if (
self._model.array_free_format
and self._model.external_path is not None
):
# py_filepath = ''
# py_filepath = os.path.join(py_filepath,
# self._model.external_path)
filename = self.package.name[0] + "_{0:04d}.dat".format(kper)
filenames.append(filename)
return filenames
def get_filename(self, kper):
ext = "dat"
if self.binary:
ext = "bin"
return self.package.name[0] + "_{0:04d}.{1}".format(kper, ext)
@property
def binary(self):
return bool(self.__binary)
def write_transient(self, f, single_per=None, forceInternal=False):
# forceInternal overrides isExternal (set below) for cases where
# external arrays are not supported (oh hello MNW1!)
# write the transient sequence described by the data dict
nr, nc, nl, nper = self._model.get_nrow_ncol_nlay_nper()
assert hasattr(f, "read"), (
"MfList.write() error: " + "f argument must be a file handle"
)
kpers = list(self.data.keys())
kpers.sort()
first = kpers[0]
if single_per is None:
loop_over_kpers = list(range(0, max(nper, max(kpers) + 1)))
else:
if not isinstance(single_per, list):
single_per = [single_per]
loop_over_kpers = single_per
for kper in loop_over_kpers:
# Fill missing early kpers with 0
if kper < first:
itmp = 0
kper_vtype = int
elif kper in kpers:
kper_data = self.__data[kper]
kper_vtype = self.__vtype[kper]
if kper_vtype == str:
if not self._model.array_free_format:
kper_data = self.__fromfile(kper_data)
kper_vtype = np.recarray
itmp = self.get_itmp(kper)
if kper_vtype == np.recarray:
itmp = kper_data.shape[0]
elif (kper_vtype == int) or (kper_vtype is None):
itmp = kper_data
# Fill late missing kpers with -1
else:
itmp = -1
kper_vtype = int
f.write(
" {0:9d} {1:9d} # stress period {2:d}\n".format(
itmp, 0, kper + 1
)
)
isExternal = False
if (
self._model.array_free_format
and self._model.external_path is not None
and forceInternal is False
):
isExternal = True
if self.__binary:
isExternal = True
if isExternal:
if kper_vtype == np.recarray:
py_filepath = ""
if self._model.model_ws is not None:
py_filepath = self._model.model_ws
if self._model.external_path is not None:
py_filepath = os.path.join(
py_filepath, self._model.external_path
)
filename = self.get_filename(kper)
py_filepath = os.path.join(py_filepath, filename)
model_filepath = filename
if self._model.external_path is not None:
model_filepath = os.path.join(
self._model.external_path, filename
)
self.__tofile(py_filepath, kper_data)
kper_vtype = str
kper_data = model_filepath
if kper_vtype == np.recarray:
name = f.name
if self.__binary or not numpy114:
f.close()
# switch file append mode to binary
with open(name, "ab+") as f:
self.__tofile(f, kper_data)
# continue back to non-binary
f = open(name, "a")
else:
self.__tofile(f, kper_data)
elif kper_vtype == str:
f.write(" open/close " + kper_data)
if self.__binary:
f.write(" (BINARY)")
f.write("\n")
def __tofile(self, f, data):
# Write the recarray (data) to the file (or file handle) f
assert isinstance(data, np.recarray), (
"MfList.__tofile() data arg " + "not a recarray"
)
# Add one to the kij indices
lnames = [name.lower() for name in self.dtype.names]
# --make copy of data for multiple calls
d = data.copy()
for idx in ["k", "i", "j", "node"]:
if idx in lnames:
d[idx] += 1
if self.__binary:
dtype2 = []
for name in self.dtype.names:
dtype2.append((name, np.float32))
dtype2 = np.dtype(dtype2)
d = np.array(d, dtype=dtype2)
d.tofile(f)
else:
np.savetxt(f, d, fmt=self.fmt_string, delimiter="")
def check_kij(self):
names = self.dtype.names
if ("k" not in names) or ("i" not in names) or ("j" not in names):
warnings.warn(
"MfList.check_kij(): index fieldnames 'k,i,j' "
+ "not found in self.dtype names: "
+ str(names)
)
return
nr, nc, nl, nper = self._model.get_nrow_ncol_nlay_nper()
if nl == 0:
warnings.warn(
"MfList.check_kij(): unable to get dis info from " + "model"
)
return
for kper in list(self.data.keys()):
out_idx = []
data = self[kper]
if data is not None:
k = data["k"]
k_idx = np.where(np.logical_or(k < 0, k >= nl))
if k_idx[0].shape[0] > 0:
out_idx.extend(list(k_idx[0]))
i = data["i"]
i_idx = np.where(np.logical_or(i < 0, i >= nr))
if i_idx[0].shape[0] > 0:
out_idx.extend(list(i_idx[0]))
j = data["j"]
j_idx = np.where(np.logical_or(j < 0, j >= nc))
if j_idx[0].shape[0]:
out_idx.extend(list(j_idx[0]))
if len(out_idx) > 0:
warn_str = (
"MfList.check_kij(): warning the following "
+ "indices are out of bounds in kper "
+ str(kper)
+ ":\n"
)
for idx in out_idx:
d = data[idx]
warn_str += " {0:9d} {1:9d} {2:9d}\n".format(
d["k"] + 1, d["i"] + 1, d["j"] + 1
)
warnings.warn(warn_str)
def __find_last_kper(self, kper):
kpers = list(self.data.keys())
kpers.sort()
last = 0
for kkper in kpers[::-1]:
# if this entry is valid
if self.vtype[kkper] != int or self.data[kkper] != -1:
last = kkper
if kkper <= kper:
break
return kkper
def get_indices(self):
"""
a helper function for plotting - get all unique indices
"""
names = self.dtype.names
        lnames = [name.lower() for name in names]
if "k" not in lnames or "j" not in lnames:
raise NotImplementedError("MfList.get_indices requires kij")
kpers = list(self.data.keys())
kpers.sort()
indices = []
for i, kper in enumerate(kpers):
kper_vtype = self.__vtype[kper]
if (kper_vtype != int) or (kper_vtype is not None):
d = self.data[kper]
if not indices:
indices = list(zip(d["k"], d["i"], d["j"]))
else:
new_indices = list(zip(d["k"], d["i"], d["j"]))
for ni in new_indices:
if ni not in indices:
indices.append(ni)
return indices
def attribute_by_kper(self, attr, function=np.mean, idx_val=None):
assert attr in self.dtype.names
if idx_val is not None:
assert idx_val[0] in self.dtype.names
kpers = list(self.data.keys())
kpers.sort()
values = []
for kper in range(0, max(self._model.nper, max(kpers))):
if kper < min(kpers):
values.append(0)
elif kper > max(kpers) or kper not in kpers:
values.append(values[-1])
else:
kper_data = self.__data[kper]
if idx_val is not None:
kper_data = kper_data[
np.where(kper_data[idx_val[0]] == idx_val[1])
]
# kper_vtype = self.__vtype[kper]
v = function(kper_data[attr])
values.append(v)
return values
def plot(
self,
key=None,
names=None,
kper=0,
filename_base=None,
file_extension=None,
mflay=None,
**kwargs
):
"""
Plot stress period boundary condition (MfList) data for a specified
stress period
Parameters
----------
key : str
MfList dictionary key. (default is None)
names : list
List of names for figure titles. (default is None)
kper : int
MODFLOW zero-based stress period number to return. (default is zero)
filename_base : str
Base file name that will be used to automatically generate file
names for output image files. Plots will be exported as image
            files if filename_base is not None. (default is None)
file_extension : str
Valid matplotlib.pyplot file extension for savefig(). Only used
if filename_base is not None. (default is 'png')
mflay : int
            MODFLOW zero-based layer number to return. If None, then all
            layers will be included. (default is None)
**kwargs : dict
axes : list of matplotlib.pyplot.axis
List of matplotlib.pyplot.axis that will be used to plot
data for each layer. If axes=None axes will be generated.
(default is None)
pcolor : bool
Boolean used to determine if matplotlib.pyplot.pcolormesh
plot will be plotted. (default is True)
colorbar : bool
Boolean used to determine if a color bar will be added to
the matplotlib.pyplot.pcolormesh. Only used if pcolor=True.
(default is False)
inactive : bool
Boolean used to determine if a black overlay in inactive
cells in a layer will be displayed. (default is True)
contour : bool
Boolean used to determine if matplotlib.pyplot.contour
plot will be plotted. (default is False)
clabel : bool
Boolean used to determine if matplotlib.pyplot.clabel
will be plotted. Only used if contour=True. (default is False)
grid : bool
Boolean used to determine if the model grid will be plotted
on the figure. (default is False)
masked_values : list
List of unique values to be excluded from the plot.
Returns
----------
out : list
Empty list is returned if filename_base is not None. Otherwise
a list of matplotlib.pyplot.axis is returned.
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.wel.stress_period_data.plot(ml.wel, kper=1)
"""
from flopy.plot import PlotUtilities
axes = PlotUtilities._plot_mflist_helper(
self,
key=key,
names=names,
kper=kper,
filename_base=filename_base,
file_extension=file_extension,
mflay=mflay,
**kwargs
)
return axes
def to_shapefile(self, filename, kper=None):
"""
Export stress period boundary condition (MfList) data for a specified
stress period
Parameters
----------
filename : str
Shapefile name to write
kper : int
MODFLOW zero-based stress period number to return. (default is None)
Returns
----------
None
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.wel.to_shapefile('test_hk.shp', kper=1)
"""
import warnings
warnings.warn(
"Deprecation warning: to_shapefile() is deprecated. use .export()"
)
# if self.sr is None:
# raise Exception("MfList.to_shapefile: SpatialReference not set")
# import flopy.utils.flopy_io as fio
# if kper is None:
# keys = self.data.keys()
# keys.sort()
# else:
# keys = [kper]
# array_dict = {}
# for kk in keys:
# arrays = self.to_array(kk)
# for name, array in arrays.items():
# for k in range(array.shape[0]):
# #aname = name+"{0:03d}_{1:02d}".format(kk, k)
# n = fio.shape_attr_name(name, length=4)
# aname = "{}{:03d}{:03d}".format(n, k+1, int(kk)+1)
# array_dict[aname] = array[k]
# fio.write_grid_shapefile(filename, self.sr, array_dict)
self.export(filename, kper=kper)
def to_array(self, kper=0, mask=False):
"""
Convert stress period boundary condition (MfList) data for a
specified stress period to a 3-D numpy array
Parameters
----------
kper : int
MODFLOW zero-based stress period number to return. (default is zero)
mask : boolean
return array with np.NaN instead of zero
Returns
----------
out : dict of numpy.ndarrays
Dictionary of 3-D numpy arrays containing the stress period data for
a selected stress period. The dictionary keys are the MfList dtype
names for the stress period data ('cond', 'flux', 'bhead', etc.).
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> v = ml.wel.stress_period_data.to_array(kper=1)
"""
i0 = 3
unstructured = False
if "inode" in self.dtype.names:
raise NotImplementedError()
if "node" in self.dtype.names:
if "i" not in self.dtype.names and "j" not in self.dtype.names:
i0 = 1
unstructured = True
arrays = {}
for name in self.dtype.names[i0:]:
if not self.dtype.fields[name][0] == object:
if unstructured:
arr = np.zeros((self._model.nlay * self._model.ncpl,))
else:
arr = np.zeros(
(self._model.nlay, self._model.nrow, self._model.ncol)
)
arrays[name] = arr.copy()
# if this kper is not found
if kper not in self.data.keys():
kpers = list(self.data.keys())
kpers.sort()
# if this kper is before the first entry,
# (maybe) mask and return
if kper < kpers[0]:
if mask:
for name, arr in arrays.items():
arrays[name][:] = np.NaN
return arrays
# find the last kper
else:
kper = self.__find_last_kper(kper)
sarr = self.data[kper]
if np.isscalar(sarr):
# if there are no entries for this kper
if sarr == 0:
if mask:
for name, arr in arrays.items():
arrays[name][:] = np.NaN
return arrays
else:
raise Exception("MfList: something bad happened")
for name, arr in arrays.items():
if unstructured:
cnt = np.zeros(
(self._model.nlay * self._model.ncpl,), dtype=np.float
)
else:
cnt = np.zeros(
(self._model.nlay, self._model.nrow, self._model.ncol),
dtype=np.float,
)
# print(name,kper)
for rec in sarr:
if unstructured:
arr[rec["node"]] += rec[name]
cnt[rec["node"]] += 1.0
else:
arr[rec["k"], rec["i"], rec["j"]] += rec[name]
cnt[rec["k"], rec["i"], rec["j"]] += 1.0
# average keys that should not be added
if name not in ("cond", "flux"):
idx = cnt > 0.0
arr[idx] /= cnt[idx]
if mask:
arr = np.ma.masked_where(cnt == 0.0, arr)
arr[cnt == 0.0] = np.NaN
arrays[name] = arr.copy()
# elif mask:
# for name, arr in arrays.items():
# arrays[name][:] = np.NaN
return arrays
@property
def masked_4D_arrays(self):
# get the first kper
arrays = self.to_array(kper=0, mask=True)
# initialize these big arrays
m4ds = {}
for name, array in arrays.items():
m4d = np.zeros(
(
self._model.nper,
self._model.nlay,
self._model.nrow,
self._model.ncol,
)
)
m4d[0, :, :, :] = array
m4ds[name] = m4d
for kper in range(1, self._model.nper):
arrays = self.to_array(kper=kper, mask=True)
for name, array in arrays.items():
m4ds[name][kper, :, :, :] = array
return m4ds
def masked_4D_arrays_itr(self):
# get the first kper
arrays = self.to_array(kper=0, mask=True)
# initialize these big arrays
for name, array in arrays.items():
m4d = np.zeros(
(
self._model.nper,
self._model.nlay,
self._model.nrow,
self._model.ncol,
)
)
m4d[0, :, :, :] = array
for kper in range(1, self._model.nper):
arrays = self.to_array(kper=kper, mask=True)
for tname, array in arrays.items():
if tname == name:
m4d[kper, :, :, :] = array
yield name, m4d
@property
def array(self):
return self.masked_4D_arrays
@classmethod
def from_4d(cls, model, pak_name, m4ds):
"""construct an MfList instance from a dict of
(attribute_name,masked 4D ndarray
Parameters
----------
model : mbase derived type
pak_name : str package name (e.g GHB)
m4ds : {attribute name:4d masked numpy.ndarray}
Returns
-------
MfList instance
"""
sp_data = MfList.masked4D_arrays_to_stress_period_data(
model.get_package(pak_name).get_default_dtype(), m4ds
)
return cls(model.get_package(pak_name), data=sp_data)
@staticmethod
def masked4D_arrays_to_stress_period_data(dtype, m4ds):
""" convert a dictionary of 4-dim masked arrays to
a stress_period_data style dict of recarray
Parameters
----------
dtype : numpy dtype
m4ds : dict {name:masked numpy 4-dim ndarray}
Returns
-------
dict {kper:recarray}
"""
assert isinstance(m4ds, dict)
for name, m4d in m4ds.items():
assert isinstance(m4d, np.ndarray)
assert name in dtype.names
assert m4d.ndim == 4
keys = list(m4ds.keys())
for i1, key1 in enumerate(keys):
a1 = np.isnan(m4ds[key1])
for i2, key2 in enumerate(keys[i1:]):
a2 = np.isnan(m4ds[key2])
if not np.array_equal(a1, a2):
raise Exception(
"Transient2d error: masking not equal"
+ " for {0} and {1}".format(key1, key2)
)
sp_data = {}
for kper in range(m4d.shape[0]):
vals = {}
for name, m4d in m4ds.items():
arr = m4d[kper, :, :, :]
isnan = np.argwhere(~np.isnan(arr))
v = []
for k, i, j in isnan:
v.append(arr[k, i, j])
vals[name] = v
kk = isnan[:, 0]
ii = isnan[:, 1]
jj = isnan[:, 2]
spd = np.recarray(shape=isnan.shape[0], dtype=dtype)
spd["i"] = ii
spd["k"] = kk
spd["j"] = jj
for n, v in vals.items():
spd[n] = v
sp_data[kper] = spd
return sp_data
| 34.349807
| 81
| 0.49012
|
from __future__ import division, print_function
import os
import warnings
import numpy as np
from ..datbase import DataInterface, DataListInterface, DataType
from ..utils.recarray_utils import create_empty_recarray
try:
from numpy.lib import NumpyVersion
numpy114 = NumpyVersion(np.__version__) >= "1.14.0"
except ImportError:
numpy114 = False
class MfList(DataInterface, DataListInterface):
def __init__(
self,
package,
data=None,
dtype=None,
model=None,
list_free_format=None,
binary=False,
):
if isinstance(data, MfList):
for attr in data.__dict__.items():
setattr(self, attr[0], attr[1])
if model is None:
self._model = package.parent
else:
self._model = model
self._package = package
return
self._package = package
if model is None:
self._model = package.parent
else:
self._model = model
if dtype is None:
assert isinstance(self.package.dtype, np.dtype)
self.__dtype = self.package.dtype
else:
self.__dtype = dtype
self.__binary = binary
self.__vtype = {}
self.__data = {}
if data is not None:
self.__cast_data(data)
self.__df = None
if list_free_format is None:
if package.parent.version == "mf2k":
list_free_format = False
self.list_free_format = list_free_format
return
@property
def name(self):
return self.package.name
@property
def mg(self):
return self._model.modelgrid
@property
def sr(self):
return self.mg.sr
@property
def model(self):
return self._model
@property
def package(self):
return self._package
@property
def data_type(self):
return DataType.transientlist
@property
def plotable(self):
return True
def get_empty(self, ncell=0):
d = create_empty_recarray(ncell, self.dtype, default_value=-1.0e10)
return d
def export(self, f, **kwargs):
from flopy import export
return export.utils.mflist_export(f, self, **kwargs)
def append(self, other):
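        # Concatenate another MfList's records with this one, stress period
        # by stress period, and return the result as a {kper: recarray}
        # dict; the instance itself is not modified.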
if not isinstance(other, MfList):
other = MfList(
self.package,
data=other,
dtype=self.dtype,
model=self._model,
list_free_format=self.list_free_format,
)
msg = (
"MfList.append(): other arg must be "
+ "MfList or dict, not {0}".format(type(other))
)
assert isinstance(other, MfList), msg
other_kpers = list(other.data.keys())
other_kpers.sort()
self_kpers = list(self.data.keys())
self_kpers.sort()
new_dict = {}
for kper in range(self._model.nper):
other_data = other[kper].copy()
self_data = self[kper].copy()
other_len = other_data.shape[0]
self_len = self_data.shape[0]
if (other_len == 0 and self_len == 0) or (
kper not in self_kpers and kper not in other_kpers
):
continue
elif self_len == 0:
new_dict[kper] = other_data
elif other_len == 0:
new_dict[kper] = self_data
else:
new_len = other_data.shape[0] + self_data.shape[0]
new_data = np.recarray(new_len, dtype=self.dtype)
new_data[:self_len] = self_data
new_data[self_len : self_len + other_len] = other_data
new_dict[kper] = new_data
return new_dict
def drop(self, fields):
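        # Return a new MfList whose dtype and per-period recarrays exclude
        # the given field name(s).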
if not isinstance(fields, list):
fields = [fields]
names = [n for n in self.dtype.names if n not in fields]
dtype = np.dtype(
[(k, d) for k, d in self.dtype.descr if k not in fields]
)
spd = {}
for k, v in self.data.items():
newarr = np.array([self.data[k][n] for n in names]).transpose()
newarr = np.array(list(map(tuple, newarr)), dtype=dtype).view(
np.recarray
)
for n in dtype.names:
newarr[n] = self.data[k][n]
spd[k] = newarr
return MfList(self.package, spd, dtype=dtype)
@property
def data(self):
return self.__data
@property
def df(self):
if self.__df is None:
self.__df = self.get_dataframe()
return self.__df
@property
def vtype(self):
return self.__vtype
@property
def dtype(self):
return self.__dtype
# Get the itmp for a given kper
def get_itmp(self, kper):
if kper not in list(self.__data.keys()):
return None
if self.__vtype[kper] is None:
return -1
# If an external file, have to load it
if self.__vtype[kper] == str:
return self.__fromfile(self.__data[kper]).shape[0]
if self.__vtype[kper] == np.recarray:
return self.__data[kper].shape[0]
# If not any of the above, it must be an int
return self.__data[kper]
@property
def mxact(self):
mxact = 0
for kper in list(self.__data.keys()):
mxact = max(mxact, self.get_itmp(kper))
return mxact
@property
def fmt_string(self):
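        # Assemble the per-field format string handed to np.savetxt() when
        # writing list data; e.g. a free-format (k, i, j, flux) dtype would
        # (hypothetically, assuming numpy >= 1.14) come out as
        # " %9d %9d %9d %15s".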
if self.list_free_format is not None:
use_free = self.list_free_format
else:
use_free = True
if self.package.parent.has_package("bas6"):
use_free = self.package.parent.bas6.ifrefm
# mt3d list data is fixed format
if "mt3d" in self.package.parent.version.lower():
use_free = False
fmts = []
for field in self.dtype.descr:
vtype = field[1][1].lower()
if vtype in ("i", "b"):
if use_free:
fmts.append("%9d")
else:
fmts.append("%10d")
elif vtype == "f":
if use_free:
if numpy114:
# Use numpy's floating-point formatter (Dragon4)
fmts.append("%15s")
else:
fmts.append("%15.7E")
else:
fmts.append("%10G")
elif vtype == "o":
if use_free:
fmts.append("%9s")
else:
fmts.append("%10s")
elif vtype == "s":
msg = (
"MfList.fmt_string error: 'str' type found in dtype. "
"This gives unpredictable results when "
"recarray to file - change to 'object' type"
)
raise TypeError(msg)
else:
raise TypeError(
"MfList.fmt_string error: unknown vtype in "
"field: {}".format(field)
)
if use_free:
fmt_string = " " + " ".join(fmts)
else:
fmt_string = "".join(fmts)
return fmt_string
def __cast_data(self, data):
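        # Normalize user-supplied stress period data into the internal
        # __data/__vtype dicts keyed by kper: recarrays are stored directly,
        # lists and ndarrays are converted to recarrays, ints and None flag
        # periods with no entries (0) or reuse of the previous period (-1),
        # and strings are external filenames.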
if isinstance(data, list):
try:
data = np.array(data)
except Exception as e:
raise Exception(
"MfList error: casting list to ndarray: " + str(e)
)
if isinstance(data, dict):
if not list(data.keys()):
raise Exception("MfList error: data dict is empty")
for kper, d in data.items():
try:
kper = int(kper)
except Exception as e:
raise Exception(
"MfList error: data dict key "
+ "{0:s} not integer: ".format(kper)
+ str(type(kper))
+ "\n"
+ str(e)
)
if isinstance(d, list):
try:
d = np.array(d)
except Exception as e:
raise Exception(
"MfList error: casting list "
+ "to ndarray: "
+ str(e)
)
if isinstance(d, np.recarray):
self.__cast_recarray(kper, d)
elif isinstance(d, np.ndarray):
self.__cast_ndarray(kper, d)
elif isinstance(d, int):
self.__cast_int(kper, d)
elif isinstance(d, str):
self.__cast_str(kper, d)
elif d is None:
self.__data[kper] = -1
self.__vtype[kper] = None
else:
raise Exception(
"MfList error: unsupported data type: "
+ str(type(d))
+ " at kper "
+ "{0:d}".format(kper)
)
elif isinstance(data, np.recarray):
self.__cast_recarray(0, data)
elif isinstance(data, np.ndarray):
self.__cast_ndarray(0, data)
elif isinstance(data, str):
self.__cast_str(0, data)
else:
raise Exception(
"MfList error: unsupported data type: " + str(type(data))
)
def __cast_str(self, kper, d):
assert os.path.exists(d), (
"MfList error: dict filename (string) '"
+ d
+ "' value for "
+ "kper {0:d} not found".format(kper)
)
self.__data[kper] = d
self.__vtype[kper] = str
def __cast_int(self, kper, d):
if d > 0:
raise Exception(
"MfList error: dict integer value for "
"kper {0:10d} must be 0 or -1, "
"not {1:10d}".format(kper, d)
)
if d == 0:
self.__data[kper] = 0
self.__vtype[kper] = None
else:
self.__data[kper] = -1
self.__vtype[kper] = None
def __cast_recarray(self, kper, d):
assert d.dtype == self.__dtype, (
"MfList error: recarray dtype: "
+ str(d.dtype)
+ " doesn't match "
+ "self dtype: "
+ str(self.dtype)
)
self.__data[kper] = d
self.__vtype[kper] = np.recarray
def __cast_ndarray(self, kper, d):
d = np.atleast_2d(d)
if d.dtype != self.__dtype:
assert d.shape[1] == len(self.dtype), (
"MfList error: ndarray "
+ "shape "
+ str(d.shape)
+ " doesn't match dtype "
+ "len: "
+ str(len(self.dtype))
)
try:
self.__data[kper] = np.core.records.fromarrays(
d.transpose(), dtype=self.dtype
)
except Exception as e:
raise Exception(
"MfList error: casting ndarray to recarray: " + str(e)
)
self.__vtype[kper] = np.recarray
def get_dataframe(self, squeeze=True):
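        # Build a wide pandas DataFrame with one row per (k, i, j) location
        # (plus wellid for MNW2) and one column per variable per stress
        # period; duplicate locations within a period are summed,
        # squeeze=True drops period columns in which no variable changed,
        # and a "node" column is derived from the i and j indices.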
try:
import pandas as pd
except Exception as e:
msg = "MfList.get_dataframe() requires pandas"
raise ImportError(msg)
names = ["k", "i", "j"]
if "MNW2" in self.package.name:
names += ["wellid"]
for per in range(self._model.nper):
if hasattr(self.data[per], "dtype"):
varnames = list(
[n for n in self.data[per].dtype.names if n not in names]
)
break
dfs = []
for per in self.data.keys():
recs = self.data[per]
if recs is None or len(recs) == 0:
columns = names + list(
["{}{}".format(c, per) for c in varnames]
)
dfi = pd.DataFrame(data=None, columns=columns)
dfi = dfi.set_index(names)
else:
dfi = pd.DataFrame.from_records(recs)
dfg = dfi.groupby(names)
count = dfg[varnames[0]].count().rename("n")
if (count > 1).values.any():
print(
"Duplicated list entry locations aggregated "
"for kper {}".format(per)
)
for kij in count[count > 1].index.values:
print(" (k,i,j) {}".format(kij))
dfi = dfg.sum()
dfi.columns = list(["{}{}".format(c, per) for c in varnames])
dfs.append(dfi)
df = pd.concat(dfs, axis=1)
if squeeze:
keep = []
for var in varnames:
diffcols = list([n for n in df.columns if var in n])
diff = df[diffcols].fillna(0).diff(axis=1)
                diff["{}0".format(var)] = 1
changed = diff.sum(axis=0) != 0
keep.append(df.loc[:, changed.index[changed]])
df = pd.concat(keep, axis=1)
df = df.reset_index()
df.insert(len(names), "node", df.i * self._model.ncol + df.j)
return df
def add_record(self, kper, index, values):
assert len(index) + len(values) == len(self.dtype), (
"MfList.add_record() error: length of index arg +"
+ "length of value arg != length of self dtype"
)
if kper in list(self.__data.keys()):
if self.vtype[kper] == int:
self.__data[kper] = self.get_empty(1)
self.__vtype[kper] = np.recarray
elif self.vtype[kper] == str:
d = self.__fromfile(self.data[kper])
d.resize(d.shape[0], d.shape[1])
self.__data[kper] = d
self.__vtype[kper] = np.recarray
elif self.vtype[kper] == np.recarray:
self.__data[kper] = np.append(
self.__data[kper], self.get_empty(1)
)
else:
self.__data[kper] = self.get_empty(1)
self.__vtype[kper] = np.recarray
rec = list(index)
rec.extend(list(values))
try:
self.__data[kper][-1] = tuple(rec)
except Exception as e:
raise Exception(
"MfList.add_record() error: adding record to "
+ "recarray: "
+ str(e)
)
def __getitem__(self, kper):
# assert kper in list(self.data.keys()), "MfList.__getitem__() kper " + \
# str(kper) + " not in data.keys()"
try:
kper = int(kper)
except Exception as e:
raise Exception(
"MfList error: _getitem__() passed invalid kper index:"
+ str(kper)
)
if kper not in list(self.data.keys()):
if kper == 0:
return self.get_empty()
else:
return self.data[self.__find_last_kper(kper)]
if self.vtype[kper] == int:
if self.data[kper] == 0:
return self.get_empty()
else:
return self.data[self.__find_last_kper(kper)]
if self.vtype[kper] == str:
return self.__fromfile(self.data[kper])
if self.vtype[kper] == np.recarray:
return self.data[kper]
def __setitem__(self, kper, data):
if kper in list(self.__data.keys()):
if self._model.verbose:
print("removing existing data for kper={}".format(kper))
self.data.pop(kper)
# If data is a list, then all we can do is try to cast it to
# an ndarray, then cast again to a recarray
if isinstance(data, list):
# warnings.warn("MfList casting list to array")
try:
data = np.array(data)
except Exception as e:
raise Exception(
"MfList error: casting list to ndarray: " + str(e)
)
# cast data
if isinstance(data, int):
self.__cast_int(kper, data)
elif isinstance(data, np.recarray):
self.__cast_recarray(kper, data)
# A single ndarray
elif isinstance(data, np.ndarray):
self.__cast_ndarray(kper, data)
# A single filename
elif isinstance(data, str):
self.__cast_str(kper, data)
else:
raise Exception(
"MfList error: unsupported data type: " + str(type(data))
)
# raise NotImplementedError("MfList.__setitem__() not implemented")
def __fromfile(self, f):
# d = np.fromfile(f,dtype=self.dtype,count=count)
try:
d = np.genfromtxt(f, dtype=self.dtype)
except Exception as e:
raise Exception(
"MfList.__fromfile() error reading recarray "
+ "from file "
+ str(e)
)
return d
def get_filenames(self):
kpers = list(self.data.keys())
kpers.sort()
filenames = []
first = kpers[0]
for kper in list(range(0, max(self._model.nper, max(kpers) + 1))):
# Fill missing early kpers with 0
if kper < first:
itmp = 0
kper_vtype = int
elif kper in kpers:
kper_vtype = self.__vtype[kper]
if (
self._model.array_free_format
and self._model.external_path is not None
):
# py_filepath = ''
# py_filepath = os.path.join(py_filepath,
# self._model.external_path)
filename = self.package.name[0] + "_{0:04d}.dat".format(kper)
filenames.append(filename)
return filenames
def get_filename(self, kper):
ext = "dat"
if self.binary:
ext = "bin"
return self.package.name[0] + "_{0:04d}.{1}".format(kper, ext)
@property
def binary(self):
return bool(self.__binary)
def write_transient(self, f, single_per=None, forceInternal=False):
# forceInternal overrides isExternal (set below) for cases where
# external arrays are not supported (oh hello MNW1!)
# write the transient sequence described by the data dict
nr, nc, nl, nper = self._model.get_nrow_ncol_nlay_nper()
assert hasattr(f, "read"), (
"MfList.write() error: " + "f argument must be a file handle"
)
kpers = list(self.data.keys())
kpers.sort()
first = kpers[0]
if single_per is None:
loop_over_kpers = list(range(0, max(nper, max(kpers) + 1)))
else:
if not isinstance(single_per, list):
single_per = [single_per]
loop_over_kpers = single_per
for kper in loop_over_kpers:
# Fill missing early kpers with 0
if kper < first:
itmp = 0
kper_vtype = int
elif kper in kpers:
kper_data = self.__data[kper]
kper_vtype = self.__vtype[kper]
if kper_vtype == str:
if not self._model.array_free_format:
kper_data = self.__fromfile(kper_data)
kper_vtype = np.recarray
itmp = self.get_itmp(kper)
if kper_vtype == np.recarray:
itmp = kper_data.shape[0]
elif (kper_vtype == int) or (kper_vtype is None):
itmp = kper_data
# Fill late missing kpers with -1
else:
itmp = -1
kper_vtype = int
f.write(
" {0:9d} {1:9d} # stress period {2:d}\n".format(
itmp, 0, kper + 1
)
)
isExternal = False
if (
self._model.array_free_format
and self._model.external_path is not None
and forceInternal is False
):
isExternal = True
if self.__binary:
isExternal = True
if isExternal:
if kper_vtype == np.recarray:
py_filepath = ""
if self._model.model_ws is not None:
py_filepath = self._model.model_ws
if self._model.external_path is not None:
py_filepath = os.path.join(
py_filepath, self._model.external_path
)
filename = self.get_filename(kper)
py_filepath = os.path.join(py_filepath, filename)
model_filepath = filename
if self._model.external_path is not None:
model_filepath = os.path.join(
self._model.external_path, filename
)
self.__tofile(py_filepath, kper_data)
kper_vtype = str
kper_data = model_filepath
if kper_vtype == np.recarray:
name = f.name
if self.__binary or not numpy114:
f.close()
# switch file append mode to binary
with open(name, "ab+") as f:
self.__tofile(f, kper_data)
# continue back to non-binary
f = open(name, "a")
else:
self.__tofile(f, kper_data)
elif kper_vtype == str:
f.write(" open/close " + kper_data)
if self.__binary:
f.write(" (BINARY)")
f.write("\n")
def __tofile(self, f, data):
# Write the recarray (data) to the file (or file handle) f
assert isinstance(data, np.recarray), (
"MfList.__tofile() data arg " + "not a recarray"
)
# Add one to the kij indices
lnames = [name.lower() for name in self.dtype.names]
# --make copy of data for multiple calls
d = data.copy()
for idx in ["k", "i", "j", "node"]:
if idx in lnames:
d[idx] += 1
if self.__binary:
dtype2 = []
for name in self.dtype.names:
dtype2.append((name, np.float32))
dtype2 = np.dtype(dtype2)
d = np.array(d, dtype=dtype2)
d.tofile(f)
else:
np.savetxt(f, d, fmt=self.fmt_string, delimiter="")
def check_kij(self):
names = self.dtype.names
if ("k" not in names) or ("i" not in names) or ("j" not in names):
warnings.warn(
"MfList.check_kij(): index fieldnames 'k,i,j' "
+ "not found in self.dtype names: "
+ str(names)
)
return
nr, nc, nl, nper = self._model.get_nrow_ncol_nlay_nper()
if nl == 0:
warnings.warn(
"MfList.check_kij(): unable to get dis info from " + "model"
)
return
for kper in list(self.data.keys()):
out_idx = []
data = self[kper]
if data is not None:
k = data["k"]
k_idx = np.where(np.logical_or(k < 0, k >= nl))
if k_idx[0].shape[0] > 0:
out_idx.extend(list(k_idx[0]))
i = data["i"]
i_idx = np.where(np.logical_or(i < 0, i >= nr))
if i_idx[0].shape[0] > 0:
out_idx.extend(list(i_idx[0]))
j = data["j"]
j_idx = np.where(np.logical_or(j < 0, j >= nc))
if j_idx[0].shape[0]:
out_idx.extend(list(j_idx[0]))
if len(out_idx) > 0:
warn_str = (
"MfList.check_kij(): warning the following "
+ "indices are out of bounds in kper "
+ str(kper)
+ ":\n"
)
for idx in out_idx:
d = data[idx]
warn_str += " {0:9d} {1:9d} {2:9d}\n".format(
d["k"] + 1, d["i"] + 1, d["j"] + 1
)
warnings.warn(warn_str)
def __find_last_kper(self, kper):
kpers = list(self.data.keys())
kpers.sort()
last = 0
for kkper in kpers[::-1]:
# if this entry is valid
if self.vtype[kkper] != int or self.data[kkper] != -1:
last = kkper
if kkper <= kper:
break
return kkper
def get_indices(self):
names = self.dtype.names
        lnames = [name.lower() for name in names]
if "k" not in lnames or "j" not in lnames:
raise NotImplementedError("MfList.get_indices requires kij")
kpers = list(self.data.keys())
kpers.sort()
indices = []
for i, kper in enumerate(kpers):
kper_vtype = self.__vtype[kper]
if (kper_vtype != int) or (kper_vtype is not None):
d = self.data[kper]
if not indices:
indices = list(zip(d["k"], d["i"], d["j"]))
else:
new_indices = list(zip(d["k"], d["i"], d["j"]))
for ni in new_indices:
if ni not in indices:
indices.append(ni)
return indices
def attribute_by_kper(self, attr, function=np.mean, idx_val=None):
assert attr in self.dtype.names
if idx_val is not None:
assert idx_val[0] in self.dtype.names
kpers = list(self.data.keys())
kpers.sort()
values = []
for kper in range(0, max(self._model.nper, max(kpers))):
if kper < min(kpers):
values.append(0)
elif kper > max(kpers) or kper not in kpers:
values.append(values[-1])
else:
kper_data = self.__data[kper]
if idx_val is not None:
kper_data = kper_data[
np.where(kper_data[idx_val[0]] == idx_val[1])
]
# kper_vtype = self.__vtype[kper]
v = function(kper_data[attr])
values.append(v)
return values
def plot(
self,
key=None,
names=None,
kper=0,
filename_base=None,
file_extension=None,
mflay=None,
**kwargs
):
from flopy.plot import PlotUtilities
axes = PlotUtilities._plot_mflist_helper(
self,
key=key,
names=names,
kper=kper,
filename_base=filename_base,
file_extension=file_extension,
mflay=mflay,
**kwargs
)
return axes
def to_shapefile(self, filename, kper=None):
import warnings
warnings.warn(
"Deprecation warning: to_shapefile() is deprecated. use .export()"
)
# if self.sr is None:
# raise Exception("MfList.to_shapefile: SpatialReference not set")
# import flopy.utils.flopy_io as fio
# if kper is None:
# keys = self.data.keys()
# keys.sort()
# else:
# keys = [kper]
# array_dict = {}
# for kk in keys:
# arrays = self.to_array(kk)
# for name, array in arrays.items():
# for k in range(array.shape[0]):
# #aname = name+"{0:03d}_{1:02d}".format(kk, k)
# n = fio.shape_attr_name(name, length=4)
# aname = "{}{:03d}{:03d}".format(n, k+1, int(kk)+1)
# array_dict[aname] = array[k]
# fio.write_grid_shapefile(filename, self.sr, array_dict)
self.export(filename, kper=kper)
def to_array(self, kper=0, mask=False):
i0 = 3
unstructured = False
if "inode" in self.dtype.names:
raise NotImplementedError()
if "node" in self.dtype.names:
if "i" not in self.dtype.names and "j" not in self.dtype.names:
i0 = 1
unstructured = True
arrays = {}
for name in self.dtype.names[i0:]:
if not self.dtype.fields[name][0] == object:
if unstructured:
arr = np.zeros((self._model.nlay * self._model.ncpl,))
else:
arr = np.zeros(
(self._model.nlay, self._model.nrow, self._model.ncol)
)
arrays[name] = arr.copy()
# if this kper is not found
if kper not in self.data.keys():
kpers = list(self.data.keys())
kpers.sort()
# if this kper is before the first entry,
# (maybe) mask and return
if kper < kpers[0]:
if mask:
for name, arr in arrays.items():
arrays[name][:] = np.NaN
return arrays
# find the last kper
else:
kper = self.__find_last_kper(kper)
sarr = self.data[kper]
if np.isscalar(sarr):
# if there are no entries for this kper
if sarr == 0:
if mask:
for name, arr in arrays.items():
arrays[name][:] = np.NaN
return arrays
else:
raise Exception("MfList: something bad happened")
for name, arr in arrays.items():
if unstructured:
cnt = np.zeros(
(self._model.nlay * self._model.ncpl,), dtype=np.float
)
else:
cnt = np.zeros(
(self._model.nlay, self._model.nrow, self._model.ncol),
dtype=np.float,
)
# print(name,kper)
for rec in sarr:
if unstructured:
arr[rec["node"]] += rec[name]
cnt[rec["node"]] += 1.0
else:
arr[rec["k"], rec["i"], rec["j"]] += rec[name]
cnt[rec["k"], rec["i"], rec["j"]] += 1.0
# average keys that should not be added
if name not in ("cond", "flux"):
idx = cnt > 0.0
arr[idx] /= cnt[idx]
if mask:
arr = np.ma.masked_where(cnt == 0.0, arr)
arr[cnt == 0.0] = np.NaN
arrays[name] = arr.copy()
# elif mask:
# for name, arr in arrays.items():
# arrays[name][:] = np.NaN
return arrays
@property
def masked_4D_arrays(self):
# get the first kper
arrays = self.to_array(kper=0, mask=True)
# initialize these big arrays
m4ds = {}
for name, array in arrays.items():
m4d = np.zeros(
(
self._model.nper,
self._model.nlay,
self._model.nrow,
self._model.ncol,
)
)
m4d[0, :, :, :] = array
m4ds[name] = m4d
for kper in range(1, self._model.nper):
arrays = self.to_array(kper=kper, mask=True)
for name, array in arrays.items():
m4ds[name][kper, :, :, :] = array
return m4ds
def masked_4D_arrays_itr(self):
# get the first kper
arrays = self.to_array(kper=0, mask=True)
# initialize these big arrays
for name, array in arrays.items():
m4d = np.zeros(
(
self._model.nper,
self._model.nlay,
self._model.nrow,
self._model.ncol,
)
)
m4d[0, :, :, :] = array
for kper in range(1, self._model.nper):
arrays = self.to_array(kper=kper, mask=True)
for tname, array in arrays.items():
if tname == name:
m4d[kper, :, :, :] = array
yield name, m4d
@property
def array(self):
return self.masked_4D_arrays
@classmethod
def from_4d(cls, model, pak_name, m4ds):
sp_data = MfList.masked4D_arrays_to_stress_period_data(
model.get_package(pak_name).get_default_dtype(), m4ds
)
return cls(model.get_package(pak_name), data=sp_data)
@staticmethod
def masked4D_arrays_to_stress_period_data(dtype, m4ds):
assert isinstance(m4ds, dict)
for name, m4d in m4ds.items():
assert isinstance(m4d, np.ndarray)
assert name in dtype.names
assert m4d.ndim == 4
keys = list(m4ds.keys())
for i1, key1 in enumerate(keys):
a1 = np.isnan(m4ds[key1])
for i2, key2 in enumerate(keys[i1:]):
a2 = np.isnan(m4ds[key2])
if not np.array_equal(a1, a2):
raise Exception(
"Transient2d error: masking not equal"
+ " for {0} and {1}".format(key1, key2)
)
sp_data = {}
for kper in range(m4d.shape[0]):
vals = {}
for name, m4d in m4ds.items():
arr = m4d[kper, :, :, :]
isnan = np.argwhere(~np.isnan(arr))
v = []
for k, i, j in isnan:
v.append(arr[k, i, j])
vals[name] = v
kk = isnan[:, 0]
ii = isnan[:, 1]
jj = isnan[:, 2]
spd = np.recarray(shape=isnan.shape[0], dtype=dtype)
spd["i"] = ii
spd["k"] = kk
spd["j"] = jj
for n, v in vals.items():
spd[n] = v
sp_data[kper] = spd
return sp_data
| true
| true
|
79049133923de2c6452134da9e925d0cf99c16c7
| 6,283
|
py
|
Python
|
main.py
|
EmilienDupont/neural-function-distributions
|
c034bf79640c6d8922f1c276174b3cb1800d22b4
|
[
"MIT"
] | 96
|
2021-05-31T19:29:51.000Z
|
2022-03-22T02:15:46.000Z
|
main.py
|
EmilienDupont/neural-function-distributions
|
c034bf79640c6d8922f1c276174b3cb1800d22b4
|
[
"MIT"
] | null | null | null |
main.py
|
EmilienDupont/neural-function-distributions
|
c034bf79640c6d8922f1c276174b3cb1800d22b4
|
[
"MIT"
] | 8
|
2021-06-05T05:14:05.000Z
|
2022-03-25T02:15:40.000Z
|
import json
import os
import sys
import time
import torch
from training.training import Trainer
from data.conversion import GridDataConverter, PointCloudDataConverter, ERA5Converter
from data.dataloaders import mnist, celebahq
from data.dataloaders_era5 import era5
from data.dataloaders3d import shapenet_voxels, shapenet_point_clouds
from models.discriminator import PointConvDiscriminator
from models.function_distribution import HyperNetwork, FunctionDistribution
from models.function_representation import FunctionRepresentation, FourierFeatures
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Get config file from command line arguments
if len(sys.argv) != 2:
raise(RuntimeError("Wrong arguments, use python main.py <config_path>"))
config_path = sys.argv[1]
# Open config file
with open(config_path) as f:
config = json.load(f)
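# A minimal illustrative config (hypothetical values; only keys that this
# script reads are shown, and list-valued discriminator settings are elided):
# {
#   "id": "mnist",
#   "path_to_data": "/path/to/mnist",
#   "dataset": "mnist",
#   "resolution": 28,
#   "training": {"batch_size": 64, "lr": 1e-4, "lr_disc": 4e-4,
#                "r1_weight": 10.0, "max_num_points": 784, "print_freq": 50,
#                "model_save_freq": 1, "epochs": 100},
#   "generator": {"latent_dim": 64, "layer_sizes": [128, 128, 128],
#                 "hypernet_layer_sizes": [256, 512],
#                 "encoding": {"num_frequencies": 128, "std_dev": 2.0}},
#   "discriminator": {"layer_configs": [...], "linear_layer_sizes": [...],
#                     "norm_order": 1, "add_batchnorm": false,
#                     "add_weightnet_batchnorm": true, "deterministic": true,
#                     "same_coordinates": false}
# }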
if config["path_to_data"] == "":
raise(RuntimeError("Path to data not specified. Modify path_to_data attribute in config to point to data."))
# Create a folder to store experiment results
timestamp = time.strftime("%Y-%m-%d_%H-%M")
directory = "{}_{}".format(timestamp, config["id"])
if not os.path.exists(directory):
os.makedirs(directory)
# Save config file in experiment directory
with open(directory + '/config.json', 'w') as f:
json.dump(config, f)
# Setup dataloader
is_voxel = False
is_point_cloud = False
is_era5 = False
if config["dataset"] == 'mnist':
dataloader = mnist(path_to_data=config["path_to_data"],
batch_size=config["training"]["batch_size"],
size=config["resolution"],
train=True)
input_dim = 2
output_dim = 1
data_shape = (1, config["resolution"], config["resolution"])
elif config["dataset"] == 'celebahq':
dataloader = celebahq(path_to_data=config["path_to_data"],
batch_size=config["training"]["batch_size"],
size=config["resolution"])
input_dim = 2
output_dim = 3
data_shape = (3, config["resolution"], config["resolution"])
elif config["dataset"] == 'shapenet_voxels':
dataloader = shapenet_voxels(path_to_data=config["path_to_data"],
batch_size=config["training"]["batch_size"],
size=config["resolution"])
input_dim = 3
output_dim = 1
data_shape = (1, config["resolution"], config["resolution"], config["resolution"])
is_voxel = True
elif config["dataset"] == 'shapenet_point_clouds':
dataloader = shapenet_point_clouds(path_to_data=config["path_to_data"],
batch_size=config["training"]["batch_size"])
input_dim = 3
output_dim = 1
data_shape = (1, config["resolution"], config["resolution"], config["resolution"])
is_point_cloud = True
elif config["dataset"] == 'era5':
dataloader = era5(path_to_data=config["path_to_data"],
batch_size=config["training"]["batch_size"])
input_dim = 3
output_dim = 1
data_shape = (46, 90)
is_era5 = True
# Setup data converter
if is_point_cloud:
data_converter = PointCloudDataConverter(device, data_shape, normalize_features=True)
elif is_era5:
data_converter = ERA5Converter(device, data_shape, normalize_features=True)
else:
data_converter = GridDataConverter(device, data_shape, normalize_features=True)
# Setup encoding for function distribution
num_frequencies = config["generator"]["encoding"]["num_frequencies"]
std_dev = config["generator"]["encoding"]["std_dev"]
if num_frequencies:
frequency_matrix = torch.normal(mean=torch.zeros(num_frequencies, input_dim),
std=std_dev).to(device)
encoding = FourierFeatures(frequency_matrix)
else:
encoding = torch.nn.Identity()
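# Note on the branch above (an assumption based on the FourierFeatures name,
# not verified against models/function_representation.py): each input
# coordinate appears to be lifted to random Fourier features built from the
# frequency_matrix drawn above before reaching the generator, while
# num_frequencies == 0 passes the raw coordinates through unchanged.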
# Setup generator models
final_non_linearity = torch.nn.Tanh()
non_linearity = torch.nn.LeakyReLU(0.1)
function_representation = FunctionRepresentation(input_dim, output_dim,
config["generator"]["layer_sizes"],
encoding, non_linearity,
final_non_linearity).to(device)
hypernetwork = HyperNetwork(function_representation, config["generator"]["latent_dim"],
config["generator"]["hypernet_layer_sizes"], non_linearity).to(device)
function_distribution = FunctionDistribution(hypernetwork).to(device)
# Setup discriminator
discriminator = PointConvDiscriminator(input_dim, output_dim, config["discriminator"]["layer_configs"],
linear_layer_sizes=config["discriminator"]["linear_layer_sizes"],
norm_order=config["discriminator"]["norm_order"],
add_sigmoid=True,
add_batchnorm=config["discriminator"]["add_batchnorm"],
add_weightnet_batchnorm=config["discriminator"]["add_weightnet_batchnorm"],
deterministic=config["discriminator"]["deterministic"],
same_coordinates=config["discriminator"]["same_coordinates"]).to(device)
print("\nFunction distribution")
print(hypernetwork)
print("Number of parameters: {}".format(count_parameters(hypernetwork)))
print("\nDiscriminator")
print(discriminator)
print("Number of parameters: {}".format(count_parameters(discriminator)))
# Setup trainer
trainer = Trainer(device, function_distribution, discriminator, data_converter,
lr=config["training"]["lr"], lr_disc=config["training"]["lr_disc"],
r1_weight=config["training"]["r1_weight"],
max_num_points=config["training"]["max_num_points"],
print_freq=config["training"]["print_freq"], save_dir=directory,
model_save_freq=config["training"]["model_save_freq"],
is_voxel=is_voxel, is_point_cloud=is_point_cloud,
is_era5=is_era5)
trainer.train(dataloader, config["training"]["epochs"])
| 43.034247
| 115
| 0.660353
|
import json
import os
import sys
import time
import torch
from training.training import Trainer
from data.conversion import GridDataConverter, PointCloudDataConverter, ERA5Converter
from data.dataloaders import mnist, celebahq
from data.dataloaders_era5 import era5
from data.dataloaders3d import shapenet_voxels, shapenet_point_clouds
from models.discriminator import PointConvDiscriminator
from models.function_distribution import HyperNetwork, FunctionDistribution
from models.function_representation import FunctionRepresentation, FourierFeatures
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if len(sys.argv) != 2:
raise(RuntimeError("Wrong arguments, use python main.py <config_path>"))
config_path = sys.argv[1]
with open(config_path) as f:
config = json.load(f)
if config["path_to_data"] == "":
raise(RuntimeError("Path to data not specified. Modify path_to_data attribute in config to point to data."))
timestamp = time.strftime("%Y-%m-%d_%H-%M")
directory = "{}_{}".format(timestamp, config["id"])
if not os.path.exists(directory):
os.makedirs(directory)
with open(directory + '/config.json', 'w') as f:
json.dump(config, f)
is_voxel = False
is_point_cloud = False
is_era5 = False
if config["dataset"] == 'mnist':
dataloader = mnist(path_to_data=config["path_to_data"],
batch_size=config["training"]["batch_size"],
size=config["resolution"],
train=True)
input_dim = 2
output_dim = 1
data_shape = (1, config["resolution"], config["resolution"])
elif config["dataset"] == 'celebahq':
dataloader = celebahq(path_to_data=config["path_to_data"],
batch_size=config["training"]["batch_size"],
size=config["resolution"])
input_dim = 2
output_dim = 3
data_shape = (3, config["resolution"], config["resolution"])
elif config["dataset"] == 'shapenet_voxels':
dataloader = shapenet_voxels(path_to_data=config["path_to_data"],
batch_size=config["training"]["batch_size"],
size=config["resolution"])
input_dim = 3
output_dim = 1
data_shape = (1, config["resolution"], config["resolution"], config["resolution"])
is_voxel = True
elif config["dataset"] == 'shapenet_point_clouds':
dataloader = shapenet_point_clouds(path_to_data=config["path_to_data"],
batch_size=config["training"]["batch_size"])
input_dim = 3
output_dim = 1
data_shape = (1, config["resolution"], config["resolution"], config["resolution"])
is_point_cloud = True
elif config["dataset"] == 'era5':
dataloader = era5(path_to_data=config["path_to_data"],
batch_size=config["training"]["batch_size"])
input_dim = 3
output_dim = 1
data_shape = (46, 90)
is_era5 = True
if is_point_cloud:
data_converter = PointCloudDataConverter(device, data_shape, normalize_features=True)
elif is_era5:
data_converter = ERA5Converter(device, data_shape, normalize_features=True)
else:
data_converter = GridDataConverter(device, data_shape, normalize_features=True)
num_frequencies = config["generator"]["encoding"]["num_frequencies"]
std_dev = config["generator"]["encoding"]["std_dev"]
if num_frequencies:
frequency_matrix = torch.normal(mean=torch.zeros(num_frequencies, input_dim),
std=std_dev).to(device)
encoding = FourierFeatures(frequency_matrix)
else:
encoding = torch.nn.Identity()
final_non_linearity = torch.nn.Tanh()
non_linearity = torch.nn.LeakyReLU(0.1)
function_representation = FunctionRepresentation(input_dim, output_dim,
config["generator"]["layer_sizes"],
encoding, non_linearity,
final_non_linearity).to(device)
hypernetwork = HyperNetwork(function_representation, config["generator"]["latent_dim"],
config["generator"]["hypernet_layer_sizes"], non_linearity).to(device)
function_distribution = FunctionDistribution(hypernetwork).to(device)
discriminator = PointConvDiscriminator(input_dim, output_dim, config["discriminator"]["layer_configs"],
linear_layer_sizes=config["discriminator"]["linear_layer_sizes"],
norm_order=config["discriminator"]["norm_order"],
add_sigmoid=True,
add_batchnorm=config["discriminator"]["add_batchnorm"],
add_weightnet_batchnorm=config["discriminator"]["add_weightnet_batchnorm"],
deterministic=config["discriminator"]["deterministic"],
same_coordinates=config["discriminator"]["same_coordinates"]).to(device)
print("\nFunction distribution")
print(hypernetwork)
print("Number of parameters: {}".format(count_parameters(hypernetwork)))
print("\nDiscriminator")
print(discriminator)
print("Number of parameters: {}".format(count_parameters(discriminator)))
trainer = Trainer(device, function_distribution, discriminator, data_converter,
lr=config["training"]["lr"], lr_disc=config["training"]["lr_disc"],
r1_weight=config["training"]["r1_weight"],
max_num_points=config["training"]["max_num_points"],
print_freq=config["training"]["print_freq"], save_dir=directory,
model_save_freq=config["training"]["model_save_freq"],
is_voxel=is_voxel, is_point_cloud=is_point_cloud,
is_era5=is_era5)
trainer.train(dataloader, config["training"]["epochs"])
| true
| true
|
790492fa3f4feead19f5b1aef6a861bd440b8ec5
| 4,371
|
py
|
Python
|
common/migrations/0018_auto_20161014_1805.py
|
baylee-d/cos.io
|
3f88acb0feb7a167bf9e81c42e28f9d2d38bbd43
|
[
"Apache-2.0"
] | null | null | null |
common/migrations/0018_auto_20161014_1805.py
|
baylee-d/cos.io
|
3f88acb0feb7a167bf9e81c42e28f9d2d38bbd43
|
[
"Apache-2.0"
] | null | null | null |
common/migrations/0018_auto_20161014_1805.py
|
baylee-d/cos.io
|
3f88acb0feb7a167bf9e81c42e28f9d2d38bbd43
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-14 18:05
from __future__ import unicode_literals
import common.blocks.columns
import common.blocks.tabs
from django.db import migrations, models
import wagtail.wagtailcore.blocks
import wagtail.wagtailcore.fields
import wagtail.wagtailembeds.blocks
import wagtail.wagtailimages.blocks
class Migration(migrations.Migration):
dependencies = [
('common', '0017_upimagepath'),
]
operations = [
migrations.AlterField(
model_name='custompage',
name='content',
field=wagtail.wagtailcore.fields.StreamField((('appeal', wagtail.wagtailcore.blocks.StructBlock((('icon', wagtail.wagtailcore.blocks.ChoiceBlock(choices=[('none', 'none'), ('flask', 'flask'), ('group', 'group'), ('laptop', 'laptop'), ('sitemap', 'sitemap'), ('user', 'user'), ('book', 'book'), ('download', 'download')])), ('topic', wagtail.wagtailcore.blocks.CharBlock(max_length=35, required=True)), ('content', wagtail.wagtailcore.blocks.TextBlock(max_length=255, required=True))), classname='appeal', icon='tick', template='common/blocks/appeal.html')), ('heading', wagtail.wagtailcore.blocks.CharBlock(classname='full title')), ('statement', wagtail.wagtailcore.blocks.CharBlock()), ('paragraph', wagtail.wagtailcore.blocks.RichTextBlock()), ('imagechooser', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('column', common.blocks.columns.RowBlock()), ('tabbed_block', common.blocks.tabs.TabListBlock()), ('image', wagtail.wagtailcore.blocks.StructBlock((('main_image', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('style', wagtail.wagtailcore.blocks.ChoiceBlock(choices=[('max-width:225px;max-height:145px', 'small display'), ('max_width:250px;max-height:250px', 'middle display'), ('max_width:250px;max-height:250px;padding-top:20px', 'middle + padding display'), ('height:auto', 'auto display')], default='height:auto')), ('url', wagtail.wagtailcore.blocks.CharBlock(max_length=250, required=False))))), ('rich_text', wagtail.wagtailcore.blocks.RichTextBlock()), ('raw_html', wagtail.wagtailcore.blocks.RawHTMLBlock(help_text='With great power comes great responsibility. This HTML is unescaped. Be careful!')), ('people_block', wagtail.wagtailcore.blocks.StructBlock((('displayStyle', wagtail.wagtailcore.blocks.ChoiceBlock(choices=[('concise-team', 'concise-team'), ('concise-ambassador', 'concise-ambassador'), ('detailed', 'detailed')], default='concise')), ('tag', wagtail.wagtailcore.blocks.CharBlock(max_length=20))))), ('centered_text', wagtail.wagtailcore.blocks.StructBlock((('text', wagtail.wagtailcore.blocks.RichTextBlock()),))), ('hero_block', wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=True)), ('description', wagtail.wagtailcore.blocks.RichTextBlock(required=True))))), ('spotlight_block', wagtail.wagtailcore.blocks.StructBlock((('bubbles', wagtail.wagtailcore.blocks.StreamBlock((('bubble_block', wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('title', wagtail.wagtailcore.blocks.CharBlock(max_length=35, required=True)), ('description', wagtail.wagtailcore.blocks.RichTextBlock(required=True))))),))),))), ('job_whole_block', wagtail.wagtailcore.blocks.StructBlock(())), ('embed_block', wagtail.wagtailembeds.blocks.EmbedBlock()), ('whitespaceblock', wagtail.wagtailcore.blocks.StructBlock((('height', wagtail.wagtailcore.blocks.IntegerBlock()),))), ('clear_fixblock', wagtail.wagtailcore.blocks.StructBlock(())), ('code_block', wagtail.wagtailcore.blocks.StructBlock((('language', wagtail.wagtailcore.blocks.ChoiceBlock(choices=[('python', 'python'), ('css', 'css'), ('sql', 'sql'), ('javascript', 'javascript'), ('clike', 'clike'), ('markup', 'markup'), ('java', 'java')], default='python')), ('codes', wagtail.wagtailcore.blocks.TextBlock())))), ('calender_blog', wagtail.wagtailcore.blocks.StructBlock((('source', wagtail.wagtailcore.blocks.CharBlock(help_text='Such as: calendar@cos.io', max_length=255, required=True)),)))), blank=True, null=True),
),
migrations.AlterField(
model_name='upimagepath',
name='upImagePath',
field=models.CharField(default='https://cosio.s3.amazonaws.com/images/up.original.png', help_text='Up image path', max_length=255),
),
]
| 136.59375
| 3522
| 0.732098
|
from __future__ import unicode_literals
import common.blocks.columns
import common.blocks.tabs
from django.db import migrations, models
import wagtail.wagtailcore.blocks
import wagtail.wagtailcore.fields
import wagtail.wagtailembeds.blocks
import wagtail.wagtailimages.blocks
class Migration(migrations.Migration):
dependencies = [
('common', '0017_upimagepath'),
]
operations = [
migrations.AlterField(
model_name='custompage',
name='content',
field=wagtail.wagtailcore.fields.StreamField((('appeal', wagtail.wagtailcore.blocks.StructBlock((('icon', wagtail.wagtailcore.blocks.ChoiceBlock(choices=[('none', 'none'), ('flask', 'flask'), ('group', 'group'), ('laptop', 'laptop'), ('sitemap', 'sitemap'), ('user', 'user'), ('book', 'book'), ('download', 'download')])), ('topic', wagtail.wagtailcore.blocks.CharBlock(max_length=35, required=True)), ('content', wagtail.wagtailcore.blocks.TextBlock(max_length=255, required=True))), classname='appeal', icon='tick', template='common/blocks/appeal.html')), ('heading', wagtail.wagtailcore.blocks.CharBlock(classname='full title')), ('statement', wagtail.wagtailcore.blocks.CharBlock()), ('paragraph', wagtail.wagtailcore.blocks.RichTextBlock()), ('imagechooser', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('column', common.blocks.columns.RowBlock()), ('tabbed_block', common.blocks.tabs.TabListBlock()), ('image', wagtail.wagtailcore.blocks.StructBlock((('main_image', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('style', wagtail.wagtailcore.blocks.ChoiceBlock(choices=[('max-width:225px;max-height:145px', 'small display'), ('max_width:250px;max-height:250px', 'middle display'), ('max_width:250px;max-height:250px;padding-top:20px', 'middle + padding display'), ('height:auto', 'auto display')], default='height:auto')), ('url', wagtail.wagtailcore.blocks.CharBlock(max_length=250, required=False))))), ('rich_text', wagtail.wagtailcore.blocks.RichTextBlock()), ('raw_html', wagtail.wagtailcore.blocks.RawHTMLBlock(help_text='With great power comes great responsibility. This HTML is unescaped. Be careful!')), ('people_block', wagtail.wagtailcore.blocks.StructBlock((('displayStyle', wagtail.wagtailcore.blocks.ChoiceBlock(choices=[('concise-team', 'concise-team'), ('concise-ambassador', 'concise-ambassador'), ('detailed', 'detailed')], default='concise')), ('tag', wagtail.wagtailcore.blocks.CharBlock(max_length=20))))), ('centered_text', wagtail.wagtailcore.blocks.StructBlock((('text', wagtail.wagtailcore.blocks.RichTextBlock()),))), ('hero_block', wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=True)), ('description', wagtail.wagtailcore.blocks.RichTextBlock(required=True))))), ('spotlight_block', wagtail.wagtailcore.blocks.StructBlock((('bubbles', wagtail.wagtailcore.blocks.StreamBlock((('bubble_block', wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('title', wagtail.wagtailcore.blocks.CharBlock(max_length=35, required=True)), ('description', wagtail.wagtailcore.blocks.RichTextBlock(required=True))))),))),))), ('job_whole_block', wagtail.wagtailcore.blocks.StructBlock(())), ('embed_block', wagtail.wagtailembeds.blocks.EmbedBlock()), ('whitespaceblock', wagtail.wagtailcore.blocks.StructBlock((('height', wagtail.wagtailcore.blocks.IntegerBlock()),))), ('clear_fixblock', wagtail.wagtailcore.blocks.StructBlock(())), ('code_block', wagtail.wagtailcore.blocks.StructBlock((('language', wagtail.wagtailcore.blocks.ChoiceBlock(choices=[('python', 'python'), ('css', 'css'), ('sql', 'sql'), ('javascript', 'javascript'), ('clike', 'clike'), ('markup', 'markup'), ('java', 'java')], default='python')), ('codes', wagtail.wagtailcore.blocks.TextBlock())))), ('calender_blog', wagtail.wagtailcore.blocks.StructBlock((('source', wagtail.wagtailcore.blocks.CharBlock(help_text='Such as: calendar@cos.io', max_length=255, required=True)),)))), blank=True, null=True),
),
migrations.AlterField(
model_name='upimagepath',
name='upImagePath',
field=models.CharField(default='https://cosio.s3.amazonaws.com/images/up.original.png', help_text='Up image path', max_length=255),
),
]
| true
| true
|
7904934f95925caff636ee0ac9ac8d4a33f42a38
| 746
|
py
|
Python
|
DieRolls.py
|
bwnelb/dnd5e
|
092a95c16366e0abff248611464eb8fbc500e3af
|
[
"MIT"
] | null | null | null |
DieRolls.py
|
bwnelb/dnd5e
|
092a95c16366e0abff248611464eb8fbc500e3af
|
[
"MIT"
] | null | null | null |
DieRolls.py
|
bwnelb/dnd5e
|
092a95c16366e0abff248611464eb8fbc500e3af
|
[
"MIT"
] | null | null | null |
import random
### Advantage Logic ###
def advantage(rollfunc):
    # rollfunc is expected to be a zero-argument roll function (e.g. rolld20);
    # roll twice and keep the higher result
    roll1 = rollfunc()
    roll2 = rollfunc()
if roll1 > roll2:
return roll1
else:
return roll2
### Disadvantage Logic ###
def disadvantage(rollfunc):
    # roll twice and keep the lower result
    roll1 = rollfunc()
    roll2 = rollfunc()
if roll1 < roll2:
return roll1
else:
return roll2
### Die Rolls ###
def rolld4(sides:int=4):
return random.randint(1, sides)
def rolld6(sides:int=6):
return random.randint(1, sides)
def rolld8(sides:int=8):
return random.randint(1, sides)
def rolld10(sides:int=10):
return random.randint(1, sides)
def rolld12(sides:int=12):
return random.randint(1, sides)
def rolld20(sides:int=20):
return random.randint(1, sides)
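### Example usage (illustrative) ###
# attack = advantage(rolld20)       # roll a d20 twice, keep the higher
# save = disadvantage(rolld20)      # roll a d20 twice, keep the lower
# damage = rolld6() + rolld6()      # 2d6 damage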
| 20.162162
| 35
| 0.651475
|
import random
def advantage(rollfunc):
    roll1 = rollfunc()
    roll2 = rollfunc()
    if roll1 > roll2:
        return roll1
    else:
        return roll2
def disadvantage(rollfunc):
    roll1 = rollfunc()
    roll2 = rollfunc()
    if roll1 < roll2:
        return roll1
    else:
        return roll2
def rolld4(sides:int=4):
    return random.randint(1, sides)
def rolld6(sides:int=6):
return random.randint(1, sides)
def rolld8(sides:int=8):
return random.randint(1, sides)
def rolld10(sides:int=10):
return random.randint(1, sides)
def rolld12(sides:int=12):
return random.randint(1, sides)
def rolld20(sides:int=20):
return random.randint(1, sides)
| true
| true
|
790494120d60c3eb1207b64a634e40696354fd88
| 4,390
|
py
|
Python
|
core/domain/rule_domain_test.py
|
VictoriaRoux/oppia
|
5ae2a7f0b5c85d6e28222844d22ebdbfb81923c6
|
[
"Apache-2.0"
] | null | null | null |
core/domain/rule_domain_test.py
|
VictoriaRoux/oppia
|
5ae2a7f0b5c85d6e28222844d22ebdbfb81923c6
|
[
"Apache-2.0"
] | null | null | null |
core/domain/rule_domain_test.py
|
VictoriaRoux/oppia
|
5ae2a7f0b5c85d6e28222844d22ebdbfb81923c6
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for rule objects."""
__author__ = 'Sean Lip'
import inspect
import os
import pkgutil
from core.domain import rule_domain
from extensions.objects.models import objects
import feconf
import test_utils
class FakeRule(rule_domain.Rule):
subject_type = objects.Real
description = 'is between {{x|Real}} and {{y|UnicodeString}}'
def _evaluate(self, subject):
return subject == self.x
class RuleServicesUnitTests(test_utils.GenericTestBase):
"""Tests for rule services."""
def test_get_rules_for_obj_type(self):
self.assertEqual(
len(rule_domain.get_rules_for_obj_type('NonnegativeInt')), 1)
self.assertEqual(
len(rule_domain.get_rules_for_obj_type('Real')), 7)
self.assertEqual(
len(rule_domain.get_rules_for_obj_type('Null')), 0)
self.assertEqual(
len(rule_domain.get_rules_for_obj_type('FakeObjType')), 0)
class RuleDomainUnitTests(test_utils.GenericTestBase):
"""Tests for rules."""
def test_rule_initialization(self):
with self.assertRaises(ValueError):
FakeRule()
with self.assertRaises(ValueError):
FakeRule(1, 'too_many_args', 3)
with self.assertRaises(ValueError):
FakeRule('not_a_number', 'a')
with self.assertRaises(ValueError):
FakeRule('wrong_order', 1)
fake_rule = FakeRule(2, 'a')
        self.assertEqual(fake_rule.x, 2)
        self.assertEqual(fake_rule.y, 'a')
self.assertEqual(
fake_rule._PARAMS,
[('x', objects.Real), ('y', objects.UnicodeString)]
)
def test_rule_is_generic(self):
self.assertTrue(rule_domain.is_generic('Real', 'IsGreaterThan'))
self.assertFalse(rule_domain.is_generic('UnicodeString', 'Equals'))
class RuleDataUnitTests(test_utils.GenericTestBase):
"""Tests for the actual rules in extensions/."""
def test_that_all_rules_have_object_editor_templates(self):
rule_dir = os.path.join(os.getcwd(), feconf.RULES_DIR)
at_least_one_rule_found = False
clses = []
for loader, name, _ in pkgutil.iter_modules(path=[rule_dir]):
if name.endswith('_test') or name == 'base':
continue
module = loader.find_module(name).load_module(name)
for name, clazz in inspect.getmembers(module, inspect.isclass):
param_list = rule_domain.get_param_list(clazz.description)
for (param_name, param_obj_type) in param_list:
# TODO(sll): Get rid of this special case.
if param_obj_type.__name__ == 'NonnegativeInt':
continue
self.assertTrue(
param_obj_type.has_editor_js_template(),
msg='(%s)' % clazz.description)
at_least_one_rule_found = True
clses.append(clazz)
self.assertTrue(at_least_one_rule_found)
class RuleFunctionUnitTests(test_utils.GenericTestBase):
"""Test for functions involving rules."""
def test_get_description_strings_for_obj_type(self):
rule_descriptions = rule_domain.get_description_strings_for_obj_type(
'UnicodeString')
self.assertEqual(rule_descriptions, {
'CaseSensitiveEquals': (
'is equal to {{x|UnicodeString}}, taking case into account'),
'Contains': 'contains {{x|UnicodeString}}',
'Equals': 'is equal to {{x|UnicodeString}}',
'MatchesBase64EncodedFile': (
'has same content as the file located at '
'{{filepath|UnicodeString}}'),
'StartsWith': 'starts with {{x|UnicodeString}}',
})
| 34.84127
| 77
| 0.648064
|
__author__ = 'Sean Lip'
import inspect
import os
import pkgutil
from core.domain import rule_domain
from extensions.objects.models import objects
import feconf
import test_utils
class FakeRule(rule_domain.Rule):
subject_type = objects.Real
description = 'is between {{x|Real}} and {{y|UnicodeString}}'
def _evaluate(self, subject):
return subject == self.x
class RuleServicesUnitTests(test_utils.GenericTestBase):
def test_get_rules_for_obj_type(self):
self.assertEqual(
len(rule_domain.get_rules_for_obj_type('NonnegativeInt')), 1)
self.assertEqual(
len(rule_domain.get_rules_for_obj_type('Real')), 7)
self.assertEqual(
len(rule_domain.get_rules_for_obj_type('Null')), 0)
self.assertEqual(
len(rule_domain.get_rules_for_obj_type('FakeObjType')), 0)
class RuleDomainUnitTests(test_utils.GenericTestBase):
def test_rule_initialization(self):
with self.assertRaises(ValueError):
FakeRule()
with self.assertRaises(ValueError):
FakeRule(1, 'too_many_args', 3)
with self.assertRaises(ValueError):
FakeRule('not_a_number', 'a')
with self.assertRaises(ValueError):
FakeRule('wrong_order', 1)
fake_rule = FakeRule(2, 'a')
        self.assertEqual(fake_rule.x, 2)
        self.assertEqual(fake_rule.y, 'a')
self.assertEqual(
fake_rule._PARAMS,
[('x', objects.Real), ('y', objects.UnicodeString)]
)
def test_rule_is_generic(self):
self.assertTrue(rule_domain.is_generic('Real', 'IsGreaterThan'))
self.assertFalse(rule_domain.is_generic('UnicodeString', 'Equals'))
class RuleDataUnitTests(test_utils.GenericTestBase):
def test_that_all_rules_have_object_editor_templates(self):
rule_dir = os.path.join(os.getcwd(), feconf.RULES_DIR)
at_least_one_rule_found = False
clses = []
for loader, name, _ in pkgutil.iter_modules(path=[rule_dir]):
if name.endswith('_test') or name == 'base':
continue
module = loader.find_module(name).load_module(name)
for name, clazz in inspect.getmembers(module, inspect.isclass):
param_list = rule_domain.get_param_list(clazz.description)
for (param_name, param_obj_type) in param_list:
if param_obj_type.__name__ == 'NonnegativeInt':
continue
self.assertTrue(
param_obj_type.has_editor_js_template(),
msg='(%s)' % clazz.description)
at_least_one_rule_found = True
clses.append(clazz)
self.assertTrue(at_least_one_rule_found)
class RuleFunctionUnitTests(test_utils.GenericTestBase):
def test_get_description_strings_for_obj_type(self):
rule_descriptions = rule_domain.get_description_strings_for_obj_type(
'UnicodeString')
self.assertEqual(rule_descriptions, {
'CaseSensitiveEquals': (
'is equal to {{x|UnicodeString}}, taking case into account'),
'Contains': 'contains {{x|UnicodeString}}',
'Equals': 'is equal to {{x|UnicodeString}}',
'MatchesBase64EncodedFile': (
'has same content as the file located at '
'{{filepath|UnicodeString}}'),
'StartsWith': 'starts with {{x|UnicodeString}}',
})
| true
| true
|
790494268c4a51b99b50ac9e5a941c56937612a9
| 10,561
|
py
|
Python
|
main/utils_test.py
|
mitodl/bootcamp-ecommerce
|
ba7d6aefe56c6481ae2a5afc84cdd644538b6d50
|
[
"BSD-3-Clause"
] | 2
|
2018-06-20T19:37:03.000Z
|
2021-01-06T09:51:40.000Z
|
main/utils_test.py
|
mitodl/bootcamp-ecommerce
|
ba7d6aefe56c6481ae2a5afc84cdd644538b6d50
|
[
"BSD-3-Clause"
] | 1,226
|
2017-02-23T14:52:28.000Z
|
2022-03-29T13:19:54.000Z
|
main/utils_test.py
|
mitodl/bootcamp-ecommerce
|
ba7d6aefe56c6481ae2a5afc84cdd644538b6d50
|
[
"BSD-3-Clause"
] | 3
|
2017-03-20T03:51:27.000Z
|
2021-03-19T15:54:31.000Z
|
"""
Tests for the utils module
"""
import datetime
import operator as op
from math import ceil
from types import SimpleNamespace
import pytest
import pytz
from mitol.common.utils import (
is_near_now,
has_equal_properties,
first_or_none,
first_matching_item,
max_or_none,
partition_to_lists,
unique,
unique_ignore_case,
item_at_index_or_none,
all_equal,
all_unique,
has_all_keys,
group_into_dict,
now_in_utc,
filter_dict_by_key_set,
chunks,
get_error_response_summary,
)
from ecommerce.factories import Order, ReceiptFactory
from main.utils import (
get_field_names,
is_empty_file,
serialize_model_object,
is_blank,
partition_around_index,
format_month_day,
)
from main.test_utils import format_as_iso8601, MockResponse
def test_now_in_utc():
"""now_in_utc() should return the current time set to the UTC time zone"""
now = now_in_utc()
assert is_near_now(now)
assert now.tzinfo == pytz.UTC
def test_is_near_now():
"""
Test is_near_now for now
"""
now = datetime.datetime.now(tz=pytz.UTC)
assert is_near_now(now) is True
later = now + datetime.timedelta(0, 6)
assert is_near_now(later) is False
earlier = now - datetime.timedelta(0, 6)
assert is_near_now(earlier) is False
def test_first_or_none():
"""
Assert that first_or_none returns the first item in an iterable or None
"""
assert first_or_none([]) is None
assert first_or_none(set()) is None
assert first_or_none([1, 2, 3]) == 1
assert first_or_none(range(1, 5)) == 1
def test_first_matching_item():
"""first_matching_item should return the first item where the predicate function returns true"""
assert first_matching_item([1, 2, 3, 4, 5], lambda x: x % 2 == 0) == 2
assert first_matching_item([], lambda x: True) is None
assert first_matching_item(["x", "y", "z"], lambda x: False) is None
def test_max_or_none():
"""
Assert that max_or_none returns the max of some iterable, or None if the iterable has no items
"""
assert max_or_none(i for i in [5, 4, 3, 2, 1]) == 5
assert max_or_none([1, 3, 5, 4, 2]) == 5
assert max_or_none([]) is None
def test_unique():
"""
Assert that unique() returns a generator of unique elements from a provided iterable
"""
assert list(unique([1, 2, 2, 3, 3, 0, 3])) == [1, 2, 3, 0]
assert list(unique(("a", "b", "a", "c", "C", None))) == ["a", "b", "c", "C", None]
def test_unique_ignore_case():
"""
Assert that unique_ignore_case() returns a generator of unique lowercase strings from a
provided iterable
"""
assert list(unique_ignore_case(["ABC", "def", "AbC", "DEf"])) == ["abc", "def"]
def test_item_at_index_or_none():
"""
Assert that item_at_index_or_none returns an item at a given index, or None if that index
doesn't exist
"""
arr = [1, 2, 3]
assert item_at_index_or_none(arr, 1) == 2
assert item_at_index_or_none(arr, 10) is None
def test_all_equal():
"""
Assert that all_equal returns True if all of the provided args are equal to each other
"""
assert all_equal(1, 1, 1) is True
assert all_equal(1, 2, 1) is False
assert all_equal() is True
def test_all_unique():
"""
Assert that all_unique returns True if all of the items in the iterable argument are unique
"""
assert all_unique([1, 2, 3, 4]) is True
assert all_unique((1, 2, 3, 4)) is True
assert all_unique([1, 2, 3, 1]) is False
def test_has_all_keys():
"""
Assert that has_all_keys returns True if the given dict has all of the specified keys
"""
d = {"a": 1, "b": 2, "c": 3}
assert has_all_keys(d, ["a", "c"]) is True
assert has_all_keys(d, ["a", "z"]) is False
def test_is_blank():
"""
Assert that is_blank returns True if the given value is None or a blank string
"""
assert is_blank("") is True
assert is_blank(None) is True
assert is_blank(0) is False
assert is_blank(" ") is False
assert is_blank(False) is False
assert is_blank("value") is False
def test_group_into_dict():
"""
Assert that group_into_dict takes an iterable of items and returns a dictionary of those items
grouped by generated keys
"""
class Car: # pylint: disable=missing-docstring
def __init__(self, make, model):
self.make = make
self.model = model
cars = [
Car(make="Honda", model="Civic"),
Car(make="Honda", model="Accord"),
Car(make="Ford", model="F150"),
Car(make="Ford", model="Focus"),
Car(make="Jeep", model="Wrangler"),
]
grouped_cars = group_into_dict(cars, key_fn=op.attrgetter("make"))
assert set(grouped_cars.keys()) == {"Honda", "Ford", "Jeep"}
assert set(grouped_cars["Honda"]) == set(cars[0:2])
assert set(grouped_cars["Ford"]) == set(cars[2:4])
assert grouped_cars["Jeep"] == [cars[4]]
nums = [1, 2, 3, 4, 5, 6]
grouped_nums = group_into_dict(nums, key_fn=lambda num: (num % 2 == 0))
assert grouped_nums.keys() == {True, False}
assert set(grouped_nums[True]) == {2, 4, 6}
assert set(grouped_nums[False]) == {1, 3, 5}
def test_filter_dict_by_key_set():
"""
Test that filter_dict_by_key_set returns a dict with only the given keys
"""
d = {"a": 1, "b": 2, "c": 3, "d": 4}
assert filter_dict_by_key_set(d, {"a", "c"}) == {"a": 1, "c": 3}
assert filter_dict_by_key_set(d, {"a", "c", "nonsense"}) == {"a": 1, "c": 3}
assert filter_dict_by_key_set(d, {"nonsense"}) == {}
def test_partition_to_lists():
"""
Assert that partition_to_lists splits an iterable into two lists according to a condition
"""
nums = [1, 2, 1, 3, 1, 4, 0, None, None]
not_ones, ones = partition_to_lists(nums, lambda n: n == 1)
assert not_ones == [2, 3, 4, 0, None, None]
assert ones == [1, 1, 1]
# The default predicate is the standard Python bool() function
falsey, truthy = partition_to_lists(nums)
assert falsey == [0, None, None]
assert truthy == [1, 2, 1, 3, 1, 4]
def test_partition_around_index():
"""partition_around_index should split a list into two lists around an index"""
assert partition_around_index([1, 2, 3, 4], 2) == ([1, 2], [4])
assert partition_around_index([1, 2, 3, 4], 0) == ([], [2, 3, 4])
assert partition_around_index([1, 2, 3, 4], 3) == ([1, 2, 3], [])
with pytest.raises(ValueError):
partition_around_index([1, 2, 3, 4], 4)
@pytest.mark.parametrize(
"content,content_type,exp_summary_content,exp_url_in_summary",
[
['{"bad": "response"}', "application/json", '{"bad": "response"}', False],
["plain text", "text/plain", "plain text", False],
[
"<div>HTML content</div>",
"text/html; charset=utf-8",
"(HTML body ignored)",
True,
],
],
)
def test_get_error_response_summary(
content, content_type, exp_summary_content, exp_url_in_summary
):
"""
get_error_response_summary should provide a summary of an error HTTP response object with the correct bits of
information depending on the type of content.
"""
status_code = 400
url = "http://example.com"
mock_response = MockResponse(
status_code=status_code, content=content, content_type=content_type, url=url
)
summary = get_error_response_summary(mock_response)
assert f"Response - code: {status_code}" in summary
assert f"content: {exp_summary_content}" in summary
assert (f"url: {url}" in summary) is exp_url_in_summary
@pytest.mark.django_db
def test_jsonfield(settings):
"""
Test a model with a JSONField is handled correctly
"""
settings.CYBERSOURCE_SECURITY_KEY = "asdf"
receipt = ReceiptFactory.create()
assert serialize_model_object(receipt) == {
"created_on": format_as_iso8601(receipt.created_on),
"data": receipt.data,
"id": receipt.id,
"updated_on": format_as_iso8601(receipt.updated_on),
"order": receipt.order.id,
}
def test_get_field_names():
"""
Assert that get_field_names does not include related fields
"""
assert set(get_field_names(Order)) == {
"user",
"status",
"total_price_paid",
"application",
"created_on",
"updated_on",
"payment_type",
}
def test_is_empty_file():
"""is_empty_file should return True if the given object is None or has a blank name property"""
fake_file = None
assert is_empty_file(fake_file) is True
fake_file = SimpleNamespace(name="")
assert is_empty_file(fake_file) is True
fake_file = SimpleNamespace(name="path/to/file.txt")
assert is_empty_file(fake_file) is False
def test_chunks():
"""
test for chunks
"""
input_list = list(range(113))
output_list = []
for nums in chunks(input_list):
output_list += nums
assert output_list == input_list
output_list = []
for nums in chunks(input_list, chunk_size=1):
output_list += nums
assert output_list == input_list
output_list = []
for nums in chunks(input_list, chunk_size=124):
output_list += nums
assert output_list == input_list
def test_chunks_iterable():
"""
test that chunks works on non-list iterables too
"""
count = 113
input_range = range(count)
chunk_output = []
for chunk in chunks(input_range, chunk_size=10):
chunk_output.append(chunk)
assert len(chunk_output) == ceil(113 / 10)
range_list = []
for chunk in chunk_output:
range_list += chunk
assert range_list == list(range(count))
def test_format_month_day():
"""
format_month_day should format the month and day from a datetime
"""
dt = datetime.datetime(year=2020, month=1, day=1, tzinfo=pytz.UTC)
assert format_month_day(dt) == "Jan 1"
assert format_month_day(dt, month_fmt="%b") == "Jan 1"
assert format_month_day(dt, month_fmt="%B") == "January 1"
def test_has_equal_properties():
"""
Assert that has_equal_properties returns True if an object has equivalent properties to a given dict
"""
obj = SimpleNamespace(a=1, b=2, c=3)
assert has_equal_properties(obj, {}) is True
assert has_equal_properties(obj, dict(a=1, b=2)) is True
assert has_equal_properties(obj, dict(a=1, b=2, c=3)) is True
assert has_equal_properties(obj, dict(a=2)) is False
assert has_equal_properties(obj, dict(d=4)) is False
| 30.435159
| 113
| 0.648802
|
import datetime
import operator as op
from math import ceil
from types import SimpleNamespace
import pytest
import pytz
from mitol.common.utils import (
is_near_now,
has_equal_properties,
first_or_none,
first_matching_item,
max_or_none,
partition_to_lists,
unique,
unique_ignore_case,
item_at_index_or_none,
all_equal,
all_unique,
has_all_keys,
group_into_dict,
now_in_utc,
filter_dict_by_key_set,
chunks,
get_error_response_summary,
)
from ecommerce.factories import Order, ReceiptFactory
from main.utils import (
get_field_names,
is_empty_file,
serialize_model_object,
is_blank,
partition_around_index,
format_month_day,
)
from main.test_utils import format_as_iso8601, MockResponse
def test_now_in_utc():
now = now_in_utc()
assert is_near_now(now)
assert now.tzinfo == pytz.UTC
def test_is_near_now():
now = datetime.datetime.now(tz=pytz.UTC)
assert is_near_now(now) is True
later = now + datetime.timedelta(0, 6)
assert is_near_now(later) is False
earlier = now - datetime.timedelta(0, 6)
assert is_near_now(earlier) is False
def test_first_or_none():
assert first_or_none([]) is None
assert first_or_none(set()) is None
assert first_or_none([1, 2, 3]) == 1
assert first_or_none(range(1, 5)) == 1
def test_first_matching_item():
assert first_matching_item([1, 2, 3, 4, 5], lambda x: x % 2 == 0) == 2
assert first_matching_item([], lambda x: True) is None
assert first_matching_item(["x", "y", "z"], lambda x: False) is None
def test_max_or_none():
assert max_or_none(i for i in [5, 4, 3, 2, 1]) == 5
assert max_or_none([1, 3, 5, 4, 2]) == 5
assert max_or_none([]) is None
def test_unique():
assert list(unique([1, 2, 2, 3, 3, 0, 3])) == [1, 2, 3, 0]
assert list(unique(("a", "b", "a", "c", "C", None))) == ["a", "b", "c", "C", None]
def test_unique_ignore_case():
assert list(unique_ignore_case(["ABC", "def", "AbC", "DEf"])) == ["abc", "def"]
def test_item_at_index_or_none():
arr = [1, 2, 3]
assert item_at_index_or_none(arr, 1) == 2
assert item_at_index_or_none(arr, 10) is None
def test_all_equal():
assert all_equal(1, 1, 1) is True
assert all_equal(1, 2, 1) is False
assert all_equal() is True
def test_all_unique():
assert all_unique([1, 2, 3, 4]) is True
assert all_unique((1, 2, 3, 4)) is True
assert all_unique([1, 2, 3, 1]) is False
def test_has_all_keys():
d = {"a": 1, "b": 2, "c": 3}
assert has_all_keys(d, ["a", "c"]) is True
assert has_all_keys(d, ["a", "z"]) is False
def test_is_blank():
assert is_blank("") is True
assert is_blank(None) is True
assert is_blank(0) is False
assert is_blank(" ") is False
assert is_blank(False) is False
assert is_blank("value") is False
def test_group_into_dict():
class Car:
def __init__(self, make, model):
self.make = make
self.model = model
cars = [
Car(make="Honda", model="Civic"),
Car(make="Honda", model="Accord"),
Car(make="Ford", model="F150"),
Car(make="Ford", model="Focus"),
Car(make="Jeep", model="Wrangler"),
]
grouped_cars = group_into_dict(cars, key_fn=op.attrgetter("make"))
assert set(grouped_cars.keys()) == {"Honda", "Ford", "Jeep"}
assert set(grouped_cars["Honda"]) == set(cars[0:2])
assert set(grouped_cars["Ford"]) == set(cars[2:4])
assert grouped_cars["Jeep"] == [cars[4]]
nums = [1, 2, 3, 4, 5, 6]
grouped_nums = group_into_dict(nums, key_fn=lambda num: (num % 2 == 0))
assert grouped_nums.keys() == {True, False}
assert set(grouped_nums[True]) == {2, 4, 6}
assert set(grouped_nums[False]) == {1, 3, 5}
def test_filter_dict_by_key_set():
d = {"a": 1, "b": 2, "c": 3, "d": 4}
assert filter_dict_by_key_set(d, {"a", "c"}) == {"a": 1, "c": 3}
assert filter_dict_by_key_set(d, {"a", "c", "nonsense"}) == {"a": 1, "c": 3}
assert filter_dict_by_key_set(d, {"nonsense"}) == {}
def test_partition_to_lists():
nums = [1, 2, 1, 3, 1, 4, 0, None, None]
not_ones, ones = partition_to_lists(nums, lambda n: n == 1)
assert not_ones == [2, 3, 4, 0, None, None]
assert ones == [1, 1, 1]
falsey, truthy = partition_to_lists(nums)
assert falsey == [0, None, None]
assert truthy == [1, 2, 1, 3, 1, 4]
def test_partition_around_index():
assert partition_around_index([1, 2, 3, 4], 2) == ([1, 2], [4])
assert partition_around_index([1, 2, 3, 4], 0) == ([], [2, 3, 4])
assert partition_around_index([1, 2, 3, 4], 3) == ([1, 2, 3], [])
with pytest.raises(ValueError):
partition_around_index([1, 2, 3, 4], 4)
@pytest.mark.parametrize(
"content,content_type,exp_summary_content,exp_url_in_summary",
[
['{"bad": "response"}', "application/json", '{"bad": "response"}', False],
["plain text", "text/plain", "plain text", False],
[
"<div>HTML content</div>",
"text/html; charset=utf-8",
"(HTML body ignored)",
True,
],
],
)
def test_get_error_response_summary(
content, content_type, exp_summary_content, exp_url_in_summary
):
status_code = 400
url = "http://example.com"
mock_response = MockResponse(
status_code=status_code, content=content, content_type=content_type, url=url
)
summary = get_error_response_summary(mock_response)
assert f"Response - code: {status_code}" in summary
assert f"content: {exp_summary_content}" in summary
assert (f"url: {url}" in summary) is exp_url_in_summary
@pytest.mark.django_db
def test_jsonfield(settings):
settings.CYBERSOURCE_SECURITY_KEY = "asdf"
receipt = ReceiptFactory.create()
assert serialize_model_object(receipt) == {
"created_on": format_as_iso8601(receipt.created_on),
"data": receipt.data,
"id": receipt.id,
"updated_on": format_as_iso8601(receipt.updated_on),
"order": receipt.order.id,
}
def test_get_field_names():
assert set(get_field_names(Order)) == {
"user",
"status",
"total_price_paid",
"application",
"created_on",
"updated_on",
"payment_type",
}
def test_is_empty_file():
fake_file = None
assert is_empty_file(fake_file) is True
fake_file = SimpleNamespace(name="")
assert is_empty_file(fake_file) is True
fake_file = SimpleNamespace(name="path/to/file.txt")
assert is_empty_file(fake_file) is False
def test_chunks():
input_list = list(range(113))
output_list = []
for nums in chunks(input_list):
output_list += nums
assert output_list == input_list
output_list = []
for nums in chunks(input_list, chunk_size=1):
output_list += nums
assert output_list == input_list
output_list = []
for nums in chunks(input_list, chunk_size=124):
output_list += nums
assert output_list == input_list
def test_chunks_iterable():
count = 113
input_range = range(count)
chunk_output = []
for chunk in chunks(input_range, chunk_size=10):
chunk_output.append(chunk)
assert len(chunk_output) == ceil(113 / 10)
range_list = []
for chunk in chunk_output:
range_list += chunk
assert range_list == list(range(count))
def test_format_month_day():
dt = datetime.datetime(year=2020, month=1, day=1, tzinfo=pytz.UTC)
assert format_month_day(dt) == "Jan 1"
assert format_month_day(dt, month_fmt="%b") == "Jan 1"
assert format_month_day(dt, month_fmt="%B") == "January 1"
def test_has_equal_properties():
obj = SimpleNamespace(a=1, b=2, c=3)
assert has_equal_properties(obj, {}) is True
assert has_equal_properties(obj, dict(a=1, b=2)) is True
assert has_equal_properties(obj, dict(a=1, b=2, c=3)) is True
assert has_equal_properties(obj, dict(a=2)) is False
assert has_equal_properties(obj, dict(d=4)) is False
| true
| true
|
790496160664ef27a3f84a7e2228d4aa40fd0f66
| 1,762
|
py
|
Python
|
sysinv/cgts-client/cgts-client/cgtsclient/v1/sm_service_nodes.py
|
etaivan/stx-config
|
281e1f110973f96e077645fb01f67b646fc253cc
|
[
"Apache-2.0"
] | 10
|
2020-02-07T18:57:44.000Z
|
2021-09-11T10:29:34.000Z
|
sysinv/cgts-client/cgts-client/cgtsclient/v1/sm_service_nodes.py
|
etaivan/stx-config
|
281e1f110973f96e077645fb01f67b646fc253cc
|
[
"Apache-2.0"
] | 1
|
2021-01-14T12:01:55.000Z
|
2021-01-14T12:01:55.000Z
|
sysinv/cgts-client/cgts-client/cgtsclient/v1/sm_service_nodes.py
|
etaivan/stx-config
|
281e1f110973f96e077645fb01f67b646fc253cc
|
[
"Apache-2.0"
] | 10
|
2020-10-13T08:37:46.000Z
|
2022-02-09T00:21:25.000Z
|
# -*- encoding: utf-8 -*-
#
# Copyright © 2013 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013-2016 Wind River Systems, Inc.
#
from cgtsclient.common import base
from cgtsclient import exc
CREATION_ATTRIBUTES = ['servicename', 'state']
class SmNodes(base.Resource):
def __repr__(self):
return "<SmNodes %s>" % self._info
class SmNodesManager(base.Manager):
resource_class = SmNodes
@staticmethod
def _path(id=None):
return '/v1/servicenodes/%s' % id if id else '/v1/servicenodes'
def list(self):
return self._list(self._path(), "nodes")
def get(self, nodes_id):
try:
return self._list(self._path(nodes_id))[0]
except IndexError:
return None
def create(self, **kwargs):
new = {}
for (key, value) in kwargs.items():
if key in CREATION_ATTRIBUTES:
new[key] = value
else:
raise exc.InvalidAttribute()
return self._create(self._path(), new)
def delete(self, nodes_id):
return self._delete(self._path(nodes_id))
def update(self, nodes_id, patch):
return self._update(self._path(nodes_id), patch)
| 27.968254
| 78
| 0.646425
|
from cgtsclient.common import base
from cgtsclient import exc
CREATION_ATTRIBUTES = ['servicename', 'state']
class SmNodes(base.Resource):
def __repr__(self):
return "<SmNodes %s>" % self._info
class SmNodesManager(base.Manager):
resource_class = SmNodes
@staticmethod
def _path(id=None):
return '/v1/servicenodes/%s' % id if id else '/v1/servicenodes'
def list(self):
return self._list(self._path(), "nodes")
def get(self, nodes_id):
try:
return self._list(self._path(nodes_id))[0]
except IndexError:
return None
def create(self, **kwargs):
new = {}
for (key, value) in kwargs.items():
if key in CREATION_ATTRIBUTES:
new[key] = value
else:
raise exc.InvalidAttribute()
return self._create(self._path(), new)
def delete(self, nodes_id):
return self._delete(self._path(nodes_id))
def update(self, nodes_id, patch):
return self._update(self._path(nodes_id), patch)
| true
| true
|
7904962d692fbfb38c6be6c21aabf62d49aa32de
| 3,593
|
py
|
Python
|
example_programs/PadmalaPessoa2011.py
|
ahsanbutt95/sweetpea-py
|
d2e2074ef4b20b5f46d8049ca4bb0bf46c3fc705
|
[
"MIT"
] | null | null | null |
example_programs/PadmalaPessoa2011.py
|
ahsanbutt95/sweetpea-py
|
d2e2074ef4b20b5f46d8049ca4bb0bf46c3fc705
|
[
"MIT"
] | null | null | null |
example_programs/PadmalaPessoa2011.py
|
ahsanbutt95/sweetpea-py
|
d2e2074ef4b20b5f46d8049ca4bb0bf46c3fc705
|
[
"MIT"
] | null | null | null |
# Make SweetPea visible regardless of whether it's been installed.
import sys
sys.path.append("..")
from sweetpea.primitives import Factor, DerivedLevel, WithinTrial, Transition
from sweetpea.constraints import no_more_than_k_in_a_row
from sweetpea import fully_cross_block, synthesize_trials_non_uniform, print_experiments
"""
Padmala & Pessoa (2011) design
***********************
factors (levels):
- reward (rewarded, non-rewarded)
- response (left, right)
- response Transition (repetition, switch). Factor dependent on response:
- congruency (congruent, incongruent, neutral)
- congruency Transition (congruent-congruent, congruent-incongruent, congruent-neutral, incongruent-congruent, incongruent-incongruent, incongruent-neutral, neutral-congruent, neutral-incongruent, neutral-neutral)
design:
- counterbalancing reward x response x response_transition x congruency_transition
"""
# DEFINE REWARD, RESPONSE and CONGRUENCY FACTORS
reward = Factor("reward", ["rewarded", "non-rewarded"])
response = Factor("response", ["building", "house"])
congruency = Factor("congruency", ["congruent", "incongruent", "neutral"])
# DEFINE CONGRUENCY TRANSITION FACTOR
def con_con(congruency):
return congruency[0] == "congruent" and congruency[1] == "congruent"
def con_inc(congruency):
return congruency[0] == "congruent" and congruency[1] == "incongruent"
def con_ntr(congruency):
return congruency[0] == "congruent" and congruency[1] == "neutral"
def inc_con(congruency):
return congruency[0] == "incongruent" and congruency[1] == "congruent"
def inc_inc(congruency):
return congruency[0] == "incongruent" and congruency[1] == "incongruent"
def inc_ntr(congruency):
return congruency[0] == "incongruent" and congruency[1] == "neutral"
def ntr_con(congruency):
return congruency[0] == "neutral" and congruency[1] == "congruent"
def ntr_inc(congruency):
return congruency[0] == "neutral" and congruency[1] == "incongruent"
def ntr_ntr(congruency):
return congruency[0] == "neutral" and congruency[1] == "neutral"
congruency_transition = Factor("congruency_transition", [
DerivedLevel("congruent-congruent", Transition(con_con, [congruency])),
DerivedLevel("congruent-incongruent", Transition(con_inc, [congruency])),
DerivedLevel("congruent-neutral", Transition(con_ntr, [congruency])),
DerivedLevel("incongruent-congruent", Transition(inc_con, [congruency])),
DerivedLevel("incongruent-incongruent", Transition(inc_inc, [congruency])),
DerivedLevel("incongruent-neutral", Transition(inc_ntr, [congruency])),
DerivedLevel("neutral-congruent", Transition(ntr_con, [congruency])),
DerivedLevel("neutral-incongruent", Transition(ntr_inc, [congruency])),
DerivedLevel("neutral-neutral", Transition(ntr_ntr, [congruency]))
])
# DEFINE RESPONSE TRANSITION FACTOR
def response_repeat(responses):
return responses[0] == responses[1]
def response_switch(responses):
return not response_repeat(responses)
response_transition = Factor("resp_transition", [
DerivedLevel("repeat", Transition(response_repeat, [response])),
DerivedLevel("switch", Transition(response_switch, [response]))
])
# DEFINE SEQUENCE CONSTRAINTS
constraints = []
# DEFINE EXPERIMENT
design = [congruency, reward, response, congruency_transition, response_transition]
crossing = [reward, response, congruency_transition, response_transition]
block = fully_cross_block(design, crossing, constraints)
# SOLVE
experiments = synthesize_trials_non_uniform(block, 5)
print_experiments(block, experiments)
| 38.634409
| 213
| 0.748678
|
import sys
sys.path.append("..")
from sweetpea.primitives import Factor, DerivedLevel, WithinTrial, Transition
from sweetpea.constraints import no_more_than_k_in_a_row
from sweetpea import fully_cross_block, synthesize_trials_non_uniform, print_experiments
# DEFINE REWARD, RESPONSE and CONGRUENCY FACTORS
reward = Factor("reward", ["rewarded", "non-rewarded"])
response = Factor("response", ["building", "house"])
congruency = Factor("congruency", ["congruent", "incongruent", "neutral"])
# DEFINE CONGRUENCY TRANSITION FACTOR
def con_con(congruency):
return congruency[0] == "congruent" and congruency[1] == "congruent"
def con_inc(congruency):
return congruency[0] == "congruent" and congruency[1] == "incongruent"
def con_ntr(congruency):
return congruency[0] == "congruent" and congruency[1] == "neutral"
def inc_con(congruency):
return congruency[0] == "incongruent" and congruency[1] == "congruent"
def inc_inc(congruency):
return congruency[0] == "incongruent" and congruency[1] == "incongruent"
def inc_ntr(congruency):
return congruency[0] == "incongruent" and congruency[1] == "neutral"
def ntr_con(congruency):
return congruency[0] == "neutral" and congruency[1] == "congruent"
def ntr_inc(congruency):
return congruency[0] == "neutral" and congruency[1] == "incongruent"
def ntr_ntr(congruency):
return congruency[0] == "neutral" and congruency[1] == "neutral"
congruency_transition = Factor("congruency_transition", [
DerivedLevel("congruent-congruent", Transition(con_con, [congruency])),
DerivedLevel("congruent-incongruent", Transition(con_inc, [congruency])),
DerivedLevel("congruent-neutral", Transition(con_ntr, [congruency])),
DerivedLevel("incongruent-congruent", Transition(inc_con, [congruency])),
DerivedLevel("incongruent-incongruent", Transition(inc_inc, [congruency])),
DerivedLevel("incongruent-neutral", Transition(inc_ntr, [congruency])),
DerivedLevel("neutral-congruent", Transition(ntr_con, [congruency])),
DerivedLevel("neutral-incongruent", Transition(ntr_inc, [congruency])),
DerivedLevel("neutral-neutral", Transition(ntr_ntr, [congruency]))
])
# DEFINE RESPONSE TRANSITION FACTOR
def response_repeat(responses):
return responses[0] == responses[1]
def response_switch(responses):
return not response_repeat(responses)
response_transition = Factor("resp_transition", [
DerivedLevel("repeat", Transition(response_repeat, [response])),
DerivedLevel("switch", Transition(response_switch, [response]))
])
# DEFINE SEQUENCE CONSTRAINTS
constraints = []
# DEFINE EXPERIMENT
design = [congruency, reward, response, congruency_transition, response_transition]
crossing = [reward, response, congruency_transition, response_transition]
block = fully_cross_block(design, crossing, constraints)
# SOLVE
experiments = synthesize_trials_non_uniform(block, 5)
print_experiments(block, experiments)
| true
| true
|
7904963f7babdc6d2af45ebb3647c446e74c1004
| 2,974
|
py
|
Python
|
external/rocksdb/buckifier/targets_builder.py
|
cashbitecrypto/cashbite
|
991200dc37234caa74c603cb8aee094cbd7ce429
|
[
"BSD-3-Clause"
] | 858
|
2017-12-10T12:21:19.000Z
|
2022-03-28T17:36:42.000Z
|
external/rocksdb/buckifier/targets_builder.py
|
cashbitecrypto/cashbite
|
991200dc37234caa74c603cb8aee094cbd7ce429
|
[
"BSD-3-Clause"
] | 663
|
2017-12-11T22:45:00.000Z
|
2021-06-17T16:02:50.000Z
|
external/rocksdb/buckifier/targets_builder.py
|
cashbitecrypto/cashbite
|
991200dc37234caa74c603cb8aee094cbd7ce429
|
[
"BSD-3-Clause"
] | 1,731
|
2017-12-09T15:09:43.000Z
|
2022-03-30T18:23:38.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
try:
from builtins import object
from builtins import str
except ImportError:
from __builtin__ import object
from __builtin__ import str
import targets_cfg
def pretty_list(lst, indent=8):
if lst is None or len(lst) == 0:
return ""
if len(lst) == 1:
return "\"%s\"" % lst[0]
separator = "\",\n%s\"" % (" " * indent)
res = separator.join(sorted(lst))
res = "\n" + (" " * indent) + "\"" + res + "\",\n" + (" " * (indent - 4))
return res
class TARGETSBuilder(object):
def __init__(self, path):
self.path = path
self.targets_file = open(path, 'w')
self.targets_file.write(targets_cfg.rocksdb_target_header)
self.total_lib = 0
self.total_bin = 0
self.total_test = 0
self.tests_cfg = ""
def __del__(self):
self.targets_file.close()
def add_library(self, name, srcs, deps=None, headers=None):
headers_attr_prefix = ""
if headers is None:
headers_attr_prefix = "auto_"
headers = "AutoHeaders.RECURSIVE_GLOB"
self.targets_file.write(targets_cfg.library_template.format(
name=name,
srcs=pretty_list(srcs),
headers_attr_prefix=headers_attr_prefix,
headers=headers,
deps=pretty_list(deps)))
self.total_lib = self.total_lib + 1
def add_rocksdb_library(self, name, srcs, headers=None):
headers_attr_prefix = ""
if headers is None:
headers_attr_prefix = "auto_"
headers = "AutoHeaders.RECURSIVE_GLOB"
self.targets_file.write(targets_cfg.rocksdb_library_template.format(
name=name,
srcs=pretty_list(srcs),
headers_attr_prefix=headers_attr_prefix,
headers=headers))
self.total_lib = self.total_lib + 1
def add_binary(self, name, srcs, deps=None):
self.targets_file.write(targets_cfg.binary_template % (
name,
pretty_list(srcs),
pretty_list(deps)))
self.total_bin = self.total_bin + 1
def register_test(self,
test_name,
src,
is_parallel,
extra_deps,
extra_compiler_flags):
exec_mode = "serial"
if is_parallel:
exec_mode = "parallel"
self.tests_cfg += targets_cfg.test_cfg_template % (
test_name,
str(src),
str(exec_mode),
extra_deps,
extra_compiler_flags)
self.total_test = self.total_test + 1
def flush_tests(self):
self.targets_file.write(targets_cfg.unittests_template % self.tests_cfg)
self.tests_cfg = ""
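# --- Illustrative usage (added sketch; not part of the original file) ---
# The file name, target names, sources, and deps below are made-up examples;
# real values come from the buckify driver script that uses this builder.
#
#     builder = TARGETSBuilder("TARGETS")
#     builder.add_library("example_lib", ["util/example.cc"], deps=["example_dep"])
#     builder.add_binary("example_tool", ["tools/example_tool.cc"], deps=["example_lib"])
#     builder.register_test("example_test", "util/example_test.cc", True, "[]", "[]")
#     builder.flush_tests()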
| 31.978495
| 80
| 0.599866
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
try:
from builtins import object
from builtins import str
except ImportError:
from __builtin__ import object
from __builtin__ import str
import targets_cfg
def pretty_list(lst, indent=8):
if lst is None or len(lst) == 0:
return ""
if len(lst) == 1:
return "\"%s\"" % lst[0]
separator = "\",\n%s\"" % (" " * indent)
res = separator.join(sorted(lst))
res = "\n" + (" " * indent) + "\"" + res + "\",\n" + (" " * (indent - 4))
return res
class TARGETSBuilder(object):
def __init__(self, path):
self.path = path
self.targets_file = open(path, 'w')
self.targets_file.write(targets_cfg.rocksdb_target_header)
self.total_lib = 0
self.total_bin = 0
self.total_test = 0
self.tests_cfg = ""
def __del__(self):
self.targets_file.close()
def add_library(self, name, srcs, deps=None, headers=None):
headers_attr_prefix = ""
if headers is None:
headers_attr_prefix = "auto_"
headers = "AutoHeaders.RECURSIVE_GLOB"
self.targets_file.write(targets_cfg.library_template.format(
name=name,
srcs=pretty_list(srcs),
headers_attr_prefix=headers_attr_prefix,
headers=headers,
deps=pretty_list(deps)))
self.total_lib = self.total_lib + 1
def add_rocksdb_library(self, name, srcs, headers=None):
headers_attr_prefix = ""
if headers is None:
headers_attr_prefix = "auto_"
headers = "AutoHeaders.RECURSIVE_GLOB"
self.targets_file.write(targets_cfg.rocksdb_library_template.format(
name=name,
srcs=pretty_list(srcs),
headers_attr_prefix=headers_attr_prefix,
headers=headers))
self.total_lib = self.total_lib + 1
def add_binary(self, name, srcs, deps=None):
self.targets_file.write(targets_cfg.binary_template % (
name,
pretty_list(srcs),
pretty_list(deps)))
self.total_bin = self.total_bin + 1
def register_test(self,
test_name,
src,
is_parallel,
extra_deps,
extra_compiler_flags):
exec_mode = "serial"
if is_parallel:
exec_mode = "parallel"
self.tests_cfg += targets_cfg.test_cfg_template % (
test_name,
str(src),
str(exec_mode),
extra_deps,
extra_compiler_flags)
self.total_test = self.total_test + 1
def flush_tests(self):
self.targets_file.write(targets_cfg.unittests_template % self.tests_cfg)
self.tests_cfg = ""
| true
| true
|
79049699a1eef13e74b134490f3bc3f1fe152ec0
| 16,882
|
py
|
Python
|
testscripts/RDKB/component/WAN_MANAGER/TS_WANMANAGER_DSLWANoE_FixedMode_ActLink_P_1-1_WAN_Pri-Sec.py
|
rdkcmf/rdkb-tools-tdkb
|
9f9c3600cd701d5fc90ac86a6394ebd28d49267e
|
[
"Apache-2.0"
] | null | null | null |
testscripts/RDKB/component/WAN_MANAGER/TS_WANMANAGER_DSLWANoE_FixedMode_ActLink_P_1-1_WAN_Pri-Sec.py
|
rdkcmf/rdkb-tools-tdkb
|
9f9c3600cd701d5fc90ac86a6394ebd28d49267e
|
[
"Apache-2.0"
] | null | null | null |
testscripts/RDKB/component/WAN_MANAGER/TS_WANMANAGER_DSLWANoE_FixedMode_ActLink_P_1-1_WAN_Pri-Sec.py
|
rdkcmf/rdkb-tools-tdkb
|
9f9c3600cd701d5fc90ac86a6394ebd28d49267e
|
[
"Apache-2.0"
] | null | null | null |
##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2021 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
<?xml version="1.0" encoding="UTF-8"?><xml>
<id/>
<version>1</version>
<name>TS_WANMANAGER_DSLWANoE_FixedMode_ActLink_P_1-1_WAN_Pri-Sec</name>
<primitive_test_id/>
<primitive_test_name>wanmanager_DoNothing</primitive_test_name>
<primitive_test_version>1</primitive_test_version>
<status>FREE</status>
  <synopsis>To check if the DSL line is active with FIXED_MODE policy, WAN Type and priorities being (1,1) (Primary,Secondary) for DSL and WANOE respectively</synopsis>
<groups_id/>
<execution_time>40</execution_time>
<long_duration>false</long_duration>
<advanced_script>false</advanced_script>
<remarks/>
<skip>false</skip>
<box_types>
<box_type>Broadband</box_type>
</box_types>
<rdk_versions>
<rdk_version>RDKB</rdk_version>
</rdk_versions>
<test_cases>
<test_case_id>TC_WANMANAGER_55</test_case_id>
    <test_objective>This test case is to check if the DSL line is active with FIXED_MODE policy, WAN Type and priorities being (1,1) (Primary,Secondary) for DSL and WANOE respectively</test_objective>
<test_type>Positive</test_type>
<test_setup>Broadband</test_setup>
<pre_requisite>1.Ccsp Components should be in a running state else invoke cosa_start.sh manually that includes all the ccsp components and TDK Component
2.TDK Agent should be in running state or invoke it through StartTdk.sh script
3.WAN Manager should be enabled
4.Both DSL WAN and WANOE WAN connections should be available</pre_requisite>
<api_or_interface_used>None</api_or_interface_used>
<input_parameters>Device.X_RDK_WanManager.Policy
Device.X_RDK_WanManager.CPEInterface.1.Wan.Type
Device.X_RDK_WanManager.CPEInterface.2.Wan.Type
Device.X_RDK_WanManager.CPEInterface.1.Wan.Priority
Device.X_RDK_WanManager.CPEInterface.2.Wan.Priority
Device.X_RDK_WanManager.CPEInterface.1.Wan.ActiveLink
Device.X_RDK_WanManager.CPEInterface.2.Wan.ActiveLink </input_parameters>
<automation_approch>1.Load the Module
2.Get the current WAN Priority and WAN Types for DSL and WANOE interfaces
3.Make the priority and WAN Type unequal so that the subsequent set operations succeed
4.Get the current WAN policy and set the policy to FIXED_MODE if it is not already FIXED_MODE
5.Set the Wan Type and priorities as(1,1) (Primary, Secondary) for DSL and WANOE respectively
6.Get the active link status for DSL and WANOE
7.With the current configurations DSL Line is expected to be active
8.Revert the set values
9.Unload the module</automation_approch>
<expected_output>With Fixed Mode policy Wan Type and priorities being (1,1) (Primary, Secondary) for DSL and WANOE respectively - DSL Line is expected to be active </expected_output>
<priority>High</priority>
<test_stub_interface>WAN_MANAGER</test_stub_interface>
<test_script>TS_WANMANAGER_DSLWANoE_FixedMode_ActLink_P_1-1_WAN_Pri-Sec</test_script>
<skipped>No</skipped>
<release_version>M90</release_version>
<remarks>None</remarks>
</test_cases>
</xml>
'''
# tdklib library,which provides a wrapper for tdk testcase script
import tdklib;
from tdkbVariables import *;
from time import sleep;
from WanManager_Utility import *;
obj = tdklib.TDKScriptingLibrary("tdkbtr181","RDKB");
obj1 = tdklib.TDKScriptingLibrary("sysutil","1");
#IP and Port of box, No need to change,
#This will be replaced with the corresponding Box IP and port while executing the script
ip = <ipaddress>
port = <port>
obj.configureTestCase(ip,port,'TS_WANMANAGER_DSLWANoE_FixedMode_ActLink_P_1-1_WAN_Pri-Sec');
obj1.configureTestCase(ip,port,'TS_WANMANAGER_DSLWANoE_FixedMode_ActLink_P_1-1_WAN_Pri-Sec');
#Get the result of connection with test component and DUT
loadmodulestatus =obj.getLoadModuleResult();
loadmodulestatus1 =obj1.getLoadModuleResult();
print "[LIB LOAD STATUS] : %s" %loadmodulestatus;
print "[LIB LOAD STATUS] : %s" %loadmodulestatus1;
if "SUCCESS" in (loadmodulestatus.upper() and loadmodulestatus1.upper()):
#Set the result status of execution
obj.setLoadModuleStatus("SUCCESS");
obj1.setLoadModuleStatus("SUCCESS");
revertwantype =0;
revertpriority =0;
expectedresult="SUCCESS";
tdkTestObj = obj.createTestStep('TDKB_TR181Stub_Get');
defaultTypePriority,actualresult = GetCurrentWanTypeAndPriority(tdkTestObj);
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 1: Get the current WAN Type,Priority values for DSL and WANOE";
print "EXPECTED RESULT 1: Should get the current WAN Type,Priority values for DSL and WANOE"
print "ACTUAL RESULT 1 :The current WAN Type,Priority for DSL and WANOE are %s:"%defaultTypePriority;
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
step = 2;
status, policy_initial = get_policy(tdkTestObj, step);
if status == 0:
tdkTestObj_Get = obj.createTestStep('TDKB_TR181Stub_Get');
tdkTestObj_Set = obj.createTestStep('TDKB_TR181Stub_Set');
print "***Checking if WAN types are equal and making them Unequal***";
revertwantype,default,actualresult = MakeWANTypeUnEqual(tdkTestObj_Get,tdkTestObj_Set);
if expectedresult in actualresult:
tdkTestObj.setResultStatus("SUCCESS");
print "***Checking if WAN priorities are equal and making them Unequal***";
revertpriority,default,actualresult = MakePriorityUnEqual(tdkTestObj_Get,tdkTestObj_Set);
if expectedresult in actualresult:
tdkTestObj.setResultStatus("SUCCESS");
#Set the Wan Manager Policy to FIXED_MODE
new_policy = "FIXED_MODE"
expectedresult="SUCCESS";
policyStatus =1;
revert = 0
if new_policy != policy_initial:
print "Setting the wanmanager policy to :%s"%new_policy
set_policy(new_policy, policy_initial, obj1, revert);
#Get the WANMANAGER POLICY and cross check with the Set value
step = step + 1;
status, policy = get_policy(tdkTestObj, step);
if status == 0:
revert = 1;
if policy == new_policy:
tdkTestObj.setResultStatus("SUCCESS");
print "The wanmanager policy is set successfully";
tdkTestObj = obj1.createTestStep('ExecuteCmd');
obj1.initiateReboot();
sleep(300);
else:
policyStatus =0;
tdkTestObj.setResultStatus("FAILURE");
print "The wanmanager policy is not set successfully";
else:
policyStatus =0;
tdkTestObj.setResultStatus("FAILURE");
print "Failed to get wanmanager policy after set ";
if policyStatus == 1:
print "The current WAN Manager Policy is %s" %new_policy;
wanDSL = "Primary";
wanWANOE = "Secondary";
priDSL = "1";
priWANOE ="1";
actualresult = SetWANTypethenPriority(tdkTestObj_Set,wanDSL,wanWANOE,priDSL,priWANOE);
revertwantype =1;
revertpriority =1;
if expectedresult in actualresult:
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 3: Set the (WANtype,Priority)for DSL(%s,%s) and WANOE(%s,%s)" %(wanDSL,priDSL,wanWANOE,priWANOE);
print "EXPECTED RESULT 3:Set operation is expected to be successful";
print "ACTUAL RESULT 3:set operations are successful";
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
tdkTestObj = obj.createTestStep('TDKB_TR181Stub_Get');
tdkTestObj.addParameter("ParamName","Device.X_RDK_WanManager.CPEInterface.1.Wan.ActiveLink");
#Execute the test case in DUT
tdkTestObj.executeTestCase(expectedresult);
actualresult1 = tdkTestObj.getResult();
activeDSL = tdkTestObj.getResultDetails().strip().replace("\\n", "");
tdkTestObj = obj.createTestStep('TDKB_TR181Stub_Get');
tdkTestObj.addParameter("ParamName","Device.X_RDK_WanManager.CPEInterface.2.Wan.ActiveLink");
#Execute the test case in DUT
tdkTestObj.executeTestCase(expectedresult);
actualresult2 = tdkTestObj.getResult();
activeWANOE = tdkTestObj.getResultDetails().strip().replace("\\n", "");
                            if expectedresult in actualresult1 and expectedresult in actualresult2:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 4: Get the Active link status of DSL and WANOE";
print "EXPECTED RESULT 4: Active link status of DSL and WANOE should be fetched successfully";
print "ACTUAL RESULT 4: Get operation succeeded";
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
if activeDSL == "true" and activeWANOE == "false":
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 5: Get the Active link status of DSL and WANOE";
print "EXPECTED RESULT 5: Active link status of DSL is expected to be true and WANOE as false";
print "ACTUAL RESULT 5: DSL status :%s, WANOE status : %s" %(activeDSL,activeWANOE);
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 5: Get the Active link status of DSL and WANOE";
print "EXPECTED RESULT 5:Active link status of DSL is expected to be true and WANOE as false";
print "ACTUAL RESULT 5: DSL status :%s, WANOE status : %s" %(activeDSL,activeWANOE);
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 4: Get the Active link status of DSL and WANOE";
print "EXPECTED RESULT 4: Active link status of DSL is expected to be true and WANOE as false";
print "ACTUAL RESULT 4: Get operation failed ";
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
if revert == 1:
set_policy(new_policy, policy_initial, obj1, revert);
else:
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 3: Set the (WANtype,Priority)for DSL(%s,%s) and WANOE(%s,%s)"%(wanDSL,priDSL,wanWANOE,priWANOE);
print "EXPECTED RESULT 3:Set operation is expected to be successful";
print "ACTUAL RESULT 3 :set operations failed";
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
else:
print "set operation of WAN Policy failed";
else:
tdkTestObj.setResultStatus("FAILURE");
print "Unable to make WAN priorities Un-equal"
else:
tdkTestObj.setResultStatus("FAILURE");
print "Unable to make WAN Types Un-equal"
else:
tdkTestObj.setResultStatus("FAILURE");
print "The current policy is not the expected policy";
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 1: Get the default WAN Type,Priority values for DSL and WANOE";
print "EXPECTED RESULT 1: Should get the default WAN Type,Priority values for DSL and WANOE"
print "ACTUAL RESULT 1 :The default WAN Type,Priority for DSL and WANOE are %s:"%defaultTypePriority;
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
#Revert operations
revertflag =1;
if revertpriority ==1:
print "Reverting priority to defaults";
paramList = ["Device.X_RDK_WanManager.CPEInterface.1.Wan.Priority","Device.X_RDK_WanManager.CPEInterface.2.Wan.Priority"];
tdkTestObj = obj.createTestStep('TDKB_TR181Stub_Set');
index = 2;
for item in paramList:
tdkTestObj.addParameter("ParamName",item);
tdkTestObj.addParameter("ParamValue",defaultTypePriority[index]);
tdkTestObj.addParameter("Type","int");
expectedresult= "SUCCESS";
#Execute testcase on DUT
tdkTestObj.executeTestCase(expectedresult);
result = tdkTestObj.getResult();
Setresult = tdkTestObj.getResultDetails();
index =index +1;
if expectedresult in result:
tdkTestObj.setResultStatus("SUCCESS");
else:
revertflag =0;
print "Revert operation failed for WAN priority";
tdkTestObj.setResultStatus("FAILURE");
break;
if revertwantype == 1:
print "Reverting WAN Type to defaults";
paramList = ["Device.X_RDK_WanManager.CPEInterface.1.Wan.Type","Device.X_RDK_WanManager.CPEInterface.2.Wan.Type"];
tdkTestObj = obj.createTestStep('TDKB_TR181Stub_Set');
index = 0;
for item in paramList:
tdkTestObj.addParameter("ParamName",item);
tdkTestObj.addParameter("ParamValue",defaultTypePriority[index]);
tdkTestObj.addParameter("Type","string");
expectedresult= "SUCCESS";
#Execute testcase on DUT
tdkTestObj.executeTestCase(expectedresult);
result = tdkTestObj.getResult();
Setresult = tdkTestObj.getResultDetails();
index =index +1;
if expectedresult in result:
tdkTestObj.setResultStatus("SUCCESS");
else:
revertflag =0;
print "Revert operation failed for WAN Type";
tdkTestObj.setResultStatus("FAILURE");
break;
#printing the final revert status
if revertflag == 1:
print "Revert operation successful for WAN Type and WAN priority";
else:
print "Revert operation failed for either WAN Type or WAN priority";
obj.unloadModule("tdkbtr181");
obj1.unloadModule("sysutil");
else:
print "Failed to load module";
obj.setLoadModuleStatus("FAILURE");
obj1.setLoadModuleStatus("FAILURE");
| 53.936102
| 199
| 0.603838
|
# tdklib library,which provides a wrapper for tdk testcase script
import tdklib;
from tdkbVariables import *;
from time import sleep;
from WanManager_Utility import *;
obj = tdklib.TDKScriptingLibrary("tdkbtr181","RDKB");
obj1 = tdklib.TDKScriptingLibrary("sysutil","1");
#IP and Port of box, No need to change,
#This will be replaced with the corresponding Box IP and port while executing the script
ip = <ipaddress>
port = <port>
obj.configureTestCase(ip,port,'TS_WANMANAGER_DSLWANoE_FixedMode_ActLink_P_1-1_WAN_Pri-Sec');
obj1.configureTestCase(ip,port,'TS_WANMANAGER_DSLWANoE_FixedMode_ActLink_P_1-1_WAN_Pri-Sec');
#Get the result of connection with test component and DUT
loadmodulestatus =obj.getLoadModuleResult();
loadmodulestatus1 =obj1.getLoadModuleResult();
print "[LIB LOAD STATUS] : %s" %loadmodulestatus;
print "[LIB LOAD STATUS] : %s" %loadmodulestatus1;
if "SUCCESS" in (loadmodulestatus.upper() and loadmodulestatus1.upper()):
#Set the result status of execution
obj.setLoadModuleStatus("SUCCESS");
obj1.setLoadModuleStatus("SUCCESS");
revertwantype =0;
revertpriority =0;
expectedresult="SUCCESS";
tdkTestObj = obj.createTestStep('TDKB_TR181Stub_Get');
defaultTypePriority,actualresult = GetCurrentWanTypeAndPriority(tdkTestObj);
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 1: Get the current WAN Type,Priority values for DSL and WANOE";
print "EXPECTED RESULT 1: Should get the current WAN Type,Priority values for DSL and WANOE"
print "ACTUAL RESULT 1 :The current WAN Type,Priority for DSL and WANOE are %s:"%defaultTypePriority;
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
step = 2;
status, policy_initial = get_policy(tdkTestObj, step);
if status == 0:
tdkTestObj_Get = obj.createTestStep('TDKB_TR181Stub_Get');
tdkTestObj_Set = obj.createTestStep('TDKB_TR181Stub_Set');
print "***Checking if WAN types are equal and making them Unequal***";
revertwantype,default,actualresult = MakeWANTypeUnEqual(tdkTestObj_Get,tdkTestObj_Set);
if expectedresult in actualresult:
tdkTestObj.setResultStatus("SUCCESS");
print "***Checking if WAN priorities are equal and making them Unequal***";
revertpriority,default,actualresult = MakePriorityUnEqual(tdkTestObj_Get,tdkTestObj_Set);
if expectedresult in actualresult:
tdkTestObj.setResultStatus("SUCCESS");
#Set the Wan Manager Policy to FIXED_MODE
new_policy = "FIXED_MODE"
expectedresult="SUCCESS";
policyStatus =1;
revert = 0
if new_policy != policy_initial:
print "Setting the wanmanager policy to :%s"%new_policy
set_policy(new_policy, policy_initial, obj1, revert);
#Get the WANMANAGER POLICY and cross check with the Set value
step = step + 1;
status, policy = get_policy(tdkTestObj, step);
if status == 0:
revert = 1;
if policy == new_policy:
tdkTestObj.setResultStatus("SUCCESS");
print "The wanmanager policy is set successfully";
tdkTestObj = obj1.createTestStep('ExecuteCmd');
obj1.initiateReboot();
sleep(300);
else:
policyStatus =0;
tdkTestObj.setResultStatus("FAILURE");
print "The wanmanager policy is not set successfully";
else:
policyStatus =0;
tdkTestObj.setResultStatus("FAILURE");
print "Failed to get wanmanager policy after set ";
if policyStatus == 1:
print "The current WAN Manager Policy is %s" %new_policy;
wanDSL = "Primary";
wanWANOE = "Secondary";
priDSL = "1";
priWANOE ="1";
actualresult = SetWANTypethenPriority(tdkTestObj_Set,wanDSL,wanWANOE,priDSL,priWANOE);
revertwantype =1;
revertpriority =1;
if expectedresult in actualresult:
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 3: Set the (WANtype,Priority)for DSL(%s,%s) and WANOE(%s,%s)" %(wanDSL,priDSL,wanWANOE,priWANOE);
print "EXPECTED RESULT 3:Set operation is expected to be successful";
print "ACTUAL RESULT 3:set operations are successful";
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
tdkTestObj = obj.createTestStep('TDKB_TR181Stub_Get');
tdkTestObj.addParameter("ParamName","Device.X_RDK_WanManager.CPEInterface.1.Wan.ActiveLink");
#Execute the test case in DUT
tdkTestObj.executeTestCase(expectedresult);
actualresult1 = tdkTestObj.getResult();
activeDSL = tdkTestObj.getResultDetails().strip().replace("\\n", "");
tdkTestObj = obj.createTestStep('TDKB_TR181Stub_Get');
tdkTestObj.addParameter("ParamName","Device.X_RDK_WanManager.CPEInterface.2.Wan.ActiveLink");
#Execute the test case in DUT
tdkTestObj.executeTestCase(expectedresult);
actualresult2 = tdkTestObj.getResult();
activeWANOE = tdkTestObj.getResultDetails().strip().replace("\\n", "");
if expectedresult in actualresult1 and expectedresult in actualresult2:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 4: Get the Active link status of DSL and WANOE";
print "EXPECTED RESULT 4: Active link status of DSL and WANOE should be fetched successfully";
print "ACTUAL RESULT 4: Get operation succeeded";
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
if activeDSL == "true" and activeWANOE == "false":
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 5: Get the Active link status of DSL and WANOE";
print "EXPECTED RESULT 5: Active link status of DSL is expected to be true and WANOE as false";
print "ACTUAL RESULT 5: DSL status :%s, WANOE status : %s" %(activeDSL,activeWANOE);
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 5: Get the Active link status of DSL and WANOE";
print "EXPECTED RESULT 5:Active link status of DSL is expected to be true and WANOE as false";
print "ACTUAL RESULT 5: DSL status :%s, WANOE status : %s" %(activeDSL,activeWANOE);
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 4: Get the Active link status of DSL and WANOE";
print "EXPECTED RESULT 4: Active link status of DSL is expected to be true and WANOE as false";
print "ACTUAL RESULT 4: Get operation failed ";
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
if revert == 1:
set_policy(new_policy, policy_initial, obj1, revert);
else:
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 3: Set the (WANtype,Priority)for DSL(%s,%s) and WANOE(%s,%s)"%(wanDSL,priDSL,wanWANOE,priWANOE);
print "EXPECTED RESULT 3:Set operation is expected to be successful";
print "ACTUAL RESULT 3 :set operations failed";
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
else:
print "set operation of WAN Policy failed";
else:
tdkTestObj.setResultStatus("FAILURE");
print "Unable to make WAN priorities Un-equal"
else:
tdkTestObj.setResultStatus("FAILURE");
print "Unable to make WAN Types Un-equal"
else:
tdkTestObj.setResultStatus("FAILURE");
print "The current policy is not the expected policy";
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 1: Get the default WAN Type,Priority values for DSL and WANOE";
print "EXPECTED RESULT 1: Should get the default WAN Type,Priority values for DSL and WANOE"
print "ACTUAL RESULT 1 :The default WAN Type,Priority for DSL and WANOE are %s:"%defaultTypePriority;
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
#Revert operations
revertflag =1;
if revertpriority ==1:
print "Reverting priority to defaults";
paramList = ["Device.X_RDK_WanManager.CPEInterface.1.Wan.Priority","Device.X_RDK_WanManager.CPEInterface.2.Wan.Priority"];
tdkTestObj = obj.createTestStep('TDKB_TR181Stub_Set');
index = 2;
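#Priority defaults are stored from index 2 onwards in defaultTypePriority; the WAN Type defaults occupy indices 0 and 1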
for item in paramList:
tdkTestObj.addParameter("ParamName",item);
tdkTestObj.addParameter("ParamValue",defaultTypePriority[index]);
tdkTestObj.addParameter("Type","int");
expectedresult= "SUCCESS";
#Execute testcase on DUT
tdkTestObj.executeTestCase(expectedresult);
result = tdkTestObj.getResult();
Setresult = tdkTestObj.getResultDetails();
index =index +1;
if expectedresult in result:
tdkTestObj.setResultStatus("SUCCESS");
else:
revertflag =0;
print "Revert operation failed for WAN priority";
tdkTestObj.setResultStatus("FAILURE");
break;
if revertwantype == 1:
print "Reverting WAN Type to defaults";
paramList = ["Device.X_RDK_WanManager.CPEInterface.1.Wan.Type","Device.X_RDK_WanManager.CPEInterface.2.Wan.Type"];
tdkTestObj = obj.createTestStep('TDKB_TR181Stub_Set');
index = 0;
for item in paramList:
tdkTestObj.addParameter("ParamName",item);
tdkTestObj.addParameter("ParamValue",defaultTypePriority[index]);
tdkTestObj.addParameter("Type","string");
expectedresult= "SUCCESS";
#Execute testcase on DUT
tdkTestObj.executeTestCase(expectedresult);
result = tdkTestObj.getResult();
Setresult = tdkTestObj.getResultDetails();
index =index +1;
if expectedresult in result:
tdkTestObj.setResultStatus("SUCCESS");
else:
revertflag =0;
print "Revert operation failed for WAN Type";
tdkTestObj.setResultStatus("FAILURE");
break;
#printing the final revert status
if revertflag == 1:
print "Revert operation successful for WAN Type and WAN priority";
else:
print "Revert operation failed for either WAN Type or WAN priority";
obj.unloadModule("tdkbtr181");
obj1.unloadModule("sysutil");
else:
print "Failed to load module";
obj.setLoadModuleStatus("FAILURE");
obj1.setLoadModuleStatus("FAILURE");
| false
| true
|
790497b29462b28b7c826279aa596827763c1a39
| 977
|
py
|
Python
|
src/testing/drawCountriesReg.py
|
OpenGeoscience/vgl
|
904bc5648727806e9c212af18964153f4cab0d3c
|
[
"Apache-2.0"
] | 6
|
2015-05-03T05:23:11.000Z
|
2018-09-15T08:17:13.000Z
|
src/testing/drawCountriesReg.py
|
OpenGeoscience/vgl
|
904bc5648727806e9c212af18964153f4cab0d3c
|
[
"Apache-2.0"
] | 44
|
2015-02-04T18:40:33.000Z
|
2018-12-18T16:16:51.000Z
|
src/testing/drawCountriesReg.py
|
OpenGeoscience/vgl
|
904bc5648727806e9c212af18964153f4cab0d3c
|
[
"Apache-2.0"
] | 1
|
2015-10-12T00:47:01.000Z
|
2015-10-12T00:47:01.000Z
|
import os
import sys
import time
import datetime
import selenium
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import NoSuchElementException
from compare_images import *
if __name__ == "__main__":
# Create a Firefox window driver.
browser = webdriver.Firefox()
browser.set_window_size(400, 400)
# Load the vtkweb application page.
url = "http://localhost:8000/testing/drawCountries.html"
browser.get(url)
# Give the page some time to update the image.
time.sleep(1)
# Take a screenshot.
shot = "drawCountries-%s.png" % (datetime.datetime.now())
browser.save_screenshot(shot)
# Compare the screenshot with the baseline, and report to stdout.
baseline_dir = os.environ['VGL_BASELINE_DIR']
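    # VGL_BASELINE_DIR is expected to point at the directory containing baseline-drawCountries.png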
print check_result_image(shot, os.path.join(baseline_dir, "baseline-drawCountries.png"), 20)
# Close the browser window.
browser.quit()
| 27.914286
| 96
| 0.738997
|
import os
import sys
import time
import datetime
import selenium
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import NoSuchElementException
from compare_images import *
if __name__ == "__main__":
browser = webdriver.Firefox()
browser.set_window_size(400, 400)
url = "http://localhost:8000/testing/drawCountries.html"
browser.get(url)
time.sleep(1)
shot = "drawCountries-%s.png" % (datetime.datetime.now())
browser.save_screenshot(shot)
baseline_dir = os.environ['VGL_BASELINE_DIR']
print check_result_image(shot, os.path.join(baseline_dir, "baseline-drawCountries.png"), 20)
browser.quit()
| false
| true
|
790497d305bb514cfd066b3430147ed303c833a3
| 398
|
py
|
Python
|
comment/migrations/0002_auto_20200903_0323.py
|
shenjinglei/typeidea
|
0391db5354bfb3e96b38652d907b670af11eabf7
|
[
"BSD-2-Clause"
] | null | null | null |
comment/migrations/0002_auto_20200903_0323.py
|
shenjinglei/typeidea
|
0391db5354bfb3e96b38652d907b670af11eabf7
|
[
"BSD-2-Clause"
] | null | null | null |
comment/migrations/0002_auto_20200903_0323.py
|
shenjinglei/typeidea
|
0391db5354bfb3e96b38652d907b670af11eabf7
|
[
"BSD-2-Clause"
] | null | null | null |
# Generated by Django 2.2.6 on 2020-09-03 03:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('comment', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='target',
field=models.CharField(max_length=100, verbose_name='评论目标'),
),
]
| 20.947368
| 72
| 0.59799
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('comment', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='target',
field=models.CharField(max_length=100, verbose_name='评论目标'),
),
]
| true
| true
|
7904984f2db32a39e0ae2d50aef02c67e0eeb3f7
| 52,257
|
py
|
Python
|
discovery-infra/test_infra/helper_classes/cluster.py
|
mchernik/assisted-test-infra
|
02b2b9533044740dc5de56fcbac7b1ed7f7e1227
|
[
"Apache-2.0"
] | null | null | null |
discovery-infra/test_infra/helper_classes/cluster.py
|
mchernik/assisted-test-infra
|
02b2b9533044740dc5de56fcbac7b1ed7f7e1227
|
[
"Apache-2.0"
] | 206
|
2020-11-10T07:34:14.000Z
|
2022-03-29T16:37:50.000Z
|
discovery-infra/test_infra/helper_classes/cluster.py
|
mchernik/assisted-test-infra
|
02b2b9533044740dc5de56fcbac7b1ed7f7e1227
|
[
"Apache-2.0"
] | null | null | null |
import contextlib
import ipaddress
import json
import os
import random
import re
import time
import warnings
from collections import Counter
from typing import Any, Dict, List, Optional, Set, Union
import requests
import test_infra.utils.waiting
import waiting
import yaml
from assisted_service_client import models
from assisted_service_client.models.operator_type import OperatorType
from junit_report import JunitTestCase
from netaddr import IPAddress, IPNetwork
from test_infra import consts, utils
from test_infra.assisted_service_api import InventoryClient
from test_infra.controllers.load_balancer_controller import LoadBalancerController
from test_infra.controllers.node_controllers import Node
from test_infra.helper_classes.cluster_host import ClusterHost
from test_infra.helper_classes.config import BaseClusterConfig, BaseInfraEnvConfig
from test_infra.helper_classes.entity import Entity
from test_infra.helper_classes.events_handler import EventsHandler
from test_infra.helper_classes.infra_env import InfraEnv
from test_infra.helper_classes.nodes import Nodes
from test_infra.tools import static_network, terraform_utils
from test_infra.utils import Path, log, logs_utils, network_utils, operators_utils
from test_infra.utils.entity_name import ClusterName
class Cluster(Entity):
MINIMUM_NODES_TO_WAIT = 1
EVENTS_THRESHOLD = 500 # TODO - remove EVENTS_THRESHOLD after removing it from kni-assisted-installer-auto
_config: BaseClusterConfig
def __init__(
self,
api_client: InventoryClient,
config: BaseClusterConfig,
infra_env_config: BaseInfraEnvConfig,
nodes: Optional[Nodes] = None,
):
super().__init__(api_client, config, nodes)
self._infra_env_config = infra_env_config
self._infra_env = None
# Update infraEnv configurations
self._infra_env_config.cluster_id = config.cluster_id
self._infra_env_config.openshift_version = self._config.openshift_version
self._infra_env_config.pull_secret = self._config.pull_secret
self._high_availability_mode = config.high_availability_mode
self.name = config.cluster_name.get()
@property
def kubeconfig_path(self):
return self._config.kubeconfig_path
@property
def iso_download_path(self):
return self._config.iso_download_path
@property
def enable_image_download(self):
return self._config.download_image
def _update_day2_config(self, api_client: InventoryClient, cluster_id: str):
day2_cluster: models.cluster.Cluster = api_client.cluster_get(cluster_id)
self.update_config(
**dict(
openshift_version=day2_cluster.openshift_version,
cluster_name=ClusterName(day2_cluster.name),
additional_ntp_source=day2_cluster.additional_ntp_source,
user_managed_networking=day2_cluster.user_managed_networking,
high_availability_mode=day2_cluster.high_availability_mode,
olm_operators=day2_cluster.monitored_operators,
base_dns_domain=day2_cluster.base_dns_domain,
vip_dhcp_allocation=day2_cluster.vip_dhcp_allocation,
)
)
def _create(self) -> str:
if self._config.cluster_id:
log.info(f"Fetching day2 cluster with id {self._config.cluster_id}")
self._update_day2_config(self.api_client, self._config.cluster_id)
return self._config.cluster_id
cluster = self.api_client.create_cluster(
self._config.cluster_name.get(),
ssh_public_key=self._config.ssh_public_key,
openshift_version=self._config.openshift_version,
pull_secret=self._config.pull_secret,
base_dns_domain=self._config.base_dns_domain,
vip_dhcp_allocation=self._config.vip_dhcp_allocation,
additional_ntp_source=self._config.additional_ntp_source,
user_managed_networking=self._config.user_managed_networking,
high_availability_mode=self._config.high_availability_mode,
olm_operators=[{"name": name} for name in self._config.olm_operators],
network_type=self._config.network_type,
)
self._config.cluster_id = cluster.id
return cluster.id
def delete(self):
self.api_client.delete_cluster(self.id)
def get_details(self):
return self.api_client.cluster_get(self.id)
def get_cluster_name(self):
return self.get_details().name
def get_hosts(self):
return self.api_client.get_cluster_hosts(self.id)
def get_host_ids(self):
return [host["id"] for host in self.get_hosts()]
def get_host_ids_names_mapping(self):
return {host["id"]: host["requested_hostname"] for host in self.get_hosts()}
def get_host_assigned_roles(self):
hosts = self.get_hosts()
return {h["id"]: h["role"] for h in hosts}
def get_operators(self):
return self.api_client.get_cluster_operators(self.id)
# TODO remove in favor of generate_infra_env
def generate_image(self):
warnings.warn("generate_image is deprecated. Use generate_infra_env instead.", DeprecationWarning)
self.api_client.generate_image(cluster_id=self.id, ssh_key=self._config.ssh_public_key)
def generate_infra_env(
self, static_network_config=None, iso_image_type=None, ssh_key=None, ignition_info=None, proxy=None
) -> InfraEnv:
self._infra_env_config.ssh_public_key = ssh_key or self._config.ssh_public_key
self._infra_env_config.iso_image_type = iso_image_type or self._config.iso_image_type
self._infra_env_config.static_network_config = static_network_config
self._infra_env_config.ignition_config_override = ignition_info
self._infra_env_config.proxy = proxy or self._config.proxy
infra_env = InfraEnv(api_client=self.api_client, config=self._infra_env_config)
self._infra_env = infra_env
return infra_env
def update_infra_env_proxy(self, proxy: models.Proxy) -> None:
self._infra_env_config.proxy = proxy
self._infra_env.update_proxy(proxy=proxy)
def download_infra_env_image(self, iso_download_path=None) -> Path:
iso_download_path = iso_download_path or self._config.iso_download_path
return self._infra_env.download_image(iso_download_path=iso_download_path)
@JunitTestCase()
def generate_and_download_infra_env(
self,
iso_download_path=None,
static_network_config=None,
iso_image_type=None,
ssh_key=None,
ignition_info=None,
proxy=None,
) -> Path:
if self._config.is_static_ip and static_network_config is None:
static_network_config = static_network.generate_static_network_data_from_tf(self.nodes.controller.tf_folder)
self.generate_infra_env(
static_network_config=static_network_config,
iso_image_type=iso_image_type,
ssh_key=ssh_key,
ignition_info=ignition_info,
proxy=proxy,
)
return self.download_infra_env_image(iso_download_path=iso_download_path or self._config.iso_download_path)
@JunitTestCase()
def generate_and_download_image(
self, iso_download_path=None, static_network_config=None, iso_image_type=None, ssh_key=None
):
warnings.warn(
"generate_and_download_image is deprecated. Use generate_and_download_infra_env instead.",
DeprecationWarning,
)
iso_download_path = iso_download_path or self._config.iso_download_path
# ensure the parent directory of the download path exists before downloading
if not os.path.exists(iso_download_path):
utils.recreate_folder(os.path.dirname(iso_download_path), force_recreate=False)
self.api_client.generate_and_download_image(
cluster_id=self.id,
ssh_key=ssh_key or self._config.ssh_public_key,
image_path=iso_download_path,
image_type=iso_image_type or self._config.iso_image_type,
static_network_config=static_network_config,
)
def wait_until_hosts_are_disconnected(self, nodes_count: int = None):
statuses = [consts.NodesStatus.DISCONNECTED]
test_infra.utils.waiting.wait_till_all_hosts_are_in_status(
client=self.api_client,
cluster_id=self.id,
nodes_count=nodes_count or self.nodes.nodes_count,
statuses=statuses,
timeout=consts.DISCONNECTED_TIMEOUT,
)
@JunitTestCase()
def wait_until_hosts_are_discovered(self, allow_insufficient=False, nodes_count: int = None):
statuses = [consts.NodesStatus.PENDING_FOR_INPUT, consts.NodesStatus.KNOWN]
if allow_insufficient:
statuses.append(consts.NodesStatus.INSUFFICIENT)
test_infra.utils.waiting.wait_till_all_hosts_are_in_status(
client=self.api_client,
cluster_id=self.id,
nodes_count=nodes_count or self.nodes.nodes_count,
statuses=statuses,
timeout=consts.NODES_REGISTERED_TIMEOUT,
)
def _get_matching_hosts(self, host_type, count):
hosts = self.get_hosts()
return [{"id": h["id"], "role": host_type} for h in hosts if host_type in h["requested_hostname"]][:count]
def set_cluster_name(self, cluster_name: str):
log.info(f"Setting Cluster Name:{cluster_name} for cluster: {self.id}")
self.update_config(cluster_name=ClusterName(prefix=cluster_name, suffix=None))
self.api_client.update_cluster(self.id, {"name": cluster_name})
def select_installation_disk(self, host_id: str, disk_paths: List[dict]) -> None:
self._infra_env.select_host_installation_disk(host_id=host_id, disk_paths=disk_paths)
def set_ocs(self, properties=None):
self.set_olm_operator(consts.OperatorType.OCS, properties=properties)
def set_cnv(self, properties=None):
self.set_olm_operator(consts.OperatorType.CNV, properties=properties)
def unset_ocs(self):
self.unset_olm_operator(consts.OperatorType.OCS)
def unset_cnv(self):
self.unset_olm_operator(consts.OperatorType.CNV)
def unset_olm_operator(self, operator_name):
log.info(f"Unsetting {operator_name} for cluster: {self.id}")
cluster = self.api_client.cluster_get(self.id)
olm_operators = []
for operator in cluster.monitored_operators:
if operator.name == operator_name or operator.operator_type == OperatorType.BUILTIN:
continue
olm_operators.append({"name": operator.name, "properties": operator.properties})
self.api_client.update_cluster(self.id, {"olm_operators": olm_operators})
def set_olm_operator(self, operator_name, properties=None):
log.info(f"Setting {operator_name} for cluster: {self.id}")
cluster = self.api_client.cluster_get(self.id)
if operator_name in [o.name for o in cluster.monitored_operators]:
return
olm_operators = []
for operator in cluster.monitored_operators:
if operator.operator_type == OperatorType.BUILTIN:
continue
olm_operators.append({"name": operator.name, "properties": operator.properties})
olm_operators.append({"name": operator_name, "properties": properties})
self._config.olm_operators = olm_operators
self.api_client.update_cluster(self.id, {"olm_operators": olm_operators})
def set_host_roles(self, num_masters: int = None, num_workers: int = None, requested_roles=None):
if requested_roles is None:
requested_roles = Counter(
master=num_masters or self.nodes.masters_count, worker=num_workers or self.nodes.workers_count
)
assigned_roles = self._get_matching_hosts(host_type=consts.NodeRoles.MASTER, count=requested_roles["master"])
assigned_roles.extend(
self._get_matching_hosts(host_type=consts.NodeRoles.WORKER, count=requested_roles["worker"])
)
for role in assigned_roles:
self._infra_env.update_host(host_id=role["id"], host_role=role["role"])
return assigned_roles
def set_specific_host_role(self, host, role):
self._infra_env.update_host(host_id=host["id"], host_role=role)
def set_network_params(self, controller=None):
# The controller argument is here only for backward compatibility. TODO: remove after QE refactors all e2e tests
controller = controller or self.nodes.controller # TODO - Remove after QE refactor all e2e tests
if self._config.platform == consts.Platforms.NONE:
log.info("On None platform, leaving network management to the user")
api_vip = ingress_vip = machine_networks = None
elif self._config.vip_dhcp_allocation or self._high_availability_mode == consts.HighAvailabilityMode.NONE:
log.info("Letting access VIPs be deducted from machine networks")
api_vip = ingress_vip = None
machine_networks = self.get_machine_networks()
else:
log.info("Assigning VIPs statically")
access_vips = controller.get_ingress_and_api_vips()
api_vip = access_vips["api_vip"]
ingress_vip = access_vips["ingress_vip"]
machine_networks = None
self.set_advanced_networking(
vip_dhcp_allocation=self._config.vip_dhcp_allocation,
cluster_networks=self._config.cluster_networks,
service_networks=self._config.service_networks,
machine_networks=machine_networks,
api_vip=api_vip,
ingress_vip=ingress_vip,
)
# TODO: when assisted-service supports configuring dual-stack networks in one go,
# change it so that we call set_advanced_networking only once
if self._config.is_ipv4 and self._config.is_ipv6:
machine_networks = controller.get_all_machine_addresses()
self.set_advanced_networking(machine_networks=machine_networks)
def get_primary_machine_cidr(self):
cidr = self.nodes.controller.get_primary_machine_cidr()
if not cidr:
# Support controllers where the machine CIDR is not configurable; take it from the assisted installer instead
matching_cidrs = self.get_cluster_matching_cidrs(Cluster.get_cluster_hosts(self.get_details()))
if not matching_cidrs:
raise RuntimeError("No matching cidr for DHCP")
cidr = next(iter(matching_cidrs))
return cidr
def get_machine_networks(self):
networks = []
primary_machine_cidr = self.nodes.controller.get_primary_machine_cidr()
if primary_machine_cidr:
networks.append(primary_machine_cidr)
secondary_machine_cidr = self.nodes.controller.get_provisioning_cidr()
if secondary_machine_cidr:
networks.append(secondary_machine_cidr)
if not networks:
# Support controllers where the machine CIDR is not configurable; take it from the assisted installer instead
networks = self.get_cluster_matching_cidrs(Cluster.get_cluster_hosts(self.get_details()))
if not networks:
raise RuntimeError("No matching cidr for DHCP")
return networks
def set_ingress_and_api_vips(self, vips):
log.info(f"Setting API VIP:{vips['api_vip']} and ingress VIP:{vips['ingress_vip']} for cluster: {self.id}")
self.api_client.update_cluster(self.id, vips)
def set_ssh_key(self, ssh_key: str):
log.info(f"Setting SSH key:{ssh_key} for cluster: {self.id}")
self.update_config(ssh_public_key=ssh_key)
self.api_client.update_cluster(self.id, {"ssh_public_key": ssh_key})
def set_base_dns_domain(self, base_dns_domain: str):
log.info(f"Setting base DNS domain:{base_dns_domain} for cluster: {self.id}")
self.update_config(base_dns_domain=base_dns_domain)
self.api_client.update_cluster(self.id, {"base_dns_domain": base_dns_domain})
def set_advanced_networking(
self,
vip_dhcp_allocation: Optional[bool] = None,
cluster_networks: Optional[List[models.ClusterNetwork]] = None,
service_networks: Optional[List[models.ServiceNetwork]] = None,
machine_networks: Optional[List[models.MachineNetwork]] = None,
api_vip: Optional[str] = None,
ingress_vip: Optional[str] = None,
):
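# Arguments left as None fall back to the corresponding values from the cluster config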
if machine_networks is None:
machine_networks = self._config.machine_networks
else:
machine_networks = [models.MachineNetwork(cidr=cidr) for cidr in machine_networks]
if vip_dhcp_allocation is None:
vip_dhcp_allocation = self._config.vip_dhcp_allocation
advanced_networking = {
"vip_dhcp_allocation": vip_dhcp_allocation,
"cluster_networks": cluster_networks if cluster_networks is not None else self._config.cluster_networks,
"service_networks": service_networks if service_networks is not None else self._config.service_networks,
"machine_networks": machine_networks,
"api_vip": api_vip if api_vip is not None else self._config.api_vip,
"ingress_vip": ingress_vip if ingress_vip is not None else self._config.ingress_vip,
}
log.info(f"Updating advanced networking with {advanced_networking} for cluster: {self.id}")
self.update_config(**advanced_networking)
self.api_client.update_cluster(self.id, advanced_networking)
def set_pull_secret(self, pull_secret: str):
log.info(f"Setting pull secret:{pull_secret} for cluster: {self.id}")
self.update_config(pull_secret=pull_secret)
self.api_client.update_cluster(self.id, {"pull_secret": pull_secret})
def set_host_name(self, host_id, requested_name):
log.info(f"Setting Required Host Name:{requested_name}, for Host ID: {host_id}")
self._infra_env.update_host(host_id=host_id, host_name=requested_name)
def set_additional_ntp_source(self, ntp_source: List[str]):
log.info(f"Setting Additional NTP source:{ntp_source}")
if isinstance(ntp_source, List):
ntp_source_string = ",".join(ntp_source)
elif isinstance(ntp_source, str):
ntp_source_string = ntp_source
else:
raise TypeError(
f"ntp_source must be a string or a list of strings, got: {ntp_source}," f" type: {type(ntp_source)}"
)
self.update_config(additional_ntp_source=ntp_source_string)
self.api_client.update_cluster(self.id, {"additional_ntp_source": ntp_source_string})
def patch_discovery_ignition(self, ignition):
self._infra_env.patch_discovery_ignition(ignition_info=ignition)
def set_proxy_values(self, proxy_values: models.Proxy) -> None:
log.info(f"Setting proxy values {proxy_values} for cluster: {self.id}")
self.update_config(proxy=proxy_values)
self.api_client.set_cluster_proxy(
self.id,
http_proxy=self._config.proxy.http_proxy,
https_proxy=self._config.proxy.https_proxy,
no_proxy=self._config.proxy.no_proxy,
)
@JunitTestCase()
def start_install(self):
self.api_client.install_cluster(cluster_id=self.id)
def wait_for_logs_complete(self, timeout, interval=60, check_host_logs_only=False):
logs_utils.wait_for_logs_complete(
client=self.api_client,
cluster_id=self.id,
timeout=timeout,
interval=interval,
check_host_logs_only=check_host_logs_only,
)
def wait_for_installing_in_progress(self, nodes_count: int = MINIMUM_NODES_TO_WAIT):
test_infra.utils.waiting.wait_till_at_least_one_host_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.NodesStatus.INSTALLING_IN_PROGRESS],
nodes_count=nodes_count,
timeout=consts.INSTALLING_IN_PROGRESS_TIMEOUT,
)
def wait_for_write_image_to_disk(self, nodes_count: int = MINIMUM_NODES_TO_WAIT):
test_infra.utils.waiting.wait_till_at_least_one_host_is_in_stage(
client=self.api_client,
cluster_id=self.id,
stages=[consts.HostsProgressStages.WRITE_IMAGE_TO_DISK, consts.HostsProgressStages.REBOOTING],
nodes_count=nodes_count,
)
def wait_for_host_status(self, statuses, fall_on_error_status=True, nodes_count: int = MINIMUM_NODES_TO_WAIT):
test_infra.utils.waiting.wait_till_at_least_one_host_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=statuses,
nodes_count=nodes_count,
fall_on_error_status=fall_on_error_status,
)
def wait_for_specific_host_status(self, host, statuses, nodes_count: int = MINIMUM_NODES_TO_WAIT):
test_infra.utils.waiting.wait_till_specific_host_is_in_status(
client=self.api_client,
cluster_id=self.id,
host_name=host.get("requested_hostname"),
statuses=statuses,
nodes_count=nodes_count,
)
def wait_for_specific_host_stage(self, host: dict, stage: str, inclusive: bool = True):
index = consts.all_host_stages.index(stage)
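# When inclusive, reaching the given stage itself counts; otherwise only a later stage does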
test_infra.utils.waiting.wait_till_specific_host_is_in_stage(
client=self.api_client,
cluster_id=self.id,
host_name=host.get("requested_hostname"),
stages=consts.all_host_stages[index:] if inclusive else consts.all_host_stages[index + 1 :],
)
def wait_for_cluster_in_error_status(self):
utils.wait_till_cluster_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.ClusterStatus.ERROR],
timeout=consts.ERROR_TIMEOUT,
)
def wait_for_pending_for_input_status(self):
utils.wait_till_cluster_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.ClusterStatus.PENDING_FOR_INPUT],
timeout=consts.PENDING_USER_ACTION_TIMEOUT,
)
def wait_for_at_least_one_host_to_boot_during_install(self, nodes_count: int = MINIMUM_NODES_TO_WAIT):
test_infra.utils.waiting.wait_till_at_least_one_host_is_in_stage(
client=self.api_client,
cluster_id=self.id,
stages=[consts.HostsProgressStages.REBOOTING],
nodes_count=nodes_count,
)
def wait_for_non_bootstrap_masters_to_reach_configuring_state_during_install(self, num_masters: int = None):
num_masters = num_masters if num_masters is not None else self.nodes.masters_count
test_infra.utils.waiting.wait_till_at_least_one_host_is_in_stage(
client=self.api_client,
cluster_id=self.id,
stages=[consts.HostsProgressStages.CONFIGURING],
nodes_count=num_masters - 1,
)
def wait_for_non_bootstrap_masters_to_reach_joined_state_during_install(self, num_masters: int = None):
num_masters = num_masters if num_masters is not None else self.nodes.masters_count
test_infra.utils.waiting.wait_till_at_least_one_host_is_in_stage(
client=self.api_client,
cluster_id=self.id,
stages=[consts.HostsProgressStages.JOINED],
nodes_count=num_masters - 1,
)
def wait_for_hosts_stage(self, stage: str, inclusive: bool = True):
index = consts.all_host_stages.index(stage)
test_infra.utils.waiting.wait_till_at_least_one_host_is_in_stage(
client=self.api_client,
cluster_id=self.id,
stages=consts.all_host_stages[index:] if inclusive else consts.all_host_stages[index + 1 :],
nodes_count=self.nodes.nodes_count,
)
@JunitTestCase()
def start_install_and_wait_for_installed(
self,
wait_for_hosts=True,
wait_for_operators=True,
wait_for_cluster_install=True,
download_kubeconfig=True,
):
self.start_install()
if wait_for_hosts:
self.wait_for_hosts_to_install()
if wait_for_operators:
self.wait_for_operators_to_finish()
if wait_for_cluster_install:
self.wait_for_install()
if download_kubeconfig:
self.download_kubeconfig()
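# Typical flow (a sketch): prepare_for_installation() followed by start_install_and_wait_for_installed()
# drives a full installation, including host install, operators, and kubeconfig download.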
def disable_worker_hosts(self):
hosts = self.get_hosts_by_role(consts.NodeRoles.WORKER)
for host in hosts:
self.disable_host(host)
def disable_host(self, host):
host_name = host["requested_hostname"]
log.info(f"Going to disable host: {host_name} in cluster: {self.id}")
self._infra_env.unbind_host(host_id=host["id"])
def enable_host(self, host):
host_name = host["requested_hostname"]
log.info(f"Going to enable host: {host_name} in cluster: {self.id}")
self._infra_env.bind_host(host_id=host["id"], cluster_id=self.id)
def delete_host(self, host):
host_id = host["id"]
log.info(f"Going to delete host: {host_id} in cluster: {self.id}")
self._infra_env.delete_host(host_id=host_id)
def cancel_install(self):
self.api_client.cancel_cluster_install(cluster_id=self.id)
def get_bootstrap_hostname(self):
hosts = self.get_hosts_by_role(consts.NodeRoles.MASTER)
for host in hosts:
if host.get("bootstrap"):
log.info("Bootstrap node is: %s", host["requested_hostname"])
return host["requested_hostname"]
def get_hosts_by_role(self, role, hosts=None):
hosts = hosts or self.api_client.get_cluster_hosts(self.id)
nodes_by_role = []
for host in hosts:
if host["role"] == role:
nodes_by_role.append(host)
log.info(f"Found hosts: {nodes_by_role}, that has the role: {role}")
return nodes_by_role
def get_random_host_by_role(self, role):
return random.choice(self.get_hosts_by_role(role))
def get_reboot_required_hosts(self):
return self.api_client.get_hosts_in_statuses(
cluster_id=self.id, statuses=[consts.NodesStatus.RESETING_PENDING_USER_ACTION]
)
def reboot_required_nodes_into_iso_after_reset(self):
hosts_to_reboot = self.get_reboot_required_hosts()
self.nodes.run_for_given_nodes_by_cluster_hosts(cluster_hosts=hosts_to_reboot, func_name="reset")
def wait_for_one_host_to_be_in_wrong_boot_order(self, fall_on_error_status=True):
test_infra.utils.waiting.wait_till_at_least_one_host_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.NodesStatus.INSTALLING_PENDING_USER_ACTION],
status_info=consts.HostStatusInfo.WRONG_BOOT_ORDER,
fall_on_error_status=fall_on_error_status,
timeout=consts.PENDING_USER_ACTION_TIMEOUT,
)
def wait_for_at_least_one_host_to_be_in_reboot_timeout(self, fall_on_error_status=True, nodes_count=1):
test_infra.utils.waiting.wait_till_at_least_one_host_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.NodesStatus.INSTALLING_PENDING_USER_ACTION],
status_info=consts.HostStatusInfo.REBOOT_TIMEOUT,
nodes_count=nodes_count,
fall_on_error_status=fall_on_error_status,
timeout=consts.PENDING_USER_ACTION_TIMEOUT,
)
def wait_for_hosts_to_be_in_wrong_boot_order(
self, nodes_count, timeout=consts.PENDING_USER_ACTION_TIMEOUT, fall_on_error_status=True
):
test_infra.utils.waiting.wait_till_all_hosts_are_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.NodesStatus.INSTALLING_PENDING_USER_ACTION],
status_info=consts.HostStatusInfo.WRONG_BOOT_ORDER,
nodes_count=nodes_count,
timeout=timeout,
fall_on_error_status=fall_on_error_status,
)
def wait_for_ready_to_install(self):
utils.wait_till_cluster_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.ClusterStatus.READY],
timeout=consts.READY_TIMEOUT,
)
# This code was added due to BZ:1909997; temporarily checking whether it helps prevent an unexpected failure
time.sleep(10)
utils.wait_till_cluster_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.ClusterStatus.READY],
timeout=consts.READY_TIMEOUT,
)
def is_in_cancelled_status(self):
return utils.is_cluster_in_status(
client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.CANCELLED]
)
def is_in_error(self):
return utils.is_cluster_in_status(
client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.ERROR]
)
def is_finalizing(self):
return utils.is_cluster_in_status(
client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.FINALIZING]
)
def is_installing(self):
return utils.is_cluster_in_status(
client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.INSTALLING]
)
def reset_install(self):
self.api_client.reset_cluster_install(cluster_id=self.id)
def is_in_insufficient_status(self):
return utils.is_cluster_in_status(
client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.INSUFFICIENT]
)
def wait_for_hosts_to_install(
self, timeout=consts.CLUSTER_INSTALLATION_TIMEOUT, fall_on_error_status=True, nodes_count: int = None
):
test_infra.utils.waiting.wait_till_all_hosts_are_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.ClusterStatus.INSTALLED],
nodes_count=nodes_count or self.nodes.nodes_count,
timeout=timeout,
fall_on_error_status=fall_on_error_status,
)
def wait_for_operators_to_finish(self, timeout=consts.CLUSTER_INSTALLATION_TIMEOUT, fall_on_error_status=True):
operators = self.get_operators()
if fall_on_error_status:
statuses = [consts.OperatorStatus.AVAILABLE]
else:
statuses = [consts.OperatorStatus.AVAILABLE, consts.OperatorStatus.FAILED]
operators_utils.wait_till_all_operators_are_in_status(
client=self.api_client,
cluster_id=self.id,
operators_count=len(operators_utils.filter_operators_by_type(operators, OperatorType.BUILTIN)),
operator_types=[OperatorType.BUILTIN],
statuses=statuses,
timeout=timeout,
fall_on_error_status=False,
)
operators_utils.wait_till_all_operators_are_in_status(
client=self.api_client,
cluster_id=self.id,
operators_count=len(operators_utils.filter_operators_by_type(operators, OperatorType.OLM)),
operator_types=[OperatorType.OLM],
statuses=[consts.OperatorStatus.AVAILABLE, consts.OperatorStatus.FAILED],
timeout=timeout,
fall_on_error_status=fall_on_error_status,
)
def is_operator_in_status(self, operator_name, status):
return operators_utils.is_operator_in_status(
operators=self.get_operators(), operator_name=operator_name, status=status
)
def wait_for_install(self, timeout=consts.CLUSTER_INSTALLATION_TIMEOUT):
utils.wait_till_cluster_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.ClusterStatus.INSTALLED],
timeout=timeout,
)
def _set_hostnames_and_roles(self):
cluster_id = self.id
hosts = self.to_cluster_hosts(self.api_client.get_cluster_hosts(cluster_id))
nodes = self.nodes.get_nodes(refresh=True)
for host in hosts:
if host.has_hostname():
continue
name = self.find_matching_node_name(host, nodes)
assert name is not None, (
f"Failed to find matching node for host with mac address {host.macs()}"
f" nodes: {[(n.name, n.ips, n.macs) for n in nodes]}"
)
if self.nodes.nodes_count == 1:
role = None
else:
role = consts.NodeRoles.MASTER if consts.NodeRoles.MASTER in name else consts.NodeRoles.WORKER
self._infra_env.update_host(host_id=host.get_id(), host_role=role, host_name=name)
def _ha_not_none(self):
return (
self._high_availability_mode != consts.HighAvailabilityMode.NONE
and self._config.platform != consts.Platforms.NONE
)
def download_image(self, iso_download_path: str = None) -> Path:
if self._infra_env is None:
log.warning("No infra_env found. Generating infra_env and downloading ISO")
return self.generate_and_download_infra_env(
iso_download_path=iso_download_path or self._config.iso_download_path,
iso_image_type=self._config.iso_image_type,
)
return self._infra_env.download_image(iso_download_path)
@JunitTestCase()
def prepare_for_installation(self, **kwargs):
super(Cluster, self).prepare_for_installation(**kwargs)
self.nodes.wait_for_networking()
self._set_hostnames_and_roles()
if self._high_availability_mode != consts.HighAvailabilityMode.NONE:
self.set_host_roles(len(self.nodes.get_masters()), len(self.nodes.get_workers()))
self.set_network_params(controller=self.nodes.controller)
# In the case of the None platform, DNS records need to be specified before the hosts are ready
if self._config.platform == consts.Platforms.NONE:
self._configure_load_balancer()
self.nodes.controller.set_dns_for_user_managed_network()
elif self._high_availability_mode == consts.HighAvailabilityMode.NONE:
main_cidr = self.get_primary_machine_cidr()
ip = Cluster.get_ip_for_single_node(self.api_client, self.id, main_cidr)
self.nodes.controller.set_single_node_ip(ip)
self.nodes.controller.set_dns(api_vip=ip, ingress_vip=ip)
self.wait_for_ready_to_install()
# In the case of a regular cluster, DNS needs to be set after the VIPs exist;
# in our case, once the nodes are ready the VIPs will be there for sure
if self._ha_not_none():
vips_info = self.__class__.get_vips_from_cluster(self.api_client, self.id)
self.nodes.controller.set_dns(api_vip=vips_info["api_vip"], ingress_vip=vips_info["ingress_vip"])
def download_kubeconfig_no_ingress(self, kubeconfig_path: str = None):
self.api_client.download_kubeconfig_no_ingress(self.id, kubeconfig_path or self._config.kubeconfig_path)
def download_kubeconfig(self, kubeconfig_path: str = None):
self.api_client.download_kubeconfig(self.id, kubeconfig_path or self._config.kubeconfig_path)
def download_installation_logs(self, cluster_tar_path):
self.api_client.download_cluster_logs(self.id, cluster_tar_path)
def get_install_config(self):
return yaml.safe_load(self.api_client.get_cluster_install_config(self.id))
def get_admin_credentials(self):
return self.api_client.get_cluster_admin_credentials(self.id)
def register_dummy_host(self):
dummy_host_id = "b164df18-0ff1-4b85-9121-059f10f58f71"
self.api_client.register_host(self.id, dummy_host_id)
def host_get_next_step(self, host_id):
return self.api_client.host_get_next_step(self.id, host_id)
def host_post_step_result(self, host_id, step_type, step_id, exit_code, output):
self.api_client.host_post_step_result(
self.id, host_id, step_type=step_type, step_id=step_id, exit_code=exit_code, output=output
)
def host_update_install_progress(self, host_id, current_stage, progress_info=None):
self.api_client.host_update_progress(self.id, host_id, current_stage, progress_info=progress_info)
def host_complete_install(self):
self.api_client.complete_cluster_installation(cluster_id=self.id, is_success=True)
def setup_nodes(self, nodes, infra_env_config: BaseInfraEnvConfig):
self._infra_env = InfraEnv.generate(
self.api_client, infra_env_config, iso_image_type=self._config.iso_image_type
)
self._infra_env.download_image(iso_download_path=self._config.iso_download_path)
nodes.start_all()
self.wait_until_hosts_are_discovered()
return nodes.create_nodes_cluster_hosts_mapping(cluster=self)
def wait_for_cluster_validation(
self, validation_section, validation_id, statuses, timeout=consts.VALIDATION_TIMEOUT, interval=2
):
log.info("Wait until cluster %s validation %s is in status %s", self.id, validation_id, statuses)
try:
waiting.wait(
lambda: self.is_cluster_validation_in_status(
validation_section=validation_section, validation_id=validation_id, statuses=statuses
),
timeout_seconds=timeout,
sleep_seconds=interval,
waiting_for=f"Cluster validation to be in status {statuses}",
)
except BaseException:
log.error(
"Cluster validation status is: %s",
utils.get_cluster_validation_value(
self.api_client.cluster_get(self.id), validation_section, validation_id
),
)
raise
def is_cluster_validation_in_status(self, validation_section, validation_id, statuses):
log.info("Is cluster %s validation %s in status %s", self.id, validation_id, statuses)
try:
return (
utils.get_cluster_validation_value(
self.api_client.cluster_get(self.id), validation_section, validation_id
)
in statuses
)
except BaseException:
log.exception("Failed to get cluster %s validation info", self.id)
def wait_for_host_validation(
self, host_id, validation_section, validation_id, statuses, timeout=consts.VALIDATION_TIMEOUT, interval=2
):
log.info("Wait until host %s validation %s is in status %s", host_id, validation_id, statuses)
try:
waiting.wait(
lambda: self.is_host_validation_in_status(
host_id=host_id,
validation_section=validation_section,
validation_id=validation_id,
statuses=statuses,
),
timeout_seconds=timeout,
sleep_seconds=interval,
waiting_for=f"Host validation to be in status {statuses}",
)
except BaseException:
log.error(
"Host validation status is: %s",
utils.get_host_validation_value(
self.api_client.cluster_get(self.id), host_id, validation_section, validation_id
),
)
raise
def is_host_validation_in_status(self, host_id, validation_section, validation_id, statuses):
log.info("Is host %s validation %s in status %s", host_id, validation_id, statuses)
try:
return (
utils.get_host_validation_value(
self.api_client.cluster_get(self.id), host_id, validation_section, validation_id
)
in statuses
)
except BaseException:
log.exception("Failed to get cluster %s validation info", self.id)
def wait_for_cluster_to_be_in_installing_pending_user_action_status(self):
utils.wait_till_cluster_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.ClusterStatus.INSTALLING_PENDING_USER_ACTION],
timeout=consts.PENDING_USER_ACTION_TIMEOUT,
)
def wait_for_cluster_to_be_in_installing_status(self):
utils.wait_till_cluster_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.ClusterStatus.INSTALLING],
timeout=consts.START_CLUSTER_INSTALLATION_TIMEOUT,
)
def wait_for_cluster_to_be_in_finalizing_status(self):
utils.wait_till_cluster_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.ClusterStatus.FINALIZING, consts.ClusterStatus.INSTALLED],
timeout=consts.CLUSTER_INSTALLATION_TIMEOUT,
break_statuses=[consts.ClusterStatus.ERROR],
)
def wait_for_cluster_to_be_in_status(self, statuses, timeout=consts.ERROR_TIMEOUT):
utils.wait_till_cluster_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=statuses,
timeout=timeout,
)
@classmethod
def reset_cluster_and_wait_for_ready(cls, cluster):
# Reset cluster install
cluster.reset_install()
assert cluster.is_in_insufficient_status()
# Reboot required nodes into ISO
cluster.reboot_required_nodes_into_iso_after_reset()
# Wait for hosts to be rediscovered
cluster.wait_until_hosts_are_discovered()
cluster.wait_for_ready_to_install()
def get_events(self, host_id="", infra_env_id=""):
warnings.warn(
"Cluster.get_events is now deprecated, use EventsHandler.get_events instead",
PendingDeprecationWarning,
)
handler = EventsHandler(self.api_client)
return handler.get_events(host_id, self.id, infra_env_id)
def _configure_load_balancer(self):
main_cidr = self.get_primary_machine_cidr()
secondary_cidr = self.nodes.controller.get_provisioning_cidr()
master_ips = self.get_master_ips(self.api_client, self.id, main_cidr) + self.get_master_ips(
self.api_client, self.id, secondary_cidr
)
worker_ips = self.get_worker_ips(self.api_client, self.id, main_cidr)
load_balancer_ip = str(IPNetwork(main_cidr).ip + 1)
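# The load balancer gets the first usable address in the primary machine CIDR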
tf = terraform_utils.TerraformUtils(working_dir=self.nodes.controller.tf_folder)
lb_controller = LoadBalancerController(tf)
lb_controller.set_load_balancing_config(load_balancer_ip, master_ips, worker_ips)
@classmethod
def _get_namespace_index(cls, libvirt_network_if):
# Hack to retrieve namespace index - does not exist in tests
matcher = re.match(r"^tt(\d+)$", libvirt_network_if)
return int(matcher.groups()[0]) if matcher is not None else 0
def wait_for_event(self, event_to_find, reference_time, params_list=None, host_id="", infra_env_id="", timeout=10):
warnings.warn(
"Cluster.wait_for_event is now deprecated, use EventsHandler.wait_for_event instead",
PendingDeprecationWarning,
)
handler = EventsHandler(self.api_client)
return handler.wait_for_event(
event_to_find, reference_time, params_list, host_id, infra_env_id, self.id, timeout
)
@staticmethod
def get_inventory_host_nics_data(host: dict, ipv4_first=True):
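# ipv4_first controls whether IPv4 or IPv6 addresses are preferred when picking each interface's IP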
def get_network_interface_ip(interface):
addresses = (
interface.ipv4_addresses + interface.ipv6_addresses
if ipv4_first
else interface.ipv6_addresses + interface.ipv4_addresses
)
return addresses[0].split("/")[0] if len(addresses) > 0 else None
inventory = models.Inventory(**json.loads(host["inventory"]))
interfaces_list = [models.Interface(**interface) for interface in inventory.interfaces]
return [
{
"name": interface.name,
"model": interface.product,
"mac": interface.mac_address,
"ip": get_network_interface_ip(interface),
"speed": interface.speed_mbps,
}
for interface in interfaces_list
]
@staticmethod
def get_hosts_nics_data(hosts: list, ipv4_first=True):
return [Cluster.get_inventory_host_nics_data(h, ipv4_first=ipv4_first) for h in hosts]
@staticmethod
def get_cluster_hosts(cluster: models.cluster.Cluster) -> List[ClusterHost]:
return [ClusterHost(h) for h in cluster.hosts]
@staticmethod
def to_cluster_hosts(hosts: List[Dict[str, Any]]) -> List[ClusterHost]:
return [ClusterHost(models.Host(**h)) for h in hosts]
def get_cluster_cidrs(self, hosts: List[ClusterHost]) -> Set[str]:
cidrs = set()
for host in hosts:
ips = []
if self.nodes.is_ipv4:
ips += host.ipv4_addresses()
if self.nodes.is_ipv6:
ips += host.ipv6_addresses()
for host_ip in ips:
cidr = network_utils.get_cidr_by_interface(host_ip)
cidrs.add(cidr)
return cidrs
def get_cluster_matching_cidrs(self, hosts: List[ClusterHost]) -> Set[str]:
cluster_cidrs = self.get_cluster_cidrs(hosts)
matching_cidrs = set()
for cidr in cluster_cidrs:
for host in hosts:
interfaces = []
if self.nodes.is_ipv4:
interfaces += host.ipv4_addresses()
if self.nodes.is_ipv6:
interfaces += host.ipv6_addresses()
if not network_utils.any_interface_in_cidr(interfaces, cidr):
break
matching_cidrs.add(cidr)
return matching_cidrs
@staticmethod
def get_ip_for_single_node(client, cluster_id, machine_cidr, ipv4_first=True):
cluster_info = client.cluster_get(cluster_id).to_dict()
if len(cluster_info["hosts"]) == 0:
raise Exception("No host found")
network = IPNetwork(machine_cidr)
interfaces = Cluster.get_inventory_host_nics_data(cluster_info["hosts"][0], ipv4_first=ipv4_first)
for intf in interfaces:
ip = intf["ip"]
if IPAddress(ip) in network:
return ip
raise Exception("IP for single node not found")
@staticmethod
def get_ips_for_role(client, cluster_id, network, role):
cluster_info = client.cluster_get(cluster_id).to_dict()
ret = []
net = IPNetwork(network)
hosts_interfaces = Cluster.get_hosts_nics_data([h for h in cluster_info["hosts"] if h["role"] == role])
for host_interfaces in hosts_interfaces:
for intf in host_interfaces:
ip = IPAddress(intf["ip"])
if ip in net:
ret = ret + [intf["ip"]]
return ret
@staticmethod
def get_master_ips(client, cluster_id, network):
return Cluster.get_ips_for_role(client, cluster_id, network, consts.NodeRoles.MASTER)
@staticmethod
def get_worker_ips(client, cluster_id, network):
return Cluster.get_ips_for_role(client, cluster_id, network, consts.NodeRoles.WORKER)
@staticmethod
def get_vips_from_cluster(client, cluster_id):
cluster_info = client.cluster_get(cluster_id)
return dict(api_vip=cluster_info.api_vip, ingress_vip=cluster_info.ingress_vip)
def get_host_disks(self, host, filter=None):
hosts = self.get_hosts()
selected_host = [h for h in hosts if h["id"] == host["id"]]
disks = json.loads(selected_host[0]["inventory"])["disks"]
if not filter:
return [disk for disk in disks]
else:
return [disk for disk in disks if filter(disk)]
def get_inventory_host_ips_data(self, host: dict):
nics = self.get_inventory_host_nics_data(host)
return [nic["ip"] for nic in nics]
# Needed for the None platform and single-node deployments:
# we need to get the IP where the kube-api is running
def get_kube_api_ip(self, hosts):
for host in hosts:
for ip in self.get_inventory_host_ips_data(host):
if self.is_kubeapi_service_ready(ip):
return ip
def get_api_vip(self, cluster):
cluster = cluster or self.get_details()
api_vip = cluster.api_vip
if not api_vip and cluster.user_managed_networking:
log.info("API VIP is not set, searching for api ip on masters")
masters = self.get_hosts_by_role(consts.NodeRoles.MASTER, hosts=cluster.to_dict()["hosts"])
api_vip = self._wait_for_api_vip(masters)
log.info("api vip is %s", api_vip)
return api_vip
def _wait_for_api_vip(self, hosts, timeout=180):
"""Enable some grace time for waiting for API's availability."""
return waiting.wait(
lambda: self.get_kube_api_ip(hosts=hosts), timeout_seconds=timeout, sleep_seconds=5, waiting_for="API's IP"
)
def find_matching_node_name(self, host: ClusterHost, nodes: List[Node]) -> Union[str, None]:
# Look for the node that matches the given host by its MAC address (which is unique)
for node in nodes:
for mac in node.macs:
if mac.lower() in host.macs():
return node.name
# IPv6 static ips
if self._config.is_static_ip:
mappings = static_network.get_name_to_mac_addresses_mapping(self.nodes.controller.tf_folder)
for mac in host.macs():
for name, macs in mappings.items():
if mac in macs:
return name
return None
@staticmethod
def is_kubeapi_service_ready(ip_or_dns):
"""Validate if kube-api is ready on given address."""
with contextlib.suppress(ValueError):
# IPv6 addresses need to be surrounded with square-brackets
# to differentiate them from domain names
if ipaddress.ip_address(ip_or_dns).version == 6:
ip_or_dns = f"[{ip_or_dns}]"
try:
response = requests.get(f"https://{ip_or_dns}:6443/readyz", verify=False, timeout=1)
return response.ok
except BaseException:
return False
def wait_and_kill_installer(self, host):
# Wait for specific host to be in installing in progress
self.wait_for_specific_host_status(host=host, statuses=[consts.NodesStatus.INSTALLING_IN_PROGRESS])
# Kill installer to simulate host error
selected_node = self.nodes.get_node_from_cluster_host(host)
selected_node.kill_installer()
def get_api_vip_from_cluster(api_client, cluster_info: Union[dict, models.cluster.Cluster], pull_secret):
import warnings
from tests.config import ClusterConfig, InfraEnvConfig
warnings.warn(
"Soon get_api_vip_from_cluster will be deprecated. Avoid using or adding new functionality to "
"this function. The function and solution for that case have not been determined yet. It might be "
"on another module, or as a classmethod within Cluster class."
" For more information see https://issues.redhat.com/browse/MGMT-4975",
PendingDeprecationWarning,
)
if isinstance(cluster_info, dict):
cluster_info = models.cluster.Cluster(**cluster_info)
cluster = Cluster(
api_client=api_client,
infra_env_config=InfraEnvConfig(),
config=ClusterConfig(
cluster_name=ClusterName(cluster_info.name),
pull_secret=pull_secret,
ssh_public_key=cluster_info.ssh_public_key,
cluster_id=cluster_info.id,
),
nodes=None,
)
return cluster.get_api_vip(cluster=cluster_info)
| 42.450853
| 120
| 0.679813
|
import contextlib
import ipaddress
import json
import os
import random
import re
import time
import warnings
from collections import Counter
from typing import Any, Dict, List, Optional, Set, Union
import requests
import test_infra.utils.waiting
import waiting
import yaml
from assisted_service_client import models
from assisted_service_client.models.operator_type import OperatorType
from junit_report import JunitTestCase
from netaddr import IPAddress, IPNetwork
from test_infra import consts, utils
from test_infra.assisted_service_api import InventoryClient
from test_infra.controllers.load_balancer_controller import LoadBalancerController
from test_infra.controllers.node_controllers import Node
from test_infra.helper_classes.cluster_host import ClusterHost
from test_infra.helper_classes.config import BaseClusterConfig, BaseInfraEnvConfig
from test_infra.helper_classes.entity import Entity
from test_infra.helper_classes.events_handler import EventsHandler
from test_infra.helper_classes.infra_env import InfraEnv
from test_infra.helper_classes.nodes import Nodes
from test_infra.tools import static_network, terraform_utils
from test_infra.utils import Path, log, logs_utils, network_utils, operators_utils
from test_infra.utils.entity_name import ClusterName
class Cluster(Entity):
MINIMUM_NODES_TO_WAIT = 1
EVENTS_THRESHOLD = 500
_config: BaseClusterConfig
def __init__(
self,
api_client: InventoryClient,
config: BaseClusterConfig,
infra_env_config: BaseInfraEnvConfig,
nodes: Optional[Nodes] = None,
):
super().__init__(api_client, config, nodes)
self._infra_env_config = infra_env_config
self._infra_env = None
self._infra_env_config.cluster_id = config.cluster_id
self._infra_env_config.openshift_version = self._config.openshift_version
self._infra_env_config.pull_secret = self._config.pull_secret
self._high_availability_mode = config.high_availability_mode
self.name = config.cluster_name.get()
@property
def kubeconfig_path(self):
return self._config.kubeconfig_path
@property
def iso_download_path(self):
return self._config.iso_download_path
@property
def enable_image_download(self):
return self._config.download_image
def _update_day2_config(self, api_client: InventoryClient, cluster_id: str):
day2_cluster: models.cluster.Cluster = api_client.cluster_get(cluster_id)
self.update_config(
**dict(
openshift_version=day2_cluster.openshift_version,
cluster_name=ClusterName(day2_cluster.name),
additional_ntp_source=day2_cluster.additional_ntp_source,
user_managed_networking=day2_cluster.user_managed_networking,
high_availability_mode=day2_cluster.high_availability_mode,
olm_operators=day2_cluster.monitored_operators,
base_dns_domain=day2_cluster.base_dns_domain,
vip_dhcp_allocation=day2_cluster.vip_dhcp_allocation,
)
)
def _create(self) -> str:
if self._config.cluster_id:
log.info(f"Fetching day2 cluster with id {self._config.cluster_id}")
self._update_day2_config(self.api_client, self._config.cluster_id)
return self._config.cluster_id
cluster = self.api_client.create_cluster(
self._config.cluster_name.get(),
ssh_public_key=self._config.ssh_public_key,
openshift_version=self._config.openshift_version,
pull_secret=self._config.pull_secret,
base_dns_domain=self._config.base_dns_domain,
vip_dhcp_allocation=self._config.vip_dhcp_allocation,
additional_ntp_source=self._config.additional_ntp_source,
user_managed_networking=self._config.user_managed_networking,
high_availability_mode=self._config.high_availability_mode,
olm_operators=[{"name": name} for name in self._config.olm_operators],
network_type=self._config.network_type,
)
self._config.cluster_id = cluster.id
return cluster.id
def delete(self):
self.api_client.delete_cluster(self.id)
def get_details(self):
return self.api_client.cluster_get(self.id)
def get_cluster_name(self):
return self.get_details().name
def get_hosts(self):
return self.api_client.get_cluster_hosts(self.id)
def get_host_ids(self):
return [host["id"] for host in self.get_hosts()]
def get_host_ids_names_mapping(self):
return {host["id"]: host["requested_hostname"] for host in self.get_hosts()}
def get_host_assigned_roles(self):
hosts = self.get_hosts()
return {h["id"]: h["role"] for h in hosts}
def get_operators(self):
return self.api_client.get_cluster_operators(self.id)
def generate_image(self):
warnings.warn("generate_image is deprecated. Use generate_infra_env instead.", DeprecationWarning)
self.api_client.generate_image(cluster_id=self.id, ssh_key=self._config.ssh_public_key)
def generate_infra_env(
self, static_network_config=None, iso_image_type=None, ssh_key=None, ignition_info=None, proxy=None
) -> InfraEnv:
self._infra_env_config.ssh_public_key = ssh_key or self._config.ssh_public_key
self._infra_env_config.iso_image_type = iso_image_type or self._config.iso_image_type
self._infra_env_config.static_network_config = static_network_config
self._infra_env_config.ignition_config_override = ignition_info
self._infra_env_config.proxy = proxy or self._config.proxy
infra_env = InfraEnv(api_client=self.api_client, config=self._infra_env_config)
self._infra_env = infra_env
return infra_env
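    # Illustrative usage of the two-step flow (hypothetical, already-constructed `cluster`);
    # generate_and_download_infra_env() further down wraps the same steps:
    #   infra_env = cluster.generate_infra_env()
    #   iso_path = cluster.download_infra_env_image()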
def update_infra_env_proxy(self, proxy: models.Proxy) -> None:
self._infra_env_config.proxy = proxy
self._infra_env.update_proxy(proxy=proxy)
def download_infra_env_image(self, iso_download_path=None) -> Path:
iso_download_path = iso_download_path or self._config.iso_download_path
return self._infra_env.download_image(iso_download_path=iso_download_path)
@JunitTestCase()
def generate_and_download_infra_env(
self,
iso_download_path=None,
static_network_config=None,
iso_image_type=None,
ssh_key=None,
ignition_info=None,
proxy=None,
) -> Path:
if self._config.is_static_ip and static_network_config is None:
static_network_config = static_network.generate_static_network_data_from_tf(self.nodes.controller.tf_folder)
self.generate_infra_env(
static_network_config=static_network_config,
iso_image_type=iso_image_type,
ssh_key=ssh_key,
ignition_info=ignition_info,
proxy=proxy,
)
return self.download_infra_env_image(iso_download_path=iso_download_path or self._config.iso_download_path)
@JunitTestCase()
def generate_and_download_image(
self, iso_download_path=None, static_network_config=None, iso_image_type=None, ssh_key=None
):
warnings.warn(
"generate_and_download_image is deprecated. Use generate_and_download_infra_env instead.",
DeprecationWarning,
)
iso_download_path = iso_download_path or self._config.iso_download_path
if not os.path.exists(iso_download_path):
utils.recreate_folder(os.path.dirname(iso_download_path), force_recreate=False)
self.api_client.generate_and_download_image(
cluster_id=self.id,
ssh_key=ssh_key or self._config.ssh_public_key,
image_path=iso_download_path,
image_type=iso_image_type or self._config.iso_image_type,
static_network_config=static_network_config,
)
def wait_until_hosts_are_disconnected(self, nodes_count: int = None):
statuses = [consts.NodesStatus.DISCONNECTED]
test_infra.utils.waiting.wait_till_all_hosts_are_in_status(
client=self.api_client,
cluster_id=self.id,
nodes_count=nodes_count or self.nodes.nodes_count,
statuses=statuses,
timeout=consts.DISCONNECTED_TIMEOUT,
)
@JunitTestCase()
def wait_until_hosts_are_discovered(self, allow_insufficient=False, nodes_count: int = None):
statuses = [consts.NodesStatus.PENDING_FOR_INPUT, consts.NodesStatus.KNOWN]
if allow_insufficient:
statuses.append(consts.NodesStatus.INSUFFICIENT)
test_infra.utils.waiting.wait_till_all_hosts_are_in_status(
client=self.api_client,
cluster_id=self.id,
nodes_count=nodes_count or self.nodes.nodes_count,
statuses=statuses,
timeout=consts.NODES_REGISTERED_TIMEOUT,
)
def _get_matching_hosts(self, host_type, count):
hosts = self.get_hosts()
return [{"id": h["id"], "role": host_type} for h in hosts if host_type in h["requested_hostname"]][:count]
def set_cluster_name(self, cluster_name: str):
log.info(f"Setting Cluster Name:{cluster_name} for cluster: {self.id}")
self.update_config(cluster_name=ClusterName(prefix=cluster_name, suffix=None))
self.api_client.update_cluster(self.id, {"name": cluster_name})
def select_installation_disk(self, host_id: str, disk_paths: List[dict]) -> None:
self._infra_env.select_host_installation_disk(host_id=host_id, disk_paths=disk_paths)
def set_ocs(self, properties=None):
self.set_olm_operator(consts.OperatorType.OCS, properties=properties)
def set_cnv(self, properties=None):
self.set_olm_operator(consts.OperatorType.CNV, properties=properties)
def unset_ocs(self):
self.unset_olm_operator(consts.OperatorType.OCS)
def unset_cnv(self):
self.unset_olm_operator(consts.OperatorType.CNV)
def unset_olm_operator(self, operator_name):
log.info(f"Unsetting {operator_name} for cluster: {self.id}")
cluster = self.api_client.cluster_get(self.id)
olm_operators = []
for operator in cluster.monitored_operators:
if operator.name == operator_name or operator.operator_type == OperatorType.BUILTIN:
continue
olm_operators.append({"name": operator.name, "properties": operator.properties})
self.api_client.update_cluster(self.id, {"olm_operators": olm_operators})
def set_olm_operator(self, operator_name, properties=None):
log.info(f"Setting {operator_name} for cluster: {self.id}")
cluster = self.api_client.cluster_get(self.id)
if operator_name in [o.name for o in cluster.monitored_operators]:
return
olm_operators = []
for operator in cluster.monitored_operators:
if operator.operator_type == OperatorType.BUILTIN:
continue
olm_operators.append({"name": operator.name, "properties": operator.properties})
olm_operators.append({"name": operator_name, "properties": properties})
self._config.olm_operators = olm_operators
self.api_client.update_cluster(self.id, {"olm_operators": olm_operators})
def set_host_roles(self, num_masters: int = None, num_workers: int = None, requested_roles=None):
if requested_roles is None:
requested_roles = Counter(
master=num_masters or self.nodes.masters_count, worker=num_workers or self.nodes.workers_count
)
assigned_roles = self._get_matching_hosts(host_type=consts.NodeRoles.MASTER, count=requested_roles["master"])
assigned_roles.extend(
self._get_matching_hosts(host_type=consts.NodeRoles.WORKER, count=requested_roles["worker"])
)
for role in assigned_roles:
self._infra_env.update_host(host_id=role["id"], host_role=role["role"])
return assigned_roles
def set_specific_host_role(self, host, role):
self._infra_env.update_host(host_id=host["id"], host_role=role)
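    # set_network_params() below branches three ways: on the "none" platform networking is left to
    # the user, with VIP DHCP allocation or a "None" high-availability mode only the machine
    # networks are reported (letting the service derive the VIPs), and otherwise the API/ingress
    # VIPs obtained from the controller are set statically.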
def set_network_params(self, controller=None):
controller = controller or self.nodes.controller
if self._config.platform == consts.Platforms.NONE:
log.info("On None platform, leaving network management to the user")
api_vip = ingress_vip = machine_networks = None
elif self._config.vip_dhcp_allocation or self._high_availability_mode == consts.HighAvailabilityMode.NONE:
log.info("Letting access VIPs be deducted from machine networks")
api_vip = ingress_vip = None
machine_networks = self.get_machine_networks()
else:
log.info("Assigning VIPs statically")
access_vips = controller.get_ingress_and_api_vips()
api_vip = access_vips["api_vip"]
ingress_vip = access_vips["ingress_vip"]
machine_networks = None
self.set_advanced_networking(
vip_dhcp_allocation=self._config.vip_dhcp_allocation,
cluster_networks=self._config.cluster_networks,
service_networks=self._config.service_networks,
machine_networks=machine_networks,
api_vip=api_vip,
ingress_vip=ingress_vip,
)
if self._config.is_ipv4 and self._config.is_ipv6:
machine_networks = controller.get_all_machine_addresses()
self.set_advanced_networking(machine_networks=machine_networks)
def get_primary_machine_cidr(self):
cidr = self.nodes.controller.get_primary_machine_cidr()
if not cidr:
matching_cidrs = self.get_cluster_matching_cidrs(Cluster.get_cluster_hosts(self.get_details()))
if not matching_cidrs:
raise RuntimeError("No matching cidr for DHCP")
cidr = next(iter(matching_cidrs))
return cidr
def get_machine_networks(self):
networks = []
primary_machine_cidr = self.nodes.controller.get_primary_machine_cidr()
if primary_machine_cidr:
networks.append(primary_machine_cidr)
secondary_machine_cidr = self.nodes.controller.get_provisioning_cidr()
if secondary_machine_cidr:
networks.append(secondary_machine_cidr)
if not networks:
networks = self.get_cluster_matching_cidrs(Cluster.get_cluster_hosts(self.get_details()))
if not networks:
raise RuntimeError("No matching cidr for DHCP")
return networks
def set_ingress_and_api_vips(self, vips):
log.info(f"Setting API VIP:{vips['api_vip']} and ingress VIP:{vips['ingress_vip']} for cluster: {self.id}")
self.api_client.update_cluster(self.id, vips)
def set_ssh_key(self, ssh_key: str):
log.info(f"Setting SSH key:{ssh_key} for cluster: {self.id}")
self.update_config(ssh_public_key=ssh_key)
self.api_client.update_cluster(self.id, {"ssh_public_key": ssh_key})
def set_base_dns_domain(self, base_dns_domain: str):
log.info(f"Setting base DNS domain:{base_dns_domain} for cluster: {self.id}")
self.update_config(base_dns_domain=base_dns_domain)
self.api_client.update_cluster(self.id, {"base_dns_domain": base_dns_domain})
def set_advanced_networking(
self,
vip_dhcp_allocation: Optional[bool] = None,
cluster_networks: Optional[List[models.ClusterNetwork]] = None,
service_networks: Optional[List[models.ServiceNetwork]] = None,
machine_networks: Optional[List[models.MachineNetwork]] = None,
api_vip: Optional[str] = None,
ingress_vip: Optional[str] = None,
):
if machine_networks is None:
machine_networks = self._config.machine_networks
else:
machine_networks = [models.MachineNetwork(cidr=cidr) for cidr in machine_networks]
if vip_dhcp_allocation is None:
vip_dhcp_allocation = self._config.vip_dhcp_allocation
advanced_networking = {
"vip_dhcp_allocation": vip_dhcp_allocation,
"cluster_networks": cluster_networks if cluster_networks is not None else self._config.cluster_networks,
"service_networks": service_networks if service_networks is not None else self._config.service_networks,
"machine_networks": machine_networks,
"api_vip": api_vip if api_vip is not None else self._config.api_vip,
"ingress_vip": ingress_vip if ingress_vip is not None else self._config.ingress_vip,
}
log.info(f"Updating advanced networking with {advanced_networking} for cluster: {self.id}")
self.update_config(**advanced_networking)
self.api_client.update_cluster(self.id, advanced_networking)
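    # Machine networks may be given as plain CIDR strings; they are wrapped into
    # models.MachineNetwork objects above. Illustrative call (hypothetical values):
    #   cluster.set_advanced_networking(machine_networks=["192.168.127.0/24"])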
def set_pull_secret(self, pull_secret: str):
log.info(f"Setting pull secret:{pull_secret} for cluster: {self.id}")
self.update_config(pull_secret=pull_secret)
self.api_client.update_cluster(self.id, {"pull_secret": pull_secret})
def set_host_name(self, host_id, requested_name):
log.info(f"Setting Required Host Name:{requested_name}, for Host ID: {host_id}")
self._infra_env.update_host(host_id=host_id, host_name=requested_name)
def set_additional_ntp_source(self, ntp_source: List[str]):
log.info(f"Setting Additional NTP source:{ntp_source}")
if isinstance(ntp_source, List):
ntp_source_string = ",".join(ntp_source)
elif isinstance(ntp_source, str):
ntp_source_string = ntp_source
else:
raise TypeError(
f"ntp_source must be a string or a list of strings, got: {ntp_source}," f" type: {type(ntp_source)}"
)
self.update_config(additional_ntp_source=ntp_source_string)
self.api_client.update_cluster(self.id, {"additional_ntp_source": ntp_source_string})
def patch_discovery_ignition(self, ignition):
self._infra_env.patch_discovery_ignition(ignition_info=ignition)
def set_proxy_values(self, proxy_values: models.Proxy) -> None:
log.info(f"Setting proxy values {proxy_values} for cluster: {self.id}")
self.update_config(proxy=proxy_values)
self.api_client.set_cluster_proxy(
self.id,
http_proxy=self._config.proxy.http_proxy,
https_proxy=self._config.proxy.https_proxy,
no_proxy=self._config.proxy.no_proxy,
)
@JunitTestCase()
def start_install(self):
self.api_client.install_cluster(cluster_id=self.id)
def wait_for_logs_complete(self, timeout, interval=60, check_host_logs_only=False):
logs_utils.wait_for_logs_complete(
client=self.api_client,
cluster_id=self.id,
timeout=timeout,
interval=interval,
check_host_logs_only=check_host_logs_only,
)
def wait_for_installing_in_progress(self, nodes_count: int = MINIMUM_NODES_TO_WAIT):
test_infra.utils.waiting.wait_till_at_least_one_host_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.NodesStatus.INSTALLING_IN_PROGRESS],
nodes_count=nodes_count,
timeout=consts.INSTALLING_IN_PROGRESS_TIMEOUT,
)
def wait_for_write_image_to_disk(self, nodes_count: int = MINIMUM_NODES_TO_WAIT):
test_infra.utils.waiting.wait_till_at_least_one_host_is_in_stage(
client=self.api_client,
cluster_id=self.id,
stages=[consts.HostsProgressStages.WRITE_IMAGE_TO_DISK, consts.HostsProgressStages.REBOOTING],
nodes_count=nodes_count,
)
def wait_for_host_status(self, statuses, fall_on_error_status=True, nodes_count: int = MINIMUM_NODES_TO_WAIT):
test_infra.utils.waiting.wait_till_at_least_one_host_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=statuses,
nodes_count=nodes_count,
fall_on_error_status=fall_on_error_status,
)
def wait_for_specific_host_status(self, host, statuses, nodes_count: int = MINIMUM_NODES_TO_WAIT):
test_infra.utils.waiting.wait_till_specific_host_is_in_status(
client=self.api_client,
cluster_id=self.id,
host_name=host.get("requested_hostname"),
statuses=statuses,
nodes_count=nodes_count,
)
def wait_for_specific_host_stage(self, host: dict, stage: str, inclusive: bool = True):
index = consts.all_host_stages.index(stage)
test_infra.utils.waiting.wait_till_specific_host_is_in_stage(
client=self.api_client,
cluster_id=self.id,
host_name=host.get("requested_hostname"),
stages=consts.all_host_stages[index:] if inclusive else consts.all_host_stages[index + 1 :],
)
def wait_for_cluster_in_error_status(self):
utils.wait_till_cluster_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.ClusterStatus.ERROR],
timeout=consts.ERROR_TIMEOUT,
)
def wait_for_pending_for_input_status(self):
utils.wait_till_cluster_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.ClusterStatus.PENDING_FOR_INPUT],
timeout=consts.PENDING_USER_ACTION_TIMEOUT,
)
def wait_for_at_least_one_host_to_boot_during_install(self, nodes_count: int = MINIMUM_NODES_TO_WAIT):
test_infra.utils.waiting.wait_till_at_least_one_host_is_in_stage(
client=self.api_client,
cluster_id=self.id,
stages=[consts.HostsProgressStages.REBOOTING],
nodes_count=nodes_count,
)
def wait_for_non_bootstrap_masters_to_reach_configuring_state_during_install(self, num_masters: int = None):
num_masters = num_masters if num_masters is not None else self.nodes.masters_count
test_infra.utils.waiting.wait_till_at_least_one_host_is_in_stage(
client=self.api_client,
cluster_id=self.id,
stages=[consts.HostsProgressStages.CONFIGURING],
nodes_count=num_masters - 1,
)
def wait_for_non_bootstrap_masters_to_reach_joined_state_during_install(self, num_masters: int = None):
num_masters = num_masters if num_masters is not None else self.nodes.masters_count
test_infra.utils.waiting.wait_till_at_least_one_host_is_in_stage(
client=self.api_client,
cluster_id=self.id,
stages=[consts.HostsProgressStages.JOINED],
nodes_count=num_masters - 1,
)
def wait_for_hosts_stage(self, stage: str, inclusive: bool = True):
index = consts.all_host_stages.index(stage)
test_infra.utils.waiting.wait_till_at_least_one_host_is_in_stage(
client=self.api_client,
cluster_id=self.id,
stages=consts.all_host_stages[index:] if inclusive else consts.all_host_stages[index + 1 :],
nodes_count=self.nodes.nodes_count,
)
@JunitTestCase()
def start_install_and_wait_for_installed(
self,
wait_for_hosts=True,
wait_for_operators=True,
wait_for_cluster_install=True,
download_kubeconfig=True,
):
self.start_install()
if wait_for_hosts:
self.wait_for_hosts_to_install()
if wait_for_operators:
self.wait_for_operators_to_finish()
if wait_for_cluster_install:
self.wait_for_install()
if download_kubeconfig:
self.download_kubeconfig()
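    # Illustrative end-to-end flow built from this class (hypothetical `cluster` whose nodes are
    # already running the discovery ISO):
    #   cluster.prepare_for_installation()          # hostnames, roles, networking, ready-to-install
    #   cluster.start_install_and_wait_for_installed()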
def disable_worker_hosts(self):
hosts = self.get_hosts_by_role(consts.NodeRoles.WORKER)
for host in hosts:
self.disable_host(host)
def disable_host(self, host):
host_name = host["requested_hostname"]
log.info(f"Going to disable host: {host_name} in cluster: {self.id}")
self._infra_env.unbind_host(host_id=host["id"])
def enable_host(self, host):
host_name = host["requested_hostname"]
log.info(f"Going to enable host: {host_name} in cluster: {self.id}")
self._infra_env.bind_host(host_id=host["id"], cluster_id=self.id)
def delete_host(self, host):
host_id = host["id"]
log.info(f"Going to delete host: {host_id} in cluster: {self.id}")
self._infra_env.delete_host(host_id=host_id)
def cancel_install(self):
self.api_client.cancel_cluster_install(cluster_id=self.id)
def get_bootstrap_hostname(self):
hosts = self.get_hosts_by_role(consts.NodeRoles.MASTER)
for host in hosts:
if host.get("bootstrap"):
log.info("Bootstrap node is: %s", host["requested_hostname"])
return host["requested_hostname"]
def get_hosts_by_role(self, role, hosts=None):
hosts = hosts or self.api_client.get_cluster_hosts(self.id)
nodes_by_role = []
for host in hosts:
if host["role"] == role:
nodes_by_role.append(host)
log.info(f"Found hosts: {nodes_by_role}, that has the role: {role}")
return nodes_by_role
def get_random_host_by_role(self, role):
return random.choice(self.get_hosts_by_role(role))
def get_reboot_required_hosts(self):
return self.api_client.get_hosts_in_statuses(
cluster_id=self.id, statuses=[consts.NodesStatus.RESETING_PENDING_USER_ACTION]
)
def reboot_required_nodes_into_iso_after_reset(self):
hosts_to_reboot = self.get_reboot_required_hosts()
self.nodes.run_for_given_nodes_by_cluster_hosts(cluster_hosts=hosts_to_reboot, func_name="reset")
def wait_for_one_host_to_be_in_wrong_boot_order(self, fall_on_error_status=True):
test_infra.utils.waiting.wait_till_at_least_one_host_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.NodesStatus.INSTALLING_PENDING_USER_ACTION],
status_info=consts.HostStatusInfo.WRONG_BOOT_ORDER,
fall_on_error_status=fall_on_error_status,
timeout=consts.PENDING_USER_ACTION_TIMEOUT,
)
def wait_for_at_least_one_host_to_be_in_reboot_timeout(self, fall_on_error_status=True, nodes_count=1):
test_infra.utils.waiting.wait_till_at_least_one_host_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.NodesStatus.INSTALLING_PENDING_USER_ACTION],
status_info=consts.HostStatusInfo.REBOOT_TIMEOUT,
nodes_count=nodes_count,
fall_on_error_status=fall_on_error_status,
timeout=consts.PENDING_USER_ACTION_TIMEOUT,
)
def wait_for_hosts_to_be_in_wrong_boot_order(
self, nodes_count, timeout=consts.PENDING_USER_ACTION_TIMEOUT, fall_on_error_status=True
):
test_infra.utils.waiting.wait_till_all_hosts_are_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.NodesStatus.INSTALLING_PENDING_USER_ACTION],
status_info=consts.HostStatusInfo.WRONG_BOOT_ORDER,
nodes_count=nodes_count,
timeout=timeout,
fall_on_error_status=fall_on_error_status,
)
def wait_for_ready_to_install(self):
utils.wait_till_cluster_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.ClusterStatus.READY],
timeout=consts.READY_TIMEOUT,
)
time.sleep(10)
utils.wait_till_cluster_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.ClusterStatus.READY],
timeout=consts.READY_TIMEOUT,
)
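    # The READY status is sampled twice with a short sleep in between, presumably to guard against
    # the cluster briefly flapping out of "ready" right after the first transition is observed.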
def is_in_cancelled_status(self):
return utils.is_cluster_in_status(
client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.CANCELLED]
)
def is_in_error(self):
return utils.is_cluster_in_status(
client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.ERROR]
)
def is_finalizing(self):
return utils.is_cluster_in_status(
client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.FINALIZING]
)
def is_installing(self):
return utils.is_cluster_in_status(
client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.INSTALLING]
)
def reset_install(self):
self.api_client.reset_cluster_install(cluster_id=self.id)
def is_in_insufficient_status(self):
return utils.is_cluster_in_status(
client=self.api_client, cluster_id=self.id, statuses=[consts.ClusterStatus.INSUFFICIENT]
)
def wait_for_hosts_to_install(
self, timeout=consts.CLUSTER_INSTALLATION_TIMEOUT, fall_on_error_status=True, nodes_count: int = None
):
test_infra.utils.waiting.wait_till_all_hosts_are_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.ClusterStatus.INSTALLED],
nodes_count=nodes_count or self.nodes.nodes_count,
timeout=timeout,
fall_on_error_status=fall_on_error_status,
)
def wait_for_operators_to_finish(self, timeout=consts.CLUSTER_INSTALLATION_TIMEOUT, fall_on_error_status=True):
operators = self.get_operators()
if fall_on_error_status:
statuses = [consts.OperatorStatus.AVAILABLE]
else:
statuses = [consts.OperatorStatus.AVAILABLE, consts.OperatorStatus.FAILED]
operators_utils.wait_till_all_operators_are_in_status(
client=self.api_client,
cluster_id=self.id,
operators_count=len(operators_utils.filter_operators_by_type(operators, OperatorType.BUILTIN)),
operator_types=[OperatorType.BUILTIN],
statuses=statuses,
timeout=timeout,
fall_on_error_status=False,
)
operators_utils.wait_till_all_operators_are_in_status(
client=self.api_client,
cluster_id=self.id,
operators_count=len(operators_utils.filter_operators_by_type(operators, OperatorType.OLM)),
operator_types=[OperatorType.OLM],
statuses=[consts.OperatorStatus.AVAILABLE, consts.OperatorStatus.FAILED],
timeout=timeout,
fall_on_error_status=fall_on_error_status,
)
def is_operator_in_status(self, operator_name, status):
return operators_utils.is_operator_in_status(
operators=self.get_operators(), operator_name=operator_name, status=status
)
def wait_for_install(self, timeout=consts.CLUSTER_INSTALLATION_TIMEOUT):
utils.wait_till_cluster_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.ClusterStatus.INSTALLED],
timeout=timeout,
)
def _set_hostnames_and_roles(self):
cluster_id = self.id
hosts = self.to_cluster_hosts(self.api_client.get_cluster_hosts(cluster_id))
nodes = self.nodes.get_nodes(refresh=True)
for host in hosts:
if host.has_hostname():
continue
name = self.find_matching_node_name(host, nodes)
assert name is not None, (
f"Failed to find matching node for host with mac address {host.macs()}"
f" nodes: {[(n.name, n.ips, n.macs) for n in nodes]}"
)
if self.nodes.nodes_count == 1:
role = None
else:
role = consts.NodeRoles.MASTER if consts.NodeRoles.MASTER in name else consts.NodeRoles.WORKER
self._infra_env.update_host(host_id=host.get_id(), host_role=role, host_name=name)
def _ha_not_none(self):
return (
self._high_availability_mode != consts.HighAvailabilityMode.NONE
and self._config.platform != consts.Platforms.NONE
)
def download_image(self, iso_download_path: str = None) -> Path:
if self._infra_env is None:
log.warning("No infra_env found. Generating infra_env and downloading ISO")
return self.generate_and_download_infra_env(
iso_download_path=iso_download_path or self._config.iso_download_path,
iso_image_type=self._config.iso_image_type,
)
return self._infra_env.download_image(iso_download_path)
@JunitTestCase()
def prepare_for_installation(self, **kwargs):
super(Cluster, self).prepare_for_installation(**kwargs)
self.nodes.wait_for_networking()
self._set_hostnames_and_roles()
if self._high_availability_mode != consts.HighAvailabilityMode.NONE:
self.set_host_roles(len(self.nodes.get_masters()), len(self.nodes.get_workers()))
self.set_network_params(controller=self.nodes.controller)
if self._config.platform == consts.Platforms.NONE:
self._configure_load_balancer()
self.nodes.controller.set_dns_for_user_managed_network()
elif self._high_availability_mode == consts.HighAvailabilityMode.NONE:
main_cidr = self.get_primary_machine_cidr()
ip = Cluster.get_ip_for_single_node(self.api_client, self.id, main_cidr)
self.nodes.controller.set_single_node_ip(ip)
self.nodes.controller.set_dns(api_vip=ip, ingress_vip=ip)
self.wait_for_ready_to_install()
if self._ha_not_none():
vips_info = self.__class__.get_vips_from_cluster(self.api_client, self.id)
self.nodes.controller.set_dns(api_vip=vips_info["api_vip"], ingress_vip=vips_info["ingress_vip"])
def download_kubeconfig_no_ingress(self, kubeconfig_path: str = None):
self.api_client.download_kubeconfig_no_ingress(self.id, kubeconfig_path or self._config.kubeconfig_path)
def download_kubeconfig(self, kubeconfig_path: str = None):
self.api_client.download_kubeconfig(self.id, kubeconfig_path or self._config.kubeconfig_path)
def download_installation_logs(self, cluster_tar_path):
self.api_client.download_cluster_logs(self.id, cluster_tar_path)
def get_install_config(self):
return yaml.safe_load(self.api_client.get_cluster_install_config(self.id))
def get_admin_credentials(self):
return self.api_client.get_cluster_admin_credentials(self.id)
def register_dummy_host(self):
dummy_host_id = "b164df18-0ff1-4b85-9121-059f10f58f71"
self.api_client.register_host(self.id, dummy_host_id)
def host_get_next_step(self, host_id):
return self.api_client.host_get_next_step(self.id, host_id)
def host_post_step_result(self, host_id, step_type, step_id, exit_code, output):
self.api_client.host_post_step_result(
self.id, host_id, step_type=step_type, step_id=step_id, exit_code=exit_code, output=output
)
def host_update_install_progress(self, host_id, current_stage, progress_info=None):
self.api_client.host_update_progress(self.id, host_id, current_stage, progress_info=progress_info)
def host_complete_install(self):
self.api_client.complete_cluster_installation(cluster_id=self.id, is_success=True)
def setup_nodes(self, nodes, infra_env_config: BaseInfraEnvConfig):
self._infra_env = InfraEnv.generate(
self.api_client, infra_env_config, iso_image_type=self._config.iso_image_type
)
self._infra_env.download_image(iso_download_path=self._config.iso_download_path)
nodes.start_all()
self.wait_until_hosts_are_discovered()
return nodes.create_nodes_cluster_hosts_mapping(cluster=self)
def wait_for_cluster_validation(
self, validation_section, validation_id, statuses, timeout=consts.VALIDATION_TIMEOUT, interval=2
):
log.info("Wait until cluster %s validation %s is in status %s", self.id, validation_id, statuses)
try:
waiting.wait(
lambda: self.is_cluster_validation_in_status(
validation_section=validation_section, validation_id=validation_id, statuses=statuses
),
timeout_seconds=timeout,
sleep_seconds=interval,
waiting_for=f"Cluster validation to be in status {statuses}",
)
except BaseException:
log.error(
"Cluster validation status is: %s",
utils.get_cluster_validation_value(
self.api_client.cluster_get(self.id), validation_section, validation_id
),
)
raise
def is_cluster_validation_in_status(self, validation_section, validation_id, statuses):
log.info("Is cluster %s validation %s in status %s", self.id, validation_id, statuses)
try:
return (
utils.get_cluster_validation_value(
self.api_client.cluster_get(self.id), validation_section, validation_id
)
in statuses
)
except BaseException:
log.exception("Failed to get cluster %s validation info", self.id)
def wait_for_host_validation(
self, host_id, validation_section, validation_id, statuses, timeout=consts.VALIDATION_TIMEOUT, interval=2
):
log.info("Wait until host %s validation %s is in status %s", host_id, validation_id, statuses)
try:
waiting.wait(
lambda: self.is_host_validation_in_status(
host_id=host_id,
validation_section=validation_section,
validation_id=validation_id,
statuses=statuses,
),
timeout_seconds=timeout,
sleep_seconds=interval,
waiting_for=f"Host validation to be in status {statuses}",
)
except BaseException:
log.error(
"Host validation status is: %s",
utils.get_host_validation_value(
self.api_client.cluster_get(self.id), host_id, validation_section, validation_id
),
)
raise
def is_host_validation_in_status(self, host_id, validation_section, validation_id, statuses):
log.info("Is host %s validation %s in status %s", host_id, validation_id, statuses)
try:
return (
utils.get_host_validation_value(
self.api_client.cluster_get(self.id), host_id, validation_section, validation_id
)
in statuses
)
except BaseException:
log.exception("Failed to get cluster %s validation info", self.id)
def wait_for_cluster_to_be_in_installing_pending_user_action_status(self):
utils.wait_till_cluster_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.ClusterStatus.INSTALLING_PENDING_USER_ACTION],
timeout=consts.PENDING_USER_ACTION_TIMEOUT,
)
def wait_for_cluster_to_be_in_installing_status(self):
utils.wait_till_cluster_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.ClusterStatus.INSTALLING],
timeout=consts.START_CLUSTER_INSTALLATION_TIMEOUT,
)
def wait_for_cluster_to_be_in_finalizing_status(self):
utils.wait_till_cluster_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=[consts.ClusterStatus.FINALIZING, consts.ClusterStatus.INSTALLED],
timeout=consts.CLUSTER_INSTALLATION_TIMEOUT,
break_statuses=[consts.ClusterStatus.ERROR],
)
def wait_for_cluster_to_be_in_status(self, statuses, timeout=consts.ERROR_TIMEOUT):
utils.wait_till_cluster_is_in_status(
client=self.api_client,
cluster_id=self.id,
statuses=statuses,
timeout=timeout,
)
@classmethod
def reset_cluster_and_wait_for_ready(cls, cluster):
cluster.reset_install()
assert cluster.is_in_insufficient_status()
cluster.reboot_required_nodes_into_iso_after_reset()
cluster.wait_until_hosts_are_discovered()
cluster.wait_for_ready_to_install()
def get_events(self, host_id="", infra_env_id=""):
warnings.warn(
"Cluster.get_events is now deprecated, use EventsHandler.get_events instead",
PendingDeprecationWarning,
)
handler = EventsHandler(self.api_client)
return handler.get_events(host_id, self.id, infra_env_id)
def _configure_load_balancer(self):
main_cidr = self.get_primary_machine_cidr()
secondary_cidr = self.nodes.controller.get_provisioning_cidr()
master_ips = self.get_master_ips(self.api_client, self.id, main_cidr) + self.get_master_ips(
self.api_client, self.id, secondary_cidr
)
worker_ips = self.get_worker_ips(self.api_client, self.id, main_cidr)
load_balancer_ip = str(IPNetwork(main_cidr).ip + 1)
tf = terraform_utils.TerraformUtils(working_dir=self.nodes.controller.tf_folder)
lb_controller = LoadBalancerController(tf)
lb_controller.set_load_balancing_config(load_balancer_ip, master_ips, worker_ips)
@classmethod
def _get_namespace_index(cls, libvirt_network_if):
matcher = re.match(r"^tt(\d+)$", libvirt_network_if)
return int(matcher.groups()[0]) if matcher is not None else 0
def wait_for_event(self, event_to_find, reference_time, params_list=None, host_id="", infra_env_id="", timeout=10):
warnings.warn(
"Cluster.wait_for_event is now deprecated, use EventsHandler.wait_for_event instead",
PendingDeprecationWarning,
)
handler = EventsHandler(self.api_client)
return handler.wait_for_event(
event_to_find, reference_time, params_list, host_id, infra_env_id, self.id, timeout
)
@staticmethod
def get_inventory_host_nics_data(host: dict, ipv4_first=True):
def get_network_interface_ip(interface):
addresses = (
interface.ipv4_addresses + interface.ipv6_addresses
if ipv4_first
else interface.ipv6_addresses + interface.ipv4_addresses
)
return addresses[0].split("/")[0] if len(addresses) > 0 else None
inventory = models.Inventory(**json.loads(host["inventory"]))
interfaces_list = [models.Interface(**interface) for interface in inventory.interfaces]
return [
{
"name": interface.name,
"model": interface.product,
"mac": interface.mac_address,
"ip": get_network_interface_ip(interface),
"speed": interface.speed_mbps,
}
for interface in interfaces_list
]
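    # Each entry returned above has the shape (values are illustrative):
    #   {"name": "eth0", "model": "...", "mac": "52:54:00:aa:bb:cc", "ip": "192.168.127.10", "speed": 1000}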
@staticmethod
def get_hosts_nics_data(hosts: list, ipv4_first=True):
return [Cluster.get_inventory_host_nics_data(h, ipv4_first=ipv4_first) for h in hosts]
@staticmethod
def get_cluster_hosts(cluster: models.cluster.Cluster) -> List[ClusterHost]:
return [ClusterHost(h) for h in cluster.hosts]
@staticmethod
def to_cluster_hosts(hosts: List[Dict[str, Any]]) -> List[ClusterHost]:
return [ClusterHost(models.Host(**h)) for h in hosts]
def get_cluster_cidrs(self, hosts: List[ClusterHost]) -> Set[str]:
cidrs = set()
for host in hosts:
ips = []
if self.nodes.is_ipv4:
ips += host.ipv4_addresses()
if self.nodes.is_ipv6:
ips += host.ipv6_addresses()
for host_ip in ips:
cidr = network_utils.get_cidr_by_interface(host_ip)
cidrs.add(cidr)
return cidrs
def get_cluster_matching_cidrs(self, hosts: List[ClusterHost]) -> Set[str]:
cluster_cidrs = self.get_cluster_cidrs(hosts)
matching_cidrs = set()
for cidr in cluster_cidrs:
for host in hosts:
interfaces = []
if self.nodes.is_ipv4:
interfaces += host.ipv4_addresses()
if self.nodes.is_ipv6:
interfaces += host.ipv6_addresses()
if not network_utils.any_interface_in_cidr(interfaces, cidr):
break
matching_cidrs.add(cidr)
return matching_cidrs
@staticmethod
def get_ip_for_single_node(client, cluster_id, machine_cidr, ipv4_first=True):
cluster_info = client.cluster_get(cluster_id).to_dict()
if len(cluster_info["hosts"]) == 0:
raise Exception("No host found")
network = IPNetwork(machine_cidr)
interfaces = Cluster.get_inventory_host_nics_data(cluster_info["hosts"][0], ipv4_first=ipv4_first)
for intf in interfaces:
ip = intf["ip"]
if IPAddress(ip) in network:
return ip
raise Exception("IP for single node not found")
@staticmethod
def get_ips_for_role(client, cluster_id, network, role):
cluster_info = client.cluster_get(cluster_id).to_dict()
ret = []
net = IPNetwork(network)
hosts_interfaces = Cluster.get_hosts_nics_data([h for h in cluster_info["hosts"] if h["role"] == role])
for host_interfaces in hosts_interfaces:
for intf in host_interfaces:
ip = IPAddress(intf["ip"])
if ip in net:
ret = ret + [intf["ip"]]
return ret
@staticmethod
def get_master_ips(client, cluster_id, network):
return Cluster.get_ips_for_role(client, cluster_id, network, consts.NodeRoles.MASTER)
@staticmethod
def get_worker_ips(client, cluster_id, network):
return Cluster.get_ips_for_role(client, cluster_id, network, consts.NodeRoles.WORKER)
@staticmethod
def get_vips_from_cluster(client, cluster_id):
cluster_info = client.cluster_get(cluster_id)
return dict(api_vip=cluster_info.api_vip, ingress_vip=cluster_info.ingress_vip)
def get_host_disks(self, host, filter=None):
hosts = self.get_hosts()
selected_host = [h for h in hosts if h["id"] == host["id"]]
disks = json.loads(selected_host[0]["inventory"])["disks"]
if not filter:
return [disk for disk in disks]
else:
return [disk for disk in disks if filter(disk)]
def get_inventory_host_ips_data(self, host: dict):
nics = self.get_inventory_host_nics_data(host)
return [nic["ip"] for nic in nics]
def get_kube_api_ip(self, hosts):
for host in hosts:
for ip in self.get_inventory_host_ips_data(host):
if self.is_kubeapi_service_ready(ip):
return ip
def get_api_vip(self, cluster):
cluster = cluster or self.get_details()
api_vip = cluster.api_vip
if not api_vip and cluster.user_managed_networking:
log.info("API VIP is not set, searching for api ip on masters")
masters = self.get_hosts_by_role(consts.NodeRoles.MASTER, hosts=cluster.to_dict()["hosts"])
api_vip = self._wait_for_api_vip(masters)
log.info("api vip is %s", api_vip)
return api_vip
def _wait_for_api_vip(self, hosts, timeout=180):
return waiting.wait(
lambda: self.get_kube_api_ip(hosts=hosts), timeout_seconds=timeout, sleep_seconds=5, waiting_for="API's IP"
)
def find_matching_node_name(self, host: ClusterHost, nodes: List[Node]) -> Union[str, None]:
        # Look for a node that matches the given host by its MAC address (which is unique)
for node in nodes:
for mac in node.macs:
if mac.lower() in host.macs():
return node.name
# IPv6 static ips
if self._config.is_static_ip:
mappings = static_network.get_name_to_mac_addresses_mapping(self.nodes.controller.tf_folder)
for mac in host.macs():
for name, macs in mappings.items():
if mac in macs:
return name
return None
@staticmethod
def is_kubeapi_service_ready(ip_or_dns):
with contextlib.suppress(ValueError):
# IPv6 addresses need to be surrounded with square-brackets
# to differentiate them from domain names
if ipaddress.ip_address(ip_or_dns).version == 6:
ip_or_dns = f"[{ip_or_dns}]"
try:
response = requests.get(f"https://{ip_or_dns}:6443/readyz", verify=False, timeout=1)
return response.ok
except BaseException:
return False
def wait_and_kill_installer(self, host):
# Wait for specific host to be in installing in progress
self.wait_for_specific_host_status(host=host, statuses=[consts.NodesStatus.INSTALLING_IN_PROGRESS])
# Kill installer to simulate host error
selected_node = self.nodes.get_node_from_cluster_host(host)
selected_node.kill_installer()
def get_api_vip_from_cluster(api_client, cluster_info: Union[dict, models.cluster.Cluster], pull_secret):
import warnings
from tests.config import ClusterConfig, InfraEnvConfig
warnings.warn(
"Soon get_api_vip_from_cluster will be deprecated. Avoid using or adding new functionality to "
"this function. The function and solution for that case have not been determined yet. It might be "
"on another module, or as a classmethod within Cluster class."
" For more information see https://issues.redhat.com/browse/MGMT-4975",
PendingDeprecationWarning,
)
if isinstance(cluster_info, dict):
cluster_info = models.cluster.Cluster(**cluster_info)
cluster = Cluster(
api_client=api_client,
infra_env_config=InfraEnvConfig(),
config=ClusterConfig(
cluster_name=ClusterName(cluster_info.name),
pull_secret=pull_secret,
ssh_public_key=cluster_info.ssh_public_key,
cluster_id=cluster_info.id,
),
nodes=None,
)
return cluster.get_api_vip(cluster=cluster_info)
| true
| true
|
790498b4f6c27daf5b1fc037d4e421f7c4553e58
| 1,156
|
py
|
Python
|
scripts/linters/test_files/invalid_python_three.py
|
yash10019coder/oppia
|
8c349c61ac723a2fd507046b20957934cba70e3a
|
[
"Apache-2.0"
] | 5,422
|
2015-08-14T01:56:44.000Z
|
2022-03-31T23:31:56.000Z
|
scripts/linters/test_files/invalid_python_three.py
|
yash10019coder/oppia
|
8c349c61ac723a2fd507046b20957934cba70e3a
|
[
"Apache-2.0"
] | 14,178
|
2015-08-14T05:21:45.000Z
|
2022-03-31T23:54:10.000Z
|
scripts/linters/test_files/invalid_python_three.py
|
yash10019coder/oppia
|
8c349c61ac723a2fd507046b20957934cba70e3a
|
[
"Apache-2.0"
] | 3,574
|
2015-08-14T04:20:06.000Z
|
2022-03-29T01:52:37.000Z
|
# coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python file with invalid syntax, used by scripts/linters/
python_linter_test.py. This file does not import from __future__.
"""
class FakeClass:
"""This is a fake docstring for valid syntax purposes."""
def __init__(self, fake_arg):
self.fake_arg = fake_arg
def fake_method(self, name):
"""This doesn't do anything.
Args:
name: str. Means nothing.
Yields:
tuple(str, str). The argument passed in but twice in a tuple.
"""
yield (name, name)
| 30.421053
| 74
| 0.693772
|
class FakeClass:
def __init__(self, fake_arg):
self.fake_arg = fake_arg
def fake_method(self, name):
yield (name, name)
| true
| true
|
79049a6892e9c63f8426fc1689bce207fdd771d4
| 7,623
|
py
|
Python
|
todo/operations/csv_importer.py
|
paiuolo/django-todo
|
17d35460b6dfa8c5a45a9eeafbec262233f1586d
|
[
"BSD-3-Clause"
] | null | null | null |
todo/operations/csv_importer.py
|
paiuolo/django-todo
|
17d35460b6dfa8c5a45a9eeafbec262233f1586d
|
[
"BSD-3-Clause"
] | null | null | null |
todo/operations/csv_importer.py
|
paiuolo/django-todo
|
17d35460b6dfa8c5a45a9eeafbec262233f1586d
|
[
"BSD-3-Clause"
] | null | null | null |
import codecs
import csv
import datetime
import logging
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from todo.models import Task, TaskList
log = logging.getLogger(__name__)
class CSVImporter:
"""Core upsert functionality for CSV import, for re-use by `import_csv` management command, web UI and tests.
Supplies a detailed log of what was and was not imported at the end. See README for usage notes.
"""
def __init__(self):
self.errors = []
self.upserts = []
self.summaries = []
self.line_count = 0
self.upsert_count = 0
def upsert(self, fileobj, as_string_obj=False):
"""Expects a file *object*, not a file path. This is important because this has to work for both
        the management command and the web uploader; the web uploader will pass an in-memory file
with no path!
Header row is:
Title, Group, Task List, Created Date, Due Date, Completed, Created By, Assigned To, Note, Priority
"""
if as_string_obj:
# fileobj comes from mgmt command
csv_reader = csv.DictReader(fileobj)
else:
# fileobj comes from browser upload (in-memory)
csv_reader = csv.DictReader(codecs.iterdecode(fileobj, "utf-8"))
# DI check: Do we have expected header row?
header = csv_reader.fieldnames
expected = [
"Title",
"Group",
"Task List",
"Created By",
"Created Date",
"Due Date",
"Completed",
"Assigned To",
"Note",
"Priority",
]
if header != expected:
self.errors.append(
f"Inbound data does not have expected columns.\nShould be: {expected}"
)
return
for row in csv_reader:
self.line_count += 1
newrow = self.validate_row(row)
if newrow:
# newrow at this point is fully validated, and all FK relations exist,
# e.g. `newrow.get("Assigned To")`, is a Django User instance.
assignee = newrow.get("Assigned To") if newrow.get("Assigned To") else None
created_at = (
newrow.get("Created Date")
if newrow.get("Created Date")
else datetime.datetime.today()
)
due_date = newrow.get("Due Date") if newrow.get("Due Date") else None
priority = newrow.get("Priority") if newrow.get("Priority") else None
obj, created = Task.objects.update_or_create(
created_by=newrow.get("Created By"),
task_list=newrow.get("Task List"),
title=newrow.get("Title"),
defaults={
"assigned_to": assignee,
"completed": newrow.get("Completed"),
"created_at": created_at,
"due_date": due_date,
"note": newrow.get("Note"),
"priority": priority,
},
)
self.upsert_count += 1
msg = (
f'Upserted task {obj.id}: "{obj.title}"'
f' in list "{obj.task_list}" (group "{obj.task_list.group}")'
)
self.upserts.append(msg)
self.summaries.append(f"Processed {self.line_count} CSV rows")
self.summaries.append(f"Upserted {self.upsert_count} rows")
self.summaries.append(f"Skipped {self.line_count - self.upsert_count} rows")
return {"summaries": self.summaries, "upserts": self.upserts, "errors": self.errors}
def validate_row(self, row):
"""Perform data integrity checks and set default values. Returns a valid object for insertion, or False.
Errors are stored for later display. Intentionally not broken up into separate validator functions because
        there are interdependencies, such as checking for existing `creator` in one place and then using
that creator for group membership check in others."""
row_errors = []
# #######################
# Task creator must exist
if not row.get("Created By"):
msg = f"Missing required task creator."
row_errors.append(msg)
creator = get_user_model().objects.filter(username=row.get("Created By")).first()
if not creator:
msg = f"Invalid task creator {row.get('Created By')}"
row_errors.append(msg)
# #######################
# If specified, Assignee must exist
assignee = None # Perfectly valid
if row.get("Assigned To"):
assigned = get_user_model().objects.filter(username=row.get("Assigned To"))
if assigned.exists():
assignee = assigned.first()
else:
msg = f"Missing or invalid task assignee {row.get('Assigned To')}"
row_errors.append(msg)
# #######################
# Group must exist
try:
target_group = Group.objects.get(name=row.get("Group"))
except Group.DoesNotExist:
msg = f"Could not find group {row.get('Group')}."
row_errors.append(msg)
target_group = None
# #######################
# Task creator must be in the target group
if creator and target_group not in creator.groups.all():
msg = f"{creator} is not in group {target_group}"
row_errors.append(msg)
# #######################
# Assignee must be in the target group
if assignee and target_group not in assignee.groups.all():
msg = f"{assignee} is not in group {target_group}"
row_errors.append(msg)
# #######################
# Task list must exist in the target group
try:
tasklist = TaskList.objects.get(name=row.get("Task List"), group=target_group)
row["Task List"] = tasklist
except TaskList.DoesNotExist:
msg = f"Task list {row.get('Task List')} in group {target_group} does not exist"
row_errors.append(msg)
# #######################
# Validate Dates
datefields = ["Due Date", "Created Date"]
for datefield in datefields:
datestring = row.get(datefield)
if datestring:
valid_date = self.validate_date(datestring)
if valid_date:
row[datefield] = valid_date
else:
msg = f"Could not convert {datefield} {datestring} to valid date instance"
row_errors.append(msg)
# #######################
# Group membership checks have passed
row["Created By"] = creator
row["Group"] = target_group
if assignee:
row["Assigned To"] = assignee
# Set Completed
row["Completed"] = row["Completed"] == "Yes"
# #######################
if row_errors:
self.errors.append({self.line_count: row_errors})
return False
# No errors:
return row
def validate_date(self, datestring):
"""Inbound date string from CSV translates to a valid python date."""
try:
date_obj = datetime.datetime.strptime(datestring, "%Y-%m-%d")
return date_obj
except ValueError:
return False
| 37.737624
| 114
| 0.543093
|
import codecs
import csv
import datetime
import logging
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from todo.models import Task, TaskList
log = logging.getLogger(__name__)
class CSVImporter:
def __init__(self):
self.errors = []
self.upserts = []
self.summaries = []
self.line_count = 0
self.upsert_count = 0
def upsert(self, fileobj, as_string_obj=False):
if as_string_obj:
csv_reader = csv.DictReader(fileobj)
else:
csv_reader = csv.DictReader(codecs.iterdecode(fileobj, "utf-8"))
header = csv_reader.fieldnames
expected = [
"Title",
"Group",
"Task List",
"Created By",
"Created Date",
"Due Date",
"Completed",
"Assigned To",
"Note",
"Priority",
]
if header != expected:
self.errors.append(
f"Inbound data does not have expected columns.\nShould be: {expected}"
)
return
for row in csv_reader:
self.line_count += 1
newrow = self.validate_row(row)
if newrow:
assignee = newrow.get("Assigned To") if newrow.get("Assigned To") else None
created_at = (
newrow.get("Created Date")
if newrow.get("Created Date")
else datetime.datetime.today()
)
due_date = newrow.get("Due Date") if newrow.get("Due Date") else None
priority = newrow.get("Priority") if newrow.get("Priority") else None
obj, created = Task.objects.update_or_create(
created_by=newrow.get("Created By"),
task_list=newrow.get("Task List"),
title=newrow.get("Title"),
defaults={
"assigned_to": assignee,
"completed": newrow.get("Completed"),
"created_at": created_at,
"due_date": due_date,
"note": newrow.get("Note"),
"priority": priority,
},
)
self.upsert_count += 1
msg = (
f'Upserted task {obj.id}: "{obj.title}"'
f' in list "{obj.task_list}" (group "{obj.task_list.group}")'
)
self.upserts.append(msg)
self.summaries.append(f"Processed {self.line_count} CSV rows")
self.summaries.append(f"Upserted {self.upsert_count} rows")
self.summaries.append(f"Skipped {self.line_count - self.upsert_count} rows")
return {"summaries": self.summaries, "upserts": self.upserts, "errors": self.errors}
    def validate_row(self, row):
        row_errors = []
        creator = get_user_model().objects.filter(username=row.get("Created By")).first()
        if not creator:
            msg = f"Invalid task creator {row.get('Created By')}"
            row_errors.append(msg)
        assignee = None
        if row.get("Assigned To"):
            assigned = get_user_model().objects.filter(username=row.get("Assigned To"))
            if assigned.exists():
                assignee = assigned.first()
            else:
                msg = f"Missing or invalid task assignee {row.get('Assigned To')}"
                row_errors.append(msg)
        if row_errors:
            self.errors.append({self.line_count: row_errors})
            return False
        return row
| true
| true
|
79049ac874bf28ecbdb30f9685138e91dcc1a528
| 561
|
py
|
Python
|
gevent/gevent-group_pool.py
|
all3g/pieces
|
bc378fd22ddc700891fe7f34ab0d5b341141e434
|
[
"CNRI-Python"
] | 34
|
2016-10-31T02:05:24.000Z
|
2018-11-08T14:33:13.000Z
|
gevent/gevent-group_pool.py
|
join-us/python-programming
|
bc378fd22ddc700891fe7f34ab0d5b341141e434
|
[
"CNRI-Python"
] | 2
|
2017-05-11T03:00:31.000Z
|
2017-11-01T23:37:37.000Z
|
gevent/gevent-group_pool.py
|
join-us/python-programming
|
bc378fd22ddc700891fe7f34ab0d5b341141e434
|
[
"CNRI-Python"
] | 21
|
2016-08-19T09:05:45.000Z
|
2018-11-08T14:33:16.000Z
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
import gevent
from gevent import getcurrent
from gevent.pool import Group
group = Group()
def hello_from(n):
print('Size of group %s' % len(group))
print('Hello from Greenlet %s' % id(getcurrent()))
group.map(hello_from, xrange(3))
def intensive(n):
gevent.sleep(3 - n)
return 'task', n
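# Note: imap yields results in submission order (so with sleep(3 - n) the slowest task is still
# reported first), while imap_unordered yields each result as soon as its greenlet finishes.
# xrange also implies this example targets Python 2.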
print('Ordered')
ogroup = Group()
for i in ogroup.imap(intensive, xrange(3)):
print (i)
print('Unordered')
igroup = Group()
for i in igroup.imap_unordered(intensive, xrange(3)):
print(i)
| 14.025
| 54
| 0.655971
|
import gevent
from gevent import getcurrent
from gevent.pool import Group
group = Group()
def hello_from(n):
print('Size of group %s' % len(group))
print('Hello from Greenlet %s' % id(getcurrent()))
group.map(hello_from, xrange(3))
def intensive(n):
gevent.sleep(3 - n)
return 'task', n
print('Ordered')
ogroup = Group()
for i in ogroup.imap(intensive, xrange(3)):
print (i)
print('Unordered')
igroup = Group()
for i in igroup.imap_unordered(intensive, xrange(3)):
print(i)
| true
| true
|
79049b3404e746c2e38fd147e8eaa255ab8306e2
| 1,931
|
py
|
Python
|
packages/std/nodes/std___Or0/std___Or0___METACODE.py
|
Shirazbello/Pyscriptining
|
0f2c80a9bb10477d65966faeccc7783f20385c1b
|
[
"MIT"
] | null | null | null |
packages/std/nodes/std___Or0/std___Or0___METACODE.py
|
Shirazbello/Pyscriptining
|
0f2c80a9bb10477d65966faeccc7783f20385c1b
|
[
"MIT"
] | null | null | null |
packages/std/nodes/std___Or0/std___Or0___METACODE.py
|
Shirazbello/Pyscriptining
|
0f2c80a9bb10477d65966faeccc7783f20385c1b
|
[
"MIT"
] | null | null | null |
from custom_src.NodeInstance import NodeInstance
from custom_src.Node import Node
# USEFUL
# self.input(index) <- access to input data
# self.outputs[index].set_val(val) <- set output data port value
# self.main_widget <- access to main widget
# self.exec_output(index) <- executes an execution output
# self.create_new_input(type_, label, widget_type='', widget_name='', widget_pos='under', pos=-1)
# self.delete_input(input or index)
# self.create_new_output(type_, label, pos=-1)
# self.delete_output(output or index)
# self.update_shape()
class %NODE_TITLE%_NodeInstance(NodeInstance):
def __init__(self, parent_node: Node, flow, configuration=None):
super(%NODE_TITLE%_NodeInstance, self).__init__(parent_node, flow, configuration)
self.special_actions['add input'] = {'method': self.action_add_input}
self.enlargement_state = 0
self.initialized()
def action_add_input(self):
self.create_new_input('data', '', widget_type='std line edit', widget_pos='besides')
self.enlargement_state += 1
self.special_actions['remove input'] = {'method': self.action_remove_input}
def action_remove_input(self):
self.delete_input(self.inputs[-1])
self.enlargement_state -= 1
if self.enlargement_state == 0:
del self.special_actions['remove input']
def update_event(self, input_called=-1):
result = self.input(0) or self.input(1)
for i in range(self.enlargement_state):
result = result or self.input(2+i)
self.outputs[0].set_val(result)
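    # update_event() ORs the two fixed inputs with every dynamically added one, so the node acts
    # as a variadic boolean OR whose arity follows the 'add input' / 'remove input' actions.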
def get_data(self):
data = {'enlargement state': self.enlargement_state}
return data
def set_data(self, data):
self.enlargement_state = data['enlargement state']
# optional - important for threading - stop everything here
def removing(self):
pass
| 34.482143
| 97
| 0.671673
|
from custom_src.NodeInstance import NodeInstance
from custom_src.Node import Node
class %NODE_TITLE%_NodeInstance(NodeInstance):
def __init__(self, parent_node: Node, flow, configuration=None):
super(%NODE_TITLE%_NodeInstance, self).__init__(parent_node, flow, configuration)
self.special_actions['add input'] = {'method': self.action_add_input}
self.enlargement_state = 0
self.initialized()
def action_add_input(self):
self.create_new_input('data', '', widget_type='std line edit', widget_pos='besides')
self.enlargement_state += 1
self.special_actions['remove input'] = {'method': self.action_remove_input}
def action_remove_input(self):
self.delete_input(self.inputs[-1])
self.enlargement_state -= 1
if self.enlargement_state == 0:
del self.special_actions['remove input']
def update_event(self, input_called=-1):
result = self.input(0) or self.input(1)
for i in range(self.enlargement_state):
result = result or self.input(2+i)
self.outputs[0].set_val(result)
def get_data(self):
data = {'enlargement state': self.enlargement_state}
return data
def set_data(self, data):
self.enlargement_state = data['enlargement state']
def removing(self):
pass
| false
| true
|
79049bd4a704b0a968e7e105aba2606fd9874bca
| 774
|
py
|
Python
|
apps/sys_inspect/urls.py
|
MaLei666/oms
|
2447ec656ae5b61b9edc93c28a42f487476b5978
|
[
"MIT"
] | null | null | null |
apps/sys_inspect/urls.py
|
MaLei666/oms
|
2447ec656ae5b61b9edc93c28a42f487476b5978
|
[
"MIT"
] | 6
|
2020-03-23T09:21:13.000Z
|
2022-03-11T23:49:57.000Z
|
apps/sys_inspect/urls.py
|
MaLei666/oms
|
2447ec656ae5b61b9edc93c28a42f487476b5978
|
[
"MIT"
] | 1
|
2019-10-15T03:06:46.000Z
|
2019-10-15T03:06:46.000Z
|
"""
Host management app
"""
from django.urls import path
from .views import *
app_name = 'sys_inspect'
urlpatterns = [
    # Device list
    path('device/list', InspectDevInfoViews.as_view(), name='inspect_devices_list'),
    # Add device
    path('device/add', AddDevView.as_view(), name='inspect_devices_add'),
    # Delete device
    path('device/delete', DeleteDevView.as_view(), name='inspect_device_delete'),
    # Edit device
    path('device/edit', EditDevInfoView.as_view(), name='inspect_device_edit'),
    # Task list
    path('content/list', ContentViews.as_view(), name='inspect_contents_list'),
    # Add task
    path('content/add', AddContView.as_view(), name='inspect_contents_add'),
    # Delete task
    path('content/delete', DeleteContView.as_view(), name='inspect_contents_delete'),
]
| 22.114286
| 85
| 0.686047
|
from django.urls import path
from .views import *
app_name = 'sys_inspect'
urlpatterns = [
path('device/list', InspectDevInfoViews.as_view(), name='inspect_devices_list'),
path('device/add', AddDevView.as_view(), name='inspect_devices_add'),
path('device/delete', DeleteDevView.as_view(), name='inspect_device_delete'),
path('device/edit', EditDevInfoView.as_view(), name='inspect_device_edit'),
path('content/list', ContentViews.as_view(), name='inspect_contents_list'),
path('content/add', AddContView.as_view(), name='inspect_contents_add'),
path('content/delete', DeleteContView.as_view(), name='inspect_contents_delete'),
]
| true
| true
|
79049bfff6d362808a6e0e4af9dd2d0fd9ef944b
| 15,158
|
py
|
Python
|
Classes/Packets/Server/Home/OwnHomeDataMessage.py
|
ServerBSvvv/BSDS-V41
|
5ffbc308c520f6d4e8a8fb9d7eca497c59735653
|
[
"Apache-2.0"
] | null | null | null |
Classes/Packets/Server/Home/OwnHomeDataMessage.py
|
ServerBSvvv/BSDS-V41
|
5ffbc308c520f6d4e8a8fb9d7eca497c59735653
|
[
"Apache-2.0"
] | null | null | null |
Classes/Packets/Server/Home/OwnHomeDataMessage.py
|
ServerBSvvv/BSDS-V41
|
5ffbc308c520f6d4e8a8fb9d7eca497c59735653
|
[
"Apache-2.0"
] | null | null | null |
import time
from Classes.Packets.PiranhaMessage import PiranhaMessage
class OwnHomeDataMessage(PiranhaMessage):
def __init__(self, messageData):
super().__init__(messageData)
self.messageVersion = 0
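    # encode() below serializes the player's "own home" snapshot (trophies, cosmetics, offers,
    # per-player flag longs and the current event rotation) as a long run of VInts; most of the
    # bare writeVint() calls appear to be fixed filler values for this protocol version.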
def encode(self, fields, player):
ownedBrawlersCount = len(player.OwnedBrawlers)
ownedPinsCount = len(player.OwnedPins)
ownedThumbnailCount = len(player.OwnedThumbnails)
ownedSkins = []
for brawlerInfo in player.OwnedBrawlers.values():
try:
ownedSkins.extend(brawlerInfo["Skins"])
except KeyError:
continue
self.writeVint(int(time.time()))
self.writeVint(0)
self.writeVint(0)
self.writeVint(0)
self.writeVint(player.Trophies) # Trophies
self.writeVint(player.HighestTrophies) # Highest Trophies
self.writeVint(player.HighestTrophies)
self.writeVint(player.TrophyRoadTier)
self.writeVint(player.Experience) # Experience
self.writeDataReference(28, player.Thumbnail) # Thumbnail
self.writeDataReference(43, player.Namecolor) # Namecolor
self.writeVint(0)
self.writeVint(0) # Selected Skins
self.writeVint(0) # Randomizer Skin Selected
self.writeVint(0) # Current Random Skin
self.writeVint(len(ownedSkins))
for skinID in ownedSkins:
self.writeDataReference(29, skinID)
self.writeVint(0) # Unlocked Skin Purchase Option
self.writeVint(0) # New Item State
self.writeVint(0)
self.writeVint(player.HighestTrophies)
self.writeVint(0)
self.writeVint(1)
self.writeBoolean(True)
self.writeVint(player.TokensDoubler)
self.writeVint(0)
self.writeVint(0)
self.writeVint(0)
self.writeVint(141)
self.writeVint(135)
self.writeVint(5)
self.writeVint(93)
self.writeVint(206)
self.writeVint(456)
self.writeVint(792)
self.writeVint(729)
self.writeBoolean(False) # Offer 1
self.writeBoolean(False) # Offer 2
self.writeBoolean(True) # Token Doubler Enabled
self.writeVint(2) # Token Doubler New Tag State
self.writeVint(2) # Event Tickets New Tag State
self.writeVint(2) # Coin Packs New Tag State
self.writeVint(0) # Change Name Cost
self.writeVint(0) # Timer For the Next Name Change
self.writeVint(1) # Offers count
self.writeVint(1) # RewardCount
for i in range(1):
self.writeVint(6) # ItemType
self.writeVint(0)
self.writeDataReference(0) # CsvID
self.writeVint(0)
self.writeVint(0)
self.writeVint(666)
self.writeVint(950400)
self.writeVint(2)
self.writeVint(0)
self.writeBoolean(False)
self.writeVint(3917)
self.writeVint(0)
self.writeBoolean(False)
self.writeVint(49)
self.writeInt(0)
self.writeString("Unlock all skins")
self.writeBoolean(False)
self.writeString()
self.writeVint(-1)
self.writeBoolean(False)
self.writeVint(0)
self.writeVint(0)
self.writeString()
self.writeBoolean(False)
self.writeBoolean(False)
self.writeVint(0)
self.writeVint(player.Tokens)
self.writeVint(-1)
self.writeVint(0)
self.writeVint(0)
self.writeVint(0)
self.writeVint(len(player.SelectedBrawlers))
for i in player.SelectedBrawlers:
self.writeDataReference(16, i)
self.writeString(player.Region)
self.writeString(player.ContentCreator)
self.writeVint(19)
self.writeLong(2, 1) # Unknown
self.writeLong(3, 0) # TokensGained
self.writeLong(4, 0) # TrophiesGained
self.writeLong(6, 0) # DemoAccount
self.writeLong(7, 0) # InvitesBlocked
self.writeLong(8, 0) # StarPointsGained
self.writeLong(9, 1) # ShowStarPoints
self.writeLong(10, 0) # PowerPlayTrophiesGained
self.writeLong(12, 1) # Unknown
self.writeLong(14, 0) # CoinsGained
self.writeLong(15, 0) # AgeScreen | 3 = underage (disable social media) | 1 = age popup
self.writeLong(16, 1)
self.writeLong(17, 1) # TeamChatMuted
self.writeLong(18, 1) # EsportButton
self.writeLong(19, 1) # ChampionShipLivesBuyPopup
self.writeLong(20, 0) # GemsGained
self.writeLong(21, 1) # LookingForTeamState
self.writeLong(22, 1)
self.writeLong(24, 1) # Have already watched club league stupid animation
self.writeVint(0)
self.writeVint(2) # Brawlpass
for i in range(8, 10):
self.writeVint(i)
self.writeVint(34500)
self.writeBoolean(True)
self.writeVint(0)
self.writeUInt8(2)
self.writeUInt(4294967292)
self.writeUInt(4294967295)
self.writeUInt(511)
self.writeUInt(0)
self.writeUInt8(1)
self.writeUInt(4294967292)
self.writeUInt(4294967295)
self.writeUInt(511)
self.writeUInt(0)
self.writeVint(0)
self.writeBoolean(True)
self.writeVint(0)
self.writeBoolean(True)
self.writeVint(ownedPinsCount + ownedThumbnailCount) # Vanity Count
for i in player.OwnedPins:
self.writeDataReference(52, i)
self.writeVint(1)
for i in range(1):
self.writeVint(1)
self.writeVint(1)
for i in player.OwnedThumbnails:
self.writeDataReference(28, i)
self.writeVint(1)
for i in range(1):
self.writeVint(1)
self.writeVint(1)
self.writeBoolean(False)
self.writeInt(0)
self.writeVint(0)
self.writeVint(25) # Count
self.writeVint(1)
self.writeVint(2)
self.writeVint(3)
self.writeVint(4)
self.writeVint(5)
self.writeVint(6)
self.writeVint(7)
self.writeVint(8)
self.writeVint(9)
self.writeVint(10)
self.writeVint(11)
self.writeVint(12)
self.writeVint(13)
self.writeVint(14)
self.writeVint(15)
self.writeVint(16)
self.writeVint(17)
self.writeVint(20)
self.writeVint(21)
self.writeVint(22)
self.writeVint(23)
self.writeVint(24)
self.writeVint(30)
self.writeVint(31)
self.writeVint(32)
self.writeVint(3) # Events
eventIndex = 1
for i in [5, 7, 24]:
self.writeVint(-1)
self.writeVint(eventIndex) # EventType
self.writeVint(0) # EventsBeginCountdown
self.writeVint(51208) # Timer
self.writeVint(0) # tokens reward for new event
self.writeDataReference(15, i) # MapID
self.writeVint(-1) # GameModeVariation
self.writeVint(2) # State
self.writeString()
self.writeVint(0)
self.writeVint(0)
self.writeVint(0)
self.writeVint(0) # Modifiers
self.writeVint(0)
self.writeVint(0)
self.writeBoolean(False) # Map Maker Map Structure Array
self.writeVint(0)
self.writeBoolean(False) # Power League Data Array
self.writeVint(0)
self.writeVint(0)
self.writeBoolean(False) # ChronosTextEntry
self.writeBoolean(False)
self.writeBoolean(False)
self.writeVint(-1)
self.writeBoolean(False)
self.writeBoolean(False)
eventIndex += 1
self.writeVint(0) # Coming Events
self.writeVint(10) # Brawler Upgrade Cost
self.writeVint(20)
self.writeVint(35)
self.writeVint(75)
self.writeVint(140)
self.writeVint(290)
self.writeVint(480)
self.writeVint(800)
self.writeVint(1250)
self.writeVint(1875)
self.writeVint(2800)
self.writeVint(4) # Shop Coins Price
self.writeVint(20)
self.writeVint(50)
self.writeVint(140)
self.writeVint(280)
self.writeVint(4) # Shop Coins Amount
self.writeVint(150)
self.writeVint(400)
self.writeVint(1200)
self.writeVint(2600)
self.writeBoolean(True) # Show Offers Packs
self.writeVint(0)
self.writeVint(23) # IntValueEntry
self.writeLong(10008, 501)
self.writeLong(65, 2)
self.writeLong(1, 41000036) # ThemeID
self.writeLong(60, 36270)
self.writeLong(66, 1)
self.writeLong(61, 36270) # SupportDisabled State | true if state > 36218
self.writeLong(47, 41381)
self.writeLong(29, 0) # Skin Group Active For Campaign
self.writeLong(48, 41381)
self.writeLong(50, 0) # Coming up quests placeholder
self.writeLong(1100, 500)
self.writeLong(1101, 500)
self.writeLong(1003, 1)
self.writeLong(36, 0)
self.writeLong(14, 0) # Double Token Event
self.writeLong(31, 0) # Gold rush event
self.writeLong(79, 149999)
self.writeLong(80, 160000)
self.writeLong(28, 4)
self.writeLong(74, 1)
self.writeLong(78, 1)
self.writeLong(17, 4)
self.writeLong(10046, 1)
self.writeVint(0) # Timed Int Value Entry
self.writeVint(0) # Custom Event
self.writeVint(0)
self.writeVint(0)
self.writeVint(0)
self.writeLong(player.ID[0], player.ID[1]) # PlayerID
self.writeVint(0) # NotificationFactory
self.writeVint(-1)
self.writeBoolean(False)
self.writeVint(0)
self.writeVint(0)
self.writeVint(0)
self.writeVLong(player.ID[0], player.ID[1])
self.writeVLong(0, 0)
self.writeVLong(0, 0)
self.writeString(player.Name)
self.writeBoolean(player.Registered)
self.writeInt(0)
self.writeVint(15)
self.writeVint(3 + ownedBrawlersCount)
for brawlerInfo in player.OwnedBrawlers.values():
self.writeDataReference(23, brawlerInfo["CardID"])
self.writeVint(1)
self.writeDataReference(5, 8)
self.writeVint(player.Coins)
self.writeDataReference(5, 10)
self.writeVint(player.StarPoints)
self.writeDataReference(5, 13)
self.writeVint(99999) # Club coins
self.writeVint(ownedBrawlersCount)
for brawlerID,brawlerInfo in player.OwnedBrawlers.items():
self.writeDataReference(16, brawlerID)
self.writeVint(brawlerInfo["Trophies"])
self.writeVint(ownedBrawlersCount)
for brawlerID, brawlerInfo in player.OwnedBrawlers.items():
self.writeDataReference(16, brawlerID)
self.writeVint(brawlerInfo["HighestTrophies"])
self.writeVint(0)
self.writeVint(ownedBrawlersCount)
for brawlerID, brawlerInfo in player.OwnedBrawlers.items():
self.writeDataReference(16, brawlerID)
self.writeVint(brawlerInfo["PowerPoints"])
self.writeVint(ownedBrawlersCount)
for brawlerID, brawlerInfo in player.OwnedBrawlers.items():
self.writeDataReference(16, brawlerID)
self.writeVint(brawlerInfo["PowerLevel"] - 1)
self.writeVint(0)
self.writeVint(ownedBrawlersCount)
for brawlerID, brawlerInfo in player.OwnedBrawlers.items():
self.writeDataReference(16, brawlerID)
self.writeVint(brawlerInfo["State"])
self.writeVint(0)
self.writeVint(0)
self.writeVint(0)
self.writeVint(0)
self.writeVint(0)
self.writeVint(0)
self.writeVint(0)
self.writeVint(player.Gems) # Diamonds
self.writeVint(player.Gems) # Free Diamonds
self.writeVint(player.Level) # Player Level
self.writeVint(100)
self.writeVint(0) # CumulativePurchasedDiamonds or Avatar User Level Tier | 10000 < Level Tier = 3 | 1000 < Level Tier = 2 | 0 < Level Tier = 1
self.writeVint(0) # Battle Count
self.writeVint(0) # WinCount
self.writeVint(0) # LoseCount
self.writeVint(0) # WinLooseStreak
self.writeVint(0) # NpcWinCount
self.writeVint(0) # NpcLoseCount
self.writeVint(2) # TutorialState | shouldGoToFirstTutorialBattle = State == 0
self.writeVint(0)
def decode(self):
fields = {}
# fields["AccountID"] = self.readLong()
# fields["HomeID"] = self.readLong()
# fields["PassToken"] = self.readString()
# fields["FacebookID"] = self.readString()
# fields["GamecenterID"] = self.readString()
# fields["ServerMajorVersion"] = self.readInt()
# fields["ContentVersion"] = self.readInt()
# fields["ServerBuild"] = self.readInt()
# fields["ServerEnvironment"] = self.readString()
# fields["SessionCount"] = self.readInt()
# fields["PlayTimeSeconds"] = self.readInt()
# fields["DaysSinceStartedPlaying"] = self.readInt()
# fields["FacebookAppID"] = self.readString()
# fields["ServerTime"] = self.readString()
# fields["AccountCreatedDate"] = self.readString()
# fields["StartupCooldownSeconds"] = self.readInt()
# fields["GoogleServiceID"] = self.readString()
# fields["LoginCountry"] = self.readString()
# fields["KunlunID"] = self.readString()
# fields["Tier"] = self.readInt()
# fields["TencentID"] = self.readString()
#
# ContentUrlCount = self.readInt()
# fields["GameAssetsUrls"] = []
# for i in range(ContentUrlCount):
# fields["GameAssetsUrls"].append(self.readString())
#
# EventUrlCount = self.readInt()
# fields["EventAssetsUrls"] = []
# for i in range(EventUrlCount):
# fields["EventAssetsUrls"].append(self.readString())
#
# fields["SecondsUntilAccountDeletion"] = self.readVint()
# fields["SupercellIDToken"] = self.readCompressedString()
# fields["IsSupercellIDLogoutAllDevicesAllowed"] = self.readBoolean()
# fields["isSupercellIDEligible"] = self.readBoolean()
# fields["LineID"] = self.readString()
# fields["SessionID"] = self.readString()
# fields["KakaoID"] = self.readString()
# fields["UpdateURL"] = self.readString()
# fields["YoozooPayNotifyUrl"] = self.readString()
# fields["UnbotifyEnabled"] = self.readBoolean()
# super().decode(fields)
return fields
def execute(message, calling_instance, fields):
pass
def getMessageType(self):
return 24101
def getMessageVersion(self):
return self.messageVersion
| 31.844538
| 152
| 0.599419
|
import time
from Classes.Packets.PiranhaMessage import PiranhaMessage
class OwnHomeDataMessage(PiranhaMessage):
def __init__(self, messageData):
super().__init__(messageData)
self.messageVersion = 0
def encode(self, fields, player):
ownedBrawlersCount = len(player.OwnedBrawlers)
ownedPinsCount = len(player.OwnedPins)
ownedThumbnailCount = len(player.OwnedThumbnails)
ownedSkins = []
for brawlerInfo in player.OwnedBrawlers.values():
try:
ownedSkins.extend(brawlerInfo["Skins"])
except KeyError:
continue
self.writeVint(int(time.time()))
self.writeVint(0)
self.writeVint(0)
self.writeVint(0)
self.writeVint(player.Trophies)
self.writeVint(player.HighestTrophies)
self.writeVint(player.HighestTrophies)
self.writeVint(player.TrophyRoadTier)
self.writeVint(player.Experience)
self.writeDataReference(28, player.Thumbnail)
self.writeDataReference(43, player.Namecolor)
self.writeVint(0)
self.writeVint(0)
self.writeVint(0)
self.writeVint(0)
self.writeVint(len(ownedSkins))
for skinID in ownedSkins:
self.writeDataReference(29, skinID)
self.writeVint(0)
self.writeVint(0)
self.writeVint(0)
self.writeVint(player.HighestTrophies)
self.writeVint(0)
self.writeVint(1)
self.writeBoolean(True)
self.writeVint(player.TokensDoubler)
self.writeVint(0)
self.writeVint(0)
self.writeVint(0)
self.writeVint(141)
self.writeVint(135)
self.writeVint(5)
self.writeVint(93)
self.writeVint(206)
self.writeVint(456)
self.writeVint(792)
self.writeVint(729)
self.writeBoolean(False)
self.writeBoolean(False)
self.writeBoolean(True)
self.writeVint(2)
self.writeVint(2)
self.writeVint(2)
self.writeVint(0)
self.writeVint(0)
self.writeVint(1)
self.writeVint(1)
for i in range(1):
self.writeVint(6)
self.writeVint(0)
self.writeDataReference(0)
self.writeVint(0)
self.writeVint(0)
self.writeVint(666)
self.writeVint(950400)
self.writeVint(2)
self.writeVint(0)
self.writeBoolean(False)
self.writeVint(3917)
self.writeVint(0)
self.writeBoolean(False)
self.writeVint(49)
self.writeInt(0)
self.writeString("Unlock all skins")
self.writeBoolean(False)
self.writeString()
self.writeVint(-1)
self.writeBoolean(False)
self.writeVint(0)
self.writeVint(0)
self.writeString()
self.writeBoolean(False)
self.writeBoolean(False)
self.writeVint(0)
self.writeVint(player.Tokens)
self.writeVint(-1)
self.writeVint(0)
self.writeVint(0)
self.writeVint(0)
self.writeVint(len(player.SelectedBrawlers))
for i in player.SelectedBrawlers:
self.writeDataReference(16, i)
self.writeString(player.Region)
self.writeString(player.ContentCreator)
self.writeVint(19)
self.writeLong(2, 1)
self.writeLong(3, 0)
self.writeLong(4, 0)
self.writeLong(6, 0)
self.writeLong(7, 0)
self.writeLong(8, 0)
self.writeLong(9, 1)
self.writeLong(10, 0)
self.writeLong(12, 1)
self.writeLong(14, 0)
self.writeLong(15, 0)
self.writeLong(16, 1)
self.writeLong(17, 1)
self.writeLong(18, 1)
self.writeLong(19, 1)
self.writeLong(20, 0)
self.writeLong(21, 1)
self.writeLong(22, 1)
self.writeLong(24, 1)
self.writeVint(0)
self.writeVint(2)
for i in range(8, 10):
self.writeVint(i)
self.writeVint(34500)
self.writeBoolean(True)
self.writeVint(0)
self.writeUInt8(2)
self.writeUInt(4294967292)
self.writeUInt(4294967295)
self.writeUInt(511)
self.writeUInt(0)
self.writeUInt8(1)
self.writeUInt(4294967292)
self.writeUInt(4294967295)
self.writeUInt(511)
self.writeUInt(0)
self.writeVint(0)
self.writeBoolean(True)
self.writeVint(0)
self.writeBoolean(True)
self.writeVint(ownedPinsCount + ownedThumbnailCount)
for i in player.OwnedPins:
self.writeDataReference(52, i)
self.writeVint(1)
for i in range(1):
self.writeVint(1)
self.writeVint(1)
for i in player.OwnedThumbnails:
self.writeDataReference(28, i)
self.writeVint(1)
for i in range(1):
self.writeVint(1)
self.writeVint(1)
self.writeBoolean(False)
self.writeInt(0)
self.writeVint(0)
self.writeVint(25)
self.writeVint(1)
self.writeVint(2)
self.writeVint(3)
self.writeVint(4)
self.writeVint(5)
self.writeVint(6)
self.writeVint(7)
self.writeVint(8)
self.writeVint(9)
self.writeVint(10)
self.writeVint(11)
self.writeVint(12)
self.writeVint(13)
self.writeVint(14)
self.writeVint(15)
self.writeVint(16)
self.writeVint(17)
self.writeVint(20)
self.writeVint(21)
self.writeVint(22)
self.writeVint(23)
self.writeVint(24)
self.writeVint(30)
self.writeVint(31)
self.writeVint(32)
self.writeVint(3)
eventIndex = 1
for i in [5, 7, 24]:
self.writeVint(-1)
self.writeVint(eventIndex)
self.writeVint(0)
self.writeVint(51208)
self.writeVint(0)
self.writeDataReference(15, i)
self.writeVint(-1)
self.writeVint(2)
self.writeString()
self.writeVint(0)
self.writeVint(0)
self.writeVint(0)
self.writeVint(0)
self.writeVint(0)
self.writeVint(0)
self.writeBoolean(False)
self.writeVint(0)
self.writeBoolean(False)
self.writeVint(0)
self.writeVint(0)
self.writeBoolean(False)
self.writeBoolean(False)
self.writeBoolean(False)
self.writeVint(-1)
self.writeBoolean(False)
self.writeBoolean(False)
eventIndex += 1
self.writeVint(0)
self.writeVint(10)
self.writeVint(20)
self.writeVint(35)
self.writeVint(75)
self.writeVint(140)
self.writeVint(290)
self.writeVint(480)
self.writeVint(800)
self.writeVint(1250)
self.writeVint(1875)
self.writeVint(2800)
self.writeVint(4)
self.writeVint(20)
self.writeVint(50)
self.writeVint(140)
self.writeVint(280)
self.writeVint(4)
self.writeVint(150)
self.writeVint(400)
self.writeVint(1200)
self.writeVint(2600)
self.writeBoolean(True)
self.writeVint(0)
self.writeVint(23)
self.writeLong(10008, 501)
self.writeLong(65, 2)
self.writeLong(1, 41000036)
self.writeLong(60, 36270)
self.writeLong(66, 1)
self.writeLong(61, 36270)
self.writeLong(47, 41381)
self.writeLong(29, 0)
self.writeLong(48, 41381)
self.writeLong(50, 0)
self.writeLong(1100, 500)
self.writeLong(1101, 500)
self.writeLong(1003, 1)
self.writeLong(36, 0)
self.writeLong(14, 0)
self.writeLong(31, 0)
self.writeLong(79, 149999)
self.writeLong(80, 160000)
self.writeLong(28, 4)
self.writeLong(74, 1)
self.writeLong(78, 1)
self.writeLong(17, 4)
self.writeLong(10046, 1)
self.writeVint(0)
self.writeVint(0)
self.writeVint(0)
self.writeVint(0)
self.writeVint(0)
self.writeLong(player.ID[0], player.ID[1])
self.writeVint(0)
self.writeVint(-1)
self.writeBoolean(False)
self.writeVint(0)
self.writeVint(0)
self.writeVint(0)
self.writeVLong(player.ID[0], player.ID[1])
self.writeVLong(0, 0)
self.writeVLong(0, 0)
self.writeString(player.Name)
self.writeBoolean(player.Registered)
self.writeInt(0)
self.writeVint(15)
self.writeVint(3 + ownedBrawlersCount)
for brawlerInfo in player.OwnedBrawlers.values():
self.writeDataReference(23, brawlerInfo["CardID"])
self.writeVint(1)
self.writeDataReference(5, 8)
self.writeVint(player.Coins)
self.writeDataReference(5, 10)
self.writeVint(player.StarPoints)
self.writeDataReference(5, 13)
self.writeVint(99999)
self.writeVint(ownedBrawlersCount)
for brawlerID,brawlerInfo in player.OwnedBrawlers.items():
self.writeDataReference(16, brawlerID)
self.writeVint(brawlerInfo["Trophies"])
self.writeVint(ownedBrawlersCount)
for brawlerID, brawlerInfo in player.OwnedBrawlers.items():
self.writeDataReference(16, brawlerID)
self.writeVint(brawlerInfo["HighestTrophies"])
self.writeVint(0)
self.writeVint(ownedBrawlersCount)
for brawlerID, brawlerInfo in player.OwnedBrawlers.items():
self.writeDataReference(16, brawlerID)
self.writeVint(brawlerInfo["PowerPoints"])
self.writeVint(ownedBrawlersCount)
for brawlerID, brawlerInfo in player.OwnedBrawlers.items():
self.writeDataReference(16, brawlerID)
self.writeVint(brawlerInfo["PowerLevel"] - 1)
self.writeVint(0)
self.writeVint(ownedBrawlersCount)
for brawlerID, brawlerInfo in player.OwnedBrawlers.items():
self.writeDataReference(16, brawlerID)
self.writeVint(brawlerInfo["State"])
self.writeVint(0)
self.writeVint(0)
self.writeVint(0)
self.writeVint(0)
self.writeVint(0)
self.writeVint(0)
self.writeVint(0)
self.writeVint(player.Gems)
self.writeVint(player.Gems)
self.writeVint(player.Level)
self.writeVint(100)
self.writeVint(0)
self.writeVint(0)
self.writeVint(0)
self.writeVint(0)
self.writeVint(0)
self.writeVint(0)
self.writeVint(0)
self.writeVint(2)
self.writeVint(0)
def decode(self):
fields = {}
return fields
def execute(message, calling_instance, fields):
pass
def getMessageType(self):
return 24101
def getMessageVersion(self):
return self.messageVersion
| true
| true
|
79049daa71c2792e4230d62fcf43754cca915b74
| 1,030
|
py
|
Python
|
app.py
|
flamanta/river-trash-detection
|
f61228352c0d5e352962e5dfc132f44865a349c8
|
[
"MIT"
] | null | null | null |
app.py
|
flamanta/river-trash-detection
|
f61228352c0d5e352962e5dfc132f44865a349c8
|
[
"MIT"
] | null | null | null |
app.py
|
flamanta/river-trash-detection
|
f61228352c0d5e352962e5dfc132f44865a349c8
|
[
"MIT"
] | 1
|
2020-10-10T04:59:54.000Z
|
2020-10-10T04:59:54.000Z
|
# DigitalOcean Flask server that receives an image for detection
from flask import Flask, request, jsonify
import classify
import base64
import json
import firebase
import env
# Instantiate Flask
app = Flask(__name__)
# health check
@app.route("/status")
def health_check():
return "Running!"
# Perform image recognition on an image sent as base64-encoded bytes in the POST payload
@app.route("/detect", methods=["POST"])
def detect():
imgBytes = request.data
imgdata = base64.b64decode(imgBytes)
with open("temp.png", "wb") as f:
f.write(imgdata)
print("successfully receieved image")
# Pass image bytes to classifier
result = classify.analyse("temp.png")
    # Return results as a neat JSON object using jsonify
result = jsonify(result)
print(result.json)
response_data = result.json
print(response_data)
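    # Persist the detection results to Firebase before returning them to the caller.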
db = firebase.Firebase()
db.authenticate()
db.push(response_data)
print("Updated Firebase.")
return result
if __name__ == "__main__":
app.run(host="0.0.0.0", port=80, debug=True)
| 20.196078
| 71
| 0.691262
|
from flask import Flask, request, jsonify
import classify
import base64
import json
import firebase
import env
app = Flask(__name__)
@app.route("/status")
def health_check():
return "Running!"
@app.route("/detect", methods=["POST"])
def detect():
imgBytes = request.data
imgdata = base64.b64decode(imgBytes)
with open("temp.png", "wb") as f:
f.write(imgdata)
print("successfully receieved image")
result = classify.analyse("temp.png")
result = jsonify(result)
print(result.json)
response_data = result.json
print(response_data)
db = firebase.Firebase()
db.authenticate()
db.push(response_data)
print("Updated Firebase.")
return result
if __name__ == "__main__":
app.run(host="0.0.0.0", port=80, debug=True)
| true
| true
|
79049dfac8e8811455c6071dfde7aa1cbd3abdd8
| 8,118
|
py
|
Python
|
infrastructure-provisioning/src/general/scripts/os/common_clean_instance.py
|
pjfanning/incubator-datalab
|
53a98c3deff17533e38f3c0d87eb6706b067f3c7
|
[
"Apache-2.0"
] | null | null | null |
infrastructure-provisioning/src/general/scripts/os/common_clean_instance.py
|
pjfanning/incubator-datalab
|
53a98c3deff17533e38f3c0d87eb6706b067f3c7
|
[
"Apache-2.0"
] | null | null | null |
infrastructure-provisioning/src/general/scripts/os/common_clean_instance.py
|
pjfanning/incubator-datalab
|
53a98c3deff17533e38f3c0d87eb6706b067f3c7
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
# *****************************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ******************************************************************************
import argparse
import os
import sys
from datalab.notebook_lib import *
from fabric import *
parser = argparse.ArgumentParser()
parser.add_argument('--hostname', type=str, default='')
parser.add_argument('--keyfile', type=str, default='')
parser.add_argument('--os_user', type=str, default='')
parser.add_argument('--application', type=str, default='')
args = parser.parse_args()
def general_clean():
try:
conn.sudo('systemctl stop ungit')
conn.sudo('systemctl stop inactive.timer')
conn.sudo('rm -f /etc/systemd/system/inactive.service')
conn.sudo('rm -f /etc/systemd/system/inactive.timer')
conn.sudo('rm -rf /opt/inactivity')
conn.sudo('npm -g uninstall ungit')
conn.sudo('rm -f /etc/systemd/system/ungit.service')
conn.sudo('systemctl daemon-reload')
remove_os_pkg(['nodejs', 'npm'])
conn.sudo('sed -i "/spark.*.memory/d" /opt/spark/conf/spark-defaults.conf')
except Exception as err:
print('Error: {0}'.format(err))
sys.exit(1)
def clean_jupyter():
try:
conn.sudo('systemctl stop jupyter-notebook')
conn.sudo('pip3 uninstall -y notebook jupyter')
conn.sudo('rm -rf /usr/local/share/jupyter/')
conn.sudo('rm -rf /home/{}/.jupyter/'.format(args.os_user))
conn.sudo('rm -rf /home/{}/.ipython/'.format(args.os_user))
conn.sudo('rm -rf /home/{}/.ipynb_checkpoints/'.format(args.os_user))
conn.sudo('rm -rf /home/{}/.local/share/jupyter/'.format(args.os_user))
conn.sudo('rm -f /etc/systemd/system/jupyter-notebook.service')
conn.sudo('systemctl daemon-reload')
except Exception as err:
print('Error: {0}'.format(err))
sys.exit(1)
def clean_jupyterlab():
try:
conn.sudo('systemctl stop jupyterlab-notebook')
conn.sudo('pip3 uninstall -y jupyterlab')
#conn.sudo('rm -rf /usr/local/share/jupyter/')
conn.sudo('rm -rf /home/{}/.jupyter/'.format(args.os_user))
conn.sudo('rm -rf /home/{}/.ipython/'.format(args.os_user))
conn.sudo('rm -rf /home/{}/.ipynb_checkpoints/'.format(args.os_user))
conn.sudo('rm -rf /home/{}/.local/share/jupyter/'.format(args.os_user))
conn.sudo('rm -f /etc/systemd/system/jupyterlab-notebook.service')
conn.sudo('systemctl daemon-reload')
except Exception as err:
print('Error: {0}'.format(err))
sys.exit(1)
def clean_zeppelin():
try:
conn.sudo('systemctl stop zeppelin-notebook')
conn.sudo('rm -rf /opt/zeppelin* /var/log/zeppelin /var/run/zeppelin')
if os.environ['notebook_multiple_clusters'] == 'true':
conn.sudo('systemctl stop livy-server')
conn.sudo('rm -rf /opt/livy* /var/run/livy')
conn.sudo('rm -f /etc/systemd/system/livy-server.service')
conn.sudo('rm -f /etc/systemd/system/zeppelin-notebook.service')
conn.sudo('systemctl daemon-reload')
except Exception as err:
print('Error: {0}'.format(err))
sys.exit(1)
def clean_rstudio():
try:
remove_os_pkg(['rstudio-server'])
conn.sudo('rm -f /home/{}/.Rprofile'.format(args.os_user))
conn.sudo('rm -f /home/{}/.Renviron'.format(args.os_user))
except Exception as err:
print('Error:', str(err))
sys.exit(1)
def clean_tensor():
try:
clean_jupyter()
conn.sudo('systemctl stop tensorboard')
conn.sudo('systemctl disable tensorboard')
conn.sudo('systemctl daemon-reload')
except Exception as err:
print('Error: {0}'.format(err))
sys.exit(1)
def clean_tensor_rstudio():
try:
clean_rstudio()
conn.sudo('systemctl stop tensorboard')
conn.sudo('systemctl disable tensorboard')
conn.sudo('systemctl daemon-reload')
except Exception as err:
print('Error: {0}'.format(err))
sys.exit(1)
def clean_tensor_jupyterlab():
try:
clean_jupyterlab()
conn.sudo('systemctl stop tensorboard')
conn.sudo('systemctl disable tensorboard')
conn.sudo('systemctl daemon-reload')
except Exception as err:
print('Error: {0}'.format(err))
sys.exit(1)
def clean_deeplearning():
try:
conn.sudo('systemctl stop ungit')
conn.sudo('systemctl stop inactive.timer')
conn.sudo('rm -f /etc/systemd/system/inactive.service')
conn.sudo('rm -f /etc/systemd/system/inactive.timer')
conn.sudo('rm -rf /opt/inactivity')
conn.sudo('npm -g uninstall ungit')
conn.sudo('rm -f /etc/systemd/system/ungit.service')
conn.sudo('systemctl daemon-reload')
remove_os_pkg(['nodejs', 'npm'])
conn.sudo('sed -i "/spark.*.memory/d" /opt/spark/conf/spark-defaults.conf')
# conn.sudo('systemctl stop tensorboard')
# conn.sudo('systemctl disable tensorboard')
# conn.sudo('systemctl daemon-reload')
clean_jupyter()
except Exception as err:
print('Error: {0}'.format(err))
sys.exit(1)
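# Entry point: connect to the instance and strip notebook software only when it was built from a data engine image rather than the default AMI.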
if __name__ == "__main__":
print('Configure connections')
global conn
conn = datalab.fab.init_datalab_connection(args.hostname, args.os_user, args.keyfile)
if os.environ['conf_cloud_provider'] == 'azure':
from datalab.actions_lib import ensure_right_mount_paths
ensure_right_mount_paths()
de_master_name = '{}-{}-{}-de-{}-m'.format(
os.environ['conf_service_base_name'],
os.environ['project_name'],
os.environ['endpoint_name'],
os.environ['computational_name'])
de_ami_id = AzureMeta().get_instance_image(os.environ['azure_resource_group_name'],
de_master_name)
default_ami_id = 'default'
else:
de_master_name = '{}-{}-{}-de-{}-m'.format(
os.environ['conf_service_base_name'],
os.environ['project_name'],
os.environ['endpoint_name'],
os.environ['computational_name'])
de_ami_id = get_ami_id_by_instance_name(de_master_name)
default_ami_id = get_ami_id(
os.environ['aws_{}_image_name'.format(os.environ['conf_os_family'])])
if de_ami_id != default_ami_id:
if args.application in os.environ['dataengine_image_notebooks'].split(','):
if args.application == 'deeplearning':
clean_deeplearning()
else:
general_clean()
if args.application == 'jupyter':
clean_jupyter()
elif args.application == 'zeppelin':
clean_zeppelin()
elif args.application == 'rstudio':
clean_rstudio()
elif args.application == 'tensor':
clean_tensor()
elif args.application == 'tensor-rstudio':
clean_tensor_rstudio()
elif args.application == 'tensor-jupyterlab':
clean_tensor_jupyterlab()
else:
print('Found default ami, do not make clean')
#conn.close()
sys.exit(0)
| 38.657143
| 91
| 0.612343
|
import argparse
import os
import sys
from datalab.notebook_lib import *
from fabric import *
parser = argparse.ArgumentParser()
parser.add_argument('--hostname', type=str, default='')
parser.add_argument('--keyfile', type=str, default='')
parser.add_argument('--os_user', type=str, default='')
parser.add_argument('--application', type=str, default='')
args = parser.parse_args()
def general_clean():
try:
conn.sudo('systemctl stop ungit')
conn.sudo('systemctl stop inactive.timer')
conn.sudo('rm -f /etc/systemd/system/inactive.service')
conn.sudo('rm -f /etc/systemd/system/inactive.timer')
conn.sudo('rm -rf /opt/inactivity')
conn.sudo('npm -g uninstall ungit')
conn.sudo('rm -f /etc/systemd/system/ungit.service')
conn.sudo('systemctl daemon-reload')
remove_os_pkg(['nodejs', 'npm'])
conn.sudo('sed -i "/spark.*.memory/d" /opt/spark/conf/spark-defaults.conf')
except Exception as err:
print('Error: {0}'.format(err))
sys.exit(1)
def clean_jupyter():
try:
conn.sudo('systemctl stop jupyter-notebook')
conn.sudo('pip3 uninstall -y notebook jupyter')
conn.sudo('rm -rf /usr/local/share/jupyter/')
conn.sudo('rm -rf /home/{}/.jupyter/'.format(args.os_user))
conn.sudo('rm -rf /home/{}/.ipython/'.format(args.os_user))
conn.sudo('rm -rf /home/{}/.ipynb_checkpoints/'.format(args.os_user))
conn.sudo('rm -rf /home/{}/.local/share/jupyter/'.format(args.os_user))
conn.sudo('rm -f /etc/systemd/system/jupyter-notebook.service')
conn.sudo('systemctl daemon-reload')
except Exception as err:
print('Error: {0}'.format(err))
sys.exit(1)
def clean_jupyterlab():
try:
conn.sudo('systemctl stop jupyterlab-notebook')
conn.sudo('pip3 uninstall -y jupyterlab')
conn.sudo('rm -rf /home/{}/.jupyter/'.format(args.os_user))
conn.sudo('rm -rf /home/{}/.ipython/'.format(args.os_user))
conn.sudo('rm -rf /home/{}/.ipynb_checkpoints/'.format(args.os_user))
conn.sudo('rm -rf /home/{}/.local/share/jupyter/'.format(args.os_user))
conn.sudo('rm -f /etc/systemd/system/jupyterlab-notebook.service')
conn.sudo('systemctl daemon-reload')
except Exception as err:
print('Error: {0}'.format(err))
sys.exit(1)
def clean_zeppelin():
try:
conn.sudo('systemctl stop zeppelin-notebook')
conn.sudo('rm -rf /opt/zeppelin* /var/log/zeppelin /var/run/zeppelin')
if os.environ['notebook_multiple_clusters'] == 'true':
conn.sudo('systemctl stop livy-server')
conn.sudo('rm -rf /opt/livy* /var/run/livy')
conn.sudo('rm -f /etc/systemd/system/livy-server.service')
conn.sudo('rm -f /etc/systemd/system/zeppelin-notebook.service')
conn.sudo('systemctl daemon-reload')
except Exception as err:
print('Error: {0}'.format(err))
sys.exit(1)
def clean_rstudio():
try:
remove_os_pkg(['rstudio-server'])
conn.sudo('rm -f /home/{}/.Rprofile'.format(args.os_user))
conn.sudo('rm -f /home/{}/.Renviron'.format(args.os_user))
except Exception as err:
print('Error:', str(err))
sys.exit(1)
def clean_tensor():
try:
clean_jupyter()
conn.sudo('systemctl stop tensorboard')
conn.sudo('systemctl disable tensorboard')
conn.sudo('systemctl daemon-reload')
except Exception as err:
print('Error: {0}'.format(err))
sys.exit(1)
def clean_tensor_rstudio():
try:
clean_rstudio()
conn.sudo('systemctl stop tensorboard')
conn.sudo('systemctl disable tensorboard')
conn.sudo('systemctl daemon-reload')
except Exception as err:
print('Error: {0}'.format(err))
sys.exit(1)
def clean_tensor_jupyterlab():
try:
clean_jupyterlab()
conn.sudo('systemctl stop tensorboard')
conn.sudo('systemctl disable tensorboard')
conn.sudo('systemctl daemon-reload')
except Exception as err:
print('Error: {0}'.format(err))
sys.exit(1)
def clean_deeplearning():
try:
conn.sudo('systemctl stop ungit')
conn.sudo('systemctl stop inactive.timer')
conn.sudo('rm -f /etc/systemd/system/inactive.service')
conn.sudo('rm -f /etc/systemd/system/inactive.timer')
conn.sudo('rm -rf /opt/inactivity')
conn.sudo('npm -g uninstall ungit')
conn.sudo('rm -f /etc/systemd/system/ungit.service')
conn.sudo('systemctl daemon-reload')
remove_os_pkg(['nodejs', 'npm'])
conn.sudo('sed -i "/spark.*.memory/d" /opt/spark/conf/spark-defaults.conf')
clean_jupyter()
except Exception as err:
print('Error: {0}'.format(err))
sys.exit(1)
if __name__ == "__main__":
print('Configure connections')
global conn
conn = datalab.fab.init_datalab_connection(args.hostname, args.os_user, args.keyfile)
if os.environ['conf_cloud_provider'] == 'azure':
from datalab.actions_lib import ensure_right_mount_paths
ensure_right_mount_paths()
de_master_name = '{}-{}-{}-de-{}-m'.format(
os.environ['conf_service_base_name'],
os.environ['project_name'],
os.environ['endpoint_name'],
os.environ['computational_name'])
de_ami_id = AzureMeta().get_instance_image(os.environ['azure_resource_group_name'],
de_master_name)
default_ami_id = 'default'
else:
de_master_name = '{}-{}-{}-de-{}-m'.format(
os.environ['conf_service_base_name'],
os.environ['project_name'],
os.environ['endpoint_name'],
os.environ['computational_name'])
de_ami_id = get_ami_id_by_instance_name(de_master_name)
default_ami_id = get_ami_id(
os.environ['aws_{}_image_name'.format(os.environ['conf_os_family'])])
if de_ami_id != default_ami_id:
if args.application in os.environ['dataengine_image_notebooks'].split(','):
if args.application == 'deeplearning':
clean_deeplearning()
else:
general_clean()
if args.application == 'jupyter':
clean_jupyter()
elif args.application == 'zeppelin':
clean_zeppelin()
elif args.application == 'rstudio':
clean_rstudio()
elif args.application == 'tensor':
clean_tensor()
elif args.application == 'tensor-rstudio':
clean_tensor_rstudio()
elif args.application == 'tensor-jupyterlab':
clean_tensor_jupyterlab()
else:
print('Found default ami, do not make clean')
sys.exit(0)
| true
| true
|
79049f677b5dcfa6f0d3dafc6c589c5010d7295e
| 16,206
|
py
|
Python
|
gammapy/irf/psf/gauss.py
|
mdebony/gammapy
|
29541fbfd90b0895ccc04fd3b9814a6f95511e14
|
[
"BSD-3-Clause"
] | null | null | null |
gammapy/irf/psf/gauss.py
|
mdebony/gammapy
|
29541fbfd90b0895ccc04fd3b9814a6f95511e14
|
[
"BSD-3-Clause"
] | null | null | null |
gammapy/irf/psf/gauss.py
|
mdebony/gammapy
|
29541fbfd90b0895ccc04fd3b9814a6f95511e14
|
[
"BSD-3-Clause"
] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import logging
import numpy as np
from astropy import units as u
from astropy.coordinates import Angle
from astropy.io import fits
from astropy.table import Table
from gammapy.maps import MapAxes, MapAxis
from gammapy.utils.array import array_stats_str
from gammapy.utils.gauss import MultiGauss2D
from gammapy.utils.interpolation import ScaledRegularGridInterpolator
from gammapy.utils.scripts import make_path
from .table import PSF3D, EnergyDependentTablePSF
__all__ = ["EnergyDependentMultiGaussPSF"]
log = logging.getLogger(__name__)
class EnergyDependentMultiGaussPSF:
"""Triple Gauss analytical PSF depending on energy and theta.
To evaluate the PSF call the ``to_energy_dependent_table_psf`` or ``psf_at_energy_and_theta`` methods.
Parameters
----------
energy_axis_true : `MapAxis`
True energy axis
offset_axis : `MapAxis`
Offset axis.
sigmas : list of 'numpy.ndarray'
Triple Gauss sigma parameters, where every entry is
a two dimensional 'numpy.ndarray' containing the sigma
value for every given energy and theta.
norms : list of 'numpy.ndarray'
Triple Gauss norm parameters, where every entry is
a two dimensional 'numpy.ndarray' containing the norm
value for every given energy and theta. Norm corresponds
to the value of the Gaussian at theta = 0.
meta : dict
Meta data
Examples
--------
Plot R68 of the PSF vs. theta and energy:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from gammapy.irf import EnergyDependentMultiGaussPSF
filename = '$GAMMAPY_DATA/cta-1dc/caldb/data/cta/1dc/bcf/South_z20_50h/irf_file.fits'
psf = EnergyDependentMultiGaussPSF.read(filename, hdu='POINT SPREAD FUNCTION')
psf.plot_containment(0.68)
plt.show()
"""
tag = "psf_3gauss"
def __init__(
self,
energy_axis_true,
offset_axis,
sigmas,
norms,
meta,
):
energy_axis_true.assert_name("energy_true")
offset_axis.assert_name("offset")
self._energy_axis_true = energy_axis_true
self._offset_axis = offset_axis
sigmas[0][sigmas[0] == 0] = 1
sigmas[1][sigmas[1] == 0] = 1
sigmas[2][sigmas[2] == 0] = 1
self.sigmas = sigmas
self.norms = norms
self.meta = meta or {}
self._interp_norms = self._setup_interpolators(self.norms)
self._interp_sigmas = self._setup_interpolators(self.sigmas)
@property
def energy_thresh_lo(self):
"""Low energy threshold"""
return self.meta["LO_THRES"] * u.TeV
@property
def energy_thresh_hi(self):
"""High energy threshold"""
return self.meta["HI_THRES"] * u.TeV
@property
def energy_axis_true(self):
return self._energy_axis_true
@property
def offset_axis(self):
return self._offset_axis
def _setup_interpolators(self, values_list):
interps = []
for values in values_list:
interp = ScaledRegularGridInterpolator(
points=(self.offset_axis.center, self.energy_axis_true.center),
values=values,
)
interps.append(interp)
return interps
@classmethod
def read(cls, filename, hdu="PSF_2D_GAUSS"):
"""Create `EnergyDependentMultiGaussPSF` from FITS file.
Parameters
----------
filename : str
File name
"""
with fits.open(str(make_path(filename)), memmap=False) as hdulist:
return cls.from_table_hdu(hdulist[hdu])
@classmethod
def from_table_hdu(cls, hdu):
"""Create `EnergyDependentMultiGaussPSF` from HDU list.
Parameters
----------
hdu : `~astropy.io.fits.BinTableHDU`
HDU
"""
table = Table.read(hdu)
energy_axis_true = MapAxis.from_table(
table, column_prefix="ENERG", format="gadf-dl3"
)
offset_axis = MapAxis.from_table(
table, column_prefix="THETA", format="gadf-dl3"
)
# Get sigmas
shape = (offset_axis.nbin, energy_axis_true.nbin)
sigmas = []
for key in ["SIGMA_1", "SIGMA_2", "SIGMA_3"]:
sigma = hdu.data[key].reshape(shape).copy()
sigmas.append(sigma)
# Get amplitudes
norms = []
for key in ["SCALE", "AMPL_2", "AMPL_3"]:
norm = hdu.data[key].reshape(shape).copy()
norms.append(norm)
return cls(
energy_axis_true=energy_axis_true,
offset_axis=offset_axis,
sigmas=sigmas,
norms=norms,
meta=dict(hdu.header)
)
def to_hdulist(self):
"""
Convert psf table data to FITS hdu list.
Returns
-------
hdu_list : `~astropy.io.fits.HDUList`
PSF in HDU list format.
"""
# Set up data
names = [
"SCALE",
"SIGMA_1",
"AMPL_2",
"SIGMA_2",
"AMPL_3",
"SIGMA_3",
]
units = ["", "deg", "", "deg", "", "deg"]
data = [
self.norms[0],
self.sigmas[0],
self.norms[1],
self.sigmas[1],
self.norms[2],
self.sigmas[2],
]
axes = MapAxes([self.energy_axis_true, self.offset_axis])
table = axes.to_table(format="gadf-dl3")
for name_, data_, unit_ in zip(names, data, units):
table[name_] = [data_]
table[name_].unit = unit_
# Create hdu and hdu list
hdu = fits.BinTableHDU(table)
hdu.header.update(self.meta)
return fits.HDUList([fits.PrimaryHDU(), hdu])
def write(self, filename, *args, **kwargs):
"""Write PSF to FITS file.
Calls `~astropy.io.fits.HDUList.writeto`, forwarding all arguments.
"""
self.to_hdulist().writeto(str(make_path(filename)), *args, **kwargs)
def psf_at_energy_and_theta(self, energy, theta):
"""
Get `~gammapy.modeling.models.MultiGauss2D` model for given energy and theta.
        The stored sigma and norm parameters are interpolated to the requested energy and offset.
Parameters
----------
energy : `~astropy.units.u.Quantity`
Energy at which a PSF is requested.
theta : `~astropy.coordinates.Angle`
Offset angle at which a PSF is requested.
Returns
-------
psf : `~gammapy.utils.gauss.MultiGauss2D`
Multigauss PSF object.
"""
energy = u.Quantity(energy)
theta = u.Quantity(theta)
sigmas, norms = [], []
pars = {"A_1": 1}
for interp_sigma in self._interp_sigmas:
sigma = interp_sigma((theta, energy))
sigmas.append(sigma)
for name, interp_norm in zip(["scale", "A_2", "A_3"], self._interp_norms):
pars[name] = interp_norm((theta, energy))
for idx, sigma in enumerate(sigmas):
a = pars[f"A_{idx + 1}"]
norm = pars["scale"] * 2 * a * sigma ** 2
norms.append(norm)
m = MultiGauss2D(sigmas, norms)
m.normalize()
return m
def containment_radius(self, energy, theta, fraction=0.68):
"""Compute containment for all energy and theta values"""
# This is a false positive from pylint
# See https://github.com/PyCQA/pylint/issues/2435
energies = u.Quantity(
energy
).flatten() # pylint:disable=assignment-from-no-return
thetas = Angle(theta).flatten()
radius = np.empty((theta.size, energy.size))
for idx, energy in enumerate(energies):
for jdx, theta in enumerate(thetas):
try:
psf = self.psf_at_energy_and_theta(energy, theta)
radius[jdx, idx] = psf.containment_radius(fraction)
except ValueError:
log.debug(
f"Computing containment failed for energy = {energy:.2f}"
f" and theta={theta:.2f}"
)
log.debug(f"Sigmas: {psf.sigmas} Norms: {psf.norms}")
radius[jdx, idx] = np.nan
return Angle(radius, "deg")
def plot_containment(self, fraction=0.68, ax=None, add_cbar=True, **kwargs):
"""
Plot containment image with energy and theta axes.
Parameters
----------
fraction : float
Containment fraction between 0 and 1.
add_cbar : bool
Add a colorbar
"""
import matplotlib.pyplot as plt
ax = plt.gca() if ax is None else ax
energy = self.energy_axis_true.center
offset = self.offset_axis.center
# Set up and compute data
containment = self.containment_radius(energy, offset, fraction)
# plotting defaults
kwargs.setdefault("cmap", "GnBu")
kwargs.setdefault("vmin", np.nanmin(containment.value))
kwargs.setdefault("vmax", np.nanmax(containment.value))
# Plotting
x = energy.value
y = offset.value
caxes = ax.pcolormesh(x, y, containment.value, **kwargs)
        # Axes labels and ticks, colorbar
ax.semilogx()
ax.set_ylabel(f"Offset ({offset.unit})")
ax.set_xlabel(f"Energy ({energy.unit})")
ax.set_xlim(x.min(), x.max())
ax.set_ylim(y.min(), y.max())
try:
self._plot_safe_energy_range(ax)
except KeyError:
pass
if add_cbar:
label = f"Containment radius R{100 * fraction:.0f} ({containment.unit})"
ax.figure.colorbar(caxes, ax=ax, label=label)
return ax
def _plot_safe_energy_range(self, ax):
"""add safe energy range lines to the plot"""
esafe = self.energy_thresh_lo
omin = self.offset_axis.center.min()
omax = self.offset_axis.center.max()
ax.vlines(x=esafe.value, ymin=omin.value, ymax=omax.value)
label = f"Safe energy threshold: {esafe:3.2f}"
ax.text(x=1.1 * esafe.value, y=0.3, s=label, va="top")
def plot_containment_vs_energy(
self, fractions=[0.68, 0.95], thetas=Angle([0, 1], "deg"), ax=None, **kwargs
):
"""Plot containment fraction as a function of energy.
"""
import matplotlib.pyplot as plt
ax = plt.gca() if ax is None else ax
energy = self.energy_axis_true.center
for theta in thetas:
for fraction in fractions:
radius = self.containment_radius(energy, theta, fraction).squeeze()
kwargs.setdefault("label", f"{theta.deg} deg, {100 * fraction:.1f}%")
ax.plot(energy.value, radius.value, **kwargs)
ax.semilogx()
ax.legend(loc="best")
ax.set_xlabel("Energy (TeV)")
ax.set_ylabel("Containment radius (deg)")
def peek(self, figsize=(15, 5)):
"""Quick-look summary plots."""
import matplotlib.pyplot as plt
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=figsize)
self.plot_containment(fraction=0.68, ax=axes[0])
self.plot_containment(fraction=0.95, ax=axes[1])
self.plot_containment_vs_energy(ax=axes[2])
# TODO: implement this plot
# psf = self.psf_at_energy_and_theta(energy='1 TeV', theta='1 deg')
# psf.plot_components(ax=axes[2])
plt.tight_layout()
def info(
self,
fractions=[0.68, 0.95],
energies=u.Quantity([1.0, 10.0], "TeV"),
thetas=u.Quantity([0.0], "deg"),
):
"""
Print PSF summary info.
The containment radius for given fraction, energies and thetas is
computed and printed on the command line.
Parameters
----------
fractions : list
Containment fraction to compute containment radius for.
energies : `~astropy.units.u.Quantity`
Energies to compute containment radius for.
thetas : `~astropy.units.u.Quantity`
Thetas to compute containment radius for.
Returns
-------
ss : string
Formatted string containing the summary info.
"""
ss = "\nSummary PSF info\n"
ss += "----------------\n"
ss += array_stats_str(self.offset_axis.center.to("deg"), "Theta")
ss += array_stats_str(self.energy_axis_true.edges[1:], "Energy hi")
ss += array_stats_str(self.energy_axis_true.edges[:-1], "Energy lo")
ss += f"Safe energy threshold lo: {self.energy_thresh_lo:6.3f}\n"
ss += f"Safe energy threshold hi: {self.energy_thresh_hi:6.3f}\n"
for fraction in fractions:
containment = self.containment_radius(energies, thetas, fraction)
for i, energy in enumerate(energies):
for j, theta in enumerate(thetas):
radius = containment[j, i]
ss += (
"{:2.0f}% containment radius at theta = {} and "
"E = {:4.1f}: {:5.8f}\n"
"".format(100 * fraction, theta, energy, radius)
)
return ss
def to_energy_dependent_table_psf(self, theta=None, rad=None, exposure=None):
"""Convert triple Gaussian PSF ot table PSF.
Parameters
----------
theta : `~astropy.coordinates.Angle`
Offset in the field of view. Default theta = 0 deg
rad : `~astropy.coordinates.Angle`
Offset from PSF center used for evaluating the PSF on a grid.
Default offset = [0, 0.005, ..., 1.495, 1.5] deg.
exposure : `~astropy.units.u.Quantity`
Energy dependent exposure. Should be in units equivalent to 'cm^2 s'.
Default exposure = 1.
Returns
-------
        table_psf : `~gammapy.irf.EnergyDependentTablePSF`
Instance of `EnergyDependentTablePSF`.
"""
# Convert energies to log center
energies = self.energy_axis_true.center
# Defaults and input handling
if theta is None:
theta = Angle(0, "deg")
else:
theta = Angle(theta)
if rad is None:
rad = Angle(np.arange(0, 1.5, 0.005), "deg")
rad_axis = MapAxis.from_nodes(rad, name="rad")
psf_value = u.Quantity(np.zeros((energies.size, rad.size)), "deg^-2")
for idx, energy in enumerate(energies):
psf_gauss = self.psf_at_energy_and_theta(energy, theta)
psf_value[idx] = u.Quantity(psf_gauss(rad), "deg^-2")
return EnergyDependentTablePSF(
energy_axis_true=self.energy_axis_true,
rad_axis=rad_axis,
exposure=exposure,
data=psf_value,
)
def to_psf3d(self, rad=None):
"""Create a PSF3D from an analytical PSF.
Parameters
----------
rad : `~astropy.units.u.Quantity` or `~astropy.coordinates.Angle`
the array of position errors (rad) on which the PSF3D will be defined
Returns
-------
psf3d : `~gammapy.irf.PSF3D`
            the PSF3D. It will be defined on the same energy and offset values as the input PSF.
"""
offsets = self.offset_axis.center
energy = self.energy_axis_true.center
if rad is None:
rad = np.linspace(0, 0.66, 67) * u.deg
rad_axis = MapAxis.from_edges(rad, name="rad")
shape = (self.energy_axis_true.nbin, self.offset_axis.nbin, rad_axis.nbin)
psf_value = np.zeros(shape) * u.Unit("sr-1")
for idx, offset in enumerate(offsets):
table_psf = self.to_energy_dependent_table_psf(offset)
psf_value[:, idx, :] = table_psf.evaluate(energy, rad_axis.center)
return PSF3D(
energy_axis_true=self.energy_axis_true,
rad_axis=rad_axis,
offset_axis=self.offset_axis,
data=psf_value,
meta=self.meta.copy()
)
| 32.542169
| 106
| 0.578304
|
import logging
import numpy as np
from astropy import units as u
from astropy.coordinates import Angle
from astropy.io import fits
from astropy.table import Table
from gammapy.maps import MapAxes, MapAxis
from gammapy.utils.array import array_stats_str
from gammapy.utils.gauss import MultiGauss2D
from gammapy.utils.interpolation import ScaledRegularGridInterpolator
from gammapy.utils.scripts import make_path
from .table import PSF3D, EnergyDependentTablePSF
__all__ = ["EnergyDependentMultiGaussPSF"]
log = logging.getLogger(__name__)
class EnergyDependentMultiGaussPSF:
tag = "psf_3gauss"
def __init__(
self,
energy_axis_true,
offset_axis,
sigmas,
norms,
meta,
):
energy_axis_true.assert_name("energy_true")
offset_axis.assert_name("offset")
self._energy_axis_true = energy_axis_true
self._offset_axis = offset_axis
sigmas[0][sigmas[0] == 0] = 1
sigmas[1][sigmas[1] == 0] = 1
sigmas[2][sigmas[2] == 0] = 1
self.sigmas = sigmas
self.norms = norms
self.meta = meta or {}
self._interp_norms = self._setup_interpolators(self.norms)
self._interp_sigmas = self._setup_interpolators(self.sigmas)
@property
def energy_thresh_lo(self):
return self.meta["LO_THRES"] * u.TeV
@property
def energy_thresh_hi(self):
return self.meta["HI_THRES"] * u.TeV
@property
def energy_axis_true(self):
return self._energy_axis_true
@property
def offset_axis(self):
return self._offset_axis
def _setup_interpolators(self, values_list):
interps = []
for values in values_list:
interp = ScaledRegularGridInterpolator(
points=(self.offset_axis.center, self.energy_axis_true.center),
values=values,
)
interps.append(interp)
return interps
@classmethod
def read(cls, filename, hdu="PSF_2D_GAUSS"):
with fits.open(str(make_path(filename)), memmap=False) as hdulist:
return cls.from_table_hdu(hdulist[hdu])
@classmethod
def from_table_hdu(cls, hdu):
table = Table.read(hdu)
energy_axis_true = MapAxis.from_table(
table, column_prefix="ENERG", format="gadf-dl3"
)
offset_axis = MapAxis.from_table(
table, column_prefix="THETA", format="gadf-dl3"
)
shape = (offset_axis.nbin, energy_axis_true.nbin)
sigmas = []
for key in ["SIGMA_1", "SIGMA_2", "SIGMA_3"]:
sigma = hdu.data[key].reshape(shape).copy()
sigmas.append(sigma)
norms = []
for key in ["SCALE", "AMPL_2", "AMPL_3"]:
norm = hdu.data[key].reshape(shape).copy()
norms.append(norm)
return cls(
energy_axis_true=energy_axis_true,
offset_axis=offset_axis,
sigmas=sigmas,
norms=norms,
meta=dict(hdu.header)
)
def to_hdulist(self):
names = [
"SCALE",
"SIGMA_1",
"AMPL_2",
"SIGMA_2",
"AMPL_3",
"SIGMA_3",
]
units = ["", "deg", "", "deg", "", "deg"]
data = [
self.norms[0],
self.sigmas[0],
self.norms[1],
self.sigmas[1],
self.norms[2],
self.sigmas[2],
]
axes = MapAxes([self.energy_axis_true, self.offset_axis])
table = axes.to_table(format="gadf-dl3")
for name_, data_, unit_ in zip(names, data, units):
table[name_] = [data_]
table[name_].unit = unit_
hdu = fits.BinTableHDU(table)
hdu.header.update(self.meta)
return fits.HDUList([fits.PrimaryHDU(), hdu])
def write(self, filename, *args, **kwargs):
self.to_hdulist().writeto(str(make_path(filename)), *args, **kwargs)
def psf_at_energy_and_theta(self, energy, theta):
energy = u.Quantity(energy)
theta = u.Quantity(theta)
sigmas, norms = [], []
pars = {"A_1": 1}
for interp_sigma in self._interp_sigmas:
sigma = interp_sigma((theta, energy))
sigmas.append(sigma)
for name, interp_norm in zip(["scale", "A_2", "A_3"], self._interp_norms):
pars[name] = interp_norm((theta, energy))
for idx, sigma in enumerate(sigmas):
a = pars[f"A_{idx + 1}"]
norm = pars["scale"] * 2 * a * sigma ** 2
norms.append(norm)
m = MultiGauss2D(sigmas, norms)
m.normalize()
return m
def containment_radius(self, energy, theta, fraction=0.68):
energies = u.Quantity(
energy
).flatten()
thetas = Angle(theta).flatten()
radius = np.empty((theta.size, energy.size))
for idx, energy in enumerate(energies):
for jdx, theta in enumerate(thetas):
try:
psf = self.psf_at_energy_and_theta(energy, theta)
radius[jdx, idx] = psf.containment_radius(fraction)
except ValueError:
log.debug(
f"Computing containment failed for energy = {energy:.2f}"
f" and theta={theta:.2f}"
)
log.debug(f"Sigmas: {psf.sigmas} Norms: {psf.norms}")
radius[jdx, idx] = np.nan
return Angle(radius, "deg")
def plot_containment(self, fraction=0.68, ax=None, add_cbar=True, **kwargs):
import matplotlib.pyplot as plt
ax = plt.gca() if ax is None else ax
energy = self.energy_axis_true.center
offset = self.offset_axis.center
containment = self.containment_radius(energy, offset, fraction)
kwargs.setdefault("cmap", "GnBu")
kwargs.setdefault("vmin", np.nanmin(containment.value))
kwargs.setdefault("vmax", np.nanmax(containment.value))
x = energy.value
y = offset.value
caxes = ax.pcolormesh(x, y, containment.value, **kwargs)
ax.semilogx()
ax.set_ylabel(f"Offset ({offset.unit})")
ax.set_xlabel(f"Energy ({energy.unit})")
ax.set_xlim(x.min(), x.max())
ax.set_ylim(y.min(), y.max())
try:
self._plot_safe_energy_range(ax)
except KeyError:
pass
if add_cbar:
label = f"Containment radius R{100 * fraction:.0f} ({containment.unit})"
ax.figure.colorbar(caxes, ax=ax, label=label)
return ax
def _plot_safe_energy_range(self, ax):
esafe = self.energy_thresh_lo
omin = self.offset_axis.center.min()
omax = self.offset_axis.center.max()
ax.vlines(x=esafe.value, ymin=omin.value, ymax=omax.value)
label = f"Safe energy threshold: {esafe:3.2f}"
ax.text(x=1.1 * esafe.value, y=0.3, s=label, va="top")
def plot_containment_vs_energy(
self, fractions=[0.68, 0.95], thetas=Angle([0, 1], "deg"), ax=None, **kwargs
):
import matplotlib.pyplot as plt
ax = plt.gca() if ax is None else ax
energy = self.energy_axis_true.center
for theta in thetas:
for fraction in fractions:
radius = self.containment_radius(energy, theta, fraction).squeeze()
kwargs.setdefault("label", f"{theta.deg} deg, {100 * fraction:.1f}%")
ax.plot(energy.value, radius.value, **kwargs)
ax.semilogx()
ax.legend(loc="best")
ax.set_xlabel("Energy (TeV)")
ax.set_ylabel("Containment radius (deg)")
def peek(self, figsize=(15, 5)):
import matplotlib.pyplot as plt
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=figsize)
self.plot_containment(fraction=0.68, ax=axes[0])
self.plot_containment(fraction=0.95, ax=axes[1])
self.plot_containment_vs_energy(ax=axes[2])
plt.tight_layout()
def info(
self,
fractions=[0.68, 0.95],
energies=u.Quantity([1.0, 10.0], "TeV"),
thetas=u.Quantity([0.0], "deg"),
):
ss = "\nSummary PSF info\n"
ss += "----------------\n"
ss += array_stats_str(self.offset_axis.center.to("deg"), "Theta")
ss += array_stats_str(self.energy_axis_true.edges[1:], "Energy hi")
ss += array_stats_str(self.energy_axis_true.edges[:-1], "Energy lo")
ss += f"Safe energy threshold lo: {self.energy_thresh_lo:6.3f}\n"
ss += f"Safe energy threshold hi: {self.energy_thresh_hi:6.3f}\n"
for fraction in fractions:
containment = self.containment_radius(energies, thetas, fraction)
for i, energy in enumerate(energies):
for j, theta in enumerate(thetas):
radius = containment[j, i]
ss += (
"{:2.0f}% containment radius at theta = {} and "
"E = {:4.1f}: {:5.8f}\n"
"".format(100 * fraction, theta, energy, radius)
)
return ss
def to_energy_dependent_table_psf(self, theta=None, rad=None, exposure=None):
energies = self.energy_axis_true.center
if theta is None:
theta = Angle(0, "deg")
else:
theta = Angle(theta)
if rad is None:
rad = Angle(np.arange(0, 1.5, 0.005), "deg")
rad_axis = MapAxis.from_nodes(rad, name="rad")
psf_value = u.Quantity(np.zeros((energies.size, rad.size)), "deg^-2")
for idx, energy in enumerate(energies):
psf_gauss = self.psf_at_energy_and_theta(energy, theta)
psf_value[idx] = u.Quantity(psf_gauss(rad), "deg^-2")
return EnergyDependentTablePSF(
energy_axis_true=self.energy_axis_true,
rad_axis=rad_axis,
exposure=exposure,
data=psf_value,
)
def to_psf3d(self, rad=None):
offsets = self.offset_axis.center
energy = self.energy_axis_true.center
if rad is None:
rad = np.linspace(0, 0.66, 67) * u.deg
rad_axis = MapAxis.from_edges(rad, name="rad")
shape = (self.energy_axis_true.nbin, self.offset_axis.nbin, rad_axis.nbin)
psf_value = np.zeros(shape) * u.Unit("sr-1")
for idx, offset in enumerate(offsets):
table_psf = self.to_energy_dependent_table_psf(offset)
psf_value[:, idx, :] = table_psf.evaluate(energy, rad_axis.center)
return PSF3D(
energy_axis_true=self.energy_axis_true,
rad_axis=rad_axis,
offset_axis=self.offset_axis,
data=psf_value,
meta=self.meta.copy()
)
| true
| true
|
79049fc29922dc54a008d5f0cf947137a3610083
| 4,728
|
py
|
Python
|
bin/parse_new_files.py
|
LoganRickert/CPP-Builder-And-Documentator
|
c537b8d9380c23ad94073f9841b83e7e8137d27a
|
[
"CC0-1.0"
] | 2
|
2017-07-28T16:30:19.000Z
|
2018-05-16T02:26:48.000Z
|
bin/parse_new_files.py
|
LoganRickert/CPP-Manager
|
c537b8d9380c23ad94073f9841b83e7e8137d27a
|
[
"CC0-1.0"
] | null | null | null |
bin/parse_new_files.py
|
LoganRickert/CPP-Manager
|
c537b8d9380c23ad94073f9841b83e7e8137d27a
|
[
"CC0-1.0"
] | null | null | null |
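# Fills in generated C++ template files for a new class or project: reads the .cpp/.h stubs, substitutes the {{placeholder}} tokens, and builds a constructor plus getters/setters from (type, field_name) argument pairs.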
import sys
import datetime
def capitalize(string):
return string[0].upper() + string[1:]
action = sys.argv[1]
file_path = sys.argv[2]
project_name = sys.argv[3]
namespace = sys.argv[4]
now = datetime.datetime.now()
date = now.strftime("%m-%d-%Y %H:%M:%S")
args = sys.argv[6:]
username = "Logan Rickert"
def new_class():
file_name = sys.argv[5]
cpp_file_path = file_path + "src/" + file_name + ".cpp"
h_file_path = file_path + "include/" + file_name + ".h"
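# Remaining CLI arguments must come in (type, field_name) pairs; each pair becomes a member with a constructor argument, getter and setter.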
if len(args) % 2 != 0:
print "You must have an even amount of arguments!"
sys.exit()
parse = []
for arg in xrange(0,len(args),2):
parse.append([args[arg], args[arg + 1]])
cpp_file_contents = None
h_file_contents = None
with open(cpp_file_path, 'r') as f:
cpp_file_contents = f.read()
with open(h_file_path, 'r') as f:
h_file_contents = f.read()
cpp_file_contents = cpp_file_contents.replace(
"{{class_name}}", file_name
)
cpp_file_contents = cpp_file_contents.replace(
"{{namespace}}", namespace
)
cpp_file_contents = cpp_file_contents.replace(
"{{date}}", date
)
cpp_file_contents = cpp_file_contents.replace(
"{{username}}", username
)
if len(args) > 0:
construct_init = file_name + "::" + file_name + "("
for key, value in parse:
construct_init += key + " s" + capitalize(value) + ", "
construct_init = construct_init[:-2] + ") {"
cpp_file_contents = cpp_file_contents.replace(
"{{construct_init}}", construct_init
)
construct_init_equals = ""
for key, value in parse:
construct_init_equals += "\t" + value + " = s" + capitalize(value) + ";\n"
construct_init_equals += "}"
cpp_file_contents = cpp_file_contents.replace(
"{{construct_init_equals}}", construct_init_equals
)
getters_setters = ""
for key, value in parse:
getters_setters += """%s %s::get%s() {
return %s;
}
void %s::set%s(%s s%s) {
%s = s%s;
}
""" % (
key,
file_name,
capitalize(value),
value,
file_name,
capitalize(value),
key,
capitalize(value),
value,
capitalize(value)
)
getters_setters = getters_setters[:-2]
cpp_file_contents = cpp_file_contents.replace(
"{{getters_setters}}", getters_setters
)
else:
cpp_file_contents = cpp_file_contents.replace(
"\n{{construct_init}}\n", ""
)
cpp_file_contents = cpp_file_contents.replace(
"{{construct_init_equals}}\n", ""
)
cpp_file_contents = cpp_file_contents.replace(
"\n{{getters_setters}}\n", ""
)
with open(cpp_file_path, 'w') as f:
f.write(cpp_file_contents)
h_file_contents = h_file_contents.replace(
"{{class_name_caps}}", file_name.upper()
)
h_file_contents = h_file_contents.replace(
"{{class_name}}", file_name
)
h_file_contents = h_file_contents.replace(
"{{username}}", username
)
h_file_contents = h_file_contents.replace(
"{{namespace}}", namespace
)
h_file_contents = h_file_contents.replace(
"{{date}}", date
)
if len(args) > 0:
class_construct_full = file_name + "("
for key, value in parse:
class_construct_full += key + ", "
class_construct_full = class_construct_full[:-2] + ");"
h_file_contents = h_file_contents.replace(
"{{class_construct_full}}", class_construct_full
)
getters_setters = ""
for key, value in parse:
getters_setters += "\t\t" + key + " get" + capitalize(value) + "();\n"
getters_setters += '\n'
for key, value in parse:
getters_setters += "\t\tvoid set" + capitalize(value) + "(" + key + " s" + capitalize(value) + ");\n"
h_file_contents = h_file_contents.replace(
"{{getters_setters}}", getters_setters
)
class_fields = ""
for key, value in parse:
class_fields += "\t\t" + key + " " + value + ";\n"
h_file_contents = h_file_contents.replace(
"{{class_fields}}", class_fields
)
else:
h_file_contents = h_file_contents.replace(
"\n\t\t{{class_construct_full}}", ""
)
h_file_contents = h_file_contents.replace(
"{{getters_setters}}\n", ""
)
h_file_contents = h_file_contents.replace(
"{{class_fields}}", ""
)
with open(h_file_path, 'w') as f:
f.write(h_file_contents)
def new_main():
cpp_file_path = file_path + "/src/Main.cpp"
cpp_file_contents = None
h_file_contents = None
with open(cpp_file_path, 'r') as f:
cpp_file_contents = f.read()
cpp_file_contents = cpp_file_contents.replace(
"{{class_name}}", "Main"
)
cpp_file_contents = cpp_file_contents.replace(
"{{namespace}}", namespace
)
cpp_file_contents = cpp_file_contents.replace(
"{{username}}", username
)
cpp_file_contents = cpp_file_contents.replace(
"{{date}}", date
)
with open(cpp_file_path, 'w') as f:
f.write(cpp_file_contents)
if action == "class":
new_class()
elif action == "namespace" or action == "project":
new_main()
| 20.828194
| 104
| 0.666244
|
import sys
import datetime
def capitalize(string):
return string[0].upper() + string[1:]
action = sys.argv[1]
file_path = sys.argv[2]
project_name = sys.argv[3]
namespace = sys.argv[4]
now = datetime.datetime.now()
date = now.strftime("%m-%d-%Y %H:%M:%S")
args = sys.argv[6:]
username = "Logan Rickert"
def new_class():
file_name = sys.argv[5]
cpp_file_path = file_path + "src/" + file_name + ".cpp"
h_file_path = file_path + "include/" + file_name + ".h"
if len(args) % 2 != 0:
print "You must have an even amount of arguments!"
sys.exit()
parse = []
for arg in xrange(0,len(args),2):
parse.append([args[arg], args[arg + 1]])
cpp_file_contents = None
h_file_contents = None
with open(cpp_file_path, 'r') as f:
cpp_file_contents = f.read()
with open(h_file_path, 'r') as f:
h_file_contents = f.read()
cpp_file_contents = cpp_file_contents.replace(
"{{class_name}}", file_name
)
cpp_file_contents = cpp_file_contents.replace(
"{{namespace}}", namespace
)
cpp_file_contents = cpp_file_contents.replace(
"{{date}}", date
)
cpp_file_contents = cpp_file_contents.replace(
"{{username}}", username
)
if len(args) > 0:
construct_init = file_name + "::" + file_name + "("
for key, value in parse:
construct_init += key + " s" + capitalize(value) + ", "
construct_init = construct_init[:-2] + ") {"
cpp_file_contents = cpp_file_contents.replace(
"{{construct_init}}", construct_init
)
construct_init_equals = ""
for key, value in parse:
construct_init_equals += "\t" + value + " = s" + capitalize(value) + ";\n"
construct_init_equals += "}"
cpp_file_contents = cpp_file_contents.replace(
"{{construct_init_equals}}", construct_init_equals
)
getters_setters = ""
for key, value in parse:
getters_setters += """%s %s::get%s() {
return %s;
}
void %s::set%s(%s s%s) {
%s = s%s;
}
""" % (
key,
file_name,
capitalize(value),
value,
file_name,
capitalize(value),
key,
capitalize(value),
value,
capitalize(value)
)
getters_setters = getters_setters[:-2]
cpp_file_contents = cpp_file_contents.replace(
"{{getters_setters}}", getters_setters
)
else:
cpp_file_contents = cpp_file_contents.replace(
"\n{{construct_init}}\n", ""
)
cpp_file_contents = cpp_file_contents.replace(
"{{construct_init_equals}}\n", ""
)
cpp_file_contents = cpp_file_contents.replace(
"\n{{getters_setters}}\n", ""
)
with open(cpp_file_path, 'w') as f:
f.write(cpp_file_contents)
h_file_contents = h_file_contents.replace(
"{{class_name_caps}}", file_name.upper()
)
h_file_contents = h_file_contents.replace(
"{{class_name}}", file_name
)
h_file_contents = h_file_contents.replace(
"{{username}}", username
)
h_file_contents = h_file_contents.replace(
"{{namespace}}", namespace
)
h_file_contents = h_file_contents.replace(
"{{date}}", date
)
if len(args) > 0:
class_construct_full = file_name + "("
for key, value in parse:
class_construct_full += key + ", "
class_construct_full = class_construct_full[:-2] + ");"
h_file_contents = h_file_contents.replace(
"{{class_construct_full}}", class_construct_full
)
getters_setters = ""
for key, value in parse:
getters_setters += "\t\t" + key + " get" + capitalize(value) + "();\n"
getters_setters += '\n'
for key, value in parse:
getters_setters += "\t\tvoid set" + capitalize(value) + "(" + key + " s" + capitalize(value) + ");\n"
h_file_contents = h_file_contents.replace(
"{{getters_setters}}", getters_setters
)
class_fields = ""
for key, value in parse:
class_fields += "\t\t" + key + " " + value + ";\n"
h_file_contents = h_file_contents.replace(
"{{class_fields}}", class_fields
)
else:
h_file_contents = h_file_contents.replace(
"\n\t\t{{class_construct_full}}", ""
)
h_file_contents = h_file_contents.replace(
"{{getters_setters}}\n", ""
)
h_file_contents = h_file_contents.replace(
"{{class_fields}}", ""
)
with open(h_file_path, 'w') as f:
f.write(h_file_contents)
def new_main():
cpp_file_path = file_path + "/src/Main.cpp"
cpp_file_contents = None
h_file_contents = None
with open(cpp_file_path, 'r') as f:
cpp_file_contents = f.read()
cpp_file_contents = cpp_file_contents.replace(
"{{class_name}}", "Main"
)
cpp_file_contents = cpp_file_contents.replace(
"{{namespace}}", namespace
)
cpp_file_contents = cpp_file_contents.replace(
"{{username}}", username
)
cpp_file_contents = cpp_file_contents.replace(
"{{date}}", date
)
with open(cpp_file_path, 'w') as f:
f.write(cpp_file_contents)
if action == "class":
new_class()
elif action == "namespace" or action == "project":
new_main()
| false
| true
|
7904a04d4b3fa8725c8de1a5a3345de34d30e585
| 11,307
|
py
|
Python
|
train_model.py
|
shineyjg/cnn_captcha
|
1048494895ab6c1e4d5940025c02026386c32912
|
[
"Apache-2.0"
] | null | null | null |
train_model.py
|
shineyjg/cnn_captcha
|
1048494895ab6c1e4d5940025c02026386c32912
|
[
"Apache-2.0"
] | null | null | null |
train_model.py
|
shineyjg/cnn_captcha
|
1048494895ab6c1e4d5940025c02026386c32912
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import time
from PIL import Image
import random
import os
from sample import sample_conf
from tensorflow.python.framework.errors_impl import NotFoundError
# Setting the environment variables below enables CPU-only recognition
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
class TrainError(Exception):
pass
class TrainModel(object):
def __init__(self, img_path, char_set, model_save_dir, verify=False):
        # model save path
        self.model_save_dir = model_save_dir
        # shuffle the file order and optionally verify the image format
        self.img_path = img_path
        self.img_list = os.listdir(img_path)
        # verify the file format
        if verify:
            self.confirm_image_suffix()
        # shuffle the file order
        random.seed(time.time())
        random.shuffle(self.img_list)
        # read basic info: image width/height and captcha length
label, captcha_array = self.gen_captcha_text_image(self.img_list[0])
captcha_shape = captcha_array.shape
captcha_shape_len = len(captcha_shape)
if captcha_shape_len == 3:
image_height, image_width, channel = captcha_shape
self.channel = channel
elif captcha_shape_len == 2:
image_height, image_width = captcha_shape
else:
raise TrainError("图片转换为矩阵时出错,请检查图片格式")
        # initialize member variables
        # image size
        self.image_height = image_height
        self.image_width = image_width
        # captcha length (number of characters)
        self.max_captcha = len(label)
        # captcha character classes
        self.char_set = char_set
        self.char_set_len = len(char_set)
        # print basic information
print("-->图片尺寸: {} X {}".format(image_height, image_width))
print("-->验证码长度: {}".format(self.max_captcha))
print("-->验证码共{}类 {}".format(self.char_set_len, char_set))
print("-->使用测试集为 {}".format(img_path))
        # tf placeholders
        self.X = tf.placeholder(tf.float32, [None, image_height * image_width])  # feature vector
        self.Y = tf.placeholder(tf.float32, [None, self.max_captcha * self.char_set_len])  # labels
        self.keep_prob = tf.placeholder(tf.float32)  # dropout keep probability
self.w_alpha = 0.01
self.b_alpha = 0.1
# test model input and output
print(">>> Start model test")
batch_x, batch_y = self.get_batch(0, size=100)
print(">>> input batch images shape: {}".format(batch_x.shape))
print(">>> input batch labels shape: {}".format(batch_y.shape))
def gen_captcha_text_image(self, img_name):
"""
        Return one captcha as a numpy array together with its string label
:return:tuple (str, numpy.array)
"""
        # label
        label = img_name.split("_")[0]
        # file
        img_file = os.path.join(self.img_path, img_name)
        captcha_image = Image.open(img_file)
        captcha_array = np.array(captcha_image)  # convert to an array
return label, captcha_array
@staticmethod
def convert2gray(img):
"""
        Convert an image to grayscale: a 3-channel image is converted, a single-channel image is returned unchanged
:param img:
:return:
"""
if len(img.shape) > 2:
r, g, b = img[:, :, 0], img[:, :, 1], img[:, :, 2]
gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
return gray
else:
return img
def text2vec(self, text):
"""
        Convert a label to its one-hot encoding
:param text: str
:return: numpy.array
"""
text_len = len(text)
if text_len > self.max_captcha:
raise ValueError('验证码最长{}个字符'.format(self.max_captcha))
vector = np.zeros(self.max_captcha * self.char_set_len)
for i, ch in enumerate(text):
idx = i * self.char_set_len + self.char_set.index(ch)
vector[idx] = 1
return vector
def get_batch(self, n, size=128):
        batch_x = np.zeros([size, self.image_height * self.image_width])  # initialize
        batch_y = np.zeros([size, self.max_captcha * self.char_set_len])  # initialize
max_batch = int(len(self.img_list) / size)
# print(max_batch)
if max_batch - 1 < 0:
raise TrainError("训练集图片数量需要大于每批次训练的图片数量")
if n > max_batch - 1:
n = n % max_batch
s = n * size
e = (n + 1) * size
this_batch = self.img_list[s:e]
# print("{}:{}".format(s, e))
for i, img_name in enumerate(this_batch):
label, image_array = self.gen_captcha_text_image(img_name)
            image_array = self.convert2gray(image_array)  # grayscale the image
            batch_x[i, :] = image_array.flatten() / 255  # flatten to one dimension
            batch_y[i, :] = self.text2vec(label)  # generate the one-hot label
return batch_x, batch_y
def confirm_image_suffix(self):
        # verify the format of every file before training
print("开始校验所有图片后缀")
for index, img_name in enumerate(self.img_list):
print("{} image pass".format(index), end='\r')
if not img_name.endswith(sample_conf['image_suffix']):
raise TrainError('confirm images suffix:you request [.{}] file but get file [{}]'
.format(sample_conf['image_suffix'], img_name))
print("所有图片格式校验通过")
def model(self):
x = tf.reshape(self.X, shape=[-1, self.image_height, self.image_width, 1])
print(">>> input x: {}".format(x))
        # convolution layer 1
wc1 = tf.get_variable(name='wc1', shape=[3, 3, 1, 32], dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer())
bc1 = tf.Variable(self.b_alpha * tf.random_normal([32]))
conv1 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(x, wc1, strides=[1, 1, 1, 1], padding='SAME'), bc1))
conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
conv1 = tf.nn.dropout(conv1, self.keep_prob)
        # convolution layer 2
wc2 = tf.get_variable(name='wc2', shape=[3, 3, 32, 64], dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer())
bc2 = tf.Variable(self.b_alpha * tf.random_normal([64]))
conv2 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv1, wc2, strides=[1, 1, 1, 1], padding='SAME'), bc2))
conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
conv2 = tf.nn.dropout(conv2, self.keep_prob)
        # convolution layer 3
wc3 = tf.get_variable(name='wc3', shape=[3, 3, 64, 128], dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer())
bc3 = tf.Variable(self.b_alpha * tf.random_normal([128]))
conv3 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv2, wc3, strides=[1, 1, 1, 1], padding='SAME'), bc3))
conv3 = tf.nn.max_pool(conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
conv3 = tf.nn.dropout(conv3, self.keep_prob)
print(">>> convolution 3: ", conv3.shape)
next_shape = conv3.shape[1] * conv3.shape[2] * conv3.shape[3]
        # fully connected layer 1
wd1 = tf.get_variable(name='wd1', shape=[next_shape, 1024], dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer())
bd1 = tf.Variable(self.b_alpha * tf.random_normal([1024]))
dense = tf.reshape(conv3, [-1, wd1.get_shape().as_list()[0]])
dense = tf.nn.relu(tf.add(tf.matmul(dense, wd1), bd1))
dense = tf.nn.dropout(dense, self.keep_prob)
        # fully connected layer 2
wout = tf.get_variable('name', shape=[1024, self.max_captcha * self.char_set_len], dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer())
bout = tf.Variable(self.b_alpha * tf.random_normal([self.max_captcha * self.char_set_len]))
y_predict = tf.add(tf.matmul(dense, wout), bout)
return y_predict
def train_cnn(self):
y_predict = self.model()
print(">>> input batch predict shape: {}".format(y_predict.shape))
print(">>> End model test")
        # probability / loss
        cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=y_predict, labels=self.Y))
        # gradient descent
        optimizer = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(cost)
        # compute accuracy
        predict = tf.reshape(y_predict, [-1, self.max_captcha, self.char_set_len])  # predictions
        max_idx_p = tf.argmax(predict, 2)  # predicted characters
        max_idx_l = tf.argmax(tf.reshape(self.Y, [-1, self.max_captcha, self.char_set_len]), 2)  # labels
        # compute accuracy
correct_pred = tf.equal(max_idx_p, max_idx_l)
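        # reduce_min over axis 1: a sample only counts as correct when every character position matches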
accuracy = tf.reduce_mean(tf.reduce_min(tf.cast(correct_pred, tf.float32), axis=1))
        # model saver
saver = tf.train.Saver()
with tf.Session() as sess:
init = tf.global_variables_initializer()
sess.run(init)
            # restore an existing model if available
if os.path.exists(self.model_save_dir):
try:
saver.restore(sess, self.model_save_dir)
                # catch the error raised when the model directory contains no checkpoint
except NotFoundError:
print("model文件夹为空,将创建新模型")
else:
pass
step = 1
for i in range(3000):
batch_x, batch_y = self.get_batch(i, size=128)
_, cost_ = sess.run([optimizer, cost], feed_dict={self.X: batch_x, self.Y: batch_y, self.keep_prob: 0.75})
if step % 10 == 0:
batch_x_test, batch_y_test = self.get_batch(i, size=100)
acc = sess.run(accuracy, feed_dict={self.X: batch_x_test, self.Y: batch_y_test, self.keep_prob: 1.})
print("第{}次训练 >>> 准确率为 {} >>> loss {}".format(step, acc, cost_))
                    # save and stop once accuracy reaches 99%
if acc > 0.99:
saver.save(sess, self.model_save_dir)
break
                # save a checkpoint every 500 iterations
if i % 500 == 0:
saver.save(sess, self.model_save_dir)
step += 1
saver.save(sess, self.model_save_dir)
def recognize_captcha(self):
label, captcha_array = self.gen_captcha_text_image(random.choice(self.img_list))
f = plt.figure()
ax = f.add_subplot(111)
ax.text(0.1, 0.9, "origin:" + label, ha='center', va='center', transform=ax.transAxes)
plt.imshow(captcha_array)
        # predict the image
image = self.convert2gray(captcha_array)
image = image.flatten() / 255
y_predict = self.model()
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, self.model_save_dir)
predict = tf.argmax(tf.reshape(y_predict, [-1, self.max_captcha, self.char_set_len]), 2)
text_list = sess.run(predict, feed_dict={self.X: [image], self.keep_prob: 1.})
predict_text = text_list[0].tolist()
print("正确: {} 预测: {}".format(label, predict_text))
            # show the image and the prediction
p_text = ""
for p in predict_text:
p_text += str(self.char_set[p])
print(p_text)
plt.text(20, 1, 'predict:{}'.format(p_text))
plt.show()
def main():
train_image_dir = sample_conf["train_image_dir"]
char_set = sample_conf["char_set"]
model_save_dir = sample_conf["model_save_dir"]
tm = TrainModel(train_image_dir, char_set, model_save_dir, verify=False)
    tm.train_cnn()  # start training the model
    # tm.recognize_captcha()  # example: recognize a single image
if __name__ == '__main__':
main()
| 39.534965
| 122
| 0.580437
|
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import time
from PIL import Image
import random
import os
from sample import sample_conf
from tensorflow.python.framework.errors_impl import NotFoundError
class TrainError(Exception):
pass
class TrainModel(object):
def __init__(self, img_path, char_set, model_save_dir, verify=False):
self.model_save_dir = model_save_dir
self.img_path = img_path
self.img_list = os.listdir(img_path)
if verify:
self.confirm_image_suffix()
random.seed(time.time())
random.shuffle(self.img_list)
label, captcha_array = self.gen_captcha_text_image(self.img_list[0])
captcha_shape = captcha_array.shape
captcha_shape_len = len(captcha_shape)
if captcha_shape_len == 3:
image_height, image_width, channel = captcha_shape
self.channel = channel
elif captcha_shape_len == 2:
image_height, image_width = captcha_shape
else:
raise TrainError("图片转换为矩阵时出错,请检查图片格式")
self.image_height = image_height
self.image_width = image_width
self.max_captcha = len(label)
self.char_set = char_set
self.char_set_len = len(char_set)
print("-->图片尺寸: {} X {}".format(image_height, image_width))
print("-->验证码长度: {}".format(self.max_captcha))
print("-->验证码共{}类 {}".format(self.char_set_len, char_set))
print("-->使用测试集为 {}".format(img_path))
self.X = tf.placeholder(tf.float32, [None, image_height * image_width])
self.Y = tf.placeholder(tf.float32, [None, self.max_captcha * self.char_set_len])
self.keep_prob = tf.placeholder(tf.float32)
self.w_alpha = 0.01
self.b_alpha = 0.1
print(">>> Start model test")
batch_x, batch_y = self.get_batch(0, size=100)
print(">>> input batch images shape: {}".format(batch_x.shape))
print(">>> input batch labels shape: {}".format(batch_y.shape))
def gen_captcha_text_image(self, img_name):
label = img_name.split("_")[0]
img_file = os.path.join(self.img_path, img_name)
captcha_image = Image.open(img_file)
captcha_array = np.array(captcha_image)
return label, captcha_array
@staticmethod
def convert2gray(img):
if len(img.shape) > 2:
r, g, b = img[:, :, 0], img[:, :, 1], img[:, :, 2]
gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
return gray
else:
return img
def text2vec(self, text):
text_len = len(text)
if text_len > self.max_captcha:
raise ValueError('验证码最长{}个字符'.format(self.max_captcha))
vector = np.zeros(self.max_captcha * self.char_set_len)
for i, ch in enumerate(text):
idx = i * self.char_set_len + self.char_set.index(ch)
vector[idx] = 1
return vector
def get_batch(self, n, size=128):
batch_x = np.zeros([size, self.image_height * self.image_width])
batch_y = np.zeros([size, self.max_captcha * self.char_set_len])
max_batch = int(len(self.img_list) / size)
if max_batch - 1 < 0:
raise TrainError("训练集图片数量需要大于每批次训练的图片数量")
if n > max_batch - 1:
n = n % max_batch
s = n * size
e = (n + 1) * size
this_batch = self.img_list[s:e]
for i, img_name in enumerate(this_batch):
label, image_array = self.gen_captcha_text_image(img_name)
image_array = self.convert2gray(image_array)
batch_x[i, :] = image_array.flatten() / 255
batch_y[i, :] = self.text2vec(label)
return batch_x, batch_y
def confirm_image_suffix(self):
print("开始校验所有图片后缀")
for index, img_name in enumerate(self.img_list):
print("{} image pass".format(index), end='\r')
if not img_name.endswith(sample_conf['image_suffix']):
raise TrainError('confirm images suffix:you request [.{}] file but get file [{}]'
.format(sample_conf['image_suffix'], img_name))
print("所有图片格式校验通过")
def model(self):
x = tf.reshape(self.X, shape=[-1, self.image_height, self.image_width, 1])
print(">>> input x: {}".format(x))
wc1 = tf.get_variable(name='wc1', shape=[3, 3, 1, 32], dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer())
bc1 = tf.Variable(self.b_alpha * tf.random_normal([32]))
conv1 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(x, wc1, strides=[1, 1, 1, 1], padding='SAME'), bc1))
conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
conv1 = tf.nn.dropout(conv1, self.keep_prob)
wc2 = tf.get_variable(name='wc2', shape=[3, 3, 32, 64], dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer())
bc2 = tf.Variable(self.b_alpha * tf.random_normal([64]))
conv2 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv1, wc2, strides=[1, 1, 1, 1], padding='SAME'), bc2))
conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
conv2 = tf.nn.dropout(conv2, self.keep_prob)
wc3 = tf.get_variable(name='wc3', shape=[3, 3, 64, 128], dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer())
bc3 = tf.Variable(self.b_alpha * tf.random_normal([128]))
conv3 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv2, wc3, strides=[1, 1, 1, 1], padding='SAME'), bc3))
conv3 = tf.nn.max_pool(conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
conv3 = tf.nn.dropout(conv3, self.keep_prob)
print(">>> convolution 3: ", conv3.shape)
next_shape = conv3.shape[1] * conv3.shape[2] * conv3.shape[3]
wd1 = tf.get_variable(name='wd1', shape=[next_shape, 1024], dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer())
bd1 = tf.Variable(self.b_alpha * tf.random_normal([1024]))
dense = tf.reshape(conv3, [-1, wd1.get_shape().as_list()[0]])
dense = tf.nn.relu(tf.add(tf.matmul(dense, wd1), bd1))
dense = tf.nn.dropout(dense, self.keep_prob)
wout = tf.get_variable('name', shape=[1024, self.max_captcha * self.char_set_len], dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer())
bout = tf.Variable(self.b_alpha * tf.random_normal([self.max_captcha * self.char_set_len]))
y_predict = tf.add(tf.matmul(dense, wout), bout)
return y_predict
def train_cnn(self):
y_predict = self.model()
print(">>> input batch predict shape: {}".format(y_predict.shape))
print(">>> End model test")
cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=y_predict, labels=self.Y))
optimizer = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(cost)
predict = tf.reshape(y_predict, [-1, self.max_captcha, self.char_set_len])
max_idx_p = tf.argmax(predict, 2)
max_idx_l = tf.argmax(tf.reshape(self.Y, [-1, self.max_captcha, self.char_set_len]), 2)
correct_pred = tf.equal(max_idx_p, max_idx_l)
accuracy = tf.reduce_mean(tf.reduce_min(tf.cast(correct_pred, tf.float32), axis=1))
saver = tf.train.Saver()
with tf.Session() as sess:
init = tf.global_variables_initializer()
sess.run(init)
if os.path.exists(self.model_save_dir):
try:
saver.restore(sess, self.model_save_dir)
except NotFoundError:
print("model文件夹为空,将创建新模型")
else:
pass
step = 1
for i in range(3000):
batch_x, batch_y = self.get_batch(i, size=128)
_, cost_ = sess.run([optimizer, cost], feed_dict={self.X: batch_x, self.Y: batch_y, self.keep_prob: 0.75})
if step % 10 == 0:
batch_x_test, batch_y_test = self.get_batch(i, size=100)
acc = sess.run(accuracy, feed_dict={self.X: batch_x_test, self.Y: batch_y_test, self.keep_prob: 1.})
print("第{}次训练 >>> 准确率为 {} >>> loss {}".format(step, acc, cost_))
if acc > 0.99:
saver.save(sess, self.model_save_dir)
break
if i % 500 == 0:
saver.save(sess, self.model_save_dir)
step += 1
saver.save(sess, self.model_save_dir)
def recognize_captcha(self):
label, captcha_array = self.gen_captcha_text_image(random.choice(self.img_list))
f = plt.figure()
ax = f.add_subplot(111)
ax.text(0.1, 0.9, "origin:" + label, ha='center', va='center', transform=ax.transAxes)
plt.imshow(captcha_array)
image = self.convert2gray(captcha_array)
image = image.flatten() / 255
y_predict = self.model()
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, self.model_save_dir)
predict = tf.argmax(tf.reshape(y_predict, [-1, self.max_captcha, self.char_set_len]), 2)
text_list = sess.run(predict, feed_dict={self.X: [image], self.keep_prob: 1.})
predict_text = text_list[0].tolist()
print("正确: {} 预测: {}".format(label, predict_text))
p_text = ""
for p in predict_text:
p_text += str(self.char_set[p])
print(p_text)
plt.text(20, 1, 'predict:{}'.format(p_text))
plt.show()
def main():
train_image_dir = sample_conf["train_image_dir"]
char_set = sample_conf["char_set"]
model_save_dir = sample_conf["model_save_dir"]
tm = TrainModel(train_image_dir, char_set, model_save_dir, verify=False)
tm.train_cnn()
if __name__ == '__main__':
main()
| true
| true
|
7904a08ed39c9c940c519724269e4a13f846add2
| 13,875
|
py
|
Python
|
noiseprint/utility/gaussianMixture.py
|
steveazzolin/noiseprint
|
f42335c3ae641b620583c7dcd89063ca24a6437b
|
[
"BSD-4-Clause-UC"
] | null | null | null |
noiseprint/utility/gaussianMixture.py
|
steveazzolin/noiseprint
|
f42335c3ae641b620583c7dcd89063ca24a6437b
|
[
"BSD-4-Clause-UC"
] | null | null | null |
noiseprint/utility/gaussianMixture.py
|
steveazzolin/noiseprint
|
f42335c3ae641b620583c7dcd89063ca24a6437b
|
[
"BSD-4-Clause-UC"
] | null | null | null |
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#
# Copyright (c) 2017 Image Processing Research Group of University Federico II of Naples ('GRIP-UNINA').
# This software is delivered with Government Purpose Rights (GPR) under agreement number FA8750-16-2-0204.
#
# By downloading and/or using any of these files, you implicitly agree to all the
# terms of the license, as specified in the document LICENSE.txt
# (included in this package)
#
import numpy as np
from scipy.linalg import eigvalsh
from numpy.linalg import cholesky
from numpy.linalg import eigh
from numba import jit
import torch
class gm:
prioriProb = 0
outliersProb = 0
outliersNlogl = 0
mu = 0
listSigma = []
listSigmaInds = []
listSigmaType = []
# sigmaType = 0 # isotropic covariance
# sigmaType = 1 # diagonal covariance
# sigmaType = 2 # full covariance
# outliersProb < 0 # outliers are not managed
# outliersProb >= 0 # outliers are managed throught fixed nlogl (negative log likelihood)
# TODO: outliers managed throught fixed probability
def __init__(self, dim, listSigmaInds, listSigmaType, outliersProb = -1, outliersNlogl = 0, dtype = np.float32):
K = len(listSigmaInds)
S = len(listSigmaType)
self.listSigmaInds = listSigmaInds
self.listSigmaType = listSigmaType
self.outliersProb = outliersProb
self.outliersNlogl = outliersNlogl
self.prioriProb = (1.0-self.outliersProb) * np.ones((K, 1), dtype=dtype) / K
self.mu = np.zeros((K, dim), dtype=dtype)
self.listSigma = [None, ] * S
for s in range(S):
sigmaType = self.listSigmaType[s]
if sigmaType == 2: # full covariance
self.listSigma[s] = np.ones([dim, dim], dtype = dtype)
elif sigmaType == 1: # diagonal covariance
self.listSigma[s] = np.ones([1, dim], dtype = dtype)
else:
self.listSigma[s] = np.ones([], dtype = dtype)
def setRandomParams(self, X, regularizer = 0, randomState = np.random.get_state()):
[N, dim] = X.shape
K = len(self.listSigmaInds)
S = len(self.listSigmaType)
dtype = X.dtype
if self.outliersProb > 0:
self.prioriProb = (1.0-self.outliersProb) * np.ones((K, 1), dtype=dtype) / K
else:
self.prioriProb = np.ones((K, 1), dtype=dtype) / K
inds = randomState.random_integers(low=0,high=(N-1),size=(K,))
self.mu = X[inds, :]
varX = np.var(X, axis=0, keepdims=True)
if regularizer>0:
varX = varX + regularizer
elif regularizer<0:
varX = varX + np.abs(regularizer*np.spacing(np.max(varX)))
for s in range(S):
sigmaType = self.listSigmaType[s]
if sigmaType == 2: # full covariance
self.listSigma[s] = np.diag(varX.flatten())
elif sigmaType == 1: # diagonal covariance
self.listSigma[s] = varX
else:
self.listSigma[s] = np.mean(varX)
return inds
def setRandomParamsW(self, X, weights, regularizer = 0, randomState = np.random.get_state(), meanFlag = False):
[N, dim] = X.shape
K = len(self.listSigmaInds)
S = len(self.listSigmaType)
dtype = X.dtype
if self.outliersProb > 0:
self.prioriProb = (1.0-self.outliersProb) * np.ones((K, 1), dtype=dtype) / K
else:
self.prioriProb = np.ones((K, 1), dtype=dtype) / K
avrX = np.mean(X*weights, axis=0, keepdims=True)/np.mean(weights)
varX = np.mean(weights *((X - avrX) ** 2), axis=0, keepdims=True)/np.mean(weights)
indsW = np.sum(weights)*randomState.random_sample(size=(K,))
inds = [None, ] * K
weights = np.cumsum(weights.flatten())
for index in range(K):
inds[index] = np.count_nonzero(weights<=indsW[index])
self.mu = X[inds, :]
if meanFlag: self.mu[0,:] = avrX
#varX = np.var(X, axis=0, keepdims=True)
if regularizer>0:
varX = varX + regularizer
elif regularizer<0:
varX = varX + np.abs(regularizer*np.spacing(np.max(varX)))
for s in range(S):
sigmaType = self.listSigmaType[s]
if sigmaType == 2: # full covariance
self.listSigma[s] = np.diag(varX.flatten())
elif sigmaType == 1: # diagonal covariance
self.listSigma[s] = varX
else:
self.listSigma[s] = np.mean(varX)
return inds
def getNlogl(self, X):
[N, dim] = X.shape
K = len(self.listSigmaInds)
S = len(self.listSigmaType)
dtype = X.dtype
K0 = K
if self.outliersProb >= 0: K0 = K+1
nlogl = np.zeros([N, K0], dtype = dtype)
mahal = np.zeros([N, K ], dtype = dtype)
listLogDet = [None, ] * S
listLowMtx = [None, ] * S
for s in range(S):
sigmaType = self.listSigmaType[s]
sigma = self.listSigma[s]
if sigmaType == 2: # full covariance
try:
listLowMtx[s] = cholesky(sigma)
except:
# exceptional regularization
sigma_w, sigma_v = eigh(np.real(sigma))
sigma_w = np.maximum(sigma_w, np.spacing(np.max(sigma_w)))
sigma = np.matmul(np.matmul(sigma_v, np.diag(sigma_w)), (np.transpose(sigma_v,[1,0])))
try:
listLowMtx[s] = cholesky(sigma)
except:
sigma_w, sigma_v = eigh(np.real(sigma))
sigma_w = np.maximum(sigma_w, np.spacing(np.max(sigma_w)))
#print(np.min(sigma_w))
sigma = np.matmul(np.matmul(sigma_v, np.diag(sigma_w)), (np.transpose(sigma_v,[1,0])))
#print(sigma)
listLowMtx[s] = cholesky(sigma)
diagLowMtx = np.diag(listLowMtx[s])
listLogDet[s] = 2 * np.sum(np.log(diagLowMtx))
elif sigmaType == 1: # diagonal covariance
listLowMtx[s] = np.sqrt(sigma)
listLogDet[s] = np.sum(np.log(sigma))
else: # isotropic covariance
listLowMtx[s] = np.sqrt(sigma)
listLogDet[s] = dim * np.log(sigma)
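        # per-component Gaussian negative log-likelihood: 0.5 * (mahalanobis + log|Sigma| + dim * log(2*pi))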
constPi = dim*np.log(2*np.pi)
for k in range(K):
s = self.listSigmaInds[k]
sigmaType = self.listSigmaType[s]
lowMtx = listLowMtx[s]
logDet = listLogDet[s]
Xmu = X - self.mu[k,:]
if sigmaType == 2: # full covariance
Xmu = self.tmp(lowMtx, Xmu) #np.linalg.solve(lowMtx, Xmu.transpose()).transpose()
elif sigmaType == 1: # diagonal covariance
Xmu = Xmu / lowMtx
else: # isotropic covariance
Xmu = Xmu / lowMtx
mahal[:,k] = np.sum(Xmu * Xmu, axis = 1)
nlogl[:,k] = 0.5 * (mahal[:,k] + logDet + constPi)
if self.outliersProb >= 0:
nlogl[:, K] = self.outliersNlogl
return nlogl, mahal
@staticmethod
def tmp(lowMtx, Xmu):
return np.linalg.solve(lowMtx, Xmu.transpose()).transpose()
#lowMtx, Xmu = torch.tensor(lowMtx, device="cuda") , torch.tensor(Xmu, device="cuda")
#sa = torch.linalg.solve(lowMtx, Xmu.T).T
#return sa.cpu().numpy()
def getLoglh(self, X):
nlogl, _ = self.getNlogl(X)
logPrb = np.log(self.prioriProb)
if self.outliersProb >= 0:
#print(self.outliersProb)
logPrb = np.append(logPrb.squeeze(), np.log(self.outliersProb))
logPrb = logPrb.reshape((-1,1))
return logPrb.transpose((1,0)) - nlogl
def getLoglhInlier(self, X):
nlogl, _ = self.getNlogl(X)
K = self.prioriProb.size
logPrb = np.log(self.prioriProb)
logit = logPrb.transpose((1, 0)) - nlogl[:, :K]
maxll = np.max(logit, axis=1, keepdims=True)
prob = np.exp(logit - maxll)
dem = np.sum(prob, axis=1, keepdims=True)
#return (np.log(dem) + maxll - np.log(np.sum(self.prioriProb)))
return (np.log(dem) + maxll - np.log(np.sum(self.outliersProb)))
def maximizationParam(self, X, post, regularizer = 0):
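        # M-step: update priors, means and covariances from the posterior responsibilities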
[N, dim] = X.shape
K = len(self.listSigmaInds)
S = len(self.listSigmaType)
dtype = X.dtype
self.prioriProb = np.sum(post[:,:K], axis=0, keepdims=True).transpose([1, 0])
self.mu = np.tensordot(post, X, (0, 0)) / self.prioriProb
for s in range(S):
sigmaType = self.listSigmaType[s]
if sigmaType == 2: # full covariance
sigma = np.zeros([dim, dim], dtype=dtype)
sigmadem = np.zeros([], dtype=dtype)
for k in range(K):
if s == self.listSigmaInds[k]:
Xmu = X - self.mu[(k,), :]
Xmu = np.sqrt(post[:, (k,)]) * Xmu
sigma = sigma + np.tensordot(Xmu, Xmu, (0, 0))
sigmadem += self.prioriProb[k, 0]
sigma = sigma / sigmadem
if regularizer > 0:
sigma = sigma + regularizer * np.eye(dim)
elif regularizer < 0:
#sigma = sigma - regularizer * np.spacing(np.max(np.linalg.eigvalsh(sigma))) * np.eye(dim)
sigma = sigma + np.abs(regularizer * np.spacing(eigvalsh(sigma, eigvals=(dim - 1, dim - 1)))) * np.eye(dim)
elif sigmaType == 1: # diagonal covariance
sigma = np.zeros([1, dim], dtype=dtype)
sigmadem = np.zeros([], dtype=dtype)
for k in range(K):
if s == self.listSigmaInds[k]:
Xmu = X - self.mu[(k,), :]
sigma = sigma + np.tensordot(post[:, (k,)], (Xmu * Xmu), (0, 0))
sigmadem += self.prioriProb[k, 0]
sigma = sigma / sigmadem
if regularizer > 0:
sigma = sigma + regularizer
elif regularizer < 0:
sigma = sigma + + np.abs(regularizer * np.spacing(np.max(sigma)))
else: # isotropic covariance
sigma = np.zeros([], dtype=dtype)
sigmadem = np.zeros([], dtype=dtype)
for k in range(K):
if s == self.listSigmaInds[k]:
Xmu = X - self.mu[(k,), :]
sigma = sigma + np.dot(post[:, k], np.mean((Xmu * Xmu), axis=1))
sigmadem += self.prioriProb[k, 0]
sigma = sigma / sigmadem
if regularizer > 0:
sigma = sigma + regularizer
elif regularizer < 0:
sigma = sigma + np.abs(regularizer * np.spacing(sigma))
self.listSigma[s] = sigma
# normalize PComponents
if self.outliersProb < 0:
self.prioriProb = self.prioriProb / np.sum(self.prioriProb)
else:
self.outliersProb = np.sum(post[:,K])
dem = self.outliersProb + np.sum(self.prioriProb)
self.prioriProb = self.prioriProb / dem
self.outliersProb = self.outliersProb / dem
def expectation(self, X):
[post, avrLogl] = softmax(self.getLoglh(X))
return post, avrLogl
def expectationWeighed(self, X, weighed):
[post, avrLogl] = softmaxWeighed(self.getLoglh(X), weighed)
return post, avrLogl
def MEstep(self, X, post, regularizer = 0):
self.maximizationParam(X, post, regularizer = regularizer)
[post, avrLogl] = self.expectation(X)
return post, avrLogl
def MEstepWeighed(self, X, weights, post, regularizer = 0):
self.maximizationParam(X, post * weights, regularizer = regularizer)
[post, avrLogl] = self.expectationWeighed(X, weights)
return post, avrLogl
def EM(self, X, regularizer, maxIter, relErr = 1e-5):
[post, avrLogl_old] = self.expectation(X)
flagExit = 1
        # flagExit = 1  # max number of iterations
# flagExit = 0 # converged
for iter in range(maxIter):
[post, avrLogl] = self.MEstep(X, post, regularizer = regularizer)
diff = avrLogl - avrLogl_old
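            # converged once the log-likelihood gain is non-negative and below the relative tolerance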
if (diff >= 0) & (diff < relErr * np.abs(avrLogl)):
flagExit = 0
break
avrLogl_old = avrLogl
return avrLogl, flagExit, iter
def EMweighed(self, X, weights, regularizer, maxIter, relErr=1e-5):
[post, avrLogl_old] = self.expectationWeighed(X, weights)
flagExit = 1
        # flagExit = 1  # max number of iterations
# flagExit = 0 # converged
for iter in range(maxIter):
[post, avrLogl] = self.MEstepWeighed(X, weights, post, regularizer=regularizer)
diff = avrLogl - avrLogl_old
if (diff >= 0) & (diff < relErr * np.abs(avrLogl)):
flagExit = 0
break
avrLogl_old = avrLogl
return avrLogl, flagExit, iter
def softmax(logit):
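    # numerically stable softmax: subtract the row-wise max before exponentiating,
    # and recover the average log-likelihood from the log-sum-exp of the logits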
maxll = np.max(logit, axis = 1, keepdims=True)
prob = np.exp(logit - maxll)
dem = np.sum(prob, axis = 1, keepdims=True)
prob = prob / dem
avrLogl = np.mean(np.log(dem) + maxll)
return prob, avrLogl
def softmaxWeighed(logit, weights):
maxll = np.max(logit, axis = 1, keepdims=True)
prob = np.exp(logit - maxll)
dem = np.sum(prob, axis = 1, keepdims=True)
prob = prob / dem
avrLogl = np.mean(weights * (np.log(dem) + maxll)) / np.mean(weights)
return prob, avrLogl
| 39.756447
| 127
| 0.541333
|
import numpy as np
from scipy.linalg import eigvalsh
from numpy.linalg import cholesky
from numpy.linalg import eigh
from numba import jit
import torch
class gm:
prioriProb = 0
outliersProb = 0
outliersNlogl = 0
mu = 0
listSigma = []
listSigmaInds = []
listSigmaType = []
    def __init__(self, dim, listSigmaInds, listSigmaType, outliersProb = -1, outliersNlogl = 0, dtype = np.float32):
        K = len(listSigmaInds)
S = len(listSigmaType)
self.listSigmaInds = listSigmaInds
self.listSigmaType = listSigmaType
self.outliersProb = outliersProb
self.outliersNlogl = outliersNlogl
self.prioriProb = (1.0-self.outliersProb) * np.ones((K, 1), dtype=dtype) / K
self.mu = np.zeros((K, dim), dtype=dtype)
self.listSigma = [None, ] * S
for s in range(S):
sigmaType = self.listSigmaType[s]
if sigmaType == 2:
self.listSigma[s] = np.ones([dim, dim], dtype = dtype)
elif sigmaType == 1:
self.listSigma[s] = np.ones([1, dim], dtype = dtype)
else:
self.listSigma[s] = np.ones([], dtype = dtype)
def setRandomParams(self, X, regularizer = 0, randomState = np.random.get_state()):
[N, dim] = X.shape
K = len(self.listSigmaInds)
S = len(self.listSigmaType)
dtype = X.dtype
if self.outliersProb > 0:
self.prioriProb = (1.0-self.outliersProb) * np.ones((K, 1), dtype=dtype) / K
else:
self.prioriProb = np.ones((K, 1), dtype=dtype) / K
inds = randomState.random_integers(low=0,high=(N-1),size=(K,))
self.mu = X[inds, :]
varX = np.var(X, axis=0, keepdims=True)
if regularizer>0:
varX = varX + regularizer
elif regularizer<0:
varX = varX + np.abs(regularizer*np.spacing(np.max(varX)))
for s in range(S):
sigmaType = self.listSigmaType[s]
if sigmaType == 2:
self.listSigma[s] = np.diag(varX.flatten())
elif sigmaType == 1:
self.listSigma[s] = varX
else:
self.listSigma[s] = np.mean(varX)
return inds
def setRandomParamsW(self, X, weights, regularizer = 0, randomState = np.random.get_state(), meanFlag = False):
[N, dim] = X.shape
K = len(self.listSigmaInds)
S = len(self.listSigmaType)
dtype = X.dtype
if self.outliersProb > 0:
self.prioriProb = (1.0-self.outliersProb) * np.ones((K, 1), dtype=dtype) / K
else:
self.prioriProb = np.ones((K, 1), dtype=dtype) / K
avrX = np.mean(X*weights, axis=0, keepdims=True)/np.mean(weights)
varX = np.mean(weights *((X - avrX) ** 2), axis=0, keepdims=True)/np.mean(weights)
indsW = np.sum(weights)*randomState.random_sample(size=(K,))
inds = [None, ] * K
weights = np.cumsum(weights.flatten())
for index in range(K):
inds[index] = np.count_nonzero(weights<=indsW[index])
self.mu = X[inds, :]
if meanFlag: self.mu[0,:] = avrX
if regularizer>0:
varX = varX + regularizer
elif regularizer<0:
varX = varX + np.abs(regularizer*np.spacing(np.max(varX)))
for s in range(S):
sigmaType = self.listSigmaType[s]
if sigmaType == 2:
self.listSigma[s] = np.diag(varX.flatten())
elif sigmaType == 1:
self.listSigma[s] = varX
else:
self.listSigma[s] = np.mean(varX)
return inds
def getNlogl(self, X):
[N, dim] = X.shape
K = len(self.listSigmaInds)
S = len(self.listSigmaType)
dtype = X.dtype
K0 = K
if self.outliersProb >= 0: K0 = K+1
nlogl = np.zeros([N, K0], dtype = dtype)
mahal = np.zeros([N, K ], dtype = dtype)
listLogDet = [None, ] * S
listLowMtx = [None, ] * S
for s in range(S):
sigmaType = self.listSigmaType[s]
sigma = self.listSigma[s]
if sigmaType == 2:
try:
listLowMtx[s] = cholesky(sigma)
except:
sigma_w, sigma_v = eigh(np.real(sigma))
sigma_w = np.maximum(sigma_w, np.spacing(np.max(sigma_w)))
sigma = np.matmul(np.matmul(sigma_v, np.diag(sigma_w)), (np.transpose(sigma_v,[1,0])))
try:
listLowMtx[s] = cholesky(sigma)
except:
sigma_w, sigma_v = eigh(np.real(sigma))
sigma_w = np.maximum(sigma_w, np.spacing(np.max(sigma_w)))
sigma = np.matmul(np.matmul(sigma_v, np.diag(sigma_w)), (np.transpose(sigma_v,[1,0])))
listLowMtx[s] = cholesky(sigma)
diagLowMtx = np.diag(listLowMtx[s])
listLogDet[s] = 2 * np.sum(np.log(diagLowMtx))
elif sigmaType == 1:
listLowMtx[s] = np.sqrt(sigma)
listLogDet[s] = np.sum(np.log(sigma))
else:
listLowMtx[s] = np.sqrt(sigma)
listLogDet[s] = dim * np.log(sigma)
constPi = dim*np.log(2*np.pi)
for k in range(K):
s = self.listSigmaInds[k]
sigmaType = self.listSigmaType[s]
lowMtx = listLowMtx[s]
logDet = listLogDet[s]
Xmu = X - self.mu[k,:]
if sigmaType == 2:
Xmu = self.tmp(lowMtx, Xmu)
elif sigmaType == 1:
Xmu = Xmu / lowMtx
else:
Xmu = Xmu / lowMtx
mahal[:,k] = np.sum(Xmu * Xmu, axis = 1)
nlogl[:,k] = 0.5 * (mahal[:,k] + logDet + constPi)
if self.outliersProb >= 0:
nlogl[:, K] = self.outliersNlogl
return nlogl, mahal
@staticmethod
def tmp(lowMtx, Xmu):
return np.linalg.solve(lowMtx, Xmu.transpose()).transpose()
def getLoglh(self, X):
nlogl, _ = self.getNlogl(X)
logPrb = np.log(self.prioriProb)
if self.outliersProb >= 0:
logPrb = np.append(logPrb.squeeze(), np.log(self.outliersProb))
logPrb = logPrb.reshape((-1,1))
return logPrb.transpose((1,0)) - nlogl
def getLoglhInlier(self, X):
nlogl, _ = self.getNlogl(X)
K = self.prioriProb.size
logPrb = np.log(self.prioriProb)
logit = logPrb.transpose((1, 0)) - nlogl[:, :K]
maxll = np.max(logit, axis=1, keepdims=True)
prob = np.exp(logit - maxll)
dem = np.sum(prob, axis=1, keepdims=True)
return (np.log(dem) + maxll - np.log(np.sum(self.outliersProb)))
def maximizationParam(self, X, post, regularizer = 0):
[N, dim] = X.shape
K = len(self.listSigmaInds)
S = len(self.listSigmaType)
dtype = X.dtype
self.prioriProb = np.sum(post[:,:K], axis=0, keepdims=True).transpose([1, 0])
self.mu = np.tensordot(post, X, (0, 0)) / self.prioriProb
for s in range(S):
sigmaType = self.listSigmaType[s]
if sigmaType == 2:
sigma = np.zeros([dim, dim], dtype=dtype)
sigmadem = np.zeros([], dtype=dtype)
for k in range(K):
if s == self.listSigmaInds[k]:
Xmu = X - self.mu[(k,), :]
Xmu = np.sqrt(post[:, (k,)]) * Xmu
sigma = sigma + np.tensordot(Xmu, Xmu, (0, 0))
sigmadem += self.prioriProb[k, 0]
sigma = sigma / sigmadem
if regularizer > 0:
sigma = sigma + regularizer * np.eye(dim)
elif regularizer < 0:
sigma = sigma + np.abs(regularizer * np.spacing(eigvalsh(sigma, eigvals=(dim - 1, dim - 1)))) * np.eye(dim)
elif sigmaType == 1:
sigma = np.zeros([1, dim], dtype=dtype)
sigmadem = np.zeros([], dtype=dtype)
for k in range(K):
if s == self.listSigmaInds[k]:
Xmu = X - self.mu[(k,), :]
sigma = sigma + np.tensordot(post[:, (k,)], (Xmu * Xmu), (0, 0))
sigmadem += self.prioriProb[k, 0]
sigma = sigma / sigmadem
if regularizer > 0:
sigma = sigma + regularizer
elif regularizer < 0:
sigma = sigma + + np.abs(regularizer * np.spacing(np.max(sigma)))
else:
sigma = np.zeros([], dtype=dtype)
sigmadem = np.zeros([], dtype=dtype)
for k in range(K):
if s == self.listSigmaInds[k]:
Xmu = X - self.mu[(k,), :]
sigma = sigma + np.dot(post[:, k], np.mean((Xmu * Xmu), axis=1))
sigmadem += self.prioriProb[k, 0]
sigma = sigma / sigmadem
if regularizer > 0:
sigma = sigma + regularizer
elif regularizer < 0:
sigma = sigma + np.abs(regularizer * np.spacing(sigma))
self.listSigma[s] = sigma
if self.outliersProb < 0:
self.prioriProb = self.prioriProb / np.sum(self.prioriProb)
else:
self.outliersProb = np.sum(post[:,K])
dem = self.outliersProb + np.sum(self.prioriProb)
self.prioriProb = self.prioriProb / dem
self.outliersProb = self.outliersProb / dem
def expectation(self, X):
[post, avrLogl] = softmax(self.getLoglh(X))
return post, avrLogl
def expectationWeighed(self, X, weighed):
[post, avrLogl] = softmaxWeighed(self.getLoglh(X), weighed)
return post, avrLogl
def MEstep(self, X, post, regularizer = 0):
self.maximizationParam(X, post, regularizer = regularizer)
[post, avrLogl] = self.expectation(X)
return post, avrLogl
def MEstepWeighed(self, X, weights, post, regularizer = 0):
self.maximizationParam(X, post * weights, regularizer = regularizer)
[post, avrLogl] = self.expectationWeighed(X, weights)
return post, avrLogl
def EM(self, X, regularizer, maxIter, relErr = 1e-5):
[post, avrLogl_old] = self.expectation(X)
flagExit = 1
        for iter in range(maxIter):
[post, avrLogl] = self.MEstep(X, post, regularizer = regularizer)
diff = avrLogl - avrLogl_old
if (diff >= 0) & (diff < relErr * np.abs(avrLogl)):
flagExit = 0
break
avrLogl_old = avrLogl
return avrLogl, flagExit, iter
def EMweighed(self, X, weights, regularizer, maxIter, relErr=1e-5):
[post, avrLogl_old] = self.expectationWeighed(X, weights)
flagExit = 1
        for iter in range(maxIter):
[post, avrLogl] = self.MEstepWeighed(X, weights, post, regularizer=regularizer)
diff = avrLogl - avrLogl_old
if (diff >= 0) & (diff < relErr * np.abs(avrLogl)):
flagExit = 0
break
avrLogl_old = avrLogl
return avrLogl, flagExit, iter
def softmax(logit):
maxll = np.max(logit, axis = 1, keepdims=True)
prob = np.exp(logit - maxll)
dem = np.sum(prob, axis = 1, keepdims=True)
prob = prob / dem
avrLogl = np.mean(np.log(dem) + maxll)
return prob, avrLogl
def softmaxWeighed(logit, weights):
maxll = np.max(logit, axis = 1, keepdims=True)
prob = np.exp(logit - maxll)
dem = np.sum(prob, axis = 1, keepdims=True)
prob = prob / dem
avrLogl = np.mean(weights * (np.log(dem) + maxll)) / np.mean(weights)
return prob, avrLogl
| true
| true
|
7904a20c44284db3241f6b6fcfb29b1197cb2d9b
| 1,847
|
py
|
Python
|
appengine/standard/users/main.py
|
baditaflorin/python-docs-samples
|
f122cbc13f20336d15409b5bd9820377dcb65464
|
[
"Apache-2.0"
] | 2
|
2017-09-23T04:23:46.000Z
|
2021-06-11T01:23:06.000Z
|
appengine/standard/users/main.py
|
Acidburn0zzz/python-docs-samples
|
bc0924a6826cbdb669415b58fd5b2d8534d87aa1
|
[
"Apache-2.0"
] | 2
|
2021-06-10T23:54:32.000Z
|
2021-06-10T23:54:33.000Z
|
appengine/standard/users/main.py
|
Acidburn0zzz/python-docs-samples
|
bc0924a6826cbdb669415b58fd5b2d8534d87aa1
|
[
"Apache-2.0"
] | 2
|
2019-11-27T00:13:37.000Z
|
2021-03-24T00:05:36.000Z
|
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Sample Google App Engine application that demonstrates using the Users API
For more information about App Engine, see README.md under /appengine.
"""
# [START all]
from google.appengine.api import users
import webapp2
class MainPage(webapp2.RequestHandler):
def get(self):
user = users.get_current_user()
if user:
nickname = user.nickname()
logout_url = users.create_logout_url('/')
greeting = 'Welcome, {}! (<a href="{}">sign out</a>)'.format(
nickname, logout_url)
else:
login_url = users.create_login_url('/')
greeting = '<a href="{}">Sign in</a>'.format(login_url)
self.response.write(
'<html><body>{}</body></html>'.format(greeting))
class AdminPage(webapp2.RequestHandler):
def get(self):
user = users.get_current_user()
if user:
if users.is_current_user_admin():
self.response.write('You are an administrator.')
else:
self.response.write('You are not an administrator.')
else:
self.response.write('You are not logged in.')
app = webapp2.WSGIApplication([
('/', MainPage),
('/admin', AdminPage)
], debug=True)
# [END all]
| 30.278689
| 74
| 0.646995
|
from google.appengine.api import users
import webapp2
class MainPage(webapp2.RequestHandler):
def get(self):
user = users.get_current_user()
if user:
nickname = user.nickname()
logout_url = users.create_logout_url('/')
greeting = 'Welcome, {}! (<a href="{}">sign out</a>)'.format(
nickname, logout_url)
else:
login_url = users.create_login_url('/')
greeting = '<a href="{}">Sign in</a>'.format(login_url)
self.response.write(
'<html><body>{}</body></html>'.format(greeting))
class AdminPage(webapp2.RequestHandler):
def get(self):
user = users.get_current_user()
if user:
if users.is_current_user_admin():
self.response.write('You are an administrator.')
else:
self.response.write('You are not an administrator.')
else:
self.response.write('You are not logged in.')
app = webapp2.WSGIApplication([
('/', MainPage),
('/admin', AdminPage)
], debug=True)
| true
| true
|
7904a3040e8c6bf9902569d51ccd6879143a4351
| 1,963
|
py
|
Python
|
DailyCodingProblem/84_Amazon_Find_Islands_From_Matrix.py
|
RafayAK/CodingPrep
|
718eccb439db0f6e727806964766a40e8234c8a9
|
[
"MIT"
] | 5
|
2019-09-07T17:31:17.000Z
|
2022-03-05T09:59:46.000Z
|
DailyCodingProblem/84_Amazon_Find_Islands_From_Matrix.py
|
RafayAK/CodingPrep
|
718eccb439db0f6e727806964766a40e8234c8a9
|
[
"MIT"
] | null | null | null |
DailyCodingProblem/84_Amazon_Find_Islands_From_Matrix.py
|
RafayAK/CodingPrep
|
718eccb439db0f6e727806964766a40e8234c8a9
|
[
"MIT"
] | 2
|
2019-09-07T17:31:24.000Z
|
2019-10-28T16:10:52.000Z
|
"""
This problem was asked by Amazon.
Given a matrix of 1s and 0s, return the number of "islands" in the matrix.
A 1 represents land and 0 represents water, so an island is a group of 1s
that are neighboring whose perimeter is surrounded by water.
For example, this matrix has 4 islands.
1 0 0 0 0
0 0 1 1 0
0 1 1 0 0
0 0 0 0 0
1 1 0 0 1
1 1 0 0 1
"""
moves = [
# row, col
(0, 1), # west
(0, -1), # east
(1, 0), # south
(-1, 0), # north
(1,1), # south-west
(1, -1), # south-east
(-1, 1), # north-west
(-1, -1) # north-east
]
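# Count islands by flood fill: mark_island does a recursive depth-first traversal over the
# 8-connected neighbourhood, overwriting an entire island with `marker` so it is counted once.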
def mark_island(row, col, land_map, marker):
if row < 0 or col<0 or row>=len(land_map) or col >= len(land_map[0]):
return land_map
if land_map[row][col]== 0:
return land_map
if land_map[row][col]== marker:
return land_map
if land_map[row][col] == 1:
land_map[row][col] = marker
for r,c in moves:
land_map = mark_island(row+r, col+c, land_map, marker)
return land_map
def find_num_of_islands(land_map):
islands_found = 0
for i in range(len(land_map)):
for j in range(len(land_map[0])):
if land_map[i][j] == 1:
islands_found+= 1
land_map = mark_island(i, j, land_map, marker='i')
# print(*land_map, sep='\n')
return islands_found
if __name__ == '__main__':
land_map = [
[1, 0, 0, 0, 0],
[0, 0, 1, 1, 0],
[0, 1, 1, 0, 0],
[0, 0, 0, 0, 0],
[1, 1, 0, 0, 1],
[1, 1, 0, 0, 1],
]
print(find_num_of_islands(land_map)) # 4
land_map = [
[1, 0, 0, 0, 0],
[0, 0, 1, 1, 0],
[0, 1, 1, 0, 0],
[0, 0, 0, 0, 0],
[1, 1, 0, 0, 1],
[1, 1, 0, 0, 1],
[1, 0, 0, 0, 0],
[0, 0, 1, 1, 0],
[0, 1, 1, 0, 0],
[0, 0, 0, 0, 0],
[1, 1, 0, 0, 1],
[1, 1, 0, 0, 1],
]
print(find_num_of_islands(land_map)) # 7
| 21.107527
| 74
| 0.496689
|
moves = [
(0, 1),
(0, -1),
(1, 0),
(-1, 0),
(1,1),
(1, -1),
(-1, 1),
(-1, -1)
]
def mark_island(row, col, land_map, marker):
if row < 0 or col<0 or row>=len(land_map) or col >= len(land_map[0]):
return land_map
if land_map[row][col]== 0:
return land_map
if land_map[row][col]== marker:
return land_map
if land_map[row][col] == 1:
land_map[row][col] = marker
for r,c in moves:
land_map = mark_island(row+r, col+c, land_map, marker)
return land_map
def find_num_of_islands(land_map):
islands_found = 0
for i in range(len(land_map)):
for j in range(len(land_map[0])):
if land_map[i][j] == 1:
islands_found+= 1
land_map = mark_island(i, j, land_map, marker='i')
return islands_found
if __name__ == '__main__':
land_map = [
[1, 0, 0, 0, 0],
[0, 0, 1, 1, 0],
[0, 1, 1, 0, 0],
[0, 0, 0, 0, 0],
[1, 1, 0, 0, 1],
[1, 1, 0, 0, 1],
]
print(find_num_of_islands(land_map))
land_map = [
[1, 0, 0, 0, 0],
[0, 0, 1, 1, 0],
[0, 1, 1, 0, 0],
[0, 0, 0, 0, 0],
[1, 1, 0, 0, 1],
[1, 1, 0, 0, 1],
[1, 0, 0, 0, 0],
[0, 0, 1, 1, 0],
[0, 1, 1, 0, 0],
[0, 0, 0, 0, 0],
[1, 1, 0, 0, 1],
[1, 1, 0, 0, 1],
]
print(find_num_of_islands(land_map))
| true
| true
|
7904a32bbc9136e18a310b57a5c186fce26b8e7e
| 3,981
|
py
|
Python
|
parl/remote/tests/cluster_test.py
|
GOnion/PARL
|
b29c45a6d2a187d1cfa8b5fe38e9c81b99ef37f2
|
[
"Apache-2.0"
] | 1
|
2020-08-04T13:56:12.000Z
|
2020-08-04T13:56:12.000Z
|
parl/remote/tests/cluster_test.py
|
GOnion/PARL
|
b29c45a6d2a187d1cfa8b5fe38e9c81b99ef37f2
|
[
"Apache-2.0"
] | null | null | null |
parl/remote/tests/cluster_test.py
|
GOnion/PARL
|
b29c45a6d2a187d1cfa8b5fe38e9c81b99ef37f2
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import parl
from parl.remote.master import Master
from parl.remote.worker import Worker
import time
import threading
from parl.remote.client import disconnect
from parl.remote import exceptions
import timeout_decorator
import subprocess
@parl.remote_class
class Actor(object):
def __init__(self, arg1=None, arg2=None):
self.arg1 = arg1
self.arg2 = arg2
def get_arg1(self):
return self.arg1
def get_arg2(self):
return self.arg2
def set_arg1(self, value):
self.arg1 = value
def set_arg2(self, value):
self.arg2 = value
def get_unable_serialize_object(self):
return UnableSerializeObject()
def add_one(self, value):
value += 1
return value
def add(self, x, y):
time.sleep(3)
return x + y
def will_raise_exception_func(self):
x = 1 / 0
class TestCluster(unittest.TestCase):
def tearDown(self):
disconnect()
#time.sleep(20)
#command = ("pkill -f remote/job.py")
#subprocess.call([command], shell=True)
def test_actor_exception(self):
master = Master(port=1235)
th = threading.Thread(target=master.run)
th.start()
time.sleep(1)
worker1 = Worker('localhost:1235', 1)
self.assertEqual(1, master.cpu_num)
parl.connect('localhost:1235')
with self.assertRaises(exceptions.RemoteError):
actor = Actor(abcd='a bug')
actor2 = Actor()
self.assertEqual(actor2.add_one(1), 2)
self.assertEqual(0, master.cpu_num)
master.exit()
worker1.exit()
@timeout_decorator.timeout(seconds=300)
def test_actor_exception(self):
master = Master(port=1236)
th = threading.Thread(target=master.run)
th.start()
time.sleep(1)
worker1 = Worker('localhost:1236', 1)
self.assertEqual(1, master.cpu_num)
parl.connect('localhost:1236')
actor = Actor()
try:
actor.will_raise_exception_func()
except:
pass
actor2 = Actor()
time.sleep(30)
self.assertEqual(actor2.add_one(1), 2)
self.assertEqual(0, master.cpu_num)
del actor
del actor2
worker1.exit()
master.exit()
def test_reset_actor(self):
# start the master
master = Master(port=1237)
th = threading.Thread(target=master.run)
th.start()
time.sleep(1)
worker1 = Worker('localhost:1237', 4)
parl.connect('localhost:1237')
for i in range(10):
actor = Actor()
ret = actor.add_one(1)
self.assertEqual(ret, 2)
del actor
time.sleep(20)
self.assertEqual(master.cpu_num, 4)
worker1.exit()
master.exit()
def test_add_worker(self):
master = Master(port=1234)
th = threading.Thread(target=master.run)
th.start()
time.sleep(1)
worker1 = Worker('localhost:1234', 4)
self.assertEqual(master.cpu_num, 4)
worker2 = Worker('localhost:1234', 4)
self.assertEqual(master.cpu_num, 8)
worker2.exit()
time.sleep(30)
self.assertEqual(master.cpu_num, 4)
master.exit()
worker1.exit()
if __name__ == '__main__':
unittest.main()
| 26.898649
| 74
| 0.621954
|
import unittest
import parl
from parl.remote.master import Master
from parl.remote.worker import Worker
import time
import threading
from parl.remote.client import disconnect
from parl.remote import exceptions
import timeout_decorator
import subprocess
@parl.remote_class
class Actor(object):
def __init__(self, arg1=None, arg2=None):
self.arg1 = arg1
self.arg2 = arg2
def get_arg1(self):
return self.arg1
def get_arg2(self):
return self.arg2
def set_arg1(self, value):
self.arg1 = value
def set_arg2(self, value):
self.arg2 = value
def get_unable_serialize_object(self):
return UnableSerializeObject()
def add_one(self, value):
value += 1
return value
def add(self, x, y):
time.sleep(3)
return x + y
def will_raise_exception_func(self):
x = 1 / 0
class TestCluster(unittest.TestCase):
def tearDown(self):
disconnect()
def test_actor_exception(self):
master = Master(port=1235)
th = threading.Thread(target=master.run)
th.start()
time.sleep(1)
worker1 = Worker('localhost:1235', 1)
self.assertEqual(1, master.cpu_num)
parl.connect('localhost:1235')
with self.assertRaises(exceptions.RemoteError):
actor = Actor(abcd='a bug')
actor2 = Actor()
self.assertEqual(actor2.add_one(1), 2)
self.assertEqual(0, master.cpu_num)
master.exit()
worker1.exit()
@timeout_decorator.timeout(seconds=300)
def test_actor_exception(self):
master = Master(port=1236)
th = threading.Thread(target=master.run)
th.start()
time.sleep(1)
worker1 = Worker('localhost:1236', 1)
self.assertEqual(1, master.cpu_num)
parl.connect('localhost:1236')
actor = Actor()
try:
actor.will_raise_exception_func()
except:
pass
actor2 = Actor()
time.sleep(30)
self.assertEqual(actor2.add_one(1), 2)
self.assertEqual(0, master.cpu_num)
del actor
del actor2
worker1.exit()
master.exit()
def test_reset_actor(self):
master = Master(port=1237)
th = threading.Thread(target=master.run)
th.start()
time.sleep(1)
worker1 = Worker('localhost:1237', 4)
parl.connect('localhost:1237')
for i in range(10):
actor = Actor()
ret = actor.add_one(1)
self.assertEqual(ret, 2)
del actor
time.sleep(20)
self.assertEqual(master.cpu_num, 4)
worker1.exit()
master.exit()
def test_add_worker(self):
master = Master(port=1234)
th = threading.Thread(target=master.run)
th.start()
time.sleep(1)
worker1 = Worker('localhost:1234', 4)
self.assertEqual(master.cpu_num, 4)
worker2 = Worker('localhost:1234', 4)
self.assertEqual(master.cpu_num, 8)
worker2.exit()
time.sleep(30)
self.assertEqual(master.cpu_num, 4)
master.exit()
worker1.exit()
if __name__ == '__main__':
unittest.main()
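# --- Illustrative sketch (not part of the original test file) ---
# Every test above follows the same lifecycle: start a Master in a thread,
# attach a Worker with some CPUs, connect the client, create remote Actor
# instances, then tear everything down. A minimal sketch of that pattern,
# using only the Master/Worker/parl calls already exercised by the tests
# (port 1240 is made up):
#
#   master = Master(port=1240)
#   threading.Thread(target=master.run).start()
#   worker = Worker('localhost:1240', 1)
#   parl.connect('localhost:1240')
#   actor = Actor()
#   assert actor.add_one(1) == 2
#   worker.exit()
#   master.exit()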
| true
| true
|
7904a352993473b0a3fcdaec0f4a8c7c0ca8c781
| 6,268
|
py
|
Python
|
onlinecourse/views.py
|
yashjain0112/djangoapp
|
3e55b947a78d42a56dc4a293d185de5a040cf2fb
|
[
"Apache-2.0"
] | null | null | null |
onlinecourse/views.py
|
yashjain0112/djangoapp
|
3e55b947a78d42a56dc4a293d185de5a040cf2fb
|
[
"Apache-2.0"
] | null | null | null |
onlinecourse/views.py
|
yashjain0112/djangoapp
|
3e55b947a78d42a56dc4a293d185de5a040cf2fb
|
[
"Apache-2.0"
] | null | null | null |
from django.shortcuts import render
from django.http import HttpResponseRedirect
# <HINT> Import any new Models here
from .models import Course, Enrollment, Question, Choice, Submission , Lesson
from django.contrib.auth.models import User
from django.shortcuts import get_object_or_404, render, redirect
from django.urls import reverse
from django.views import generic
from django.contrib.auth import login, logout, authenticate
import logging
# Get an instance of a logger
logger = logging.getLogger(__name__)
# Create your views here.
def registration_request(request):
context = {}
if request.method == 'GET':
return render(request, 'onlinecourse/user_registration_bootstrap.html', context)
elif request.method == 'POST':
# Check if user exists
username = request.POST['username']
password = request.POST['psw']
first_name = request.POST['firstname']
last_name = request.POST['lastname']
user_exist = False
try:
User.objects.get(username=username)
user_exist = True
except:
logger.error("New user")
if not user_exist:
user = User.objects.create_user(username=username, first_name=first_name, last_name=last_name,
password=password)
login(request, user)
return redirect("onlinecourse:index")
else:
context['message'] = "User already exists."
return render(request, 'onlinecourse/user_registration_bootstrap.html', context)
def login_request(request):
context = {}
if request.method == "POST":
username = request.POST['username']
password = request.POST['psw']
user = authenticate(username=username, password=password)
if user is not None:
login(request, user)
return redirect('onlinecourse:index')
else:
context['message'] = "Invalid username or password."
return render(request, 'onlinecourse/user_login_bootstrap.html', context)
else:
return render(request, 'onlinecourse/user_login_bootstrap.html', context)
def logout_request(request):
logout(request)
return redirect('onlinecourse:index')
def check_if_enrolled(user, course):
is_enrolled = False
if user.id is not None:
# Check if user enrolled
num_results = Enrollment.objects.filter(user=user, course=course).count()
if num_results > 0:
is_enrolled = True
return is_enrolled
# CourseListView
class CourseListView(generic.ListView):
template_name = 'onlinecourse/course_list_bootstrap.html'
context_object_name = 'course_list'
def get_queryset(self):
user = self.request.user
courses = Course.objects.order_by('-total_enrollment')[:10]
for course in courses:
if user.is_authenticated:
course.is_enrolled = check_if_enrolled(user, course)
return courses
class CourseDetailView(generic.DetailView):
model = Course
template_name = 'onlinecourse/course_detail_bootstrap.html'
def enroll(request, course_id):
course = get_object_or_404(Course, pk=course_id)
user = request.user
is_enrolled = check_if_enrolled(user, course)
if not is_enrolled and user.is_authenticated:
# Create an enrollment
Enrollment.objects.create(user=user, course=course, mode='honor')
course.total_enrollment += 1
course.save()
return HttpResponseRedirect(reverse(viewname='onlinecourse:course_details', args=(course.id,)))
# <HINT> Create a submit view to create an exam submission record for a course enrollment,
# you may implement it based on following logic:
# Get user and course object, then get the associated enrollment object created when the user enrolled the course
# Create a submission object referring to the enrollment
# Collect the selected choices from exam form
# Add each selected choice object to the submission object
# Redirect to show_exam_result with the submission id
# <HINT> An example method to collect the selected choices from the exam form in the request object
def extract_answers(request):
submitted_anwsers = []
for key in request.POST:
if key.startswith('choice'):
value = request.POST[key]
choice_id = int(value)
submitted_anwsers.append(choice_id)
return submitted_anwsers
def submit(request, course_id):
user = request.user
course = Course.objects.get(pk=course_id)
enrollment = Enrollment.objects.get(user=user, course=course)
submitted_anwsers = extract_answers(request)
submission = Submission.objects.create(enrollment=enrollment)
submission.chocies.set(submitted_anwsers)
print(submission)
return HttpResponseRedirect(reverse(viewname='onlinecourse:result', args=(course_id, submission.chocies.first().question.lesson.pk, submission.pk)))
# <HINT> Create an exam result view to check if learner passed exam and show their question results and result for each question,
# you may implement it based on the following logic:
# Get course and submission based on their ids
# Get the selected choice ids from the submission record
# For each selected choice, check if it is a correct answer or not
# Calculate the total score
def show_exam_result(request, course_id, lesson_id, submission_id):
from django.db.models import Sum
course = Course.objects.get(pk=course_id)
submission = Submission.objects.get(pk=submission_id)
selected_choices = submission.chocies.all()
lesson = Lesson.objects.get(pk=lesson_id)
questions = lesson.question_set.all()
total_mark = round(lesson.question_set.all().aggregate(Sum("grade"))["grade__sum"])
grade = 0
for question in questions:
if question.is_get_score(selected_choices):
grade += question.grade
ctx = {
'grade': round(grade),
'total_mark': total_mark,
'questions': questions,
'lesson': lesson,
'selected_choices': selected_choices,
}
return render(request , 'onlinecourse/exam_result_bootstrap.html' , ctx)
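# --- Illustrative sketch (not part of the original views.py) ---
# The HINT block above describes the grading logic implemented in
# show_exam_result: every question for which is_get_score(selected_choices)
# returns True contributes its full grade, and the sum is reported against the
# lesson's total mark. A tiny self-contained sketch of that accumulation using
# plain tuples instead of the Django models (the numbers are made up):
def _example_grade(question_results):
    # question_results: list of (grade, answered_correctly) tuples
    return sum(grade for grade, correct in question_results if correct)

# _example_grade([(10, True), (5, False), (5, True)]) == 15  (out of 20)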
| 36.654971
| 153
| 0.690651
|
from django.shortcuts import render
from django.http import HttpResponseRedirect
from .models import Course, Enrollment, Question, Choice, Submission , Lesson
from django.contrib.auth.models import User
from django.shortcuts import get_object_or_404, render, redirect
from django.urls import reverse
from django.views import generic
from django.contrib.auth import login, logout, authenticate
import logging
logger = logging.getLogger(__name__)
def registration_request(request):
context = {}
if request.method == 'GET':
return render(request, 'onlinecourse/user_registration_bootstrap.html', context)
elif request.method == 'POST':
username = request.POST['username']
password = request.POST['psw']
first_name = request.POST['firstname']
last_name = request.POST['lastname']
user_exist = False
try:
User.objects.get(username=username)
user_exist = True
except:
logger.error("New user")
if not user_exist:
user = User.objects.create_user(username=username, first_name=first_name, last_name=last_name,
password=password)
login(request, user)
return redirect("onlinecourse:index")
else:
context['message'] = "User already exists."
return render(request, 'onlinecourse/user_registration_bootstrap.html', context)
def login_request(request):
context = {}
if request.method == "POST":
username = request.POST['username']
password = request.POST['psw']
user = authenticate(username=username, password=password)
if user is not None:
login(request, user)
return redirect('onlinecourse:index')
else:
context['message'] = "Invalid username or password."
return render(request, 'onlinecourse/user_login_bootstrap.html', context)
else:
return render(request, 'onlinecourse/user_login_bootstrap.html', context)
def logout_request(request):
logout(request)
return redirect('onlinecourse:index')
def check_if_enrolled(user, course):
is_enrolled = False
if user.id is not None:
num_results = Enrollment.objects.filter(user=user, course=course).count()
if num_results > 0:
is_enrolled = True
return is_enrolled
class CourseListView(generic.ListView):
template_name = 'onlinecourse/course_list_bootstrap.html'
context_object_name = 'course_list'
def get_queryset(self):
user = self.request.user
courses = Course.objects.order_by('-total_enrollment')[:10]
for course in courses:
if user.is_authenticated:
course.is_enrolled = check_if_enrolled(user, course)
return courses
class CourseDetailView(generic.DetailView):
model = Course
template_name = 'onlinecourse/course_detail_bootstrap.html'
def enroll(request, course_id):
course = get_object_or_404(Course, pk=course_id)
user = request.user
is_enrolled = check_if_enrolled(user, course)
if not is_enrolled and user.is_authenticated:
Enrollment.objects.create(user=user, course=course, mode='honor')
course.total_enrollment += 1
course.save()
return HttpResponseRedirect(reverse(viewname='onlinecourse:course_details', args=(course.id,)))
def extract_answers(request):
submitted_anwsers = []
for key in request.POST:
if key.startswith('choice'):
value = request.POST[key]
choice_id = int(value)
submitted_anwsers.append(choice_id)
return submitted_anwsers
def submit(request, course_id):
user = request.user
course = Course.objects.get(pk=course_id)
enrollment = Enrollment.objects.get(user=user, course=course)
submitted_anwsers = extract_answers(request)
submission = Submission.objects.create(enrollment=enrollment)
submission.chocies.set(submitted_anwsers)
print(submission)
return HttpResponseRedirect(reverse(viewname='onlinecourse:result', args=(course_id, submission.chocies.first().question.lesson.pk, submission.pk)))
def show_exam_result(request, course_id, lesson_id, submission_id):
from django.db.models import Sum
course = Course.objects.get(pk=course_id)
submission = Submission.objects.get(pk=submission_id)
selected_choices = submission.chocies.all()
lesson = Lesson.objects.get(pk=lesson_id)
questions = lesson.question_set.all()
total_mark = round(lesson.question_set.all().aggregate(Sum("grade"))["grade__sum"])
grade = 0
for question in questions:
if question.is_get_score(selected_choices):
grade += question.grade
ctx = {
'grade': round(grade),
'total_mark': total_mark,
'questions': questions,
'lesson': lesson,
'selected_choices': selected_choices,
}
return render(request , 'onlinecourse/exam_result_bootstrap.html' , ctx)
| true
| true
|
7904a3cf708d31f553aba6bc487fdbfd5a76f097
| 9,136
|
py
|
Python
|
generate_xfoil/Step4_CreateDataset.py
|
nasa/airfoil-learning
|
a76dabc0474485d1e573471e70ec4826aeae0517
|
[
"NASA-1.3"
] | null | null | null |
generate_xfoil/Step4_CreateDataset.py
|
nasa/airfoil-learning
|
a76dabc0474485d1e573471e70ec4826aeae0517
|
[
"NASA-1.3"
] | null | null | null |
generate_xfoil/Step4_CreateDataset.py
|
nasa/airfoil-learning
|
a76dabc0474485d1e573471e70ec4826aeae0517
|
[
"NASA-1.3"
] | null | null | null |
import pickle
from typing import Dict, List, Tuple
from tqdm import trange
import numpy as np
import random, json
import torch, glob, os
import os.path as osp
from torch.utils.data import random_split
import torch_geometric.transforms as T
from libs.utils import create_edge_adjacency
from torch_geometric.data import Data
import sys
from libs.utils import pchip
sys.path.insert(0,'libs')
def shuffle_and_save(scaled_data: List, process_path:str,file_prefix:str,train_test_split:float=0.7):
"""Shuffle the list and save
Args:
scaled_data (List): [description]
file_prefix (str): [description]
train_test_split (float, optional): [description]. Defaults to 0.7.
"""
# Load all the designs
random.shuffle(scaled_data) # Shuffle the list
train_size = int(len(scaled_data)*train_test_split)
test_size = len(scaled_data) - train_size
train_subset, test_subset = random_split(scaled_data,[train_size, test_size])
train_dataset = [scaled_data[i] for i in train_subset.indices]
test_dataset = [scaled_data[i] for i in test_subset.indices]
torch.save(train_dataset,os.path.join(process_path,f'{file_prefix}_train.pt'))
torch.save(test_dataset,os.path.join(process_path,f'{file_prefix}_test.pt'))
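# --- Illustrative sketch (not part of the original script) ---
# shuffle_and_save shuffles the samples in place, splits them (70/30 by
# default) and writes two .pt files named from file_prefix. A hypothetical
# call, assuming a folder named 'datasets/standard':
#
#   shuffle_and_save(samples, 'datasets/standard', 'graph_scaled_data', 0.7)
#   # -> datasets/standard/graph_scaled_data_train.pt
#   # -> datasets/standard/graph_scaled_data_test.pt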
def CreateDatasetFromJson(airfoil:Dict,scaler:Dict,scaler_cp:Dict,cp_points:int) -> Tuple[List[Data], List[Data], List[Data], List[Data]]:
"""Takes a single json file and creates a tuple containing lists of graph data objects and also deep neural network data objects. These objects are combined together and later used by pytorch dataloader
Args:
airfoil (Dict): Dictionary containing properties of the airfoil0
scaler (Dict): Dictionary containing normalization parameters
scaler_cp (Dict): Dictionary containing normalization parameters for Cp
Returns:
Tuple containing:
List[Data], List[Data], List[Data], List[Data]]: [description]
"""
'''
Normalize the x and the y for airfoil
'''
xss = airfoil['xss']
yss = airfoil['yss']
xps = airfoil['xps']
yps = airfoil['yps']
x = np.concatenate((xss[0:],np.flip(xps[1:-1]))).reshape(-1,1) # This is already in 0 to 1
y = np.concatenate((yss[0:],np.flip(yps[1:-1]))).reshape(-1,1) #
y_scaled = scaler['y'].transform(y) # Do not transform y for gnn. This is for DNN only
edge_index = create_edge_adjacency(len(x))
graph_scaled_data = list()
graph_scaled_data_cp = list()
dnn_scaled = list()
dnn_scaled_cp = list()
for p in range(len(airfoil['polars'])):
polar = airfoil['polars'][p]
Cp_ss = np.array(polar['Cp_ss'])
Cp_ps = np.array(polar['Cp_ps'])
alpha = scaler['alpha'].transform(np.array(polar['alpha']).reshape(-1,1))[0][0]
Re = scaler['Re'].transform(np.array(polar['Re']).reshape(-1,1))[0][0]
Ncrit = scaler['Ncrit'].transform(np.array(polar['Ncrit']).reshape(-1,1))[0][0]
# Normalize Cl, Cd, Cdp, Cm
Cl = scaler['Cl'].transform(np.array(polar['Cl']).reshape(-1,1))
Cd = scaler['Cd'].transform(np.array(polar['Cd']).reshape(-1,1))
Cdp = scaler['Cdp'].transform(np.array(polar['Cdp']).reshape(-1,1))
Cm = scaler['Cm'].transform(np.array(polar['Cm']).reshape(-1,1))
# Scale Cp
Cp = np.concatenate(( Cp_ss, np.flip(Cp_ps[1:-1]) ))
Cp = torch.as_tensor(scaler['Cp'].transform(Cp.reshape(-1,1))[0:],dtype=torch.float32) # This has been normalized as a whole
data_y = torch.as_tensor(np.hstack([ Cl, Cd, Cdp, Cm ]), dtype=torch.float32)[0]
edge_index = np.array(edge_index) # Edge Adjacency
if (edge_index.shape[0]!=2):
edge_index = edge_index.transpose()
edge_index = torch.as_tensor(edge_index,dtype=torch.long).contiguous()
x = torch.as_tensor(np.hstack([x]), dtype=torch.float32)
y = torch.as_tensor(np.hstack([y]), dtype=torch.float32)
y_scaled = torch.as_tensor(np.hstack([y_scaled]), dtype=torch.float32)
conditions=torch.as_tensor(np.hstack([alpha, Re, Ncrit]),dtype=torch.float32)
pos = torch.as_tensor(np.hstack([x, y]), dtype=torch.float32)
edge_attr = torch.ones((edge_index.shape[1],pos.shape[1]),dtype=torch.float32)
'''
airfoil with all values scaled by global min/max or mean/std
'''
# d = Data(x=data_x,edge_index=edge_index,pos=pos,y=data_y,node_labels=Cp,conditions=conditions)
features = torch.zeros((y.shape[0],3))
# features[:,0] = x[:,0]
# features[:,1] = y[:,0]
features[:,0] = alpha
features[:,1] = Re
features[:,2] = Ncrit
# scaled_data
graph_scaled_data.append(Data(x=features,edge_index=edge_index,pos=pos,y=data_y,node_labels=Cp,conditions=conditions,edge_attr=edge_attr))
'''
airfoil with all values except for cp scaled by global min/max or mean/std
'''
Cp_ss_scaled = Cp_ss
Cp_ps_scaled = Cp_ps
for i in range(len(scaler_cp)):
Cp_ss_scaled[i] = scaler_cp[i].transform(Cp_ss[i].reshape(-1,1))[0] # Transform Cp for each value of x
for i in range(len(scaler_cp)):
Cp_ps_scaled[i] = scaler_cp[i].transform(Cp_ps[i].reshape(-1,1))[0] # Transform Cp for each value of x
Cp_ps_scaled = np.flip(Cp_ps[1:-1])
Cp_scaled = np.concatenate(( Cp_ss_scaled, Cp_ps_scaled ))
Cp_scaled = torch.as_tensor(Cp_scaled.reshape(-1,1)[0:],dtype=torch.float32)
# scaled_data_cp
graph_scaled_data_cp.append(Data(x=features,edge_index=edge_index,pos=pos,y=data_y,node_labels=Cp_scaled,conditions=conditions,edge_attr=edge_attr))
'''
Deep Neural Network
'''
dnn_features = (torch.cat((y_scaled[:,0], torch.tensor([alpha]), torch.tensor([Re]), torch.tensor([Ncrit])))).float()
dnn_labels = (torch.cat((data_y,Cp[:,0])))
dnn_labels_cp = (torch.cat((data_y,Cp_scaled[:,0])))
dnn_scaled.append((dnn_features,dnn_labels))
dnn_scaled_cp.append((dnn_features,dnn_labels_cp))
return graph_scaled_data, graph_scaled_data_cp, dnn_scaled, dnn_scaled_cp
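# --- Illustrative sketch (not part of the original script) ---
# Each graph sample appended above is a torch_geometric Data object with one
# node per airfoil surface point. Roughly, for N points and E edges:
#   x           [N, 3]  flow conditions (alpha, Re, Ncrit) repeated per node
#   pos         [N, 2]  (x, y) surface coordinates
#   edge_index  [2, E]  surface connectivity, edge_attr [E, 2] all ones
#   y           [4]     normalized (Cl, Cd, Cdp, Cm)
#   node_labels [N, 1]  normalized Cp at each node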
def CreateDataset(data_folder:str='json',processed_path:str='datasets',
use_standard_scaler:bool=True):
"""Create a dataset that can be used to train a graph neural network
Reference:
https://pytorch-geometric.readthedocs.io/en/latest/modules/data.html
Args:
data_folder (str, optional): folder containing the airfoil json files. Defaults to 'json'.
processed_path (str, optional): path to save the pytorch dataset. Defaults to 'datasets'.
use_standard_scaler (bool, optional): Whether to use standard scaler or min_max. Defaults to True.
Returns:
Saves 4 files in the processed_path folder
graph_scaled_data.pt: Graph Data format with cp all scaled by a common scaler
graph_scaled_data_cp.pt: Graph Data format with cp individually scaled at each x value
dnn_scaled.pt: Deep neural format with cp all scaled by a common scaler
dnn_scaled_cp.pt: Deep neural network format with cp individually scaled at each x value
"""
os.makedirs(processed_path,exist_ok=True)
data_files = glob.glob(osp.join(data_folder,'*.json'))
jsons = list()
for filename in data_files:
with open(filename,'r') as f:
jsons.append(json.load(f))
with open('scalers.pickle','rb') as f:
data = pickle.load(f)
if use_standard_scaler:
scaler = data['standard']
scaler_cp = data['standard_cp']
else:
scaler = data['min_max']
scaler_cp = data['min_max_cp']
graph_scaled_data = list() # All airfoil parameters are scaled by the global min and max or mean and standard dev
graph_scaled_data_cp = list() # All except for Cp is scaled by global min and max. Cp is scaled at each x
dnn_scaled = list()
dnn_scaled_cp = list()
pbar = trange(len(jsons),desc='Processing')
for c in pbar:
out1, out2, out3, out4 = CreateDatasetFromJson(jsons[c],scaler,scaler_cp,50)
pbar.desc="Extending List"
graph_scaled_data.extend(out1)
graph_scaled_data_cp.extend(out2)
dnn_scaled.extend(out3)
dnn_scaled_cp.extend(out4)
pbar.desc="Processing"
shuffle_and_save(graph_scaled_data,processed_path,'graph_scaled_data',0.7)
shuffle_and_save(graph_scaled_data_cp,processed_path,'graph_scaled_data_cp',0.7)
shuffle_and_save(dnn_scaled,processed_path,'dnn_scaled_data',0.7)
shuffle_and_save(dnn_scaled_cp,processed_path,'dnn_scaled_data_cp',0.7)
if __name__ == "__main__":
CreateDataset(data_folder='json_cp_resize',processed_path='datasets/standard/',use_standard_scaler=True)
CreateDataset(data_folder='json_cp_resize',processed_path='datasets/minmax/',use_standard_scaler=False)
# transform_test_train()
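# --- Illustrative sketch (not part of the original script) ---
# The CreateDataset docstring lists the four dataset files written per scaler
# type. A minimal sketch of reading one DNN split back for training, assuming
# the default output paths produced above (note that batching like this only
# works if every airfoil has the same number of surface points):
#
#   import torch
#   from torch.utils.data import DataLoader
#
#   train_data = torch.load('datasets/standard/dnn_scaled_data_train.pt')
#   loader = DataLoader(train_data, batch_size=32, shuffle=True)
#   for features, labels in loader:
#       pass  # feed (features, labels) to a model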
| 45.004926
| 207
| 0.660574
|
import pickle
from typing import Dict, List, Tuple
from tqdm import trange
import numpy as np
import random, json
import torch, glob, os
import os.path as osp
from torch.utils.data import random_split
import torch_geometric.transforms as T
from libs.utils import create_edge_adjacency
from torch_geometric.data import Data
import sys
from libs.utils import pchip
sys.path.insert(0,'libs')
def shuffle_and_save(scaled_data: List, process_path:str,file_prefix:str,train_test_split:float=0.7):
random.shuffle(scaled_data)
train_size = int(len(scaled_data)*train_test_split)
test_size = len(scaled_data) - train_size
train_subset, test_subset = random_split(scaled_data,[train_size, test_size])
train_dataset = [scaled_data[i] for i in train_subset.indices]
test_dataset = [scaled_data[i] for i in test_subset.indices]
torch.save(train_dataset,os.path.join(process_path,f'{file_prefix}_train.pt'))
torch.save(test_dataset,os.path.join(process_path,f'{file_prefix}_test.pt'))
def CreateDatasetFromJson(airfoil:Dict,scaler:Dict,scaler_cp:Dict,cp_points:int) -> Tuple[List[Data], List[Data], List[Data], List[Data]]:
xss = airfoil['xss']
yss = airfoil['yss']
xps = airfoil['xps']
yps = airfoil['yps']
x = np.concatenate((xss[0:],np.flip(xps[1:-1]))).reshape(-1,1)
y = np.concatenate((yss[0:],np.flip(yps[1:-1]))).reshape(-1,1)
y_scaled = scaler['y'].transform(y)
edge_index = create_edge_adjacency(len(x))
graph_scaled_data = list()
graph_scaled_data_cp = list()
dnn_scaled = list()
dnn_scaled_cp = list()
for p in range(len(airfoil['polars'])):
polar = airfoil['polars'][p]
Cp_ss = np.array(polar['Cp_ss'])
Cp_ps = np.array(polar['Cp_ps'])
alpha = scaler['alpha'].transform(np.array(polar['alpha']).reshape(-1,1))[0][0]
Re = scaler['Re'].transform(np.array(polar['Re']).reshape(-1,1))[0][0]
Ncrit = scaler['Ncrit'].transform(np.array(polar['Ncrit']).reshape(-1,1))[0][0]
Cl = scaler['Cl'].transform(np.array(polar['Cl']).reshape(-1,1))
Cd = scaler['Cd'].transform(np.array(polar['Cd']).reshape(-1,1))
Cdp = scaler['Cdp'].transform(np.array(polar['Cdp']).reshape(-1,1))
Cm = scaler['Cm'].transform(np.array(polar['Cm']).reshape(-1,1))
Cp = np.concatenate(( Cp_ss, np.flip(Cp_ps[1:-1]) ))
Cp = torch.as_tensor(scaler['Cp'].transform(Cp.reshape(-1,1))[0:],dtype=torch.float32)
data_y = torch.as_tensor(np.hstack([ Cl, Cd, Cdp, Cm ]), dtype=torch.float32)[0]
edge_index = np.array(edge_index)
if (edge_index.shape[0]!=2):
edge_index = edge_index.transpose()
edge_index = torch.as_tensor(edge_index,dtype=torch.long).contiguous()
x = torch.as_tensor(np.hstack([x]), dtype=torch.float32)
y = torch.as_tensor(np.hstack([y]), dtype=torch.float32)
y_scaled = torch.as_tensor(np.hstack([y_scaled]), dtype=torch.float32)
conditions=torch.as_tensor(np.hstack([alpha, Re, Ncrit]),dtype=torch.float32)
pos = torch.as_tensor(np.hstack([x, y]), dtype=torch.float32)
edge_attr = torch.ones((edge_index.shape[1],pos.shape[1]),dtype=torch.float32)
features = torch.zeros((y.shape[0],3))
features[:,0] = alpha
features[:,1] = Re
features[:,2] = Ncrit
graph_scaled_data.append(Data(x=features,edge_index=edge_index,pos=pos,y=data_y,node_labels=Cp,conditions=conditions,edge_attr=edge_attr))
Cp_ss_scaled = Cp_ss
Cp_ps_scaled = Cp_ps
for i in range(len(scaler_cp)):
Cp_ss_scaled[i] = scaler_cp[i].transform(Cp_ss[i].reshape(-1,1))[0]
for i in range(len(scaler_cp)):
Cp_ps_scaled[i] = scaler_cp[i].transform(Cp_ps[i].reshape(-1,1))[0]
Cp_ps_scaled = np.flip(Cp_ps[1:-1])
Cp_scaled = np.concatenate(( Cp_ss_scaled, Cp_ps_scaled ))
Cp_scaled = torch.as_tensor(Cp_scaled.reshape(-1,1)[0:],dtype=torch.float32)
graph_scaled_data_cp.append(Data(x=features,edge_index=edge_index,pos=pos,y=data_y,node_labels=Cp_scaled,conditions=conditions,edge_attr=edge_attr))
dnn_features = (torch.cat((y_scaled[:,0], torch.tensor([alpha]), torch.tensor([Re]), torch.tensor([Ncrit])))).float()
dnn_labels = (torch.cat((data_y,Cp[:,0])))
dnn_labels_cp = (torch.cat((data_y,Cp_scaled[:,0])))
dnn_scaled.append((dnn_features,dnn_labels))
dnn_scaled_cp.append((dnn_features,dnn_labels_cp))
return graph_scaled_data, graph_scaled_data_cp, dnn_scaled, dnn_scaled_cp
def CreateDataset(data_folder:str='json',processed_path:str='datasets',
use_standard_scaler:bool=True):
os.makedirs(processed_path,exist_ok=True)
data_files = glob.glob(osp.join(data_folder,'*.json'))
jsons = list()
for filename in data_files:
with open(filename,'r') as f:
jsons.append(json.load(f))
with open('scalers.pickle','rb') as f:
data = pickle.load(f)
if use_standard_scaler:
scaler = data['standard']
scaler_cp = data['standard_cp']
else:
scaler = data['min_max']
scaler_cp = data['min_max_cp']
graph_scaled_data = list()
graph_scaled_data_cp = list()
dnn_scaled = list()
dnn_scaled_cp = list()
pbar = trange(len(jsons),desc='Processing')
for c in pbar:
out1, out2, out3, out4 = CreateDatasetFromJson(jsons[c],scaler,scaler_cp,50)
pbar.desc="Extending List"
graph_scaled_data.extend(out1)
graph_scaled_data_cp.extend(out2)
dnn_scaled.extend(out3)
dnn_scaled_cp.extend(out4)
pbar.desc="Processing"
shuffle_and_save(graph_scaled_data,processed_path,'graph_scaled_data',0.7)
shuffle_and_save(graph_scaled_data_cp,processed_path,'graph_scaled_data_cp',0.7)
shuffle_and_save(dnn_scaled,processed_path,'dnn_scaled_data',0.7)
shuffle_and_save(dnn_scaled_cp,processed_path,'dnn_scaled_data_cp',0.7)
if __name__ == "__main__":
CreateDataset(data_folder='json_cp_resize',processed_path='datasets/standard/',use_standard_scaler=True)
CreateDataset(data_folder='json_cp_resize',processed_path='datasets/minmax/',use_standard_scaler=False)
| true
| true
|
7904a48fd08bc3a934fe3f4f273745b2570dce4c
| 21,535
|
py
|
Python
|
hw06_train.py
|
arao53/BME695-object-detection
|
7f094cc016d91c6b00d6f86f7c3e2e96acbb0083
|
[
"MIT"
] | null | null | null |
hw06_train.py
|
arao53/BME695-object-detection
|
7f094cc016d91c6b00d6f86f7c3e2e96acbb0083
|
[
"MIT"
] | null | null | null |
hw06_train.py
|
arao53/BME695-object-detection
|
7f094cc016d91c6b00d6f86f7c3e2e96acbb0083
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""hw06_training.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1bjAeUIVjt9W8mASk8vw1y2SbuBYuQdlG
"""
!pip install reports
import PIL.Image as Image, requests, urllib, random
import argparse, json, PIL.Image, reports, os, pickle
from requests.exceptions import ConnectionError, ReadTimeout, TooManyRedirects, MissingSchema, InvalidURL
import numpy, torch, cv2, skimage
import skimage.io as io
from torch import nn
import torch.nn.functional as F
from pycocotools.coco import COCO
import glob
from torch.utils.data import DataLoader,Dataset
import torchvision.transforms as tvt
import matplotlib.pyplot as plt
from torchsummary import summary
import pandas as pd
# Mount google drive to run on Colab
#from google.colab import drive
#drive.mount('/content/drive')
#%cd "/content/drive/My Drive/Colab Notebooks/DeepLearning/hw06/"
#!pwd
#!ls
root_path = "/content/drive/My Drive/Colab Notebooks/DeepLearning/hw06/"
coco_json_path = "annotations/instances_train2017.json"
class_list = ["person", "dog", "hot dog"]
coco = COCO(coco_json_path)
class build_annotations:
# Structure of the all_annotations file:
# indexed by the image filepath, removing the '.jpg' or the string version (with zeros) of the imageID
# For each image:
# 'imageID': corresponds to the integer image ID assigned within COCO.
# 'num_objects': integer number of objects in the image (at most 5)
# 'bbox': a dictionary of the bounding box array for each instance within the image. The dictionary key is the string 0-5 of each instance in order of decreasing area
# 'labels': a dictionary of the labels of each instance within the image. The key is the same as bbox but the value is the integer category ID assigned within COCO.
def __init__(self, root_path, class_list, max_instances = 5):
self.root_path = root_path
self.image_dir = root_path + '*.jpg'
self.cat_IDs = coco.getCatIds(catNms=class_list)
self.max_instances = max_instances
def __call__(self):
all_annotations = {}
g = glob.glob(self.image_dir)
for i, filename in enumerate(g):
filename = filename.split('/')[-1]
img_ID = int(filename.split('.')[0])
ann_Ids = coco.getAnnIds(imgIds=img_ID, catIds = self.cat_IDs, iscrowd = False)
num_objects = min(len(ann_Ids), self.max_instances) # cap at max_instances objects per image
anns = coco.loadAnns(ann_Ids)
indices = sort_by_area(anns, self.max_instances)
bbox = {}
label = {}
i = 0
for n in indices:
instance = anns[n]
bbox[str(i)] = instance['bbox']
label[str(i)] = instance['category_id']
i+=1
annotation= {"imageID":img_ID, "num_objects":i, 'bbox': bbox, 'labels':label}
all_annotations[filename.split('.')[0]] = annotation
ann_path = self.root_path + "image_annotations.p"
pickle.dump( all_annotations, open(ann_path, "wb" ) )
print('Annotations saved in:', ann_path)
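# --- Illustrative sketch (not part of the original notebook) ---
# The comment block at the top of build_annotations describes the layout of
# image_annotations.p. A hypothetical helper (not used anywhere below) showing
# how one entry could be read back:
def _example_read_annotation(ann_path, image_key):
    # image_key is the zero-padded image id string, e.g. '000000139099' (made up)
    anns = pickle.load(open(ann_path, 'rb'))
    entry = anns[image_key]
    for n in range(entry['num_objects']):
        x, y, w, h = entry['bbox'][str(n)]      # COCO-style [x, y, width, height]
        coco_category_id = entry['labels'][str(n)]
    return entry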
def sort_by_area(anns, num):
areas = numpy.zeros(len(anns))
for i, instance in enumerate(anns):
areas[i] = instance['area']
indices = numpy.argsort(areas)[-num:]
return indices[::-1]
class your_dataset_class(Dataset):
def __init__(self, path, class_list, coco):
self.class_list = class_list
self.folder = path
self.coco = coco
self.catIds = coco.getCatIds(catNms = class_list)
self.imgIds = coco.getImgIds(catIds = self.catIds)
self.categories = coco.loadCats(self.catIds)
#create label dictionary
labeldict = {}
for idx, in_class in enumerate(self.class_list):
for c in self.categories:
if c["name"] == in_class:
labeldict[c['id']] = idx
self.coco_labeldict = labeldict
#if first time running, index the image dataset to make annotation .p file
annotation_path = path + 'image_annotations.p'
if os.path.exists(annotation_path) ==False:
print("Indexing dataset to compile annotations...")
dataset_annotations = build_annotations(path, class_list)
dataset_annotations()
self.data_anns = pickle.load(open(annotation_path, "rb" ))
def __len__(self):
g = glob.glob(self.folder + '*.jpg') # ,'*.jpg')
return (len(g))
def get_imagelabel(self, img_path, sc, max_objects = 5): #img_path = file location, sc = scale [0]: width, [1]: height
saved_filename = os.path.basename(img_path)
filename = saved_filename.split('.jpg')[0]
image_id = int(filename)#.split('_')[-1])
bbox_tensor = torch.zeros(max_objects, 4, dtype=torch.uint8)
label_tensor = torch.zeros(max_objects+1, dtype=torch.uint8) + len(self.class_list)
target_obj = self.data_anns[filename]
num_objects = target_obj['num_objects']
for n in range(num_objects):
[x,y,w,h] = target_obj['bbox'][str(n)]
bbox = [sc[1]*y, x*sc[0], sc[1]*(h), sc[0]*(w)]
bbox_tensor[n,:] = torch.tensor(numpy.array(bbox))
cat_label = target_obj['labels'][str(n)]
data_label = self.coco_labeldict[cat_label]
label_tensor[n] = torch.tensor(data_label)
return bbox_tensor, label_tensor
def __getitem__(self, item):
g = glob.glob(self.folder + '*.jpg') #'**/*.jpg') # , '*.jpg')
im = PIL.Image.open(g[item])
im, scale_fac = rescale_factor(im, 128) #overwrite old image with new resized image of size 128
W, H = im.size
transformer = tvt.Compose([tvt.ToTensor(), tvt.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
im_array = torch.randint(0, 256, (3, H, W)).type(torch.uint8)
for i in range(H):
for j in range(W):
im_array[:, j, i] = torch.tensor(im.getpixel((i, j)))
im_scaled = im_array / im_array.max() # scaled from 0-1
im_tf = transformer(numpy.transpose(im_scaled.numpy()))
num_classes = len(self.class_list)
bbox, label = self.get_imagelabel(g[item], scale_fac)
sample = {'im_ID': g[item],
'scale':scale_fac,
'image': im_tf,
'bbox' : bbox,
'label': label}
return sample
def rescale_factor(im_original, std_size):
raw_width, raw_height = im_original.size
im = im_original.resize((std_size, std_size), Image.BOX)
w_factor = std_size/raw_width
h_factor = std_size/raw_height
return (im, [w_factor, h_factor])
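# --- Illustrative sketch (not part of the original notebook) ---
# rescale_factor squashes every image to std_size x std_size and returns the
# per-axis factors that are later used to map the COCO bounding boxes into the
# resized frame. Worked example with a made-up 640x480 input and std_size=128:
#   w_factor = 128 / 640 = 0.2
#   h_factor = 128 / 480 ~ 0.267
# so box x-values are multiplied by 0.2 and y-values by roughly 0.267.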
#train_path = os.path.join(root_path, "Train/")
train_path = root_path + "Train/"
val_path = os.path.join(root_path, "Val/")
batch_size = 64
train_dataset = your_dataset_class(train_path, class_list, coco)
#train_dataset.__getitem__(32)
train_data_loader = torch.utils.data.DataLoader(dataset = train_dataset,
batch_size = batch_size,
shuffle = True,
num_workers= 2,
drop_last=True)
#val_dataset = your_dataset_class(val_path, class_list)
#val_data_loader = torch.utils.data.DataLoader(dataset = val_dataset,
# batch_size = batch_size,
# shuffle = True,
# num_workers = 4,
# drop_last=True)
class SkipBlock(nn.Module):
def __init__(self,in_ch, out_ch, downsample = False):
super().__init__()
self.in_ch = in_ch
self.out_ch = out_ch
self.conv1 = nn.Conv2d(in_ch, out_ch, 3, stride = 1, padding = 1)
self.conv2 = nn.Conv2d(in_ch, out_ch, 3, padding = 1)
self.bnorm1 = nn.BatchNorm2d(out_ch)
self.bnorm2 = nn.BatchNorm2d(out_ch)
self.downsample_tf = downsample
self.downsampler = nn.Conv2d(in_ch, out_ch, 1, stride= 2)
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bnorm1(out)
out = F.relu(out)
if self.downsample_tf == True:
identity = self.downsampler(identity)
out = self.downsampler(out)
out += identity
else:
out = self.conv2(out)
out = self.bnorm2(out)
out = F.relu(out)
out += identity
return out
class MechEnet(nn.Module):
def __init__(self, num_classes, depth):
super().__init__()
self.depth = depth // 8
self.conv_initial = nn.Conv2d( 3, 64, 3, padding = 1)
self.pool = nn.MaxPool2d(2,2)
## assume all layers are 64 channels deep
self.skipblock64_1 = nn.ModuleList()
for i in range(self.depth):
#print("adding layer", i)
self.skipblock64_1.append( SkipBlock(64,64, downsample = False) ) #append a 64 in/out ch layer - depth*2/4 convolutions
self.skip_downsample = SkipBlock(64,64, downsample= True)
self.skipblock64_2 = nn.ModuleList()
for i in range(self.depth):
#print("adding layer", i + self.depth)
self.skipblock64_2.append( SkipBlock(64,64, downsample = False) ) #append a 64 in/out layer - depth*2/4 convolutions
self.fc_seqn = nn.Sequential(
nn.Linear(64*4*4, 3000),
nn.ReLU(inplace =True),
nn.Linear(3000,3000),
nn.ReLU(inplace =True),
nn.Linear(3000,8*8*(5*(5+3))) #5 anchor boxes*(1+ bbox(4) + classes (3))
)
def forward(self, x):
# x1 is the output of classification
x = self.pool(F.relu(self.conv_initial(x)))
x1 = self.skip_downsample(x)
for i, skips in enumerate(self.skipblock64_1[self.depth//4 :]):
x1 = skips(x1)
x1 = self.skip_downsample(x1)
for i, skips in enumerate(self.skipblock64_1[:self.depth//4]):
x1 = skips(x1)
x1 = self.skip_downsample(x1)
for i, skips in enumerate(self.skipblock64_2[self.depth//4:]):
x1 = skips(x1)
x1 = self.skip_downsample(x1)
for i, skips in enumerate(self.skipblock64_2[:self.depth//4]):
x1 = skips(x1)
#x1 = self.skip_downsample(x)
x1 = x1.view(x1.size(0),-1)
x1 = self.fc_seqn(x1)
return x1
class IoULoss(torch.nn.Module):
def __init__(self, weight=None, size_average=True):
super(IoULoss, self).__init__()
def forward(self, inputs, targets, smooth=1):
#flatten label and prediction tensors
# tensor shape = [b, yolo_cell, anch, yolovector]
# flattened tensor = [b, numcells*numanch*8]
b_size = inputs.shape[0]
pred_unscrm = inputs.view(b_size, 8**2, 5, -1)
targ_unscrm = targets.view(b_size, 8**2, 5, -1)
pred_bbox = pred_unscrm[:,:,:,1:5]
targ_bbox = targ_unscrm[:,:,:,1:5]
intersection = targ_bbox*pred_bbox
union = targ_bbox + pred_bbox
J_idx = torch.div(intersection, union)
#print(J_idx)
J_dist = 1.0-J_idx
return torch.sum(J_dist)
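# --- Illustrative sketch (not part of the original notebook) ---
# IoULoss above forms an element-wise ratio of the predicted and target bbox
# entries of the yolo tensor. For comparison only, a conventional IoU between
# two axis-aligned boxes given as (x1, y1, x2, y2) corners can be sketched as
# follows (hypothetical helper, not called anywhere in this script):
def _example_box_iou(a, b):
    ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
    ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    union = area_a + area_b - inter
    return inter / union if union > 0 else 0.0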
## significant code is adapted from Prof. Kak's Multi-instance detector
def run_code_for_training(net, lrate, mom, epochs, im_size, max_objects, yolo_interval = 16):
print('Beginning training for', epochs,'epochs...')
#criterion1 = torch.nn.CrossEntropyLoss()
criterion = torch.nn.MSELoss()
#criterion = IoULoss()
optimizer = torch.optim.SGD(net.parameters(), lr = lrate, momentum = mom)
loss_tracker = []
num_cells_image_height = im_size//yolo_interval
num_cells_image_width = im_size//yolo_interval
num_yolo_cells = num_cells_image_height*num_cells_image_width
print_iteration = 3
num_anchor_boxes = 5
yolo_tensor = torch.zeros(batch_size, num_yolo_cells, num_anchor_boxes, 1*5+3) #batch size, 8*8, 1*5+3 classes
class AnchorBox:
def __init__(self, AR, topleft, abox_h, abox_w, abox_idx):
self.AR = AR
self.topleft = topleft
self.abox_h = abox_h
self.abox_w = abox_w
self.abox_idx= abox_idx
device = torch.device("cuda:0")
for epoch in range(epochs):
print('\nEpoch %d training...' %(epoch + 1))
running_loss = 0.0
for i, data in enumerate(train_data_loader):
sample_batch = data['im_ID']
im_tensor = data["image"]
target_reg = data["bbox"].type(torch.FloatTensor)
target_clf = data["label"].type(torch.LongTensor)
optimizer.zero_grad()
im_tensor = im_tensor.to(device)
target_reg = target_reg.to(device)
target_clf = target_clf.to(device)
yolo_tensor = yolo_tensor.to(device)
obj_centers = {ibx :
{idx : None for idx in range(max_objects)}
for ibx in range(im_tensor.shape[0])}
anchor_boxes_1_1 = [[AnchorBox(1/1, (i*yolo_interval,j*yolo_interval), yolo_interval, yolo_interval, 0)
for i in range(0,num_cells_image_height)]
for j in range(0,num_cells_image_width)]
anchor_boxes_1_3 = [[AnchorBox(1/3, (i*yolo_interval,j*yolo_interval), yolo_interval, 3*yolo_interval, 1)
for i in range(0,num_cells_image_height)]
for j in range(0,num_cells_image_width)]
anchor_boxes_3_1 = [[AnchorBox(3/1, (i*yolo_interval,j*yolo_interval), 3*yolo_interval, yolo_interval, 2)
for i in range(0,num_cells_image_height)]
for j in range(0,num_cells_image_width)]
anchor_boxes_1_5 = [[AnchorBox(1/5, (i*yolo_interval,j*yolo_interval), yolo_interval, 5*yolo_interval, 3)
for i in range(0,num_cells_image_height)]
for j in range(0,num_cells_image_width)]
anchor_boxes_5_1 = [[AnchorBox(5/1, (i*yolo_interval,j*yolo_interval), 5*yolo_interval, yolo_interval, 4)
for i in range(0,num_cells_image_height)]
for j in range(0,num_cells_image_width)]
#Build the yolo tensor based on the bounding box and label tensors from the target/dataset
for b in range(im_tensor.shape[0]): # Loop through batch index
for idx in range(max_objects): # Loop through each object in the target tensor
height_center_bb = (target_reg[b][idx][1].item() + target_reg[b][idx][3].item()) // 2
width_center_bb = (target_reg[b][idx][0].item() + target_reg[b][idx][2].item()) // 2
obj_bb_height = target_reg[b][idx][3].item() - target_reg[b][idx][1].item()
obj_bb_width = target_reg[b][idx][2].item() - target_reg[b][idx][0].item()
obj_label = target_clf[b][idx].item()
if obj_label == 13:
obj_label = 4
eps = 1e-8
AR = float(obj_bb_height + eps) / float(obj_bb_width + eps)
cell_row_idx = int(height_center_bb // yolo_interval) ## for the i coordinate
cell_col_idx = int(width_center_bb // yolo_interval) ## for the j coordinates
if AR <= 0.2: ## (F)
anchbox = anchor_boxes_1_5[cell_row_idx][cell_col_idx]
elif AR <= 0.5:
anchbox = anchor_boxes_1_3[cell_row_idx][cell_col_idx]
elif AR <= 1.5:
anchbox = anchor_boxes_1_1[cell_row_idx][cell_col_idx]
elif AR <= 4:
anchbox = anchor_boxes_3_1[cell_row_idx][cell_col_idx]
elif AR > 4:
anchbox = anchor_boxes_5_1[cell_row_idx][cell_col_idx]
bh = float(obj_bb_height) / float(yolo_interval) ## (G)
bw = float(obj_bb_width) / float(yolo_interval)
obj_center_x = float(target_reg[b][idx][2].item() + target_reg[b][idx][0].item()) / 2.0
obj_center_y = float(target_reg[b][idx][3].item() + target_reg[b][idx][1].item()) / 2.0
yolocell_center_i = cell_row_idx*yolo_interval + float(yolo_interval) / 2.0
yolocell_center_j = cell_col_idx*yolo_interval + float(yolo_interval) / 2.0
del_x = float(obj_center_x - yolocell_center_j) / yolo_interval
del_y = float(obj_center_y - yolocell_center_i) / yolo_interval
yolo_vector = [0, del_x, del_y, bh, bw, 0, 0, 0]
if obj_label<4:
yolo_vector[4 + obj_label] = 1
yolo_vector[0] = 1
yolo_cell_index = cell_row_idx * num_cells_image_width + cell_col_idx
yolo_tensor[b, yolo_cell_index, anchbox.abox_idx] = torch.FloatTensor( yolo_vector )
yolo_tensor_flattened = yolo_tensor.view(im_tensor.shape[0], -1)
## Forward Pass
pred_yolo = net(im_tensor)
#pred_yolo = filter_yolo_tensor(pred_yolo, im_tensor.shape[0], num_yolo_cells, num_anchor_boxes)
loss = criterion(pred_yolo, yolo_tensor_flattened)
loss.backward(retain_graph = True)
pred_unscrm = pred_yolo.view(im_tensor.shape[0], 8**2, 5, -1)
sample_yolo_tensor = pred_unscrm
optimizer.step()
running_loss += loss.item()
if (i+1)%print_iteration ==0:
average_loss = running_loss/float(print_iteration)
print("[epoch: %d, batch: %5d] Avg Batch loss: %.4f" %(epoch + 1, i+1, average_loss))
loss_tracker = numpy.append(loss_tracker, average_loss)
running_loss = 0.0
return loss_tracker, sample_yolo_tensor, sample_batch
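# --- Illustrative sketch (not part of the original notebook) ---
# Inside the training loop above each ground-truth box is encoded as an
# 8-element yolo_vector: [objectness, del_x, del_y, bh, bw, <class one-hot>],
# where del_x / del_y are offsets of the box centre from the centre of its
# yolo cell in units of yolo_interval, and bh / bw are the box height / width
# in the same units. Worked example with made-up numbers (yolo_interval = 16):
#   box centre (x, y) = (56, 40), height = width = 32
#   cell_row_idx = 40 // 16 = 2, cell_col_idx = 56 // 16 = 3
#   cell centre = (3*16 + 8, 2*16 + 8) = (56, 40)   (x, y)
#   del_x = (56 - 56) / 16 = 0.0, del_y = (40 - 40) / 16 = 0.0
#   bh = bw = 32 / 16 = 2.0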
def filter_yolo_tensor(yolo_tensor, batch_size, num_yolo_cells, aboxes):
#loop through each yolo_cell_index in the prediction tensor
# if idx[0] of the yolo vector is less than 0.5, make the whole vector zero
zero_vec = torch.zeros(8)
print(yolo_tensor.shape)
for b in range(batch_size):
for num in range(num_yolo_cells):
for an in range(aboxes):
if yolo_tensor[b,num][an][0] < 0.5:
yolo_tensor[b,num][an][:] = torch.zeros(8)
return yolo_tensor
model = MechEnet(len(class_list), depth = 64)
lrate = 5e-3
mom = 0.5
epochs = 1
yolo_int = 16
im_size = 128
max_objects = 5
savepath = "MechEnet.pth"
model.load_state_dict(torch.load(savepath))
if torch.cuda.is_available():
device = torch.device("cuda:0")
model.cuda()
summary(model, (3, im_size, im_size))
training_loss, yolo_sample, batches = run_code_for_training(model, lrate, mom, epochs, im_size, max_objects, yolo_interval = yolo_int)
#savepath = "/content/drive/My Drive/Colab Notebooks/DeepLearning/hw06/MechEnet.pth"
#torch.save(model.state_dict(), savepath)
#pd.DataFrame(training_loss).to_csv("/content/drive/My Drive/Colab Notebooks/DeepLearning/hw06/loss.csv")
fig, ax = plt.subplots()
ax.plot(training_loss)
ax.set_title('Training loss')
ax.set_ylabel('Loss')
ax.set_xlabel('Iterations')
## Visualize prediction on training set
annotation_path = root_path + 'Train/'+ 'image_annotations.p'
data_anns = pickle.load(open(annotation_path, "rb" ))
def show_image(image_anns):
img = coco.loadImgs(rand_img['imageID'])[0]
I = io.imread(img['coco_url'])
if len(I.shape) == 2:
I = skimage.color.gray2rgb(I)
catIds = coco.getCatIds(catNms= class_list)
annIds = coco.getAnnIds(imgIds=rand_img['imageID'], catIds= catIds, iscrowd=False)
anns = coco.loadAnns(annIds)
image = numpy.uint8(I)
for i in range(rand_img['num_objects']):
[x,y,w,h] = rand_img['bbox'][str(i)]
label = rand_img['labels'][str(i)]
image = cv2.rectangle(image, (int(x), int(y)), (int(x +w), int(y + h)), (36,255,12), 2)
class_label = train_dataset.coco_labeldict[label]  # map COCO category id to class_list index
image = cv2.putText(image, 'True ' + class_list[class_label], (int(x), int(y-10)), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (36,255,12), 2)
return image
bdx =37 #numpy.random.randint(0,64)
#55 #18 #5
img_loc = batches[bdx].split('/')[-1].split('.')[0]
rand_img = data_anns[img_loc]
image = show_image(rand_img)
g = glob.glob(root_path + 'Train/*.jpg')
for i in range(len(g)):
    if img_loc in g[i]:
        sdx = i
scale = train_dataset.__getitem__(sdx)['scale']
import math
im_considered = yolo_sample[bdx,:,:,:]
im_pred_anch = torch.zeros(64,8)
cell_pred = []
num_cell_width = 8
yolo_interval = 16
for i in range(im_considered.shape[0]):
AR = torch.argmax(im_considered[i,:,0])
im_pred_anch[i,:] = im_considered[i,AR,:]
if im_pred_anch[i,0] > 0.75:
if AR == 0:
w,h = 1,1
elif AR == 1:
w,h = 1,3
elif AR == 2:
w,h = 3,1
elif AR == 3:
w,h = 1,5
elif AR == 4:
w,h = 5,1
row_idx = math.floor(i/num_cell_width)
col_idx = i%num_cell_width
yolo_box = im_pred_anch[i,1:5].cpu().detach().numpy()
x1 = ((row_idx + 0.5)*yolo_interval)/scale[0]
x2 = x1 + (w*yolo_interval)/scale[0]
y1 = (col_idx + 0.5)*yolo_interval/scale[1]
y2 = y1+ (h*yolo_interval)/scale[1]
label = torch.argmax(im_pred_anch[i,5:]).cpu().detach().numpy()
pred_label = str('Predicted ' + class_list[label])
temp = [pred_label, x1,y1, x2,y2]
cell_pred = numpy.append(cell_pred, temp)
image = cv2.rectangle(image, (int(x1), int(y1)), (int(x2), int(y2)), (255,0,0), 2)
image = cv2.putText(image, pred_label, (int(x1), int(y1-10)), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255,0,0), 2)
fig, ax = plt.subplots(1,1, dpi = 150)
ax.imshow(image)
ax.set_axis_off()
plt.axis('tight')
plt.show()
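# --- Illustrative sketch (not part of the original notebook) ---
# The decoding loop above reverses the yolo encoding: each grid cell index is
# mapped back to pixel coordinates in the original image by scaling with the
# factors from rescale_factor. With the made-up values row_idx = 2, w = 1,
# yolo_interval = 16 and scale[0] = 0.2:
#   x1 = (2 + 0.5) * 16 / 0.2 = 200
#   x2 = x1 + 1 * 16 / 0.2 = 280
# i.e. one 16-pixel cell in the 128x128 frame spans 80 pixels of a 640-wide
# original image.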
| 39.083485
| 168
| 0.622661
|
"""hw06_training.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1bjAeUIVjt9W8mASk8vw1y2SbuBYuQdlG
"""
!pip install reports
import PIL.Image as Image, requests, urllib, random
import argparse, json, PIL.Image, reports, os, pickle
from requests.exceptions import ConnectionError, ReadTimeout, TooManyRedirects, MissingSchema, InvalidURL
import numpy, torch, cv2, skimage
import skimage.io as io
from torch import nn
import torch.nn.functional as F
from pycocotools.coco import COCO
import glob
from torch.utils.data import DataLoader,Dataset
import torchvision.transforms as tvt
import matplotlib.pyplot as plt
from torchsummary import summary
import pandas as pd
root_path = "/content/drive/My Drive/Colab Notebooks/DeepLearning/hw06/"
coco_json_path = "annotations/instances_train2017.json"
class_list = ["person", "dog", "hot dog"]
coco = COCO(coco_json_path)
class build_annotations:
def __init__(self, root_path, class_list, max_instances = 5):
self.root_path = root_path
self.image_dir = root_path + '*.jpg'
self.cat_IDs = coco.getCatIds(catNms=class_list)
self.max_instances = max_instances
def __call__(self):
all_annotations = {}
g = glob.glob(self.image_dir)
for i, filename in enumerate(g):
filename = filename.split('/')[-1]
img_ID = int(filename.split('.')[0])
ann_Ids = coco.getAnnIds(imgIds=img_ID, catIds = self.cat_IDs, iscrowd = False)
num_objects = min(len(ann_Ids), self.max_instances)
anns = coco.loadAnns(ann_Ids)
indices = sort_by_area(anns, self.max_instances)
bbox = {}
label = {}
i = 0
for n in indices:
instance = anns[n]
bbox[str(i)] = instance['bbox']
label[str(i)] = instance['category_id']
i+=1
annotation= {"imageID":img_ID, "num_objects":i, 'bbox': bbox, 'labels':label}
all_annotations[filename.split('.')[0]] = annotation
ann_path = self.root_path + "image_annotations.p"
pickle.dump( all_annotations, open(ann_path, "wb" ) )
print('Annotations saved in:', ann_path)
def sort_by_area(anns, num):
areas = numpy.zeros(len(anns))
for i, instance in enumerate(anns):
areas[i] = instance['area']
indices = numpy.argsort(areas)[-num:]
return indices[::-1]
class your_dataset_class(Dataset):
def __init__(self, path, class_list, coco):
self.class_list = class_list
self.folder = path
self.coco = coco
self.catIds = coco.getCatIds(catNms = class_list)
self.imgIds = coco.getImgIds(catIds = self.catIds)
self.categories = coco.loadCats(self.catIds)
labeldict = {}
for idx, in_class in enumerate(self.class_list):
for c in self.categories:
if c["name"] == in_class:
labeldict[c['id']] = idx
self.coco_labeldict = labeldict
annotation_path = path + 'image_annotations.p'
if os.path.exists(annotation_path) ==False:
print("Indexing dataset to compile annotations...")
dataset_annotations = build_annotations(path, class_list)
dataset_annotations()
self.data_anns = pickle.load(open(annotation_path, "rb" ))
def __len__(self):
g = glob.glob(self.folder + '*.jpg')
return (len(g))
def get_imagelabel(self, img_path, sc, max_objects = 5):
saved_filename = os.path.basename(img_path)
filename = saved_filename.split('.jpg')[0]
image_id = int(filename)
bbox_tensor = torch.zeros(max_objects, 4, dtype=torch.uint8)
label_tensor = torch.zeros(max_objects+1, dtype=torch.uint8) + len(self.class_list)
target_obj = self.data_anns[filename]
num_objects = target_obj['num_objects']
for n in range(num_objects):
[x,y,w,h] = target_obj['bbox'][str(n)]
bbox = [sc[1]*y, x*sc[0], sc[1]*(h), sc[0]*(w)]
bbox_tensor[n,:] = torch.tensor(numpy.array(bbox))
cat_label = target_obj['labels'][str(n)]
data_label = self.coco_labeldict[cat_label]
label_tensor[n] = torch.tensor(data_label)
return bbox_tensor, label_tensor
def __getitem__(self, item):
g = glob.glob(self.folder + '*.jpg')
im = PIL.Image.open(g[item])
im, scale_fac = rescale_factor(im, 128)
W, H = im.size
transformer = tvt.Compose([tvt.ToTensor(), tvt.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
im_array = torch.randint(0, 256, (3, H, W)).type(torch.uint8)
for i in range(H):
for j in range(W):
im_array[:, j, i] = torch.tensor(im.getpixel((i, j)))
im_scaled = im_array / im_array.max()
im_tf = transformer(numpy.transpose(im_scaled.numpy()))
num_classes = len(self.class_list)
bbox, label = self.get_imagelabel(g[item], scale_fac)
sample = {'im_ID': g[item],
'scale':scale_fac,
'image': im_tf,
'bbox' : bbox,
'label': label}
return sample
def rescale_factor(im_original, std_size):
raw_width, raw_height = im_original.size
im = im_original.resize((std_size, std_size), Image.BOX)
w_factor = std_size/raw_width
h_factor = std_size/raw_height
return (im, [w_factor, h_factor])
train_path = root_path + "Train/"
val_path = os.path.join(root_path, "Val/")
batch_size = 64
train_dataset = your_dataset_class(train_path, class_list, coco)
train_data_loader = torch.utils.data.DataLoader(dataset = train_dataset,
batch_size = batch_size,
shuffle = True,
num_workers= 2,
drop_last=True)
class SkipBlock(nn.Module):
def __init__(self,in_ch, out_ch, downsample = False):
super().__init__()
self.in_ch = in_ch
self.out_ch = out_ch
self.conv1 = nn.Conv2d(in_ch, out_ch, 3, stride = 1, padding = 1)
self.conv2 = nn.Conv2d(in_ch, out_ch, 3, padding = 1)
self.bnorm1 = nn.BatchNorm2d(out_ch)
self.bnorm2 = nn.BatchNorm2d(out_ch)
self.downsample_tf = downsample
self.downsampler = nn.Conv2d(in_ch, out_ch, 1, stride= 2)
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bnorm1(out)
out = F.relu(out)
if self.downsample_tf == True:
identity = self.downsampler(identity)
out = self.downsampler(out)
out += identity
else:
out = self.conv2(out)
out = self.bnorm2(out)
out = F.relu(out)
out += identity
return out
class MechEnet(nn.Module):
def __init__(self, num_classes, depth):
super().__init__()
self.depth = depth // 8
self.conv_initial = nn.Conv2d( 3, 64, 3, padding = 1)
self.pool = nn.MaxPool2d(2,2)
self.skipblock64_1 = nn.ModuleList()
for i in range(self.depth):
self.skipblock64_1.append( SkipBlock(64,64, downsample = False) )
self.skip_downsample = SkipBlock(64,64, downsample= True)
self.skipblock64_2 = nn.ModuleList()
for i in range(self.depth):
self.skipblock64_2.append( SkipBlock(64,64, downsample = False) )
self.fc_seqn = nn.Sequential(
nn.Linear(64*4*4, 3000),
nn.ReLU(inplace =True),
nn.Linear(3000,3000),
nn.ReLU(inplace =True),
nn.Linear(3000,8*8*(5*(5+3)))
)
def forward(self, x):
x = self.pool(F.relu(self.conv_initial(x)))
x1 = self.skip_downsample(x)
for i, skips in enumerate(self.skipblock64_1[self.depth//4 :]):
x1 = skips(x1)
x1 = self.skip_downsample(x1)
for i, skips in enumerate(self.skipblock64_1[:self.depth//4]):
x1 = skips(x1)
x1 = self.skip_downsample(x1)
for i, skips in enumerate(self.skipblock64_2[self.depth//4:]):
x1 = skips(x1)
x1 = self.skip_downsample(x1)
for i, skips in enumerate(self.skipblock64_2[:self.depth//4]):
x1 = skips(x1)
x1 = x1.view(x1.size(0),-1)
x1 = self.fc_seqn(x1)
return x1
class IoULoss(torch.nn.Module):
def __init__(self, weight=None, size_average=True):
super(IoULoss, self).__init__()
def forward(self, inputs, targets, smooth=1):
b_size = inputs.shape[0]
pred_unscrm = inputs.view(b_size, 8**2, 5, -1)
targ_unscrm = targets.view(b_size, 8**2, 5, -1)
pred_bbox = pred_unscrm[:,:,:,1:5]
targ_bbox = targ_unscrm[:,:,:,1:5]
intersection = targ_bbox*pred_bbox
union = targ_bbox + pred_bbox
J_idx = torch.div(intersection, union)
J_dist = 1.0-J_idx
return torch.sum(J_dist)
def run_code_for_training(net, lrate, mom, epochs, im_size, max_objects, yolo_interval = 16):
print('Beginning training for', epochs,'epochs...')
#criterion1 = torch.nn.CrossEntropyLoss()
criterion = torch.nn.MSELoss()
#criterion = IoULoss()
optimizer = torch.optim.SGD(net.parameters(), lr = lrate, momentum = mom)
loss_tracker = []
num_cells_image_height = im_size//yolo_interval
num_cells_image_width = im_size//yolo_interval
num_yolo_cells = num_cells_image_height*num_cells_image_width
print_iteration = 3
num_anchor_boxes = 5
yolo_tensor = torch.zeros(batch_size, num_yolo_cells, num_anchor_boxes, 1*5+3) #batch size, 8*8, 1*5+3 classes
class AnchorBox:
def __init__(self, AR, topleft, abox_h, abox_w, abox_idx):
self.AR = AR
self.topleft = topleft
self.abox_h = abox_h
self.abox_w = abox_w
self.abox_idx= abox_idx
device = torch.device("cuda:0")
for epoch in range(epochs):
print('\nEpoch %d training...' %(epoch + 1))
running_loss = 0.0
for i, data in enumerate(train_data_loader):
sample_batch = data['im_ID']
im_tensor = data["image"]
target_reg = data["bbox"].type(torch.FloatTensor)
target_clf = data["label"].type(torch.LongTensor)
optimizer.zero_grad()
im_tensor = im_tensor.to(device)
target_reg = target_reg.to(device)
target_clf = target_clf.to(device)
yolo_tensor = yolo_tensor.to(device)
obj_centers = {ibx :
{idx : None for idx in range(max_objects)}
for ibx in range(im_tensor.shape[0])}
anchor_boxes_1_1 = [[AnchorBox(1/1, (i*yolo_interval,j*yolo_interval), yolo_interval, yolo_interval, 0)
for i in range(0,num_cells_image_height)]
for j in range(0,num_cells_image_width)]
anchor_boxes_1_3 = [[AnchorBox(1/3, (i*yolo_interval,j*yolo_interval), yolo_interval, 3*yolo_interval, 1)
for i in range(0,num_cells_image_height)]
for j in range(0,num_cells_image_width)]
anchor_boxes_3_1 = [[AnchorBox(3/1, (i*yolo_interval,j*yolo_interval), 3*yolo_interval, yolo_interval, 2)
for i in range(0,num_cells_image_height)]
for j in range(0,num_cells_image_width)]
anchor_boxes_1_5 = [[AnchorBox(1/5, (i*yolo_interval,j*yolo_interval), yolo_interval, 5*yolo_interval, 3)
for i in range(0,num_cells_image_height)]
for j in range(0,num_cells_image_width)]
anchor_boxes_5_1 = [[AnchorBox(5/1, (i*yolo_interval,j*yolo_interval), 5*yolo_interval, yolo_interval, 4)
for i in range(0,num_cells_image_height)]
for j in range(0,num_cells_image_width)]
#Build the yolo tensor based on the bounding box and label tensors from the target/dataset
for b in range(im_tensor.shape[0]): # Loop through batch index
for idx in range(max_objects): # Loop through each object in the target tensor
height_center_bb = (target_reg[b][idx][1].item() + target_reg[b][idx][3].item()) // 2
width_center_bb = (target_reg[b][idx][0].item() + target_reg[b][idx][2].item()) // 2
obj_bb_height = target_reg[b][idx][3].item() - target_reg[b][idx][1].item()
obj_bb_width = target_reg[b][idx][2].item() - target_reg[b][idx][0].item()
obj_label = target_clf[b][idx].item()
if obj_label == 13:
obj_label = 4
eps = 1e-8
AR = float(obj_bb_height + eps) / float(obj_bb_width + eps)
cell_row_idx = int(height_center_bb // yolo_interval) ## for the i coordinate
cell_col_idx = int(width_center_bb // yolo_interval) ## for the j coordinates
if AR <= 0.2: ## (F)
anchbox = anchor_boxes_1_5[cell_row_idx][cell_col_idx]
elif AR <= 0.5:
anchbox = anchor_boxes_1_3[cell_row_idx][cell_col_idx]
elif AR <= 1.5:
anchbox = anchor_boxes_1_1[cell_row_idx][cell_col_idx]
elif AR <= 4:
anchbox = anchor_boxes_3_1[cell_row_idx][cell_col_idx]
elif AR > 4:
anchbox = anchor_boxes_5_1[cell_row_idx][cell_col_idx]
bh = float(obj_bb_height) / float(yolo_interval) ## (G)
bw = float(obj_bb_width) / float(yolo_interval)
obj_center_x = float(target_reg[b][idx][2].item() + target_reg[b][idx][0].item()) / 2.0
obj_center_y = float(target_reg[b][idx][3].item() + target_reg[b][idx][1].item()) / 2.0
yolocell_center_i = cell_row_idx*yolo_interval + float(yolo_interval) / 2.0
yolocell_center_j = cell_col_idx*yolo_interval + float(yolo_interval) / 2.0
del_x = float(obj_center_x - yolocell_center_j) / yolo_interval
del_y = float(obj_center_y - yolocell_center_i) / yolo_interval
yolo_vector = [0, del_x, del_y, bh, bw, 0, 0, 0]
if obj_label<4:
yolo_vector[4 + obj_label] = 1
yolo_vector[0] = 1
yolo_cell_index = cell_row_idx * num_cells_image_width + cell_col_idx
yolo_tensor[b, yolo_cell_index, anchbox.abox_idx] = torch.FloatTensor( yolo_vector )
yolo_tensor_flattened = yolo_tensor.view(im_tensor.shape[0], -1)
## Forward Pass
pred_yolo = net(im_tensor)
#pred_yolo = filter_yolo_tensor(pred_yolo, im_tensor.shape[0], num_yolo_cells, num_anchor_boxes)
loss = criterion(pred_yolo, yolo_tensor_flattened)
loss.backward(retain_graph = True)
pred_unscrm = pred_yolo.view(im_tensor.shape[0], 8**2, 5, -1)
sample_yolo_tensor = pred_unscrm
optimizer.step()
running_loss += loss.item()
if (i+1)%print_iteration ==0:
average_loss = running_loss/float(print_iteration)
print("[epoch: %d, batch: %5d] Avg Batch loss: %.4f" %(epoch + 1, i+1, average_loss))
loss_tracker = numpy.append(loss_tracker, average_loss)
running_loss = 0.0
return loss_tracker, sample_yolo_tensor, sample_batch
def filter_yolo_tensor(yolo_tensor, batch_size, num_yolo_cells, aboxes):
#loop through each yolo_cell_index in the prediction tensor
# if idx[0] of the yolo vector is less than 0.5, make the whole vector zero
zero_vec = torch.zeros(8)
print(yolo_tensor.shape)
for b in range(batch_size):
for num in range(num_yolo_cells):
for an in range(aboxes):
if yolo_tensor[b,num][an][0] < 0.5:
yolo_tensor[b,num][an][:] = torch.zeros(8)
return yolo_tensor
model = MechEnet(len(class_list), depth = 64)
lrate = 5e-3
mom = 0.5
epochs = 1
yolo_int = 16
im_size = 128
max_objects = 5
savepath = "MechEnet.pth"
model.load_state_dict(torch.load(savepath))
if torch.cuda.is_available():
device = torch.device("cuda:0")
model.cuda()
summary(model, (3, im_size, im_size))
training_loss, yolo_sample, batches = run_code_for_training(model, lrate, mom, epochs, im_size, max_objects, yolo_interval = yolo_int)
#savepath = "/content/drive/My Drive/Colab Notebooks/DeepLearning/hw06/MechEnet.pth"
#torch.save(model.state_dict(), savepath)
#pd.DataFrame(training_loss).to_csv("/content/drive/My Drive/Colab Notebooks/DeepLearning/hw06/loss.csv")
fig, ax = plt.subplots()
ax.plot(training_loss)
ax.set_title('Training loss')
ax.set_ylabel('Loss')
ax.set_xlabel('Iterations')
## Visualize prediction on training set
annotation_path = root_path + 'Train/'+ 'image_annotations.p'
data_anns = pickle.load(open(annotation_path, "rb" ))
def show_image(image_anns):
    img = coco.loadImgs(image_anns['imageID'])[0]
    I = io.imread(img['coco_url'])
    if len(I.shape) == 2:
        I = skimage.color.gray2rgb(I)
    catIds = coco.getCatIds(catNms= class_list)
    annIds = coco.getAnnIds(imgIds=image_anns['imageID'], catIds= catIds, iscrowd=False)
    anns = coco.loadAnns(annIds)
    image = numpy.uint8(I)
    for i in range(image_anns['num_objects']):
        [x,y,w,h] = image_anns['bbox'][str(i)]
        label = image_anns['labels'][str(i)]
        image = cv2.rectangle(image, (int(x), int(y)), (int(x +w), int(y + h)), (36,255,12), 2)
        class_label = coco_labels_inverse[label]
        image = cv2.putText(image, 'True ' + class_list[class_label], (int(x), int(y-10)), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (36,255,12), 2)
return image
bdx =37 #numpy.random.randint(0,64)
#55 #18 #5
img_loc = batches[bdx].split('/')[-1].split('.')[0]
rand_img = data_anns[img_loc]
image = show_image(rand_img)
g = glob.glob(root_path + 'Train/*.jpg')
for i in range(len(g)):
    if img_loc in g[i]:
        sdx = i
scale = train_dataset.__getitem__(sdx)['scale']
import math
im_considered = yolo_sample[bdx,:,:,:]
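# predicted yolo vectors for image bdx: one 8-element vector per (cell, anchor box) pair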
im_pred_anch = torch.zeros(64,8)
cell_pred = []
num_cell_width = 8
yolo_interval = 16
for i in range(im_considered.shape[0]):
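    # keep, for each cell, the anchor box with the highest predicted objectness score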
AR = torch.argmax(im_considered[i,:,0])
im_pred_anch[i,:] = im_considered[i,AR,:]
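    # draw a box only when the best anchor box is confident enough (objectness above 0.75)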
if im_pred_anch[i,0] > 0.75:
if AR == 0:
w,h = 1,1
elif AR == 1:
w,h = 1,3
elif AR == 2:
w,h = 3,1
elif AR == 3:
w,h = 1,5
elif AR == 4:
w,h = 5,1
row_idx = math.floor(i/num_cell_width)
col_idx = i%num_cell_width
yolo_box = im_pred_anch[i,1:5].cpu().detach().numpy()
x1 = ((row_idx + 0.5)*yolo_interval)/scale[0]
x2 = x1 + (w*yolo_interval)/scale[0]
y1 = (col_idx + 0.5)*yolo_interval/scale[1]
y2 = y1+ (h*yolo_interval)/scale[1]
label = torch.argmax(im_pred_anch[i,5:]).cpu().detach().numpy()
pred_label = str('Predicted ' + class_list[label])
temp = [pred_label, x1,y1, x2,y2]
cell_pred = numpy.append(cell_pred, temp)
image = cv2.rectangle(image, (int(x1), int(y1)), (int(x2), int(y2)), (255,0,0), 2)
image = cv2.putText(image, pred_label, (int(x1), int(y1-10)), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255,0,0), 2)
fig, ax = plt.subplots(1,1, dpi = 150)
ax.imshow(image)
ax.set_axis_off()
plt.axis('tight')
plt.show()
| false
| true
|
7904a4bffb91e3d6fbb33c163e21b3b5e6eeb747
| 86
|
py
|
Python
|
.vim/template/python/base-atcoder.py
|
reireias/dotfiles
|
7e25c5fcb9203c6ddd1e280ea3bad577c3af28f6
|
[
"MIT"
] | 24
|
2017-04-27T09:21:49.000Z
|
2022-01-10T16:44:34.000Z
|
skale/migrate.py
|
skalenetwork/skaled-tests
|
b1cbbff9888a6854f04f58917ab3400395933f5a
|
[
"MIT"
] | 7
|
2019-11-13T14:54:37.000Z
|
2022-03-01T01:05:13.000Z
|
skale/migrate.py
|
skalenetwork/skaled-tests
|
b1cbbff9888a6854f04f58917ab3400395933f5a
|
[
"MIT"
] | 12
|
2018-01-29T08:27:57.000Z
|
2021-07-25T04:55:03.000Z
|
#!/usr/bin/env python3
def main():
pass
if __name__ == '__main__':
main()
| 8.6
| 26
| 0.569767
|
def main():
pass
if __name__ == '__main__':
main()
| true
| true
|
7904a55033cf1b02c79576dfe78b8b0e9c0e6741
| 1,770
|
py
|
Python
|
examples/button.py
|
NextLight/drawy
|
e7cd8f9607a52937df589e936f4dcce0ca1306aa
|
[
"MIT"
] | null | null | null |
examples/button.py
|
NextLight/drawy
|
e7cd8f9607a52937df589e936f4dcce0ca1306aa
|
[
"MIT"
] | null | null | null |
examples/button.py
|
NextLight/drawy
|
e7cd8f9607a52937df589e936f4dcce0ca1306aa
|
[
"MIT"
] | null | null | null |
from drawy import *
class Button:
def __init__(self, text, click_handler, point, width, height, *, hide=False, do_highlight=True, background_color='gray', highlight_color='lightgray', text_color='black', border_color='black'):
self.text = text
self.click_handler = click_handler
self.point = Point(*point)
self.width = width
self.height = height
self.hide = hide
self.do_highlight = do_highlight
self.background_color = background_color
self.highlight_color = highlight_color
self.text_color = text_color
self.border_color = border_color
def is_point_inside(self, point: Point):
return point.is_inside_rectangle(self.point, self.width, self.height)
def draw(self):
if self.hide:
return
background = self.background_color
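        # switch to the highlight color while the mouse is over the button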
if self.do_highlight and self.is_point_inside(MOUSE_POSITION):
background = self.highlight_color
draw_rectangle(self.point, self.width, self.height, background)
draw_rectangle(self.point, self.width, self.height, self.border_color, fill=False, border_thickness=4)
draw_text(self.text, self.point + Point(self.width, self.height) / 2, self.text_color)
def on_click(self):
if self.is_point_inside(MOUSE_POSITION) and self.click_handler:
self.click_handler()
BUTTONS = [
Button("SCORE", lambda: print('score!'), (100, 100), 200, 60),
Button("test", lambda: print("test!"), (100, 300), 200, 60),
]
def init():
pass
def draw():
for b in BUTTONS:
b.draw()
def on_click():
for b in BUTTONS:
b.on_click()
run(background_color='#ccc', title='Buttons test')
| 34.038462
| 197
| 0.640678
|
from drawy import *
class Button:
def __init__(self, text, click_handler, point, width, height, *, hide=False, do_highlight=True, background_color='gray', highlight_color='lightgray', text_color='black', border_color='black'):
self.text = text
self.click_handler = click_handler
self.point = Point(*point)
self.width = width
self.height = height
self.hide = hide
self.do_highlight = do_highlight
self.background_color = background_color
self.highlight_color = highlight_color
self.text_color = text_color
self.border_color = border_color
def is_point_inside(self, point: Point):
return point.is_inside_rectangle(self.point, self.width, self.height)
def draw(self):
if self.hide:
return
background = self.background_color
if self.do_highlight and self.is_point_inside(MOUSE_POSITION):
background = self.highlight_color
draw_rectangle(self.point, self.width, self.height, background)
draw_rectangle(self.point, self.width, self.height, self.border_color, fill=False, border_thickness=4)
draw_text(self.text, self.point + Point(self.width, self.height) / 2, self.text_color)
def on_click(self):
if self.is_point_inside(MOUSE_POSITION) and self.click_handler:
self.click_handler()
BUTTONS = [
Button("SCORE", lambda: print('score!'), (100, 100), 200, 60),
Button("test", lambda: print("test!"), (100, 300), 200, 60),
]
def init():
pass
def draw():
for b in BUTTONS:
b.draw()
def on_click():
for b in BUTTONS:
b.on_click()
run(background_color='#ccc', title='Buttons test')
| true
| true
|
7904a6e26a9b0119d1b29ca665ce26547d0afb9c
| 798
|
py
|
Python
|
app/rest/serializers.py
|
WishesFire/Epam-Python-Project
|
d54bbe48d539b0810d9b42b0839a64b035021c6d
|
[
"Apache-2.0"
] | 1
|
2021-11-18T11:57:02.000Z
|
2021-11-18T11:57:02.000Z
|
app/rest/serializers.py
|
WishesFire/Epam-project
|
d54bbe48d539b0810d9b42b0839a64b035021c6d
|
[
"Apache-2.0"
] | null | null | null |
app/rest/serializers.py
|
WishesFire/Epam-project
|
d54bbe48d539b0810d9b42b0839a64b035021c6d
|
[
"Apache-2.0"
] | null | null | null |
"""
This module used for serializing data
CategorySchema - data from Category model
VacancySchema - data from Vacancy model
"""
# pylint: disable=too-many-ancestors
# pylint: disable=missing-class-docstring
# pylint: disable=too-few-public-methods
from app import ma
from app.models.model import Category, Vacancy
class CategorySchema(ma.SQLAlchemyAutoSchema):
"""
Used for serialize Category data
"""
class Meta:
model = Category
fields = ("name", )
class VacancySchema(ma.SQLAlchemyAutoSchema):
"""
Used for serialize Vacancy data
"""
class Meta:
model = Vacancy
fields = ("name", "salary", "info", "contacts")
ordered = True
categories_schema = CategorySchema(many=True)
vacancies_schema = VacancySchema(many=True)
| 22.166667
| 55
| 0.692982
|
from app import ma
from app.models.model import Category, Vacancy
class CategorySchema(ma.SQLAlchemyAutoSchema):
class Meta:
model = Category
fields = ("name", )
class VacancySchema(ma.SQLAlchemyAutoSchema):
class Meta:
model = Vacancy
fields = ("name", "salary", "info", "contacts")
ordered = True
categories_schema = CategorySchema(many=True)
vacancies_schema = VacancySchema(many=True)
| true
| true
|
7904a85d5c7963b95e5adf6c7e63db126115a3f3
| 8,302
|
py
|
Python
|
facenet/align/align_dataset_mtcnn.py
|
btlk/facenet
|
fd531331b962ec4fd4aac534debf9a5bbf7e8c4a
|
[
"MIT"
] | null | null | null |
facenet/align/align_dataset_mtcnn.py
|
btlk/facenet
|
fd531331b962ec4fd4aac534debf9a5bbf7e8c4a
|
[
"MIT"
] | null | null | null |
facenet/align/align_dataset_mtcnn.py
|
btlk/facenet
|
fd531331b962ec4fd4aac534debf9a5bbf7e8c4a
|
[
"MIT"
] | null | null | null |
"""Performs face alignment and stores face thumbnails in the output directory."""
# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from scipy import misc
import sys
import os
import argparse
import tensorflow as tf
import numpy as np
import facenet
from detect_face import create_mtcnn, detect_face
import random
from time import sleep
def main(args):
sleep(random.random())
output_dir = os.path.expanduser(args.output_dir)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Store some git revision info in a text file in the log directory
src_path,_ = os.path.split(os.path.realpath(__file__))
facenet.store_revision_info(src_path, output_dir, ' '.join(sys.argv))
dataset = facenet.get_dataset(args.input_dir, False)
print('Creating networks and loading parameters')
with tf.Graph().as_default():
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
with sess.as_default():
pnet, rnet, onet = create_mtcnn(sess, None)
minsize = 20 # minimum size of face
    threshold = [ 0.6, 0.7, 0.7 ]  # three steps' threshold
factor = 0.709 # scale factor
# Add a random key to the filename to allow alignment using multiple processes
random_key = np.random.randint(0, high=99999)
bounding_boxes_filename = os.path.join(output_dir, 'bounding_boxes_%05d.txt' % random_key)
with open(bounding_boxes_filename, "w") as text_file:
nrof_images_total = 0
nrof_successfully_aligned = 0
if args.random_order:
random.shuffle(dataset)
for cls in dataset:
output_class_dir = os.path.join(output_dir, cls.name)
if not os.path.exists(output_class_dir):
os.makedirs(output_class_dir)
if args.random_order:
random.shuffle(cls.image_paths)
for image_path in cls.image_paths:
nrof_images_total += 1
filename = os.path.splitext(os.path.split(image_path)[1])[0]
output_filename = os.path.join(output_class_dir, filename+'.png')
print(image_path)
if not os.path.exists(output_filename):
try:
img = misc.imread(image_path)
except (IOError, ValueError, IndexError) as e:
errorMessage = '{}: {}'.format(image_path, e)
print(errorMessage)
else:
if img.ndim<2:
print('Unable to align "%s"' % image_path)
text_file.write('%s\n' % (output_filename))
continue
if img.ndim == 2:
img = facenet.to_rgb(img)
img = img[:,:,0:3]
bounding_boxes, _ = detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
nrof_faces = bounding_boxes.shape[0]
if nrof_faces>0:
det = bounding_boxes[:,0:4]
det_arr = []
img_size = np.asarray(img.shape)[0:2]
if nrof_faces>1:
if args.detect_multiple_faces:
for i in range(nrof_faces):
det_arr.append(np.squeeze(det[i]))
else:
bounding_box_size = (det[:,2]-det[:,0])*(det[:,3]-det[:,1])
img_center = img_size / 2
offsets = np.vstack([ (det[:,0]+det[:,2])/2-img_center[1], (det[:,1]+det[:,3])/2-img_center[0] ])
offset_dist_squared = np.sum(np.power(offsets,2.0),0)
index = np.argmax(bounding_box_size-offset_dist_squared*2.0) # some extra weight on the centering
det_arr.append(det[index,:])
else:
det_arr.append(np.squeeze(det))
for i, det in enumerate(det_arr):
det = np.squeeze(det)
bb = np.zeros(4, dtype=np.int32)
bb[0] = np.maximum(det[0]-args.margin/2, 0)
bb[1] = np.maximum(det[1]-args.margin/2, 0)
bb[2] = np.minimum(det[2]+args.margin/2, img_size[1])
bb[3] = np.minimum(det[3]+args.margin/2, img_size[0])
cropped = img[bb[1]:bb[3],bb[0]:bb[2],:]
scaled = misc.imresize(cropped, (args.image_size, args.image_size), interp='bilinear')
nrof_successfully_aligned += 1
filename_base, file_extension = os.path.splitext(output_filename)
if args.detect_multiple_faces:
output_filename_n = "{}_{}{}".format(filename_base, i, file_extension)
else:
output_filename_n = "{}{}".format(filename_base, file_extension)
misc.imsave(output_filename_n, scaled)
text_file.write('%s %d %d %d %d\n' % (output_filename_n, bb[0], bb[1], bb[2], bb[3]))
else:
print('Unable to align "%s"' % image_path)
text_file.write('%s\n' % (output_filename))
print('Total number of images: %d' % nrof_images_total)
print('Number of successfully aligned images: %d' % nrof_successfully_aligned)
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--input_dir', type=str, help='Directory with unaligned images.')
parser.add_argument('--output_dir', type=str, help='Directory with aligned face thumbnails.')
parser.add_argument('--image_size', type=int,
help='Image size (height, width) in pixels.', default=182)
parser.add_argument('--margin', type=int,
help='Margin for the crop around the bounding box (height, width) in pixels.', default=44)
parser.add_argument('--random_order',
help='Shuffles the order of images to enable alignment using multiple processes.', action='store_true')
parser.add_argument('--gpu_memory_fraction', type=float,
help='Upper bound on the amount of GPU memory that will be used by the process.', default=1.0)
parser.add_argument('--detect_multiple_faces', type=bool,
help='Detect and align multiple faces per image.', default=False)
return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
| 51.8875
| 133
| 0.57456
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from scipy import misc
import sys
import os
import argparse
import tensorflow as tf
import numpy as np
import facenet
from detect_face import create_mtcnn, detect_face
import random
from time import sleep
def main(args):
sleep(random.random())
output_dir = os.path.expanduser(args.output_dir)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
src_path,_ = os.path.split(os.path.realpath(__file__))
facenet.store_revision_info(src_path, output_dir, ' '.join(sys.argv))
dataset = facenet.get_dataset(args.input_dir, False)
print('Creating networks and loading parameters')
with tf.Graph().as_default():
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
with sess.as_default():
pnet, rnet, onet = create_mtcnn(sess, None)
minsize = 20
threshold = [ 0.6, 0.7, 0.7 ]
factor = 0.709 # scale factor
# Add a random key to the filename to allow alignment using multiple processes
random_key = np.random.randint(0, high=99999)
bounding_boxes_filename = os.path.join(output_dir, 'bounding_boxes_%05d.txt' % random_key)
with open(bounding_boxes_filename, "w") as text_file:
nrof_images_total = 0
nrof_successfully_aligned = 0
if args.random_order:
random.shuffle(dataset)
for cls in dataset:
output_class_dir = os.path.join(output_dir, cls.name)
if not os.path.exists(output_class_dir):
os.makedirs(output_class_dir)
if args.random_order:
random.shuffle(cls.image_paths)
for image_path in cls.image_paths:
nrof_images_total += 1
filename = os.path.splitext(os.path.split(image_path)[1])[0]
output_filename = os.path.join(output_class_dir, filename+'.png')
print(image_path)
if not os.path.exists(output_filename):
try:
img = misc.imread(image_path)
except (IOError, ValueError, IndexError) as e:
errorMessage = '{}: {}'.format(image_path, e)
print(errorMessage)
else:
if img.ndim<2:
print('Unable to align "%s"' % image_path)
text_file.write('%s\n' % (output_filename))
continue
if img.ndim == 2:
img = facenet.to_rgb(img)
img = img[:,:,0:3]
bounding_boxes, _ = detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
nrof_faces = bounding_boxes.shape[0]
if nrof_faces>0:
det = bounding_boxes[:,0:4]
det_arr = []
img_size = np.asarray(img.shape)[0:2]
if nrof_faces>1:
if args.detect_multiple_faces:
for i in range(nrof_faces):
det_arr.append(np.squeeze(det[i]))
else:
bounding_box_size = (det[:,2]-det[:,0])*(det[:,3]-det[:,1])
img_center = img_size / 2
offsets = np.vstack([ (det[:,0]+det[:,2])/2-img_center[1], (det[:,1]+det[:,3])/2-img_center[0] ])
offset_dist_squared = np.sum(np.power(offsets,2.0),0)
index = np.argmax(bounding_box_size-offset_dist_squared*2.0) # some extra weight on the centering
det_arr.append(det[index,:])
else:
det_arr.append(np.squeeze(det))
for i, det in enumerate(det_arr):
det = np.squeeze(det)
bb = np.zeros(4, dtype=np.int32)
bb[0] = np.maximum(det[0]-args.margin/2, 0)
bb[1] = np.maximum(det[1]-args.margin/2, 0)
bb[2] = np.minimum(det[2]+args.margin/2, img_size[1])
bb[3] = np.minimum(det[3]+args.margin/2, img_size[0])
cropped = img[bb[1]:bb[3],bb[0]:bb[2],:]
scaled = misc.imresize(cropped, (args.image_size, args.image_size), interp='bilinear')
nrof_successfully_aligned += 1
filename_base, file_extension = os.path.splitext(output_filename)
if args.detect_multiple_faces:
output_filename_n = "{}_{}{}".format(filename_base, i, file_extension)
else:
output_filename_n = "{}{}".format(filename_base, file_extension)
misc.imsave(output_filename_n, scaled)
text_file.write('%s %d %d %d %d\n' % (output_filename_n, bb[0], bb[1], bb[2], bb[3]))
else:
print('Unable to align "%s"' % image_path)
text_file.write('%s\n' % (output_filename))
print('Total number of images: %d' % nrof_images_total)
print('Number of successfully aligned images: %d' % nrof_successfully_aligned)
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--input_dir', type=str, help='Directory with unaligned images.')
parser.add_argument('--output_dir', type=str, help='Directory with aligned face thumbnails.')
parser.add_argument('--image_size', type=int,
help='Image size (height, width) in pixels.', default=182)
parser.add_argument('--margin', type=int,
help='Margin for the crop around the bounding box (height, width) in pixels.', default=44)
parser.add_argument('--random_order',
help='Shuffles the order of images to enable alignment using multiple processes.', action='store_true')
parser.add_argument('--gpu_memory_fraction', type=float,
help='Upper bound on the amount of GPU memory that will be used by the process.', default=1.0)
parser.add_argument('--detect_multiple_faces', type=bool,
help='Detect and align multiple faces per image.', default=False)
return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
| true
| true
|
7904a8c11b4b2be45e4c03e64545a4ce832791ee
| 5,287
|
py
|
Python
|
libs/sdc_etl_libs/test/dataframe_tests/sdc_dataframe_sql.py
|
darknegma/docker-airflow
|
44e3d02d7ac43c8876145ae47acfbbbde67230df
|
[
"Apache-2.0"
] | null | null | null |
libs/sdc_etl_libs/test/dataframe_tests/sdc_dataframe_sql.py
|
darknegma/docker-airflow
|
44e3d02d7ac43c8876145ae47acfbbbde67230df
|
[
"Apache-2.0"
] | 3
|
2021-03-31T19:26:57.000Z
|
2021-12-13T20:33:01.000Z
|
libs/sdc_etl_libs/test/dataframe_tests/sdc_dataframe_sql.py
|
darknegma/docker-airflow
|
44e3d02d7ac43c8876145ae47acfbbbde67230df
|
[
"Apache-2.0"
] | null | null | null |
import sys
import math
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/../../../")
from sdc_etl_libs.sdc_dataframe.Dataframe import *
import pandas as pd
import numpy as np
import json
import pytest
def test_generate_insert_query_ddl(mocker):
test_schema = """
{
"namespace": "TimeControl",
"type": "object",
"name": "languages",
"country_code": "USA",
"data_sink": {"type":"snowflake", "database": "HRIS_DATA", "table_name": "LANGUAGES", "schema": "TIMECONTROL"},
"data_source": {"type": "api", "base_url": "https://smiledirectclub.timecontrol.net/api/v1"},
"fields": [
{"name":"_METADATA","type":{"type":"string","logical_type":"json"}},
{"name":"KEY","type":{"type":"int"},"sf_merge_key": true},
{"name":"NAME","type":{"type":"string"}},
{"name":"DESCRIPTION","type":{"type":"string"}},
{"name":"CULTURE","type":{"type":"string"}},
{"name":"_SF_INSERTEDDATETIME","type":{"type":"string","logical_type":"datetime", "add_column": true }}
]
}"""
test_data = """
[{"_metadata": {"links": [{"id": "9",
"rel": "self",
"href": "/api/v1/languages/9",
"code": "Ceština"}]},
"Key": 9,
"Name": "Ceština",
"Description": "Czech",
"Culture": "cs"},
{"_metadata": {"links": [{"id": "10",
"rel": "self",
"href": "/api/v1/languages/10",
"code": "This"}]},
"Key": 9,
"Name": "This",
"Description": "Is",
"Culture": "ze"}]
"""
df = Dataframe(SDCDFTypes.PANDAS, test_schema)
df.load_data(json.loads(test_data))
query = df.generate_insert_query_ddl(df.df)
assert query == '("CULTURE", "DESCRIPTION", "KEY", "NAME", "_METADATA", "_SF_INSERTEDDATETIME") select Column1 as "CULTURE", Column2 as "DESCRIPTION", Column3 as "KEY", Column4 as "NAME", PARSE_JSON(Column5) as "_METADATA", Column6 as "_SF_INSERTEDDATETIME" from values '
def test_generate_insert_query_values(mocker):
test_schema = """
{
"namespace": "TimeControl",
"type": "object",
"name": "languages",
"country_code": "USA",
"data_sink": {"type":"snowflake", "database": "HRIS_DATA", "table_name": "LANGUAGES", "schema": "TIMECONTROL"},
"data_source": {"type": "api", "base_url": "https://smiledirectclub.timecontrol.net/api/v1"},
"fields": [
{"name":"_METADATA","type":{"type":"string","logical_type":"json"}},
{"name":"KEY","type":{"type":"int"},"sf_merge_key": true},
{"name":"NAME","type":{"type":"string"}},
{"name":"DESCRIPTION","type":{"type":"string"}},
{"name":"CULTURE","type":{"type":"string"}}
]
}"""
test_data = """
[{"_metadata": {"links": [{"id": "9",
"rel": "self",
"href": "/api/v1/languages/9",
"code": "Ceština"}]},
"Key": 9,
"Name": "Ceština",
"Description": "Czech",
"Culture": "cs"},
{"_metadata": {"links": [{"id": "10",
"rel": "self",
"href": "/api/v1/languages/10",
"code": "This"}]},
"Key": 9,
"Name": "This",
"Description": "Is",
"Culture": "ze"}]
"""
df = Dataframe(SDCDFTypes.PANDAS, test_schema)
df.load_data(json.loads(test_data))
query = df.generate_insert_query_values(df.df)
assert query == "('cs', 'Czech', '9', 'Ceština', '{'links': [{'id': '9', 'rel': 'self', 'href': '/api/v1/languages/9', 'code': 'Ceština'}]}'), ('ze', 'Is', '9', 'This', '{'links': [{'id': '10', 'rel': 'self', 'href': '/api/v1/languages/10', 'code': 'This'}]}'), "
def test_convert_columns_to_json(mocker):
test_schema = """
{
"namespace": "TimeControl",
"type": "object",
"name": "languages",
"country_code": "USA",
"data_sink": {"type":"snowflake", "database": "HRIS_DATA",
"table_name": "LANGUAGES", "schema": "TIMECONTROL"},
"data_source": {"type": "api", "base_url":
"https://smiledirectclub.timecontrol.net/api/v1"},
"fields": [
{"name":"_METADATA","type":{"type":"string","logical_type":"json"}},
{"name":"KEY","type":{"type":"int"},"sf_merge_key": true},
{"name":"NAME","type":{"type":"string"}},
{"name":"DESCRIPTION","type":{"type":"string"}},
{"name":"CULTURE","type":{"type":"string"}}
]
}"""
test_data = """
[{"_metadata": {"links": [{"id": "9",
"rel": "self",
"href": "/api/v1/languages/9",
"code": "Ceština"}]},
"Key": 9,
"Name": "Ceština",
"Description": "Czech",
"Culture": "cs"},
{"_metadata": {"links": [{"id": "10",
"rel": "self",
"href": "/api/v1/languages/10",
"code": "This"}]},
"Key": 9,
"Name": "This",
"Description": "Is",
"Culture": "ze"}]
"""
df = Dataframe(SDCDFTypes.PANDAS, test_schema)
df.load_data(json.loads(test_data))
data_before = df.df["_METADATA"][0]
df.convert_columns_to_json()
data_after = df.df["_METADATA"][0]
pytest.assume(data_before == "{'links': [{'id': '9', 'rel': 'self', 'href': '/api/v1/languages/9', 'code': 'Ceština'}]}")
pytest.assume(data_after == '{"links": [{"id": "9", "rel": "self", "href": "/api/v1/languages/9", "code": "Ce\\u0161tina"}]}')
| 35.722973
| 275
| 0.538491
|
import sys
import math
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/../../../")
from sdc_etl_libs.sdc_dataframe.Dataframe import *
import pandas as pd
import numpy as np
import json
import pytest
def test_generate_insert_query_ddl(mocker):
test_schema = """
{
"namespace": "TimeControl",
"type": "object",
"name": "languages",
"country_code": "USA",
"data_sink": {"type":"snowflake", "database": "HRIS_DATA", "table_name": "LANGUAGES", "schema": "TIMECONTROL"},
"data_source": {"type": "api", "base_url": "https://smiledirectclub.timecontrol.net/api/v1"},
"fields": [
{"name":"_METADATA","type":{"type":"string","logical_type":"json"}},
{"name":"KEY","type":{"type":"int"},"sf_merge_key": true},
{"name":"NAME","type":{"type":"string"}},
{"name":"DESCRIPTION","type":{"type":"string"}},
{"name":"CULTURE","type":{"type":"string"}},
{"name":"_SF_INSERTEDDATETIME","type":{"type":"string","logical_type":"datetime", "add_column": true }}
]
}"""
test_data = """
[{"_metadata": {"links": [{"id": "9",
"rel": "self",
"href": "/api/v1/languages/9",
"code": "Ceština"}]},
"Key": 9,
"Name": "Ceština",
"Description": "Czech",
"Culture": "cs"},
{"_metadata": {"links": [{"id": "10",
"rel": "self",
"href": "/api/v1/languages/10",
"code": "This"}]},
"Key": 9,
"Name": "This",
"Description": "Is",
"Culture": "ze"}]
"""
df = Dataframe(SDCDFTypes.PANDAS, test_schema)
df.load_data(json.loads(test_data))
query = df.generate_insert_query_ddl(df.df)
assert query == '("CULTURE", "DESCRIPTION", "KEY", "NAME", "_METADATA", "_SF_INSERTEDDATETIME") select Column1 as "CULTURE", Column2 as "DESCRIPTION", Column3 as "KEY", Column4 as "NAME", PARSE_JSON(Column5) as "_METADATA", Column6 as "_SF_INSERTEDDATETIME" from values '
def test_generate_insert_query_values(mocker):
test_schema = """
{
"namespace": "TimeControl",
"type": "object",
"name": "languages",
"country_code": "USA",
"data_sink": {"type":"snowflake", "database": "HRIS_DATA", "table_name": "LANGUAGES", "schema": "TIMECONTROL"},
"data_source": {"type": "api", "base_url": "https://smiledirectclub.timecontrol.net/api/v1"},
"fields": [
{"name":"_METADATA","type":{"type":"string","logical_type":"json"}},
{"name":"KEY","type":{"type":"int"},"sf_merge_key": true},
{"name":"NAME","type":{"type":"string"}},
{"name":"DESCRIPTION","type":{"type":"string"}},
{"name":"CULTURE","type":{"type":"string"}}
]
}"""
test_data = """
[{"_metadata": {"links": [{"id": "9",
"rel": "self",
"href": "/api/v1/languages/9",
"code": "Ceština"}]},
"Key": 9,
"Name": "Ceština",
"Description": "Czech",
"Culture": "cs"},
{"_metadata": {"links": [{"id": "10",
"rel": "self",
"href": "/api/v1/languages/10",
"code": "This"}]},
"Key": 9,
"Name": "This",
"Description": "Is",
"Culture": "ze"}]
"""
df = Dataframe(SDCDFTypes.PANDAS, test_schema)
df.load_data(json.loads(test_data))
query = df.generate_insert_query_values(df.df)
assert query == "('cs', 'Czech', '9', 'Ceština', '{'links': [{'id': '9', 'rel': 'self', 'href': '/api/v1/languages/9', 'code': 'Ceština'}]}'), ('ze', 'Is', '9', 'This', '{'links': [{'id': '10', 'rel': 'self', 'href': '/api/v1/languages/10', 'code': 'This'}]}'), "
def test_convert_columns_to_json(mocker):
test_schema = """
{
"namespace": "TimeControl",
"type": "object",
"name": "languages",
"country_code": "USA",
"data_sink": {"type":"snowflake", "database": "HRIS_DATA",
"table_name": "LANGUAGES", "schema": "TIMECONTROL"},
"data_source": {"type": "api", "base_url":
"https://smiledirectclub.timecontrol.net/api/v1"},
"fields": [
{"name":"_METADATA","type":{"type":"string","logical_type":"json"}},
{"name":"KEY","type":{"type":"int"},"sf_merge_key": true},
{"name":"NAME","type":{"type":"string"}},
{"name":"DESCRIPTION","type":{"type":"string"}},
{"name":"CULTURE","type":{"type":"string"}}
]
}"""
test_data = """
[{"_metadata": {"links": [{"id": "9",
"rel": "self",
"href": "/api/v1/languages/9",
"code": "Ceština"}]},
"Key": 9,
"Name": "Ceština",
"Description": "Czech",
"Culture": "cs"},
{"_metadata": {"links": [{"id": "10",
"rel": "self",
"href": "/api/v1/languages/10",
"code": "This"}]},
"Key": 9,
"Name": "This",
"Description": "Is",
"Culture": "ze"}]
"""
df = Dataframe(SDCDFTypes.PANDAS, test_schema)
df.load_data(json.loads(test_data))
data_before = df.df["_METADATA"][0]
df.convert_columns_to_json()
data_after = df.df["_METADATA"][0]
pytest.assume(data_before == "{'links': [{'id': '9', 'rel': 'self', 'href': '/api/v1/languages/9', 'code': 'Ceština'}]}")
pytest.assume(data_after == '{"links": [{"id": "9", "rel": "self", "href": "/api/v1/languages/9", "code": "Ce\\u0161tina"}]}')
| true
| true
|
7904a934ee4c0d7f813b2c3b6ed46871ce61bd49
| 5,855
|
py
|
Python
|
netdev/vendors/junos_like.py
|
maliciousgroup/netdev
|
e2585ac24891cba172fc2056e9868e1d7c41ddc2
|
[
"Apache-2.0"
] | 199
|
2016-06-24T14:00:33.000Z
|
2022-02-14T07:48:44.000Z
|
netdev/vendors/junos_like.py
|
maliciousgroup/netdev
|
e2585ac24891cba172fc2056e9868e1d7c41ddc2
|
[
"Apache-2.0"
] | 55
|
2017-05-08T10:01:26.000Z
|
2021-07-02T00:54:33.000Z
|
netdev/vendors/junos_like.py
|
maliciousgroup/netdev
|
e2585ac24891cba172fc2056e9868e1d7c41ddc2
|
[
"Apache-2.0"
] | 54
|
2016-12-29T13:28:00.000Z
|
2022-03-01T04:58:19.000Z
|
"""
JunOSLikeDevice Class is an abstract class for working with Juniper JunOS like devices
Connection methods are based upon AsyncSSH and should be run in an asyncio loop
"""
import re
from netdev.logger import logger
from netdev.vendors.base import BaseDevice
class JunOSLikeDevice(BaseDevice):
"""
JunOSLikeDevice Class for working with Juniper JunOS like devices
    Juniper JunOS like devices have several concepts:
    * shell mode (csh). This is the csh shell for FreeBSD. This mode is not covered by this class.
    * cli mode (specific shell). The entire configuration is usually done in this shell:
        * operation mode. This mode is used for getting information from the device
        * configuration mode. This mode is used for configuring the system
"""
_delimiter_list = ["%", ">", "#"]
"""All this characters will stop reading from buffer. It mean the end of device prompt"""
_pattern = r"\w+(\@[\-\w]*)?[{delimiters}]"
"""Pattern for using in reading buffer. When it found processing ends"""
_disable_paging_command = "set cli screen-length 0"
"""Command for disabling paging"""
_config_enter = "configure"
"""Command for entering to configuration mode"""
_config_exit = "exit configuration-mode"
"""Command for existing from configuration mode to privilege exec"""
_config_check = "#"
"""Checking string in prompt. If it's exist im prompt - we are in configuration mode"""
_commit_command = "commit"
"""Command for committing changes"""
_commit_comment_command = "commit comment {}"
"""Command for committing changes with comment"""
async def _set_base_prompt(self):
"""
        Setting two important vars:
        base_prompt - textual prompt in CLI (usually username or hostname)
        base_pattern - regexp for finding the end of a command. It's a platform-specific parameter
        For JunOS devices base_pattern is "user(@[hostname])?[>|#]"
"""
logger.info("Host {}: Setting base prompt".format(self._host))
prompt = await self._find_prompt()
prompt = prompt[:-1]
# Strip off trailing terminator
if "@" in prompt:
prompt = prompt.split("@")[1]
self._base_prompt = prompt
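        # escape each prompt terminator and join them into a regex alternation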
delimiters = map(re.escape, type(self)._delimiter_list)
delimiters = r"|".join(delimiters)
base_prompt = re.escape(self._base_prompt[:12])
pattern = type(self)._pattern
self._base_pattern = pattern.format(delimiters=delimiters)
logger.debug("Host {}: Base Prompt: {}".format(self._host, self._base_prompt))
logger.debug("Host {}: Base Pattern: {}".format(self._host, self._base_pattern))
return self._base_prompt
async def check_config_mode(self):
"""Check if are in configuration mode. Return boolean"""
logger.info("Host {}: Checking configuration mode".format(self._host))
check_string = type(self)._config_check
self._stdin.write(self._normalize_cmd("\n"))
output = await self._read_until_prompt()
return check_string in output
async def config_mode(self):
"""Enter to configuration mode"""
logger.info("Host {}: Entering to configuration mode".format(self._host))
output = ""
config_enter = type(self)._config_enter
if not await self.check_config_mode():
self._stdin.write(self._normalize_cmd(config_enter))
output += await self._read_until_prompt()
if not await self.check_config_mode():
raise ValueError("Failed to enter to configuration mode")
return output
async def exit_config_mode(self):
"""Exit from configuration mode"""
logger.info("Host {}: Exiting from configuration mode".format(self._host))
output = ""
config_exit = type(self)._config_exit
if await self.check_config_mode():
self._stdin.write(self._normalize_cmd(config_exit))
output += await self._read_until_prompt()
if await self.check_config_mode():
raise ValueError("Failed to exit from configuration mode")
return output
async def send_config_set(
self,
config_commands=None,
with_commit=True,
commit_comment="",
exit_config_mode=True,
):
"""
Sending configuration commands to device
By default automatically exits/enters configuration mode.
:param list config_commands: iterable string list with commands for applying to network devices in system view
        :param bool with_commit: if true it commits all changes after applying all config_commands
:param string commit_comment: message for configuration commit
:param bool exit_config_mode: If true it will quit from configuration mode automatically
:return: The output of these commands
"""
if config_commands is None:
return ""
# Send config commands
output = await self.config_mode()
output += await super().send_config_set(config_commands=config_commands)
if with_commit:
commit = type(self)._commit_command
if commit_comment:
commit = type(self)._commit_comment_command.format(commit_comment)
self._stdin.write(self._normalize_cmd(commit))
output += await self._read_until_prompt()
if exit_config_mode:
output += await self.exit_config_mode()
output = self._normalize_linefeeds(output)
logger.debug(
"Host {}: Config commands output: {}".format(self._host, repr(output))
)
return output
| 40.10274
| 119
| 0.644236
|
import re
from netdev.logger import logger
from netdev.vendors.base import BaseDevice
class JunOSLikeDevice(BaseDevice):
_delimiter_list = ["%", ">", "#"]
_pattern = r"\w+(\@[\-\w]*)?[{delimiters}]"
_disable_paging_command = "set cli screen-length 0"
_config_enter = "configure"
_config_exit = "exit configuration-mode"
_config_check = "#"
_commit_command = "commit"
_commit_comment_command = "commit comment {}"
async def _set_base_prompt(self):
logger.info("Host {}: Setting base prompt".format(self._host))
prompt = await self._find_prompt()
prompt = prompt[:-1]
if "@" in prompt:
prompt = prompt.split("@")[1]
self._base_prompt = prompt
delimiters = map(re.escape, type(self)._delimiter_list)
delimiters = r"|".join(delimiters)
base_prompt = re.escape(self._base_prompt[:12])
pattern = type(self)._pattern
self._base_pattern = pattern.format(delimiters=delimiters)
logger.debug("Host {}: Base Prompt: {}".format(self._host, self._base_prompt))
logger.debug("Host {}: Base Pattern: {}".format(self._host, self._base_pattern))
return self._base_prompt
async def check_config_mode(self):
logger.info("Host {}: Checking configuration mode".format(self._host))
check_string = type(self)._config_check
self._stdin.write(self._normalize_cmd("\n"))
output = await self._read_until_prompt()
return check_string in output
async def config_mode(self):
logger.info("Host {}: Entering to configuration mode".format(self._host))
output = ""
config_enter = type(self)._config_enter
if not await self.check_config_mode():
self._stdin.write(self._normalize_cmd(config_enter))
output += await self._read_until_prompt()
if not await self.check_config_mode():
raise ValueError("Failed to enter to configuration mode")
return output
async def exit_config_mode(self):
logger.info("Host {}: Exiting from configuration mode".format(self._host))
output = ""
config_exit = type(self)._config_exit
if await self.check_config_mode():
self._stdin.write(self._normalize_cmd(config_exit))
output += await self._read_until_prompt()
if await self.check_config_mode():
raise ValueError("Failed to exit from configuration mode")
return output
async def send_config_set(
self,
config_commands=None,
with_commit=True,
commit_comment="",
exit_config_mode=True,
):
if config_commands is None:
return ""
output = await self.config_mode()
output += await super().send_config_set(config_commands=config_commands)
if with_commit:
commit = type(self)._commit_command
if commit_comment:
commit = type(self)._commit_comment_command.format(commit_comment)
self._stdin.write(self._normalize_cmd(commit))
output += await self._read_until_prompt()
if exit_config_mode:
output += await self.exit_config_mode()
output = self._normalize_linefeeds(output)
logger.debug(
"Host {}: Config commands output: {}".format(self._host, repr(output))
)
return output
| true
| true
|
7904a9a5f121ef43001036bd043db12acd71f522
| 565
|
py
|
Python
|
Prime Powers/prime_powers.py
|
philippossfrn/Integer-Sequences
|
ba803320ab6e1abd921db402c60fb8c48a5877d5
|
[
"Unlicense"
] | 48
|
2021-06-28T05:53:43.000Z
|
2022-03-17T10:37:26.000Z
|
Prime Powers/prime_powers.py
|
philippossfrn/Integer-Sequences
|
ba803320ab6e1abd921db402c60fb8c48a5877d5
|
[
"Unlicense"
] | 99
|
2021-06-28T03:16:51.000Z
|
2022-03-17T00:18:50.000Z
|
Prime Powers/prime_powers.py
|
philippossfrn/Integer-Sequences
|
ba803320ab6e1abd921db402c60fb8c48a5877d5
|
[
"Unlicense"
] | 140
|
2021-06-28T06:29:19.000Z
|
2022-03-30T11:15:45.000Z
|
import math
def is_prime_power(n):
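    # n is a prime power exactly when it has a single distinct prime factor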
    # factor out all 2s while n is even
factors = set()
while n % 2 == 0:
factors.add(2)
n = n / 2
    # n is now odd
for i in range(3,int(math.sqrt(n))+1,2):
while (n % i == 0):
factors.add(i)
n = n / i
if n > 2:
factors.add(n)
return len(factors) == 1
def main():
n = int(input('Enter n: '))
count = -1
curr = 0
while count < n:
curr += 1
if is_prime_power(curr):
count += 1
print(curr)
if __name__ == '__main__':
main()
| 17.65625
| 43
| 0.486726
|
import math
def is_prime_power(n):
factors = set()
while n % 2 == 0:
factors.add(2)
n = n / 2
for i in range(3,int(math.sqrt(n))+1,2):
while (n % i == 0):
factors.add(i)
n = n / i
if n > 2:
factors.add(n)
return len(factors) == 1
def main():
n = int(input('Enter n: '))
count = -1
curr = 0
while count < n:
curr += 1
if is_prime_power(curr):
count += 1
print(curr)
if __name__ == '__main__':
main()
| true
| true
|
7904a9f3f89733d06a46bd4b4fa1b13af9209ed9
| 655
|
py
|
Python
|
src/pycontw2016/settings/production/pycontw2016.py
|
kaka-lin/pycon.tw
|
67809a5e43b03273ac8d8f5a1b6b3d3f73474be7
|
[
"MIT"
] | 47
|
2015-12-19T10:23:11.000Z
|
2018-06-13T08:07:33.000Z
|
src/pycontw2016/settings/production/pycontw2016.py
|
kaka-lin/pycon.tw
|
67809a5e43b03273ac8d8f5a1b6b3d3f73474be7
|
[
"MIT"
] | 473
|
2018-12-01T13:01:48.000Z
|
2022-03-30T07:10:42.000Z
|
src/pycontw2016/settings/production/pycontw2016.py
|
kaka-lin/pycon.tw
|
67809a5e43b03273ac8d8f5a1b6b3d3f73474be7
|
[
"MIT"
] | 91
|
2018-07-26T02:38:59.000Z
|
2022-01-16T02:38:31.000Z
|
import collections
import datetime
from django.utils.translation import gettext_lazy as _
from .base import * # noqa
# Override static and media URL for prefix in WSGI server.
# https://code.djangoproject.com/ticket/25598
STATIC_URL = '/2016/static/'
MEDIA_URL = '/2016/media/'
CONFERENCE_DEFAULT_SLUG = 'pycontw-2016'
TALK_PROPOSAL_DURATION_CHOICES = (
('NOPREF', _('No preference')),
('PREF25', _('Prefer 25min')),
('PREF45', _('Prefer 45min')),
)
EVENTS_DAY_NAMES = collections.OrderedDict([
(datetime.date(2016, 6, 3), _('Day 1')),
(datetime.date(2016, 6, 4), _('Day 2')),
(datetime.date(2016, 6, 5), _('Day 3')),
])
| 25.192308
| 58
| 0.674809
|
import collections
import datetime
from django.utils.translation import gettext_lazy as _
from .base import *
STATIC_URL = '/2016/static/'
MEDIA_URL = '/2016/media/'
CONFERENCE_DEFAULT_SLUG = 'pycontw-2016'
TALK_PROPOSAL_DURATION_CHOICES = (
('NOPREF', _('No preference')),
('PREF25', _('Prefer 25min')),
('PREF45', _('Prefer 45min')),
)
EVENTS_DAY_NAMES = collections.OrderedDict([
(datetime.date(2016, 6, 3), _('Day 1')),
(datetime.date(2016, 6, 4), _('Day 2')),
(datetime.date(2016, 6, 5), _('Day 3')),
])
| true
| true
|
7904aa9d28cbe73452b0cf7ed25e00e98afe1123
| 215
|
py
|
Python
|
WebPersonal/WebPersonal/wsgi.py
|
CristianAAT/web-personal
|
12a920247e89e37030ca49ae42d1f0959b6d2796
|
[
"Apache-2.0"
] | null | null | null |
WebPersonal/WebPersonal/wsgi.py
|
CristianAAT/web-personal
|
12a920247e89e37030ca49ae42d1f0959b6d2796
|
[
"Apache-2.0"
] | 7
|
2021-03-30T13:57:13.000Z
|
2022-01-13T02:56:37.000Z
|
WebPersonal/WebPersonal/wsgi.py
|
CristianAAT/web-personal
|
12a920247e89e37030ca49ae42d1f0959b6d2796
|
[
"Apache-2.0"
] | null | null | null |
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "WebPersonal.settings")
application = get_wsgi_application()
#application = DjangoWhiteNoise(application)
| 30.714286
| 71
| 0.84186
|
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "WebPersonal.settings")
application = get_wsgi_application()
| true
| true
|
7904ab641d3683007eb6f39dfe08fafe512112a5
| 4,181
|
py
|
Python
|
scripts/asmt_merge_vacc_exetera.py
|
deng113jie/ExeTeraCovid
|
ee9ec90983d7c2c711962c7fe9ac25251392e41b
|
[
"Apache-2.0"
] | 3
|
2021-03-23T14:23:06.000Z
|
2021-12-29T16:54:42.000Z
|
scripts/asmt_merge_vacc_exetera.py
|
deng113jie/ExeTeraCovid
|
ee9ec90983d7c2c711962c7fe9ac25251392e41b
|
[
"Apache-2.0"
] | 29
|
2021-02-22T12:12:53.000Z
|
2021-09-27T10:52:25.000Z
|
scripts/asmt_merge_vacc_exetera.py
|
deng113jie/ExeTeraCovid
|
ee9ec90983d7c2c711962c7fe9ac25251392e41b
|
[
"Apache-2.0"
] | 1
|
2021-03-08T15:00:30.000Z
|
2021-03-08T15:00:30.000Z
|
from datetime import datetime
import numpy as np
import exetera.core.session as sess
from exetera.core import dataframe
ADATA = '/home/jd21/data/processed_May17_processed.hdf5'
VDATA = '/home/jd21/data/vacc.0603.h5'
DSTDATA = '/home/jd21/data/full_merge.h5'
def asmt_merge_vacc():
"""
    Merge the assessments dataframe with the vaccine dataframe and filter for subjects that have a healthy assessment before the vaccine date
"""
with sess.Session() as s:
# open related datasets
src = s.open_dataset(ADATA, 'r', 'asmt')
asmt = src['assessments']
vacc = s.open_dataset(VDATA, 'r', 'vacc')
dst = s.open_dataset(DSTDATA, 'w', 'dst')
#filter vaccine type
vbrand_filter = (vacc['vaccine_doses']['brand'].data[:] == 2) | \
(vacc['vaccine_doses']['brand'].data[:] == 3)
dvacc = dst.create_dataframe('vacc')
vacc['vaccine_doses'].apply_filter(vbrand_filter, ddf=dvacc)
#join asmt with vaccine using patient_id, write to result
asmt_v = dst.create_dataframe('asmt_v')
dataframe.merge(asmt, dvacc, asmt_v, 'patient_id', 'patient_id', how='inner')
#filter healthy asmt record within 10days of vaccine date
symp_list = ['persistent_cough', 'fever', 'fatigue', 'delirium', 'shortness_of_breath', 'diarrhoea',
'abdominal_pain', 'chest_pain', 'hoarse_voice', 'skipped_meals', 'loss_of_smell', 'headache',
'sore_throat', 'chills_or_shivers', 'eye_soreness', 'nausea', 'blisters_on_feet',
'unusual_muscle_pains', 'runny_nose', 'red_welts_on_face_or_lips', 'dizzy_light_headed',
'swollen_glands', 'sneezing', 'skin_burning', 'earache', 'altered_smell', 'brain_fog',
'irregular_heartbeat']
symp_filter = asmt_v['persistent_cough'].data[:] > 1 # has symptom
for symptom1 in symp_list:
symp_filter |= asmt_v[symptom1].data[:] > 1 # has symptom
symp_filter = ~symp_filter # has no symptom
symp_filter &= asmt_v['date_taken_specific'].data[:] > asmt_v['updated_at_l'].data[:] # asmt before vaccine
symp_filter &= asmt_v['updated_at_l'].data[:] > asmt_v['date_taken_specific'].data[:] - 3600 * 24 * 10 # 10 days
asmt_v.apply_filter(symp_filter)
# has symptom after vaccine
yes_symp_filter = asmt_v['persistent_cough'].data[:] > 1
for symptom1 in symp_list:
yes_symp_filter |= asmt_v[symptom1].data[:] > 1 # has symptom
yes_symp_filter &= asmt_v['date_taken_specific'].data[:] < asmt_v['updated_at_l'].data[:] # assessment after vaccine
        yes_symp_filter &= asmt_v['date_taken_specific'].data[:] + 3600 * 24 * 10 > asmt_v['updated_at_l'].data[:] # assessment within 10 days of vaccine
asmt_v.apply_filter(yes_symp_filter)
print("finish asmt join vaccine.")
def join_tests():
"""
    Merge tests into the previously merged (assessments, vaccine) dataframe and filter for subjects that have test records within 10 days after the vaccine
"""
with sess.Session() as s:
# open related datasets
src = s.open_dataset(ADATA, 'r', 'asmt')
tests_src = src['tests']
dst = s.open_dataset(DSTDATA, 'r+', 'dst')
vacc = dst['asmt_v']
tests_m = dst.create_dataframe('tests_m')
dataframe.merge(vacc, tests_src, tests_m, 'patient_id_l', 'patient_id', how='inner')
# filter out subjects has tests after 10days of vaccine
# date_taken_specific_l is vaccine date, date_taken_specific_r is tests date
test_filter = tests_m['date_taken_specific_l'] < tests_m['date_taken_specific_r'] # test after vaccine
test_filter &= tests_m['date_taken_specific_l'] > (tests_m['date_taken_specific_r'] - 3600 * 24 * 10)
tests_m.apply_filter(test_filter)
def count():
with sess.Session() as s:
# open related datasets
dst = s.open_dataset(DSTDATA, 'r', 'dst')
vacc = dst['tests_m']
print(len(dst['tests_m']['patient_id_l_l']))
if __name__ == '__main__':
print(datetime.now())
asmt_merge_vacc()
join_tests()
#count()
print(datetime.now())
| 45.945055
| 153
| 0.643387
|
from datetime import datetime
import numpy as np
import exetera.core.session as sess
from exetera.core import dataframe
ADATA = '/home/jd21/data/processed_May17_processed.hdf5'
VDATA = '/home/jd21/data/vacc.0603.h5'
DSTDATA = '/home/jd21/data/full_merge.h5'
def asmt_merge_vacc():
with sess.Session() as s:
src = s.open_dataset(ADATA, 'r', 'asmt')
asmt = src['assessments']
vacc = s.open_dataset(VDATA, 'r', 'vacc')
dst = s.open_dataset(DSTDATA, 'w', 'dst')
vbrand_filter = (vacc['vaccine_doses']['brand'].data[:] == 2) | \
(vacc['vaccine_doses']['brand'].data[:] == 3)
dvacc = dst.create_dataframe('vacc')
vacc['vaccine_doses'].apply_filter(vbrand_filter, ddf=dvacc)
asmt_v = dst.create_dataframe('asmt_v')
dataframe.merge(asmt, dvacc, asmt_v, 'patient_id', 'patient_id', how='inner')
symp_list = ['persistent_cough', 'fever', 'fatigue', 'delirium', 'shortness_of_breath', 'diarrhoea',
'abdominal_pain', 'chest_pain', 'hoarse_voice', 'skipped_meals', 'loss_of_smell', 'headache',
'sore_throat', 'chills_or_shivers', 'eye_soreness', 'nausea', 'blisters_on_feet',
'unusual_muscle_pains', 'runny_nose', 'red_welts_on_face_or_lips', 'dizzy_light_headed',
'swollen_glands', 'sneezing', 'skin_burning', 'earache', 'altered_smell', 'brain_fog',
'irregular_heartbeat']
symp_filter = asmt_v['persistent_cough'].data[:] > 1
for symptom1 in symp_list:
symp_filter |= asmt_v[symptom1].data[:] > 1
symp_filter = ~symp_filter
symp_filter &= asmt_v['date_taken_specific'].data[:] > asmt_v['updated_at_l'].data[:]
symp_filter &= asmt_v['updated_at_l'].data[:] > asmt_v['date_taken_specific'].data[:] - 3600 * 24 * 10
asmt_v.apply_filter(symp_filter)
yes_symp_filter = asmt_v['persistent_cough'].data[:] > 1
for symptom1 in symp_list:
yes_symp_filter |= asmt_v[symptom1].data[:] > 1
yes_symp_filter &= asmt_v['date_taken_specific'].data[:] < asmt_v['updated_at_l'].data[:]
yes_symp_filter &= asmt_v['date_taken_specific'].data[:] + 3600 * 24 * 10 > asmt_v['updated_at_l'].data[:]
asmt_v.apply_filter(yes_symp_filter)
print("finish asmt join vaccine.")
def join_tests():
with sess.Session() as s:
src = s.open_dataset(ADATA, 'r', 'asmt')
tests_src = src['tests']
dst = s.open_dataset(DSTDATA, 'r+', 'dst')
vacc = dst['asmt_v']
tests_m = dst.create_dataframe('tests_m')
dataframe.merge(vacc, tests_src, tests_m, 'patient_id_l', 'patient_id', how='inner')
test_filter = tests_m['date_taken_specific_l'] < tests_m['date_taken_specific_r']
test_filter &= tests_m['date_taken_specific_l'] > (tests_m['date_taken_specific_r'] - 3600 * 24 * 10)
tests_m.apply_filter(test_filter)
def count():
with sess.Session() as s:
dst = s.open_dataset(DSTDATA, 'r', 'dst')
vacc = dst['tests_m']
print(len(dst['tests_m']['patient_id_l_l']))
if __name__ == '__main__':
print(datetime.now())
asmt_merge_vacc()
join_tests()
print(datetime.now())
| true
| true
|
7904abd77015675bb1233aacba24a03ea36cc363
| 2,529
|
py
|
Python
|
angrmanagement/ui/widgets/qpatch_table.py
|
GeistInDerSH/angr-management
|
7033aa25957d8d59cea7ba10e296d38b4b6678b7
|
[
"BSD-2-Clause"
] | 1
|
2021-09-09T13:52:51.000Z
|
2021-09-09T13:52:51.000Z
|
angrmanagement/ui/widgets/qpatch_table.py
|
GeistInDerSH/angr-management
|
7033aa25957d8d59cea7ba10e296d38b4b6678b7
|
[
"BSD-2-Clause"
] | null | null | null |
angrmanagement/ui/widgets/qpatch_table.py
|
GeistInDerSH/angr-management
|
7033aa25957d8d59cea7ba10e296d38b4b6678b7
|
[
"BSD-2-Clause"
] | null | null | null |
import binascii
from PySide2.QtWidgets import QTableWidget, QTableWidgetItem, QAbstractItemView
from PySide2.QtCore import Qt
class QPatchTableItem:
def __init__(self, patch, old_bytes):
self.patch = patch
self.old_bytes = old_bytes
def widgets(self):
patch = self.patch
widgets = [
QTableWidgetItem("%#x" % patch.addr),
QTableWidgetItem("%d bytes" % len(patch)),
QTableWidgetItem(binascii.hexlify(self.old_bytes).decode("ascii") if self.old_bytes else "<unknown>"),
QTableWidgetItem(binascii.hexlify(patch.new_bytes).decode("ascii")),
]
for w in widgets:
w.setFlags(w.flags() & ~Qt.ItemIsEditable)
return widgets
class QPatchTable(QTableWidget):
HEADER = ['Address', 'Size', 'Old Bytes', 'New Bytes']
def __init__(self, instance, parent):
super(QPatchTable, self).__init__(parent)
self.setColumnCount(len(self.HEADER))
self.setHorizontalHeaderLabels(self.HEADER)
self.setSelectionBehavior(QAbstractItemView.SelectRows)
self.verticalHeader().setVisible(False)
self.items = [ ]
self.instance = instance
self.instance.patches.am_subscribe(self._watch_patches)
def current_patch(self):
selected_index = self.currentRow()
if 0 <= selected_index < len(self.items):
return self.items[selected_index]
else:
return None
def reload(self):
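        # rebuild the table contents from the patches stored in the project's knowledge base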
current_row = self.currentRow()
self.clearContents()
self.items = [QPatchTableItem(item,
self._get_bytes(self.instance.project, item.addr, len(item)))
for item in self.instance.project.kb.patches.values()]
items_count = len(self.items)
self.setRowCount(items_count)
for idx, item in enumerate(self.items):
for i, it in enumerate(item.widgets()):
self.setItem(idx, i, it)
#if 0 <= current_row < len(self.items):
# self.setCurrentItem(current_row, 0)
def _on_state_selected(self, *args):
if self._selected is not None:
self._selected(self.current_state_record())
def _watch_patches(self, **kwargs):
if not self.instance.patches.am_none:
self.reload()
@staticmethod
def _get_bytes(proj, addr, size):
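        # read the bytes currently backing the patched region; return None if the address is not mapped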
try:
return proj.loader.memory.load(addr, size)
except KeyError:
return None
| 30.46988
| 114
| 0.619217
|
import binascii
from PySide2.QtWidgets import QTableWidget, QTableWidgetItem, QAbstractItemView
from PySide2.QtCore import Qt
class QPatchTableItem:
def __init__(self, patch, old_bytes):
self.patch = patch
self.old_bytes = old_bytes
def widgets(self):
patch = self.patch
widgets = [
QTableWidgetItem("%#x" % patch.addr),
QTableWidgetItem("%d bytes" % len(patch)),
QTableWidgetItem(binascii.hexlify(self.old_bytes).decode("ascii") if self.old_bytes else "<unknown>"),
QTableWidgetItem(binascii.hexlify(patch.new_bytes).decode("ascii")),
]
for w in widgets:
w.setFlags(w.flags() & ~Qt.ItemIsEditable)
return widgets
class QPatchTable(QTableWidget):
HEADER = ['Address', 'Size', 'Old Bytes', 'New Bytes']
def __init__(self, instance, parent):
super(QPatchTable, self).__init__(parent)
self.setColumnCount(len(self.HEADER))
self.setHorizontalHeaderLabels(self.HEADER)
self.setSelectionBehavior(QAbstractItemView.SelectRows)
self.verticalHeader().setVisible(False)
self.items = [ ]
self.instance = instance
self.instance.patches.am_subscribe(self._watch_patches)
def current_patch(self):
selected_index = self.currentRow()
if 0 <= selected_index < len(self.items):
return self.items[selected_index]
else:
return None
def reload(self):
current_row = self.currentRow()
self.clearContents()
self.items = [QPatchTableItem(item,
self._get_bytes(self.instance.project, item.addr, len(item)))
for item in self.instance.project.kb.patches.values()]
items_count = len(self.items)
self.setRowCount(items_count)
for idx, item in enumerate(self.items):
for i, it in enumerate(item.widgets()):
self.setItem(idx, i, it)
def _on_state_selected(self, *args):
if self._selected is not None:
self._selected(self.current_state_record())
def _watch_patches(self, **kwargs):
if not self.instance.patches.am_none:
self.reload()
@staticmethod
def _get_bytes(proj, addr, size):
try:
return proj.loader.memory.load(addr, size)
except KeyError:
return None
| true
| true
|
7904abff7d1aa5738d8b5453dbfe318987c1ab13
| 18,835
|
py
|
Python
|
Lib/test/test_contextlib_async.py
|
syokoysn/cpython
|
889036f7ef7290ef15b6c3373023f6a35387af0c
|
[
"0BSD"
] | 2
|
2021-08-03T10:25:23.000Z
|
2021-08-07T20:14:43.000Z
|
Lib/test/test_contextlib_async.py
|
syokoysn/cpython
|
889036f7ef7290ef15b6c3373023f6a35387af0c
|
[
"0BSD"
] | 10
|
2021-05-01T05:44:13.000Z
|
2022-03-01T08:01:37.000Z
|
Lib/test/test_contextlib_async.py
|
rapidcow/cpython
|
dd3adc013b21ec1338bb5fea2e2c04a4fc650306
|
[
"0BSD"
] | 1
|
2021-07-04T14:39:48.000Z
|
2021-07-04T14:39:48.000Z
|
import asyncio
from contextlib import (
asynccontextmanager, AbstractAsyncContextManager,
AsyncExitStack, nullcontext, aclosing)
import functools
from test import support
import unittest
from test.test_contextlib import TestBaseExitStack
def _async_test(func):
"""Decorator to turn an async function into a test case."""
@functools.wraps(func)
def wrapper(*args, **kwargs):
coro = func(*args, **kwargs)
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
return loop.run_until_complete(coro)
finally:
loop.close()
asyncio.set_event_loop_policy(None)
return wrapper
class TestAbstractAsyncContextManager(unittest.TestCase):
@_async_test
async def test_enter(self):
class DefaultEnter(AbstractAsyncContextManager):
async def __aexit__(self, *args):
await super().__aexit__(*args)
manager = DefaultEnter()
self.assertIs(await manager.__aenter__(), manager)
async with manager as context:
self.assertIs(manager, context)
@_async_test
async def test_async_gen_propagates_generator_exit(self):
# A regression test for https://bugs.python.org/issue33786.
@asynccontextmanager
async def ctx():
yield
async def gen():
async with ctx():
yield 11
ret = []
exc = ValueError(22)
with self.assertRaises(ValueError):
async with ctx():
async for val in gen():
ret.append(val)
raise exc
self.assertEqual(ret, [11])
def test_exit_is_abstract(self):
class MissingAexit(AbstractAsyncContextManager):
pass
with self.assertRaises(TypeError):
MissingAexit()
def test_structural_subclassing(self):
class ManagerFromScratch:
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_value, traceback):
return None
self.assertTrue(issubclass(ManagerFromScratch, AbstractAsyncContextManager))
class DefaultEnter(AbstractAsyncContextManager):
async def __aexit__(self, *args):
await super().__aexit__(*args)
self.assertTrue(issubclass(DefaultEnter, AbstractAsyncContextManager))
class NoneAenter(ManagerFromScratch):
__aenter__ = None
self.assertFalse(issubclass(NoneAenter, AbstractAsyncContextManager))
class NoneAexit(ManagerFromScratch):
__aexit__ = None
self.assertFalse(issubclass(NoneAexit, AbstractAsyncContextManager))
class AsyncContextManagerTestCase(unittest.TestCase):
@_async_test
async def test_contextmanager_plain(self):
state = []
@asynccontextmanager
async def woohoo():
state.append(1)
yield 42
state.append(999)
async with woohoo() as x:
self.assertEqual(state, [1])
self.assertEqual(x, 42)
state.append(x)
self.assertEqual(state, [1, 42, 999])
@_async_test
async def test_contextmanager_finally(self):
state = []
@asynccontextmanager
async def woohoo():
state.append(1)
try:
yield 42
finally:
state.append(999)
with self.assertRaises(ZeroDivisionError):
async with woohoo() as x:
self.assertEqual(state, [1])
self.assertEqual(x, 42)
state.append(x)
raise ZeroDivisionError()
self.assertEqual(state, [1, 42, 999])
@_async_test
async def test_contextmanager_no_reraise(self):
@asynccontextmanager
async def whee():
yield
ctx = whee()
await ctx.__aenter__()
# Calling __aexit__ should not result in an exception
self.assertFalse(await ctx.__aexit__(TypeError, TypeError("foo"), None))
@_async_test
async def test_contextmanager_trap_yield_after_throw(self):
@asynccontextmanager
async def whoo():
try:
yield
except:
yield
ctx = whoo()
await ctx.__aenter__()
with self.assertRaises(RuntimeError):
await ctx.__aexit__(TypeError, TypeError('foo'), None)
@_async_test
async def test_contextmanager_trap_no_yield(self):
@asynccontextmanager
async def whoo():
if False:
yield
ctx = whoo()
with self.assertRaises(RuntimeError):
await ctx.__aenter__()
@_async_test
async def test_contextmanager_trap_second_yield(self):
@asynccontextmanager
async def whoo():
yield
yield
ctx = whoo()
await ctx.__aenter__()
with self.assertRaises(RuntimeError):
await ctx.__aexit__(None, None, None)
@_async_test
async def test_contextmanager_non_normalised(self):
@asynccontextmanager
async def whoo():
try:
yield
except RuntimeError:
raise SyntaxError
ctx = whoo()
await ctx.__aenter__()
with self.assertRaises(SyntaxError):
await ctx.__aexit__(RuntimeError, None, None)
@_async_test
async def test_contextmanager_except(self):
state = []
@asynccontextmanager
async def woohoo():
state.append(1)
try:
yield 42
except ZeroDivisionError as e:
state.append(e.args[0])
self.assertEqual(state, [1, 42, 999])
async with woohoo() as x:
self.assertEqual(state, [1])
self.assertEqual(x, 42)
state.append(x)
raise ZeroDivisionError(999)
self.assertEqual(state, [1, 42, 999])
@_async_test
async def test_contextmanager_except_stopiter(self):
@asynccontextmanager
async def woohoo():
yield
for stop_exc in (StopIteration('spam'), StopAsyncIteration('ham')):
with self.subTest(type=type(stop_exc)):
try:
async with woohoo():
raise stop_exc
except Exception as ex:
self.assertIs(ex, stop_exc)
else:
self.fail(f'{stop_exc} was suppressed')
@_async_test
async def test_contextmanager_wrap_runtimeerror(self):
@asynccontextmanager
async def woohoo():
try:
yield
except Exception as exc:
raise RuntimeError(f'caught {exc}') from exc
with self.assertRaises(RuntimeError):
async with woohoo():
1 / 0
# If the context manager wrapped StopAsyncIteration in a RuntimeError,
# we also unwrap it, because we can't tell whether the wrapping was
# done by the generator machinery or by the generator itself.
with self.assertRaises(StopAsyncIteration):
async with woohoo():
raise StopAsyncIteration
def _create_contextmanager_attribs(self):
def attribs(**kw):
def decorate(func):
for k,v in kw.items():
setattr(func,k,v)
return func
return decorate
@asynccontextmanager
@attribs(foo='bar')
async def baz(spam):
"""Whee!"""
yield
return baz
def test_contextmanager_attribs(self):
baz = self._create_contextmanager_attribs()
self.assertEqual(baz.__name__,'baz')
self.assertEqual(baz.foo, 'bar')
@support.requires_docstrings
def test_contextmanager_doc_attrib(self):
baz = self._create_contextmanager_attribs()
self.assertEqual(baz.__doc__, "Whee!")
@support.requires_docstrings
@_async_test
async def test_instance_docstring_given_cm_docstring(self):
baz = self._create_contextmanager_attribs()(None)
self.assertEqual(baz.__doc__, "Whee!")
async with baz:
pass # suppress warning
@_async_test
async def test_keywords(self):
# Ensure no keyword arguments are inhibited
@asynccontextmanager
async def woohoo(self, func, args, kwds):
yield (self, func, args, kwds)
async with woohoo(self=11, func=22, args=33, kwds=44) as target:
self.assertEqual(target, (11, 22, 33, 44))
@_async_test
async def test_recursive(self):
depth = 0
ncols = 0
@asynccontextmanager
async def woohoo():
nonlocal ncols
ncols += 1
nonlocal depth
before = depth
depth += 1
yield
depth -= 1
self.assertEqual(depth, before)
@woohoo()
async def recursive():
if depth < 10:
await recursive()
await recursive()
self.assertEqual(ncols, 10)
self.assertEqual(depth, 0)
class AclosingTestCase(unittest.TestCase):
@support.requires_docstrings
def test_instance_docs(self):
cm_docstring = aclosing.__doc__
obj = aclosing(None)
self.assertEqual(obj.__doc__, cm_docstring)
@_async_test
async def test_aclosing(self):
state = []
class C:
async def aclose(self):
state.append(1)
x = C()
self.assertEqual(state, [])
async with aclosing(x) as y:
self.assertEqual(x, y)
self.assertEqual(state, [1])
@_async_test
async def test_aclosing_error(self):
state = []
class C:
async def aclose(self):
state.append(1)
x = C()
self.assertEqual(state, [])
with self.assertRaises(ZeroDivisionError):
async with aclosing(x) as y:
self.assertEqual(x, y)
1 / 0
self.assertEqual(state, [1])
@_async_test
async def test_aclosing_bpo41229(self):
state = []
class Resource:
def __del__(self):
state.append(1)
async def agenfunc():
r = Resource()
yield -1
yield -2
x = agenfunc()
self.assertEqual(state, [])
with self.assertRaises(ZeroDivisionError):
async with aclosing(x) as y:
self.assertEqual(x, y)
self.assertEqual(-1, await x.__anext__())
1 / 0
self.assertEqual(state, [1])
class TestAsyncExitStack(TestBaseExitStack, unittest.TestCase):
class SyncAsyncExitStack(AsyncExitStack):
@staticmethod
def run_coroutine(coro):
loop = asyncio.get_event_loop_policy().get_event_loop()
t = loop.create_task(coro)
t.add_done_callback(lambda f: loop.stop())
loop.run_forever()
exc = t.exception()
if not exc:
return t.result()
else:
context = exc.__context__
try:
raise exc
except:
exc.__context__ = context
raise exc
def close(self):
return self.run_coroutine(self.aclose())
def __enter__(self):
return self.run_coroutine(self.__aenter__())
def __exit__(self, *exc_details):
return self.run_coroutine(self.__aexit__(*exc_details))
exit_stack = SyncAsyncExitStack
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
self.addCleanup(self.loop.close)
self.addCleanup(asyncio.set_event_loop_policy, None)
@_async_test
async def test_async_callback(self):
expected = [
((), {}),
((1,), {}),
((1,2), {}),
((), dict(example=1)),
((1,), dict(example=1)),
((1,2), dict(example=1)),
]
result = []
async def _exit(*args, **kwds):
"""Test metadata propagation"""
result.append((args, kwds))
async with AsyncExitStack() as stack:
for args, kwds in reversed(expected):
if args and kwds:
f = stack.push_async_callback(_exit, *args, **kwds)
elif args:
f = stack.push_async_callback(_exit, *args)
elif kwds:
f = stack.push_async_callback(_exit, **kwds)
else:
f = stack.push_async_callback(_exit)
self.assertIs(f, _exit)
for wrapper in stack._exit_callbacks:
self.assertIs(wrapper[1].__wrapped__, _exit)
self.assertNotEqual(wrapper[1].__name__, _exit.__name__)
self.assertIsNone(wrapper[1].__doc__, _exit.__doc__)
self.assertEqual(result, expected)
result = []
async with AsyncExitStack() as stack:
with self.assertRaises(TypeError):
stack.push_async_callback(arg=1)
with self.assertRaises(TypeError):
self.exit_stack.push_async_callback(arg=2)
with self.assertRaises(TypeError):
stack.push_async_callback(callback=_exit, arg=3)
self.assertEqual(result, [])
@_async_test
async def test_async_push(self):
exc_raised = ZeroDivisionError
async def _expect_exc(exc_type, exc, exc_tb):
self.assertIs(exc_type, exc_raised)
async def _suppress_exc(*exc_details):
return True
async def _expect_ok(exc_type, exc, exc_tb):
self.assertIsNone(exc_type)
self.assertIsNone(exc)
self.assertIsNone(exc_tb)
class ExitCM(object):
def __init__(self, check_exc):
self.check_exc = check_exc
async def __aenter__(self):
self.fail("Should not be called!")
async def __aexit__(self, *exc_details):
await self.check_exc(*exc_details)
async with self.exit_stack() as stack:
stack.push_async_exit(_expect_ok)
self.assertIs(stack._exit_callbacks[-1][1], _expect_ok)
cm = ExitCM(_expect_ok)
stack.push_async_exit(cm)
self.assertIs(stack._exit_callbacks[-1][1].__self__, cm)
stack.push_async_exit(_suppress_exc)
self.assertIs(stack._exit_callbacks[-1][1], _suppress_exc)
cm = ExitCM(_expect_exc)
stack.push_async_exit(cm)
self.assertIs(stack._exit_callbacks[-1][1].__self__, cm)
stack.push_async_exit(_expect_exc)
self.assertIs(stack._exit_callbacks[-1][1], _expect_exc)
stack.push_async_exit(_expect_exc)
self.assertIs(stack._exit_callbacks[-1][1], _expect_exc)
1/0
@_async_test
async def test_enter_async_context(self):
class TestCM(object):
async def __aenter__(self):
result.append(1)
async def __aexit__(self, *exc_details):
result.append(3)
result = []
cm = TestCM()
async with AsyncExitStack() as stack:
@stack.push_async_callback # Registered first => cleaned up last
async def _exit():
result.append(4)
self.assertIsNotNone(_exit)
await stack.enter_async_context(cm)
self.assertIs(stack._exit_callbacks[-1][1].__self__, cm)
result.append(2)
self.assertEqual(result, [1, 2, 3, 4])
@_async_test
async def test_enter_async_context_errors(self):
class LacksEnterAndExit:
pass
class LacksEnter:
async def __aexit__(self, *exc_info):
pass
class LacksExit:
async def __aenter__(self):
pass
async with self.exit_stack() as stack:
with self.assertRaisesRegex(TypeError, 'asynchronous context manager'):
await stack.enter_async_context(LacksEnterAndExit())
with self.assertRaisesRegex(TypeError, 'asynchronous context manager'):
await stack.enter_async_context(LacksEnter())
with self.assertRaisesRegex(TypeError, 'asynchronous context manager'):
await stack.enter_async_context(LacksExit())
self.assertFalse(stack._exit_callbacks)
@_async_test
async def test_async_exit_exception_chaining(self):
# Ensure exception chaining matches the reference behaviour
async def raise_exc(exc):
raise exc
saved_details = None
async def suppress_exc(*exc_details):
nonlocal saved_details
saved_details = exc_details
return True
try:
async with self.exit_stack() as stack:
stack.push_async_callback(raise_exc, IndexError)
stack.push_async_callback(raise_exc, KeyError)
stack.push_async_callback(raise_exc, AttributeError)
stack.push_async_exit(suppress_exc)
stack.push_async_callback(raise_exc, ValueError)
1 / 0
except IndexError as exc:
self.assertIsInstance(exc.__context__, KeyError)
self.assertIsInstance(exc.__context__.__context__, AttributeError)
# Inner exceptions were suppressed
self.assertIsNone(exc.__context__.__context__.__context__)
else:
self.fail("Expected IndexError, but no exception was raised")
# Check the inner exceptions
inner_exc = saved_details[1]
self.assertIsInstance(inner_exc, ValueError)
self.assertIsInstance(inner_exc.__context__, ZeroDivisionError)
@_async_test
async def test_instance_bypass_async(self):
class Example(object): pass
cm = Example()
cm.__aenter__ = object()
cm.__aexit__ = object()
stack = self.exit_stack()
with self.assertRaisesRegex(TypeError, 'asynchronous context manager'):
await stack.enter_async_context(cm)
stack.push_async_exit(cm)
self.assertIs(stack._exit_callbacks[-1][1], cm)
class TestAsyncNullcontext(unittest.TestCase):
@_async_test
async def test_async_nullcontext(self):
class C:
pass
c = C()
async with nullcontext(c) as c_in:
self.assertIs(c_in, c)
if __name__ == '__main__':
unittest.main()
| 32.251712
| 84
| 0.583754
|
import asyncio
from contextlib import (
asynccontextmanager, AbstractAsyncContextManager,
AsyncExitStack, nullcontext, aclosing)
import functools
from test import support
import unittest
from test.test_contextlib import TestBaseExitStack
def _async_test(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
coro = func(*args, **kwargs)
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
return loop.run_until_complete(coro)
finally:
loop.close()
asyncio.set_event_loop_policy(None)
return wrapper
class TestAbstractAsyncContextManager(unittest.TestCase):
@_async_test
async def test_enter(self):
class DefaultEnter(AbstractAsyncContextManager):
async def __aexit__(self, *args):
await super().__aexit__(*args)
manager = DefaultEnter()
self.assertIs(await manager.__aenter__(), manager)
async with manager as context:
self.assertIs(manager, context)
@_async_test
async def test_async_gen_propagates_generator_exit(self):
@asynccontextmanager
async def ctx():
yield
async def gen():
async with ctx():
yield 11
ret = []
exc = ValueError(22)
with self.assertRaises(ValueError):
async with ctx():
async for val in gen():
ret.append(val)
raise exc
self.assertEqual(ret, [11])
def test_exit_is_abstract(self):
class MissingAexit(AbstractAsyncContextManager):
pass
with self.assertRaises(TypeError):
MissingAexit()
def test_structural_subclassing(self):
class ManagerFromScratch:
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_value, traceback):
return None
self.assertTrue(issubclass(ManagerFromScratch, AbstractAsyncContextManager))
class DefaultEnter(AbstractAsyncContextManager):
async def __aexit__(self, *args):
await super().__aexit__(*args)
self.assertTrue(issubclass(DefaultEnter, AbstractAsyncContextManager))
class NoneAenter(ManagerFromScratch):
__aenter__ = None
self.assertFalse(issubclass(NoneAenter, AbstractAsyncContextManager))
class NoneAexit(ManagerFromScratch):
__aexit__ = None
self.assertFalse(issubclass(NoneAexit, AbstractAsyncContextManager))
class AsyncContextManagerTestCase(unittest.TestCase):
@_async_test
async def test_contextmanager_plain(self):
state = []
@asynccontextmanager
async def woohoo():
state.append(1)
yield 42
state.append(999)
async with woohoo() as x:
self.assertEqual(state, [1])
self.assertEqual(x, 42)
state.append(x)
self.assertEqual(state, [1, 42, 999])
@_async_test
async def test_contextmanager_finally(self):
state = []
@asynccontextmanager
async def woohoo():
state.append(1)
try:
yield 42
finally:
state.append(999)
with self.assertRaises(ZeroDivisionError):
async with woohoo() as x:
self.assertEqual(state, [1])
self.assertEqual(x, 42)
state.append(x)
raise ZeroDivisionError()
self.assertEqual(state, [1, 42, 999])
@_async_test
async def test_contextmanager_no_reraise(self):
@asynccontextmanager
async def whee():
yield
ctx = whee()
await ctx.__aenter__()
self.assertFalse(await ctx.__aexit__(TypeError, TypeError("foo"), None))
@_async_test
async def test_contextmanager_trap_yield_after_throw(self):
@asynccontextmanager
async def whoo():
try:
yield
except:
yield
ctx = whoo()
await ctx.__aenter__()
with self.assertRaises(RuntimeError):
await ctx.__aexit__(TypeError, TypeError('foo'), None)
@_async_test
async def test_contextmanager_trap_no_yield(self):
@asynccontextmanager
async def whoo():
if False:
yield
ctx = whoo()
with self.assertRaises(RuntimeError):
await ctx.__aenter__()
@_async_test
async def test_contextmanager_trap_second_yield(self):
@asynccontextmanager
async def whoo():
yield
yield
ctx = whoo()
await ctx.__aenter__()
with self.assertRaises(RuntimeError):
await ctx.__aexit__(None, None, None)
@_async_test
async def test_contextmanager_non_normalised(self):
@asynccontextmanager
async def whoo():
try:
yield
except RuntimeError:
raise SyntaxError
ctx = whoo()
await ctx.__aenter__()
with self.assertRaises(SyntaxError):
await ctx.__aexit__(RuntimeError, None, None)
@_async_test
async def test_contextmanager_except(self):
state = []
@asynccontextmanager
async def woohoo():
state.append(1)
try:
yield 42
except ZeroDivisionError as e:
state.append(e.args[0])
self.assertEqual(state, [1, 42, 999])
async with woohoo() as x:
self.assertEqual(state, [1])
self.assertEqual(x, 42)
state.append(x)
raise ZeroDivisionError(999)
self.assertEqual(state, [1, 42, 999])
@_async_test
async def test_contextmanager_except_stopiter(self):
@asynccontextmanager
async def woohoo():
yield
for stop_exc in (StopIteration('spam'), StopAsyncIteration('ham')):
with self.subTest(type=type(stop_exc)):
try:
async with woohoo():
raise stop_exc
except Exception as ex:
self.assertIs(ex, stop_exc)
else:
self.fail(f'{stop_exc} was suppressed')
@_async_test
async def test_contextmanager_wrap_runtimeerror(self):
@asynccontextmanager
async def woohoo():
try:
yield
except Exception as exc:
raise RuntimeError(f'caught {exc}') from exc
with self.assertRaises(RuntimeError):
async with woohoo():
1 / 0
# done by the generator machinery or by the generator itself.
with self.assertRaises(StopAsyncIteration):
async with woohoo():
raise StopAsyncIteration
def _create_contextmanager_attribs(self):
def attribs(**kw):
def decorate(func):
for k,v in kw.items():
setattr(func,k,v)
return func
return decorate
@asynccontextmanager
@attribs(foo='bar')
async def baz(spam):
yield
return baz
def test_contextmanager_attribs(self):
baz = self._create_contextmanager_attribs()
self.assertEqual(baz.__name__,'baz')
self.assertEqual(baz.foo, 'bar')
@support.requires_docstrings
def test_contextmanager_doc_attrib(self):
baz = self._create_contextmanager_attribs()
self.assertEqual(baz.__doc__, "Whee!")
@support.requires_docstrings
@_async_test
async def test_instance_docstring_given_cm_docstring(self):
baz = self._create_contextmanager_attribs()(None)
self.assertEqual(baz.__doc__, "Whee!")
async with baz:
pass # suppress warning
@_async_test
async def test_keywords(self):
# Ensure no keyword arguments are inhibited
@asynccontextmanager
async def woohoo(self, func, args, kwds):
yield (self, func, args, kwds)
async with woohoo(self=11, func=22, args=33, kwds=44) as target:
self.assertEqual(target, (11, 22, 33, 44))
@_async_test
async def test_recursive(self):
depth = 0
ncols = 0
@asynccontextmanager
async def woohoo():
nonlocal ncols
ncols += 1
nonlocal depth
before = depth
depth += 1
yield
depth -= 1
self.assertEqual(depth, before)
@woohoo()
async def recursive():
if depth < 10:
await recursive()
await recursive()
self.assertEqual(ncols, 10)
self.assertEqual(depth, 0)
class AclosingTestCase(unittest.TestCase):
@support.requires_docstrings
def test_instance_docs(self):
cm_docstring = aclosing.__doc__
obj = aclosing(None)
self.assertEqual(obj.__doc__, cm_docstring)
@_async_test
async def test_aclosing(self):
state = []
class C:
async def aclose(self):
state.append(1)
x = C()
self.assertEqual(state, [])
async with aclosing(x) as y:
self.assertEqual(x, y)
self.assertEqual(state, [1])
@_async_test
async def test_aclosing_error(self):
state = []
class C:
async def aclose(self):
state.append(1)
x = C()
self.assertEqual(state, [])
with self.assertRaises(ZeroDivisionError):
async with aclosing(x) as y:
self.assertEqual(x, y)
1 / 0
self.assertEqual(state, [1])
@_async_test
async def test_aclosing_bpo41229(self):
state = []
class Resource:
def __del__(self):
state.append(1)
async def agenfunc():
r = Resource()
yield -1
yield -2
x = agenfunc()
self.assertEqual(state, [])
with self.assertRaises(ZeroDivisionError):
async with aclosing(x) as y:
self.assertEqual(x, y)
self.assertEqual(-1, await x.__anext__())
1 / 0
self.assertEqual(state, [1])
class TestAsyncExitStack(TestBaseExitStack, unittest.TestCase):
class SyncAsyncExitStack(AsyncExitStack):
@staticmethod
def run_coroutine(coro):
loop = asyncio.get_event_loop_policy().get_event_loop()
t = loop.create_task(coro)
t.add_done_callback(lambda f: loop.stop())
loop.run_forever()
exc = t.exception()
if not exc:
return t.result()
else:
context = exc.__context__
try:
raise exc
except:
exc.__context__ = context
raise exc
def close(self):
return self.run_coroutine(self.aclose())
def __enter__(self):
return self.run_coroutine(self.__aenter__())
def __exit__(self, *exc_details):
return self.run_coroutine(self.__aexit__(*exc_details))
exit_stack = SyncAsyncExitStack
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
self.addCleanup(self.loop.close)
self.addCleanup(asyncio.set_event_loop_policy, None)
@_async_test
async def test_async_callback(self):
expected = [
((), {}),
((1,), {}),
((1,2), {}),
((), dict(example=1)),
((1,), dict(example=1)),
((1,2), dict(example=1)),
]
result = []
async def _exit(*args, **kwds):
result.append((args, kwds))
async with AsyncExitStack() as stack:
for args, kwds in reversed(expected):
if args and kwds:
f = stack.push_async_callback(_exit, *args, **kwds)
elif args:
f = stack.push_async_callback(_exit, *args)
elif kwds:
f = stack.push_async_callback(_exit, **kwds)
else:
f = stack.push_async_callback(_exit)
self.assertIs(f, _exit)
for wrapper in stack._exit_callbacks:
self.assertIs(wrapper[1].__wrapped__, _exit)
self.assertNotEqual(wrapper[1].__name__, _exit.__name__)
self.assertIsNone(wrapper[1].__doc__, _exit.__doc__)
self.assertEqual(result, expected)
result = []
async with AsyncExitStack() as stack:
with self.assertRaises(TypeError):
stack.push_async_callback(arg=1)
with self.assertRaises(TypeError):
self.exit_stack.push_async_callback(arg=2)
with self.assertRaises(TypeError):
stack.push_async_callback(callback=_exit, arg=3)
self.assertEqual(result, [])
@_async_test
async def test_async_push(self):
exc_raised = ZeroDivisionError
async def _expect_exc(exc_type, exc, exc_tb):
self.assertIs(exc_type, exc_raised)
async def _suppress_exc(*exc_details):
return True
async def _expect_ok(exc_type, exc, exc_tb):
self.assertIsNone(exc_type)
self.assertIsNone(exc)
self.assertIsNone(exc_tb)
class ExitCM(object):
def __init__(self, check_exc):
self.check_exc = check_exc
async def __aenter__(self):
self.fail("Should not be called!")
async def __aexit__(self, *exc_details):
await self.check_exc(*exc_details)
async with self.exit_stack() as stack:
stack.push_async_exit(_expect_ok)
self.assertIs(stack._exit_callbacks[-1][1], _expect_ok)
cm = ExitCM(_expect_ok)
stack.push_async_exit(cm)
self.assertIs(stack._exit_callbacks[-1][1].__self__, cm)
stack.push_async_exit(_suppress_exc)
self.assertIs(stack._exit_callbacks[-1][1], _suppress_exc)
cm = ExitCM(_expect_exc)
stack.push_async_exit(cm)
self.assertIs(stack._exit_callbacks[-1][1].__self__, cm)
stack.push_async_exit(_expect_exc)
self.assertIs(stack._exit_callbacks[-1][1], _expect_exc)
stack.push_async_exit(_expect_exc)
self.assertIs(stack._exit_callbacks[-1][1], _expect_exc)
1/0
@_async_test
async def test_enter_async_context(self):
class TestCM(object):
async def __aenter__(self):
result.append(1)
async def __aexit__(self, *exc_details):
result.append(3)
result = []
cm = TestCM()
async with AsyncExitStack() as stack:
@stack.push_async_callback # Registered first => cleaned up last
async def _exit():
result.append(4)
self.assertIsNotNone(_exit)
await stack.enter_async_context(cm)
self.assertIs(stack._exit_callbacks[-1][1].__self__, cm)
result.append(2)
self.assertEqual(result, [1, 2, 3, 4])
@_async_test
async def test_enter_async_context_errors(self):
class LacksEnterAndExit:
pass
class LacksEnter:
async def __aexit__(self, *exc_info):
pass
class LacksExit:
async def __aenter__(self):
pass
async with self.exit_stack() as stack:
with self.assertRaisesRegex(TypeError, 'asynchronous context manager'):
await stack.enter_async_context(LacksEnterAndExit())
with self.assertRaisesRegex(TypeError, 'asynchronous context manager'):
await stack.enter_async_context(LacksEnter())
with self.assertRaisesRegex(TypeError, 'asynchronous context manager'):
await stack.enter_async_context(LacksExit())
self.assertFalse(stack._exit_callbacks)
@_async_test
async def test_async_exit_exception_chaining(self):
# Ensure exception chaining matches the reference behaviour
async def raise_exc(exc):
raise exc
saved_details = None
async def suppress_exc(*exc_details):
nonlocal saved_details
saved_details = exc_details
return True
try:
async with self.exit_stack() as stack:
stack.push_async_callback(raise_exc, IndexError)
stack.push_async_callback(raise_exc, KeyError)
stack.push_async_callback(raise_exc, AttributeError)
stack.push_async_exit(suppress_exc)
stack.push_async_callback(raise_exc, ValueError)
1 / 0
except IndexError as exc:
self.assertIsInstance(exc.__context__, KeyError)
self.assertIsInstance(exc.__context__.__context__, AttributeError)
# Inner exceptions were suppressed
self.assertIsNone(exc.__context__.__context__.__context__)
else:
self.fail("Expected IndexError, but no exception was raised")
# Check the inner exceptions
inner_exc = saved_details[1]
self.assertIsInstance(inner_exc, ValueError)
self.assertIsInstance(inner_exc.__context__, ZeroDivisionError)
@_async_test
async def test_instance_bypass_async(self):
class Example(object): pass
cm = Example()
cm.__aenter__ = object()
cm.__aexit__ = object()
stack = self.exit_stack()
with self.assertRaisesRegex(TypeError, 'asynchronous context manager'):
await stack.enter_async_context(cm)
stack.push_async_exit(cm)
self.assertIs(stack._exit_callbacks[-1][1], cm)
class TestAsyncNullcontext(unittest.TestCase):
@_async_test
async def test_async_nullcontext(self):
class C:
pass
c = C()
async with nullcontext(c) as c_in:
self.assertIs(c_in, c)
if __name__ == '__main__':
unittest.main()
| true
| true
|
7904add70e4abbcf3f37aba53f90e891b9dc8808
| 5,950
|
py
|
Python
|
atkinson/dlrn/http_data.py
|
jpichon/atkinson
|
e829d9c15161ac252f77605a14be696109b6bfb3
|
[
"MIT"
] | null | null | null |
atkinson/dlrn/http_data.py
|
jpichon/atkinson
|
e829d9c15161ac252f77605a14be696109b6bfb3
|
[
"MIT"
] | null | null | null |
atkinson/dlrn/http_data.py
|
jpichon/atkinson
|
e829d9c15161ac252f77605a14be696109b6bfb3
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
"""Functions for working with the DLRN API"""
import csv
import os.path
import requests
from toolchest import yaml
from atkinson.config.manager import ConfigManager
from atkinson.logging.logger import getLogger
def _raw_fetch(url, logger):
"""
Fetch remote data and return the text output.
:param url: The URL to fetch the data from
:param logger: A logger instance to use.
:return: Raw text data, None otherwise
"""
ret_data = None
try:
req = requests.get(url)
if req.status_code == requests.codes.ok:
ret_data = req.text
except requests.exceptions.ConnectionError as error:
logger.warning(error.request)
return ret_data
def _fetch_yaml(url, logger):
"""
Fetch remote data and process the text as yaml.
:param url: The URL to fetch the data from
:param logger: A logger instance to use.
:return: Parsed yaml data in the form of a dictionary
"""
ret_data = None
raw_data = _raw_fetch(url, logger)
if raw_data is not None:
ret_data = yaml.parse(raw_data)
return ret_data
def dlrn_http_factory(host, config_file=None, link_name=None,
logger=getLogger()):
"""
Create a DlrnData instance based on a host.
:param host: A host name string to build instances
:param config_file: A dlrn config file(s) to use in addition to
the default.
    :param link_name: A dlrn symlink to use. This overrides the config file's
link parameter.
:param logger: An atkinson logger to use. Default is the base logger.
:return: A DlrnData instance
"""
manager = None
files = ['dlrn.yml']
if config_file is not None:
if isinstance(config_file, list):
files.extend(config_file)
else:
files.append(config_file)
local_path = os.path.realpath(os.path.dirname(__file__))
manager = ConfigManager(filenames=files, paths=local_path)
if manager is None:
return None
config = manager.config
if host not in config:
return None
link = config[host]['link']
if link_name is not None:
link = link_name
return DlrnHttpData(config[host]['url'],
config[host]['release'],
link_name=link,
logger=logger)
class DlrnHttpData():
"""A class used to interact with the dlrn API"""
def __init__(self, url, release, link_name='current', logger=getLogger()):
"""
Class constructor
:param url: The URL to the host to obtain data.
        :param release: The release name to use for lookup.
:param link_name: The name of the dlrn symlink to fetch data from.
:param logger: An atkinson logger to use. Default is the base logger.
"""
self.url = os.path.join(url, release)
self.release = release
self._logger = logger
self._link_name = link_name
self._commit_data = {}
self._fetch_commit()
def _fetch_commit(self):
"""
Fetch the commit data from dlrn
"""
full_url = os.path.join(self.url,
self._link_name,
'commit.yaml')
data = _fetch_yaml(full_url, self._logger)
if data is not None and 'commits' in data:
pkg = data['commits'][0]
if pkg['status'] == 'SUCCESS':
self._commit_data = {'name': pkg['project_name'],
'dist_hash': pkg['distro_hash'],
'commit_hash': pkg['commit_hash'],
'extended_hash': pkg.get('extended_hash')}
else:
msg = '{0} has a status of error'.format(str(pkg))
self._logger.warning(msg)
def _build_url(self):
"""
Generate a url given a commit hash and distgit hash to match the format
base/AB/CD/ABCD123_XYZ987 where ABCD123 is the commit hash and XYZ987
is a portion of the distgit hash.
:return: A string with the full URL.
"""
first = self._commit_data['commit_hash'][0:2]
second = self._commit_data['commit_hash'][2:4]
third = self._commit_data['commit_hash']
for key in ['dist_hash', 'extended_hash']:
if self._commit_data.get(key, 'None') != 'None':
third += '_' + self._commit_data[key][0:8]
return os.path.join(self.url,
first,
second,
third)
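    # Illustrative walk-through of the path layout described above (not part of
    # the original module; the hashes are made-up assumptions): with
    # commit_hash='abcdef1234567890', dist_hash='1234567890abcdef' and no
    # extended_hash, _build_url() returns roughly
    #   <url>/<release>/ab/cd/abcdef1234567890_12345678
    # i.e. base/AB/CD/<commit_hash>_<first 8 chars of dist_hash>.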
@property
def commit(self):
"""
Get the dlrn commit information
:return: A dictionary of name, dist-git hash, commit hash and
extended hash.
An empty dictionary is returned otherwise.
"""
return self._commit_data
@property
def versions(self):
"""
Get the version data for the versions.csv file and return the
data in a dictionary
:return: A dictionary of packages with commit and dist-git hashes
"""
ret_dict = {}
full_url = os.path.join(self._build_url(), 'versions.csv')
data = _raw_fetch(full_url, self._logger)
if data is not None:
data = data.replace(' ', '_')
split_data = data.split()
reader = csv.DictReader(split_data)
for row in reader:
ret_dict[row['Project']] = {'source': row['Source_Sha'],
'state': row['Status'],
'distgit': row['Dist_Sha'],
'nvr': row['Pkg_NVR']}
else:
msg = 'Could not fetch {0}'.format(full_url)
self._logger.error(msg)
return ret_dict
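# Hedged usage sketch appended for illustration; the host key below is an
# assumption and must exist in dlrn.yml (or an extra config file) to work:
#
#   dlrn = dlrn_http_factory('centos8-master')
#   if dlrn is not None:
#       print(dlrn.commit)     # {'name': ..., 'dist_hash': ..., 'commit_hash': ..., 'extended_hash': ...}
#       print(dlrn.versions)   # {project: {'source': ..., 'state': ..., 'distgit': ..., 'nvr': ...}}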
| 32.692308
| 79
| 0.565546
|
import csv
import os.path
import requests
from toolchest import yaml
from atkinson.config.manager import ConfigManager
from atkinson.logging.logger import getLogger
def _raw_fetch(url, logger):
ret_data = None
try:
req = requests.get(url)
if req.status_code == requests.codes.ok:
ret_data = req.text
except requests.exceptions.ConnectionError as error:
logger.warning(error.request)
return ret_data
def _fetch_yaml(url, logger):
ret_data = None
raw_data = _raw_fetch(url, logger)
if raw_data is not None:
ret_data = yaml.parse(raw_data)
return ret_data
def dlrn_http_factory(host, config_file=None, link_name=None,
logger=getLogger()):
manager = None
files = ['dlrn.yml']
if config_file is not None:
if isinstance(config_file, list):
files.extend(config_file)
else:
files.append(config_file)
local_path = os.path.realpath(os.path.dirname(__file__))
manager = ConfigManager(filenames=files, paths=local_path)
if manager is None:
return None
config = manager.config
if host not in config:
return None
link = config[host]['link']
if link_name is not None:
link = link_name
return DlrnHttpData(config[host]['url'],
config[host]['release'],
link_name=link,
logger=logger)
class DlrnHttpData():
def __init__(self, url, release, link_name='current', logger=getLogger()):
self.url = os.path.join(url, release)
self.release = release
self._logger = logger
self._link_name = link_name
self._commit_data = {}
self._fetch_commit()
def _fetch_commit(self):
full_url = os.path.join(self.url,
self._link_name,
'commit.yaml')
data = _fetch_yaml(full_url, self._logger)
if data is not None and 'commits' in data:
pkg = data['commits'][0]
if pkg['status'] == 'SUCCESS':
self._commit_data = {'name': pkg['project_name'],
'dist_hash': pkg['distro_hash'],
'commit_hash': pkg['commit_hash'],
'extended_hash': pkg.get('extended_hash')}
else:
msg = '{0} has a status of error'.format(str(pkg))
self._logger.warning(msg)
def _build_url(self):
first = self._commit_data['commit_hash'][0:2]
second = self._commit_data['commit_hash'][2:4]
third = self._commit_data['commit_hash']
for key in ['dist_hash', 'extended_hash']:
if self._commit_data.get(key, 'None') != 'None':
third += '_' + self._commit_data[key][0:8]
return os.path.join(self.url,
first,
second,
third)
@property
def commit(self):
return self._commit_data
@property
def versions(self):
ret_dict = {}
full_url = os.path.join(self._build_url(), 'versions.csv')
data = _raw_fetch(full_url, self._logger)
if data is not None:
data = data.replace(' ', '_')
split_data = data.split()
reader = csv.DictReader(split_data)
for row in reader:
ret_dict[row['Project']] = {'source': row['Source_Sha'],
'state': row['Status'],
'distgit': row['Dist_Sha'],
'nvr': row['Pkg_NVR']}
else:
msg = 'Could not fetch {0}'.format(full_url)
self._logger.error(msg)
return ret_dict
| true
| true
|
7904ae4da0b6717e33cffa41ad9f29f6e442f000
| 1,712
|
py
|
Python
|
cataloger/settings.py
|
centuri-engineering/cataloger
|
4faf7a1a02249e067aea3faf23770324dccd0f69
|
[
"MIT"
] | 1
|
2022-01-14T19:27:09.000Z
|
2022-01-14T19:27:09.000Z
|
cataloger/settings.py
|
centuri-engineering/cataloger
|
4faf7a1a02249e067aea3faf23770324dccd0f69
|
[
"MIT"
] | 10
|
2020-10-12T13:47:50.000Z
|
2022-02-25T18:28:27.000Z
|
cataloger/settings.py
|
centuri-engineering/cataloger
|
4faf7a1a02249e067aea3faf23770324dccd0f69
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Application configuration.
Most configuration is set via environment variables.
For local development, use a .env file to set
environment variables.
"""
from environs import Env
env = Env()
env.read_env()
ENV = env.str("FLASK_ENV", default="production")
DEBUG = ENV == "development"
SQLALCHEMY_DATABASE_URI = env.str("DATABASE_URL")
SECRET_KEY = env.str("SECRET_KEY")
SEND_FILE_MAX_AGE_DEFAULT = env.int("SEND_FILE_MAX_AGE_DEFAULT")
BCRYPT_LOG_ROUNDS = env.int("BCRYPT_LOG_ROUNDS", default=13)
DEBUG_TB_ENABLED = DEBUG
DEBUG_TB_INTERCEPT_REDIRECTS = False
CACHE_TYPE = "simple" # Can be "memcached", "redis", etc.
SQLALCHEMY_TRACK_MODIFICATIONS = False
APPLICATION_ROOT = "/"
SCRIPT_NAME = "/"
AUTH_METHOD = env.str("AUTH_METHOD") # can be 'LDAP', 'OMERO'
if AUTH_METHOD == "LDAP":
LDAP_PORT = env.int("LDAP_PORT", 369)
LDAP_HOST = env.str("LDAP_HOST", "localhost")
LDAP_READONLY = env.bool("LDAP_READONLY", True)
LDAP_BASE_DN = env.str("LDAP_BASE_DN", "")
LDAP_BIND_USER_DN = env.str("LDAP_BIND_USER_DN")
LDAP_BIND_USER_PASSWORD = env.str("LDAP_BIND_USER_PASSWORD")
LDAP_BIND_DIRECT_CREDENTIALS = env.bool("LDAP_BIND_DIRECT_CREDENTIALS")
LDAP_ALWAYS_SEARCH_BIND = env.bool("LDAP_ALWAYS_SEARCH_BIND")
LDAP_USER_LOGIN_ATTR = env.str("LDAP_USER_LOGIN_ATTR", "uid")
LDAP_USER_RDN_ATTR = env.str("LDAP_USER_RDN_ATTR", "uid")
LDAP_USER_DN = env.str("LDAP_USER_DN")
LDAP_USER_SEARCH_SCOPE = env.str("LDAP_USER_SEARCH_SCOPE", "LEVEL")
LDAP_SEARCH_FOR_GROUPS = env.bool("LDAP_SEARCH_FOR_GROUPS", False)
elif AUTH_METHOD == "OMERO":
OMERO_HOST = env.str("OMERO_HOST", "localhost")
OMERO_PORT = env.int("OMERO_PORT", 4064)
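# Hedged example of a local .env file read by env.read_env() above; every value
# here is an illustrative assumption, not a default shipped with the project:
#
#   FLASK_ENV=development
#   DATABASE_URL=postgresql://localhost/cataloger
#   SECRET_KEY=change-me
#   SEND_FILE_MAX_AGE_DEFAULT=0
#   AUTH_METHOD=OMERO
#   OMERO_HOST=omero.example.org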
| 34.24
| 75
| 0.739486
|
from environs import Env
env = Env()
env.read_env()
ENV = env.str("FLASK_ENV", default="production")
DEBUG = ENV == "development"
SQLALCHEMY_DATABASE_URI = env.str("DATABASE_URL")
SECRET_KEY = env.str("SECRET_KEY")
SEND_FILE_MAX_AGE_DEFAULT = env.int("SEND_FILE_MAX_AGE_DEFAULT")
BCRYPT_LOG_ROUNDS = env.int("BCRYPT_LOG_ROUNDS", default=13)
DEBUG_TB_ENABLED = DEBUG
DEBUG_TB_INTERCEPT_REDIRECTS = False
CACHE_TYPE = "simple"
SQLALCHEMY_TRACK_MODIFICATIONS = False
APPLICATION_ROOT = "/"
SCRIPT_NAME = "/"
AUTH_METHOD = env.str("AUTH_METHOD")
if AUTH_METHOD == "LDAP":
LDAP_PORT = env.int("LDAP_PORT", 369)
LDAP_HOST = env.str("LDAP_HOST", "localhost")
LDAP_READONLY = env.bool("LDAP_READONLY", True)
LDAP_BASE_DN = env.str("LDAP_BASE_DN", "")
LDAP_BIND_USER_DN = env.str("LDAP_BIND_USER_DN")
LDAP_BIND_USER_PASSWORD = env.str("LDAP_BIND_USER_PASSWORD")
LDAP_BIND_DIRECT_CREDENTIALS = env.bool("LDAP_BIND_DIRECT_CREDENTIALS")
LDAP_ALWAYS_SEARCH_BIND = env.bool("LDAP_ALWAYS_SEARCH_BIND")
LDAP_USER_LOGIN_ATTR = env.str("LDAP_USER_LOGIN_ATTR", "uid")
LDAP_USER_RDN_ATTR = env.str("LDAP_USER_RDN_ATTR", "uid")
LDAP_USER_DN = env.str("LDAP_USER_DN")
LDAP_USER_SEARCH_SCOPE = env.str("LDAP_USER_SEARCH_SCOPE", "LEVEL")
LDAP_SEARCH_FOR_GROUPS = env.bool("LDAP_SEARCH_FOR_GROUPS", False)
elif AUTH_METHOD == "OMERO":
OMERO_HOST = env.str("OMERO_HOST", "localhost")
OMERO_PORT = env.int("OMERO_PORT", 4064)
| true
| true
|
7904ae5794cd8c14f01c88125eb4edc68be9f382
| 1,703
|
py
|
Python
|
chapter06/dags/listing_6_4.py
|
add54/Data_PipeLine_Apache_Airflow
|
40b52ba6fcda3203b194be9e1c2850135997215a
|
[
"BSD-Source-Code"
] | 303
|
2019-09-30T10:59:15.000Z
|
2022-03-30T17:03:27.000Z
|
chapter06/dags/listing_6_4.py
|
andreaschandra/data-pipelines-with-apache-airflow
|
40b52ba6fcda3203b194be9e1c2850135997215a
|
[
"BSD-Source-Code"
] | 13
|
2020-04-08T12:28:30.000Z
|
2021-12-30T06:40:37.000Z
|
chapter06/dags/listing_6_4.py
|
andreaschandra/data-pipelines-with-apache-airflow
|
40b52ba6fcda3203b194be9e1c2850135997215a
|
[
"BSD-Source-Code"
] | 148
|
2020-01-03T03:30:39.000Z
|
2022-03-28T04:19:43.000Z
|
from pathlib import Path
import airflow.utils.dates
from airflow import DAG
from airflow.operators.dummy import DummyOperator
from airflow.operators.trigger_dagrun import TriggerDagRunOperator
from airflow.sensors.python import PythonSensor
dag1 = DAG(
dag_id="listing_6_04_dag01",
start_date=airflow.utils.dates.days_ago(3),
schedule_interval="0 16 * * *",
)
dag2 = DAG(
dag_id="listing_6_04_dag02",
start_date=airflow.utils.dates.days_ago(3),
schedule_interval=None,
)
def _wait_for_supermarket(supermarket_id_):
supermarket_path = Path("/data/" + supermarket_id_)
data_files = supermarket_path.glob("data-*.csv")
success_file = supermarket_path / "_SUCCESS"
return data_files and success_file.exists()
for supermarket_id in range(1, 5):
wait = PythonSensor(
task_id=f"wait_for_supermarket_{supermarket_id}",
python_callable=_wait_for_supermarket,
op_kwargs={"supermarket_id_": f"supermarket{supermarket_id}"},
dag=dag1,
)
copy = DummyOperator(task_id=f"copy_to_raw_supermarket_{supermarket_id}", dag=dag1)
process = DummyOperator(task_id=f"process_supermarket_{supermarket_id}", dag=dag1)
trigger_create_metrics_dag = TriggerDagRunOperator(
task_id=f"trigger_create_metrics_dag_supermarket_{supermarket_id}",
trigger_dag_id="listing_6_04_dag02",
dag=dag1,
)
wait >> copy >> process >> trigger_create_metrics_dag
compute_differences = DummyOperator(task_id="compute_differences", dag=dag2)
update_dashboard = DummyOperator(task_id="update_dashboard", dag=dag2)
notify_new_data = DummyOperator(task_id="notify_new_data", dag=dag2)
compute_differences >> update_dashboard
| 35.479167
| 87
| 0.760423
|
from pathlib import Path
import airflow.utils.dates
from airflow import DAG
from airflow.operators.dummy import DummyOperator
from airflow.operators.trigger_dagrun import TriggerDagRunOperator
from airflow.sensors.python import PythonSensor
dag1 = DAG(
dag_id="listing_6_04_dag01",
start_date=airflow.utils.dates.days_ago(3),
schedule_interval="0 16 * * *",
)
dag2 = DAG(
dag_id="listing_6_04_dag02",
start_date=airflow.utils.dates.days_ago(3),
schedule_interval=None,
)
def _wait_for_supermarket(supermarket_id_):
supermarket_path = Path("/data/" + supermarket_id_)
data_files = supermarket_path.glob("data-*.csv")
success_file = supermarket_path / "_SUCCESS"
return data_files and success_file.exists()
for supermarket_id in range(1, 5):
wait = PythonSensor(
task_id=f"wait_for_supermarket_{supermarket_id}",
python_callable=_wait_for_supermarket,
op_kwargs={"supermarket_id_": f"supermarket{supermarket_id}"},
dag=dag1,
)
copy = DummyOperator(task_id=f"copy_to_raw_supermarket_{supermarket_id}", dag=dag1)
process = DummyOperator(task_id=f"process_supermarket_{supermarket_id}", dag=dag1)
trigger_create_metrics_dag = TriggerDagRunOperator(
task_id=f"trigger_create_metrics_dag_supermarket_{supermarket_id}",
trigger_dag_id="listing_6_04_dag02",
dag=dag1,
)
wait >> copy >> process >> trigger_create_metrics_dag
compute_differences = DummyOperator(task_id="compute_differences", dag=dag2)
update_dashboard = DummyOperator(task_id="update_dashboard", dag=dag2)
notify_new_data = DummyOperator(task_id="notify_new_data", dag=dag2)
compute_differences >> update_dashboard
| true
| true
|
7904ae6539ae32b4869881b5da4552aa128ddb2f
| 7,061
|
py
|
Python
|
update-attack.py
|
Alexander-RB/attack-website
|
43f21a2b5db0c37826283a3e427d330ba3668b22
|
[
"Apache-2.0"
] | 2
|
2021-04-08T08:05:39.000Z
|
2021-06-01T08:17:46.000Z
|
temp-directory/MITRE-ATTACK-WEBSITE/update-attack.py
|
devgunho/CTI_with_NLP
|
5b98cc76923b79f76e9977745a74e9b868a92ab0
|
[
"Apache-2.0"
] | null | null | null |
temp-directory/MITRE-ATTACK-WEBSITE/update-attack.py
|
devgunho/CTI_with_NLP
|
5b98cc76923b79f76e9977745a74e9b868a92ab0
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import colorama
import json
import os
import time
from string import Template
import modules
from modules import site_config
from modules import util
# argument defaults and options for the CLI
module_choices = ['clean', 'stix_data', 'groups', 'search', 'matrices', 'mitigations', 'software', 'tactics', 'techniques', 'tour', 'website_build', 'random_page', 'subdirectory', 'tests']
extras = ['resources', 'versions', 'contribute', 'blog', 'attack_redirections']
test_choices = ['size', 'links', 'external_links', 'citations']
def validate_subdirectory_string(subdirectory_str):
""" Validate subdirectory string """
if not subdirectory_str.isascii():
raise argparse.ArgumentTypeError("%s contains non ascii characters" % subdirectory_str)
# Remove leading and trailing /
if subdirectory_str.startswith("/"):
subdirectory_str = subdirectory_str[1:]
if subdirectory_str.endswith("/"):
subdirectory_str = subdirectory_str[:-1]
site_config.set_subdirectory(subdirectory_str)
return subdirectory_str
def get_parsed_args():
"""Create argument parser and parse arguments"""
parser = argparse.ArgumentParser(description=("Build the ATT&CK website.\n"
"All flags are optional. If you run the build without flags, "
"the modules that pertain to the ATT&CK dataset will be ran. "
"If you would like to run extra modules, opt-in these modules with the"
"--extras flag."))
parser.add_argument('--refresh', '-r', action='store_true',
                        help='Pull down the current STIX data from the MITRE/CTI GitHub repository')
parser.add_argument('--no-stix-link-replacement', action='store_true',
help="If this flag is absent, links to attack.mitre.org/[page] in the STIX data will be replaced with /[page]. Add this flag to preserve links to attack.mitre.org.")
parser.add_argument('--modules', '-m', nargs='+',
type=str,
choices=module_choices,
help=("Run specific modules by selecting from the "
"list and leaving one space in "
"between them. For example: '-m clean techniques tactics'."
"Will run all the modules if flag is not called, or selected "
"without arguments."))
parser.add_argument('--extras', '-e', nargs='*',
type=str,
choices=extras,
help=("Run extra modules that do not pertain to the ATT&CK dataset. "
"Select from the list and leaving one space in "
"between them. For example: '-m resources blog'.\n"
"These modules will only run if the user adds this flag. "
"Calling this flag without arguments will select all the extra modules."))
parser.add_argument('--test', '-t', nargs='+',
choices=test_choices,
dest="tests",
help="Run specific tests by selecting from the list and leaving "
"one space in between them. For example: '-t output links'. "
"Tests: "
"size (size of output directory against github pages limit); "
"links (dead internal hyperlinks and relative hyperlinks); "
"external_links (dead external hyperlinks); "
"citations (unparsed citation text).")
parser.add_argument('--attack-brand', action='store_true',
help="Applies ATT&CK brand colors. See also the --extras flag.")
parser.add_argument('--proxy', help="set proxy")
parser.add_argument('--subdirectory',
help="If you intend to host the site from a sub-directory, specify the directory using this flag.",
type=validate_subdirectory_string)
parser.add_argument("--print-tests",
dest="print_tests",
action="store_true",
help="Force test output to print to stdout even if the results are very long.")
parser.add_argument("--no-test-exitstatus",
dest="override_exit_status",
action='store_true',
help="Forces application to exit with success status codes even if tests fail.")
args = parser.parse_args()
# If modules is empty, means all modules will be ran
if not args.modules:
args.modules = module_choices
# If the extras flag was called without params, set to all
if not args.extras and isinstance(args.extras, list):
args.extras = extras
# Set global argument list for modules
site_config.args = args
return args
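# Hedged examples of command lines accepted by the parser above; these
# invocations are assumptions built from the declared flags and choice lists:
#
#   python update-attack.py                                  # run all ATT&CK dataset modules
#   python update-attack.py -m clean techniques tactics
#   python update-attack.py --extras resources blog --attack-brand
#   python update-attack.py -t size links --print-tests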
def remove_from_build(arg_modules, arg_extras):
""" Given a list of modules from command line, remove modules that appear in module
directory that are not in list.
"""
def remove_from_running_pool():
""" Remove modules from running pool if they are not in modules list from argument """
copy_of_modules = []
for module in modules.run_ptr:
if module["name"].lower() in arg_modules:
copy_of_modules.append(module)
modules.run_ptr = copy_of_modules
def remove_from_menu():
""" Remove modules from menu if they are not in modules list from argument """
copy_of_menu = []
for module in modules.menu_ptr:
if module["name"].lower() in arg_modules:
copy_of_menu.append(module)
modules.menu_ptr = copy_of_menu
# Only add extra modules if argument flag was used
if arg_extras:
arg_modules = arg_modules + arg_extras
remove_from_running_pool()
remove_from_menu()
if __name__ == "__main__":
"""Beginning of ATT&CK update module"""
# Get args
args = get_parsed_args()
# Remove modules from build
remove_from_build(args.modules, args.extras)
# Arguments used for pelican
site_config.send_to_pelican("no_stix_link_replacement", args.no_stix_link_replacement)
# Start time of update
update_start = time.time()
# Init colorama for output
colorama.init()
# Get running modules and priorities
for ptr in modules.run_ptr:
util.buildhelpers.print_start(ptr['name'])
start_time = time.time()
ptr['run_module']()
end_time = time.time()
util.buildhelpers.print_end(ptr['name'], start_time, end_time)
# Print end of module
update_end = time.time()
util.buildhelpers.print_end("TOTAL Update Time", update_start, update_end)
| 43.319018
| 189
| 0.596233
|
import argparse
import colorama
import json
import os
import time
from string import Template
import modules
from modules import site_config
from modules import util
module_choices = ['clean', 'stix_data', 'groups', 'search', 'matrices', 'mitigations', 'software', 'tactics', 'techniques', 'tour', 'website_build', 'random_page', 'subdirectory', 'tests']
extras = ['resources', 'versions', 'contribute', 'blog', 'attack_redirections']
test_choices = ['size', 'links', 'external_links', 'citations']
def validate_subdirectory_string(subdirectory_str):
if not subdirectory_str.isascii():
raise argparse.ArgumentTypeError("%s contains non ascii characters" % subdirectory_str)
if subdirectory_str.startswith("/"):
subdirectory_str = subdirectory_str[1:]
if subdirectory_str.endswith("/"):
subdirectory_str = subdirectory_str[:-1]
site_config.set_subdirectory(subdirectory_str)
return subdirectory_str
def get_parsed_args():
parser = argparse.ArgumentParser(description=("Build the ATT&CK website.\n"
"All flags are optional. If you run the build without flags, "
"the modules that pertain to the ATT&CK dataset will be ran. "
"If you would like to run extra modules, opt-in these modules with the"
"--extras flag."))
parser.add_argument('--refresh', '-r', action='store_true',
                        help='Pull down the current STIX data from the MITRE/CTI GitHub repository')
parser.add_argument('--no-stix-link-replacement', action='store_true',
help="If this flag is absent, links to attack.mitre.org/[page] in the STIX data will be replaced with /[page]. Add this flag to preserve links to attack.mitre.org.")
parser.add_argument('--modules', '-m', nargs='+',
type=str,
choices=module_choices,
help=("Run specific modules by selecting from the "
"list and leaving one space in "
"between them. For example: '-m clean techniques tactics'."
"Will run all the modules if flag is not called, or selected "
"without arguments."))
parser.add_argument('--extras', '-e', nargs='*',
type=str,
choices=extras,
help=("Run extra modules that do not pertain to the ATT&CK dataset. "
"Select from the list and leaving one space in "
"between them. For example: '-m resources blog'.\n"
"These modules will only run if the user adds this flag. "
"Calling this flag without arguments will select all the extra modules."))
parser.add_argument('--test', '-t', nargs='+',
choices=test_choices,
dest="tests",
help="Run specific tests by selecting from the list and leaving "
"one space in between them. For example: '-t output links'. "
"Tests: "
"size (size of output directory against github pages limit); "
"links (dead internal hyperlinks and relative hyperlinks); "
"external_links (dead external hyperlinks); "
"citations (unparsed citation text).")
parser.add_argument('--attack-brand', action='store_true',
help="Applies ATT&CK brand colors. See also the --extras flag.")
parser.add_argument('--proxy', help="set proxy")
parser.add_argument('--subdirectory',
help="If you intend to host the site from a sub-directory, specify the directory using this flag.",
type=validate_subdirectory_string)
parser.add_argument("--print-tests",
dest="print_tests",
action="store_true",
help="Force test output to print to stdout even if the results are very long.")
parser.add_argument("--no-test-exitstatus",
dest="override_exit_status",
action='store_true',
help="Forces application to exit with success status codes even if tests fail.")
args = parser.parse_args()
if not args.modules:
args.modules = module_choices
if not args.extras and isinstance(args.extras, list):
args.extras = extras
site_config.args = args
return args
def remove_from_build(arg_modules, arg_extras):
def remove_from_running_pool():
copy_of_modules = []
for module in modules.run_ptr:
if module["name"].lower() in arg_modules:
copy_of_modules.append(module)
modules.run_ptr = copy_of_modules
def remove_from_menu():
copy_of_menu = []
for module in modules.menu_ptr:
if module["name"].lower() in arg_modules:
copy_of_menu.append(module)
modules.menu_ptr = copy_of_menu
if arg_extras:
arg_modules = arg_modules + arg_extras
remove_from_running_pool()
remove_from_menu()
if __name__ == "__main__":
args = get_parsed_args()
remove_from_build(args.modules, args.extras)
site_config.send_to_pelican("no_stix_link_replacement", args.no_stix_link_replacement)
update_start = time.time()
colorama.init()
for ptr in modules.run_ptr:
util.buildhelpers.print_start(ptr['name'])
start_time = time.time()
ptr['run_module']()
end_time = time.time()
util.buildhelpers.print_end(ptr['name'], start_time, end_time)
update_end = time.time()
util.buildhelpers.print_end("TOTAL Update Time", update_start, update_end)
| true
| true
|
7904b03b0a2aec971e32e1cd340649c330ce75df
| 2,293
|
py
|
Python
|
server/websockets/consumers/world/broadcasts/avatar.py
|
nking1232/html5-msoy
|
6e026f1989b15310ad67c050beb69a168c3bdd5f
|
[
"MIT"
] | null | null | null |
server/websockets/consumers/world/broadcasts/avatar.py
|
nking1232/html5-msoy
|
6e026f1989b15310ad67c050beb69a168c3bdd5f
|
[
"MIT"
] | null | null | null |
server/websockets/consumers/world/broadcasts/avatar.py
|
nking1232/html5-msoy
|
6e026f1989b15310ad67c050beb69a168c3bdd5f
|
[
"MIT"
] | 2
|
2020-12-18T19:19:38.000Z
|
2020-12-18T19:53:56.000Z
|
from asgiref.sync import sync_to_async
from channels.layers import get_channel_layer
from ....models import Participant
import humps
channel_layer = get_channel_layer()
def get_participant(room_channel_name, channel_name):
participant = Participant.objects.get(
channel_room__channel_name=room_channel_name,
channel_name=channel_name
)
return participant
def get_participant_id(participant):
return participant.id
async def broadcast_avatar_position(room_channel_name, channel_name, json_data):
"""
Sends the new avatar's position to the users of the room.
"""
type = json_data['type']
payload = json_data['payload']
position = payload["position"]
animate = payload["animate"]
# receive the participant that sent this message
participant = await sync_to_async(get_participant)(room_channel_name, channel_name)
participant_id = await sync_to_async(get_participant_id)(participant)
# if this was for an avatar, then set participant's position to the payload data
def set_participant_position():
participant.x = position["x"]
participant.y = position["y"]
participant.direction_x = position["directionX"]
participant.save()
await sync_to_async(set_participant_position)()
await channel_layer.group_send(
room_channel_name,
{
'type': type,
'payload': {
"participant_id": participant_id,
"position": position,
"animate": animate,
}
}
)
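# Hedged sketch of the json_data shape the position handler above expects; the
# field names come from the code, the concrete values and the type label are assumptions:
#
#   {
#       "type": "avatar.position",
#       "payload": {
#           "position": {"x": 120, "y": 45, "directionX": -1},
#           "animate": True,
#       },
#   }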
async def broadcast_avatar_state(room_channel_name, channel_name, json_data):
"""
Sends the new avatar's state to the users of the room.
"""
type = json_data['type']
payload = json_data['payload']
state = payload['value']
# receive the participant that sent this message
participant = await sync_to_async(get_participant)(room_channel_name, channel_name)
participant_id = await sync_to_async(get_participant_id)(participant)
await channel_layer.group_send(
room_channel_name,
{
'type': humps.decamelize(type),
'payload': {
"participant_id": participant_id,
"state": state
}
}
)
| 30.573333
| 87
| 0.66812
|
from asgiref.sync import sync_to_async
from channels.layers import get_channel_layer
from ....models import Participant
import humps
channel_layer = get_channel_layer()
def get_participant(room_channel_name, channel_name):
participant = Participant.objects.get(
channel_room__channel_name=room_channel_name,
channel_name=channel_name
)
return participant
def get_participant_id(participant):
return participant.id
async def broadcast_avatar_position(room_channel_name, channel_name, json_data):
type = json_data['type']
payload = json_data['payload']
position = payload["position"]
animate = payload["animate"]
participant = await sync_to_async(get_participant)(room_channel_name, channel_name)
participant_id = await sync_to_async(get_participant_id)(participant)
def set_participant_position():
participant.x = position["x"]
participant.y = position["y"]
participant.direction_x = position["directionX"]
participant.save()
await sync_to_async(set_participant_position)()
await channel_layer.group_send(
room_channel_name,
{
'type': type,
'payload': {
"participant_id": participant_id,
"position": position,
"animate": animate,
}
}
)
async def broadcast_avatar_state(room_channel_name, channel_name, json_data):
type = json_data['type']
payload = json_data['payload']
state = payload['value']
# receive the participant that sent this message
participant = await sync_to_async(get_participant)(room_channel_name, channel_name)
participant_id = await sync_to_async(get_participant_id)(participant)
await channel_layer.group_send(
room_channel_name,
{
'type': humps.decamelize(type),
'payload': {
"participant_id": participant_id,
"state": state
}
}
)
| true
| true
|
7904b0e611c3fa61bcb656dfacb3cb6407036a58
| 17,862
|
py
|
Python
|
egs/librispeech/ASR/tdnn_lstm_ctc/train.py
|
aarora8/icefall
|
8cb7f712e413fffbcdfdd865be73d6ff43f0ce7a
|
[
"Apache-2.0"
] | null | null | null |
egs/librispeech/ASR/tdnn_lstm_ctc/train.py
|
aarora8/icefall
|
8cb7f712e413fffbcdfdd865be73d6ff43f0ce7a
|
[
"Apache-2.0"
] | null | null | null |
egs/librispeech/ASR/tdnn_lstm_ctc/train.py
|
aarora8/icefall
|
8cb7f712e413fffbcdfdd865be73d6ff43f0ce7a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang
# Mingshuang Luo)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
from pathlib import Path
from shutil import copyfile
from typing import Optional, Tuple
import k2
import torch
import torch.multiprocessing as mp
import torch.nn as nn
import torch.optim as optim
from asr_datamodule import LibriSpeechAsrDataModule
from lhotse.utils import fix_random_seed
from model import TdnnLstm
from torch import Tensor
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.nn.utils import clip_grad_norm_
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
from icefall.checkpoint import load_checkpoint
from icefall.checkpoint import save_checkpoint as save_checkpoint_impl
from icefall.dist import cleanup_dist, setup_dist
from icefall.graph_compiler import CtcTrainingGraphCompiler
from icefall.lexicon import Lexicon
from icefall.utils import (
AttributeDict,
MetricsTracker,
encode_supervisions,
get_env_info,
setup_logger,
str2bool,
)
def get_parser():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"--world-size",
type=int,
default=1,
help="Number of GPUs for DDP training.",
)
parser.add_argument(
"--master-port",
type=int,
default=12354,
help="Master port to use for DDP training.",
)
parser.add_argument(
"--tensorboard",
type=str2bool,
default=True,
help="Should various information be logged in tensorboard.",
)
parser.add_argument(
"--num-epochs",
type=int,
default=20,
help="Number of epochs to train.",
)
parser.add_argument(
"--start-epoch",
type=int,
default=0,
help="""Resume training from from this epoch.
If it is positive, it will load checkpoint from
tdnn_lstm_ctc/exp/epoch-{start_epoch-1}.pt
""",
)
return parser
def get_params() -> AttributeDict:
"""Return a dict containing training parameters.
    All training-related parameters that are not passed from the commandline
    are saved in the variable `params`.
Commandline options are merged into `params` after they are parsed, so
you can also access them via `params`.
Explanation of options saved in `params`:
- exp_dir: It specifies the directory where all training related
files, e.g., checkpoints, log, etc, are saved
- lang_dir: It contains language related input files such as
"lexicon.txt"
- lr: It specifies the initial learning rate
- feature_dim: The model input dim. It has to match the one used
in computing features.
- weight_decay: The weight_decay for the optimizer.
- subsampling_factor: The subsampling factor for the model.
- best_train_loss: Best training loss so far. It is used to select
the model that has the lowest training loss. It is
updated during the training.
- best_valid_loss: Best validation loss so far. It is used to select
the model that has the lowest validation loss. It is
updated during the training.
- best_train_epoch: It is the epoch that has the best training loss.
- best_valid_epoch: It is the epoch that has the best validation loss.
    - batch_idx_train: Used when writing statistics to tensorboard. It
                       contains the number of batches trained so far across
                       epochs.
    - log_interval:  Print training loss if batch_idx % log_interval is 0
    - reset_interval: Reset statistics if batch_idx % reset_interval is 0
    - valid_interval:  Run validation if batch_idx % valid_interval is 0
- beam_size: It is used in k2.ctc_loss
- reduction: It is used in k2.ctc_loss
- use_double_scores: It is used in k2.ctc_loss
"""
params = AttributeDict(
{
"exp_dir": Path("tdnn_lstm_ctc/exp"),
"lang_dir": Path("data/lang_phone"),
"lr": 1e-3,
"feature_dim": 80,
"weight_decay": 5e-4,
"subsampling_factor": 3,
"best_train_loss": float("inf"),
"best_valid_loss": float("inf"),
"best_train_epoch": -1,
"best_valid_epoch": -1,
"batch_idx_train": 0,
"log_interval": 10,
"reset_interval": 200,
"valid_interval": 1000,
"beam_size": 10,
"reduction": "sum",
"use_double_scores": True,
"env_info": get_env_info(),
}
)
return params
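# Illustrative sketch (not called anywhere): how commandline options get merged
# into `params`, as described in the docstring above. The option values below
# are placeholders chosen only for the example.
def _example_merge_args_into_params():
    parser = get_parser()
    args = parser.parse_args(["--num-epochs", "5", "--start-epoch", "0"])
    params = get_params()
    params.update(vars(args))
    # Defaults from get_params() and parsed options are now both reachable as
    # attributes, e.g. params.lr and params.num_epochs.
    return params.lr, params.num_epochs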
def load_checkpoint_if_available(
params: AttributeDict,
model: nn.Module,
optimizer: Optional[torch.optim.Optimizer] = None,
scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None,
) -> None:
"""Load checkpoint from file.
If params.start_epoch is positive, it will load the checkpoint from
`params.start_epoch - 1`. Otherwise, this function does nothing.
Apart from loading state dict for `model`, `optimizer` and `scheduler`,
it also updates `best_train_epoch`, `best_train_loss`, `best_valid_epoch`,
and `best_valid_loss` in `params`.
Args:
params:
The return value of :func:`get_params`.
model:
The training model.
optimizer:
The optimizer that we are using.
scheduler:
The learning rate scheduler we are using.
Returns:
      The contents loaded from the checkpoint (a dict), or None if no checkpoint was loaded.
"""
if params.start_epoch <= 0:
return
filename = params.exp_dir / f"epoch-{params.start_epoch-1}.pt"
saved_params = load_checkpoint(
filename,
model=model,
optimizer=optimizer,
scheduler=scheduler,
)
keys = [
"best_train_epoch",
"best_valid_epoch",
"batch_idx_train",
"best_train_loss",
"best_valid_loss",
]
for k in keys:
params[k] = saved_params[k]
return saved_params
def save_checkpoint(
params: AttributeDict,
model: nn.Module,
optimizer: torch.optim.Optimizer,
scheduler: torch.optim.lr_scheduler._LRScheduler,
rank: int = 0,
) -> None:
"""Save model, optimizer, scheduler and training stats to file.
Args:
params:
It is returned by :func:`get_params`.
model:
The training model.
"""
if rank != 0:
return
filename = params.exp_dir / f"epoch-{params.cur_epoch}.pt"
save_checkpoint_impl(
filename=filename,
model=model,
params=params,
optimizer=optimizer,
scheduler=scheduler,
rank=rank,
)
if params.best_train_epoch == params.cur_epoch:
best_train_filename = params.exp_dir / "best-train-loss.pt"
copyfile(src=filename, dst=best_train_filename)
if params.best_valid_epoch == params.cur_epoch:
best_valid_filename = params.exp_dir / "best-valid-loss.pt"
copyfile(src=filename, dst=best_valid_filename)
def compute_loss(
params: AttributeDict,
model: nn.Module,
batch: dict,
graph_compiler: CtcTrainingGraphCompiler,
is_training: bool,
) -> Tuple[Tensor, MetricsTracker]:
"""
Compute CTC loss given the model and its inputs.
Args:
params:
Parameters for training. See :func:`get_params`.
model:
The model for training. It is an instance of TdnnLstm in our case.
batch:
A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()`
for the content in it.
graph_compiler:
It is used to build a decoding graph from a ctc topo and training
transcript. The training transcript is contained in the given `batch`,
while the ctc topo is built when this compiler is instantiated.
is_training:
True for training. False for validation. When it is True, this
function enables autograd during computation; when it is False, it
disables autograd.
"""
device = graph_compiler.device
feature = batch["inputs"]
# at entry, feature is (N, T, C)
feature = feature.permute(0, 2, 1) # now feature is (N, C, T)
assert feature.ndim == 3
feature = feature.to(device)
with torch.set_grad_enabled(is_training):
nnet_output = model(feature)
# nnet_output is (N, T, C)
# NOTE: We need `encode_supervisions` to sort sequences with
# different duration in decreasing order, required by
# `k2.intersect_dense` called in `k2.ctc_loss`
supervisions = batch["supervisions"]
supervision_segments, texts = encode_supervisions(
supervisions, subsampling_factor=params.subsampling_factor
)
decoding_graph = graph_compiler.compile(texts)
dense_fsa_vec = k2.DenseFsaVec(
nnet_output,
supervision_segments,
allow_truncate=params.subsampling_factor - 1,
)
loss = k2.ctc_loss(
decoding_graph=decoding_graph,
dense_fsa_vec=dense_fsa_vec,
output_beam=params.beam_size,
reduction=params.reduction,
use_double_scores=params.use_double_scores,
)
assert loss.requires_grad == is_training
info = MetricsTracker()
info["frames"] = supervision_segments[:, 2].sum().item()
info["loss"] = loss.detach().cpu().item()
return loss, info
def compute_validation_loss(
params: AttributeDict,
model: nn.Module,
graph_compiler: CtcTrainingGraphCompiler,
valid_dl: torch.utils.data.DataLoader,
world_size: int = 1,
) -> MetricsTracker:
"""Run the validation process. The validation loss
is saved in `params.valid_loss`.
"""
model.eval()
tot_loss = MetricsTracker()
for batch_idx, batch in enumerate(valid_dl):
loss, loss_info = compute_loss(
params=params,
model=model,
batch=batch,
graph_compiler=graph_compiler,
is_training=False,
)
assert loss.requires_grad is False
tot_loss = tot_loss + loss_info
if world_size > 1:
tot_loss.reduce(loss.device)
loss_value = tot_loss["loss"] / tot_loss["frames"]
if loss_value < params.best_valid_loss:
params.best_valid_epoch = params.cur_epoch
params.best_valid_loss = loss_value
return tot_loss
def train_one_epoch(
params: AttributeDict,
model: nn.Module,
optimizer: torch.optim.Optimizer,
graph_compiler: CtcTrainingGraphCompiler,
train_dl: torch.utils.data.DataLoader,
valid_dl: torch.utils.data.DataLoader,
tb_writer: Optional[SummaryWriter] = None,
world_size: int = 1,
) -> None:
"""Train the model for one epoch.
The training loss from the mean of all frames is saved in
`params.train_loss`. It runs the validation process every
`params.valid_interval` batches.
Args:
params:
It is returned by :func:`get_params`.
model:
The model for training.
optimizer:
The optimizer we are using.
graph_compiler:
It is used to convert transcripts to FSAs.
train_dl:
Dataloader for the training dataset.
valid_dl:
Dataloader for the validation dataset.
tb_writer:
Writer to write log messages to tensorboard.
world_size:
Number of nodes in DDP training. If it is 1, DDP is disabled.
"""
model.train()
tot_loss = MetricsTracker()
for batch_idx, batch in enumerate(train_dl):
params.batch_idx_train += 1
batch_size = len(batch["supervisions"]["text"])
loss, loss_info = compute_loss(
params=params,
model=model,
batch=batch,
graph_compiler=graph_compiler,
is_training=True,
)
# summary stats.
tot_loss = (tot_loss * (1 - 1 / params.reset_interval)) + loss_info
optimizer.zero_grad()
loss.backward()
clip_grad_norm_(model.parameters(), 5.0, 2.0)
optimizer.step()
if batch_idx % params.log_interval == 0:
logging.info(
f"Epoch {params.cur_epoch}, "
f"batch {batch_idx}, loss[{loss_info}], "
f"tot_loss[{tot_loss}], batch size: {batch_size}"
)
if batch_idx % params.log_interval == 0:
if tb_writer is not None:
loss_info.write_summary(
tb_writer, "train/current_", params.batch_idx_train
)
tot_loss.write_summary(
tb_writer, "train/tot_", params.batch_idx_train
)
if batch_idx > 0 and batch_idx % params.valid_interval == 0:
valid_info = compute_validation_loss(
params=params,
model=model,
graph_compiler=graph_compiler,
valid_dl=valid_dl,
world_size=world_size,
)
model.train()
logging.info(f"Epoch {params.cur_epoch}, validation {valid_info}")
if tb_writer is not None:
valid_info.write_summary(
tb_writer,
"train/valid_",
params.batch_idx_train,
)
loss_value = tot_loss["loss"] / tot_loss["frames"]
params.train_loss = loss_value
if params.train_loss < params.best_train_loss:
params.best_train_epoch = params.cur_epoch
params.best_train_loss = params.train_loss
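# Illustrative sketch (not called anywhere): the running statistic kept in
# `tot_loss` above is a decayed sum, tot <- tot * (1 - 1/reset_interval) + x, so
# contributions from old batches fade out over roughly `reset_interval` batches.
# Plain floats are used here instead of MetricsTracker just to show the update.
def _example_decayed_running_sum(values, reset_interval=200):
    tot = 0.0
    for x in values:
        tot = tot * (1 - 1 / reset_interval) + x
    return tot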
def run(rank, world_size, args):
"""
Args:
rank:
It is a value between 0 and `world_size-1`, which is
passed automatically by `mp.spawn()` in :func:`main`.
The node with rank 0 is responsible for saving checkpoint.
world_size:
Number of GPUs for DDP training.
args:
The return value of get_parser().parse_args()
"""
params = get_params()
params.update(vars(args))
fix_random_seed(42)
if world_size > 1:
setup_dist(rank, world_size, params.master_port)
setup_logger(f"{params.exp_dir}/log/log-train")
logging.info("Training started")
logging.info(params)
if args.tensorboard and rank == 0:
tb_writer = SummaryWriter(log_dir=f"{params.exp_dir}/tensorboard")
else:
tb_writer = None
lexicon = Lexicon(params.lang_dir)
max_phone_id = max(lexicon.tokens)
device = torch.device("cpu")
if torch.cuda.is_available():
device = torch.device("cuda", rank)
graph_compiler = CtcTrainingGraphCompiler(lexicon=lexicon, device=device)
model = TdnnLstm(
num_features=params.feature_dim,
num_classes=max_phone_id + 1, # +1 for the blank symbol
subsampling_factor=params.subsampling_factor,
)
checkpoints = load_checkpoint_if_available(params=params, model=model)
model.to(device)
if world_size > 1:
model = DDP(model, device_ids=[rank])
optimizer = optim.AdamW(
model.parameters(),
lr=params.lr,
weight_decay=params.weight_decay,
)
scheduler = StepLR(optimizer, step_size=8, gamma=0.1)
if checkpoints:
optimizer.load_state_dict(checkpoints["optimizer"])
scheduler.load_state_dict(checkpoints["scheduler"])
librispeech = LibriSpeechAsrDataModule(args)
train_dl = librispeech.train_dataloaders()
valid_dl = librispeech.valid_dataloaders()
for epoch in range(params.start_epoch, params.num_epochs):
train_dl.sampler.set_epoch(epoch)
if epoch > params.start_epoch:
logging.info(f"epoch {epoch}, lr: {scheduler.get_last_lr()[0]}")
if tb_writer is not None:
tb_writer.add_scalar(
"train/lr",
scheduler.get_last_lr()[0],
params.batch_idx_train,
)
tb_writer.add_scalar("train/epoch", epoch, params.batch_idx_train)
params.cur_epoch = epoch
train_one_epoch(
params=params,
model=model,
optimizer=optimizer,
graph_compiler=graph_compiler,
train_dl=train_dl,
valid_dl=valid_dl,
tb_writer=tb_writer,
world_size=world_size,
)
scheduler.step()
save_checkpoint(
params=params,
model=model,
optimizer=optimizer,
scheduler=scheduler,
rank=rank,
)
logging.info("Done!")
if world_size > 1:
torch.distributed.barrier()
cleanup_dist()
def main():
parser = get_parser()
LibriSpeechAsrDataModule.add_arguments(parser)
args = parser.parse_args()
world_size = args.world_size
assert world_size >= 1
if world_size > 1:
mp.spawn(run, args=(world_size, args), nprocs=world_size, join=True)
else:
run(rank=0, world_size=1, args=args)
if __name__ == "__main__":
main()
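# Illustrative sketch (not part of the original script): the StepLR schedule
# created in run() (step_size=8, gamma=0.1, stepped once per epoch) drops the
# learning rate by a factor of 10 every 8 epochs. This helper only mimics that
# progression with plain arithmetic; the starting value is a placeholder.
def _example_lr_schedule(base_lr=1e-3, num_epochs=20):
    lrs = []
    lr = base_lr
    for epoch in range(num_epochs):
        lrs.append(lr)
        if (epoch + 1) % 8 == 0:
            lr *= 0.1
    return lrs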
| 29.969799
| 79
| 0.632124
|
import argparse
import logging
from pathlib import Path
from shutil import copyfile
from typing import Optional, Tuple
import k2
import torch
import torch.multiprocessing as mp
import torch.nn as nn
import torch.optim as optim
from asr_datamodule import LibriSpeechAsrDataModule
from lhotse.utils import fix_random_seed
from model import TdnnLstm
from torch import Tensor
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.nn.utils import clip_grad_norm_
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
from icefall.checkpoint import load_checkpoint
from icefall.checkpoint import save_checkpoint as save_checkpoint_impl
from icefall.dist import cleanup_dist, setup_dist
from icefall.graph_compiler import CtcTrainingGraphCompiler
from icefall.lexicon import Lexicon
from icefall.utils import (
AttributeDict,
MetricsTracker,
encode_supervisions,
get_env_info,
setup_logger,
str2bool,
)
def get_parser():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"--world-size",
type=int,
default=1,
help="Number of GPUs for DDP training.",
)
parser.add_argument(
"--master-port",
type=int,
default=12354,
help="Master port to use for DDP training.",
)
parser.add_argument(
"--tensorboard",
type=str2bool,
default=True,
help="Should various information be logged in tensorboard.",
)
parser.add_argument(
"--num-epochs",
type=int,
default=20,
help="Number of epochs to train.",
)
parser.add_argument(
"--start-epoch",
type=int,
default=0,
help="""Resume training from from this epoch.
If it is positive, it will load checkpoint from
tdnn_lstm_ctc/exp/epoch-{start_epoch-1}.pt
""",
)
return parser
def get_params() -> AttributeDict:
params = AttributeDict(
{
"exp_dir": Path("tdnn_lstm_ctc/exp"),
"lang_dir": Path("data/lang_phone"),
"lr": 1e-3,
"feature_dim": 80,
"weight_decay": 5e-4,
"subsampling_factor": 3,
"best_train_loss": float("inf"),
"best_valid_loss": float("inf"),
"best_train_epoch": -1,
"best_valid_epoch": -1,
"batch_idx_train": 0,
"log_interval": 10,
"reset_interval": 200,
"valid_interval": 1000,
"beam_size": 10,
"reduction": "sum",
"use_double_scores": True,
"env_info": get_env_info(),
}
)
return params
def load_checkpoint_if_available(
params: AttributeDict,
model: nn.Module,
optimizer: Optional[torch.optim.Optimizer] = None,
scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None,
) -> None:
if params.start_epoch <= 0:
return
filename = params.exp_dir / f"epoch-{params.start_epoch-1}.pt"
saved_params = load_checkpoint(
filename,
model=model,
optimizer=optimizer,
scheduler=scheduler,
)
keys = [
"best_train_epoch",
"best_valid_epoch",
"batch_idx_train",
"best_train_loss",
"best_valid_loss",
]
for k in keys:
params[k] = saved_params[k]
return saved_params
def save_checkpoint(
params: AttributeDict,
model: nn.Module,
optimizer: torch.optim.Optimizer,
scheduler: torch.optim.lr_scheduler._LRScheduler,
rank: int = 0,
) -> None:
if rank != 0:
return
filename = params.exp_dir / f"epoch-{params.cur_epoch}.pt"
save_checkpoint_impl(
filename=filename,
model=model,
params=params,
optimizer=optimizer,
scheduler=scheduler,
rank=rank,
)
if params.best_train_epoch == params.cur_epoch:
best_train_filename = params.exp_dir / "best-train-loss.pt"
copyfile(src=filename, dst=best_train_filename)
if params.best_valid_epoch == params.cur_epoch:
best_valid_filename = params.exp_dir / "best-valid-loss.pt"
copyfile(src=filename, dst=best_valid_filename)
def compute_loss(
params: AttributeDict,
model: nn.Module,
batch: dict,
graph_compiler: CtcTrainingGraphCompiler,
is_training: bool,
) -> Tuple[Tensor, MetricsTracker]:
device = graph_compiler.device
feature = batch["inputs"]
feature = feature.permute(0, 2, 1)
assert feature.ndim == 3
feature = feature.to(device)
with torch.set_grad_enabled(is_training):
nnet_output = model(feature)
supervisions = batch["supervisions"]
supervision_segments, texts = encode_supervisions(
supervisions, subsampling_factor=params.subsampling_factor
)
decoding_graph = graph_compiler.compile(texts)
dense_fsa_vec = k2.DenseFsaVec(
nnet_output,
supervision_segments,
allow_truncate=params.subsampling_factor - 1,
)
loss = k2.ctc_loss(
decoding_graph=decoding_graph,
dense_fsa_vec=dense_fsa_vec,
output_beam=params.beam_size,
reduction=params.reduction,
use_double_scores=params.use_double_scores,
)
assert loss.requires_grad == is_training
info = MetricsTracker()
info["frames"] = supervision_segments[:, 2].sum().item()
info["loss"] = loss.detach().cpu().item()
return loss, info
def compute_validation_loss(
params: AttributeDict,
model: nn.Module,
graph_compiler: CtcTrainingGraphCompiler,
valid_dl: torch.utils.data.DataLoader,
world_size: int = 1,
) -> MetricsTracker:
model.eval()
tot_loss = MetricsTracker()
for batch_idx, batch in enumerate(valid_dl):
loss, loss_info = compute_loss(
params=params,
model=model,
batch=batch,
graph_compiler=graph_compiler,
is_training=False,
)
assert loss.requires_grad is False
tot_loss = tot_loss + loss_info
if world_size > 1:
tot_loss.reduce(loss.device)
loss_value = tot_loss["loss"] / tot_loss["frames"]
if loss_value < params.best_valid_loss:
params.best_valid_epoch = params.cur_epoch
params.best_valid_loss = loss_value
return tot_loss
def train_one_epoch(
params: AttributeDict,
model: nn.Module,
optimizer: torch.optim.Optimizer,
graph_compiler: CtcTrainingGraphCompiler,
train_dl: torch.utils.data.DataLoader,
valid_dl: torch.utils.data.DataLoader,
tb_writer: Optional[SummaryWriter] = None,
world_size: int = 1,
) -> None:
model.train()
tot_loss = MetricsTracker()
for batch_idx, batch in enumerate(train_dl):
params.batch_idx_train += 1
batch_size = len(batch["supervisions"]["text"])
loss, loss_info = compute_loss(
params=params,
model=model,
batch=batch,
graph_compiler=graph_compiler,
is_training=True,
)
tot_loss = (tot_loss * (1 - 1 / params.reset_interval)) + loss_info
optimizer.zero_grad()
loss.backward()
clip_grad_norm_(model.parameters(), 5.0, 2.0)
optimizer.step()
if batch_idx % params.log_interval == 0:
logging.info(
f"Epoch {params.cur_epoch}, "
f"batch {batch_idx}, loss[{loss_info}], "
f"tot_loss[{tot_loss}], batch size: {batch_size}"
)
if batch_idx % params.log_interval == 0:
if tb_writer is not None:
loss_info.write_summary(
tb_writer, "train/current_", params.batch_idx_train
)
tot_loss.write_summary(
tb_writer, "train/tot_", params.batch_idx_train
)
if batch_idx > 0 and batch_idx % params.valid_interval == 0:
valid_info = compute_validation_loss(
params=params,
model=model,
graph_compiler=graph_compiler,
valid_dl=valid_dl,
world_size=world_size,
)
model.train()
logging.info(f"Epoch {params.cur_epoch}, validation {valid_info}")
if tb_writer is not None:
valid_info.write_summary(
tb_writer,
"train/valid_",
params.batch_idx_train,
)
loss_value = tot_loss["loss"] / tot_loss["frames"]
params.train_loss = loss_value
if params.train_loss < params.best_train_loss:
params.best_train_epoch = params.cur_epoch
params.best_train_loss = params.train_loss
def run(rank, world_size, args):
params = get_params()
params.update(vars(args))
fix_random_seed(42)
if world_size > 1:
setup_dist(rank, world_size, params.master_port)
setup_logger(f"{params.exp_dir}/log/log-train")
logging.info("Training started")
logging.info(params)
if args.tensorboard and rank == 0:
tb_writer = SummaryWriter(log_dir=f"{params.exp_dir}/tensorboard")
else:
tb_writer = None
lexicon = Lexicon(params.lang_dir)
max_phone_id = max(lexicon.tokens)
device = torch.device("cpu")
if torch.cuda.is_available():
device = torch.device("cuda", rank)
graph_compiler = CtcTrainingGraphCompiler(lexicon=lexicon, device=device)
model = TdnnLstm(
num_features=params.feature_dim,
num_classes=max_phone_id + 1,
subsampling_factor=params.subsampling_factor,
)
checkpoints = load_checkpoint_if_available(params=params, model=model)
model.to(device)
if world_size > 1:
model = DDP(model, device_ids=[rank])
optimizer = optim.AdamW(
model.parameters(),
lr=params.lr,
weight_decay=params.weight_decay,
)
scheduler = StepLR(optimizer, step_size=8, gamma=0.1)
if checkpoints:
optimizer.load_state_dict(checkpoints["optimizer"])
scheduler.load_state_dict(checkpoints["scheduler"])
librispeech = LibriSpeechAsrDataModule(args)
train_dl = librispeech.train_dataloaders()
valid_dl = librispeech.valid_dataloaders()
for epoch in range(params.start_epoch, params.num_epochs):
train_dl.sampler.set_epoch(epoch)
if epoch > params.start_epoch:
logging.info(f"epoch {epoch}, lr: {scheduler.get_last_lr()[0]}")
if tb_writer is not None:
tb_writer.add_scalar(
"train/lr",
scheduler.get_last_lr()[0],
params.batch_idx_train,
)
tb_writer.add_scalar("train/epoch", epoch, params.batch_idx_train)
params.cur_epoch = epoch
train_one_epoch(
params=params,
model=model,
optimizer=optimizer,
graph_compiler=graph_compiler,
train_dl=train_dl,
valid_dl=valid_dl,
tb_writer=tb_writer,
world_size=world_size,
)
scheduler.step()
save_checkpoint(
params=params,
model=model,
optimizer=optimizer,
scheduler=scheduler,
rank=rank,
)
logging.info("Done!")
if world_size > 1:
torch.distributed.barrier()
cleanup_dist()
def main():
parser = get_parser()
LibriSpeechAsrDataModule.add_arguments(parser)
args = parser.parse_args()
world_size = args.world_size
assert world_size >= 1
if world_size > 1:
mp.spawn(run, args=(world_size, args), nprocs=world_size, join=True)
else:
run(rank=0, world_size=1, args=args)
if __name__ == "__main__":
main()
| true
| true
|
7904b1463761804da7691d4d9ee95e88b306acc6
| 574
|
py
|
Python
|
py/memcheck.py
|
lbyoo/l_clib
|
8a0eaa0fe505d0f35ca24e8ba239c2643dbdb784
|
[
"Apache-2.0"
] | null | null | null |
py/memcheck.py
|
lbyoo/l_clib
|
8a0eaa0fe505d0f35ca24e8ba239c2643dbdb784
|
[
"Apache-2.0"
] | null | null | null |
py/memcheck.py
|
lbyoo/l_clib
|
8a0eaa0fe505d0f35ca24e8ba239c2643dbdb784
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
"""
Usage: program | ./memcheck.py
"""
import fileinput
import pdb
with fileinput.input() as f:
data = "".join(f)
s = {}
for l in data.splitlines():
if "malloc:" in l:
c = l.split(":")
s[c[-1].strip()] = l
# print("malloc:%s" %c[-1].strip())
if "free:" in l:
c = l.split(":")
del s[c[-1].strip()]
# print("free:%s" %c[-1].strip())
# print("size: %d" % len(s))
print("以下内存申请可能未释放,请检查:")
for l in s:
print(s[l])
else:
print("没有需要处理的")
| 16.882353
| 44
| 0.463415
|
import fileinput
import pdb
with fileinput.input() as f:
data = "".join(f)
s = {}
for l in data.splitlines():
if "malloc:" in l:
c = l.split(":")
s[c[-1].strip()] = l
if "free:" in l:
c = l.split(":")
del s[c[-1].strip()]
print("以下内存申请可能未释放,请检查:")
for l in s:
print(s[l])
else:
print("没有需要处理的")
| true
| true
|
7904b165e3b895846a449ff76f2e4e98d5080f5d
| 2,476
|
py
|
Python
|
datasets/ade.py
|
hsfzxjy/ESSNet
|
6dc2f53b074a0800c17109a1f38a010e3944d96b
|
[
"MIT"
] | 27
|
2020-12-12T13:34:09.000Z
|
2022-03-23T07:35:32.000Z
|
datasets/ade.py
|
hsfzxjy/ESSNet
|
6dc2f53b074a0800c17109a1f38a010e3944d96b
|
[
"MIT"
] | 6
|
2021-02-15T02:22:58.000Z
|
2021-04-09T20:22:09.000Z
|
datasets/ade.py
|
hsfzxjy/ESSNet
|
6dc2f53b074a0800c17109a1f38a010e3944d96b
|
[
"MIT"
] | 3
|
2020-12-15T09:38:51.000Z
|
2021-03-21T12:23:36.000Z
|
from __future__ import print_function, division
import json
import torch
from torch.utils.data import Dataset
import numpy as np
import os
import sys
import collections
import torch.utils.data as data
import shutil
from PIL import Image
from torchvision.datasets.utils import download_url, check_integrity
class ADE20KDataset(Dataset):
def __init__(self,ROOT_DIR, period, transform=None):
self.root_dir = ROOT_DIR
self.rst_dir = os.path.join(self.root_dir,'ADEChallengeData2016','result')
self.period = period
self.num_categories = 150
self.transform = transform
self.odgt = None
if self.period == 'train':
self.odgt = os.path.join(self.root_dir,'ADEChallengeData2016','train.odgt')
else:
self.odgt = os.path.join(self.root_dir,'ADEChallengeData2016','validation.odgt')
self.list_sample = [json.loads(x.rstrip()) for x in open(self.odgt, 'r')]
def __len__(self):
return len(self.list_sample)
def __getitem__(self, idx):
image_path = os.path.join(self.root_dir, self.list_sample[idx]['fpath_img'])
img = Image.open(image_path).convert('RGB')
r = self.list_sample[idx]['height']
c = self.list_sample[idx]['width']
name = self.list_sample[idx]['fpath_img'].replace('ADEChallengeData2016/images/','')
if self.period == 'train':
name = name.replace('train/','')
if 'val' in self.period:
name = name.replace('validation/','')
assert(self.period != 'test')
name = name.replace('.jpg','')
sample = {'image': img, 'name': name, 'row': r, 'col': c}
if self.period == 'train' or self.period == 'val':
seg_path = os.path.join(self.root_dir, self.list_sample[idx]['fpath_segm'])
seg = Image.open(seg_path)
sample['segmentation'] = seg
#assert(seg.ndim == 2)
assert(img.size[0] == seg.size[0])
assert(img.size[1] == seg.size[1])
if self.transform is not None:
img, target = self.transform(img, seg)
return img, target
def decode_target(self, label):
m = label.astype(np.uint16)
r,c = m.shape
cmap = np.zeros((r,c,3), dtype=np.uint8)
cmap[:,:,0] = (m&1)<<7 | (m&8)<<3 | (m&64)>>1
cmap[:,:,1] = (m&2)<<6 | (m&16)<<2 | (m&128)>>2
cmap[:,:,2] = (m&4)<<5 | (m&32)<<1
return cmap
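# Illustrative sketch (not part of the original module): constructing the dataset
# with a placeholder root directory and a trivial joint transform. The transform
# must accept (image, segmentation) and return an (input, target) pair, matching
# how __getitem__ calls it above; real code would convert to tensors here.
def _example_build_loader(root_dir="/path/to/ade20k_root"):
    def identity_transform(img, seg):
        return img, seg
    dataset = ADE20KDataset(root_dir, period="train", transform=identity_transform)
    loader = data.DataLoader(dataset, batch_size=4, shuffle=True, num_workers=0)
    return loader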
| 36.411765
| 92
| 0.5937
|
from __future__ import print_function, division
import json
import torch
from torch.utils.data import Dataset
import numpy as np
import os
import sys
import collections
import torch.utils.data as data
import shutil
from PIL import Image
from torchvision.datasets.utils import download_url, check_integrity
class ADE20KDataset(Dataset):
def __init__(self,ROOT_DIR, period, transform=None):
self.root_dir = ROOT_DIR
self.rst_dir = os.path.join(self.root_dir,'ADEChallengeData2016','result')
self.period = period
self.num_categories = 150
self.transform = transform
self.odgt = None
if self.period == 'train':
self.odgt = os.path.join(self.root_dir,'ADEChallengeData2016','train.odgt')
else:
self.odgt = os.path.join(self.root_dir,'ADEChallengeData2016','validation.odgt')
self.list_sample = [json.loads(x.rstrip()) for x in open(self.odgt, 'r')]
def __len__(self):
return len(self.list_sample)
def __getitem__(self, idx):
image_path = os.path.join(self.root_dir, self.list_sample[idx]['fpath_img'])
img = Image.open(image_path).convert('RGB')
r = self.list_sample[idx]['height']
c = self.list_sample[idx]['width']
name = self.list_sample[idx]['fpath_img'].replace('ADEChallengeData2016/images/','')
if self.period == 'train':
name = name.replace('train/','')
if 'val' in self.period:
name = name.replace('validation/','')
assert(self.period != 'test')
name = name.replace('.jpg','')
sample = {'image': img, 'name': name, 'row': r, 'col': c}
if self.period == 'train' or self.period == 'val':
seg_path = os.path.join(self.root_dir, self.list_sample[idx]['fpath_segm'])
seg = Image.open(seg_path)
sample['segmentation'] = seg
assert(img.size[0] == seg.size[0])
assert(img.size[1] == seg.size[1])
if self.transform is not None:
img, target = self.transform(img, seg)
return img, target
def decode_target(self, label):
m = label.astype(np.uint16)
r,c = m.shape
cmap = np.zeros((r,c,3), dtype=np.uint8)
cmap[:,:,0] = (m&1)<<7 | (m&8)<<3 | (m&64)>>1
cmap[:,:,1] = (m&2)<<6 | (m&16)<<2 | (m&128)>>2
cmap[:,:,2] = (m&4)<<5 | (m&32)<<1
return cmap
| true
| true
|
7904b168a4116b84fb89d03c6509bd216b10c0ed
| 5,765
|
py
|
Python
|
py/util/config.py
|
PurdueMINDS/MCLV-RBM
|
46b1f90b52447687983113f37a5ce2c66b8f0465
|
[
"Apache-2.0"
] | 4
|
2018-07-21T14:36:09.000Z
|
2021-01-27T15:40:04.000Z
|
py/util/config.py
|
PurdueMINDS/MCLV-RBM
|
46b1f90b52447687983113f37a5ce2c66b8f0465
|
[
"Apache-2.0"
] | null | null | null |
py/util/config.py
|
PurdueMINDS/MCLV-RBM
|
46b1f90b52447687983113f37a5ce2c66b8f0465
|
[
"Apache-2.0"
] | 1
|
2018-07-21T14:36:10.000Z
|
2018-07-21T14:36:10.000Z
|
# Copyright 2017 Bruno Ribeiro, Mayank Kakodkar, Pedro Savarese
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import torch
from bean.phase import Phase
def parse_top_level_arguments():
parser = argparse.ArgumentParser(description='Fit RBM to MNIST using different gradient estimators')
parser.add_argument('--local', '-l', dest='LOCAL', action='store_const',
const=True, default=False,
help='Enables Local run')
parser.add_argument('--basefolder', '-b', dest='BASE_FOLDER', action='store'
, default='/Users/mkakodka/Code/Research/RBM_V1/',
help='Base Folder for all directory paths')
parser.add_argument('--phase', '-p', dest='PHASE', action='store'
, default='DATA',
help=str(Phase.__dict__))
parser.add_argument('-n', dest='RUNS', action='store'
, default='1',
help='Number of runs')
parser.add_argument('-iteration', dest='iteration', action='store'
, default='-1',
help='iteration')
parser.add_argument('--method', '-m', dest='method', action='store',
default="MCLV",
help='Method to use')
parser.add_argument('-sfs', dest='sample_from_supernode', action='store_const',
const=True, default=False,
help='Sample from supernode for tour distribution')
parser.add_argument('-cdk', dest='cdk', action='store',
default=1,
help='contrastive divergence steps limit')
parser.add_argument('-mclvk', dest='mclvk', action='store',
default=1,
help='tour length limit')
parser.add_argument('-wm', dest='warmup', action='store',
default=2,
help='warmup epochs')
parser.add_argument('-tot', '--total-epochs', dest='total_epochs', action='store',
default=100,
help='total epochs')
parser.add_argument('-mbs', '--mini-batch-size', dest='mini_batch_size', action='store',
default=128,
help='mini batch size')
parser.add_argument('--learning-rate', '-lr', dest='learning_rate', action='store',
default=0.1,
help='learning rate')
parser.add_argument('--weight-decay', '-wd', dest='weight_decay', action='store',
default=0.0,
help='weight decay')
parser.add_argument('--momentum', '-mm', dest='momentum', action='store',
default=0.0,
help='momentum')
parser.add_argument('--plateau', '-pt', dest='plateau', action='store',
default=1000,
                        help='Robbins-Monro schedule plateau length')
parser.add_argument('--hidden', dest='num_hidden', action='store',
default=16,
help='Number of hidden units')
parser.add_argument('--supernode-samples', '-ss', dest='supernode_samples', action='store',
default=1,
help='Number of samples to include in the supernode')
parser.add_argument('--gpu-id', dest='gpu_id', action='store',
default=-1,
help='gpu_id')
parser.add_argument('--gpu-limit', dest='gpu_limit', action='store',
default=18,
help='gpu_limit')
parser.add_argument('--filename', dest='filename', action='store',
default='temp_local',
help='filename')
parser.add_argument('--final-likelihood', dest='final_likelihood', action='store_const',
const=True, default=False,
help='compute final likelihood')
parser.add_argument('--log-tour', dest='LOG_TOUR', action='store_const',
const=True, default=False,
help='LOG_TOUR')
parser.add_argument('--name', dest='name', action='store',
default=None,
help='Name this run')
args = parser.parse_args()
return args.LOCAL, args.BASE_FOLDER, args
LOCAL, BASE_FOLDER, ARGS = parse_top_level_arguments()
print("Config.BASE_FOLDER=%s" % BASE_FOLDER)
print("Config.LOCAL=%s" % LOCAL)
DATA_FOLDER = BASE_FOLDER + 'data/'
MODEL_FOLDER = BASE_FOLDER + 'data/model/'
OUTPUT_FOLDER = BASE_FOLDER + 'output/'
MNIST_FOLDER = BASE_FOLDER + 'py/MNIST_data/'
PLOT_OUTPUT_FOLDER = BASE_FOLDER + 'plots/'
SQLITE_FILE = DATA_FOLDER + 'results.db'
SERVER_SQLITE_FILE = DATA_FOLDER + 'results_server.db' if LOCAL else SQLITE_FILE
GPU_LIMIT = int(ARGS.gpu_limit)
USE_GPU = torch.cuda.is_available() and not LOCAL
LOG_TOUR = ARGS.LOG_TOUR
TOUR_LENGTHS_TABLE = "TOUR_LENGTH_DISTRIBUTIONS"
# These are hardcoded for the MNIST dataset
WIDTH = 28
HEIGHT = 28
# These options do not work right now, we'll fix them soon
PIN = False
GPU_ID = int(ARGS.gpu_id) if int(ARGS.gpu_id) >= 0 else None
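# Illustrative sketch (not part of the original module): one way downstream code
# might turn the flags above into a torch device. The helper name is an
# assumption; the actual training code may choose devices differently.
def _example_select_device():
    if USE_GPU and GPU_ID is not None:
        return torch.device("cuda", GPU_ID)
    if USE_GPU:
        return torch.device("cuda")
    return torch.device("cpu")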
| 44.346154
| 104
| 0.580225
|
import argparse
import torch
from bean.phase import Phase
def parse_top_level_arguments():
parser = argparse.ArgumentParser(description='Fit RBM to MNIST using different gradient estimators')
parser.add_argument('--local', '-l', dest='LOCAL', action='store_const',
const=True, default=False,
help='Enables Local run')
parser.add_argument('--basefolder', '-b', dest='BASE_FOLDER', action='store'
, default='/Users/mkakodka/Code/Research/RBM_V1/',
help='Base Folder for all directory paths')
parser.add_argument('--phase', '-p', dest='PHASE', action='store'
, default='DATA',
help=str(Phase.__dict__))
parser.add_argument('-n', dest='RUNS', action='store'
, default='1',
help='Number of runs')
parser.add_argument('-iteration', dest='iteration', action='store'
, default='-1',
help='iteration')
parser.add_argument('--method', '-m', dest='method', action='store',
default="MCLV",
help='Method to use')
parser.add_argument('-sfs', dest='sample_from_supernode', action='store_const',
const=True, default=False,
help='Sample from supernode for tour distribution')
parser.add_argument('-cdk', dest='cdk', action='store',
default=1,
help='contrastive divergence steps limit')
parser.add_argument('-mclvk', dest='mclvk', action='store',
default=1,
help='tour length limit')
parser.add_argument('-wm', dest='warmup', action='store',
default=2,
help='warmup epochs')
parser.add_argument('-tot', '--total-epochs', dest='total_epochs', action='store',
default=100,
help='total epochs')
parser.add_argument('-mbs', '--mini-batch-size', dest='mini_batch_size', action='store',
default=128,
help='mini batch size')
parser.add_argument('--learning-rate', '-lr', dest='learning_rate', action='store',
default=0.1,
help='learning rate')
parser.add_argument('--weight-decay', '-wd', dest='weight_decay', action='store',
default=0.0,
help='weight decay')
parser.add_argument('--momentum', '-mm', dest='momentum', action='store',
default=0.0,
help='momentum')
parser.add_argument('--plateau', '-pt', dest='plateau', action='store',
default=1000,
                        help='Robbins-Monro schedule plateau length')
parser.add_argument('--hidden', dest='num_hidden', action='store',
default=16,
help='Number of hidden units')
parser.add_argument('--supernode-samples', '-ss', dest='supernode_samples', action='store',
default=1,
help='Number of samples to include in the supernode')
parser.add_argument('--gpu-id', dest='gpu_id', action='store',
default=-1,
help='gpu_id')
parser.add_argument('--gpu-limit', dest='gpu_limit', action='store',
default=18,
help='gpu_limit')
parser.add_argument('--filename', dest='filename', action='store',
default='temp_local',
help='filename')
parser.add_argument('--final-likelihood', dest='final_likelihood', action='store_const',
const=True, default=False,
help='compute final likelihood')
parser.add_argument('--log-tour', dest='LOG_TOUR', action='store_const',
const=True, default=False,
help='LOG_TOUR')
parser.add_argument('--name', dest='name', action='store',
default=None,
help='Name this run')
args = parser.parse_args()
return args.LOCAL, args.BASE_FOLDER, args
LOCAL, BASE_FOLDER, ARGS = parse_top_level_arguments()
print("Config.BASE_FOLDER=%s" % BASE_FOLDER)
print("Config.LOCAL=%s" % LOCAL)
DATA_FOLDER = BASE_FOLDER + 'data/'
MODEL_FOLDER = BASE_FOLDER + 'data/model/'
OUTPUT_FOLDER = BASE_FOLDER + 'output/'
MNIST_FOLDER = BASE_FOLDER + 'py/MNIST_data/'
PLOT_OUTPUT_FOLDER = BASE_FOLDER + 'plots/'
SQLITE_FILE = DATA_FOLDER + 'results.db'
SERVER_SQLITE_FILE = DATA_FOLDER + 'results_server.db' if LOCAL else SQLITE_FILE
GPU_LIMIT = int(ARGS.gpu_limit)
USE_GPU = torch.cuda.is_available() and not LOCAL
LOG_TOUR = ARGS.LOG_TOUR
TOUR_LENGTHS_TABLE = "TOUR_LENGTH_DISTRIBUTIONS"
WIDTH = 28
HEIGHT = 28
PIN = False
GPU_ID = int(ARGS.gpu_id) if int(ARGS.gpu_id) >= 0 else None
| true
| true
|
7904b1a75bef9f140e7eac3da786676eba0628ab
| 2,032
|
py
|
Python
|
archive/model_archive/ConvModel.py
|
Sensors-in-Paradise/OpportunityML
|
a123b4842de45f735d517be6bcd96ca35171db91
|
[
"MIT"
] | 1
|
2022-03-25T16:00:36.000Z
|
2022-03-25T16:00:36.000Z
|
archive/model_archive/ConvModel.py
|
Sensors-in-Paradise/OpportunityML
|
a123b4842de45f735d517be6bcd96ca35171db91
|
[
"MIT"
] | 1
|
2022-03-28T13:50:28.000Z
|
2022-03-28T13:50:28.000Z
|
archive/model_archive/ConvModel.py
|
Sensors-in-Paradise/OpportunityML
|
a123b4842de45f735d517be6bcd96ca35171db91
|
[
"MIT"
] | null | null | null |
from random import shuffle
from models.RainbowModelLeaveRecsOut import RainbowModelLeaveRecsOut
from tensorflow.keras.layers import Conv1D, MaxPooling1D, Flatten, Dense, Dropout # type: ignore
from tensorflow.keras.models import Sequential # type: ignore
import numpy as np
from utils.Recording import Recording
from utils.array_operations import split_list_by_percentage
from utils.typing import assert_type
class ConvModel(RainbowModelLeaveRecsOut):
def __init__(self, **kwargs):
"""
Convolutional model
:param kwargs:
window_size: int
stride_size: int
test_percentage: float
n_features: int
n_outputs: int
"""
# hyper params to instance vars
self.window_size = kwargs["window_size"]
self.stride_size = kwargs["stride_size"]
self.test_percentage = kwargs["test_percentage"]
self.verbose = 0
self.epochs = 10
self.batch_size = 32
# create model
self.model = self.__create_model(kwargs["n_features"], kwargs["n_outputs"])
def __create_model(self, n_features, n_outputs):
# window_size, n_features, n_outputs = X.shape[1], X.shape[2], y.shape[1]
print(
f"Building model for {self.window_size} timesteps (window_size) and {n_features} features"
)
model = Sequential()
model.add(
Conv1D(
filters=64,
kernel_size=3,
activation="relu",
input_shape=(self.window_size, n_features),
)
)
model.add(Conv1D(filters=64, kernel_size=3, activation="relu"))
model.add(Dropout(0.5))
model.add(MaxPooling1D(pool_size=2))
model.add(Flatten())
model.add(Dense(100, activation="relu"))
model.add(Dense(n_outputs, activation="softmax"))
model.compile(
loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]
)
return model
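# Illustrative sketch (not part of the original module): instantiating the wrapper
# with made-up hyperparameters and printing the underlying Keras model summary.
# The keyword values are placeholders, not tuned settings.
def _example_build():
    wrapper = ConvModel(
        window_size=100,
        stride_size=50,
        test_percentage=0.2,
        n_features=6,
        n_outputs=5,
    )
    wrapper.model.summary()
    return wrapper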
| 33.311475
| 102
| 0.628937
|
from random import shuffle
from models.RainbowModelLeaveRecsOut import RainbowModelLeaveRecsOut
from tensorflow.keras.layers import Conv1D, MaxPooling1D, Flatten, Dense, Dropout
from tensorflow.keras.models import Sequential
import numpy as np
from utils.Recording import Recording
from utils.array_operations import split_list_by_percentage
from utils.typing import assert_type
class ConvModel(RainbowModelLeaveRecsOut):
def __init__(self, **kwargs):
self.window_size = kwargs["window_size"]
self.stride_size = kwargs["stride_size"]
self.test_percentage = kwargs["test_percentage"]
self.verbose = 0
self.epochs = 10
self.batch_size = 32
self.model = self.__create_model(kwargs["n_features"], kwargs["n_outputs"])
def __create_model(self, n_features, n_outputs):
print(
f"Building model for {self.window_size} timesteps (window_size) and {n_features} features"
)
model = Sequential()
model.add(
Conv1D(
filters=64,
kernel_size=3,
activation="relu",
input_shape=(self.window_size, n_features),
)
)
model.add(Conv1D(filters=64, kernel_size=3, activation="relu"))
model.add(Dropout(0.5))
model.add(MaxPooling1D(pool_size=2))
model.add(Flatten())
model.add(Dense(100, activation="relu"))
model.add(Dense(n_outputs, activation="softmax"))
model.compile(
loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]
)
return model
| true
| true
|
7904b1a95ec4a92851d54e13112e93ebd44a8795
| 1,130
|
py
|
Python
|
setup.py
|
ddkwing/har2case
|
6d440651c8d79228b7bf034790334e7c9406f023
|
[
"MIT"
] | null | null | null |
setup.py
|
ddkwing/har2case
|
6d440651c8d79228b7bf034790334e7c9406f023
|
[
"MIT"
] | null | null | null |
setup.py
|
ddkwing/har2case
|
6d440651c8d79228b7bf034790334e7c9406f023
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
import io
from setuptools import find_packages, setup
from har2case import __version__
with io.open("README.rst", encoding='utf-8') as f:
long_description = f.read()
install_requires = open("requirements.txt").readlines()
setup(
name='har2case',
version=__version__,
description='Convert HAR(HTTP Archive) to YAML/JSON testcases for HttpRunner.',
long_description=long_description,
author='Leo Lee',
author_email='mail@debugtalk.com',
url='https://github.com/HttpRunner/har2case',
license='MIT',
packages=find_packages(exclude=['test.*', 'test']),
package_data={},
keywords='har converter yaml json',
install_requires=install_requires,
classifiers=[
"Development Status :: 3 - Alpha",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6'
],
entry_points={
'console_scripts': [
'har2case=har2case.cli:main'
]
}
)
| 28.25
| 83
| 0.645133
|
import io
from setuptools import find_packages, setup
from har2case import __version__
with io.open("README.rst", encoding='utf-8') as f:
long_description = f.read()
install_requires = open("requirements.txt").readlines()
setup(
name='har2case',
version=__version__,
description='Convert HAR(HTTP Archive) to YAML/JSON testcases for HttpRunner.',
long_description=long_description,
author='Leo Lee',
author_email='mail@debugtalk.com',
url='https://github.com/HttpRunner/har2case',
license='MIT',
packages=find_packages(exclude=['test.*', 'test']),
package_data={},
keywords='har converter yaml json',
install_requires=install_requires,
classifiers=[
"Development Status :: 3 - Alpha",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6'
],
entry_points={
'console_scripts': [
'har2case=har2case.cli:main'
]
}
)
| true
| true
|
7904b23ee14067bfd38e03513fc61139dffd378a
| 9,003
|
py
|
Python
|
rasa/utils/common.py
|
paper2code/rasa
|
2e77a0b71a2813a89bdfa60782c761fe71490722
|
[
"Apache-2.0"
] | null | null | null |
rasa/utils/common.py
|
paper2code/rasa
|
2e77a0b71a2813a89bdfa60782c761fe71490722
|
[
"Apache-2.0"
] | 9
|
2020-09-15T20:10:23.000Z
|
2020-09-15T20:19:07.000Z
|
rasa/utils/common.py
|
karen-white/rasa
|
302825e10305a995184b5c0b92fea4813cd3416e
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
import logging
import os
import shutil
import warnings
from types import TracebackType
from typing import Any, Coroutine, Dict, List, Optional, Text, Type, TypeVar
import rasa.core.utils
import rasa.utils.io
from rasa.constants import (
DEFAULT_LOG_LEVEL_LIBRARIES,
ENV_LOG_LEVEL_LIBRARIES,
)
from rasa.shared.constants import DEFAULT_LOG_LEVEL, ENV_LOG_LEVEL
import rasa.shared.utils.io
logger = logging.getLogger(__name__)
T = TypeVar("T")
class TempDirectoryPath(str):
"""Represents a path to an temporary directory. When used as a context
manager, it erases the contents of the directory on exit.
"""
def __enter__(self) -> "TempDirectoryPath":
return self
def __exit__(
self,
_exc: Optional[Type[BaseException]],
_value: Optional[Exception],
_tb: Optional[TracebackType],
) -> bool:
if os.path.exists(self):
shutil.rmtree(self)
def read_global_config(path: Text) -> Dict[Text, Any]:
"""Read global Rasa configuration.
Args:
path: Path to the configuration
Returns:
The global configuration
"""
# noinspection PyBroadException
try:
return rasa.shared.utils.io.read_config_file(path)
except Exception:
# if things go south we pretend there is no config
return {}
def set_log_level(log_level: Optional[int] = None):
"""Set log level of Rasa and Tensorflow either to the provided log level or
to the log level specified in the environment variable 'LOG_LEVEL'. If none is set
a default log level will be used."""
if not log_level:
log_level = os.environ.get(ENV_LOG_LEVEL, DEFAULT_LOG_LEVEL)
log_level = logging.getLevelName(log_level)
logging.getLogger("rasa").setLevel(log_level)
update_tensorflow_log_level()
update_asyncio_log_level()
update_apscheduler_log_level()
update_socketio_log_level()
os.environ[ENV_LOG_LEVEL] = logging.getLevelName(log_level)
def update_apscheduler_log_level() -> None:
log_level = os.environ.get(ENV_LOG_LEVEL_LIBRARIES, DEFAULT_LOG_LEVEL_LIBRARIES)
apscheduler_loggers = [
"apscheduler",
"apscheduler.scheduler",
"apscheduler.executors",
"apscheduler.executors.default",
]
for logger_name in apscheduler_loggers:
logging.getLogger(logger_name).setLevel(log_level)
logging.getLogger(logger_name).propagate = False
def update_socketio_log_level() -> None:
log_level = os.environ.get(ENV_LOG_LEVEL_LIBRARIES, DEFAULT_LOG_LEVEL_LIBRARIES)
socketio_loggers = ["websockets.protocol", "engineio.server", "socketio.server"]
for logger_name in socketio_loggers:
logging.getLogger(logger_name).setLevel(log_level)
logging.getLogger(logger_name).propagate = False
def update_tensorflow_log_level() -> None:
"""Set the log level of Tensorflow to the log level specified in the environment
variable 'LOG_LEVEL_LIBRARIES'."""
    # Disables libnvinfer, tensorRT, cuda, AVX2 and FMA warnings (CPU support). This variable needs to be set before the
    # first import since some warnings are raised on the first import.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import tensorflow as tf
log_level = os.environ.get(ENV_LOG_LEVEL_LIBRARIES, DEFAULT_LOG_LEVEL_LIBRARIES)
if log_level == "DEBUG":
tf_log_level = tf.compat.v1.logging.DEBUG
elif log_level == "INFO":
tf_log_level = tf.compat.v1.logging.INFO
elif log_level == "WARNING":
tf_log_level = tf.compat.v1.logging.WARN
else:
tf_log_level = tf.compat.v1.logging.ERROR
tf.compat.v1.logging.set_verbosity(tf_log_level)
logging.getLogger("tensorflow").propagate = False
def update_sanic_log_level(log_file: Optional[Text] = None):
"""Set the log level of sanic loggers to the log level specified in the environment
variable 'LOG_LEVEL_LIBRARIES'."""
from sanic.log import logger, error_logger, access_logger
log_level = os.environ.get(ENV_LOG_LEVEL_LIBRARIES, DEFAULT_LOG_LEVEL_LIBRARIES)
logger.setLevel(log_level)
error_logger.setLevel(log_level)
access_logger.setLevel(log_level)
logger.propagate = False
error_logger.propagate = False
access_logger.propagate = False
if log_file is not None:
formatter = logging.Formatter("%(asctime)s [%(levelname)-5.5s] %(message)s")
file_handler = logging.FileHandler(log_file)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
error_logger.addHandler(file_handler)
access_logger.addHandler(file_handler)
def update_asyncio_log_level() -> None:
"""Set the log level of asyncio to the log level specified in the environment
variable 'LOG_LEVEL_LIBRARIES'."""
log_level = os.environ.get(ENV_LOG_LEVEL_LIBRARIES, DEFAULT_LOG_LEVEL_LIBRARIES)
logging.getLogger("asyncio").setLevel(log_level)
def set_log_and_warnings_filters() -> None:
"""
Set log filters on the root logger, and duplicate filters for warnings.
Filters only propagate on handlers, not loggers.
"""
for handler in logging.getLogger().handlers:
handler.addFilter(RepeatedLogFilter())
warnings.filterwarnings("once", category=UserWarning)
def obtain_verbosity() -> int:
"""Returns a verbosity level according to the set log level."""
log_level = os.environ.get(ENV_LOG_LEVEL, DEFAULT_LOG_LEVEL)
verbosity = 0
if log_level == "DEBUG":
verbosity = 2
if log_level == "INFO":
verbosity = 1
return verbosity
def sort_list_of_dicts_by_first_key(dicts: List[Dict]) -> List[Dict]:
"""Sorts a list of dictionaries by their first key."""
return sorted(dicts, key=lambda d: list(d.keys())[0])
def write_global_config_value(name: Text, value: Any) -> None:
"""Read global Rasa configuration."""
# need to use `rasa.constants.GLOBAL_USER_CONFIG_PATH` to allow patching
# in tests
config_path = rasa.constants.GLOBAL_USER_CONFIG_PATH
try:
os.makedirs(os.path.dirname(config_path), exist_ok=True)
c = read_global_config(config_path)
c[name] = value
rasa.core.utils.dump_obj_as_yaml_to_file(
rasa.constants.GLOBAL_USER_CONFIG_PATH, c
)
except Exception as e:
logger.warning(f"Failed to write global config. Error: {e}. Skipping.")
def read_global_config_value(name: Text, unavailable_ok: bool = True) -> Any:
"""Read a value from the global Rasa configuration."""
def not_found():
if unavailable_ok:
return None
else:
raise ValueError(f"Configuration '{name}' key not found.")
# need to use `rasa.constants.GLOBAL_USER_CONFIG_PATH` to allow patching
# in tests
config_path = rasa.constants.GLOBAL_USER_CONFIG_PATH
if not os.path.exists(config_path):
return not_found()
c = read_global_config(config_path)
if name in c:
return c[name]
else:
return not_found()
def update_existing_keys(
original: Dict[Any, Any], updates: Dict[Any, Any]
) -> Dict[Any, Any]:
"""Iterate through all the updates and update a value in the original dictionary.
If the updates contain a key that is not present in the original dict, it will
be ignored."""
updated = original.copy()
for k, v in updates.items():
if k in updated:
updated[k] = v
return updated
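# Illustrative sketch (not called anywhere): update_existing_keys only overwrites
# keys already present in the original dict, so the extra key "c" below is ignored.
def _example_update_existing_keys():
    original = {"a": 1, "b": 2}
    updates = {"b": 20, "c": 30}
    return update_existing_keys(original, updates)  # {"a": 1, "b": 20}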
class RepeatedLogFilter(logging.Filter):
"""Filter repeated log records."""
last_log = None
def filter(self, record):
current_log = (
record.levelno,
record.pathname,
record.lineno,
record.msg,
record.args,
)
if current_log != self.last_log:
self.last_log = current_log
return True
return False
def run_in_loop(
f: Coroutine[Any, Any, T], loop: Optional[asyncio.AbstractEventLoop] = None
) -> T:
"""Execute the awaitable in the passed loop.
If no loop is passed, the currently existing one is used or a new one is created
if no loop has been started in the current context.
After the awaitable is finished, all remaining tasks on the loop will be
awaited as well (background tasks).
    WARNING: don't use this if there are never-ending background tasks scheduled;
    in that case, this function will never return.
Args:
f: function to execute
loop: loop to use for the execution
Returns:
return value from the function
"""
if loop is None:
try:
loop = asyncio.get_event_loop()
except RuntimeError:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
result = loop.run_until_complete(f)
# Let's also finish all running tasks:
pending = asyncio.Task.all_tasks()
loop.run_until_complete(asyncio.gather(*pending))
return result
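# Illustrative sketch (not called anywhere): driving a small coroutine through
# run_in_loop. The coroutine is a stand-in for real async work; note that the
# pending-task cleanup above relies on asyncio.Task.all_tasks(), so this assumes
# a Python version where that API is still available.
def _example_run_in_loop():
    async def add(a, b):
        await asyncio.sleep(0)
        return a + b
    return run_in_loop(add(1, 2))  # 3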
| 30.01
| 119
| 0.690103
|
import asyncio
import logging
import os
import shutil
import warnings
from types import TracebackType
from typing import Any, Coroutine, Dict, List, Optional, Text, Type, TypeVar
import rasa.core.utils
import rasa.utils.io
from rasa.constants import (
DEFAULT_LOG_LEVEL_LIBRARIES,
ENV_LOG_LEVEL_LIBRARIES,
)
from rasa.shared.constants import DEFAULT_LOG_LEVEL, ENV_LOG_LEVEL
import rasa.shared.utils.io
logger = logging.getLogger(__name__)
T = TypeVar("T")
class TempDirectoryPath(str):
def __enter__(self) -> "TempDirectoryPath":
return self
def __exit__(
self,
_exc: Optional[Type[BaseException]],
_value: Optional[Exception],
_tb: Optional[TracebackType],
) -> bool:
if os.path.exists(self):
shutil.rmtree(self)
def read_global_config(path: Text) -> Dict[Text, Any]:
try:
return rasa.shared.utils.io.read_config_file(path)
except Exception:
return {}
def set_log_level(log_level: Optional[int] = None):
if not log_level:
log_level = os.environ.get(ENV_LOG_LEVEL, DEFAULT_LOG_LEVEL)
log_level = logging.getLevelName(log_level)
logging.getLogger("rasa").setLevel(log_level)
update_tensorflow_log_level()
update_asyncio_log_level()
update_apscheduler_log_level()
update_socketio_log_level()
os.environ[ENV_LOG_LEVEL] = logging.getLevelName(log_level)
def update_apscheduler_log_level() -> None:
log_level = os.environ.get(ENV_LOG_LEVEL_LIBRARIES, DEFAULT_LOG_LEVEL_LIBRARIES)
apscheduler_loggers = [
"apscheduler",
"apscheduler.scheduler",
"apscheduler.executors",
"apscheduler.executors.default",
]
for logger_name in apscheduler_loggers:
logging.getLogger(logger_name).setLevel(log_level)
logging.getLogger(logger_name).propagate = False
def update_socketio_log_level() -> None:
log_level = os.environ.get(ENV_LOG_LEVEL_LIBRARIES, DEFAULT_LOG_LEVEL_LIBRARIES)
socketio_loggers = ["websockets.protocol", "engineio.server", "socketio.server"]
for logger_name in socketio_loggers:
logging.getLogger(logger_name).setLevel(log_level)
logging.getLogger(logger_name).propagate = False
def update_tensorflow_log_level() -> None:
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import tensorflow as tf
log_level = os.environ.get(ENV_LOG_LEVEL_LIBRARIES, DEFAULT_LOG_LEVEL_LIBRARIES)
if log_level == "DEBUG":
tf_log_level = tf.compat.v1.logging.DEBUG
elif log_level == "INFO":
tf_log_level = tf.compat.v1.logging.INFO
elif log_level == "WARNING":
tf_log_level = tf.compat.v1.logging.WARN
else:
tf_log_level = tf.compat.v1.logging.ERROR
tf.compat.v1.logging.set_verbosity(tf_log_level)
logging.getLogger("tensorflow").propagate = False
def update_sanic_log_level(log_file: Optional[Text] = None):
from sanic.log import logger, error_logger, access_logger
log_level = os.environ.get(ENV_LOG_LEVEL_LIBRARIES, DEFAULT_LOG_LEVEL_LIBRARIES)
logger.setLevel(log_level)
error_logger.setLevel(log_level)
access_logger.setLevel(log_level)
logger.propagate = False
error_logger.propagate = False
access_logger.propagate = False
if log_file is not None:
formatter = logging.Formatter("%(asctime)s [%(levelname)-5.5s] %(message)s")
file_handler = logging.FileHandler(log_file)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
error_logger.addHandler(file_handler)
access_logger.addHandler(file_handler)
def update_asyncio_log_level() -> None:
log_level = os.environ.get(ENV_LOG_LEVEL_LIBRARIES, DEFAULT_LOG_LEVEL_LIBRARIES)
logging.getLogger("asyncio").setLevel(log_level)
def set_log_and_warnings_filters() -> None:
for handler in logging.getLogger().handlers:
handler.addFilter(RepeatedLogFilter())
warnings.filterwarnings("once", category=UserWarning)
def obtain_verbosity() -> int:
log_level = os.environ.get(ENV_LOG_LEVEL, DEFAULT_LOG_LEVEL)
verbosity = 0
if log_level == "DEBUG":
verbosity = 2
if log_level == "INFO":
verbosity = 1
return verbosity
def sort_list_of_dicts_by_first_key(dicts: List[Dict]) -> List[Dict]:
return sorted(dicts, key=lambda d: list(d.keys())[0])
def write_global_config_value(name: Text, value: Any) -> None:
config_path = rasa.constants.GLOBAL_USER_CONFIG_PATH
try:
os.makedirs(os.path.dirname(config_path), exist_ok=True)
c = read_global_config(config_path)
c[name] = value
rasa.core.utils.dump_obj_as_yaml_to_file(
rasa.constants.GLOBAL_USER_CONFIG_PATH, c
)
except Exception as e:
logger.warning(f"Failed to write global config. Error: {e}. Skipping.")
def read_global_config_value(name: Text, unavailable_ok: bool = True) -> Any:
def not_found():
if unavailable_ok:
return None
else:
raise ValueError(f"Configuration '{name}' key not found.")
config_path = rasa.constants.GLOBAL_USER_CONFIG_PATH
if not os.path.exists(config_path):
return not_found()
c = read_global_config(config_path)
if name in c:
return c[name]
else:
return not_found()
def update_existing_keys(
original: Dict[Any, Any], updates: Dict[Any, Any]
) -> Dict[Any, Any]:
updated = original.copy()
for k, v in updates.items():
if k in updated:
updated[k] = v
return updated
class RepeatedLogFilter(logging.Filter):
last_log = None
def filter(self, record):
current_log = (
record.levelno,
record.pathname,
record.lineno,
record.msg,
record.args,
)
if current_log != self.last_log:
self.last_log = current_log
return True
return False
def run_in_loop(
f: Coroutine[Any, Any, T], loop: Optional[asyncio.AbstractEventLoop] = None
) -> T:
if loop is None:
try:
loop = asyncio.get_event_loop()
except RuntimeError:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
result = loop.run_until_complete(f)
pending = asyncio.Task.all_tasks()
loop.run_until_complete(asyncio.gather(*pending))
return result
| true
| true
|
7904b28224c3d7798b69b07fb4d2841d4934d390
| 11,013
|
py
|
Python
|
examples/add_saml_sso_from_metadata.py
|
YmonOy/lastline_api
|
cb17088f55eef3daf107cc8ad37eee4d70422796
|
[
"Apache-2.0"
] | 2
|
2017-12-30T21:58:47.000Z
|
2018-02-28T13:13:30.000Z
|
examples/add_saml_sso_from_metadata.py
|
YmonOy/lastline_api
|
cb17088f55eef3daf107cc8ad37eee4d70422796
|
[
"Apache-2.0"
] | null | null | null |
examples/add_saml_sso_from_metadata.py
|
YmonOy/lastline_api
|
cb17088f55eef3daf107cc8ad37eee4d70422796
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
"""
Sample program to add SSO options to a Manager/Pinbox.
:Copyright:
Copyright 2014 Lastline, Inc. All Rights Reserved.
Created on: Dec 8, 2014 by Lukyan Hritsko
"""
import requests
import argparse
import ConfigParser
import os.path
import logging
import re
from lxml import etree
from json import dumps
from urlparse import urlparse
from papi_client import papi_client
from papi_client import loader
class MissingValue(Exception):
pass
class InvalidXML(Exception):
pass
class InvalidFile(Exception):
pass
class InvalidURL(Exception):
pass
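# Parses a SAML 2.0 IdP metadata document and collects the values needed to
# configure SSO: entity ID, signing certificate, SSO service URL, IdP binding
# and NameID format.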
class MetadataExtractor(object):
XPATHS = {
'entity_descriptor': '/md:EntityDescriptor',
'idp_sso_descriptor': '/md:EntityDescriptor/md:IDPSSODescriptor'
}
NAMESPACES = {
'md': 'urn:oasis:names:tc:SAML:2.0:metadata',
'ds': 'http://www.w3.org/2000/09/xmldsig#'
}
def __init__(self, xml):
self.entity_id = None
self.x509_cert = None
self.sso_service_url = None
self.idp_binding = None
self.name_id_format = None
self.parse_values(xml)
def get_values_as_dict(self):
return {
'entity_id': self.entity_id,
'x509_cert': self.x509_cert,
'sso_service_url': self.sso_service_url,
'idp_binding': self.idp_binding,
'name_id_format': self.name_id_format,
}
def parse_entity_id(self, xml_root):
try:
entity_descriptor = xml_root.xpath(MetadataExtractor.XPATHS['entity_descriptor'],
namespaces=MetadataExtractor.NAMESPACES)[0]
self.entity_id = entity_descriptor.attrib['entityID']
except (KeyError, IndexError):
raise MissingValue("Unable to parse entityID")
def parse_x509_cert(self, key_desc_node):
xpath_from_node = 'ds:KeyInfo/ds:X509Data/ds:X509Certificate'
try:
x509_node = key_desc_node.xpath(xpath_from_node,
namespaces=MetadataExtractor.NAMESPACES)[0]
self.x509_cert = x509_node.text
if not self.x509_cert:
raise MissingValue
except (IndexError, MissingValue):
raise MissingValue("Unable to parse x509 certificate")
def parse_idp_binding_and_location(self, sso_node):
try:
attributes = sso_node.attrib
self.sso_service_url = attributes['Location']
self.idp_binding = attributes['Binding']
except (KeyError) as e:
raise MissingValue("Unable to parse %s", e.message)
def parse_name_id_format(self, name_id_node):
self.name_id_format = name_id_node.text
if not self.name_id_format:
raise MissingValue("Unable to parse name id format")
def extract_tag(self, raw_tag):
return raw_tag[raw_tag.find('}') + 1:]
def get_parser_dispatcher(self):
return {
'KeyDescriptor': self.parse_x509_cert,
'NameIDFormat': self.parse_name_id_format,
'SingleSignOnService': self.parse_idp_binding_and_location
}
def parse_values(self, xml):
try:
root = etree.fromstring(xml)
except (Exception) as e:
raise InvalidXML("Unable to load XML: %s" % e.message)
parser_dispatcher = self.get_parser_dispatcher()
self.parse_entity_id(root)
try:
idp_sso_desc = root.xpath(MetadataExtractor.XPATHS['idp_sso_descriptor'],
namespaces=MetadataExtractor.NAMESPACES)[0]
except (IndexError) as e:
raise InvalidXML("Unable to parse IdP SSO Descriptor Node")
for node in idp_sso_desc.getchildren():
tag = self.extract_tag(node.tag)
parser = parser_dispatcher[tag]
parser(node)
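# Read the metadata XML from a local file, expanding a leading '~' in the path.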
def xml_read_from_file(file_name):
xml_fn = os.path.expanduser(file_name)
if not os.path.isfile(xml_fn):
raise InvalidFile("Specified file: '%s' not found" % xml_fn)
with open(xml_fn, 'r') as fp:
return fp.read()
def xml_read_from_url(url, skip_validation=False):
try:
req = requests.get(url, verify=(not skip_validation))
req.raise_for_status()
if not req.content:
raise Exception
except Exception:
raise InvalidURL("Unable to extract metadata from URL")
return req.content
def get_config_parser(file_name):
config_fn = os.path.expanduser(file_name)
if not os.path.isfile(config_fn):
raise InvalidFile("Specified config file: '%s' not found" % config_fn)
config_parser = ConfigParser.ConfigParser()
config_parser.read(config_fn)
return config_parser
def get_logger():
# Python logger...
logger = logging.getLogger()
sh = logging.StreamHandler()
logger.setLevel(logging.DEBUG)
sh.setLevel(logging.DEBUG)
logger.addHandler(sh)
return logger
def get_papi_client(config_parser, logger):
base_client = papi_client.PapiClientFactory.client_from_config(
config_parser,
'papi',
logger)
client = loader.PapiClientCollection(base_client=base_client,
conf=config_parser,
logger=logger)
client.load_view("appliance_mgmt")
return client
class SAMLApplianceConfiguration(object):
def __init__(
self, appliance_uuid, config_index, metadata=None, display_name=None):
self._appliance_uuid = appliance_uuid
self._config_index = config_index
self._metadata = metadata
self._display_name = display_name
def _get_config_settings(self, is_add=True):
sso_config_key = "sso_saml2_config%d" % self._config_index
sso_enabled_key = "sso_saml2_enabled%d" % self._config_index
if is_add:
sso_config_settings = self._metadata.get_values_as_dict()
sso_config_settings['display_name'] = self._display_name
else:
sso_config_settings = {}
return {
sso_enabled_key: is_add,
sso_config_key: dumps(sso_config_settings)
}
def add_sso(self, client):
settings = self._get_config_settings()
client.appliance_mgmt.configure(
self._appliance_uuid,
settings=settings)
def delete_sso(self, client):
settings = self._get_config_settings(is_add=False)
client.appliance_mgmt.configure(
self._appliance_uuid,
settings=settings)
def url_or_file(string):
if re.match(r'https?://', string, re.IGNORECASE):
return {'url': string}
else:
return {'file': string}
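# Example invocation (values are illustrative only):
#   python add_saml_sso_from_metadata.py add <appliance-uuid> \
#       https://idp.example.com/metadata.xml "Example IdP" -c papi_client.ini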
def main():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest="mode",
help="Add or delete a config")
# Parser for add mode
add_parser = subparsers.add_parser('add')
add_parser.add_argument("appliance_uuid",
type=str,
help="Specify the appliance UUID to configure.")
add_parser.add_argument("url_or_file",
type=url_or_file,
help="Specify file location of metadata or specify "
"a url to automatically parse information.")
add_parser.add_argument("display_name",
nargs="?",
default=None,
help="Specify a namne that will be displayed in "
"the UI.")
add_parser.add_argument("-n",
"--index",
type=int,
dest="config_index",
default=0,
choices=xrange(0, 4),
help="Specify configuration index for single "
"sign on. This is used when configuring "
"multiple SSO options, i.e., first config "
"is 0, second is 1, and so on...")
add_parser.add_argument("--skip-verify-ssl",
default=False,
action="store_true",
help="Skips validation of SSL when retrieving "
"metadata from a URL")
add_parser.add_argument("-c",
"--config",
type=str,
dest="config",
default="papi_client.ini")
# Parser for delete mode
delete_parser = subparsers.add_parser("delete")
delete_parser.add_argument("appliance_uuid",
type=str,
help="Specify the appliance UUID to configure.")
delete_parser.add_argument("config_index",
type=int,
choices=xrange(0, 4),
help="Specify which configuration to remove.")
delete_parser.add_argument("-c",
"--config",
type=str,
dest="config",
default="papi_client.ini")
args = parser.parse_args()
logger = get_logger()
try:
config_parser = get_config_parser(args.config)
client = get_papi_client(config_parser, logger)
if args.mode == "delete":
saml_configuration = SAMLApplianceConfiguration(
args.appliance_uuid, args.config_index)
saml_configuration.delete_sso(client)
return 0
if args.url_or_file.get('url', None):
xml_content = xml_read_from_url(args.url_or_file['url'],
args.skip_verify_ssl)
else:
xml_content = xml_read_from_file(args.url_or_file['file'])
metadata = MetadataExtractor(xml_content)
# If no display name exists, let's use the FQDN of the IdP
display_name = args.display_name
if not display_name:
display_name = urlparse(metadata.entity_id).netloc # pylint: disable=E1101
logger.info("Adding SSO configuration (index %d) for appliance %s" %
(args.config_index, args.appliance_uuid))
saml_configuration = SAMLApplianceConfiguration(args.appliance_uuid,
args.config_index,
metadata=metadata,
display_name=display_name)
saml_configuration.add_sso(client)
except (MissingValue, InvalidXML, InvalidFile, InvalidURL) as e:
logger.error(e.message)
return 1
return 0
if __name__ == "__main__":
main()
| 33.886154
| 93
| 0.579769
|
import requests
import argparse
import ConfigParser
import os.path
import logging
import re
from lxml import etree
from json import dumps
from urlparse import urlparse
from papi_client import papi_client
from papi_client import loader
class MissingValue(Exception):
pass
class InvalidXML(Exception):
pass
class InvalidFile(Exception):
pass
class InvalidURL(Exception):
pass
class MetadataExtractor(object):
XPATHS = {
'entity_descriptor': '/md:EntityDescriptor',
'idp_sso_descriptor': '/md:EntityDescriptor/md:IDPSSODescriptor'
}
NAMESPACES = {
'md': 'urn:oasis:names:tc:SAML:2.0:metadata',
'ds': 'http://www.w3.org/2000/09/xmldsig#'
}
def __init__(self, xml):
self.entity_id = None
self.x509_cert = None
self.sso_service_url = None
self.idp_binding = None
self.name_id_format = None
self.parse_values(xml)
def get_values_as_dict(self):
return {
'entity_id': self.entity_id,
'x509_cert': self.x509_cert,
'sso_service_url': self.sso_service_url,
'idp_binding': self.idp_binding,
'name_id_format': self.name_id_format,
}
def parse_entity_id(self, xml_root):
try:
entity_descriptor = xml_root.xpath(MetadataExtractor.XPATHS['entity_descriptor'],
namespaces=MetadataExtractor.NAMESPACES)[0]
self.entity_id = entity_descriptor.attrib['entityID']
except (KeyError, IndexError):
raise MissingValue("Unable to parse entityID")
def parse_x509_cert(self, key_desc_node):
xpath_from_node = 'ds:KeyInfo/ds:X509Data/ds:X509Certificate'
try:
x509_node = key_desc_node.xpath(xpath_from_node,
namespaces=MetadataExtractor.NAMESPACES)[0]
self.x509_cert = x509_node.text
if not self.x509_cert:
raise MissingValue
except (IndexError, MissingValue):
raise MissingValue("Unable to parse x509 certificate")
def parse_idp_binding_and_location(self, sso_node):
try:
attributes = sso_node.attrib
self.sso_service_url = attributes['Location']
self.idp_binding = attributes['Binding']
except (KeyError) as e:
raise MissingValue("Unable to parse %s", e.message)
def parse_name_id_format(self, name_id_node):
self.name_id_format = name_id_node.text
if not self.name_id_format:
raise MissingValue("Unable to parse name id format")
def extract_tag(self, raw_tag):
return raw_tag[raw_tag.find('}') + 1:]
def get_parser_dispatcher(self):
return {
'KeyDescriptor': self.parse_x509_cert,
'NameIDFormat': self.parse_name_id_format,
'SingleSignOnService': self.parse_idp_binding_and_location
}
def parse_values(self, xml):
try:
root = etree.fromstring(xml)
except (Exception) as e:
raise InvalidXML("Unable to load XML: %s" % e.message)
parser_dispatcher = self.get_parser_dispatcher()
self.parse_entity_id(root)
try:
idp_sso_desc = root.xpath(MetadataExtractor.XPATHS['idp_sso_descriptor'],
namespaces=MetadataExtractor.NAMESPACES)[0]
except (IndexError) as e:
raise InvalidXML("Unable to parse IdP SSO Descriptor Node")
for node in idp_sso_desc.getchildren():
tag = self.extract_tag(node.tag)
parser = parser_dispatcher[tag]
parser(node)
def xml_read_from_file(file_name):
xml_fn = os.path.expanduser(file_name)
if not os.path.isfile(xml_fn):
raise InvalidFile("Specified file: '%s' not found" % xml_fn)
with open(xml_fn, 'r') as fp:
return fp.read()
def xml_read_from_url(url, skip_validation=False):
try:
req = requests.get(url, verify=(not skip_validation))
req.raise_for_status()
if not req.content:
raise Exception
except Exception:
raise InvalidURL("Unable to extract metadata from URL")
return req.content
def get_config_parser(file_name):
config_fn = os.path.expanduser(file_name)
if not os.path.isfile(config_fn):
raise InvalidFile("Specified config file: '%s' not found" % config_fn)
config_parser = ConfigParser.ConfigParser()
config_parser.read(config_fn)
return config_parser
def get_logger():
logger = logging.getLogger()
sh = logging.StreamHandler()
logger.setLevel(logging.DEBUG)
sh.setLevel(logging.DEBUG)
logger.addHandler(sh)
return logger
def get_papi_client(config_parser, logger):
base_client = papi_client.PapiClientFactory.client_from_config(
config_parser,
'papi',
logger)
client = loader.PapiClientCollection(base_client=base_client,
conf=config_parser,
logger=logger)
client.load_view("appliance_mgmt")
return client
class SAMLApplianceConfiguration(object):
def __init__(
self, appliance_uuid, config_index, metadata=None, display_name=None):
self._appliance_uuid = appliance_uuid
self._config_index = config_index
self._metadata = metadata
self._display_name = display_name
def _get_config_settings(self, is_add=True):
sso_config_key = "sso_saml2_config%d" % self._config_index
sso_enabled_key = "sso_saml2_enabled%d" % self._config_index
if is_add:
sso_config_settings = self._metadata.get_values_as_dict()
sso_config_settings['display_name'] = self._display_name
else:
sso_config_settings = {}
return {
sso_enabled_key: is_add,
sso_config_key: dumps(sso_config_settings)
}
def add_sso(self, client):
settings = self._get_config_settings()
client.appliance_mgmt.configure(
self._appliance_uuid,
settings=settings)
def delete_sso(self, client):
settings = self._get_config_settings(is_add=False)
client.appliance_mgmt.configure(
self._appliance_uuid,
settings=settings)
def url_or_file(string):
if re.match(r'https?://', string, re.IGNORECASE):
return {'url': string}
else:
return {'file': string}
def main():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest="mode",
help="Add or delete a config")
add_parser = subparsers.add_parser('add')
add_parser.add_argument("appliance_uuid",
type=str,
help="Specify the appliance UUID to configure.")
add_parser.add_argument("url_or_file",
type=url_or_file,
help="Specify file location of metadata or specify "
"a url to automatically parse information.")
add_parser.add_argument("display_name",
nargs="?",
default=None,
help="Specify a namne that will be displayed in "
"the UI.")
add_parser.add_argument("-n",
"--index",
type=int,
dest="config_index",
default=0,
choices=xrange(0, 4),
help="Specify configuration index for single "
"sign on. This is used when configuring "
"multiple SSO options, i.e., first config "
"is 0, second is 1, and so on...")
add_parser.add_argument("--skip-verify-ssl",
default=False,
action="store_true",
help="Skips validation of SSL when retrieving "
"metadata from a URL")
add_parser.add_argument("-c",
"--config",
type=str,
dest="config",
default="papi_client.ini")
delete_parser = subparsers.add_parser("delete")
delete_parser.add_argument("appliance_uuid",
type=str,
help="Specify the appliance UUID to configure.")
delete_parser.add_argument("config_index",
type=int,
choices=xrange(0, 4),
help="Specify which configuration to remove.")
delete_parser.add_argument("-c",
"--config",
type=str,
dest="config",
default="papi_client.ini")
args = parser.parse_args()
logger = get_logger()
try:
config_parser = get_config_parser(args.config)
client = get_papi_client(config_parser, logger)
if args.mode == "delete":
saml_configuration = SAMLApplianceConfiguration(
args.appliance_uuid, args.config_index)
saml_configuration.delete_sso(client)
return 0
if args.url_or_file.get('url', None):
xml_content = xml_read_from_url(args.url_or_file['url'],
args.skip_verify_ssl)
else:
xml_content = xml_read_from_file(args.url_or_file['file'])
metadata = MetadataExtractor(xml_content)
display_name = args.display_name
if not display_name:
display_name = urlparse(metadata.entity_id).netloc # pylint: disable=E1101
logger.info("Adding SSO configuration (index %d) for appliance %s" %
(args.config_index, args.appliance_uuid))
saml_configuration = SAMLApplianceConfiguration(args.appliance_uuid,
args.config_index,
metadata=metadata,
display_name=display_name)
saml_configuration.add_sso(client)
except (MissingValue, InvalidXML, InvalidFile, InvalidURL) as e:
logger.error(e.message)
return 1
return 0
if __name__ == "__main__":
main()
| true
| true
|
7904b2aa6e643007a5f5761c391708fd1ac11ac3
| 3,688
|
py
|
Python
|
wallux.py
|
Manoj-Paramsetti/Wallux
|
8975b9c7e3dffc997d7dcb55f85694b5ad9d7f28
|
[
"MIT"
] | 1
|
2022-01-03T14:36:02.000Z
|
2022-01-03T14:36:02.000Z
|
wallux.py
|
Manoj-Paramsetti/Wallux
|
8975b9c7e3dffc997d7dcb55f85694b5ad9d7f28
|
[
"MIT"
] | null | null | null |
wallux.py
|
Manoj-Paramsetti/Wallux
|
8975b9c7e3dffc997d7dcb55f85694b5ad9d7f28
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os
import requests
os.system("clear")
print("""
██ ██ █████ ██ ██ ██ ██ ██ ██
██ ██ ██ ██ ██ ██ ██ ██ ██ ██
██ █ ██ ███████ ██ ██ ██ ██ ███
██ ███ ██ ██ ██ ██ ██ ██ ██ ██ ██
███ ███ ██ ██ ███████ ███████ ██████ ██ ██
""")
print("[INFO] Initializing...\n")
baseurl = "https://raw.githubusercontent.com/Wallux-0/Wallpapers/main/"
req = requests.get(
"https://raw.githubusercontent.com/Wallux-0/Wallux/main/static/tags.json")
if req:
    content = req.json()
content = content['wallpaper']
else:
print("[ERROR] Please connect to internet and try again.")
print("""Hello! Wallux is a wallpaper library hosted on Github.
Please visit https://wallux-0.github.io/Wallux/ to choose a wallpaper and enter its Wallux ID here.
Wallux ID:""")
try:
walluxid = int(input())
except:
print("[ERROR] Not a valid Wallux ID.")
exit()
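# Find the wallpaper whose numeric ID matches the entered Wallux ID, then
# download it into the user's Documents folder.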
for w in content:
if str(walluxid) == ''.join([n for n in w['path'] if n.isdigit()]):
print("[INFO] Downloading your new wallpaper...")
req = requests.get(baseurl+w['path'], stream=True)
if req:
img = req.raw.read()
path = os.path.expanduser(
"~/Documents/"+w['path'].lstrip("wallpapers/").strip())
with open(path, 'wb') as f:
f.write(img)
print("[INFO] Image Downloaded")
else:
print("[ERROR] Please connect to an internet connection.")
break
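# Guess the desktop environment by counting xfce/kde/gnome matches in the
# process list, then set the wallpaper using that environment's mechanism.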
os.system("""echo $(ps -e | grep -E -i "xfce|kde|gnome") > /tmp/wallux.file""")
parseStr = ''
with open("/tmp/wallux.file") as f:
parseStr = f.read()
os.remove("/tmp/wallux.file")
de = {}
de['kde'] = parseStr.lower().count("kde")
de['gnome'] = parseStr.lower().count('gnome')
de['xfce'] = parseStr.lower().count('xfce')
if max(de, key=de.get) == "gnome":
os.system(
"gsettings set org.gnome.desktop.background picture-uri file://{}".format(path))
print("[SUCCESS] Enjoy your new wallpaper!")
exit()
elif max(de, key=de.get) == "kde":
import dbus
plugin = 'org.kde.image'
jscript = """
var allDesktops = desktops();
print (allDesktops);
for (i=0;i<allDesktops.length;i++) {
d = allDesktops[i];
d.wallpaperPlugin = "%s";
d.currentConfigGroup = Array("Wallpaper", "%s", "General");
d.writeConfig("Image", "file://%s")
}
"""
bus = dbus.SessionBus()
plasma = dbus.Interface(bus.get_object(
'org.kde.plasmashell', '/PlasmaShell'), dbus_interface='org.kde.PlasmaShell')
plasma.evaluateScript(jscript % (plugin, plugin, path))
print("[SUCCESS] Enjoy your new wallpaper!")
exit()
elif max(de, key=de.get) == "xfce":
"""
    To find out what property is changed when the background changes, run the following command in a terminal window:
xfconf-query -c xfce4-desktop -m
...and then change the background using the Settings Manager > Desktop.
The command monitors channel xfce4-desktop for changes. It will tell which property on channel xfce4-desktop is changed.
Then the command to change that property would be like this
xfconf-query -c xfce4-desktop -p insert_property_here -s path/image
"""
os.system("xfconf-query --channel xfce4-desktop --property /backdrop/screen0/monitoreDP-1/workspace0/last-image --set {}".format(path))
print("[SUCCESS] Enjoy your new wallpaper!")
exit()
else:
print("[ERROR] Oops. Your desktop enviroinment is not supported at the moment. But I saved the wallpaper to your Documents folder. Enjoy!")
| 40.086957
| 143
| 0.588124
|
import os
import requests
os.system("clear")
print("""
██ ██ █████ ██ ██ ██ ██ ██ ██
██ ██ ██ ██ ██ ██ ██ ██ ██ ██
██ █ ██ ███████ ██ ██ ██ ██ ███
██ ███ ██ ██ ██ ██ ██ ██ ██ ██ ██
███ ███ ██ ██ ███████ ███████ ██████ ██ ██
""")
print("[INFO] Initializing...\n")
baseurl = "https://raw.githubusercontent.com/Wallux-0/Wallpapers/main/"
req = requests.get(
"https://raw.githubusercontent.com/Wallux-0/Wallux/main/static/tags.json")
if req:
    content = req.json()
content = content['wallpaper']
else:
print("[ERROR] Please connect to internet and try again.")
print("""Hello! Wallux is a wallpaper library hosted on Github.
Please visit https://wallux-0.github.io/Wallux/ to choose a wallpaper and enter its Wallux ID here.
Wallux ID:""")
try:
walluxid = int(input())
except:
print("[ERROR] Not a valid Wallux ID.")
exit()
for w in content:
if str(walluxid) == ''.join([n for n in w['path'] if n.isdigit()]):
print("[INFO] Downloading your new wallpaper...")
req = requests.get(baseurl+w['path'], stream=True)
if req:
img = req.raw.read()
path = os.path.expanduser(
"~/Documents/"+w['path'].lstrip("wallpapers/").strip())
with open(path, 'wb') as f:
f.write(img)
print("[INFO] Image Downloaded")
else:
print("[ERROR] Please connect to an internet connection.")
break
os.system("""echo $(ps -e | grep -E -i "xfce|kde|gnome") > /tmp/wallux.file""")
parseStr = ''
with open("/tmp/wallux.file") as f:
parseStr = f.read()
os.remove("/tmp/wallux.file")
de = {}
de['kde'] = parseStr.lower().count("kde")
de['gnome'] = parseStr.lower().count('gnome')
de['xfce'] = parseStr.lower().count('xfce')
if max(de, key=de.get) == "gnome":
os.system(
"gsettings set org.gnome.desktop.background picture-uri file://{}".format(path))
print("[SUCCESS] Enjoy your new wallpaper!")
exit()
elif max(de, key=de.get) == "kde":
import dbus
plugin = 'org.kde.image'
jscript = """
var allDesktops = desktops();
print (allDesktops);
for (i=0;i<allDesktops.length;i++) {
d = allDesktops[i];
d.wallpaperPlugin = "%s";
d.currentConfigGroup = Array("Wallpaper", "%s", "General");
d.writeConfig("Image", "file://%s")
}
"""
bus = dbus.SessionBus()
plasma = dbus.Interface(bus.get_object(
'org.kde.plasmashell', '/PlasmaShell'), dbus_interface='org.kde.PlasmaShell')
plasma.evaluateScript(jscript % (plugin, plugin, path))
print("[SUCCESS] Enjoy your new wallpaper!")
exit()
elif max(de, key=de.get) == "xfce":
"""
    To find out what property is changed when the background changes, run the following command in a terminal window:
xfconf-query -c xfce4-desktop -m
...and then change the background using the Settings Manager > Desktop.
The command monitors channel xfce4-desktop for changes. It will tell which property on channel xfce4-desktop is changed.
Then the command to change that property would be like this
xfconf-query -c xfce4-desktop -p insert_property_here -s path/image
"""
os.system("xfconf-query --channel xfce4-desktop --property /backdrop/screen0/monitoreDP-1/workspace0/last-image --set {}".format(path))
print("[SUCCESS] Enjoy your new wallpaper!")
exit()
else:
print("[ERROR] Oops. Your desktop enviroinment is not supported at the moment. But I saved the wallpaper to your Documents folder. Enjoy!")
| true
| true
|
7904b2ed358e366a409681b9af8d829027a8c18d
| 8,140
|
py
|
Python
|
pacu/modules/lightsail__generate_temp_access/main.py
|
damienjburks/pacu
|
5853f9668a7d78945c40d403bf88a47101ba2b3d
|
[
"BSD-3-Clause"
] | 1
|
2021-12-22T22:39:49.000Z
|
2021-12-22T22:39:49.000Z
|
pacu/modules/lightsail__generate_temp_access/main.py
|
damienjburks/pacu
|
5853f9668a7d78945c40d403bf88a47101ba2b3d
|
[
"BSD-3-Clause"
] | null | null | null |
pacu/modules/lightsail__generate_temp_access/main.py
|
damienjburks/pacu
|
5853f9668a7d78945c40d403bf88a47101ba2b3d
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
import argparse
from botocore.exceptions import ClientError
import os
from pacu.core.lib import downloads_dir
module_info = {
# Name of the module (should be the same as the filename)
"name": "lightsail__generate_temp_access",
# Name and any other notes about the author
"author": "Alexander Morgenstern alexander.morgenstern@rhinosecuritylabs.com",
# Category of the module. Make sure the name matches an existing category.
"category": "EXPLOIT",
# One liner description of the module functionality. This shows up when a user searches for modules.
"one_liner": "Creates temporary SSH keys for available instances in AWS Lightsail.",
# Full description about what the module does and how it works
"description": "This module creates temporary SSH keys that can be used to connect to Lightsail instances, and downloads them into the session's download directory.",
# A list of AWS services that the module utilizes during its execution
"services": ["Lightsail"],
# For prerequisite modules, try and see if any existing modules return the data that is required for your module before writing that code yourself, that way, session data can stay separated and modular.
"prerequisite_modules": ["lightsail__enum"],
# External resources that the module depends on. Valid options are either a GitHub URL (must end in .git) or single file URL.
"external_dependencies": [],
# Module arguments to autocomplete when the user hits tab
"arguments_to_autocomplete": ["--instances", "--regions"],
}
parser = argparse.ArgumentParser(add_help=False, description=module_info["description"])
parser.add_argument(
"--instances",
required=False,
help="One or more Lightsail instance names, their regions, and their access protocol in the format instanceid@region@protocol. Windows instances will use the RDP protocol, and others use SSH. Defaults to all instances.",
)
parser.add_argument(
"--regions",
required=False,
default=None,
help="One or more (comma separated) AWS regions in the format us-east-1. Defaults to all session regions.",
)
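# Write the generated credentials into the session's download directory,
# grouped by region: RDP details as a small CSV-style text file per instance,
# SSH access as a private key plus a signed certificate (-cert.pub) file.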
def write_keys_to_file(created_keys, session):
for region in created_keys:
ssh_key_dir = os.path.join(downloads_dir(), module_info["name"], region)
if not os.path.exists(ssh_key_dir):
os.makedirs(ssh_key_dir)
for credential in created_keys[region]:
if credential["protocol"] == "rdp":
windows_file_dir = os.path.join(ssh_key_dir, credential["instanceName"])
try:
with open(windows_file_dir, "w") as windows_file:
# Create header for file.
windows_file.write("instanceName,ipAddress,username,password\n")
windows_file.write(credential["instanceName"] + ",")
windows_file.write(credential["ipAddress"] + ",")
windows_file.write(credential["username"] + ",")
windows_file.write(credential["password"] + "\n")
except IOError:
print(
"Error writing credential file for {}.".format(
credential["instanceName"]
)
)
continue
else:
private_key_file_dir = os.path.join(
ssh_key_dir, credential["instanceName"]
)
cert_key_file_dir = os.path.join(
ssh_key_dir, credential["instanceName"] + "-cert.pub"
)
try:
with open(private_key_file_dir, "w") as private_key_file:
private_key_file.write(credential["privateKey"])
with open(cert_key_file_dir, "w") as cert_key_file:
cert_key_file.write(credential["certKey"])
except IOError:
print(
"Error writing credential file for {}.".format(
credential["instanceName"]
)
)
continue
def main(args, pacu_main):
session = pacu_main.get_active_session()
print = pacu_main.print
get_regions = pacu_main.get_regions
fetch_data = pacu_main.fetch_data
args = parser.parse_args(args)
regions = args.regions.split(",") if args.regions else get_regions("lightsail")
instances = []
if (
args.instances is not None
): # need to update this to include the regions of these IDs
for instance in args.instances.split(","):
instance_name = instance.split("@")[0]
region = instance.split("@")[1]
protocol = instance.split("@")[2]
if region not in regions:
print(" {} is not a valid region".format(region))
continue
else:
instances.append(
{
"name": instance_name,
"protocol": protocol,
"region": region,
}
)
else:
print("Targeting all Lightsail instances...")
if (
fetch_data(
["Lightsail"], module_info["prerequisite_modules"][0], "--instances"
)
is False
):
print("Pre-req module not run successfully. Exiting...")
return
for instance in session.Lightsail["instances"]:
if instance["region"] in regions:
protocol = "rdp" if "Windows" in instance["blueprintName"] else "ssh"
instances.append(
{
"name": instance["name"],
"protocol": protocol,
"region": instance["region"],
}
)
temp_keys = {}
for instance in instances:
temp_keys[instance["region"]] = []
for instance in instances:
client = pacu_main.get_boto3_client("lightsail", instance["region"])
print(" Instance {}".format(instance["name"]))
try:
response = client.get_instance_access_details(
instanceName=instance["name"], protocol=instance["protocol"]
)
temp_keys[instance["region"]].append(response["accessDetails"])
print(
" Successfully created temporary access for {}".format(
instance["name"]
)
)
except ClientError as error:
code = error.response["Error"]["Code"]
if code == "AccessDeniedException":
print(" Unauthorized to generate temporary access.")
return
elif code == "OperationFailureException":
print(" FAILED: Unable to interact with non-running instance.")
continue
else:
print(error)
break
write_keys_to_file(temp_keys, session)
windows_count = 0
ssh_count = 0
for region in temp_keys:
for credential in temp_keys[region]:
if credential["protocol"] == "rdp":
windows_count += 1
else:
ssh_count += 1
if windows_count or ssh_count:
written_file_path = os.path.join(downloads_dir(), module_info["name"])
else:
written_file_path = None
summary_data = {
"windows": windows_count,
"linux": ssh_count,
"written_file_path": written_file_path,
}
return summary_data
def summary(data, pacu_main):
out = " Created temporary access for {} Windows instances.\n".format(
data["windows"]
)
out += " Created temporary access for {} Linux instances.\n".format(data["linux"])
if data["written_file_path"] is not None:
out += "\n Credential files written to:\n {}{}".format(
data["written_file_path"], os.path.sep
)
return out
| 40.7
| 224
| 0.575553
|
import argparse
from botocore.exceptions import ClientError
import os
from pacu.core.lib import downloads_dir
module_info = {
"name": "lightsail__generate_temp_access",
"author": "Alexander Morgenstern alexander.morgenstern@rhinosecuritylabs.com",
"category": "EXPLOIT",
"one_liner": "Creates temporary SSH keys for available instances in AWS Lightsail.",
"description": "This module creates temporary SSH keys that can be used to connect to Lightsail instances, and downloads them into the session's download directory.",
# A list of AWS services that the module utilizes during its execution
"services": ["Lightsail"],
# For prerequisite modules, try and see if any existing modules return the data that is required for your module before writing that code yourself, that way, session data can stay separated and modular.
"prerequisite_modules": ["lightsail__enum"],
# External resources that the module depends on. Valid options are either a GitHub URL (must end in .git) or single file URL.
"external_dependencies": [],
# Module arguments to autocomplete when the user hits tab
"arguments_to_autocomplete": ["--instances", "--regions"],
}
parser = argparse.ArgumentParser(add_help=False, description=module_info["description"])
parser.add_argument(
"--instances",
required=False,
help="One or more Lightsail instance names, their regions, and their access protocol in the format instanceid@region@protocol. Windows instances will use the RDP protocol, and others use SSH. Defaults to all instances.",
)
parser.add_argument(
"--regions",
required=False,
default=None,
help="One or more (comma separated) AWS regions in the format us-east-1. Defaults to all session regions.",
)
def write_keys_to_file(created_keys, session):
for region in created_keys:
ssh_key_dir = os.path.join(downloads_dir(), module_info["name"], region)
if not os.path.exists(ssh_key_dir):
os.makedirs(ssh_key_dir)
for credential in created_keys[region]:
if credential["protocol"] == "rdp":
windows_file_dir = os.path.join(ssh_key_dir, credential["instanceName"])
try:
with open(windows_file_dir, "w") as windows_file:
# Create header for file.
windows_file.write("instanceName,ipAddress,username,password\n")
windows_file.write(credential["instanceName"] + ",")
windows_file.write(credential["ipAddress"] + ",")
windows_file.write(credential["username"] + ",")
windows_file.write(credential["password"] + "\n")
except IOError:
print(
"Error writing credential file for {}.".format(
credential["instanceName"]
)
)
continue
else:
private_key_file_dir = os.path.join(
ssh_key_dir, credential["instanceName"]
)
cert_key_file_dir = os.path.join(
ssh_key_dir, credential["instanceName"] + "-cert.pub"
)
try:
with open(private_key_file_dir, "w") as private_key_file:
private_key_file.write(credential["privateKey"])
with open(cert_key_file_dir, "w") as cert_key_file:
cert_key_file.write(credential["certKey"])
except IOError:
print(
"Error writing credential file for {}.".format(
credential["instanceName"]
)
)
continue
def main(args, pacu_main):
session = pacu_main.get_active_session()
print = pacu_main.print
get_regions = pacu_main.get_regions
fetch_data = pacu_main.fetch_data
args = parser.parse_args(args)
regions = args.regions.split(",") if args.regions else get_regions("lightsail")
instances = []
if (
args.instances is not None
): # need to update this to include the regions of these IDs
for instance in args.instances.split(","):
instance_name = instance.split("@")[0]
region = instance.split("@")[1]
protocol = instance.split("@")[2]
if region not in regions:
print(" {} is not a valid region".format(region))
continue
else:
instances.append(
{
"name": instance_name,
"protocol": protocol,
"region": region,
}
)
else:
print("Targeting all Lightsail instances...")
if (
fetch_data(
["Lightsail"], module_info["prerequisite_modules"][0], "--instances"
)
is False
):
print("Pre-req module not run successfully. Exiting...")
return
for instance in session.Lightsail["instances"]:
if instance["region"] in regions:
protocol = "rdp" if "Windows" in instance["blueprintName"] else "ssh"
instances.append(
{
"name": instance["name"],
"protocol": protocol,
"region": instance["region"],
}
)
temp_keys = {}
for instance in instances:
temp_keys[instance["region"]] = []
for instance in instances:
client = pacu_main.get_boto3_client("lightsail", instance["region"])
print(" Instance {}".format(instance["name"]))
try:
response = client.get_instance_access_details(
instanceName=instance["name"], protocol=instance["protocol"]
)
temp_keys[instance["region"]].append(response["accessDetails"])
print(
" Successfully created temporary access for {}".format(
instance["name"]
)
)
except ClientError as error:
code = error.response["Error"]["Code"]
if code == "AccessDeniedException":
print(" Unauthorized to generate temporary access.")
return
elif code == "OperationFailureException":
print(" FAILED: Unable to interact with non-running instance.")
continue
else:
print(error)
break
write_keys_to_file(temp_keys, session)
windows_count = 0
ssh_count = 0
for region in temp_keys:
for credential in temp_keys[region]:
if credential["protocol"] == "rdp":
windows_count += 1
else:
ssh_count += 1
if windows_count or ssh_count:
written_file_path = os.path.join(downloads_dir(), module_info["name"])
else:
written_file_path = None
summary_data = {
"windows": windows_count,
"linux": ssh_count,
"written_file_path": written_file_path,
}
return summary_data
def summary(data, pacu_main):
out = " Created temporary access for {} Windows instances.\n".format(
data["windows"]
)
out += " Created temporary access for {} Linux instances.\n".format(data["linux"])
if data["written_file_path"] is not None:
out += "\n Credential files written to:\n {}{}".format(
data["written_file_path"], os.path.sep
)
return out
| true
| true
|
7904b41a53181da51a73376ea7fdd8e568e8b6c4
| 5,348
|
py
|
Python
|
build/android/adb_install_apk.py
|
google-ar/chromium
|
2441c86a5fd975f09a6c30cddb57dfb7fc239699
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 777
|
2017-08-29T15:15:32.000Z
|
2022-03-21T05:29:41.000Z
|
build/android/adb_install_apk.py
|
harrymarkovskiy/WebARonARCore
|
2441c86a5fd975f09a6c30cddb57dfb7fc239699
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 66
|
2017-08-30T18:31:18.000Z
|
2021-08-02T10:59:35.000Z
|
build/android/adb_install_apk.py
|
harrymarkovskiy/WebARonARCore
|
2441c86a5fd975f09a6c30cddb57dfb7fc239699
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 123
|
2017-08-30T01:19:34.000Z
|
2022-03-17T22:55:31.000Z
|
#!/usr/bin/env python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility script to install APKs from the command line quickly."""
import argparse
import glob
import logging
import os
import sys
import devil_chromium
from devil.android import apk_helper
from devil.android import device_blacklist
from devil.android import device_errors
from devil.android import device_utils
from devil.utils import run_tests_helper
from pylib import constants
def main():
parser = argparse.ArgumentParser()
apk_group = parser.add_mutually_exclusive_group(required=True)
apk_group.add_argument('--apk', dest='apk_name',
help='DEPRECATED The name of the apk containing the'
' application (with the .apk extension).')
apk_group.add_argument('apk_path', nargs='?',
help='The path to the APK to install.')
# TODO(jbudorick): Remove once no clients pass --apk_package
parser.add_argument('--apk_package', help='DEPRECATED unused')
parser.add_argument('--split',
action='append',
dest='splits',
help='A glob matching the apk splits. '
'Can be specified multiple times.')
parser.add_argument('--keep_data',
action='store_true',
default=False,
help='Keep the package data when installing '
'the application.')
parser.add_argument('--debug', action='store_const', const='Debug',
dest='build_type',
default=os.environ.get('BUILDTYPE', 'Debug'),
help='If set, run test suites under out/Debug. '
'Default is env var BUILDTYPE or Debug')
parser.add_argument('--release', action='store_const', const='Release',
dest='build_type',
help='If set, run test suites under out/Release. '
'Default is env var BUILDTYPE or Debug.')
parser.add_argument('-d', '--device', dest='devices', action='append',
default=[],
help='Target device for apk to install on. Enter multiple'
' times for multiple devices.')
parser.add_argument('--adb-path', type=os.path.abspath,
help='Absolute path to the adb binary to use.')
parser.add_argument('--blacklist-file', help='Device blacklist JSON file.')
parser.add_argument('-v', '--verbose', action='count',
help='Enable verbose logging.')
parser.add_argument('--downgrade', action='store_true',
help='If set, allows downgrading of apk.')
parser.add_argument('--timeout', type=int,
default=device_utils.DeviceUtils.INSTALL_DEFAULT_TIMEOUT,
help='Seconds to wait for APK installation. '
'(default: %(default)s)')
args = parser.parse_args()
run_tests_helper.SetLogLevel(args.verbose)
constants.SetBuildType(args.build_type)
devil_chromium.Initialize(
output_directory=constants.GetOutDirectory(),
adb_path=args.adb_path)
apk = args.apk_path or args.apk_name
if not apk.endswith('.apk'):
apk += '.apk'
if not os.path.exists(apk):
apk = os.path.join(constants.GetOutDirectory(), 'apks', apk)
if not os.path.exists(apk):
parser.error('%s not found.' % apk)
if args.splits:
splits = []
base_apk_package = apk_helper.ApkHelper(apk).GetPackageName()
for split_glob in args.splits:
apks = [f for f in glob.glob(split_glob) if f.endswith('.apk')]
if not apks:
logging.warning('No apks matched for %s.', split_glob)
for f in apks:
helper = apk_helper.ApkHelper(f)
if (helper.GetPackageName() == base_apk_package
and helper.GetSplitName()):
splits.append(f)
blacklist = (device_blacklist.Blacklist(args.blacklist_file)
if args.blacklist_file
else None)
devices = device_utils.DeviceUtils.HealthyDevices(blacklist=blacklist,
device_arg=args.devices)
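  # Install the APK (or split APKs) on one device; on failure or timeout the
  # device is added to the blacklist (when one is in use) so later steps skip it.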
def blacklisting_install(device):
try:
if args.splits:
device.InstallSplitApk(apk, splits, reinstall=args.keep_data,
allow_downgrade=args.downgrade)
else:
device.Install(apk, reinstall=args.keep_data,
allow_downgrade=args.downgrade,
timeout=args.timeout)
except device_errors.CommandFailedError:
logging.exception('Failed to install %s', args.apk_name)
if blacklist:
blacklist.Extend([str(device)], reason='install_failure')
logging.warning('Blacklisting %s', str(device))
except device_errors.CommandTimeoutError:
logging.exception('Timed out while installing %s', args.apk_name)
if blacklist:
blacklist.Extend([str(device)], reason='install_timeout')
logging.warning('Blacklisting %s', str(device))
device_utils.DeviceUtils.parallel(devices).pMap(blacklisting_install)
if __name__ == '__main__':
sys.exit(main())
| 40.210526
| 80
| 0.618923
|
import argparse
import glob
import logging
import os
import sys
import devil_chromium
from devil.android import apk_helper
from devil.android import device_blacklist
from devil.android import device_errors
from devil.android import device_utils
from devil.utils import run_tests_helper
from pylib import constants
def main():
parser = argparse.ArgumentParser()
apk_group = parser.add_mutually_exclusive_group(required=True)
apk_group.add_argument('--apk', dest='apk_name',
help='DEPRECATED The name of the apk containing the'
' application (with the .apk extension).')
apk_group.add_argument('apk_path', nargs='?',
help='The path to the APK to install.')
parser.add_argument('--apk_package', help='DEPRECATED unused')
parser.add_argument('--split',
action='append',
dest='splits',
help='A glob matching the apk splits. '
'Can be specified multiple times.')
parser.add_argument('--keep_data',
action='store_true',
default=False,
help='Keep the package data when installing '
'the application.')
parser.add_argument('--debug', action='store_const', const='Debug',
dest='build_type',
default=os.environ.get('BUILDTYPE', 'Debug'),
help='If set, run test suites under out/Debug. '
'Default is env var BUILDTYPE or Debug')
parser.add_argument('--release', action='store_const', const='Release',
dest='build_type',
help='If set, run test suites under out/Release. '
'Default is env var BUILDTYPE or Debug.')
parser.add_argument('-d', '--device', dest='devices', action='append',
default=[],
help='Target device for apk to install on. Enter multiple'
' times for multiple devices.')
parser.add_argument('--adb-path', type=os.path.abspath,
help='Absolute path to the adb binary to use.')
parser.add_argument('--blacklist-file', help='Device blacklist JSON file.')
parser.add_argument('-v', '--verbose', action='count',
help='Enable verbose logging.')
parser.add_argument('--downgrade', action='store_true',
help='If set, allows downgrading of apk.')
parser.add_argument('--timeout', type=int,
default=device_utils.DeviceUtils.INSTALL_DEFAULT_TIMEOUT,
help='Seconds to wait for APK installation. '
'(default: %(default)s)')
args = parser.parse_args()
run_tests_helper.SetLogLevel(args.verbose)
constants.SetBuildType(args.build_type)
devil_chromium.Initialize(
output_directory=constants.GetOutDirectory(),
adb_path=args.adb_path)
apk = args.apk_path or args.apk_name
if not apk.endswith('.apk'):
apk += '.apk'
if not os.path.exists(apk):
apk = os.path.join(constants.GetOutDirectory(), 'apks', apk)
if not os.path.exists(apk):
parser.error('%s not found.' % apk)
if args.splits:
splits = []
base_apk_package = apk_helper.ApkHelper(apk).GetPackageName()
for split_glob in args.splits:
apks = [f for f in glob.glob(split_glob) if f.endswith('.apk')]
if not apks:
logging.warning('No apks matched for %s.', split_glob)
for f in apks:
helper = apk_helper.ApkHelper(f)
if (helper.GetPackageName() == base_apk_package
and helper.GetSplitName()):
splits.append(f)
blacklist = (device_blacklist.Blacklist(args.blacklist_file)
if args.blacklist_file
else None)
devices = device_utils.DeviceUtils.HealthyDevices(blacklist=blacklist,
device_arg=args.devices)
def blacklisting_install(device):
try:
if args.splits:
device.InstallSplitApk(apk, splits, reinstall=args.keep_data,
allow_downgrade=args.downgrade)
else:
device.Install(apk, reinstall=args.keep_data,
allow_downgrade=args.downgrade,
timeout=args.timeout)
except device_errors.CommandFailedError:
logging.exception('Failed to install %s', args.apk_name)
if blacklist:
blacklist.Extend([str(device)], reason='install_failure')
logging.warning('Blacklisting %s', str(device))
except device_errors.CommandTimeoutError:
logging.exception('Timed out while installing %s', args.apk_name)
if blacklist:
blacklist.Extend([str(device)], reason='install_timeout')
logging.warning('Blacklisting %s', str(device))
device_utils.DeviceUtils.parallel(devices).pMap(blacklisting_install)
if __name__ == '__main__':
sys.exit(main())
| true
| true
|
7904b43875f56a776f055dd0752c594e08d497aa
| 7,246
|
py
|
Python
|
ml/train_net.py
|
brungcm/health-hack-2019
|
3f537ea40ceefdcf5f3044b6931bfa3951c351f7
|
[
"MIT"
] | null | null | null |
ml/train_net.py
|
brungcm/health-hack-2019
|
3f537ea40ceefdcf5f3044b6931bfa3951c351f7
|
[
"MIT"
] | null | null | null |
ml/train_net.py
|
brungcm/health-hack-2019
|
3f537ea40ceefdcf5f3044b6931bfa3951c351f7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import tensorflow as tf
import logging
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
import argparse
from aquaman_net import AquamanNet
from utils import IMAGE_SIZE
EPOCHS = 1000
BATCH_SIZE = 4
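# Decode a single JPEG frame, resize it to IMAGE_SIZE and scale pixel values
# to the [0, 1] range.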
def preproc(image_bytes):
image_jpg = tf.image.decode_jpeg(image_bytes, channels=3)
image_jpg = tf.image.resize_images(image_jpg, IMAGE_SIZE)
image_jpg = tf.to_float(image_jpg) / 255.0
image_jpg = tf.reshape(
image_jpg, [IMAGE_SIZE[0], IMAGE_SIZE[1], 3], name="Reshape_Preproc")
return image_jpg
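# Build an Estimator input_fn that reads gzipped TFRecords containing a scalar
# target plus n_frames JPEG-encoded frames, then shuffles, repeats and batches them.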
def input_fn(tf_records_list, epochs=10, batch_size=8, n_frames=16):
def _parse_proto(example_proto):
parsed_dict = {
"target": tf.FixedLenFeature((), tf.float32, default_value=0)
}
for i in range(n_frames):
parsed_dict['frame_{}'.format(i)] = tf.FixedLenFeature(
(), tf.string, default_value="")
parsed_features = tf.parse_single_example(example_proto, parsed_dict)
return parsed_features
def _split_xy(feat_dict):
target = tf.one_hot(tf.to_int32(
feat_dict['target']), depth=2, dtype=tf.float32)
input_frames = {}
for i in range(n_frames):
frame_id = 'frame_{}'.format(i)
input_frames[frame_id] = feat_dict[frame_id]
return input_frames, {'target': target}
def _input_fn():
dataset = tf.data.TFRecordDataset(
tf_records_list, compression_type='GZIP')
dataset = dataset.map(_parse_proto)
dataset = dataset.map(_split_xy)
dataset = dataset.shuffle(buffer_size=2 * batch_size)
dataset = dataset.repeat(epochs)
dataset = dataset.batch(batch_size)
return dataset
return _input_fn
def metrics(logits, labels):
argmax_logits = tf.argmax(logits, axis=1)
argmax_labels = tf.argmax(labels, axis=1)
return {'accuracy': tf.metrics.accuracy(argmax_labels, argmax_logits)}
def get_serving_fn(window_size):
input_tensor = {"frame_{}".format(i): tf.placeholder(
dtype=tf.string, shape=[None]) for i in range(window_size)}
return tf.estimator.export.build_raw_serving_input_receiver_fn(input_tensor)
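# Estimator model_fn factory: decodes and stacks the frames into a single
# spatio-temporal tensor, runs AquamanNet, and wires up the loss, training,
# evaluation and export ops depending on the mode.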
def model_fn(n_frames):
def _model_fn(features, labels, mode, params):
input_tensors_list = []
for i in range(n_frames):
frame_id = 'frame_{}'.format(i)
frame_tensor = tf.map_fn(preproc, features[frame_id], tf.float32)
frame_tensor = tf.expand_dims(frame_tensor, axis=-1)
frame_tensor = tf.transpose(frame_tensor, [0, 1, 2, 4, 3])
print(frame_tensor)
input_tensors_list.append(frame_tensor)
input_tensor_stream = tf.concat(input_tensors_list, axis=3)
print(input_tensor_stream)
is_training = mode == tf.estimator.ModeKeys.TRAIN
logits = AquamanNet(input_tensor_stream, is_training, 2)
# Loss, training and eval operations are not needed during inference.
total_loss = None
loss = None
train_op = None
eval_metric_ops = {}
export_outputs = None
prediction_dict = {'class': tf.argmax(
logits, axis=1, name="predictions")}
if mode != tf.estimator.ModeKeys.PREDICT:
# IT IS VERY IMPORTANT TO RETRIEVE THE REGULARIZATION LOSSES
reg_loss = tf.losses.get_regularization_loss()
# This summary is automatically caught by the Estimator API
tf.summary.scalar("Regularization_Loss", tensor=reg_loss)
loss = tf.losses.softmax_cross_entropy(
onehot_labels=labels['target'], logits=logits)
tf.summary.scalar("XEntropy_LOSS", tensor=loss)
total_loss = loss + reg_loss
learning_rate = tf.constant(1e-4, name='fixed_learning_rate')
#optimizer = tf.train.GradientDescentOptimizer(learning_rate)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
vars_to_train = tf.trainable_variables()
tf.logging.info("Variables to train: {}".format(vars_to_train))
if is_training:
# You DO must get this collection in order to perform updates on batch_norm variables
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(
loss=total_loss, global_step=tf.train.get_global_step(), var_list=vars_to_train)
eval_metric_ops = metrics(logits, labels['target'])
else:
# pass
export_outputs = {
'logits': tf.estimator.export.PredictOutput(outputs=logits)}
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=prediction_dict,
loss=total_loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops,
export_outputs=export_outputs)
return _model_fn
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--train-tf-list',
dest='train_tf_list',
type=str,
required=True)
parser.add_argument('--test-tf-list',
dest='test_tf_list',
type=str,
required=True)
parser.add_argument('--output-dir',
dest='output_dir',
type=str,
required=True)
parser.add_argument('--window-size',
dest='window_size',
type=int,
required=True)
args = parser.parse_args()
tfrecord_list_train = args.train_tf_list.split(',')
tfrecord_list_test = args.test_tf_list.split(',')
session_config = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=False
)
run_config = tf.estimator.RunConfig(
model_dir=args.output_dir,
save_summary_steps=100,
session_config=session_config,
save_checkpoints_steps=100,
save_checkpoints_secs=None,
keep_checkpoint_max=1
)
estimator = tf.estimator.Estimator(
model_fn=model_fn(args.window_size),
config=run_config
)
train_input_fn = input_fn(
batch_size=BATCH_SIZE, tf_records_list=tfrecord_list_train, epochs=EPOCHS, n_frames=args.window_size)
test_input_fn = input_fn(
batch_size=BATCH_SIZE, tf_records_list=tfrecord_list_test, epochs=1, n_frames=args.window_size)
train_spec = tf.estimator.TrainSpec(
input_fn=train_input_fn, max_steps=10000)
# eval_steps = math.ceil(EVAL_SET_SIZE / FLAGS.batch_size)
eval_spec = tf.estimator.EvalSpec(
input_fn=test_input_fn,
# steps=eval_steps,
start_delay_secs=60,
throttle_secs=60)
tf.estimator.train_and_evaluate(
estimator=estimator, train_spec=train_spec, eval_spec=eval_spec)
estimator.export_savedmodel(
export_dir_base=args.output_dir, serving_input_receiver_fn=get_serving_fn(args.window_size))
| 32.63964
| 109
| 0.637593
|
import tensorflow as tf
import logging
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
import argparse
from aquaman_net import AquamanNet
from utils import IMAGE_SIZE
EPOCHS = 1000
BATCH_SIZE = 4
def preproc(image_bytes):
image_jpg = tf.image.decode_jpeg(image_bytes, channels=3)
image_jpg = tf.image.resize_images(image_jpg, IMAGE_SIZE)
image_jpg = tf.to_float(image_jpg) / 255.0
image_jpg = tf.reshape(
image_jpg, [IMAGE_SIZE[0], IMAGE_SIZE[1], 3], name="Reshape_Preproc")
return image_jpg
def input_fn(tf_records_list, epochs=10, batch_size=8, n_frames=16):
def _parse_proto(example_proto):
parsed_dict = {
"target": tf.FixedLenFeature((), tf.float32, default_value=0)
}
for i in range(n_frames):
parsed_dict['frame_{}'.format(i)] = tf.FixedLenFeature(
(), tf.string, default_value="")
parsed_features = tf.parse_single_example(example_proto, parsed_dict)
return parsed_features
def _split_xy(feat_dict):
target = tf.one_hot(tf.to_int32(
feat_dict['target']), depth=2, dtype=tf.float32)
input_frames = {}
for i in range(n_frames):
frame_id = 'frame_{}'.format(i)
input_frames[frame_id] = feat_dict[frame_id]
return input_frames, {'target': target}
def _input_fn():
dataset = tf.data.TFRecordDataset(
tf_records_list, compression_type='GZIP')
dataset = dataset.map(_parse_proto)
dataset = dataset.map(_split_xy)
dataset = dataset.shuffle(buffer_size=2 * batch_size)
dataset = dataset.repeat(epochs)
dataset = dataset.batch(batch_size)
return dataset
return _input_fn
def metrics(logits, labels):
argmax_logits = tf.argmax(logits, axis=1)
argmax_labels = tf.argmax(labels, axis=1)
return {'accuracy': tf.metrics.accuracy(argmax_labels, argmax_logits)}
def get_serving_fn(window_size):
input_tensor = {"frame_{}".format(i): tf.placeholder(
dtype=tf.string, shape=[None]) for i in range(window_size)}
return tf.estimator.export.build_raw_serving_input_receiver_fn(input_tensor)
def model_fn(n_frames):
def _model_fn(features, labels, mode, params):
input_tensors_list = []
for i in range(n_frames):
frame_id = 'frame_{}'.format(i)
frame_tensor = tf.map_fn(preproc, features[frame_id], tf.float32)
frame_tensor = tf.expand_dims(frame_tensor, axis=-1)
frame_tensor = tf.transpose(frame_tensor, [0, 1, 2, 4, 3])
print(frame_tensor)
input_tensors_list.append(frame_tensor)
input_tensor_stream = tf.concat(input_tensors_list, axis=3)
print(input_tensor_stream)
is_training = mode == tf.estimator.ModeKeys.TRAIN
logits = AquamanNet(input_tensor_stream, is_training, 2)
total_loss = None
loss = None
train_op = None
eval_metric_ops = {}
export_outputs = None
prediction_dict = {'class': tf.argmax(
logits, axis=1, name="predictions")}
if mode != tf.estimator.ModeKeys.PREDICT:
reg_loss = tf.losses.get_regularization_loss()
tf.summary.scalar("Regularization_Loss", tensor=reg_loss)
loss = tf.losses.softmax_cross_entropy(
onehot_labels=labels['target'], logits=logits)
tf.summary.scalar("XEntropy_LOSS", tensor=loss)
total_loss = loss + reg_loss
learning_rate = tf.constant(1e-4, name='fixed_learning_rate')
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
vars_to_train = tf.trainable_variables()
tf.logging.info("Variables to train: {}".format(vars_to_train))
if is_training:
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(
loss=total_loss, global_step=tf.train.get_global_step(), var_list=vars_to_train)
eval_metric_ops = metrics(logits, labels['target'])
else:
export_outputs = {
'logits': tf.estimator.export.PredictOutput(outputs=logits)}
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=prediction_dict,
loss=total_loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops,
export_outputs=export_outputs)
return _model_fn
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--train-tf-list',
dest='train_tf_list',
type=str,
required=True)
parser.add_argument('--test-tf-list',
dest='test_tf_list',
type=str,
required=True)
parser.add_argument('--output-dir',
dest='output_dir',
type=str,
required=True)
parser.add_argument('--window-size',
dest='window_size',
type=int,
required=True)
args = parser.parse_args()
tfrecord_list_train = args.train_tf_list.split(',')
tfrecord_list_test = args.test_tf_list.split(',')
session_config = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=False
)
run_config = tf.estimator.RunConfig(
model_dir=args.output_dir,
save_summary_steps=100,
session_config=session_config,
save_checkpoints_steps=100,
save_checkpoints_secs=None,
keep_checkpoint_max=1
)
estimator = tf.estimator.Estimator(
model_fn=model_fn(args.window_size),
config=run_config
)
train_input_fn = input_fn(
batch_size=BATCH_SIZE, tf_records_list=tfrecord_list_train, epochs=EPOCHS, n_frames=args.window_size)
test_input_fn = input_fn(
batch_size=BATCH_SIZE, tf_records_list=tfrecord_list_test, epochs=1, n_frames=args.window_size)
train_spec = tf.estimator.TrainSpec(
input_fn=train_input_fn, max_steps=10000)
eval_spec = tf.estimator.EvalSpec(
input_fn=test_input_fn,
start_delay_secs=60,
throttle_secs=60)
tf.estimator.train_and_evaluate(
estimator=estimator, train_spec=train_spec, eval_spec=eval_spec)
estimator.export_savedmodel(
export_dir_base=args.output_dir, serving_input_receiver_fn=get_serving_fn(args.window_size))
| true
| true
|
7904b4459d529e374c9aba9733d3f3df8c17f078
| 299
|
py
|
Python
|
picscope/urls.py
|
yeaske/picscope
|
efb38459631b7aee8b2db4f38da1f437c2d96ad8
|
[
"MIT"
] | null | null | null |
picscope/urls.py
|
yeaske/picscope
|
efb38459631b7aee8b2db4f38da1f437c2d96ad8
|
[
"MIT"
] | null | null | null |
picscope/urls.py
|
yeaske/picscope
|
efb38459631b7aee8b2db4f38da1f437c2d96ad8
|
[
"MIT"
] | null | null | null |
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'picscope.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
)
| 23
| 53
| 0.652174
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^admin/', include(admin.site.urls)),
)
| true
| true
|
7904b50baf488b7d1514f62d0a16210c6ad537bd
| 5,303
|
py
|
Python
|
tencentcloud/ims/v20200713/ims_client.py
|
dyllllll/tencentcloud-sdk-python
|
677424361ec00927a52fd3c6d5110c4de5737449
|
[
"Apache-2.0"
] | 2
|
2021-07-10T09:40:16.000Z
|
2022-02-04T09:01:22.000Z
|
tencentcloud/ims/v20200713/ims_client.py
|
dyllllll/tencentcloud-sdk-python
|
677424361ec00927a52fd3c6d5110c4de5737449
|
[
"Apache-2.0"
] | null | null | null |
tencentcloud/ims/v20200713/ims_client.py
|
dyllllll/tencentcloud-sdk-python
|
677424361ec00927a52fd3c6d5110c4de5737449
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf8 -*-
# Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.common.abstract_client import AbstractClient
from tencentcloud.ims.v20200713 import models
class ImsClient(AbstractClient):
_apiVersion = '2020-07-13'
_endpoint = 'ims.tencentcloudapi.com'
_service = 'ims'
def DescribeImageStat(self, request):
"""控制台识别统计
:param request: Request instance for DescribeImageStat.
:type request: :class:`tencentcloud.ims.v20200713.models.DescribeImageStatRequest`
:rtype: :class:`tencentcloud.ims.v20200713.models.DescribeImageStatResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeImageStat", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeImageStatResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribeImsList(self, request):
"""图片机器审核明细
:param request: Request instance for DescribeImsList.
:type request: :class:`tencentcloud.ims.v20200713.models.DescribeImsListRequest`
:rtype: :class:`tencentcloud.ims.v20200713.models.DescribeImsListResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeImsList", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeImsListResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def ImageModeration(self, request):
"""图片内容检测服务(Image Moderation, IM)能自动扫描图片,识别可能令人反感、不安全或不适宜的内容,同时支持用户配置图片黑名单,打击自定义识别类型的图片。
<div class="rno-api-explorer" style="margin-bottom:20px">
<div class="rno-api-explorer-inner">
<div class="rno-api-explorer-hd">
<div class="rno-api-explorer-title">
关于版本迭代的描述
</div>
</div>
<div class="rno-api-explorer-body">
<div class="rno-api-explorer-cont">
<p>当前页面版本为图片内容安全2020版本,2020.11.3日前接入的图片内容安全接口为2019版本,在此时间前接入的用户可直接访问以下链接进行维护操作:<a href="https://cloud.tencent.com/document/product/1125/38206" target="_blank">图片内容安全-2019版本</a></p>
<p>2020版本相对2019版本进行了升级,支持更灵活的多场景业务策略配置以及更丰富的识别回调信息,满足不同业务的识别需求,建议按照2020版本接入指引进行接口升级;同时,2019版本也会持续维护直至用户不再使用为止。</p>
</div>
</div>
</div>
</div>
:param request: Request instance for ImageModeration.
:type request: :class:`tencentcloud.ims.v20200713.models.ImageModerationRequest`
:rtype: :class:`tencentcloud.ims.v20200713.models.ImageModerationResponse`
"""
try:
params = request._serialize()
body = self.call("ImageModeration", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.ImageModerationResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
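# Illustrative usage sketch (not part of the SDK source): a minimal, assumed
# example of calling the client above. The credential values, region, and
# FileUrl are placeholders, and the exact request fields depend on the
# installed SDK version.
#
#     from tencentcloud.common import credential
#     from tencentcloud.ims.v20200713 import ims_client, models
#
#     cred = credential.Credential("YOUR_SECRET_ID", "YOUR_SECRET_KEY")
#     client = ims_client.ImsClient(cred, "ap-guangzhou")
#     req = models.ImageModerationRequest()
#     req.FileUrl = "https://example.com/image.jpg"
#     resp = client.ImageModeration(req)
#     print(resp.to_json_string())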
| 42.087302
| 204
| 0.613426
|
import json
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.common.abstract_client import AbstractClient
from tencentcloud.ims.v20200713 import models
class ImsClient(AbstractClient):
_apiVersion = '2020-07-13'
_endpoint = 'ims.tencentcloudapi.com'
_service = 'ims'
def DescribeImageStat(self, request):
try:
params = request._serialize()
body = self.call("DescribeImageStat", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeImageStatResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribeImsList(self, request):
try:
params = request._serialize()
body = self.call("DescribeImsList", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeImsListResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def ImageModeration(self, request):
try:
params = request._serialize()
body = self.call("ImageModeration", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.ImageModerationResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
| true
| true
|
7904b57c4115b987aa10e92260dce68694256203
| 5,124
|
py
|
Python
|
satchmo/newsletter/mailman.py
|
sankroh/satchmo
|
e48df0c2a4be4ce14785d0a5d6dd1e516c57a838
|
[
"BSD-3-Clause"
] | 1
|
2016-05-09T12:21:04.000Z
|
2016-05-09T12:21:04.000Z
|
satchmo/newsletter/mailman.py
|
sankroh/satchmo
|
e48df0c2a4be4ce14785d0a5d6dd1e516c57a838
|
[
"BSD-3-Clause"
] | null | null | null |
satchmo/newsletter/mailman.py
|
sankroh/satchmo
|
e48df0c2a4be4ce14785d0a5d6dd1e516c57a838
|
[
"BSD-3-Clause"
] | null | null | null |
"""A Mailman newsletter subscription interface.
To use this plugin, enable the newsletter module and set the newsletter module and name settings
in the admin settings page.
"""
from django.utils.translation import ugettext as _
from Mailman import MailList, Errors
from models import Subscription
from satchmo.configuration import config_value
import logging
import sys
log = logging.getLogger('newsletter.mailman')
class UserDesc: pass
def is_subscribed(contact):
return Subscription.email_is_subscribed(contact.email)
def update_contact(contact, subscribe, attributes={}):
email = contact.email
current = Subscription.email_is_subscribed(email)
attributesChanged = False
sub = None
if attributes:
sub, created = Subscription.objects.get_or_create(email=email)
if created:
attributesChanged = True
else:
oldAttr = [(a.name,a.value) for a in sub.attributes.all()]
oldAttr.sort()
sub.update_attributes(attributes)
newAttr = [(a.name,a.value) for a in sub.attributes.all()]
newAttr.sort()
if not created:
attributesChanged = oldAttr != newAttr
if current == subscribe:
if subscribe:
if attributesChanged:
result = _("Updated subscription for %(email)s.")
else:
result = _("Already subscribed %(email)s.")
else:
result = _("Already removed %(email)s.")
else:
if not sub:
sub, created = Subscription.objects.get_or_create(email=email)
sub.subscribed = subscribe
sub.save()
if subscribe:
mailman_add(contact)
result = _("Subscribed: %(email)s")
else:
mailman_remove(contact)
result = _("Unsubscribed: %(email)s")
return result % { 'email' : email }
def mailman_add(contact, listname=None, send_welcome_msg=None, admin_notify=None):
"""Add a Satchmo contact to a mailman mailing list.
Parameters:
    - `contact`: A Satchmo Contact
- `listname`: the Mailman listname, defaulting to whatever you have set in settings.NEWSLETTER_NAME
- `send_welcome_msg`: True or False, defaulting to the list default
    - `admin_notify`: True or False, defaulting to the list default
"""
mm, listname = _get_maillist(listname)
print >> sys.stderr, 'mailman adding %s to %s' % (contact.email, listname)
if send_welcome_msg is None:
send_welcome_msg = mm.send_welcome_msg
userdesc = UserDesc()
userdesc.fullname = contact.full_name
userdesc.address = contact.email
userdesc.digest = False
if mm.isMember(contact.email):
print >> sys.stderr, _('Already Subscribed: %s' % contact.email)
else:
try:
try:
mm.Lock()
mm.ApprovedAddMember(userdesc, send_welcome_msg, admin_notify)
mm.Save()
print >> sys.stderr, _('Subscribed: %(email)s') % { 'email' : contact.email }
except Errors.MMAlreadyAMember:
print >> sys.stderr, _('Already a member: %(email)s') % { 'email' : contact.email }
except Errors.MMBadEmailError:
if userdesc.address == '':
print >> sys.stderr, _('Bad/Invalid email address: blank line')
else:
print >> sys.stderr, _('Bad/Invalid email address: %(email)s') % { 'email' : contact.email }
except Errors.MMHostileAddress:
print >> sys.stderr, _('Hostile address (illegal characters): %(email)s') % { 'email' : contact.email }
finally:
mm.Unlock()
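# Illustrative usage sketch (not part of the original module): a minimal,
# assumed example of subscribing and later removing a Satchmo contact, where
# `contact` is a Contact instance and "announce" is a hypothetical list name.
#
#     mailman_add(contact, listname="announce", send_welcome_msg=True)
#     mailman_remove(contact, listname="announce", userack=True)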
def mailman_remove(contact, listname=None, userack=None, admin_notify=None):
"""Remove a Satchmo contact from a Mailman mailing list
Parameters:
- `contact`: A Satchmo contact
- `listname`: the Mailman listname, defaulting to whatever you have set in settings.NEWSLETTER_NAME
- `userack`: True or False, whether to notify the user, defaulting to the list default
- `admin_notify`: True or False, defaulting to the list default
"""
mm, listname = _get_maillist(listname)
print >> sys.stderr, 'mailman removing %s from %s' % (contact.email, listname)
if mm.isMember(contact.email):
try:
mm.Lock()
mm.ApprovedDeleteMember(contact.email, 'satchmo.newsletter', admin_notify, userack)
mm.Save()
finally:
mm.Unlock()
def _get_maillist(listname):
try:
if not listname:
listname = config_value('NEWSLETTER', 'NEWSLETTER_NAME')
if listname == "":
log.warn("NEWSLETTER_NAME not set in store settings")
raise NameError('No NEWSLETTER_NAME in settings')
return MailList.MailList(listname, lock=0), listname
except Errors.MMUnknownListError:
print >> sys.stderr, "Can't find the MailMan newsletter: %s" % listname
raise NameError('No such newsletter, "%s"' % listname)
| 34.621622
| 119
| 0.62178
|
from django.utils.translation import ugettext as _
from Mailman import MailList, Errors
from models import Subscription
from satchmo.configuration import config_value
import logging
import sys
log = logging.getLogger('newsletter.mailman')
class UserDesc: pass
def is_subscribed(contact):
return Subscription.email_is_subscribed(contact.email)
def update_contact(contact, subscribe, attributes={}):
email = contact.email
current = Subscription.email_is_subscribed(email)
attributesChanged = False
sub = None
if attributes:
sub, created = Subscription.objects.get_or_create(email=email)
if created:
attributesChanged = True
else:
oldAttr = [(a.name,a.value) for a in sub.attributes.all()]
oldAttr.sort()
sub.update_attributes(attributes)
newAttr = [(a.name,a.value) for a in sub.attributes.all()]
newAttr.sort()
if not created:
attributesChanged = oldAttr != newAttr
if current == subscribe:
if subscribe:
if attributesChanged:
result = _("Updated subscription for %(email)s.")
else:
result = _("Already subscribed %(email)s.")
else:
result = _("Already removed %(email)s.")
else:
if not sub:
sub, created = Subscription.objects.get_or_create(email=email)
sub.subscribed = subscribe
sub.save()
if subscribe:
mailman_add(contact)
result = _("Subscribed: %(email)s")
else:
mailman_remove(contact)
result = _("Unsubscribed: %(email)s")
return result % { 'email' : email }
def mailman_add(contact, listname=None, send_welcome_msg=None, admin_notify=None):
mm, listname = _get_maillist(listname)
print >> sys.stderr, 'mailman adding %s to %s' % (contact.email, listname)
if send_welcome_msg is None:
send_welcome_msg = mm.send_welcome_msg
userdesc = UserDesc()
userdesc.fullname = contact.full_name
userdesc.address = contact.email
userdesc.digest = False
if mm.isMember(contact.email):
print >> sys.stderr, _('Already Subscribed: %s' % contact.email)
else:
try:
try:
mm.Lock()
mm.ApprovedAddMember(userdesc, send_welcome_msg, admin_notify)
mm.Save()
print >> sys.stderr, _('Subscribed: %(email)s') % { 'email' : contact.email }
except Errors.MMAlreadyAMember:
print >> sys.stderr, _('Already a member: %(email)s') % { 'email' : contact.email }
except Errors.MMBadEmailError:
if userdesc.address == '':
print >> sys.stderr, _('Bad/Invalid email address: blank line')
else:
print >> sys.stderr, _('Bad/Invalid email address: %(email)s') % { 'email' : contact.email }
except Errors.MMHostileAddress:
print >> sys.stderr, _('Hostile address (illegal characters): %(email)s') % { 'email' : contact.email }
finally:
mm.Unlock()
def mailman_remove(contact, listname=None, userack=None, admin_notify=None):
mm, listname = _get_maillist(listname)
print >> sys.stderr, 'mailman removing %s from %s' % (contact.email, listname)
if mm.isMember(contact.email):
try:
mm.Lock()
mm.ApprovedDeleteMember(contact.email, 'satchmo.newsletter', admin_notify, userack)
mm.Save()
finally:
mm.Unlock()
def _get_maillist(listname):
try:
if not listname:
listname = config_value('NEWSLETTER', 'NEWSLETTER_NAME')
if listname == "":
log.warn("NEWSLETTER_NAME not set in store settings")
raise NameError('No NEWSLETTER_NAME in settings')
return MailList.MailList(listname, lock=0), listname
except Errors.MMUnknownListError:
print >> sys.stderr, "Can't find the MailMan newsletter: %s" % listname
raise NameError('No such newsletter, "%s"' % listname)
| true
| true
|
7904b5d0340a5014924842d37fa20c59899899eb
| 1,049
|
py
|
Python
|
runtime/image_classification/models/vgg16/gpus=16_straight/stage5.py
|
NestLakerJasonLIN/pipedream
|
f50827f2e28cbdbd82a4ea686c0498272b1460d6
|
[
"MIT"
] | 273
|
2019-08-31T14:12:11.000Z
|
2022-03-05T13:34:25.000Z
|
runtime/image_classification/models/vgg16/gpus=16_straight/stage5.py
|
albertsh10/pipedream
|
cad624f79a71f44ba79099f0c38321347b13e5c2
|
[
"MIT"
] | 67
|
2019-09-19T15:36:59.000Z
|
2022-01-13T09:11:54.000Z
|
runtime/image_classification/models/vgg16/gpus=16_straight/stage5.py
|
albertsh10/pipedream
|
cad624f79a71f44ba79099f0c38321347b13e5c2
|
[
"MIT"
] | 100
|
2019-09-16T20:59:14.000Z
|
2022-03-23T12:56:56.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
class Stage5(torch.nn.Module):
def __init__(self):
super(Stage5, self).__init__()
self.layer1 = torch.nn.Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
self._initialize_weights()
def forward(self, input0):
out0 = input0.clone()
out1 = self.layer1(out0)
return out1
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, torch.nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
torch.nn.init.constant_(m.bias, 0)
elif isinstance(m, torch.nn.BatchNorm2d):
torch.nn.init.constant_(m.weight, 1)
torch.nn.init.constant_(m.bias, 0)
elif isinstance(m, torch.nn.Linear):
torch.nn.init.normal_(m.weight, 0, 0.01)
torch.nn.init.constant_(m.bias, 0)
| 34.966667
| 97
| 0.585319
|
import torch
class Stage5(torch.nn.Module):
def __init__(self):
super(Stage5, self).__init__()
self.layer1 = torch.nn.Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
self._initialize_weights()
def forward(self, input0):
out0 = input0.clone()
out1 = self.layer1(out0)
return out1
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, torch.nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
torch.nn.init.constant_(m.bias, 0)
elif isinstance(m, torch.nn.BatchNorm2d):
torch.nn.init.constant_(m.weight, 1)
torch.nn.init.constant_(m.bias, 0)
elif isinstance(m, torch.nn.Linear):
torch.nn.init.normal_(m.weight, 0, 0.01)
torch.nn.init.constant_(m.bias, 0)
| true
| true
|
7904b5e587db74f3b24656b9c3e16df5d33013fc
| 172
|
py
|
Python
|
labs/lab2.py
|
sw33tr0ll/aws-training
|
db071a1592c717b1edd1786fa4d9ae07a51ecf1e
|
[
"MIT"
] | 2
|
2020-08-12T05:36:25.000Z
|
2020-08-12T17:12:17.000Z
|
labs/lab2.py
|
sw33tr0ll/aws-training
|
db071a1592c717b1edd1786fa4d9ae07a51ecf1e
|
[
"MIT"
] | null | null | null |
labs/lab2.py
|
sw33tr0ll/aws-training
|
db071a1592c717b1edd1786fa4d9ae07a51ecf1e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import boto3
s3_client = boto3.client('s3')
raw_response = s3_client.list_buckets()
for bucket in raw_response['Buckets']:
print(bucket['Name'])
| 24.571429
| 39
| 0.738372
|
import boto3
s3_client = boto3.client('s3')
raw_response = s3_client.list_buckets()
for bucket in raw_response['Buckets']:
print(bucket['Name'])
| true
| true
|
7904b63a049f8fc242f82641fda19c361cd92ba7
| 186
|
py
|
Python
|
config.py
|
matale14/api-blueprint
|
fdeb31fdac48ef1d0fdfd68fe17cbb0b7f2470ec
|
[
"MIT"
] | null | null | null |
config.py
|
matale14/api-blueprint
|
fdeb31fdac48ef1d0fdfd68fe17cbb0b7f2470ec
|
[
"MIT"
] | null | null | null |
config.py
|
matale14/api-blueprint
|
fdeb31fdac48ef1d0fdfd68fe17cbb0b7f2470ec
|
[
"MIT"
] | null | null | null |
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
#SESSION_COOKIE_SECURE = True
SECRET_KEY = os.environ.get('SECRET_KEY') or 'you-will-never-guess'
| 26.571429
| 68
| 0.763441
|
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
SECRET_KEY = os.environ.get('SECRET_KEY') or 'you-will-never-guess'
| true
| true
|
7904b72dd954318ac996a1568fb8a6aab0bfa2ab
| 13,280
|
py
|
Python
|
django/core/management/templates.py
|
skyl/django
|
843e7450ddcb820b2bdc6d47d6c4aab9820a46c4
|
[
"BSD-3-Clause"
] | 1
|
2021-11-22T17:41:19.000Z
|
2021-11-22T17:41:19.000Z
|
django/core/management/templates.py
|
skyl/django
|
843e7450ddcb820b2bdc6d47d6c4aab9820a46c4
|
[
"BSD-3-Clause"
] | null | null | null |
django/core/management/templates.py
|
skyl/django
|
843e7450ddcb820b2bdc6d47d6c4aab9820a46c4
|
[
"BSD-3-Clause"
] | 1
|
2020-06-03T07:55:20.000Z
|
2020-06-03T07:55:20.000Z
|
import cgi
import errno
import mimetypes
import os
import posixpath
import re
import shutil
import stat
import sys
import tempfile
try:
from urllib.request import urlretrieve
except ImportError: # Python 2
from urllib import urlretrieve
from optparse import make_option
from os import path
import django
from django.template import Template, Context
from django.utils import archive
from django.utils._os import rmtree_errorhandler
from django.core.management.base import BaseCommand, CommandError
from django.core.management.commands.makemessages import handle_extensions
_drive_re = re.compile('^([a-z]):', re.I)
_url_drive_re = re.compile('^([a-z])[:|]', re.I)
class TemplateCommand(BaseCommand):
"""
Copies either a Django application layout template or a Django project
layout template into the specified directory.
:param style: A color style object (see django.core.management.color).
:param app_or_project: The string 'app' or 'project'.
:param name: The name of the application or project.
:param directory: The directory to which the template should be copied.
:param options: The additional variables passed to project or app templates
"""
args = "[name] [optional destination directory]"
option_list = BaseCommand.option_list + (
make_option('--template',
action='store', dest='template',
help='The dotted import path to load the template from.'),
make_option('--extension', '-e', dest='extensions',
action='append', default=['py'],
help='The file extension(s) to render (default: "py"). '
'Separate multiple extensions with commas, or use '
'-e multiple times.'),
make_option('--name', '-n', dest='files',
action='append', default=[],
help='The file name(s) to render. '
'Separate multiple extensions with commas, or use '
'-n multiple times.')
)
requires_model_validation = False
# Can't import settings during this command, because they haven't
# necessarily been created.
can_import_settings = False
# The supported URL schemes
url_schemes = ['http', 'https', 'ftp']
# Can't perform any active locale changes during this command, because
# setting might not be available at all.
leave_locale_alone = True
def handle(self, app_or_project, name, target=None, **options):
self.app_or_project = app_or_project
self.paths_to_remove = []
self.verbosity = int(options.get('verbosity'))
self.validate_name(name, app_or_project)
# if some directory is given, make sure it's nicely expanded
if target is None:
top_dir = path.join(os.getcwd(), name)
try:
os.makedirs(top_dir)
except OSError as e:
if e.errno == errno.EEXIST:
message = "'%s' already exists" % top_dir
else:
message = e
raise CommandError(message)
else:
top_dir = os.path.abspath(path.expanduser(target))
if not os.path.exists(top_dir):
raise CommandError("Destination directory '%s' does not "
"exist, please create it first." % top_dir)
extensions = tuple(
handle_extensions(options.get('extensions'), ignored=()))
extra_files = []
for file in options.get('files'):
extra_files.extend(map(lambda x: x.strip(), file.split(',')))
if self.verbosity >= 2:
self.stdout.write("Rendering %s template files with "
"extensions: %s\n" %
(app_or_project, ', '.join(extensions)))
self.stdout.write("Rendering %s template files with "
"filenames: %s\n" %
(app_or_project, ', '.join(extra_files)))
base_name = '%s_name' % app_or_project
base_subdir = '%s_template' % app_or_project
base_directory = '%s_directory' % app_or_project
if django.VERSION[-1] == 0:
docs_version = 'dev'
else:
docs_version = '%d.%d' % django.VERSION[:2]
context = Context(dict(options, **{
base_name: name,
base_directory: top_dir,
'docs_version': docs_version,
}), autoescape=False)
# Setup a stub settings environment for template rendering
from django.conf import settings
if not settings.configured:
settings.configure()
template_dir = self.handle_template(options.get('template'),
base_subdir)
prefix_length = len(template_dir) + 1
for root, dirs, files in os.walk(template_dir):
path_rest = root[prefix_length:]
relative_dir = path_rest.replace(base_name, name)
if relative_dir:
target_dir = path.join(top_dir, relative_dir)
if not path.exists(target_dir):
os.mkdir(target_dir)
for dirname in dirs[:]:
if dirname.startswith('.') or dirname == '__pycache__':
dirs.remove(dirname)
for filename in files:
if filename.endswith(('.pyo', '.pyc', '.py.class')):
# Ignore some files as they cause various breakages.
continue
old_path = path.join(root, filename)
new_path = path.join(top_dir, relative_dir,
filename.replace(base_name, name))
if path.exists(new_path):
raise CommandError("%s already exists, overlaying a "
"project or app into an existing "
"directory won't replace conflicting "
"files" % new_path)
# Only render the Python files, as we don't want to
# accidentally render Django templates files
with open(old_path, 'rb') as template_file:
content = template_file.read()
if filename.endswith(extensions) or filename in extra_files:
content = content.decode('utf-8')
template = Template(content)
content = template.render(context)
content = content.encode('utf-8')
with open(new_path, 'wb') as new_file:
new_file.write(content)
if self.verbosity >= 2:
self.stdout.write("Creating %s\n" % new_path)
try:
shutil.copymode(old_path, new_path)
self.make_writeable(new_path)
except OSError:
self.stderr.write(
"Notice: Couldn't set permission bits on %s. You're "
"probably using an uncommon filesystem setup. No "
"problem." % new_path, self.style.NOTICE)
if self.paths_to_remove:
if self.verbosity >= 2:
self.stdout.write("Cleaning up temporary files.\n")
for path_to_remove in self.paths_to_remove:
if path.isfile(path_to_remove):
os.remove(path_to_remove)
else:
shutil.rmtree(path_to_remove,
onerror=rmtree_errorhandler)
def handle_template(self, template, subdir):
"""
Determines where the app or project templates are.
Use django.__path__[0] as the default because we don't
know into which directory Django has been installed.
"""
if template is None:
return path.join(django.__path__[0], 'conf', subdir)
else:
if template.startswith('file://'):
template = template[7:]
expanded_template = path.expanduser(template)
expanded_template = path.normpath(expanded_template)
if path.isdir(expanded_template):
return expanded_template
if self.is_url(template):
# downloads the file and returns the path
absolute_path = self.download(template)
else:
absolute_path = path.abspath(expanded_template)
if path.exists(absolute_path):
return self.extract(absolute_path)
raise CommandError("couldn't handle %s template %s." %
(self.app_or_project, template))
def validate_name(self, name, app_or_project):
if name is None:
raise CommandError("you must provide %s %s name" % (
"an" if app_or_project == "app" else "a", app_or_project))
# If it's not a valid directory name.
if not re.search(r'^[_a-zA-Z]\w*$', name):
# Provide a smart error message, depending on the error.
if not re.search(r'^[_a-zA-Z]', name):
message = 'make sure the name begins with a letter or underscore'
else:
message = 'use only numbers, letters and underscores'
raise CommandError("%r is not a valid %s name. Please %s." %
(name, app_or_project, message))
def download(self, url):
"""
Downloads the given URL and returns the file name.
"""
def cleanup_url(url):
tmp = url.rstrip('/')
filename = tmp.split('/')[-1]
if url.endswith('/'):
display_url = tmp + '/'
else:
display_url = url
return filename, display_url
prefix = 'django_%s_template_' % self.app_or_project
tempdir = tempfile.mkdtemp(prefix=prefix, suffix='_download')
self.paths_to_remove.append(tempdir)
filename, display_url = cleanup_url(url)
if self.verbosity >= 2:
self.stdout.write("Downloading %s\n" % display_url)
try:
the_path, info = urlretrieve(url, path.join(tempdir, filename))
except IOError as e:
raise CommandError("couldn't download URL %s to %s: %s" %
(url, filename, e))
used_name = the_path.split('/')[-1]
# Trying to get better name from response headers
content_disposition = info.get('content-disposition')
if content_disposition:
_, params = cgi.parse_header(content_disposition)
guessed_filename = params.get('filename') or used_name
else:
guessed_filename = used_name
# Falling back to content type guessing
ext = self.splitext(guessed_filename)[1]
content_type = info.get('content-type')
if not ext and content_type:
ext = mimetypes.guess_extension(content_type)
if ext:
guessed_filename += ext
# Move the temporary file to a filename that has better
        # chances of being recognized by the archive utils
if used_name != guessed_filename:
guessed_path = path.join(tempdir, guessed_filename)
shutil.move(the_path, guessed_path)
return guessed_path
# Giving up
return the_path
def splitext(self, the_path):
"""
Like os.path.splitext, but takes off .tar, too
"""
base, ext = posixpath.splitext(the_path)
if base.lower().endswith('.tar'):
ext = base[-4:] + ext
base = base[:-4]
return base, ext
def extract(self, filename):
"""
        Extracts the given file to a temporary directory and returns
        the path of the directory with the extracted content.
"""
prefix = 'django_%s_template_' % self.app_or_project
tempdir = tempfile.mkdtemp(prefix=prefix, suffix='_extract')
self.paths_to_remove.append(tempdir)
if self.verbosity >= 2:
self.stdout.write("Extracting %s\n" % filename)
try:
archive.extract(filename, tempdir)
return tempdir
except (archive.ArchiveException, IOError) as e:
raise CommandError("couldn't extract file %s to %s: %s" %
(filename, tempdir, e))
def is_url(self, template):
"""
Returns True if the name looks like a URL
"""
if ':' not in template:
return False
scheme = template.split(':', 1)[0].lower()
return scheme in self.url_schemes
def make_writeable(self, filename):
"""
Make sure that the file is writeable.
Useful if our source is read-only.
"""
if sys.platform.startswith('java'):
# On Jython there is no os.access()
return
if not os.access(filename, os.W_OK):
st = os.stat(filename)
new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR
os.chmod(filename, new_permissions)
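# Illustrative subclass sketch (not part of Django's source): TemplateCommand
# is intended to be subclassed by management commands such as
# startapp/startproject; a hypothetical minimal "startapp"-style command could
# look roughly like this (names and argument handling are assumptions):
#
#     class Command(TemplateCommand):
#         help = "Creates a Django app directory structure for the given app name."
#
#         def handle(self, app_name=None, target=None, **options):
#             self.validate_name(app_name, "app")
#             super(Command, self).handle("app", app_name, target, **options)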
| 40.364742
| 81
| 0.565437
|
import cgi
import errno
import mimetypes
import os
import posixpath
import re
import shutil
import stat
import sys
import tempfile
try:
from urllib.request import urlretrieve
except ImportError:
from urllib import urlretrieve
from optparse import make_option
from os import path
import django
from django.template import Template, Context
from django.utils import archive
from django.utils._os import rmtree_errorhandler
from django.core.management.base import BaseCommand, CommandError
from django.core.management.commands.makemessages import handle_extensions
_drive_re = re.compile('^([a-z]):', re.I)
_url_drive_re = re.compile('^([a-z])[:|]', re.I)
class TemplateCommand(BaseCommand):
args = "[name] [optional destination directory]"
option_list = BaseCommand.option_list + (
make_option('--template',
action='store', dest='template',
help='The dotted import path to load the template from.'),
make_option('--extension', '-e', dest='extensions',
action='append', default=['py'],
help='The file extension(s) to render (default: "py"). '
'Separate multiple extensions with commas, or use '
'-e multiple times.'),
make_option('--name', '-n', dest='files',
action='append', default=[],
help='The file name(s) to render. '
'Separate multiple extensions with commas, or use '
'-n multiple times.')
)
requires_model_validation = False
can_import_settings = False
url_schemes = ['http', 'https', 'ftp']
# setting might not be available at all.
leave_locale_alone = True
def handle(self, app_or_project, name, target=None, **options):
self.app_or_project = app_or_project
self.paths_to_remove = []
self.verbosity = int(options.get('verbosity'))
self.validate_name(name, app_or_project)
# if some directory is given, make sure it's nicely expanded
if target is None:
top_dir = path.join(os.getcwd(), name)
try:
os.makedirs(top_dir)
except OSError as e:
if e.errno == errno.EEXIST:
message = "'%s' already exists" % top_dir
else:
message = e
raise CommandError(message)
else:
top_dir = os.path.abspath(path.expanduser(target))
if not os.path.exists(top_dir):
raise CommandError("Destination directory '%s' does not "
"exist, please create it first." % top_dir)
extensions = tuple(
handle_extensions(options.get('extensions'), ignored=()))
extra_files = []
for file in options.get('files'):
extra_files.extend(map(lambda x: x.strip(), file.split(',')))
if self.verbosity >= 2:
self.stdout.write("Rendering %s template files with "
"extensions: %s\n" %
(app_or_project, ', '.join(extensions)))
self.stdout.write("Rendering %s template files with "
"filenames: %s\n" %
(app_or_project, ', '.join(extra_files)))
base_name = '%s_name' % app_or_project
base_subdir = '%s_template' % app_or_project
base_directory = '%s_directory' % app_or_project
if django.VERSION[-1] == 0:
docs_version = 'dev'
else:
docs_version = '%d.%d' % django.VERSION[:2]
context = Context(dict(options, **{
base_name: name,
base_directory: top_dir,
'docs_version': docs_version,
}), autoescape=False)
from django.conf import settings
if not settings.configured:
settings.configure()
template_dir = self.handle_template(options.get('template'),
base_subdir)
prefix_length = len(template_dir) + 1
for root, dirs, files in os.walk(template_dir):
path_rest = root[prefix_length:]
relative_dir = path_rest.replace(base_name, name)
if relative_dir:
target_dir = path.join(top_dir, relative_dir)
if not path.exists(target_dir):
os.mkdir(target_dir)
for dirname in dirs[:]:
if dirname.startswith('.') or dirname == '__pycache__':
dirs.remove(dirname)
for filename in files:
if filename.endswith(('.pyo', '.pyc', '.py.class')):
continue
old_path = path.join(root, filename)
new_path = path.join(top_dir, relative_dir,
filename.replace(base_name, name))
if path.exists(new_path):
raise CommandError("%s already exists, overlaying a "
"project or app into an existing "
"directory won't replace conflicting "
"files" % new_path)
# Only render the Python files, as we don't want to
with open(old_path, 'rb') as template_file:
content = template_file.read()
if filename.endswith(extensions) or filename in extra_files:
content = content.decode('utf-8')
template = Template(content)
content = template.render(context)
content = content.encode('utf-8')
with open(new_path, 'wb') as new_file:
new_file.write(content)
if self.verbosity >= 2:
self.stdout.write("Creating %s\n" % new_path)
try:
shutil.copymode(old_path, new_path)
self.make_writeable(new_path)
except OSError:
self.stderr.write(
"Notice: Couldn't set permission bits on %s. You're "
"probably using an uncommon filesystem setup. No "
"problem." % new_path, self.style.NOTICE)
if self.paths_to_remove:
if self.verbosity >= 2:
self.stdout.write("Cleaning up temporary files.\n")
for path_to_remove in self.paths_to_remove:
if path.isfile(path_to_remove):
os.remove(path_to_remove)
else:
shutil.rmtree(path_to_remove,
onerror=rmtree_errorhandler)
def handle_template(self, template, subdir):
if template is None:
return path.join(django.__path__[0], 'conf', subdir)
else:
if template.startswith('file://'):
template = template[7:]
expanded_template = path.expanduser(template)
expanded_template = path.normpath(expanded_template)
if path.isdir(expanded_template):
return expanded_template
if self.is_url(template):
absolute_path = self.download(template)
else:
absolute_path = path.abspath(expanded_template)
if path.exists(absolute_path):
return self.extract(absolute_path)
raise CommandError("couldn't handle %s template %s." %
(self.app_or_project, template))
def validate_name(self, name, app_or_project):
if name is None:
raise CommandError("you must provide %s %s name" % (
"an" if app_or_project == "app" else "a", app_or_project))
# If it's not a valid directory name.
if not re.search(r'^[_a-zA-Z]\w*$', name):
if not re.search(r'^[_a-zA-Z]', name):
message = 'make sure the name begins with a letter or underscore'
else:
message = 'use only numbers, letters and underscores'
raise CommandError("%r is not a valid %s name. Please %s." %
(name, app_or_project, message))
def download(self, url):
def cleanup_url(url):
tmp = url.rstrip('/')
filename = tmp.split('/')[-1]
if url.endswith('/'):
display_url = tmp + '/'
else:
display_url = url
return filename, display_url
prefix = 'django_%s_template_' % self.app_or_project
tempdir = tempfile.mkdtemp(prefix=prefix, suffix='_download')
self.paths_to_remove.append(tempdir)
filename, display_url = cleanup_url(url)
if self.verbosity >= 2:
self.stdout.write("Downloading %s\n" % display_url)
try:
the_path, info = urlretrieve(url, path.join(tempdir, filename))
except IOError as e:
raise CommandError("couldn't download URL %s to %s: %s" %
(url, filename, e))
used_name = the_path.split('/')[-1]
# Trying to get better name from response headers
content_disposition = info.get('content-disposition')
if content_disposition:
_, params = cgi.parse_header(content_disposition)
guessed_filename = params.get('filename') or used_name
else:
guessed_filename = used_name
# Falling back to content type guessing
ext = self.splitext(guessed_filename)[1]
content_type = info.get('content-type')
if not ext and content_type:
ext = mimetypes.guess_extension(content_type)
if ext:
guessed_filename += ext
# Move the temporary file to a filename that has better
        # chances of being recognized by the archive utils
if used_name != guessed_filename:
guessed_path = path.join(tempdir, guessed_filename)
shutil.move(the_path, guessed_path)
return guessed_path
# Giving up
return the_path
def splitext(self, the_path):
base, ext = posixpath.splitext(the_path)
if base.lower().endswith('.tar'):
ext = base[-4:] + ext
base = base[:-4]
return base, ext
def extract(self, filename):
prefix = 'django_%s_template_' % self.app_or_project
tempdir = tempfile.mkdtemp(prefix=prefix, suffix='_extract')
self.paths_to_remove.append(tempdir)
if self.verbosity >= 2:
self.stdout.write("Extracting %s\n" % filename)
try:
archive.extract(filename, tempdir)
return tempdir
except (archive.ArchiveException, IOError) as e:
raise CommandError("couldn't extract file %s to %s: %s" %
(filename, tempdir, e))
def is_url(self, template):
if ':' not in template:
return False
scheme = template.split(':', 1)[0].lower()
return scheme in self.url_schemes
def make_writeable(self, filename):
if sys.platform.startswith('java'):
return
if not os.access(filename, os.W_OK):
st = os.stat(filename)
new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR
os.chmod(filename, new_permissions)
| true
| true
|
7904b778d9b92de92c48d8a78e68442549089945
| 1,328
|
py
|
Python
|
posts/models.py
|
SergeyKorobenkov/hw05_final
|
6ab9c2a3cb5eaa319860fa3e2947ea664db6016d
|
[
"MIT"
] | null | null | null |
posts/models.py
|
SergeyKorobenkov/hw05_final
|
6ab9c2a3cb5eaa319860fa3e2947ea664db6016d
|
[
"MIT"
] | 8
|
2021-04-08T21:57:32.000Z
|
2022-02-10T10:49:21.000Z
|
posts/models.py
|
SergeyKorobenkov/hw05_final
|
6ab9c2a3cb5eaa319860fa3e2947ea664db6016d
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth import get_user_model
User = get_user_model()
class Group(models.Model):
title = models.CharField(max_length=200)
slug = models.SlugField(unique=True)
description = models.TextField()
class Post(models.Model):
text = models.TextField()
pub_date = models.DateTimeField("date published", auto_now_add=True)
author = models.ForeignKey(User, on_delete=models.CASCADE, related_name="post_author")
group = models.ForeignKey(Group, on_delete=models.CASCADE, blank=True, null=True)
image = models.ImageField(upload_to='posts/', blank=True, null=True)
class Comment(models.Model):
post = models.ForeignKey(Post, on_delete=models.CASCADE, related_name='comments')
author = models.ForeignKey(User, on_delete=models.CASCADE, related_name='comments')
text = models.TextField()
created = models.DateTimeField('Дата и время публикации', auto_now_add=True, db_index=True)
def __str__(self):
return self.text
class Follow(models.Model):
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='follower')  # the user who subscribes
    author = models.ForeignKey(User, on_delete=models.CASCADE, related_name='following')  # the author being followed
def __str__(self):
        return f'{self.user} follows {self.author}'
| 36.888889
| 119
| 0.743223
|
from django.db import models
from django.contrib.auth import get_user_model
User = get_user_model()
class Group(models.Model):
title = models.CharField(max_length=200)
slug = models.SlugField(unique=True)
description = models.TextField()
class Post(models.Model):
text = models.TextField()
pub_date = models.DateTimeField("date published", auto_now_add=True)
author = models.ForeignKey(User, on_delete=models.CASCADE, related_name="post_author")
group = models.ForeignKey(Group, on_delete=models.CASCADE, blank=True, null=True)
image = models.ImageField(upload_to='posts/', blank=True, null=True)
class Comment(models.Model):
post = models.ForeignKey(Post, on_delete=models.CASCADE, related_name='comments')
author = models.ForeignKey(User, on_delete=models.CASCADE, related_name='comments')
text = models.TextField()
created = models.DateTimeField('Дата и время публикации', auto_now_add=True, db_index=True)
def __str__(self):
return self.text
class Follow(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='follower')
author = models.ForeignKey(User, on_delete=models.CASCADE, related_name='following')
def __str__(self):
        return f'{self.user} follows {self.author}'
| true
| true
|
7904b816d584f80681ac44108e8d394e8df61fe7
| 8,558
|
py
|
Python
|
config/settings/production.py
|
Musyimi97/veritasLtd
|
5f764eb6fad87de3419ce85461467c402e8e74ca
|
[
"MIT"
] | 1
|
2019-08-03T16:42:10.000Z
|
2019-08-03T16:42:10.000Z
|
config/settings/production.py
|
Musyimi97/veritasLtd
|
5f764eb6fad87de3419ce85461467c402e8e74ca
|
[
"MIT"
] | 6
|
2020-06-05T22:25:15.000Z
|
2021-06-09T18:25:38.000Z
|
config/settings/production.py
|
Musyimi97/veritasLtd
|
5f764eb6fad87de3419ce85461467c402e8e74ca
|
[
"MIT"
] | null | null | null |
import logging
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
from sentry_sdk.integrations.logging import LoggingIntegration
from sentry_sdk.integrations.celery import CeleryIntegration
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env("DJANGO_SECRET_KEY")
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["veritas.ke"])
# DATABASES
# ------------------------------------------------------------------------------
DATABASES["default"] = env.db("DATABASE_URL") # noqa F405
DATABASES["default"]["ATOMIC_REQUESTS"] = True # noqa F405
DATABASES["default"]["CONN_MAX_AGE"] = env.int("CONN_MAX_AGE", default=60) # noqa F405
# CACHES
# ------------------------------------------------------------------------------
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": env("REDIS_URL"),
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
            # Mimicking memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
"IGNORE_EXCEPTIONS": True,
},
}
}
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
# TODO: set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = 60
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True
)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
SECURE_HSTS_PRELOAD = env.bool("DJANGO_SECURE_HSTS_PRELOAD", default=True)
# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True
)
# STORAGES
# ------------------------------------------------------------------------------
# https://django-storages.readthedocs.io/en/latest/#installation
INSTALLED_APPS += ["storages"] # noqa F405
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = env("DJANGO_AWS_ACCESS_KEY_ID")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_SECRET_ACCESS_KEY = env("DJANGO_AWS_SECRET_ACCESS_KEY")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_STORAGE_BUCKET_NAME = env("DJANGO_AWS_STORAGE_BUCKET_NAME")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_QUERYSTRING_AUTH = False
# DO NOT change these unless you know what you're doing.
_AWS_EXPIRY = 60 * 60 * 24 * 7
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_OBJECT_PARAMETERS = {
'CacheControl': 'max-age=86400',
}
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_DEFAULT_ACL = None
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_REGION_NAME = env("DJANGO_AWS_S3_REGION_NAME", default=None)
AWS_S3_ENDPOINT_URL = env("AWS_S3_ENDPOINT_URL")
# STATIC
# ------------------------
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# MEDIA
# ------------------------------------------------------------------------------
MEDIAFILES_STORAGE="storages.backends.s3boto3.S3Boto3Storage"
# region http://stackoverflow.com/questions/10390244/
# Full-fledge class: https://stackoverflow.com/a/18046120/104731
from storages.backends.s3boto3 import S3Boto3Storage # noqa E402
class StaticRootS3Boto3Storage(S3Boto3Storage):
location = "static"
default_acl = "public-read"
class MediaRootS3Boto3Storage(S3Boto3Storage):
location = "media"
file_overwrite = False
# endregion
DEFAULT_FILE_STORAGE = "config.settings.production.MediaRootS3Boto3Storage"
MEDIA_URL = f"https://{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com/media/"
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[0]["OPTIONS"]["loaders"] = [ # noqa F405
(
"django.template.loaders.cached.Loader",
[
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
)
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email
DEFAULT_FROM_EMAIL = env(
"DJANGO_DEFAULT_FROM_EMAIL", default="Veritas <noreply@veritas.ke>"
)
# https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = env("DJANGO_SERVER_EMAIL", default=DEFAULT_FROM_EMAIL)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = env(
"DJANGO_EMAIL_SUBJECT_PREFIX", default="[Veritas]"
)
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
ADMIN_URL = env("DJANGO_ADMIN_URL")
# Anymail (Mailgun)
# ------------------------------------------------------------------------------
# https://anymail.readthedocs.io/en/stable/installation/#installing-anymail
INSTALLED_APPS += ["anymail"] # noqa F405
EMAIL_BACKEND = "anymail.backends.mailgun.EmailBackend"
# https://anymail.readthedocs.io/en/stable/installation/#anymail-settings-reference
ANYMAIL = {
"MAILGUN_API_KEY": env("MAILGUN_API_KEY"),
"MAILGUN_SENDER_DOMAIN": env("MAILGUN_DOMAIN"),
"MAILGUN_API_URL": env("MAILGUN_API_URL", default="https://api.mailgun.net/v3"),
}
# WhiteNoise
# ------------------------------------------------------------------------------
# http://whitenoise.evans.io/en/latest/django.html#enable-whitenoise
MIDDLEWARE.insert(1, "whitenoise.middleware.WhiteNoiseMiddleware") # noqa F405
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": True,
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
}
},
"root": {"level": "INFO", "handlers": ["console"]},
"loggers": {
"django.db.backends": {
"level": "ERROR",
"handlers": ["console"],
"propagate": False,
},
# Errors logged by the SDK itself
"sentry_sdk": {"level": "ERROR", "handlers": ["console"], "propagate": False},
"django.security.DisallowedHost": {
"level": "ERROR",
"handlers": ["console"],
"propagate": False,
},
},
}
# Sentry
# ------------------------------------------------------------------------------
SENTRY_DSN = env("SENTRY_DSN")
SENTRY_LOG_LEVEL = env.int("DJANGO_SENTRY_LOG_LEVEL", logging.INFO)
sentry_logging = LoggingIntegration(
level=SENTRY_LOG_LEVEL, # Capture info and above as breadcrumbs
event_level=logging.ERROR, # Send errors as events
)
sentry_sdk.init(
dsn=SENTRY_DSN,
integrations=[sentry_logging, DjangoIntegration(), CeleryIntegration()],
)
# Your stuff...
# ------------------------------------------------------------------------------
| 39.256881
| 89
| 0.6276
|
import logging
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
from sentry_sdk.integrations.logging import LoggingIntegration
from sentry_sdk.integrations.celery import CeleryIntegration
from .base import *
from .base import env
= env("DJANGO_SECRET_KEY")
= env.list("DJANGO_ALLOWED_HOSTS", default=["veritas.ke"])
DATABASES["default"] = env.db("DATABASE_URL")
DATABASES["default"]["ATOMIC_REQUESTS"] = True
DATABASES["default"]["CONN_MAX_AGE"] = env.int("CONN_MAX_AGE", default=60)
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": env("REDIS_URL"),
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
": True,
},
}
}
= ("HTTP_X_FORWARDED_PROTO", "https")
= env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
= True
= True
env.bool(
"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True
)
= env.bool("DJANGO_SECURE_HSTS_PRELOAD", default=True)
env.bool(
"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True
)
PS += ["storages"]
SS_KEY_ID = env("DJANGO_AWS_ACCESS_KEY_ID")
ET_ACCESS_KEY = env("DJANGO_AWS_SECRET_ACCESS_KEY")
AGE_BUCKET_NAME = env("DJANGO_AWS_STORAGE_BUCKET_NAME")
YSTRING_AUTH = False
_AWS_EXPIRY = 60 * 60 * 24 * 7
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_OBJECT_PARAMETERS = {
'CacheControl': 'max-age=86400',
}
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_DEFAULT_ACL = None
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_REGION_NAME = env("DJANGO_AWS_S3_REGION_NAME", default=None)
AWS_S3_ENDPOINT_URL = env("AWS_S3_ENDPOINT_URL")
# STATIC
# ------------------------
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# MEDIA
# ------------------------------------------------------------------------------
MEDIAFILES_STORAGE="storages.backends.s3boto3.S3Boto3Storage"
# region http://stackoverflow.com/questions/10390244/
# Full-fledge class: https://stackoverflow.com/a/18046120/104731
from storages.backends.s3boto3 import S3Boto3Storage # noqa E402
class StaticRootS3Boto3Storage(S3Boto3Storage):
location = "static"
default_acl = "public-read"
class MediaRootS3Boto3Storage(S3Boto3Storage):
location = "media"
file_overwrite = False
# endregion
DEFAULT_FILE_STORAGE = "config.settings.production.MediaRootS3Boto3Storage"
MEDIA_URL = f"https://{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com/media/"
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[0]["OPTIONS"]["loaders"] = [ # noqa F405
(
"django.template.loaders.cached.Loader",
[
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
)
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email
DEFAULT_FROM_EMAIL = env(
"DJANGO_DEFAULT_FROM_EMAIL", default="Veritas <noreply@veritas.ke>"
)
# https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = env("DJANGO_SERVER_EMAIL", default=DEFAULT_FROM_EMAIL)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = env(
"DJANGO_EMAIL_SUBJECT_PREFIX", default="[Veritas]"
)
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
ADMIN_URL = env("DJANGO_ADMIN_URL")
# Anymail (Mailgun)
# ------------------------------------------------------------------------------
# https://anymail.readthedocs.io/en/stable/installation/#installing-anymail
INSTALLED_APPS += ["anymail"] # noqa F405
EMAIL_BACKEND = "anymail.backends.mailgun.EmailBackend"
# https://anymail.readthedocs.io/en/stable/installation/#anymail-settings-reference
ANYMAIL = {
"MAILGUN_API_KEY": env("MAILGUN_API_KEY"),
"MAILGUN_SENDER_DOMAIN": env("MAILGUN_DOMAIN"),
"MAILGUN_API_URL": env("MAILGUN_API_URL", default="https://api.mailgun.net/v3"),
}
# WhiteNoise
# ------------------------------------------------------------------------------
# http://whitenoise.evans.io/en/latest/django.html#enable-whitenoise
MIDDLEWARE.insert(1, "whitenoise.middleware.WhiteNoiseMiddleware") # noqa F405
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": True,
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
}
},
"root": {"level": "INFO", "handlers": ["console"]},
"loggers": {
"django.db.backends": {
"level": "ERROR",
"handlers": ["console"],
"propagate": False,
},
# Errors logged by the SDK itself
"sentry_sdk": {"level": "ERROR", "handlers": ["console"], "propagate": False},
"django.security.DisallowedHost": {
"level": "ERROR",
"handlers": ["console"],
"propagate": False,
},
},
}
# Sentry
# ------------------------------------------------------------------------------
SENTRY_DSN = env("SENTRY_DSN")
SENTRY_LOG_LEVEL = env.int("DJANGO_SENTRY_LOG_LEVEL", logging.INFO)
sentry_logging = LoggingIntegration(
level=SENTRY_LOG_LEVEL, # Capture info and above as breadcrumbs
event_level=logging.ERROR, # Send errors as events
)
sentry_sdk.init(
dsn=SENTRY_DSN,
integrations=[sentry_logging, DjangoIntegration(), CeleryIntegration()],
)
# Your stuff...
# ------------------------------------------------------------------------------
| true
| true
|
7904b86d8abe58e7e4529517770077d8ca8f90b5
| 140
|
py
|
Python
|
metrics/outputs/__init__.py
|
sebMathieu/code_metrics
|
f188041c8f2c0950c5f63a1f719cdb05aaeb42c9
|
[
"MIT"
] | null | null | null |
metrics/outputs/__init__.py
|
sebMathieu/code_metrics
|
f188041c8f2c0950c5f63a1f719cdb05aaeb42c9
|
[
"MIT"
] | null | null | null |
metrics/outputs/__init__.py
|
sebMathieu/code_metrics
|
f188041c8f2c0950c5f63a1f719cdb05aaeb42c9
|
[
"MIT"
] | null | null | null |
"""
Output formats.
"""
from .rst import RST
from .console import Console
from .json import JSON
from .svg import SVG
from .png import PNG
| 14
| 28
| 0.735714
|
from .rst import RST
from .console import Console
from .json import JSON
from .svg import SVG
from .png import PNG
| true
| true
|
7904b8a2cdca64039dc37b164ff5dca05d0dc8cc
| 319
|
py
|
Python
|
admob/config.py
|
Mavhod/GodotAdmob
|
d603f259fba414f22fc6e3ea977cbcdc36ef460e
|
[
"MIT"
] | 76
|
2015-02-12T15:25:34.000Z
|
2021-11-05T03:48:54.000Z
|
admob/config.py
|
Mavhod/GodotAdmob
|
d603f259fba414f22fc6e3ea977cbcdc36ef460e
|
[
"MIT"
] | 5
|
2016-01-18T02:58:52.000Z
|
2016-12-16T16:03:26.000Z
|
admob/config.py
|
Mavhod/GodotAdmob
|
d603f259fba414f22fc6e3ea977cbcdc36ef460e
|
[
"MIT"
] | 26
|
2015-01-28T21:25:02.000Z
|
2020-11-20T12:31:30.000Z
|
def can_build(plat):
return plat=="android"
def configure(env):
if (env['platform'] == 'android'):
env.android_add_dependency("compile 'com.google.android.gms:play-services-ads:8.3.0'")
env.android_add_java_dir("android")
env.android_add_to_manifest("android/AndroidManifestChunk.xml")
env.disable_module()
| 29
| 88
| 0.752351
|
def can_build(plat):
return plat=="android"
def configure(env):
if (env['platform'] == 'android'):
env.android_add_dependency("compile 'com.google.android.gms:play-services-ads:8.3.0'")
env.android_add_java_dir("android")
env.android_add_to_manifest("android/AndroidManifestChunk.xml")
env.disable_module()
| true
| true
|
7904b8e33c31f6d92956725446e35ad01c19f1d0
| 861
|
py
|
Python
|
src/scan_pdf/combine.py
|
wuan/scan_pdf
|
ebac89ff0c7be9266142904946b41f0f05e07413
|
[
"Apache-2.0"
] | null | null | null |
src/scan_pdf/combine.py
|
wuan/scan_pdf
|
ebac89ff0c7be9266142904946b41f0f05e07413
|
[
"Apache-2.0"
] | null | null | null |
src/scan_pdf/combine.py
|
wuan/scan_pdf
|
ebac89ff0c7be9266142904946b41f0f05e07413
|
[
"Apache-2.0"
] | null | null | null |
import logging
import os
import subprocess
logger = logging.getLogger(__name__)
class Combiner(object):
def __init__(self, options):
self.options = options
def combine(self, page_file_names):
output_file_name = self.options.output_file_name[0]
logger.info("combine %d pages into %s", len(page_file_names), output_file_name)
combine_args = ['pdfunite']
combine_args += page_file_names
combine_args += [os.path.basename(output_file_name)]
logger.debug("call: %s", " ".join(combine_args))
returncode = subprocess.call(combine_args)
if returncode != 0:
logger.error("combine failed: %s", " ".join(combine_args))
if not os.path.exists(output_file_name):
logger.error("output file '%s' does not exist", output_file_name)
return returncode
| 29.689655
| 87
| 0.663182
|
import logging
import os
import subprocess
logger = logging.getLogger(__name__)
class Combiner(object):
def __init__(self, options):
self.options = options
def combine(self, page_file_names):
output_file_name = self.options.output_file_name[0]
logger.info("combine %d pages into %s", len(page_file_names), output_file_name)
combine_args = ['pdfunite']
combine_args += page_file_names
combine_args += [os.path.basename(output_file_name)]
logger.debug("call: %s", " ".join(combine_args))
returncode = subprocess.call(combine_args)
if returncode != 0:
logger.error("combine failed: %s", " ".join(combine_args))
if not os.path.exists(output_file_name):
logger.error("output file '%s' does not exist", output_file_name)
return returncode
| true
| true
|
7904b9125f4918bcc6cf4c739f53a33485849458
| 2,383
|
py
|
Python
|
lightning_conceptnet/nodes.py
|
ldtoolkit/lightning-conceptnet
|
f2be7209ef90f98c08df23892529227a2a45882e
|
[
"Apache-2.0"
] | null | null | null |
lightning_conceptnet/nodes.py
|
ldtoolkit/lightning-conceptnet
|
f2be7209ef90f98c08df23892529227a2a45882e
|
[
"Apache-2.0"
] | null | null | null |
lightning_conceptnet/nodes.py
|
ldtoolkit/lightning-conceptnet
|
f2be7209ef90f98c08df23892529227a2a45882e
|
[
"Apache-2.0"
] | null | null | null |
from lightning_conceptnet.uri import concept_uri
from wordfreq import simple_tokenize
from wordfreq.preprocess import preprocess_text
STOPWORDS = [
'the', 'a', 'an'
]
DROP_FIRST = ['to']
def english_filter(tokens):
"""
Given a list of tokens, remove a small list of English stopwords.
"""
non_stopwords = [token for token in tokens if token not in STOPWORDS]
while non_stopwords and non_stopwords[0] in DROP_FIRST:
non_stopwords = non_stopwords[1:]
if non_stopwords:
return non_stopwords
else:
return tokens
def standardized_concept_uri(lang, text, *more):
"""
Make the appropriate URI for a concept in a particular language, including
removing English stopwords, normalizing the text in a way appropriate
to that language (using the text normalization from wordfreq), and joining
its tokens with underscores in a concept URI.
This text normalization can smooth over some writing differences: for
example, it removes vowel points from Arabic words, and it transliterates
Serbian written in the Cyrillic alphabet to the Latin alphabet so that it
can match other words written in Latin letters.
'more' contains information to distinguish word senses, such as a part
of speech or a WordNet domain. The items in 'more' get lowercased and
joined with underscores, but skip many of the other steps -- for example,
they won't have stopwords removed.
>>> standardized_concept_uri('en', 'this is a test')
'/c/en/this_is_test'
>>> standardized_concept_uri('en', 'this is a test', 'n', 'example phrase')
'/c/en/this_is_test/n/example_phrase'
>>> standardized_concept_uri('sh', 'симетрија')
'/c/sh/simetrija'
"""
lang = lang.lower()
if lang == 'en':
token_filter = english_filter
else:
token_filter = None
text = preprocess_text(text.replace('_', ' '), lang)
tokens = simple_tokenize(text)
if token_filter is not None:
tokens = token_filter(tokens)
norm_text = '_'.join(tokens)
more_text = []
for item in more:
if item is not None:
tokens = simple_tokenize(item.replace('_', ' '))
if token_filter is not None:
tokens = token_filter(tokens)
more_text.append('_'.join(tokens))
return concept_uri(lang, norm_text, *more_text)
| 34.042857
| 79
| 0.684431
|
from lightning_conceptnet.uri import concept_uri
from wordfreq import simple_tokenize
from wordfreq.preprocess import preprocess_text
STOPWORDS = [
'the', 'a', 'an'
]
DROP_FIRST = ['to']
def english_filter(tokens):
non_stopwords = [token for token in tokens if token not in STOPWORDS]
while non_stopwords and non_stopwords[0] in DROP_FIRST:
non_stopwords = non_stopwords[1:]
if non_stopwords:
return non_stopwords
else:
return tokens
def standardized_concept_uri(lang, text, *more):
lang = lang.lower()
if lang == 'en':
token_filter = english_filter
else:
token_filter = None
text = preprocess_text(text.replace('_', ' '), lang)
tokens = simple_tokenize(text)
if token_filter is not None:
tokens = token_filter(tokens)
norm_text = '_'.join(tokens)
more_text = []
for item in more:
if item is not None:
tokens = simple_tokenize(item.replace('_', ' '))
if token_filter is not None:
tokens = token_filter(tokens)
more_text.append('_'.join(tokens))
return concept_uri(lang, norm_text, *more_text)
| true
| true
|
7904b91e96d8e96213fffe27a374d056ae9b25f2
| 4,573
|
py
|
Python
|
deploy_gh_pages.py
|
follower/docs
|
bf920c47ae46cdc4f9f984fa8b6fdaa733749222
|
[
"MIT"
] | null | null | null |
deploy_gh_pages.py
|
follower/docs
|
bf920c47ae46cdc4f9f984fa8b6fdaa733749222
|
[
"MIT"
] | null | null | null |
deploy_gh_pages.py
|
follower/docs
|
bf920c47ae46cdc4f9f984fa8b6fdaa733749222
|
[
"MIT"
] | 1
|
2021-01-26T15:19:11.000Z
|
2021-01-26T15:19:11.000Z
|
import json
import os
import shutil
import tempfile
def copytree(src, dst, symlinks=False, ignore=None):
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
def call(command, ignore_error=False):
ret = os.system(command)
if ret != 0 and not ignore_error:
raise Exception("Command failed: %s" % command)
def clean_gh_pages():
call('git config remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*" 1>/dev/null')
call("git fetch origin -q")
call("git checkout gh-pages")
if os.path.exists("en"):
shutil.rmtree("en")
def build_and_copy(branch, folder_name, versions_available, themes_dir, validate_links=False):
call("git checkout %s" % branch)
call("git pull origin %s" % branch)
with open('versions.json', 'w') as f:
f.write(json.dumps(versions_available))
shutil.rmtree("_themes")
copytree(themes_dir, "_themes")
call("make html > /dev/null")
if validate_links:
call("make spelling > /dev/null")
call("make linkcheck")
call("make latexpdf > /dev/null")
tmp_dir = tempfile.mkdtemp()
copytree("_build/html/", tmp_dir)
shutil.copy2("_build/latex/conan.pdf", tmp_dir)
shutil.rmtree("_build")
# Go to deploy branch, copy new files and commit
call("git stash")
call("git stash drop || true")
call("git clean -d -f")
call("git checkout gh-pages")
if not os.path.exists("en"):
os.mkdir("en")
version_folders = ["en/%s" % folder_name]
if branch == "master":
version_folders.append("en/latest")
for version_folder in version_folders:
if os.path.exists(version_folder):
shutil.rmtree(version_folder)
os.mkdir(version_folder)
copytree(tmp_dir, version_folder)
call("git add -A .")
call("git commit --message 'committed version %s'" % folder_name, ignore_error=True)
def should_deploy():
if not os.getenv("TRAVIS_BRANCH", None) == "master":
print("Skipping deploy for not master branch")
return False
if os.getenv("TRAVIS_PULL_REQUEST", "") != "false":
print("Deploy skipped, This is a PR in the main repository")
return False
if not os.getenv("GITHUB_API_KEY"):
print("Deploy skipped, missing GITHUB_API_KEY. Is this a PR?")
return False
return True
def deploy():
call('rm -rf .git')
call('git init .')
call('git add .')
call('git checkout -b gh-pages')
call('git commit -m "Cleared web"')
call('git remote add origin-pages '
'https://%s@github.com/conan-io/docs.git > /dev/null 2>&1' % os.getenv("GITHUB_API_KEY"))
call('git push origin-pages gh-pages --force')
if __name__ == "__main__":
if should_deploy():
# Copy the _themes to be able to share them between old versions
themes_dir = tempfile.mkdtemp()
copytree("_themes", themes_dir)
clean_gh_pages()
versions_dict = {"master": "1.25",
"release/1.24.1": "1.24",
"release/1.23.0": "1.23",
"release/1.22.3": "1.22",
"release/1.21.3": "1.21",
"release/1.20.5": "1.20",
"release/1.19.3": "1.19",
"release/1.18.5": "1.18",
"release/1.17.2": "1.17",
"release/1.16.1": "1.16",
"release/1.15.2": "1.15",
"release/1.14.5": "1.14",
"release/1.13.3": "1.13",
"release/1.12.3": "1.12",
"release/1.11.2": "1.11",
"release/1.10.2": "1.10",
"release/1.9.4": "1.9",
"release/1.8.4": "1.8",
"release/1.7.4": "1.7",
"release/1.6.1": "1.6",
"release/1.5.2": "1.5",
"release/1.4.5": "1.4",
"release/1.3.3": "1.3"}
for branch, folder_name in versions_dict.items():
print("Building {}...".format(branch))
build_and_copy(branch, folder_name, versions_dict, themes_dir)
deploy()
else:
call("make html > /dev/null")
call("make spelling")
call("make linkcheck")
| 31.321918
| 98
| 0.535535
|
import json
import os
import shutil
import tempfile
def copytree(src, dst, symlinks=False, ignore=None):
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
def call(command, ignore_error=False):
ret = os.system(command)
if ret != 0 and not ignore_error:
raise Exception("Command failed: %s" % command)
def clean_gh_pages():
call('git config remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*" 1>/dev/null')
call("git fetch origin -q")
call("git checkout gh-pages")
if os.path.exists("en"):
shutil.rmtree("en")
def build_and_copy(branch, folder_name, versions_available, themes_dir, validate_links=False):
call("git checkout %s" % branch)
call("git pull origin %s" % branch)
with open('versions.json', 'w') as f:
f.write(json.dumps(versions_available))
shutil.rmtree("_themes")
copytree(themes_dir, "_themes")
call("make html > /dev/null")
if validate_links:
call("make spelling > /dev/null")
call("make linkcheck")
call("make latexpdf > /dev/null")
tmp_dir = tempfile.mkdtemp()
copytree("_build/html/", tmp_dir)
shutil.copy2("_build/latex/conan.pdf", tmp_dir)
shutil.rmtree("_build")
call("git stash")
call("git stash drop || true")
call("git clean -d -f")
call("git checkout gh-pages")
if not os.path.exists("en"):
os.mkdir("en")
version_folders = ["en/%s" % folder_name]
if branch == "master":
version_folders.append("en/latest")
for version_folder in version_folders:
if os.path.exists(version_folder):
shutil.rmtree(version_folder)
os.mkdir(version_folder)
copytree(tmp_dir, version_folder)
call("git add -A .")
call("git commit --message 'committed version %s'" % folder_name, ignore_error=True)
def should_deploy():
if not os.getenv("TRAVIS_BRANCH", None) == "master":
print("Skipping deploy for not master branch")
return False
if os.getenv("TRAVIS_PULL_REQUEST", "") != "false":
print("Deploy skipped, This is a PR in the main repository")
return False
if not os.getenv("GITHUB_API_KEY"):
print("Deploy skipped, missing GITHUB_API_KEY. Is this a PR?")
return False
return True
def deploy():
call('rm -rf .git')
call('git init .')
call('git add .')
call('git checkout -b gh-pages')
call('git commit -m "Cleared web"')
call('git remote add origin-pages '
'https://%s@github.com/conan-io/docs.git > /dev/null 2>&1' % os.getenv("GITHUB_API_KEY"))
call('git push origin-pages gh-pages --force')
if __name__ == "__main__":
if should_deploy():
themes_dir = tempfile.mkdtemp()
copytree("_themes", themes_dir)
clean_gh_pages()
versions_dict = {"master": "1.25",
"release/1.24.1": "1.24",
"release/1.23.0": "1.23",
"release/1.22.3": "1.22",
"release/1.21.3": "1.21",
"release/1.20.5": "1.20",
"release/1.19.3": "1.19",
"release/1.18.5": "1.18",
"release/1.17.2": "1.17",
"release/1.16.1": "1.16",
"release/1.15.2": "1.15",
"release/1.14.5": "1.14",
"release/1.13.3": "1.13",
"release/1.12.3": "1.12",
"release/1.11.2": "1.11",
"release/1.10.2": "1.10",
"release/1.9.4": "1.9",
"release/1.8.4": "1.8",
"release/1.7.4": "1.7",
"release/1.6.1": "1.6",
"release/1.5.2": "1.5",
"release/1.4.5": "1.4",
"release/1.3.3": "1.3"}
for branch, folder_name in versions_dict.items():
print("Building {}...".format(branch))
build_and_copy(branch, folder_name, versions_dict, themes_dir)
deploy()
else:
call("make html > /dev/null")
call("make spelling")
call("make linkcheck")
| true
| true
|
7904b9a4efc32f95ac981147d443811bf80d46e4
| 684
|
py
|
Python
|
dags/test_dag_failure.py
|
GrokData/grok-airflow-dags
|
545c2fb9bc1a3653b0df5e112e1c672d1b3558f0
|
[
"MIT"
] | null | null | null |
dags/test_dag_failure.py
|
GrokData/grok-airflow-dags
|
545c2fb9bc1a3653b0df5e112e1c672d1b3558f0
|
[
"MIT"
] | null | null | null |
dags/test_dag_failure.py
|
GrokData/grok-airflow-dags
|
545c2fb9bc1a3653b0df5e112e1c672d1b3558f0
|
[
"MIT"
] | 1
|
2021-09-24T02:57:48.000Z
|
2021-09-24T02:57:48.000Z
|
import datetime as dt
from airflow.models import DAG
from airflow.operators.dummy import DummyOperator
from airflow.operators.python import PythonOperator
default_args = {
'start_date': dt.datetime.now() - dt.timedelta(days=7),
'owner': 'airflow'
}
def throw_error():
raise Exception('It failed!')
with DAG(dag_id='test_dag_failure', description='A DAG that always fails.', default_args=default_args, tags=['test'], schedule_interval=None) as dag:
should_succeed = DummyOperator(
task_id='should_succeed'
)
should_fail = PythonOperator(
task_id='should_fail',
python_callable=throw_error
)
should_succeed >> should_fail
| 23.586207
| 148
| 0.717836
|
import datetime as dt
from airflow.models import DAG
from airflow.operators.dummy import DummyOperator
from airflow.operators.python import PythonOperator
default_args = {
'start_date': dt.datetime.now() - dt.timedelta(days=7),
'owner': 'airflow'
}
def throw_error():
raise Exception('It failed!')
with DAG(dag_id='test_dag_failure', description='A DAG that always fails.', default_args=default_args, tags=['test'], schedule_interval=None) as dag:
should_succeed = DummyOperator(
task_id='should_succeed'
)
should_fail = PythonOperator(
task_id='should_fail',
python_callable=throw_error
)
should_succeed >> should_fail
| true
| true
|
7904b9ec43363d65c6c7499691923c70ca846c82
| 781
|
py
|
Python
|
setup.oci.py
|
busunkim96/cc-utils
|
aa864b1fad3061410907d6b93b8aee8cd25f33b5
|
[
"Apache-2.0"
] | 15
|
2018-04-18T13:25:30.000Z
|
2022-03-04T09:25:41.000Z
|
setup.oci.py
|
busunkim96/cc-utils
|
aa864b1fad3061410907d6b93b8aee8cd25f33b5
|
[
"Apache-2.0"
] | 221
|
2018-04-12T06:29:43.000Z
|
2022-03-27T03:01:40.000Z
|
setup.oci.py
|
busunkim96/cc-utils
|
aa864b1fad3061410907d6b93b8aee8cd25f33b5
|
[
"Apache-2.0"
] | 29
|
2018-04-11T14:42:23.000Z
|
2021-11-09T16:26:32.000Z
|
import setuptools
import os
own_dir = os.path.abspath(os.path.dirname(__file__))
def requirements():
with open(os.path.join(own_dir, 'requirements.oci.txt')) as f:
for line in f.readlines():
line = line.strip()
if not line or line.startswith('#'):
continue
yield line
def modules():
return [
]
def version():
with open(os.path.join(own_dir, 'VERSION')) as f:
return f.read().strip()
setuptools.setup(
name='gardener-oci',
version=version(),
description='gardener OCI lib',
python_requires='>=3.9.*',
py_modules=modules(),
packages=['oci'],
package_data={
'ci':['version'],
},
install_requires=list(requirements()),
entry_points={
},
)
| 19.04878
| 66
| 0.583867
|
import setuptools
import os
own_dir = os.path.abspath(os.path.dirname(__file__))
def requirements():
with open(os.path.join(own_dir, 'requirements.oci.txt')) as f:
for line in f.readlines():
line = line.strip()
if not line or line.startswith('#'):
continue
yield line
def modules():
return [
]
def version():
with open(os.path.join(own_dir, 'VERSION')) as f:
return f.read().strip()
setuptools.setup(
name='gardener-oci',
version=version(),
description='gardener OCI lib',
python_requires='>=3.9.*',
py_modules=modules(),
packages=['oci'],
package_data={
'ci':['version'],
},
install_requires=list(requirements()),
entry_points={
},
)
| true
| true
|
7904badcd2a11493ff4f9fd979f7602edbf8b9e7
| 806
|
py
|
Python
|
yamlHighlighter.py
|
ShardulNalegave/pycode
|
6050c3c44dad4c460ecea32352429bc463ac8009
|
[
"MIT"
] | 5
|
2018-06-02T11:07:07.000Z
|
2020-10-27T00:26:54.000Z
|
yamlHighlighter.py
|
ShardulNalegave/pycode
|
6050c3c44dad4c460ecea32352429bc463ac8009
|
[
"MIT"
] | null | null | null |
yamlHighlighter.py
|
ShardulNalegave/pycode
|
6050c3c44dad4c460ecea32352429bc463ac8009
|
[
"MIT"
] | 1
|
2020-08-16T14:38:40.000Z
|
2020-08-16T14:38:40.000Z
|
import wx.stc as stc
def highlight(editor, styles, faces):
editor.SetLexer(stc.STC_LEX_YAML)
editor.StyleSetSpec(stc.STC_YAML_DEFAULT, "fore:" +
styles["default"] + ",face:%(helv)s,size:%(size)d" % faces)
editor.StyleSetSpec(stc.STC_YAML_COMMENT, "fore:" +
styles["comment"] + ",face:%(helv)s,size:%(size)d" % faces)
editor.StyleSetSpec(stc.STC_YAML_ERROR, "fore:" +
styles["error"] + ",face:%(helv)s,size:%(size)d" % faces)
editor.StyleSetSpec(stc.STC_YAML_IDENTIFIER, "fore:" +
styles["identifier"] + ",face:%(helv)s,size:%(size)d" % faces)
editor.StyleSetSpec(stc.STC_YAML_NUMBER, "fore:" +
styles["number"] + ",face:%(helv)s,size:%(size)d" % faces)
| 35.043478
| 86
| 0.569479
|
import wx.stc as stc
def highlight(editor, styles, faces):
editor.SetLexer(stc.STC_LEX_YAML)
editor.StyleSetSpec(stc.STC_YAML_DEFAULT, "fore:" +
styles["default"] + ",face:%(helv)s,size:%(size)d" % faces)
editor.StyleSetSpec(stc.STC_YAML_COMMENT, "fore:" +
styles["comment"] + ",face:%(helv)s,size:%(size)d" % faces)
editor.StyleSetSpec(stc.STC_YAML_ERROR, "fore:" +
styles["error"] + ",face:%(helv)s,size:%(size)d" % faces)
editor.StyleSetSpec(stc.STC_YAML_IDENTIFIER, "fore:" +
styles["identifier"] + ",face:%(helv)s,size:%(size)d" % faces)
editor.StyleSetSpec(stc.STC_YAML_NUMBER, "fore:" +
styles["number"] + ",face:%(helv)s,size:%(size)d" % faces)
| true
| true
|