content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Tuple
import torch
import torch.distributed as distrib
from ss_baselines.saven.models.rollout_storage import RolloutStorage
from ss_baselines.saven.ppo.ppo import PPO
EPS_PPO = 1e-5
def distributed_mean_and_var(
values: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
r"""Computes the mean and variances of a tensor over multiple workers.
This method is equivalent to first collecting all versions of values and
then computing the mean and variance locally over that
:param values: (*,) shaped tensors to compute mean and variance over. Assumed
to be solely the workers local copy of this tensor,
the resultant mean and variance will be computed
over _all_ workers version of this tensor.
"""
assert distrib.is_initialized(), "Distributed must be initialized"
world_size = distrib.get_world_size()
mean = values.mean()
distrib.all_reduce(mean)
mean /= world_size
sq_diff = (values - mean).pow(2).mean()
distrib.all_reduce(sq_diff)
var = sq_diff / world_size
return mean, var
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
15069,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
198,
2,
1439,
2489,
10395,
13,
198,
198,
2,
770,
2723,
2438,
318,
11971,
739,
262,
5964,
1043,
287,
262,
1... | 2.867647 | 476 |
from netapp.netapp_object import NetAppObject
class Group2ViewInfo(NetAppObject):
"""
2nd nested typedef at level 1
"""
_field_5 = None
@property
def field_5(self):
"""
Generic/Dummy Field 5
Attributes: required-for-create, modifiable
"""
return self._field_5
@field_5.setter
_field_6 = None
@property
def field_6(self):
"""
Generic/Dummy Field 6
Attributes: required-for-create, modifiable
"""
return self._field_6
@field_6.setter
_field_7 = None
@property
def field_7(self):
"""
Generic/Dummy Field 7
Attributes: non-creatable, non-modifiable
"""
return self._field_7
@field_7.setter
_field_8 = None
@property
def field_8(self):
"""
Generic/Dummy Field 8
Attributes: non-creatable, non-modifiable
"""
return self._field_8
@field_8.setter
@staticmethod
@staticmethod
| [
6738,
2010,
1324,
13,
3262,
1324,
62,
15252,
1330,
3433,
4677,
10267,
198,
198,
4871,
4912,
17,
7680,
12360,
7,
7934,
4677,
10267,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
362,
358,
28376,
25683,
891,
379,
1241,
352,
198,
... | 2.081836 | 501 |
if __name__ == '__main__':
main()
| [
201,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
220,
201,
198,
220,
220,
220,
1388,
3419,
201,
198
] | 1.954545 | 22 |
# -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'bookwindow.ui'
##
## Created by: Qt User Interface Compiler version 5.14.0
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import (QCoreApplication, QMetaObject, QObject, QPoint,
QRect, QSize, QUrl, Qt)
from PySide2.QtGui import (QBrush, QColor, QConicalGradient, QFont,
QFontDatabase, QIcon, QLinearGradient, QPalette, QPainter, QPixmap,
QRadialGradient)
from PySide2.QtWidgets import *
# setupUi
# retranslateUi
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
29113,
29113,
14468,
198,
2235,
5178,
7560,
422,
3555,
12454,
2393,
705,
2070,
17497,
13,
9019,
6,
198,
2235,
198,
2235,
15622,
416,
25,
33734,
11787,
26491,
3082,
... | 3.324074 | 216 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from machineService import MachineService
from configService import ConfigService
from downloadService import DownloadService
from installService import InstallService
from envService import EnvService
from buildService import BuildService
from runService import RunService
from perfService import PerfService
from testService import TestService
from benchService import BenchmarkService
from containerService import ContainerService
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
220,
198,
198,
6738,
4572,
16177,
1330,
10850,
16177,
198,
6738,
4566,
16177,
1330,
17056,
16177,
198,
6738,
4321,
16177,
... | 4.556604 | 106 |
import graphene
from ..types import SynProject
from core import Synapse
class SynProjectQuery(graphene.ObjectType):
"""
Defines all the SynProject queries.
"""
syn_project = graphene.Field(
SynProject,
id=graphene.String(required=True)
)
| [
11748,
42463,
198,
6738,
11485,
19199,
1330,
16065,
16775,
198,
6738,
4755,
1330,
16065,
7512,
628,
198,
4871,
16065,
16775,
20746,
7,
70,
2416,
29473,
13,
10267,
6030,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
2896,
1127,
47... | 2.76 | 100 |
from paramiko import SSHClient, AutoAddPolicy
from paramiko.auth_handler import AuthenticationException
from paramiko.ssh_exception import NoValidConnectionsError
class Config(object):
"""Worker access data"""
class Worker(object):
"""Worker Object to connect and execute commands from the 'chief' worker"""
def connect(self):
"""Connect to the worker"""
if self.client is None:
try:
client = SSHClient()
client.set_missing_host_key_policy(self.policy)
client.connect(hostname=self.ip,
port=self.port,
username=self.user,
password=self.pwd)
except AuthenticationException:
print("Authentication failed!")
except NoValidConnectionsError:
print("Connection failed!")
finally:
client.exec_command("hostnamectl")
return client
return self.client
def exec_cmd(self, cmd, inBackground=False, timeout=None):
"""Execute command and return status and output"""
""" status 0 means no error"""
status=0
stdout='Process run in background'
self.client = self.connect()
if inBackground:
transport = self.client.get_transport()
channel = transport.open_session()
channel.setblocking(0)
channel.exec_command(cmd)
else:
stdin, stdout, stderr = self.client.exec_command(cmd)
status = stdout.channel.recv_exit_status()
if status != 0:
stdout = stderr
return status, stdout
| [
6738,
5772,
12125,
1330,
33825,
11792,
11,
11160,
4550,
36727,
198,
6738,
5772,
12125,
13,
18439,
62,
30281,
1330,
48191,
16922,
198,
6738,
5772,
12125,
13,
45824,
62,
1069,
4516,
1330,
1400,
47139,
13313,
507,
12331,
198,
198,
4871,
1705... | 2.175284 | 793 |
from pyspark.sql import SparkSession
import os
| [
6738,
279,
893,
20928,
13,
25410,
1330,
17732,
36044,
198,
11748,
28686,
628,
198
] | 3.5 | 14 |
import numpy as np
import math
from feature_mining.em_base import ExpectationMaximization
class ExpectationMaximizationOriginal(ExpectationMaximization):
"""
Original EM Algorithm as developed by Santu.
"""
if __name__ == '__main__':
em = ExpectationMaximizationOriginal()
em.em()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
10688,
198,
6738,
3895,
62,
45374,
13,
368,
62,
8692,
1330,
23600,
341,
11518,
320,
1634,
628,
198,
4871,
23600,
341,
11518,
320,
1634,
20556,
7,
3109,
806,
341,
11518,
320,
1634,
2599,
198,
... | 3.090909 | 99 |
import os
import pdb
import time
import errno
import shapely.wkt
import shapely.ops
from shapely import speedups
import shapely.prepared
import subprocess
import logging
import tempfile
import distutils.version
import atexit
import functools
import math
import numpy
import gdal
import gdalconst
from osgeo import gdal
from osgeo import osr
from osgeo import ogr
LOGGER = logging.getLogger('pygeoprocessing.geoprocessing')
LOGGER.addHandler(logging.NullHandler()) # silence logging by default
_LOGGING_PERIOD = 5.0 # min 5.0 seconds per update log message for the module
_DEFAULT_GTIFF_CREATION_OPTIONS = (
'TILED=YES', 'BIGTIFF=IF_SAFER', 'COMPRESS=LZW')
_LARGEST_ITERBLOCK = 2**20 # largest block for iterblocks to read in cells
# A dictionary to map the resampling method input string to the gdal type
_RESAMPLE_DICT = {
"nearest": gdal.GRA_NearestNeighbour,
"bilinear": gdal.GRA_Bilinear,
"cubic": gdal.GRA_Cubic,
"cubic_spline": gdal.GRA_CubicSpline,
"lanczos": gdal.GRA_Lanczos,
'mode': gdal.GRA_Mode,
'average': gdal.GRA_Average,
}
# GDAL 2.2.3 added a couple of useful interpolation values.
if (distutils.version.LooseVersion(gdal.__version__)
>= distutils.version.LooseVersion('2.2.3')):
_RESAMPLE_DICT.update({
'max': gdal.GRA_Max,
'min': gdal.GRA_Min,
'med': gdal.GRA_Med,
'q1': gdal.GRA_Q1,
'q3': gdal.GRA_Q3,
})
def convert_raster_to_ascii(path_input_raster, path_ascii_output, overwrite=True):
"""
Convert input raster to ascii format
Args:
path_input_raster:
path_ascii_output:
overwrite:
Returns:
"""
if overwrite and os.path.isfile(path_ascii_output):
os.remove(path_ascii_output)
# Open existing dataset
path_inp_ds = gdal.Open(path_input_raster)
# Open output format driver, gdal_translate --formats lists all of them
format_file = 'AAIGrid'
driver = gdal.GetDriverByName(format_file)
# Output to new format
path_dest_ds = driver.CreateCopy(path_ascii_output, path_inp_ds, 0)
# Close the datasets to flush to disk
path_dest_ds = None
path_inp_ds = None
def get_dataset_type(path_ds):
"""
Return dataset type e.g. GeoTiff
Args:
path_ds:
Returns:
"""
dataset = gdal.Open(path_ds, gdalconst.GA_ReadOnly)
dataset_type = dataset.GetDriver().LongName
dataset = None # Close dataset
return dataset_type
def get_dataset_datatype(path_ds):
"""
Return datatype of dataset e.g. GDT_UInt32
Args:
path_ds:
Returns:
"""
dataset = gdal.Open(path_ds, gdalconst.GA_ReadOnly)
band = dataset.GetRasterBand(1)
bandtype = gdal.GetDataTypeName(band.DataType) # UInt32
dataset = None # Close dataset
if bandtype == 'UInt32':
return gdalconst.GDT_UInt32
elif bandtype == 'UInt16':
return gdalconst.GDT_UInt16
elif bandtype == 'Float32':
return gdalconst.GDT_Float32
elif bandtype == 'Float64':
return gdalconst.GDT_Float64
elif bandtype == 'Int16':
return gdalconst.GDT_Int16
elif bandtype == 'Int32':
return gdalconst.GDT_Int32
elif bandtype == 'Unknown':
return gdalconst.GDT_Unknown
else:
return gdalconst.GDT_UInt32
def _gdal_to_numpy_type(band):
"""Calculates the equivalent numpy datatype from a GDAL raster band type
band - GDAL band
returns numpy equivalent of band.DataType"""
gdal_type_to_numpy_lookup = {
gdal.GDT_Int16: numpy.int16,
gdal.GDT_Int32: numpy.int32,
gdal.GDT_UInt16: numpy.uint16,
gdal.GDT_UInt32: numpy.uint32,
gdal.GDT_Float32: numpy.float32,
gdal.GDT_Float64: numpy.float64
}
if band.DataType in gdal_type_to_numpy_lookup:
return gdal_type_to_numpy_lookup[band.DataType]
#only class not in the lookup is a Byte but double check.
if band.DataType != gdal.GDT_Byte:
raise ValueError("Unknown DataType: %s" % str(band.DataType))
metadata = band.GetMetadata('IMAGE_STRUCTURE')
if 'PIXELTYPE' in metadata and metadata['PIXELTYPE'] == 'SIGNEDBYTE':
return numpy.int8
return numpy.uint8
def get_datatype_from_uri(dataset_uri):
"""
Returns the datatype for the first band from a gdal dataset
Args:
dataset_uri (string): a uri to a gdal dataset
Returns:
datatype: datatype for dataset band 1"""
dataset = gdal.Open(dataset_uri)
band = dataset.GetRasterBand(1)
datatype = band.DataType
#Make sure the dataset is closed and cleaned up
band = None
gdal.Dataset.__swig_destroy__(dataset)
dataset = None
return datatype
def get_row_col_from_uri(dataset_uri):
"""
Returns a tuple of number of rows and columns of that dataset uri.
Args:
dataset_uri (string): a uri to a gdal dataset
Returns:
tuple (tuple): 2-tuple (n_row, n_col) from dataset_uri"""
dataset = gdal.Open(dataset_uri)
n_rows = dataset.RasterYSize
n_cols = dataset.RasterXSize
#Make sure the dataset is closed and cleaned up
band = None
gdal.Dataset.__swig_destroy__(dataset)
dataset = None
return (n_rows, n_cols)
def calculate_raster_stats_uri(dataset_uri):
"""
Calculates and sets the min, max, stdev, and mean for the bands in
the raster.
Args:
dataset_uri (string): a uri to a GDAL raster dataset that will be
modified by having its band statistics set
Returns:
nothing
"""
dataset = gdal.Open(dataset_uri, gdal.GA_Update)
for band_number in range(dataset.RasterCount):
band = dataset.GetRasterBand(band_number + 1)
band.ComputeStatistics(False)
# Close and clean up dataset
band = None
gdal.Dataset.__swig_destroy__(dataset)
dataset = None
def get_statistics_from_uri(dataset_uri):
"""
Retrieves the min, max, mean, stdev from a GDAL Dataset
Args:
dataset_uri (string): a uri to a gdal dataset
Returns:
statistics: min, max, mean, stddev
"""
dataset = gdal.Open(dataset_uri)
band = dataset.GetRasterBand(1)
statistics = band.GetStatistics(0, 1)
# Close and clean up dataset
band = None
gdal.Dataset.__swig_destroy__(dataset)
dataset = None
return statistics
def get_cell_size_from_uri(dataset_uri):
"""
Returns the cell size of the dataset in meters. Raises an exception if the
raster is not square since this'll break most of the raster_utils
algorithms.
Args:
dataset_uri (string): uri to a gdal dataset
Returns:
size_meters: cell size of the dataset in meters"""
srs = osr.SpatialReference()
dataset = gdal.Open(dataset_uri)
if dataset is None:
raise IOError(
'File not found or not valid dataset type at: %s' % dataset_uri)
srs.SetProjection(dataset.GetProjection())
linear_units = srs.GetLinearUnits()
geotransform = dataset.GetGeoTransform()
# take absolute value since sometimes negative widths/heights
try:
numpy.testing.assert_approx_equal(
abs(geotransform[1]), abs(geotransform[5]))
size_meters = abs(geotransform[1]) * linear_units
except AssertionError as e:
size_meters = (
abs(geotransform[1]) + abs(geotransform[5])) / 2.0 * linear_units
# Close and clean up dataset
gdal.Dataset.__swig_destroy__(dataset)
dataset = None
return size_meters
def get_rat_as_dictionary_uri(dataset_uri):
"""
Returns the RAT of the first band of dataset as a dictionary.
Args:
dataset_uri: a GDAL dataset that has a RAT associated with the first band
Returns:
value (dictionary): a 2D dictionary where the first key is the column name and second is the row number
"""
dataset = gdal.Open(dataset_uri)
value = get_rat_as_dictionary(dataset)
# Make sure the dataset is closed and cleaned up
gdal.Dataset.__swig_destroy__(dataset)
dataset = None
return value
def get_rat_as_dictionary(dataset):
"""
Returns the RAT of the first band of dataset as a dictionary.
Args:
dataset: a GDAL dataset that has a RAT associated with the first band
Returns:
rat_dictionary (dictionary): a 2D dictionary where the first key is the column name and second is the row number
"""
pdb.set_trace()
band = dataset.GetRasterBand(1).GetDefaultRAT()
rat = band.GetDefaultRAT()
n_columns = rat.GetColumnCount()
n_rows = rat.GetRowCount()
rat_dictionary = {}
for col_index in range(n_columns):
# Initialize an empty list to store row data and figure out the type of data stored in that column.
col_type = rat.GetTypeOfCol(col_index)
col_name = rat.GetNameOfCol(col_index)
rat_dictionary[col_name] = []
# Now burn through all the rows to populate the column
for row_index in range(n_rows):
# This bit of python ugliness handles the known 3 types of gdal RAT fields.
if col_type == gdal.GFT_Integer:
value = rat.GetValueAsInt(row_index, col_index)
elif col_type == gdal.GFT_Real:
value = rat.GetValueAsDouble(row_index, col_index)
else:
# If the type is not int or real, default to a string, I think this is better than testing for a string
# and raising an exception if not
value = rat.GetValueAsString(row_index, col_index)
rat_dictionary[col_name].append(value)
return rat_dictionary
def get_raster_properties_uri(dataset_uri):
"""
Wrapper function for get_raster_properties() that passes in the dataset
URI instead of the datasets itself
Args:
dataset_uri (string): a URI to a GDAL raster dataset
Returns:
value (dictionary): a dictionary with the properties stored under relevant keys. The current list of things
returned is: width (w-e pixel resolution), height (n-s pixel resolution), XSize, YSize
"""
dataset = gdal.Open(dataset_uri)
value = get_raster_properties(dataset)
# Make sure the dataset is closed and cleaned up
gdal.Dataset.__swig_destroy__(dataset)
dataset = None
return value
def get_raster_properties(dataset):
"""
Get the width, height, X size, and Y size of the dataset and return the
values in a dictionary.
*This function can be expanded to return more properties if needed*
Args:
dataset: a GDAL raster dataset to get the properties from
Returns:
dataset_dict (dictionary): a dictionary with the properties stored under relevant keys. The current list of
things returned is: width (w-e pixel resolution), height (n-s pixel resolution), XSize, YSize
"""
dataset_dict = {}
geo_transform = dataset.GetGeoTransform()
dataset_dict['width'] = float(geo_transform[1])
dataset_dict['height'] = float(geo_transform[5])
dataset_dict['x_size'] = dataset.GetRasterBand(1).XSize
dataset_dict['y_size'] = dataset.GetRasterBand(1).YSize
return dataset_dict
def get_nodata_from_uri(dataset_uri):
"""
Returns the nodata value for the first band from a gdal dataset cast to its
correct numpy type.
Args:
dataset_uri (string): a uri to a gdal dataset
Returns:
nodata_cast: nodata value for dataset band 1
"""
dataset = gdal.Open(dataset_uri)
band = dataset.GetRasterBand(1)
nodata = band.GetNoDataValue()
if nodata is not None:
nodata = _gdal_to_numpy_type(band)(nodata)
else:
pass
band = None
gdal.Dataset.__swig_destroy__(dataset)
dataset = None
return nodata
def reclassify(rasterio_rst, reclass_list, output_filename, band=1, creation_options=dict()):
"""
MODIFIED: removed window walking... too slow..
this function will take a raster image as input and
reclassify its values given in the reclass_list.
The reclass list is a simple list of lists with the
following formatting:
[[begin_range, end_range, new_value]]
ie. [ [ 1,3,5 ],[ 3,4,6 ] ]
* which converts values 1 to 2.99999999 to 5
and values 3 to 3.99999999 to 6
all other values stay the same.
arguments:
rasterio_rst = raster image instance from rasterio package
reclass_list = list of reclassification values * see explanation
band = integer marking which band you wnat to return from the raster
default is 1.
creation_options = gdal style creation options, but in the rasterio implementation
* options must be in a dict where the key is the name of the gdal -co and the
value is the value passed to that flag.
i.e.
["COMPRESS=LZW"] becomes dict([('compress','lzw')])
"""
# this will update the metadata if a creation_options dict is passed as an arg.
import rasterio
meta = rasterio_rst.meta
if len(creation_options) < 0:
meta.update(creation_options)
with rasterio.open(output_filename, mode='w', **meta) as out_rst:
band_arr = rasterio_rst.read_band(band).data # this is a gotcha with the .data stuff
for rcl in reclass_list:
band_arr[numpy.logical_and(band_arr >= rcl[0], band_arr < rcl[1])] = rcl[2]
out_rst.write_band(band, band_arr)
return rasterio.open(output_filename)
def get_cell_size_from_uri(dataset_uri):
"""Get the cell size of a dataset in units of meters.
Raises an exception if the raster is not square since this'll break most of
the pygeoprocessing algorithms.
Args:
dataset_uri (string): uri to a gdal dataset
Returns:
size_meters: cell size of the dataset in meters
"""
srs = osr.SpatialReference()
dataset = gdal.Open(dataset_uri)
if dataset is None:
raise IOError(
'File not found or not valid dataset type at: %s' % dataset_uri)
srs.SetProjection(dataset.GetProjection())
linear_units = srs.GetLinearUnits()
geotransform = dataset.GetGeoTransform()
# take absolute value since sometimes negative widths/heights
try:
numpy.testing.assert_approx_equal(
abs(geotransform[1]), abs(geotransform[5]))
size_meters = abs(geotransform[1]) * linear_units
except AssertionError as e:
size_meters = (
abs(geotransform[1]) + abs(geotransform[5])) / 2.0 * linear_units
# Close and clean up dataset
gdal.Dataset.__swig_destroy__(dataset)
dataset = None
return size_meters
def reclassify_dataset_uri(
dataset_uri, value_map, raster_out_uri, out_datatype, out_nodata,
exception_flag='values_required', assert_dataset_projected=True):
"""Reclassify values in a dataset.
A function to reclassify values in dataset to any output type. By default
the values except for nodata must be in value_map.
Args:
dataset_uri (string): a uri to a gdal dataset
value_map (dictionary): a dictionary of values of
{source_value: dest_value, ...}
where source_value's type is a postive integer type and dest_value
is of type out_datatype.
raster_out_uri (string): the uri for the output raster
out_datatype (gdal type): the type for the output dataset
out_nodata (numerical type): the nodata value for the output raster.
Must be the same type as out_datatype
Keyword Args:
exception_flag (string): either 'none' or 'values_required'.
If 'values_required' raise an exception if there is a value in the
raster that is not found in value_map
assert_dataset_projected (boolean): if True this operation will
test if the input dataset is not projected and raise an exception
if so.
Returns:
nothing
Raises:
Exception: if exception_flag == 'values_required' and the value from
'key_raster' is not a key in 'attr_dict'
"""
if exception_flag not in ['none', 'values_required']:
raise ValueError('unknown exception_flag %s', exception_flag)
values_required = exception_flag == 'values_required'
nodata = get_nodata_from_uri(dataset_uri)
value_map_copy = value_map.copy()
# possible that nodata value is not defined, so test for None first
# otherwise if nodata not predefined, remap it into the dictionary
if nodata is not None and nodata not in value_map_copy:
value_map_copy[nodata] = out_nodata
keys = sorted(value_map_copy.keys())
values = numpy.array([value_map_copy[x] for x in keys])
def map_dataset_to_value(original_values):
"""Convert a block of original values to the lookup values."""
if values_required:
unique = numpy.unique(original_values)
has_map = numpy.in1d(unique, keys)
if not all(has_map):
raise ValueError(
'There was not a value for at least the following codes '
'%s for this file %s.\nNodata value is: %s' % (
str(unique[~has_map]), dataset_uri, str(nodata)))
index = numpy.digitize(original_values.ravel(), keys, right=True)
return values[index].reshape(original_values.shape)
out_pixel_size = get_cell_size_from_uri(dataset_uri)
vectorize_datasets(
[dataset_uri], map_dataset_to_value,
raster_out_uri, out_datatype, out_nodata, out_pixel_size,
"intersection", dataset_to_align_index=0,
vectorize_op=False, assert_datasets_projected=assert_dataset_projected,
datasets_are_pre_aligned=True)
def clip_dataset_uri(
source_dataset_uri, aoi_datasource_uri, out_dataset_uri,
assert_projections=True, process_pool=None, all_touched=False):
"""Clip raster dataset to bounding box of provided vector datasource aoi.
This function will clip source_dataset to the bounding box of the
polygons in aoi_datasource and mask out the values in source_dataset
outside of the AOI with the nodata values in source_dataset.
Args:
source_dataset_uri (string): uri to single band GDAL dataset to clip
aoi_datasource_uri (string): uri to ogr datasource
out_dataset_uri (string): path to disk for the clipped datset
Keyword Args:
assert_projections (boolean): a boolean value for whether the dataset
needs to be projected
process_pool: a process pool for multiprocessing
all_touched (boolean): if true the clip uses the option
ALL_TOUCHED=TRUE when calling RasterizeLayer for AOI masking.
Returns:
None
"""
source_dataset = gdal.Open(source_dataset_uri)
band = source_dataset.GetRasterBand(1)
nodata = band.GetNoDataValue()
datatype = band.DataType
if nodata is None:
nodata = -9999
gdal.Dataset.__swig_destroy__(source_dataset)
source_dataset = None
pixel_size = get_raster_info(source_dataset_uri)['mean_pixel_size']
vectorize_datasets(
[source_dataset_uri], lambda x: x, out_dataset_uri, datatype, nodata,
pixel_size, 'intersection', aoi_uri=aoi_datasource_uri,
assert_datasets_projected=assert_projections,
process_pool=process_pool, vectorize_op=False, all_touched=all_touched)
def get_raster_info(raster_path):
"""Get information about a GDAL raster dataset.
Parameters:
raster_path (String): a path to a GDAL raster.
Returns:
raster_properties (dictionary): a dictionary with the properties
stored under relevant keys.
'pixel_size' (tuple): (pixel x-size, pixel y-size) from
geotransform.
'mean_pixel_size' (float): the average size of the absolute value
of each pixel size element.
'raster_size' (tuple): number of raster pixels in (x, y)
direction.
'nodata' (float or list): if number of bands is 1, then this value
is the nodata value of the single band, otherwise a list of
the nodata values in increasing band index
'n_bands' (int): number of bands in the raster.
'geotransform' (tuple): a 6-tuple representing the geotransform of
(x orign, x-increase, xy-increase,
y origin, yx-increase, y-increase),
'datatype' (int): An instance of an enumerated gdal.GDT_* int
that represents the datatype of the raster.
"""
raster_properties = {}
raster = gdal.Open(raster_path)
geo_transform = raster.GetGeoTransform()
raster_properties['pixel_size'] = (geo_transform[1], geo_transform[5])
raster_properties['mean_pixel_size'] = (
(abs(geo_transform[1]) + abs(geo_transform[5])) / 2.0)
raster_properties['raster_size'] = (
raster.GetRasterBand(1).XSize,
raster.GetRasterBand(1).YSize)
raster_properties['n_bands'] = raster.RasterCount
raster_properties['nodata'] = [
raster.GetRasterBand(index).GetNoDataValue() for index in range(
1, raster_properties['n_bands']+1)]
if len(raster_properties['nodata']) == 1:
raster_properties['nodata'] = raster_properties['nodata'][0]
raster_properties['bounding_box'] = [
geo_transform[0], geo_transform[3],
(geo_transform[0] +
raster_properties['raster_size'][0] * geo_transform[1]),
(geo_transform[3] +
raster_properties['raster_size'][1] * geo_transform[5])]
raster_properties['geotransform'] = geo_transform
# datatype is the same for the whole raster, but is associated with band
raster_properties['datatype'] = raster.GetRasterBand(1).DataType
raster = None
return raster_properties
def vectorize_datasets(
dataset_uri_list, dataset_pixel_op, dataset_out_uri, datatype_out,
nodata_out, pixel_size_out, bounding_box_mode,
resample_method_list=None, dataset_to_align_index=None,
dataset_to_bound_index=None, aoi_uri=None,
assert_datasets_projected=True, process_pool=None, vectorize_op=True,
datasets_are_pre_aligned=False, dataset_options=None,
all_touched=False):
"""Apply local raster operation on stack of datasets.
This function applies a user defined function across a stack of
datasets. It has functionality align the output dataset grid
with one of the input datasets, output a dataset that is the union
or intersection of the input dataset bounding boxes, and control
over the interpolation techniques of the input datasets, if
necessary. The datasets in dataset_uri_list must be in the same
projection; the function will raise an exception if not.
Args:
dataset_uri_list (list): a list of file uris that point to files that
can be opened with gdal.Open.
dataset_pixel_op (function) a function that must take in as many
arguments as there are elements in dataset_uri_list. The arguments
can be treated as interpolated or actual pixel values from the
input datasets and the function should calculate the output
value for that pixel stack. The function is a parallel
paradigmn and does not know the spatial position of the
pixels in question at the time of the call. If the
`bounding_box_mode` parameter is "union" then the values
of input dataset pixels that may be outside their original
range will be the nodata values of those datasets. Known
bug: if dataset_pixel_op does not return a value in some cases
the output dataset values are undefined even if the function
does not crash or raise an exception.
dataset_out_uri (string): the uri of the output dataset. The
projection will be the same as the datasets in dataset_uri_list.
datatype_out: the GDAL output type of the output dataset
nodata_out: the nodata value of the output dataset.
pixel_size_out: the pixel size of the output dataset in
projected coordinates.
bounding_box_mode (string): one of "union" or "intersection",
"dataset". If union the output dataset bounding box will be the
union of the input datasets. Will be the intersection otherwise.
An exception is raised if the mode is "intersection" and the
input datasets have an empty intersection. If dataset it will make
a bounding box as large as the given dataset, if given
dataset_to_bound_index must be defined.
Keyword Args:
resample_method_list (list): a list of resampling methods
for each output uri in dataset_out_uri list. Each element
must be one of "nearest|bilinear|cubic|cubic_spline|lanczos".
If None, the default is "nearest" for all input datasets.
dataset_to_align_index (int): an int that corresponds to the position
in one of the dataset_uri_lists that, if positive aligns the output
rasters to fix on the upper left hand corner of the output
datasets. If negative, the bounding box aligns the intersection/
union without adjustment.
dataset_to_bound_index: if mode is "dataset" this indicates which
dataset should be the output size.
aoi_uri (string): a URI to an OGR datasource to be used for the
aoi. Irrespective of the `mode` input, the aoi will be used
to intersect the final bounding box.
assert_datasets_projected (boolean): if True this operation will
test if any datasets are not projected and raise an exception
if so.
process_pool: a process pool for multiprocessing
vectorize_op (boolean): if true the model will try to numpy.vectorize
dataset_pixel_op. If dataset_pixel_op is designed to use maximize
array broadcasting, set this parameter to False, else it may
inefficiently invoke the function on individual elements.
datasets_are_pre_aligned (boolean): If this value is set to False
this operation will first align and interpolate the input datasets
based on the rules provided in bounding_box_mode,
resample_method_list, dataset_to_align_index, and
dataset_to_bound_index, if set to True the input dataset list must
be aligned, probably by raster_utils.align_dataset_list
dataset_options: this is an argument list that will be
passed to the GTiff driver. Useful for blocksizes, compression,
etc.
all_touched (boolean): if true the clip uses the option
ALL_TOUCHED=TRUE when calling RasterizeLayer for AOI masking.
Returns:
None
Raises:
ValueError: invalid input provided
"""
if not isinstance(dataset_uri_list, list):
raise ValueError(
"dataset_uri_list was not passed in as a list, maybe a single "
"file was passed in? Here is its value: %s" %
(str(dataset_uri_list)))
if aoi_uri is None:
assert_file_existance(dataset_uri_list)
else:
assert_file_existance(dataset_uri_list + [aoi_uri])
if dataset_out_uri in dataset_uri_list:
raise ValueError(
"%s is used as an output file, but it is also an input file "
"in the input list %s" % (dataset_out_uri, str(dataset_uri_list)))
valid_bounding_box_modes = ["union", "intersection", "dataset"]
if bounding_box_mode not in valid_bounding_box_modes:
raise ValueError(
"Unknown bounding box mode %s; should be one of %s",
bounding_box_mode, valid_bounding_box_modes)
# Create a temporary list of filenames whose files delete on the python
# interpreter exit
if not datasets_are_pre_aligned:
# Handle the cases where optional arguments are passed in
if resample_method_list is None:
resample_method_list = ["nearest"] * len(dataset_uri_list)
if dataset_to_align_index is None:
dataset_to_align_index = -1
dataset_out_uri_list = [
temporary_filename(suffix='.tif') for _ in dataset_uri_list]
# Align and resample the datasets, then load datasets into a list
align_dataset_list(
dataset_uri_list, dataset_out_uri_list, resample_method_list,
pixel_size_out, bounding_box_mode, dataset_to_align_index,
dataset_to_bound_index=dataset_to_bound_index,
aoi_uri=aoi_uri,
assert_datasets_projected=assert_datasets_projected,
all_touched=all_touched)
aligned_datasets = [
gdal.Open(filename, gdal.GA_ReadOnly) for filename in
dataset_out_uri_list]
else:
# otherwise the input datasets are already aligned
aligned_datasets = [
gdal.Open(filename, gdal.GA_ReadOnly) for filename in
dataset_uri_list]
aligned_bands = [dataset.GetRasterBand(1) for dataset in aligned_datasets]
n_rows = aligned_datasets[0].RasterYSize
n_cols = aligned_datasets[0].RasterXSize
output_dataset = new_raster_from_base(
aligned_datasets[0], dataset_out_uri, 'GTiff', nodata_out,
datatype_out, dataset_options=dataset_options)
output_band = output_dataset.GetRasterBand(1)
block_size = output_band.GetBlockSize()
# makes sense to get the largest block size possible to reduce the number
# of expensive readasarray calls
for current_block_size in [band.GetBlockSize() for band in aligned_bands]:
if (current_block_size[0] * current_block_size[1] >
block_size[0] * block_size[1]):
block_size = current_block_size
cols_per_block, rows_per_block = block_size[0], block_size[1]
n_col_blocks = int(math.ceil(n_cols / float(cols_per_block)))
n_row_blocks = int(math.ceil(n_rows / float(rows_per_block)))
# If there's an AOI, mask it out
if aoi_uri is not None:
mask_uri = temporary_filename(suffix='.tif')
mask_dataset = new_raster_from_base(
aligned_datasets[0], mask_uri, 'GTiff', 255, gdal.GDT_Byte,
fill_value=0, dataset_options=dataset_options)
mask_band = mask_dataset.GetRasterBand(1)
aoi_datasource = ogr.Open(aoi_uri)
aoi_layer = aoi_datasource.GetLayer()
if all_touched:
option_list = ["ALL_TOUCHED=TRUE"]
else:
option_list = []
gdal.RasterizeLayer(
mask_dataset, [1], aoi_layer, burn_values=[1], options=option_list)
aoi_layer = None
aoi_datasource = None
# We only want to do this if requested, otherwise we might have a more
# efficient call if we don't vectorize.
if vectorize_op:
dataset_pixel_op = numpy.vectorize(
dataset_pixel_op, otypes=[_gdal_to_numpy_type(output_band)])
last_time = time.time()
last_row_block_width = None
last_col_block_width = None
for row_block_index in range(n_row_blocks):
row_offset = row_block_index * rows_per_block
row_block_width = n_rows - row_offset
if row_block_width > rows_per_block:
row_block_width = rows_per_block
for col_block_index in range(n_col_blocks):
col_offset = col_block_index * cols_per_block
col_block_width = n_cols - col_offset
if col_block_width > cols_per_block:
col_block_width = cols_per_block
current_time = time.time()
if current_time - last_time > 5.0:
last_time = current_time
#This is true at least once since last_* initialized with None
if (last_row_block_width != row_block_width or
last_col_block_width != col_block_width):
dataset_blocks = [
numpy.zeros(
(row_block_width, col_block_width),
dtype=_gdal_to_numpy_type(band)) for band in aligned_bands]
if aoi_uri != None:
mask_array = numpy.zeros(
(row_block_width, col_block_width), dtype=numpy.int8)
last_row_block_width = row_block_width
last_col_block_width = col_block_width
for dataset_index in range(len(aligned_bands)):
aligned_bands[dataset_index].ReadAsArray(
xoff=col_offset, yoff=row_offset,
win_xsize=col_block_width,
win_ysize=row_block_width,
buf_obj=dataset_blocks[dataset_index])
out_block = dataset_pixel_op(*dataset_blocks)
# Mask out the row if there is a mask
if aoi_uri is not None:
mask_band.ReadAsArray(
xoff=col_offset, yoff=row_offset,
win_xsize=col_block_width,
win_ysize=row_block_width,
buf_obj=mask_array)
out_block[mask_array == 0] = nodata_out
output_band.WriteArray(
out_block[0:row_block_width, 0:col_block_width],
xoff=col_offset, yoff=row_offset)
# Making sure the band and dataset is flushed and not in memory before
# adding stats
output_band.FlushCache()
output_band = None
output_dataset.FlushCache()
gdal.Dataset.__swig_destroy__(output_dataset)
output_dataset = None
# Clean up the files made by temporary file because we had an issue once
# where I was running the water yield model over 2000 times and it made
# so many temporary files I ran out of disk space.
if aoi_uri is not None:
mask_band = None
gdal.Dataset.__swig_destroy__(mask_dataset)
mask_dataset = None
os.remove(mask_uri)
aligned_bands = None
for dataset in aligned_datasets:
gdal.Dataset.__swig_destroy__(dataset)
aligned_datasets = None
if not datasets_are_pre_aligned:
# if they weren't pre-aligned then we have temporary files to remove
for temp_dataset_uri in dataset_out_uri_list:
try:
os.remove(temp_dataset_uri)
except OSError:
pass
calculate_raster_stats_uri(dataset_out_uri
)
def assert_file_existance(dataset_uri_list):
    """Assert that provided uris exist in filesystem.

    Verify that every uri passed in the argument exists on the filesystem;
    if any do not, raise an exception naming the offending files.

    Args:
        dataset_uri_list (list): a list of relative or absolute file paths to
            validate

    Returns:
        None

    Raises:
        IOError: if any files are not found
    """
    missing_uris = [
        uri for uri in dataset_uri_list if not os.path.exists(uri)]
    if missing_uris:
        raise IOError(
            "The following files do not exist on the filesystem: " +
            str(missing_uris))
def temporary_filename(suffix=''):
    """Get path to new temporary file that will be deleted on program exit.

    Returns a temporary filename using mkstemp. The file is deleted
    on exit using the atexit register.

    Keyword Args:
        suffix (string): the suffix to be appended to the temporary file

    Returns:
        fname: a unique temporary filename
    """
    handle, fname = tempfile.mkstemp(suffix=suffix)
    # mkstemp returns an open OS-level handle; close it so the path can be
    # reopened freely by callers.
    os.close(handle)

    def _cleanup(target_path):
        """Remove the temp file at interpreter exit; tolerate it already
        being gone (e.g. a caller deleted it earlier)."""
        try:
            os.remove(target_path)
        except OSError:
            pass

    atexit.register(_cleanup, fname)
    return fname
def new_raster_from_base_uri(base_uri, *args, **kwargs):
    """A wrapper for the function new_raster_from_base that opens up
    the base_uri before passing it to new_raster_from_base.

    Args:
        base_uri (string): a URI to a GDAL dataset on disk.

    All other arguments to new_raster_from_base are passed in.

    Returns:
        None

    Raises:
        IOError: if `base_uri` cannot be opened as a GDAL raster
    """
    base_raster = gdal.Open(base_uri)
    if base_raster is None:
        # Bug fix: the original message contained a bare "%s" with no
        # interpolated argument, so the failing path was never reported.
        raise IOError("%s not found when opening GDAL raster" % base_uri)
    new_raster = new_raster_from_base(base_raster, *args, **kwargs)

    # Explicitly destroy the SWIG wrappers so GDAL flushes and closes the
    # datasets immediately rather than waiting for garbage collection.
    gdal.Dataset.__swig_destroy__(new_raster)
    gdal.Dataset.__swig_destroy__(base_raster)
    new_raster = None
    base_raster = None
def new_raster_from_base(
        base, output_uri, gdal_format, nodata, datatype, fill_value=None,
        n_rows=None, n_cols=None, dataset_options=None):
    """Create a new, empty GDAL raster dataset with the spatial references,
    geotransforms of the base GDAL raster dataset.

    Args:
        base: the GDAL raster dataset to base output size and transforms on
        output_uri (string): a string URI to the new output raster dataset.
        gdal_format (string): a string representing the GDAL file format of
            the output raster.  See http://gdal.org/formats_list.html for a
            list of available formats.  This parameter expects the format
            code, such as 'GTiff' or 'MEM'
        nodata: a value that will be set as the nodata value for the
            output raster.  Should be the same type as 'datatype'.  May be
            None, in which case no nodata value is set.
        datatype: the pixel datatype of the output raster, for example
            gdal.GDT_Float32.  See the following header file for supported
            pixel types:
            http://www.gdal.org/gdal_8h.html#22e22ce0a55036a96f652765793fb7a4
        fill_value: (optional) the value to fill in the raster on creation
        n_rows (int): (optional) if set, the resulting raster has this many
            rows; otherwise the row count of `base` is used
        n_cols (int): (optional) similar to n_rows, but for the columns
        dataset_options (list): (optional) a list of dataset options that
            gets passed to the gdal creation driver, overrides defaults

    Returns:
        a new GDAL raster dataset.
    """
    # nodata may arrive as a numpy scalar; coerce to a native python type
    try:
        nodata = nodata.item()
    except AttributeError:
        pass

    if n_rows is None:
        n_rows = base.RasterYSize
    if n_cols is None:
        n_cols = base.RasterXSize
    projection = base.GetProjection()
    geotransform = base.GetGeoTransform()
    driver = gdal.GetDriverByName(gdal_format)

    base_band = base.GetRasterBand(1)
    block_size = base_band.GetBlockSize()
    metadata = base_band.GetMetadata('IMAGE_STRUCTURE')
    base_band = None

    # Fix: compare to None with `is`, not `==` (PEP 8 identity comparison)
    if dataset_options is None:
        # make a new list to make sure we aren't aliasing one passed in
        dataset_options = []
        # first, should it be tiled?  yes if it's not striped
        if block_size[0] != n_cols:
            # just do 256x256 blocks
            dataset_options = [
                'TILED=YES',
                'BLOCKXSIZE=256',
                'BLOCKYSIZE=256',
                'BIGTIFF=IF_SAFER']
        if 'PIXELTYPE' in metadata:
            dataset_options.append('PIXELTYPE=' + metadata['PIXELTYPE'])

    new_raster = driver.Create(
        output_uri.encode('utf-8'), n_cols, n_rows, 1, datatype,
        options=dataset_options)
    new_raster.SetProjection(projection)
    new_raster.SetGeoTransform(geotransform)
    band = new_raster.GetRasterBand(1)

    # (removed a dead `else: pass` branch from the original)
    if nodata is not None:
        band.SetNoDataValue(nodata)

    if fill_value is not None:
        band.Fill(fill_value)
    elif nodata is not None:
        band.Fill(nodata)
    band = None

    return new_raster
def get_bounding_box(dataset_uri):
    """Get bounding box where coordinates are in projected units.

    Args:
        dataset_uri (string): a uri to a GDAL dataset

    Returns:
        bounding_box (list):
            [upper_left_x, upper_left_y, lower_right_x, lower_right_y] in
            projected coordinates
    """
    raster = gdal.Open(dataset_uri)
    geotransform = raster.GetGeoTransform()
    x_origin = geotransform[0]
    y_origin = geotransform[3]
    # geotransform[1] is the pixel width, geotransform[5] the (usually
    # negative) pixel height.
    bounding_box = [
        x_origin,
        y_origin,
        x_origin + raster.RasterXSize * geotransform[1],
        y_origin + raster.RasterYSize * geotransform[5]]

    # Close and clean up the dataset handle.
    gdal.Dataset.__swig_destroy__(raster)
    raster = None
    return bounding_box
def align_dataset_list(
        dataset_uri_list, dataset_out_uri_list, resample_method_list,
        out_pixel_size, mode, dataset_to_align_index,
        dataset_to_bound_index=None, aoi_uri=None,
        assert_datasets_projected=True, all_touched=False):
    """Create a new list of datasets that are aligned based on a list of
    inputted datasets.

    Take a list of dataset uris and generates a new set that is completely
    aligned with identical projections and pixel sizes.

    Args:
        dataset_uri_list (list): a list of input dataset uris
        dataset_out_uri_list (list): a parallel dataset uri list whose
            positions correspond to entries in dataset_uri_list
        resample_method_list (list): a list of resampling methods for each
            output uri in dataset_out_uri list.  Each element must be one of
            "nearest|bilinear|cubic|cubic_spline|lanczos"
        out_pixel_size: the output pixel size
        mode (string): one of "union", "intersection", or "dataset" which
            defines how the output output extents are defined as either the
            union or intersection of the input datasets or to have the same
            bounds as an existing raster.  If mode is "dataset" then
            dataset_to_bound_index must be defined
        dataset_to_align_index (int): an int that corresponds to the position
            in one of the dataset_uri_lists that, if positive aligns the output
            rasters to fix on the upper left hand corner of the output
            datasets.  If negative, the bounding box aligns the intersection/
            union without adjustment.
        all_touched (boolean): if True and an AOI is passed, the
            ALL_TOUCHED=TRUE option is passed to the RasterizeLayer function
            when determining the mask of the AOI.

    Keyword Args:
        dataset_to_bound_index: if mode is "dataset" then this index is
            used to indicate which dataset to define the output bounds of the
            dataset_out_uri_list
        aoi_uri (string): a URI to an OGR datasource to be used for the
            aoi.  Irrespective of the `mode` input, the aoi will be used
            to intersect the final bounding box.

    Returns:
        None
    """
    # local import; functools is only needed for the reductions below
    import functools
    last_time = time.time()
    # make sure that the input lists are of the same length
    list_lengths = [
        len(dataset_uri_list), len(dataset_out_uri_list),
        len(resample_method_list)]
    # reduce yields the common length if all three agree, otherwise False
    if not functools.reduce(lambda x, y: x if x == y else False, list_lengths):
        raise Exception(
            "dataset_uri_list, dataset_out_uri_list, and "
            "resample_method_list must be the same length "
            " current lengths are %s" % (str(list_lengths)))
    if assert_datasets_projected:
        assert_datasets_in_same_projection(dataset_uri_list)
    if mode not in ["union", "intersection", "dataset"]:
        raise Exception("Unknown mode %s" % (str(mode)))
    if dataset_to_align_index >= len(dataset_uri_list):
        raise Exception(
            "Alignment index is out of bounds of the datasets index: %s"
            "n_elements %s" % (dataset_to_align_index, len(dataset_uri_list)))
    if mode == "dataset" and dataset_to_bound_index is None:
        raise Exception(
            "Mode is 'dataset' but dataset_to_bound_index is not defined")
    if mode == "dataset" and (dataset_to_bound_index < 0 or
                              dataset_to_bound_index >= len(dataset_uri_list)):
        raise Exception(
            "dataset_to_bound_index is out of bounds of the datasets index: %s"
            "n_elements %s" % (dataset_to_bound_index, len(dataset_uri_list)))

    def merge_bounding_boxes(bb1, bb2, mode):
        """Merge two bounding boxes through union or intersection.

        Boxes are in [upper_left_x, upper_left_y, lower_right_x,
        lower_right_y] order, i.e. [minx, maxy, maxx, miny] — hence the
        mixed min/max comparison pattern per coordinate below.
        """
        less_than_or_equal = lambda x, y: x if x <= y else y
        greater_than = lambda x, y: x if x > y else y

        if mode == "union":
            comparison_ops = [
                less_than_or_equal, greater_than, greater_than,
                less_than_or_equal]
        if mode == "intersection":
            comparison_ops = [
                greater_than, less_than_or_equal, less_than_or_equal,
                greater_than]

        bb_out = [op(x, y) for op, x, y in zip(comparison_ops, bb1, bb2)]
        return bb_out

    # get the intersecting or unioned bounding box
    if mode == "dataset":
        bounding_box = get_bounding_box(
            dataset_uri_list[dataset_to_bound_index])
    else:
        bounding_box = functools.reduce(
            functools.partial(merge_bounding_boxes, mode=mode),
            [get_bounding_box(dataset_uri) for dataset_uri in dataset_uri_list])

    # the AOI always intersects the final bounding box, regardless of mode
    if aoi_uri is not None:
        bounding_box = merge_bounding_boxes(
            bounding_box, get_datasource_bounding_box(aoi_uri), "intersection")

    # empty when minx >= maxx or maxy <= miny (y axis decreases downward)
    if (bounding_box[0] >= bounding_box[2] or
            bounding_box[1] <= bounding_box[3]) and mode == "intersection":
        raise Exception("The datasets' intersection is empty "
                        "(i.e., not all the datasets touch each other).")

    if dataset_to_align_index >= 0:
        # bounding box needs alignment: snap the upper-left corner onto the
        # pixel grid of the alignment raster
        align_bounding_box = get_bounding_box(
            dataset_uri_list[dataset_to_align_index])
        align_pixel_size = get_cell_size_from_uri(
            dataset_uri_list[dataset_to_align_index])

        for index in [0, 1]:
            n_pixels = int(
                (bounding_box[index] - align_bounding_box[index]) /
                float(align_pixel_size))
            bounding_box[index] = \
                n_pixels * align_pixel_size + align_bounding_box[index]

    for original_dataset_uri, out_dataset_uri, resample_method, index in zip(
            dataset_uri_list, dataset_out_uri_list, resample_method_list,
            range(len(dataset_uri_list))):
        current_time = time.time()
        # NOTE(review): this 5-second timer updates but has no visible
        # effect — presumably vestigial from a removed progress-log call.
        if current_time - last_time > 5.0:
            last_time = current_time

        resize_and_resample_dataset_uri(
            original_dataset_uri, bounding_box, out_pixel_size,
            out_dataset_uri, resample_method)

    # If there's an AOI, mask it out
    if aoi_uri is not None:
        first_dataset = gdal.Open(dataset_out_uri_list[0])
        n_rows = first_dataset.RasterYSize
        n_cols = first_dataset.RasterXSize
        gdal.Dataset.__swig_destroy__(first_dataset)
        first_dataset = None

        # rasterize the AOI into a 0/255-initialized byte mask, burning 1
        # inside the AOI polygons
        mask_uri = temporary_filename(suffix='.tif')
        new_raster_from_base_uri(
            dataset_out_uri_list[0], mask_uri, 'GTiff', 255, gdal.GDT_Byte,
            fill_value=0)

        mask_dataset = gdal.Open(mask_uri, gdal.GA_Update)
        mask_band = mask_dataset.GetRasterBand(1)
        aoi_datasource = ogr.Open(aoi_uri)
        aoi_layer = aoi_datasource.GetLayer()
        if all_touched:
            option_list = ["ALL_TOUCHED=TRUE"]
        else:
            option_list = []
        gdal.RasterizeLayer(
            mask_dataset, [1], aoi_layer, burn_values=[1], options=option_list)
        mask_row = numpy.zeros((1, n_cols), dtype=numpy.int8)

        out_dataset_list = [
            gdal.Open(uri, gdal.GA_Update) for uri in dataset_out_uri_list]
        out_band_list = [
            dataset.GetRasterBand(1) for dataset in out_dataset_list]
        nodata_out_list = [
            get_nodata_from_uri(uri) for uri in dataset_out_uri_list]

        # row by row, write nodata into every output wherever the mask is 0
        for row_index in range(n_rows):
            mask_row = (mask_band.ReadAsArray(
                0, row_index, n_cols, 1) == 0)
            for out_band, nodata_out in zip(out_band_list, nodata_out_list):
                dataset_row = out_band.ReadAsArray(
                    0, row_index, n_cols, 1)
                out_band.WriteArray(
                    numpy.where(mask_row, nodata_out, dataset_row),
                    xoff=0, yoff=row_index)

        # Remove the mask aoi if necessary
        mask_band = None
        gdal.Dataset.__swig_destroy__(mask_dataset)
        mask_dataset = None
        os.remove(mask_uri)

        # Close and clean up datasource
        aoi_layer = None
        ogr.DataSource.__swig_destroy__(aoi_datasource)
        aoi_datasource = None

        # Clean up datasets
        out_band_list = None
        for dataset in out_dataset_list:
            dataset.FlushCache()
            gdal.Dataset.__swig_destroy__(dataset)
        out_dataset_list = None
def assert_datasets_in_same_projection(dataset_uri_list):
    """Assert that provided datasets are all in the same projection.

    Tests if datasets represented by their uris are projected and in
    the same projection and raises an exception if not.

    NOTE(review): the docstring below describes DatasetUnprojected /
    DifferentProjections exceptions, but in this version both failure
    branches are `pass` stubs and the function always returns True —
    confirm whether the raises were intentionally disabled.

    Args:
        dataset_uri_list (list): (description)

    Returns:
        is_true (boolean): True (otherwise exception raised)

    Raises:
        DatasetUnprojected: if one of the datasets is unprojected.
        DifferentProjections: if at least one of the datasets is in
            a different projection
    """
    dataset_list = [gdal.Open(dataset_uri) for dataset_uri in dataset_uri_list]
    dataset_projections = []

    # collect each dataset's spatial reference and remember any that are
    # not in a projected coordinate system
    unprojected_datasets = set()

    for dataset in dataset_list:
        projection_as_str = dataset.GetProjection()
        dataset_sr = osr.SpatialReference()
        dataset_sr.ImportFromWkt(projection_as_str)
        if not dataset_sr.IsProjected():
            unprojected_datasets.add(dataset.GetFileList()[0])
        dataset_projections.append((dataset_sr, dataset.GetFileList()[0]))

    if len(unprojected_datasets) > 0:
        # NOTE(review): silently ignores unprojected datasets (see above)
        pass

    for index in range(len(dataset_projections)-1):
        if not dataset_projections[index][0].IsSame(
                dataset_projections[index+1][0]):
            # NOTE(review): silently ignores projection mismatches (see above)
            pass

    for dataset in dataset_list:
        # Close and clean up dataset
        gdal.Dataset.__swig_destroy__(dataset)
    dataset_list = None
    return True
def resize_and_resample_dataset_uri(
        original_dataset_uri, bounding_box, out_pixel_size, output_uri,
        resample_method):
    """Resize and resample the given dataset.

    Args:
        original_dataset_uri (string): a GDAL dataset
        bounding_box (list): [upper_left_x, upper_left_y, lower_right_x,
            lower_right_y]
        out_pixel_size: the pixel size in projected linear units
        output_uri (string): the location of the new resampled GDAL dataset
        resample_method (string): the resampling technique, one of
            "nearest|bilinear|cubic|cubic_spline|lanczos"

    Returns:
        None
    """
    # map the user-facing method names onto GDAL's resampling constants
    resample_dict = {
        "nearest": gdal.GRA_NearestNeighbour,
        "bilinear": gdal.GRA_Bilinear,
        "cubic": gdal.GRA_Cubic,
        "cubic_spline": gdal.GRA_CubicSpline,
        "lanczos": gdal.GRA_Lanczos
        }

    original_dataset = gdal.Open(original_dataset_uri)
    original_band = original_dataset.GetRasterBand(1)
    original_nodata = original_band.GetNoDataValue()
    # default the nodata value when the source band does not define one
    if original_nodata is None:
        original_nodata = -9999
    original_sr = osr.SpatialReference()
    original_sr.ImportFromWkt(original_dataset.GetProjection())
    # north-up geotransform anchored at the bounding box's upper-left corner
    output_geo_transform = [
        bounding_box[0], out_pixel_size, 0.0, bounding_box[1], 0.0,
        -out_pixel_size]
    new_x_size = abs(
        int(numpy.round((bounding_box[2] - bounding_box[0]) / out_pixel_size)))
    new_y_size = abs(
        int(numpy.round((bounding_box[3] - bounding_box[1]) / out_pixel_size)))
    # guarantee at least a 1x1 raster even for degenerate bounding boxes
    if new_x_size == 0:
        new_x_size = 1
    if new_y_size == 0:
        new_y_size = 1
    # create the new x and y size
    block_size = original_band.GetBlockSize()
    # If the original band is tiled, then its x blocksize will be different
    # than the number of columns
    if original_band.XSize > 256 and original_band.YSize > 256:
        # it makes sense for many functions to have 256x256 blocks
        block_size[0] = 256
        block_size[1] = 256
        gtiff_creation_options = [
            'TILED=YES', 'BIGTIFF=IF_SAFER', 'BLOCKXSIZE=%d' % block_size[0],
            'BLOCKYSIZE=%d' % block_size[1]]
        metadata = original_band.GetMetadata('IMAGE_STRUCTURE')
        if 'PIXELTYPE' in metadata:
            gtiff_creation_options.append('PIXELTYPE=' + metadata['PIXELTYPE'])
    else:
        # it is so small or strangely aligned, use the default creation options
        gtiff_creation_options = []
    create_directories([os.path.dirname(output_uri)])
    gdal_driver = gdal.GetDriverByName('GTiff')
    output_dataset = gdal_driver.Create(
        output_uri, new_x_size, new_y_size, 1, original_band.DataType,
        options=gtiff_creation_options)
    output_band = output_dataset.GetRasterBand(1)
    output_band.SetNoDataValue(original_nodata)
    # Set the geotransform
    output_dataset.SetGeoTransform(output_geo_transform)
    output_dataset.SetProjection(original_sr.ExportToWkt())
    # need to make this a closure so we get the current time and we can affect
    # state
    def reproject_callback(df_complete, psz_message, p_progress_arg):
        """The argument names come from the GDAL API for callbacks."""
        try:
            current_time = time.time()
            # NOTE(review): `total_time += current_time` accumulates
            # absolute timestamps, not elapsed seconds — looks vestigial
            # from a removed progress-log; confirm before relying on it.
            if ((current_time - reproject_callback.last_time) > 5.0 or
                    (df_complete == 1.0 and reproject_callback.total_time >= 5.0)):
                reproject_callback.last_time = current_time
                reproject_callback.total_time += current_time
        except AttributeError:
            # first invocation: attributes don't exist yet, initialize them
            reproject_callback.last_time = time.time()
            reproject_callback.total_time = 0.0
    # Perform the projection/resampling
    gdal.ReprojectImage(
        original_dataset, output_dataset, original_sr.ExportToWkt(),
        original_sr.ExportToWkt(), resample_dict[resample_method], 0, 0,
        reproject_callback, [output_uri])
    # Make sure the dataset is closed and cleaned up
    original_band = None
    gdal.Dataset.__swig_destroy__(original_dataset)
    original_dataset = None
    output_dataset.FlushCache()
    gdal.Dataset.__swig_destroy__(output_dataset)
    output_dataset = None
    calculate_raster_stats_uri(output_uri)
def create_directories(directory_list):
    """Make directories provided in list of path strings.

    Creates each directory in `directory_list` (including intermediate
    directories).  A directory that already exists is silently skipped;
    any other failure (e.g. permissions) is re-raised.

    Args:
        directory_list (list): a list of string uri paths

    Returns:
        None
    """
    for directory in directory_list:
        try:
            os.makedirs(directory)
        except OSError as os_error:
            # EEXIST: directory already there; ENOENT: empty/invalid path
            # component — both are tolerated, everything else propagates.
            if os_error.errno not in (errno.EEXIST, errno.ENOENT):
                raise
def get_datasource_bounding_box(datasource_uri):
    """Get datasource bounding box where coordinates are in projected units.

    Args:
        datasource_uri (string): a uri to an OGR datasource

    Returns:
        bounding_box (list):
            [upper_left_x, upper_left_y, lower_right_x, lower_right_y] in
            projected coordinates
    """
    datasource = ogr.Open(datasource_uri)
    layer = datasource.GetLayer(0)
    # OGR reports extents as [minx, maxx, miny, maxy]
    extent = layer.GetExtent()
    # Reindex datasource extents into the upper left/lower right coordinates
    bounding_box = [extent[0],
                    extent[3],
                    extent[1],
                    extent[2]]
    # Bug fix: the original returned the undefined name `bounding_boxz`,
    # which raised NameError on every call.
    return bounding_box
def iterblocks(
        raster_path, band_index_list=None, largest_block=_LARGEST_ITERBLOCK,
        astype=None, offset_only=False):
    """Iterate across all the memory blocks in the input raster.

    Result is a generator of block location information and numpy arrays.

    This is especially useful when a single value needs to be derived from
    the pixel values in a raster, such as the sum total of all pixel values,
    or a sequence of unique raster values.  In such cases, `raster_local_op`
    is overkill, since it writes out a raster.

    As a generator, this can be combined multiple times with
    itertools.izip() to iterate 'simultaneously' over multiple rasters,
    though the user should be careful to do so only with prealigned rasters.

    Parameters:
        raster_path (string): Path to raster file to iterate over.
        band_index_list (list of ints or None): A list of the bands for
            which the matrices should be returned.  Defaults to None, which
            will return all bands.  Bands may be specified in any order, and
            band indexes may be specified multiple times.  The blocks
            returned on each iteration will be in the order specified in
            this list.
        largest_block (int): Attempts to iterate over raster blocks with
            this many elements.  Useful in cases where the blocksize is
            relatively small, memory is available, and the function call
            overhead dominates the iteration.  Defaults to 2**20.  A value
            of anything less than the original blocksize of the raster will
            result in blocksizes equal to the original size.
        astype (numpy type): If not None, a single numpy dtype applied to
            every returned block in place of each band's native type.
            (NOTE(review): earlier documentation described a per-band list,
            but the implementation broadcasts one type to all bands.)
        offset_only (boolean): defaults to False, if True `iterblocks` only
            returns offset dictionary and doesn't read any binary data from
            the raster.  This can be useful when iterating over writing to
            an output.

    Returns:
        If `offset_only` is false, on each iteration, a tuple containing a
        dict of block data and `n` 2-dimensional numpy arrays are returned,
        where `n` is the number of bands requested via `band_index_list`.
        The dict of block data has these attributes:

            data['xoff'] - The X offset of the upper-left-hand corner of
                the block.
            data['yoff'] - The Y offset of the upper-left-hand corner of
                the block.
            data['win_xsize'] - The width of the block.
            data['win_ysize'] - The height of the block.

        If `offset_only` is True, the function returns only the block
        offset data and does not attempt to read binary data from the
        raster.
    """
    raster = gdal.OpenEx(raster_path)

    if band_index_list is None:
        band_index_list = range(1, raster.RasterCount + 1)

    # resolve the requested indexes into GDAL band objects
    band_list = [
        raster.GetRasterBand(index) for index in band_index_list]

    block = band_list[0].GetBlockSize()
    cols_per_block = block[0]
    rows_per_block = block[1]

    n_cols = raster.RasterXSize
    n_rows = raster.RasterYSize

    block_area = cols_per_block * rows_per_block
    # try to make block wider.  Bug fix: this must be floor division — with
    # Python 3 true division the "factor" could be a fraction < 1, which
    # shrank blocks to fractional, non-integer sizes.
    if largest_block // block_area > 0:
        width_factor = largest_block // block_area
        cols_per_block *= width_factor
        if cols_per_block > n_cols:
            cols_per_block = n_cols
        block_area = cols_per_block * rows_per_block
    # try to make block taller
    if largest_block // block_area > 0:
        height_factor = largest_block // block_area
        rows_per_block *= height_factor
        if rows_per_block > n_rows:
            rows_per_block = n_rows

    n_col_blocks = int(math.ceil(n_cols / float(cols_per_block)))
    n_row_blocks = int(math.ceil(n_rows / float(rows_per_block)))

    # Initialize to None so a block array is created on the first iteration
    last_row_block_width = None
    last_col_block_width = None

    if astype is not None:
        block_type_list = [astype] * len(band_list)
    else:
        block_type_list = [
            _gdal_to_numpy_type(ds_band) for ds_band in band_list]

    for row_block_index in range(n_row_blocks):
        row_offset = row_block_index * rows_per_block
        row_block_width = n_rows - row_offset
        if row_block_width > rows_per_block:
            row_block_width = rows_per_block

        for col_block_index in range(n_col_blocks):
            col_offset = col_block_index * cols_per_block
            col_block_width = n_cols - col_offset
            if col_block_width > cols_per_block:
                col_block_width = cols_per_block

            # resize the raster block cache if necessary
            if (last_row_block_width != row_block_width or
                    last_col_block_width != col_block_width):
                raster_blocks = [
                    numpy.zeros(
                        (row_block_width, col_block_width),
                        dtype=block_type) for block_type in
                    block_type_list]
                # Bug fix: the original never updated these trackers, so the
                # cache test above always failed and the block arrays were
                # reallocated on every single iteration.
                last_row_block_width = row_block_width
                last_col_block_width = col_block_width

            offset_dict = {
                'xoff': col_offset,
                'yoff': row_offset,
                'win_xsize': col_block_width,
                'win_ysize': row_block_width,
            }
            result = offset_dict
            if not offset_only:
                for ds_band, band_block in zip(band_list, raster_blocks):
                    ds_band.ReadAsArray(buf_obj=band_block, **offset_dict)
                result = (result,) + tuple(raster_blocks)
            yield result
def get_vector_info(vector_path, layer_index=0):
    """Get information about an OGR vector (datasource).

    Parameters:
        vector_path (str): a path to a OGR vector.
        layer_index (int): index of underlying layer to analyze.  Defaults
            to 0.

    Returns:
        raster_properties (dictionary): a dictionary with the following
            properties stored under relevant keys.

            'projection' (string): projection of the vector in Well Known
                Text.
            'bounding_box' (list): list of floats representing the bounding
                box in projected coordinates as [minx, miny, maxx, maxy].
    """
    vector = gdal.OpenEx(vector_path)
    layer = vector.GetLayer(iLayer=layer_index)
    # the projection is identical for every layer, so read it off this one
    projection_wkt = layer.GetSpatialRef().ExportToWkt()
    # GetExtent returns [minx, maxx, miny, maxy]
    extent = layer.GetExtent()
    layer = None
    vector = None
    return {
        'projection': projection_wkt,
        # reorder to [minx, miny, maxx, maxy]
        'bounding_box': [extent[0], extent[2], extent[1], extent[3]],
    }
def _merge_bounding_boxes(bb1, bb2, mode):
    """Merge two bounding boxes through union or intersection.

    Parameters:
        bb1, bb2 (list): list of float representing bounding box in the
            form bb=[minx,miny,maxx,maxy]
        mode (string); one of 'union' or 'intersection'

    Returns:
        Reduced bounding box of bb1/bb2 depending on mode.
    """
    if mode == "union":
        # union grows outward: smallest mins, largest maxes
        comparison_ops = [min, min, max, max]
    if mode == "intersection":
        # intersection shrinks inward: largest mins, smallest maxes
        comparison_ops = [max, max, min, min]

    return [op(a, b) for op, a, b in zip(comparison_ops, bb1, bb2)]
def _invoke_timed_callback(
reference_time, callback_lambda, callback_period):
"""Invoke callback if a certain amount of time has passed.
This is a convenience function to standardize update callbacks from the
module.
Parameters:
reference_time (float): time to base `callback_period` length from.
callback_lambda (lambda): function to invoke if difference between
current time and `reference_time` has exceeded `callback_period`.
callback_period (float): time in seconds to pass until
`callback_lambda` is invoked.
Returns:
`reference_time` if `callback_lambda` not invoked, otherwise the time
when `callback_lambda` was invoked.
"""
current_time = time.time()
if current_time - reference_time > callback_period:
callback_lambda()
return current_time
return reference_time
def align_and_resize_raster_stack(
        base_raster_path_list, target_raster_path_list, resample_method_list,
        target_pixel_size, bounding_box_mode, base_vector_path_list=None,
        raster_align_index=None,
        gtiff_creation_options=_DEFAULT_GTIFF_CREATION_OPTIONS):
    """Generate rasters from a base such that they align geospatially.

    This function resizes base rasters that are in the same geospatial
    projection such that the result is an aligned stack of rasters that
    have the same cell size, dimensions, and bounding box.  This is
    achieved by clipping or resizing the rasters to intersected, unioned,
    or equivocated bounding boxes of all the raster and vector input.

    Parameters:
        base_raster_path_list (list): a list of base raster paths that will
            be transformed and will be used to determine the target bounding
            box.
        target_raster_path_list (list): a list of raster paths that will be
            created to one-to-one map with `base_raster_path_list` as
            aligned versions of those original rasters.
        resample_method_list (list): a list of resampling methods which
            one to one map each path in `base_raster_path_list` during
            resizing.  Each element must be one of
            "nearest|bilinear|cubic|cubic_spline|lanczos|mode".
        target_pixel_size (tuple): the target raster's x and y pixel size
            example: [30, -30].
        bounding_box_mode (string): one of "union", "intersection", or
            a list of floats of the form [minx, miny, maxx, maxy].
            Depending on the value, output extents are defined as the
            union, intersection, or the explicit bounding box.
        base_vector_path_list (list): a list of base vector paths whose
            bounding boxes will be used to determine the final bounding box
            of the raster stack if mode is 'union' or 'intersection'.  If
            mode is 'bb=[...]' then these vectors are not used in any
            calculation.
        raster_align_index (int): indicates the index of a raster in
            `base_raster_path_list` that the target rasters' bounding boxes
            pixels should align with.  If `None` then the bounding box of
            the target rasters is calculated as the precise intersection,
            union, or bounding box.
        gtiff_creation_options (list): list of strings that will be passed
            as GDAL "dataset" creation options to the GTIFF driver, or
            ignored if None.

    Returns:
        None

    Raises:
        ValueError: if the input list lengths disagree, if
            `bounding_box_mode` is invalid, if `raster_align_index` is out
            of range, or if an "intersection" of the inputs is empty.
    """
    last_time = time.time()

    # make sure that the input lists are of the same length
    list_lengths = [
        len(base_raster_path_list), len(target_raster_path_list),
        len(resample_method_list)]
    if len(set(list_lengths)) != 1:
        raise ValueError(
            "base_raster_path_list, target_raster_path_list, and "
            "resample_method_list must be the same length "
            " current lengths are %s" % (str(list_lengths)))

    # we can accept 'union', 'intersection', or a 4 element list/tuple
    if bounding_box_mode not in ["union", "intersection"] and (
            not isinstance(bounding_box_mode, (list, tuple)) or
            len(bounding_box_mode) != 4):
        raise ValueError("Unknown bounding_box_mode %s" % (
            str(bounding_box_mode)))

    if ((raster_align_index is not None) and
            ((raster_align_index < 0) or
             (raster_align_index >= len(base_raster_path_list)))):
        raise ValueError(
            "Alignment index is out of bounds of the datasets index: %s"
            " n_elements %s" % (
                raster_align_index, len(base_raster_path_list)))

    raster_info_list = [
        get_raster_info(path) for path in base_raster_path_list]
    if base_vector_path_list is not None:
        vector_info_list = [
            get_vector_info(path) for path in base_vector_path_list]
    else:
        vector_info_list = []

    # get the literal or intersecting/unioned bounding box
    if isinstance(bounding_box_mode, (list, tuple)):
        target_bounding_box = bounding_box_mode
    else:
        # either intersection or union; plain module import so both
        # `functools.reduce` and `functools.partial` resolve consistently
        # (the original mixed `from functools import reduce` with a
        # `functools.partial` attribute reference)
        import functools
        target_bounding_box = functools.reduce(
            functools.partial(_merge_bounding_boxes, mode=bounding_box_mode),
            [info['bounding_box'] for info in
             (raster_info_list + vector_info_list)])

    if bounding_box_mode == "intersection" and (
            target_bounding_box[0] > target_bounding_box[2] or
            target_bounding_box[1] > target_bounding_box[3]):
        raise ValueError("The rasters' and vectors' intersection is empty "
                         "(not all rasters and vectors touch each other).")

    # Bug fix: the original tested `raster_align_index >= 0`, which raises
    # TypeError under Python 3 for the documented default of None.  The
    # validation above guarantees a non-None index is already in range.
    if raster_align_index is not None:
        # bounding box needs alignment
        align_bounding_box = (
            raster_info_list[raster_align_index]['bounding_box'])
        align_pixel_size = (
            raster_info_list[raster_align_index]['pixel_size'])
        # adjust bounding box so lower left corner aligns with a pixel in
        # raster[raster_align_index]
        for index in [0, 1]:
            n_pixels = int(
                (target_bounding_box[index] - align_bounding_box[index]) /
                float(align_pixel_size[index]))
            target_bounding_box[index] = (
                n_pixels * align_pixel_size[index] +
                align_bounding_box[index])

    for index, (base_path, target_path, resample_method) in enumerate(zip(
            base_raster_path_list, target_raster_path_list,
            resample_method_list)):
        # the lambda is invoked (or not) immediately inside the helper, so
        # late binding of `index` is not an issue here
        last_time = _invoke_timed_callback(
            last_time, lambda: LOGGER.info(
                "align_dataset_list aligning dataset %d of %d",
                index, len(base_raster_path_list)), _LOGGING_PERIOD)
        warp_raster(
            base_path, target_pixel_size,
            target_path, resample_method,
            target_bb=target_bounding_box,
            gtiff_creation_options=gtiff_creation_options)
def warp_raster(
        base_raster_path, target_pixel_size, target_raster_path,
        resample_method, target_bb=None, target_sr_wkt=None,
        gtiff_creation_options=_DEFAULT_GTIFF_CREATION_OPTIONS):
    """Resize/resample raster to desired pixel size, bbox and projection.

    Parameters:
        base_raster_path (string): path to base raster.
        target_pixel_size (list): a two element list or tuple indicating the
            x and y pixel size in projected units.
        target_raster_path (string): the location of the resized and
            resampled raster.
        resample_method (string): the resampling technique, one of
            "nearest|bilinear|cubic|cubic_spline|lanczos|mode"
        target_bb (list): if None, target bounding box is the same as the
            source bounding box. Otherwise it's a list of float describing
            target bounding box in target coordinate system as
            [minx, miny, maxx, maxy].
        target_sr_wkt (string): if not None, desired target projection in Well
            Known Text format.
        gtiff_creation_options (list or tuple): list of strings that will be
            passed as GDAL "dataset" creation options to the GTIFF driver.

    Returns:
        None
    """
    base_raster = gdal.OpenEx(base_raster_path)
    base_sr = osr.SpatialReference()
    base_sr.ImportFromWkt(base_raster.GetProjection())
    if target_bb is None:
        target_bb = get_raster_info(base_raster_path)['bounding_box']
        # transform the target_bb if target_sr_wkt is not None
        if target_sr_wkt is not None:
            target_bb = transform_bounding_box(
                get_raster_info(base_raster_path)['bounding_box'],
                get_raster_info(base_raster_path)['projection'],
                target_sr_wkt)
    target_geotransform = [
        target_bb[0], target_pixel_size[0], 0.0, target_bb[1], 0.0,
        target_pixel_size[1]]
    # this handles a case of a negative pixel size in which case the raster
    # row will increase downward
    if target_pixel_size[0] < 0:
        target_geotransform[0] = target_bb[2]
    if target_pixel_size[1] < 0:
        target_geotransform[3] = target_bb[3]
    target_x_size = abs((target_bb[2] - target_bb[0]) / target_pixel_size[0])
    target_y_size = abs((target_bb[3] - target_bb[1]) / target_pixel_size[1])
    # round the dimensions up so the target raster fully covers the bbox
    if target_x_size - int(target_x_size) > 0:
        target_x_size = int(target_x_size) + 1
    else:
        target_x_size = int(target_x_size)
    if target_y_size - int(target_y_size) > 0:
        target_y_size = int(target_y_size) + 1
    else:
        target_y_size = int(target_y_size)
    if target_x_size == 0:
        # fix: `Logger.warn` is a deprecated alias; use `warning`
        LOGGER.warning(
            "bounding_box is so small that x dimension rounds to 0; "
            "clamping to 1.")
        target_x_size = 1
    if target_y_size == 0:
        LOGGER.warning(
            "bounding_box is so small that y dimension rounds to 0; "
            "clamping to 1.")
        target_y_size = 1
    local_gtiff_creation_options = list(gtiff_creation_options)
    # PIXELTYPE is sometimes used to define signed vs. unsigned bytes and
    # the only place that is stored is in the IMAGE_STRUCTURE metadata
    # copy it over if it exists; get this info from the first band since
    # all bands have the same datatype
    base_band = base_raster.GetRasterBand(1)
    metadata = base_band.GetMetadata('IMAGE_STRUCTURE')
    if 'PIXELTYPE' in metadata:
        local_gtiff_creation_options.append(
            'PIXELTYPE=' + metadata['PIXELTYPE'])
    # make directory if it doesn't exist (also swallows the '' dirname case
    # when target_raster_path has no directory component)
    try:
        os.makedirs(os.path.dirname(target_raster_path))
    except OSError:
        pass
    gdal_driver = gdal.GetDriverByName('GTiff')
    target_raster = gdal_driver.Create(
        target_raster_path, target_x_size, target_y_size,
        base_raster.RasterCount, base_band.DataType,
        options=local_gtiff_creation_options)
    base_band = None
    # carry each band's nodata value over to the target, when defined
    for index in range(target_raster.RasterCount):
        base_nodata = base_raster.GetRasterBand(1+index).GetNoDataValue()
        if base_nodata is not None:
            target_band = target_raster.GetRasterBand(1+index)
            target_band.SetNoDataValue(base_nodata)
            target_band = None
    # Set the geotransform
    target_raster.SetGeoTransform(target_geotransform)
    if target_sr_wkt is None:
        target_sr_wkt = base_sr.ExportToWkt()
    target_raster.SetProjection(target_sr_wkt)
    # need to make this a closure so we get the current time and we can affect
    # state
    reproject_callback = _make_logger_callback(
        "ReprojectImage %.1f%% complete %s, psz_message '%s'")
    # Perform the projection/resampling
    gdal.ReprojectImage(
        base_raster, target_raster, base_sr.ExportToWkt(),
        target_sr_wkt, _RESAMPLE_DICT[resample_method], 0, 0,
        reproject_callback, [target_raster_path])
    # dereference GDAL objects to flush them to disk before computing stats
    target_raster = None
    base_raster = None
    calculate_raster_stats(target_raster_path)
def transform_bounding_box(
        bounding_box, base_ref_wkt, target_ref_wkt, edge_samples=11):
    """Transform input bounding box to output projection.

    Because a square bounding box can warp under reprojection, this routine
    samples `edge_samples` interpolated points along each edge of the input
    box, reprojects every sample, and keeps the extreme coordinate per edge,
    yielding the tightest axis-aligned box that contains the warped shape.

    Parameters:
        bounding_box (list): a list of 4 coordinates in `base_epsg` coordinate
            system describing the bound in the order [xmin, ymin, xmax, ymax]
        base_ref_wkt (string): the spatial reference of the input coordinate
            system in Well Known Text.
        target_ref_wkt (string): the spatial reference of the desired output
            coordinate system in Well Known Text.
        edge_samples (int): the number of interpolated points along each
            bounding box edge to sample along. A value of 2 will sample just
            the corners while a value of 3 will also sample the corners and
            the midpoint.

    Returns:
        A list of the form [xmin, ymin, xmax, ymax] that describes the largest
        fitting bounding box around the original warped bounding box in
        `new_epsg` coordinate system.
    """
    source_ref = osr.SpatialReference()
    source_ref.ImportFromWkt(base_ref_wkt)
    dest_ref = osr.SpatialReference()
    dest_ref.ImportFromWkt(target_ref_wkt)
    coord_trans = osr.CoordinateTransformation(source_ref, dest_ref)

    def _project(xy_pair):
        """Project an (x, y) pair from the base to the target system."""
        new_x, new_y, _ = coord_trans.TransformPoint(*xy_pair)
        return (new_x, new_y)

    # Corner points, numbered clockwise starting at the upper left:
    #   0--3
    #   |  |
    #   1--2
    upper_left = numpy.array((bounding_box[0], bounding_box[3]))
    lower_left = numpy.array((bounding_box[0], bounding_box[1]))
    lower_right = numpy.array((bounding_box[2], bounding_box[1]))
    upper_right = numpy.array((bounding_box[2], bounding_box[3]))

    # Each edge contributes one output coordinate: the left edge yields the
    # minimum x over its samples, the bottom edge the minimum y, the right
    # edge the maximum x, and the top edge the maximum y.
    edge_specs = [
        (upper_left, lower_left, lambda pts: min(p[0] for p in pts)),
        (lower_left, lower_right, lambda pts: min(p[1] for p in pts)),
        (lower_right, upper_right, lambda pts: max(p[0] for p in pts)),
        (upper_right, upper_left, lambda pts: max(p[1] for p in pts)),
    ]
    transformed_bounding_box = []
    for start_pt, end_pt, reducer in edge_specs:
        samples = [
            _project(start_pt * t + end_pt * (1 - t))
            for t in numpy.linspace(0, 1, edge_samples)]
        transformed_bounding_box.append(reducer(samples))
    return transformed_bounding_box
def _make_logger_callback(message):
"""Build a timed logger callback that prints `message` replaced.
Parameters:
message (string): a string that expects 3 placement %% variables,
first for % complete from `df_complete`, second `psz_message`
and last is `p_progress_arg[0]`.
Returns:
Function with signature:
logger_callback(df_complete, psz_message, p_progress_arg)
"""
def logger_callback(df_complete, psz_message, p_progress_arg):
"""The argument names come from the GDAL API for callbacks."""
try:
current_time = time.time()
if ((current_time - logger_callback.last_time) > 5.0 or
(df_complete == 1.0 and
logger_callback.total_time >= 5.0)):
LOGGER.info(
message, df_complete * 100, p_progress_arg[0],
psz_message)
logger_callback.last_time = current_time
logger_callback.total_time += current_time
except AttributeError:
logger_callback.last_time = time.time()
logger_callback.total_time = 0.0
return logger_callback
def _is_raster_path_band_formatted(raster_path_band):
"""Returns true if raster path band is a (str, int) tuple/list."""
if not isinstance(raster_path_band, (list, tuple)):
return False
elif len(raster_path_band) != 2:
return False
elif not isinstance(raster_path_band[0], str):
return False
elif not isinstance(raster_path_band[1], int):
return False
else:
return True
def zonal_statistics(
        base_raster_path_band, aggregate_vector_path,
        aggregate_field_name, aggregate_layer_name=None,
        ignore_nodata=True, all_touched=False, polygons_might_overlap=True,
        working_dir=None):
    """Collect stats on pixel values which lie within polygons.

    This function summarizes raster statistics including min, max,
    mean, stddev, and pixel count over the regions on the raster that are
    overlaped by the polygons in the vector layer. This function can
    handle cases where polygons overlap, which is notable since zonal stats
    functions provided by ArcGIS or QGIS usually incorrectly aggregate
    these areas. Overlap avoidance is achieved by calculating a minimal set
    of disjoint non-overlapping polygons from `aggregate_vector_path` and
    rasterizing each set separately during the raster aggregation phase. That
    set of rasters are then used to calculate the zonal stats of all polygons
    without aggregating vector overlap.

    Parameters:
        base_raster_path_band (tuple): a str/int tuple indicating the path to
            the base raster and the band index of that raster to analyze.
        aggregate_vector_path (string): a path to an ogr compatable polygon
            vector whose geometric features indicate the areas over
            `base_raster_path_band` to calculate statistics over.
        aggregate_field_name (string): field name in `aggregate_vector_path`
            that represents an identifying value for a given polygon. Result
            of this function will be indexed by the values found in this
            field.
        aggregate_layer_name (string): name of shapefile layer that will be
            used to aggregate results over. If set to None, the first layer
            in the DataSource will be used as retrieved by `.GetLayer()`.
            Note: it is normal and expected to set this field at None if the
            aggregating shapefile is a single layer as many shapefiles,
            including the common 'ESRI Shapefile', are.
        ignore_nodata: if true, then nodata pixels are not accounted for when
            calculating min, max, count, or mean. However, the value of
            `nodata_count` will always be the number of nodata pixels
            aggregated under the polygon.
        all_touched (boolean): if true will account for any pixel whose
            geometry passes through the pixel, not just the center point.
        polygons_might_overlap (boolean): if True the function calculates
            aggregation coverage close to optimally by rasterizing sets of
            polygons that don't overlap. However, this step can be
            computationally expensive for cases where there are many polygons.
            Setting this flag to False directs the function rasterize in one
            step.
        working_dir (string): If not None, indicates where temporary files
            should be created during this run.

    Returns:
        nested dictionary indexed by aggregating feature id, and then by one
        of 'min' 'max' 'sum' 'mean' 'count' and 'nodata_count'. Example:
        {0: {'min': 0, 'max': 1, 'mean': 0.5, 'count': 2, 'nodata_count': 1}}
    """
    import uuid
    import shutil
    if not _is_raster_path_band_formatted(base_raster_path_band):
        # bug fix: the original concatenated the tuple onto the format
        # string with `+`, which raises a TypeError instead of the intended
        # ValueError message; interpolate with `%` instead.
        raise ValueError(
            "`base_raster_path_band` not formatted as expected. Expects "
            "(path, band_index), received %s" % str(base_raster_path_band))
    aggregate_vector = gdal.OpenEx(aggregate_vector_path)
    if aggregate_layer_name is not None:
        aggregate_layer = aggregate_vector.GetLayerByName(
            aggregate_layer_name)
    else:
        aggregate_layer = aggregate_vector.GetLayer()
    aggregate_layer_defn = aggregate_layer.GetLayerDefn()
    aggregate_field_index = aggregate_layer_defn.GetFieldIndex(
        aggregate_field_name)
    if aggregate_field_index == -1:  # -1 returned when field does not exist.
        # Raise exception if user provided a field that's not in vector
        raise ValueError(
            'Vector %s must have a field named %s' %
            (aggregate_vector_path, aggregate_field_name))
    aggregate_field_def = aggregate_layer_defn.GetFieldDefn(
        aggregate_field_index)
    # create a new aggregate ID field to map base vector aggregate fields to
    # local ones that are guaranteed to be integers.
    local_aggregate_field_name = str(uuid.uuid4())[-8:-1]
    local_aggregate_field_def = ogr.FieldDefn(
        local_aggregate_field_name, ogr.OFTInteger)
    # Adding the rasterize by attribute option
    rasterize_layer_args = {
        'options': [
            'ALL_TOUCHED=%s' % str(all_touched).upper(),
            'ATTRIBUTE=%s' % local_aggregate_field_name]
    }
    # clip base raster to aggregating vector intersection
    raster_info = get_raster_info(base_raster_path_band[0])
    # -1 here because bands are 1 indexed
    # bug fix: `raster_nodata` was hard-coded to None (next to a leftover
    # debug print), so nodata pixels were never detected; look up the
    # band's nodata value as the adjacent comment intended.
    raster_nodata = raster_info['nodata'][base_raster_path_band[1] - 1]
    with tempfile.NamedTemporaryFile(
            prefix='clipped_raster', delete=False,
            dir=working_dir) as clipped_raster_file:
        clipped_raster_path = clipped_raster_file.name
    align_and_resize_raster_stack(
        [base_raster_path_band[0]], [clipped_raster_path], ['nearest'],
        raster_info['pixel_size'], 'intersection',
        base_vector_path_list=[aggregate_vector_path], raster_align_index=0)
    clipped_raster = gdal.OpenEx(clipped_raster_path)
    # make a shapefile that non-overlapping layers can be added to
    driver = ogr.GetDriverByName('ESRI Shapefile')
    disjoint_vector_dir = tempfile.mkdtemp(dir=working_dir)
    disjoint_vector = driver.CreateDataSource(
        os.path.join(disjoint_vector_dir, 'disjoint_vector.shp'))
    spat_ref = aggregate_layer.GetSpatialRef()
    # Initialize these dictionaries to have the shapefile fields in the
    # original datasource even if we don't pick up a value later
    base_to_local_aggregate_value = {}
    for feature in aggregate_layer:
        aggregate_field_value = feature.GetField(aggregate_field_name)
        # this builds up a map of aggregate field values to unique ids
        if aggregate_field_value not in base_to_local_aggregate_value:
            base_to_local_aggregate_value[aggregate_field_value] = len(
                base_to_local_aggregate_value)
    aggregate_layer.ResetReading()
    # Loop over each polygon and aggregate
    if polygons_might_overlap:
        minimal_polygon_sets = calculate_disjoint_polygon_set(
            aggregate_vector_path)
    else:
        minimal_polygon_sets = [
            set([feat.GetFID() for feat in aggregate_layer])]
    clipped_band = clipped_raster.GetRasterBand(base_raster_path_band[1])
    with tempfile.NamedTemporaryFile(
            prefix='aggregate_id_raster',
            delete=False, dir=working_dir) as aggregate_id_raster_file:
        aggregate_id_raster_path = aggregate_id_raster_file.name
    # use one more than the number of local ids as the nodata marker so it
    # can never collide with a real local aggregate id
    aggregate_id_nodata = len(base_to_local_aggregate_value)
    new_raster_from_base(
        clipped_raster_path, aggregate_id_raster_path, gdal.GDT_Int32,
        [aggregate_id_nodata])
    aggregate_id_raster = gdal.OpenEx(aggregate_id_raster_path, gdal.GA_Update)
    aggregate_stats = {}
    for polygon_set in minimal_polygon_sets:
        disjoint_layer = disjoint_vector.CreateLayer(
            'disjoint_vector', spat_ref, ogr.wkbPolygon)
        disjoint_layer.CreateField(local_aggregate_field_def)
        # add polygons to subset_layer
        for index, poly_fid in enumerate(polygon_set):
            poly_feat = aggregate_layer.GetFeature(poly_fid)
            disjoint_layer.CreateFeature(poly_feat)
            # we seem to need to reload the feature and set the index because
            # just copying over the feature left indexes as all 0s. Not sure
            # why.
            new_feat = disjoint_layer.GetFeature(index)
            new_feat.SetField(
                local_aggregate_field_name, base_to_local_aggregate_value[
                    poly_feat.GetField(aggregate_field_name)])
            disjoint_layer.SetFeature(new_feat)
        disjoint_layer.SyncToDisk()
        # nodata out the mask
        aggregate_id_band = aggregate_id_raster.GetRasterBand(1)
        aggregate_id_band.Fill(aggregate_id_nodata)
        aggregate_id_band = None
        gdal.RasterizeLayer(
            aggregate_id_raster, [1], disjoint_layer, **rasterize_layer_args)
        aggregate_id_raster.FlushCache()
        # Delete the features we just added to the subset_layer
        disjoint_layer = None
        disjoint_vector.DeleteLayer(0)
        # create a key array
        # and parallel min, max, count, and nodata count arrays
        for aggregate_id_offsets, aggregate_id_block in iterblocks(
                aggregate_id_raster_path):
            clipped_block = clipped_band.ReadAsArray(**aggregate_id_offsets)
            # guard against a None nodata type
            valid_mask = numpy.ones(aggregate_id_block.shape, dtype=bool)
            if aggregate_id_nodata is not None:
                valid_mask[:] = aggregate_id_block != aggregate_id_nodata
            valid_aggregate_id = aggregate_id_block[valid_mask]
            valid_clipped = clipped_block[valid_mask]
            for aggregate_id in numpy.unique(valid_aggregate_id):
                aggregate_mask = valid_aggregate_id == aggregate_id
                masked_clipped_block = valid_clipped[aggregate_mask]
                # bug fix: guard against an undefined nodata value --
                # comparing a numpy array against None is not a nodata test
                if raster_nodata is not None:
                    clipped_nodata_mask = (
                        masked_clipped_block == raster_nodata)
                else:
                    clipped_nodata_mask = numpy.zeros(
                        masked_clipped_block.shape, dtype=bool)
                if aggregate_id not in aggregate_stats:
                    aggregate_stats[aggregate_id] = {
                        'min': None,
                        'max': None,
                        'count': 0,
                        'nodata_count': 0,
                        'sum': 0.0
                    }
                aggregate_stats[aggregate_id]['nodata_count'] += (
                    numpy.count_nonzero(clipped_nodata_mask))
                if ignore_nodata:
                    masked_clipped_block = (
                        masked_clipped_block[~clipped_nodata_mask])
                if masked_clipped_block.size == 0:
                    continue
                if aggregate_stats[aggregate_id]['min'] is None:
                    aggregate_stats[aggregate_id]['min'] = (
                        masked_clipped_block[0])
                    aggregate_stats[aggregate_id]['max'] = (
                        masked_clipped_block[0])
                aggregate_stats[aggregate_id]['min'] = min(
                    numpy.min(masked_clipped_block),
                    aggregate_stats[aggregate_id]['min'])
                aggregate_stats[aggregate_id]['max'] = max(
                    numpy.max(masked_clipped_block),
                    aggregate_stats[aggregate_id]['max'])
                aggregate_stats[aggregate_id]['count'] += (
                    masked_clipped_block.size)
                aggregate_stats[aggregate_id]['sum'] += numpy.sum(
                    masked_clipped_block)
    # clean up temporary files
    clipped_band = None
    clipped_raster = None
    aggregate_id_raster = None
    disjoint_layer = None
    disjoint_vector = None
    for filename in [aggregate_id_raster_path, clipped_raster_path]:
        os.remove(filename)
    shutil.rmtree(disjoint_vector_dir)
    # map the local ids back to the original base value
    # py3 fix: dict.iteritems() no longer exists; use items()
    local_to_base_aggregate_value = {
        value: key for key, value in
        base_to_local_aggregate_value.items()}
    return {
        local_to_base_aggregate_value[key]: value
        for key, value in aggregate_stats.items()}
def calculate_disjoint_polygon_set(vector_path, layer_index=0):
    """Create a list of sets of polygons that don't overlap.

    Determining the minimal number of those sets is an np-complete problem so
    this is an approximation that builds up sets of maximal subsets.

    Parameters:
        vector_path (string): a path to an OGR vector.
        layer_index (int): index of underlying layer in `vector_path` to
            calculate disjoint set. Defaults to 0.

    Returns:
        subset_list (list): list of sets of FIDs from vector_path
    """
    import heapq
    vector = gdal.OpenEx(vector_path)
    vector_layer = vector.GetLayer()
    # map of FID -> {'poly': shapely geometry, 'intersects': set of FIDs}
    poly_intersect_lookup = {}
    for poly_feat in vector_layer:
        poly_wkt = poly_feat.GetGeometryRef().ExportToWkt()
        shapely_polygon = shapely.wkt.loads(poly_wkt)
        poly_wkt = None
        poly_fid = poly_feat.GetFID()
        poly_intersect_lookup[poly_fid] = {
            'poly': shapely_polygon,
            'intersects': set(),
        }
    vector_layer = None
    vector = None
    # record, for every polygon, which other polygons it intersects; use a
    # prepared geometry so repeated intersects() tests are fast
    for poly_fid in poly_intersect_lookup:
        polygon = shapely.prepared.prep(
            poly_intersect_lookup[poly_fid]['poly'])
        for intersect_poly_fid in poly_intersect_lookup:
            if intersect_poly_fid == poly_fid or polygon.intersects(
                    poly_intersect_lookup[intersect_poly_fid]['poly']):
                poly_intersect_lookup[poly_fid]['intersects'].add(
                    intersect_poly_fid)
        polygon = None
    # Build maximal subsets
    subset_list = []
    while len(poly_intersect_lookup) > 0:
        # sort polygons by increasing number of intersections
        heap = []
        # py3 fix: dict.iteritems() no longer exists; use items()
        for poly_fid, poly_dict in poly_intersect_lookup.items():
            heapq.heappush(
                heap, (len(poly_dict['intersects']), poly_fid, poly_dict))
        # build maximal subset
        maximal_set = set()
        while len(heap) > 0:
            _, poly_fid, poly_dict = heapq.heappop(heap)
            for maxset_fid in maximal_set:
                if maxset_fid in poly_intersect_lookup[poly_fid]['intersects']:
                    # it intersects and can't be part of the maximal subset
                    break
            else:
                # made it through without an intersection, add poly_fid to
                # the maximal set
                maximal_set.add(poly_fid)
                # remove that polygon and update the intersections
                del poly_intersect_lookup[poly_fid]
        # remove all the polygons from intersections once they're computed
        for maxset_fid in maximal_set:
            # py3 fix: dict.itervalues() no longer exists; use values()
            for poly_dict in poly_intersect_lookup.values():
                poly_dict['intersects'].discard(maxset_fid)
        subset_list.append(maximal_set)
    return subset_list
def calculate_raster_stats(raster_path):
    """Calculate and set min, max, stdev, and mean for all bands in raster.

    Parameters:
        raster_path (string): a path to a GDAL raster raster that will be
            modified by having its band statistics set

    Returns:
        None
    """
    raster = gdal.OpenEx(raster_path, gdal.GA_Update)
    raster_properties = get_raster_info(raster_path)
    for band_index in range(raster.RasterCount):
        target_min = None
        target_max = None
        target_n = 0
        target_sum = 0.0
        # first pass: accumulate min/max/count/sum over non-nodata pixels
        for _, target_block in iterblocks(
                raster_path, band_index_list=[band_index+1]):
            nodata_target = raster_properties['nodata'][band_index]
            # guard against an undefined nodata target
            valid_mask = numpy.ones(target_block.shape, dtype=bool)
            if nodata_target is not None:
                valid_mask[:] = target_block != nodata_target
            valid_block = target_block[valid_mask]
            if valid_block.size == 0:
                continue
            if target_min is None:
                # initialize first min/max
                target_min = target_max = valid_block[0]
            target_sum += numpy.sum(valid_block)
            target_min = min(numpy.min(valid_block), target_min)
            target_max = max(numpy.max(valid_block), target_max)
            target_n += valid_block.size
        if target_min is not None:
            target_mean = target_sum / float(target_n)
            stdev_sum = 0.0
            # second pass: accumulate squared deviations for the stddev;
            # `nodata_target` carries over from the first pass, which is
            # safe because it is constant for a given band
            for _, target_block in iterblocks(
                    raster_path, band_index_list=[band_index+1]):
                # guard against an undefined nodata target
                valid_mask = numpy.ones(target_block.shape, dtype=bool)
                if nodata_target is not None:
                    valid_mask = target_block != nodata_target
                valid_block = target_block[valid_mask]
                stdev_sum += numpy.sum((valid_block - target_mean) ** 2)
            target_stddev = (stdev_sum / float(target_n)) ** 0.5
            target_band = raster.GetRasterBand(band_index+1)
            target_band.SetStatistics(
                float(target_min), float(target_max), float(target_mean),
                float(target_stddev))
            target_band = None
        else:
            # fix: `Logger.warn` is a deprecated alias; use `warning`
            LOGGER.warning(
                "Stats not calculated for %s band %d since no non-nodata "
                "pixels were found.", raster_path, band_index+1)
    # dereference to flush the statistics to disk
    raster = None
| [
11748,
28686,
198,
11748,
279,
9945,
198,
11748,
640,
198,
11748,
11454,
3919,
198,
11748,
5485,
306,
13,
86,
21841,
198,
11748,
5485,
306,
13,
2840,
198,
6738,
5485,
306,
1330,
2866,
4739,
198,
11748,
5485,
306,
13,
3866,
29190,
198,
... | 2.35225 | 41,885 |
import random
from ..serializers.course_serializers import CourseSerializer
from ..serializers.partition_serializer import PartitionSerializer, PartitionTaskSerializer
from ..models.user_course import UserCourse
from ..models.course import Course
from ..models.partition import Partition
from ..models.partition_task import PartitionTask
# Single module-level CourseService instance.
course_service = CourseService()
| [
11748,
4738,
198,
198,
6738,
11485,
46911,
11341,
13,
17319,
62,
46911,
11341,
1330,
20537,
32634,
7509,
198,
6738,
11485,
46911,
11341,
13,
3911,
653,
62,
46911,
7509,
1330,
2142,
653,
32634,
7509,
11,
2142,
653,
25714,
32634,
7509,
198,... | 4.10989 | 91 |
# Generated by Django 2.0.4 on 2018-04-30 11:18
from django.db import migrations, models
import django.utils.timezone
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
13,
19,
319,
2864,
12,
3023,
12,
1270,
1367,
25,
1507,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
26791,
13,
2435,
11340,
628
] | 2.926829 | 41 |
#! python3
import datetime
import json
from csscompressor import compress
# Run Python script
if __name__ == '__main__':
    # Entry point when executed as a script; main() is expected to be
    # defined earlier in this file.
    main()
| [
2,
0,
21015,
18,
198,
11748,
4818,
8079,
198,
11748,
33918,
198,
6738,
269,
824,
5589,
44292,
1330,
27413,
198,
198,
2,
5660,
11361,
4226,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419,
... | 3.093023 | 43 |
"""Demo to visualize data pipeline output."""
import matplotlib.pyplot as plt
import tensorflow as tf
from dataloaders.s3dis import create_s3dis_dataset
if __name__ == "__main__":
    # Run the demo; create_pointcloud_dump=False presumably disables
    # writing point-cloud dump files -- confirm against main()'s signature.
    main(create_pointcloud_dump=False)
| [
37811,
11522,
78,
284,
38350,
1366,
11523,
5072,
526,
15931,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
4818,
282,
1170,
364,
13,
82,
18,
6381,
1330,
2251,
62,
8... | 2.933333 | 75 |
"""
The top-level library for Heron's Python DSL, which enables you to write Heron
[topologies](https://twitter.github.io/heron/docs/concepts/topologies/) in
a Python DSL.
Heron topologies are acyclic graphs used to process streaming data. Topologies
have two major components:
[spouts](spout/spout.m.html#heron_py.spout.spout.Spout) pull data into the
topology and then [emit](spout/spout.m.html#heron_py.spout.spout.Spout.emit)
that data as tuples (lists in Python) to
[bolts](bolt/bolt.m.html#heron_py.bolt.bolt.Bolt) that process that data.
"""
# Load basic dsl modules
from .streamlet import Streamlet, OperationType, TimeWindow
| [
37811,
198,
464,
1353,
12,
5715,
5888,
329,
2332,
261,
338,
11361,
32643,
11,
543,
13536,
345,
284,
3551,
2332,
261,
198,
58,
4852,
5823,
16151,
5450,
1378,
6956,
13,
12567,
13,
952,
14,
372,
261,
14,
31628,
14,
43169,
82,
14,
4852,... | 2.985915 | 213 |
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import AveragePooling2D
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.utils import to_categorical
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
import matplotlib.pyplot as plt
import cv2
import numpy as np
import os
# --- Hyperparameters ---
# initial learning rate for the Adam optimizer
INIT_LR = 1e-4
# number of training epochs
EPOCHS = 5
# batch size
BS = 32
# dataset root: one subdirectory per class label
DIRECTORY = "dataset"
CATEGORIES = os.listdir(DIRECTORY) # e.g. ["with_mask", "without_mask"]
print("[INFO] loading images...")
# `data` collects the preprocessed image arrays, `labels` the class names
data = []
labels = []
# Preprocessing: load every image, resize to MobileNetV2's expected
# 224x224 input, and scale pixel values with the model's preprocess_input.
for category in CATEGORIES:
    path = os.path.join(DIRECTORY, category)
    for img_name in os.listdir(path):
        img_path = os.path.join(path, img_name)
        image = load_img(img_path, target_size=(224, 224))
        image = img_to_array(image)
        image = preprocess_input(image)
        data.append(image)
        labels.append(category)
# one-hot encode the string labels
lb = LabelBinarizer()
labels = lb.fit_transform(labels)
labels = to_categorical(labels)
data = np.array(data, dtype="float32")
labels = np.array(labels)
# stratified 80/20 train/test split with a fixed seed for reproducibility
trainX, testX, trainY, testY = train_test_split(
    data, labels, test_size=0.2, stratify=labels, random_state=42
)
# trainX = data
# testX = data
# trainY = labels
# testY = labels
# Learning
# on-the-fly augmentation applied to training batches only
aug = ImageDataGenerator(
    rotation_range=30,
    zoom_range=0.10,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.10,
    horizontal_flip=True,
    fill_mode="nearest",
)
# MobileNetV2 backbone pretrained on ImageNet, without its classifier head
baseModel = MobileNetV2(
    weights="imagenet", include_top=False, input_tensor=Input(shape=(224, 224, 3))
)
# new 2-class classification head stacked on the backbone's output
headModel = baseModel.output
headModel = AveragePooling2D(pool_size=(7, 7))(headModel)
headModel = Flatten(name="flatten")(headModel)
headModel = Dense(128, activation="relu")(headModel)
headModel = Dropout(0.5)(headModel)
headModel = Dense(2, activation="softmax")(headModel)
model = Model(inputs=baseModel.input, outputs=headModel)
# freeze the backbone so only the new head is trained
for layer in baseModel.layers:
    layer.trainable = False
print("[INFO] compiling model...")
opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])
print("[INFO] training head")
H = model.fit(
    aug.flow(trainX, trainY, batch_size=BS),
    steps_per_epoch=len(trainX) // BS,
    validation_data=(testX, testY),
    validation_steps=len(testX) // BS,
    epochs=EPOCHS,
)
print("[INFO] evaluating network")
predIdxs = model.predict(testX, batch_size=BS)
# index of the highest-probability class for each test sample
predIdxs = np.argmax(predIdxs, axis=1)
print(classification_report(testY.argmax(axis=1), predIdxs, target_names=lb.classes_))
print("[INFO] saving mask detector model...")
model.save("mask_detector.model", save_format="h5")
| [
6738,
11192,
273,
11125,
13,
6122,
292,
13,
3866,
36948,
13,
9060,
1330,
7412,
6601,
8645,
1352,
198,
6738,
11192,
273,
11125,
13,
6122,
292,
13,
3866,
36948,
13,
9060,
1330,
33705,
62,
1462,
62,
18747,
198,
6738,
11192,
273,
11125,
1... | 2.673877 | 1,202 |
""" Unittest """
import unittest
# pylint: disable=import-error
from _utils.statics import inhabitants
class TestModuleStaticsInhabitants(unittest.TestCase):
    """ Test Module for statics.inhabitants """

    def test_total(self):
        """ Test for inhabitants.TOTAL """
        # assertIsInstance is the idiomatic type check and gives a clearer
        # failure message than comparing type names as strings
        self.assertIsInstance(inhabitants.TOTAL, int)

    def test_states(self):
        """ Test for inhabitants.STATES """
        self.assertIsInstance(inhabitants.STATES, dict)
if __name__ == '__main__':
    # Run the test suite when this module is executed directly.
    unittest.main()
| [
37811,
791,
715,
395,
37227,
198,
11748,
555,
715,
395,
198,
2,
279,
2645,
600,
25,
15560,
28,
11748,
12,
18224,
198,
6738,
4808,
26791,
13,
14269,
873,
1330,
17622,
198,
198,
4871,
6208,
26796,
17126,
873,
818,
5976,
270,
1187,
7,
... | 2.821229 | 179 |
#!/usr/bin/python
"""
Register brains, landmarks, and labels to a template.
(c) 2011, @rno klein
"""
import os
from os.path import exists
from subprocess import call
from numpy import float, isnan
# Run intensity-based registration
# 1. Register brains to template
# 2. Transform brains to each other via template
# 3. Transform landmarks to template
# Pipeline step toggles: 1 = run the step, 0 = skip it.
register_to_template = 1
transform_pairs_via_template = 1
transform_landmarks_to_template = 0
# Run landmark-driven registration to template:
register_landmarks_to_template = 0
transform_landmarks_via_template = 0
# Atlas-based evaluation for the above settings:
# 1. prepare target atlas mask
# 2. transform source atlas
# 3. fill #1 with #2
# 4. measure overlap of #3 with target atlas labels
prepare_target_mask = 0
evaluate_with_atlases = 1
verbose = 1  # 1 = echo each external command before running it
dim = 3  # image dimensionality passed to the ANTS tools
#
# Files
#
# Subject IDs; every source is registered/compared against every target.
source_files = ['m1','m2','m3','m4','m5','m6','m7','m8','m9','m10','m11','m12']
target_files = ['m1','m2','m3','m4','m5','m6','m7','m8','m9','m10','m11','m12']
#source_files = ['m1','m2','m3','m4']#,'m5','m6']
#target_files = ['m1','m2','m3','m4']#,'m5','m6']
# Tool locations. NOTE(review): if ANTSPATH is unset this is None and the
# string concatenations below ("warp", "apply_warp") will raise TypeError.
ANTSPATH = os.environ.get("ANTSPATH")
FSLPATH = '/usr/local/fsl/bin/'
# Output directories for transforms and transformed images.
out_path = '/hd2/Archive/registration_evaluation_2011_output/'
xfm_dir = os.path.join( out_path, 'Transforms/')
xfm_brain_dir = os.path.join( out_path, 'Transformed_Brains/')
xfm_landmarks_dir = os.path.join( out_path, 'Transformed_Landmarks/')
xfm_atlas_dir = os.path.join( out_path, 'Transformed_Atlases/')
# Input data (CUMC12 dataset).
atlas_dir = '/hd2/Brains/CUMC12/Atlases/'
brain_dir = '/hd2/Brains/CUMC12/Brains/'
brainmask_dir = '/hd2/Brains/CUMC12/BrainMasks/'
ext = '.nii.gz'
template = '/hd2/Brains/CUMC12/CUMC12template.nii.gz'
# NOTE(review): the landmarks_dir / landmark_type pairs below overwrite one
# another; only the LAST pair ('fundi_forrest_bao') takes effect.  Comment
# out all but the desired pair to select a different landmark set.
landmarks_dir = '/hd2/Brains/CUMC12/Landmarks/pits_kiho_im_binary/'
landmark_type = 'pits_kiho_im'
landmarks_dir = '/hd2/Brains/CUMC12/Landmarks/pits_yrjo_hame_binary/'
landmark_type = 'pits_yrjo_hame'
landmarks_dir = '/hd2/Brains/CUMC12/Landmarks/pits_forrest_bao_binary/'
landmark_type = 'pits_forrest_bao'
landmarks_dir = '/hd2/Brains/CUMC12/Landmarks/ribbons_brain_visa_binary/'
landmark_type = 'ribbons_brain_visa'
landmarks_dir = '/hd2/Brains/CUMC12/Landmarks/fundi_gang_li_binary/'
landmark_type = 'fundi_gang_li'
landmarks_dir = '/hd2/Brains/CUMC12/Landmarks/fundi_brain_visa_binary/'
landmark_type = 'fundi_brain_visa'
landmarks_dir = '/hd2/Brains/CUMC12/Landmarks/fundi_forrest_bao_binary/'
landmark_type = 'fundi_forrest_bao'
results_dir = os.path.join( out_path, 'Results/')
label_file = 'CUMC12_labels_regions.txt'  # rows start with an integer label id
#
# Registration parameters
#
gradient_step_size = 0.5
iterations = "30x100x10"
options = " --use-Histogram-Matching"
initialize = " --number-of-affine-iterations 10000x10000x10000x10000x10000"
# Base ANTS command line for nonlinear (SyN) registration.
warp = ANTSPATH + "ANTS " + str(dim) + " -t SyN[" + str(gradient_step_size) +"] -i " + \
    str(iterations) + options + initialize
apply_warp = ANTSPATH + "WarpImageMultiTransform " + str(dim)
#
# Regularization parameters
#
regularizer = "Gauss"
regularizer_setting = 3
deformation_field_sigma = 0
regularize = "-r Gauss[" + str(regularizer_setting) + ", " + \
    str(deformation_field_sigma) + "]"
#
# Intensity parameters
#
intensity_measure = "CC"  # cross-correlation similarity metric
intensity_weight = 1.0
intensity_setting = 3
#
# Landmark parameters
#
landmark_measure1 = "PSE"  # point-set expectation
landmark_measure2 = "MSQ"  # mean squared difference
landmark_weight1 = 0.1
landmark_weight2 = 0.1
percent = 1.0 # real number: 1.0 = 100%
boundary = 0 # 0: not only boundaries
sigma = 10
neighbor = 100
matching_iter = 100000 # partial matching iterations
# Load the integer label IDs (first whitespace-delimited column of each row
# in the label table) used by the atlas-overlap evaluation steps below.
if evaluate_with_atlases:
    # Context manager guarantees the handle is closed even on a parse error.
    with open(label_file, 'r') as f:
        label_table = f.readlines()
    labels = []
    for row in label_table:
        labels.append(int(row.split()[0]))
#------------------------------------------
# Register brains and landmarks to template
#------------------------------------------
# Runs if any of the three template-space steps is enabled (flags are 0/1).
if register_to_template + transform_landmarks_to_template + \
    prepare_target_mask > 0:
    for file in source_files:
        source = brain_dir+file+ext
        output = xfm_dir+file+'_to_template'
        out = '-o ' + output+ext
        if os.path.exists(source) and os.path.exists(template) and os.path.exists(xfm_dir):
            # Intensity-based registration to template:
            if register_to_template:
                intensity = [template, source, intensity_weight, intensity_setting]
                intensity = "-m "+intensity_measure+"[" + ", ".join([str(s) for s in intensity]) + "]"
                args = " ".join([warp, regularize, intensity, out])
                # NOTE(review): shell="True" passes a (truthy) string where
                # subprocess expects the boolean True; it works, but should
                # be shell=True.  Same pattern throughout this script.
                if verbose: print(args); print(''); p = call(args, shell="True")
            # Prepare binary (target atlas) masks for filling with labels:
            if prepare_target_mask:
                args = " ".join(['c3d', atlas_dir+file+ext, '-binarize -o', brainmask_dir+file+ext])
                if verbose: print(args); print(''); p = call(args, shell="True")
            # Transform landmarks to template space:
            if transform_landmarks_to_template:
                source_landmarks = landmarks_dir+file+ext
                output_landmarks = xfm_landmarks_dir+file+'_to_template_'+landmark_type+ext
                # NOTE(review): os.path.exists() never raises, so this
                # try/except can never fire — the intended existence check
                # is effectively a no-op and the NameError is unreachable.
                try:
                    os.path.exists(source_landmarks) and os.path.exists(xfm_landmarks_dir)
                except:
                    raise NameError('Check ' + source_landmarks + ' and ' + xfm_landmarks_dir)
                args = " ".join([apply_warp, source_landmarks, output_landmarks, \
                    '-R', template, output+'Warp'+ext, output+'Affine.txt', '--use-NN'])
                if verbose: print(args); print(''); p = call(args, shell="True")
        else:
            # Report the first missing prerequisite.
            if not os.path.exists(source):
                raise NameError('Check input file ' + source)
            elif not os.path.exists(template):
                raise NameError('Check input file ' + template)
            elif not os.path.exists(xfm_dir):
                raise NameError('Check input file ' + xfm_dir)
#--------------------------------------------------------------
# Register landmarks to transformed landmarks in template space
#--------------------------------------------------------------
if register_landmarks_to_template:
    for file in source_files:
        source = brain_dir+file+ext
        source_landmarks = landmarks_dir+file+ext
        for file2 in target_files:
            if file2 != file:
                template_landmarks = xfm_landmarks_dir+file2+'_to_template_'+landmark_type+ext
                output_xfm = xfm_dir+file+'_to_'+file2+'_in_template_space_'+landmark_type+ext
                if os.path.exists(source) and os.path.exists(template) and \
                    os.path.exists(source_landmarks) and os.path.exists(template_landmarks):
                    # Intensity similarity:
                    intensity = [template, source, intensity_weight, intensity_setting]
                    intensity = " -m "+intensity_measure+"[" + ", ".join([str(s) for s in intensity]) + "]"
                    # Landmark similarity:
                    lm_args1 = [template, source, template_landmarks, source_landmarks,
                                landmark_weight1, percent, sigma, boundary, neighbor, matching_iter]
                    landmarks1 = ", ".join([" -m PSE[" + ", ".join([str(s) for s in lm_args1]) + "]"])
                    lm_args2 = [template_landmarks, source_landmarks, landmark_weight2, 0]
                    landmarks2 = " ".join([" -m MSQ[" + ", ".join([str(s) for s in lm_args2]) + "]"])
                    #
                    # Run command
                    #
                    args = " ".join([warp, '-o', output_xfm, regularize, intensity, landmarks1, landmarks2])
                    if verbose: print(args); print(''); p = call(args, shell="True")
                else:
                    if not os.path.exists(source):
                        raise NameError('Check input file ' + source)
                    elif not os.path.exists(template):
                        raise NameError('Check input file ' + template)
                    elif not os.path.exists(source_landmarks):
                        raise NameError('Check input file ' + source_landmarks)
                    elif not os.path.exists(template_landmarks):
                        raise NameError('Check input file ' + template_landmarks)
#----------------------------------------------
# Apply intensity-based registration transforms
# to register brains to each other via template
#----------------------------------------------
if transform_pairs_via_template:
    if evaluate_with_atlases:
        avg_results_file = results_dir+'dice_jacc_overlaps.txt'
        f_avg = open(avg_results_file, 'w');
    for file in source_files:
        source = brain_dir+file+ext
        for file2 in target_files:
            if file2 != file:
                target = brain_dir+file2+ext
                if os.path.exists(brain_dir+file+ext) and \
                    os.path.exists(brain_dir+file2+ext) and \
                    os.path.exists(xfm_dir+file+'_to_templateWarp.nii.gz'):
                    output_stem = file + '_to_' + file2
                    # Transform brains
                    args = " ".join([ANTSPATH+'WarpImageMultiTransform', str(dim), \
                        source, xfm_brain_dir+output_stem+ext, '-R',target, \
                        '-i', xfm_dir+file2+'_to_templateAffine.txt', \
                        xfm_dir+file2+'_to_templateInverseWarp.nii.gz', \
                        xfm_dir+file+'_to_templateWarp.nii.gz', \
                        xfm_dir+file+'_to_templateAffine.txt'])
                    #if verbose: print(args); print(''); p = call(args, shell="True")
                    if evaluate_with_atlases:
                        # Transform atlases
                        source_labels = atlas_dir+file+ext
                        target_labels = atlas_dir+file2+ext
                        args = " ".join([ANTSPATH+'WarpImageMultiTransform', str(dim), \
                            source_labels, xfm_atlas_dir+output_stem+ext, '-R', target_labels, \
                            '-i', xfm_dir+file2+'_to_templateAffine.txt', \
                            xfm_dir+file2+'_to_templateInverseWarp.nii.gz', \
                            xfm_dir+file+'_to_templateWarp.nii.gz', \
                            xfm_dir+file+'_to_templateAffine.txt','--use-NN'])
                        #if verbose: print(args); print(''); p = call(args, shell="True")
                        # Fill target atlas mask with transformed source atlas labels
                        args = " ".join(['ImageMath', str(dim), xfm_atlas_dir+output_stem+'_filled'+ext, \
                            'PropagateLabelsThroughMask', brainmask_dir+file2+ext, \
                            xfm_atlas_dir+output_stem+ext])
                        #if verbose: print(args); print(''); p = call(args, shell="True")
                        # Measure overlap of target atlas and transformed source atlas labels
                        results_file = results_dir+output_stem+'.txt'
                        f_eval = open(results_file, 'w');
                        average_dice = 0
                        average_jacc = 0
                        print(results_file)
                        for label in labels:
                            args = " ".join(['c3d', xfm_atlas_dir+output_stem+'_filled'+ext, \
                                atlas_dir+file2+ext, '-overlap', str(label), \
                                '>'+results_dir+'temp_overlap.txt'])
                            p = call(args, shell="True")
                            # NOTE(review): this handle is never closed
                            # (leaks one per label until GC).
                            f = open(results_dir+'temp_overlap.txt','r')
                            temp = f.read()
                            # c3d -overlap output: Dice and Jaccard are the
                            # last two comma-terminated fields.
                            if temp != '':
                                dice = float(temp.split()[-2].split(',')[0])
                                jacc = float(temp.split()[-1].split(',')[0])
                            else:
                                dice = 0.0
                                jacc = 0.0
                            if isnan(dice):
                                dice = 0.0
                            if isnan(jacc):
                                jacc = 0.0
                            print_out = ' '.join(['Label:', str(label), 'Dice:', str(dice), \
                                'Jaccard:', str(jacc)])
                            print(print_out)
                            # Close/reopen in append mode to flush each line.
                            f_eval.close()
                            f_eval = open(results_file, 'a')
                            f_eval.write(print_out + '\n')
                            average_dice += dice
                            average_jacc += jacc
                        average_dice = average_dice/len(labels)
                        average_jacc = average_jacc/len(labels)
                        print_out1 = 'Average Dice: ' + str(average_dice)
                        print_out2 = 'Average Jacc: ' + str(average_jacc)
                        print(print_out1);
                        print(print_out2)
                        f_eval.close()
                        f_eval = open(results_file, 'a')
                        f_eval.write(print_out1 + '\n' + print_out2 + '\n')
                        f_eval.close()
                        f_avg.close()
                        f_avg = open(avg_results_file, 'a');
                        f_avg.write(output_stem + ' ' + str(average_dice) + ' ' + str(average_jacc) + '\n')
                else:
                    # NOTE(review): the last check/report uses
                    # xfm_dir+file+'Warp.nii.gz', but the branch above tests
                    # xfm_dir+file+'_to_templateWarp.nii.gz' — the reported
                    # filename is inconsistent with the actual requirement.
                    if not os.path.exists(brain_dir+file+ext):
                        raise NameError('Check input file ' + brain_dir+file+ext)
                    elif not os.path.exists(brain_dir+file2+ext):
                        raise NameError('Check input file ' + brain_dir+file2+ext)
                    elif not os.path.exists(xfm_dir+file+'Warp.nii.gz'):
                        raise NameError('Check input file ' + xfm_dir+file+'Warp.nii.gz')
    if evaluate_with_atlases:
        f_avg.close()
#----------------------------------------------
# Apply landmark-driven registration transforms
# to register brains to each other via template
#----------------------------------------------
if transform_landmarks_via_template:
    if evaluate_with_atlases:
        avg_results_file = results_dir+'dice_jacc_overlaps_'+landmark_type+'.txt'
        f_avg = open(avg_results_file, 'w');
    for file in source_files:
        source = brain_dir+file+ext
        source_landmarks = landmarks_dir+file+ext
        for file2 in target_files:
            if file2 != file:
                target = brain_dir+file2+ext
                target_landmarks = landmarks_dir+file2+ext
                if os.path.exists(source) and \
                    os.path.exists(target) and \
                    os.path.exists(source_landmarks) and \
                    os.path.exists(target_landmarks):
                    pair = file+'_to_'+file2
                    inv_pair = file2+'_to_'+file
                    output_stem = pair+'_'+landmark_type
                    xfm_stem = xfm_dir+pair+'_in_template_space_'+landmark_type
                    inv_xfm_stem = xfm_dir+inv_pair+'_in_template_space_'+landmark_type
                    # Transform brains (skipped if the output already exists)
                    if not os.path.exists(xfm_brain_dir+output_stem+ext):
                        args = " ".join([ANTSPATH+'WarpImageMultiTransform', str(dim), \
                            source, xfm_brain_dir+output_stem+ext, '-R', target, \
                            '-i', inv_xfm_stem+'Affine.txt', \
                            inv_xfm_stem+'InverseWarp.nii.gz', \
                            xfm_stem+'Warp.nii.gz', \
                            xfm_stem+'Affine.txt'])
                        if verbose: print(args); print(''); p = call(args, shell="True")
                    # Transform landmarks
                    if not os.path.exists(xfm_landmarks_dir+output_stem+ext):
                        args = " ".join([ANTSPATH+'WarpImageMultiTransform', str(dim), \
                            source_landmarks, xfm_landmarks_dir+output_stem+ext, '-R',target_landmarks, \
                            '-i', inv_xfm_stem+'Affine.txt', \
                            inv_xfm_stem+'InverseWarp.nii.gz', \
                            xfm_stem+'Warp.nii.gz', \
                            xfm_stem+'Affine.txt','--use-NN'])
                        if verbose: print(args); print(''); p = call(args, shell="True")
                    # Evaluation is skipped entirely if either the warped
                    # atlas or the per-pair results file already exists.
                    if evaluate_with_atlases:
                        if not os.path.exists(xfm_atlas_dir+output_stem+ext):
                            if not os.path.exists(results_dir+output_stem+'.txt'):
                                # Transform atlases
                                source_labels = atlas_dir+file+ext
                                target_labels = atlas_dir+file2+ext
                                args = " ".join([ANTSPATH+'WarpImageMultiTransform', str(dim), \
                                    source_labels, xfm_atlas_dir+output_stem+ext, '-R', target_labels, \
                                    '-i', inv_xfm_stem+'Affine.txt', \
                                    inv_xfm_stem+'InverseWarp.nii.gz', \
                                    xfm_stem+'Warp.nii.gz', \
                                    xfm_stem+'Affine.txt','--use-NN'])
                                if verbose: print(args); print(''); p = call(args, shell="True")
                                # Fill target atlas mask with transformed source atlas labels
                                args = " ".join(['ImageMath', str(dim), xfm_atlas_dir+output_stem+'_filled'+ext, \
                                    'PropagateLabelsThroughMask', brainmask_dir+file2+ext, \
                                    xfm_atlas_dir+output_stem+ext])
                                if verbose: print(args); print(''); p = call(args, shell="True")
                                # Measure overlap of target atlas and transformed source atlas labels
                                results_file = results_dir+output_stem+'.txt'
                                f_eval = open(results_file, 'w');
                                average_dice = 0
                                average_jacc = 0
                                for label in labels:
                                    args = " ".join(['c3d', xfm_atlas_dir+output_stem+'_filled'+ext, \
                                        atlas_dir+file2+ext, '-overlap', str(label), \
                                        '>'+results_dir+'temp_overlap.txt'])
                                    p = call(args, shell="True")
                                    f = open(results_dir+'temp_overlap.txt','r')
                                    temp = f.read()
                                    dice = 0
                                    jacc = 0
                                    if temp != '':
                                        dice = float(temp.split()[-2].split(',')[0])
                                        jacc = float(temp.split()[-1].split(',')[0])
                                    print_out = " ".join(['Label:', str(label), 'Dice:', str(dice), \
                                        'Jaccard:', str(jacc)])
                                    print(print_out)
                                    f_eval.close()
                                    f_eval = open(results_file, 'a')
                                    f_eval.write(print_out + '\n')
                                    # NOTE(review): unlike the section above,
                                    # the NaN check here happens AFTER the
                                    # value is printed/written, so NaNs can
                                    # appear in the per-label output.
                                    if isnan(dice):
                                        dice = 0
                                    if isnan(jacc):
                                        jacc = 0
                                    average_dice += dice
                                    average_jacc += jacc
                                average_dice = average_dice/len(labels)
                                average_jacc = average_jacc/len(labels)
                                print_out1 = 'Average Dice: ' + str(average_dice)
                                print_out2 = 'Average Jacc: ' + str(average_jacc)
                                print(print_out1);
                                print(print_out2)
                                f_eval.close()
                                f_eval = open(results_file, 'a')
                                f_eval.write('\n' + print_out1 + '\n' + print_out2 + '\n\n')
                                f_eval.close()
                                f_avg.close()
                                f_avg = open(avg_results_file, 'a');
                                f_avg.write(output_stem + ' ' + str(average_dice) + ' ' + str(average_jacc) + '\n')
                else:
                    if not os.path.exists(source_landmarks):
                        raise NameError('Check input file ' + source_landmarks)
                    elif not os.path.exists(target_landmarks):
                        raise NameError('Check input file ' + target_landmarks)
    if evaluate_with_atlases:
        f_avg.close()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
37811,
198,
38804,
14290,
11,
41532,
11,
290,
14722,
284,
257,
11055,
13,
198,
198,
7,
66,
8,
2813,
11,
2488,
81,
3919,
479,
33663,
198,
37811,
198,
198,
11748,
28686,
198,
6738,
28686,
13,... | 1.819986 | 11,638 |
#inputString = input()
# Hardcoded sample input (the interactive prompt above is disabled).
inputString = "3,-4, 2, -1,-3, 2, 1"
# Parse the comma-separated values into a list of ints (int() tolerates the
# surrounding spaces) and print the result of solve(), defined elsewhere.
inputList = [int(val) for val in inputString.split(",")]
print(solve(inputList))
201,
198,
201,
198,
2,
15414,
10100,
796,
5128,
3419,
201,
198,
15414,
10100,
796,
366,
18,
12095,
19,
11,
362,
11,
532,
16,
12095,
18,
11,
362,
11,
352,
1,
201,
198,
15414,
8053,
796,
685,
600,
7,
2100,
8,
329,
1188,
287,
5128,... | 2.409836 | 61 |
# ============================================================================
# FILE: session.py
# AUTHOR: Shougo Matsushita <Shougo.Matsu at gmail.com>
# License: MIT license
# ============================================================================
import typing
| [
2,
38093,
2559,
18604,
198,
2,
45811,
25,
6246,
13,
9078,
198,
2,
44746,
25,
911,
280,
2188,
30107,
1530,
5350,
1279,
2484,
280,
2188,
13,
44,
19231,
379,
308,
4529,
13,
785,
29,
198,
2,
13789,
25,
17168,
5964,
198,
2,
38093,
2559... | 5.44 | 50 |
# NOTE: this code is currently copypasta'd from pytorch official examples repo at
# https://github.com/pytorch/examples/blob/master/vae/main.py
# In the future, this could probably be added as a submodule
import pdb
import torch
import torch.nn as nn
import torch.nn.functional as F
# Reconstruction + KL divergence losses summed over all elements and batch
| [
2,
24550,
25,
428,
2438,
318,
3058,
2243,
4464,
40197,
1549,
422,
12972,
13165,
354,
1743,
6096,
29924,
379,
198,
2,
3740,
1378,
12567,
13,
785,
14,
9078,
13165,
354,
14,
1069,
12629,
14,
2436,
672,
14,
9866,
14,
33353,
14,
12417,
1... | 3.646465 | 99 |
import unittest
from ncclient.devices.iosxr import *
| [
11748,
555,
715,
395,
198,
6738,
299,
535,
75,
1153,
13,
42034,
13,
4267,
87,
81,
1330,
1635,
628
] | 2.842105 | 19 |
# -*- coding: utf-8 -*-
from django.apps import AppConfig
class OnfidoAppConfig(AppConfig):
    """AppConfig for Django-Onfido."""
    # App label / default import path used by Django's app registry.
    name = 'onfido'
    # Human-readable name (e.g. shown in the Django admin).
    verbose_name = "Onfido"
    # NOTE(review): class-level mutable list shared by all instances —
    # presumably an intentional app-wide registry; confirm before changing.
    configs = []
    def ready(self):
        """Validate config and connect signals."""
        super(OnfidoAppConfig, self).ready()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628,
198,
4871,
1550,
69,
17305,
4677,
16934,
7,
4677,
16934,
2599,
628,
220,
220,
220,
37227,
4677,
16934,
329,
37770,
... | 2.465116 | 129 |
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
"""
PolaThermal database: database implementation
"""
from bob.bio.base.database import CSVDataset
from bob.bio.base.database import CSVToSampleLoaderBiometrics
from bob.bio.face.database.sample_loaders import EyesAnnotations
from bob.extension import rc
from bob.extension.download import get_file
import bob.io.base
from sklearn.pipeline import make_pipeline
class PolaThermalDatabase(CSVDataset):
"""
Collected by USA Army, the Polarimetric Thermal Database contains basically VIS and Thermal face images.
    The description of the imager used to capture these images follows below.
The **polarimetric** LWIR imager used to collect this database was developed by Polaris Sensor Technologies.
The imager is based on the division-of-time spinning achromatic retarder (SAR) design that uses a spinning phase-retarder mounted in series with a linear wire-grid polarizer.
This system, also referred to as a polarimeter, has a spectral response range of 7.5-11.1, using a Stirling-cooled mercury telluride focal plane array with pixel array dimensions of 640×480.
A Fourier modulation technique is applied to the pixel readout, followed by a series expansion and inversion to compute the Stokes images.
Data were recorded at 60 frames per second (fps) for this database, using a wide FOV of 10.6°×7.9°. Prior to collecting data for each subject, a two-point non-uniformity correction (NUC) was performed using a Mikron blackbody at 20°C and 40°C, which covers the range of typical facial temperatures (30°C-35°C).
Data was recorded on a laptop using custom vendor software.
An array of four Basler Scout series cameras was used to collect the corresponding **visible spectrum imagery**.
Two of the cameras are monochrome (model # scA640-70gm), with pixel array dimensions of 659×494.
The other two cameras are color (model # scA640-70gc), with pixel array dimensions of 658×494.
The dataset contains 60 subjects in total.
For **VIS** images (considered only the 87 pixels interpupil distance) there are 4 samples per subject with neutral expression (called baseline condition **B**) and 12 samples per subject varying the facial expression (called expression **E**).
Such variability was introduced by asking the subject to count orally.
In total there are 960 images for this modality.
For the **thermal** images there are 4 types of thermal imagery based on the Stokes parameters (:math:`S_0`, :math:`S_1`, :math:`S_2` and :math:`S_3`) commonly used to represent the polarization state.
The thermal imagery is the following:
- :math:`S_0`: The conventional thermal image
- :math:`S_1`
- :math:`S_2`
- DoLP: The degree-of-linear-polarization (DoLP) describes the portion of an electromagnetic wave that is linearly polarized, as defined :math:`\\frac{sqrt(S_{1}^{2} + S_{2}^{2})}{S_0}`.
Since :math:`S_3` is very small and usually taken to be zero, the authors of the database decided not to provide this part of the data.
The same facial expression variability introduced in **VIS** is introduced for **Thermal** images.
The distance between the subject and the camera is the last source of variability introduced in the thermal images.
There are 3 ranges: R1 (2.5m), R2 (5m) and R3 (7.5m).
In total there are 11,520 images for this modality and for each subject they are split as the following:
+----------------+----------+----------+----------+
| Imagery/Range | R1 (B/E) | R2 (B/E) | R3 (B/E) |
+================+==========+==========+==========+
| :math:`S_0` | 16 (8/8) | 16 (8/8) | 16 (8/8) |
+----------------+----------+----------+----------+
| :math:`S_1` | 16 (8/8) | 16 (8/8) | 16 (8/8) |
+----------------+----------+----------+----------+
| :math:`S_2` | 16 (8/8) | 16 (8/8) | 16 (8/8) |
+----------------+----------+----------+----------+
| DoLP | 16 (8/8) | 16 (8/8) | 16 (8/8) |
+----------------+----------+----------+----------+
.. warning::
Use the command below to set the path of the real data::
$ bob config set bob.db.pola-thermal.directory [PATH-TO-MEDS-DATA]
Parameters
----------
protocol: str
One of the database protocols.
"""
@staticmethod
@staticmethod
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
43907,
25,
900,
2393,
12685,
7656,
28,
40477,
12,
23,
1058,
198,
2,
16953,
3839,
390,
4848,
21416,
17229,
8704,
1279,
20259,
3839,
13,
431,
260,
8704,
31,
19830,
499,
13,
354,
29,... | 3.192806 | 1,390 |
import unittest
import numpy as np
import numpy.testing as npt
from scoring.component_parameters import ComponentParameters
from scoring.function import CustomSum
from utils.enums.component_specific_parameters_enum import ComponentSpecificParametersEnum
from utils.enums.scoring_function_component_enum import ScoringFunctionComponentNameEnum
from utils.enums.transformation_type_enum import TransformationTypeEnum
| [
11748,
555,
715,
395,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
299,
32152,
13,
33407,
355,
299,
457,
198,
198,
6738,
9689,
13,
42895,
62,
17143,
7307,
1330,
35100,
48944,
198,
6738,
9689,
13,
8818,
1330,
8562,
13065,
198,
... | 3.980952 | 105 |
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 5 02:12:12 2022
@author: Kraken
Project: MHP Hackathon
"""
import os
import pandas as pd
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 16})
# Directories holding the per-model training logs to plot.
WORKING_DIR = "model_14"
WORKING_DIR2 = "model_12"
# "model_8": dqn with fixed weights
# "model_4": dqn
MVG_AVG_WINDOW = 5  # moving-average window (episodes) for the smoothed curves
# =============================================================================
# Queue Plots - Combined
# =============================================================================
QUEUE = "plot_queue_data.txt"
# rl agent
with open(os.path.join(WORKING_DIR, QUEUE), "r") as txtfile:
    data = txtfile.readlines()
# NOTE(review): only the first 250 episodes of the RL run are kept —
# presumably to match the baseline run's length; confirm.
data = [float(x.rstrip("\n")) for x in data][:250]
# ctl
with open(os.path.join(WORKING_DIR2, QUEUE), "r") as txtfile:
    data2 = txtfile.readlines()
data2 = [float(x.rstrip("\n")) for x in data2]
fig = plt.figure(figsize=(12, 8))
plt.plot(data2, "blue", label="Conventional Traffic Lights")
plt.plot(data, "orange", label="RL Agent")
plt.xlabel("# Episodes")
plt.ylabel("Average queue length (vehicles)")
plt.title("Conventional Traffic Lights & RL Optimized Smart Traffic Lights")
plt.grid()
plt.legend(loc="upper right")
plt.savefig(QUEUE.replace("_data.txt", "_combined.png"))
# =============================================================================
# Delay Plots - Combined
# =============================================================================
QUEUE = "plot_delay_data.txt"
# rl agent
with open(os.path.join(WORKING_DIR, QUEUE), "r") as txtfile:
    data = txtfile.readlines()
data = [float(x.rstrip("\n")) for x in data][:250]
# ctl
with open(os.path.join(WORKING_DIR2, QUEUE), "r") as txtfile:
    data2 = txtfile.readlines()
data2 = [float(x.rstrip("\n")) for x in data2]
fig = plt.figure(figsize=(12, 8))
plt.plot(data, "orange", label="RL Agent")
plt.plot(data2, "blue", label="Conventional Traffic Lights")
plt.xlabel("# Episodes")
plt.ylabel("Cumulative Delay (s)")
plt.title("Conventional Traffic Lights & RL Optimized Smart Traffic Lights")
plt.grid()
plt.legend(loc="upper right")
plt.savefig(QUEUE.replace("_data.txt", "_combined.png"))
# =============================================================================
# Reward Plots - Combined
# =============================================================================
QUEUE = "plot_reward_data.txt"
# rl agent
with open(os.path.join(WORKING_DIR, QUEUE), "r") as txtfile:
    data = txtfile.readlines()
data = [float(x.rstrip("\n")) for x in data][:250]
# ctl
with open(os.path.join(WORKING_DIR2, QUEUE), "r") as txtfile:
    data2 = txtfile.readlines()
data2 = [float(x.rstrip("\n")) for x in data2]
fig = plt.figure(figsize=(12, 8))
plt.plot(data, "orange", label="RL Agent")
plt.plot(data2, "blue", label="Conventional Traffic Lights")
plt.xlabel("# Episodes")
plt.ylabel("Cumulative Negative Reward")
plt.title("Conventional Traffic Lights & RL Optimized Smart Traffic Lights")
plt.grid()
plt.legend(loc="best")
plt.savefig(QUEUE.replace("_data.txt", "_combined.png"))
# NOTE(review): the three combined blocks above and the three single-model
# blocks below are near-duplicates and could be factored into helpers.
WORKING_DIR = "model_14"
MVG_AVG_WINDOW = 5
# =============================================================================
# Queue Plots
# =============================================================================
QUEUE = "plot_queue_data.txt"
with open(os.path.join(WORKING_DIR, QUEUE), "r") as txtfile:
    data = txtfile.readlines()
data = [float(x.rstrip("\n")) for x in data]
# Trailing moving average; the first valid smoothed value is at index
# MVG_AVG_WINDOW - 1 (earlier entries are NaN from rolling()).
data_series = pd.Series(data).rolling(MVG_AVG_WINDOW).mean().tolist()
first_value = data_series[MVG_AVG_WINDOW - 1]
last_value = data_series[-1]
perc_decrease = (first_value - last_value) / first_value * 100
fig = plt.figure(figsize=(12, 8))
plt.plot(data)
plt.plot(data_series, "r")
plt.xlabel("# Episodes")
plt.ylabel("Average queue length (vehicles)")
plt.title(f"Decrease: {first_value:.2f} -> {last_value:.2f} = {perc_decrease:.2f}%")
plt.savefig(os.path.join(WORKING_DIR, QUEUE.replace("_data.txt", "_new.png")))
# =============================================================================
# Delay Plots
# =============================================================================
DELAY = "plot_delay_data.txt"
with open(os.path.join(WORKING_DIR, DELAY), "r") as txtfile:
    data = txtfile.readlines()
data = [float(x.rstrip("\n")) for x in data]
data_series = pd.Series(data).rolling(MVG_AVG_WINDOW).mean().tolist()
first_value = data_series[MVG_AVG_WINDOW - 1]
last_value = data_series[-1]
perc_decrease = (first_value - last_value) / first_value * 100
fig = plt.figure(figsize=(12, 8))
plt.plot(data)
plt.plot(data_series, "r")
plt.xlabel("# Episodes")
plt.ylabel("Cumulative Delay (s) / 1000 vehicles")
plt.title(f"Decrease: {first_value:.2f} -> {last_value:.2f} = {perc_decrease:.2f}%")
plt.savefig(os.path.join(WORKING_DIR, DELAY.replace("_data.txt", "_new.png")))
# =============================================================================
# Reward Plots
# =============================================================================
REWARD = "plot_reward_data.txt"
with open(os.path.join(WORKING_DIR, REWARD), "r") as txtfile:
    data = txtfile.readlines()
data = [float(x.rstrip("\n")) for x in data]
data_series = pd.Series(data).rolling(MVG_AVG_WINDOW).mean().tolist()
first_value = data_series[MVG_AVG_WINDOW - 1]
last_value = data_series[-1]
perc_decrease = (first_value - last_value) / first_value * 100
fig = plt.figure(figsize=(12, 8))
plt.plot(data)
plt.plot(data_series, "r")
plt.xlabel("# Episodes")
plt.ylabel("Cumulative negative reward")
plt.title("Reward Maximization by RL Agent")
plt.savefig(os.path.join(WORKING_DIR, REWARD.replace("_data.txt", "_new.png")))
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
7031,
1526,
220,
642,
7816,
25,
1065,
25,
1065,
33160,
198,
31,
9800,
25,
43392,
198,
198,
16775,
25,
337,
14082,
18281,
12938,
198,
37811,
198,
... | 2.820628 | 2,007 |
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Feature extraction saving/loading class for common feature extractors.
"""
import copy
import json
import os
from collections import UserDict
from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Union
import numpy as np
from transformers.file_utils import (
cached_path,
hf_bucket_url,
is_flax_available,
is_remote_url,
is_tf_available,
is_torch_available,
torch_required,
)
from transformers.utils import logging
from .file_utils import *
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"}
_is_offline_mode = True if os.environ.get("TRANSFORMERS_OFFLINE", "0").upper() in ENV_VARS_TRUE_VALUES else False
from enum import Enum
class ExplicitEnum(Enum):
"""
Enum with more explicit error message for missing values.
"""
@classmethod
class TensorType(ExplicitEnum):
    """
    Possible values for the `return_tensors` argument in [`PreTrainedTokenizerBase.__call__`]. Useful for
    tab-completion in an IDE.
    """
    # Each value is the short string callers pass as ``return_tensors=...``.
    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"
if TYPE_CHECKING:
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
PreTrainedFeatureExtractor = Union["SequenceFeatureExtractor"] # noqa: F821
class BatchFeature(UserDict):
    r"""
    Holds the output of the :meth:`~transformers.SequenceFeatureExtractor.pad` and feature extractor specific
    ``__call__`` methods.

    This class is derived from a python dictionary and can be used as a dictionary.

    Args:
        data (:obj:`dict`):
            Dictionary of lists/arrays/tensors returned by the __call__/pad methods ('input_values', 'attention_mask',
            etc.).
        tensor_type (:obj:`Union[None, str, TensorType]`, `optional`):
            You can give a tensor_type here to convert the lists of integers in PyTorch/TensorFlow/Numpy Tensors at
            initialization.
    """

    def __getitem__(self, item: str) -> Any:
        """
        If the key is a string, returns the value of the dict associated to :obj:`key` ('input_values',
        'attention_mask', etc.).

        Raises:
            KeyError: if :obj:`item` is not a string — integer indexing is not supported here.
        """
        if isinstance(item, str):
            return self.data[item]
        else:
            raise KeyError("Indexing with integers is not available when using Python based feature extractors")

    # Copied from transformers.tokenization_utils_base.BatchEncoding.keys
    # Copied from transformers.tokenization_utils_base.BatchEncoding.values
    # Copied from transformers.tokenization_utils_base.BatchEncoding.items

    def convert_to_tensors(self, tensor_type: Optional[Union[str, TensorType]] = None):
        """
        Convert the inner content to tensors.

        Args:
            tensor_type (:obj:`str` or :class:`~transformers.file_utils.TensorType`, `optional`):
                The type of tensors to use. If :obj:`str`, should be one of the values of the enum
                :class:`~transformers.file_utils.TensorType`. If :obj:`None`, no modification is done.

        Returns:
            :class:`~transformers.BatchFeature`: self, with values converted in place.
        """
        if tensor_type is None:
            return self

        # Convert to TensorType
        if not isinstance(tensor_type, TensorType):
            tensor_type = TensorType(tensor_type)

        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed."
                )
            import tensorflow as tf

            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.")
            import torch

            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.")
            import jax.numpy as jnp  # noqa: F811

            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            # Default: NumPy arrays.
            as_tensor = np.asarray
            is_tensor = _is_numpy

        # Do the tensor conversion in batch
        for key, value in self.items():
            try:
                if not is_tensor(value):
                    tensor = as_tensor(value)

                    self[key] = tensor
            # Narrowed from a bare ``except:`` so BaseExceptions such as
            # KeyboardInterrupt/SystemExit are not swallowed during conversion.
            except Exception:
                if key == "overflowing_values":
                    raise ValueError("Unable to create tensor returning overflowing values of different lengths. ")
                raise ValueError(
                    "Unable to create tensor, you should probably activate padding "
                    "with 'padding=True' to have batched tensors with the same length."
                )

        return self

    @torch_required
    # Copied from transformers.tokenization_utils_base.BatchEncoding.to with BatchEncoding->BatchFeature
    def to(self, device: Union[str, "torch.device"]) -> "BatchFeature":
        """
        Send all values to device by calling :obj:`v.to(device)` (PyTorch only).

        Args:
            device (:obj:`str` or :obj:`torch.device`): The device to put the tensors on.

        Returns:
            :class:`~transformers.BatchFeature`: The same instance after modification.
        """
        # This check catches things like APEX blindly calling "to" on all inputs to a module
        # Otherwise it passes the casts down and casts the LongTensor containing the token idxs
        # into a HalfTensor
        if isinstance(device, str) or _is_torch_device(device) or isinstance(device, int):
            self.data = {k: v.to(device=device) for k, v in self.data.items()}
        else:
            logger.warning(f"Attempting to cast a BatchFeature to type {str(device)}. This is not supported.")
        return self
class FeatureExtractionMixin:
"""
This is a feature extraction mixin used to provide saving/loading functionality for sequential and image feature
extractors.
"""
def __init__(self, **kwargs):
"""Set elements of `kwargs` as attributes."""
# Additional attributes without default values
for key, value in kwargs.items():
try:
setattr(self, key, value)
except AttributeError as err:
logger.error(f"Can't set {key} with value {value} for {self}")
raise err
@classmethod
def from_pretrained(
cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
) -> PreTrainedFeatureExtractor:
r"""
Instantiate a type of :class:`~transformers.feature_extraction_utils.FeatureExtractionMixin` from a feature
extractor, *e.g.* a derived class of :class:`~transformers.SequenceFeatureExtractor`.
Args:
pretrained_model_name_or_path (:obj:`str` or :obj:`os.PathLike`):
This can be either:
- a string, the `model id` of a pretrained feature_extractor hosted inside a model repo on
huggingface.co. Valid model ids can be located at the root-level, like ``bert-base-uncased``, or
namespaced under a user or organization name, like ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing a feature extractor file saved using the
:func:`~transformers.feature_extraction_utils.FeatureExtractionMixin.save_pretrained` method, e.g.,
``./my_model_directory/``.
- a path or url to a saved feature extractor JSON `file`, e.g.,
``./my_model_directory/preprocessor_config.json``.
cache_dir (:obj:`str` or :obj:`os.PathLike`, `optional`):
Path to a directory in which a downloaded pretrained model feature extractor should be cached if the
standard cache should not be used.
force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to force to (re-)download the feature extractor files and override the cached versions
if they exist.
resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to delete incompletely received file. Attempts to resume the download if such a file
exists.
proxies (:obj:`Dict[str, str]`, `optional`):
A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
use_auth_token (:obj:`str` or `bool`, `optional`):
The token to use as HTTP bearer authorization for remote files. If :obj:`True`, will use the token
generated when running :obj:`transformers-cli login` (stored in :obj:`~/.huggingface`).
revision(:obj:`str`, `optional`, defaults to :obj:`"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any
identifier allowed by git.
return_unused_kwargs (:obj:`bool`, `optional`, defaults to :obj:`False`):
If :obj:`False`, then this function returns just the final feature extractor object. If :obj:`True`,
then this functions returns a :obj:`Tuple(feature_extractor, unused_kwargs)` where `unused_kwargs` is a
dictionary consisting of the key/value pairs whose keys are not feature extractor attributes: i.e., the
part of ``kwargs`` which has not been used to update ``feature_extractor`` and is otherwise ignored.
kwargs (:obj:`Dict[str, Any]`, `optional`):
The values in kwargs of any keys which are feature extractor attributes will be used to override the
loaded values. Behavior concerning key/value pairs whose keys are *not* feature extractor attributes is
controlled by the ``return_unused_kwargs`` keyword parameter.
.. note::
Passing :obj:`use_auth_token=True` is required when you want to use a private model.
Returns:
A feature extractor of type :class:`~transformers.feature_extraction_utils.FeatureExtractionMixin`.
Examples::
# We can't instantiate directly the base class `FeatureExtractionMixin` nor `SequenceFeatureExtractor` so let's show the examples on a
# derived class: `Wav2Vec2FeatureExtractor`
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h') # Download feature_extraction_config from huggingface.co and cache.
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('./test/saved_model/') # E.g. feature_extractor (or model) was saved using `save_pretrained('./test/saved_model/')`
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('./test/saved_model/preprocessor_config.json')
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h', return_attention_mask=False, foo=False)
assert feature_extractor.return_attention_mask is False
feature_extractor, unused_kwargs = Wav2Vec2FeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h', return_attention_mask=False,
foo=False, return_unused_kwargs=True)
assert feature_extractor.return_attention_mask is False
assert unused_kwargs == {'foo': False}
"""
feature_extractor_dict, kwargs = cls.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
return cls.from_dict(feature_extractor_dict, **kwargs)
def save_pretrained(self, save_directory: Union[str, os.PathLike]):
"""
Save a feature_extractor object to the directory ``save_directory``, so that it can be re-loaded using the
:func:`~transformers.feature_extraction_utils.FeatureExtractionMixin.from_pretrained` class method.
Args:
save_directory (:obj:`str` or :obj:`os.PathLike`):
Directory where the feature extractor JSON file will be saved (will be created if it does not exist).
"""
if os.path.isfile(save_directory):
raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file")
os.makedirs(save_directory, exist_ok=True)
# If we save using the predefined names, we can load using `from_pretrained`
output_feature_extractor_file = os.path.join(save_directory, FEATURE_EXTRACTOR_NAME)
self.to_json_file(output_feature_extractor_file)
logger.info(f"Configuration saved in {output_feature_extractor_file}")
@classmethod
def get_feature_extractor_dict(
cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""
From a ``pretrained_model_name_or_path``, resolve to a dictionary of parameters, to be used for instantiating a
feature extractor of type :class:`~transformers.feature_extraction_utils.FeatureExtractionMixin` using
``from_dict``.
Parameters:
pretrained_model_name_or_path (:obj:`str` or :obj:`os.PathLike`):
The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.
Returns:
:obj:`Tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the feature extractor
object.
"""
cache_dir = kwargs.pop("cache_dir", None)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
use_auth_token = kwargs.pop("use_auth_token", None)
local_files_only = kwargs.pop("local_files_only", False)
revision = kwargs.pop("revision", None)
from_pipeline = kwargs.pop("_from_pipeline", None)
from_auto_class = kwargs.pop("_from_auto", False)
user_agent = {"file_type": "feature extractor", "from_auto_class": from_auto_class}
if from_pipeline is not None:
user_agent["using_pipeline"] = from_pipeline
if is_offline_mode() and not local_files_only:
logger.info("Offline mode: forcing local_files_only=True")
local_files_only = True
pretrained_model_name_or_path = str(pretrained_model_name_or_path)
if os.path.isdir(pretrained_model_name_or_path):
feature_extractor_file = os.path.join(pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME)
elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
feature_extractor_file = pretrained_model_name_or_path
else:
feature_extractor_file = hf_bucket_url(
pretrained_model_name_or_path, filename=FEATURE_EXTRACTOR_NAME, revision=revision, mirror=None
)
try:
# Load from URL or cache if already cached
resolved_feature_extractor_file = cached_path(
feature_extractor_file,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
use_auth_token=use_auth_token,
user_agent=user_agent,
)
# Load feature_extractor dict
with open(resolved_feature_extractor_file, "r", encoding="utf-8") as reader:
text = reader.read()
feature_extractor_dict = json.loads(text)
except EnvironmentError as err:
logger.error(err)
msg = (
f"Can't load feature extractor for '{pretrained_model_name_or_path}'. Make sure that:\n\n"
f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n"
f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a {FEATURE_EXTRACTOR_NAME} file\n\n"
)
raise EnvironmentError(msg)
except json.JSONDecodeError:
msg = (
f"Couldn't reach server at '{feature_extractor_file}' to download feature extractor configuration file or "
"feature extractor configuration file is not a valid JSON file. "
f"Please check network or file content here: {resolved_feature_extractor_file}."
)
raise EnvironmentError(msg)
if resolved_feature_extractor_file == feature_extractor_file:
logger.info(f"loading feature extractor configuration file {feature_extractor_file}")
else:
logger.info(
f"loading feature extractor configuration file {feature_extractor_file} from cache at {resolved_feature_extractor_file}"
)
return feature_extractor_dict, kwargs
@classmethod
def from_dict(cls, feature_extractor_dict: Dict[str, Any], **kwargs) -> PreTrainedFeatureExtractor:
"""
Instantiates a type of :class:`~transformers.feature_extraction_utils.FeatureExtractionMixin` from a Python
dictionary of parameters.
Args:
feature_extractor_dict (:obj:`Dict[str, Any]`):
Dictionary that will be used to instantiate the feature extractor object. Such a dictionary can be
retrieved from a pretrained checkpoint by leveraging the
:func:`~transformers.feature_extraction_utils.FeatureExtractionMixin.to_dict` method.
kwargs (:obj:`Dict[str, Any]`):
Additional parameters from which to initialize the feature extractor object.
Returns:
:class:`~transformers.feature_extraction_utils.FeatureExtractionMixin`: The feature extractor object
instantiated from those parameters.
"""
return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)
feature_extractor = cls(**feature_extractor_dict)
# Update feature_extractor with kwargs if needed
to_remove = []
for key, value in kwargs.items():
if hasattr(feature_extractor, key):
setattr(feature_extractor, key, value)
to_remove.append(key)
for key in to_remove:
kwargs.pop(key, None)
logger.info(f"Feature extractor {feature_extractor}")
if return_unused_kwargs:
return feature_extractor, kwargs
else:
return feature_extractor
def to_dict(self) -> Dict[str, Any]:
"""
Serializes this instance to a Python dictionary.
Returns:
:obj:`Dict[str, Any]`: Dictionary of all the attributes that make up this feature extractor instance.
"""
output = copy.deepcopy(self.__dict__)
output["feature_extractor_type"] = self.__class__.__name__
return output
@classmethod
def from_json_file(cls, json_file: Union[str, os.PathLike]) -> PreTrainedFeatureExtractor:
"""
Instantiates a feature extractor of type :class:`~transformers.feature_extraction_utils.FeatureExtractionMixin`
from the path to a JSON file of parameters.
Args:
json_file (:obj:`str` or :obj:`os.PathLike`):
Path to the JSON file containing the parameters.
Returns:
A feature extractor of type :class:`~transformers.feature_extraction_utils.FeatureExtractionMixin`: The
feature_extractor object instantiated from that JSON file.
"""
with open(json_file, "r", encoding="utf-8") as reader:
text = reader.read()
feature_extractor_dict = json.loads(text)
return cls(**feature_extractor_dict)
def to_json_string(self) -> str:
"""
Serializes this instance to a JSON string.
Returns:
:obj:`str`: String containing all the attributes that make up this feature_extractor instance in JSON
format.
"""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path: Union[str, os.PathLike]):
"""
Save this instance to a JSON file.
Args:
json_file_path (:obj:`str` or :obj:`os.PathLike`):
Path to the JSON file in which this feature_extractor instance's parameters will be saved.
"""
with open(json_file_path, "w", encoding="utf-8") as writer:
writer.write(self.to_json_string())
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
15069,
33448,
383,
12905,
2667,
32388,
3457,
13,
1074,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
... | 2.401876 | 9,060 |
import os, sys
import pytest
import lxml
from copy import deepcopy
from shapely.geometry import LineString
from tests.fixtures import network_object_from_test_data, full_fat_default_config_path, assert_semantically_equal
from tests import xml_diff
from genet.outputs_handler import matsim_xml_writer
from genet.core import Network
from genet.schedule_elements import read_vehicle_types
from genet.inputs_handler import read
import xml.etree.cElementTree as ET
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
pt2matsim_network_test_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "matsim", "network.xml"))
pt2matsim_schedule_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "matsim", "schedule.xml"))
pt2matsim_vehicles_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "matsim", "vehicles.xml"))
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
| [
11748,
28686,
11,
25064,
198,
11748,
12972,
9288,
198,
11748,
300,
19875,
198,
6738,
4866,
1330,
2769,
30073,
198,
6738,
5485,
306,
13,
469,
15748,
1330,
6910,
10100,
198,
6738,
5254,
13,
69,
25506,
1330,
3127,
62,
15252,
62,
6738,
62,
... | 2.649351 | 385 |
import sys
import getpass
import json
from prettytable import PrettyTable
import pandas
import logging
import os.path
import time
# 定义变量
RESULT = {}
# USERINFO = ("admin", "123456")
USERINFO = ("a", "a")
FIELDS = ['name', 'age', 'tel', 'email']
# RESULT.append(FIELDS)
FORMAT = """
====================================================================
1.表字段格式
username age tel email
2. 增删改查和搜索
2.1 增 add # add monkey 12 132xxx monkey@51reboot.com
2.2 删 delete # delete monkey
2.3 改 update # update monkey set age = 18
2.4 查 list # list
2.5 搜 find # find monkey
2.6 分页 display # display page 1 pagesize 5
2.7 保存csv格式,可跟上名称,否则默认 # export csvname
2.8 帮助文档 # 'h' or 'help'
===================================================================
"""
# 日志函数
# 读取文件里的数据
# 持久化
# 添加用户函数
# 分页
# 保存为csv文件
# 删除用户函数
# 修改用户函数
# 打印成表格的函数
# 按需打印用户函数
# 查找用户函数
if __name__ == '__main__':
main() | [
11748,
25064,
198,
11748,
651,
6603,
198,
11748,
33918,
198,
6738,
2495,
11487,
1330,
20090,
10962,
198,
11748,
19798,
292,
198,
11748,
18931,
198,
11748,
28686,
13,
6978,
198,
11748,
640,
628,
198,
2,
10263,
106,
248,
20046,
231,
20998,
... | 1.680815 | 589 |
# -*- coding: utf-8 -*-
"""
Implement a Orchestration Framework.
"""
try:
from typing import List, Tuple, Dict, Type
except:
pass
import attr
from collections import OrderedDict
from pathlib_mate import PathCls as Path
from .mate import AWSObject, Template
from .canned import Canned
def resolve_pipeline(plan):
"""
:type plan: List[Tuple[str, str]]
:param plan: [(can_id, tag), ...]
:rtype: List[Tuple[List[str], str]]]
"""
pipeline_change_set = list()
job = ([], None)
previous_env = None
for tier_name, tier_env in plan:
if tier_env != previous_env:
pipeline_change_set.append(job)
previous_env = tier_env
job = ([tier_name, ], tier_env)
else:
job[0].append(tier_name)
pipeline_change_set.append(job)
pipeline_change_set = pipeline_change_set[1:]
dct = dict()
pipeline = list()
for tier_list, tier_env in pipeline_change_set:
if tier_env in dct:
dct[tier_env].extend(tier_list)
else:
dct[tier_env] = tier_list
pipeline.append((list(dct[tier_env]), tier_env))
return pipeline
@attr.s
class CanLabel(object):
"""
A wrapper around a ``troposphere_mate.Canned``. It defines the metadata
about the ``Canned``
**中文文档**
在 ``Canned`` 之外的进一层包装. ``logic_id`` 是当 ``Canned`` 封装的 Template 会
被作为 Nested Stack 时起作用的. 因为 ``troposphere`` 实现的 Template 可能在其他
Template 中作为 ``AWS::CloudFormation::Stack`` Resource 使用. 作为
Nested Stack 是不知道 Master Stack 中的 Resource Logic Id 的. ``filename``
则是指定了实体文件的文件名. 因为 ``Template`` 本身只关注模板数据, 不关注模板文件.
CanLabel 实现了 Y 轴上的编排.
"""
logic_id = attr.ib() # type: str
can_class = attr.ib() # type: Type[Canned]
filename = attr.ib() # type: str
@attr.s
class ConfigData(object):
"""
**中文文档**
一串的 CanLabel (本质上是一串原子的 Nested Stack, 要么该 Stack 中的资源被全部
创建, 要么全部不被创建) 构成了一个架构的设计. 而这个架构的设计可能被部署到不同的环境中,
在不同的环境中, 配置数据可能不同, 实际被部署的 Nested Stack 的数量也可能不同.
ConfigData 提供了在不同环境下 (用 env_tag 做区分) 的配置数据.
ConfigData 实现了 X 轴上的编排.
"""
env_tag = attr.ib() # type: str
data = attr.ib() # type: dict
@attr.s
# ---
@attr.s
class TemplateFile(object):
"""
**中文文档**
包含了 ``troposphere_mate.Template`` 的实例 以及实际的文件路径 (绝对路径)
"""
template = attr.ib() # type: Template
filepath = attr.ib() # type: str
@filepath.validator
@attr.s
class ExecutionJob(object):
"""
**中文文档**
每个 ExecutionJob 对应一次 ``aws cloudformation deploy`` 命令的执行.
本质上一个 ExecutionJob 包含了一串最终的 Template 文件实体. 所以我们需要知道
Master Template 的路径, 以及所有的 Template 的数据以及路径.
"""
master_can = attr.ib() # type: Canned
master_template_path = attr.ib() # type: str
template_file_list = attr.ib() # type: List[TemplateFile]
class Orchestration(object):
"""
**中文文档**
Orchestration 的本质是对 CanLabel 和 ConfigData 进行编排. 使用:
``CanLabel.logic_id`` 和 ``ConfigData.env_tag`` 指定了编排中的某个最小单元,
通过指定云架构部署的顺序, 最终实现编排.
"""
def __init__(self,
master_canlabel_id,
canlabel_list,
config_data_list,
notes):
"""
:type master_canlabel_id: str
:type canlabel_list: List[CanLabel]
:type config_data_list: List[ConfigData]
:type notes: List[Note]
"""
self.master_canlabel_id = master_canlabel_id # type: str
self.canlabel_mapper = OrderedDict([
(canlabel.logic_id, canlabel)
for canlabel in canlabel_list
]) # type: Dict[str, CanLabel]
self.config_data_mapper = OrderedDict([
(config_data.env_tag, config_data)
for config_data in config_data_list
]) # type: Dict[str, ConfigData]
self.notes = notes # type: List[Note]
# print(self.canlabel_mapper[self.master_canlabel_id])
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
3546,
26908,
257,
30369,
12401,
25161,
13,
198,
37811,
198,
198,
28311,
25,
198,
220,
220,
220,
422,
19720,
1330,
7343,
11,
309,
29291,
11,
360,
713,
... | 1.711916 | 2,291 |
import numpy as np
from kalmanFilter import KalmanFilter
from scipy.optimize import linear_sum_assignment
from collections import deque
class Tracks(object):
"""docstring for Tracks"""
class Tracker(object):
"""docstring for Tracker"""
| [
11748,
299,
32152,
355,
45941,
220,
201,
198,
6738,
479,
282,
805,
22417,
1330,
12612,
805,
22417,
201,
198,
6738,
629,
541,
88,
13,
40085,
1096,
1330,
14174,
62,
16345,
62,
562,
16747,
201,
198,
6738,
17268,
1330,
390,
4188,
201,
198... | 2.632075 | 106 |
import pathlib
import numpy as np
from ballir_dicom_manager.file_readers.read_image_volume import ReadImageVolume
from ballir_dicom_manager.file_loaders.nifti_loader import NiftiLoader
from ballir_dicom_manager.file_viewers.array_viewer import ArrayViewer
| [
11748,
3108,
8019,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
2613,
343,
62,
67,
291,
296,
62,
37153,
13,
7753,
62,
961,
364,
13,
961,
62,
9060,
62,
29048,
1330,
4149,
5159,
31715,
198,
6738,
2613,
343,
62,
67,
291,
... | 2.977011 | 87 |
import datetime
from sys import argv
import tensorflow as tf
import os
import glob
from skimage import io
import matplotlib.pyplot as plt
from multiprocessing import Process
from PIL import Image, ImageDraw
from pathlib import Path
from PyQt5.QtWidgets import QApplication, QWidget, QMessageBox
import qdarkstyle
import numpy as np
from keras.models import Model, load_model
# 提示
if __name__ == "__main__":
Frol().startfind(argv[1])
| [
198,
11748,
4818,
8079,
198,
6738,
25064,
1330,
1822,
85,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
28686,
198,
11748,
15095,
198,
6738,
1341,
9060,
1330,
33245,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
... | 2.898734 | 158 |
import requests
def upload_file(file):
"""
Upload a file to pixeldrain
upload_file(file)
"""
response = requests.post(
"https://pixeldrain.com/api/file",
data={"anonymous": True},
files={"file": open(file, "rb")}
)
return response.json()
def file(file_id):
"""
Returns direct file link
file(file_id)
"""
return "https://pixeldrain.com/api/file/"+file_id
def download_file(file_id, file_name):
"""
Download the full file associated with the ID.
Supports byte range requests.
download_file(file_id, file_name)
"""
response = requests.get(file(file_id))
with open(file_name, "wb") as file:
file.write(response.content)
return file_name
def info(file_id):
"""
Returns information about one or more files.
You can also put a comma separated list of file IDs in the URL and it will return an array of file info, instead of a single object.
info(file_id)
"""
info = requests.get(f"https://pixeldrain.com/api/file/{file_id}/info")
return info.json()
def thumbnail(file_id, width="", height=""):
"""
Returns a PNG thumbnail image representing the file.
The thumbnail image will be 128x128 px by default.
You can specify the width and height with parameters in the URL.
The width and height parameters need to be a multiple of 16.
So the allowed values are 16, 32, 48, 64, 80, 96, 112 and 128.
If a thumbnail cannot be generated for the file you will be redirected to a mime type image of 128x128 px.
thumbnail(file_id, width, height)
width and height is optional
"""
api = f"https://pixeldrain.com/api/file/{file_id}/thumbnail"
api += "?" if width or height else ""
api += "width=" + width if width else ""
api += "&" if width and height else ""
api += "height=" + height if height else ""
thumbnail = requests.get(api)
return thumbnail
| [
11748,
7007,
628,
198,
4299,
9516,
62,
7753,
7,
7753,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
36803,
257,
2393,
284,
279,
844,
68,
335,
3201,
198,
220,
220,
220,
220,
198,
220,
220,
220,
9516,
62,
7753,
7,
7753,
8,
... | 2.707418 | 728 |
#!/usr/bin/python
# -*- coding:utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import Lasso, Ridge, ElasticNet
from sklearn.model_selection import GridSearchCV
if __name__ == "__main__":
# pandas读入
data = pd.read_csv('8.Advertising.csv') # TV、Radio、Newspaper、Sales
x = data[['TV', 'Radio', 'Newspaper']]
# x = data[['TV', 'Radio']]
y = data['Sales']
print(x)
print(y)
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=1)
# print x_train, y_train
model = Lasso()
# model = Ridge()
alpha_can = np.logspace(-3, 2, 10) # 多个alpha 用于交叉验证
lasso_model = GridSearchCV(model, param_grid={'alpha': alpha_can}, cv=5) # cv=5 5折交叉验证, 即分成5分
lasso_model.fit(x, y)
print('验证参数:\n', lasso_model.best_params_) # 找出最佳模型参数
y_hat = lasso_model.predict(np.array(x_test))
print(lasso_model.score(x_test, y_test))
mse = np.average((y_hat - np.array(y_test)) ** 2) # Mean Squared Error
rmse = np.sqrt(mse) # Root Mean Squared Error
print(mse, rmse)
t = np.arange(len(x_test))
plt.plot(t, y_test, 'r-', linewidth=2, label='Test')
plt.plot(t, y_hat, 'g-', linewidth=2, label='Predict')
plt.legend(loc='upper right')
plt.grid()
plt.show()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
19798,
292,
355,
27... | 2.062215 | 659 |
import logging
from twisted.web import server, guard, resource
from twisted.cred import portal
from lbrynet import conf
from .auth import PasswordChecker, HttpPasswordRealm
from ..auth.keyring import Keyring
log = logging.getLogger(__name__)
| [
11748,
18931,
198,
198,
6738,
19074,
13,
12384,
1330,
4382,
11,
4860,
11,
8271,
198,
6738,
19074,
13,
66,
445,
1330,
17898,
198,
198,
6738,
18360,
563,
3262,
1330,
1013,
198,
6738,
764,
18439,
1330,
30275,
9787,
263,
11,
367,
29281,
3... | 3.542857 | 70 |
from functools import partial
from typing import Union, Dict, Optional
from http_async_client.enums import SupportedProtocols, Methods
import httpx
import re
from dataclasses import dataclass
from httpx._types import RequestContent, URLTypes, RequestData, RequestFiles, QueryParamTypes, HeaderTypes, CookieTypes
from nanoid import generate
import base64
import threading
from httpx import Request
class EndPointRegistry(type):
"""This Class is a singleton that inherits from the `type` class, in order to provide it as a metaclass to other classes
This class is the core of the HTTP client that differs from others client, because it will allow to manage different
domains within the same class
This is very useful for example if you need to send request to different third party APIS and you want to follow the
way of that request with a same request ID.
With this class you can keep a domain registry. Every new domain will be registered to this class. On each new call,
it will check if the domain exists in the registry and if not il will
create and entry for it. Afterward it will set this domain as the current domain.
"""
def __call__(cls, *args, **kwargs):
"""
Instantiate the Singleton using the thread library in order to guarantee only one instance !
Arguments:
host: string, the domain's host
port: int : Optional
protocol : string, must be a member of the SupportedProtocol Enum
Returns:
cls.__instance : EndPointRegistry instance
"""
if cls.__instance is None:
with cls._locker:
if cls.__instance is None:
cls.__instance = super().__call__(*args, **kwargs)
# On each call : add to registry (if it is already in the reg, it wont be added but only defined as current)
cls.add_to_reg(**kwargs)
return cls.__instance
def add_to_reg(cls, **kwargs):
"""Method that will create and eventually add a class EndPoint instance object and will add it to the registry if its base64 url is not present in it
In that way, if there is the same origin with two different ports, it will be two different entry in the registry
Arguments:
host: string, the domain's host
port: int : Optional
protocol : string, must be a member of the SupportedProtocol Enum
"""
port = kwargs.get("port", None)
protocol = kwargs.get("protocol", None)
host = kwargs.get("host", None)
end_point = EndPoint(host, port, protocol)
if not end_point.base_url:
raise ValueError("EndPointRegistry error trying to add new client : host is missing")
try:
end_point_key = base64.b64encode(bytes(end_point.base_url, encoding='utf-8'))
if end_point_key not in cls.endpoints_registry:
cls.endpoints_registry[end_point_key] = end_point
cls.current = end_point_key
except TypeError as te:
raise TypeError(f"Cannot encode base url to registry : {str(te)}")
@dataclass
async_client_factory = BaseRESTAsyncClient.get_instance
| [
6738,
1257,
310,
10141,
1330,
13027,
198,
6738,
19720,
1330,
4479,
11,
360,
713,
11,
32233,
198,
6738,
2638,
62,
292,
13361,
62,
16366,
13,
268,
5700,
1330,
36848,
19703,
4668,
82,
11,
25458,
198,
11748,
2638,
87,
198,
11748,
302,
198... | 2.793734 | 1,149 |
import sys
sys.path.append("../")
from appJar import gui
app=gui()
app.addScrolledTextArea("t1")
app.setTextAreaChangeFunction("t1", press)
app.addButtons(["CLEAR", "SET"], press)
app.addButtons(["LOG", "CHECK"], log)
app.addCheckBox("CALL")
app.addCheckBox("END")
app.addEntry("text")
app.go()
| [
11748,
25064,
198,
17597,
13,
6978,
13,
33295,
7203,
40720,
4943,
198,
6738,
598,
47511,
1330,
11774,
628,
198,
1324,
28,
48317,
3419,
198,
198,
1324,
13,
2860,
3351,
8375,
8206,
30547,
7203,
83,
16,
4943,
198,
1324,
13,
2617,
8206,
3... | 2.6 | 115 |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tree method for computing multiple DP quantiles.
Code is modeled after the quantile trees implementation in this Java library:
https://github.com/google/differential-privacy/blob/main/java/main/com/google/privacy/differentialprivacy/BoundedQuantiles.java
The method is essentially using range trees to answer rank queries, as in the
mechanism presented in Section 7.2 of "Private and Continual Release of
Statistics" by Chan et al.: https://eprint.iacr.org/2010/076.pdf
"""
import collections
import enum
import numpy as np
# Smallest value difference that is considered significant.
_NUMERICAL_TOLERANCE = 1e-6
# Index of the root of the tree.
_ROOT_INDEX = 0
# Heuristic for filtering out empty nodes. Suppose that the total sum of a
# node's noisy value and all of its siblings' noisy values is t. Then, if the
# node's value is less than _ALPHA * t, it will be discarded, and a new sum t'
# will be computed excluding it. Setting _ALPHA to zero implies no filtering.
_ALPHA = 0.005
class PrivateQuantileTree:
"""Tree structure for computing DP quantiles."""
def __init__(self,
noise_type,
epsilon,
delta,
data_low,
data_high,
swap,
tree_height=4,
branching_factor=16):
"""Initializes an empty tree and creates a noise generator.
Leaf nodes of the tree can be thought of as bins that uniformly partition
the [data_low, data_high] range.
Args:
noise_type: Sepecifies a value from the NoiseType enum.
epsilon: Differential privacy parameter epsilon.
delta: Differential privacy parameter delta.
data_low: Smallest possible value for data points; any data points with
smaller values will be clamped at data_low.
data_high: Largest possible value for data points; any data points with
larger values will be clamped at data_high.
swap: If true, uses swap dp sensitivity, otherwise uses add-remove.
tree_height: Depth of the tree structure. Must be greater than or equal
to one; height zero corresponds to a tree that is just a single node.
branching_factor: Number of children of each internal tree node. Must be
at least two.
Throws: ValueError if any input arg does not conform to the above
specifications.
"""
if data_low >= data_high:
raise ValueError("Invalid data bounds [{}, {}]; data_low must be smaller "
"than data_high.".format(data_low, data_high))
self._data_low = data_low
self._data_high = data_high
if tree_height < 1:
raise ValueError(
"Invalid value of {} for tree_height input; height must be at least"
" 1.".format(tree_height))
self._tree_height = tree_height
if branching_factor < 2:
raise ValueError("Invalid value of {} for branching_factor input; factor "
"must be at least 2.".format(branching_factor))
self._branching_factor = branching_factor
self._tree = collections.Counter()
self._noised_tree = {}
self._num_leaves = branching_factor**tree_height
num_nodes = ((branching_factor**(tree_height + 1)) - 1) / (
branching_factor - 1)
self._leftmost_leaf_index = (int)(num_nodes - self._num_leaves)
self._range = self._data_high - self._data_low
self._finalized = False
# Create noise generator function.
# For sensitivity computations: We assume each user contributes one data
# point, which means that each user contributes a count of one to one node
# in each level of the tree. L1 and L2 sensitivity are thus identical.
scaling = 2 if swap else 1
sensitivity = scaling * self._tree_height
if noise_type == PrivateQuantileTree.NoiseType.LAPLACE:
scale = sensitivity / epsilon
self._gen_noise = lambda: np.random.laplace(loc=0.0, scale=scale)
elif noise_type == PrivateQuantileTree.NoiseType.GAUSSIAN:
stdev = np.sqrt(2 * sensitivity * np.log(1.32 / delta)) / epsilon
self._gen_noise = lambda: np.random.normal(loc=0.0, scale=stdev)
else:
raise ValueError(
"Invalid value of {} for noise_type input.".format(noise_type))
def get_leaf_indices(self, values):
"""Returns the indices of the leaf node bins into which the values fall.
Leaf nodes uniformly partition the [data_low, data_high] range.
Args:
values: Array of values, assumed to lie in [data_low, data_high].
"""
range_fracs = (values - self._data_low) / self._range
leaf_indices = np.trunc(range_fracs * self._num_leaves)
high_values = values == self._data_high
leaf_indices[high_values] -= 1
return self._leftmost_leaf_index + leaf_indices
def get_parents(self, child_indices):
"""Returns the indices of the parents of the child_indices nodes.
Args:
child_indices: Array of child indices.
"""
return np.trunc((child_indices - 1) / self._branching_factor)
def add_data(self, data):
""""Inserts data into the tree.
Args:
data: Array of data points.
Raises:
RuntimeError: If this method is called after tree is finalized.
"""
if self._finalized:
raise RuntimeError("Cannot add data once tree is finalized.")
if data.size == 0:
return
clipped_data = np.clip(data, self._data_low, self._data_high)
# Increment counts at leaf nodes and then iterate upwards, incrementing
# counts at all ancestors on the path to the root (but not the root itself).
indices = self.get_leaf_indices(clipped_data)
indices, counts = np.unique(indices, return_counts=True)
index_count_map = dict(zip(indices, counts))
while indices[0] != _ROOT_INDEX:
self._tree.update(index_count_map)
new_indices = self.get_parents(indices)
new_index_count_map = collections.Counter()
for i in range(len(indices)):
new_index_count_map[new_indices[i]] += index_count_map[indices[i]]
indices = np.unique(new_indices)
index_count_map = new_index_count_map
return
def finalize(self):
"""Disables calling add_data, and enables calling compute_quantile."""
self._finalized = True
return
def get_leftmost_child(self, parent_index):
"""Returns the leftmost (lowest-numbered) child of the parent_index node.
Args:
parent_index: Index of the parent node.
"""
return parent_index * self._branching_factor + 1
def get_rightmost_child(self, parent_index):
"""Returns the rightmost (highest-numbered) child of the parent_index node.
Args:
parent_index: Index of the parent node.
"""
return (parent_index + 1) * self._branching_factor
def get_left_value(self, index):
"""Returns the minimum value that is mapped to index's subtree.
Args:
index: Index of a node in the tree.
"""
# Find the smallest-index leaf node in this subtree.
while index < self._leftmost_leaf_index:
index = self.get_leftmost_child(index)
return self._data_low + self._range * (
index - self._leftmost_leaf_index) / self._num_leaves
def get_right_value(self, index):
"""Returns the maximum value that is mapped to index's subtree.
Args:
index: Index of a node in the tree.
"""
# Find the largest-index leaf node in this subtree.
while index < self._leftmost_leaf_index:
index = self.get_rightmost_child(index)
return self._data_low + self._range * (index - self._leftmost_leaf_index +
1) / self._num_leaves
def get_noised_count(self, index):
"""Returns a noised version of the count for the given index.
Note that if the count has previously been noised, the same value as before
is returned.
Args:
index: Index of a node in the tree.
"""
if index in self._noised_tree:
return self._noised_tree[index]
noised_count = self._tree[index] + self._gen_noise()
self._noised_tree[index] = noised_count
return noised_count
def compute_quantile(self, quantile):
"""Returns a differentially private estimate of the quantile.
Args:
quantile: A value in [0, 1].
"""
# Ensure no data can be added once a quantile has been computed.
self.finalize()
if quantile < 0.0 or quantile > 1.0:
raise ValueError(
"Quantile must be in [0, 1]; requested quantile {}.".format(quantile))
# Find the (approximate) index of the leaf node containing the quantile.
index = _ROOT_INDEX
while index < self._leftmost_leaf_index:
leftmost_child_index = self.get_leftmost_child(index)
rightmost_child_index = self.get_rightmost_child(index)
# Sum all child nodes' noisy counts.
noised_counts = np.asarray([
self.get_noised_count(i)
for i in range(leftmost_child_index, rightmost_child_index + 1)
])
total = np.sum(noised_counts)
# If all child nodes are "empty", return rank value of current subtree.
if total <= 0.0:
break
# Sum again, but only noisy counts exceeding min_value_cutoff.
min_value_cutoff = total * _ALPHA
passes_cutoff = noised_counts >= min_value_cutoff
filtered_counts = noised_counts[passes_cutoff]
adjusted_total = np.sum(filtered_counts)
if adjusted_total == 0.0:
break
# Find the child whose subtree contains the quantile.
partial_count = 0.0
for i in range(self._branching_factor):
# Skip nodes whose contributions are too small.
if passes_cutoff[i]:
ith_count = noised_counts[i]
partial_count += ith_count
# Break if the current child's subtree contains the quantile.
if partial_count / adjusted_total >= quantile - _NUMERICAL_TOLERANCE:
quantile = (adjusted_total * quantile -
(partial_count - ith_count)) / ith_count
# Truncate at 1; calculated quantile may be larger than 1, due to
# the subtraction of the numerical tolerance value above.
quantile = min(quantile, 1.0)
index = i + leftmost_child_index
break
# Linearly interpolate between the min and max values associated with the
# node of the current index.
return (1 - quantile) * self.get_left_value(
index) + quantile * self.get_right_value(index)
def tree(sampled_data,
data_low,
data_high,
qs,
eps,
delta,
swap,
tree_height=4,
branching_factor=16):
"""Computes (eps, delta)-differentially private quantile estimates for qs.
Creates a PrivateQuantileTree with Laplace noise when delta is zero, and
Gaussian noise otherwise.
Args:
sampled_data: Array of data points.
data_low: Lower bound for data.
data_high: Upper bound for data.
qs: Increasing array of quantiles in (0,1).
eps: Privacy parameter epsilon.
delta: Privacy parameter delta.
swap: If true, uses swap dp sensitivity, otherwise uses add-remove.
tree_height: Height for PrivateQuantileTree.
branching_factor: Branching factor for PrivateQuantileTree.
Returns:
Array o where o[i] is the quantile estimate corresponding to quantile q[i].
"""
noise_type = (
PrivateQuantileTree.NoiseType.LAPLACE
if delta == 0 else PrivateQuantileTree.NoiseType.GAUSSIAN)
t = PrivateQuantileTree(
noise_type=noise_type,
epsilon=eps,
delta=delta,
data_low=data_low,
data_high=data_high,
swap=swap,
tree_height=tree_height,
branching_factor=branching_factor)
t.add_data(sampled_data)
results = np.empty(len(qs))
for i in range(len(qs)):
results[i] = t.compute_quantile(qs[i])
return results
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
15069,
33448,
383,
3012,
4992,
46665,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
... | 2.703244 | 4,593 |
# -*- coding: utf-8 -*-
import enum
import random
from typing import Union
class Card:
"""
represents a single playing card
"""
class Deck:
"""
playing card deck class
"""
class Icons:
"""
icon container for Card/Deck
"""
card_back = '🂠'
joker_red = '🂿'
joker_white = '🃏'
joker_black = '🃟'
clubs = '🃑🃒🃓🃔🃕🃖🃗🃘🃙🃚🃛🃝🃞'
clubs_extra = '🃜'
diamonds = '🃁🃂🃃🃄🃅🃆🃇🃈🃉🃊🃋🃍🃎'
diamonds_extra = '🃌'
hearts = '🂱🂲🂳🂴🂵🂶🂷🂸🂹🂺🂻🂽🂾'
hearts_extra = '🂼'
spades = '🂡🂢🂣🂤🂥🂦🂧🂨🂩🂪🂫🂭🂮'
spades_extra = '🂬'
if __name__ == '__main__':
tests()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
628,
198,
11748,
33829,
198,
11748,
4738,
198,
6738,
19720,
1330,
4479,
628,
198,
4871,
5172,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
6870,
257,
2060,
2712,
2657,
... | 1.400835 | 479 |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Top-level namespace for freesurfer."""
from .base import Info, FSCommand, no_freesurfer
from .preprocess import (ParseDICOMDir, UnpackSDICOMDir, MRIConvert, Resample,
ReconAll, BBRegister, ApplyVolTransform, Smooth,
DICOMConvert, RobustRegister, FitMSParams,
SynthesizeFLASH, MNIBiasCorrection, WatershedSkullStrip,
Normalize, CANormalize, CARegister, CALabel, MRIsCALabel,
SegmentCC, SegmentWM, EditWMwithAseg, ConcatenateLTA)
from .model import (MRISPreproc, MRISPreprocReconAll, GLMFit, OneSampleTTest, Binarize,
Concatenate, SegStats, SegStatsReconAll, Label2Vol, MS_LDA,
Label2Label, Label2Annot, SphericalAverage)
from .utils import (SampleToSurface, SurfaceSmooth, SurfaceTransform, Surface2VolTransform,
SurfaceSnapshots, ApplyMask, MRIsConvert, MRITessellate, MRIPretess,
MRIMarchingCubes, SmoothTessellation, MakeAverageSubject,
ExtractMainComponent, Tkregister2, AddXFormToHeader,
CheckTalairachAlignment, TalairachAVI, TalairachQC, RemoveNeck,
MRIFill, MRIsInflate, Sphere, FixTopology, EulerNumber,
RemoveIntersection, MakeSurfaces, Curvature, CurvatureStats,
Jacobian, MRIsCalc, VolumeMask, ParcellationStats, Contrast,
RelabelHypointensities, Aparc2Aseg, Apas2Aseg)
from .longitudinal import (RobustTemplate, FuseSegmentations)
from .registration import (MPRtoMNI305, RegisterAVItoTalairach, EMRegister, Register,
Paint)
| [
2,
795,
16436,
25,
532,
9,
12,
4235,
25,
21015,
26,
12972,
12,
521,
298,
12,
28968,
25,
604,
26,
33793,
12,
8658,
82,
12,
14171,
25,
18038,
532,
9,
12,
198,
2,
25357,
25,
900,
10117,
28,
29412,
39747,
28,
19,
40379,
28,
19,
15... | 2.144893 | 842 |
def validTime(time):
"""
Boolean indicating if time is in valid 24hr format
"""
tokens = time.split(':')
hours = int(tokens[0])
minutes = int(tokens[1])
return not (hours > 23 or minutes > 59) | [
4299,
4938,
7575,
7,
2435,
2599,
198,
197,
37811,
198,
197,
46120,
13087,
12739,
611,
640,
318,
287,
4938,
1987,
11840,
5794,
198,
197,
37811,
198,
220,
220,
220,
16326,
796,
640,
13,
35312,
7,
10354,
11537,
198,
220,
220,
220,
2250,
... | 2.6375 | 80 |
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 20 13:32:20 2021
#--- Copy csv results to single folder based on dashboard_db.csv
#--- Murilo Vianna (murilodsv@gmail.com)
#--- May, 2021.
#--- Dev-log in: https://github.com/Murilodsv/py-jules
@author: muril
"""
# DEBUG import os; os.chdir('C:/Murilo/py-jules')
#------------------------------#
#--- generate qsub-clusters ---#
#------------------------------#
dash_nm = 'dashboard_db.csv' # Filename of Dashboard CSV
wd_out = 'ginore/csv_res'
#--- Get scripts arguments
if __name__ == "__main__":
import sys
if len(sys.argv) > 1:
#--- use arguments
dash_nm = str(sys.argv[1]) # debug dash_nm = 'dashboard_db_future.csv'
wd_out = str(sys.argv[2])
#----------------------#
#--- Load libraries ---#
#----------------------#
import os
import util as u
import shutil
import glob
from time import time
#--- Track progress
run_start = time()
#----------------------#
#--- Read dashboard ---#
#----------------------#
#--- get run wd
wd = os.getcwd().replace('\\','/')
#--- Open CSVs
dash = u.df_csv(wd+'/'+dash_nm)
#--- list of clusters
l_ids = dash['run_id'].unique()
for i in l_ids:
print('Copying results for '+i)
#--- list csv files
l_csv = glob.glob(wd+'/jules_run/'+i+'/namelists/output/*.csv')
#--- copy every file to folder
for f in l_csv:
shutil.copyfile(f,
wd+'/'+wd_out+'/'+f.split('/')[-1].split('\\')[-1])
#--- track time
print("\nElapsed time of copying: --- %.3f seconds ---" % (time() - run_start)) | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
30030,
2758,
1160,
1511,
25,
2624,
25,
1238,
33448,
198,
220,
220,
220,
220,
198,
220,
220,
220,
1303,
6329,
17393,
269,
21370,
2482,
284,
2060,
... | 2.268657 | 737 |
from django.core.management.base import BaseCommand, CommandError
from django.utils.timezone import get_current_timezone
from orders.models import Order
from datetime import datetime
import csv
import pytz | [
6738,
42625,
14208,
13,
7295,
13,
27604,
13,
8692,
1330,
7308,
21575,
11,
9455,
12331,
198,
6738,
42625,
14208,
13,
26791,
13,
2435,
11340,
1330,
651,
62,
14421,
62,
2435,
11340,
198,
6738,
6266,
13,
27530,
1330,
8284,
198,
6738,
4818,
... | 3.867925 | 53 |
import os
display_name = f"csv files in {os.path.expanduser('~')}"
| [
11748,
28686,
198,
198,
13812,
62,
3672,
796,
277,
1,
40664,
3696,
287,
1391,
418,
13,
6978,
13,
11201,
392,
7220,
10786,
93,
11537,
36786,
198
] | 2.615385 | 26 |
from ansible import errors
#
# Additional Jinja2 filter to wrap list elements with quote
#
def wrap_list_elements(arg):
"""
Wrap each list element with quote, to use before join filter
:param arg: the brute list to manage
:type arg: list
:return: quoted elements
:rtype: list
"""
arg_type = type(arg)
# Check if type is valid
if arg_type != list:
raise errors.AnsibleFilterError(
'Invalid value type "%s", list expected' % arg_type)
return ['"%s"' % element for element in arg]
class FilterModule(object):
""" Filters to manage aptly configuration list values"""
filter_map = {
'wrap_list_elements': wrap_list_elements
}
| [
6738,
9093,
856,
1330,
8563,
198,
198,
2,
198,
2,
15891,
17297,
6592,
17,
8106,
284,
14441,
1351,
4847,
351,
9577,
198,
2,
628,
198,
4299,
14441,
62,
4868,
62,
68,
3639,
7,
853,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,... | 2.682482 | 274 |
import cv2
import numpy as np
import matplotlib.pyplot as plt
img = cv2.imread("Computer-Vision-with-Python/DATA/dog_backpack.png")
plt.imshow(img)
img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
plt.imshow(img_rgb)
new_img = img_rgb.copy()
new_img = cv2.flip(new_img, 0)
plt.imshow(new_img)
pt1 = (200, 380)
pt2 = (600, 700)
cv2.rectangle(img_rgb, pt1=pt1, pt2=pt2, color=(255,0,0), thickness=10)
plt.imshow(img_rgb) | [
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
9600,
796,
269,
85,
17,
13,
320,
961,
7203,
34556,
12,
44206,
12,
4480,
12,
37906,
14,
26947,
14,
9703,
... | 2.033816 | 207 |
from django.conf.urls import url
from points import apis
urlpatterns = [
url(r'^$', apis.PointsTableApi.as_view(), name="api_points_table")
]
| [
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
198,
6738,
2173,
1330,
2471,
271,
628,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
19016,
7,
81,
6,
61,
3,
3256,
2471,
271,
13,
40710,
10962,
32,
14415,
1... | 2.542373 | 59 |
import pygame
from tile_movingbox import MovingBox
| [
11748,
12972,
6057,
201,
198,
6738,
17763,
62,
31462,
3524,
1330,
26768,
14253,
201,
198,
201
] | 3.375 | 16 |
from django.conf.urls import url
from django.contrib.auth.views import login, logout
from django.views.generic import TemplateView
from osmaxx.excerptexport.views import (
delete_excerpt,
export_list,
export_detail,
manage_own_excerpts,
order_new_excerpt,
order_existing_excerpt,
)
excerpt_export_urlpatterns = [
url(r'^$', TemplateView.as_view(template_name="excerptexport/templates/index.html"), name='index'),
url(r'^exports/$', export_list, name='export_list'),
url(r'^exports/(?P<id>[A-Za-z0-9_-]+)/$', export_detail, name='export_detail'),
url(r'^orders/new/new_excerpt/$', order_new_excerpt, name='order_new_excerpt'),
url(r'^orders/new/existing_excerpt/$', order_existing_excerpt, name='order_existing_excerpt'),
url(r'^excerpts/(?P<pk>[A-Za-z0-9_-]+)/delete/$', delete_excerpt, name='delete_excerpt'),
url(r'^excerpts/$', manage_own_excerpts, name='manage_own_excerpts'),
]
login_logout_patterns = [
url(r'^login/$', login,
{'template_name': 'osmaxx/login.html'}, name='login'),
url(r'^logout/$', logout,
{'template_name': 'osmaxx/logout.html'}, name='logout'),
]
urlpatterns = excerpt_export_urlpatterns + login_logout_patterns
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
33571,
1330,
17594,
11,
2604,
448,
198,
6738,
42625,
14208,
13,
33571,
13,
41357,
1330,
37350,
7680,
198,
198,
6738,
267,
... | 2.442 | 500 |
# -*- coding: utf-8 -*-
""" Unit tests for the running.config module
"""
from __future__ import print_function, division, unicode_literals
import os
import running.config as config
def test_load_config_ini():
""" Load a test configuration file in .ini format, and
check if the DEFAULT section propagated correctly.
"""
final_dict_should_be = {
"ABC": {
"def": '123',
"ghi": 'a okay',
"vwx": 'one',
"yz": 'two',
},
"MNO": {
"pqr": 'yes',
"vwx": 'one',
"yz": 'two',
"def": '456',
},
}
test_file_path = os.path.join('tests', 'files', 'load_config_ini.ini')
loaded_dict = dict(config.load_config_ini(test_file_path))
for item in loaded_dict:
loaded_dict[item] = dict(loaded_dict[item])
assert loaded_dict == final_dict_should_be
def test_merge_defaults():
""" A dictionary that has an item with a "DEFAULT" key, if
that item is itself a dictionary, then it should merge
that item's subitems with all the other items in the
dictionary that are also themselves dictionaries.
"""
original_dict = {
"ABC": {
"def": 123,
"ghi": 'a okay',
},
"JKL": 9.25,
"MNO": {
"pqr": True,
},
"DEFAULT": {
"vwx": 'one',
"yz": 'two',
"def": 456,
},
}
merged_dict = dict(config.merge_defaults(original_dict))
merged_dict_should_be = {
"ABC": {
"def": 123,
"ghi": 'a okay',
"vwx": 'one',
"yz": 'two',
},
"JKL": 9.25,
"MNO": {
"pqr": True,
"vwx": 'one',
"yz": 'two',
"def": 456,
},
}
assert merged_dict == merged_dict_should_be
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
11801,
5254,
329,
262,
2491,
13,
11250,
8265,
198,
37811,
198,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
11,
7297,
11,
28000,
1098,
62,
17201,
874,... | 1.935976 | 984 |
from django.shortcuts import get_object_or_404, render
from feincms3 import plugins
from feincms3.regions import Regions
from feincms3.renderer import TemplatePluginRenderer
from .models import HTML, Article, Download
renderer = TemplatePluginRenderer()
renderer.register_string_renderer(HTML, plugins.html.render_html)
renderer.register_template_renderer(Download, "plugins/download.html")
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
651,
62,
15252,
62,
273,
62,
26429,
11,
8543,
198,
6738,
730,
1939,
907,
18,
1330,
20652,
198,
6738,
730,
1939,
907,
18,
13,
2301,
507,
1330,
47089,
198,
6738,
730,
1939,
907,
18,
13,
1092... | 3.45614 | 114 |
from django.test import TestCase
from .resources import StudentResource
from .models import Student
# TODO modify code to do REAL testing | [
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
6738,
764,
37540,
1330,
13613,
26198,
198,
6738,
764,
27530,
1330,
13613,
198,
198,
2,
16926,
46,
13096,
2438,
284,
466,
32744,
4856
] | 4.3125 | 32 |
'''
Adapted from DeeperCut by Eldar Insafutdinov
https://github.com/eldar/pose-tensorflow
'''
from enum import Enum
import numpy as np
# Augmentation functions
def CropImage(joints,im,Xlabel,Ylabel,cfg):
''' Randomly cropping image around xlabel,ylabel taking into account size of image. Introduced in DLC 2 '''
widthforward=int(cfg["minsize"]+np.random.randint(cfg["rightwidth"]))
widthback=int(cfg["minsize"]+np.random.randint(cfg["leftwidth"]))
hup=int(cfg["minsize"]+np.random.randint(cfg["topheight"]))
hdown=int(cfg["minsize"]+np.random.randint(cfg["bottomheight"]))
Xstart=max(0,int(Xlabel-widthback))
Xstop=min(np.shape(im)[1]-1,int(Xlabel+widthforward))
Ystart=max(0,int(Ylabel-hdown))
Ystop=min(np.shape(im)[0]-1,int(Ylabel+hup))
joints[0,:,1]-=Xstart
joints[0,:,2]-=Ystart
inbounds=np.where((joints[0,:,1]>0)*(joints[0,:,1]<np.shape(im)[1])*(joints[0,:,2]>0)*(joints[0,:,2]<np.shape(im)[0]))[0]
return joints[:,inbounds,:],im[Ystart:Ystop+1,Xstart:Xstop+1,:]
| [
7061,
6,
198,
48003,
276,
422,
1024,
5723,
26254,
416,
19208,
283,
7088,
1878,
315,
25194,
709,
198,
5450,
1378,
12567,
13,
785,
14,
68,
335,
283,
14,
3455,
12,
83,
22854,
11125,
198,
198,
7061,
6,
198,
6738,
33829,
1330,
2039,
388,... | 2.253275 | 458 |
from typing import Tuple
from hypothesis import given
from shewchuk import (kind,
vectors_dot_product)
from tests.utils import (exact_kind,
to_sign)
from . import strategies
@given(strategies.floats_sextuplets)
@given(strategies.floats_quadruplets)
@given(strategies.floats_sextuplets)
@given(strategies.floats_sextuplets)
| [
6738,
19720,
1330,
309,
29291,
198,
198,
6738,
14078,
1330,
1813,
198,
198,
6738,
673,
86,
46019,
1330,
357,
11031,
11,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 2.331288 | 163 |
from setuptools import setup
setup(name='petpy',
version='0.1',
description='Petrophysics utilities',
url='https://example.com/',
author = 'Fan',
author_email='yuanzhong.fan@shell.com',
license = 'Apache 2',
pakages=['petpy'],
install_requires=['numpy'],
test_require = ['pytest','pytest-cov'],
entry_points={'console_scripts':
['gardner=petpy.__main__:main',
]}
) | [
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
40406,
7,
3672,
11639,
6449,
9078,
3256,
198,
220,
220,
220,
220,
220,
220,
220,
2196,
11639,
15,
13,
16,
3256,
198,
220,
220,
220,
220,
220,
220,
220,
6764,
11639,
25803,
10051,
23154,
... | 2 | 243 |
"""
Lightweight implementation of exponential backoff,
used for operations that require polling. This is simple enough
that it isn't worth bringing in a new dependency for it.
"""
from time import sleep
from datetime import datetime
from math import ceil
from random import randrange
class ExpBackoff:
"""
``SaturnCluster._start()`` requires polling until the
Dask scheduled comes up. Exponential backoff is better
in these situations than fixed-wait-time polling, because
it minimizes the number of requests that need to be
made from the beginning of polling to the time the
scheduler is up.
"""
def __init__(self, wait_timeout: int = 1200, min_sleep: int = 5, max_sleep: int = 60):
"""
Used to generate sleep times with a capped exponential backoff.
Jitter reduces contention on the event of multiple clients making
these calls at the same time.
:param wait_timeout: Maximum total time in seconds to wait before timing out
:param min_sleep: Minimum amount of time to sleep in seconds
:param max_sleep: Maximum time to sleep over one period in seconds
:return: Boolean indicating if current wait time is less than wait_timeout
"""
self.wait_timeout = wait_timeout
self.max_sleep = max_sleep
self.min_sleep = min_sleep
self.retries = 0
self.start_time = None
def wait(self) -> bool:
"""
This methods returns ``False`` if the timeout has been
exceeded and code that is using ``ExpBackoff`` for polling
should just consider the polling failed.
If there there is still time left until
``self.wait_timeout``, waits for some time and then
returns ``True``.
"""
if self.retries == 0:
self.start_time = datetime.now()
# Check if timeout has been reached
time_delta = (datetime.now() - self.start_time).total_seconds()
if time_delta >= self.wait_timeout:
return False
# Generate exp backoff with jitter
self.retries += 1
backoff = min(self.max_sleep, self.min_sleep * 2 ** self.retries) / 2
jitter = randrange(0, ceil(backoff))
wait_time = backoff + jitter
# Make sure we aren't waiting longer than wait_timeout
remaining_time = self.wait_timeout - time_delta
if remaining_time < wait_time:
wait_time = remaining_time
sleep(wait_time)
return True
| [
37811,
198,
15047,
6551,
7822,
286,
39682,
736,
2364,
11,
198,
1484,
329,
4560,
326,
2421,
13985,
13,
770,
318,
2829,
1576,
198,
5562,
340,
2125,
470,
2861,
6079,
287,
257,
649,
20203,
329,
340,
13,
198,
37811,
198,
198,
6738,
640,
... | 2.771775 | 907 |
import io
from dataclasses import dataclass
import cv2 # type: ignore
import numpy as np # type: ignore
from PIL import Image # type: ignore
@dataclass
| [
11748,
33245,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
198,
11748,
269,
85,
17,
220,
1303,
2099,
25,
8856,
198,
11748,
299,
32152,
355,
45941,
220,
1303,
2099,
25,
8856,
198,
6738,
350,
4146,
1330,
7412,
220,
1303,
20... | 3.076923 | 52 |
from django.db import models
from django.urls import reverse
from django.contrib.auth.models import User
class Reader(User):
"""
A reader is any school member that can have access to the library for
reading purpose
"""
# Reader's school
school = models.ForeignKey('schools.School',
on_delete=models.CASCADE)
#Get the reader image for a better interaction with other readers
image = models.ImageField(upload_to="Reader_Profile",
verbose_name="Profil Image", null=True, blank=True)
#Save all the reader's reading for creating an history and customize the book suggestions
reading = models.ManyToManyField('schools.Reading',
blank=True)
book_registered = models.ManyToManyField('schools.Book', blank=True)
# Save the reader preference of a customized reading suggestions
category_preference = models.ManyToManyField('schools.Category', blank=True)
author_preference = models.ManyToManyField('account.Author', blank=True)
def image_url(self):
"""
"""
if self.image and hasattr(self.image, "url"):
return self.image.url
class Meta(object):
"""docstring for Meta"""
verbose_name = 'Reader'
#verbose_name_plural = _('Reader')
class Author(User):
"""
Author for an eBook on the website (for online reading/download or sale)
"""
#Get the reader image for a better interaction with other readers
image = models.ImageField(upload_to="Reader_Profile",
verbose_name="Profil Image", null=True, blank=True)
bio = models.TextField(null=True)
class Meta(object):
"""docstring for Meta"""
verbose_name = 'Author'
class Administrator(User):
""" Admin Account for the school library.
Affectations: - Register the school
- Can Register all students
- Access to the full school dashboard
- Pay for the service
"""
school = models.ForeignKey('schools.School',
on_delete=models.CASCADE, null=True, blank=True)
#Get the reader image for a better interaction with other readers
image = models.ImageField(upload_to="Reader_Profile",
verbose_name="Profil Image", null=True, blank=True)
# Payment methodes
creditCardNumber = models.IntegerField( null=True, blank=True)
def image_url(self):
"""
"""
if self.image and hasattr(self.image, "url"):
return self.image.url
class Meta(object):
"""docstring for Meta"""
verbose_name = 'Administrator'
#verbose_name_plural = _('Reader')
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
198,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
628,
198,
4871,
25342,
7,
12982,
2599,
198,
197,
37811,
... | 3.106632 | 769 |
from base64 import b64encode as _b64encode
from typing import Tuple
from hashlib import sha256
from secrets import token_urlsafe
from urllib.parse import urlencode
from .decor import parse_token, parse_refreshed_token
from .token import Token
from ..scope import Scope
from ..._sender import Sender, Client, send_and_process, Request
OAUTH_AUTHORIZE_URL = 'https://accounts.spotify.com/authorize'
OAUTH_TOKEN_URL = 'https://accounts.spotify.com/api/token'
def b64encode(msg: str) -> str:
"""Encode a unicode string in base-64."""
return _b64encode(msg.encode()).decode()
def b64urlencode(msg: bytes) -> str:
"""Encode bytes in url-safe base-64 alphabet."""
encoded = _b64encode(msg).decode()
stripped = encoded.split("=")[0]
return stripped.replace("+", "-").replace("/", "_")
class Credentials(Client):
"""
Client for retrieving access tokens.
Parameters
----------
client_id
client id
client_secret
client secret, not required for PKCE user authorisation
redirect_uri
whitelisted redirect URI, required for user authorisation
sender
request sender
asynchronous
synchronicity requirement
"""
@send_and_process(parse_token(uses_pkce=False))
def request_client_token(self) -> Token:
"""
Request a client token.
Returns
-------
Token
client access token
"""
payload = {'grant_type': 'client_credentials'}
return self._token_request(payload, auth=True), ()
def user_authorisation_url(
self,
scope=None,
state: str = None,
show_dialog: bool = False
) -> str:
"""
Construct an authorisation URL.
Step 1/2 in authorisation code flow.
User should be redirected to the resulting URL for authorisation.
Step 2/2: :meth:`request_user_token`.
Parameters
----------
scope
token privileges, accepts a :class:`Scope`, a single :class:`scope`,
a list of :class:`scopes <scope>` and strings for :class:`Scope`,
or a space-separated list of scopes as a string
state
additional state
show_dialog
force login dialog even if previously authorised
Returns
-------
str
login URL
"""
payload = self._user_auth_payload(scope, state)
payload['show_dialog'] = str(show_dialog).lower()
return OAUTH_AUTHORIZE_URL + '?' + urlencode(payload)
@send_and_process(parse_token(uses_pkce=False))
def request_user_token(self, code: str) -> Token:
"""
Request a new user token.
Step 2/2 in authorisation code flow.
Code is provided as a URL parameter in the redirect URI
after login in step 1: :meth:`user_authorisation_url`.
Parameters
----------
code
code from redirect parameters
Returns
-------
Token
user access token
"""
payload = {
'code': code,
'redirect_uri': self.redirect_uri,
'grant_type': 'authorization_code'
}
return self._token_request(payload, auth=True), ()
@send_and_process(parse_refreshed_token(uses_pkce=False))
def refresh_user_token(self, refresh_token: str) -> Token:
"""
Request a refreshed user token.
Parameters
----------
refresh_token
refresh token
Returns
-------
Token
refreshed user access token
"""
payload = {
'refresh_token': refresh_token,
'grant_type': 'refresh_token'
}
return self._token_request(payload, auth=True), (refresh_token,)
def pkce_user_authorisation(
self,
scope=None,
state: str = None,
verifier_bytes: int = 32,
) -> Tuple[str, str]:
"""
Construct authorisation URL and verifier.
Step 1/2 in authorisation code flow with proof key for code exchange.
The user should be redirected to the resulting URL for authorisation.
The verifier is passed to :meth:`request_pkce_token` in step 2.
Parameters
----------
scope
token privileges, accepts a :class:`Scope`, a single :class:`scope`,
a list of :class:`scopes <scope>` and strings for :class:`Scope`,
or a space-separated list of scopes as a string
state
additional state
verifier_bytes
number of bytes to generate PKCE verifier with, ``32 <= bytes <= 96``.
The specified range of bytes generates the appropriate number of
characters (43 - 128) after base-64 encoding, as required in RFC 7636.
Returns
-------
Tuple[str, str]
authorisation URL and PKCE code verifier
"""
assert 32 <= verifier_bytes <= 96, 'Invalid number of verifier bytes!'
verifier = token_urlsafe(verifier_bytes)
sha = sha256(verifier.encode())
challenge = b64urlencode(sha.digest())
payload = self._user_auth_payload(scope, state)
payload['code_challenge'] = challenge
payload['code_challenge_method'] = 'S256'
auth_url = OAUTH_AUTHORIZE_URL + '?' + urlencode(payload)
return auth_url, verifier
@send_and_process(parse_token(uses_pkce=True))
def request_pkce_token(self, code: str, verifier: str) -> Token:
"""
Request a new PKCE user token.
Step 2/2 in authorisation code flow with proof key for code exchange.
Code is provided as a URL parameter in the redirect URI
after login in step 1: :meth:`pkce_user_authorisation`.
Parameters
----------
code
code from redirect parameters
verifier
PKCE code verifier generated for authorisation URL
Returns
-------
Token
user access token
"""
payload = {
'client_id': self.client_id,
'code': code,
'code_verifier': verifier,
'grant_type': 'authorization_code',
'redirect_uri': self.redirect_uri,
}
return self._token_request(payload, auth=False), ()
@send_and_process(parse_refreshed_token(uses_pkce=True))
def refresh_pkce_token(self, refresh_token: str) -> Token:
"""
Request a refreshed PKCE user token.
Parameters
----------
refresh_token
refresh token
Returns
-------
Token
refreshed user access token
"""
payload = {
'client_id': self.client_id,
'grant_type': 'refresh_token',
'refresh_token': refresh_token,
}
return self._token_request(payload, auth=False), (refresh_token,)
def refresh(self, token: Token) -> Token:
"""
Refresh an access token.
Both client and user tokens are accepted and refreshed.
The correct refreshing method is applied regardless if PKCE was used or not.
For client tokens, a new token is returned.
For user tokens, a refreshed token is returned.
Parameters
----------
token
token to be refreshed
Returns
-------
Token
refreshed access token
"""
if token.refresh_token is None:
return self.request_client_token()
elif token.uses_pkce:
return self.refresh_pkce_token(token.refresh_token)
else:
return self.refresh_user_token(token.refresh_token)
| [
6738,
2779,
2414,
1330,
275,
2414,
268,
8189,
355,
4808,
65,
2414,
268,
8189,
198,
6738,
19720,
1330,
309,
29291,
198,
6738,
12234,
8019,
1330,
427,
64,
11645,
198,
6738,
13141,
1330,
11241,
62,
6371,
21230,
198,
198,
6738,
2956,
297,
... | 2.308012 | 3,370 |
#!/usr/bin/env python
"""
This ROS node takes the field survey file and publishes a
field polygon as a geometry_msgs/PolygonStamped for use in
other nodes and for visualization in rviz.
"""
import roslib; roslib.load_manifest('automow_maps')
import rospy
from geometry_msgs.msg import PolygonStamped, Point32, Polygon
class FieldPublisherNode(object):
    """
    This is a ROS node that is responsible for publishing the field.

    NOTE(review): no behaviour is visible in this class body here; presumably
    the survey file is loaded and the PolygonStamped published in methods
    defined elsewhere -- confirm against the full file.
    """
if __name__ == '__main__':
    # Instantiating the node starts the publisher.
    node = FieldPublisherNode()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
198,
1212,
48263,
10139,
2753,
262,
2214,
5526,
2393,
290,
34134,
257,
198,
3245,
7514,
14520,
355,
257,
22939,
62,
907,
14542,
14,
34220,
14520,
1273,
13322,
329,
779,
287,
... | 3.066265 | 166 |
"""Evaluation metrics.
Scientific Machine Learning Benchmark:
A benchmark of regression models in chem- and materials informatics.
(c) Matthias Rupp 2019, Citrine Informatics.
Related terms: objective functions, loss functions, cost functions,
reward functions, utility functions, fitness functions, score functions, merit functions.
Provides classes EvaluationMetric, ScalarEvaluationMetric, VectorEvaluationMetric.
See documentation for relationships and derived metrics.
"""
from abc import ABCMeta, abstractmethod, abstractproperty
from warnings import warn
import numpy as np
import scipy as sp
import scipy.stats # for normal distribution. Python 3.8 will offer a 'statistics' module including PDF and CDF of the normal distribution
from smlb import InvalidParameterError
from smlb import SmlbObject
from smlb import params
##################
# Base classes #
##################
class EvaluationMetric(SmlbObject, metaclass=ABCMeta):
    """Abstract base class for evaluation metrics.

    Base class for ScalarEvaluationMetric and VectorEvaluationMetric.

    Design notes:

    * Derived classes define _evaluate(). Actual evaluation is done by evaluate(),
      which can take additional action, for example, modifying the sign of the
      returned value according to a preferred orientation for ScalarEvaluationMetrics.
    * This solution avoids errors due to derived classes' implementations of
      evaluate() not running additional processing required. it does not prevent
      a class from accidentally overriding evaluate() instead of _evaluate().
    * (_)evaluate methods get passed only the observed ('true') labels of the
      validation set. In particular, they do not have access to the training set
      labels. This is because the performance of predictions on a set V should
      not depend on any other external information; including the training set.
      Otherwise, performance on V could change without any change in V.
    """

    # A variant with only evaluate() was tried where each evaluate() returns
    # a call to a processing method, `return self.processingf(result)`.
    # However, for inheritance chains EvaluationMetric -> A -> B
    # this would require an additional parameter 'raw' telling when and when not
    # to modify the result (or more complicated solutions) and was therefore abandoned.

    @abstractmethod
    def _evaluate(self, true, pred):
        """Evaluate metric for given observations and predictions.

        See evaluate() for function signature and explanation.

        Derived classes overwrite this function instead of evaluate()
        to allow further modification by EvaluationMetric class.
        """
        raise NotImplementedError

    def evaluate(self, true, pred):
        """Evaluate metric for given observations and predictions.

        Parameters:
            true: observed property distributions (PredictiveDistribution)
            pred: predictive property distributions (PredictiveDistribution)

        Returns:
            value of evaluation metric; type depends on the evaluation metric,
            for example, a scalar (ScalarEvaluationMetric) or a vector (VectorEvaluationMetric)

        Note that both true and pred are distributions, including but not limited to
        the delta distribution (deterministic values) and the normal distribution.

        Each EvaluationMetric should support at least all combinations (for true and pred) of
        deterministic values (delta distributions) and normal distributions.
        """
        # Overridden by ScalarEvaluationMetric / VectorEvaluationMetric; raising
        # here (instead of @abstractmethod) lets those subclasses document their
        # own return types while keeping this base unusable directly.
        raise NotImplementedError

    def __call__(self, *args, **kwargs):
        """Provide convenient evaluation by being callable.

        See evaluate() for details.
        """
        return self.evaluate(*args, **kwargs)
class ScalarEvaluationMetric(EvaluationMetric):
    """Base class for scalar-valued EvaluationMetrics."""

    def __init__(self, orient=None, **kwargs):
        """Initialize state.

        Parameters:
            orient: actively orients metric towards minimization (-1) or maximization (+1)
                    if unspecified, the natural orientation of the metric is retained

        Raises:
            InvalidParameterError if trying to orient a metric with no natural orientation
        """
        super().__init__(**kwargs)

        orient = params.enumeration(orient, {-1, +1, None})

        self._sign = +1  # default value leaves _evaluate() unchanged
        if orient is not None:
            if not self.has_orientation:
                raise InvalidParameterError("oriented metric", self.orientation)
            # -1 if desired and actual orientation disagree, otherwise +1
            self._sign = orient * self.orientation

    @property
    def has_orientation(self):
        """True if oriented.

        Here, oriented means that the metric has a preferred direction
        (either more negative or more positive values indicating improvement)
        and is ordered.

        Returns:
            True if the metric has an orientation, False otherwise
        """
        return self.orientation != 0

    @property
    def orientation(self):
        """Whether optimization for this metric means minimization, maximization or neither.

        Examples without orientation include signed residuals and composite metrics.

        Orientation must be constant, that is, it must not change over the lifetime of an object.

        Returns:
            -1 for minimization, +1 for maximization, or 0 if not applicable
        """
        return 0  # default is non-oriented, override method to add orientation

    def evaluate(self, true, pred):
        """Evaluate metric for given observations and predictions.

        Parameters:
            true: observed property distributions (PredictiveDistribution)
            pred: predictive property distributions (PredictiveDistribution)

        Returns:
            a scalar value

        Note that both true and pred are distributions, including but not limited to
        the delta distribution (deterministic values) and the normal distribution.

        The desired orientation can be set in the initializer.
        """
        # _sign flips the metric's value when the caller requested the opposite
        # of the metric's natural orientation (see __init__).
        return self._sign * self._evaluate(true, pred)
# todo: introduce a 'summaryf' parameter to enable mean, min, max, ... of vector-valued evaluation metrics
class VectorEvaluationMetric(EvaluationMetric):
    """Base class for vector-valued EvaluationMetrics."""

    def evaluate(self, true, pred):
        """Evaluate metric for given observations and predictions.

        Parameters:
            true: observed property distributions (PredictiveDistribution)
            pred: predicted property distributions (PredictiveDistribution)

        Returns:
            a vector

        Note that both true and pred are distributions, including but not limited to
        the delta distribution (deterministic values) and the normal distribution.
        """
        # Vector metrics have no orientation/sign handling; delegate directly.
        return self._evaluate(true, pred)
######################
# Error statistics #
######################
class Residuals(VectorEvaluationMetric):
    r"""Signed errors (residuals).

    Prediction error residuals $f(x_i) - y_i$,
    where $x_i$ are inputs, $f$ is the learner and $y_i$ are observed values.
    """

    def _evaluate(self, true, pred):
        """Compute signed prediction errors, predicted minus observed.

        Parameters:
            true: observed property distribution; requires only means
            pred: predictive property distribution; requires only means

        Returns:
            residuals as NumPy array
        """
        observed = params.distribution(true).mean
        predicted = params.distribution(pred).mean
        return predicted - observed
class AbsoluteResiduals(Residuals):
    """Absolute value of residuals.

    Unsigned residuals. Absolute prediction error residuals $|f(x_i) - y_i|$,
    where $x_i$ are inputs, $f$ is the learner and $y_i$ are observed values.
    """

    def _evaluate(self, true, pred):
        """Compute unsigned prediction errors, | predicted - observed |.

        Parameters:
            true: observed property distribution; requires only means
            pred: predictive property distribution; requires only means

        Returns:
            unsigned residuals as NumPy array
        """
        signed = super()._evaluate(true, pred)
        return np.abs(signed)
class SquaredResiduals(Residuals):
    """Squared prediction errors.

    As Residuals, but squared.
    """

    def _evaluate(self, true, pred):
        """Compute squared prediction errors, ( predicted - observed )^2.

        Parameters:
            true: observed property distribution; requires only means
            pred: predictive property distribution; requires only means

        Returns:
            squared residuals as NumPy array
        """
        signed = super()._evaluate(true, pred)
        return signed * signed  # elementwise; identical to np.square(signed)
class MeanAbsoluteError(ScalarEvaluationMetric):
    """Mean Absolute Error (MAE)."""

    @property
    def orientation(self):
        """Smaller is better: minimization."""
        return -1

    def _evaluate(self, true, pred):
        r"""Evaluate Mean Absolute Error (MAE).

        \[ \text{MAE} = \frac{1}{n} \sum_{i=1}^n | f(x_i) - y_i | \]

        Parameters:
            true: observed property distribution; requires only means
            pred: predictive property distribution; requires only means

        Returns:
            mean absolute error as floating point number
        """
        unsigned = AbsoluteResiduals()._evaluate(true, pred)
        return float(np.mean(unsigned))
class MeanSquaredError(ScalarEvaluationMetric):
    """Mean squared error (MSE)."""

    @property
    def orientation(self):
        """Smaller is better: minimization."""
        return -1

    def _evaluate(self, true, pred):
        r"""Mean Squared Error (MSE).

        \[ \text{MSE} = \frac{1}{n} \sum_{i=1}^n ( f(x_i) - y_i )^2 \]

        Parameters:
            true: observed property distribution; requires only means
            pred: predictive property distribution; requires only means

        Returns:
            mean squared error as a floating point number
        """
        squared = SquaredResiduals()._evaluate(true, pred)
        return float(np.mean(squared))
class RootMeanSquaredError(MeanSquaredError):
    """Root Mean Squared Error (RMSE)."""

    # orientation (-1, minimization) is inherited from MeanSquaredError

    def _evaluate(self, true, pred):
        r"""Root Mean Squared Error (RMSE).

        \[ \text{RMSE} = \sqrt{ \frac{1}{n} \sum_{i=1}^n ( f(x_i) - y_i )^2 } \]

        Parameters:
            true: observed property distribution; requires only means
            pred: predictive property distribution; requires only means

        Returns:
            root mean squared error as a floating point number
        """
        mse = super()._evaluate(true, pred)
        return float(np.sqrt(mse))
class StandardizedRootMeanSquaredError(RootMeanSquaredError):
    r"""Standardized Root Mean Squared Error (stdRMSE).

    The standardized RMSE (stdRMSE), relative RMSE, or non-dimensional model
    error (NDME) is given by

    stdRMSE = RMSE / std. dev., where

    \[ \text{std. dev.} = \sqrt{ \frac{1}{n} \sum_{i=1}^n ( y_i - \bar{y} )^2 } \]

    and $\bar{y} = \frac{1}{n} \sum_{i=1}^n y_i$.

    The denominator can be interpreted as the RMSE of a model that predicts the
    mean of the validation set (!) labels. stdRMSE is a unit-less (non-dimensional)
    quantity, often between 0 (perfect model) and 1 (guess-the-mean performance).
    If the IID assumption is violated, that is, label distributions of
    training and validation set differ, stdRMSE can be arbitrarily high.

    The name "standardized RMSE" was chosen over "non-dimensional model error"
    because it is more specific (e.g., which "error"?) and more directly related
    to statistical terminology (e.g., "standard score").

    If the IID assumption holds, stdRMSE can be used to compare prediction errors
    across different datasets on the same scale (the datasets can still vary in
    how hard they are to learn).

    An advantage of stdRMSE over RMSE divided by label range is that stdRMSE is less
    statistically volatile (min and max are extremal statistics with high variance).

    For the estimator of the standard deviation, no bias correction is used by default
    (easing comparisons in many cases). See __init__ docstring for other options.
    """

    def __init__(self, bias_correction: float = 0, **kwargs):
        """Initialize metric.

        Parameters:
            bias_correction: no correction by default. if a positive value d is given,
                division is by n-d. Bessel's correction (d=1) is unbiased for variance
                estimators, but not for standard deviation estimators. While there is
                no value that works across all distributions, d=1.5 is a reasonably
                good correction.
        """
        # stored for use as `ddof` in np.std below
        self._bias_correction = params.real(bias_correction, from_=0)

        super().__init__(**kwargs)

    # same orientation as RootMeanSquaredError

    def _evaluate(self, true, pred):
        """Root mean squared error divided by standard deviation of labels.

        stdRMSE = RMSE / std. dev.

        See class docstring for details.

        Parameters:
            true: observed property distribution; requires only means
            pred: predictive property distribution; requires only means

        Returns:
            standardized root mean squared error as a floating point number

        Raises:
            InvalidParameterError: if fewer than two samples are given, or if
                the labels have (near-)zero standard deviation.
        """
        true = params.distribution(true)

        # ensure sufficiently many samples
        n = len(true.mean)
        if n <= 1:
            raise InvalidParameterError(
                "enough samples to compute standard deviation", f"{n} samples"
            )

        # compute RMSE and standard deviation
        rmse = super()._evaluate(true, pred)
        stddev = np.std(true.mean, ddof=self._bias_correction)

        # ensure sufficient variance in samples
        if stddev <= 1e-3:  # hard-coded, could be initialization parameter
            raise InvalidParameterError(
                "sufficient label variance for non-zero standard deviation",
                f"standard deviation of {stddev}",
            )

        return float(rmse / stddev)
############################
# Uncertainty statistics #
############################
class LogPredictiveDensity(VectorEvaluationMetric):
    r"""Logarithmized Predictive Density (LPD)."""

    def _evaluate(self, true, pred):
        r"""Logarithmic Predictive Density (LPD).

        Assumes a normal predictive distribution.

        \[
        \log p (y_i = t_i | x_i)
        = - ( \log \sqrt{2\pi} + \log \sigma_i + \frac{1}{2} ( \frac{y_i - t_i}{\sigma_i} )^2 )
        \]

        See, for example,
        Joaquin Quinonero-Candela, Carl Edward Rasmussen, Fabian Sinz, Olivier Bousquet, and Bernhard Schölkopf.
        Evaluating predictive uncertainty challenge, p. 1-27, 2005. In Joaquin Quinonero-Candela, Ido Dagan,
        Bernardo Magnini, and Florence d'Alché Buc (editors), Proceedings of the First PASCAL Machine Learning
        Challenges Workshop (MLCW 2005), Southampton, United Kingdom, April 11–13, 2005.

        Parameters:
            true: observed property distribution; requires only means
            pred: predictive property distribution; must be normal

        Returns:
            logarithmic predictive densities as a NumPy vector of floating point numbers
        """
        true = params.distribution(true)
        pred = params.normal_distribution(pred)

        # log(sigma) and division by sigma are undefined for sigma == 0;
        # warn instead of raising so remaining entries stay usable.
        if np.any(pred.stddev == 0):
            warn(
                f"Some uncertainties are zero. Metric {self.__class__.__name__}" "may return nan.",
                RuntimeWarning,
            )

        # term-by-term transcription of the formula in the docstring
        lpd = -(
            np.log(np.sqrt(2 * np.pi))
            + np.log(pred.stddev)
            + 0.5 * np.square((true.mean - pred.mean) / pred.stddev)
        )
        return lpd
class MeanLogPredictiveDensity(ScalarEvaluationMetric):
    """Mean Logarithmized Predictive Density (MLPD)."""

    @property
    def orientation(self):
        """Indicate maximization."""
        return +1

    def _evaluate(self, true, pred):
        r"""Mean Logarithmic Predictive Density (MLPD).

        Mean of LogPredictiveDensity.
        Assumes a normal predictive distribution.

        \[
        1/n \sum_{i=1}^n \log p (y_i = t_i | x_i)
        = - ( \log \sqrt{2\pi} + \frac{1}{2n} \sum_{i=1}^n ( \log \sigma_i^2 + \frac{(y_i-t_i)^2}{\sigma_i^2} ) )
        \]

        See LogPredictiveDensity for details.

        Parameters:
            true: observed property distribution; requires only means
            pred: predictive property distribution; must be normal

        Returns:
            mean logarithmic predictive density as a floating point number
        """
        # float() for consistency with the other scalar metrics in this module
        # (MeanAbsoluteError, MeanSquaredError, ...), which return plain Python
        # floats rather than NumPy scalars.
        return float(np.mean(LogPredictiveDensity()._evaluate(true, pred)))
class ContinuousRankedProbabilityScore(VectorEvaluationMetric):
    r"""Continuous Ranked Probability Score (CRPS).

    The Continuous Ranked Probability Score (CRPS) [1] is the squared-difference integral
    between the predicted cumulative distribution function F and that of a delta function
    on the true value:

    \int\limits_{-\infty}^{\infty} \bigl( F(u) - F_y(u) \bigr)^2 w(u) \mathrm{d} u ,

    where $F_y(u) = 0$ for $u \leq y$ and 1 otherwise, and $w$ is a weighting function.

    For normal predictive distributions, an analytic expression exists: [2]

    \sigma \Bigl( y' \bigl( 2 \Phi(y') - 1 \bigr) + 2 \phi(y') - \frac{1}{\sqrt{\pi}} \Bigr)

    where $y' = \frac{y-\mu}{\sigma}$, and, $\Phi$ and $\phi$ are cumulative and probability
    density functions of the standard normal distribution.

    [1] James E. Matheson and Robert L. Winkler. Scoring rules for continuous
        probability distributions. Management Science 22(10):1087–1096, 1976.
    [2] Tilmann Gneiting, Adrian E. Raftery, Anton H. Westveld III, Tom Goldman. Calibrated
        probabilistic forecasting using ensemble model output statistics and minimum CRPS
        estimation. Monthly Weather Review, 133(5):1098–1118, 2005.
    """

    def _evaluate(self, true, pred):
        """Evaluate continuous ranked probability score (CRPS).

        CRPS depends on the mean of the observations and, in general, the full predictive distribution.

        Currently implemented only for normal predictive distributions, for which a closed-form expression exists.
        For arbitrary distributions (given as samples), an expression suitable for direct implementation is given by Equ. 3 in

        Eric P. Grimit, Tilmann Gneiting, Veronica J. Berrocal, Nicholas A. Johnson:
        The continuous ranked probability score for circular variables and its application to mesoscale forecast ensemble verification,
        Quarterly Journal of the Royal Meteorological Society 132(621C): 2925--2942, 2006. DOI 10.1256/qj.05.235

        Parameters:
            true: observed property distributions; requires only means
            pred: predictive property distributions

        Returns:
            sequence of metric values
            continuous ranked probability scores as a NumPy vector of floating point numbers
        """
        true = params.distribution(true)
        pred = params.normal_distribution(pred)

        # division by sigma below is undefined for sigma == 0; warn, don't raise
        if np.any(pred.stddev == 0):
            warn(
                f"Some uncertainties are zero. Metric {self.__class__.__name__}" "may return nan.",
                RuntimeWarning,
            )

        # y' = (y - mu) / sigma from the closed-form expression [2]
        strue = (true.mean - pred.mean) / pred.stddev  # re-used intermediate quantity
        crps = pred.stddev * (
            strue * (2 * sp.stats.norm.cdf(strue) - 1)
            + 2 * sp.stats.norm.pdf(strue)
            - 1 / np.sqrt(np.pi)
        )
        return crps
class MeanContinuousRankedProbabilityScore(ScalarEvaluationMetric):
    """Mean Continuous Ranked Probability Score (mCRPS)."""

    @property
    def orientation(self):
        """Indicate minimization."""
        return -1

    def _evaluate(self, true, pred):
        """Return arithmetic mean of CRPS.

        Parameters:
            true: observed property distributions; requires only means
            pred: predictive property distributions; must be normal

        Returns:
            mean continuous ranked probability score as a floating point number
        """
        # float() for consistency with the other scalar metrics in this module,
        # which return plain Python floats rather than NumPy scalars.
        return float(np.mean(ContinuousRankedProbabilityScore()._evaluate(true, pred)))
class StandardConfidence(ScalarEvaluationMetric):
    """Fraction of the time that the magnitude of the residual is less than the predicted standard deviation.

    Standard confidence evaluates the quality of the predicted uncertainty estimates, both in terms of individual predictions and overall normalization.
    Does not depend on the predicted values, only the residuals.

    An alternative definition of standard confidence is as the fraction of observations for which the
    "normalized residual" -- residual divided by predicted uncertainty -- is less than one.

    In the ideal case the normalized residuals are normally distributed with std=1, and
    so in the ideal case the standard confidence will be 0.68. Thus there is no "orientation",
    and closer to 0.68 is better.

    The standard confidence is the observed coverage probability at the 68% confidence level.
    See e.g. https://www.stats.ox.ac.uk/pub/bdr/IAUL/Course1Notes5.pdf.
    """

    def _evaluate(self, true, pred):
        """Compute standard confidence.

        Parameters:
            true: observed property distributions; requires only means
            pred: predictive property distributions; must be normal

        Returns:
            standard confidence as a floating point number in [0, 1]
        """
        true = params.distribution(true)
        pred = params.normal_distribution(pred)

        abs_residual = np.abs(true.mean - pred.mean)
        is_less = abs_residual < pred.stddev
        # mean of the boolean mask = fraction of residuals within one predicted
        # standard deviation; float() for consistency with other scalar metrics
        return float(np.mean(is_less))
class RootMeanSquareStandardizedResiduals(ScalarEvaluationMetric):
    """Root Mean Square of the Standardized Residuals (RMSSE).

    RMSSE evaluates the quality of the predicted uncertainty estimates, both in terms of individual predictions and overall normalization.
    Compared to standard confidence, RMSSE is more sensitive to outliers.
    Does not depend on the predicted values, only the residuals.
    No "orientation". Closer to 1 is better.
    """

    def _evaluate(self, true, pred):
        """Compute RMSSE.

        Parameters:
            true: observed property distributions; requires only means
            pred: predictive property distributions; must be normal

        Returns:
            RMSSE as a floating point number, or NaN if any predicted
            uncertainty is zero
        """
        true = params.distribution(true)
        pred = params.normal_distribution(pred)

        # division by sigma below is undefined for sigma == 0;
        # warn and short-circuit to NaN rather than raising
        if np.any(pred.stddev == 0):
            warn(
                f"Some uncertainties are zero. Metric {self.__class__.__name__}" "will be nan.",
                RuntimeWarning,
            )
            return np.nan

        # standardized residuals: (y - mu) / sigma
        strue = (true.mean - pred.mean) / pred.stddev
        rmsse = np.sqrt(np.mean(np.power(strue, 2)))
        return rmsse
class UncertaintyCorrelation(ScalarEvaluationMetric):
    """Correlation between uncertainty estimate and abs(residual).

    A positive value is desirable. A negative value indicates pathological behavior.
    Does not depend on the predicted values, only the residuals.
    """

    @property
    def orientation(self):
        """Larger is better: maximization."""
        return +1

    def _evaluate(self, true, pred):
        """Compute correlation between predicted uncertainty and residual magnitude.

        Parameters:
            true: observed property distributions; requires only means
            pred: predictive property distributions; must be normal

        Returns:
            uncertainty correlation
        """
        true = params.distribution(true)
        pred = params.normal_distribution(pred)

        magnitude = np.abs(true.mean - pred.mean)
        # Pearson correlation is the off-diagonal entry of the 2x2 matrix.
        correlation_matrix = np.corrcoef(magnitude, pred.stddev)
        return correlation_matrix[0, 1]
# helper function
def two_sample_cumulative_distribution_function_statistic(
    sample_a, sample_b, f=lambda p, t: np.square(p - t), g=lambda s, w: np.sum(s * w)
):
    r"""Compute a statistic of the difference between two empirical cumulative distribution functions.

    Calculate statistics of the cumulative distribution functions (CDF) of two samples.
    Let $x_1,\ldots,x_d$ be the union of the two samples, $x_i < x_{i+1}$, and let
    $w_i = x_{i+1}-x_i$, $i = 1,\ldots,d-1$ be the differences between them.
    The calculated statistics have the form $g(s,w)$ where $s_i = f(F_a(x_i), F_b(x_i))$)
    and $F_a$, $F_b$ are the CDFs of the two samples.

    Here, the $x_i$ are the points where one or both of the CDFs changes, $f$ is a statistic
    that depends on the value of the two CDFs, and $g$ is an arbitrary function of $s$ and $w$.

    The default choice for $g$ is Riemann integration; as the CDFs are step functions, this is exact
    and leads to statistics of the form
    \[ \int_{-\infty}^{\infty} f(F_a(x),F_b(x)) dx . \]

    Parameters:
        sample_a: first sample; a sequence of real numbers
        sample_b: second sample; a sequence of real numbers;
                  can be of different length than first sample
        f: function accepting two same-length real vectors, returning a real vector of same length.
           This function computes a value that depends only on the two CDFs, and is thus constant
           between change points. The default is the squared difference, f(a,b) = np.square(a-b).
           The convention here is to use the left endpoint of the "steps".
        g: function accepting two same-length real vectors, returning a real number.
           Computes the statistic based on values of f and step "widths".
           The default, g(s,w) = np.sum(g * w), performs Riemann integration.

    Returns:
        the statistic g(s, w) as computed by the supplied g
    """
    sample_a = params.real_vector(sample_a)
    sample_b = params.real_vector(sample_b)

    allx = np.union1d(sample_a, sample_b)  # all x where F_a and F_b change; sorted, unique
    xdif = np.ediff1d(allx)  # width of Riemann integration bars
    allx = allx.reshape((len(allx), 1))

    # empirical CDFs of both samples, evaluated at every change point
    cdfa = np.count_nonzero(np.sort(sample_a) <= allx, axis=1) / len(sample_a)
    cdfb = np.count_nonzero(np.sort(sample_b) <= allx, axis=1) / len(sample_b)

    # np.asfarray was deprecated and removed in NumPy 2.0;
    # np.asarray(..., dtype=float) is the documented equivalent.
    stat = np.asarray(f(cdfa, cdfb), dtype=float)

    # the last change point has no following interval, so it carries no
    # integration weight; drop it to match the length of xdif
    return g(stat[:-1], xdif)
| [
37811,
36,
2100,
2288,
20731,
13,
198,
198,
23010,
811,
10850,
18252,
25187,
4102,
25,
220,
198,
32,
18335,
286,
20683,
4981,
287,
4607,
12,
290,
5696,
4175,
23372,
13,
198,
7,
66,
8,
45524,
4448,
371,
7211,
13130,
11,
15792,
7640,
... | 2.7687 | 9,572 |
import torch
from torch import nn
class DistractionConv(nn.Module):
    """Inspired by SKNet"""
    # NOTE(review): only the class header and docstring are visible here;
    # presumably the selective-kernel style __init__/forward follow in the
    # full file -- confirm before relying on this class.
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
628,
628,
628,
628,
628,
628,
628,
198,
198,
4871,
4307,
7861,
3103,
85,
7,
20471,
13,
26796,
2599,
198,
220,
220,
220,
37227,
41502,
1202,
416,
14277,
7934,
37811,
628
] | 2.871795 | 39 |
"""
Test script that uses ASE to run an EMT calculation
Script partly from the ASE intro tutorials
https://wiki.fysik.dtu.dk/ase/tutorials/surface.html
"""
# --------------------- STEP 1: Prepare the atoms/structure object ------------
#from ase.build import fcc111
#h = 1.85
#d = 1.10
#atoms = fcc111('Cu', size=(4, 4, 2), vacuum=10.0)
#atoms.write('atoms_in.json', format='json')
from ase.io import read
atoms = read('atoms_in.json')
# ==================== START ASE SCRIPT to AiiDA ==============================
from ase.calculators.emt import EMT
from ase.optimize import FIRE
# -------------------- STEP 2: Attach the calculator --------------------------
calc = EMT(properties=['energy', 'stress'])
atoms.set_calculator(calc)
# -------------------- STEP 3: run the dynamics -------------------------------
# write optimizer steps to logfile
dyn = FIRE(atoms, trajectory='Cu111.traj', logfile='FIRE.log')
dyn.run(fmax=0.05)
# -------------------- STEP 4: Extract and save results -----------------------
results = {}
results['potential_energy'] = atoms.get_potential_energy()
results['stress'] = atoms.get_stress()
print('potential energy: ', results['potential_energy'])
print('stress: ', results['stress'])
# ==================== END ASE SCRIPT to AiiDA ================================
# NEED TO STORE
# writes last step of optimization to a .json file that is storable in a db
atoms.write('atoms_out.json', format='json') # to store 1
# FIRE.log file for reference - to store 2
# results as entry in database - to store 3
# reference to Cu111.traj file for provenance - to store 4
| [
37811,
198,
14402,
4226,
326,
3544,
317,
5188,
284,
1057,
281,
412,
13752,
17952,
198,
198,
7391,
11476,
422,
262,
317,
5188,
18951,
27992,
198,
5450,
1378,
15466,
13,
69,
893,
1134,
13,
67,
28047,
13,
34388,
14,
589,
14,
83,
44917,
... | 3.334719 | 481 |
# Generated by Django 3.0.7 on 2020-07-08 11:39
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
22,
319,
12131,
12,
2998,
12,
2919,
1367,
25,
2670,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
'''
Module of Windows API for plyer.cpu.
'''
from ctypes import (
c_ulonglong, c_ulong, byref,
Structure, POINTER, Union, windll, create_string_buffer,
sizeof, cast, c_void_p, c_uint32
)
from ctypes.wintypes import (
BYTE, DWORD, WORD
)
from plyer.facades import CPU
# handle to kernel32.dll, which exports GetLogicalProcessorInformation
KERNEL = windll.kernel32

# winerror.h ERROR_INSUFFICIENT_BUFFER (122): returned when the supplied
# output buffer is too small and must be re-allocated with the reported size
ERROR_INSUFFICIENT_BUFFER = 0x0000007A
class CacheType:
    '''
    Win API PROCESSOR_CACHE_TYPE enum.
    Distinguishes what a processor cache stores.
    '''

    unified = 0      # CacheUnified: holds both instructions and data
    instruction = 1  # CacheInstruction
    data = 2         # CacheData
    trace = 3        # CacheTrace
class RelationshipType:
    '''
    Win API LOGICAL_PROCESSOR_RELATIONSHIP enum.
    Identifies what the logical processors described by one
    SYSTEM_LOGICAL_PROCESSOR_INFORMATION record have in common.
    '''

    processor_core = 0  # logical proc sharing single core
    numa_node = 1  # logical proc sharing single NUMA node
    cache = 2  # logical proc sharing cache
    processor_package = 3  # logical proc sharing physical package
    group = 4  # logical proc sharing processor group
    all = 0xffff  # logical proc info for all groups
class CacheDescriptor(Structure):
    '''
    Win API CACHE_DESCRIPTOR struct.
    Describes one processor cache: its level (1-3), associativity,
    line size and total size in bytes, and its CacheType.
    '''

    _fields_ = [
        ('Level', BYTE),
        ('Associativity', BYTE),
        ('LineSize', WORD),
        ('Size', DWORD),
        ('Type', DWORD)
    ]
class ProcessorCore(Structure):
    '''
    Win API ProcessorCore struct.
    Flags is nonzero when the core has more than one logical processor
    (i.e. SMT/hyper-threading is enabled), per the Win32 docs.
    '''

    _fields_ = [('Flags', BYTE)]
class NumaNode(Structure):
    '''
    Win API NumaNode struct.
    NodeNumber identifies the NUMA node shared by the logical processors.
    '''

    _fields_ = [('NodeNumber', DWORD)]
class SystemLPIUnion(Union):
    '''
    Win API SYSTEM_LOGICAL_PROCESSOR_INFORMATION union without name.
    Which member is valid depends on the Relationship field of the
    enclosing SystemLPI record (see RelationshipType).
    '''

    _fields_ = [
        ('ProcessorCore', ProcessorCore),
        ('NumaNode', NumaNode),
        ('Cache', CacheDescriptor),
        ('Reserved', c_ulonglong)
    ]
class SystemLPI(Structure):
    '''
    Win API SYSTEM_LOGICAL_PROCESSOR_INFORMATION struct.
    ProcessorMask is a bitmask of the affected logical processors;
    Relationship selects which member of the LPI union is valid.
    '''

    _fields_ = [
        ('ProcessorMask', c_ulong),
        ('Relationship', c_ulong),
        ('LPI', SystemLPIUnion)
    ]
class WinCPU(CPU):
    '''
    Implementation of Windows CPU API.

    NOTE(review): only instance() is visible in this view; the CPU facade's
    query methods presumably follow -- confirm against the full file.
    '''

    @staticmethod
    def instance():
        '''
        Instance for facade proxy.
        '''
        return WinCPU()

    # Resources:
    # GetLogicalProcessInformation
    # https://msdn.microsoft.com/en-us/library/ms683194(v=vs.85).aspx
    # SYSTEM_LOGICAL_PROCESSOR_INFORMATION
    # https://msdn.microsoft.com/en-us/library/ms686694(v=vs.85).aspx
    # LOGICAL_PROCESSOR_RELATIONSHIP enum (0 - 4, 0xffff)
    # https://msdn.microsoft.com/2ada52f0-70ec-4146-9ef7-9af3b08996f9
    # CACHE_DESCRIPTOR struct
    # https://msdn.microsoft.com/38cfa605-831c-45ef-a99f-55f42b2b56e9
    # PROCESSOR_CACHE_TYPE
    # https://msdn.microsoft.com/23044f67-e944-43c2-8c75-3d2fba87cb3c
    # C example
    # https://msdn.microsoft.com/en-us/904d2d35-f419-4e8f-a689-f39ed926644c
| [
7061,
6,
198,
26796,
286,
3964,
7824,
329,
35960,
263,
13,
36166,
13,
198,
7061,
6,
198,
198,
6738,
269,
19199,
1330,
357,
198,
220,
220,
220,
269,
62,
377,
506,
6511,
11,
269,
62,
377,
506,
11,
416,
5420,
11,
198,
220,
220,
220... | 2.298407 | 1,193 |
"""Plot several normal densities and mark their expected values."""
import numpy as np
import matplotlib.pyplot as plt

from distributions import gaussian

mu_list = [0, -5, 3]
sigma_list = [1, 2, 4]

x = np.linspace(-15, 15, 500)

# Iterate (mu, sigma) pairs directly instead of indexing by position
# (idiomatic replacement for `for i in range(len(...))`).
for mu, sigma in zip(mu_list, sigma_list):
    plt.plot(x, gaussian(x, mu, sigma), label=r'$\mu = %d$' % mu + r' $\sigma = %d$' % sigma)

# Stems at each mean, with height equal to the density's peak value there.
markerline, stemlines, baseline = plt.stem(
    mu_list,
    gaussian(np.array(mu_list), np.array(mu_list), np.array(sigma_list)),
    '--',
    label='Expected value',
)
# Mute the stem styling so it does not distract from the curves.
plt.setp(stemlines, 'color', 'gainsboro')
plt.setp(markerline, 'color', 'silver')
plt.setp(baseline, visible=False)

plt.xlabel('x', fontsize=15)
plt.ylabel(r'$P(x, \mu, \sigma)$', fontsize=15)
plt.xlim(-15, 15)
plt.ylim(0, 0.5)
plt.legend()
plt.show()
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
6738,
24570,
1330,
31986,
31562,
198,
198,
30300,
62,
4868,
796,
685,
15,
11,
532,
20,
11,
513,
60,
198,
82,
13495,
62,
4868,
796... | 2.142857 | 343 |
import datasets
import matplotlib.pyplot as plt
| [
11748,
40522,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
628,
198
] | 3.333333 | 15 |
# Copyright 2021 TileDB Inc.
# Licensed under the MIT License.
import numpy as np
import pytest
import tiledb
from tiledb.cf import Group, NetCDF4ConverterEngine
netCDF4 = pytest.importorskip("netCDF4")
class TestSimplyCopyChunks:
"""Test converting a simple NetCDF in chunks.
NetCDF File:
dimensions:
x (8)
y (8)
z (8)
variables:
f (x, y, z) = reshape([0, ..., 511], (8, 8, 8))
"""
attr_data = np.reshape(np.arange(512), (8, 8, 8))
@pytest.fixture(scope="class")
def netcdf_file(self, tmpdir_factory):
"""Returns the NetCDF file that will be used to test the conversion."""
filepath = tmpdir_factory.mktemp("input_file").join("simple_copy_chunks.nc")
with netCDF4.Dataset(filepath, mode="w") as dataset:
dataset.createDimension("x", 8)
dataset.createDimension("y", 8)
dataset.createDimension("z", 8)
var = dataset.createVariable(
varname="f", datatype=np.int64, dimensions=("x", "y", "z")
)
var[:, :, :] = self.attr_data
return filepath
    @pytest.mark.parametrize(
        "sparse,expected_result", ((False, attr_data), (True, np.arange(512)))
    )
    def test_convert_chunks(self, netcdf_file, tmpdir, sparse, expected_result):
        """Test copying NetCDF file in chunks for a simple NetCDF file."""
        uri = str(tmpdir.mkdir("output").join("simple_copy_chunks"))
        converter = NetCDF4ConverterEngine.from_file(netcdf_file)
        array_creator = converter.get_array_creator_by_attr("f")
        array_creator.sparse = sparse
        # default: no fragment-size limit on any of the three dimensions
        assert array_creator.domain_creator.max_fragment_shape == (None, None, None)
        array_creator.domain_creator.max_fragment_shape = (4, 8, 2)
        assert array_creator.domain_creator.max_fragment_shape == (4, 8, 2)
        converter.convert_to_group(uri)
        with Group(uri) as group:
            with group.open_array(attr="f") as array:
                array_uri = array.uri
                result = array[...]
                # sparse reads return a dict keyed by attribute name
                result = result["f"] if isinstance(result, dict) else result
        np.testing.assert_equal(result, expected_result)
        # 8x8x8 domain split into (4, 8, 2) chunks -> 2 * 1 * 4 = 8 fragments
        fragment_info = tiledb.FragmentInfoList(array_uri)
        assert len(fragment_info) == 8
@pytest.mark.parametrize(
"sparse,expected_result",
((False, np.reshape(np.arange(512), (8, 8, 8))), (True, np.arange(512))),
)
def test_convert_chunks_with_injected(
self, netcdf_file, tmpdir, sparse, expected_result
):
"""Test copying NetCDF file in chunks for a simple NetCDF file with externally
provided dimension and attribute values."""
uri = str(tmpdir.mkdir("output").join("simple_copy_chunks"))
converter = NetCDF4ConverterEngine.from_file(netcdf_file)
converter.add_shared_dim("t", domain=(0, 3), dtype=np.uint64)
array_creator = converter.get_array_creator_by_attr("f")
array_creator.sparse = sparse
array_creator.add_attr_creator(name="g", dtype=np.float64)
array_creator.domain_creator.inject_dim_creator("t", 0)
array_creator.domain_creator.max_fragment_shape = (1, 4, 8, 2)
# Define data for extra variable
g_data = np.reshape(np.random.random_sample((512)), (1, 8, 8, 8))
converter.convert_to_group(
uri,
assigned_dim_values={"t": 0},
assigned_attr_values={"g": g_data},
)
with Group(uri) as group:
with group.open_array("array0") as array:
array_uri = array.uri
result = array[0, :, :, :]
f_result = result["f"]
np.testing.assert_equal(f_result, expected_result)
g_result = np.reshape(result["g"], (1, 8, 8, 8))
np.testing.assert_equal(g_data, g_result)
fragment_info = tiledb.FragmentInfoList(array_uri)
assert len(fragment_info) == 8
class TestCoordinateCopyChunks:
"""Test converting a simple NetCDF in chunks.
NetCDF File:
dimensions:
x (8)
y (8)
variables:
x (x) = linspace(-1, 1, 8)
y (y) = linspace(0, 2, 8)
f (x, y) = [[0, 1, ...],...,[...,62,63]]
"""
x_data = np.linspace(-1.0, 1.0, 8)
y_data = np.linspace(0.0, 2.0, 8)
attr_data = np.reshape(np.arange(64), (8, 8))
@pytest.fixture(scope="class")
def netcdf_file(self, tmpdir_factory):
"""Returns the NetCDF file that will be used to test the conversion."""
filepath = tmpdir_factory.mktemp("input_file").join("simple_copy_chunks.nc")
with netCDF4.Dataset(filepath, mode="w") as dataset:
dataset.createDimension("x", 8)
dataset.createDimension("y", 8)
var = dataset.createVariable(
varname="f", datatype=np.int64, dimensions=("x", "y")
)
var[:, :] = self.attr_data
var = dataset.createVariable(
varname="x", datatype=np.float64, dimensions=("x")
)
var[:] = self.x_data
var = dataset.createVariable(
varname="y", datatype=np.float64, dimensions=("y")
)
var[:] = self.y_data
return filepath
def test_convert_chunks(self, netcdf_file, tmpdir):
"""Test copying NetCDF file in chunks for a NetCDF to TileDB conversion that
maps NetCDF coordinates to dimensions."""
uri = str(tmpdir.mkdir("output").join("simple_copy_chunks"))
converter = NetCDF4ConverterEngine.from_file(netcdf_file, coords_to_dims=True)
converter.get_shared_dim("x").domain = (-1.0, 1.0)
converter.get_shared_dim("y").domain = (0.0, 2.0)
array_creator = converter.get_array_creator_by_attr("f")
array_creator.domain_creator.max_fragment_shape = (4, 4)
converter.convert_to_group(uri)
with Group(uri) as group:
with group.open_array(attr="f") as array:
array_uri = array.uri
result = array[...]
result = result["f"]
expected_result = np.arange(64)
np.testing.assert_equal(result, expected_result)
fragment_info = tiledb.FragmentInfoList(array_uri)
assert len(fragment_info) == 4
| [
2,
15069,
33448,
47870,
11012,
3457,
13,
198,
2,
49962,
739,
262,
17168,
13789,
13,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
9288,
198,
198,
11748,
256,
3902,
65,
198,
6738,
256,
3902,
65,
13,
12993,
1330,
4912,
11,
3433... | 2.163906 | 2,898 |
"""
@package mi.dataset.driver.ctdpf_ckl.wfp.test.test_driver
@file marine-integrations/mi/dataset/driver/ctdpf_ckl/wfp/driver.py
@author cgoodrich
@brief Test cases for ctdpf_ckl_wfp driver
USAGE:
Make tests verbose and provide stdout
* From the IDK
$ bin/dsa/test_driver
$ bin/dsa/test_driver -i [-t testname]
$ bin/dsa/test_driver -q [-t testname]
"""
__author__ = 'cgoodrich'
__license__ = 'Apache 2.0'
import unittest
from nose.plugins.attrib import attr
from mock import Mock
from pyon.agent.agent import ResourceAgentState
from interface.objects import ResourceAgentErrorEvent
from mi.core.log import get_logger ; log = get_logger()
from mi.idk.exceptions import SampleTimeout
from mi.idk.dataset.unit_test import DataSetTestCase
from mi.idk.dataset.unit_test import DataSetIntegrationTestCase
from mi.idk.dataset.unit_test import DataSetQualificationTestCase
from mi.dataset.dataset_driver import DriverParameter
from mi.dataset.dataset_driver import DataSourceConfigKey, DataSetDriverConfigKeys
from mi.dataset.driver.ctdpf_ckl.wfp.driver import CtdpfCklWfpDataSetDriver
from mi.dataset.parser.ctdpf_ckl_wfp import CtdpfCklWfpParserDataParticle, DataParticleType
from mi.dataset.parser.wfp_c_file_common import StateKey
# Fill in driver details
DataSetTestCase.initialize(
driver_module='mi.dataset.driver.ctdpf_ckl.wfp.driver',
driver_class='CtdpfCklWfpDataSetDriver',
agent_resource_id = '123xyz',
agent_name = 'Agent007',
agent_packet_config = CtdpfCklWfpDataSetDriver.stream_config(),
startup_config = {
DataSourceConfigKey.RESOURCE_ID: 'ctdpf_ckl_wfp',
DataSourceConfigKey.HARVESTER:
{
DataSetDriverConfigKeys.DIRECTORY: '/tmp/dsatest',
DataSetDriverConfigKeys.PATTERN: 'C*.DAT',
DataSetDriverConfigKeys.FREQUENCY: 1,
},
DataSourceConfigKey.PARSER: {}
}
)
# The integration and qualification tests generated here are suggested tests,
# but may not be enough to fully test your driver. Additional tests should be
# written as needed.
###############################################################################
# INTEGRATION TESTS #
# Device specific integration tests are for #
# testing device specific capabilities #
###############################################################################
@attr('INT', group='mi')
###############################################################################
# QUALIFICATION TESTS #
# Device specific qualification tests are for #
# testing device specific capabilities #
###############################################################################
@attr('QUAL', group='mi') | [
37811,
198,
31,
26495,
21504,
13,
19608,
292,
316,
13,
26230,
13,
310,
26059,
69,
62,
694,
75,
13,
86,
46428,
13,
9288,
13,
9288,
62,
26230,
198,
31,
7753,
16050,
12,
18908,
9143,
14,
11632,
14,
19608,
292,
316,
14,
26230,
14,
310... | 2.545927 | 1,154 |
from interpretability.explanation_methods.explainers.rise import RISE
from interpretability.explanation_methods.explainers.lime import Lime
from interpretability.explanation_methods.explainers.occlusion import Occlusion
from interpretability.explanation_methods.explainers.captum import GradCam, GB, IxG, Grad, DeepLIFT, IntGrad
from interpretability.explanation_methods.explanation_configs import explainer_configs
explainer_map = {
"Ours": lambda x: x,
"RISE": RISE,
"Occlusion": Occlusion,
"GCam": GradCam,
"LIME": Lime,
"IntGrad": IntGrad,
"GB": GB,
"IxG": IxG,
"Grad": Grad,
"DeepLIFT": DeepLIFT
}
| [
6738,
6179,
1799,
13,
1069,
11578,
341,
62,
24396,
82,
13,
20676,
50221,
13,
17163,
1330,
371,
24352,
198,
6738,
6179,
1799,
13,
1069,
11578,
341,
62,
24396,
82,
13,
20676,
50221,
13,
27299,
1330,
43503,
198,
6738,
6179,
1799,
13,
106... | 2.691667 | 240 |
import operator, logging, math, psutil
from enum import Enum
from repartition_experiments.file_formats.hdf5 import HDF5_manager
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
logger = logging.getLogger(__name__)
def get_volumes(R, B):
""" Returns a dictionary mapping each buffer (numeric) index to a Volume object containing its coordinates in R.
Arguments:
----------
R: original array
B: buffer shape
"""
buffers_partition = get_partition(R, B)
return buffers_partition, get_named_volumes(buffers_partition, B)
def hypercubes_overlap(hypercube1, hypercube2):
""" Evaluate if two hypercubes cross each other.
"""
if not isinstance(hypercube1, Volume) or \
not isinstance(hypercube2, Volume):
raise TypeError()
lowercorner1, uppercorner1 = hypercube1.get_corners()
lowercorner2, uppercorner2 = hypercube2.get_corners()
for i in range(len(uppercorner1)):
if uppercorner1[i] <= lowercorner2[i] or \
uppercorner2[i] <= lowercorner1[i]:
return False
return True
def get_blocks_shape(big_array, small_array):
""" Return the number of small arrays in big array in all dimensions as a shape.
"""
return tuple([int(b/s) for b, s in zip(big_array, small_array)])
def get_crossed_outfiles(buffer_of_interest, outfiles_volumes):
""" Returns list of output files that are crossing buffer at buffer_index.
Arguments:
----------
outfiles_volumes: dict of volumes representing the output files, indexed in storage order.
"""
crossing = list()
for outfile in outfiles_volumes.values():
if hypercubes_overlap(buffer_of_interest, outfile):
crossing.append(outfile) # we add a Volume obj
return crossing
def merge_volumes(volume1, volume2):
""" Merge two volumes into one.
"""
if not isinstance(volume1, Volume) or \
not isinstance(volume2, Volume):
raise TypeError()
lowercorner1, uppercorner1 = volume1.get_corners()
lowercorner2, uppercorner2 = volume2.get_corners()
lowercorner = (min(lowercorner1[0], lowercorner2[0]),
min(lowercorner1[1], lowercorner2[1]),
min(lowercorner1[2], lowercorner2[2]))
uppercorner = (max(uppercorner1[0], uppercorner2[0]),
max(uppercorner1[1], uppercorner2[1]),
max(uppercorner1[2], uppercorner2[2]))
return Volume('0_merged', lowercorner, uppercorner)
def included_in(volume, outfile):
""" Alias of hypercubes_overlap.
We do not verify that it is included but by definition
of the problem if volume crosses outfile then volume in outfile.
Arguments:
----------
volume: Volume in buffer
outfile: Volume representing an output file
"""
if not isinstance(volume, Volume) or \
not isinstance(outfile, Volume):
raise TypeError()
volume_bl, volume_ur = volume.get_corners() # ur=upper right, bl=bottom left
outfile_bl, outfile_ur = outfile.get_corners()
nb_dims = len(outfile_bl)
nb_matching_dims = 0
for dim in range(nb_dims):
out_min, out_max = outfile_bl[dim], outfile_ur[dim]
volume_min, volume_max = volume_bl[dim], volume_ur[dim]
if (volume_min >= out_min and volume_min <= out_max) and (volume_max >= out_min and volume_max <= out_max):
nb_matching_dims += 1
if nb_matching_dims == nb_dims:
return True
return False
def add_to_array_dict(array_dict, outfile, volume):
""" Add volume information to dictionary associating output file index to
Arguments:
----------
outfile: outfile volume
volume: volume from buffer
"""
if (not isinstance(outfile.index, int)
or not isinstance(volume, Volume)
or not isinstance(outfile, Volume)):
raise TypeError()
if not outfile.index in array_dict.keys():
array_dict[outfile.index] = list()
array_dict[outfile.index].append(volume)
def clean_arrays_dict(arrays_dict):
""" From a dictionary of Volumes, creates a dictionary of list of slices.
The new arrays_dict associates each output file to each volume that must be written at a time.
"""
for k in arrays_dict.keys():
volumes_list = arrays_dict[k]
arrays_dict[k] = [convert_Volume_to_slices(v) for v in volumes_list]
def get_overlap_subarray(hypercube1, hypercube2):
""" Find the intersection of both files.
Refactor of hypercubes_overlap to return the overlap subarray
Returns:
--------
pair of corners of the subarray
See also:
---------
utils.hypercubes_overlap
"""
if not isinstance(hypercube1, Volume) or \
not isinstance(hypercube2, Volume):
raise TypeError()
lowercorner1, uppercorner1 = hypercube1.get_corners()
lowercorner2, uppercorner2 = hypercube2.get_corners()
nb_dims = len(uppercorner1)
subarray_lowercorner = list()
subarray_uppercorner = list()
for i in range(nb_dims):
subarray_lowercorner.append(max(lowercorner1[i], lowercorner2[i]))
subarray_uppercorner.append(min(uppercorner1[i], uppercorner2[i]))
# print(f"Overlap subarray : {subarray_lowercorner[0]}:{subarray_uppercorner[0]}, {subarray_lowercorner[1]}:{subarray_uppercorner[1]}, {subarray_lowercorner[2]}:{subarray_uppercorner[2]}")
return (subarray_lowercorner, subarray_uppercorner)
def get_named_volumes(blocks_partition, block_shape):
""" Return the coordinates of all entities of shape block shape in the reconstructed image.
The first entity is placed at the origin of the base.
Returns:
---------
d: dictionary mapping each buffer numeric index to a Volume representing its coordinates
Arguments:
----------
blocks_partition: Number of blocks in each dimension. Shape of the reconstructed image in terms of the blocks considered.
block_shape: shape of one block, all blocks having the same shape
"""
# logger.debug("== Function == get_named_volumes")
d = dict()
# logger.debug("[Arg] blocks_partition: %s", blocks_partition)
# logger.debug("[Arg] block_shape: %s", block_shape)
for i in range(blocks_partition[0]):
for j in range(blocks_partition[1]):
for k in range(blocks_partition[2]):
bl_corner = (block_shape[0] * i,
block_shape[1] * j,
block_shape[2] * k)
tr_corner = (block_shape[0] * (i+1),
block_shape[1] * (j+1),
block_shape[2] * (k+1))
index = _3d_to_numeric_pos((i, j, k), blocks_partition, order='C')
d[index] = Volume(index, bl_corner, tr_corner)
# logger.debug("Indices of names volumes found: %s", d.keys())
# logger.debug("End\n")
return d
def apply_merge(volume, volumes, merge_directions):
""" Merge volume with other volumes from volumes list in the merge directions.
Arguments:
----------
volume: volume to merge
volumes: list of volumes
merge_directions: indicates neighbours to merge with
"""
import copy
logger.debug("\t== Function == apply_merge")
p1, p2 = volume.get_corners()
logger.debug("\tTargetting volume with low corner %s", p1)
if len(merge_directions) == 1:
if Axes.k in merge_directions:
p1_target = list(copy.deepcopy(p1))
p1_target[Axes.k.value] = p2[Axes.k.value]
new_volume = get_new_volume(volume, tuple(p1_target))
elif Axes.j in merge_directions:
p1_target = list(copy.deepcopy(p1))
p1_target[Axes.j.value] = p2[Axes.j.value]
new_volume = get_new_volume(volume, tuple(p1_target))
elif Axes.i in merge_directions:
p1_target = list(copy.deepcopy(p1))
p1_target[Axes.i.value] = p2[Axes.i.value]
new_volume = get_new_volume(volume, tuple(p1_target))
elif len(merge_directions) == 2:
logger.debug("\tMerge directions: %s", merge_directions)
axis1, axis2 = merge_directions
p1_target = list(copy.deepcopy(p1))
p1_target[axis1.value] = p2[axis1.value]
volume_axis1 = get_new_volume(volume, tuple(p1_target))
new_volume_axis1 = apply_merge(volume_axis1, volumes, [axis2])
new_volume_axis2 = apply_merge(volume, volumes, [axis2])
new_volume = merge_volumes(new_volume_axis1, new_volume_axis2)
elif len(merge_directions) == 3:
logger.debug("\tMerge directions %s", merge_directions)
axis1, axis2, axis3 = merge_directions
p1_target = list(copy.deepcopy(p1))
p1_target[axis1.value] = p2[axis1.value]
volume_axis1 = get_new_volume(volume, tuple(p1_target))
new_vol1 = apply_merge(volume, volumes, [axis2, axis3])
new_vol2 = apply_merge(volume_axis1, volumes, [axis2, axis3])
new_volume = merge_volumes(new_vol1, new_vol2)
else:
raise ValueError()
logger.debug("\tEnd")
return new_volume
def numeric_to_3d_pos(numeric_pos, blocks_partition, order):
""" Convert numeric block position into its 3d position in the array in a given storage order.
See also:
--------
get_partition
"""
if order == 'C':
nb_blocks_per_row = blocks_partition[2]
nb_blocks_per_slice = blocks_partition[1] * blocks_partition[2]
else:
raise ValueError("unsupported")
i = math.floor(numeric_pos / nb_blocks_per_slice)
numeric_pos -= i * nb_blocks_per_slice
j = math.floor(numeric_pos / nb_blocks_per_row)
numeric_pos -= j * nb_blocks_per_row
k = numeric_pos
return (i, j, k)
def _3d_to_numeric_pos(_3d_pos, blocks_partition, order):
""" Convert 3d block position into its numeric position in a given storage order.
See also:
--------
get_partition
"""
if order == 'C':
nb_blocks_per_row = blocks_partition[2]
nb_blocks_per_slice = blocks_partition[1] * blocks_partition[2]
else:
raise ValueError("unsupported")
return (_3d_pos[0] * nb_blocks_per_slice) + \
(_3d_pos[1] * nb_blocks_per_row) + _3d_pos[2]
def get_partition(array_shape, chunk_shape):
""" Returns partition of array by chunks.
Arguments:
----------
array_shape: shape of input array
chunk_shape: shape of one chunk
Returns:
--------
the partition as a tuple
"""
chunks = chunk_shape
# logger.debug(f'Chunks for get_array_block_dims: {chunks}')
if not len(array_shape) == len(chunks):
raise ValueError(
"chunks and shape should have the same dimension",
array_shape,
chunks)
return tuple([int(s / c) for s, c in zip(array_shape, chunks)])
def to_basis(v, basis):
""" Create a new volume from volume v with basis changed from R to basis
Arguments:
----------
v: Volume obj
basis: Volume obj
"""
v2 = Volume(0, v.p1, v.p2)
offset = ((-1) * basis.p1[0], (-1) * basis.p1[1], (-1) * basis.p1[2])
v2.add_offset(offset)
# sanity check
p1, p2 = v2.get_corners()
for p in [p1, p2]:
for e in p:
if e < 0:
print("Volume in basis R:")
v.print()
print("Basis:")
basis.print()
raise ValueError("An error occured while changing from basis R to new basis")
return v2 | [
11748,
10088,
11,
18931,
11,
10688,
11,
26692,
22602,
198,
6738,
33829,
1330,
2039,
388,
198,
198,
6738,
1128,
433,
653,
62,
23100,
6800,
13,
7753,
62,
687,
1381,
13,
71,
7568,
20,
1330,
5572,
37,
20,
62,
37153,
198,
198,
6404,
2667... | 2.345403 | 4,971 |
"""
一、1167. 连接棒材的最低费用.py
遇到这种需要排序,然后取最小的两个或者最大的两个数进行操作得到一个结果,这个结果又要与剩下的元素进行同样操作的时候,可以采用堆的数据结构简化
二、1564. 把箱子放进仓库里 I.py
1.当有双指针时,其中一个指针必须遍历完所有元素,则可将while替换为for循环
2.这个跟分发饼干有些类似,关键在于将高低不同的warehouse转换为非递增的序列
例如 3 5 4 2 3, 后一块墙能通过多大的板子受到前一块板子的限制,也就是能通过当前墙面的最大板子为min(前一块墙高度,当前墙高度)
所以可以通过从左到右遍历,两个相邻的墙对比,如果后一块墙要高于前一快,则将后一块改成跟前一块一样高就行,
再对boxes进行排序,这样warehouse和boxes都是有序的,两个有序序列的分发问题,就是分发饼干了,用双指针即可。
三、870. 优势洗牌.py
方法1: 排序+贪心 (单指针插入法): 用一个索引变量来控制元素插入位置,插入一个则变量自增1,如果遇到插入位置已经有元素,
则再向右移动直到找到右边第一个未插入元素的位置
方法2: 排序+贪心 (双表拆合法): 将两种不同性质的元素分拆到两个列表里,然后再根据条件选择其中的一个元素放入结果列表中合并
四、342. 4的幂.py
取模运算定律
五、389. 找不同.py
相同字符进行异或运算则抵消为0,所以如果s比t少了一个字符,则直接将两个字符串的所有字符进行异或运算即可
六、405. 数字转换为十六进制数.py
hex_str = "0123456789abcdef"; 可以使用这个对十进制数转换为十六进制数进行映射简化操作
七、面试题 17.10. 主要元素.py
摩尔投票法:
1.判断票数是否为0,如果为0则取当前元素为结果
2.判断当前结果是否与当前元素相等,相等则将票数+1,否则-1
八、面试题 05.06. 整数转换.py
Python3占用字节数
九、751. IP 到 CIDR.py
start & -start算出来的是start的二进制表示中,最右边的一个“1”及该“1”右边的所有0
用于构建子网的位必须都为0,否则不能用于构建本题中的ip
能够用于构建子网的位不能比n的二进制表示的长度要大,n二进制表示为100,则用于构建子网的位必须 < 3
而由于start & (-start)除了包含用于构建子网的“0”,还包含start二进制表示中最右边的一个1,
故(start & -start).bit_length() <= n.bit_length(), 所以在符合要求情况下,最长的用于构建子网的位为:
min((start & -start).bit_length(), n.bit_length()) - 1
mask = 32 - (min((start & -start).bit_length(), n.bit_length()) - 1) =》
33 - min((start & -start).bit_length(), n.bit_length()
十、1356. 根据数字二进制下 1 的数目排序.py
bin(x).count("1")
""" | [
37811,
198,
31660,
23513,
1157,
3134,
13,
5525,
123,
252,
162,
236,
98,
162,
96,
240,
30266,
238,
21410,
17312,
222,
19526,
236,
164,
112,
117,
18796,
101,
13,
9078,
198,
34402,
229,
26344,
108,
32573,
247,
163,
100,
235,
165,
250,
... | 0.704077 | 2,085 |
from .root import RootCmd # noqa
| [
6738,
764,
15763,
1330,
20410,
40109,
220,
1303,
645,
20402,
198
] | 3.090909 | 11 |
import random
import matplotlib.pyplot as plt
import pandas as pd
from math import fabs
import numpy as np
sex_differences = []
pair_differences = []
for i in range(2, 100):
for j in range(i):
men = j
women = i - j
homo_pair = 0
hetero_pair = 0
for m in range(men):
men = men - 1
if random.randint(1,i) <= j:
men = men - 1
homo_pair = homo_pair + 1
else:
women = women - 1
hetero_pair = hetero_pair + 1
homo_pair = homo_pair + women // 2
sex_difference = fabs(2*j - i) / i
sex_differences.append(sex_difference)
pair_difference = homo_pair / (homo_pair + hetero_pair)
pair_differences.append(pair_difference)
plt.scatter(x=pair_differences, y=sex_differences, color='blue', label='generated characteristic')
darwin_output = pd.read_csv('output/logs.csv', sep=';')
darwin_sex_differences = np.fabs((np.array(darwin_output.adult_males_number.tolist()) - np.array(darwin_output.adult_females_number.tolist())) / (np.array(darwin_output.adult_males_number.tolist()) + np.array(darwin_output.adult_females_number.tolist())))
darwin_pair_differences = np.array(darwin_output.homo_couples_number.tolist()) / (np.array(darwin_output.homo_couples_number.tolist()) + np.array(darwin_output.straight_couples_number.tolist()))
plt.scatter(x=darwin_pair_differences, y=darwin_sex_differences, color='red', label='actual results')
plt.title('Sex difference')
plt.xlabel('How many more homosexual couples there are')
plt.ylabel('Difference in number of adults of a given sex')
plt.legend(loc=2)
plt.show()
| [
11748,
4738,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
10688,
1330,
7843,
82,
198,
11748,
299,
32152,
355,
45941,
198,
198,
8044,
62,
26069,
4972,
796,
17635,
198,
2... | 2.289402 | 736 |
"""
Module to provide for "-m project_summarizer" access to the module,
as if it was run from the console.
"""
import project_summarizer
def main():
"""
Main entry point. Exposed in this manner so that the setup
entry_points configuration has something to execute.
"""
project_summarizer.ProjectSummarizer().main()
if __name__ == "__main__":
main()
| [
37811,
198,
26796,
284,
2148,
329,
27444,
76,
1628,
62,
16345,
3876,
7509,
1,
1895,
284,
262,
8265,
11,
198,
292,
611,
340,
373,
1057,
422,
262,
8624,
13,
198,
37811,
198,
11748,
1628,
62,
16345,
3876,
7509,
628,
198,
4299,
1388,
33... | 3.15 | 120 |
"""
HexagDLy utilities for illustrative examples.
"""
import numpy as np
import numpy.linalg as LA
from scipy.interpolate import griddata
import torch
import torch.utils.data
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import torch.nn.functional as F
import torch.optim.lr_scheduler as scheduler
import os
import matplotlib.pyplot as plt
import time
class toy_data:
r"""Object that contains a set of toy images of randomly scattered
hexagonal shapes of a certain kind.
Args:
shape: str, choose from ...
nx: int, dimension in x
ny: int, dimension in y
nchannels: int, number of input channels ('colour' channels)
nexamples: int, number of images
px: int, center row for shape
py: int, center column for shape
"""
###################################################################
class toy_data2:
r"""Object that contains a set of toy images of randomly scattered
hexagonal shapes of a certain kind.
Args:
shape: str, choose from ...
nx: int, dimension in x
ny: int, dimension in y
nchannels: int, number of input channels ('colour' channels)
nexamples: int, number of images
px: int, center row for shape
py: int, center column for shape
"""
class toy_dataset:
r"""Object that creates a data set containing different shapes
Args:
shapes: list of strings with names of different shapes
nperclass: int, number of images of each shape
nx: int, number of columns of pixels
ny: int, number of rows of pixels
nchannels: int, number of channels for each image
"""
class model:
r"""A toy model CNN
Args:
train_dataloader: pytorch dataloader with training data
val_dataloader: pytorch dataloader with validation data
net: CNN model
epochs: int, number of epochs to train
"""
| [
37811,
220,
198,
39,
1069,
363,
19260,
88,
20081,
329,
6406,
13260,
6096,
13,
198,
198,
37811,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
299,
32152,
13,
75,
1292,
70,
355,
9131,
198,
6738,
629,
541,
88,
13,
3849,
16104,
3... | 2.385792 | 915 |
# sum of elements in given range
arr=list(map(int,input().split()))
m=int(input("query size: "))
query=[]
for i in range(m):
l,r=map(int,input().split())
query.append([l,r])
query.sort(key=lambda x:x[1])
curL,curR,curS=0,0,0
for i in range(len(query)):
l,r=query[i]
while curL<l: #move to right
curS-=arr[curL]
curL+=1
while curL>l: #move to left
curS+=arr[curL-1]
curL-=1
while curR<=r: #move to right
curS+=arr[curR]
curR+=1
while curR>r+1: #move to left
curS-=arr[curR-1]
curR-=1
print("sum of ",query[i],": ",curS)
| [
2,
2160,
286,
4847,
287,
1813,
2837,
198,
198,
3258,
28,
4868,
7,
8899,
7,
600,
11,
15414,
22446,
35312,
3419,
4008,
198,
76,
28,
600,
7,
15414,
7203,
22766,
2546,
25,
366,
4008,
198,
22766,
28,
21737,
198,
1640,
1312,
287,
2837,
... | 1.813889 | 360 |
import re
import os
from .cli import CLI
from .runner import Runner
from .__cxx__ import CxxRunner
if __name__ == '__main__':
main()
| [
11748,
302,
198,
11748,
28686,
198,
198,
6738,
764,
44506,
1330,
43749,
198,
6738,
764,
16737,
1330,
21529,
198,
6738,
764,
834,
66,
5324,
834,
1330,
327,
5324,
49493,
628,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
... | 2.88 | 50 |
"""
Subscription: Subscriptions are repeat billing agreements with specific buyers.
BitPay sends bill emails to buyers identified in active subscriptions according
to the specified schedule.
"""
from .bill_data import BillData
from ...utils.key_utils import change_camel_case_to_snake_case
class Subscription:
"""
Subscription
"""
__id = None
__status = None
"""
BillData
"""
__bill_data = None
__schedule = None
__next_delivery = None
__created_date = None
__token = None
def get_id(self):
"""
Get method for the id
:return: id
"""
return self.__id
def set_id(self, id):
"""
Set method for to id
:param id: id
"""
self.__id = id
def get_status(self):
"""
Get method for the status
:return: status
"""
return self.__status
def set_status(self, status):
"""
Set method for to status
:param status: status
"""
self.__status = status
def get_bill_data(self):
"""
Get method for the bill_data
:return: bill_data
"""
return self.__bill_data
def set_bill_data(self, bill_data: BillData):
"""
Set method for to bill_data
:param bill_data: bill_data
"""
self.__bill_data = bill_data
def get_schedule(self):
"""
Get method for the schedule
:return: schedule
"""
return self.__schedule
def set_schedule(self, schedule):
"""
Set method for to schedule
:param schedule: schedule
"""
self.__schedule = schedule
def get_next_delivery(self):
"""
Get method for the next_delivery
:return: next_delivery
"""
return self.__next_delivery
def set_next_delivery(self, next_delivery):
"""
Set method for to next_delivery
:param next_delivery: next_delivery
"""
self.__next_delivery = next_delivery
def get_created_date(self):
"""
Get method for the created_date
:return: created_date
"""
return self.__created_date
def set_created_date(self, created_date):
"""
Set method for to created_date
:param created_date: created_date
"""
self.__created_date = created_date
def get_token(self):
"""
Get method for the token
:return: token
"""
return self.__token
def set_token(self, token):
"""
Set method for to token
:param token: token
"""
self.__token = token
def to_json(self):
"""
:return: data in json
"""
data = {
"id": self.get_id(),
"status": self.get_status(),
"billData": self.get_bill_data().to_json(),
"schedule": self.get_schedule(),
"nextDelivery": self.get_next_delivery(),
"createdDate": self.get_created_date(),
"token": self.get_token(),
}
data = {key: value for key, value in data.items() if value}
return data
| [
37811,
198,
7004,
33584,
25,
3834,
12048,
507,
389,
9585,
26297,
11704,
351,
2176,
14456,
13,
198,
13128,
19197,
12800,
2855,
7237,
284,
14456,
5174,
287,
4075,
35675,
1864,
198,
1462,
262,
7368,
7269,
13,
198,
37811,
198,
6738,
764,
35... | 2.177898 | 1,484 |
import os.path as _path
import cantera
from ._ember import *
from . import _ember
from .input import *
from .output import *
from . import utils
__version__ = '1.4.0'
# Add Ember's data file directory to Cantera's search path. Because the Python
# module is statically linked to Cantera, this needs to be done separately for
# each of the two copies of the Cantera library that have been loaded.
_datapath = _path.join(_path.dirname(_path.abspath(__file__)), 'data')
_ember.addCanteraDirectory(_datapath)
cantera.add_directory(_datapath)
| [
11748,
28686,
13,
6978,
355,
4808,
6978,
198,
198,
11748,
460,
49600,
198,
6738,
47540,
1491,
1330,
1635,
198,
6738,
764,
1330,
4808,
1491,
198,
6738,
764,
15414,
1330,
1635,
198,
6738,
764,
22915,
1330,
1635,
198,
6738,
764,
1330,
3384... | 3.319018 | 163 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os
import unittest
import tempfile
from mock.mock import patch, MagicMock, call
from agent import shell
from sys import platform as _platform
import subprocess, time
import sys
import platform
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
7061,
6,
198,
26656,
15385,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
198,
273,
517,
18920,
5964... | 3.847909 | 263 |
from setuptools import setup
import recs
_classifiers = [
'Development Status :: 4 - Beta',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities',
]
setup(name='recs',
version=recs.__version__,
author='Tom Ritchford',
author_email='tom@swirly.com',
url='https://github.com/rec/recs',
tests_require=['pytest'],
py_modules=['recs'],
description='Try to import all modules below a given root',
long_description=open('README.rst').read(),
license='MIT',
classifiers=_classifiers,
keywords=['testing', 'importing'],
)
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
11748,
664,
82,
198,
198,
62,
4871,
13350,
796,
685,
198,
220,
220,
220,
705,
41206,
12678,
7904,
604,
532,
17993,
3256,
198,
220,
220,
220,
705,
15167,
2229,
15417,
7904,
11361,
7904,
513,
13... | 2.658147 | 313 |
import unittest
from base import BaseTestCase
| [
11748,
555,
715,
395,
198,
198,
6738,
2779,
1330,
7308,
14402,
20448,
628
] | 3.692308 | 13 |
from django.contrib.sitemaps import Sitemap
from django.shortcuts import reverse
from .models import Product
| [
6738,
42625,
14208,
13,
3642,
822,
13,
82,
9186,
1686,
1330,
311,
9186,
499,
201,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
9575,
201,
198,
6738,
764,
27530,
1330,
8721,
201,
198,
220,
220,
220,
220
] | 3.135135 | 37 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019-06-18 14:00
# @Author : 张江
# @Site :
# @File : CustomMiddleModule.py
# @Software: PyCharm
#添加自己的中间件
from django.utils.deprecation import MiddlewareMixin
# 可以在中间件中添加用户认证和登录设置等信息 | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2488,
7575,
220,
220,
220,
1058,
13130,
12,
3312,
12,
1507,
1478,
25,
405,
198,
2,
2488,
13838,
220,
1058,
10263,
... | 1.502994 | 167 |
from io import BytesIO
from os import SEEK_END, SEEK_SET
import re
import gs_chunked_io as gscio
from .basefile import BaseAnVILFile
from .basefolder import BaseAnVILFolder
| [
6738,
33245,
1330,
2750,
4879,
9399,
198,
6738,
28686,
1330,
31107,
42,
62,
10619,
11,
31107,
42,
62,
28480,
198,
198,
11748,
302,
198,
198,
11748,
308,
82,
62,
354,
2954,
276,
62,
952,
355,
308,
1416,
952,
198,
198,
6738,
764,
8692... | 2.8 | 65 |
import requests
import json
filepath = '../data/maps/edusharing-subject-mapping.tsv'
input = open(filepath).readlines()
for row in input:
if "https" in row:
uri = row.split('\t')[1].rstrip()
label = requests.get(uri, headers={"accept":"application/json"}).json()['prefLabel']['de'].encode('utf-8').strip()
with open('../data/maps/subject-labels.tsv', 'a') as f:
f.write(uri + "\t" + label + "\n") | [
11748,
7007,
198,
11748,
33918,
198,
198,
7753,
6978,
796,
705,
40720,
7890,
14,
31803,
14,
276,
1530,
1723,
12,
32796,
12,
76,
5912,
13,
912,
85,
6,
198,
198,
15414,
796,
1280,
7,
7753,
6978,
737,
961,
6615,
3419,
198,
198,
1640,
... | 2.360215 | 186 |
# http://www.codewars.com/kata/53daa9e5af55c184db00025f/
| [
2,
2638,
1378,
2503,
13,
19815,
413,
945,
13,
785,
14,
74,
1045,
14,
4310,
6814,
64,
24,
68,
20,
1878,
2816,
66,
22883,
9945,
830,
1495,
69,
14,
198
] | 1.9 | 30 |
from NanoTCAD_ViDES import *
from numpy import genfromtxt
fi = genfromtxt("./datiout_idvds/idvds.out", delimiter = ' ')
plot(fi[:,0],fi[:,1])
show() | [
6738,
33504,
4825,
2885,
62,
38432,
30910,
1330,
1635,
198,
6738,
299,
32152,
1330,
2429,
6738,
14116,
198,
198,
12463,
796,
2429,
6738,
14116,
7,
1911,
14,
67,
7246,
448,
62,
312,
85,
9310,
14,
312,
85,
9310,
13,
448,
1600,
46728,
... | 2.365079 | 63 |
# Copyright Contributors to the Pyro-Cov project.
# SPDX-License-Identifier: Apache-2.0
import pyro.poutine as poutine
import pytest
import torch
from pyro.infer.autoguide import AutoDelta
from pyrocov.substitution import GeneralizedTimeReversible, JukesCantor69
@pytest.mark.parametrize("Model", [JukesCantor69, GeneralizedTimeReversible])
| [
2,
15069,
25767,
669,
284,
262,
44954,
12,
34,
709,
1628,
13,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
198,
11748,
12972,
305,
13,
79,
28399,
355,
279,
28399,
198,
11748,
12972,
9288,
198,
117... | 3.053097 | 113 |
#!/usr/bin/env python
import operator
import optparse
import os
import re
import subprocess
import sys
import datetime
#def lrange(num1, num2 = None, step = 1, format = format_murmur):
# offset = 0 if format == format_md5 else 2**63
# max = 2**127-1 if format == format_md5 else 2**63-1
# wrap = 2**128 if format == format_md5 else 2**64
#
# print "%d %d" % (num1+offset, num2+offset)
# while (num1 + offset < num2 + offset):
# yield num1
# num1 += step
# if num1 > max:
# num1 -= wrap
# for i in ring:
# if token > i:
# return i
#
# if is_murmur_ring(ring):
# return 2**63 - 1
#
# return 2**127 - 1
# for i in lrange(start + step_increment, stop + 1, step_increment, format):
# print "start = %d, i = %d" % (start, i)
# yield start, i
# start = i
if __name__ == '__main__':
main()
# success, ring_tokens, error = get_ring_tokens()
# success, host_token, error = get_host_token()
# range_termination = get_range_termination(host_token, ring_tokens)
# steps = 100
# print repr(is_murmur_ring(ring_tokens))
# print repr(get_ring_tokens())
# print repr(get_host_token())
# print repr(get_range_termination(host_token, ring_tokens))
# print repr(get_sub_range_generator(host_token, range_termination, steps).next())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
10088,
198,
11748,
2172,
29572,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
850,
14681,
198,
11748,
25064,
198,
11748,
4818,
8079,
198,
198,
2,
4299,
300,
9521,
7,
22510,
16,
... | 2.375899 | 556 |
from django.contrib.gis import admin
from .models import Room
admin.site.register(Room, RoomAdmin)
| [
6738,
42625,
14208,
13,
3642,
822,
13,
70,
271,
1330,
13169,
198,
6738,
764,
27530,
1330,
10096,
628,
198,
198,
28482,
13,
15654,
13,
30238,
7,
41178,
11,
10096,
46787,
8,
198
] | 3.1875 | 32 |
import pygame
from colors import *
class Surface:
"""
Handles functions that interact with the screen including:
drawing to the screen
creating the screen
"""
def __init__(
self, rows=17, columns=17, blocksize=20, caption="Snake Game", color=WHITE
):
"""
:param:
(rows=17, columns=17, blocksize=20, caption="Snake Game")
rows - tells how many rows there will be
columns - tells how many columns there will be
blocksize - tells how big a square
caption - tells what the title in the game window
color - tells what the color of screen is
"""
# constants
self.rows = rows
self.columns = columns
self.blocksize = blocksize
self.caption = caption
self.color = color
def make_screen(self):
"""
Initializes the screen object where the game is played.
Only used at runtime, or when game plays
"""
self.screen = pygame.display.set_mode(
(self.rows * self.blocksize, self.columns * self.blocksize)
)
pygame.display.set_caption(self.caption)
self.screen.fill(self.color)
def make_rect(self, x, y, color, **kwargs):
"""
Used by apple and snake object.
Draws a rectangle onto the screen.
"""
rect = pygame.Rect(x, y, self.blocksize, self.blocksize)
pygame.draw.rect(self.screen, color, rect, **kwargs)
pygame.display.update(rect)
if __name__ == "__main__":
# Test for creation of the screen
pygame.init()
surface = Surface(color=WHITE)
surface.make_screen()
pygame.display.flip()
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
| [
11748,
12972,
6057,
198,
6738,
7577,
1330,
1635,
198,
198,
4871,
20321,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
7157,
829,
5499,
326,
9427,
351,
262,
3159,
1390,
25,
198,
220,
220,
220,
8263,
284,
262,
3159,
198,
220,
220... | 2.387306 | 772 |
stringVar1 = "190905042"
stringVar2 = "5042"
stringVar3 = "Kunal"
stringVar4 = "Wadhwa"
stringVar5 = "abc"
stringVar = ["Kunal", "Tanya", "Olivia", "5042", "123"]
WeightOfArrayOfStrings(stringVar)
MaximumAndMinimum_Value_Of_Strings(stringVar)
stringVar = ["AAA", "BBB", "CCC", "DDD"]
WeightOfArrayOfStrings(stringVar)
MaximumAndMinimum_Value_Of_Strings(stringVar)
stringVar = ["1234", "5678", "91011", "12131"]
WeightOfArrayOfStrings(stringVar)
MaximumAndMinimum_Value_Of_Strings(stringVar)
MaximumAndMinimum_Value_Of_Strings(stringVar1)
MaximumAndMinimum_Value_Of_Strings(stringVar2)
MaximumAndMinimum_Value_Of_Strings(stringVar3)
WeightOfArrayOfStrings(stringVar4)
MaximumAndMinimum_Value_Of_Strings(stringVar4)
MaximumAndMinimum_Value_Of_Strings(stringVar5)
"""
Output:
Index: 0 String : Kunal Weight : 507
Index: 1 String : Tanya Weight : 509
Index: 2 String : Olivia Weight : 612
Index: 3 String : 5042 Weight : 203
Index: 4 String : 123 Weight : 150
Maximum : Tanya Weight : 509
Minimum : 123 Weight : 150
Index: 0 String : AAA Weight : 195
Index: 1 String : BBB Weight : 198
Index: 2 String : CCC Weight : 201
Index: 3 String : DDD Weight : 204
Maximum : DDD Weight : 204
Minimum : AAA Weight : 195
Index: 0 String : 1234 Weight : 202
Index: 1 String : 5678 Weight : 218
Index: 2 String : 91011 Weight : 252
Index: 3 String : 12131 Weight : 248
Maximum : 91011 Weight : 252
Minimum : 12131 Weight : 248
Maximum : 9 Weight : 57
Minimum : 0 Weight : 48
Maximum : 5 Weight : 53
Minimum : 0 Weight : 48
Maximum : u Weight : 117
Minimum : K Weight : 75
Index: 0 String : W Weight : 87
Index: 1 String : a Weight : 97
Index: 2 String : d Weight : 100
Index: 3 String : h Weight : 104
Index: 4 String : w Weight : 119
Index: 5 String : a Weight : 97
Maximum : w Weight : 119
Minimum : W Weight : 87
Maximum : c Weight : 99
Minimum : a Weight : 97
""" | [
8841,
19852,
16,
796,
366,
1129,
2931,
28669,
3682,
1,
198,
8841,
19852,
17,
796,
366,
1120,
3682,
1,
198,
8841,
19852,
18,
796,
366,
42,
18835,
1,
198,
8841,
19852,
19,
796,
366,
54,
24411,
10247,
1,
198,
8841,
19852,
20,
796,
36... | 2.207724 | 958 |
"""
Various wrapper functions/classes which use :mod:`socket` or are strongly tied to functions in this file
which use :mod:`socket`. Part of :mod:`privex.helpers.net` - network related helper code.
**Copyright**::
+===================================================+
| © 2019 Privex Inc. |
| https://www.privex.io |
+===================================================+
| |
| Originally Developed by Privex Inc. |
| License: X11 / MIT |
| |
| Core Developer(s): |
| |
| (+) Chris (@someguy123) [Privex] |
| (+) Kale (@kryogenic) [Privex] |
| |
+===================================================+
Copyright 2019 Privex Inc. ( https://www.privex.io )
"""
import asyncio
import functools
import socket
import ssl
import time
from ipaddress import ip_network
from typing import Any, Callable, Generator, IO, Iterable, List, Optional, Tuple, Union
import attr
from privex.helpers import settings
from privex.helpers.common import LayeredContext, byteify, empty, empty_if, is_true, stringify, strip_null
from privex.helpers.thread import SafeLoopThread
from privex.helpers.asyncx import await_if_needed, run_coro_thread
from privex.helpers.net.util import generate_http_request, get_ssl_context, ip_is_v6, ip_sock_ver, is_ip
from privex.helpers.net.dns import resolve_ip, resolve_ip_async
from privex.helpers.types import AUTO, AUTO_DETECTED, AnyNum, STRBYTES, T
import logging
log = logging.getLogger(__name__)
__all__ = [
'AnySocket', 'OpAnySocket', 'SocketContextManager',
'StopLoopOnMatch', 'SocketWrapper', 'AsyncSocketWrapper', 'send_data_async', 'send_data', 'upload_termbin',
'upload_termbin_file', 'upload_termbin_async', 'upload_termbin_file_async'
]
AnySocket = Union[ssl.SSLSocket, "socket.socket"]
OpAnySocket = Optional[Union[ssl.SSLSocket, "socket.socket"]]
@attr.s
class SocketTracker:
"""
Data class used by :class:`.SocketWrapper` / :class:`.AsyncSocketWrapper` for managing sockets
"""
host: str = attr.ib()
port: int = attr.ib(converter=int)
timeout: Union[int, float] = attr.ib(factory=lambda: settings.DEFAULT_SOCKET_TIMEOUT)
server: bool = attr.ib(default=False, converter=is_true)
connected: bool = attr.ib(default=False, converter=is_true)
binded: bool = attr.ib(default=False, converter=is_true)
listening: bool = attr.ib(default=False, converter=is_true)
use_ssl: bool = attr.ib(default=False, converter=is_true)
socket_conf: dict = attr.ib(factory=dict)
ssl_conf: dict = attr.ib(factory=dict)
ssl_wrap_conf: dict = attr.ib(factory=dict)
hostname: str = attr.ib(default=None)
_ssl_context: ssl.SSLContext = attr.ib(default=None)
_ssl_socket: ssl.SSLSocket = attr.ib(default=None)
_loop: asyncio.AbstractEventLoop = attr.ib(default=None)
_socket: AnySocket = attr.ib(default=None)
_socket_layer_ctx = attr.ib(default=None)
_host_v4: Optional[str] = attr.ib(default=None)
_host_v6: Optional[str] = attr.ib(default=None)
_host_v4_resolved: bool = attr.ib(default=False)
_host_v6_resolved: bool = attr.ib(default=False)
@property
@family.setter
@property
@property
@property
@socket.setter
@property
@socket_layer_ctx.setter
@property
@ssl_context.setter
@property
@ssl_socket.setter
@property
@property
@property
@property
connected_ip = ip_address
@property
@classmethod
class SocketWrapper(object):
"""
A wrapper class to make working with :class:`socket.socket` much simpler.
.. NOTE:: For AsyncIO, use :class:`.AsyncSocketWrapper` instead.
**Features**
* Automatic address family detection - detects whether you have working IPv4 / IPv6, and decides the best way
to connect to a host, depending on what IP versions that host supports
* ``Happy Eyeballs`` for IPv6. If something goes wrong with an IPv6 connection, it will fallback to IPv4 if the
host has it available (i.e. a domain with both ``A`` and ``AAAA`` records)
* Easy to use SSL, which works with HTTPS and other SSL-secured protocols. Just pass ``use_ssl=True`` in the constructor.
* Many wrapper methods such as :meth:`.recv_eof`, :meth:`.query`, and :meth:`.http_request` to make working
with sockets much easier.
**Examples**
Send a string of bytes / text to a server, and then read until EOF::
>>> sw = SocketWrapper('icanhazip.org', 80)
>>> res = sw.query("GET / HTTP/1.1\\nHost: icanhazip.com\\n\\n")
>>> print(res)
HTTP/1.1 200 OK
Server: nginx
Content-Type: text/plain; charset=UTF-8
Content-Length: 17
x-rtfm: Learn about this site at http://bit.ly/icanhazip-faq and do not abuse the service.
2a07:e00::abc
For basic HTTP requests, you can use :meth:`.http_request`, which will automatically send ``Host`` (based on the host you passed),
and ``User-Agent``. SSL works too, just set ``use_ssl=True``::
>>> sw = SocketWrapper('myip.privex.io', 443, use_ssl=True)
>>> res = sw.http_request('/?format=json')
>>> print(res)
HTTP/1.1 200 OK
Server: nginx
Date: Tue, 22 Sep 2020 03:40:48 GMT
Content-Type: application/json
Content-Length: 301
Connection: close
Access-Control-Allow-Origin: *
{"error":false,"geo":{"as_name":"Privex Inc.","as_number":210083,"city":"Stockholm","country":"Sweden",
"country_code":"SE","error":false,"zip":"173 11"},"ip":"2a07:e00::abc","ip_type":"ipv6","ip_valid":true,
"messages":[], "ua":"Python Privex Helpers ( https://github.com/Privex/python-helpers )"}
Standard low-level sending and receiving data::
>>> sw = SocketWrapper('127.0.0.1', 8888)
>>> sw.sendall(b"hello world") # Send the text 'hello world'
>>> sw.recv(64) # read up to 64 bytes of data from the socket
b"lorem ipsum\n"
"""
DEFAULT_TIMEOUT = empty_if(socket.getdefaulttimeout(), settings.DEFAULT_SOCKET_TIMEOUT, zero=True)
_context: Optional[ssl.SSLContext]
_socket: OpAnySocket
_base_socket: Optional[socket.socket]
_ssl_socket: Optional[ssl.SSLSocket]
_layer_context: Optional[LayeredContext]
_socket_ctx_mgr: SocketContextManager
# connected: bool
auto_connect: bool
auto_listen: bool
listen_backlog: int
tracker: SocketTracker
@property
@ssl_conf.setter
@property
@ssl_wrap_conf.setter
@property
@socket_conf.setter
@property
@property
@timeout.setter
@property
@hostname.setter
@property
ssl_context = context
@property
@base_socket.setter
@property
@socket.setter
@property
# @connected.setter
# def connected(self, value):
# self.tracker.connected = value
@_sockwrapper_auto_connect()
@_sockwrapper_auto_connect()
@_sockwrapper_auto_connect()
@_sockwrapper_auto_connect()
@_sockwrapper_auto_connect()
@_sockwrapper_auto_connect()
@_sockwrapper_auto_connect()
@_sockwrapper_auto_connect()
@_sockwrapper_auto_connect()
@_sockwrapper_auto_connect()
@_sockwrapper_auto_connect()
# @_sockwrapper_auto_connect()
# def query(self, data: Union[str, bytes], bufsize: int = 32, eof_timeout=30, **kwargs):
# timeout_fail, send_flags = kwargs.get('timeout_fail'), kwargs.get('send_flags', kwargs.get('flags', None))
# recv_flags = kwargs.get('recv_flags', kwargs.get('flags', None))
# log.debug(" >> Sending %s bytes to %s:%s", len(data), self.host, self.port)
# self.sendall(byteify(data), flags=send_flags)
# log.debug(" >> Reading %s bytes per chunk from %s:%s", bufsize, self.host, self.port)
# return self.read_eof(bufsize, eof_timeout=eof_timeout, flags=recv_flags, timeout_fail=timeout_fail)
# @_sockwrapper_auto_connect()
# def http_request(
# self, url="/", host=AUTO_DETECTED, method="GET", user_agent=DEFAULT_USER_AGENT, extra_data: Union[STRBYTES, List[str]] = None,
# body: STRBYTES = None, eof_timeout=30, **kwargs
# ) -> Union[bytes, Awaitable[bytes]]:
# bufsize, flags, timeout_fail = kwargs.pop('bufsize', 256), kwargs.pop('flags', None), kwargs.pop('timeout_fail', False)
# data = self._http_request(url, host=host, method=method, user_agent=user_agent, extra=extra_data, body=body, **kwargs)
# self.sendall(data, flags=flags)
# return self.read_eof(bufsize, eof_timeout=eof_timeout, flags=flags, timeout_fail=timeout_fail)
@_sockwrapper_auto_connect()
@_sockwrapper_auto_connect()
@_sockwrapper_auto_connect()
@_sockwrapper_auto_connect()
@classmethod
class AsyncSocketWrapper(SocketWrapper):
"""
>>> from privex.helpers import AsyncSocketWrapper
>>> sw = AsyncSocketWrapper('termbin.com', 9999)
>>> url = await sw.query("HELLO world\\n\\nThis is a test\\nusing async sockets\\n\\nwith Python")
'https://termbin.com/lsd93'
>>> url = await sw.read_eof()
"""
_loop: Optional[asyncio.AbstractEventLoop]
DEFAULT_TIMEOUT = empty_if(socket.getdefaulttimeout(), settings.DEFAULT_SOCKET_TIMEOUT, zero=True)
@property
@_async_sockwrapper_auto_connect()
@_async_sockwrapper_auto_connect()
@_async_sockwrapper_auto_connect()
@_async_sockwrapper_auto_connect()
@_async_sockwrapper_auto_connect()
@_async_sockwrapper_auto_connect()
@_async_sockwrapper_auto_connect()
@_async_sockwrapper_auto_connect()
@_sockwrapper_auto_connect()
async def send_data_async(
host: str, port: int, data: Union[bytes, str, Iterable], timeout: AnyNum = None, **kwargs
) -> Optional[Union[str, bytes]]:
"""
>>> await send_data_async('termbin.com', 9999, "hello world\\nthis is a test\\n\\nlorem ipsum dolor\\n")
'https://termbin.com/oi07'
:param host:
:param port:
:param data:
:param timeout:
:param kwargs:
:return:
"""
fhost = f"({host}):{port}"
chunk_size = int(kwargs.get('chunk', kwargs.get('chunk_size', 64)))
string_result = is_true(kwargs.get('string_result', True))
strip_result = is_true(kwargs.get('strip_result', True))
fail = is_true(kwargs.get('fail', True))
ip_version = kwargs.get('ip_version', 'any')
timeout = empty_if(timeout, empty_if(socket.getdefaulttimeout(), 15, zero=True), zero=True)
is_iter, data_iter = False, None
if data is not None:
if isinstance(data, (str, bytes, int, float)):
data = byteify(data)
else:
try:
data_iter = iter(data)
is_iter = True
except TypeError:
# noinspection PyTypeChecker
data = byteify(data)
loop = asyncio.get_event_loop()
try:
s_ver = socket.AF_INET
ip = await resolve_ip_async(host, ip_version)
if ip_is_v6(ip): s_ver = socket.AF_INET6
fhost += f" (IP: {ip})"
with socket.socket(s_ver, socket.SOCK_STREAM) as s:
s.settimeout(float(timeout))
log.debug(" [...] Connecting to host: %s", fhost)
await loop.sock_connect(s, (ip, port))
log.debug(" [+++] Connected to %s\n", fhost)
if data is None:
log.debug(" [!!!] 'data' is None. Not transmitting any data to the host.")
elif is_iter:
i = 1
for c in data_iter:
log.debug(" [...] Sending %s byte chunk (%s)\n", len(c), i)
await loop.sock_sendall(s, c)
else:
# We use 'sendall' to reliably send the entire contents of 'data' to the service we're connected to.
log.debug(" [...] Sending %s bytes to %s ...\n", len(data), fhost)
await loop.sock_sendall(s, data)
# s.sendall(data)
log.debug(" >> Reading response ...")
res = b''
i = 1
while True:
chunk = await loop.sock_recv(s, chunk_size)
if not chunk: break
res += chunk
log.debug(" [...] Read %s byte chunk (%s)\n", len(chunk), i)
i += 1
if string_result:
res = stringify(res)
if strip_result: res = res.strip("\x00").strip().strip("\x00").strip()
log.debug(" [+++] Got result ( %s bytes ) \n", len(res))
except (socket.timeout, ConnectionRefusedError, ConnectionResetError, socket.gaierror) as e:
if fail:
raise e
log.warning("Exception while connecting + sending data to: %s - reason: %s %s", fhost, type(e), str(e))
return None
return res
def send_data(
host: str, port: int, data: Optional[Union[bytes, str, Iterable]] = None, timeout: Union[int, float] = None, **kwargs
) -> Optional[Union[str, bytes]]:
"""
>>> from privex.helpers import send_data
>>> send_data('termbin.com', 9999, "hello world\\nthis is a test\\n\\nlorem ipsum dolor\\n")
'https://termbin.com/oi07'
:param str host: The hostname or IPv4/v6 address to connect to
:param port: The port number to connect to on ``host``
:param bytes|str|iter data: The data to send to ``host:port`` via a TCP socket. Generally :class:`bytes` / :class:`str`.
Can be an iterator/generator to send data in chunks. Can be ``None`` to disable sending data, instead
only receiving and returning data.
:param float|int timeout: Socket timeout. If not passed, uses the default from :func:`socket.getdefaulttimeout`.
If the global default timeout is ``None``, then falls back to ``15``
:param kwargs:
:keyword int chunk: (Default: ``64``) Maximum number of bytes to read into buffer per socket receive call.
:keyword bool string_result: (Default: ``True``) If ``True``, the response sent by the server will be casted into a :class:`str`
before returning it.
:keyword bool strip_result: (Default: ``True``) This argument only works if ``string_result`` is also True.
If both ``string_result`` and ``strip_result`` are ``True``, the response sent by the server will
have whitespace, newlines, and null bytes trimmed from the start and end after it's casted into a string.
:keyword bool fail: (Default: ``True``) If ``True``, will raise exceptions when connection errors occur. When ``False``, will simply
``None`` if there are connection exceptions raised during this function's execution.
:keyword str|int ip_version: (Default: ``any``)
:return:
"""
fhost = f"({host}):{port}"
chunk_size = int(kwargs.get('chunk', kwargs.get('chunk_size', 64)))
string_result = is_true(kwargs.get('string_result', True))
strip_result = is_true(kwargs.get('strip_result', True))
fail = is_true(kwargs.get('fail', True))
ip_version = kwargs.get('ip_version', 'any')
timeout = empty_if(timeout, empty_if(socket.getdefaulttimeout(), 15, zero=True), zero=True)
is_iter, data_iter, is_v6, v4_address, host_is_ip = False, None, False, None, False
if data is not None:
if isinstance(data, (str, bytes, int, float)):
data = byteify(data)
else:
try:
data_iter = iter(data)
is_iter = True
except TypeError:
# noinspection PyTypeChecker
data = byteify(data)
try:
ip_network(host)
host_is_ip = True
except (TypeError, ValueError) as e:
host_is_ip = False
try:
# First we resolve the IP address of 'host', so we can detect whether we're connecting to an IPv4 or IPv6 host,
# letting us adjust the AF_INET variable accordingly.
s_ver = socket.AF_INET
ip = resolve_ip(host, ip_version)
if ip_is_v6(ip):
s_ver, is_v6 = socket.AF_INET6, True
if not host_is_ip:
try:
v4_address = resolve_ip(host, 'v4')
except (socket.timeout, ConnectionRefusedError, ConnectionResetError, socket.gaierror, AttributeError) as e:
log.warning(
"Warning: failed to resolve IPv4 address for %s (to be used as a backup if IPv6 is broken). Reason: %s %s ",
type(e), str(e)
)
fhost += f" (IP: {ip})"
except (socket.timeout, ConnectionRefusedError, ConnectionResetError, socket.gaierror) as e:
if fail:
raise e
log.warning("Exception while connecting + sending data to: %s - reason: %s %s", fhost, type(e), str(e))
return None
try:
with socket.socket(s_ver, socket.SOCK_STREAM) as s:
# Once we have our socket object, we set the timeout (by default it could hang forever), and open the connection.
s.settimeout(timeout)
log.debug(" [...] Connecting to host: %s", fhost)
s.connect((ip, port))
log.debug(" [+++] Connected to %s\n", fhost)
if data is None:
log.debug(" [!!!] 'data' is None. Not transmitting any data to the host.")
elif is_iter:
i = 1
for c in data_iter:
log.debug(" [...] Sending %s byte chunk (%s)\n", len(c), i)
s.sock_sendall(c)
else:
# We use 'sendall' to reliably send the entire contents of 'data' to the service we're connected to.
log.debug(" [...] Sending %s bytes to %s ...\n", len(data), fhost)
s.sendall(data)
# Once we've sent 'data',
log.debug(" >> Reading response ...")
res = b''
i = 1
while True:
chunk = s.recv(chunk_size)
if not chunk: break
res += chunk
log.debug(" [...] Read %s byte chunk (%s)\n", len(chunk), i)
i += 1
if string_result:
res = stringify(res)
if strip_result: res = res.strip("\x00").strip().strip("\x00").strip()
log.debug(" [+++] Got result ( %s bytes ) \n", len(res))
except (socket.timeout, ConnectionRefusedError, ConnectionResetError, socket.gaierror) as e:
log.warning("Exception while connecting + sending data to: %s - reason: %s %s", fhost, type(e), str(e))
if is_v6 and not empty(v4_address):
log.warning(
"Retrying connection to %s over IPv4 instead of IPv6. || IPv6 address: %s || IPv4 address: %s ",
fhost, ip, v4_address
)
return send_data(host, port, data, timeout=timeout, **kwargs)
if fail:
raise e
return None
return res
def upload_termbin(data: Union[bytes, str], timeout: Union[int, float] = None, **kwargs) -> str:
"""
Upload the :class:`bytes` / :class:`string` ``data`` to the pastebin service `TermBin`_ ,
using the hostname and port defined in :attr:`privex.helpers.settings.TERMBIN_HOST`
and :attr:`privex.helpers.settings.TERMBIN_PORT`
NOTE - An AsyncIO version of this function is available: :func:`.upload_termbin_async`
Returns the `TermBin`_ URL as a string - which is a raw download / viewing link for the paste.
.. _TermBin: https://termbin.com
>>> my_data = "hello world\\nthis is a test\\n\\nlorem ipsum dolor\\n"
>>> upload_termbin(my_data)
'https://termbin.com/kerjk'
:param bytes|str data: The data to upload to `TermBin`_ - as either :class:`str` or :class:`bytes`
:param float|int timeout: Socket timeout. If not passed, uses the default from :func:`socket.getdefaulttimeout`.
If the global default timeout is ``None``, then falls back to ``15``
:return str url: The `TermBin`_ URL to your paste as a string - which is a raw download / viewing link for the paste.
"""
data = byteify(data)
log.info(" [...] Uploading %s bytes to termbin ...\n", len(data))
res = send_data(settings.TERMBIN_HOST, settings.TERMBIN_PORT, data, timeout=timeout, **kwargs)
log.info(" [+++] Got termbin link: %s \n", res)
return res
def upload_termbin_file(filename: str, timeout: int = 15, **kwargs) -> str:
"""
Uploads the file ``filename`` to `TermBin`_ and returns the paste URL as a string.
.. NOTE:: An AsyncIO version of this function is available: :func:`.upload_termbin_file_async`
.. NOTE:: If the data you want to upload is already loaded into a variable - you can use :func:`.upload_termbin` instead,
which accepts your data directly - through a :class:`str` or :class:`bytes` parameter
.. _TermBin: https://termbin.com
:param str filename: The path (absolute or relative) to the file you want to upload to `TermBin`_ - as a :class:`str`
:param float|int timeout: Socket timeout. If not passed, uses the default from :func:`socket.getdefaulttimeout`.
If the global default timeout is ``None``, then falls back to ``15``
:return str url: The `TermBin`_ URL to your paste as a string - which is a raw download / viewing link for the paste.
"""
log.info(" >> Uploading file '%s' to termbin", filename)
with open(filename, 'rb') as fh:
log.debug(" [...] Opened file %s - reading contents into RAM...", filename)
data = fh.read()
log.debug(" [+++] Loaded file into RAM. Total size: %s bytes", len(data))
res = upload_termbin(data, timeout=timeout, **kwargs)
log.info(" [+++] Uploaded file %s to termbin. Got termbin link: %s \n", filename, res)
return res
async def upload_termbin_async(data: Union[bytes, str], timeout: Union[int, float] = None) -> str:
"""
Upload the :class:`bytes` / :class:`string` ``data`` to the pastebin service `TermBin`_ ,
using the hostname and port defined in :attr:`privex.helpers.settings.TERMBIN_HOST`
and :attr:`privex.helpers.settings.TERMBIN_PORT`
NOTE - A synchronous (non-async) version of this function is available: :func:`.upload_termbin`
Returns the `TermBin`_ URL as a string - which is a raw download / viewing link for the paste.
.. _TermBin: https://termbin.com
>>> my_data = "hello world\\nthis is a test\\n\\nlorem ipsum dolor\\n"
>>> await upload_termbin_async(my_data)
'https://termbin.com/kerjk'
:param bytes|str data: The data to upload to `TermBin`_ - as either :class:`str` or :class:`bytes`
:param float|int timeout: Socket timeout. If not passed, uses the default from :func:`socket.getdefaulttimeout`.
If the global default timeout is ``None``, then falls back to ``15``
:return str url: The `TermBin`_ URL to your paste as a string - which is a raw download / viewing link for the paste.
"""
data = byteify(data)
log.info(" [...] Uploading %s bytes to termbin ...\n", len(data))
res = await send_data_async(settings.TERMBIN_HOST, settings.TERMBIN_PORT, data, timeout=timeout)
log.info(" [+++] Got termbin link: %s \n", res)
return res
async def upload_termbin_file_async(filename: str, timeout: int = 15) -> str:
"""
Uploads the file ``filename`` to `TermBin`_ and returns the paste URL as a string.
.. NOTE:: A synchronous (non-async) version of this function is available: :func:`.upload_termbin_file`
.. NOTE:: If the data you want to upload is already loaded into a variable - you can use :func:`.upload_termbin_async` instead,
which accepts your data directly - through a :class:`str` or :class:`bytes` parameter
.. _TermBin: https://termbin.com
:param str filename: The path (absolute or relative) to the file you want to upload to `TermBin`_ - as a :class:`str`
:param float|int timeout: Socket timeout. If not passed, uses the default from :func:`socket.getdefaulttimeout`.
If the global default timeout is ``None``, then falls back to ``15``
:return str url: The `TermBin`_ URL to your paste as a string - which is a raw download / viewing link for the paste.
"""
log.info(" >> Uploading file '%s' to termbin", filename)
with open(filename, 'rb') as fh:
log.debug(" [...] Opened file %s - reading contents into RAM...", filename)
data = fh.read()
log.debug(" [+++] Loaded file into RAM. Total size: %s bytes", len(data))
res = await upload_termbin_async(data, timeout=timeout)
log.info(" [+++] Uploaded file %s to termbin. Got termbin link: %s \n", filename, res)
return res
| [
37811,
198,
40009,
29908,
5499,
14,
37724,
543,
779,
1058,
4666,
25,
63,
44971,
63,
393,
389,
7634,
8165,
284,
5499,
287,
428,
2393,
198,
4758,
779,
1058,
4666,
25,
63,
44971,
44646,
2142,
286,
1058,
4666,
25,
63,
3448,
303,
87,
13,... | 2.299109 | 11,220 |
def fibonacci(N):
"""Return all fibonacci numbers up to N. """
result = [0]
next_n = 1
while next_n <= N:
result.append(next_n)
next_n = sum(result[-2:])
return result
print(fibonacci(0)) # [0]
print(fibonacci(1)) # [0, 1, 1]
print(fibonacci(50)) # [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]
| [
4299,
12900,
261,
44456,
7,
45,
2599,
198,
220,
220,
220,
37227,
13615,
477,
12900,
261,
44456,
3146,
510,
284,
399,
13,
37227,
198,
220,
220,
220,
1255,
796,
685,
15,
60,
198,
220,
220,
220,
1306,
62,
77,
796,
352,
198,
220,
220,... | 1.98773 | 163 |
import os
import sys
import yaml
import launch
import launch_ros.actions
from launch.conditions import IfCondition
from launch.substitutions import LaunchConfiguration
from launch.substitutions import PythonExpression
from ament_index_python.packages import get_package_share_directory
if __name__ == '__main__':
generate_launch_description()
| [
11748,
28686,
198,
11748,
25064,
198,
11748,
331,
43695,
198,
198,
11748,
4219,
198,
11748,
4219,
62,
4951,
13,
4658,
198,
6738,
4219,
13,
17561,
1756,
1330,
1002,
48362,
198,
6738,
4219,
13,
7266,
301,
270,
3508,
1330,
21225,
38149,
19... | 3.734043 | 94 |
import numpy as np
class Statistic:
"""
Contains statistic functions helper
"""
def __init__(self, numbers = [1,2], confidence=0.95):
"""
numbers = array of numbers
confidence = confidence interval, default is 95%
"""
self.numbers = numbers
self.confidence = confidence
def mean(self):
"""
Calculate the mean of dataset
"""
mean = sum(self.numbers)/len(self.numbers)
return mean
def variance(self):
"""
Calculate the variances of sample dataset (n - 1)
"""
mean = sum(self.numbers) / len(self.numbers)
differences = [(x-mean)**2 for x in self.numbers]
variance = sum(differences)/(len(differences)-1)
return variance
def stdev(self):
"""
Calculate the standard deviation of sample dataset (n - 1)
"""
mean = sum(self.numbers) / len(self.numbers)
differences = [(x-mean)**2 for x in self.numbers]
variance = sum(differences)/(len(differences)-1)
stdev = np.sqrt(variance)
return stdev
test = Statistic().stdev()
print(test)
| [
11748,
299,
32152,
355,
45941,
198,
198,
4871,
5133,
2569,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
49850,
24696,
5499,
31904,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
825,
11593,
15003,
834,
7,
944,
11,
3146,
796,
6... | 2.320717 | 502 |
# -*- coding: utf-8 -*-
# ISC License
# Copyright (C) 2015 Jan Lebert
from __future__ import absolute_import
from __future__ import unicode_literals
from collections import namedtuple
import requests
from requests_oauthlib import OAuth1
from redis import StrictRedis
import json
from datetime import datetime, timedelta
from mw.api import Session as RevertsSession
from mw.lib.reverts import api as reverts
from .. import config
from . import logger
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
3180,
34,
13789,
198,
2,
15069,
357,
34,
8,
1853,
2365,
1004,
4835,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
28000,
... | 3.465649 | 131 |