hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
795961ba2b06f18424853086302e6fd2fe707e52 | 4,036 | py | Python | strappy/pipelines/base_pipeline.py | Strabes/strappy | f897e2635b8eb308341b87451a7e9839cddedc7e | [
"BSD-3-Clause"
] | null | null | null | strappy/pipelines/base_pipeline.py | Strabes/strappy | f897e2635b8eb308341b87451a7e9839cddedc7e | [
"BSD-3-Clause"
] | 13 | 2021-05-23T12:11:55.000Z | 2021-06-27T12:05:00.000Z | strappy/pipelines/base_pipeline.py | Strabes/strappy | f897e2635b8eb308341b87451a7e9839cddedc7e | [
"BSD-3-Clause"
] | null | null | null | """
Base pipeline constructs a pipeline of
generic transformations for easy EDA
"""
from typing import Union
import numpy as np
import pandas as pd
from feature_engine.imputation import (
ArbitraryNumberImputer,
AddMissingIndicator,
CategoricalImputer
)
from feature_engine.encoding import (
RareLabelEncoder,
OneHotEncoder
)
from ..transformers._variable_selector import MakeColumnSelector
from ..transformers._text_vectorizer import VectorizeText
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
def create_transformer_pipeline(params=None, text_cols : Union[None,list]=None):
    """Build a generic preprocessing ColumnTransformer for quick EDA.

    Numeric columns receive missing-value indicators and arbitrary-number
    imputation; categorical (object) columns receive imputation, rare-label
    grouping and one-hot encoding; each optional text column is vectorized on
    its own. All other columns are dropped.

    Parameters
    ----------
    params : dict
        Optional parameter overrides applied via ``set_params``.
    text_cols : Union[None, list]
        Names of free-text columns to vectorize (these are excluded from the
        categorical branch).

    Returns
    -------
    combined_pipe : sklearn.pipeline.Pipeline
        A pipeline to fit
    """
    numeric_branch = Pipeline([
        ("add_missing_ind", AddMissingIndicator()),
        ("arb_num_imputer", ArbitraryNumberImputer(arbitrary_number=0))
    ])

    categorical_branch = Pipeline([
        ("cat_imputer", CategoricalImputer(fill_value="_MISSING_")),
        ("rare_label_enc", RareLabelEncoder()),
        ("one_hot_encoder", OneHotEncoder())
    ])

    if text_cols is None:
        pattern_exclude = None
        text_branches = []
    else:
        # Regex matching any of the text column names exactly, e.g. "^a$|^b$".
        pattern_exclude = "^" + "$|^".join(text_cols) + "$"
        text_branches = [
            ("text_" + str(idx), VectorizeText(), MakeColumnSelector(pattern=col))
            for idx, col in enumerate(text_cols)
        ]

    transformers = [
        ('num', numeric_branch, MakeColumnSelector(dtype_include=np.number)),
        ('cat', categorical_branch, MakeColumnSelector(
            dtype_include=object, pattern_exclude=pattern_exclude))
    ] + text_branches

    combined_pipe = ColumnTransformer(transformers, remainder='drop')
    if params:
        combined_pipe.set_params(**params)
    return combined_pipe
def name_tracker(p, X):
    """Map output columns of a fitted preprocessing ColumnTransformer back to inputs.

    Parameters
    ----------
    p : sklearn.compose.ColumnTransformer
        Fitted transformer produced by ``create_transformer_pipeline``:
        slot 0 is the numeric branch, slot 1 the categorical branch, and any
        further slots are per-column text vectorizers.
    X : pandas.DataFrame
        Data with the original input columns (only a one-row head is used).

    Returns
    -------
    pandas.DataFrame
        Columns ``final_cols`` (post-transformation names, in output order)
        and ``cols_in`` (the input column each one was derived from).
    """
    cols_in = X.columns.tolist()
    df = pd.DataFrame({"cols": cols_in, "cols_in": cols_in})

    # Indicators for missing numeric cols: AddMissingIndicator appends "_na" columns.
    add_missing_ind = p.transformers_[0][1]["add_missing_ind"]
    try:
        nan_num_ind = pd.DataFrame({
            "cols": [i + "_na" for i in add_missing_ind.variables_],
            "cols_in": add_missing_ind.variables_})
        df = pd.concat([df, nan_num_ind])
    except Exception:
        # Best-effort: the step may be absent or not fitted.
        pass

    # One-hot encoding of categorical columns.
    one = p.transformers_[1][1]["one_hot_encoder"]
    try:
        # sorted(...) both fixes the row order and materializes the set:
        # pandas refuses to construct a DataFrame directly from a set
        # ("Set type is unordered"), which previously made this branch a
        # silent no-op behind a bare except.
        pairs = sorted(set().union(*[
            [(k + "_" + i, k) for i in v]
            for k, v in one.encoder_dict_.items()]))
        one_hot_encoder = pd.DataFrame(pairs, columns=["cols", "cols_in"])
        df = pd.concat([df, one_hot_encoder])
    except Exception:
        pass

    # Text columns: one feature per vocabulary term, prefixed with the column name.
    # Note transformers_[2:] also contains the ('remainder', 'drop', ...) entry;
    # it fails get_feature_names and is skipped by the except below.
    running_text_names = []
    for t in p.transformers_[2:]:
        try:
            v_name = t[2][0]
            col_tfidf = t[1].get_feature_names()
            col_tfidf_df = pd.DataFrame(
                {"cols": [v_name + "_" + i for i in col_tfidf],
                 "cols_in": [v_name] * len(col_tfidf)})
            df = pd.concat([df, col_tfidf_df])
            running_text_names += [v_name + "_" + i for i in col_tfidf]
        except Exception:
            pass

    # Determine the final output column order by pushing one row through the
    # fitted numeric and categorical branches.
    numeric_preds = p.transformers_[0][2]
    if len(numeric_preds) > 0:
        final_num_cols = (p
                          .transformers_[0][1]
                          .transform(
                              X.head(1)[numeric_preds])
                          .columns.tolist())
    else:
        final_num_cols = []
    object_preds = p.transformers_[1][2]
    if len(object_preds) > 0:
        final_obj_cols = (p
                          .transformers_[1][1]
                          .transform(X.head(1)[object_preds])
                          .columns.tolist())
    else:
        final_obj_cols = []

    df_ = pd.DataFrame({"final_cols":
                        final_num_cols + final_obj_cols + running_text_names})
    # Inner merge preserves the order of the left (final_cols) keys.
    df = (pd.merge(
        df_, df, left_on="final_cols",
        right_on="cols")
        .loc[:, ["final_cols", "cols_in"]])
    return df
795961e68f66115246143a55cbfbabbd6a76a079 | 50 | py | Python | booking/__init__.py | pincoin/withthai-old | 10e6248f14faba4c4ce3e387e93e28dabb76a2f7 | [
"MIT"
] | null | null | null | booking/__init__.py | pincoin/withthai-old | 10e6248f14faba4c4ce3e387e93e28dabb76a2f7 | [
"MIT"
] | 3 | 2021-03-30T12:56:11.000Z | 2021-09-22T18:47:42.000Z | booking/__init__.py | pincoin/withthai-old | 10e6248f14faba4c4ce3e387e93e28dabb76a2f7 | [
"MIT"
# Pre-Django 3.2 hook naming the AppConfig class Django loads for this app by default.
default_app_config = 'booking.apps.BookingConfig'
| 25 | 49 | 0.84 |
795961fa722523696f727d570ef6701003b0193e | 16,515 | py | Python | catkit/hardware/sbig/SbigCamera.py | aidancgray/catkit | c789f20a327faf91f5b34b6818670732eab24040 | [
"BSD-3-Clause"
] | 1 | 2020-09-04T22:50:21.000Z | 2020-09-04T22:50:21.000Z | catkit/hardware/sbig/SbigCamera.py | aidancgray/catkit | c789f20a327faf91f5b34b6818670732eab24040 | [
"BSD-3-Clause"
] | 2 | 2020-09-08T20:50:39.000Z | 2020-09-14T17:52:27.000Z | catkit/hardware/sbig/SbigCamera.py | aidancgray/catkit | c789f20a327faf91f5b34b6818670732eab24040 | [
"BSD-3-Clause"
] | null | null | null | from catkit.catkit_types import MetaDataEntry
from catkit.interfaces.Camera import Camera
from catkit.config import CONFIG_INI
from catkit.catkit_types import units, quantity
import catkit.util
from astropy.io import fits
import numpy as np
import logging
import os
import requests
import sys
import catkit.util
# implementation of a camera to run the SBIG STX-16803 Pupil Cam and KAF-1603ME/STT-1603M small cam
class SbigCamera(Camera):
    """Camera controller for the SBIG STX-16803 Pupil Cam and KAF-1603ME/STT-1603M small cam.

    The camera is driven entirely over HTTP: configuration, exposure control
    and image download are GET requests against CGI endpoints hosted by the
    camera at ``base_url`` (read from config.ini using ``config_id``).
    """

    # Frame types accepted by ImagerStartExposure.cgi.
    FRAME_TYPE_DARK = 0
    FRAME_TYPE_LIGHT = 1
    FRAME_TYPE_BIAS = 2
    FRAME_TYPE_FLAT_FIELD = 3

    # Imager states reported by ImagerState.cgi.
    IMAGER_STATE_IDLE = 0
    IMAGER_STATE_EXPOSING = 2
    IMAGER_STATE_READING_OUT = 3
    IMAGER_STATE_ERROR = 5

    # Image availability reported by ImagerImageReady.cgi.
    NO_IMAGE_AVAILABLE = 0
    IMAGE_AVAILABLE = 1

    log = logging.getLogger()

    def initialize(self, *args, **kwargs):
        """Load the SBIG config information and verify that the camera is idle.

        Uses the config_id to look up parameters in the config.ini.

        Raises:
            Exception: If the camera reports any state other than idle.
        """
        # Read to fail fast when the camera section is missing from config.ini;
        # the value itself is not otherwise used here.
        camera_name = CONFIG_INI.get(self.config_id, "camera_name")
        self.base_url = CONFIG_INI.get(self.config_id, "base_url")
        self.timeout = CONFIG_INI.getint(self.config_id, "timeout")
        # Minimum delay (seconds) between consecutive HTTP requests to the camera.
        self.min_delay = CONFIG_INI.getfloat(self.config_id, 'min_delay')

        # The camera must be idle before we take ownership of it.
        imager_status = self.__check_imager_state()
        if imager_status > self.IMAGER_STATE_IDLE:
            # Error: can't start the camera, or the camera is already busy.
            raise Exception("Camera reported incorrect state (" + str(imager_status) + ") during initialization.")
        self.imager_status = imager_status

    def close(self):
        """Abort any exposure in progress so the camera is left idle."""
        imager_status = self.__check_imager_state()
        if imager_status > self.IMAGER_STATE_IDLE:
            # Work in progress: abort the exposure.
            catkit.util.sleep(self.min_delay)  # limit the rate at which requests go to the camera
            # Timeout added for consistency with every other request in this
            # class, so a dead camera cannot hang shutdown indefinitely.
            r = requests.get(self.base_url + "ImagerAbortExposure.cgi", timeout=self.timeout)
            # No data is returned, but an HTTP error indicates that the abort failed.
            r.raise_for_status()

    def take_exposures(self, exposure_time, num_exposures,
                       file_mode=False, raw_skip=0, path=None, filename=None,
                       extra_metadata=None,
                       resume=False,
                       return_metadata=False,
                       subarray_x=None, subarray_y=None, width=None, height=None, gain=None, full_image=None,
                       bins=None):
        """
        Low level method to take exposures using an SBIG camera. By default keeps image data in memory.

        :param exposure_time: Pint quantity for exposure time, otherwise in microseconds.
        :param num_exposures: Number of exposures.
        :param file_mode: If true fits file will be written to disk.
        :param raw_skip: Skips x images for every one taken, when used images will be stored in memory and returned.
        :param path: Path of the directory to save fits file to, required if file_mode is true.
        :param filename: Name for file, required if file_mode is true.
        :param extra_metadata: Will be appended to metadata created and written to fits header.
        :param resume: If True, skips exposure if filename exists on disk already. Doesn't support data-only mode.
        :param return_metadata: If True, returns a list of meta data as a second return parameter.
        :param subarray_x: X coordinate of center pixel of the subarray.
        :param subarray_y: Y coordinate of center pixel of the subarray.
        :param width: Desired width of image.
        :param height: Desired height of image.
        :param gain: Gain is ignored for the SBIG camera; the API doesn't have a way to set gain.
        :param full_image: Boolean for whether to take a full image.
        :param bins: Integer value for number of bins.
        :return: Two parameters: Image list (numpy data or paths), Metadata list of MetaDataEntry objects.
        """
        # Convert exposure time to contain units if not already a Pint quantity.
        if type(exposure_time) is not quantity:
            exposure_time = quantity(exposure_time, units.microsecond)

        self.__setup_control_values(exposure_time, subarray_x=subarray_x, subarray_y=subarray_y, width=width,
                                    height=height, gain=gain, full_image=full_image, bins=bins)

        # Build the FITS metadata, appending any caller-supplied entries.
        meta_data = [MetaDataEntry("Exposure Time", "EXP_TIME", exposure_time.to(units.microsecond).m, "microseconds"),
                     MetaDataEntry("Camera", "CAMERA", self.config_id, "Camera model, correlates to entry in ini"),
                     MetaDataEntry("Bins", "BINS", self.bins, "Binning for camera")]
        if extra_metadata is not None:
            if isinstance(extra_metadata, list):
                meta_data.extend(extra_metadata)
            else:
                meta_data.append(extra_metadata)

        # DATA MODE: take images and return data and metadata (nothing written to disk).
        img_list = []
        if not file_mode:
            for i in range(num_exposures):
                img_list.append(self.__capture(exposure_time))
            if return_metadata:
                return img_list, meta_data
            return img_list

        # FILE MODE:
        if path is None or filename is None:
            raise Exception("You need to specify path and filename when file_mode=True.")

        # Ensure a fits extension; split once here since a _frameN suffix may be
        # appended to the basename below.
        if not (filename.endswith(".fit") or filename.endswith(".fits")):
            filename += ".fits"
        file_root, file_ext = os.path.splitext(filename)

        # Create the output directory if it doesn't exist.
        if not os.path.exists(path):
            os.makedirs(path)

        skip_counter = 0
        for i in range(num_exposures):
            # For multiple exposures append the frame number to the base file name.
            if num_exposures > 1:
                filename = file_root + "_frame" + str(i + 1) + file_ext
            full_path = os.path.join(path, filename)

            # If resume is enabled, skip the exposure when the file is already on disk.
            if resume and os.path.isfile(full_path):
                self.log.info("File already exists: " + full_path)
                img_list.append(full_path)
                continue

            img = self.__capture(exposure_time)

            # Honor raw_skip: keep every frame in memory but only write one
            # fits file out of every (raw_skip + 1) frames.
            if raw_skip != 0:
                img_list.append(img)
                if skip_counter == (raw_skip + 1):
                    skip_counter = 0
                if skip_counter == 0:
                    # Fall through and write this frame.
                    skip_counter += 1
                elif skip_counter > 0:
                    # Skip writing this frame.
                    skip_counter += 1
                    continue

            # Use Astropy to wrap the image and headers in a FITS primary HDU.
            hdu = fits.PrimaryHDU(img)
            hdu.header["FRAME"] = i + 1
            hdu.header["FILENAME"] = filename
            # Add testbed state metadata, warning when FITS limits will truncate it.
            for entry in meta_data:
                if len(entry.name_8chars) > 8:
                    self.log.warning("Fits Header Keyword: " + entry.name_8chars +
                                     " is greater than 8 characters and will be truncated.")
                if len(entry.comment) > 47:
                    self.log.warning("Fits Header comment for " + entry.name_8chars +
                                     " is greater than 47 characters and will be truncated.")
                hdu.header[entry.name_8chars[:8]] = (entry.value, entry.comment)
            # (A previous no-op fits.HDUList([hdu]) call was removed; writeto on
            # the HDU itself is sufficient.)
            hdu.writeto(full_path, overwrite=True)
            self.log.info("wrote " + full_path)
            if raw_skip == 0:
                img_list.append(full_path)

        if return_metadata:
            return img_list, meta_data
        return img_list

    def __setup_control_values(self, exposure_time, subarray_x=None, subarray_y=None, width=None, height=None,
                               gain=None, full_image=None, bins=None):
        """Apply control values from config.ini unless overrides are passed in, with error checking.

        Pushes the imager settings to the camera over HTTP. Raises an exception
        for an HTTP error and exits the process when the derived region of
        interest falls off the detector.
        """
        self.log.info("Setting up control values")

        # Load values from config.ini, overridden by keyword args when applicable.
        self.cooler_state = CONFIG_INI.getint(self.config_id, 'cooler_state')
        self.subarray_x = subarray_x if subarray_x is not None else CONFIG_INI.getint(self.config_id, 'subarray_x')
        self.subarray_y = subarray_y if subarray_y is not None else CONFIG_INI.getint(self.config_id, 'subarray_y')
        self.width = width if width is not None else CONFIG_INI.getint(self.config_id, 'width')
        self.height = height if height is not None else CONFIG_INI.getint(self.config_id, 'height')
        self.full_image = full_image if full_image is not None else CONFIG_INI.getboolean(self.config_id, 'full_image')
        self.bins = bins if bins is not None else CONFIG_INI.getint(self.config_id, 'bins')
        self.exposure_time = exposure_time if exposure_time is not None else CONFIG_INI.getfloat(self.config_id,
                                                                                                'exposure_time')

        # The camera's detector shape.
        detector_max_x = CONFIG_INI.getint(self.config_id, 'detector_width')
        detector_max_y = CONFIG_INI.getint(self.config_id, 'detector_length')

        if self.full_image:
            # Full-frame capture: ignore the region-of-interest parameters.
            # Lazy %-formatting: the previous print-style multi-argument call
            # raised a formatting error when the record was emitted.
            self.log.info("Taking full %s x %s image, ignoring region of interest params.",
                          detector_max_x, detector_max_y)
            fi_params = {'StartX': '0', 'StartY': '0',
                         'NumX': str(detector_max_x), 'NumY': str(detector_max_y),
                         'CoolerState': str(self.cooler_state)}
            r = requests.get(self.base_url + "ImagerSetSettings.cgi", params=fi_params, timeout=self.timeout)
            r.raise_for_status()
            return
        # (A duplicated full-image branch that could never execute after the
        # return above has been removed.)

        # Unlike ZWO, width and height are in camera pixels, unaffected by bins.
        if self.bins != 1:
            bin_params = {'BinX': str(self.bins), 'BinY': str(self.bins)}
            r = requests.get(self.base_url + "ImagerSetSettings.cgi", params=bin_params, timeout=self.timeout)
            r.raise_for_status()

        # Derive the ROI corners centered on (subarray_x, subarray_y) and check
        # that they fall on the detector.
        derived_start_x = self.subarray_x - (self.width // 2)
        derived_start_y = self.subarray_y - (self.height // 2)
        derived_end_x = self.subarray_x + (self.width // 2)
        derived_end_y = self.subarray_y + (self.height // 2)

        error_flag = False
        if derived_start_x > detector_max_x or derived_start_x < 0:
            self.log.error("Derived start x coordinate is off the detector (max %s): %s",
                           detector_max_x - 1, derived_start_x)
            error_flag = True
        if derived_start_y > detector_max_y or derived_start_y < 0:
            self.log.error("Derived start y coordinate is off the detector (max %s): %s",
                           detector_max_y - 1, derived_start_y)
            error_flag = True
        if derived_end_x > detector_max_x or derived_end_x < 0:
            self.log.error("Derived end x coordinate is off the detector (max %s): %s",
                           detector_max_x - 1, derived_end_x)
            error_flag = True
        if derived_end_y > detector_max_y or derived_end_y < 0:
            self.log.error("Derived end y coordinate is off the detector (max %s): %s",
                           detector_max_y - 1, derived_end_y)
            error_flag = True
        if error_flag:
            sys.exit("Exiting. Correct errors in the config.ini file or input parameters.")

        # Set the region of interest on the camera.
        roi_params = {'StartX': str(derived_start_x), 'StartY': str(derived_start_y),
                      'NumX': str(self.width), 'NumY': str(self.height),
                      'CoolerState': str(self.cooler_state)}
        r = requests.get(self.base_url + "ImagerSetSettings.cgi", params=roi_params, timeout=self.timeout)
        r.raise_for_status()

    def __check_imager_state(self):
        """Utility function to get the current state of the camera.

        Makes an HTTP request, checks for a good response, and returns the
        integer state value. Raises an exception on an HTTP failure.
        """
        r = requests.get(self.base_url + "ImagerState.cgi", timeout=self.timeout)
        r.raise_for_status()
        return int(r.text)

    def __check_image_status(self):
        """Utility function to check whether an image is ready for download.

        Makes an HTTP request, checks for a good response, and returns the
        integer availability value. Raises an exception on an HTTP failure.
        """
        r = requests.get(self.base_url + "ImagerImageReady.cgi", timeout=self.timeout)
        r.raise_for_status()
        return int(r.text)

    def __capture(self, exposure_time):
        """Start an exposure, wait for completion, then download and return the image.

        Assumes the exposure parameters were already applied via
        __setup_control_values. Returns a 2D uint16 numpy array.
        """
        # Start a light-frame exposure.
        params = {'Duration': exposure_time.to(units.second).magnitude,
                  'FrameType': self.FRAME_TYPE_LIGHT}
        r = requests.get(self.base_url + "ImagerStartExposure.cgi",
                         params=params,
                         timeout=self.timeout)
        r.raise_for_status()
        imager_state = self.IMAGER_STATE_EXPOSING

        # Poll until the imager returns to idle (or reports an error).
        while imager_state > self.IMAGER_STATE_IDLE:
            catkit.util.sleep(self.min_delay)  # limit the rate at which requests go to the camera
            imager_state = self.__check_imager_state()
            if imager_state == self.IMAGER_STATE_ERROR:
                self.log.error('Imager error during exposure')
                raise Exception("Camera reported error during exposure.")

        # At loop exit the image should be available for download.
        image_status = self.__check_image_status()
        if image_status != self.IMAGE_AVAILABLE:
            self.log.error('No image after exposure')
            raise Exception("Camera reported no image available after exposure.")

        # Download the raw 16-bit pixel data. The buffer is row-major, so the
        # first reshape axis is the row count (height in binned pixels); the
        # previous (width, height) order scrambled non-square regions.
        # NOTE(review): in full_image mode the downloaded frame is detector-sized
        # and self.width/self.height may not match it — confirm against usage.
        r = requests.get(self.base_url + "ImagerData.bin", timeout=self.timeout)
        r.raise_for_status()
        image = np.reshape(np.frombuffer(r.content, np.uint16),
                           (self.height // self.bins, self.width // self.bins))

        # Apply rotation and flip to the image based on config.ini file.
        theta = CONFIG_INI.getint(self.config_id, 'image_rotation')
        fliplr = CONFIG_INI.getboolean(self.config_id, 'image_fliplr')
        image = catkit.util.rotate_and_flip_image(image, theta, fliplr)
        return image
| 49.151786 | 125 | 0.630518 |
79596227dd3f8b04f243ad83ac3fda5e8f7425f2 | 407 | py | Python | register_search/wsgi.py | sHalnes/register_search | cb9cd4f287c82e0319eb45dc474340f2567fa35b | [
"MIT"
] | null | null | null | register_search/wsgi.py | sHalnes/register_search | cb9cd4f287c82e0319eb45dc474340f2567fa35b | [
"MIT"
] | null | null | null | register_search/wsgi.py | sHalnes/register_search | cb9cd4f287c82e0319eb45dc474340f2567fa35b | [
"MIT"
] | null | null | null | """
WSGI config for register_search project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project's settings module unless the environment already set one.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "register_search.settings")
# Module-level WSGI callable that application servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
| 23.941176 | 78 | 0.793612 |
795962c0027739c3411dab6366b3e1c4af16695b | 5,457 | py | Python | tests/model_connectors/test_time_series_heating_indirect.py | mingzhe37/geojson-modelica-translator | 23c969fa5a1b776dfd6dd773b9dd8f6e3a0ce28b | [
"BSD-3-Clause-LBNL"
] | 11 | 2019-08-19T16:58:23.000Z | 2022-01-25T14:23:49.000Z | tests/model_connectors/test_time_series_heating_indirect.py | mingzhe37/geojson-modelica-translator | 23c969fa5a1b776dfd6dd773b9dd8f6e3a0ce28b | [
"BSD-3-Clause-LBNL"
] | 331 | 2019-07-24T16:15:52.000Z | 2022-03-10T04:58:15.000Z | tests/model_connectors/test_time_series_heating_indirect.py | mingzhe37/geojson-modelica-translator | 23c969fa5a1b776dfd6dd773b9dd8f6e3a0ce28b | [
"BSD-3-Clause-LBNL"
] | 10 | 2019-07-12T22:21:32.000Z | 2022-02-22T06:30:25.000Z | """
****************************************************************************************************
:copyright (c) 2019-2021 URBANopt, Alliance for Sustainable Energy, LLC, and other contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted
provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of conditions
and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this list of conditions
and the following disclaimer in the documentation and/or other materials provided with the
distribution.
Neither the name of the copyright holder nor the names of its contributors may be used to endorse
or promote products derived from this software without specific prior written permission.
Redistribution of this software, without modification, must refer to the software by the same
designation. Redistribution of a modified version of this software (i) may not refer to the
modified version by the same designation, or by any confusingly similar designation, and
(ii) must refer to the underlying software originally provided by Alliance as “URBANopt”. Except
to comply with the foregoing, the term “URBANopt”, or any confusingly similar designation may
not be used to refer to any modified version of this software or any modified version of the
underlying software originally provided by Alliance without the prior written consent of Alliance.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
****************************************************************************************************
"""
import os
import pytest
from geojson_modelica_translator.geojson.urbanopt_geojson import (
UrbanOptGeoJson
)
from geojson_modelica_translator.model_connectors.couplings.coupling import (
Coupling
)
from geojson_modelica_translator.model_connectors.couplings.graph import (
CouplingGraph
)
from geojson_modelica_translator.model_connectors.districts.district import (
District
)
from geojson_modelica_translator.model_connectors.energy_transfer_systems.ets_cold_water_stub import (
EtsColdWaterStub
)
from geojson_modelica_translator.model_connectors.energy_transfer_systems.heating_indirect import (
HeatingIndirect
)
from geojson_modelica_translator.model_connectors.load_connectors.time_series import (
TimeSeries
)
from geojson_modelica_translator.model_connectors.networks.network_heated_water_stub import (
NetworkHeatedWaterStub
)
from geojson_modelica_translator.system_parameters.system_parameters import (
SystemParameters
)
from ..base_test_case import TestCaseBase
@pytest.mark.simulation
class DistrictSystemTest(TestCaseBase):
    def test_district_system(self):
        """Build a district model (time-series load + indirect heating ETS) and simulate it in Docker."""
        project = "time_series_heating_indirect"
        self.data_dir, self.output_dir = self.set_up(os.path.dirname(__file__), project)

        # Example GeoJSON containing a single office building.
        self.gj = UrbanOptGeoJson(os.path.join(self.data_dir, "time_series_ex1.json"))

        # System parameters describing the ETS configuration.
        sys_params = SystemParameters(os.path.join(self.data_dir, "time_series_system_params_ets.json"))

        # Time-series load coupled to an indirect heating ETS for the same building.
        building = self.gj.buildings[0]
        load = TimeSeries(sys_params, building)
        ets = HeatingIndirect(sys_params, building.feature.properties["id"])
        load_ets_coupling = Coupling(load, ets)

        # Heated-water network stub feeding the ETS.
        hw_stub = NetworkHeatedWaterStub(sys_params)
        ets_hw_coupling = Coupling(ets, hw_stub)

        # Cold-water stub so the load's cooling side is closed off.
        cw_stub = EtsColdWaterStub(sys_params)
        load_cw_coupling = Coupling(load, cw_stub)

        graph = CouplingGraph([load_ets_coupling, ets_hw_coupling, load_cw_coupling])

        district = District(
            root_dir=self.output_dir,
            project_name=project,
            system_parameters=sys_params,
            coupling_graph=graph
        )
        district.to_modelica()

        model_dir = os.path.abspath(os.path.join(district._scaffold.districts_path.files_dir))
        self.run_and_assert_in_docker(os.path.join(model_dir, 'DistrictEnergySystem.mo'),
                                      project_path=district._scaffold.project_path,
                                      project_name=district._scaffold.project_name)
| 45.857143 | 102 | 0.741983 |
79596548e60d7470c297dc2a06e2ffd498928bf0 | 173 | py | Python | grampower/__init__.py | kmrigendra/grampowerapp | 1cd44a7defad8007b38805759b9f3ffe995e3698 | [
"MIT"
] | null | null | null | grampower/__init__.py | kmrigendra/grampowerapp | 1cd44a7defad8007b38805759b9f3ffe995e3698 | [
"MIT"
] | null | null | null | grampower/__init__.py | kmrigendra/grampowerapp | 1cd44a7defad8007b38805759b9f3ffe995e3698 | [
"MIT"
] | null | null | null | import os
import json
from flask.app import Flask
# Flask application object shared by the rest of the package.
app = Flask(__name__)
# Pull settings from the top-level 'config' module.
app.config.from_object('config')
# Treat "/route" and "/route/" as the same URL.
app.url_map.strict_slashes = False
import grampower.controllers | 14.416667 | 34 | 0.797688 |
79596555742bbe706fe3a1a3b84f065855557644 | 636 | py | Python | src/products/migrations/0001_initial.py | bluemonkey9241/django-js-frontend | ae5379766b2327738e0c23ff044ae550fd44cc2d | [
"MIT"
] | 61 | 2018-04-05T04:49:28.000Z | 2021-08-24T11:54:33.000Z | src/products/migrations/0001_initial.py | bluemonkey9241/django-js-frontend | ae5379766b2327738e0c23ff044ae550fd44cc2d | [
"MIT"
] | null | null | null | src/products/migrations/0001_initial.py | bluemonkey9241/django-js-frontend | ae5379766b2327738e0c23ff044ae550fd44cc2d | [
"MIT"
] | 35 | 2018-05-11T10:39:25.000Z | 2022-03-02T21:03:08.000Z | # Generated by Django 2.0.4 on 2018-04-04 21:21
from django.db import migrations, models
class Migration(migrations.Migration):
    # First migration for the products app: no prior migrations to depend on.
    initial = True
    dependencies = [
    ]
    operations = [
        # Create the "Product" table: auto PK plus title/description/price.
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=120)),
                ('description', models.TextField()),
                # Decimal avoids float rounding: up to 10 digits, 2 after the point.
                ('price', models.DecimalField(decimal_places=2, default=9.99, max_digits=10)),
            ],
        ),
    ]
| 26.5 | 114 | 0.577044 |
795965653549373842eca389c4308a83f934aef6 | 43,832 | py | Python | src/sagemaker/estimator.py | Shuhuasong/Sage-Maker | 32534662acf290d7f9748f2008e6ded4f4e32660 | [
"Apache-2.0"
] | null | null | null | src/sagemaker/estimator.py | Shuhuasong/Sage-Maker | 32534662acf290d7f9748f2008e6ded4f4e32660 | [
"Apache-2.0"
] | null | null | null | src/sagemaker/estimator.py | Shuhuasong/Sage-Maker | 32534662acf290d7f9748f2008e6ded4f4e32660 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import print_function, absolute_import
import json
import logging
import os
from abc import ABCMeta
from abc import abstractmethod
from six import with_metaclass
from sagemaker.analytics import TrainingJobAnalytics
from sagemaker.fw_utils import (create_image_uri, tar_and_upload_dir, parse_s3_url, UploadedCode,
validate_source_dir)
from sagemaker.job import _Job
from sagemaker.local import LocalSession
from sagemaker.model import Model
from sagemaker.model import (SCRIPT_PARAM_NAME, DIR_PARAM_NAME, CLOUDWATCH_METRICS_PARAM_NAME,
CONTAINER_LOG_LEVEL_PARAM_NAME, JOB_NAME_PARAM_NAME, SAGEMAKER_REGION_PARAM_NAME)
from sagemaker.predictor import RealTimePredictor
from sagemaker.session import Session
from sagemaker.session import s3_input
from sagemaker.transformer import Transformer
from sagemaker.utils import base_name_from_image, name_from_base, name_from_image, get_config_value
class EstimatorBase(with_metaclass(ABCMeta, object)):
"""Handle end-to-end Amazon SageMaker training and deployment tasks.
For introduction to model training and deployment, see
http://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works-training.html
Subclasses must define a way to determine what image to use for training,
what hyperparameters to use, and how to create an appropriate predictor instance.
"""
def __init__(self, role, train_instance_count, train_instance_type,
train_volume_size=30, train_max_run=24 * 60 * 60, input_mode='File',
output_path=None, output_kms_key=None, base_job_name=None, sagemaker_session=None, tags=None):
"""Initialize an ``EstimatorBase`` instance.
Args:
role (str): An AWS IAM role (either name or full ARN). The Amazon SageMaker training jobs and APIs
that create Amazon SageMaker endpoints use this role to access training data and model artifacts.
After the endpoint is created, the inference code might use the IAM role,
if it needs to access an AWS resource.
train_instance_count (int): Number of Amazon EC2 instances to use for training.
train_instance_type (str): Type of EC2 instance to use for training, for example, 'ml.c4.xlarge'.
train_volume_size (int): Size in GB of the EBS volume to use for storing input data
during training (default: 30). Must be large enough to store training data if File Mode is used
(which is the default).
train_max_run (int): Timeout in seconds for training (default: 24 * 60 * 60).
After this amount of time Amazon SageMaker terminates the job regardless of its current status.
input_mode (str): The input mode that the algorithm supports (default: 'File'). Valid modes:
'File' - Amazon SageMaker copies the training dataset from the S3 location to a local directory.
'Pipe' - Amazon SageMaker streams data directly from S3 to the container via a Unix-named pipe.
output_path (str): S3 location for saving the trainig result (model artifacts and output files).
If not specified, results are stored to a default bucket. If the bucket with the specific name
does not exist, the estimator creates the bucket during the
:meth:`~sagemaker.estimator.EstimatorBase.fit` method execution.
output_kms_key (str): Optional. KMS key ID for encrypting the training output (default: None).
base_job_name (str): Prefix for training job name when the :meth:`~sagemaker.estimator.EstimatorBase.fit`
method launches. If not specified, the estimator generates a default job name, based on
the training image name and current timestamp.
sagemaker_session (sagemaker.session.Session): Session object which manages interactions with
Amazon SageMaker APIs and any other AWS services needed. If not specified, the estimator creates one
using the default AWS configuration chain.
tags (list[dict]): List of tags for labeling a training job. For more, see
https://docs.aws.amazon.com/sagemaker/latest/dg/API_Tag.html.
"""
self.role = role
self.train_instance_count = train_instance_count
self.train_instance_type = train_instance_type
self.train_volume_size = train_volume_size
self.train_max_run = train_max_run
self.input_mode = input_mode
self.tags = tags
if self.train_instance_type in ('local', 'local_gpu'):
if self.train_instance_type == 'local_gpu' and self.train_instance_count > 1:
raise RuntimeError("Distributed Training in Local GPU is not supported")
self.sagemaker_session = sagemaker_session or LocalSession()
else:
self.sagemaker_session = sagemaker_session or Session()
self.base_job_name = base_job_name
self._current_job_name = None
self.output_path = output_path
self.output_kms_key = output_kms_key
self.latest_training_job = None
@abstractmethod
def train_image(self):
    """Return the Docker image to use for training.
    The :meth:`~sagemaker.estimator.EstimatorBase.fit` method, which does the model training, calls this method to
    find the image to use for model training.
    Returns:
        str: The URI of the Docker image.
    """
    # Abstract: each framework/algorithm subclass supplies its own image URI.
    pass
@abstractmethod
def hyperparameters(self):
    """Return the hyperparameters as a dictionary to use for training.
    The :meth:`~sagemaker.estimator.EstimatorBase.fit` method, which trains the model, calls this method to
    find the hyperparameters.
    Returns:
        dict[str, str]: The hyperparameters.
    """
    # Abstract: subclasses decide how hyperparameters are stored and serialized.
    pass
def _prepare_for_training(self, job_name=None):
    """Set any values in the estimator that need to be set before training.

    Args:
        job_name (str): Name of the training job to be created. If not specified,
            one is generated, using the base name given to the constructor if applicable.
    """
    if job_name is None:
        # Honor a supplied base_job_name, otherwise derive one from the training image.
        prefix = self.base_job_name or base_name_from_image(self.train_image())
        self._current_job_name = name_from_base(prefix)
    else:
        self._current_job_name = job_name
    # If output_path was specified we use it; otherwise initialize it here.
    # Local Mode with local_code=True needs no explicit output_path.
    if self.output_path is None:
        session = self.sagemaker_session
        local_code = get_config_value('local.local_code', session.config)
        if session.local_mode and local_code:
            self.output_path = ''
        else:
            self.output_path = 's3://{}/'.format(session.default_bucket())
def fit(self, inputs, wait=True, logs=True, job_name=None):
    """Train a model using the input training dataset.
    Calls the Amazon SageMaker CreateTrainingJob API to start model training, using
    the configuration given to the estimator and the supplied input training data.
    After a successful (synchronous) training run, ``deploy()`` can host the model
    on the Amazon SageMaker hosting services.

    Args:
        inputs (str or dict or sagemaker.session.s3_input): Information about the training data.
            This can be one of three types:
            * (str) the S3 location where training data is saved.
            * (dict[str, str] or dict[str, sagemaker.session.s3_input]) If using multiple channels for
                training data, you can specify a dict mapping channel names
                to strings or :func:`~sagemaker.session.s3_input` objects.
            * (sagemaker.session.s3_input) - channel configuration for S3 data sources that can provide
                additional information as well as the path to the training dataset.
                See :func:`sagemaker.session.s3_input` for full details.
        wait (bool): Whether the call should wait until the job completes (default: True).
        logs (bool): Whether to show the logs produced by the job.
            Only meaningful when wait is True (default: True).
        job_name (str): Training job name. If not specified, the estimator generates a default job name,
            based on the training image name and current timestamp.
    """
    self._prepare_for_training(job_name=job_name)
    job = _TrainingJob.start_new(self, inputs)
    self.latest_training_job = job
    if wait:
        job.wait(logs=logs)
@classmethod
def _from_training_job(cls, init_params, hyperparameters, image, sagemaker_session):
    """Create an Estimator from existing training job data.
    Args:
        init_params (dict): The init_params the training job was created with.
        hyperparameters (dict): The hyperparameters the training job was created with.
        image (str): Container image (if any) the training job was created with
        sagemaker_session (sagemaker.session.Session): A sagemaker Session to pass to the estimator.
    Returns: An instance of the calling Estimator Class.
    """
    # Intentionally unimplemented in the base class; concrete subclasses that
    # support reconstruction from a job description must override this.
    raise NotImplementedError()
@classmethod
def attach(cls, training_job_name, sagemaker_session=None):
    """Attach to an existing training job.
    Create an Estimator bound to an existing training job, each subclass is responsible to implement
    ``_prepare_init_params_from_job_description()`` as this method delegates the actual conversion of a training
    job description to the arguments that the class constructor expects. After attaching, if the training job has a
    Complete status, it can be ``deploy()`` ed to create a SageMaker Endpoint and return a ``Predictor``.
    If the training job is in progress, attach will block and display log messages
    from the training job, until the training job completes.
    Args:
        training_job_name (str): The name of the training job to attach to.
        sagemaker_session (sagemaker.session.Session): Session object which manages interactions with
            Amazon SageMaker APIs and any other AWS services needed. If not specified, the estimator creates one
            using the default AWS configuration chain.
    Examples:
        >>> my_estimator.fit(wait=False)
        >>> training_job_name = my_estimator.latest_training_job.name
        Later on:
        >>> attached_estimator = Estimator.attach(training_job_name)
        >>> attached_estimator.deploy()
    Returns:
        Instance of the calling ``Estimator`` Class with the attached training job.
    """
    sagemaker_session = sagemaker_session or Session()
    job_details = sagemaker_session.sagemaker_client.describe_training_job(TrainingJobName=training_job_name)
    init_params = cls._prepare_init_params_from_job_description(job_details)
    estimator = cls(sagemaker_session=sagemaker_session, **init_params)
    # NOTE(review): 'base_job_name' holds the full TrainingJobName here (it is set
    # from job_details['TrainingJobName'] in _prepare_init_params_from_job_description),
    # so this re-binds the exact job, not a prefix.
    estimator.latest_training_job = _TrainingJob(sagemaker_session=sagemaker_session,
                                                 training_job_name=init_params['base_job_name'])
    # Blocks (streaming logs) until the job reaches a terminal state.
    estimator.latest_training_job.wait()
    return estimator
def deploy(self, initial_instance_count, instance_type, endpoint_name=None, **kwargs):
    """Deploy the trained model to an Amazon SageMaker endpoint and return a ``sagemaker.RealTimePredictor`` object.
    More information:
    http://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works-training.html

    Args:
        initial_instance_count (int): Minimum number of EC2 instances to deploy to an endpoint for prediction.
        instance_type (str): Type of EC2 instance to deploy to an endpoint for prediction,
            for example, 'ml.c4.xlarge'.
        endpoint_name (str): Name to use for creating an Amazon SageMaker endpoint. If not specified, the name of
            the training job is used.
        **kwargs: Passed to invocation of ``create_model()``. Implementations may customize
            ``create_model()`` to accept ``**kwargs`` to customize model creation during deploy.
            For more, see the implementation docs.
    Returns:
        sagemaker.predictor.RealTimePredictor: A predictor that provides a ``predict()`` method,
            which can be used to send requests to the Amazon SageMaker endpoint and obtain inferences.
    """
    # A training job must exist: its name doubles as the default endpoint name.
    self._ensure_latest_training_job()
    self.deploy_instance_type = instance_type
    name = endpoint_name if endpoint_name else self.latest_training_job.name
    model = self.create_model(**kwargs)
    return model.deploy(
        instance_type=instance_type,
        initial_instance_count=initial_instance_count,
        endpoint_name=name)
@property
def model_data(self):
    """str: The model location in S3. Only set if Estimator has been ``fit()``."""
    # Fetched on demand from the service rather than cached locally.
    job_name = self.latest_training_job.name
    description = self.sagemaker_session.sagemaker_client.describe_training_job(
        TrainingJobName=job_name)
    return description['ModelArtifacts']['S3ModelArtifacts']
@abstractmethod
def create_model(self, **kwargs):
    """Create a SageMaker ``Model`` object that can be deployed to an ``Endpoint``.
    Args:
        **kwargs: Keyword arguments used by the implemented method for creating the ``Model``.
    Returns:
        sagemaker.model.Model: A SageMaker ``Model`` object. See :func:`~sagemaker.model.Model` for full details.
    """
    # Abstract: subclasses decide how a trained job maps to a deployable Model.
    pass
@classmethod
def _prepare_init_params_from_job_description(cls, job_details):
    """Convert the job description to init params that can be handled by the class constructor.

    Args:
        job_details: the returned job details from a describe_training_job API call.
    Returns:
        dictionary: The transformed init_params
    """
    resource_config = job_details['ResourceConfig']
    output_config = job_details['OutputDataConfig']
    algo_spec = job_details['AlgorithmSpecification']
    return {
        'role': job_details['RoleArn'],
        'train_instance_count': resource_config['InstanceCount'],
        'train_instance_type': resource_config['InstanceType'],
        'train_volume_size': resource_config['VolumeSizeInGB'],
        'train_max_run': job_details['StoppingCondition']['MaxRuntimeInSeconds'],
        'input_mode': algo_spec['TrainingInputMode'],
        # The full job name is reused as the base name when re-attaching.
        'base_job_name': job_details['TrainingJobName'],
        'output_path': output_config['S3OutputPath'],
        'output_kms_key': output_config['KmsKeyId'],
        'hyperparameters': job_details['HyperParameters'],
        'image': algo_spec['TrainingImage'],
    }
def delete_endpoint(self):
    """Delete an Amazon SageMaker ``Endpoint``.
    Raises:
        ValueError: If the endpoint does not exist.
    """
    # The endpoint shares its name with the training job, so a training job
    # (via fit() or attach()) must exist before deletion is attempted.
    self._ensure_latest_training_job(error_message='Endpoint was not created yet')
    self.sagemaker_session.delete_endpoint(self.latest_training_job.name)
def transformer(self, instance_count, instance_type, strategy=None, assemble_with=None, output_path=None,
                output_kms_key=None, accept=None, env=None, max_concurrent_transforms=None,
                max_payload=None, tags=None, role=None):
    """Return a ``Transformer`` that uses a SageMaker Model based on the training job. It reuses the
    SageMaker Session and base job name used by the Estimator.
    Args:
        instance_count (int): Number of EC2 instances to use.
        instance_type (str): Type of EC2 instance to use, for example, 'ml.c4.xlarge'.
        strategy (str): The strategy used to decide how to batch records in a single request (default: None).
            Valid values: 'MULTI_RECORD' and 'SINGLE_RECORD'.
        assemble_with (str): How the output is assembled (default: None). Valid values: 'Line' or 'None'.
        output_path (str): S3 location for saving the transform result. If not specified, results are stored to
            a default bucket.
        output_kms_key (str): Optional. KMS key ID for encrypting the transform output (default: None).
        accept (str): The content type accepted by the endpoint deployed during the transform job.
        env (dict): Environment variables to be set for use during the transform job (default: None).
        max_concurrent_transforms (int): The maximum number of HTTP requests to be made to
            each individual transform container at one time.
        max_payload (int): Maximum size of the payload in a single HTTP request to the container in MB.
        tags (list[dict]): List of tags for labeling a transform job. If none specified, then the tags used for
            the training job are used for the transform job.
        role (str): The ``ExecutionRoleArn`` IAM Role ARN for the ``Model``, which is also used during
            transform jobs. If not specified, the role from the Estimator will be used.
    Returns:
        sagemaker.transformer.Transformer: a Transformer bound to a model created from this training job.
    """
    self._ensure_latest_training_job()
    # Creates (server-side) a Model from the latest training job's artifacts.
    model_name = self.sagemaker_session.create_model_from_job(self.latest_training_job.name, role=role)
    # Fall back to the training job's tags when none are supplied.
    tags = tags or self.tags
    return Transformer(model_name, instance_count, instance_type, strategy=strategy, assemble_with=assemble_with,
                       output_path=output_path, output_kms_key=output_kms_key, accept=accept,
                       max_concurrent_transforms=max_concurrent_transforms, max_payload=max_payload,
                       env=env, tags=tags, base_transform_job_name=self.base_job_name,
                       sagemaker_session=self.sagemaker_session)
@property
def training_job_analytics(self):
    """Return a ``TrainingJobAnalytics`` object for the current training job."""
    job_name = self._current_job_name
    if job_name is None:
        raise ValueError('Estimator is not associated with a TrainingJob')
    return TrainingJobAnalytics(job_name, sagemaker_session=self.sagemaker_session)
def _ensure_latest_training_job(self, error_message='Estimator is not associated with a training job'):
    # Guard used by deploy()/transformer()/delete_endpoint(): those operations
    # require that fit() (or attach()) has produced a training job first.
    if self.latest_training_job is None:
        raise ValueError(error_message)
class _TrainingJob(_Job):
    """Handle to a SageMaker training job, created via ``start_new`` or ``attach``."""

    def __init__(self, sagemaker_session, training_job_name):
        super(_TrainingJob, self).__init__(sagemaker_session, training_job_name)

    @classmethod
    def start_new(cls, estimator, inputs):
        """Create a new Amazon SageMaker training job from the estimator.
        Args:
            estimator (sagemaker.estimator.EstimatorBase): Estimator object created by the user.
            inputs (str): Parameters used when called :meth:`~sagemaker.estimator.EstimatorBase.fit`.
        Returns:
            sagemaker.estimator._TrainingJob: Constructed object that captures all information about the started
                training job.
        Raises:
            ValueError: If a ``file://`` input URI is used outside of local mode.
        """
        local_mode = estimator.sagemaker_session.local_mode
        # Allow file:// input only in local mode
        if isinstance(inputs, str) and inputs.startswith('file://'):
            if not local_mode:
                raise ValueError('File URIs are supported in local mode only. Please use a S3 URI instead.')
        config = _Job._load_config(inputs, estimator)
        # Bug fix: previously ``hyperparameters`` was bound only when
        # estimator.hyperparameters() returned a non-None value, so the train()
        # call below raised UnboundLocalError otherwise. Default to {}.
        hyperparameters = {}
        if estimator.hyperparameters() is not None:
            # The training API requires string keys and values.
            hyperparameters = {str(k): str(v) for (k, v) in estimator.hyperparameters().items()}
        estimator.sagemaker_session.train(image=estimator.train_image(), input_mode=estimator.input_mode,
                                          input_config=config['input_config'], role=config['role'],
                                          job_name=estimator._current_job_name, output_config=config['output_config'],
                                          resource_config=config['resource_config'], hyperparameters=hyperparameters,
                                          stop_condition=config['stop_condition'], tags=estimator.tags)
        return cls(estimator.sagemaker_session, estimator._current_job_name)

    def wait(self, logs=True):
        """Block until the training job finishes.
        Args:
            logs (bool): Whether to stream the job's CloudWatch logs while waiting (default: True).
        """
        if logs:
            self.sagemaker_session.logs_for_job(self.job_name, wait=True)
        else:
            self.sagemaker_session.wait_for_job(self.job_name)
class Estimator(EstimatorBase):
    """
    A generic Estimator to train using any supplied algorithm. This class is designed for use with
    algorithms that don't have their own, custom class.
    """

    def __init__(self, image_name, role, train_instance_count, train_instance_type,
                 train_volume_size=30, train_max_run=24 * 60 * 60, input_mode='File',
                 output_path=None, output_kms_key=None, base_job_name=None, sagemaker_session=None,
                 hyperparameters=None):
        """Initialize an ``Estimator`` instance.
        Args:
            image_name (str): The container image to use for training.
            role (str): An AWS IAM role (either name or full ARN). The Amazon SageMaker training jobs and APIs
                that create Amazon SageMaker endpoints use this role to access training data and model artifacts.
                After the endpoint is created, the inference code might use the IAM role,
                if it needs to access an AWS resource.
            train_instance_count (int): Number of Amazon EC2 instances to use for training.
            train_instance_type (str): Type of EC2 instance to use for training, for example, 'ml.c4.xlarge'.
            train_volume_size (int): Size in GB of the EBS volume to use for storing input data
                during training (default: 30). Must be large enough to store training data if File Mode is used
                (which is the default).
            train_max_run (int): Timeout in seconds for training (default: 24 * 60 * 60).
                After this amount of time Amazon SageMaker terminates the job regardless of its current status.
            input_mode (str): The input mode that the algorithm supports (default: 'File'). Valid modes:
                * 'File' - Amazon SageMaker copies the training dataset from the S3 location to a local directory.
                * 'Pipe' - Amazon SageMaker streams data directly from S3 to the container via a Unix-named pipe.
            output_path (str): S3 location for saving the training result (model artifacts and output files).
                If not specified, results are stored to a default bucket. If the bucket with the specific name
                does not exist, the estimator creates the bucket during the
                :meth:`~sagemaker.estimator.EstimatorBase.fit` method execution.
            output_kms_key (str): Optional. KMS key ID for encrypting the training output (default: None).
            base_job_name (str): Prefix for training job name when the :meth:`~sagemaker.estimator.EstimatorBase.fit`
                method launches. If not specified, the estimator generates a default job name, based on
                the training image name and current timestamp.
            sagemaker_session (sagemaker.session.Session): Session object which manages interactions with
                Amazon SageMaker APIs and any other AWS services needed. If not specified, the estimator creates one
                using the default AWS configuration chain.
            hyperparameters (dict): Dictionary containing the hyperparameters to initialize this estimator with.
        """
        self.image_name = image_name
        # Copy so later set_hyperparameters() calls do not mutate the caller's dict.
        self.hyperparam_dict = hyperparameters.copy() if hyperparameters else {}
        super(Estimator, self).__init__(role, train_instance_count, train_instance_type,
                                        train_volume_size, train_max_run, input_mode,
                                        output_path, output_kms_key, base_job_name, sagemaker_session)

    def train_image(self):
        """
        Returns the docker image to use for training.
        The fit() method, that does the model training, calls this method to find the image to use for model training.
        """
        return self.image_name

    def set_hyperparameters(self, **kwargs):
        """Set (or overwrite) individual hyperparameters by keyword argument."""
        for k, v in kwargs.items():
            self.hyperparam_dict[k] = v

    def hyperparameters(self):
        """Returns the hyperparameters as a dictionary to use for training.
        The fit() method, that does the model training, calls this method to find the hyperparameters you specified.
        """
        return self.hyperparam_dict

    def create_model(self, role=None, image=None, predictor_cls=None, serializer=None, deserializer=None,
                     content_type=None, accept=None, **kwargs):
        """
        Create a model to deploy.
        Args:
            role (str): The ``ExecutionRoleArn`` IAM Role ARN for the ``Model``, which is also used during
                transform jobs. If not specified, the role from the Estimator will be used.
            image (str): An container image to use for deploying the model. Defaults to the image used for training.
            predictor_cls (RealTimePredictor): The predictor class to use when deploying the model.
            serializer (callable): Should accept a single argument, the input data, and return a sequence
                of bytes. May provide a content_type attribute that defines the endpoint request content type
            deserializer (callable): Should accept two arguments, the result data and the response content type,
                and return a sequence of bytes. May provide a content_type attribute that defines the endpoint
                response Accept content type.
            content_type (str): The invocation ContentType, overriding any content_type from the serializer
            accept (str): The invocation Accept, overriding any accept from the deserializer.
            The serializer, deserializer, content_type, and accept arguments are only used to define a default
            RealTimePredictor. They are ignored if an explicit predictor class is passed in. Other arguments
            are passed through to the Model class.
        Returns: a Model ready for deployment.
        """
        if predictor_cls is None:
            # Build a default RealTimePredictor factory that captures the
            # (de)serialization settings given to this call.
            def predict_wrapper(endpoint, session):
                return RealTimePredictor(endpoint, session, serializer, deserializer, content_type, accept)
            predictor_cls = predict_wrapper
        role = role or self.role
        return Model(self.model_data, image or self.train_image(), role, sagemaker_session=self.sagemaker_session,
                     predictor_cls=predictor_cls, **kwargs)

    @classmethod
    def _prepare_init_params_from_job_description(cls, job_details):
        """Convert the job description to init params that can be handled by the class constructor
        Args:
            job_details: the returned job details from a describe_training_job API call.
        Returns:
            dictionary: The transformed init_params
        """
        init_params = super(Estimator, cls)._prepare_init_params_from_job_description(job_details)
        # This class's constructor takes 'image_name', not the generic 'image' key.
        init_params['image_name'] = init_params.pop('image')
        return init_params
class Framework(EstimatorBase):
    """Base class that cannot be instantiated directly.
    Subclasses define functionality pertaining to specific ML frameworks,
    such as training/deployment images and predictor instances.
    """

    def __init__(self, entry_point, source_dir=None, hyperparameters=None, enable_cloudwatch_metrics=False,
                 container_log_level=logging.INFO, code_location=None, image_name=None, **kwargs):
        """Base class initializer. Subclasses which override ``__init__`` should invoke ``super()``
        Args:
            entry_point (str): Path (absolute or relative) to the Python source file which should be executed
                as the entry point to training. This should be compatible with either Python 2.7 or Python 3.5.
            source_dir (str): Path (absolute or relative) to a directory with any other training
                source code dependencies aside from the entry point file (default: None). Structure within this
                directory are preserved when training on Amazon SageMaker.
            hyperparameters (dict): Hyperparameters that will be used for training (default: None).
                The hyperparameters are made accessible as a dict[str, str] to the training code on SageMaker.
                For convenience, this accepts other types for keys and values, but ``str()`` will be called
                to convert them before training.
            enable_cloudwatch_metrics (bool): Whether training and hosting containers will
                generate CloudWatch metrics under the AWS/SageMakerContainer namespace (default: False).
            container_log_level (int): Log level to use within the container (default: logging.INFO).
                Valid values are defined in the Python logging module.
            code_location (str): Name of the S3 bucket where custom code is uploaded (default: None).
                If not specified, default bucket created by ``sagemaker.session.Session`` is used.
            image_name (str): An alternate image name to use instead of the official Sagemaker image
                for the framework. This is useful to run one of the Sagemaker supported frameworks
                with an image containing custom dependencies.
            **kwargs: Additional kwargs passed to the ``EstimatorBase`` constructor.
        """
        super(Framework, self).__init__(**kwargs)
        self.source_dir = source_dir
        self.entry_point = entry_point
        self.enable_cloudwatch_metrics = enable_cloudwatch_metrics
        self.container_log_level = container_log_level
        self._hyperparameters = hyperparameters or {}
        self.code_location = code_location
        self.image_name = image_name

    def _prepare_for_training(self, job_name=None):
        """Set hyperparameters needed for training. This method will also validate ``source_dir``.
        Args:
            * job_name (str): Name of the training job to be created. If not specified, one is generated,
                using the base name given to the constructor if applicable.
        """
        super(Framework, self)._prepare_for_training(job_name=job_name)
        # validate source dir will raise a ValueError if there is something wrong with the
        # source directory. We are intentionally not handling it because this is a critical error.
        if self.source_dir and not self.source_dir.lower().startswith('s3://'):
            validate_source_dir(self.entry_point, self.source_dir)
        # if we are in local mode with local_code=True. We want the container to just
        # mount the source dir instead of uploading to S3.
        local_code = get_config_value('local.local_code', self.sagemaker_session.config)
        if self.sagemaker_session.local_mode and local_code:
            # if there is no source dir, use the directory containing the entry point.
            if self.source_dir is None:
                self.source_dir = os.path.dirname(self.entry_point)
            self.entry_point = os.path.basename(self.entry_point)
            code_dir = 'file://' + self.source_dir
            script = self.entry_point
        else:
            # Tar the user code and upload it to S3; remember where it went.
            self.uploaded_code = self._stage_user_code_in_s3()
            code_dir = self.uploaded_code.s3_prefix
            script = self.uploaded_code.script_name
        # Modify hyperparameters in-place to point to the right code directory and script URIs
        self._hyperparameters[DIR_PARAM_NAME] = code_dir
        self._hyperparameters[SCRIPT_PARAM_NAME] = script
        self._hyperparameters[CLOUDWATCH_METRICS_PARAM_NAME] = self.enable_cloudwatch_metrics
        self._hyperparameters[CONTAINER_LOG_LEVEL_PARAM_NAME] = self.container_log_level
        self._hyperparameters[JOB_NAME_PARAM_NAME] = self._current_job_name
        self._hyperparameters[SAGEMAKER_REGION_PARAM_NAME] = self.sagemaker_session.boto_region_name

    def _stage_user_code_in_s3(self):
        """Upload the user training script to s3 and return the location.
        Returns: s3 uri
        """
        if self.code_location is None:
            code_bucket = self.sagemaker_session.default_bucket()
            code_s3_prefix = '{}/source'.format(self._current_job_name)
        else:
            code_bucket, key_prefix = parse_s3_url(self.code_location)
            # filter(None, ...) drops empty components so the key has no '//'.
            code_s3_prefix = '/'.join(filter(None, [key_prefix, self._current_job_name, 'source']))
        return tar_and_upload_dir(session=self.sagemaker_session.boto_session,
                                  bucket=code_bucket,
                                  s3_key_prefix=code_s3_prefix,
                                  script=self.entry_point,
                                  directory=self.source_dir)

    def _model_source_dir(self):
        """Get the appropriate value to pass as source_dir to model constructor on deploying
        Returns:
            str: Either a local or an S3 path pointing to the source_dir to be used for code by the model to be deployed
        """
        # In local mode the code was never uploaded, so point at the local dir.
        return self.source_dir if self.sagemaker_session.local_mode else self.uploaded_code.s3_prefix

    def hyperparameters(self):
        """Return the hyperparameters as a dictionary to use for training.
        The :meth:`~sagemaker.estimator.EstimatorBase.fit` method, which trains the model, calls this method
        to find the hyperparameters.
        Returns:
            dict[str, str]: The hyperparameters.
        """
        return self._json_encode_hyperparameters(self._hyperparameters)

    @classmethod
    def _prepare_init_params_from_job_description(cls, job_details):
        """Convert the job description to init params that can be handled by the class constructor
        Args:
            job_details: the returned job details from a describe_training_job API call.
        Returns:
            dictionary: The transformed init_params
        """
        init_params = super(Framework, cls)._prepare_init_params_from_job_description(job_details)
        # Framework-specific settings are round-tripped through JSON-encoded hyperparameters.
        init_params['entry_point'] = json.loads(init_params['hyperparameters'].get(SCRIPT_PARAM_NAME))
        init_params['source_dir'] = json.loads(init_params['hyperparameters'].get(DIR_PARAM_NAME))
        init_params['enable_cloudwatch_metrics'] = json.loads(
            init_params['hyperparameters'].get(CLOUDWATCH_METRICS_PARAM_NAME))
        init_params['container_log_level'] = json.loads(
            init_params['hyperparameters'].get(CONTAINER_LOG_LEVEL_PARAM_NAME))
        hyperparameters = {}
        for k, v in init_params['hyperparameters'].items():
            # Tuning jobs add this special hyperparameter which is not JSON serialized
            if k == '_tuning_objective_metric':
                # NOTE(review): strip('"') removes ALL leading/trailing quote chars,
                # not just one pair — presumably fine for metric names; verify.
                if v.startswith('"') and v.endswith('"'):
                    v = v.strip('"')
                hyperparameters[k] = v
            else:
                hyperparameters[k] = json.loads(v)
        init_params['hyperparameters'] = hyperparameters
        return init_params

    def train_image(self):
        """Return the Docker image to use for training.
        The :meth:`~sagemaker.estimator.EstimatorBase.fit` method, which does the model training,
        calls this method to find the image to use for model training.
        Returns:
            str: The URI of the Docker image.
        """
        if self.image_name:
            return self.image_name
        else:
            # __framework_name__, framework_version and py_version are expected to be
            # defined by concrete framework subclasses — not set in this base class.
            return create_image_uri(self.sagemaker_session.boto_region_name, self.__framework_name__,
                                    self.train_instance_type, self.framework_version, py_version=self.py_version)

    @classmethod
    def attach(cls, training_job_name, sagemaker_session=None):
        """Attach to an existing training job.
        Create an Estimator bound to an existing training job, each subclass is responsible to implement
        ``_prepare_init_params_from_job_description()`` as this method delegates the actual conversion of a training
        job description to the arguments that the class constructor expects. After attaching, if the training job has a
        Complete status, it can be ``deploy()`` ed to create a SageMaker Endpoint and return a ``Predictor``.
        If the training job is in progress, attach will block and display log messages
        from the training job, until the training job completes.
        Args:
            training_job_name (str): The name of the training job to attach to.
            sagemaker_session (sagemaker.session.Session): Session object which manages interactions with
                Amazon SageMaker APIs and any other AWS services needed. If not specified, the estimator creates one
                using the default AWS configuration chain.
        Examples:
            >>> my_estimator.fit(wait=False)
            >>> training_job_name = my_estimator.latest_training_job.name
            Later on:
            >>> attached_estimator = Estimator.attach(training_job_name)
            >>> attached_estimator.deploy()
        Returns:
            Instance of the calling ``Estimator`` Class with the attached training job.
        """
        estimator = super(Framework, cls).attach(training_job_name, sagemaker_session)
        # NOTE(review): assumes source_dir/entry_point recovered from the job description
        # point at the staged S3 code location — confirm for local-mode jobs.
        estimator.uploaded_code = UploadedCode(estimator.source_dir, estimator.entry_point)
        return estimator

    @staticmethod
    def _json_encode_hyperparameters(hyperparameters):
        # Values are JSON-encoded so arbitrary types survive the string-only API.
        return {str(k): json.dumps(v) for (k, v) in hyperparameters.items()}

    @classmethod
    def _update_init_params(cls, hp, tf_arguments):
        # Pop framework-specific arguments out of the hyperparameter dict (mutating hp)
        # and JSON-decode them into constructor parameters.
        updated_params = {}
        for argument in tf_arguments:
            value = hp.pop(argument, None)
            if value is not None:
                value = json.loads(value)
                updated_params[argument] = value
        return updated_params

    def transformer(self, instance_count, instance_type, strategy=None, assemble_with=None, output_path=None,
                    output_kms_key=None, accept=None, env=None, max_concurrent_transforms=None,
                    max_payload=None, tags=None, role=None, model_server_workers=None):
        """Return a ``Transformer`` that uses a SageMaker Model based on the training job. It reuses the
        SageMaker Session and base job name used by the Estimator.
        Args:
            instance_count (int): Number of EC2 instances to use.
            instance_type (str): Type of EC2 instance to use, for example, 'ml.c4.xlarge'.
            strategy (str): The strategy used to decide how to batch records in a single request (default: None).
                Valid values: 'MULTI_RECORD' and 'SINGLE_RECORD'.
            assemble_with (str): How the output is assembled (default: None). Valid values: 'Line' or 'None'.
            output_path (str): S3 location for saving the transform result. If not specified, results are stored to
                a default bucket.
            output_kms_key (str): Optional. KMS key ID for encrypting the transform output (default: None).
            accept (str): The content type accepted by the endpoint deployed during the transform job.
            env (dict): Environment variables to be set for use during the transform job (default: None).
            max_concurrent_transforms (int): The maximum number of HTTP requests to be made to
                each individual transform container at one time.
            max_payload (int): Maximum size of the payload in a single HTTP request to the container in MB.
            tags (list[dict]): List of tags for labeling a transform job. If none specified, then the tags used for
                the training job are used for the transform job.
            role (str): The ``ExecutionRoleArn`` IAM Role ARN for the ``Model``, which is also used during
                transform jobs. If not specified, the role from the Estimator will be used.
            model_server_workers (int): Optional. The number of worker processes used by the inference server.
                If None, server will use one worker per vCPU.
        """
        self._ensure_latest_training_job()
        role = role or self.role
        # Unlike EstimatorBase.transformer, build the Model via create_model so the
        # framework-specific container definition (and env vars) are applied.
        model = self.create_model(role=role, model_server_workers=model_server_workers)
        container_def = model.prepare_container_def(instance_type)
        model_name = model.name or name_from_image(container_def['Image'])
        self.sagemaker_session.create_model(model_name, role, container_def)
        # Merge caller-supplied env vars over the model's own environment.
        transform_env = model.env.copy()
        if env is not None:
            transform_env.update(env)
        tags = tags or self.tags
        return Transformer(model_name, instance_count, instance_type, strategy=strategy, assemble_with=assemble_with,
                           output_path=output_path, output_kms_key=output_kms_key, accept=accept,
                           max_concurrent_transforms=max_concurrent_transforms, max_payload=max_payload,
                           env=transform_env, tags=tags, base_transform_job_name=self.base_job_name,
                           sagemaker_session=self.sagemaker_session)
def _s3_uri_prefix(channel_name, s3_data):
    """Map ``channel_name`` to the S3 URI of ``s3_data`` with the 's3://' scheme stripped.

    Args:
        channel_name (str): Name of the input channel.
        s3_data (str or s3_input): The data source; an ``s3_input`` carries its URI in its config.
    Returns:
        dict[str, str]: {channel_name: uri without the 's3://' prefix}
    Raises:
        ValueError: If the resolved URI does not start with 's3://'.
    """
    uri = s3_data.config['DataSource']['S3DataSource']['S3Uri'] if isinstance(s3_data, s3_input) else s3_data
    if not uri.startswith('s3://'):
        raise ValueError('Expecting an s3 uri. Got {}'.format(uri))
    return {channel_name: uri[len('s3://'):]}
# E.g. 's3://bucket/data' would return 'bucket/data'.
# Also accepts other valid input types, e.g. dict and s3_input.
def _s3_uri_without_prefix_from_input(input_data):
    """Build a {channel_name: s3-uri-without-prefix} mapping from a str, dict or s3_input."""
    if isinstance(input_data, dict):
        # Dict maps channel names to their data sources; merge the per-channel results.
        response = {}
        for channel_name, channel_s3_uri in input_data.items():
            response.update(_s3_uri_prefix(channel_name, channel_s3_uri))
        return response
    if isinstance(input_data, (str, s3_input)):
        # Single data source: use the default 'training' channel name.
        return _s3_uri_prefix('training', input_data)
    raise ValueError('Unrecognized type for S3 input data config - not str or s3_input: {}'.format(input_data))
| 53.649939 | 120 | 0.676811 |
79596a626e24d95057acfc7502f1fdad80902f1c | 18,808 | py | Python | General/data_conversions.py | ElgaSalvadore/watools | daaaad474add572f32dd6a45a4230ccf636c479a | [
"Apache-2.0"
] | 1 | 2019-08-09T12:59:27.000Z | 2019-08-09T12:59:27.000Z | General/data_conversions.py | ElgaSalvadore/watools | daaaad474add572f32dd6a45a4230ccf636c479a | [
"Apache-2.0"
] | 2 | 2019-07-25T06:10:40.000Z | 2019-07-25T07:09:27.000Z | General/data_conversions.py | ElgaSalvadore/watools | daaaad474add572f32dd6a45a4230ccf636c479a | [
"Apache-2.0"
] | 2 | 2019-07-25T06:03:05.000Z | 2019-10-07T08:50:51.000Z | # -*- coding: utf-8 -*-
"""
Created on Sun Dec 18 13:07:32 2016
@author: tih
"""
from __future__ import division
from builtins import range
import gzip
import zipfile
import tarfile
from osgeo import gdal, osr
#import osr
import os
import pandas as pd
import numpy as np
import netCDF4
import time
def Convert_nc_to_tiff(input_nc, output_folder):
    """
    This function converts the nc file into tiff files (one GeoTIFF per
    time slice when the dataset has a time axis).

    Keyword Arguments:
    input_nc -- str path to one netCDF file, or a list of paths (opened as
                an MFDataset); the last declared variable is exported
    output_folder -- folder where the output tiff file(s) are written
    """
    from datetime import date
    import watools.General.raster_conversions as RC
    #All_Data = RC.Open_nc_array(input_nc)
    if type(input_nc) == str:
        nc = netCDF4.Dataset(input_nc)
    elif type(input_nc) == list:
        nc = netCDF4.MFDataset(input_nc)
    # The exported variable is the last one declared in the file.
    Var = list(nc.variables.keys())[-1]
    All_Data = nc[Var]
    geo_out, epsg, size_X, size_Y, size_Z, Time = RC.Open_nc_info(input_nc)
    if epsg == 4326:
        epsg = 'WGS84'
    # Create output folder if needed
    if not os.path.exists(output_folder):
        os.mkdir(output_folder)
    for i in range(0,size_Z):
        if not Time == -9999:
            # Time axis present: one tiff per slice, named after the date
            # (Time values are proleptic-Gregorian ordinals).
            time_one = Time[i]
            d = date.fromordinal(time_one)
            name = os.path.splitext(os.path.basename(input_nc))[0]
            # NOTE(review): assumes the base name ends in two '_'-separated
            # suffix parts that are stripped here — confirm for list inputs.
            nameparts = name.split('_')[0:-2]
            name_out = os.path.join(output_folder, '_'.join(nameparts) + '_%d.%02d.%02d.tif' %(d.year, d.month, d.day))
            Data_one = All_Data[i,:,:]
        else:
            # No time axis: single 2D tiff named after the input file.
            name=os.path.splitext(os.path.basename(input_nc))[0]
            name_out = os.path.join(output_folder, name + '.tif')
            Data_one = All_Data[:,:]
        Save_as_tiff(name_out, Data_one, geo_out, epsg)
    return()
def Convert_grb2_to_nc(input_wgrib, output_nc, band):
    """Convert one band of a grib2 file to netCDF via ``gdal_translate``.

    Keyword Arguments:
    input_wgrib -- path of the input grib2 file
    output_nc -- path of the netCDF file to create
    band -- int, band number passed to ``-b``
    """
    import watools.General.raster_conversions as RC
    # gdal_translate is assumed to be available on the PATH.
    translate_exe = 'gdal_translate'
    # Assemble the shell command: netCDF output driver, selected band.
    full_command = '%s -of netcdf -b %d %s %s' % (
        translate_exe, band, input_wgrib, output_nc)
    RC.Run_command_window(full_command)
    return()
def Convert_adf_to_tiff(input_adf, output_tiff):
    """
    This function converts the adf files into tiff files

    Keyword Arguments:
    input_adf -- name, name of the adf file
    output_tiff -- Name of the output tiff file

    Returns the path of the created tiff file.
    """
    import watools.General.raster_conversions as RC
    # Get environmental variable
    # NOTE(review): relies on a ';'-separated WA_PATHS env var and a
    # gdal_translate.exe binary — appears Windows-specific; confirm.
    WA_env_paths = os.environ["WA_PATHS"].split(';')
    GDAL_env_path = WA_env_paths[0]
    GDAL_TRANSLATE_PATH = os.path.join(GDAL_env_path, 'gdal_translate.exe')
    # convert data from ESRI GRID to GeoTIFF
    fullCmd = ('"%s" -co COMPRESS=DEFLATE -co PREDICTOR=1 -co '
               'ZLEVEL=1 -of GTiff %s %s') % (GDAL_TRANSLATE_PATH, input_adf, output_tiff)
    RC.Run_command_window(fullCmd)
    return(output_tiff)
def Convert_bil_to_tiff(input_bil, output_tiff):
    """
    This function converts the bil files into tiff files

    Keyword Arguments:
    input_bil -- name, name of the bil file
    output_tiff -- Name of the output tiff file

    Returns the path of the created tiff file. The output is written with
    the WGS84 geographic CS via Save_as_tiff.
    """
    import gdalconst
    # Register the ESRI .hdr-labelled (EHdr) driver so gdal can read .bil.
    gdal.GetDriverByName('EHdr').Register()
    dest = gdal.Open(input_bil, gdalconst.GA_ReadOnly)
    Array = dest.GetRasterBand(1).ReadAsArray()
    geo_out = dest.GetGeoTransform()
    Save_as_tiff(output_tiff, Array, geo_out, "WGS84")
    return(output_tiff)
def Convert_hdf5_to_tiff(inputname_hdf, Filename_tiff_end, Band_number, scaling_factor, geo_out):
    """
    This function converts the hdf5 files into tiff files

    Keyword Arguments:
    inputname_hdf -- name of the hdf5 container file
    Filename_tiff_end -- Name of the output tiff file
    Band_number -- bandnumber of the hdf5 that needs to be converted
    scaling_factor -- factor multipied by data is the output array
    geo_out -- [minimum lon, pixelsize, rotation, maximum lat, rotation,
            pixelsize], (geospatial dataset)
    """
    import watools.General.raster_conversions as RC
    # Open the hdf file
    g = gdal.Open(inputname_hdf, gdal.GA_ReadOnly)
    # Define temporary file out and band name in
    name_in = g.GetSubDatasets()[Band_number][0]
    # Get environmental variable
    # NOTE(review): ';'-separated WA_PATHS plus gdal_translate.exe — appears
    # Windows-specific; confirm.
    WA_env_paths = os.environ["WA_PATHS"].split(';')
    GDAL_env_path = WA_env_paths[0]
    GDAL_TRANSLATE = os.path.join(GDAL_env_path, 'gdal_translate.exe')
    # run gdal translate command
    FullCmd = '%s -of GTiff %s %s' %(GDAL_TRANSLATE, name_in, Filename_tiff_end)
    RC.Run_command_window(FullCmd)
    # Get the data array (re-open the freshly written tiff to read it back)
    dest = gdal.Open(Filename_tiff_end)
    Data = dest.GetRasterBand(1).ReadAsArray()
    dest = None
    # If the band data is not SM change the DN values into PROBA-V values and write into the spectral_reflectance_PROBAV
    Data_scaled = Data * scaling_factor
    # Save the PROBA-V as a tif file (overwrites the temporary tiff above)
    Save_as_tiff(Filename_tiff_end, Data_scaled, geo_out, "WGS84")
    return()
def Extract_Data(input_file, output_folder):
    """
    This function extract the zip files

    Keyword Arguments:
    input_file -- path of the zip archive that must be unzipped
    output_folder -- Dir, directory where the unzipped data must be
                     stored
    """
    # Use a context manager so the archive handle is closed even when
    # extraction raises (the old open/extract/close leaked it on error).
    with zipfile.ZipFile(input_file, 'r') as z:
        z.extractall(output_folder)
def Extract_Data_gz(zip_filename, outfilename):
    """
    This function extract the gzip files

    Keyword Arguments:
    zip_filename -- name, name of the .gz file that must be unzipped;
                    it is deleted after a successful extraction
    outfilename -- path of the decompressed output file
    """
    with gzip.GzipFile(zip_filename, 'rb') as zf:
        file_content = zf.read()
    # Context manager guarantees the output handle is closed even if the
    # write fails (the old open/write/close leaked it on error); the extra
    # zf.close() after the with-block was redundant and has been dropped.
    with open(outfilename, 'wb') as save_file:
        save_file.write(file_content)
    # The compressed source is no longer needed once extraction succeeded.
    os.remove(zip_filename)
def Extract_Data_tar_gz(zip_filename, output_folder):
    """
    This function extract the tar.gz files

    Keyword Arguments:
    zip_filename -- name, name of the file that must be unzipped
    output_folder -- Dir, directory where the unzipped data must be
                     stored
    """
    # Extract directly into the target folder instead of os.chdir()-ing,
    # which leaked a changed working directory to the whole process; the
    # context manager also closes the archive even when extraction raises.
    with tarfile.open(zip_filename, "r:gz") as tar:
        tar.extractall(path=output_folder)
def Save_as_tiff(name='', data='', geo='', projection=''):
    """
    This function save the array as a geotiff

    Keyword arguments:
    name -- string, full path of the output .tif file
    data -- [array], dataset of the geotiff
    geo -- [minimum lon, pixelsize, rotation, maximum lat, rotation,
            pixelsize], (geospatial dataset)
    projection -- the CRS: an EPSG code (int), a well-known geographic CS
            name (e.g. 'WGS84'), or a WKT string; WGS84 when empty
    """
    # save as a geotiff (LZW-compressed, single Float32 band)
    driver = gdal.GetDriverByName("GTiff")
    dst_ds = driver.Create(name, int(data.shape[1]), int(data.shape[0]), 1,
                           gdal.GDT_Float32, ['COMPRESS=LZW'])
    srse = osr.SpatialReference()
    if projection == '':
        srse.SetWellKnownGeogCS("WGS84")
    else:
        # Fallback cascade: try as a well-known name, then as an EPSG code,
        # then as raw WKT. NOTE(review): the magic constant 6 is presumably
        # an OGRERR failure code from SetWellKnownGeogCS, and on success the
        # call is deliberately repeated — confirm before restructuring.
        try:
            if not srse.SetWellKnownGeogCS(projection) == 6:
                srse.SetWellKnownGeogCS(projection)
            else:
                try:
                    srse.ImportFromEPSG(int(projection))
                except:
                    srse.ImportFromWkt(projection)
        except:
            try:
                srse.ImportFromEPSG(int(projection))
            except:
                srse.ImportFromWkt(projection)
    dst_ds.SetProjection(srse.ExportToWkt())
    dst_ds.GetRasterBand(1).SetNoDataValue(-9999)
    dst_ds.SetGeoTransform(geo)
    dst_ds.GetRasterBand(1).WriteArray(data)
    # Dereference the dataset so gdal flushes and closes the file.
    dst_ds = None
    return()
def Save_as_MEM(data='', geo='', projection=''):
    """
    Store an array as an in-memory GDAL dataset and return it.

    Keyword arguments:
    data -- [array], dataset values to store (single Float32 band)
    geo -- [minimum lon, pixelsize, rotation, maximum lat, rotation,
            pixelsize], (geospatial dataset)
    projection -- well-known geographic CS name; WGS84 when empty
    """
    n_rows = int(data.shape[0])
    n_cols = int(data.shape[1])
    mem_driver = gdal.GetDriverByName("MEM")
    dataset = mem_driver.Create('', n_cols, n_rows, 1,
                                gdal.GDT_Float32)
    spatial_ref = osr.SpatialReference()
    spatial_ref.SetWellKnownGeogCS("WGS84" if projection == '' else projection)
    dataset.SetProjection(spatial_ref.ExportToWkt())
    band = dataset.GetRasterBand(1)
    band.SetNoDataValue(-9999)
    dataset.SetGeoTransform(geo)
    band.WriteArray(data)
    return(dataset)
def Save_as_NC(namenc, DataCube, Var, Reference_filename, Startdate = '', Enddate = '', Time_steps = '', Scaling_factor = 1):
    """
    This function save the array as a netcdf file

    Keyword arguments:
    namenc -- string, complete path of the output file with .nc extension
    DataCube -- [array], dataset of the nc file, can be a 2D or 3D array [time, lat, lon], must be same size as reference data
    Var -- string, the name of the variable
    Reference_filename -- string, complete path to the reference file name
    Startdate -- 'YYYY-mm-dd', needs to be filled when you want to save a 3D array, defines the Start datum of the dataset
    Enddate -- 'YYYY-mm-dd', needs to be filled when you want to save a 3D array, defines the End datum of the dataset
    Time_steps -- 'monthly' or 'daily', needs to be filled when you want to save a 3D array, defines the timestep of the dataset
    Scaling_factor -- number, scaling_factor of the dataset, default = 1
    """
    # Import modules
    import watools.General.raster_conversions as RC
    from netCDF4 import Dataset
    # Existing files are never overwritten.
    if not os.path.exists(namenc):
        # Get raster information
        geo_out, proj, size_X, size_Y = RC.Open_array_info(Reference_filename)
        # Create the lat/lon rasters (half-pixel shift to cell edges)
        lon = np.arange(size_X)*geo_out[1]+geo_out[0] - 0.5 * geo_out[1]
        lat = np.arange(size_Y)*geo_out[5]+geo_out[3] - 0.5 * geo_out[5]
        # Create the nc file
        nco = Dataset(namenc, 'w', format='NETCDF4_CLASSIC')
        nco.description = '%s data' %Var
        # Create dimensions, variables and attributes:
        nco.createDimension('longitude', size_X)
        nco.createDimension('latitude', size_Y)
        # Create time dimension if the parameter is time dependent
        # (bugfix: was `Startdate is not ''`, an identity check on a literal)
        if Startdate != '':
            if Time_steps == 'monthly':
                Dates = pd.date_range(Startdate,Enddate,freq = 'MS')
            if Time_steps == 'daily':
                Dates = pd.date_range(Startdate,Enddate,freq = 'D')
            # Store dates as proleptic-Gregorian ordinals.
            time_or = np.zeros(len(Dates))
            for i, Date in enumerate(Dates):
                time_or[i] = Date.toordinal()
            nco.createDimension('time', None)
            timeo = nco.createVariable('time', 'f4', ('time',))
            timeo.units = '%s' %Time_steps
            timeo.standard_name = 'time'
        # Create the lon variable
        lono = nco.createVariable('longitude', 'f8', ('longitude',))
        lono.standard_name = 'longitude'
        lono.units = 'degrees_east'
        lono.pixel_size = geo_out[1]
        # Create the lat variable
        lato = nco.createVariable('latitude', 'f8', ('latitude',))
        lato.standard_name = 'latitude'
        lato.units = 'degrees_north'
        lato.pixel_size = geo_out[5]
        # Create container variable for CRS: lon/lat WGS84 datum
        crso = nco.createVariable('crs', 'i4')
        crso.long_name = 'Lon/Lat Coords in WGS84'
        crso.grid_mapping_name = 'latitude_longitude'
        crso.projection = proj
        crso.longitude_of_prime_meridian = 0.0
        crso.semi_major_axis = 6378137.0
        crso.inverse_flattening = 298.257223563
        crso.geo_reference = geo_out
        # Create the data variable: 3D with a time axis, 2D otherwise
        if Startdate != '':
            preco = nco.createVariable('%s' %Var, 'f8', ('time', 'latitude', 'longitude'), zlib=True, least_significant_digit=1)
            timeo[:] = time_or
        else:
            preco = nco.createVariable('%s' %Var, 'f8', ('latitude', 'longitude'), zlib=True, least_significant_digit=1)
        # Set the data variable information
        preco.scale_factor = Scaling_factor
        preco.add_offset = 0.00
        preco.grid_mapping = 'crs'
        preco.set_auto_maskandscale(False)
        # Set the lat/lon variable
        lono[:] = lon
        lato[:] = lat
        # Set the data variable; stored values are divided by the scale
        # factor. (bugfix: np.float was removed from numpy >= 1.24; the
        # builtin float is the exact equivalent of the old alias.)
        if Startdate != '':
            for i in range(len(Dates)):
                preco[i,:,:] = DataCube[i,:,:] / float(Scaling_factor)
        else:
            preco[:,:] = DataCube[:,:] / float(Scaling_factor)
        nco.close()
    return()
def Create_NC_name(Var, Simulation, Dir_Basin, sheet_nmbr, info = ''):
    """
    Build the full path of a simulation output netCDF file and make sure
    its parent folder exists.

    Keyword arguments:
    Var -- string, variable name used as the file-name prefix
    Simulation -- int, simulation number
    Dir_Basin -- string, basin root directory
    sheet_nmbr -- int, sheet number (sub-folder)
    info -- iterable of strings appended ('_'-joined) to the file name

    Returns:
    Dir_Basin/Simulations/Simulation_<n>/Sheet_<m>/<Var>_Simulation<n>_<info>.nc
    """
    file_name = '_'.join([Var, 'Simulation%d' % Simulation, '_'.join(info)]) + '.nc'
    folder = os.path.join(Dir_Basin, 'Simulations',
                          'Simulation_%d' % Simulation, 'Sheet_%d' % sheet_nmbr)
    # Create the destination folder on first use.
    if not os.path.exists(folder):
        os.makedirs(folder)
    return(os.path.join(folder, file_name))
def Create_new_NC_file(nc_outname, Basin_Example_File, Basin):
    """
    Create a yearly NetCDF container file for a basin, initialised with the
    basin's land-use raster and a monthly time axis.

    Keyword arguments:
    nc_outname -- string, output path; its base name must be '<year>.nc'
                  because the year is parsed from it
    Basin_Example_File -- string, path to a reference raster that provides
                  geotransform, size, projection and the Landuse values
    Basin -- string, basin name stored as the file description
    """
    # Open basin file
    dest = gdal.Open(Basin_Example_File)
    Basin_array = dest.GetRasterBand(1).ReadAsArray()
    # Normalize missing/invalid landuse values to the -9999 fill value.
    Basin_array[np.isnan(Basin_array)] = -9999
    Basin_array[Basin_array<0] = -9999
    # Get Basic information
    Geo = dest.GetGeoTransform()
    size_X = dest.RasterXSize
    size_Y = dest.RasterYSize
    epsg = dest.GetProjection()
    # Get Year and months (the file name encodes the year)
    year = int(os.path.basename(nc_outname).split(".")[0])
    Dates = pd.date_range("%d-01-01" %year, "%d-12-31" %year, freq = "MS")
    # Latitude and longitude (half-pixel shift to cell centres)
    lons = np.arange(size_X)*Geo[1]+Geo[0] + 0.5 * Geo[1]
    lats = np.arange(size_Y)*Geo[5]+Geo[3] + 0.5 * Geo[5]
    # Create NetCDF file
    nco = netCDF4.Dataset(nc_outname, 'w', format = 'NETCDF4_CLASSIC')
    nco.set_fill_on()
    nco.description = '%s' %Basin
    # Create dimensions
    nco.createDimension('latitude', size_Y)
    nco.createDimension('longitude', size_X)
    nco.createDimension('time', None)
    # Create NetCDF variables
    crso = nco.createVariable('crs', 'i4')
    crso.long_name = 'Lon/Lat Coords in WGS84'
    crso.standard_name = 'crs'
    crso.grid_mapping_name = 'latitude_longitude'
    crso.projection = epsg
    crso.longitude_of_prime_meridian = 0.0
    crso.semi_major_axis = 6378137.0
    crso.inverse_flattening = 298.257223563
    crso.geo_reference = Geo
    ######################### Save Rasters in NetCDF ##############################
    lato = nco.createVariable('latitude', 'f8', ('latitude',))
    lato.units = 'degrees_north'
    lato.standard_name = 'latitude'
    lato.pixel_size = Geo[5]
    lono = nco.createVariable('longitude', 'f8', ('longitude',))
    lono.units = 'degrees_east'
    lono.standard_name = 'longitude'
    lono.pixel_size = Geo[1]
    timeo = nco.createVariable('time', 'f4', ('time',))
    timeo.units = 'Monthly'
    timeo.standard_name = 'time'
    # Variables
    basin_var = nco.createVariable('Landuse', 'i',
                                   ('latitude', 'longitude'),
                                   fill_value=-9999)
    basin_var.long_name = 'Landuse'
    basin_var.grid_mapping = 'crs'
    # Create time unit (proleptic-Gregorian ordinals, one per month)
    i = 0
    time_or=np.zeros(len(Dates))
    for Date in Dates:
        time_or[i] = Date.toordinal()
        i += 1
    # Load data
    lato[:] = lats
    lono[:] = lons
    timeo[:] = time_or
    basin_var[:,:] = Basin_array
    # close the file
    # NOTE(review): the sleep before close is presumably a workaround for
    # slow filesystems — confirm it is still needed.
    time.sleep(1)
    nco.close()
    return()
def Add_NC_Array_Variable(nc_outname, Array, name, unit, Scaling_factor = 1):
    """
    Append a time-dependent integer variable to an existing NetCDF file.

    Keyword arguments:
    nc_outname -- string, path of an existing .nc file (opened in 'r+' mode)
    Array -- [time, lat, lon] array of values; NaNs are stored as the
             -9999 fill value
    name -- string, name of the new variable
    unit -- string, unit attribute of the new variable
    Scaling_factor -- number, values are stored as int(value / Scaling_factor)
    """
    # create input array: map NaN onto the fill value and store scaled ints.
    # (bugfix: np.float was removed from numpy >= 1.24; the builtin float
    # is the exact equivalent of the old alias.)
    Array[np.isnan(Array)] = -9999 * float(Scaling_factor)
    Array = np.int_(Array / float(Scaling_factor))
    # Create NetCDF file
    nco = netCDF4.Dataset(nc_outname, 'r+', format = 'NETCDF4_CLASSIC')
    nco.set_fill_on()
    paro = nco.createVariable('%s' %name, 'i',
                              ('time', 'latitude', 'longitude'),fill_value=-9999,
                              zlib=True, least_significant_digit=0)
    paro.scale_factor = Scaling_factor
    paro.add_offset = 0.00
    paro.grid_mapping = 'crs'
    paro.long_name = name
    paro.units = unit
    paro.set_auto_maskandscale(False)
    # Set the data variable
    paro[:,:,:] = Array
    # close the file
    # NOTE(review): sleep kept from the original — presumably a filesystem
    # workaround; confirm it is still needed.
    time.sleep(1)
    nco.close()
    return()
def Add_NC_Array_Static(nc_outname, Array, name, unit, Scaling_factor = 1):
    """
    Append a static (2D, no time axis) integer variable to an existing
    NetCDF file.

    Keyword arguments:
    nc_outname -- string, path of an existing .nc file (opened in 'r+' mode)
    Array -- [lat, lon] array of values; NaNs are stored as the -9999
             fill value
    name -- string, name of the new variable
    unit -- string, unit attribute of the new variable
    Scaling_factor -- number, values are stored as int(value / Scaling_factor)
    """
    # create input array: map NaN onto the fill value and store scaled ints.
    # (bugfix: np.float was removed from numpy >= 1.24; the builtin float
    # is the exact equivalent of the old alias.)
    Array[np.isnan(Array)] = -9999 * float(Scaling_factor)
    Array = np.int_(Array / float(Scaling_factor))
    # Create NetCDF file
    nco = netCDF4.Dataset(nc_outname, 'r+', format = 'NETCDF4_CLASSIC')
    nco.set_fill_on()
    paro = nco.createVariable('%s' %name, 'i',
                              ('latitude', 'longitude'),fill_value=-9999,
                              zlib=True, least_significant_digit=0)
    paro.scale_factor = Scaling_factor
    paro.add_offset = 0.00
    paro.grid_mapping = 'crs'
    paro.long_name = name
    paro.units = unit
    paro.set_auto_maskandscale(False)
    # Set the data variable
    paro[:,:] = Array
    # close the file
    # NOTE(review): sleep kept from the original — presumably a filesystem
    # workaround; confirm it is still needed.
    time.sleep(1)
    nco.close()
    return()
def Convert_dict_to_array(River_dict, Array_dict, Reference_data):
    """
    Scatter per-river-segment time series back onto the raster grid of a
    reference dataset.

    Keyword arguments:
    River_dict -- dict {segment: sequence of pixel IDs} (IDs are 1-based
                  row-major indices into the reference raster)
    Array_dict -- dict {segment: [time, pixel] array of values}
    Reference_data -- path to a .nc or raster file defining the grid

    Returns a [time, lat, lon] array filled with NaN outside river pixels.
    """
    import watools.General.raster_conversions as RC
    # (numpy/os are already imported at module level; the redundant local
    # re-imports were removed.)
    if os.path.splitext(Reference_data)[-1] == '.nc':
        # Get raster information
        geo_out, proj, size_X, size_Y, size_Z, Time = RC.Open_nc_info(Reference_data)
    else:
        # Get raster information
        geo_out, proj, size_X, size_Y = RC.Open_array_info(Reference_data)
    # Create ID Matrix (1-based row-major pixel IDs)
    y,x = np.indices((size_Y, size_X))
    ID_Matrix = np.int32(np.ravel_multi_index(np.vstack((y.ravel(),x.ravel())),(size_Y,size_X),mode='clip').reshape(x.shape)) + 1
    # Get tiff array time dimension:
    time_dimension = int(np.shape(Array_dict[0])[0])
    # create an empty array
    DataCube = np.ones([time_dimension, size_Y, size_X]) * np.nan
    for river_part in range(0, len(River_dict)):
        for river_pixel in range(1, len(River_dict[river_part])):
            river_pixel_ID = River_dict[river_part][river_pixel]
            # Scan the ID matrix once per pixel (it was scanned twice:
            # once for the length test and again for the indexing).
            matches = np.argwhere(ID_Matrix == river_pixel_ID)
            if len(matches) > 0:
                row, col = matches[0][:]
                DataCube[:, row, col] = Array_dict[river_part][:, river_pixel]
    return(DataCube)
| 33.112676 | 129 | 0.634836 |
79596b1f424e10bd737db036505c096dfe391519 | 7,038 | py | Python | agents/maze_agents/toy_maze/skill_discovery/edl.py | victorcampos7/edl | ffdf23d4e102ca7d69a1408bafa267b0c7d8bfa0 | [
"MIT"
] | 30 | 2020-02-16T15:52:59.000Z | 2022-03-22T10:54:54.000Z | agents/maze_agents/toy_maze/skill_discovery/edl.py | imatge-upc/edl | ffdf23d4e102ca7d69a1408bafa267b0c7d8bfa0 | [
"MIT"
] | null | null | null | agents/maze_agents/toy_maze/skill_discovery/edl.py | imatge-upc/edl | ffdf23d4e102ca7d69a1408bafa267b0c7d8bfa0 | [
"MIT"
] | 7 | 2020-02-16T15:53:05.000Z | 2022-01-18T03:41:03.000Z | # Copyright (c) 2019, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: MIT
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/MIT
import os
import json
import torch
import torch.nn as nn
from .base import StochasticAgent
from agents.maze_agents.toy_maze.env import Env
from base.modules.normalization import DatasetNormalizer
from agents.maze_agents.modules.density import VQVAEDensity
from agents.maze_agents.modules import StochasticPolicy, Value
from base.learners.skill_discovery.edl import BaseEDLLearner, BaseEDLSiblingRivalryLearner
class DistanceStochasticAgent(StochasticAgent):
    """Stochastic maze agent whose goals come from VQ-VAE codebook centroids."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.batch_keys += ['goal'] # 'goal' is only used for visualization purposes
    def _make_modules(self, policy, skill_embedding, vae):
        """Attach the policy/skill embedding (via base class) and keep the VAE."""
        super()._make_modules(policy, skill_embedding)
        self.vae = vae
    def step(self, do_eval=False):
        """Take one env step and record the current env goal on the transition."""
        super().step(do_eval=do_eval)
        self.episode[-1]['goal'] = self.env.goal.detach()
    def reset(self, skill=None, *args, **kwargs):
        """Sample/assign a skill and reset the env with that skill's centroid as goal."""
        self.reset_skill(skill)
        # The goal passed to the env is the VAE centroid of the current skill.
        kwargs['goal'] = self.vae.get_centroids(dict(skill=self.curr_skill.view([]))).detach().numpy()
        self.env.reset(*args, **kwargs)
        self.episode = []
    def preprocess_skill(self, curr_skill):
        """Embed the (required) skill index; detached so no gradients flow back."""
        assert curr_skill is not None
        return self.skill_embedding(curr_skill).detach()
class SiblingRivalryStochasticAgent(DistanceStochasticAgent):
    """Distance agent variant that also tracks 'antigoal' in rollout batches."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.batch_keys += ['antigoal']
class VQVAEDiscriminator(VQVAEDensity):
    """VQ-VAE used as a skill discriminator (zero dedicated skill inputs)."""
    def __init__(self, state_size, hidden_size, codebook_size, code_size, beta=0.25, **kwargs):
        super().__init__(num_skills=0, state_size=state_size, hidden_size=hidden_size, codebook_size=codebook_size,
                         code_size=code_size, beta=beta, **kwargs)
        self.softmax = nn.Softmax(dim=1)
    def _make_normalizer_module(self):
        """Normalize raw inputs only when configured to do so."""
        self.normalizer = DatasetNormalizer(self.input_size) if self.normalize_inputs else None
    def compute_logprob(self, batch, with_codes=False):
        """Reconstruction log-prob (negative MSE) of the batch through the VQ bottleneck.

        When ``with_codes`` is True, also return the encoder output and the
        selected codebook entries (needed for the VQ/commitment losses).
        """
        x = batch[self.input_key]
        z_e_x = self.encoder(x)
        z_q_x, selected_codes = self.vq.straight_through(z_e_x)
        x_ = self.decoder(z_q_x)
        if self.normalizes_inputs:
            x_ = self.normalizer.denormalize(x_)
        logprob = -1. * self.mse_loss(x, x_).sum(dim=1)
        if with_codes:
            return logprob, z_e_x, selected_codes
        else:
            return logprob
    def compute_logprob_under_latent(self, batch, z=None):
        """Reconstruction log-prob when decoding directly from latent ``z``.

        Defaults to the batch's 'skill' index; everything is detached, so
        this is evaluation-only (no gradients through decoder or codebook).
        """
        x = batch[self.input_key]
        if z is None:
            z = batch['skill']
        z_q_x = self.vq.embedding(z).detach()
        x_ = self.decoder(z_q_x).detach()
        if self.normalizes_inputs:
            x_ = self.normalizer.denormalize(x_)
        logprob = -1. * self.mse_loss(x, x_).sum(dim=1)
        return logprob
    def log_approx_posterior(self, batch):
        """Log q(z|x) where q is a softmax over encoder-to-codebook distances."""
        x, z = batch[self.input_key], batch['skill']
        z_e_x = self.encoder(x)
        codebook_distances = self.vq.compute_distances(z_e_x)
        p = self.softmax(codebook_distances)
        # Pick each row's probability at its own skill index.
        p_z = p[torch.arange(0, p.shape[0]), z]
        return torch.log(p_z)
    def surprisal(self, batch):
        """Gradient-free log-prob of the batch under its own latent skill."""
        with torch.no_grad():
            return self.compute_logprob_under_latent(batch).detach()
class EDLLearner(BaseEDLLearner):
    """EDL skill-discovery learner for the toy maze.

    Loads a pre-trained VQ-VAE discriminator from ``vae_logdir`` and uses its
    codebook embeddings as skill representations for policy and value nets.
    """
    def __init__(self, vae_logdir, **kwargs):
        self._parse_init_args(vae_logdir, **kwargs)
        super().__init__(**kwargs)
    def _parse_init_args(self, vae_logdir, **kwargs):
        """Resolve ``vae_logdir`` and record the VAE config/checkpoint paths."""
        vae_logdir = str(vae_logdir)
        if not os.path.isabs(vae_logdir):
            root_dir = os.environ.get("ROOT_DIR", os.getcwd()) # useful when loading experiments from a notebook
            vae_logdir = os.path.join(root_dir, vae_logdir)
        assert os.path.exists(vae_logdir), "Directory not found: {}".format(vae_logdir)
        # Use a context manager so the config handle is closed deterministically
        # (json.load(open(...)) left the file open until garbage collection).
        with open(os.path.join(vae_logdir, "config.json")) as config_file:
            self.vae_args = json.load(config_file)["vae_args"]
        self.vae_checkpoint_path = os.path.join(vae_logdir, "model.pth.tar")
    def create_env(self):
        """Build a fresh maze environment instance from the stored params."""
        return Env(**self.env_params)
    def _make_agent_modules(self):
        """Instantiate the pre-trained VQ-VAE plus the policy and value nets."""
        self.vae = VQVAEDiscriminator(state_size=self._dummy_env.state_size, **self.vae_args)
        self.vae.load_checkpoint(self.vae_checkpoint_path)
        kwargs = dict(env=self._dummy_env, hidden_size=self.hidden_size, num_layers=self.num_layers,
                      goal_size=self.vae.code_size, normalize_inputs=self.normalize_inputs)
        self.policy = StochasticPolicy(**kwargs)
        self.v_module = Value(use_antigoal=False, **kwargs)
    def _make_agent(self):
        """Create the rollout agent that shares the VAE codebook embedding."""
        return DistanceStochasticAgent(env=self.create_env(), policy=self.policy, skill_n=self.vae.codebook_size,
                                       skill_embedding=self.vae.vq.embedding, vae=self.vae)
    def get_values(self, batch):
        """State values conditioned on each transition's embedded skill."""
        return self.v_module(
            batch['state'],
            self.preprocess_skill(batch['skill'])
        )
    def get_terminal_values(self, batch):
        """Bootstrap value at the final next-state of the trajectory."""
        return self.v_module(
            batch['next_state'][-1:],
            self.preprocess_skill(batch['skill'][-1:]),
        )
    def get_policy_lprobs_and_nents(self, batch):
        """Action log-probs (summed over action dims) and negative entropies."""
        log_prob, n_ent, _ = self.policy(
            batch['state'],
            self.preprocess_skill(batch['skill']),
            action_logit=batch['action_logit']
        )
        return log_prob.sum(dim=1), n_ent
class EDLSiblingRivalryLearner(BaseEDLSiblingRivalryLearner, EDLLearner):
    """EDL learner with Sibling Rivalry: value nets may condition on antigoals."""
    def __init__(self, **kwargs):
        self._parse_init_args(**kwargs)
        super().__init__(**kwargs)
    def _make_agent_modules(self):
        """Same as EDLLearner but the value net optionally uses antigoals."""
        self.vae = VQVAEDiscriminator(state_size=self._dummy_env.state_size, **self.vae_args)
        self.vae.load_checkpoint(self.vae_checkpoint_path)
        kwargs = dict(env=self._dummy_env, hidden_size=self.hidden_size, num_layers=self.num_layers,
                      goal_size=self.vae.code_size, normalize_inputs=self.normalize_inputs)
        self.policy = StochasticPolicy(**kwargs)
        self.v_module = Value(use_antigoal=self.use_antigoal, **kwargs)
    def _make_agent(self):
        """Rollout agent that also records 'antigoal' in its batches."""
        return SiblingRivalryStochasticAgent(env=self.create_env(), policy=self.policy, skill_n=self.vae.codebook_size,
                                             skill_embedding=self.vae.vq.embedding, vae=self.vae)
    def get_values(self, batch):
        """State values conditioned on skill and (optionally) antigoal."""
        return self.v_module(
            batch['state'],
            self.preprocess_skill(batch['skill']),
            batch.get('antigoal', None)
        )
    def get_terminal_values(self, batch):
        """Bootstrap value at the final next-state, keeping antigoal if present."""
        if 'antigoal' in batch:
            antigoal = batch['antigoal'][-1:]
        else:
            antigoal = None
        return self.v_module(
            batch['next_state'][-1:],
            self.preprocess_skill(batch['skill'][-1:]),
            antigoal
        )
79596bc321386986ab075c95ccc9797d98eca52e | 1,787 | py | Python | timings/benchmark_truncated_ek1_vs_ref_ek1_brusselator.py | pnkraemer/tornadox | 86a1d22ab53220627c7c504c937b0185f825db6f | [
"MIT"
] | 9 | 2022-02-18T14:39:25.000Z | 2022-03-10T11:56:42.000Z | timings/benchmark_truncated_ek1_vs_ref_ek1_brusselator.py | pnkraemer/tornadox | 86a1d22ab53220627c7c504c937b0185f825db6f | [
"MIT"
] | 2 | 2022-03-01T06:55:45.000Z | 2022-03-10T06:25:12.000Z | timings/benchmark_truncated_ek1_vs_ref_ek1_brusselator.py | pnkraemer/tornadox | 86a1d22ab53220627c7c504c937b0185f825db6f | [
"MIT"
] | null | null | null | """Does truncation in the EK1 do something for high-ish dimensional problems?"""
import jax.numpy as jnp
import tornadox
def solve(ivp, solver):
    """Run the solver to completion; return (final state, index of last step)."""
    for step_index, current_state in enumerate(solver.solution_generator(ivp=ivp)):
        pass
    return current_state, step_index
def error(m1, m2):
    """Check discrepancy between solutions: RMS of the elementwise
    relative difference of m2 against m1."""
    relative_diff = (m1 - m2) / m1
    return jnp.linalg.norm(relative_diff) / jnp.sqrt(m1.size)
# Set up the brusselator test problem
N = 20  # ode dimension will be d=2*N
bruss = tornadox.ivp.brusselator(N=N)
# Adaptive steps with medium/high accuracy
tolerance = 1e-5
first_dt = tornadox.step.propose_first_dt(ivp=bruss)
steps = tornadox.step.AdaptiveSteps(
    first_dt=first_dt, abstol=tolerance, reltol=tolerance
)
# Assemble both solvers: same order (nu derivatives) and step rule, so any
# runtime difference comes from truncation vs. the dense reference EK1.
nu = 4
truncated_solver = tornadox.ek1.TruncationEK1(
    num_derivatives=nu, ode_dimension=bruss.dimension, steprule=steps
)
reference_solver = tornadox.ek1.ReferenceEK1(
    num_derivatives=nu, ode_dimension=bruss.dimension, steprule=steps
)
truncated_solution, num_steps_trunc = solve(ivp=bruss, solver=truncated_solver)
reference_solution, num_steps_ref = solve(ivp=bruss, solver=reference_solver)
print("Number of steps:")
print(f"\tTruncated: {num_steps_trunc}")
print(f"\tReference: {num_steps_ref}")
# Check the outputs are roughly equal (truncated mean is stored in a
# different layout, hence the Fortran-order reshape).
refmean = reference_solution.y.mean
truncmean = truncated_solution.y.mean.reshape(refmean.shape, order="F")
print(f"Discrepancy: {error(truncmean, refmean)}")
assert jnp.allclose(refmean, truncmean, rtol=1e-2, atol=1e-2)
# %timeit solve(ivp=bruss, solver=truncated_solver)
# --> 7.94 s ± 970 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
# %timeit solve(ivp=bruss, solver=reference_solver)
# --> 21.5 s ± 1.8 s per loop (mean ± std. dev. of 7 runs, 1 loop each)
79596bf588fe650f132f18ac32ccd7693e857ed6 | 35,911 | py | Python | core/storage/user/gae_models_test.py | yashdusing/oppia | c0218e13ba29f9bc25cc5ec6f7f13108ee4fdb9a | [
"Apache-2.0"
] | null | null | null | core/storage/user/gae_models_test.py | yashdusing/oppia | c0218e13ba29f9bc25cc5ec6f7f13108ee4fdb9a | [
"Apache-2.0"
] | null | null | null | core/storage/user/gae_models_test.py | yashdusing/oppia | c0218e13ba29f9bc25cc5ec6f7f13108ee4fdb9a | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for core.storage.user.gae_models."""
import datetime
from core.domain import exp_domain
from core.domain import exp_services
from core.platform import models
from core.tests import test_utils
import feconf
(user_models,) = models.Registry.import_models([models.NAMES.user])
class UserSettingsModelTest(test_utils.GenericTestBase):
    """Tests for UserSettingsModel class."""
    user_id = 'user_id'
    user_email = 'user@example.com'
    user_role = feconf.ROLE_ID_ADMIN
    user2_email = 'user2@example.com'
    user2_role = feconf.ROLE_ID_BANNED_USER
    user3_email = 'user3@example.com'
    user3_role = feconf.ROLE_ID_ADMIN
    user3_id = 'user3_id'
    generic_username = 'user'
    generic_date = datetime.datetime(2019, 5, 20)
    generic_image_url = 'www.example.com/example.png'
    generic_user_bio = 'I am a user of Oppia!'
    generic_subject_interests = ['Math', 'Science']
    generic_language_codes = ['en', 'es']
    def setUp(self):
        """Seed four UserSettingsModel entries: a minimal one with an explicit
        id, two with auto-generated ids, and one fully-populated (user3_id).
        """
        super(UserSettingsModelTest, self).setUp()
        user_models.UserSettingsModel(
            id=self.user_id, email=self.user_email, role=self.user_role).put()
        user_models.UserSettingsModel(
            email=self.user2_email, role=self.user2_role).put()
        user_models.UserSettingsModel(
            email=self.user3_email, role=self.user3_role).put()
        user_models.UserSettingsModel(
            id=self.user3_id,
            email=self.user3_email,
            role=self.user3_role,
            username=self.generic_username,
            normalized_username=self.generic_username,
            last_agreed_to_terms=self.generic_date,
            last_started_state_editor_tutorial=self.generic_date,
            last_started_state_translation_tutorial=self.generic_date,
            last_logged_in=self.generic_date,
            last_created_an_exploration=self.generic_date,
            last_edited_an_exploration=self.generic_date,
            profile_picture_data_url=self.generic_image_url,
            default_dashboard='learner', creator_dashboard_display_pref='card',
            user_bio=self.generic_user_bio,
            subject_interests=self.generic_subject_interests,
            first_contribution_msec=1,
            preferred_language_codes=self.generic_language_codes,
            preferred_site_language_code=(self.generic_language_codes[0]),
            preferred_audio_language_code=(self.generic_language_codes[0])
        ).put()
    def test_get_by_role(self):
        """Querying by role should return users holding that role."""
        user = user_models.UserSettingsModel.get_by_role(
            feconf.ROLE_ID_ADMIN)
        self.assertEqual(user[0].role, feconf.ROLE_ID_ADMIN)
    def test_export_data_trivial(self):
        """export_data on a minimal user yields mostly-default fields."""
        user = user_models.UserSettingsModel.get_by_id(self.user_id)
        user_data = user.export_data(user.id)
        expected_user_data = {
            'email': 'user@example.com',
            'role': feconf.ROLE_ID_ADMIN,
            'username': None,
            'normalized_username': None,
            'last_agreed_to_terms': None,
            'last_started_state_editor_tutorial': None,
            'last_started_state_translation_tutorial': None,
            'last_logged_in': None,
            'last_edited_an_exploration': None,
            'profile_picture_data_url': None,
            'default_dashboard': 'learner',
            'creator_dashboard_display_pref': 'card',
            'user_bio': None,
            'subject_interests': [],
            'first_contribution_msec': None,
            'preferred_language_codes': [],
            'preferred_site_language_code': None,
            'preferred_audio_language_code': None
        }
        self.assertEqual(expected_user_data, user_data)
    def test_export_data_nontrivial(self):
        """export_data on a fully-populated user returns every stored field."""
        user = user_models.UserSettingsModel.get_by_id(self.user3_id)
        user_data = user.export_data(user.id)
        expected_user_data = {
            'email': self.user3_email,
            'role': feconf.ROLE_ID_ADMIN,
            'username': self.generic_username,
            'normalized_username': self.generic_username,
            'last_agreed_to_terms': self.generic_date,
            'last_started_state_editor_tutorial': self.generic_date,
            'last_started_state_translation_tutorial': self.generic_date,
            'last_logged_in': self.generic_date,
            'last_edited_an_exploration': self.generic_date,
            'profile_picture_data_url': self.generic_image_url,
            'default_dashboard': 'learner',
            'creator_dashboard_display_pref': 'card',
            'user_bio': self.generic_user_bio,
            'subject_interests': self.generic_subject_interests,
            'first_contribution_msec': 1,
            'preferred_language_codes': self.generic_language_codes,
            'preferred_site_language_code': self.generic_language_codes[0],
            'preferred_audio_language_code': self.generic_language_codes[0]
        }
        self.assertEqual(expected_user_data, user_data)
class StoryProgressModelTests(test_utils.GenericTestBase):
    """Tests for StoryProgressModel class."""
    def test_get_multi(self):
        """get_multi should return one model per requested story id, in order."""
        model = user_models.StoryProgressModel.create(
            'user_id', 'story_id_1')
        model.put()
        model = user_models.StoryProgressModel.create(
            'user_id', 'story_id_2')
        model.put()
        story_progress_models = user_models.StoryProgressModel.get_multi(
            'user_id', ['story_id_1', 'story_id_2'])
        self.assertEqual(len(story_progress_models), 2)
        self.assertEqual(story_progress_models[0].user_id, 'user_id')
        self.assertEqual(story_progress_models[0].story_id, 'story_id_1')
        self.assertEqual(story_progress_models[1].user_id, 'user_id')
        self.assertEqual(story_progress_models[1].story_id, 'story_id_2')
class ExpUserLastPlaythroughModelTest(test_utils.GenericTestBase):
    """Tests for ExpUserLastPlaythroughModel class."""
    USER_ID = 'user_id'
    EXP_ID_0 = 'exp_id_0'
    EXP_ID_1 = 'exp_id_1'
    def setUp(self):
        """Seed one playthrough record keyed as '<user_id>.<exp_id>'."""
        super(ExpUserLastPlaythroughModelTest, self).setUp()
        user_models.ExpUserLastPlaythroughModel(
            id='%s.%s' % (self.USER_ID, self.EXP_ID_0), user_id=self.USER_ID,
            exploration_id=self.EXP_ID_0, last_played_exp_version=1,
            last_played_state_name='state_name').put()
    def test_create_success(self):
        """create() builds a record retrievable under the composite id."""
        user_models.ExpUserLastPlaythroughModel.create(
            self.USER_ID, self.EXP_ID_1).put()
        retrieved_object = user_models.ExpUserLastPlaythroughModel.get_by_id(
            '%s.%s' % (self.USER_ID, self.EXP_ID_1))
        self.assertEqual(retrieved_object.user_id, self.USER_ID)
        self.assertEqual(retrieved_object.exploration_id, self.EXP_ID_1)
    def test_get_success(self):
        """get() returns the seeded record with all stored fields."""
        retrieved_object = user_models.ExpUserLastPlaythroughModel.get(
            self.USER_ID, self.EXP_ID_0)
        self.assertEqual(retrieved_object.user_id, self.USER_ID)
        self.assertEqual(retrieved_object.exploration_id, self.EXP_ID_0)
        self.assertEqual(retrieved_object.last_played_exp_version, 1)
        self.assertEqual(retrieved_object.last_played_state_name, 'state_name')
    def test_get_failure(self):
        """get() yields None for an exploration with no playthrough record."""
        retrieved_object = user_models.ExpUserLastPlaythroughModel.get(
            self.USER_ID, 'unknown_exp_id')
        self.assertEqual(retrieved_object, None)
class UserStatsModelTest(test_utils.GenericTestBase):
    """Tests for the UserStatsModel class."""
    # USER_ID_1 and USER_ID_2 get stats models in setUp; USER_ID_3 is
    # deliberately never stored, for the nonexistent-user test.
    USER_ID_1 = 1
    USER_ID_2 = 2
    USER_ID_3 = 3
    USER_1_IMPACT_SCORE = 0.87
    USER_1_TOTAL_PLAYS = 33
    USER_1_AVERAGE_RATINGS = 4.37
    USER_1_NUM_RATINGS = 22
    # One entry per week, keyed by a date string, holding that week's
    # aggregate ratings and play counts.
    USER_1_WEEKLY_CREATOR_STATS_LIST = [
        {
            ('2019-05-21'): {
                'average_ratings': 4.00,
                'total_plays': 5
            }
        },
        {
            ('2019-05-28'): {
                'average_ratings': 4.95,
                'total_plays': 10
            }
        }
    ]
    USER_2_IMPACT_SCORE = 0.33
    USER_2_TOTAL_PLAYS = 15
    USER_2_AVERAGE_RATINGS = 2.50
    USER_2_NUM_RATINGS = 10
    USER_2_WEEKLY_CREATOR_STATS_LIST = [
        {
            ('2019-05-21'): {
                'average_ratings': 2.50,
                'total_plays': 4
            }
        },
        {
            ('2019-05-28'): {
                'average_ratings': 2.50,
                'total_plays': 6
            }
        }
    ]
    def setUp(self):
        """Set up user models in datastore for use in testing."""
        super(UserStatsModelTest, self).setUp()
        user_model_1 = user_models.UserStatsModel(id=self.USER_ID_1)
        user_model_1.impact_score = self.USER_1_IMPACT_SCORE
        user_model_1.total_plays = self.USER_1_TOTAL_PLAYS
        user_model_1.average_ratings = self.USER_1_AVERAGE_RATINGS
        user_model_1.num_ratings = self.USER_1_NUM_RATINGS
        user_model_1.weekly_creator_stats_list = (
            self.USER_1_WEEKLY_CREATOR_STATS_LIST)
        user_models.UserStatsModel.put(user_model_1)
        user_model_2 = user_models.UserStatsModel(id=self.USER_ID_2)
        user_model_2.impact_score = self.USER_2_IMPACT_SCORE
        user_model_2.total_plays = self.USER_2_TOTAL_PLAYS
        user_model_2.average_ratings = self.USER_2_AVERAGE_RATINGS
        user_model_2.num_ratings = self.USER_2_NUM_RATINGS
        user_model_2.weekly_creator_stats_list = (
            self.USER_2_WEEKLY_CREATOR_STATS_LIST)
        user_models.UserStatsModel.put(user_model_2)
    def test_export_data_on_existing_user(self):
        """Test if export_data works when user is in data store."""
        user_data = user_models.UserStatsModel.export_data(self.USER_ID_1)
        test_data = {
            'impact_score': self.USER_1_IMPACT_SCORE,
            'total_plays': self.USER_1_TOTAL_PLAYS,
            'average_ratings': self.USER_1_AVERAGE_RATINGS,
            'num_ratings': self.USER_1_NUM_RATINGS,
            'weekly_creator_stats_list': self.USER_1_WEEKLY_CREATOR_STATS_LIST
        }
        self.assertEqual(user_data, test_data)
    def test_export_data_on_multiple_users(self):
        """Test if export_data works on multiple users in data store."""
        user_1_data = user_models.UserStatsModel.export_data(self.USER_ID_1)
        test_1_data = {
            'impact_score': self.USER_1_IMPACT_SCORE,
            'total_plays': self.USER_1_TOTAL_PLAYS,
            'average_ratings': self.USER_1_AVERAGE_RATINGS,
            'num_ratings': self.USER_1_NUM_RATINGS,
            'weekly_creator_stats_list': self.USER_1_WEEKLY_CREATOR_STATS_LIST
        }
        user_2_data = user_models.UserStatsModel.export_data(self.USER_ID_2)
        test_2_data = {
            'impact_score': self.USER_2_IMPACT_SCORE,
            'total_plays': self.USER_2_TOTAL_PLAYS,
            'average_ratings': self.USER_2_AVERAGE_RATINGS,
            'num_ratings': self.USER_2_NUM_RATINGS,
            'weekly_creator_stats_list': self.USER_2_WEEKLY_CREATOR_STATS_LIST
        }
        self.assertEqual(user_1_data, test_1_data)
        self.assertEqual(user_2_data, test_2_data)
    def test_export_data_on_nonexistent_user(self):
        """Test if export_data returns None when user is not in data store."""
        user_data = user_models.UserStatsModel.export_data(self.USER_ID_3)
        test_data = None
        self.assertEqual(user_data, test_data)
class ExplorationUserDataModelTest(test_utils.GenericTestBase):
    """Tests for the ExplorationUserDataModel class."""
    DATETIME_OBJECT = datetime.datetime.strptime('2016-02-16', '%Y-%m-%d')
    # Model ids are composed as '<user_id>.<exploration_id>'.
    USER_ID = 'user_id'
    EXP_ID_ONE = 'exp_id_one'
    EXP_ID_TWO = 'exp_id_two'
    EXP_ID_THREE = 'exp_id_three'
    def setUp(self):
        """Store one fully-populated model for EXP_ID_ONE."""
        super(ExplorationUserDataModelTest, self).setUp()
        user_models.ExplorationUserDataModel(
            id='%s.%s' % (self.USER_ID, self.EXP_ID_ONE), user_id=self.USER_ID,
            exploration_id=self.EXP_ID_ONE, rating=2,
            rated_on=self.DATETIME_OBJECT,
            draft_change_list={'new_content': {}},
            draft_change_list_last_updated=self.DATETIME_OBJECT,
            draft_change_list_exp_version=3,
            draft_change_list_id=1).put()
    def test_create_success(self):
        """create() should build a model retrievable by its composite id."""
        user_models.ExplorationUserDataModel.create(
            self.USER_ID, self.EXP_ID_TWO).put()
        retrieved_object = user_models.ExplorationUserDataModel.get_by_id(
            '%s.%s' % (self.USER_ID, self.EXP_ID_TWO))
        self.assertEqual(retrieved_object.user_id, self.USER_ID)
        self.assertEqual(retrieved_object.exploration_id, self.EXP_ID_TWO)
    def test_get_success(self):
        """get() should return the model stored in setUp, field by field."""
        retrieved_object = user_models.ExplorationUserDataModel.get(
            self.USER_ID, self.EXP_ID_ONE)
        self.assertEqual(retrieved_object.user_id, self.USER_ID)
        self.assertEqual(retrieved_object.exploration_id, self.EXP_ID_ONE)
        self.assertEqual(retrieved_object.rating, 2)
        self.assertEqual(retrieved_object.rated_on, self.DATETIME_OBJECT)
        self.assertEqual(
            retrieved_object.draft_change_list, {'new_content': {}})
        self.assertEqual(
            retrieved_object.draft_change_list_last_updated,
            self.DATETIME_OBJECT)
        self.assertEqual(retrieved_object.draft_change_list_exp_version, 3)
        self.assertEqual(retrieved_object.draft_change_list_id, 1)
    def test_get_failure(self):
        """get() should return None for an unknown exploration id."""
        retrieved_object = user_models.ExplorationUserDataModel.get(
            self.USER_ID, 'unknown_exp_id')
        self.assertEqual(retrieved_object, None)
    def test_export_data_one_exploration(self):
        """Test export data when user has one exploration."""
        user_data = user_models.ExplorationUserDataModel.export_data(
            self.USER_ID)
        expected_data = {
            self.EXP_ID_ONE: {
                'rating': 2,
                'rated_on': self.DATETIME_OBJECT,
                'draft_change_list': {'new_content': {}},
                'draft_change_list_last_updated': self.DATETIME_OBJECT,
                'draft_change_list_exp_version': 3,
                'draft_change_list_id': 1,
                'mute_suggestion_notifications': (
                    feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE),
                # NOTE(review): this also uses the SUGGESTION default
                # constant -- confirm there is no separate feedback
                # default that should be used here instead.
                'mute_feedback_notifications': (
                    feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE)
            }
        }
        self.assertDictEqual(expected_data, user_data)
    def test_export_data_multiple_explorations(self):
        """Test export data when user has multiple explorations."""
        # Add two more explorations.
        user_models.ExplorationUserDataModel.create(
            self.USER_ID, self.EXP_ID_TWO).put()
        user_models.ExplorationUserDataModel(
            id='%s.%s' % (self.USER_ID, self.EXP_ID_THREE),
            user_id=self.USER_ID,
            exploration_id=self.EXP_ID_THREE, rating=5,
            rated_on=self.DATETIME_OBJECT,
            draft_change_list={'new_content': {'content': 3}},
            draft_change_list_last_updated=self.DATETIME_OBJECT,
            draft_change_list_exp_version=2,
            draft_change_list_id=2).put()
        user_data = user_models.ExplorationUserDataModel.export_data(
            self.USER_ID)
        # EXP_ID_TWO was created with bare create(), so its optional
        # fields fall back to None / 0 in the export.
        expected_data = {
            self.EXP_ID_ONE: {
                'rating': 2,
                'rated_on': self.DATETIME_OBJECT,
                'draft_change_list': {'new_content': {}},
                'draft_change_list_last_updated': self.DATETIME_OBJECT,
                'draft_change_list_exp_version': 3,
                'draft_change_list_id': 1,
                'mute_suggestion_notifications': (
                    feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE),
                'mute_feedback_notifications': (
                    feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE)
            },
            self.EXP_ID_TWO: {
                'rating': None,
                'rated_on': None,
                'draft_change_list': None,
                'draft_change_list_last_updated': None,
                'draft_change_list_exp_version': None,
                'draft_change_list_id': 0,
                'mute_suggestion_notifications': (
                    feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE),
                'mute_feedback_notifications': (
                    feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE)
            },
            self.EXP_ID_THREE: {
                'rating': 5,
                'rated_on': self.DATETIME_OBJECT,
                'draft_change_list': {'new_content': {'content': 3}},
                'draft_change_list_last_updated': self.DATETIME_OBJECT,
                'draft_change_list_exp_version': 2,
                'draft_change_list_id': 2,
                'mute_suggestion_notifications': (
                    feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE),
                'mute_feedback_notifications': (
                    feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE)
            }
        }
        self.assertDictEqual(expected_data, user_data)
class UserQueryModelTests(test_utils.GenericTestBase):
    """Tests for UserQueryModel."""
    def test_instance_stores_correct_data(self):
        """Every constructor keyword should round-trip through get()."""
        submitter_id = 'submitter'
        query_id = 'qid'
        inactive_in_last_n_days = 5
        created_at_least_n_exps = 1
        created_fewer_than_n_exps = 3
        edited_at_least_n_exps = 2
        edited_fewer_than_n_exps = 5
        has_not_logged_in_for_n_days = 10
        user_models.UserQueryModel(
            id=query_id,
            inactive_in_last_n_days=inactive_in_last_n_days,
            created_at_least_n_exps=created_at_least_n_exps,
            created_fewer_than_n_exps=created_fewer_than_n_exps,
            edited_at_least_n_exps=edited_at_least_n_exps,
            edited_fewer_than_n_exps=edited_fewer_than_n_exps,
            has_not_logged_in_for_n_days=has_not_logged_in_for_n_days,
            submitter_id=submitter_id).put()
        query_model = user_models.UserQueryModel.get(query_id)
        self.assertEqual(query_model.submitter_id, submitter_id)
        self.assertEqual(
            query_model.inactive_in_last_n_days, inactive_in_last_n_days)
        self.assertEqual(
            query_model.has_not_logged_in_for_n_days,
            has_not_logged_in_for_n_days)
        self.assertEqual(
            query_model.created_at_least_n_exps, created_at_least_n_exps)
        self.assertEqual(
            query_model.created_fewer_than_n_exps, created_fewer_than_n_exps)
        self.assertEqual(
            query_model.edited_at_least_n_exps, edited_at_least_n_exps)
        self.assertEqual(
            query_model.edited_fewer_than_n_exps, edited_fewer_than_n_exps)
    def test_fetch_page(self):
        """fetch_page should respect the page size; the assertions below
        expect qid_2 (stored last) to be returned first.
        """
        submitter_id = 'submitter_1'
        query_id = 'qid_1'
        inactive_in_last_n_days = 5
        created_at_least_n_exps = 1
        created_fewer_than_n_exps = 3
        edited_at_least_n_exps = 2
        edited_fewer_than_n_exps = 5
        has_not_logged_in_for_n_days = 10
        user_models.UserQueryModel(
            id=query_id,
            inactive_in_last_n_days=inactive_in_last_n_days,
            created_at_least_n_exps=created_at_least_n_exps,
            created_fewer_than_n_exps=created_fewer_than_n_exps,
            edited_at_least_n_exps=edited_at_least_n_exps,
            edited_fewer_than_n_exps=edited_fewer_than_n_exps,
            has_not_logged_in_for_n_days=has_not_logged_in_for_n_days,
            submitter_id=submitter_id).put()
        submitter_id = 'submitter_2'
        query_id = 'qid_2'
        inactive_in_last_n_days = 6
        created_at_least_n_exps = 7
        created_fewer_than_n_exps = 4
        edited_at_least_n_exps = 3
        edited_fewer_than_n_exps = 6
        has_not_logged_in_for_n_days = 11
        user_models.UserQueryModel(
            id=query_id,
            inactive_in_last_n_days=inactive_in_last_n_days,
            created_at_least_n_exps=created_at_least_n_exps,
            created_fewer_than_n_exps=created_fewer_than_n_exps,
            edited_at_least_n_exps=edited_at_least_n_exps,
            edited_fewer_than_n_exps=edited_fewer_than_n_exps,
            has_not_logged_in_for_n_days=has_not_logged_in_for_n_days,
            submitter_id=submitter_id).put()
        # Fetch only one entity.
        query_models, _, _ = user_models.UserQueryModel.fetch_page(
            1, None)
        self.assertEqual(len(query_models), 1)
        self.assertEqual(query_models[0].submitter_id, 'submitter_2')
        self.assertEqual(query_models[0].id, 'qid_2')
        self.assertEqual(query_models[0].inactive_in_last_n_days, 6)
        self.assertEqual(query_models[0].created_at_least_n_exps, 7)
        self.assertEqual(query_models[0].created_fewer_than_n_exps, 4)
        self.assertEqual(query_models[0].edited_at_least_n_exps, 3)
        self.assertEqual(query_models[0].edited_fewer_than_n_exps, 6)
        self.assertEqual(query_models[0].has_not_logged_in_for_n_days, 11)
        # Fetch both entities.
        query_models, _, _ = user_models.UserQueryModel.fetch_page(
            2, None)
        self.assertEqual(len(query_models), 2)
        self.assertEqual(query_models[0].submitter_id, 'submitter_2')
        self.assertEqual(query_models[0].id, 'qid_2')
        self.assertEqual(query_models[0].inactive_in_last_n_days, 6)
        self.assertEqual(query_models[0].created_at_least_n_exps, 7)
        self.assertEqual(query_models[0].created_fewer_than_n_exps, 4)
        self.assertEqual(query_models[0].edited_at_least_n_exps, 3)
        self.assertEqual(query_models[0].edited_fewer_than_n_exps, 6)
        self.assertEqual(query_models[0].has_not_logged_in_for_n_days, 11)
        self.assertEqual(query_models[1].submitter_id, 'submitter_1')
        self.assertEqual(query_models[1].id, 'qid_1')
        self.assertEqual(query_models[1].inactive_in_last_n_days, 5)
        self.assertEqual(query_models[1].created_at_least_n_exps, 1)
        self.assertEqual(query_models[1].created_fewer_than_n_exps, 3)
        self.assertEqual(query_models[1].edited_at_least_n_exps, 2)
        self.assertEqual(query_models[1].edited_fewer_than_n_exps, 5)
        self.assertEqual(query_models[1].has_not_logged_in_for_n_days, 10)
class UserSkillMasteryModelTests(test_utils.GenericTestBase):
    """Tests for UserSkillMasteryModel."""
    USER_ID = 'user_id'
    SKILL_ID_1 = 'skill_id_1'
    SKILL_ID_2 = 'skill_id_2'
    DEGREE_OF_MASTERY = 0.5
    def setUp(self):
        """Store one mastery model per skill for the same user."""
        super(UserSkillMasteryModelTests, self).setUp()
        user_models.UserSkillMasteryModel(
            id=user_models.UserSkillMasteryModel.construct_model_id(
                self.USER_ID, self.SKILL_ID_1),
            user_id=self.USER_ID,
            skill_id=self.SKILL_ID_1,
            degree_of_mastery=self.DEGREE_OF_MASTERY).put()
        user_models.UserSkillMasteryModel(
            id=user_models.UserSkillMasteryModel.construct_model_id(
                self.USER_ID, self.SKILL_ID_2),
            user_id=self.USER_ID,
            skill_id=self.SKILL_ID_2,
            degree_of_mastery=self.DEGREE_OF_MASTERY).put()
    def test_construct_model_id(self):
        """Model ids should follow the '<user_id>.<skill_id>' format."""
        constructed_model_id = (
            user_models.UserSkillMasteryModel.construct_model_id(
                self.USER_ID, self.SKILL_ID_1))
        self.assertEqual(constructed_model_id, 'user_id.skill_id_1')
    def test_get_success(self):
        """get() should return the stored model for a constructed id."""
        constructed_model_id = (
            user_models.UserSkillMasteryModel.construct_model_id(
                self.USER_ID, self.SKILL_ID_1))
        retrieved_object = user_models.UserSkillMasteryModel.get(
            constructed_model_id)
        self.assertEqual(retrieved_object.user_id, 'user_id')
        self.assertEqual(retrieved_object.skill_id, 'skill_id_1')
        self.assertEqual(retrieved_object.degree_of_mastery, 0.5)
    def test_get_failure(self):
        """Non-strict get() should return None for an unknown id."""
        retrieved_object = user_models.UserSkillMasteryModel.get(
            'unknown_model_id', strict=False)
        self.assertEqual(retrieved_object, None)
    def test_get_multi_success(self):
        """get_multi() should return the stored models, in request order."""
        skill_ids = [
            user_models.UserSkillMasteryModel.construct_model_id(
                self.USER_ID, self.SKILL_ID_1),
            user_models.UserSkillMasteryModel.construct_model_id(
                self.USER_ID, self.SKILL_ID_2)]
        retrieved_object = user_models.UserSkillMasteryModel.get_multi(
            skill_ids)
        self.assertEqual(retrieved_object[0].user_id, 'user_id')
        self.assertEqual(retrieved_object[0].skill_id, 'skill_id_1')
        self.assertEqual(retrieved_object[0].degree_of_mastery, 0.5)
        self.assertEqual(retrieved_object[1].user_id, 'user_id')
        self.assertEqual(retrieved_object[1].skill_id, 'skill_id_2')
        self.assertEqual(retrieved_object[1].degree_of_mastery, 0.5)
    def test_get_multi_failure(self):
        """get_multi() should return None for each unknown id."""
        skill_ids = ['unknown_model_id_1', 'unknown_model_id_2']
        retrieved_object = user_models.UserSkillMasteryModel.get_multi(
            skill_ids)
        self.assertEqual(retrieved_object, [None, None])
class UserContributionsScoringModelTests(test_utils.GenericTestBase):
    """Tests for UserContributionScoringModel."""
    def test_create_model(self):
        """create() should store a score keyed as '<category>.<user_id>'."""
        user_models.UserContributionScoringModel.create('user1', 'category1', 1)
        score_models = (user_models.UserContributionScoringModel
                        .get_all_scores_of_user('user1'))
        self.assertEqual(len(score_models), 1)
        self.assertEqual(score_models[0].id, 'category1.user1')
        self.assertEqual(score_models[0].user_id, 'user1')
        self.assertEqual(score_models[0].score_category, 'category1')
        self.assertEqual(score_models[0].score, 1)
    def test_create_entry_already_exists_failure(self):
        """Creating the same (user, category) pair twice should raise."""
        user_models.UserContributionScoringModel.create('user1', 'category1', 1)
        with self.assertRaisesRegexp(
            Exception, 'There is already an entry with the given id:'
                       ' category1.user1'):
            user_models.UserContributionScoringModel.create(
                'user1', 'category1', 2)
    def test_get_all_users_with_score_above_minimum_for_category(self):
        """Only sufficiently high scores qualify: in the data below, the
        entries with scores 11 and 21 are returned, the ones with 1 are not.
        """
        user_models.UserContributionScoringModel.create('user1', 'category1', 1)
        user_models.UserContributionScoringModel.create(
            'user2', 'category1', 21)
        user_models.UserContributionScoringModel.create(
            'user3', 'category1', 11)
        user_models.UserContributionScoringModel.create(
            'user4', 'category1', 11)
        user_models.UserContributionScoringModel.create(
            'user1', 'category2', 11)
        user_models.UserContributionScoringModel.create('user2', 'category2', 1)
        user_models.UserContributionScoringModel.create('user3', 'category2', 1)
        user_models.UserContributionScoringModel.create('user4', 'category2', 1)
        score_models = (user_models.UserContributionScoringModel
                        .get_all_users_with_score_above_minimum_for_category(
                            'category1'))
        self.assertEqual(len(score_models), 3)
        self.assertIn(user_models.UserContributionScoringModel.get_by_id(
            'category1.user2'), score_models)
        self.assertIn(user_models.UserContributionScoringModel.get_by_id(
            'category1.user3'), score_models)
        self.assertIn(user_models.UserContributionScoringModel.get_by_id(
            'category1.user4'), score_models)
        score_models = (user_models.UserContributionScoringModel
                        .get_all_users_with_score_above_minimum_for_category(
                            'category2'))
        self.assertEqual(len(score_models), 1)
        self.assertIn(user_models.UserContributionScoringModel.get_by_id(
            'category2.user1'), score_models)
    def test_get_score_of_user_for_category(self):
        """The stored score should be returned for a (user, category) pair."""
        user_models.UserContributionScoringModel.create('user1', 'category1', 1)
        score = (user_models.UserContributionScoringModel
                 .get_score_of_user_for_category('user1', 'category1'))
        self.assertEqual(score, 1)
    def test_increment_score_for_user(self):
        """increment_score_for_user should add the delta to the stored score."""
        user_models.UserContributionScoringModel.create('user1', 'category1', 1)
        user_models.UserContributionScoringModel.increment_score_for_user(
            'user1', 'category1', 2)
        score = (user_models.UserContributionScoringModel
                 .get_score_of_user_for_category('user1', 'category1'))
        self.assertEqual(score, 3)
    def test_get_all_scores_of_user(self):
        """All of a user's per-category score models should be returned."""
        user_models.UserContributionScoringModel.create('user1', 'category1', 1)
        user_models.UserContributionScoringModel.create('user1', 'category2', 1)
        user_models.UserContributionScoringModel.create('user1', 'category3', 1)
        score_models = (user_models.UserContributionScoringModel
                        .get_all_scores_of_user('user1'))
        self.assertEqual(len(score_models), 3)
        self.assertIn(user_models.UserContributionScoringModel.get_by_id(
            'category1.user1'), score_models)
        self.assertIn(user_models.UserContributionScoringModel.get_by_id(
            'category2.user1'), score_models)
        self.assertIn(user_models.UserContributionScoringModel.get_by_id(
            'category3.user1'), score_models)
    def test_get_categories_where_user_can_review(self):
        """Only categories where the score is high enough (15 here, not 1)
        should be returned as reviewable.
        """
        user_models.UserContributionScoringModel.create(
            'user1', 'category1', 15)
        user_models.UserContributionScoringModel.create('user1', 'category2', 1)
        user_models.UserContributionScoringModel.create(
            'user1', 'category3', 15)
        score_categories = (
            user_models.UserContributionScoringModel
            .get_all_categories_where_user_can_review('user1'))
        self.assertIn('category1', score_categories)
        self.assertIn('category3', score_categories)
        self.assertNotIn('category2', score_categories)
class UserSubscriptionsModelTests(test_utils.GenericTestBase):
    """Tests for UserSubscriptionsModel."""
    # USER_ID_1 gets an empty subscriptions model, USER_ID_2 a populated
    # one, and USER_ID_3 none at all (for the error-path test).
    USER_ID_1 = 'user_id_1'
    USER_ID_2 = 'user_id_2'
    USER_ID_3 = 'user_id_3'
    CREATOR_IDS = ['4', '8', '16']
    COLLECTION_IDS = ['23', '42', '4']
    ACTIVITY_IDS = ['8', '16', '23']
    GENERAL_FEEDBACK_THREAD_IDS = ['42', '4', '8']
    def setUp(self):
        """Set up user models in datastore for use in testing."""
        super(UserSubscriptionsModelTests, self).setUp()
        user_models.UserSubscriptionsModel(id=self.USER_ID_1).put()
        user_models.UserSubscriptionsModel(
            id=self.USER_ID_2, creator_ids=self.CREATOR_IDS,
            collection_ids=self.COLLECTION_IDS,
            activity_ids=self.ACTIVITY_IDS,
            general_feedback_thread_ids=self.GENERAL_FEEDBACK_THREAD_IDS).put()
    def test_export_data_trivial(self):
        """Test if empty user data is properly exported."""
        user_data = (
            user_models.UserSubscriptionsModel.export_data(self.USER_ID_1))
        test_data = {
            'creator_ids': [],
            'collection_ids': [],
            'activity_ids': [],
            'general_feedback_thread_ids': [],
            'last_checked': None
        }
        self.assertEqual(user_data, test_data)
    def test_export_data_nontrivial(self):
        """Test if nonempty user data is properly exported."""
        user_data = (
            user_models.UserSubscriptionsModel.export_data(self.USER_ID_2))
        test_data = {
            'creator_ids': self.CREATOR_IDS,
            'collection_ids': self.COLLECTION_IDS,
            'activity_ids': self.ACTIVITY_IDS,
            'general_feedback_thread_ids': self.GENERAL_FEEDBACK_THREAD_IDS,
            'last_checked': None
        }
        self.assertEqual(user_data, test_data)
    def test_export_data_on_nonexistent_user(self):
        """Test if exception is raised on nonexistent UserSubscriptionsModel."""
        export_data_exception = (
            self.assertRaisesRegexp(
                Exception, 'UserSubscriptionsModel does not exist.'))
        with export_data_exception:
            user_models.UserSubscriptionsModel.export_data(self.USER_ID_3)
class UserContributionsModelTests(test_utils.GenericTestBase):
    """Tests for the UserContributionsModel class."""
    # This id is never signed up, so export_data should find no model.
    NONEXISTENT_USER_ID = 'id_x'
    USER_A_EMAIL = 'a@example.com'
    USER_B_EMAIL = 'b@example.com'
    USER_A_USERNAME = 'a'
    USER_B_USERNAME = 'b'
    EXP_ID_1 = 'exp_1'
    EXP_ID_2 = 'exp_2'
    def setUp(self):
        """Set up user models in datastore for use in testing."""
        super(UserContributionsModelTests, self).setUp()
        # User A has no created explorations, one edited exploration.
        # User B has two created and edited explorations.
        self.signup(self.USER_A_EMAIL, self.USER_A_USERNAME)
        self.user_a_id = self.get_user_id_from_email(self.USER_A_EMAIL)
        self.signup(self.USER_B_EMAIL, self.USER_B_USERNAME)
        self.user_b_id = self.get_user_id_from_email(self.USER_B_EMAIL)
        # Note that creating an exploration counts as editing it.
        self.save_new_valid_exploration(
            self.EXP_ID_1, self.user_b_id, end_state_name='End')
        exp_services.update_exploration(
            self.user_a_id, self.EXP_ID_1, [exp_domain.ExplorationChange({
                'cmd': 'edit_exploration_property',
                'property_name': 'objective',
                'new_value': 'the objective'
            })], 'Test edit')
        self.save_new_valid_exploration(
            self.EXP_ID_2, self.user_b_id, end_state_name='End')
        exp_services.update_exploration(
            self.user_a_id, self.EXP_ID_2, [exp_domain.ExplorationChange({
                'cmd': 'edit_exploration_property',
                'property_name': 'objective',
                'new_value': 'the objective'
            })], 'Test edit')
    def test_export_data_on_nonexistent_user(self):
        """Test if export_data returns None when user is not in datastore."""
        user_data = user_models.UserContributionsModel.export_data(
            self.NONEXISTENT_USER_ID)
        self.assertEqual(None, user_data)
    def test_export_data_on_partially_involved_user(self):
        """Test export_data on user with no creations and two edits."""
        user_data = user_models.UserContributionsModel.export_data(
            self.user_a_id)
        expected_data = {
            'created_exploration_ids': [],
            'edited_exploration_ids': [self.EXP_ID_1, self.EXP_ID_2]
        }
        self.assertEqual(expected_data, user_data)
    def test_export_data_on_highly_involved_user(self):
        """Test export data on user with two creations and two edits."""
        user_data = user_models.UserContributionsModel.export_data(
            self.user_b_id)
        expected_data = {
            'created_exploration_ids': [self.EXP_ID_1, self.EXP_ID_2],
            'edited_exploration_ids': [self.EXP_ID_1, self.EXP_ID_2]
        }
        self.assertEqual(expected_data, user_data)
| 42.347877 | 80 | 0.673192 |
79596bfb0b1f28b882f9d92293d2eb6f7ec25062 | 258 | py | Python | website/urls.py | bartels/satchless | 4d333014333dc4fd5815f9e0bbea565959919a30 | [
"BSD-4-Clause"
] | 1 | 2015-11-05T05:09:27.000Z | 2015-11-05T05:09:27.000Z | website/urls.py | bartels/satchless | 4d333014333dc4fd5815f9e0bbea565959919a30 | [
"BSD-4-Clause"
] | null | null | null | website/urls.py | bartels/satchless | 4d333014333dc4fd5815f9e0bbea565959919a30 | [
"BSD-4-Clause"
] | null | null | null | from django.conf.urls.defaults import *
urlpatterns = patterns('',
url(r'^$', 'django.views.generic.simple.direct_to_template', {'template': 'index.html'}),
url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': 'static/'}),
)
| 36.857143 | 93 | 0.658915 |
79596ccf0eeac21507b24c47c435958283cdf307 | 6,410 | py | Python | scrapthor/maping.py | walogo/web_scrap | 909660c096b3e0e931c2b6b37bfc971650123fa0 | [
"MIT"
] | null | null | null | scrapthor/maping.py | walogo/web_scrap | 909660c096b3e0e931c2b6b37bfc971650123fa0 | [
"MIT"
] | 1 | 2018-12-05T20:48:46.000Z | 2018-12-05T22:45:00.000Z | scrapthor/maping.py | walogo/web_scrap | 909660c096b3e0e931c2b6b37bfc971650123fa0 | [
"MIT"
] | null | null | null | from asyncio import sleep
from os import mkdir, getcwd
from os.path import join
from sys import platform
from .logger import logger
from .criptografia import fingerprint
from .descargar import _extraer_links, aio_descargar, filtro, _crear_archivo, Reglas
from csv import writer
# Select the asyncio event loop implementation used for the crawl:
# ProactorEventLoop on Windows, otherwise the platform default loop.
if platform == 'win32':
    from asyncio import ProactorEventLoop
    _loop = ProactorEventLoop()
else:
    from asyncio import get_event_loop
    _loop = get_event_loop()
# Save every link found during a crawl into 'lista_links.txt'
# (one link per line) inside the given directory.
def _guardar_links(directorio, links):
    """Write all discovered links to 'lista_links.txt' in *directorio*.

    Args:
        directorio: Directory in which the file is created.
        links: Iterable of link strings; one link is written per line.
    """
    # join() consumes the iterable directly (the original wrapped it in a
    # pointless generator expression), and the ``with`` block already
    # closes the file, so the explicit close() was redundant.
    with open(join(directorio, 'lista_links.txt'), 'w') as archivo:
        archivo.write('\n'.join(links))
# Save the collected (file name, hash) pairs into 'hashes.csv' inside
# the given directory, with a header row.
def _guardar_hashes(directorio, hashes):
    """Write the (file name, hash) pairs to 'hashes.csv' in *directorio*.

    Args:
        directorio: Directory in which the file is created.
        hashes: Iterable of [file_name, hash] rows.
    """
    # newline='' is required by the csv module so the writer's own row
    # terminator is not doubled on Windows; the original also shadowed
    # the builtin ``hash`` in its write loop and closed the file twice.
    with open(join(directorio, 'hashes.csv'), 'w', newline='') as archivo:
        escritor = writer(archivo)
        escritor.writerow(['Archivo', 'Hash'])
        escritor.writerows(hashes)
# Create the directory if possible; any failure (typically "already
# exists") is only logged, never raised.
def crear_mantener_directorio(directorio, loger):
    """Attempt to create *directorio*, logging any failure via *loger*."""
    try:
        mkdir(directorio)
    except Exception as error:
        loger.logear(error)
# Build the path "<directorio>/<archivo>"; yields None when no base
# directory is given.
def conseguir_directorio(directorio, archivo):
    """Return the path of *archivo* inside *directorio*.

    Args:
        directorio: Base directory, or None.
        archivo: File (or subdirectory) name to append.

    Returns:
        The joined path, or None when *directorio* is None.
    """
    # PEP 8: test identity against None, not equality; the None return
    # is now explicit instead of falling off the end of the function.
    if directorio is not None:
        return join(directorio, archivo)
    return None
# Download-and-fingerprint helper: unless the (name, content) pair is
# rejected by filtro(), optionally write the file into *directorio* and
# append [nombre, hash] to *hashes*. The list is always returned.
def hash_y_archivo(hashes, nombre, contenido, directorio, parametros, descargar_archivos, guardar_hashes, loger,
                   reglas):
    """Optionally save *contenido* as a file and record its fingerprint.

    Args:
        hashes: Accumulator list of [file_name, hash] pairs.
        nombre: File name for the downloaded content.
        contenido: Raw bytes of the downloaded resource.
        directorio: Directory in which the file would be created.
        parametros: Filter parameters forwarded to filtro().
        descargar_archivos: Whether to write the file to disk.
        guardar_hashes: Whether to fingerprint the content.
        loger: Logger object exposing logear().
        reglas: Rule set forwarded to filtro().

    Returns:
        The *hashes* list, possibly extended with one [nombre, hash] entry.
    """
    try:
        if not filtro(nombre, contenido, parametros, reglas):
            if descargar_archivos:
                _crear_archivo(join(directorio, nombre), contenido, logger=loger)
            if guardar_hashes:
                try:
                    hashes.append([nombre, fingerprint(contenido)])
                except Exception as e:
                    loger.logear(e)
    except Exception as e:
        # The original bare ``except: print('error en hashyarchivo')``
        # swallowed every error (including KeyboardInterrupt) and
        # bypassed the logger; log the actual exception instead.
        loger.logear(e)
    return hashes
# Core crawler: analyse one URL, hash/download it, then recursively
# schedule the same analysis for every link it contains.
async def mapear(url, profundidad, parametros, descargar_archivos, guardar_links, guardar_hashes, loger, reglas,
                 velocidad,
                 informacion=None, directorio_a=getcwd()):
    """Recursively scrape *url* down to *profundidad* remaining levels.

    NOTE(review): the ``directorio_a=getcwd()`` default is evaluated once
    at import time, not per call -- confirm that is intended.
    """
    hashes = []  # Accumulates every [file_name, hash] pair found here.
    try:
        if profundidad > 0:
            # *informacion* is only pre-supplied by the recursive calls
            # below; the first call downloads the root URL itself.
            # NOTE(review): ``== None`` should be ``is None`` per PEP 8.
            if informacion == None:
                informacion = await aio_descargar(url, logger=loger)
            # Resolve the working directory for this URL...
            directorio = conseguir_directorio(directorio_a, informacion[0])
            # ...and make sure it exists (failures are only logged).
            crear_mantener_directorio(directorio, loger)
            # Extract all links from the downloaded HTML bytes.
            links = _extraer_links(informacion[2])
            # Optionally download the file and record its hash.
            hashes = hash_y_archivo(hashes, informacion[1], informacion[2], directorio, parametros, descargar_archivos,
                                    guardar_hashes, loger, reglas)
            # Optionally persist every found link to 'lista_links.txt'.
            if guardar_links:
                _guardar_links(directorio, links)
            # Process each discovered link in turn.
            for numero, link in enumerate(links):
                try:
                    # Skip self-references to avoid duplicated work and
                    # wasted resources.
                    if link != url:
                        # Download the link's content up front.
                        informacion = await aio_descargar(link, logger=loger)
                        await sleep(profundidad / velocidad)  # Yield to other tasks; higher velocidad = shorter pause.
                        # Hash (and optionally store) the content.
                        hashes = hash_y_archivo(hashes, informacion[1], informacion[2], directorio, parametros,
                                                descargar_archivos,
                                                guardar_hashes, loger, reglas)
                        # Schedule the recursive crawl of this link with
                        # one less level of depth.
                        _loop.create_task(
                            mapear(link, profundidad - 1, parametros, descargar_archivos, guardar_links, guardar_hashes,
                                   loger, reglas, velocidad=velocidad,
                                   informacion=informacion, directorio_a=directorio))
                except:
                    # NOTE(review): bare except silently drops per-link
                    # failures (and would swallow CancelledError too).
                    pass
                # Log the progress percentage for the current URL.
                loger.logear('{}% en {}'.format(100 * (numero + 1) / len(links), url))
            # Persist all hashes found at this level to 'hashes.csv'.
            _guardar_hashes(directorio, hashes)
    except Exception as e:
        # Log the failure...
        loger.logear(e)
        try:
            # ...and still try to persist whatever hashes were gathered.
            _guardar_hashes(directorio, hashes)
        except Exception as e:
            # Even the retry failed; only a marker message is logged.
            loger.logear('[ERROR] Se re-intento es cribir el hash pero no se logro')
# Funcion final que se utiliza para hacer el analisis de un url, (esta es la que debe usarce para el scraping)
# ENGLISH
# This function is the only that can be used from this file, it is the implementation of mapear
def scrap(url, debug_file=None, debug=True, profundidad=2, parametros={}, descargar_archivos=True, guardar_links=True,
guardar_hashes=True, reglas=Reglas, velocidad=3):
loger = logger(debug_file, debug)
_loop.run_until_complete(
mapear(url, profundidad, parametros, descargar_archivos, guardar_links, guardar_hashes, loger, reglas,
velocidad=velocidad))
| 39.813665 | 123 | 0.634321 |
79596d3dac4396d21bccd1767367f08f3ba78b54 | 155 | py | Python | examples/bot.py | BaySchoolCS2/WPAW | 8ff05cda94ba7c8dc74b52c6624e266b22154cce | [
"MIT"
] | null | null | null | examples/bot.py | BaySchoolCS2/WPAW | 8ff05cda94ba7c8dc74b52c6624e266b22154cce | [
"MIT"
] | null | null | null | examples/bot.py | BaySchoolCS2/WPAW | 8ff05cda94ba7c8dc74b52c6624e266b22154cce | [
"MIT"
] | null | null | null | from pwrapper import Wrapper
b = Wrapper(user_agent="test_bot")
b.token = "vjdOXdRyF52j"
b.me()
print b.raw
print b.new_post("test", "testtest").text
| 12.916667 | 41 | 0.716129 |
79596de2d5b560dbbe7e108e56770b106b1e5985 | 1,073 | py | Python | src/mdp/frozen_lake_levels/frozen_lake_level_4x4_B.py | Gnosling/RLASP | e4da8c72e295fbebeebe3bc20857fb1cd4e5814a | [
"MIT"
] | null | null | null | src/mdp/frozen_lake_levels/frozen_lake_level_4x4_B.py | Gnosling/RLASP | e4da8c72e295fbebeebe3bc20857fb1cd4e5814a | [
"MIT"
] | null | null | null | src/mdp/frozen_lake_levels/frozen_lake_level_4x4_B.py | Gnosling/RLASP | e4da8c72e295fbebeebe3bc20857fb1cd4e5814a | [
"MIT"
] | null | null | null | from .frozen_lake_level import FrozenLakeLevel
"""
Fixed 4x4-level for a frozen-lake:
_____________
|S F F F |
|H F F F |
|H H F F |
|H H H G |
|___________|
Whereby:
S = START
F = FROZEN
H = HOLE
G = GOAL
(S and G must be frozen)
"""
class FrozenLakeLevel4x4_B(FrozenLakeLevel):
    """Fixed 4x4 frozen-lake level 'B'.

    Cells are numbered 0..15 row by row. The agent starts on cell 0
    and the goal is cell 15; holes fill the lower-left triangle
    (cells 4, 8, 9, 12, 13 and 14) and every other cell is frozen.
    """
    def __init__(self):
        # TODO: in the .lp encoding, length equals width -- change this?
        # Logic-program facts describing the grid: per-cell states, the
        # goal/size constants and the edge cells of the board.
        states = {"position(0..15)",
                  "frozen(0..3)",
                  "hole(4)", "frozen(5..7)",
                  "hole(8..9)", "frozen(10..11)",
                  "hole(12..14)", "frozen(15)",
                  "#const goal = 15", "#const length = 4",
                  "leftEdge(0)", "leftEdge(4)", "leftEdge(8)", "leftEdge(12)",
                  "rightEdge(3)", "rightEdge(7)", "rightEdge(11)", "rightEdge(15)",
                  "upperEdge(0..3)", "lowerEdge(12..15)"
                  }
        # The agent begins on cell 0 (the 'S' cell of the layout).
        start = {"currentPosition(0)"}
        width = 4
        height = 4
        super().__init__(start, width, height, states)
79596df9ce000500c59aa24cb8ef2b9ee5b47b89 | 8,566 | py | Python | cinder/tests/unit/db/test_qos_specs.py | aarunsai81/netapp | 8f0f7bf9be7f4d9fb9c3846bfc639c90a05f86ba | [
"Apache-2.0"
] | 11 | 2015-08-25T13:11:18.000Z | 2020-10-15T11:29:20.000Z | cinder/tests/unit/db/test_qos_specs.py | aarunsai81/netapp | 8f0f7bf9be7f4d9fb9c3846bfc639c90a05f86ba | [
"Apache-2.0"
] | 5 | 2018-01-25T11:31:56.000Z | 2019-05-06T23:13:35.000Z | cinder/tests/unit/db/test_qos_specs.py | aarunsai81/netapp | 8f0f7bf9be7f4d9fb9c3846bfc639c90a05f86ba | [
"Apache-2.0"
] | 11 | 2015-02-20T18:48:24.000Z | 2021-01-30T20:26:18.000Z | # Copyright (C) 2013 eBay Inc.
# Copyright (C) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for quality_of_service_specs table."""
import time
from cinder import context
from cinder import db
from cinder import exception
from cinder import test
from cinder.tests.unit import fake_constants as fake
from cinder.volume import volume_types
def fake_qos_specs_get_by_name(context, name, session=None, inactive=False):
    # Stub mirroring the signature of ``db.qos_specs_get_by_name``; kept
    # around for patching in tests. Intentionally does nothing.
    pass
class QualityOfServiceSpecsTableTestCase(test.TestCase):
    """Test case for QualityOfServiceSpecs model."""

    def setUp(self):
        super(QualityOfServiceSpecsTableTestCase, self).setUp()
        # Admin context so the db API calls are authorized.
        self.ctxt = context.RequestContext(user_id=fake.USER_ID,
                                           project_id=fake.PROJECT_ID,
                                           is_admin=True)

    def _create_qos_specs(self, name, consumer='back-end', values=None):
        """Create a qos specs record and return its id."""
        if values is None:
            values = {'key1': 'value1', 'key2': 'value2'}

        specs = {'name': name,
                 'consumer': consumer,
                 'specs': values}
        return db.qos_specs_create(self.ctxt, specs)['id']

    def test_qos_specs_create(self):
        """Creating a duplicate name raises; new names can be looked up."""
        # If there is qos specs with the same name exists,
        # a QoSSpecsExists exception will be raised.
        name = 'QoSSpecsCreationTest'
        self._create_qos_specs(name)
        self.assertRaises(exception.QoSSpecsExists,
                          db.qos_specs_create, self.ctxt, dict(name=name))

        specs_id = self._create_qos_specs('NewName')
        query_id = db.qos_specs_get_by_name(
            self.ctxt, 'NewName')['id']
        self.assertEqual(specs_id, query_id)

    def test_qos_specs_get(self):
        """Lookup by id returns the stored record; unknown ids raise."""
        qos_spec = {'name': 'Name1',
                    'consumer': 'front-end',
                    'specs': {'key1': 'foo', 'key2': 'bar'}}
        specs_id = self._create_qos_specs(qos_spec['name'],
                                          qos_spec['consumer'],
                                          qos_spec['specs'])

        fake_id = fake.WILL_NOT_BE_FOUND_ID
        self.assertRaises(exception.QoSSpecsNotFound,
                          db.qos_specs_get, self.ctxt, fake_id)

        specs_returned = db.qos_specs_get(self.ctxt, specs_id)
        qos_spec['id'] = specs_id
        self.assertDictMatch(qos_spec, specs_returned)

    def test_qos_specs_get_all(self):
        """Every created record is returned by qos_specs_get_all."""
        qos_list = [
            {'name': 'Name1',
             'consumer': 'front-end',
             'specs': {'key1': 'v1', 'key2': 'v2'}},
            {'name': 'Name2',
             'consumer': 'back-end',
             'specs': {'key1': 'v3', 'key2': 'v4'}},
            {'name': 'Name3',
             'consumer': 'back-end',
             'specs': {'key1': 'v5', 'key2': 'v6'}}]

        for qos in qos_list:
            qos['id'] = self._create_qos_specs(qos['name'],
                                               qos['consumer'],
                                               qos['specs'])

        specs_list_returned = db.qos_specs_get_all(self.ctxt)
        self.assertEqual(len(qos_list), len(specs_list_returned),
                         "Unexpected number of qos specs records")

        for expected_qos in qos_list:
            self.assertIn(expected_qos, specs_list_returned)

    def test_qos_specs_delete(self):
        """Deleted specs can no longer be fetched."""
        name = str(int(time.time()))
        specs_id = self._create_qos_specs(name)
        db.qos_specs_delete(self.ctxt, specs_id)
        self.assertRaises(exception.QoSSpecsNotFound,
                          db.qos_specs_get,
                          self.ctxt, specs_id)

    def test_qos_specs_item_delete(self):
        """Deleting one key leaves the rest of the spec intact."""
        name = str(int(time.time()))
        value = dict(foo='Foo', bar='Bar')
        specs_id = self._create_qos_specs(name, 'front-end', value)

        # Expected record after 'foo' has been removed server-side.
        del value['foo']
        expected = {'name': name,
                    'id': specs_id,
                    'consumer': 'front-end',
                    'specs': value}
        db.qos_specs_item_delete(self.ctxt, specs_id, 'foo')
        specs = db.qos_specs_get(self.ctxt, specs_id)
        self.assertDictMatch(expected, specs)

    def test_associate_type_with_qos(self):
        """A volume type can be associated with a qos spec."""
        self.assertRaises(exception.VolumeTypeNotFound,
                          db.volume_type_qos_associate,
                          self.ctxt, fake.VOLUME_ID, fake.QOS_SPEC_ID)
        type_id = volume_types.create(self.ctxt, 'TypeName')['id']
        specs_id = self._create_qos_specs('FakeQos')
        db.volume_type_qos_associate(self.ctxt, type_id, specs_id)
        res = db.qos_specs_associations_get(self.ctxt, specs_id)
        self.assertEqual(1, len(res))
        self.assertEqual(type_id, res[0]['id'])
        self.assertEqual(specs_id, res[0]['qos_specs_id'])

    def test_qos_associations_get(self):
        """Association list grows as volume types are associated."""
        self.assertRaises(exception.QoSSpecsNotFound,
                          db.qos_specs_associations_get,
                          self.ctxt, fake.WILL_NOT_BE_FOUND_ID)

        type_id = volume_types.create(self.ctxt, 'TypeName')['id']
        specs_id = self._create_qos_specs('FakeQos')
        res = db.qos_specs_associations_get(self.ctxt, specs_id)
        self.assertEqual(0, len(res))

        db.volume_type_qos_associate(self.ctxt, type_id, specs_id)
        res = db.qos_specs_associations_get(self.ctxt, specs_id)
        self.assertEqual(1, len(res))
        self.assertEqual(type_id, res[0]['id'])
        self.assertEqual(specs_id, res[0]['qos_specs_id'])

        type0_id = volume_types.create(self.ctxt, 'Type0Name')['id']
        db.volume_type_qos_associate(self.ctxt, type0_id, specs_id)
        res = db.qos_specs_associations_get(self.ctxt, specs_id)
        self.assertEqual(2, len(res))
        self.assertEqual(specs_id, res[0]['qos_specs_id'])
        self.assertEqual(specs_id, res[1]['qos_specs_id'])

    def test_qos_specs_disassociate(self):
        """Disassociating removes the link and clears the type's spec id."""
        type_id = volume_types.create(self.ctxt, 'TypeName')['id']
        specs_id = self._create_qos_specs('FakeQos')
        db.volume_type_qos_associate(self.ctxt, type_id, specs_id)
        res = db.qos_specs_associations_get(self.ctxt, specs_id)
        self.assertEqual(type_id, res[0]['id'])
        self.assertEqual(specs_id, res[0]['qos_specs_id'])

        db.qos_specs_disassociate(self.ctxt, specs_id, type_id)
        res = db.qos_specs_associations_get(self.ctxt, specs_id)
        self.assertEqual(0, len(res))
        res = db.volume_type_get(self.ctxt, type_id)
        self.assertIsNone(res['qos_specs_id'])

    def test_qos_specs_disassociate_all(self):
        """disassociate_all removes every association at once."""
        specs_id = self._create_qos_specs('FakeQos')
        type1_id = volume_types.create(self.ctxt, 'Type1Name')['id']
        type2_id = volume_types.create(self.ctxt, 'Type2Name')['id']
        type3_id = volume_types.create(self.ctxt, 'Type3Name')['id']
        db.volume_type_qos_associate(self.ctxt, type1_id, specs_id)
        db.volume_type_qos_associate(self.ctxt, type2_id, specs_id)
        db.volume_type_qos_associate(self.ctxt, type3_id, specs_id)

        res = db.qos_specs_associations_get(self.ctxt, specs_id)
        self.assertEqual(3, len(res))

        db.qos_specs_disassociate_all(self.ctxt, specs_id)
        res = db.qos_specs_associations_get(self.ctxt, specs_id)
        self.assertEqual(0, len(res))

    def test_qos_specs_update(self):
        """Updates merge new spec keys and change the consumer."""
        name = 'FakeName'
        specs_id = self._create_qos_specs(name)
        value = {'consumer': 'both',
                 'specs': {'key2': 'new_value2', 'key3': 'value3'}}

        self.assertRaises(exception.QoSSpecsNotFound, db.qos_specs_update,
                          self.ctxt, fake.WILL_NOT_BE_FOUND_ID, value)
        db.qos_specs_update(self.ctxt, specs_id, value)
        specs = db.qos_specs_get(self.ctxt, specs_id)
        self.assertEqual('new_value2', specs['specs']['key2'])
        self.assertEqual('value3', specs['specs']['key3'])
        self.assertEqual('both', specs['consumer'])
| 41.381643 | 78 | 0.618959 |
795971d1034dd4ec6594c57fa1598339f7cfa18a | 4,840 | py | Python | pygmm/atkinson_boore_2006.py | arkottke/pygmm | e3b37ea3f81ffb847e040292c6731e36627ac9aa | [
"MIT"
] | 15 | 2016-03-29T05:01:09.000Z | 2022-02-09T08:26:07.000Z | pygmm/atkinson_boore_2006.py | arkottke/pygmm | e3b37ea3f81ffb847e040292c6731e36627ac9aa | [
"MIT"
] | 3 | 2016-03-29T05:12:28.000Z | 2017-08-18T21:49:34.000Z | pygmm/atkinson_boore_2006.py | arkottke/pygmm | e3b37ea3f81ffb847e040292c6731e36627ac9aa | [
"MIT"
] | 13 | 2016-03-29T05:18:39.000Z | 2022-01-26T03:37:08.000Z | # -*- coding: utf-8 -*-
"""Atkinson and Boore (2006, :cite:`atkinson06`) model."""
import numpy as np
from . import model
__author__ = "Albert Kottke"
class AtkinsonBoore2006(model.GroundMotionModel):
    """Atkinson and Boore (2006, :cite:`atkinson06`) model.

    Developed for the Eastern North America with a reference velocity of 760
    or 2000 m/s.

    Parameters
    ----------
    scenario : :class:`pygmm.model.Scenario`
        earthquake scenario
    """

    NAME = "Atkinson and Boore (2006)"
    ABBREV = "AB06"

    # Load the coefficients for the model
    COEFF = dict(
        bc=model.load_data_file("atkinson_boore_2006-bc.csv", 2),
        rock=model.load_data_file("atkinson_boore_2006-rock.csv", 2),
    )
    PERIODS = COEFF["bc"]["period"]

    COEFF_SITE = model.load_data_file("atkinson_boore_2006-site.csv", 2)
    COEFF_SF = model.load_data_file("atkinson_boore_2006-sf.csv", 2)

    # Indices into the response arrays (PGD/PGV/PGA then spectral periods).
    INDEX_PGD = 0
    INDEX_PGV = 1
    INDEX_PGA = 2
    INDICES_PSA = np.arange(3, 27)

    PARAMS = [
        model.NumericParameter("mag", True),
        model.NumericParameter("dist_rup", True),
        model.NumericParameter("v_s30", True),
    ]

    def __init__(self, scenario: model.Scenario):
        """Initialize the model."""
        super(AtkinsonBoore2006, self).__init__(scenario)
        self._ln_resp = self._calc_ln_resp()
        self._ln_std = self._calc_ln_std()

    def _calc_ln_resp(self) -> np.ndarray:
        """Calculate the natural logarithm of the response.

        Returns
        -------
        ln_resp : class:`np.array`:
            natural log of the response
        """
        s = self._scenario
        # B/C-boundary coefficients when a site velocity is provided,
        # otherwise the hard-rock coefficient set.
        c = self.COEFF["bc"] if s.v_s30 else self.COEFF["rock"]

        # Compute the response at the reference condition
        r0 = 10.0
        r1 = 70.0
        r2 = 140.0

        # Trilinear distance-scaling terms (log10 distance hinges at r0/r1/r2).
        f0 = np.maximum(np.log10(r0 / s.dist_rup), 0)
        f1 = np.minimum(np.log10(s.dist_rup), np.log10(r1))
        f2 = np.maximum(np.log10(s.dist_rup / r2), 0)

        # Compute the log10 PSA in units of cm/sec/sec
        log10_resp = (
            c.c_1
            + c.c_2 * s.mag
            + c.c_3 * s.mag ** 2
            + (c.c_4 + c.c_5 * s.mag) * f1
            + (c.c_6 + c.c_7 * s.mag) * f2
            + (c.c_8 + c.c_9 * s.mag) * f0
            + c.c_10 * s.dist_rup
        )

        # Apply stress drop correction
        log10_resp += self._calc_stress_factor()

        if s.v_s30:
            # Compute the site amplification
            # PGA here is still in cm/sec/sec (conversion to g happens below).
            pga_bc = 10 ** log10_resp[self.INDEX_PGA]
            log10_site = self._calc_log10_site(pga_bc)
            log10_resp += log10_site

        # Convert from cm/sec/sec to gravity
        log10_resp -= np.log10(980.665)

        ln_resp = np.log(10 ** log10_resp)
        return ln_resp

    def _calc_ln_std(self) -> np.ndarray:
        """Calculate the logarithmic standard deviation.

        Returns
        -------
        ln_std : class:`np.array`:
            natural log standard deviation

        A constant 0.30 is used for all periods.
        """
        ln_std = np.ones_like(self.PERIODS) * 0.30
        return ln_std

    def _calc_stress_factor(self) -> float:
        """Calculate the stress correction factor proposed by Atkinson and
        Boore (2011) :cite:`atkinson11`.

        Returns
        -------
        log10_stress_factor : class:`np.array`:
            log base 10 of the stress factor
        """
        s = self._scenario
        c = self.COEFF_SF

        # Magnitude-dependent stress drop (bars).
        stress_drop = 10.0 ** (3.45 - 0.2 * s.mag)
        v1 = c.delta + 0.05
        v2 = 0.05 + c.delta * np.maximum(s.mag - c.m_1, 0) / (c.m_h - c.m_1)

        log10_stress_factor = np.minimum(2.0, stress_drop / 140.0) * np.minimum(v1, v2)

        # Interpolate the factor onto the model's period array.
        return np.interp(self.PERIODS, c.period, log10_stress_factor)

    def _calc_log10_site(self, pga_bc: float) -> np.ndarray:
        """Calculate the log10 of the site amplification.

        Parameters
        ----------
        pga_bc : float
            peak ground acceleration (PGA, g) at the B/C boundary.

        Returns
        -------
        log_10_site : :class:`np.ndarray`
            log base 10 of the site amplification.
        """
        s = self._scenario
        c = self.COEFF_SITE
        # Velocity hinge points (m/s) of the nonlinear site term.
        VS_1 = 180.0
        VS_2 = 300.0
        VS_REF = 760.0

        # Piecewise-linear (in log velocity) nonlinear slope b_nl.
        if s.v_s30 <= VS_1:
            b_nl = c.b_1
        elif VS_1 < s.v_s30 <= VS_2:
            b_nl = (c.b_1 - c.b_2) * np.log(s.v_s30 / VS_2) / np.log(VS_1 / VS_2)
        elif VS_2 < s.v_s30 <= VS_REF:
            b_nl = c.b_2 * np.log(s.v_s30 / VS_REF) / np.log(VS_2 / VS_REF)
        else:
            # Vs30 > VS_REF
            b_nl = 0

        # NOTE(review): presumably floors PGA at 60 cm/s/s for the nonlinear
        # term (caller passes cm/s/s despite the docstring saying g) --
        # confirm against the AB06 paper.
        pga_bc = max(pga_bc, 60.0)

        log10_site = np.log10(
            np.exp(c.b_lin * np.log(s.v_s30 / VS_REF) + b_nl * np.log(pga_bc / 100.0))
        )
        return np.interp(self.PERIODS, c.period, log10_site)
| 28.639053 | 87 | 0.559711 |
795972205e261e2484f1a20c5f9feefd5669d7da | 415 | py | Python | easy/pascal triangle/solution.py | ilya-sokolov/leetcode | ad421111d0d7c5ec5245f33552e94a373b6fd426 | [
"MIT"
] | 4 | 2021-06-03T22:19:13.000Z | 2021-10-05T18:14:12.000Z | easy/pascal triangle/solution.py | ilya-sokolov/leetcode | ad421111d0d7c5ec5245f33552e94a373b6fd426 | [
"MIT"
] | null | null | null | easy/pascal triangle/solution.py | ilya-sokolov/leetcode | ad421111d0d7c5ec5245f33552e94a373b6fd426 | [
"MIT"
] | null | null | null | from typing import List
class Solution:
    """Builds Pascal's triangle row by row."""

    def generate(self, numRows: int) -> List[List[int]]:
        """Return the first ``numRows`` rows of Pascal's triangle."""
        triangle: List[List[int]] = []
        current = [1]
        for _ in range(numRows):
            triangle.append(current)
            # Each entry of the next row is the sum of the two entries
            # diagonally above it; padding with zeros handles the edges.
            current = [a + b for a, b in zip([0] + current, current + [0])]
        return triangle
s = Solution()
# Smoke-test output for a few triangle sizes.
print(s.generate(1))
print(s.generate(5))
print(s.generate(7))
print(s.generate(30))
print(s.generate(10))
| 20.75 | 61 | 0.578313 |
795972c0553573adf71c1976091de08abede2afe | 1,703 | py | Python | server_main.py | TAViT2022/TAViT | 6ea42150c57af0e0618675565440df85121cf50a | [
"Apache-2.0"
] | null | null | null | server_main.py | TAViT2022/TAViT | 6ea42150c57af0e0618675565440df85121cf50a | [
"Apache-2.0"
] | null | null | null | server_main.py | TAViT2022/TAViT | 6ea42150c57af0e0618675565440df85121cf50a | [
"Apache-2.0"
] | null | null | null | import argparse
import torch
from omegaconf import OmegaConf
import src.server.app as app
from src.server.client_manager import TAViTClientManager
from src.server.tavit_server import TAViTServer
from src.server.strategy import CustomStrategy, FedAvg, Strategy
from src.server.torch_model.transformer.transformer import cnnTransformer
def main():
    """Parse CLI arguments, assemble the TAViT server and start serving."""
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('--config_path', type=str, default='src/server/config/server_config.yaml')
    parser.add_argument('--server_address', type=str, default='localhost:8080')
    # NOTE(review): default is int 0 while type=str; argparse only applies
    # ``type`` to command-line values, so the default stays an int. The
    # comparison below against the string 'None' works either way -- confirm
    # this mismatch is intentional.
    parser.add_argument('--gpu', type=str, default=0)
    args = parser.parse_args()

    # Fall back to CPU when CUDA is unavailable or --gpu is 'None'.
    DEVICE = torch.device("cuda:{}".format(args.gpu) if (torch.cuda.is_available() and args.gpu != 'None') else "cpu")
    config = OmegaConf.load(args.config_path)
    client_manager = TAViTClientManager()
    # Wait for every configured client before fitting, and sample all of them.
    strategy = CustomStrategy(min_available_clients=config.server_config.num_clients,
                              min_fit_clients=config.server_config.num_clients,
                              fraction_fit=1.0)
    body_model = cnnTransformer(**config.model_config.body_config)
    tavit_server = TAViTServer(body=body_model,
                               client_manager=client_manager,
                               strategy=strategy,
                               device=DEVICE,
                               config=config.server_config,
                               )
    app.start_tavit_server(server_address=args.server_address,
                           server=tavit_server,
                           config={"num_cycles":config.server_config.num_cycles})

if __name__=='__main__':
    main()
| 41.536585 | 118 | 0.652378 |
795972d482012f59b88577ff3d5c0c95f50e14b7 | 6,586 | py | Python | test/sampleData/micropython/BMP180.py | chloeyutianyi/cyanobyte | e6462b892d22d1cb51ab460ad98b509ba905f39a | [
"Apache-2.0"
] | null | null | null | test/sampleData/micropython/BMP180.py | chloeyutianyi/cyanobyte | e6462b892d22d1cb51ab460ad98b509ba905f39a | [
"Apache-2.0"
] | null | null | null | test/sampleData/micropython/BMP180.py | chloeyutianyi/cyanobyte | e6462b892d22d1cb51ab460ad98b509ba905f39a | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Auto-generated file for BMP180 v0.1.0.
# Generated from peripherals/BMP180.yaml using Cyanobyte Codegen v0.1.0
"""
Class for BMP180
"""
import math
from machine import I2C
class BMP180:
    """
    Bosch Digital Temperature / Pressure Sensor

    MicroPython I2C driver. Calibration and measurement registers are
    read as big-endian 16-bit values; a measurement is triggered by
    writing the control register and read back from the result register.
    """
    device_address = 119  # 7-bit I2C address (0x77)

    REGISTER_CONTROL = 244
    REGISTER_PRESSURECALAC1 = 170
    REGISTER_PRESSURECALAC2 = 172
    REGISTER_PRESSURECALVB1 = 182
    REGISTER_PRESSURECALVB2 = 184
    REGISTER_RESULT = 246
    REGISTER_TEMPCAL3 = 174
    REGISTER_TEMPCAL4 = 176
    REGISTER_TEMPCAL5 = 178
    REGISTER_TEMPCAL6 = 180
    REGISTER_TEMPCALMC = 188
    REGISTER_TEMPCALMD = 190

    def __init__(self, i2c):
        # Initialize connection to peripheral
        self.i2c = i2c

    def _read_u16(self, register):
        """Read a big-endian unsigned 16-bit value from ``register``.

        Shared helper replacing the repetitive generated getters below.
        """
        # NOTE(review): addrsize=16 reproduces the generated code; BMP180
        # register addresses are normally 8 bits -- confirm on hardware.
        byte_list = self.i2c.readfrom_mem(
            self.device_address,
            register,
            2,
            addrsize=16
        )
        return (byte_list[0] << 8) | byte_list[1]

    def set_control(self, data):
        """
        Register stores what the measurement type should be.

        :param data: command byte (e.g. 0x2E / 46 starts a temperature
            conversion); only the low 8 bits are written.
        """
        # Bug fix: the generated code did ``buffer = []`` followed by
        # ``buffer[0] = ...``, which raises IndexError on an empty list
        # (and a plain list is not a writable I2C buffer anyway).
        buffer = bytearray(1)
        buffer[0] = (data >> 0) & 0xFF
        self.i2c.writeto_mem(
            self.device_address,
            self.REGISTER_CONTROL,
            buffer,
            addrsize=8
        )

    def get_pressurecalac1(self):
        """For calibration (pressure coefficient AC1)."""
        return self._read_u16(self.REGISTER_PRESSURECALAC1)

    def get_pressurecalac2(self):
        """For calibration (pressure coefficient AC2)."""
        return self._read_u16(self.REGISTER_PRESSURECALAC2)

    def get_pressurecalvb1(self):
        """For calibration (pressure coefficient B1)."""
        return self._read_u16(self.REGISTER_PRESSURECALVB1)

    def get_pressurecalvb2(self):
        """For calibration (pressure coefficient B2)."""
        return self._read_u16(self.REGISTER_PRESSURECALVB2)

    def get_result(self):
        """Register stores most recent measurement result."""
        return self._read_u16(self.REGISTER_RESULT)

    def get_tempcal3(self):
        """For calibration (temperature coefficient 3)."""
        return self._read_u16(self.REGISTER_TEMPCAL3)

    def get_tempcal4(self):
        """For calibration (temperature coefficient 4)."""
        return self._read_u16(self.REGISTER_TEMPCAL4)

    def get_tempcal5(self):
        """For calibration (temperature coefficient 5, AC5)."""
        return self._read_u16(self.REGISTER_TEMPCAL5)

    def get_tempcal6(self):
        """For calibration (temperature coefficient 6, AC6)."""
        return self._read_u16(self.REGISTER_TEMPCAL6)

    def get_tempcalmc(self):
        """For calibration (temperature coefficient MC)."""
        return self._read_u16(self.REGISTER_TEMPCALMC)

    def get_tempcalmd(self):
        """For calibration (temperature coefficient MD)."""
        return self._read_u16(self.REGISTER_TEMPCALMD)

    def temperature_ascelsius(self):
        """
        Reads the temperature.

        Triggers a temperature conversion and applies the calibration
        arithmetic from the generated device definition. Register reads
        happen in the same order as the original code.
        """
        self.set_control(46)  # 0x2E: start a temperature measurement
        raw = self.get_result()
        var_ac5 = self.get_tempcal5()
        var_ac6 = self.get_tempcal6()
        var_c5 = (math.pow(2, -15) / 160) * var_ac5
        raw_comp = var_c5 * (raw - var_ac6)
        raw_mc = self.get_tempcalmc()
        var_mc = (math.pow(2, 11) / math.pow(160, 2)) * raw_mc
        raw_md = self.get_tempcalmd()
        var_md = raw_md / 160
        temperature = raw_comp + (var_mc / (raw_comp + var_md))
        return temperature
795972dfed129112894f663be442bffa99afff99 | 16,683 | py | Python | salt/utils/win_runas.py | silviud/salt | 9d35ea66428a030d00ef1a2f3b93ecfe90be023c | [
"Apache-2.0"
] | 1 | 2018-09-19T22:42:54.000Z | 2018-09-19T22:42:54.000Z | salt/utils/win_runas.py | silviud/salt | 9d35ea66428a030d00ef1a2f3b93ecfe90be023c | [
"Apache-2.0"
] | null | null | null | salt/utils/win_runas.py | silviud/salt | 9d35ea66428a030d00ef1a2f3b93ecfe90be023c | [
"Apache-2.0"
] | 1 | 2019-07-23T13:42:23.000Z | 2019-07-23T13:42:23.000Z | # -*- coding: utf-8 -*-
'''
Implements the ability to run processes as another user in Windows for salt
'''
from __future__ import absolute_import
# Import Python Libraries
import os
import logging
# Import Third Party Libs
try:
import win32con
import win32api
import win32process
import win32security
import win32pipe
import win32event
import win32profile
import msvcrt
import ctypes
import winerror
from ctypes import wintypes
HAS_WIN32 = True
except ImportError:
HAS_WIN32 = False
# Import Salt Libs
import salt.utils
# Set up logging
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load this utility on Windows hosts where the pywin32 bindings
    imported successfully.
    '''
    loadable = salt.utils.is_windows() and HAS_WIN32
    return 'win_runas' if loadable else False
if HAS_WIN32:
    # ctypes definitions
    kernel32 = ctypes.WinDLL('kernel32')
    advapi32 = ctypes.WinDLL('advapi32')

    # Sentinel values used by the errcheck helpers below.
    INVALID_HANDLE_VALUE = wintypes.HANDLE(-1).value
    INVALID_DWORD_VALUE = wintypes.DWORD(-1).value  # ~WinAPI
    INFINITE = INVALID_DWORD_VALUE
    LOGON_WITH_PROFILE = 0x00000001

    # Standard-device handle ids for GetStdHandle.
    STD_INPUT_HANDLE = wintypes.DWORD(-10).value
    STD_OUTPUT_HANDLE = wintypes.DWORD(-11).value
    STD_ERROR_HANDLE = wintypes.DWORD(-12).value

    class SECURITY_ATTRIBUTES(ctypes.Structure):
        # Mirrors the Win32 SECURITY_ATTRIBUTES struct; nLength is filled
        # in automatically on construction.
        _fields_ = (('nLength', wintypes.DWORD),
                    ('lpSecurityDescriptor', wintypes.LPVOID),
                    ('bInheritHandle', wintypes.BOOL))

        def __init__(self, **kwds):
            self.nLength = ctypes.sizeof(self)
            super(SECURITY_ATTRIBUTES, self).__init__(**kwds)

    # Pointer aliases used in the function prototypes below.
    LPSECURITY_ATTRIBUTES = ctypes.POINTER(SECURITY_ATTRIBUTES)
    LPBYTE = ctypes.POINTER(wintypes.BYTE)
    LPHANDLE = PHANDLE = ctypes.POINTER(ctypes.c_void_p)
    LPDWORD = ctypes.POINTER(ctypes.c_ulong)

    class STARTUPINFO(ctypes.Structure):
        """https://msdn.microsoft.com/en-us/library/ms686331"""
        _fields_ = (('cb', wintypes.DWORD),
                    ('lpReserved', wintypes.LPWSTR),
                    ('lpDesktop', wintypes.LPWSTR),
                    ('lpTitle', wintypes.LPWSTR),
                    ('dwX', wintypes.DWORD),
                    ('dwY', wintypes.DWORD),
                    ('dwXSize', wintypes.DWORD),
                    ('dwYSize', wintypes.DWORD),
                    ('dwXCountChars', wintypes.DWORD),
                    ('dwYCountChars', wintypes.DWORD),
                    ('dwFillAttribute', wintypes.DWORD),
                    ('dwFlags', wintypes.DWORD),
                    ('wShowWindow', wintypes.WORD),
                    ('cbReserved2', wintypes.WORD),
                    ('lpReserved2', LPBYTE),
                    ('hStdInput', wintypes.HANDLE),
                    ('hStdOutput', wintypes.HANDLE),
                    ('hStdError', wintypes.HANDLE))

        def __init__(self, **kwds):
            # cb must hold the struct size for the API to accept it.
            self.cb = ctypes.sizeof(self)
            super(STARTUPINFO, self).__init__(**kwds)
if HAS_WIN32:
    LPSTARTUPINFO = ctypes.POINTER(STARTUPINFO)

    class PROC_THREAD_ATTRIBUTE_LIST(ctypes.Structure):
        # Opaque struct; only used through its pointer type.
        pass

    PPROC_THREAD_ATTRIBUTE_LIST = ctypes.POINTER(PROC_THREAD_ATTRIBUTE_LIST)

    class STARTUPINFOEX(STARTUPINFO):
        _fields_ = (('lpAttributeList', PPROC_THREAD_ATTRIBUTE_LIST),)

    LPSTARTUPINFOEX = ctypes.POINTER(STARTUPINFOEX)

    class PROCESS_INFORMATION(ctypes.Structure):
        """https://msdn.microsoft.com/en-us/library/ms684873"""
        _fields_ = (('hProcess', wintypes.HANDLE),
                    ('hThread', wintypes.HANDLE),
                    ('dwProcessId', wintypes.DWORD),
                    ('dwThreadId', wintypes.DWORD))

    LPPROCESS_INFORMATION = ctypes.POINTER(PROCESS_INFORMATION)

    # Marker restypes so _win can pick the right errcheck for APIs that
    # signal failure via INVALID_HANDLE_VALUE / INVALID_DWORD_VALUE.
    class HANDLE_IHV(wintypes.HANDLE):
        pass

    def errcheck_ihv(result, func, args):
        # Raise WinError when the API returned the invalid-handle sentinel.
        if result.value == INVALID_HANDLE_VALUE:
            raise ctypes.WinError()
        return result.value

    class DWORD_IDV(wintypes.DWORD):
        pass

    def errcheck_idv(result, func, args):
        # Raise WinError when the API returned the invalid-DWORD sentinel.
        if result.value == INVALID_DWORD_VALUE:
            raise ctypes.WinError()
        return result.value

    def errcheck_bool(result, func, args):
        # Default errcheck: a falsy BOOL return means failure.
        if not result:
            raise ctypes.WinError()
        return args

    def _win(func, restype, *argtypes):
        # Attach prototype and the matching errcheck to a ctypes function.
        func.restype = restype
        func.argtypes = argtypes
        if issubclass(restype, HANDLE_IHV):
            func.errcheck = errcheck_ihv
        elif issubclass(restype, DWORD_IDV):
            func.errcheck = errcheck_idv
        else:
            func.errcheck = errcheck_bool

    # https://msdn.microsoft.com/en-us/library/ms687032
    _win(kernel32.WaitForSingleObject, DWORD_IDV,
         wintypes.HANDLE,  # _In_ hHandle
         wintypes.DWORD)  # _In_ dwMilliseconds

    # https://msdn.microsoft.com/en-us/library/ms683231
    _win(kernel32.GetStdHandle, HANDLE_IHV,
         wintypes.DWORD)  # _In_ nStdHandle

    # https://msdn.microsoft.com/en-us/library/ms724211
    _win(kernel32.CloseHandle, wintypes.BOOL,
         wintypes.HANDLE)  # _In_ hObject

    # https://msdn.microsoft.com/en-us/library/ms724935
    _win(kernel32.SetHandleInformation, wintypes.BOOL,
         wintypes.HANDLE,  # _In_ hObject
         wintypes.DWORD,  # _In_ dwMask
         wintypes.DWORD)  # _In_ dwFlags

    # https://msdn.microsoft.com/en-us/library/ms724251
    _win(kernel32.DuplicateHandle, wintypes.BOOL,
         wintypes.HANDLE,  # _In_ hSourceProcessHandle,
         wintypes.HANDLE,  # _In_ hSourceHandle,
         wintypes.HANDLE,  # _In_ hTargetProcessHandle,
         LPHANDLE,  # _Out_ lpTargetHandle,
         wintypes.DWORD,  # _In_ dwDesiredAccess,
         wintypes.BOOL,  # _In_ bInheritHandle,
         wintypes.DWORD)  # _In_ dwOptions

    # https://msdn.microsoft.com/en-us/library/ms683179
    _win(kernel32.GetCurrentProcess, wintypes.HANDLE)

    # https://msdn.microsoft.com/en-us/library/ms683189
    _win(kernel32.GetExitCodeProcess, wintypes.BOOL,
         wintypes.HANDLE,  # _In_ hProcess,
         LPDWORD)  # _Out_ lpExitCode

    # https://msdn.microsoft.com/en-us/library/aa365152
    _win(kernel32.CreatePipe, wintypes.BOOL,
         PHANDLE,  # _Out_ hReadPipe,
         PHANDLE,  # _Out_ hWritePipe,
         LPSECURITY_ATTRIBUTES,  # _In_opt_ lpPipeAttributes,
         wintypes.DWORD)  # _In_ nSize

    # https://msdn.microsoft.com/en-us/library/ms682431
    _win(advapi32.CreateProcessWithLogonW, wintypes.BOOL,
         wintypes.LPCWSTR,  # _In_ lpUsername
         wintypes.LPCWSTR,  # _In_opt_ lpDomain
         wintypes.LPCWSTR,  # _In_ lpPassword
         wintypes.DWORD,  # _In_ dwLogonFlags
         wintypes.LPCWSTR,  # _In_opt_ lpApplicationName
         wintypes.LPWSTR,  # _Inout_opt_ lpCommandLine
         wintypes.DWORD,  # _In_ dwCreationFlags
         wintypes.LPCWSTR,  # _In_opt_ lpEnvironment
         wintypes.LPCWSTR,  # _In_opt_ lpCurrentDirectory
         LPSTARTUPINFO,  # _In_ lpStartupInfo
         LPPROCESS_INFORMATION)  # _Out_ lpProcessInformation

    # High-level wrappers

    def DuplicateHandle(hsrc=kernel32.GetCurrentProcess(),
                        srchandle=kernel32.GetCurrentProcess(),
                        htgt=kernel32.GetCurrentProcess(),
                        access=0, inherit=False,
                        options=win32con.DUPLICATE_SAME_ACCESS):
        # Duplicate ``srchandle`` into the target process and return the
        # raw duplicated handle value.
        tgthandle = wintypes.HANDLE()
        kernel32.DuplicateHandle(hsrc, srchandle,
                                 htgt, ctypes.byref(tgthandle),
                                 access, inherit, options)
        return tgthandle.value

    def CreatePipe(inherit_read=False, inherit_write=False):
        # Create an anonymous pipe, optionally marking either end
        # inheritable, and return (read_handle, write_handle).
        read, write = wintypes.HANDLE(), wintypes.HANDLE()
        kernel32.CreatePipe(ctypes.byref(read), ctypes.byref(write), None, 0)
        if inherit_read:
            kernel32.SetHandleInformation(read, win32con.HANDLE_FLAG_INHERIT,
                                          win32con.HANDLE_FLAG_INHERIT)
        if inherit_write:
            kernel32.SetHandleInformation(write, win32con.HANDLE_FLAG_INHERIT,
                                          win32con.HANDLE_FLAG_INHERIT)
        return read.value, write.value

    def CreateProcessWithLogonW(username=None,
                                domain=None,
                                password=None,
                                logonflags=0,
                                applicationname=None,
                                commandline=None,
                                creationflags=0,
                                environment=None,
                                currentdirectory=None,
                                startupinfo=None):
        # Thin wrapper around advapi32.CreateProcessWithLogonW returning
        # the populated PROCESS_INFORMATION structure.
        creationflags |= win32con.CREATE_UNICODE_ENVIRONMENT
        if commandline is not None:
            # The API may modify the command line, so pass a mutable buffer.
            commandline = ctypes.create_unicode_buffer(commandline)
        if startupinfo is None:
            startupinfo = STARTUPINFO()
        process_info = PROCESS_INFORMATION()
        advapi32.CreateProcessWithLogonW(username,
                                         domain,
                                         password,
                                         logonflags,
                                         applicationname,
                                         commandline,
                                         creationflags,
                                         environment,
                                         currentdirectory,
                                         ctypes.byref(startupinfo),
                                         ctypes.byref(process_info))
        return process_info
def make_inheritable(token):
    '''
    Return an inheritable duplicate of ``token`` within the current process.
    '''
    current_process = win32api.GetCurrentProcess()
    return win32api.DuplicateHandle(
        current_process,
        token,
        current_process,
        0,
        1,  # bInheritHandle: child processes may inherit the duplicate
        win32con.DUPLICATE_SAME_ACCESS)
def runas_system(cmd, username, password):
    '''
    Run ``cmd`` as ``username`` via CreateProcessAsUser.

    Returns a dict with ``pid``, ``stdout``, ``stderr`` and ``retcode``.
    '''
    # This only works as system, when salt is running as a service for example

    # Check for a domain
    domain = '.'
    if '@' in username:
        username, domain = username.split('@')
    if '\\' in username:
        domain, username = username.split('\\')

    # Get User Token
    token = win32security.LogonUser(username,
                                    domain,
                                    password,
                                    win32con.LOGON32_LOGON_INTERACTIVE,
                                    win32con.LOGON32_PROVIDER_DEFAULT)
    try:
        # Get Unrestricted Token (UAC) if this is an Admin Account
        elevated_token = win32security.GetTokenInformation(
            token, win32security.TokenLinkedToken)

        # Get list of privileges this token contains
        privileges = win32security.GetTokenInformation(
            elevated_token, win32security.TokenPrivileges)

        # Create a set of all privileges to be enabled
        enable_privs = set()
        for luid, flags in privileges:
            enable_privs.add((luid, win32con.SE_PRIVILEGE_ENABLED))

        # Enable the privileges
        win32security.AdjustTokenPrivileges(elevated_token, 0, enable_privs)

    except win32security.error as exc:
        # User doesn't have admin, use existing token
        if exc[0] == winerror.ERROR_NO_SUCH_LOGON_SESSION \
                or exc[0] == winerror.ERROR_PRIVILEGE_NOT_HELD:
            elevated_token = token
        else:
            raise

    # Get Security Attributes
    security_attributes = win32security.SECURITY_ATTRIBUTES()
    security_attributes.bInheritHandle = 1

    # Create a pipe to set as stdout in the child. The write handle needs to be
    # inheritable.
    stdin_read, stdin_write = win32pipe.CreatePipe(security_attributes, 0)
    stdin_read = make_inheritable(stdin_read)

    stdout_read, stdout_write = win32pipe.CreatePipe(security_attributes, 0)
    stdout_write = make_inheritable(stdout_write)

    stderr_read, stderr_write = win32pipe.CreatePipe(security_attributes, 0)
    stderr_write = make_inheritable(stderr_write)

    # Get startup info structure
    startup_info = win32process.STARTUPINFO()
    startup_info.dwFlags = win32con.STARTF_USESTDHANDLES
    startup_info.hStdInput = stdin_read
    startup_info.hStdOutput = stdout_write
    startup_info.hStdError = stderr_write

    # Get User Environment
    user_environment = win32profile.CreateEnvironmentBlock(token, False)

    # Build command
    cmd = 'cmd /c {0}'.format(cmd)

    # Run command and return process info structure
    procArgs = (None,
                cmd,
                security_attributes,
                security_attributes,
                1,
                0,
                user_environment,
                None,
                startup_info)

    hProcess, hThread, PId, TId = \
        win32process.CreateProcessAsUser(elevated_token, *procArgs)

    # Close the child's ends of the pipes in this process so reads below
    # see EOF once the child exits.
    if stdin_read is not None:
        stdin_read.Close()
    if stdout_write is not None:
        stdout_write.Close()
    if stderr_write is not None:
        stderr_write.Close()
    hThread.Close()

    # Initialize ret and set first element
    ret = {'pid': PId}

    # Get Standard Out
    fd_out = msvcrt.open_osfhandle(stdout_read, os.O_RDONLY | os.O_TEXT)
    with os.fdopen(fd_out, 'r') as f_out:
        ret['stdout'] = f_out.read()

    # Get Standard Error
    fd_err = msvcrt.open_osfhandle(stderr_read, os.O_RDONLY | os.O_TEXT)
    with os.fdopen(fd_err, 'r') as f_err:
        ret['stderr'] = f_err.read()

    # Get Return Code
    if win32event.WaitForSingleObject(hProcess, win32event.INFINITE) == win32con.WAIT_OBJECT_0:
        exitcode = win32process.GetExitCodeProcess(hProcess)
        ret['retcode'] = exitcode

    # Close handle to process
    win32api.CloseHandle(hProcess)

    return ret
def runas(cmd, username, password, cwd=None):
    """
    Run ``cmd`` as another Windows user and capture its output.

    The command is wrapped in ``cmd /c`` and launched through
    ``CreateProcessWithLogonW``; stdout and stderr are redirected
    through inheritable pipes so the parent can read them back.

    cmd
        Command line to execute (wrapped in ``cmd /c``).
    username
        Target account. Accepts ``user``, ``user@domain`` or
        ``DOMAIN\\user`` forms.
    password
        Password for the target account.
    cwd
        Optional working directory for the child process.

    Returns a dict with keys ``pid``, ``stdout``, ``stderr`` and
    (when the wait succeeds) ``retcode``.
    """
    # This only works when not running under the system account
    # Debug mode for example
    if win32api.GetUserName() == 'SYSTEM':
        return runas_system(cmd, username, password)
    # Create a pipe to set as stdout in the child. The write handle needs to be
    # inheritable.
    c2pread, c2pwrite = CreatePipe(inherit_read=False, inherit_write=True)
    errread, errwrite = CreatePipe(inherit_read=False, inherit_write=True)
    # Create inheritable copy of the stdin
    stdin = kernel32.GetStdHandle(STD_INPUT_HANDLE)
    dupin = DuplicateHandle(srchandle=stdin, inherit=True)
    # Get startup info structure
    startup_info = STARTUPINFO(dwFlags=win32con.STARTF_USESTDHANDLES,
                               hStdInput=dupin,
                               hStdOutput=c2pwrite,
                               hStdError=errwrite)
    # Build command
    cmd = 'cmd /c {0}'.format(cmd)
    # Check for a domain
    domain = None
    if '@' in username:
        username, domain = username.split('@')
    if '\\' in username:
        domain, username = username.split('\\')
    # Run command and return process info structure
    process_info = CreateProcessWithLogonW(username=username,
                                           domain=domain,
                                           password=password,
                                           logonflags=LOGON_WITH_PROFILE,
                                           commandline=cmd,
                                           startupinfo=startup_info,
                                           currentdirectory=cwd)
    # Close the parent's copies of the child-side handles so the reads
    # below see EOF once the child process exits.
    kernel32.CloseHandle(dupin)
    kernel32.CloseHandle(c2pwrite)
    kernel32.CloseHandle(errwrite)
    kernel32.CloseHandle(process_info.hThread)
    # Initialize ret and set first element
    ret = {'pid': process_info.dwProcessId}
    # Get Standard Out
    fd_out = msvcrt.open_osfhandle(c2pread, os.O_RDONLY | os.O_TEXT)
    with os.fdopen(fd_out, 'r') as f_out:
        ret['stdout'] = f_out.read()
    # Get Standard Error
    fd_err = msvcrt.open_osfhandle(errread, os.O_RDONLY | os.O_TEXT)
    with os.fdopen(fd_err, 'r') as f_err:
        ret['stderr'] = f_err.read()
    # Get Return Code
    # NOTE(review): with INFINITE this wait normally returns WAIT_OBJECT_0,
    # so 'retcode' is only absent if the wait call itself fails.
    if kernel32.WaitForSingleObject(process_info.hProcess, INFINITE) == \
            win32con.WAIT_OBJECT_0:
        exitcode = wintypes.DWORD()
        kernel32.GetExitCodeProcess(process_info.hProcess,
                                    ctypes.byref(exitcode))
        ret['retcode'] = exitcode.value
    # Close handle to process
    kernel32.CloseHandle(process_info.hProcess)
    return ret
| 36.50547 | 95 | 0.598394 |
7959735b4e492ac6a9a6051f71b39c3d67384cb7 | 3,561 | py | Python | dpctl/tensor/_device.py | reazulhoque/dpctl | 27634efff7bcaf2096d3e236d9739e1a25e0d99e | [
"Apache-2.0"
] | null | null | null | dpctl/tensor/_device.py | reazulhoque/dpctl | 27634efff7bcaf2096d3e236d9739e1a25e0d99e | [
"Apache-2.0"
] | 6 | 2021-07-08T08:08:25.000Z | 2021-09-10T13:55:55.000Z | dpctl/tensor/_device.py | 1e-to/dpctl | 29c2cbc34a82f7007f8e170d9b2548ab3e2b48d4 | [
"Apache-2.0"
] | null | null | null | # Data Parallel Control (dpctl)
#
# Copyright 2020-2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dpctl
class Device:
    """
    Array-API "device" abstraction for dpctl.

    A thin wrapper around :class:`dpctl.SyclQueue` with custom repr
    formatting.  There is no public constructor; instances are built
    with the :meth:`create_device` class method, which accepts the
    values allowed for the ``device=`` keyword in Array-API functions.
    The underlying SYCL objects are exposed through the ``sycl_queue``,
    ``sycl_context`` and ``sycl_device`` properties.
    """

    def __new__(cls, *args, **kwargs):
        # Direct construction is forbidden; use Device.create_device().
        raise TypeError("No public constructor")

    @classmethod
    def create_device(cls, dev):
        """
        Device.create_device(device)

        Builds a :class:`.Device` from ``dev``, which may be ``None``
        (default queue), another :class:`.Device`, a
        :class:`dpctl.SyclQueue`, a root :class:`dpctl.SyclDevice`, or
        anything else :class:`dpctl.SyclQueue` accepts.

        Raises:
            ValueError: if ``dev`` is a :class:`dpctl.SyclDevice` that is
                a sub-device, making the offload target ambiguous.
            SyclQueueCreationError: if a :class:`dpctl.SyclQueue` could
                not be created from ``dev``.
        """
        instance = super().__new__(cls)
        if isinstance(dev, Device):
            queue = dev.sycl_queue
        elif isinstance(dev, dpctl.SyclQueue):
            queue = dev
        elif isinstance(dev, dpctl.SyclDevice):
            if dev.parent_device is not None:
                raise ValueError(
                    "Using non-root device {} to specify offloading "
                    "target is ambiguous. Please use dpctl.SyclQueue "
                    "targeting this device".format(dev)
                )
            queue = dpctl.SyclQueue(dev)
        elif dev is None:
            queue = dpctl.SyclQueue()
        else:
            queue = dpctl.SyclQueue(dev)
        instance.sycl_queue_ = queue
        return instance

    @property
    def sycl_queue(self):
        """
        :class:`dpctl.SyclQueue` used to offload to this :class:`.Device`.
        """
        return self.sycl_queue_

    @property
    def sycl_context(self):
        """
        :class:`dpctl.SyclContext` associated with this :class:`.Device`.
        """
        return self.sycl_queue_.sycl_context

    @property
    def sycl_device(self):
        """
        :class:`dpctl.SyclDevice` targeted by this :class:`.Device`.
        """
        return self.sycl_queue_.sycl_device

    def __repr__(self):
        try:
            device = self.sycl_device
        except AttributeError:
            # create_device was never called on this instance.
            raise ValueError(
                "Instance of {} is not initialized".format(self.__class__)
            )
        try:
            return "Device({})".format(device.filter_string)
        except TypeError:
            # filter_string is unavailable for sub-devices; fall back to
            # the queue's own repr.
            return repr(self.sycl_queue)
| 32.972222 | 80 | 0.590284 |
795973b7ca99aafd206d9a8f79b933b27e6313d2 | 9,587 | py | Python | jishaku/features/python.py | Murilobeluco/DisnakeWavelinkBot | 51fb2931883f9f9c262de6028c339d10bbc761d7 | [
"MIT"
] | null | null | null | jishaku/features/python.py | Murilobeluco/DisnakeWavelinkBot | 51fb2931883f9f9c262de6028c339d10bbc761d7 | [
"MIT"
] | null | null | null | jishaku/features/python.py | Murilobeluco/DisnakeWavelinkBot | 51fb2931883f9f9c262de6028c339d10bbc761d7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
jishaku.features.python
~~~~~~~~~~~~~~~~~~~~~~~~
The jishaku Python evaluation/execution commands.
:copyright: (c) 2021 Devon (Gorialis) R
:license: MIT, see LICENSE for more details.
"""
import io
import disnake
import mystbin
from disnake.ext import commands
from jishaku.codeblocks import codeblock_converter
from jishaku.exception_handling import ReplResponseReactor
from jishaku.features.baseclass import Feature
from jishaku.flags import Flags
from jishaku.functools import AsyncSender
from jishaku.paginators import PaginatorInterface, WrappedPaginator, use_file_check
from jishaku.repl import (
AsyncCodeExecutor,
Scope,
all_inspections,
disassemble,
get_var_dict_from_ctx,
)
class PythonFeature(Feature):
    """
    Feature containing the Python-related commands

    Implements the `jsk py` family of commands: REPL evaluation,
    inspection, disassembly, and scope-retention toggling.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Internal REPL scope, used only while retention is enabled.
        self._scope = Scope()
        # Whether REPL variables persist between invocations.
        self.retain = Flags.RETAIN
        # Last REPL result, exposed to user code as `_`.
        self.last_result = None
        # NOTE(review): this client is never closed anywhere in the class —
        # confirm mystbin.Client does not need explicit cleanup.
        self.myst_bin_client = mystbin.Client()

    @property
    def scope(self):
        """
        Gets a scope for use in REPL.
        If retention is on, this is the internal stored scope,
        otherwise it is always a new Scope.
        """
        if self.retain:
            return self._scope
        return Scope()

    @Feature.Command(parent="jsk", name="retain")
    async def jsk_retain(self, ctx: commands.Context, *, toggle: bool = None):
        """
        Turn variable retention for REPL on or off.
        Provide no argument for current status.
        """
        # No argument: report current state only.
        if toggle is None:
            if self.retain:
                return await ctx.send("Variable retention is set to ON.")
            return await ctx.send("Variable retention is set to OFF.")
        if toggle:
            if self.retain:
                return await ctx.channel.send(
                    "Variable retention is already set to ON."
                )
            self.retain = True
            # Fresh scope so previously leaked names do not persist.
            self._scope = Scope()
            return await ctx.channel.send(
                "Variable retention is ON. Future REPL sessions will retain their scope."
            )
        if not self.retain:
            return await ctx.channel.send("Variable retention is already set to OFF.")
        self.retain = False
        return await ctx.channel.send(
            "Variable retention is OFF. Future REPL sessions will dispose their scope when done."
        )

    async def jsk_python_result_handling(self, ctx: commands.Context, result):
        """
        Determines what is done with a result when it comes out of jsk py.
        This allows you to override how this is done without having to rewrite the command itself.
        What you return is what gets stored in the temporary _ variable.
        """
        # Discord objects are sent natively rather than repr()'d.
        if isinstance(result, disnake.Message):
            return await ctx.send(f"<Message <{result.jump_url}>>")
        if isinstance(result, disnake.File):
            return await ctx.send(file=result)
        if isinstance(result, disnake.Embed):
            return await ctx.send(embed=result)
        if isinstance(result, PaginatorInterface):
            return await result.send_to(ctx)
        if not isinstance(result, str):
            # repr all non-strings
            result = repr(result)
        # Eventually the below handling should probably be put somewhere else
        if len(result) <= 2000:
            if result.strip() == "":
                # Zero-width space so Discord accepts the "empty" message.
                result = "\u200b"
            # Redact the bot token in case user code echoed it.
            return await ctx.send(
                result.replace(
                    self.bot.http.token, "You were trying to leak my token!. Dumbass."
                )  # For the users,
                # who try to do stupid things with jsk.
            )
        if use_file_check(ctx, len(result)):  # File "full content" preview limit
            # disnake's desktop and web client now supports an interactive file content
            # display for files encoded in UTF-8.
            # Since this avoids escape issues and is more intuitive than pagination for
            # long results, it will now be prioritized over PaginatorInterface if the
            # resultant content is below the filesize threshold
            # NOTE(review): despite the comments above, this fork uploads to
            # mystbin instead of attaching a file — the result leaves Discord.
            paste = await self.myst_bin_client.post(result, syntax="python")
            return await ctx.channel.send(
                embed=disnake.Embed(
                    title="Python Result",
                    description=f"As the result was too long to display, I've posted it to mystbin.\n"
                    f"[Click here to view it]({paste.url})",
                    color=disnake.Colour.random(),
                )
            )
        # inconsistency here, results get wrapped in codeblocks when they are too large
        # but don't if they're not. probably not that bad, but noting for later review
        paginator = WrappedPaginator(prefix="```py", suffix="```", max_size=1985)
        paginator.add_line(result)
        interface = PaginatorInterface(ctx.bot, paginator, owner=ctx.author)
        return await interface.send_to(ctx)

    @Feature.Command(parent="jsk", name="py", aliases=["python"])
    async def jsk_python(self, ctx: commands.Context, *, argument: codeblock_converter):
        """
        Direct evaluation of Python code.
        """
        # Context-derived variables (bot, ctx, message, ...) injected
        # into the executed code's namespace.
        arg_dict = get_var_dict_from_ctx(ctx, Flags.SCOPE_PREFIX)
        arg_dict["_"] = self.last_result
        scope = self.scope
        try:
            async with ReplResponseReactor(ctx.message):
                with self.submit(ctx):
                    executor = AsyncCodeExecutor(
                        argument.content, scope, arg_dict=arg_dict
                    )
                    async for send, result in AsyncSender(executor):
                        if result is None:
                            continue
                        self.last_result = result
                        send(await self.jsk_python_result_handling(ctx, result))
        finally:
            # Drop the injected context variables so they do not leak
            # into a retained scope.
            scope.clear_intersection(arg_dict)

    @Feature.Command(
        parent="jsk",
        name="py_inspect",
        aliases=["pyi", "python_inspect", "pythoninspect"],
    )
    async def jsk_python_inspect(
        self, ctx: commands.Context, *, argument: codeblock_converter
    ):  # pylint: disable=too-many-locals
        """
        Evaluation of Python code with inspect information.
        """
        arg_dict = get_var_dict_from_ctx(ctx, Flags.SCOPE_PREFIX)
        arg_dict["_"] = self.last_result
        scope = self.scope
        try:
            async with ReplResponseReactor(ctx.message):
                with self.submit(ctx):
                    executor = AsyncCodeExecutor(
                        argument.content, scope, arg_dict=arg_dict
                    )
                    async for send, result in AsyncSender(executor):
                        self.last_result = result
                        # Escape codeblock fences and redact the token in the
                        # header line.
                        header = (
                            repr(result)
                            .replace("``", "`\u200b`")
                            .replace(self.bot.http.token, "[token omitted]")
                        )
                        if len(header) > 485:
                            header = header[0:482] + "..."
                        lines = [f"=== {header} ===", ""]
                        for name, res in all_inspections(result):
                            lines.append(f"{name:16.16} :: {res}")
                        text = "\n".join(lines)
                        if use_file_check(
                            ctx, len(text)
                        ):  # File "full content" preview limit
                            send(
                                await ctx.send(
                                    file=disnake.File(
                                        filename="inspection.prolog",
                                        fp=io.BytesIO(text.encode("utf-8")),
                                    )
                                )
                            )
                        else:
                            paginator = WrappedPaginator(
                                prefix="```prolog", max_size=1985
                            )
                            paginator.add_line(text)
                            interface = PaginatorInterface(
                                ctx.bot, paginator, owner=ctx.author
                            )
                            send(await interface.send_to(ctx))
        finally:
            scope.clear_intersection(arg_dict)

    @Feature.Command(parent="jsk", name="dis", aliases=["disassemble"])
    async def jsk_disassemble(
        self, ctx: commands.Context, *, argument: codeblock_converter
    ):
        """
        Disassemble Python code into bytecode.
        """
        arg_dict = get_var_dict_from_ctx(ctx, Flags.SCOPE_PREFIX)
        async with ReplResponseReactor(ctx.message):
            text = "\n".join(disassemble(argument.content, arg_dict=arg_dict))
            if use_file_check(ctx, len(text)):  # File "full content" preview limit
                await ctx.send(
                    file=disnake.File(
                        filename="dis.py", fp=io.BytesIO(text.encode("utf-8"))
                    )
                )
            else:
                paginator = WrappedPaginator(prefix="```py", max_size=1985)
                paginator.add_line(text)
                interface = PaginatorInterface(ctx.bot, paginator, owner=ctx.author)
                await interface.send_to(ctx)
795973c13c39aee874cbc35fae7d3e51ed6a689c | 2,333 | py | Python | backend/core/models/configurations.py | google/co-op-4-all | 6bf68ea902da552e01c3647787f7212c541050e6 | [
"Apache-2.0"
] | 3 | 2022-01-28T18:30:56.000Z | 2022-03-30T17:39:05.000Z | backend/core/models/configurations.py | google/co-op-4-all | 6bf68ea902da552e01c3647787f7212c541050e6 | [
"Apache-2.0"
] | null | null | null | backend/core/models/configurations.py | google/co-op-4-all | 6bf68ea902da552e01c3647787f7212c541050e6 | [
"Apache-2.0"
] | 1 | 2022-02-21T12:49:01.000Z | 2022-02-21T12:49:01.000Z | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime, timezone
from typing import List, Optional, Union
from pydantic import BaseModel, conint, conlist, constr, validator
from core.models.destinations import Dv360Destination, GoogleAdsDestination
class Filter(BaseModel):
type: str
data: conlist(str, min_items=1)
class RetailerConfig(BaseModel):
name: constr(regex="^[A-Za-z0-9\_]{3,50}$")
bq_ga_table: constr(regex="^([A-Za-z0-9\-]{6,30})\.([A-Za-z0-9\_]{1,30})\.(events\_\*)$")
time_zone: constr(regex="^[A-Za-z\_\/]{3,25}$")
currency: constr(regex="^[A-Za-z]{3}$")
coop_max_backfill: conint(ge=30, le=180) = 90
is_active: bool = True
created_at: datetime = None
modified_at: datetime = None
bq_updated_at: datetime = None
@validator('created_at', pre=True, always=True)
def default_ts_created(cls, v):
return v or datetime.now(timezone.utc)
@validator('modified_at', pre=True, always=True)
def default_ts_updated(cls, v):
return datetime.now(timezone.utc)
class CoopCampaignConfig(BaseModel):
name: constr(regex="^[A-Za-z0-9\_]{3,50}$")
retailer_name: constr(regex="^[A-Za-z0-9\_]{3,50}$")
utm_campaigns: conlist(str, min_items=1)
filters: List[Filter]
destinations: Optional[List[Union[GoogleAdsDestination, Dv360Destination]]]
attribution_window: conint(ge=1, le=30) = 7
is_active: bool = True
created_at: datetime = None
modified_at: datetime = None
bq_updated_at: datetime = None
@validator('created_at', pre=True, always=True)
def default_ts_created(cls, v):
return v or datetime.now(timezone.utc)
@validator('modified_at', pre=True, always=True)
def default_ts_updated(cls, v):
return datetime.now(timezone.utc)
| 35.892308 | 93 | 0.704243 |
795974d0f50b851a49e32f4973d82086fb2a4476 | 2,848 | py | Python | es_rnn/data_loading.py | petercwill/temp | 5988f169744b50e138c9e764c93e6606f9906dd7 | [
"MIT"
] | null | null | null | es_rnn/data_loading.py | petercwill/temp | 5988f169744b50e138c9e764c93e6606f9906dd7 | [
"MIT"
] | null | null | null | es_rnn/data_loading.py | petercwill/temp | 5988f169744b50e138c9e764c93e6606f9906dd7 | [
"MIT"
] | null | null | null | import numpy as np
import torch
from torch.utils.data import Dataset
import pandas as pd
def read_file(file_location):
    """Read an M4-style CSV file of time series.

    Each line after the header is ``"<id>",v1,v2,...``; empty trailing
    fields are dropped, so individual series may have different lengths
    (a ragged collection).

    Parameters
    ----------
    file_location : str
        Path to the CSV file.

    Returns
    -------
    np.ndarray
        One 1-D float array per series.  For ragged input this is an
        object array.
    """
    series = []
    with open(file_location, 'r') as file:
        data = file.read().split("\n")
    # Skip the header row (index 0) and the trailing empty chunk produced
    # by splitting a newline-terminated file (index len(data) - 1).
    for i in range(1, len(data) - 1):
        row = data[i].replace('"', '').split(',')
        # row[0] is the series id; it is not needed by any caller.
        series.append(np.array([float(j) for j in row[1:] if j != ""]))
    try:
        series = np.array(series)
    except ValueError:
        # NumPy >= 1.24 refuses to build a ragged array implicitly
        # (NEP 34); fall back to an explicit object array, matching the
        # historical behaviour for variable-length series.
        series = np.array(series, dtype=object)
    return series
def create_val_set(train, output_size):
    """Hold out the final ``output_size`` observations of each series.

    Returns the held-out tails stacked into a NumPy array.  The caller's
    ``train`` object is left untouched.
    """
    return np.array([series[-output_size:] for series in train])
def chop_series(train, chop_val):
    """Filter out series shorter than ``chop_val`` and truncate the rest.

    Returns ``(chopped, keep_mask)`` where ``chopped`` holds the last
    ``chop_val`` observations of every surviving series and ``keep_mask``
    aligns with the original ordering of ``train``.
    """
    keep_mask = [len(series) >= chop_val for series in train]
    chopped = [
        series[-chop_val:]
        for series, keep in zip(train, keep_mask)
        if keep
    ]
    return chopped, keep_mask
def create_datasets(train_file_location, test_file_location, output_size):
    """Load train/test files and carve a validation set off the train tails.

    Returns ``(train, val, test)`` where ``val`` holds the last
    ``output_size`` points of every training series.
    """
    train_series = read_file(train_file_location)
    test_series = read_file(test_file_location)
    validation_series = create_val_set(train_series, output_size)
    return train_series, validation_series, test_series
class SeriesDataset(Dataset):
    """Torch dataset pairing each (chopped) training series with its
    validation/test counterparts and a one-hot category vector."""

    def __init__(self, dataTrain, dataVal, dataTest, info, variable, chop_value, device):
        # Drop series shorter than chop_value; `mask` records which
        # survive so val/test/category rows can be filtered consistently.
        dataTrain, mask = chop_series(dataTrain, chop_value)
        # One-hot encode the category of each series of the requested
        # sampling period.  Assumes `info` has 'SP' and 'category'
        # columns (M4 info file layout) — TODO confirm against caller.
        self.dataInfoCatOHE = pd.get_dummies(info[info['SP'] == variable]['category'])
        self.dataInfoCatHeaders = np.array([i for i in self.dataInfoCatOHE.columns.values])
        self.dataInfoCat = torch.from_numpy(self.dataInfoCatOHE[mask].values).float()
        self.dataTrain = [torch.tensor(dataTrain[i]) for i in range(len(dataTrain))]  # ALREADY MASKED IN CHOP FUNCTION
        self.dataVal = [torch.tensor(dataVal[i]) for i in range(len(dataVal)) if mask[i]]
        self.dataTest = [torch.tensor(dataTest[i]) for i in range(len(dataTest)) if mask[i]]
        self.device = device

    def __len__(self):
        # Number of series that survived the chop filter.
        return len(self.dataTrain)

    def __getitem__(self, idx):
        # Tensors are moved to the target device lazily, per access.
        return self.dataTrain[idx].to(self.device), \
               self.dataVal[idx].to(self.device), \
               self.dataTest[idx].to(self.device), \
               self.dataInfoCat[idx].to(self.device), \
               idx
def collate_lines(seq_list):
    """Collate dataset samples into per-field batch lists, ordered by
    descending training-series length (longest first)."""
    trains, vals, tests, info_cats, idxs = zip(*seq_list)
    order = sorted(
        range(len(trains)), key=lambda i: len(trains[i]), reverse=True
    )

    def _reorder(items):
        # Apply the length-sorted permutation to one field.
        return [items[i] for i in order]

    return (
        _reorder(trains),
        _reorder(vals),
        _reorder(tests),
        _reorder(info_cats),
        _reorder(idxs),
    )
| 34.731707 | 119 | 0.651334 |
795975da64fb4115ce1069ec2a42cf5c8ed05ee6 | 5,512 | py | Python | models/official/detection/executor/tpu_executor.py | DanielDimanov/tpu | 883065e163e4f7745a60aa726b426cdca35d38aa | [
"Apache-2.0"
] | 1 | 2019-08-09T10:23:53.000Z | 2019-08-09T10:23:53.000Z | models/official/detection/executor/tpu_executor.py | DanielDimanov/tpu | 883065e163e4f7745a60aa726b426cdca35d38aa | [
"Apache-2.0"
] | null | null | null | models/official/detection/executor/tpu_executor.py | DanielDimanov/tpu | 883065e163e4f7745a60aa726b426cdca35d38aa | [
"Apache-2.0"
] | 1 | 2019-08-09T10:26:24.000Z | 2019-08-09T10:26:24.000Z | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An executor class for running model on TPUs."""
import collections
import os
import numpy as np
import tensorflow as tf
from evaluation import factory
def write_summary(logs, summary_writer, current_step):
  """Write out summaries of current training step for the checkpoint.

  Args:
    logs: Dict mapping summary tag (str) to a scalar value.
    summary_writer: A TF1 `tf.summary.FileWriter` that receives the
      serialized summary.
    current_step: Global step (int) the summary is recorded at.
  """
  # NOTE(review): tf.Summary protos do not depend on a graph; the scope
  # here appears purely defensive (avoids touching the default graph) —
  # confirm before removing.
  with tf.Graph().as_default():
    summaries = [tf.Summary.Value(tag=tag, simple_value=value)
                 for tag, value in logs.items()]
    tf_summary = tf.Summary(value=summaries)
    summary_writer.add_summary(tf_summary, current_step)
class TpuExecutor(object):
  """An executor class for running jobs on TPUs."""

  def __init__(self, model_fn, params):
    """Builds a TPUEstimator for `model_fn` as configured by `params`.

    Args:
      model_fn: A model function compatible with
        `tf.contrib.tpu.TPUEstimator`.
      params: A parameter object (attribute-style access) carrying
        `model_dir`, `use_tpu`, `platform`, `train`, `eval` and
        `predict` sections.
    """
    self._model_dir = params.model_dir
    # Sets up evaluator.
    self._evaluator = factory.evaluator_generator(params.eval)

    input_partition_dims = None
    num_cores_per_replica = None

    if params.use_tpu:
      tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
          params.platform.tpu,
          zone=params.platform.tpu_zone,
          project=params.platform.gcp_project)
      tpu_grpc_url = tpu_cluster_resolver.get_master()
      tf.Session.reset(tpu_grpc_url)

      # If the input image is transposed (from NHWC to HWCN), the partition
      # dimensions also need to be transposed the same way.
      def _maybe_transpose(input_partition_dims):
        if input_partition_dims and params.train.transpose_input:
          return [input_partition_dims[i] for i in [1, 2, 3, 0]]
        else:
          return input_partition_dims

      if params.train.input_partition_dims is not None:
        num_cores_per_replica = params.train.num_cores_per_replica
        input_partition_dims = params.train.input_partition_dims
        # Parse 'None' into None.
        input_partition_dims = [
            None if x == 'None' else _maybe_transpose(x)
            for x in input_partition_dims
        ]
    else:
      tpu_cluster_resolver = None

    # Sets up config for TPUEstimator.
    tpu_config = tf.contrib.tpu.TPUConfig(
        params.train.iterations_per_loop,
        num_cores_per_replica=num_cores_per_replica,
        input_partition_dims=input_partition_dims,
        per_host_input_for_training=tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2  # pylint: disable=line-too-long
    )

    run_config = tf.contrib.tpu.RunConfig(
        cluster=tpu_cluster_resolver,
        evaluation_master=params.platform.eval_master,
        model_dir=params.model_dir,
        log_step_count_steps=params.train.iterations_per_loop,
        tpu_config=tpu_config,
    )

    self._estimator = tf.contrib.tpu.TPUEstimator(
        model_fn=model_fn,
        use_tpu=params.use_tpu,
        train_batch_size=params.train.train_batch_size,
        eval_batch_size=params.eval.eval_batch_size,
        predict_batch_size=params.predict.predict_batch_size,
        config=run_config,
        params=params.as_dict())

  def train(self, input_fn, steps):
    """Training the model with training data and labels in input_fn."""
    self._estimator.train(input_fn=input_fn, max_steps=steps)

  def evaluate(self, input_fn, eval_steps, checkpoint_path=None):
    """Evaluating the model with data and labels in input_fn.

    Args:
      input_fn: Eval `input function` for tf.Estimator.
      eval_steps: Int - the number of steps to evaluate.
      checkpoint_path: String - the checkpoint path to evaluate. If it is None,
        the latest checkpoint will be inferred from `model_dir` of `Estimator`.

    Returns:
      A dictionary as evaluation metrics.
    """
    if not checkpoint_path:
      checkpoint_path = self._estimator.latest_checkpoint()
    # Checkpoint basenames look like 'model.ckpt-<step>'.
    current_step = int(os.path.basename(checkpoint_path).split('-')[1])
    predictor = self._estimator.predict(
        input_fn=input_fn,
        checkpoint_path=checkpoint_path,
        yield_single_examples=False)
    losses = collections.defaultdict(lambda: 0.0)
    for _ in range(eval_steps):
      # Bug fix: `predictor.next()` is Python-2-only generator syntax;
      # the built-in next() works on both Python 2 and 3.
      outputs = next(predictor)
      predictions = {}
      groundtruths = {}
      # Outputs are multiplexed through one dict via name prefixes:
      # 'pred_*' predictions, 'gt_*' groundtruths, 'loss_*' scalar losses.
      for key, val in outputs.items():
        if key[0:5] == 'pred_':
          predictions[key[5::]] = val
        if key[0:3] == 'gt_':
          groundtruths[key[3::]] = val
        if key[0:5] == 'loss_':
          # Accumulate the mean loss, averaged over eval_steps.
          losses[key[5::]] += (np.mean(val) / eval_steps)
      self._evaluator.update(predictions, groundtruths)
    metrics = self._evaluator.evaluate()

    # Summary writer writes out eval metrics.
    output_dir = os.path.join(self._model_dir, 'eval')
    tf.gfile.MakeDirs(output_dir)
    summary_writer = tf.summary.FileWriter(output_dir)
    write_summary(metrics, summary_writer, current_step)
    write_summary(losses, summary_writer, current_step)
    summary_writer.close()
    return metrics

  def predict(self, input_fn):
    """Returns the estimator's prediction generator for `input_fn`."""
    return self._estimator.predict(input_fn=input_fn)
795976fe67067806b05e4f4eb0cc2668e0be0e47 | 5,477 | py | Python | trade/utils/timeseriessplit.py | khoppa/cryptotrade | ee932b1cda5578ddcad60caf930f46cf86b132b6 | [
"Apache-2.0"
] | 3 | 2018-01-04T00:46:12.000Z | 2022-01-01T21:41:16.000Z | trade/utils/timeseriessplit.py | khoppa/cryptotrade | ee932b1cda5578ddcad60caf930f46cf86b132b6 | [
"Apache-2.0"
] | null | null | null | trade/utils/timeseriessplit.py | khoppa/cryptotrade | ee932b1cda5578ddcad60caf930f46cf86b132b6 | [
"Apache-2.0"
] | 1 | 2022-01-01T21:39:15.000Z | 2022-01-01T21:39:15.000Z | from sklearn.model_selection import TimeSeriesSplit
from sklearn.utils import indexable
from sklearn.utils.validation import _num_samples
import numpy as np
class TimeSeriesSplitCustom(TimeSeriesSplit):
    """Time Series cross-validator

    Provides train/test indices to split time series data samples
    that are observed at fixed time intervals, in train/test sets.
    In each split, test indices must be higher than before, and thus shuffling
    in cross validator is inappropriate.

    This cross-validation object is a variation of :class:`KFold`.
    In the kth split, it returns first k folds as train set and the
    (k+1)th fold as test set.

    Note that unlike standard cross-validation methods, successive
    training sets are supersets of those that come before them.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    n_splits : int, default=3
        Number of splits. Must be at least 1.

    Examples
    --------
    >>> from sklearn.model_selection import TimeSeriesSplit
    >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
    >>> y = np.array([1, 2, 3, 4])
    >>> tscv = TimeSeriesSplit(n_splits=3)
    >>> print(tscv)  # doctest: +NORMALIZE_WHITESPACE
    TimeSeriesSplit(n_splits=3)
    >>> for train_index, test_index in tscv.split(X):
    ...    print("TRAIN:", train_index, "TEST:", test_index)
    ...    X_train, X_test = X[train_index], X[test_index]
    ...    y_train, y_test = y[train_index], y[test_index]
    TRAIN: [0] TEST: [1]
    TRAIN: [0 1] TEST: [2]
    TRAIN: [0 1 2] TEST: [3]
    >>> for train_index, test_index in tscv.split(X, fixed_length=True):
    ...     print("TRAIN:", train_index, "TEST:", test_index)
    ...     X_train, X_test = X[train_index], X[test_index]
    ...     y_train, y_test = y[train_index], y[test_index]
    TRAIN: [0] TEST: [1]
    TRAIN: [1] TEST: [2]
    TRAIN: [2] TEST: [3]
    >>> for train_index, test_index in tscv.split(X, fixed_length=True,
    ...     train_splits=2):
    ...     print("TRAIN:", train_index, "TEST:", test_index)
    ...     X_train, X_test = X[train_index], X[test_index]
    ...     y_train, y_test = y[train_index], y[test_index]
    TRAIN: [0 1] TEST: [2]
    TRAIN: [1 2] TEST: [3]

    Notes
    -----
    When ``fixed_length`` is ``False``, the training set has size
    ``i * train_splits * n_samples // (n_splits + 1) + n_samples %
    (n_splits + 1)`` in the ``i``th split, with a test set of size
    ``n_samples//(n_splits + 1) * test_splits``, where ``n_samples``
    is the number of samples. If fixed_length is True, replace ``i``
    in the above formulation with 1, and ignore ``n_samples %
    (n_splits + 1)`` except for the first training set. The number
    of test sets is ``n_splits + 2 - train_splits - test_splits``.
    """

    def split(self, X, y=None, groups=None, fixed_length=False,
              train_splits=1, test_splits=1):
        """Generate indices to split data into training and test set.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.

        y : array-like, shape (n_samples,)
            Always ignored, exists for compatibility.

        groups : array-like, with shape (n_samples,), optional
            Always ignored, exists for compatibility.

        fixed_length : bool, whether training sets should always have
            common length

        train_splits : positive int, for the minimum number of
            splits to include in training sets

        test_splits : positive int, for the number of splits to
            include in the test set

        Returns
        -------
        train : ndarray
            The training set indices for that split.

        test : ndarray
            The testing set indices for that split.
        """
        X, y, groups = indexable(X, y, groups)
        n_samples = _num_samples(X)
        n_splits = self.n_splits
        # The data is partitioned into n_splits + 1 contiguous folds.
        n_folds = n_splits + 1
        train_splits, test_splits = int(train_splits), int(test_splits)
        if n_folds > n_samples:
            raise ValueError(
                ("Cannot have number of folds ={0} greater"
                 " than the number of samples: {1}.").format(n_folds,
                                                             n_samples))
        # NOTE(review): this guard checks that train + test splits fit in
        # the folds, but its message talks about positivity — the two do
        # not quite match; confirm intended semantics before changing.
        if ((n_folds - train_splits - test_splits) < 0 and test_splits > 0):
            raise ValueError(
                ("Both train_splits and test_splits must be positive"
                 " integers."))
        indices = np.arange(n_samples)
        # Each fold holds split_size samples; the remainder
        # (n_samples % n_folds) is absorbed by the first training set.
        split_size = (n_samples // n_folds)
        test_size = split_size * test_splits
        train_size = split_size * train_splits
        # Start index of each test window, stepping one fold at a time.
        test_starts = range(train_size + n_samples % n_folds,
                            n_samples - (test_size - split_size),
                            split_size)
        if fixed_length:
            # Sliding window: every training set has length train_size,
            # except the first, which also takes the remainder samples.
            for i, test_start in zip(range(len(test_starts)),
                                     test_starts):
                rem = 0
                if i == 0:
                    rem = n_samples % n_folds
                yield (indices[(test_start - train_size - rem):test_start],
                       indices[test_start:test_start + test_size])
        else:
            # Expanding window: training set is everything before the
            # test window.
            for test_start in test_starts:
                yield (indices[:test_start],
                       indices[test_start:test_start + test_size])
7959794640b2628017f1cafd29a4d60766e1a50a | 30,711 | py | Python | nltk/corpus/reader/util.py | SamuraiT/nltk3-alpha | 18a1a0ff8697eaeeb5d3c0bc6dad251d5b8fe931 | [
"Apache-2.0"
] | 1 | 2018-08-09T14:30:50.000Z | 2018-08-09T14:30:50.000Z | nltk/corpus/reader/util.py | wangz10/nltk | 171b2cd0061955de0794db4fae5381e47bf450ae | [
"Apache-2.0"
] | null | null | null | nltk/corpus/reader/util.py | wangz10/nltk | 171b2cd0061955de0794db4fae5381e47bf450ae | [
"Apache-2.0"
] | 2 | 2019-10-28T01:33:22.000Z | 2019-10-30T06:43:43.000Z | # Natural Language Toolkit: Corpus Reader Utilities
#
# Copyright (C) 2001-2014 NLTK Project
# Author: Steven Bird <stevenbird1@gmail.com>
# Edward Loper <edloper@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
import os
import bisect
import re
import tempfile
from functools import reduce
try:
import cPickle as pickle
except ImportError:
import pickle
# Use the c version of ElementTree, which is faster, if possible:
try: from xml.etree import cElementTree as ElementTree
except ImportError: from xml.etree import ElementTree
from nltk import compat
from nltk.tokenize import wordpunct_tokenize
from nltk.internals import slice_bounds
from nltk.data import PathPointer, FileSystemPathPointer, ZipFilePathPointer
from nltk.data import SeekableUnicodeStreamReader
from nltk.util import AbstractLazySequence, LazySubsequence, LazyConcatenation, py25
######################################################################
#{ Corpus View
######################################################################
class StreamBackedCorpusView(AbstractLazySequence):
    """
    A 'view' of a corpus file, which acts like a sequence of tokens:
    it can be accessed by index, iterated over, etc.  However, the
    tokens are only constructed as-needed -- the entire corpus is
    never stored in memory at once.

    The constructor to ``StreamBackedCorpusView`` takes two arguments:
    a corpus fileid (specified as a string or as a ``PathPointer``);
    and a block reader.  A "block reader" is a function that reads
    zero or more tokens from a stream, and returns them as a list.  A
    very simple example of a block reader is:

        >>> def simple_block_reader(stream):
        ...     return stream.readline().split()

    This simple block reader reads a single line at a time, and
    returns a single token (consisting of a string) for each
    whitespace-separated substring on the line.

    When deciding how to define the block reader for a given
    corpus, careful consideration should be given to the size of
    blocks handled by the block reader.  Smaller block sizes will
    increase the memory requirements of the corpus view's internal
    data structures (by 2 integers per block).  On the other hand,
    larger block sizes may decrease performance for random access to
    the corpus.  (But note that larger block sizes will *not*
    decrease performance for iteration.)

    Internally, ``CorpusView`` maintains a partial mapping from token
    index to file position, with one entry per block.  When a token
    with a given index *i* is requested, the ``CorpusView`` constructs
    it as follows:

    1. First, it searches the toknum/filepos mapping for the token
       index closest to (but less than or equal to) *i*.

    2. Then, starting at the file position corresponding to that
       index, it reads one block at a time using the block reader
       until it reaches the requested token.

    The toknum/filepos mapping is created lazily: it is initially
    empty, but every time a new block is read, the block's
    initial token is added to the mapping.  (Thus, the toknum/filepos
    map has one entry per block.)

    In order to increase efficiency for random access patterns that
    have high degrees of locality, the corpus view may cache one or
    more blocks.

    :note: Each ``CorpusView`` object internally maintains an open file
        object for its underlying corpus file.  This file should be
        automatically closed when the ``CorpusView`` is garbage collected,
        but if you wish to close it manually, use the ``close()``
        method.  If you access a ``CorpusView``'s items after it has been
        closed, the file object will be automatically re-opened.

    :warning: If the contents of the file are modified during the
        lifetime of the ``CorpusView``, then the ``CorpusView``'s behavior
        is undefined.

    :warning: If a unicode encoding is specified when constructing a
        ``CorpusView``, then the block reader may only call
        ``stream.seek()`` with offsets that have been returned by
        ``stream.tell()``; in particular, calling ``stream.seek()`` with
        relative offsets, or with offsets based on string lengths, may
        lead to incorrect behavior.

    :ivar _block_reader: The function used to read
        a single block from the underlying file stream.
    :ivar _toknum: A list containing the token index of each block
        that has been processed.  In particular, ``_toknum[i]`` is the
        token index of the first token in block ``i``.  Together
        with ``_filepos``, this forms a partial mapping between token
        indices and file positions.
    :ivar _filepos: A list containing the file position of each block
        that has been processed.  In particular, ``_toknum[i]`` is the
        file position of the first character in block ``i``.  Together
        with ``_toknum``, this forms a partial mapping between token
        indices and file positions.
    :ivar _stream: The stream used to access the underlying corpus file.
    :ivar _len: The total number of tokens in the corpus, if known;
        or None, if the number of tokens is not yet known.
    :ivar _eofpos: The character position of the last character in the
        file.  This is calculated when the corpus view is initialized,
        and is used to decide when the end of file has been reached.
    :ivar _cache: A cache of the most recently read block.  It
        is encoded as a tuple (start_toknum, end_toknum, tokens), where
        start_toknum is the token index of the first token in the block;
        end_toknum is the token index of the first token not in the
        block; and tokens is a list of the tokens in the block.
    """
    def __init__(self, fileid, block_reader=None, startpos=0,
                 encoding='utf8'):
        """
        Create a new corpus view, based on the file ``fileid``, and
        read with ``block_reader``.  See the class documentation
        for more information.

        :param fileid: The path to the file that is read by this
            corpus view.  ``fileid`` can either be a string or a
            ``PathPointer``.

        :param startpos: The file position at which the view will
            start reading.  This can be used to skip over preface
            sections.

        :param encoding: The unicode encoding that should be used to
            read the file's contents.  If no encoding is specified,
            then the file's contents will be read as a non-unicode
            string (i.e., a str).
        """
        if block_reader:
            # A block reader given as a constructor argument overrides
            # the (abstract) read_block method on this instance.
            self.read_block = block_reader
        # Initialize our toknum/filepos mapping.
        self._toknum = [0]
        self._filepos = [startpos]
        self._encoding = encoding
        # We don't know our length (number of tokens) yet.
        self._len = None
        self._fileid = fileid
        self._stream = None
        self._current_toknum = None
        """This variable is set to the index of the next token that
        will be read, immediately before ``self.read_block()`` is
        called.  This is provided for the benefit of the block
        reader, which under rare circumstances may need to know
        the current token number."""
        self._current_blocknum = None
        """This variable is set to the index of the next block that
        will be read, immediately before ``self.read_block()`` is
        called.  This is provided for the benefit of the block
        reader, which under rare circumstances may need to know
        the current block number."""
        # Find the length of the file.
        try:
            if isinstance(self._fileid, PathPointer):
                self._eofpos = self._fileid.file_size()
            else:
                self._eofpos = os.stat(self._fileid).st_size
        except Exception as exc:
            raise ValueError('Unable to open or access %r -- %s' %
                             (fileid, exc))
        # Maintain a cache of the most recently read block, to
        # increase efficiency of random access.
        self._cache = (-1, -1, None)
    fileid = property(lambda self: self._fileid, doc="""
        The fileid of the file that is accessed by this view.

        :type: str or PathPointer""")
    def read_block(self, stream):
        """
        Read a block from the input stream.

        :return: a block of tokens from the input stream
        :rtype: list(any)
        :param stream: an input stream
        :type stream: stream
        """
        raise NotImplementedError('Abstract Method')
    def _open(self):
        """
        Open the file stream associated with this corpus view.  This
        is performed automatically if any value is read from the view
        while its file stream is closed.
        """
        if isinstance(self._fileid, PathPointer):
            self._stream = self._fileid.open(self._encoding)
        elif self._encoding:
            self._stream = SeekableUnicodeStreamReader(
                open(self._fileid, 'rb'), self._encoding)
        else:
            self._stream = open(self._fileid, 'rb')
    def close(self):
        """
        Close the file stream associated with this corpus view.  This
        can be useful if you are worried about running out of file
        handles (although the stream should automatically be closed
        upon garbage collection of the corpus view).  If the corpus
        view is accessed after it is closed, it will be automatically
        re-opened.
        """
        if self._stream is not None:
            self._stream.close()
        self._stream = None
    def __len__(self):
        """Return the number of tokens in the underlying corpus file,
        reading to the end of the file the first time it is needed."""
        if self._len is None:
            # iterate_from() sets self._len when it reaches the end
            # of the file:
            for tok in self.iterate_from(self._toknum[-1]): pass
        return self._len
    def __getitem__(self, i):
        """Return the token at index ``i``, or a ``LazySubsequence``
        for a slice; negative indices are supported."""
        if isinstance(i, slice):
            start, stop = slice_bounds(self, i)
            # Check if it's in the cache.
            offset = self._cache[0]
            if offset <= start and stop <= self._cache[1]:
                return self._cache[2][start-offset:stop-offset]
            # Construct & return the result.
            return LazySubsequence(self, start, stop)
        else:
            # Handle negative indices
            if i < 0: i += len(self)
            if i < 0: raise IndexError('index out of range')
            # Check if it's in the cache.
            offset = self._cache[0]
            if offset <= i < self._cache[1]:
                return self._cache[2][i-offset]
            # Use iterate_from to extract it.
            try:
                return next(self.iterate_from(i))
            except StopIteration:
                raise IndexError('index out of range')
    # If we wanted to be thread-safe, then this method would need to
    # do some locking.
    def iterate_from(self, start_tok):
        # Start by feeding from the cache, if possible.
        if self._cache[0] <= start_tok < self._cache[1]:
            for tok in self._cache[2][start_tok-self._cache[0]:]:
                yield tok
                start_tok += 1
        # Decide where in the file we should start.  If `start` is in
        # our mapping, then we can jump straight to the correct block;
        # otherwise, start at the last block we've processed.
        if start_tok < self._toknum[-1]:
            block_index = bisect.bisect_right(self._toknum, start_tok)-1
            toknum = self._toknum[block_index]
            filepos = self._filepos[block_index]
        else:
            block_index = len(self._toknum)-1
            toknum = self._toknum[-1]
            filepos = self._filepos[-1]
        # Open the stream, if it's not open already.
        if self._stream is None:
            self._open()
        # Each iteration through this loop, we read a single block
        # from the stream.
        while filepos < self._eofpos:
            # Read the next block.
            self._stream.seek(filepos)
            self._current_toknum = toknum
            self._current_blocknum = block_index
            tokens = self.read_block(self._stream)
            assert isinstance(tokens, (tuple, list, AbstractLazySequence)), (
                'block reader %s() should return list or tuple.' %
                self.read_block.__name__)
            num_toks = len(tokens)
            new_filepos = self._stream.tell()
            assert new_filepos > filepos, (
                'block reader %s() should consume at least 1 byte (filepos=%d)' %
                (self.read_block.__name__, filepos))
            # Update our cache.
            self._cache = (toknum, toknum+num_toks, list(tokens))
            # Update our mapping.  (Extend it only for a *new* non-empty
            # block; otherwise re-check it against the recorded values.)
            assert toknum <= self._toknum[-1]
            if num_toks > 0:
                block_index += 1
                if toknum == self._toknum[-1]:
                    assert new_filepos > self._filepos[-1] # monotonic!
                    self._filepos.append(new_filepos)
                    self._toknum.append(toknum+num_toks)
                else:
                    # Check for consistency:
                    assert new_filepos == self._filepos[block_index], (
                        'inconsistent block reader (num chars read)')
                    assert toknum+num_toks == self._toknum[block_index], (
                        'inconsistent block reader (num tokens returned)')
            # If we reached the end of the file, then update self._len
            if new_filepos == self._eofpos:
                self._len = toknum + num_toks
            # Generate the tokens in this block (but skip any tokens
            # before start_tok).  Note that between yields, our state
            # may be modified.
            for tok in tokens[max(0, start_tok-toknum):]:
                yield tok
            # If we're at the end of the file, then we're done.
            assert new_filepos <= self._eofpos
            if new_filepos == self._eofpos:
                break
            # Update our indices
            toknum += num_toks
            filepos = new_filepos
        # If we reach this point, then we should know our length.
        assert self._len is not None
    # Use concat for these, so we can use a ConcatenatedCorpusView
    # when possible.
    def __add__(self, other):
        return concat([self, other])
    def __radd__(self, other):
        return concat([other, self])
    def __mul__(self, count):
        return concat([self] * count)
    def __rmul__(self, count):
        return concat([self] * count)
class ConcatenatedCorpusView(AbstractLazySequence):
    """
    A 'view' of a corpus file that joins together one or more
    ``StreamBackedCorpusViews<StreamBackedCorpusView>``.  At most
    one file handle is left open at any time.
    """
    def __init__(self, corpus_views):
        self._pieces = corpus_views
        """A list of the corpus subviews that make up this
        concatenation."""
        self._offsets = [0]
        """A list of offsets, indicating the index at which each
        subview begins.  In particular::
            offsets[i] = sum([len(p) for p in pieces[:i]])"""
        self._open_piece = None
        """The most recently accessed corpus subview (or None).
        Before a new subview is accessed, this subview will be closed."""
    def __len__(self):
        """Return the total number of tokens across all subviews."""
        # The offset table gains one entry per piece as pieces are
        # iterated; if it's still incomplete, iterate to the end of
        # the corpus to finish filling it in.
        if len(self._offsets) <= len(self._pieces):
            # Iterate to the end of the corpus.
            for tok in self.iterate_from(self._offsets[-1]): pass
        return self._offsets[-1]
    def close(self):
        """Close the file streams of all subviews."""
        for piece in self._pieces:
            piece.close()
    def iterate_from(self, start_tok):
        # Locate the piece containing start_tok (self._offsets is
        # sorted, so a binary search suffices).
        piecenum = bisect.bisect_right(self._offsets, start_tok)-1
        while piecenum < len(self._pieces):
            offset = self._offsets[piecenum]
            piece = self._pieces[piecenum]
            # If we've got another piece open, close it first.
            if self._open_piece is not piece:
                if self._open_piece is not None:
                    self._open_piece.close()
                self._open_piece = piece
            # Get everything we can from this piece.
            for tok in piece.iterate_from(max(0, start_tok-offset)):
                yield tok
            # Update the offset table.
            if piecenum+1 == len(self._offsets):
                self._offsets.append(self._offsets[-1] + len(piece))
            # Move on to the next piece.
            piecenum += 1
def concat(docs):
    """
    Concatenate together the contents of multiple documents from a
    single corpus, using an appropriate concatenation function.  This
    utility function is used by corpus readers when the user requests
    more than one document at a time.

    :param docs: the documents to concatenate (all of compatible types).
    :raise ValueError: if ``docs`` is empty, or if no concatenation
        strategy is known for the documents' type(s).
    """
    if len(docs) == 1:
        return docs[0]
    if len(docs) == 0:
        raise ValueError('concat() expects at least one object!')

    types = set(d.__class__ for d in docs)

    # If they're all strings, use string concatenation.
    if all(isinstance(doc, compat.string_types) for doc in docs):
        return ''.join(docs)

    # If they're all corpus views, then use ConcatenatedCorpusView.
    for typ in types:
        if not issubclass(typ, (StreamBackedCorpusView,
                                ConcatenatedCorpusView)):
            break
    else:
        return ConcatenatedCorpusView(docs)

    # If they're all lazy sequences, use a lazy concatenation
    for typ in types:
        if not issubclass(typ, AbstractLazySequence):
            break
    else:
        return LazyConcatenation(docs)

    # Otherwise, see what we can do:
    if len(types) == 1:
        typ = list(types)[0]

        if issubclass(typ, list):
            return reduce((lambda a,b:a+b), docs, [])

        if issubclass(typ, tuple):
            return reduce((lambda a,b:a+b), docs, ())

        # BUG FIX: the original tested ElementTree.iselement(typ) -- i.e.
        # the *class* object, which is never an element -- so the XML
        # branch could never run.  Test an actual document instead.
        if ElementTree.iselement(docs[0]):
            xmltree = ElementTree.Element('documents')
            for doc in docs: xmltree.append(doc)
            return xmltree

    # No method found!
    raise ValueError("Don't know how to concatenate types: %r" % types)
######################################################################
#{ Corpus View for Pickled Sequences
######################################################################
class PickleCorpusView(StreamBackedCorpusView):
    """
    A stream backed corpus view for corpus files that consist of
    sequences of serialized Python objects (serialized using
    ``pickle.dump``).  One use case for this class is to store the
    result of running feature detection on a corpus to disk.  This can
    be useful when performing feature detection is expensive (so we
    don't want to repeat it); but the corpus is too large to store in
    memory.  The following example illustrates this technique:

        >>> from nltk.corpus.reader.util import PickleCorpusView
        >>> from nltk.util import LazyMap
        >>> feature_corpus = LazyMap(detect_features, corpus) # doctest: +SKIP
        >>> PickleCorpusView.write(feature_corpus, some_fileid)  # doctest: +SKIP
        >>> pcv = PickleCorpusView(some_fileid) # doctest: +SKIP
    """
    # Number of pickled objects returned per block read.
    BLOCK_SIZE = 100
    # Pickle protocol used by write(); -1 means the highest available.
    PROTOCOL = -1

    def __init__(self, fileid, delete_on_gc=False):
        """
        Create a new corpus view that reads the pickle corpus
        ``fileid``.

        :param delete_on_gc: If true, then ``fileid`` will be deleted
            whenever this object gets garbage-collected.
        """
        self._delete_on_gc = delete_on_gc
        StreamBackedCorpusView.__init__(self, fileid)

    def read_block(self, stream):
        """Read up to ``BLOCK_SIZE`` pickled objects from ``stream``."""
        result = []
        for i in range(self.BLOCK_SIZE):
            try: result.append(pickle.load(stream))
            except EOFError: break
        return result

    def __del__(self):
        """
        If ``delete_on_gc`` was set to true when this
        ``PickleCorpusView`` was created, then delete the corpus view's
        fileid.  (This method is called whenever a
        ``PickledCorpusView`` is garbage-collected.
        """
        # BUG FIX: supply a default to getattr().  __del__ can run even
        # when __init__ raised before _delete_on_gc was assigned, and
        # the original two-argument getattr() would itself raise
        # AttributeError during garbage collection in that case.
        if getattr(self, '_delete_on_gc', False):
            if os.path.exists(self._fileid):
                try: os.remove(self._fileid)
                except (OSError, IOError): pass
        self.__dict__.clear() # make the garbage collector's job easier

    @classmethod
    def write(cls, sequence, output_file):
        """
        Pickle each item of ``sequence`` to ``output_file``, which may
        be either a filename or a writable binary file object.

        BUG FIX: when given a filename, the original left the file it
        opened unclosed; it is now closed when writing finishes (a file
        object supplied by the caller is left open).
        """
        opened_here = False
        if isinstance(output_file, compat.string_types):
            output_file = open(output_file, 'wb')
            opened_here = True
        try:
            for item in sequence:
                pickle.dump(item, output_file, cls.PROTOCOL)
        finally:
            if opened_here:
                output_file.close()

    @classmethod
    def cache_to_tempfile(cls, sequence, delete_on_gc=True):
        """
        Write the given sequence to a temporary file as a pickle
        corpus; and then return a ``PickleCorpusView`` view for that
        temporary corpus file.

        :param delete_on_gc: If true, then the temporary file will be
            deleted whenever this object gets garbage-collected.
        """
        try:
            fd, output_file_name = tempfile.mkstemp('.pcv', 'nltk-')
            output_file = os.fdopen(fd, 'wb')
            cls.write(sequence, output_file)
            output_file.close()
            return PickleCorpusView(output_file_name, delete_on_gc)
        except (OSError, IOError) as e:
            raise ValueError('Error while creating temp file: %s' % e)
######################################################################
#{ Block Readers
######################################################################
def read_whitespace_block(stream):
    """Read up to 20 lines from ``stream`` and return the
    whitespace-separated tokens found on them."""
    tokens = []
    for _ in range(20):  # read 20 lines at a time
        tokens.extend(stream.readline().split())
    return tokens
def read_wordpunct_block(stream):
    """Read up to 20 lines from ``stream`` and return the tokens found
    on them, as determined by ``wordpunct_tokenize``."""
    tokens = []
    for _ in range(20):  # read 20 lines at a time
        tokens.extend(wordpunct_tokenize(stream.readline()))
    return tokens
def read_line_block(stream):
    """Read up to 20 lines from ``stream``, returning each one (with
    its trailing newline stripped) as a separate token."""
    lines = []
    for _ in range(20):
        line = stream.readline()
        if not line:
            break  # end of file
        lines.append(line.rstrip('\n'))
    return lines
def read_blankline_block(stream):
    """Read a single paragraph -- everything up to the next blank line
    or end of file -- from ``stream``.  Returns a one-element list
    containing the paragraph, or an empty list at end of file.
    Blank lines before the paragraph starts are skipped."""
    para = ''
    while True:
        line = stream.readline()
        if not line:
            # End of file: return the final paragraph, if any.
            return [para] if para else []
        if not line.strip():
            # Blank line ends the paragraph -- unless nothing has been
            # collected yet (i.e., leading blank lines are skipped).
            if para:
                return [para]
        else:
            para += line
def read_alignedsent_block(stream):
    """
    Read one aligned-sentence block from ``stream``: lines are
    accumulated until a line starting with digits in "m-n" form (the
    word-alignment line) is seen, which terminates the block.  Lines
    starting with '=' and blank lines are skipped.

    :return: a one-element list containing the block, or an empty list
        at end of file.
    """
    s = ''
    while True:
        line = stream.readline()
        # End of file.  (BUG FIX: this check must precede any indexing
        # into ``line``; the original tested line[0] first and raised
        # IndexError on the empty string returned at EOF.)
        if not line:
            if s: return [s]
            else: return []
        # Skip separator ('='), blank, and CRLF-blank lines.
        if line[0] == '=' or line[0] == '\n' or line[:2] == '\r\n':
            continue
        s += line
        # An "m-n" alignment line ends the block.
        if re.match(r'^\d+-\d+', line) is not None:
            return [s]
def read_regexp_block(stream, start_re, end_re=None):
"""
Read a sequence of tokens from a stream, where tokens begin with
lines that match ``start_re``. If ``end_re`` is specified, then
tokens end with lines that match ``end_re``; otherwise, tokens end
whenever the next line matching ``start_re`` or EOF is found.
"""
# Scan until we find a line matching the start regexp.
while True:
line = stream.readline()
if not line: return [] # end of file.
if re.match(start_re, line): break
# Scan until we find another line matching the regexp, or EOF.
lines = [line]
while True:
oldpos = stream.tell()
line = stream.readline()
# End of file:
if not line:
return [''.join(lines)]
# End of token:
if end_re is not None and re.match(end_re, line):
return [''.join(lines)]
# Start of new token: backup to just before it starts, and
# return the token we've already collected.
if end_re is None and re.match(start_re, line):
stream.seek(oldpos)
return [''.join(lines)]
# Anything else is part of the token.
lines.append(line)
def read_sexpr_block(stream, block_size=16384, comment_char=None):
    """
    Read a sequence of s-expressions from the stream, and leave the
    stream's file position at the end the last complete s-expression
    read.  This function will always return at least one s-expression,
    unless there are no more s-expressions in the file.

    If the file ends in in the middle of an s-expression, then that
    incomplete s-expression is returned when the end of the file is
    reached.

    :param block_size: The default block size for reading.  If an
        s-expression is longer than one block, then more than one
        block will be read.
    :param comment_char: A character that marks comments.  Any lines
        that begin with this character will be stripped out.
        (If spaces or tabs precede the comment character, then the
        line will not be stripped.)
    :return: a list of the s-expressions read, as strings.
    """
    start = stream.tell()
    block = stream.read(block_size)
    encoding = getattr(stream, 'encoding', None)
    assert encoding is not None or isinstance(block, compat.text_type)
    if encoding not in (None, 'utf-8'):
        import warnings
        warnings.warn('Parsing may fail, depending on the properties '
                      'of the %s encoding!' % encoding)
        # (e.g., the utf-16 encoding does not work because it insists
        # on adding BOMs to the beginning of encoded strings.)
    if comment_char:
        COMMENT = re.compile('(?m)^%s.*$' % re.escape(comment_char))
    while True:
        try:
            # If we're stripping comments, then make sure our block ends
            # on a line boundary; and then replace any comments with
            # space characters.  (We can't just strip them out -- that
            # would make our offset wrong.)
            if comment_char:
                block += stream.readline()
                block = re.sub(COMMENT, _sub_space, block)
            # Read the block.
            tokens, offset = _parse_sexpr_block(block)
            # Skip whitespace
            offset = re.compile(r'\s*').search(block, offset).end()
            # Move to the end position.  For an encoded stream, the seek
            # offset must be expressed in *bytes*, so re-encode the
            # consumed prefix to measure it.
            if encoding is None:
                stream.seek(start+offset)
            else:
                stream.seek(start+len(block[:offset].encode(encoding)))
            # Return the list of tokens we processed
            return tokens
        except ValueError as e:
            if e.args[0] == 'Block too small':
                next_block = stream.read(block_size)
                if next_block:
                    block += next_block
                    continue
                else:
                    # The file ended mid-sexpr -- return what we got.
                    return [block.strip()]
            else: raise
def _sub_space(m):
"""Helper function: given a regexp match, return a string of
spaces that's the same length as the matched string."""
return ' '*(m.end()-m.start())
def _parse_sexpr_block(block):
    """Partition ``block`` into complete s-expression tokens.

    :return: a tuple ``(tokens, end)``, where ``tokens`` is a list of
        the (string) s-expressions found, and ``end`` is the character
        offset just past the last complete token.
    :raise ValueError: with args ``('Block too small',)`` if the block
        ends in the middle of its *first* token (so no complete token
        can be returned).
    """
    tokens = []
    start = end = 0
    while end < len(block):
        # Find the start of the next token (first non-whitespace char).
        m = re.compile(r'\S').search(block, end)
        if not m:
            return tokens, end
        start = m.start()
        # Case 1: sexpr is not parenthesized.
        if m.group() != '(':
            m2 = re.compile(r'[\s(]').search(block, start)
            if m2:
                end = m2.start()
            else:
                # The token may continue past the block boundary; only
                # safe to stop here if we already have complete tokens.
                if tokens: return tokens, end
                raise ValueError('Block too small')
        # Case 2: parenthesized sexpr.
        else:
            nesting = 0
            for m in re.compile(r'[()]').finditer(block, start):
                if m.group()=='(': nesting += 1
                else: nesting -= 1
                if nesting == 0:
                    end = m.end()
                    break
            else:
                # Parens never balanced before the block ended: the
                # sexpr is incomplete.
                if tokens: return tokens, end
                raise ValueError('Block too small')
        tokens.append(block[start:end])
    return tokens, end
######################################################################
#{ Finding Corpus Items
######################################################################
def find_corpus_fileids(root, regexp):
    """Return a sorted list of the fileids found under ``root`` (a
    ``ZipFilePathPointer`` or ``FileSystemPathPointer``) whose path,
    relative to ``root``, fully matches ``regexp``."""
    if not isinstance(root, PathPointer):
        raise TypeError('find_corpus_fileids: expected a PathPointer')
    # Anchor the pattern at the end; re.match already anchors the start.
    regexp += '$'
    # Find fileids in a zipfile: scan the zipfile's namelist.  Filter
    # out entries that end in '/' -- they're directories.
    if isinstance(root, ZipFilePathPointer):
        fileids = [name[len(root.entry):] for name in root.zipfile.namelist()
                   if not name.endswith('/')]
        items = [name for name in fileids if re.match(regexp, name)]
        return sorted(items)
    # Find fileids in a directory: use os.walk to search all (proper
    # or symlinked) subdirectories, and match paths against the regexp.
    elif isinstance(root, FileSystemPathPointer):
        items = []
        # workaround for py25 which doesn't support followlinks
        kwargs = {}
        if not py25():
            kwargs = {'followlinks': True}
        for dirname, subdirs, fileids in os.walk(root.path, **kwargs):
            prefix = ''.join('%s/' % p for p in _path_from(root.path, dirname))
            items += [prefix+fileid for fileid in fileids
                      if re.match(regexp, prefix+fileid)]
            # Don't visit svn directories:
            if '.svn' in subdirs: subdirs.remove('.svn')
        return sorted(items)
    else:
        raise AssertionError("Don't know how to handle %r" % root)
def _path_from(parent, child):
if os.path.split(parent)[1] == '':
parent = os.path.split(parent)[0]
path = []
while parent != child:
child, dirname = os.path.split(child)
path.insert(0, dirname)
assert os.path.split(child)[0] != child
return path
######################################################################
#{ Paragraph structure in Treebank files
######################################################################
def tagged_treebank_para_block_reader(stream):
    """Read a single paragraph from a tagged treebank file, where
    paragraphs are separated by lines of six or more '=' characters.

    :return: a one-element list containing the paragraph, or an empty
        list at end of file.
    """
    # Read the next paragraph.
    para = ''
    while True:
        line = stream.readline()
        # End of paragraph: a separator line of '=' characters.
        # (FIX: made the pattern a raw string -- '\s' in a plain string
        # literal is an invalid escape sequence that newer Python
        # versions warn about.)
        if re.match(r'======+\s*$', line):
            if para.strip(): return [para]
        # End of file:
        elif line == '':
            if para.strip(): return [para]
            else: return []
        # Content line:
        else:
            para += line
| 38.484962 | 84 | 0.595389 |
795979cba2727d2b4c558b401e2b1666a62e079e | 5,982 | py | Python | contrib/zmq/zmq_sub.py | mytitanium/Titanium-Core-1.0 | 470e6a0a23de1ea867d693e362d1a0f6ccc12aa7 | [
"MIT"
] | 2 | 2020-12-01T17:15:50.000Z | 2020-12-11T13:29:54.000Z | contrib/zmq/zmq_sub.py | mytitanium/Titanium-Core-1.0 | 470e6a0a23de1ea867d693e362d1a0f6ccc12aa7 | [
"MIT"
] | 1 | 2020-07-27T10:54:07.000Z | 2020-08-28T05:37:26.000Z | contrib/zmq/zmq_sub.py | mytitanium/Titanium-Core-1.0 | 470e6a0a23de1ea867d693e362d1a0f6ccc12aa7 | [
"MIT"
] | 2 | 2020-11-09T16:38:04.000Z | 2021-04-02T05:27:36.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
ZMQ example using python3's asyncio
Ttm should be started with the command line arguments:
ttmd -testnet -daemon \
-zmqpubrawtx=tcp://127.0.0.1:28332 \
-zmqpubrawblock=tcp://127.0.0.1:28332 \
-zmqpubhashtx=tcp://127.0.0.1:28332 \
-zmqpubhashblock=tcp://127.0.0.1:28332
We use the asyncio library here. `self.handle()` installs itself as a
future at the end of the function. Since it never returns with the event
loop having an empty stack of futures, this creates an infinite loop. An
alternative is to wrap the contents of `handle` inside `while True`.
A blocking example using python 2.7 can be obtained from the git history:
https://github.com/bitcoin/bitcoin/blob/37a7fe9e440b83e2364d5498931253937abe9294/contrib/zmq/zmq_sub.py
"""
import binascii
import asyncio
import zmq
import zmq.asyncio
import signal
import struct
import sys
# async/await syntax used below requires Python 3.5+; bail out early
# on older interpreters.
if (sys.version_info.major, sys.version_info.minor) < (3, 5):
    print("This example only works with Python 3.5 and greater")
    sys.exit(1)
# TCP port the daemon's -zmqpub* options publish on (see module docstring).
port = 28332
class ZMQHandler():
    """Subscribes to every ZMQ notification topic published by the
    daemon and prints each message (hex-encoded) as it arrives."""

    # Every notification topic the daemon can publish.
    _SUBSCRIPTIONS = [
        "hashblock", "hashchainlock", "hashtx", "hashtxlock",
        "hashgovernancevote", "hashgovernanceobject",
        "hashinstantsenddoublespend",
        "rawblock", "rawchainlock", "rawchainlocksig",
        "rawtx", "rawtxlock", "rawtxlocksig",
        "rawgovernancevote", "rawgovernanceobject",
        "rawinstantsenddoublespend",
    ]

    # topic -> (display label, number of payload bytes to show;
    #           None means show the whole payload).
    _DISPLAY = {
        b"hashblock": ("HASH BLOCK", None),
        b"hashchainlock": ("HASH CHAINLOCK", None),
        b"hashtx": ("HASH TX", None),
        b"hashtxlock": ("HASH TX LOCK", None),
        b"hashgovernancevote": ("HASH GOVERNANCE VOTE", None),
        b"hashgovernanceobject": ("HASH GOVERNANCE OBJECT", None),
        b"hashinstantsenddoublespend": ("HASH IS DOUBLE SPEND", None),
        b"rawblock": ("RAW BLOCK HEADER", 80),
        b"rawchainlock": ("RAW CHAINLOCK", 80),
        b"rawchainlocksig": ("RAW CHAINLOCK SIG", 80),
        b"rawtx": ("RAW TX", None),
        b"rawtxlock": ("RAW TX LOCK", None),
        b"rawtxlocksig": ("RAW TX LOCK SIG", None),
        b"rawgovernancevote": ("RAW GOVERNANCE VOTE", None),
        b"rawgovernanceobject": ("RAW GOVERNANCE OBJECT", None),
        b"rawinstantsenddoublespend": ("RAW IS DOUBLE SPEND", None),
    }

    def __init__(self):
        self.loop = asyncio.get_event_loop()
        self.zmqContext = zmq.asyncio.Context()
        self.zmqSubSocket = self.zmqContext.socket(zmq.SUB)
        for topic in self._SUBSCRIPTIONS:
            self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, topic)
        self.zmqSubSocket.connect("tcp://127.0.0.1:%i" % port)

    async def handle(self):
        """Receive one multipart notification, print it, and reschedule."""
        msg = await self.zmqSubSocket.recv_multipart()
        topic, body = msg[0], msg[1]
        # The final frame, when present, is a little-endian uint32
        # sequence number.
        sequence = "Unknown"
        if len(msg[-1]) == 4:
            sequence = str(struct.unpack('<I', msg[-1])[-1])
        entry = self._DISPLAY.get(topic)
        if entry is not None:
            label, limit = entry
            payload = body if limit is None else body[:limit]
            print('- ' + label + ' (' + sequence + ') -')
            print(binascii.hexlify(payload).decode("utf-8"))
        # schedule ourselves to receive the next message
        asyncio.ensure_future(self.handle())

    def start(self):
        """Run the event loop until stopped (Ctrl-C triggers stop())."""
        self.loop.add_signal_handler(signal.SIGINT, self.stop)
        self.loop.create_task(self.handle())
        self.loop.run_forever()

    def stop(self):
        """Halt the event loop and tear down the ZMQ context."""
        self.loop.stop()
        self.zmqContext.destroy()
# Only start the subscriber when executed as a script, so importing
# this module for reuse does not block forever in the event loop.
if __name__ == "__main__":
    daemon = ZMQHandler()
    daemon.start()
| 44.311111 | 107 | 0.634069 |
795979de9e341510530270bc3a53ef3fc4532879 | 5,146 | py | Python | tagging.py | doggo404/sowsbetter | 3d23317f755821d4215caa0398449ec94230d711 | [
"MIT"
] | 14 | 2021-02-15T21:38:24.000Z | 2022-02-03T00:13:32.000Z | tagging.py | doggo404/sowsbetter | 3d23317f755821d4215caa0398449ec94230d711 | [
"MIT"
] | null | null | null | tagging.py | doggo404/sowsbetter | 3d23317f755821d4215caa0398449ec94230d711 | [
"MIT"
] | 1 | 2022-01-17T00:32:37.000Z | 2022-01-17T00:32:37.000Z | """Simple tagging for sowsbetter.
"""
import os.path
import re
import mutagen
import mutagen.flac
import mutagen.mp3
from mutagen.easyid3 import EasyID3
# Tags whose values are numeric, possibly in "m/n" fractional form;
# scrub_tag() applies extra cleanup to these.  (Set literal used
# instead of the redundant set([...]) wrapper.)
numeric_tags = {
    'tracknumber',
    'discnumber',
    'tracktotal',
    'totaltracks',
    'disctotal',
    'totaldiscs',
}
class TaggingException(Exception):
    """Raised when a file's tag format is unsupported (see copy_tags)."""
    pass
def valid_fractional_tag(value):
    """Return True if ``value`` is a plain number ("m") or a fraction
    ("m/n"), the forms accepted for tracknumber/discnumber tags."""
    # m or m/n
    return re.match(r"""\d+(/(\d+))?$""", value) is not None
def scrub_tag(name, value):
    """Strip whitespace (and other common problems) from tag values.

    May return the empty string ''.
    """
    cleaned = value.strip().strip('\x00')
    if name in numeric_tags:
        # Drop a trailing '/' or '/0' (an unknown total) ...
        cleaned = re.sub(r"""/(0+)?$""", '', cleaned)
        # ... and any leading '/'.
        cleaned = cleaned.lstrip('/')
    # Numeric tags should not be '0' -- except tracknumber, where 0 is
    # legitimate (e.g., a hidden track).
    if name in numeric_tags - set(['tracknumber']):
        if re.match(r"""0+(/.*)?$""", cleaned):
            return ''
    return cleaned
def check_tags(filename, check_tracknumber_format=True):
    """Verify that the file has the required What.CD tags.

    Returns (True, None) if OK, (False, msg) if a tag is missing or
    invalid.
    """
    info = mutagen.File(filename, easy=True)
    for required in ('artist', 'album', 'title', 'tracknumber'):
        if required not in info.keys():
            return (False, '"%s" has no %s tag' % (filename, required))
        if info[required] == [u'']:
            return (False, '"%s" has an empty %s tag' % (filename, required))
    if check_tracknumber_format:
        tracknumber = info['tracknumber'][0]
        if not valid_fractional_tag(tracknumber):
            return (False, '"%s" has a malformed tracknumber tag ("%s")' % (filename, tracknumber))
    return (True, None)
def copy_tags(flac_file, transcode_file):
    """Copy (scrubbed) tags from *flac_file* onto *transcode_file*.

    Supports '.flac' and '.mp3' targets; raises TaggingException for any
    other extension.  For MP3, 'tracknumber'/'discnumber' are rewritten to
    the ID3 "x/y" notation when a total is available in the FLAC tags.
    """
    flac_info = mutagen.flac.FLAC(flac_file)
    transcode_info = None
    valid_key_fn = None
    transcode_ext = os.path.splitext(transcode_file)[1].lower()
    if transcode_ext == '.flac':
        transcode_info = mutagen.flac.FLAC(transcode_file)
        valid_key_fn = lambda k: True
    elif transcode_ext == '.mp3':
        transcode_info = mutagen.mp3.EasyMP3(transcode_file)
        # MP3 can only carry tags that EasyID3 knows how to map to ID3 frames.
        valid_key_fn = lambda k: k in EasyID3.valid_keys.keys()
    else:
        raise TaggingException('Unsupported tag format "%s"' % transcode_file)
    for tag in filter(valid_key_fn, flac_info):
        # scrub the FLAC tags, just to be on the safe side.
        values = list(map(lambda v: scrub_tag(tag,v), flac_info[tag]))
        if values and values != [u'']:
            transcode_info[tag] = values
    if transcode_ext == '.mp3':
        # Support for TRCK and TPOS x/y notation, which is not
        # supported by EasyID3.
        #
        # These tags don't make sense as lists, so we just use the head
        # element when fixing them up.
        #
        # totaltracks and totaldiscs may also appear in the FLAC file
        # as 'tracktotal' and 'disctotal'. We support either tag, but
        # in files with both we choose only one.
        if 'tracknumber' in transcode_info.keys():
            totaltracks = None
            if 'totaltracks' in flac_info.keys():
                totaltracks = scrub_tag('totaltracks', flac_info['totaltracks'][0])
            elif 'tracktotal' in flac_info.keys():
                totaltracks = scrub_tag('tracktotal', flac_info['tracktotal'][0])
            if totaltracks:
                transcode_info['tracknumber'] = [u'%s/%s' % (transcode_info['tracknumber'][0], totaltracks)]
        if 'discnumber' in transcode_info.keys():
            totaldiscs = None
            if 'totaldiscs' in flac_info.keys():
                totaldiscs = scrub_tag('totaldiscs', flac_info['totaldiscs'][0])
            elif 'disctotal' in flac_info.keys():
                totaldiscs = scrub_tag('disctotal', flac_info['disctotal'][0])
            if totaldiscs:
                transcode_info['discnumber'] = [u'%s/%s' % (transcode_info['discnumber'][0], totaldiscs)]
    transcode_info.save()
# EasyID3 extensions for sowsbetter.
# Register extra text frames and custom accessors on mutagen's EasyID3 so
# copy_tags() can carry these tags over to MP3 targets.
for key, frameid in {
    'albumartist': 'TPE2',
    'album artist': 'TPE2',
    'grouping': 'TIT1',
    'content group': 'TIT1',
}.items():
    EasyID3.RegisterTextKey(key, frameid)
def comment_get(id3, _):
    # NOTE(review): iterates the COMM frame's .text entries and reads .text
    # off each -- confirm this matches the mutagen version in use.
    return [comment.text for comment in id3['COMM'].text]
def comment_set(id3, _, value):
    # encoding=3 is UTF-8 in ID3v2.
    id3.add(mutagen.id3.COMM(encoding=3, lang='eng', desc='', text=value))
def originaldate_get(id3, _):
    return [stamp.text for stamp in id3['TDOR'].text]
def originaldate_set(id3, _, value):
    id3.add(mutagen.id3.TDOR(encoding=3, text=value))
EasyID3.RegisterKey('comment', comment_get, comment_set)
EasyID3.RegisterKey('description', comment_get, comment_set)
EasyID3.RegisterKey('originaldate', originaldate_get, originaldate_set)
EasyID3.RegisterKey('original release date', originaldate_get, originaldate_set)
| 33.415584 | 108 | 0.629615 |
79597abb266962ec0e2f6dcfc150341cb8434a46 | 21,187 | py | Python | sdk/python/pulumi_aws/elasticloadbalancing/load_balancer.py | sibuthomasmathew/pulumi-aws | 6351f2182eb6f693d4e09e4136c385adfa0ab674 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/elasticloadbalancing/load_balancer.py | sibuthomasmathew/pulumi-aws | 6351f2182eb6f693d4e09e4136c385adfa0ab674 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/elasticloadbalancing/load_balancer.py | sibuthomasmathew/pulumi-aws | 6351f2182eb6f693d4e09e4136c385adfa0ab674 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['LoadBalancer']
# Importing this module emits a DeprecationWarning: use aws.elb.LoadBalancer instead.
warnings.warn("""aws.elasticloadbalancing.LoadBalancer has been deprecated in favor of aws.elb.LoadBalancer""", DeprecationWarning)
class LoadBalancer(pulumi.CustomResource):
    # Generated resource class (see module header: "Do not edit by hand").
    # Deprecated alias of aws.elb.LoadBalancer; a warning fires at class
    # creation and again in __init__.
    warnings.warn("""aws.elasticloadbalancing.LoadBalancer has been deprecated in favor of aws.elb.LoadBalancer""", DeprecationWarning)
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 access_logs: Optional[pulumi.Input[pulumi.InputType['LoadBalancerAccessLogsArgs']]] = None,
                 availability_zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 connection_draining: Optional[pulumi.Input[bool]] = None,
                 connection_draining_timeout: Optional[pulumi.Input[int]] = None,
                 cross_zone_load_balancing: Optional[pulumi.Input[bool]] = None,
                 health_check: Optional[pulumi.Input[pulumi.InputType['LoadBalancerHealthCheckArgs']]] = None,
                 idle_timeout: Optional[pulumi.Input[int]] = None,
                 instances: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 internal: Optional[pulumi.Input[bool]] = None,
                 listeners: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LoadBalancerListenerArgs']]]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 name_prefix: Optional[pulumi.Input[str]] = None,
                 security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 source_security_group: Optional[pulumi.Input[str]] = None,
                 subnets: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        Provides an Elastic Load Balancer resource, also known as a "Classic
        Load Balancer" after the release of
        `Application/Network Load Balancers`.
        > **NOTE on ELB Instances and ELB Attachments:** This provider currently
        provides both a standalone ELB Attachment resource
        (describing an instance attached to an ELB), and an ELB resource with
        `instances` defined in-line. At this time you cannot use an ELB with in-line
        instances in conjunction with a ELB Attachment resources. Doing so will cause a
        conflict and will overwrite attachments.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_aws as aws
        # Create a new load balancer
        bar = aws.elb.LoadBalancer("bar",
            availability_zones=[
                "us-west-2a",
                "us-west-2b",
                "us-west-2c",
            ],
            access_logs=aws.elb.LoadBalancerAccessLogsArgs(
                bucket="foo",
                bucket_prefix="bar",
                interval=60,
            ),
            listeners=[
                aws.elb.LoadBalancerListenerArgs(
                    instance_port=8000,
                    instance_protocol="http",
                    lb_port=80,
                    lb_protocol="http",
                ),
                aws.elb.LoadBalancerListenerArgs(
                    instance_port=8000,
                    instance_protocol="http",
                    lb_port=443,
                    lb_protocol="https",
                    ssl_certificate_id="arn:aws:iam::123456789012:server-certificate/certName",
                ),
            ],
            health_check=aws.elb.LoadBalancerHealthCheckArgs(
                healthy_threshold=2,
                unhealthy_threshold=2,
                timeout=3,
                target="HTTP:8000/",
                interval=30,
            ),
            instances=[aws_instance["foo"]["id"]],
            cross_zone_load_balancing=True,
            idle_timeout=400,
            connection_draining=True,
            connection_draining_timeout=400,
            tags={
                "Name": "foobar-elb",
            })
        ```
        ## Note on ECDSA Key Algorithm
        If the ARN of the `ssl_certificate_id` that is pointed to references a
        certificate that was signed by an ECDSA key, note that ELB only supports the
        P256 and P384 curves. Using a certificate signed by a key using a different
        curve could produce the error `ERR_SSL_VERSION_OR_CIPHER_MISMATCH` in your
        browser.
        ## Import
        ELBs can be imported using the `name`, e.g.
        ```sh
        $ pulumi import aws:elasticloadbalancing/loadBalancer:LoadBalancer bar elb-production-12345
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[pulumi.InputType['LoadBalancerAccessLogsArgs']] access_logs: An Access Logs block. Access Logs documented below.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] availability_zones: The AZ's to serve traffic in.
        :param pulumi.Input[bool] connection_draining: Boolean to enable connection draining. Default: `false`
        :param pulumi.Input[int] connection_draining_timeout: The time in seconds to allow for connections to drain. Default: `300`
        :param pulumi.Input[bool] cross_zone_load_balancing: Enable cross-zone load balancing. Default: `true`
        :param pulumi.Input[pulumi.InputType['LoadBalancerHealthCheckArgs']] health_check: A health_check block. Health Check documented below.
        :param pulumi.Input[int] idle_timeout: The time in seconds that the connection is allowed to be idle. Default: `60`
        :param pulumi.Input[Sequence[pulumi.Input[str]]] instances: A list of instance ids to place in the ELB pool.
        :param pulumi.Input[bool] internal: If true, ELB will be an internal ELB.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LoadBalancerListenerArgs']]]] listeners: A list of listener blocks. Listeners documented below.
        :param pulumi.Input[str] name: The name of the ELB. By default generated by this provider.
        :param pulumi.Input[str] name_prefix: Creates a unique name beginning with the specified
               prefix. Conflicts with `name`.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] security_groups: A list of security group IDs to assign to the ELB.
               Only valid if creating an ELB within a VPC
        :param pulumi.Input[str] source_security_group: The name of the security group that you can use as
               part of your inbound rules for your load balancer's back-end application
               instances. Use this for Classic or Default VPC only.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] subnets: A list of subnet IDs to attach to the ELB.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource.
        """
        pulumi.log.warn("""LoadBalancer is deprecated: aws.elasticloadbalancing.LoadBalancer has been deprecated in favor of aws.elb.LoadBalancer""")
        # Legacy __name__/__opts__ keyword support, kept for backwards compatibility.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            # Property bag of resource inputs handed to the base CustomResource.
            __props__ = dict()
            __props__['access_logs'] = access_logs
            __props__['availability_zones'] = availability_zones
            __props__['connection_draining'] = connection_draining
            __props__['connection_draining_timeout'] = connection_draining_timeout
            __props__['cross_zone_load_balancing'] = cross_zone_load_balancing
            __props__['health_check'] = health_check
            __props__['idle_timeout'] = idle_timeout
            __props__['instances'] = instances
            __props__['internal'] = internal
            if listeners is None and not opts.urn:
                raise TypeError("Missing required property 'listeners'")
            __props__['listeners'] = listeners
            __props__['name'] = name
            __props__['name_prefix'] = name_prefix
            __props__['security_groups'] = security_groups
            __props__['source_security_group'] = source_security_group
            __props__['subnets'] = subnets
            __props__['tags'] = tags
            # Output-only properties are initialized to None here.
            __props__['arn'] = None
            __props__['dns_name'] = None
            __props__['source_security_group_id'] = None
            __props__['zone_id'] = None
        super(LoadBalancer, __self__).__init__(
            'aws:elasticloadbalancing/loadBalancer:LoadBalancer',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            access_logs: Optional[pulumi.Input[pulumi.InputType['LoadBalancerAccessLogsArgs']]] = None,
            arn: Optional[pulumi.Input[str]] = None,
            availability_zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            connection_draining: Optional[pulumi.Input[bool]] = None,
            connection_draining_timeout: Optional[pulumi.Input[int]] = None,
            cross_zone_load_balancing: Optional[pulumi.Input[bool]] = None,
            dns_name: Optional[pulumi.Input[str]] = None,
            health_check: Optional[pulumi.Input[pulumi.InputType['LoadBalancerHealthCheckArgs']]] = None,
            idle_timeout: Optional[pulumi.Input[int]] = None,
            instances: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            internal: Optional[pulumi.Input[bool]] = None,
            listeners: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LoadBalancerListenerArgs']]]]] = None,
            name: Optional[pulumi.Input[str]] = None,
            name_prefix: Optional[pulumi.Input[str]] = None,
            security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            source_security_group: Optional[pulumi.Input[str]] = None,
            source_security_group_id: Optional[pulumi.Input[str]] = None,
            subnets: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
            zone_id: Optional[pulumi.Input[str]] = None) -> 'LoadBalancer':
        """
        Get an existing LoadBalancer resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[pulumi.InputType['LoadBalancerAccessLogsArgs']] access_logs: An Access Logs block. Access Logs documented below.
        :param pulumi.Input[str] arn: The ARN of the ELB
        :param pulumi.Input[Sequence[pulumi.Input[str]]] availability_zones: The AZ's to serve traffic in.
        :param pulumi.Input[bool] connection_draining: Boolean to enable connection draining. Default: `false`
        :param pulumi.Input[int] connection_draining_timeout: The time in seconds to allow for connections to drain. Default: `300`
        :param pulumi.Input[bool] cross_zone_load_balancing: Enable cross-zone load balancing. Default: `true`
        :param pulumi.Input[str] dns_name: The DNS name of the ELB
        :param pulumi.Input[pulumi.InputType['LoadBalancerHealthCheckArgs']] health_check: A health_check block. Health Check documented below.
        :param pulumi.Input[int] idle_timeout: The time in seconds that the connection is allowed to be idle. Default: `60`
        :param pulumi.Input[Sequence[pulumi.Input[str]]] instances: A list of instance ids to place in the ELB pool.
        :param pulumi.Input[bool] internal: If true, ELB will be an internal ELB.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LoadBalancerListenerArgs']]]] listeners: A list of listener blocks. Listeners documented below.
        :param pulumi.Input[str] name: The name of the ELB. By default generated by this provider.
        :param pulumi.Input[str] name_prefix: Creates a unique name beginning with the specified
               prefix. Conflicts with `name`.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] security_groups: A list of security group IDs to assign to the ELB.
               Only valid if creating an ELB within a VPC
        :param pulumi.Input[str] source_security_group: The name of the security group that you can use as
               part of your inbound rules for your load balancer's back-end application
               instances. Use this for Classic or Default VPC only.
        :param pulumi.Input[str] source_security_group_id: The ID of the security group that you can use as
               part of your inbound rules for your load balancer's back-end application
               instances. Only available on ELBs launched in a VPC.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] subnets: A list of subnet IDs to attach to the ELB.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource.
        :param pulumi.Input[str] zone_id: The canonical hosted zone ID of the ELB (to be used in a Route 53 Alias record)
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # Seed the resource's state from the given (all-optional) property values.
        __props__ = dict()
        __props__["access_logs"] = access_logs
        __props__["arn"] = arn
        __props__["availability_zones"] = availability_zones
        __props__["connection_draining"] = connection_draining
        __props__["connection_draining_timeout"] = connection_draining_timeout
        __props__["cross_zone_load_balancing"] = cross_zone_load_balancing
        __props__["dns_name"] = dns_name
        __props__["health_check"] = health_check
        __props__["idle_timeout"] = idle_timeout
        __props__["instances"] = instances
        __props__["internal"] = internal
        __props__["listeners"] = listeners
        __props__["name"] = name
        __props__["name_prefix"] = name_prefix
        __props__["security_groups"] = security_groups
        __props__["source_security_group"] = source_security_group
        __props__["source_security_group_id"] = source_security_group_id
        __props__["subnets"] = subnets
        __props__["tags"] = tags
        __props__["zone_id"] = zone_id
        return LoadBalancer(resource_name, opts=opts, __props__=__props__)
    # --- Read-only output property accessors (generated) ---
    @property
    @pulumi.getter(name="accessLogs")
    def access_logs(self) -> pulumi.Output[Optional['outputs.LoadBalancerAccessLogs']]:
        """
        An Access Logs block. Access Logs documented below.
        """
        return pulumi.get(self, "access_logs")
    @property
    @pulumi.getter
    def arn(self) -> pulumi.Output[str]:
        """
        The ARN of the ELB
        """
        return pulumi.get(self, "arn")
    @property
    @pulumi.getter(name="availabilityZones")
    def availability_zones(self) -> pulumi.Output[Sequence[str]]:
        """
        The AZ's to serve traffic in.
        """
        return pulumi.get(self, "availability_zones")
    @property
    @pulumi.getter(name="connectionDraining")
    def connection_draining(self) -> pulumi.Output[Optional[bool]]:
        """
        Boolean to enable connection draining. Default: `false`
        """
        return pulumi.get(self, "connection_draining")
    @property
    @pulumi.getter(name="connectionDrainingTimeout")
    def connection_draining_timeout(self) -> pulumi.Output[Optional[int]]:
        """
        The time in seconds to allow for connections to drain. Default: `300`
        """
        return pulumi.get(self, "connection_draining_timeout")
    @property
    @pulumi.getter(name="crossZoneLoadBalancing")
    def cross_zone_load_balancing(self) -> pulumi.Output[Optional[bool]]:
        """
        Enable cross-zone load balancing. Default: `true`
        """
        return pulumi.get(self, "cross_zone_load_balancing")
    @property
    @pulumi.getter(name="dnsName")
    def dns_name(self) -> pulumi.Output[str]:
        """
        The DNS name of the ELB
        """
        return pulumi.get(self, "dns_name")
    @property
    @pulumi.getter(name="healthCheck")
    def health_check(self) -> pulumi.Output['outputs.LoadBalancerHealthCheck']:
        """
        A health_check block. Health Check documented below.
        """
        return pulumi.get(self, "health_check")
    @property
    @pulumi.getter(name="idleTimeout")
    def idle_timeout(self) -> pulumi.Output[Optional[int]]:
        """
        The time in seconds that the connection is allowed to be idle. Default: `60`
        """
        return pulumi.get(self, "idle_timeout")
    @property
    @pulumi.getter
    def instances(self) -> pulumi.Output[Sequence[str]]:
        """
        A list of instance ids to place in the ELB pool.
        """
        return pulumi.get(self, "instances")
    @property
    @pulumi.getter
    def internal(self) -> pulumi.Output[bool]:
        """
        If true, ELB will be an internal ELB.
        """
        return pulumi.get(self, "internal")
    @property
    @pulumi.getter
    def listeners(self) -> pulumi.Output[Sequence['outputs.LoadBalancerListener']]:
        """
        A list of listener blocks. Listeners documented below.
        """
        return pulumi.get(self, "listeners")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the ELB. By default generated by this provider.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="namePrefix")
    def name_prefix(self) -> pulumi.Output[Optional[str]]:
        """
        Creates a unique name beginning with the specified
        prefix. Conflicts with `name`.
        """
        return pulumi.get(self, "name_prefix")
    @property
    @pulumi.getter(name="securityGroups")
    def security_groups(self) -> pulumi.Output[Sequence[str]]:
        """
        A list of security group IDs to assign to the ELB.
        Only valid if creating an ELB within a VPC
        """
        return pulumi.get(self, "security_groups")
    @property
    @pulumi.getter(name="sourceSecurityGroup")
    def source_security_group(self) -> pulumi.Output[str]:
        """
        The name of the security group that you can use as
        part of your inbound rules for your load balancer's back-end application
        instances. Use this for Classic or Default VPC only.
        """
        return pulumi.get(self, "source_security_group")
    @property
    @pulumi.getter(name="sourceSecurityGroupId")
    def source_security_group_id(self) -> pulumi.Output[str]:
        """
        The ID of the security group that you can use as
        part of your inbound rules for your load balancer's back-end application
        instances. Only available on ELBs launched in a VPC.
        """
        return pulumi.get(self, "source_security_group_id")
    @property
    @pulumi.getter
    def subnets(self) -> pulumi.Output[Sequence[str]]:
        """
        A list of subnet IDs to attach to the ELB.
        """
        return pulumi.get(self, "subnets")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        A map of tags to assign to the resource.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter(name="zoneId")
    def zone_id(self) -> pulumi.Output[str]:
        """
        The canonical hosted zone ID of the ELB (to be used in a Route 53 Alias record)
        """
        return pulumi.get(self, "zone_id")
    # Translate between the provider's camelCase property names and the
    # snake_case Python attribute names via the generated lookup tables.
    def translate_output_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    def translate_input_property(self, prop):
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 47.611236 | 163 | 0.64667 |
79597abc608bdb7689f1c48a1763ff8d7a4d1c66 | 251 | py | Python | problem/10000~19999/15828/15828.py3.py | njw1204/BOJ-AC | 1de41685725ae4657a7ff94e413febd97a888567 | [
"MIT"
] | 1 | 2019-04-19T16:37:44.000Z | 2019-04-19T16:37:44.000Z | problem/10000~19999/15828/15828.py3.py | njw1204/BOJ-AC | 1de41685725ae4657a7ff94e413febd97a888567 | [
"MIT"
] | 1 | 2019-04-20T11:42:44.000Z | 2019-04-20T11:42:44.000Z | problem/10000~19999/15828/15828.py3.py | njw1204/BOJ-AC | 1de41685725ae4657a7ff94e413febd97a888567 | [
"MIT"
] | 3 | 2019-04-19T16:37:47.000Z | 2021-10-25T00:45:00.000Z | import sys
from collections import *
input=sys.stdin.readline
# Bounded FIFO-queue simulation (BOJ 15828): n is the buffer capacity.
n=int(input())
q=deque()
while 1:
    x=int(input())
    if x==-1: break  # -1 ends the input
    if x==0: q.popleft()  # 0: one element is consumed from the front
    if x and len(q)<n: q.append(x)  # positive value: enqueue only if there is room
if not q:
    print('empty')
else:
    while q: print(q.popleft(),end=' ')
79597b1199eaa280e720345d72580bb9d6d610a7 | 1,135 | py | Python | Kimberley ter Heerdt/Poster/Visual-1:2/visualisatie1-birthjan1.py | ArtezGDA/MappingTheCity-Maps | a29377af7878907d30b4199d0859f007ba08b5e6 | [
"MIT"
] | null | null | null | Kimberley ter Heerdt/Poster/Visual-1:2/visualisatie1-birthjan1.py | ArtezGDA/MappingTheCity-Maps | a29377af7878907d30b4199d0859f007ba08b5e6 | [
"MIT"
] | null | null | null | Kimberley ter Heerdt/Poster/Visual-1:2/visualisatie1-birthjan1.py | ArtezGDA/MappingTheCity-Maps | a29377af7878907d30b4199d0859f007ba08b5e6 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import json
def visual_file(file_name, life_color, death_color):
    """Scatter-plot birth years against day-of-month from a JSON data file.

    Entries with a numeric death year are drawn in *life_color* with a
    marker size proportional to lifespan; the rest in *death_color* at a
    fixed size.
    """
    with open(file_name, 'r') as source:
        records = json.load(source)
    for record in records:
        # record['date'] looks like "<month> <day>"; keep the day number.
        day = int(record['date'].split(' ')[1])
        for person in record['birth']:
            year = person['year']
            deathyear = person['deathyear']
            if deathyear.isdigit():
                # One size unit per decade of lifespan.
                marker_size = abs(year - int(deathyear)) // 10
                color = life_color
            else:
                marker_size = 3
                color = death_color
            plt.plot(year, day, 'ro', color=color, markersize=marker_size)
    plt.xlim(0, 2016)
    plt.xticks(list(range(0, 2016, 100)))
    plt.ylim(0, 30)
    plt.yticks(list(range(0, 30)))
    plt.xlabel('year')
    plt.ylabel('date')
    plt.show()
if __name__ == '__main__':
    file_name = 'wikibirth-jan1.json'
    # life_color is used when a person's death year is numeric (dot size
    # scales with lifespan); death_color when it is missing/non-numeric.
    life_color = 'yellow'
    death_color = 'red'
    visual_file(file_name, life_color, death_color)
| 29.868421 | 75 | 0.518943 |
79597b4e3c4a57c30b777c95daddaf7259de5a66 | 6,654 | py | Python | adacrowd/trainer_baselines.py | maheshkkumar/adacrowd | c171fdb3674908fb06d5838fcba12c90b78bdbe4 | [
"MIT"
] | 7 | 2021-01-21T06:25:16.000Z | 2021-11-17T02:42:07.000Z | adacrowd/trainer_baselines.py | maheshkkumar/adacrowd | c171fdb3674908fb06d5838fcba12c90b78bdbe4 | [
"MIT"
] | 3 | 2021-03-03T12:17:11.000Z | 2021-12-31T09:11:59.000Z | adacrowd/trainer_baselines.py | maheshkkumar/adacrowd | c171fdb3674908fb06d5838fcba12c90b78bdbe4 | [
"MIT"
] | 3 | 2021-05-22T10:46:37.000Z | 2021-11-13T13:26:41.000Z | import numpy as np
import torch
from torch import optim
from torch.autograd import Variable
from torch.optim.lr_scheduler import StepLR
from config.baselines import cfg
from misc.utils import (AverageMeter, Timer, logger, print_summary,
update_model, vis_results)
from models.cc_baselines import CrowdCounter
class Trainer():
    """Training/validation driver for the baseline CrowdCounter models."""
    def __init__(self, dataloader, cfg_data, pwd):
        """Build the network, optimizer, scheduler, loaders and logger.

        :param dataloader: callable returning (train_loader, val_loader,
            restore_transform)
        :param cfg_data: dataset config (provides LOG_PARA density scaling)
        :param pwd: working directory passed through to the logger
        """
        self.cfg_data = cfg_data
        self.data_mode = cfg.DATASET
        self.exp_name = cfg.EXP_NAME
        self.exp_path = cfg.EXP_PATH
        self.pwd = pwd
        self.net_name = cfg.NET
        self.net = CrowdCounter(
            cfg.GPU_ID,
            self.net_name,
            num_norm=cfg.NUM_NORM).cuda()
        self.optimizer = optim.Adam(
            self.net.CCN.parameters(),
            lr=cfg.LR,
            weight_decay=1e-4)
        self.scheduler = StepLR(
            self.optimizer,
            step_size=cfg.NUM_EPOCH_LR_DECAY,
            gamma=cfg.LR_DECAY)
        # Best metrics so far; update_model() refreshes this on improvement.
        self.train_record = {
            'best_mae': 1e20,
            'best_mse': 1e20,
            'best_model_name': ''}
        self.timer = {
            'iter time': Timer(),
            'train time': Timer(),
            'val time': Timer()}
        self.epoch = 0
        self.i_tb = 0
        self.train_loader, self.val_loader, self.restore_transform = dataloader()
        print("Length of {} train loader: {}".format(
            self.data_mode, len(self.train_loader)))
        print(
            "Length of {} test loader: {}".format(
                self.data_mode, len(
                    self.val_loader)))
        # Optionally resume every piece of training state from a checkpoint.
        if cfg.RESUME:
            latest_state = torch.load(cfg.RESUME_PATH)
            self.net.load_state_dict(latest_state['net'])
            self.optimizer.load_state_dict(latest_state['optimizer'])
            self.scheduler.load_state_dict(latest_state['scheduler'])
            self.epoch = latest_state['epoch'] + 1
            self.i_tb = latest_state['i_tb']
            self.train_record = latest_state['train_record']
            self.exp_path = latest_state['exp_path']
            self.exp_name = latest_state['exp_name']
        self.writer, self.log_txt = logger(
            self.exp_path, self.exp_name, self.pwd, 'exp', resume=cfg.RESUME, baseline=True)
    def forward(self):
        """Run the epoch loop: train each epoch, validate periodically."""
        # self.validate_V3()
        for epoch in range(self.epoch, cfg.MAX_EPOCH):
            self.epoch = epoch
            # LR decay only kicks in after LR_DECAY_START epochs.
            if epoch > cfg.LR_DECAY_START:
                self.scheduler.step()
            # training
            self.timer['train time'].tic()
            self.train()
            self.timer['train time'].toc(average=False)
            print('train time: {:.2f}s'.format(self.timer['train time'].diff))
            print('=' * 20)
            # validation
            if epoch % cfg.VAL_FREQ == 0 or epoch > cfg.VAL_DENSE_START:
                self.timer['val time'].tic()
                self.validate()
                self.timer['val time'].toc(average=False)
                print('val time: {:.2f}s'.format(self.timer['val time'].diff))
    def train(self): # training for all datasets
        """One training epoch; logs the loss every PRINT_FREQ iterations."""
        self.net.train()
        for i, data in enumerate(self.train_loader, 0):
            self.timer['iter time'].tic()
            img, gt_map = data
            img = Variable(img).cuda()
            gt_map = Variable(gt_map).cuda()
            self.optimizer.zero_grad()
            pred_map = self.net(img, gt_map)
            loss = self.net.loss
            loss.backward()
            self.optimizer.step()
            if (i + 1) % cfg.PRINT_FREQ == 0:
                self.i_tb += 1
                self.writer.add_scalar('train_loss', loss.item(), self.i_tb)
                self.timer['iter time'].toc(average=False)
                print(
                    '[ep %d][it %d][loss %.4f][lr %.4f][%.2fs]' %
                    (self.epoch +
                     1,
                     i +
                     1,
                     loss.item(),
                     self.optimizer.param_groups[0]['lr'] *
                     10000,
                     self.timer['iter time'].diff))
                # Density maps are scaled by LOG_PARA; divide to get head counts.
                print(
                    '        [cnt: gt: %.1f pred: %.2f]' %
                    (gt_map[0].sum().data /
                     self.cfg_data.LOG_PARA,
                     pred_map[0].sum().data /
                     self.cfg_data.LOG_PARA))
    def validate(self):
        """Evaluate on the validation set; log MAE/ (R)MSE and save best model."""
        self.net.eval()
        losses = AverageMeter()
        maes = AverageMeter()
        mses = AverageMeter()
        for vi, data in enumerate(self.val_loader, 0):
            img, gt_map = data
            with torch.no_grad():
                img = Variable(img).cuda()
                gt_map = Variable(gt_map).cuda()
                pred_map = self.net.forward(img, gt_map)
                pred_map = pred_map.data.cpu().numpy()
                gt_map = gt_map.data.cpu().numpy()
                for i_img in range(pred_map.shape[0]):
                    # Undo the LOG_PARA scaling to compare raw counts.
                    pred_cnt = np.sum(pred_map[i_img]) / self.cfg_data.LOG_PARA
                    gt_count = np.sum(gt_map[i_img]) / self.cfg_data.LOG_PARA
                    losses.update(self.net.loss.item())
                    maes.update(abs(gt_count - pred_cnt))
                    mses.update((gt_count - pred_cnt) * (gt_count - pred_cnt))
                if vi == 0:
                    vis_results(
                        self.exp_name,
                        self.epoch,
                        self.writer,
                        self.restore_transform,
                        img,
                        pred_map,
                        gt_map)
        mae = maes.avg
        # Root of the mean squared error (RMSE).
        mse = np.sqrt(mses.avg)
        loss = losses.avg
        self.writer.add_scalar('val_loss', loss, self.epoch + 1)
        self.writer.add_scalar('mae', mae, self.epoch + 1)
        self.writer.add_scalar('mse', mse, self.epoch + 1)
        self.train_record = update_model(self.net,
                                         self.optimizer,
                                         self.scheduler,
                                         self.epoch,
                                         self.i_tb,
                                         self.exp_path,
                                         self.exp_name,
                                         [mae,
                                          mse,
                                          loss],
                                         self.train_record,
                                         self.log_txt)
        print_summary(self.exp_name, [mae, mse, loss], self.train_record)
| 35.393617 | 92 | 0.481966 |
79597c37ca89ef3da68ce20182329e28c0f0278a | 2,412 | py | Python | playground/warehouse/config.py | murlokito/playground | 405a7091bbfd6705db967e872ed6c4591bd892e6 | [
"MIT"
] | null | null | null | playground/warehouse/config.py | murlokito/playground | 405a7091bbfd6705db967e872ed6c4591bd892e6 | [
"MIT"
] | null | null | null | playground/warehouse/config.py | murlokito/playground | 405a7091bbfd6705db967e872ed6c4591bd892e6 | [
"MIT"
] | null | null | null | __title__ = "simulation"
__author__ = "murlux"
__copyright__ = "Copyright 2019, " + __author__
__credits__ = (__author__, )
__license__ = "MIT"
__email__ = "murlux@protonmail.com"
# Global imports
from typing import Any, Dict, List, Optional
# Local imports
from playground.util import setup_logger
from playground.messaging.producer import ProducerConfig, producer_config_from_json
logger = setup_logger(name=__name__)
class WarehouseConfig:
    """An object representing the WarehouseConfig."""
    # Attribute defaults; instances overwrite these in __init__.
    name: Optional[str] = None
    module_name: Optional[str] = None
    api: Optional[Dict[str, Any]] = None
    socket_ip: Optional[str] = None
    socket_port: Optional[int] = None
    # Forward-ref string keeps the class importable without ProducerConfig in scope.
    producer_config: Optional["ProducerConfig"] = None
    def __init__(
        self, name: str = None, module_name: str = None, api: Dict[str, Any] = None,
        socket_ip: str = None, socket_port: int = None, producer_config: Dict[str, Any] = None,
    ):
        """
        Simply initiate the WarehouseConfig.

        :raises Exception: if `name` or `module_name` is missing.
        """
        if name is None:
            raise Exception("WarehouseConfig class needs `name` param to designate itself")
        self.name = name
        if module_name is None:
            raise Exception("WarehouseConfig class needs `module_name` param to designate itself")
        self.module_name = module_name
        # Bug fix: these three parameters were previously accepted but never
        # stored, leaving the class-level None defaults in place.
        self.api = api
        self.socket_ip = socket_ip
        self.socket_port = socket_port
        if producer_config is not None:
            self.producer_config = producer_config_from_json(json=producer_config)
def warehouse_config_from_json(json: Dict[str, Any] = None) -> Optional[WarehouseConfig]:
    """
    Build a WarehouseConfig from a parsed JSON dict.

    The dict must contain 'warehouse' and 'api' sections; the optional
    'producer_config' section is forwarded to the constructor.

    :raises Exception: if `json` is missing or lacks a required section.
    """
    if json is None:
        # Bug fix: error messages previously said "BridgeConfig" (copy-paste).
        raise Exception("WarehouseConfig warehouse_config_from_json method needs `json` param")
    warehouse_json = json.get('warehouse', None)
    if warehouse_json is None:
        raise Exception("WarehouseConfig warehouse_config_from_json method got invalid json")
    api_json = json.get('api', None)
    if api_json is None:
        raise Exception("WarehouseConfig warehouse_config_from_json method got invalid json")
    logger.info('Attempting to create warehouse data flows from config..')
    return WarehouseConfig(
        name=warehouse_json.get('name', None),
        module_name=warehouse_json.get('module_name', None),
        api=api_json,
        socket_ip=warehouse_json.get('websocket_ip', None),
        socket_port=warehouse_json.get('websocket_port', None),
        producer_config=json.get('producer_config', None),
    )
| 31.324675 | 98 | 0.691542 |
79597d7241dcd603a368651c45fcec14a26301ec | 821 | py | Python | setup.py | phi1010/decorated-paho-mqtt | 7414e2313cc3a7aa31e5f94ffb266554f56a1843 | [
"BSD-3-Clause"
] | null | null | null | setup.py | phi1010/decorated-paho-mqtt | 7414e2313cc3a7aa31e5f94ffb266554f56a1843 | [
"BSD-3-Clause"
] | null | null | null | setup.py | phi1010/decorated-paho-mqtt | 7414e2313cc3a7aa31e5f94ffb266554f56a1843 | [
"BSD-3-Clause"
] | null | null | null | import setuptools
# Load the README so PyPI can render it as the long description.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()
setuptools.setup(
    name='decorated_paho_mqtt',
    version='1.0.5',
    url='https://github.com/phi1010/decorated-paho-mqtt',
    author='Phillip Kuhrt',
    author_email='mail@phi1010.com',
    description='Wrapper for Paho MQTT with declarative subscriptions and topic parsing utilities',
    long_description=long_description,
    long_description_content_type="text/markdown",
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: BSD License",
        "Operating System :: OS Independent",
    ],
    # src-layout: packages live under src/.
    package_dir={"": "src"},
    install_requires=[
        'paho-mqtt',
    ],
    packages=setuptools.find_packages(where="src"),
    python_requires=">=3.7",
)
| 30.407407 | 99 | 0.663825 |
79597dee0efa89ce832d04df7ea0aaa65d8a854e | 1,349 | py | Python | python/ql/test/library-tests/PointsTo/guarded/type_test.py | vadi2/codeql | a806a4f08696d241ab295a286999251b56a6860c | [
"MIT"
] | 4,036 | 2020-04-29T00:09:57.000Z | 2022-03-31T14:16:38.000Z | python/ql/test/library-tests/PointsTo/guarded/type_test.py | vadi2/codeql | a806a4f08696d241ab295a286999251b56a6860c | [
"MIT"
] | 2,970 | 2020-04-28T17:24:18.000Z | 2022-03-31T22:40:46.000Z | python/ql/test/library-tests/PointsTo/guarded/type_test.py | ScriptBox99/github-codeql | 2ecf0d3264db8fb4904b2056964da469372a235c | [
"MIT"
] | 794 | 2020-04-29T00:28:25.000Z | 2022-03-30T08:21:46.000Z |
def f(d = {}):
    # NOTE(review): the mutable default argument appears deliberate - this file
    # looks like a static-analysis fixture exercising isinstance guards, and
    # `use` is not defined anywhere in this file (presumably an analysis sink).
    if isinstance(d, dict):
        use(d)
    else:
        use(d)  # only reachable when a caller passes a non-dict
def g(cond):
    # x is either the int 0 or the float 1.0, depending on cond.
    x = 0 if cond else 1.0
    if isinstance(x, int):
        use(x)
    elif isinstance(x, float):
        use(x)
    else:
        use(x)  # unreachable at runtime: x is always an int or a float here
def h(arg=int):
    # Default argument is the *class* int itself, so the issubclass guard
    # holds whenever the default is used.
    if issubclass(arg, int):
        use(arg)
    else:
        use(arg)
class D(object):
    # Empty base class; exists only so the guards below can test
    # isinstance/issubclass against a user-defined class hierarchy.
    pass
class E(D):
    # Empty subclass of D, used as a default value/type in j, k, l and m.
    pass
def j(arg=E()):
    # NOTE(review): a single E() instance is created once and shared across
    # calls as the default - presumably intentional in this fixture.
    if isinstance(arg, E):
        use(arg)
    else:
        use(arg)
def k(arg=E()):
    # Guard on the *base* class: an E instance is also a D instance.
    if isinstance(arg, D):
        use(arg)
    else:
        use(arg)
def l(arg=E):
    # issubclass on the exact class: E is a subclass of itself, so the guard
    # holds for the default argument.
    if issubclass(arg, E):
        use(arg)
    else:
        use(arg)
def m(arg=E):
    # issubclass against the base class D (E inherits from D).
    if issubclass(arg, D):
        use(arg)
    else:
        use(arg)
number = int, float  # tuple of types; valid as the second argument to isinstance()
def n(cond):
    # Negated guard over a tuple of types.
    x = 0 if cond else 1.0
    if not isinstance(x, number):
        use(x)  # unreachable at runtime: x is always an int or a float
    else:
        use(x)
import sys
if sys.version < "3":
from collections import Iterable, Sequence, Set
else:
from collections.abc import Iterable, Sequence, Set
def p():
    # list is a (virtual) subclass of Iterable, so only the first branch runs.
    if issubclass(list, Iterable):
        use(0)
    else:
        use(1)
def q():
    # list is a (virtual) subclass of Sequence, so only the first branch runs.
    if issubclass(list, Sequence):
        use(0)
    else:
        use(1)
def p():
    # NOTE(review): redefines p() above - apparently intentional in this fixture.
    # A set literal is an Iterable instance, so only the first branch runs.
    if isinstance({0}, Iterable):
        use(0)
    else:
        use(1)
def q():
    # NOTE(review): redefines q() above.
    # A set literal is a Set instance, so only the first branch runs.
    if isinstance({0}, Set):
        use(0)
    else:
        use(1)
| 13.765306 | 55 | 0.49444 |
79597e85ac0a7bdda597fe67d42966de1363d3a0 | 66,805 | py | Python | sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2020_06_01/operations/_managed_clusters_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2020_06_01/operations/_managed_clusters_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2020_06_01/operations/_managed_clusters_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
    # pylint: disable=unused-import,ungrouped-imports
    from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union

    T = TypeVar('T')
    # Signature of the optional `cls` response hook accepted by every operation:
    # called with (pipeline response, deserialized body, response headers dict).
    ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ManagedClustersOperations(object):
"""ManagedClustersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerservice.v2020_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        """Wire this operation group to the shared client infrastructure.

        Instantiated by the generated service client, not by users directly.
        """
        self._client = client  # pipeline-enabled transport client
        self._serialize = serializer  # request serializer
        self._deserialize = deserializer  # response deserializer
        self._config = config  # client configuration (subscription id, polling interval, ...)
    def list(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.ManagedClusterListResult"]
        """Gets a list of managed clusters in the specified subscription.

        Gets a list of managed clusters in the specified subscription. The operation returns properties
        of each managed cluster.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ManagedClusterListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2020_06_01.models.ManagedClusterListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ManagedClusterListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-06-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build either the first-page request (from the URL template) or a
            # follow-up request for the server-supplied next_link.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already carries its own query string, so no api-version here.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and hand ItemPaged the continuation link
            # plus an iterator over this page's elements.
            deserialized = self._deserialize('ManagedClusterListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch one page; raises HttpResponseError on any non-200 status.
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        # Lazy pager: no request is issued until iteration begins.
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/managedClusters'}  # type: ignore
    def list_by_resource_group(
        self,
        resource_group_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.ManagedClusterListResult"]
        """Lists managed clusters in the specified subscription and resource group.

        Lists managed clusters in the specified subscription and resource group. The operation returns
        properties of each managed cluster.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ManagedClusterListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2020_06_01.models.ManagedClusterListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ManagedClusterListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-06-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # First-page request from the URL template, or a follow-up request
            # for the server-supplied next_link (which carries its own query).
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list_by_resource_group.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            # One page -> (continuation link or None, iterator over elements).
            deserialized = self._deserialize('ManagedClusterListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch one page; raises HttpResponseError on any non-200 status.
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        # Lazy pager: no request is issued until iteration begins.
        return ItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters'}  # type: ignore
def get_upgrade_profile(
self,
resource_group_name, # type: str
resource_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ManagedClusterUpgradeProfile"
"""Gets upgrade profile for a managed cluster.
Gets the details of the upgrade profile for a managed cluster with a specified resource group
and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedClusterUpgradeProfile, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2020_06_01.models.ManagedClusterUpgradeProfile
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedClusterUpgradeProfile"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self.get_upgrade_profile.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedClusterUpgradeProfile', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_upgrade_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/upgradeProfiles/default'} # type: ignore
    def get_access_profile(
        self,
        resource_group_name,  # type: str
        resource_name,  # type: str
        role_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.ManagedClusterAccessProfile"
        """Gets an access profile of a managed cluster.

        Gets the accessProfile for the specified role name of the managed cluster with a specified
        resource group and name. **WARNING**\ : This API will be deprecated. Instead use
        `ListClusterUserCredentials
        <https://docs.microsoft.com/en-us/rest/api/aks/managedclusters/listclusterusercredentials>`_ or
        `ListClusterAdminCredentials
        <https://docs.microsoft.com/en-us/rest/api/aks/managedclusters/listclusteradmincredentials>`_ .

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :param role_name: The name of the role for managed cluster accessProfile resource.
        :type role_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ManagedClusterAccessProfile, or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.v2020_06_01.models.ManagedClusterAccessProfile
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ManagedClusterAccessProfile"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-06-01"
        accept = "application/json"

        # Construct URL
        url = self.get_access_profile.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
            'roleName': self._serialize.url("role_name", role_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # The URL ends in ".../listCredential", so the service models this read
        # as an action: a POST, not a GET.
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('ManagedClusterAccessProfile', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_access_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/accessProfiles/{roleName}/listCredential'}  # type: ignore
    def list_cluster_admin_credentials(
        self,
        resource_group_name,  # type: str
        resource_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.CredentialResults"
        """Gets cluster admin credential of a managed cluster.

        Gets cluster admin credential of the managed cluster with a specified resource group and name.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: CredentialResults, or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.v2020_06_01.models.CredentialResults
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.CredentialResults"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-06-01"
        accept = "application/json"

        # Construct URL
        url = self.list_cluster_admin_credentials.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Credential retrieval is an ARM action ("listClusterAdminCredential"), hence POST.
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('CredentialResults', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    list_cluster_admin_credentials.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterAdminCredential'}  # type: ignore
    def list_cluster_user_credentials(
        self,
        resource_group_name,  # type: str
        resource_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.CredentialResults"
        """Gets cluster user credential of a managed cluster.

        Gets cluster user credential of the managed cluster with a specified resource group and name.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: CredentialResults, or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.v2020_06_01.models.CredentialResults
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.CredentialResults"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-06-01"
        accept = "application/json"

        # Construct URL
        url = self.list_cluster_user_credentials.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Credential retrieval is an ARM action ("listClusterUserCredential"), hence POST.
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('CredentialResults', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    list_cluster_user_credentials.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterUserCredential'}  # type: ignore
    def list_cluster_monitoring_user_credentials(
        self,
        resource_group_name,  # type: str
        resource_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.CredentialResults"
        """Gets cluster monitoring user credential of a managed cluster.

        Gets cluster monitoring user credential of the managed cluster with a specified resource group
        and name.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: CredentialResults, or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.v2020_06_01.models.CredentialResults
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.CredentialResults"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-06-01"
        accept = "application/json"

        # Construct URL
        url = self.list_cluster_monitoring_user_credentials.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Credential retrieval is an ARM action ("listClusterMonitoringUserCredential"), hence POST.
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('CredentialResults', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    list_cluster_monitoring_user_credentials.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterMonitoringUserCredential'}  # type: ignore
def get(
self,
resource_group_name, # type: str
resource_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ManagedCluster"
"""Gets a managed cluster.
Gets the details of the managed cluster with a specified resource group and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedCluster, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2020_06_01.models.ManagedCluster
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedCluster"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedCluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
    def _create_or_update_initial(
        self,
        resource_group_name,  # type: str
        resource_name,  # type: str
        parameters,  # type: "_models.ManagedCluster"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.ManagedCluster"
        """Issue the initial PUT of the create-or-update long-running operation.

        Internal helper for ``begin_create_or_update``; returns the first
        response (200 or 201) without polling the operation to completion.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ManagedCluster"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-06-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'ManagedCluster')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # Both 200 (update) and 201 (create) carry a ManagedCluster body.
        if response.status_code == 200:
            deserialized = self._deserialize('ManagedCluster', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('ManagedCluster', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'}  # type: ignore
    def begin_create_or_update(
        self,
        resource_group_name,  # type: str
        resource_name,  # type: str
        parameters,  # type: "_models.ManagedCluster"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.ManagedCluster"]
        """Creates or updates a managed cluster.

        Creates or updates a managed cluster with the specified configuration for agents and Kubernetes
        version.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :param parameters: Parameters supplied to the Create or Update a Managed Cluster operation.
        :type parameters: ~azure.mgmt.containerservice.v2020_06_01.models.ManagedCluster
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either ManagedCluster or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2020_06_01.models.ManagedCluster]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ManagedCluster"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial PUT when not resuming from a saved poller state;
        # `cls=lambda x,y,z: x` keeps the raw pipeline response for the poller.
        if cont_token is None:
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # These kwargs were consumed by the initial call; drop them so they are
        # not forwarded to the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the final response once polling completes.
            deserialized = self._deserialize('ManagedCluster', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }

        # polling=True -> standard ARM polling; False -> return immediately;
        # anything else is treated as a user-supplied PollingMethod.
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'}  # type: ignore
def _update_tags_initial(
self,
resource_group_name, # type: str
resource_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.ManagedCluster"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedCluster"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedCluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
    def begin_update_tags(
        self,
        resource_group_name, # type: str
        resource_name, # type: str
        parameters, # type: "_models.TagsObject"
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller["_models.ManagedCluster"]
        """Updates tags on a managed cluster.
        Updates a managed cluster with the specified tags.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :param parameters: Parameters supplied to the Update Managed Cluster Tags operation.
        :type parameters: ~azure.mgmt.containerservice.v2020_06_01.models.TagsObject
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either ManagedCluster or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2020_06_01.models.ManagedCluster]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedCluster"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # No saved state: send the initial PATCH. `cls` is overridden so the raw
            # pipeline response is returned for the poller to track.
            raw_result = self._update_tags_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were only meaningful for the initial request, not for polling.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final response once the long-running operation completes.
            deserialized = self._deserialize('ManagedCluster', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        # Pick the polling strategy: ARM polling by default, none, or a caller-provided one.
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a poller from previously saved state instead of the fresh raw_result.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
resource_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
    def begin_delete(
        self,
        resource_group_name, # type: str
        resource_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes a managed cluster.
        Deletes the managed cluster with a specified resource group and name.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # No saved state: send the initial DELETE. `cls` is overridden so the raw
            # pipeline response is returned for the poller to track.
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were only meaningful for the initial request, not for polling.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Delete has no body: implicitly returns None unless a custom `cls` is given.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        # Pick the polling strategy: ARM polling by default, none, or a caller-provided one.
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a poller from previously saved state instead of the fresh raw_result.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
    def _reset_service_principal_profile_initial(
        self,
        resource_group_name, # type: str
        resource_name, # type: str
        parameters, # type: "_models.ManagedClusterServicePrincipalProfile"
        **kwargs # type: Any
    ):
        # type: (...) -> None
        """Send the initial POST request of the reset-service-principal-profile LRO.

        Returns None (or ``cls(response)``); the long-running status is tracked by
        the poller created in ``begin_reset_service_principal_profile``.
        """
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-06-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._reset_service_principal_profile_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the request body and issue the POST.
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'ManagedClusterServicePrincipalProfile')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 = completed synchronously, 202 = accepted and running asynchronously.
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _reset_service_principal_profile_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetServicePrincipalProfile'} # type: ignore
    def begin_reset_service_principal_profile(
        self,
        resource_group_name, # type: str
        resource_name, # type: str
        parameters, # type: "_models.ManagedClusterServicePrincipalProfile"
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Reset Service Principal Profile of a managed cluster.
        Update the service principal Profile for a managed cluster.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :param parameters: Parameters supplied to the Reset Service Principal Profile operation for a
         Managed Cluster.
        :type parameters: ~azure.mgmt.containerservice.v2020_06_01.models.ManagedClusterServicePrincipalProfile
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # No saved state: send the initial POST. `cls` is overridden so the raw
            # pipeline response is returned for the poller to track.
            raw_result = self._reset_service_principal_profile_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were only meaningful for the initial request, not for polling.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Operation has no body: implicitly returns None unless a custom `cls` is given.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        # Pick the polling strategy: ARM polling by default, none, or a caller-provided one.
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a poller from previously saved state instead of the fresh raw_result.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_reset_service_principal_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetServicePrincipalProfile'} # type: ignore
    def _reset_aad_profile_initial(
        self,
        resource_group_name, # type: str
        resource_name, # type: str
        parameters, # type: "_models.ManagedClusterAADProfile"
        **kwargs # type: Any
    ):
        # type: (...) -> None
        """Send the initial POST request of the reset-AAD-profile LRO.

        Returns None (or ``cls(response)``); the long-running status is tracked by
        the poller created in ``begin_reset_aad_profile``.
        """
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-06-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._reset_aad_profile_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the request body and issue the POST.
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'ManagedClusterAADProfile')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 = completed synchronously, 202 = accepted and running asynchronously.
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _reset_aad_profile_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetAADProfile'} # type: ignore
    def begin_reset_aad_profile(
        self,
        resource_group_name, # type: str
        resource_name, # type: str
        parameters, # type: "_models.ManagedClusterAADProfile"
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Reset AAD Profile of a managed cluster.
        Update the AAD Profile for a managed cluster.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :param parameters: Parameters supplied to the Reset AAD Profile operation for a Managed
         Cluster.
        :type parameters: ~azure.mgmt.containerservice.v2020_06_01.models.ManagedClusterAADProfile
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # No saved state: send the initial POST. `cls` is overridden so the raw
            # pipeline response is returned for the poller to track.
            raw_result = self._reset_aad_profile_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were only meaningful for the initial request, not for polling.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Operation has no body: implicitly returns None unless a custom `cls` is given.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        # Pick the polling strategy: ARM polling by default, none, or a caller-provided one.
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a poller from previously saved state instead of the fresh raw_result.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_reset_aad_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetAADProfile'} # type: ignore
def _rotate_cluster_certificates_initial(
self,
resource_group_name, # type: str
resource_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self._rotate_cluster_certificates_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_rotate_cluster_certificates_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/rotateClusterCertificates'} # type: ignore
    def begin_rotate_cluster_certificates(
        self,
        resource_group_name, # type: str
        resource_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Rotate certificates of a managed cluster.
        Rotate certificates of a managed cluster.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # No saved state: send the initial POST. `cls` is overridden so the raw
            # pipeline response is returned for the poller to track.
            raw_result = self._rotate_cluster_certificates_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were only meaningful for the initial request, not for polling.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Operation has no body: implicitly returns None unless a custom `cls` is given.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        # Pick the polling strategy: ARM polling by default, none, or a caller-provided one.
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a poller from previously saved state instead of the fresh raw_result.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_rotate_cluster_certificates.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/rotateClusterCertificates'} # type: ignore
| 51.348962 | 253 | 0.668648 |
79597eb04ca7042eaa6ddb75bd8509ef679a0438 | 760 | py | Python | Prototipo avl/cnavas.py | 3lyalvarado/Metodos-basicos | e670fa9bfb85000f9eaba516643750d324f005f2 | [
"MIT"
] | null | null | null | Prototipo avl/cnavas.py | 3lyalvarado/Metodos-basicos | e670fa9bfb85000f9eaba516643750d324f005f2 | [
"MIT"
] | null | null | null | Prototipo avl/cnavas.py | 3lyalvarado/Metodos-basicos | e670fa9bfb85000f9eaba516643750d324f005f2 | [
"MIT"
] | null | null | null | import tkinter as tk
from PIL import Image
from tkinter import ttk
from graphviz import render
import time
class Aplicacion:
    """Tkinter window that renders a Graphviz .dot file and displays the resulting PNG.

    Fixes over the previous version:
    - ``f"{NombreArchivo sin}"`` was a SyntaxError (invalid expression inside the
      f-string); the intended variable was ``nombre``.
    - The no-op expression statement ``f'{nombre}.png'`` was removed.
    - ``Image.open(...)`` opened a file handle that was never used or closed; removed.
    - ``time.sleep(3)`` was removed: ``graphviz.render`` returns only after the PNG
      is written, so no wait is needed.
    - The PhotoImage is now stored on ``self`` so Tkinter's garbage collector does
      not discard it and blank the canvas.
    """

    def __init__(self):
        self.ventana1 = tk.Tk()
        self.canvas1 = tk.Canvas(self.ventana1, width=700, height=500, background="black")
        self.canvas1.grid(column=0, row=0)
        nombre = "NombreArchivo"
        # Render <nombre>.dot into <nombre>.dot.png with Graphviz (synchronous).
        render('dot', 'png', f'{nombre}.dot')
        # Keep a reference on self: a local PhotoImage would be garbage-collected
        # and the canvas would show nothing.
        self.archi1 = tk.PhotoImage(file=f"{nombre}.dot.png")
        self.canvas1.create_image(0, 0, image=self.archi1, anchor="nw")
        self.ventana1.mainloop()
aplicacion1 = Aplicacion()
| 33.043478 | 95 | 0.584211 |
79597ef93fe2b0f2cffa17ad16b470b2eeccdca9 | 6,501 | py | Python | src/datasets/fingerprint.py | joeddav/datasets | f955fa2d4785a1cea381a7999e0c5d0c0314046b | [
"Apache-2.0"
] | 1 | 2020-09-11T14:27:41.000Z | 2020-09-11T14:27:41.000Z | src/nlp/fingerprint.py | vinayya/nlp | 115d65db3f68f92be79063a4333552eccfc0df68 | [
"Apache-2.0"
] | null | null | null | src/nlp/fingerprint.py | vinayya/nlp | 115d65db3f68f92be79063a4333552eccfc0df68 | [
"Apache-2.0"
] | null | null | null | import json
import os
from copy import deepcopy
from dataclasses import asdict
from functools import wraps
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
import xxhash
from .info import DatasetInfo
from .utils.py_utils import dumps
if TYPE_CHECKING:
from .arrow_dataset import Dataset
def hashregister(t):
    """Decorator factory: register the decorated function as the ``Hasher``
    implementation for objects of type ``t``.

    The decorated function is stored in ``Hasher.dispatch`` and returned
    unchanged, so it can still be called directly.
    """
    def register(func):
        Hasher.dispatch[t] = func
        return func
    return register
class Hasher:
    """Compute a stable xxhash64 hex digest over arbitrary python objects."""

    # Per-type custom hash functions, populated via the ``hashregister`` decorator.
    dispatch = {}

    def __init__(self):
        self.m = xxhash.xxh64()

    @classmethod
    def hash_bytes(cls, value):
        """Hash one ``bytes`` object or an iterable of ``bytes`` chunks."""
        chunks = [value] if isinstance(value, bytes) else value
        digest = xxhash.xxh64()
        for chunk in chunks:
            digest.update(chunk)
        return digest.hexdigest()

    @classmethod
    def hash_default(cls, value):
        """Fallback: hash the object's pickled/serialized byte representation."""
        return cls.hash_bytes(dumps(value))

    @classmethod
    def hash(cls, value):
        """Hash using the registered per-type function when available."""
        handler = cls.dispatch.get(type(value))
        if handler is None:
            return cls.hash_default(value)
        return handler(cls, value)

    def update(self, value):
        """Fold ``value`` into this hasher's running state, tagged with its type."""
        type_tag = f"=={type(value)}==".encode("utf8")
        self.m.update(type_tag)
        self.m.update(self.hash(value).encode("utf-8"))

    def hexdigest(self):
        """Return the hex digest of everything folded in so far."""
        return self.m.hexdigest()
# Register a new hasher can be useful for two possible reasons:
# 1 - optimize the hashing of large amount of data (e.g. pa.Table)
# 2 - take advantage of a custom serialization method (e.g. DatasetInfo)
@hashregister(pa.Table)
def _hash_pa_table(hasher, value):
    """Hash a ``pyarrow.Table`` by hashing each column and joining the results."""
    def _hash_column(column):
        # ChunkedArray: hash each chunk's string rendering; otherwise hash directly.
        if isinstance(column, pa.ChunkedArray):
            return hasher.hash_bytes(chunk.to_string() for chunk in column.chunks)
        return hasher.hash_bytes(column)
    parts = [name + "-" + _hash_column(value[name]) for name in sorted(value.column_names)]
    return hasher.hash_bytes("-".join(parts).encode("utf-8"))
@hashregister(DatasetInfo)
def _hash_dataset_info(hasher, value):
    """Hash a ``DatasetInfo`` via its canonical (key-sorted) JSON serialization."""
    serialized = json.dumps(asdict(value), sort_keys=True)
    return hasher.hash_bytes(serialized.encode("utf-8"))
def generate_fingerprint(dataset):
    """Compute a dataset fingerprint from its pickled state and the mtimes of its data files.

    The ``_fingerprint`` entry itself is excluded so the result does not depend
    on any previously stored fingerprint.
    """
    state = dataset.__getstate__()
    hasher = Hasher()
    for key in sorted(state):
        if key != "_fingerprint":
            hasher.update(key)
            hasher.update(state[key])
    # hash data files last modification timestamps as well
    all_data_files = state.get("_data_files", []) + state.get("_indices_data_files", [])
    for data_file in all_data_files:
        hasher.update(os.path.getmtime(data_file["filename"]))
    return hasher.hexdigest()
def update_fingerprint(fingerprint, transform, transform_args):
    """Derive a new fingerprint from the current one, a transform name and its arguments.

    Arguments are folded in key-sorted order so the result is deterministic
    regardless of the order in which kwargs were supplied.
    """
    components = [fingerprint, transform]
    for arg_name in sorted(transform_args):
        components.append(arg_name)
        components.append(transform_args[arg_name])
    hasher = Hasher()
    for component in components:
        hasher.update(component)
    return hasher.hexdigest()
def fingerprint(inplace, use_kwargs=None, ignore_kwargs=None, fingerprint_names=None, randomized_function=None):
    """Decorator factory for Dataset transforms that keeps dataset fingerprints up to date.

    If ``inplace`` is True, the decorated method mutates the dataset and its
    ``_fingerprint`` is updated after a successful call. Otherwise the new
    fingerprint(s) are computed up front and injected into the call's kwargs
    under the names listed in ``fingerprint_names``.
    ``use_kwargs``/``ignore_kwargs`` restrict which call arguments contribute
    to the fingerprint; ``randomized_function`` marks transforms that take
    ``seed``/``generator`` parameters.
    """
    assert use_kwargs is None or isinstance(use_kwargs, list), "use_kwargs is supposed to be a list, not {}".format(
        type(use_kwargs)
    )
    assert ignore_kwargs is None or isinstance(
        ignore_kwargs, list
    ), "ignore_kwargs is supposed to be a list, not {}".format(type(use_kwargs))
    assert not inplace or not fingerprint_names, "fingerprint_names are only used when inplace is False"
    fingerprint_names = fingerprint_names if fingerprint_names is not None else ["new_fingerprint"]
    def _fingerprint(func):
        # Non-inplace transforms must accept every fingerprint parameter by name.
        assert inplace or all(
            name in func.__code__.co_varnames for name in fingerprint_names
        ), "function {} is missing parameters {} in signature".format(func, fingerprint_names)
        if randomized_function: # randomized function have seed and generator parameters
            assert "seed" in func.__code__.co_varnames, "'seed' must be in {}'s signature".format(func)
            assert "generator" in func.__code__.co_varnames, "'generator' must be in {}'s signature".format(func)
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Separate the dataset (`self`) from the rest of the call arguments.
            if args:
                self: "Dataset" = args[0]
                args = args[1:]
            else:
                self: "Dataset" = kwargs.pop("self")
            # Map positional args to their parameter names so they can be hashed too.
            kwargs_for_fingerprint = dict(kwargs)
            kwargs_for_fingerprint.update(zip(func.__code__.co_varnames, args))
            # keep the right kwargs to be hashed to generate the fingerprint
            if use_kwargs:
                kwargs_for_fingerprint = {k: v for k, v in kwargs_for_fingerprint.items() if k in use_kwargs}
            if ignore_kwargs:
                kwargs_for_fingerprint = {k: v for k, v in kwargs_for_fingerprint.items() if k not in ignore_kwargs}
            if randomized_function: # randomized functions have `seed` and `generator` parameters
                # Without an explicit seed/generator, hash a fresh RNG so repeated
                # un-seeded calls get distinct fingerprints.
                if kwargs_for_fingerprint.get("seed") is None and kwargs_for_fingerprint.get("generator") is None:
                    kwargs_for_fingerprint["generator"] = np.random.default_rng(None)
            # compute new_fingerprint and add it to the args of not in-place transforms
            transform = func.__module__ + "." + func.__qualname__
            if inplace:
                new_fingerprint = update_fingerprint(self._fingerprint, transform, kwargs_for_fingerprint)
                new_inplace_history_item = (func.__name__, deepcopy(args), deepcopy(kwargs))
            else:
                for fingerprint_name in fingerprint_names: # transforms like `train_test_split` have several hashes
                    if kwargs.get(fingerprint_name) is None:
                        kwargs_for_fingerprint["fingerprint_name"] = fingerprint_name
                        kwargs[fingerprint_name] = update_fingerprint(
                            self._fingerprint, transform, kwargs_for_fingerprint
                        )
            # Call actual function
            out = func(self, *args, **kwargs)
            # Update fingerprint of in-place transforms + update in-place history of transforms
            if inplace: # update after calling func so that the fingerprint doesn't change if the function fails
                self._fingerprint = new_fingerprint
                for inplace_hist_per_file in self._inplace_history:
                    inplace_hist_per_file["transforms"].append(new_inplace_history_item)
            return out
        wrapper._decorator_name_ = "fingerprint"
        return wrapper
    return _fingerprint
| 36.522472 | 116 | 0.65713 |
79597f10c1b833219fd1dd87ef480b903f4709da | 973 | py | Python | clamav.py | NVISOsecurity/assemblyline-service-clamav | 41772ff197cf61d296ef4e4ca6af0a9ca4789e1d | [
"MIT"
] | 1 | 2021-08-30T15:57:56.000Z | 2021-08-30T15:57:56.000Z | clamav.py | x1mus/assemblyline-service-clamav | a4fc43421f4cf9a704762d809122e7013eeceb44 | [
"MIT"
] | null | null | null | clamav.py | x1mus/assemblyline-service-clamav | a4fc43421f4cf9a704762d809122e7013eeceb44 | [
"MIT"
] | 1 | 2022-03-23T08:20:23.000Z | 2022-03-23T08:20:23.000Z | import subprocess
from assemblyline_v4_service.common.base import ServiceBase
from assemblyline_v4_service.common.result import Result, ResultSection
class ClamAV(ServiceBase):
def __init__(self, config=None):
super(ClamAV, self).__init__(config)
def start(self):
self.log.debug("ClamAV service started")
def stop(self):
self.log.debug("ClamAV service ended")
def execute(self, request):
result = Result()
file_path = request.file_path
p1 = subprocess.Popen("clamscan -a -z --detect-pua --alert-macros " + file_path, shell=True, stdout=subprocess.PIPE)
p1.wait()
stdout = p1.communicate()[0].decode("utf-8")
report = stdout.split("\n")
report = list(filter(None, report))
text_section = ResultSection("Successfully scanned the file")
if "FOUND" in report[0]:
text_section.set_heuristic(1)
for l in report:
text_section.add_line(l)
result.add_section(text_section)
request.result = result
| 27.027778 | 119 | 0.711202 |
79597f7164767826bbb53aee52c4b77fa0f4dc2d | 475 | py | Python | dots/adamiak.py | HakierGrzonzo/syncing | 28d1f0e809b7597c18ec075ac29e90d6b766b9f9 | [
"MIT"
] | null | null | null | dots/adamiak.py | HakierGrzonzo/syncing | 28d1f0e809b7597c18ec075ac29e90d6b766b9f9 | [
"MIT"
] | null | null | null | dots/adamiak.py | HakierGrzonzo/syncing | 28d1f0e809b7597c18ec075ac29e90d6b766b9f9 | [
"MIT"
] | null | null | null | import itertools
def f1(a, b, c):
a = 10
b += 5
c += a
return a, b, c
def f2(a, b, c):
b += c
a += b
return a, b, c
def f3(a, b, c):
c = b + 10
a *= 2
b += a
return a, b, c
threadies = [f1, f2, f3]
for possibility in itertools.permutations(threadies):
print(list([x.__qualname__ for x in possibility]))
a, b, c = 0, 0, 3
for f in possibility:
a, b, c = f(a, b, c)
print(a, b, c)
print(a + b + c)
| 17.592593 | 54 | 0.486316 |
79597fc4d86127788960e780ee2fcfc43d81c716 | 15,803 | py | Python | qmsk/backup/rsync.py | funidata/qmsk-backup | 47e991a195136b28767eae7d51f6cfd448441884 | [
"MIT"
] | 1 | 2021-07-08T20:56:00.000Z | 2021-07-08T20:56:00.000Z | qmsk/backup/rsync.py | funidata/qmsk-backup | 47e991a195136b28767eae7d51f6cfd448441884 | [
"MIT"
] | 1 | 2021-04-30T14:30:30.000Z | 2021-09-22T12:18:30.000Z | qmsk/backup/rsync.py | funidata/qmsk-backup | 47e991a195136b28767eae7d51f6cfd448441884 | [
"MIT"
] | 1 | 2019-12-19T08:45:21.000Z | 2019-12-19T08:45:21.000Z | """
rsync handling.
Apologies for the 'RSync' nomenclature
"""
import contextlib
import datetime
import logging
import os.path
import qmsk.backup.mount
import qmsk.invoke
import re
from qmsk.backup.lvm import LVM, LVMVolume, LVMSnapshot
from qmsk.backup import zfs
log = logging.getLogger('qmsk.backup.rsync')
RSYNC = '/usr/bin/rsync'
STATS_REGEX = re.compile(r'(.+?): ([0-9.]+)(?: (.+))?')

def parse_stats (stdout):
    """
    Parse rsync --stats output lines into (name, value) pairs.

    Values containing a decimal point are yielded as floats, everything else
    as ints.  A parenthesized breakdown such as ``(reg: 1, dir: 1)`` after a
    value is expanded into extra ``"name: subname"`` entries.

    >>> for n, v in parse_stats(["Number of files: 2 (reg: 1, dir: 1)"]): print((n, v))
    ('Number of files', 2)
    ('Number of files: reg', 1)
    ('Number of files: dir', 1)
    """
    for raw_line in stdout:
        parsed = STATS_REGEX.match(raw_line)
        if parsed is None:
            continue

        label = parsed.group(1)
        number = parsed.group(2)
        trailer = parsed.group(3)

        value = float(number) if '.' in number else int(number)
        yield label, value

        # expand a "(reg: 1, dir: 1)" style breakdown into sub-entries
        if trailer and trailer.startswith('('):
            for piece in trailer.strip('()').split(', '):
                sub_label, sub_value = piece.split(': ')
                yield label + ': ' + sub_label, int(sub_value)
FORMAT_UNITS = [
    (10**12, 'T'),
    (10**9, 'G'),
    (10**6, 'M'),
    (10**3, 'K'),
]

def format_units(value):
    """
    Render value scaled to the largest T/G/M/K suffix it strictly exceeds;
    values not above 1000 are rendered unscaled with a trailing space so
    columns stay aligned.
    """
    scaled = next(
        ((value / quant, unit) for quant, unit in FORMAT_UNITS if value > quant),
        None,
    )
    if scaled is not None:
        magnitude, unit = scaled
        return "{:3.2f}{:}".format(magnitude, unit)
    return "{:3.2f} ".format(value)
def format_percentage(num, total):
    """
    Format num as a percentage of total with two decimals.

    Returns blank padding instead (keeping table columns aligned and avoiding
    a division by zero) when total is not positive.
    """
    if total > 0.0:
        return "{:3.2f}".format(num / total * 100.0)
    else:
        return "        "
def read_stats(row, *names):
    """
    Return the value for the first of names present in the row mapping,
    or None when none of them are present.
    """
    for candidate in names:
        if candidate in row:
            return row[candidate]
    return None
def print_stats(rows):
    """
    Output stats from iterable of (name, duration, stats).

    rows - iterable of (name, datetime.timedelta, stats) tuples, where stats
           is a mapping as produced by parse_stats().
    """

    # one format string shared by the header and data rows keeps columns aligned
    ROW = "{name:18} {time:10} | {files:>8} / {files_total:>8} = {files_pct:>6}% | {size:>8} / {size_total:>8} = {size_pct:>6}% | {send:>8} {recv:>8}"

    # header row; the percentage cells are left blank
    print(ROW.format(
        name        = "NAME",
        time        = "TIME",
        files       = "FILES",
        files_total = "TOTAL",
        files_pct   = "",
        size        = "SIZE",
        size_total  = "TOTAL",
        size_pct    = "",
        send        = "SEND",
        recv        = "RECV",
    ))

    for name, duration, stats in rows:
        # two candidate stat keys are tried (the key name presumably differs
        # between rsync versions — confirm against rsync's --stats output)
        files = read_stats(stats, "Number of regular files transferred", "Number of files transferred")

        print(ROW.format(
            name        = name,
            time        = format_units(duration.total_seconds()),
            files       = format_units(files),
            files_total = format_units(stats["Number of files"]),
            files_pct   = format_percentage(files, stats["Number of files"]),
            size        = format_units(stats["Total transferred file size"]),
            size_total  = format_units(stats["Total file size"]),
            size_pct    = format_percentage(stats["Total transferred file size"], stats["Total file size"]),
            send        = format_units(stats["Total bytes sent"]),
            recv        = format_units(stats["Total bytes received"]),
        ))
def rsync (options, paths, sudo=False):
    """
    Run rsync, passing its stdout straight through to ours.

    options - list of rsync option strings
    paths   - list of source/destination path arguments
    sudo    - run rsync under sudo

    Raises qmsk.invoke.InvokeError
    """
    log.info("rsync %s %s", ' '.join(options), ' '.join(paths))

    # stdout=True passes the output through; nothing is captured here
    # (the previous unused `stdout = ...` binding was removed).
    qmsk.invoke.invoke(RSYNC, options + paths, sudo=sudo, stdout=True)
def rsync_stats (options, paths, sudo=False):
    """
    Run rsync and parse its --stats output.

    Returns a stats dict if there is any valid --stats output, None otherwise.

    Raises qmsk.invoke.InvokeError
    """
    log.info("rsync %s %s", ' '.join(options), ' '.join(paths))

    stdout = qmsk.invoke.invoke(RSYNC, options + paths, sudo=sudo)

    try:
        stats = dict(parse_stats(stdout))
    except ValueError as error:
        # BUG FIX: the %s placeholder previously had no argument, so the
        # actual parse error was never included in the log record.
        log.exception("Invalid rsync --stats output: %s", error)
        return None
    else:
        return stats
def rsync_server (options, paths, sudo=False):
    """
    Run rsync in --server mode, with stdin/stdout attached to ours.

    Raises qmsk.invoke.InvokeError
    """
    log.info("rsync-server %s %s", ' '.join(options), ' '.join(paths))

    # invoke directly with stdin/stdout passed through; no option handling
    # and no output parsing here
    argv = options + paths
    qmsk.invoke.invoke(RSYNC, argv, stdin=True, stdout=True, sudo=sudo)
class Error (Exception):
    """
    Base class for rsync-related errors raised by this module.
    """

    pass

class CommandError (Error):
    """
    Invalid rsync command.
    """

    pass

class SourceError (Error):
    """
    Invalid rsync source
    """

    pass
class Source (object):
    """
    A plain rsync source/destination: a local filesystem path or a remote
    host:path spec passed straight to rsync.
    """

    def __init__ (self, path, sudo=None):
        self.path = path
        self.sudo = sudo

    @contextlib.contextmanager
    def mount_snapshot (self):
        """
        Yield the local filesystem path used as the rsync source.
        """
        yield self.path

    @contextlib.contextmanager
    def mount_restore (self):
        """
        Yield the local filesystem path used as the rsync destination.
        """
        yield self.path

    def rsync_server (self, options):
        """
        Serve the restore path in rsync --server mode, passing stdin/stdout
        through.
        """
        with self.mount_restore() as restore_path:
            return rsync_server(options, ['.', restore_path], sudo=self.sudo)

    def rsync_sender (self, options):
        """
        Serve the snapshot path in --server --sender mode, passing
        stdin/stdout through.
        """
        with self.mount_snapshot() as snapshot_path:
            return rsync_server(options, ['.', snapshot_path], sudo=self.sudo)

    def rsync (self, options, dest):
        """
        Push the snapshot to the given destination; returns the optional
        stats dict.
        """
        with self.mount_snapshot() as snapshot_path:
            return rsync_stats(options, [snapshot_path, dest], sudo=self.sudo)

    def rsync_restore (self, options, dest):
        """
        Pull from the given destination into the restore path.
        """
        with self.mount_restore() as restore_path:
            rsync(options, [dest, restore_path], sudo=self.sudo)

    def __str__ (self):
        return self.path
class LVMSource(Source):
    """
    Backup LVM LV by snapshotting + mounting it.
    """

    def __init__ (self, vg, lv, path, sudo=None, lvm_opts={}):
        """
        vg       - str: LVM vg name
        lv       - str: LVM lv name
        path     - str: filesystem path within lvm volume; no leading /
        sudo     - use sudo for LVM operations
        lvm_opts - options for LVM.snapshot
        """
        # note: lvm_opts is a shared mutable default, but it is only read and
        # passed through here, so no mutation hazard in this class
        self.path = path.lstrip('/')
        self.sudo = sudo

        self.lvm = LVM(vg, sudo=sudo)
        self.lvm_volume = self.lvm.volume(lv)
        self.lvm_opts = lvm_opts

    @contextlib.contextmanager
    def mount_snapshot (self):
        """
        Create and mount an LVM snapshot of the volume, yielding the path to
        back up from; both the snapshot and the mount are torn down when the
        context exits.
        """
        # snapshot
        log.info("Creating LVM snapshot: %s", self.lvm_volume)

        with self.lvm.snapshot(self.lvm_volume,
                tag = 'backup',
                **self.lvm_opts
        ) as snapshot:
            # mount
            log.info("Mounting LVM snapshot: %s", snapshot)

            with qmsk.backup.mount.mount(snapshot.dev_path,
                    name_hint = 'lvm_' + snapshot.name + '_',
                    readonly = True,
                    sudo = self.sudo,
            ) as mountpoint:
                yield mountpoint.path + '/' + self.path

    @contextlib.contextmanager
    def mount_restore (self):
        """
        Yield the live (already mounted) filesystem path for rsync dest.

        Raises SourceError if the LVM volume is not currently mounted.
        """
        dev = self.lvm_volume.dev

        try:
            device, mount, fstype = qmsk.backup.mount.find_dev(dev)
        except FileNotFoundError:
            raise SourceError("LVM {lvm} is not mounted for restore".format(lvm=self.lvm_volume))
        else:
            yield mount.rstrip('/') + '/' + self.path

    def __str__ (self):
        return 'lvm:{volume}'.format(volume=self.lvm_volume)
class ZFSSource(Source):
    """
    Backup ZFS by snapshotting + mounting it.
    """

    def __init__ (self, zfs, path='/', **opts):
        """
        zfs  - qmsk.backup.zfs.ZFS
        path - str: filesystem path within the zfs dataset; no leading /
        """
        super().__init__(path.lstrip('/'), **opts)

        self.zfs = zfs

    def snapshot(self):
        """
        Create a ZFS snapshot (as a context manager), tagging it with the
        source path via the qmsk-backup:source property.
        """
        log.info("Creating ZFS snapshot: %s", self.zfs)

        return qmsk.backup.zfs.snapshot(self.zfs, properties={
            'qmsk-backup:source': self.path,
        })

    @contextlib.contextmanager
    def mount_snapshot (self):
        """
        Create and mount a ZFS snapshot of the dataset, yielding the path to
        back up from; cleaned up when the context exits.
        """
        with self.snapshot() as snapshot:
            # mount
            log.info("Mounting ZFS snapshot: %s", snapshot)

            with qmsk.backup.mount.mount(str(snapshot),
                    fstype = 'zfs',
                    name_hint = 'zfs_' + str(self.zfs).replace('/', '_') + '_',
                    readonly = True,
                    sudo = self.sudo,
            ) as mountpoint:
                yield mountpoint.path + '/' + self.path

    @contextlib.contextmanager
    def mount_restore (self):
        """
        Restoring into a ZFS source is not implemented; always raises
        SourceError.
        """
        raise SourceError("No restore support for zfs sources")

    def __str__ (self):
        return 'zfs:{zfs}'.format(zfs=self.zfs)
def parse_command (command):
    """
    Split an rsync argv into the command name, its options and its paths.

    command - list(argv) including the 'rsync' command and options/arguments

    Returns (cmd, options, paths) where options holds every '-'/'--' token
    and paths holds the positional arguments.

    Raises:
        CommandError

    >>> parse_command(['rsync', '--server', '--sender', '-ax', '.', 'lvm:asdf:test'])
    ('rsync', ['--server', '--sender', '-ax'], ['.', 'lvm:asdf:test'])
    """
    cmd = None
    options = []
    paths = []

    for token in command:
        if cmd is None:
            # the first token is the command itself
            cmd = token
        elif token.startswith('-'):
            # covers both '--long' options and '-short' flag bundles
            options.append(token)
        else:
            paths.append(token)

    return cmd, options, paths
def parse_server_command(command):
    """
    Parse rsync's internal --server command used when remoting over SSH.

    Returns (options, path, sender): path is the source path when acting as
    sender, the dest path when acting as server; sender tells which mode.

    Raises:
        CommandError
    """
    cmd, options, args = parse_command(command)

    # the invoked command must be rsync itself (possibly with a leading path)
    if cmd.split('/')[-1] != 'rsync':
        raise CommandError("Invalid command: {cmd}".format(cmd=cmd))

    if '--server' not in options:
        raise CommandError("Missing --server")

    # rsync --server is always invoked as: rsync ... . <real-path>
    if len(args) != 2:
        raise CommandError("Invalid source/destination paths")

    if args[0] != '.':
        raise CommandError("Invalid source-path for server")

    real_path = args[1]
    sender = '--sender' in options

    return options, real_path, sender
def parse_source (path, restrict_paths=None, allow_remote=True, sudo=None, lvm_opts=None):
    """
    Parse a backup source path, supporting custom pseudo-path extensions.

    path            - plain filesystem path, remote 'host:path' spec, or one
                      of the 'lvm:vg/lv[/path]' / 'zfs:...' pseudo-paths
    restrict_paths  - raise SourceError if source path is not under any of the
                      given prefixes
    allow_remote    - allow remote sources?
    sudo            - use sudo for mount/snapshot operations
    lvm_opts        - **opts for LVMSource (None means {}; previously this was
                      a shared mutable {} default)

    Returns a Source / LVMSource / ZFSSource.

    Raises SourceError for invalid or rejected paths.
    """
    if lvm_opts is None:
        lvm_opts = {}

    if not path:
        raise SourceError("No path given")

    endslash = path.endswith('/')

    # normalize
    path = os.path.normpath(path)

    if endslash and not path.endswith('/'):
        # add it back in
        # happens for 'foo:/' and such
        path += '/'

    # verify path
    if restrict_paths:
        for restrict_path in restrict_paths:
            if path.startswith(restrict_path):
                # ok
                break
        else:
            # fail
            raise SourceError("Restricted path")

    if path.startswith('/'):
        log.debug("filesystem: %s", path)

        return Source(path,
                sudo        = sudo,
        )

    elif path.startswith('lvm:'):
        _, path = path.split(':', 1)

        # LVM VG
        try:
            if ':' in path:
                vg, path = path.split(':', 1)

                # BUG FIX: the format string has four placeholders; previously
                # only two arguments were passed, causing a logging error.
                log.warn("old 'lvm:%s:%s' syntax; use 'lvm:%s/%s'", vg, path, vg, path)

            elif '/' in path:
                vg, path = path.split('/', 1)
            else:
                raise ValueError("Invalid vg/lv separator")

        except ValueError as error:
            raise SourceError("Invalid lvm pseudo-path: {error}".format(error=error))

        # LVM LV, and path within LV
        if '/' in path:
            lv, path = path.split('/', 1)
        else:
            lv = path
            path = ''

        # lookup
        log.debug("LVM: %s/%s/%s", vg, lv, path)

        # open
        return LVMSource(vg, lv, path,
                sudo        = sudo,
                lvm_opts    = lvm_opts,
        )

    elif path.startswith('zfs:'):
        _, path = path.split(':', 1)

        if path.startswith('/'):
            device, mount, fstype, name = qmsk.backup.mount.find(path)

            log.debug("%s: mount=%s fstype=%s device=%s name=%s", path, mount, fstype, device, name)

            if fstype != 'zfs':
                # BUG FIX: this previously passed logging-style extra args to
                # the exception (and `device` in the fstype slot) instead of
                # formatting the message.
                raise SourceError("Not a ZFS mount {}: mount={} fstype={}".format(path, mount, fstype))
        else:
            device = path
            name = ''

        # lookup
        log.debug("ZFS %s: %s / %s", path, device, name)

        # open
        return ZFSSource(qmsk.backup.zfs.open(device, invoker=qmsk.invoke.Invoker(sudo=sudo)),
                path    = name,
                sudo    = sudo,
        )

    elif ':' in path: # remote host
        if not allow_remote:
            raise SourceError("Invalid remote path")

        # remote host
        log.debug("remote: %s", path)

        return Source(path,
                sudo    = sudo,
        )

    else:
        # invalid
        raise SourceError("Unknown path format")
795980c95f91b7fe1804cd9ed8dedf307b02d651 | 419 | py | Python | iris_sdk/models/maps/telephone_number.py | NumberAI/python-bandwidth-iris | 0e05f79d68b244812afb97e00fd65b3f46d00aa3 | [
"MIT"
] | 2 | 2020-04-13T13:47:59.000Z | 2022-02-23T20:32:41.000Z | iris_sdk/models/maps/telephone_number.py | bandwidthcom/python-bandwidth-iris | dbcb30569631395041b92917252d913166f7d3c9 | [
"MIT"
] | 5 | 2020-09-18T20:59:24.000Z | 2021-08-25T16:51:42.000Z | iris_sdk/models/maps/telephone_number.py | bandwidthcom/python-bandwidth-iris | dbcb30569631395041b92917252d913166f7d3c9 | [
"MIT"
] | 5 | 2018-12-12T14:39:50.000Z | 2020-11-17T21:42:29.000Z | #!/usr/bin/env python
from iris_sdk.models.maps.base_map import BaseMap
class TelephoneNumberMap(BaseMap):
account_id = None
city = None
full_number = None
last_modified = None
lata = None
order_create_date = None
order_id = None
order_type = None
rate_center = None
site_id = None
state = None
status = None
tier = None
vendor_id = None
vendor_name = None | 19.952381 | 49 | 0.663484 |
79598101ee4277d807f667fbbe8fed85a34be2e3 | 2,783 | py | Python | examples/0234-provider.py | dnoneill/pyIIIFpres | 600b8ff323677b7851c2f420f06ac5bf00d4ca8c | [
"MIT"
] | 12 | 2021-02-23T07:49:02.000Z | 2021-12-28T09:37:39.000Z | examples/0234-provider.py | dnoneill/pyIIIFpres | 600b8ff323677b7851c2f420f06ac5bf00d4ca8c | [
"MIT"
] | 11 | 2021-06-03T06:24:10.000Z | 2022-03-29T18:30:47.000Z | examples/0234-provider.py | dnoneill/pyIIIFpres | 600b8ff323677b7851c2f420f06ac5bf00d4ca8c | [
"MIT"
] | 1 | 2021-09-02T19:21:12.000Z | 2021-09-02T19:21:12.000Z | # https://iiif.io/api/cookbook/recipe/0234-provider/
from IIIFpres import iiifpapi3
iiifpapi3.BASE_URL = r"https://iiif.io/api/cookbook/recipe/0234-provider/"
manifest = iiifpapi3.Manifest()
manifest.set_id(extendbase_url="manifest.json")
manifest.add_label("en","Playbill Cover")
manifest.add_summary("en","Cover of playbill for \"Akiba gongen kaisen-banashi,\" \"Futatsu chōchō kuruwa nikki\" and \"Godairiki koi no fūjime\" performed at the Chikugo Theater in Osaka from the fifth month of Kaei 2 (May, 1849); main actors: Gadō Kataoka II, Ebizō Ichikawa VI, Kitō Sawamura II, Daigorō Mimasu IV, and Karoku Nakamura I; on front cover: producer Mominosuke Ichikawa's crest.")
prov = manifest.add_provider()
prov.set_id("https://id.loc.gov/authorities/n79055331")
prov.set_type()
prov.add_label(language='en',text="UCLA Library")
homp = prov.add_homepage()
homp.set_id("https://digital.library.ucla.edu/")
homp.set_type("Text")
homp.add_label("en","UCLA Library Digital Collections")
homp.set_format("text/html")
homp.set_language("en")
logo = prov.add_logo()
logo.set_id("https://iiif.library.ucla.edu/iiif/2/UCLA-Library-Logo-double-line-2/full/full/0/default.png")
serv = logo.add_service()
serv.set_id("https://iiif.library.ucla.edu/iiif/2/UCLA-Library-Logo-double-line-2")
serv.set_type("ImageService3")
serv.set_profile("level2")
serv.set_width(1200)
serv.set_height(502)
serv.add_size(width=300,height=126)
serv.add_size(600,251)
serv.add_size(1200,502)
seAls = prov.add_seeAlso()
seAls.set_id("https://id.loc.gov/authorities/names/n79055331.madsxml.xml")
seAls.set_type("Dataset")
seAls.add_label("en","US Library of Congress data about the UCLA Library")
seAls.set_format("application/xml")
seAls.set_profile("http://www.loc.gov/mads/v2")
canvas = manifest.add_canvas_to_items()
canvas.set_id(extendbase_url="canvas/p0")
canvas.add_label("en","front cover with color bar")
canvas.set_height(5312)
canvas.set_width(4520)
annopage = canvas.add_annotationpage_to_items()
annopage.set_id(extendbase_url="page/p0/1")
annotation = annopage.add_annotation_to_items(target=canvas.id)
annotation.set_motivation("painting")
annotation.set_id(extendbase_url="annotation/p0000-image")
annotation.body.set_height(5312)
annotation.body.set_width(4520)
annotation.body.set_id("https://iiif.io/api/image/3.0/example/reference/4f92cceb12dd53b52433425ce44308c7-ucla_bib1987273_no001_rs_001_full/full/max/0/default.jpg")
annotation.body.set_format("image/jpeg")
annotation.body.set_type("Image")
srv = annotation.body.add_service()
srv.set_id("https://iiif.io/api/image/3.0/example/reference/4f92cceb12dd53b52433425ce44308c7-ucla_bib1987273_no001_rs_001_full")
srv.set_type("ImageService3")
srv.set_profile("level1")
if __name__ == "__main__":
manifest.json_save("0234-provider.json") | 47.982759 | 396 | 0.789077 |
79598445dc4ecd42e6165e49aade26242df36fd8 | 405 | py | Python | halici/wsgi.py | matua34/villageMarket | bdd183300f748afcd72ce3f87e3891bd2ed823a4 | [
"BSL-1.0"
] | null | null | null | halici/wsgi.py | matua34/villageMarket | bdd183300f748afcd72ce3f87e3891bd2ed823a4 | [
"BSL-1.0"
] | null | null | null | halici/wsgi.py | matua34/villageMarket | bdd183300f748afcd72ce3f87e3891bd2ed823a4 | [
"BSL-1.0"
] | null | null | null | """
WSGI config for halici project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'halici.settings')
application = get_wsgi_application()
| 23.823529 | 79 | 0.753086 |
795984dfe94cdb4d6b146694c60021d457c964e3 | 775 | py | Python | db_repository/versions/002_migration.py | Happyxianyueveryday/vjcs-web | 3b33ee6a6a0f24f770ec45dc4552ee1cd21b4f28 | [
"MIT"
] | 3 | 2019-04-22T14:12:21.000Z | 2020-12-09T05:23:16.000Z | db_repository/versions/002_migration.py | Happyxianyueveryday/vjcs-web | 3b33ee6a6a0f24f770ec45dc4552ee1cd21b4f28 | [
"MIT"
] | null | null | null | db_repository/versions/002_migration.py | Happyxianyueveryday/vjcs-web | 3b33ee6a6a0f24f770ec45dc4552ee1cd21b4f28 | [
"MIT"
] | 1 | 2020-12-09T05:24:03.000Z | 2020-12-09T05:24:03.000Z | from sqlalchemy import *
from migrate import *
from migrate.changeset import schema
pre_meta = MetaData()
post_meta = MetaData()
post = Table('post', post_meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('body', String(length=160)),
Column('timestamp', DateTime),
Column('user_id', Integer),
)
def upgrade(migrate_engine):
    """Apply the migration: create the 'post' table."""
    # Upgrade operations go here. Don't create your own engine; bind
    # migrate_engine to your metadata
    pre_meta.bind = migrate_engine
    post_meta.bind = migrate_engine
    post_meta.tables['post'].create()
def downgrade(migrate_engine):
    """Revert the migration: drop the 'post' table."""
    # Operations to reverse the above upgrade go here.
    pre_meta.bind = migrate_engine
    post_meta.bind = migrate_engine
    post_meta.tables['post'].drop()
7959852140671d8fdfa4dd6a2381e802a36cb352 | 4,082 | py | Python | CVLab9/orangeball.py | ReneeDress/19Sp-CVLab | ed7474f80c11a8755b69c0f0a3bbbb285ff8781d | [
"WTFPL"
] | null | null | null | CVLab9/orangeball.py | ReneeDress/19Sp-CVLab | ed7474f80c11a8755b69c0f0a3bbbb285ff8781d | [
"WTFPL"
] | null | null | null | CVLab9/orangeball.py | ReneeDress/19Sp-CVLab | ed7474f80c11a8755b69c0f0a3bbbb285ff8781d | [
"WTFPL"
] | null | null | null | # USAGE
# python ball_tracking.py --video ball_tracking_example.mp4
# python ball_tracking.py
# import the necessary packages
from collections import deque
from imutils.video import VideoStream
import numpy as np
import argparse
import cv2
import imutils
import time
# construct the argument parse and parse the arguments
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
    help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=64,
    help="max buffer size")
args = vars(ap.parse_args())

# HSV bounds of the "orange" ball, plus the deque of tracked centroids
# used to draw the fading trail
orangeLower = (5, 155, 155)
orangeUpper = (23, 255, 230)
pts = deque(maxlen=args["buffer"])

# BUG FIX: fps/size/out were previously referenced unconditionally but only
# defined on the video-file branch, so webcam mode crashed at out.write().
# The writer now stays None for the webcam and every use is guarded.
out = None

# if a video path was not supplied, grab the reference to the webcam
if not args.get("video", False):
    vs = VideoStream(src=0).start()

# otherwise, grab a reference to the video file and set up the output writer
else:
    vs = cv2.VideoCapture(args["video"])
    fps = vs.get(cv2.CAP_PROP_FPS)
    # output frames are resized to 600px wide; keep the source aspect ratio
    size = (600, int(vs.get(cv2.CAP_PROP_FRAME_HEIGHT) / vs.get(cv2.CAP_PROP_FRAME_WIDTH) * 600))
    print(size)
    # set the video writer
    fourcc = cv2.VideoWriter_fourcc('M', 'P', '4', '2')
    out = cv2.VideoWriter('orange_ball_result.avi', fourcc, fps, size)

# allow the camera or video file to warm up
time.sleep(2.0)

# keep looping
while True:
    # grab the current frame
    frame = vs.read()

    # handle the frame from VideoCapture (tuple) or VideoStream (array)
    frame = frame[1] if args.get("video", False) else frame

    # if we are viewing a video and we did not grab a frame,
    # then we have reached the end of the video
    if frame is None:
        break

    # resize the frame, blur it, and convert it to the HSV color space
    frame = imutils.resize(frame, width=600)
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)

    # construct a mask for the color "orange", then perform a series of
    # dilations and erosions to remove any small blobs left in the mask
    mask = cv2.inRange(hsv, orangeLower, orangeUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)

    # find contours in the mask and initialize the current
    # (x, y) center of the ball
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    center = None

    # only proceed if at least one contour was found
    if len(cnts) > 0:
        # find the largest contour in the mask, then use it to compute
        # the minimum enclosing circle and centroid
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

        # only proceed if the radius meets a minimum size
        if radius > 10:
            # draw the circle and centroid on the frame
            cv2.circle(frame, (int(x), int(y)), int(radius),
                (0, 255, 255), 2)
            cv2.circle(frame, center, 5, (0, 0, 255), -1)

    # update the points queue
    pts.appendleft(center)

    # loop over the set of tracked points
    for i in range(1, len(pts)):
        # if either of the tracked points are None, ignore them
        if pts[i - 1] is None or pts[i] is None:
            continue

        # otherwise, compute the thickness of the line (fading with age)
        # and draw the connecting lines
        thickness = int(np.sqrt(args["buffer"] / float(i + 1)) * 2.5)
        cv2.line(frame, pts[i - 1], pts[i], (0, 0, 255), thickness)

    # show the frame to our screen
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF

    # save the frame (only when reading from a video file)
    if out is not None:
        out.write(frame)

    # if the 'q' key is pressed, stop the loop
    if key == ord("q"):
        break

# if we are not using a video file, stop the camera video stream
if not args.get("video", False):
    vs.stop()

# otherwise, release the camera
else:
    vs.release()

# release the video writer, if one was created
if out is not None:
    out.release()

# close all windows
cv2.destroyAllWindows()
795985def1fca990b97834950d289c11912b0e57 | 6,458 | py | Python | robosuite/wrappers/data_collection_wrapper.py | junjungoal/robosuite | 14a9a8672bb14145dd4586a0c0080e1d0d3ff74e | [
"MIT"
] | null | null | null | robosuite/wrappers/data_collection_wrapper.py | junjungoal/robosuite | 14a9a8672bb14145dd4586a0c0080e1d0d3ff74e | [
"MIT"
] | null | null | null | robosuite/wrappers/data_collection_wrapper.py | junjungoal/robosuite | 14a9a8672bb14145dd4586a0c0080e1d0d3ff74e | [
"MIT"
] | null | null | null | """
This file implements a wrapper for saving simulation states to disk.
This data collection wrapper is useful for collecting demonstrations.
"""
import os
import time
import numpy as np
from robosuite.wrappers import Wrapper
from robosuite.utils.mjcf_utils import save_sim_model
class DataCollectionWrapper(Wrapper):
def __init__(self, env, directory, collect_freq=1, flush_freq=100):
"""
Initializes the data collection wrapper.
Args:
env (MujocoEnv): The environment to monitor.
directory (str): Where to store collected data.
collect_freq (int): How often to save simulation state, in terms of environment steps.
flush_freq (int): How frequently to dump data to disk, in terms of environment steps.
"""
super().__init__(env)
# the base directory for all logging
self.directory = directory
# in-memory cache for simulation states and action info
self.states = []
self.observations = []
self.action_infos = [] # stores information about actions taken
self.images = []
# how often to save simulation state, in terms of environment steps
self.collect_freq = collect_freq
# how frequently to dump data to disk, in terms of environment steps
self.flush_freq = flush_freq
if not os.path.exists(directory):
print("DataCollectionWrapper: making new directory at {}".format(directory))
os.makedirs(directory)
# store logging directory for current episode
self.ep_directory = None
# remember whether any environment interaction has occurred
self.has_interaction = False
# some variables for remembering the current episode's initial state and model xml
self._current_task_instance_state = None
self._current_task_instance_xml = None
def _start_new_episode(self):
"""
Bookkeeping to do at the start of each new episode.
"""
# flush any data left over from the previous episode if any interactions have happened
if self.has_interaction:
self._flush()
# timesteps in current episode
self.t = 0
self.has_interaction = False
# save the task instance (will be saved on the first env interaction)
self._current_task_instance_xml = self.env.sim.model.get_xml()
self._current_task_instance_state = np.array(self.env.sim.get_state().flatten())
# trick for ensuring that we can play MuJoCo demonstrations back
# deterministically by using the recorded actions open loop
self.env.reset_from_xml_string(self._current_task_instance_xml)
self.env.sim.reset()
self.env.sim.set_state_from_flattened(self._current_task_instance_state)
self.env.sim.forward()
def _on_first_interaction(self):
"""
Bookkeeping for first timestep of episode.
This function is necessary to make sure that logging only happens after the first
step call to the simulation, instead of on the reset (people tend to call
reset more than is necessary in code).
Raises:
AssertionError: [Episode path already exists]
"""
self.has_interaction = True
# create a directory with a timestamp
t1, t2 = str(time.time()).split(".")
self.ep_directory = os.path.join(self.directory, "ep_{}_{}".format(t1, t2))
assert not os.path.exists(self.ep_directory)
print("DataCollectionWrapper: making folder at {}".format(self.ep_directory))
os.makedirs(self.ep_directory)
# save the model xml
xml_path = os.path.join(self.ep_directory, "model.xml")
with open(xml_path, "w") as f:
f.write(self._current_task_instance_xml)
# save initial state and action
assert len(self.states) == 0
self.states.append(self._current_task_instance_state)
def _flush(self):
"""
Method to flush internal state to disk.
"""
t1, t2 = str(time.time()).split(".")
state_path = os.path.join(self.ep_directory, "state_{}_{}.npz".format(t1, t2))
if hasattr(self.env, "unwrapped"):
env_name = self.env.unwrapped.__class__.__name__
else:
env_name = self.env.__class__.__name__
np.savez(
state_path,
states=np.array(self.states),
action_infos=self.action_infos,
env=env_name,
obs=self.observations,
)
self.states = []
self.images = []
self.action_infos = []
self.observations = []
def reset(self):
    """
    Extends vanilla reset() function call to accommodate data collection

    Returns:
        OrderedDict: Environment observation space after reset occurs
    """
    observation = super().reset()
    self._start_new_episode()
    self.observations.append(observation)
    return observation
def step(self, action):
    """
    Extends vanilla step() function call to accommodate data collection

    Args:
        action (np.array): Action to take in environment

    Returns:
        4-tuple:

            - (OrderedDict) observations from the environment
            - (float) reward from the environment
            - (bool) whether the current episode is completed or not
            - (dict) misc information
    """
    ret = super().step(action)
    self.t += 1

    # Lazily create the episode directory on the first real interaction.
    if not self.has_interaction:
        self._on_first_interaction()

    # Sample sim state / obs / action every `collect_freq` steps.
    if self.t % self.collect_freq == 0:
        self.states.append(self.env.sim.get_state().flatten())
        self.observations.append(ret[0])
        self.images.append(self.env.render())
        self.action_infos.append({"actions": np.array(action)})

    # Periodically persist buffers so long episodes stay bounded in memory.
    if self.t % self.flush_freq == 0:
        self._flush()

    return ret
def close(self):
    """
    Flush any leftover buffered data before closing the wrapped environment.
    """
    if self.has_interaction:
        self._flush()
    self.env.close()
| 33.989474 | 98 | 0.623877 |
795986127cde7348198435975cf2a6671335fa3c | 90,227 | py | Python | pandas/core/arrays/categorical.py | gbaychev/pandas | 04893a954b91574279c402e8730a4b5fae2ae9e1 | [
"BSD-3-Clause"
] | 1 | 2020-01-06T06:50:30.000Z | 2020-01-06T06:50:30.000Z | pandas/core/arrays/categorical.py | gbaychev/pandas | 04893a954b91574279c402e8730a4b5fae2ae9e1 | [
"BSD-3-Clause"
] | null | null | null | pandas/core/arrays/categorical.py | gbaychev/pandas | 04893a954b91574279c402e8730a4b5fae2ae9e1 | [
"BSD-3-Clause"
] | null | null | null | import operator
from shutil import get_terminal_size
import textwrap
from typing import Type, Union, cast
from warnings import warn
import numpy as np
from pandas._config import get_option
from pandas._libs import algos as libalgos, hashtable as htable, lib
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender,
Substitution,
cache_readonly,
deprecate_kwarg,
)
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.dtypes.cast import coerce_indexer_dtype, maybe_infer_to_datetimelike
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_categorical_dtype,
is_datetime64_dtype,
is_datetimelike,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
is_float_dtype,
is_integer_dtype,
is_iterator,
is_list_like,
is_object_dtype,
is_scalar,
is_sequence,
is_timedelta64_dtype,
)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.missing import isna, notna
from pandas._typing import ArrayLike, Dtype, Ordered
from pandas.core import ops
from pandas.core.accessor import PandasDelegate, delegate_names
import pandas.core.algorithms as algorithms
from pandas.core.algorithms import (
_get_data_algo,
_hashtables,
factorize,
take,
take_1d,
unique1d,
)
from pandas.core.base import NoNewAttributesMixin, PandasObject, _shared_docs
import pandas.core.common as com
from pandas.core.construction import array, extract_array, sanitize_array
from pandas.core.missing import interpolate_2d
from pandas.core.sorting import nargsort
from pandas.io.formats import console
from .base import ExtensionArray, _extension_array_shared_docs
_take_msg = textwrap.dedent(
"""\
Interpreting negative values in 'indexer' as missing values.
In the future, this will change to meaning positional indices
from the right.
Use 'allow_fill=True' to retain the previous behavior and silence this
warning.
Use 'allow_fill=False' to accept the new behavior."""
)
def _cat_compare_op(op):
    """
    Build a comparison dunder (``__eq__``, ``__lt__``, ...) for Categorical.

    Parameters
    ----------
    op : callable
        Operator-module comparison function; only its ``__name__`` is used
        to pick the corresponding comparison on the underlying codes.

    Returns
    -------
    callable
        Closure suitable for installing as a Categorical comparison method.
    """
    opname = "__{op}__".format(op=op.__name__)

    def f(self, other):
        # On python2, you can usually compare any type to any type, and
        # Categoricals can be seen as a custom type, but having different
        # results depending whether categories are the same or not is kind of
        # insane, so be a bit stricter here and use the python3 idea of
        # comparing only things of equal type.
        if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)):
            return NotImplemented

        other = lib.item_from_zerodim(other)

        if is_list_like(other) and len(other) != len(self):
            # TODO: Could this fail if the categories are listlike objects?
            raise ValueError("Lengths must match.")

        if not self.ordered:
            # Without an order, only (in)equality is meaningful.
            if opname in ["__lt__", "__gt__", "__le__", "__ge__"]:
                raise TypeError(
                    "Unordered Categoricals can only compare equality or not"
                )
        if isinstance(other, Categorical):
            # Two Categoricals can only be be compared if the categories are
            # the same (maybe up to ordering, depending on ordered)
            msg = "Categoricals can only be compared if 'categories' are the same."
            if len(self.categories) != len(other.categories):
                raise TypeError(msg + " Categories are different lengths")
            elif self.ordered and not (self.categories == other.categories).all():
                raise TypeError(msg)
            elif not set(self.categories) == set(other.categories):
                raise TypeError(msg)

            if not (self.ordered == other.ordered):
                raise TypeError(
                    "Categoricals can only be compared if 'ordered' is the same"
                )

            if not self.ordered and not self.categories.equals(other.categories):
                # both unordered and different order
                other_codes = _get_codes_for_values(other, self.categories)
            else:
                other_codes = other._codes

            # -1 codes mark NaN; any comparison involving NaN yields False.
            mask = (self._codes == -1) | (other_codes == -1)
            f = getattr(self._codes, opname)
            ret = f(other_codes)
            if mask.any():
                # In other series, the leads to False, so do that here too
                ret[mask] = False
            return ret

        if is_scalar(other):
            if other in self.categories:
                # Compare codes against the scalar's code position.
                i = self.categories.get_loc(other)
                ret = getattr(self._codes, opname)(i)

                # check for NaN in self
                mask = self._codes == -1
                ret[mask] = False
                return ret
            else:
                # Scalar not among the categories: equality is vacuously
                # False, inequality vacuously True; ordering comparisons raise.
                if opname == "__eq__":
                    return np.repeat(False, len(self))
                elif opname == "__ne__":
                    return np.repeat(True, len(self))
                else:
                    msg = (
                        "Cannot compare a Categorical for op {op} with a "
                        "scalar, which is not a category."
                    )
                    raise TypeError(msg.format(op=opname))
        else:
            # allow categorical vs object dtype array comparisons for equality
            # these are only positional comparisons
            if opname in ["__eq__", "__ne__"]:
                return getattr(np.array(self), opname)(np.array(other))

            msg = (
                "Cannot compare a Categorical for op {op} with type {typ}."
                "\nIf you want to compare values, use 'np.asarray(cat) "
                "<op> other'."
            )
            raise TypeError(msg.format(op=opname, typ=type(other)))

    f.__name__ = opname

    return f
def contains(cat, key, container):
    """
    Membership helper shared by ``Categorical.__contains__`` and
    ``CategoricalIndex.__contains__``.

    Parameters
    ----------
    cat : Categorical or CategoricalIndex
    key : a hashable object
        The key to check membership for.
    container : Container (e.g. list-like or mapping)
        The container to check for membership in.

    Returns
    -------
    bool
        True if ``key`` is one of ``cat.categories`` and the location of
        ``key`` in the categories appears in ``container``, else False.

    Notes
    -----
    NaN values are not handled here; callers must check for them separately
    before invoking this helper.
    """
    hash(key)

    # A key that is not a category at all cannot appear in the codes.
    try:
        location = cat.categories.get_loc(key)
    except (KeyError, TypeError):
        return False

    # `location` is the value that `key` would have in `container`; the key
    # may be a category yet still be absent from `container`, e.g.
    # 'b' in Categorical(['a'], categories=['a', 'b']) is False.
    if is_scalar(location):
        return location in container

    # An IntervalIndex can return an array of positions for one key.
    return any(pos in container for pos in location)
_codes_doc = """
The category codes of this categorical.
Level codes are an array if integer which are the positions of the real
values in the categories array.
There is not setter, use the other categorical methods and the normal item
setter to change values in the categorical.
"""
class Categorical(ExtensionArray, PandasObject):
"""
Represent a categorical variable in classic R / S-plus fashion.
`Categoricals` can only take on only a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : bool, default False
Whether or not this categorical is treated as a ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : bool
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
See Also
--------
CategoricalDtype : Type for categorical data.
CategoricalIndex : An Index with an underlying ``Categorical``.
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/user_guide/categorical.html>`_
for more.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
"""
# For comparisons, so that numpy uses our implementation if the compare
# ops, which raise
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
# tolist is not actually deprecated, just suppressed in the __dir__
_deprecations = PandasObject._deprecations | frozenset(
["tolist", "itemsize", "get_values"]
)
_typ = "categorical"
def __init__(
    self, values, categories=None, ordered=None, dtype=None, fastpath=False
):
    """
    Construct the Categorical, inferring categories from ``values`` when no
    explicit ``categories``/``dtype`` is given.  See the class docstring for
    parameter semantics.
    """
    dtype = CategoricalDtype._from_values_or_dtype(
        values, categories, ordered, dtype
    )
    # At this point, dtype is always a CategoricalDtype, but
    # we may have dtype.categories be None, and we need to
    # infer categories in a factorization step further below

    if fastpath:
        # Trusted caller: `values` are already valid integer codes.
        self._codes = coerce_indexer_dtype(values, dtype.categories)
        self._dtype = self._dtype.update_dtype(dtype)
        return

    # null_mask indicates missing values we want to exclude from inference.
    # This means: only missing values in list-likes (not arrays/ndframes).
    null_mask = np.array(False)

    # sanitize input
    if is_categorical_dtype(values):
        if dtype.categories is None:
            dtype = CategoricalDtype(values.categories, dtype._ordered)
    elif not isinstance(values, (ABCIndexClass, ABCSeries)):
        # sanitize_array coerces np.nan to a string under certain versions
        # of numpy
        values = maybe_infer_to_datetimelike(values, convert_dates=True)
        if not isinstance(values, np.ndarray):
            values = _convert_to_list_like(values)

        # By convention, empty lists result in object dtype:
        if len(values) == 0:
            sanitize_dtype = "object"
        else:
            sanitize_dtype = None
        null_mask = isna(values)
        if null_mask.any():
            # Drop missing entries before factorization; they are
            # reinserted as -1 codes at the end.
            values = [values[idx] for idx in np.where(~null_mask)[0]]
        values = sanitize_array(values, None, dtype=sanitize_dtype)

    if dtype.categories is None:
        try:
            codes, categories = factorize(values, sort=True)
        except TypeError:
            codes, categories = factorize(values, sort=False)
            if dtype._ordered:
                # raise, as we don't have a sortable data structure and so
                # the user should give us one by specifying categories
                raise TypeError(
                    "'values' is not ordered, please "
                    "explicitly specify the categories order "
                    "by passing in a categories argument."
                )
        except ValueError:
            # FIXME
            raise NotImplementedError(
                "> 1 ndim Categorical are not supported at this time"
            )

        # we're inferring from values
        dtype = CategoricalDtype(categories, dtype._ordered)

    elif is_categorical_dtype(values):
        old_codes = (
            values._values.codes if isinstance(values, ABCSeries) else values.codes
        )
        codes = _recode_for_categories(
            old_codes, values.dtype.categories, dtype.categories
        )

    else:
        codes = _get_codes_for_values(values, dtype.categories)

    if null_mask.any():
        # Reinsert -1 placeholders for previously removed missing values
        full_codes = -np.ones(null_mask.shape, dtype=codes.dtype)
        full_codes[~null_mask] = codes
        codes = full_codes

    self._dtype = self._dtype.update_dtype(dtype)
    self._codes = coerce_indexer_dtype(codes, dtype.categories)
@property
def categories(self):
    """
    The categories of this categorical.

    Assigning to this property renames each category in place.  The new
    value must be a list-like of unique items whose length equals the
    current number of categories.

    Raises
    ------
    ValueError
        If the new categories do not validate as categories or if the
        number of new categories is unequal the number of old categories

    See Also
    --------
    rename_categories
    reorder_categories
    add_categories
    remove_categories
    remove_unused_categories
    set_categories
    """
    return self.dtype.categories

@categories.setter
def categories(self, categories):
    new_dtype = CategoricalDtype(categories, ordered=self.ordered)
    current = self.dtype.categories
    if current is not None and len(current) != len(new_dtype.categories):
        raise ValueError(
            "new categories need to have the same number of "
            "items as the old categories!"
        )
    self._dtype = new_dtype
@property
def ordered(self) -> Ordered:
    """
    Whether the categories have an ordered relationship.
    """
    return self.dtype._ordered

@property
def dtype(self) -> CategoricalDtype:
    """
    The :class:`~pandas.api.types.CategoricalDtype` for this instance.
    """
    # Backing dtype object stored at construction time.
    return self._dtype

@property
def _ndarray_values(self) -> np.ndarray:
    # Integer codes serve as the underlying ndarray representation.
    return self.codes

@property
def _constructor(self) -> Type["Categorical"]:
    # Class used internally to build new instances of the same kind.
    return Categorical

@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
    # ExtensionArray interface: construct from a sequence of scalars.
    # NOTE(review): `copy` is accepted but unused here — confirm intended.
    return Categorical(scalars, dtype=dtype)

def _formatter(self, boxed=False):
    # Defer to CategoricalFormatter's formatter.
    return None
def copy(self) -> "Categorical":
    """
    Return a copy of this Categorical (the codes array is copied).
    """
    duplicated_codes = self._codes.copy()
    return self._constructor(
        values=duplicated_codes, dtype=self.dtype, fastpath=True
    )
def astype(self, dtype: Dtype, copy: bool = True) -> ArrayLike:
    """
    Coerce this type to another dtype

    Parameters
    ----------
    dtype : numpy dtype or pandas type
    copy : bool, default True
        By default, astype always returns a newly allocated object.
        If copy is set to False and dtype is categorical, the original
        object is returned.
    """
    if is_categorical_dtype(dtype):
        dtype = cast(Union[str, CategoricalDtype], dtype)

        # GH 10696/18593
        dtype = self.dtype.update_dtype(dtype)
        self = self.copy() if copy else self
        if dtype == self.dtype:
            return self
        return self._set_dtype(dtype)
    if is_extension_array_dtype(dtype):
        return array(self, dtype=dtype, copy=copy)  # type: ignore # GH 28770
    if is_integer_dtype(dtype) and self.isna().any():
        # Integer dtypes cannot represent NaN, so refuse the cast.
        msg = "Cannot convert float NaN to integer"
        raise ValueError(msg)
    return np.array(self, dtype=dtype, copy=copy)
@cache_readonly
def size(self) -> int:
    """
    Return the number of elements in this Categorical.
    """
    return self._codes.size

@cache_readonly
def itemsize(self) -> int:
    """
    Return the size in bytes of a single category element.
    """
    return self.categories.itemsize

def tolist(self) -> list:
    """
    Return a list of the values.

    These are each a scalar type, which is a Python scalar
    (for str, int, float) or a pandas scalar
    (for Timestamp/Timedelta/Interval/Period)
    """
    return list(self)

# Alias kept for API symmetry with Index.to_list / Series.to_list.
to_list = tolist

@property
def base(self) -> None:
    """
    compat, we are always our own object
    """
    return None
@classmethod
def _from_inferred_categories(
    cls, inferred_categories, inferred_codes, dtype, true_values=None
):
    """
    Construct a Categorical from inferred values.

    For inferred categories (`dtype` is None) the categories are sorted.
    For explicit `dtype`, the `inferred_categories` are cast to the
    appropriate type.

    Parameters
    ----------
    inferred_categories : Index
    inferred_codes : Index
    dtype : CategoricalDtype or 'category'
    true_values : list, optional
        If none are provided, the default ones are
        "True", "TRUE", and "true."

    Returns
    -------
    Categorical
    """
    from pandas import Index, to_numeric, to_datetime, to_timedelta

    cats = Index(inferred_categories)
    known_categories = (
        isinstance(dtype, CategoricalDtype) and dtype.categories is not None
    )

    if known_categories:
        # Convert to a specialized type with `dtype` if specified.
        if dtype.categories.is_numeric():
            cats = to_numeric(inferred_categories, errors="coerce")
        elif is_datetime64_dtype(dtype.categories):
            cats = to_datetime(inferred_categories, errors="coerce")
        elif is_timedelta64_dtype(dtype.categories):
            cats = to_timedelta(inferred_categories, errors="coerce")
        elif dtype.categories.is_boolean():
            if true_values is None:
                # Default string spellings treated as boolean True.
                true_values = ["True", "TRUE", "true"]

            cats = cats.isin(true_values)

    if known_categories:
        # Recode from observation order to dtype.categories order.
        categories = dtype.categories
        codes = _recode_for_categories(inferred_codes, cats, categories)
    elif not cats.is_monotonic_increasing:
        # Sort categories and recode for unknown categories.
        unsorted = cats.copy()
        categories = cats.sort_values()

        codes = _recode_for_categories(inferred_codes, unsorted, categories)
        dtype = CategoricalDtype(categories, ordered=False)
    else:
        dtype = CategoricalDtype(cats, ordered=False)
        codes = inferred_codes

    return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories=None, ordered=None, dtype=None):
    """
    Make a Categorical type from codes and categories or dtype.

    This constructor is useful if you already have codes and
    categories/dtype and so do not need the (computation intensive)
    factorization step, which is usually done on the constructor.

    If your data does not follow this convention, please use the normal
    constructor.

    Parameters
    ----------
    codes : array-like of int
        An integer array, where each integer points to a category in
        categories or dtype.categories, or else is -1 for NaN.
    categories : index-like, optional
        The categories for the categorical. Items need to be unique.
        If the categories are not given here, then they must be provided
        in `dtype`.
    ordered : bool, optional
        Whether or not this categorical is treated as an ordered
        categorical. If not given here or in `dtype`, the resulting
        categorical will be unordered.
    dtype : CategoricalDtype or "category", optional
        If :class:`CategoricalDtype`, cannot be used together with
        `categories` or `ordered`.

        .. versionadded:: 0.24.0

           When `dtype` is provided, neither `categories` nor `ordered`
           should be provided.

    Returns
    -------
    Categorical

    Examples
    --------
    >>> dtype = pd.CategoricalDtype(['a', 'b'], ordered=True)
    >>> pd.Categorical.from_codes(codes=[0, 1, 0, 1], dtype=dtype)
    [a, b, a, b]
    Categories (2, object): [a < b]
    """
    dtype = CategoricalDtype._from_values_or_dtype(
        categories=categories, ordered=ordered, dtype=dtype
    )
    if dtype.categories is None:
        msg = (
            "The categories must be provided in 'categories' or "
            "'dtype'. Both were None."
        )
        raise ValueError(msg)

    codes = np.asarray(codes)  # #21767
    if not is_integer_dtype(codes):
        msg = "codes need to be array-like integers"
        if is_float_dtype(codes):
            # Exactly-integral float codes are accepted for now with a
            # deprecation warning (msg is cleared); any other float input
            # keeps msg set and raises below.
            icodes = codes.astype("i8")
            if (icodes == codes).all():
                msg = None
                codes = icodes
                warn(
                    (
                        "float codes will be disallowed in the future and "
                        "raise a ValueError"
                    ),
                    FutureWarning,
                    stacklevel=2,
                )

        if msg:
            raise ValueError(msg)

    if len(codes) and (codes.max() >= len(dtype.categories) or codes.min() < -1):
        raise ValueError("codes need to be between -1 and len(categories)-1")

    return cls(codes, dtype=dtype, fastpath=True)
def _get_codes(self):
    """
    Get the codes.

    Returns
    -------
    codes : integer array view
        A non writable view of the `codes` array.
    """
    readonly_view = self._codes.view()
    readonly_view.flags.writeable = False
    return readonly_view

def _set_codes(self, codes):
    """
    Guard setter: the codes array is not directly settable by the user.
    """
    raise ValueError("cannot set Categorical codes directly")
# Public read-only accessor for the integer codes; assignment routes to
# _set_codes, which raises.
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _set_categories(self, categories, fastpath=False):
    """
    Replace the categories in place.

    Parameters
    ----------
    categories : list-like
        The replacement categories.
    fastpath : bool, default False
        Don't perform validation of the categories for uniqueness or nulls

    Examples
    --------
    >>> c = pd.Categorical(['a', 'b'])
    >>> c._set_categories(pd.Index(['a', 'c']))
    >>> c
    [a, c]
    Categories (2, object): [a, c]
    """
    if fastpath:
        new_dtype = CategoricalDtype._from_fastpath(categories, self.ordered)
    else:
        new_dtype = CategoricalDtype(categories, ordered=self.ordered)
        # Renaming must be one-to-one: the count of categories is fixed.
        if self.dtype.categories is not None and len(new_dtype.categories) != len(
            self.dtype.categories
        ):
            raise ValueError(
                "new categories need to have the same number of "
                "items than the old categories!"
            )

    self._dtype = new_dtype
def _set_dtype(self, dtype: CategoricalDtype) -> "Categorical":
    """
    Internal method for directly updating the CategoricalDtype

    Parameters
    ----------
    dtype : CategoricalDtype

    Notes
    -----
    We don't do any validation here. It's assumed that the dtype is
    a (valid) instance of `CategoricalDtype`.
    """
    # Recode so existing values keep their meaning under the new categories.
    codes = _recode_for_categories(self.codes, self.categories, dtype.categories)
    return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
    """
    Set the ordered attribute to the boolean value.

    Parameters
    ----------
    value : bool
        Set whether this categorical is ordered (True) or not (False).
    inplace : bool, default False
        Whether or not to set the ordered attribute in-place or return
        a copy of this categorical with ordered set to the value.
    """
    inplace = validate_bool_kwarg(inplace, "inplace")
    new_dtype = CategoricalDtype(self.categories, ordered=value)
    target = self if inplace else self.copy()
    target._dtype = new_dtype
    if not inplace:
        return target
def as_ordered(self, inplace=False):
    """
    Return an ordered version of this Categorical (or set it in place).

    Parameters
    ----------
    inplace : bool, default False
        Whether or not to set the ordered attribute in-place or return
        a copy of this categorical with ordered set to True.

    Returns
    -------
    Categorical
        Ordered Categorical.
    """
    inplace = validate_bool_kwarg(inplace, "inplace")
    return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
    """
    Return an unordered version of this Categorical (or set it in place).

    Parameters
    ----------
    inplace : bool, default False
        Whether or not to set the ordered attribute in-place or return
        a copy of this categorical with ordered set to False.

    Returns
    -------
    Categorical
        Unordered Categorical.
    """
    inplace = validate_bool_kwarg(inplace, "inplace")
    return self.set_ordered(False, inplace=inplace)
def set_categories(self, new_categories, ordered=None, rename=False, inplace=False):
    """
    Set the categories to the specified new_categories.

    Unlike the more specialised add/remove/reorder/rename methods, this can
    add, drop and reorder categories in a single (unchecked) step: values
    whose category is dropped become NaN, and freshly added categories start
    out unused.  With ``rename=True`` the new names are applied positionally
    instead of recoding the values.

    Parameters
    ----------
    new_categories : Index-like
        The categories in new order.
    ordered : bool, default False
        Whether or not the categorical is treated as a ordered categorical.
        If not given, do not change the ordered information.
    rename : bool, default False
        Whether or not the new_categories should be considered as a rename
        of the old categories or as reordered categories.
    inplace : bool, default False
        Whether or not to reorder the categories in-place or return a copy
        of this categorical with reordered categories.

    Returns
    -------
    Categorical with reordered categories or None if inplace.

    Raises
    ------
    ValueError
        If new_categories does not validate as categories

    See Also
    --------
    rename_categories
    reorder_categories
    add_categories
    remove_categories
    remove_unused_categories
    """
    inplace = validate_bool_kwarg(inplace, "inplace")
    if ordered is None:
        ordered = self.dtype._ordered
    new_dtype = CategoricalDtype(new_categories, ordered=ordered)

    cat = self if inplace else self.copy()
    if rename:
        if cat.dtype.categories is not None and len(new_dtype.categories) < len(
            cat.dtype.categories
        ):
            # Fewer names than categories: codes past the end become NaN.
            cat._codes[cat._codes >= len(new_dtype.categories)] = -1
    else:
        cat._codes = _recode_for_categories(
            cat.codes, cat.categories, new_dtype.categories
        )
    cat._dtype = new_dtype

    if not inplace:
        return cat
def rename_categories(self, new_categories, inplace=False):
    """
    Rename categories.

    Parameters
    ----------
    new_categories : list-like, dict-like or callable
        * list-like: new names; must be unique and match the existing
          number of categories.
        * dict-like: old->new mapping; categories absent from the mapping
          pass through unchanged and extra keys are ignored.
        * callable : applied to every old category to produce its new name.
    inplace : bool, default False
        Whether or not to rename the categories inplace or return a copy of
        this categorical with renamed categories.

    Returns
    -------
    cat : Categorical or None
        With ``inplace=False``, the new categorical is returned.
        With ``inplace=True``, there is no return value.

    Raises
    ------
    ValueError
        If new categories are list-like and do not have the same number of
        items than the current categories or do not validate as categories

    See Also
    --------
    reorder_categories
    add_categories
    remove_categories
    remove_unused_categories
    set_categories

    Examples
    --------
    >>> c = pd.Categorical(['a', 'a', 'b'])
    >>> c.rename_categories([0, 1])
    [0, 0, 1]
    Categories (2, int64): [0, 1]
    >>> c.rename_categories({'a': 'A', 'c': 'C'})
    [A, A, b]
    Categories (2, object): [A, b]
    >>> c.rename_categories(lambda x: x.upper())
    [A, A, B]
    Categories (2, object): [A, B]
    """
    inplace = validate_bool_kwarg(inplace, "inplace")
    cat = self if inplace else self.copy()

    if is_dict_like(new_categories):
        renamed = [new_categories.get(old, old) for old in cat.categories]
    elif callable(new_categories):
        renamed = [new_categories(old) for old in cat.categories]
    else:
        renamed = new_categories
    # Assignment goes through the `categories` setter, which validates.
    cat.categories = renamed

    if not inplace:
        return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
    """
    Reorder categories as specified in new_categories.

    ``new_categories`` must contain exactly the existing categories —
    no additions and no removals — possibly in a different order.

    Parameters
    ----------
    new_categories : Index-like
        The categories in new order.
    ordered : bool, optional
        Whether or not the categorical is treated as a ordered categorical.
        If not given, do not change the ordered information.
    inplace : bool, default False
        Whether or not to reorder the categories inplace or return a copy of
        this categorical with reordered categories.

    Returns
    -------
    cat : Categorical with reordered categories or None if inplace.

    Raises
    ------
    ValueError
        If the new categories do not contain all old category items or any
        new ones

    See Also
    --------
    rename_categories
    add_categories
    remove_categories
    remove_unused_categories
    set_categories
    """
    inplace = validate_bool_kwarg(inplace, "inplace")
    if set(self.dtype.categories) != set(new_categories):
        raise ValueError(
            "items in new_categories are not the same as in old categories"
        )
    return self.set_categories(new_categories, ordered=ordered, inplace=inplace)
def add_categories(self, new_categories, inplace=False):
    """
    Add new, initially unused categories at the end of the categories.

    Parameters
    ----------
    new_categories : category or list-like of category
        The new categories to be included.
    inplace : bool, default False
        Whether or not to add the categories inplace or return a copy of
        this categorical with added categories.

    Returns
    -------
    cat : Categorical with new categories added or None if inplace.

    Raises
    ------
    ValueError
        If the new categories include old categories or do not validate as
        categories

    See Also
    --------
    rename_categories
    reorder_categories
    remove_categories
    remove_unused_categories
    set_categories
    """
    inplace = validate_bool_kwarg(inplace, "inplace")
    if not is_list_like(new_categories):
        new_categories = [new_categories]

    duplicated = set(new_categories) & set(self.dtype.categories)
    if len(duplicated) != 0:
        msg = (
            "new categories must not include old categories: "
            "{already_included!s}"
        )
        raise ValueError(msg.format(already_included=duplicated))

    combined = list(self.dtype.categories) + list(new_categories)
    new_dtype = CategoricalDtype(combined, self.ordered)

    cat = self if inplace else self.copy()
    cat._dtype = new_dtype
    # Widen the codes dtype if the larger category count requires it.
    cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
    if not inplace:
        return cat
def remove_categories(self, removals, inplace=False):
    """
    Remove the specified categories; values in removed categories become NaN.

    Parameters
    ----------
    removals : category or list of categories
        The categories which should be removed.
    inplace : bool, default False
        Whether or not to remove the categories inplace or return a copy of
        this categorical with removed categories.

    Returns
    -------
    cat : Categorical with removed categories or None if inplace.

    Raises
    ------
    ValueError
        If the removals are not contained in the categories

    See Also
    --------
    rename_categories
    reorder_categories
    add_categories
    remove_unused_categories
    set_categories
    """
    inplace = validate_bool_kwarg(inplace, "inplace")
    if not is_list_like(removals):
        removals = [removals]

    to_remove = set(list(removals))
    missing = to_remove - set(self.dtype.categories)
    kept = [c for c in self.dtype.categories if c not in to_remove]

    # GH 10156: NaN is never itself a category, so asking to drop NaN is a
    # no-op rather than an error.
    if any(isna(removals)):
        missing = [x for x in missing if notna(x)]
        kept = [x for x in kept if notna(x)]

    if len(missing) != 0:
        msg = "removals must all be in old categories: {not_included!s}"
        raise ValueError(msg.format(not_included=missing))

    return self.set_categories(
        kept, ordered=self.ordered, rename=False, inplace=inplace
    )
def remove_unused_categories(self, inplace=False):
"""
Remove categories which are not used.
Parameters
----------
inplace : bool, default False
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See Also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, "inplace")
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(
new_categories, ordered=self.ordered
)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned. NaN values are unaffected.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(
self._codes.copy(), categories=new_categories, ordered=self.ordered
)
except ValueError:
# NA values are represented in self._codes with -1
# np.take causes NA values to take final element in new_categories
if np.any(self._codes == -1):
new_categories = new_categories.insert(len(new_categories), np.nan)
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op(operator.eq)
__ne__ = _cat_compare_op(operator.ne)
__lt__ = _cat_compare_op(operator.lt)
__gt__ = _cat_compare_op(operator.gt)
__le__ = _cat_compare_op(operator.le)
__ge__ = _cat_compare_op(operator.ge)
# for Series/ndarray like compat
@property
def shape(self):
"""
Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods, fill_value=None):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
fill_value : object, optional
The scalar value to use for newly introduced missing values.
.. versionadded:: 0.24.0
Returns
-------
shifted : Categorical
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
if isna(fill_value):
fill_value = -1
elif fill_value in self.categories:
fill_value = self.categories.get_loc(fill_value)
else:
raise ValueError(
"'fill_value={}' is not present "
"in this Categorical's "
"categories".format(fill_value)
)
if periods > 0:
codes[:periods] = fill_value
else:
codes[periods:] = fill_value
return self.from_codes(codes, dtype=self.dtype)
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
numpy.array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype.
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
# we need to ensure __array__ get's all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
# for binary ops, use our custom dunder methods
result = ops.maybe_dispatch_ufunc_to_dunder_op(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
return result
# for all other cases, raise for now (similarly as what happens in
# Series.__array_prepare__)
raise TypeError(
"Object with dtype {dtype} cannot perform "
"the numpy op {op}".format(dtype=self.dtype, op=ufunc.__name__)
)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception("invalid pickle state")
# compat with pre 0.21.0 CategoricalDtype change
if "_dtype" not in state:
state["_dtype"] = CategoricalDtype(state["_categories"], state["_ordered"])
for k, v in state.items():
setattr(self, k, v)
    @property
    def T(self):
        """
        Return transposed numpy array.
        """
        # A Categorical is 1-dimensional, so its transpose is itself.
        return self
    @property
    def nbytes(self):
        # Total bytes: the codes array plus the category values themselves.
        return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(deep=deep)
    @Substitution(klass="Categorical")
    @Appender(_shared_docs["searchsorted"])
    def searchsorted(self, value, side="left", sorter=None):
        # searchsorted is very performance sensitive. By converting codes
        # to same dtype as self.codes, we get much faster performance.
        if is_scalar(value):
            # Scalar: translate to its category code, cast to the codes dtype.
            codes = self.categories.get_loc(value)
            codes = self.codes.dtype.type(codes)
        else:
            # Sequence: translate each value to its category code.
            locs = [self.categories.get_loc(x) for x in value]
            codes = np.array(locs, dtype=self.codes.dtype)
        return self.codes.searchsorted(codes, side=side, sorter=sorter)
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See Also
--------
isna : Top-level isna.
isnull : Alias of isna.
Categorical.notna : Boolean inverse of Categorical.isna.
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See Also
--------
notna : Top-level notna.
notnull : Alias of notna.
Categorical.isna : Boolean inverse of Categorical.notna.
"""
return ~self.isna()
notnull = notna
    def put(self, *args, **kwargs):
        """
        Replace specific elements in the Categorical with given values.
        """
        # numpy-style in-place `put` is intentionally unsupported.
        raise NotImplementedError(("'put' is not yet implemented for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
    def value_counts(self, dropna=True):
        """
        Return a Series containing counts of each category.
        Every category will have an entry, even those with a count of 0.
        Parameters
        ----------
        dropna : bool, default True
            Don't include counts of NaN.
        Returns
        -------
        counts : Series
        See Also
        --------
        Series.value_counts
        """
        from pandas import Series, CategoricalIndex
        # code: the integer codes; mask: positions holding a real (non-NA)
        # code; clean: True when there are no NA codes at all.
        code, cat = self._codes, self.categories
        ncat, mask = len(cat), 0 <= code
        ix, clean = np.arange(ncat), mask.all()
        if dropna or clean:
            # Count only valid codes; minlength guarantees one bin per
            # category so categories with count 0 still appear.
            obs = code if clean else code[mask]
            count = np.bincount(obs, minlength=ncat or 0)
        else:
            # Route NA codes (-1) into an extra trailing bin at position
            # `ncat`, and append -1 to the index so that bin is labelled NaN
            # after reconstruction below.
            count = np.bincount(np.where(mask, code, ncat))
            ix = np.append(ix, -1)
        # Rebuild the index as a Categorical so the result is indexed by
        # the actual category values.
        ix = self._constructor(ix, dtype=self.dtype, fastpath=True)
        return Series(count, index=CategoricalIndex(ix), dtype="int64")
def get_values(self):
"""
Return the values.
.. deprecated:: 0.25.0
For internal compatibility with pandas formatting.
Returns
-------
numpy.array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods.
"""
warn(
"The 'get_values' method is deprecated and will be removed in a "
"future version",
FutureWarning,
stacklevel=2,
)
return self._internal_get_values()
    def _internal_get_values(self):
        # if we are a datetime and period index, return Index to keep metadata
        if is_datetimelike(self.categories):
            return self.categories.take(self._codes, fill_value=np.nan)
        elif is_integer_dtype(self.categories) and -1 in self._codes:
            # Integer categories cannot represent NaN, so upcast to object
            # before taking with a NaN fill value.
            return self.categories.astype("object").take(self._codes, fill_value=np.nan)
        # Fallback: materialize through __array__.
        return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError(
"Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op)
)
    def _values_for_argsort(self):
        # Sorting operates directly on a defensive copy of the codes.
        return self._codes.copy()
def argsort(self, ascending=True, kind="quicksort", *args, **kwargs):
"""
Return the indices that would sort the Categorical.
.. versionchanged:: 0.25.0
Changed to sort missing values at the end.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
numpy.array
See Also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
Missing values are placed at the end
>>> cat = pd.Categorical([2, None, 1])
>>> cat.argsort()
array([2, 0, 1])
"""
return super().argsort(ascending=ascending, kind=kind, *args, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position="last"):
"""
Sort the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : bool, default False
Do operation in place.
ascending : bool, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if na_position not in ["last", "first"]:
msg = "invalid na_position: {na_position!r}"
raise ValueError(msg.format(na_position=na_position))
sorted_idx = nargsort(self, ascending=ascending, na_position=na_position)
if inplace:
self._codes = self._codes[sorted_idx]
else:
return self._constructor(
values=self._codes[sorted_idx], dtype=self.dtype, fastpath=True
)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy.array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype("float64")
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def ravel(self, order="C"):
"""
Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
numpy.array
"""
warn(
"Categorical.ravel will return a Categorical object instead "
"of an ndarray in a future version.",
FutureWarning,
stacklevel=2,
)
return np.array(self)
    def view(self, dtype=None):
        # Reinterpreting as a different dtype is not supported; only a plain
        # shallow view over the same codes is allowed.
        if dtype is not None:
            raise NotImplementedError(dtype)
        return self._constructor(values=self._codes, dtype=self.dtype, fastpath=True)
    def to_dense(self):
        """
        Return my 'dense' representation
        For internal compatibility with numpy arrays.
        Returns
        -------
        dense : array
        """
        # Materializes the category values via __array__.
        return np.asarray(self)
    @deprecate_kwarg(old_arg_name="fill_value", new_arg_name="value")
    def fillna(self, value=None, method=None, limit=None):
        """
        Fill NA/NaN values using the specified method.
        Parameters
        ----------
        value : scalar, dict, Series
            If a scalar value is passed it is used to fill all missing values.
            Alternatively, a Series or dict can be used to fill in different
            values for each index. The value should not be a list. The
            value(s) passed should either be in the categories or should be
            NaN.
        method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
            Method to use for filling holes in reindexed Series
            pad / ffill: propagate last valid observation forward to next valid
            backfill / bfill: use NEXT valid observation to fill gap
        limit : int, default None
            (Not implemented yet for Categorical!)
            If method is specified, this is the maximum number of consecutive
            NaN values to forward/backward fill. In other words, if there is
            a gap with more than this number of consecutive NaNs, it will only
            be partially filled. If method is not specified, this is the
            maximum number of entries along the entire axis where NaNs will be
            filled.
        Returns
        -------
        filled : Categorical with NA/NaN filled
        """
        # Normalize/validate the value-vs-method combination (exactly one
        # may be given); a dict value is converted to a Series here.
        value, method = validate_fillna_kwargs(
            value, method, validate_scalar_dict_value=False
        )
        if value is None:
            value = np.nan
        if limit is not None:
            raise NotImplementedError(
                "specifying a limit for fillna has not been implemented yet"
            )
        codes = self._codes
        # pad / bfill
        if method is not None:
            # Interpolate on the dense values, then re-encode to codes.
            values = self.to_dense().reshape(-1, len(self))
            values = interpolate_2d(values, method, 0, None, value).astype(
                self.categories.dtype
            )[0]
            codes = _get_codes_for_values(values, self.categories)
        else:
            # If value is a dict or a Series (a dict value has already
            # been converted to a Series)
            if isinstance(value, ABCSeries):
                if not value[~value.isin(self.categories)].isna().all():
                    raise ValueError("fill value must be in categories")
                values_codes = _get_codes_for_values(value, self.categories)
                indexer = np.where(codes == -1)
                # NOTE(review): this writes into self._codes in place (no
                # copy is taken on this branch) — confirm that mutating the
                # original is intended here.
                codes[indexer] = values_codes[indexer]
            # If value is not a dict or Series it should be a scalar
            elif is_hashable(value):
                if not isna(value) and value not in self.categories:
                    raise ValueError("fill value must be in categories")
                mask = codes == -1
                if mask.any():
                    # Copy before writing so the original is untouched.
                    codes = codes.copy()
                    if isna(value):
                        codes[mask] = -1
                    else:
                        codes[mask] = self.categories.get_loc(value)
            else:
                raise TypeError(
                    '"value" parameter must be a scalar, dict '
                    "or Series, but you passed a "
                    '"{0}"'.format(type(value).__name__)
                )
        return self._constructor(codes, dtype=self.dtype, fastpath=True)
    def take_nd(self, indexer, allow_fill=None, fill_value=None):
        """
        Take elements from the Categorical.
        Parameters
        ----------
        indexer : sequence of int
            The indices in `self` to take. The meaning of negative values in
            `indexer` depends on the value of `allow_fill`.
        allow_fill : bool, default None
            How to handle negative values in `indexer`.
            * False: negative values in `indices` indicate positional indices
              from the right. This is similar to
              :func:`numpy.take`.
            * True: negative values in `indices` indicate missing values
              (the default). These values are set to `fill_value`. Any other
              other negative values raise a ``ValueError``.
            .. versionchanged:: 0.23.0
               Deprecated the default value of `allow_fill`. The deprecated
               default is ``True``. In the future, this will change to
               ``False``.
        fill_value : object
            The value to use for `indices` that are missing (-1), when
            ``allow_fill=True``. This should be the category, i.e. a value
            in ``self.categories``, not a code.
        Returns
        -------
        Categorical
            This Categorical will have the same categories and ordered as
            `self`.
        See Also
        --------
        Series.take : Similar method for Series.
        numpy.ndarray.take : Similar method for NumPy arrays.
        Examples
        --------
        >>> cat = pd.Categorical(['a', 'a', 'b'])
        >>> cat
        [a, a, b]
        Categories (2, object): [a, b]
        Specify ``allow_fill==False`` to have negative indices mean indexing
        from the right.
        >>> cat.take([0, -1, -2], allow_fill=False)
        [a, b, a]
        Categories (2, object): [a, b]
        With ``allow_fill=True``, indices equal to ``-1`` mean "missing"
        values that should be filled with the `fill_value`, which is
        ``np.nan`` by default.
        >>> cat.take([0, -1, -1], allow_fill=True)
        [a, NaN, NaN]
        Categories (2, object): [a, b]
        The fill value can be specified.
        >>> cat.take([0, -1, -1], allow_fill=True, fill_value='a')
        [a, a, a]
        Categories (3, object): [a, b]
        Specifying a fill value that's not in ``self.categories``
        will raise a ``TypeError``.
        """
        indexer = np.asarray(indexer, dtype=np.intp)
        if allow_fill is None:
            # Deprecation path: an unspecified allow_fill with negative
            # indices warns and falls back to the legacy True behavior.
            if (indexer < 0).any():
                warn(_take_msg, FutureWarning, stacklevel=2)
                allow_fill = True
        dtype = self.dtype
        if isna(fill_value):
            # NaN fill maps to the -1 code sentinel.
            fill_value = -1
        elif allow_fill:
            # convert user-provided `fill_value` to codes
            if fill_value in self.categories:
                fill_value = self.categories.get_loc(fill_value)
            else:
                msg = "'fill_value' ('{}') is not in this Categorical's categories."
                raise TypeError(msg.format(fill_value))
        codes = take(self._codes, indexer, allow_fill=allow_fill, fill_value=fill_value)
        result = type(self).from_codes(codes, dtype=dtype)
        return result

    # `take` is the public ExtensionArray entry point for this operation.
    take = take_nd
    def __len__(self):
        """
        The length of this Categorical.
        """
        # One code per element.
        return len(self._codes)
    def __iter__(self):
        """
        Returns an Iterator over the values of this Categorical.
        """
        # Materialize to a Python list first; iteration then yields scalars.
        return iter(self._internal_get_values().tolist())
    def __contains__(self, key):
        """
        Returns True if `key` is in this Categorical.
        """
        # if key is a NaN, check if any NaN is in self.
        if is_scalar(key) and isna(key):
            return self.isna().any()
        # Otherwise delegate to the shared `contains` helper, which tests
        # membership against the codes array.
        return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num) :]._get_repr(length=False, footer=False)
result = "{head}, ..., {tail}".format(head=head[:-1], tail=tail[1:])
if footer:
result = "{result}\n{footer}".format(
result=result, footer=self._repr_footer()
)
return str(result)
    def _repr_categories(self):
        """
        return the base repr for the categories
        """
        # display.max_categories == 0 means "use the default of 10".
        max_categories = (
            10
            if get_option("display.max_categories") == 0
            else get_option("display.max_categories")
        )
        from pandas.io.formats import format as fmt

        if len(self.categories) > max_categories:
            # Abbreviate: half from the front, half from the back, with an
            # ellipsis in between.
            num = max_categories // 2
            head = fmt.format_array(self.categories[:num], None)
            tail = fmt.format_array(self.categories[-num:], None)
            category_strs = head + ["..."] + tail
        else:
            category_strs = fmt.format_array(self.categories, None)
        # Strip all leading spaces, which format_array adds for columns...
        category_strs = [x.strip() for x in category_strs]
        return category_strs
    def _repr_categories_info(self):
        """
        Returns a string representation of the footer.
        """
        category_strs = self._repr_categories()
        dtype = str(self.categories.dtype)
        levheader = "Categories ({length}, {dtype}): ".format(
            length=len(self.categories), dtype=dtype
        )
        width, height = get_terminal_size()
        max_width = get_option("display.width") or width
        if console.in_ipython_frontend():
            # 0 = no breaks
            max_width = 0
        levstring = ""
        start = True
        cur_col_len = len(levheader)  # header
        # Ordered categoricals join with " < "; unordered with ", ".
        sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
        linesep = sep.rstrip() + "\n"  # remove whitespace
        for val in category_strs:
            if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
                # Wrap: start a new line aligned under the header.
                levstring += linesep + (" " * (len(levheader) + 1))
                cur_col_len = len(levheader) + 1  # header + a whitespace
            elif not start:
                levstring += sep
                cur_col_len += len(val)
            levstring += val
            start = False
        # replace to simple save space by
        return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return "Length: {length}\n{info}".format(
length=len(self), info=self._repr_categories_info()
)
    def _get_repr(self, length=True, na_rep="NaN", footer=True):
        from pandas.io.formats import format as fmt
        # Delegate all rendering to the shared CategoricalFormatter.
        formatter = fmt.CategoricalFormatter(
            self, length=length, na_rep=na_rep, footer=footer
        )
        result = formatter.to_string()
        return str(result)
def __repr__(self):
"""
String representation.
"""
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = "[], {repr_msg}".format(repr_msg=msg)
return result
def _maybe_coerce_indexer(self, indexer):
"""
return an indexer coerced to the codes dtype
"""
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == "i":
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
"""
Return an item.
"""
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(
values=self._codes[key], dtype=self.dtype, fastpath=True
)
    def __setitem__(self, key, value):
        """
        Item assignment.
        Raises
        ------
        ValueError
            If (one or more) Value is not in categories or if a assigned
            `Categorical` does not have the same categories
        """
        value = extract_array(value, extract_numpy=True)
        # require identical categories set
        if isinstance(value, Categorical):
            if not is_dtype_equal(self, value):
                raise ValueError(
                    "Cannot set a Categorical with another, "
                    "without identical categories"
                )
            if not self.categories.equals(value.categories):
                # Same category set, different order: re-express the incoming
                # codes in terms of our category ordering.
                new_codes = _recode_for_categories(
                    value.codes, value.categories, self.categories
                )
                value = Categorical.from_codes(new_codes, dtype=self.dtype)
        # Normalize to list-like so the membership check below is uniform.
        rvalue = value if is_list_like(value) else [value]
        from pandas import Index
        to_add = Index(rvalue).difference(self.categories)
        # no assignments of values not in categories, but it's always ok to set
        # something to np.nan
        if len(to_add) and not isna(to_add).all():
            raise ValueError(
                "Cannot setitem on a Categorical with a new "
                "category, set the categories first"
            )
        # set by position
        if isinstance(key, (int, np.integer)):
            pass
        # tuple of indexers (dataframe)
        elif isinstance(key, tuple):
            # only allow 1 dimensional slicing, but can
            # in a 2-d case be passd (slice(None),....)
            if len(key) == 2:
                if not com.is_null_slice(key[0]):
                    raise AssertionError("invalid slicing for a 1-ndim categorical")
                key = key[1]
            elif len(key) == 1:
                key = key[0]
            else:
                raise AssertionError("invalid slicing for a 1-ndim categorical")
        # slicing in Series or Categorical
        elif isinstance(key, slice):
            pass
        # else: array of True/False in Series or Categorical
        # Translate the assigned values into codes and write them in place.
        lindexer = self.categories.get_indexer(rvalue)
        lindexer = self._maybe_coerce_indexer(lindexer)
        self._codes[key] = lindexer
    def _reverse_indexer(self):
        """
        Compute the inverse of a categorical, returning
        a dict of categories -> indexers.
        *This is an internal function*
        Returns
        -------
        dict of categories -> indexers
        Examples
        --------
        >>> c = pd.Categorical(list('aabca'))
        >>> c
        [a, a, b, c, a]
        Categories (3, object): [a, b, c]
        >>> c.categories
        Index(['a', 'b', 'c'], dtype='object')
        >>> c.codes
        array([0, 0, 1, 2, 0], dtype=int8)
        >>> c._reverse_indexer()
        {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
        """
        categories = self.categories
        # groupsort_indexer returns positions grouped by code plus per-group
        # counts; presumably the leading group is the -1/NA bucket (which the
        # zip below skips) — confirm against libalgos.groupsort_indexer.
        r, counts = libalgos.groupsort_indexer(
            self.codes.astype("int64"), categories.size
        )
        counts = counts.cumsum()
        # Slice the grouped positions into one array per category.
        result = (r[start:end] for start, end in zip(counts, counts[1:]))
        result = dict(zip(categories, result))
        return result
# reduction ops #
def _reduce(self, name, axis=0, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = "Categorical cannot perform the operation {op}"
raise TypeError(msg.format(op=name))
return func(**kwargs)
def min(self, numeric_only=None, **kwargs):
"""
The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
"""
self.check_for_ordered("min")
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
"""
The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered("max")
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
    def mode(self, dropna=True):
        """
        Returns the mode(s) of the Categorical.
        Always returns `Categorical` even if only one value.
        Parameters
        ----------
        dropna : bool, default True
            Don't consider counts of NaN/NaT.
            .. versionadded:: 0.24.0
        Returns
        -------
        modes : `Categorical` (sorted)
        """
        codes = self._codes
        if dropna:
            # Drop the NA sentinel (-1) before counting frequencies.
            good = self._codes != -1
            codes = self._codes[good]
        # mode_int64 yields the most frequent code(s); sort for a
        # deterministic result order.
        codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
        return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def unique(self):
"""
Return the ``Categorical`` which ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order, categories
keeps existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list('baabc'))
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list('baabc'), categories=list('abc'))
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True)
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype("int64")
return codes, -1
    @classmethod
    def _from_factorized(cls, uniques, original):
        # Rebuild from factorize() output: `uniques` are codes into the
        # original's categories; the original dtype carries categories/order.
        return original._constructor(
            original.categories.take(uniques), dtype=original.dtype
        )
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
bool
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = _recode_for_categories(
other.codes, other.categories, self.categories
)
return np.array_equal(self._codes, other_codes)
return False
def is_dtype_equal(self, other):
"""
Returns True if categoricals are the same dtype
same categories, and same ordered
Parameters
----------
other : Categorical
Returns
-------
bool
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
"""
Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ["counts", "freqs"]
result.index.name = "categories"
return result
    @Substitution(klass="Categorical")
    @Appender(_extension_array_shared_docs["repeat"])
    def repeat(self, repeats, axis=None):
        # `axis` is accepted for numpy compat only; validate_repeat rejects
        # any non-default value.
        nv.validate_repeat(tuple(), dict(axis=axis))
        codes = self._codes.repeat(repeats)
        return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
    # Implement the ExtensionArray interface
    @property
    def _can_hold_na(self):
        # NA is representable via the -1 code sentinel.
        return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import concat_categorical
return concat_categorical(to_concat)
    def isin(self, values):
        """
        Check whether `values` are contained in Categorical.
        Return a boolean NumPy Array showing whether each element in
        the Categorical matches an element in the passed sequence of
        `values` exactly.
        Parameters
        ----------
        values : set or list-like
            The sequence of values to test. Passing in a single string will
            raise a ``TypeError``. Instead, turn a single string into a
            list of one element.
        Returns
        -------
        isin : numpy.ndarray (bool dtype)
        Raises
        ------
        TypeError
          * If `values` is not a set or list-like
        See Also
        --------
        pandas.Series.isin : Equivalent method on Series.
        Examples
        --------
        >>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
        ...                'hippo'])
        >>> s.isin(['cow', 'lama'])
        array([ True,  True,  True, False,  True, False])
        Passing a single string as ``s.isin('lama')`` will raise an error. Use
        a list of one element instead:
        >>> s.isin(['lama'])
        array([ True, False,  True, False,  True, False])
        """
        if not is_list_like(values):
            raise TypeError(
                "only list-like objects are allowed to be passed"
                " to isin(), you passed a [{values_type}]".format(
                    values_type=type(values).__name__
                )
            )
        values = sanitize_array(values, None, None)
        null_mask = np.asarray(isna(values))
        # Translate the probe values into codes; get_indexer yields -1 for
        # values that are not among the categories.
        code_values = self.categories.get_indexer(values)
        # Keep codes that matched a category, plus NA probes so missing
        # entries in `values` can match missing data in self.
        code_values = code_values[null_mask | (code_values >= 0)]
        return algorithms.isin(self.codes, code_values)
# The Series.cat accessor
@delegate_names(
    delegate=Categorical, accessors=["categories", "ordered"], typ="property"
)
@delegate_names(
    delegate=Categorical,
    accessors=[
        "rename_categories",
        "reorder_categories",
        "add_categories",
        "remove_categories",
        "remove_unused_categories",
        "set_categories",
        "as_ordered",
        "as_unordered",
    ],
    typ="method",
)
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
    """
    Accessor object for categorical properties of the Series values.
    Be aware that assigning to `categories` is an inplace operation, while all
    methods return new categorical data per default (but can be called with
    `inplace=True`).
    Parameters
    ----------
    data : Series or CategoricalIndex
    Examples
    --------
    >>> s.cat.categories
    >>> s.cat.categories = list('abc')
    >>> s.cat.rename_categories(list('cab'))
    >>> s.cat.reorder_categories(list('cab'))
    >>> s.cat.add_categories(['d','e'])
    >>> s.cat.remove_categories(['d'])
    >>> s.cat.remove_unused_categories()
    >>> s.cat.set_categories(list('abcde'))
    >>> s.cat.as_ordered()
    >>> s.cat.as_unordered()
    """
    # Deprecated accessor attributes (see the warning-emitting properties below).
    _deprecations = PandasObject._deprecations | frozenset(
        ["categorical", "index", "name"]
    )
    def __init__(self, data):
        # Validate the dtype up front, then cache the pieces of the Series
        # needed so delegated calls can rebuild a Series with the original
        # index and name.
        self._validate(data)
        self._parent = data.values
        self._index = data.index
        self._name = data.name
        self._freeze()
    @staticmethod
    def _validate(data):
        # Only category-dtype data may use the .cat accessor.
        if not is_categorical_dtype(data.dtype):
            raise AttributeError("Can only use .cat accessor with a 'category' dtype")
    def _delegate_property_get(self, name):
        # Forward property reads to the underlying Categorical.
        return getattr(self._parent, name)
    def _delegate_property_set(self, name, new_values):
        # Forward property writes to the underlying Categorical (inplace).
        return setattr(self._parent, name, new_values)
    @property
    def codes(self):
        """
        Return Series of codes as well as the index.
        """
        from pandas import Series
        return Series(self._parent.codes, index=self._index)
    def _delegate_method(self, name, *args, **kwargs):
        # Call the Categorical method; wrap a non-None result back into a
        # Series carrying the original index and name.
        from pandas import Series
        method = getattr(self._parent, name)
        res = method(*args, **kwargs)
        if res is not None:
            return Series(res, index=self._index, name=self._name)
    @property
    def categorical(self):
        # Note: Upon deprecation, `test_tab_completion_with_categorical` will
        # need to be updated. `categorical` will need to be removed from
        # `ok_for_cat`.
        warn(
            "`Series.cat.categorical` has been deprecated. Use the "
            "attributes on 'Series.cat' directly instead.",
            FutureWarning,
            stacklevel=2,
        )
        return self._parent
    @property
    def name(self):
        # Note: Upon deprecation, `test_tab_completion_with_categorical` will
        # need to be updated. `name` will need to be removed from
        # `ok_for_cat`.
        warn(
            "`Series.cat.name` has been deprecated. Use `Series.name` instead.",
            FutureWarning,
            stacklevel=2,
        )
        return self._name
    @property
    def index(self):
        # Note: Upon deprecation, `test_tab_completion_with_categorical` will
        # need to be updated. `index` will need to be removed from
        # ok_for_cat`.
        warn(
            "`Series.cat.index` has been deprecated. Use `Series.index` instead.",
            FutureWarning,
            stacklevel=2,
        )
        return self._index
# utility routines
def _get_codes_for_values(values, categories):
    """
    utility routine to turn values into codes given the specified categories
    """
    dtype_equal = is_dtype_equal(values.dtype, categories.dtype)
    if dtype_equal:
        # To prevent erroneous dtype coercion in _get_data_algo, retrieve
        # the underlying numpy array. gh-22702
        values = getattr(values, "_ndarray_values", values)
        categories = getattr(categories, "_ndarray_values", categories)
    elif is_extension_array_dtype(categories.dtype) and is_object_dtype(values):
        # Support inferring the correct extension dtype from an array of
        # scalar objects. e.g.
        # Categorical(array[Period, Period], categories=PeriodIndex(...))
        try:
            values = categories.dtype.construct_array_type()._from_sequence(values)
        except Exception:
            # but that may fail for any reason, so fall back to object
            values = ensure_object(values)
            categories = ensure_object(categories)
    else:
        # Mismatched dtypes: compare everything as Python objects.
        values = ensure_object(values)
        categories = ensure_object(categories)
    # Build a hash table over the categories and look every value up in it;
    # the lookup result is the code array (missing values hash to -1).
    (hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
    (_, _), cats = _get_data_algo(categories, _hashtables)
    t = hash_klass(len(cats))
    t.map_locations(cats)
    return coerce_indexer_dtype(t.lookup(vals), cats)
def _recode_for_categories(codes: np.ndarray, old_categories, new_categories):
    """
    Convert a set of codes for to a new set of categories
    Parameters
    ----------
    codes : np.ndarray
    old_categories, new_categories : Index
    Returns
    -------
    new_codes : np.ndarray[np.int64]
    Examples
    --------
    >>> old_cat = pd.Index(['b', 'a', 'c'])
    >>> new_cat = pd.Index(['a', 'b'])
    >>> codes = np.array([0, 1, 1, 2])
    >>> _recode_for_categories(codes, old_cat, new_cat)
    array([ 1,  0,  0, -1])
    """
    if len(old_categories) == 0:
        # All null anyway, so just retain the nulls
        return codes.copy()
    elif new_categories.equals(old_categories):
        # Same categories, so no need to actually recode
        return codes.copy()
    # indexer maps each old code to its position in the new categories
    # (-1 where the old category no longer exists).
    indexer = coerce_indexer_dtype(
        new_categories.get_indexer(old_categories), new_categories
    )
    # Apply the mapping to every code; codes without a new home become -1.
    new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
    return new_codes
def _convert_to_list_like(list_like):
    """Return *list_like* as-is, as a list, or wrapped in a 1-element list.

    Array-likes (anything exposing a ``dtype``) and plain lists pass
    through untouched; other sequences/iterators are materialized; scalars
    and anything unrecognised are wrapped.
    """
    if hasattr(list_like, "dtype") or isinstance(list_like, list):
        return list_like
    if is_sequence(list_like) or isinstance(list_like, tuple) or is_iterator(list_like):
        return list(list_like)
    # Scalars and any other fall-through case get wrapped identically.
    return [list_like]
def _factorize_from_iterable(values):
    """
    Factorize an input `values` into `categories` and `codes`. Preserves
    categorical dtype in `categories`.
    *This is an internal function*
    Parameters
    ----------
    values : list-like
    Returns
    -------
    codes : ndarray
    categories : Index
        If `values` has a categorical dtype, then `categories` is
        a CategoricalIndex keeping the categories and order of `values`.
    """
    if not is_list_like(values):
        raise TypeError("Input must be list-like")
    if is_categorical_dtype(values):
        values = extract_array(values)
        # The Categorical we want to build has the same categories
        # as values but its codes are by def [0, ..., len(n_categories) - 1]
        cat_codes = np.arange(len(values.categories), dtype=values.codes.dtype)
        categories = Categorical.from_codes(cat_codes, dtype=values.dtype)
        codes = values.codes
    else:
        # The value of ordered is irrelevant since we don't use cat as such,
        # but only the resulting categories, the order of which is independent
        # from ordered. Set ordered to False as default. See GH #15457
        cat = Categorical(values, ordered=False)
        categories = cat.categories
        codes = cat.codes
    return codes, categories
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, zip(*(_factorize_from_iterable(it) for it in iterables)))
| 32.809818 | 88 | 0.581234 |
7959861914a22184c6ec2a204dd80c4dde66e050 | 10,686 | py | Python | main_test.py | zkcys001/distracting_feature | 508c4f7a1b2e6a99407a44e33e630803a3d0c89d | [
"Apache-1.1"
] | 25 | 2019-11-28T13:39:09.000Z | 2021-11-25T05:30:47.000Z | main_test.py | zkcys001/distracting_feature | 508c4f7a1b2e6a99407a44e33e630803a3d0c89d | [
"Apache-1.1"
] | 2 | 2020-01-11T15:36:12.000Z | 2020-02-27T06:57:59.000Z | main_test.py | zkcys001/distracting_feature | 508c4f7a1b2e6a99407a44e33e630803a3d0c89d | [
"Apache-1.1"
] | 4 | 2019-12-25T07:57:40.000Z | 2021-09-21T16:42:51.000Z | import misc
import os
import torch.utils.data
import time
import math
from argparse import ArgumentParser
from model.model_resnet import resnet50
from model.model_wresnet import wresnet50
from model.model_b3_p import Reab3p16
from model.model_b3_plstm import b3_plstm
from model.model_b3_palstm import b3palstm
from model.model_b3_pa import b3pa
from model.model_rn_mlp import rn_mlp
from model.model_zkc import tmp
from model.model_plusMLP import WildRelationNet
from model.model_m_a import RN_ap
from model.model_r import RN_r
from model.model_pluspanda import RNap2
from model.model_esem import esemble
from model.model_nmn import nmn
from model.model_baseline_mlp import ReasonNet_p
from model.model_baseline_mlp16 import ReasonNet_p16
from model.model_baseline import ReasonNet
from model.model_a_mlp import ReasonNet_ap
from model.model_b3_p3 import b3p3
from model.model_multi3 import multi3
from model.model_split import b3_split
from rl.help_function import *
from rl.qlearning import *
class Dataset(torch.utils.data.Dataset):
    """Dataset of PGM samples: 16x160x160 image stacks plus a target label,
    loaded lazily from per-sample .npz archives."""

    def __init__(self, data_files):
        # Paths to the per-sample archives; loading happens in __getitem__.
        self.data_files = data_files

    def __getitem__(self, ind):
        # Read from the "_s" sibling directory of the neutral split.
        archive = np.load(self.data_files[ind].replace("/neutral", "/neutral_s"))
        images = archive['shap_im'].reshape(16, 160, 160)
        # x = msk_abstract.post_data(x)
        target = archive['target']
        # print( data['relation_structure_encoded'][0])
        image_tensor = torch.from_numpy(images).type(torch.float32)
        target_tensor = torch.from_numpy(target)
        return image_tensor, target_tensor

    def __len__(self):
        return len(self.data_files)
def compute_data_files():
    """Return all relation-structure strings sorted by increasing complexity.

    Complexity is approximated by the number of commas in the structure
    string (more clauses -> more commas).

    NOTE(review): despite the name, this returns structure strings, not
    data file paths — the file-collection logic was disabled (previously a
    large commented-out block, now removed along with the unused
    ``data_files`` and ``i`` locals).
    """
    print('Loading structure metadata')
    structure_to_files = misc.load_file('save_state/neutral/structure_to_files.pkl')
    all_structure_strs = list(structure_to_files.keys())
    # The number of commas in the structure_str is used as a proxy for complexity
    all_structure_strs.sort(key=lambda x: x.count(','))
    return all_structure_strs
def init_weights(m):
    """Kaiming-initialize Conv2d/Linear layers; zero their biases.

    Other module types are left untouched.
    """
    if isinstance(m, (nn.Conv2d, nn.Linear)):
        torch.nn.init.kaiming_normal_(m.weight, nonlinearity='relu')
        nn.init.constant_(m.bias, 0)
def weights_init(m):
    """Initialize a layer based on its class name (ResNet-style convention).

    Conv*      : He-style normal init scaled by fan-out; zero bias if present.
    BatchNorm* : unit weight, zero bias.
    Linear     : N(0, 0.01) weights, all-ones bias.

    Fix: removed the unused local ``n = m.weight.size(1)`` from the Linear
    branch (computed but never read).
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        # Fan-out of the convolution: kernel area times output channels.
        n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
        m.weight.data.normal_(0, math.sqrt(2. / n))
        if m.bias is not None:
            m.bias.data.zero_()
    elif classname.find('BatchNorm') != -1:
        m.weight.data.fill_(1)
        m.bias.data.zero_()
    elif classname.find('Linear') != -1:
        m.weight.data.normal_(0, 0.01)
        m.bias.data = torch.ones(m.bias.data.size())
def save_state(state, path):
    """Serialize *state* with torch.save, creating parent directories first.

    NOTE: *path* must contain a directory component (os.path.dirname must be
    non-empty), matching the original behavior.
    """
    parent_dir = os.path.dirname(path)
    os.makedirs(parent_dir, exist_ok=True)
    torch.save(state, path)
def averagenum(num):
    """Return the arithmetic mean of a sequence of numbers.

    Uses the built-in ``sum``/``len`` instead of the original manual
    accumulation loop. Raises ZeroDivisionError on an empty sequence,
    matching the original behavior.
    """
    return sum(num) / len(num)
def adjust_learning_rate(optimizer, epoch, lr_steps, n):
    """Multiply each param group's lr (and weight_decay) by a fixed factor.

    NOTE(review): the original docstring claimed "decayed by 10 every 30
    epochs", but the code applies a fixed 0.2 factor on every call and
    never reads ``lr_steps``.  With ``n > 1`` (DataParallel) the groups are
    reached through ``optimizer.module``; the two branches also differ in
    where ``weight_decay`` is updated relative to the epoch>15 check —
    presumably unintentional, TODO confirm.
    """
    decay = 0.2
    if n > 1:
        for param_group in optimizer.module.param_groups:
            param_group['lr'] = decay * param_group['lr']
            print(("epoch %d : lr=%.5f") % (epoch, param_group['lr']))
            if epoch > 15:
                param_group['momentum'] = 0.9
            param_group['weight_decay'] = decay * param_group['lr']
    else:
        for param_group in optimizer.param_groups:
            param_group['lr'] = decay * param_group['lr']
            param_group['weight_decay'] = decay * param_group['lr']
            print(("epoch %d : lr=%.5f") % (epoch, param_group['lr']))
            if epoch > 15:
                param_group['momentum'] = 0.9
def main(args):
    """Evaluate a trained model on PGM test splits, grouped by structure.

    Builds the model named by ``args.net``, optionally wraps it in
    DataParallel and restores weights from ``args.path_weight``, then for
    every sufficiently large SHAPE-related relation structure evaluates
    accuracy over its test files and prints per-structure and overall
    accuracy.

    Improvements over the original:
    * the 40-line if/elif model-selection chain is a dispatch dict;
    * an unknown ``args.net`` now raises a clear ValueError instead of a
      late NameError on an undefined ``model``;
    * removed the unused ``since = time.time()`` local and the large
      commented-out normalization-stats block.
    """
    # Map CLI names to model factories; extend here when adding models.
    model_factories = {
        'resnet': lambda: resnet50(pretrained=True),
        'wresnet': lambda: wresnet50(pretrained=True),
        'tmp': tmp,
        'RN_mlp': WildRelationNet,
        'ReasonNet': ReasonNet,
        'ReaP': ReasonNet_p,
        'Reap16': ReasonNet_p16,
        'Reaap': ReasonNet_ap,
        'RN_ap': RN_ap,
        'RN_r': RN_r,
        'esemble': esemble,
        'RNap2': RNap2,
        'rn_mlp': rn_mlp,
        'Reab3p16': Reab3p16,
        'b3pa': b3pa,
        'b3_plstm': b3_plstm,
        'b3_palstm': b3palstm,
        'nmn': nmn,
        'b3p3': b3p3,
        'multi3': multi3,
        'split': b3_split,
    }
    print("network is:", args.net)
    if args.net not in model_factories:
        raise ValueError("unknown network: %s" % args.net)
    model = model_factories[args.net]()
    if args.gpunum > 1:
        model = nn.DataParallel(model, device_ids=range(args.gpunum))
    if args.net != 'RN_r':
        model.apply(weights_init)
        print('weight initial')
    weights_path = args.path_weight
    if os.path.exists(weights_path) and args.restore:
        pretrained_dict = torch.load(weights_path)
        model_dict = model.state_dict()
        # Keep only checkpoint entries whose names exist in this model.
        pretrained_dict1 = {k: v for k, v in pretrained_dict.items() if k in model_dict}
        model_dict.update(pretrained_dict1)
        model.load_state_dict(model_dict)
        # optimizer.load_state_dict(torch.load(optimizer_path))
        print('load weight')
    model.cuda()
    epoch_count = 1
    print(time.strftime('%H:%M:%S', time.localtime(time.time())), 'testing')
    print('Loading structure metadata')
    structure_to_files = misc.load_file('save_state/neutral/structure_to_files.pkl')
    all_structure_strs = list(structure_to_files.keys())
    accuracy_all = []
    for structure_str in all_structure_strs:
        data_i = structure_to_files[structure_str]
        # Only evaluate SHAPE-related structures with enough samples.
        if "SHAPE" not in structure_str or len(data_i) <= 10000:
            continue
        data_files = list(data_i)
        test_files = [data_file for data_file in data_files if 'test' in data_file]
        test_loader = torch.utils.data.DataLoader(Dataset(test_files), batch_size=args.batch_size, shuffle=True,
                                                  num_workers=args.numwork)
        model.eval()
        accuracy_epoch = []
        for x, y in test_loader:
            x, y = Variable(x).cuda(), Variable(y).cuda()
            pred = model(x)
            pred = pred.data.max(1)[1]
            correct = pred.eq(y.data).cpu().sum().numpy()
            accuracy = correct * 100.0 / len(y)
            accuracy_epoch.append(accuracy)
            accuracy_all.append(accuracy)
        acc = sum(accuracy_epoch) / len(accuracy_epoch)
        print(('epoch:%d, acc:%.1f') % (epoch_count, acc), "test_num:", len(test_files), (structure_str))
        epoch_count += 1
    print(('epoch:%d, acc:%.1f') % (epoch_count, sum(accuracy_all) / len(accuracy_all)))
if __name__ == '__main__':
    # Build the evaluation CLI and run. A stale commented-out duplicate of
    # this parser (containing a `degst` typo) was removed.
    parser = ArgumentParser()
    parser.add_argument('--regime', dest='regime', type=str, default='neutral')
    parser.add_argument('--dataset_size', dest='dataset_size', type=int, default=1, help='-1 for full dataset')
    parser.add_argument('--batch_size', dest='batch_size', type=int, default=104 * 2)
    parser.add_argument('--lr_step', dest='lr_step', type=int, default=8)
    parser.add_argument('--lr', dest='lr', type=float, default=5e-2)
    parser.add_argument('--weight_decay', dest='weight_decay', type=float, default=5e-4)
    parser.add_argument('--mo', dest='mo', type=float, default=0.8)
    parser.add_argument('--net', dest='net', type=str, default="Reab3p16")  # Reab3p16 b3p3
    parser.add_argument('--optm', dest='optm', type=str, default='SGD')
    parser.add_argument('--gpunum', dest='gpunum', type=int, default=2)
    parser.add_argument('--numwork', dest='numwork', type=int, default=6)
    parser.add_argument('--restore', dest='restore', type=bool, default=True)
    parser.add_argument('--path_weight', dest='path_weight', type=str, default='save/neutral/rl.pt')
    args = parser.parse_args()
    main(args)
| 37.363636 | 112 | 0.642897 |
7959867c6336fcc8232949274d25ef578649d3cf | 381 | py | Python | generate_train.py | samarthanand12/Custom-Object-Detection-using-Yolo | d293c73db84aa989d65aed5881bd76958f1d3740 | [
"MIT"
] | 40 | 2020-08-27T13:44:40.000Z | 2022-03-04T13:50:21.000Z | generate_train.py | samarthanand12/Custom-Object-Detection-using-Yolo | d293c73db84aa989d65aed5881bd76958f1d3740 | [
"MIT"
] | 8 | 2020-11-13T17:46:08.000Z | 2022-02-10T02:19:36.000Z | generate_train.py | samarthanand12/Custom-Object-Detection-using-Yolo | d293c73db84aa989d65aed5881bd76958f1d3740 | [
"MIT"
] | 18 | 2021-01-19T07:34:07.000Z | 2022-03-20T10:20:41.000Z | import os
image_files = []
os.chdir(os.path.join("data", "obj"))
for filename in os.listdir(os.getcwd()):
if filename.endswith(".jpg"):
image_files.append("data/obj/" + filename)
os.chdir("..")
with open("train.txt", "w") as outfile:
for image in image_files:
outfile.write(image)
outfile.write("\n")
outfile.close()
os.chdir("..") | 27.214286 | 51 | 0.60105 |
79598700a61867388cae882da8211df58a347318 | 843 | py | Python | tasksupervisor/api/tasksupervisor_info.py | iml130/mod.sw.tp.ts | 4cda3ef0d3791eb204d5510631fdb9ec7ec57aab | [
"Apache-2.0"
] | null | null | null | tasksupervisor/api/tasksupervisor_info.py | iml130/mod.sw.tp.ts | 4cda3ef0d3791eb204d5510631fdb9ec7ec57aab | [
"Apache-2.0"
] | 1 | 2021-04-24T08:43:59.000Z | 2021-04-24T08:43:59.000Z | tasksupervisor/api/tasksupervisor_info.py | iml130/mod.sw.tp.ts | 4cda3ef0d3791eb204d5510631fdb9ec7ec57aab | [
"Apache-2.0"
] | 1 | 2021-03-04T10:32:52.000Z | 2021-03-04T10:32:52.000Z | """ Contains TaskSupervisorInfo API class """
import uuid
class TaskSupervisorInfo():
    """
    Gets created at the start of the Supervisor and provides information
    about the total number of started materialflows.
    """
    def __init__(self):
        # Unique identifier for this supervisor instance.
        self.id = uuid.uuid4()
        # Materialflow ids seen so far (no duplicates).
        self.used_materialflows = []
        self.number_of_materialflows = len(self.used_materialflows)
        self.message = ""
    def append_materialflow(self, id_):
        """Register a materialflow id (at most once) and refresh the count."""
        if id_ not in self.used_materialflows:
            self.used_materialflows.append(id_)
            self.number_of_materialflows = len(self.used_materialflows)
    def remove_materialflow(self, id_):
        """Remove a registered materialflow id and refresh the count.

        Bug fix: the original condition was
        ``if id_ in self.used_materialflows == True``, which Python chains
        as ``(id_ in list) and (list == True)`` — always False, so removal
        never happened.
        """
        if id_ in self.used_materialflows:
            self.used_materialflows.remove(id_)
            self.number_of_materialflows = len(self.used_materialflows)
795987b1e13cf361fbabea3e8aa1ff5e5d3f7c5a | 1,409 | py | Python | ooobuild/lo/beans/property_set_info_change.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/beans/property_set_info_change.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/beans/property_set_info_change.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Const Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.beans
class PropertySetInfoChange(object):
    """
    Const Class
    specifies reasons for sending PropertySetInfoChangeEvents.
    See Also:
        `API PropertySetInfoChange <https://api.libreoffice.org/docs/idl/ref/namespacecom_1_1sun_1_1star_1_1beans_1_1PropertySetInfoChange.html>`_
    """
    # Metadata consumed by the surrounding UNO type system (generated code).
    __ooo_ns__: str = 'com.sun.star.beans'
    __ooo_full_ns__: str = 'com.sun.star.beans.PropertySetInfoChange'
    __ooo_type_name__: str = 'const'
    # Reason codes mirror the LibreOffice API constants of the same names.
    PROPERTY_INSERTED = 0
    """
    A property was inserted into a XPropertySetInfo.
    """
    PROPERTY_REMOVED = 1
    """
    A property was removed from a XPropertySetInfo.
    """
# Public API of this generated module.
__all__ = ['PropertySetInfoChange']
| 30.630435 | 146 | 0.730305 |
795988c364cf1ce7ca726fc9411ad5695574f21e | 1,558 | py | Python | pckt/cli.py | Allaman/pckt | e04d8c08c05b095f5e76d8bca02c8f17a2e79827 | [
"MIT"
] | null | null | null | pckt/cli.py | Allaman/pckt | e04d8c08c05b095f5e76d8bca02c8f17a2e79827 | [
"MIT"
] | null | null | null | pckt/cli.py | Allaman/pckt | e04d8c08c05b095f5e76d8bca02c8f17a2e79827 | [
"MIT"
] | null | null | null | """
cli defines command line argument options for pckt application
"""
import click
from db import clear_db, update_db
from fetch import get_data, fetch_items, get_pocket
from read import get_tags_stats, select_col, list_entries, view_entries
@click.group()
def main():
    """CLI for interacting with the Pocket API"""
@main.command()
@click.option('--count', default=10, help='number of entries to fetch')
@click.option('--path', default='pckt.db', help='path to sqlite3 file')
def update(count, path):
    """Updates/recreates the database containing all information"""
    # Wipe the existing database, then refill it from the Pocket API.
    clear_db(path)
    fetched = fetch_items(get_pocket(), count)
    update_db(path, get_data(fetched))
@main.command()
@click.option('--path', default='pckt.db', help='path to sqlite3 file')
@click.option('--sort/--no-sort', default=False, help='sort by tag numbers')
def tags(path, sort):
    """Prints tags and their figures"""
    # Column 2 of the table holds the tag data.
    tag_column = select_col(path, 2)
    get_tags_stats(tag_column, sort)
@main.command()
@click.option('--path', default='pckt.db', help='path to sqlite3 file')
@click.option('--width', default=80, help='column width of url and title')
@click.option('--col', default='complete', help='Which column to search')
@click.option('--count/--no-count', default=False, help='Prints number of items')
@click.option('--parsable/--no-parsable', default=False, help='Output no asci table')
@click.argument('keywords', nargs=-1)
def filter(path, width, col, count, parsable, keywords):
    """Filter and list entries"""
    # NOTE: the command name intentionally shadows the builtin `filter`;
    # click exposes it as the `filter` subcommand.
    matches = list_entries(path, col, keywords)
    view_entries(matches, width, count, parsable)
| 34.622222 | 85 | 0.712452 |
795989241a0d7172a602160703f9fbac1d434dad | 16,811 | py | Python | mit_semseg/models/hrnet.py | starkgines/PDI | dd6908c022179f935ae25d3afee9ea44bb49f162 | [
"BSD-3-Clause"
] | 4,303 | 2018-04-08T00:48:44.000Z | 2022-03-31T12:54:08.000Z | mit_semseg/models/hrnet.py | starkgines/PDI | dd6908c022179f935ae25d3afee9ea44bb49f162 | [
"BSD-3-Clause"
] | 212 | 2018-04-08T16:02:59.000Z | 2022-03-16T14:52:44.000Z | mit_semseg/models/hrnet.py | starkgines/PDI | dd6908c022179f935ae25d3afee9ea44bb49f162 | [
"BSD-3-Clause"
] | 1,057 | 2018-04-08T03:29:26.000Z | 2022-03-30T17:36:12.000Z | """
This HRNet implementation is modified from the following repository:
https://github.com/HRNet/HRNet-Semantic-Segmentation
"""
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from .utils import load_url
from mit_semseg.lib.nn import SynchronizedBatchNorm2d
# Use the project's multi-GPU synchronized batch norm everywhere below.
BatchNorm2d = SynchronizedBatchNorm2d
# Momentum for all BatchNorm layers in this backbone.
BN_MOMENTUM = 0.1
logger = logging.getLogger(__name__)
__all__ = ['hrnetv2']
# Download location for ImageNet-pretrained HRNetV2-W48 weights.
model_urls = {
    'hrnetv2': 'http://sceneparsing.csail.mit.edu/model/pretrained_resnet/hrnetv2_w48-imagenet.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
    """Build a bias-free 3x3 convolution with padding 1."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Residual basic block: two 3x3 conv+BN stages with identity shortcut."""

    # Output channels = planes * expansion (no widening for BasicBlock).
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = BatchNorm2d(planes, momentum=BN_MOMENTUM)
        # Optional projection applied to the shortcut when shapes differ.
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu(out)
class Bottleneck(nn.Module):
    """Residual bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand (x4)."""
    # Output channels = planes * expansion.
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        # 1x1 conv reduces to `planes` channels.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = BatchNorm2d(planes, momentum=BN_MOMENTUM)
        # 3x3 conv carries the (possibly strided) spatial computation.
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = BatchNorm2d(planes, momentum=BN_MOMENTUM)
        # 1x1 conv expands back to planes * expansion channels.
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
                               bias=False)
        self.bn3 = BatchNorm2d(planes * self.expansion,
                               momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        # Optional projection for the shortcut when shapes differ.
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class HighResolutionModule(nn.Module):
    """One HRNet stage: parallel multi-resolution branches plus fusion.

    Each branch processes one resolution with `num_blocks[i]` residual
    blocks; the fuse layers then exchange information between resolutions
    (1x1 conv + upsample for low->high, strided 3x3 convs for high->low).
    """
    def __init__(self, num_branches, blocks, num_blocks, num_inchannels,
                 num_channels, fuse_method, multi_scale_output=True):
        super(HighResolutionModule, self).__init__()
        self._check_branches(
            num_branches, blocks, num_blocks, num_inchannels, num_channels)
        self.num_inchannels = num_inchannels
        self.fuse_method = fuse_method
        self.num_branches = num_branches
        # When False, only the highest-resolution output is produced.
        self.multi_scale_output = multi_scale_output
        self.branches = self._make_branches(
            num_branches, blocks, num_blocks, num_channels)
        self.fuse_layers = self._make_fuse_layers()
        self.relu = nn.ReLU(inplace=True)
    def _check_branches(self, num_branches, blocks, num_blocks,
                        num_inchannels, num_channels):
        # All per-branch config lists must have exactly num_branches entries.
        if num_branches != len(num_blocks):
            error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format(
                num_branches, len(num_blocks))
            logger.error(error_msg)
            raise ValueError(error_msg)
        if num_branches != len(num_channels):
            error_msg = 'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format(
                num_branches, len(num_channels))
            logger.error(error_msg)
            raise ValueError(error_msg)
        if num_branches != len(num_inchannels):
            error_msg = 'NUM_BRANCHES({}) <> NUM_INCHANNELS({})'.format(
                num_branches, len(num_inchannels))
            logger.error(error_msg)
            raise ValueError(error_msg)
    def _make_one_branch(self, branch_index, block, num_blocks, num_channels,
                         stride=1):
        # Projection shortcut when stride or channel count changes.
        downsample = None
        if stride != 1 or \
           self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.num_inchannels[branch_index],
                          num_channels[branch_index] * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                BatchNorm2d(num_channels[branch_index] * block.expansion,
                            momentum=BN_MOMENTUM),
            )
        layers = []
        layers.append(block(self.num_inchannels[branch_index],
                            num_channels[branch_index], stride, downsample))
        # Record the branch's new channel width for later fuse layers.
        self.num_inchannels[branch_index] = \
            num_channels[branch_index] * block.expansion
        for i in range(1, num_blocks[branch_index]):
            layers.append(block(self.num_inchannels[branch_index],
                                num_channels[branch_index]))
        return nn.Sequential(*layers)
    def _make_branches(self, num_branches, block, num_blocks, num_channels):
        branches = []
        for i in range(num_branches):
            branches.append(
                self._make_one_branch(i, block, num_blocks, num_channels))
        return nn.ModuleList(branches)
    def _make_fuse_layers(self):
        # A single branch needs no cross-resolution fusion.
        if self.num_branches == 1:
            return None
        num_branches = self.num_branches
        num_inchannels = self.num_inchannels
        fuse_layers = []
        # fuse_layers[i][j] transforms branch j's output to branch i's
        # resolution/width; j == i is identity (stored as None).
        for i in range(num_branches if self.multi_scale_output else 1):
            fuse_layer = []
            for j in range(num_branches):
                if j > i:
                    # Lower resolution -> 1x1 conv to match channels;
                    # upsampling happens later in forward().
                    fuse_layer.append(nn.Sequential(
                        nn.Conv2d(num_inchannels[j],
                                  num_inchannels[i],
                                  1,
                                  1,
                                  0,
                                  bias=False),
                        BatchNorm2d(num_inchannels[i], momentum=BN_MOMENTUM)))
                elif j == i:
                    fuse_layer.append(None)
                else:
                    # Higher resolution -> chain of strided 3x3 convs;
                    # the last one switches to the target channel width
                    # and omits the trailing ReLU.
                    conv3x3s = []
                    for k in range(i-j):
                        if k == i - j - 1:
                            num_outchannels_conv3x3 = num_inchannels[i]
                            conv3x3s.append(nn.Sequential(
                                nn.Conv2d(num_inchannels[j],
                                          num_outchannels_conv3x3,
                                          3, 2, 1, bias=False),
                                BatchNorm2d(num_outchannels_conv3x3,
                                            momentum=BN_MOMENTUM)))
                        else:
                            num_outchannels_conv3x3 = num_inchannels[j]
                            conv3x3s.append(nn.Sequential(
                                nn.Conv2d(num_inchannels[j],
                                          num_outchannels_conv3x3,
                                          3, 2, 1, bias=False),
                                BatchNorm2d(num_outchannels_conv3x3,
                                            momentum=BN_MOMENTUM),
                                nn.ReLU(inplace=True)))
                    fuse_layer.append(nn.Sequential(*conv3x3s))
            fuse_layers.append(nn.ModuleList(fuse_layer))
        return nn.ModuleList(fuse_layers)
    def get_num_inchannels(self):
        return self.num_inchannels
    def forward(self, x):
        # x is a list of tensors, one per branch.
        if self.num_branches == 1:
            return [self.branches[0](x[0])]
        for i in range(self.num_branches):
            x[i] = self.branches[i](x[i])
        x_fuse = []
        for i in range(len(self.fuse_layers)):
            y = x[0] if i == 0 else self.fuse_layers[i][0](x[0])
            for j in range(1, self.num_branches):
                if i == j:
                    y = y + x[j]
                elif j > i:
                    # Upsample the lower-resolution map to branch i's size.
                    width_output = x[i].shape[-1]
                    height_output = x[i].shape[-2]
                    y = y + F.interpolate(
                        self.fuse_layers[i][j](x[j]),
                        size=(height_output, width_output),
                        mode='bilinear',
                        align_corners=False)
                else:
                    y = y + self.fuse_layers[i][j](x[j])
            x_fuse.append(self.relu(y))
        return x_fuse
# Map config 'BLOCK' names to their implementing residual block classes.
blocks_dict = {
    'BASIC': BasicBlock,
    'BOTTLENECK': Bottleneck
}
class HRNetV2(nn.Module):
def __init__(self, n_class, **kwargs):
super(HRNetV2, self).__init__()
extra = {
'STAGE2': {'NUM_MODULES': 1, 'NUM_BRANCHES': 2, 'BLOCK': 'BASIC', 'NUM_BLOCKS': (4, 4), 'NUM_CHANNELS': (48, 96), 'FUSE_METHOD': 'SUM'},
'STAGE3': {'NUM_MODULES': 4, 'NUM_BRANCHES': 3, 'BLOCK': 'BASIC', 'NUM_BLOCKS': (4, 4, 4), 'NUM_CHANNELS': (48, 96, 192), 'FUSE_METHOD': 'SUM'},
'STAGE4': {'NUM_MODULES': 3, 'NUM_BRANCHES': 4, 'BLOCK': 'BASIC', 'NUM_BLOCKS': (4, 4, 4, 4), 'NUM_CHANNELS': (48, 96, 192, 384), 'FUSE_METHOD': 'SUM'},
'FINAL_CONV_KERNEL': 1
}
# stem net
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1,
bias=False)
self.bn1 = BatchNorm2d(64, momentum=BN_MOMENTUM)
self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1,
bias=False)
self.bn2 = BatchNorm2d(64, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(Bottleneck, 64, 64, 4)
self.stage2_cfg = extra['STAGE2']
num_channels = self.stage2_cfg['NUM_CHANNELS']
block = blocks_dict[self.stage2_cfg['BLOCK']]
num_channels = [
num_channels[i] * block.expansion for i in range(len(num_channels))]
self.transition1 = self._make_transition_layer([256], num_channels)
self.stage2, pre_stage_channels = self._make_stage(
self.stage2_cfg, num_channels)
self.stage3_cfg = extra['STAGE3']
num_channels = self.stage3_cfg['NUM_CHANNELS']
block = blocks_dict[self.stage3_cfg['BLOCK']]
num_channels = [
num_channels[i] * block.expansion for i in range(len(num_channels))]
self.transition2 = self._make_transition_layer(
pre_stage_channels, num_channels)
self.stage3, pre_stage_channels = self._make_stage(
self.stage3_cfg, num_channels)
self.stage4_cfg = extra['STAGE4']
num_channels = self.stage4_cfg['NUM_CHANNELS']
block = blocks_dict[self.stage4_cfg['BLOCK']]
num_channels = [
num_channels[i] * block.expansion for i in range(len(num_channels))]
self.transition3 = self._make_transition_layer(
pre_stage_channels, num_channels)
self.stage4, pre_stage_channels = self._make_stage(
self.stage4_cfg, num_channels, multi_scale_output=True)
    def _make_transition_layer(
            self, num_channels_pre_layer, num_channels_cur_layer):
        """Build the transition modules between two consecutive HRNet stages.

        One entry is produced per branch of the *next* stage:
          - existing branch, same channel count  -> None (identity; the
            caller's forward() checks for None and passes the tensor through)
          - existing branch, new channel count   -> 3x3 stride-1 conv + BN + ReLU
          - brand-new branch                     -> chain of 3x3 stride-2 convs
            that downsample the last (lowest-resolution) previous branch

        Returns an nn.ModuleList aligned with the next stage's branches.
        """
        num_branches_cur = len(num_channels_cur_layer)
        num_branches_pre = len(num_channels_pre_layer)

        transition_layers = []
        for i in range(num_branches_cur):
            if i < num_branches_pre:
                if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
                    # Channel widths differ: adapt with a stride-1 conv.
                    transition_layers.append(nn.Sequential(
                        nn.Conv2d(num_channels_pre_layer[i],
                                  num_channels_cur_layer[i],
                                  3,
                                  1,
                                  1,
                                  bias=False),
                        BatchNorm2d(
                            num_channels_cur_layer[i], momentum=BN_MOMENTUM),
                        nn.ReLU(inplace=True)))
                else:
                    transition_layers.append(None)
            else:
                # New branch: each strided conv halves the spatial resolution;
                # only the last conv of the chain changes the channel count.
                conv3x3s = []
                for j in range(i+1-num_branches_pre):
                    inchannels = num_channels_pre_layer[-1]
                    outchannels = num_channels_cur_layer[i] \
                        if j == i-num_branches_pre else inchannels
                    conv3x3s.append(nn.Sequential(
                        nn.Conv2d(
                            inchannels, outchannels, 3, 2, 1, bias=False),
                        BatchNorm2d(outchannels, momentum=BN_MOMENTUM),
                        nn.ReLU(inplace=True)))
                transition_layers.append(nn.Sequential(*conv3x3s))

        return nn.ModuleList(transition_layers)
def _make_layer(self, block, inplanes, planes, blocks, stride=1):
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),
)
layers = []
layers.append(block(inplanes, planes, stride, downsample))
inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(inplanes, planes))
return nn.Sequential(*layers)
    def _make_stage(self, layer_config, num_inchannels,
                    multi_scale_output=True):
        """Assemble one HRNet stage as a Sequential of HighResolutionModules.

        ``layer_config`` is one of the 'STAGE*' dicts defined in __init__.
        Returns the stage and the per-branch channel counts it outputs, so the
        caller can feed them into the next transition layer.
        """
        # Unpack the stage description (see the `extra` dict in __init__).
        num_modules = layer_config['NUM_MODULES']
        num_branches = layer_config['NUM_BRANCHES']
        num_blocks = layer_config['NUM_BLOCKS']
        num_channels = layer_config['NUM_CHANNELS']
        block = blocks_dict[layer_config['BLOCK']]
        fuse_method = layer_config['FUSE_METHOD']

        modules = []
        for i in range(num_modules):
            # multi_scale_output is only used last module
            if not multi_scale_output and i == num_modules - 1:
                reset_multi_scale_output = False
            else:
                reset_multi_scale_output = True
            modules.append(
                HighResolutionModule(
                    num_branches,
                    block,
                    num_blocks,
                    num_inchannels,
                    num_channels,
                    fuse_method,
                    reset_multi_scale_output)
            )
            # The module may alter per-branch widths; carry them forward so
            # consecutive modules (and the next stage) stay consistent.
            num_inchannels = modules[-1].get_num_inchannels()

        return nn.Sequential(*modules), num_inchannels
    def forward(self, x, return_feature_maps=False):
        """Run the HRNetV2 encoder and return the concatenated multi-scale
        feature map as a one-element list.

        NOTE(review): ``return_feature_maps`` is accepted but never used in
        this body -- presumably kept so the signature matches the other
        encoders in this file; confirm before removing.
        """
        # Stem: two stride-2 3x3 convs (so 1/4 input resolution) followed by
        # a stack of Bottleneck units (layer1).
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.layer1(x)

        # Stage 2: split into parallel branches via transition1 (a None entry
        # means the branch passes through unchanged).
        x_list = []
        for i in range(self.stage2_cfg['NUM_BRANCHES']):
            if self.transition1[i] is not None:
                x_list.append(self.transition1[i](x))
            else:
                x_list.append(x)
        y_list = self.stage2(x_list)

        # Stage 3: new branches are derived from the lowest-resolution output
        # (y_list[-1]); existing branches are forwarded as-is.
        x_list = []
        for i in range(self.stage3_cfg['NUM_BRANCHES']):
            if self.transition2[i] is not None:
                x_list.append(self.transition2[i](y_list[-1]))
            else:
                x_list.append(y_list[i])
        y_list = self.stage3(x_list)

        # Stage 4: same pattern, now with four parallel branches.
        x_list = []
        for i in range(self.stage4_cfg['NUM_BRANCHES']):
            if self.transition3[i] is not None:
                x_list.append(self.transition3[i](y_list[-1]))
            else:
                x_list.append(y_list[i])
        x = self.stage4(x_list)

        # Upsampling
        # Bring every branch up to the highest resolution and concatenate
        # along channels -- the HRNetV2 output representation.
        x0_h, x0_w = x[0].size(2), x[0].size(3)
        x1 = F.interpolate(
            x[1], size=(x0_h, x0_w), mode='bilinear', align_corners=False)
        x2 = F.interpolate(
            x[2], size=(x0_h, x0_w), mode='bilinear', align_corners=False)
        x3 = F.interpolate(
            x[3], size=(x0_h, x0_w), mode='bilinear', align_corners=False)

        x = torch.cat([x[0], x1, x2, x3], 1)

        # x = self.last_layer(x)
        return [x]
def hrnetv2(pretrained=False, **kwargs):
    """Construct an HRNetV2 encoder, optionally loading pretrained weights."""
    net = HRNetV2(n_class=1000, **kwargs)
    if pretrained:
        # strict=False: tolerate missing/extra keys in the checkpoint
        # (e.g. a classifier head that this encoder does not define).
        weights = load_url(model_urls['hrnetv2'])
        net.load_state_dict(weights, strict=False)
    return net
| 37.692825 | 164 | 0.551484 |
79598992fb86fbccaca1b789cd24b6e690fb6594 | 212 | py | Python | src/setup.py | siboles/bioMultiScale | 34fb96cd1f08c85b94fdb025b16eb8de23bba602 | [
"BSD-3-Clause"
] | null | null | null | src/setup.py | siboles/bioMultiScale | 34fb96cd1f08c85b94fdb025b16eb8de23bba602 | [
"BSD-3-Clause"
] | null | null | null | src/setup.py | siboles/bioMultiScale | 34fb96cd1f08c85b94fdb025b16eb8de23bba602 | [
"BSD-3-Clause"
] | null | null | null | try:
from setuptools.core import setup
except ImportError:
from distutils.core import setup
setup(
name='bioMultiScale',
version='0.0',
author='Scott Sibole',
packages=['bioMultiScale'])
| 19.272727 | 37 | 0.688679 |
79598c12d873d706c3783f909e5a611f7851d928 | 13,735 | py | Python | ocempgui/widgets/Table.py | djkool/OcempGUI3 | 43a68033cb0dbad10654231299cb762cd18b7c25 | [
"BSD-2-Clause"
] | null | null | null | ocempgui/widgets/Table.py | djkool/OcempGUI3 | 43a68033cb0dbad10654231299cb762cd18b7c25 | [
"BSD-2-Clause"
] | null | null | null | ocempgui/widgets/Table.py | djkool/OcempGUI3 | 43a68033cb0dbad10654231299cb762cd18b7c25 | [
"BSD-2-Clause"
] | null | null | null | # $Id: Table.py,v 1.26.2.8 2007/03/23 11:57:14 marcusva Exp $
#
# Copyright (c) 2004-2007, Marcus von Appen
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Widget class, which places its children in a table grid"""
from .Container import Container
from .Constants import *
from . import base
from functools import reduce
class Table (Container):
    """Table (rows, cols) -> Table

    A container widget, which packs its children in a table like manner.

    The Table class is a layout container, which packs it children in a
    regular, table like manner and allows each widget to be aligned
    within its table cell. The table uses a 0-based (Null-based)
    indexing, which means, that if 4 rows are created, they can be
    accessed using a row value ranging from 0 to 3. The same applies to
    the columns.

    The Table provides read-only 'columns' and 'rows' attributes, which
    are the amount of columns and rows within that Table.

    totalr = table.rows
    totalc = table.columns

    To access the children of the Table the 'grid' attribute can be
    used. It is a dictionary containing the widgets as values. To access
    a widget, a tuple containing the row and column is used as the
    dictionary key.

    widget = table.grid[(0, 3)]
    widget = table.grid[(7, 0)]

    The above examples will get the widget located at the first row,
    fourth column (0, 3) and the eighth row, first column (7, 0).

    The layout for each widget within the table can be set individually
    using the set_align() method. Alignments can be combined, which
    means, that a ALIGN_TOP | ALIGN_LEFT would align the widget at the
    topleft corner of its cell.

    However, not every alignment make sense, so a ALIGN_TOP | ALIGN_BOTTOM
    would cause the widget to be placed at the top. The priority
    order for the alignment follows. The lower the value, the higher the
    priority.

    Alignment   Priority
    -----------------------
    ALIGN_TOP      0
    ALIGN_BOTTOM   1
    ALIGN_LEFT     0
    ALIGN_RIGHT    1
    ALIGN_NONE     2

    Default action (invoked by activate()):
    None

    Mnemonic action (invoked by activate_mnemonic()):
    None

    Attributes:
    columns - The column amount of the Table.
    rows    - The row amount of the Table.
    grid    - Grid to hold the children of the Table.
    """
    def __init__ (self, rows, cols):
        Container.__init__ (self)
        if (type (rows) != int) or (type (cols) != int):
            raise TypeError ("Arguments must be positive integers")
        if (rows <= 0) or (cols <= 0):
            raise ValueError ("Arguments must be positive integers")
        self._cols = cols
        self._rows = rows

        # The grid for the children.
        self._grid = {}
        for i in range (self._rows):
            for j in range (self._cols):
                self._grid[(i, j)] = None # None means unused, !None is used.

        # Grid for the layout.
        self._layout = {}
        for i in range (self._rows):
            for j in range (self._cols):
                self._layout[(i, j)] = ALIGN_NONE

        # Width and height grids.
        # Per-column widths / per-row heights, filled by calculate_size().
        self._colwidth = {}
        self._rowheight = {}
        for i in range (self._cols):
            self._colwidth[i] = 0
        for i in range (self._rows):
            self._rowheight[i] = 0

        self.dirty = True # Enforce creation of the internals.

    def add_child (self, row, col, widget):
        """T.add_child (...) -> None

        Adds a widget into the cell located at (row, col) of the Table.

        Raises a ValueError, if the passed row and col arguments are not
        within the cell range of the Table.
        Raises an Exception, if the cell at the passed row and col
        coordinates is already occupied.
        """
        if (row, col) not in self.grid:
            raise ValueError ("Cell (%d, %d) out of range" % (row, col))
        if self.grid[(row, col)] != None:
            raise Exception ("Cell (%d, %d) already occupied" % (row, col))
        self.grid[(row, col)] = widget
        Container.add_child (self, widget)

    def remove_child (self, widget):
        """T.remove_widget (...) -> None

        Removes a widget from the Table.
        """
        Container.remove_child (self, widget)
        # Clear every cell that referenced the widget.
        for i in range (self._rows):
            for j in range (self._cols):
                if self.grid[(i, j)] == widget:
                    self.grid[(i, j)] = None

    def set_children (self, children):
        """T.set_children (...) -> None

        Sets the children of the Table.

        When setting the children of the Table, keep in mind, that the
        children will be added row for row, causing the Table to fill
        the first row of itself, then the second and so on.

        Raises a ValueError, if the passed amount of children exceeds
        the cell amount of the Table.
        """
        if children != None:
            if len (children) > (self.columns * self.rows):
                raise ValueError ("children exceed the Table size.")

        # Remove all children first.
        for i in range (self._rows):
            for j in range (self._cols):
                self.grid[(i, j)] = None

        Container.set_children (self, children)
        if children == None:
            return

        # Fill the grid row by row; children[-cells] walks the list from
        # its first element as `cells` counts down to zero.
        cells = len (children)
        for i in range (self._rows):
            for j in range (self._cols):
                self.grid[(i, j)] = children[-cells]
                cells -= 1
                if cells == 0:
                    return

    def insert_child (self, pos, *children):
        """C.insert_child (...) -> None

        Inserts one or more children at the desired position.

        Raises a NotImplementedError, as this method cannot be applied
        to the Table.
        """
        raise NotImplementedError

    def set_focus (self, focus=True):
        """T.set_focus (focus=True) -> None

        Overrides the set_focus() behaviour for the Table.

        The Table class is not focusable by default. It is a layout
        class for other widgets, so it does not need to get the input
        focus and thus it will return false without doing anything.
        """
        return False

    def set_align (self, row, col, align=ALIGN_NONE):
        """T.set_align (...) -> None

        Sets the alignment for a specific cell.

        Raises a ValueError, if the passed row and col arguments are not
        within the rows and columns of the Table.
        Raises a TypeError, if the passed align argument is not a value
        from ALIGN_TYPES.
        """
        if (row, col) not in self._layout:
            raise ValueError ("Cell (%d, %d) out of range" % (row, col))
        if not constants_is_align (align):
            raise TypeError ("align must be a value from ALIGN_TYPES")
        self._layout[(row, col)] = align
        self.dirty = True

    def set_column_align (self, col, align=ALIGN_NONE):
        """T.set_column_align (...) -> None

        Sets the alignment for a whole column range.

        Raises a ValueError, if the passed col argument is not within
        the column range of the Table.
        Raises a TypeError, if the passed align argument is not a value from
        ALIGN_TYPES.
        """
        if (0, col) not in self._layout:
            raise ValueError ("Column %d out of range" % col)
        if not constants_is_align (align):
            raise TypeError ("align must be a value from ALIGN_TYPES")
        for i in range (self.rows):
            self._layout[(i, col)] = align
        self.dirty = True

    def set_row_align (self, row, align=ALIGN_NONE):
        """T.set_row_align (...) -> None

        Sets the alignment for a whole row.

        Raises a ValueError, if the passed row argument is not within
        the row range of the Table.
        Raises a TypeError, if the passed align argument is not a value
        from ALIGN_TYPES.
        """
        if (row, 0) not in self._layout:
            raise ValueError ("Row %d out of range" % row)
        if not constants_is_align (align):
            raise TypeError ("align must be a value from ALIGN_TYPES")
        for i in range (self.columns):
            self._layout[(row, i)] = align
        self.dirty = True

    def destroy (self):
        """T.destroy () -> None

        Destroys the Table and all its children and shedules them for
        deletion by the renderer.
        """
        Container.destroy (self)
        del self._grid
        del self._layout
        del self._colwidth
        del self._rowheight

    def calculate_size (self):
        """T.calculate_size () -> int, int

        Calculates the size needed by the children.

        Calculates the size needed by the children and returns the
        resulting width and height.
        """
        for i in range (self._cols):
            self._colwidth[i] = 0
        for i in range (self._rows):
            self._rowheight[i] = 0

        spacing = self.spacing

        # Fill the width and height grids with correct values.
        # Column width is the max child width of that column; row height is
        # the max child height of that row (both including spacing).
        for row in range (self._rows):
            actheight = 0
            for col in range (self._cols):
                widget = self.grid[(row, col)]
                if not widget: # No child here.
                    continue
                cw = widget.width + spacing
                ch = widget.height + spacing
                if self._colwidth[col] < cw:
                    self._colwidth[col] = cw
                if actheight < ch:
                    actheight = ch

            if self._rowheight[row] < actheight:
                self._rowheight[row] = actheight

        height = reduce (lambda x, y: x + y, list(self._rowheight.values ()), 0)
        height += 2 * self.padding - spacing
        width = reduce (lambda x, y: x + y, list(self._colwidth.values ()), 0)
        width += 2 * self.padding - spacing
        return max (width, 0), max (height, 0)

    def dispose_widgets (self):
        """T.dispose_widgets (...) -> None

        Sets the children to their correct positions within the Table.
        """
        # Move all widgets to their correct position.
        spacing = self.spacing
        padding = self.padding
        x = padding
        y = padding
        for row in range (self._rows):
            for col in range (self._cols):
                widget = self.grid[(row, col)]
                if not widget: # no child here
                    x += self._colwidth[col]
                    continue

                # Dependant on the cell layout, move the widget to the
                # desired position.
                align = self._layout[(row, col)]

                # Default align is centered.
                # NOTE(review): '/' is true division under Python 3, so the
                # centered posx/posy may be floats -- confirm downstream
                # rendering accepts non-integer topleft coordinates.
                posx = x + (self._colwidth[col] - widget.width - spacing) / 2
                posy = y + (self._rowheight[row] - widget.height - spacing) / 2

                if align & ALIGN_LEFT == ALIGN_LEFT:
                    posx = x
                elif align & ALIGN_RIGHT == ALIGN_RIGHT:
                    posx = x + self._colwidth[col] - widget.width - spacing
                if align & ALIGN_TOP == ALIGN_TOP:
                    posy = y
                elif align & ALIGN_BOTTOM == ALIGN_BOTTOM:
                    posy = y + self._rowheight[row] - widget.height - spacing

                widget.topleft = (posx, posy)
                x += self._colwidth[col]
            y += self._rowheight[row]
            x = padding

    def draw_bg (self):
        """T.draw_bg () -> Surface

        Draws the background surface of the Table and returns it.

        Creates the visible surface of the Table and returns it to the
        caller.
        """
        return base.GlobalStyle.engine.draw_table (self)

    def draw (self):
        """T.draw () -> None

        Draws the Table surface and places its children on it.
        """
        Container.draw (self)
        # Draw all children.
        self.dispose_widgets ()
        blit = self.image.blit
        for widget in self.children:
            blit (widget.image, widget.rect)

    columns = property (lambda self: self._cols,
                        doc = "The column amount of the Table.")
    rows = property (lambda self: self._rows,
                     doc = "The row amount of the Table.")
    grid = property (lambda self: self._grid, doc = "The grid of the Table.")
| 36.432361 | 80 | 0.593811 |
79598c31a8e33ced577feb6eebaf2917bbf4511a | 3,925 | py | Python | backend/applications/user/migrations/0001_initial.py | relsi/muutoca | 8db8c7783558ac1f71cd0a257c23ddc8737e1cdf | [
"MIT"
] | 1 | 2021-03-06T23:27:47.000Z | 2021-03-06T23:27:47.000Z | backend/applications/user/migrations/0001_initial.py | relsi/muutoca | 8db8c7783558ac1f71cd0a257c23ddc8737e1cdf | [
"MIT"
] | 1 | 2021-03-09T23:06:24.000Z | 2021-03-09T23:06:24.000Z | backend/applications/user/migrations/0001_initial.py | relsi/muutoca | 8db8c7783558ac1f71cd0a257c23ddc8737e1cdf | [
"MIT"
] | 1 | 2021-03-06T18:49:10.000Z | 2021-03-06T18:49:10.000Z | # Generated by Django 3.1.7 on 2021-03-08 17:47
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated by `manage.py makemigrations` (see header comment).
    # Prefer regenerating over hand-editing so the migration history stays
    # consistent with the model definitions.

    # First migration of this app; Django treats it as the app's baseline.
    initial = True

    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
    ]

    operations = [
        # Custom User model: standard AbstractUser fields plus address,
        # business and delivery-related columns.
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('address_latitude', models.FloatField(null=True)),
                ('address_longitude', models.FloatField(null=True)),
                ('address_zipcode', models.CharField(max_length=15, null=True)),
                ('address_1', models.CharField(max_length=150, null=True)),
                ('address_2', models.CharField(max_length=100, null=True)),
                ('address_number', models.CharField(max_length=20, null=True)),
                ('address_city', models.CharField(max_length=30, null=True)),
                ('address_state', models.CharField(max_length=2, null=True)),
                ('address_country', models.CharField(max_length=20, null=True)),
                ('address_neighborhood', models.CharField(max_length=50, null=True)),
                ('ein_number', models.IntegerField(null=True)),
                ('is_deliveryman', models.BooleanField(default=False)),
                ('has_business', models.BooleanField(default=False)),
                ('accepted_the_terms', models.BooleanField(default=False)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'abstract': False,
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
    ]
| 66.525424 | 329 | 0.647898 |
79598c5df6af7ebdb1f484f0ac743e9e51e0efa7 | 1,331 | py | Python | ps1/ps1a.py | ghidalgo93/mit_python | 3664a6059d37e93566ececd94ba78e6382abfc87 | [
"MIT"
] | null | null | null | ps1/ps1a.py | ghidalgo93/mit_python | 3664a6059d37e93566ececd94ba78e6382abfc87 | [
"MIT"
] | null | null | null | ps1/ps1a.py | ghidalgo93/mit_python | 3664a6059d37e93566ececd94ba78e6382abfc87 | [
"MIT"
] | null | null | null |
# Float, Float, Float -> Int
# Given a starting annual salary (Float), a portion of salary to be saved (Float), and total cost of a dream home (Float), return the months it will take you to save up enough money for a down payment
# ask for user input
# calculate months it will take to save up
# stub
# def calc_month_to_downpayment(salary, perc_of_salary_saved, total_house_cost):
# return 0
# template
def calc_month_to_downpayment(anual_salary: float, percent_saved: float, total_cost: float) -> int:
    """Return the number of months needed to save a home down payment.

    Savings earn a 4% annual return, compounded monthly on the balance held
    at the start of each month; a fixed portion of the monthly salary is
    then added at the end of the month.

    :param anual_salary: gross annual salary
    :param percent_saved: fraction of salary saved each month (e.g. 0.10)
    :param total_cost: total cost of the dream home
    :return: number of whole months until savings reach the down payment
    """
    # consts
    portion_down_payment: float = 0.25  # perc. of total cost needed for downpayment
    roi: float = 0.04                   # annual return on invested savings

    down_payment: float = total_cost * portion_down_payment
    monthly_salary_saved: float = anual_salary / 12 * percent_saved

    # variables
    current_savings: float = 0.0
    month: int = 0
    # BUG FIX: the original condition was `<=`, which forced one extra month
    # when the balance landed exactly on the down payment; you already have
    # enough the moment savings reach the target.
    while current_savings < down_payment:
        current_savings += current_savings * (roi / 12) + monthly_salary_saved
        month += 1
    return month


calc_month_to_downpayment(120000, .10, 1000000)
| 38.028571 | 200 | 0.703982 |
79598eb9fd529e76bd3473780c59f56878ea8419 | 4,390 | py | Python | sdks/python/http_client/v1/polyaxon_sdk/models/v1_flink.py | AdamHillier/polyaxon | a6407f4b22eddcd970bb4be17e111cbc8d6bca6d | [
"Apache-2.0"
] | null | null | null | sdks/python/http_client/v1/polyaxon_sdk/models/v1_flink.py | AdamHillier/polyaxon | a6407f4b22eddcd970bb4be17e111cbc8d6bca6d | [
"Apache-2.0"
] | null | null | null | sdks/python/http_client/v1/polyaxon_sdk/models/v1_flink.py | AdamHillier/polyaxon | a6407f4b22eddcd970bb4be17e111cbc8d6bca6d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Polyaxon SDKs and REST API specification.
Polyaxon SDKs and REST API specification. # noqa: E501
The version of the OpenAPI document: 1.7.2
Contact: contact@polyaxon.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from polyaxon_sdk.configuration import Configuration
class V1Flink(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """
    # Auto-generated model for the Polyaxon `flink` run-connection spec.
    # Regenerate from the OpenAPI document instead of editing by hand.

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'kind': 'str',
        'spec': 'object'
    }

    attribute_map = {
        'kind': 'kind',
        'spec': 'spec'
    }

    def __init__(self, kind='flink', spec=None, local_vars_configuration=None):  # noqa: E501
        """V1Flink - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._kind = None
        self._spec = None
        self.discriminator = None

        if kind is not None:
            self.kind = kind
        if spec is not None:
            self.spec = spec

    @property
    def kind(self):
        """Gets the kind of this V1Flink.  # noqa: E501


        :return: The kind of this V1Flink.  # noqa: E501
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V1Flink.


        :param kind: The kind of this V1Flink.  # noqa: E501
        :type: str
        """

        self._kind = kind

    @property
    def spec(self):
        """Gets the spec of this V1Flink.  # noqa: E501


        :return: The spec of this V1Flink.  # noqa: E501
        :rtype: object
        """
        return self._spec

    @spec.setter
    def spec(self, spec):
        """Sets the spec of this V1Flink.


        :param spec: The spec of this V1Flink.  # noqa: E501
        :type: object
        """

        self._spec = spec

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models, lists and dicts.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1Flink):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1Flink):
            return True

        return self.to_dict() != other.to_dict()
| 26.768293 | 93 | 0.579499 |
795990c427e5ca12058d81883f066c33f498e2d4 | 5,036 | py | Python | project_redss/auto_code_show_messages.py | AfricasVoices/Project-REDSS | 64999c5240fe8f1c839ccafccfa1e75b155c4787 | [
"MIT"
] | null | null | null | project_redss/auto_code_show_messages.py | AfricasVoices/Project-REDSS | 64999c5240fe8f1c839ccafccfa1e75b155c4787 | [
"MIT"
] | 20 | 2018-11-21T15:58:36.000Z | 2019-03-12T11:19:59.000Z | project_redss/auto_code_show_messages.py | AfricasVoices/Project-REDSS | 64999c5240fe8f1c839ccafccfa1e75b155c4787 | [
"MIT"
] | null | null | null | import random
import time
from os import path
from core_data_modules.cleaners import somali, Codes
from core_data_modules.cleaners.cleaning_utils import CleaningUtils
from core_data_modules.traced_data import Metadata
from core_data_modules.traced_data.io import TracedDataCSVIO, TracedDataCodaV2IO
from core_data_modules.util import IOUtils
from dateutil.parser import isoparse
from project_redss.lib import ICRTools, Channels
from project_redss.lib import MessageFilters
from project_redss.lib.pipeline_configuration import PipelineConfiguration
class AutoCodeShowMessages(object):
    # Raw-field keys for every radio-show (RQA) coding plan in the pipeline.
    RQA_KEYS = []
    for plan in PipelineConfiguration.RQA_CODING_PLANS:
        RQA_KEYS.append(plan.raw_field)

    SENT_ON_KEY = "sent_on"
    NOISE_KEY = "noise"

    # Messages outside this window are dropped (times are UTC+3).
    PROJECT_START_DATE = isoparse("2018-12-02T00:00:00+03:00")
    PROJECT_END_DATE = isoparse("2018-12-31T00:00:00+03:00")

    # Size and fixed seed of the inter-coder-reliability sample.
    ICR_MESSAGES_COUNT = 200
    ICR_SEED = 0

    @classmethod
    def auto_code_show_messages(cls, user, data, icr_output_dir, coda_output_dir):
        """Filter, noise-tag and missing-label show messages, then export
        them for Coda coding and for ICR sampling.

        :param user: identifier recorded in each TracedData Metadata entry
        :param data: iterable of TracedData message runs
        :param icr_output_dir: directory for the ICR CSV samples
        :param coda_output_dir: directory for the Coda V2 JSON exports
        :return: the (mutated) data, after filtering and labelling
        """
        # Filter out test messages sent by AVF.
        if not PipelineConfiguration.DEV_MODE:
            data = MessageFilters.filter_test_messages(data)

        # Filter for runs which don't contain a response to any week's question
        data = MessageFilters.filter_empty_messages(data, cls.RQA_KEYS)

        # Filter out runs sent outwith the project start and end dates
        data = MessageFilters.filter_time_range(data, cls.SENT_ON_KEY, cls.PROJECT_START_DATE, cls.PROJECT_END_DATE)

        # Tag messages which are noise as being noise
        # (a run is noise only if every RQA answer it contains looks like noise).
        for td in data:
            is_noise = True
            for rqa_key in cls.RQA_KEYS:
                if rqa_key in td and not somali.DemographicCleaner.is_noise(td[rqa_key], min_length=10):
                    is_noise = False
            td.append_data({cls.NOISE_KEY: is_noise}, Metadata(user, Metadata.get_call_location(), time.time()))

        # Label missing data
        # (attach a TRUE_MISSING label for each coding plan whose raw field is absent).
        for td in data:
            missing_dict = dict()
            for plan in PipelineConfiguration.RQA_CODING_PLANS:
                if plan.raw_field not in td:
                    na_label = CleaningUtils.make_label_from_cleaner_code(
                        plan.code_scheme, plan.code_scheme.get_code_with_control_code(Codes.TRUE_MISSING),
                        Metadata.get_call_location()
                    )
                    missing_dict[plan.coded_field] = [na_label.to_dict()]

                    if plan.binary_code_scheme is not None:
                        na_label = CleaningUtils.make_label_from_cleaner_code(
                            plan.binary_code_scheme, plan.binary_code_scheme.get_code_with_control_code(Codes.TRUE_MISSING),
                            Metadata.get_call_location()
                        )
                        missing_dict[plan.binary_coded_field] = na_label.to_dict()
            td.append_data(missing_dict, Metadata(user, Metadata.get_call_location(), time.time()))

        # Label each message with channel keys
        Channels.set_channel_keys(user, data, cls.SENT_ON_KEY)

        # Filter for messages which aren't noise (in order to export to Coda and export for ICR)
        not_noise = MessageFilters.filter_noise(data, cls.NOISE_KEY, lambda x: x)

        # Output messages which aren't noise to Coda
        IOUtils.ensure_dirs_exist(coda_output_dir)
        for plan in PipelineConfiguration.RQA_CODING_PLANS:
            TracedDataCodaV2IO.compute_message_ids(user, not_noise, plan.raw_field, plan.id_field)

            output_path = path.join(coda_output_dir, plan.coda_filename)
            with open(output_path, "w") as f:
                TracedDataCodaV2IO.export_traced_data_iterable_to_coda_2(
                    not_noise, plan.raw_field, cls.SENT_ON_KEY, plan.id_field, {}, f
                )

        # Output messages for ICR
        IOUtils.ensure_dirs_exist(icr_output_dir)
        for plan in PipelineConfiguration.RQA_CODING_PLANS:
            rqa_messages = []
            for td in not_noise:
                # This test works because the only codes which have been applied at this point are TRUE_MISSING.
                # If any other coding is done above, this test will need to change.
                if plan.coded_field not in td:
                    rqa_messages.append(td)
                else:
                    assert len(td[plan.coded_field]) == 1
                    assert td[plan.coded_field][0]["CodeID"] == \
                           plan.code_scheme.get_code_with_control_code(Codes.TRUE_MISSING).code_id

            # Fixed seed keeps the ICR sample reproducible between runs.
            icr_messages = ICRTools.generate_sample_for_icr(
                rqa_messages, cls.ICR_MESSAGES_COUNT, random.Random(cls.ICR_SEED))

            icr_output_path = path.join(icr_output_dir, plan.icr_filename)
            with open(icr_output_path, "w") as f:
                TracedDataCSVIO.export_traced_data_iterable_to_csv(
                    icr_messages, f, headers=[plan.run_id_field, plan.raw_field]
                )

        return data
| 45.781818 | 124 | 0.662629 |
795990c9a3436c095fb91bd0e943960499ba1b40 | 4,754 | py | Python | .kodi/addons/plugin.video.p2p-streams/resources/core/parsers/livefootballws/main.py | C6SUMMER/allinclusive-kodi-pi | 8baf247c79526849c640c6e56ca57a708a65bd11 | [
"Apache-2.0"
] | null | null | null | .kodi/addons/plugin.video.p2p-streams/resources/core/parsers/livefootballws/main.py | C6SUMMER/allinclusive-kodi-pi | 8baf247c79526849c640c6e56ca57a708a65bd11 | [
"Apache-2.0"
] | null | null | null | .kodi/addons/plugin.video.p2p-streams/resources/core/parsers/livefootballws/main.py | C6SUMMER/allinclusive-kodi-pi | 8baf247c79526849c640c6e56ca57a708a65bd11 | [
"Apache-2.0"
] | 2 | 2018-04-17T17:34:39.000Z | 2020-07-26T03:43:33.000Z | # -*- coding: utf-8 -*-
"""
This plugin is 3rd party and not part of p2p-streams addon
livefootball.ws
"""
import sys,os
current_dir = os.path.dirname(os.path.realpath(__file__))
basename = os.path.basename(current_dir)
core_dir = current_dir.replace(basename,'').replace('parsers','')
sys.path.append(core_dir)
from peertopeerutils.webutils import *
from peertopeerutils.pluginxbmc import *
from peertopeerutils.directoryhandle import *
base_url = "http://www.livefootball.ws"
def module_tree(name, url, iconimage, mode, parser, parserfunction):
    """Entry point for this parser: with no sub-function, list the events
    page; with 'streams', list the stream links for one event URL."""
    if parserfunction == 'streams':
        livefootballws_streams(url)
    elif not parserfunction:
        livefootballws_events()
def livefootballws_events():
    # Scrape the livefootball.ws front page and add one directory entry per
    # match, converting kick-off times from Moscow time to the user's
    # configured timezone. On fetch failure, show an error dialog and stop.
    try:
        source = mechanize_browser(base_url)
    except: source = ""; xbmcgui.Dialog().ok(translate(40000),translate(40128))
    if source:
        items = re.findall('<div class="base custom" align="center"(.*?)</center></div><br></div>', source, re.DOTALL)
        number_of_items= len(items)
        for item in reversed(items):
            data = re.compile('<div style="text-align: center;">(.+?)</div>').findall(item)
            try:
                # The last centered div appears to hold either an "HH:MM"
                # kick-off time or an "Online" marker for live matches.
                check = re.compile(">.+? (.+?):(.+?)").findall(data[-1].replace("color:",""))
                if not check and "Online" not in data[-1]:pass
                else:
                    data_item = data[-1].replace("<strong>","").replace("</strong>","").replace('<span style="color: #008000;">','').replace("</span>","")
                    url = re.compile('<a href="(.+?)">').findall(item)
                    teams = re.compile('/.+?-(.+?).html').findall(url[0])
                    try:
                        match = re.compile('(.+?) (.+?) (.+?):(.*)').findall(data_item)
                        import datetime
                        from peertopeerutils import pytzimp
                        timezona= settings.getSetting('timezone_new')
                        # NOTE(review): year/month are hard-coded to June 2014;
                        # only day/hour/minute come from the page, and the time
                        # is localized as Europe/Moscow -- confirm intentional.
                        d = pytzimp.timezone(str(pytzimp.timezone('Europe/Moscow'))).localize(datetime.datetime(2014, 6, int(match[0][0]), hour=int(match[0][2]), minute=int(match[0][3])))
                        my_place=pytzimp.timezone(pytzimp.all_timezones[int(timezona)])
                        convertido=d.astimezone(my_place)
                        fmt = "%d %H:%M"
                        time=convertido.strftime(fmt)
                        addDir("[B][COLOR orange]("+translate(600012)+time+")[/COLOR][/B] "+teams[0],url[0],401,os.path.join(current_dir,'icon.png'),number_of_items,True,parser="livefootballws",parserfunction="streams")
                    except:
                        # NOTE(review): bare except -- any parsing/timezone
                        # failure falls through to the "no kick-off time"
                        # branch (e.g. live/"Online" rows).
                        if '<span style="color: #000000;">' not in data_item:
                            addDir("[B][COLOR green]("+data_item.replace('<br />','')+")[/COLOR][/B] "+teams[0],url[0],401,os.path.join(current_dir,'icon.png'),number_of_items,True,parser="livefootballws",parserfunction="streams")
                        else: pass
            except: pass
def livefootballws_streams(url):
try:
source = mechanize_browser(url)
except: source = ""; xbmcgui.Dialog().ok(translate(40000),translate(40128));sys.exit(0)
if source:
items = re.findall('<td style="text-align: center;">(.*?)</tr>', source, re.DOTALL)
number_of_items = len(items)
if items:
for item in items:
match =re.compile('href="(.+?)"').findall(item)
if match:
if "sop://" or "torrentstream" or "acestream://" in match[-1]:
stream_quality = re.compile('>(.+?) kbps</td>').findall(item)
channel_info_arr = re.compile('<td style="text-align: center;">(.+?)</td>').findall(item)
try:
channel = channel_info_arr[-4].replace('<span style="text-align: center;">','').replace('</span>','')
except: channel = 'N/A'
if "sop://" in match[-1]:
try:
addDir("[B][COLOR orange][SopCast] [/COLOR] "+channel+"[/B] ("+stream_quality[0]+' Kbs)',match[-1],2,os.path.join(current_dir,'icon.png'),number_of_items,False)
except: pass
elif "acestream://" in match[-1]:
link = re.compile("acestream://(.*)").findall(match[-1])
try:
addDir("[B][COLOR orange][Acestream] [/COLOR] "+channel.replace('<br />','')+"[/B] ("+stream_quality[0]+' Kbs)',link[0],1,os.path.join(current_dir,'icon.png'),number_of_items,False)
except: pass
elif "torrentstream" in match[-1]:
link = re.compile("http://torrentstream.org/stream/test.php\?id=(.*)").findall(match[-1])
try:
addDir("[B][COLOR orange][Acestream] [/COLOR] "+channel.replace('<br />','')+"[/B] ("+stream_quality[0]+' Kbs)',link[0],1,os.path.join(current_dir,'icon.png'),number_of_items,False)
except: pass
else:pass
else:
xbmcgui.Dialog().ok(translate(40000),translate(40022))
sys.exit(0)
| 51.11828 | 243 | 0.588136 |
795990e21efd013a7e023105fab405b39b9f48aa | 4,434 | py | Python | Code/Build/Models/DAISY/Training/SVMTraining.py | IliaBahmutov/ArtML | e44bdf03428c3984a76f556417bf47d90793dc2d | [
"BSD-3-Clause"
] | null | null | null | Code/Build/Models/DAISY/Training/SVMTraining.py | IliaBahmutov/ArtML | e44bdf03428c3984a76f556417bf47d90793dc2d | [
"BSD-3-Clause"
] | null | null | null | Code/Build/Models/DAISY/Training/SVMTraining.py | IliaBahmutov/ArtML | e44bdf03428c3984a76f556417bf47d90793dc2d | [
"BSD-3-Clause"
] | null | null | null | import numpy #for numpy storage
import os #to find files
import time #for time to complete
from sklearn import svm
import pickle
start = time.time()
from sklearn.calibration import CalibratedClassifierCV
from statistics import mean
#Import Training Data & Labels
odata = numpy.load("Data/20ImpTrainingData.npy")
data = odata.reshape(9826,(3*3*78))
isnot = numpy.load("Data/20ImpTrainingDataLabels.npy")
#Import Test Data & Labels
wdata0 = numpy.load("Data/UkiTestingData.npy")
wdata0 = wdata0.reshape(350,(3*3*78))
wdata1 = numpy.load("Data/LReTestingData.npy")
wdata1 = wdata1.reshape(384,(3*3*78))
wdata2 = numpy.load("Data/MinTestingData.npy")
wdata2 = wdata2.reshape(401,(3*3*78))
wdata3 = numpy.load("Data/HreTestingData.npy")
wdata3 = wdata3.reshape(403,(3*3*78))
wdata4 = numpy.load("Data/EreTestingData.npy")
wdata4 = wdata4.reshape(417,(3*3*78))
wdata5 = numpy.load("Data/PopTestingData.npy")
wdata5 = wdata5.reshape(445,(3*3*78))
wdata6 = numpy.load("Data/CFPTestingData.npy")
wdata6 = wdata6.reshape(484,(3*3*78))
wdata7 = numpy.load("Data/ROCTestingData.npy")
wdata7 = wdata7.reshape(627,(3*3*78))
wdata8 = numpy.load("Data/CubTestingData.npy")
wdata8 = wdata8.reshape(660,(3*3*78))
wdata9 = numpy.load("Data/NAPTestingData.npy")
wdata9 = wdata9.reshape(721,(3*3*78))
wdata10 = numpy.load("Data/NreTestingData.npy")
wdata10 = wdata10.reshape(766,(3*3*78))
wdata11 = numpy.load("Data/AExTestingData.npy")
wdata11 = wdata11.reshape(835,(3*3*78))
wdata12 = numpy.load("Data/BarTestingData.npy")
wdata12 = wdata12.reshape(1272,(3*3*78))
wdata13 = numpy.load("Data/AMNTestingData.npy")
wdata13 = wdata13.reshape(1300,(3*3*78))
wdata14 = numpy.load("Data/SymTestingData.npy")
wdata14 = wdata14.reshape(1358,(3*3*78))
wdata15 = numpy.load("Data/PImTestingData.npy")
wdata15 = wdata15.reshape(1934,(3*3*78))
wdata16 = numpy.load("Data/ExpTestingData.npy")
wdata16 = wdata16.reshape(2021,(3*3*78))
wdata17 = numpy.load("Data/RomTestingData.npy")
wdata17 = wdata17.reshape(2106,(3*3*78))
wdata18 = numpy.load("Data/RelTestingData.npy")
wdata18 = wdata18.reshape(3220,(3*3*78))
wdata19 = numpy.load("Data/ImpTestingData.npy")
wdata19 = wdata19.reshape(3918,(3*3*78))
#cval = 21 length from 2^-5 to 2^15
cval = [0.03125,0.0625,0.125,0.25,0.5,1,2,4,8,16,32,64,128]
print ("Training Test Data")
results = [0] *19
checkagainst = [0]
falsepositive = 0;
falsenegative = 0;
truepositive = 0;
truenegative = 0;
for cavls in cval:
machine = svm.LinearSVC(C = cavls, random_state = 2,max_iter = 1000000,loss="hinge")
machine = CalibratedClassifierCV(machine, cv = 3)
machine.fit(data,isnot)
#score the data
checkagainst[0] = mean(machine.predict_proba(wdata19)[:,1]) #true positive
falsenegative = 1-checkagainst[0]
#make sure correct wdataXX isn't in the results and that the other 19 are
results[0] = mean(machine.predict_proba(wdata0)[:,1])
results[1] = mean(machine.predict_proba(wdata1)[:,1])
results[2] = mean(machine.predict_proba(wdata2)[:,1])
results[3] = mean(machine.predict_proba(wdata3)[:,1])
results[4] = mean(machine.predict_proba(wdata4)[:,1])
results[5] = mean(machine.predict_proba(wdata5)[:,1])
results[6] = mean(machine.predict_proba(wdata6)[:,1])
results[7] = mean(machine.predict_proba(wdata7)[:,1])
results[8] = mean(machine.predict_proba(wdata8)[:,1])
results[9] = mean(machine.predict_proba(wdata9)[:,1])
results[10] = mean(machine.predict_proba(wdata10)[:,1])
results[11] = mean(machine.predict_proba(wdata11)[:,1])
results[12] = mean(machine.predict_proba(wdata12)[:,1])
results[13] = mean(machine.predict_proba(wdata13)[:,1])
results[14] = mean(machine.predict_proba(wdata14)[:,1])
results[15] = mean(machine.predict_proba(wdata15)[:,1])
results[16] = mean(machine.predict_proba(wdata16)[:,1])
results[17] = mean(machine.predict_proba(wdata17)[:,1])
results[18] = mean(machine.predict_proba(wdata18)[:,1])
for numbers in results:
falsepositive = falsepositive+numbers
truenegative = truenegative+(1-numbers)
#ACC = (TP+TN)/(TP+TN+FP+FN)
accuracy = ((truepositive+truenegative)/(truepositive+truenegative+falsepositive+falsenegative))
print (str(accuracy))
checkagainst = [0]
falsepositive = 0;
falsenegative = 0;
truepositive = 0;
truenegative = 0;
end = time.time()
print (str(round((end - start),2)) + " seconds to complete")
| 33.338346 | 100 | 0.707713 |
79599128dd99424536ac66090ac5ffad8d6881c2 | 874 | py | Python | nbgrader/preprocessors/computechecksums.py | FrattisUC/nbgrader | f6402dcbb875e41ee3317be9e7af518afda9f72c | [
"BSD-3-Clause-Clear"
] | 2 | 2021-09-11T20:32:18.000Z | 2021-09-11T20:32:37.000Z | nbgrader/preprocessors/computechecksums.py | FrattisUC/nbgrader | f6402dcbb875e41ee3317be9e7af518afda9f72c | [
"BSD-3-Clause-Clear"
] | 4 | 2019-03-02T11:49:46.000Z | 2020-09-07T10:17:52.000Z | nbgrader/preprocessors/computechecksums.py | FrattisUC/nbgrader | f6402dcbb875e41ee3317be9e7af518afda9f72c | [
"BSD-3-Clause-Clear"
] | 2 | 2019-05-31T08:53:48.000Z | 2019-05-31T09:42:26.000Z | from .. import utils
from . import NbGraderPreprocessor
class ComputeChecksums(NbGraderPreprocessor):
"""A preprocessor to compute checksums of grade cells."""
def preprocess_cell(self, cell, resources, cell_index):
# compute checksums of grade cell and solution cells
if utils.is_grade(cell) or utils.is_solution(cell) or utils.is_locked(cell):
checksum = utils.compute_checksum(cell)
cell.metadata.nbgrader['checksum'] = checksum
cell.metadata.nbgrader['cell_type'] = cell.cell_type
if utils.is_grade(cell) or utils.is_solution(cell):
self.log.debug(
"Checksum for %s cell '%s' is %s",
cell.metadata.nbgrader['cell_type'],
cell.metadata.nbgrader['grade_id'],
checksum)
return cell, resources
| 39.727273 | 84 | 0.624714 |
7959917bf69e54336c2e70db1716b91f5ab409b6 | 3,348 | py | Python | src/backend/mec/routes/meetings/crud.py | rollsroycedev/MEC | d9a342059f56d199acba1968659b9d440a764278 | [
"MIT"
] | 1 | 2021-09-07T14:53:16.000Z | 2021-09-07T14:53:16.000Z | src/backend/mec/routes/meetings/crud.py | rollsroycedev/MEC | d9a342059f56d199acba1968659b9d440a764278 | [
"MIT"
] | 24 | 2021-07-22T14:17:26.000Z | 2022-02-14T09:42:12.000Z | src/backend/mec/routes/meetings/crud.py | rropen/MEC | d9a342059f56d199acba1968659b9d440a764278 | [
"MIT"
] | null | null | null | from decimal import Decimal
from sqlalchemy.sql.functions import grouping_sets
from mec.models import Meeting
from mec.schemas import MeetingBase
from sqlalchemy.orm import Session
def get_all(db: Session, skip: int = 0, limit: int = 25):
"""Get all the meetings and return the them."""
return db.query(Meeting).offset(skip).limit(limit).all()
def update_group_cost(db: Session, meeting, meeting_data=None, is_delete=False):
if meeting_data:
groupMeetings = (
db.query(Meeting).filter_by(meetingGroup=meeting_data.meetingGroup).all()
)
else:
groupMeetings = (
db.query(Meeting).filter_by(meetingGroup=meeting.meetingGroup).all()
)
if not is_delete:
if (
not groupMeetings
): # if no matching groups, must be first meeting and thus groupCost == totalCost
meeting.groupCost = meeting_data.totalCost
else:
# new_group_cost = groupMeetings[-1].groupCost + Decimal(str(meeting_data.totalCost))
new_group_cost = sum([m.totalCost for m in groupMeetings]) + Decimal(
str(meeting_data.totalCost)
)
meeting.groupCost = new_group_cost
for old_cost_mtg in groupMeetings:
old_cost_mtg.groupCost = new_group_cost
return [meeting, groupMeetings]
else:
# new_group_cost = meeting.groupCost - Decimal(str(meeting_data.totalCost))
new_group_cost = sum([m.totalCost for m in groupMeetings]) - Decimal(
str(meeting.totalCost)
)
for old_cost_mtg in groupMeetings:
old_cost_mtg.groupCost = new_group_cost
return groupMeetings
def create_meeting(db: Session, meeting_data):
"""Take data from request and create a new meeting in the database."""
meeting = Meeting()
meeting.meetingId = meeting_data.meetingId
meeting.totalCost = meeting_data.totalCost
meeting.time = meeting_data.time
meeting.date = meeting_data.date
meeting.employeeNumber = meeting_data.employeeNumber
meeting.meetingGroup = meeting_data.meetingGroup
meeting.powerpointSlides = meeting_data.powerpointSlides
meeting.comment = meeting_data.comment
meeting.title = meeting_data.title
[meeting, groupMeetings] = update_group_cost(db, meeting, meeting_data)
db.add(meeting)
db.add_all(groupMeetings)
db.commit()
# Check the new record
new_meeting = db.query(Meeting).filter_by(id=meeting.id).first()
if new_meeting.meetingId == meeting_data.meetingId:
return True # successfully created record
else:
return False # didn't store correctly
def delete_meeting(db: Session, item_id):
"""Take a meetingId (not primary key "id") and remove the row from the database."""
mtg = db.query(Meeting).filter_by(meetingId=item_id).first()
# print(mtg)
groupMeetings = update_group_cost(db, mtg, is_delete=True)
# if type(groupMeetings) is list:
#
for meeting in groupMeetings:
print(meeting)
db.add(meeting)
db.delete(mtg)
db.commit()
# Check our work
row = db.query(Meeting).filter_by(meetingId=item_id).first()
if row:
return False # Row didn't successfully delete or another one exists
else:
return True # We were successful
| 33.48 | 97 | 0.678017 |
795993798a64145d7691a67cf4ff1b682bef5ab4 | 2,728 | py | Python | dnnv/verifiers/eran/utils.py | Felipetoledo4815/DNNV | b94147d7dec8e7b50943e616e57c7a1a2b8ed517 | [
"MIT"
] | null | null | null | dnnv/verifiers/eran/utils.py | Felipetoledo4815/DNNV | b94147d7dec8e7b50943e616e57c7a1a2b8ed517 | [
"MIT"
] | null | null | null | dnnv/verifiers/eran/utils.py | Felipetoledo4815/DNNV | b94147d7dec8e7b50943e616e57c7a1a2b8ed517 | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow.compat.v1 as tf
from typing import List, Type
from dnnv.nn.layers import Convolutional, FullyConnected, InputLayer, Layer
from dnnv.verifiers.common import HyperRectangle, VerifierTranslatorError
from .layers import conv_as_tf
def as_tf(
layers: List[Layer],
translator_error: Type[VerifierTranslatorError] = VerifierTranslatorError,
include_input: bool = False,
):
input_layer = layers[0]
if not isinstance(input_layer, InputLayer):
raise translator_error(
f"Unsupported input layer type: {type(input_layer).__name__!r}"
)
input_size = np.asarray(input_layer.shape)
if len(input_size) == 4:
input_size = input_size[[0, 2, 3, 1]]
input_size = [d if d >= 0 else None for d in input_size]
input_placeholder = x = tf.placeholder(input_layer.dtype, input_size)
seen_fullyconnected = False
for layer in layers[1:]:
if isinstance(layer, FullyConnected):
weights = layer.weights.astype(np.float32)
weights = weights[layer.w_permutation]
if not seen_fullyconnected:
seen_fullyconnected = True
if len(x.shape) == 4:
shape = np.array(tuple(int(d) for d in x.shape))
weights = weights[
(
np.arange(np.product(shape))
.reshape(shape[[0, 3, 1, 2]])
.transpose((0, 2, 3, 1))
.flatten()
)
]
if len(x.shape) > 2:
x = tf.reshape(x, (x.shape[0], -1))
if len(x.shape) == 1:
x = tf.reshape(x, (1, x.shape[0]))
x = tf.nn.bias_add(tf.matmul(x, weights), layer.bias.astype(np.float32))
x = x[0]
else:
x = tf.nn.bias_add(tf.matmul(x, weights), layer.bias.astype(np.float32))
if layer.activation == "relu":
x = tf.nn.relu(x)
elif layer.activation == "sigmoid":
x = tf.nn.sigmoid(x)
elif layer.activation == "tanh":
x = tf.nn.tanh(x)
elif layer.activation is not None:
raise translator_error(
f"{layer.activation} activation is currently unsupported"
)
elif isinstance(layer, Convolutional):
x = conv_as_tf(layer, x)
elif hasattr(layer, "as_tf"):
x = layer.as_tf(x)
else:
raise translator_error(f"Unsupported layer: {type(layer).__name__}")
if include_input:
return input_placeholder, x
return x
| 38.422535 | 88 | 0.547654 |
795993b3d43dd4c1023450b89e7d2616cbc110df | 9,286 | py | Python | engine/doc/grit_book/translate_xml.py | AugustoMoura/GritEnginePR | 0f8303df7f70972036d9b555dffe08cadb473926 | [
"MIT"
] | null | null | null | engine/doc/grit_book/translate_xml.py | AugustoMoura/GritEnginePR | 0f8303df7f70972036d9b555dffe08cadb473926 | [
"MIT"
] | null | null | null | engine/doc/grit_book/translate_xml.py | AugustoMoura/GritEnginePR | 0f8303df7f70972036d9b555dffe08cadb473926 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import os.path
import re
import textwrap
import lxml.etree as ET
class TranslateError:
def __init__(self, el, problem):
self.filename = el.base
self.line = el.sourceline
self.problem = problem
def __str__(self):
return '%s:%d: %s' % (self.filename, self.line, self.problem)
def Error(el, msg):
raise TranslateError(el, msg)
def AssertNoBody(el):
if el.text and el.text.strip('\n\t '):
Error(el, 'Cannot have text after element %s.' % el.tag)
def AssertNoTail(el):
if el.tail and el.tail.strip('\n\t '):
Error(el, 'Cannot have text after element %s.' % el.tag)
def AssertExists(el, attr):
v = el.get(attr)
if not v:
Error(el, 'Tag: "%s" needs attribute "%s".' % (el.tag, attr))
return v
def AssertTag(el, tag):
if el.tag != tag:
Error(el, 'Expected %s, not %s.' % (tag, el.tag))
def AssertFile(el, fname):
if not os.path.isfile(fname):
Error(el, "File does not exist: " + fname)
class Node:
def __init__(self, kind, parent, **kwargs):
self.kind = kind
self.parent = parent
self.attr = kwargs
def __getattr__(self, key):
return self.attr.get(key)
def __str__(self):
return repr(self)
def __repr__(self):
return 'Node("%s", attr=%s)' % (self.kind, repr(self.attr))
def __iter__(self):
if self.data:
for a in self.data:
yield a
def __len__(self):
return len(self.data)
def __nonzero__(self):
return True
def __eq__(self, other):
if other.kind != self.kind: return False
if other.parent != self.parent: return False
if other.attr != self.attr: return False
return True
def __ne__(self, other):
return not (self == other)
inline_tags = { 'def', 'web', 'issue', 'todo', 'sref', 'code', 'emph' }
def MinimiseWhiteSpace(n):
"""Convert any whitespace to a single space."""
return re.sub('[ \t\n]+', ' ', n)
def StripParagraphWhiteSpace(p, inside=False):
"""At the root of a paragraph, strip leading whitespace from the first
element, and trailing whitespace from the last. Minimise whitespace in all
other cases.
Args:
p: List of strings and tags.
inside: Whether or not we are recursing within the body of a tag within the paragraph.
Returns:
The list of strings and tags with maybe less whitespace.
"""
r = []
for i, n in enumerate(p):
if isinstance(n, basestring):
n = MinimiseWhiteSpace(n)
if not inside:
if i==0:
n = n.lstrip(' \t\n')
if i==len(p)-1:
n = n.rstrip(' \t\n')
elif n.data:
n.data = StripParagraphWhiteSpace(n.data, True)
r.append(n)
return r
def NonEmptyParagraph(para):
"""A paragraph is non-empty if it contains more than just whitespace."""
for el in para:
if not isinstance(el, basestring):
return True
if el.strip('\n\t'):
return True
return False
SECTION_MAP = {}
def ResolveReferences(ast):
if not isinstance(ast, Node):
return
if ast.kind == "SRef":
target = SECTION_MAP.get(ast.target)
# ChedkRef ensures that the references are all valid.
ast.target = target
for ch in ast:
ResolveReferences(ch)
def TranslateParagraphs(content, context_ast, dosplit=True):
"""Translate text and XML elements into paragraphs by examining whitespace.
Args:
content: A list of text and tags. The text may span several paragraphs
(represented by blank lines).
context_ast:
dosplit: Whether or not to treat blank lines as paragraph dividers.
Returns:
A list of paragraphs, where each paragraph is given as a list of strings and nodes.
"""
r = []
r2 = None
for i, c in enumerate(content):
if isinstance(c, basestring):
for i, s in enumerate(re.split('\n[ \t]*\n',c) if dosplit else [c]):
if i==0:
# Could be after an inline block, therefore not the beginning of a paragraph.
if not r2:
r2 = []
r.append(r2)
else:
r2 = []
r.append(r2)
r2.append(s)
else:
if not r2:
r2 = []
r.append(r2)
content2 = TranslateParagraphs(([c.text] if c.text else []) + [ch for ch in c], context_ast, False)
flattened = [item for sublist in content2 for item in sublist]
if c.tag == "def":
r2.append(Node('Definition', context_ast, data=flattened))
elif c.tag == "web":
r2.append(Node('Web', context_ast, url=c.get('url'), data=flattened))
elif c.tag == "issue":
if flattened:
Error(c, "Tag: %s should not have textual content." % c.tag)
r2.append(Node('Issue', context_ast, id=int(c.get('id'))))
elif c.tag == "todo":
r2.append(Node('Todo', context_ast, data=flattened))
elif c.tag == "emph":
r2.append(Node('Emph', context_ast, data=flattened))
elif c.tag == "code":
r2.append(Node('Code', context_ast, data=flattened))
elif c.tag == "sref":
target = c.get('id')
if not target:
Error(c, 'Tag: %s should have an id attribute.' % c.tag)
r2.append(Node('SRef', context_ast, target=target, data=flattened))
else:
Error(c, 'Unknown tag: ' + str(c.tag))
r = map(StripParagraphWhiteSpace, r)
r = filter(NonEmptyParagraph, r)
return r
def TranslateBlockContents(block, block_ast):
block_content = []
text_content = None
if block.text:
text_content = [block.text]
block_content.append(text_content)
for el in block:
if el.tag == ET.Comment:
if el.tail:
text_content = [el.tail]
block_content.append(text_content)
elif el.tag in inline_tags:
if text_content:
text_content.append(el)
else:
text_content = [el]
block_content.append(text_content)
if el.tail:
text_content.append(el.tail)
else:
block_content.append(el)
text_content = None
if el.tail:
text_content = [el.tail]
block_content.append(text_content)
output_content = []
for el in block_content:
if isinstance(el,list):
paragraphs = []
for inline in TranslateParagraphs(el, None):
n = Node('Paragraph', block_ast, data=inline)
for i in inline:
if not isinstance(i, basestring):
i.parent = n
paragraphs.append(n)
output_content += paragraphs
else:
if el.tag == "section":
id = AssertExists(el, 'id')
title = AssertExists(el, 'title')
sb = el.get('splitbelow')
# Add Section node without data field first.
translated_content = Node('Section', block_ast, split=sb=="true", id=id,
title=title, data=False)
SECTION_MAP[id] = translated_content
translated_content.data = TranslateBlockContents(el, translated_content)
AssertNoTail(el)
elif el.tag == "image":
src = el.get('src')
thumb_src = 'thumb_' + src
caption = MinimiseWhiteSpace(el.text or '')
title = MinimiseWhiteSpace(el.get('title'))
AssertFile(el, src)
AssertFile(el, thumb_src)
translated_content = Node('Image', block_ast, src=src, caption=caption, thumb_src=thumb_src, title=title)
elif el.tag == "ul":
AssertNoBody(el)
translated_items = []
translated_content = Node('UnorderedList', block_ast)
for item in el:
AssertTag(item, 'li')
translated_items.append(TranslateBlockContents(item, translated_content))
AssertNoTail(item)
translated_content.data = translated_items
elif el.tag == "lua":
translated_content = Node('Lua', block_ast, data=textwrap.dedent(el.text))
elif el.tag == "pre":
translated_content = Node('Preformatted', block_ast, data=textwrap.dedent(el.text))
else:
Error(el, 'Unknown tag: ' + str(el.tag))
output_content.append(translated_content)
return output_content
def GetOutputFile(node):
"""Return the node that begins the current file, by chasing parents."""
if not node.parent:
raise 'Got to the bottom of the DOM tree without finding an output file.'
if node.parent.split:
return node
return GetOutputFile(node.parent)
| 33.890511 | 121 | 0.553629 |
7959944703301245e6ccf3e2f97b433032f2f3c6 | 514 | py | Python | manage.py | mirest/stylist-api | 6687b807bd0ead95eca110f95e1fb8efcea847ae | [
"MIT"
] | 1 | 2019-05-02T15:37:14.000Z | 2019-05-02T15:37:14.000Z | manage.py | mirest/stylist-api | 6687b807bd0ead95eca110f95e1fb8efcea847ae | [
"MIT"
] | 4 | 2019-04-01T13:28:51.000Z | 2019-04-07T17:38:13.000Z | manage.py | mirest/stylist-api | 6687b807bd0ead95eca110f95e1fb8efcea847ae | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.get('DJANGO_SETTINGS_MODULE', 'config.default')
try:
from django.core.management import execute_from_command_line
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
execute_from_command_line(sys.argv)
| 32.125 | 73 | 0.678988 |
79599463a091b951938e0cc78113a493452b162c | 536 | py | Python | anibot/extractors/RSSExtractor.py | iddqd0/anibot | 2f72bc6d624706078dece81aac6c215f1f559b9d | [
"MIT"
] | null | null | null | anibot/extractors/RSSExtractor.py | iddqd0/anibot | 2f72bc6d624706078dece81aac6c215f1f559b9d | [
"MIT"
] | null | null | null | anibot/extractors/RSSExtractor.py | iddqd0/anibot | 2f72bc6d624706078dece81aac6c215f1f559b9d | [
"MIT"
] | null | null | null | import time, feedparser
from ..util import AnibotEvent
from .Extractor import Extractor
class RSSExtractor(Extractor):
name, url = "Dummy RSS Extractor", ""
polling = 600
def raw(self):
if self.url:
feed = feedparser.parse(self.url)
return feed.entries
def run(self):
while True:
data = self.poll()
if data:
self.bot.event(event=AnibotEvent(event=AnibotEvent.EXTRACTOR_INPUT, data=data, source=self))
time.sleep(self.polling)
| 28.210526 | 108 | 0.613806 |
79599521d59197c67fb64eaf68b14a6b1ec6846e | 43,755 | py | Python | test/pecos/xmc/xlinear/test_xlinear.py | yuhchenlin/pecos | 0085d92b09a83a0d81b67ce2b2e0b4b51d163992 | [
"Apache-2.0",
"BSD-3-Clause"
] | 2 | 2021-07-28T21:09:58.000Z | 2021-09-24T03:37:45.000Z | test/pecos/xmc/xlinear/test_xlinear.py | yuhchenlin/pecos | 0085d92b09a83a0d81b67ce2b2e0b4b51d163992 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | test/pecos/xmc/xlinear/test_xlinear.py | yuhchenlin/pecos | 0085d92b09a83a0d81b67ce2b2e0b4b51d163992 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2021-09-24T04:00:47.000Z | 2021-09-24T04:00:47.000Z | # Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
import pytest # noqa: F401; pylint: disable=unused-variable
from pytest import approx
def test_importable():
import pecos.xmc.xlinear # noqa: F401
from pecos.xmc.xlinear import XLinearModel # noqa: F401
from pecos.xmc import Indexer # noqa: F401
def test_cost_sensitive():
from pecos.utils import smat_util
from pecos.xmc import MLProblem, MLModel
X = smat_util.load_matrix("test/tst-data/xmc/xlinear/X.npz")
Y = smat_util.load_matrix("test/tst-data/xmc/xlinear/Y.npz")
# Cp=2.0 and R=none should equiv to Cp=1.0 and R=2.0
Cp = 2.0
model_v0 = MLModel.train(
MLProblem(X, Y, C=None, M=None, R=None),
train_params=MLModel.TrainParams(Cp=Cp),
)
R = smat_util.binarized(Y)
R.data = Cp * R.data
model_v1 = MLModel.train(
MLProblem(X, Y, C=None, M=None, R=R),
train_params=MLModel.TrainParams(Cp=1.0),
)
assert model_v0.W.todense() == approx(model_v1.W.todense(), abs=1e-9)
def test_predict_consistency_between_python_and_cpp(tmpdir):
import subprocess
import shlex
from pecos.xmc import PostProcessor
from pecos.xmc.xlinear import XLinearModel as py_xlm
train_X_file = "test/tst-data/xmc/xlinear/X.npz"
train_Y_file = "test/tst-data/xmc/xlinear/Y.npz"
test_X_file = "test/tst-data/xmc/xlinear/Xt.npz"
model_folder = str(tmpdir.join("save_model"))
models = []
# Obtain a xlinear model
cmd = []
cmd += ["python3 -m pecos.xmc.xlinear.train"]
cmd += ["-x {}".format(train_X_file)]
cmd += ["-y {}".format(train_Y_file)]
cmd += ["-m {}".format(model_folder)]
process = subprocess.run(
shlex.split(" ".join(cmd)), stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
assert process.returncode == 0, " ".join(cmd)
models += [model_folder]
# Obtain xlinear models with vairous number of splits
for splits in [2, 4]:
model_folder_local = f"{model_folder}-{splits}"
cmd = []
cmd += ["python3 -m pecos.xmc.xlinear.train"]
cmd += [f"-x {train_X_file}"]
cmd += [f"-y {train_Y_file}"]
cmd += [f"--nr-splits {splits}"]
cmd += [f"--max-leaf-size 2"]
cmd += [f"-m {model_folder_local}"]
process = subprocess.run(
shlex.split(" ".join(cmd)), stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
assert process.returncode == 0, " ".join(cmd)
models += [model_folder_local]
X = py_xlm.load_feature_matrix(test_X_file)
for model in models:
py_m = py_xlm.load(model)
py_bin_search_m = py_xlm.load(
model, is_predict_only=True, weight_matrix_type="BINARY_SEARCH_CHUNKED"
)
py_hash_m = py_xlm.load(model, is_predict_only=True, weight_matrix_type="HASH_CHUNKED")
py_csc_m = py_xlm.load(model, is_predict_only=True, weight_matrix_type="CSC")
for pp in PostProcessor.valid_list():
kwargs = {"post_processor": pp, "beam_size": 2}
# in batch mode
py_pred = py_m.predict(X, **kwargs).todense()
# Sparse inputs
# Test csr_t x bin_search_chunked_matrix_t
py_bin_search_pred = py_bin_search_m.predict(X, **kwargs).todense()
# Test csr_t x hash_chunked_matrix_t
py_hash_pred = py_hash_m.predict(X, **kwargs).todense()
# Test csr_t x csc_t
py_csc_pred = py_csc_m.predict(X, **kwargs).todense()
# Dense inputs
# Test drm_ x binary search chunked
py_bin_search_dense_pred = py_bin_search_m.predict(X.todense(), **kwargs).todense()
# Test drm_t x hash chunked
py_hash_chunked_dense_pred = py_hash_m.predict(X.todense(), **kwargs).todense()
# Test drm_t x csc_t
py_csc_dense_pred = py_csc_m.predict(X.todense(), **kwargs).todense()
assert py_bin_search_pred == approx(
py_pred, abs=1e-6
), f"model:{model} (sparse, bin-search) post_processor:{pp}"
assert py_hash_pred == approx(
py_pred, abs=1e-6
), f"model:{model} (sparse, hash) post_processor:{pp}"
assert py_csc_pred == approx(
py_pred, abs=1e-6
), f"model:{model} (sparse, csc) post_processor:{pp}"
assert py_bin_search_dense_pred == approx(
py_pred, abs=1e-6
), f"model:{model} (dense, bin-search) post_processor:{pp}"
assert py_hash_chunked_dense_pred == approx(
py_pred, abs=3e-6
), f"model:{model} (dense, hash) post_processor:{pp}"
assert py_csc_dense_pred == approx(
py_pred, abs=1e-6
), f"model:{model} (dense, csc) post_processor:{pp}"
# in realtime mode
for i in range(X.shape[0]):
query_slice = X[[i], :]
# Some versions of Scipy don't maintain sortedness when slicing
query_slice.sort_indices()
py_pred = py_m.predict(query_slice, **kwargs).todense()
# Sparse Inputs
# Test csr_t x bin_search_chunked_matrix_t
py_bin_search_pred = py_bin_search_m.predict(query_slice, **kwargs).todense()
# Test csr_t x hash_chunked_matrix_t
py_hash_pred = py_hash_m.predict(query_slice, **kwargs).todense()
# Test csr_t x csc_t
py_csc_pred = py_csc_m.predict(query_slice, **kwargs).todense()
# Dense Inputs
# Test drm_ x binary search chunked
py_bin_search_dense_pred = py_bin_search_m.predict(
query_slice.todense(), **kwargs
).todense()
# Test drm_t x hash chunked
py_hash_chunked_dense_pred = py_hash_m.predict(
query_slice.todense(), **kwargs
).todense()
# Test csr_t x csc_t
py_csc_dense_pred = py_csc_m.predict(query_slice.todense(), **kwargs).todense()
assert py_bin_search_pred == approx(
py_pred, abs=1e-6
), f"model:{model} (sparse, bin-search) post_processor:{pp}, inst:{i}"
assert py_hash_pred == approx(
py_pred, abs=1e-6
), f"model:{model} (sparse, hash) post_processor:{pp}, inst:{i}"
assert py_csc_pred == approx(
py_pred, abs=1e-6
), f"model:{model} (sparse, csc) post_processor:{pp}, inst:{i}"
assert py_bin_search_dense_pred == approx(
py_pred, abs=1e-6
), f"model:{model} (dense, bin-search) post_processor:{pp}, inst:{i}"
assert py_hash_chunked_dense_pred == approx(
py_pred, abs=3e-6
), f"model:{model} (dense, hash) post_processor:{pp}, inst:{i}"
assert py_csc_dense_pred == approx(
py_pred, abs=1e-6
), f"model:{model} (dense, csc) post_processor:{pp}, inst:{i}"
def test_cli(tmpdir):
    """End-to-end smoke test of the pecos.xmc.xlinear CLI entry points.

    For both sparse and dense feature inputs this exercises:
    plain training; training from a saved cluster chain and from a saved
    last-layer code matrix; training with user-supplied negatives; training
    with matcher-aware negatives; and training with varying tree arities.
    Each training run is followed by batch and selected-output inference
    whose saved predictions are checked against stored ground truth, plus
    the evaluate CLI whose stdout is compared exactly.
    """
    import subprocess
    import shlex
    import numpy as np
    import scipy.sparse as smat
    from pecos.xmc.xlinear import XLinearModel as xlm
    from pecos.xmc import Indexer, LabelEmbeddingFactory
    from pecos.utils import smat_util

    train_sX_file = "test/tst-data/xmc/xlinear/X.npz"
    train_dX_file = str(tmpdir.join("X.trn.npy"))
    train_Y_file = "test/tst-data/xmc/xlinear/Y.npz"
    test_sX_file = "test/tst-data/xmc/xlinear/Xt.npz"
    test_dX_file = str(tmpdir.join("X.tst.npy"))
    test_Y_file = "test/tst-data/xmc/xlinear/Yt.npz"
    true_Y_pred_file = "test/tst-data/xmc/xlinear/Yt_pred.npz"
    true_Y_pred_with_man_file = "test/tst-data/xmc/xlinear/Yt_pred_with_tfn+man.npz"
    true_Yt_pred_with_splits = {
        2: "test/tst-data/xmc/xlinear/P:nr_splits=2.npz",
        4: "test/tst-data/xmc/xlinear/P:nr_splits=4.npz",
    }
    test_Y_pred_file = str(tmpdir.join("Yt_pred_test.npz"))
    code_file = str(tmpdir.join("codes.npz"))
    cluster_chain_folder = str(tmpdir.join("cluster_chain"))
    match_file = str(tmpdir.join("M.npz"))
    model_folder = str(tmpdir.join("save_model"))

    # Exact expected stdout of the evaluate CLI on this (tiny) data set.
    expected_eval_output = b"==== evaluation results ====\nprec = 100.00 50.00 33.33 25.00 20.00 16.67 14.29 12.50 11.11 10.00\nrecall = 100.00 100.00 100.00 100.00 100.00 100.00 100.00 100.00 100.00 100.00\n"

    def run_cli(cmd_parts):
        """Run one CLI invocation, assert a clean exit, return the process."""
        cmd = " ".join(cmd_parts)
        process = subprocess.run(
            shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        assert process.returncode == 0, cmd
        return process

    def train(x_file, *extra_args):
        """Invoke the train CLI on (x_file, train_Y_file) plus extra flags."""
        run_cli(
            [
                "python3 -m pecos.xmc.xlinear.train",
                "-x {}".format(x_file),
                "-y {}".format(train_Y_file),
            ]
            + list(extra_args)
        )

    def predict_and_check(x_file, truth_file, model_dir, *extra_args, selected_output=None):
        """Invoke the predict CLI (optionally in selected-output mode via
        -so) and compare the saved prediction matrix against truth_file."""
        cmd = [
            "python3 -m pecos.xmc.xlinear.predict",
            "-x {}".format(x_file),
            "-y {}".format(test_Y_file),
        ]
        if selected_output is not None:
            cmd += ["-so {}".format(selected_output)]
        cmd += ["-o {}".format(test_Y_pred_file), "-m {}".format(model_dir)]
        cmd += list(extra_args)
        run_cli(cmd)
        truth = smat_util.load_matrix(truth_file)
        pred = smat_util.load_matrix(test_Y_pred_file)
        assert pred.todense() == approx(truth.todense(), abs=1e-6)

    def evaluate_and_check(pred_file):
        """Invoke the evaluate CLI and compare its stdout exactly."""
        process = run_cli(
            [
                "python3 -m pecos.xmc.xlinear.evaluate",
                "-y {}".format(test_Y_file),
                "-p {}".format(pred_file),
            ]
        )
        assert process.stdout == expected_eval_output

    # Materialize dense copies of the sparse feature matrices.
    np.save(train_dX_file, smat_util.load_matrix(train_sX_file).toarray(), allow_pickle=False)
    np.save(test_dX_file, smat_util.load_matrix(test_sX_file).toarray(), allow_pickle=False)

    for train_X, test_X in [(train_sX_file, test_sX_file), (train_dX_file, test_dX_file)]:
        # Plain training, then batch inference, selected-output inference,
        # and evaluation against the reference prediction.
        train(train_X, "-m {}".format(model_folder))
        predict_and_check(test_X, true_Y_pred_file, model_folder)
        predict_and_check(
            test_X, true_Y_pred_file, model_folder, selected_output=true_Y_pred_file
        )
        evaluate_and_check(true_Y_pred_file)

        # Training with an existing clustering.
        X = xlm.load_feature_matrix(train_X)
        Y = xlm.load_label_matrix(train_Y_file)
        label_feat = LabelEmbeddingFactory.create(Y, X, method="pifa")

        # (a) cluster chain stored in a cluster folder
        cluster_chain = Indexer.gen(label_feat)
        cluster_chain.save(cluster_chain_folder)
        train(train_X, "-c {}".format(cluster_chain_folder), "-m {}".format(model_folder))

        # (b) last-layer code matrix stored as a scipy.sparse matrix
        C = cluster_chain[-1]
        smat.save_npz(code_file, C)
        train(train_X, "-c {}".format(code_file), "-m {}".format(model_folder))
        predict_and_check(test_X, true_Y_pred_file, model_folder)
        predict_and_check(
            test_X, true_Y_pred_file, model_folder, selected_output=true_Y_pred_file
        )

        # Training with user-supplied negatives (matching matrix M = Y * C).
        M = (Y * C).tocsc()
        smat.save_npz(match_file, M)
        train(
            train_X,
            "-c {}".format(code_file),
            "-m {}".format(model_folder),
            "-um {}".format(match_file),
            "-uy {}".format(train_Y_file),
        )
        predict_and_check(test_X, true_Y_pred_file, model_folder)
        predict_and_check(
            test_X, true_Y_pred_file, model_folder, selected_output=true_Y_pred_file
        )
        evaluate_and_check(true_Y_pred_file)

        # Training with matcher-aware negatives. The original test ran this
        # train/predict sequence twice back-to-back; keep both passes to
        # also cover retraining into an existing model folder.
        for _ in range(2):
            train(train_X, "-m {}".format(model_folder), "-pp noop", "-b 2", "-ns tfn+man")
            predict_and_check(
                test_X, true_Y_pred_with_man_file, model_folder, "-pp sigmoid", "-b 4"
            )
            predict_and_check(
                test_X,
                true_Y_pred_with_man_file,
                model_folder,
                "-pp sigmoid",
                "-b 4",
                selected_output=true_Y_pred_with_man_file,
            )

        # Training with various numbers of splits to construct the hierarchy.
        for splits in [2, 4]:
            model_folder_local = f"{model_folder}-{splits}"
            train(
                train_X,
                f"--nr-splits {splits}",
                "--max-leaf-size 2",
                f"-m {model_folder_local}",
            )
            predict_and_check(
                test_X, true_Yt_pred_with_splits[splits], model_folder_local, "-B 2"
            )
            predict_and_check(
                test_X,
                true_Yt_pred_with_splits[splits],
                model_folder_local,
                "-B 2",
                selected_output=true_Yt_pred_with_splits[splits],
            )
def test_split_model_at_depth():
    """Splitting a two-layer XLinearModel at depth 1 should produce one
    parent model plus one child model per cluster, with child weights
    either compacted (reindex=True) or kept in the global label space
    (reindex=False)."""
    import numpy as np
    import scipy.sparse as smat
    from pecos.xmc.xlinear.model import XLinearModel
    from pecos.xmc import MLModel, HierarchicalMLModel

    def make_two_layer_model():
        """Build a depth-2 model: 1 root -> 2 clusters -> 4 labels."""
        top_C = smat.csc_matrix([[1], [1]], dtype=np.float32)
        top_W = smat.csc_matrix(np.random.normal(size=(10, 2)), dtype=np.float32)
        leaf_C = smat.csc_matrix([[1, 0], [1, 0], [0, 1], [0, 1]], dtype=np.float32)
        leaf_W = smat.csc_matrix(np.random.normal(size=(10, 4)), dtype=np.float32)
        chain = [MLModel(C=top_C, W=top_W), MLModel(C=leaf_C, W=leaf_W)]
        return top_C, top_W, leaf_C, leaf_W, XLinearModel(HierarchicalMLModel(chain))

    # --- reindex=True: child weight columns are renumbered from zero ---
    top_C, top_W, leaf_C, leaf_W, model = make_two_layer_model()
    pieces = model.split_model_at_depth(given_depth=1, reindex=True)
    parent = pieces["parent_model"]
    children = pieces["child_models"]
    assert len(parent.model.model_chain) == 1
    assert len(children) == 2
    assert len(children[0][0].model.model_chain) == 1
    # `!= other` on sparse matrices yields the mismatch mask; nnz==0 means equal.
    assert (parent.model.model_chain[0].C != top_C).nnz == 0
    assert (parent.model.model_chain[0].W != top_W).nnz == 0
    assert (children[0][0].model.model_chain[0].C != top_C).nnz == 0
    assert (children[0][0].model.model_chain[0].W != leaf_W[:, 0:2]).nnz == 0
    assert (children[1][0].model.model_chain[0].C != top_C).nnz == 0
    assert (children[1][0].model.model_chain[0].W != leaf_W[:, 2::]).nnz == 0
    # Each child carries the original label ids it owns.
    assert children[0][1][0] == 0
    assert children[0][1][1] == 1
    assert children[1][1][0] == 2
    assert children[1][1][1] == 3

    # --- reindex=False: matrices keep the global label-space shapes ---
    top_C, top_W, leaf_C, leaf_W, model = make_two_layer_model()
    pieces = model.split_model_at_depth(given_depth=1, reindex=False)
    parent = pieces["parent_model"]
    children = pieces["child_models"]
    assert (parent.model.model_chain[0].C != top_C[:, [0]]).nnz == 0
    assert (parent.model.model_chain[0].W != top_W).nnz == 0
    assert children[0][0].model.model_chain[0].C.shape == (4, 1)
    assert children[0][0].model.model_chain[0].W.shape == (10, 4)
    assert len(children[0][0].model.model_chain[0].W.data) == 20
def test_reconstruct_model():
    """Splitting a model at depth 1 and reconstructing it must round-trip
    both the clustering (C) and weight (W) matrices of every layer."""
    import numpy as np
    import scipy.sparse as smat
    from pecos.xmc.xlinear.model import XLinearModel
    from pecos.xmc import MLModel, HierarchicalMLModel

    def build(leaf_rows):
        """Build a depth-2 model whose leaf clustering is `leaf_rows`."""
        top_C = smat.csc_matrix([[1], [1]], dtype=np.float32)
        top_W = smat.csc_matrix(np.random.normal(size=(10, 2)), dtype=np.float32)
        leaf_C = smat.csc_matrix(leaf_rows, dtype=np.float32)
        leaf_W = smat.csc_matrix(np.random.normal(size=(10, 4)), dtype=np.float32)
        chain = [MLModel(C=top_C, W=top_W), MLModel(C=leaf_C, W=leaf_W)]
        return XLinearModel(HierarchicalMLModel(chain))

    def check_round_trip(original, rebuilt):
        """Assert `rebuilt` has the expected shapes and equals `original`."""
        assert len(rebuilt.model.model_chain) == 2
        assert rebuilt.model.model_chain[0].C.shape == (2, 1)
        assert rebuilt.model.model_chain[0].W.shape == (10, 2)
        assert rebuilt.model.model_chain[1].C.shape == (4, 2)
        assert rebuilt.model.model_chain[1].W.shape == (10, 4)
        for d in range(2):
            # nnz of the `!=` mask is 0 iff the sparse matrices are equal.
            assert (rebuilt.model.model_chain[d].C != original.model.model_chain[d].C).nnz == 0
            assert (rebuilt.model.model_chain[d].W != original.model.model_chain[d].W).nnz == 0

    # Contiguous label-to-cluster assignment: no id remapping needed.
    model = build([[1, 0], [1, 0], [0, 1], [0, 1]])
    parts = model.split_model_at_depth(given_depth=1, reindex=True)
    kids = [pair[0] for pair in parts["child_models"]]
    check_round_trip(model, XLinearModel.reconstruct_model(parts["parent_model"], kids))

    # Interleaved assignment: reconstruction needs the original label ids.
    model = build([[1, 0], [0, 1], [1, 0], [0, 1]])
    parts = model.split_model_at_depth(given_depth=1, reindex=True)
    kids = [pair[0] for pair in parts["child_models"]]
    kid_label_ids = [pair[1] for pair in parts["child_models"]]
    check_round_trip(
        model,
        XLinearModel.reconstruct_model(parts["parent_model"], kids, kid_label_ids),
    )
def test_manual_init(tmpdir):
    """A model assembled by hand from a trained model's C/W chains must
    predict identically to the trained model itself."""
    import numpy as np
    from pecos.xmc.xlinear.model import XLinearModel
    from pecos.xmc import MLModel, HierarchicalMLModel
    from pecos.utils import smat_util

    X_trn = smat_util.load_matrix("test/tst-data/xmc/xlinear/X.npz").astype(np.float32)
    Y_trn = smat_util.load_matrix("test/tst-data/xmc/xlinear/Y.npz").astype(np.float32)
    X_tst = smat_util.load_matrix("test/tst-data/xmc/xlinear/Xt.npz").astype(np.float32)

    trained = XLinearModel.train(X_trn, Y_trn, bias=1.0)
    # Rebuild the hierarchy layer by layer from the trained matrices.
    rebuilt_chain = [
        MLModel(C=layer.C, W=layer.W, bias=1.0) for layer in trained.model.model_chain
    ]
    rebuilt = XLinearModel(HierarchicalMLModel(rebuilt_chain))

    assert trained.predict(X_tst).todense() == approx(
        rebuilt.predict(X_tst).todense(), abs=1e-6
    )
def test_matcher_ranker_mode():
    """Training a matcher and then a ranker (chained through the matcher's
    predictions as user-supplied negatives) must reproduce the predictions
    of a single full-model training run."""
    from pecos.utils import smat_util
    from pecos.xmc.xlinear.model import XLinearModel
    from pecos.xmc import Indexer, LabelEmbeddingFactory

    X = smat_util.load_matrix("test/tst-data/xmc/xlinear/X.npz")
    Y = smat_util.load_matrix("test/tst-data/xmc/xlinear/Y.npz")
    Xt = smat_util.load_matrix("test/tst-data/xmc/xlinear/Xt.npz")
    pred_kwargs = {"post_processor": "noop"}
    chain = Indexer.gen(
        LabelEmbeddingFactory.create(Y, X, method="pifa"), max_leaf_size=2
    )

    # Stage 1: matcher trained down to the ranker level.
    matcher = XLinearModel.train(
        X,
        Y,
        C=chain,
        ranker_level=3,
        mode="matcher",
        negative_sampling_scheme="tfn+man",
        pred_kwargs=pred_kwargs,
    )
    matcher_pred = matcher.predict(Xt, only_topk=2)

    # Stage 2: ranker fed the matcher's predictions as negatives.
    ranker = XLinearModel.train(
        X,
        Y,
        C=chain,
        ranker_level=3,
        mode="ranker",
        user_supplied_negatives={3: matcher_pred},
        negative_sampling_scheme="usn+tfn+man",
        pred_kwargs=pred_kwargs,
    )
    staged_pred = ranker.predict(Xt, only_topk=2)

    # Reference: one end-to-end full-model training run.
    full = XLinearModel.train(
        X,
        Y,
        C=chain,
        mode="full-model",
        negative_sampling_scheme="tfn+man",
        pred_kwargs=pred_kwargs,
    )
    full_pred = full.predict(Xt, only_topk=2)

    assert staged_pred.todense() == approx(full_pred.todense(), abs=1e-6)
def test_ova_shallow_mode(tmpdir):
    """A one-versus-all model (C=None) and a shallow single-layer model must
    both reach the reference evaluation metrics on the toy data set."""
    from pecos.utils import smat_util
    from pecos.xmc.xlinear.model import XLinearModel
    from pecos.xmc import Indexer, LabelEmbeddingFactory
    import subprocess
    import shlex

    X = smat_util.load_matrix("test/tst-data/xmc/xlinear/X.npz")
    Y = smat_util.load_matrix("test/tst-data/xmc/xlinear/Y.npz")
    Xt = smat_util.load_matrix("test/tst-data/xmc/xlinear/Xt.npz")
    chain = Indexer.gen(LabelEmbeddingFactory.create(Y, X, method="pifa"))

    expected_stdout = b"==== evaluation results ====\nprec = 100.00 50.00 33.33 25.00 20.00 16.67 14.29 12.50 11.11 10.00\nrecall = 100.00 100.00 100.00 100.00 100.00 100.00 100.00 100.00 100.00 100.00\n"

    def evaluate(pred_path):
        """Run the evaluate CLI on pred_path and check its exact stdout."""
        cmd = " ".join(
            [
                "python3 -m pecos.xmc.xlinear.evaluate",
                "-y {}".format("test/tst-data/xmc/xlinear/Yt.npz"),
                "-p {}".format(pred_path),
            ]
        )
        proc = subprocess.run(
            shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        assert proc.returncode == 0, cmd
        assert proc.stdout == expected_stdout

    # One-versus-all: no clustering at all.
    ova = XLinearModel.train(X, Y, C=None)
    ova_path = str(tmpdir.join("P_ova.npz"))
    smat_util.save_matrix(ova_path, ova.predict(Xt))
    evaluate(ova_path)

    # Shallow: only the last clustering layer, depth 1.
    shallow = XLinearModel.train(X, Y, C=chain[-1], shallow=True)
    shallow_path = str(tmpdir.join("P_shallow.npz"))
    smat_util.save_matrix(shallow_path, shallow.predict(Xt))
    evaluate(shallow_path)
def test_set_output_constraint(tmpdir):
    """set_output_constraint should prune a model so that its predictions
    over the kept labels match a reference that masks the full model's
    beam search to the same label subset.

    BUG FIX: the original test computed the ground truth on the test matrix
    Xt but ran the constrained models on the training matrix X; both sides
    of the comparison must use the same input, so the constrained models
    now predict on Xt as well.
    """
    from pecos.utils import smat_util
    from pecos.xmc.xlinear import XLinearModel
    from pecos.xmc import Indexer, LabelEmbeddingFactory

    train_X_file = "test/tst-data/xmc/xlinear/X.npz"
    train_Y_file = "test/tst-data/xmc/xlinear/Y.npz"
    test_X_file = "test/tst-data/xmc/xlinear/Xt.npz"
    model_folder = str(tmpdir.join("save_model"))
    X = smat_util.load_matrix(train_X_file)
    Y = smat_util.load_matrix(train_Y_file)
    label_feat = LabelEmbeddingFactory.create(Y, X, method="pifa")
    Xt = smat_util.load_matrix(test_X_file)
    label_size = Y.shape[1]
    model_folder_list = []
    # Obtain xlinear models with various numbers of splits.
    for splits in [2, 4]:
        model_folder_local = f"{model_folder}-{splits}"
        cluster_chain = Indexer.gen(label_feat, nr_splits=splits, max_leaf_size=2)
        py_model = XLinearModel.train(X, Y, C=cluster_chain)
        py_model.save(model_folder_local)
        model_folder_list.append(model_folder_local)
    # Obtain the OVA model (no clustering).
    py_model_ova = XLinearModel.train(X, Y, C=None)
    model_folder_local_ova = model_folder + "-ova"
    py_model_ova.save(model_folder_local_ova)
    model_folder_list.append(model_folder_local_ova)

    labels_to_keep_list = [[]]  # empty list
    labels_to_keep_list.append(
        list(set(map(int, [0, label_size / 3, label_size / 2, label_size * 2 / 3, label_size - 1])))
    )  # symmetric label indices to keep
    labels_to_keep_list.append(
        list(set(map(int, [0, label_size / 3, label_size / 2])))
    )  # asymmetric label indices to keep
    beam_size_list = [2, label_size]

    def obtain_ground_truth_pred(model, X, pruned_model, beam_size=None, post_processor=None):
        """Reference prediction: run the full model's layer-by-layer beam
        search while zeroing out every child that pruned_model dropped."""
        pred_csr = None
        default_kwargs = {"beam_size": 10, "only_topk": 20, "post_processor": "l3-hinge"}
        if post_processor is None:
            post_processor = default_kwargs.get("post_processor")
        if beam_size is None:
            beam_size = default_kwargs.get("beam_size")
        for d in range(model.depth):
            cur_model = model.model_chain[d]
            pred_csr = cur_model.predict(
                X, only_topk=cur_model.C.shape[0], csr_codes=pred_csr, post_processor=post_processor
            )
            # Zero out predictions for children pruned away at this depth.
            kept_children = pruned_model.model_chain[d].C.indices
            for i_nnz in range(pred_csr.nnz):
                if pred_csr.indices[i_nnz] not in kept_children:
                    pred_csr.data[i_nnz] = 0
            pred_csr.eliminate_zeros()
            pred_csr = smat_util.sorted_csr(pred_csr, only_topk=beam_size)
        return pred_csr

    for model_folder_local in model_folder_list:
        for labels_to_keep in labels_to_keep_list:
            for beam_size in beam_size_list:
                # Prune a freshly-loaded copy and save it.
                py_model = XLinearModel.load(model_folder_local)
                py_model.set_output_constraint(labels_to_keep)
                model_folder_local_with_constraint = model_folder_local + "-constraint"
                py_model.save(model_folder_local_with_constraint)

                # Ground truth: full model masked to the pruned topology.
                py_model_full = XLinearModel.load(model_folder_local)
                pred_ground_truth = obtain_ground_truth_pred(
                    py_model_full.model, Xt, py_model.model, beam_size
                ).todense()

                # Constrained model, normal load (predict on Xt — see docstring).
                py_model_with_constraint = XLinearModel.load(model_folder_local_with_constraint)
                pred_with_constraint = py_model_with_constraint.predict(
                    Xt, beam_size=beam_size, only_topk=beam_size
                ).todense()

                # Constrained model, predict-only load.
                py_model_with_constraint_predict_only = XLinearModel.load(
                    model_folder_local_with_constraint, is_predict_only=True
                )
                pred_with_constraint_predict_only = py_model_with_constraint_predict_only.predict(
                    Xt, beam_size=beam_size, only_topk=beam_size
                ).todense()

                assert pred_ground_truth[:, labels_to_keep] == approx(
                    pred_with_constraint[:, labels_to_keep], abs=1e-6
                ), f"prediction result for label constraints {labels_to_keep}, beam_size={beam_size}, model={model_folder_local_with_constraint} and XLinearModel.load is not correct"
                assert pred_ground_truth[:, labels_to_keep] == approx(
                    pred_with_constraint_predict_only[:, labels_to_keep], abs=1e-6
                ), f"prediction result for label constraints {labels_to_keep}, beam_size={beam_size}, model={model_folder_local_with_constraint} and XLinearModel.load in predict-only mode is not correct"
def test_get_submodel():
    """get_submodel must restrict an MLModel to the selected codes, either
    compacting the matrices (reindex=True) or keeping the original shapes
    with inactive entries zeroed (reindex=False)."""
    import numpy as np
    import scipy.sparse as smat
    from pecos.xmc import MLModel

    code_matrix = smat.csc_matrix(
        [[0, 0, 1], [0, 1, 0], [1, 0, 0], [0, 1, 1]], dtype=np.float32
    )
    kept_codes = [1, 2]
    kept_labels = {0, 1, 3}  # labels reachable from codes 1 and 2 above

    # --- reindex=True: matrices shrink to the active codes/labels ---
    model = MLModel(C=code_matrix, W=smat.csc_matrix(np.eye(4), dtype=np.float32))
    sub = model.get_submodel(selected_codes=kept_codes, reindex=True)
    assert len(sub["active_labels"]) == 3
    assert len(sub["active_codes"]) == 2
    assert all(label in sub["active_labels"] for label in kept_labels)
    compact_C = model.C[sub["active_labels"], :][:, kept_codes]
    # nnz of the `!=` mask is 0 iff the sparse matrices are equal.
    assert (sub["model"].C != compact_C).nnz == 0
    assert (sub["model"].W != model.W[:, sub["active_labels"]]).nnz == 0

    # --- reindex=False: shapes preserved, inactive entries zeroed ---
    model = MLModel(
        C=code_matrix,
        W=smat.csc_matrix(np.random.normal(size=(10, 4)), dtype=np.float32),
    )
    sub = model.get_submodel(selected_codes=kept_codes, reindex=False)
    assert sub["model"].C.shape == code_matrix.shape
    assert sub["model"].W.shape == model.W.shape
    for r in range(code_matrix.shape[0]):
        for c in range(code_matrix.shape[1]):
            if r in kept_labels and c in kept_codes:
                assert sub["model"].C[r, c] == code_matrix[r, c]
            else:
                assert sub["model"].C[r, c] == 0
    for r in range(model.W.shape[0]):
        for c in range(model.W.shape[1]):
            if c in kept_labels:
                assert sub["model"].W[r, c] == model.W[r, c]
            else:
                assert sub["model"].W[r, c] == 0
    assert len(sub["active_labels"]) == 3
    assert len(sub["active_codes"]) == 2
    assert all(label in sub["active_labels"] for label in kept_labels)
def test_predict_consistency_between_topk_and_selected(tmpdir):
    """For every post-processor, predicting with selected_outputs_csr set to
    the model's own top-k output must reproduce that top-k output — in
    batch and realtime mode, for sparse and dense inputs alike."""
    from pecos.xmc import PostProcessor, Indexer, LabelEmbeddingFactory
    from pecos.xmc.xlinear import XLinearModel

    train_X_file = "test/tst-data/xmc/xlinear/X.npz"
    train_Y_file = "test/tst-data/xmc/xlinear/Y.npz"
    test_X_file = "test/tst-data/xmc/xlinear/Xt.npz"
    Xt = XLinearModel.load_feature_matrix(train_X_file)
    Yt = XLinearModel.load_feature_matrix(train_Y_file)
    model_folder = str(tmpdir.join("save_model"))
    label_feat = LabelEmbeddingFactory.create(Yt, Xt, method="pifa")

    # Train and save one model per tree arity.
    model_folder_list = []
    for splits in (2, 4):
        folder = f"{model_folder}-{splits}"
        chain = Indexer.gen(label_feat, nr_splits=splits)
        XLinearModel.train(Xt, Yt, C=chain).save(folder)
        model_folder_list.append(folder)

    X = XLinearModel.load_feature_matrix(test_X_file)

    def test_on_model(model, X, model_folder_local):
        """Check topk-vs-selected consistency for one loaded model."""
        for pp in PostProcessor.valid_list():
            # Batch mode: top-k prediction, then the same prediction driven
            # by selected_outputs_csr, for sparse and dense inputs.
            py_sparse_topk_pred = model.predict(X, post_processor=pp)
            py_dense_topk_pred = model.predict(X.todense(), post_processor=pp)
            py_selected_sparse_topk_pred = model.predict(
                X, selected_outputs_csr=py_sparse_topk_pred, post_processor=pp
            )
            py_selected_dense_topk_pred = model.predict(
                X.todense(), selected_outputs_csr=py_dense_topk_pred, post_processor=pp
            )
            assert py_sparse_topk_pred.todense() == approx(
                py_selected_sparse_topk_pred.todense(), abs=1e-6
            ), f"model:{model_folder_local} (batch, sparse, topk) post_processor:{pp})"
            assert py_dense_topk_pred.todense() == approx(
                py_selected_dense_topk_pred.todense(), abs=1e-6
            ), f"model:{model_folder_local} (batch, dense, topk) post_processor:{pp})"
            # Realtime mode: one query row at a time.
            for i in range(X.shape[0]):
                query_slice = X[[i], :]
                # Some versions of Scipy don't maintain sortedness when slicing.
                query_slice.sort_indices()
                py_sparse_realtime_pred = model.predict(query_slice, post_processor=pp)
                py_dense_realtime_pred = model.predict(query_slice.todense(), post_processor=pp)
                py_selected_sparse_realtime_pred = model.predict(
                    query_slice, selected_outputs_csr=py_sparse_realtime_pred, post_processor=pp
                )
                py_selected_dense_realtime_pred = model.predict(
                    query_slice.todense(),
                    selected_outputs_csr=py_dense_realtime_pred,
                    post_processor=pp,
                )
                assert py_sparse_realtime_pred.todense() == approx(
                    py_selected_sparse_realtime_pred.todense(), abs=1e-6
                ), f"model:{model_folder_local} (realtime, sparse, topk) post_processor:{pp}"
                assert py_dense_realtime_pred.todense() == approx(
                    py_selected_dense_realtime_pred.todense(), abs=1e-6
                ), f"model:{model_folder_local} (realtime, dense, topk) post_processor:{pp}"

    for model_folder_local in model_folder_list:
        # Exercise both the full load and the predict-only (CSC) load.
        test_on_model(
            XLinearModel.load(model_folder_local, is_predict_only=False),
            X,
            model_folder_local,
        )
        test_on_model(
            XLinearModel.load(
                model_folder_local, is_predict_only=True, weight_matrix_type="CSC"
            ),
            X,
            model_folder_local,
        )
| 44.466463 | 205 | 0.622489 |
7959956341ab9c68d2e8078c12dd9bb30e722d65 | 7,283 | py | Python | scripts/exp_scripts/src/ec2_scripts/generate_ec2_config_file.py | MobilityFirst/GNS | 1eb5524457e0075dc9f451bd66e39f9291052eb8 | [
"Apache-2.0"
] | 17 | 2015-11-16T18:02:47.000Z | 2020-08-02T08:53:11.000Z | scripts/exp_scripts/src/ec2_scripts/generate_ec2_config_file.py | MobilityFirst/GNS | 1eb5524457e0075dc9f451bd66e39f9291052eb8 | [
"Apache-2.0"
] | 63 | 2015-12-22T20:52:28.000Z | 2019-03-06T02:44:20.000Z | scripts/exp_scripts/src/ec2_scripts/generate_ec2_config_file.py | MobilityFirst/GNS | 1eb5524457e0075dc9f451bd66e39f9291052eb8 | [
"Apache-2.0"
] | 62 | 2015-11-13T20:04:47.000Z | 2020-01-10T12:20:44.000Z | #!/usr/bin/env python
import os
import sys
from read_pl_latencies import read_pl_latencies
import exp_config
from trace_generator import trace_generator
pl_lns_workload = exp_config.pl_lns_workload
hosts_file_ns = '/home/abhigyan/gnrs/ec2_scripts/pl_ns'
hosts_file_lns = '/home/abhigyan/gnrs/ec2_scripts/pl_lns'
config_dir = exp_config.config_folder
config_file = 'config'
pl_latency_folder = exp_config.pl_latency_folder
lookup_trace = exp_config.lookupTrace
update_trace = exp_config.updateTrace
def main():
    """Entry point: build EC2 config files for the load level configured
    in exp_config (node counts are derived from the host files inside
    generate_ec2_config_file)."""
    generate_ec2_config_file(exp_config.load)
def generate_ec2_config_file(load):
    """Write one config file per node (NS + LNS) and optionally generate
    the lookup/update workload traces for the given request load.

    Side effects: recreates config_dir, fills the module-level
    pl_latencies table, and may (re)create the trace folders.
    """
    global pl_latencies
    # config files
    os.system('mkdir -p ' + config_dir+ '; rm -rf ' + config_dir + '/*')
    from read_array_from_file import read_col_from_file
    ns_hostnames = read_col_from_file(hosts_file_ns)
    ns = len(ns_hostnames)
    lns_hostnames = read_col_from_file(hosts_file_lns)
    lns = len(lns_hostnames)
    # Tautological sanity checks (ns/lns were just derived from these lists).
    assert len(ns_hostnames) == ns
    assert len(lns_hostnames) == lns
    # Load the pairwise latency table used by write_config_file/get_pl_latency.
    pl_latencies = read_pl_latencies(pl_latency_folder)
    # Node ids 0..ns-1 are name servers, ns..ns+lns-1 are local name servers.
    for i in range(ns + lns):
        if i < ns:
            config_file1 = os.path.join(config_dir, config_file + '_' + ns_hostnames[i])
        else:
            config_file1 = os.path.join(config_dir, config_file + '_' + lns_hostnames[i - ns])
        write_config_file(i, config_file1, ns, lns, hosts_file_ns, hosts_file_lns)
    print 'Written config files. ' + config_dir + ' count = ' + str(ns + lns)
    # workload
    if exp_config.experiment_run_time > 0:
        if exp_config.gen_workload:
            generate_workload(ns, lns, ns_hostnames, lns_hostnames, load)
            print 'Generate full workload. Nodes = ', lns
        elif exp_config.gen_test_workload:
            # Only the first LNS (node id == ns) gets the test workload.
            generate_test_workload(ns, lns_hostnames[ns - ns]) # id of lns is 'ns'
            print 'Generated single LNS test workload.'
    else :
        # No experiment scheduled: clear any stale local traces.
        os.system('rm -rf lookupLocal updateLocal')
# Pairwise latency table keyed [src_node_id][dst_node_id]; populated by
# generate_ec2_config_file() before any get_pl_latency() lookups.
pl_latencies = {}
def get_pl_latency(node1, node2):
    """Return the measured latency from node1 to node2.

    Reads the module-level pl_latencies table, which must already have been
    filled by generate_ec2_config_file(); raises KeyError otherwise.
    """
    return pl_latencies[node1][node2]
def write_config_file(node_id, config_file, ns, lns, hosts_file_ns, hosts_file_lns):
    """Write the node-membership config file seen by one node.

    Each line is: id, is-name-server ('yes'/'no'), hostname, port, latency
    from node_id to that node, and two fixed '100.0' columns
    (presumably coordinates or capacities — confirm against the consumer).
    Note: the parameter 'config_file' shadows the module-level constant of
    the same name; here it is the full output path.
    """
    from read_array_from_file import read_col_from_file
    from random import random
    #hosts = ['compute-0-13']
    hosts = read_col_from_file(hosts_file_ns)
    host_count = 0
    # Ports are assigned sequentially, 50 apart, starting above 44001.
    port_number = 44001
    port_per_node = 50
    fw = open(config_file, 'w')
    # Name servers first (ids 0..ns-1), flagged 'yes'.
    for i in range(ns):
        port_number += port_per_node
        #s = '\t'.join([str(i), 'yes', hosts[host_count], str(port_number), str(random()), '100.0', '100.0'])
        latency = get_pl_latency(node_id, i)
        s = '\t'.join([str(i), 'yes', hosts[host_count], str(port_number), str(latency), '100.0', '100.0'])
        fw.write(s + '\n')
        #print s
        # Hosts are reused round-robin when there are more nodes than hosts.
        host_count = (host_count + 1) % len(hosts)
    hosts = read_col_from_file(hosts_file_lns)
    host_count = 0
    #port_number = 20000
    # Local name servers next (ids ns..ns+lns-1), flagged 'no'.
    for i in range(lns):
        port_number += port_per_node
        latency = get_pl_latency(node_id, i + ns)
        s = '\t'.join([str(i + ns), 'no', hosts[host_count], str(port_number), str(latency), '100.0', '100.0'])
        fw.write(s + '\n')
        #print s
        host_count = (host_count + 1) % len(hosts)
    fw.close()
def generate_test_workload(lns_id, hostname):
    """Generate lookup and update traces for a single LNS host.

    If the configured name space has exactly one name, every request targets
    name '0'; otherwise names are drawn at random from the full name space.
    Note: 'lns_id' is currently unused.
    """
    from generate_sequence import write_single_name_trace,write_random_name_trace
    name = '0'
    number = exp_config.lookup_count
    filename = os.path.join(lookup_trace, 'lookup_' + hostname)
    if exp_config.regular_workload + exp_config.mobile_workload == 1:
        write_single_name_trace(filename, number, name)
    else:
        write_random_name_trace(filename,exp_config.regular_workload + exp_config.mobile_workload, number)
    # Same scheme for the update trace.
    number = exp_config.update_count
    filename = os.path.join(update_trace, 'update_' + hostname)
    if exp_config.regular_workload + exp_config.mobile_workload== 1:
        write_single_name_trace(filename, number, name)
    else:
        write_random_name_trace(filename,exp_config.regular_workload + exp_config.mobile_workload, number)
def generate_workload(ns, lns, ns_hostnames, lns_hostnames, load):
    """Generate the full per-LNS lookup/update workload for a given load.

    Runs trace_generator into temporary folders, then copies each workload
    host's trace file to every LNS hostname (round-robin over workload
    hosts), and finally deletes the temporary folders.
    Note: 'ns' and 'ns_hostnames' are unused; 'no_updates' is a dead flag.
    """
    # now workload
    from read_array_from_file import read_col_from_file
    lns_names = read_col_from_file(pl_lns_workload)
    lns_count = 0
    os.system('rm -rf ' + lookup_trace + '; mkdir -p ' + lookup_trace)
    os.system('rm -rf ' + update_trace + '; mkdir -p ' + update_trace)
    # generate trace for load = load
    lookup_temp = '/home/abhigyan/gnrs/lookupTrace' + str(load)
    update_temp = '/home/abhigyan/gnrs/updateTrace' + str(load)
    trace_generator(load, lookup_temp, update_temp)
    #os.system('/home/abhigyan/gnrs/trace_generator.py ' + str(load))
    # trace generator outputs in following folders
    #generate_beehive_trace(load)
    no_updates = False
    for i in range(lns):
        #id = str(i + ns)
        node_id = lns_hostnames[i]
        host = lns_names[lns_count]
        lookup_input = lookup_temp + '/lookup_' + host
        lookup_output = os.path.join(lookup_trace, 'lookup_' + node_id)
        os.system('cp ' + lookup_input + ' ' + lookup_output)
#        if os.path.exists(lookup_temp + '/lookup_' + host):
#            #print 'cp ' + lookup_temp + '/lookup_' + host + ' lookupLocal/' + id
#            output_file = os.path.join(lookup_trace, 'lookup_' + id)
#            os.system('cp ' + lookup_temp + '/lookup_' + host + ' ' + id)
#        else:
#            #print 'rm lookupLocal/' + id + '; touch lookupLocal/' + id
#            os.system('rm lookupLocal/' + id + '; touch lookupLocal/' + id)
        update_input = update_temp + '/update_' + host
        update_output = os.path.join(update_trace, 'update_' + node_id)
        os.system('cp ' + update_input + ' ' + update_output)
#        if no_updates == False and os.path.exists(update_temp + '/update_' + host):
#            os.system('cp ' + update_temp + '/update_' + host + ' updateLocal/' + id)
#        else :
#            os.system('rm -rf updateLocal/' + id + '; touch updateLocal/' + id)
        #if os.path.exists('workloadTrace/workload_' + host):
        #    os.system('cp workloadTrace/workload_' + host + ' workloadLocal/' + id)
        #else :
        #    os.system('rm workloadLocal/' + id + '; touch workloadLocal/' + id)
        # Reuse workload hosts round-robin when there are more LNS than hosts.
        lns_count = (lns_count + 1) % len(lns_names)
    # delete folders
    os.system('rm -rf ' + lookup_temp + ' ' + update_temp)
def generate_beehive_trace(load):
    """Transform the generated traces into Beehive format.

    Concatenates all lookups into read_list.txt, derives per-name read
    rates, rewrites both trace folders through the Beehive transform, and
    swaps the transformed folders into place.
    """
    lookup_temp = 'lookupTrace' + str(load)
    update_temp = 'updateTrace' + str(load)
    # make a read list file
    os.system('cat ' + lookup_temp + '/* > read_list.txt' )
    # now write a read rate file
    from count_frequency import output_read_rate
    output_read_rate()
    from beehive_trace_transform import output_transformed_trace
    output_transformed_trace(lookup_temp, 'read_rate')
    output_transformed_trace(update_temp, 'read_rate')
    # Replace the originals with the '_beehive' transformed folders.
    os.system('rm -rf ' + lookup_temp + ' ' + update_temp)
    os.system('mv ' + lookup_temp + '_beehive ' + lookup_temp)
    os.system('mv ' + update_temp + '_beehive ' + update_temp)
# Script entry point: regenerate config files and workload traces.
if __name__ == "__main__":
    main()
| 37.932292 | 111 | 0.658108 |
7959968ae42119ff4bfcb14c33bec8f613d4792f | 2,516 | py | Python | tests/test_check_redcap_event.py | utsw-bicf/nacculator | 1e8eb9b4029c7c52b242c76f941a1572577d300e | [
"BSD-2-Clause"
] | null | null | null | tests/test_check_redcap_event.py | utsw-bicf/nacculator | 1e8eb9b4029c7c52b242c76f941a1572577d300e | [
"BSD-2-Clause"
] | 20 | 2020-05-20T16:04:13.000Z | 2020-07-28T16:10:15.000Z | tests/test_check_redcap_event.py | utsw-bicf/nacculator | 1e8eb9b4029c7c52b242c76f941a1572577d300e | [
"BSD-2-Clause"
] | null | null | null | import unittest
from nacc.redcap2nacc import check_redcap_event
class option():
    """Minimal stand-in for the redcap2nacc argparse options namespace.

    Each flag mirrors a CLI switch consumed by check_redcap_event;
    tests flip individual flags on a fresh instance. All default to off.
    """
    lbd = False
    ftld = False
    csf = False
    np = False
    m = False
    ivp = False
    fvp = False
class TestRedcapEvent(unittest.TestCase):
    '''
    These tests are meant to ensure that the check_redcap_event function is
    properly distinguishing between REDCap events in an imported CSV of various
    records. Ideally, redcap2nacc should only be outputting PTIDs with the
    redcap_event_name specified by the options flag (-ivp, -ldb, et cetera) and
    skipping all others, leaving an output .txt file with no blank lines.
    '''
    def setUp(self):
        # Fresh options object per test so flag mutations do not leak.
        self.options = option()
    def test_for_ivp(self):
        '''
        Checks that the -ivp flag with no other options returns the correct
        visit (not LBD IVP or FTLD IVP).
        '''
        self.options.ivp = True
        record = {'redcap_event_name': 'initial_visit_year_arm_1',
                  'ivp_z1_complete': '', 'ivp_z1x_complete': '2'}
        result = check_redcap_event(self.options, record)
        self.assertTrue(result)
    def test_for_not_ivp(self):
        '''
        Checks that the initial_visit is not returned when the -ivp flag is not
        set.
        '''
        self.options.fvp = True
        record = {'redcap_event_name': 'initial_visit_year_arm_1',
                  'fvp_z1_complete': '', 'fvp_z1x_complete': ''}
        result = check_redcap_event(self.options, record)
        self.assertFalse(result)
    def test_for_multiple_flags(self):
        '''
        Checks that options like -lbd -ivp are functional.
        '''
        self.options.ivp = True
        self.options.lbd = True
        record = {'redcap_event_name': 'initial_visit_year_arm_1',
                  'lbd_ivp_b1l_complete': '2'}
        result = check_redcap_event(self.options, record)
        self.assertTrue(result)
    def test_for_not_multiple_flags(self):
        '''
        Checks that -ivp alone is not returned with options like -lbd -ivp.
        '''
        self.options.ivp = True
        record = {'redcap_event_name': 'initial_visit_year_arm_1',
                  'ivp_z1_complete': '', 'ivp_z1x_complete': '',
                  'lbd_ivp_b1l_complete': '2'}
        # Same record must yield different results with and without -lbd.
        incorrect = check_redcap_event(self.options, record)
        self.options.lbd = True
        result = check_redcap_event(self.options, record)
        self.assertNotEqual(incorrect, result)
# Allow running this test module directly (outside a pytest/unittest runner).
if __name__ == "__main__":
    unittest.main()
| 32.25641 | 79 | 0.630763 |
795996cd662a5513a1f62652ea9456ba3d931357 | 5,869 | py | Python | tests/test_24_env_db.py | bfclarke/kipoi | 992b41eee8e35b39ae61262d988db974d8583759 | [
"MIT"
] | null | null | null | tests/test_24_env_db.py | bfclarke/kipoi | 992b41eee8e35b39ae61262d988db974d8583759 | [
"MIT"
] | null | null | null | tests/test_24_env_db.py | bfclarke/kipoi | 992b41eee8e35b39ae61262d988db974d8583759 | [
"MIT"
] | null | null | null | import json
import os
import kipoi
from kipoi.cli.env import generate_env_db_entry, get_envs_by_model
from kipoi.env_db import EnvDb
def get_args(def_kwargs):
    """Fabricate an argparse-Namespace-like stand-in for the given kwargs.

    Returns a *class* (callers instantiate it via ``get_args(kw)()``) whose
    ``model`` and ``source`` attributes are lifted out of the kwargs dict and
    whose ``_get_kwargs()`` mimics argparse's private accessor.
    """
    class _StubArgs:
        kwargs = def_kwargs
        model = def_kwargs["model"]
        source = def_kwargs["source"]
        def _get_kwargs(self):
            return self.kwargs
    return _StubArgs
def assert_rec(a, b):
    """Recursively assert deep equality of nested dicts, lists and scalars."""
    if isinstance(a, dict):
        # Identical key sets, then compare the values pairwise.
        assert set(a.keys()) == set(b.keys())
        for key in a:
            assert_rec(a[key], b[key])
    elif isinstance(a, list):
        # Identical lengths, then compare elements in order.
        assert len(a) == len(b)
        for left, right in zip(a, b):
            assert_rec(left, right)
    else:
        # Leaf values compare directly.
        assert a == b
def test_env_db_kipoi(tmpdir, monkeypatch):
    """End-to-end check that EnvDb keeps 'kipoi'-source and 'dir'-source
    entries for the same models separate, and that full paths only match
    dir-source entries."""
    # Test the kipoi vs. dir path ambiguation
    # Test the DeepSEA model using the `kipoi` and the `dir` sources
    # Test the `shared/envs/kipoi-py3-keras1.2.yaml` model using the `kipoi` and the `dir` sources
    json_file = os.path.join(str(tmpdir), "db.json")
    sample_cli_path = os.path.join(str(tmpdir), "sample")
    with open(sample_cli_path, "w") as fh:
        fh.write("")
    db = EnvDb(json_file)
    kwargs = {"dataloader": [], "gpu": True, "model": None, "source": "kipoi",
              "tmpdir": "something", "vep": True}
    # generate the kipoi entries
    kipoi_entries = []
    for model in [["DeepSEA"], ["shared/envs/kipoi-py3-keras1.2"]]:
        kwargs['model'] = model
        db_entry = generate_env_db_entry(get_args(kwargs)())
        db.append(db_entry)
        kipoi_entries.append(db_entry)
    # generate the dir entries
    dir_entries = []
    local_path = kipoi.get_source("dir").local_path
    kwargs["source"] = "dir"
    for model in [["example/models/pyt"], ["example/models/shared/envs/kipoi-py3-keras1.2"]]:
        kwargs['model'] = [os.path.join(local_path,model[0])]
        db_entry = generate_env_db_entry(get_args(kwargs)())
        db.append(db_entry)
        dir_entries.append(db_entry)
    # make sure there is no mixup between the kipoi and dir models and make sure the full path is only used
    # for dir models
    assert db.get_entry_by_model("DeepSEA", only_most_recent=False) == [kipoi_entries[0]]
    assert db.get_entry_by_model("CpGenie/merged", only_most_recent=False) == [dir_entries[1], kipoi_entries[1]]
    assert db.get_entry_by_model(os.path.join(local_path, "example/models/pyt"),
                                 only_most_recent=False) == [dir_entries[0]]
    # monkeypatch the get_model_env_db() so get_envs_by_model uses our db
    monkeypatch.setattr(kipoi.env_db, 'get_model_env_db', lambda: db)
    assert get_envs_by_model(['DeepSEA'], "kipoi", only_most_recent=False, only_valid=False) == [kipoi_entries[0]]
    assert get_envs_by_model(["CpGenie/merged"], "kipoi", only_most_recent=False,
                             only_valid=False) == [dir_entries[1],kipoi_entries[1]]
    assert get_envs_by_model(["example/models/pyt"], "dir", only_most_recent=False,
                             only_valid=False) == [dir_entries[0]]
def test_env_db(tmpdir):
    """Exercise EnvDb persistence: lookup order, validity filtering,
    save/reload round-trip, and skipping of malformed JSON entries."""
    json_file = os.path.join(str(tmpdir), "db.json")
    sample_cli_path = os.path.join(str(tmpdir), "sample")
    with open(sample_cli_path, "w") as fh:
        fh.write("")
    db = EnvDb(json_file)
    kwargs = {"dataloader": [], "env": "test_env", "gpu": True, "model": None, "source": "dir",
              "tmpdir": "something", "vep": True}
    entries = []
    source_path = kipoi.get_source("dir").local_path
    for model in [["example/models/pyt"], ["example/models/shared/envs/kipoi-py3-keras1.2", "example/models/pyt"]]:
        kwargs['model'] = model
        db_entry = generate_env_db_entry(get_args(kwargs)())
        db.append(db_entry)
        entries.append(db_entry)
    pyt_query_name = os.path.join(source_path, "example/models/pyt")
    # Most-recent entry wins; with only_most_recent=False newest comes first.
    assert db.get_entry_by_model(pyt_query_name) == entries[1]
    assert db.get_entry_by_model(pyt_query_name + "_class") is None
    assert db.get_entry_by_model(pyt_query_name, only_most_recent=False) == entries[::-1]
    # test if the viability check is ok: valid == successful AND cli_path exists
    entry = db.get_entry_by_model(pyt_query_name)
    entry.successful = True
    entry.cli_path = sample_cli_path
    assert db.get_entry_by_model(pyt_query_name, only_most_recent=False, only_valid=True) == [entry]
    entry.successful = False
    assert len(db.get_entry_by_model(pyt_query_name, only_most_recent=False, only_valid=True)) == 0
    entry.successful = True
    entry.cli_path = None
    assert len(db.get_entry_by_model(pyt_query_name, only_most_recent=False, only_valid=True)) == 0
    db.save()
    del db
    # Test if loading is fine
    db2 = EnvDb(json_file)
    # test dict identity
    assert_rec(db2.get_entry_by_model(pyt_query_name).get_config(), entries[1].get_config())
    assert db2.get_entry_by_model(pyt_query_name + "_class") is None
    del db2
    # Test if bad entries are skipped
    with open(json_file, "r") as fh:
        db_dict = json.load(fh)
    # Add a bad entry (missing required fields) directly to the JSON:
    new_key = max([int(k) for k in db_dict["_default"]]) + 1
    db_dict["_default"][str(new_key)] = {"conda_version": "conda 4.5.4", "kipoi_version": "0.5.6"}
    with open(json_file, "w") as fh:
        json.dump(db_dict, fh)
    # Check if there is a warning
    # with pytest.warns(UserWarning): # There seems to be a general problem with warnings...
    db_warns = EnvDb(json_file)
    assert len(db_warns.entries) == 2
    # Now save so that the bad entry is gone
    db_warns.save()
    del db_warns
    # Make sure the bad entry is not there anymore
    with open(json_file, "r") as fh:
        db_dict_recovered = json.load(fh)
    found = 0
    for val in db_dict_recovered['_default'].values():
        found += int(val == db_dict["_default"][str(new_key)])
    assert len(db_dict_recovered["_default"]) == new_key - 1
    assert found == 0
    os.unlink(json_file)
79599785cb290f9e1f132df6f0fe2c5785525236 | 3,605 | py | Python | examples/intraday_mean.py | prutskov/Bodo-examples | 7c96e89f3ac9062eb598b99da2729718007b8e4d | [
"Apache-2.0"
] | 17 | 2020-09-27T04:31:33.000Z | 2022-02-24T12:02:56.000Z | examples/intraday_mean.py | prutskov/Bodo-examples | 7c96e89f3ac9062eb598b99da2729718007b8e4d | [
"Apache-2.0"
] | 12 | 2021-09-08T21:30:40.000Z | 2022-03-25T13:20:29.000Z | examples/intraday_mean.py | prutskov/Bodo-examples | 7c96e89f3ac9062eb598b99da2729718007b8e4d | [
"Apache-2.0"
] | 5 | 2020-09-02T23:48:40.000Z | 2022-03-01T19:34:38.000Z | """
Intraday example to demonstrate Pandas functionality.
Usage:
mpiexec -n [cores] python intraday_mean.py --file [file] --maxDays [max_num_days]
# adapted from:
# http://www.pythonforfinance.net/2017/02/20/intraday-stock-mean-reversion-trading-backtest-in-python/
See data generation script in data/stock_data_read.py
"""
import pandas as pd
import numpy as np
import h5py
import argparse
import time
import bodo
from bodo import prange
@bodo.jit(
    # More information on 'locals' in the bodo decorator
    # http://docs.bodo.ai/latest/source/user_guide.html#input-array-types
    locals={
        "s_open": bodo.float64[:],
        "s_high": bodo.float64[:],
        "s_low": bodo.float64[:],
        "s_close": bodo.float64[:],
        "s_vol": bodo.float64[:],
    }
)
def intraday_mean_revert(file_name, max_num_days):
    """Backtest an intraday mean-reversion strategy over all symbols in an
    HDF5 file, printing the mean daily strategy return and execution time.

    Symbols are processed in parallel (bodo prange); each symbol's return
    series is right-aligned into a max_num_days-long vector and summed.
    Raises ValueError if any symbol has more than max_num_days days.
    """
    f = h5py.File(file_name, "r")
    sym_list = list(f.keys())
    nsyms = len(sym_list)
    all_res = np.zeros(max_num_days)
    t1 = time.time()
    # More information on bodo's explicit parallel loop: prange
    # http://docs.bodo.ai/latest/source/user_guide.html#explicit-parallel-loops
    for i in prange(nsyms):
        symbol = sym_list[i]
        # Each symbol stores per-day OHLCV arrays under <symbol>/<field>.
        s_open = f[symbol + "/Open"][:]
        s_high = f[symbol + "/High"][:]
        s_low = f[symbol + "/Low"][:]
        s_close = f[symbol + "/Close"][:]
        s_vol = f[symbol + "/Volume"][:]
        df = pd.DataFrame(
            {
                "Open": s_open,
                "High": s_high,
                "Low": s_low,
                "Close": s_close,
                "Volume": s_vol,
            }
        )
        # create column to hold our 90 day rolling standard deviation
        df["Stdev"] = df["Close"].rolling(window=90).std()
        # create a column to hold our 20 day moving average
        df["Moving Average"] = df["Close"].rolling(window=20).mean()
        # create a column which holds a TRUE value if the gap down from previous day's low to next
        # day's open is larger than the 90 day rolling standard deviation
        df["Criteria1"] = (df["Open"] - df["Low"].shift(1)) < -df["Stdev"]
        # create a column which holds a TRUE value if the opening price of the stock is above the 20 day moving average
        df["Criteria2"] = df["Open"] > df["Moving Average"]
        # create a column that holds a TRUE value if both above criteria are also TRUE
        df["BUY"] = df["Criteria1"] & df["Criteria2"]
        # calculate daily % return series for stock
        df["Pct Change"] = (df["Close"] - df["Open"]) / df["Open"]
        # create a strategy return series by using the daily stock returns where the trade criteria above are met
        df["Rets"] = df["Pct Change"][df["BUY"] == True]
        n_days = len(df["Rets"])
        res = np.zeros(max_num_days)
        if n_days > max_num_days:
            print(n_days, max_num_days)
            # NOTE: "execeeds" typo is in the released error string; kept as-is.
            raise ValueError("Number of days execeeds maximum")
        if n_days:
            # Right-align this symbol's returns (non-trade days become 0).
            res[-n_days:] = df["Rets"].fillna(0).values
        all_res += res
    f.close()
    print(all_res.mean())
    print("execution time:", time.time() - t1)
def main():
    """Parse CLI arguments and run the intraday mean-reversion backtest."""
    parser = argparse.ArgumentParser(description="Intraday Mean example")
    parser.add_argument(
        "--file", dest="file", type=str, default="data/stock_data_all_yahoo.hdf5"
    )
    parser.add_argument("--maxDays", dest="max_num_days", type=int, default=20000)
    args = parser.parse_args()
    intraday_mean_revert(args.file, args.max_num_days)
# Script entry point.
if __name__ == "__main__":
    main()
| 33.073394 | 119 | 0.613592 |
79599791a96a1abe563b9cd817b144090e328106 | 4,343 | py | Python | chatbot_env/Lib/site-packages/sklearn/neighbors/_unsupervised.py | rakmakan/Chatbot | d04bc1526b56961a16c25148d9ef18c4f157e9c4 | [
"MIT"
] | 6,989 | 2017-07-18T06:23:18.000Z | 2022-03-31T15:58:36.000Z | chatbot_env/Lib/site-packages/sklearn/neighbors/_unsupervised.py | rakmakan/Chatbot | d04bc1526b56961a16c25148d9ef18c4f157e9c4 | [
"MIT"
] | 1,978 | 2017-07-18T09:17:58.000Z | 2022-03-31T14:28:43.000Z | chatbot_env/Lib/site-packages/sklearn/neighbors/_unsupervised.py | rakmakan/Chatbot | d04bc1526b56961a16c25148d9ef18c4f157e9c4 | [
"MIT"
] | 1,228 | 2017-07-18T09:03:13.000Z | 2022-03-29T05:57:40.000Z | """Unsupervised nearest neighbors learner"""
from ._base import NeighborsBase
from ._base import KNeighborsMixin
from ._base import RadiusNeighborsMixin
from ._base import UnsupervisedMixin
class NearestNeighbors(NeighborsBase, KNeighborsMixin,
                       RadiusNeighborsMixin, UnsupervisedMixin):
    """Unsupervised learner for implementing neighbor searches.
    Read more in the :ref:`User Guide <unsupervised_neighbors>`.
    .. versionadded:: 0.9
    Parameters
    ----------
    n_neighbors : int, optional (default = 5)
        Number of neighbors to use by default for :meth:`kneighbors` queries.
    radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
        queries.
    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
        Algorithm used to compute the nearest neighbors:
        - 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
        - 'brute' will use a brute-force search.
        - 'auto' will attempt to decide the most appropriate algorithm
          based on the values passed to :meth:`fit` method.
        Note: fitting on sparse input will override the setting of
        this parameter, using brute force.
    leaf_size : int, optional (default = 30)
        Leaf size passed to BallTree or KDTree.  This can affect the
        speed of the construction and query, as well as the memory
        required to store the tree.  The optimal value depends on the
        nature of the problem.
    metric : string or callable, default 'minkowski'
        the distance metric to use for the tree.  The default metric is
        minkowski, and with p=2 is equivalent to the standard Euclidean
        metric. See the documentation of the DistanceMetric class for a
        list of available metrics.
        If metric is "precomputed", X is assumed to be a distance matrix and
        must be square during fit. X may be a :term:`Glossary <sparse graph>`,
        in which case only "nonzero" elements may be considered neighbors.
    p : integer, optional (default = 2)
        Parameter for the Minkowski metric from
        sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
    metric_params : dict, optional (default = None)
        Additional keyword arguments for the metric function.
    n_jobs : int or None, optional (default=None)
        The number of parallel jobs to run for neighbors search.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    Attributes
    ----------
    effective_metric_ : string
        Metric used to compute distances to neighbors.
    effective_metric_params_ : dict
        Parameters for the metric used to compute distances to neighbors.
    Examples
    --------
      >>> import numpy as np
      >>> from sklearn.neighbors import NearestNeighbors
      >>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]
      >>> neigh = NearestNeighbors(2, 0.4)
      >>> neigh.fit(samples)
      NearestNeighbors(...)
      >>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False)
      array([[2, 0]]...)
      >>> nbrs = neigh.radius_neighbors([[0, 0, 1.3]], 0.4, return_distance=False)
      >>> np.asarray(nbrs[0][0])
      array(2)
    See also
    --------
    KNeighborsClassifier
    RadiusNeighborsClassifier
    KNeighborsRegressor
    RadiusNeighborsRegressor
    BallTree
    Notes
    -----
    See :ref:`Nearest Neighbors <neighbors>` in the online documentation
    for a discussion of the choice of ``algorithm`` and ``leaf_size``.
    https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
    """
    def __init__(self, n_neighbors=5, radius=1.0,
                 algorithm='auto', leaf_size=30, metric='minkowski',
                 p=2, metric_params=None, n_jobs=None):
        # All fitting and query behavior comes from the mixin bases;
        # this class only forwards the hyper-parameters to NeighborsBase.
        super().__init__(
              n_neighbors=n_neighbors,
              radius=radius,
              algorithm=algorithm,
              leaf_size=leaf_size, metric=metric, p=p,
              metric_params=metric_params, n_jobs=n_jobs)
79599832fa39dfef8bbf89a9bcfb5d4617c7f21d | 9,096 | py | Python | misc/config_tools/static_allocators/memory_allocator.py | jackwhich/acrn-hypervisor-1 | 2ff11c2ef04a2668979b3e363e25f13cf48376ac | [
"BSD-3-Clause"
] | null | null | null | misc/config_tools/static_allocators/memory_allocator.py | jackwhich/acrn-hypervisor-1 | 2ff11c2ef04a2668979b3e363e25f13cf48376ac | [
"BSD-3-Clause"
] | null | null | null | misc/config_tools/static_allocators/memory_allocator.py | jackwhich/acrn-hypervisor-1 | 2ff11c2ef04a2668979b3e363e25f13cf48376ac | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
#
# Copyright (C) 2022 Intel Corporation.
#
# SPDX-License-Identifier: BSD-3-Clause
#
import os
import sys
import lib.error
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'library'))
import common, math, logging
def import_memory_info(board_etree):
    """Build {start_address: size} from the board XML memory ranges.

    Start attributes are parsed as hexadecimal strings, size attributes as
    decimal; the two attribute lists are assumed to be parallel.
    """
    starts = board_etree.xpath("/acrn-config/memory/range/@start")
    sizes = board_etree.xpath("/acrn-config/memory/range/@size")
    return {int(addr, 16): int(length, 10) for addr, length in zip(starts, sizes)}
def check_hpa(vm_node_info):
    """Return the <hpa_region> child nodes of a VM whose hex text is non-zero.

    Zero-valued entries are placeholders and are filtered out.
    """
    candidates = vm_node_info.xpath("./memory/hpa_region/*")
    return [node for node in candidates if int(node.text, 16) != 0]
def get_memory_info(vm_node_info):
    """Collect the memory requests of one VM node.

    Returns a dict where key 0 (if present) holds the "whole size" request
    parsed from <memory><size> (hex), and each non-zero key is an explicit
    hpa_region start address mapped to its size.
    NOTE(review): sizes are taken verbatim from the XML text — units (bytes
    vs MB) are not normalized here; confirm against write_hpa_info, which
    scales size_hpa by 0x100000.
    """
    start_hpa = []
    size_hpa = []
    hpa_info = {}
    whole_node_list = vm_node_info.xpath("./memory/size")
    if len(whole_node_list) != 0:
        # Key 0 marks a size-only request with no fixed start address yet.
        hpa_info[0] = int(whole_node_list[0].text, 16)
    hpa_node_list = check_hpa(vm_node_info)
    for hpa_node in hpa_node_list:
        if hpa_node.tag == "start_hpa":
            start_hpa.append(int(hpa_node.text, 16))
        elif hpa_node.tag == "size_hpa":
            size_hpa.append(int(hpa_node.text))
    # Bug fix: the original condition compared len(start_hpa) with itself
    # (always true), which crashed with IndexError when the size list was
    # shorter; pair starts with sizes only when the lists line up.
    if len(start_hpa) != 0 and len(start_hpa) == len(size_hpa):
        for i in range(len(start_hpa)):
            hpa_info[start_hpa[i]] = size_hpa[i]
    return hpa_info
def alloc_memory(scenario_etree, ram_range_info):
    """Assign host physical memory to every pre-launched VM.

    Mutates ram_range_info in place and returns
    (remaining ram ranges, per-VM hpa dicts, matching VM id list).
    """
    vm_node_list = scenario_etree.xpath("/acrn-config/vm[load_order = 'PRE_LAUNCHED_VM']")
    mem_info_list = []
    vm_node_index_list = []
    # Discard all RAM regions starting at or below 4 GiB; allocation uses
    # high memory only.  NOTE(review): "<=" also drops a region starting
    # exactly at 0x100000000 — confirm the boundary is intentional.
    dic_key = sorted(ram_range_info)
    for key in dic_key:
        if key <= 0x100000000:
            ram_range_info.pop(key)
    for vm_node in vm_node_list:
        mem_info = get_memory_info(vm_node)
        mem_info_list.append(mem_info)
        vm_node_index_list.append(vm_node.attrib["id"])
    # First carve out explicitly requested hpa regions, then satisfy
    # size-only requests from whatever ranges remain.
    ram_range_info = alloc_hpa_region(ram_range_info, mem_info_list, vm_node_index_list)
    ram_range_info, mem_info_list = alloc_whole_size(ram_range_info, mem_info_list, vm_node_index_list)
    return ram_range_info, mem_info_list, vm_node_index_list
def alloc_hpa_region(ram_range_info, mem_info_list, vm_node_index_list):
    """Carve each VM's explicitly requested hpa regions out of the free RAM map.

    ram_range_info maps region start -> size; each mem_info dict maps
    requested start (key 0 = size-only request, skipped here) -> size.
    Mutates and returns ram_range_info with the requested spans removed.
    Raises lib.error.ResourceError when a request falls outside every range.
    NOTE(review): mem_key is snapshotted once, so a region deleted for one
    VM can raise KeyError for a later VM requesting the same span.
    """
    mem_key = sorted(ram_range_info)
    for vm_index in range(len(vm_node_index_list)):
        hpa_key = sorted(mem_info_list[vm_index])
        for mem_start in mem_key:
            mem_size = ram_range_info[mem_start]
            for hpa_start in hpa_key:
                hpa_size = mem_info_list[vm_index][hpa_start]
                if hpa_start != 0:
                    if mem_start < hpa_start and mem_start + mem_size > hpa_start + hpa_size:
                        # Request splits the region in two: head before the
                        # request and tail after it.
                        ram_range_info[mem_start] = hpa_start - mem_start
                        # Bug fix: the tail must be keyed by its start address
                        # (hpa_start + hpa_size); the original used the head's
                        # *size* (hpa_start - mem_start) as the key.
                        ram_range_info[hpa_start + hpa_size] = mem_start + mem_size - hpa_start - hpa_size
                    elif mem_start == hpa_start and mem_start + mem_size > hpa_start + hpa_size:
                        # Request at the region head: keep only the tail.
                        del ram_range_info[mem_start]
                        ram_range_info[hpa_start + hpa_size] = mem_start + mem_size - hpa_start - hpa_size
                    elif mem_start < hpa_start and mem_start + mem_size == hpa_start + hpa_size:
                        # Request at the region tail: keep only the head.
                        ram_range_info[mem_start] = hpa_start - mem_start
                    elif mem_start == hpa_start and mem_start + mem_size == hpa_start + hpa_size:
                        # Exact fit: the whole region is consumed.
                        del ram_range_info[mem_start]
                    elif mem_start > hpa_start or mem_start + mem_size < hpa_start + hpa_size:
                        raise lib.error.ResourceError(f"Start address of HPA is out of available memory range: vm id: {vm_index}, hpa_start: {hpa_start}.")
                    elif mem_size < hpa_size:
                        raise lib.error.ResourceError(f"Size of HPA is out of available memory range: vm id: {vm_index}, hpa_size: {hpa_size}.")
    return ram_range_info
def alloc_whole_size(ram_range_info, mem_info_list, vm_node_index_list):
    """Satisfy each VM's size-only request (key 0) from the remaining RAM.

    Walks free regions in ascending address order, assigning whole or
    partial regions until the requested size is consumed; the assigned
    spans replace key 0 in the VM's hpa dict.  Mutates and returns both
    ram_range_info and mem_info_list.
    NOTE(review): if free RAM is exhausted, the residual request stays
    under key 0 with no error raised — confirm downstream handles that.
    """
    for vm_index in range(len(vm_node_index_list)):
        if 0 in mem_info_list[vm_index].keys() and mem_info_list[vm_index][0] != 0:
            remain_size = mem_info_list[vm_index][0]
            hpa_info = mem_info_list[vm_index]
            mem_key = sorted(ram_range_info)
            for mem_start in mem_key:
                mem_size = ram_range_info[mem_start]
                if remain_size != 0 and remain_size <= mem_size:
                    # This region can finish the request: take remain_size
                    # from its head and return any leftover to the free map.
                    del ram_range_info[mem_start]
                    hpa_info[mem_start] = remain_size
                    del hpa_info[0]
                    if mem_size > remain_size:
                        ram_range_info[mem_start + remain_size] = mem_size - remain_size
                    remain_size = 0
                elif remain_size > mem_size:
                    # Region too small: consume it entirely and keep the
                    # shrinking residual under key 0.
                    hpa_info[mem_start] = mem_size
                    del ram_range_info[mem_start]
                    hpa_info[0] = remain_size - mem_size
                    remain_size = hpa_info[0]
    return ram_range_info, mem_info_list
def write_hpa_info(allocation_etree, mem_info_list, vm_node_index_list):
    """Record each VM's allocated hpa regions into the allocation XML tree.

    Creates /acrn-config/vm[@id]/memory/hpa_region[@id] nodes as needed,
    in ascending start-address order; existing nodes are left untouched.
    Sizes are written as hex after scaling by 0x100000 (MB -> bytes).
    """
    for i in range(len(vm_node_index_list)):
        vm_id = vm_node_index_list[i]
        hpa_info = mem_info_list[i]
        vm_node = common.get_node(f"/acrn-config/vm[@id = '{vm_id}']", allocation_etree)
        if vm_node is None:
            vm_node = common.append_node("/acrn-config/vm", None, allocation_etree, id=vm_id)
        memory_node = common.get_node("./memory", vm_node)
        if memory_node is None:
            memory_node = common.append_node(f"./memory", None, vm_node)
        # hpa_region ids are 1-based and follow address order.
        region_index = 1
        start_key = sorted(hpa_info)
        for start_hpa in start_key:
            hpa_region_node = common.get_node(f"./hpa_region[@id='{region_index}']", memory_node)
            if hpa_region_node is None:
                hpa_region_node = common.append_node("./hpa_region", None, memory_node, id=str(region_index).encode('UTF-8'))
            start_hpa_node = common.get_node("./start_hpa", hpa_region_node)
            if start_hpa_node is None:
                common.append_node("./start_hpa", hex(start_hpa), hpa_region_node)
            size_hpa_node = common.get_node("./size_hpa", hpa_region_node)
            if size_hpa_node is None:
                common.append_node("./size_hpa", hex(hpa_info[start_hpa] * 0x100000), hpa_region_node)
            region_index = region_index + 1
def alloc_vm_memory(board_etree, scenario_etree, allocation_etree):
    """Pre-launched-VM memory allocation: read board RAM ranges, carve them
    up per the scenario, and record the result in the allocation tree."""
    ram_range_info = import_memory_info(board_etree)
    ram_range_info, mem_info_list, vm_node_index_list = alloc_memory(scenario_etree, ram_range_info)
    write_hpa_info(allocation_etree, mem_info_list, vm_node_index_list)
def allocate_hugepages(board_etree, scenario_etree, allocation_etree):
    """Compute the Service VM's 1 GB / 2 MB hugepage counts and store them
    under the SERVICE_VM node of the allocation tree.

    Budget (in GB) = RAM at/above 4 GiB, minus pre-launched hpa sizes
    (treated as MB here — confirm units), minus 4 GB reserved, minus
    300 MB per virtio GPU.  Post-launched VM sizes consume the budget
    first; any remainder is split into 1 GB + 2 MB pages.
    """
    hugepages_1gb = 0
    hugepages_2mb = 0
    ram_range_info = import_memory_info(board_etree)
    total_hugepages = sum(ram_range_info[i] for i in ram_range_info if i >= 0x100000000)/(1024*1024*1024) \
                        - sum(int(i) for i in scenario_etree.xpath("//vm[load_order = 'PRE_LAUNCHED_VM']/memory/hpa_region/size_hpa/text()"))/1024 \
                        - 4 - 300/1024 * len(scenario_etree.xpath("//virtio_devices/gpu"))
    post_launch_vms = scenario_etree.xpath("//vm[load_order = 'POST_LAUNCHED_VM']")
    if len(post_launch_vms) > 0:
        for post_launch_vm in post_launch_vms:
            size = common.get_node("./memory/size/text()", post_launch_vm)
            if size is not None:
                # Split the VM size (MB) into whole GBs and a fractional part.
                mb, gb = math.modf(int(size)/1024)
                hugepages_1gb = int(hugepages_1gb + gb)
                hugepages_2mb = int(hugepages_2mb + math.ceil(mb * 1024 / 2))
    post_vms_memory = sum(int(i) for i in scenario_etree.xpath("//vm[load_order = 'POST_LAUNCHED_VM']/memory/size/text()")) / 1024
    # Leftover budget after post-launched VMs, split into GB and MB parts.
    correction_mb, correction_gb = math.modf(total_hugepages - post_vms_memory)
    if total_hugepages - post_vms_memory < 0:
        logging.warning(f"The sum {post_vms_memory} of memory configured in post launch VMs should not be larger than " \
        f"the calculated total hugepages {total_hugepages} of service VMs. Please update the configuration in post launch VMs")
    hugepages_1gb = hugepages_1gb + correction_gb
    hugepages_2mb = hugepages_2mb + math.ceil(correction_mb * 1024 / 2)
    allocation_service_vm_node = common.get_node("/acrn-config/vm[load_order = 'SERVICE_VM']", allocation_etree)
    if allocation_service_vm_node is not None:
        common.append_node("./hugepages/gb", int(hugepages_1gb), allocation_service_vm_node)
        common.append_node("./hugepages/mb", int(hugepages_2mb), allocation_service_vm_node)
def fn(board_etree, scenario_etree, allocation_etree):
    """Static-allocator entry point invoked by the config-tool pipeline."""
    alloc_vm_memory(board_etree, scenario_etree, allocation_etree)
    allocate_hugepages(board_etree, scenario_etree, allocation_etree)
| 48.382979 | 155 | 0.659631 |
795998e04f6a260acd626434ea03c5f61c04bd5e | 4,133 | py | Python | main.py | gigae-Cyon/PearlAbyssNewsArchive | f929d90dc32d807de28fc48ed3d4966b5e8a5471 | [
"MIT"
] | null | null | null | main.py | gigae-Cyon/PearlAbyssNewsArchive | f929d90dc32d807de28fc48ed3d4966b5e8a5471 | [
"MIT"
] | null | null | null | main.py | gigae-Cyon/PearlAbyssNewsArchive | f929d90dc32d807de28fc48ed3d4966b5e8a5471 | [
"MIT"
] | null | null | null | import requests
from bs4 import BeautifulSoup
import pickle
import re, datetime
# 뉴스 중복 확인
def duplication_check(new_news, saved_news_list):
    """Return True if *new_news* has not been seen before.

    Side effect: unseen titles are appended to *saved_news_list* in place,
    so subsequent calls with the same title return False.
    """
    if new_news not in saved_news_list:
        saved_news_list.append(new_news)
        return True
    return False
# 기사 날짜, 시간 표현 (시간정보가 '~전'인 경우)
def get_released_time1(current_time, time_info):
    """Convert a relative Korean timestamp ("N분 전" / "N시간 전" / "N일 전")
    into a 'YYYY-MM-DD HH시' string measured back from *current_time*.

    Day-granularity articles are pinned to 00시 so same-day items sort
    together.  Bug fix: the original returned an empty string for
    seconds-ago ("N초 전") articles; those now map to the current hour.
    """
    p = re.compile(r'^[\d]*')
    m = p.search(time_info)
    number = int(time_info[:m.end()])
    # First character after the digits selects the unit: 분/시/일/초.
    korean = time_info[m.end()]
    # News sites embed the publication date in differing tags (and some show
    # the current date before the article date), so the relative "N ago"
    # string is the only reliable timing information available.
    if korean == '분':  # N minutes ago
        released_time = current_time - datetime.timedelta(minutes=number)
        return_string = released_time.strftime('%Y-%m-%d %H시')
    elif korean == '시':  # N hours ago
        released_time = current_time - datetime.timedelta(hours=number)
        return_string = released_time.strftime('%Y-%m-%d %H시')
    elif korean == '일':  # N days ago
        released_time = current_time - datetime.timedelta(days=number)
        # Pinned to 00시 so articles sort by day.
        return_string = released_time.strftime('%Y-%m-%d 00시')
    else:  # seconds-ago articles: effectively "just now"
        return_string = current_time.strftime('%Y-%m-%d %H시')
    return return_string
# 기사 날짜, 시간 표현 (시간정보가 '20xx.xx.xx'인 경우)
def get_released_time2(date_str):
    """Convert an absolute date string like '2021.12.14.' into
    'YYYY-MM-DD 00시' (00시 so articles sort by day)."""
    year, month, day = date_str[:4], date_str[5:7], date_str[8:10]
    return f'{year}-{month}-{day} 00시'
# 기사의 시간순 정렬을 위한 연,월,일,시간 정보
def get_time_members(line):
    """Extract (year, month, day, hour) as ints from the first
    'YYYY-MM-DD HH' timestamp found in line.

    Raises AttributeError when line contains no such timestamp (same as
    the original behaviour on a failed regex search).
    """
    match = re.search(r'(\d{4})-(\d{2})-(\d{2}) (\d{2})', line)
    return tuple(int(part) for part in match.groups())
# Request the Naver news search page (query: URL-encoded "펄어비스",
# sorted by recency).
url = 'https://search.naver.com/search.naver?where=news&query=%ED%8E%84%EC%96%B4%EB%B9%84%EC%8A%A4&sm=tab_tmr&nso=so:r,p:all,a:all&sort=0'
res = requests.get(url)
res.raise_for_status()
current_time = datetime.datetime.today()
# Locate the news-list container in the search results.
soup = BeautifulSoup(res.text, 'lxml')
news_container = soup.find('ul', attrs={'class':'list_news'})
list_news = news_container.find_all('li', attrs={'class':'bx'})
# Load the list of already-saved news titles (used to skip duplicates);
# start with an empty list when the pickle does not exist yet.
# NOTE(review): the 'finally' clause prints "loaded successfully" even
# when loading failed and a fresh list was created.
try:
    saved_news_file = open('saved_news_list.pickle', 'rb')
    saved_news_list = pickle.load(saved_news_file)
    saved_news_file.close()
except Exception:
    saved_news_list = list()
    print('new list created')
finally:
    print('list loaded successfully')
# Compare the first 14 characters of each title to detect duplicates and
# append every new article to the HTML archive file.
with open('pana.html', 'a', encoding='utf-8') as f:
    for news in list_news:
        news_link = news.find('a', attrs={'class':'news_tit'})
        if duplication_check(news_link.get_text()[:14], saved_news_list):  # titles are long, so only the first 14 chars are compared
            try:
                # Recent articles show a relative time such as "n분 전";
                # a span with class 'info' also exists but may contain
                # print-edition page/column info instead.
                time_info = news.find('span', text=re.compile(' 전$'))
                time_str = get_released_time1(current_time, time_info.get_text())
            except AttributeError:
                # Older articles show an absolute date like '2021.12.14.'
                # instead of a relative "... 전" string.
                time_info = news.find('span', text=re.compile('\d{4}.\d{2}.\d{2}.'))
                time_str = get_released_time2(time_info.get_text())
            finally:
                f.write('<h3><a href="' + news_link['href'] + '" target="blank">' + news_link['title'] + ', ' +
                        time_str + '</a></h3>')
                f.write('<br/>')
                f.write('\n')
# Persist the updated list of saved news titles.
with open('saved_news_list.pickle', 'wb') as f:
    pickle.dump(saved_news_list, f)
    print('dump successed')
# From here on: sort the archived articles chronologically.
# Build a list of [html_line, year, month, day, hour] entries.
with open('pana.html', 'r', encoding='utf8') as f:
    file_data = f.readlines()
    lines = list()
    for data in file_data:
        lines.append([data])
    for idx, data in enumerate(file_data):
        year, month, day, hour = get_time_members(data)
        lines[idx].append(year)
        lines[idx].append(month)
        lines[idx].append(day)
        lines[idx].append(hour)
# Sort the entries by publication time.
lines.sort(key=lambda x: (x[1], x[2], x[3], x[4]))
# Rewrite the archive in sorted order.
with open('pana.html', 'w', encoding='utf8') as f:
    for line in lines:
        f.write(line[0])
795999976f9cd352322a3ed99857547f7ae7772d | 1,994 | py | Python | tests/py/test_for_community_json.py | webmaven/gratipay.com | 31f6bcf903029895a4c56290aedde755e852c82f | [
"CC0-1.0"
] | 1 | 2019-10-09T10:13:53.000Z | 2019-10-09T10:13:53.000Z | tests/py/test_for_community_json.py | webmaven/gratipay.com | 31f6bcf903029895a4c56290aedde755e852c82f | [
"CC0-1.0"
] | null | null | null | tests/py/test_for_community_json.py | webmaven/gratipay.com | 31f6bcf903029895a4c56290aedde755e852c82f | [
"CC0-1.0"
] | null | null | null | import json
from gratipay.models.community import slugize
from gratipay.testing import Harness
class TestForCommunityJson(Harness):
def setUp(self):
Harness.setUp(self)
self.add_participant('alice')
self.add_participant('bob')
carl = self.add_participant('carl')
carl.insert_into_communities(False, 'test', 'test')
def add_participant(self, participant_name):
participant = self.make_participant(participant_name)
participant.insert_into_communities(True, 'test', slugize('test'))
return participant
def test_get_non_existing_community(self):
response = self.client.GxT('/for/NonExisting/index.json')
assert response.code == 404
def test_get_existing_community(self):
response = self.client.GET('/for/test/index.json')
result = json.loads(response.body)
assert len(result['members']) == 2
assert result['name'] == 'test'
def test_post_not_supported(self):
response = self.client.PxST('/for/test/index.json')
assert response.code == 405
def test_limit(self):
response = self.client.GET('/for/test/index.json?limit=1')
result = json.loads(response.body)
assert len(result['members']) == 1
def test_offset(self):
response = self.client.GET('/for/test/index.json?offset=1')
result = json.loads(response.body)
assert len(result['members']) == 1
def test_max_limit(self):
for i in range(110):
self.add_participant(str(i))
response = self.client.GET('/for/test/index.json?limit=200')
result = json.loads(response.body)
assert len(result['members']) == 100
def test_invalid_limit(self):
response = self.client.GxT('/for/test/index.json?limit=abc')
assert response.code == 400
def test_invalid_offset(self):
response = self.client.GxT('/for/test/index.json?offset=abc')
assert response.code == 400
| 33.79661 | 74 | 0.653962 |
795999b8a086d2a92c7c0d0019a508d781dcdb36 | 4,889 | py | Python | code/visualization/2020/04/0_0_compression_tucker_sparse_facto_select_lr.py | lucgiffon/psm-nets | dec43c26281febf6e5c8b8f42bfb78098ae7101d | [
"MIT"
] | 1 | 2021-07-15T07:05:18.000Z | 2021-07-15T07:05:18.000Z | code/visualization/2020/04/0_0_compression_tucker_sparse_facto_select_lr.py | lucgiffon/psm-nets | dec43c26281febf6e5c8b8f42bfb78098ae7101d | [
"MIT"
] | 2 | 2021-07-15T06:12:47.000Z | 2021-07-16T10:05:36.000Z | code/visualization/2020/04/0_0_compression_tucker_sparse_facto_select_lr.py | lucgiffon/psm-nets | dec43c26281febf6e5c8b8f42bfb78098ae7101d | [
"MIT"
] | null | null | null | import pathlib
import pandas as pd
from palmnet.visualization.utils import get_palminized_model_and_df, get_df
import matplotlib.pyplot as plt
import numpy as np
import logging
import plotly.graph_objects as go
import plotly.express as px
from pprint import pprint as pprint
# Matplotlib logs verbosely at DEBUG level; keep only errors.
mpl_logger = logging.getLogger('matplotlib')
mpl_logger.setLevel(logging.ERROR)
# Display name of each dataset -> CLI-flag column in the results dataframe
# (the experiment dataframe uses one-hot columns named after CLI options).
dataset = {
    "Cifar10": "--cifar10",
    "Cifar100": "--cifar100",
    "SVHN": "--svhn",
    "MNIST": "--mnist"
}
# Base-model flag columns available for each dataset.
basemodels = {
    "Cifar100": ["--cifar100-vgg19", "--cifar100-resnet20", "--cifar100-resnet50"],
    "Cifar10": ["--cifar10-vgg19"],
    "SVHN": ["--svhn-vgg19"],
    "MNIST": ["--mnist-lenet"]
}
def show_for_tucker():
    """For every (dataset, base model) experiment run, smooth the training
    log and select a learning rate.

    Reads the module-level globals `df`, `dataset` and `basemodels`. For
    each run, the per-epoch CSV log is smoothed with rolling means, the
    first difference of the loss is computed, and the lr that minimises
    the loss decrease is rounded to the nearest power of ten. Prints
    `dct_config_lr` (run name -> selected lr) and `lst_name_trace_low`
    (runs with fewer than 10 usable points, which are skipped).
    """
    # compression_method = ["tucker", "tensortrain"]
    # df = df.apply(pd.to_numeric, errors='coerce')
    dct_config_lr = dict()
    lst_name_trace_low = list()
    for dataname in dataset:
        df_data = df[df[dataset[dataname]] == 1]
        for base_model_name in basemodels[dataname]:
            df_model = df_data[df_data[base_model_name] == 1]
            for index, row in df_model.iterrows():
                fig = go.Figure()
                # Per-epoch training log written by the CSV callback printer.
                csv_file = pathlib.Path(row["results_dir"]) / row["output_file_csvcbprinter"]
                df_csv = pd.read_csv(csv_file)
                win_size = 5
                # Smooth log10(lr) and the loss with a rolling mean.
                lr_values = df_csv["lr"].values
                lr_values_log = np.log10(lr_values)
                lr_rolling_mean = pd.Series(lr_values_log).rolling(window=win_size).mean().iloc[win_size - 1:].values
                loss_rolling_mean = df_csv["loss"].rolling(window=win_size).mean().iloc[win_size - 1:].values
                if all(np.isnan(loss_rolling_mean)):
                    continue
                # First difference of the smoothed loss (loss change per step),
                # then smooth again with the same window.
                delta_loss = (np.hstack([loss_rolling_mean, [0]]) - np.hstack([[0], loss_rolling_mean]))[1:-1]
                delta_loss_rolling_mean = pd.Series(delta_loss).rolling(window=win_size).mean().iloc[win_size - 1:].values
                lr_rolling_mean_2x = pd.Series(lr_rolling_mean).rolling(window=win_size).mean().iloc[win_size - 1:].values
                lr_rolling_mean_2x_exp = 10 ** lr_rolling_mean_2x
                # fig.add_trace(go.Scatter(x=lr_rolling_mean_exp, y=loss_rolling_mean, name="sp_fac {} - hiearchical {}".format(row["--sparsity-factor"], row["--hierarchical"])))
                fig.add_trace(go.Scatter(x=lr_rolling_mean_2x_exp[:-1], y=delta_loss_rolling_mean, name=""))
                # lr at the steepest loss decrease, rounded to a power of ten.
                argmin_loss = np.argmin(delta_loss_rolling_mean)
                val = lr_rolling_mean_2x_exp[:-1][argmin_loss]
                log_val = np.log10(val)
                approx = 10 ** np.around(log_val, decimals=0)
                sparsity = int(row["--sparsity-factor"])
                hierarchical = bool(row["--hierarchical"])
                str_hierarchical = " H" if hierarchical else ""
                try:
                    nb_fac = int(row["--nb-factor"])
                except ValueError:
                    # "--nb-factor" may be unset/non-numeric for some runs.
                    nb_fac = None
                name_trace = f"tucker_sparse_facto-{dataset[dataname]}-{base_model_name}-Q={nb_fac}-K={sparsity}{str_hierarchical}"
                print(len(delta_loss_rolling_mean), name_trace)
                if len(delta_loss_rolling_mean) < 10:
                    # Too few points to trust the estimate; record and skip.
                    lst_name_trace_low.append(name_trace)
                    continue
                dct_config_lr[name_trace] = approx
                # title_str = "{}:{} - {} - keep first :{}".format(dataname, base_model_name, "tucker", keep_first)
                fig.update_layout(barmode='group',
                                  title=name_trace,
                                  xaxis_title="lr",
                                  yaxis_title="loss",
                                  xaxis_type="log",
                                  xaxis={'type': 'category'},
                                  )
                # fig.show()
    pprint(dct_config_lr)
    pprint(lst_name_trace_low)
if __name__ == "__main__":
    # Gather experiment results (regular runs + error reruns), drop
    # failed/duplicate rows, then run the lr-selection analysis.
    root_source_dir = pathlib.Path("/home/luc/PycharmProjects/palmnet/results/")
    expe_path = "2020/04/0_0_compression_tucker_sparse_facto_select_lr"
    expe_path_errors = "2020/04/0_0_compression_tucker_sparse_facto_select_lr_errors"
    src_results_dir = root_source_dir / expe_path
    src_results_dir_errors = root_source_dir / expe_path_errors
    # Attach the results directory to each row so the per-run CSV logs
    # can be located later by show_for_tucker.
    get_df_and_assign = lambda x: get_df(x).assign(results_dir=str(x))
    df = get_df_and_assign(src_results_dir)
    df_errors = get_df_and_assign(src_results_dir_errors)
    df = pd.concat([df, df_errors])
    # Keep only runs that completed without failure.
    df = df.dropna(subset=["failure"])
    df = df[df["failure"] == 0]
    df = df.drop(columns="oar_id").drop_duplicates()
    root_output_dir = pathlib.Path("/home/luc/PycharmProjects/palmnet/reports/figures/")
    output_dir = root_output_dir / expe_path / "line_plots"
    output_dir.mkdir(parents=True, exist_ok=True)
    show_for_tucker()
79599a1fd5e5704b5861c125ebec485dcace616e | 13,283 | py | Python | chia/cmds/init_funcs.py | namelessperson0/silicoin-blockchain | 85db78631f9ea5571a840a226d503fd2bda20356 | [
"Apache-2.0"
] | null | null | null | chia/cmds/init_funcs.py | namelessperson0/silicoin-blockchain | 85db78631f9ea5571a840a226d503fd2bda20356 | [
"Apache-2.0"
] | null | null | null | chia/cmds/init_funcs.py | namelessperson0/silicoin-blockchain | 85db78631f9ea5571a840a226d503fd2bda20356 | [
"Apache-2.0"
] | null | null | null | import os
import shutil
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
import yaml
from chia import __version__
from chia.consensus.coinbase import create_puzzlehash_for_pk
from chia.ssl.create_ssl import generate_ca_signed_cert, get_chia_ca_crt_key, make_ca_cert
from chia.util.bech32m import encode_puzzle_hash
from chia.util.config import (
create_default_chia_config,
initial_config_file,
load_config,
save_config,
unflatten_properties,
)
from chia.util.ints import uint32
from chia.util.keychain import Keychain
from chia.util.path import mkdir
from chia.wallet.derive_keys import master_sk_to_pool_sk, master_sk_to_wallet_sk
private_node_names = {"full_node", "wallet", "farmer", "harvester", "timelord", "daemon"}
public_node_names = {"full_node", "wallet", "farmer", "introducer", "timelord"}
def dict_add_new_default(updated: Dict, default: Dict, do_not_migrate_keys: Dict[str, Any]):
    """Recursively merge `default` into `updated` in place.

    Keys already present in `updated` are kept, except those listed in
    `do_not_migrate_keys`: the sentinel value "" resets the key to the
    default, a dict value restricts only the matching subtree, and any
    other value forces the default for that key.
    Note: `do_not_migrate_keys` itself may be mutated ("" entries are
    replaced by the default subtree before recursing).
    """
    # Drop keys explicitly marked for reset with the "" sentinel.
    for key in do_not_migrate_keys:
        if key in updated and do_not_migrate_keys[key] == "":
            updated.pop(key)
    for key, default_value in default.items():
        marker = do_not_migrate_keys.get(key)
        # A non-dict marker means "do not carry this key over as-is".
        blocked = key in do_not_migrate_keys and not isinstance(marker, dict)
        if isinstance(default_value, dict) and key in updated and not blocked:
            # An intermediate "" marker blocks the whole subtree below it.
            if do_not_migrate_keys.get(key, None) == "":
                do_not_migrate_keys[key] = default_value
            dict_add_new_default(updated[key], default[key], do_not_migrate_keys.get(key, {}))
        elif key not in updated or blocked:
            updated[key] = default_value
def check_keys(new_root: Path) -> None:
    """Cross-check the OS keychain against config.yaml under new_root.

    Fills in missing farmer/pool reward addresses from the first derived
    wallet address, warns when a configured address is not derivable from
    any stored key (the first 500 derivation indices are searched), merges
    the derived pool public keys into the farmer section, and saves the
    updated config back to disk. No-op (with a hint) when no keys exist.
    """
    keychain: Keychain = Keychain()
    all_sks = keychain.get_all_private_keys()
    if len(all_sks) == 0:
        print("No keys are present in the keychain. Generate them with 'silicoin keys generate'")
        return None
    config: Dict = load_config(new_root, "config.yaml")
    pool_child_pubkeys = [master_sk_to_pool_sk(sk).get_g1() for sk, _ in all_sks]
    all_targets = []
    # Stop searching early when the config has no address to match.
    stop_searching_for_farmer = "xch_target_address" not in config["farmer"]
    stop_searching_for_pool = "xch_target_address" not in config["pool"]
    number_of_ph_to_search = 500
    selected = config["selected_network"]
    prefix = config["network_overrides"]["config"][selected]["address_prefix"]
    # Derive addresses for each key and derivation index until the
    # configured addresses have been found (or the search limit is hit).
    for i in range(number_of_ph_to_search):
        if stop_searching_for_farmer and stop_searching_for_pool and i > 0:
            break
        for sk, _ in all_sks:
            all_targets.append(
                encode_puzzle_hash(create_puzzlehash_for_pk(master_sk_to_wallet_sk(sk, uint32(i)).get_g1()), prefix)
            )
            if all_targets[-1] == config["farmer"].get("xch_target_address"):
                stop_searching_for_farmer = True
            if all_targets[-1] == config["pool"].get("xch_target_address"):
                stop_searching_for_pool = True
    # Set the destinations
    if "xch_target_address" not in config["farmer"]:
        print(f"Setting the xch destination address for coinbase fees reward to {all_targets[0]}")
        config["farmer"]["xch_target_address"] = all_targets[0]
    elif config["farmer"]["xch_target_address"] not in all_targets:
        print(
            f"WARNING: using a farmer address which we don't have the private"
            f" keys for. We searched the first {number_of_ph_to_search} addresses. Consider overriding "
            f"{config['farmer']['xch_target_address']} with {all_targets[0]}"
        )
    # NOTE(review): config["pool"] was already read above, so this guard
    # can only help if that read succeeded — confirm intended behaviour.
    if "pool" not in config:
        config["pool"] = {}
    if "xch_target_address" not in config["pool"]:
        print(f"Setting the xch destination address for coinbase reward to {all_targets[0]}")
        config["pool"]["xch_target_address"] = all_targets[0]
    elif config["pool"]["xch_target_address"] not in all_targets:
        print(
            f"WARNING: using a pool address which we don't have the private"
            f" keys for. We searched the first {number_of_ph_to_search} addresses. Consider overriding "
            f"{config['pool']['xch_target_address']} with {all_targets[0]}"
        )
    # Set the pool pks in the farmer
    pool_pubkeys_hex = set(bytes(pk).hex() for pk in pool_child_pubkeys)
    if "pool_public_keys" in config["farmer"]:
        for pk_hex in config["farmer"]["pool_public_keys"]:
            # Add original ones in config
            pool_pubkeys_hex.add(pk_hex)
    config["farmer"]["pool_public_keys"] = pool_pubkeys_hex
    save_config(new_root, "config.yaml", config)
def copy_files_rec(old_path: Path, new_path: Path):
    """Recursively copy old_path (file or directory tree) to new_path.

    Destination directories are created as needed and each copied file is
    printed; paths that are neither file nor directory are ignored.
    """
    if old_path.is_dir():
        for child in old_path.iterdir():
            copy_files_rec(child, new_path / child.name)
    elif old_path.is_file():
        print(f"{new_path}")
        mkdir(new_path.parent)
        shutil.copy(old_path, new_path)
def migrate_from(
    old_root: Path,
    new_root: Path,
    manifest: List[str],
    do_not_migrate_settings: List[str],
):
    """
    Copy all the files in "manifest" to the new config directory.

    Then merge any new default config keys into the migrated config.yaml
    (keys listed in do_not_migrate_settings are reset to their defaults)
    and regenerate the SSL material. Returns 1 on success (including the
    old_root == new_root no-op case) and 0 when old_root does not exist.
    """
    if old_root == new_root:
        # Migrating onto itself: nothing to copy, treated as success.
        print("same as new path, exiting")
        return 1
    if not old_root.is_dir():
        print(f"{old_root} not found - this is ok if you did not install this version")
        return 0
    print(f"\n{old_root} found")
    print(f"Copying files from {old_root} to {new_root}\n")
    for f in manifest:
        old_path = old_root / f
        new_path = new_root / f
        copy_files_rec(old_path, new_path)
    # update config yaml with new keys
    config: Dict = load_config(new_root, "config.yaml")
    config_str: str = initial_config_file("config.yaml")
    default_config: Dict = yaml.safe_load(config_str)
    # Mark the do-not-migrate settings with the "" sentinel understood by
    # dict_add_new_default.
    flattened_keys = unflatten_properties({k: "" for k in do_not_migrate_settings})
    dict_add_new_default(config, default_config, flattened_keys)
    save_config(new_root, "config.yaml", config)
    create_all_ssl(new_root)
    return 1
def create_all_ssl(root: Path):
    """(Re)generate the SSL certificate tree under root/config/ssl.

    Removes obsolete trusted.key/trusted.crt files from older installs,
    installs the bundled public chia CA, creates a private CA when one is
    not already present, then issues per-service certificates signed by
    both CAs. Public (chia-CA-signed) certificates are only created when
    missing; private ones are always regenerated.
    """
    config_dir = root / "config"
    # Remove legacy trust material from older installs.
    old_key_path = config_dir / "trusted.key"
    old_crt_path = config_dir / "trusted.crt"
    if old_key_path.exists():
        print(f"Old key not needed anymore, deleting {old_key_path}")
        os.remove(old_key_path)
    if old_crt_path.exists():
        print(f"Old crt not needed anymore, deleting {old_crt_path}")
        os.remove(old_crt_path)
    ssl_dir = config_dir / "ssl"
    if not ssl_dir.exists():
        ssl_dir.mkdir()
    ca_dir = ssl_dir / "ca"
    if not ca_dir.exists():
        ca_dir.mkdir()
    # Install the bundled public chia CA.
    chia_ca_crt, chia_ca_key = get_chia_ca_crt_key()
    chia_ca_crt_path = ca_dir / "chia_ca.crt"
    chia_ca_key_path = ca_dir / "chia_ca.key"
    chia_ca_crt_path.write_bytes(chia_ca_crt)
    chia_ca_key_path.write_bytes(chia_ca_key)
    # Create the private CA when absent; either way the private service
    # certificates are signed with it below. (The original duplicated the
    # read + generate calls in both branches and fetched the chia CA twice.)
    private_ca_key_path = ca_dir / "private_ca.key"
    private_ca_crt_path = ca_dir / "private_ca.crt"
    if not private_ca_key_path.exists() or not private_ca_crt_path.exists():
        print(f"Can't find private CA, creating a new one in {root} to generate TLS certificates")
        make_ca_cert(private_ca_crt_path, private_ca_key_path)
    else:
        # This is entered when the user copied over a private CA.
        print(f"Found private CA in {root}, using it to generate TLS certificates")
    ca_key = private_ca_key_path.read_bytes()
    ca_crt = private_ca_crt_path.read_bytes()
    generate_ssl_for_nodes(ssl_dir, ca_crt, ca_key, True)
    # Public certs are signed by the shared chia CA and never overwritten.
    generate_ssl_for_nodes(ssl_dir, chia_ca_crt, chia_ca_key, False, overwrite=False)
def generate_ssl_for_nodes(ssl_dir: Path, ca_crt: bytes, ca_key: bytes, private: bool, overwrite=True):
    """Issue one key/cert pair per service, signed by the given CA.

    `private` selects both the service-name set and the file-name prefix.
    Existing pairs are left untouched unless overwrite is True.
    """
    prefix = "private" if private else "public"
    names = private_node_names if private else public_node_names
    for node_name in names:
        node_dir = ssl_dir / node_name
        if not node_dir.exists():
            node_dir.mkdir()
        key_path = node_dir / f"{prefix}_{node_name}.key"
        crt_path = node_dir / f"{prefix}_{node_name}.crt"
        if overwrite is False and key_path.exists() and crt_path.exists():
            continue
        generate_ca_signed_cert(ca_crt, ca_key, crt_path, key_path)
def copy_cert_files(cert_path: Path, new_path: Path):
    """Copy every *.crt and *.key file from cert_path into new_path."""
    for pattern in ("*.crt", "*.key"):
        for cert_file in cert_path.glob(pattern):
            copy_files_rec(cert_file, new_path / cert_file.name)
def init(create_certs: Optional[Path], root_path: Path):
    """Initialize the root_path config directory.

    With create_certs set and root_path existing: replace the private CA
    with the one found in create_certs and re-issue all certificates. If
    root_path does not exist yet, run the core chia_init first and retry
    once. Without create_certs, simply run chia_init.
    Returns -1 on failure paths, otherwise chia_init's result (or None
    after a successful certificate copy).
    """
    if create_certs is not None:
        if root_path.exists():
            if os.path.isdir(create_certs):
                ca_dir: Path = root_path / "config/ssl/ca"
                if ca_dir.exists():
                    print(f"Deleting your OLD CA in {ca_dir}")
                    shutil.rmtree(ca_dir)
                print(f"Copying your CA from {create_certs} to {ca_dir}")
                copy_cert_files(create_certs, ca_dir)
                create_all_ssl(root_path)
            else:
                print(f"** Directory {create_certs} does not exist **")
        else:
            print(f"** {root_path} does not exist. Executing core init **")
            # sanity check here to prevent infinite recursion
            if chia_init(root_path) == 0 and root_path.exists():
                return init(create_certs, root_path)
            print(f"** {root_path} was not created. Exiting **")
            return -1
    else:
        return chia_init(root_path)
def chia_version_number() -> Tuple[str, str, str, str]:
    """Split __version__ (a setuptools-scm style string) into
    (major, minor, patch, dev) release-number strings.

    Beta ("0bN") and release-candidate ("0rcN") minor versions are mapped
    back to human release numbers; an optional ".devN" patch part becomes
    the dev suffix.
    """
    scm_full_version = __version__
    # Discard any "+local" metadata suffix before splitting on dots.
    left_full_version = scm_full_version.split("+")
    version = left_full_version[0].split(".")
    scm_major_version = version[0]
    scm_minor_version = version[1]
    if len(version) > 2:
        smc_patch_version = version[2]
        patch_release_number = smc_patch_version
    else:
        smc_patch_version = ""
    # Defaults: every branch below reassigns major/minor/patch, but the
    # empty dev suffix survives when no ".devN" part is present.
    major_release_number = scm_major_version
    minor_release_number = scm_minor_version
    dev_release_number = ""
    # If this is a beta dev release - get which beta it is
    if "0b" in scm_minor_version:
        original_minor_ver_list = scm_minor_version.split("0b")
        major_release_number = str(1 - int(scm_major_version))  # decrement the major release for beta
        minor_release_number = scm_major_version
        patch_release_number = original_minor_ver_list[1]
        if smc_patch_version and "dev" in smc_patch_version:
            dev_release_number = "." + smc_patch_version
    elif "0rc" in version[1]:
        original_minor_ver_list = scm_minor_version.split("0rc")
        major_release_number = str(1 - int(scm_major_version))  # decrement the major release for release candidate
        minor_release_number = str(int(scm_major_version) + 1)  # RC is 0.2.1 for RC 1
        patch_release_number = original_minor_ver_list[1]
        if smc_patch_version and "dev" in smc_patch_version:
            dev_release_number = "." + smc_patch_version
    else:
        major_release_number = scm_major_version
        minor_release_number = scm_minor_version
        patch_release_number = smc_patch_version
        dev_release_number = ""
    # NOTE(review): install_release_number is assembled below but never
    # returned or used — candidate for removal, confirm with callers.
    install_release_number = major_release_number + "." + minor_release_number
    if len(patch_release_number) > 0:
        install_release_number += "." + patch_release_number
    if len(dev_release_number) > 0:
        install_release_number += dev_release_number
    return major_release_number, minor_release_number, patch_release_number, dev_release_number
def chia_minor_release_number():
    """Print and return the patch component of the chia version as an int."""
    patch_component = chia_version_number()[2]
    res = int(patch_component)
    print(f"Install release number: {res}")
    return res
def chia_full_version_str() -> str:
    """Return the full version as 'major.minor.patch' plus any dev suffix."""
    parts = chia_version_number()
    return "{}.{}.{}{}".format(*parts)
def chia_init(root_path: Path):
    """Create the chia root directory: default config, SSL tree, key check.

    Returns 0 on a fresh initialization and -1 when root_path already
    holds a config.yaml (in that case only the key check runs). Warns
    when CHIA_ROOT is set, since that overrides the path being created.
    """
    if os.environ.get("CHIA_ROOT", None) is not None:
        print(
            f"warning, your CHIA_ROOT is set to {os.environ['CHIA_ROOT']}. "
            f"Please unset the environment variable and run silicoin init again\n"
            f"or manually migrate config.yaml"
        )
    print(f"Chia directory {root_path}")
    if root_path.is_dir() and Path(root_path / "config" / "config.yaml").exists():
        # This is reached if CHIA_ROOT is set, or if user has run silicoin init twice
        # before a new update.
        check_keys(root_path)
        print(f"{root_path} already exists, no migration action taken")
        return -1
    create_default_chia_config(root_path)
    create_all_ssl(root_path)
    check_keys(root_path)
    print("")
    print("To see your keys, run 'silicoin keys show --show-mnemonic-seed'")
    return 0
| 39.182891 | 116 | 0.669126 |
79599a44cc658e29653c5313a69357554683f2a6 | 1,844 | py | Python | warehouse/migrations/versions/590c513f1c74_new_psf_staff_boolean_flag.py | fairhopeweb/warehouse | 7d8ef742e8fe6b401190c28ce56761848041c89f | [
"Apache-2.0"
] | 3,103 | 2015-01-30T00:24:10.000Z | 2022-03-31T23:21:39.000Z | warehouse/migrations/versions/590c513f1c74_new_psf_staff_boolean_flag.py | fairhopeweb/warehouse | 7d8ef742e8fe6b401190c28ce56761848041c89f | [
"Apache-2.0"
] | 6,709 | 2015-01-05T01:23:20.000Z | 2022-03-31T14:49:46.000Z | warehouse/migrations/versions/590c513f1c74_new_psf_staff_boolean_flag.py | fairhopeweb/warehouse | 7d8ef742e8fe6b401190c28ce56761848041c89f | [
"Apache-2.0"
] | 959 | 2015-01-12T22:22:40.000Z | 2022-03-31T22:21:51.000Z | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Add psf_staff boolean flag
Revision ID: 590c513f1c74
Revises: d0c22553b338
Create Date: 2021-06-07 11:49:50.688410
"""
import sqlalchemy as sa
from alembic import op
revision = "590c513f1c74"
down_revision = "d0c22553b338"
# Note: It is VERY important to ensure that a migration does not lock for a
# long period of time and to ensure that each individual migration does
# not break compatibility with the *previous* version of the code base.
# This is because the migrations will be ran automatically as part of the
# deployment process, but while the previous version of the code is still
# up and running. Thus backwards incompatible changes must be broken up
# over multiple migrations inside of multiple pull requests in order to
# phase them in over multiple deploys.
def upgrade():
    """Apply the migration: add the users.is_psf_staff column.

    Non-null boolean with a server-side default of false, so existing
    rows and inserts from the previous code version stay valid while the
    deploy is in flight.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column(
        "users",
        sa.Column(
            "is_psf_staff",
            sa.Boolean(),
            server_default=sa.sql.false(),
            nullable=False,
        ),
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop the users.is_psf_staff column."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column("users", "is_psf_staff")
    # ### end Alembic commands ###
| 33.527273 | 79 | 0.697397 |
79599c7b764f03a1731f4d840e697ef3da6d99ac | 11,447 | py | Python | src/acousticfield/process.py | meguia/acousticfield | 4028aab25ae62ea9018868c57ee44b3e4c51ff85 | [
"MIT"
] | null | null | null | src/acousticfield/process.py | meguia/acousticfield | 4028aab25ae62ea9018868c57ee44b3e4c51ff85 | [
"MIT"
] | null | null | null | src/acousticfield/process.py | meguia/acousticfield | 4028aab25ae62ea9018868c57ee44b3e4c51ff85 | [
"MIT"
] | null | null | null | import numpy as np
from scipy import signal
from scipy.io import wavfile
from scipy.interpolate import interp1d
from scipy.fft import next_fast_len, rfft, irfft, fft, ifft
from numpy.fft.helper import fftfreq
def ir_extract(rec,fileinv,fileout='ir_out',loopback=None,dur=None,fs=48000):
    '''
    Extract the impulse response from the recorded sweep (rec) and the
    inverse filter stored in fileinv (npz file); both are mandatory.
    rec may be the nsamp x nchan numpy array or the name of a wav file.
    If there is a loopback channel it is used to align the repetitions;
    pass its channel index in `loopback` (it is removed from the output).
    Returns the averaged IR (possibly multichannel) and stores it both as
    fileout.wav and fileout.npz with duration dur seconds (by default as
    much of the IR as alignment allows).
    '''
    # rec can be a file-name prefix or the array returned by play_rec
    if type(rec) is str:
        fs, data = wavfile.read(rec + '.wav')
    elif type(rec) is np.ndarray:
        data = rec
    else:
        raise TypeError('First argument must be the array given by play_rec or a file name')
    if data.ndim == 1:
        data = data[:,np.newaxis] # the array must be 2D
    datainv = np.load(fileinv + '_inv.npz')
    _, nchan = np.shape(data)
    if fs != datainv['fs']:
        raise ValueError('sampling rate of inverse filter does not match file sample rate')
    # Deconvolve according to the excitation type stored with the filter.
    if datainv['type'] == 'sweep':
        ir_stack=ir_sweep(data,datainv,nchan)
    elif datainv['type'] == 'golay':
        ir_stack=ir_golay(data,datainv,nchan)
    else:
        raise ValueError("inv_type must be 'sweep' or 'golay'")
    # ir dimensions: Nrep, nsamples, nchan
    Nrep,N,_ = ir_stack.shape
    if loopback is not None:
        # use the loopback channel to align all the other channels
        n0 = np.argmax(ir_stack[:,:,loopback],axis=1)
    else:
        n0 = np.zeros((Nrep,),dtype=int)
    if dur is None:
        # keep as many samples as every aligned repetition can provide
        ndur = np.min(int(N/2)-n0)
    else:
        ndur = int(np.round(dur*fs))
    # Align each repetition to its onset before averaging.
    ir_align = np.zeros((Nrep,ndur,nchan))
    for n in range(nchan):
        for m in range(Nrep):
            ir_align[m,:,n] = ir_stack[m,n0[m]:n0[m]+ndur,n]
    # Mean over repetitions; the std is kept as a repeatability measure.
    ir = np.mean(ir_align,axis=0)
    ir_std = np.std(ir_align,axis=0)
    if loopback is not None:
        # drop the loopback channel from the returned/stored IR
        ir = np.delete(ir ,loopback ,1)
        ir_std = np.delete(ir_std ,loopback ,1)
    wavfile.write(fileout + '.wav',fs,ir)
    np.savez(fileout,ir=ir,ir_std=ir_std,ir_stack=ir_stack,fs=fs,loopback=loopback)
    return ir
def ir_sweep(data, datainv, nchan):
    """Deconvolve a recorded sweep into an IR stack of shape (Nrep, N, nchan).

    Each repetition's spectrum is multiplied by the precomputed
    inverse-sweep FFT and transformed back; only the real part is kept.
    """
    inv_fft = datainv['invsweepfft']
    n_samples = inv_fft.shape[0]
    n_rep = datainv['Nrep']
    # One repetition per row; FFT runs along the sample axis.
    reps = np.reshape(data[:n_samples * n_rep, :], (n_rep, n_samples, nchan))
    spectra = fft(reps, n_samples, axis=1)
    filtered = spectra * inv_fft[np.newaxis, :, np.newaxis]
    return np.real(ifft(filtered, axis=1))
def ir_golay(data,datainv,nchan):
a = datainv['a']
b = datainv['b']
Ng = len(a)
Nrep = datainv['Nrep']
rc_stack = np.reshape(data[:2*Ng*Nrep],(Nrep,2,Ng,nchan))
A = rfft(a,Ng,norm="ortho")
Ap = rfft(rc_stack[:,0,:,:],Ng,axis=1,norm="ortho")
B = rfft(b,Ng,norm="ortho")
Bp = rfft(rc_stack[:,1,:,:],Ng,axis=1,norm="ortho")
aa = irfft(Ap*np.conj(A[np.newaxis,:,np.newaxis]),axis=1,norm="ortho")
bb = irfft(Bp*np.conj(B[np.newaxis,:,np.newaxis]),axis=1,norm="ortho")
ir_stack = aa+bb
return ir_stack
def fconvolve(in1, in2):
    """Fast FFT-based linear convolution of in1 (possibly multichannel)
    with in2 (single channel).

    The longer dimension of each array is assumed to be the sample axis;
    an array whose last axis is not the sample axis is transposed first.
    The result is transposed back, with length n1 + n2 - 1 samples.
    """
    total = np.max(in1.shape) + np.max(in2.shape) - 1

    def _spectrum(x):
        # Put samples on the last axis (heuristic: the longer dimension).
        if np.argmin(x.shape) > 0:
            x = x.T
        return rfft(x, total)

    return irfft(_spectrum(in1) * _spectrum(in2)).T
# function to time-stretch the IR, compensating temperature variations or clock drift
#def ir_stretch(ir,threshold):
# function to detect outliers within a set of IRs
#def ir_average(ir,reject_outliers=True,threshold): # with an option to drop outliers
# fadeinout
def fadeinout(data, fadein=0.05, fadeout=None, fs=48000):
    """Apply half-cosine fade-in and/or fade-out ramps to data in place.

    fadein, fadeout: ramp durations in seconds (None skips that ramp);
    data may be 1-D (samples) or 2-D (samples, channels). Returns None.
    """
    channels = data.shape[1] if data.ndim == 2 else None
    if fadein is not None:
        nin = int(fadein * fs)
        ramp = (1.0 - np.cos(np.linspace(0, np.pi, nin))) / 2.0
        if channels is None:
            data[:nin] *= ramp
        else:
            for ch in range(channels):
                data[:nin, ch] *= ramp
    if fadeout is not None:
        nout = int(fadeout * fs)
        ramp = (1.0 + np.cos(np.linspace(0, np.pi, nout))) / 2.0
        if channels is None:
            data[-nout:] *= ramp
        else:
            for ch in range(channels):
                data[-nout:, ch] *= ramp
    return
def burst(data, nburst=3, dur=0.05, gap=0.02, fadein=0.01, fadeout=None, fs=48000):
    """Gate data in place into nburst rectangular bursts.

    Each burst lasts dur seconds, separated by gap seconds; optional
    half-cosine fade-in/fade-out ramps (in seconds) soften the edges.
    data may be 1-D or 2-D (samples, channels); everything outside the
    bursts is zeroed. Returns None.
    """
    envelope = np.zeros(len(data))
    burst_len = int(np.floor(dur * fs))
    for k in range(nburst):
        start = int(np.floor(k * (dur + gap) * fs))
        stop = start + burst_len
        envelope[start:stop] = 1.0
        if fadein is not None:
            nin = int(fadein * fs)
            envelope[start:start + nin] = (1.0 - np.cos(np.linspace(0, np.pi, nin))) / 2.0
        if fadeout is not None:
            nout = int(fadeout * fs)
            envelope[stop - nout:stop] = (1.0 + np.cos(np.linspace(0, np.pi, nout))) / 2.0
    if data.ndim == 2:
        for ch in range(data.shape[1]):
            data[:, ch] *= envelope
    else:
        data *= envelope
    return
# filters
def butter_bandpass(lowcut, highcut, fs, order=5, N=10000):
    """Design a Butterworth band-pass filter and return (sos, w, h).

    lowcut/highcut are the band edges in Hz and fs the sample rate; sos
    is the second-order-sections filter and (w, h) its frequency response
    evaluated at N points (w in rad/sample).
    """
    nyquist = 0.5 * fs
    band = [lowcut / nyquist, highcut / nyquist]
    sos = signal.butter(order, band, btype='band', output='sos')
    w, h = signal.sosfreqz(sos, worN=N)
    return sos, w, h
def make_filterbank(fmin=62.5,noct=8,bwoct=1,fs=48000,order=5,N=10000,bankname='fbank_8_1',show=False):
    """Build a bank of Butterworth band-pass filters and save it to disk.

    Covers noct octaves starting at fmin with bwoct filters per octave,
    each of the given order; (sos, fc, fs, order) are stored as a
    compressed npz file under `bankname`. The `show` flag is kept for API
    compatibility (plotting is disabled in this version).
    """
    n_filters = (noct - 1) * bwoct + 1  # first and last octaves inclusive
    fc = np.array([fmin * 2 ** (k * 1 / bwoct) for k in range(n_filters)])
    lower_edges = 2. ** (-0.5 / bwoct) * fc
    sos = np.zeros((n_filters, order, 6), dtype=np.float64)
    for k, f0 in enumerate(lower_edges):
        sos[k], w, h = butter_bandpass(f0, f0 * 2 ** (1 / bwoct), fs, order, N)
    np.savez_compressed(bankname, sos=sos, fc=fc, fs=fs, order=order)
    print('Banco de filtros generado: ' + str(noct) + ' octavas,' + str(bwoct) + ' bandas/octava,' +
          'desde ' + str(fmin) + ' Hz,' + 'Almacenada en archivo ' + bankname)
    return
def A_weighting(fs=48000):
    """
    Design a digital A-weighting filter for sample rate fs.
    Adapted from https://gist.github.com/endolith/148112
    Returns the filter in second-order-sections form:
        sos = A_weighting(fs)
    """
    # Analog A-weighting zeros/poles (angular frequencies).
    zeros = [0, 0, 0, 0]
    poles = [
        -2 * np.pi * 20.598997057568145,
        -2 * np.pi * 20.598997057568145,
        -2 * np.pi * 12194.21714799801,
        -2 * np.pi * 12194.21714799801,
        -2 * np.pi * 107.65264864304628,
        -2 * np.pi * 737.8622307362899,
    ]
    gain = 1
    # Normalize to 0 dB at 1 kHz for all curves
    b, a = signal.zpk2tf(zeros, poles, gain)
    gain /= abs(signal.freqs(b, a, [2 * np.pi * 1000])[1][0])
    z_d, p_d, k_d = signal.bilinear_zpk(zeros, poles, gain, fs)
    return signal.zpk2sos(z_d, p_d, k_d)
def apply_bands(data, bankname='fbank_10_1', fs=48000, norma=True):
    """
    Apply the filter bank stored in `bankname` to the signal `data`.

    The mean-removed signal is zero-phase filtered with every band of the
    bank; each band is peak-normalized unless norma=False. Returns an
    array of shape (nsamples, nbands). When the bank file is missing it
    is created with default parameters first.
    """
    try:
        fbank = np.load(bankname + '.npz')
    except FileNotFoundError:
        # Bank not on disk yet: build it with default settings.
        # (The original used a bare `except:`, which also hid real
        # errors such as a corrupt npz file.)
        make_filterbank(bankname=bankname)
        fbank = np.load(bankname + '.npz')
    data = data - np.mean(data)
    nsamples = len(data)
    nbands, order, dd = fbank['sos'].shape
    data_filt = np.empty((nsamples, nbands))
    for n in range(nbands):
        temp = signal.sosfiltfilt(fbank['sos'][n], data)
        if norma:
            temp = temp / np.amax(np.abs(temp))
        data_filt[:, n] = temp
    # TODO: apply fade-in/fade-out to the filtered bands (original note)
    return data_filt
def spectrum(data_input, fs=48000):
    """Compute the spectral power density (in dB) of a signal.

    data_input: a numpy array (nsamples[, nchan]) or a wav file name
    (without extension); for files, the stored sample rate replaces fs.
    Can be used to obtain the transfer function from an impulse response.
    Returns a dict with keys:
      'nchan'     number of channels
      'f'         center frequencies (Hz)
      's'         power spectral density in dB, shape (nchan, nf)
      'amplitude' FFT magnitude
      'phase'     FFT phase (for signal reconstruction)
    """
    if type(data_input) is str:
        fs, data = wavfile.read(data_input + '.wav')
    elif type(data_input) is np.ndarray:
        data = data_input
    else:
        raise TypeError('Primer argumento debe ser el array devuelto por extractir o un nombre de archivo')
    if data.ndim == 1:
        data = data[:, np.newaxis]  # work on a 2-D (samples, channels) view
    nsamples, nchan = np.shape(data)
    nf = int(np.ceil((nsamples + 1) / 2))  # number of non-negative bins
    freq = fftfreq(nsamples, d=1 / fs)
    sp = {
        'nchan': nchan,
        'f': np.abs(freq[:nf]),
        's': np.zeros((nchan, nf)),
        'amplitude': np.zeros((nchan, nf)),
        'phase': np.zeros((nchan, nf)),
    }
    for ch in range(nchan):
        bins = rfft(data[:, ch])
        sp['amplitude'][ch] = np.abs(bins)
        sp['phase'][ch] = np.angle(bins)
        sp['s'][ch] = 20 * np.log10(sp['amplitude'][ch])
    return sp
def spectrogram(data, **kwargs):
    """
    Compute the spectrogram and the analytic (Hilbert) envelope of a signal.

    Expected kwargs: fs, windowSize, windowType, overlap, logf, normalized.
    Returns a dict with keys nchan, nsamples, f, t, s, env, nt, nf, df,
    window and overlap.
    """
    # Round the window up to an FFT-friendly length
    win = next_fast_len(kwargs['windowSize'])
    hop_overlap = kwargs['overlap']
    if data.ndim == 1:
        data = data[:, np.newaxis]  # work on 2D arrays internally
    nsamples, nchan = np.shape(data)
    nframes = int((nsamples - win) // (win - hop_overlap)) + 1
    nenv = next_fast_len(nsamples)
    # Result dict for the spectrogram and envelope
    spec = {
        'nchan': nchan,
        'nsamples': nsamples,
        'f': 0,
        't': 0,
        's': np.zeros((nchan, win // 2 + 1, nframes)),
        'env': np.zeros((nchan, nenv)),
        'nt': nframes,
        'nf': win // 2 + 1,
        'df': 0,
        'window': win,
        'overlap': hop_overlap,
    }
    for chan in np.arange(nchan):
        # Analytic envelope via the Hilbert transform, zero-padded to nenv
        envelope = np.abs(signal.hilbert(data[:, chan], nenv))
        f, t, sxx = signal.spectrogram(
            data[:, chan], kwargs['fs'], window=kwargs['windowType'],
            nperseg=win, noverlap=hop_overlap)
        spec['t'] = t
        spec['df'] = f[1]
        spec['env'][chan] = envelope
        if kwargs['logf']:
            # Resample the linear frequency axis onto a logarithmic one
            logf_axis = np.power(2, np.linspace(np.log2(f[1]), np.log2(f[-1]), spec['nf']))
            interp = interp1d(f, sxx.T, fill_value="extrapolate")
            spec['f'] = logf_axis
            spec['s'][chan] = interp(logf_axis).T
        else:
            spec['f'] = f
            spec['s'][chan] = sxx
        if kwargs['normalized']:
            spec['s'][chan] = spec['s'][chan] / np.max(spec['s'][chan])
            spec['env'][chan] = spec['env'][chan] / np.max(spec['env'][chan])
    return spec
def hipass_filter(data, **kwargs):
    """
    Zero-phase Butterworth high-pass filter applied along axis 0.

    Expected kwargs: fs (sample rate in Hz), lowcut (cutoff in Hz), order.
    """
    # Normalize the cutoff to the Nyquist frequency, as scipy expects
    cutoff = kwargs['lowcut'] / (0.5 * kwargs['fs'])
    sos = signal.butter(kwargs['order'], cutoff, btype='highpass', output='sos')
    return signal.sosfiltfilt(sos, data, axis=0)
# agregar una funcion para detectar clipeo
| 35.439628 | 132 | 0.599284 |
79599c7eb79335c5fab84c682dfa9ed084dee572 | 12,837 | py | Python | src/view/MainDialog.py | slavi010/random_dice_bot | 68742314dddcc06b03f961b7da66a6cd65e01c2e | [
"MIT"
] | 1 | 2020-05-28T20:31:36.000Z | 2020-05-28T20:31:36.000Z | src/view/MainDialog.py | slavi010/random_dice_bot | 68742314dddcc06b03f961b7da66a6cd65e01c2e | [
"MIT"
] | 8 | 2020-05-28T14:15:00.000Z | 2022-01-13T02:47:35.000Z | src/view/MainDialog.py | slavi010/random_dice_bot | 68742314dddcc06b03f961b7da66a6cd65e01c2e | [
"MIT"
] | 1 | 2021-10-13T21:47:00.000Z | 2021-10-13T21:47:00.000Z | #################################################################################
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #
# SOFTWARE. #
#################################################################################
#
# Contributors :
# Copyright (c) 2020 slavi010 pro@slavi.dev
#
import tkinter as tk
from abc import abstractmethod, ABC
from typing import Optional, Dict
import cv2
import numpy as np
from PIL import ImageTk, Image
from src.model.DiceEnum import DiceColorEnum
from src.model.FeaturePlateau import merge_dice_feature, buy_upgrade_feature, buy_dice_feature, auto_ad_feature
from src.model.Plateau import Plateau
from src.view.Deck import Deck
from src.view.DialogConfFeature import DiceMergeFeatureConfDialog, BuyUpgradeFeatureConfDialog
class MainDialog:
    """The main Dialog, with this dialog, you can change the deck and the bot actions"""
    def __init__(self, root):
        """Build the whole UI: the deck row, the feature list and the action buttons."""
        self.root = root
        # show this dialog
        self.show = False
        self.root.deiconify()
        # Frame
        self.frm_deck = tk.Frame(self.root, width=300, height=60, pady=3)
        self.frm_feature = tk.Frame(self.root, width=300, height=60, pady=3)
        self.frm_action = tk.Frame(self.root, width=300, height=60, pady=3)
        # Frame grid
        self.frm_deck.grid(row=0, sticky="ew")
        self.frm_feature.grid(row=1, sticky="ew")
        self.frm_action.grid(row=2, sticky="ew")
        # frm_deck widgets: one canvas per deck slot
        self.deck = Deck([DiceColorEnum.JOKER,
                          DiceColorEnum.GROWTH,
                          DiceColorEnum.MIMIC,
                          DiceColorEnum.METASTASIS,
                          DiceColorEnum.COMBO])
        self.dices_canvas = []
        for i in range(5):
            canvas = tk.Canvas(self.frm_deck, width=50, height=50)
            self.dices_canvas.append(canvas)
        # One explicit binding per slot index (avoids sharing a late-bound
        # loop variable between the five click callbacks)
        self.dices_canvas[0].bind("<Button-1>", lambda event: self.change_dice_dialog(0))
        self.dices_canvas[1].bind("<Button-1>", lambda event: self.change_dice_dialog(1))
        self.dices_canvas[2].bind("<Button-1>", lambda event: self.change_dice_dialog(2))
        self.dices_canvas[3].bind("<Button-1>", lambda event: self.change_dice_dialog(3))
        self.dices_canvas[4].bind("<Button-1>", lambda event: self.change_dice_dialog(4))
        # frm_deck layout widgets
        for idx, dice_canvas in enumerate(self.dices_canvas):
            dice_canvas.grid(row=0, column=idx)
        # frm_feature widgets: feature rows added at runtime via add_feature()
        self.sub_feature_frms = []
        # frm_feature widgets layouts
        # self.update_frm_feature()
        # frm_action widgets
        self.btn_add_merge_dice_feature = tk.Button(self.frm_action, text="Add merge dice feature")
        self.btn_add_buy_shop_feature = tk.Button(self.frm_action, text="Add buy shop feature")
        self.btn_add_buy_dice_feature = tk.Button(self.frm_action, text="Add buy dice feature")
        self.btn_add_auto_ad_feature = tk.Button(self.frm_action, text="Auto ad feature")
        self.btn_start = tk.Button(self.frm_action, text="Start bot")
        self.btn_add_merge_dice_feature['command'] = \
            lambda: self.add_feature(MergeDiceFeatureView(self.frm_feature, deck=self.deck))
        self.btn_add_buy_shop_feature['command'] = \
            lambda: self.add_feature(BuyUpgradeFeatureView(self.frm_feature, deck=self.deck))
        self.btn_add_buy_dice_feature['command'] = \
            lambda: self.add_feature(BuyDiceFeatureView(self.frm_feature))
        self.btn_add_auto_ad_feature['command'] = \
            lambda: self.add_feature(AutoAdFeatureView(self.frm_feature))
        # "Start bot" simply breaks the manual event loop in show_dialog()
        self.btn_start['command'] = \
            lambda: self.set_show(False)
        # frm_action widgets layouts
        # NOTE(review): btn_add_auto_ad_feature is never .grid()-ed (row 3 is
        # skipped below), so the "Auto ad feature" button does not appear in
        # the UI -- confirm whether this is intentional
        self.btn_add_merge_dice_feature.grid(row=0, column=0)
        self.btn_add_buy_shop_feature.grid(row=1, column=0)
        self.btn_add_buy_dice_feature.grid(row=2, column=0)
        self.btn_start.grid(row=4, column=0)
        # load all dices images, resized to match the 50x50 canvases
        self.all_dice_images = {}
        for name, member in DiceColorEnum.__members__.items():
            image = Image.open("../image/dice/%s.png" % name)
            image = image.resize((50, 50), Image.ANTIALIAS)
            self.all_dice_images[member] = ImageTk.PhotoImage(image)
    def update_image_dices(self):
        """
        Update images in the ui with the current dice deck
        """
        for idx, dice_canvas in enumerate(self.dices_canvas):
            dice_canvas.delete("all")
            dice_canvas.create_image(0, 0, anchor="nw",
                                     image=self.all_dice_images[self.deck.dices[idx]])
        self.deck.notify()
    def change_dice_dialog(self, index_dice_deck):
        """
        Open the SelectDiceDialog for changing one dice of the deck
        """
        new = tk.Toplevel(self.root)
        new.wait_visibility()
        new.grab_set()
        SelectDiceDialog(new, self, index_dice_deck)
    def update_frm_feature(self):
        """Re-lay out every feature row, one grid row per feature, in list order."""
        for idx, sub_feature_frm in enumerate(self.sub_feature_frms):
            sub_feature_frm.get_frm().grid_forget()
            sub_feature_frm.get_frm().grid(row=idx, column=0)
    def add_feature(self, feature_view):
        """Append a feature row, then refresh the feature list and the deck images."""
        self.sub_feature_frms.append(feature_view)
        self.update_frm_feature()
        self.update_image_dices()
    def set_show(self, value: bool):
        """Setter used by button callbacks to break the show_dialog() loop."""
        self.show = value
    def show_dialog(self):
        """Show this dialog and pump events until set_show(False) is called."""
        self.root.deiconify()
        self.show = True
        # Manual pump instead of mainloop() so the caller regains control
        # as soon as self.show turns False
        while self.show:
            self.root.update_idletasks()
            self.root.update()
        self.root.withdraw()
        return self
class SelectDiceDialog:
    """Dialog that lets the user pick one dice type for a given deck slot."""

    def __init__(self, root, main_dialog: MainDialog, index_dice_deck):
        """
        :param root: Toplevel window this dialog lives in.
        :param main_dialog: parent dialog whose deck gets updated.
        :param index_dice_deck: index (0-4) of the deck slot being changed.
        """
        self.root = root
        # self.root.protocol("WM_DELETE_WINDOW", self.on_closing)
        self.main_dialog = main_dialog
        self.index_dice_deck = index_dice_deck
        # widgets (original code had a redundant double assignment here)
        self.lst_dices = tk.Listbox(self.root, width=20, height=10)
        for name, member in DiceColorEnum.__members__.items():
            self.lst_dices.insert(tk.END, name)
        self.btn_selectionner = tk.Button(self.root, text="Selectionnner", command=self.callback_selectionner)
        # layout widgets
        self.lst_dices.pack()
        self.btn_selectionner.pack()

    def callback_selectionner(self):
        """
        Apply the selected dice type to the deck slot.

        If the chosen dice is already somewhere in the deck, swap it with the
        slot being edited (a deck cannot contain the same dice twice);
        otherwise simply overwrite the slot. Closes the dialog afterwards.
        """
        for index in self.lst_dices.curselection():
            type_dice = DiceColorEnum[self.lst_dices.get(index)]
            # Look for the chosen dice elsewhere in the deck
            flag_dice_already_in_deck = False
            idx_dice = 0
            for idx, dice in enumerate(self.main_dialog.deck.dices):
                if dice == type_dice:
                    flag_dice_already_in_deck = True
                    idx_dice = idx
                    break
            if flag_dice_already_in_deck:
                # Swap the two slots to keep the deck free of duplicates
                self.main_dialog.deck.dices[idx_dice] = self.main_dialog.deck.dices[self.index_dice_deck]
                self.main_dialog.deck.dices[self.index_dice_deck] = type_dice
            else:
                self.main_dialog.deck.dices[self.index_dice_deck] = type_dice
        self.main_dialog.update_image_dices()
        self.root.destroy()
class AbstractFeatureView(ABC):
    """
    Base class for the feature rows displayed in the main dialog.

    Inherits ABC so the @abstractmethod markers below are actually enforced
    (ABC was imported at the top of the file but previously unused, leaving
    the abstract methods inert). Subclasses must implement ``get_frm`` and
    ``get_callback_feature``.
    """

    def __init__(self, root):
        """Build the shared row layout: a label frame on top of an option frame."""
        self.root = root
        # Frame
        self.frm = tk.Frame(self.root, width=2, height=1)
        self.frm_lbl = tk.Frame(self.frm, width=2, height=1)
        self.frm_option = tk.Frame(self.frm, width=2, height=1)
        # Frame grid
        self.frm_lbl.pack()
        self.frm_option.pack()
        # frm widgets: feature name (bold) + user-chosen custom name
        self.lbl_name_feature = tk.Label(self.frm_lbl, text="default", anchor="w", font='Helvetica 12 bold')
        self.lbl_name_custom = tk.Label(self.frm_lbl, anchor="w")
        # frm widgets layout
        self.lbl_name_feature.grid(row=0, column=0)
        self.lbl_name_custom.grid(row=0, column=1)
        # feature-specific parameters, filled in by subclasses
        self.parameters = None

    @abstractmethod
    def get_frm(self):
        """Return the tk Frame representing this feature row."""

    @abstractmethod
    def get_callback_feature(self):
        """Return a callable taking a Plateau that executes this feature."""
class MergeDiceFeatureView(AbstractFeatureView):
    """Feature row configuring a dice-merge action."""

    def __init__(self, root, deck: Deck, parameters: Optional[Dict] = None):
        super().__init__(root)
        self.deck = deck
        # show the feature name in the row label
        self.lbl_name_feature['text'] = "Merge dice"
        # dices
        self.lbx_dices_value = tk.StringVar()
        # config button opening the merge configuration dialog
        self.btn_config = tk.Button(self.frm_option, width=8, text="Config")
        self.btn_config['command'] = self.callback_config
        self.btn_config.pack()
        # use the supplied parameters, or build a fresh default set
        self.parameters = parameters if parameters is not None else self._default_parameters()
        self.lbl_name_custom['text'] = self.parameters.get('name')

    @staticmethod
    def _default_parameters() -> Dict:
        """Build a fresh default configuration dict for the merge feature."""
        return {
            "name": "",
            "lst_from": [],
            "lst_to": [],
            "min_dices_board": 2,
            "max_dices_board": 15,
            "min_dots": 1,
            "max_dots": 7,
            "min_dices_from": 1,
            "min_dices_to": 1,
            "merge_priority": 1,
        }

    def callback_config(self):
        """Open the config dialog and store whatever configuration it returns."""
        updated = DiceMergeFeatureConfDialog(self.deck, True, self.parameters).returning
        self.parameters = updated
        self.lbl_name_custom['text'] = updated.get('name')

    def get_frm(self):
        """Return the tk Frame of this row."""
        return self.frm

    def get_callback_feature(self):
        """Return a callable running the merge feature with the current parameters."""
        def run(plateau):
            # Read self.parameters at call time so config changes are picked up
            p = self.parameters
            return merge_dice_feature(
                plateau,
                p.get('lst_from'),
                p.get('lst_to'),
                p.get('min_dices_board'),
                p.get('max_dices_board'),
                p.get('min_dots'),
                p.get('max_dots'),
                p.get('min_dices_from'),
                p.get('min_dices_to'),
                p.get('merge_priority') == 1,
            )
        return run
class BuyUpgradeFeatureView(AbstractFeatureView):
    """Feature row configuring the automatic purchase of shop upgrades."""

    def __init__(self, root, deck: Deck, parameters: Optional[Dict] = None):
        super().__init__(root)
        self.deck = deck
        # show the feature name in the row label
        self.lbl_name_feature['text'] = "Buy upgrade"
        # config button opening the upgrade configuration dialog
        self.btn_config = tk.Button(self.frm_option, width=8, text="Config")
        self.btn_config['command'] = self.callback_config
        self.btn_config.pack()
        # use the supplied parameters, or build a fresh default set
        self.parameters = parameters if parameters is not None else self._default_parameters()
        self.lbl_name_custom['text'] = self.parameters.get('name')

    @staticmethod
    def _default_parameters() -> Dict:
        """Build a fresh default configuration dict for the buy-upgrade feature."""
        return {
            "name": "",
            "lst_index_dice": [],
            "proba_buy_upgrade": 0.05,
            "min_dices_board": 8,
        }

    def callback_config(self):
        """Open the config dialog and store whatever configuration it returns."""
        updated = BuyUpgradeFeatureConfDialog(self.deck, True, self.parameters).returning
        self.parameters = updated
        self.lbl_name_custom['text'] = updated.get('name')

    def get_frm(self):
        """Return the tk Frame of this row."""
        return self.frm

    def get_callback_feature(self):
        """Return a callable running the buy-upgrade feature with the current parameters."""
        def run(plateau):
            # Read self.parameters at call time so config changes are picked up
            p = self.parameters
            return buy_upgrade_feature(
                plateau,
                p.get('proba_buy_upgrade'),
                p.get('lst_index_dice'),
                p.get('min_dices_board'),
            )
        return run
class BuyDiceFeatureView(AbstractFeatureView):
    """Feature row that makes the bot buy dice."""

    def __init__(self, root):
        super().__init__(root)
        # show the feature name in the row label
        self.lbl_name_feature['text'] = "Buy dice"

    def get_frm(self):
        """Return the tk Frame of this row."""
        return self.frm

    def get_callback_feature(self):
        """Return a callable that buys a dice on the given plateau."""
        def run(plateau):
            return buy_dice_feature(plateau)
        return run
class AutoAdFeatureView(AbstractFeatureView):
    """Feature row that makes the bot watch ads automatically."""

    def __init__(self, root):
        super().__init__(root)
        # show the feature name in the row label
        self.lbl_name_feature['text'] = "Auto ad"

    def get_frm(self):
        """Return the tk Frame of this row."""
        return self.frm

    def get_callback_feature(self):
        """Return a callable that runs the auto-ad feature on the given plateau."""
        def run(plateau):
            return auto_ad_feature(plateau)
        return run
# root = tk.Tk()
# ahk = AHK()
# # plateau = Plateau(ahk)
#
# main_dialog = MainDialog(root)
# main_dialog.update_image_dices()
# main_dialog.update_frm_feature()
#
# root.mainloop()
| 34.883152 | 120 | 0.616188 |
79599daef5acd8d5a2d506647961f126f2d07395 | 36,134 | py | Python | backend/api/tests/test_team.py | Savage-Aim/app | ecb3b7635caba552ded17172c0aa3535b5f3b98b | [
"MIT"
] | null | null | null | backend/api/tests/test_team.py | Savage-Aim/app | ecb3b7635caba552ded17172c0aa3535b5f3b98b | [
"MIT"
] | 1 | 2022-02-07T02:58:14.000Z | 2022-02-07T02:58:14.000Z | backend/api/tests/test_team.py | Savage-Aim/app | ecb3b7635caba552ded17172c0aa3535b5f3b98b | [
"MIT"
] | null | null | null | # stdlib
from io import StringIO
# lib
from django.core.management import call_command
from django.urls import reverse
from rest_framework import status
# local
from api.models import BISList, Character, Gear, Notification, Job, Team, TeamMember, Tier
from api.serializers import TeamSerializer
from .test_base import SavageAimTestCase
class TeamCollection(SavageAimTestCase):
    """
    Test the list and create methods

    Exercises GET (list, with optional char_id filtering) and POST (create)
    against the api:team_collection endpoint.
    """
    def setUp(self):
        """
        Run the seed commands, and create some necessary data
        """
        # Populate the static Job / Gear / Tier tables the models below depend on
        call_command('job_seed', stdout=StringIO())
        call_command('gear_seed', stdout=StringIO())
        call_command('tier_seed', stdout=StringIO())
        self.char = Character.objects.create(
            avatar_url='https://img.savageaim.com/abcde',
            lodestone_id=1234567890,
            user=self._get_user(),
            name='Char 1',
            world='Lich',
        )
        # One Gear record is reused for every slot; the tests only need a
        # structurally valid BIS List
        g = Gear.objects.first()
        self.bis = BISList.objects.create(
            bis_body=g,
            bis_bracelet=g,
            bis_earrings=g,
            bis_feet=g,
            bis_hands=g,
            bis_head=g,
            bis_left_ring=g,
            bis_legs=g,
            bis_mainhand=g,
            bis_necklace=g,
            bis_offhand=g,
            bis_right_ring=g,
            current_body=g,
            current_bracelet=g,
            current_earrings=g,
            current_feet=g,
            current_hands=g,
            current_head=g,
            current_left_ring=g,
            current_legs=g,
            current_mainhand=g,
            current_necklace=g,
            current_offhand=g,
            current_right_ring=g,
            job=Job.objects.first(),
            owner=self.char,
        )
    def tearDown(self):
        """
        Clean up the DB after each test
        """
        # Delete in FK dependency order (members before teams, lists before characters)
        TeamMember.objects.all().delete()
        Team.objects.all().delete()
        BISList.objects.all().delete()
        Character.objects.all().delete()
    def test_list(self):
        """
        Create a couple of characters for a user and send a list request for them
        ensure the data is returned as expected
        """
        url = reverse('api:team_collection')
        user = self._get_user()
        self.client.force_authenticate(user)
        # Create two teams, make the character a member of both
        team1 = Team.objects.create(
            invite_code=Team.generate_invite_code(),
            name='Test Team 1',
            tier=Tier.objects.first(),
        )
        team2 = Team.objects.create(
            invite_code=Team.generate_invite_code(),
            name='Test Team 2',
            tier=Tier.objects.first(),
        )
        # Test against data leakage
        Team.objects.create(
            invite_code=Team.generate_invite_code(),
            name='Test Team 3',
            tier=Tier.objects.first(),
        )
        TeamMember.objects.create(team=team1, character=self.char, bis_list=self.bis, lead=True)
        TeamMember.objects.create(team=team2, character=self.char, bis_list=self.bis, lead=False)
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        content = response.json()
        # Only the two Teams the Character belongs to should be returned
        self.assertEqual(len(content), 2)
        self.assertDictEqual(content[0], TeamSerializer(team1).data)
        self.assertDictEqual(content[1], TeamSerializer(team2).data)
    def test_list_filters(self):
        """
        Test the same as above but also ensure that the filtering works
        """
        base_url = reverse('api:team_collection')
        user = self._get_user()
        self.client.force_authenticate(user)
        # Create two teams, make the character a member of both
        team1 = Team.objects.create(
            invite_code=Team.generate_invite_code(),
            name='Test Team 1',
            tier=Tier.objects.first(),
        )
        team2 = Team.objects.create(
            invite_code=Team.generate_invite_code(),
            name='Test Team 2',
            tier=Tier.objects.first(),
        )
        TeamMember.objects.create(team=team1, character=self.char, bis_list=self.bis, lead=True)
        TeamMember.objects.create(team=team2, character=self.char, bis_list=self.bis, lead=False)
        # Test with character's id
        response = self.client.get(f'{base_url}?char_id={self.char.id}')
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        content = response.json()
        self.assertEqual(len(content), 2)
        # Test with different ID
        response = self.client.get(f'{base_url}?char_id=999')
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        content = response.json()
        self.assertEqual(len(content), 0)
        # Test with letters and ensure full response is returned
        response = self.client.get(f'{base_url}?char_id=abcde')
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        content = response.json()
        self.assertEqual(len(content), 2)
    def test_create(self):
        """
        Create a new Team in the database
        Ensure that the record is created, and the returned token equals the one in the database
        """
        url = reverse('api:team_collection')
        self.client.force_authenticate(self._get_user())
        # The Character must be verified before it can create a Team
        self.char.verified = True
        self.char.save()
        data = {
            'name': 'Test',
            'tier_id': Tier.objects.first().pk,
            'bis_list_id': self.bis.id,
            'character_id': self.char.id,
        }
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)
        self.assertEqual(Team.objects.count(), 1)
        obj = Team.objects.first()
        self.assertEqual(response.json()['id'], str(obj.pk))
    def test_create_400(self):
        """
        Attempt to test creation of a Team with invalid data and ensure appropriate responses are returned
        Character ID Not Sent: 'This field is required.'
        Character ID Not Int: 'A valid integer is required.'
        Character ID Invalid: 'Please select a valid, verified Character that you own.'
        BIS List ID Not Sent: 'This field is required.'
        BIS List ID Not Int: 'A valid integer is required.'
        BIS List doesn't belong to any of your characters: 'Please select a valid BISList belonging to your Character.'
        BIS List doesn't belong to sent character: 'Please select a valid BISList belonging to your Character.'
        Name not sent: 'This field is required.'
        Name Too Long: 'Ensure this field has no more than 64 characters.'
        Tier ID Not Sent: 'This field is required.'
        Tier ID Not Int: 'A valid integer is required.'
        Tier ID Invalid: 'Please select a valid Tier.'
        """
        url = reverse('api:team_collection')
        self.client.force_authenticate(self._get_user())
        # No data at all -> every field reports "required"
        response = self.client.post(url)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
        content = response.json()
        self.assertEqual(content['name'], ['This field is required.'])
        self.assertEqual(content['tier_id'], ['This field is required.'])
        self.assertEqual(content['bis_list_id'], ['This field is required.'])
        self.assertEqual(content['character_id'], ['This field is required.'])
        # Wrong types / too-long name
        data = {
            'character_id': 'abcde',
            'bis_list_id': 'abcde',
            'name': 'abcde' * 100,
            'tier_id': 'abcde',
        }
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
        content = response.json()
        self.assertEqual(content['name'], ['Ensure this field has no more than 64 characters.'])
        self.assertEqual(content['tier_id'], ['A valid integer is required.'])
        self.assertEqual(content['bis_list_id'], ['A valid integer is required.'])
        self.assertEqual(content['character_id'], ['A valid integer is required.'])
        # IDs of the right type but pointing at records that don't exist
        data = {
            'character_id': '90',
            'bis_list_id': '90',
            'name': 'abcde',
            'tier_id': '90',
        }
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
        content = response.json()
        self.assertEqual(content['bis_list_id'], ['Please select a valid BISList belonging to your Character.'])
        self.assertEqual(content['character_id'], ['Please select a valid, verified Character that you own.'])
        self.assertEqual(content['tier_id'], ['Please select a valid Tier.'])
        # Test with valid unverified char, and someone elses' bis list
        char = Character.objects.create(
            avatar_url='https://img.savageaim.com/abcde',
            lodestone_id=1348724213,
            user=self._create_user(),
            name='Char 2',
            world='Lich',
        )
        bis = BISList.objects.create(
            bis_body=Gear.objects.first(),
            bis_bracelet=Gear.objects.first(),
            bis_earrings=Gear.objects.first(),
            bis_feet=Gear.objects.first(),
            bis_hands=Gear.objects.first(),
            bis_head=Gear.objects.first(),
            bis_left_ring=Gear.objects.first(),
            bis_legs=Gear.objects.first(),
            bis_mainhand=Gear.objects.first(),
            bis_necklace=Gear.objects.first(),
            bis_offhand=Gear.objects.first(),
            bis_right_ring=Gear.objects.first(),
            current_body=Gear.objects.last(),
            current_bracelet=Gear.objects.last(),
            current_earrings=Gear.objects.last(),
            current_feet=Gear.objects.last(),
            current_hands=Gear.objects.last(),
            current_head=Gear.objects.last(),
            current_left_ring=Gear.objects.last(),
            current_legs=Gear.objects.last(),
            current_mainhand=Gear.objects.last(),
            current_necklace=Gear.objects.last(),
            current_offhand=Gear.objects.last(),
            current_right_ring=Gear.objects.last(),
            job=Job.objects.get(pk='DRG'),
            owner=char,
        )
        data = {
            'character_id': self.char.id,
            'bis_list_id': bis.id,
            'name': 'Test',
            'tier_id': Tier.objects.first().pk,
        }
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
        content = response.json()
        self.assertEqual(content['character_id'], ['Please select a valid, verified Character that you own.'])
        self.assertEqual(content['bis_list_id'], ['Please select a valid BISList belonging to your Character.'])
        # Lastly check the top level validate error
        char.user = self._get_user()
        char.save()
        self.char.verified = True
        self.char.save()
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
        content = response.json()
        self.assertEqual(content['bis_list_id'], ['Please select a valid BISList belonging to your Character.'])
class TeamResource(SavageAimTestCase):
    """
    Test the read and update methods

    Exercises GET / PUT / PATCH / DELETE against api:team_resource, plus the
    Notification side effects of updates and disbands.
    """
    def setUp(self):
        """
        Run the seed commands, and create some necessary data
        """
        # Populate the static Job / Gear / Tier tables the models below depend on
        call_command('job_seed', stdout=StringIO())
        call_command('gear_seed', stdout=StringIO())
        call_command('tier_seed', stdout=StringIO())
        self.char = Character.objects.create(
            avatar_url='https://img.savageaim.com/abcde',
            lodestone_id=1234567890,
            user=self._get_user(),
            name='Char 1',
            world='Lich',
        )
        # One Gear record is reused for every slot; the tests only need a
        # structurally valid BIS List
        g = Gear.objects.first()
        self.bis = BISList.objects.create(
            bis_body=g,
            bis_bracelet=g,
            bis_earrings=g,
            bis_feet=g,
            bis_hands=g,
            bis_head=g,
            bis_left_ring=g,
            bis_legs=g,
            bis_mainhand=g,
            bis_necklace=g,
            bis_offhand=g,
            bis_right_ring=g,
            current_body=g,
            current_bracelet=g,
            current_earrings=g,
            current_feet=g,
            current_hands=g,
            current_head=g,
            current_left_ring=g,
            current_legs=g,
            current_mainhand=g,
            current_necklace=g,
            current_offhand=g,
            current_right_ring=g,
            job=Job.objects.first(),
            owner=self.char,
        )
        self.team = Team.objects.create(
            invite_code=Team.generate_invite_code(),
            name='Test Team 1',
            tier=Tier.objects.first(),
        )
        # The test user's character starts as the Team Leader
        self.tm = TeamMember.objects.create(team=self.team, character=self.char, bis_list=self.bis, lead=True)
    def tearDown(self):
        """
        Clean up the DB after each test
        """
        # Delete in FK dependency order
        Notification.objects.all().delete()
        TeamMember.objects.all().delete()
        Team.objects.all().delete()
        BISList.objects.all().delete()
        Character.objects.all().delete()
    def test_read(self):
        """
        Read the Team object as a user whose character is in the team
        """
        user = self._get_user()
        self.client.force_authenticate(user)
        url = reverse('api:team_resource', kwargs={'pk': self.team.id})
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        content = response.json()
        self.assertDictEqual(content, TeamSerializer(self.team).data)
        self.assertIn('members', content)
        self.assertEqual(len(content['members']), 1)
    def test_regenerate_token(self):
        """
        Send a PATCH request to the endpoint, and ensure the team's invite code has changed
        """
        user = self._get_user()
        self.client.force_authenticate(user)
        url = reverse('api:team_resource', kwargs={'pk': self.team.id})
        response = self.client.patch(url)
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.content)
        # The old invite code must no longer resolve to any Team
        with self.assertRaises(Team.DoesNotExist):
            Team.objects.get(invite_code=self.team.invite_code)
    def test_update(self):
        """
        Update the Team fully and ensure the data in the DB has been updated
        """
        user = self._get_user()
        self.client.force_authenticate(user)
        url = reverse('api:team_resource', kwargs={'pk': self.team.id})
        # Create required objects
        new_tier = Tier.objects.create(name='Memes', max_item_level=900, raid_gear_name='The End')
        char = Character.objects.create(
            avatar_url='https://img.savageaim.com/abcde',
            lodestone_id=1348724213,
            user=self._create_user(),
            name='Char 2',
            world='Lich',
        )
        bis = BISList.objects.create(
            bis_body=Gear.objects.first(),
            bis_bracelet=Gear.objects.first(),
            bis_earrings=Gear.objects.first(),
            bis_feet=Gear.objects.first(),
            bis_hands=Gear.objects.first(),
            bis_head=Gear.objects.first(),
            bis_left_ring=Gear.objects.first(),
            bis_legs=Gear.objects.first(),
            bis_mainhand=Gear.objects.first(),
            bis_necklace=Gear.objects.first(),
            bis_offhand=Gear.objects.first(),
            bis_right_ring=Gear.objects.first(),
            current_body=Gear.objects.last(),
            current_bracelet=Gear.objects.last(),
            current_earrings=Gear.objects.last(),
            current_feet=Gear.objects.last(),
            current_hands=Gear.objects.last(),
            current_head=Gear.objects.last(),
            current_left_ring=Gear.objects.last(),
            current_legs=Gear.objects.last(),
            current_mainhand=Gear.objects.last(),
            current_necklace=Gear.objects.last(),
            current_offhand=Gear.objects.last(),
            current_right_ring=Gear.objects.last(),
            job=Job.objects.get(pk='DRG'),
            owner=char,
        )
        tm = TeamMember.objects.create(team=self.team, character=char, bis_list=bis, lead=False)
        # Send the request
        data = {
            'name': 'Updated Team Name',
            'tier_id': new_tier.id,
            'team_lead': char.id,
        }
        response = self.client.put(url, data)
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.content)
        old_name = self.team.name
        # Ensure the Team has been updated
        self.team.refresh_from_db()
        self.assertEqual(self.team.name, data['name'])
        self.assertEqual(self.team.tier.id, new_tier.id)
        # Leadership must have moved from self.tm to tm
        tm.refresh_from_db()
        self.assertTrue(tm.lead)
        self.tm.refresh_from_db()
        self.assertFalse(self.tm.lead)
        # Ensure the new character got a notification
        self.assertEqual(Notification.objects.filter(user=char.user).count(), 2)
        notifs = Notification.objects.filter(user=char.user)
        note_map = {
            'team_lead': f'{char} has been made the Team Leader of {self.team.name}!',
            'team_rename': f'{old_name} has been renamed to {self.team.name}!',
        }
        for notif in notifs:
            self.assertEqual(notif.link, f'/team/{self.team.id}/')
            self.assertEqual(notif.text, note_map[notif.type])
            self.assertFalse(notif.read)
    def test_update_400(self):
        """
        Send invalid update requests and ensure the right errors are returned from each request
        Name Not Sent: 'This field is required.'
        Name Too Long: 'Ensure this field has no more than 64 characters.'
        Tier ID Not Sent: 'This field is required.'
        Tier ID Not Int: 'A valid integer is required.'
        Tier ID Invalid: 'Please select a valid Tier.'
        Team Lead Not Sent: 'This field is required.'
        Team Lead Not Int: 'A valid integer is required.'
        Team Lead Invalid: 'Please select a member of the Team to be the new team lead.'
        """
        user = self._get_user()
        self.client.force_authenticate(user)
        url = reverse('api:team_resource', kwargs={'pk': self.team.id})
        # No data at all -> every field reports "required"
        response = self.client.put(url)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
        content = response.json()
        self.assertEqual(content['name'], ['This field is required.'])
        self.assertEqual(content['tier_id'], ['This field is required.'])
        self.assertEqual(content['team_lead'], ['This field is required.'])
        # Wrong types / too-long name
        data = {
            'name': 'abcde' * 100,
            'tier_id': 'abcde',
            'team_lead': 'abcde',
        }
        response = self.client.put(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
        content = response.json()
        self.assertEqual(content['name'], ['Ensure this field has no more than 64 characters.'])
        self.assertEqual(content['tier_id'], ['A valid integer is required.'])
        self.assertEqual(content['team_lead'], ['A valid integer is required.'])
        # IDs of the right type but pointing at records that don't exist
        data = {
            'name': 'Hi c:',
            'tier_id': 123,
            'team_lead': 123,
        }
        response = self.client.put(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
        content = response.json()
        self.assertEqual(content['tier_id'], ['Please select a valid Tier.'])
        self.assertEqual(content['team_lead'], ['Please select a member of the Team to be the new team lead.'])
        # Run the team lead test again with a valid character id that isn't on the team
        char = Character.objects.create(
            avatar_url='https://img.savageaim.com/abcde',
            lodestone_id=1348724213,
            user=self._create_user(),
            name='Char 2',
            world='Lich',
        )
        data = {
            'name': 'Hi c:',
            'tier_id': Tier.objects.first().pk,
            'team_lead': char.id,
        }
        response = self.client.put(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
        content = response.json()
        self.assertEqual(content['team_lead'], ['Please select a member of the Team to be the new team lead.'])
    def test_delete(self):
        """
        Send a request to disband a Team.
        Ensure that other Members of the Team receive a disband notification as well
        """
        user = self._get_user()
        self.client.force_authenticate(user)
        url = reverse('api:team_resource', kwargs={'pk': self.team.id})
        # Add a second Member owned by a different user
        other_char = Character.objects.create(
            avatar_url='https://img.savageaim.com/abcde',
            lodestone_id=987654321,
            user=self._create_user(),
            name='Char 2',
            world='Lich',
        )
        g = Gear.objects.first()
        other_bis = BISList.objects.create(
            bis_body=g,
            bis_bracelet=g,
            bis_earrings=g,
            bis_feet=g,
            bis_hands=g,
            bis_head=g,
            bis_left_ring=g,
            bis_legs=g,
            bis_mainhand=g,
            bis_necklace=g,
            bis_offhand=g,
            bis_right_ring=g,
            current_body=g,
            current_bracelet=g,
            current_earrings=g,
            current_feet=g,
            current_hands=g,
            current_head=g,
            current_left_ring=g,
            current_legs=g,
            current_mainhand=g,
            current_necklace=g,
            current_offhand=g,
            current_right_ring=g,
            job=Job.objects.last(),
            owner=other_char,
        )
        self.team.members.create(character=other_char, bis_list=other_bis, lead=False)
        response = self.client.delete(url)
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.content)
        with self.assertRaises(Team.DoesNotExist):
            Team.objects.get(pk=self.team.pk)
        # The requesting user gets no notification; the other member gets one
        self.assertEqual(Notification.objects.filter(user=user).count(), 0)
        user = other_char.user
        self.assertEqual(Notification.objects.filter(user=user).count(), 1)
        notif = Notification.objects.filter(user=user).first()
        self.assertEqual(notif.text, f'"{self.team.name}" has been disbanded!')
    def test_404(self):
        """
        Test the cases that cause a 404 to be returned;
        - ID doesn't exist
        - Read request from someone who doesn't have a character in the Team
        - Update request from someone who doesn't have a character in the Team
        - Update request from someone that isn't the team lead
        """
        user = self._get_user()
        self.client.force_authenticate(user)
        # ID doesn't exist
        url = reverse('api:team_resource', kwargs={'pk': 'abcde-abcde-abcde-abcde'})
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.content)
        response = self.client.put(url)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.content)
        response = self.client.patch(url)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.content)
        response = self.client.delete(url)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.content)
        url = reverse('api:team_resource', kwargs={'pk': self.team.id})
        # Check update as non-team lead
        self.tm.lead = False
        self.tm.save()
        response = self.client.put(url)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.content)
        response = self.client.patch(url)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.content)
        response = self.client.delete(url)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.content)
        # Delete membership altogether and test both read and update
        self.tm.delete()
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.content)
        response = self.client.put(url)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.content)
        response = self.client.patch(url)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.content)
        response = self.client.delete(url)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.content)
class TeamInvite(SavageAimTestCase):
    """
    Test the Team Invite views work as we want them to
    """

    @staticmethod
    def _create_bis_list(owner, job=None, bis_gear=None, current_gear=None):
        """
        Create a BISList for `owner` with every BIS slot set to `bis_gear`
        and every current slot set to `current_gear`.

        Defaults mirror the inline creation code this helper replaces:
        the first Gear record for both slot sets, and the first Job.

        Args:
            owner (Character): owner of the new BISList.
            job (Job): job for the list; defaults to the first Job.
            bis_gear (Gear): gear for all bis_* slots; defaults to first Gear.
            current_gear (Gear): gear for all current_* slots; defaults to bis_gear.

        Returns:
            BISList: the created list.
        """
        if bis_gear is None:
            bis_gear = Gear.objects.first()
        if current_gear is None:
            current_gear = bis_gear
        if job is None:
            job = Job.objects.first()
        slots = (
            'body', 'bracelet', 'earrings', 'feet', 'hands', 'head',
            'left_ring', 'legs', 'mainhand', 'necklace', 'offhand', 'right_ring',
        )
        kwargs = {}
        for slot in slots:
            kwargs[f'bis_{slot}'] = bis_gear
            kwargs[f'current_{slot}'] = current_gear
        return BISList.objects.create(job=job, owner=owner, **kwargs)

    def setUp(self):
        """
        Run the seed commands, and create some necessary data
        """
        call_command('job_seed', stdout=StringIO())
        call_command('gear_seed', stdout=StringIO())
        call_command('tier_seed', stdout=StringIO())

        self.char = Character.objects.create(
            avatar_url='https://img.savageaim.com/abcde',
            lodestone_id=1234567890,
            user=self._get_user(),
            name='Char 1',
            world='Lich',
        )
        self.bis = self._create_bis_list(self.char)
        self.team = Team.objects.create(
            invite_code=Team.generate_invite_code(),
            name='Test Team 1',
            tier=Tier.objects.first(),
        )

    def tearDown(self):
        """
        Clean up the DB after each test
        """
        Notification.objects.all().delete()
        TeamMember.objects.all().delete()
        Team.objects.all().delete()
        BISList.objects.all().delete()
        Character.objects.all().delete()

    def test_head(self):
        """
        Ensure the head method returns status 200 for a valid invite code, without having to be a Member
        """
        user = self._get_user()
        self.client.force_authenticate(user)

        url = reverse('api:team_invite', kwargs={'invite_code': self.team.invite_code})
        response = self.client.head(url)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)

    def test_read(self):
        """
        Ensure that anyone can read a team via its invite code
        """
        user = self._get_user()
        self.client.force_authenticate(user)

        url = reverse('api:team_invite', kwargs={'invite_code': self.team.invite_code})
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        self.assertDictEqual(response.json(), TeamSerializer(self.team).data)

    def test_join(self):
        """
        Attempt to join a Team using a character and bis list
        """
        user = self._create_user()
        self.client.force_authenticate(user)
        url = reverse('api:team_invite', kwargs={'invite_code': self.team.invite_code})

        # Link the self.char to the team for notification checking
        TeamMember.objects.create(team=self.team, character=self.char, bis_list=self.bis, lead=True)

        # Create new details
        char = Character.objects.create(
            avatar_url='https://img.savageaim.com/abcde',
            lodestone_id=1234567890,
            user=user,
            name='Char 1',
            world='Lich',
            verified=True,
        )
        bis = self._create_bis_list(char)

        data = {
            'character_id': char.id,
            'bis_list_id': bis.id,
        }
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)
        self.assertEqual(response.json()['id'], str(self.team.id))

        # Check that the self.user has a notification
        self.assertEqual(Notification.objects.filter(user=self.char.user).count(), 1)
        notif = Notification.objects.filter(user=self.char.user).first()
        self.assertEqual(notif.link, f'/team/{self.team.id}/')
        self.assertEqual(notif.text, f'{char} has joined {self.team.name}!')
        self.assertEqual(notif.type, 'team_join')
        self.assertFalse(notif.read)

    def test_join_400(self):
        """
        Attempt to join a Team using bad values and ensure 400 responses and correct errors are returned

        Character ID Sent: 'This field is required.'
        Character ID Not Int: 'A valid integer is required.'
        Character ID Invalid: 'Please select a valid, verified Character that you own.'
        Character already in team: 'This Character is already a member of the Team.'
        BIS List ID Not Sent: 'This field is required.'
        BIS List ID Not Int: 'A valid integer is required.'
        BIS List doesn't belong to any of your characters: 'Please select a valid BISList belonging to your Character.'
        BIS List doesn't belong to sent character: 'Please select a valid BISList belonging to your Character.'
        """
        user = self._get_user()
        self.client.force_authenticate(user)
        url = reverse('api:team_invite', kwargs={'invite_code': self.team.invite_code})

        # No data at all -> both fields required
        response = self.client.post(url)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
        content = response.json()
        self.assertEqual(content['character_id'], ['This field is required.'])
        self.assertEqual(content['bis_list_id'], ['This field is required.'])

        # Non-integer ids
        data = {
            'character_id': 'abcde',
            'bis_list_id': 'abcde',
        }
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
        content = response.json()
        self.assertEqual(content['character_id'], ['A valid integer is required.'])
        self.assertEqual(content['bis_list_id'], ['A valid integer is required.'])

        # Integer ids that don't exist
        data = {
            'character_id': '999999999999',
            'bis_list_id': '999999999999',
        }
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
        content = response.json()
        self.assertEqual(content['character_id'], ['Please select a valid, verified Character that you own.'])
        self.assertEqual(content['bis_list_id'], ['Please select a valid BISList belonging to your Character.'])

        # Test with valid unverified char, and someone elses' bis list
        char = Character.objects.create(
            avatar_url='https://img.savageaim.com/abcde',
            lodestone_id=1348724213,
            user=self._create_user(),
            name='Char 2',
            world='Lich',
        )
        bis = self._create_bis_list(
            char,
            job=Job.objects.get(pk='DRG'),
            bis_gear=Gear.objects.first(),
            current_gear=Gear.objects.last(),
        )
        data = {
            'character_id': self.char.id,
            'bis_list_id': bis.id,
        }
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
        content = response.json()
        self.assertEqual(content['character_id'], ['Please select a valid, verified Character that you own.'])
        self.assertEqual(content['bis_list_id'], ['Please select a valid BISList belonging to your Character.'])

        # Make the character a member of the team
        tm = TeamMember.objects.create(team=self.team, character=self.char, bis_list=self.bis)
        self.char.verified = True
        self.char.save()
        data = {
            'character_id': self.char.id,
            'bis_list_id': bis.id,
        }
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
        content = response.json()
        self.assertEqual(content['character_id'], ['This Character is already a member of the Team.'])

        # Lastly check the top level validate error
        tm.delete()
        char.user = self._get_user()
        char.save()
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
        content = response.json()
        self.assertEqual(content['bis_list_id'], ['Please select a valid BISList belonging to your Character.'])

    def test_404(self):
        """
        Test 404 errors for invalid invite urls
        """
        user = self._get_user()
        self.client.force_authenticate(user)

        url = reverse('api:team_invite', kwargs={'invite_code': 'abcde' * 8})
        self.assertEqual(self.client.head(url).status_code, status.HTTP_404_NOT_FOUND)
        self.assertEqual(self.client.get(url).status_code, status.HTTP_404_NOT_FOUND)
        self.assertEqual(self.client.post(url).status_code, status.HTTP_404_NOT_FOUND)
| 40.059867 | 119 | 0.608208 |
79599dc37b7e442bba189c1cd4fc1b2e6d05454f | 96 | py | Python | venv/lib/python3.8/site-packages/setuptools/_distutils/command/config.py | Retraces/UkraineBot | 3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71 | [
"MIT"
] | 2 | 2022-03-13T01:58:52.000Z | 2022-03-31T06:07:54.000Z | venv/lib/python3.8/site-packages/setuptools/_distutils/command/config.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | 19 | 2021-11-20T04:09:18.000Z | 2022-03-23T15:05:55.000Z | venv/lib/python3.8/site-packages/setuptools/_distutils/command/config.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | null | null | null | /home/runner/.cache/pip/pool/d9/a4/e3/c30dcfc23301f3e6626c27b83fb07ea86d61335827feb257632c51cfa7 | 96 | 96 | 0.895833 |
79599ef91ed81b290b1e0958a48dfba9289106f5 | 13,148 | py | Python | test/functional/llmq-is-cl-conflicts.py | INTICOIN/SolD | cfa2f3a96b0f8831fee63c70203af17732181fe5 | [
"MIT"
] | null | null | null | test/functional/llmq-is-cl-conflicts.py | INTICOIN/SolD | cfa2f3a96b0f8831fee63c70203af17732181fe5 | [
"MIT"
] | null | null | null | test/functional/llmq-is-cl-conflicts.py | INTICOIN/SolD | cfa2f3a96b0f8831fee63c70203af17732181fe5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2020 The SolD Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import time
from decimal import Decimal
from test_framework import mininode
from test_framework.blocktools import get_masternode_payment, create_coinbase, create_block
from test_framework.mininode import *
from test_framework.test_framework import SolDTestFramework
from test_framework.util import sync_blocks, sync_mempools, p2p_port, assert_raises_rpc_error, get_bip9_status
'''
llmq-is-cl-conflicts.py
Checks conflict handling between ChainLocks and InstantSend
'''
class TestNode(P2PInterface):
    """P2P stub that announces CLSIG/ISLOCK messages and serves them on request.

    Messages registered via send_clsig()/send_islock() are announced to the
    peer with an inv and handed over when the peer replies with a getdata.
    """
    def __init__(self):
        super().__init__()
        self.clsigs = {}
        self.islocks = {}

    def send_clsig(self, clsig):
        # Announce the chainlock signature by its hash; the peer pulls the
        # payload via getdata. (Renamed local: 'hash' shadowed the builtin.)
        clsig_hash = uint256_from_str(hash256(clsig.serialize()))

        self.clsigs[clsig_hash] = clsig

        inv = msg_inv([CInv(29, clsig_hash)])  # 29 == MSG_CLSIG
        self.send_message(inv)

    def send_islock(self, islock):
        # Announce the instant-send lock by its hash; the peer pulls the
        # payload via getdata.
        islock_hash = uint256_from_str(hash256(islock.serialize()))

        self.islocks[islock_hash] = islock

        inv = msg_inv([CInv(30, islock_hash)])  # 30 == MSG_ISLOCK
        self.send_message(inv)

    def on_getdata(self, message):
        # Serve any previously-announced clsig/islock the peer asks for.
        for inv in message.inv:
            if inv.hash in self.clsigs:
                self.send_message(self.clsigs[inv.hash])
            if inv.hash in self.islocks:
                self.send_message(self.islocks[inv.hash])
class LLMQ_IS_CL_Conflicts(SolDTestFramework):
    """Functional test for conflict handling between ChainLocks and InstantSend."""

    def set_test_params(self):
        self.set_sold_test_params(4, 3, fast_dip3_enforcement=True)
        self.set_sold_dip8_activation(10)
        #disable_mocktime()

    def run_test(self):
        # DIP0008 must be active before ChainLocks can be produced
        while self.nodes[0].getblockchaininfo()["bip9_softforks"]["dip0008"]["status"] != "active":
            self.nodes[0].generate(10)
        self.sync_blocks(self.nodes, timeout=60*5)

        self.test_node = self.nodes[0].add_p2p_connection(TestNode())
        network_thread_start()
        self.nodes[0].p2p.wait_for_verack()

        self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
        self.nodes[0].spork("SPORK_19_CHAINLOCKS_ENABLED", 0)
        self.nodes[0].spork("SPORK_2_INSTANTSEND_ENABLED", 0)
        self.nodes[0].spork("SPORK_3_INSTANTSEND_BLOCK_FILTERING", 0)
        self.wait_for_sporks_same()

        self.mine_quorum()

        # mine single block, wait for chainlock
        self.nodes[0].generate(1)
        self.wait_for_chainlocked_block_all_nodes(self.nodes[0].getbestblockhash())

        self.test_chainlock_overrides_islock(False)
        self.test_chainlock_overrides_islock(True)
        self.test_islock_overrides_nonchainlock()

    def _send_chained_tx(self, parent_obj, parent_txid):
        """Create, sign and broadcast a TX spending the 1-coin (100000000
        duff) output of the given parent transaction.

        Args:
            parent_obj (CTransaction): deserialized parent transaction.
            parent_txid (str): hex txid of the parent.

        Returns:
            str: txid of the broadcast child transaction.
        """
        inputs = []
        for n, out in enumerate(parent_obj.vout):
            if out.nValue == 100000000:
                inputs.append({"txid": parent_txid, "vout": n})
        child = self.nodes[0].createrawtransaction(inputs, {self.nodes[0].getnewaddress(): 0.999})
        child = self.nodes[0].signrawtransaction(child)['hex']
        return self.nodes[0].sendrawtransaction(child)

    def test_chainlock_overrides_islock(self, test_block_conflict):
        # create three raw TXs, they will conflict with each other
        rawtx1 = self.create_raw_tx(self.nodes[0], self.nodes[0], 1, 1, 100)['hex']
        rawtx2 = self.create_raw_tx(self.nodes[0], self.nodes[0], 1, 1, 100)['hex']
        # NOTE(review): rawtx3 is built but never used below; kept because
        # create_raw_tx may touch wallet state -- confirm before removing.
        rawtx3 = self.create_raw_tx(self.nodes[0], self.nodes[0], 1, 1, 100)['hex']

        rawtx1_obj = FromHex(CTransaction(), rawtx1)
        rawtx2_obj = FromHex(CTransaction(), rawtx2)
        rawtx3_obj = FromHex(CTransaction(), rawtx3)

        rawtx1_txid = self.nodes[0].sendrawtransaction(rawtx1)
        rawtx2_txid = encode(hash256(hex_str_to_bytes(rawtx2))[::-1], 'hex_codec').decode('ascii')
        rawtx3_txid = encode(hash256(hex_str_to_bytes(rawtx3))[::-1], 'hex_codec').decode('ascii')

        # Create a chained TX on top of tx1
        rawtx4_txid = self._send_chained_tx(rawtx1_obj, rawtx1_txid)

        # wait for transactions to propagate
        self.sync_mempools()
        for node in self.nodes:
            self.wait_for_instantlock(rawtx1_txid, node)
            self.wait_for_instantlock(rawtx4_txid, node)

        block = self.create_block(self.nodes[0], [rawtx2_obj])
        if test_block_conflict:
            # The block shouldn't be accepted/connected but it should be known to node 0 now
            submit_result = self.nodes[0].submitblock(ToHex(block))
            assert submit_result == "conflict-tx-lock"

        cl = self.create_chainlock(self.nodes[0].getblockcount() + 1, block.sha256)
        self.test_node.send_clsig(cl)
        for node in self.nodes:
            self.wait_for_best_chainlock(node, "%064x" % block.sha256)

        self.sync_blocks()

        # At this point all nodes should be in sync and have the same "best chainlock"
        submit_result = self.nodes[1].submitblock(ToHex(block))
        if test_block_conflict:
            # Node 1 should receive the block from node 0 and should not accept it again via submitblock
            assert submit_result == "duplicate"
        else:
            # The block should get accepted now, and at the same time prune the conflicting ISLOCKs
            assert submit_result is None

        for node in self.nodes:
            self.wait_for_chainlocked_block(node, "%064x" % block.sha256)

        # Create a chained TX on top of tx2
        rawtx5_txid = self._send_chained_tx(rawtx2_obj, rawtx2_txid)

        # wait for the transaction to propagate
        self.sync_mempools()
        for node in self.nodes:
            self.wait_for_instantlock(rawtx5_txid, node)

        # Lets verify that the ISLOCKs got pruned
        for node in self.nodes:
            assert_raises_rpc_error(-5, "No such mempool or blockchain transaction", node.getrawtransaction, rawtx1_txid, True)
            assert_raises_rpc_error(-5, "No such mempool or blockchain transaction", node.getrawtransaction, rawtx4_txid, True)
            rawtx = node.getrawtransaction(rawtx2_txid, True)
            assert rawtx['chainlock']
            assert rawtx['instantlock']
            assert not rawtx['instantlock_internal']

    def test_islock_overrides_nonchainlock(self):
        # create two raw TXs, they will conflict with each other
        rawtx1 = self.create_raw_tx(self.nodes[0], self.nodes[0], 1, 1, 100)['hex']
        rawtx2 = self.create_raw_tx(self.nodes[0], self.nodes[0], 1, 1, 100)['hex']

        rawtx1_txid = encode(hash256(hex_str_to_bytes(rawtx1))[::-1], 'hex_codec').decode('ascii')
        rawtx2_txid = encode(hash256(hex_str_to_bytes(rawtx2))[::-1], 'hex_codec').decode('ascii')

        # Create an ISLOCK but don't broadcast it yet
        islock = self.create_islock(rawtx2)

        # Stop enough MNs so that ChainLocks don't work anymore
        for _ in range(2):
            self.stop_node(len(self.nodes) - 1)
            self.nodes.pop(len(self.nodes) - 1)
            self.mninfo.pop(len(self.mninfo) - 1)

        # Send tx1, which will later conflict with the ISLOCK
        self.nodes[0].sendrawtransaction(rawtx1)

        # fast forward 11 minutes, so that the TX is considered safe and included in the next block
        self.bump_mocktime(int(60 * 11))

        # Mine the conflicting TX into a block
        good_tip = self.nodes[0].getbestblockhash()
        self.nodes[0].generate(2)
        self.sync_all()

        # Assert that the conflicting tx got mined and the locked TX is not valid
        assert self.nodes[0].getrawtransaction(rawtx1_txid, True)['confirmations'] > 0
        assert_raises_rpc_error(-25, "Missing inputs", self.nodes[0].sendrawtransaction, rawtx2)

        # Send the ISLOCK, which should result in the last 2 blocks to be invalidated, even though the nodes don't know
        # the locked transaction yet
        self.test_node.send_islock(islock)
        time.sleep(5)

        assert self.nodes[0].getbestblockhash() == good_tip
        assert self.nodes[1].getbestblockhash() == good_tip

        # Send the actual transaction and mine it
        self.nodes[0].sendrawtransaction(rawtx2)
        self.nodes[0].generate(1)
        self.sync_all()

        assert self.nodes[0].getrawtransaction(rawtx2_txid, True)['confirmations'] > 0
        assert self.nodes[1].getrawtransaction(rawtx2_txid, True)['confirmations'] > 0
        assert self.nodes[0].getrawtransaction(rawtx2_txid, True)['instantlock']
        assert self.nodes[1].getrawtransaction(rawtx2_txid, True)['instantlock']
        assert self.nodes[0].getbestblockhash() != good_tip
        assert self.nodes[1].getbestblockhash() != good_tip

    def create_block(self, node, vtx=None):
        """Manually assemble a block on top of the node's current tip.

        The coinbase is rebuilt from the block template so that the
        masternode payment and the CbTx payload stay valid.

        Args:
            node: RPC node to build on top of.
            vtx (list): extra CTransaction objects to include in the block.
        """
        # Avoid the mutable-default-argument pitfall (was vtx=[]).
        if vtx is None:
            vtx = []

        bt = node.getblocktemplate()
        height = bt['height']
        tip_hash = bt['previousblockhash']

        coinbasevalue = bt['coinbasevalue']
        miner_address = node.getnewaddress()
        mn_payee = bt['masternode'][0]['payee']

        # calculate fees that the block template included (we'll have to remove it from the coinbase as we won't
        # include the template's transactions
        bt_fees = 0
        for tx in bt['transactions']:
            bt_fees += tx['fee']

        new_fees = 0
        for tx in vtx:
            in_value = 0
            out_value = 0
            for txin in tx.vin:
                txout = node.gettxout("%064x" % txin.prevout.hash, txin.prevout.n, False)
                in_value += int(txout['value'] * COIN)
            for txout in tx.vout:
                out_value += txout.nValue
            new_fees += in_value - out_value

        # fix fees
        coinbasevalue -= bt_fees
        coinbasevalue += new_fees

        realloc_info = get_bip9_status(self.nodes[0], 'realloc')
        realloc_height = 99999999
        if realloc_info['status'] == 'active':
            realloc_height = realloc_info['since']

        mn_amount = get_masternode_payment(height, coinbasevalue, realloc_height)
        miner_amount = coinbasevalue - mn_amount

        outputs = {miner_address: str(Decimal(miner_amount) / COIN)}
        if mn_amount > 0:
            outputs[mn_payee] = str(Decimal(mn_amount) / COIN)

        coinbase = FromHex(CTransaction(), node.createrawtransaction([], outputs))
        coinbase.vin = create_coinbase(height).vin

        # We can't really use this one as it would result in invalid merkle roots for masternode lists
        if len(bt['coinbase_payload']) != 0:
            cbtx = FromHex(CCbTx(version=1), bt['coinbase_payload'])
            coinbase.nVersion = 3
            coinbase.nType = 5  # CbTx
            coinbase.vExtraPayload = cbtx.serialize()

        coinbase.calc_sha256()

        block = create_block(int(tip_hash, 16), coinbase, nTime=bt['curtime'])
        block.vtx += vtx

        # Add quorum commitments from template
        for tx in bt['transactions']:
            tx2 = FromHex(CTransaction(), tx['data'])
            if tx2.nType == 6:
                block.vtx.append(tx2)

        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()
        return block

    def _get_recovered_sig(self, request_id, message_hash):
        """Ask every masternode to sign and poll node 0 (up to ~10s) for the
        recovered LLMQ signature; fails the test if none is recovered."""
        for mn in self.mninfo:
            mn.node.quorum('sign', 100, request_id, message_hash)

        rec_sig = None
        t = time.time()
        while time.time() - t < 10:
            try:
                rec_sig = self.nodes[0].quorum('getrecsig', 100, request_id, message_hash)
                break
            except Exception:
                # Signature not recovered yet; keep polling. (Was a bare
                # except, which also swallowed KeyboardInterrupt.)
                time.sleep(0.1)
        assert rec_sig is not None
        return rec_sig

    def create_chainlock(self, height, blockHash):
        """Build a signed CLSIG message for the given height/block hash."""
        request_id = "%064x" % uint256_from_str(hash256(ser_string(b"clsig") + struct.pack("<I", height)))
        message_hash = "%064x" % blockHash

        rec_sig = self._get_recovered_sig(request_id, message_hash)
        clsig = msg_clsig(height, blockHash, hex_str_to_bytes(rec_sig['sig']))
        return clsig

    def create_islock(self, hextx):
        """Build a signed ISLOCK message for the given raw transaction hex."""
        tx = FromHex(CTransaction(), hextx)
        tx.rehash()

        # Request id commits to every outpoint the transaction spends
        request_id_buf = ser_string(b"islock") + ser_compact_size(len(tx.vin))
        inputs = []
        for txin in tx.vin:
            request_id_buf += txin.prevout.serialize()
            inputs.append(txin.prevout)
        request_id = "%064x" % uint256_from_str(hash256(request_id_buf))
        message_hash = "%064x" % tx.sha256

        rec_sig = self._get_recovered_sig(request_id, message_hash)
        islock = msg_islock(inputs, tx.sha256, hex_str_to_bytes(rec_sig['sig']))
        return islock
# Entry point: run the functional test when executed directly.
if __name__ == '__main__':
    LLMQ_IS_CL_Conflicts().main()
| 39.483483 | 127 | 0.635458 |
79599f741328d8152fbd298d347f8c7bea8550a8 | 11,227 | py | Python | src/tests/ftest/util/nvme_utils.py | grom72/daos | 88f57e0ab29222565dfdd1e23f2103f0ad2e0670 | [
"BSD-2-Clause-Patent"
] | 429 | 2016-09-28T20:43:20.000Z | 2022-03-25T01:22:50.000Z | src/tests/ftest/util/nvme_utils.py | grom72/daos | 88f57e0ab29222565dfdd1e23f2103f0ad2e0670 | [
"BSD-2-Clause-Patent"
] | 6,341 | 2016-11-24T12:34:26.000Z | 2022-03-31T23:53:46.000Z | src/tests/ftest/util/nvme_utils.py | grom72/daos | 88f57e0ab29222565dfdd1e23f2103f0ad2e0670 | [
"BSD-2-Clause-Patent"
] | 202 | 2016-10-30T14:47:53.000Z | 2022-03-30T21:29:11.000Z | #!/usr/bin/python
"""
(C) Copyright 2020-2021 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
import threading
import re
import time
import queue
from command_utils_base import CommandFailure
from avocado.core.exceptions import TestFail
from ior_test_base import IorTestBase
from ior_utils import IorCommand
from server_utils import ServerFailed
def get_device_ids(dmg, servers):
    """Collect the NVMe device UUIDs reported by each server.

    Runs 'dmg storage query list-devices' against every host and parses the
    UUIDs out of the command output.

    Args:
        dmg: DmgCommand class instance.
        servers (list): list of server hosts.

    Returns:
        dict: mapping of each server host to its list of device UUIDs.

    Raises:
        CommandFailure: if the dmg command fails on any host.
    """
    # Point dmg at the 'storage query list-devices' sub-command once.
    dmg.set_sub_command("storage")
    dmg.sub_command_class.set_sub_command("query")
    dmg.sub_command_class.sub_command_class.set_sub_command("list-devices")

    devices = {}
    for host in servers:
        dmg.hostlist = host
        try:
            result = dmg.run()
        except CommandFailure as error:
            raise CommandFailure(
                "dmg list-devices failed with error {}".format(
                    error)) from error
        # Every line containing 'UUID' names one NVMe device; the UUID is
        # the first token after the 'UUID:' marker.
        devices[host] = [
            line.split('UUID:')[1].split(' ')[0]
            for line in result.stdout_text.split('\n')
            if 'UUID' in line
        ]
    return devices
class ServerFillUp(IorTestBase):
    # pylint: disable=too-many-ancestors,too-many-instance-attributes
    """Class to fill up the servers based on pool percentage given.
    It will get the drives listed in yaml file and find the maximum capacity of
    the pool which will be created.
    IOR block size will be calculated as part of function based on percentage
    of pool needs to fill up.
    """
    def __init__(self, *args, **kwargs):
        """Initialize a IorTestBase object."""
        super().__init__(*args, **kwargs)
        # Defaults below are typically overridden by tests before the
        # helper methods are called.
        self.no_of_pools = 1
        self.capacity = 1             # percentage of free space to fill
        self.no_of_servers = 1        # servers to fault drives on
        self.no_of_drives = 1         # drives per server to set faulty
        self.pool = None
        self.dmg = None
        self.set_faulty_device = False     # evict NVMe devices during IOR
        self.set_online_rebuild = False    # kill server ranks during IOR
        self.rank_to_kill = None
        self.scm_fill = False         # fill the SCM tier
        self.nvme_fill = False        # fill the NVMe tier
        self.ior_matrix = None        # IOR metrics from the last run
        self.fail_on_warning = False
    def setUp(self):
        """Set up each test case."""
        # obtain separate logs
        self.update_log_file_names()
        # Start the servers and agents
        super().setUp()
        self.hostfile_clients = None
        # Remember the yaml-provided IOR defaults so individual runs can
        # switch flags/transfer sizes and restore them later.
        self.ior_default_flags = self.ior_cmd.flags.value
        self.ior_scm_xfersize = self.ior_cmd.transfer_size.value
        self.ior_read_flags = self.params.get("read_flags",
                                              '/run/ior/iorflags/*',
                                              '-r -R -k -G 1')
        self.ior_nvme_xfersize = self.params.get(
            "nvme_transfer_size", '/run/ior/transfersize_blocksize/*',
            '16777216')
        # Get the number of daos_engine
        self.engines = (self.server_managers[0].manager.job.yaml.engine_params)
        # Queue used to report PASS/FAIL back from the IOR worker thread.
        self.out_queue = queue.Queue()
    def start_ior_thread(self, results, create_cont, operation='WriteRead'):
        """Start IOR write/read threads and wait until all threads are finished.

        Args:
            results (queue): queue for returning thread results
            create_cont (bool): whether to create a new container for IOR
            operation (str): IOR operation for read/write. Default it will do
                whatever mention in ior_flags set.
        """
        self.ior_cmd.flags.value = self.ior_default_flags
        # For IOR Other operation, calculate the block size based on server %
        # to fill up. Store the container UUID for future reading operation.
        if operation == 'Write':
            block_size = self.calculate_ior_block_size()
            self.ior_cmd.block_size.update('{}'.format(block_size))
        # For IOR Read only operation, retrieve the stored container UUID
        elif operation == 'Read':
            create_cont = False
            self.ior_cmd.flags.value = self.ior_read_flags
        # run IOR Command
        try:
            out = self.run_ior_with_pool(create_cont=create_cont,
                                         fail_on_warning=self.fail_on_warning)
            self.ior_matrix = IorCommand.get_ior_metrics(out)
            results.put("PASS")
        except (CommandFailure, TestFail) as _error:
            # Only the PASS/FAIL marker is reported; the caller fails the
            # test when it sees "FAIL" in the queue.
            results.put("FAIL")
    def calculate_ior_block_size(self):
        """Calculate IOR Block size to fill up the Server.

        Returns:
            block_size(int): IOR Block size
        """
        # Check the replica for IOR object to calculate the correct block size.
        _replica = re.findall(r'_(.+?)G', self.ior_cmd.dfs_oclass.value)
        if not _replica:
            replica_server = 1
        # This is for EC Parity
        elif 'P' in _replica[0]:
            replica_server = re.findall(r'\d+', _replica[0])[0]
        else:
            replica_server = _replica[0]
        # NOTE(review): replica_server may be a str here; it is converted
        # with int() before use below.
        print('Replica Server = {}'.format(replica_server))
        if self.scm_fill:
            free_space = self.pool.get_pool_daos_space()["s_total"][0]
            self.ior_cmd.transfer_size.value = self.ior_scm_xfersize
        elif self.nvme_fill:
            free_space = self.pool.get_pool_daos_space()["s_total"][1]
            self.ior_cmd.transfer_size.value = self.ior_nvme_xfersize
        else:
            self.fail('Provide storage type (SCM/NVMe) to be filled')
        # Get the block size based on the capacity to be filled. For example
        # If nvme_free_space is 100G and to fill 50% of capacity.
        # Formula : (107374182400 / 100) * 50.This will give 50% of space to be
        # filled. Divide with total number of process, 16 process means each
        # process will write 3.12Gb.last, if there is replica set, For RP_2G1
        # will divide the individual process size by number of replica.
        # 3.12G (Single process size)/2 (No of Replica) = 1.56G
        # To fill 50 % of 100GB pool with total 16 process and replica 2, IOR
        # single process size will be 1.56GB.
        _tmp_block_size = (((free_space/100)*self.capacity)/self.processes)
        _tmp_block_size = int(_tmp_block_size / int(replica_server))
        # Round down to a multiple of the transfer size, as IOR requires.
        block_size = (
            int(_tmp_block_size / int(self.ior_cmd.transfer_size.value)) *
            int(self.ior_cmd.transfer_size.value))
        return block_size
    def set_device_faulty(self, server, disk_id):
        """Set the devices to Faulty and wait for rebuild to complete.

        Args:
            server (string): server hostname where it generate the NVMe fault.
            disk_id (string): NVMe disk ID where it will be changed to faulty.
        """
        self.dmg.hostlist = server
        self.dmg.storage_set_faulty(disk_id)
        result = self.dmg.storage_query_device_health(disk_id)
        # Check if device state changed to EVICTED.
        if 'State:EVICTED' not in result.stdout_text:
            self.fail("device State {} on host {} suppose to be EVICTED"
                      .format(disk_id, server))
        # Wait for rebuild to start
        self.pool.wait_for_rebuild(True)
        # Wait for rebuild to complete
        self.pool.wait_for_rebuild(False)
    def set_device_faulty_loop(self):
        """Set devices to Faulty one by one and wait for rebuild to complete."""
        # Get the device ids from all servers and try to eject the disks
        device_ids = get_device_ids(self.dmg, self.hostlist_servers)
        # no_of_servers and no_of_drives can be set from test yaml.
        # 1 Server, 1 Drive = Remove single drive from single server
        for num in range(0, self.no_of_servers):
            server = self.hostlist_servers[num]
            for disk_id in range(0, self.no_of_drives):
                self.set_device_faulty(server, device_ids[server][disk_id])
    def get_max_storage_sizes(self):
        """Get the maximum pool sizes for the current server configuration.

        Returns:
            list: a list of the maximum SCM and NVMe size
        """
        try:
            sizes_dict = self.server_managers[0].get_available_storage()
            sizes = [sizes_dict["scm"], sizes_dict["nvme"]]
        except (ServerFailed, KeyError) as error:
            # self.fail raises, so 'sizes' is always bound past this point.
            self.fail(error)
        # Return the 96% of storage space as it won't be used 100%
        # for pool creation.
        for index, _size in enumerate(sizes):
            sizes[index] = int(sizes[index] * 0.96)
        return sizes
    def create_pool_max_size(self, scm=False, nvme=False):
        """Create a single pool with Maximum NVMe/SCM size available.

        Args:
            scm (bool): To create the pool with max SCM size or not.
            nvme (bool): To create the pool with max NVMe size or not.

        Note: Method to Fill up the server. It will get the maximum Storage
              space and create the pool.
              Replace with dmg options in future when it's available.
        """
        # Create a pool
        self.add_pool(create=False)
        if nvme or scm:
            sizes = self.get_max_storage_sizes()
        # If NVMe is True get the max NVMe size from servers
        if nvme:
            self.pool.nvme_size.update('{}'.format(sizes[1]))
        # If SCM is True get the max SCM size from servers
        if scm:
            self.pool.scm_size.update('{}'.format(sizes[0]))
        # Create the Pool
        self.pool.create()
    def start_ior_load(self, storage='NVMe', operation="Write", percent=1,
                       create_cont=True):
        """Fill up the server either SCM or NVMe.

        Fill up based on percent amount given using IOR.

        Args:
            storage (string): SCM or NVMe, by default it will fill NVMe.
            operation (string): Write/Read operation
            percent (int): % of storage to be filled
            create_cont (bool): To create the new container for IOR
        """
        self.capacity = percent
        # Fill up NVMe by default
        self.nvme_fill = 'NVMe' in storage
        self.scm_fill = 'SCM' in storage
        # Create the IOR threads
        job = threading.Thread(target=self.start_ior_thread,
                               kwargs={"results": self.out_queue,
                                       "create_cont": create_cont,
                                       "operation": operation})
        # Launch the IOR thread
        job.start()
        # Set NVMe device faulty if it's set
        if self.set_faulty_device:
            time.sleep(60)
            # Set the device faulty
            self.set_device_faulty_loop()
        # Kill the server rank while IOR in progress
        if self.set_online_rebuild:
            time.sleep(30)
            # Kill the server rank
            if self.rank_to_kill is not None:
                self.get_dmg_command().system_stop(True, self.rank_to_kill)
        # Wait to finish the thread
        job.join()
        # Verify the queue and make sure no FAIL for any IOR run
        while not self.out_queue.empty():
            if self.out_queue.get() == "FAIL":
                self.fail("FAIL")
| 38.44863 | 80 | 0.610314 |
7959a20e63df9c85e85417113dce07a0abb8e256 | 3,527 | py | Python | goldy_smart_house/commands.py | THEGOLDENPRO/Goldy-Smart-House | 1745ed8f2aa60d77ba2031d2ee1a679bd771214b | [
"MIT"
] | null | null | null | goldy_smart_house/commands.py | THEGOLDENPRO/Goldy-Smart-House | 1745ed8f2aa60d77ba2031d2ee1a679bd771214b | [
"MIT"
] | null | null | null | goldy_smart_house/commands.py | THEGOLDENPRO/Goldy-Smart-House | 1745ed8f2aa60d77ba2031d2ee1a679bd771214b | [
"MIT"
] | null | null | null | import threading
import time
import goldy_smart_house
import datetime
# Tag prepended to every log line emitted by this module.
MODULE_NAME = "COMMANDS"
class Command(object):
    """Wrap one raw line from commands.txt and expose its parsed fields.

    A raw line looks like ``[March 01, 2022 at 10:30AM]: <command name>``.
    """

    def __init__(self, unformatted_command_string: str):
        self.unformatted_command_string = unformatted_command_string

    @property
    def id(self):
        # NOTE(review): ``command_id`` is never assigned anywhere in this
        # class, so accessing ``id`` raises AttributeError — behavior
        # preserved as-is; confirm the intended source of the id.
        return self.command_id

    @property
    def name(self):
        # The command name sits between the timestamp bracket and (at most)
        # the second "]: " separator.
        parts = self.unformatted_command_string.split("]: ", 2)
        return parts[1]

    @property
    def datetime(self) -> datetime.datetime:
        # The leading "[...]" chunk is the timestamp,
        # e.g. "[March 01, 2022 at 10:30AM]".
        stamp = self.unformatted_command_string.split(": ")[0]
        return datetime.datetime.strptime(stamp, "[%B %d, %Y at %I:%M%p]")
class Loop(threading.Thread):
    """Background thread implementing the 'on_command' event loop.

    Polls commands.txt on Dropbox once per second and fires the matching
    user-registered command function whenever a new line appears.
    """
    def __init__(self, client:goldy_smart_house.client.Client):
        threading.Thread.__init__(self)
        self.stop_ = False
        self.client = client
        self.dropbox = client.dropbox
        self.log = goldy_smart_house.utility.log

    def run(self):
        # Seed the cache with every command already present so old entries
        # are not replayed as new commands on startup.
        for line in self.dropbox.read_file().splitlines():
            goldy_smart_house.cache.main_cache_dict["old_commands"].append(line)

        while True:
            command_list = self.dropbox.read_file().splitlines()

            # A longer file than our cache means at least one new command.
            if len(command_list) > len(goldy_smart_house.cache.main_cache_dict["old_commands"]):
                new_command = Command(command_list[-1])
                self.log(self.client, f"[{MODULE_NAME}] NEW command detected >>> {new_command.name}")

                # Checks if the command has been declared in user's code.
                result = self.does_command_exist(new_command)
                if result[0]:
                    self.log(self.client, f"[{MODULE_NAME}] Command found, running it's function...")
                    # Runs the Function
                    self.execute_command_func(
                        func=goldy_smart_house.cache.main_cache_dict["assigned_commands"][f"{result[1]}"]["function_object"],
                        result=result)
                else:
                    self.log(self.client, f"[{MODULE_NAME}] That command was not found!")

                # BUGFIX: record the command that was just processed.
                # Previously this appended the stale ``line`` variable left
                # over from the seeding loop, which never marked new commands
                # as seen and raised NameError when the file started empty.
                goldy_smart_house.cache.main_cache_dict["old_commands"].append(command_list[-1])

            time.sleep(1)
            if self.stop_:
                self.log(self.client, f"[{MODULE_NAME}] Loop stopped!")
                break

    def stop(self):
        """Ask the loop to exit after its current iteration."""
        self.stop_ = True

    def execute_command_func(self, func, result):
        """Run the registered command function and optionally read the
        returned string back on the configured Google Nest device."""
        read_back_string = func()
        self.log(self.client, f"[{MODULE_NAME}] Function executed!")

        if goldy_smart_house.cache.main_cache_dict["assigned_commands"][f"{result[1]}"]["smart_speaker_read_back"]:
            if not read_back_string == None:
                if not self.client.google_nest_device == None:
                    self.log(self.client, f"[{MODULE_NAME}] Reading back to Google Nest Device...")
                    self.client.google_nest_device.say(read_back_string)

    def does_command_exist(self, command:Command) -> tuple:
        """Checks if the command exists.

        Returns:
            tuple: (True, assigned command key) on a match, else (False, None).
        """
        for assigned_command in goldy_smart_house.cache.main_cache_dict["assigned_commands"]:
            if (command.name).lower() == goldy_smart_house.cache.main_cache_dict["assigned_commands"][f"{assigned_command}"]["name"]:
                return (True, assigned_command)
        return (False, None)
7959a2146cd1c016160d52d784d4c1863c85de25 | 2,350 | py | Python | applications/tensorflow/cnns/inference/data.py | Splendon/examples | ed4a8a01857b6ddca49559141acf5d0986eb01e1 | [
"MIT"
] | null | null | null | applications/tensorflow/cnns/inference/data.py | Splendon/examples | ed4a8a01857b6ddca49559141acf5d0986eb01e1 | [
"MIT"
] | null | null | null | applications/tensorflow/cnns/inference/data.py | Splendon/examples | ed4a8a01857b6ddca49559141acf5d0986eb01e1 | [
"MIT"
] | null | null | null | # Copyright 2019 Graphcore Ltd.
from functools import partial
from typing import Callable, Tuple
import tensorflow as tf
def load_and_preprocess_data(img_path: str, img_width: int, img_height: int,
                             preprocess_fn: Callable, dtype: tf.DType) -> tf.Tensor:
    """Load a JPEG from disk and pre-process it for inference.

    Args:
        img_path: Path to the image file.
        img_width: Target width after resizing.
        img_height: Target height after resizing.
        preprocess_fn: Function that scales pixel values to the model's
            expected range (called with ``data_format='channels_last'``).
        dtype: Data type the final tensor is cast to.

    Returns:
        tf.Tensor holding the pre-processed image, cast to ``dtype``.
    """
    raw_bytes = tf.read_file(img_path)
    decoded = tf.image.decode_jpeg(raw_bytes, channels=3)
    resized = tf.image.resize(decoded, [img_height, img_width])
    scaled = preprocess_fn(resized, data_format='channels_last')
    return tf.cast(scaled, dtype)
def get_dataset(image_filenames: Tuple, batch_size: int, preprocess_fn: Callable, img_width: int, img_height: int,
                loop: bool, dtype: tf.DType) -> tf.data.Dataset:
    """Build a batched, prefetching ``tf.data`` pipeline over the images.

    Note: The dataset's iterator starts uninitialized; run
    ``iterator.initializer`` before use when using initializable iterators.

    Args:
        image_filenames: Tuple of image filenames (one per sample).
        batch_size: Number of images per batch.
        preprocess_fn: Pre-processing function to apply to each image.
        img_width: Expected width of each image.
        img_height: Expected height of each image.
        loop: When True, repeat the dataset indefinitely.
        dtype: Input data type for the decoded images.

    Returns:
        A ``tf.data.Dataset`` yielding fixed-size batches of images.
    """
    filenames = [str(item) for item in image_filenames]
    ds = tf.data.Dataset.from_tensor_slices(tf.constant(filenames))

    if loop:
        ds = ds.repeat()

    # Bind the static pre-processing parameters once; only the path varies.
    preprocess = partial(load_and_preprocess_data, img_width=img_width, img_height=img_height,
                         preprocess_fn=preprocess_fn, dtype=dtype)
    ds = ds.map(map_func=preprocess, num_parallel_calls=100)
    # drop_remainder keeps every batch exactly batch_size images.
    ds = ds.batch(batch_size, drop_remainder=True)
    return ds.prefetch(buffer_size=100)
| 35.606061 | 114 | 0.693617 |
7959a2489cd3ec4f82574827f0b93ce0f3c9b057 | 110 | py | Python | deepfryer/batcheffect/__init__.py | guigolab/DeepFryer | 83ddde69134c28a662735f153f568b247cb4d091 | [
"MIT"
] | 1 | 2018-06-18T02:15:50.000Z | 2018-06-18T02:15:50.000Z | deepfryer/batcheffect/__init__.py | guigolab/DeepFryer | 83ddde69134c28a662735f153f568b247cb4d091 | [
"MIT"
] | null | null | null | deepfryer/batcheffect/__init__.py | guigolab/DeepFryer | 83ddde69134c28a662735f153f568b247cb4d091 | [
"MIT"
] | 4 | 2018-09-25T18:36:14.000Z | 2020-01-04T12:47:31.000Z | from __future__ import absolute_import
from .covariate import *
from .plot import *
from .correction import * | 22 | 38 | 0.8 |
7959a558de61ee1b10e17473fe1a641baf340444 | 2,904 | py | Python | operator_api/ledger/integrity/transfer.py | liquidity-network/nocust-hub | 76f49f9b8a6c264fcbe9e0c110e98031d463c0a8 | [
"MIT"
] | 1 | 2021-08-04T06:09:46.000Z | 2021-08-04T06:09:46.000Z | operator_api/ledger/integrity/transfer.py | liquidity-network/nocust-hub | 76f49f9b8a6c264fcbe9e0c110e98031d463c0a8 | [
"MIT"
] | 8 | 2020-11-01T19:48:21.000Z | 2022-02-10T14:12:25.000Z | operator_api/ledger/integrity/transfer.py | liquidity-network/nocust-hub | 76f49f9b8a6c264fcbe9e0c110e98031d463c0a8 | [
"MIT"
] | 3 | 2020-11-01T15:59:56.000Z | 2021-09-16T07:18:18.000Z | # Transfer amount must be non-negative
non_negative_transfer_amount = \
"""
ALTER TABLE ledger_transfer DROP CONSTRAINT IF EXISTS non_negative_transfer_amount;
ALTER TABLE ledger_transfer ADD CONSTRAINT non_negative_transfer_amount CHECK ("amount" >= 0);
ALTER TABLE ledger_transfer VALIDATE CONSTRAINT non_negative_transfer_amount;
"""
# Transfer delivery state must be consistent (sent -> processed == ~sent or processed)
only_processed_transfers_may_be_sent = \
"""
ALTER TABLE ledger_transfer DROP CONSTRAINT IF EXISTS only_processed_transfers_may_be_sent;
ALTER TABLE ledger_transfer ADD CONSTRAINT only_processed_transfers_may_be_sent CHECK ((amount_swapped IS NOT NULL) OR (NOT complete) OR processed);
ALTER TABLE ledger_transfer VALIDATE CONSTRAINT only_processed_transfers_may_be_sent;
"""
# New transfers may not overspend
only_transferrable_amounts_may_be_sent = \
"""
CREATE OR REPLACE FUNCTION check_transfer_amount() RETURNS TRIGGER AS $$
DECLARE
deposited_amount NUMERIC(80, 0);
withdrawing_amount NUMERIC(80, 0);
balance_amount NUMERIC(80, 0) := 0;
active_state_amount NUMERIC(80, 0);
BEGIN
SELECT SUM(amount) INTO deposited_amount
FROM ledger_deposit
WHERE wallet_id = NEW.wallet_id
AND eon_number = NEW.eon_number;
SELECT SUM(amount) INTO withdrawing_amount
FROM ledger_withdrawalrequest
WHERE wallet_id = NEW.wallet_id
AND eon_number = NEW.eon_number;
SELECT (b.right - b.left) INTO balance_amount
FROM ledger_exclusivebalanceallotment b
WHERE b.wallet_id = NEW.wallet_id
AND b.eon_number = NEW.eon_number;
SELECT (updated_gains - updated_spendings) INTO active_state_amount
FROM ledger_activestate
WHERE wallet_id = NEW.wallet_id
AND eon_number = NEW.eon_number
AND id = NEW.sender_active_state_id;
IF deposited_amount - withdrawing_amount + balance_amount + active_state_amount < 0 THEN
RAISE EXCEPTION 'Overspending transaction';
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
DROP TRIGGER IF EXISTS only_transferrable_amounts_may_be_sent ON ledger_transfer CASCADE;
CREATE CONSTRAINT TRIGGER only_transferrable_amounts_may_be_sent
AFTER INSERT OR UPDATE
ON ledger_transfer
DEFERRABLE
INITIALLY DEFERRED
FOR EACH ROW
EXECUTE PROCEDURE check_transfer_amount();
"""
# Transfer swap flag should be true if and only if amount_swapped is not null
swap_flag = \
"""
ALTER TABLE ledger_transfer DROP CONSTRAINT IF EXISTS swap_flag;
ALTER TABLE ledger_transfer ADD CONSTRAINT swap_flag CHECK (swap = (amount_swapped IS NOT NULL));
ALTER TABLE ledger_transfer VALIDATE CONSTRAINT swap_flag;
"""
# TODO transfer active state updates must make sense (sender, then recipient, then operator signs both atomically)
| 39.243243 | 148 | 0.748623 |
7959a5a0dba989acbbd811ca68095455076c1ab1 | 2,115 | py | Python | fillplots/inequalities.py | wwwennie/miu_maps | f73398e731c1973b052c444afa0912ec48e335b3 | [
"MIT"
] | 3 | 2016-05-12T19:14:16.000Z | 2021-02-04T07:27:08.000Z | fillplots/inequalities.py | wwwennie/miu_maps | f73398e731c1973b052c444afa0912ec48e335b3 | [
"MIT"
] | 3 | 2015-05-20T17:29:05.000Z | 2021-02-04T07:29:29.000Z | fillplots/inequalities.py | wwwennie/miu_maps | f73398e731c1973b052c444afa0912ec48e335b3 | [
"MIT"
] | 4 | 2016-01-28T18:37:34.000Z | 2019-03-06T12:42:54.000Z | import numpy
from .core import Configurable
from .boundaries import (
BaseBoundary, YFunctionBoundary, XConstBoundary, to_boundary)
class BaseInequality(Configurable):
    """Common base for plottable inequalities bounded by one boundary.

    Concrete subclasses must define ``_boundaryclass`` — the boundary type
    they wrap (see :class:`YFunctionInequality` / :class:`XConstInequality`).
    """

    def __init__(self, baseconfig, data, less=False, domain=None):
        super(BaseInequality, self).__init__(baseconfig)
        bclass = self._boundaryclass
        if isinstance(data, bclass):
            # Already a boundary instance; a separate domain makes no sense.
            assert domain is None
            self.boundary = data
            """
            An instance of :class:`.BaseBoundary` instance.
            """
        else:
            # Raw data: wrap it in this subclass's boundary type.
            self.boundary = bclass(self.config, data, domain=domain)
        self.less = less  # direction flag passed through to the error-bar plot

    def get_errorbar_kwds(self):
        # Reuse the colour of the first line already drawn on the boundary's
        # axes so the direction marker matches the boundary line.
        kwds = {}
        for line in self.boundary.cax.lines:
            kwds['boundary_color'] = line.get_color()
            break
        return kwds

    def plot_positive_direction(self):
        """
        Plot direction that makes LHS of the inequality positive.
        """
class YFunctionInequality(BaseInequality):
    """Inequality whose boundary is a function y = f(x)."""

    _boundaryclass = YFunctionBoundary

    def plot_positive_direction(self):
        # Draw y-direction markers along the boundary, limited to its domain.
        self.cax.yerrorbar(self.boundary._masked_y, self.less,
                           xlim=self.boundary._domain,
                           **self.get_errorbar_kwds())
class XConstInequality(BaseInequality):
    """Inequality whose boundary is a vertical line x = const."""

    _boundaryclass = XConstBoundary

    def plot_positive_direction(self):
        # Constant-x boundary: the marker function returns x for every y.
        # NOTE(review): ``self.x`` is not defined here or in the visible base
        # class — presumably provided by Configurable/boundary machinery;
        # confirm (``self.boundary.x`` may have been intended).
        func = lambda ys: self.x * numpy.ones_like(ys)
        self.cax.xerrorbar(func, self.less, **self.get_errorbar_kwds())
# Map each boundary class to the inequality class that wraps it; used by
# ``to_inequality`` to choose the right wrapper for a pre-built boundary.
_IEQ_CLASSES = [YFunctionInequality, XConstInequality]
_IEQ_CLASS_MAP = dict((cls._boundaryclass, cls) for cls in _IEQ_CLASSES)
def to_inequality(config, obj):
    """Coerce *obj* into an inequality bound to *config*.

    Accepts an existing :class:`BaseInequality`, or an iterable whose first
    element is a boundary instance, a callable y(x), or a constant x value;
    remaining elements are forwarded to the inequality constructor.
    """
    if isinstance(obj, BaseInequality):
        # FIXME: should I care other cases?
        obj.config._set_base(config)
        return obj

    obj = tuple(obj)
    head, rest = obj[0], obj[1:]

    if isinstance(head, BaseBoundary):
        boundary = to_boundary(config, head)
        return _IEQ_CLASS_MAP[boundary.__class__](config, boundary, *rest)
    if callable(head):
        return YFunctionInequality(config, head, *rest)
    return XConstInequality(config, head, *rest)
| 29.375 | 72 | 0.645863 |
7959a79d729b128b5250428f26f1bf0596015ff1 | 2,119 | py | Python | tests/test_cfpq.py | bozhnyukAlex/formal-lang-course | 07615803fc79afa2ab8e28e8aea1e0692631bb5a | [
"Apache-2.0"
] | null | null | null | tests/test_cfpq.py | bozhnyukAlex/formal-lang-course | 07615803fc79afa2ab8e28e8aea1e0692631bb5a | [
"Apache-2.0"
] | 2 | 2021-09-06T13:20:39.000Z | 2022-01-23T10:24:49.000Z | tests/test_cfpq.py | bozhnyukAlex/formal-lang-course | 07615803fc79afa2ab8e28e8aea1e0692631bb5a | [
"Apache-2.0"
] | null | null | null | from collections import namedtuple
from itertools import product
import pytest
from cfpq_data import labeled_cycle_graph
from pyformlang.cfg import CFG
from project import generate_two_cycles_graph, matrix_cfpq, hellings_cfpq, tensor_cfpq
Config = namedtuple("Config", ["start_var", "start_nodes", "final_nodes", "exp_ans"])
@pytest.fixture(params=[matrix_cfpq, hellings_cfpq, tensor_cfpq])
def cfpq(request):
return request.param
@pytest.mark.parametrize(
"cfg, graph, confs",
[
(
"""
A -> a A | epsilon
B -> b B | b
""",
labeled_cycle_graph(3, "a", verbose=False),
[
Config("A", {0}, {0}, {(0, 0)}),
Config("A", None, None, set(product(range(3), range(3)))),
Config("B", None, None, set()),
],
),
(
"""
S -> epsilon
""",
labeled_cycle_graph(4, "b", verbose=False),
[
Config("S", {0, 1}, {0, 1}, {(0, 0), (1, 1)}),
Config("S", None, None, set((v, v) for v in range(4))),
Config("B", None, None, set()),
],
),
(
"""
S -> A B
S -> A S1
S1 -> S B
A -> a
B -> b
""",
generate_two_cycles_graph(2, 1, ("a", "b")),
[
Config(
"S", None, None, {(0, 0), (0, 3), (2, 0), (2, 3), (1, 0), (1, 3)}
),
Config("A", None, None, {(0, 1), (1, 2), (2, 0)}),
Config("B", None, None, {(3, 0), (0, 3)}),
Config("S", {0}, {0}, {(0, 0)}),
],
),
],
)
def test_cfpq_answer(cfpq, cfg, graph, confs):
assert all(
cfpq(
graph,
CFG.from_text(cfg),
conf.start_nodes,
conf.final_nodes,
conf.start_var,
)
== conf.exp_ans
for conf in confs
)
| 27.881579 | 86 | 0.409627 |
7959a8d9690eacad079e0582c956e03248ff7f84 | 412 | py | Python | back/account/migrations/0004_auto_20200812_2314.py | LEEJ0NGWAN/FreeChart | 8769aea07079fe3a5af4aab701028e6aa9cb6598 | [
"MIT"
] | null | null | null | back/account/migrations/0004_auto_20200812_2314.py | LEEJ0NGWAN/FreeChart | 8769aea07079fe3a5af4aab701028e6aa9cb6598 | [
"MIT"
] | 6 | 2021-04-08T19:48:40.000Z | 2022-02-27T08:27:06.000Z | back/account/migrations/0004_auto_20200812_2314.py | LEEJ0NGWAN/FreeList | 8769aea07079fe3a5af4aab701028e6aa9cb6598 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.8 on 2020-08-12 14:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('account', '0003_auto_20200709_1826'),
]
operations = [
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(max_length=150, verbose_name='username'),
),
]
| 21.684211 | 76 | 0.61165 |
7959a912bc80695460dab70eb56a87cb92e5762a | 61 | py | Python | fable_py/__init__.py | dbrattli/Fable.Jupyter | 44e4f0d01889714db6d227f51aefe3308fbd00ed | [
"MIT"
] | 21 | 2021-01-18T02:12:46.000Z | 2021-10-21T08:57:59.000Z | fable_py/__init__.py | dbrattli/Fable.Jupyter | 44e4f0d01889714db6d227f51aefe3308fbd00ed | [
"MIT"
] | 1 | 2021-11-08T06:48:17.000Z | 2021-11-08T06:48:17.000Z | fable_py/__init__.py | fable-compiler/Fable.Jupyter | 54e1fe9160f9174148963f226abafaed03be3574 | [
"MIT"
] | 1 | 2021-07-30T01:26:38.000Z | 2021-07-30T01:26:38.000Z |
from .kernel import Fable
from .version import __version__
| 12.2 | 32 | 0.803279 |
7959a96c7d78c247d05de0c6019e9774e43e1e47 | 158 | py | Python | src/collective/solr/browser/interfaces.py | IMIO/collective.solr | 844219eb3968b34d2b83a7bd5f59340d676d149e | [
"ZPL-1.1"
] | null | null | null | src/collective/solr/browser/interfaces.py | IMIO/collective.solr | 844219eb3968b34d2b83a7bd5f59340d676d149e | [
"ZPL-1.1"
] | null | null | null | src/collective/solr/browser/interfaces.py | IMIO/collective.solr | 844219eb3968b34d2b83a7bd5f59340d676d149e | [
"ZPL-1.1"
] | null | null | null | # -*- coding: utf-8 -*-
from zope.interface import Interface
class IThemeSpecific(Interface):
"""marker interface that defines a zope3 browser layer"""
| 22.571429 | 61 | 0.721519 |
7959a9de8f0bf74598997a60dc7e0b877a957a23 | 1,260 | py | Python | lib_analyza.py | SamuelAmrich/SpearPy | 7011644a9bf440d082bf12425b1ee146567304ec | [
"MIT"
] | null | null | null | lib_analyza.py | SamuelAmrich/SpearPy | 7011644a9bf440d082bf12425b1ee146567304ec | [
"MIT"
] | null | null | null | lib_analyza.py | SamuelAmrich/SpearPy | 7011644a9bf440d082bf12425b1ee146567304ec | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# Analyza knižnica na prácu so signalom pre Kopija_Finder_6.0
import numpy as np
# from scipy import signal
def info():
    """Print a short description of this helper module."""
    message = "Analyza knižnica na prácu so signalom pre Kopija_Finder_6.0 "
    print(message)
# Znormovanie všetkých dát
def norm(zaznam):
    """Scale the signal by its largest absolute value (NaNs ignored)."""
    peak = np.nanmax(np.abs(zaznam))
    return np.divide(zaznam, peak)
# Moving average
def mova(zaznam, rozpetie=1):
    """Centred moving average with window width ``rozpetie``.

    The first and last ``(rozpetie - 1) // 2`` samples are left at 0
    because a full window does not fit there (same edge behaviour as the
    original implementation).

    BUG FIX: the original slice ``zaznam[i - v:i + v]`` excluded the last
    window element, so each "average" summed ``rozpetie - 1`` samples —
    and the default ``rozpetie=1`` summed an empty slice, returning all
    zeros.  The window now spans ``2*v + 1`` samples and is divided by
    its actual length (equal to ``rozpetie`` for odd widths).
    """
    vychylka = (rozpetie - 1) // 2
    smoothed = np.zeros(len(zaznam), dtype=np.float64)
    for i in range(vychylka, len(zaznam) - vychylka):
        window = zaznam[i - vychylka:i + vychylka + 1]
        smoothed[i] = np.sum(window) / len(window)
    return smoothed
# Numerické zderivovanie
def der(time, zaznam):
    """Forward-difference derivative of ``zaznam`` on a uniform ``time`` grid.

    Note: ``np.roll`` wraps around, so the final entry mixes the first and
    last samples — same behaviour as the original implementation.
    """
    step = time[1] - time[0]
    shifted = np.roll(zaznam, -1)
    return (shifted - zaznam) / step
# Nadefinovanie nastroja na binovanie, vsetko v array-i zaokruhluje po desiatkach,
def binn(zaznam, roun=8, multi=1):
    """Snap values to a grid: round to ``roun`` decimals after scaling by
    ``multi``, then undo the scaling."""
    scaled = np.round(multi * zaznam, roun)
    return scaled / multi
| 24.705882 | 82 | 0.649206 |
7959aae6929379ed7330e721956e6fbb587bc0f2 | 221 | py | Python | Online-Judges/HackerRank/Python/Python_If_Else.py | shihab4t/Competitive-Programming | e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be | [
"Unlicense"
] | 3 | 2021-06-15T01:19:23.000Z | 2022-03-16T18:23:53.000Z | Online-Judges/HackerRank/Python/Python_If_Else.py | shihab4t/Competitive-Programming | e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be | [
"Unlicense"
] | null | null | null | Online-Judges/HackerRank/Python/Python_If_Else.py | shihab4t/Competitive-Programming | e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be | [
"Unlicense"
] | null | null | null | #!/bin/python3
import math
import os
import random
import re
import sys
if __name__ == '__main__':
n = int(input().strip())
if n % 2 or 6 <= n <= 20:
print("Weird")
else:
print("Not Weird")
| 13.8125 | 29 | 0.570136 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.