hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ace183bf416708863bd16754463f601512f563d2 | 11,042 | py | Python | tests/keras_tests/feature_networks_tests/feature_networks/weights_mixed_precision_tests.py | lapid92/model_optimization | 3fc6db67cde912a1e22399bd43bc345ba035b8b6 | [
"Apache-2.0"
] | null | null | null | tests/keras_tests/feature_networks_tests/feature_networks/weights_mixed_precision_tests.py | lapid92/model_optimization | 3fc6db67cde912a1e22399bd43bc345ba035b8b6 | [
"Apache-2.0"
] | null | null | null | tests/keras_tests/feature_networks_tests/feature_networks/weights_mixed_precision_tests.py | lapid92/model_optimization | 3fc6db67cde912a1e22399bd43bc345ba035b8b6 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Sony Semiconductors Israel, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import tensorflow as tf
from model_compression_toolkit.hardware_models.default_hwm import get_op_quantization_configs
from model_compression_toolkit.hardware_models.keras_hardware_model.keras_default import generate_fhw_model_keras
from tests.common_tests.helpers.generate_test_hw_model import generate_mixed_precision_test_hw_model
from tests.keras_tests.feature_networks_tests.base_keras_feature_test import BaseKerasFeatureNetworkTest
import model_compression_toolkit as mct
from model_compression_toolkit.common.mixed_precision.kpi import KPI
from model_compression_toolkit.common.mixed_precision.mixed_precision_quantization_config import \
MixedPrecisionQuantizationConfig
from model_compression_toolkit.common.user_info import UserInformation
from tests.common_tests.base_feature_test import BaseFeatureNetworkTest
from tests.common_tests.helpers.tensors_compare import cosine_similarity
keras = tf.keras
layers = keras.layers
hw_model = mct.hardware_representation
class MixedPercisionBaseTest(BaseKerasFeatureNetworkTest):
    """Common scaffolding for the weights mixed-precision feature tests."""

    def __init__(self, unit_test):
        super().__init__(unit_test)

    def get_quantization_config(self):
        """Quantization config with every optimization flag enabled."""
        base_cfg = mct.QuantizationConfig(
            mct.QuantizationErrorMethod.MSE,
            mct.QuantizationErrorMethod.MSE,
            relu_bound_to_power_of_2=True,
            weights_bias_correction=True,
            weights_per_channel_threshold=True,
            input_scaling=True,
            activation_channel_equalization=True)
        return MixedPrecisionQuantizationConfig(base_cfg, num_of_images=1)

    def get_input_shapes(self):
        # Single NHWC input of (224, 244, 3) per sample.
        return [[self.val_batch_size, 224, 244, 3]]

    def create_networks(self):
        net_in = layers.Input(shape=self.get_input_shapes()[0][1:])
        net = layers.Conv2D(30, 40)(net_in)
        net = layers.BatchNormalization()(net)
        net = layers.Conv2D(50, 40)(net)
        net_out = layers.ReLU()(net)
        return keras.Model(inputs=net_in, outputs=net_out)

    def compare(self, quantized_model, float_model, input_x=None, quantization_info: UserInformation = None):
        # Base class performs no checks; concrete mixed-precision tests
        # implement the actual comparisons.
        raise NotImplementedError
class MixedPercisionManuallyConfiguredTest(MixedPercisionBaseTest):
    """Mixed-precision test where the bit-width configuration is set manually
    (no search runs)."""

    def get_fw_hw_model(self):
        base_config, _ = get_op_quantization_configs()
        mp_hw_model = generate_mixed_precision_test_hw_model(
            base_cfg=base_config,
            mp_bitwidth_candidates_list=[(8, 8), (2, 8), (3, 8)])
        return generate_fhw_model_keras(name="mp_test", hardware_model=mp_hw_model)

    def get_quantization_config(self):
        qc = mct.QuantizationConfig(mct.QuantizationErrorMethod.MSE, mct.QuantizationErrorMethod.MSE,
                                    relu_bound_to_power_of_2=True, weights_bias_correction=True,
                                    weights_per_channel_threshold=False, input_scaling=True,
                                    activation_channel_equalization=True)
        return MixedPrecisionQuantizationConfig(qc)

    def get_kpi(self):
        # Return some KPI (it does not really matter the value here as
        # search_methods is not done, and the configuration is set manually).
        return KPI(1)

    def compare(self, quantized_model, float_model, input_x=None, quantization_info=None):
        # Use a unittest assertion instead of a bare `assert` so the check is
        # not silently stripped when Python runs with -O.
        self.unit_test.assertTrue(quantization_info.mixed_precision_cfg == [2, 1])
        self.unit_test.assertTrue(np.unique(quantized_model.layers[2].weights[0]).flatten().shape[0] <= 4)
        self.unit_test.assertTrue(np.unique(quantized_model.layers[4].weights[0]).flatten().shape[0] <= 8)
class MixedPercisionSearchTest(MixedPercisionBaseTest):
    """Mixed-precision search with an unbounded KPI budget."""

    def __init__(self, unit_test):
        super().__init__(unit_test)

    def get_kpi(self):
        # kpi is infinity -> should give best model - 8bits
        return KPI(np.inf)

    def compare(self, quantized_model, float_model, input_x=None, quantization_info=None):
        # kpi is infinity -> should give best model - 8bits.
        # unittest assertion instead of a bare `assert` so the check
        # survives `python -O`.
        self.unit_test.assertTrue((quantization_info.mixed_precision_cfg == [0, 0]).all())
        for i in range(30):  # quantized per channel
            self.unit_test.assertTrue(
                np.unique(quantized_model.layers[2].weights[0][:, :, :, i]).flatten().shape[0] <= 256)
        for i in range(50):  # quantized per channel
            self.unit_test.assertTrue(
                np.unique(quantized_model.layers[4].weights[0][:, :, :, i]).flatten().shape[0] <= 256)
class MixedPercisionSearchKPI4BitsAvgTest(MixedPercisionBaseTest):
    """Mixed-precision search with a KPI budget of 4 bits per weight on average."""

    def __init__(self, unit_test):
        super().__init__(unit_test)

    def get_kpi(self):
        # kpi is for 4 bits on average
        return KPI(2544140 * 4 / 8)

    def compare(self, quantized_model, float_model, input_x=None, quantization_info=None):
        # The search should pick the second bit-width candidate (index 1)
        # for both layers. unittest assertion instead of a bare `assert`
        # so the check survives `python -O`.
        self.unit_test.assertTrue((quantization_info.mixed_precision_cfg == [1, 1]).all())
        for i in range(30):  # quantized per channel
            self.unit_test.assertTrue(
                np.unique(quantized_model.layers[2].weights[0][:, :, :, i]).flatten().shape[0] <= 16)
        for i in range(50):  # quantized per channel
            self.unit_test.assertTrue(
                np.unique(quantized_model.layers[4].weights[0][:, :, :, i]).flatten().shape[0] <= 16)
class MixedPercisionSearchKPI2BitsAvgTest(MixedPercisionBaseTest):
    """Mixed-precision search with a KPI budget of 2 bits per weight on average."""

    def __init__(self, unit_test):
        super().__init__(unit_test)

    def get_kpi(self):
        # kpi is for 2 bits on average
        return KPI(2544200 * 2 / 8)

    def compare(self, quantized_model, float_model, input_x=None, quantization_info=None):
        # The search should pick the third bit-width candidate (index 2)
        # for both layers. unittest assertion instead of a bare `assert`
        # so the check survives `python -O`.
        self.unit_test.assertTrue((quantization_info.mixed_precision_cfg == [2, 2]).all())
        for i in range(30):  # quantized per channel
            self.unit_test.assertTrue(
                np.unique(quantized_model.layers[2].weights[0][:, :, :, i]).flatten().shape[0] <= 4)
        for i in range(50):  # quantized per channel
            self.unit_test.assertTrue(
                np.unique(quantized_model.layers[4].weights[0][:, :, :, i]).flatten().shape[0] <= 4)
class MixedPercisionDepthwiseTest(MixedPercisionBaseTest):
    """Mixed precision over a depthwise-conv network; checks output fidelity
    against the float model via cosine similarity."""

    def __init__(self, unit_test):
        super().__init__(unit_test)

    def get_kpi(self):
        # Unbounded budget: the search is free to pick any candidate.
        return KPI(np.inf)

    def create_networks(self):
        net_in = layers.Input(shape=self.get_input_shapes()[0][1:])
        net = layers.DepthwiseConv2D(30)(net_in)
        net = layers.BatchNormalization()(net)
        net = layers.ReLU()(net)
        return keras.Model(inputs=net_in, outputs=net)

    def compare(self, quantized_model, float_model, input_x=None, quantization_info=None):
        expected = float_model.predict(input_x)
        actual = quantized_model.predict(input_x)
        cs = cosine_similarity(expected, actual)
        self.unit_test.assertTrue(np.isclose(cs, 1), msg=f'fail cosine similarity check:{cs}')

    def get_fw_hw_model(self):
        base_config, _ = get_op_quantization_configs()
        base_config = base_config.clone_and_edit(weights_n_bits=16,
                                                 activation_n_bits=16)
        mp_hw_model = generate_mixed_precision_test_hw_model(
            base_cfg=base_config,
            mp_bitwidth_candidates_list=[(8, 16), (2, 16), (4, 16), (16, 16)])
        return generate_fhw_model_keras(name="mp_dw_test", hardware_model=mp_hw_model)

    def get_quantization_config(self):
        cfg = mct.QuantizationConfig(
            mct.QuantizationErrorMethod.MSE,
            mct.QuantizationErrorMethod.MSE,
            relu_bound_to_power_of_2=False,
            weights_bias_correction=False,
            weights_per_channel_threshold=True,
            input_scaling=False,
            activation_channel_equalization=False)
        return MixedPrecisionQuantizationConfig(cfg)
class MixedPrecisionActivationDisabled(MixedPercisionBaseTest):
    """Weights-only mixed precision: activation quantization is disabled."""

    def __init__(self, unit_test):
        super().__init__(unit_test)

    def get_quantization_config(self):
        qc = mct.QuantizationConfig(mct.QuantizationErrorMethod.MSE,
                                    mct.QuantizationErrorMethod.MSE,
                                    relu_bound_to_power_of_2=True,
                                    weights_bias_correction=True,
                                    weights_per_channel_threshold=True,
                                    input_scaling=False,
                                    activation_channel_equalization=False)
        return MixedPrecisionQuantizationConfig(qc, num_of_images=1)

    def get_fw_hw_model(self):
        base_config, _ = get_op_quantization_configs()
        activation_disabled_config = base_config.clone_and_edit(enable_activation_quantization=False)
        mp_hw_model = generate_mixed_precision_test_hw_model(
            base_cfg=activation_disabled_config,
            mp_bitwidth_candidates_list=[(8, 8), (4, 8), (2, 8)])
        return generate_fhw_model_keras(name="mp_weights_only_test", hardware_model=mp_hw_model)

    def get_kpi(self):
        # kpi is infinity -> should give best model - 8bits
        return KPI(np.inf)

    def compare(self, quantized_model, float_model, input_x=None, quantization_info=None):
        # kpi is infinity -> should give best model - 8bits.
        # unittest assertion instead of a bare `assert` so the check
        # survives `python -O`.
        self.unit_test.assertTrue((quantization_info.mixed_precision_cfg == [0, 0]).all())
        for i in range(30):  # quantized per channel
            self.unit_test.assertTrue(
                np.unique(quantized_model.layers[1].weights[0][:, :, :, i]).flatten().shape[0] <= 256)
        for i in range(50):  # quantized per channel
            self.unit_test.assertTrue(
                np.unique(quantized_model.layers[2].weights[0][:, :, :, i]).flatten().shape[0] <= 256)
ace183bf8fe759134209f080a97ef48d45750c3d | 497 | py | Python | sample/many_user.py | marianarmorgado/python-starter | 8bf3d7a16fd462cf99898c9a82c6e1cf4fc0e7f2 | [
"MIT"
] | null | null | null | sample/many_user.py | marianarmorgado/python-starter | 8bf3d7a16fd462cf99898c9a82c6e1cf4fc0e7f2 | [
"MIT"
] | null | null | null | sample/many_user.py | marianarmorgado/python-starter | 8bf3d7a16fd462cf99898c9a82c6e1cf4fc0e7f2 | [
"MIT"
# Several users, each keyed by username and described by a small profile dict.
users = {
    'aeinstein': {
        'first': 'albert',
        'last': 'einstein',
        'location': 'princeton'
    },
    'mcurie': {
        'first': 'marie',
        'last': 'curie',
        'location': 'paris',
    }
}

# Print a short, title-cased summary for every user.
for username, profile in users.items():
    print(f"\nUsername: {username}")
    print(f"\tFull name: {(profile['first'] + ' ' + profile['last']).title()}")
    print(f"\tLocation: {profile['location'].title()}")
ace184233727fd0afdc3944e24e24916d8ddbd06 | 1,836 | py | Python | agent/comms/comm_util.py | Braj-120/cryptominingdetection | 8be82132939aa8c92332493a0ca14620c110e971 | [
"MIT"
] | 1 | 2021-11-28T17:10:40.000Z | 2021-11-28T17:10:40.000Z | agent/comms/comm_util.py | Braj-120/cryptominingdetection | 8be82132939aa8c92332493a0ca14620c110e971 | [
"MIT"
] | null | null | null | agent/comms/comm_util.py | Braj-120/cryptominingdetection | 8be82132939aa8c92332493a0ca14620c110e971 | [
"MIT"
] | null | null | null | from datetime import datetime
import requests
import socket
import json
from os.path import exists
from requests.models import HTTPError
from cryptography.fernet import Fernet
def get_token(creds: dict, url: str):
    """
    Fetches the token for a given credential set.

    @param creds: Dictionary containing username and password (unencrypted);
                  the local hostname is added to it in place.
    @param url: URL to get the token from.
    @return: The token text on success, or None if the request failed.
    """
    try:
        hostname = socket.gethostname()
        creds["hostname"] = hostname
        response = requests.post(url, json=creds)
        # requests does not raise on HTTP error statuses by itself, so
        # without this call the HTTPError handler below was unreachable
        # and error pages were returned as if they were tokens.
        response.raise_for_status()
        return response.text
    except HTTPError as httperr:
        print(
            f'{str(datetime.now())}: Http error detected, below exception, resuming operation')
        print(httperr)
        return None
def retrieve_token(token_file: str, cred_file: str, token_url: str, expired: bool = False):
    """
    Return a token, fetching a fresh one from the URL or reading the cached
    copy from disk depending on the situation.

    @param token_file: Token file location.
    @param cred_file: Credential file (JSON with an encrypted password).
    @param token_url: URL to get the token from if not available locally.
    @param expired: Whether the cached token has expired.
    """
    if not exists(token_file) or expired:
        # No usable cached token: decrypt the credentials and request one.
        print(f"{str(datetime.now())}: Fetching new token")
        with open(cred_file, 'r') as credfp:
            cred = json.loads(credfp.read())
        # NOTE(review): hard-coded Fernet key in source — should be loaded
        # from secure configuration instead.
        cipher = Fernet("jlKb-yoWYc38f2r6-ezMyVszat8UAYkav8F8q2df_N0=")
        cred["password"] = cipher.decrypt(
            bytes(cred["password"], "utf-8")).decode("utf-8")
        token = get_token(cred, token_url)
        with open(token_file, 'w') as tokenfp:
            tokenfp.write(token)
    else:
        print(f"{str(datetime.now())}: Token file exists, trying to read from file")
        with open(token_file, 'r') as tokenfp:
            token = tokenfp.read()
    return token
ace1842c87f8fa86dbcc14f620a308d2b7f96918 | 6,782 | py | Python | netconsole/netconsole.py | ConnectionMaster/pynetconsole | 2e06787e14e0e27d284a25b9efcb4e08d116df4b | [
"ISC"
] | null | null | null | netconsole/netconsole.py | ConnectionMaster/pynetconsole | 2e06787e14e0e27d284a25b9efcb4e08d116df4b | [
"ISC"
] | 1 | 2022-03-15T18:06:05.000Z | 2022-03-15T18:06:05.000Z | netconsole/netconsole.py | ConnectionMaster/pynetconsole | 2e06787e14e0e27d284a25b9efcb4e08d116df4b | [
"ISC"
] | null | null | null | from argparse import ArgumentParser
import socket
import struct
import sys
import threading
import time
from ._fakeds import FakeDS
__all__ = ["Netconsole", "main", "run"]
def _output_fn(s):
    # Default print function: re-encode with errors="replace" so characters
    # the console encoding cannot represent never raise UnicodeEncodeError.
    enc = sys.stdout.encoding
    safe = s.encode(enc, errors="replace").decode(enc)
    sys.stdout.write(safe)
    sys.stdout.write("\n")
class StreamEOF(IOError):
    """IOError subclass signaling end-of-stream (defined for users of this
    module; not raised within the visible code here)."""
    pass
class Netconsole:
    """
    Implements the 2018+ netconsole protocol
    """

    # Frame tags (the byte following the length prefix) for the two
    # message types handled by this client.
    TAG_ERROR = 11
    TAG_INFO = 12

    def __init__(self, printfn=_output_fn):
        # Dispatch table: frame tag -> handler for the frame payload.
        self.frames = {self.TAG_ERROR: self._onError, self.TAG_INFO: self._onInfo}

        # Guards sock/sockrfp/sockwfp/running across the reader and
        # keepalive threads.
        self.cond = threading.Condition()
        self.sock = None
        self.sockrfp = None
        self.sockwfp = None
        self.sockaddr = None
        self.running = False

        # Called with each decoded, formatted message line.
        self.printfn = printfn

    def start(self, address, port=1741, connect_event=None, block=True):
        """
        Begin reading the netconsole stream.

        :param address: hostname/IP of the netconsole server
        :param port: TCP port (1741 is the 2018+ netconsole port)
        :param connect_event: optional event; ``set()`` is called on connect
        :param block: when True, run the keepalive loop in this thread
                      (call does not return until stopped); otherwise the
                      keepalive runs in a daemon thread
        :raises ValueError: if already started
        """
        with self.cond:
            if self.running:
                raise ValueError("Cannot start without stopping first")

            self.sockaddr = (address, port)
            self.connect_event = connect_event
            self.running = True

            self._rt = threading.Thread(
                target=self._readThread, name="nc-read-thread", daemon=True
            )
            self._rt.start()

        if block:
            self._keepAlive()
        else:
            self._kt = threading.Thread(
                target=self._keepAlive, name="nc-keepalive-thread", daemon=True
            )
            self._kt.start()

    @property
    def connected(self):
        # The connection is considered live once a read file object exists.
        return self.sockrfp is not None

    def stop(self):
        """Stop the keepalive/reader loops and close the socket."""
        with self.cond:
            self.running = False
            self.cond.notify_all()
        self.sock.close()

    def _connectionDropped(self):
        # Close the socket and clear the read handle; _keepAliveReady then
        # reports -2, which triggers a reconnect.
        print(".. connection dropped", file=sys.stderr)
        self.sock.close()
        with self.cond:
            self.sockrfp = None
            self.cond.notify_all()

    def _keepAliveReady(self):
        # Predicate for Condition.wait_for: -1 -> stop requested,
        # -2 -> reconnect needed, falsy (None) -> keep waiting until the
        # wait times out.
        if not self.running:
            return -1
        elif not self.connected:
            return -2

    def _keepAlive(self):
        """Send a 2-byte keepalive roughly every 2s; reconnect on drop."""
        while self.running:
            with self.cond:
                ret = self.cond.wait_for(self._keepAliveReady, timeout=2.0)

            if ret == -1:
                # stop() was called.
                return
            elif ret == -2:
                self._reconnect()
            else:
                # wait_for timed out while connected: emit the keepalive.
                try:
                    self.sockwfp.write(b"\x00\x00")
                    self.sockwfp.flush()
                except IOError:
                    self._connectionDropped()

    def _readThreadReady(self):
        # Predicate for Condition.wait_for: -1 -> stop requested; otherwise
        # returns the read file object once one is available (falsy None
        # while disconnected keeps the reader waiting).
        if not self.running:
            return -1
        return self.sockrfp

    def _readThread(self):
        """Read frames ([u16 length][u8 tag][payload]) and dispatch by tag."""
        while True:
            with self.cond:
                sockrfp = self.cond.wait_for(self._readThreadReady)

            if sockrfp == -1:
                # stop() was called.
                return

            try:
                data = sockrfp.read(self._headerSz)
            except IOError:
                data = ""
            if len(data) != self._headerSz:
                # Short read: the connection is gone.
                self._connectionDropped()
                continue

            blen, tag = self._header.unpack(data)
            # The length field counts the tag byte, which was already read.
            blen -= 1

            try:
                buf = sockrfp.read(blen)
            except IOError:
                buf = ""
            if len(buf) != blen:
                self._connectionDropped()
                continue

            # process the frame
            fn = self.frames.get(tag)
            if fn:
                fn(buf)
            else:
                print("ERROR: Unknown tag %s; Ignoring..." % tag, file=sys.stderr)

    def _reconnect(self):
        # returns once the socket is connected or an exit is requested
        while self.running:
            sys.stderr.write("Connecting to %s:%s..." % self.sockaddr)
            try:
                sock = socket.create_connection(self.sockaddr, timeout=3.0)
            except IOError:
                sys.stderr.write(" :(\n")
                # don't busywait, just in case
                time.sleep(1.0)
                continue
            else:
                sys.stderr.write("OK\n")

            # Keepalives are tiny; disable Nagle so they are sent promptly.
            sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
            sock.settimeout(None)

            sockrfp = sock.makefile("rb")
            sockwfp = sock.makefile("wb")

            if self.connect_event:
                self.connect_event.set()

            # Publish the new connection to the reader/keepalive threads.
            with self.cond:
                self.sock = sock
                self.sockrfp = sockrfp
                self.sockwfp = sockwfp
                self.cond.notify_all()

            break

    #
    # Message parsing (all fields big-endian, per the ">" struct formats)
    #

    # Frame header: u16 length, s8 tag.
    _header = struct.Struct(">Hb")
    _headerSz = _header.size

    # Error frame prefix: f32 timestamp, u16 seq, u16 numOccur,
    # s32 errorCode, u8 flags.
    _errorFrame = struct.Struct(">fHHiB")
    _errorFrameSz = _errorFrame.size

    # Info frame prefix: f32 timestamp, u16 seq.
    _infoFrame = struct.Struct(">fH")
    _infoFrameSz = _infoFrame.size

    # u16 length prefix used for the strings inside an error frame.
    _slen = struct.Struct(">H")
    _slenSz = _slen.size

    def _onError(self, b):
        # Error frame: fixed prefix followed by three length-prefixed
        # strings (details, location, call stack).
        ts, _seq, _numOcc, errorCode, flags = self._errorFrame.unpack_from(b, 0)
        details, nidx = self._getStr(b, self._errorFrameSz)
        location, nidx = self._getStr(b, nidx)
        callStack, _ = self._getStr(b, nidx)
        self.printfn(
            "[%0.2f] %d %s %s %s" % (ts, errorCode, details, location, callStack)
        )

    def _getStr(self, b, idx):
        # Decode a u16-length-prefixed UTF-8 string starting at idx;
        # returns (string, index just past the string).
        sidx = idx + self._slenSz
        (blen,) = self._slen.unpack_from(b, idx)
        nextidx = sidx + blen
        return b[sidx:nextidx].decode("utf-8", errors="replace"), nextidx

    def _onInfo(self, b):
        # Info frame: fixed prefix followed by the raw UTF-8 message text.
        ts, _seq = self._infoFrame.unpack_from(b, 0)
        msg = b[self._infoFrameSz:].decode("utf-8", errors="replace")
        self.printfn("[%0.2f] %s" % (ts, msg))
def run(address, connect_event=None, fakeds=False):
    """
    Starts the netconsole loop. Note that netconsole will only send output
    if the DS is connected. If you don't have a DS available, the 'fakeds'
    flag can be specified to fake a DS connection.

    :param address: Address of the netconsole server
    :param connect_event: a threading.event object, upon which the 'set'
                          function will be called when the connection has
                          succeeded.
    :param fakeds: Fake a driver station connection
    """
    if fakeds:
        fake_ds = FakeDS()
        fake_ds.start(address)

    console = Netconsole()
    console.start(address, connect_event=connect_event)
def main():
    """Command-line entry point: parse arguments and start the netconsole."""
    parser = ArgumentParser()
    parser.add_argument("address", help="Address of Robot")
    parser.add_argument(
        "-f", "--fakeds",
        action="store_true",
        default=False,
        help="Fake a driver station connection to the robot",
    )
    opts = parser.parse_args()
    run(opts.address, fakeds=opts.fakeds)
| 26.389105 | 83 | 0.547774 |
ace184ac7e9a069208cf44c5cdb31ac33501a7c8 | 565 | py | Python | config.py | tencia/video_predict | fbd84769d595b6518d8174024dd2d862cd48518f | [
"MIT"
] | 84 | 2016-01-08T23:35:22.000Z | 2021-06-01T06:52:26.000Z | config.py | tencia/video_predict | fbd84769d595b6518d8174024dd2d862cd48518f | [
"MIT"
] | 2 | 2016-05-26T10:32:22.000Z | 2018-03-30T11:51:18.000Z | config.py | tencia/video_predict | fbd84769d595b6518d8174024dd2d862cd48518f | [
"MIT"
] | 34 | 2016-03-31T21:13:33.000Z | 2021-12-11T19:49:38.000Z | external_data = '/home/tencia/Documents/data/'
# Pretrained parameter files for the "mm" models (presumably moving-MNIST;
# the numbers in the filenames look like the loss/score at save time —
# TODO confirm).
mm_cvae_params = 'params/mm_cvae_33190.972165.npz'
mm_lstm_ll_params = 'params/mm_lstm_ll_20075.9424863.npz'
mm_lstm_mse_params = 'params/mm_lstm_mse_0.287844618162.npz'
# Pretrained parameter files for the "pf" models.
pf_cae_params = 'params/pf_cae_0.00378594952235.params'
pf_deconv_params = 'params/pf_deconv_0.003525.npz'
pf_vae_params = 'params/pf_vae_0.002980.npz'
pf_fullcae_params='params/pf_fullcae_0.00384501646971.params'
# Layer-spec strings for the pf autoencoders (comma-separated layer tokens,
# e.g. "d-0.2" dropout, "16-5" conv, "p-2" pooling — semantics defined by
# the consumer of these strings; verify against the model-builder code).
pf_cae_specstr='d-0.2,16-5,p-2,d-0.2,32-3,d-0.2,32-3,d-0.2,512'
pf_fullcae_specstr='d-0.2,16-5,d-0.2,32-3,d-0.2,64-3,8-1,d-0.2,256'
| 43.461538 | 67 | 0.784071 |
ace184b837f6dbe9890e584f8a13ec4ed2a2beca | 608 | py | Python | CodeSignal/Arcade/The_Core/Level_03_Corner_Of_Zeros_And_Ones/021_Second_Rightmost_Zero_Bit.py | Zubieta/CPP | fb4a3cbf2e4edcc590df15663cd28fb9ecab679c | [
"MIT"
] | 8 | 2017-03-02T07:56:45.000Z | 2021-08-07T20:20:19.000Z | CodeSignal/Arcade/The_Core/Level_03_Corner_Of_Zeros_And_Ones/021_Second_Rightmost_Zero_Bit.py | zubie7a/Algorithms | fb4a3cbf2e4edcc590df15663cd28fb9ecab679c | [
"MIT"
] | null | null | null | CodeSignal/Arcade/The_Core/Level_03_Corner_Of_Zeros_And_Ones/021_Second_Rightmost_Zero_Bit.py | zubie7a/Algorithms | fb4a3cbf2e4edcc590df15663cd28fb9ecab679c | [
"MIT"
] | 1 | 2021-08-07T20:20:20.000Z | 2021-08-07T20:20:20.000Z | # https://app.codesignal.com/arcade/code-arcade/corner-of-0s-and-1s/9nSj6DgqLDsBePJha
def secondRightmostZeroBit(n):
    """
    Return the value (power of two) of the second rightmost zero bit in the
    binary representation of n.

    Example: n = 37 (0b100101) has zero bits at positions 1, 3, 4, ...;
    the second rightmost zero is at position 3, so the result is 2**3 == 8.

    Fixes over the original: no reliance on the never-imported `math`
    module, no unreachable duplicate `return` statements, and no IndexError
    for small inputs such as n == 1 (a positive integer always has zero
    bits above its highest set bit, so scanning upward always terminates).
    """
    zeros_seen = 0
    pos = 0
    while True:
        if (n >> pos) & 1 == 0:
            zeros_seen += 1
            if zeros_seen == 2:
                return 2 ** pos
        pos += 1
ace1852d61823020e80496e1d83b0d23dcb75294 | 2,759 | py | Python | tests/test_pathname_validator.py | kkaruso/niv | 67ba82c93279db4dae503d4dceec7ba5e9931c0a | [
"MIT"
] | null | null | null | tests/test_pathname_validator.py | kkaruso/niv | 67ba82c93279db4dae503d4dceec7ba5e9931c0a | [
"MIT"
] | null | null | null | tests/test_pathname_validator.py | kkaruso/niv | 67ba82c93279db4dae503d4dceec7ba5e9931c0a | [
"MIT"
] | null | null | null | """
Includes all tests for pathname_validator
"""
import os
import shutil
from unittest import TestCase
from src import pathname_validitor as pv
class TestPathnameValidator(TestCase):
    """
    Tests for the functions of the pathname_validitor module (pv).
    """
    # Directory created by setUp and removed by tearDown.
    test_directory_path = "testdirectory/"

    def setUp(self) -> None:
        """
        Setup tests directories and files for testing
        """
        try:
            os.mkdir(self.test_directory_path)
            # Create (and immediately close) an empty fixture file.
            with open("tests.txt", "w"):
                pass
        except OSError:
            print(f"Creation of the directory {self.test_directory_path} failed")
        else:
            print(f"Successfully created the directory {self.test_directory_path}")
            print("Successfully created tests files")

    def test_is_pathname_valid(self):
        """Valid relative/absolute paths are accepted; the empty string is not."""
        self.assertTrue(pv.is_pathname_valid("./"))
        self.assertTrue(pv.is_pathname_valid("."))
        self.assertTrue(pv.is_pathname_valid("./test_diagram.py"))
        self.assertTrue(pv.is_pathname_valid(os.getcwd()))
        self.assertTrue(pv.is_pathname_valid("../"))
        self.assertTrue(pv.is_pathname_valid("../../../"))
        self.assertFalse(pv.is_pathname_valid(""))

    def test_is_path_exists_or_creatable(self):
        """Existing (or creatable) paths are all accepted."""
        self.assertTrue(pv.is_path_exists_or_creatable("."))
        self.assertTrue(pv.is_path_exists_or_creatable("./"))
        self.assertTrue(pv.is_path_exists_or_creatable(os.getcwd()))
        self.assertTrue(pv.is_path_exists_or_creatable("../"))
        self.assertTrue(pv.is_path_exists_or_creatable("./test_diagram.py"))

    def test_is_file_not_in_directory(self):
        """'tests.txt' exists (created in setUp); the check appears
        case-sensitive ('Tests.txt' is treated as absent)."""
        self.assertTrue(pv.is_file_not_in_directory("tests2.txt"))
        self.assertFalse(pv.is_file_not_in_directory("tests.txt"))
        self.assertTrue(pv.is_file_not_in_directory("Tests.txt"))

    def test_check_file_format(self):
        """Accepted output formats: svg, pdf, jpg, png; others rejected."""
        self.assertTrue(pv.check_file_format("nicename.svg"))
        self.assertTrue(pv.check_file_format("nicename.pdf"))
        self.assertTrue(pv.check_file_format("nicename.jpg"))
        self.assertTrue(pv.check_file_format("nicename.png"))
        self.assertTrue(pv.check_file_format("../nicename.svg"))
        self.assertFalse(pv.check_file_format("notnicename.lala"))

    def tearDown(self) -> None:
        """
        Removes files and directory created by testing
        """
        try:
            shutil.rmtree(f'{self.test_directory_path}', ignore_errors=True)
            os.remove("tests.txt")
        except OSError:
            print(f"Deletion of the directory {self.test_directory_path} failed")
        else:
            print(f"Successfully deleted the directory {self.test_directory_path}")
            print("Successfully deleted Test files")
| 36.786667 | 83 | 0.666183 |
ace18579f8a482784c3b95ed0a124e1856a2a324 | 999 | py | Python | pyopencl/create_video.py | hishamelreedy/innovatefpga-GestureRecognitionAccelerator | 576e432875f5736f71cf915187ea6b42f376089a | [
"MIT"
] | null | null | null | pyopencl/create_video.py | hishamelreedy/innovatefpga-GestureRecognitionAccelerator | 576e432875f5736f71cf915187ea6b42f376089a | [
"MIT"
] | null | null | null | pyopencl/create_video.py | hishamelreedy/innovatefpga-GestureRecognitionAccelerator | 576e432875f5736f71cf915187ea6b42f376089a | [
"MIT"
] | null | null | null | import cv2
def change_brightness(img, value=30):
    """
    Return a copy of a BGR image with its HSV value (brightness) channel
    shifted by `value` (positive brightens, negative darkens).

    :param img: BGR image (as produced by cv2.imread / VideoCapture)
    :param value: brightness delta to apply to the V channel
    :return: adjusted BGR image
    """
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(hsv)
    # cv2.add/cv2.subtract use saturating arithmetic, so the result is
    # already clipped to the uint8 range [0, 255]. The original manual
    # clamps (v[v > 255] = 255; v[v < 0] = 0) were dead code. Handle
    # negative deltas explicitly via cv2.subtract.
    if value >= 0:
        v = cv2.add(v, value)
    else:
        v = cv2.subtract(v, -value)
    final_hsv = cv2.merge((h, s, v))
    img = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)
    return img
for datax in range(0, 1):
    # Open the laptop's built-in camera to capture video.
    capture = cv2.VideoCapture(0)
    frame_index = 0
    while capture.isOpened():
        grabbed, frame = capture.read()
        # Stop when the stream ends; prevents an infinite loop once the
        # video is over.
        if not grabbed:
            break
        if frame_index == 64:
            # Collected enough frames for this clip.
            break
        # Downscale and save the frame to disk with imwrite.
        thumbnail = cv2.resize(frame, (28, 28))
        # cv2.imwrite(str(frame_index)+'.jpg', thumbnail)
        # thumbnail = change_brightness(thumbnail, value=-100)  # decreases
        cv2.imwrite("testvideo/" + str(frame_index) + ".jpg", thumbnail)
        frame_index += 1
        print(frame_index)
ace18595209043c29bdbf1530faa4c4191c8de0e | 14,566 | py | Python | codalab/lib/bundle_util.py | Matt-F-Wu/codalab_cl | e08e82afbc3d478c52a811065701cf53c9d3891e | [
"Apache-2.0"
] | null | null | null | codalab/lib/bundle_util.py | Matt-F-Wu/codalab_cl | e08e82afbc3d478c52a811065701cf53c9d3891e | [
"Apache-2.0"
] | null | null | null | codalab/lib/bundle_util.py | Matt-F-Wu/codalab_cl | e08e82afbc3d478c52a811065701cf53c9d3891e | [
"Apache-2.0"
] | null | null | null | import copy
from codalab.bundles import get_bundle_subclass
from codalab.client.json_api_client import JsonApiClient, JsonApiRelationship
from codalab.common import UsageError
from codalab.lib import worksheet_util
def bundle_to_bundle_info(model, bundle):
    """
    Helper: Convert bundle to bundle_info.

    :param model: bundle model used to resolve parent bundle names
    :param bundle: bundle object to convert
    :return: dict with the bundle's fields, dependencies and genpath args
    """
    # Field names mirror the columns in tables.py
    info = {
        'uuid': bundle.uuid,
        'bundle_type': bundle.bundle_type,
        'owner_id': bundle.owner_id,
        'command': bundle.command,
        'data_hash': bundle.data_hash,
        'state': bundle.state,
        'is_anonymous': bundle.is_anonymous,
        'metadata': bundle.metadata.to_dict(),
        'dependencies': [dep.to_dict() for dep in bundle.dependencies],
    }

    dependencies = info['dependencies']
    if dependencies:
        # Resolve human-readable names for all parents in one query.
        parent_names = model.get_bundle_names(
            [dep['parent_uuid'] for dep in dependencies])
        for dep in dependencies:
            dep['parent_name'] = parent_names.get(dep['parent_uuid'])

    # Shim in args
    info['args'] = worksheet_util.interpret_genpath(info, 'args')
    return info
def mimic_bundles(
client,
old_inputs,
old_output,
new_inputs,
new_output_name,
worksheet_uuid,
depth,
shadow,
dry_run,
metadata_override=None,
skip_prelude=False,
):
"""
:param JsonApiClient client: client
:param old_inputs: list of bundle uuids
:param old_output: bundle uuid that we produced
:param new_inputs: list of bundle uuids that are analogous to old_inputs
:param new_output_name: name of the bundle to create to be analogous to old_output (possibly None)
:param worksheet_uuid: add newly created bundles to this worksheet
:param depth: how far to do a BFS up from old_output.
:param shadow: whether to add the new inputs right after all occurrences of the old inputs in worksheets.
:param dry_run: return plan without executing anything if True.
:param skip_prelude: don't include preludes in mimicked items if True.
:param metadata_override: new metadata fields replace old ones in the newly mimicked bundles.
"""
if metadata_override is None:
metadata_override = {}
# Build the graph (get all the infos).
# If old_output is given, look at ancestors of old_output until we
# reached some depth. If it's not given, we first get all the
# descendants first, and then get their ancestors.
if old_output:
infos = client.fetch('bundles', params={'specs': old_output})
assert isinstance(infos, list)
else:
# Fetch bundles specified in `old_inputs` and their descendants
# down by `depth` levesl
infos = client.fetch('bundles', params={'specs': old_inputs, 'depth': depth})
infos = {b['uuid']: b for b in infos} # uuid -> bundle info
def get_self_and_ancestors(bundle_uuids):
# Traverse up ancestors by at most `depth` levels and returns
# the set of all bundles visited, as well as updating the `info`
# dictionary along the way.
result = bundle_uuids
visited = set()
for _ in xrange(depth):
next_bundle_uuids = []
for bundle_uuid in bundle_uuids:
if bundle_uuid in visited:
continue
# Add to infos if not there yet
if bundle_uuid not in infos:
infos[bundle_uuid] = client.fetch('bundles', bundle_uuid)
# Append all of the parents to the next batch of bundles to look at
info = infos[bundle_uuid]
for dep in info['dependencies']:
parent_uuid = dep['parent_uuid']
if parent_uuid not in infos:
next_bundle_uuids.append(parent_uuid)
# Mark this bundle as visited
visited.add(bundle_uuid)
# Prepend to the running list of all bundles
result = next_bundle_uuids + result
# Swap in the next batch of bundles for next iteration
bundle_uuids = next_bundle_uuids
return result
all_bundle_uuids = get_self_and_ancestors(infos.keys())
# Now go recursively create the bundles.
old_to_new = {} # old_uuid -> new_uuid
downstream = (
set()
) # old_uuid -> whether we're downstream of an input (and actually needs to be mapped onto a new uuid)
created_uuids = set() # set of uuids which were newly created
plan = [] # sequence of (old, new) bundle infos to make
for old, new in zip(old_inputs, new_inputs):
old_to_new[old] = new
downstream.add(old)
# Return corresponding new_bundle_uuid
def recurse(old_bundle_uuid):
if old_bundle_uuid in old_to_new:
return old_to_new[old_bundle_uuid]
# Don't have any more information (because we probably hit the maximum depth)
if old_bundle_uuid not in infos:
return old_bundle_uuid
# Get information about the old bundle.
old_info = infos[old_bundle_uuid]
new_dependencies = [
{
'parent_uuid': recurse(dep['parent_uuid']),
'parent_path': dep['parent_path'],
'child_uuid': dep[
'child_uuid'
], # This is just a placeholder to do the equality test
'child_path': dep['child_path'],
}
for dep in old_info['dependencies']
]
# If there are no inputs or if we're downstream of any inputs, we need to make a new bundle.
lone_output = len(old_inputs) == 0 and old_bundle_uuid == old_output
downstream_of_inputs = any(
dep['parent_uuid'] in downstream for dep in old_info['dependencies']
)
if lone_output or downstream_of_inputs:
# Now create a new bundle that mimics the old bundle.
new_info = copy.deepcopy(old_info)
# Make sure that new uuids are generated
new_info.pop('uuid', None)
new_info.pop('id', None)
# Only change the name if the output name is supplied.
new_metadata = new_info['metadata']
if new_output_name:
if old_bundle_uuid == old_output:
new_metadata['name'] = new_output_name
else:
# Just make up a name heuristically
new_metadata['name'] = new_output_name + '-' + old_info['metadata']['name']
# By default, the mimic bundle uses whatever image the old bundle uses
# Preferably it uses the SHA256 image digest, but it may simply copy request_docker_image
# if it is not present
if new_info['bundle_type'] == 'run' and new_metadata.get('docker_image', ''):
# Put docker_image in requested_docker_image if it is present and this is a run bundle
new_metadata['request_docker_image'] = new_metadata['docker_image']
cls = get_bundle_subclass(new_info['bundle_type'])
for spec in cls.METADATA_SPECS:
# Remove automatically generated keys
if spec.generated and spec.key in new_metadata:
del new_metadata[spec.key]
# Override original metadata keys
if spec.key in metadata_override:
new_metadata[spec.key] = metadata_override[spec.key]
# Set up info dict
new_info['metadata'] = new_metadata
new_info['dependencies'] = new_dependencies
if dry_run:
new_info['uuid'] = None
else:
if new_info['bundle_type'] not in ('make', 'run'):
raise UsageError(
'Can\'t mimic %s since it is not make or run' % old_bundle_uuid
)
# Create the new bundle, requesting to shadow the old
# bundle in its worksheet if shadow is specified, otherwise
# leave the bundle detached, to be added later below.
params = {}
params['worksheet'] = worksheet_uuid
if shadow:
params['shadow'] = old_info['uuid']
else:
params['detached'] = True
new_info = client.create('bundles', new_info, params=params)
new_bundle_uuid = new_info['uuid']
plan.append((old_info, new_info))
downstream.add(old_bundle_uuid)
created_uuids.add(new_bundle_uuid)
else:
new_bundle_uuid = old_bundle_uuid
old_to_new[old_bundle_uuid] = new_bundle_uuid # Cache it
return new_bundle_uuid
if old_output:
recurse(old_output)
else:
# Don't have a particular output we're targeting, so just create
# new versions of all the uuids.
for uuid in all_bundle_uuids:
recurse(uuid)
# Add to worksheet
if not dry_run and not shadow:
# A prelude of a bundle on a worksheet is the set of items (markup, directives, etc.)
# that occur immediately before it, until the last preceding newline.
# Let W be the first worksheet containing the old_inputs[0].
# Add all items on that worksheet that appear in old_to_new along with their preludes.
# For items not on this worksheet, add them at the end (instead of making them floating).
if old_output:
anchor_uuid = old_output
elif len(old_inputs) > 0:
anchor_uuid = old_inputs[0]
# Find worksheets that contain the anchor bundle
host_worksheets = client.fetch('worksheets', params={'keywords': 'bundle=' + anchor_uuid})
host_worksheet_uuids = [hw['id'] for hw in host_worksheets]
new_bundle_uuids_added = set()
if len(host_worksheet_uuids) > 0:
# Choose a single worksheet.
if worksheet_uuid in host_worksheet_uuids:
# If current worksheet is one of them, favor that one.
host_worksheet_uuid = worksheet_uuid
else:
# Choose an arbitrary one (in the future, have a better way of canonicalizing).
host_worksheet_uuid = host_worksheet_uuids[0]
# Fetch the worksheet
worksheet_info = client.fetch(
'worksheets', host_worksheet_uuid, params={'include': ['items', 'items.bundle']}
)
prelude_items = [] # The prelude that we're building up
for item in worksheet_info['items']:
just_added = False
if item['type'] == worksheet_util.TYPE_BUNDLE:
old_bundle_uuid = item['bundle']['id']
if old_bundle_uuid in old_to_new:
# Flush the prelude gathered so far.
new_bundle_uuid = old_to_new[old_bundle_uuid]
if new_bundle_uuid in created_uuids: # Only add novel bundles
# Add prelude
if not skip_prelude:
for item2 in prelude_items:
# Create a copy of the item on the destination worksheet
item2 = item2.copy()
item2['worksheet'] = JsonApiRelationship(
'worksheets', worksheet_uuid
)
client.create('worksheet-items', data=item2)
# Add the bundle item
client.create(
'worksheet-items',
data={
'type': worksheet_util.TYPE_BUNDLE,
'worksheet': JsonApiRelationship('worksheets', worksheet_uuid),
'bundle': JsonApiRelationship('bundles', new_bundle_uuid),
},
)
new_bundle_uuids_added.add(new_bundle_uuid)
just_added = True
if (item['type'] == worksheet_util.TYPE_MARKUP and item['value'] != '') or item[
'type'
] == worksheet_util.TYPE_DIRECTIVE:
prelude_items.append(item) # Include in prelude
else:
prelude_items = [] # Reset
# Add the bundles that haven't been added yet
for info, new_info in plan:
new_bundle_uuid = new_info['uuid']
if new_bundle_uuid not in new_bundle_uuids_added:
print('adding: ' + new_bundle_uuid)
client.create(
'worksheet-items',
data={
'type': worksheet_util.TYPE_BUNDLE,
'worksheet': JsonApiRelationship('worksheets', worksheet_uuid),
'bundle': JsonApiRelationship('bundles', new_bundle_uuid),
},
)
return plan
# A helper function that returns the uuids of parents of a bundle
# by extracting from bundle info.
def get_parents(client, bundle_info):
    """Return the uuids of the direct parents of a bundle.

    ``client`` is unused here but kept for signature parity with the other
    traversal helpers in this module.
    """
    return [dep['parent_uuid'] for dep in bundle_info['dependencies']]
# A DFS traversal that performs an operation pre-order at each node.
def DFS(client, node, operation, depth_left):
    """Pre-order depth-first traversal over a bundle's ancestors.

    ``operation`` is invoked as ``operation((uuid, info), depth_left)`` for
    every visited bundle.  Traversal stops once ``depth_left`` reaches zero.
    """
    if depth_left == 0:
        return
    bundle_info = client.fetch('bundles', node)
    operation((node, bundle_info), depth_left)
    for parent_uuid in get_parents(client, bundle_info):
        DFS(client, parent_uuid, operation, depth_left - 1)
# The max_depth defaults to 5, as the results likely can't be
# presented in a clean/clear manner if we go any deeper.
def get_bundle_ancestors(client, bundle_uuid, max_depth=5):
    """Collect (uuid, name, depth) tuples for a bundle and its ancestors.

    Tuples are appended in pre-order DFS order.  ``max_depth`` defaults to 5
    because deeper results likely can't be presented clearly.
    """
    visited = []

    def record(bundle, depth):
        uuid, info = bundle
        visited.append((uuid, info.get('metadata', {}).get('name'), depth))

    DFS(client, bundle_uuid, record, max_depth)
    return visited
| 40.91573 | 109 | 0.589386 |
ace185e8fb1d6797f6d1b67cd9b0bf8b48ed88c1 | 5,132 | py | Python | faker/providers/color/fa_IR/__init__.py | StabbarN/faker | 57882ff73255cb248d8f995b2abfce5cfee45ab3 | [
"MIT"
] | 4 | 2020-09-23T15:48:00.000Z | 2021-02-25T07:55:23.000Z | faker/providers/color/fa_IR/__init__.py | StabbarN/faker | 57882ff73255cb248d8f995b2abfce5cfee45ab3 | [
"MIT"
] | 10 | 2020-03-24T10:47:53.000Z | 2021-04-08T19:51:44.000Z | faker/providers/color/fa_IR/__init__.py | StabbarN/faker | 57882ff73255cb248d8f995b2abfce5cfee45ab3 | [
"MIT"
] | 1 | 2020-10-26T11:00:22.000Z | 2020-10-26T11:00:22.000Z | from collections import OrderedDict
from .. import Provider as ColorProvider
class Provider(ColorProvider):
    """Color provider localized for Persian (fa_IR): color names with hex codes."""

    # https://www.seyedrezabazyar.com/fa/name-and-code-of-colors/
    # https://bit.ly/353BBiY
    # Mapping of Persian color name -> hex code (ordered as in the sources above).
    all_colors = OrderedDict((
        ("نیلی محو", "#F0F8FF"),
        ("بژ تیره", "#FAEBD7"),
        ("فیروزهای", "#00FFFF"),
        ("یشمی", "#7FFFD4"),
        ("لاجوردی", "#F0FFFF"),
        ("بژ", "#F5F5DC"),
        ("کرم", "#FFE4C4"),
        ("مشکی", "#000000"),
        ("کاهگلی", "#FFEBCD"),
        ("آبی", "#0000FF"),
        ("آبی-بنفش سیر", "#8A2BE2"),
        ("قهوهای", "#A52A2A"),
        ("خاکی", "#DEB887"),
        ("آبی لجنی", "#5F9EA0"),
        ("سبز روشن", "#7FFF00"),
        ("شوکولاتی", "#D2691E"),
        ("مرجانی", "#FF7F50"),
        ("آبی کدر", "#6495ED"),
        ("کاهی", "#FFF8DC"),
        ("زرشکی", "#DC143C"),
        ("فیروزهای", "#00FFFF"),
        ("سرمهای", "#00008B"),
        ("سبز کبریتی تیره", "#008B8B"),
        ("ماشی سیر", "#B8860B"),
        ("خاکستری سیر", "#A9A9A9"),
        ("سبز آووکادو", "#006400"),
        ("ماشی", "#BDB76B"),
        ("مخملی", "#8B008B"),
        ("زیتونی سیر", "#556B2F"),
        ("نارنجی سیر", "#FF8C00"),
        ("ارکیده بنفش", "#9932CC"),
        ("عنابی تند", "#8B0000"),
        ("قهوهایِ حنایی", "#E9967A"),
        ("سبز دریایی تیره", "#8FBC8F"),
        ("آبی دودی", "#483D8B"),
        ("لجنی تیره", "#2F4F4F"),
        ("فیروزهای سیر", "#00CED1"),
        ("بنفش باز", "#9400D3"),
        ("شفقی", "#FF1493"),
        ("آبی کمرنگ", "#00BFFF"),
        ("دودی", "#696969"),
        ("نیلی", "#1E90FF"),
        ("شرابی", "#B22222"),
        ("پوست پیازی", "#FFFAF0"),
        ("شویدی", "#228B22"),
        ("سرخابی", "#FF00FF"),
        ("خاکستری مات", "#DCDCDC"),
        ("سفید بنفشه", "#F8F8FF"),
        ("کهربایی باز", "#FFD700"),
        ("خردلی", "#DAA520"),
        ("خاکستری", "#808080"),
        ("سبز", "#008000"),
        ("مغزپستهای کمرنگ", "#ADFF2F"),
        ("یشمی محو", "#F0FFF0"),
        ("سرخابی", "#FF69B4"),
        ("جگری", "#CD5C5C"),
        ("نیلی سیر", "#4B0082"),
        ("استخوانی", "#FFFFF0"),
        ("خاکی روشن", "#F0E68C"),
        ("نیلی کمرنگ", "#E6E6FA"),
        ("صورتی مات", "#FFF0F5"),
        ("مغزپستهای پررنگ", "#7CFC00"),
        ("شیرشکری", "#FFFACD"),
        ("آبی کبریتی", "#ADD8E6"),
        ("بژ تیره", "#F08080"),
        ("آبی آسمانی", "#E0FFFF"),
        ("لیمویی روشن", "#FAFAD2"),
        ("خاکستری روشن", "#D3D3D3"),
        ("سبز روشن", "#90EE90"),
        ("صورتی روشن", "#FFB6C1"),
        ("کرم نارنجی", "#FFA07A"),
        ("سبز کبریتی روشن", "#20B2AA"),
        ("آبی آسمانی روشن", "#87CEFA"),
        ("سربی", "#778899"),
        ("بنفش مایل به آبی", "#B0C4DE"),
        ("شیری", "#FFFFE0"),
        ("مغزپستهای روشن", "#00FF00"),
        ("سبز چمنی", "#32CD32"),
        ("كتانی", "#FAF0E6"),
        ("سرخ آبی", "#FF00FF"),
        ("آلبالویی", "#800000"),
        ("سبز دریایی", "#66CDAA"),
        ("آبی سیر", "#0000CD"),
        ("ارکیده سیر", "#BA55D3"),
        ("سرخ آبی سیر", "#9370DB"),
        ("خزهای", "#3CB371"),
        ("آبی متالیک روشن", "#7B68EE"),
        ("یشمی سیر", "#00FA9A"),
        ("فیروزهای تیره", "#48D1CC"),
        ("ارغوانی", "#C71585"),
        ("آبی نفتی", "#191970"),
        ("سفید نعنائی", "#F5FFFA"),
        ("بژ", "#FFE4E1"),
        ("هلویی", "#FFE4B5"),
        ("کرم سیر", "#FFDEAD"),
        ("لاجوردی", "#000080"),
        ("بژ روشن", "#FDF5E6"),
        ("زیتونی", "#808000"),
        ("سبز ارتشی", "#6B8E23"),
        ("نارنجی", "#FFA500"),
        ("قرمز-نارنجی", "#FF4500"),
        ("ارکیده", "#DA70D6"),
        ("نخودی", "#EEE8AA"),
        ("سبز کمرنگ", "#98FB98"),
        ("فیروزهای کدر", "#AFEEEE"),
        ("شرابی روشن", "#DB7093"),
        ("هلویی روشن", "#FFEFD5"),
        ("هلویی پررنگ", "#FFDAB9"),
        ("بادامی سیر", "#CD853F"),
        ("صورتی", "#FFC0CB"),
        ("بنفش کدر", "#DDA0DD"),
        ("آبی کبریتی روشن", "#B0E0E6"),
        ("بنفش", "#800080"),
        ("قرمز", "#FF0000"),
        ("بادمجانی", "#BC8F8F"),
        ("فیروزهای فسفری", "#4169E1"),
        ("کاکائویی", "#8B4513"),
        ("سالمحناییِ روشنوني", "#FA8072"),
        ("هلویی سیر", "#F4A460"),
        ("خزهای پررنگ", "#2E8B57"),
        ("صدفی", "#FFF5EE"),
        ("قهوهای متوسط", "#A0522D"),
        ("طوسی", "#C0C0C0"),
        ("آبی آسمانی", "#87CEEB"),
        ("آبی فولادی", "#6A5ACD"),
        ("سربی تیره", "#708090"),
        ("صورتی محو", "#FFFAFA"),
        ("یشمی کمرنگ", "#00FF7F"),
        ("نیلی متالیک", "#4682B4"),
        ("برنزه کدر", "#D2B48C"),
        ("سبز دودی", "#008080"),
        ("بادمجانی روشن", "#D8BFD8"),
        ("قرمز گوجهای", "#FF6347"),
        ("سبز دریایی روشن", "#40E0D0"),
        ("بنفش روشن", "#EE82EE"),
        ("گندمی", "#F5DEB3"),
        ("سفید", "#FFFFFF"),
        ("خاکستری محو", "#F5F5F5"),
        ("زرد", "#FFFF00"),
        ("سبز لجنی", "#9ACD32"),
    ))

    # Subset of names considered "safe" colors (see the base ColorProvider).
    safe_colors = (
        "سیاه", "عنابی", "سبز", "آبی کاربنی", "زیتونی",
        "بنفش", "سبز دودی", "آهکی", "آبی", "نقرهای",
        "خاکستری", "زرد", "ارغوانی", "فیروزهای", "سفید",
    )
| 32.687898 | 65 | 0.42576 |
ace1868dd4f2d7c244509b9f75d6194045313c9c | 12,778 | py | Python | featuretools/variable_types/variable.py | esyyes/featuretools | 7d96bd221bad71c70b5d79ce7f7a8885c298f6df | [
"BSD-3-Clause"
] | 1 | 2020-06-10T02:39:27.000Z | 2020-06-10T02:39:27.000Z | featuretools/variable_types/variable.py | esyyes/featuretools | 7d96bd221bad71c70b5d79ce7f7a8885c298f6df | [
"BSD-3-Clause"
] | null | null | null | featuretools/variable_types/variable.py | esyyes/featuretools | 7d96bd221bad71c70b5d79ce7f7a8885c298f6df | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import pandas as pd
from featuretools.utils.gen_utils import camel_to_snake, find_descendents
class ClassNameDescriptor(object):
    """Descriptor to convert a class's name from camelcase to snakecase
    """

    def __get__(self, instance, class_):
        # Always derives from the owning class's name, regardless of whether
        # it is accessed on the class or an instance.
        return camel_to_snake(class_.__name__)
class Variable(object):
    """Represent a variable in an entity

    A Variable is analogous to a column in table in a relational database

    Args:
        id (str) : Id of variable. Must match underlying data in Entity
            it belongs to.
        entity (:class:`.Entity`) : Entity this variable belongs to.
        name (str, optional) : Variable name. Defaults to id.

    See Also:
        :class:`.Entity`, :class:`.Relationship`, :class:`.BaseEntitySet`
    """
    # Resolves to the snake_case form of the concrete subclass name
    # (see ClassNameDescriptor).
    type_string = ClassNameDescriptor()
    _default_pandas_dtype = object

    def __init__(self, id, entity, name=None):
        assert isinstance(id, str), "Variable id must be a string"
        self.id = id
        self._name = name
        self.entity_id = entity.id
        assert entity.entityset is not None, "Entity must contain reference to EntitySet"
        self.entity = entity
        if self.id not in self.entity.df:
            # Column absent from the entity's dataframe: fall back to the
            # class default dtype, spelling the numpy datetime/timedelta
            # types as pandas dtype strings.
            default_dtype = self._default_pandas_dtype
            if default_dtype == np.datetime64:
                default_dtype = 'datetime64[ns]'
            if default_dtype == np.timedelta64:
                default_dtype = 'timedelta64[ns]'
        else:
            default_dtype = self.entity.df[self.id].dtype
        self._interesting_values = pd.Series(dtype=default_dtype)

    @property
    def entityset(self):
        return self.entity.entityset

    def __eq__(self, other, deep=False):
        # Shallow equality: same class, same id, same entity id.  Deep
        # equality additionally compares the sets of interesting values.
        shallow_eq = isinstance(other, self.__class__) and \
            self.id == other.id and \
            self.entity_id == other.entity_id
        if not deep:
            return shallow_eq
        else:
            return shallow_eq and set(self.interesting_values.values) == set(other.interesting_values.values)

    def __hash__(self):
        # Consistent with shallow __eq__ (id + entity_id).
        return hash((self.id, self.entity_id))

    def __repr__(self):
        return u"<Variable: {} (dtype = {})>".format(self.name, self.type_string)

    @classmethod
    def create_from(cls, variable):
        """Create new variable this type from existing

        Args:
            variable (Variable) : Existing variable to create from.

        Returns:
            :class:`.Variable` : new variable
        """
        v = cls(id=variable.id, name=variable.name, entity=variable.entity)
        return v

    @property
    def name(self):
        # Fall back to the id when no explicit name was given.
        return self._name if self._name is not None else self.id

    @property
    def dtype(self):
        return self.type_string \
            if self.type_string is not None else "generic_type"

    @name.setter
    def name(self, name):
        self._name = name

    @property
    def interesting_values(self):
        return self._interesting_values

    @interesting_values.setter
    def interesting_values(self, interesting_values):
        # Re-wrap in a Series, preserving the dtype established in __init__.
        self._interesting_values = pd.Series(interesting_values,
                                             dtype=self._interesting_values.dtype)

    @property
    def series(self):
        # The underlying column for this variable in the entity's dataframe.
        return self.entity.df[self.id]

    def to_data_description(self):
        """Return a serializable description of this variable.

        Subclasses extend the 'type' sub-dict with their own fields.
        """
        return {
            'id': self.id,
            'type': {
                'value': self.type_string,
            },
            'properties': {
                'name': self.name,
                'entity': self.entity.id,
                'interesting_values': self._interesting_values.to_json()
            },
        }
class Unknown(Variable):
    # NOTE(review): catch-all type -- inferred from the name; no usage
    # sites are visible in this file.
    pass
class Discrete(Variable):
    """Superclass representing variables that take on discrete values"""

    def __init__(self, id, entity, name=None):
        super(Discrete, self).__init__(id, entity, name)

    @property
    def interesting_values(self):
        return self._interesting_values

    @interesting_values.setter
    def interesting_values(self, values):
        # Drop duplicates while preserving first-seen order, and keep the
        # dtype of the existing series.
        deduped = []
        seen = set()
        for value in values:
            if value not in seen:
                seen.add(value)
                deduped.append(value)
        self._interesting_values = pd.Series(deduped,
                                             dtype=self._interesting_values.dtype)
class Boolean(Variable):
    """Represents variables that take on one of two values

    Args:
        true_values (list) : List of valued true values. Defaults to [1, True, "true", "True", "yes", "t", "T"]
        false_values (list): List of valued false values. Defaults to [0, False, "false", "False", "no", "f", "F"]
    """
    _default_pandas_dtype = bool

    def __init__(self, id, entity, name=None,
                 true_values=None, false_values=None):
        self.true_values = true_values or [1, True, "true", "True", "yes", "t", "T"]
        self.false_values = false_values or [0, False, "false", "False", "no", "f", "F"]
        super(Boolean, self).__init__(id, entity, name=name)

    def to_data_description(self):
        # Extend the base description with the accepted truthy/falsy values.
        description = super(Boolean, self).to_data_description()
        description['type']['true_values'] = self.true_values
        description['type']['false_values'] = self.false_values
        return description
class Categorical(Discrete):
    """Represents variables that can take an unordered discrete values

    Args:
        categories (list) : List of categories. If left blank, inferred from data.
    """

    def __init__(self, id, entity, name=None, categories=None):
        # Bug fix: this was `self.categories = None or []`, which always
        # evaluated to [] and silently discarded the caller's `categories`
        # argument.  Honor the argument, defaulting to an empty list.
        self.categories = categories or []
        super(Categorical, self).__init__(id, entity, name=name)

    def to_data_description(self):
        description = super(Categorical, self).to_data_description()
        description['type'].update({'categories': self.categories})
        return description
class Id(Categorical):
    """Represents variables that identify another entity"""
    # Stored as integers by default.
    _default_pandas_dtype = int
class Ordinal(Discrete):
    """Represents variables that take on an ordered discrete value"""
    # Stored as integers by default.
    _default_pandas_dtype = int
class Numeric(Variable):
    """Represents variables that contain numeric values

    Args:
        range (list, optional) : List of start and end. Can use inf and -inf to represent infinity. Unconstrained if not specified.
        start_inclusive (bool, optional) : Whether or not range includes the start value.
        end_inclusive (bool, optional) : Whether or not range includes the end value

    Attributes:
        max (float)
        min (float)
        std (float)
        mean (float)
    """
    _default_pandas_dtype = float

    def __init__(self,
                 id,
                 entity,
                 name=None,
                 range=None,
                 start_inclusive=True,
                 end_inclusive=False):
        # Bug fix: this was `self.range = None or []`, which always
        # discarded the caller's `range` argument (compare Timedelta, which
        # correctly does `range or []`).
        self.range = range or []
        self.start_inclusive = start_inclusive
        self.end_inclusive = end_inclusive
        super(Numeric, self).__init__(id, entity, name=name)

    def to_data_description(self):
        description = super(Numeric, self).to_data_description()
        description['type'].update({
            'range': self.range,
            'start_inclusive': self.start_inclusive,
            'end_inclusive': self.end_inclusive,
        })
        return description
class Index(Variable):
    """Represents variables that uniquely identify an instance of an entity

    Attributes:
        count (int)
    """
    # Index values default to integer storage.
    _default_pandas_dtype = int
class Datetime(Variable):
    """Represents variables that are points in time

    Args:
        format (str): Python datetime format string documented `here <http://strftime.org/>`_.
    """
    _default_pandas_dtype = np.datetime64

    def __init__(self, id, entity, name=None, format=None):
        self.format = format
        super(Datetime, self).__init__(id, entity, name=name)

    def __repr__(self):
        return u"<Variable: {} (dtype: {}, format: {})>".format(self.name, self.type_string, self.format)

    def to_data_description(self):
        # Extend the base description with the datetime format string.
        desc = super(Datetime, self).to_data_description()
        desc['type']['format'] = self.format
        return desc
class TimeIndex(Variable):
    """Represents time index of entity"""
    # Time indexes default to datetime storage.
    _default_pandas_dtype = np.datetime64
class NumericTimeIndex(TimeIndex, Numeric):
    """Represents time index of entity that is numeric"""
    # Overrides TimeIndex's datetime default with float storage.
    _default_pandas_dtype = float
class DatetimeTimeIndex(TimeIndex, Datetime):
    """Represents time index of entity that is a datetime"""
    _default_pandas_dtype = np.datetime64
class Timedelta(Variable):
    """Represents variables that are timedeltas

    Args:
        range (list, optional) : List of start and end of allowed range in seconds. Can use inf and -inf to represent infinity. Unconstrained if not specified.
        start_inclusive (bool, optional) : Whether or not range includes the start value.
        end_inclusive (bool, optional) : Whether or not range includes the end value
    """
    _default_pandas_dtype = np.timedelta64

    def __init__(self, id, entity, name=None, range=None,
                 start_inclusive=True, end_inclusive=False):
        self.range = range or []
        self.start_inclusive = start_inclusive
        self.end_inclusive = end_inclusive
        super(Timedelta, self).__init__(id, entity, name=name)

    def to_data_description(self):
        # Extend the base description with the range constraints.
        desc = super(Timedelta, self).to_data_description()
        desc['type']['range'] = self.range
        desc['type']['start_inclusive'] = self.start_inclusive
        desc['type']['end_inclusive'] = self.end_inclusive
        return desc
class Text(Variable):
    """Represents variables that are arbitrary strings"""
    _default_pandas_dtype = str
class PandasTypes(object):
    # Collections of pandas dtype names grouped by kind.
    # NOTE(review): usage sites are outside this chunk -- confirm consumers.
    _all = 'all'
    _categorical = 'category'
    _pandas_datetimes = ['datetime64[ns]', 'datetime64[ns, tz]']
    _pandas_timedeltas = ['Timedelta']
    _pandas_numerics = ['int16', 'int32', 'int64',
                        'float16', 'float32', 'float64']
class LatLong(Variable):
    """Represents an ordered pair (Latitude, Longitude)

    To make a latlong in a dataframe do
    data['latlong'] = data[['latitude', 'longitude']].apply(tuple, axis=1)
    """
class ZIPCode(Categorical):
    """Represents a postal address in the United States.

    Consists of a series of digits which are casts as
    string. Five digit and 9 digit zipcodes are supported.
    """
    _default_pandas_dtype = str
class IPAddress(Variable):
    """Represents a computer network address. Represented
    in dotted-decimal notation. IPv4 and IPv6 are supported.
    """
    _default_pandas_dtype = str
class FullName(Variable):
    """Represents a person's full name. May consist of a
    first name, last name, and a title.
    """
    _default_pandas_dtype = str
class EmailAddress(Variable):
    """Represents an email box to which email message are sent.
    Consists of a local-part, an @ symbol, and a domain.
    """
    _default_pandas_dtype = str
class URL(Variable):
    """Represents a valid web url (with or without http/www)"""
    _default_pandas_dtype = str
class PhoneNumber(Variable):
    """Represents any valid phone number.
    Can be with/without parenthesis.
    Can be with/without area/country codes.
    """
    _default_pandas_dtype = str
class DateOfBirth(Datetime):
    """Represents a date of birth as a datetime"""
    _default_pandas_dtype = np.datetime64
class CountryCode(Categorical):
    """Represents an ISO-3166 standard country code.

    ISO 3166-1 (countries) are supported. These codes
    should be in the Alpha-2 format.
    e.g. United States of America = US
    """
    _default_pandas_dtype = str
class SubRegionCode(Categorical):
    """Represents an ISO-3166 standard sub-region code.

    ISO 3166-2 codes (sub-regions are supported. These codes
    should be in the Alpha-2 format.
    e.g. United States of America, Arizona = US-AZ
    """
    _default_pandas_dtype = str
class FilePath(Variable):
    """Represents a valid filepath, absolute or relative"""
    _default_pandas_dtype = str
def find_variable_types():
    """Map each Variable subclass's type_string to the class itself."""
    types = {}
    for vtype in find_descendents(Variable):
        if vtype != Variable:
            types[vtype.type_string] = vtype
    return types
# Representative sample value for each supported dtype.
# NOTE(review): consumers are outside this chunk -- purpose inferred from
# the values; confirm against callers.
DEFAULT_DTYPE_VALUES = {
    np.datetime64: pd.Timestamp.now(),
    int: 0,
    float: 0.1,
    np.timedelta64: pd.Timedelta('1d'),
    object: 'object',
    bool: True,
    str: 'test'
}
| 30.351544 | 159 | 0.634372 |
ace1873a0f64fb0f911f2c6bd4d1149e1a7ac1db | 6,788 | py | Python | django_messages/views.py | mirumee/django-messages | f4cabd0be1ae42fdab0702f528f1c329beb01974 | [
"BSD-3-Clause"
] | 16 | 2015-02-25T18:17:43.000Z | 2021-09-22T13:56:12.000Z | django_messages/views.py | mirumee/django-messages | f4cabd0be1ae42fdab0702f528f1c329beb01974 | [
"BSD-3-Clause"
] | null | null | null | django_messages/views.py | mirumee/django-messages | f4cabd0be1ae42fdab0702f528f1c329beb01974 | [
"BSD-3-Clause"
] | 6 | 2015-05-04T07:03:24.000Z | 2021-08-14T17:25:02.000Z | # -*- coding:utf-8 -*-
import datetime
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404, redirect
from django.template import RequestContext
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_noop
from django.core.urlresolvers import reverse
from django.conf import settings
from django.db import transaction
from django.views.generic.list_detail import object_list, object_detail
from django_messages.models import Message
from django_messages.forms import ComposeForm, ReplyForm
from django_messages.utils import format_quote
@login_required
def message_list(request, queryset, paginate_by=25,
                 extra_context=None, template_name=None):
    """Render a paginated message list via the generic object_list view."""
    return object_list(
        request,
        queryset=queryset,
        paginate_by=paginate_by,
        extra_context=extra_context,
        template_name=template_name,
        template_object_name='message',
    )
@login_required
def inbox(request, template_name='django_messages/inbox.html', **kw):
    """
    Displays a list of received messages for the current user.
    """
    kw['template_name'] = template_name
    return message_list(request, Message.inbox.for_user(request.user), **kw)
@login_required
def outbox(request, template_name='django_messages/outbox.html', **kw):
    """
    Displays a list of sent messages for the current user.
    """
    kw['template_name'] = template_name
    return message_list(request, Message.outbox.for_user(request.user), **kw)
@login_required
def trash(request, template_name='django_messages/trash.html', **kw):
    """
    Displays a list of deleted messages.
    """
    kw['template_name'] = template_name
    return message_list(request, Message.trash.for_user(request.user), **kw)
@login_required
@transaction.commit_on_success
def compose(request, recipient=None, form_class=ComposeForm,
            template_name='django_messages/compose.html', success_url=None,
            recipient_filter=None, extra_context=None):
    """
    Displays and handles the ``form_class`` form to compose new messages.

    Required Arguments: None
    Optional Arguments:
        ``recipient``: username of a `django.contrib.auth` User, who should
                       receive the message, optionally multiple usernames
                       could be separated by a '+'
        ``form_class``: the form-class to use
        ``template_name``: the template to use
        ``success_url``: where to redirect after successful submission
        ``extra_context``: extra context dict
    """
    if request.method == "POST":
        form = form_class(request.user, data=request.POST,
                          recipient_filter=recipient_filter)
        if form.is_valid():
            instance, message_list = form.save()
            Message.objects.send(message_list)
            messages.add_message(request, messages.SUCCESS, _(u"Message successfully sent."))
            # Redirect priority: explicit success_url, then ?next=, then inbox.
            return redirect(success_url or request.GET.get('next') or inbox)
        # Invalid POST falls through and re-renders the bound form.
    else:
        form = form_class(request.user, initial={'recipients': recipient})
    # Bug fix: copy the caller's extra context instead of updating it in
    # place -- the previous code mutated a dict that may be shared across
    # requests (e.g. a module-level constant passed by the URLconf).
    ctx = dict(extra_context) if extra_context else {}
    ctx['form'] = form
    return render_to_response(template_name, RequestContext(request, ctx))
@login_required
@transaction.commit_on_success
def reply(request, message_id, form_class=ReplyForm,
          template_name='django_messages/reply.html', success_url=None,
          recipient_filter=None, extra_context=None):
    """
    Prepares the ``form_class`` form for writing a reply to a given message
    (specified via ``message_id``).
    """
    # 404 unless the message exists and belongs to the current user.
    parent = get_object_or_404(Message, pk=message_id, owner=request.user)
    if request.method == "POST":
        form = form_class(request.user, parent, data=request.POST,
                          recipient_filter=recipient_filter)
        if form.is_valid():
            instance, message_list = form.save()
            Message.objects.send(message_list)
            messages.add_message(request, messages.SUCCESS, _(u"Message successfully sent."))
            return redirect(success_url or inbox)
        # Invalid POST falls through and re-renders the bound form.
    else:
        form = form_class(request.user, parent)
    # Bug fix: copy the caller's extra context instead of mutating it in
    # place (the dict may be shared across requests).
    ctx = dict(extra_context) if extra_context else {}
    ctx['form'] = form
    return render_to_response(template_name,
                              RequestContext(request, ctx))
@login_required
@transaction.commit_on_success
def delete(request, message_id, success_url=None):
    """
    Marks a message as deleted by sender or recipient. The message is not
    really removed from the database, because two users must delete a message
    before it's save to remove it completely.
    A cron-job should prune the database and remove old messages which are
    deleted by both users.
    As a side effect, this makes it easy to implement a trash with undelete.

    You can pass ?next=/foo/bar/ via the url to redirect the user to a different
    page (e.g. `/foo/bar/`) than ``success_url`` after deletion of the message.
    """
    message = get_object_or_404(Message, pk=message_id, owner=request.user)
    message.move_to_trash()
    message.save()
    messages.add_message(request, messages.SUCCESS, _(u"Message successfully deleted."))
    # ?next= takes priority over success_url; fall back to the inbox view.
    next_url = request.GET.get('next') or success_url or inbox
    return redirect(next_url)
@login_required
@transaction.commit_on_success
def undelete(request, message_id, success_url=None):
    """
    Recovers a message from trash.
    """
    message = get_object_or_404(Message, pk=message_id, owner=request.user)
    message.undelete()
    message.save()
    messages.add_message(request, messages.SUCCESS,
                         _(u"Message successfully recovered."))
    # should be dependent on message box (inbox/outbox)
    fallback_view = inbox
    return redirect(request.GET.get('next') or success_url or fallback_view)
@login_required
def view(request, message_id, template_name='django_messages/view.html',
         extra_context=None):
    """
    Shows a single message.``message_id`` argument is required.
    The user is only allowed to see the message, if he is either
    the sender or the recipient. If the user is not allowed a 404
    is raised.
    If the user is the recipient and the message is unread
    ``read_at`` is set to the current datetime.
    """
    message = get_object_or_404(Message, pk=message_id, owner=request.user)
    if message.is_unread():
        # Viewing an unread message marks it as read (persisted side effect).
        message.mark_read()
        message.save()
    # Bug fix: copy the caller's extra context instead of mutating it in
    # place (the dict may be shared across requests).
    ctx = dict(extra_context) if extra_context else {}
    ctx['message'] = message
    return render_to_response(template_name, RequestContext(request, ctx))
| 35.915344 | 93 | 0.706688 |
ace1879c6914d0c0810590fffb98fea8e8b7e515 | 1,487 | py | Python | environments/env_utils/vec_env/util.py | lfeng1999/varibad | 840f4bd56ccee96a6c162265d18ec54db8b77a1e | [
"MIT"
] | 119 | 2020-02-12T07:06:17.000Z | 2022-03-24T08:37:34.000Z | environments/env_utils/vec_env/util.py | lfeng1999/varibad | 840f4bd56ccee96a6c162265d18ec54db8b77a1e | [
"MIT"
] | 2 | 2020-05-24T22:33:42.000Z | 2020-09-28T16:42:02.000Z | environments/env_utils/vec_env/util.py | lfeng1999/varibad | 840f4bd56ccee96a6c162265d18ec54db8b77a1e | [
"MIT"
] | 26 | 2020-04-20T13:10:11.000Z | 2022-03-22T10:21:10.000Z | """
Taken from https://github.com/openai/baselines
Helpers for dealing with vectorized envs.
"""
from collections import OrderedDict
import gym
import numpy as np
def copy_obs_dict(obs):
    """
    Deep-copy an observation dict.
    """
    copied = {}
    for key, value in obs.items():
        copied[key] = np.copy(value)
    return copied
def dict_to_obs(obs_dict):
    """
    Convert an observation dict into a raw array if the
    original observation space was not a Dict space.
    """
    keys = set(obs_dict.keys())
    if keys == {None}:
        # Single unnamed subspace: unwrap to the raw observation.
        return obs_dict[None]
    return obs_dict
def obs_space_info(obs_space):
    """
    Get dict-structured information about a gym.Space.

    Returns:
        A tuple (keys, shapes, dtypes):
            keys: a list of dict keys.
            shapes: a dict mapping keys to shapes.
            dtypes: a dict mapping keys to dtypes.
    """
    try:
        if isinstance(obs_space, gym.spaces.Dict):
            assert isinstance(obs_space.spaces, OrderedDict)
            subspaces = obs_space.spaces
        else:
            # Non-dict space: treat the whole space as a single subspace
            # keyed by None.
            subspaces = {None: obs_space}
    except AttributeError:
        # NOTE(review): presumably guards against space objects lacking the
        # attributes accessed above -- confirm which case this actually hits.
        subspaces = {None: obs_space}
    keys = []
    shapes = {}
    dtypes = {}
    for key, box in subspaces.items():
        keys.append(key)
        shapes[key] = box.shape
        # Default to float32 when the subspace does not declare a dtype.
        dtypes[key] = getattr(box, 'dtype', np.float32)
    return keys, shapes, dtypes
def obs_to_dict(obs):
    """
    Convert an observation into a dict.
    """
    return obs if isinstance(obs, dict) else {None: obs}
| 22.876923 | 60 | 0.618023 |
ace18891d9753ac282cbf2ef5cc95622c86cd54d | 1,348 | py | Python | scripts/rename_calib_files.py | h-wata/r3live_tools | 5825e5264367d06bc3491bd4a9033a9d6ae16deb | [
"Apache-2.0"
] | null | null | null | scripts/rename_calib_files.py | h-wata/r3live_tools | 5825e5264367d06bc3491bd4a9033a9d6ae16deb | [
"Apache-2.0"
] | null | null | null | scripts/rename_calib_files.py | h-wata/r3live_tools | 5825e5264367d06bc3491bd4a9033a9d6ae16deb | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- encoding: utf-8 -*-
"""rename image and pcd files for livox_camera_calib.
python rename_calib_files.py (int N)
example:
$python rename_calib_files.py 0
# export calib/images/0.jpg and calib/pcds/0.pcd
"""
import glob
import os
import shutil
import sys
import rospkg
from PIL import Image
def remove_glob(pathname):
    # type:(str) -> None
    """Delete every file matching the glob pattern *pathname*."""
    matches = glob.glob(pathname)
    for match in matches:
        os.remove(match)
def find_glob(pathname):
    # type:(str) -> list
    """Return the files matching the glob pattern *pathname*.

    Terminates the program when nothing matches, since the calibration
    renaming cannot proceed without its input files.
    """
    files = glob.glob(pathname)
    if not files:
        # sys.exit (unlike the site-provided exit()) is always available,
        # writes the message to stderr, and returns a non-zero status to the
        # shell; the original printed to stdout and exited with status 0,
        # making the failure look like success to callers.
        sys.exit("Error: " + pathname + " is not found")
    return files
# Script body: rename the newest captured image/point-cloud pair to the index
# given as the first CLI argument (e.g. `rename_calib_files.py 0` produces
# calib/images/0.jpg, calib/images/0.bmp and calib/pcds/0.pcd).
if len(sys.argv) < 2:
    print("Error: this script requires an argument.")
    exit()
# Locate the r3live_tools ROS package root to resolve the calib directories.
ROOT_DIR = rospkg.RosPack().get_path('r3live_tools')
print(ROOT_DIR)
image_dir = ROOT_DIR + "/calib/images/"
pcd_dir = ROOT_DIR + "/calib/pcds/"
# Take the first captured frame and rename it to <index>.jpg.
image_files = find_glob(image_dir + 'frame*.jpg')
shutil.move(image_files[0], image_dir + sys.argv[1] + ".jpg")
# convert jpg to bmp
Image.open(image_dir + sys.argv[1] + ".jpg").save(image_dir + sys.argv[1] + ".bmp")
# Take the first captured point cloud and rename it to <index>.pcd.
pcd_files = find_glob(pcd_dir + "out_*.pcd")
shutil.move(pcd_files[0], pcd_dir + sys.argv[1] + ".pcd")
# Discard any remaining captures so the next run starts clean.
remove_glob(pcd_dir + "out_*.pcd")
remove_glob(image_dir + "frame*.jpg")
| 22.847458 | 83 | 0.65727 |
ace189981fbdeba77f642cec97d4b0f24e6787a0 | 1,882 | py | Python | cloud/container_instance_template.py | andyakins/GCPOC | aa7653d08472f3210313d600efdf1b3f4b88a12e | [
"MIT"
] | null | null | null | cloud/container_instance_template.py | andyakins/GCPOC | aa7653d08472f3210313d600efdf1b3f4b88a12e | [
"MIT"
] | null | null | null | cloud/container_instance_template.py | andyakins/GCPOC | aa7653d08472f3210313d600efdf1b3f4b88a12e | [
"MIT"
] | null | null | null | """Creates a Container VM with the provided Container manifest."""
from container_helper import GenerateManifest
def GenerateConfig(context):
    """Build Deployment Manager resources for a container instance template.

    Args:
        context: Deployment Manager context; ``context.properties`` must hold
            ``containerImage`` (plus whatever GenerateManifest consumes) and
            ``context.env`` must hold ``project`` and ``name``.

    Returns:
        A dict with one ``compute.v1.instanceTemplate`` resource and an output
        exposing its selfLink.
    """
    image = ''.join(['https://www.googleapis.com/compute/v1/',
                     'projects/google-containers/global/images/',
                     context.properties['containerImage']])

    default_network = ''.join(['https://www.googleapis.com/compute/v1/projects/',
                               context.env['project'],
                               '/global/networks/default'])

    # Boot disk created from the container-optimized image.
    boot_disk = {
        'deviceName': 'boot',
        'boot': True,
        'autoDelete': True,
        'mode': 'READ_WRITE',
        'type': 'PERSISTENT',
        'initializeParams': {'sourceImage': image}
    }

    # Single NIC on the project's default network with an ephemeral external IP.
    network_interface = {
        'accessConfigs': [{
            'name': 'external-nat',
            'type': 'ONE_TO_ONE_NAT'
        }],
        'network': default_network
    }

    vm_properties = {
        'metadata': {
            'items': [{
                'key': 'google-container-manifest',
                'value': GenerateManifest(context)
            }]
        },
        'tags': {
            'items': [
                'http-server'
            ]
        },
        'machineType': 'f1-micro',
        'disks': [boot_disk],
        'networkInterfaces': [network_interface]
    }

    instance_template = {
        'name': context.env['name'] + '-it',
        'type': 'compute.v1.instanceTemplate',
        'properties': {
            'properties': vm_properties
        }
    }

    outputs = [{'name': 'instanceTemplateSelfLink',
                'value': '$(ref.' + instance_template['name'] + '.selfLink)'}]
    return {'resources': [instance_template], 'outputs': outputs}
| 33.607143 | 79 | 0.443146 |
ace18aea56222a3021925395d21b8cc7ee10e291 | 3,655 | py | Python | pytorch_toolkit/nncf/tests/test_matcher.py | morkovka1337/openvino_training_extensions | 846db45c264d6b061505213f51763520b9432ba9 | [
"Apache-2.0"
] | 3 | 2020-12-29T02:47:32.000Z | 2021-11-12T08:12:51.000Z | pytorch_toolkit/nncf/tests/test_matcher.py | morkovka1337/openvino_training_extensions | 846db45c264d6b061505213f51763520b9432ba9 | [
"Apache-2.0"
] | 28 | 2020-09-25T22:40:36.000Z | 2022-03-12T00:37:36.000Z | pytorch_toolkit/nncf/tests/test_matcher.py | morkovka1337/openvino_training_extensions | 846db45c264d6b061505213f51763520b9432ba9 | [
"Apache-2.0"
] | 1 | 2021-03-12T10:08:44.000Z | 2021-03-12T10:08:44.000Z | """
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import networkx as nx
from nncf.dynamic_graph.graph_matching import NodeExpression as N, search_all
def add_nodes(graph, types, nodes=None):
    """Add one node per entry of *types* to *graph*, tagging each with a
    'type' attribute. Node ids default to 1..len(types) when *nodes* is
    omitted."""
    if nodes is None:
        nodes = list(range(1, len(types) + 1))
    for node_id, node_type in zip(nodes, types):
        graph.add_node(node_id, type=node_type)
def test_simple():
    """A two-node chain expression matches exactly once in a linear graph."""
    graph = nx.DiGraph()
    add_nodes(graph, ['a', 'b', 'c', 'a'])
    graph.add_edges_from([(1, 2), (2, 3), (3, 4)])
    expression = N('b') + N('c')
    assert search_all(graph, expression) == [[2, 3]]
def test_two_matched():
    """Both occurrences of the b->c pattern are found along the chain."""
    graph = nx.DiGraph()
    add_nodes(graph, ['a', 'b', 'c', 'a', 'b', 'c'])
    graph.add_edges_from([(1, 2), (2, 3), (3, 4), (4, 5), (5, 6)])
    expression = N('b') + N('c')
    assert search_all(graph, expression) == [[2, 3], [5, 6]]
def test_graph_branching():
    """With two parallel branches, only the first a->b branch matches."""
    graph = nx.DiGraph()
    add_nodes(graph, ['a', 'b', 'a', 'c'])
    graph.add_edges_from([(1, 2), (1, 3), (2, 4), (3, 4)])
    expression = N('a') + N('b')
    assert search_all(graph, expression) == [[1, 2]]
def test_graph_branching_other_order():
    """Same branching shape with swapped node types matches the other branch."""
    graph = nx.DiGraph()
    add_nodes(graph, ['a', 'a', 'b', 'c'])
    graph.add_edges_from([(1, 2), (1, 3), (2, 4), (3, 4)])
    expression = N('a') + N('b')
    assert search_all(graph, expression) == [[1, 3]]
def test_alternating():
    """An alternation (a | b) matches whichever branch follows the 'a' node."""
    graph = nx.DiGraph()
    add_nodes(graph, ['a', 'b'])
    graph.add_edges_from([(1, 2)])
    expression = N('a') + (N('a') | N('b'))
    assert search_all(graph, expression) == [[1, 2]]
def test_alternating_longest():
    """The longer alternative wins regardless of the order of alternatives."""
    graph = nx.DiGraph()
    #   b  c
    # a       d
    #   b
    add_nodes(graph, ['a', 'b', 'c', 'b', 'd'])
    graph.add_edges_from([(1, 2), (2, 3), (3, 5), (1, 4), (4, 5)])
    short_first = N('a') + (N('b') | N('b') + N('c'))
    long_first = N('a') + (N('b') + N('c') | N('b'))
    result_short_first = search_all(graph, short_first)
    result_long_first = search_all(graph, long_first)
    assert result_long_first == result_short_first == [[1, 2, 3]]
def test_branching_expression():
    """A branching expression (b & c) matches both parallel nodes between a and d."""
    g = nx.DiGraph()
    #   b
    # a    d
    #   c
    add_nodes(g, ['a', 'b', 'c', 'd'])
    g.add_edges_from([(1, 2), (1, 3), (2, 4), (3, 4)])
    # The original built an identical expression piecewise into locals that
    # were immediately overwritten (dead code); only this expression is used.
    ex = N('a') + (N('b') & N('c')) + N('d')
    matches = search_all(g, ex)
    assert matches == [[1, 2, 3, 4]]
def test_branching_expression3():
    """Same topology as test_branching_expression, exercising the (b & c) branch."""
    g = nx.DiGraph()
    #   b
    # a    d
    #   c
    add_nodes(g, ['a', 'b', 'c', 'd'])
    g.add_edges_from([(1, 2), (1, 3), (2, 4), (3, 4)])
    # Dead code removed: the original composed N('b') & (N('e') | N('c')) into
    # locals and a first `ex` that were immediately overwritten by the
    # expression below, which is the only one actually searched.
    ex = N('a') + (N('b') & N('c')) + N('d')
    matches = search_all(g, ex)
    assert matches == [[1, 2, 3, 4]]
def test_branching_expression2():
    """A three-way branch (b & c & e) between 'a' and 'd' matches every branch."""
    g = nx.DiGraph()
    #   b
    # a e  d
    #   c
    add_nodes(g, ['a', 'b', 'c', 'd', 'e'])
    g.add_edges_from([(1, 2), (1, 3), (2, 4), (3, 4), (1, 5), (5, 4)])
    ex = N('a') + (N('b') & N('c') & N('e')) + N('d')
    matches = search_all(g, ex)
    assert matches == [[1, 2, 3, 5, 4]]
ace18b899fb50059ec20bd0206bcf4b295f7ea6c | 9,299 | py | Python | Algorithms/Sentiment-Analysis---Movie-Reviews/kNN/porter.py | MWTA/Text-Mining | d64250ed9f7d8f999bb925ec01c041062b1f4145 | [
"MIT"
] | null | null | null | Algorithms/Sentiment-Analysis---Movie-Reviews/kNN/porter.py | MWTA/Text-Mining | d64250ed9f7d8f999bb925ec01c041062b1f4145 | [
"MIT"
] | null | null | null | Algorithms/Sentiment-Analysis---Movie-Reviews/kNN/porter.py | MWTA/Text-Mining | d64250ed9f7d8f999bb925ec01c041062b1f4145 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
class PorterStemmer:
    """Porter stemming algorithm (M. F. Porter, 1980): reduces English words
    to their stems, e.g. "caresses" -> "caress", "relational" -> "relate".

    The word being stemmed lives in the buffer ``self.b`` between indices
    ``self.k0`` and ``self.k`` (inclusive). ``self.j`` is a general offset
    into the string, set by ``ends()`` to mark where a matched suffix begins.
    ``stem()`` is the public entry point; the other methods are the
    algorithm's measure/condition helpers and its five suffix-removal steps.
    """

    def __init__(self):
        self.b = ""  # buffer for word to be stemmed
        self.k = 0   # index of the last character of the word in b
        self.k0 = 0  # index of the first character of the word in b
        self.j = 0   # general offset into the string

    def cons(self, i):
        """cons(i) is TRUE <=> b[i] is a consonant."""
        if self.b[i] == 'a' or self.b[i] == 'e' or self.b[i] == 'i' or self.b[i] == 'o' or self.b[i] == 'u':
            return 0
        if self.b[i] == 'y':
            # 'y' at the start of the stem is a consonant; elsewhere it is a
            # consonant exactly when the previous letter is a vowel.
            if i == self.k0:
                return 1
            else:
                return (not self.cons(i - 1))
        return 1

    def m(self):
        """m() measures the number of consonant sequences between k0 and j.

        If c is a consonant sequence and v a vowel sequence, and <..>
        indicates arbitrary presence:
            <c><v>       gives 0
            <c>vc<v>     gives 1
            <c>vcvc<v>   gives 2, etc.
        """
        n = 0
        i = self.k0
        # skip the optional leading consonant sequence
        while 1:
            if i > self.j:
                return n
            if not self.cons(i):
                break
            i = i + 1
        i = i + 1
        # count each subsequent vc pair
        while 1:
            while 1:
                if i > self.j:
                    return n
                if self.cons(i):
                    break
                i = i + 1
            i = i + 1
            n = n + 1
            while 1:
                if i > self.j:
                    return n
                if not self.cons(i):
                    break
                i = i + 1
            i = i + 1

    def vowelinstem(self):
        """vowelinstem() is TRUE <=> k0,...j contains a vowel"""
        for i in range(self.k0, self.j + 1):
            if not self.cons(i):
                return 1
        return 0

    def doublec(self, j):
        """doublec(j) is TRUE <=> j,(j-1) contain a double consonant."""
        if j < (self.k0 + 1):
            return 0
        if (self.b[j] != self.b[j-1]):
            return 0
        return self.cons(j)

    def cvc(self, i):
        """cvc(i) is TRUE <=> i-2,i-1,i has the form consonant - vowel - consonant
        and also if the second c is not w,x or y.

        Used to restore a trailing 'e' on short words, e.g. cav(e), lov(e).
        """
        if i < (self.k0 + 2) or not self.cons(i) or self.cons(i-1) or not self.cons(i-2):
            return 0
        ch = self.b[i]
        if ch == 'w' or ch == 'x' or ch == 'y':
            return 0
        return 1

    def ends(self, s):
        """ends(s) is TRUE <=> k0,...k ends with the string s.

        On success, sets self.j to the index just before the suffix.
        """
        length = len(s)
        if s[length - 1] != self.b[self.k]:
            # cheap first-character-of-suffix check before the slice compare
            return 0
        if length > (self.k - self.k0 + 1):
            return 0
        if self.b[self.k-length+1:self.k+1] != s:
            return 0
        self.j = self.k - length
        return 1

    def setto(self, s):
        """setto(s) sets (j+1),...k to the characters in the string s, readjusting k."""
        length = len(s)
        self.b = self.b[:self.j+1] + s + self.b[self.j+length+1:]
        self.k = self.j + length

    def r(self, s):
        """r(s) replaces the current suffix with s, but only when m() > 0."""
        if self.m() > 0:
            self.setto(s)

    def step1ab(self):
        """step1ab() gets rid of plurals and -ed or -ing, e.g.

            caresses -> caress, ponies -> poni, feed -> feed,
            agreed -> agree, plastered -> plaster, motoring -> motor
        """
        if self.b[self.k] == 's':
            if self.ends("sses"):
                self.k = self.k - 2
            elif self.ends("ies"):
                self.setto("i")
            elif self.b[self.k - 1] != 's':
                self.k = self.k - 1
        if self.ends("eed"):
            if self.m() > 0:
                self.k = self.k - 1
        elif (self.ends("ed") or self.ends("ing")) and self.vowelinstem():
            self.k = self.j
            if self.ends("at"):   self.setto("ate")
            elif self.ends("bl"): self.setto("ble")
            elif self.ends("iz"): self.setto("ize")
            elif self.doublec(self.k):
                self.k = self.k - 1
                ch = self.b[self.k]
                if ch == 'l' or ch == 's' or ch == 'z':
                    self.k = self.k + 1
            elif (self.m() == 1 and self.cvc(self.k)):
                self.setto("e")

    def step1c(self):
        """step1c() turns terminal y to i when there is another vowel in the stem."""
        if (self.ends("y") and self.vowelinstem()):
            self.b = self.b[:self.k] + 'i' + self.b[self.k+1:]

    def step2(self):
        """step2() maps double suffices to single ones.

        E.g. -ization (-ize + -ation) becomes -ize. The suffix must be
        preceded by m() > 0 (checked inside r()).
        """
        if self.b[self.k - 1] == 'a':
            if self.ends("ational"):   self.r("ate")
            elif self.ends("tional"):  self.r("tion")
        elif self.b[self.k - 1] == 'c':
            if self.ends("enci"):      self.r("ence")
            elif self.ends("anci"):    self.r("ance")
        elif self.b[self.k - 1] == 'e':
            if self.ends("izer"):      self.r("ize")
        elif self.b[self.k - 1] == 'l':
            if self.ends("bli"):       self.r("ble")
            elif self.ends("alli"):    self.r("al")
            elif self.ends("entli"):   self.r("ent")
            elif self.ends("eli"):     self.r("e")
            elif self.ends("ousli"):   self.r("ous")
        elif self.b[self.k - 1] == 'o':
            if self.ends("ization"):   self.r("ize")
            elif self.ends("ation"):   self.r("ate")
            elif self.ends("ator"):    self.r("ate")
        elif self.b[self.k - 1] == 's':
            if self.ends("alism"):     self.r("al")
            elif self.ends("iveness"): self.r("ive")
            elif self.ends("fulness"): self.r("ful")
            elif self.ends("ousness"): self.r("ous")
        elif self.b[self.k - 1] == 't':
            if self.ends("aliti"):     self.r("al")
            elif self.ends("iviti"):   self.r("ive")
            elif self.ends("biliti"):  self.r("ble")
        elif self.b[self.k - 1] == 'g':
            if self.ends("logi"):      self.r("log")

    def step3(self):
        """step3() deals with -ic-, -full, -ness etc. similar strategy to step2."""
        if self.b[self.k] == 'e':
            if self.ends("icate"):     self.r("ic")
            elif self.ends("ative"):   self.r("")
            elif self.ends("alize"):   self.r("al")
        elif self.b[self.k] == 'i':
            if self.ends("iciti"):     self.r("ic")
        elif self.b[self.k] == 'l':
            if self.ends("ical"):      self.r("ic")
            elif self.ends("ful"):     self.r("")
        elif self.b[self.k] == 's':
            if self.ends("ness"):      self.r("")

    def step4(self):
        """step4() takes off -ant, -ence etc., in context <c>vcvc<v>."""
        if self.b[self.k - 1] == 'a':
            if self.ends("al"): pass
            else: return
        elif self.b[self.k - 1] == 'c':
            if self.ends("ance"): pass
            elif self.ends("ence"): pass
            else: return
        elif self.b[self.k - 1] == 'e':
            if self.ends("er"): pass
            else: return
        elif self.b[self.k - 1] == 'i':
            if self.ends("ic"): pass
            else: return
        elif self.b[self.k - 1] == 'l':
            if self.ends("able"): pass
            elif self.ends("ible"): pass
            else: return
        elif self.b[self.k - 1] == 'n':
            if self.ends("ant"): pass
            elif self.ends("ement"): pass
            elif self.ends("ment"): pass
            elif self.ends("ent"): pass
            else: return
        elif self.b[self.k - 1] == 'o':
            if self.ends("ion") and (self.b[self.j] == 's' or self.b[self.j] == 't'): pass
            elif self.ends("ou"): pass
            # takes care of -ous
            else: return
        elif self.b[self.k - 1] == 's':
            if self.ends("ism"): pass
            else: return
        elif self.b[self.k - 1] == 't':
            if self.ends("ate"): pass
            elif self.ends("iti"): pass
            else: return
        elif self.b[self.k - 1] == 'u':
            if self.ends("ous"): pass
            else: return
        elif self.b[self.k - 1] == 'v':
            if self.ends("ive"): pass
            else: return
        elif self.b[self.k - 1] == 'z':
            if self.ends("ize"): pass
            else: return
        else:
            return
        # only strip the suffix when the remaining stem is long enough
        if self.m() > 1:
            self.k = self.j

    def step5(self):
        """step5() removes a final -e if m() > 1, and changes -ll to -l if m() > 1."""
        self.j = self.k
        if self.b[self.k] == 'e':
            a = self.m()
            if a > 1 or (a == 1 and not self.cvc(self.k-1)):
                self.k = self.k - 1
        if self.b[self.k] == 'l' and self.doublec(self.k) and self.m() > 1:
            self.k = self.k -1

    def stem(self, p, i, j):
        """Stem the word held in p[i:j+1] and return the stemmed form.

        Words of length 1 or 2 (within the i..j window) are returned
        unchanged; note that in that case the whole buffer p is returned,
        not just the i..j slice.
        """
        self.b = p
        self.k = j
        self.k0 = i
        if self.k <= self.k0 + 1:
            return self.b
        # run the five suffix-stripping passes in order
        self.step1ab()
        self.step1c()
        self.step2()
        self.step3()
        self.step4()
        self.step5()
        return self.b[self.k0:self.k+1]
if __name__ == '__main__':
    # Command-line driver: stem every alphabetic token in each file given as
    # an argument, echoing all other characters lowercased.
    # NOTE(review): uses a Python 2 print statement; this script will not
    # parse under Python 3.
    p = PorterStemmer()
    if len(sys.argv) > 1:
        for f in sys.argv[1:]:
            infile = open(f, 'r')
            while 1:
                output = ''
                word = ''
                line = infile.readline()
                if line == '':
                    break
                for c in line:
                    if c.isalpha():
                        # accumulate the current word, lowercased
                        word += c.lower()
                    else:
                        # non-letter ends the word: stem and flush it
                        if word:
                            output += p.stem(word, 0,len(word)-1)
                            word = ''
                        output += c.lower()
                print output,
            infile.close()
ace18c3fed1adc8bc2273a404ab861f57575bb6d | 34,800 | py | Python | datadog_checks_base/datadog_checks/base/checks/openmetrics/mixins.py | justinsousa/integrations-core | 223e337492a04de517bc35ec85ddf921108fd8d2 | [
"BSD-3-Clause"
] | null | null | null | datadog_checks_base/datadog_checks/base/checks/openmetrics/mixins.py | justinsousa/integrations-core | 223e337492a04de517bc35ec85ddf921108fd8d2 | [
"BSD-3-Clause"
] | null | null | null | datadog_checks_base/datadog_checks/base/checks/openmetrics/mixins.py | justinsousa/integrations-core | 223e337492a04de517bc35ec85ddf921108fd8d2 | [
"BSD-3-Clause"
] | null | null | null | # (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from fnmatch import fnmatchcase
from math import isinf, isnan
from os.path import isfile
import requests
from prometheus_client.parser import text_fd_to_metric_families
from six import PY3, iteritems, itervalues, string_types
from urllib3 import disable_warnings
from urllib3.exceptions import InsecureRequestWarning
from ...config import is_affirmative
from ...errors import CheckException
from ...utils.common import to_string
from .. import AgentCheck
if PY3:
    # Python 3 has no separate `long` type; alias it so the code below can
    # keep referring to `long` on both major versions.
    long = int
class OpenMetricsScraperMixin(object):
# pylint: disable=E1101
# This class is not supposed to be used by itself, it provides scraping behavior but
# need to be within a check in the end
REQUESTS_CHUNK_SIZE = 1024 * 10 # use 10kb as chunk size when using the Stream feature in requests.get
# indexes in the sample tuple of core.Metric
SAMPLE_NAME = 0
SAMPLE_LABELS = 1
SAMPLE_VALUE = 2
TELEMETRY_GAUGE_MESSAGE_SIZE = "payload.size"
TELEMETRY_COUNTER_METRICS_BLACKLIST_COUNT = "metrics.blacklist.count"
TELEMETRY_COUNTER_METRICS_INPUT_COUNT = "metrics.input.count"
TELEMETRY_COUNTER_METRICS_IGNORE_COUNT = "metrics.ignored.count"
TELEMETRY_COUNTER_METRICS_PROCESS_COUNT = "metrics.processed.count"
METRIC_TYPES = ['counter', 'gauge', 'summary', 'histogram']
KUBERNETES_TOKEN_PATH = '/var/run/secrets/kubernetes.io/serviceaccount/token'
    def __init__(self, *args, **kwargs):
        """Forward all arguments to the base-class initializer."""
        # Initialize AgentCheck's base class
        super(OpenMetricsScraperMixin, self).__init__(*args, **kwargs)
    def create_scraper_configuration(self, instance=None):
        """Build and return the scraper configuration dict for one instance.

        The result merges three layers, in increasing priority: hardcoded
        defaults, the namespace's entry in ``self.default_instances``, and the
        *instance* itself. Keys with a leading underscore (``_dry_run``,
        ``_label_mapping``, ...) are internal runtime state mutated by the
        scraping methods across check runs.

        :param instance: the check instance dict; when omitted, a default
            mixin configuration is produced
        :raises CheckException: when no ``prometheus_url`` or no namespace can
            be determined for a non-empty instance
        """
        # We can choose to create a default mixin configuration for an empty instance
        if instance is None:
            instance = {}

        # Create an empty configuration
        config = {}

        # Set the endpoint
        endpoint = instance.get('prometheus_url')
        if instance and endpoint is None:
            raise CheckException("You have to define a prometheus_url for each prometheus instance")

        config['prometheus_url'] = endpoint

        # `NAMESPACE` is the prefix metrics will have. Need to be hardcoded in the
        # child check class.
        namespace = instance.get('namespace')
        # Check if we have a namespace
        if instance and namespace is None:
            if self.default_namespace is None:
                raise CheckException("You have to define a namespace for each prometheus check")
            namespace = self.default_namespace

        config['namespace'] = namespace

        # Retrieve potential default instance settings for the namespace
        default_instance = self.default_instances.get(namespace, {})

        # `metrics_mapper` is a dictionary where the keys are the metrics to capture
        # and the values are the corresponding metrics names to have in datadog.
        # Note: it is empty in the parent class but will need to be
        # overloaded/hardcoded in the final check not to be counted as custom metric.

        # Metrics are preprocessed if no mapping
        metrics_mapper = {}
        # We merge list and dictionaries from optional defaults & instance settings
        metrics = default_instance.get('metrics', []) + instance.get('metrics', [])
        for metric in metrics:
            # a bare string maps a metric to itself; a dict provides renames
            if isinstance(metric, string_types):
                metrics_mapper[metric] = metric
            else:
                metrics_mapper.update(metric)

        config['metrics_mapper'] = metrics_mapper

        # `_metrics_wildcards` holds the potential wildcards to match for metrics
        config['_metrics_wildcards'] = None

        # `prometheus_metrics_prefix` allows to specify a prefix that all
        # prometheus metrics should have. This can be used when the prometheus
        # endpoint we are scrapping allows to add a custom prefix to it's
        # metrics.
        config['prometheus_metrics_prefix'] = instance.get(
            'prometheus_metrics_prefix', default_instance.get('prometheus_metrics_prefix', '')
        )

        # `label_joins` holds the configuration for extracting 1:1 labels from
        # a target metric to all metric matching the label, example:
        # self.label_joins = {
        #     'kube_pod_info': {
        #         'label_to_match': 'pod',
        #         'labels_to_get': ['node', 'host_ip']
        #     }
        # }
        config['label_joins'] = default_instance.get('label_joins', {})
        config['label_joins'].update(instance.get('label_joins', {}))

        # `_label_mapping` holds the additionals label info to add for a specific
        # label value, example:
        # self._label_mapping = {
        #     'pod': {
        #         'dd-agent-9s1l1': [("node","yolo"),("host_ip","yey")]
        #     }
        # }
        config['_label_mapping'] = {}

        # `_active_label_mapping` holds a dictionary of label values found during the run
        # to cleanup the label_mapping of unused values, example:
        # self._active_label_mapping = {
        #     'pod': {
        #         'dd-agent-9s1l1': True
        #     }
        # }
        config['_active_label_mapping'] = {}

        # `_watched_labels` holds the list of label to watch for enrichment
        config['_watched_labels'] = set()

        # the first run is a dry run (label discovery only) when label joins are set
        config['_dry_run'] = True

        # Some metrics are ignored because they are duplicates or introduce a
        # very high cardinality. Metrics included in this list will be silently
        # skipped without a 'Unable to handle metric' debug line in the logs
        config['ignore_metrics'] = instance.get('ignore_metrics', default_instance.get('ignore_metrics', []))

        # If you want to send the buckets as tagged values when dealing with histograms,
        # set send_histograms_buckets to True, set to False otherwise.
        config['send_histograms_buckets'] = is_affirmative(
            instance.get('send_histograms_buckets', default_instance.get('send_histograms_buckets', True))
        )

        # If you want to send `counter` metrics as monotonic counts, set this value to True.
        # Set to False if you want to instead send those metrics as `gauge`.
        config['send_monotonic_counter'] = is_affirmative(
            instance.get('send_monotonic_counter', default_instance.get('send_monotonic_counter', True))
        )

        # If the `labels_mapper` dictionary is provided, the metrics labels names
        # in the `labels_mapper` will use the corresponding value as tag name
        # when sending the gauges.
        config['labels_mapper'] = default_instance.get('labels_mapper', {})
        config['labels_mapper'].update(instance.get('labels_mapper', {}))
        # Rename bucket "le" label to "upper_bound"
        config['labels_mapper']['le'] = 'upper_bound'

        # `exclude_labels` is an array of labels names to exclude. Those labels
        # will just not be added as tags when submitting the metric.
        config['exclude_labels'] = default_instance.get('exclude_labels', []) + instance.get('exclude_labels', [])

        # `type_overrides` is a dictionary where the keys are prometheus metric names
        # and the values are a metric type (name as string) to use instead of the one
        # listed in the payload. It can be used to force a type on untyped metrics.
        # Note: it is empty in the parent class but will need to be
        # overloaded/hardcoded in the final check not to be counted as custom metric.
        config['type_overrides'] = default_instance.get('type_overrides', {})
        config['type_overrides'].update(instance.get('type_overrides', {}))

        # Some metrics are retrieved from differents hosts and often
        # a label can hold this information, this transfers it to the hostname
        config['label_to_hostname'] = instance.get('label_to_hostname', default_instance.get('label_to_hostname', None))

        # In combination to label_as_hostname, allows to add a common suffix to the hostnames
        # submitted. This can be used for instance to discriminate hosts between clusters.
        config['label_to_hostname_suffix'] = instance.get(
            'label_to_hostname_suffix', default_instance.get('label_to_hostname_suffix', None)
        )

        # Add a 'health' service check for the prometheus endpoint
        config['health_service_check'] = is_affirmative(
            instance.get('health_service_check', default_instance.get('health_service_check', True))
        )

        # Can either be only the path to the certificate and thus you should specify the private key
        # or it can be the path to a file containing both the certificate & the private key
        config['ssl_cert'] = instance.get('ssl_cert', default_instance.get('ssl_cert', None))

        # Needed if the certificate does not include the private key
        #
        # /!\ The private key to your local certificate must be unencrypted.
        # Currently, Requests does not support using encrypted keys.
        config['ssl_private_key'] = instance.get('ssl_private_key', default_instance.get('ssl_private_key', None))

        # The path to the trusted CA used for generating custom certificates
        config['ssl_ca_cert'] = instance.get('ssl_ca_cert', default_instance.get('ssl_ca_cert', None))

        # Whether or not to validate SSL certificates
        config['ssl_verify'] = is_affirmative(instance.get('ssl_verify', default_instance.get('ssl_verify', True)))

        # Extra http headers to be sent when polling endpoint
        config['extra_headers'] = default_instance.get('extra_headers', {})
        config['extra_headers'].update(instance.get('extra_headers', {}))

        # Timeout used during the network request
        config['prometheus_timeout'] = instance.get(
            'prometheus_timeout', default_instance.get('prometheus_timeout', 10)
        )

        # Authentication used when polling endpoint
        config['username'] = instance.get('username', default_instance.get('username', None))
        config['password'] = instance.get('password', default_instance.get('password', None))

        # Custom tags that will be sent with each metric
        config['custom_tags'] = instance.get('tags', [])

        # Additional tags to be sent with each metric
        config['_metric_tags'] = []

        # List of strings to filter the input text payload on. If any line contains
        # one of these strings, it will be filtered out before being parsed.
        # INTERNAL FEATURE, might be removed in future versions
        config['_text_filter_blacklist'] = []

        # Whether or not to use the service account bearer token for authentication
        # if 'bearer_token_path' is not set, we use /var/run/secrets/kubernetes.io/serviceaccount/token
        # as a default path to get the token.
        config['bearer_token_auth'] = is_affirmative(
            instance.get('bearer_token_auth', default_instance.get('bearer_token_auth', False))
        )

        # Can be used to get a service account bearer token from files
        # other than /var/run/secrets/kubernetes.io/serviceaccount/token
        # 'bearer_token_auth' should be enabled.
        config['bearer_token_path'] = instance.get('bearer_token_path', default_instance.get('bearer_token_path', None))

        # The service account bearer token to be used for authentication
        config['_bearer_token'] = self._get_bearer_token(config['bearer_token_auth'], config['bearer_token_path'])

        config['telemetry'] = is_affirmative(instance.get('telemetry', default_instance.get('telemetry', False)))

        return config
    def parse_metric_family(self, response, scraper_config):
        """
        Parse the MetricFamily from a valid requests.Response object to provide a MetricFamily object (see [0])

        The text format uses iter_lines() generator.

        Families with a type (after `type_overrides` is applied) not listed in
        METRIC_TYPES are silently dropped.

        :param response: requests.Response
        :return: core.Metric
        """
        # stream the payload line by line instead of loading it whole
        input_gen = response.iter_lines(chunk_size=self.REQUESTS_CHUNK_SIZE, decode_unicode=True)
        if scraper_config['_text_filter_blacklist']:
            input_gen = self._text_filter_input(input_gen, scraper_config)

        for metric in text_fd_to_metric_families(input_gen):
            self._send_telemetry_counter(
                self.TELEMETRY_COUNTER_METRICS_INPUT_COUNT, len(metric.samples), scraper_config
            )
            # apply any configured type override before filtering on type
            metric.type = scraper_config['type_overrides'].get(metric.name, metric.type)
            if metric.type not in self.METRIC_TYPES:
                continue
            metric.name = self._remove_metric_prefix(metric.name, scraper_config)
            yield metric
def _text_filter_input(self, input_gen, scraper_config):
"""
Filters out the text input line by line to avoid parsing and processing
metrics we know we don't want to process. This only works on `text/plain`
payloads, and is an INTERNAL FEATURE implemented for the kubelet check
:param input_get: line generator
:output: generator of filtered lines
"""
for line in input_gen:
for item in scraper_config['_text_filter_blacklist']:
if item in line:
self._send_telemetry_counter(self.TELEMETRY_COUNTER_METRICS_BLACKLIST_COUNT, 1, scraper_config)
break
else:
# No blacklist matches, passing the line through
yield line
def _remove_metric_prefix(self, metric, scraper_config):
prometheus_metrics_prefix = scraper_config['prometheus_metrics_prefix']
return metric[len(prometheus_metrics_prefix) :] if metric.startswith(prometheus_metrics_prefix) else metric
    def scrape_metrics(self, scraper_config):
        """
        Poll the data from prometheus and return the metrics as a generator.

        When label joins are configured, the very first run is a "dry run"
        that only records the labels to join; afterwards `_dry_run` is turned
        off and stale entries are garbage-collected from `_label_mapping`.
        The HTTP response is always closed when the generator finishes.
        """
        response = self.poll(scraper_config)
        if scraper_config['telemetry']:
            # prefer the advertised content-length; fall back to the body size
            if 'content-length' in response.headers:
                content_len = int(response.headers['content-length'])
            else:
                content_len = len(response.content)
            self._send_telemetry_gauge(self.TELEMETRY_GAUGE_MESSAGE_SIZE, content_len, scraper_config)
        try:
            # no dry run if no label joins
            if not scraper_config['label_joins']:
                scraper_config['_dry_run'] = False
            elif not scraper_config['_watched_labels']:
                # build the _watched_labels set
                for val in itervalues(scraper_config['label_joins']):
                    scraper_config['_watched_labels'].add(val['label_to_match'])

            for metric in self.parse_metric_family(response, scraper_config):
                yield metric

            # Set dry run off
            scraper_config['_dry_run'] = False
            # Garbage collect unused mapping and reset active labels
            for metric, mapping in list(iteritems(scraper_config['_label_mapping'])):
                for key in list(mapping):
                    if key not in scraper_config['_active_label_mapping'][metric]:
                        del scraper_config['_label_mapping'][metric][key]
            scraper_config['_active_label_mapping'] = {}
        finally:
            response.close()
def process(self, scraper_config, metric_transformers=None):
"""
Polls the data from prometheus and pushes them as gauges
`endpoint` is the metrics endpoint to use to poll metrics from Prometheus
Note that if the instance has a 'tags' attribute, it will be pushed
automatically as additional custom tags and added to the metrics
"""
for metric in self.scrape_metrics(scraper_config):
self.process_metric(metric, scraper_config, metric_transformers=metric_transformers)
def _telemetry_metric_name_with_namespace(self, metric_name, scraper_config):
return '{}.{}.{}'.format(scraper_config['namespace'], 'telemetry', metric_name)
def _send_telemetry_gauge(self, metric_name, val, scraper_config):
if scraper_config['telemetry']:
metric_name_with_namespace = self._telemetry_metric_name_with_namespace(metric_name, scraper_config)
# Determine the tags to send
custom_tags = scraper_config['custom_tags']
tags = list(custom_tags)
tags.extend(scraper_config['_metric_tags'])
self.gauge(metric_name_with_namespace, val, tags=tags)
def _send_telemetry_counter(self, metric_name, val, scraper_config, extra_tags=None):
if scraper_config['telemetry']:
metric_name_with_namespace = self._telemetry_metric_name_with_namespace(metric_name, scraper_config)
# Determine the tags to send
custom_tags = scraper_config['custom_tags']
tags = list(custom_tags)
tags.extend(scraper_config['_metric_tags'])
if extra_tags:
tags.extend(extra_tags)
self.count(metric_name_with_namespace, val, tags=tags)
    def _store_labels(self, metric, scraper_config):
        """Record the labels to join from a label-join source metric.

        For each sample of a metric listed in `label_joins`, stores in
        `_label_mapping` the labels_to_get values keyed by the value of
        label_to_match, so they can be attached to other metrics later.
        """
        # If targeted metric, store labels
        if metric.name in scraper_config['label_joins']:
            matching_label = scraper_config['label_joins'][metric.name]['label_to_match']
            for sample in metric.samples:
                # metadata-only metrics that are used for label joins are always equal to 1
                # this is required for metrics where all combinations of a state are sent
                # but only the active one is set to 1 (others are set to 0)
                # example: kube_pod_status_phase in kube-state-metrics
                if sample[self.SAMPLE_VALUE] != 1:
                    continue
                label_dict = dict()
                matching_value = None
                for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):
                    if label_name == matching_label:
                        matching_value = label_value
                    elif label_name in scraper_config['label_joins'][metric.name]['labels_to_get']:
                        label_dict[label_name] = label_value
                try:
                    # merge into any existing mapping for this matching value
                    if scraper_config['_label_mapping'][matching_label].get(matching_value):
                        scraper_config['_label_mapping'][matching_label][matching_value].update(label_dict)
                    else:
                        scraper_config['_label_mapping'][matching_label][matching_value] = label_dict
                except KeyError:
                    # first mapping seen for this matching label
                    if matching_value is not None:
                        scraper_config['_label_mapping'][matching_label] = {matching_value: label_dict}
    def _join_labels(self, metric, scraper_config):
        """Enrich a metric's samples with labels recorded by `_store_labels`.

        Any sample carrying a watched label gets the extra labels previously
        stored in `_label_mapping` for that label's value; the value is also
        marked active so its mapping survives the next garbage collection.
        """
        # Filter metric to see if we can enrich with joined labels
        if scraper_config['label_joins']:
            for sample in metric.samples:
                watched_labels = scraper_config['_watched_labels'].intersection(set(sample[self.SAMPLE_LABELS].keys()))
                for label_name in watched_labels:
                    # Set this label value as active
                    if label_name not in scraper_config['_active_label_mapping']:
                        scraper_config['_active_label_mapping'][label_name] = {}
                    scraper_config['_active_label_mapping'][label_name][sample[self.SAMPLE_LABELS][label_name]] = True
                    # If mapping found add corresponding labels
                    try:
                        for name, val in iteritems(
                            scraper_config['_label_mapping'][label_name][sample[self.SAMPLE_LABELS][label_name]]
                        ):
                            sample[self.SAMPLE_LABELS][name] = val
                    except KeyError:
                        # no stored mapping for this value yet; nothing to add
                        pass
    def process_metric(self, metric, scraper_config, metric_transformers=None):
        """
        Handle a prometheus metric according to the following flow:
        - search scraper_config['metrics_mapper'] for a prometheus.metric <--> datadog.metric mapping
        - call check method with the same name as the metric
        - log some info if none of the above worked

        `metric_transformers` is a dict of <metric name>:<function to run when the metric name is encountered>
        """
        # If targeted metric, store labels
        self._store_labels(metric, scraper_config)

        if metric.name in scraper_config['ignore_metrics']:
            self._send_telemetry_counter(
                self.TELEMETRY_COUNTER_METRICS_IGNORE_COUNT, len(metric.samples), scraper_config
            )
            return  # Ignore the metric

        self._send_telemetry_counter(self.TELEMETRY_COUNTER_METRICS_PROCESS_COUNT, len(metric.samples), scraper_config)

        if self._filter_metric(metric, scraper_config):
            return  # Ignore the metric

        # Filter metric to see if we can enrich with joined labels
        self._join_labels(metric, scraper_config)

        # during the label-discovery dry run nothing is submitted
        if scraper_config['_dry_run']:
            return

        try:
            self.submit_openmetric(scraper_config['metrics_mapper'][metric.name], metric, scraper_config)
        except KeyError:
            # no explicit mapping: fall back to transformers, then wildcards
            if metric_transformers is not None:
                if metric.name in metric_transformers:
                    try:
                        # Get the transformer function for this specific metric
                        transformer = metric_transformers[metric.name]
                        transformer(metric, scraper_config)
                    except Exception as err:
                        self.log.warning("Error handling metric: {} - error: {}".format(metric.name, err))
                else:
                    self.log.debug(
                        "Unable to handle metric: {0} - error: "
                        "No handler function named '{0}' defined".format(metric.name)
                    )
            else:
                # build the wildcard list if first pass
                if scraper_config['_metrics_wildcards'] is None:
                    scraper_config['_metrics_wildcards'] = [x for x in scraper_config['metrics_mapper'] if '*' in x]

                # try matching wildcard (generic check)
                for wildcard in scraper_config['_metrics_wildcards']:
                    if fnmatchcase(metric.name, wildcard):
                        self.submit_openmetric(metric.name, metric, scraper_config)
def poll(self, scraper_config, headers=None):
"""
Custom headers can be added to the default headers.
Returns a valid requests.Response, raise requests.HTTPError if the status code of the requests.Response
isn't valid - see response.raise_for_status()
The caller needs to close the requests.Response
:param endpoint: string url endpoint
:param headers: extra headers
:return: requests.Response
"""
endpoint = scraper_config.get('prometheus_url')
# Should we send a service check for when we make a request
health_service_check = scraper_config['health_service_check']
service_check_name = '{}{}'.format(scraper_config['namespace'], '.prometheus.health')
service_check_tags = ['endpoint:{}'.format(endpoint)]
service_check_tags.extend(scraper_config['custom_tags'])
try:
response = self.send_request(endpoint, scraper_config, headers)
except requests.exceptions.SSLError:
self.log.error("Invalid SSL settings for requesting {} endpoint".format(endpoint))
raise
except IOError:
if health_service_check:
self.service_check(service_check_name, AgentCheck.CRITICAL, tags=service_check_tags)
raise
try:
response.raise_for_status()
if health_service_check:
self.service_check(service_check_name, AgentCheck.OK, tags=service_check_tags)
return response
except requests.HTTPError:
response.close()
if health_service_check:
self.service_check(service_check_name, AgentCheck.CRITICAL, tags=service_check_tags)
raise
def send_request(self, endpoint, scraper_config, headers=None):
# Determine the headers
if headers is None:
headers = {}
if 'accept-encoding' not in headers:
headers['accept-encoding'] = 'gzip'
headers.update(scraper_config['extra_headers'])
# Add the bearer token to headers
bearer_token = scraper_config['_bearer_token']
if bearer_token is not None:
auth_header = {'Authorization': 'Bearer {}'.format(bearer_token)}
headers.update(auth_header)
# Determine the SSL verification settings
cert = None
if isinstance(scraper_config['ssl_cert'], string_types):
if isinstance(scraper_config['ssl_private_key'], string_types):
cert = (scraper_config['ssl_cert'], scraper_config['ssl_private_key'])
else:
cert = scraper_config['ssl_cert']
verify = scraper_config['ssl_verify']
# TODO: deprecate use as `ssl_verify` boolean
if scraper_config['ssl_ca_cert'] is False:
verify = False
if isinstance(scraper_config['ssl_ca_cert'], string_types):
verify = scraper_config['ssl_ca_cert']
elif verify is False:
disable_warnings(InsecureRequestWarning)
# Determine the authentication settings
username = scraper_config['username']
password = scraper_config['password']
auth = (username, password) if username is not None and password is not None else None
return requests.get(
endpoint,
headers=headers,
stream=True,
timeout=scraper_config['prometheus_timeout'],
cert=cert,
verify=verify,
auth=auth,
)
def get_hostname_for_sample(self, sample, scraper_config):
"""
Expose the label_to_hostname mapping logic to custom handler methods
"""
return self._get_hostname(None, sample, scraper_config)
def submit_openmetric(self, metric_name, metric, scraper_config, hostname=None):
"""
For each sample in the metric, report it as a gauge with all labels as tags
except if a labels dict is passed, in which case keys are label names we'll extract
and corresponding values are tag names we'll use (eg: {'node': 'node'}).
Histograms generate a set of values instead of a unique metric.
send_histograms_buckets is used to specify if yes or no you want to
send the buckets as tagged values when dealing with histograms.
`custom_tags` is an array of 'tag:value' that will be added to the
metric when sending the gauge to Datadog.
"""
if metric.type in ["gauge", "counter", "rate"]:
metric_name_with_namespace = '{}.{}'.format(scraper_config['namespace'], metric_name)
for sample in metric.samples:
val = sample[self.SAMPLE_VALUE]
if not self._is_value_valid(val):
self.log.debug("Metric value is not supported for metric {}".format(sample[self.SAMPLE_NAME]))
continue
custom_hostname = self._get_hostname(hostname, sample, scraper_config)
# Determine the tags to send
tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname=custom_hostname)
if metric.type == "counter" and scraper_config['send_monotonic_counter']:
self.monotonic_count(metric_name_with_namespace, val, tags=tags, hostname=custom_hostname)
elif metric.type == "rate":
self.rate(metric_name_with_namespace, val, tags=tags, hostname=custom_hostname)
else:
self.gauge(metric_name_with_namespace, val, tags=tags, hostname=custom_hostname)
elif metric.type == "histogram":
self._submit_gauges_from_histogram(metric_name, metric, scraper_config)
elif metric.type == "summary":
self._submit_gauges_from_summary(metric_name, metric, scraper_config)
else:
self.log.error("Metric type {} unsupported for metric {}.".format(metric.type, metric_name))
def _get_hostname(self, hostname, sample, scraper_config):
"""
If hostname is None, look at label_to_hostname setting
"""
if (
hostname is None
and scraper_config['label_to_hostname'] is not None
and scraper_config['label_to_hostname'] in sample[self.SAMPLE_LABELS]
):
hostname = sample[self.SAMPLE_LABELS][scraper_config['label_to_hostname']]
suffix = scraper_config['label_to_hostname_suffix']
if suffix is not None:
hostname += suffix
return hostname
def _submit_gauges_from_summary(self, metric_name, metric, scraper_config, hostname=None):
"""
Extracts metrics from a prometheus summary metric and sends them as gauges
"""
for sample in metric.samples:
val = sample[self.SAMPLE_VALUE]
if not self._is_value_valid(val):
self.log.debug("Metric value is not supported for metric {}".format(sample[self.SAMPLE_NAME]))
continue
custom_hostname = self._get_hostname(hostname, sample, scraper_config)
if sample[self.SAMPLE_NAME].endswith("_sum"):
tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname=custom_hostname)
self.gauge(
"{}.{}.sum".format(scraper_config['namespace'], metric_name),
val,
tags=tags,
hostname=custom_hostname,
)
elif sample[self.SAMPLE_NAME].endswith("_count"):
tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname=custom_hostname)
self.gauge(
"{}.{}.count".format(scraper_config['namespace'], metric_name),
val,
tags=tags,
hostname=custom_hostname,
)
else:
sample[self.SAMPLE_LABELS]["quantile"] = str(float(sample[self.SAMPLE_LABELS]["quantile"]))
tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname=custom_hostname)
self.gauge(
"{}.{}.quantile".format(scraper_config['namespace'], metric_name),
val,
tags=tags,
hostname=custom_hostname,
)
def _submit_gauges_from_histogram(self, metric_name, metric, scraper_config, hostname=None):
"""
Extracts metrics from a prometheus histogram and sends them as gauges
"""
for sample in metric.samples:
val = sample[self.SAMPLE_VALUE]
if not self._is_value_valid(val):
self.log.debug("Metric value is not supported for metric {}".format(sample[self.SAMPLE_NAME]))
continue
custom_hostname = self._get_hostname(hostname, sample, scraper_config)
if sample[self.SAMPLE_NAME].endswith("_sum"):
tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname)
self.gauge(
"{}.{}.sum".format(scraper_config['namespace'], metric_name),
val,
tags=tags,
hostname=custom_hostname,
)
elif sample[self.SAMPLE_NAME].endswith("_count"):
tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname)
if scraper_config['send_histograms_buckets']:
tags.append("upper_bound:none")
self.gauge(
"{}.{}.count".format(scraper_config['namespace'], metric_name),
val,
tags=tags,
hostname=custom_hostname,
)
elif (
scraper_config['send_histograms_buckets']
and sample[self.SAMPLE_NAME].endswith("_bucket")
and "Inf" not in sample[self.SAMPLE_LABELS]["le"]
):
sample[self.SAMPLE_LABELS]["le"] = str(float(sample[self.SAMPLE_LABELS]["le"]))
tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname)
self.gauge(
"{}.{}.count".format(scraper_config['namespace'], metric_name),
val,
tags=tags,
hostname=custom_hostname,
)
def _metric_tags(self, metric_name, val, sample, scraper_config, hostname=None):
custom_tags = scraper_config['custom_tags']
_tags = list(custom_tags)
_tags.extend(scraper_config['_metric_tags'])
for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):
if label_name not in scraper_config['exclude_labels']:
tag_name = scraper_config['labels_mapper'].get(label_name, label_name)
_tags.append('{}:{}'.format(to_string(tag_name), to_string(label_value)))
return self._finalize_tags_to_submit(
_tags, metric_name, val, sample, custom_tags=custom_tags, hostname=hostname
)
def _is_value_valid(self, val):
return not (isnan(val) or isinf(val))
def _get_bearer_token(self, bearer_token_auth, bearer_token_path):
if bearer_token_auth is False:
return None
path = None
if bearer_token_path is not None:
if isfile(bearer_token_path):
path = bearer_token_path
else:
self.log.error("File not found: {}".format(bearer_token_path))
elif isfile(self.KUBERNETES_TOKEN_PATH):
path = self.KUBERNETES_TOKEN_PATH
if path is None:
self.log.error("Cannot get bearer token from bearer_token_path or auto discovery")
raise IOError("Cannot get bearer token from bearer_token_path or auto discovery")
try:
with open(path, 'r') as f:
return f.read().rstrip()
except Exception as err:
self.log.error("Cannot get bearer token from path: {} - error: {}".format(path, err))
raise
| 48 | 120 | 0.635517 |
ace18dbc44f4f4f849165a8a6fccb02ba46dc10f | 35,003 | py | Python | tensorflow/python/kernel_tests/cond_v2_test.py | wenming2014/tensorflow | a102a6a71844e194f3946f6318768c5367f1f16b | [
"Apache-2.0"
] | 5 | 2018-07-04T22:14:02.000Z | 2018-07-04T22:21:43.000Z | tensorflow/python/kernel_tests/cond_v2_test.py | wenming2014/tensorflow | a102a6a71844e194f3946f6318768c5367f1f16b | [
"Apache-2.0"
] | null | null | null | tensorflow/python/kernel_tests/cond_v2_test.py | wenming2014/tensorflow | a102a6a71844e194f3946f6318768c5367f1f16b | [
"Apache-2.0"
] | 2 | 2019-02-26T16:21:15.000Z | 2020-12-04T17:48:17.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for cond_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import cond_v2
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver
from tensorflow.python.util import compat
class CondV2Test(test.TestCase):
def _testCond(self, true_fn, false_fn, train_vals, feed_dict=None):
if not feed_dict:
feed_dict = {}
with self.session(graph=ops.get_default_graph()) as sess:
pred = array_ops.placeholder(dtypes.bool, name="pred")
expected = control_flow_ops.cond(pred, true_fn, false_fn, name="expected")
actual = cond_v2.cond_v2(pred, true_fn, false_fn, name="actual")
expected_grad = gradients_impl.gradients(expected, train_vals)
actual_grad = gradients_impl.gradients(actual, train_vals)
sess_run_args = {pred: True}
sess_run_args.update(feed_dict)
expected_val, actual_val, expected_grad_val, actual_grad_val = sess.run(
(expected, actual, expected_grad, actual_grad), sess_run_args)
self.assertEqual(expected_val, actual_val)
self.assertEqual(expected_grad_val, actual_grad_val)
sess_run_args = {pred: False}
sess_run_args.update(feed_dict)
expected_val, actual_val, expected_grad_val, actual_grad_val = sess.run(
(expected, actual, expected_grad, actual_grad), sess_run_args)
self.assertEqual(expected_val, actual_val)
self.assertEqual(expected_grad_val, actual_grad_val)
def testBasic(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return x * 2.0
def false_fn():
return y * 3.0
self._testCond(true_fn, false_fn, [x])
self._testCond(true_fn, false_fn, [x, y])
self._testCond(true_fn, false_fn, [y])
def testMultipleOutputs(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(3.0, name="y")
def true_fn():
return x * y, y
def false_fn():
return x, y * 3.0
self._testCond(true_fn, false_fn, [x])
self._testCond(true_fn, false_fn, [x, y])
self._testCond(true_fn, false_fn, [y])
def testBasic2(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return x * y * 2.0
def false_fn():
return 2.0
self._testCond(true_fn, false_fn, [x])
self._testCond(true_fn, false_fn, [x, y])
self._testCond(true_fn, false_fn, [y])
def testNoInputs(self):
with self.cached_session() as sess:
pred = array_ops.placeholder(dtypes.bool, name="pred")
def true_fn():
return constant_op.constant(1.0)
def false_fn():
return constant_op.constant(2.0)
out = cond_v2.cond_v2(pred, true_fn, false_fn)
self.assertEqual(sess.run(out, {pred: True}), (1.0,))
self.assertEqual(sess.run(out, {pred: False}), (2.0,))
def _createCond(self, name):
"""Helper function for testDefaultName."""
pred = constant_op.constant(True, name="pred")
x = constant_op.constant(1.0, name="x")
def true_fn():
return x
def false_fn():
return x + 1
output = cond_v2.cond_v2(pred, true_fn, false_fn, name=name)
cond_op = output.op.inputs[0].op
self.assertEqual(cond_op.type, "If")
return cond_op
def testDefaultName(self):
with ops.Graph().as_default():
cond_op = self._createCond(None)
self.assertEqual(cond_op.name, "cond")
self.assertRegexpMatches(
cond_op.get_attr("then_branch").name, r"cond_true_\d*")
self.assertRegexpMatches(
cond_op.get_attr("else_branch").name, r"cond_false_\d*")
with ops.Graph().as_default():
with ops.name_scope("foo"):
cond1_op = self._createCond("")
self.assertEqual(cond1_op.name, "foo/cond")
self.assertRegexpMatches(
cond1_op.get_attr("then_branch").name, r"foo_cond_true_\d*")
self.assertRegexpMatches(
cond1_op.get_attr("else_branch").name, r"foo_cond_false_\d*")
cond2_op = self._createCond(None)
self.assertEqual(cond2_op.name, "foo/cond_1")
self.assertRegexpMatches(
cond2_op.get_attr("then_branch").name, r"foo_cond_1_true_\d*")
self.assertRegexpMatches(
cond2_op.get_attr("else_branch").name, r"foo_cond_1_false_\d*")
def testDefunInCond(self):
self.skipTest("b/117293122")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
@function.defun
def fn():
return x * y * 2.0
return fn()
def false_fn():
return 2.0
self._testCond(true_fn, false_fn, [x])
self._testCond(true_fn, false_fn, [x, y])
self._testCond(true_fn, false_fn, [y])
def testNestedDefunInCond(self):
self.skipTest("b/117284369")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return 2.0
def false_fn():
@function.defun
def fn():
@function.defun
def nested_fn():
return x * y * 2.0
return nested_fn()
return fn()
self._testCond(true_fn, false_fn, [x])
self._testCond(true_fn, false_fn, [x, y])
self._testCond(true_fn, false_fn, [y])
def testDoubleNestedDefunInCond(self):
self.skipTest("b/117284369")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
@function.defun
def fn():
@function.defun
def nested_fn():
@function.defun
def nested_nested_fn():
return x * y * 2.0
return nested_nested_fn()
return nested_fn()
return fn()
def false_fn():
return 2.0
self._testCond(true_fn, false_fn, [x])
self._testCond(true_fn, false_fn, [x, y])
self._testCond(true_fn, false_fn, [y])
def testNestedCond(self):
def run_test(pred_value):
def build_graph():
pred = array_ops.placeholder(dtypes.bool, name="pred")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return 2.0
def false_fn():
def false_true_fn():
return x * y * 2.0
def false_false_fn():
return x * 5.0
return _cond(pred, false_true_fn, false_false_fn, "inside_false_fn")
return x, y, pred, true_fn, false_fn
with ops.Graph().as_default():
x, y, pred, true_fn, false_fn = build_graph()
self._testCond(true_fn, false_fn, [x, y], {pred: pred_value})
self._testCond(true_fn, false_fn, [x], {pred: pred_value})
self._testCond(true_fn, false_fn, [y], {pred: pred_value})
run_test(True)
run_test(False)
def testNestedCondBothBranches(self):
def run_test(pred_value):
def build_graph():
pred = array_ops.placeholder(dtypes.bool, name="pred")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return _cond(pred, lambda: x + y, lambda: x * x, name=None)
def false_fn():
return _cond(pred, lambda: x - y, lambda: y * y, name=None)
return x, y, pred, true_fn, false_fn
with ops.Graph().as_default():
x, y, pred, true_fn, false_fn = build_graph()
self._testCond(true_fn, false_fn, [x, y], {pred: pred_value})
self._testCond(true_fn, false_fn, [x], {pred: pred_value})
self._testCond(true_fn, false_fn, [y], {pred: pred_value})
run_test(True)
run_test(False)
def testDoubleNestedCond(self):
def run_test(pred1_value, pred2_value):
def build_graph():
pred1 = array_ops.placeholder(dtypes.bool, name="pred1")
pred2 = array_ops.placeholder(dtypes.bool, name="pred2")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return 2.0
def false_fn():
def false_true_fn():
def false_true_true_fn():
return x * y * 2.0
def false_true_false_fn():
return x * 10.0
return _cond(
pred1,
false_true_true_fn,
false_true_false_fn,
name="inside_false_true_fn")
def false_false_fn():
return x * 5.0
return _cond(
pred2, false_true_fn, false_false_fn, name="inside_false_fn")
return x, y, pred1, pred2, true_fn, false_fn
with ops.Graph().as_default():
x, y, pred1, pred2, true_fn, false_fn = build_graph()
self._testCond(true_fn, false_fn, [x, y], {
pred1: pred1_value,
pred2: pred2_value
})
x, y, pred1, pred2, true_fn, false_fn = build_graph()
self._testCond(true_fn, false_fn, [x], {
pred1: pred1_value,
pred2: pred2_value
})
x, y, pred1, pred2, true_fn, false_fn = build_graph()
self._testCond(true_fn, false_fn, [y], {
pred1: pred1_value,
pred2: pred2_value
})
run_test(True, True)
run_test(True, False)
run_test(False, False)
run_test(False, True)
def testGradientFromInsideDefun(self):
def build_graph():
pred_outer = array_ops.placeholder(dtypes.bool, name="pred_outer")
pred_inner = array_ops.placeholder(dtypes.bool, name="pred_inner")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return 2.0
def false_fn():
def inner_true_fn():
return x * y * 2.0
def inner_false_fn():
return x * 5.0
return cond_v2.cond_v2(
pred_inner, inner_true_fn, inner_false_fn, name="inner_cond")
cond_outer = cond_v2.cond_v2(
pred_outer, true_fn, false_fn, name="outer_cond")
# Compute grads inside a Defun.
@function.defun
def nesting_fn():
return gradients_impl.gradients(cond_outer, [x, y])
grads = nesting_fn()
return grads, pred_outer, pred_inner
with ops.Graph().as_default():
grads, pred_outer, pred_inner = build_graph()
with self.session(graph=ops.get_default_graph()) as sess:
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: True,
pred_inner: True
}), [0., 0.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: True,
pred_inner: False
}), [0., 0.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: False,
pred_inner: True
}), [4., 2.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: False,
pred_inner: False
}), [5., 0.])
def testGradientFromInsideNestedDefun(self):
def build_graph():
pred_outer = array_ops.placeholder(dtypes.bool, name="pred_outer")
pred_inner = array_ops.placeholder(dtypes.bool, name="pred_inner")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return 2.0
def false_fn():
def inner_true_fn():
return x * y * 2.0
def inner_false_fn():
return x * 5.0
return cond_v2.cond_v2(
pred_inner, inner_true_fn, inner_false_fn, name="inner_cond")
cond_outer = cond_v2.cond_v2(
pred_outer, true_fn, false_fn, name="outer_cond")
# Compute grads inside a Defun.
@function.defun
def nesting_fn():
@function.defun
def inner_nesting_fn():
return gradients_impl.gradients(cond_outer, [x, y])
return inner_nesting_fn()
grads = nesting_fn()
return grads, pred_outer, pred_inner
with ops.Graph().as_default():
grads, pred_outer, pred_inner = build_graph()
with self.session(graph=ops.get_default_graph()) as sess:
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: True,
pred_inner: True
}), [0., 0.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: True,
pred_inner: False
}), [0., 0.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: False,
pred_inner: True
}), [4., 2.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: False,
pred_inner: False
}), [5., 0.])
def testBuildCondAndGradientInsideDefun(self):
def build_graph():
pred_outer = array_ops.placeholder(dtypes.bool, name="pred_outer")
pred_inner = array_ops.placeholder(dtypes.bool, name="pred_inner")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
# Build cond and its gradient inside a Defun.
@function.defun
def fn():
def true_fn():
return 2.0
def false_fn():
def inner_true_fn():
return x * y * 2.0
def inner_false_fn():
return x * 5.0
return cond_v2.cond_v2(
pred_inner, inner_true_fn, inner_false_fn, name="inner_cond")
cond_outer = cond_v2.cond_v2(
pred_outer, true_fn, false_fn, name="outer_cond")
return gradients_impl.gradients(cond_outer, [x, y])
grads = fn()
return grads, pred_outer, pred_inner
with ops.Graph().as_default(), self.session(
graph=ops.get_default_graph()) as sess:
grads, pred_outer, pred_inner = build_graph()
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: True,
pred_inner: True
}), [0., 0.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: True,
pred_inner: False
}), [0., 0.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: False,
pred_inner: True
}), [4., 2.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: False,
pred_inner: False
}), [5., 0.])
def testSecondDerivative(self):
with self.cached_session() as sess:
pred = array_ops.placeholder(dtypes.bool, name="pred")
x = constant_op.constant(3.0, name="x")
def true_fn():
return math_ops.pow(x, 3)
def false_fn():
return x
cond = cond_v2.cond_v2(pred, true_fn, false_fn, name="cond")
cond_grad = gradients_impl.gradients(cond, [x])
cond_grad_grad = gradients_impl.gradients(cond_grad, [x])
# d[x^3]/dx = 3x^2
true_val = sess.run(cond_grad, {pred: True})
self.assertEqual(true_val, [27.0])
# d[x]/dx = 1
false_val = sess.run(cond_grad, {pred: False})
self.assertEqual(false_val, [1.0])
true_val = sess.run(cond_grad_grad, {pred: True})
# d2[x^3]/dx2 = 6x
self.assertEqual(true_val, [18.0])
false_val = sess.run(cond_grad_grad, {pred: False})
# d2[x]/dx2 = 0
self.assertEqual(false_val, [0.0])
def testGradientOfDeserializedCond(self):
with ops.Graph().as_default():
pred = array_ops.placeholder(dtypes.bool, name="pred")
x = constant_op.constant(3.0, name="x")
ops.add_to_collection("x", x)
def true_fn():
return math_ops.pow(x, 3)
def false_fn():
return x
ops.add_to_collection("pred", pred)
cond = cond_v2.cond_v2(pred, true_fn, false_fn, name="cond")
ops.add_to_collection("cond", cond)
meta_graph = saver.export_meta_graph()
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
saver.import_meta_graph(meta_graph)
x = ops.get_collection("x")[0]
pred = ops.get_collection("pred")[0]
cond = ops.get_collection("cond")
cond_grad = gradients_impl.gradients(cond, [x], name="cond_grad")
cond_grad_grad = gradients_impl.gradients(
cond_grad, [x], name="cond_grad_grad")
# d[x^3]/dx = 3x^2
true_val = sess.run(cond_grad, {pred: True})
self.assertEqual(true_val, [27.0])
# d[x]/dx = 1
false_val = sess.run(cond_grad, {pred: False})
self.assertEqual(false_val, [1.0])
true_val = sess.run(cond_grad_grad, {pred: True})
# d2[x^3]/dx2 = 6x
self.assertEqual(true_val, [18.0])
false_val = sess.run(cond_grad_grad, {pred: False})
# d2[x]/dx2 = 0
self.assertEqual(false_val, [0.0])
def testLowering(self):
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
out_cond = self._createCond("cond")
run_options = config_pb2.RunOptions(output_partition_graphs=True)
run_metadata = config_pb2.RunMetadata()
sess.run(out_cond, options=run_options, run_metadata=run_metadata)
# If lowering was enabled, there should be a `Switch` node
switch_found = any(
any(node.op == "Switch" for node in graph.node)
for graph in run_metadata.partition_graphs
)
self.assertTrue(switch_found,
"A `Switch` op should exist if the graph was lowered.")
# If lowering was enabled, there should be no `If` node
if_found = any(
any(node.op == "If" for node in graph.node)
for graph in run_metadata.partition_graphs
)
self.assertFalse(if_found,
"An `If` op was found, but it should be lowered.")
def testLoweringDisabledInXLA(self):
with self.session(graph=ops.Graph()) as sess:
# Build the cond_v2 in an XLA context
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
out_cond = self._createCond("cond")
xla_context.Exit()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
run_metadata = config_pb2.RunMetadata()
sess.run(out_cond, options=run_options, run_metadata=run_metadata)
# Lowering disabled in XLA, there should be no `Switch` node
switch_found = any(
any(node.op == "Switch" for node in graph.node)
for graph in run_metadata.partition_graphs
)
self.assertFalse(
switch_found,
"A `Switch` op exists, but the graph should not be lowered.")
# Lowering disabled in XLA, there should still be an `If` node
if_found = any(
any(node.op == "If" for node in graph.node)
for graph in run_metadata.partition_graphs
)
self.assertTrue(
if_found,
"An `If` op was not found, but the graph should not be lowered.")
def testLoweringDisabledWithSingleThreadedExecutorContext(self):
with self.session(graph=ops.Graph()) as sess:
@function.defun
def _add_cond(x):
return cond_v2.cond_v2(
constant_op.constant(True, name="pred"),
lambda: x,
lambda: x + 1)
x = array_ops.placeholder(shape=None, dtype=dtypes.float32)
with context.function_executor_type("SINGLE_THREADED_EXECUTOR"):
out_cond = _add_cond(x)
# The fact that sess.run() succeeds means lowering is disabled, because
# the single threaded executor does not support cond v1 ops.
sess.run(out_cond, feed_dict={x: 1.0})
@test_util.enable_control_flow_v2
def testStructuredOutputs(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(3.0, name="y")
def true_fn():
return ((x * y,), y)
def false_fn():
return ((x,), y * 3.0)
output = control_flow_ops.cond(
constant_op.constant(False), true_fn, false_fn)
self.assertEqual(self.evaluate(output[0][0]), 1.)
self.assertEqual(self.evaluate(output[1]), 9.)
@test_util.enable_control_flow_v2
def testRaisesOutputStructuresMismatch(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(3.0, name="y")
def true_fn():
return x * y, y
def false_fn():
return ((x,), y * 3.0)
with self.assertRaisesRegexp(
ValueError, "Outputs of true_fn and false_fn must"
" have the same structure"):
control_flow_ops.cond(constant_op.constant(False), true_fn, false_fn)
@test_util.enable_control_flow_v2
def testCondAndTensorArray(self):
if test_util.is_gpu_available():
old_enable_tensor_array_v2 = tensor_array_ops.ENABLE_TENSOR_ARRAY_V2
# TODO(b/119689663): Enable this.
tensor_array_ops.ENABLE_TENSOR_ARRAY_V2 = False
x = math_ops.range(-5, 5)
output = tensor_array_ops.TensorArray(dtype=dtypes.int32, size=x.shape[0])
def loop_body(i, output):
def if_true():
return output.write(i, x[i]**2)
def if_false():
return output.write(i, x[i])
output = control_flow_ops.cond(x[i] > 0, if_true, if_false)
return i + 1, output
_, output = control_flow_ops.while_loop(
lambda i, arr: i < x.shape[0],
loop_body,
loop_vars=(constant_op.constant(0), output))
output_t = output.stack()
self.assertAllEqual(
self.evaluate(output_t), [-5, -4, -3, -2, -1, 0, 1, 4, 9, 16])
if test_util.is_gpu_available():
tensor_array_ops.ENABLE_TENSOR_ARRAY_V2 = old_enable_tensor_array_v2
@test_util.enable_control_flow_v2
def testCondAndTensorArrayInDefun(self):
if test_util.is_gpu_available():
old_enable_tensor_array_v2 = tensor_array_ops.ENABLE_TENSOR_ARRAY_V2
# TODO(b/119689663): Enable this.
tensor_array_ops.ENABLE_TENSOR_ARRAY_V2 = False
@function.defun
def f():
x = math_ops.range(-5, 5)
output = tensor_array_ops.TensorArray(dtype=dtypes.int32, size=x.shape[0])
def loop_body(i, output):
def if_true():
return output.write(i, x[i]**2)
def if_false():
return output.write(i, x[i])
output = control_flow_ops.cond(x[i] > 0, if_true, if_false)
return i + 1, output
_, output = control_flow_ops.while_loop(
lambda i, arr: i < x.shape[0],
loop_body,
loop_vars=(constant_op.constant(0), output))
return output.stack()
output_t = f()
self.assertAllEqual(
self.evaluate(output_t), [-5, -4, -3, -2, -1, 0, 1, 4, 9, 16])
if test_util.is_gpu_available():
tensor_array_ops.ENABLE_TENSOR_ARRAY_V2 = old_enable_tensor_array_v2
class CondV2CollectionTest(test.TestCase):
def testCollectionIntValueAccessInCond(self):
"""Read values from graph collections inside of cond_v2."""
with ops.Graph().as_default() as g:
with self.session(graph=g):
x = 2
y = 5
ops.add_to_collection("x", x)
ops.add_to_collection("y", y)
def fn():
x_const = constant_op.constant(ops.get_collection("x")[0])
y_const = constant_op.constant(ops.get_collection("y")[0])
return math_ops.add(x_const, y_const)
cnd = cond_v2.cond_v2(constant_op.constant(True), fn, fn)
self.assertEquals(cnd.eval(), 7)
def testCollectionTensorValueAccessInCond(self):
"""Read tensors from collections inside of cond_v2 & use them."""
with ops.Graph().as_default() as g:
with self.session(graph=g):
x = constant_op.constant(2)
y = constant_op.constant(5)
ops.add_to_collection("x", x)
ops.add_to_collection("y", y)
def fn():
x_read = ops.get_collection("x")[0]
y_read = ops.get_collection("y")[0]
return math_ops.add(x_read, y_read)
cnd = cond_v2.cond_v2(math_ops.less(x, y), fn, fn)
self.assertEquals(cnd.eval(), 7)
def testCollectionIntValueWriteInCond(self):
"""Make sure Int writes to collections work inside of cond_v2."""
with ops.Graph().as_default() as g:
with self.session(graph=g):
x = constant_op.constant(2)
y = constant_op.constant(5)
def true_fn():
z = math_ops.add(x, y)
ops.add_to_collection("z", 7)
return math_ops.mul(x, z)
def false_fn():
z = math_ops.add(x, y)
return math_ops.mul(x, z)
cnd = cond_v2.cond_v2(constant_op.constant(True), true_fn, false_fn)
self.assertEquals(cnd.eval(), 14)
read_z_collection = ops.get_collection("z")
self.assertEquals(read_z_collection, [7])
class CondV2ContainerTest(test.TestCase):
  """Tests that resource containers propagate correctly into cond_v2 branches."""

  def testContainer(self):
    """Set containers outside & inside of cond_v2.

    Make sure the containers are set correctly for both variable creation
    (tested by variables.Variable) and for stateful ops (tested by FIFOQueue)
    """
    self.skipTest("b/113048653")
    with ops.Graph().as_default() as g:
      with self.session(graph=g):
        v0 = variables.Variable([0])
        q0 = data_flow_ops.FIFOQueue(1, dtypes.float32)

        def container(node):
          # Helper: read the "container" attr off the op that backs `node`.
          return node.op.get_attr("container")

        # Outside any ops.container scope the container name is empty.
        self.assertEqual(compat.as_bytes(""), container(v0))
        self.assertEqual(compat.as_bytes(""), container(q0.queue_ref))

        def true_fn():
          # When this branch is created in cond below,
          # the container should begin with 'l1'
          v1 = variables.Variable([1])
          q1 = data_flow_ops.FIFOQueue(1, dtypes.float32)

          with ops.container("l2t"):
            v2 = variables.Variable([2])
            q2 = data_flow_ops.FIFOQueue(1, dtypes.float32)

          v3 = variables.Variable([1])
          q3 = data_flow_ops.FIFOQueue(1, dtypes.float32)

          self.assertEqual(compat.as_bytes("l1"), container(v1))
          self.assertEqual(compat.as_bytes("l1"), container(q1.queue_ref))
          self.assertEqual(compat.as_bytes("l2t"), container(v2))
          self.assertEqual(compat.as_bytes("l2t"), container(q2.queue_ref))
          self.assertEqual(compat.as_bytes("l1"), container(v3))
          self.assertEqual(compat.as_bytes("l1"), container(q3.queue_ref))

          return constant_op.constant(2.0)

        def false_fn():
          # When this branch is created in cond below,
          # the container should begin with 'l1'
          v1 = variables.Variable([1])
          q1 = data_flow_ops.FIFOQueue(1, dtypes.float32)

          with ops.container("l2f"):
            v2 = variables.Variable([2])
            q2 = data_flow_ops.FIFOQueue(1, dtypes.float32)

          v3 = variables.Variable([1])
          q3 = data_flow_ops.FIFOQueue(1, dtypes.float32)

          self.assertEqual(compat.as_bytes("l1"), container(v1))
          self.assertEqual(compat.as_bytes("l1"), container(q1.queue_ref))
          self.assertEqual(compat.as_bytes("l2f"), container(v2))
          self.assertEqual(compat.as_bytes("l2f"), container(q2.queue_ref))
          self.assertEqual(compat.as_bytes("l1"), container(v3))
          self.assertEqual(compat.as_bytes("l1"), container(q3.queue_ref))

          return constant_op.constant(6.0)

        with ops.container("l1"):
          # assertEquals is a deprecated alias; use assertEqual.
          cnd_true = cond_v2.cond_v2(
              constant_op.constant(True), true_fn, false_fn)
          self.assertEqual(cnd_true.eval(), 2)

          cnd_false = cond_v2.cond_v2(
              constant_op.constant(False), true_fn, false_fn)
          self.assertEqual(cnd_false.eval(), 6)

          v4 = variables.Variable([3])
          q4 = data_flow_ops.FIFOQueue(1, dtypes.float32)
        v5 = variables.Variable([4])
        q5 = data_flow_ops.FIFOQueue(1, dtypes.float32)

        # Ops created inside the "l1" scope keep it; ops created after the
        # scope exits fall back to the empty default container.
        self.assertEqual(compat.as_bytes("l1"), container(v4))
        self.assertEqual(compat.as_bytes("l1"), container(q4.queue_ref))
        self.assertEqual(compat.as_bytes(""), container(v5))
        self.assertEqual(compat.as_bytes(""), container(q5.queue_ref))
class CondV2ColocationGroupAndDeviceTest(test.TestCase):
  """Tests colocation constraints and device placement around cond_v2."""

  def testColocateWithBeforeCond(self):
    with ops.Graph().as_default() as g:
      with self.session(graph=g):

        a = constant_op.constant([2.0], name="a")
        b = constant_op.constant([2.0], name="b")

        def fn():
          c = constant_op.constant(3.0)
          # Ops built inside the branch inherit the colocation group that
          # was active when the cond was constructed.
          self.assertEqual([b"loc:@a"], c.op.colocation_groups())
          return c

        with ops.colocate_with(a.op):
          # assertEquals is a deprecated alias; use assertEqual.
          self.assertEqual(
              cond_v2.cond_v2(constant_op.constant(True), fn, fn).eval(), 3)

        def fn2():
          c = constant_op.constant(3.0)
          self.assertEqual([b"loc:@a", b"loc:@b"], c.op.colocation_groups())
          return c

        with ops.colocate_with(a.op):
          with ops.colocate_with(b.op):
            self.assertEqual(
                cond_v2.cond_v2(constant_op.constant(True), fn2, fn2).eval(),
                3)

  def testColocateWithInAndOutOfCond(self):
    with ops.Graph().as_default() as g:
      with self.session(graph=g):

        a = constant_op.constant([2.0], name="a")
        b = constant_op.constant([2.0], name="b")

        def fn2():
          # Nested colocate_with inside the branch stacks on top of the
          # group that was active outside the cond.
          with ops.colocate_with(b.op):
            c = constant_op.constant(3.0)
            self.assertEqual([b"loc:@a", b"loc:@b"], c.op.colocation_groups())
            return c

        with ops.colocate_with(a.op):
          self.assertEqual(
              cond_v2.cond_v2(constant_op.constant(True), fn2, fn2).eval(), 3)

          d = constant_op.constant([2.0], name="d")
          self.assertEqual([b"loc:@a"], d.op.colocation_groups())

  def testColocateWithInCondGraphPartitioning(self):
    with ops.Graph().as_default() as g:
      with self.session(
          graph=g,
          config=config_pb2.ConfigProto(device_count={"CPU": 2})
      ) as sess:

        with ops.device("/device:CPU:0"):
          a = constant_op.constant([2.0], name="a")
        with ops.device("/device:CPU:1"):
          b = constant_op.constant([2.0], name="b")

        def fn():
          with ops.colocate_with(b.op):
            c = math_ops.add(a, a, name="c")
          return c
        out_cond_2 = cond_v2.cond_v2(constant_op.constant(True), fn, fn)

        run_options = config_pb2.RunOptions(output_partition_graphs=True)
        run_metadata = config_pb2.RunMetadata()
        sess.run(out_cond_2, options=run_options, run_metadata=run_metadata)

        # We expect there to be two partitions because of the
        # colocate_with. We are only running the cond, which has a data
        # dependency on `a` but not on `b`. So, without the colocate_with
        # we would expect execution on just one device.
        # assertGreaterEqual gives a clearer failure message than
        # assertTrue(len(...) >= 2).
        self.assertGreaterEqual(len(run_metadata.partition_graphs), 2)

  def testDeviceBeforeCond(self):
    with ops.Graph().as_default() as g:
      with self.session(graph=g):

        def fn():
          # Branch ops carry no explicit device string; the device is
          # applied when the cond is stitched into the outer graph.
          self.assertEqual("", constant_op.constant(3.0).op.device)
          return test_ops.device_placement_op()

        with ops.device("/device:CPU:0"):
          self.assertIn(
              compat.as_bytes("CPU:0"),
              self.evaluate(cond_v2.cond_v2(constant_op.constant(True),
                                            fn, fn)))

        def fn2():
          self.assertEqual("", constant_op.constant(3.0).op.device)
          return test_ops.device_placement_op()

        if test_util.is_gpu_available():
          with ops.device("/device:GPU:0"):
            self.assertIn(
                compat.as_bytes("GPU:0"),
                self.evaluate(cond_v2.cond_v2(constant_op.constant(True),
                                              fn2, fn2)))
        else:
          # Typo fix: "requrires" -> "requires".
          self.skipTest("Test requires a GPU to check GPU device placement.")

  def testDeviceInAndOutOfCond(self):
    with ops.Graph().as_default() as g:
      with self.session(
          graph=g, config=config_pb2.ConfigProto(device_count={"CPU": 2})):

        def fn2():
          # An explicit device inside the branch overrides the outer one.
          with ops.device("/device:CPU:1"):
            c = constant_op.constant(3.0)
            self.assertEqual("/device:CPU:1", c.op.device)
            return c

        with ops.device("/device:CPU:0"):
          # assertEquals is a deprecated alias; use assertEqual.
          self.assertEqual(
              cond_v2.cond_v2(constant_op.constant(True), fn2, fn2).eval(), 3)

          d = constant_op.constant(4.0)
          self.assertEqual("/device:CPU:0", d.op.device)

  def testDeviceInCondGraphPartitioning(self):
    with ops.Graph().as_default() as g:
      with self.session(
          graph=g,
          config=config_pb2.ConfigProto(device_count={"CPU": 2})
      ) as sess:

        def fn():
          with ops.device("/device:CPU:1"):
            c = math_ops.add(a, a, name="c")
          return c

        # `a` is defined after `fn` but before the cond traces the branch,
        # so the closure resolves correctly at trace time.
        with ops.device("/device:CPU:0"):
          a = constant_op.constant([2.0], name="a")
          out_cond_2 = cond_v2.cond_v2(constant_op.constant(True), fn, fn)

        run_options = config_pb2.RunOptions(output_partition_graphs=True)
        run_metadata = config_pb2.RunMetadata()
        sess.run(out_cond_2, options=run_options, run_metadata=run_metadata)

        # The explicit in-branch device forces a second partition.
        self.assertGreaterEqual(len(run_metadata.partition_graphs), 2)
def _cond(pred, true_fn, false_fn, name):
  """Build a cond with whichever implementation matches the current context.

  Dispatches to the v1 `control_flow_ops.cond` when the active control-flow
  context is an old-style CondContext, otherwise to `cond_v2.cond_v2`.
  """
  cond_impl = control_flow_ops.cond if _is_old_cond() else cond_v2.cond_v2
  return cond_impl(pred, true_fn, false_fn, name=name)
def _is_old_cond():
  """Return True when the default graph is inside a v1 CondContext."""
  current_ctx = ops.get_default_graph()._get_control_flow_context()
  return isinstance(current_ctx, control_flow_ops.CondContext)
if __name__ == "__main__":
  # Standard TensorFlow test entry point: discovers and runs the tests above.
  test.main()
| 32.201472 | 80 | 0.618061 |
ace18dcd51ffa9286a45f380414459df6a61cdee | 2,816 | py | Python | src/crawler/spiders/mieszkania2.py | Santhin/TorScrapy | cb240f1b99659707201736ec40c0e0108ef4749c | [
"MIT"
] | null | null | null | src/crawler/spiders/mieszkania2.py | Santhin/TorScrapy | cb240f1b99659707201736ec40c0e0108ef4749c | [
"MIT"
] | null | null | null | src/crawler/spiders/mieszkania2.py | Santhin/TorScrapy | cb240f1b99659707201736ec40c0e0108ef4749c | [
"MIT"
] | 1 | 2022-02-14T13:46:48.000Z | 2022-02-14T13:46:48.000Z | import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
class Mieszaknia2Spider(CrawlSpider):
    """Crawls Gumtree apartment-for-sale listings and extracts the details
    of each listing (title, description, price, attribute table, request
    metadata)."""

    name = 'mieszkania2'
    # download_delay = 2.0
    allowed_domains = ['www.gumtree.pl']

    def start_requests(self):
        # Seed the crawl from the first page of the apartments category.
        yield scrapy.Request(url='http://www.gumtree.pl/s-mieszkania-i-domy-sprzedam-i-kupie/v1c9073p1/')

    rules = (
        # Follow each listing tile into its detail page.
        Rule(LinkExtractor(restrict_xpaths="//a[@class='href-link tile-title-text']"),
             callback='parse_item', follow=True),
        # Follow the "next page" arrow to paginate.
        Rule(LinkExtractor(
            restrict_xpaths="//a[@class='arrows icon-right-arrow icon-angle-right-gray']"))
    )

    def parse_item(self, response):
        # BUG FIX: the original used a Python-2 style `print {...}` statement,
        # which is a SyntaxError under Python 3 and, even on Python 2, only
        # printed the scraped data instead of handing it to Scrapy's item
        # pipeline. Yielding the dict makes it a proper Scrapy item.
        yield {
            "tytuł": response.xpath("//span[@class='myAdTitle']/text()").get(),
            "opis": response.xpath((".//div[@class='description']/span/p[string(.)]/text() | .//div[@class='description']/span[@class='pre']/text() |.//div[@class='description']/span[@class='pre']/div/text() | .//div[@class='description']/span/p/text() | .//div[@class='description']/span/div/p/text() | .//div[@class='description']/span/div/div/text() | .//div[@class='description']/span/b/text() | .//div[@class='description']/span/div/i/b/text() | .//div[@class='description']/span/p/b/text() | //div[@class='vip-details']/div[@class='description']/span/text() | .//div[@class='description']/span/i/text()")).getall(),
            "cena": response.xpath("//span[@class='amount']/text()[1]").get(),
            ##Tabelka:
            "data dodania": response.xpath(".//div[@class='attribute' and contains(span, 'Data dodania')]/span[2]/text()").get(),
            "lokalizacja": response.xpath(".//div[@class='attribute' and contains(span, 'Lokalizacja')]/span[2]/div/a/text()").getall(),
            "Na_sprzedaż_przez": response.xpath(".//div[@class='attribute' and contains(span, 'Na sprzedaż przez')]/span[2]/text()").getall(),
            "Rodzaj_nieruchomosci": response.xpath(".//div[@class='attribute' and contains(span, 'Rodzaj nieruchomości')]/span[2]/text()").getall(),
            "Liczba_pokoi": response.xpath(".//div[@class='attribute' and contains(span, 'Liczba pokoi')]/span[2]/text()").getall(),
            "Liczba_łazienek": response.xpath(".//div[@class='attribute' and contains(span, 'Liczba łazienek')]/span[2]/text()").getall(),
            "Wielkość (m2)": response.xpath(".//div[@class='attribute' and contains(span, 'Wielkość (m2)')]/span[2]/text()").getall(),
            "Parking": response.xpath(".//div[@class='attribute' and contains(span, 'Parking')]/span[2]/text()").getall(),
            "mieszkanie_url" : response.url,
            "User-Agent": (response.request.headers['User-Agent']).decode(),
            "Proxy": response.meta
        }
| 68.682927 | 622 | 0.611861 |
ace18fe753788982376c515bfcff3120a12250e3 | 383 | py | Python | console/widgets/binds.py | dustinlacewell/console | b65f63354dd8ba60f211e3e169e53c078b99fdf8 | [
"MIT"
] | 11 | 2015-06-10T22:23:03.000Z | 2021-02-16T10:55:55.000Z | console/widgets/binds.py | rrosajp/console | b65f63354dd8ba60f211e3e169e53c078b99fdf8 | [
"MIT"
] | 1 | 2015-07-01T00:04:50.000Z | 2015-08-19T16:40:18.000Z | console/widgets/binds.py | rrosajp/console | b65f63354dd8ba60f211e3e169e53c078b99fdf8 | [
"MIT"
] | 5 | 2015-06-20T11:08:32.000Z | 2022-03-07T00:01:50.000Z | import urwid
class BindWidget(urwid.WidgetWrap):
    """Widget wrapper that remaps key presses through a bind table."""

    def __init__(self, widget, binds):
        # `binds` maps raw key names to the event names the app expects.
        self.binds = binds
        urwid.WidgetWrap.__init__(self, widget)

    def keypress(self, size, key):
        """
        Translate keypress events into Console events.
        """
        translated = self.binds.get(key, key)
        return super(BindWidget, self).keypress(size, translated)
| 23.9375 | 60 | 0.629243 |
ace19025ea3383b1e42addc6b0ae13add5b9fd4c | 12,256 | py | Python | src/students/r06522601.py | batrakeshav10/PythonHomework | e555f1740c6502296bd7f06e8bb40d21d5228a45 | [
"MIT"
] | 29 | 2018-11-27T07:31:36.000Z | 2022-01-20T04:26:30.000Z | src/students/r06522601.py | batrakeshav10/PythonHomework | e555f1740c6502296bd7f06e8bb40d21d5228a45 | [
"MIT"
] | 96 | 2018-11-24T10:38:22.000Z | 2020-10-09T17:09:00.000Z | src/students/r06522601.py | batrakeshav10/PythonHomework | e555f1740c6502296bd7f06e8bb40d21d5228a45 | [
"MIT"
] | 99 | 2018-11-22T06:53:37.000Z | 2020-10-02T19:39:47.000Z | '''
This is the sample code from the homework. You shold NOT modify this file.
Instead, please copy this file to src/students/<your student ID>.py and
edit it there.
'''
import os
# Define global variables with upper case
SRC_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
TEST_DATA_DIR = os.path.join(SRC_PATH, 'test_data')
def task_1(dummy=None):
    '''
    Task 1: Basic Syntax and Flake8 Checker

    Python delimits blocks by indentation rather than brackets, and Flake8
    checks both syntax and style. This task simply prints "Hello world"
    with correct, Flake8-clean indentation.
    '''
    greeting = "Hello world"
    print(greeting)
    return True
def task_2(
    input_list: list = [1, 4, 53, 27, 9],
    target_index: int = 0,
    input_dictionary: dict = {"a": " taiwan", "b": 20, "c": "CSIE"},
    target_key: str = "a"
) -> tuple:
    '''
    Task 2: Data Types

    Combine basic data types: report the length of `input_list` and build a
    sentence from the indexed list element (converted to a string) and the
    dictionary value stored under `target_key`.

    Args:
        input_list: a list with several items
        target_index: index into input_list
        input_dictionary: a dictionary with several key-value pairs
        target_key: key into input_dictionary

    Returns:
        input_list_length_and_sentence: a 2-tuple of
            (len(input_list),
             str(input_list[target_index]) + input_dictionary[target_key])
    '''
    selected_item = input_list[target_index]
    selected_value = input_dictionary[target_key]
    input_list_length_and_sentence = (
        len(input_list),
        str(selected_item) + selected_value,
    )
    print(input_list_length_and_sentence)
    return input_list_length_and_sentence
def task_3(
    number: int = 1314151677777
) -> list:
    '''
    Task 3: Conditions

    Args:
        number: an integer input

    Returns:
        prime_factors_below_10: the single-digit primes (2, 3, 5, 7) that
            divide `number`, in ascending order.
            If the number is negative, return [-1].
            If the number is zero, return [0].
    '''
    if number < 0:
        prime_factors_below_10 = [-1]
    elif number == 0:
        prime_factors_below_10 = [0]
    else:
        # Collect every single-digit prime that divides the number; the
        # tuple is already in ascending order, matching the spec.
        prime_factors_below_10 = [p for p in (2, 3, 5, 7) if number % p == 0]
    print(prime_factors_below_10)
    return prime_factors_below_10
def task_4(
    numbers: list = [2, 4, 5, 6, 9]
) -> list:
    '''
    Task 4: For and While Loop

    For each number n in `numbers`, append n lines of stars: the first line
    has one star, the last has n stars. The same list is built twice — once
    with for loops and once with while loops — and the two are asserted to
    match before the for-loop version is returned.

    Args:
        numbers: a list of integers

    Returns:
        list_of_stars: the star lines described above
    '''
    # For-loop construction, written as a nested comprehension.
    list_of_stars = [
        "*" * width
        for number in numbers
        for width in range(1, number + 1)
    ]

    # Equivalent while-loop construction.
    list_of_stars_while = []
    idx = 0
    while idx < len(numbers):
        width = 1
        while width <= numbers[idx]:
            list_of_stars_while.append("*" * width)
            width += 1
        idx += 1

    print("=====> Output list_of_stars")
    for stars in list_of_stars:
        print(stars)
    print("=====> Output list_of_stars_while")
    for stars in list_of_stars_while:
        print(stars)
    # Sanity check: both constructions must agree element-wise.
    for ans1, ans2 in zip(list_of_stars, list_of_stars_while):
        assert ans1 == ans2
    return list_of_stars
def task_5(
    input_filename: str = 'task_5_input.txt',
    output_filename: str = 'task_5_output.txt'
) -> str:
    '''
    Task 5: I/O with files

    Read the comma-separated input file, strip the commas, and write the
    result to the output file. Both filenames are resolved inside
    TEST_DATA_DIR.

    Args:
        input_filename: input filename
        output_filename: output filename

    Returns:
        lines: content in the output file without commas
    '''
    input_filename = os.path.join(TEST_DATA_DIR, input_filename)
    output_filename = os.path.join(TEST_DATA_DIR, output_filename)
    # Remove previous output file
    if os.path.exists(output_filename):
        os.remove(output_filename)
    with open(input_filename, 'r') as fin, open(output_filename, 'w') as fout:
        lines = fin.readlines()
        print(f"=======> Input file content:")
        for line in lines:
            print(f"{line}")
        # BUG FIX: the original looped `for line in line`, iterating the
        # *characters* of the last printed line, so every input line but the
        # last was dropped from the output. Iterate over `lines` instead and
        # write each line with its commas removed.
        for line in lines:
            fout.write(line.replace(',', ''))
    with open(output_filename, 'r') as fin:
        lines = fin.readlines()
        print(f"=======> Output file content:")
        print(lines)
    return "".join(lines)
def task_6(
    matrix: list = [[-0.5, 1], [1, 0.5], [-1, 0.5], [-1, -0.5]],
    vector: list = [1, 0.5]
) -> list:
    '''
    Task 6: Functions

    Args:
        matrix: a list of vectors v1
        vector: v2

    Returns:
        cos_sims: the cosine similarity between each v1 and v2

    Note: because the norm uses `** 0.5`, the similarity of a vector with
    itself may come out as 0.9999999 rather than exactly 1.
    '''
    def _dot(v1, v2):
        # Inner product; both vectors must have the same length.
        assert len(v1) == len(v2)
        total = 0
        for a, b in zip(v1, v2):
            total += a * b
        return total

    def _norm(v):
        # Euclidean norm via an approximate square root.
        return _dot(v, v) ** 0.5

    cos_sims = []
    for v1 in matrix:
        # cosine = v1 . v2 / (|v1| * |v2|)
        cos_sim = _dot(v1, vector) / (_norm(v1) * _norm(vector))
        print(f"Cosine similarity between {v1} and {vector}: {cos_sim}")
        cos_sims.append(cos_sim)
    return cos_sims
class Student():
    """A student record that can greet with a configurable message."""

    def __init__(self, student_id, time):
        self.student_id = student_id
        self.time = time
        # Default greeting payload until set_words_to_say() is called.
        self.words_to_say = "HEY!"

    def set_words_to_say(self, words_to_say):
        """Replace the message appended to the greeting."""
        self.words_to_say = words_to_say

    def hello(self):
        """Return the full greeting string for this student."""
        return f"Hello, {self.student_id}! Time is {self.time}. I want to say {self.words_to_say}"
def task_7(
    student_id: str = 'test_id',
    time: str = '2018_11_24_0000'
) -> Student:
    '''
    Task 7: Class

    Instantiate a Student, print its greeting, and return it.

    Args:
        student_id: someone's student ID
        time: a certain time

    Returns:
        student: a Student object
    '''
    new_student = Student(student_id, time)
    print(new_student.hello())
    return new_student
def task_8(
    img_url: str = 'https://i.imgur.com/B75zq0x.jpg'
) -> object:
    '''
    Task 8: Module

    Download the image at `img_url`, stamp the student ID onto it with
    utils.draw_text(), save it as 'test.jpg', and return it.

    Args:
        img_url: address of an image

    Returns:
        result_img: a PIL Image with the student ID drawn on it
    '''
    # Local imports keep the optional PIL dependency out of module import.
    import io
    from urllib import request
    from PIL import Image
    # BUG FIX: the original `from .. import ultis` was both a typo ("ultis")
    # and an invalid relative import for a top-level module; draw_text was
    # then called unqualified. utils.py lives under src/, which is assumed
    # to be on sys.path — confirm when integrating.
    from utils import draw_text

    # BUG FIX: the original assigned the download to a misspelled name
    # (`reslt_img`), so `result_img` stayed None and every later use failed.
    # Buffer the bytes so PIL can seek within them.
    image_bytes = request.urlopen(img_url).read()
    result_img = Image.open(io.BytesIO(image_bytes))

    # BUG FIX: `(0.0)` is just the float 0.0, not a coordinate pair.
    draw_text(result_img, 'r06522601', (0, 0), (255, 255, 255))
    # result_img.show()
    # Note: please comment the show() call when handing in; save instead.
    result_img.save('test.jpg')
    return result_img
| 29.747573 | 79 | 0.607784 |
ace19086451f27f92ae60ac27c2206713a3c9fbb | 868 | py | Python | 07_Python_Lists/02_List.py | jmmedel/Python-Reference | 5749fbada4d69357bb840e02081d41cbd866b79e | [
"MIT"
] | null | null | null | 07_Python_Lists/02_List.py | jmmedel/Python-Reference | 5749fbada4d69357bb840e02081d41cbd866b79e | [
"MIT"
] | null | null | null | 07_Python_Lists/02_List.py | jmmedel/Python-Reference | 5749fbada4d69357bb840e02081d41cbd866b79e | [
"MIT"
] | null | null | null |
"""
Author: Kagaya john
Tutorial 7 : List
"""
"""
The list() Constructor
It is also possible to use the list() constructor to make a list.
To add an item to the list use append() object method. To remove a
specific item use the remove() object method. The len() function returns the length of the list.
"""
# Using the list() constructor to make a List:
thislist = list(("apple", "banana", "cherry")) # note the double round-brackets
print(thislist)
#Using the append() method to append an item:
thislist = list(("apple", "banana", "cherry"))
thislist.append("damson")
print(thislist)
#Using the remove() method to remove an item:
thislist = list(("apple", "banana", "cherry"))
thislist.remove("banana")
print(thislist)
# The len() method returns the number of items in a list:
thislist = list(("apple", "banana", "cherry"))
print(len(thislist)) | 20.666667 | 98 | 0.6947 |
ace190c479d2034dd970d5e9d47baf7f83e7841d | 6,154 | py | Python | code/pytorch/LAMPO/ct_experiment.py | hzm2016/assistive-gym-robosuite | 5c529f4444cc386383618bfa584341740a8468f9 | [
"MIT"
] | 1 | 2021-11-22T07:45:28.000Z | 2021-11-22T07:45:28.000Z | code/pytorch/LAMPO/ct_experiment.py | hzm2016/assistive-gym-robosuite | 5c529f4444cc386383618bfa584341740a8468f9 | [
"MIT"
] | null | null | null | code/pytorch/LAMPO/ct_experiment.py | hzm2016/assistive-gym-robosuite | 5c529f4444cc386383618bfa584341740a8468f9 | [
"MIT"
] | null | null | null | from code.pytorch.LAMPO.core.config import config
import argparse
from code.pytorch.LAMPO.core.collector import RunModel
from code.pytorch.LAMPO.core.colome_torras import CT_ImitationLearning, CT_ReinforcementLearning
from code.pytorch.LAMPO.core.task_interface import TaskInterface
from code.pytorch.LAMPO.core.plot import LampoMonitor
import numpy as np
import json
def get_arguments_dict():
parser = argparse.ArgumentParser()
parser.add_argument("folder_name",
help="Where you would like to save the experimental results and configuration.")
parser.add_argument("-t", "--task_name",
help="Task name.",
default="reach_target")
parser.add_argument("-i", "--id",
help="Identifier of the process.",
type=int, default=10)
parser.add_argument("-b", "--batch_size",
help="How many episodes before improvement.",
type=int, default=10)
parser.add_argument("-l", "--imitation_learning",
help="How many episodes before improvement.",
type=int, default=200)
parser.add_argument("-p", "--plot",
help="Show real time plots.",
action="store_true")
parser.add_argument("-v", "--visualize_robot",
help="Show robotic behavior",
action="store_true")
parser.add_argument("-s", "--save",
help="Save the results in the experiment directory.",
action="store_true")
parser.add_argument("-d", "--load",
help="Load configuration from folder.",
action="store_true")
parser.add_argument("-r", "--slurm",
help="Don't look for CPU usage.",
action="store_true")
parser.add_argument("--il_noise",
help="Add noise on the context",
type=float,
default=0.03)
parser.add_argument("--dense_reward",
help="Use dense reward",
action="store_true")
parser.add_argument("-k", "--kl_bound",
help="Bound the improvement kl.",
type=float,
default=0.2)
parser.add_argument("-f", "--forward",
help="Bound the improvement kl.",
action="store_true")
parser.add_argument("-m", "--max_iter",
help="Maximum number of iterations.",
type=int,
default=20)
parser.add_argument("-e", "--n_evaluations",
help="Number of the evaluation batch.",
type=int,
default=500)
parser.add_argument("--not_dr",
help="Don't do dimensionality reduction.",
default=False)
parser.add_argument("--forgetting_rate",
help="The forgetting rate of the IRWR-GMM.",
type=float,
default=1.)
args = parser.parse_args()
return args
class Objectview(object):
def __init__(self, d):
self.__dict__ = d
def process_parameters(parameters, n_samples, n_context, noise=0.03):
parameters = parameters[:n_samples].copy()
parameters[:, :n_context] += noise * np.random.normal(size=parameters[:, :n_context].shape)
return parameters
if __name__ == "__main__":
print("ciao")
args = get_arguments_dict()
experiment_path = "ct_experiments/%s/" % args.folder_name
if args.load:
with open(experiment_path + "configuration.json") as f:
args = Objectview(json.load(f))
n_clusters = config[args.task_name]["n_cluster"]
task = config[args.task_name]["task_box"](not args.visualize_robot)
state_dim = task.get_context_dim()
parameters = task.get_demonstrations()
parameters = process_parameters(parameters, args.imitation_learning, state_dim, args.il_noise)
imitation = CT_ImitationLearning(state_dim, parameters.shape[1] - config[args.task_name]["latent_dim"],
config[args.task_name]["latent_dim"], n_clusters, use_dr=not args.not_dr)
imitation.fit(parameters[:, :state_dim], parameters[:, state_dim:], forgetting_rate=args.forgetting_rate)
n_evaluation_samples = args.n_evaluations
n_batch = args.batch_size
kl_bound = args.kl_bound
if args.forward:
kl_type = "forward"
else:
kl_type = "reverse"
rl_model = CT_ReinforcementLearning(imitation, kl_bound=kl_bound)
myplot = LampoMonitor(kl_bound, kl_context_bound=0.,
title="class_log kl=%.2f, %d samples" %
(kl_bound, n_batch))
collector = RunModel(task, rl_model, args.dense_reward)
for i in range(args.max_iter):
s, r, actions, latent, cluster, observation = collector.collect_samples(n_evaluation_samples, isomorphic_noise=False)
rl_model.add_dataset(observation[:n_batch], actions[:n_batch], r[:n_batch])
print("ITERATION", i)
print("SUCCESS:", np.mean(s))
myplot.notify_outer_loop(np.mean(s), np.mean(r))
# sr.improve()
# print("Optimization %f" % sr.rlmodel._f)
# print("KL %f <= %f" % (sr.rlmodel._g, kl_bound))
# if kl_context_bound> 0:
# print("KL context %f <= %f" % (sr.rlmodel._h, kl_context_bound))
myplot.notify_inner_loop(0., 0., 0., 0.)
if args.plot:
myplot.visualize()
s, r, actions, latent, cluster, observation = collector.collect_samples(n_evaluation_samples, isomorphic_noise=False)
print("ITERATION", args.max_iter)
print("SUCCESS:", np.mean(s))
myplot.notify_outer_loop(np.mean(s), np.mean(r))
if args.plot:
myplot.visualize(last=True)
if args.save:
myplot.save(experiment_path + "result_%d.npz" % args.id)
| 39.703226 | 125 | 0.578161 |
ace193ae49829b9f4eb198b1d7d0cdb98288157d | 234 | py | Python | setup.py | diodax/commit-multi-label-classification | 30edb27e6356a66ec7fdfba4d77c2bb6147d63e3 | [
"FTL"
] | null | null | null | setup.py | diodax/commit-multi-label-classification | 30edb27e6356a66ec7fdfba4d77c2bb6147d63e3 | [
"FTL"
] | null | null | null | setup.py | diodax/commit-multi-label-classification | 30edb27e6356a66ec7fdfba4d77c2bb6147d63e3 | [
"FTL"
] | null | null | null | from setuptools import find_packages, setup
# Minimal packaging configuration; find_packages() discovers every package
# under the repository root automatically.
setup(
    name='src',  # NOTE(review): 'src' is an unusual distribution name — confirm intended
    packages=find_packages(),
    version='0.1.0',
    description='Multi-label classification of refactoring commits',
    author='Diosdavi Lara',
    license='',  # NOTE(review): left empty — confirm the intended license string
)
| 21.272727 | 68 | 0.688034 |
ace194936d8bd743d7c1bb3dd87f6c871b57fec4 | 511 | py | Python | OpenAttack/attack_assist/word_embedding/__init__.py | e-tornike/OpenAttack | b19c53af2e01f096505f8ebb8f48a54388295003 | [
"MIT"
] | 444 | 2020-07-14T12:13:26.000Z | 2022-03-28T02:46:30.000Z | OpenAttack/attack_assist/word_embedding/__init__.py | e-tornike/OpenAttack | b19c53af2e01f096505f8ebb8f48a54388295003 | [
"MIT"
] | 50 | 2020-07-15T01:34:42.000Z | 2022-01-24T12:19:19.000Z | OpenAttack/attack_assist/word_embedding/__init__.py | e-tornike/OpenAttack | b19c53af2e01f096505f8ebb8f48a54388295003 | [
"MIT"
] | 86 | 2020-08-02T13:16:45.000Z | 2022-03-27T06:22:04.000Z |
from typing import Dict
class WordEmbedding:
    """Lookup table from words to embedding rows, with an UNK fallback."""

    def __init__(self, word2id: Dict[str, int], embedding) -> None:
        # word2id: vocabulary mapping word -> row index into `embedding`.
        self.word2id = word2id
        self.embedding = embedding

    def transform(self, word, token_unk):
        """Return the embedding row for `word`.

        Unknown words fall back to `token_unk`, which may be either a row
        index (int) or a vocabulary word to look up.
        """
        index = self.word2id.get(word)
        if index is None:
            index = token_unk if isinstance(token_unk, int) else self.word2id[token_unk]
        return self.embedding[index]
ace195018006ea7d9966f8069fa2bbf11f30efe1 | 683 | py | Python | app/core/migrations/0002_tag.py | DarioAcevedo/recipe-app-api | 8af296c5b358c76a67acbe8797ef97c24a520cac | [
"MIT"
] | null | null | null | app/core/migrations/0002_tag.py | DarioAcevedo/recipe-app-api | 8af296c5b358c76a67acbe8797ef97c24a520cac | [
"MIT"
] | null | null | null | app/core/migrations/0002_tag.py | DarioAcevedo/recipe-app-api | 8af296c5b358c76a67acbe8797ef97c24a520cac | [
"MIT"
] | null | null | null | # Generated by Django 2.1.15 on 2021-07-27 02:45
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds the Tag model with an
    # owning-user foreign key. Generated code — keep edits minimal.

    dependencies = [
        ('core', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Tag',
            fields=[
                # Standard auto-increment primary key.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                # Deleting the owning user cascades to their tags.
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
ace196c369bf27bf63d7e457629057df809d8122 | 13,677 | py | Python | Shop/views.py | Sofia190/book_store_app | 3c32f269604948bb4a495802d17794a68188e3a5 | [
"MIT"
] | null | null | null | Shop/views.py | Sofia190/book_store_app | 3c32f269604948bb4a495802d17794a68188e3a5 | [
"MIT"
] | null | null | null | Shop/views.py | Sofia190/book_store_app | 3c32f269604948bb4a495802d17794a68188e3a5 | [
"MIT"
] | null | null | null | from django.shortcuts import render, redirect, get_object_or_404
# Create your views here.
from Translator.models import Translator
from Shop.models import (Item_to_buy_work, Item_to_buy_card,
Orders, Bills, Returned_items,)
from Person.models import ContactInformation, AccountSettings
from .forms import OrdersForm
from django.http import HttpResponse, HttpResponseRedirect
from django.views.generic import View
from datetime import timedelta, datetime, date
from django.utils import timezone
from SALVETEIAPP.utils import render_to_pdf #created in step 4
from django.contrib import messages
def buy_item_work_view(request, id1, id2 ):
    """Attach translator ``id1`` to work item ``id2`` and create an order for it.

    Renders the item-detail page for authenticated users, otherwise the
    login form (note the order is created in either case).
    """
    obj = Item_to_buy_work.objects.get(id=id2)
    translator_id = Translator.objects.get(id=id1)
    obj.translator.add(translator_id)
    # NOTE(review): assumes at least one order already exists —
    # Orders.objects.last() is None on an empty table, and id arithmetic
    # races under concurrent requests; confirm.
    obj1 = Orders(id=Orders.objects.last().id+1)
    print("id", obj1.id)
    # var = Orders.objects.all()
    obj1.user=request.user
    obj1.save()
    obj1.works.add(obj)
    obj1.translator.add(Translator.objects.get(id=id1))
    # The order is valid for one day after the most recent order's date_field.
    obj1.validity_date = Orders.objects.last().date_field + timedelta(days=1)
    obj1.items_value = obj1.retrieve_items_price()
    obj1.total_price=0
    obj1.items_count += 1
    obj1.save()
    # Keep only the translator selected in this request on the item:
    # remove first/last entries that do not match id1.
    var1 = obj.translator.all().first().id
    var2 = obj.translator.all().last().id
    # print(var1)
    if var1 != id1:
        obj.translator.remove(obj.translator.all().first())
    if var2 != id1:
        obj.translator.remove(obj.translator.all().last())
    obj.translator_instance = obj.translator.last()
    print("translator id", obj.translator.all())
    print("works", obj1.works.all())
    print("Translator instance", obj.translator_instance.name)
    context = {'obj' : obj ,}
    if request.user.is_authenticated:
        template_path = "Shop/item-detail.html"
    else:
        template_path = "Person/LOGIN_FORM.html"
    return render(request, template_path, context)
def buy_item_card_view(request, id1, id2):
    """Attach translator ``id1`` to card item ``id2`` and create an order for it.

    Mirrors :func:`buy_item_work_view` but operates on ``Item_to_buy_card``
    and the order's ``cards`` relation.
    """
    obj = Item_to_buy_card.objects.get(id=id2)
    translator_id = Translator.objects.get(id=id1)
    obj.translator.add(translator_id)
    # NOTE(review): assumes at least one order already exists —
    # Orders.objects.last() is None on an empty table; confirm.
    obj1 = Orders(id=Orders.objects.last().id+1)
    print("id", obj1.id)
    # var = Orders.objects.all()
    obj1.user=request.user
    obj1.save()
    obj1.cards.add(obj)
    obj1.translator.add(Translator.objects.get(id=id1))
    # The order is valid for one day after the most recent order's date_field.
    obj1.validity_date = Orders.objects.last().date_field + timedelta(days=1)
    obj1.items_value = obj1.retrieve_items_price()
    obj1.total_price=0
    obj1.items_count += 1
    obj1.save()
    # Keep only the translator selected in this request on the item.
    var1 = obj.translator.all().first().id
    var2 = obj.translator.all().last().id
    if var1 != id1:
        obj.translator.remove(obj.translator.all().first())
    if var2 != id1:
        obj.translator.remove(obj.translator.all().last())
    obj.translator_instance = obj.translator.last()
    print("translator id", obj.translator.all())
    print("cards", obj1.cards.all())
    print("Translator instance", obj.translator_instance.name)
    context = {'obj' : obj ,}
    if request.user.is_authenticated:
        template_path = "Shop/item-detail.html"
    else:
        template_path = "Person/LOGIN_FORM.html"
    return render(request, template_path, context)
def retrieve_orders_associated_with_user(request):
    """List the requesting user's orders, purging expired ones first.

    An order expires one day after its ``validity_date``; expired orders
    are deleted and the user notified. Which template renders the list
    depends on the user's AccountSettings sort preferences.
    """
    var = Orders.objects.all()
    var.orders_associated = Orders.objects.retrieve_orders_associated(request)
    var.value_total_price_items = Orders.objects.set_orders_total_price(request)
    var.value_total_price_works = Orders.objects.calculate_total_price_works(request)
    var.value_total_price_cards = Orders.objects.calculate_total_price_cards(request)
    for item in var.orders_associated.all():
        # Bugfix: the expiry check compared with exact equality (==) against
        # timezone.now(), which virtually never matches a timestamp; use <=
        # so any order past its one-day grace period is removed.
        if (item.validity_date + timedelta(days=1) <= timezone.now()):
            item.delete()
            messages.success(request, "Order nr. {id} was not valid and was deleted".format(id=item.id))
    var.var_quantities = Orders.objects.retrieve_items_sorted_by_quantities(request)
    var.var_total_price = Orders.objects.retrieve_items_sorted_by_total_price(request)
    var.var_the_most_recent = Orders.objects.retrieve_items_sorted_by_date_field(request)
    context = {'var' : var ,}
    # Template selection: default listing only when no sort preference is set.
    if (request.user.is_authenticated
        and AccountSettings.objects.get(user=request.user).sort_orders_by_quantities==False
        and AccountSettings.objects.get(user=request.user).display_most_expensive_items_at_the_top==False
        and AccountSettings.objects.get(user=request.user).display_the_most_recent_items_at_the_top==False):
        template_path = "Shop/orders-associated.html"
    elif (request.user.is_authenticated
          and AccountSettings.objects.get(user=request.user).sort_orders_by_quantities==True):
        template_path = "Shop/sort-orders-by-quantities.html"
    elif (request.user.is_authenticated
          and AccountSettings.objects.get(user=request.user).display_most_expensive_items_at_the_top==True):
        template_path = "Shop/display-most-expensive-items-at-the-top.html"
    elif (request.user.is_authenticated
          and AccountSettings.objects.get(user=request.user).display_the_most_recent_items_at_the_top==True):
        template_path = "Shop/display-the-most-recent-items-at-the-top.html"
    else:
        template_path = "Person/LOGIN_FORM.html"
    return render(request, template_path, context)
def view_order_details(request, id):
    """Show the detail page for one of the requesting user's orders.

    Any failure to look the order up renders the bad-request page.
    """
    var = Orders.objects.all()
    try:
        var1 = Orders.objects.retrieve_orders_associated(request).get(id=id)
    except Exception:
        # Bugfix: was a bare ``except:`` which also swallowed SystemExit and
        # KeyboardInterrupt; narrow to Exception while keeping the original
        # "any lookup failure -> bad request" behaviour.
        return render(request, "Shop/bad_request.html")
    var1.value_total_price_items = Orders.objects.set_orders_total_price_detail(request,id=id)
    var1.value_total_price_works = Orders.objects.calculate_total_price_works_detail(request, id=id)
    var1.value_total_price_cards = Orders.objects.calculate_total_price_cards_detail(request, id=id)
    context = {'var1' : var1, }
    if request.user.is_authenticated:
        template_path = "Shop/order-associated-detail.html"
    else:
        template_path = "Person/LOGIN_FORM.html"
    return render(request, template_path, context)
class GeneratePdf(View):
    """Render an invoice PDF covering all of the user's not-yet-returned orders."""

    def get(self, request, *args, **kwargs):
        if request.user.is_authenticated:
            var = Orders.objects.all()
            # The very first order supplies the seller's ("from") header fields.
            obj = Orders.objects.first()
            # var.orders_associated = Orders.objects.retrieve_orders_associated(request)
            var.orders_associated = Orders.objects.retrieve_orders_not_returned(request)
            var.value_total_price_items = Orders.objects.set_orders_total_price(request)
            var.value_total_price_works = Orders.objects.calculate_total_price_works(request)
            var.value_total_price_cards = Orders.objects.calculate_total_price_cards(request)
            var1 = request.user
            # Payment term: one day past the most recent order's validity date.
            var2 = Orders.objects.last().validity_date + timedelta(days=1)
            data = { 'date' : date.today(),
                     'from_person': obj.from_person,
                     'from_person_address': obj.from_person_address,
                     'to_person' : var1,
                     'to_person_address': ContactInformation.objects.get(user=request.user).address,
                     'var' : var,
                     'term': var2,
                     'number': request.user.id}
            pdf = render_to_pdf('Shop/invoice.html', data)
            return HttpResponse(pdf, content_type='application/pdf')
        else:
            return render(request, "index_simple_layout_login.html")
def GeneratePdf_detail(request,id,*args, **kwargs):
    """Render an invoice PDF for the single order ``id`` belonging to the user."""
    if request.user.is_authenticated:
        # The very first order supplies the seller's ("from") header fields.
        obj = Orders.objects.first()
        var1 = Orders.objects.retrieve_orders_associated(request).get(id=id)
        var1.value_total_price_items = Orders.objects.set_orders_total_price_detail(request,id=id)
        var1.value_total_price_works = Orders.objects.calculate_total_price_works_detail(request, id=id)
        var1.value_total_price_cards = Orders.objects.calculate_total_price_cards_detail(request, id=id)
        var = request.user
        # Payment term: one day past this order's validity date.
        var2 = Orders.objects.get(id=id).validity_date + timedelta(days=1)
        # print("var1 items", var1.value_total_price_items)
        data = { 'date' : date.today(),
                 'from_person': obj.from_person,
                 'from_person_address': obj.from_person_address,
                 'to_person' : var,
                 'to_person_address': ContactInformation.objects.get(user=request.user).address,
                 'var1' : var1,
                 'term': var2,
                 'number': var1.id,
                 }
        pdf = render_to_pdf('Shop/invoice-detail.html', data)
        return HttpResponse(pdf, content_type='application/pdf')
    else:
        return render(request, "index_simple_layout_login.html")
def order_delete_view(request, id):
    """Delete one of the requesting user's orders and redirect to the order list.

    Renders the bad-request page when ``id`` is out of range.
    """
    # Robustness: Orders.objects.last() is None on an empty table; the old
    # code raised AttributeError (HTTP 500) in that case.
    last_order = Orders.objects.last()
    if last_order is not None and id <= last_order.id:
        obj = Orders.objects.retrieve_orders_associated(request).get(id=id)
        obj.delete()
        messages.success(request, "Order nr. {id} deleted".format(id=id))
        return redirect('retrieve_orders_associated_with_user')
    else:
        return render(request, 'Shop/bad_request.html')
    # Bugfix: removed the unreachable render() tail that followed the
    # if/else above (both branches already return).
def cancel_orders_view(request):
    """Delete every order associated with the requesting user, then redirect.

    Bugfix: removed the unreachable template-selection tail that followed
    the unconditional ``return redirect`` in the original.
    """
    qs = Orders.objects.retrieve_orders_associated(request)
    qs.delete()
    messages.success(request, "Orders deleted")
    return redirect('retrieve_orders_associated_with_user')
def return_item_view(request, id):
    """Mark order ``id`` as returned and record it in Returned_items.

    Renders the bad-request page when ``id`` is out of range.
    """
    # Robustness: Orders.objects.last() is None on an empty table; the old
    # code raised AttributeError (HTTP 500) in that case.
    last_order = Orders.objects.last()
    if last_order is not None and id <= last_order.id:
        obj = Orders.objects.retrieve_orders_associated(request).get(id=id)
        var = Returned_items()
        var.save()  # needs a primary key before the M2M .add() below
        var.orders.add(obj)
        var.user = request.user
        obj.returned = True
        obj.save()
        var.save()
        messages.success(request, "Order nr. {id} will be returned to the seller".format(id=id))
        return redirect('retrieve_orders_associated_with_user')
    else:
        return render(request, 'Shop/bad_request.html')
    # Bugfix: removed the unreachable render() tail that followed the
    # if/else above (both branches already return).
def return_items_associated(request):
    """Mark every order of the requesting user as returned, then redirect.

    Bugfix: removed the unreachable template-selection tail after the
    unconditional ``return redirect``; the two loops over ``qs`` (add to
    the Returned_items record, flag as returned) are merged into one.
    """
    qs = Orders.objects.retrieve_orders_associated(request)
    var = Returned_items()
    var.save()  # needs a primary key before the M2M .add() below
    var.user = request.user
    for item in qs:
        var.orders.add(item)
        item.returned = True
        item.save()
    var.save()
    messages.success(request, "Orders will be returned to the seller")
    return redirect('retrieve_orders_associated_with_user')
def returned_items_view(request):
    """Render the list of items the current user has returned to the seller."""
    returned = Returned_items.objects.filter(user=request.user)
    if request.user.is_authenticated:
        template_path = "Shop/returned-items.html"
    else:
        template_path = "index_simple_layout_login.html"
    return render(request, template_path, {'var': returned})
def render_invoice(request):
    """Redirect to the PDF invoice view when the user has non-returned orders."""
    if not request.user.is_authenticated:
        return render(request, "index_simple_layout_login.html")
    pending = Orders.objects.retrieve_orders_not_returned(request)
    if pending.all().count() != 0:
        return redirect('GeneratePdf')
    return render(request, 'Shop/bad_request.html')
#@login_required
def update_order(request, id):
    """Edit order ``id`` through OrdersForm; recompute its item aggregates on save.

    On a valid POST the order's item count and value are rebuilt from its
    works and cards and the user is redirected to the order list; otherwise
    the edit form is rendered (login page for unauthorized users).
    """
    obj = get_object_or_404(Orders, id=id)
    form = OrdersForm(request.POST or None, instance=obj)
    context_dictionary = {'form': form, }
    if form.is_valid():
        obj = form.save()
        obj.save()
        # Bugfix: items_value was never reset before re-accumulation (only
        # items_count was), so repeated edits double-counted item prices.
        obj.items_count = 0
        obj.items_value = 0
        for item in obj.works.all():
            obj.items_value += item.price
            obj.items_count += 1
        for item in obj.cards.all():
            obj.items_value += item.price
            obj.items_count += 1
        obj.save()
        return HttpResponseRedirect("/orders")
    # Only the order's owner or staff may see the edit form.
    if request.user.is_authenticated and (request.user == obj.user or request.user.is_staff):
        template_path = "Shop/update-order.html"
    else:
        template_path = "index_simple_layout_login.html"
    return render(request, template_path, context_dictionary)
| 21.538583 | 108 | 0.665058 |
ace197f7adf23595633087992ae0d375e16a0e3d | 1,346 | py | Python | test/test_list_omni_transactions_by_address_ri_senders.py | Crypto-APIs/Crypto_APIs_2.0_SDK_Python | c59ebd914850622b2c6500c4c30af31fb9cecf0e | [
"MIT"
] | 5 | 2021-05-17T04:45:03.000Z | 2022-03-23T12:51:46.000Z | test/test_list_omni_transactions_by_address_ri_senders.py | Crypto-APIs/Crypto_APIs_2.0_SDK_Python | c59ebd914850622b2c6500c4c30af31fb9cecf0e | [
"MIT"
] | null | null | null | test/test_list_omni_transactions_by_address_ri_senders.py | Crypto-APIs/Crypto_APIs_2.0_SDK_Python | c59ebd914850622b2c6500c4c30af31fb9cecf0e | [
"MIT"
] | 2 | 2021-06-02T07:32:26.000Z | 2022-02-12T02:36:23.000Z | """
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: developers@cryptoapis.io
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import cryptoapis
from cryptoapis.model.list_omni_transactions_by_address_ri_senders import ListOmniTransactionsByAddressRISenders
class TestListOmniTransactionsByAddressRISenders(unittest.TestCase):
    """ListOmniTransactionsByAddressRISenders unit test stubs (generated)."""

    def setUp(self):
        # No fixtures required for this generated stub.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testListOmniTransactionsByAddressRISenders(self):
        """Test ListOmniTransactionsByAddressRISenders"""
        # FIXME: construct object with mandatory attributes with example values
        # model = ListOmniTransactionsByAddressRISenders()  # noqa: E501
        pass
if __name__ == '__main__':
    # Allow running this test module directly via the unittest runner.
    unittest.main()
| 36.378378 | 484 | 0.771174 |
ace198483be890e24503f898c7a810d8308edf46 | 15,348 | py | Python | artifacts/old_dataset_versions/minimal_commits_v02/tequila/tequila#1/before/simulator_qulacs.py | MattePalte/Bugs-Quantum-Computing-Platforms | 0c1c805fd5dfce465a8955ee3faf81037023a23e | [
"MIT"
] | 3 | 2021-11-08T11:46:42.000Z | 2021-12-27T10:13:38.000Z | artifacts/minimal_bugfixes/tequila/tequila#1/before/simulator_qulacs.py | MattePalte/Bugs-Quantum-Computing-Platforms | 0c1c805fd5dfce465a8955ee3faf81037023a23e | [
"MIT"
] | 2 | 2021-11-09T14:57:09.000Z | 2022-01-12T12:35:58.000Z | artifacts/old_dataset_versions/minimal_commits_v02/tequila/tequila#1/before/simulator_qulacs.py | MattePalte/Bugs-Quantum-Computing-Platforms | 0c1c805fd5dfce465a8955ee3faf81037023a23e | [
"MIT"
] | null | null | null | import qulacs
import numbers, numpy
from tequila import TequilaException
from tequila.utils.bitstrings import BitNumbering, BitString, BitStringLSB
from tequila.wavefunction.qubit_wavefunction import QubitWaveFunction
from tequila.simulators.simulator_base import BackendCircuit, BackendExpectationValue, QCircuit, change_basis
"""
Developer Note:
Qulacs uses different Rotational Gate conventions: Rx(angle) = exp(i angle/2 X) instead of exp(-i angle/2 X)
And the same for MultiPauli rotational gates
The angles are scaled with -1.0 to keep things consistent with the rest of tequila
"""
class TequilaQulacsException(TequilaException):
    """Raised when the qulacs simulator backend encounters an error."""

    def __str__(self):
        prefix = "Error in qulacs backend:"
        return prefix + self.message
class BackendCircuitQulacs(BackendCircuit):
    """tequila circuit backend that compiles to and simulates with qulacs.

    Rotation angles are multiplied by -1.0 before being handed to qulacs
    because qulacs defines Rx(angle) = exp(i angle/2 X) while tequila uses
    exp(-i angle/2 X) (see the module docstring).
    """

    # Compiler switches: True means tequila rewrites that construct before
    # this backend receives the circuit.
    compiler_arguments = {
        "trotterized": True,
        "swap": False,
        "multitarget": True,
        "controlled_rotation": True,  # needed for gates depending on variables
        "gaussian": True,
        "exponential_pauli": False,
        "controlled_exponential_pauli": True,
        "phase": True,
        "power": True,
        "hadamard_power": True,
        "controlled_power": True,
        "controlled_phase": True,
        "toffoli": False,
        "phase_to_z": True,
        "cc_max": False
    }

    # qulacs enumerates qubits least-significant-bit first.
    numbering = BitNumbering.LSB

    def initialize_state(self, n_qubits = None):
        """Return a fresh qulacs.QuantumState, sized to this circuit by default."""
        if n_qubits is None:
            n_qubits = self.n_qubits
        return qulacs.QuantumState(n_qubits)

    def __init__(self, abstract_circuit, noise=None, *args, **kwargs):
        """Compile ``abstract_circuit`` into a qulacs ParametricQuantumCircuit.

        :param abstract_circuit: tequila circuit to compile
        :param noise: optional noise model; when given, noise channels are
            appended after each gate via :meth:`add_noise_to_circuit`
        """
        # Dispatch table from tequila gate names to qulacs constructors.
        # Parametrized rotations map to a pair: (adder of a *parametric*
        # qulacs gate for variable angles, plain constructor for fixed ones).
        self.op_lookup = {
            'I': qulacs.gate.Identity,
            'X': qulacs.gate.X,
            'Y': qulacs.gate.Y,
            'Z': qulacs.gate.Z,
            'H': qulacs.gate.H,
            'Rx': (lambda c: c.add_parametric_RX_gate, qulacs.gate.RX),
            'Ry': (lambda c: c.add_parametric_RY_gate, qulacs.gate.RY),
            'Rz': (lambda c: c.add_parametric_RZ_gate, qulacs.gate.RZ),
            'SWAP': qulacs.gate.SWAP,
            'Measure': qulacs.gate.Measurement,
            'Exp-Pauli': None
        }
        # Angle callables collected while compiling; update_variables
        # re-evaluates them by position against the parametric circuit.
        self.variables = []
        super().__init__(abstract_circuit=abstract_circuit, noise=noise, *args, **kwargs)
        self.has_noise=False
        if noise is not None:
            self.has_noise=True
            # Builders for qulacs noise gates, keyed by tequila noise name.
            self.noise_lookup = {
                'bit flip': [qulacs.gate.BitFlipNoise],
                'phase flip': [lambda target, prob: qulacs.gate.Probabilistic([prob],[qulacs.gate.Z(target)])],
                'phase damp': [lambda target, prob: qulacs.gate.DephasingNoise(target,(1/2)*(1-numpy.sqrt(1-prob)))],
                'amplitude damp': [qulacs.gate.AmplitudeDampingNoise],
                'phase-amplitude damp': [qulacs.gate.AmplitudeDampingNoise,
                                         lambda target, prob: qulacs.gate.DephasingNoise(target,(1/2)*(1-numpy.sqrt(1-prob)))
                                         ],
                'depolarizing': [lambda target,prob: qulacs.gate.DepolarizingNoise(target,3*prob/4)]
            }
            self.circuit=self.add_noise_to_circuit(noise)

    def update_variables(self, variables):
        """Re-evaluate all registered angle callables and push them into qulacs."""
        for k, angle in enumerate(self.variables):
            self.circuit.set_parameter(k, angle(variables))

    def do_simulate(self, variables, initial_state, *args, **kwargs):
        """Apply the compiled circuit to ``initial_state`` and return the wavefunction."""
        state = self.initialize_state(n_qubits=self.n_qubits)
        # Convert the integer basis state into qulacs' LSB convention.
        lsb = BitStringLSB.from_int(initial_state, nbits=self.n_qubits)
        state.set_computational_basis(BitString.from_binary(lsb.binary).integer)
        self.circuit.update_quantum_state(state)
        wfn = QubitWaveFunction.from_array(arr=state.get_vector(), numbering=self.numbering)
        return wfn

    def convert_measurements(self, backend_result) -> QubitWaveFunction:
        """Histogram raw qulacs samples (LSB integers) into a QubitWaveFunction."""
        result = QubitWaveFunction()
        # todo there are faster ways
        for k in backend_result:
            # Re-key each sample from qulacs' LSB ordering to BitString (MSB).
            converted_key = BitString.from_binary(BitStringLSB.from_int(integer=k, nbits=self.n_qubits).binary)
            if converted_key in result._state:
                result._state[converted_key] += 1
            else:
                result._state[converted_key] = 1
        return result

    def do_sample(self, samples, circuit, noise_model=None, initial_state=0, *args, **kwargs) -> QubitWaveFunction:
        """Draw ``samples`` shots from the circuit applied to ``initial_state``."""
        state = self.initialize_state(self.n_qubits)
        lsb = BitStringLSB.from_int(initial_state, nbits=self.n_qubits)
        state.set_computational_basis(BitString.from_binary(lsb.binary).integer)
        self.circuit.update_quantum_state(state)
        if hasattr(self, "measurements"):
            # Explicit measurement gates present: sample shot-by-shot,
            # reading each measured qubit's classical register per shot.
            result = {}
            for sample in range(samples):
                sample_result = {}
                for t, m in self.measurements.items():
                    m.update_quantum_state(state)
                    sample_result[t] = state.get_classical_value(t)
                sample_result = dict(sorted(sample_result.items(), key=lambda x: x[0]))
                binary = BitString.from_array(sample_result.values())
                if binary in result:
                    result[binary] += 1
                else:
                    result[binary] = 1
            return QubitWaveFunction(state=result)
        else:
            # No explicit measurements: let qulacs sample the full register.
            result = state.sampling(samples)
            return self.convert_measurements(backend_result=result)

    def fast_return(self, abstract_circuit):
        # Never skip compilation; a qulacs circuit is always built.
        return False

    def initialize_circuit(self, *args, **kwargs):
        """Create an empty parametric qulacs circuit over all mapped qubits."""
        n_qubits = len(self.qubit_map)
        return qulacs.ParametricQuantumCircuit(n_qubits)

    def add_exponential_pauli_gate(self, gate, circuit, variables, *args, **kwargs):
        """Append exp(-i angle/2 * P) for Pauli string P as a qulacs multi-Pauli rotation."""
        assert not gate.is_controlled()
        # qulacs encodes Pauli axes as integers: X=1, Y=2, Z=3.
        convert = {'x': 1, 'y': 2, 'z': 3}
        pind = [convert[x.lower()] for x in gate.paulistring.values()]
        qind = [self.qubit_map[x] for x in gate.paulistring.keys()]
        if len(gate.extract_variables()) > 0:
            # Variable angle: register the callable for later update_variables calls.
            self.variables.append(-gate.parameter * gate.paulistring.coeff)
            circuit.add_parametric_multi_Pauli_rotation_gate(qind, pind,
                                                             -gate.parameter(variables) * gate.paulistring.coeff)
        else:
            circuit.add_multi_Pauli_rotation_gate(qind, pind, -gate.parameter(variables) * gate.paulistring.coeff)

    def add_parametrized_gate(self, gate, circuit, variables, *args, **kwargs):
        """Append a rotation (variable or fixed angle) or an Exp-Pauli gate."""
        op = self.op_lookup[gate.name]
        if gate.name == 'Exp-Pauli':
            self.add_exponential_pauli_gate(gate, circuit, variables)
            return
        else:
            if len(gate.extract_variables()) > 0:
                # Variable angle: use the parametric adder and register the callable.
                op = op[0]
                self.variables.append(-gate.parameter)
                op(circuit)(self.qubit_map[gate.target[0]], -gate.parameter(variables=variables))
                if gate.is_controlled():
                    raise TequilaQulacsException("Gates which depend on variables can not be controlled! Gate was:\n{}".format(gate))
                return
            else:
                # Fixed angle: plain gate, optionally promoted to a controlled matrix gate.
                op = op[1]
                qulacs_gate = op(self.qubit_map[gate.target[0]], -gate.parameter(variables=variables))
                if gate.is_controlled():
                    qulacs_gate = qulacs.gate.to_matrix_gate(qulacs_gate)
                    for c in gate.control:
                        qulacs_gate.add_control_qubit(self.qubit_map[c], 1)
                circuit.add_gate(qulacs_gate)

    def add_basic_gate(self, gate, circuit, *args, **kwargs):
        """Append a non-parametrized gate, promoting to a controlled matrix gate if needed."""
        op = self.op_lookup[gate.name]
        qulacs_gate = op(*[self.qubit_map[t] for t in gate.target])
        if gate.is_controlled():
            qulacs_gate = qulacs.gate.to_matrix_gate(qulacs_gate)
            for c in gate.control:
                qulacs_gate.add_control_qubit(self.qubit_map[c], 1)
        circuit.add_gate(qulacs_gate)

    def add_measurement(self, gate, circuit, *args, **kwargs):
        """Register measurement gates (classical register index equals qubit index)."""
        measurements = {t: qulacs.gate.Measurement(t, t) for t in gate.target}
        if hasattr(self, "measurements"):
            for key in measurements:
                if key in self.measurements:
                    raise TequilaQulacsException("Measurement on qubit {} was given twice".format(key))
            self.measurements = {**self.measurements, **measurements}
        else:
            self.measurements = measurements

    def add_noise_to_circuit(self,noise_model):
        """Rebuild the circuit, inserting noise channels after every affected gate.

        A noise entry fires on a gate when the gate's qubit count (targets +
        controls) equals the noise level; one channel per affected qubit is added.
        """
        c=self.circuit
        n=noise_model
        g_count=c.get_gate_count()
        new=self.initialize_circuit()
        for i in range(g_count):
            g=c.get_gate(i)
            new.add_gate(g)
            qubits=g.get_target_index_list() + g.get_control_index_list()
            for noise in n.noises:
                if len(qubits) == noise.level:
                    for j,channel in enumerate(self.noise_lookup[noise.name]):
                        for q in qubits:
                            chan=channel(q,noise.probs[j])
                            new.add_gate(chan)
        return new

    def optimize_circuit(self, circuit, max_block_size: int = 4, silent: bool = True, *args, **kwargs):
        """
        Can be overwritten if the backend supports its own circuit optimization
        To be clear: Optimization means optimizing the compiled circuit w.r.t depth not
        optimizing parameters
        :return: Optimized circuit
        """
        old = circuit.calculate_depth()
        opt = qulacs.circuit.QuantumCircuitOptimizer()
        opt.optimize(circuit, max_block_size)
        if not silent:
            print("qulacs: optimized circuit depth from {} to {} with max_block_size {}".format(old,
                                                                                               circuit.calculate_depth(),
                                                                                               max_block_size))
        return circuit
class BackendExpectationValueQulacs(BackendExpectationValue):
    """Expectation value of qubit Hamiltonians over a qulacs-simulated circuit."""

    BackendCircuitType = BackendCircuitQulacs
    use_mapping = True

    def simulate(self, variables, *args, **kwargs) -> numpy.array:
        """Evaluate <psi(variables)| H |psi(variables)> for every stored Hamiltonian."""
        # fast return if possible
        if self.H is None:
            return numpy.asarray([0.0])
        elif len(self.H) == 0:
            return numpy.asarray([0.0])
        elif isinstance(self.H, numbers.Number):
            # Bugfix: was ``numpy.asarray[self.H]`` — subscripting the
            # function instead of calling it — which raised TypeError
            # instead of returning the constant Hamiltonian.
            return numpy.asarray([self.H])
        self.U.update_variables(variables)
        state = self.U.initialize_state()
        self.U.circuit.update_quantum_state(state)
        result = []
        for H in self.H:
            if isinstance(H, numbers.Number):
                result.append(H)  # those are accumulated unit strings, e.g 0.1*X(3) in wfn on qubits 0,1
            else:
                result.append(H.get_expectation_value(state))
        return numpy.asarray(result)

    def initialize_hamiltonian(self, hamiltonians):
        """Translate tequila Hamiltonians into qulacs Observables (plus constants)."""
        result = []
        for H in hamiltonians:
            if self.use_mapping:
                # initialize only the active parts of the Hamiltonian and pre-evaluate the passive ones
                # passive parts are the components of each individual pauli string which act on qubits where the circuit does not act on
                # if the circuit does not act on those qubits the passive parts are always evaluating to 1 (if the pauli operator is Z) or 0 (otherwise)
                # since those qubits are always in state |0>
                non_zero_strings = []
                unit_strings = []
                for ps in H.paulistrings:
                    string = ""
                    for k, v in ps.items():
                        if k in self.U.qubit_map:
                            string += v.upper() + " " + str(self.U.qubit_map[k]) + " "
                        elif v.upper() != "Z":
                            # Passive non-Z Pauli on |0> has zero expectation.
                            string = "ZERO"
                            break
                    string = string.strip()
                    # NOTE(review): an all-passive-Z string yields string == ""
                    # and matches the first branch, so unit_strings appears
                    # unreachable here — confirm intended behaviour.
                    if string != "ZERO":
                        non_zero_strings.append((ps.coeff, string))
                    elif string == "":
                        unit_strings.append((ps.coeff, string))
                # accumulate unit strings
                if len(unit_strings) > 0:
                    coeffs = [x[0] for x in unit_strings]
                    result.append(sum(coeffs))
                if len(non_zero_strings) > 0:
                    qulacs_H = qulacs.Observable(self.n_qubits)
                    for coeff, string in non_zero_strings:
                        qulacs_H.add_operator(coeff, string)
                    result.append(qulacs_H)
            else:
                if self.U.n_qubits < H.n_qubits:
                    raise TequilaQulacsException(
                        "Hamiltonian has more qubits as the Unitary. Mapped expectationvalues are switched off")
                qulacs_H = qulacs.Observable(self.n_qubits)
                for ps in H.paulistrings:
                    string = ""
                    for k, v in ps.items():
                        string += v.upper() + " " + str(k)
                    qulacs_H.add_operator(ps.coeff, string)
                result.append(qulacs_H)
        return result

    def sample(self, variables, samples, *args, **kwargs) -> numpy.array:
        """Estimate each Hamiltonian's expectation value from measurement shots."""
        # todo: generalize in baseclass. Do Hamiltonian mapping on initialization
        self.update_variables(variables)
        state = self.U.initialize_state()
        self.U.circuit.update_quantum_state(state)
        result = []
        for H in self._abstract_hamiltonians:
            E = 0.0
            for ps in H.paulistrings:
                # Build the basis-change circuit so each Pauli can be
                # measured in the computational (Z) basis.
                bc = QCircuit()
                zero_string = False
                for idx, p in ps.items():
                    if idx not in self.U.qubit_map:
                        if p.upper() != "Z":
                            # Passive non-Z Pauli on |0>: whole string is zero.
                            zero_string = True
                    else:
                        bc += change_basis(target=idx, axis=p)
                if zero_string:
                    continue
                qbc = self.U.create_circuit(abstract_circuit=bc, variables=None)
                Esamples = []
                for sample in range(samples):
                    if self.U.has_noise:
                        # With noise the state is stochastic: re-simulate per shot.
                        state = self.U.initialize_state()
                        self.U.circuit.update_quantum_state(state)
                        state_tmp = state
                    else:
                        state_tmp = state.copy()
                    if len(bc.gates) > 0:  # otherwise there is no basis change (empty qulacs circuit does not work out)
                        qbc.update_quantum_state(state_tmp)
                    ps_measure = 1.0
                    for idx in ps.keys():
                        if idx not in self.U.qubit_map:
                            continue  # means its 1 or Z and <0|Z|0> = 1 anyway
                        else:
                            M = qulacs.gate.Measurement(self.U.qubit_map[idx], self.U.qubit_map[idx])
                            M.update_quantum_state(state_tmp)
                            measured = state_tmp.get_classical_value(self.U.qubit_map[idx])
                            ps_measure *= (-2.0 * measured + 1.0)  # 0 becomes 1 and 1 becomes -1
                    Esamples.append(ps_measure)
                E += ps.coeff * sum(Esamples) / len(Esamples)
            result.append(E)
        return numpy.asarray(result)
| 44.616279 | 152 | 0.570433 |
ace1991f4129193154748f8e3a3d4bf8e917e8ec | 3,740 | py | Python | python_developer_tools/cv/PyTorch_Networks/SemanticSegmentation/Unet.py | carlsummer/python_developer_tools | a8c4365b7cc601cda55648cdfd8c0cb1faae132f | [
"Apache-2.0"
] | 32 | 2021-06-21T04:49:48.000Z | 2022-03-29T05:46:59.000Z | python_developer_tools/cv/PyTorch_Networks/SemanticSegmentation/Unet.py | HonestyBrave/python_developer_tools | fc0dcf5c4ef088e2e535206dc82f09bbfd01f280 | [
"Apache-2.0"
] | 1 | 2021-11-12T03:45:55.000Z | 2021-11-12T03:45:55.000Z | python_developer_tools/cv/PyTorch_Networks/SemanticSegmentation/Unet.py | HonestyBrave/python_developer_tools | fc0dcf5c4ef088e2e535206dc82f09bbfd01f280 | [
"Apache-2.0"
] | 10 | 2021-06-03T08:05:05.000Z | 2021-12-13T03:10:42.000Z | # !/usr/bin/env python
# -- coding: utf-8 --
# @Time : 2020/7/8 13:51
# @Author : liumin
# @File : unet.py
import torch
import torch.nn as nn
import torch.nn.functional as F
def Conv3x3BNReLU(in_channels, out_channels, stride, groups=1):
    """3x3 convolution (padding 1) followed by batch norm and in-place ReLU."""
    layers = [
        nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                  kernel_size=3, stride=stride, padding=1, groups=groups),
        nn.BatchNorm2d(out_channels),
        nn.ReLU(inplace=True),
    ]
    return nn.Sequential(*layers)
def Conv1x1BNReLU(in_channels, out_channels):
    """1x1 convolution followed by batch norm and in-place ReLU."""
    return nn.Sequential(
        nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1),
        nn.BatchNorm2d(out_channels),
        nn.ReLU(inplace=True),
    )
def Conv1x1BN(in_channels, out_channels):
    """1x1 convolution followed by batch norm, with no activation."""
    return nn.Sequential(
        nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1),
        nn.BatchNorm2d(out_channels),
    )
class DoubleConv(nn.Module):
    """Two stacked stride-1 Conv3x3BNReLU stages (the classic U-Net block)."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        stages = (
            Conv3x3BNReLU(in_channels, out_channels, stride=1),
            Conv3x3BNReLU(out_channels, out_channels, stride=1),
        )
        self.double_conv = nn.Sequential(*stages)

    def forward(self, x):
        return self.double_conv(x)
class DownConv(nn.Module):
    """Encoder stage: DoubleConv followed by 2x2 max-pooling.

    Note the pooling is applied *after* the convolutions, despite the
    attribute declaration order.
    """

    def __init__(self, in_channels, out_channels, stride=2):
        super().__init__()
        self.pool = nn.MaxPool2d(kernel_size=2, stride=stride)
        self.double_conv = DoubleConv(in_channels, out_channels)

    def forward(self, x):
        features = self.double_conv(x)
        return self.pool(features)
class UpConv(nn.Module):
    """Decoder stage: halve channels, upsample, pad to the skip tensor, concat, DoubleConv."""

    def __init__(self, in_channels, out_channels, bilinear=True):
        super().__init__()
        self.reduce = Conv1x1BNReLU(in_channels, in_channels // 2)
        # if bilinear, use the normal convolutions to reduce the number of channels
        if bilinear:
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        else:
            self.up = nn.ConvTranspose2d(in_channels // 2, in_channels // 2, kernel_size=2, stride=2)
        self.conv = DoubleConv(in_channels, out_channels)

    def forward(self, x1, x2):
        upsampled = self.up(self.reduce(x1))
        # Pad the upsampled tensor so its spatial size matches the skip tensor x2.
        h_gap = x2.size(2) - upsampled.size(2)
        w_gap = x2.size(3) - upsampled.size(3)
        upsampled = F.pad(
            upsampled,
            [w_gap // 2, w_gap - w_gap // 2, h_gap // 2, h_gap - h_gap // 2],
        )
        merged = torch.cat([x2, upsampled], dim=1)
        return self.conv(merged)
class UNet(nn.Module):
    """U-Net encoder/decoder for semantic segmentation, bilinear decoder upsampling."""

    def __init__(self, num_classes):
        super(UNet, self).__init__()
        bilinear = True
        # Encoder path.
        self.conv = DoubleConv(3, 64)
        self.down1 = DownConv(64, 128)
        self.down2 = DownConv(128, 256)
        self.down3 = DownConv(256, 512)
        self.down4 = DownConv(512, 1024)
        # Decoder path with skip connections.
        self.up1 = UpConv(1024, 512, bilinear)
        self.up2 = UpConv(512, 256, bilinear)
        self.up3 = UpConv(256, 128, bilinear)
        self.up4 = UpConv(128, 64, bilinear)
        # Per-pixel class logits.
        self.outconv = nn.Conv2d(64, num_classes, kernel_size=1)

    def forward(self, x):
        # Run the encoder, keeping every scale for the skip connections.
        skips = [self.conv(x)]
        for stage in (self.down1, self.down2, self.down3, self.down4):
            skips.append(stage(skips[-1]))
        # Decoder consumes the skips in reverse order.
        out = self.up1(skips[4], skips[3])
        out = self.up2(out, skips[2])
        out = self.up3(out, skips[1])
        out = self.up4(out, skips[0])
        return self.outconv(out)
if __name__ == '__main__':
    # Smoke test: build the network and run one forward pass at U-Net's
    # canonical 572x572 input size.
    model = UNet(19)
    print(model)
    # Renamed from ``input``, which shadowed the builtin of the same name.
    x = torch.randn(1, 3, 572, 572)
    out = model(x)
print(out.shape) | 31.166667 | 130 | 0.614439 |
ace199b142c271bf80a717d5b69da72608dd37ba | 686 | py | Python | lab9.py | Madelinep2/IA241-github2 | e03b21d0d3230a3f60ada21827a007313fe1a752 | [
"MIT"
] | null | null | null | lab9.py | Madelinep2/IA241-github2 | e03b21d0d3230a3f60ada21827a007313fe1a752 | [
"MIT"
] | null | null | null | lab9.py | Madelinep2/IA241-github2 | e03b21d0d3230a3f60ada21827a007313fe1a752 | [
"MIT"
] | null | null | null | '''
lab 9 class lab
'''
class my_stat():
    """Small calculator: summation, product, factorial and permutations."""

    def cal_sigma(self, m, n):
        """Sum of the integers n, n+1, ..., m."""
        return sum(range(n, m + 1))

    def cal_pi(self, m, n):
        """Product of the integers n, n+1, ..., m."""
        product = 1
        for factor in range(n, m + 1):
            product *= factor
        return product

    def cal_f(self, m):
        """Factorial of m, computed recursively (cal_f(0) == 1)."""
        return 1 if m == 0 else m * self.cal_f(m - 1)

    def cal_p(self, m, n):
        """Number of n-permutations of m items: m! / (m - n)!."""
        return self.cal_f(m) / self.cal_f(m - n)
#3.2
# Exercise the calculator on the assignment's sample inputs.
my_cal = my_stat()
for value in (
    my_cal.cal_sigma(5, 3),
    my_cal.cal_pi(5, 3),
    my_cal.cal_f(5),
    my_cal.cal_p(5, 2),
):
    print(value)
| 14.913043 | 44 | 0.476676 |
ace199bd27b93b2ebfbd5ad8ff1a3db5667b7676 | 253,735 | py | Python | dask/dataframe/core.py | TopicModels/dask | 5bdccd9dbd0c25f904ab229a4658ed35dc864f52 | [
"BSD-3-Clause"
] | null | null | null | dask/dataframe/core.py | TopicModels/dask | 5bdccd9dbd0c25f904ab229a4658ed35dc864f52 | [
"BSD-3-Clause"
] | null | null | null | dask/dataframe/core.py | TopicModels/dask | 5bdccd9dbd0c25f904ab229a4658ed35dc864f52 | [
"BSD-3-Clause"
] | null | null | null | import operator
import warnings
from collections.abc import Iterator, Sequence
from functools import partial, wraps
from numbers import Integral, Number
from operator import getitem
from pprint import pformat
import numpy as np
import pandas as pd
from pandas.api.types import (
is_bool_dtype,
is_datetime64_any_dtype,
is_numeric_dtype,
is_timedelta64_dtype,
)
from pandas.util import cache_readonly
from tlz import first, merge, partition_all, remove, unique
from .. import array as da
from .. import core, threaded
from ..array.core import Array, normalize_arg
from ..base import DaskMethodsMixin, dont_optimize, is_dask_collection, tokenize
from ..blockwise import Blockwise, BlockwiseDep, BlockwiseDepDict, blockwise
from ..context import globalmethod
from ..delayed import Delayed, delayed, unpack_collections
from ..highlevelgraph import HighLevelGraph
from ..utils import (
IndexCallable,
M,
OperatorMethodMixin,
_deprecated,
apply,
derived_from,
funcname,
has_keyword,
is_arraylike,
iter_chunks,
key_split,
memory_repr,
parse_bytes,
partial_by_order,
pseudorandom,
put_lines,
random_state_data,
typename,
)
from ..widgets import get_template
from . import methods
from .accessor import DatetimeAccessor, StringAccessor
from .categorical import CategoricalAccessor, categorize
from .dispatch import (
get_parallel_type,
group_split_dispatch,
hash_object_dispatch,
meta_nonempty,
)
from .optimize import optimize
from .utils import (
PANDAS_GT_110,
PANDAS_GT_120,
check_matching_columns,
clear_known_categories,
drop_by_shallow_copy,
has_known_categories,
index_summary,
insert_meta_param_description,
is_categorical_dtype,
is_dataframe_like,
is_index_like,
is_series_like,
make_meta,
raise_on_meta_error,
valid_divisions,
)
# Sentinel meaning "argument not supplied", so an explicit ``None`` can be
# distinguished from the default throughout this module.
no_default = "__no_default__"

# Globally disable pandas' use of numexpr for expression evaluation.
pd.set_option("compute.use_numexpr", False)
def _numeric_only(func):
"""Decorator for methods that accept a numeric_only kwarg"""
@wraps(func)
def wrapper(self, *args, **kwargs):
# numeric_only is None by default - in that case self = self.
if kwargs.get("numeric_only") is False:
raise NotImplementedError(
"'numeric_only=False' is not implemented in Dask."
)
elif kwargs.get("numeric_only") is True:
self = self._get_numeric_data()
return func(self, *args, **kwargs)
return wrapper
def _concat(args, ignore_index=False):
    """Concatenate a list of partition results into a single object.

    Dispatches on the type of the first (flattened) element: ndarrays go
    through dask.array's concatenate, non-frame objects are collected into a
    plain Series where possible, and frame-likes are concatenated with
    ``methods.concat``.
    """
    if not args:
        return args
    if isinstance(first(core.flatten(args)), np.ndarray):
        return da.core.concatenate3(args)
    if not has_parallel_type(args[0]):
        try:
            return pd.Series(args)
        except Exception:
            # Not coercible to a Series (e.g. heterogeneous scalars);
            # return the raw list unchanged.
            return args
    # We filter out empty partitions here because pandas frequently has
    # inconsistent dtypes in results between empty and non-empty frames.
    # Ideally this would be handled locally for each operation, but in
    # practice this seems easier.  TODO: don't do this.
    args2 = [i for i in args if len(i)]
    return (
        args[0]
        if not args2
        else methods.concat(args2, uniform=True, ignore_index=ignore_index)
    )
def finalize(results):
    # Post-compute hook for dask collections in this module: stitch the
    # computed partitions back together into one pandas object.
    return _concat(results)
class Scalar(DaskMethodsMixin, OperatorMethodMixin):
    """A Dask object to represent a pandas scalar"""
    def __init__(self, dsk, name, meta, divisions=None):
        # divisions is ignored, only present to be compatible with other
        # objects.
        if not isinstance(dsk, HighLevelGraph):
            dsk = HighLevelGraph.from_collections(name, dsk, dependencies=[])
        self.dask = dsk
        self._name = name
        self._parent_meta = pd.Series(dtype="float64")
        meta = make_meta(meta, parent_meta=self._parent_meta)
        # A Scalar's meta must be an actual scalar; reject frame-like metas.
        if is_dataframe_like(meta) or is_series_like(meta) or is_index_like(meta):
            raise TypeError(
                f"Expected meta to specify scalar, got {typename(type(meta))}"
            )
        self._meta = meta
    def __dask_graph__(self):
        # Dask collection protocol: the task graph backing this scalar.
        return self.dask
    def __dask_keys__(self):
        # A scalar lives under exactly one key: (name, 0).
        return [self.key]
    def __dask_tokenize__(self):
        return self._name
    def __dask_layers__(self):
        return (self._name,)
    __dask_optimize__ = globalmethod(
        optimize, key="dataframe_optimize", falsey=dont_optimize
    )
    # Scalars are computed with the threaded scheduler by default.
    __dask_scheduler__ = staticmethod(threaded.get)
    def __dask_postcompute__(self):
        # Only one key exists, so finalization just takes the first result.
        return first, ()
    def __dask_postpersist__(self):
        return self._rebuild, ()
    def _rebuild(self, dsk, *, rename=None):
        """Reconstruct this Scalar around a new graph (used by persist)."""
        name = self._name
        if rename:
            name = rename.get(name, name)
        return Scalar(dsk, name, self._meta, self.divisions)
    @property
    def _meta_nonempty(self):
        # meta is already a concrete scalar, so no fake data is needed.
        return self._meta
    @property
    def dtype(self):
        return self._meta.dtype
    def __dir__(self):
        o = set(dir(type(self)))
        o.update(self.__dict__)
        if not hasattr(self._meta, "dtype"):
            o.remove("dtype")  # dtype only in `dir` if available
        return list(o)
    @property
    def divisions(self):
        """Dummy divisions to be compat with Series and DataFrame"""
        return [None, None]
    def __repr__(self):
        # Truncate long key names to keep the repr readable.
        name = self._name if len(self._name) < 10 else self._name[:7] + "..."
        if hasattr(self._meta, "dtype"):
            extra = ", dtype=%s" % self._meta.dtype
        else:
            extra = ", type=%s" % type(self._meta).__name__
        return f"dd.Scalar<{name}{extra}>"
    def __array__(self):
        # array interface is required to support pandas instance + Scalar
        # Otherwise, above op results in pd.Series of Scalar (object dtype)
        return np.asarray(self.compute())
    @property
    def _args(self):
        # Pickle state consumed by __getstate__/__setstate__ below.
        return (self.dask, self._name, self._meta)
    def __getstate__(self):
        return self._args
    def __setstate__(self, state):
        self.dask, self._name, self._meta = state
    def __bool__(self):
        # A lazy scalar has no truth value until computed.
        raise TypeError(
            f"Trying to convert {self} to a boolean value. Because Dask objects are "
            "lazily evaluated, they cannot be converted to a boolean value or used "
            "in boolean conditions like if statements. Try calling .compute() to "
            "force computation prior to converting to a boolean value or using in "
            "a conditional statement."
        )
    @property
    def key(self):
        # The single (name, partition-index) key identifying this scalar.
        return (self._name, 0)
    @classmethod
    def _get_unary_operator(cls, op):
        # Build a lazy unary operator: wraps ``op`` in a one-task graph and
        # infers the result meta by applying ``op`` to the dummy meta.
        def f(self):
            name = funcname(op) + "-" + tokenize(self)
            dsk = {(name, 0): (op, (self._name, 0))}
            meta = op(self._meta_nonempty)
            graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
            return Scalar(graph, name, meta)
        return f
    @classmethod
    def _get_binary_operator(cls, op, inv=False):
        # Binary operators delegate to the module-level helper below.
        return lambda self, other: _scalar_binary(op, self, other, inv=inv)
    def to_delayed(self, optimize_graph=True):
        """Convert into a ``dask.delayed`` object.
        Parameters
        ----------
        optimize_graph : bool, optional
            If True [default], the graph is optimized before converting into
            ``dask.delayed`` objects.
        """
        dsk = self.__dask_graph__()
        if optimize_graph:
            dsk = self.__dask_optimize__(dsk, self.__dask_keys__())
        name = "delayed-" + self._name
        dsk = HighLevelGraph.from_collections(name, dsk, dependencies=())
        return Delayed(self.key, dsk)
def _scalar_binary(op, self, other, inv=False):
    """Create the lazy result of ``op(self, other)`` for a Scalar operand.

    Parameters
    ----------
    op : callable
        Binary operator to apply.
    self : Scalar
    other : Scalar, pandas object, or plain python object
    inv : bool
        If True, apply the reflected operation ``op(other, self)``.
    """
    name = f"{funcname(op)}-{tokenize(self, other)}"
    dependencies = [self]
    dsk = {}
    # The result type follows ``other`` (e.g. Scalar + Series -> Series).
    return_type = get_parallel_type(other)
    if isinstance(other, Scalar):
        dependencies.append(other)
        other_key = (other._name, 0)
    elif is_dask_collection(other):
        # Other dask collections are not handled here; let python try the
        # reflected operation on ``other`` instead.
        return NotImplemented
    else:
        other_key = other
    dsk[(name, 0)] = (
        (op, other_key, (self._name, 0)) if inv else (op, (self._name, 0), other_key)
    )
    # Infer the result metadata by applying ``op`` to non-empty dummy metas.
    other_meta = make_meta(other, parent_meta=self._parent_meta)
    other_meta_nonempty = meta_nonempty(other_meta)
    if inv:
        meta = op(other_meta_nonempty, self._meta_nonempty)
    else:
        meta = op(self._meta_nonempty, other_meta_nonempty)
    graph = HighLevelGraph.from_collections(name, dsk, dependencies=dependencies)
    if return_type is not Scalar:
        # Frame-like result: take divisions from ``other``'s index bounds.
        return return_type(graph, name, meta, [other.index.min(), other.index.max()])
    else:
        return Scalar(graph, name, meta)
class _Frame(DaskMethodsMixin, OperatorMethodMixin):
"""Superclass for DataFrame and Series
Parameters
----------
dsk: dict
The dask graph to compute this DataFrame
name: str
The key prefix that specifies which keys in the dask comprise this
particular DataFrame / Series
meta: pandas.DataFrame, pandas.Series, or pandas.Index
An empty pandas object with names, dtypes, and indices matching the
expected output.
divisions: tuple of index values
Values along which we partition our blocks on the index
"""
    def __init__(self, dsk, name, meta, divisions):
        """See the class docstring for parameter descriptions."""
        if not isinstance(dsk, HighLevelGraph):
            dsk = HighLevelGraph.from_collections(name, dsk, dependencies=[])
        self.dask = dsk
        self._name = name
        meta = make_meta(meta)
        # _is_partition_type — presumably provided by the concrete subclass
        # to check that meta matches it (e.g. pd.DataFrame) — TODO confirm.
        if not self._is_partition_type(meta):
            raise TypeError(
                f"Expected meta to specify type {type(self).__name__}, got type "
                f"{typename(type(meta))}"
            )
        self._meta = meta
        self.divisions = tuple(divisions)
    def __dask_graph__(self):
        # Dask collection protocol: the task graph backing this collection.
        return self.dask
    def __dask_keys__(self):
        # One key per partition: (name, 0), (name, 1), ...
        return [(self._name, i) for i in range(self.npartitions)]
    def __dask_layers__(self):
        return (self._name,)
    def __dask_tokenize__(self):
        return self._name
    __dask_optimize__ = globalmethod(
        optimize, key="dataframe_optimize", falsey=dont_optimize
    )
    # Dataframe collections use the threaded scheduler by default.
    __dask_scheduler__ = staticmethod(threaded.get)
    def __dask_postcompute__(self):
        # Computed partitions are concatenated back into one pandas object.
        return finalize, ()
    def __dask_postpersist__(self):
        return self._rebuild, ()
def _rebuild(self, dsk, *, rename=None):
name = self._name
if rename:
name = rename.get(name, name)
return type(self)(dsk, name, self._meta, self.divisions)
    @property
    def _constructor(self):
        # Factory used when operations need to build a new collection whose
        # concrete type is derived from the result's meta.
        return new_dd_object
    @property
    def npartitions(self):
        """Return number of partitions"""
        # divisions has one boundary value more than there are partitions.
        return len(self.divisions) - 1
    @property
    @derived_from(pd.DataFrame)
    def attrs(self):
        # attrs are stored on the metadata object, mirroring pandas.
        return self._meta.attrs
    @attrs.setter
    def attrs(self, value):
        self._meta.attrs = dict(value)
    @property
    def size(self):
        """Size of the Series or DataFrame as a Delayed object.
        Examples
        --------
        >>> series.size # doctest: +SKIP
        dd.Scalar<size-ag..., dtype=int64>
        """
        # Reduce the per-partition sizes into a single lazy integer.
        return self.reduction(
            methods.size, np.sum, token="size", meta=int, split_every=False
        )
    @property
    def _meta_nonempty(self):
        """A non-empty version of `_meta` with fake data."""
        return meta_nonempty(self._meta)
    @property
    def _args(self):
        # Pickle state consumed by __getstate__/__setstate__ below.
        return (self.dask, self._name, self._meta, self.divisions)
    def __getstate__(self):
        return self._args
    def __setstate__(self, state):
        self.dask, self._name, self._meta, self.divisions = state
def copy(self, deep=False):
"""Make a copy of the dataframe
This is strictly a shallow copy of the underlying computational graph.
It does not affect the underlying data
Parameters
----------
deep : boolean, default False
The deep value must be `False` and it is declared as a parameter just for
compatibility with third-party libraries like cuDF
"""
if deep is not False:
raise ValueError(
"The `deep` value must be False. This is strictly a shallow copy "
"of the underlying computational graph."
)
return new_dd_object(self.dask, self._name, self._meta, self.divisions)
    def __array__(self, dtype=None, **kwargs):
        # NOTE(review): the computed result is cached on the instance as
        # ``_computed``, and the ``dtype`` argument is accepted but not
        # forwarded to np.array — confirm both are intentional.
        self._computed = self.compute()
        x = np.array(self._computed)
        return x
    def __array_wrap__(self, array, context=None):
        # Direct ufunc result wrapping is unsupported; ufunc dispatch goes
        # through __array_ufunc__ below instead.
        raise NotImplementedError
    def __array_ufunc__(self, numpy_ufunc, method, *inputs, **kwargs):
        """NumPy ufunc protocol: map elementwise ufuncs over partitions."""
        out = kwargs.get("out", ())
        for x in inputs + out:
            # ufuncs work with 0-dimensional NumPy ndarrays
            # so we don't want to raise NotImplemented
            if isinstance(x, np.ndarray) and x.shape == ():
                continue
            elif not isinstance(
                x, (Number, Scalar, _Frame, Array, pd.DataFrame, pd.Series, pd.Index)
            ):
                return NotImplemented
        if method == "__call__":
            if numpy_ufunc.signature is not None:
                # Generalized ufuncs (those with a core signature) are not
                # supported for frames.
                return NotImplemented
            if numpy_ufunc.nout > 1:
                # ufuncs with multiple output values
                # are not yet supported for frames
                return NotImplemented
            else:
                return elemwise(numpy_ufunc, *inputs, **kwargs)
        else:
            # ufunc methods are not yet supported for frames
            return NotImplemented
    @property
    def _elemwise(self):
        # Hook used by generic dask code to apply elementwise operations.
        return elemwise
    def _repr_data(self):
        # Expected to be overridden (base implementation raises); supplies
        # the structure table rendered by __repr__.
        raise NotImplementedError
    @property
    def _repr_divisions(self):
        # Index of division boundaries used when rendering the repr.
        name = f"npartitions={self.npartitions}"
        if self.known_divisions:
            divisions = pd.Index(self.divisions, name=name)
        else:
            # avoid to be converted to NaN
            divisions = pd.Index([""] * (self.npartitions + 1), name=name)
        return divisions
    def __repr__(self):
        """Render the lazy structure (columns/divisions), never the data."""
        data = self._repr_data().to_string(max_rows=5, show_dimensions=False)
        _str_fmt = """Dask {klass} Structure:
{data}
Dask Name: {name}, {task} tasks"""
        if len(self.columns) == 0:
            # No columns: drop the header row and relabel for readability.
            data = data.partition("\n")[-1].replace("Index", "Divisions")
            _str_fmt = f"Empty {_str_fmt}"
        return _str_fmt.format(
            klass=self.__class__.__name__,
            data=data,
            name=key_split(self._name),
            task=len(self.dask),
        )
    @property
    def index(self):
        """Return dask Index instance"""
        return self.map_partitions(
            getattr,
            "index",
            token=self._name + "-index",
            meta=self._meta.index,
            enforce_metadata=False,
        )
    @index.setter
    def index(self, value):
        # NOTE: mutates this collection in place, replacing its graph, name
        # and meta with those of the index-assigned result.
        self.divisions = value.divisions
        result = map_partitions(
            methods.assign_index, self, value, enforce_metadata=False
        )
        self.dask = result.dask
        self._name = result._name
        self._meta = result._meta
    def reset_index(self, drop=False):
        """Reset the index to the default index.
        Note that unlike in ``pandas``, the reset ``dask.dataframe`` index will
        not be monotonically increasing from 0. Instead, it will restart at 0
        for each partition (e.g. ``index1 = [0, ..., 10], index2 = [0, ...]``).
        This is due to the inability to statically know the full length of the
        index.
        For DataFrame with multi-level index, returns a new DataFrame with
        labeling information in the columns under the index names, defaulting
        to 'level_0', 'level_1', etc. if any are None. For a standard index,
        the index name will be used (if set), otherwise a default 'index' or
        'level_0' (if 'index' is already taken) will be used.
        Parameters
        ----------
        drop : boolean, default False
            Do not try to insert index into dataframe columns.
        """
        # Divisions are cleared because the new per-partition integer index
        # restarts at 0 in every partition and is therefore not monotonic.
        return self.map_partitions(
            M.reset_index, drop=drop, enforce_metadata=False
        ).clear_divisions()
    @property
    def known_divisions(self):
        """Whether divisions are already known"""
        # Unknown divisions are represented as a tuple of Nones.
        return len(self.divisions) > 0 and self.divisions[0] is not None
    def clear_divisions(self):
        """Forget division information"""
        divisions = (None,) * (self.npartitions + 1)
        return type(self)(self.dask, self._name, self._meta, divisions)
def get_partition(self, n):
"""Get a dask DataFrame/Series representing the `nth` partition."""
if 0 <= n < self.npartitions:
name = f"get-partition-{str(n)}-{self._name}"
divisions = self.divisions[n : n + 2]
layer = {(name, 0): (self._name, n)}
graph = HighLevelGraph.from_collections(name, layer, dependencies=[self])
return new_dd_object(graph, name, self._meta, divisions)
else:
msg = f"n must be 0 <= n < {self.npartitions}"
raise ValueError(msg)
    @derived_from(pd.DataFrame)
    def drop_duplicates(
        self, subset=None, split_every=None, split_out=1, ignore_index=False, **kwargs
    ):
        if subset is not None:
            # Let pandas error on bad inputs
            self._meta_nonempty.drop_duplicates(subset=subset, **kwargs)
            kwargs["subset"] = subset
            # Route output rows by the subset columns — presumably so that
            # duplicates meet in the same output partition; TODO confirm.
            split_out_setup = split_out_on_cols
            split_out_setup_kwargs = {"cols": subset}
        else:
            self._meta_nonempty.drop_duplicates(**kwargs)
            split_out_setup = split_out_setup_kwargs = None
        if kwargs.get("keep", True) is False:
            raise NotImplementedError("drop_duplicates with keep=False")
        chunk = M.drop_duplicates
        # Apply-concat-apply: deduplicate per partition, then deduplicate
        # again on the concatenated intermediates.
        return aca(
            self,
            chunk=chunk,
            aggregate=chunk,
            meta=self._meta,
            token="drop-duplicates",
            split_every=split_every,
            split_out=split_out,
            split_out_setup=split_out_setup,
            split_out_setup_kwargs=split_out_setup_kwargs,
            ignore_index=ignore_index,
            **kwargs,
        )
    def __len__(self):
        # Triggers computation: sums the per-partition lengths eagerly.
        return self.reduction(
            len, np.sum, token="len", meta=int, split_every=False
        ).compute()
    def __bool__(self):
        # A lazy collection has no well-defined truth value; mirror pandas'
        # ambiguous-truth-value error.
        raise ValueError(
            f"The truth value of a {self.__class__.__name__} is ambiguous. "
            "Use a.any() or a.all()."
        )
    __nonzero__ = __bool__  # python 2
def _scalarfunc(self, cast_type):
def wrapper():
raise TypeError(f"cannot convert the series to {cast_type}")
return wrapper
    def __float__(self):
        # NOTE(review): these return the raising wrapper instead of calling
        # it; python then rejects the non-scalar return value, so conversion
        # still fails — confirm the indirection is intentional.
        return self._scalarfunc(float)
    def __int__(self):
        return self._scalarfunc(int)
    __long__ = __int__  # python 2
    def __complex__(self):
        return self._scalarfunc(complex)
    @insert_meta_param_description(pad=12)
    def map_partitions(self, func, *args, **kwargs):
        """Apply Python function on each DataFrame partition.
        Note that the index and divisions are assumed to remain unchanged.
        Parameters
        ----------
        func : function
            Function applied to each partition.
        args, kwargs :
            Arguments and keywords to pass to the function. The partition will
            be the first argument, and these will be passed *after*. Arguments
            and keywords may contain ``Scalar``, ``Delayed``, ``partition_info``
            or regular python objects. DataFrame-like args (both dask and
            pandas) will be repartitioned to align (if necessary) before
            applying the function (see ``align_dataframes`` to control).
        enforce_metadata : bool, default True
            Whether to enforce at runtime that the structure of the DataFrame
            produced by ``func`` actually matches the structure of ``meta``.
            This will rename and reorder columns for each partition,
            and will raise an error if this doesn't work or types don't match.
        transform_divisions : bool, default True
            Whether to apply the function onto the divisions and apply those
            transformed divisions to the output.
        align_dataframes : bool, default True
            Whether to repartition DataFrame- or Series-like args
            (both dask and pandas) so their divisions align before applying
            the function. This requires all inputs to have known divisions.
            Single-partition inputs will be split into multiple partitions.
            If False, all inputs must have either the same number of partitions
            or a single partition. Single-partition inputs will be broadcast to
            every partition of multi-partition inputs.
        $META
        Examples
        --------
        Given a DataFrame, Series, or Index, such as:
        >>> import pandas as pd
        >>> import dask.dataframe as dd
        >>> df = pd.DataFrame({'x': [1, 2, 3, 4, 5],
        ...                    'y': [1., 2., 3., 4., 5.]})
        >>> ddf = dd.from_pandas(df, npartitions=2)
        One can use ``map_partitions`` to apply a function on each partition.
        Extra arguments and keywords can optionally be provided, and will be
        passed to the function after the partition.
        Here we apply a function with arguments and keywords to a DataFrame,
        resulting in a Series:
        >>> def myadd(df, a, b=1):
        ...     return df.x + df.y + a + b
        >>> res = ddf.map_partitions(myadd, 1, b=2)
        >>> res.dtype
        dtype('float64')
        By default, dask tries to infer the output metadata by running your
        provided function on some fake data. This works well in many cases, but
        can sometimes be expensive, or even fail. To avoid this, you can
        manually specify the output metadata with the ``meta`` keyword. This
        can be specified in many forms, for more information see
        ``dask.dataframe.utils.make_meta``.
        Here we specify the output is a Series with no name, and dtype
        ``float64``:
        >>> res = ddf.map_partitions(myadd, 1, b=2, meta=(None, 'f8'))
        Here we map a function that takes in a DataFrame, and returns a
        DataFrame with a new column:
        >>> res = ddf.map_partitions(lambda df: df.assign(z=df.x * df.y))
        >>> res.dtypes
        x int64
        y float64
        z float64
        dtype: object
        As before, the output metadata can also be specified manually. This
        time we pass in a ``dict``, as the output is a DataFrame:
        >>> res = ddf.map_partitions(lambda df: df.assign(z=df.x * df.y),
        ...                          meta={'x': 'i8', 'y': 'f8', 'z': 'f8'})
        In the case where the metadata doesn't change, you can also pass in
        the object itself directly:
        >>> res = ddf.map_partitions(lambda df: df.head(), meta=ddf)
        Also note that the index and divisions are assumed to remain unchanged.
        If the function you're mapping changes the index/divisions, you'll need
        to clear them afterwards:
        >>> ddf.map_partitions(func).clear_divisions() # doctest: +SKIP
        Your map function gets information about where it is in the dataframe by
        accepting a special ``partition_info`` keyword argument.
        >>> def func(partition, partition_info=None):
        ...     pass
        This will receive the following information:
        >>> partition_info # doctest: +SKIP
        {'number': 1, 'division': 3}
        For each argument and keyword arguments that are dask dataframes you will
        receive the number (n) which represents the nth partition of the dataframe
        and the division (the first index value in the partition). If divisions
        are not known (for instance if the index is not sorted) then you will get
        None as the division.
        """
        # Delegate to the module-level map_partitions, with ``self`` inserted
        # as the first dataframe argument.
        return map_partitions(func, self, *args, **kwargs)
    @insert_meta_param_description(pad=12)
    def map_overlap(self, func, before, after, *args, **kwargs):
        """Apply a function to each partition, sharing rows with adjacent partitions.
        This can be useful for implementing windowing functions such as
        ``df.rolling(...).mean()`` or ``df.diff()``.
        Parameters
        ----------
        func : function
            Function applied to each partition.
        before : int
            The number of rows to prepend to partition ``i`` from the end of
            partition ``i - 1``.
        after : int
            The number of rows to append to partition ``i`` from the beginning
            of partition ``i + 1``.
        args, kwargs :
            Arguments and keywords to pass to the function. The partition will
            be the first argument, and these will be passed *after*.
        $META
        Notes
        -----
        Given positive integers ``before`` and ``after``, and a function
        ``func``, ``map_overlap`` does the following:
        1. Prepend ``before`` rows to each partition ``i`` from the end of
           partition ``i - 1``. The first partition has no rows prepended.
        2. Append ``after`` rows to each partition ``i`` from the beginning of
           partition ``i + 1``. The last partition has no rows appended.
        3. Apply ``func`` to each partition, passing in any extra ``args`` and
           ``kwargs`` if provided.
        4. Trim ``before`` rows from the beginning of all but the first
           partition.
        5. Trim ``after`` rows from the end of all but the last partition.
        Note that the index and divisions are assumed to remain unchanged.
        Examples
        --------
        Given a DataFrame, Series, or Index, such as:
        >>> import pandas as pd
        >>> import dask.dataframe as dd
        >>> df = pd.DataFrame({'x': [1, 2, 4, 7, 11],
        ...                    'y': [1., 2., 3., 4., 5.]})
        >>> ddf = dd.from_pandas(df, npartitions=2)
        A rolling sum with a trailing moving window of size 2 can be computed by
        overlapping 2 rows before each partition, and then mapping calls to
        ``df.rolling(2).sum()``:
        >>> ddf.compute()
            x    y
        0   1  1.0
        1   2  2.0
        2   4  3.0
        3   7  4.0
        4  11  5.0
        >>> ddf.map_overlap(lambda df: df.rolling(2).sum(), 2, 0).compute()
              x    y
        0   NaN  NaN
        1   3.0  3.0
        2   6.0  5.0
        3  11.0  7.0
        4  18.0  9.0
        The pandas ``diff`` method computes a discrete difference shifted by a
        number of periods (can be positive or negative). This can be
        implemented by mapping calls to ``df.diff`` to each partition after
        prepending/appending that many rows, depending on sign:
        >>> def diff(df, periods=1):
        ...     before, after = (periods, 0) if periods > 0 else (0, -periods)
        ...     return df.map_overlap(lambda df, periods=1: df.diff(periods),
        ...                           periods, 0, periods=periods)
        >>> diff(ddf, 1).compute()
             x    y
        0  NaN  NaN
        1  1.0  1.0
        2  2.0  1.0
        3  3.0  1.0
        4  4.0  1.0
        If you have a ``DatetimeIndex``, you can use a ``pd.Timedelta`` for time-
        based windows.
        >>> ts = pd.Series(range(10), index=pd.date_range('2017', periods=10))
        >>> dts = dd.from_pandas(ts, npartitions=2)
        >>> dts.map_overlap(lambda df: df.rolling('2D').sum(),
        ...                 pd.Timedelta('2D'), 0).compute()
        2017-01-01 0.0
        2017-01-02 1.0
        2017-01-03 3.0
        2017-01-04 5.0
        2017-01-05 7.0
        2017-01-06 9.0
        2017-01-07 11.0
        2017-01-08 13.0
        2017-01-09 15.0
        2017-01-10 17.0
        Freq: D, dtype: float64
        """
        # Imported here rather than at module top — presumably to avoid a
        # circular import between core and rolling.
        from .rolling import map_overlap
        return map_overlap(func, self, before, after, *args, **kwargs)
    def memory_usage_per_partition(self, index=True, deep=False):
        """Return the memory usage of each partition
        Parameters
        ----------
        index : bool, default True
            Specifies whether to include the memory usage of the index in
            returned Series.
        deep : bool, default False
            If True, introspect the data deeply by interrogating
            ``object`` dtypes for system-level memory consumption, and include
            it in the returned values.
        Returns
        -------
        Series
            A Series whose index is the partition number and whose values
            are the memory usage of each partition in bytes.
        """
        # total_mem_usage is applied once per partition; divisions are
        # cleared since the result is indexed by partition number.
        return self.map_partitions(
            total_mem_usage, index=index, deep=deep
        ).clear_divisions()
    @insert_meta_param_description(pad=12)
    def reduction(
        self,
        chunk,
        aggregate=None,
        combine=None,
        meta=no_default,
        token=None,
        split_every=None,
        chunk_kwargs=None,
        aggregate_kwargs=None,
        combine_kwargs=None,
        **kwargs,
    ):
        """Generic row-wise reductions.
        Parameters
        ----------
        chunk : callable
            Function to operate on each partition. Should return a
            ``pandas.DataFrame``, ``pandas.Series``, or a scalar.
        aggregate : callable, optional
            Function to operate on the concatenated result of ``chunk``. If not
            specified, defaults to ``chunk``. Used to do the final aggregation
            in a tree reduction.
            The input to ``aggregate`` depends on the output of ``chunk``.
            If the output of ``chunk`` is a:
            - scalar: Input is a Series, with one row per partition.
            - Series: Input is a DataFrame, with one row per partition. Columns
              are the rows in the output series.
            - DataFrame: Input is a DataFrame, with one row per partition.
              Columns are the columns in the output dataframes.
            Should return a ``pandas.DataFrame``, ``pandas.Series``, or a
            scalar.
        combine : callable, optional
            Function to operate on intermediate concatenated results of
            ``chunk`` in a tree-reduction. If not provided, defaults to
            ``aggregate``. The input/output requirements should match that of
            ``aggregate`` described above.
        $META
        token : str, optional
            The name to use for the output keys.
        split_every : int, optional
            Group partitions into groups of this size while performing a
            tree-reduction. If set to False, no tree-reduction will be used,
            and all intermediates will be concatenated and passed to
            ``aggregate``. Default is 8.
        chunk_kwargs : dict, optional
            Keyword arguments to pass on to ``chunk`` only.
        aggregate_kwargs : dict, optional
            Keyword arguments to pass on to ``aggregate`` only.
        combine_kwargs : dict, optional
            Keyword arguments to pass on to ``combine`` only.
        kwargs :
            All remaining keywords will be passed to ``chunk``, ``combine``,
            and ``aggregate``.
        Examples
        --------
        >>> import pandas as pd
        >>> import dask.dataframe as dd
        >>> df = pd.DataFrame({'x': range(50), 'y': range(50, 100)})
        >>> ddf = dd.from_pandas(df, npartitions=4)
        Count the number of rows in a DataFrame. To do this, count the number
        of rows in each partition, then sum the results:
        >>> res = ddf.reduction(lambda x: x.count(),
        ...                     aggregate=lambda x: x.sum())
        >>> res.compute()
        x 50
        y 50
        dtype: int64
        Count the number of rows in a Series with elements greater than or
        equal to a value (provided via a keyword).
        >>> def count_greater(x, value=0):
        ...     return (x >= value).sum()
        >>> res = ddf.x.reduction(count_greater, aggregate=lambda x: x.sum(),
        ...                       chunk_kwargs={'value': 25})
        >>> res.compute()
        25
        Aggregate both the sum and count of a Series at the same time:
        >>> def sum_and_count(x):
        ...     return pd.Series({'count': x.count(), 'sum': x.sum()},
        ...                      index=['count', 'sum'])
        >>> res = ddf.x.reduction(sum_and_count, aggregate=lambda x: x.sum())
        >>> res.compute()
        count 50
        sum 1225
        dtype: int64
        Doing the same, but for a DataFrame. Here ``chunk`` returns a
        DataFrame, meaning the input to ``aggregate`` is a DataFrame with an
        index with non-unique entries for both 'x' and 'y'. We groupby the
        index, and sum each group to get the final result.
        >>> def sum_and_count(x):
        ...     return pd.DataFrame({'count': x.count(), 'sum': x.sum()},
        ...                         columns=['count', 'sum'])
        >>> res = ddf.reduction(sum_and_count,
        ...                     aggregate=lambda x: x.groupby(level=0).sum())
        >>> res.compute()
        count sum
        x 50 1225
        y 50 3725
        """
        if aggregate is None:
            aggregate = chunk
        if combine is None:
            if combine_kwargs:
                raise ValueError("`combine_kwargs` provided with no `combine`")
            combine = aggregate
            combine_kwargs = aggregate_kwargs
        # Pack the user-supplied callables into the keyword dicts consumed by
        # the generic _reduction_* wrappers passed to apply-concat-apply.
        # The dicts are copied first so caller-owned dicts are not mutated.
        chunk_kwargs = chunk_kwargs.copy() if chunk_kwargs else {}
        chunk_kwargs["aca_chunk"] = chunk
        combine_kwargs = combine_kwargs.copy() if combine_kwargs else {}
        combine_kwargs["aca_combine"] = combine
        aggregate_kwargs = aggregate_kwargs.copy() if aggregate_kwargs else {}
        aggregate_kwargs["aca_aggregate"] = aggregate
        return aca(
            self,
            chunk=_reduction_chunk,
            aggregate=_reduction_aggregate,
            combine=_reduction_combine,
            meta=meta,
            token=token,
            split_every=split_every,
            chunk_kwargs=chunk_kwargs,
            aggregate_kwargs=aggregate_kwargs,
            combine_kwargs=combine_kwargs,
            **kwargs,
        )
@derived_from(pd.DataFrame)
def pipe(self, func, *args, **kwargs):
# Taken from pandas:
# https://github.com/pydata/pandas/blob/master/pandas/core/generic.py#L2698-L2707
if isinstance(func, tuple):
func, target = func
if target in kwargs:
raise ValueError(
"%s is both the pipe target and a keyword argument" % target
)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
    def random_split(self, frac, random_state=None, shuffle=False):
        """Pseudorandomly split dataframe into different pieces row-wise
        Parameters
        ----------
        frac : list
            List of floats that should sum to one.
        random_state : int or np.random.RandomState
            If int create a new RandomState with this as the seed.
            Otherwise draw from the passed RandomState.
        shuffle : bool, default False
            If set to True, the dataframe is shuffled (within partition)
            before the split.
        Examples
        --------
        50/50 split
        >>> a, b = df.random_split([0.5, 0.5]) # doctest: +SKIP
        80/10/10 split, consistent random_state
        >>> a, b, c = df.random_split([0.8, 0.1, 0.1], random_state=123) # doctest: +SKIP
        See Also
        --------
        dask.DataFrame.sample
        """
        if not np.allclose(sum(frac), 1):
            raise ValueError("frac should sum to 1")
        # One independent RNG state per partition.
        state_data = random_state_data(self.npartitions, random_state)
        token = tokenize(self, frac, random_state)
        name = "split-" + token
        # Each task splits one partition into len(frac) pieces.
        layer = {
            (name, i): (pd_split, (self._name, i), frac, state, shuffle)
            for i, state in enumerate(state_data)
        }
        out = []
        for i in range(len(frac)):
            # The i-th output collection picks piece i from every partition.
            name2 = "split-%d-%s" % (i, token)
            dsk2 = {
                (name2, j): (getitem, (name, j), i) for j in range(self.npartitions)
            }
            graph = HighLevelGraph.from_collections(
                name2, merge(dsk2, layer), dependencies=[self]
            )
            out_df = type(self)(graph, name2, self._meta, self.divisions)
            out.append(out_df)
        return out
def head(self, n=5, npartitions=1, compute=True):
"""First n rows of the dataset
Parameters
----------
n : int, optional
The number of rows to return. Default is 5.
npartitions : int, optional
Elements are only taken from the first ``npartitions``, with a
default of 1. If there are fewer than ``n`` rows in the first
``npartitions`` a warning will be raised and any found rows
returned. Pass -1 to use all partitions.
compute : bool, optional
Whether to compute the result, default is True.
"""
if npartitions <= -1:
npartitions = self.npartitions
# No need to warn if we're already looking at all partitions
safe = npartitions != self.npartitions
return self._head(n=n, npartitions=npartitions, compute=compute, safe=safe)
    def _head(self, n, npartitions, compute, safe):
        # Shared implementation behind ``head``; ``safe`` selects the variant
        # that warns when fewer than ``n`` rows are found (per ``head``'s
        # documented behavior).
        if npartitions <= -1:
            npartitions = self.npartitions
        if npartitions > self.npartitions:
            raise ValueError(
                f"only {self.npartitions} partitions, head received {npartitions}"
            )
        name = f"head-{npartitions}-{n}-{self._name}"
        if safe:
            head = safe_head
        else:
            head = M.head
        if npartitions > 1:
            # Take up to n rows from each scanned partition, concatenate,
            # then take the first n rows of the combined result.
            name_p = f"head-partial-{n}-{self._name}"
            dsk = {}
            for i in range(npartitions):
                dsk[(name_p, i)] = (M.head, (self._name, i), n)
            concat = (_concat, [(name_p, i) for i in range(npartitions)])
            dsk[(name, 0)] = (head, concat, n)
        else:
            dsk = {(name, 0): (head, (self._name, 0), n)}
        graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
        # The result is one partition spanning the scanned divisions.
        result = new_dd_object(
            graph, name, self._meta, [self.divisions[0], self.divisions[npartitions]]
        )
        if compute:
            result = result.compute()
        return result
    def tail(self, n=5, compute=True):
        """Last n rows of the dataset
        Caveat: this only checks the last n rows of the last partition.
        """
        # Single-task graph reading only the final partition.
        name = "tail-%d-%s" % (n, self._name)
        dsk = {(name, 0): (M.tail, (self._name, self.npartitions - 1), n)}
        graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
        result = new_dd_object(graph, name, self._meta, self.divisions[-2:])
        if compute:
            result = result.compute()
        return result
    @property
    def loc(self):
        """Purely label-location based indexer for selection by label.
        >>> df.loc["b"] # doctest: +SKIP
        >>> df.loc["b":"d"] # doctest: +SKIP
        """
        # Imported locally — presumably to avoid a circular import between
        # core and indexing.
        from .indexing import _LocIndexer
        return _LocIndexer(self)
    def _partitions(self, index):
        # Implementation behind the ``partitions`` indexer: slice this
        # collection along its partition axis.
        if not isinstance(index, tuple):
            index = (index,)
        from ..array.slicing import normalize_index
        index = normalize_index(index, (self.npartitions,))
        # Scalars become length-1 slices so the result is still a collection
        # of partitions rather than a single partition.
        index = tuple(slice(k, k + 1) if isinstance(k, Number) else k for k in index)
        name = "blocks-" + tokenize(self, index)
        # Select the surviving partition keys and their division boundaries.
        new_keys = np.array(self.__dask_keys__(), dtype=object)[index].tolist()
        divisions = [self.divisions[i] for _, i in new_keys] + [
            self.divisions[new_keys[-1][1] + 1]
        ]
        dsk = {(name, i): tuple(key) for i, key in enumerate(new_keys)}
        graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
        return new_dd_object(graph, name, self._meta, divisions)
    @property
    def partitions(self):
        """Slice dataframe by partitions
        This allows partitionwise slicing of a Dask Dataframe. You can perform normal
        Numpy-style slicing but now rather than slice elements of the array you
        slice along partitions so, for example, ``df.partitions[:5]`` produces a new
        Dask Dataframe of the first five partitions.
        Examples
        --------
        >>> df.partitions[0] # doctest: +SKIP
        >>> df.partitions[:3] # doctest: +SKIP
        >>> df.partitions[::10] # doctest: +SKIP
        Returns
        -------
        A Dask DataFrame
        """
        # IndexCallable turns obj[key] into a call to self._partitions(key).
        return IndexCallable(self._partitions)
# Note: iloc is implemented only on DataFrame
def repartition(
    self,
    divisions=None,
    npartitions=None,
    partition_size=None,
    freq=None,
    force=False,
):
    """Repartition dataframe along new divisions

    Parameters
    ----------
    divisions : list, optional
        List of partitions to be used. Only used if npartitions and
        partition_size isn't specified.
        For convenience if given an integer this will defer to npartitions
        and if given a string it will defer to partition_size (see below)
    npartitions : int, optional
        Number of partitions of output. Only used if partition_size
        isn't specified.
    partition_size: int or string, optional
        Max number of bytes of memory for each partition. Use numbers or
        strings like 5MB. If specified npartitions and divisions will be
        ignored.

        .. warning::

           This keyword argument triggers computation to determine
           the memory size of each partition, which may be expensive.

    freq : str, pd.Timedelta
        A period on which to partition timeseries data like ``'7D'`` or
        ``'12h'`` or ``pd.Timedelta(hours=12)``. Assumes a datetime index.
    force : bool, default False
        Allows the expansion of the existing divisions.
        If False then the new divisions' lower and upper bounds must be
        the same as the old divisions'.

    Notes
    -----
    Exactly one of `divisions`, `npartitions`, `partition_size`, or `freq`
    should be specified. A ``ValueError`` will be raised when that is
    not the case.

    Examples
    --------
    >>> df = df.repartition(npartitions=10)  # doctest: +SKIP
    >>> df = df.repartition(divisions=[0, 5, 10, 20])  # doctest: +SKIP
    >>> df = df.repartition(freq='7d')  # doctest: +SKIP
    """
    # Convenience coercions documented above: an int ``divisions`` really
    # means ``npartitions``; a string really means ``partition_size``.
    if isinstance(divisions, int):
        npartitions = divisions
        divisions = None
    if isinstance(divisions, str):
        partition_size = divisions
        divisions = None
    # Exactly one repartitioning strategy must have been selected.
    if (
        sum(
            [
                partition_size is not None,
                divisions is not None,
                npartitions is not None,
                freq is not None,
            ]
        )
        != 1
    ):
        raise ValueError(
            "Please provide exactly one of ``npartitions=``, ``freq=``, "
            "``divisions=``, ``partition_size=`` keyword arguments"
        )
    # Dispatch to the matching module-level implementation.
    if partition_size is not None:
        return repartition_size(self, partition_size)
    elif npartitions is not None:
        return repartition_npartitions(self, npartitions)
    elif divisions is not None:
        return repartition(self, divisions, force=force)
    elif freq is not None:
        return repartition_freq(self, freq=freq)
def shuffle(
    self,
    on,
    npartitions=None,
    max_branch=None,
    shuffle=None,
    ignore_index=False,
    compute=None,
):
    """Rearrange DataFrame into new partitions

    Uses hashing of `on` to map rows to output partitions. After this
    operation, rows with the same value of `on` will be in the same
    partition.

    Parameters
    ----------
    on : str, list of str, or Series, Index, or DataFrame
        Column(s) or index to be used to map rows to output partitions
    npartitions : int, optional
        Number of partitions of output. Partition count will not be
        changed by default.
    max_branch: int, optional
        The maximum number of splits per input partition. Used within
        the staged shuffling algorithm.
    shuffle: {'disk', 'tasks'}, optional
        Either ``'disk'`` for single-node operation or ``'tasks'`` for
        distributed operation.  Will be inferred by your current scheduler.
    ignore_index: bool, default False
        Ignore index during shuffle.  If ``True``, performance may improve,
        but index values will not be preserved.
    compute: bool
        Whether or not to trigger an immediate computation. Defaults to False.

    Notes
    -----
    This does not preserve a meaningful index/partitioning scheme. This
    is not deterministic if done in parallel.

    Examples
    --------
    >>> df = df.shuffle(df.columns[0])  # doctest: +SKIP
    """
    # Imported lazily (and renamed) to avoid clashing with the ``shuffle``
    # keyword argument of this method.
    from .shuffle import shuffle as dd_shuffle

    return dd_shuffle(
        self,
        on,
        npartitions=npartitions,
        max_branch=max_branch,
        shuffle=shuffle,
        ignore_index=ignore_index,
        compute=compute,
    )
@derived_from(pd.DataFrame)
def fillna(self, value=None, method=None, limit=None, axis=None):
    # Fill NA/NaN values, mirroring ``pandas.DataFrame.fillna``.
    axis = self._validate_axis(axis)
    if method is None and limit is not None:
        raise NotImplementedError("fillna with set limit and method=None")
    # Extract a concrete sample value so meta can be computed without
    # triggering computation on dask collections passed as ``value``.
    if isinstance(value, _Frame):
        test_value = value._meta_nonempty.values[0]
    elif isinstance(value, Scalar):
        test_value = value._meta_nonempty
    else:
        test_value = value
    meta = self._meta_nonempty.fillna(
        value=test_value, method=method, limit=limit, axis=axis
    )

    # Row-wise fills and plain value fills never need data from
    # neighbouring partitions, so they stay purely partitionwise.
    if axis == 1 or method is None:
        # Control whether or not dask's partition alignment happens.
        # We don't want for a pandas Series.
        # We do want it for a dask Series
        if is_series_like(value) and not is_dask_collection(value):
            args = ()
            kwargs = {"value": value}
        else:
            args = (value,)
            kwargs = {}
        return self.map_partitions(
            M.fillna,
            *args,
            method=method,
            limit=limit,
            axis=axis,
            meta=meta,
            enforce_metadata=False,
            **kwargs,
        )

    # Directional fills need values from the adjacent partition; set up the
    # overlap window (before/after) accordingly.  ``skip_check`` is the
    # boundary partition that has no neighbour in the fill direction.
    if method in ("pad", "ffill"):
        method = "ffill"
        skip_check = 0
        before, after = 1 if limit is None else limit, 0
    else:
        method = "bfill"
        skip_check = self.npartitions - 1
        before, after = 0, 1 if limit is None else limit

    if limit is None:
        # Without a limit, a fully-NA partition could need values from
        # arbitrarily far away; fillna_check raises in that case so the
        # single-row overlap below remains sufficient.
        name = "fillna-chunk-" + tokenize(self, method)
        dsk = {
            (name, i): (
                methods.fillna_check,
                (self._name, i),
                method,
                i != skip_check,
            )
            for i in range(self.npartitions)
        }
        graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
        parts = new_dd_object(graph, name, meta, self.divisions)
    else:
        parts = self

    return parts.map_overlap(
        M.fillna, before, after, method=method, limit=limit, meta=meta
    )
@derived_from(pd.DataFrame)
def ffill(self, axis=None, limit=None):
    # Forward-fill is simply ``fillna`` with method="ffill".
    return self.fillna(method="ffill", axis=axis, limit=limit)
@derived_from(pd.DataFrame)
def bfill(self, axis=None, limit=None):
    # Backward-fill is simply ``fillna`` with method="bfill".
    return self.fillna(method="bfill", axis=axis, limit=limit)
def sample(self, n=None, frac=None, replace=False, random_state=None):
    """Random sample of items

    Parameters
    ----------
    n : int, optional
        Number of items to return is not supported by dask. Use frac
        instead.
    frac : float, optional
        Fraction of axis items to return.
    replace : boolean, optional
        Sample with or without replacement. Default = False.
    random_state : int or ``np.random.RandomState``
        If int we create a new RandomState with this as the seed
        Otherwise we draw from the passed RandomState

    See Also
    --------
    DataFrame.random_split
    pandas.DataFrame.sample
    """
    if n is not None:
        msg = (
            "sample does not support the number of sampled items "
            "parameter, 'n'. Please use the 'frac' parameter instead."
        )
        # A value in [0, 1] was almost certainly meant as a fraction, so
        # accept it with a warning instead of erroring out.
        if isinstance(n, Number) and 0 <= n <= 1:
            warnings.warn(msg)
            frac = n
        else:
            raise ValueError(msg)

    if frac is None:
        raise ValueError("frac must not be None")

    if random_state is None:
        random_state = np.random.RandomState()

    name = "sample-" + tokenize(self, frac, replace, random_state)

    # Derive one independent random state per partition so the per-partition
    # sampling is deterministic given ``random_state``.
    state_data = random_state_data(self.npartitions, random_state)
    dsk = {
        (name, i): (methods.sample, (self._name, i), state, frac, replace)
        for i, state in enumerate(state_data)
    }

    graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
    return new_dd_object(graph, name, self._meta, self.divisions)
@derived_from(pd.DataFrame)
def replace(self, to_replace=None, value=None, regex=False):
    # Purely elementwise, so forward straight to pandas on each partition.
    options = dict(to_replace=to_replace, value=value, regex=regex)
    return self.map_partitions(M.replace, enforce_metadata=False, **options)
def to_dask_array(self, lengths=None, meta=None):
    """Convert a dask DataFrame to a dask array.

    Parameters
    ----------
    lengths : bool or Sequence of ints, optional
        How to determine the chunks sizes for the output array.
        By default, the output array will have unknown chunk lengths
        along the first axis, which can cause some later operations
        to fail.

        * True : immediately compute the length of each partition
        * Sequence : a sequence of integers to use for the chunk sizes
          on the first axis. These values are *not* validated for
          correctness, beyond ensuring that the number of items
          matches the number of partitions.
    meta : object, optional
        An optional `meta` parameter can be passed for dask to override the
        default metadata on the underlying dask array.

    Returns
    -------
    A dask array with the same data, one chunk per partition.
    """
    if lengths is True:
        # Triggers a (cheap) computation: one len() per partition.
        lengths = tuple(self.map_partitions(len, enforce_metadata=False).compute())

    arr = self.values

    chunks = self._validate_chunks(arr, lengths)
    arr._chunks = chunks

    if meta is not None:
        arr._meta = meta

    return arr
def to_hdf(self, path_or_buf, key, mode="a", append=False, **kwargs):
    """See dd.to_hdf docstring for more information"""
    # Thin delegation to the module-level writer.
    from .io import to_hdf as _to_hdf

    return _to_hdf(self, path_or_buf, key, mode, append, **kwargs)
def to_csv(self, filename, **kwargs):
    """See dd.to_csv docstring for more information"""
    # Thin delegation to the module-level writer.
    from .io import to_csv as _to_csv

    return _to_csv(self, filename, **kwargs)
def to_sql(
    self,
    name: str,
    uri: str,
    schema=None,
    if_exists: str = "fail",
    index: bool = True,
    index_label=None,
    chunksize=None,
    dtype=None,
    method=None,
    compute=True,
    parallel=False,
):
    """See dd.to_sql docstring for more information"""
    from .io import to_sql

    # Gather every option and delegate wholesale to the module-level writer.
    options = dict(
        name=name,
        uri=uri,
        schema=schema,
        if_exists=if_exists,
        index=index,
        index_label=index_label,
        chunksize=chunksize,
        dtype=dtype,
        method=method,
        compute=compute,
        parallel=parallel,
    )
    return to_sql(self, **options)
def to_json(self, filename, *args, **kwargs):
    """See dd.to_json docstring for more information"""
    # Thin delegation to the module-level writer.
    from .io import to_json as _to_json

    return _to_json(self, filename, *args, **kwargs)
def to_delayed(self, optimize_graph=True):
    """Convert into a list of ``dask.delayed`` objects, one per partition.

    Parameters
    ----------
    optimize_graph : bool, optional
        If True [default], the graph is optimized before converting into
        ``dask.delayed`` objects.

    Examples
    --------
    >>> partitions = df.to_delayed()  # doctest: +SKIP

    See Also
    --------
    dask.dataframe.from_delayed
    """
    keys = self.__dask_keys__()
    graph = self.__dask_graph__()
    if optimize_graph:
        # Cull/optimize so each Delayed carries only the tasks it needs.
        graph = self.__dask_optimize__(graph, self.__dask_keys__())
    name = "delayed-" + self._name
    graph = HighLevelGraph.from_collections(name, graph, dependencies=())
    # One Delayed per partition, all sharing the same (optimized) graph.
    return [Delayed(k, graph) for k in keys]
@classmethod
def _get_unary_operator(cls, op):
    # Build a bound unary dunder that applies ``op`` elementwise.
    def unary(self):
        return elemwise(op, self)

    return unary
@classmethod
def _get_binary_operator(cls, op, inv=False):
    # Build a bound binary dunder that applies ``op`` elementwise.  For a
    # reflected ("inv") operator the operand order is swapped.
    def binary(self, other):
        if inv:
            return elemwise(op, other, self)
        return elemwise(op, self, other)

    return binary
def rolling(self, window, min_periods=None, center=False, win_type=None, axis=0):
    """Provides rolling transformations.

    Parameters
    ----------
    window : int, str, offset
        Size of the moving window. This is the number of observations used
        for calculating the statistic. When not using a ``DatetimeIndex``,
        the window size must not be so large as to span more than one
        adjacent partition. If using an offset or offset alias like '5D',
        the data must have a ``DatetimeIndex``

        .. versionchanged:: 0.15.0

           Now accepts offsets and string offset aliases

    min_periods : int, default None
        Minimum number of observations in window required to have a value
        (otherwise result is NA).
    center : boolean, default False
        Set the labels at the center of the window.
    win_type : string, default None
        Provide a window type. The recognized window types are identical
        to pandas.
    axis : int, default 0

    Returns
    -------
    a Rolling object on which to call a method to compute a statistic
    """
    from dask.dataframe.rolling import Rolling

    # Validate eagerly so bad arguments fail here rather than at compute
    # time inside the Rolling object.
    if isinstance(window, Integral):
        if window < 0:
            raise ValueError("window must be >= 0")

    if min_periods is not None:
        if not isinstance(min_periods, Integral):
            raise ValueError("min_periods must be an integer")
        if min_periods < 0:
            raise ValueError("min_periods must be >= 0")

    return Rolling(
        self,
        window=window,
        min_periods=min_periods,
        center=center,
        win_type=win_type,
        axis=axis,
    )
@derived_from(pd.DataFrame)
def diff(self, periods=1, axis=0):
    """
    .. note::

       Pandas currently uses an ``object``-dtype column to represent
       boolean data with missing values. This can cause issues for
       boolean-specific operations, like ``|``. To enable boolean-
       specific operations, at the cost of metadata that doesn't match
       pandas, use ``.astype(bool)`` after the ``shift``.
    """
    axis = self._validate_axis(axis)
    if not isinstance(periods, Integral):
        raise TypeError("periods must be an integer")

    # Column-wise diff never crosses partition boundaries.
    if axis == 1:
        return self.map_partitions(
            M.diff, token="diff", periods=periods, axis=1, enforce_metadata=False
        )

    # Row-wise diff needs |periods| rows from the neighbouring partition;
    # the direction of the overlap depends on the sign of ``periods``.
    before, after = (periods, 0) if periods > 0 else (0, -periods)
    return self.map_overlap(M.diff, before, after, token="diff", periods=periods)
@derived_from(pd.DataFrame)
def shift(self, periods=1, freq=None, axis=0):
    # Shift values by ``periods``, mirroring ``pandas.DataFrame.shift``.
    axis = self._validate_axis(axis)
    if not isinstance(periods, Integral):
        raise TypeError("periods must be an integer")

    # Column-wise shift is purely partitionwise.
    if axis == 1:
        return self.map_partitions(
            M.shift,
            token="shift",
            periods=periods,
            freq=freq,
            axis=1,
            enforce_metadata=False,
        )

    # Positional (no-freq) shift moves rows across partition boundaries, so
    # it needs |periods| rows of overlap from a neighbouring partition.
    if freq is None:
        before, after = (periods, 0) if periods > 0 else (0, -periods)
        return self.map_overlap(
            M.shift, before, after, token="shift", periods=periods
        )

    # Let pandas error on invalid arguments
    meta = self._meta_nonempty.shift(periods, freq=freq)
    out = self.map_partitions(
        M.shift,
        token="shift",
        periods=periods,
        freq=freq,
        meta=meta,
        enforce_metadata=False,
        transform_divisions=False,
    )
    # Frequency shifts move the index itself, so the divisions may need to
    # be shifted (or dropped) to stay consistent.
    return maybe_shift_divisions(out, periods, freq=freq)
def _reduction_agg(self, name, axis=None, skipna=True, split_every=False, out=None):
    """Shared implementation behind simple axis reductions (sum, min, ...).

    ``name`` is the pandas method name; the reduction is either applied
    partitionwise (axis=1) or via the tree-reduction machinery (axis=0).
    """
    axis = self._validate_axis(axis)

    meta = getattr(self._meta_nonempty, name)(axis=axis, skipna=skipna)
    token = self._token_prefix + name

    method = getattr(M, name)
    if axis == 1:
        # Row-wise reductions never cross partitions.
        result = self.map_partitions(
            method, meta=meta, token=token, skipna=skipna, axis=axis
        )
        return handle_out(out, result)
    else:
        result = self.reduction(
            method,
            meta=meta,
            token=token,
            skipna=skipna,
            axis=axis,
            split_every=split_every,
        )
        # A DataFrame reduces to a Series indexed by column labels, so the
        # divisions become the column-label range.
        if isinstance(self, DataFrame):
            result.divisions = (self.columns.min(), self.columns.max())
        return handle_out(out, result)
@derived_from(pd.DataFrame)
def add_prefix(self, prefix):
    out = self.map_partitions(M.add_prefix, prefix)
    # For a Series the prefix is applied to the index labels themselves,
    # so known divisions must be re-labelled to match.
    if is_series_like(self) and self.known_divisions:
        out.divisions = tuple(prefix + str(d) for d in self.divisions)
    return out
@derived_from(pd.DataFrame)
def add_suffix(self, suffix):
    out = self.map_partitions(M.add_suffix, suffix)
    # For a Series the suffix is applied to the index labels themselves,
    # so known divisions must be re-labelled to match.
    if is_series_like(self) and self.known_divisions:
        out.divisions = tuple(str(d) + suffix for d in self.divisions)
    return out
@derived_from(pd.DataFrame)
def abs(self):
    # abs() is undefined for object-dtype data; fail early with a clear error.
    _raise_if_object_series(self, "abs")
    return self.map_partitions(
        M.abs, meta=self._meta_nonempty.abs(), enforce_metadata=False
    )
@derived_from(pd.DataFrame)
def all(self, axis=None, skipna=True, split_every=False, out=None):
    # Delegates to the shared reduction machinery.
    kwargs = dict(axis=axis, skipna=skipna, split_every=split_every, out=out)
    return self._reduction_agg("all", **kwargs)
@derived_from(pd.DataFrame)
def any(self, axis=None, skipna=True, split_every=False, out=None):
    # Delegates to the shared reduction machinery.
    kwargs = dict(axis=axis, skipna=skipna, split_every=split_every, out=out)
    return self._reduction_agg("any", **kwargs)
@_numeric_only
@derived_from(pd.DataFrame)
def sum(
    self,
    axis=None,
    skipna=True,
    split_every=False,
    dtype=None,
    out=None,
    min_count=None,
    numeric_only=None,
):
    total = self._reduction_agg(
        "sum", axis=axis, skipna=skipna, split_every=split_every, out=out
    )
    if not min_count:
        return total
    # With min_count set, mask out any result computed from fewer than
    # ``min_count`` non-null values.
    enough = self.notnull().sum(axis=axis) >= min_count
    if is_series_like(enough):
        return total.where(enough, other=np.NaN)
    return _scalar_binary(lambda x, y: total if x is y else np.NaN, enough, True)
@_numeric_only
@derived_from(pd.DataFrame)
def prod(
    self,
    axis=None,
    skipna=True,
    split_every=False,
    dtype=None,
    out=None,
    min_count=None,
    numeric_only=None,
):
    total = self._reduction_agg(
        "prod", axis=axis, skipna=skipna, split_every=split_every, out=out
    )
    if not min_count:
        return total
    # With min_count set, mask out any result computed from fewer than
    # ``min_count`` non-null values.
    enough = self.notnull().sum(axis=axis) >= min_count
    if is_series_like(enough):
        return total.where(enough, other=np.NaN)
    return _scalar_binary(lambda x, y: total if x is y else np.NaN, enough, True)

product = prod  # aliased dd.product
@_numeric_only
@derived_from(pd.DataFrame)
def max(
    self, axis=None, skipna=True, split_every=False, out=None, numeric_only=None
):
    # Delegates to the shared reduction machinery.
    kwargs = dict(axis=axis, skipna=skipna, split_every=split_every, out=out)
    return self._reduction_agg("max", **kwargs)
@_numeric_only
@derived_from(pd.DataFrame)
def min(
    self, axis=None, skipna=True, split_every=False, out=None, numeric_only=None
):
    # Delegates to the shared reduction machinery.
    kwargs = dict(axis=axis, skipna=skipna, split_every=split_every, out=out)
    return self._reduction_agg("min", **kwargs)
@derived_from(pd.DataFrame)
def idxmax(self, axis=None, skipna=True, split_every=False):
    # Index label of the first occurrence of the maximum value.
    fn = "idxmax"
    axis = self._validate_axis(axis)
    meta = self._meta_nonempty.idxmax(axis=axis, skipna=skipna)
    if axis == 1:
        # Row-wise: purely partitionwise.
        return map_partitions(
            M.idxmax,
            self,
            meta=meta,
            token=self._token_prefix + fn,
            skipna=skipna,
            axis=axis,
            enforce_metadata=False,
        )
    else:
        # Column-wise: tree reduction via apply-concat-apply, tracking both
        # the extreme value and its index through the combine steps.
        scalar = not is_series_like(meta)
        result = aca(
            [self],
            chunk=idxmaxmin_chunk,
            aggregate=idxmaxmin_agg,
            combine=idxmaxmin_combine,
            meta=meta,
            aggregate_kwargs={"scalar": scalar},
            token=self._token_prefix + fn,
            split_every=split_every,
            skipna=skipna,
            fn=fn,
        )
        # A DataFrame reduces to a Series indexed by column labels.
        if isinstance(self, DataFrame):
            result.divisions = (min(self.columns), max(self.columns))
        return result
@derived_from(pd.DataFrame)
def idxmin(self, axis=None, skipna=True, split_every=False):
    # Index label of the first occurrence of the minimum value.
    fn = "idxmin"
    axis = self._validate_axis(axis)
    # Fix: meta was previously derived via ``idxmax`` and silently dropped
    # ``skipna``; use ``idxmin`` with the caller's ``skipna`` so the meta
    # derivation mirrors ``idxmax`` above.
    meta = self._meta_nonempty.idxmin(axis=axis, skipna=skipna)
    if axis == 1:
        # Row-wise: purely partitionwise.
        return map_partitions(
            M.idxmin,
            self,
            meta=meta,
            token=self._token_prefix + fn,
            skipna=skipna,
            axis=axis,
            enforce_metadata=False,
        )
    else:
        # Column-wise: tree reduction via apply-concat-apply, tracking both
        # the extreme value and its index through the combine steps.
        scalar = not is_series_like(meta)
        result = aca(
            [self],
            chunk=idxmaxmin_chunk,
            aggregate=idxmaxmin_agg,
            combine=idxmaxmin_combine,
            meta=meta,
            aggregate_kwargs={"scalar": scalar},
            token=self._token_prefix + fn,
            split_every=split_every,
            skipna=skipna,
            fn=fn,
        )
        # A DataFrame reduces to a Series indexed by column labels.
        if isinstance(self, DataFrame):
            result.divisions = (min(self.columns), max(self.columns))
        return result
@_numeric_only
@derived_from(pd.DataFrame)
def count(self, axis=None, split_every=False, numeric_only=None):
    # Count non-null entries along ``axis``.
    axis = self._validate_axis(axis)
    token = self._token_prefix + "count"
    if axis == 1:
        # Row-wise counts are purely partitionwise.
        meta = self._meta_nonempty.count(axis=axis)
        return self.map_partitions(
            M.count, meta=meta, token=token, axis=axis, enforce_metadata=False
        )
    else:
        meta = self._meta_nonempty.count()

        # Need the astype(int) for empty dataframes, which sum to float dtype
        result = self.reduction(
            M.count,
            aggregate=_count_aggregate,
            meta=meta,
            token=token,
            split_every=split_every,
        )
        # A DataFrame reduces to a Series indexed by column labels.
        if isinstance(self, DataFrame):
            result.divisions = (self.columns.min(), self.columns.max())
        return result
@derived_from(pd.DataFrame)
def mode(self, dropna=True, split_every=False):
    # Count value frequencies per partition, sum the counts across
    # partitions, then select the most frequent value(s) when aggregating.
    return self.reduction(
        chunk=M.value_counts,
        combine=M.sum,
        aggregate=_mode_aggregate,
        split_every=split_every,
        chunk_kwargs={"dropna": dropna},
        aggregate_kwargs={"dropna": dropna},
    )
@_numeric_only
@derived_from(pd.DataFrame)
def mean(
    self,
    axis=None,
    skipna=True,
    split_every=False,
    dtype=None,
    out=None,
    numeric_only=None,
):
    axis = self._validate_axis(axis)
    _raise_if_object_series(self, "mean")
    meta = self._meta_nonempty.mean(axis=axis, skipna=skipna)
    if axis == 1:
        # Row-wise mean is purely partitionwise.
        result = map_partitions(
            M.mean,
            self,
            meta=meta,
            token=self._token_prefix + "mean",
            axis=axis,
            skipna=skipna,
            enforce_metadata=False,
        )
        return handle_out(out, result)
    else:
        # Column-wise mean is computed as sum / count so each piece can be
        # tree-reduced independently before the final combine.
        num = self._get_numeric_data()
        s = num.sum(skipna=skipna, split_every=split_every)
        n = num.count(split_every=split_every)
        name = self._token_prefix + "mean-%s" % tokenize(self, axis, skipna)
        result = map_partitions(
            methods.mean_aggregate,
            s,
            n,
            token=name,
            meta=meta,
            enforce_metadata=False,
            parent_meta=self._meta,
        )
        # A DataFrame reduces to a Series indexed by column labels.
        if isinstance(self, DataFrame):
            result.divisions = (self.columns.min(), self.columns.max())
        return handle_out(out, result)
@_numeric_only
@derived_from(pd.DataFrame)
def var(
    self,
    axis=None,
    skipna=True,
    ddof=1,
    split_every=False,
    dtype=None,
    out=None,
    numeric_only=None,
):
    axis = self._validate_axis(axis)
    _raise_if_object_series(self, "var")
    meta = self._meta_nonempty.var(axis=axis, skipna=skipna)
    if axis == 1:
        # Row-wise variance is purely partitionwise.
        result = map_partitions(
            M.var,
            self,
            meta=meta,
            token=self._token_prefix + "var",
            axis=axis,
            skipna=skipna,
            ddof=ddof,
            enforce_metadata=False,
        )
        return handle_out(out, result)
    else:
        # A Series goes straight to the 1-d implementation.
        if self.ndim == 1:
            result = self._var_1d(self, skipna, ddof, split_every)
            return handle_out(out, result)

        # pandas 1.0+ does not implement var on timedelta
        result = self._var_numeric(skipna, ddof, split_every)
        # A DataFrame reduces to a Series indexed by column labels.
        if isinstance(self, DataFrame):
            result.divisions = (self.columns.min(), self.columns.max())
        return handle_out(out, result)
def _var_numeric(self, skipna=True, ddof=1, split_every=False):
    """Column-wise variance over the numeric/bool columns via dask.array."""
    num = self.select_dtypes(include=["number", "bool"], exclude=[np.timedelta64])

    values_dtype = num.values.dtype
    array_values = num.values

    # Mixed/extension dtypes materialize as non-numeric arrays; cast to f8.
    if not np.issubdtype(values_dtype, np.number):
        array_values = num.values.astype("f8")

    var = da.nanvar if skipna or skipna is None else da.var
    array_var = var(array_values, axis=0, ddof=ddof, split_every=split_every)

    name = self._token_prefix + "var-numeric" + tokenize(num, split_every)
    cols = num._meta.columns if is_dataframe_like(num) else None

    # Key depth of the array result depends on its rank; build the matching
    # zero-indexed key to wrap it back into a dataframe collection.
    var_shape = num._meta_nonempty.values.var(axis=0).shape
    array_var_name = (array_var._name,) + (0,) * len(var_shape)

    layer = {(name, 0): (methods.wrap_var_reduction, array_var_name, cols)}
    graph = HighLevelGraph.from_collections(name, layer, dependencies=[array_var])

    return new_dd_object(
        graph, name, num._meta_nonempty.var(), divisions=[None, None]
    )
def _var_timedeltas(self, skipna=True, ddof=1, split_every=False):
    """Column-wise variance over the timedelta columns, one column at a time."""
    timedeltas = self.select_dtypes(include=[np.timedelta64])

    # Each timedelta column goes through the 1-d path (which casts to i8).
    var_timedeltas = [
        self._var_1d(timedeltas[col_idx], skipna, ddof, split_every)
        for col_idx in timedeltas._meta.columns
    ]
    var_timedelta_names = [(v._name, 0) for v in var_timedeltas]

    name = (
        self._token_prefix + "var-timedeltas-" + tokenize(timedeltas, split_every)
    )

    layer = {
        (name, 0): (
            methods.wrap_var_reduction,
            var_timedelta_names,
            timedeltas._meta.columns,
        )
    }
    graph = HighLevelGraph.from_collections(
        name, layer, dependencies=var_timedeltas
    )

    return new_dd_object(
        graph, name, timedeltas._meta_nonempty.var(), divisions=[None, None]
    )
def _var_mixed(self, skipna=True, ddof=1, split_every=False):
    """Column-wise variance when both numeric and timedelta columns exist.

    Computes the two groups separately and concatenates the results back
    into the original column order.
    """
    data = self.select_dtypes(include=["number", "bool", np.timedelta64])

    timedelta_vars = self._var_timedeltas(skipna, ddof, split_every)
    numeric_vars = self._var_numeric(skipna, ddof, split_every)

    name = self._token_prefix + "var-mixed-" + tokenize(data, split_every)

    layer = {
        (name, 0): (
            methods.var_mixed_concat,
            (numeric_vars._name, 0),
            (timedelta_vars._name, 0),
            data._meta.columns,
        )
    }

    graph = HighLevelGraph.from_collections(
        name, layer, dependencies=[numeric_vars, timedelta_vars]
    )
    return new_dd_object(
        graph, name, self._meta_nonempty.var(), divisions=[None, None]
    )
def _var_1d(self, column, skipna=True, ddof=1, split_every=False):
    """Variance of a single column/Series via dask.array."""
    is_timedelta = is_timedelta64_dtype(column._meta)

    # Timedeltas have no array var; compute on their integer (ns) values,
    # preserving NaT positions as NaN when skipna is False.
    if is_timedelta:
        if not skipna:
            is_nan = column.isna()
            column = column.astype("i8")
            column = column.mask(is_nan)
        else:
            column = column.dropna().astype("i8")

    # Nullable Int64 and other non-numeric dtypes are cast to float so the
    # array reduction can run.
    if pd.Int64Dtype.is_dtype(column._meta_nonempty):
        column = column.astype("f8")

    if not np.issubdtype(column.dtype, np.number):
        column = column.astype("f8")

    name = self._token_prefix + "var-1d-" + tokenize(column, split_every)

    var = da.nanvar if skipna or skipna is None else da.var
    array_var = var(column.values, axis=0, ddof=ddof, split_every=split_every)

    layer = {(name, 0): (methods.wrap_var_reduction, (array_var._name,), None)}
    graph = HighLevelGraph.from_collections(name, layer, dependencies=[array_var])

    return new_dd_object(
        graph, name, column._meta_nonempty.var(), divisions=[None, None]
    )
@_numeric_only
@derived_from(pd.DataFrame)
def std(
    self,
    axis=None,
    skipna=True,
    ddof=1,
    split_every=False,
    dtype=None,
    out=None,
    numeric_only=None,
):
    axis = self._validate_axis(axis)
    _raise_if_object_series(self, "std")
    meta = self._meta_nonempty.std(axis=axis, skipna=skipna)
    if axis == 1:
        # Row-wise std is purely partitionwise.
        result = map_partitions(
            M.std,
            self,
            meta=meta,
            token=self._token_prefix + "std",
            axis=axis,
            skipna=skipna,
            ddof=ddof,
            enforce_metadata=False,
            parent_meta=self._meta,
        )
        return handle_out(out, result)
    else:
        # Column-wise std is the square root of the (tree-reduced) variance.
        v = self.var(skipna=skipna, ddof=ddof, split_every=split_every)
        name = self._token_prefix + "std"
        result = map_partitions(
            np.sqrt,
            v,
            meta=meta,
            token=name,
            enforce_metadata=False,
            parent_meta=self._meta,
        )
        return handle_out(out, result)
@_numeric_only
@derived_from(pd.DataFrame)
def skew(
    self, axis=None, bias=True, nan_policy="propagate", out=None, numeric_only=None
):
    """
    .. note::

       This implementation follows the dask.array.stats implementation
       of skewness and calculates skewness without taking into account
       a bias term for finite sample size, which corresponds to the
       default settings of the scipy.stats skewness calculation. However,
       Pandas corrects for this, so the values differ by a factor of
       (n * (n - 1)) ** 0.5 / (n - 2), where n is the number of samples.

       Further, this method currently does not support filtering out NaN
       values, which is again a difference to Pandas.
    """
    axis = self._validate_axis(axis)
    _raise_if_object_series(self, "skew")
    meta = self._meta_nonempty.skew()
    if axis == 1:
        # Row-wise skew is purely partitionwise (uses pandas directly).
        result = map_partitions(
            M.skew,
            self,
            meta=meta,
            token=self._token_prefix + "skew",
            axis=axis,
            enforce_metadata=False,
        )
        return handle_out(out, result)
    else:
        # Column-wise: Series goes through the 1-d path, DataFrame through
        # the numeric-columns path.
        if self.ndim == 1:
            result = self._skew_1d(self, bias=bias, nan_policy=nan_policy)
            return handle_out(out, result)
        else:
            result = self._skew_numeric(bias=bias, nan_policy=nan_policy)

        # A DataFrame reduces to a Series indexed by column labels.
        if isinstance(self, DataFrame):
            result.divisions = (self.columns.min(), self.columns.max())

        return handle_out(out, result)
def _skew_1d(self, column, bias=True, nan_policy="propagate"):
    """1D version of the skew calculation.

    Uses the array version from da.stats in case we are passing in a single series
    """
    # import depends on scipy, not installed by default
    from ..array import stats as da_stats

    # Nullable Int64 and other non-numeric dtypes are cast to float so the
    # array reduction can run.
    if pd.Int64Dtype.is_dtype(column._meta_nonempty):
        column = column.astype("f8")

    if not np.issubdtype(column.dtype, np.number):
        column = column.astype("f8")

    name = self._token_prefix + "skew-1d-" + tokenize(column)

    array_skew = da_stats.skew(
        column.values, axis=0, bias=bias, nan_policy=nan_policy
    )

    layer = {(name, 0): (methods.wrap_skew_reduction, (array_skew._name,), None)}
    graph = HighLevelGraph.from_collections(name, layer, dependencies=[array_skew])

    return new_dd_object(
        graph, name, column._meta_nonempty.skew(), divisions=[None, None]
    )
def _skew_numeric(self, bias=True, nan_policy="propagate"):
    """Method for dataframes with numeric columns.

    Maps the array version from da.stats onto the numeric array of columns.
    """
    # import depends on scipy, not installed by default
    from ..array import stats as da_stats

    num = self.select_dtypes(include=["number", "bool"], exclude=[np.timedelta64])

    values_dtype = num.values.dtype
    array_values = num.values

    # Mixed/extension dtypes materialize as non-numeric arrays; cast to f8.
    if not np.issubdtype(values_dtype, np.number):
        array_values = num.values.astype("f8")

    array_skew = da_stats.skew(
        array_values, axis=0, bias=bias, nan_policy=nan_policy
    )

    # Fix: the token previously read "var-numeric" (copy-paste from
    # ``_var_numeric``); label the layer with the operation it performs.
    name = self._token_prefix + "skew-numeric" + tokenize(num)
    cols = num._meta.columns if is_dataframe_like(num) else None

    # Key depth of the array result depends on its rank; build the matching
    # zero-indexed key to wrap it back into a dataframe collection.
    skew_shape = num._meta_nonempty.values.var(axis=0).shape
    array_skew_name = (array_skew._name,) + (0,) * len(skew_shape)

    layer = {(name, 0): (methods.wrap_skew_reduction, array_skew_name, cols)}
    graph = HighLevelGraph.from_collections(name, layer, dependencies=[array_skew])

    return new_dd_object(
        graph, name, num._meta_nonempty.skew(), divisions=[None, None]
    )
@_numeric_only
@derived_from(pd.DataFrame)
def kurtosis(
    self,
    axis=None,
    fisher=True,
    bias=True,
    nan_policy="propagate",
    out=None,
    numeric_only=None,
):
    """
    .. note::

       This implementation follows the dask.array.stats implementation
       of kurtosis and calculates kurtosis without taking into account
       a bias term for finite sample size, which corresponds to the
       default settings of the scipy.stats kurtosis calculation. This differs
       from pandas.

       Further, this method currently does not support filtering out NaN
       values, which is again a difference to Pandas.
    """
    axis = self._validate_axis(axis)
    _raise_if_object_series(self, "kurtosis")
    meta = self._meta_nonempty.kurtosis()
    if axis == 1:
        # Row-wise kurtosis is purely partitionwise (uses pandas directly).
        result = map_partitions(
            M.kurtosis,
            self,
            meta=meta,
            token=self._token_prefix + "kurtosis",
            axis=axis,
            enforce_metadata=False,
        )
        return handle_out(out, result)
    else:
        # Column-wise: Series goes through the 1-d path, DataFrame through
        # the numeric-columns path.
        if self.ndim == 1:
            result = self._kurtosis_1d(
                self, fisher=fisher, bias=bias, nan_policy=nan_policy
            )
            return handle_out(out, result)
        else:
            result = self._kurtosis_numeric(
                fisher=fisher, bias=bias, nan_policy=nan_policy
            )

        # A DataFrame reduces to a Series indexed by column labels.
        if isinstance(self, DataFrame):
            result.divisions = (self.columns.min(), self.columns.max())

        return handle_out(out, result)
def _kurtosis_1d(self, column, fisher=True, bias=True, nan_policy="propagate"):
    """1D version of the kurtosis calculation.

    Uses the array version from da.stats in case we are passing in a single series
    """
    # import depends on scipy, not installed by default
    from ..array import stats as da_stats

    # Integer and other non-float dtypes are cast so the array reduction
    # can run.
    if pd.api.types.is_integer_dtype(column._meta_nonempty):
        column = column.astype("f8")

    if not np.issubdtype(column.dtype, np.number):
        column = column.astype("f8")

    name = self._token_prefix + "kurtosis-1d-" + tokenize(column)

    array_kurtosis = da_stats.kurtosis(
        column.values, axis=0, fisher=fisher, bias=bias, nan_policy=nan_policy
    )

    layer = {
        (name, 0): (methods.wrap_kurtosis_reduction, (array_kurtosis._name,), None)
    }
    graph = HighLevelGraph.from_collections(
        name, layer, dependencies=[array_kurtosis]
    )

    return new_dd_object(
        graph, name, column._meta_nonempty.kurtosis(), divisions=[None, None]
    )
def _kurtosis_numeric(self, fisher=True, bias=True, nan_policy="propagate"):
    """Method for dataframes with numeric columns.

    Maps the array version from da.stats onto the numeric array of columns.
    """
    # import depends on scipy, not installed by default
    from ..array import stats as da_stats

    num = self.select_dtypes(include=["number", "bool"], exclude=[np.timedelta64])

    values_dtype = num.values.dtype
    array_values = num.values

    # Mixed/extension dtypes materialize as non-numeric arrays; cast to f8.
    if not np.issubdtype(values_dtype, np.number):
        array_values = num.values.astype("f8")

    array_kurtosis = da_stats.kurtosis(
        array_values, axis=0, fisher=fisher, bias=bias, nan_policy=nan_policy
    )

    name = self._token_prefix + "kurtosis-numeric" + tokenize(num)
    cols = num._meta.columns if is_dataframe_like(num) else None

    # Key depth of the array result depends on its rank; build the matching
    # zero-indexed key to wrap it back into a dataframe collection.
    kurtosis_shape = num._meta_nonempty.values.var(axis=0).shape
    array_kurtosis_name = (array_kurtosis._name,) + (0,) * len(kurtosis_shape)

    layer = {
        (name, 0): (methods.wrap_kurtosis_reduction, array_kurtosis_name, cols)
    }
    graph = HighLevelGraph.from_collections(
        name, layer, dependencies=[array_kurtosis]
    )

    return new_dd_object(
        graph, name, num._meta_nonempty.kurtosis(), divisions=[None, None]
    )
@_numeric_only
@derived_from(pd.DataFrame)
def sem(self, axis=None, skipna=None, ddof=1, split_every=False, numeric_only=None):
    # Standard error of the mean: sqrt(var / count).
    axis = self._validate_axis(axis)
    _raise_if_object_series(self, "sem")
    meta = self._meta_nonempty.sem(axis=axis, skipna=skipna, ddof=ddof)
    if axis == 1:
        # Row-wise sem is purely partitionwise.
        return map_partitions(
            M.sem,
            self,
            meta=meta,
            token=self._token_prefix + "sem",
            axis=axis,
            skipna=skipna,
            ddof=ddof,
            parent_meta=self._meta,
        )
    else:
        # Column-wise: combine the tree-reduced variance and count.
        num = self._get_numeric_data()
        v = num.var(skipna=skipna, ddof=ddof, split_every=split_every)
        n = num.count(split_every=split_every)
        name = self._token_prefix + "sem"
        result = map_partitions(
            np.sqrt,
            v / n,
            meta=meta,
            token=name,
            enforce_metadata=False,
            parent_meta=self._meta,
        )

        # A DataFrame reduces to a Series indexed by column labels.
        if isinstance(self, DataFrame):
            result.divisions = (self.columns.min(), self.columns.max())
        return result
def quantile(self, q=0.5, axis=0, method="default"):
    """Approximate row-wise and precise column-wise quantiles of DataFrame

    Parameters
    ----------
    q : list/array of floats, default 0.5 (50%)
        Iterable of numbers ranging from 0 to 1 for the desired quantiles
    axis : {0, 1, 'index', 'columns'} (default 0)
        0 or 'index' for row-wise, 1 or 'columns' for column-wise
    method : {'default', 'tdigest', 'dask'}, optional
        What method to use. By default will use dask's internal custom
        algorithm (``'dask'``).  If set to ``'tdigest'`` will use tdigest
        for floats and ints and fallback to the ``'dask'`` otherwise.
    """
    axis = self._validate_axis(axis)
    keyname = "quantiles-concat--" + tokenize(self, q, axis)

    if axis == 1:
        if isinstance(q, list):
            # Not supported, the result will have current index as columns
            raise ValueError("'q' must be scalar when axis=1 is specified")
        # Row-wise quantile is purely partitionwise.
        return map_partitions(
            M.quantile,
            self,
            q,
            axis,
            token=keyname,
            enforce_metadata=False,
            meta=(q, "f8"),
            parent_meta=self._meta,
        )
    else:
        _raise_if_object_series(self, "quantile")
        meta = self._meta.quantile(q, axis=axis)
        num = self._get_numeric_data()
        # One per-column quantile computation; results are stitched back
        # together below.
        quantiles = tuple(quantile(self[c], q, method) for c in num.columns)

        qnames = [(_q._name, 0) for _q in quantiles]

        if isinstance(quantiles[0], Scalar):
            # Scalar q: assemble the per-column scalars into one Series
            # indexed by column label.
            layer = {
                (keyname, 0): (type(meta), qnames, num.columns, None, meta.name)
            }
            graph = HighLevelGraph.from_collections(
                keyname, layer, dependencies=quantiles
            )
            divisions = (min(num.columns), max(num.columns))
            return Series(graph, keyname, meta, divisions)
        else:
            # List q: concatenate the per-column quantile Series column-wise
            # into a DataFrame indexed by q.
            layer = {(keyname, 0): (methods.concat, qnames, 1)}
            graph = HighLevelGraph.from_collections(
                keyname, layer, dependencies=quantiles
            )
            return DataFrame(graph, keyname, meta, quantiles[0].divisions)
@derived_from(pd.DataFrame)
def describe(
    self,
    split_every=False,
    percentiles=None,
    percentiles_method="default",
    include=None,
    exclude=None,
    datetime_is_numeric=False,
):
    # ``datetime_is_numeric`` maps straight onto the pandas keyword, which
    # only exists for pandas >= 1.1.0.
    if PANDAS_GT_110:
        datetime_is_numeric_kwarg = {"datetime_is_numeric": datetime_is_numeric}
    elif datetime_is_numeric:
        raise NotImplementedError(
            "datetime_is_numeric=True is only supported for pandas >= 1.1.0"
        )
    else:
        datetime_is_numeric_kwarg = {}

    if self._meta.ndim == 1:
        # Series case: delegate to the 1-d helper, then overwrite the meta
        # with the exact pandas result so downstream metadata is precise.
        meta = self._meta_nonempty.describe(
            percentiles=percentiles,
            include=include,
            exclude=exclude,
            **datetime_is_numeric_kwarg,
        )
        output = self._describe_1d(
            self, split_every, percentiles, percentiles_method, datetime_is_numeric
        )
        output._meta = meta
        return output
    elif (include is None) and (exclude is None):
        # Default column selection mirrors pandas: numerics/timedeltas
        # (and datetimes when datetime_is_numeric) if present, otherwise
        # all columns.
        _include = [np.number, np.timedelta64]
        if datetime_is_numeric:
            _include.append(np.datetime64)
        data = self._meta.select_dtypes(include=_include)

        # when some numerics/timedeltas are found, by default keep them
        if len(data.columns) == 0:
            chosen_columns = self._meta.columns
        else:
            # check if there are timedelta, boolean, or datetime columns
            _include = [np.timedelta64, bool]
            if datetime_is_numeric:
                _include.append(np.datetime64)
            bools_and_times = self._meta.select_dtypes(include=_include)
            if len(bools_and_times.columns) == 0:
                # Purely numeric frame: take the fast numeric-only path.
                return self._describe_numeric(
                    self,
                    split_every,
                    percentiles,
                    percentiles_method,
                )
            else:
                chosen_columns = data.columns
    elif include == "all":
        if exclude is not None:
            msg = "exclude must be None when include is 'all'"
            raise ValueError(msg)
        chosen_columns = self._meta.columns
    else:
        chosen_columns = self._meta.select_dtypes(include=include, exclude=exclude)

    # Describe each chosen column independently, then merge the per-column
    # results in a single aggregation task.
    stats = [
        self._describe_1d(
            self[col_idx],
            split_every,
            percentiles,
            percentiles_method,
            datetime_is_numeric,
        )
        for col_idx in chosen_columns
    ]
    stats_names = [(s._name, 0) for s in stats]

    name = "describe--" + tokenize(self, split_every)
    layer = {(name, 0): (methods.describe_aggregate, stats_names)}
    graph = HighLevelGraph.from_collections(name, layer, dependencies=stats)
    meta = self._meta_nonempty.describe(
        include=include, exclude=exclude, **datetime_is_numeric_kwarg
    )
    return new_dd_object(graph, name, meta, divisions=[None, None])
def _describe_1d(
    self,
    data,
    split_every=False,
    percentiles=None,
    percentiles_method="default",
    datetime_is_numeric=False,
):
    """Dispatch ``describe`` for a single column/Series by dtype.

    The check order matters: bool dtypes are numeric-like in pandas, so
    they must be routed to the non-numeric path before the numeric check.
    """
    if is_bool_dtype(data._meta):
        return self._describe_nonnumeric_1d(
            data, split_every=split_every, datetime_is_numeric=datetime_is_numeric
        )
    elif is_numeric_dtype(data._meta):
        return self._describe_numeric(
            data,
            split_every=split_every,
            percentiles=percentiles,
            percentiles_method=percentiles_method,
        )
    elif is_timedelta64_dtype(data._meta):
        # Timedeltas are described numerically on their non-null values.
        return self._describe_numeric(
            data.dropna(),
            split_every=split_every,
            percentiles=percentiles,
            percentiles_method=percentiles_method,
            is_timedelta_column=True,
        )
    elif is_datetime64_any_dtype(data._meta) and datetime_is_numeric:
        return self._describe_numeric(
            data.dropna(),
            split_every=split_every,
            percentiles=percentiles,
            percentiles_method=percentiles_method,
            is_datetime_column=True,
        )
    else:
        return self._describe_nonnumeric_1d(
            data, split_every=split_every, datetime_is_numeric=datetime_is_numeric
        )
def _describe_numeric(
    self,
    data,
    split_every=False,
    percentiles=None,
    percentiles_method="default",
    is_timedelta_column=False,
    is_datetime_column=False,
):
    """Compute count/mean/std/min/quantiles/max and aggregate them into
    the familiar ``describe`` layout for numeric-like data."""
    from .numeric import to_numeric

    if is_timedelta_column or is_datetime_column:
        # Describe timedeltas/datetimes via their integer representation;
        # the aggregate step converts the results back.
        num = to_numeric(data)
    else:
        num = data._get_numeric_data()

    if data.ndim == 2 and len(num.columns) == 0:
        raise ValueError("DataFrame contains only non-numeric data.")
    elif data.ndim == 1 and data.dtype == "object":
        raise ValueError("Cannot compute ``describe`` on object dtype.")
    if percentiles is None:
        percentiles = [0.25, 0.5, 0.75]
    else:
        # always include the 50%tle to calculate the median
        # unique removes duplicates and sorts quantiles
        percentiles = np.array(percentiles)
        percentiles = np.append(percentiles, 0.5)
        percentiles = np.unique(percentiles)
        percentiles = list(percentiles)
    stats = [
        num.count(split_every=split_every),
        num.mean(split_every=split_every),
        num.std(split_every=split_every),
        num.min(split_every=split_every),
        num.quantile(percentiles, method=percentiles_method),
        num.max(split_every=split_every),
    ]
    stats_names = [(s._name, 0) for s in stats]

    colname = data._meta.name if is_series_like(data._meta) else None

    name = "describe-numeric--" + tokenize(num, split_every)
    layer = {
        (name, 0): (
            methods.describe_numeric_aggregate,
            stats_names,
            colname,
            is_timedelta_column,
            is_datetime_column,
        )
    }
    graph = HighLevelGraph.from_collections(name, layer, dependencies=stats)
    meta = num._meta_nonempty.describe()
    return new_dd_object(graph, name, meta, divisions=[None, None])
def _describe_nonnumeric_1d(
    self, data, split_every=False, datetime_is_numeric=False
):
    """``describe`` for object/bool/datetime Series: count, unique count,
    top value, and (for datetimes) first/last timestamps."""
    from .numeric import to_numeric

    vcounts = data.value_counts(split_every=split_every)
    count_nonzero = vcounts[vcounts != 0]
    count_unique = count_nonzero.size

    stats = [
        # nunique
        count_unique,
        # count
        data.count(split_every=split_every),
        # most common value
        vcounts._head(1, npartitions=1, compute=False, safe=False),
    ]

    if is_datetime64_any_dtype(data._meta) and not datetime_is_numeric:
        # min/max over the integer representation of the timestamps.
        min_ts = to_numeric(data.dropna()).min(split_every=split_every)
        max_ts = to_numeric(data.dropna()).max(split_every=split_every)
        stats.extend([min_ts, max_ts])

    stats_names = [(s._name, 0) for s in stats]
    colname = data._meta.name

    name = "describe-nonnumeric-1d--" + tokenize(data, split_every)
    layer = {
        (name, 0): (methods.describe_nonnumeric_aggregate, stats_names, colname)
    }
    graph = HighLevelGraph.from_collections(name, layer, dependencies=stats)

    # ``datetime_is_numeric`` keyword only exists on pandas >= 1.1.0.
    if PANDAS_GT_110:
        datetime_is_numeric_kwarg = {"datetime_is_numeric": datetime_is_numeric}
    elif datetime_is_numeric:
        raise NotImplementedError(
            "datetime_is_numeric=True is only supported for pandas >= 1.1.0"
        )
    else:
        datetime_is_numeric_kwarg = {}

    meta = data._meta_nonempty.describe(**datetime_is_numeric_kwarg)
    return new_dd_object(graph, name, meta, divisions=[None, None])
def _cum_agg(
    self, op_name, chunk, aggregate, axis, skipna=True, chunk_kwargs=None, out=None
):
    """Wrapper for cumulative operation

    Parameters
    ----------
    op_name : str
        Name of the operation (e.g. ``"cumsum"``), used for task tokens.
    chunk : callable
        Per-partition cumulative function (e.g. ``M.cumsum``).
    aggregate : callable
        Binary function combining a partition's cumulative result with the
        carried value from preceding partitions.
    axis, skipna, chunk_kwargs, out
        Forwarded per the pandas cumulative-method conventions.
    """
    axis = self._validate_axis(axis)

    if axis == 1:
        # Row-wise cumulation never crosses partition boundaries, so a
        # plain map over partitions suffices.
        name = f"{self._token_prefix}{op_name}(axis=1)"
        result = self.map_partitions(chunk, token=name, **chunk_kwargs)
        return handle_out(out, result)
    else:
        # cumulate each partition independently first
        name1 = f"{self._token_prefix}{op_name}-map"
        cumpart = map_partitions(
            chunk, self, token=name1, meta=self, **chunk_kwargs
        )

        # extract the last cumulated value of each partition; this is the
        # carry that must be folded into all subsequent partitions
        name2 = f"{self._token_prefix}{op_name}-take-last"
        cumlast = map_partitions(
            _take_last,
            cumpart,
            skipna,
            meta=pd.Series([], dtype="float"),
            token=name2,
        )

        suffix = tokenize(self)
        name = f"{self._token_prefix}{op_name}-{suffix}"
        cname = f"{self._token_prefix}{op_name}-cum-last-{suffix}"

        # aggregate cumulated partitions and their previous last elements
        layer = {}
        layer[(name, 0)] = (cumpart._name, 0)

        for i in range(1, self.npartitions):
            # store each cumulative step to graph to reduce computation
            if i == 1:
                layer[(cname, i)] = (cumlast._name, i - 1)
            else:
                # aggregate with previous cumulation results
                layer[(cname, i)] = (
                    methods._cum_aggregate_apply,
                    aggregate,
                    (cname, i - 1),
                    (cumlast._name, i - 1),
                )
            layer[(name, i)] = (aggregate, (cumpart._name, i), (cname, i))
        graph = HighLevelGraph.from_collections(
            name, layer, dependencies=[cumpart, cumlast]
        )
        result = new_dd_object(graph, name, chunk(self._meta), self.divisions)
        return handle_out(out, result)
@derived_from(pd.DataFrame)
def cumsum(self, axis=None, skipna=True, dtype=None, out=None):
    # ``dtype`` is accepted for pandas signature compatibility only; it is
    # not forwarded to the underlying computation.
    return self._cum_agg(
        "cumsum",
        chunk=M.cumsum,
        aggregate=methods.cumsum_aggregate,
        axis=axis,
        skipna=skipna,
        chunk_kwargs=dict(axis=axis, skipna=skipna),
        out=out,
    )
@derived_from(pd.DataFrame)
def cumprod(self, axis=None, skipna=True, dtype=None, out=None):
    # ``dtype`` is accepted for pandas signature compatibility only.
    return self._cum_agg(
        "cumprod",
        chunk=M.cumprod,
        aggregate=methods.cumprod_aggregate,
        axis=axis,
        skipna=skipna,
        chunk_kwargs=dict(axis=axis, skipna=skipna),
        out=out,
    )
@derived_from(pd.DataFrame)
def cummax(self, axis=None, skipna=True, out=None):
    # Cumulative maximum via the generic cumulative-aggregation machinery.
    return self._cum_agg(
        "cummax",
        chunk=M.cummax,
        aggregate=methods.cummax_aggregate,
        axis=axis,
        skipna=skipna,
        chunk_kwargs=dict(axis=axis, skipna=skipna),
        out=out,
    )
@derived_from(pd.DataFrame)
def cummin(self, axis=None, skipna=True, out=None):
    # Cumulative minimum via the generic cumulative-aggregation machinery.
    return self._cum_agg(
        "cummin",
        chunk=M.cummin,
        aggregate=methods.cummin_aggregate,
        axis=axis,
        skipna=skipna,
        chunk_kwargs=dict(axis=axis, skipna=skipna),
        out=out,
    )
@derived_from(pd.DataFrame)
def where(self, cond, other=np.nan):
    # cond and other may be dask instance,
    # passing map_partitions via keyword will not be aligned
    return map_partitions(M.where, self, cond, other, enforce_metadata=False)
@derived_from(pd.DataFrame)
def mask(self, cond, other=np.nan):
    # Inverse of ``where``: positional args so dask operands align properly.
    return map_partitions(M.mask, self, cond, other, enforce_metadata=False)
@derived_from(pd.DataFrame)
def notnull(self):
    # Elementwise null check; purely partition-local.
    return self.map_partitions(M.notnull, enforce_metadata=False)
@derived_from(pd.DataFrame)
def isnull(self):
    # Elementwise null check; purely partition-local.
    return self.map_partitions(M.isnull, enforce_metadata=False)
@derived_from(pd.DataFrame)
def isna(self):
    # ``pd.isna`` exists on all currently-supported pandas versions; the
    # hasattr guard is kept for very old pandas installations.
    if hasattr(pd, "isna"):
        return self.map_partitions(M.isna, enforce_metadata=False)
    else:
        raise NotImplementedError(
            "Need more recent version of Pandas "
            "to support isna. "
            "Please use isnull instead."
        )
@derived_from(pd.DataFrame)
def isin(self, values):
    if is_dataframe_like(self._meta):
        # DataFrame.isin does weird alignment stuff
        bad_types = (_Frame, pd.Series, pd.DataFrame)
    else:
        bad_types = (_Frame,)
    if isinstance(values, bad_types):
        raise NotImplementedError("Passing a %r to `isin`" % typename(type(values)))
    meta = self._meta_nonempty.isin(values)
    # We wrap values in a delayed for two reasons:
    # - avoid serializing data in every task
    # - avoid cost of traversal of large list in optimizations
    return self.map_partitions(
        M.isin, delayed(values), meta=meta, enforce_metadata=False
    )
@derived_from(pd.DataFrame)
def astype(self, dtype):
    # XXX: Pandas will segfault for empty dataframes when setting
    # categorical dtypes. This operation isn't allowed currently anyway. We
    # get the metadata with a non-empty frame to throw the error instead of
    # segfaulting.
    if is_dataframe_like(self._meta) and is_categorical_dtype(dtype):
        meta = self._meta_nonempty.astype(dtype)
    else:
        meta = self._meta.astype(dtype)
    # A categorical dtype without explicit categories means the categories
    # are data-dependent, so the meta must not claim to know them.
    if hasattr(dtype, "items"):
        set_unknown = [
            k
            for k, v in dtype.items()
            if is_categorical_dtype(v) and getattr(v, "categories", None) is None
        ]
        meta = clear_known_categories(meta, cols=set_unknown)
    elif is_categorical_dtype(dtype) and getattr(dtype, "categories", None) is None:
        meta = clear_known_categories(meta)
    return self.map_partitions(
        M.astype, dtype=dtype, meta=meta, enforce_metadata=False
    )
@derived_from(pd.Series)
def append(self, other, interleave_partitions=False):
    # because DataFrame.append will override the method,
    # wrap by pd.Series.append docstring
    from .multi import concat

    if isinstance(other, (list, dict)):
        msg = "append doesn't support list or dict input"
        raise NotImplementedError(msg)

    # Appending is just an outer concat along the index.
    return concat(
        [self, other], join="outer", interleave_partitions=interleave_partitions
    )
@derived_from(pd.Series)
def dot(self, other, meta=no_default):
    if not isinstance(other, _Frame):
        raise TypeError("The second operand must be a dask array or dask dataframe")

    if isinstance(other, DataFrame):
        # Partition-wise partial dot products, then summed per index label.
        # skipna=False so NaNs propagate like a single pandas dot would.
        s = self.map_partitions(M.dot, other, token="dot", meta=meta)
        return s.groupby(by=s.index).apply(
            lambda x: x.sum(skipna=False), meta=s._meta_nonempty
        )

    def _dot_series(*args, **kwargs):
        # .sum() is invoked on each partition before being applied to all
        # partitions. The return type is expected to be a series, not a numpy object
        return pd.Series(M.dot(*args, **kwargs))

    return self.map_partitions(_dot_series, other, token="dot", meta=meta).sum(
        skipna=False
    )
@derived_from(pd.DataFrame)
def align(self, other, join="outer", axis=None, fill_value=None):
    # ``pandas.align`` returns a 2-tuple per partition; split that tuple
    # into two separate dask collections.
    meta1, meta2 = _emulate(
        M.align, self, other, join, axis=axis, fill_value=fill_value
    )
    aligned = self.map_partitions(
        M.align,
        other,
        join=join,
        axis=axis,
        fill_value=fill_value,
        enforce_metadata=False,
    )

    token = tokenize(self, other, join, axis, fill_value)

    # Extract element 0 of each per-partition tuple (the aligned self).
    name1 = "align1-" + token
    dsk1 = {
        (name1, i): (getitem, key, 0)
        for i, key in enumerate(aligned.__dask_keys__())
    }
    dsk1.update(aligned.dask)
    result1 = new_dd_object(dsk1, name1, meta1, aligned.divisions)

    # Extract element 1 (the aligned other).
    name2 = "align2-" + token
    dsk2 = {
        (name2, i): (getitem, key, 1)
        for i, key in enumerate(aligned.__dask_keys__())
    }
    dsk2.update(aligned.dask)
    result2 = new_dd_object(dsk2, name2, meta2, aligned.divisions)

    return result1, result2
@derived_from(pd.DataFrame)
def combine(self, other, func, fill_value=None, overwrite=True):
    # Partition-local delegation to pandas ``combine``.
    return self.map_partitions(
        M.combine, other, func, fill_value=fill_value, overwrite=overwrite
    )
@derived_from(pd.DataFrame)
def combine_first(self, other):
    # Partition-local delegation to pandas ``combine_first``.
    return self.map_partitions(M.combine_first, other)
@classmethod
def _bind_operator_method(cls, name, op, original=pd.DataFrame):
    """bind operator method like DataFrame.add to this class"""
    # Abstract hook: concrete subclasses (Series/DataFrame) provide the
    # actual binding logic.
    raise NotImplementedError
@derived_from(pd.DataFrame)
def resample(self, rule, closed=None, label=None):
    # Returns a lazy Resampler object; no computation happens here.
    from .tseries.resample import Resampler

    return Resampler(self, rule, closed=closed, label=label)
@derived_from(pd.DataFrame)
def first(self, offset):
    # Let pandas error on bad args (validation only; result is discarded)
    self._meta_nonempty.first(offset)

    if not self.known_divisions:
        raise ValueError("`first` is not implemented for unknown divisions")

    offset = pd.tseries.frequencies.to_offset(offset)
    date = self.divisions[0] + offset
    # Index of the last partition that may contain rows within the window.
    end = self.loc._get_partitions(date)

    # Whether the right boundary value itself should be kept; anchored
    # offsets and non-delta offsets include it.
    is_anchored = offset.is_anchored()

    include_right = is_anchored or not hasattr(offset, "delta")

    if end == self.npartitions - 1:
        divs = self.divisions
    else:
        divs = self.divisions[: end + 1] + (date,)

    name = "first-" + tokenize(self, offset)
    # Partitions before ``end`` pass through unchanged; the boundary
    # partition is sliced up to ``date``.
    dsk = {(name, i): (self._name, i) for i in range(end)}
    dsk[(name, end)] = (
        methods.boundary_slice,
        (self._name, end),
        None,
        date,
        include_right,
        True,
    )
    graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
    return new_dd_object(graph, name, self, divs)
@derived_from(pd.DataFrame)
def last(self, offset):
    # Let pandas error on bad args (validation only; result is discarded).
    # Fixed: previously called ``.first(offset)`` here — a copy/paste from
    # ``first``; ``.last`` is the consistent validation call.
    self._meta_nonempty.last(offset)

    if not self.known_divisions:
        raise ValueError("`last` is not implemented for unknown divisions")

    offset = pd.tseries.frequencies.to_offset(offset)
    date = self.divisions[-1] - offset
    # Index of the first partition that may contain rows within the window.
    start = self.loc._get_partitions(date)

    if start == 0:
        divs = self.divisions
    else:
        divs = (date,) + self.divisions[start + 1 :]

    name = "last-" + tokenize(self, offset)
    # Output partition 0 is input partition ``start`` sliced at ``date``;
    # input partitions start+1 .. npartitions-1 pass through unchanged as
    # output partitions 1 .. npartitions-1-start.
    # Fixed off-by-one: the range must stop at ``self.npartitions - 1``.
    # With ``range(start, self.npartitions)`` the last entry referenced the
    # nonexistent input key ``(self._name, self.npartitions)`` and produced
    # one more output key than the divisions describe.
    dsk = {
        (name, i + 1): (self._name, j + 1)
        for i, j in enumerate(range(start, self.npartitions - 1))
    }
    dsk[(name, 0)] = (
        methods.boundary_slice,
        (self._name, start),
        date,
        None,
        True,
        False,
    )
    graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
    return new_dd_object(graph, name, self, divs)
def nunique_approx(self, split_every=None):
    """Approximate number of unique rows.

    This method uses the HyperLogLog algorithm for cardinality
    estimation to compute the approximate number of unique rows.
    The approximate error is 0.406%.

    Parameters
    ----------
    split_every : int, optional
        Group partitions into groups of this size while performing a
        tree-reduction. If set to False, no tree-reduction will be used.
        Default is 8.

    Returns
    -------
    a float representing the approximate number of elements
    """
    from . import hyperloglog  # here to avoid circular import issues

    # b=16 controls the HLL register count (2**16), which fixes the
    # quoted error rate above.
    return aca(
        [self],
        chunk=hyperloglog.compute_hll_array,
        combine=hyperloglog.reduce_state,
        aggregate=hyperloglog.estimate_count,
        split_every=split_every,
        b=16,
        meta=float,
    )
@property
def values(self):
    """Return a dask.array of the values of this dataframe

    Warning: This creates a dask.array without precise shape information.
    Operations that depend on shape information, like slicing or reshaping,
    will not work.
    """
    return self.map_partitions(methods.values)
def _validate_chunks(self, arr, lengths):
    """Validate explicit partition ``lengths`` and convert them to a
    dask.array chunks tuple.

    Parameters
    ----------
    arr : dask.array.Array
        Array whose existing chunks are returned when ``lengths`` is None.
    lengths : Sequence[int] or None
        One length per partition of this collection.

    Returns
    -------
    A normalized chunks tuple, or ``arr._chunks`` when ``lengths`` is None.
    """
    from dask.array.core import normalize_chunks

    if isinstance(lengths, Sequence):
        lengths = tuple(lengths)

        if len(lengths) != self.npartitions:
            raise ValueError(
                "The number of items in 'lengths' does not match the number of "
                f"partitions. {len(lengths)} != {self.npartitions}"
            )

        # 1-D (Series/Index): single chunk dimension; 2-D (DataFrame):
        # the column axis is one chunk wide.
        if self.ndim == 1:
            chunks = normalize_chunks((lengths,))
        else:
            chunks = normalize_chunks((lengths, (len(self.columns),)))

        return chunks
    elif lengths is not None:
        raise ValueError(f"Unexpected value for 'lengths': '{lengths}'")

    return arr._chunks
def _is_index_level_reference(self, key):
    """
    Test whether a key is an index level reference

    To be considered an index level reference, `key` must match the index name
    and must NOT match the name of any column (if a dataframe).
    """
    # Ordered so cheap checks short-circuit: a dask collection or an
    # unnamed index can never be an index-level reference.
    return (
        self.index.name is not None
        and not is_dask_collection(key)
        and (np.isscalar(key) or isinstance(key, tuple))
        and key == self.index.name
        and key not in getattr(self, "columns", ())
    )
def _contains_index_name(self, columns_or_index):
    """
    Test whether the input contains a reference to the index of the DataFrame/Series
    """
    # A single (non-list) argument is checked directly; a list is an
    # index reference if any of its entries is one.
    if not isinstance(columns_or_index, list):
        return self._is_index_level_reference(columns_or_index)
    return any(self._is_index_level_reference(n) for n in columns_or_index)
def _raise_if_object_series(x, funcname):
    """
    Utility function to raise an error if an object column does not support
    a certain operation like `mean`.
    """
    # Only dask Series with an object dtype are rejected; everything else
    # (DataFrames, scalars, non-object Series) passes through silently.
    if not isinstance(x, Series):
        return
    if hasattr(x, "dtype") and x.dtype == object:
        raise ValueError("`%s` not supported with object series" % funcname)
class Series(_Frame):
    """Parallel Pandas Series

    Do not use this class directly. Instead use functions like
    ``dd.read_csv``, ``dd.read_parquet``, or ``dd.from_pandas``.

    Parameters
    ----------
    dsk: dict
        The dask graph to compute this Series
    _name: str
        The key prefix that specifies which keys in the dask comprise this
        particular Series
    meta: pandas.Series
        An empty ``pandas.Series`` with names, dtypes, and index matching the
        expected output.
    divisions: tuple of index values
        Values along which we partition our blocks on the index

    See Also
    --------
    dask.dataframe.DataFrame
    """

    # Concrete pandas type backing each partition.
    _partition_type = pd.Series
    # Duck-typed predicate recognizing partition-like objects.
    _is_partition_type = staticmethod(is_series_like)
    # Prefix used when generating task-graph key names.
    _token_prefix = "series-"
    # Names of registered custom accessors.
    _accessors = set()
def __array_wrap__(self, array, context=None):
    """Wrap the result of a NumPy ufunc back into a pandas Series.

    ``context`` is the ``(ufunc, args, domain)`` tuple NumPy supplies.
    NOTE(review): when ``context`` is not a non-empty tuple this falls
    through and implicitly returns None — presumably NumPy always passes a
    context here; confirm before relying on it.
    """
    if isinstance(context, tuple) and len(context) > 0:
        # A 0-d array argument carries no index to propagate.
        if isinstance(context[1][0], np.ndarray) and context[1][0].shape == ():
            index = None
        else:
            index = context[1][0].index

        return pd.Series(array, index=index, name=self.name)
@property
def axes(self):
    # Series has a single axis: its index.
    return [self.index]
@property
def name(self):
    # The series name lives on the metadata object.
    return self._meta.name

@name.setter
def name(self, name):
    # Renaming requires rewriting the graph keys, so mutate this object
    # in place from a renamed copy.
    self._meta.name = name
    renamed = _rename_dask(self, name)
    # update myself
    self.dask = renamed.dask
    self._name = renamed._name
@property
def ndim(self):
    """Return dimensionality"""
    return 1
@property
def shape(self):
    """
    Return a tuple representing the dimensionality of a Series.

    The single element of the tuple is a Delayed result.

    Examples
    --------
    >>> series.shape  # doctest: +SKIP
    (dd.Scalar<size-ag..., dtype=int64>,)
    """
    return (self.size,)
@property
def dtype(self):
    """Return data type"""
    return self._meta.dtype
@cache_readonly
def dt(self):
    """Namespace of datetime methods"""
    return DatetimeAccessor(self)
@cache_readonly
def cat(self):
    """Namespace of categorical methods"""
    return CategoricalAccessor(self)
@cache_readonly
def str(self):
    """Namespace for string methods"""
    return StringAccessor(self)
def __dir__(self):
    """Customize attribute listing to hide inapplicable accessors."""
    o = set(dir(type(self)))
    o.update(self.__dict__)
    # Remove the `cat` and `str` accessors if not available. We can't
    # decide this statically for the `dt` accessor, as it works on
    # datetime-like things as well.
    for accessor in ["cat", "str"]:
        if not hasattr(self._meta, accessor):
            o.remove(accessor)
    return list(o)
@property
def nbytes(self):
    """Number of bytes"""
    # Sum of per-partition byte counts, reduced lazily into a Scalar.
    return self.reduction(
        methods.nbytes, np.sum, token="nbytes", meta=int, split_every=False
    )
def _repr_data(self):
    # Structural (dtype/divisions) representation used by __repr__.
    return _repr_data_series(self._meta, self._repr_divisions)
def __repr__(self):
    """have to overwrite footer"""
    if self.name is not None:
        footer = f"Name: {self.name}, dtype: {self.dtype}"
    else:
        footer = f"dtype: {self.dtype}"

    return """Dask {klass} Structure:
{data}
{footer}
Dask Name: {name}, {task} tasks""".format(
        klass=self.__class__.__name__,
        data=self.to_string(),
        footer=footer,
        name=key_split(self._name),
        task=len(self.dask),
    )
def rename(self, index=None, inplace=False, sorted_index=False):
    """Alter Series index labels or name

    Function / dict values must be unique (1-to-1). Labels not contained in
    a dict / Series will be left as-is. Extra labels listed don't throw an
    error.

    Alternatively, change ``Series.name`` with a scalar value.

    Parameters
    ----------
    index : scalar, hashable sequence, dict-like or callable, optional
        If dict-like or callable, the transformation is applied to the
        index. Scalar or hashable sequence-like will alter the
        ``Series.name`` attribute.
    inplace : boolean, default False
        Whether to return a new Series or modify this one inplace.
    sorted_index : bool, default False
        If true, the output ``Series`` will have known divisions inferred
        from the input series and the transformation. Ignored for
        non-callable/dict-like ``index`` or when the input series has
        unknown divisions. Note that this may only be set to ``True`` if
        you know that the transformed index is monotonically increasing. Dask
        will check that transformed divisions are monotonic, but cannot
        check all the values between divisions, so incorrectly setting this
        can result in bugs.

    Returns
    -------
    renamed : Series

    See Also
    --------
    pandas.Series.rename
    """
    from pandas.api.types import is_dict_like, is_list_like, is_scalar

    import dask.dataframe as dd

    # Scalar / plain-sequence ``index`` only changes the series name;
    # dict-like/callable/dask-Series values relabel the index itself.
    if is_scalar(index) or (
        is_list_like(index)
        and not is_dict_like(index)
        and not isinstance(index, dd.Series)
    ):
        if inplace:
            warnings.warn(
                "'inplace' argument for dask series will be removed in future versions",
                PendingDeprecationWarning,
            )
        res = self if inplace else self.copy()
        res.name = index
    else:
        res = self.map_partitions(M.rename, index, enforce_metadata=False)
        if self.known_divisions:
            if sorted_index and (callable(index) or is_dict_like(index)):
                # Transform the divisions the same way the index is
                # transformed, and verify they stay monotonic.
                old = pd.Series(range(self.npartitions + 1), index=self.divisions)
                new = old.rename(index).index
                if not new.is_monotonic_increasing:
                    msg = (
                        "sorted_index=True, but the transformed index "
                        "isn't monotonic_increasing"
                    )
                    raise ValueError(msg)
                res.divisions = tuple(methods.tolist(new))
            else:
                res = res.clear_divisions()
        if inplace:
            # Copy the renamed result's state back onto this object.
            self.dask = res.dask
            self._name = res._name
            self.divisions = res.divisions
            self._meta = res._meta
            res = self
    return res
@derived_from(pd.Series)
def round(self, decimals=0):
    # Elementwise, so safe as a simple partition-wise operation.
    return elemwise(M.round, self, decimals)
@derived_from(pd.DataFrame)
def to_timestamp(self, freq=None, how="start", axis=0):
    df = elemwise(M.to_timestamp, self, freq, how, axis)
    # Divisions are period values; convert them to timestamps too.
    df.divisions = tuple(pd.Index(self.divisions).to_timestamp())
    return df
def quantile(self, q=0.5, method="default"):
    """Approximate quantiles of Series

    Parameters
    ----------
    q : list/array of floats, default 0.5 (50%)
        Iterable of numbers ranging from 0 to 1 for the desired quantiles
    method : {'default', 'tdigest', 'dask'}, optional
        What method to use. By default will use dask's internal custom
        algorithm (``'dask'``). If set to ``'tdigest'`` will use tdigest
        for floats and ints and fallback to the ``'dask'`` otherwise.
    """
    return quantile(self, q, method=method)
def _repartition_quantiles(self, npartitions, upsample=1.0):
    """Approximate quantiles of Series used for repartitioning"""
    from .partitionquantiles import partition_quantiles

    return partition_quantiles(self, npartitions, upsample=upsample)
def __getitem__(self, key):
    # Boolean-mask selection with an aligned dask Series can be done
    # partitionwise; anything else goes through label-based .loc.
    if isinstance(key, Series) and self.divisions == key.divisions:
        name = "index-%s" % tokenize(self, key)
        dsk = partitionwise_graph(operator.getitem, name, self, key)
        graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self, key])
        return Series(graph, name, self._meta, self.divisions)
    raise_on_meta_error = None  # NOTE(review): unused; kept out — fall through
    return self.loc[key]
@derived_from(pd.DataFrame)
def _get_numeric_data(self, how="any", subset=None):
    # A Series is treated as already-numeric here; callers that need a
    # dtype check use _raise_if_object_series instead.
    return self
@derived_from(pd.Series)
def iteritems(self):
    # Materialize one partition at a time and stream its (index, value)
    # pairs, keeping at most one partition in memory.
    total_parts = self.npartitions
    for part_idx in range(total_parts):
        materialized = self.get_partition(part_idx).compute()
        for item in materialized.iteritems():
            yield item
@derived_from(pd.Series)
def __iter__(self):
    # Iterate values lazily, computing one partition at a time.
    for i in range(self.npartitions):
        s = self.get_partition(i).compute()
        yield from s
@_deprecated(
    message=(
        "Using the ``in`` operator to test for membership in Series is "
        "deprecated. To test for membership in the index use "
        "``(s.index == key).any()``. Similarly to test for membership in "
        "the values use ``(s == key).any()``"
    )
)
def __contains__(self, key):
    # NOTE: triggers a full computation of the collection.
    return (self == key).any().compute()
@classmethod
def _validate_axis(cls, axis=0):
    """Validate an axis argument for a Series and normalize it to 0."""
    if axis not in (0, "index", None):
        raise ValueError(f"No axis named {axis}")
    # convert to numeric axis
    return {None: 0, "index": 0}.get(axis, axis)
@derived_from(pd.Series)
def groupby(
    self, by=None, group_keys=True, sort=None, observed=None, dropna=None, **kwargs
):
    from dask.dataframe.groupby import SeriesGroupBy

    # All grouping logic lives in SeriesGroupBy; this method only
    # forwards the arguments.
    grouping_options = dict(
        by=by,
        group_keys=group_keys,
        sort=sort,
        observed=observed,
        dropna=dropna,
    )
    return SeriesGroupBy(self, **grouping_options, **kwargs)
@derived_from(pd.Series)
def count(self, split_every=False):
    # Delegates to the shared _Frame reduction implementation.
    return super().count(split_every=split_every)
@derived_from(pd.Series)
def mode(self, dropna=True, split_every=False):
    # Delegates to the shared _Frame implementation.
    return super().mode(dropna=dropna, split_every=split_every)
@derived_from(pd.Series)
def explode(self):
    # Exploding is partition-local; derive the output meta from pandas.
    exploded_meta = self._meta.explode()
    return self.map_partitions(
        M.explode, meta=exploded_meta, enforce_metadata=False
    )
def unique(self, split_every=None, split_out=1):
    """
    Return Series of unique values in the object. Includes NA values.

    Returns
    -------
    uniques : Series
    """
    # Tree-reduction: dedupe each partition, then dedupe the combined
    # results.
    return aca(
        self,
        chunk=methods.unique,
        aggregate=methods.unique,
        meta=self._meta,
        token="unique",
        split_every=split_every,
        series_name=self.name,
        split_out=split_out,
    )
@derived_from(pd.Series)
def nunique(self, split_every=None):
    # Count of distinct values via deduplication.
    return self.drop_duplicates(split_every=split_every).count()
@derived_from(pd.Series)
def value_counts(
    self,
    sort=None,
    ascending=False,
    dropna=None,
    normalize=False,
    split_every=None,
    split_out=1,
):
    """
    Note: dropna is only supported in pandas >= 1.1.0, in which case it defaults to
    True.
    """
    kwargs = {"sort": sort, "ascending": ascending}

    if dropna is not None:
        if not PANDAS_GT_110:
            raise NotImplementedError(
                "dropna is not a valid argument for dask.dataframe.value_counts "
                f"if pandas < 1.1.0. Pandas version is {pd.__version__}"
            )
        kwargs["dropna"] = dropna

    aggregate_kwargs = {"normalize": normalize}
    if split_out > 1:
        # With multiple output partitions, normalization needs the global
        # length up front (NOTE: this triggers a computation of len()).
        aggregate_kwargs["total_length"] = (
            len(self) if dropna is False else len(self.dropna())
        )

    return aca(
        self,
        chunk=M.value_counts,
        aggregate=methods.value_counts_aggregate,
        combine=methods.value_counts_combine,
        meta=self._meta.value_counts(normalize=normalize),
        token="value-counts",
        split_every=split_every,
        split_out=split_out,
        split_out_setup=split_out_on_index,
        aggregate_kwargs=aggregate_kwargs,
        **kwargs,
    )
@derived_from(pd.Series)
def nlargest(self, n=5, split_every=None):
    # Tree-reduce: keep the n largest within each partition, then the n
    # largest among the combined candidates.
    reduction_kwargs = dict(
        chunk=M.nlargest,
        aggregate=M.nlargest,
        meta=self._meta,
        token="series-nlargest",
        split_every=split_every,
        n=n,
    )
    return aca(self, **reduction_kwargs)
@derived_from(pd.Series)
def nsmallest(self, n=5, split_every=None):
    # Tree-reduce: keep the n smallest within each partition, then the n
    # smallest among the combined candidates.
    reduction_kwargs = dict(
        chunk=M.nsmallest,
        aggregate=M.nsmallest,
        meta=self._meta,
        token="series-nsmallest",
        split_every=split_every,
        n=n,
    )
    return aca(self, **reduction_kwargs)
@derived_from(pd.Series)
def isin(self, values):
    # Added just to get the different docstring for Series
    return super().isin(values)
@insert_meta_param_description(pad=12)
@derived_from(pd.Series)
def map(self, arg, na_action=None, meta=no_default):
    # A dask Series mapper needs alignment machinery; delegate.
    if is_series_like(arg) and is_dask_collection(arg):
        return series_map(self, arg)
    if not (
        isinstance(arg, dict)
        or callable(arg)
        or is_series_like(arg)
        and not is_dask_collection(arg)
    ):
        raise TypeError(
            f"arg must be pandas.Series, dict or callable. Got {type(arg)}"
        )
    # Build the graph by hand: one pandas .map task per partition.
    name = "map-" + tokenize(self, arg, na_action)
    dsk = {
        (name, i): (M.map, k, arg, na_action)
        for i, k in enumerate(self.__dask_keys__())
    }
    graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
    if meta is no_default:
        # Infer output meta by running the mapper on sample data.
        meta = _emulate(M.map, self, arg, na_action=na_action, udf=True)
    else:
        meta = make_meta(
            meta,
            index=getattr(make_meta(self), "index", None),
            parent_meta=self._meta,
        )

    return type(self)(graph, name, meta, self.divisions)
@derived_from(pd.Series)
def dropna(self):
    # Partition-local; divisions are unaffected by dropping rows.
    return self.map_partitions(M.dropna, enforce_metadata=False)
@derived_from(pd.Series)
def between(self, left, right, inclusive="both"):
    # Elementwise bounds test; safe to apply independently per partition.
    bound_kwargs = dict(left=left, right=right, inclusive=inclusive)
    return self.map_partitions(M.between, **bound_kwargs)
@derived_from(pd.Series)
def clip(self, lower=None, upper=None, out=None):
    # np.clip may pass out
    if out is not None:
        raise ValueError("'out' must be None")
    clip_kwargs = dict(lower=lower, upper=upper, enforce_metadata=False)
    return self.map_partitions(M.clip, **clip_kwargs)
@derived_from(pd.Series)
def clip_lower(self, threshold):
    # NOTE(review): pandas has deprecated/removed clip_lower; kept here
    # for compatibility with the pandas versions this file supports.
    return self.map_partitions(
        M.clip_lower, threshold=threshold, enforce_metadata=False
    )
@derived_from(pd.Series)
def clip_upper(self, threshold):
    # NOTE(review): pandas has deprecated/removed clip_upper; kept here
    # for compatibility with the pandas versions this file supports.
    return self.map_partitions(
        M.clip_upper, threshold=threshold, enforce_metadata=False
    )
@derived_from(pd.Series)
def align(self, other, join="outer", axis=None, fill_value=None):
    # Delegates to the shared _Frame implementation.
    return super().align(other, join=join, axis=axis, fill_value=fill_value)
@derived_from(pd.Series)
def combine(self, other, func, fill_value=None):
    # Partition-local delegation to pandas ``combine``.
    return self.map_partitions(M.combine, other, func, fill_value=fill_value)
@derived_from(pd.Series)
def squeeze(self):
    # A Series is already 1-dimensional; squeezing is a no-op.
    return self
@derived_from(pd.Series)
def combine_first(self, other):
    # Partition-local delegation to pandas ``combine_first``.
    return self.map_partitions(M.combine_first, other)
def to_bag(self, index=False, format="tuple"):
    """Create a Dask Bag from a Series"""
    from .io import to_bag

    return to_bag(self, index, format=format)
@derived_from(pd.Series)
def to_frame(self, name=None):
    # Promote to a single-column DataFrame; meta derived via pandas.
    return self.map_partitions(M.to_frame, name, meta=self._meta.to_frame(name))
@derived_from(pd.Series)
def to_string(self, max_rows=5):
    # option_context doesn't affect
    structural_repr = self._repr_data()
    return structural_repr.to_string(max_rows=max_rows)
@classmethod
def _bind_operator_method(cls, name, op, original=pd.Series):
    """bind operator method like Series.add to this class"""

    def meth(self, other, level=None, fill_value=None, axis=0):
        if level is not None:
            raise NotImplementedError("level must be None")
        axis = self._validate_axis(axis)
        # Infer output metadata by emulating the op on sample data.
        meta = _emulate(op, self, other, axis=axis, fill_value=fill_value)
        return map_partitions(
            op, self, other, meta=meta, axis=axis, fill_value=fill_value
        )

    meth.__name__ = name
    setattr(cls, name, derived_from(original)(meth))
@classmethod
def _bind_comparison_method(cls, name, comparison, original=pd.Series):
    """bind comparison method like Series.eq to this class"""

    def meth(self, other, level=None, fill_value=None, axis=0):
        if level is not None:
            raise NotImplementedError("level must be None")
        axis = self._validate_axis(axis)
        if fill_value is None:
            return elemwise(comparison, self, other, axis=axis)
        else:
            # Bake fill_value into the comparison so elemwise only sees
            # positional operands.
            op = partial(comparison, fill_value=fill_value)
            return elemwise(op, self, other, axis=axis)

    meth.__name__ = name
    setattr(cls, name, derived_from(original)(meth))
@insert_meta_param_description(pad=12)
def apply(self, func, convert_dtype=True, meta=no_default, args=(), **kwds):
    """Parallel version of pandas.Series.apply

    Parameters
    ----------
    func : function
        Function to apply
    convert_dtype : boolean, default True
        Try to find better dtype for elementwise function results.
        If False, leave as dtype=object.
    $META
    args : tuple
        Positional arguments to pass to function in addition to the value.

    Additional keyword arguments will be passed as keywords to the function.

    Returns
    -------
    applied : Series or DataFrame if func returns a Series.

    Examples
    --------
    >>> import dask.dataframe as dd
    >>> s = pd.Series(range(5), name='x')
    >>> ds = dd.from_pandas(s, npartitions=2)

    Apply a function elementwise across the Series, passing in extra
    arguments in ``args`` and ``kwargs``:

    >>> def myadd(x, a, b=1):
    ...     return x + a + b
    >>> res = ds.apply(myadd, args=(2,), b=1.5)  # doctest: +SKIP

    By default, dask tries to infer the output metadata by running your
    provided function on some fake data. This works well in many cases, but
    can sometimes be expensive, or even fail. To avoid this, you can
    manually specify the output metadata with the ``meta`` keyword. This
    can be specified in many forms, for more information see
    ``dask.dataframe.utils.make_meta``.

    Here we specify the output is a Series with name ``'x'``, and dtype
    ``float64``:

    >>> res = ds.apply(myadd, args=(2,), b=1.5, meta=('x', 'f8'))

    In the case where the metadata doesn't change, you can also pass in
    the object itself directly:

    >>> res = ds.apply(lambda x: x + 1, meta=ds)

    See Also
    --------
    dask.Series.map_partitions
    """
    if meta is no_default:
        # Infer meta by running func on sample data, then warn that
        # explicit meta is preferred.
        meta = _emulate(
            M.apply,
            self._meta_nonempty,
            func,
            convert_dtype=convert_dtype,
            args=args,
            udf=True,
            **kwds,
        )
        warnings.warn(meta_warning(meta))

    return map_partitions(
        M.apply, self, func, convert_dtype, args, meta=meta, **kwds
    )
@derived_from(pd.Series)
def cov(self, other, min_periods=None, split_every=False):
from .multi import concat
if not isinstance(other, Series):
raise TypeError("other must be a dask.dataframe.Series")
df = concat([self, other], axis=1)
return cov_corr(df, min_periods, scalar=True, split_every=split_every)
@derived_from(pd.Series)
def corr(self, other, method="pearson", min_periods=None, split_every=False):
from .multi import concat
if not isinstance(other, Series):
raise TypeError("other must be a dask.dataframe.Series")
if method != "pearson":
raise NotImplementedError("Only Pearson correlation has been implemented")
df = concat([self, other], axis=1)
return cov_corr(
df, min_periods, corr=True, scalar=True, split_every=split_every
)
@derived_from(pd.Series)
def autocorr(self, lag=1, split_every=False):
if not isinstance(lag, Integral):
raise TypeError("lag must be an integer")
return self.corr(self if lag == 0 else self.shift(lag), split_every=split_every)
@derived_from(pd.Series)
def memory_usage(self, index=True, deep=False):
result = self.map_partitions(
M.memory_usage, index=index, deep=deep, enforce_metadata=False
)
return delayed(sum)(result.to_delayed())
def __divmod__(self, other):
res1 = self // other
res2 = self % other
return res1, res2
def __rdivmod__(self, other):
res1 = other // self
res2 = other % self
return res1, res2
class Index(Series):
    """Parallel Pandas Index.

    A dask collection whose partitions are ``pandas.Index`` objects.
    Datetime-like and categorical attribute access is transparently
    forwarded to the ``.dt`` / ``.cat`` accessors via ``__getattr__``.
    """

    _partition_type = pd.Index
    _is_partition_type = staticmethod(is_index_like)
    _token_prefix = "index-"
    _accessors = set()

    # Datetime-like attributes forwarded to the ``.dt`` accessor.
    _dt_attributes = {
        "nanosecond",
        "microsecond",
        "millisecond",
        "dayofyear",
        "minute",
        "hour",
        "day",
        "dayofweek",
        "second",
        "week",
        "weekday",
        "weekofyear",
        "month",
        "quarter",
        "year",
    }

    # Categorical attributes forwarded to ``.cat`` when the dtype is categorical.
    _cat_attributes = {
        "known",
        "as_known",
        "as_unknown",
        "add_categories",
        "categories",
        "remove_categories",
        "reorder_categories",
        "as_ordered",
        "codes",
        "remove_unused_categories",
        "set_categories",
        "as_unordered",
        "ordered",
        "rename_categories",
    }

    def __getattr__(self, key):
        # Try the categorical accessor first (only for categorical dtype),
        # then the datetime accessor, before giving up.
        if is_categorical_dtype(self.dtype) and key in self._cat_attributes:
            return getattr(self.cat, key)
        elif key in self._dt_attributes:
            return getattr(self.dt, key)
        raise AttributeError("'Index' object has no attribute %r" % key)

    def __dir__(self):
        # Advertise forwarded accessor attributes for tab completion.
        out = super().__dir__()
        out.extend(self._dt_attributes)
        if is_categorical_dtype(self.dtype):
            out.extend(self._cat_attributes)
        return out

    @property
    def index(self):
        # An Index has no index of its own (matches pandas behavior).
        raise AttributeError(
            f"{self.__class__.__name__!r} object has no attribute 'index'"
        )

    def __array_wrap__(self, array, context=None):
        # Rebox a ufunc's ndarray output as a pandas Index, keeping the name.
        return pd.Index(array, name=self.name)

    def head(self, n=5, compute=True):
        """First n items of the Index.

        Caveat, this only checks the first partition.
        """
        name = "head-%d-%s" % (n, self._name)
        # Single-task graph slicing the first partition only.
        dsk = {(name, 0): (operator.getitem, (self._name, 0), slice(0, n))}
        graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
        result = new_dd_object(graph, name, self._meta, self.divisions[:2])
        if compute:
            result = result.compute()
        return result

    @derived_from(pd.Index)
    def max(self, split_every=False):
        return self.reduction(
            M.max,
            meta=self._meta_nonempty.max(),
            token=self._token_prefix + "max",
            split_every=split_every,
        )

    @derived_from(pd.Index)
    def min(self, split_every=False):
        return self.reduction(
            M.min,
            meta=self._meta_nonempty.min(),
            token=self._token_prefix + "min",
            split_every=split_every,
        )

    def count(self, split_every=False):
        # Count non-missing entries; per-partition counts are summed.
        return self.reduction(
            methods.index_count,
            np.sum,
            token="index-count",
            meta=int,
            split_every=split_every,
        )

    @derived_from(pd.Index)
    def shift(self, periods=1, freq=None):
        if isinstance(self._meta, pd.PeriodIndex):
            if freq is not None:
                raise ValueError("PeriodIndex doesn't accept `freq` argument")
            meta = self._meta_nonempty.shift(periods)
            out = self.map_partitions(
                M.shift, periods, meta=meta, token="shift", transform_divisions=False
            )
        else:
            # Pandas will raise for other index types that don't implement shift
            meta = self._meta_nonempty.shift(periods, freq=freq)
            out = self.map_partitions(
                M.shift,
                periods,
                token="shift",
                meta=meta,
                freq=freq,
                transform_divisions=False,
            )
        if freq is None:
            # Fall back to the frequency inferred from the shifted metadata.
            freq = meta.freq
        return maybe_shift_divisions(out, periods, freq=freq)

    @derived_from(pd.Index)
    def to_series(self):
        return self.map_partitions(
            M.to_series,
            meta=self._meta.to_series(),
            transform_divisions=False,
        )

    @derived_from(pd.Index, ua_args=["index"])
    def to_frame(self, index=True, name=None):
        if not index:
            raise NotImplementedError()
        return self.map_partitions(
            M.to_frame,
            index,
            name,
            meta=self._meta.to_frame(index, name),
            transform_divisions=False,
        )

    @insert_meta_param_description(pad=12)
    @derived_from(pd.Index)
    def map(self, arg, na_action=None, meta=no_default, is_monotonic=False):
        """
        Note that this method clears any known divisions.

        If your mapping function is monotonically increasing then use
        `is_monotonic` to apply the mapping function to the old divisions
        and assign the new divisions to the output.
        """
        applied = super().map(arg, na_action=na_action, meta=meta)
        if is_monotonic and self.known_divisions:
            # A monotonic mapping preserves ordering, so the mapped old
            # divisions remain valid divisions for the output.
            applied.divisions = tuple(
                pd.Series(self.divisions).map(arg, na_action=na_action)
            )
        else:
            applied = applied.clear_divisions()
        return applied
class DataFrame(_Frame):
    """
    Parallel Pandas DataFrame

    Do not use this class directly. Instead use functions like
    ``dd.read_csv``, ``dd.read_parquet``, or ``dd.from_pandas``.

    Parameters
    ----------
    dsk: dict
        The dask graph to compute this DataFrame
    name: str
        The key prefix that specifies which keys in the dask comprise this
        particular DataFrame
    meta: pandas.DataFrame
        An empty ``pandas.DataFrame`` with names, dtypes, and index matching
        the expected output.
    divisions: tuple of index values
        Values along which we partition our blocks on the index
    """

    # Concrete type of each partition and predicate for detecting it.
    _partition_type = pd.DataFrame
    _is_partition_type = staticmethod(is_dataframe_like)
    # Prefix used when generating graph key names for this collection.
    _token_prefix = "dataframe-"
    # Registered custom accessors (populated by the accessor machinery).
    _accessors = set()
def __init__(self, dsk, name, meta, divisions):
super().__init__(dsk, name, meta, divisions)
if self.dask.layers[name].collection_annotations is None:
self.dask.layers[name].collection_annotations = {
"npartitions": self.npartitions,
"columns": [col for col in self.columns],
"type": typename(type(self)),
"dataframe_type": typename(type(self._meta)),
"series_dtypes": {
col: self._meta[col].dtype
if hasattr(self._meta[col], "dtype")
else None
for col in self._meta.columns
},
}
else:
self.dask.layers[name].collection_annotations.update(
{
"npartitions": self.npartitions,
"columns": [col for col in self.columns],
"type": typename(type(self)),
"dataframe_type": typename(type(self._meta)),
"series_dtypes": {
col: self._meta[col].dtype
if hasattr(self._meta[col], "dtype")
else None
for col in self._meta.columns
},
}
)
    def __array_wrap__(self, array, context=None):
        # Called by NumPy ufuncs to rebox the raw ndarray result as a frame.
        if isinstance(context, tuple) and len(context) > 0:
            if isinstance(context[1][0], np.ndarray) and context[1][0].shape == ():
                # 0-d (scalar) ufunc input: there is no index to propagate.
                index = None
            else:
                index = context[1][0].index
            # NOTE(review): when ``context`` is not a non-empty tuple this
            # method falls through and implicitly returns None — confirm
            # whether an explicit error would be preferable.
            return pd.DataFrame(array, index=index, columns=self.columns)
    @property
    def axes(self):
        # [row labels, column labels], mirroring pandas.DataFrame.axes.
        return [self.index, self.columns]
    @property
    def columns(self):
        """Column labels, taken from the (empty) metadata frame."""
        return self._meta.columns

    @columns.setter
    def columns(self, columns):
        # Renaming yields a new graph/meta/name; adopt them in place so that
        # assigning to ``df.columns`` behaves like pandas.
        renamed = _rename_dask(self, columns)
        self._meta = renamed._meta
        self._name = renamed._name
        self.dask = renamed.dask
    @property
    def iloc(self):
        """Purely integer-location based indexing for selection by position.

        Only indexing the column positions is supported. Trying to select
        row positions will raise a ValueError.

        See :ref:`dataframe.indexing` for more.

        Examples
        --------
        >>> df.iloc[:, [2, 0, 1]]  # doctest: +SKIP
        """
        from .indexing import _iLocIndexer

        # For dataframes with unique column names, this will be transformed into a __getitem__ call
        return _iLocIndexer(self)
def __len__(self):
try:
s = self.iloc[:, 0]
except IndexError:
return super().__len__()
else:
return len(s)
    def __contains__(self, key):
        # Column-label membership (pandas semantics); consults only the
        # empty metadata frame, so this is cheap.
        return key in self._meta
    @property
    def empty(self):
        # Deliberately unsupported: deciding emptiness by row count would
        # trigger a full compute on a property access.
        raise NotImplementedError(
            "Checking whether a Dask DataFrame has any rows may be expensive. "
            "However, checking the number of columns is fast. "
            "Depending on which of these results you need, use either "
            "`len(df.index) == 0` or `len(df.columns) == 0`"
        )
    def __getitem__(self, key):
        """Dispatch column selection, label slicing and boolean filtering."""
        name = "getitem-%s" % tokenize(self, key)
        if np.isscalar(key) or isinstance(key, (tuple, str)):
            # Scalar/tuple/string key: normally a single-column selection.
            if isinstance(self._meta.index, (pd.DatetimeIndex, pd.PeriodIndex)):
                if key not in self._meta.columns:
                    if PANDAS_GT_120:
                        warnings.warn(
                            "Indexing a DataFrame with a datetimelike index using a single "
                            "string to slice the rows, like `frame[string]`, is deprecated "
                            "and will be removed in a future version. Use `frame.loc[string]` "
                            "instead.",
                            FutureWarning,
                        )
                    # Datetime-like index and non-column key: row slice.
                    return self.loc[key]

            # error is raised from pandas
            meta = self._meta[_extract_meta(key)]
            dsk = partitionwise_graph(operator.getitem, name, self, key)
            graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
            return new_dd_object(graph, name, meta, self.divisions)
        elif isinstance(key, slice):
            from pandas.api.types import is_float_dtype

            is_integer_slice = any(
                isinstance(i, Integral) for i in (key.start, key.step, key.stop)
            )
            # Slicing with integer labels is always iloc based except for a
            # float indexer for some reason
            if is_integer_slice and not is_float_dtype(self.index.dtype):
                # NOTE: this always fails currently, as iloc is mostly
                # unsupported, but we call it anyway here for future-proofing
                # and error-attribution purposes
                return self.iloc[key]
            else:
                return self.loc[key]
        if isinstance(key, (np.ndarray, list)) or (
            not is_dask_collection(key) and (is_series_like(key) or is_index_like(key))
        ):
            # Concrete list/array of labels or a pandas mask.
            # error is raised from pandas
            meta = self._meta[_extract_meta(key)]
            dsk = partitionwise_graph(operator.getitem, name, self, key)
            graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
            return new_dd_object(graph, name, meta, self.divisions)
        if isinstance(key, Series):
            # Dask boolean Series mask: align partitions first if needed.
            # do not perform dummy calculation, as columns will not be changed.
            if self.divisions != key.divisions:
                from .multi import _maybe_align_partitions

                self, key = _maybe_align_partitions([self, key])
            dsk = partitionwise_graph(operator.getitem, name, self, key)
            graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self, key])
            # NOTE(review): ``self`` (not ``self._meta``) is passed as the
            # meta argument here, unlike the other branches — confirm this
            # is intentional.
            return new_dd_object(graph, name, self, self.divisions)
        if isinstance(key, DataFrame):
            # Boolean frame mask: keep where True, NaN elsewhere.
            return self.where(key, np.nan)

        raise NotImplementedError(key)
    def __setitem__(self, key, value):
        # Each branch builds a *new* collection (via ``assign``/``where``)
        # whose state is then adopted in place at the bottom.
        if isinstance(key, (tuple, list)) and isinstance(value, DataFrame):
            # Multiple target columns assigned from one source frame.
            df = self.assign(**{k: value[c] for k, c in zip(key, value.columns)})

        elif isinstance(key, pd.Index) and not isinstance(value, DataFrame):
            # All labels in the Index receive the same value.
            key = list(key)
            df = self.assign(**{k: value for k in key})
        elif (
            is_dataframe_like(key)
            or is_series_like(key)
            or isinstance(key, (DataFrame, Series))
        ):
            # Boolean-mask assignment: df[mask] = value.
            df = self.where(~key, value)
        elif not isinstance(key, str):
            raise NotImplementedError(f"Item assignment with {type(key)} not supported")
        else:
            df = self.assign(**{key: value})

        self.dask = df.dask
        self._name = df._name
        self._meta = df._meta
        self.divisions = df.divisions
def __delitem__(self, key):
result = self.drop([key], axis=1)
self.dask = result.dask
self._name = result._name
self._meta = result._meta
    def __setattr__(self, key, value):
        try:
            columns = object.__getattribute__(self, "_meta").columns
        except AttributeError:
            # ``_meta`` may not exist yet during early construction.
            columns = ()

        # exclude protected attributes from setitem
        if key in columns and key not in ["divisions", "dask", "_name", "_meta"]:
            # ``df.colname = x`` behaves like ``df["colname"] = x``.
            self[key] = value
        else:
            object.__setattr__(self, key, value)
def __getattr__(self, key):
if key in self.columns:
return self[key]
else:
raise AttributeError("'DataFrame' object has no attribute %r" % key)
def __dir__(self):
o = set(dir(type(self)))
o.update(self.__dict__)
o.update(c for c in self.columns if (isinstance(c, str) and c.isidentifier()))
return list(o)
    def __iter__(self):
        # Iterating a DataFrame yields its column labels (pandas semantics);
        # the metadata frame carries the same columns, so no compute happens.
        return iter(self._meta)
    def _ipython_key_completions_(self):
        # Offer column labels for ``df[<TAB>]`` completion in IPython.
        return methods.tolist(self.columns)
    @property
    def ndim(self):
        """Return dimensionality"""
        # A DataFrame is always two-dimensional.
        return 2
    @property
    def shape(self):
        """
        Return a tuple representing the dimensionality of the DataFrame.

        The number of rows is a Delayed result. The number of columns
        is a concrete integer.

        Examples
        --------
        >>> df.shape  # doctest: +SKIP
        (Delayed('int-07f06075-5ecc-4d77-817e-63c69a9188a8'), 2)
        """
        col_size = len(self.columns)
        if col_size == 0:
            # No columns: the row count is just the index length (Delayed).
            return (self.index.shape[0], 0)
        # Derive the lazy row count from the total element count.
        row_size = delayed(int)(self.size / col_size)
        return (row_size, col_size)
    @property
    def dtypes(self):
        """Return data types"""
        # Dtypes come straight from the metadata frame; no compute needed.
        return self._meta.dtypes
    @derived_from(pd.DataFrame)
    def get_dtype_counts(self):
        # NOTE(review): delegates to pandas' get_dtype_counts, which was
        # removed in pandas 1.0 — verify this still works with the pandas
        # versions this file supports.
        return self._meta.get_dtype_counts()
    @derived_from(pd.DataFrame)
    def get_ftype_counts(self):
        # NOTE(review): delegates to pandas' get_ftype_counts, which was
        # removed in pandas 1.0 — verify against supported pandas versions.
        return self._meta.get_ftype_counts()
@derived_from(pd.DataFrame)
def select_dtypes(self, include=None, exclude=None):
cs = self._meta.select_dtypes(include=include, exclude=exclude).columns
return self[list(cs)]
    def sort_values(
        self,
        by,
        npartitions=None,
        ascending=True,
        na_position="last",
        sort_function=None,
        sort_function_kwargs=None,
        **kwargs,
    ):
        """Sort the dataset by a single column.

        Sorting a parallel dataset requires expensive shuffles and is generally
        not recommended. See ``set_index`` for implementation details.

        Parameters
        ----------
        by: string
        npartitions: int, None, or 'auto'
            The ideal number of output partitions. If None, use the same as
            the input. If 'auto' then decide by memory use.
        ascending: bool, optional
            Sort ascending vs. descending.
            Defaults to True.
        na_position: {'last', 'first'}, optional
            Puts NaNs at the beginning if 'first', puts NaN at the end if 'last'.
            Defaults to 'last'.
        sort_function: function, optional
            Sorting function to use when sorting underlying partitions.
            If None, defaults to ``M.sort_values`` (the partition library's
            implementation of ``sort_values``).
        sort_function_kwargs: dict, optional
            Additional keyword arguments to pass to the partition sorting function.
            By default, ``by``, ``ascending``, and ``na_position`` are provided.

        Examples
        --------
        >>> df2 = df.sort_values('x')  # doctest: +SKIP
        """
        from .shuffle import sort_values

        sort_kwargs = {
            "by": by,
            "ascending": ascending,
            "na_position": na_position,
        }
        if sort_function is None:
            sort_function = M.sort_values
        if sort_function_kwargs is not None:
            sort_kwargs.update(sort_function_kwargs)

        if self.npartitions == 1:
            # One partition: an in-partition sort is already a global sort.
            return self.map_partitions(sort_function, **sort_kwargs)
        return sort_values(
            self,
            by,
            ascending=ascending,
            npartitions=npartitions,
            na_position=na_position,
            sort_function=sort_function,
            sort_function_kwargs=sort_kwargs,
            **kwargs,
        )
    def set_index(
        self,
        other,
        drop=True,
        sorted=False,
        npartitions=None,
        divisions=None,
        inplace=False,
        **kwargs,
    ):
        """Set the DataFrame index (row labels) using an existing column.

        This realigns the dataset to be sorted by a new column. This can have a
        significant impact on performance, because joins, groupbys, lookups, etc.
        are all much faster on that column. However, this performance increase
        comes with a cost, sorting a parallel dataset requires expensive shuffles.
        Often we ``set_index`` once directly after data ingest and filtering and
        then perform many cheap computations off of the sorted dataset.

        This function operates exactly like ``pandas.set_index`` except with
        different performance costs (dask dataframe ``set_index`` is much more expensive).

        Under normal operation this function does an initial pass over the index column
        to compute approximate quantiles to serve as future divisions. It then passes
        over the data a second time, splitting up each input partition into several
        pieces and sharing those pieces to all of the output partitions now in
        sorted order.

        In some cases we can alleviate those costs, for example if your dataset is
        sorted already then we can avoid making many small pieces or if you know
        good values to split the new index column then we can avoid the initial
        pass over the data. For example if your new index is a datetime index and
        your data is already sorted by day then this entire operation can be done
        for free. You can control these options with the following parameters.

        Parameters
        ----------
        other: string or Dask Series
        drop: boolean, default True
            Delete column to be used as the new index.
        sorted: bool, optional
            If the index column is already sorted in increasing order.
            Defaults to False
        npartitions: int, None, or 'auto'
            The ideal number of output partitions. If None, use the same as
            the input. If 'auto' then decide by memory use.
            Only used when ``divisions`` is not given. If ``divisions`` is given,
            the number of output partitions will be ``len(divisions) - 1``.
        divisions: list, optional
            The "dividing lines" used to split the new index into partitions.
            For ``divisions=[0, 10, 50, 100]``, there would be three output partitions,
            where the new index contained [0, 10), [10, 50), and [50, 100), respectively.
            See https://docs.dask.org/en/latest/dataframe-design.html#partitions.
            If not given (default), good divisions are calculated by immediately computing
            the data and looking at the distribution of its values. For large datasets,
            this can be expensive.
            Note that if ``sorted=True``, specified divisions are assumed to match
            the existing partitions in the data; if this is untrue you should
            leave divisions empty and call ``repartition`` after ``set_index``.
        inplace: bool, optional
            Modifying the DataFrame in place is not supported by Dask.
            Defaults to False.
        shuffle: string, 'disk' or 'tasks', optional
            Either ``'disk'`` for single-node operation or ``'tasks'`` for
            distributed operation. Will be inferred by your current scheduler.
        compute: bool, default False
            Whether or not to trigger an immediate computation. Defaults to False.
            Note, that even if you set ``compute=False``, an immediate computation
            will still be triggered if ``divisions`` is ``None``.
        partition_size: int, optional
            Desired size of each partitions in bytes.
            Only used when ``npartitions='auto'``

        Examples
        --------
        >>> import dask
        >>> ddf = dask.datasets.timeseries(start="2021-01-01", end="2021-01-07", freq="1H").reset_index()
        >>> ddf2 = ddf.set_index("x")
        >>> ddf2 = ddf.set_index(ddf.x)
        >>> ddf2 = ddf.set_index(ddf.timestamp, sorted=True)

        A common case is when we have a datetime column that we know to be
        sorted and is cleanly divided by day. We can set this index for free
        by specifying both that the column is pre-sorted and the particular
        divisions along which it is separated

        >>> import pandas as pd
        >>> divisions = pd.date_range(start="2021-01-01", end="2021-01-07", freq='1D')
        >>> divisions
        DatetimeIndex(['2021-01-01', '2021-01-02', '2021-01-03', '2021-01-04',
                       '2021-01-05', '2021-01-06', '2021-01-07'],
                      dtype='datetime64[ns]', freq='D')

        Note that ``len(divisions)`` is equal to ``npartitions + 1``. This is because ``divisions``
        represents the upper and lower bounds of each partition. The first item is the
        lower bound of the first partition, the second item is the lower bound of the
        second partition and the upper bound of the first partition, and so on.
        The second-to-last item is the lower bound of the last partition, and the last
        (extra) item is the upper bound of the last partition.

        >>> ddf2 = ddf.set_index("timestamp", sorted=True, divisions=divisions.tolist())

        If you'll be running `set_index` on the same (or similar) datasets repeatedly,
        you could save time by letting Dask calculate good divisions once, then copy-pasting
        them to reuse. This is especially helpful running in a Jupyter notebook:

        >>> ddf2 = ddf.set_index("name")  # slow, calculates data distribution
        >>> ddf2.divisions  # doctest: +SKIP
        ["Alice", "Laura", "Ursula", "Zelda"]
        >>> # ^ Now copy-paste this and edit the line above to:
        >>> # ddf2 = ddf.set_index("name", divisions=["Alice", "Laura", "Ursula", "Zelda"])
        """
        if inplace:
            raise NotImplementedError("The inplace= keyword is not supported")
        # ``sorted`` shadows the builtin; rebind and delete the parameter name.
        pre_sorted = sorted
        del sorted

        if divisions is not None:
            check_divisions(divisions)

        if pre_sorted:
            from .shuffle import set_sorted_index

            # Data is already ordered by the new index: no shuffle needed.
            return set_sorted_index(
                self, other, drop=drop, divisions=divisions, **kwargs
            )
        else:
            from .shuffle import set_index

            return set_index(
                self,
                other,
                drop=drop,
                npartitions=npartitions,
                divisions=divisions,
                **kwargs,
            )
@derived_from(pd.DataFrame)
def pop(self, item):
out = self[item]
del self[item]
return out
@derived_from(pd.DataFrame)
def nlargest(self, n=5, columns=None, split_every=None):
token = "dataframe-nlargest"
return aca(
self,
chunk=M.nlargest,
aggregate=M.nlargest,
meta=self._meta,
token=token,
split_every=split_every,
n=n,
columns=columns,
)
@derived_from(pd.DataFrame)
def nsmallest(self, n=5, columns=None, split_every=None):
token = "dataframe-nsmallest"
return aca(
self,
chunk=M.nsmallest,
aggregate=M.nsmallest,
meta=self._meta,
token=token,
split_every=split_every,
n=n,
columns=columns,
)
    @derived_from(pd.DataFrame)
    def groupby(
        self, by=None, group_keys=True, sort=None, observed=None, dropna=None, **kwargs
    ):
        # Deferred import avoids a circular dependency with the groupby module.
        from dask.dataframe.groupby import DataFrameGroupBy

        return DataFrameGroupBy(
            self,
            by=by,
            group_keys=group_keys,
            sort=sort,
            observed=observed,
            dropna=dropna,
            **kwargs,
        )
    @wraps(categorize)
    def categorize(self, columns=None, index=None, split_every=None, **kwargs):
        # Thin method wrapper around the module-level ``categorize``.
        return categorize(
            self, columns=columns, index=index, split_every=split_every, **kwargs
        )
    @derived_from(pd.DataFrame)
    def assign(self, **kwargs):
        data = self.copy()
        for k, v in kwargs.items():
            if not (
                isinstance(v, Scalar)
                or is_series_like(v)
                or callable(v)
                or pd.api.types.is_scalar(v)
                or is_index_like(v)
                or isinstance(v, Array)
            ):
                raise TypeError(
                    f"Column assignment doesn't support type {typename(type(v))}"
                )
            if callable(v):
                # Callables see the frame built so far, so later assignments
                # may reference columns created by earlier ones (pandas rule).
                kwargs[k] = v(data)
            if isinstance(v, Array):
                from .io import from_dask_array

                if len(v.shape) > 1:
                    raise ValueError("Array assignment only supports 1-D arrays")
                if v.npartitions != data.npartitions:
                    raise ValueError(
                        "Number of partitions do not match "
                        f"({v.npartitions} != {data.npartitions})"
                    )
                kwargs[k] = from_dask_array(v, index=data.index, meta=data._meta)

            pairs = [k, kwargs[k]]

            # Figure out columns of the output
            df2 = data._meta_nonempty.assign(
                **_extract_meta({k: kwargs[k]}, nonempty=True)
            )
            # Apply one assignment at a time so each sees the previous result.
            data = elemwise(methods.assign, data, *pairs, meta=df2)
        return data
    @derived_from(pd.DataFrame, ua_args=["index"])
    def rename(self, index=None, columns=None):
        # Renaming the index would require reworking divisions; unsupported.
        if index is not None:
            raise ValueError("Cannot rename index.")

        # *args here is index, columns but columns arg is already used
        return self.map_partitions(M.rename, None, columns=columns)
    def query(self, expr, **kwargs):
        """Filter dataframe with complex expression

        Blocked version of pd.DataFrame.query

        This is like the sequential version except that this will also happen
        in many threads. This may conflict with ``numexpr`` which will use
        multiple threads itself. We recommend that you set ``numexpr`` to use a
        single thread:

        .. code-block:: python

            import numexpr
            numexpr.set_num_threads(1)

        See also
        --------
        pandas.DataFrame.query
        """
        # Each partition evaluates the same expression independently.
        return self.map_partitions(M.query, expr, **kwargs)
@derived_from(pd.DataFrame)
def eval(self, expr, inplace=None, **kwargs):
if inplace is None:
inplace = False
if "=" in expr and inplace in (True, None):
raise NotImplementedError(
"Inplace eval not supported. Please use inplace=False"
)
meta = self._meta.eval(expr, inplace=inplace, **kwargs)
return self.map_partitions(M.eval, expr, meta=meta, inplace=inplace, **kwargs)
    @derived_from(pd.DataFrame)
    def dropna(self, how="any", subset=None, thresh=None):
        # Row-wise NA dropping is embarrassingly parallel across partitions.
        return self.map_partitions(
            M.dropna, how=how, subset=subset, thresh=thresh, enforce_metadata=False
        )
    @derived_from(pd.DataFrame)
    def clip(self, lower=None, upper=None, out=None):
        # ``out`` exists for numpy-API compatibility only; dask cannot
        # write into a preallocated output.
        if out is not None:
            raise ValueError("'out' must be None")
        return self.map_partitions(
            M.clip, lower=lower, upper=upper, enforce_metadata=False
        )
    @derived_from(pd.DataFrame)
    def clip_lower(self, threshold):
        # NOTE(review): pandas removed clip_lower in 1.0 — verify against
        # the pandas versions this file supports.
        return self.map_partitions(
            M.clip_lower, threshold=threshold, enforce_metadata=False
        )
    @derived_from(pd.DataFrame)
    def clip_upper(self, threshold):
        # NOTE(review): pandas removed clip_upper in 1.0 — verify against
        # the pandas versions this file supports.
        return self.map_partitions(
            M.clip_upper, threshold=threshold, enforce_metadata=False
        )
@derived_from(pd.DataFrame)
def squeeze(self, axis=None):
if axis in [None, 1]:
if len(self.columns) == 1:
return self[self.columns[0]]
else:
return self
elif axis == 0:
raise NotImplementedError(
f"{type(self)} does not support squeeze along axis 0"
)
elif axis not in [0, 1, None]:
raise ValueError(f"No axis {axis} for object type {type(self)}")
    @derived_from(pd.DataFrame)
    def to_timestamp(self, freq=None, how="start", axis=0)
        df = elemwise(M.to_timestamp, self, freq, how, axis)
        # Divisions are period values; convert them to timestamps too.
        df.divisions = tuple(pd.Index(self.divisions).to_timestamp())
        return df
    @derived_from(pd.DataFrame)
    def explode(self, column):
        # Row counts change per partition, so metadata is precomputed and
        # enforcement disabled.
        meta = self._meta.explode(column)
        return self.map_partitions(M.explode, column, meta=meta, enforce_metadata=False)
    def to_bag(self, index=False, format="tuple"):
        """Convert to a dask Bag of tuples of each row.

        Parameters
        ----------
        index : bool, optional
            If True, the index is included as the first element of each tuple.
            Default is False.
        format : {"tuple", "dict"},optional
            Whether to return a bag of tuples or dictionaries.
        """
        from .io import to_bag

        return to_bag(self, index, format)
    def to_parquet(self, path, *args, **kwargs):
        """See dd.to_parquet docstring for more information"""
        from .io import to_parquet

        return to_parquet(self, path, *args, **kwargs)
    def to_orc(self, path, *args, **kwargs):
        """See dd.to_orc docstring for more information"""
        from .io import to_orc

        return to_orc(self, path, *args, **kwargs)
    @derived_from(pd.DataFrame)
    def to_string(self, max_rows=5):
        # option_context doesn't affect
        # the repr helper, so display options are passed explicitly.
        return self._repr_data().to_string(max_rows=max_rows, show_dimensions=False)
def _get_numeric_data(self, how="any", subset=None):
# calculate columns to avoid unnecessary calculation
numerics = self._meta._get_numeric_data()
if len(numerics.columns) < len(self.columns):
name = self._token_prefix + "-get_numeric_data"
return self.map_partitions(M._get_numeric_data, meta=numerics, token=name)
else:
# use myself if all numerics
return self
@classmethod
def _validate_axis(cls, axis=0):
if axis not in (0, 1, "index", "columns", None):
raise ValueError(f"No axis named {axis}")
# convert to numeric axis
return {None: 0, "index": 0, "columns": 1}.get(axis, axis)
@derived_from(pd.DataFrame)
def drop(self, labels=None, axis=0, columns=None, errors="raise"):
axis = self._validate_axis(axis)
if axis == 0 and columns is not None:
# Columns must be specified if axis==0
return self.map_partitions(drop_by_shallow_copy, columns, errors=errors)
elif axis == 1:
return self.map_partitions(drop_by_shallow_copy, labels, errors=errors)
raise NotImplementedError(
"Drop currently only works for axis=1 or when columns is not None"
)
    def merge(
        self,
        right,
        how="inner",
        on=None,
        left_on=None,
        right_on=None,
        left_index=False,
        right_index=False,
        suffixes=("_x", "_y"),
        indicator=False,
        npartitions=None,
        shuffle=None,
        broadcast=None,
    ):
        """Merge the DataFrame with another DataFrame

        This will merge the two datasets, either on the indices, a certain column
        in each dataset or the index in one dataset and the column in another.

        Parameters
        ----------
        right: dask.dataframe.DataFrame
        how : {'left', 'right', 'outer', 'inner'}, default: 'inner'
            How to handle the operation of the two objects:

            - left: use calling frame's index (or column if on is specified)
            - right: use other frame's index
            - outer: form union of calling frame's index (or column if on is
              specified) with other frame's index, and sort it
              lexicographically
            - inner: form intersection of calling frame's index (or column if
              on is specified) with other frame's index, preserving the order
              of the calling's one

        on : label or list
            Column or index level names to join on. These must be found in both
            DataFrames. If on is None and not merging on indexes then this
            defaults to the intersection of the columns in both DataFrames.
        left_on : label or list, or array-like
            Column to join on in the left DataFrame. Other than in pandas
            arrays and lists are only supported if their length is 1.
        right_on : label or list, or array-like
            Column to join on in the right DataFrame. Other than in pandas
            arrays and lists are only supported if their length is 1.
        left_index : boolean, default False
            Use the index from the left DataFrame as the join key.
        right_index : boolean, default False
            Use the index from the right DataFrame as the join key.
        suffixes : 2-length sequence (tuple, list, ...)
            Suffix to apply to overlapping column names in the left and
            right side, respectively
        indicator : boolean or string, default False
            If True, adds a column to output DataFrame called "_merge" with
            information on the source of each row. If string, column with
            information on source of each row will be added to output DataFrame,
            and column will be named value of string. Information column is
            Categorical-type and takes on a value of "left_only" for observations
            whose merge key only appears in `left` DataFrame, "right_only" for
            observations whose merge key only appears in `right` DataFrame,
            and "both" if the observation's merge key is found in both.
        npartitions: int or None, optional
            The ideal number of output partitions. This is only utilised when
            performing a hash_join (merging on columns only). If ``None`` then
            ``npartitions = max(lhs.npartitions, rhs.npartitions)``.
            Default is ``None``.
        shuffle: {'disk', 'tasks'}, optional
            Either ``'disk'`` for single-node operation or ``'tasks'`` for
            distributed operation. Will be inferred by your current scheduler.
        broadcast: boolean or float, optional
            Whether to use a broadcast-based join in lieu of a shuffle-based
            join for supported cases. By default, a simple heuristic will be
            used to select the underlying algorithm. If a floating-point value
            is specified, that number will be used as the ``broadcast_bias``
            within the simple heuristic (a large number makes Dask more likely
            to choose the ``broadcast_join`` code path). See ``broadcast_join``
            for more information.

        Notes
        -----
        There are three ways to join dataframes:

        1. Joining on indices. In this case the divisions are
           aligned using the function ``dask.dataframe.multi.align_partitions``.
           Afterwards, each partition is merged with the pandas merge function.
        2. Joining one on index and one on column. In this case the divisions of
           dataframe merged by index (:math:`d_i`) are used to divide the column
           merged dataframe (:math:`d_c`) one using
           ``dask.dataframe.multi.rearrange_by_divisions``. In this case the
           merged dataframe (:math:`d_m`) has the exact same divisions
           as (:math:`d_i`). This can lead to issues if you merge multiple rows from
           (:math:`d_c`) to one row in (:math:`d_i`).
        3. Joining both on columns. In this case a hash join is performed using
           ``dask.dataframe.multi.hash_join``.
        """
        if not is_dataframe_like(right):
            raise ValueError("right must be DataFrame")

        from .multi import merge

        # All heavy lifting (algorithm selection, alignment, shuffles) is
        # delegated to the multi module.
        return merge(
            self,
            right,
            how=how,
            on=on,
            left_on=left_on,
            right_on=right_on,
            left_index=left_index,
            right_index=right_index,
            suffixes=suffixes,
            npartitions=npartitions,
            indicator=indicator,
            shuffle=shuffle,
            broadcast=broadcast,
        )
@derived_from(pd.DataFrame)
def join(
    self,
    other,
    on=None,
    how="left",
    lsuffix="",
    rsuffix="",
    npartitions=None,
    shuffle=None,
):
    # A named Series is promoted to a single-column frame so it can be
    # joined like a DataFrame.
    if is_series_like(other) and hasattr(other, "name"):
        other = other.to_frame()

    if not is_dataframe_like(other):
        if not isinstance(other, list) or not all(
            [is_dataframe_like(o) for o in other]
        ):
            raise ValueError("other must be DataFrame or list of DataFrames")
        if how not in ["outer", "left"]:
            raise ValueError("merge_multi only supports left or outer joins")

        # Collapse the list of frames into a single frame via pairwise
        # outer joins, then fall through to the ordinary two-frame merge.
        from .multi import _recursive_pairwise_outer_join

        other = _recursive_pairwise_outer_join(
            other,
            on=on,
            lsuffix=lsuffix,
            rsuffix=rsuffix,
            npartitions=npartitions,
            shuffle=shuffle,
        )

    from .multi import merge

    # Join semantics: `self` joins on `on` (or its index when `on` is
    # None) against the index of `other`, mirroring pandas' DataFrame.join.
    return merge(
        self,
        other,
        how=how,
        left_index=on is None,
        right_index=True,
        left_on=on,
        suffixes=(lsuffix, rsuffix),
        npartitions=npartitions,
        shuffle=shuffle,
    )
@derived_from(pd.DataFrame)
def append(self, other, interleave_partitions=False):
    # A dask Series cannot be appended as a row of a dask DataFrame;
    # the user should pass a pandas Series instead (transposed below).
    if isinstance(other, Series):
        # Fixed: grammar ("Unable to appending") and the missing space
        # between the two concatenated sentence literals.
        msg = (
            "Unable to append dd.Series to dd.DataFrame. "
            "Use pd.Series to append as row."
        )
        raise ValueError(msg)
    elif is_series_like(other):
        # A pandas-like Series becomes a single-row DataFrame.
        other = other.to_frame().T
    return super().append(other, interleave_partitions=interleave_partitions)
@derived_from(pd.DataFrame)
def iterrows(self):
    # Materialize one partition at a time to keep memory bounded, then
    # defer to the pandas row iterator of each computed partition.
    for partition_number in range(self.npartitions):
        pandas_frame = self.get_partition(partition_number).compute()
        yield from pandas_frame.iterrows()
@derived_from(pd.DataFrame)
def itertuples(self, index=True, name="Pandas"):
    # Materialize one partition at a time to keep memory bounded, then
    # defer to the pandas namedtuple iterator of each computed partition.
    for partition_number in range(self.npartitions):
        pandas_frame = self.get_partition(partition_number).compute()
        yield from pandas_frame.itertuples(index=index, name=name)
@derived_from(pd.DataFrame)
def items(self):
    # Yield (label, column) pairs. Columns are selected positionally so
    # that duplicate labels each yield their own column.
    for position, label in enumerate(self.columns):
        yield label, self.iloc[:, position]
@classmethod
def _bind_operator_method(cls, name, op, original=pd.DataFrame):
    """bind operator method like DataFrame.add to this class

    Creates a wrapper with the pandas-style ``(other, axis, level,
    fill_value)`` signature, names it ``name``, copies the docstring
    from ``original`` via ``derived_from``, and attaches it to ``cls``.
    """

    # name must be explicitly passed for div method whose name is truediv

    def meth(self, other, axis="columns", level=None, fill_value=None):
        if level is not None:
            raise NotImplementedError("level must be None")

        axis = self._validate_axis(axis)

        if axis in (1, "columns"):
            # When axis=1 and other is a series, `other` is transposed
            # and the operator is applied broadcast across rows. This
            # isn't supported with dd.Series.
            if isinstance(other, Series):
                msg = f"Unable to {name} dd.Series with axis=1"
                raise ValueError(msg)
            elif is_series_like(other):
                # Special case for pd.Series to avoid unwanted partitioning
                # of other. We pass it in as a kwarg to prevent this.
                meta = _emulate(
                    op, self, other=other, axis=axis, fill_value=fill_value
                )
                return map_partitions(
                    op,
                    self,
                    other=other,
                    meta=meta,
                    axis=axis,
                    fill_value=fill_value,
                    enforce_metadata=False,
                )

        # General case: align partitions and apply the operator blockwise.
        meta = _emulate(op, self, other, axis=axis, fill_value=fill_value)
        return map_partitions(
            op,
            self,
            other,
            meta=meta,
            axis=axis,
            fill_value=fill_value,
            enforce_metadata=False,
        )

    meth.__name__ = name
    setattr(cls, name, derived_from(original)(meth))
@classmethod
def _bind_comparison_method(cls, name, comparison, original=pd.DataFrame):
    """bind comparison method like DataFrame.eq to this class"""

    def bound(self, other, axis="columns", level=None):
        if level is not None:
            raise NotImplementedError("level must be None")
        validated_axis = self._validate_axis(axis)
        return elemwise(comparison, self, other, axis=validated_axis)

    # Name the wrapper and copy the pandas docstring before attaching.
    bound.__name__ = name
    setattr(cls, name, derived_from(original)(bound))
@insert_meta_param_description(pad=12)
def apply(
    self,
    func,
    axis=0,
    broadcast=None,
    raw=False,
    reduce=None,
    args=(),
    meta=no_default,
    result_type=None,
    **kwds,
):
    """Parallel version of pandas.DataFrame.apply

    This mimics the pandas version except for the following:

    1.  Only ``axis=1`` is supported (and must be specified explicitly).
    2.  The user should provide output metadata via the `meta` keyword.

    Parameters
    ----------
    func : function
        Function to apply to each column/row
    axis : {0 or 'index', 1 or 'columns'}, default 0
        - 0 or 'index': apply function to each column (NOT SUPPORTED)
        - 1 or 'columns': apply function to each row
    $META
    args : tuple
        Positional arguments to pass to function in addition to the array/series

    Additional keyword arguments will be passed as keywords to the function

    Returns
    -------
    applied : Series or DataFrame

    Examples
    --------
    >>> import pandas as pd
    >>> import dask.dataframe as dd
    >>> df = pd.DataFrame({'x': [1, 2, 3, 4, 5],
    ...                    'y': [1., 2., 3., 4., 5.]})
    >>> ddf = dd.from_pandas(df, npartitions=2)

    Apply a function to row-wise passing in extra arguments in ``args`` and
    ``kwargs``:

    >>> def myadd(row, a, b=1):
    ...     return row.sum() + a + b
    >>> res = ddf.apply(myadd, axis=1, args=(2,), b=1.5)  # doctest: +SKIP

    By default, dask tries to infer the output metadata by running your
    provided function on some fake data. This works well in many cases, but
    can sometimes be expensive, or even fail. To avoid this, you can
    manually specify the output metadata with the ``meta`` keyword. This
    can be specified in many forms, for more information see
    ``dask.dataframe.utils.make_meta``.

    Here we specify the output is a Series with name ``'x'``, and dtype
    ``float64``:

    >>> res = ddf.apply(myadd, axis=1, args=(2,), b=1.5, meta=('x', 'f8'))

    In the case where the metadata doesn't change, you can also pass in
    the object itself directly:

    >>> res = ddf.apply(lambda row: row + 1, axis=1, meta=ddf)

    See Also
    --------
    dask.DataFrame.map_partitions
    """
    # `broadcast` is a retired pandas argument: accept but warn.
    if broadcast is not None:
        warnings.warn(
            "The `broadcast` argument is no longer used/supported. "
            "It will be dropped in a future release.",
            category=FutureWarning,
        )

    axis = self._validate_axis(axis)
    pandas_kwargs = {"axis": axis, "raw": raw, "result_type": result_type}

    kwds.update(pandas_kwargs)

    if axis == 0:
        msg = (
            "dd.DataFrame.apply only supports axis=1\n"
            "  Try: df.apply(func, axis=1)"
        )
        raise NotImplementedError(msg)

    if meta is no_default:
        # Infer output metadata by running `func` on non-empty fake data;
        # warn because this can be expensive or wrong for some functions.
        meta = _emulate(
            M.apply, self._meta_nonempty, func, args=args, udf=True, **kwds
        )
        warnings.warn(meta_warning(meta))
    kwds.update({"parent_meta": self._meta})
    return map_partitions(M.apply, self, func, args=args, meta=meta, **kwds)
@derived_from(pd.DataFrame)
def applymap(self, func, meta="__no_default__"):
    # Elementwise application of `func`; the string sentinel stands in
    # for no_default when no explicit meta is given.
    return elemwise(M.applymap, self, func, meta=meta)
@derived_from(pd.DataFrame)
def round(self, decimals=0):
    # Elementwise rounding, applied independently to every partition.
    return elemwise(M.round, self, decimals)
@derived_from(pd.DataFrame)
def mode(self, dropna=True, split_every=False):
    # Compute the mode of each column independently as a Series, then
    # concatenate the single-partition results side by side.
    mode_series_list = []
    for col_index in range(len(self.columns)):
        # Positional selection handles duplicate column labels.
        col_series = self.iloc[:, col_index]
        mode_series = Series.mode(
            col_series, dropna=dropna, split_every=split_every
        )
        mode_series.name = col_series.name
        mode_series_list.append(mode_series)

    name = "concat-" + tokenize(*mode_series_list)

    # Hand-built single-task graph: concat the one partition of every
    # per-column mode Series along axis=1.
    dsk = {
        (name, 0): (
            apply,
            methods.concat,
            [[(df._name, 0) for df in mode_series_list]],
            {"axis": 1},
        )
    }
    meta = methods.concat([df._meta for df in mode_series_list], axis=1)

    graph = HighLevelGraph.from_collections(
        name, dsk, dependencies=mode_series_list
    )
    # Result divisions are unknown (modes are not index-aligned).
    ddf = new_dd_object(graph, name, meta, divisions=(None, None))

    return ddf
@derived_from(pd.DataFrame)
def cov(self, min_periods=None, split_every=False):
    # Pairwise covariance of columns, delegated to the shared
    # covariance/correlation implementation.
    return cov_corr(self, min_periods, split_every=split_every)
@derived_from(pd.DataFrame)
def corr(self, method="pearson", min_periods=None, split_every=False):
    if method != "pearson":
        raise NotImplementedError("Only Pearson correlation has been implemented")
    # The third positional argument switches cov_corr into correlation mode.
    return cov_corr(self, min_periods, True, split_every=split_every)
def info(self, buf=None, verbose=False, memory_usage=False):
    """
    Concise summary of a Dask DataFrame.

    Writes the summary to ``buf`` (``sys.stdout`` by default).
    ``verbose`` adds per-column non-null counts (forcing a compute);
    ``memory_usage`` adds the summed per-partition memory footprint.
    """
    if buf is None:
        import sys

        buf = sys.stdout

    lines = [str(type(self))]

    if len(self.columns) == 0:
        lines.append("Index: 0 entries")
        lines.append("Empty %s" % type(self).__name__)
        put_lines(buf, lines)
        return

    # Group and execute the required computations
    computations = {}
    if verbose:
        memory_usage = True
        computations.update({"index": self.index, "count": self.count()})
    if memory_usage:
        computations.update(
            {"memory_usage": self.map_partitions(M.memory_usage, index=True)}
        )
    # Single compute() call so the graph is traversed only once.
    computations = dict(
        zip(computations.keys(), da.compute(*computations.values()))
    )

    if verbose:
        import textwrap

        index = computations["index"]
        counts = computations["count"]
        lines.append(index_summary(index))
        lines.append(f"Data columns (total {len(self.columns)} columns):")

        from pandas.io.formats.printing import pprint_thing

        # Width of the "Column" field: widest label plus one space.
        space = max(len(pprint_thing(k)) for k in self.columns) + 1
        column_width = max(space, 7)

        # Double braces survive the first .format() (which injects the
        # width) and are filled by the second.
        header = (
            textwrap.dedent(
                """\
         #   {{column:<{column_width}}} Non-Null Count  Dtype
        ---  {{underl:<{column_width}}} --------------  -----"""
            )
            .format(column_width=column_width)
            .format(column="Column", underl="------")
        )

        column_template = textwrap.dedent(
            """\
        {{i:^3}}  {{name:<{column_width}}} {{count}} non-null      {{dtype}}""".format(
                column_width=column_width
            )
        )

        column_info = [
            column_template.format(
                i=pprint_thing(i),
                name=pprint_thing(name),
                count=pprint_thing(count),
                dtype=pprint_thing(dtype),
            )
            for i, (name, count, dtype) in enumerate(
                zip(self.columns, counts, self.dtypes)
            )
        ]
        lines.extend(header.split("\n"))
    else:
        column_info = [index_summary(self.columns, name="Columns")]

    lines.extend(column_info)
    # NOTE(review): `.iteritems()` was removed in pandas 2.0 — confirm the
    # supported pandas range or migrate to `.items()`.
    dtype_counts = [
        "%s(%d)" % k
        for k in sorted(self.dtypes.value_counts().iteritems(), key=str)
    ]
    lines.append("dtypes: {}".format(", ".join(dtype_counts)))

    if memory_usage:
        memory_int = computations["memory_usage"].sum()
        lines.append(f"memory usage: {memory_repr(memory_int)}\n")

    put_lines(buf, lines)
@derived_from(pd.DataFrame)
def memory_usage(self, index=True, deep=False):
    # Per-partition usage first; labels repeated across partitions (the
    # per-column and Index entries) are then collapsed by summation.
    per_partition = self.map_partitions(M.memory_usage, index=index, deep=deep)
    return per_partition.groupby(per_partition.index).sum()
def pivot_table(self, index=None, columns=None, values=None, aggfunc="mean"):
    """
    Create a spreadsheet-style pivot table as a DataFrame. Target ``columns``
    must have category dtype to infer result's ``columns``.
    ``index``, ``columns``, ``values`` and ``aggfunc`` must be all scalar.

    Parameters
    ----------
    values : scalar
        column to aggregate
    index : scalar
        column to be index
    columns : scalar
        column to be columns
    aggfunc : {'mean', 'sum', 'count'}, default 'mean'

    Returns
    -------
    table : DataFrame
    """
    # Thin delegation to the reshape module's implementation.
    from .reshape import pivot_table

    return pivot_table(
        self, index=index, columns=columns, values=values, aggfunc=aggfunc
    )
def melt(
    self,
    id_vars=None,
    value_vars=None,
    var_name=None,
    value_name="value",
    col_level=None,
):
    """
    Unpivots a DataFrame from wide format to long format,
    optionally leaving identifier variables set.

    This function is useful to massage a DataFrame into a format where
    one or more columns are identifier variables (``id_vars``), while
    all other columns, considered measured variables (``value_vars``),
    are "unpivoted" to the row axis, leaving just two non-identifier
    columns, 'variable' and 'value'.

    Parameters
    ----------
    frame : DataFrame
    id_vars : tuple, list, or ndarray, optional
        Column(s) to use as identifier variables.
    value_vars : tuple, list, or ndarray, optional
        Column(s) to unpivot. If not specified, uses all columns that
        are not set as `id_vars`.
    var_name : scalar
        Name to use for the 'variable' column. If None it uses
        ``frame.columns.name`` or 'variable'.
    value_name : scalar, default 'value'
        Name to use for the 'value' column.
    col_level : int or string, optional
        If columns are a MultiIndex then use this level to melt.

    Returns
    -------
    DataFrame
        Unpivoted DataFrame.

    See Also
    --------
    pandas.DataFrame.melt
    """
    # Thin delegation to the reshape module's implementation.
    from .reshape import melt

    return melt(
        self,
        id_vars=id_vars,
        value_vars=value_vars,
        var_name=var_name,
        value_name=value_name,
        col_level=col_level,
    )
def to_records(self, index=False, lengths=None):
    """Convert to a dask array of records.

    ``lengths=True`` computes the true partition lengths so the
    resulting array has known chunk sizes.
    """
    from .io import to_records

    if lengths is True:
        # One cheap pass over the data to learn each partition's length.
        lengths = tuple(self.map_partitions(len).compute())

    record_array = to_records(self)
    validated = self._validate_chunks(record_array, lengths)
    record_array._chunks = (validated[0],)
    return record_array
@derived_from(pd.DataFrame)
def to_html(self, max_rows=5):
    # pd.Series doesn't have html repr
    rendered = self._repr_data().to_html(max_rows=max_rows, show_dimensions=False)
    template = get_template("dataframe.html.j2")
    return template.render(data=rendered, name=self._name, task=self.dask)
def _repr_data(self):
    """Build a small pandas DataFrame of dtype placeholders, indexed by
    the division labels, used by the HTML/text reprs."""
    meta = self._meta
    index = self._repr_divisions
    cols = meta.columns
    if len(cols) == 0:
        # No columns: an empty frame with one row per division label.
        series_df = pd.DataFrame([[]] * len(index), columns=cols, index=index)
    else:
        # NOTE(review): `.iteritems()` was removed in pandas 2.0 — confirm
        # the supported pandas range or migrate to `.items()`.
        series_df = pd.concat(
            [_repr_data_series(s, index=index) for _, s in meta.iteritems()], axis=1
        )
    return series_df
def _repr_html_(self):
    # Jupyter rich display hook: render the placeholder frame through the
    # shared jinja template.
    rendered = self._repr_data().to_html(
        max_rows=5, show_dimensions=False, notebook=True
    )
    template = get_template("dataframe.html.j2")
    return template.render(data=rendered, name=self._name, task=self.dask)
def _select_columns_or_index(self, columns_or_index):
    """
    Select a subset of columns and/or the index as a DataFrame.

    Parameters
    ----------
    columns_or_index
        Column or index name, or a list of these

    Returns
    -------
    dd.DataFrame
        Dask DataFrame with one column per requested column label; if
        the index level was requested it is included as a column named
        ``_index``.
    """
    # Normalize a scalar request into a single-element list
    if not isinstance(columns_or_index, list):
        columns_or_index = [columns_or_index]

    # Keep only the names that really are column labels of this frame
    labels = [n for n in columns_or_index if self._is_column_label_reference(n)]
    result = self[labels]

    if self._contains_index_name(columns_or_index):
        # The index name was requested as well: expose it as `_index`
        result = result.assign(_index=self.index)

    return result
def _is_column_label_reference(self, key):
    """
    Whether ``key`` names at least one column of this DataFrame.

    Dask collections never count as labels, and only scalars or tuples
    (tuples cover MultiIndex column labels) are eligible.
    """
    if is_dask_collection(key):
        return False
    if not (np.isscalar(key) or isinstance(key, tuple)):
        return False
    return key in self.columns
# bind operators
# Attach the standard unary/binary operators (so expressions like
# ``df + 1`` work) to both the collection base class and Scalar.
for op in [
    operator.abs,
    operator.add,
    operator.and_,
    operator.eq,
    operator.gt,
    operator.ge,
    operator.inv,
    operator.lt,
    operator.le,
    operator.mod,
    operator.mul,
    operator.ne,
    operator.neg,
    operator.or_,
    operator.pow,
    operator.sub,
    operator.truediv,
    operator.floordiv,
    operator.xor,
]:
    _Frame._bind_operator(op)
    Scalar._bind_operator(op)

# Arithmetic methods (``df.add(other, fill_value=...)`` and friends) are
# generated from their pandas counterparts so signatures/docs match.
for name in [
    "add",
    "sub",
    "mul",
    "div",
    "divide",
    "truediv",
    "floordiv",
    "mod",
    "pow",
    "radd",
    "rsub",
    "rmul",
    "rdiv",
    "rtruediv",
    "rfloordiv",
    "rmod",
    "rpow",
]:
    meth = getattr(pd.DataFrame, name)
    DataFrame._bind_operator_method(name, meth)

    meth = getattr(pd.Series, name)
    Series._bind_operator_method(name, meth)

# Comparison methods (eq/ne/lt/...) likewise mirror pandas.
for name in ["lt", "gt", "le", "ge", "ne", "eq"]:
    meth = getattr(pd.DataFrame, name)
    DataFrame._bind_comparison_method(name, meth)

    meth = getattr(pd.Series, name)
    Series._bind_comparison_method(name, meth)
def is_broadcastable(dfs, s):
    """
    This Series is broadcastable against another dataframe in the sequence

    True only for a single-partition Series with known divisions whose
    division span matches the column range of some DataFrame in ``dfs``.
    """
    if not (isinstance(s, Series) and s.npartitions == 1 and s.known_divisions):
        return False
    return any(
        isinstance(df, DataFrame)
        and s.divisions == (df.columns.min(), df.columns.max())
        for df in dfs
    )
def elemwise(op, *args, meta=no_default, out=None, transform_divisions=True, **kwargs):
    """Elementwise operation for Dask dataframes

    Parameters
    ----------
    op: callable
        Function to apply across input dataframes
    *args: DataFrames, Series, Scalars, Arrays,
        The arguments of the operation
    meta: pd.DataFrame, pd.Series (optional)
        Valid metadata for the operation.  Will evaluate on a small piece of
        data if not provided.
    transform_divisions: boolean
        If the input is a ``dask.dataframe.Index`` we normally will also apply
        the function onto the divisions and apply those transformed divisions
        to the output.  You can pass ``transform_divisions=False`` to override
        this behavior
    out : ``dask.array`` or ``None``
        If out is a dask.DataFrame, dask.Series or dask.Scalar then
        this overwrites the contents of it with the result
    **kwargs: scalars

    Examples
    --------
    >>> elemwise(operator.add, df.x, df.y)  # doctest: +SKIP
    """
    _name = funcname(op) + "-" + tokenize(op, *args, **kwargs)

    # Wrap raw pandas inputs and co-align partitions of all dask inputs.
    args = _maybe_from_pandas(args)

    from .multi import _maybe_align_partitions

    args = _maybe_align_partitions(args)
    dasks = [arg for arg in args if isinstance(arg, (_Frame, Scalar, Array))]
    dfs = [df for df in dasks if isinstance(df, _Frame)]

    # Clean up dask arrays if present
    deps = dasks.copy()
    for i, a in enumerate(dasks):
        if not isinstance(a, Array):
            continue

        # Ensure that they have similar-ish chunk structure
        if not all(not a.chunks or len(a.chunks[0]) == df.npartitions for df in dfs):
            msg = (
                "When combining dask arrays with dataframes they must "
                "match chunking exactly.  Operation: %s" % funcname(op)
            )
            raise ValueError(msg)

        # Rechunk to have a single chunk along all other axes
        if a.ndim > 1:
            a = a.rechunk({i + 1: d for i, d in enumerate(a.shape[1:])})

        dasks[i] = a

    divisions = dfs[0].divisions
    if transform_divisions and isinstance(dfs[0], Index) and len(dfs) == 1:
        # Apply `op` to the division values themselves so the output keeps
        # meaningful divisions; fall back silently if that fails.
        try:
            divisions = op(
                *[pd.Index(arg.divisions) if arg is dfs[0] else arg for arg in args],
                **kwargs,
            )
            if isinstance(divisions, pd.Index):
                divisions = methods.tolist(divisions)
        except Exception:
            pass
    else:
        if not valid_divisions(divisions):
            divisions = [None] * (dfs[0].npartitions + 1)

    _is_broadcastable = partial(is_broadcastable, dfs)
    dfs = list(remove(_is_broadcastable, dfs))

    # Non-dask arguments, paired with their original positions so the
    # argument order can be reconstructed when emulating meta.
    other = [
        (i, arg)
        for i, arg in enumerate(args)
        if not isinstance(arg, (_Frame, Scalar, Array))
    ]

    # adjust the key length of Scalar
    dsk = partitionwise_graph(op, _name, *args, **kwargs)

    graph = HighLevelGraph.from_collections(_name, dsk, dependencies=deps)

    if meta is no_default:
        if len(dfs) >= 2 and not all(hasattr(d, "npartitions") for d in dasks):
            # should not occur in current funcs
            msg = "elemwise with 2 or more DataFrames and Scalar is not supported"
            raise NotImplementedError(msg)
        # For broadcastable series, use no rows.
        parts = [
            d._meta
            if _is_broadcastable(d)
            else np.empty((), dtype=d.dtype)
            if isinstance(d, Array)
            else d._meta_nonempty
            for d in dasks
        ]
        with raise_on_meta_error(funcname(op)):
            meta = partial_by_order(*parts, function=op, other=other)

    result = new_dd_object(graph, _name, meta, divisions)
    return handle_out(out, result)
def handle_out(out, result):
    """Handle out parameters

    If out is a dask.DataFrame, dask.Series or dask.Scalar then
    this overwrites the contents of it with the result.  Otherwise
    (``out is None``) the result is returned unchanged.
    """
    if isinstance(out, tuple):
        count = len(out)
        if count > 1:
            raise NotImplementedError("The out parameter is not fully supported")
        out = out[0] if count == 1 else None

    if out is None:
        return result

    # Notice, we use .__class__ as opposed to type() in order to support
    # object proxies see <https://github.com/dask/dask/pull/6981>
    if out.__class__ != result.__class__:
        raise TypeError(
            "Mismatched types between result and out parameter. "
            "out=%s, result=%s" % (str(type(out)), str(type(result)))
        )

    if isinstance(out, DataFrame):
        if len(out.columns) != len(result.columns):
            raise ValueError(
                "Mismatched columns count between result and out parameter. "
                "out=%s, result=%s" % (str(len(out.columns)), str(len(result.columns)))
            )

    if not isinstance(out, (Series, DataFrame, Scalar)):
        msg = (
            "The out parameter is not fully supported."
            " Received type %s, expected %s "
            % (typename(type(out)), typename(type(result)))
        )
        raise NotImplementedError(msg)

    # In-place overwrite of the target collection's state.
    out._meta = result._meta
    out._name = result._name
    out.dask = result.dask
    if not isinstance(out, Scalar):
        out.divisions = result.divisions
def _maybe_from_pandas(dfs):
    """Wrap raw pandas Series/DataFrame arguments as single-partition dask
    collections; all other arguments pass through untouched."""
    from .io import from_pandas

    def _wrap(obj):
        pandas_like = is_series_like(obj) or is_dataframe_like(obj)
        if pandas_like and not is_dask_collection(obj):
            return from_pandas(obj, 1)
        return obj

    return [_wrap(df) for df in dfs]
def hash_shard(
    df, nparts, split_out_setup=None, split_out_setup_kwargs=None, ignore_index=False
):
    """Partition the rows of ``df`` into ``nparts`` groups by row hash.

    ``split_out_setup`` (with its kwargs) may preprocess ``df`` into the
    object whose rows are actually hashed; grouping is still applied to
    the original ``df``.
    """
    if split_out_setup:
        to_hash = split_out_setup(df, **(split_out_setup_kwargs or {}))
    else:
        to_hash = df
    hashes = hash_object_dispatch(to_hash, index=False)
    if is_series_like(hashes):
        hashes = hashes.values
    # Reduce hashes to bucket numbers in place.
    hashes %= nparts
    return group_split_dispatch(df, hashes, nparts, ignore_index=ignore_index)
def split_evenly(df, k):
    """Split dataframe into k roughly equal parts.

    Returns a dict mapping part number ``i`` to the i-th contiguous
    positional slice of ``df``.
    """
    bounds = np.linspace(0, len(df), k + 1).astype(int)
    return {
        part: df.iloc[lo:hi]
        for part, (lo, hi) in enumerate(zip(bounds[:-1], bounds[1:]))
    }
def split_out_on_index(df):
    """Return the object to hash when splitting output partitions by index.

    A MultiIndex is expanded into a DataFrame of its levels so that every
    level participates in the hash; a flat index is returned as-is.
    """
    idx = df.index
    if not isinstance(idx, pd.MultiIndex):
        return idx
    return pd.DataFrame([], index=idx).reset_index()
def split_out_on_cols(df, cols=None):
    """Return the column subset of ``df`` whose rows are hashed when
    splitting output partitions by column values."""
    selected = df[cols]
    return selected
@insert_meta_param_description
def apply_concat_apply(
    args,
    chunk=None,
    aggregate=None,
    combine=None,
    meta=no_default,
    token=None,
    chunk_kwargs=None,
    aggregate_kwargs=None,
    combine_kwargs=None,
    split_every=None,
    split_out=None,
    split_out_setup=None,
    split_out_setup_kwargs=None,
    sort=None,
    ignore_index=False,
    **kwargs,
):
    """Apply a function to blocks, then concat, then apply again

    Parameters
    ----------
    args :
        Positional arguments for the `chunk` function. All `dask.dataframe`
        objects should be partitioned and indexed equivalently.
    chunk : function [block-per-arg] -> block
        Function to operate on each block of data
    aggregate : function concatenated-block -> block
        Function to operate on the concatenated result of chunk
    combine : function concatenated-block -> block, optional
        Function to operate on intermediate concatenated results of chunk
        in a tree-reduction. If not provided, defaults to aggregate.
    $META
    token : str, optional
        The name to use for the output keys.
    chunk_kwargs : dict, optional
        Keywords for the chunk function only.
    aggregate_kwargs : dict, optional
        Keywords for the aggregate function only.
    combine_kwargs : dict, optional
        Keywords for the combine function only.
    split_every : int, optional
        Group partitions into groups of this size while performing a
        tree-reduction. If set to False, no tree-reduction will be used,
        and all intermediates will be concatenated and passed to ``aggregate``.
        Default is 8.
    split_out : int, optional
        Number of output partitions. Split occurs after first chunk reduction.
    split_out_setup : callable, optional
        If provided, this function is called on each chunk before performing
        the hash-split. It should return a pandas object, where each row
        (excluding the index) is hashed. If not provided, the chunk is hashed
        as is.
    split_out_setup_kwargs : dict, optional
        Keywords for the `split_out_setup` function only.
    sort : bool, default None
        If allowed, sort the keys of the output aggregation.
    ignore_index : bool, default False
        If True, do not preserve index values throughout ACA operations.
    kwargs :
        All remaining keywords will be passed to ``chunk``, ``aggregate``, and
        ``combine``.

    Examples
    --------
    >>> def chunk(a_block, b_block):
    ...     pass

    >>> def agg(df):
    ...     pass

    >>> apply_concat_apply([a, b], chunk=chunk, aggregate=agg)  # doctest: +SKIP
    """
    # Shared **kwargs are folded into every stage's keyword dict.
    if chunk_kwargs is None:
        chunk_kwargs = dict()
    if aggregate_kwargs is None:
        aggregate_kwargs = dict()
    chunk_kwargs.update(kwargs)
    aggregate_kwargs.update(kwargs)

    if combine is None:
        if combine_kwargs:
            raise ValueError("`combine_kwargs` provided with no `combine`")
        combine = aggregate
        combine_kwargs = aggregate_kwargs
    else:
        if combine_kwargs is None:
            combine_kwargs = dict()
        combine_kwargs.update(kwargs)

    if not isinstance(args, (tuple, list)):
        args = [args]

    dfs = [arg for arg in args if isinstance(arg, _Frame)]

    npartitions = {arg.npartitions for arg in dfs}
    if len(npartitions) > 1:
        raise ValueError("All arguments must have same number of partitions")
    npartitions = npartitions.pop()

    if split_every is None:
        split_every = 8
    elif split_every is False:
        # No tree-reduction: aggregate sees all chunk outputs at once.
        split_every = npartitions
    elif split_every < 2 or not isinstance(split_every, Integral):
        raise ValueError("split_every must be an integer >= 2")

    token_key = tokenize(
        token or (chunk, aggregate),
        meta,
        args,
        chunk_kwargs,
        aggregate_kwargs,
        combine_kwargs,
        split_every,
        split_out,
        split_out_setup,
        split_out_setup_kwargs,
    )

    # Chunk
    # Task keys carry (name, depth, partition, shard) coordinates.
    a = f"{token or funcname(chunk)}-chunk-{token_key}"
    if len(args) == 1 and isinstance(args[0], _Frame) and not chunk_kwargs:
        dsk = {
            (a, 0, i, 0): (chunk, key) for i, key in enumerate(args[0].__dask_keys__())
        }
    else:
        dsk = {
            (a, 0, i, 0): (
                apply,
                chunk,
                [(x._name, i) if isinstance(x, _Frame) else x for x in args],
                chunk_kwargs,
            )
            for i in range(npartitions)
        }

    # Split
    # Optionally hash-split each chunk result into `split_out` shards.
    if split_out and split_out > 1:
        split_prefix = "split-%s" % token_key
        shard_prefix = "shard-%s" % token_key
        for i in range(npartitions):
            dsk[(split_prefix, i)] = (
                hash_shard,
                (a, 0, i, 0),
                split_out,
                split_out_setup,
                split_out_setup_kwargs,
                ignore_index,
            )
            for j in range(split_out):
                dsk[(shard_prefix, 0, i, j)] = (getitem, (split_prefix, i), j)
        a = shard_prefix
    else:
        split_out = 1

    # Combine
    # Tree-reduce `split_every` partitions at a time until at most
    # `split_every` remain; `depth` counts reduction levels.
    b = f"{token or funcname(combine)}-combine-{token_key}"
    k = npartitions
    depth = 0
    while k > split_every:
        for part_i, inds in enumerate(partition_all(split_every, range(k))):
            for j in range(split_out):
                conc = (_concat, [(a, depth, i, j) for i in inds], ignore_index)
                if combine_kwargs:
                    dsk[(b, depth + 1, part_i, j)] = (
                        apply,
                        combine,
                        [conc],
                        combine_kwargs,
                    )
                else:
                    dsk[(b, depth + 1, part_i, j)] = (combine, conc)
        k = part_i + 1
        a = b
        depth += 1

    if sort is not None:
        if sort and split_out > 1:
            raise NotImplementedError(
                "Cannot guarantee sorted keys for `split_out>1`."
                " Try using split_out=1, or grouping with sort=False."
            )
        aggregate_kwargs = aggregate_kwargs or {}
        aggregate_kwargs["sort"] = sort

    # Aggregate
    # One final task per output shard.
    for j in range(split_out):
        b = f"{token or funcname(aggregate)}-agg-{token_key}"
        conc = (_concat, [(a, depth, i, j) for i in range(k)], ignore_index)
        if aggregate_kwargs:
            dsk[(b, j)] = (apply, aggregate, [conc], aggregate_kwargs)
        else:
            dsk[(b, j)] = (aggregate, conc)

    if meta is no_default:
        # Infer output metadata by pushing fake data through chunk+aggregate.
        meta_chunk = _emulate(chunk, *args, udf=True, **chunk_kwargs)
        meta = _emulate(
            aggregate, _concat([meta_chunk], ignore_index), udf=True, **aggregate_kwargs
        )
    meta = make_meta(
        meta,
        index=(getattr(make_meta(dfs[0]), "index", None) if dfs else None),
        parent_meta=dfs[0]._meta,
    )

    graph = HighLevelGraph.from_collections(b, dsk, dependencies=dfs)
    # Output divisions are unknown after an arbitrary aggregation.
    divisions = [None] * (split_out + 1)

    return new_dd_object(graph, b, meta, divisions, parent_meta=dfs[0]._meta)
# Short alias for apply_concat_apply, used throughout this module.
aca = apply_concat_apply
def _extract_meta(x, nonempty=False):
    """
    Extract internal cache data (``_meta``) from dd.DataFrame / dd.Series

    Recurses through lists, tuples and dicts; with ``nonempty=True`` the
    fake non-empty meta (``_meta_nonempty``) is used instead.
    """
    if isinstance(x, (Scalar, _Frame)):
        return x._meta_nonempty if nonempty else x._meta
    if isinstance(x, list):
        return [_extract_meta(item, nonempty) for item in x]
    if isinstance(x, tuple):
        return tuple(_extract_meta(item, nonempty) for item in x)
    if isinstance(x, dict):
        return {key: _extract_meta(value, nonempty) for key, value in x.items()}
    if isinstance(x, Delayed):
        raise ValueError(
            "Cannot infer dataframe metadata with a `dask.delayed` argument"
        )
    return x
def _emulate(func, *args, udf=False, **kwargs):
    """
    Apply a function using args / kwargs. If arguments contain dd.DataFrame /
    dd.Series, using internal cache (``_meta``) for calculation
    """
    # Failures are re-raised with a helpful metadata-inference message.
    with raise_on_meta_error(funcname(func), udf=udf):
        meta_args = _extract_meta(args, True)
        meta_kwargs = _extract_meta(kwargs, True)
        return func(*meta_args, **meta_kwargs)
@insert_meta_param_description
def map_partitions(
    func,
    *args,
    meta=no_default,
    enforce_metadata=True,
    transform_divisions=True,
    align_dataframes=True,
    **kwargs,
):
    """Apply Python function on each DataFrame partition.

    Parameters
    ----------
    func : function
        Function applied to each partition.
    args, kwargs :
        Arguments and keywords to pass to the function. At least one of the
        args should be a Dask.dataframe. Arguments and keywords may contain
        ``Scalar``, ``Delayed`` or regular python objects. DataFrame-like args
        (both dask and pandas) will be repartitioned to align (if necessary)
        before applying the function (see ``align_dataframes`` to control).
    enforce_metadata : bool, default True
        Whether to enforce at runtime that the structure of the DataFrame
        produced by ``func`` actually matches the structure of ``meta``.
        This will rename and reorder columns for each partition,
        and will raise an error if this doesn't work or types don't match.
    transform_divisions : bool, default True
        Whether to apply the function onto the divisions and apply those
        transformed divisions to the output.
    align_dataframes : bool, default True
        Whether to repartition DataFrame- or Series-like args
        (both dask and pandas) so their divisions align before applying
        the function. This requires all inputs to have known divisions.
        Single-partition inputs will be split into multiple partitions.

        If False, all inputs must have either the same number of partitions
        or a single partition. Single-partition inputs will be broadcast to
        every partition of multi-partition inputs.
    $META
    """
    name = kwargs.pop("token", None)
    parent_meta = kwargs.pop("parent_meta", None)

    assert callable(func)
    # An explicit token takes the place of the function name; otherwise
    # the function itself participates in the tokenization.
    if name is not None:
        token = tokenize(meta, *args, **kwargs)
    else:
        name = funcname(func)
        token = tokenize(func, meta, *args, **kwargs)
    name = f"{name}-{token}"

    from .multi import _maybe_align_partitions

    if align_dataframes:
        args = _maybe_from_pandas(args)
        args = _maybe_align_partitions(args)

    dfs = [df for df in args if isinstance(df, _Frame)]
    meta_index = getattr(make_meta(dfs[0]), "index", None) if dfs else None
    if parent_meta is None and dfs:
        parent_meta = dfs[0]._meta

    if meta is no_default:
        # Use non-normalized kwargs here, as we want the real values (not
        # delayed values)
        meta = _emulate(func, *args, udf=True, **kwargs)
    else:
        meta = make_meta(meta, index=meta_index, parent_meta=parent_meta)

    if all(isinstance(arg, Scalar) for arg in args):
        # All-scalar inputs collapse to a single-task Scalar result.
        layer = {
            (name, 0): (apply, func, (tuple, [(arg._name, 0) for arg in args]), kwargs)
        }
        graph = HighLevelGraph.from_collections(name, layer, dependencies=args)
        return Scalar(graph, name, meta)
    elif not (has_parallel_type(meta) or is_arraylike(meta) and meta.shape):
        # If `meta` is not a pandas object, the concatenated results will be a
        # different type
        meta = make_meta(_concat([meta]), index=meta_index)

    # Ensure meta is empty series
    meta = make_meta(meta, parent_meta=parent_meta)

    # Normalize positional args, collecting any embedded dask collections
    # as graph dependencies.
    args2 = []
    dependencies = []
    for arg in args:
        if isinstance(arg, _Frame):
            args2.append(arg)
            dependencies.append(arg)
            continue
        arg = normalize_arg(arg)
        arg2, collections = unpack_collections(arg)
        if collections:
            args2.append(arg2)
            dependencies.extend(collections)
        else:
            args2.append(arg)

    # Same normalization for keyword args; `simple` records whether any
    # kwarg carried a dask collection.
    kwargs3 = {}
    simple = True
    for k, v in kwargs.items():
        v = normalize_arg(v)
        v, collections = unpack_collections(v)
        dependencies.extend(collections)
        kwargs3[k] = v
        if collections:
            simple = False

    divisions = dfs[0].divisions
    if transform_divisions and isinstance(dfs[0], Index) and len(dfs) == 1:
        # Apply `func` to the division values themselves; fall back
        # silently if that fails.
        try:
            divisions = func(
                *[pd.Index(a.divisions) if a is dfs[0] else a for a in args], **kwargs
            )
            if isinstance(divisions, pd.Index):
                divisions = methods.tolist(divisions)
        except Exception:
            pass
    else:
        if not valid_divisions(divisions):
            divisions = [None] * (dfs[0].npartitions + 1)

    if has_keyword(func, "partition_info"):
        # Inject a per-partition info dict as a blockwise dependency and
        # adapt `func` to receive it as a keyword.
        partition_info = {
            (i,): {"number": i, "division": division}
            for i, division in enumerate(divisions[:-1])
        }

        args2.insert(0, BlockwiseDepDict(partition_info))
        orig_func = func

        func = lambda partition_info, *args, **kwargs: orig_func(
            *args, **kwargs, partition_info=partition_info
        )

    if enforce_metadata:
        # Wrap `func` so each partition's output is coerced to `meta`.
        dsk = partitionwise_graph(
            apply_and_enforce,
            name,
            *args2,
            dependencies=dependencies,
            _func=func,
            _meta=meta,
            **kwargs3,
        )
    else:
        kwargs4 = kwargs if simple else kwargs3
        dsk = partitionwise_graph(
            func, name, *args2, **kwargs4, dependencies=dependencies
        )

    graph = HighLevelGraph.from_collections(name, dsk, dependencies=dependencies)
    return new_dd_object(graph, name, meta, divisions)
def apply_and_enforce(*args, **kwargs):
    """Apply a function, and enforce the output to match meta

    Ensures the output has the same columns, even if empty."""
    func = kwargs.pop("_func")
    meta = kwargs.pop("_meta")
    result = func(*args, **kwargs)

    frame_like = (
        is_dataframe_like(result)
        or is_series_like(result)
        or is_index_like(result)
    )
    if not frame_like:
        # map_partitions may produce arbitrary objects; pass them through.
        return result
    if not len(result):
        # Empty output: substitute the meta so the schema survives.
        return meta
    if is_dataframe_like(result):
        check_matching_columns(meta, result)
        return _rename(meta.columns, result)
    return _rename(meta.name, result)
def _rename(columns, df):
    """
    Rename columns of pd.DataFrame or name of pd.Series.
    Not for dd.DataFrame or dd.Series.

    Parameters
    ----------
    columns : tuple, string, pd.DataFrame or pd.Series
        Column names, Series name or pandas instance which has the
        target column names / name.
    df : pd.DataFrame or pd.Series
        target DataFrame / Series to be renamed
    """
    assert not isinstance(df, _Frame)
    if columns is no_default:
        return df
    if isinstance(columns, Iterator):
        columns = list(columns)
    if is_dataframe_like(df):
        if is_dataframe_like(columns):
            columns = columns.columns
        if not isinstance(columns, pd.Index):
            columns = pd.Index(columns)
        unchanged = (
            len(columns) == len(df.columns)
            and type(columns) is type(df.columns)
            and columns.equals(df.columns)
        )
        if unchanged:
            # target is identical; renaming would be a no-op
            return df
        # deep=False copies neither data nor index, so this is cheap
        renamed = df.copy(deep=False)
        renamed.columns = columns
        return renamed
    if is_series_like(df) or is_index_like(df):
        if is_series_like(columns) or is_index_like(columns):
            columns = columns.name
        return df if df.name == columns else df.rename(columns)
    # map_partition may pass other types
    return df
def _rename_dask(df, names):
    """
    Destructively rename columns of dd.DataFrame or name of dd.Series.
    Not for pd.DataFrame or pd.Series.

    Internally used to overwrite dd.DataFrame.columns and dd.Series.name.
    We can't use map_partitions because it applies the function and *then*
    renames, whereas the metadata must be renamed up-front.

    Parameters
    ----------
    df : dd.DataFrame or dd.Series
        target DataFrame / Series to be renamed
    names : tuple, string
        Column names/Series name
    """
    assert isinstance(df, _Frame)
    renamed_meta = _rename(names, df._meta)
    layer_name = f"rename-{tokenize(df, renamed_meta)}"
    layer = partitionwise_graph(_rename, layer_name, renamed_meta, df)
    graph = HighLevelGraph.from_collections(layer_name, layer, dependencies=[df])
    return new_dd_object(graph, layer_name, renamed_meta, df.divisions)
def quantile(df, q, method="default"):
    """Approximate quantiles of Series.
    Parameters
    ----------
    q : list/array of floats
        Iterable of numbers ranging from 0 to 100 for the desired quantiles
    method : {'default', 'tdigest', 'dask'}, optional
        What method to use. By default will use dask's internal custom
        algorithm (``'dask'``). If set to ``'tdigest'`` will use tdigest for
        floats and ints and fallback to the ``'dask'`` otherwise.
    """
    # current implementation needs q to be sorted so
    # sort if array-like, otherwise leave it alone
    q_ndarray = np.array(q)
    if q_ndarray.ndim > 0:
        q_ndarray.sort(kind="mergesort")
        q = q_ndarray
    assert isinstance(df, Series)
    allowed_methods = ["default", "dask", "tdigest"]
    if method not in allowed_methods:
        raise ValueError("method can only be 'default', 'dask' or 'tdigest'")
    if method == "default":
        internal_method = "dask"
    else:
        internal_method = method
    # currently, only Series has quantile method
    if isinstance(df, Index):
        # Index has no quantile; go through a Series view of the metadata
        series_typ = df._meta.to_series()._constructor
        meta = df._meta_nonempty.to_series().quantile(q)
    else:
        if is_series_like(df._meta):
            series_typ = df._meta._constructor
        else:
            series_typ = df._meta._constructor_sliced
        meta = df._meta_nonempty.quantile(q)
    if is_series_like(meta):
        # Index.quantile(list-like) must be pd.Series, not pd.Index
        df_name = df.name
        # wrap the merged percentiles back into a backend Series
        finalize_tsk = lambda tsk: (series_typ, tsk, q, None, df_name)
        return_type = Series
    else:
        # scalar q: result is a Scalar; lift q to a 1-element list for the graph
        finalize_tsk = lambda tsk: (getitem, tsk, 0)
        return_type = Scalar
        q = [q]
    # pandas uses quantile in [0, 1]
    # numpy / everyone else uses [0, 100]
    qs = np.asarray(q) * 100
    token = tokenize(df, qs)
    if len(qs) == 0:
        # no quantiles requested: single-partition empty float Series
        name = "quantiles-" + token
        empty_index = pd.Index([], dtype=float)
        return Series(
            {(name, 0): series_typ([], name=df.name, index=empty_index, dtype="float")},
            name,
            df._meta,
            [None, None],
        )
    else:
        new_divisions = [np.min(q), np.max(q)]
    df = df.dropna()
    if internal_method == "tdigest" and (
        np.issubdtype(df.dtype, np.floating) or np.issubdtype(df.dtype, np.integer)
    ):
        from dask.utils import import_required
        import_required(
            "crick", "crick is a required dependency for using the t-digest method."
        )
        from dask.array.percentile import _percentiles_from_tdigest, _tdigest_chunk
        # layer 1: build one t-digest sketch per partition
        name = "quantiles_tdigest-1-" + token
        val_dsk = {
            (name, i): (_tdigest_chunk, (getattr, key, "values"))
            for i, key in enumerate(df.__dask_keys__())
        }
        # layer 2: merge the sketches and read off the requested percentiles
        name2 = "quantiles_tdigest-2-" + token
        merge_dsk = {
            (name2, 0): finalize_tsk((_percentiles_from_tdigest, qs, sorted(val_dsk)))
        }
    else:
        from dask.array.dispatch import percentile_lookup as _percentile
        from dask.array.percentile import merge_percentiles
        # Add 0 and 100 during calculation for more robust behavior (hopefully)
        calc_qs = np.pad(qs, 1, mode="constant")
        calc_qs[-1] = 100
        # layer 1: per-partition percentiles on the padded quantile grid
        name = "quantiles-1-" + token
        val_dsk = {
            (name, i): (_percentile, key, calc_qs)
            for i, key in enumerate(df.__dask_keys__())
        }
        # layer 2: merge the per-partition estimates into the final answer
        name2 = "quantiles-2-" + token
        merge_dsk = {
            (name2, 0): finalize_tsk(
                (
                    merge_percentiles,
                    qs,
                    [calc_qs] * df.npartitions,
                    sorted(val_dsk),
                    "lower",
                    None,
                    False,
                )
            )
        }
    dsk = merge(val_dsk, merge_dsk)
    graph = HighLevelGraph.from_collections(name2, dsk, dependencies=[df])
    return return_type(graph, name2, meta, new_divisions)
def cov_corr(df, min_periods=None, corr=False, scalar=False, split_every=False):
    """DataFrame covariance and pearson correlation.
    Computes pairwise covariance or correlation of columns, excluding NA/null
    values.
    Parameters
    ----------
    df : DataFrame
    min_periods : int, optional
        Minimum number of observations required per pair of columns
        to have a valid result.
    corr : bool, optional
        If True, compute the Pearson correlation. If False [default], compute
        the covariance.
    scalar : bool, optional
        If True, compute covariance between two variables as a scalar. Only
        valid if `df` has 2 columns.  If False [default], compute the entire
        covariance/correlation matrix.
    split_every : int, optional
        Group partitions into groups of this size while performing a
        tree-reduction. If set to False, no tree-reduction will be used.
        Default is False.
    """
    if min_periods is None:
        min_periods = 2
    elif min_periods < 2:
        raise ValueError("min_periods must be >= 2")
    if split_every is False:
        split_every = df.npartitions
    elif split_every < 2 or not isinstance(split_every, Integral):
        raise ValueError("split_every must be an integer >= 2")
    df = df._get_numeric_data()
    if scalar and len(df.columns) != 2:
        raise ValueError("scalar only valid for 2 column dataframe")
    token = tokenize(df, min_periods, scalar, split_every)
    funcname = "corr" if corr else "cov"
    # layer 1: per-partition moment statistics (sums, counts, covariance)
    a = f"{funcname}-chunk-{df._name}"
    dsk = {
        (a, i): (cov_corr_chunk, f, corr) for (i, f) in enumerate(df.__dask_keys__())
    }
    # tree-reduce the chunk results in groups of `split_every` until at most
    # `split_every` pieces remain
    prefix = f"{funcname}-combine-{df._name}-"
    k = df.npartitions
    b = a
    depth = 0
    while k > split_every:
        b = prefix + str(depth)
        for part_i, inds in enumerate(partition_all(split_every, range(k))):
            dsk[(b, part_i)] = (cov_corr_combine, [(a, i) for i in inds], corr)
        k = part_i + 1
        a = b
        depth += 1
    # final aggregation into the full matrix (or a scalar for 2 columns)
    name = f"{funcname}-{token}"
    dsk[(name, 0)] = (
        cov_corr_agg,
        [(a, i) for i in range(k)],
        df.columns,
        min_periods,
        corr,
        scalar,
    )
    graph = HighLevelGraph.from_collections(name, dsk, dependencies=[df])
    if scalar:
        return Scalar(graph, name, "f8")
    meta = make_meta(
        [(c, "f8") for c in df.columns], index=df.columns, parent_meta=df._meta
    )
    return DataFrame(graph, name, meta, (df.columns[0], df.columns[-1]))
def cov_corr_chunk(df, corr=False):
    """Chunk part of a covariance or correlation computation.

    Returns a dict of (ncols, ncols) arrays:

    - ``"sum"``: row ``i`` holds the column sums restricted to rows where
      column ``i`` is non-null (needed to correct for pairwise-different
      missingness when chunks are later combined).
    - ``"count"``: the matching pairwise non-null counts.
    - ``"cov"``: pandas' pairwise covariance scaled by ``count - 1`` so
      that chunk results can simply be summed.
    - ``"m"`` (only when ``corr=True``): pairwise sums of squared
      deviations from the per-pair means, used for the correlation
      denominator.

    Note: a previously constructed structured-array ``dtype`` list was
    never used and has been removed (dead code).
    """
    shape = (df.shape[1], df.shape[1])
    df = df.astype("float64", copy=False)
    sums = np.zeros_like(df.values, shape=shape)
    counts = np.zeros_like(df.values, shape=shape)
    for idx, col in enumerate(df):
        # restrict to the rows where column `idx` is present
        mask = df.iloc[:, idx].notnull()
        sums[idx] = df[mask].sum().values
        counts[idx] = df[mask].count().values
    cov = df.cov().values
    if corr:
        # division by a zero count produces NaN on purpose; silence warnings
        with warnings.catch_warnings(record=True):
            warnings.simplefilter("always")
            mu = (sums / counts).T
        m = np.zeros_like(df.values, shape=shape)
        mask = df.isnull().values
        for idx, x in enumerate(df):
            # Avoid using ufunc.outer (not supported by cupy)
            mu_discrepancy = (
                np.subtract(df.iloc[:, idx].values[:, None], mu[idx][None, :]) ** 2
            )
            mu_discrepancy[mask] = np.nan
            m[idx] = np.nansum(mu_discrepancy, axis=0)
        m = m.T
    out = {"sum": sums, "count": counts, "cov": cov * (counts - 1)}
    if corr:
        out["m"] = m
    return out
def cov_corr_combine(data_in, corr=False):
    """Merge a list of per-chunk covariance/correlation statistics.

    Stacks the ``sum``/``count``/``cov`` (and ``m`` when ``corr``) arrays
    from each chunk, then combines them with the pairwise update formula so
    the result looks like a single chunk's statistics.
    """
    fields = ["sum", "count", "cov"]
    if corr:
        fields.append("m")
    data = {}
    for field in fields:
        parts = [chunk[field] for chunk in data_in]
        data[field] = np.concatenate(parts).reshape((len(parts),) + parts[0].shape)
    sums = np.nan_to_num(data["sum"])
    counts = data["count"]
    cum_sums = np.cumsum(sums, 0)
    cum_counts = np.cumsum(counts, 0)
    # pairwise combination of each running total with the next chunk
    s1, s2 = cum_sums[:-1], sums[1:]
    n1, n2 = cum_counts[:-1], counts[1:]
    with np.errstate(invalid="ignore"):
        d = (s2 / n2) - (s1 / n1)
        C = np.nansum(
            (n1 * n2) / (n1 + n2) * (d * d.transpose((0, 2, 1))), 0
        ) + np.nansum(data["cov"], 0)
    out = {"sum": cum_sums[-1], "count": cum_counts[-1], "cov": C}
    if corr:
        nobs = np.where(cum_counts[-1], cum_counts[-1], np.nan)
        mu = cum_sums[-1] / nobs
        counts_na = np.where(counts, counts, np.nan)
        out["m"] = np.nansum(data["m"] + counts * (sums / counts_na - mu) ** 2, axis=0)
    return out
def cov_corr_agg(data, cols, min_periods=2, corr=False, scalar=False):
    """Final aggregation of combined chunk statistics into the result.

    Divides the accumulated (co)variance numerator by the appropriate
    denominator, masking pairs with fewer than ``min_periods`` observations.
    Returns a float for ``scalar=True``, otherwise a square DataFrame
    indexed by ``cols``.
    """
    merged = cov_corr_combine(data, corr)
    counts = merged["count"]
    C = merged["cov"]
    # not enough paired observations -> undefined result
    C[counts < min_periods] = np.nan
    if corr:
        m2 = merged["m"]
        den = np.sqrt(m2 * m2.T)
    else:
        den = np.where(counts, counts, np.nan) - 1
    with np.errstate(invalid="ignore", divide="ignore"):
        mat = C / den
    if scalar:
        return float(mat[0, 1])
    return pd.DataFrame(mat, columns=cols, index=cols)
def pd_split(df, p, random_state=None, shuffle=False):
    """Split DataFrame into multiple pieces pseudorandomly

    >>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
    ...                    'b': [2, 3, 4, 5, 6, 7]})
    >>> a, b = pd_split(
    ...     df, [0.5, 0.5], random_state=123, shuffle=True
    ... )  # roughly 50/50 split
    >>> a
       a  b
    3  4  5
    0  1  2
    5  6  7
    >>> b
       a  b
    1  2  3
    4  5  6
    2  3  4
    """
    fractions = list(p)
    if shuffle:
        if not isinstance(random_state, np.random.RandomState):
            random_state = np.random.RandomState(random_state)
        # full-frame sample == row shuffle
        df = df.sample(frac=1.0, random_state=random_state)
    # one group label per row, drawn with the requested probabilities
    labels = pseudorandom(len(df), fractions, random_state)
    return [df.iloc[labels == i] for i in range(len(fractions))]
def _take_last(a, skipna=True):
"""
take last row (Series) of DataFrame / last value of Series
considering NaN.
Parameters
----------
a : pd.DataFrame or pd.Series
skipna : bool, default True
Whether to exclude NaN
"""
def _last_valid(s):
for i in range(1, min(10, len(s) + 1)):
val = s.iloc[-i]
if not pd.isnull(val):
return val
else:
nonnull = s[s.notna()]
if not nonnull.empty:
return nonnull.iloc[-1]
return None
if skipna is False:
return a.iloc[-1]
else:
# take last valid value excluding NaN, NaN location may be different
# in each column
if is_dataframe_like(a):
# create Series from appropriate backend dataframe library
series_typ = type(a.iloc[0:1, 0])
if a.empty:
return series_typ([], dtype="float")
return series_typ(
{col: _last_valid(a[col]) for col in a.columns}, index=a.columns
)
else:
return _last_valid(a)
def check_divisions(divisions):
    """Validate a divisions sequence.

    Divisions must be a list or tuple, sorted ascending, and contain no
    duplicates except that the final boundary may equal the previous one
    (a last partition holding a single index value).

    Raises
    ------
    ValueError
        If divisions is not a list/tuple, is unsorted, or has an interior
        duplicate.
    """
    if not isinstance(divisions, (list, tuple)):
        raise ValueError("New division must be list or tuple")
    divisions = list(divisions)
    if divisions != sorted(divisions):
        raise ValueError("New division must be sorted")
    # Only the count of distinct values matters, so a stdlib set is the
    # simplest O(n) check (previously routed through the `unique` helper).
    if len(divisions[:-1]) != len(set(divisions[:-1])):
        msg = "New division must be unique, except for the last element"
        raise ValueError(msg)
def repartition_divisions(a, b, name, out1, out2, force=False):
    """dask graph to repartition dataframe by new divisions

    Parameters
    ----------
    a : tuple
        old divisions
    b : tuple, list
        new divisions
    name : str
        name of old dataframe
    out1 : str
        name of temporary splits
    out2 : str
        name of new dataframe
    force : bool, default False
        Allows the expansion of the existing divisions.
        If False then the new divisions lower and upper bounds must be
        the same as the old divisions.

    Examples
    --------
    >>> from pprint import pprint
    >>> pprint(repartition_divisions([1, 3, 7], [1, 4, 6, 7], 'a', 'b', 'c'))  # doctest: +ELLIPSIS
    {('b', 0): (<function boundary_slice at ...>, ('a', 0), 1, 3, False),
     ('b', 1): (<function boundary_slice at ...>, ('a', 1), 3, 4, False),
     ('b', 2): (<function boundary_slice at ...>, ('a', 1), 4, 6, False),
     ('b', 3): (<function boundary_slice at ...>, ('a', 1), 6, 7, True),
     ('c', 0): (<function concat at ...>, [('b', 0), ('b', 1)]),
     ('c', 1): ('b', 2),
     ('c', 2): ('b', 3)}
    """
    check_divisions(b)
    if len(b) < 2:
        # minimum division is 2 elements, like [0, 0]
        raise ValueError("New division must be longer than 2 elements")
    if force:
        if a[0] < b[0]:
            msg = (
                "left side of the new division must be equal or smaller "
                "than old division"
            )
            raise ValueError(msg)
        if a[-1] > b[-1]:
            msg = (
                "right side of the new division must be equal or larger "
                "than old division"
            )
            raise ValueError(msg)
    else:
        if a[0] != b[0]:
            msg = "left side of old and new divisions are different"
            raise ValueError(msg)
        if a[-1] != b[-1]:
            msg = "right side of old and new divisions are different"
            raise ValueError(msg)

    def _is_single_last_div(x):
        """Whether last division only contains single label"""
        return len(x) >= 2 and x[-1] == x[-2]

    # Phase 1: walk old and new divisions together, cutting the old
    # partitions at every boundary of either; each slice becomes an `out1`
    # task and `c` records the combined boundary list.
    c = [a[0]]
    d = dict()
    low = a[0]
    i, j = 1, 1  # indices for old/new divisions
    k = 0  # index for temp divisions
    last_elem = _is_single_last_div(a)
    # process through old division
    # left part of new division can be processed in this loop
    while i < len(a) and j < len(b):
        if a[i] < b[j]:
            # tuple is something like:
            # (methods.boundary_slice, ('from_pandas-#', 0), 3, 4, False))
            d[(out1, k)] = (methods.boundary_slice, (name, i - 1), low, a[i], False)
            low = a[i]
            i += 1
        elif a[i] > b[j]:
            d[(out1, k)] = (methods.boundary_slice, (name, i - 1), low, b[j], False)
            low = b[j]
            j += 1
        else:
            d[(out1, k)] = (methods.boundary_slice, (name, i - 1), low, b[j], False)
            low = b[j]
            if len(a) == i + 1 or a[i] < a[i + 1]:
                j += 1
            i += 1
        c.append(low)
        k += 1
    # right part of new division can remain
    if a[-1] < b[-1] or b[-1] == b[-2]:
        for _j in range(j, len(b)):
            # always use right-most of old division
            # because it may contain last element
            m = len(a) - 2
            d[(out1, k)] = (methods.boundary_slice, (name, m), low, b[_j], False)
            low = b[_j]
            c.append(low)
            k += 1
    else:
        # even if new division is processed through,
        # right-most element of old division can remain
        if last_elem and i < len(a):
            d[(out1, k)] = (methods.boundary_slice, (name, i - 1), a[i], a[i], False)
            k += 1
        c.append(a[-1])
    # replace last element of tuple with True
    # (include the right boundary only on the very last slice)
    d[(out1, k - 1)] = d[(out1, k - 1)][:-1] + (True,)

    # Phase 2: group the `out1` slices between consecutive new boundaries
    # and concatenate each group into an `out2` partition.
    i, j = 0, 1
    last_elem = _is_single_last_div(c)
    while j < len(b):
        tmp = []
        while c[i] < b[j]:
            tmp.append((out1, i))
            i += 1
        while (
            last_elem
            and c[i] == b[-1]
            and (b[-1] != b[-2] or j == len(b) - 1)
            and i < k
        ):
            # append if last split is not included
            tmp.append((out1, i))
            i += 1
        if len(tmp) == 0:
            # dummy slice to return empty DataFrame or Series,
            # which retain original data attributes (columns / name)
            d[(out2, j - 1)] = (methods.boundary_slice, (name, 0), a[0], a[0], False)
        elif len(tmp) == 1:
            d[(out2, j - 1)] = tmp[0]
        else:
            # NOTE(review): this guard looks unreachable — the empty case is
            # handled by the `len(tmp) == 0` branch above; kept for safety.
            if not tmp:
                raise ValueError(
                    "check for duplicate partitions\nold:\n%s\n\n"
                    "new:\n%s\n\ncombined:\n%s" % (pformat(a), pformat(b), pformat(c))
                )
            d[(out2, j - 1)] = (methods.concat, tmp)
        j += 1
    return d
def repartition_freq(df, freq=None):
    """Repartition a timeseries dataframe by a new frequency"""
    if not isinstance(df.divisions[0], pd.Timestamp):
        raise TypeError("Can only repartition on frequency for timeseries")
    freq = _map_freq_to_period_start(freq)
    try:
        start = df.divisions[0].ceil(freq)
    except ValueError:
        # non-fixed frequencies cannot be ceil'd; keep the original start
        start = df.divisions[0]
    new_divisions = methods.tolist(
        pd.date_range(start=start, end=df.divisions[-1], freq=freq)
    )
    if not len(new_divisions):
        # the whole frame fits in a single period
        new_divisions = [df.divisions[0], df.divisions[-1]]
    else:
        # make sure the first/last boundaries are the original endpoints
        new_divisions.append(df.divisions[-1])
        if new_divisions[0] != df.divisions[0]:
            new_divisions = [df.divisions[0]] + new_divisions
    return df.repartition(divisions=new_divisions)
def _map_freq_to_period_start(freq):
"""Ensure that the frequency pertains to the **start** of a period.
If e.g. `freq='M'`, then the divisions are:
- 2021-31-1 00:00:00 (start of February partition)
- 2021-2-28 00:00:00 (start of March partition)
- ...
but this **should** be:
- 2021-2-1 00:00:00 (start of February partition)
- 2021-3-1 00:00:00 (start of March partition)
- ...
Therefore, we map `freq='M'` to `freq='MS'` (same for quarter and year).
"""
if not isinstance(freq, str):
return freq
offset = pd.tseries.frequencies.to_offset(freq)
offset_type_name = type(offset).__name__
if not offset_type_name.endswith("End"):
return freq
new_offset = offset_type_name[: -len("End")] + "Begin"
try:
new_offset_type = getattr(pd.tseries.offsets, new_offset)
if "-" in freq:
_, anchor = freq.split("-")
anchor = "-" + anchor
else:
anchor = ""
n = str(offset.n) if offset.n != 1 else ""
return f"{n}{new_offset_type._prefix}{anchor}"
except AttributeError:
return freq
def repartition_size(df, size):
    """
    Repartition dataframe so that new partitions have approximately `size` memory usage each
    """
    if isinstance(size, str):
        size = parse_bytes(size)
    size = int(size)
    mem_usages = df.map_partitions(total_mem_usage, deep=True).compute()
    # 1. split each partition that is larger than partition_size
    nsplits = 1 + mem_usages // size
    if np.any(nsplits > 1):
        split_name = f"repartition-split-{size}-{tokenize(df)}"
        df = _split_partitions(df, nsplits, split_name)
        # update mem_usages to account for the split partitions
        per_split = [
            usage / n for n, usage in zip(nsplits, mem_usages) for _ in range(n)
        ]
        mem_usages = pd.Series(per_split)
    # 2. now that all partitions are less than size, concat them up to size
    assert np.all(mem_usages <= size)
    new_npartitions = list(map(len, iter_chunks(mem_usages, size)))
    new_partitions_boundaries = np.cumsum(new_npartitions)
    new_name = f"repartition-{size}-{tokenize(df)}"
    return _repartition_from_boundaries(df, new_partitions_boundaries, new_name)
def total_mem_usage(df, index=True, deep=False):
    """Total memory usage of a DataFrame/Series, collapsed to one number."""
    usage = df.memory_usage(index=index, deep=deep)
    # DataFrame.memory_usage returns a per-column Series; sum it up
    if is_series_like(usage):
        usage = usage.sum()
    return usage
def repartition_npartitions(df, npartitions):
    """Repartition dataframe to a smaller number of partitions"""
    new_name = "repartition-%d-%s" % (npartitions, tokenize(df))
    if df.npartitions == npartitions:
        return df
    elif df.npartitions > npartitions:
        # coarsening: concatenate evenly-sized runs of adjacent partitions
        npartitions_ratio = df.npartitions / npartitions
        new_partitions_boundaries = [
            int(new_partition_index * npartitions_ratio)
            for new_partition_index in range(npartitions + 1)
        ]
        return _repartition_from_boundaries(df, new_partitions_boundaries, new_name)
    else:
        # refining: we need more partitions than we currently have
        original_divisions = divisions = pd.Series(df.divisions)
        if df.known_divisions and (
            np.issubdtype(divisions.dtype, np.datetime64)
            or np.issubdtype(divisions.dtype, np.number)
        ):
            # numeric/datetime divisions: interpolate new boundaries evenly
            # between the existing ones
            if np.issubdtype(divisions.dtype, np.datetime64):
                divisions = divisions.values.astype("float64")
            if is_series_like(divisions):
                divisions = divisions.values
            n = len(divisions)
            divisions = np.interp(
                x=np.linspace(0, n, npartitions + 1),
                xp=np.linspace(0, n, n),
                fp=divisions,
            )
            if np.issubdtype(original_divisions.dtype, np.datetime64):
                divisions = methods.tolist(
                    pd.Series(divisions).astype(original_divisions.dtype)
                )
            elif np.issubdtype(original_divisions.dtype, np.integer):
                divisions = divisions.astype(original_divisions.dtype)
            if isinstance(divisions, np.ndarray):
                divisions = divisions.tolist()
            divisions = list(divisions)
            # pin exact endpoints to avoid floating-point drift at the edges
            divisions[0] = df.divisions[0]
            divisions[-1] = df.divisions[-1]
            return df.repartition(divisions=divisions)
        else:
            # unknown or non-numeric divisions: split each partition into an
            # (almost) equal number of pieces by row count
            div, mod = divmod(npartitions, df.npartitions)
            nsplits = [div] * df.npartitions
            nsplits[-1] += mod
            return _split_partitions(df, nsplits, new_name)
def _repartition_from_boundaries(df, new_partitions_boundaries, new_name):
    """Concatenate runs of input partitions delimited by the given boundaries.

    Boundary ``i``..``i+1`` selects the input partitions that are merged
    into output partition ``i``; the list is padded so it always starts at
    0 and ends at ``df.npartitions``.
    """
    boundaries = new_partitions_boundaries
    if not isinstance(boundaries, list):
        boundaries = list(boundaries)
    if boundaries[0] > 0:
        boundaries.insert(0, 0)
    if boundaries[-1] < df.npartitions:
        boundaries.append(df.npartitions)
    dsk = {}
    for out_i, (lo, hi) in enumerate(zip(boundaries, boundaries[1:])):
        dsk[new_name, out_i] = (methods.concat, [(df._name, j) for j in range(lo, hi)])
    divisions = [df.divisions[b] for b in boundaries]
    graph = HighLevelGraph.from_collections(new_name, dsk, dependencies=[df])
    return new_dd_object(graph, new_name, df._meta, divisions)
def _split_partitions(df, nsplits, new_name):
    """Split each partition of a Dask dataframe into several new partitions.

    Parameters
    ----------
    df: DataFrame or Series
    nsplits: List[int]
        Number of target dataframes for each partition
        The length of nsplits should be the same as df.npartitions
    new_name: str

    See Also
    --------
    repartition_npartitions
    repartition_size
    """
    if len(nsplits) != df.npartitions:
        raise ValueError(f"nsplits should have len={df.npartitions}")
    dsk = {}
    split_name = f"split-{tokenize(df, nsplits)}"
    out_idx = 0
    for part_idx, count in enumerate(nsplits):
        if count == 1:
            # nothing to split: alias the original partition
            dsk[new_name, out_idx] = (df._name, part_idx)
            out_idx += 1
        else:
            dsk[split_name, part_idx] = (split_evenly, (df._name, part_idx), count)
            for piece in range(count):
                dsk[new_name, out_idx] = (getitem, (split_name, part_idx), piece)
                out_idx += 1
    # split boundaries inside a partition are unknown
    divisions = [None] * (1 + sum(nsplits))
    graph = HighLevelGraph.from_collections(new_name, dsk, dependencies=[df])
    return new_dd_object(graph, new_name, df._meta, divisions)
def repartition(df, divisions=None, force=False):
    """Repartition dataframe along new divisions

    Dask.DataFrame objects are partitioned along their index.  Often when
    multiple dataframes interact we need to align these partitionings.  The
    ``repartition`` function constructs a new DataFrame object holding the same
    data but partitioned on different values.  It does this by performing a
    sequence of ``loc`` and ``concat`` calls to split and merge the previous
    generation of partitions.

    Parameters
    ----------
    divisions : list
        List of partitions to be used
    force : bool, default False
        Allows the expansion of the existing divisions.
        If False then the new divisions lower and upper bounds must be
        the same as the old divisions.

    Examples
    --------
    >>> df = df.repartition([0, 5, 10, 20])  # doctest: +SKIP

    Also works on Pandas objects

    >>> ddf = dd.repartition(df, [0, 5, 10, 20])  # doctest: +SKIP
    """
    token = tokenize(df, divisions)
    if isinstance(df, _Frame):
        # split the old partitions at the new boundaries, then merge
        tmp = "repartition-split-" + token
        out = "repartition-merge-" + token
        dsk = repartition_divisions(
            df.divisions, divisions, df._name, tmp, out, force=force
        )
        graph = HighLevelGraph.from_collections(out, dsk, dependencies=[df])
        return new_dd_object(graph, out, df._meta, divisions)
    if is_dataframe_like(df) or is_series_like(df):
        # in-memory pandas object: shard it directly on the index
        name = "repartition-dataframe-" + token
        from .utils import shard_df_on_index

        shards = shard_df_on_index(df, divisions[1:-1])
        dsk = {(name, i): shard for i, shard in enumerate(shards)}
        return new_dd_object(dsk, name, df, divisions)
    raise ValueError("Data must be DataFrame or Series")
def _reduction_chunk(x, aca_chunk=None, **kwargs):
    """Apply the per-chunk reduction function to one partition."""
    result = aca_chunk(x, **kwargs)
    # Return a dataframe so that the concatenated version is also a dataframe
    if is_series_like(result):
        return result.to_frame().T
    return result
def _reduction_combine(x, aca_combine=None, **kwargs):
    """Combine intermediate reduction results from several chunks."""
    if isinstance(x, list):
        x = pd.Series(x)
    result = aca_combine(x, **kwargs)
    # Return a dataframe so that the concatenated version is also a dataframe
    if is_series_like(result):
        return result.to_frame().T
    return result
def _reduction_aggregate(x, aca_aggregate=None, **kwargs):
if isinstance(x, list):
x = pd.Series(x)
return aca_aggregate(x, **kwargs)
def idxmaxmin_chunk(x, fn=None, skipna=True):
    """Per-partition idxmax/idxmin: record both the extreme value and its label."""
    minmax = "max" if fn == "idxmax" else "min"
    if len(x) == 0:
        # empty partition: emit empty placeholder columns
        idx = value = pd.Series([], dtype="i8")
    else:
        idx = getattr(x, fn)(skipna=skipna)
        value = getattr(x, minmax)(skipna=skipna)
    if is_series_like(idx):
        return pd.DataFrame({"idx": idx, "value": value})
    # scalar result (Series input): wrap into a one-row frame
    return pd.DataFrame({"idx": [idx], "value": [value]})
def idxmaxmin_row(x, fn=None, skipna=True):
    """Reduce a frame of candidate (idx, value) rows to the single winner."""
    minmax = "max" if fn == "idxmax" else "min"
    if len(x) == 0:
        idx = value = pd.Series([], dtype="i8")
    else:
        candidates = x.set_index("idx")
        # the winning label is the index of the extreme candidate value
        idx = [getattr(candidates.value, fn)(skipna=skipna)]
        value = [getattr(candidates.value, minmax)(skipna=skipna)]
    return pd.DataFrame({"idx": idx, "value": value})
def idxmaxmin_combine(x, fn=None, skipna=True):
    """Combine per-partition idxmax/idxmin candidates, one winner per group."""
    if len(x) <= 1:
        return x
    grouped = x.groupby(level=0).apply(idxmaxmin_row, fn=fn, skipna=skipna)
    # drop the inner level added by the groupby-apply
    return grouped.reset_index(level=1, drop=True)
def idxmaxmin_agg(x, fn=None, skipna=True, scalar=False):
    """Final idxmax/idxmin step: extract the winning index label(s)."""
    labels = idxmaxmin_combine(x, fn, skipna=skipna)["idx"]
    if not len(labels):
        raise ValueError("attempt to get argmax of an empty sequence")
    if scalar:
        return labels[0]
    labels.name = None
    return labels
def _mode_aggregate(df, dropna):
value_count_series = df.sum()
max_val = value_count_series.max(skipna=dropna)
mode_series = (
value_count_series[value_count_series == max_val]
.index.to_series()
.sort_values()
.reset_index(drop=True)
)
return mode_series
def _count_aggregate(x):
return x.sum().astype("int64")
def safe_head(df, n):
    """``head(n)`` that warns (rather than failing) when fewer than ``n``
    rows are available."""
    r = M.head(df, n)
    if len(r) == n:
        return r
    warnings.warn(
        f"Insufficient elements for `head`. {n} elements requested, only {len(r)} "
        "elements available. Try passing larger `npartitions` to `head`."
    )
    return r
def maybe_shift_divisions(df, periods, freq):
    """Maybe shift divisions by periods of size freq

    Used to shift the divisions for the `shift` method. If freq isn't a fixed
    size (not anchored or relative), then the divisions are shifted
    appropriately. Otherwise the divisions are cleared.

    Parameters
    ----------
    df : dd.DataFrame, dd.Series, or dd.Index
    periods : int
        The number of periods to shift.
    freq : DateOffset, timedelta, or time rule string
        The frequency to shift by.
    """
    if isinstance(freq, str):
        freq = pd.tseries.frequencies.to_offset(freq)
    if isinstance(freq, pd.DateOffset):
        if freq.is_anchored() or not hasattr(freq, "delta"):
            # Can't infer divisions on relative or anchored offsets, as
            # divisions may now split identical index value.
            # (e.g. index_partitions = [[1, 2, 3], [3, 4, 5]])
            return df.clear_divisions()
    if df.known_divisions:
        # shift the division labels the same way the index will shift
        old = pd.Series(range(len(df.divisions)), index=df.divisions)
        shifted = old.shift(periods, freq=freq).index
        return df.__class__(df.dask, df._name, df._meta, shifted)
    return df
@wraps(pd.to_datetime)
def to_datetime(arg, meta=None, **kwargs):
    tz_kwarg = {"tz": "utc"} if kwargs.get("utc") else {}
    if meta is None:
        if isinstance(arg, Index):
            meta = pd.DatetimeIndex([], **tz_kwarg)
            meta.name = arg.name
        elif is_dataframe_like(arg) or is_series_like(arg):
            # sample timestamp carried on an index matching the input's
            meta = pd.Series([pd.Timestamp("2000", **tz_kwarg)])
            meta.index = meta.index.astype(arg.index.dtype)
            meta.index.name = arg.index.name
        else:
            raise NotImplementedError(
                "dask.dataframe.to_datetime does not support "
                "non-index-able arguments (like scalars)"
            )
    return map_partitions(pd.to_datetime, arg, meta=meta, **kwargs)
@wraps(pd.to_timedelta)
def to_timedelta(arg, unit="ns", errors="raise"):
    # one-element sample in the requested unit serves as output metadata
    sample = pd.Timedelta(1, unit=unit)
    meta = pd.Series([sample])
    return map_partitions(pd.to_timedelta, arg, unit=unit, errors=errors, meta=meta)
# `pd.isna` may be missing on very old pandas versions; only define the
# parallel wrapper when the backing pandas provides it.
if hasattr(pd, "isna"):
    @wraps(pd.isna)
    def isna(arg):
        # apply pd.isna element-wise to every partition of `arg`
        return map_partitions(pd.isna, arg)
def _repr_data_series(s, index):
    """A helper for creating the ``_repr_data`` property"""
    npartitions = len(index) - 1
    if is_categorical_dtype(s):
        # distinguish categoricals with known vs. unknown categories
        dtype = "category[known]" if has_known_categories(s) else "category[unknown]"
    else:
        dtype = str(s.dtype)
    # first row shows the dtype, remaining rows are ellipsis placeholders
    return pd.Series([dtype] + ["..."] * npartitions, index=index, name=s.name)
def has_parallel_type(x):
    """Does this object have a dask dataframe equivalent?"""
    # True when the dispatch registry resolves `x` to something other than
    # the fallback Scalar type
    return get_parallel_type(x) is not Scalar
def new_dd_object(dsk, name, meta, divisions, parent_meta=None):
    """Generic constructor for dask.dataframe objects.
    Decides the appropriate output class based on the type of `meta` provided.

    `parent_meta` is accepted for API compatibility but not used here.
    """
    if has_parallel_type(meta):
        return get_parallel_type(meta)(dsk, name, meta, divisions)
    elif is_arraylike(meta) and meta.shape:
        # array-like metadata: the result is a dask Array whose first
        # (partition) dimension has unknown chunk sizes
        import dask.array as da
        chunks = ((np.nan,) * (len(divisions) - 1),) + tuple(
            (d,) for d in meta.shape[1:]
        )
        if len(chunks) > 1:
            # multi-dimensional output: the graph keys need an extra index
            # per extra dimension, so rewrite the output layer in place
            layer = dsk.layers[name]
            if isinstance(layer, Blockwise):
                layer.new_axes["j"] = chunks[1][0]
                layer.output_indices = layer.output_indices + ("j",)
            else:
                # materialized layer: rename each (name, i) key to
                # (name, i, 0, ...) to match the array key convention
                suffix = (0,) * (len(chunks) - 1)
                for i in range(len(chunks[0])):
                    layer[(name, i) + suffix] = layer.pop((name, i))
        return da.Array(dsk, name=name, chunks=chunks, dtype=meta.dtype)
    else:
        return get_parallel_type(meta)(dsk, name, meta, divisions)
def partitionwise_graph(func, name, *args, **kwargs):
    """
    Apply a function partition-wise across arguments to create layer of a graph

    This applies a function, ``func``, in an embarrassingly parallel fashion
    across partitions/chunks in the provided arguments.  It handles Dataframes,
    Arrays, and scalars smoothly, and relies on the ``blockwise`` machinery
    to provide a nicely symbolic graph.

    It is most commonly used in other graph-building functions to create the
    appropriate layer of the resulting dataframe.

    Parameters
    ----------
    func: callable
    name: str
        descriptive name for the operation
    *args:
    **kwargs:

    Returns
    -------
    out: Blockwise graph

    Examples
    --------
    >>> subgraph = partitionwise_graph(function, x, y, z=123)  # doctest: +SKIP
    >>> layer = partitionwise_graph(function, df, x, z=123)  # doctest: +SKIP
    >>> graph = HighLevelGraph.from_collections(name, layer, dependencies=[df, x])  # doctest: +SKIP
    >>> result = new_dd_object(graph, name, metadata, df.divisions)  # doctest: +SKIP

    See Also
    --------
    map_partitions
    """
    pairs = []
    numblocks = {}
    for arg in args:
        if isinstance(arg, _Frame):
            # dask dataframe/series: one block per partition along "i"
            pairs.extend([arg._name, "i"])
            numblocks[arg._name] = (arg.npartitions,)
        elif isinstance(arg, Scalar):
            pairs.extend([arg._name, "i"])
            numblocks[arg._name] = (1,)
        elif isinstance(arg, Array):
            index_by_ndim = {0: "", 1: "i", 2: "ij"}
            if arg.ndim not in index_by_ndim:
                raise ValueError("Can't add multi-dimensional array to dataframes")
            pairs.extend([arg.name, index_by_ndim[arg.ndim]])
            numblocks[arg._name] = arg.numblocks
        elif isinstance(arg, BlockwiseDep):
            ndeps = len(arg.numblocks)
            if ndeps not in (1, 2):
                raise ValueError(
                    f"BlockwiseDep arg {arg!r} has {len(arg.numblocks)} dimensions; only 1 or 2 are supported."
                )
            pairs.extend([arg, "i" if ndeps == 1 else "ij"])
        else:
            # plain python object: broadcast to every partition
            pairs.extend([arg, None])
    return blockwise(
        func, name, "i", *pairs, numblocks=numblocks, concatenate=True, **kwargs
    )
def meta_warning(df):
    """
    Provide an informative message when the user is asked to provide metadata
    """
    if is_dataframe_like(df):
        dtype_hint = {k: str(v) for k, v in df.dtypes.to_dict().items()}
    elif is_series_like(df):
        dtype_hint = (df.name, str(df.dtype))
    else:
        dtype_hint = None
    msg = (
        "\nYou did not provide metadata, so Dask is running your "
        "function on a small dataset to guess output types. "
        "It is possible that Dask will guess incorrectly.\n"
        "To provide an explicit output types or to silence this message, "
        "please provide the `meta=` keyword, as described in the map or "
        "apply function that you are using."
    )
    if dtype_hint:
        # show a concrete before/after example with the inferred dtypes
        msg += (
            "\n"
            " Before: .apply(func)\n"
            " After: .apply(func, meta=%s)\n" % str(dtype_hint)
        )
    return msg
def prefix_reduction(f, ddf, identity, **kwargs):
    """Computes the prefix sums of f on df
    If df has partitions [P1, P2, ..., Pn], then returns the DataFrame with
    partitions [f(identity, P1),
                f(f(identity, P1), P2),
                f(f(f(identity, P1), P2), P3),
                ...]
    Parameters
    ----------
    f : callable
        an associative function f
    ddf : dd.DataFrame
    identity : pd.DataFrame
        an identity element of f, that is f(identity, df) = f(df, identity) = df

    Graph keys are (name, i, d, phase): leaf/block index i, block width d,
    and phase 0 (up-sweep, combining pairs into wider blocks) or phase 1
    (down-sweep, distributing partial results back down the tree).
    """
    dsk = dict()
    name = "prefix_reduction-" + tokenize(f, ddf, identity, **kwargs)
    meta = ddf._meta
    n = len(ddf.divisions) - 1
    divisions = [None] * (n + 1)
    # round the partition count up to a power of two, padding with `identity`
    N = 1
    while N < n:
        N *= 2
    for i in range(n):
        dsk[(name, i, 1, 0)] = (apply, f, [(ddf._name, i), identity], kwargs)
    for i in range(n, N):
        dsk[(name, i, 1, 0)] = identity
    # up-sweep: combine adjacent blocks of width d into blocks of width 2d
    d = 1
    while d < N:
        for i in range(0, N, 2 * d):
            dsk[(name, i + 2 * d - 1, 2 * d, 0)] = (
                apply,
                f,
                [(name, i + d - 1, d, 0), (name, i + 2 * d - 1, d, 0)],
                kwargs,
            )
        d *= 2
    # seed the down-sweep with the identity at the root
    dsk[(name, N - 1, N, 1)] = identity
    # down-sweep: each node passes its value left and combines with the
    # left child's up-sweep result on the right
    while d > 1:
        d //= 2
        for i in range(0, N, 2 * d):
            dsk[(name, i + d - 1, d, 1)] = (name, i + 2 * d - 1, 2 * d, 1)
            dsk[(name, i + 2 * d - 1, d, 1)] = (
                apply,
                f,
                [(name, i + 2 * d - 1, 2 * d, 1), (name, i + d - 1, d, 0)],
                kwargs,
            )
    # final pass: fold each partition's own contribution into its prefix
    for i in range(n):
        dsk[(name, i)] = (apply, f, [(name, i, 1, 1), identity], kwargs)
    graph = HighLevelGraph.from_collections(name, dsk, dependencies=[ddf])
    return new_dd_object(graph, name, meta, divisions)
def suffix_reduction(f, ddf, identity, **kwargs):
    """Computes the suffix sums of f on df

    If df has partitions [P1, P2, ..., Pn], then returns the DataFrame with
    partitions [f(P1, f(P2, ...f(Pn, identity)...)),
                f(P2, ...f(Pn, identity)...),
                ...f(Pn, identity)...,
                ...]

    Mirror image of ``prefix_reduction``: the partitions are consumed in
    reverse order and ``f``'s argument order is flipped, so the same
    up-sweep/down-sweep scan yields suffix (right-to-left) reductions.

    Parameters
    ----------
    f : callable
        an associative function f
    ddf : dd.DataFrame
    identity : pd.DataFrame
        an identity element of f, that is f(identity, df) = f(df, identity) = df
    **kwargs :
        keyword arguments forwarded to every invocation of ``f``
    """
    dsk = dict()
    name = "suffix_reduction-" + tokenize(f, ddf, identity, **kwargs)
    meta = ddf._meta
    n = len(ddf.divisions) - 1
    # Divisions of the result are unknown (the scan changes partition values).
    divisions = [None] * (n + 1)
    # Round the partition count up to the next power of two; the extra
    # leaves are padded with the identity element below.
    N = 1
    while N < n:
        N *= 2
    # Leaves are loaded in reverse partition order (n - 1 - i).
    for i in range(n):
        dsk[(name, i, 1, 0)] = (apply, f, [(ddf._name, n - 1 - i), identity], kwargs)
    for i in range(n, N):
        dsk[(name, i, 1, 0)] = identity
    # Up-sweep (note the flipped argument order relative to prefix_reduction).
    d = 1
    while d < N:
        for i in range(0, N, 2 * d):
            dsk[(name, i + 2 * d - 1, 2 * d, 0)] = (
                apply,
                f,
                [(name, i + 2 * d - 1, d, 0), (name, i + d - 1, d, 0)],
                kwargs,
            )
        d *= 2
    # Down-sweep: seed the root with the identity, then propagate suffixes.
    dsk[(name, N - 1, N, 1)] = identity
    while d > 1:
        d //= 2
        for i in range(0, N, 2 * d):
            dsk[(name, i + d - 1, d, 1)] = (name, i + 2 * d - 1, 2 * d, 1)
            dsk[(name, i + 2 * d - 1, d, 1)] = (
                apply,
                f,
                [(name, i + d - 1, d, 0), (name, i + 2 * d - 1, 2 * d, 1)],
                kwargs,
            )
    # Final pass: un-reverse the indexing so output partition i matches Pi.
    for i in range(n):
        dsk[(name, i)] = (apply, f, [(name, n - 1 - i, 1, 1), identity], kwargs)
    graph = HighLevelGraph.from_collections(name, dsk, dependencies=[ddf])
    return new_dd_object(graph, name, meta, divisions)
def mapseries(base_chunk, concat_map):
    """Apply ``.map`` on one base partition using the concatenated mapping."""
    mapped = base_chunk.map(concat_map)
    return mapped
def mapseries_combine(index, concat_result):
    """Sort the concatenated mapped shards and realign them on ``index``."""
    sorted_result = concat_result.sort_index()
    return index.to_series().map(sorted_result)
def series_map(base_series, map_series):
    """Map the values of ``base_series`` via the lookup table ``map_series``.

    Builds a shuffle-style task graph: every partition of both series is
    split into ``split_out`` hash shards (via ``hash_shard`` — presumably
    hashing values of ``base_series`` and the index of ``map_series`` into
    the same buckets; confirm against dask's shuffle helpers), matching
    shards are mapped together, and the per-partition results are re-aligned
    on the original index by ``mapseries_combine``.
    """
    npartitions = base_series.npartitions
    split_out = map_series.npartitions
    dsk = {}
    # Split each base partition into `split_out` hash shards.
    base_token_key = tokenize(base_series, split_out)
    base_split_prefix = f"base-split-{base_token_key}"
    base_shard_prefix = f"base-shard-{base_token_key}"
    for i, key in enumerate(base_series.__dask_keys__()):
        dsk[(base_split_prefix, i)] = (hash_shard, key, split_out)
        for j in range(split_out):
            dsk[(base_shard_prefix, 0, i, j)] = (getitem, (base_split_prefix, i), j)
    # Split each map partition the same way (sharding on the index).
    map_token_key = tokenize(map_series)
    map_split_prefix = f"map-split-{map_token_key}"
    map_shard_prefix = f"map-shard-{map_token_key}"
    for i, key in enumerate(map_series.__dask_keys__()):
        dsk[(map_split_prefix, i)] = (
            hash_shard,
            key,
            split_out,
            split_out_on_index,
            None,
        )
        for j in range(split_out):
            dsk[(map_shard_prefix, 0, i, j)] = (getitem, (map_split_prefix, i), j)
    # Map each base shard against the concatenation of all map shards that
    # share its hash bucket j.
    token_key = tokenize(base_series, map_series)
    map_prefix = f"map-series-{token_key}"
    for i in range(npartitions):
        for j in range(split_out):
            dsk[(map_prefix, i, j)] = (
                mapseries,
                (base_shard_prefix, 0, i, j),
                (_concat, [(map_shard_prefix, 0, k, j) for k in range(split_out)]),
            )
    # Recombine the mapped shards of each partition in original index order.
    final_prefix = f"map-series-combine-{token_key}"
    for i, key in enumerate(base_series.index.__dask_keys__()):
        dsk[(final_prefix, i)] = (
            mapseries_combine,
            key,
            (_concat, [(map_prefix, i, j) for j in range(split_out)]),
        )
    # Result carries map_series' values but base_series' index and divisions.
    meta = map_series._meta.copy()
    meta.index = base_series._meta.index
    meta = make_meta(meta)
    dependencies = [base_series, map_series, base_series.index]
    graph = HighLevelGraph.from_collections(
        final_prefix, dsk, dependencies=dependencies
    )
    divisions = list(base_series.divisions)
    return new_dd_object(graph, final_prefix, meta, divisions)
| 34.437432 | 111 | 0.581087 |
ace19a32ae4506f774aacee827dd1f4944f0fed9 | 1,431 | py | Python | nnvm/tests/python/frontend/onnx/model_zoo/__init__.py | rah9eu/p3 | 530628be7b7a8dd3e6199c3bebebdbf104005e5f | [
"Apache-2.0"
] | 22 | 2019-02-20T12:42:20.000Z | 2021-12-25T06:09:46.000Z | nnvm/tests/python/frontend/onnx/model_zoo/__init__.py | rah9eu/p3 | 530628be7b7a8dd3e6199c3bebebdbf104005e5f | [
"Apache-2.0"
] | 4 | 2019-04-01T07:36:04.000Z | 2022-03-24T03:11:26.000Z | nnvm/tests/python/frontend/onnx/model_zoo/__init__.py | rah9eu/p3 | 530628be7b7a8dd3e6199c3bebebdbf104005e5f | [
"Apache-2.0"
] | 7 | 2019-03-20T16:04:37.000Z | 2021-04-28T18:40:11.000Z | """Store for onnx examples and common models."""
from __future__ import absolute_import as _abs
import os
import logging
from .super_resolution import get_super_resolution
def _download(url, filename, overwrite=False):
if os.path.isfile(filename) and not overwrite:
logging.debug('File %s existed, skip.', filename)
return
logging.debug('Downloading from url %s to %s', url, filename)
try:
import urllib.request
urllib.request.urlretrieve(url, filename)
except:
import urllib
urllib.urlretrieve(url, filename)
def _as_abs_path(fname):
cur_dir = os.path.abspath(os.path.dirname(__file__))
return os.path.join(cur_dir, fname)
# Mapping from local file name to the gist URL hosting each example model.
URLS = {
    'super_resolution.onnx': 'https://gist.github.com/zhreshold/bcda4716699ac97ea44f791c24310193/raw/93672b029103648953c4e5ad3ac3aadf346a4cdc/super_resolution_0.2.onnx',
    'squeezenet1_1.onnx': 'https://gist.github.com/zhreshold/bcda4716699ac97ea44f791c24310193/raw/93672b029103648953c4e5ad3ac3aadf346a4cdc/squeezenet1_1_0.2.onnx',
    'lenet.onnx': 'https://gist.github.com/zhreshold/bcda4716699ac97ea44f791c24310193/raw/93672b029103648953c4e5ad3ac3aadf346a4cdc/lenet_0.2.onnx'}
# download and add paths
for k, v in URLS.items():
    name = k.split('.')[0]
    path = _as_abs_path(k)
    _download(v, path, False)
    # At module level locals() is the module namespace, so this dynamically
    # exposes `super_resolution`, `squeezenet1_1` and `lenet` as module
    # attributes holding the local file paths.
    locals()[name] = path
# symbol for graph comparison
super_resolution_sym = get_super_resolution()
| 37.657895 | 169 | 0.745632 |
ace19a429f7969d2ddd59a425395e2ca74ce2d6d | 11,486 | py | Python | crabageprediction/venv/Lib/site-packages/numpy/core/_machar.py | 13rianlucero/CrabAgePrediction | 92bc7fbe1040f49e820473e33cc3902a5a7177c7 | [
"MIT"
] | 3 | 2021-12-01T11:24:52.000Z | 2022-03-06T09:07:25.000Z | crabageprediction/venv/Lib/site-packages/numpy/core/_machar.py | 13rianlucero/CrabAgePrediction | 92bc7fbe1040f49e820473e33cc3902a5a7177c7 | [
"MIT"
] | 75 | 2017-03-12T21:19:04.000Z | 2021-11-27T20:14:36.000Z | crabageprediction/venv/Lib/site-packages/numpy/core/_machar.py | 13rianlucero/CrabAgePrediction | 92bc7fbe1040f49e820473e33cc3902a5a7177c7 | [
"MIT"
] | 6 | 2016-08-04T13:37:21.000Z | 2021-01-31T07:42:24.000Z | """
Machine arithmetic - determine the parameters of the
floating-point arithmetic system
Author: Pearu Peterson, September 2003
"""
__all__ = ['MachAr']
from numpy.core.fromnumeric import any
from numpy.core._ufunc_config import errstate
from numpy.core.overrides import set_module
# Need to speed this up...especially for longfloat
# Deprecated 2021-10-20, NumPy 1.22
@set_module('numpy')
class MachAr:
    """
    Diagnosing machine parameters.

    Attributes
    ----------
    ibeta : int
        Radix in which numbers are represented.
    it : int
        Number of base-`ibeta` digits in the floating point mantissa M.
    machep : int
        Exponent of the smallest (most negative) power of `ibeta` that,
        added to 1.0, gives something different from 1.0
    eps : float
        Floating-point number ``beta**machep`` (floating point precision)
    negep : int
        Exponent of the smallest power of `ibeta` that, subtracted
        from 1.0, gives something different from 1.0.
    epsneg : float
        Floating-point number ``beta**negep``.
    iexp : int
        Number of bits in the exponent (including its sign and bias).
    minexp : int
        Smallest (most negative) power of `ibeta` consistent with there
        being no leading zeros in the mantissa.
    xmin : float
        Floating-point number ``beta**minexp`` (the smallest [in
        magnitude] positive floating point number with full precision).
    maxexp : int
        Smallest (positive) power of `ibeta` that causes overflow.
    xmax : float
        ``(1-epsneg) * beta**maxexp`` (the largest [in magnitude]
        usable floating value).
    irnd : int
        In ``range(6)``, information on what kind of rounding is done
        in addition, and on how underflow is handled.
    ngrd : int
        Number of 'guard digits' used when truncating the product
        of two mantissas to fit the representation.
    epsilon : float
        Same as `eps`.
    tiny : float
        An alias for `smallest_normal`, kept for backwards compatibility.
    huge : float
        Same as `xmax`.
    precision : float
        ``- int(-log10(eps))``
    resolution : float
        ``- 10**(-precision)``
    smallest_normal : float
        The smallest positive floating point number with 1 as leading bit in
        the mantissa following IEEE-754. Same as `xmin`.
    smallest_subnormal : float
        The smallest positive floating point number with 0 as leading bit in
        the mantissa following IEEE-754.

    Parameters
    ----------
    float_conv : function, optional
        Function that converts an integer or integer array to a float
        or float array. Default is `float`.
    int_conv : function, optional
        Function that converts a float or float array to an integer or
        integer array. Default is `int`.
    float_to_float : function, optional
        Function that converts a float array to float. Default is `float`.
        Note that this does not seem to do anything useful in the current
        implementation.
    float_to_str : function, optional
        Function that converts a single float to a string. Default is
        ``lambda v:'%24.16e' %v``.
    title : str, optional
        Title that is printed in the string representation of `MachAr`.

    See Also
    --------
    finfo : Machine limits for floating point types.
    iinfo : Machine limits for integer types.

    References
    ----------
    .. [1] Press, Teukolsky, Vetterling and Flannery,
       "Numerical Recipes in C++," 2nd ed,
       Cambridge University Press, 2002, p. 31.
    """
    def __init__(self, float_conv=float,int_conv=int,
                 float_to_float=float,
                 float_to_str=lambda v:'%24.16e' % v,
                 title='Python floating point number'):
        """
        float_conv - convert integer to float (array)
        int_conv - convert float (array) to integer
        float_to_float - convert float array to float
        float_to_str - convert array float to str
        title - description of used floating point numbers
        """
        # We ignore all errors here because we are purposely triggering
        # underflow to detect the properties of the running arch.
        with errstate(under='ignore'):
            self._do_init(float_conv, int_conv, float_to_float, float_to_str, title)
    def _do_init(self, float_conv, int_conv, float_to_float, float_to_str, title):
        """Empirically probe the float type's radix, precision and range.

        This follows the classic MACHAR algorithm (Numerical Recipes /
        Cody): every property is discovered purely through arithmetic on
        values produced by `float_conv`, so it works for any float-like
        dtype. Each probing loop is bounded by `max_iterN` and raises
        RuntimeError if it fails to converge.
        """
        max_iterN = 10000
        msg = "Did not converge after %d tries with %s"
        one = float_conv(1)
        two = one + one
        zero = one - one
        # Do we really need to do this? Aren't they 2 and 2.0?
        # Determine ibeta and beta
        # Double `a` until adding 1 is absorbed (a is past the mantissa width).
        a = one
        for _ in range(max_iterN):
            a = a + a
            temp = a + one
            temp1 = temp - a
            if any(temp1 - one != zero):
                break
        else:
            raise RuntimeError(msg % (_, one.dtype))
        b = one
        for _ in range(max_iterN):
            b = b + b
            temp = a + b
            itemp = int_conv(temp-a)
            if any(itemp != 0):
                break
        else:
            raise RuntimeError(msg % (_, one.dtype))
        ibeta = itemp
        beta = float_conv(ibeta)
        # Determine it and irnd
        it = -1
        b = one
        for _ in range(max_iterN):
            it = it + 1
            b = b * beta
            temp = b + one
            temp1 = temp - b
            if any(temp1 - one != zero):
                break
        else:
            raise RuntimeError(msg % (_, one.dtype))
        betah = beta / two
        a = one
        for _ in range(max_iterN):
            a = a + a
            temp = a + one
            temp1 = temp - a
            if any(temp1 - one != zero):
                break
        else:
            raise RuntimeError(msg % (_, one.dtype))
        temp = a + betah
        irnd = 0
        if any(temp-a != zero):
            irnd = 1
        tempa = a + beta
        temp = tempa + betah
        if irnd == 0 and any(temp-tempa != zero):
            irnd = 2
        # Determine negep and epsneg
        negep = it + 3
        betain = one / beta
        a = one
        for i in range(negep):
            a = a * betain
        b = a
        for _ in range(max_iterN):
            temp = one - a
            if any(temp-one != zero):
                break
            a = a * beta
            negep = negep - 1
            # Prevent infinite loop on PPC with gcc 4.0:
            if negep < 0:
                raise RuntimeError("could not determine machine tolerance "
                                   "for 'negep', locals() -> %s" % (locals()))
        else:
            raise RuntimeError(msg % (_, one.dtype))
        negep = -negep
        epsneg = a
        # Determine machep and eps
        machep = - it - 3
        a = b
        for _ in range(max_iterN):
            temp = one + a
            if any(temp-one != zero):
                break
            a = a * beta
            machep = machep + 1
        else:
            raise RuntimeError(msg % (_, one.dtype))
        eps = a
        # Determine ngrd
        ngrd = 0
        temp = one + eps
        if irnd == 0 and any(temp*one - one != zero):
            ngrd = 1
        # Determine iexp
        i = 0
        k = 1
        z = betain
        t = one + eps
        nxres = 0
        for _ in range(max_iterN):
            y = z
            z = y*y
            a = z*one # Check here for underflow
            temp = z*t
            if any(a+a == zero) or any(abs(z) >= y):
                break
            temp1 = temp * betain
            if any(temp1*beta == z):
                break
            i = i + 1
            k = k + k
        else:
            raise RuntimeError(msg % (_, one.dtype))
        if ibeta != 10:
            iexp = i + 1
            mx = k + k
        else:
            # Decimal machines need a different exponent-width estimate.
            iexp = 2
            iz = ibeta
            while k >= iz:
                iz = iz * ibeta
                iexp = iexp + 1
            mx = iz + iz - 1
        # Determine minexp and xmin
        for _ in range(max_iterN):
            xmin = y
            y = y * betain
            a = y * one
            temp = y * t
            if any((a + a) != zero) and any(abs(y) < xmin):
                k = k + 1
                temp1 = temp * betain
                if any(temp1*beta == y) and any(temp != y):
                    nxres = 3
                    xmin = y
                    break
            else:
                break
        else:
            raise RuntimeError(msg % (_, one.dtype))
        minexp = -k
        # Determine maxexp, xmax
        if mx <= k + k - 3 and ibeta != 10:
            mx = mx + mx
            iexp = iexp + 1
        maxexp = mx + minexp
        irnd = irnd + nxres
        if irnd >= 2:
            maxexp = maxexp - 2
        i = maxexp + minexp
        if ibeta == 2 and not i:
            maxexp = maxexp - 1
        if i > 20:
            maxexp = maxexp - 1
        if any(a != y):
            maxexp = maxexp - 2
        xmax = one - epsneg
        if any(xmax*one != xmax):
            xmax = one - beta*epsneg
        xmax = xmax / (xmin*beta*beta*beta)
        i = maxexp + minexp + 3
        for j in range(i):
            if ibeta == 2:
                xmax = xmax + xmax
            else:
                xmax = xmax * beta
        smallest_subnormal = abs(xmin / beta ** (it))
        # Publish the discovered parameters as instance attributes.
        self.ibeta = ibeta
        self.it = it
        self.negep = negep
        self.epsneg = float_to_float(epsneg)
        self._str_epsneg = float_to_str(epsneg)
        self.machep = machep
        self.eps = float_to_float(eps)
        self._str_eps = float_to_str(eps)
        self.ngrd = ngrd
        self.iexp = iexp
        self.minexp = minexp
        self.xmin = float_to_float(xmin)
        self._str_xmin = float_to_str(xmin)
        self.maxexp = maxexp
        self.xmax = float_to_float(xmax)
        self._str_xmax = float_to_str(xmax)
        self.irnd = irnd
        self.title = title
        # Commonly used parameters
        self.epsilon = self.eps
        self.tiny = self.xmin
        self.huge = self.xmax
        self.smallest_normal = self.xmin
        self.smallest_subnormal = float_to_float(smallest_subnormal)
        # Local import keeps the module import-time dependencies minimal.
        import math
        self.precision = int(-math.log10(float_to_float(self.eps)))
        ten = two + two + two + two + two
        resolution = ten ** (-self.precision)
        self.resolution = float_to_float(resolution)
        self._str_resolution = float_to_str(resolution)
    def __str__(self):
        """Return a human-readable table of the machine parameters."""
        fmt = (
            'Machine parameters for %(title)s\n'
            '---------------------------------------------------------------------\n'
            'ibeta=%(ibeta)s it=%(it)s iexp=%(iexp)s ngrd=%(ngrd)s irnd=%(irnd)s\n'
            'machep=%(machep)s eps=%(_str_eps)s (beta**machep == epsilon)\n'
            'negep =%(negep)s epsneg=%(_str_epsneg)s (beta**epsneg)\n'
            'minexp=%(minexp)s xmin=%(_str_xmin)s (beta**minexp == tiny)\n'
            'maxexp=%(maxexp)s xmax=%(_str_xmax)s ((1-epsneg)*beta**maxexp == huge)\n'
            'smallest_normal=%(smallest_normal)s '
            'smallest_subnormal=%(smallest_subnormal)s\n'
            '---------------------------------------------------------------------\n'
            )
        return fmt % self.__dict__
if __name__ == '__main__':
    # Ad-hoc smoke test: print the machine parameters for Python floats.
    print(MachAr())
| 32.264045 | 88 | 0.52046 |
ace19a5d5bc6e441ee9d9d56ec9eb20c314c8474 | 592 | py | Python | eth/tools/fixtures/__init__.py | shreyasnbhat/py-evm | cd31d83185e102a7cb2f11e2f67923b069ee9cef | [
"MIT"
] | 1 | 2018-12-09T11:56:53.000Z | 2018-12-09T11:56:53.000Z | eth/tools/fixtures/__init__.py | shreyasnbhat/py-evm | cd31d83185e102a7cb2f11e2f67923b069ee9cef | [
"MIT"
] | 8 | 2020-06-05T21:36:23.000Z | 2022-02-12T12:24:00.000Z | eth/tools/fixtures/__init__.py | shreyasnbhat/py-evm | cd31d83185e102a7cb2f11e2f67923b069ee9cef | [
"MIT"
] | 2 | 2019-09-05T01:31:56.000Z | 2019-09-17T09:09:16.000Z | from .loading import ( # noqa: F401
find_fixtures,
filter_fixtures,
load_fixture,
)
from .generation import ( # noqa: F401
generate_fixture_tests,
)
from .helpers import ( # noqa: F401
new_chain_from_fixture,
genesis_params_from_fixture,
apply_fixture_block_to_chain,
setup_account_db,
should_run_slow_tests,
verify_account_db,
)
from eth.tools._utils.normalization import ( # noqa: F401
normalize_block,
normalize_blockchain_fixtures,
normalize_statetest_fixture,
normalize_transactiontest_fixture,
normalize_vmtest_fixture,
)
| 24.666667 | 58 | 0.75 |
ace19a84704500cece035535f7f6efba7a4120cf | 18,742 | py | Python | py/agentflow/spec_utils.py | wx-b/dm_robotics | 5d407622360ccf7f0b4b50bcee84589e2cfd0783 | [
"Apache-2.0"
] | 128 | 2021-09-08T18:39:39.000Z | 2022-03-27T11:29:05.000Z | py/agentflow/spec_utils.py | wx-b/dm_robotics | 5d407622360ccf7f0b4b50bcee84589e2cfd0783 | [
"Apache-2.0"
] | 7 | 2021-10-11T14:26:17.000Z | 2022-03-15T17:26:45.000Z | py/agentflow/spec_utils.py | LaudateCorpus1/dm_robotics | 647bc810788c74972c1684a8d2e4d2dfd2791485 | [
"Apache-2.0"
] | 8 | 2021-09-08T18:25:49.000Z | 2022-02-21T23:45:16.000Z | # Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for dealing with action and observation specifications.
These specifications can be nested lists and dicts of `Array` and its
subclass `BoundedArray`.
"""
from typing import Any, Mapping, Optional, Sequence, Tuple, Type, TypeVar
from absl import flags
from absl import logging
import dm_env
from dm_env import specs
import numpy as np
# Internal profiling
FLAGS = flags.FLAGS
# Defaulting to True, to prefer failing fast and closer to the bug.
flags.DEFINE_boolean('debug_specs', True,
'Debugging switch for checking values match specs.')
flags.DEFINE_integer('max_validations', 1000,
'Stop validating after this many calls.')
_validation_count = 0
ObservationSpec = Mapping[str, specs.Array]
ObservationValue = Mapping[str, np.ndarray]
ScalarOrArray = TypeVar('ScalarOrArray', np.floating, np.ndarray)
def debugging_flag() -> bool:
  """Whether spec-validation debugging is enabled (--debug_specs flag)."""
  return bool(FLAGS.debug_specs)
class TimeStepSpec(object):
  """Type specification for a TimeStep.

  Bundles the observation, reward and discount specs that together describe
  every `dm_env.TimeStep` an environment can emit.
  """

  def __init__(self, observation_spec: ObservationSpec,
               reward_spec: specs.Array, discount_spec: specs.Array):
    """Initializes the spec.

    Args:
      observation_spec: Mapping from observation name to its array spec.
      reward_spec: Spec for the reward field.
      discount_spec: Spec for the discount field.
    """
    self._observation_spec = observation_spec
    self._reward_spec = reward_spec
    self._discount_spec = discount_spec

  @property
  def observation_spec(self) -> Mapping[str, specs.Array]:
    # Return a shallow copy so callers cannot mutate the internal mapping.
    return dict(self._observation_spec)

  @property
  def reward_spec(self) -> specs.Array:
    return self._reward_spec

  @property
  def discount_spec(self) -> specs.Array:
    return self._discount_spec

  def validate(self, timestep: dm_env.TimeStep):
    """Raises ValueError if any field of `timestep` violates this spec."""
    validate_observation(self.observation_spec, timestep.observation)
    validate(self.reward_spec, timestep.reward)
    validate(self.discount_spec, timestep.discount)

  def minimum(self) -> dm_env.TimeStep:
    """Return a valid timestep with all minimum values."""
    reward = minimum(self._reward_spec)
    discount = minimum(self._discount_spec)
    observation = {k: minimum(v) for k, v in self._observation_spec.items()}
    return dm_env.TimeStep(
        step_type=dm_env.StepType.MID,
        observation=observation,
        discount=discount,
        reward=reward)

  def maximum(self) -> dm_env.TimeStep:
    # Bug fix: this docstring previously said "minimum" (copy-paste error).
    """Return a valid timestep with all maximum values."""
    reward = maximum(self._reward_spec)
    discount = maximum(self._discount_spec)
    observation = {k: maximum(v) for k, v in self._observation_spec.items()}
    return dm_env.TimeStep(
        step_type=dm_env.StepType.MID,
        observation=observation,
        discount=discount,
        reward=reward)

  def replace(self,
              observation_spec: Optional[Mapping[str, specs.Array]] = None,
              reward_spec: Optional[specs.Array] = None,
              discount_spec: Optional[specs.Array] = None) -> 'TimeStepSpec':
    """Return a new TimeStepSpec with specified fields replaced."""
    if observation_spec is None:
      observation_spec = self._observation_spec
    if reward_spec is None:
      reward_spec = self._reward_spec
    if discount_spec is None:
      discount_spec = self._discount_spec
    return TimeStepSpec(
        observation_spec=observation_spec,
        reward_spec=reward_spec,
        discount_spec=discount_spec)

  def __eq__(self, other):
    if not isinstance(other, TimeStepSpec):
      return False
    # All the properties of the spec must be equal.
    if self.reward_spec != other.reward_spec:
      return False
    if self.discount_spec != other.discount_spec:
      return False
    if len(self.observation_spec) != len(other.observation_spec):
      return False
    for key in self.observation_spec:
      if (key not in other.observation_spec or
          self.observation_spec[key] != other.observation_spec[key]):
        return False
    return True
def minimum(spec: specs.Array):
  """Returns the smallest value(s) admitted by `spec`, elementwise."""
  if hasattr(spec, 'minimum'):
    # Bounded spec: honour its declared lower bound, clipped into range.
    return clip(np.asarray(spec.minimum, dtype=spec.dtype), spec)
  # Unbounded spec: fall back to the dtype's representable minimum.
  info = np.iinfo if np.issubdtype(spec.dtype, np.integer) else np.finfo
  return np.full(spec.shape, info(spec.dtype).min)
def maximum(spec: specs.Array):
  """Returns the largest value(s) admitted by `spec`, elementwise."""
  if hasattr(spec, 'maximum'):
    # Bounded spec: honour its declared upper bound, clipped into range.
    return clip(np.asarray(spec.maximum, dtype=spec.dtype), spec)
  # Unbounded spec: fall back to the dtype's representable maximum.
  info = np.iinfo if np.issubdtype(spec.dtype, np.integer) else np.finfo
  return np.full(spec.shape, info(spec.dtype).max)
def zeros(action_spec: specs.Array) -> np.ndarray:
  """Create a zero value for this Spec."""
  zero_action = np.zeros(shape=action_spec.shape, dtype=action_spec.dtype)
  return zero_action
def cast(spec: specs.Array, value: ScalarOrArray) -> ScalarOrArray:
  """Cast a value (scalar or array) to the dtype demanded by `spec`."""
  return spec.dtype.type(value) if np.isscalar(value) else value.astype(spec.dtype)
def clip(value: np.ndarray, spec: specs.BoundedArray) -> np.ndarray:
  """Clips the given value according to the spec.

  For floating-point specs the bounds are additionally shrunk inwards by a
  few ulps so that the clipped value compares strictly within the bounds.

  Args:
    value: Array to clip.
    spec: Bounded spec providing `minimum`, `maximum` and `dtype`.

  Returns:
    `value` clipped to `[minimum + eps, maximum - eps]`.

  Raises:
    ValueError: If `value` is None.
  """
  if value is None:
    raise ValueError('no value')
  # Bug fix: `isinstance(spec.dtype, np.inexact)` was always False because
  # `spec.dtype` is a `np.dtype` instance, not a scalar type, so the epsilon
  # shrink below never triggered. `np.issubdtype` performs the intended check.
  if np.issubdtype(spec.dtype, np.inexact):
    eps = np.finfo(spec.dtype).eps * 5.0
  else:
    eps = 0
  min_bound = np.array(spec.minimum, dtype=spec.dtype)
  max_bound = np.array(spec.maximum, dtype=spec.dtype)
  return np.clip(value, min_bound + eps, max_bound - eps)
def shrink_to_fit(
    value: np.ndarray,
    spec: specs.BoundedArray,
    ignore_nan: Optional[bool] = None,
) -> np.ndarray:
  """Scales the value towards zero to fit within spec min and max values.

  Clipping is done after scaling to ensure there are no values that are very
  slightly (say 10e-8) out of range.

  This, by nature, assumes that min <= 0 <= max for the spec.

  Args:
    value: np.ndarray to scale towards zero.
    spec: Specification for value to scale and clip.
    ignore_nan: If True, NaN values will not fail validation. If None, this is
      determined by the size of `value`, so that large values are not checked.

  Returns:
    Scaled and clipped value.

  Raises:
    ValueError: On missing values or high-dimensional values.
  """
  if value is None:
    raise ValueError('no value')
  if spec is None:
    raise ValueError('no spec')
  if not isinstance(value, np.ndarray):
    raise ValueError('value not numpy array ({})'.format(type(value)))
  if len(value.shape) > 1:
    raise ValueError('2d values not yet handled')
  if not isinstance(spec, specs.BoundedArray):
    raise ValueError('Cannot scale to spec: {})'.format(spec))
  if np.any(spec.minimum > 0) or np.any(spec.maximum < 0):
    raise ValueError('Cannot scale to spec, due to bounds: {})'.format(spec))
  # Find the single scale factor in (0, 1] that brings every out-of-range
  # element back inside its bound; scaling uniformly preserves direction.
  factor = 1.0
  for val, min_val, max_val in zip(value, spec.minimum, spec.maximum):
    if val < min_val:
      new_factor = min_val / val
      if new_factor < factor and new_factor > 0:
        factor = new_factor
    if val > max_val:
      new_factor = max_val / val
      if new_factor < factor and new_factor > 0:
        factor = new_factor
  scaled = (value * factor).astype(spec.dtype)
  # Clip to remove any tiny numerical overshoot left after scaling.
  clipped = clip(scaled, spec)
  try:
    validate(spec, clipped, ignore_nan)
  except ValueError:
    # Best-effort: log and return the clipped value rather than fail.
    logging.error('Failed to scale %s to %s. Got: %s', value, spec, clipped)
  return clipped
def merge_specs(spec_list: Sequence[specs.BoundedArray]):
  """Merges a list of BoundedArray into one.

  Args:
    spec_list: Flat (1-D) bounded specs to concatenate.

  Returns:
    A single BoundedArray whose bounds are the concatenation of the inputs'
    bounds, with a tab-joined name and a common promoted dtype.

  Raises:
    ValueError: If any spec is multi-dimensional.
  """
  # Check all specs are flat.
  for spec in spec_list:
    if len(spec.shape) > 1:
      raise ValueError('Not merging multi-dimensional spec: {}'.format(spec))
  # Filter out no-op specs with no actuators.
  spec_list = [spec for spec in spec_list if spec.shape and spec.shape[0]]
  # Bug fix / modernization: `np.find_common_type` is deprecated and removed
  # in NumPy 2.0; `np.result_type` applies the same promotion rules for a
  # list of dtypes.
  if spec_list:
    dtype = np.result_type(*[spec.dtype for spec in spec_list])
  else:
    dtype = np.dtype(np.float64)  # Nothing to promote; default to float64.
  num_actions = 0
  name = ''
  mins = np.array([], dtype=dtype)
  maxs = np.array([], dtype=dtype)
  for i, spec in enumerate(spec_list):
    num_actions += spec.shape[0]
    if name:
      name += '\t'
    name += spec.name or f'spec_{i}'
    mins = np.concatenate([mins, spec.minimum])
    maxs = np.concatenate([maxs, spec.maximum])
  return specs.BoundedArray(
      shape=(num_actions,), dtype=dtype, minimum=mins, maximum=maxs, name=name)
def merge_primitives(values: Sequence[np.ndarray],
                     default_value: Optional[float] = None) -> np.ndarray:
  """Merge the given 1-D arrays, treating NaN entries as missing.

  Each output element is taken from the unique input array that has a
  non-NaN value at that index.

  Args:
    values: The values to merge.
    default_value: A default value to replace NaNs with, after merging.

  Returns:
    A merged value.

  Raises:
    ValueError: On ambiguity, shape/dtype mismatch, or no values.
      An ambiguity means >1 arrays have a non-nan value in the same index.
  """
  if not values:
    raise ValueError('No values to merge')
  shape = values[0].shape
  dtype = values[0].dtype
  if len(shape) != 1:
    raise ValueError('Not implemented for multi-dimensional arrays')
  merged = np.full(shape, np.nan, dtype=dtype)
  for value in values:
    if value.shape != shape:
      raise ValueError('Shape mismatch, expect {} got {}. All: {}'.format(
          shape, value.shape, [v.shape for v in values]))
    if value.dtype != dtype:
      raise ValueError('dtype mismatch, expect {} got {}. All: {}'.format(
          dtype, value.dtype, [v.dtype for v in values]))
    for idx, entry in enumerate(value):
      if np.isnan(entry):
        continue
      if not np.isnan(merged[idx]):
        raise ValueError('Ambiguous merge at index {} with values: {}'.format(
            idx, values))
      merged[idx] = entry
  if default_value is not None:
    merged[np.isnan(merged)] = default_value
  return merged
def merge_in_default(value, default_value):
  """Recursively fill missing parts of `value` from `default_value`.

  Dicts gain missing keys; lists are extended and merged elementwise;
  scalars keep `value` unless it is None.
  """
  if value is None:
    return default_value
  if isinstance(default_value, dict):
    for key, default_entry in default_value.items():
      value[key] = merge_in_default(value.get(key, None), default_entry)
    return value
  if isinstance(value, list):
    for idx, default_entry in enumerate(default_value):
      if idx >= len(value):
        value.append(default_entry)
      else:
        value[idx] = merge_in_default(value[idx], default_entry)
    return value
  return value
def validate_timestep(spec: TimeStepSpec, timestep: dm_env.TimeStep):
  """Raises ValueError if any field of `timestep` violates `spec`."""
  validate_observation(spec.observation_spec, timestep.observation)
  validate(spec.reward_spec, timestep.reward)
  validate(spec.discount_spec, timestep.discount)
def ensure_spec_compatibility(sub_specs: TimeStepSpec,
                              full_specs: TimeStepSpec):
  """Validates compatibility of 2 timestep specs.

  For the observations we only check inclusion of sub_specs in full_specs.

  Args:
    sub_specs: The reduced spec whose fields must all appear, identically,
      in `full_specs`.
    full_specs: The full spec to check against.

  Raises:
    ValueError: If the discount_spec, the reward_spec or one of the observation
      spec do not match.
    KeyError: If an observation in sub_specs is not in full_specs.
  """
  if sub_specs.discount_spec != full_specs.discount_spec:
    raise ValueError('Non matching discount specs.\nDiscount_sub_spec : {} \n'
                     'Discount_full_specs: {}\n'.format(
                         sub_specs.discount_spec, full_specs.discount_spec))
  if sub_specs.reward_spec != full_specs.reward_spec:
    raise ValueError('Non matching reward specs.\nReward_sub_spec : {} \n'
                     'Reward_spec: {}\n'.format(sub_specs.reward_spec,
                                                full_specs.reward_spec))
  # Observations: every sub-spec key must exist and match exactly; extra
  # observations in full_specs are allowed.
  for obs_spec_key, obs_spec in sub_specs.observation_spec.items():
    if obs_spec_key not in full_specs.observation_spec:
      raise KeyError('Missing observation key {} in spec.'.format(obs_spec_key))
    if obs_spec != full_specs.observation_spec[obs_spec_key]:
      raise ValueError('Non matching observation specs for key {}. \n'
                       'sub_spec = {} \n spec = {}'.format(
                           obs_spec_key, obs_spec,
                           full_specs.observation_spec[obs_spec_key]))
def verify_specs_equal_unbounded(expected: specs.Array, actual: specs.Array):
  """Assert that two specs are equal.

  Compares shape, dtype and name in that order, raising on the first
  mismatch with the same messages as before.
  """
  for attr in ('shape', 'dtype', 'name'):
    expected_value = getattr(expected, attr)
    actual_value = getattr(actual, attr)
    if expected_value != actual_value:
      raise ValueError(f'invalid {attr} for spec {expected.name}: '
                       f'{expected_value}, actual {attr}: {actual_value}')
def verify_specs_equal_bounded(expected: specs.BoundedArray,
                               actual: specs.BoundedArray):
  """Check specs are equal, raise a ValueError if they are not.

  Args:
    expected: First bounded spec.
    actual: Second bounded spec to compare against the first.

  Raises:
    ValueError: If either argument is not a BoundedArray, or if bounds,
      shape, dtype or name differ.
  """
  # Bug fix: the second halves of the two messages below were missing the
  # `f` prefix, so '{str(type(...))}' was emitted literally instead of the
  # offending type.
  if not isinstance(expected, specs.BoundedArray):
    raise ValueError(f'Expected BoundedArray for first spec {expected.name}, '
                     f'got {str(type(expected))}')
  if not isinstance(actual, specs.BoundedArray):
    raise ValueError(f'Expected BoundedArray for second spec {actual.name}, '
                     f'got {str(type(actual))}')
  if not np.allclose(expected.minimum, actual.minimum):
    raise ValueError(f'Minimum values for spec {expected.name} do not match')
  if not np.allclose(expected.maximum, actual.maximum):
    raise ValueError(f'Maximum values for spec {expected.name} do not match')
  verify_specs_equal_unbounded(expected, actual)
def validate_observation(spec: ObservationSpec,
                         value: ObservationValue,
                         check_extra_keys: bool = True,
                         ignore_nan: Optional[bool] = None,
                         ignore_ranges: Optional[bool] = None,
                         msg: Optional[str] = None):
  """Validate an observation against an observation spec.

  Args:
    spec: The spec to validate against.
    value: The value to validate (!).
    check_extra_keys: If True having extra observations will fail.
    ignore_nan: If True, NaN values will not fail validation. If None, this is
      determined by the size of `value`, so that large values are not checked.
    ignore_ranges: If True, ignore minimum and maximum of BoundedArray. If None,
      this is determined by the size of `value`, so that large values are not
      checked.
    msg: message to append to any failure message.

  Raises:
    ValueError: On a validation failure.
  """
  if check_extra_keys:
    extra_keys = set(value.keys()) - set(spec.keys())
    if extra_keys:
      raise ValueError(
          'Extra keys in observation:\nSpec keys: {}\nvalue keys: {}\n'
          'Extra keys: {}'.format(spec.keys(), value.keys(), extra_keys))
  # Validate each present observation against its sub-spec.
  for spec_key, sub_spec in spec.items():
    if spec_key in value:  # Assumes missing keys are allowed.
      validate(
          sub_spec,
          value[spec_key],
          ignore_nan=ignore_nan,
          ignore_ranges=ignore_ranges,
          msg='{} for observation {}'.format(msg, spec_key))
# Profiling for .wrap('spec_utils.validate')
def validate(spec: specs.Array,
             value: np.ndarray,
             ignore_nan: Optional[bool] = None,
             ignore_ranges: Optional[bool] = None,
             msg: Optional[str] = None):
  """Validates that value matches the spec.

  Args:
    spec: The spec to validate against.
    value: The value to validate (!).
    ignore_nan: If True, NaN values will not fail validation. If None, this is
      determined by the shape of `value`, so that large arrays (e.g. images)
      are not checked (for performance reasons).
    ignore_ranges: If True, ignore minimum and maximum of BoundedArray. If
      None, this is determined by the size of `value`, so that large values
      are not checked.
    msg: message to append to any failure message.

  Raises:
    ValueError: On a validation failure.
  """
  # If only validating for debugging, and the debug flag is off, don't validate.
  global _validation_count
  if not debugging_flag() or _validation_count >= FLAGS.max_validations:
    return
  _validation_count += 1
  if value is None:
    return  # ASSUME this is ok.
  value = np.asarray(value)
  if not np.issubdtype(value.dtype, np.number):
    # The value is non-numeric, so skip the nan and range checks.
    ignore_nan = True
    ignore_ranges = True
  elif np.prod(spec.shape) > 128:
    # Check less, in this case.
    if ignore_nan is None:
      ignore_nan = True
    if ignore_ranges is None:
      ignore_ranges = True
  else:
    # Check more in this case, it's cheap.
    if ignore_nan is None:
      ignore_nan = False
    if ignore_ranges is None:
      ignore_ranges = False
  if not ignore_nan:
    if np.any(np.isnan(value)):
      raise ValueError('NaN in value: {}, spec: {} ({})'.format(
          value, spec, msg))
  if not ignore_ranges:
    spec.validate(value)
  else:
    # Ranges are ignored, but shape and dtype must still match the spec.
    if spec.shape != value.shape:
      raise ValueError('shape mismatch {}. {} vs. {}'.format(msg, spec, value))
    # Bug fix: the original compared `value.dtype != value.dtype`, which is
    # always False, so dtype mismatches were silently accepted here.
    if spec.dtype != value.dtype:
      raise ValueError('dtype mismatch {}. {} vs. {}'.format(msg, spec, value))
def assert_not_dtype(spec: specs.Array, dtype: Type[Any]):
  """Asserts that the spec (or any of its sub-specs) does not use `dtype`.

  Args:
    spec: A spec to validate.
    dtype: The dtype to check for.

  Raises:
    AssertionError: If `dtype` is found anywhere in `spec`.
  """
  dtype = np.dtype(dtype)
  found = find_dtype(spec, dtype)
  if found:
    offending_spec, name = found
    raise AssertionError('type {} found in {} ({})'.format(
        dtype, offending_spec, name))
def find_dtype(spec: specs.Array,
               dtype: Type[np.floating]) -> Optional[Tuple[specs.Array, str]]:
  """Finds if the given spec uses the given dtype.

  Args:
    spec: A spec (or dict of specs) to search.
    dtype: The dtype to find.

  Returns:
    None if no match found, else (spec, spec_name) of the spec using dtype.
    For a dict, spec_name is the key of the matching sub-spec; for a plain
    Array it is the empty string.

  Raises:
    ValueError: If `spec` is neither a specs.Array nor a dict.
  """
  dtype = np.dtype(dtype)
  match = None  # type: Optional[Tuple[specs.Array, str]]
  if isinstance(spec, specs.Array):
    # BUG FIX: compare dtypes with `==` rather than `is`.  numpy dtype
    # objects are only interned for the builtin scalar types, so identity
    # comparison could silently miss equal dtypes.
    if spec.dtype == dtype:
      match = (spec, '')
  elif isinstance(spec, dict):
    for name, subspec in spec.items():
      if find_dtype(subspec, dtype):
        match = (subspec, name)
  else:
    raise ValueError('Unknown spec type {}'.format(type(spec)))
  return match
| 34.326007 | 80 | 0.675061 |
ace19af0cd136cba72e5a158b304ac228c0c736c | 1,187 | py | Python | solutions/problem011/problem011.py | ineedthisforCPEN/projecteuler | d20fe53445da957ebae023ea2451b33e37498112 | [
"MIT"
] | null | null | null | solutions/problem011/problem011.py | ineedthisforCPEN/projecteuler | d20fe53445da957ebae023ea2451b33e37498112 | [
"MIT"
] | null | null | null | solutions/problem011/problem011.py | ineedthisforCPEN/projecteuler | d20fe53445da957ebae023ea2451b33e37498112 | [
"MIT"
] | null | null | null | import argparse
from projecteuler import classes
from utils.resources import load_problem_resources
# Problem-specific constants
# PROBLEM_NAME is used as the banner printed by ProblemParser.error();
# PROBLEM_DESCRIPTION doubles as the argparse description / CLI help text.
PROBLEM_NAME = "Problem 011 - Largest product in a grid"
# NOTE(review): "adjascent" is a typo, but it lives inside a user-facing
# string; fixing it would change the CLI help output.
PROBLEM_DESCRIPTION = """
The resource file for the problem contains a 20x20 grid. What four
adjascent numbers (vertically, horizontally, or diagonally) create the
largest product?
"""
class ProblemParser(argparse.ArgumentParser):
    """Argument parser that prints the problem banner and help on errors."""

    def error(self, message):
        # Override argparse's error hook: show the problem name and the full
        # usage text before reporting the offending argument.
        print(PROBLEM_NAME)
        self.print_help()
        print("\nError in argument list: " + message)
        # NOTE(review): self.exit() exits with status 0, whereas argparse's
        # default error() exits with status 2 -- confirm this is intended.
        self.exit()
class Problem011(classes.Problem):
    """Wrapper for Project Euler problem 011 (largest product in a grid)."""

    def __init__(self, args):
        """Parse *args* and load the problem's resources and metadata."""
        self.parser = self.create_parser()
        self.args = self.parser.parse_args(args)
        self.problem_name = PROBLEM_NAME
        self.problem_desc = PROBLEM_DESCRIPTION
        self.resources = load_problem_resources(int("011"))
        self.problem_versions = self.get_implemented_versions(__file__)

    def create_parser(self):
        """Build and return the CLI parser for this problem."""
        # THIS SECTION SHOULD BE MODIFIED
        # ADD ANY ARGUMENTS REQUIRED TO RUN THIS PROBLEM'S SOLUTION
        return ProblemParser(description=PROBLEM_DESCRIPTION)
| 30.435897 | 71 | 0.720303 |
ace19c48b9b0a4f05b96e731a2a737c0ca364dd4 | 1,206 | py | Python | dhs_src/atomic_update.py | harsham05/new590DR | a33d5e41b419180cf9afa16cf0b29fe0f15960ff | [
"Apache-2.0"
] | 1 | 2015-11-28T03:37:40.000Z | 2015-11-28T03:37:40.000Z | dhs_src/atomic_update.py | harsham05/new590DR | a33d5e41b419180cf9afa16cf0b29fe0f15960ff | [
"Apache-2.0"
] | null | null | null | dhs_src/atomic_update.py | harsham05/new590DR | a33d5e41b419180cf9afa16cf0b29fe0f15960ff | [
"Apache-2.0"
] | null | null | null | from mysolr import Solr
import os, sys, requests, json, time, argparse
def atomicUpdate(chunkFile, solrURL):
    """Flag every document id listed in chunkFile via a Solr atomic update.

    Each line of chunkFile is treated as a document id; for each one an
    atomic update is buffered that sets dataSource_s_md to "ice", then the
    whole batch is committed in a single request.

    Args:
        chunkFile: path to a text file with one document id per line.
        solrURL: URL of the Solr core, e.g. http://localhost:8983/solr/DHS/
    """
    session = requests.Session()
    solr = Solr(solrURL, make_request=session, version=4)
    bufferDocs = []
    with open(chunkFile, 'r') as inF:
        for docID in inF:
            docID = docID.strip()
            if not docID:
                # Skip blank lines so we never emit an update with id "".
                continue
            delta_update = {"id": docID,
                            "dataSource_s_md": {"set": "ice"}}  ## Caution change this value
            bufferDocs.append(delta_update)
    x = solr.update(bufferDocs, commit=True)
    status = x.raw_content['responseHeader']['status']
    # BUG FIX: the original used Python-2-only `print` statements, which are
    # a SyntaxError under Python 3.  print() with a single argument behaves
    # identically on both Python 2 and 3.
    if status != 0:
        print("Solr Commit Failed !!!! Error Status code: %s" % status)
    else:
        print("Awesome!! Solr Commit was a Success")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="DHS Atomic Update")
parser.add_argument('-f', '--file', required=True, help='path to file/Chunk containing Image IDs')
parser.add_argument('--solrURL', required=True, help='Solr Core URL')
# http://localhost:8983/solr/DHS/
args = parser.parse_args()
if args.file and args.solrURL:
atomicUpdate(args.file, args.solrURL)
| 27.409091 | 102 | 0.635987 |
ace19e34a83189b9badfc83f8408b40ea945a173 | 11,332 | py | Python | slimta/relay/smtp/client.py | nanojob/python-slimta | 70b9c633756a56afaf1fdd53c5ead6d0001036e7 | [
"MIT"
] | 141 | 2015-01-24T23:59:18.000Z | 2022-01-30T16:36:37.000Z | slimta/relay/smtp/client.py | nanojob/python-slimta | 70b9c633756a56afaf1fdd53c5ead6d0001036e7 | [
"MIT"
] | 106 | 2015-01-13T22:49:07.000Z | 2021-02-17T15:14:11.000Z | slimta/relay/smtp/client.py | nanojob/python-slimta | 70b9c633756a56afaf1fdd53c5ead6d0001036e7 | [
"MIT"
] | 43 | 2015-07-29T14:55:09.000Z | 2021-09-24T22:30:38.000Z | # Copyright (c) 2012 Ian C. Good
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from __future__ import absolute_import
import socket
from functools import wraps
from gevent import Timeout
from gevent.socket import create_connection
from slimta.smtp import SmtpError
from slimta.smtp.reply import Reply, timed_out, connection_failed
from slimta.smtp.client import Client
from slimta import logging
from ..pool import RelayPoolClient
from . import SmtpRelayError
# Public API of this module.
__all__ = ['SmtpRelayClient']

# Socket-level logger used to record connect/error events for this module.
log = logging.getSocketLogger(__name__)
def current_command(cmd):
    """Decorator factory: while the wrapped method runs, expose *cmd* as
    ``self.current_command``, restoring the previous value on normal return.

    Note: the previous value is deliberately NOT restored when the wrapped
    method raises -- error reporting reads ``self.current_command`` to know
    which SMTP command was in flight when the failure happened.
    """
    def decorator(method):
        @wraps(method)
        def wrapper(self, *args, **kwargs):
            saved = self.current_command
            self.current_command = cmd
            result = method(self, *args, **kwargs)
            self.current_command = saved
            return result
        return wrapper
    return decorator
class SmtpRelayClient(RelayPoolClient):
    """A pooled SMTP client that relays queued envelopes to one upstream host.

    Each instance owns a single socket connection to ``address`` and runs
    ``_run`` once per pool cycle: it polls the shared queue, performs the
    SMTP handshake (banner/EHLO, optional STARTTLS and AUTH) and delivers
    envelopes until the queue is idle or the server times out.  All
    blocking operations are bounded by gevent ``Timeout``s.
    """

    # The SMTP client implementation; overridable for testing.
    _client_class = Client

    def __init__(self, address, queue, socket_creator=None, ehlo_as=None,
                 context=None, auth_mechanism=None,
                 tls_immediately=False, tls_required=False,
                 connect_timeout=10.0, command_timeout=10.0,
                 data_timeout=None, idle_timeout=None,
                 credentials=None, binary_encoder=None):
        super(SmtpRelayClient, self).__init__(queue, idle_timeout)
        self.address = address
        # Fall back to gevent's create_connection when no factory is given.
        self.socket_creator = socket_creator or create_connection
        self.socket = None
        self.client = None
        # ehlo_as may be a string or a callable taking the remote address.
        self.ehlo_as = ehlo_as or socket.getfqdn()
        self.context = context
        self.auth_mechanism = auth_mechanism
        self.tls_immediately = tls_immediately
        self.tls_required = tls_required
        self.connect_timeout = connect_timeout
        self.command_timeout = command_timeout
        # DATA gets its own (usually longer) timeout; default to command's.
        self.data_timeout = data_timeout or command_timeout
        self.credentials = credentials
        self.binary_encoder = binary_encoder
        # Set by the current_command decorator; used in error replies.
        self.current_command = None

    @current_command(b'[CONNECT]')
    def _connect(self):
        # Establish the TCP connection within the connect timeout.
        with Timeout(self.connect_timeout):
            self.socket = self.socket_creator(self.address)
        log.connect(self.socket, self.address)
        self.client = self._client_class(self.socket, self.address)

    @current_command(b'[BANNER]')
    def _banner(self):
        # Read the server greeting; a non-2xx banner aborts the session.
        with Timeout(self.command_timeout):
            banner = self.client.get_banner()
            if banner.is_error():
                raise SmtpRelayError.factory(banner)

    @current_command(b'EHLO')
    def _ehlo(self):
        # ehlo_as may be a callable producing the identity per-address.
        try:
            ehlo_as = self.ehlo_as(self.address)
        except TypeError:
            ehlo_as = self.ehlo_as
        with Timeout(self.command_timeout):
            ehlo = self.client.ehlo(ehlo_as)
            if ehlo.is_error():
                # 500 means EHLO itself is unrecognized: fall back to HELO.
                if ehlo.code == '500':
                    return self._helo(ehlo_as)
                raise SmtpRelayError.factory(ehlo)
            return ehlo

    @current_command(b'HELO')
    def _helo(self, ehlo_as):
        # Legacy HELO fallback for servers that reject EHLO.
        with Timeout(self.command_timeout):
            helo = self.client.helo(ehlo_as)
            if helo.is_error():
                raise SmtpRelayError.factory(helo)
            return helo

    @current_command(b'STARTTLS')
    def _starttls(self):
        # A STARTTLS failure is only fatal when TLS is required.
        with Timeout(self.command_timeout):
            starttls = self.client.starttls(self.context)
            if starttls.is_error() and self.tls_required:
                raise SmtpRelayError.factory(starttls)

    @current_command(b'AUTH')
    def _authenticate(self):
        # credentials may be a (user, password) tuple or a callable
        # returning one.
        try:
            credentials = self.credentials()
        except TypeError:
            credentials = self.credentials
        with Timeout(self.command_timeout):
            auth = self.client.auth(*credentials,
                                    mechanism=self.auth_mechanism)
            if auth.is_error():
                raise SmtpRelayError.factory(auth)

    def _handshake(self):
        # Full session setup.  Note EHLO is re-issued after STARTTLS, as the
        # extension set may change on the encrypted channel.
        if self.tls_immediately:
            self.client.encrypt(self.context)
            self._banner()
            self._ehlo()
        else:
            self._banner()
            self._ehlo()
            if self.tls_required or 'STARTTLS' in self.client.extensions:
                self._starttls()
                self._ehlo()
        if self.credentials:
            self._authenticate()

    @current_command(b'RSET')
    def _rset(self):
        # Reset server state after a failed delivery so the connection can
        # be reused for the next envelope.
        with Timeout(self.command_timeout):
            self.client.rset()

    @current_command(b'MAIL')
    def _mailfrom(self, sender):
        with Timeout(self.command_timeout):
            mailfrom = self.client.mailfrom(sender, auth=False)
            # A pipelined client may return None here; only a materialized
            # error reply aborts immediately.
            if mailfrom and mailfrom.is_error():
                raise SmtpRelayError.factory(mailfrom)
            return mailfrom

    @current_command(b'RCPT')
    def _rcptto(self, rcpt):
        with Timeout(self.command_timeout):
            return self.client.rcptto(rcpt)

    @current_command(b'DATA')
    def _data(self):
        with Timeout(self.command_timeout):
            return self.client.data()

    def _check_replies(self, mailfrom, rcpttos, data):
        # Raise if MAIL failed, if *every* RCPT failed, or if DATA failed.
        if mailfrom.is_error():
            raise SmtpRelayError.factory(mailfrom)
        for rcptto in rcpttos:
            if not rcptto.is_error():
                break
        else:
            raise SmtpRelayError.factory(rcpttos[0])
        if data.is_error():
            raise SmtpRelayError.factory(data)

    @current_command(b'[SEND_DATA]')
    def _send_empty_data(self):
        # After DATA was accepted but the transaction failed, an empty body
        # must still be sent to terminate the DATA phase cleanly.
        with Timeout(self.data_timeout):
            self.client.send_empty_data()

    @current_command(b'[SEND_DATA]')
    def _send_message_data(self, envelope):
        header_data, message_data = envelope.flatten()
        with Timeout(self.data_timeout):
            send_data = self.client.send_data(
                header_data, message_data)
            self.client._flush_pipeline()
            if isinstance(send_data, Reply) and send_data.is_error():
                raise SmtpRelayError.factory(send_data)
            return send_data

    def _handle_encoding(self, envelope):
        # Downgrade to 7-bit when the server lacks 8BITMIME; if conversion
        # is impossible, fail with a permanent 554.
        if '8BITMIME' not in self.client.extensions:
            try:
                envelope.encode_7bit(self.binary_encoder)
            except UnicodeError:
                reply = Reply('554', '5.6.3 Conversion not allowed',
                              command=b'[data conversion]',
                              address=self.address)
                raise SmtpRelayError.factory(reply)

    def _send_envelope(self, rcpt_results, envelope):
        data = None
        mailfrom = self._mailfrom(envelope.sender)
        rcpttos = [self._rcptto(rcpt) for rcpt in envelope.recipients]
        try:
            data = self._data()
            self._check_replies(mailfrom, rcpttos, data)
        except SmtpRelayError:
            # If DATA itself was accepted, terminate it before propagating.
            if data and not data.is_error():
                self._send_empty_data()
            raise
        # Record per-recipient failures; successful recipients stay None
        # and are filled in with the final DATA reply by _deliver.
        for i, rcpt_reply in enumerate(rcpttos):
            rcpt = envelope.recipients[i]
            if rcpt_reply.is_error():
                rcpt_results[rcpt] = SmtpRelayError.factory(rcpt_reply)

    def _deliver(self, result, envelope):
        rcpt_results = dict.fromkeys(envelope.recipients)
        try:
            self._handle_encoding(envelope)
            self._send_envelope(rcpt_results, envelope)
            msg_result = self._send_message_data(envelope)
        except SmtpRelayError as e:
            result.set_exception(e)
            # Reset the transaction so the connection can be reused.
            self._rset()
        else:
            for key, value in rcpt_results.items():
                if value is None:
                    rcpt_results[key] = msg_result
            result.set(rcpt_results)

    def _check_server_timeout(self):
        # Returns True when the server has already sent something (likely a
        # 421 idle-timeout) or the check itself fails; the caller then
        # requeues the pending envelope and drops the connection.
        try:
            if self.client.has_reply_waiting():
                with Timeout(self.command_timeout):
                    self.client.get_reply()
                return True
        except SmtpError:
            return True
        return False

    def _disconnect(self):
        # Best-effort QUIT; always close the underlying IO.
        try:
            with Timeout(self.command_timeout):
                self.client.quit()
        except (Timeout, Exception):
            pass
        finally:
            if self.client:
                self.client.io.close()

    def _get_error_reply(self, exc):
        # Prefer the server's own 421 if one was recorded; otherwise build
        # a generic 421 naming the command that was in flight.
        try:
            if self.client.last_error.code == '421':
                return self.client.last_error
        except Exception:
            pass
        return Reply('421', '4.3.0 '+str(exc),
                     command=self.current_command, address=self.address)

    def _run(self):
        # One pool cycle: connect, handshake, then deliver queued envelopes
        # until the queue is idle or the server drops/times out.
        result, envelope = self.poll()
        if not result:
            return
        reraise = True
        try:
            self._connect()
            self._handshake()
            while result:
                if self._check_server_timeout():
                    # Server went away: push the work back for another
                    # client and bail out.
                    self.queue.appendleft((result, envelope))
                    break
                self._deliver(result, envelope)
                if self.idle_timeout is None:
                    break
                result, envelope = self.poll()
        except SmtpRelayError as e:
            result.set_exception(e)
        except SmtpError as e:
            if not result.ready():
                reply = self._get_error_reply(e)
                relay_error = SmtpRelayError.factory(reply)
                result.set_exception(relay_error)
        except Timeout:
            if not result.ready():
                reply = Reply(command=self.current_command,
                              address=self.address).copy(timed_out)
                relay_error = SmtpRelayError.factory(reply)
                result.set_exception(relay_error)
        except socket.error as exc:
            log.error(self.socket, exc, self.address)
            if not result.ready():
                reply = Reply(command=self.current_command,
                              address=self.address).copy(connection_failed)
                relay_error = SmtpRelayError.factory(reply)
                result.set_exception(relay_error)
        except Exception as e:
            # Unexpected error: report it on the pending result, but do not
            # let a _disconnect failure mask it (reraise=False below).
            if not result.ready():
                result.set_exception(e)
                reraise = False
            raise
        finally:
            try:
                self._disconnect()
            except Exception:
                if reraise:
                    raise
| 35.302181 | 79 | 0.608895 |
ace19ef73c722c763245c12247232ef0e03a3cb4 | 1,651 | py | Python | csv_to_db.py | alxgmpr/WhatBotCSV | ac5f06490b778bcf8081045a6257d28c11830df0 | [
"MIT"
] | null | null | null | csv_to_db.py | alxgmpr/WhatBotCSV | ac5f06490b778bcf8081045a6257d28c11830df0 | [
"MIT"
] | null | null | null | csv_to_db.py | alxgmpr/WhatBotCSV | ac5f06490b778bcf8081045a6257d28c11830df0 | [
"MIT"
] | null | null | null | # Copyright 2018 Alexander Gompper
# Released under MIT License
import csv
import json
import random
import string
def main():
print('<<< WHATBOT CSV PROFILE READER - CSV TO DB >>>')
try:
with open('input/csv/billing.csv') as input_file:
input_reader = csv.DictReader(input_file, fieldnames=[
'name',
'firstName',
'lastName',
'phone',
'email',
'address',
'address2',
'city',
'state',
'zipCode',
'cardName',
'cardNumber',
'cardMonth',
'cardYear',
'cardCvv',
'_id',
'quickTask'
])
next(input_reader)
try:
with open('output/db/billing.db', 'wb') as output_file:
for row in input_reader:
row['_id'] = ''.join(random.choices(string.ascii_letters + string.digits, k=16)) if row['_id'] in {'', None} else row['_id']
row['quickTask'] = True if row['quickTask'].lower() == 'true' else False
output_file.write(bytes((json.dumps(row) + '\n').encode('utf-8')))
except IOError:
print('<<< [error] something went wrong writing the billing.db output file >>>')
exit(-1)
print('<<< COMPLETE >>>')
except IOError:
print('<<< [error] something went wrong opening the billing.csv input file >>>')
exit(-1)
if __name__ == '__main__':
main()
| 31.75 | 148 | 0.470018 |
ace19f4f0967c6af6be065f247844b39bac9e2f0 | 2,700 | py | Python | python/lib/Lib/site-packages/django/contrib/staticfiles/handlers.py | truthiswill/intellij-community | fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | [
"Apache-2.0"
] | 2 | 2018-12-29T09:53:39.000Z | 2018-12-29T09:53:42.000Z | python/lib/Lib/site-packages/django/contrib/staticfiles/handlers.py | truthiswill/intellij-community | fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | [
"Apache-2.0"
] | null | null | null | python/lib/Lib/site-packages/django/contrib/staticfiles/handlers.py | truthiswill/intellij-community | fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | [
"Apache-2.0"
] | 1 | 2018-10-03T12:35:06.000Z | 2018-10-03T12:35:06.000Z | import urllib
from urlparse import urlparse
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.handlers.wsgi import WSGIHandler
from django.contrib.staticfiles import utils
from django.contrib.staticfiles.views import serve
class StaticFilesHandler(WSGIHandler):
    """
    WSGI middleware that intercepts calls to the static files directory, as
    defined by the STATIC_URL setting, and serves those files.

    NOTE: this is Python 2 code (see the ``except Http404, e`` syntax and
    the ``urllib.url2pathname`` import); keep that in mind when editing.
    """
    def __init__(self, application, base_dir=None):
        self.application = application
        if base_dir:
            self.base_dir = base_dir
        else:
            self.base_dir = self.get_base_dir()
        # urlparse result is indexed as a tuple below: [1] is the netloc,
        # [2] is the path component.
        self.base_url = urlparse(self.get_base_url())
        super(StaticFilesHandler, self).__init__()

    def get_base_dir(self):
        # Filesystem root the static files are served from.
        return settings.STATIC_ROOT

    def get_base_url(self):
        if not settings.STATIC_URL:
            raise ImproperlyConfigured("You're using the staticfiles app "
                "without having set the STATIC_URL setting. Set it to "
                "URL that handles the files served from STATIC_ROOT.")
        if settings.DEBUG:
            utils.check_settings()
        return settings.STATIC_URL

    def _should_handle(self, path):
        """
        Checks if the path should be handled. Ignores the path if:

        * the host is provided as part of the base_url
        * the request's path isn't under the media path (or equal)
        """
        # base_url[2] is the STATIC_URL path, base_url[1] its netloc.
        return (self.base_url[2] != path and
            path.startswith(self.base_url[2]) and not self.base_url[1])

    def file_path(self, url):
        """
        Returns the relative path to the media file on disk for the given URL.
        """
        # Strip the STATIC_URL prefix, then unquote into a filesystem path.
        relative_url = url[len(self.base_url[2]):]
        return urllib.url2pathname(relative_url)

    def serve(self, request):
        """
        Actually serves the request path.
        """
        # insecure=True lets the view serve even when DEBUG is False.
        return serve(request, self.file_path(request.path), insecure=True)

    def get_response(self, request):
        # Imported lazily to avoid a circular import at module load time.
        from django.http import Http404

        if self._should_handle(request.path):
            try:
                return self.serve(request)
            except Http404, e:
                # In DEBUG mode show the technical 404 page; otherwise fall
                # through to the wrapped application's normal handling.
                if settings.DEBUG:
                    from django.views import debug
                    return debug.technical_404_response(request, e)
        return super(StaticFilesHandler, self).get_response(request)

    def __call__(self, environ, start_response):
        # Delegate non-static requests straight to the wrapped WSGI app.
        if not self._should_handle(environ['PATH_INFO']):
            return self.application(environ, start_response)
        return super(StaticFilesHandler, self).__call__(environ, start_response)
| 35.526316 | 80 | 0.653333 |
ace19f9f74bd36ac30e54042e7051e6d678c2e79 | 3,523 | py | Python | locallibrary/settings.py | bing900717/locallibrary | 868a6b64bfcc90bd8ff9cd8762cc00adec29291f | [
"MIT"
] | null | null | null | locallibrary/settings.py | bing900717/locallibrary | 868a6b64bfcc90bd8ff9cd8762cc00adec29291f | [
"MIT"
] | null | null | null | locallibrary/settings.py | bing900717/locallibrary | 868a6b64bfcc90bd8ff9cd8762cc00adec29291f | [
"MIT"
] | null | null | null | """
Django settings for locallibrary project.
Generated by 'django-admin startproject' using Django 2.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# The hard-coded fallback is for development only; set DJANGO_SECRET_KEY in
# the environment for production deployments.
SECRET_KEY = os.environ.get(
    'DJANGO_SECRET_KEY',
    '_go(v*h61yq2whmd&e01%o2zg13)nvs0out=b!b#tkdmivf7=3')

# SECURITY WARNING: don't run with debug turned on in production!
# BUG FIX: bool(os.environ.get('DJANGO_DEBUG', True)) is True for ANY
# non-empty string (including 'False'), so debug could never be disabled via
# the environment.  Debug is now on unless DJANGO_DEBUG is exactly 'False'.
DEBUG = os.environ.get('DJANGO_DEBUG', '') != 'False'

ALLOWED_HOSTS = []
# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project app: the library catalog, registered via its AppConfig.
    'catalog.apps.CatalogConfig',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'locallibrary.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level template directory, searched before app templates.
        'DIRS': [
            os.path.join(BASE_DIR, 'templates'),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'locallibrary.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# SQLite file database in the project root (development default).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'Asia/Shanghai'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/

STATIC_URL = '/static/'

# Redirect to home URL after login (Default redirects to /accounts/profile/)
LOGIN_REDIRECT_URL = '/'

# Console email backend: messages are written to stdout (development only).
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
| 27.1 | 102 | 0.701107 |
ace1a01501e0dba8f497ce62ca1bf711fe1329e9 | 13,840 | py | Python | soco/music_services/data_structures.py | gitter-badger/SoCo | 65977466057748ea522a6d8b7f2a649091485a07 | [
"MIT"
] | 28 | 2020-04-24T17:32:33.000Z | 2021-12-28T23:45:16.000Z | soco/music_services/data_structures.py | gitter-badger/SoCo | 65977466057748ea522a6d8b7f2a649091485a07 | [
"MIT"
] | 11 | 2020-04-28T15:57:39.000Z | 2022-01-07T09:32:10.000Z | soco/music_services/data_structures.py | gitter-badger/SoCo | 65977466057748ea522a6d8b7f2a649091485a07 | [
"MIT"
] | 3 | 2021-08-10T11:56:14.000Z | 2022-01-13T04:19:03.000Z | # -*- coding: utf-8 -*-
# Disable while we have Python 2.x compatability
# pylint: disable=useless-object-inheritance
"""Data structures for music service items
The basis for this implementation is this page in the Sonos API
documentation: http://musicpartners.sonos.com/node/83
A note about naming. The Sonos API uses camel case with starting lower
case. These names have been adapted to match general Python class
naming conventions.
MediaMetadata:
Track
Stream
Show
Other
MediaCollection:
Artist
Album
Genre
Playlist
Search
Program
Favorites
Favorite
Collection
Container
AlbumList
TrackList
StreamList
ArtistTrackList
Other
NOTE: "Other" is allowed under both.
Class overview:
+----------------+ +----------------+ +---------------+
|MetadataDictBase+-->+MusicServiceItem+-->+MediaCollection|
+-----+----------+ +--------+-------+ +---------------+
| |
| | +------------------+
| +---->+ MediaMetadata |
| | |
| | +-------------+ |
+------------------------------>+TrackMetadata| |
| | +-------------+ |
| | |
| | +--------------+ |
+------------------------------>+StreamMetadata| |
| +--------------+ |
| |
+------------------+
"""
from __future__ import print_function, absolute_import
import sys
import logging
from collections import OrderedDict
from ..data_structures import DidlResource, DidlItem, SearchResult
from ..utils import camel_to_underscore
from ..compat import quote_url
# Module-level logger; a NullHandler keeps the library silent unless the
# application configures logging.
_LOG = logging.getLogger(__name__)
_LOG.addHandler(logging.NullHandler())
# For now we generate classes dynamically. This is shorter, but
# provides no custom documentation for all the different types.
# Cache of the generated classes, keyed by class_key (see get_class).
CLASSES = {}
def get_class(class_key):
    """Return (creating and caching on first use) the item class for a key.

    Args:
        class_key (str): A concatenation of the base class (e.g.
            MediaMetadata) and the class name

    Returns:
        class: Subclass of MusicServiceItem
    """
    if class_key not in CLASSES:
        for base in (MediaMetadata, MediaCollection):
            prefix = base.__name__
            if not class_key.startswith(prefix):
                continue
            # So MediaMetadataTrack turns into MSTrack
            new_name = 'MS' + class_key.replace(prefix, '')
            if sys.version_info[0] == 2:
                new_name = new_name.encode('ascii')
            CLASSES[class_key] = type(new_name, (base,), {})
            _LOG.info('Class %s created', CLASSES[class_key])
    return CLASSES[class_key]
def parse_response(service, response, search_type):
    """Turn a music service query response into a SearchResult.

    Args:
        service (MusicService): The music service that produced the response
        response (OrderedDict): The response from the soap client call
        search_type (str): A string that indicates the search type that the
            response is from

    Returns:
        SearchResult: A SearchResult object
    """
    _LOG.debug('Parse response "%s" from service "%s" of type "%s"', response,
               service, search_type)
    # The payload lives under one of two possible top-level keys.
    for result_key in ('searchResult', 'getMetadataResult'):
        if result_key in response:
            response = response[result_key]
            break
    else:
        raise ValueError('"response" should contain either the key '
                         '"searchResult" or "getMetadataResult"')
    # Metadata describing the search itself.
    search_metadata = {
        'number_returned': response['count'],
        'total_matches': None,
        'search_type': search_type,
        'update_id': None,
    }
    items = []
    for result_type in ('mediaCollection', 'mediaMetadata'):
        # Upper case the first letter (used for the class_key).
        result_type_proper = result_type[0].upper() + result_type[1:]
        raw_items = response.get(result_type, [])
        # A single result is not wrapped in a list by the soap layer.
        if isinstance(raw_items, OrderedDict):
            raw_items = [raw_items]
        for raw_item in raw_items:
            # class_key concatenates the result type with the item type,
            # e.g. MediaMetadataTrack.
            class_key = result_type_proper + raw_item['itemType'].title()
            item_class = get_class(class_key)
            items.append(item_class.from_music_service(service, raw_item))
    return SearchResult(items, **search_metadata)
def form_uri(item_id, service, is_track):
    """Build the playable URI for a music service item.

    Args:
        item_id (str): The item id
        service (MusicService): The music service that the item originates
            from
        is_track (bool): Whether the item_id is from a track or not

    Returns:
        str: The music service item uri
    """
    if not is_track:
        return 'x-rincon-cpcontainer:' + item_id
    return service.sonos_uri_from_id(item_id)
# Type Helper
BOOL_STRS = set(('true', 'false'))


def bool_str(string):
    """Convert the literal strings 'true'/'false' to the matching bool.

    Raises:
        ValueError: for any other input string.
    """
    if string in BOOL_STRS:
        return string == 'true'
    raise ValueError('Invalid boolean string: "{}"'.format(string))
# Music Service item base classes
class MetadataDictBase(object):
    """Base class that parses a raw metadata dict into ``self.metadata``.

    Keys are converted from camelCase to underscore form, values are run
    through the converters in ``_types``, and unknown keys are logged (not
    rejected -- real services have been observed to send them).  Converted
    entries are then exposed as attributes via ``__getattr__``.
    """

    # The following two fields should be overwritten in subclasses
    # _valid_fields is a set of valid fields
    _valid_fields = {}
    # _types is a dict of fields with non-string types and their convertion
    # callables
    _types = {}

    def __init__(self, metadata_dict):
        """Parse ``metadata_dict`` into ``self.metadata``."""
        _LOG.debug('MetadataDictBase.__init__ with: %s', metadata_dict)
        for key in metadata_dict:
            # Check for invalid fields
            if key not in self._valid_fields:
                message = ('%s instantiated with invalid field "%s" and '
                           'value: %s')
                # Really wanted to raise exceptions here, but as it
                # turns out I have already encountered invalid fields
                # from music services.
                _LOG.debug(message, self.__class__, key, metadata_dict[key])
        # Convert names and create metadata dict
        self.metadata = {}
        for key, value in metadata_dict.items():
            if key in self._types:
                convertion_callable = self._types[key]
                value = convertion_callable(value)
            self.metadata[camel_to_underscore(key)] = value

    def __getattr__(self, key):
        """Return item from metadata in case of unknown attribute"""
        # BUG FIX: look up "metadata" in the instance __dict__ directly.
        # The previous ``self.metadata[key]`` re-entered __getattr__ when
        # the metadata attribute itself was missing (e.g. __init__ never
        # ran), recursing forever and raising RecursionError instead of
        # the expected AttributeError.
        try:
            return self.__dict__['metadata'][key]
        except KeyError:
            message = 'Class {} has no attribute "{}"'
            raise AttributeError(message.format(self.__class__.__name__, key))
class MusicServiceItem(MetadataDictBase):
    """A base class for all music service items"""

    # See comment in MetadataDictBase for explanation of these two attributes
    _valid_fields = {}
    _types = {}

    def __init__(self, item_id, desc,  # pylint: disable=too-many-arguments
                 resources, uri, metadata_dict, music_service=None):
        """Init music service item

        Args:
            item_id (str): This is the Didl compatible id NOT the music item
                id
            desc (str): A DIDL descriptor, default
                ``'RINCON_AssociatedZPUDN'``
            resources (list): List of DidlResource
            uri (str): The uri for the location of the item
            metdata_dict (dict): Mapping of metadata
            music_service (MusicService): The MusicService instance the item
                originates from
        """
        _LOG.debug('%s.__init__ with item_id=%s, desc=%s, resources=%s, '
                   'uri=%s, metadata_dict=..., music_service=%s',
                   self.__class__.__name__, item_id, desc, resources, uri,
                   music_service)
        super(MusicServiceItem, self).__init__(metadata_dict)
        self.item_id = item_id
        self.desc = desc
        self.resources = resources
        self.uri = uri
        self.music_service = music_service

    @classmethod
    def from_music_service(cls, music_service, content_dict):
        """Return an element instantiated from the information that a music
        service has (alternative constructor)

        Args:
            music_service (MusicService): The music service that content_dict
                originated from
            content_dict (OrderedDict): The data to instantiate the music
                service item from

        Returns:
            MusicServiceItem: A MusicServiceItem instance
        """
        # Form the item_id: URL-quote the service's raw id ...
        quoted_id = quote_url(content_dict['id'].encode('utf-8'))
        # ... then prepend the hex prefix.  The meaning of this prefix
        # remains a mystery for now.
        item_id = '0fffffff{}'.format(quoted_id)
        # Form the uri (tracks and containers use different schemes)
        is_track = cls == get_class('MediaMetadataTrack')
        uri = form_uri(item_id, music_service, is_track)
        # Form resources and get desc
        resources = [DidlResource(uri=uri, protocol_info="DUMMY")]
        desc = music_service.desc
        return cls(item_id, desc, resources, uri, content_dict,
                   music_service=music_service)

    def __str__(self):
        """Return custom string representation"""
        title = self.metadata.get('title')
        str_ = '<{} title="{}">'
        return str_.format(self.__class__.__name__, title)

    def to_element(self, include_namespaces=False):
        """Return an ElementTree Element representing this instance.

        Args:
            include_namespaces (bool, optional): If True, include xml
                namespace attributes on the root element

        Return:
            ~xml.etree.ElementTree.Element: The (XML) Element representation
                of this object
        """
        # We piggy back on the implementation in DidlItem
        didl_item = DidlItem(
            title="DUMMY",
            # This is ignored. Sonos gets the title from the item_id
            parent_id="DUMMY",  # Ditto
            item_id=self.item_id,
            desc=self.desc,
            resources=self.resources
        )
        return didl_item.to_element(include_namespaces=include_namespaces)
class TrackMetadata(MetadataDictBase):
    """Parsed ``trackMetadata`` attached to a music service item."""

    # Fields that may legitimately appear in the metadata dict
    _valid_fields = {
        'artistId',
        'artist',
        'composerId',
        'composer',
        'albumId',
        'album',
        'albumArtURI',
        'albumArtistId',
        'albumArtist',
        'genreId',
        'genre',
        'duration',
        'canPlay',
        'canSkip',
        'canAddToFavorites',
        'rating',
        'trackNumber',
        'isFavorite',
    }

    # Converters for fields whose values are not plain strings
    _types = {
        'duration': int,
        'canPlay': bool_str,
        'canSkip': bool_str,
        'canAddToFavorites': bool_str,
        'rating': int,
        'trackNumber': int,
        'isFavorite': bool_str,
    }
class StreamMetadata(MetadataDictBase):
    """Parsed ``streamMetadata`` attached to a music service item."""

    # Fields that may legitimately appear in the metadata dict
    _valid_fields = {
        'currentHost',
        'currentShowId',
        'currentShow',
        'secondsRemaining',
        'secondsToNextShow',
        'bitrate',
        'logo',
        'hasOutOfBandMetadata',
        'description',
        'isEphemeral',
    }

    # Converters for fields whose values are not plain strings
    _types = {
        'secondsRemaining': int,
        'secondsToNextShow': int,
        'bitrate': int,
        'hasOutOfBandMetadata': bool_str,
        'isEphemeral': bool_str,
    }
class MediaMetadata(MusicServiceItem):
    """Base class for all media metadata items."""

    # All field names that may legally appear in media metadata.
    _valid_fields = {
        'id',
        'title',
        'mimeType',
        'itemType',
        'displayType',
        'summary',
        'trackMetadata',
        'streamMetadata',
        'dynamic',
    }

    # Conversion callables for the fields whose values are not plain
    # strings. The ``dynamic`` field is deliberately left untyped.
    _types = {
        'trackMetadata': TrackMetadata,
        'streamMetadata': StreamMetadata,
    }
class MediaCollection(MusicServiceItem):
    """Base class for all mediaCollection items."""

    # All field names that may legally appear in a media collection.
    _valid_fields = {
        'id',
        'title',
        'itemType',
        'displayType',
        'summary',
        'artistId',
        'artist',
        'albumArtURI',
        'canPlay',
        'canEnumerate',
        'canAddToFavorites',
        'containsFavorite',
        'canScroll',
        'canSkip',
        'isFavorite',
    }

    # Conversion callables for the fields whose values are not plain
    # strings.
    _types = {
        'canPlay': bool_str,
        'canEnumerate': bool_str,
        'canAddToFavorites': bool_str,
        'containsFavorite': bool_str,
        'canScroll': bool_str,
        'canSkip': bool_str,
        'isFavorite': bool_str,
    }
| 31.963048 | 79 | 0.586272 |
ace1a0426d49b15fbf55736c4f40a84ed01c00ac | 8,740 | py | Python | python/cuxfilter/charts/core/non_aggregate/core_stacked_line.py | rnyak/cuxfilter | 626e45af3b8a0f2e37bc5cdbe6d2da618141f995 | [
"Apache-2.0"
] | null | null | null | python/cuxfilter/charts/core/non_aggregate/core_stacked_line.py | rnyak/cuxfilter | 626e45af3b8a0f2e37bc5cdbe6d2da618141f995 | [
"Apache-2.0"
] | null | null | null | python/cuxfilter/charts/core/non_aggregate/core_stacked_line.py | rnyak/cuxfilter | 626e45af3b8a0f2e37bc5cdbe6d2da618141f995 | [
"Apache-2.0"
] | null | null | null | from typing import Tuple
from ..core_chart import BaseChart
from ....layouts import chart_view
class BaseStackedLine(BaseChart):
    """
    Base class for stacked-line charts.

    No datatiles support in non_data_tiles plot charts.

    If dataset size is greater than a few thousand points,
    scatter geos can crash the browser tabs, and is only recommended
    with datashader plugin, in which case an image is
    rendered instead of points on canvas.
    """

    chart_type = "stacked_lines"
    reset_event = None
    x_range: Tuple = None
    y_range: Tuple = None
    use_data_tiles = False
    y: list = []
    colors: list = []

    def __init__(
        self,
        x,
        y=None,
        data_points=100,
        add_interaction=True,
        colors=None,
        step_size=None,
        step_size_type=int,
        width=800,
        height=400,
        **library_specific_params,
    ):
        """
        Construct a stacked-line chart.

        Args:
            x: column name used for the shared x axis.
            y: list of column names; one stacked line is drawn per column.
                Must be a non-empty list.
            data_points: number of points to plot.
            add_interaction: if True, attach range-selection callbacks.
            colors: list of colors, one per entry in ``y``.
            step_size: stride used by the range widget.
            step_size_type: numeric type of ``step_size``.
            width: chart width in pixels.
            height: chart height in pixels.
            **library_specific_params: forwarded to the plotting backend.

        Raises:
            TypeError: if ``y`` or ``colors`` is not a list.
            ValueError: if ``y`` is empty.
        """
        self.x = x
        # Fix: use None as the default instead of mutable list defaults
        # ([] shared across all instances); behavior is unchanged since an
        # empty y still raises ValueError below.
        if y is None:
            y = []
        if colors is None:
            colors = []
        # isinstance instead of type(...) != list also accepts list
        # subclasses (backward compatible).
        if not isinstance(y, list):
            raise TypeError("y must be a list of column names")
        if len(y) == 0:
            raise ValueError("y must not be empty")
        self.y = y
        self.data_points = data_points
        self.add_interaction = add_interaction
        self.stride = step_size
        if not isinstance(colors, list):
            raise TypeError("colors must be a list of colors")
        self.colors = colors
        self.stride_type = step_size_type
        self.library_specific_params = library_specific_params
        self.width = width
        self.height = height

    def initiate_chart(self, dashboard_cls):
        """Compute missing axis ranges from the dashboard's cudf DataFrame,
        build the chart and register its event callbacks."""
        if self.x_range is None:
            self.x_range = (
                dashboard_cls._data[self.x].min(),
                dashboard_cls._data[self.x].max(),
            )
        if self.y_range is None:
            # cudf_df[['a','b','c']].min().min() gives min value
            # between all values in columns a,b and c
            self.y_range = (
                dashboard_cls._data[self.y].min().min(),
                dashboard_cls._data[self.y].max().max(),
            )
        self.calculate_source(dashboard_cls._data)
        self.generate_chart()
        self.add_events(dashboard_cls)

    def view(self):
        """Return the chart wrapped in a layout view."""
        return chart_view(self.chart, width=self.width)

    def calculate_source(self, data):
        """Format ``data`` (a cudf.DataFrame) into the chart's source."""
        self.format_source_data(data)

    def get_selection_geometry_callback(self, dashboard_cls):
        """Return the callback invoked when a box-selection is drawn on
        the chart; it queries the dashboard data to the selected x range
        and reloads all charts."""

        def selection_callback(xmin, xmax, ymin, ymax):
            if dashboard_cls._active_view != self.name:
                # reset previous active view and
                # set current chart as active view
                dashboard_cls._reset_current_view(new_active_view=self)
                self.source = dashboard_cls._data
            self.x_range = (xmin, xmax)
            self.y_range = (ymin, ymax)

            query = str(xmin) + "<=" + self.x + " <= " + str(xmax)
            dashboard_cls._query_str_dict[self.name] = query
            temp_data = dashboard_cls._query(
                dashboard_cls._query_str_dict[self.name]
            )

            # reload all charts with new queried data (cudf.DataFrame only)
            dashboard_cls._reload_charts(data=temp_data, ignore_cols=[])
            # self.reload_chart(temp_data, False)
            del temp_data

        return selection_callback

    def compute_query_dict(self, query_str_dict):
        """Write this chart's current x-range filter into
        ``query_str_dict`` (a reference to dashboard.__cls__.query_dict)."""
        if self.x_range is not None and self.y_range is not None:
            query_str_dict[self.name] = (
                str(self.x_range[0])
                + "<="
                + self.x
                + " <= "
                + str(self.x_range[1])
            )

    def add_events(self, dashboard_cls):
        """Attach selection and (optional) reset event handlers."""
        if self.add_interaction:
            self.add_selection_geometry_event(
                self.get_selection_geometry_callback(dashboard_cls)
            )
        if self.reset_event is not None:
            self.add_reset_event(dashboard_cls)

    def add_reset_event(self, dashboard_cls):
        """Attach a callback to the reset button that clears the current
        x/y ranges and reloads all charts."""

        def reset_callback(event):
            if dashboard_cls._active_view != self.name:
                # reset previous active view and
                # set current chart as active view
                dashboard_cls._reset_current_view(new_active_view=self)
                self.source = dashboard_cls._data
            self.x_range = None
            self.y_range = None
            dashboard_cls._reload_charts()

        # add callback to reset chart button
        self.add_event(self.reset_event, reset_callback)

    def query_chart_by_range(
        self, active_chart: BaseChart, query_tuple, datatile=None
    ):
        """
        Reload this chart filtered to a range selected on another chart.

        Args:
            active_chart: chart object of the currently active chart.
            query_tuple: (min_val, max_val) of the query.
            datatile: always None for non-datatile charts.
        """
        min_val, max_val = query_tuple
        self.reload_chart(
            self.source.query(
                str(min_val) + "<=" + active_chart.x + "<=" + str(max_val)
            ),
            False,
        )

    def query_chart_by_indices(
        self, active_chart: BaseChart, old_indices, new_indices, datatile=None
    ):
        """
        Reload this chart filtered to discrete values selected on another
        chart.

        Args:
            active_chart: chart object of the currently active chart.
            old_indices: previously selected values (unused here).
            new_indices: currently selected values of ``active_chart.x``.
            datatile: always None for non-datatile charts.
        """
        if "" in new_indices:
            new_indices.remove("")
        if len(new_indices) == 0:
            # case: all selected indices were reset
            # reset the chart
            self.reload_chart(self.source, False)
        elif len(new_indices) == 1:
            # just a single index
            self.reload_chart(
                self.source.query(
                    active_chart.x + "==" + str(float(new_indices[0]))
                ),
                False,
            )
        else:
            new_indices_str = ",".join(map(str, new_indices))
            self.reload_chart(
                self.source.query(
                    active_chart.x + " in (" + new_indices_str + ")"
                ),
                False,
            )

    def add_selection_geometry_event(self, callback):
        """Register ``callback`` for box-selection events."""
        # ('function to be overridden by library specific extensions')

    def reset_chart_geometry_ranges(self):
        """Reset any displayed selection geometry."""
        # ('function to be overridden by library specific extensions')
| 28.75 | 78 | 0.469108 |
ace1a15b503db4f574f07b9002428b66a8638274 | 5,857 | py | Python | examples/measure-packet-trace.py | edukinara/csr_aws_guestshell | 096584a26ef06495d41a92a233d28ea455078902 | [
"MIT"
] | 18 | 2017-05-25T16:00:51.000Z | 2021-09-01T03:49:33.000Z | examples/measure-packet-trace.py | edukinara/csr_aws_guestshell | 096584a26ef06495d41a92a233d28ea455078902 | [
"MIT"
] | 3 | 2017-07-27T21:59:37.000Z | 2021-04-30T12:19:21.000Z | examples/measure-packet-trace.py | edukinara/csr_aws_guestshell | 096584a26ef06495d41a92a233d28ea455078902 | [
"MIT"
] | 8 | 2018-01-29T15:32:39.000Z | 2021-07-15T14:49:45.000Z | #!/usr/bin/env python
from __future__ import print_function
from builtins import *
from builtins import range
from past.utils import old_div
import cli
import sys
import argparse
import time
import re
from collections import defaultdict
import csv
parser = argparse.ArgumentParser(
    description="Enable Packet trace and measure time for FIA")
parser.add_argument(
    '--clear', help='do not clear of packet trace statistics', default=1)
parser.add_argument(
    '--filename', help='name of csv file to export values to', default="packet-trace.csv")
parser.add_argument('--pktnum', help='print details on pktnum', default=None)
parser.add_argument('--seconds', help='Seconds to capture', default=5)
parser.add_argument('--showonly', help='operate on previous list', default=0)
parser.add_argument(
    '--pktbypkt', help='retrieve packet by packet', default=True)
parser.add_argument(
    '--pktcnt', help='number of packets to capture', default=128, type=int)
args = parser.parse_args()

# NOTE(review): --showonly and --pktbypkt arrive as *strings* when passed on
# the command line, so `== 0` / `is False` below only match the defaults;
# confirm intended usage before tightening the argument types.
if args.showonly == 0 and args.pktnum is None:
    # Fix: the original used Python 2 print statements, which are a syntax
    # error under `from __future__ import print_function`; converted to
    # print() calls throughout.
    print("executing CLI...")
    if args.clear is not None:
        cli.execute("clear platform packet-trace config")
        cli.execute("clear platform packet-trace statistics")
    cli.execute("debug platform condition both")
    cli.execute(
        "debug platform packet-trace packet %u fia-trace data-size 2048" % (args.pktcnt))
    cli.execute("debug platform condition start")
    # Show a one-line countdown while the capture runs.
    for i in range(0, int(args.seconds)):
        time.sleep(1)
        sys.stdout.write("\r%d secs" % (i + 1))
        sys.stdout.flush()
    print("\n")
    cli.execute("debug platform condition stop")

print("retrieving CLI...")
if args.pktbypkt is False:
    output = cli.execute("show platform packet-trace packet all decode")
else:
    # Pull packets one at a time until the device reports an invalid index
    # or returns nothing.
    output = ""
    pkt_num = 0
    while True:
        cmd = cli.execute(
            "show platform packet-trace packet %d decode | inc (Packet|Start|Stop|Feature|Lapsed)" % (pkt_num))
        if "Invalid" in cmd or len(cmd) == 0:
            break
        output += (cmd + "\n")
        pkt_num = pkt_num + 1
    print("Retrieved %d packets" % pkt_num)

features = defaultdict(list)   # feature name -> [(packet num, lapsed, lines)]
packets = {}                   # packet number -> raw trace lines
times = []                     # (packet number, total elapsed) pairs
feature_lines = []
packet_line = []
start_time = stop_time = ""
total_time = 0
packet_num = 0

print("Parsing data...")
for line in output.splitlines():
    if "Packet" in line:
        m = re.search(r'\bPacket: (?P<packet_num>\d+).*', line)
        if m:
            new_packet_num = m.group('packet_num')
            # Close out the previous packet once its Stop timestamp is known.
            if len(stop_time):
                total_time = (int(stop_time) - int(start_time))
                times.append((packet_num, total_time))
                packets[packet_num] = packet_line
            packet_num = new_packet_num
            stop_time = ""
            packet_line = []
    if "Feature:" in line:
        feature_name = line.split(":")[1]
        feature_lines = []
    if "Lapsed time" in line:
        lapsed_time = int(line.split(":")[1].split()[0])
        features[feature_name].append(
            (packet_num, lapsed_time, feature_lines))
    feature_lines.append(line)
    packet_line.append(line)
    if "Start" in line:
        start_time = line.split(":")[1].split()[0]
    if "Stop" in line:
        stop_time = line.split(":")[1].split()[0]

if args.pktnum is not None:
    # Fix: the original indexed the scratch list ``packet_line`` with the
    # requested packet number; the per-packet dict ``packets`` is intended.
    if args.pktnum in packets:
        for line in packets[args.pktnum]:
            print(line)
    sys.exit(1)

print("Sorting data...")
time_sorted = sorted(times, key=lambda x: x[1])
if len(time_sorted) > 0:
    max_time_packet = time_sorted[-1][0]
    print("Min time is packet %s, value %d" % (time_sorted[0][0], time_sorted[0][1]))
    print("Max time is packet %s, value %d" % (max_time_packet, time_sorted[-1][1]))
    average = float(sum(n for _, n in time_sorted)) / len(time_sorted)

print("Storing list...")
data_list = []
for feature, tuple_list in list(features.items()):
    cnt = len(tuple_list)
    total = sum(t[1] for t in tuple_list)
    # Fix: the original read ``t[0]`` after a list comprehension, relying on
    # Python 2 comprehension-variable leakage (NameError on Python 3); use
    # the last tuple explicitly, which is what the leak produced.
    pkt_num = tuple_list[-1][0]
    average = int(float(total) / cnt)
    minimum = min(tuple_list, key=lambda item: item[1])
    maximum = max(tuple_list, key=lambda item: item[1])
    median = sorted(t[1] for t in tuple_list)[cnt // 2]
    data_list.append((feature, cnt, pkt_num, minimum[1], maximum[1], median, average))

data_sorted = sorted(data_list, key=lambda x: x[5], reverse=True)

print("Printing list...")
print("%-40s %10s %10s %10s %10s %10s %10s" % ("Feature", "Count", "Packet", "Min", "Max", "Avg", "Med"))
print("-" * 106)
for entry in data_sorted:
    feature = entry[0]
    cnt = entry[1]
    pkt_num = int(entry[2])
    minimum = entry[3]
    maximum = entry[4]
    median = entry[5]
    average = entry[6]
    print("%-40s %10s %10d %10s %10s %10s %10s" % (feature[:40], cnt, pkt_num, minimum, maximum, average, median))

# Fix: csv.writer on Python 3 needs a text-mode file opened with newline=''
# (the original opened 'wb' and would raise TypeError on the first writerow).
with open('/bootflash/' + args.filename, 'w', newline='') as csvfile:
    csvwriter = csv.writer(csvfile, delimiter=',',
                           quotechar='|', quoting=csv.QUOTE_MINIMAL)
    csvwriter.writerow(
        ["Feature Name", "Count", "Packet Number", "Minimum", "Maximum", "Average", "Median"])
    for entry in data_sorted:
        feature = entry[0]
        cnt = entry[1]
        pkt_num = entry[2]
        minimum = entry[3]
        maximum = entry[4]
        median = entry[5]
        average = entry[6]
        csvwriter.writerow(
            [feature, cnt, pkt_num, minimum, maximum, average, median])
    csvwriter.writerow(["Feature Name", "Packet Number", "Lapsed time"])
    for feature, tuple_list in list(features.items()):
        for t in tuple_list:
            csvwriter.writerow([feature, t[0], t[1]])
    for i, pkt in list(packets.items()):
        for line in pkt:
            csvwriter.writerow([line])
ace1a209a1f33283b8b602d71e5ad1b7bf00c926 | 71 | py | Python | demo1/returnTest.py | dollarkillerx/Python-Data-Analysis | f208d5ce9951e9fca2d084a89290100b7e543154 | [
"MIT"
] | null | null | null | demo1/returnTest.py | dollarkillerx/Python-Data-Analysis | f208d5ce9951e9fca2d084a89290100b7e543154 | [
"MIT"
] | null | null | null | demo1/returnTest.py | dollarkillerx/Python-Data-Analysis | f208d5ce9951e9fca2d084a89290100b7e543154 | [
"MIT"
def hello():
    """Return the words "hello" and "world" as a 2-tuple."""
    return "hello", "world"


# Unpack the returned tuple into two names and show them.
greeting, subject = hello()
print(greeting)
print(subject)
ace1a21c07ef70cde162291ef95a734facffbdf2 | 7,172 | py | Python | threedb/evaluators/detection.py | lengstrom/3db | 1d4a19600bcc43cf5637f7ee1d16887cba6308d5 | [
"MIT"
] | 1 | 2021-07-07T08:35:46.000Z | 2021-07-07T08:35:46.000Z | threedb/evaluators/detection.py | lengstrom/3db | 1d4a19600bcc43cf5637f7ee1d16887cba6308d5 | [
"MIT"
] | null | null | null | threedb/evaluators/detection.py | lengstrom/3db | 1d4a19600bcc43cf5637f7ee1d16887cba6308d5 | [
"MIT"
] | null | null | null | """
threedb.evaluators.detection
============================
An evaluator for object detection models.
Includes a concrete implementation of
:class:`threedb.evaluators.base_evaluator.BaseEvaluator`.
"""
import json
from typing import Any, Dict, List, Tuple
import torch as ch
from torch.tensor import Tensor
from torchvision.ops import boxes
from typeguard import check_type
from threedb.evaluators.base_evaluator import BaseEvaluator, LabelType, Output
# Default segmentation map value set by blender
# (pixels not covered by any rendered object keep this background index).
BG_IND = -1
class SimpleDetectionEvaluator(BaseEvaluator):
    """Concrete implementation of
    :class:`threedb.evaluators.base_evaluator.BaseEvaluator`.
    """
    output_type = 'bboxes'
    # Up to 100 boxes per image, each row (x1, y1, x2, y2, score, label).
    output_shape = [100, 6]
    KEYS = ['is_correct_nolabel', 'precision_nolabel', 'recall_nolabel',
            'is_correct', 'precision', 'recall', 'is_valid']

    def __init__(self, iou_threshold: float, classmap_path: str,
                 min_recall: float = 1.0, min_precision: float = 0.0):
        super().__init__()
        self.iou_threshold = iou_threshold
        self.min_rec = min_recall
        self.min_prec = min_precision
        # NOTE(review): file handle from open() is never closed explicitly;
        # relies on garbage collection — confirm acceptable.
        self.uid_to_targets: Dict[str, LabelType] = json.load(open(classmap_path))
        # Runtime-validate the loaded JSON against the declared type.
        check_type('uid_to_targets', self.uid_to_targets, Dict[str, LabelType])

    def get_segmentation_label(self, model_uid: str) -> int:
        # A model UID may map to a list of labels; use the first one as the
        # segmentation id.
        label = self.uid_to_targets[model_uid]
        return label[0] if isinstance(label, list) else label

    def declare_outputs(self) -> Dict[str, Tuple[List[int], str]]:
        # Scalar outputs only: (shape, dtype) pairs for each logged key.
        return {
            'is_correct': ([], 'bool'),
            'precision': ([], 'float32'),
            'recall': ([], 'float32'),
            'is_correct_nolabel': ([], 'bool'),
            'precision_nolabel': ([], 'float32'),
            'recall_nolabel': ([], 'float32'),
            'is_valid': ([], 'bool')
        }

    def get_target(self,
                   model_uid: str,
                   render_output: Dict[str, Tensor]) -> ch.Tensor:
        """A label generator for object detection, using the outputted
        segmentation map. Uses a simplistic approach that assumes that each
        label in the segmentation map is a unique object. Specifically, for
        each unique label (object type) in the segmentation map, we make one
        bounding box, defined as the minimum-size box containing all pixels
        of this type in the segmentation map.

        Parameters
        ----------
        model_uid : str
            The UID of the model (not used)
        render_output : Dict[str, Tensor]
            The output of the renderer
            (:meth:`threedb.rendering.base_renderer.BaseRenderer.render_and_apply`)
            for this model. Must have a key called 'segmentation' containing
            the object segmentation.

        Returns
        -------
        ch.Tensor
            A set of bounding boxes and labels for the objects that should be
            detected. In particular, the bounding boxes are stored in ``(x1, y1,
            x2, y2, label)`` format.
        """
        seg_map = render_output['segmentation']
        unique_objs = list(map(int, seg_map.unique()))
        assert BG_IND in unique_objs, f'Background class ({BG_IND}) not found (found {unique_objs})'
        bbs = []
        for obj in unique_objs:
            if obj == BG_IND:
                continue
            # Tight bounding box around all pixels carrying this object id.
            filt = seg_map == obj
            _, cols, rows = ch.where(filt)
            bbs.append([rows.min(), cols.min(), rows.max(), cols.max(), obj])
        return ch.tensor(bbs)

    def summary_stats(self,
                      pred: Dict[str, ch.Tensor],
                      label: ch.Tensor) -> Dict[str, Output]:
        """Concrete implementation of
        :meth:`threedb.evaluators.base_evaluator.BaseEvaluator.summary_stats`

        Parameters
        ----------
        pred : Dict[str, ch.Tensor]
            Same output format as default torchvision detection models in
            evaluation mode, must have keys ('boxes', 'labels', 'scores')
        label : ch.Tensor
            Segmentation map containing the ground-truth objects

        Returns
        -------
        Dict[str, Output]
            The model's performance on the given image. Non-maximal suppression
            is performed on the output of the classifier, the precision and
            recall are calculated using the IOU threshold set at
            instantiation-time (see `here <https://en.wikipedia.org/wiki/Precision_and_recall>`_
            for information on precision and recall in object detection).
            Precision and recall are then thresholded (by the
            ``min_precision`` and ``min_recall`` parameters) to get a single
            boolean representing correctness. We return the corresponding
            keys ``precision_nolabel``, ``recall_nolabel``, and
            ``is_correct_nolabel`` as well as their counterparts
            ``precision``, ``recall`` and ``is_correct`` which take both box
            positions and class labels into account (the former only evaluate
            localization, not labelling).

            Finally, we return a key ``is_valid`` that represents whether we
            the label corresponding to the image is actually a valid class
            label.
        """
        keep_inds = boxes.nms(pred['boxes'], pred['scores'], self.iou_threshold)
        # all_ious: one row per kept prediction, one column per ground-truth box.
        all_ious = boxes.box_iou(pred['boxes'][keep_inds], label[:, :4])
        iou_hits = (all_ious > self.iou_threshold)
        # label_hits[i, j]: prediction i carries the same class as GT box j.
        label_hits = label[:, 4][None].eq(pred['labels'][keep_inds][:, None])
        assert label_hits.shape == iou_hits.shape
        hits = iou_hits & label_hits
        # dim=0 collapses predictions (per-GT hit -> recall);
        # dim=1 collapses ground truth (per-prediction hit -> precision).
        rec_nl, prec_nl = [float(ch.any(iou_hits, dim=d).float().mean().item()) for d in (0, 1)]
        rec, prec = [float(ch.any(hits, dim=d).float().mean().item()) for d in (0, 1)]
        return {
            'is_correct_nolabel': (prec_nl >= self.min_prec) and (rec_nl >= self.min_rec),
            'precision_nolabel': prec_nl,
            'recall_nolabel': rec_nl,
            'is_correct': (prec >= self.min_prec) and (rec >= self.min_rec),
            'precision': prec,
            'recall': rec,
            'is_valid': (int(label[:, 4]) != -1)
        }

    def to_tensor(self, pred: Any, output_shape: List[int], input_shape: List[int]) -> Tensor:
        """Concrete implementation of
        :meth:`threedb.evaluators.base_evaluator.BaseEvaluator.to_tensor`.

        Turns a prediction dictionary into a tensor with the given output_shape
        (N x 6). To do this, we concatenate the prediction into the form ``[(x1,
        y1, x2, y2, score, label)]``.
        """
        _, height, width = input_shape
        # Rows beyond the number of kept boxes stay at the -1 fill value.
        out = ch.zeros(*output_shape) - 1
        keep_inds = boxes.nms(pred['boxes'], pred['scores'], self.iou_threshold)
        num_boxes = keep_inds.shape[0]
        keys = ('boxes', 'scores', 'labels')
        kept_preds = [pred[s][keep_inds].view(num_boxes, -1).float() for s in keys]
        out[:num_boxes] = ch.cat(kept_preds, dim=1)
        # Normalize box coordinates to [0, 1] relative to the image size.
        out[:, [0, 2]] /= width
        out[:, [1, 3]] /= height
        return out

# Export the class under the generic name expected by callers.
Evaluator = SimpleDetectionEvaluator
| 41.697674 | 100 | 0.610429 |
ace1a2e7502cd13e99484f4deddebdb1aa3cca75 | 92 | py | Python | easy_auth/apps.py | seanwiseman/django-easy-auth | 3d15a948da1c08ff9cec98d00f219d124a014cf4 | [
"MIT"
] | 1 | 2017-09-18T11:05:42.000Z | 2017-09-18T11:05:42.000Z | easy_auth/apps.py | seanwiseman/django-easy-auth | 3d15a948da1c08ff9cec98d00f219d124a014cf4 | [
"MIT"
] | 31 | 2017-12-04T07:49:04.000Z | 2021-06-25T15:14:22.000Z | easy_auth/apps.py | seanwiseman/django-easy-auth | 3d15a948da1c08ff9cec98d00f219d124a014cf4 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class EasyAuthConfig(AppConfig):
    """Django AppConfig declaring the ``easy_auth`` application."""
    name = 'easy_auth'
| 15.333333 | 33 | 0.76087 |
ace1a46f9b91a72e19f0f0e63b7842dd0a2f7323 | 44,780 | py | Python | Lib/test/test_import/__init__.py | JelleZijlstra/cpython | 626584284e74a68fff8157f9afe77b3088ff7be9 | [
"PSF-2.0"
] | 7 | 2018-04-12T17:11:04.000Z | 2021-12-02T14:01:47.000Z | Lib/test/test_import/__init__.py | JelleZijlstra/cpython | 626584284e74a68fff8157f9afe77b3088ff7be9 | [
"PSF-2.0"
] | 1 | 2021-12-01T08:11:51.000Z | 2021-12-01T08:11:51.000Z | Lib/test/test_import/__init__.py | JelleZijlstra/cpython | 626584284e74a68fff8157f9afe77b3088ff7be9 | [
"PSF-2.0"
] | 4 | 2018-04-27T18:03:08.000Z | 2020-04-12T23:14:29.000Z | # We import importlib *ASAP* in order to test #15386
import importlib
import importlib.util
from importlib._bootstrap_external import _get_sourcefile
import builtins
import marshal
import os
import platform
import py_compile
import random
import stat
import sys
import unittest
import unittest.mock as mock
import textwrap
import errno
import contextlib
import test.support
from test.support import (
EnvironmentVarGuard, TESTFN, check_warnings, forget, is_jython,
make_legacy_pyc, rmtree, run_unittest, swap_attr, swap_item, temp_umask,
unlink, unload, create_empty_file, cpython_only, TESTFN_UNENCODABLE,
temp_dir)
from test.support import script_helper
# Decorator: skip bytecode-related tests when the interpreter was started
# with -B / PYTHONDONTWRITEBYTECODE, since no .pyc files will be written.
skip_if_dont_write_bytecode = unittest.skipIf(
    sys.dont_write_bytecode,
    "test meaningful only when writing bytecode")
def remove_files(name):
    """Delete every artifact that importing ``name`` may have produced."""
    for suffix in (".py", ".pyc", ".pyw", "$py.class"):
        unlink(name + suffix)
    rmtree('__pycache__')
@contextlib.contextmanager
def _ready_to_import(name=None, source=""):
    # Creates a module file inside a fresh temporary directory, makes that
    # directory importable, and hides any pre-existing module of the same
    # name from sys.modules for the duration of the context; everything is
    # reverted or removed on exit.
    module_name = name if name else "spam"
    with temp_dir() as tempdir:
        script_path = script_helper.make_script(tempdir, module_name, source)
        saved_module = sys.modules.pop(module_name, None)
        try:
            sys.path.insert(0, tempdir)
            yield module_name, script_path
            sys.path.remove(tempdir)
        finally:
            if saved_module is None:
                sys.modules.pop(module_name, None)
            else:
                sys.modules[module_name] = saved_module
class ImportTests(unittest.TestCase):
def setUp(self):
remove_files(TESTFN)
importlib.invalidate_caches()
def tearDown(self):
unload(TESTFN)
def test_import_raises_ModuleNotFoundError(self):
with self.assertRaises(ModuleNotFoundError):
import something_that_should_not_exist_anywhere
def test_from_import_missing_module_raises_ModuleNotFoundError(self):
with self.assertRaises(ModuleNotFoundError):
from something_that_should_not_exist_anywhere import blah
def test_from_import_missing_attr_raises_ImportError(self):
with self.assertRaises(ImportError):
from importlib import something_that_should_not_exist_anywhere
def test_from_import_missing_attr_has_name_and_path(self):
with self.assertRaises(ImportError) as cm:
from os import i_dont_exist
self.assertEqual(cm.exception.name, 'os')
self.assertEqual(cm.exception.path, os.__file__)
self.assertRegex(str(cm.exception), "cannot import name 'i_dont_exist' from 'os' \(.*/Lib/os.py\)")
def test_from_import_missing_attr_has_name_and_so_path(self):
import _opcode
with self.assertRaises(ImportError) as cm:
from _opcode import i_dont_exist
self.assertEqual(cm.exception.name, '_opcode')
self.assertEqual(cm.exception.path, _opcode.__file__)
self.assertRegex(str(cm.exception), "cannot import name 'i_dont_exist' from '_opcode' \(.*\.(so|dll)\)")
def test_from_import_missing_attr_has_name(self):
with self.assertRaises(ImportError) as cm:
# _warning has no path as it's a built-in module.
from _warning import i_dont_exist
self.assertEqual(cm.exception.name, '_warning')
self.assertIsNone(cm.exception.path)
def test_from_import_missing_attr_path_is_canonical(self):
with self.assertRaises(ImportError) as cm:
from os.path import i_dont_exist
self.assertIn(cm.exception.name, {'posixpath', 'ntpath'})
self.assertIsNotNone(cm.exception)
def test_case_sensitivity(self):
# Brief digression to test that import is case-sensitive: if we got
# this far, we know for sure that "random" exists.
with self.assertRaises(ImportError):
import RAnDoM
def test_double_const(self):
# Another brief digression to test the accuracy of manifest float
# constants.
from test import double_const # don't blink -- that *was* the test
def test_import(self):
def test_with_extension(ext):
# The extension is normally ".py", perhaps ".pyw".
source = TESTFN + ext
if is_jython:
pyc = TESTFN + "$py.class"
else:
pyc = TESTFN + ".pyc"
with open(source, "w") as f:
print("# This tests Python's ability to import a",
ext, "file.", file=f)
a = random.randrange(1000)
b = random.randrange(1000)
print("a =", a, file=f)
print("b =", b, file=f)
if TESTFN in sys.modules:
del sys.modules[TESTFN]
importlib.invalidate_caches()
try:
try:
mod = __import__(TESTFN)
except ImportError as err:
self.fail("import from %s failed: %s" % (ext, err))
self.assertEqual(mod.a, a,
"module loaded (%s) but contents invalid" % mod)
self.assertEqual(mod.b, b,
"module loaded (%s) but contents invalid" % mod)
finally:
forget(TESTFN)
unlink(source)
unlink(pyc)
sys.path.insert(0, os.curdir)
try:
test_with_extension(".py")
if sys.platform.startswith("win"):
for ext in [".PY", ".Py", ".pY", ".pyw", ".PYW", ".pYw"]:
test_with_extension(ext)
finally:
del sys.path[0]
def test_module_with_large_stack(self, module='longlist'):
# Regression test for http://bugs.python.org/issue561858.
filename = module + '.py'
# Create a file with a list of 65000 elements.
with open(filename, 'w') as f:
f.write('d = [\n')
for i in range(65000):
f.write('"",\n')
f.write(']')
try:
# Compile & remove .py file; we only need .pyc.
# Bytecode must be relocated from the PEP 3147 bytecode-only location.
py_compile.compile(filename)
finally:
unlink(filename)
# Need to be able to load from current dir.
sys.path.append('')
importlib.invalidate_caches()
namespace = {}
try:
make_legacy_pyc(filename)
# This used to crash.
exec('import ' + module, None, namespace)
finally:
# Cleanup.
del sys.path[-1]
unlink(filename + 'c')
unlink(filename + 'o')
# Remove references to the module (unload the module)
namespace.clear()
try:
del sys.modules[module]
except KeyError:
pass
def test_failing_import_sticks(self):
source = TESTFN + ".py"
with open(source, "w") as f:
print("a = 1/0", file=f)
# New in 2.4, we shouldn't be able to import that no matter how often
# we try.
sys.path.insert(0, os.curdir)
importlib.invalidate_caches()
if TESTFN in sys.modules:
del sys.modules[TESTFN]
try:
for i in [1, 2, 3]:
self.assertRaises(ZeroDivisionError, __import__, TESTFN)
self.assertNotIn(TESTFN, sys.modules,
"damaged module in sys.modules on %i try" % i)
finally:
del sys.path[0]
remove_files(TESTFN)
def test_import_name_binding(self):
# import x.y.z binds x in the current namespace
import test as x
import test.support
self.assertIs(x, test, x.__name__)
self.assertTrue(hasattr(test.support, "__file__"))
# import x.y.z as w binds z as w
import test.support as y
self.assertIs(y, test.support, y.__name__)
def test_failing_reload(self):
# A failing reload should leave the module object in sys.modules.
source = TESTFN + os.extsep + "py"
with open(source, "w") as f:
f.write("a = 1\nb=2\n")
sys.path.insert(0, os.curdir)
try:
mod = __import__(TESTFN)
self.assertIn(TESTFN, sys.modules)
self.assertEqual(mod.a, 1, "module has wrong attribute values")
self.assertEqual(mod.b, 2, "module has wrong attribute values")
# On WinXP, just replacing the .py file wasn't enough to
# convince reload() to reparse it. Maybe the timestamp didn't
# move enough. We force it to get reparsed by removing the
# compiled file too.
remove_files(TESTFN)
# Now damage the module.
with open(source, "w") as f:
f.write("a = 10\nb=20//0\n")
self.assertRaises(ZeroDivisionError, importlib.reload, mod)
# But we still expect the module to be in sys.modules.
mod = sys.modules.get(TESTFN)
self.assertIsNotNone(mod, "expected module to be in sys.modules")
# We should have replaced a w/ 10, but the old b value should
# stick.
self.assertEqual(mod.a, 10, "module has wrong attribute values")
self.assertEqual(mod.b, 2, "module has wrong attribute values")
finally:
del sys.path[0]
remove_files(TESTFN)
unload(TESTFN)
@skip_if_dont_write_bytecode
def test_file_to_source(self):
# check if __file__ points to the source file where available
source = TESTFN + ".py"
with open(source, "w") as f:
f.write("test = None\n")
sys.path.insert(0, os.curdir)
try:
mod = __import__(TESTFN)
self.assertTrue(mod.__file__.endswith('.py'))
os.remove(source)
del sys.modules[TESTFN]
make_legacy_pyc(source)
importlib.invalidate_caches()
mod = __import__(TESTFN)
base, ext = os.path.splitext(mod.__file__)
self.assertEqual(ext, '.pyc')
finally:
del sys.path[0]
remove_files(TESTFN)
if TESTFN in sys.modules:
del sys.modules[TESTFN]
def test_import_by_filename(self):
path = os.path.abspath(TESTFN)
encoding = sys.getfilesystemencoding()
try:
path.encode(encoding)
except UnicodeEncodeError:
self.skipTest('path is not encodable to {}'.format(encoding))
with self.assertRaises(ImportError) as c:
__import__(path)
def test_import_in_del_does_not_crash(self):
# Issue 4236
testfn = script_helper.make_script('', TESTFN, textwrap.dedent("""\
import sys
class C:
def __del__(self):
import importlib
sys.argv.insert(0, C())
"""))
script_helper.assert_python_ok(testfn)
@skip_if_dont_write_bytecode
def test_timestamp_overflow(self):
# A modification timestamp larger than 2**32 should not be a problem
# when importing a module (issue #11235).
sys.path.insert(0, os.curdir)
try:
source = TESTFN + ".py"
compiled = importlib.util.cache_from_source(source)
with open(source, 'w') as f:
pass
try:
os.utime(source, (2 ** 33 - 5, 2 ** 33 - 5))
except OverflowError:
self.skipTest("cannot set modification time to large integer")
except OSError as e:
if e.errno not in (getattr(errno, 'EOVERFLOW', None),
getattr(errno, 'EINVAL', None)):
raise
self.skipTest("cannot set modification time to large integer ({})".format(e))
__import__(TESTFN)
# The pyc file was created.
os.stat(compiled)
finally:
del sys.path[0]
remove_files(TESTFN)
def test_bogus_fromlist(self):
try:
__import__('http', fromlist=['blah'])
except ImportError:
self.fail("fromlist must allow bogus names")
@cpython_only
def test_delete_builtins_import(self):
args = ["-c", "del __builtins__.__import__; import os"]
popen = script_helper.spawn_python(*args)
stdout, stderr = popen.communicate()
self.assertIn(b"ImportError", stdout)
def test_from_import_message_for_nonexistent_module(self):
with self.assertRaisesRegex(ImportError, "^No module named 'bogus'"):
from bogus import foo
    def test_from_import_message_for_existing_module(self):
        # When the module exists but the attribute does not, the message
        # must name the missing attribute instead.
        with self.assertRaisesRegex(ImportError, "^cannot import name 'bogus'"):
            from re import bogus
def test_from_import_AttributeError(self):
# Issue #24492: trying to import an attribute that raises an
# AttributeError should lead to an ImportError.
class AlwaysAttributeError:
def __getattr__(self, _):
raise AttributeError
module_name = 'test_from_import_AttributeError'
self.addCleanup(unload, module_name)
sys.modules[module_name] = AlwaysAttributeError()
with self.assertRaises(ImportError) as cm:
from test_from_import_AttributeError import does_not_exist
self.assertEqual(str(cm.exception),
"cannot import name 'does_not_exist' from '<unknown module name>' (unknown location)")
@skip_if_dont_write_bytecode
class FilePermissionTests(unittest.TestCase):
    # tests for file mode on cached .pyc files
    @unittest.skipUnless(os.name == 'posix',
                         "test meaningful only on posix systems")
    def test_creation_mode(self):
        """A freshly written .pyc honours the umask and is not executable."""
        mask = 0o022
        with temp_umask(mask), _ready_to_import() as (name, path):
            cached_path = importlib.util.cache_from_source(path)
            module = __import__(name)
            if not os.path.exists(cached_path):
                self.fail("__import__ did not result in creation of "
                          "a .pyc file")
            stat_info = os.stat(cached_path)
        # Check that the umask is respected, and the executable bits
        # aren't set.
        self.assertEqual(oct(stat.S_IMODE(stat_info.st_mode)),
                         oct(0o666 & ~mask))
    @unittest.skipUnless(os.name == 'posix',
                         "test meaningful only on posix systems")
    def test_cached_mode_issue_2051(self):
        # permissions of .pyc should match those of .py, regardless of mask
        mode = 0o600
        with temp_umask(0o022), _ready_to_import() as (name, path):
            cached_path = importlib.util.cache_from_source(path)
            # chmod the source *before* importing so the .pyc inherits it.
            os.chmod(path, mode)
            __import__(name)
            if not os.path.exists(cached_path):
                self.fail("__import__ did not result in creation of "
                          "a .pyc file")
            stat_info = os.stat(cached_path)
        self.assertEqual(oct(stat.S_IMODE(stat_info.st_mode)), oct(mode))
    @unittest.skipUnless(os.name == 'posix',
                         "test meaningful only on posix systems")
    def test_cached_readonly(self):
        """A read-only .py still yields a .pyc with the write bit added."""
        mode = 0o400
        with temp_umask(0o022), _ready_to_import() as (name, path):
            cached_path = importlib.util.cache_from_source(path)
            os.chmod(path, mode)
            __import__(name)
            if not os.path.exists(cached_path):
                self.fail("__import__ did not result in creation of "
                          "a .pyc file")
            stat_info = os.stat(cached_path)
        expected = mode | 0o200 # Account for fix for issue #6074
        self.assertEqual(oct(stat.S_IMODE(stat_info.st_mode)), oct(expected))
    def test_pyc_always_writable(self):
        # Initially read-only .pyc files on Windows used to cause problems
        # with later updates, see issue #6074 for details
        with _ready_to_import() as (name, path):
            # Write a Python file, make it read-only and import it
            with open(path, 'w') as f:
                f.write("x = 'original'\n")
            # Tweak the mtime of the source to ensure pyc gets updated later
            s = os.stat(path)
            os.utime(path, (s.st_atime, s.st_mtime-100000000))
            os.chmod(path, 0o400)
            m = __import__(name)
            self.assertEqual(m.x, 'original')
            # Change the file and then reimport it
            os.chmod(path, 0o600)
            with open(path, 'w') as f:
                f.write("x = 'rewritten'\n")
            unload(name)
            importlib.invalidate_caches()
            m = __import__(name)
            self.assertEqual(m.x, 'rewritten')
            # Now delete the source file and check the pyc was rewritten
            unlink(path)
            unload(name)
            importlib.invalidate_caches()
            # Move the .pyc into the legacy (source-adjacent) location so it
            # alone can satisfy the import.
            bytecode_only = path + "c"
            os.rename(importlib.util.cache_from_source(path), bytecode_only)
            m = __import__(name)
            self.assertEqual(m.x, 'rewritten')
class PycRewritingTests(unittest.TestCase):
    # Test that the `co_filename` attribute on code objects always points
    # to the right file, even when various things happen (e.g. both the .py
    # and the .pyc file are renamed).
    module_name = "unlikely_module_name"
    # Source of the module under test; it records the file names seen by the
    # frame, the module and a function so the tests can compare them.
    module_source = """
import sys
code_filename = sys._getframe().f_code.co_filename
module_filename = __file__
constant = 1
def func():
    pass
func_filename = func.__code__.co_filename
"""
    dir_name = os.path.abspath(TESTFN)
    file_name = os.path.join(dir_name, module_name) + os.extsep + "py"
    compiled_name = importlib.util.cache_from_source(file_name)
    def setUp(self):
        # Save state that the tests mutate so tearDown can restore it.
        self.sys_path = sys.path[:]
        self.orig_module = sys.modules.pop(self.module_name, None)
        os.mkdir(self.dir_name)
        with open(self.file_name, "w") as f:
            f.write(self.module_source)
        sys.path.insert(0, self.dir_name)
        importlib.invalidate_caches()
    def tearDown(self):
        sys.path[:] = self.sys_path
        if self.orig_module is not None:
            sys.modules[self.module_name] = self.orig_module
        else:
            unload(self.module_name)
        unlink(self.file_name)
        unlink(self.compiled_name)
        rmtree(self.dir_name)
    def import_module(self):
        """Import the test module and return it from sys.modules."""
        ns = globals()
        __import__(self.module_name, ns, ns)
        return sys.modules[self.module_name]
    def test_basics(self):
        # All recorded filenames point at the source file, both on the
        # first import and on a re-import from the cached bytecode.
        mod = self.import_module()
        self.assertEqual(mod.module_filename, self.file_name)
        self.assertEqual(mod.code_filename, self.file_name)
        self.assertEqual(mod.func_filename, self.file_name)
        del sys.modules[self.module_name]
        mod = self.import_module()
        self.assertEqual(mod.module_filename, self.file_name)
        self.assertEqual(mod.code_filename, self.file_name)
        self.assertEqual(mod.func_filename, self.file_name)
    def test_incorrect_code_name(self):
        # Compile with a deliberately wrong display name (dfile); the import
        # machinery must still report the real source file.
        py_compile.compile(self.file_name, dfile="another_module.py")
        mod = self.import_module()
        self.assertEqual(mod.module_filename, self.file_name)
        self.assertEqual(mod.code_filename, self.file_name)
        self.assertEqual(mod.func_filename, self.file_name)
    def test_module_without_source(self):
        # When only a legacy .pyc exists, __file__ is the pyc while the code
        # objects keep the compile-time display name (dfile).
        target = "another_module.py"
        py_compile.compile(self.file_name, dfile=target)
        os.remove(self.file_name)
        pyc_file = make_legacy_pyc(self.file_name)
        importlib.invalidate_caches()
        mod = self.import_module()
        self.assertEqual(mod.module_filename, pyc_file)
        self.assertEqual(mod.code_filename, target)
        self.assertEqual(mod.func_filename, target)
    def test_foreign_code(self):
        # Splice a code object compiled from a different file into the
        # module's constants; its co_filename must survive the import.
        py_compile.compile(self.file_name)
        with open(self.compiled_name, "rb") as f:
            header = f.read(12)  # pyc header for this interpreter version
            code = marshal.load(f)
        constants = list(code.co_consts)
        foreign_code = importlib.import_module.__code__
        pos = constants.index(1)
        constants[pos] = foreign_code
        # Rebuild the module code object with the patched constants tuple.
        code = type(code)(code.co_argcount, code.co_kwonlyargcount,
                          code.co_nlocals, code.co_stacksize,
                          code.co_flags, code.co_code, tuple(constants),
                          code.co_names, code.co_varnames, code.co_filename,
                          code.co_name, code.co_firstlineno, code.co_lnotab,
                          code.co_freevars, code.co_cellvars)
        with open(self.compiled_name, "wb") as f:
            f.write(header)
            marshal.dump(code, f)
        mod = self.import_module()
        self.assertEqual(mod.constant.co_filename, foreign_code.co_filename)
class PathsTests(unittest.TestCase):
    """Tests for importing from unusual sys.path entries."""
    # Sample names containing non-ASCII characters (not all of them are
    # referenced in this chunk of the file).
    SAMPLES = ('test', 'test\u00e4\u00f6\u00fc\u00df', 'test\u00e9\u00e8',
               'test\u00b0\u00b3\u00b2')
    path = TESTFN
    def setUp(self):
        os.mkdir(self.path)
        self.syspath = sys.path[:]
    def tearDown(self):
        rmtree(self.path)
        sys.path[:] = self.syspath
    # Regression test for http://bugs.python.org/issue1293.
    def test_trailing_slash(self):
        # A sys.path entry with a trailing slash must still be importable.
        with open(os.path.join(self.path, 'test_trailing_slash.py'), 'w') as f:
            f.write("testdata = 'test_trailing_slash'")
        sys.path.append(self.path+'/')
        mod = __import__("test_trailing_slash")
        self.assertEqual(mod.testdata, 'test_trailing_slash')
        unload("test_trailing_slash")
    # Regression test for http://bugs.python.org/issue3677.
    @unittest.skipUnless(sys.platform == 'win32', 'Windows-specific')
    def test_UNC_path(self):
        # Importing from a UNC share path must work on Windows.
        with open(os.path.join(self.path, 'test_unc_path.py'), 'w') as f:
            f.write("testdata = 'test_unc_path'")
        importlib.invalidate_caches()
        # Create the UNC path, like \\myhost\c$\foo\bar.
        path = os.path.abspath(self.path)
        import socket
        hn = socket.gethostname()
        drive = path[0]
        unc = "\\\\%s\\%s$"%(hn, drive)
        unc += path[2:]
        try:
            os.listdir(unc)
        except OSError as e:
            if e.errno in (errno.EPERM, errno.EACCES):
                # See issue #15338
                self.skipTest("cannot access administrative share %r" % (unc,))
            raise
        sys.path.insert(0, unc)
        try:
            mod = __import__("test_unc_path")
        except ImportError as e:
            self.fail("could not import 'test_unc_path' from %r: %r"
                      % (unc, e))
        self.assertEqual(mod.testdata, 'test_unc_path')
        self.assertTrue(mod.__file__.startswith(unc), mod.__file__)
        unload("test_unc_path")
class RelativeImportTests(unittest.TestCase):
    """Tests for explicit relative imports (``from . import x``)."""
    def tearDown(self):
        unload("test.relimport")
    # The same cleanup is needed before each test as well.
    setUp = tearDown
    def test_relimport_star(self):
        # This will import * from .test_import.
        from .. import relimport
        self.assertTrue(hasattr(relimport, "RelativeImportTests"))
    def test_issue3221(self):
        # Note for mergers: the 'absolute' tests from the 2.x branch
        # are missing in Py3k because implicit relative imports are
        # a thing of the past
        #
        # Regression test for http://bugs.python.org/issue3221.
        def check_relative():
            # Run the relative import inside the namespace ``ns`` so the
            # test can control __package__ and __name__ independently.
            exec("from . import relimport", ns)
        # Check relative import OK with __package__ and __name__ correct
        ns = dict(__package__='test', __name__='test.notarealmodule')
        check_relative()
        # Check relative import OK with only __name__ wrong
        ns = dict(__package__='test', __name__='notarealpkg.notarealmodule')
        check_relative()
        # Check relative import fails with only __package__ wrong
        ns = dict(__package__='foo', __name__='test.notarealmodule')
        self.assertRaises(SystemError, check_relative)
        # Check relative import fails with __package__ and __name__ wrong
        ns = dict(__package__='foo', __name__='notarealpkg.notarealmodule')
        self.assertRaises(SystemError, check_relative)
        # Check relative import fails with package set to a non-string
        ns = dict(__package__=object())
        self.assertRaises(TypeError, check_relative)
    def test_absolute_import_without_future(self):
        # If explicit relative import syntax is used, then do not try
        # to perform an absolute import in the face of failure.
        # Issue #7902.
        with self.assertRaises(ImportError):
            from .os import sep
            self.fail("explicit relative import triggered an "
                      "implicit absolute import")
class OverridingImportBuiltinTests(unittest.TestCase):
    def test_override_builtin(self):
        # Test that overriding builtins.__import__ can bypass sys.modules.
        import os
        def foo():
            import os
            return os
        self.assertEqual(foo(), os)  # Quick sanity check.
        # With builtins.__import__ replaced, the import statement uses the
        # replacement even though 'os' is already in sys.modules.
        with swap_attr(builtins, "__import__", lambda *x: 5):
            self.assertEqual(foo(), 5)
        # Test what happens when we shadow __import__ in globals(); this
        # currently does not impact the import process, but if this changes,
        # other code will need to change, so keep this test as a tripwire.
        with swap_item(globals(), "__import__", lambda *x: 5):
            self.assertEqual(foo(), os)
class PycacheTests(unittest.TestCase):
    """Test the various PEP 3147/488-related behaviors.

    Each test writes a fresh TESTFN module into the current directory and
    inspects the __pycache__ layout, the __cached__ attribute, or the
    recompilation rules.
    """
    def _clean(self):
        # Drop the module from sys.modules and remove everything it created.
        forget(TESTFN)
        rmtree('__pycache__')
        unlink(self.source)
    def setUp(self):
        self.source = TESTFN + '.py'
        self._clean()
        with open(self.source, 'w') as fp:
            print('# This is a test file written by test_import.py', file=fp)
        sys.path.insert(0, os.curdir)
        importlib.invalidate_caches()
    def tearDown(self):
        assert sys.path[0] == os.curdir, 'Unexpected sys.path[0]'
        del sys.path[0]
        self._clean()
    def _make_pep3147_package(self):
        """Create an empty 'pep3147' package with a 'foo' submodule.

        Shared setup for the package __cached__ tests below; registers a
        cleanup that removes the directory and unloads both modules.
        """
        def cleanup():
            rmtree('pep3147')
            unload('pep3147.foo')
            unload('pep3147')
        os.mkdir('pep3147')
        self.addCleanup(cleanup)
        # Touch the __init__.py and the submodule.
        with open(os.path.join('pep3147', '__init__.py'), 'w'):
            pass
        with open(os.path.join('pep3147', 'foo.py'), 'w'):
            pass
        importlib.invalidate_caches()
    @skip_if_dont_write_bytecode
    def test_import_pyc_path(self):
        # Importing the source must create __pycache__ with a tagged .pyc.
        self.assertFalse(os.path.exists('__pycache__'))
        __import__(TESTFN)
        self.assertTrue(os.path.exists('__pycache__'))
        pyc_path = importlib.util.cache_from_source(self.source)
        self.assertTrue(os.path.exists(pyc_path),
                        'bytecode file {!r} for {!r} does not '
                        'exist'.format(pyc_path, TESTFN))
    @unittest.skipUnless(os.name == 'posix',
                         "test meaningful only on posix systems")
    @unittest.skipIf(hasattr(os, 'geteuid') and os.geteuid() == 0,
            "due to varying filesystem permission semantics (issue #11956)")
    @skip_if_dont_write_bytecode
    def test_unwritable_directory(self):
        # When the umask causes the new __pycache__ directory to be
        # unwritable, the import still succeeds but no .pyc file is written.
        with temp_umask(0o222):
            __import__(TESTFN)
        self.assertTrue(os.path.exists('__pycache__'))
        pyc_path = importlib.util.cache_from_source(self.source)
        self.assertFalse(os.path.exists(pyc_path),
                         'bytecode file {!r} for {!r} '
                         'exists'.format(pyc_path, TESTFN))
    @skip_if_dont_write_bytecode
    def test_missing_source(self):
        # With PEP 3147 cache layout, removing the source but leaving the pyc
        # file does not satisfy the import.
        __import__(TESTFN)
        pyc_file = importlib.util.cache_from_source(self.source)
        self.assertTrue(os.path.exists(pyc_file))
        os.remove(self.source)
        forget(TESTFN)
        importlib.invalidate_caches()
        self.assertRaises(ImportError, __import__, TESTFN)
    @skip_if_dont_write_bytecode
    def test_missing_source_legacy(self):
        # Like test_missing_source() except that for backward compatibility,
        # when the pyc file lives where the py file would have been (and named
        # without the tag), it is importable. The __file__ of the imported
        # module is the pyc location.
        __import__(TESTFN)
        # pyc_file gets removed in _clean() via tearDown().
        pyc_file = make_legacy_pyc(self.source)
        os.remove(self.source)
        unload(TESTFN)
        importlib.invalidate_caches()
        m = __import__(TESTFN)
        self.assertEqual(m.__file__,
                         os.path.join(os.curdir, os.path.relpath(pyc_file)))
    def test___cached__(self):
        # Modules now also have an __cached__ that points to the pyc file.
        m = __import__(TESTFN)
        pyc_file = importlib.util.cache_from_source(TESTFN + '.py')
        self.assertEqual(m.__cached__, os.path.join(os.curdir, pyc_file))
    @skip_if_dont_write_bytecode
    def test___cached___legacy_pyc(self):
        # Like test___cached__() except that for backward compatibility,
        # when the pyc file lives where the py file would have been (and named
        # without the tag), it is importable. The __cached__ of the imported
        # module is the pyc location.
        __import__(TESTFN)
        # pyc_file gets removed in _clean() via tearDown().
        pyc_file = make_legacy_pyc(self.source)
        os.remove(self.source)
        unload(TESTFN)
        importlib.invalidate_caches()
        m = __import__(TESTFN)
        self.assertEqual(m.__cached__,
                         os.path.join(os.curdir, os.path.relpath(pyc_file)))
    @skip_if_dont_write_bytecode
    def test_package___cached__(self):
        # Like test___cached__ but for packages.
        self._make_pep3147_package()
        m = __import__('pep3147.foo')
        init_pyc = importlib.util.cache_from_source(
            os.path.join('pep3147', '__init__.py'))
        self.assertEqual(m.__cached__, os.path.join(os.curdir, init_pyc))
        foo_pyc = importlib.util.cache_from_source(os.path.join('pep3147', 'foo.py'))
        self.assertEqual(sys.modules['pep3147.foo'].__cached__,
                         os.path.join(os.curdir, foo_pyc))
    def test_package___cached___from_pyc(self):
        # Like test___cached__ but ensuring __cached__ when imported from a
        # PEP 3147 pyc file.
        self._make_pep3147_package()
        # Import once to generate the pyc files, then reimport from them.
        m = __import__('pep3147.foo')
        unload('pep3147.foo')
        unload('pep3147')
        importlib.invalidate_caches()
        m = __import__('pep3147.foo')
        init_pyc = importlib.util.cache_from_source(
            os.path.join('pep3147', '__init__.py'))
        self.assertEqual(m.__cached__, os.path.join(os.curdir, init_pyc))
        foo_pyc = importlib.util.cache_from_source(os.path.join('pep3147', 'foo.py'))
        self.assertEqual(sys.modules['pep3147.foo'].__cached__,
                         os.path.join(os.curdir, foo_pyc))
    def test_recompute_pyc_same_second(self):
        # Even when the source file doesn't change timestamp, a change in
        # source size is enough to trigger recomputation of the pyc file.
        __import__(TESTFN)
        unload(TESTFN)
        with open(self.source, 'a') as fp:
            print("x = 5", file=fp)
        m = __import__(TESTFN)
        self.assertEqual(m.x, 5)
class TestSymbolicallyLinkedPackage(unittest.TestCase):
    """A package reached only through a directory symlink must import."""
    package_name = 'sample'
    tagged = package_name + '-tagged'
    def setUp(self):
        test.support.rmtree(self.tagged)
        test.support.rmtree(self.package_name)
        self.orig_sys_path = sys.path[:]
        # create a sample package; imagine you have a package with a tag and
        # you want to symbolically link it from its untagged name.
        os.mkdir(self.tagged)
        self.addCleanup(test.support.rmtree, self.tagged)
        init_file = os.path.join(self.tagged, '__init__.py')
        test.support.create_empty_file(init_file)
        assert os.path.exists(init_file)
        # now create a symlink to the tagged package
        # sample -> sample-tagged
        os.symlink(self.tagged, self.package_name, target_is_directory=True)
        self.addCleanup(test.support.unlink, self.package_name)
        importlib.invalidate_caches()
        self.assertEqual(os.path.isdir(self.package_name), True)
        assert os.path.isfile(os.path.join(self.package_name, '__init__.py'))
    def tearDown(self):
        sys.path[:] = self.orig_sys_path
    # regression test for issue6727
    @unittest.skipUnless(
        not hasattr(sys, 'getwindowsversion')
        or sys.getwindowsversion() >= (6, 0),
        "Windows Vista or later required")
    @test.support.skip_unless_symlink
    def test_symlinked_dir_importable(self):
        # make sure sample can only be imported from the current directory.
        sys.path[:] = ['.']
        assert os.path.exists(self.package_name)
        assert os.path.exists(os.path.join(self.package_name, '__init__.py'))
        # Try to import the package
        importlib.import_module(self.package_name)
@cpython_only
class ImportlibBootstrapTests(unittest.TestCase):
    """Check that the import machinery was bootstrapped from importlib.

    CPython freezes ``importlib._bootstrap`` and
    ``importlib._bootstrap_external`` and registers them under the
    ``_frozen_importlib*`` names; these tests verify that wiring.
    """
    def test_frozen_importlib(self):
        # The frozen bootstrap module must be present after startup.
        frozen = sys.modules['_frozen_importlib']
        self.assertTrue(frozen)
    def test_frozen_importlib_is_bootstrap(self):
        from importlib import _bootstrap
        frozen = sys.modules['_frozen_importlib']
        # Same object, advertised under its importlib identity.
        self.assertIs(frozen, _bootstrap)
        self.assertEqual(frozen.__name__, 'importlib._bootstrap')
        self.assertEqual(frozen.__package__, 'importlib')
        self.assertTrue(frozen.__file__.endswith('_bootstrap.py'),
                        frozen.__file__)
    def test_frozen_importlib_external_is_bootstrap_external(self):
        from importlib import _bootstrap_external
        frozen = sys.modules['_frozen_importlib_external']
        self.assertIs(frozen, _bootstrap_external)
        self.assertEqual(frozen.__name__, 'importlib._bootstrap_external')
        self.assertEqual(frozen.__package__, 'importlib')
        self.assertTrue(frozen.__file__.endswith('_bootstrap_external.py'),
                        frozen.__file__)
    def test_there_can_be_only_one(self):
        # Issue #15386 revealed a tricky loophole in the bootstrapping
        # This test is technically redundant, since the bug caused importing
        # this test module to crash completely, but it helps prove the point
        from importlib import machinery
        frozen = sys.modules['_frozen_importlib']
        self.assertIs(machinery.ModuleSpec, frozen.ModuleSpec)
@cpython_only
class GetSourcefileTests(unittest.TestCase):
    """Test importlib._bootstrap_external._get_sourcefile() as used by the C API.

    Because of the peculiarities of the need of this function, the tests are
    knowingly whitebox tests.  _path_isfile is patched so no filesystem
    access actually happens.  (Also drops the stray trailing semicolons the
    original had after the return_value assignments.)
    """
    def test_get_sourcefile(self):
        # Given a valid bytecode path, return the path to the corresponding
        # source file if it exists.
        with mock.patch('importlib._bootstrap_external._path_isfile') as _path_isfile:
            _path_isfile.return_value = True
            path = TESTFN + '.pyc'
            expect = TESTFN + '.py'
            self.assertEqual(_get_sourcefile(path), expect)
    def test_get_sourcefile_no_source(self):
        # Given a valid bytecode path without a corresponding source path,
        # return the original bytecode path.
        with mock.patch('importlib._bootstrap_external._path_isfile') as _path_isfile:
            _path_isfile.return_value = False
            path = TESTFN + '.pyc'
            self.assertEqual(_get_sourcefile(path), path)
    def test_get_sourcefile_bad_ext(self):
        # Given a path with an invalid bytecode extension, return the
        # bytecode path passed as the argument.
        path = TESTFN + '.bad_ext'
        self.assertEqual(_get_sourcefile(path), path)
class ImportTracebackTests(unittest.TestCase):
    """Check which frames survive traceback stripping on import failure.

    The import machinery removes its own internal frames from tracebacks
    raised while importing; failures should point at user code instead.
    """
    def setUp(self):
        os.mkdir(TESTFN)
        self.old_path = sys.path[:]
        sys.path.insert(0, TESTFN)
    def tearDown(self):
        sys.path[:] = self.old_path
        rmtree(TESTFN)
    def create_module(self, mod, contents, ext=".py"):
        """Write module *mod* with *contents* under TESTFN; return its path.

        The module is scheduled for unloading at test cleanup.
        """
        fname = os.path.join(TESTFN, mod + ext)
        with open(fname, "w") as f:
            f.write(contents)
        self.addCleanup(unload, mod)
        importlib.invalidate_caches()
        return fname
    def assert_traceback(self, tb, files):
        """Assert that *tb* runs through exactly *files*.

        Consecutive frames from the same file are collapsed first; each
        expected entry is matched as a substring of the frame's filename.
        """
        deduped_files = []
        while tb:
            code = tb.tb_frame.f_code
            fn = code.co_filename
            if not deduped_files or fn != deduped_files[-1]:
                deduped_files.append(fn)
            tb = tb.tb_next
        self.assertEqual(len(deduped_files), len(files), deduped_files)
        for fn, pat in zip(deduped_files, files):
            self.assertIn(pat, fn)
    def test_nonexistent_module(self):
        try:
            # assertRaises() clears __traceback__
            import nonexistent_xyzzy
        except ImportError as e:
            tb = e.__traceback__
        else:
            self.fail("ImportError should have been raised")
        self.assert_traceback(tb, [__file__])
    def test_nonexistent_module_nested(self):
        self.create_module("foo", "import nonexistent_xyzzy")
        try:
            import foo
        except ImportError as e:
            tb = e.__traceback__
        else:
            self.fail("ImportError should have been raised")
        self.assert_traceback(tb, [__file__, 'foo.py'])
    def test_exec_failure(self):
        self.create_module("foo", "1/0")
        try:
            import foo
        except ZeroDivisionError as e:
            tb = e.__traceback__
        else:
            self.fail("ZeroDivisionError should have been raised")
        self.assert_traceback(tb, [__file__, 'foo.py'])
    def test_exec_failure_nested(self):
        self.create_module("foo", "import bar")
        self.create_module("bar", "1/0")
        try:
            import foo
        except ZeroDivisionError as e:
            tb = e.__traceback__
        else:
            self.fail("ZeroDivisionError should have been raised")
        self.assert_traceback(tb, [__file__, 'foo.py', 'bar.py'])
    # A few more examples from issue #15425
    def test_syntax_error(self):
        self.create_module("foo", "invalid syntax is invalid")
        try:
            import foo
        except SyntaxError as e:
            tb = e.__traceback__
        else:
            self.fail("SyntaxError should have been raised")
        self.assert_traceback(tb, [__file__])
    def _setup_broken_package(self, parent, child):
        """Create package _parent_foo with *parent* as its __init__.py and
        *child* as bar.py; return both file paths."""
        pkg_name = "_parent_foo"
        self.addCleanup(unload, pkg_name)
        pkg_path = os.path.join(TESTFN, pkg_name)
        os.mkdir(pkg_path)
        # Touch the __init__.py
        init_path = os.path.join(pkg_path, '__init__.py')
        with open(init_path, 'w') as f:
            f.write(parent)
        bar_path = os.path.join(pkg_path, 'bar.py')
        with open(bar_path, 'w') as f:
            f.write(child)
        importlib.invalidate_caches()
        return init_path, bar_path
    def test_broken_submodule(self):
        init_path, bar_path = self._setup_broken_package("", "1/0")
        try:
            import _parent_foo.bar
        except ZeroDivisionError as e:
            tb = e.__traceback__
        else:
            self.fail("ZeroDivisionError should have been raised")
        self.assert_traceback(tb, [__file__, bar_path])
    def test_broken_from(self):
        init_path, bar_path = self._setup_broken_package("", "1/0")
        try:
            from _parent_foo import bar
        except ZeroDivisionError as e:
            tb = e.__traceback__
        else:
            # Fixed message: this test expects ZeroDivisionError (see the
            # except clause above); the previous text said ImportError.
            self.fail("ZeroDivisionError should have been raised")
        self.assert_traceback(tb, [__file__, bar_path])
    def test_broken_parent(self):
        init_path, bar_path = self._setup_broken_package("1/0", "")
        try:
            import _parent_foo.bar
        except ZeroDivisionError as e:
            tb = e.__traceback__
        else:
            self.fail("ZeroDivisionError should have been raised")
        self.assert_traceback(tb, [__file__, init_path])
    def test_broken_parent_from(self):
        init_path, bar_path = self._setup_broken_package("1/0", "")
        try:
            from _parent_foo import bar
        except ZeroDivisionError as e:
            tb = e.__traceback__
        else:
            self.fail("ZeroDivisionError should have been raised")
        self.assert_traceback(tb, [__file__, init_path])
    @cpython_only
    def test_import_bug(self):
        # We simulate a bug in importlib and check that it's not stripped
        # away from the traceback.
        self.create_module("foo", "")
        # NOTE: this local deliberately shadows the importlib module import.
        importlib = sys.modules['_frozen_importlib_external']
        if 'load_module' in vars(importlib.SourceLoader):
            old_exec_module = importlib.SourceLoader.exec_module
        else:
            old_exec_module = None
        try:
            def exec_module(*args):
                1/0
            importlib.SourceLoader.exec_module = exec_module
            try:
                import foo
            except ZeroDivisionError as e:
                tb = e.__traceback__
            else:
                self.fail("ZeroDivisionError should have been raised")
            self.assert_traceback(tb, [__file__, '<frozen importlib', __file__])
        finally:
            # Restore (or remove) the patched loader method.
            if old_exec_module is None:
                del importlib.SourceLoader.exec_module
            else:
                importlib.SourceLoader.exec_module = old_exec_module
    @unittest.skipUnless(TESTFN_UNENCODABLE, 'need TESTFN_UNENCODABLE')
    def test_unencodable_filename(self):
        # Issue #11619: The Python parser and the import machinery must not
        # encode filenames, especially on Windows
        pyname = script_helper.make_script('', TESTFN_UNENCODABLE, 'pass')
        self.addCleanup(unlink, pyname)
        name = pyname[:-3]
        script_helper.assert_python_ok("-c", "mod = __import__(%a)" % name,
                                       __isolated=False)
class CircularImportTests(unittest.TestCase):
    """See the docstrings of the modules being imported for the purpose of the
    test."""
    def tearDown(self):
        """Make sure no modules pre-exist in sys.modules which are being used to
        test."""
        # Iterate over a snapshot of the keys: sys.modules is mutated below.
        for key in list(sys.modules.keys()):
            if key.startswith('test.test_import.data.circular_imports'):
                del sys.modules[key]
    def test_direct(self):
        # Two modules that import each other directly.
        try:
            import test.test_import.data.circular_imports.basic
        except ImportError:
            self.fail('circular import through relative imports failed')
    def test_indirect(self):
        # A relative import inside a module participating in the cycle.
        try:
            import test.test_import.data.circular_imports.indirect
        except ImportError:
            self.fail('relative import in module contributing to circular '
                      'import failed')
    def test_subpackage(self):
        # A circular import that crosses a subpackage boundary.
        try:
            import test.test_import.data.circular_imports.subpackage
        except ImportError:
            self.fail('circular import involving a subpackage failed')
    def test_rebinding(self):
        # The 'util' attribute is rebound during the circular import; both
        # views must end up referring to the same object.
        try:
            import test.test_import.data.circular_imports.rebinding as rebinding
        except ImportError:
            self.fail('circular import with rebinding of module attribute failed')
        from test.test_import.data.circular_imports.subpkg import util
        self.assertIs(util.util, rebinding.util)
if __name__ == '__main__':
    # This test lives in a package (so the relative-import tests above can
    # work); when executed directly, defer to unittest's CLI runner.
    unittest.main()
| 38.110638 | 112 | 0.618848 |
ace1a4e4ac6c29f2ebc32f9774befe024cd5d2f6 | 2,072 | py | Python | pylearn2/scripts/datasets/make_cifar100_gcn_whitened.py | Menerve/pylearn2 | ad7bcfda3294404aebd71f5a5c4a8623d401a98e | [
"BSD-3-Clause"
] | 3 | 2016-01-23T10:18:39.000Z | 2019-02-28T06:22:45.000Z | pylearn2/scripts/datasets/make_cifar100_gcn_whitened.py | Menerve/pylearn2 | ad7bcfda3294404aebd71f5a5c4a8623d401a98e | [
"BSD-3-Clause"
] | null | null | null | pylearn2/scripts/datasets/make_cifar100_gcn_whitened.py | Menerve/pylearn2 | ad7bcfda3294404aebd71f5a5c4a8623d401a98e | [
"BSD-3-Clause"
] | null | null | null | """
This script makes a dataset of 32x32 contrast normalized, approximately
whitened CIFAR-100 images.
"""
from pylearn2.utils import serial
from pylearn2.datasets import preprocessing
from pylearn2.utils import string_utils
from pylearn2.datasets.cifar100 import CIFAR100
# Resolve the CIFAR-100 data directory from the PYLEARN2_DATA_PATH env var.
data_dir = string_utils.preprocess('${PYLEARN2_DATA_PATH}/cifar100')
print 'Loading CIFAR-100 train dataset...'
# gcn = 55. -- global contrast normalization scale passed to the dataset
# loader (presumably the per-example norm target; see the CIFAR100 API).
train = CIFAR100(which_set = 'train', gcn = 55.)
print "Preparing output directory..."
output_dir = data_dir + '/pylearn2_gcn_whitened'
serial.mkdir( output_dir )
# Drop a README alongside the pickles explaining what they contain.
README = open(output_dir + '/README','w')
README.write("""
The .pkl files in this directory may be opened in python using
cPickle, pickle, or pylearn2.serial.load.
train.pkl, and test.pkl each contain
a pylearn2 Dataset object defining a labeled
dataset of a 32x32 contrast normalized, approximately whitened version of the CIFAR-100
dataset. train.pkl contains labeled train examples. test.pkl
contains labeled test examples.
preprocessor.pkl contains a pylearn2 ZCA object that was used
to approximately whiten the images. You may want to use this
object later to preprocess other images.
They were created with the pylearn2 script make_cifar100_gcn_whitened.py.
All other files in this directory, including this README, were
created by the same script and are necessary for the other files
to function correctly.
""")
README.close()
print "Learning the preprocessor and preprocessing the unsupervised train data..."
# Fit the ZCA whitening transform on the training set (can_fit=True).
preprocessor = preprocessing.ZCA()
train.apply_preprocessor(preprocessor = preprocessor, can_fit = True)
print 'Saving the training data'
# Store the design matrix in a separate .npy next to the pickle.
train.use_design_loc(output_dir+'/train.npy')
serial.save(output_dir + '/train.pkl', train)
print "Loading the test data"
test = CIFAR100(which_set = 'test', gcn = 55.)
print "Preprocessing the test data"
# Apply the already-fitted transform to the test set (can_fit=False).
test.apply_preprocessor(preprocessor = preprocessor, can_fit = False)
print "Saving the test data"
test.use_design_loc(output_dir+'/test.npy')
serial.save(output_dir+'/test.pkl', test)
# Save the fitted preprocessor for reuse on other images.
serial.save(output_dir + '/preprocessor.pkl',preprocessor)
| 31.876923 | 87 | 0.78861 |
ace1a4ef6b596102ae1a174b05493e78505852e7 | 882 | py | Python | lib/surface/ml/vision/__init__.py | bopopescu/Google-Cloud-SDK-1 | c4683bacb2f6192d8a816932e438a0493085469b | [
"Apache-2.0"
] | null | null | null | lib/surface/ml/vision/__init__.py | bopopescu/Google-Cloud-SDK-1 | c4683bacb2f6192d8a816932e438a0493085469b | [
"Apache-2.0"
] | null | null | null | lib/surface/ml/vision/__init__.py | bopopescu/Google-Cloud-SDK-1 | c4683bacb2f6192d8a816932e438a0493085469b | [
"Apache-2.0"
] | 1 | 2020-07-24T20:13:29.000Z | 2020-07-24T20:13:29.000Z | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command group for ml vision."""
from googlecloudsdk.calliope import base
# Registered in the GA, beta and alpha release tracks of the gcloud CLI.
@base.ReleaseTracks(base.ReleaseTrack.GA,
                    base.ReleaseTrack.BETA,
                    base.ReleaseTrack.ALPHA)
class Vision(base.Group):
  """Use Google Cloud Vision to analyze images."""
| 36.75 | 74 | 0.731293 |
ace1a5455ef3c4cc72d677fccaac7e4f8b9036f4 | 1,798 | py | Python | tests_basic.py | jthelin/HelloRayActors | ae3ccab8c52d919b6fa5f3823a750b22c63c3341 | [
"MIT"
] | 1 | 2019-08-31T01:45:40.000Z | 2019-08-31T01:45:40.000Z | tests_basic.py | jthelin/HelloRayActors | ae3ccab8c52d919b6fa5f3823a750b22c63c3341 | [
"MIT"
] | null | null | null | tests_basic.py | jthelin/HelloRayActors | ae3ccab8c52d919b6fa5f3823a750b22c63c3341 | [
"MIT"
] | null | null | null | import ray
import unittest
from HelloRayActors import Counter
class HelloRayActorTests(unittest.TestCase):
    """Basic tests for the remote ``Counter`` actor.

    Uses ``self.assertEqual`` instead of bare ``assert`` statements so the
    checks are not silently stripped when Python runs with ``-O`` and so
    failures report both the actual and expected values.
    """
    @classmethod
    def setUpClass(cls):
        # Initialize the Ray runtime once for all tests in this class.
        ray.init()
    def test_increment(self):
        """A single Counter actor starts at zero and increments by one."""
        counter = Counter.remote()
        self.assertEqual(ray.get(counter.get_value.remote()), 0,
                         "Initial Counter value should be zero")
        for expected in (1, 2, 3):
            self.assertEqual(ray.get(counter.increment.remote()), expected,
                             "Counter value was incremented to %d" % expected)
    def test_two_counters(self):
        """Two Counter actors hold fully independent state."""
        counter1 = Counter.remote()
        counter2 = Counter.remote()
        self.assertEqual(ray.get(counter1.get_value.remote()), 0,
                         "Initial Counter #1 value should be zero")
        self.assertEqual(ray.get(counter2.get_value.remote()), 0,
                         "Initial Counter #2 value should be zero")
        # Advance counter #2 first; counter #1 must be unaffected.
        for expected in (1, 2, 3):
            self.assertEqual(ray.get(counter2.increment.remote()), expected,
                             "Counter #2 value was incremented to %d" % expected)
        for expected in (1, 2):
            self.assertEqual(ray.get(counter1.increment.remote()), expected,
                             "Counter #1 value was incremented to %d" % expected)
if __name__ == "__main__":
    # Allow running this test module directly as a script.
    unittest.main()
| 29 | 69 | 0.622358 |
ace1a5b0ef3b4924f885ca10b9281cdfe242f8f3 | 4,253 | py | Python | vidgear/tests/videocapture_tests/test_pigear.py | hyper-swift/vidgear | 3db26ff897f151e70f86cbfa8e0a1c847f29c781 | [
"Apache-2.0"
] | 2,210 | 2019-03-18T08:00:10.000Z | 2022-03-31T21:43:40.000Z | vidgear/tests/videocapture_tests/test_pigear.py | hyper-swift/vidgear | 3db26ff897f151e70f86cbfa8e0a1c847f29c781 | [
"Apache-2.0"
] | 264 | 2019-03-18T14:29:43.000Z | 2022-03-24T09:32:57.000Z | vidgear/tests/videocapture_tests/test_pigear.py | hyper-swift/vidgear | 3db26ff897f151e70f86cbfa8e0a1c847f29c781 | [
"Apache-2.0"
] | 178 | 2019-03-18T13:46:12.000Z | 2022-03-27T14:06:37.000Z | """
===============================================
vidgear library source-code is deployed under the Apache 2.0 License:
Copyright (c) 2019 Abhishek Thakur(@abhiTronix) <abhi.una12@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===============================================
"""
# import the necessary packages
import os
import cv2
import time
import numpy as np
import pytest
import logging as log
import platform
from vidgear.gears.helper import logger_handler
# define test logger: DEBUG level, vidgear's shared handler, and propagation
# disabled so records are not duplicated by the root logger
logger = log.getLogger("Test_pigear")
logger.propagate = False
logger.addHandler(logger_handler())
logger.setLevel(log.DEBUG)
@pytest.mark.skipif((platform.system() != "Linux"), reason="Not Implemented")
def test_pigear_playback():
    """Smoke-test PiGear playback: read a handful of frames, then stop."""
    try:
        from vidgear.gears import PiGear

        # Open the Pi camera stream with grayscale output enabled.
        stream = PiGear(logging=True, colorspace="COLOR_BGR2GRAY").start()
        # Pull at most 10 frames; a None frame signals the stream has ended.
        for _ in range(10):
            frame = stream.read()
            if frame is None:
                break
        # Release camera resources.
        stream.stop()
    except Exception as e:
        if isinstance(e, ImportError):
            # PiGear is unavailable off-device; log instead of failing.
            logger.exception(e)
        else:
            pytest.fail(str(e))
# Parametrized cases for test_pigear_parameters. Each tuple is:
# (camera_num, resolution, framerate, time_delay, options, colorspace,
#  expected exception type — or None when no exception is expected).
test_data = [
    ("invalid", None, "", 0, {}, None, AssertionError),
    (-1, "invalid", "", 0.1, {}, None, AssertionError),
    (1, None, "invalid", 0.1, {}, None, AssertionError),
    (0, (640, 480), 60, 0, {"HWFAILURE_TIMEOUT": 15.0}, None, ValueError),
    (
        0,
        (640, 480),
        60,
        "invalid",
        {"HWFAILURE_TIMEOUT": "invalid"},
        "COLOR_BGR2INVALID",
        None,
    ),
    (0, (640, 480), 60, 1, {"create_bug": True}, "None", RuntimeError),
    (0, (640, 480), 60, 0, {"create_bug": "fail"}, "None", RuntimeError),
    (-1, (640, 480), 60, 0, {"create_bug": ["fail"]}, "None", None),
    (
        0,
        (640, 480),
        60,
        0,
        {"HWFAILURE_TIMEOUT": 1.5, "create_bug": 5},
        "COLOR_BGR2GRAY",
        SystemError,
    ),
]
@pytest.mark.skipif((platform.system() != "Linux"), reason="Not Implemented")
@pytest.mark.parametrize(
    "camera_num, resolution, framerate, time_delay, options, colorspace, exception_type",
    test_data,
)
def test_pigear_parameters(
    camera_num, resolution, framerate, time_delay, options, colorspace, exception_type
):
    """
    Tests PiGear's options and colorspace.

    Each parametrized case either plays back cleanly (exception_type is None)
    or is expected to raise exception_type, which is reported as xfail.
    """
    stream = None
    try:
        from vidgear.gears import PiGear
        # open pi video stream with default parameters
        stream = PiGear(
            camera_num=camera_num,
            resolution=resolution,
            framerate=framerate,
            logging=True,
            time_delay=time_delay,
            **options
        ).start()
        # playback: read up to 20 frames, pausing briefly between reads
        i = 0
        while i < 20:
            frame = stream.read()
            if frame is None:
                break
            time.sleep(0.1)
            # midway through playback, mutate color_space to exercise
            # PiGear's handling of bad colorspace values
            if i == 10:
                if colorspace == "COLOR_BGR2INVALID":
                    # test wrong colorspace value
                    stream.color_space = 1546755
                else:
                    # test invalid colorspace value
                    stream.color_space = "red"
            i += 1
    except Exception as e:
        # an expected exception is an xfail; anything else is a real failure
        if not (exception_type is None) and isinstance(e, exception_type):
            pytest.xfail(str(e))
        else:
            pytest.fail(str(e))
    finally:
        # clean resources
        if not (stream is None):
            stream.stop()
| 30.163121 | 90 | 0.561016 |
ace1a6f98002e476e187bfcad920183b40783e43 | 960 | py | Python | tests/test_gadgets.py | root-community/root-insurance-python | 2e9fce9b52cfe84aa64c61ecd6fc80b926a4a377 | [
"MIT"
] | 1 | 2018-03-22T18:26:54.000Z | 2018-03-22T18:26:54.000Z | tests/test_gadgets.py | root-community/root-insurance-python | 2e9fce9b52cfe84aa64c61ecd6fc80b926a4a377 | [
"MIT"
] | 3 | 2018-03-16T13:16:12.000Z | 2018-05-08T07:16:39.000Z | tests/test_gadgets.py | root-community/root-insurance-python | 2e9fce9b52cfe84aa64c61ecd6fc80b926a4a377 | [
"MIT"
] | 4 | 2018-03-17T14:06:01.000Z | 2018-04-09T15:01:31.000Z | from context import insurance
import json
# Shared Root insurance API client used by every test in this module.
client = insurance.Client()
def test_list_models():
    """Listing all gadget models yields a non-empty set including Apple makes."""
    models = client.gadgets.list_models()
    assert models
    assert models.__len__()
    assert len(models) > 0
    assert any('Apple' in entry.get('make') for entry in models)
def test_list_phone_brands():
    """Listing phone brands yields a non-empty collection containing Apple."""
    brands = client.gadgets.list_phone_brands()
    assert brands
    assert brands.__len__()
    assert any('Apple' in brand for brand in brands)
    assert len(brands) > 0
def test_list_phones_by_brand():
    """Filtering by the 'Apple' brand returns phones whose names mention iPhone."""
    phones = client.gadgets.list_phones_by_brand('Apple')
    assert phones
    assert phones.__len__()
    assert len(phones) > 0
    assert any('iPhone' in phone for phone in phones)
def test_get_phone_value():
    """The insured value of a known Apple phone model is a positive amount.

    Fix: removed leftover debug prints and commented-out code, and added a
    non-empty guard so an empty listing fails with a clear assertion instead
    of an IndexError on list(phones)[0].
    """
    phones = client.gadgets.list_phones_by_brand('Apple')
    # Guard before indexing: an empty listing should fail loudly here.
    assert phones
    result = client.gadgets.get_phone_value(list(phones)[0])
    assert int(result) > 0
| 27.428571 | 61 | 0.702083 |
ace1a82df80f0048cbb4d6151d4c1bd043a8273f | 8,645 | py | Python | models/review.py | noryb009/rmc | 2c9c6368b6cd5c4996645eb1c318aaddc7b401f3 | [
"MIT"
] | 164 | 2015-01-14T00:20:02.000Z | 2022-03-10T19:55:02.000Z | models/review.py | noryb009/rmc | 2c9c6368b6cd5c4996645eb1c318aaddc7b401f3 | [
"MIT"
] | 142 | 2015-01-01T02:49:13.000Z | 2022-03-11T23:18:17.000Z | models/review.py | noryb009/rmc | 2c9c6368b6cd5c4996645eb1c318aaddc7b401f3 | [
"MIT"
] | 79 | 2015-01-19T03:32:31.000Z | 2021-08-09T22:53:10.000Z | from datetime import datetime
import mongoengine as me
import logging
class Privacy(object):
    """Visibility levels a review can have: private, friends-only, or public."""

    ME = 0
    FRIENDS = 1
    EVERYONE = 2

    @staticmethod
    def choices():
        """Return every valid privacy code, in ascending order."""
        return [Privacy.ME, Privacy.FRIENDS, Privacy.EVERYONE]

    # TODO(david): Make this class more magical and not require calling these
    @staticmethod
    def to_int(str_privacy, default=1):
        """Map a privacy name ('me'/'friends'/'everyone') to its int code.

        Unknown names fall back to `default` (FRIENDS by default).
        """
        name_to_code = {
            'me': Privacy.ME,
            'friends': Privacy.FRIENDS,
            'everyone': Privacy.EVERYONE,
        }
        return name_to_code.get(str_privacy, default)

    @staticmethod
    def to_str(int_privacy, default='friends'):
        """Map an int privacy code back to its string name.

        Unknown codes fall back to `default` ('friends' by default).
        """
        code_to_name = {
            Privacy.ME: 'me',
            Privacy.FRIENDS: 'friends',
            Privacy.EVERYONE: 'everyone',
        }
        return code_to_name.get(int_privacy, default)
class BaseReview(me.EmbeddedDocument):
    """Common fields and behavior shared by course and professor reviews."""

    # Free-text review body (may be empty).
    comment = me.StringField(default='', max_length=4096)
    comment_date = me.DateTimeField()
    share_date = me.DateTimeField()
    # The time that any rating for this review was changed
    # (either created, modified, or deleted)
    rating_change_date = me.DateTimeField()
    privacy = me.IntField(choices=Privacy.choices(), default=Privacy.FRIENDS)
    num_voted_helpful = me.IntField(default=0)
    num_voted_not_helpful = me.IntField(default=0)

    # Minimum number of characters for a review to pass
    # TODO(david): Have a function to do this. First, we need consistent review
    # interface
    MIN_REVIEW_LENGTH = 11

    def __init__(self, **kwargs):
        """Accept either flat rating kwargs or a 'ratings' list of dicts.

        A 'ratings' entry like {'name': 'easiness', 'rating': 0.5} is
        flattened into easiness=0.5 before delegating to mongoengine.
        A string 'privacy' value is converted to its int code.
        """
        if 'ratings' in kwargs:
            kwargs.update({d['name']: d['rating'] for d in kwargs['ratings']})
            del kwargs['ratings']
        # basestring: this module targets Python 2.
        if isinstance(kwargs.get('privacy'), basestring):
            kwargs['privacy'] = Privacy.to_int(kwargs['privacy'])
        super(BaseReview, self).__init__(**kwargs)

    def rating_fields(self):
        """Subclass hook: return the list of rating field names."""
        raise NotImplementedError("return a list of rating field names")

    @property
    # Has this review ever been rated in the past?
    def has_been_rated(self):
        if self.rating_change_date:
            return True
        for rating_name in self.rating_fields():
            if getattr(self, rating_name) is not None:
                return True
        return False

    @property
    def has_commented(self):
        """True when a non-empty comment with a date exists."""
        return self.comment_date and self.comment

    @property
    def has_shared(self):
        # Truthy when the review was ever shared (share_date set).
        return self.share_date

    def get_ratings_array(self):
        """Return ratings as a list of {'name': ..., 'rating': ...} dicts."""
        return [{'name': r, 'rating': getattr(self, r)}
                for r in self.rating_fields()]

    def update_ratings(self, ratings_dict):
        """Replace each rating; stash the previous value on old_<name>.

        The old_* attributes are read later by the aggregate-update helpers
        in the subclasses. Any actual change bumps rating_change_date.
        """
        for rating_name in self.rating_fields():
            old_rating = getattr(self, rating_name)
            new_rating = ratings_dict.get(rating_name)
            setattr(self, 'old_%s' % rating_name, old_rating)
            setattr(self, rating_name, new_rating)
            if new_rating != old_rating:
                self.rating_change_date = datetime.now()

    def update(self, **kwargs):
        """Apply a partial update: ratings, comment, and/or privacy."""
        if 'ratings' in kwargs:
            new_values = {d['name']: d['rating'] for d in kwargs['ratings']}
            self.update_ratings(new_values)
        comment = kwargs.get('comment')
        if comment is not None and comment != self.comment:
            self.comment = comment
            date = kwargs.get('comment_date')
            if date is None:
                logging.warn("Review.update() comment_date "
                             "not set. Defaulting to current time")
                date = datetime.now()
            self.comment_date = date
        if 'privacy' in kwargs:
            self.privacy = Privacy.to_int(kwargs['privacy'])

    def to_dict(self, current_user=None, author_id=None, user_course_id=None,
                review_type=None):
        """Serialize for the API, respecting privacy and voting eligibility."""
        dict_ = {
            'comment': self.comment,
            'comment_date': self.comment_date,
            'privacy': Privacy.to_str(self.privacy),
            'ratings': self.get_ratings_array(),
            'num_voted_not_helpful': self.num_voted_not_helpful,
            'num_voted_helpful': self.num_voted_helpful,
            'can_vote': False
        }
        if user_course_id:
            dict_['user_course_id'] = str(user_course_id)
            # A user may vote only once per review.
            if current_user and not current_user.rated_review(
                    str(user_course_id), review_type):
                dict_['can_vote'] = True
        if review_type:
            dict_['review_type'] = review_type
        if author_id:
            # TODO(david): Remove circular dependency
            import user as _user
            author = _user.User.objects.only(*(_user.User.CORE_FIELDS +
                ['program_name'])).with_id(author_id)
            show_author = self.should_show_author(current_user, author_id)
            dict_['author'] = author.to_review_author_dict(current_user,
                show_author)
        return dict_

    def should_show_author(self, current_user, author_id):
        """Decide whether current_user may see who wrote this review."""
        if self.privacy == Privacy.ME:
            return False
        elif self.privacy == Privacy.FRIENDS:
            # Friends of the author — and the author themselves — may see it.
            return current_user and (author_id in current_user.friend_ids or
                    current_user.id == author_id)
        elif self.privacy == Privacy.EVERYONE:
            return True
        else:
            logging.error('Unrecognized privacy setting %s' % self.privacy)
            return False
class CourseReview(BaseReview):
    """A review of a course, rated on interest, easiness, and usefulness."""

    # Each rating is normalized to [0, 1]; None means "not rated".
    interest = me.FloatField(min_value=0.0, max_value=1.0, default=None)
    easiness = me.FloatField(min_value=0.0, max_value=1.0, default=None)
    usefulness = me.FloatField(min_value=0.0, max_value=1.0, default=None)

    def rating_fields(self):
        # Keep this list consistent with the order in Course.get_ratings
        return ['usefulness', 'easiness', 'interest']

    # TODO(david): Refactor into base class
    def update_course_aggregate_ratings(self, cur_course):
        """Propagate rating replacements into cur_course's aggregates.

        The old_* attributes only exist after update_ratings() has run,
        hence the hasattr() guards.
        """
        # Update associated aggregate ratings
        if hasattr(self, 'old_easiness'):
            cur_course.easiness.update_aggregate_after_replacement(
                self.old_easiness, self.easiness)
        if hasattr(self, 'old_interest'):
            cur_course.interest.update_aggregate_after_replacement(
                self.old_interest, self.interest)
        if hasattr(self, 'old_usefulness'):
            cur_course.usefulness.update_aggregate_after_replacement(
                self.old_usefulness, self.usefulness)

    def to_dict(self, current_user=None, author_id=None, user_course_id=None):
        """Serialize with review_type fixed to 'course'."""
        return super(CourseReview, self).to_dict(current_user, author_id,
                                                 user_course_id, 'course')
class ProfessorReview(BaseReview):
    """A review of a professor, rated on clarity and passion."""

    # Each rating is normalized to [0, 1]; None means "not rated".
    clarity = me.FloatField(min_value=0.0, max_value=1.0, default=None)
    passion = me.FloatField(min_value=0.0, max_value=1.0, default=None)

    def rating_fields(self):
        return ['clarity', 'passion']

    # TODO(david): Refactor into base class
    # TODO(mack): tidy up interface so we don't have to pass in course,
    # course_review
    # TODO(mack): handle the case you change the professor
    def update_professor_aggregate_ratings(self, cur_professor,
            cur_course, course_review):
        """Propagate rating replacements into professor aggregates and redis.

        Collects easiness/interest changes from the paired course_review and
        clarity/passion changes from this review, then pushes the combined
        delta to the professor's per-course redis ratings.
        """
        redis_changes = []
        if hasattr(course_review, 'old_easiness'):
            redis_changes.append({
                'name': 'easiness',
                'old': course_review.old_easiness,
                'new': course_review.easiness,
            })
        if hasattr(course_review, 'old_interest'):
            redis_changes.append({
                'name': 'interest',
                'old': course_review.old_interest,
                'new': course_review.interest,
            })
        # Update associated aggregate ratings
        if hasattr(self, 'old_clarity'):
            cur_professor.clarity.update_aggregate_after_replacement(
                self.old_clarity, self.clarity)
            redis_changes.append({
                'name': 'clarity',
                'old': self.old_clarity,
                'new': self.clarity,
            })
        if hasattr(self, 'old_passion'):
            cur_professor.passion.update_aggregate_after_replacement(
                self.old_passion, self.passion)
            redis_changes.append({
                'name': 'passion',
                'old': self.old_passion,
                'new': self.passion,
            })
        cur_professor.update_redis_ratings_for_course(
            cur_course.id, redis_changes)

    def to_dict(self, current_user=None, author_id=None, user_course_id=None):
        """Serialize with review_type fixed to 'prof'."""
        return super(ProfessorReview, self).to_dict(current_user,
                author_id, user_course_id, 'prof')
| 35.871369 | 79 | 0.616194 |
ace1a862f01b9d3a9b52c4251d3feb062cd52cd0 | 2,824 | py | Python | azure-mgmt-loganalytics/setup.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
] | 4 | 2016-06-17T23:25:29.000Z | 2022-03-30T22:37:45.000Z | azure-mgmt-loganalytics/setup.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
] | 54 | 2016-03-25T17:25:01.000Z | 2018-10-22T17:27:54.000Z | azure-mgmt-loganalytics/setup.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
] | 3 | 2016-05-03T20:49:46.000Z | 2017-10-05T21:05:27.000Z | #!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import re
import os.path
from io import open
from setuptools import find_packages, setup
# Optional wheel-building hook; absent in minimal environments.
try:
    from azure_bdist_wheel import cmdclass
except ImportError:
    from distutils import log as logger
    logger.warn("Wheel is not available, disabling bdist_wheel hook")
    cmdclass = {}

# Change the PACKAGE_NAME only to change folder and different name
PACKAGE_NAME = "azure-mgmt-loganalytics"
PACKAGE_PPRINT_NAME = "Log Analytics Management"

# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace('-', '/')
# a-b-c => a.b.c
namespace_name = PACKAGE_NAME.replace('-', '.')

# azure v0.x is not compatible with this package
# azure v0.x used to have a __version__ attribute (newer versions don't)
try:
    import azure
    try:
        ver = azure.__version__
        raise Exception(
            'This package is incompatible with azure=={}. '.format(ver) +
            'Uninstall it with "pip uninstall azure".'
        )
    except AttributeError:
        pass
except ImportError:
    pass

# Version extraction inspired from 'requests':
# read VERSION = '<x.y.z>' out of the package's version.py without importing it.
with open(os.path.join(package_folder_path, 'version.py'), 'r') as fd:
    version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
                        fd.read(), re.MULTILINE).group(1)

if not version:
    raise RuntimeError('Cannot find version information')

# README + HISTORY together form the PyPI long description.
with open('README.rst', encoding='utf-8') as f:
    readme = f.read()
with open('HISTORY.rst', encoding='utf-8') as f:
    history = f.read()

setup(
    name=PACKAGE_NAME,
    version=version,
    description='Microsoft Azure {} Client Library for Python'.format(PACKAGE_PPRINT_NAME),
    long_description=readme + '\n\n' + history,
    license='MIT License',
    author='Microsoft Corporation',
    author_email='azpysdkhelp@microsoft.com',
    url='https://github.com/Azure/azure-sdk-for-python',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'License :: OSI Approved :: MIT License',
    ],
    zip_safe=False,
    packages=find_packages(exclude=["tests"]),
    install_requires=[
        'msrestazure~=0.4.11',
        'azure-common~=1.1',
    ],
    cmdclass=cmdclass
)
| 32.837209 | 91 | 0.619334 |
ace1a946558200a4b276a76921b159dff2bc6793 | 400 | py | Python | airflow/plugins/__init__.py | huongdo108/ETL-pipeline-US-immigration-Pyspark-Airflow-AWS | 2a31f6810620437b67dcbacc37b97bb88c5f5202 | [
"MIT"
] | null | null | null | airflow/plugins/__init__.py | huongdo108/ETL-pipeline-US-immigration-Pyspark-Airflow-AWS | 2a31f6810620437b67dcbacc37b97bb88c5f5202 | [
"MIT"
] | null | null | null | airflow/plugins/__init__.py | huongdo108/ETL-pipeline-US-immigration-Pyspark-Airflow-AWS | 2a31f6810620437b67dcbacc37b97bb88c5f5202 | [
"MIT"
] | null | null | null | from __future__ import division, absolute_import, print_function
from airflow.plugins_manager import AirflowPlugin
import operators
import helpers
# Defining the plugin class
class ImmigrationPlugin(AirflowPlugin):
    """Airflow plugin that registers the custom immigration operators."""

    # Name under which Airflow exposes this plugin.
    name = "immigration_plugin"
    # Custom operators registered with Airflow's plugin manager.
    operators = [
        operators.ExistRecordCheckOperator,
        operators.NullRecordCheckOperator,
        operators.RunAnalyticsOperator
    ]
| 25 | 64 | 0.78 |
ace1aab03625fe135ab51f8ca394c6793947aeb8 | 28,922 | py | Python | espnet/nets/pytorch_backend/e2e_asr_mt_datt_transformer.py | ksoky/jointlytrained | 69a3f211a52c98c51750e154072578c5dc9019ec | [
"Apache-2.0"
] | null | null | null | espnet/nets/pytorch_backend/e2e_asr_mt_datt_transformer.py | ksoky/jointlytrained | 69a3f211a52c98c51750e154072578c5dc9019ec | [
"Apache-2.0"
] | null | null | null | espnet/nets/pytorch_backend/e2e_asr_mt_datt_transformer.py | ksoky/jointlytrained | 69a3f211a52c98c51750e154072578c5dc9019ec | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Join ASR and MT model (pytorch)."""
from argparse import Namespace
from itertools import groupby
import logging
import math
import chainer
from chainer import reporter
import numpy
import torch
from espnet.utils.cli_utils import strtobool
from espnet.nets.asr_interface import ASRInterface
from espnet.nets.ctc_prefix_score import CTCPrefixScore
from espnet.nets.e2e_asr_common import end_detect
from espnet.nets.e2e_asr_common import ErrorCalculator
from espnet.nets.e2e_mt_common import ErrorCalculator as MTErrorCalculator
from espnet.nets.pytorch_backend.ctc import CTC
from espnet.nets.pytorch_backend.e2e_asr import CTC_LOSS_THRESHOLD
#from espnet.nets.pytorch_backend.e2e_asr import Reporter
from espnet.nets.pytorch_backend.nets_utils import get_subsample
from espnet.nets.pytorch_backend.nets_utils import make_non_pad_mask
from espnet.nets.pytorch_backend.nets_utils import make_pad_mask #for MT
from espnet.nets.pytorch_backend.nets_utils import to_device
from espnet.nets.pytorch_backend.nets_utils import th_accuracy
from espnet.nets.pytorch_backend.rnn.decoders import CTC_SCORING_RATIO
from espnet.nets.pytorch_backend.transformer.add_sos_eos import add_sos_eos
from espnet.nets.pytorch_backend.transformer.add_sos_eos import mask_uniform
from espnet.nets.pytorch_backend.transformer.argument import (
add_arguments_transformer_common, # noqa: H301
)
from espnet.nets.pytorch_backend.transformer.attention import (
MultiHeadedAttention, # noqa: H301
RelPositionMultiHeadedAttention, # noqa: H301
)
from espnet.nets.pytorch_backend.transformer.decoder_double_att import Decoder
from espnet.nets.pytorch_backend.transformer.dynamic_conv import DynamicConvolution
from espnet.nets.pytorch_backend.transformer.dynamic_conv2d import DynamicConvolution2D
from espnet.nets.pytorch_backend.transformer.encoder_dual import DualEncoder
from espnet.nets.pytorch_backend.transformer.initializer import initialize
from espnet.nets.pytorch_backend.transformer.label_smoothing_loss import (
LabelSmoothingLoss, # noqa: H301
)
from espnet.nets.pytorch_backend.transformer.mask import subsequent_mask
from espnet.nets.pytorch_backend.transformer.mask import target_mask
from espnet.nets.pytorch_backend.transformer.plot import PlotAttentionReport
from espnet.nets.scorers.ctc import CTCPrefixScorer
from espnet.utils.fill_missing_args import fill_missing_args
# Losses above this are treated as broken and not reported.
# NOTE(review): this rebinds the CTC_LOSS_THRESHOLD imported from e2e_asr
# above — confirm the values are meant to agree.
CTC_LOSS_THRESHOLD = 10000
class Reporter(chainer.Chain):
    """Chainer reporter wrapper used to log per-step training statistics."""

    def report(self, loss_ctc, loss_att, cer_ctc, cer, wer, acc, ppl, bleu, mtl_loss):
        """Publish every metric through the chainer reporter, then the total loss."""
        metrics = [
            ("loss_ctc", loss_ctc),
            ("loss_att", loss_att),
            ("cer_ctc", cer_ctc),
            ("cer", cer),
            ("wer", wer),
            ("acc", acc),
            ("ppl", ppl),
            ("bleu", bleu),
        ]
        for key, value in metrics:
            reporter.report({key: value}, self)
        logging.info("mtl loss:" + str(mtl_loss))
        reporter.report({"loss": mtl_loss}, self)
class E2E(ASRInterface, torch.nn.Module):
"""E2E module.
:param int idim: dimension of inputs
:param int odim: dimension of outputs
:param Namespace args: argument Namespace containing options
"""
    @staticmethod
    def add_arguments(parser):
        """Add transformer and joint-ASR/MT command-line arguments to parser."""
        group = parser.add_argument_group("transformer model setting")
        group = add_arguments_transformer_common(group)
        # Non-autoregressive training
        group.add_argument(
            "--decoder-mode",
            default="AR",
            type=str,
            choices=["ar", "maskctc"],
            help="AR: standard autoregressive training, "
            "maskctc: non-autoregressive training based on Mask CTC",
        )
        # NOTE(review): default=True with action="store_true" means the flag
        # is always on regardless of the command line — confirm intended.
        group.add_argument(
            "--report-bleu",
            default=True,
            action="store_true",
            help="Compute BLEU on development set",
        )
        # multilingual related
        group.add_argument(
            "--multilingual",
            default=False,
            type=strtobool,
            help="Prepend target language ID to the source sentence. "
            "Both source/target language IDs must be prepend in the pre-processing stage.",
        )
        group.add_argument(
            "--replace-sos",
            default=False,
            type=strtobool,
            help="Replace <sos> in the decoder with a target language ID "
            "(the first token in the target sequence)",
        )
        # Controls the cross-attention ordering in the double-attention decoder.
        group.add_argument(
            "--asr-first",
            default=True,
            type=strtobool,
            help="ASR or MT is the first cross attention",
        )
        return parser
    @property
    def attention_plot_class(self):
        """Return PlotAttentionReport, the class used to plot attention maps."""
        return PlotAttentionReport
    def __init__(self, idim, odim, args, ignore_id=-1):
        """Construct an E2E object.

        Builds a dual (speech + text) encoder, an optional double-attention
        decoder (skipped when mtlalpha == 1, i.e. pure CTC), an optional CTC
        head (skipped when mtlalpha == 0), and the ASR/MT error calculators.

        :param int idim: dimension of inputs
        :param int odim: dimension of outputs
        :param Namespace args: argument Namespace containing options
        :param int ignore_id: label id ignored in loss/accuracy (padding)
        """
        torch.nn.Module.__init__(self)
        # fill missing arguments for compatibility
        args = fill_missing_args(args, self.add_arguments)
        if args.transformer_attn_dropout_rate is None:
            args.transformer_attn_dropout_rate = args.dropout_rate
        ##Join ASR and MT/ST encoder
        # input_layer=args.transformer_input_layer,
        #for idx in range(args.num_encs):
        #val = "self.encoder" + str(idx)
        self.encoder = DualEncoder(
            idim=idim,
            aux_idim=odim,
            selfattention_layer_type=args.transformer_encoder_selfattn_layer_type,
            attention_dim=args.adim,
            attention_heads=args.aheads,
            conv_wshare=args.wshare,
            conv_kernel_length=args.ldconv_encoder_kernel_length,
            conv_usebias=args.ldconv_usebias,
            linear_units=args.eunits,
            num_blocks=args.elayers,
            dropout_rate=args.dropout_rate,
            positional_dropout_rate=args.dropout_rate,
            attention_dropout_rate=args.transformer_attn_dropout_rate,
        )
        # Attention decoder only exists when some attention weight remains.
        if args.mtlalpha < 1:
            self.decoder = Decoder(
                odim=odim,
                selfattention_layer_type=args.transformer_decoder_selfattn_layer_type,
                attention_dim=args.adim,
                attention_heads=args.aheads,
                conv_wshare=args.wshare,
                conv_kernel_length=args.ldconv_decoder_kernel_length,
                conv_usebias=args.ldconv_usebias,
                linear_units=args.dunits,
                num_blocks=args.dlayers,
                dropout_rate=args.dropout_rate,
                positional_dropout_rate=args.dropout_rate,
                self_attention_dropout_rate=args.transformer_attn_dropout_rate,
                src_attention_dropout_rate=args.transformer_attn_dropout_rate,
                asr_first=args.asr_first
            )
            self.criterion = LabelSmoothingLoss(
                odim,
                ignore_id,
                args.lsm_weight,
                args.transformer_length_normalized_loss,
            )
        else:
            self.decoder = None
            self.criterion = None
        self.blank = 0
        self.pad = 0  # use <blank> for padding
        self.decoder_mode = args.decoder_mode
        # maskctc reserves the last vocab id as the mask token.
        if self.decoder_mode == "maskctc":
            self.mask_token = odim - 1
            self.sos = odim - 2
            self.eos = odim - 2
        else:
            self.sos = odim - 1
            self.eos = odim - 1
        self.odim = odim
        self.ignore_id = ignore_id
        self.subsample = get_subsample(args, mode="asr", arch="transformer")
        # subsample info
        # self.subsample_list = [get_subsample(args, mode="asr", arch="transformer"), get_subsample(args, mode="mt", arch="rnn")]
        # self.num_encs = args.num_encs
        self.reporter = Reporter()
        self.reset_parameters(args)
        self.adim = args.adim  # used for CTC (equal to d_model)
        self.mtlalpha = args.mtlalpha
        if args.mtlalpha > 0.0:
            self.ctc = CTC(
                odim, args.adim, args.dropout_rate, ctc_type=args.ctc_type, reduce=True
            )
        else:
            self.ctc = None
        if args.report_cer or args.report_wer:
            self.error_calculator_asr = ErrorCalculator(
                args.char_list,
                args.sym_space,
                args.sym_blank,
                args.report_cer,
                args.report_wer,
            )
        else:
            self.error_calculator_asr = None
        # tie source and target emeddings
        # if args.tie_src_tgt_embedding:
        #     if idim != odim:
        #         raise ValueError(
        #             "When using tie_src_tgt_embedding, idim and odim must be equal."
        #         )
        #     self.encoder.embed[0].weight = self.decoder.embed[0].weight
        # tie emeddings and the classfier
        # if args.tie_classifier:
        #     self.decoder.output_layer.weight = self.decoder.embed[0].weight
        self.normalize_length = args.transformer_length_normalized_loss  # for PPL
        self.error_calculator_mt = MTErrorCalculator(
            args.char_list, args.sym_space, args.sym_blank, args.report_bleu
        )
        # multilingual MT related
        self.multilingual = args.multilingual
        self.rnnlm = None
    def reset_parameters(self, args):
        """Initialize all model parameters per args.transformer_init."""
        # initialize parameters
        initialize(self, args.transformer_init)
        # Embedding-specific re-initialization is currently disabled
        # (this model has no encoder embedding layer for speech input).
        # torch.nn.init.normal_(
        #     self.encoder.embed[0].weight, mean=0, std=args.adim ** -0.5
        # )
        # torch.nn.init.constant_(self.encoder.embed[0].weight[self.pad], 0)
        # torch.nn.init.normal_(
        #     self.decoder.embed[0].weight, mean=0, std=args.adim ** -0.5
        # )
        # torch.nn.init.constant_(self.decoder.embed[0].weight[self.pad], 0)
    #xs_pad, ilens, ys_pad, ys_pad_src, src_lens
    def forward(self, xs_pad, ilens, xt_pad, itlens, ys_pad):
        """E2E forward.

        Runs the dual encoder over speech (xs_pad) and text (xt_pad),
        decodes with double attention, and combines CTC and attention losses
        weighted by mtlalpha.

        :param torch.Tensor xs_pad: batch of padded source sequences (B, Tmax, idim)
        :param torch.Tensor ilens: batch of lengths of source sequences (B)
        :param torch.Tensor xt_pad: batch of padded MT source token sequences
        :param torch.Tensor itlens: batch of lengths of MT source sequences (B)
        :param torch.Tensor ys_pad: batch of padded target sequences (B, Lmax)
        :return: weighted CTC + attention loss
        :rtype: torch.Tensor
        """
        # 1. forward encoder
        # xs_pad is the source of ASR (fbank)
        xs_pad = xs_pad[:, : max(ilens)]  # for data parallel
        src_mask = make_non_pad_mask(ilens.tolist()).to(xs_pad.device).unsqueeze(-2)
        # xt_pad is the source of MT
        xt_pad = xt_pad[:, : max(itlens)]  # for data parallel
        xt_mask = (~make_pad_mask(itlens.tolist())).to(xt_pad.device).unsqueeze(-2)
        # NOTE(review): target_forcing is not defined in this file's visible
        # portion — presumably strips/uses a language-ID token; confirm.
        xt_pad, yt_pad = self.target_forcing(xt_pad, ys_pad)
        #from pudb import set_trace; set_trace()
        hs_pad, hs_mask, mt_pad, mt_mask = self.encoder(xs_pad, src_mask, xt_pad, xt_mask)
        self.hs_pad = hs_pad
        # 2. forward decoder
        if self.decoder is not None:
            if self.decoder_mode == "maskctc":
                ys_in_pad, ys_out_pad = mask_uniform(
                    ys_pad, self.mask_token, self.eos, self.ignore_id
                )
                ys_mask = (ys_in_pad != self.ignore_id).unsqueeze(-2)
            else:
                ys_in_pad, ys_out_pad = add_sos_eos(
                    ys_pad, self.sos, self.eos, self.ignore_id
                )
                ys_mask = target_mask(ys_in_pad, self.ignore_id)
            pred_pad, pred_mask = self.decoder(ys_in_pad, ys_mask, hs_pad, hs_mask, mt_pad, mt_mask)
            self.pred_pad = pred_pad
            # 3. compute attention loss
            loss_att = self.criterion(pred_pad, ys_out_pad)
            self.acc = th_accuracy(
                pred_pad.view(-1, self.odim), ys_out_pad, ignore_label=self.ignore_id
            )
        else:
            loss_att = None
            self.acc = None
        # TODO(karita) show predicted text
        # TODO(karita) calculate these stats
        cer_ctc = None
        if self.mtlalpha == 0.0:
            loss_ctc = None
        else:
            batch_size = xs_pad.size(0)
            hs_len = hs_mask.view(batch_size, -1).sum(1)
            loss_ctc = self.ctc(hs_pad.view(batch_size, -1, self.adim), hs_len, ys_pad)
            if not self.training and self.error_calculator_asr is not None:
                ys_hat = self.ctc.argmax(hs_pad.view(batch_size, -1, self.adim)).data
                cer_ctc = self.error_calculator_asr(ys_hat.cpu(), ys_pad.cpu(), is_ctc=True)
            # for visualization
            if not self.training:
                self.ctc.softmax(hs_pad)
        # 5. compute cer/wer
        if self.training or self.error_calculator_asr is None or self.decoder is None:
            cer, wer, self.bleu = None, None, None
        else:
            ys_hat = pred_pad.argmax(dim=-1)
            cer, wer = self.error_calculator_asr(ys_hat.cpu(), ys_pad.cpu())
            self.bleu = self.error_calculator_mt(ys_hat.cpu(), ys_pad.cpu())
        # copied from e2e_asr
        alpha = self.mtlalpha
        if alpha == 0:
            self.loss = loss_att
            loss_att_data = float(loss_att)
            loss_ctc_data = None
        elif alpha == 1:
            self.loss = loss_ctc
            loss_att_data = None
            loss_ctc_data = float(loss_ctc)
        else:
            self.loss = alpha * loss_ctc + (1 - alpha) * loss_att
            loss_att_data = float(loss_att)
            loss_ctc_data = float(loss_ctc)
        loss_data = float(self.loss)
        if self.normalize_length:
            self.ppl = numpy.exp(loss_data)
        else:
            # NOTE(review): when alpha == 1 the decoder branch is skipped and
            # ys_out_pad is never bound, so this branch would raise
            # NameError — confirm pure-CTC training always sets
            # normalize_length, or guard this.
            batch_size = ys_out_pad.size(0)
            ys_out_pad = ys_out_pad.view(-1)
            ignore = ys_out_pad == self.ignore_id  # (B*T,)
            total_n_tokens = len(ys_out_pad) - ignore.sum().item()
            self.ppl = numpy.exp(loss_data * batch_size / total_n_tokens)
        if loss_data < CTC_LOSS_THRESHOLD and not math.isnan(loss_data):
            self.reporter.report(
                loss_ctc_data, loss_att_data, cer_ctc, cer, wer, self.acc, self.ppl, self.bleu, loss_data
            )
        else:
            logging.warning("loss (=%f) is not correct", loss_data)
        return self.loss
def scorers(self):
"""Scorers."""
return dict(decoder=self.decoder, ctc=CTCPrefixScorer(self.ctc, self.eos))
# def encode(self, x):
# """Encode acoustic features.
# :param ndarray x: source acoustic feature (T, D)
# :return: encoder outputs
# :rtype: torch.Tensor
# """
# self.eval()
# x = torch.as_tensor(x).unsqueeze(0)
# enc_output, _ = self.encoder(x, None)
# return enc_output.squeeze(0)
def encode(self, x, xt):
self.eval()
x = torch.as_tensor(x).unsqueeze(0)
xt = torch.as_tensor(xt)
x_enc_output, _, xt_enc_output, _ = self.encoder(x, None, xt, None)
return x_enc_output.squeeze(0), xt_enc_output.squeeze(0)
def recognize(self, x, xt, recog_args, char_list=None, rnnlm=None, use_jit=False):
"""Recognize input speech.
:param ndnarray x: input acoustic feature (B, T, D) or (T, D)
:param Namespace recog_args: argment Namespace contraining options
:param list char_list: list of characters
:param torch.nn.Module rnnlm: language model module
:return: N-best decoding results
:rtype: list
"""
self.eval()
assert isinstance(xt, list)
# make a utt list (1) to use the same interface for encoder
if self.multilingual:
xt = to_device(
self, torch.from_numpy(numpy.fromiter(map(int, xt[0][1:]), dtype=numpy.int64))
)
else:
xt = to_device(
self, torch.from_numpy(numpy.fromiter(map(int, xt[0]), dtype=numpy.int64))
)
logging.info("input lengths: " + str(xt.size(0)))
xt_pad = xt.unsqueeze(0)
tgt_lang = None
#if recog_args.tgt_lang:recog_args.tgt_lang
#tgt_lang = char_list.index("en")
xt_pad, _ = self.target_forcing(xt_pad, tgt_lang=tgt_lang)
asr_output, mt_output = self.encode(x, xt_pad)
#logging.info("Is it the probem here!")
asr_output, mt_output = asr_output.unsqueeze(0), mt_output.unsqueeze(0)
if self.mtlalpha == 1.0:
recog_args.ctc_weight = 1.0
logging.info("Set to pure CTC decoding mode.")
if self.mtlalpha > 0 and recog_args.ctc_weight == 1.0:
from itertools import groupby
lpz = self.ctc.argmax(asr_output)
collapsed_indices = [x[0] for x in groupby(lpz[0])]
hyp = [x for x in filter(lambda x: x != self.blank, collapsed_indices)]
nbest_hyps = [{"score": 0.0, "yseq": [self.sos] + hyp}]
if recog_args.beam_size > 1:
raise NotImplementedError("Pure CTC beam search is not implemented.")
# TODO(hirofumi0810): Implement beam search
return nbest_hyps
elif self.mtlalpha > 0 and recog_args.ctc_weight > 0.0:
lpz = self.ctc.log_softmax(asr_output)
lpz = lpz.squeeze(0)
else:
lpz = None
h = asr_output.squeeze(0)
ht = mt_output.squeeze(0)
logging.info("input lengths: " + str(h.size(0)))
# search parms
beam = recog_args.beam_size
penalty = recog_args.penalty
ctc_weight = recog_args.ctc_weight
# preprare sos
y = self.sos
vy = h.new_zeros(1).long()
if recog_args.maxlenratio == 0:
maxlen = h.shape[0]
else:
# maxlen >= 1
maxlen = max(1, int(recog_args.maxlenratio * h.size(0)))
minlen = int(recog_args.minlenratio * h.size(0))
logging.info("max output length: " + str(maxlen))
logging.info("min output length: " + str(minlen))
# initialize hypothesis
if rnnlm:
hyp = {"score": 0.0, "yseq": [y], "rnnlm_prev": None}
else:
hyp = {"score": 0.0, "yseq": [y]}
if lpz is not None:
ctc_prefix_score = CTCPrefixScore(lpz.detach().numpy(), 0, self.eos, numpy)
hyp["ctc_state_prev"] = ctc_prefix_score.initial_state()
hyp["ctc_score_prev"] = 0.0
if ctc_weight != 1.0:
# pre-pruning based on attention scores
ctc_beam = min(lpz.shape[-1], int(beam * CTC_SCORING_RATIO))
else:
ctc_beam = lpz.shape[-1]
hyps = [hyp]
ended_hyps = []
import six
traced_decoder = None
for i in six.moves.range(maxlen):
logging.debug("position " + str(i))
hyps_best_kept = []
for hyp in hyps:
vy[0] = hyp["yseq"][i]
# get nbest local scores and their ids
ys_mask = subsequent_mask(i + 1).unsqueeze(0)
ys = torch.tensor(hyp["yseq"]).unsqueeze(0)
# FIXME: jit does not match non-jit result
if use_jit:
if traced_decoder is None:
traced_decoder = torch.jit.trace(
self.decoder.forward_one_step, (ys, ys_mask, asr_output, mt_output)
)
local_att_scores = traced_decoder(ys, ys_mask, asr_output, mt_output)[0]
else:
local_att_scores = self.decoder.forward_one_step(
ys, ys_mask, asr_output, mt_output
)[0]
if rnnlm:
rnnlm_state, local_lm_scores = rnnlm.predict(hyp["rnnlm_prev"], vy)
local_scores = (
local_att_scores + recog_args.lm_weight * local_lm_scores
)
else:
local_scores = local_att_scores
if lpz is not None:
local_best_scores, local_best_ids = torch.topk(
local_att_scores, ctc_beam, dim=1
)
ctc_scores, ctc_states = ctc_prefix_score(
hyp["yseq"], local_best_ids[0], hyp["ctc_state_prev"]
)
local_scores = (1.0 - ctc_weight) * local_att_scores[
:, local_best_ids[0]
] + ctc_weight * torch.from_numpy(
ctc_scores - hyp["ctc_score_prev"]
)
if rnnlm:
local_scores += (
recog_args.lm_weight * local_lm_scores[:, local_best_ids[0]]
)
local_best_scores, joint_best_ids = torch.topk(
local_scores, beam, dim=1
)
local_best_ids = local_best_ids[:, joint_best_ids[0]]
else:
local_best_scores, local_best_ids = torch.topk(
local_scores, beam, dim=1
)
for j in six.moves.range(beam):
new_hyp = {}
new_hyp["score"] = hyp["score"] + float(local_best_scores[0, j])
new_hyp["yseq"] = [0] * (1 + len(hyp["yseq"]))
new_hyp["yseq"][: len(hyp["yseq"])] = hyp["yseq"]
new_hyp["yseq"][len(hyp["yseq"])] = int(local_best_ids[0, j])
if rnnlm:
new_hyp["rnnlm_prev"] = rnnlm_state
if lpz is not None:
new_hyp["ctc_state_prev"] = ctc_states[joint_best_ids[0, j]]
new_hyp["ctc_score_prev"] = ctc_scores[joint_best_ids[0, j]]
# will be (2 x beam) hyps at most
hyps_best_kept.append(new_hyp)
hyps_best_kept = sorted(
hyps_best_kept, key=lambda x: x["score"], reverse=True
)[:beam]
# sort and get nbest
hyps = hyps_best_kept
logging.debug("number of pruned hypothes: " + str(len(hyps)))
if char_list is not None:
logging.debug(
"best hypo: "
+ "".join([char_list[int(x)] for x in hyps[0]["yseq"][1:]])
)
# add eos in the final loop to avoid that there are no ended hyps
if i == maxlen - 1:
logging.info("adding <eos> in the last postion in the loop")
for hyp in hyps:
hyp["yseq"].append(self.eos)
# add ended hypothes to a final list, and removed them from current hypothes
# (this will be a probmlem, number of hyps < beam)
remained_hyps = []
for hyp in hyps:
if hyp["yseq"][-1] == self.eos:
# only store the sequence that has more than minlen outputs
# also add penalty
if len(hyp["yseq"]) > minlen:
hyp["score"] += (i + 1) * penalty
if rnnlm: # Word LM needs to add final <eos> score
hyp["score"] += recog_args.lm_weight * rnnlm.final(
hyp["rnnlm_prev"]
)
ended_hyps.append(hyp)
else:
remained_hyps.append(hyp)
# end detection
if end_detect(ended_hyps, i) and recog_args.maxlenratio == 0.0:
logging.info("end detected at %d", i)
break
hyps = remained_hyps
if len(hyps) > 0:
logging.debug("remeined hypothes: " + str(len(hyps)))
else:
logging.info("no hypothesis. Finish decoding.")
break
if char_list is not None:
for hyp in hyps:
logging.debug(
"hypo: " + "".join([char_list[int(x)] for x in hyp["yseq"][1:]])
)
logging.debug("number of ended hypothes: " + str(len(ended_hyps)))
nbest_hyps = sorted(ended_hyps, key=lambda x: x["score"], reverse=True)[
: min(len(ended_hyps), recog_args.nbest)
]
# check number of hypotheis
if len(nbest_hyps) == 0:
logging.warning(
"there is no N-best results, perform recognition "
"again with smaller minlenratio."
)
# should copy becasuse Namespace will be overwritten globally
recog_args = Namespace(**vars(recog_args))
recog_args.minlenratio = max(0.0, recog_args.minlenratio - 0.1)
return self.recognize(x, recog_args, char_list, rnnlm)
logging.info("total log probability: " + str(nbest_hyps[0]["score"]))
logging.info(
"normalized log probability: "
+ str(nbest_hyps[0]["score"] / len(nbest_hyps[0]["yseq"]))
)
return nbest_hyps
def calculate_all_attentions(self, xs_pad, ilens, xt_pad, itlens, ys_pad):
    """E2E attention calculation.

    Runs one forward pass in eval mode and harvests the attention maps
    cached on every attention / dynamic-convolution sub-module.

    :param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax, idim)
    :param torch.Tensor ilens: batch of lengths of input sequences (B)
    :param torch.Tensor xt_pad: batch of padded auxiliary (text) sequences
    :param torch.Tensor itlens: batch of lengths of auxiliary sequences (B)
    :param torch.Tensor ys_pad: batch of padded token id sequence tensor (B, Lmax)
    :return: attention weights keyed by module name (B, H, Lmax, Tmax)
    :rtype: float ndarray
    """
    self.eval()
    with torch.no_grad():
        # A plain forward pass makes every attention module cache its weights.
        self.forward(xs_pad, ilens, xt_pad, itlens, ys_pad)
    attentions = {}
    single_map_types = (
        MultiHeadedAttention,
        DynamicConvolution,
        RelPositionMultiHeadedAttention,
    )
    for name, module in self.named_modules():
        if isinstance(module, single_map_types):
            attentions[name] = module.attn.cpu().numpy()
        if isinstance(module, DynamicConvolution2D):
            # 2-D dynamic convolutions keep separate time/frequency maps.
            attentions[name + "_time"] = module.attn_t.cpu().numpy()
            attentions[name + "_freq"] = module.attn_f.cpu().numpy()
    self.train()
    return attentions
def calculate_all_ctc_probs(self, xs_pad, ilens, xt_pad, itlens, ys_pad):
    """E2E CTC probability calculation.

    :param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax)
    :param torch.Tensor ilens: batch of lengths of input sequences (B)
    :param torch.Tensor xt_pad: batch of padded auxiliary (text) sequences
    :param torch.Tensor itlens: batch of lengths of auxiliary sequences (B)
    :param torch.Tensor ys_pad: batch of padded token id sequence tensor (B, Lmax)
    :return: CTC probability (B, Tmax, vocab), or None when CTC is disabled
    :rtype: float ndarray
    """
    # No CTC branch exists when the multitask weight is zero.
    if self.mtlalpha == 0:
        return None
    self.eval()
    with torch.no_grad():
        self.forward(xs_pad, ilens, xt_pad, itlens, ys_pad)
    probs = None
    for _name, module in self.named_modules():
        if isinstance(module, CTC) and module.probs is not None:
            probs = module.probs.cpu().numpy()
    self.train()
    return probs
def target_forcing(self, xs_pad, ys_pad=None, tgt_lang=None):
    """Prepend target language IDs to source sentences for multilingual MT.

    These tags are prepended in source/target sentences as pre-processing.

    Exactly one of *ys_pad* (training: tag is read from the target) or
    *tgt_lang* (inference: tag supplied explicitly) must be given.

    :param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax)
    :return: source text without language IDs
    :rtype: torch.Tensor
    :return: target text without language IDs
    :rtype: torch.Tensor
    :return: target language IDs
    :rtype: torch.Tensor (B, 1)
    """
    if self.multilingual:
        xs_pad = xs_pad[:, 1:]  # remove source language IDs here
    if ys_pad is not None:
        # remove language ID in the beginning
        lang_ids = ys_pad[:, 0].unsqueeze(1)
        ys_pad = ys_pad[:, 1:]
    elif tgt_lang is not None:
        # Inference path: every sentence in the batch gets the same tag.
        lang_ids = xs_pad.new_zeros(xs_pad.size(0), 1).fill_(tgt_lang)
    else:
        raise ValueError("Set ys_pad or tgt_lang.")
    # prepend target language ID to source sentences
    xs_pad = torch.cat([lang_ids, xs_pad], dim=1)
    return xs_pad, ys_pad
ace1ab3f21d73a9d587512699c01beb53dfe568f | 2,509 | py | Python | mat2yolo.py | joycenerd/yolov5-svhn-detection | 2cab36cb8f3b23c7607f41c2828171d31228272b | [
"MIT"
] | null | null | null | mat2yolo.py | joycenerd/yolov5-svhn-detection | 2cab36cb8f3b23c7607f41c2828171d31228272b | [
"MIT"
] | null | null | null | mat2yolo.py | joycenerd/yolov5-svhn-detection | 2cab36cb8f3b23c7607f41c2828171d31228272b | [
"MIT"
] | null | null | null | import h5py
import numpy as np
import argparse
import os
from PIL import Image
# Command-line interface: only the dataset root directory is configurable.
parser = argparse.ArgumentParser()
parser.add_argument('--data-root', type=str, default='/eva_data/zchin/vrdl_hw2_data', help='data root dir')
args = parser.parse_args()
def get_img_name(f, name_col, idx=0):
    """Decode the image filename stored at row *idx* of the name column.

    Each entry of ``name_col`` holds an HDF5 object reference; the referenced
    dataset is an array of character codes that is flattened and joined into
    the filename string.
    """
    char_codes = f[name_col[idx][0]][()].flatten()
    return ''.join(chr(code) for code in char_codes)
def get_img_boxes(f, bbox_col, idx=0):
    """Collect the bounding-box properties of the image at row *idx*.

    Returns a dict mapping each of 'height', 'left', 'top', 'width' and
    'label' to a list with one integer per digit in the image.  When the
    image contains a single digit, values are stored inline in the column;
    otherwise each cell is an HDF5 object reference that must be
    dereferenced through *f*.

    :param f: h5py.File
    :param idx: index of the image
    :return: dictionary
    """
    properties = ('height', 'left', 'top', 'width', 'label')
    meta = {prop: [] for prop in properties}
    box_group = f[bbox_col[idx][0]]
    for prop in box_group.keys():
        column = box_group[prop]
        if column.shape[0] == 1:
            # Single digit: the value is stored directly in the column.
            meta[prop].append(int(column[0][0]))
        else:
            # Multiple digits: each row references a scalar dataset.
            for row in range(column.shape[0]):
                meta[prop].append(int(f[column[row][0]][()].item()))
    return meta
def yolo_annot(bbox, img_path, annot_path):
    """Write a YOLO-format annotation file for one image.

    Each output line is ``<class> <x_center> <y_center> <width> <height>``
    with all box coordinates normalised by the image dimensions.

    Args:
        bbox: dict with parallel 'label', 'height', 'left', 'top', 'width'
            lists, one entry per digit (as produced by ``get_img_boxes``).
        img_path: path of the image, opened only to read its size.
        annot_path: destination path of the ``.txt`` annotation file.

    Fix vs. the previous revision: the image and the annotation file are now
    opened with context managers, so the handles are released even when a
    read/write fails part-way through (the old code leaked both on error).
    """
    with Image.open(img_path) as img:
        w, h = img.size
    with open(annot_path, 'w') as annot_f:
        label_cnt = len(bbox['label'])
        for i in range(label_cnt):
            label = bbox['label'][i]
            height = bbox['height'][i]
            left = bbox['left'][i]
            top = bbox['top'][i]
            width = bbox['width'][i]
            # SVHN uses label 10 for the digit zero; YOLO classes are 0-9.
            if label == 10:
                label = 0
            # Convert the top-left corner box to a normalised centre box.
            x_center = left + width / 2
            y_center = top + height / 2
            x_center_norm = x_center / w
            y_center_norm = y_center / h
            width_norm = width / w
            height_norm = height / h
            annot_f.write(f'{label} {x_center_norm} {y_center_norm} {width_norm} {height_norm}\n')
if __name__ == '__main__':
    # Output directory for the YOLO label files (one .txt per image).
    annot_dir = os.path.join(args.data_root, 'labels/all_train')
    if not os.path.isdir(annot_dir):
        os.makedirs(annot_dir)
    # SVHN ships its annotations as a MATLAB v7.3 file, readable via h5py.
    mat_f = os.path.join(args.data_root, 'train/digitStruct.mat')
    mat = h5py.File(mat_f)
    data_size = mat['/digitStruct/name'].shape[0]
    print(f'Data size: {data_size}')
    name_col = mat['/digitStruct/name']
    bbox_col = mat['/digitStruct/bbox']
    for idx in range(data_size):
        img_name = get_img_name(mat, name_col, idx)
        bbox = get_img_boxes(mat, bbox_col, idx)
        print(img_name, bbox)
        # Replace the 3-char image extension (e.g. 'png') with 'txt'.
        annot_f = os.path.join(annot_dir, f'{img_name[:-3]}txt')
        img_path = os.path.join(args.data_root, 'train', img_name)
        yolo_annot(bbox, img_path, annot_f)
| 29.869048 | 113 | 0.598645 |
ace1abebcedaa50a85913c982d34b93f49cf5401 | 2,177 | py | Python | AutoTriageBot/tests/test_payout.py | jaydave/AutoTriageBot | d87898ec11d6057797697ffa867ccb52770e65ec | [
"BSD-3-Clause"
] | 58 | 2017-09-20T16:50:25.000Z | 2021-05-15T10:22:53.000Z | AutoTriageBot/tests/test_payout.py | jaydave/AutoTriageBot | d87898ec11d6057797697ffa867ccb52770e65ec | [
"BSD-3-Clause"
] | 2 | 2017-09-21T17:04:10.000Z | 2017-09-25T23:29:20.000Z | AutoTriageBot/tests/test_payout.py | jaydave/AutoTriageBot | d87898ec11d6057797697ffa867ccb52770e65ec | [
"BSD-3-Clause"
] | 16 | 2017-09-21T00:48:37.000Z | 2021-01-02T20:22:39.000Z | """
Copyright (c) 2017, salesforce.com, inc.
All rights reserved.
Licensed under the BSD 3-Clause license.
For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
from typing import NamedTuple, Callable
import pytest
from AutoTriageBot import payout
from AutoTriageBot import config
@pytest.mark.fast
def test_suggestPayoutGivenType():
    """suggestPayoutGivenType falls back to the average payout when no
    domain matches, and returns the per-domain payout otherwise."""
    for vuln_type, payouts in config.payoutDB.items():
        # No domains given -> the average payout for this vulnerability type.
        assert payout.suggestPayoutGivenType(payouts, []) == payouts['average']
        # A matching domain -> that domain's specific payout.
        for domain, expected in payouts.items():
            assert payout.suggestPayoutGivenType(payouts, [domain]) == expected
@pytest.mark.fast
def test_suggestPayout():
    """suggestPayout infers the vulnerability type from the report text and
    the payout tier from the vulnerable domains."""
    MockedReportWrapper = NamedTuple(
        'MockedReportWrapper',
        [('getReportBody', Callable),
         ('getReportWeakness', Callable),
         ('getVulnDomains', Callable)])

    def make_report(text, domains):
        # Both the body and the weakness field carry the type marker.
        return MockedReportWrapper(getReportBody=lambda: text,
                                   getReportWeakness=lambda: text,
                                   getVulnDomains=lambda: domains)

    # An XSS report without domain info gets the average XSS payout.
    assert payout.suggestPayout(make_report('XSS', [])) == \
        config.payoutDB['xss']['average']
    # Every (type, domain) pair maps to its configured payout.
    for vuln_type, payouts in config.payoutDB.items():
        for domain, expected in payouts.items():
            assert payout.suggestPayout(make_report(vuln_type, [domain])) == expected
    # An unrecognised report yields no suggestion.
    assert payout.suggestPayout(make_report('', [])) is None
ace1ac69780beace43f38ce3476a5298d7d60678 | 1,329 | py | Python | foolbox/tests/test_model_wrappers.py | pGit1/foolbox | a75ddec2c202890026b40bd298d95b87146d82de | [
"MIT"
] | null | null | null | foolbox/tests/test_model_wrappers.py | pGit1/foolbox | a75ddec2c202890026b40bd298d95b87146d82de | [
"MIT"
] | null | null | null | foolbox/tests/test_model_wrappers.py | pGit1/foolbox | a75ddec2c202890026b40bd298d95b87146d82de | [
"MIT"
] | 1 | 2021-02-26T10:04:20.000Z | 2021-02-26T10:04:20.000Z | import numpy as np
from foolbox.models import ModelWrapper
from foolbox.models import CompositeModel
def test_context_manager(gl_bn_model):
    """A wrapped model works as a context manager and yields a (still
    wrapped) model inside the ``with`` block."""
    assert isinstance(gl_bn_model, ModelWrapper)
    with gl_bn_model as entered:
        assert entered is not None
        assert isinstance(entered, ModelWrapper)
def test_wrapping(gl_bn_model, bn_image):
    """Wrapping preserves the class count, and a single-image prediction
    agrees with a batch of size one."""
    assert isinstance(gl_bn_model, ModelWrapper)
    assert gl_bn_model.num_classes() == 10
    single = gl_bn_model.predictions(bn_image)
    batched = gl_bn_model.batch_predictions(bn_image[np.newaxis])
    assert np.all(single == batched[0])
def test_composite_model(gl_bn_model, bn_model, bn_image, bn_label):
    """A CompositeModel forwards predictions to its first (forward) model
    and gradients to its second (backward) model."""
    composite = CompositeModel(gl_bn_model, bn_model)
    with composite:
        # Predictions come from the forward model.
        assert composite.num_classes() == gl_bn_model.num_classes()
        assert np.all(
            composite.predictions(bn_image) ==
            gl_bn_model.predictions(bn_image))
        # Gradients come from the backward model.
        assert np.all(
            composite.gradient(bn_image, bn_label) ==
            bn_model.gradient(bn_image, bn_label))
        expected = bn_model.predictions_and_gradient(bn_image, bn_label)
        actual = composite.predictions_and_gradient(bn_image, bn_label)
        assert np.all(actual[0] == expected[0])
        assert np.all(actual[1] == expected[1])
ace1acbd36fab570abbddf1d4836f5c77dbba353 | 18,513 | py | Python | gaphor/ui/mainwindow.py | 987Frogh/project-makehuman | 3afc838b03c50f8e574d8c87cb71de4435a18a6d | [
"Apache-2.0"
] | 1 | 2020-11-27T12:39:15.000Z | 2020-11-27T12:39:15.000Z | gaphor/ui/mainwindow.py | 987Frogh/project-makehuman | 3afc838b03c50f8e574d8c87cb71de4435a18a6d | [
"Apache-2.0"
] | null | null | null | gaphor/ui/mainwindow.py | 987Frogh/project-makehuman | 3afc838b03c50f8e574d8c87cb71de4435a18a6d | [
"Apache-2.0"
] | 3 | 2020-01-23T14:13:59.000Z | 2020-02-18T18:21:47.000Z | """
The main application window.
"""
import importlib.resources
import logging
from pathlib import Path
from typing import List, Tuple
from gi.repository import Gdk, Gio, GLib, Gtk
from gaphor import UML
from gaphor.abc import ActionProvider, Service
from gaphor.core import event_handler, gettext
from gaphor.event import ActionEnabled
from gaphor.services.undomanager import UndoManagerStateChanged
from gaphor.ui import APPLICATION_ID
from gaphor.ui.abc import UIComponent
from gaphor.ui.actiongroup import window_action_group
from gaphor.ui.diagrampage import DiagramPage
from gaphor.ui.event import (
DiagramOpened,
DiagramSelectionChanged,
FileLoaded,
FileSaved,
WindowClosed,
)
from gaphor.ui.layout import deserialize
from gaphor.UML.event import AttributeUpdated, ModelFlushed, ModelReady
# Module-level logger and the user's home directory (used to abbreviate
# file paths shown in the title bar and recent-files menu).
log = logging.getLogger(__name__)
HOME = str(Path.home())
class RecentFilesMenu(Gio.Menu):
    """A ``Gio.Menu`` that mirrors the files recently opened with Gaphor.

    The menu rebuilds itself whenever the ``Gtk.RecentManager`` reports a
    change, keeping at most ten entries (or a single "No recently opened
    models" placeholder).
    """

    def __init__(self, recent_manager):
        super().__init__()
        # Populate immediately, then keep in sync with the recent manager.
        self._on_recent_manager_changed(recent_manager)
        # TODO: should unregister if the window is closed.
        self._changed_id = recent_manager.connect(
            "changed", self._on_recent_manager_changed
        )

    def _on_recent_manager_changed(self, recent_manager):
        """Rebuild the menu from scratch from the recent manager's items."""
        self.remove_all()
        for item in recent_manager.get_items():
            # Only list files that were opened by this application.
            if APPLICATION_ID in item.get_applications():
                menu_item = Gio.MenuItem.new(
                    item.get_uri_display().replace(HOME, "~"), "win.file-open-recent"
                )
                # menu_item.set_action_and_target_value("win.file-open-recent", GLib.Variant.new_string(item.get_uri()))
                menu_item.set_attribute_value(
                    "target", GLib.Variant.new_string(item.get_uri())
                )
                self.append_item(menu_item)
                # Cap the menu at ten entries.
                if self.get_n_items() > 9:
                    break
        if self.get_n_items() == 0:
            self.append_item(
                Gio.MenuItem.new(gettext("No recently opened models"), None)
            )
def hamburger_menu(hamburger_model):
    """Build the "hamburger" menu button that pops over *hamburger_model*."""
    menu_button = Gtk.MenuButton()
    menu_button.add(
        Gtk.Image.new_from_icon_name("open-menu-symbolic", Gtk.IconSize.MENU)
    )
    popover = Gtk.Popover.new_from_model(menu_button, hamburger_model)
    menu_button.set_popover(popover)
    menu_button.show_all()
    return menu_button
def create_hamburger_model(export_menu, tools_menu):
    """Assemble the menu model shown by the hamburger button.

    Sections, top to bottom: file creation, save/export, tools and
    application-level items (preferences, shortcuts, about).
    """
    model = Gio.Menu.new()

    new_section = Gio.Menu.new()
    new_section.append(gettext("New"), "win.file-new")
    new_section.append(gettext("New from Template"), "win.file-new-template")
    model.append_section(None, new_section)

    save_section = Gio.Menu.new()
    save_section.append(gettext("Save As..."), "win.file-save-as")
    save_section.append_submenu(gettext("Export"), export_menu)
    model.append_section(None, save_section)

    tools_section = Gio.Menu.new()
    tools_section.append_submenu(gettext("Tools"), tools_menu)
    model.append_section(None, tools_section)

    app_section = Gio.Menu.new()
    app_section.append(gettext("Preferences"), "app.preferences")
    app_section.append(gettext("Keyboard Shortcuts"), "app.shortcuts")
    app_section.append(gettext("About Gaphor"), "app.about")
    model.append_section(None, app_section)

    return model
def create_recent_files_button(recent_manager=None):
    """Build the drop-down button listing recently opened files.

    When *recent_manager* is None the default ``Gtk.RecentManager`` is used.
    """
    drop_down = Gtk.MenuButton()
    drop_down.add(
        Gtk.Image.new_from_icon_name("pan-down-symbolic", Gtk.IconSize.MENU)
    )
    manager = recent_manager or Gtk.RecentManager.get_default()
    model = Gio.Menu.new()
    model.append_section(
        gettext("Recently opened files"),
        RecentFilesMenu(manager),
    )
    drop_down.set_popover(Gtk.Popover.new_from_model(drop_down, model))
    drop_down.show_all()
    return drop_down
class MainWindow(Service, ActionProvider):
    """
    The main window for the application.
    It contains a Namespace-based tree view and a menu and a statusbar.

    The window is created lazily by :meth:`open`; until then ``self.window``
    is None.  Event-bus subscriptions made in :meth:`open` are torn down
    again in :meth:`shutdown`.
    """

    # Persisted window size, read back from the properties service.
    size = property(lambda s: s.properties.get("ui.window-size", (760, 580)))

    def __init__(
        self,
        event_manager,
        component_registry,
        element_factory,
        properties,
        export_menu,
        tools_menu,
    ):
        self.event_manager = event_manager
        self.component_registry = component_registry
        self.element_factory = element_factory
        self.properties = properties
        self.export_menu = export_menu
        self.tools_menu = tools_menu

        self.title = "Gaphor"
        self.window: Gtk.Window = None
        self.filename = None
        self.model_changed = False
        self.layout = None

        self.init_styling()

    def init_styling(self):
        """Install the application-wide CSS from gaphor/ui/layout.css."""
        with importlib.resources.path("gaphor.ui", "layout.css") as css_file:
            style_provider = Gtk.CssProvider()
            style_provider.load_from_path(str(css_file))
            Gtk.StyleContext.add_provider_for_screen(
                Gdk.Screen.get_default(),
                style_provider,
                Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION,
            )

    def shutdown(self):
        """Destroy the window and undo the subscriptions made in open()."""
        if self.window:
            self.window.destroy()
            self.window = None

        em = self.event_manager
        em.unsubscribe(self._on_file_manager_state_changed)
        em.unsubscribe(self._on_undo_manager_state_changed)
        em.unsubscribe(self._new_model_content)
        em.unsubscribe(self._on_action_enabled)

    def get_ui_component(self, name):
        """Look up a registered UI component by name."""
        return self.component_registry.get(UIComponent, name)

    def open(self, gtk_app=None):
        """Open the main window.
        """
        self.window = (
            Gtk.ApplicationWindow.new(gtk_app)
            if gtk_app
            else Gtk.Window.new(type=Gtk.WindowType.TOPLEVEL)
        )

        def button(label, action_name):
            # Small helper: a labelled button wired to a window action.
            b = Gtk.Button.new_with_label(label)
            b.set_action_name(action_name)
            b.show()
            return b

        header = Gtk.HeaderBar()
        header.set_show_close_button(True)
        self.window.set_titlebar(header)
        header.show()

        # Left side: Open + recent-files drop-down, then "new diagram".
        button_box = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 0)
        button_box.get_style_context().add_class("linked")
        button_box.pack_start(button(gettext("Open"), "win.file-open"), False, False, 0)
        button_box.pack_start(create_recent_files_button(), False, False, 0)
        button_box.show()
        header.pack_start(button_box)
        b = Gtk.Button.new_from_icon_name(
            "gaphor-new-diagram-symbolic", Gtk.IconSize.MENU
        )
        b.set_action_name("tree-view.create-diagram")
        b.show()
        header.pack_start(b)

        # Right side: hamburger menu, Save, and the editors toggle.
        header.pack_end(
            hamburger_menu(
                create_hamburger_model(self.export_menu.menu, self.tools_menu.menu)
            )
        )
        header.pack_end(button(gettext("Save"), "win.file-save"))

        b = Gtk.MenuButton.new()
        image = Gtk.Image.new_from_icon_name(
            "document-edit-symbolic", Gtk.IconSize.MENU
        )
        b.add(image)
        b.set_action_name("win.show-editors")
        b.show_all()
        header.pack_end(b)

        self.set_title()

        self.window.set_default_size(*self.size)

        def _factory(name):
            comp = self.get_ui_component(name)
            widget = comp.open()
            # Okay, this may be hackish. Action groups on component level are also added
            # to the main window. This ensures that we can call those items from the
            # (main) menus as well. Also this makes enabling/disabling work.
            for prefix in widget.list_action_prefixes():
                assert prefix not in ("app", "win")
                self.window.insert_action_group(prefix, widget.get_action_group(prefix))
            return widget

        self.layout = []
        with importlib.resources.open_text("gaphor.ui", "layout.xml") as f:
            deserialize(self.layout, self.window, f.read(), _factory)

        action_group, accel_group = window_action_group(self.component_registry)
        self.window.insert_action_group("win", action_group)
        self.window.add_accel_group(accel_group)

        self.window.present()

        self.window.connect("delete-event", self._on_window_delete)

        # We want to store the window size, so it can be reloaded on startup
        self.window.set_resizable(True)
        self.window.connect("size-allocate", self._on_window_size_allocate)

        em = self.event_manager
        em.subscribe(self._on_file_manager_state_changed)
        em.subscribe(self._on_undo_manager_state_changed)
        em.subscribe(self._new_model_content)
        em.subscribe(self._on_action_enabled)

    def open_welcome_page(self):
        """
        Create a new tab with a textual welcome page, a sort of 101 for
        Gaphor.
        """

    def set_title(self):
        """
        Sets the window title.

        The title shows the model's file name (with an "[edited]" marker
        when there are unsaved changes) and the parent directory as the
        header-bar subtitle.
        """
        if self.window:
            if self.filename:
                p = Path(self.filename)
                title = p.name
                subtitle = str(p.parent).replace(HOME, "~")
            else:
                title = self.title
                subtitle = ""
            if self.model_changed:
                title += " [" + gettext("edited") + "]"
            self.window.set_title(title)
            self.window.get_titlebar().set_subtitle(subtitle)

    # Signal callbacks:

    @event_handler(ModelReady)
    def _new_model_content(self, event):
        """
        Open the toplevel element and load toplevel diagrams.
        """
        # TODO: Make handlers for ModelReady from within the GUI obj
        for diagram in self.element_factory.select(
            lambda e: e.isKindOf(UML.Diagram)
            and not (e.namespace and e.namespace.namespace)
        ):
            self.event_manager.handle(DiagramOpened(diagram))

    @event_handler(FileLoaded, FileSaved)
    def _on_file_manager_state_changed(self, event):
        """A load/save resets the dirty flag and refreshes the title."""
        self.model_changed = False
        self.filename = event.filename
        self.set_title()

    @event_handler(UndoManagerStateChanged)
    def _on_undo_manager_state_changed(self, event):
        """Track the undo stack: the model is dirty iff undo is possible.
        """
        undo_manager = event.service
        if self.model_changed != undo_manager.can_undo():
            self.model_changed = undo_manager.can_undo()
            self.set_title()

    @event_handler(ActionEnabled)
    def _on_action_enabled(self, event):
        # Propagate enable/disable requests to the named window action.
        ag = self.window.get_action_group(event.scope)
        a = ag.lookup_action(event.name)
        a.set_enabled(event.enabled)

    def _on_window_delete(self, window=None, event=None):
        # Translate the Gtk delete-event into an application-level event;
        # returning True stops Gtk from destroying the window itself.
        self.event_manager.handle(WindowClosed(self))
        return True

    def _on_window_size_allocate(self, window, allocation):
        """
        Store the window size in a property.
        """
        width, height = window.get_size()
        self.properties.set("ui.window-size", (width, height))

    # TODO: Does not belong here
    def create_item(self, ui_component):
        """
        Create an item for a ui component. This method can be called from UIComponents.
        """
        window = Gtk.Window.new(Gtk.WindowType.TOPLEVEL)
        window.set_transient_for(self.window)
        window.set_title(ui_component.title)
        window.add(ui_component.open())
        window.show()
        window.ui_component = ui_component
Gtk.AccelMap.add_filter("gaphor")
class Diagrams(UIComponent, ActionProvider):
    """UI component that hosts the open diagrams in a ``Gtk.Notebook``.

    One notebook page per diagram; pages are opened/closed in response to
    application events (DiagramOpened, ModelFlushed, AttributeUpdated).
    """

    title = gettext("Diagrams")

    def __init__(self, event_manager, element_factory, properties):
        self.event_manager = event_manager
        self.element_factory = element_factory
        self.properties = properties
        self._notebook: Gtk.Notebook = None

    def open(self):
        """Open the diagrams component.

        Returns:
            The Gtk.Notebook.
        """
        self._notebook = Gtk.Notebook()
        self._notebook.props.scrollable = True
        self._notebook.show()
        self._notebook.connect("switch-page", self._on_switch_page)
        self.event_manager.subscribe(self._on_show_diagram)
        self.event_manager.subscribe(self._on_name_change)
        self.event_manager.subscribe(self._on_flush_model)
        return self._notebook

    def close(self):
        """Close the diagrams component."""
        self.event_manager.unsubscribe(self._on_flush_model)
        self.event_manager.unsubscribe(self._on_name_change)
        self.event_manager.unsubscribe(self._on_show_diagram)
        if self._notebook:
            self._notebook.destroy()
            self._notebook = None

    def get_current_diagram(self):
        """Returns the current page of the notebook.

        Returns (DiagramPage): The current diagram page.
        """
        page_num = self._notebook.get_current_page()
        child_widget = self._notebook.get_nth_page(page_num)
        if child_widget is not None:
            return child_widget.diagram_page.get_diagram()
        else:
            return None

    def get_current_view(self):
        """Returns the current view of the diagram page.

        Returns (GtkView): The current view.
        """
        if not self._notebook:
            return
        page_num = self._notebook.get_current_page()
        child_widget = self._notebook.get_nth_page(page_num)
        return child_widget and child_widget.diagram_page.get_view()

    def cb_close_tab(self, button, widget):
        """Callback to close the tab and remove the notebook page.

        Args:
            button (Gtk.Button): The button the callback is from.
            widget (Gtk.Widget): The child widget of the tab.
        """
        page_num = self._notebook.page_num(widget)
        # TODO why does Gtk.Notebook give a GTK-CRITICAL if you remove a page
        # with set_show_tabs(True)?
        self._clear_ui_settings(widget)
        self._notebook.remove_page(page_num)
        widget.diagram_page.close()
        widget.destroy()

    def create_tab(self, title, widget):
        """Creates a new Notebook tab with a label and close button.

        Args:
            title (str): The title of the tab, the diagram name.
            widget (Gtk.Widget): The child widget of the tab.
        """
        page_num = self._notebook.append_page(
            child=widget, tab_label=self.tab_label(title, widget)
        )
        self._notebook.set_current_page(page_num)
        self._notebook.set_tab_reorderable(widget, True)

        view = widget.diagram_page.view
        self.event_manager.handle(
            DiagramSelectionChanged(view, view.focused_item, view.selected_items)
        )

    def tab_label(self, title, widget):
        """Build the tab header: the diagram title plus a close button."""
        tab_box = Gtk.Box.new(orientation=Gtk.Orientation.HORIZONTAL, spacing=0)
        label = Gtk.Label.new(title)
        tab_box.pack_start(child=label, expand=True, fill=True, padding=0)

        close_image = Gtk.Image.new_from_icon_name(
            icon_name="window-close", size=Gtk.IconSize.BUTTON
        )
        button = Gtk.Button()
        button.set_relief(Gtk.ReliefStyle.NONE)
        button.set_focus_on_click(False)
        button.add(close_image)
        button.connect("clicked", self.cb_close_tab, widget)
        tab_box.pack_start(child=button, expand=False, fill=False, padding=0)
        tab_box.show_all()
        return tab_box

    def get_widgets_on_pages(self):
        """Gets the widget on each open page Notebook page.

        The page is the page number in the Notebook (0 indexed) and the widget
        is the child widget on each page.

        Returns:
            List of tuples (page, widget) of the currently open Notebook pages.
        """
        widgets_on_pages: List[Tuple[int, Gtk.Widget]] = []
        if not self._notebook:
            return widgets_on_pages

        num_pages = self._notebook.get_n_pages()
        for page in range(0, num_pages):
            widget = self._notebook.get_nth_page(page)
            widgets_on_pages.append((page, widget))
        return widgets_on_pages

    def _on_switch_page(self, notebook, page, new_page_num):
        """Move the "diagram" action group from the old page to the new one."""
        current_page_num = notebook.get_current_page()
        if current_page_num >= 0:
            self._clear_ui_settings(notebook.get_nth_page(current_page_num))
        self._add_ui_settings(page)
        view = page.diagram_page.view
        self.event_manager.handle(
            DiagramSelectionChanged(view, view.focused_item, view.selected_items)
        )

    def _add_ui_settings(self, page):
        """Install the page's actions and shortcuts on the toplevel window."""
        window = page.get_toplevel()
        window.insert_action_group("diagram", page.action_group.actions)
        window.add_accel_group(page.action_group.shortcuts)

    def _clear_ui_settings(self, page):
        """Remove the page's actions and shortcuts from the toplevel window."""
        window = page.get_toplevel()
        window.insert_action_group("diagram", None)
        window.remove_accel_group(page.action_group.shortcuts)

    @event_handler(DiagramOpened)
    def _on_show_diagram(self, event):
        """Show a Diagram element in the Notebook.

        If a diagram is already open on a Notebook page, show that one,
        otherwise create a new Notebook page.

        Args:
            event: The service event that is calling the method.
        """
        diagram = event.diagram

        # Try to find an existing diagram page and give it focus
        for page, widget in self.get_widgets_on_pages():
            if widget.diagram_page.get_diagram() is diagram:
                self._notebook.set_current_page(page)
                return widget.diagram_page

        # No existing diagram page found, creating one
        page = DiagramPage(
            diagram, self.event_manager, self.element_factory, self.properties
        )
        widget = page.construct()
        widget.set_name("diagram-tab")
        widget.diagram_page = page
        page.set_drawing_style(self.properties.get("diagram.sloppiness", 0))

        self.create_tab(diagram.name, widget)
        return page

    @event_handler(ModelFlushed)
    def _on_flush_model(self, event):
        """
        Close all tabs.
        """
        while self._notebook.get_n_pages():
            self._notebook.remove_page(0)

    @event_handler(AttributeUpdated)
    def _on_name_change(self, event):
        """Keep tab labels in sync with diagram renames."""
        if event.property is UML.Diagram.name:
            for page in range(0, self._notebook.get_n_pages()):
                widget = self._notebook.get_nth_page(page)
                if event.element is widget.diagram_page.diagram:
                    self._notebook.set_tab_label(
                        widget, self.tab_label(event.new_value, widget)
                    )
| 33.782847 | 120 | 0.642467 |
ace1ae027b14378211d8322fe0db1399eb59a836 | 645 | py | Python | myicons/urls.py | plrthink/myicons | 62475e118e2c7404d88146ea5d67961418d7f8ab | [
"BSD-2-Clause"
] | 83 | 2015-01-02T04:50:43.000Z | 2021-06-06T03:26:55.000Z | myicons/urls.py | plrthink/myicons | 62475e118e2c7404d88146ea5d67961418d7f8ab | [
"BSD-2-Clause"
] | 2 | 2015-01-04T11:25:20.000Z | 2015-01-05T11:13:37.000Z | myicons/urls.py | plrthink/myicons | 62475e118e2c7404d88146ea5d67961418d7f8ab | [
"BSD-2-Clause"
] | 20 | 2015-01-15T10:00:09.000Z | 2019-11-06T07:25:59.000Z | from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
# Top-level URL routing: each app contributes its own patterns.  Order
# matters — patterns are matched first to last.
urlpatterns = patterns(
    '',
    url(r'^', include('frontend.urls')),
    url(r'^', include('iconpacks.urls')),
    url(r'^', include('iconcollections.urls')),
    url(r'^', include('revisions.urls')),
    url(r'^', include('labels.urls')),
    url(r'^', include('fontbuilder.urls')),
    url(r'^convert/', include('convert.urls')),
    url(r'^accounts/', include('accounts.urls')),
)
# In development only, serve collected static files through Django itself.
if settings.DEBUG:
    urlpatterns += static(
        settings.STATIC_URL,
        document_root=settings.STATIC_ROOT
    )
| 25.8 | 51 | 0.643411 |
ace1af2f661182e90e8f0f7f3de17d4f5921c3c8 | 2,899 | py | Python | src/models/two_task_transformer.py | jiviteshjain/why-cite | e8aa2ea7995daff4f9c3f033534f7e5869851789 | [
"MIT"
] | null | null | null | src/models/two_task_transformer.py | jiviteshjain/why-cite | e8aa2ea7995daff4f9c3f033534f7e5869851789 | [
"MIT"
] | null | null | null | src/models/two_task_transformer.py | jiviteshjain/why-cite | e8aa2ea7995daff4f9c3f033534f7e5869851789 | [
"MIT"
] | null | null | null | from transformers import AutoModel, AutoTokenizer
import torch
from omegaconf.errors import ConfigKeyError
class TwoTaskTransformer(torch.nn.Module):
    """Transformer encoder with two classification heads (citation intent
    and paper section), both fed from the [CLS] token embedding.

    Fix vs. the previous revision: the heads are now stored in
    ``torch.nn.ModuleDict`` containers instead of plain Python dicts.
    Plain dicts do not register their contents as sub-modules, so the head
    parameters were invisible to ``model.parameters()`` (an optimiser built
    from it never trained them), missing from ``state_dict()`` checkpoints,
    and the dropout layers ignored ``model.train()``/``model.eval()``.
    """

    # Hidden size of the language model ([CLS] embedding width).
    _HIDDEN_SIZE = 768
    # Number of target classes for each task.
    _NUM_CLASSES = 6

    def __init__(self, lang_model, intent_dropout_probability,
                 section_dropout_probability, device):
        """
        Args:
            lang_model: pretrained transformer; when called with
                ``input_ids`` and ``attention_mask`` it must return a tuple
                whose first element is the last hidden state
                (batch, seq, 768).
            intent_dropout_probability: dropout rate of the intent head.
            section_dropout_probability: dropout rate of the section head.
            device: torch device the model should live on.
        """
        super().__init__()
        self._device = device
        self._lang_model = lang_model.to(device)

        def make_head(dropout_probability):
            # One head = pre-classifier projection, ReLU (applied in
            # _pass_through_head), dropout, final classifier.
            return torch.nn.ModuleDict({
                'pre_classifier':
                    torch.nn.Linear(self._HIDDEN_SIZE, self._HIDDEN_SIZE),
                'dropout_layer':
                    torch.nn.Dropout(dropout_probability),
                'classifier':
                    torch.nn.Linear(self._HIDDEN_SIZE, self._NUM_CLASSES),
            })

        # ModuleDict (not a plain dict) so the heads are registered
        # sub-modules: trained, checkpointed, and toggled by train()/eval().
        self._heads = torch.nn.ModuleDict({
            'intent': make_head(intent_dropout_probability),
            'section': make_head(section_dropout_probability),
        }).to(device)

    def _pass_through_head(self, task, hidden_state):
        """Run *hidden_state* through the head for *task* ('intent' or
        'section') and return its logits."""
        head = self._heads[task]
        intermediate = head['pre_classifier'](hidden_state)
        intermediate = torch.nn.functional.relu(intermediate)
        intermediate = head['dropout_layer'](intermediate)
        return head['classifier'](intermediate)

    def forward(self, data):
        """Compute the logits for both tasks.

        Args:
            data: mapping with 'citation_context_ids' and
                'citation_context_mask' tensors of shape (batch, seq).

        Returns:
            list ``[intent_logits, section_logits]``, each (batch, 6).
        """
        input_ids = data['citation_context_ids'].to(self._device,
                                                    dtype=torch.long)
        attention_mask = data['citation_context_mask'].to(self._device,
                                                          dtype=torch.long)
        embeddings = self._lang_model(input_ids=input_ids,
                                      attention_mask=attention_mask)[0]
        # The [CLS] (first-token) embedding summarises the sequence.
        hidden_state = embeddings[:, 0]
        return [
            self._pass_through_head('intent', hidden_state),
            self._pass_through_head('section', hidden_state)
        ]
def get_model(config, device):
    """Build a TwoTaskTransformer, its tokenizer, and the max length.

    Loads the pretrained encoder and tokenizer named in the config.  When
    the active dataset declares special tokens, they are added to the
    tokenizer and the encoder's embedding matrix is resized to match;
    otherwise the omegaconf lookup raises ConfigKeyError and is skipped.

    Returns a ``(model, tokenizer, max_length)`` triple.
    """
    model_cfg = config.models.two_task_transformer

    lang_model = AutoModel.from_pretrained(model_cfg.pretrained_identifier)
    tokenizer = AutoTokenizer.from_pretrained(model_cfg.pretrained_identifier)

    try:
        special_tokens = [
            str(x) for x in config.dataloaders[
                config.training.dataset_in_use].special_tokens
        ]
        tokenizer.add_special_tokens(
            {'additional_special_tokens': special_tokens})
        lang_model.resize_token_embeddings(len(tokenizer))
    except ConfigKeyError:
        # No special tokens configured for this dataset.
        pass

    model = TwoTaskTransformer(lang_model,
                               model_cfg.intent_dropout_probability,
                               model_cfg.section_dropout_probability,
                               device)
    return model, tokenizer, model_cfg.max_length
| 36.2375 | 77 | 0.604346 |
ace1b055a1dc639734ce82a8a672bf2aa77c25bc | 688 | py | Python | equality/util/prev_transaction_block.py | grayfallstown/equality-blockchain | 019425b703f6b013e441481ac43389a80415f2f1 | [
"Apache-2.0"
] | 10 | 2021-07-04T15:14:12.000Z | 2021-10-17T14:52:56.000Z | equality/util/prev_transaction_block.py | grayfallstown/equality-blockchain | 019425b703f6b013e441481ac43389a80415f2f1 | [
"Apache-2.0"
] | 11 | 2021-07-04T19:31:36.000Z | 2022-01-11T02:46:23.000Z | equality/util/prev_transaction_block.py | grayfallstown/equality-blockchain | 019425b703f6b013e441481ac43389a80415f2f1 | [
"Apache-2.0"
] | 11 | 2021-07-04T21:49:17.000Z | 2021-10-04T17:45:38.000Z | from typing import Tuple
from equality.consensus.block_record import BlockRecord
from equality.consensus.blockchain_interface import BlockchainInterface
from equality.util.ints import uint128
def get_prev_transaction_block(
    curr: BlockRecord,
    blocks: BlockchainInterface,
    total_iters_sp: uint128,
) -> Tuple[bool, BlockRecord]:
    """Walk back from ``curr`` to the most recent transaction block.

    Returns ``(is_transaction_block, prev_transaction_block)``.  The flag
    is True when the located transaction block lies strictly before the
    signage point (``total_iters_sp`` exceeds its ``total_iters``), in
    which case that block is returned; otherwise the original ``curr`` is
    returned unchanged with the flag False.
    """
    candidate = curr
    # Follow prev_hash pointers until a transaction block is reached.
    while not candidate.is_transaction_block:
        candidate = blocks.block_record(candidate.prev_hash)

    if total_iters_sp <= candidate.total_iters:
        return False, curr
    return True, candidate
| 31.272727 | 71 | 0.771802 |
ace1b184274767dda4cdd44fa949740f14851e9a | 6,190 | py | Python | conans/test/unittests/client/cmd/ast_replacement_scm_test.py | matthiasng/conan | 634eadc319da928084633a344d42785edccb8d6c | [
"MIT"
] | 2 | 2019-01-09T10:01:29.000Z | 2019-01-09T10:01:31.000Z | conans/test/unittests/client/cmd/ast_replacement_scm_test.py | matthiasng/conan | 634eadc319da928084633a344d42785edccb8d6c | [
"MIT"
] | 6 | 2016-03-08T22:06:45.000Z | 2020-06-02T15:22:19.000Z | conans/test/unittests/client/cmd/ast_replacement_scm_test.py | matthiasng/conan | 634eadc319da928084633a344d42785edccb8d6c | [
"MIT"
] | 2 | 2019-08-07T18:15:16.000Z | 2021-08-04T12:33:05.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
import os
import shutil
import tempfile
import unittest
import uuid
import six
from conans.client.cmd.export import _replace_scm_data_in_conanfile
from conans.client.graph.python_requires import ConanPythonRequire
from conans.client.loader import parse_conanfile
from conans.test.utils.tools import try_remove_readonly
from conans.util.files import load
class ASTReplacementTest(unittest.TestCase):
    """Exercise ``_replace_scm_data_in_conanfile``.

    Each test writes a templated conanfile to a temp folder, replaces the
    ``scm = {"url": "auto", "revision": "auto"}`` attribute via AST
    rewriting, then checks the result both textually and by re-parsing it
    with Conan's own loader.
    """

    # Loader hook shared by all tests (no python_requires resolution needed).
    python_requires = ConanPythonRequire(None, None)
    # Values expected to replace the "auto" placeholders.
    scm_data = {'type': 'git',
                'url': 'this-is-the-url',
                'revision': '42'}
    # Template: {header} lets tests prepend shebang/coding lines; {footer}
    # appends arbitrary trailing source after the scm attribute.
    conanfile = six.u("""{header}
from conans import ConanFile

class LibConan(ConanFile):
    name = "Lib"
    author = "{author}"
    scm = {{"type": "git",
           "url": "auto",
           "revision": "auto"}}
{footer}
""")

    def run(self, *args, **kwargs):
        # Fresh temp folder per test; removed even when files turned
        # read-only (Windows) via the try_remove_readonly error handler.
        self._tmp_folder = tempfile.mkdtemp(suffix='_conans')
        try:
            super(ASTReplacementTest, self).run(*args, **kwargs)
        finally:
            shutil.rmtree(self._tmp_folder, ignore_errors=False, onerror=try_remove_readonly)

    def _get_conanfile(self, header='', author="jgsogo", encoding="ascii", footer=""):
        # Materialize the template into a uniquely named file; return its path.
        tmp = os.path.join(self._tmp_folder, str(uuid.uuid4()))
        with codecs.open(tmp, 'w', encoding=encoding) as f:
            f.write(self.conanfile.format(header=header, author=author, footer=footer))
        return tmp

    def _check_result(self, conanfile):
        # After replacement the url/revision must each appear exactly once.
        content = load(conanfile)
        self.assertEqual(content.count(self.scm_data['url']), 1)
        self.assertEqual(content.count(self.scm_data['revision']), 1)

        self.assertIn(self.scm_data['url'], content)
        self.assertIn(self.scm_data['revision'], content)

        try:
            # Check it is loadable by Conan machinery
            _, conanfile = parse_conanfile(conanfile, python_requires=self.python_requires)
        except Exception as e:
            self.fail("Invalid conanfile: {}".format(e))
        else:
            self.assertEqual(conanfile.scm, self.scm_data)

    def test_base(self):
        # Plain template, no header/footer.
        conanfile = self._get_conanfile()
        _replace_scm_data_in_conanfile(conanfile, self.scm_data)
        self._check_result(conanfile)

    @unittest.skipUnless(six.PY3, "Works only in Py3 (assumes utf-8 for source files)")
    def test_author_non_ascii(self):
        # Non-ASCII content elsewhere in the file must survive the rewrite.
        conanfile = self._get_conanfile(author=six.u("¡ÑÁí!"), encoding='utf-8')
        _replace_scm_data_in_conanfile(conanfile, self.scm_data)
        self._check_result(conanfile)

    def test_shebang_utf8(self):
        # Shebang + coding cookie with utf-8 body.
        header = "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-"
        conanfile = self._get_conanfile(author=six.u("¡Ñandú!"), header=header, encoding='utf-8')
        _replace_scm_data_in_conanfile(conanfile, self.scm_data)
        self._check_result(conanfile)

    def test_shebang_ascii(self):
        # Coding cookie present but the file itself is plain ASCII.
        header = "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-"
        conanfile = self._get_conanfile(author="jgsogo", header=header, encoding='ascii')
        _replace_scm_data_in_conanfile(conanfile, self.scm_data)
        self._check_result(conanfile)

    def test_shebang_several(self):
        # Duplicate coding cookies must not confuse line accounting.
        header = "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n# -*- coding: utf-8 -*-"
        conanfile = self._get_conanfile(author=six.u("¡Ñandú!"), header=header, encoding='utf-8')
        _replace_scm_data_in_conanfile(conanfile, self.scm_data)
        self._check_result(conanfile)

    def test_multiline_statement(self):
        """ Statement with several lines below the scm attribute """
        statement = "\n    long_list = 'a', 'b', 'c' \\\n                'd', 'e'"
        conanfile = self._get_conanfile(footer=statement)
        _replace_scm_data_in_conanfile(conanfile, self.scm_data)
        self._check_result(conanfile)

    # Add comments below the SCM
    def test_comment_file_level(self):
        comment = "# This is a comment, file level"
        conanfile = self._get_conanfile(footer=comment)
        self.assertIn(comment, load(conanfile))
        _replace_scm_data_in_conanfile(conanfile, self.scm_data)
        self._check_result(conanfile)
        self.assertIn(comment, load(conanfile))

    def test_comment_class_level(self):
        comment = "    # This is a comment, file level"
        conanfile = self._get_conanfile(footer=comment)
        self.assertIn(comment, load(conanfile))
        _replace_scm_data_in_conanfile(conanfile, self.scm_data)
        self._check_result(conanfile)
        self.assertIn(comment, load(conanfile))

    def test_two_comments(self):
        comment = "    # line1\n    # line2"
        conanfile = self._get_conanfile(footer=comment)
        self.assertIn(comment, load(conanfile))
        _replace_scm_data_in_conanfile(conanfile, self.scm_data)
        self._check_result(conanfile)
        self.assertIn(comment, load(conanfile))

    def test_multiline_comment(self):
        comment = '    """\n    line1\n    line2\n    """'
        conanfile = self._get_conanfile(footer=comment)
        self.assertIn(comment, load(conanfile))
        _replace_scm_data_in_conanfile(conanfile, self.scm_data)
        self._check_result(conanfile)
        # FIXME: We lost the multiline comment
        # self.assertIn(comment, load(conanfile))

    # Something below the comment
    def test_comment_and_attribute(self):
        comment = '    # line1\n    url=23'
        conanfile = self._get_conanfile(footer=comment)
        self.assertIn(comment, load(conanfile))
        _replace_scm_data_in_conanfile(conanfile, self.scm_data)
        self._check_result(conanfile)
        self.assertIn(comment, load(conanfile))

    def test_multiline_comment_and_attribute(self):
        comment = '    """\n    line1\n    line2\n    """\n    url=23'
        conanfile = self._get_conanfile(footer=comment)
        self.assertIn(comment, load(conanfile))
        _replace_scm_data_in_conanfile(conanfile, self.scm_data)
        self._check_result(conanfile)
        # FIXME: We lost the multiline comment
        self.assertIn("    url=23", load(conanfile))
        # self.assertIn(comment, load(conanfile))
ace1b24b13ca8cd210d381df390970c9db3d9e7f | 8,509 | py | Python | arena.py | Nikunj-Gupta/rial-dial-pytorch | 97c0ea1bd8cfb5de60026f454c84229418bb43b3 | [
"Apache-2.0"
] | 287 | 2018-10-25T12:28:45.000Z | 2022-03-09T10:32:17.000Z | arena.py | iassael/learning-to-communicate-pytorch | 3a7401c22bcefd17fe6a0bec59e356ddaa66eaa4 | [
"Apache-2.0"
] | 2 | 2018-10-26T16:14:15.000Z | 2021-05-10T11:32:31.000Z | arena.py | iassael/learning-to-communicate-pytorch | 3a7401c22bcefd17fe6a0bec59e356ddaa66eaa4 | [
"Apache-2.0"
] | 67 | 2018-10-26T08:54:32.000Z | 2022-03-09T05:27:22.000Z | import copy
import numpy as np
import torch
from torch.autograd import Variable
from utils.dotdic import DotDic
class Arena:
	"""Runs batched multi-agent communication episodes and training.

	Drives `game` with the agents' models, recording per-step tensors
	(actions, messages, hidden states, Q-values) into DotDic step records
	that the agents later learn from.
	"""

	def __init__(self, opt, game):
		# opt: DotDic of hyperparameters; game: environment with
		# reset/get_state/step/get_comm_limited/get_stats.
		self.opt = opt
		self.game = game
		self.eps = opt.eps

	def create_episode(self):
		"""Allocate an empty episode container (one slot per batch entry)."""
		opt = self.opt
		episode = DotDic({})
		episode.steps = torch.zeros(opt.bs).int()
		episode.ended = torch.zeros(opt.bs).int()
		episode.r = torch.zeros(opt.bs, opt.game_nagents).float()
		episode.step_records = []

		return episode

	def create_step_record(self):
		"""Allocate zeroed per-step buffers for all agents and batches."""
		opt = self.opt
		record = DotDic({})
		record.s_t = None
		record.r_t = torch.zeros(opt.bs, opt.game_nagents)
		record.terminal = torch.zeros(opt.bs)

		record.agent_inputs = []

		# Track actions at time t per agent
		record.a_t = torch.zeros(opt.bs, opt.game_nagents, dtype=torch.long)
		if not opt.model_dial:
			record.a_comm_t = torch.zeros(opt.bs, opt.game_nagents, dtype=torch.long)

		# Track messages sent at time t per agent
		if opt.comm_enabled:
			comm_dtype = opt.model_dial and torch.float or torch.long
			# NOTE(review): the line below overrides the dial/RIAL dtype
			# choice above — messages are always stored as float here.
			comm_dtype = torch.float
			record.comm = torch.zeros(opt.bs, opt.game_nagents, opt.game_comm_bits, dtype=comm_dtype)
			if opt.model_dial and opt.model_target:
				record.comm_target = record.comm.clone()

		# Track hidden state per time t per agent
		record.hidden = torch.zeros(opt.game_nagents, opt.model_rnn_layers, opt.bs, opt.model_rnn_size)
		record.hidden_target = torch.zeros(opt.game_nagents, opt.model_rnn_layers, opt.bs, opt.model_rnn_size)

		# Track Q(a_t) and Q(a_max_t) per agent
		record.q_a_t = torch.zeros(opt.bs, opt.game_nagents)
		record.q_a_max_t = torch.zeros(opt.bs, opt.game_nagents)

		# Track Q(m_t) and Q(m_max_t) per agent
		if not opt.model_dial:
			record.q_comm_t = torch.zeros(opt.bs, opt.game_nagents)
			record.q_comm_max_t = torch.zeros(opt.bs, opt.game_nagents)

		return record

	def run_episode(self, agents, train_mode=False):
		"""Roll out one batched episode; in train_mode also run the
		target networks and record max-Q values for the learning update.

		agents is 1-indexed (agents[1..game_nagents]). Epsilon decays
		once per episode before the rollout starts.
		"""
		opt = self.opt
		game = self.game
		game.reset()
		self.eps = self.eps * opt.eps_decay

		step = 0
		episode = self.create_episode()
		s_t = game.get_state()
		episode.step_records.append(self.create_step_record())
		episode.step_records[-1].s_t = s_t
		# Training runs one extra step so Q(t+1) targets exist for step t.
		episode_steps = train_mode and opt.nsteps + 1 or opt.nsteps
		while step < episode_steps and episode.ended.sum() < opt.bs:
			episode.step_records.append(self.create_step_record())

			for i in range(1, opt.game_nagents + 1):
				# Get received messages per agent per batch
				agent = agents[i]
				agent_idx = i - 1
				comm = None
				if opt.comm_enabled:
					comm = episode.step_records[step].comm.clone()
					comm_limited = self.game.get_comm_limited(step, agent.id)
					if comm_limited is not None:
						# Only the single designated sender's message is visible.
						comm_lim = torch.zeros(opt.bs, 1, opt.game_comm_bits)
						for b in range(opt.bs):
							if comm_limited[b].item() > 0:
								comm_lim[b] = comm[b][comm_limited[b] - 1]
						comm = comm_lim
					else:
						# Zero out own message so agents don't hear themselves.
						comm[:, agent_idx].zero_()

				# Get prev action per batch
				prev_action = None
				if opt.model_action_aware:
					# Index 1 is the "no action yet" placeholder.
					prev_action = torch.ones(opt.bs, dtype=torch.long)
					if not opt.model_dial:
						prev_message = torch.ones(opt.bs, dtype=torch.long)
					for b in range(opt.bs):
						if step > 0 and episode.step_records[step - 1].a_t[b, agent_idx] > 0:
							prev_action[b] = episode.step_records[step - 1].a_t[b, agent_idx]
						if not opt.model_dial:
							if step > 0 and episode.step_records[step - 1].a_comm_t[b, agent_idx] > 0:
								prev_message[b] = episode.step_records[step - 1].a_comm_t[b, agent_idx]
					if not opt.model_dial:
						prev_action = (prev_action, prev_message)

				# Batch agent index for input into model
				batch_agent_index = torch.zeros(opt.bs, dtype=torch.long).fill_(agent_idx)

				agent_inputs = {
					's_t': episode.step_records[step].s_t[:, agent_idx],
					'messages': comm,
					'hidden': episode.step_records[step].hidden[agent_idx, :], # Hidden state
					'prev_action': prev_action,
					'agent_index': batch_agent_index
				}
				episode.step_records[step].agent_inputs.append(agent_inputs)

				# Compute model output (Q function + message bits)
				hidden_t, q_t = agent.model(**agent_inputs)
				episode.step_records[step + 1].hidden[agent_idx] = hidden_t.squeeze()

				# Choose next action and comm using eps-greedy selector
				(action, action_value), (comm_vector, comm_action, comm_value) = \
					agent.select_action_and_comm(step, q_t, eps=self.eps, train_mode=train_mode)

				# Store action + comm
				episode.step_records[step].a_t[:, agent_idx] = action
				episode.step_records[step].q_a_t[:, agent_idx] = action_value
				episode.step_records[step + 1].comm[:, agent_idx] = comm_vector
				if not opt.model_dial:
					episode.step_records[step].a_comm_t[:, agent_idx] = comm_action
					episode.step_records[step].q_comm_t[:, agent_idx] = comm_value

			# Update game state
			a_t = episode.step_records[step].a_t
			episode.step_records[step].r_t, episode.step_records[step].terminal = \
				self.game.step(a_t)

			# Accumulate steps
			if step < opt.nsteps:
				for b in range(opt.bs):
					if not episode.ended[b]:
						episode.steps[b] = episode.steps[b] + 1
						episode.r[b] = episode.r[b] + episode.step_records[step].r_t[b]

					if episode.step_records[step].terminal[b]:
						episode.ended[b] = 1

			# Target-network forward pass
			if opt.model_target and train_mode:
				for i in range(1, opt.game_nagents + 1):
					agent_target = agents[i]
					agent_idx = i - 1

					agent_inputs = episode.step_records[step].agent_inputs[agent_idx]
					# import pdb; pdb.set_trace()
					comm_target = agent_inputs.get('messages', None)

					if opt.comm_enabled and opt.model_dial:
						comm_target = episode.step_records[step].comm_target.clone()
						# NOTE(review): `agent` here is left over from the action
						# loop above (the last agent) — presumably agent_target
						# was intended; confirm against the original Lua code.
						comm_limited = self.game.get_comm_limited(step, agent.id)
						if comm_limited is not None:
							comm_lim = torch.zeros(opt.bs, 1, opt.game_comm_bits)
							for b in range(opt.bs):
								if comm_limited[b].item() > 0:
									comm_lim[b] = comm_target[b][comm_limited[b] - 1]
							comm_target = comm_lim
						else:
							comm_target[:, agent_idx].zero_()

					# comm_target.retain_grad()
					agent_target_inputs = copy.copy(agent_inputs)
					agent_target_inputs['messages'] = Variable(comm_target)
					agent_target_inputs['hidden'] = \
						episode.step_records[step].hidden_target[agent_idx, :]
					hidden_target_t, q_target_t = agent_target.model_target(**agent_target_inputs)
					episode.step_records[step + 1].hidden_target[agent_idx] = \
						hidden_target_t.squeeze()

					# Choose next arg max action and comm
					(action, action_value), (comm_vector, comm_action, comm_value) = \
						agent_target.select_action_and_comm(step, q_target_t, eps=0, target=True, train_mode=True)

					# save target actions, comm, and q_a_t, q_a_max_t
					episode.step_records[step].q_a_max_t[:, agent_idx] = action_value
					if opt.model_dial:
						episode.step_records[step + 1].comm_target[:, agent_idx] = comm_vector
					else:
						episode.step_records[step].q_comm_max_t[:, agent_idx] = comm_value

			# Update step
			step = step + 1
			if episode.ended.sum().item() < opt.bs:
				episode.step_records[step].s_t = self.game.get_state()

		# Collect stats
		episode.game_stats = self.game.get_stats(episode.steps)

		return episode

	def average_reward(self, episode, normalized=True):
		"""Mean per-agent reward; optionally normalized by the oracle
		("god") reward reported by the game's stats."""
		reward = episode.r.sum()/(self.opt.bs * self.opt.game_nagents)
		if normalized:
			god_reward = episode.game_stats.god_reward.sum()/self.opt.bs
			if reward == god_reward:
				reward = 1
			elif god_reward == 0:
				reward = 0
			else:
				reward = reward/god_reward
		return float(reward)

	def train(self, agents, reset=True, verbose=False, test_callback=None):
		"""Train agents for opt.nepisodes, evaluating (eps-greedy off)
		every opt.step_test episodes and reporting normalized reward."""
		opt = self.opt

		if reset:
			for agent in agents[1:]:
				agent.reset()

		rewards = []
		for e in range(opt.nepisodes):
			# run episode
			episode = self.run_episode(agents, train_mode=True)
			norm_r = self.average_reward(episode)
			if verbose:
				print('train epoch:', e, 'avg steps:', episode.steps.float().mean().item(), 'avg reward:', norm_r)
			if opt.model_know_share:
				# Shared parameters: a single learner updates for everyone.
				agents[1].learn_from_episode(episode)
			else:
				for agent in agents[1:]:
					agent.learn_from_episode(episode)

			if e % opt.step_test == 0:
				episode = self.run_episode(agents, train_mode=False)
				norm_r = self.average_reward(episode)
				rewards.append(norm_r)
				if test_callback:
					test_callback(e, norm_r)
				print('TEST EPOCH:', e, 'avg steps:', episode.steps.float().mean().item(), 'avg reward:', norm_r)
ace1b284b2bc6a8d846fb88dcbf9152809b7a995 | 101,727 | py | Python | api_tests/nodes/views/test_node_detail.py | brianjgeiger/osf.io | aef962685056ac4f721cf9680fca5ec2dbfed675 | [
"Apache-2.0"
] | null | null | null | api_tests/nodes/views/test_node_detail.py | brianjgeiger/osf.io | aef962685056ac4f721cf9680fca5ec2dbfed675 | [
"Apache-2.0"
] | 80 | 2015-02-25T15:12:15.000Z | 2015-06-11T18:44:55.000Z | api_tests/nodes/views/test_node_detail.py | brianjgeiger/osf.io | aef962685056ac4f721cf9680fca5ec2dbfed675 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import mock
import pytest
from future.moves.urllib.parse import urlparse
from addons.wiki.tests.factories import WikiFactory, WikiVersionFactory
from api.base.settings.defaults import API_BASE
from api.taxonomies.serializers import subjects_as_relationships_version
from api_tests.subjects.mixins import UpdateSubjectsMixin
from framework.auth.core import Auth
from osf.models import NodeLog
from osf.models.licenses import NodeLicense
from osf.utils.sanitize import strip_html
from osf.utils import permissions
from osf_tests.factories import (
NodeFactory,
ProjectFactory,
RegistrationFactory,
AuthUserFactory,
CollectionFactory,
CommentFactory,
NodeLicenseRecordFactory,
PrivateLinkFactory,
PreprintFactory,
IdentifierFactory,
InstitutionFactory,
ForkFactory,
OSFGroupFactory,
WithdrawnRegistrationFactory,
)
from rest_framework import exceptions
from tests.base import fake
from tests.utils import assert_items_equal, assert_latest_log, assert_latest_log_not
from website.views import find_bookmark_collection
@pytest.fixture()
def user():
    # Default authenticated user; creator/admin of the fixture projects.
    return AuthUserFactory()
@pytest.mark.django_db
class TestNodeDetail:
    @pytest.fixture()
    def user_two(self):
        # A second user with no permissions on the fixture projects.
        return AuthUserFactory()

    @pytest.fixture()
    def project_public(self, user):
        # Public top-level project owned by `user`.
        return ProjectFactory(
            title='Project One',
            is_public=True,
            creator=user)

    @pytest.fixture()
    def project_private(self, user):
        # Private top-level project owned by `user`.
        return ProjectFactory(
            title='Project Two',
            is_public=False,
            creator=user)

    @pytest.fixture()
    def component_public(self, user, project_public):
        # Public child component under the public project.
        return NodeFactory(parent=project_public, creator=user, is_public=True)

    @pytest.fixture()
    def url_public(self, project_public):
        # Node-detail URL for the public project.
        return '/{}nodes/{}/'.format(API_BASE, project_public._id)

    @pytest.fixture()
    def url_private(self, project_private):
        # Node-detail URL for the private project.
        return '/{}nodes/{}/'.format(API_BASE, project_private._id)

    @pytest.fixture()
    def url_component_public(self, component_public):
        # Node-detail URL for the public component.
        return '/{}nodes/{}/'.format(API_BASE, component_public._id)

    @pytest.fixture()
    def permissions_read(self):
        # Expected permission list for read-only access.
        return [permissions.READ]

    @pytest.fixture()
    def permissions_write(self):
        # Expected permission list for a write contributor.
        return [permissions.WRITE, permissions.READ]

    @pytest.fixture()
    def permissions_admin(self):
        # Expected permission list for an admin contributor.
        return [permissions.READ, permissions.ADMIN, permissions.WRITE]
def test_return_project_details(
self, app, user, user_two, project_public,
project_private, url_public, url_private,
permissions_read, permissions_admin):
# test_return_public_project_details_logged_out
res = app.get(url_public)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert res.json['data']['attributes']['title'] == project_public.title
assert res.json['data']['attributes']['description'] == project_public.description
assert res.json['data']['attributes']['category'] == project_public.category
assert res.json['data']['attributes']['current_user_is_contributor'] is False
assert res.json['data']['attributes']['current_user_permissions'] == [permissions.READ]
# test_return_public_project_details_contributor_logged_in
res = app.get(url_public, auth=user.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert res.json['data']['attributes']['title'] == project_public.title
assert res.json['data']['attributes']['description'] == project_public.description
assert res.json['data']['attributes']['category'] == project_public.category
assert res.json['data']['attributes']['current_user_is_contributor'] is True
assert res.json['data']['attributes']['current_user_permissions'] == [permissions.ADMIN, permissions.WRITE, permissions.READ]
# test_return_public_project_details_non_contributor_logged_in
res = app.get(url_public, auth=user_two.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert res.json['data']['attributes']['title'] == project_public.title
assert res.json['data']['attributes']['description'] == project_public.description
assert res.json['data']['attributes']['category'] == project_public.category
assert res.json['data']['attributes']['current_user_is_contributor'] is False
assert res.json['data']['attributes']['current_user_permissions'] == [permissions.READ]
# test_return_private_project_details_logged_in_admin_contributor
res = app.get(url_private, auth=user.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert res.json['data']['attributes']['title'] == project_private.title
assert res.json['data']['attributes']['description'] == project_private.description
assert res.json['data']['attributes']['category'] == project_private.category
assert res.json['data']['attributes']['current_user_is_contributor'] is True
assert res.json['data']['attributes']['current_user_permissions'] == [permissions.ADMIN, permissions.WRITE, permissions.READ]
assert res.json['data']['relationships']['region']['data']['id'] == project_private.osfstorage_region._id
# test_return_private_project_details_logged_out
res = app.get(url_private, expect_errors=True)
assert res.status_code == 401
assert 'detail' in res.json['errors'][0]
# test_return_private_project_details_logged_in_non_contributor
res = app.get(url_private, auth=user_two.auth, expect_errors=True)
assert res.status_code == 403
assert 'detail' in res.json['errors'][0]
# test_return_project_where_you_have_osf_group_membership
osf_group = OSFGroupFactory(creator=user_two)
project_private.add_osf_group(osf_group, permissions.WRITE)
res = app.get(url_private, auth=user_two.auth)
assert res.status_code == 200
assert project_private.has_permission(user_two, permissions.WRITE) is True
def test_return_private_project_details_logged_in_write_contributor(
self, app, user, user_two, project_private, url_private, permissions_write):
project_private.add_contributor(
contributor=user_two, auth=Auth(user), save=True)
res = app.get(url_private, auth=user_two.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert res.json['data']['attributes']['title'] == project_private.title
assert res.json['data']['attributes']['description'] == project_private.description
assert res.json['data']['attributes']['category'] == project_private.category
assert res.json['data']['attributes']['current_user_is_contributor'] is True
assert_items_equal(
res.json['data']['attributes']['current_user_permissions'],
permissions_write)
def test_top_level_project_has_no_parent(self, app, url_public):
res = app.get(url_public)
assert res.status_code == 200
assert 'parent' not in res.json['data']['relationships']
assert 'id' in res.json['data']
assert res.content_type == 'application/vnd.api+json'
def test_child_project_has_parent(
self, app, user, project_public, url_public):
public_component = NodeFactory(
parent=project_public, creator=user, is_public=True)
public_component_url = '/{}nodes/{}/'.format(
API_BASE, public_component._id)
res = app.get(public_component_url)
assert res.status_code == 200
url = res.json['data']['relationships']['parent']['links']['related']['href']
assert urlparse(url).path == url_public
def test_node_has(self, app, url_public, project_public):
# test_node_has_children_link
res = app.get(url_public)
url = res.json['data']['relationships']['children']['links']['related']['href']
expected_url = '{}children/'.format(url_public)
assert urlparse(url).path == expected_url
# test_node_has_contributors_link
url = res.json['data']['relationships']['contributors']['links']['related']['href']
expected_url = '{}contributors/'.format(url_public)
assert urlparse(url).path == expected_url
# test_node_has_node_links_link
url = res.json['data']['relationships']['node_links']['links']['related']['href']
expected_url = '{}node_links/'.format(url_public)
assert urlparse(url).path == expected_url
# test_node_has_registrations_link
url = res.json['data']['relationships']['registrations']['links']['related']['href']
expected_url = '{}registrations/'.format(url_public)
assert urlparse(url).path == expected_url
# test_node_has_files_link
url = res.json['data']['relationships']['files']['links']['related']['href']
expected_url = '{}files/'.format(url_public)
assert urlparse(url).path == expected_url
# test_node_has_affiliated_institutions_link_and_it_doesn't_serialize_to_none
assert project_public.affiliated_institutions.count() == 0
related_url = res.json['data']['relationships']['affiliated_institutions']['links']['related']['href']
expected_url = '{}institutions/'.format(url_public)
assert urlparse(related_url).path == expected_url
self_url = res.json['data']['relationships']['affiliated_institutions']['links']['self']['href']
expected_url = '{}relationships/institutions/'.format(url_public)
assert urlparse(self_url).path == expected_url
# test_node_has_subjects_links_for_later_versions
res = app.get(url_public + '?version={}'.format(subjects_as_relationships_version))
related_url = res.json['data']['relationships']['subjects']['links']['related']['href']
expected_url = '{}subjects/'.format(url_public)
assert urlparse(related_url).path == expected_url
self_url = res.json['data']['relationships']['subjects']['links']['self']['href']
expected_url = '{}relationships/subjects/'.format(url_public)
assert urlparse(self_url).path == expected_url
def test_node_has_comments_link(
self, app, user, project_public, url_public):
CommentFactory(node=project_public, user=user)
res = app.get(url_public)
assert res.status_code == 200
assert 'comments' in res.json['data']['relationships'].keys()
url = res.json['data']['relationships']['comments']['links']['related']['href']
res = app.get(url)
assert res.status_code == 200
assert res.json['data'][0]['type'] == 'comments'
def test_node_comments_link_query_params_formatted(
self, app, user, project_public, project_private, url_private):
CommentFactory(node=project_public, user=user)
project_private_link = PrivateLinkFactory(anonymous=False)
project_private_link.nodes.add(project_private)
project_private_link.save()
res = app.get(url_private, auth=user.auth)
url = res.json['data']['relationships']['comments']['links']['related']['href']
assert project_private_link.key not in url
res = app.get(
'{}?view_only={}'.format(
url_private,
project_private_link.key))
url = res.json['data']['relationships']['comments']['links']['related']['href']
assert project_private_link.key in url
def test_node_has_correct_unread_comments_count(
self, app, user, project_public, url_public):
contributor = AuthUserFactory()
project_public.add_contributor(
contributor=contributor, auth=Auth(user), save=True)
CommentFactory(
node=project_public,
user=contributor,
page='node')
res = app.get(
'{}?related_counts=True'.format(url_public),
auth=user.auth)
unread = res.json['data']['relationships']['comments']['links']['related']['meta']['unread']
unread_comments_node = unread['node']
assert unread_comments_node == 1
def test_node_has_correct_wiki_page_count(self, user, app, url_private, project_private):
res = app.get('{}?related_counts=True'.format(url_private), auth=user.auth)
assert res.json['data']['relationships']['wikis']['links']['related']['meta']['count'] == 0
with mock.patch('osf.models.AbstractNode.update_search'):
wiki_page = WikiFactory(node=project_private, user=user)
WikiVersionFactory(wiki_page=wiki_page)
res = app.get('{}?related_counts=True'.format(url_private), auth=user.auth)
assert res.json['data']['relationships']['wikis']['links']['related']['meta']['count'] == 1
def test_node_properties(self, app, url_public):
res = app.get(url_public)
assert res.json['data']['attributes']['public'] is True
assert res.json['data']['attributes']['registration'] is False
assert res.json['data']['attributes']['collection'] is False
assert res.json['data']['attributes']['tags'] == []
def test_requesting_folder_returns_error(self, app, user):
folder = CollectionFactory(creator=user)
res = app.get(
'/{}nodes/{}/'.format(API_BASE, folder._id),
auth=user.auth,
expect_errors=True
)
assert res.status_code == 404
def test_cannot_return_registrations_at_node_detail_endpoint(
self, app, user, project_public):
registration = RegistrationFactory(
project=project_public, creator=user)
res = app.get('/{}nodes/{}/'.format(
API_BASE, registration._id),
auth=user.auth, expect_errors=True)
assert res.status_code == 404
def test_cannot_return_folder_at_node_detail_endpoint(self, app, user):
folder = CollectionFactory(creator=user)
res = app.get(
'/{}nodes/{}/'.format(API_BASE, folder._id),
auth=user.auth, expect_errors=True)
assert res.status_code == 404
def test_node_list_embed_identifier_link(self, app, user, project_public, url_public):
url = url_public + '?embed=identifiers'
res = app.get(url)
assert res.status_code == 200
link = res.json['data']['relationships']['identifiers']['links']['related']['href']
assert '{}identifiers/'.format(url_public) in link
def test_node_shows_wiki_relationship_based_on_disabled_status_and_version(self, app, user, project_public, url_public):
url = url_public + '?version=latest'
res = app.get(url, auth=user.auth)
assert 'wikis' in res.json['data']['relationships']
project_public.delete_addon('wiki', auth=Auth(user))
project_public.save()
res = app.get(url, auth=user.auth)
assert 'wikis' not in res.json['data']['relationships']
url = url_public + '?version=2.7'
res = app.get(url, auth=user.auth)
assert 'wikis' in res.json['data']['relationships']
def test_preprint_field(self, app, user, user_two, project_public, url_public):
# Returns true if project holds supplemental material for a preprint a user can view
# Published preprint, admin_contrib
preprint = PreprintFactory(project=project_public, creator=user)
res = app.get(url_public, auth=user.auth)
assert res.status_code == 200
assert res.json['data']['attributes']['preprint'] is True
# Published preprint, non_contrib
res = app.get(url_public, auth=user_two.auth)
assert res.status_code == 200
assert res.json['data']['attributes']['preprint'] is True
# Unpublished preprint, admin contrib
preprint.is_published = False
preprint.save()
res = app.get(url_public, auth=user.auth)
assert res.status_code == 200
assert res.json['data']['attributes']['preprint'] is True
# Unpublished preprint, non_contrib
res = app.get(url_public, auth=user_two.auth)
assert res.status_code == 200
assert res.json['data']['attributes']['preprint'] is False
def test_shows_access_requests_enabled_field_based_on_version(self, app, user, project_public, url_public):
url = url_public + '?version=latest'
res = app.get(url, auth=user.auth)
assert 'access_requests_enabled' not in res.json['data']['attributes']
res = app.get(url_public + '?version=2.8', auth=user.auth)
assert 'access_requests_enabled' in res.json['data']['attributes']
def test_node_shows_correct_templated_from_count(self, app, user, project_public, url_public):
url = url_public
res = app.get(url)
assert res.json['meta'].get('templated_by_count', False) is False
url = url + '?related_counts=true'
res = app.get(url)
assert res.json['meta']['templated_by_count'] == 0
ProjectFactory(title='template copy', template_node=project_public, creator=user)
project_public.reload()
res = app.get(url)
assert res.json['meta']['templated_by_count'] == 1
def test_node_show_correct_children_count(self, app, user, user_two, project_public, url_public):
    """Children counts and the children list respect the viewer's permissions:
    explicit contributors, implicit (parent) admins, OSF group members, and
    implicit admin group members. Only one level of children is shown."""
    node_children_url = url_public + 'children/'
    url = url_public + '?related_counts=true'
    child = NodeFactory(parent=project_public, creator=user)
    res = app.get(url, auth=user.auth)
    # Child admin can view child
    assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 1
    res = app.get(node_children_url, auth=user.auth)
    assert len(res.json['data']) == 1
    # Implicit admin on parent can view child count
    # (user_two sees 0 until granted admin on the parent project below)
    res = app.get(url, auth=user_two.auth)
    assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 0
    project_public.add_contributor(user_two, permissions.ADMIN)
    project_public.save()
    res = app.get(url, auth=user_two.auth)
    assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 1
    res = app.get(node_children_url, auth=user_two.auth)
    assert len(res.json['data']) == 1
    # Explicit Member of OSFGroup can view child count
    user_three = AuthUserFactory()
    group = OSFGroupFactory(creator=user_three)
    res = app.get(url, auth=user_three.auth)
    assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 0
    child.add_osf_group(group, permissions.READ)
    res = app.get(url, auth=user_three.auth)
    assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 1
    res = app.get(node_children_url, auth=user_three.auth)
    assert len(res.json['data']) == 1
    # Implicit admin group member can view child count
    # (group granted admin on the parent rather than on the child directly)
    child.remove_osf_group(group)
    res = app.get(url, auth=user_three.auth)
    assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 0
    project_public.add_osf_group(group, permissions.ADMIN)
    res = app.get(url, auth=user_three.auth)
    assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 1
    res = app.get(node_children_url, auth=user_three.auth)
    assert len(res.json['data']) == 1
    # Grandchildren not shown. Children show one level.
    grandparent = AuthUserFactory()
    NodeFactory(parent=child, creator=user)
    project_public.add_contributor(grandparent, permissions.ADMIN)
    project_public.save()
    res = app.get(node_children_url, auth=grandparent.auth)
    assert len(res.json['data']) == 1
    res = app.get(url, auth=grandparent.auth)
    assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 1
    NodeFactory(parent=project_public, creator=user)
    res = app.get(node_children_url, auth=grandparent.auth)
    assert len(res.json['data']) == 2
    res = app.get(url, auth=grandparent.auth)
    assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 2
def test_node_shows_related_count_for_linked_by_relationships(self, app, user, project_public, url_public, project_private):
    """linked_by_nodes / linked_by_registrations related counts include live
    linkers and exclude deleted nodes and withdrawn registrations."""
    url = url_public + '?related_counts=true'

    def assert_linked_counts(expected_nodes, expected_registrations):
        # Each call re-fetches the node and checks both relationship metas.
        relationships = app.get(url).json['data']['relationships']
        nodes_meta = relationships['linked_by_nodes']['links']['related']['meta']
        regs_meta = relationships['linked_by_registrations']['links']['related']['meta']
        assert 'count' in nodes_meta
        assert 'count' in regs_meta
        assert nodes_meta['count'] == expected_nodes
        assert regs_meta['count'] == expected_registrations

    assert_linked_counts(0, 0)
    # A pointer from another node is counted under linked_by_nodes.
    project_private.add_pointer(project_public, auth=Auth(user), save=True)
    project_public.reload()
    assert_linked_counts(1, 0)
    # Registering the linking project adds a linked_by_registrations entry.
    registration = RegistrationFactory(project=project_private, creator=user)
    project_public.reload()
    assert_linked_counts(1, 1)
    # Deleting the linking node removes it from the node count only.
    project_private.is_deleted = True
    project_private.save()
    project_public.reload()
    assert_linked_counts(0, 1)
    # Withdrawing the registration removes the registration count as well.
    WithdrawnRegistrationFactory(registration=registration, user=user)
    project_public.reload()
    assert_linked_counts(0, 0)
def test_node_shows_correct_forks_count_including_private_forks(self, app, user, project_private, url_private, user_two):
    """The forks related count includes forks the viewer cannot see, while
    the forks list endpoint only returns forks visible to the viewer."""
    project_private.add_contributor(
        user_two,
        permissions=permissions.ADMIN,
        auth=Auth(user)
    )
    counts_url = url_private + '?related_counts=true'
    forks_url = url_private + 'forks/'

    def forks_state(viewer):
        # Return (related count, number of forks listed) as seen by `viewer`.
        counted = app.get(counts_url, auth=viewer.auth)
        meta = counted.json['data']['relationships']['forks']['links']['related']['meta']
        assert 'count' in meta
        listed = app.get(forks_url, auth=viewer.auth)
        return meta['count'], len(listed.json['data'])

    assert forks_state(user) == (0, 0)
    # user_two's fork is counted for `user` but not listed (user can't view it).
    ForkFactory(project=project_private, user=user_two)
    project_private.reload()
    assert forks_state(user) == (1, 0)
    # user's own fork is both counted and listed.
    ForkFactory(project=project_private, user=user)
    project_private.reload()
    assert forks_state(user) == (2, 1)
def test_current_user_permissions(self, app, user, url_public, project_public, user_two):
    """`current_user_permissions` serialization across API versions and roles:
    from version 2.11 'read' is no longer implicit on public nodes for
    non-contributors/anonymous users; contributors and OSF group members get
    their explicit permissions; older versions still include implicit 'read';
    superusers get no implicit permissions under 2.11."""
    # in most recent API version, read isn't implicit for public nodes
    url = url_public + '?version=2.11'
    res = app.get(url, auth=user_two.auth)
    assert not project_public.has_permission(user_two, permissions.READ)
    assert permissions.READ not in res.json['data']['attributes']['current_user_permissions']
    assert res.json['data']['attributes']['current_user_is_contributor_or_group_member'] is False
    # ensure read is not included for an anonymous user
    res = app.get(url)
    assert permissions.READ not in res.json['data']['attributes']['current_user_permissions']
    assert res.json['data']['attributes']['current_user_is_contributor_or_group_member'] is False
    # ensure both read and write included for a write contributor
    new_user = AuthUserFactory()
    project_public.add_contributor(
        new_user,
        permissions=permissions.WRITE,
        auth=Auth(project_public.creator)
    )
    res = app.get(url, auth=new_user.auth)
    assert res.json['data']['attributes']['current_user_permissions'] == [permissions.WRITE, permissions.READ]
    assert res.json['data']['attributes']['current_user_is_contributor_or_group_member'] is True
    # make sure 'read' is there for implicit read contributors
    # (user is admin on the parent, not a contributor on the component itself)
    comp = NodeFactory(parent=project_public, is_public=True)
    comp_url = '/{}nodes/{}/?version=2.11'.format(API_BASE, comp._id)
    res = app.get(comp_url, auth=user.auth)
    assert project_public.has_permission(user, permissions.ADMIN)
    assert permissions.READ in res.json['data']['attributes']['current_user_permissions']
    assert res.json['data']['attributes']['current_user_is_contributor_or_group_member'] is False
    # ensure 'read' is still included with older versions
    res = app.get(url_public, auth=user_two.auth)
    assert not project_public.has_permission(user_two, permissions.READ)
    assert permissions.READ in res.json['data']['attributes']['current_user_permissions']
    assert res.json['data']['attributes']['current_user_is_contributor_or_group_member'] is False
    # check read permission is included with older versions for anon user
    res = app.get(url_public)
    assert permissions.READ in res.json['data']['attributes']['current_user_permissions']
    assert res.json['data']['attributes']['current_user_is_contributor_or_group_member'] is False
    # Read group member has "read" permissions
    group_member = AuthUserFactory()
    osf_group = OSFGroupFactory(creator=group_member)
    project_public.add_osf_group(osf_group, permissions.READ)
    res = app.get(url, auth=group_member.auth)
    assert project_public.has_permission(group_member, permissions.READ)
    assert permissions.READ in res.json['data']['attributes']['current_user_permissions']
    assert res.json['data']['attributes']['current_user_is_contributor_or_group_member'] is True
    # Write group member has "read" and "write" permissions
    group_member = AuthUserFactory()
    osf_group = OSFGroupFactory(creator=group_member)
    project_public.add_osf_group(osf_group, permissions.WRITE)
    res = app.get(url, auth=group_member.auth)
    assert res.json['data']['attributes']['current_user_permissions'] == [permissions.WRITE, permissions.READ]
    assert res.json['data']['attributes']['current_user_is_contributor_or_group_member'] is True
    # Admin group member has "read" and "write" and "admin" permissions
    group_member = AuthUserFactory()
    osf_group = OSFGroupFactory(creator=group_member)
    project_public.add_osf_group(osf_group, permissions.ADMIN)
    res = app.get(url, auth=group_member.auth)
    assert res.json['data']['attributes']['current_user_permissions'] == [permissions.ADMIN, permissions.WRITE, permissions.READ]
    assert res.json['data']['attributes']['current_user_is_contributor_or_group_member'] is True
    # make sure 'read' is there for implicit read group members
    comp = NodeFactory(parent=project_public, is_public=True)
    comp_url = '/{}nodes/{}/?version=2.11'.format(API_BASE, comp._id)
    res = app.get(comp_url, auth=group_member.auth)
    assert project_public.has_permission(user, permissions.ADMIN)
    assert permissions.READ in res.json['data']['attributes']['current_user_permissions']
    assert res.json['data']['attributes']['current_user_is_contributor_or_group_member'] is False
    # ensure 'read' is still included with older versions
    project_public.remove_osf_group(osf_group)
    res = app.get(url_public, auth=group_member.auth)
    assert not project_public.has_permission(group_member, permissions.READ)
    assert permissions.READ in res.json['data']['attributes']['current_user_permissions']
    assert res.json['data']['attributes']['current_user_is_contributor_or_group_member'] is False
    # superusers current permissions are None
    superuser = AuthUserFactory()
    superuser.is_superuser = True
    superuser.save()
    res = app.get(url, auth=superuser.auth)
    assert not project_public.has_permission(superuser, permissions.READ)
    assert permissions.READ not in res.json['data']['attributes']['current_user_permissions']
    assert res.json['data']['attributes']['current_user_is_contributor_or_group_member'] is False
    assert res.json['data']['attributes']['current_user_is_contributor'] is False
    res = app.get(url_public, auth=superuser.auth)
    assert not project_public.has_permission(superuser, permissions.READ)
    assert permissions.READ in res.json['data']['attributes']['current_user_permissions']
    assert res.json['data']['attributes']['current_user_is_contributor_or_group_member'] is False
    assert res.json['data']['attributes']['current_user_is_contributor'] is False
@pytest.mark.django_db
class NodeCRUDTestCase:
    """Shared fixtures for node create/read/update/delete API tests."""

    @pytest.fixture()
    def institution_one(self):
        return InstitutionFactory()

    @pytest.fixture()
    def institution_two(self):
        return InstitutionFactory()

    @pytest.fixture()
    def user_two(self, institution_one, institution_two):
        # Second user, affiliated with both institutions.
        second_user = AuthUserFactory()
        second_user.affiliated_institutions.add(institution_one)
        second_user.affiliated_institutions.add(institution_two)
        return second_user

    @pytest.fixture()
    def title(self):
        return 'Cool Project'

    @pytest.fixture()
    def title_new(self):
        return 'Super Cool Project'

    @pytest.fixture()
    def description(self):
        return 'A Properly Cool Project'

    @pytest.fixture()
    def description_new(self):
        return 'An even cooler project'

    @pytest.fixture()
    def category(self):
        return 'data'

    @pytest.fixture()
    def category_new(self):
        return 'project'

    @pytest.fixture()
    def project_public(self, user, title, description, category):
        return ProjectFactory(
            title=title, description=description, category=category,
            is_public=True, creator=user)

    @pytest.fixture()
    def project_private(self, user, title, description, category):
        return ProjectFactory(
            title=title, description=description, category=category,
            is_public=False, creator=user)

    @pytest.fixture()
    def url_public(self, project_public):
        return f'/{API_BASE}nodes/{project_public._id}/'

    @pytest.fixture()
    def sparse_url_public(self, project_public):
        return f'/{API_BASE}sparse/nodes/{project_public._id}/'

    @pytest.fixture()
    def url_private(self, project_private):
        return f'/{API_BASE}nodes/{project_private._id}/'

    @pytest.fixture()
    def url_fake(self):
        return f'/{API_BASE}nodes/12345/'

    @staticmethod
    def _payload_builder(resource_type):
        # Returns a JSON-API payload factory for the given resource type.
        def payload(node, attributes, relationships=None):
            payload_data = {
                'data': {
                    'id': node._id,
                    'type': resource_type,
                    'attributes': attributes,
                }
            }
            if relationships:
                payload_data['data']['relationships'] = relationships
            return payload_data
        return payload

    @pytest.fixture()
    def make_node_payload(self):
        return self._payload_builder('nodes')

    @pytest.fixture()
    def make_sparse_node_payload(self):
        return self._payload_builder('sparse-nodes')
@pytest.mark.django_db
class TestNodeUpdate(NodeCRUDTestCase):
def test_node_institution_update(self, app, user_two, project_private, url_private, make_node_payload,
institution_one, institution_two):
project_private.add_contributor(
user_two,
permissions=permissions.ADMIN,
auth=Auth(project_private.creator)
)
affiliated_institutions = {
'affiliated_institutions':
{'data': [
{
'type': 'institutions',
'id': institution_one._id
},
{
'type': 'institutions',
'id': institution_two._id
},
]
}
}
payload = make_node_payload(project_private, {'public': False}, relationships=affiliated_institutions)
res = app.patch_json_api(url_private, payload, auth=user_two.auth, expect_errors=False)
assert res.status_code == 200
institutions = project_private.affiliated_institutions.all()
assert institution_one in institutions
assert institution_two in institutions
def test_node_update_invalid_data(self, app, user, url_public):
res = app.put_json_api(
url_public, 'Incorrect data',
auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == exceptions.ParseError.default_detail
res = app.put_json_api(
url_public, ['Incorrect data'],
auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == exceptions.ParseError.default_detail
def test_cannot_make_project_public_if_non_contributor(
self, app, project_private, url_private, make_node_payload):
with assert_latest_log_not(NodeLog.MADE_PUBLIC, project_private):
non_contrib = AuthUserFactory()
res = app.patch_json(
url_private,
make_node_payload(project_private, {'public': True}),
auth=non_contrib.auth, expect_errors=True
)
assert res.status_code == 403
def test_cannot_make_project_public_if_non_admin_contributor(
self, app, project_private, url_private, make_node_payload):
non_admin = AuthUserFactory()
project_private.add_contributor(
non_admin,
permissions=permissions.WRITE,
auth=Auth(project_private.creator)
)
project_private.save()
res = app.patch_json(
url_private,
make_node_payload(project_private, {'public': True}),
auth=non_admin.auth, expect_errors=True
)
assert res.status_code == 403
project_private.reload()
assert not project_private.is_public
def test_can_make_project_public_if_admin_contributor(
self, app, project_private, url_private, make_node_payload):
with assert_latest_log(NodeLog.MADE_PUBLIC, project_private):
admin_user = AuthUserFactory()
project_private.add_contributor(
admin_user,
permissions=permissions.ADMIN,
auth=Auth(project_private.creator))
project_private.save()
res = app.patch_json_api(
url_private,
make_node_payload(project_private, {'public': True}),
auth=admin_user.auth # self.user is creator/admin
)
assert res.status_code == 200
project_private.reload()
assert project_private.is_public
def test_update_errors(
self, app, user, user_two, title_new, description_new,
category_new, project_public, project_private,
url_public, url_private, sparse_url_public, make_sparse_node_payload):
# test_update_project_properties_not_nested
res = app.put_json_api(url_public, {
'id': project_public._id,
'type': 'nodes',
'title': title_new,
'description': description_new,
'category': category_new,
'public': True,
}, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Request must include /data.'
assert res.json['errors'][0]['source']['pointer'] == '/data'
# test_cannot_update_sparse
res = app.patch_json_api(
sparse_url_public,
make_sparse_node_payload(project_public, {'public': False}),
auth=user.auth,
expect_errors=True
)
assert res.status_code == 405
# test_update_invalid_id
res = app.put_json_api(url_public, {
'data': {
'id': '12345',
'type': 'nodes',
'attributes': {
'title': title_new,
'description': description_new,
'category': category_new,
'public': True
}
}
}, auth=user.auth, expect_errors=True)
assert res.status_code == 409
# test_update_invalid_type
res = app.put_json_api(url_public, {
'data': {
'id': project_public._id,
'type': 'node',
'attributes': {
'title': title_new,
'description': description_new,
'category': category_new,
'public': True
}
}
}, auth=user.auth, expect_errors=True)
assert res.status_code == 409
# test_update_no_id
res = app.put_json_api(url_public, {
'data': {
'type': 'nodes',
'attributes': {
'title': title_new,
'description': description_new,
'category': category_new,
'public': True
}
}
}, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'This field may not be null.'
assert res.json['errors'][0]['source']['pointer'] == '/data/id'
# test_update_no_type
res = app.put_json_api(url_public, {
'data': {
'id': project_public._id,
'attributes': {
'title': title_new,
'description': description_new,
'category': category_new,
'public': True
}
}
}, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'This field may not be null.'
assert res.json['errors'][0]['source']['pointer'] == '/data/type'
# test_update_public_project_logged_out
res = app.put_json_api(url_public, {
'data': {
'id': project_public._id,
'type': 'nodes',
'attributes': {
'title': title_new,
'description': description_new,
'category': category_new,
'public': True
}
}
}, expect_errors=True)
assert res.status_code == 401
assert 'detail' in res.json['errors'][0]
# test_update_project_invalid_title
project = {
'data': {
'type': 'nodes',
'id': project_public._id,
'attributes': {
'title': 'A' * 513,
'category': 'project',
}
}
}
res = app.put_json_api(
url_public, project,
auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Title cannot exceed 512 characters.'
# test_update_public_project_logged_in_but_unauthorized
res = app.put_json_api(url_public, {
'data': {
'id': project_private._id,
'type': 'nodes',
'attributes': {
'title': title_new,
'description': description_new,
'category': category_new,
'public': True
}
}
}, auth=user_two.auth, expect_errors=True)
assert res.status_code == 403
assert 'detail' in res.json['errors'][0]
# test_update_private_project_logged_out
res = app.put_json_api(url_private, {
'data': {
'id': project_private._id,
'type': 'nodes',
'attributes': {
'title': title_new,
'description': description_new,
'category': category_new,
'public': False
}
}
}, expect_errors=True)
assert res.status_code == 401
assert 'detail' in res.json['errors'][0]
# test_update_private_project_logged_in_non_contributor
res = app.put_json_api(url_private, {
'data': {
'id': project_private._id,
'type': 'nodes',
'attributes': {
'title': title_new,
'description': description_new,
'category': category_new,
'public': False
}
}
}, auth=user_two.auth, expect_errors=True)
assert res.status_code == 403
assert 'detail' in res.json['errors'][0]
# test_update_private_project_group_has_read_perms
osf_group = OSFGroupFactory(creator=user_two)
project_private.add_osf_group(osf_group, permissions.READ)
res = app.put_json_api(url_private, {
'data': {
'id': project_private._id,
'type': 'nodes',
'attributes': {
'title': title_new,
'description': description_new,
'category': category_new,
'public': False
}
}
}, auth=user_two.auth, expect_errors=True)
assert project_private.has_permission(user_two, permissions.READ) is True
assert res.status_code == 403
assert 'detail' in res.json['errors'][0]
def test_update_public_project_logged_in(
self, app, user, title_new, description_new,
category_new, project_public, url_public):
res = app.put_json_api(url_public, {
'data': {
'id': project_public._id,
'type': 'nodes',
'attributes': {
'title': title_new,
'description': description_new,
'category': category_new,
'public': True
}
}
}, auth=user.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert res.json['data']['attributes']['title'] == title_new
assert res.json['data']['attributes']['description'] == description_new
assert res.json['data']['attributes']['category'] == category_new
log_actions = project_public.logs.values_list('action', flat=True)
assert NodeLog.EDITED_TITLE in log_actions
assert NodeLog.EDITED_DESCRIPTION in log_actions
assert NodeLog.CATEGORY_UPDATED in log_actions
def test_update_public_project_osf_group_member(
self, app, user_two, title_new, description_new,
category_new, project_public, url_public):
osf_group = OSFGroupFactory(creator=user_two)
project_public.add_osf_group(osf_group, permissions.WRITE)
res = app.put_json_api(url_public, {
'data': {
'id': project_public._id,
'type': 'nodes',
'attributes': {
'title': title_new,
'description': description_new,
'category': category_new,
}
}
}, auth=user_two.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert res.json['data']['attributes']['title'] == title_new
assert res.json['data']['attributes']['description'] == description_new
assert res.json['data']['attributes']['category'] == category_new
log_actions = project_public.logs.values_list('action', flat=True)
assert NodeLog.CATEGORY_UPDATED in log_actions
assert NodeLog.EDITED_TITLE in log_actions
assert NodeLog.EDITED_DESCRIPTION in log_actions
def test_cannot_update_a_registration(self, app, user, project_public):
registration = RegistrationFactory(
project=project_public, creator=user)
original_title = registration.title
original_description = registration.description
url = '/{}nodes/{}/'.format(API_BASE, registration._id)
res = app.put_json_api(url, {
'data': {
'id': registration._id,
'type': 'nodes',
'attributes': {
'title': fake.catch_phrase(),
'description': fake.bs(),
'category': 'hypothesis',
'public': True
}
}
}, auth=user.auth, expect_errors=True)
registration.reload()
assert res.status_code == 404
assert registration.title == original_title
assert registration.description == original_description
def test_update_private_project_logged_in_contributor(
self, app, user, title_new, description_new,
category_new, project_private, url_private):
res = app.put_json_api(url_private, {
'data': {
'id': project_private._id,
'type': 'nodes',
'attributes': {
'title': title_new,
'description': description_new,
'category': category_new,
'public': False
}
}
}, auth=user.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert res.json['data']['attributes']['title'] == title_new
assert res.json['data']['attributes']['description'] == description_new
assert res.json['data']['attributes']['category'] == category_new
log_actions = [log.action for log in project_private.logs.all()]
assert NodeLog.EDITED_TITLE in log_actions
assert NodeLog.EDITED_DESCRIPTION in log_actions
assert NodeLog.CATEGORY_UPDATED in log_actions
def test_update_project_sanitizes_html_properly(
self, app, user, category_new, project_public, url_public):
"""Post request should update resource, and any HTML in fields should be stripped"""
new_title = '<strong>Super</strong> Cool Project'
new_description = 'An <script>alert("even cooler")</script> project'
res = app.put_json_api(url_public, {
'data': {
'id': project_public._id,
'type': 'nodes',
'attributes': {
'title': new_title,
'description': new_description,
'category': category_new,
'public': True,
}
}
}, auth=user.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert res.json['data']['attributes']['title'] == strip_html(
new_title)
assert res.json['data']['attributes']['description'] == strip_html(
new_description)
log_actions = [log.action for log in project_public.logs.all()]
assert NodeLog.EDITED_TITLE in log_actions
assert NodeLog.EDITED_DESCRIPTION in log_actions
assert NodeLog.CATEGORY_UPDATED in log_actions
def test_partial_update_project_updates_project_correctly_and_sanitizes_html(
self, app, user, description, category, project_public, url_public):
with assert_latest_log(NodeLog.EDITED_TITLE, project_public):
new_title = 'An <script>alert("even cooler")</script> project'
res = app.patch_json_api(url_public, {
'data': {
'id': project_public._id,
'type': 'nodes',
'attributes': {
'title': new_title
}
}
}, auth=user.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
res = app.get(url_public)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert res.json['data']['attributes']['title'] == strip_html(
new_title)
assert res.json['data']['attributes']['description'] == description
assert res.json['data']['attributes']['category'] == category
def test_partial_update_public_project_logged_in(
self, app, user, title_new, description,
category, project_public, url_public):
with assert_latest_log(NodeLog.EDITED_TITLE, project_public):
res = app.patch_json_api(url_public, {
'data': {
'id': project_public._id,
'type': 'nodes',
'attributes': {
'title': title_new,
}
}
}, auth=user.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert res.json['data']['attributes']['title'] == title_new
assert res.json['data']['attributes']['description'] == description
assert res.json['data']['attributes']['category'] == category
def test_write_to_public_field_non_contrib_forbidden(
self, app, user_two, project_public, url_public):
# Test non-contrib writing to public field
res = app.patch_json_api(url_public, {
'data': {
'attributes': {
'public': False},
'id': project_public._id,
'type': 'nodes'
}
}, auth=user_two.auth, expect_errors=True)
assert res.status_code == 403
assert 'detail' in res.json['errors'][0]
def test_partial_update_errors(
self, app, user, user_two, title_new,
project_public, project_private,
url_public, url_private):
# test_partial_update_public_project_logged_out
res = app.patch_json_api(url_public, {
'data': {
'id': project_public._id,
'type': 'nodes',
'attributes': {
'title': title_new
}
}
}, expect_errors=True)
assert res.status_code == 401
assert 'detail' in res.json['errors'][0]
# test_partial_update_public_project_logged_in_but_unauthorized
# Public resource, logged in, unauthorized
res = app.patch_json_api(url_public, {
'data': {
'attributes': {
'title': title_new},
'id': project_public._id,
'type': 'nodes',
}
}, auth=user_two.auth, expect_errors=True)
assert res.status_code == 403
assert 'detail' in res.json['errors'][0]
# test_partial_update_private_project_logged_out
res = app.patch_json_api(url_private, {
'data': {
'id': project_private._id,
'type': 'nodes',
'attributes': {
'title': title_new
}
}
}, expect_errors=True)
assert res.status_code == 401
assert 'detail' in res.json['errors'][0]
# test_partial_update_private_project_logged_in_non_contributor
res = app.patch_json_api(url_private, {
'data': {
'attributes': {
'title': title_new},
'id': project_private._id,
'type': 'nodes',
}
}, auth=user_two.auth, expect_errors=True)
assert res.status_code == 403
assert 'detail' in res.json['errors'][0]
# test_partial_update_invalid_id
res = app.patch_json_api(url_public, {
'data': {
'id': '12345',
'type': 'nodes',
'attributes': {
'title': title_new,
}
}
}, auth=user.auth, expect_errors=True)
assert res.status_code == 409
# test_partial_update_invalid_type
res = app.patch_json_api(url_public, {
'data': {
'id': project_public._id,
'type': 'node',
'attributes': {
'title': title_new,
}
}
}, auth=user.auth, expect_errors=True)
assert res.status_code == 409
# test_partial_update_no_id
res = app.patch_json_api(url_public, {
'data': {
'type': 'nodes',
'attributes': {
'title': title_new,
}
}
}, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'This field may not be null.'
assert res.json['errors'][0]['source']['pointer'] == '/data/id'
# test_partial_update_no_type
res = app.patch_json_api(url_public, {
'data': {
'id': project_public._id,
'attributes': {
'title': title_new,
}
}
}, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'This field may not be null.'
assert res.json['errors'][0]['source']['pointer'] == '/data/type'
# Nothing will be updated here
# test_partial_update_project_properties_not_nested
res = app.patch_json_api(url_public, {
'data': {
'id': project_public._id,
'type': 'nodes',
'title': title_new,
}
}, auth=user.auth, expect_errors=True)
assert res.status_code == 200
def test_partial_update_private_project_logged_in_contributor(
self, app, user, title_new, description, category, project_private, url_private):
with assert_latest_log(NodeLog.EDITED_TITLE, project_private):
res = app.patch_json_api(url_private, {
'data': {
'attributes': {
'title': title_new},
'id': project_private._id,
'type': 'nodes',
}
}, auth=user.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert res.json['data']['attributes']['title'] == title_new
assert res.json['data']['attributes']['description'] == description
assert res.json['data']['attributes']['category'] == category
def test_multiple_patch_requests_with_same_category_generates_one_log(
self, app, user, project_private, url_private, make_node_payload):
project_private.category = 'project'
project_private.save()
new_category = 'data'
payload = make_node_payload(
project_private,
attributes={'category': new_category})
original_n_logs = project_private.logs.count()
res = app.patch_json_api(url_private, payload, auth=user.auth)
assert res.status_code == 200
project_private.reload()
assert project_private.category == new_category
assert project_private.logs.count() == original_n_logs + 1 # sanity check
app.patch_json_api(url_private, payload, auth=user.auth)
project_private.reload()
assert project_private.category == new_category
assert project_private.logs.count() == original_n_logs + 1
def test_public_project_with_publicly_editable_wiki_turns_private(
self, app, user, project_public, url_public, make_node_payload):
wiki = project_public.get_addon('wiki')
wiki.set_editing(permissions=True, auth=Auth(user=user), log=True)
res = app.patch_json_api(
url_public,
make_node_payload(project_public, {'public': False}),
auth=user.auth # self.user is creator/admin
)
assert res.status_code == 200
    @mock.patch('website.identifiers.tasks.update_doi_metadata_on_change.s')
    def test_set_node_private_updates_doi(
            self, mock_update_doi_metadata, app, user, project_public,
            url_public, make_node_payload):
        """Making a node that owns a DOI private must enqueue a DOI metadata update."""
        IdentifierFactory(referent=project_public, category='doi')
        res = app.patch_json_api(
            url_public,
            make_node_payload(
                project_public,
                {'public': False}),
            auth=user.auth)
        assert res.status_code == 200
        project_public.reload()
        assert not project_public.is_public
        # The DOI update task must be scheduled with this node's id.
        mock_update_doi_metadata.assert_called_with(project_public._id)
    @pytest.mark.enable_enqueue_task
    @mock.patch('website.preprints.tasks.update_or_enqueue_on_preprint_updated')
    def test_set_node_with_preprint_private_updates_doi(
            self, mock_update_doi_metadata, app, user,
            project_public, url_public, make_node_payload):
        """Privatizing a preprint's supplemental project leaves the preprint public."""
        target_object = PreprintFactory(project=project_public)
        res = app.patch_json_api(
            url_public,
            make_node_payload(
                project_public,
                {'public': False}),
            auth=user.auth)
        assert res.status_code == 200
        project_public.reload()
        assert not project_public.is_public
        # Turning supplemental_project private no longer turns preprint private
        assert target_object.is_public
        assert not mock_update_doi_metadata.called
@pytest.mark.django_db
@pytest.mark.enable_bookmark_creation
class TestNodeDelete(NodeCRUDTestCase):
    """DELETE on the node detail endpoint: permission checks, hierarchy rules, and DOI side effects."""
    def test_deletes_node_errors(
            self, app, user, user_two, project_public,
            project_private, url_public, url_private,
            url_fake, sparse_url_public):
        """Every unauthorized or invalid delete attempt fails and leaves the node undeleted."""
        # test_deletes_public_node_logged_out
        res = app.delete(url_public, expect_errors=True)
        assert res.status_code == 401
        assert 'detail' in res.json['errors'][0]
        # test_deletes_public_node_fails_if_unauthorized
        res = app.delete_json_api(
            url_public,
            auth=user_two.auth,
            expect_errors=True)
        project_public.reload()
        assert res.status_code == 403
        assert project_public.is_deleted is False
        assert 'detail' in res.json['errors'][0]
        # test_deletes_from_sparse_fails
        res = app.delete_json_api(
            sparse_url_public,
            auth=user.auth,
            expect_errors=True)
        project_public.reload()
        assert res.status_code == 405
        assert project_public.is_deleted is False
        assert 'detail' in res.json['errors'][0]
        # test_deletes_private_node_logged_out
        res = app.delete(url_private, expect_errors=True)
        assert res.status_code == 401
        assert 'detail' in res.json['errors'][0]
        # test_deletes_private_node_logged_in_non_contributor
        res = app.delete(url_private, auth=user_two.auth, expect_errors=True)
        project_private.reload()
        assert res.status_code == 403
        assert project_private.is_deleted is False
        assert 'detail' in res.json['errors'][0]
        # test_deletes_invalid_node
        res = app.delete(url_fake, auth=user.auth, expect_errors=True)
        assert res.status_code == 404
        assert 'detail' in res.json['errors'][0]
        # test_delete_osf_group_improper_permissions
        osf_group = OSFGroupFactory(creator=user_two)
        project_private.add_osf_group(osf_group, permissions.READ)
        res = app.delete(url_private, auth=user_two.auth, expect_errors=True)
        project_private.reload()
        assert res.status_code == 403
        assert project_private.is_deleted is False
        assert 'detail' in res.json['errors'][0]
    def test_deletes_private_node_logged_in_read_only_contributor(
            self, app, user_two, project_private, url_private):
        """Read-only contributors may not delete a node."""
        project_private.add_contributor(
            user_two, permissions=permissions.READ)
        project_private.save()
        res = app.delete(url_private, auth=user_two.auth, expect_errors=True)
        project_private.reload()
        assert res.status_code == 403
        assert project_private.is_deleted is False
        assert 'detail' in res.json['errors'][0]
    def test_deletes_private_node_logged_in_write_contributor(
            self, app, user_two, project_private, url_private):
        """Write contributors may not delete a node either (admin-only operation)."""
        project_private.add_contributor(
            user_two, permissions=permissions.WRITE)
        project_private.save()
        res = app.delete(url_private, auth=user_two.auth, expect_errors=True)
        project_private.reload()
        assert res.status_code == 403
        assert project_private.is_deleted is False
        assert 'detail' in res.json['errors'][0]
    def test_delete_project_with_component_returns_errors_pre_2_12(self, app, user):
        """Before API version 2.12, a parent with children cannot be deleted directly."""
        project = ProjectFactory(creator=user)
        NodeFactory(parent=project, creator=user)
        # Return a 400 because component must be deleted before deleting the
        # parent
        res = app.delete_json_api(
            '/{}nodes/{}/'.format(API_BASE, project._id),
            auth=user.auth,
            expect_errors=True
        )
        assert res.status_code == 400
        errors = res.json['errors']
        assert len(errors) == 1
        assert (
            errors[0]['detail'] ==
            'Any child components must be deleted prior to deleting this project.')
    def test_delete_project_with_component_allowed_with_2_12(self, app, user):
        """From API version 2.12 on, deleting a parent cascades through the hierarchy."""
        project = ProjectFactory(creator=user)
        child = NodeFactory(parent=project, creator=user)
        grandchild = NodeFactory(parent=child, creator=user)
        # Versions 2.12 and greater delete all the nodes in the hierarchy
        res = app.delete_json_api(
            '/{}nodes/{}/?version=2.12'.format(API_BASE, project._id),
            auth=user.auth,
            expect_errors=True
        )
        assert res.status_code == 204
        project.reload()
        child.reload()
        grandchild.reload()
        assert project.is_deleted is True
        assert child.is_deleted is True
        assert grandchild.is_deleted is True
    def test_delete_project_with_private_component_2_12(self, app, user):
        """Cascade delete is refused when the requester lacks permission on a child."""
        user_two = AuthUserFactory()
        project = ProjectFactory(creator=user)
        child = NodeFactory(parent=project, creator=user_two)
        # Versions 2.12 and greater delete all the nodes in the hierarchy
        res = app.delete_json_api(
            '/{}nodes/{}/?version=2.12'.format(API_BASE, project._id),
            auth=user.auth,
            expect_errors=True
        )
        assert res.status_code == 403
        project.reload()
        child.reload()
        assert project.is_deleted is False
        assert child.is_deleted is False
    def test_delete_bookmark_collection_returns_error(self, app, user):
        """A user's bookmark collection is not deletable through the nodes endpoint."""
        bookmark_collection = find_bookmark_collection(user)
        res = app.delete_json_api(
            '/{}nodes/{}/'.format(API_BASE, bookmark_collection._id),
            auth=user.auth,
            expect_errors=True
        )
        # Bookmark collections are collections, so a 404 is returned
        assert res.status_code == 404
    @mock.patch('website.identifiers.tasks.update_doi_metadata_on_change.s')
    def test_delete_node_with_preprint_calls_preprint_update_status(
            self, mock_update_doi_metadata_on_change, app, user,
            project_public, url_public):
        """Deleting a preprint's supplemental node does not trigger a DOI metadata update."""
        PreprintFactory(project=project_public)
        app.delete_json_api(url_public, auth=user.auth, expect_errors=True)
        project_public.reload()
        assert not mock_update_doi_metadata_on_change.called
    @mock.patch('website.identifiers.tasks.update_doi_metadata_on_change.s')
    def test_delete_node_with_identifier_calls_preprint_update_status(
            self, mock_update_doi_metadata_on_change, app, user,
            project_public, url_public):
        """Deleting a node that owns a DOI identifier does trigger a DOI metadata update."""
        IdentifierFactory(referent=project_public, category='doi')
        app.delete_json_api(url_public, auth=user.auth, expect_errors=True)
        project_public.reload()
        assert mock_update_doi_metadata_on_change.called
    def test_deletes_public_node_succeeds_as_owner(
            self, app, user, project_public, url_public):
        """The creator/admin can delete their public node; a deletion log is recorded."""
        with assert_latest_log(NodeLog.PROJECT_DELETED, project_public):
            res = app.delete_json_api(
                url_public, auth=user.auth, expect_errors=True)
            project_public.reload()
            assert res.status_code == 204
            assert project_public.is_deleted is True
    def test_requesting_deleted_returns_410(
            self, app, project_public, url_public):
        """GET on an already-deleted node responds 410 Gone."""
        project_public.is_deleted = True
        project_public.save()
        res = app.get(url_public, expect_errors=True)
        assert res.status_code == 410
        assert 'detail' in res.json['errors'][0]
    def test_deletes_private_node_logged_in_contributor(
            self, app, user, project_private, url_private):
        """The creator/admin can delete their private node; a deletion log is recorded."""
        with assert_latest_log(NodeLog.PROJECT_DELETED, project_private):
            res = app.delete(url_private, auth=user.auth, expect_errors=True)
            project_private.reload()
            assert res.status_code == 204
            assert project_private.is_deleted is True
@pytest.mark.django_db
class TestReturnDeletedNode:
    """Deleted nodes respond 410 Gone to every read, edit, and delete attempt."""
    @pytest.fixture()
    def project_public_deleted(self, user):
        # A public project already flagged as deleted.
        return ProjectFactory(
            is_deleted=True,
            creator=user,
            title='This public project has been deleted',
            category='project',
            is_public=True
        )
    @pytest.fixture()
    def project_private_deleted(self, user):
        # A private project already flagged as deleted.
        return ProjectFactory(
            is_deleted=True,
            creator=user,
            title='This private project has been deleted',
            category='project',
            is_public=False
        )
    @pytest.fixture()
    def title_new(self):
        return 'This deleted node has been edited'
    @pytest.fixture()
    def url_project_public_deleted(self, project_public_deleted):
        return '/{}nodes/{}/'.format(API_BASE, project_public_deleted._id)
    @pytest.fixture()
    def url_project_private_deleted(self, project_private_deleted):
        return '/{}nodes/{}/'.format(API_BASE, project_private_deleted._id)
    def test_return_deleted_node(
            self, app, user, title_new, project_public_deleted,
            project_private_deleted, url_project_public_deleted,
            url_project_private_deleted):
        """GET, PUT, and DELETE on a deleted node (public or private) all return 410."""
        # test_return_deleted_public_node
        res = app.get(url_project_public_deleted, expect_errors=True)
        assert res.status_code == 410
        # test_return_deleted_private_node
        res = app.get(
            url_project_private_deleted,
            auth=user.auth,
            expect_errors=True)
        assert res.status_code == 410
        # test_edit_deleted_public_node
        res = app.put_json_api(
            url_project_public_deleted,
            params={
                'title': title_new,
                'node_id': project_public_deleted._id,
                'category': project_public_deleted.category
            },
            auth=user.auth, expect_errors=True)
        assert res.status_code == 410
        # test_edit_deleted_private_node
        res = app.put_json_api(
            url_project_private_deleted,
            params={
                'title': title_new,
                'node_id': project_private_deleted._id,
                'category': project_private_deleted.category
            },
            auth=user.auth, expect_errors=True)
        assert res.status_code == 410
        # test_delete_deleted_public_node
        res = app.delete(
            url_project_public_deleted,
            auth=user.auth,
            expect_errors=True)
        assert res.status_code == 410
        # test_delete_deleted_private_node
        res = app.delete(
            url_project_private_deleted,
            auth=user.auth,
            expect_errors=True)
        assert res.status_code == 410
@pytest.mark.django_db
class TestUpdateNodeSubjects(UpdateSubjectsMixin):
    """Runs the shared UpdateSubjectsMixin test suite against a project resource."""
    @pytest.fixture()
    def resource(self, user_admin_contrib, user_write_contrib, user_read_contrib):
        # Public project with one admin (creator), one write, and one read contributor.
        project = ProjectFactory(is_public=True, creator=user_admin_contrib)
        project.add_contributor(user_write_contrib, permissions=permissions.WRITE)
        project.add_contributor(user_read_contrib, permissions=permissions.READ)
        project.save()
        return project
@pytest.mark.django_db
class TestNodeTags:
    """Tag visibility and add/remove permissions on the node detail endpoint."""
    @pytest.fixture()
    def user_admin(self):
        return AuthUserFactory()
    @pytest.fixture()
    def user_non_contrib(self):
        return AuthUserFactory()
    @pytest.fixture()
    def user_read_contrib(self):
        return AuthUserFactory()
    @pytest.fixture()
    def project_public(self, user, user_admin):
        project_public = ProjectFactory(
            title='Project One', is_public=True, creator=user)
        project_public.add_contributor(
            user_admin,
            permissions=permissions.CREATOR_PERMISSIONS,
            save=True)
        project_public.add_contributor(
            user, permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS, save=True)
        return project_public
    @pytest.fixture()
    def project_private(self, user, user_admin):
        project_private = ProjectFactory(
            title='Project Two', is_public=False, creator=user)
        project_private.add_contributor(
            user_admin, permissions=permissions.CREATOR_PERMISSIONS, save=True)
        project_private.add_contributor(
            user, permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS, save=True)
        return project_private
    @pytest.fixture()
    def url_public(self, project_public):
        return '/{}nodes/{}/'.format(API_BASE, project_public._id)
    @pytest.fixture()
    def url_private(self, project_private):
        return '/{}nodes/{}/'.format(API_BASE, project_private._id)
    @pytest.fixture()
    def payload_public(self, project_public):
        # PATCH body that replaces the public project's tag list with ['new-tag'].
        return {
            'data': {
                'id': project_public._id,
                'type': 'nodes',
                'attributes': {
                    'tags': ['new-tag']
                }
            }
        }
    @pytest.fixture()
    def payload_private(self, project_private):
        # PATCH body that replaces the private project's tag list with ['new-tag'].
        return {
            'data': {
                'id': project_private._id,
                'type': 'nodes',
                'attributes': {
                    'tags': ['new-tag']
                }
            }
        }
    def test_public_project_starts_with_no_tags(self, app, url_public):
        res = app.get(url_public)
        assert res.status_code == 200
        assert len(res.json['data']['attributes']['tags']) == 0
    def test_node_detail_does_not_expose_system_tags(
            self, app, project_public, url_public):
        """System tags are internal bookkeeping and must not appear in the API output."""
        project_public.add_system_tag('systag', save=True)
        res = app.get(url_public)
        assert res.status_code == 200
        assert len(res.json['data']['attributes']['tags']) == 0
    def test_contributor_can_add_tag_to_public_project(
            self, app, user, project_public, payload_public, url_public):
        """A contributor's tag PATCH is reflected in the response, DB, and a later GET."""
        with assert_latest_log(NodeLog.TAG_ADDED, project_public):
            res = app.patch_json_api(
                url_public,
                payload_public,
                auth=user.auth,
                expect_errors=True)
            assert res.status_code == 200
            # Ensure data is correct from the PATCH response
            assert len(res.json['data']['attributes']['tags']) == 1
            assert res.json['data']['attributes']['tags'][0] == 'new-tag'
            # Ensure data is correct in the database
            project_public.reload()
            assert project_public.tags.count() == 1
            assert project_public.tags.first()._id == 'new-tag'
            # Ensure data is correct when GETting the resource again
            reload_res = app.get(url_public)
            assert len(reload_res.json['data']['attributes']['tags']) == 1
            assert reload_res.json['data']['attributes']['tags'][0] == 'new-tag'
    def test_contributor_can_add_tag_to_private_project(
            self, app, user, project_private, payload_private, url_private):
        """Same round-trip check as the public case, on a private project."""
        with assert_latest_log(NodeLog.TAG_ADDED, project_private):
            res = app.patch_json_api(
                url_private, payload_private, auth=user.auth)
            assert res.status_code == 200
            # Ensure data is correct from the PATCH response
            assert len(res.json['data']['attributes']['tags']) == 1
            assert res.json['data']['attributes']['tags'][0] == 'new-tag'
            # Ensure data is correct in the database
            project_private.reload()
            assert project_private.tags.count() == 1
            assert project_private.tags.first()._id == 'new-tag'
            # Ensure data is correct when GETting the resource again
            reload_res = app.get(url_private, auth=user.auth)
            assert len(reload_res.json['data']['attributes']['tags']) == 1
            assert reload_res.json['data']['attributes']['tags'][0] == 'new-tag'
    def test_partial_update_project_does_not_clear_tags(
            self, app, user_admin, project_private, payload_private, url_private):
        """PATCHing unrelated attributes (the public flag) must leave existing tags intact."""
        res = app.patch_json_api(
            url_private,
            payload_private,
            auth=user_admin.auth)
        assert res.status_code == 200
        assert len(res.json['data']['attributes']['tags']) == 1
        new_payload = {
            'data': {
                'id': project_private._id,
                'type': 'nodes',
                'attributes': {
                    'public': True
                }
            }
        }
        res = app.patch_json_api(
            url_private,
            new_payload,
            auth=user_admin.auth)
        assert res.status_code == 200
        assert len(res.json['data']['attributes']['tags']) == 1
        new_payload['data']['attributes']['public'] = False
        res = app.patch_json_api(
            url_private,
            new_payload,
            auth=user_admin.auth)
        assert res.status_code == 200
        assert len(res.json['data']['attributes']['tags']) == 1
    def test_add_tag_to_project_errors(
            self, app, user_non_contrib, user_read_contrib,
            payload_public, payload_private,
            url_public, url_private):
        """Anonymous, non-contributor, and read-only users cannot modify tags."""
        # test_non_authenticated_user_cannot_add_tag_to_public_project
        res = app.patch_json_api(
            url_public, payload_public,
            expect_errors=True, auth=None)
        assert res.status_code == 401
        # test_non_authenticated_user_cannot_add_tag_to_private_project
        res = app.patch_json_api(
            url_private, payload_private,
            expect_errors=True, auth=None)
        assert res.status_code == 401
        # test_non_contributor_cannot_add_tag_to_public_project
        res = app.patch_json_api(
            url_public, payload_public,
            expect_errors=True, auth=user_non_contrib.auth)
        assert res.status_code == 403
        # test_non_contributor_cannot_add_tag_to_private_project
        res = app.patch_json_api(
            url_private, payload_private,
            expect_errors=True, auth=user_non_contrib.auth)
        assert res.status_code == 403
        # test_read_only_contributor_cannot_add_tag_to_public_project
        res = app.patch_json_api(
            url_public, payload_public,
            expect_errors=True,
            auth=user_read_contrib.auth)
        assert res.status_code == 403
        # test_read_only_contributor_cannot_add_tag_to_private_project
        res = app.patch_json_api(
            url_private, payload_private,
            expect_errors=True,
            auth=user_read_contrib.auth)
        assert res.status_code == 403
    def test_tags_add_and_remove_properly(
            self, app, user, project_private,
            payload_private, url_private):
        """PATCHing the tags list adds, replaces, and clears tags with matching logs."""
        with assert_latest_log(NodeLog.TAG_ADDED, project_private):
            res = app.patch_json_api(
                url_private, payload_private, auth=user.auth)
            assert res.status_code == 200
            # Ensure adding tag data is correct from the PATCH response
            assert len(res.json['data']['attributes']['tags']) == 1
            assert res.json['data']['attributes']['tags'][0] == 'new-tag'
        with assert_latest_log(NodeLog.TAG_REMOVED, project_private), assert_latest_log(NodeLog.TAG_ADDED, project_private, 1):
            # Ensure removing and adding tag data is correct from the PATCH
            # response
            res = app.patch_json_api(
                url_private,
                {
                    'data': {
                        'id': project_private._id,
                        'type': 'nodes',
                        'attributes': {'tags': ['newer-tag']}
                    }
                }, auth=user.auth)
            assert res.status_code == 200
            assert len(res.json['data']['attributes']['tags']) == 1
            assert res.json['data']['attributes']['tags'][0] == 'newer-tag'
        with assert_latest_log(NodeLog.TAG_REMOVED, project_private):
            # Ensure removing tag data is correct from the PATCH response
            res = app.patch_json_api(
                url_private,
                {
                    'data': {
                        'id': project_private._id,
                        'type': 'nodes',
                        'attributes': {'tags': []}
                    }
                }, auth=user.auth)
            assert res.status_code == 200
            assert len(res.json['data']['attributes']['tags']) == 0
    def test_tags_post_object_instead_of_list(self, user, app):
        """Tags must be a JSON list; an object is rejected with a 400 on POST."""
        url = '/{}nodes/'.format(API_BASE)
        payload = {'data': {
            'type': 'nodes',
            'attributes': {
                'title': 'new title',
                'category': 'project',
                'tags': {'foo': 'bar'}
            }
        }}
        res = app.post_json_api(
            url, payload, auth=user.auth,
            expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'Expected a list of items but got type "dict".'
    def test_tags_patch_object_instead_of_list(
            self, app, user, payload_public, url_public):
        """Tags must be a JSON list; an object is rejected with a 400 on PATCH."""
        payload_public['data']['attributes']['tags'] = {'foo': 'bar'}
        res = app.patch_json_api(
            url_public, payload_public,
            auth=user.auth, expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'Expected a list of items but got type "dict".'
@pytest.mark.django_db
class TestNodeLicense:
    """License serialization on the node detail endpoint, including parent inheritance."""
    @pytest.fixture()
    def user_admin(self):
        return AuthUserFactory()
    @pytest.fixture()
    def user_two(self):
        return AuthUserFactory()
    @pytest.fixture()
    def user_read_contrib(self):
        return AuthUserFactory()
    @pytest.fixture()
    def license_name(self):
        return 'MIT License'
    @pytest.fixture()
    def node_license(self, license_name):
        return NodeLicense.objects.filter(name=license_name).first()
    @pytest.fixture()
    def year(self):
        return '2105'
    @pytest.fixture()
    def copyright_holders(self):
        return ['Foo', 'Bar']
    @pytest.fixture()
    def project_public(
            self, user, user_admin, node_license,
            year, copyright_holders):
        # Public project carrying a full license record.
        project_public = ProjectFactory(
            title='Project One', is_public=True, creator=user)
        project_public.add_contributor(
            user_admin,
            permissions=permissions.CREATOR_PERMISSIONS,
            save=True)
        project_public.add_contributor(
            user, permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS, save=True)
        project_public.node_license = NodeLicenseRecordFactory(
            node_license=node_license,
            year=year,
            copyright_holders=copyright_holders
        )
        project_public.save()
        return project_public
    @pytest.fixture()
    def project_private(
            self, user, user_admin, node_license,
            year, copyright_holders):
        # Private project carrying a full license record.
        project_private = ProjectFactory(
            title='Project Two', is_public=False, creator=user)
        project_private.add_contributor(
            user_admin, permissions=permissions.CREATOR_PERMISSIONS, save=True)
        project_private.add_contributor(
            user, permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS, save=True)
        project_private.node_license = NodeLicenseRecordFactory(
            node_license=node_license,
            year=year,
            copyright_holders=copyright_holders
        )
        project_private.save()
        return project_private
    @pytest.fixture()
    def url_public(self, project_public):
        return '/{}nodes/{}/'.format(API_BASE, project_public._id)
    @pytest.fixture()
    def url_private(self, project_private):
        return '/{}nodes/{}/'.format(API_BASE, project_private._id)
    def test_node_has(
            self, app, user, node_license, project_public,
            project_private, url_private, url_public):
        """Both public and private nodes expose the license attributes and relationship."""
        # test_public_node_has_node_license
        res = app.get(url_public)
        assert project_public.node_license.year == res.json[
            'data']['attributes']['node_license']['year']
        # test_public_node_has_license_relationship
        res = app.get(url_public)
        expected_license_url = '/{}licenses/{}'.format(
            API_BASE, node_license._id)
        actual_license_url = res.json['data']['relationships']['license']['links']['related']['href']
        assert expected_license_url in actual_license_url
        # test_private_node_has_node_license
        res = app.get(url_private, auth=user.auth)
        assert project_private.node_license.year == res.json[
            'data']['attributes']['node_license']['year']
        # test_private_node_has_license_relationship
        res = app.get(url_private, auth=user.auth)
        expected_license_url = '/{}licenses/{}'.format(
            API_BASE, node_license._id)
        actual_license_url = res.json['data']['relationships']['license']['links']['related']['href']
        assert expected_license_url in actual_license_url
    def test_component_return_parent_license_if_no_license(
            self, app, user, node_license, project_public):
        """A component without its own license inherits the parent's in the API output."""
        node = NodeFactory(parent=project_public, creator=user)
        node.save()
        node_url = '/{}nodes/{}/'.format(API_BASE, node._id)
        res = app.get(node_url, auth=user.auth)
        assert not node.node_license
        assert project_public.node_license.year == \
            res.json['data']['attributes']['node_license']['year']
        actual_license_url = res.json['data']['relationships']['license']['links']['related']['href']
        expected_license_url = '/{}licenses/{}'.format(
            API_BASE, node_license._id)
        assert expected_license_url in actual_license_url
@pytest.mark.django_db
class TestNodeUpdateLicense:
    """Updating a node's license via PATCH: permissions, payload validation, and logging."""
    @pytest.fixture()
    def user_admin_contrib(self):
        return AuthUserFactory()
    @pytest.fixture()
    def user_write_contrib(self):
        return AuthUserFactory()
    @pytest.fixture()
    def user_read_contrib(self):
        return AuthUserFactory()
    @pytest.fixture()
    def user_non_contrib(self):
        return AuthUserFactory()
    @pytest.fixture()
    def node(self, user_admin_contrib, user_write_contrib, user_read_contrib):
        # Node with admin (creator), write, and read contributors; no license yet.
        node = NodeFactory(creator=user_admin_contrib)
        node.add_contributor(user_write_contrib, auth=Auth(user_admin_contrib))
        node.add_contributor(
            user_read_contrib,
            auth=Auth(user_admin_contrib),
            permissions=permissions.READ)
        node.save()
        return node
    @pytest.fixture()
    def license_cc0(self):
        return NodeLicense.objects.filter(name='CC0 1.0 Universal').first()
    @pytest.fixture()
    def license_mit(self):
        return NodeLicense.objects.filter(name='MIT License').first()
    @pytest.fixture()
    def license_no(self):
        return NodeLicense.objects.get(name='No license')
    @pytest.fixture()
    def url_node(self, node):
        return '/{}nodes/{}/'.format(API_BASE, node._id)
    @pytest.fixture()
    def make_payload(self):
        # Builds a JSON:API PATCH body; the license relationship block is only
        # included when a license_id is supplied.
        def payload(
                node_id, license_id=None, license_year=None,
                copyright_holders=None):
            attributes = {}
            if license_year and copyright_holders:
                attributes = {
                    'node_license': {
                        'year': license_year,
                        'copyright_holders': copyright_holders
                    }
                }
            elif license_year:
                attributes = {
                    'node_license': {
                        'year': license_year
                    }
                }
            elif copyright_holders:
                attributes = {
                    'node_license': {
                        'copyright_holders': copyright_holders
                    }
                }
            return {
                'data': {
                    'type': 'nodes',
                    'id': node_id,
                    'attributes': attributes,
                    'relationships': {
                        'license': {
                            'data': {
                                'type': 'licenses',
                                'id': license_id
                            }
                        }
                    }
                }
            } if license_id else {
                'data': {
                    'type': 'nodes',
                    'id': node_id,
                    'attributes': attributes
                }
            }
        return payload
    @pytest.fixture()
    def make_request(self, app):
        # Thin wrapper around app.patch_json_api for brevity in the tests below.
        def request(url, data, auth=None, expect_errors=False):
            return app.patch_json_api(
                url, data, auth=auth, expect_errors=expect_errors)
        return request
    def test_admin_update_license_with_invalid_id(
            self, user_admin_contrib, node, make_payload,
            make_request, url_node):
        """An unknown license id yields 404 and leaves the node license unset."""
        data = make_payload(
            node_id=node._id,
            license_id='thisisafakelicenseid'
        )
        assert node.node_license is None
        res = make_request(
            url_node, data,
            auth=user_admin_contrib.auth,
            expect_errors=True)
        assert res.status_code == 404
        assert res.json['errors'][0]['detail'] == 'Unable to find specified license.'
        node.reload()
        assert node.node_license is None
    def test_admin_can_update_license(
            self, user_admin_contrib, node,
            make_payload, make_request,
            license_cc0, url_node):
        """Setting only the relationship creates a license record with empty details."""
        data = make_payload(
            node_id=node._id,
            license_id=license_cc0._id
        )
        assert node.node_license is None
        res = make_request(url_node, data, auth=user_admin_contrib.auth)
        assert res.status_code == 200
        node.reload()
        assert node.node_license.node_license == license_cc0
        assert node.node_license.year is None
        assert node.node_license.copyright_holders == []
    def test_admin_can_update_license_record(
            self, user_admin_contrib, node,
            make_payload, make_request,
            license_no, url_node):
        """Setting relationship plus attributes stores year and copyright holders."""
        data = make_payload(
            node_id=node._id,
            license_id=license_no._id,
            license_year='2015',
            copyright_holders=['Mr. Monument', 'Princess OSF']
        )
        assert node.node_license is None
        res = make_request(url_node, data, auth=user_admin_contrib.auth)
        assert res.status_code == 200
        node.reload()
        assert node.node_license.node_license == license_no
        assert node.node_license.year == '2015'
        assert node.node_license.copyright_holders == [
            'Mr. Monument', 'Princess OSF']
    def test_cannot_update(
            self, user_write_contrib, user_read_contrib,
            user_non_contrib, node, make_payload,
            make_request, license_cc0, url_node):
        """Only admins may change the license: write/read/non-contrib/anonymous all fail."""
        # def test_rw_contributor_cannot_update_license(self):
        data = make_payload(
            node_id=node._id,
            license_id=license_cc0._id
        )
        res = make_request(
            url_node, data,
            auth=user_write_contrib.auth,
            expect_errors=True)
        assert res.status_code == 403
        assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
        # def test_read_contributor_cannot_update_license(self):
        data = make_payload(
            node_id=node._id,
            license_id=license_cc0._id
        )
        res = make_request(
            url_node, data,
            auth=user_read_contrib.auth,
            expect_errors=True)
        assert res.status_code == 403
        assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
        # def test_non_contributor_cannot_update_license(self):
        data = make_payload(
            node_id=node._id,
            license_id=license_cc0._id
        )
        res = make_request(
            url_node, data,
            auth=user_non_contrib.auth,
            expect_errors=True)
        assert res.status_code == 403
        assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
        # def test_unauthenticated_user_cannot_update_license(self):
        data = make_payload(
            node_id=node._id,
            license_id=license_cc0._id
        )
        res = make_request(url_node, data, expect_errors=True)
        assert res.status_code == 401
        assert res.json['errors'][0]['detail'] == exceptions.NotAuthenticated.default_detail
    def test_update_node_with_existing_license_year_attribute_only(
            self, user_admin_contrib, node, make_payload,
            make_request, license_no, url_node):
        """PATCHing only the year updates it and preserves license and holders."""
        node.set_node_license(
            {
                'id': license_no.license_id,
                'year': '2014',
                'copyrightHolders': ['Reason', 'Mr. E']
            },
            Auth(user_admin_contrib),
        )
        node.save()
        assert node.node_license.node_license == license_no
        assert node.node_license.year == '2014'
        assert node.node_license.copyright_holders == ['Reason', 'Mr. E']
        data = make_payload(
            node_id=node._id,
            license_year='2015'
        )
        res = make_request(url_node, data, auth=user_admin_contrib.auth)
        assert res.status_code == 200
        node.node_license.reload()
        assert node.node_license.node_license == license_no
        assert node.node_license.year == '2015'
        assert node.node_license.copyright_holders == ['Reason', 'Mr. E']
    def test_update_node_with_existing_license_copyright_holders_attribute_only(
            self, user_admin_contrib, node, make_payload, make_request, license_no, url_node):
        """PATCHing only the holders updates them and preserves license and year."""
        node.set_node_license(
            {
                'id': license_no.license_id,
                'year': '2014',
                'copyrightHolders': ['Reason', 'Mr. E']
            },
            Auth(user_admin_contrib),
        )
        node.save()
        assert node.node_license.node_license == license_no
        assert node.node_license.year == '2014'
        assert node.node_license.copyright_holders == ['Reason', 'Mr. E']
        data = make_payload(
            node_id=node._id,
            copyright_holders=['Mr. Monument', 'Princess OSF']
        )
        res = make_request(url_node, data, auth=user_admin_contrib.auth)
        assert res.status_code == 200
        node.node_license.reload()
        assert node.node_license.node_license == license_no
        assert node.node_license.year == '2014'
        assert node.node_license.copyright_holders == [
            'Mr. Monument', 'Princess OSF']
    def test_update_node_with_existing_license_relationship_only(
            self, user_admin_contrib, node, make_payload,
            make_request, license_cc0, license_no, url_node):
        """PATCHing only the relationship swaps the license, preserving year/holders."""
        node.set_node_license(
            {
                'id': license_no.license_id,
                'year': '2014',
                'copyrightHolders': ['Reason', 'Mr. E']
            },
            Auth(user_admin_contrib),
        )
        node.save()
        assert node.node_license.node_license == license_no
        assert node.node_license.year == '2014'
        assert node.node_license.copyright_holders == ['Reason', 'Mr. E']
        data = make_payload(
            node_id=node._id,
            license_id=license_cc0._id
        )
        res = make_request(url_node, data, auth=user_admin_contrib.auth)
        assert res.status_code == 200
        node.node_license.reload()
        assert node.node_license.node_license == license_cc0
        assert node.node_license.year == '2014'
        assert node.node_license.copyright_holders == ['Reason', 'Mr. E']
    def test_update_node_with_existing_license_relationship_and_attributes(
            self, user_admin_contrib, node, make_payload, make_request,
            license_no, license_cc0, url_node):
        """PATCHing relationship and attributes together replaces all three fields."""
        node.set_node_license(
            {
                'id': license_no.license_id,
                'year': '2014',
                'copyrightHolders': ['Reason', 'Mr. E']
            },
            Auth(user_admin_contrib),
            save=True
        )
        assert node.node_license.node_license == license_no
        assert node.node_license.year == '2014'
        assert node.node_license.copyright_holders == ['Reason', 'Mr. E']
        data = make_payload(
            node_id=node._id,
            license_id=license_cc0._id,
            license_year='2015',
            copyright_holders=['Mr. Monument', 'Princess OSF']
        )
        res = make_request(url_node, data, auth=user_admin_contrib.auth)
        assert res.status_code == 200
        node.node_license.reload()
        assert node.node_license.node_license == license_cc0
        assert node.node_license.year == '2015'
        assert node.node_license.copyright_holders == [
            'Mr. Monument', 'Princess OSF']
    def test_update_node_license_without_required_year_in_payload(
            self, user_admin_contrib, node, make_payload,
            make_request, license_no, url_node):
        data = make_payload(
            node_id=node._id,
            license_id=license_no._id,
            copyright_holders=['Rick', 'Morty']
        )
        res = make_request(
            url_node, data,
            auth=user_admin_contrib.auth,
            expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'year must be specified for this license'
    def test_update_node_license_without_license_id(
            self, node, make_payload, make_request, url_node, user_admin_contrib):
        data = make_payload(
            node_id=node._id,
            license_year='2015',
            copyright_holders=['Ben, Jerry']
        )
        res = make_request(
            url_node, data,
            auth=user_admin_contrib.auth,
            expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'License ID must be provided for a Node License.'
    def test_update_node_license_without_required_copyright_holders_in_payload_(
            self, user_admin_contrib, node, make_payload, make_request, license_no, url_node):
        data = make_payload(
            node_id=node._id,
            license_id=license_no._id,
            license_year='1994'
        )
        res = make_request(
            url_node, data,
            auth=user_admin_contrib.auth,
            expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'copyrightHolders must be specified for this license'
    def test_update_node_license_adds_log(
            self, user_admin_contrib, node, make_payload,
            make_request, license_cc0, url_node):
        """A genuine license change appends a 'license_changed' log entry."""
        data = make_payload(
            node_id=node._id,
            license_id=license_cc0._id
        )
        logs_before_update = node.logs.count()
        res = make_request(url_node, data, auth=user_admin_contrib.auth)
        assert res.status_code == 200
        node.reload()
        logs_after_update = node.logs.count()
        assert logs_before_update != logs_after_update
        assert node.logs.latest().action == 'license_changed'
    def test_update_node_license_without_change_does_not_add_log(
            self, user_admin_contrib, node, make_payload,
            make_request, license_no, url_node):
        """Re-submitting the same license (holders merely reordered) must not log a change."""
        node.set_node_license(
            {
                'id': license_no.license_id,
                'year': '2015',
                'copyrightHolders': ['Kim', 'Kanye']
            },
            auth=Auth(user_admin_contrib),
            save=True
        )
        before_num_logs = node.logs.count()
        before_update_log = node.logs.latest()
        data = make_payload(
            node_id=node._id,
            license_id=license_no._id,
            license_year='2015',
            copyright_holders=['Kanye', 'Kim']
        )
        res = make_request(url_node, data, auth=user_admin_contrib.auth)
        node.reload()
        after_num_logs = node.logs.count()
        after_update_log = node.logs.latest()
        assert res.status_code == 200
        assert before_num_logs == after_num_logs
        assert before_update_log._id == after_update_log._id
| 40.544839 | 133 | 0.612522 |
ace1b2d37b467ff95f3ba0c689cb81389d308ac8 | 8,764 | py | Python | go_expvar/datadog_checks/go_expvar/go_expvar.py | fujigon/integrations-core | 256b1c138fd1bf1c71db63698737e813cfda00f8 | [
"BSD-3-Clause"
] | null | null | null | go_expvar/datadog_checks/go_expvar/go_expvar.py | fujigon/integrations-core | 256b1c138fd1bf1c71db63698737e813cfda00f8 | [
"BSD-3-Clause"
] | null | null | null | go_expvar/datadog_checks/go_expvar/go_expvar.py | fujigon/integrations-core | 256b1c138fd1bf1c71db63698737e813cfda00f8 | [
"BSD-3-Clause"
] | 1 | 2019-12-23T13:35:17.000Z | 2019-12-23T13:35:17.000Z | # (C) Datadog, Inc. 2014-2017
# (C) Cory Watson <cory@stripe.com> 2015-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import re
from collections import defaultdict
from six import iteritems
from six.moves.urllib.parse import urlparse
from datadog_checks.base import AgentCheck
# Cap on metrics submitted per run so a permissive path cannot flood the backend.
DEFAULT_MAX_METRICS = 350
# Keys recognized in each entry of the instance "metrics" configuration list.
PATH = "path"
ALIAS = "alias"
TYPE = "type"
TAGS = "tags"
# Supported metric submission types.
GAUGE = "gauge"
RATE = "rate"
COUNT = "count"
COUNTER = "counter"  # Deprecated
MONOTONIC_COUNTER = "monotonic_counter"
DEFAULT_TYPE = GAUGE
# Maps a metric-type string to the AgentCheck method used to submit it.
SUPPORTED_TYPES = {
    GAUGE: AgentCheck.gauge,
    RATE: AgentCheck.rate,
    COUNT: AgentCheck.count,
    COUNTER: AgentCheck.increment,  # Deprecated
    MONOTONIC_COUNTER: AgentCheck.monotonic_count,
}
# Prefix used for metric names unless the instance overrides "namespace".
DEFAULT_METRIC_NAMESPACE = "go_expvar"
# See http://golang.org/pkg/runtime/#MemStats
DEFAULT_GAUGE_MEMSTAT_METRICS = [
    # General statistics
    "Alloc",
    "TotalAlloc",
    # Main allocation heap statistics
    "HeapAlloc",
    "HeapSys",
    "HeapIdle",
    "HeapInuse",
    "HeapReleased",
    "HeapObjects",
]
DEFAULT_RATE_MEMSTAT_METRICS = [
    # General statistics
    "Lookups",
    "Mallocs",
    "Frees",
    # Garbage collector statistics
    "PauseTotalNs",
    "NumGC",
]
# Memstats metrics always collected: gauges for point-in-time values,
# rates for monotonically increasing counters.
DEFAULT_METRICS = [{PATH: "memstats/%s" % path, TYPE: GAUGE} for path in DEFAULT_GAUGE_MEMSTAT_METRICS] + [
    {PATH: "memstats/%s" % path, TYPE: RATE} for path in DEFAULT_RATE_MEMSTAT_METRICS
]
# Path appended when the configured expvar_url has no path component.
GO_EXPVAR_URL_PATH = "/debug/vars"
class GoExpvar(AgentCheck):
    """Agent check that scrapes metrics from a Go application's expvar endpoint."""
    # Remap legacy ssl_* instance options onto the shared HTTP wrapper's tls_* options.
    HTTP_CONFIG_REMAPPER = {
        'ssl_verify': {'name': 'tls_verify', 'default': None},
        'ssl_certfile': {'name': 'tls_cert', 'default': None},
        'ssl_keyfile': {'name': 'tls_private_key', 'default': None},
    }
    def __init__(self, name, init_config, instances):
        super(GoExpvar, self).__init__(name, init_config, instances)
        # Cache of compiled regexes keyed by the raw pattern string from a metric path.
        self._regexes = {}
        # Last observed NumGC per endpoint URL; used to diff GC pause samples between runs.
        self._last_gc_count = defaultdict(int)
    def _get_data(self, url, instance):
        """Fetch ``url`` and return the deserialized JSON payload (raises on HTTP error)."""
        resp = self.http.get(url)
        resp.raise_for_status()
        return resp.json()
    def _load(self, instance):
        """Parse instance config, fetch the expvar payload and return
        (data, tags, metrics, max_metrics, url, namespace)."""
        url = instance.get('expvar_url')
        if not url:
            raise Exception('GoExpvar instance missing "expvar_url" value.')
        parsed_url = urlparse(url)
        # if no path is specified we use the default one
        if not parsed_url.path:
            url = parsed_url._replace(path=GO_EXPVAR_URL_PATH).geturl()
        tags = instance.get('tags', [])
        # Tag every metric with the endpoint it came from (avoid duplicating the tag).
        expvar_url_tag = "expvar_url:%s" % url
        if expvar_url_tag not in tags:
            tags.append(expvar_url_tag)
        data = self._get_data(url, instance)
        metrics = DEFAULT_METRICS + instance.get("metrics", [])
        max_metrics = instance.get("max_returned_metrics", DEFAULT_MAX_METRICS)
        namespace = instance.get('namespace', DEFAULT_METRIC_NAMESPACE)
        return data, tags, metrics, max_metrics, url, namespace
    def get_gc_collection_histogram(self, data, tags, url, namespace):
        """Submit GC pause durations that occurred since the previous run.

        The modulo-256 indexing below assumes memstats.PauseNs is a 256-entry
        circular buffer indexed by GC count (see the Go runtime MemStats docs).
        """
        num_gc = data.get("memstats", {}).get("NumGC")
        pause_hist = data.get("memstats", {}).get("PauseNs")
        last_gc_count = self._last_gc_count[url]
        if last_gc_count == num_gc:
            # No GC has run. Do nothing
            return
        # Slice out only the pauses recorded since the last check run,
        # handling wrap-around of the circular buffer.
        start = last_gc_count % 256
        end = (num_gc + 255) % 256 + 1
        if start < end:
            values = pause_hist[start:end]
        else:
            values = pause_hist[start:] + pause_hist[:end]
        self._last_gc_count[url] = num_gc
        for value in values:
            self.histogram(self.normalize("memstats.PauseNs", namespace, fix_case=True), value, tags=tags)
    def check(self, instance):
        """Entry point: fetch the payload once and report GC plus configured metrics."""
        data, tags, metrics, max_metrics, url, namespace = self._load(instance)
        self.get_gc_collection_histogram(data, tags, url, namespace)
        self.parse_expvar_data(data, tags, metrics, max_metrics, namespace)
    def parse_expvar_data(self, data, tags, metrics, max_metrics, namespace):
        '''
        Report all the metrics based on the configuration in instance
        If a metric is not well configured or is not present in the payload,
        continue processing metrics but log the information to the info page
        '''
        count = 0
        for metric in metrics:
            path = metric.get(PATH)
            metric_type = metric.get(TYPE, DEFAULT_TYPE)
            # Copy the per-metric tags so the instance-wide tags are not mutated in.
            metric_tags = list(metric.get(TAGS, []))
            metric_tags += tags
            alias = metric.get(ALIAS)
            if not path:
                self.warning("Metric %s has no path", metric)
                continue
            if metric_type not in SUPPORTED_TYPES:
                self.warning("Metric type %s not supported for this check", metric_type)
                continue
            # A path may contain wildcards/regexes and therefore match several values.
            keys = path.split("/")
            values = self.deep_get(data, keys)
            if len(values) == 0:
                self.warning("No results matching path %s", path)
                continue
            # When an alias collapses several matches onto one name, disambiguate with a path tag.
            tag_by_path = alias is not None
            for traversed_path, value in values:
                actual_path = ".".join(traversed_path)
                path_tag = ["path:%s" % actual_path] if tag_by_path else []
                metric_name = alias or self.normalize(actual_path, namespace, fix_case=True)
                # Skip values that cannot be reported as numbers.
                try:
                    float(value)
                except (TypeError, ValueError):
                    self.log.warning("Unreportable value for path %s: %s", path, value)
                    continue
                if count >= max_metrics:
                    self.warning(
                        "Reporting more metrics than the allowed maximum. "
                        "Please contact support@datadoghq.com for more information."
                    )
                    return
                SUPPORTED_TYPES[metric_type](self, metric_name, value, metric_tags + path_tag)
                count += 1
    def deep_get(self, content, keys, traversed_path=None):
        '''
        Allow to retrieve content nested inside a several layers deep dict/list
        Examples: -content: {
                            "key1": {
                                "key2" : [
                                            {
                                                "name"  : "object1",
                                                "value" : 42
                                            },
                                            {
                                                "name"  : "object2",
                                                "value" : 72
                                            }
                                          ]
                            }
                          }
                  -keys: ["key1", "key2", "1", "value"]
                  would return:
                        [(["key1", "key2", "1", "value"], 72)]
                  -keys: ["key1", "key2", "1", "*"]
                  would return:
                        [(["key1", "key2", "1", "value"], 72), (["key1", "key2", "1", "name"], "object2")]
                  -keys: ["key1", "key2", "*", "value"]
                  would return:
                        [(["key1", "key2", "1", "value"], 72), (["key1", "key2", "0", "value"], 42)]
        '''
        if traversed_path is None:
            traversed_path = []
        # Base case: no path components left, return the node we landed on.
        if keys == []:
            return [(traversed_path, content)]
        key = keys[0]
        if key.isalnum():
            # key is not a regex, simply match for equality
            matcher = key.__eq__
        else:
            # key might be a regex
            key_regex = self._regexes.get(key)
            if key_regex is None:
                # we don't have it cached, compile it
                regex = "^{}$".format(key)
                try:
                    key_regex = re.compile(regex)
                except Exception:
                    self.warning("Cannot compile regex: %s", regex)
                    return []
                self._regexes[key] = key_regex
            matcher = key_regex.match
        # Recurse into every child whose key matches this path component.
        results = []
        for new_key, new_content in self.items(content):
            if matcher(new_key):
                results.extend(self.deep_get(new_content, keys[1:], traversed_path + [str(new_key)]))
        return results
    def items(self, object):
        """Yield (key, value) pairs uniformly for dicts and lists (list indices as strings)."""
        if isinstance(object, list):
            for new_key, new_content in enumerate(object):
                yield str(new_key), new_content
        elif isinstance(object, dict):
            for new_key, new_content in iteritems(object):
                yield str(new_key), new_content
        else:
            self.log.warning("Could not parse this object, check the json served by the expvar")
| 34.640316 | 107 | 0.542903 |
ace1b3084b464c35c65cafbb57e01a9e1d95d28f | 10,942 | py | Python | perfkitbenchmarker/providers/gcp/gcp_dpb_dataproc.py | srelf-ukcloud/PerfKitBenchmarker | 8ebdf1c90a34a4af33d08283198aaafc5f6e5099 | [
"Apache-2.0"
] | null | null | null | perfkitbenchmarker/providers/gcp/gcp_dpb_dataproc.py | srelf-ukcloud/PerfKitBenchmarker | 8ebdf1c90a34a4af33d08283198aaafc5f6e5099 | [
"Apache-2.0"
] | null | null | null | perfkitbenchmarker/providers/gcp/gcp_dpb_dataproc.py | srelf-ukcloud/PerfKitBenchmarker | 8ebdf1c90a34a4af33d08283198aaafc5f6e5099 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing class for GCP's dataproc service.
Clusters can be created, have jobs submitted to them and deleted. See details
at https://cloud.google.com/dataproc/
"""
import datetime
import json
import logging
from absl import flags
from perfkitbenchmarker import dpb_service
from perfkitbenchmarker import errors
from perfkitbenchmarker.linux_packages import aws_credentials
from perfkitbenchmarker.providers import gcp
from perfkitbenchmarker.providers.gcp import gcs
from perfkitbenchmarker.providers.gcp import util
FLAGS = flags.FLAGS
flags.DEFINE_string('dpb_dataproc_image_version', None,
                    'The image version to use for the cluster.')
flags.DEFINE_integer('dpb_dataproc_distcp_num_maps', None,
                     'Number of maps to copy data.')
# Maps a GCE persistent-disk type to the HDFS storage label reported in metadata.
disk_to_hdfs_map = {
    'pd-standard': 'HDD',
    'pd-ssd': 'SSD'
}
class GcpDpbDataproc(dpb_service.BaseDpbService):
    """Object representing a GCP Dataproc cluster.
    Attributes:
        project: ID of the project.
    """
    CLOUD = gcp.CLOUD
    SERVICE_TYPE = 'dataproc'
    PERSISTENT_FS_PREFIX = 'gs://'
    def __init__(self, dpb_service_spec):
        super(GcpDpbDataproc, self).__init__(dpb_service_spec)
        self.dpb_service_type = GcpDpbDataproc.SERVICE_TYPE
        self.project = FLAGS.project
        if FLAGS.dpb_dataproc_image_version:
            self.dpb_version = FLAGS.dpb_dataproc_image_version
        if not self.dpb_service_zone:
            raise errors.Setup.InvalidSetupError(
                'dpb_service_zone must be provided, for provisioning.')
        # Derive the region by stripping the zone suffix (e.g. us-central1-a -> us-central1).
        self.region = self.dpb_service_zone.rsplit('-', 1)[0]
        self.storage_service = gcs.GoogleCloudStorageService()
        self.storage_service.PrepareService(location=self.region)
    @staticmethod
    def _ParseTime(state_time: str) -> datetime.datetime:
        """Parses time from json output.
        Args:
          state_time: string. the state start time.
        Returns:
          Parsed datetime.
        """
        # Dataproc timestamps may or may not carry fractional seconds.
        try:
            return datetime.datetime.strptime(state_time, '%Y-%m-%dT%H:%M:%S.%fZ')
        except ValueError:
            return datetime.datetime.strptime(state_time, '%Y-%m-%dT%H:%M:%SZ')
    @staticmethod
    def CheckPrerequisites(benchmark_config):
        # No Dataproc-specific prerequisites to validate.
        del benchmark_config  # Unused
    def DataprocGcloudCommand(self, *args):
        """Build a `gcloud dataproc ...` command pre-configured with the region."""
        all_args = ('dataproc',) + args
        cmd = util.GcloudCommand(self, *all_args)
        cmd.flags['region'] = self.region
        return cmd
    def _Create(self):
        """Creates the cluster."""
        cmd = self.DataprocGcloudCommand('clusters', 'create', self.cluster_id)
        if self.project is not None:
            cmd.flags['project'] = self.project
        if self.spec.worker_count:
            # The number of worker machines in the cluster
            cmd.flags['num-workers'] = self.spec.worker_count
        else:
            cmd.flags['single-node'] = True
        # Initialize applications on the dataproc cluster
        if self.spec.applications:
            logging.info('Include the requested applications')
            cmd.flags['optional-components'] = ','.join(self.spec.applications)
        # Enable component gateway for debuggability. Does not impact performance.
        cmd.flags['enable-component-gateway'] = True
        # TODO(pclay): stop ignoring spec.master_group?
        # Workers and masters share the worker_group spec below.
        for role in ['worker', 'master']:
            # Set machine type
            if self.spec.worker_group.vm_spec.machine_type:
                self._AddToCmd(cmd, '{0}-machine-type'.format(role),
                               self.spec.worker_group.vm_spec.machine_type)
            # Set boot_disk_size
            if self.spec.worker_group.disk_spec.disk_size:
                size_in_gb = '{}GB'.format(
                    str(self.spec.worker_group.disk_spec.disk_size))
                self._AddToCmd(cmd, '{0}-boot-disk-size'.format(role), size_in_gb)
            # Set boot_disk_type
            if self.spec.worker_group.disk_spec.disk_type:
                self._AddToCmd(cmd, '{0}-boot-disk-type'.format(role),
                               self.spec.worker_group.disk_spec.disk_type)
                self.dpb_hdfs_type = disk_to_hdfs_map[
                    self.spec.worker_group.disk_spec.disk_type]
            # Set ssd count
            if self.spec.worker_group.vm_spec.num_local_ssds:
                self._AddToCmd(cmd, 'num-{0}-local-ssds'.format(role),
                               self.spec.worker_group.vm_spec.num_local_ssds)
        # Set zone
        cmd.flags['zone'] = self.dpb_service_zone
        if self.dpb_version:
            cmd.flags['image-version'] = self.dpb_version
        if FLAGS.gcp_dataproc_image:
            cmd.flags['image'] = FLAGS.gcp_dataproc_image
        cmd.flags['metadata'] = util.MakeFormattedDefaultTags()
        cmd.flags['labels'] = util.MakeFormattedDefaultTags()
        timeout = 900  # 15 min
        # TODO(saksena): Retrieve the cluster create time and hold in a var
        _, stderr, retcode = cmd.Issue(timeout=timeout, raise_on_failure=False)
        if retcode:
            util.CheckGcloudResponseKnownFailures(stderr, retcode)
            raise errors.Resource.CreationError(stderr)
    def _Delete(self):
        """Deletes the cluster."""
        cmd = self.DataprocGcloudCommand('clusters', 'delete', self.cluster_id)
        cmd.Issue(raise_on_failure=False)
    def _Exists(self):
        """Check to see whether the cluster exists."""
        cmd = self.DataprocGcloudCommand('clusters', 'describe', self.cluster_id)
        _, _, retcode = cmd.Issue(raise_on_failure=False)
        return retcode == 0
    def SubmitJob(self,
                  jarfile=None,
                  classname=None,
                  pyspark_file=None,
                  query_file=None,
                  job_poll_interval=None,
                  job_stdout_file=None,
                  job_arguments=None,
                  job_files=None,
                  job_jars=None,
                  job_type=None,
                  properties=None):
        """See base class."""
        assert job_type
        args = ['jobs', 'submit', job_type]
        if job_type == self.PYSPARK_JOB_TYPE:
            args.append(pyspark_file)
        cmd = self.DataprocGcloudCommand(*args)
        cmd.flags['cluster'] = self.cluster_id
        cmd.flags['labels'] = util.MakeFormattedDefaultTags()
        job_jars = job_jars or []
        if classname:
            if jarfile:
                # Dataproc does not support both a main class and a main jar so just
                # make the main jar an additional jar instead.
                job_jars.append(jarfile)
            cmd.flags['class'] = classname
        elif jarfile:
            cmd.flags['jar'] = jarfile
        if query_file:
            cmd.flags['file'] = query_file
        if job_files:
            cmd.flags['files'] = ','.join(job_files)
        if job_jars:
            cmd.flags['jars'] = ','.join(job_jars)
        # Dataproc gives as stdout an object describing job execution.
        # Its stderr contains a mix of the stderr of the job, and the
        # stdout of the job.  We set the driver log level to FATAL
        # to suppress those messages, and we can then separate, hopefully
        # the job standard out from the log messages.
        cmd.flags['driver-log-levels'] = 'root={}'.format(FLAGS.dpb_log_level)
        all_properties = self.GetJobProperties()
        all_properties.update(properties or {})
        if all_properties:
            # For commas: https://cloud.google.com/sdk/gcloud/reference/topic/escaping
            cmd.flags['properties'] = '^@^' + '@'.join(
                '{}={}'.format(k, v) for k, v in all_properties.items())
        if job_arguments:
            cmd.additional_flags = ['--'] + job_arguments
        stdout, stderr, retcode = cmd.Issue(timeout=None, raise_on_failure=False)
        if retcode != 0:
            raise dpb_service.JobSubmissionError(stderr)
        results = json.loads(stdout)
        # Otherwise retcode would not have been 0
        assert results['status']['state'] == 'DONE'
        done_time = GcpDpbDataproc._ParseTime(results['status']['stateStartTime'])
        pending_time = None
        start_time = None
        # Walk the state history to find when the job was queued and when it started.
        for state in results['statusHistory']:
            if state['state'] == 'PENDING':
                pending_time = GcpDpbDataproc._ParseTime(state['stateStartTime'])
            elif state['state'] == 'RUNNING':
                start_time = GcpDpbDataproc._ParseTime(state['stateStartTime'])
        assert pending_time and start_time and done_time
        return dpb_service.JobResult(
            run_time=(done_time - start_time).total_seconds(),
            pending_time=(start_time - pending_time).total_seconds())
    def _AddToCmd(self, cmd, cmd_property, cmd_value):
        """Set a single gcloud flag on ``cmd``."""
        flag_name = cmd_property
        cmd.flags[flag_name] = cmd_value
    def distributed_copy(self, source_location, destination_location):
        """Method to copy data using a distributed job on the cluster."""
        cmd = self.DataprocGcloudCommand('jobs', 'submit', 'hadoop')
        cmd.flags['cluster'] = self.cluster_id
        cmd.flags['class'] = 'org.apache.hadoop.tools.DistCp'
        job_arguments = (['-m={}'.format(FLAGS.dpb_dataproc_distcp_num_maps)]
                         if FLAGS.dpb_dataproc_distcp_num_maps is not None else [])
        job_arguments.extend([source_location, destination_location])
        cmd.additional_flags = ['--'] + job_arguments
        _, _, retcode = cmd.Issue(timeout=None, raise_on_failure=False)
        return {dpb_service.SUCCESS: retcode == 0}
    def MigrateCrossCloud(self,
                          source_location,
                          destination_location,
                          dest_cloud='AWS'):
        """Method to copy data cross cloud using a distributed job on the cluster.
        Currently the only supported destination cloud is AWS.
        TODO(user): Add support for other destination clouds.
        Args:
          source_location: The source GCS path to migrate.
          destination_location: The destination path.
          dest_cloud: The cloud to copy data to.
        Returns:
          A dictionary with key 'success' and boolean value set to the status of
          data migration command.
        """
        if dest_cloud == 'AWS':
            dest_prefix = 's3a://'
        else:
            raise ValueError('Unsupported destination cloud.')
        cmd = self.DataprocGcloudCommand('jobs', 'submit', 'hadoop')
        if self.project is not None:
            cmd.flags['project'] = self.project
        cmd.flags['cluster'] = self.cluster_id
        cmd.flags['class'] = 'org.apache.hadoop.tools.DistCp'
        # Credentials are passed so the Hadoop S3A connector can write to AWS.
        s3_access_key, s3_secret_key = aws_credentials.GetCredentials()
        cmd.flags['properties'] = 'fs.s3a.access.key=%s,fs.s3a.secret.key=%s' % (
            s3_access_key, s3_secret_key)
        cmd.additional_flags = ['--'] + [
            'gs://' + source_location, dest_prefix + destination_location
        ]
        _, _, retcode = cmd.Issue(timeout=None, raise_on_failure=False)
        return {dpb_service.SUCCESS: retcode == 0}
| 36.718121 | 80 | 0.680771 |
ace1b4efe83dde22fda77d93e63d03fce6a65a13 | 10,301 | py | Python | homeassistant/scripts/check_config.py | mbs-technologie/home-assistant | 71fc446425cbb1c0d4670c261ce8ea3bfd83a73d | [
"MIT"
] | 13 | 2017-02-01T13:25:34.000Z | 2022-01-26T01:30:39.000Z | homeassistant/scripts/check_config.py | Paxy/home-assistant | 326cc83a17d344ea60ccaa1ef8295efaebf82f21 | [
"MIT"
] | 9 | 2017-07-26T18:05:32.000Z | 2021-12-05T14:16:34.000Z | homeassistant/scripts/check_config.py | Paxy/home-assistant | 326cc83a17d344ea60ccaa1ef8295efaebf82f21 | [
"MIT"
] | 21 | 2017-07-26T17:09:40.000Z | 2022-03-27T22:37:22.000Z | """Script to ensure a configuration file exists."""
import argparse
import logging
import os
from collections import OrderedDict
from glob import glob
from platform import system
from unittest.mock import patch
from typing import Dict, List, Sequence
import homeassistant.bootstrap as bootstrap
import homeassistant.config as config_util
import homeassistant.loader as loader
import homeassistant.util.yaml as yaml
from homeassistant.exceptions import HomeAssistantError
REQUIREMENTS = ('colorlog>2.1,<3',)
if system() == 'Windows':  # Ensure colorama installed for colorlog on Windows
    REQUIREMENTS += ('colorama<=1',)
_LOGGER = logging.getLogger(__name__)
# pylint: disable=protected-access
# Maps a short name to (dotted path to patch, original callable) for each
# Home Assistant function replaced during the check run.
MOCKS = {
    'load': ("homeassistant.util.yaml.load_yaml", yaml.load_yaml),
    'load*': ("homeassistant.config.load_yaml", yaml.load_yaml),
    'get': ("homeassistant.loader.get_component", loader.get_component),
    'secrets': ("homeassistant.util.yaml._secret_yaml", yaml._secret_yaml),
    'except': ("homeassistant.bootstrap.async_log_exception",
               bootstrap.async_log_exception)
}
# Functions patched out entirely to silence noisy log output during the check.
SILENCE = (
    'homeassistant.bootstrap.clear_secret_cache',
    'homeassistant.core._LOGGER.info',
    'homeassistant.loader._LOGGER.info',
    'homeassistant.bootstrap._LOGGER.info',
    'homeassistant.bootstrap._LOGGER.warning',
    'homeassistant.util.yaml._LOGGER.debug',
)
# Populated by check() with the active unittest.mock patchers.
PATCHES = {}
# Color used for section headers in the CLI output.
C_HEAD = 'bold'
# Pseudo-domain under which non-domain-specific errors are collected.
ERROR_STR = 'General Errors'
def color(the_color, *args, reset=None):
    """Color helper."""
    from colorlog.escape_codes import escape_codes, parse_colors
    try:
        if not args:
            assert reset is None, "You cannot reset if nothing being printed"
            return parse_colors(the_color)
        prefix = parse_colors(the_color)
        suffix = escape_codes[reset or 'reset']
        return prefix + ' '.join(args) + suffix
    except KeyError as k:
        raise ValueError("Invalid color {} in {}".format(str(k), the_color))
def run(script_args: List) -> int:
    """Handle ensure config commandline script.

    Parses CLI arguments, runs the configuration check and prints a
    human-readable report. Returns 0 on success, 1 when the configuration
    file does not exist.
    """
    parser = argparse.ArgumentParser(
        description=("Check Home Assistant configuration."))
    parser.add_argument(
        '--script', choices=['check_config'])
    parser.add_argument(
        '-c', '--config',
        default=config_util.get_default_config_dir(),
        help="Directory that contains the Home Assistant configuration")
    parser.add_argument(
        '-i', '--info',
        default=None,
        help="Show a portion of the config")
    parser.add_argument(
        '-f', '--files',
        action='store_true',
        help="Show used configuration files")
    parser.add_argument(
        '-s', '--secrets',
        action='store_true',
        help="Show secret information")
    args = parser.parse_args()
    config_dir = os.path.join(os.getcwd(), args.config)
    config_path = os.path.join(config_dir, 'configuration.yaml')
    if not os.path.isfile(config_path):
        print('Config does not exist:', config_path)
        return 1
    print(color('bold', "Testing configuration at", config_dir))
    # Domains whose (successful or failed) config should be printed.
    domain_info = []
    if args.info:
        domain_info = args.info.split(',')
    res = check(config_path)
    if args.files:
        # List every yaml file found, marking unused ones in red.
        print(color(C_HEAD, 'yaml files'), '(used /',
              color('red', 'not used') + ')')
        # Python 3.5 gets a recursive, but not in 3.4
        for yfn in sorted(glob(os.path.join(config_dir, '*.yaml')) +
                          glob(os.path.join(config_dir, '*/*.yaml'))):
            the_color = '' if yfn in res['yaml_files'] else 'red'
            print(color(the_color, '-', yfn))
    if len(res['except']) > 0:
        # Print each failing domain along with the config that failed.
        print(color('bold_white', 'Failed config'))
        for domain, config in res['except'].items():
            domain_info.append(domain)
            print(' ', color('bold_red', domain + ':'),
                  color('red', '', reset='red'))
            dump_dict(config, reset='red')
            print(color('reset'))
    if domain_info:
        if 'all' in domain_info:
            print(color('bold_white', 'Successful config (all)'))
            for domain, config in res['components'].items():
                print(' ', color(C_HEAD, domain + ':'))
                dump_dict(config)
        else:
            print(color('bold_white', 'Successful config (partial)'))
            for domain in domain_info:
                if domain == ERROR_STR:
                    continue
                print(' ', color(C_HEAD, domain + ':'))
                dump_dict(res['components'].get(domain, None))
    if args.secrets:
        # Report which secrets files define which keys and flag duplicates.
        flatsecret = {}
        for sfn, sdict in res['secret_cache'].items():
            sss = []
            for skey, sval in sdict.items():
                if skey in flatsecret:
                    _LOGGER.error('Duplicated secrets in files %s and %s',
                                  flatsecret[skey], sfn)
                flatsecret[skey] = sfn
                sss.append(color('green', skey) if skey in res['secrets']
                           else skey)
            print(color(C_HEAD, 'Secrets from', sfn + ':'), ', '.join(sss))
        print(color(C_HEAD, 'Used Secrets:'))
        for skey, sval in res['secrets'].items():
            print(' -', skey + ':', sval, color('cyan', '[from:', flatsecret
                                                .get(skey, 'keyring') + ']'))
    return 0
def check(config_path):
    """Perform a check by mocking hass load functions.

    Boots Home Assistant from ``config_path`` with loading/setup functions
    patched out so no component actually starts, and collects what would
    have been loaded into the returned result dict.
    """
    res = {
        'yaml_files': OrderedDict(),  # yaml_files loaded
        'secrets': OrderedDict(),  # secret cache and secrets loaded
        'except': OrderedDict(),  # exceptions raised (with config)
        'components': OrderedDict(),  # successful components
        'secret_cache': OrderedDict(),
    }
    # pylint: disable=unused-variable
    def mock_load(filename):
        """Mock hass.util.load_yaml to save config files."""
        res['yaml_files'][filename] = True
        return MOCKS['load'][1](filename)
    # pylint: disable=unused-variable
    def mock_get(comp_name):
        """Mock hass.loader.get_component to replace setup & setup_platform."""
        def mock_setup(*kwargs):
            """Mock setup, only record the component name & config."""
            assert comp_name not in res['components'], \
                "Components should contain a list of platforms"
            res['components'][comp_name] = kwargs[1].get(comp_name)
            return True
        module = MOCKS['get'][1](comp_name)
        if module is None:
            # Ensure list
            res['except'][ERROR_STR] = res['except'].get(ERROR_STR, [])
            res['except'][ERROR_STR].append('{} not found: {}'.format(
                'Platform' if '.' in comp_name else 'Component', comp_name))
            return None
        # Test if platform/component and overwrite setup
        if '.' in comp_name:
            module.setup_platform = mock_setup
            # Remove async variants so the mocked sync setup is the one called.
            if hasattr(module, 'async_setup_platform'):
                del module.async_setup_platform
        else:
            module.setup = mock_setup
            if hasattr(module, 'async_setup'):
                del module.async_setup
        return module
    # pylint: disable=unused-variable
    def mock_secrets(ldr, node):
        """Mock _get_secrets."""
        try:
            val = MOCKS['secrets'][1](ldr, node)
        except HomeAssistantError:
            val = None
        res['secrets'][node.value] = val
        return val
    def mock_except(ex, domain, config,  # pylint: disable=unused-variable
                    hass=None):
        """Mock bootstrap.log_exception."""
        MOCKS['except'][1](ex, domain, config, hass)
        res['except'][domain] = config.get(domain, config)
    # Patches to skip functions
    for sil in SILENCE:
        PATCHES[sil] = patch(sil)
    # Patches with local mock functions
    for key, val in MOCKS.items():
        # The * in the key is removed to find the mock_function (side_effect)
        # This allows us to use one side_effect to patch multiple locations
        mock_function = locals()['mock_' + key.replace('*', '')]
        PATCHES[key] = patch(val[0], side_effect=mock_function)
    # Start all patches
    for pat in PATCHES.values():
        pat.start()
    # Ensure !secrets point to the patched function
    yaml.yaml.SafeLoader.add_constructor('!secret', yaml._secret_yaml)
    try:
        bootstrap.from_config_file(config_path, skip_pip=True)
        res['secret_cache'] = dict(yaml.__SECRET_CACHE)
    except Exception as err:  # pylint: disable=broad-except
        print(color('red', 'Fatal error while loading config:'), str(err))
    finally:
        # Stop all patches
        for pat in PATCHES.values():
            pat.stop()
        # Ensure !secrets point to the original function
        yaml.yaml.SafeLoader.add_constructor('!secret', yaml._secret_yaml)
        bootstrap.clear_secret_cache()
    return res
def dump_dict(layer, indent_count=3, listi=False, **kwargs):
    """Display a dict.

    A friendly version of print yaml.yaml.dump(config).

    Args:
        layer: dict or list (or scalar leaf) to print.
        indent_count: number of leading spaces for this nesting level.
        listi: True when ``layer`` is being printed as a list item
            (its indent prefix ends with '-').
        kwargs: forwarded to ``color`` when printing a config source line.
    """
    def line_src(this):
        """Display line config source."""
        # Home Assistant annotates loaded config nodes with their origin.
        if hasattr(this, '__config_file__'):
            return color('cyan', "[source {}:{}]"
                         .format(this.__config_file__, this.__line__ or '?'),
                         **kwargs)
        return ''

    def sort_dict_key(val):
        """Return the dict key for sorting ('platform' always sorts first)."""
        skey = str.lower(val[0])
        if str(skey) == 'platform':
            skey = '0'
        return skey

    indent_str = indent_count * ' '
    if listi or isinstance(layer, list):
        # List items replace the last indent space with a '-' bullet.
        indent_str = indent_str[:-1] + '-'
    if isinstance(layer, Dict):
        for key, value in sorted(layer.items(), key=sort_dict_key):
            if isinstance(value, (dict, list)):
                # Nested container: print the key, then recurse one level deeper.
                print(indent_str, key + ':', line_src(value))
                dump_dict(value, indent_count + 2)
            else:
                print(indent_str, key + ':', value)
        indent_str = indent_count * ' '
    if isinstance(layer, Sequence):
        for i in layer:
            if isinstance(i, dict):
                dump_dict(i, indent_count + 2, True)
            else:
                print(' ', indent_str, i)
| 36.017483 | 79 | 0.594117 |
ace1b59e7ba0cce2f034244d1e061b38e7f3baf9 | 8,409 | py | Python | tests/integration/modules/test_mac_user.py | initOS/salt | 20d5073a9186439b08f63e49b96c6d789ba6bda7 | [
"Apache-2.0"
] | null | null | null | tests/integration/modules/test_mac_user.py | initOS/salt | 20d5073a9186439b08f63e49b96c6d789ba6bda7 | [
"Apache-2.0"
] | 4 | 2021-02-06T14:30:48.000Z | 2021-12-13T20:50:10.000Z | tests/integration/modules/test_mac_user.py | initOS/salt | 20d5073a9186439b08f63e49b96c6d789ba6bda7 | [
"Apache-2.0"
] | 1 | 2021-05-10T13:59:33.000Z | 2021-05-10T13:59:33.000Z | """
:codeauthor: Nicole Thomas <nicole@saltstack.com>
"""
import os
import salt.utils.files
from salt.exceptions import CommandExecutionError
from tests.support.case import ModuleCase
from tests.support.helpers import (
destructiveTest,
random_string,
runs_on,
skip_if_not_root,
slowTest,
)
# Create user strings for tests (randomized so repeated runs cannot collide
# with leftover accounts from a previous run).
ADD_USER = random_string("RS-", lowercase=False)
DEL_USER = random_string("RS-", lowercase=False)
PRIMARY_GROUP_USER = random_string("RS-", lowercase=False)
CHANGE_USER = random_string("RS-", lowercase=False)
@destructiveTest
@skip_if_not_root
@runs_on(kernel="Darwin")
class MacUserModuleTest(ModuleCase):
"""
Integration tests for the mac_user module
"""
def setUp(self):
"""
Sets up test requirements
"""
super().setUp()
os_grain = self.run_function("grains.item", ["kernel"])
if os_grain["kernel"] not in "Darwin":
self.skipTest("Test not applicable to '{kernel}' kernel".format(**os_grain))
@slowTest
def test_mac_user_add(self):
"""
Tests the add function
"""
try:
self.run_function("user.add", [ADD_USER])
user_info = self.run_function("user.info", [ADD_USER])
self.assertEqual(ADD_USER, user_info["name"])
except CommandExecutionError:
self.run_function("user.delete", [ADD_USER])
raise
@slowTest
def test_mac_user_delete(self):
"""
Tests the delete function
"""
# Create a user to delete - If unsuccessful, skip the test
if self.run_function("user.add", [DEL_USER]) is not True:
self.run_function("user.delete", [DEL_USER])
self.skipTest("Failed to create a user to delete")
# Now try to delete the added user
ret = self.run_function("user.delete", [DEL_USER])
self.assertTrue(ret)
@slowTest
def test_mac_user_primary_group(self):
"""
Tests the primary_group function
"""
# Create a user to test primary group function
if self.run_function("user.add", [PRIMARY_GROUP_USER]) is not True:
self.run_function("user.delete", [PRIMARY_GROUP_USER])
self.skipTest("Failed to create a user")
try:
# Test mac_user.primary_group
primary_group = self.run_function(
"user.primary_group", [PRIMARY_GROUP_USER]
)
uid_info = self.run_function("user.info", [PRIMARY_GROUP_USER])
self.assertIn(primary_group, uid_info["groups"])
except AssertionError:
self.run_function("user.delete", [PRIMARY_GROUP_USER])
raise
@slowTest
def test_mac_user_changes(self):
"""
Tests mac_user functions that change user properties
"""
# Create a user to manipulate - if unsuccessful, skip the test
if self.run_function("user.add", [CHANGE_USER]) is not True:
self.run_function("user.delete", [CHANGE_USER])
self.skipTest("Failed to create a user")
try:
# Test mac_user.chuid
self.run_function("user.chuid", [CHANGE_USER, 4376])
uid_info = self.run_function("user.info", [CHANGE_USER])
self.assertEqual(uid_info["uid"], 4376)
# Test mac_user.chgid
self.run_function("user.chgid", [CHANGE_USER, 4376])
gid_info = self.run_function("user.info", [CHANGE_USER])
self.assertEqual(gid_info["gid"], 4376)
# Test mac.user.chshell
self.run_function("user.chshell", [CHANGE_USER, "/bin/zsh"])
shell_info = self.run_function("user.info", [CHANGE_USER])
self.assertEqual(shell_info["shell"], "/bin/zsh")
# Test mac_user.chhome
self.run_function("user.chhome", [CHANGE_USER, "/Users/foo"])
home_info = self.run_function("user.info", [CHANGE_USER])
self.assertEqual(home_info["home"], "/Users/foo")
# Test mac_user.chfullname
self.run_function("user.chfullname", [CHANGE_USER, "Foo Bar"])
fullname_info = self.run_function("user.info", [CHANGE_USER])
self.assertEqual(fullname_info["fullname"], "Foo Bar")
# Test mac_user.chgroups
self.run_function("user.chgroups", [CHANGE_USER, "wheel"])
groups_info = self.run_function("user.info", [CHANGE_USER])
self.assertEqual(groups_info["groups"], ["wheel"])
except AssertionError:
self.run_function("user.delete", [CHANGE_USER])
raise
@slowTest
def test_mac_user_enable_auto_login(self):
"""
Tests mac_user functions that enable auto login
"""
# Make sure auto login is disabled before we start
if self.run_function("user.get_auto_login"):
self.skipTest("Auto login already enabled")
try:
# Does enable return True
self.assertTrue(
self.run_function(
"user.enable_auto_login", ["Spongebob", "Squarepants"]
)
)
# Did it set the user entry in the plist file
self.assertEqual(self.run_function("user.get_auto_login"), "Spongebob")
# Did it generate the `/etc/kcpassword` file
self.assertTrue(os.path.exists("/etc/kcpassword"))
# Are the contents of the file correct
test_data = b".\xc3\xb8'B\xc2\xa0\xc3\x99\xc2\xad\xc2\x8b\xc3\x8d\xc3\x8dl"
with salt.utils.files.fopen("/etc/kcpassword", "rb") as f:
file_data = f.read()
self.assertEqual(test_data, file_data)
# Does disable return True
self.assertTrue(self.run_function("user.disable_auto_login"))
# Does it remove the user entry in the plist file
self.assertFalse(self.run_function("user.get_auto_login"))
# Is the `/etc/kcpassword` file removed
self.assertFalse(os.path.exists("/etc/kcpassword"))
finally:
# Make sure auto_login is disabled
self.assertTrue(self.run_function("user.disable_auto_login"))
# Make sure autologin is disabled
if self.run_function("user.get_auto_login"):
raise Exception("Failed to disable auto login")
    @slowTest
    def test_mac_user_disable_auto_login(self):
        """
        Tests mac_user functions that disable auto login.

        Enables auto login as fixture setup, then verifies that
        ``user.disable_auto_login`` reports success, clears the plist entry,
        and removes the ``/etc/kcpassword`` file.
        """
        # Make sure auto login is enabled before we start
        # Is there an existing setting
        if self.run_function("user.get_auto_login"):
            self.skipTest("Auto login already enabled")
        try:
            # Enable auto login for the test
            self.run_function("user.enable_auto_login", ["Spongebob", "Squarepants"])
            # Make sure auto login got set up
            if not self.run_function("user.get_auto_login") == "Spongebob":
                raise Exception("Failed to enable auto login")
            # Does disable return True
            self.assertTrue(self.run_function("user.disable_auto_login"))
            # Does it remove the user entry in the plist file
            self.assertFalse(self.run_function("user.get_auto_login"))
            # Is the `/etc/kcpassword` file removed
            self.assertFalse(os.path.exists("/etc/kcpassword"))
        finally:
            # Always leave the machine with auto login disabled, regardless
            # of how the assertions above fared.
            self.assertTrue(self.run_function("user.disable_auto_login"))
            # Make sure auto login is disabled
            if self.run_function("user.get_auto_login"):
                raise Exception("Failed to disable auto login")
def tearDown(self):
"""
Clean up after tests
"""
# Delete ADD_USER
add_info = self.run_function("user.info", [ADD_USER])
if add_info:
self.run_function("user.delete", [ADD_USER])
# Delete DEL_USER if something failed
del_info = self.run_function("user.info", [DEL_USER])
if del_info:
self.run_function("user.delete", [DEL_USER])
# Delete CHANGE_USER
change_info = self.run_function("user.info", [CHANGE_USER])
if change_info:
self.run_function("user.delete", [CHANGE_USER])
| 35.331933 | 88 | 0.608277 |
ace1b6719b05fa539cfb86468d4b0d0085d6483e | 11,295 | py | Python | contrib/stack/stripmapStack/insarStack.py | earthobservatory/isce2 | 655c46cc4add275879167b750a5e91f6d00f168e | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-09-29T13:29:41.000Z | 2020-09-29T13:29:41.000Z | contrib/stack/stripmapStack/insarStack.py | earthobservatory/isce2 | 655c46cc4add275879167b750a5e91f6d00f168e | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | contrib/stack/stripmapStack/insarStack.py | earthobservatory/isce2 | 655c46cc4add275879167b750a5e91f6d00f168e | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2021-06-24T20:20:18.000Z | 2021-06-24T20:32:23.000Z | #! /usr/bin/env python
#Author: Heresh Fattahi
import os
import sys
import h5py
import insarPair as insarPair
from numpy import median, float32, vstack
# HDF5 chunk shape to use when dataset chunking is enabled
# (the create_dataset calls below currently have chunks commented out).
chunk_shape =(128,128)
# numpy dtype used for every raster dataset written to the HDF5 stack
dataType = float32
class insarStack:
    """
    InsarStack object for a stack of InSAR data (multi-platform multi-track data).
    Method save2h5 creates a HDF5 file to store the stack data.
    Each platform-track is a group with three sub groups : observation, quality, geometry.
    Each sub-group may have different datasets. Some common datasets are:
    observations (3D) : LOS, North, East, Up, RangeOffset, AzimuthOffset, ...
    quality (3D) : coherence, uncertainty, ...
    geometry (2D or 3D) : incidence, heading angle, ...
    All pairs for a given platform-track are required to have the same size.
    Pairs of different platforms-tracks may have different size.
    """
    def __init__(self, name='insarStack', pairsDict = None):
        # pairsDict maps (master, slave) date tuples to insarPair objects
        self.pairs = pairsDict

    def save2h5(self, output = 'data.h5', access_mode = 'w' , platform_tracks = None , ref_pixels = None , ref_pixel_method = 'average_coherence' ):
        '''
        Write the stack to an HDF5 file.

        output : Name of the HDF5 file for the InSAR stack.
        access_mode : h5py file mode ('w' truncates an existing file).
        platform_tracks : A list containing the platform_tracks to be stored
                          in the HDF5 file. If None all platform_tracks are
                          extracted from pairs. If pairs does not contain information
                          about the platform_tracks, then all pairs in the pairsDict are
                          considered from a single platform single track.
        ref_pixels : A dictionary containing reference pixels for each platform_track.
                     eg: ref_pixDict = {'Sentinel1A/Track144':(500,300) , 'Sentinel1A/Track100':(440,700)}
                     first pixel is in y direction (lines) and second pixel in x direction (columns)
        ref_pixel_method : currently unused; kept for interface compatibility.
        '''
        self.h5file = h5py.File(output, access_mode)
        self.platform_tracks = platform_tracks
        self.ref_pixels = ref_pixels
        if self.platform_tracks is None:
            self.get_platform_tracks()
        for platTrack in self.platform_tracks:
            print ('platform-track : ' , platTrack)
            # One HDF5 group per platform/track, with the three fixed sub-groups.
            group = self.h5file.create_group(platTrack)
            obsGroup = group.create_group('observations')
            qualityGroup = group.create_group('quality')
            geometryGroup = group.create_group('geometry')
            ################################
            # A class object for the platformTrack: gathers this track's pairs,
            # their common raster size and the dataset names they carry.
            platTrackObj = platformTrack()
            platTrackObj.getPairs(self.pairs, platTrack)
            platTrackObj.getSize()
            platTrackObj.getDatasetNames()
            ###############################
            # Get the reference pixel for a given platform/track
            if self.ref_pixels is not None:
                platTrackObj.ref_pixel = self.ref_pixels[platTrack]
            else:
                platTrackObj.ref_pixel = None
            ###############################
            # 3D datasets for observation quality (Coherence, uncertainty, ...)
            pairs = [pair for pair in platTrackObj.pairs.keys()]
            for dsName in platTrackObj.dsetQualityNames:
                print ('Create dataset for ', dsName)
                dsq = qualityGroup.create_dataset(dsName, shape=(platTrackObj.numPairs, platTrackObj.length, platTrackObj.width),
                                                  dtype=dataType) # , chunks=chunk_shape
                masterTimes = [None]*platTrackObj.numPairs
                slaveTimes = [None]*platTrackObj.numPairs
                for i in range(platTrackObj.numPairs):
                    data, metadata = platTrackObj.pairs[pairs[i]].read(dsName)
                    dsq[i,:,:] = data
                    # pair keys are (master, slave) datetime tuples
                    master , slave = pairs[i]
                    masterTimes[i] = master.strftime('%Y-%m-%d %H:%M:%S').encode('utf8')
                    slaveTimes[i] = slave.strftime('%Y-%m-%d %H:%M:%S').encode('utf8')
            ###############################
            # store the pair times as a 2D dataset (column 0: master, column 1: slave)
            if len(platTrackObj.dsetQualityNames) > 0:
                piars_idx = vstack((masterTimes,slaveTimes)).T
                dsq = qualityGroup.create_dataset('pairs_idx', data=piars_idx, dtype=piars_idx.dtype)
            ###############################
            # if the reference pixel is not given let's choose a pixel with maximum average coherence
            #if platTrackObj.ref_pixel is None:
            #    platTrackObj.ref_pixel = self.choose_ref_pixel(platTrack , method == 'average_coherence')
            ###############################
            # 3D datasets for observations (possible datasets: unwrapped-phase, RangeOffset, AzimuthOffset, unwrapped-amplitude, etc)
            # There should be no limitation for storing any other possible observations.
            pairs = [pair for pair in platTrackObj.pairs.keys()]
            for dsName in platTrackObj.dsetObservationNames:
                print ('Create dataset for ', dsName)
                dso = obsGroup.create_dataset(dsName, shape=(platTrackObj.numPairs, platTrackObj.length, platTrackObj.width),
                                              dtype=dataType) #, chunks=chunk_shape)
                masterTimes = [None]*platTrackObj.numPairs
                slaveTimes = [None]*platTrackObj.numPairs
                for i in range(platTrackObj.numPairs):
                    data, metadata = platTrackObj.pairs[pairs[i]].read(dsName)
                    #ds[i,:,:] = data - data[0, platTrackObj.ref_pixel[0] , platTrackObj.ref_pixel[1]]
                    dso[i,:,:] = data
                    master , slave = pairs[i]
                    masterTimes[i] = master.strftime('%Y-%m-%d %H:%M:%S').encode("ascii", "ignore")
                    slaveTimes[i] = slave.strftime('%Y-%m-%d %H:%M:%S').encode("ascii", "ignore")
            ###############################
            # A 2D dataset containing a 2D array of strings. First column
            # is the master time and second column the slave time of pairs.
            if len(platTrackObj.dsetObservationNames) > 0:
                piars_idx = vstack((masterTimes,slaveTimes)).T
                dspairs = obsGroup.create_dataset('pairs_idx', data=piars_idx, dtype=piars_idx.dtype)
            ###################################
            # NOTE(review): `metadata` here is whatever the last .read() call
            # produced; if no observation (or quality) dataset exists for this
            # track it may be unbound and raise NameError — confirm intended.
            for key,value in metadata.items():
                obsGroup.attrs[key] = value
            ###################################
            # 2D or 3D datasets for geometry (Lat, Lon, Heigt, Incidence,
            # Heading, Bperp, ...). For a given platform from a specific
            # track, a common viewing geometry is assumed. Therfore each
            # of Lat, Lon, Height, Incidence and Heading can be stored as
            # 2D dataset. Baselines if provided should be 3D.
            for dsName in platTrackObj.dsetGeometryNames:
                print ('Create dataset for ', dsName)
                pairs, length, width = platTrackObj.getSize_geometry(dsName)
                numPairs = len(pairs)
                dsg = geometryGroup.create_dataset(dsName, shape=(numPairs, length, width),
                                                   dtype=dataType) #, chunks=chunk_shape)
                for i in range(numPairs):
                    data, metadata = platTrackObj.pairs[pairs[i]].read(dsName)
                    dsg[i,:,:] = data
                for key,value in metadata.items():
                    geometryGroup.attrs[key] = value
        self.h5file.close()

    def get_platform_tracks(self):
        """Collect the unique platform_track identifiers found in self.pairs."""
        self.platform_tracks = []
        for pair in self.pairs.keys():
            if self.pairs[pair].platform_track not in self.platform_tracks:
                self.platform_tracks.append(self.pairs[pair].platform_track)

    # def loadh5(self, platform_track , groupName='observation', datasetName='unwrapped', method = , method_par, )
    # method : chunck , block , all
    # method_par : Chunck_size , block_size ,
    # def choose_reference_pixel(self, platTrack , method):
    # compute average coherence of the 3D dataset
    # find the pixel with maximum value
    # def time_baseline_timeseries():
    ##################################
class platformTrack:
    """Bookkeeping helper for one platform/track: collects its insarPair
    objects and exposes their common raster size and dataset names."""

    def __init__(self, name='platformTrack'): #, pairDict = None):
        self.pairs = None

    def getPairs(self, pairDict, platTrack):
        """Keep only the entries of *pairDict* acquired by *platTrack*."""
        self.pairs = {key: pairObj for key, pairObj in pairDict.items()
                      if pairObj.platform_track == platTrack}

    def getSize_geometry(self, dsName):
        """Return (pair keys, median length, median width) for the geometry
        dataset *dsName*, considering each source file only once and
        skipping pairs whose length is zero."""
        selected = []
        lengths = []
        widths = []
        seen_files = []
        for key, pairObj in self.pairs.items():
            # side effect: populates pairObj.length/.width/.file for dsName
            pairObj.get_metadata(dsName)
            if pairObj.length != 0 and pairObj.file not in seen_files:
                seen_files.append(pairObj.file)
                selected.append(key)
                widths.append(pairObj.width)
                lengths.append(pairObj.length)
        return selected, median(lengths), median(widths)

    def getSize(self):
        """Record the pair count and the median length/width of all pairs."""
        self.numPairs = len(self.pairs)
        self.length = median([pairObj.length for pairObj in self.pairs.values()])
        self.width = median([pairObj.width for pairObj in self.pairs.values()])

    def getDatasetNames(self):
        """Collect dataset names: observation/quality names come from the
        first pair (all pairs of a track are assumed identical there),
        geometry names are the union over all pairs since geometry may be
        missing for some of them."""
        pairObjs = list(self.pairs.values())
        first = pairObjs[0]
        if first.observationsDict is not None:
            self.dsetObservationNames = list(first.observationsDict.keys())
        else:
            self.dsetObservationNames = []
        if first.qualityDict is not None:
            self.dsetQualityNames = list(first.qualityDict.keys())
        else:
            self.dsetQualityNames = []
        self.dsetGeometryNames = []
        for pairObj in pairObjs:
            if pairObj.geometryDict is not None:
                names = list(pairObj.geometryDict.keys())
                self.dsetGeometryNames = list(set(self.dsetGeometryNames) | set(names))
ace1b6c8090dbda8ab16e6e4845edece1476f6bc | 6,878 | py | Python | docs/conf.py | UnixJunkie/OpenPharmacophore | 12c015ae939e87b3dbee26574fdb1d1cc11d64b3 | [
"MIT"
] | 1 | 2022-03-18T08:22:04.000Z | 2022-03-18T08:22:04.000Z | docs/conf.py | UnixJunkie/OpenPharmacophore | 12c015ae939e87b3dbee26574fdb1d1cc11d64b3 | [
"MIT"
] | null | null | null | docs/conf.py | UnixJunkie/OpenPharmacophore | 12c015ae939e87b3dbee26574fdb1d1cc11d64b3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
import os
import sys
import openpharmacophore
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# -- Project information -----------------------------------------------------
project = 'OpenPharmacophore'
copyright = ('2021, UIBCDF Lab at the Mexico City Childrens Hospital Federico Gomez and authors.'
             'Project structure based on the Computational Molecular Science Python Cookiecutter version 1.5')
author = 'UIBCDF Lab and contributors'
# The short X.Y version (local build metadata after '+' is stripped)
version = openpharmacophore.__version__.split('+')[0]
# The full version, including alpha/beta/rc tags
release = openpharmacophore.__version__
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.intersphinx',
    'sphinx.ext.mathjax',
    'sphinx.ext.todo',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
    'sphinx.ext.napoleon',
    'sphinx.ext.githubpages',
    'sphinxcontrib.bibtex',
    'nbsphinx',
    'recommonmark',
    'sphinx_markdown_tables',
    'sphinx.ext.extlinks'
]
# Generate autosummary stub pages automatically at build time.
autosummary_generate = True
# Napoleon: parse NumPy-style docstrings, include __init__ docs, and render
# parameters/ivars with dedicated fields.
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = True
napoleon_use_param = True
napoleon_use_ivar = True
numpydoc_show_class_members= True
numpydoc_show_inherited_class_members= True
numpydoc_class_members_toctree= True
# sphinxcontrib-bibtex
bibtex_bibfiles = ['bibliography.bib'] # list of *.bib files
bibtex_default_style = 'alpha'
bibtex_encoding = 'utf-8-sig'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_parsers={
}
source_suffix = ['.rst', '.md']
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# language was edited to use sphinx-intl
# NOTE(review): newer Sphinx releases warn on language=None and expect 'en'.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '**.ipynb_checkpoints']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'default'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
html_theme_options = {
    'canonical_url': '',
    'analytics_id': '',
    'logo_only': False,
    'display_version': True,
    'prev_next_buttons_location': 'bottom',
    'style_external_links': False,
    # Toc options
    'collapse_navigation': False,
    'sticky_navigation': True,
    'navigation_depth': 4,
    'includehidden': True,
    'titles_only': False
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = []
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom css
html_css_files = [
    'custom.css',
]
#### I should check
#### https://github.com/lotharschulz/sphinx-pages/blob/master/conf.py for more
#### options
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'openpharmacophore_librarydoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
# Empty: no LaTeX targets are configured for this project.
latex_documents = [
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# Empty: no man pages are configured for this project.
man_pages = [
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
# Empty: no Texinfo targets are configured for this project.
texinfo_documents = [
]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'https://docs.python.org/': None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
ace1b780a1ea78b8f5f572884394e5afe78de6a9 | 613 | py | Python | message/models.py | nasir733/mumbleapi | 9f4ac577e94426d8f409594a40e5458f3b1867cb | [
"Apache-2.0"
] | 1 | 2021-05-14T04:16:00.000Z | 2021-05-14T04:16:00.000Z | message/models.py | shukl08vk/mumbleapi | 101825d8aecba7eac4e31046e7b4b15b36c55f77 | [
"Apache-2.0"
] | null | null | null | message/models.py | shukl08vk/mumbleapi | 101825d8aecba7eac4e31046e7b4b15b36c55f77 | [
"Apache-2.0"
] | null | null | null | from django.db import models
from django.contrib.auth.models import User
import uuid
class Message(models.Model):
id = models.UUIDField(default=uuid.uuid4, unique=True, primary_key=True, editable=False)
to_user = models.ForeignKey(User,on_delete=models.SET_NULL, null=True, blank=True, related_name='messages')
created = models.DateTimeField(auto_now_add=True)
created_by = models.ForeignKey(User,on_delete=models.SET_NULL, null=True, blank=True)
content = models.CharField(max_length=255)
is_read = models.BooleanField(default=False)
def __str__(self):
return str(self.id)
| 40.866667 | 111 | 0.758564 |
ace1b9936313e99b612653b2dfdbc4127a318151 | 14,709 | py | Python | scripts/wayback_search.py | workmanjack/privacy-policies-through-time | 039e5f299abd200026d747635cee545c813d89a5 | [
"MIT"
] | null | null | null | scripts/wayback_search.py | workmanjack/privacy-policies-through-time | 039e5f299abd200026d747635cee545c813d89a5 | [
"MIT"
] | 6 | 2021-03-18T23:51:55.000Z | 2022-03-11T23:45:22.000Z | scripts/wayback_search.py | workmanjack/privacy-policies-through-time | 039e5f299abd200026d747635cee545c813d89a5 | [
"MIT"
] | null | null | null | from datetime import date, timedelta, datetime
from shutil import move
import urllib.parse
import dateparser
import requests
import argparse
import json
import csv
import re
import os
# Set to 1 to have print_debug() emit per-step trace output.
DEBUG = 0
# Directory where per-company policy snapshots and the CSV index are written.
POLICY_DIR = '../privacy-policies-through-time/'
# these are strings that signal the start/end of the wayback inserted html/js material in an
# archived webpage - we use them to strip out the company's part of the archived page
WAYBACK_HEADER_END = '<!-- END WAYBACK TOOLBAR INSERT -->'
WAYBACK_FOOTER_START = '<div id="footer">'
# (start, end) marker pairs used to trim an archived page down to the policy body.
POLICY_BOOKENDS = [
    ('<div id="main">', '<div id="footer">'),
    ('</head>', '<footer'),
]
# Helpers for normalizing extracted text: collapse runs of spaces/newlines
# and strip <link>/<script>/<style> blocks plus remaining HTML tags.
REGEX_DUP_WHITESPACE = re.compile(r' +', flags=re.IGNORECASE)
REGEX_DUP_NEWLINE = re.compile(r'\s*\n\s+')
REGEX_LINK_TAG = re.compile(r'<link.*?>')
REGEX_SCRIPT_TAG = re.compile(r'\<script.*?\</script\>', flags=re.DOTALL)
REGEX_STYLE_TAG = re.compile(r'\<style.*?\</style\>', flags=re.DOTALL)
REGEX_TAGS = re.compile('<[^<]+?>')
# A single combined pattern covering common "effective/updated/revised <date>"
# phrasings; the date itself is captured in group 1.
REGEX_POLICY_DATE_MASTER = [
    re.compile(
        r'(?:(?:effective)|' +
        r'(?:update)|' +
        r'(?:updated)|' +
        r'(?:amended)|' +
        r'(?:posted)|' +
        r'(?:modified)|' +
        r'(?:revision)|' +
        r'(?:revised))' +
        r'\s?(?:date|on|in|as of)?'
        r'[:]?' + # colon or no colon
        r'[\s\n]*' + # Whitespace/newlines between prefix and date
        r'(' +
        r'(\w+\.? \d+\w*,? \d+)|' + # November 3, 2003 | November 3 2003 | Nov. 3 2003 | Nov 3rd, 2003
        r'(\w+\.?\,? \d+)|' + # November 2003 | Nov. 2003 | Nov, 2003
        r'(\d+ \w+\.?\,? \d+)|' + # 3 November 2003 | 3 Nov. 2003
        r'(\d+/\d+/\d+)|' + # 11/3/2003 | 3/11/2003
        r'(\d+-\d+-\d+)' + # 11-3-2003 | 3-11-2003
        r')', flags=re.IGNORECASE)
]
# Ordered list of site-specific date patterns tried one at a time by
# get_update_date(); every pattern captures the date text in group 1.
REGEX_POLICY_DATE_LIST = [
    re.compile(r'Privacy Policy\s*(\w+ \d+, \d+)'),
    re.compile(r'\n(\d+-\d+-\d+)'),
    re.compile(r'\n(\w+ \d+, \d+)'),
    re.compile(r'amended as of (\w+ \d+)', flags=re.IGNORECASE),
    re.compile(r'amended as of (\w+\.* \d+, \d+)', flags=re.IGNORECASE),
    re.compile(r'Published:* (\w+ \d+, \d+)', flags=re.IGNORECASE),
    re.compile(r'Last update: (\d+/\d+/\d+)', flags=re.IGNORECASE),
    re.compile(r'Last Revision: (\w+ \d+, \d+)', flags=re.IGNORECASE),
    re.compile(r'Last Revised: (\w+ \d+, \d+)'),
    re.compile(r'posted as of:? (\w+ \d+, \d+)', flags=re.IGNORECASE),
    re.compile(r'Last modified: (\w+ \d+, \d+)'),
    re.compile(r'(\d+-\d+-\d+) privacy policy'),
    re.compile(r'effective as of (\w+ \d+, \d+)'),
    re.compile(r'last modified on (\w+ \d+, \d+)'),
    re.compile(r'Effective Date: (\w+ \d+\w+ \d+)'),
    re.compile(r'Effective Date: (\w+ \d+, \d+)'),
    re.compile(r'Last Updated: *(\w+ \d+, \d+)', flags=re.IGNORECASE),
    re.compile(r'Last Updated: *(\d+ \w+ \d+)', flags=re.IGNORECASE),
    re.compile(r'Last revised on (\w+ \d+, \d+)'),
    re.compile(r'Revised: (\w+ \d+, \d+)'),
    re.compile(r'Revised ([^\.]*)'),
    re.compile(r'last updated in ([^\.]*)'),
    re.compile(r'last updated on (\w+ \d+\w*, \d+)', flags=re.IGNORECASE),
    re.compile(r'last updated on (.*) \('),
    re.compile(r'updated on\n? ?(\w+ \d+, \d+)', flags=re.IGNORECASE),
    re.compile(r'updated on ([^\.]*)', flags=re.IGNORECASE),
    re.compile(r'Privacy Policy dated (.*)\n'),
    re.compile(r'Last update:? (\w+ \d+, \d+)'),
    re.compile(r'LAST UPDATED (\w+ \d+, \d+)', flags=re.IGNORECASE),
    re.compile(r'UPDATED (\w+,? \d+)', flags=re.IGNORECASE),
    re.compile(r'Updated: (.*)\n', flags=re.IGNORECASE),
    re.compile(r'Effective:? (.*)\n', flags=re.IGNORECASE),
]
def print_debug(msg):
    """Print *msg* only when the module-level DEBUG flag is truthy."""
    if DEBUG:
        print(msg)
# https://stackoverflow.com/questions/1060279/iterating-through-a-range-of-dates-in-python
def daterange(start_date, end_date):
    """Yield each date from start_date (inclusive) up to end_date (exclusive)."""
    current = start_date
    while current < end_date:
        yield current
        current += timedelta(days=1)
# https://stackoverflow.com/questions/5734438/how-to-create-a-month-iterator
def month_year_iter(start_month, start_year, end_month, end_year):
    """Yield (year, month) pairs from the start month (inclusive) up to the
    end month (exclusive), counting months as a flat ordinal."""
    first = 12 * start_year + (start_month - 1)
    last = 12 * end_year + (end_month - 1)
    for ordinal in range(first, last):
        yield ordinal // 12, ordinal % 12 + 1
def api_query(url):
    """GET *url* and return the decoded JSON body, or None on a non-200 response."""
    response = requests.get(url=url)
    if response.status_code == 200:
        return response.json()
    print('failed to retrieve data from {0}'.format(url))
    print('status_code={0}'.format(response.status_code))
    return None
def remove_wayback(page):
    """
    Given the text of a wayback archive page, strip the wayback-inserted
    toolbar/footer markup and return only the originally archived page.

    Args:
        page: str, text of webpage to process
    Returns:
        str
    """
    # if retrieved via api, wayback header/toolbar is not present
    header_pos = page.find(WAYBACK_HEADER_END)
    if header_pos < 0:
        print('wayback header not found')
    else:
        page = page[header_pos + len(WAYBACK_HEADER_END):]
    footer_pos = page.rfind(WAYBACK_FOOTER_START)
    if footer_pos < 0:
        print('wayback footer not found')
    else:
        page = page[:footer_pos]
    return page
def make_policy_comparable(page, policy_bookends, timestamp):
    """Reduce an archived policy page to plain, comparable text.

    Trims *page* to the region between each (start, end) marker pair in
    *policy_bookends*, removes the wayback-inserted *timestamp*, strips
    <script>/<style> blocks and all remaining HTML tags, and collapses
    duplicate whitespace and newlines.

    Args:
        page: str, raw HTML of the archived page.
        policy_bookends: iterable of (start, end) marker string pairs.
        timestamp: str, wayback snapshot timestamp embedded in the page.

    Returns:
        str, the cleaned policy text.
    """
    # trim by start and end
    print_debug('make_policy_comparable: {}'.format(timestamp))
    print_debug('start = {}'.format(len(page)))
    for start, end in policy_bookends:
        start_index = page.find(start)
        end_index = page.rfind(end)
        if start_index == -1:
            # marker missing: keep from the beginning of the page
            start_index = 0
        if end_index == -1:
            # marker missing: keep through the end of the page
            end_index = len(page)
        page = page[start_index:end_index]
    print_debug('after bookends = {}'.format(len(page)))
    # remove timestamps as they are unique and inserted by wayback
    page = page.replace(timestamp, '')
    print_debug('after timestamp = {}'.format(len(page)))
    # remove all <script> and <style> blocks
    page = REGEX_SCRIPT_TAG.sub('', page)
    print_debug('after <script> = {}'.format(len(page)))
    page = REGEX_STYLE_TAG.sub('', page)
    print_debug('after <style> = {}'.format(len(page)))
    # remove all tags because we want only the text
    page = REGEX_TAGS.sub('', page)
    print_debug('after tags = {}'.format(len(page)))
    # replace duplicate whitespace
    page = REGEX_DUP_WHITESPACE.sub(' ', page)
    print_debug('after whitespace = {}'.format(len(page)))
    # replace duplicate newlines
    page = REGEX_DUP_NEWLINE.sub('\n', page)
    print_debug('after newlines = {}'.format(len(page)))
    return page
def get_update_date(page, regex_list):
    """Scan *page* with each regex in *regex_list* and return the first
    parsed policy-update date, or None when no pattern yields one.

    Each regex must capture the date text in group 1. Scanning stops at the
    first regex whose full match is longer than one character, even if the
    captured text cannot be parsed into a date.
    """
    # Hoisted out of the loop: previously an empty regex_list left
    # update_date unbound and the final check raised UnboundLocalError.
    update_date = None
    for regex in regex_list:
        m = regex.search(page)
        if m and len(m.group()) > 1:
            try:
                update_date = dateparser.parse(m.group(1))
            except RecursionError as exc:
                # dateparser can recurse pathologically on some inputs
                print(exc)
            break
    if not update_date:
        print('update date not found!')
    return update_date
def read_config_file(path):
    """Load and return the JSON company-config stored at *path*."""
    with open(path) as config_file:
        return json.load(config_file)
def go_wayback(url, timestamp):
    """Query the Wayback Machine availability API for the snapshot of *url*
    closest to *timestamp* (YYYYMMDD string).

    Returns:
        (archive_url, archive_timestamp): both str, or (None, None) when no
        snapshot exists or the API request failed.
    """
    url = urllib.parse.quote(url)
    # NOTE: the query string here had been corrupted to '×tamp' (an
    # HTML-entity mangling of '&timestamp'); restored the correct parameter
    # name expected by the availability API.
    request_url = 'http://archive.org/wayback/available?url={}&timestamp={}'.format(url, timestamp)
    data = api_query(request_url)
    if not data:
        # api_query returns None on a failed request; previously this fell
        # through to data.get() and crashed with AttributeError.
        return None, None
    archive = data.get('archived_snapshots', {}).get('closest', {})
    archive_url = archive.get('url', None)
    archive_timestamp = archive.get('timestamp', None)
    return archive_url, archive_timestamp
def make_index_file_name(company):
    """Return the CSV index filename for *company*'s policy archive."""
    return '%s-privacy-policies-index.csv' % (company,)
def make_policy_file_name(company, update_date):
    """Build the snapshot filename for *company* on *update_date*.

    Returns:
        (out_path, out): the path under POLICY_DIR and the relative
        '<company>/<company>-YYYY-MM-DD.txt' name.
    """
    date_tag = update_date.strftime('%Y-%m-%d')
    relative_name = '{}/{}-{}.txt'.format(company, company, date_tag)
    return os.path.join(POLICY_DIR, relative_name), relative_name
def process_policy(company, archive_url, archive_timestamp, last_date, write=True):
    """Fetch one archived policy page, extract its update date, and save it.

    Downloads *archive_url*, normalizes the page text, and tries to read the
    policy's self-declared update date. If the date equals *last_date* the
    snapshot is treated as unchanged and nothing is written; otherwise the
    cleaned text is saved (named by date, or '<timestamp>_check_date.txt'
    when no date could be detected).

    Returns:
        (update_date, out): the parsed date (or None) and the written file
        name (or None when the download failed or nothing changed).
    """
    update_date = None
    out = None
    resp = requests.get(url=archive_url)
    if resp.status_code != 200:
        print('failed to retrieve data from {0}'.format(archive_url))
    else:
        page = resp.text
        # it is debatable if this provides value
        #page = remove_wayback(page)
        # trim policy
        page = make_policy_comparable(page, POLICY_BOOKENDS, archive_timestamp)
        update_date = get_update_date(page, regex_list=REGEX_POLICY_DATE_LIST)
        # check dates
        if update_date and last_date and update_date == last_date:
            print('{} ({}) no change'.format(archive_timestamp, update_date))
        else:
            last_date = update_date
            if update_date:
                out_path, out = make_policy_file_name(company, update_date)
            else:
                # date not found: stash under a name flagged for manual review
                out = '{}_check_date.txt'.format(archive_timestamp)
                out_path = out
            if write:
                with open(out_path, 'wb') as f:
                    f.write(page.encode('UTF-8'))
                print('{} ({}) written to {}'.format(archive_timestamp, update_date, out))
    return update_date, out
def parse_args():
    """Parse command-line options and verify that the config file exists.

    Raises:
        ValueError: when the path given via -c/--config does not exist.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', action='store', type=str, required=True,
                        help='Company config file for searching')
    parser.add_argument('-a', '--abort', action='store_true', required=False,
                        help='Aborts config if cannot find date')
    parsed = parser.parse_args()
    if not os.path.exists(parsed.config):
        raise ValueError('Config {} does not exist!'.format(parsed.config))
    return parsed
def main():
    """Drive the search: read the company config, collect each distinct
    policy snapshot (either from direct 'links' or by walking a date range
    through the Wayback Machine), and write a CSV index of everything saved.
    """
    args = parse_args()
    config = read_config_file(args.config)
    print(config)
    rows = list()
    company = config.get('company')
    os.makedirs(os.path.join(POLICY_DIR, company), exist_ok=True)
    links = config.get('links', None)
    if links:
        # for when we have the direct links to previous policies
        for link in links:
            policy_date, policy_path = process_policy(company, link, 'linked', None)
            if not policy_date:
                # failed to read date from document, do we have it in config?
                # config['dates'] entries are (link, date-string) pairs
                dates = config.get('dates', list())
                for d in dates:
                    if link == d[0]:
                        policy_date = dateparser.parse(d[1])
                        print('Found date in config: {}'.format(policy_date))
                        out_path, out = make_policy_file_name(company, policy_date)
                        move(policy_path, out_path)
                        policy_path = out
                        print('Moved _check_date to {}'.format(out_path))
            row = [company, policy_date, link, policy_path]
            rows.append(row)
    configs = config.get('configs', None)
    if configs:
        for cfg in config['configs']:
            policy_url = cfg.get('url')
            date_url = cfg.get('date_url', None)
            ignores = cfg.get('ignore', list())
            print('Searching {}'.format(policy_url))
            start_cfg = cfg.get('start')
            start_date = date(start_cfg.get('year'), start_cfg.get('month'), start_cfg.get('day'))
            print('Starting with {}'.format(start_date))
            end_cfg = cfg.get('end', None)
            if end_cfg:
                end_date = date(end_cfg.get('year'), end_cfg.get('month'), end_cfg.get('day'))
            else:
                end_date = date.today()
            print('Ending with {}'.format(end_date))
            # iterate through dates, query wayback, retrieve snapshots
            # if snapshot is new, then get page source; else, continue
            snapshots = list()
            last_page = ''
            last_date = None
            # one probe per calendar month at the first of the month
            for year, month in month_year_iter(start_date.month, start_date.year, end_date.month, end_date.year):
                row = [company]
                check_date = date(year, month, 1)
                # check if snapshot exists for this date
                timestamp = check_date.strftime('%Y%m%d')
                if timestamp in ignores:
                    print('Ignoring {}'.format(timestamp))
                    continue
                archive_url, archive_timestamp = go_wayback(policy_url, timestamp)
                if archive_timestamp in snapshots:
                    # wayback returned a snapshot we already processed
                    print('{} -> {}'.format(timestamp, archive_timestamp))
                    continue
                policy_date, policy_path = process_policy(company, archive_url, archive_timestamp, last_date)
                if date_url:
                    # some websites have the update date on a different page than the privacy policy, we handle that here
                    policy_date, _ = process_policy(company, date_url, archive_timestamp, last_date, write=True)
                if not policy_date:
                    print('Check date (timestamp={}, archive={})'.format(timestamp, archive_timestamp))
                # a bit hacky but this is how we know if we are done or not
                if policy_path and '_check_date' not in policy_path:
                    # no need to save if we skip due to snapshots
                    last_date = policy_date
                    snapshots.append(archive_timestamp)
                    row.append(policy_date)
                    row.append(archive_url)
                    row.append(policy_path)
                    rows.append(row)
                elif args.abort and policy_path and '_check_date' in policy_path:
                    # we couldn't detect the date... abort
                    break
    csv_out = make_index_file_name(company)
    csv_path = os.path.join(POLICY_DIR, csv_out)
    with open(csv_path, 'w', newline='') as csvfile:
        csvwriter = csv.writer(csvfile)
        csvwriter.writerow(['company', 'policy_date', 'policy_url', 'policy_path'])
        for row in rows:
            csvwriter.writerow(row)
    print('csv index written to {}'.format(csv_out))
    return
# Script entry point.
if __name__ == '__main__':
    main()
| 35.963325 | 124 | 0.590251 |
ace1b9f9c09f54fde7b2e96e9035c284398ae73a | 1,934 | py | Python | Codechef November Long Challenge/PHCUL.py | Point-Blank-PB/Coding-Repo | fe46d88e77becd157da36214812077944aa64ec8 | [
"MIT"
] | null | null | null | Codechef November Long Challenge/PHCUL.py | Point-Blank-PB/Coding-Repo | fe46d88e77becd157da36214812077944aa64ec8 | [
"MIT"
] | 1 | 2019-12-10T17:23:48.000Z | 2019-12-10T21:15:45.000Z | Codechef November Long Challenge/PHCUL.py | Point-Blank-PB/Coding-Repo | fe46d88e77becd157da36214812077944aa64ec8 | [
"MIT"
] | 5 | 2019-11-04T12:36:29.000Z | 2019-12-10T20:24:39.000Z | # STATUS ACCEPTED AC(100%) ON PYPY 3.5
# PARTIAL ACCEPTED (50%) ON PYTHON 3.6
import math
def distance(a, b):
    """Return the Euclidean distance between 2-D points *a* and *b*.

    Each argument is an (x, y) pair.  Uses math.hypot, which is more
    numerically robust than sqrt(dx**2 + dy**2) for extreme magnitudes
    and at least as fast.
    """
    return math.hypot(a[0] - b[0], a[1] - b[1])
# One test case per iteration.  Each case: an origin point, then three point
# sets (set_1/a, set_2/b, set_3/c).  The answer is the shortest total path
# that starts at the origin and visits one point from each set, trying both
# visiting orders a->b->c and b->a->c (computed by dynamic programming over
# per-point minimum distances).
for case in range(int(input())):
    p, q = [int(x) for x in input().split()]
    origin = (p, q)
    n, m, k = [int(x) for x in input().split()]
    # Point coordinates arrive flattened (x1 y1 x2 y2 ...); re-pair them.
    set_1 = []
    temp = [int(x) for x in input().split()]
    for index in range(0, 2*n, 2):
        set_1.append((temp[index], temp[index+1]))
    set_2 = []
    temp = [int(x) for x in input().split()]
    for index in range(0, 2*m, 2):
        set_2.append((temp[index], temp[index+1]))
    set_3 = []
    temp = [int(x) for x in input().split()]
    for index in range(0, 2*k, 2):
        set_3.append((temp[index], temp[index+1]))
    # print(origin)
    # print(set_1)
    # print(set_2)
    # print(set_3)
    # considering path x -> a -> b -> c
    # min_x_to_a[i]: shortest path origin -> set_1[i]
    min_x_to_a = [0] * n
    for i in range(n):
        min_x_to_a[i] = distance(origin, set_1[i])
    # min_x_to_b[i]: shortest path origin -> (some a) -> set_2[i]
    min_x_to_b = [0] * m
    for i in range(m):
        min_x_to_b[i] = min([distance(set_2[i], set_1[j]) + min_x_to_a[j] for j in range(n)])
    # min_x_to_c[i]: shortest path origin -> a -> b -> set_3[i]
    min_x_to_c = [0] * k
    for i in range(k):
        min_x_to_c[i] = min([distance(set_3[i], set_2[j]) + min_x_to_b[j] for j in range(m)])
    #
    # print(min_x_to_a)
    # print(min_x_to_b)
    # print(min_x_to_c)
    min_d1 = min(min_x_to_c)
    # considering path x -> b -> a -> c (same DP with sets 1 and 2 swapped)
    min_x_to_b = [0] * m
    for i in range(m):
        min_x_to_b[i] = distance(origin, set_2[i])
    min_x_to_a = [0] * n
    for i in range(n):
        min_x_to_a[i] = min([distance(set_1[i], set_2[j]) + min_x_to_b[j] for j in range(m)])
    min_x_to_c = [0] * k
    for i in range(k):
        min_x_to_c[i] = min([distance(set_3[i], set_1[j]) + min_x_to_a[j] for j in range(n)])
    # print(min_x_to_a)
    # print(min_x_to_b)
    # print(min_x_to_c)
    min_d2 = min(min_x_to_c)
    # Best of both visiting orders is the answer for this case.
    print(min(min_d1, min_d2))
ace1ba454d01e65aba0a30ffbe83134dab323d40 | 8,601 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_09_01/operations/_load_balancer_probes_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_09_01/operations/_load_balancer_probes_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_09_01/operations/_load_balancer_probes_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
# NOTE: auto-generated by AutoRest (see file header); manual edits here will
# be lost when the client is regenerated.
class LoadBalancerProbesOperations(object):
    """LoadBalancerProbesOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2019_09_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list(
        self,
        resource_group_name,  # type: str
        load_balancer_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.LoadBalancerProbeListResult"]
        """Gets all the load balancer probes.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param load_balancer_name: The name of the load balancer.
        :type load_balancer_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either LoadBalancerProbeListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_09_01.models.LoadBalancerProbeListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.LoadBalancerProbeListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-09-01"
        accept = "application/json"
        # Build the GET request for the first page (full URL) or for a
        # continuation page (opaque next_link returned by the service).
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        # Deserialize one page into (next_link, iterator-of-items).
        def extract_data(pipeline_response):
            deserialized = self._deserialize('LoadBalancerProbeListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        # Fetch one page, raising on any non-200 response.
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        # Lazy paging: pages are fetched only as the caller iterates.
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/probes'}  # type: ignore
    def get(
        self,
        resource_group_name,  # type: str
        load_balancer_name,  # type: str
        probe_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.Probe"
        """Gets load balancer probe.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param load_balancer_name: The name of the load balancer.
        :type load_balancer_name: str
        :param probe_name: The name of the probe.
        :type probe_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Probe, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2019_09_01.models.Probe
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Probe"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-09-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
            'probeName': self._serialize.url("probe_name", probe_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('Probe', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/probes/{probeName}'}  # type: ignore
ace1ba6f9b65761382fd6c1e9852ca3415ab0564 | 7,218 | py | Python | factory-ai-vision/EdgeSolution/modules/VisionSampleModule/utility.py | piyushka17/azure-intelligent-edge-patterns | 0d088899afb0022daa2ac434226824dba2c997c1 | [
"MIT"
] | 176 | 2019-07-03T00:20:15.000Z | 2022-03-14T07:51:22.000Z | factory-ai-vision/EdgeSolution/modules/VisionSampleModule/utility.py | piyushka17/azure-intelligent-edge-patterns | 0d088899afb0022daa2ac434226824dba2c997c1 | [
"MIT"
] | 121 | 2019-06-24T20:47:27.000Z | 2022-03-28T02:16:18.000Z | factory-ai-vision/EdgeSolution/modules/VisionSampleModule/utility.py | piyushka17/azure-intelligent-edge-patterns | 0d088899afb0022daa2ac434226824dba2c997c1 | [
"MIT"
] | 144 | 2019-06-18T18:48:43.000Z | 2022-03-31T12:14:46.000Z | # Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for
# full license information.
import time
import os
import subprocess as sp
import sys
import shutil
import socket
import logging
import json
import urllib.request as urllib2
from urllib.request import urlopen
import glob
import zipfile
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
logger.disabled = False
# Returns the device's outbound IP address, or the docker bridge address
# 172.17.0.1 when detection fails / the detected address is a 172.x address.
def getWlanIp():
    """Best-effort detection of the host's outbound IP address.

    Opens a UDP socket "toward" a non-routable address (no packet is
    actually sent) and reads back the local address the kernel picked.
    Returns '172.17.0.1' on any socket error, or when the detected
    address starts with '172' (treated as a docker interface here).
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # doesn't even have to be reachable; connect() only selects a route
        s.connect(('10.255.255.255', 1))
        IP = s.getsockname()[0]
        if IP.split('.')[0] == '172':
            print("Ip address detected is :: " + IP )
            IP = '172.17.0.1'
            print("Ip address changed to :: " + IP + "to avoid docker interface")
        print("Ip address detected is :: " + IP )
    except OSError:
        # Narrowed from a bare ``except``: only socket/OS failures should
        # trigger the fallback, not e.g. KeyboardInterrupt or SystemExit.
        IP = '172.17.0.1'
    finally:
        s.close()
    return IP
# Ensure *folder* exists and is empty, clearing any previous model files.
def prepare_folder(folder):
    """Recreate *folder* as an empty directory.

    If the directory already exists its contents are removed first, so
    stale model files never survive a redeploy.
    """
    print("preparing: %s" % folder)
    if os.path.isdir(folder):
        print ("found directory cleaning it before copying new files...")
        shutil.rmtree(folder, ignore_errors=True)
    # Both branches of the original ended with the same makedirs call;
    # a single unconditional call covers create-fresh and recreate alike.
    os.makedirs(folder, exist_ok=True)
def WaitForFileDownload(FileName):
    """Block until *FileName* can be opened, polling once per second."""
    while True:
        try:
            # Probe by opening: succeeds only once the file is readable.
            with open(FileName):
                pass
        except IOError:
            time.sleep(1)
        else:
            break
    print("Got it ! File Download Complete !")
def get_file(url, dst_folder="/app/vam_model_folder"):
    """Download *url* into *dst_folder*, keeping the remote file name.

    Follows redirects first (via urlopen) so the file name is taken from
    the final URL, then blocks until the download is readable on disk.
    Returns True on success, False if no file name could be derived.
    """
    #adding code to fix issue where the file name may not be part of url details here
    remotefile = urlopen(url)
    # Use the post-redirect URL so the real file name is extracted.
    myurl = remotefile.url
    FileName = myurl.split("/")[-1]
    if FileName:
        # find root folders
        dirpath = os.getcwd()
        #src = os.path.join(dirpath,"model")
        dst = os.path.abspath(dst_folder)
        print("Downloading File ::" + FileName)
        urllib2.urlretrieve(url, filename=(os.path.join(dst,FileName)))
        WaitForFileDownload(os.path.join(dst,FileName))
        return True
    else:
        print("Cannot extract file name from URL")
        return False
def get_file_zip(url, dst_folder="model"):
    """Download a zip archive from *url* and extract it into <cwd>/<dst_folder>.

    The destination folder is wiped (prepare_folder) before downloading.
    Returns the unzip result (True) on success, False if no file name
    could be derived from the (post-redirect) URL.
    """
    #adding code to fix issue where the file name may not be part of url details here
    #
    remotefile = urlopen(url)
    # Use the post-redirect URL so the real file name is extracted.
    myurl = remotefile.url
    FileName = myurl.split("/")[-1]
    if FileName:
        # find root folders
        dirpath = os.getcwd()
        dirpath_file = os.path.join(dirpath,dst_folder)
        src = os.path.abspath(dirpath_file)
        src_file_path = os.path.join(src,FileName)
        logger.info("location to download is ::" + src_file_path)
        prepare_folder(dirpath_file)
        print("\n Downloading File ::" + FileName)
        urllib2.urlretrieve(url, filename=src_file_path)
        WaitForFileDownload(src_file_path)
        result=unzip_and_move(src_file_path, dst_folder)
        return result
    else:
        print("Cannot extract file name from URL")
        return False
def unzip_and_move(file_path=None, dst_folder="model"):
    """Extract the zip at *file_path* into <cwd>/<dst_folder>; returns True."""
    target_dir = os.path.join(os.getcwd(), dst_folder)
    # Context manager guarantees the archive handle is closed.
    with zipfile.ZipFile(file_path, 'r') as archive:
        archive.extractall(target_dir)
    logger.info("files unzipped to : " + target_dir)
    #transferdlc(True,"twin_provided_model")
    return True
# This function pushes a new model to the device location /data/misc/camera,
# mounted in the container at /app/vam_model_folder.
def transferdlc(pushmodel=None,src_folder="model"):
    """Copy the model config, .dlc and label files to /app/vam_model_folder.

    When *pushmodel* is falsy, the transfer is skipped if the device
    already has a .dlc model; otherwise the transfer always happens.
    Raises ValueError (via find_file) if any expected file is not found
    exactly once under <cwd>/<src_folder>.
    """
    #if pushmodel.find("True") == -1 :
    if not pushmodel:
        # checking and transferring model if the devie does not have any tflite or .dlc file on it..
        if(checkmodelexist()):
            print("Not transferring model as transfer from container is disabled by settting pushmodel to False")
            return
        else:
            print(" transferring model as the device does not have any model on it even if pushmodel is set to False")
    else:
        print("transferring model ,label and va config file as set in create option with -p %s passed" % pushmodel)
    # find root folders
    dirpath = os.getcwd()
    src = os.path.join(dirpath,src_folder)
    dst = os.path.abspath("/app/vam_model_folder")
    # find model files
    vamconfig_file = find_file(src, "va-snpe-engine-library_config.json")
    with open(vamconfig_file) as f:
        vamconfig = json.load(f)
    # DLC_NAME / LABELS_NAME keys name the other two files to transfer.
    dlc_file = find_file(src, vamconfig["DLC_NAME"])
    label_file = find_file(src, vamconfig["LABELS_NAME"])
    files = [vamconfig_file, dlc_file, label_file]
    print("Found model files: %s in %s" % (files, src))
    # clean up
    prepare_folder(dst)
    # copy across
    for filename in files:
        print("transfering file :: " + filename)
        shutil.copy(os.path.join(filename),dst)
def checkmodelexist():
    """Return True if at least one .dlc model is present on the device."""
    matches = glob.glob('/app/vam_model_folder/*.dlc')
    if not matches:
        print("No dlc or tflit model on device")
        return False
    return True
def send_system_cmd(cmd):
    """Run *cmd* through the system shell and print its exit status.

    SECURITY NOTE(review): ``shell=True`` executes *cmd* via the shell;
    never pass untrusted input here (command-injection risk).
    """
    print('Command we are sending is ::' + cmd)
    returnedvalue = sp.call(cmd,shell=True)
    print('returned-value is:' + str(returnedvalue))
# Locate exactly one file named *suffix* anywhere under *input_path*.
def find_file(input_path, suffix):
    """Return the path of the single file named *suffix* below *input_path*.

    Raises ValueError unless exactly one match is found.
    """
    files = [os.path.join(dp, f) for dp, dn, filenames in os.walk(input_path) for f in filenames if f == suffix]
    if len(files) != 1:
        raise ValueError("Expecting a file ending with %s file as input. Found %s in %s. Files: %s" % (suffix, len(files), input_path, files))
    # BUGFIX: os.walk already yields paths rooted at input_path, so the old
    # ``os.path.join(input_path, files[0])`` duplicated the prefix whenever
    # input_path was a relative path (it was a no-op only for absolute paths).
    return files[0]
# Get the model path from the configuration file; only used by the Azure
# Machine Learning service path.
def getmodelpath(model_name):
    """Resolve *model_name* to a filesystem path via model_config_map.json.

    Reads model_config_map.json next to the running script; the selected
    model's colon-separated ``id`` is converted into a relative path.
    If *model_name* is None an arbitrary model is chosen (popitem).
    Raises ValueError when no models are configured and KeyError when
    *model_name* is not in the map.
    """
    with open(os.path.join(sys.path[0],'model_config_map.json')) as file:
        data = json.load(file)
    print(data)
    #toDo Change the hardcoded QCOmDlc below with value read
    #print(data['models'][0])
    models = data['models']
    if len(models.keys()) == 0:
        raise ValueError("no models found")
    if model_name is None:
        # default to the first model
        model_name, model_data = models.popitem()
    else:
        model_data = models[model_name]
    # construct the path
    model_id = model_data['id']
    print("using model %s" % model_id)
    # e.g. "a:b:c" -> os.path.join("a", "b", "c")
    mydata = model_id.split(":")
    model_path = os.path.join(*mydata)
    return model_path
return model_path
#if __name__ == "__main__":
#get_file_zip("https://yadavsrorageaccount01.blob.core.windows.net/visionstoragecontainer/a5719e7549c044fcaf83381a22e3d0b2.VAIDK.zip","twin_provided_model")
| 34.869565 | 161 | 0.651843 |
ace1bb4cda8e6f5fcff58013529fd39c7fd9f857 | 3,995 | py | Python | app/recipe/tests/test_tags_api.py | clintbugsdev/recipe-app-api | 4628704aaf59257cd0cbe0b6daa1031944cd7331 | [
"MIT"
] | null | null | null | app/recipe/tests/test_tags_api.py | clintbugsdev/recipe-app-api | 4628704aaf59257cd0cbe0b6daa1031944cd7331 | [
"MIT"
] | null | null | null | app/recipe/tests/test_tags_api.py | clintbugsdev/recipe-app-api | 4628704aaf59257cd0cbe0b6daa1031944cd7331 | [
"MIT"
] | null | null | null | from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Tag, Recipe
from recipe.serializers import TagSerializer
TAGS_URL = reverse('recipe:tag-list')
class PublicTagsApiTests(TestCase):
    """Tests for the tags API when the caller is not authenticated."""
    def setUp(self):
        self.client = APIClient()
    def test_login_required(self):
        """Unauthenticated requests to the tag list must be rejected."""
        response = self.client.get(TAGS_URL)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateTagsApiTests(TestCase):
    """Test the authorized user tags API"""
    def setUp(self):
        # Every test runs as this freshly-created, authenticated user.
        self.user = get_user_model().objects.create_user(
            'test@recipeappdev.com',
            'password123'
        )
        self.client = APIClient()
        self.client.force_authenticate(self.user)
    def test_retrieve_tags(self):
        """Test retrieving tags"""
        Tag.objects.create(user=self.user, name='Vegan')
        Tag.objects.create(user=self.user, name='Dessert')
        res = self.client.get(TAGS_URL)
        # API is expected to return tags sorted by name, descending.
        tags = Tag.objects.all().order_by('-name')
        serializer = TagSerializer(tags, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)
    def test_tags_limited_to_user(self):
        """Test that tags returned are for the authenticated user"""
        user2 = get_user_model().objects.create_user(
            'other@recipeappdev.com',
            'testpass'
        )
        # user2's tag must NOT appear in the authenticated user's results.
        Tag.objects.create(user=user2, name='Fruity')
        tag = Tag.objects.create(user=self.user, name='Comfort Food')
        res = self.client.get(TAGS_URL)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(len(res.data), 1)
        self.assertEqual(res.data[0]['name'], tag.name)
    def test_create_tag_successful(self):
        """Test creating a new tag"""
        payload = {'name': 'Test tag'}
        self.client.post(TAGS_URL, payload)
        # Verify creation through the ORM rather than the response body.
        exists = Tag.objects.filter(
            user=self.user,
            name=payload['name']
        ).exists()
        self.assertTrue(exists)
    def test_create_tag_invalid(self):
        """Test creating a new tag with invalid payload"""
        payload = {'name': ''}
        res = self.client.post(TAGS_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
    def test_retrieve_tags_assigned_to_recipes(self):
        """Test filtering tags by those assigned to recipes"""
        tag1 = Tag.objects.create(user=self.user, name='Breakfast')
        tag2 = Tag.objects.create(user=self.user, name='Lunch')
        recipe = Recipe.objects.create(
            title='Coriander eggs on toast',
            time_minutes=10,
            price=5.00,
            user=self.user,
        )
        # Only tag1 is attached to a recipe, so only it should be returned.
        recipe.tags.add(tag1)
        res = self.client.get(TAGS_URL, {'assigned_only': 1})
        serializer1 = TagSerializer(tag1)
        serializer2 = TagSerializer(tag2)
        self.assertIn(serializer1.data, res.data)
        self.assertNotIn(serializer2.data, res.data)
    def test_retrieve_tags_assigned_unique(self):
        """Test filtering tags by assigned returns unique items"""
        tag = Tag.objects.create(user=self.user, name='Breakfast')
        Tag.objects.create(user=self.user, name='Lunch')
        recipe1 = Recipe.objects.create(
            title='Pancakes',
            time_minutes=5,
            price=3.00,
            user=self.user
        )
        recipe1.tags.add(tag)
        recipe2 = Recipe.objects.create(
            title='Porridge',
            time_minutes=3,
            price=2.00,
            user=self.user
        )
        # Same tag on two recipes must still appear only once in the result.
        recipe2.tags.add(tag)
        res = self.client.get(TAGS_URL, {'assigned_only': 1})
        self.assertEqual(len(res.data), 1)
| 31.706349 | 71 | 0.63229 |
ace1bbbff69801928594c29d8b8fe6feb676145b | 1,328 | py | Python | translation_station/translation_station.py | d4rkr41n/python | 2ec923b258647bb9dcc298c273e6b0fddb51d634 | [
"MIT"
] | null | null | null | translation_station/translation_station.py | d4rkr41n/python | 2ec923b258647bb9dcc298c273e6b0fddb51d634 | [
"MIT"
] | null | null | null | translation_station/translation_station.py | d4rkr41n/python | 2ec923b258647bb9dcc298c273e6b0fddb51d634 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding utf8 -*-
import os
import subprocess
import getpass
userprofile = str(os.environ['USERPROFILE'])
def print_logo():
    """Print the ASCII-art application banner to stdout."""
    banner = (
        " _____                    _       _       ",
        "|_   _|                  | |     | |      ",
        "  | |_ __ __ _ _ __  ___| | __ _| |_ ___ ",
        "  | | '__/ _` | '_ \/ __| |/ _` | __/ _ |",
        "  | | | | (_| | | | \__ \ | (_| | || __/",
        "  \_/_| \__,_|_| |_|___/_|\__,_|\__\___|",
        "=========================================",
    )
    # One print of the joined lines emits exactly the same bytes as
    # printing each line individually.
    print("\n".join(banner))
def program_map(action):
    """Map a menu choice to the script it launches; "none" when unknown."""
    return {
        "1": "caesar_cipher.py",
        "q": "quit",
    }.get(action, "none")
# Interactive entry point: show the menu, validate the choice, then launch
# the selected cipher script (or exit on "q").
print_logo()
print("Choose what type of encryption or decryption you desire")
print("1 : Caesar Cipher / Shift Cipher")
print("q : Quit")
action = input(": ")
# Search the dictionary for the program name
action = program_map(action)
# Validation: re-prompt until the choice maps to a known program.
# (program_map never returns None, so the second condition is defensive.)
while(action == "none" or action == None):
    print(action + ", please pick an available number.")
    action = input(": ")
    action = program_map(action)
# Launch the dictionary-selected program; "quit" falls through and exits.
if(action != "quit"):
    os.system("python " + userprofile + "/Documents/python/translation_station/" + action)
| 29.511111 | 90 | 0.509789 |
ace1bbca02fbac2aee64fe3de7331b5f38922b07 | 5,894 | py | Python | otcextensions/tests/functional/sdk/auto_scaling/v1/test_instance.py | artem-lifshits/python-otcextensions | 2021da124f393e0429dd5913a3bc635e6143ba1e | [
"Apache-2.0"
] | 10 | 2018-03-03T17:59:59.000Z | 2020-01-08T10:03:00.000Z | otcextensions/tests/functional/sdk/auto_scaling/v1/test_instance.py | artem-lifshits/python-otcextensions | 2021da124f393e0429dd5913a3bc635e6143ba1e | [
"Apache-2.0"
] | 208 | 2020-02-10T08:27:46.000Z | 2022-03-29T15:24:21.000Z | otcextensions/tests/functional/sdk/auto_scaling/v1/test_instance.py | artem-lifshits/python-otcextensions | 2021da124f393e0429dd5913a3bc635e6143ba1e | [
"Apache-2.0"
] | 15 | 2020-04-01T20:45:54.000Z | 2022-03-23T12:45:43.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import uuid
from openstack import exceptions
from openstack import _log
from openstack import utils
from otcextensions.tests.functional.sdk.auto_scaling.v1 import base
_logger = _log.setup_logging('openstack')
class TestInstance(base.BaseASTest):
    """Functional tests for auto-scaling instance lookup.

    setUp provisions a real AS config + AS group and waits for one scaled
    instance; tearDown removes everything.  These tests therefore need a
    live OpenTelekomCloud endpoint and are order-sensitive.
    """
    # Random suffix so parallel runs do not collide on resource names.
    UUID = uuid.uuid4().hex[:9]
    AS_GROUP_NAME = "test-as-group-" + UUID
    AS_CONFIG_NAME = "test-as-config-" + UUID
    DESIRE_INSTANCE_NUMBER = 1
    MIN_INSTANCE_NUMBER = 0
    MAX_INSTANCE_NUMBER = 1
    FLAVOR = "s3.medium.1"
    IMAGE_NAME = "Standard_Ubuntu_18.04_latest"
    DISK_SIZE = 4
    DISK_VOL_TYPE = "SATA"
    DISK_TYPE = "SYS"
    def setUp(self):
        super(TestInstance, self).setUp()
        self._initialize_as_group_with_instance()
    def tearDown(self):
        # Best-effort cleanup: log SDK failures instead of masking the
        # original test failure.
        try:
            self._deinitialize_as_group_with_instance()
        except exceptions.SDKException as e:
            _logger.warning('Got exception during clearing resources %s'
                            % e.message)
        super(TestInstance, self).tearDown()
    def _get_image_id(self):
        """Resolve IMAGE_NAME to an image id (None if not found)."""
        image = self.conn.compute.find_image(
            name_or_id=self.IMAGE_NAME
        )
        if image:
            return image.id
    def _get_default_sec_group(self):
        """Return the id of the tenant's "default" security group."""
        sec_group = self.conn.network.find_security_group(
            name_or_id="default"
        )
        if sec_group:
            return sec_group.id
    def _create_as_config(self, image_id, sec_group_id):
        """Create the auto-scaling launch configuration for the group."""
        config_attrs = {
            "name": self.AS_CONFIG_NAME,
            "instance_config": {
                "flavorRef": self.FLAVOR,
                "imageRef": image_id,
                "disk": [{
                    'size': self.DISK_SIZE,
                    'volume_type': self.DISK_VOL_TYPE,
                    'disk_type': self.DISK_TYPE
                }],
                "key_name": self.KP_NAME,
                "security_groups": [{
                    "id": sec_group_id
                }]
            }
        }
        return self.conn.auto_scaling.create_config(**config_attrs)
    def _delete_as_config(self, as_config):
        return self.conn.auto_scaling.delete_config(
            config=as_config
        )
    def _create_as_group(self, as_config_id, router_id, network_id):
        """Create, resume and wait for an auto-scaling group."""
        group_attrs = {
            "scaling_group_name": self.AS_GROUP_NAME,
            "scaling_configuration_id": as_config_id,
            "desire_instance_number": self.DESIRE_INSTANCE_NUMBER,
            "min_instance_number": self.MIN_INSTANCE_NUMBER,
            "max_instance_number": self.MAX_INSTANCE_NUMBER,
            "vpc_id": router_id,
            "networks": [{
                "id": network_id
            }]
        }
        as_group = self.conn.auto_scaling.create_group(**group_attrs)
        # Groups are created paused; resume so instances start scaling.
        self.conn.auto_scaling.resume_group(as_group)
        return self.conn.auto_scaling.wait_for_group(as_group)
    def _delete_as_group(self, as_group):
        """Pause, delete and wait for removal of the group."""
        # Deletion is slower than creation; allow double the test timeout.
        timeout = 2 * int(os.environ.get('OS_TEST_TIMEOUT'))
        self.conn.auto_scaling.pause_group(as_group)
        self.conn.auto_scaling.delete_group(
            group=as_group
        )
        self.conn.auto_scaling.wait_for_delete_group(
            group=as_group, wait=timeout)
    def _wait_for_instance(self, as_group):
        """Poll until the group has scaled to MAX_INSTANCE_NUMBER instances."""
        timeout = int(os.environ.get('OS_TEST_TIMEOUT'))
        for count in utils.iterate_timeout(
                timeout=timeout,
                message="Timeout waiting for instance"
        ):
            instances = list(self.conn.auto_scaling.instances(
                group=as_group
            ))
            if len(instances) == self.MAX_INSTANCE_NUMBER and instances[0].id:
                return self.conn.auto_scaling.wait_for_instance(instances[0])
    def _delete_instance(self, instance):
        """Remove the instance from the group and delete the server."""
        timeout = 2 * int(os.environ.get('OS_TEST_TIMEOUT'))
        self.conn.auto_scaling.remove_instance(
            instance=instance,
            delete_instance=True
        )
        self.conn.auto_scaling.wait_for_delete_instance(
            instance=instance,
            wait=timeout
        )
    def _initialize_as_group_with_instance(self):
        # Order matters: config -> group -> wait for scaled instance.
        self.as_config = self._create_as_config(
            self._get_image_id(), self._get_default_sec_group()
        )
        self.as_group = self._create_as_group(
            self.as_config.id, self.infra.get("router_id"),
            self.infra.get("network_id")
        )
        self.as_instance = self._wait_for_instance(
            self.as_group
        )
    def _deinitialize_as_group_with_instance(self):
        # Tear down in reverse creation order; each step is guarded in
        # case setUp failed part-way through.
        if self.as_instance:
            self._delete_instance(self.as_instance)
        if self.as_group:
            self._delete_as_group(self.as_group)
        if self.as_config:
            self._delete_as_config(self.as_config)
    def test_find_instance_by_id(self):
        result = self.conn.auto_scaling.find_instance(
            name_or_id=self.as_instance.id,
            group=self.as_group
        )
        self.assertIsNotNone(result)
        self.assertEqual(self.as_instance.id, result.id)
    def test_find_instance_by_name(self):
        result = self.conn.auto_scaling.find_instance(
            name_or_id=self.as_instance.name,
            group=self.as_group
        )
        self.assertIsNotNone(result)
        self.assertEqual(self.as_instance.name, result.name)
ace1bdc2600315f9057db6397e617233136e641f | 3,979 | py | Python | cumulusci/core/keychain/EncryptedFileProjectKeychain.py | justindonnaruma/CumulusCI | cc097c1f6f102a104f83ad9a9684af9d6bc0af31 | [
"BSD-3-Clause"
] | null | null | null | cumulusci/core/keychain/EncryptedFileProjectKeychain.py | justindonnaruma/CumulusCI | cc097c1f6f102a104f83ad9a9684af9d6bc0af31 | [
"BSD-3-Clause"
] | 2 | 2021-03-25T23:56:47.000Z | 2021-03-31T19:52:05.000Z | cumulusci/core/keychain/EncryptedFileProjectKeychain.py | justindonnaruma/CumulusCI | cc097c1f6f102a104f83ad9a9684af9d6bc0af31 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import print_function
import os
from cumulusci.core.exceptions import OrgNotFound
from cumulusci.core.exceptions import ServiceNotConfigured
from cumulusci.core.keychain import BaseEncryptedProjectKeychain
class EncryptedFileProjectKeychain(BaseEncryptedProjectKeychain):
    """ An encrypted project keychain that stores in the project's local directory """

    @property
    def config_local_dir(self):
        """Absolute path of the global (per-user) config directory."""
        return os.path.join(
            os.path.expanduser('~'),
            self.project_config.global_config_obj.config_local_dir,
        )

    @property
    def project_local_dir(self):
        """Absolute path of the project-specific local config directory."""
        return self.project_config.project_local_dir

    def _load_files(self, dirname, extension, key):
        """Load every ``*extension`` file in *dirname* into self.config[key].

        Each file's raw (encrypted) contents are stored under the file's
        base name, e.g. ``dev.org`` -> self.config['orgs']['dev'].
        """
        for item in sorted(os.listdir(dirname)):
            if item.endswith(extension):
                with open(os.path.join(dirname, item), 'r') as f_item:
                    config = f_item.read()
                name = item.replace(extension, '')
                if key not in self.config:
                    # BUGFIX: this was initialized to a list ([]), which made
                    # the string-keyed assignment below raise TypeError the
                    # first time a key was missing.  It must be a dict.
                    self.config[key] = {}
                self.config[key][name] = config

    def _load_file(self, dirname, filename, key):
        """Load a single file's contents into self.config[key], if it exists."""
        full_path = os.path.join(dirname, filename)
        if not os.path.exists(full_path):
            return
        with open(full_path, 'r') as f_item:
            config = f_item.read()
        self.config[key] = config

    def _load_app(self):
        # Project-level file (loaded second) overrides the global one.
        self._load_file(self.config_local_dir, 'connected.app', 'app')
        self._load_file(self.project_local_dir, 'connected.app', 'app')

    def _load_orgs(self):
        # Project-level orgs (loaded second) override same-named global orgs.
        self._load_files(self.config_local_dir, '.org', 'orgs')
        self._load_files(self.project_local_dir, '.org', 'orgs')

    def _load_services(self):
        # Project-level services override same-named global services.
        self._load_files(self.config_local_dir, '.service', 'services')
        self._load_files(self.project_local_dir, '.service', 'services')

    def _remove_org(self, name, global_org):
        """Delete the encrypted org file and reload the org cache.

        Raises OrgNotFound (with a scope-specific hint) when the file does
        not exist in the requested scope.
        """
        if global_org:
            full_path = os.path.join(self.config_local_dir, '{}.org'.format(name))
        else:
            full_path = os.path.join(self.project_local_dir, '{}.org'.format(name))
        if not os.path.exists(full_path):
            kwargs = {'name': name}
            if not global_org:
                raise OrgNotFound('Could not find org named {name} to delete.  Deleting in project org mode.  Is {name} a global org?'.format(**kwargs))
            raise OrgNotFound('Could not find org named {name} to delete.  Deleting in global org mode.  Is {name} a project org instead of a global org?'.format(**kwargs))

        os.remove(full_path)
        self._load_orgs()

    def _set_encrypted_org(self, name, encrypted, global_org):
        """Write the encrypted org payload to the appropriate scope's file."""
        if global_org:
            filename = os.path.join(
                self.config_local_dir, '{}.org'.format(name))
        else:
            filename = os.path.join(
                self.project_local_dir, '{}.org'.format(name))
        with open(filename, 'wb') as f_org:
            f_org.write(encrypted)

    def _set_encrypted_service(self, name, encrypted, project):
        """Write the encrypted service payload to the appropriate scope's file."""
        if project:
            filename = os.path.join(
                self.project_local_dir, '{}.service'.format(name))
        else:
            filename = os.path.join(
                self.config_local_dir, '{}.service'.format(name))
        with open(filename, 'wb') as f_service:
            f_service.write(encrypted)

    def _raise_org_not_found(self, name):
        raise OrgNotFound(
            'Org information could not be found.  Expected to find encrypted file at {}/{}.org'.format(
                self.project_local_dir,
                name
            )
        )

    def _raise_service_not_configured(self, name):
        raise ServiceNotConfigured(
            'Service configuration could not be found.  Expected to find encrypted file at {}/{}.org'.format(
                self.project_local_dir,
                name
            )
        )
| 39.009804 | 172 | 0.614979 |
ace1bf4d404da751ae1e22a52979cb63b0be5729 | 3,159 | py | Python | packages/xcodeml-tools/package.py | BenWeber42/spack-mch | f7c52c886c70cc5aef0845fb0f7c1f5dd0478e9a | [
"MIT"
] | 1 | 2020-11-16T22:32:35.000Z | 2020-11-16T22:32:35.000Z | packages/xcodeml-tools/package.py | BenWeber42/spack-mch | f7c52c886c70cc5aef0845fb0f7c1f5dd0478e9a | [
"MIT"
] | 216 | 2020-03-21T20:14:13.000Z | 2021-06-18T15:49:43.000Z | packages/xcodeml-tools/package.py | BenWeber42/spack-mch | f7c52c886c70cc5aef0845fb0f7c1f5dd0478e9a | [
"MIT"
] | 9 | 2020-03-20T08:43:33.000Z | 2021-02-10T15:15:29.000Z | __author__ = "Mikhail Zhigun"
__copyright__ = "Copyright 2020, MeteoSwiss"
from spack import *
import os, subprocess
_GIT = 'https://github.com/claw-project/xcodeml-tools.git'
def _get_latest_commit_id() -> str:
    """Return the commit hash of HEAD on the remote repository.

    Runs ``git ls-remote <url> HEAD`` with a 5 second timeout.

    :raises RuntimeError: if git exits with a non-zero status.
    :raises subprocess.TimeoutExpired: if the remote does not answer in time.
    """
    git_url = _GIT
    args = ['git', 'ls-remote', git_url, 'HEAD']
    timeout = 5  # seconds
    p = subprocess.run(args=args, timeout=timeout,
                       stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    s = p.stdout.decode('utf-8')
    if p.returncode != 0:
        # A real exception instead of `assert`: assertions are stripped
        # when Python runs with -O, silently hiding the failure.
        raise RuntimeError(f'Failed to connect to {git_url} error: {s}')
    return s.strip().split()[0]


# NOTE(review): evaluated at import time, so merely loading this package
# file performs a network round-trip — confirm this is intended.
_LAST_COMMIT = _get_latest_commit_id()
class XcodemlTools(AutotoolsPackage):
    """Set of tools for translating C and Fortran code to XCodeML and back """

    @property
    def latest_commit(self):
        # Commit hash fetched from the remote at module import time.
        return _LAST_COMMIT

    homepage = 'https://omni-compiler.org/manual/en/'
    git = _GIT
    maintainers = ['FrostyMike']
    # 'latest' tracks whatever HEAD was when this module was imported.
    version('92a35f9', branch='master', commit='92a35f9dbe3601f6177b099825d318cbc3285945')
    version('latest', branch='master', commit=_LAST_COMMIT)
    depends_on('autoconf@2.69:')
    depends_on('m4')
    depends_on('automake')
    depends_on('libxml2%gcc')
    depends_on('java@8:')
    depends_on('bison%gcc')
    depends_on('flex%gcc')
    depends_on('libtool')

    def configure_args(self):
        """Assemble the ./configure flags: libxml2 location, Java tool
        paths, and the version tag matching the selected spec version."""
        args = ['--prefix=' + self.prefix,
                '--with-force-explicit-lxml2',
                '--without-native-fortran-compiler']
        libxml2_prefix = self.spec['libxml2'].prefix
        if libxml2_prefix == '/usr':
            # System libxml2: headers and the shared object must be
            # pointed to explicitly (lib vs lib64 varies by distro).
            args.append('--with-libxml2-include=/usr/include/libxml2')
            for lib_dir_name in ('lib', 'lib64'):
                lib_path = '/usr/%s/libxml2.so' % lib_dir_name
                if os.path.isfile(lib_path):
                    args.append('--with-libxml2-lib=/usr/%s' % lib_dir_name)
        else:
            args.append('--with-libxml2=' + libxml2_prefix)
        java_prefix = self.spec['java'].prefix
        path = {'java': 'bin/java',
                'javac': 'bin/javac',
                'jar': 'bin/jar'}
        # Resolve each JDK tool to an absolute path and verify it exists.
        for name, rel_path in path.items():
            abs_path = os.path.normpath(os.path.join(java_prefix, rel_path))
            assert os.path.exists(abs_path) and os.path.isfile(abs_path), '%s not found at "%s"' % (name, abs_path)
            path[name] = abs_path
        args.append('--with-java=' + path['java'])
        args.append('--with-javac=' + path['javac'])
        args.append('--with-jar=' + path['jar'])
        version_name = self.version
        version_tag = self.versions[version_name].get('commit', None)
        if version_tag is not None:
            args.append('--with-version-tag=' + version_tag)
        return args

    def setup_environment(self, spack_env, run_env):
        # Force bison's yacc-compatibility mode during the build.
        spack_env.set('YACC', 'bison -y')

    def setup_run_environment(self, run_env):
        # Expose the JDK's java binary to the installed tools via OMNI_JAVA.
        java_prefix = self.spec['java'].prefix
        abs_path = os.path.normpath(os.path.join(java_prefix, 'bin/java'))
        assert os.path.exists(abs_path) and os.path.isfile(abs_path), 'java not found at "%s"' % abs_path
        run_env.set('OMNI_JAVA', abs_path)
| 37.607143 | 115 | 0.617284 |
ace1bff19ece0016d9d17480592687f1a93a803c | 10,541 | py | Python | torchtext/experimental/datasets/language_modeling.py | dongreenberg/text | 77c732ab816a41f52259e72430aa837910cc6948 | [
"BSD-3-Clause"
] | 1 | 2021-05-06T15:49:58.000Z | 2021-05-06T15:49:58.000Z | torchtext/experimental/datasets/language_modeling.py | dongreenberg/text | 77c732ab816a41f52259e72430aa837910cc6948 | [
"BSD-3-Clause"
] | null | null | null | torchtext/experimental/datasets/language_modeling.py | dongreenberg/text | 77c732ab816a41f52259e72430aa837910cc6948 | [
"BSD-3-Clause"
] | 1 | 2021-07-14T21:06:11.000Z | 2021-07-14T21:06:11.000Z | import torch
from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator
from torchtext.experimental.datasets.raw import language_modeling as raw
def build_vocab(data, transforms):
    """Build a Vocab by running *transforms* over every line of *data*."""
    tokenized_lines = (transforms(line) for line in data)
    return build_vocab_from_iterator(tokenized_lines, len(data))
class LanguageModelingDataset(torch.utils.data.Dataset):
    """Language-modeling dataset over numericalized token tensors.

    Currently backs the following corpora:

        - WikiText2
        - WikiText103
        - PennTreebank
        - WMTNewsCrawl
    """

    def __init__(self, data, vocab, transforms, single_line):
        """Create the dataset.

        Arguments:
            data: list of tensors of token ids (one tensor per raw line),
                e.g. ``torch.tensor([id_1, id_2, id_3]).long()``.
            vocab: Vocabulary object used for this dataset.
            transforms: text transform applied per item when
                ``single_line`` is False.
            single_line: when True, every non-empty tensor is concatenated
                into one contiguous token stream.
        """
        super(LanguageModelingDataset, self).__init__()
        self.vocab = vocab
        self.transforms = transforms
        self.single_line = single_line
        self.data = data
        if single_line:
            non_empty = tuple(t for t in self.data if t.numel() > 0)
            self.data = torch.cat(non_empty)

    def __getitem__(self, i):
        if self.single_line:
            return self.data[i]
        return self.transforms(self.data[i])

    def __len__(self):
        return len(self.data)

    def __iter__(self):
        # Iteration yields the raw stored items (no transform applied).
        return iter(self.data)

    def get_vocab(self):
        return self.vocab
def _setup_datasets(dataset_name, tokenizer=None, root='.data', vocab=None,
                    data_select=('train', 'test', 'valid'), single_line=True):
    """Shared builder for the language-modeling corpora: numericalize the
    requested raw splits and wrap each in a LanguageModelingDataset.
    The vocab is built from the train split when not supplied."""
    if tokenizer is None:
        tokenizer = get_tokenizer('basic_english')
    splits = [data_select] if isinstance(data_select, str) else data_select
    if not set(splits).issubset({'train', 'valid', 'test'}):
        raise TypeError('Given data selection {} is not supported!'.format(splits))
    if not single_line and dataset_name != 'WikiText103':
        raise TypeError('single_line must be True except for WikiText103')
    if vocab is None:
        if 'train' not in splits:
            raise TypeError("Must pass a vocab if train is not selected.")
        raw_train, = raw.DATASETS[dataset_name](root=root, data_select=('train',))
        vocab = build_vocab(raw_train, tokenizer)

    def text_transform(line):
        # raw string -> LongTensor of token ids
        return torch.tensor([vocab[token] for token in tokenizer(line)], dtype=torch.long)

    numericalized = {}
    for split in splits:
        raw_split, = raw.DATASETS[dataset_name](root=root, data_select=split)
        numericalized[split] = [text_transform(line) for line in raw_split]
    return tuple(LanguageModelingDataset(numericalized[split], vocab,
                                         text_transform, single_line)
                 for split in splits)
def WikiText2(*args, **kwargs):
    """Create the WikiText2 language-modeling datasets.

    Separately returns the train/test/valid splits (or the subset selected
    through ``data_select``).

    Arguments:
        tokenizer: callable turning a raw string into a token list; defaults
            to the fastText ``basic_english`` tokenizer (a spacy tokenizer
            works as well).
        root: directory where the raw data is cached (default ".data").
        vocab: Vocabulary to use. When None it is built from the train
            split, so 'train' must then be part of ``data_select``.
        data_select: string or tuple among 'train'/'test'/'valid'
            (default: ('train', 'test', 'valid')).
        single_line: when True (default) all lines of the corpus are
            concatenated into a single token stream; pass False to keep the
            data line by line.

    Examples:
        >>> from torchtext.experimental.datasets import WikiText2
        >>> from torchtext.data.utils import get_tokenizer
        >>> tokenizer = get_tokenizer("spacy")
        >>> train, test, valid = WikiText2(tokenizer=tokenizer)
        >>> vocab = train.get_vocab()
        >>> valid, = WikiText2(tokenizer=tokenizer, vocab=vocab,
                               data_select='valid')
    """
    return _setup_datasets("WikiText2", *args, **kwargs)
def WikiText103(*args, **kwargs):
    """Create the WikiText103 language-modeling datasets.

    Separately returns the train/test/valid splits (or the subset selected
    through ``data_select``).

    Arguments:
        tokenizer: callable turning a raw string into a token list; defaults
            to the fastText ``basic_english`` tokenizer (a spacy tokenizer
            works as well).
        root: directory where the raw data is cached (default ".data").
        vocab: Vocabulary to use. When None it is built from the train
            split, so 'train' must then be part of ``data_select``.
        data_select: string or tuple among 'train'/'test'/'valid'
            (default: ('train', 'test', 'valid')).
        single_line: when True (default) all lines of the corpus are
            concatenated into a single token stream; WikiText103 is the
            only corpus that also accepts ``single_line=False``.

    Examples:
        >>> from torchtext.experimental.datasets import WikiText103
        >>> from torchtext.data.utils import get_tokenizer
        >>> tokenizer = get_tokenizer("spacy")
        >>> train, test, valid = WikiText103(tokenizer=tokenizer)
        >>> vocab = train.get_vocab()
        >>> valid, = WikiText103(tokenizer=tokenizer, vocab=vocab,
                                 data_select='valid')
    """
    return _setup_datasets("WikiText103", *args, **kwargs)
def PennTreebank(*args, **kwargs):
    """Create the PennTreebank language-modeling datasets.

    Separately returns the train/test/valid splits (or the subset selected
    through ``data_select``).

    Arguments:
        tokenizer: callable turning a raw string into a token list; defaults
            to the fastText ``basic_english`` tokenizer (a spacy tokenizer
            works as well).
        root: directory where the raw data is cached (default ".data").
        vocab: Vocabulary to use. When None it is built from the train
            split, so 'train' must then be part of ``data_select``.
        data_select: string or tuple among 'train'/'test'/'valid'
            (default: ('train', 'test', 'valid')).
        single_line: when True (default) all lines of the corpus are
            concatenated into a single token stream.

    Examples:
        >>> from torchtext.experimental.datasets import PennTreebank
        >>> from torchtext.data.utils import get_tokenizer
        >>> tokenizer = get_tokenizer("spacy")
        >>> train, test, valid = PennTreebank(tokenizer=tokenizer)
        >>> vocab = train.get_vocab()
        >>> valid, = PennTreebank(tokenizer=tokenizer, vocab=vocab,
                                  data_select='valid')
    """
    return _setup_datasets("PennTreebank", *args, **kwargs)
def WMTNewsCrawl(*args, **kwargs):
    """Create the WMTNewsCrawl language-modeling dataset (train split).

    Arguments:
        tokenizer: callable turning a raw string into a token list; defaults
            to the fastText ``basic_english`` tokenizer (a spacy tokenizer
            works as well).
        root: directory where the raw data is cached (default ".data").
        vocab: Vocabulary to use; built from the train split when None.
        data_select: split selection (default: ('train',)).
        single_line: when True (default) all lines of the corpus are
            concatenated into a single token stream.

    Examples:
        >>> from torchtext.experimental.datasets import WMTNewsCrawl
        >>> from torchtext.data.utils import get_tokenizer
        >>> tokenizer = get_tokenizer("spacy")
        >>> train_dataset, = WMTNewsCrawl(tokenizer=tokenizer, data_select='train')
    """
    return _setup_datasets("WMTNewsCrawl", *args, **kwargs)
# Public registry mapping dataset names to their builder functions.
DATASETS = {
    'WikiText2': WikiText2,
    'WikiText103': WikiText103,
    'PennTreebank': PennTreebank,
    'WMTNewsCrawl': WMTNewsCrawl
}
| 42.164 | 92 | 0.649654 |
ace1c0123069f6b76bdacd96c6b61d0d3db32d34 | 609 | py | Python | tests/unit/test_meshes/test_zero_dimensional_submesh.py | gyouhoc/PyBaMM | 6852e0e518157e6802ce83a2549562e7d0ed4b9f | [
"BSD-3-Clause"
] | 1 | 2019-10-29T19:06:04.000Z | 2019-10-29T19:06:04.000Z | tests/unit/test_meshes/test_zero_dimensional_submesh.py | gyouhoc/PyBaMM | 6852e0e518157e6802ce83a2549562e7d0ed4b9f | [
"BSD-3-Clause"
] | null | null | null | tests/unit/test_meshes/test_zero_dimensional_submesh.py | gyouhoc/PyBaMM | 6852e0e518157e6802ce83a2549562e7d0ed4b9f | [
"BSD-3-Clause"
] | null | null | null | import pybamm
import unittest
class TestSubMesh0D(unittest.TestCase):
    """Unit tests for the zero-dimensional submesh."""

    def test_exceptions(self):
        # Two spatial entries are invalid for a 0D mesh.
        bad_position = {"x": 0, "y": 0}
        with self.assertRaises(pybamm.GeometryError):
            pybamm.SubMesh0D(bad_position)

    def test_init(self):
        # Smoke test: build the mesh and add ghost meshes without error.
        single_position = {"x": 1}
        mesh_generator = pybamm.MeshGenerator(pybamm.SubMesh0D)
        submesh = mesh_generator(single_position, None)
        submesh.add_ghost_meshes()
# Allow running this test module directly; "-v" additionally switches
# PyBaMM into debug mode (unittest.main also reads sys.argv).
if __name__ == "__main__":
    print("Add -v for more debug output")
    import sys

    if "-v" in sys.argv:
        # NOTE(review): `debug` is assigned but never read here —
        # possibly a leftover; pybamm.settings.debug_mode is the
        # effective switch.
        debug = True
        pybamm.settings.debug_mode = True
    unittest.main()
ace1c01d1c54100b8bc44a64678c2bfa1a2dddbd | 496 | py | Python | huxley/core/migrations/0037_positionpaper_graded_file.py | srisainachuri/huxley | 7166a1423e49b506d6d5f142c748eac4e5d2314c | [
"BSD-3-Clause"
] | 18 | 2015-07-12T00:55:51.000Z | 2021-12-13T15:41:06.000Z | huxley/core/migrations/0037_positionpaper_graded_file.py | srisainachuri/huxley | 7166a1423e49b506d6d5f142c748eac4e5d2314c | [
"BSD-3-Clause"
] | 288 | 2015-01-13T23:05:09.000Z | 2022-03-25T17:35:36.000Z | huxley/core/migrations/0037_positionpaper_graded_file.py | srisainachuri/huxley | 7166a1423e49b506d6d5f142c748eac4e5d2314c | [
"BSD-3-Clause"
] | 47 | 2015-05-12T15:39:57.000Z | 2022-03-30T09:12:48.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2019-02-03 18:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: adds an optional ``graded_file``
    FileField to PositionPaper (uploads stored under 'graded_papers/')."""

    dependencies = [('core', '0036_conference_position_papers_accepted'), ]

    operations = [
        migrations.AddField(
            model_name='positionpaper',
            name='graded_file',
            # null=True so existing rows need no backfill.
            field=models.FileField(
                null=True, upload_to=b'graded_papers/'), ),
    ]
| 26.105263 | 75 | 0.647177 |
ace1c17d01f0c24a6097ad1cb7a2f2f0eafe9bad | 16,887 | py | Python | pyzoo/zoo/tfpark/model.py | EN1AC13/analytics-zoo | 169dac5400f341135b7bf38bb87e3fca89ac09d8 | [
"Apache-2.0"
] | 35 | 2020-07-03T06:31:12.000Z | 2020-07-12T08:38:10.000Z | pyzoo/zoo/tfpark/model.py | Angelina319/analytics-zoo | 439f2c99d657fb20a5ff4bf510869616402ba0cf | [
"Apache-2.0"
] | null | null | null | pyzoo/zoo/tfpark/model.py | Angelina319/analytics-zoo | 439f2c99d657fb20a5ff4bf510869616402ba0cf | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from bigdl.optim.optimizer import MaxEpoch
from zoo.tfpark.utils import evaluate_string_metrics
from zoo.common import load_from_file
from zoo.common import save_file
from zoo.common.nncontext import getOrCreateSparkContext
from zoo.tfpark.tf_dataset import TFNdarrayDataset, TFDataset
from zoo.tfpark.tf_optimizer import TFOptimizer
from zoo.tfpark.tf_predictor import TFPredictor
class KerasModel(object):
    """Wrapper around a compiled tf.keras model that dispatches
    fit/evaluate/predict either to the local Keras code path or to the
    distributed Spark path (TFOptimizer / TFPredictor), depending on the
    input type and the ``distributed`` flag."""

    def __init__(self, model, model_dir=None):
        """
        :param model: a compiled keras model
        """
        self.model = model
        # Checkpoint/summary directory handed to TFOptimizer in
        # distributed training.
        self.model_dir = model_dir
        import tensorflow as tf
        # Symbolic batch dimension of the first input tensor.
        self.real_batch_size = tf.shape(self.model.inputs[0])[0]

    @property
    def metrics_names(self):
        # Names aligned with the values returned by evaluate().
        return self.model.metrics_names

    def get_weights(self):
        """Return the model weights as a list of numpy arrays."""
        return self.model.get_weights()

    def set_weights(self, weights):
        """Set the model weights from a list of numpy arrays."""
        self.model.set_weights(weights)

    def save_weights(self, filepath, overwrite=True, save_format=None):
        """Save only the model weights; file placement is delegated to the
        project helper ``save_file``."""

        def save_func(file_path):
            self.model.save_weights(file_path, overwrite, save_format)
        save_file(save_func, filepath)

    def load_weights(self, filepath, by_name=False):
        """Load weights previously stored with :meth:`save_weights`."""

        def load_func(file_path):
            self.model.load_weights(file_path, by_name)
        load_from_file(load_func, filepath)

    def save_model(self, path):
        """
        Save the model to a single HDF5 file.

        :param path: String. The path to save the model.
        """

        def save_func(file_path):
            self.model.save(file_path)
        save_file(save_func, path)

    @staticmethod
    def load_model(path):
        """
        Load an existing keras model (with weights) from HDF5 file.

        :param path: String. The path to the pre-defined model.
        :return: KerasModel.
        """
        from tensorflow.python.keras import models

        def load_func(file_path):
            return models.load_model(file_path)

        keras_model = load_from_file(load_func, path)
        return KerasModel(keras_model)

    def fit(self,
            x=None,
            y=None,
            batch_size=None,
            epochs=1,
            validation_data=None,
            distributed=False,
            **kwargs
            ):
        """
        Train the model for a fixed num of epochs

        Arguments:
        :param x: Input data. It could be:
            - a TFDataset object
            - A Numpy array (or array-like), or a list of arrays
              (in case the model has multiple inputs).
            - A dict mapping input names to the corresponding array/tensors,
              if the model has named inputs.
        :param y: Target data. Like the input data `x`,
            It should be consistent with `x` (you cannot have Numpy inputs and
            tensor targets, or inversely). If `x` is a TFDataset, `y` should
            not be specified (since targets will be obtained from `x`).
        :param batch_size: Integer or `None`.
            Number of samples per gradient update.
            If `x` is a TFDataset, you do not need to specify batch_size.
        :param epochs: Integer. Number of epochs to train the model.
            An epoch is an iteration over the entire `x` and `y`
            data provided.
        :param validation_data: Data on which to evaluate
            the loss and any model metrics at the end of each epoch.
            The model will not be trained on this data.
            `validation_data` could be:
              - tuple `(x_val, y_val)` of Numpy arrays or tensors
        :param distributed: Boolean. Whether to do training in distributed
            mode or local mode. In local mode, x must be a Numpy array.
        """
        if isinstance(x, TFDataset):
            # todo check arguments
            # Validation data must already be embedded in the TFDataset.
            assert validation_data is None, "validation_data must be None when " \
                                            "using TFDataset as input, please " \
                                            "use set the validation data in TFDataset"
            if not x.has_batch:
                raise ValueError("The batch_size of TFDataset must be " +
                                 "specified when used in KerasModel fit.")
            if isinstance(x, TFNdarrayDataset):
                x = _standarize_feature_label_dataset(x, self.model)
            self._fit_distributed(x, epochs, **kwargs)
        elif distributed:
            # Wrap the in-memory arrays into a TFDataset for Spark training.
            dataset = TFDataset.from_ndarrays((x, y), val_tensors=validation_data,
                                              batch_size=batch_size)
            self._fit_distributed(dataset, epochs, **kwargs)
        else:
            # Plain local Keras training.
            self.model.fit(x=x,
                           y=y,
                           batch_size=batch_size,
                           epochs=epochs,
                           validation_data=validation_data,
                           **kwargs
                           )

    def _fit_distributed(self, dataset, epochs, **kwargs):
        """Run distributed training via TFOptimizer for *epochs* epochs."""
        self.tf_optimizer = TFOptimizer.from_keras(self.model, dataset,
                                                   model_dir=self.model_dir,
                                                   **kwargs)
        self.tf_optimizer.optimize(MaxEpoch(epochs))

    def evaluate(self,
                 x=None,
                 y=None,
                 batch_per_thread=None,
                 distributed=False
                 ):
        """
        Evaluate a model on a given dataset

        :param x: Input data. It could be:
            - a TFDataset object
            - A Numpy array (or array-like), or a list of arrays
              (in case the model has multiple inputs).
            - A dict mapping input names to the corresponding array/tensors,
              if the model has named inputs.
        :param y: Target data. Like the input data `x`,
            It should be consistent with `x` (you cannot have Numpy inputs and
            tensor targets, or inversely). If `x` is a TFDataset, `y` should
            not be specified (since targets will be obtained from `x`).
        :param batch_per_thread:
            The default value is 1.
            When distributed is True, the total batch size is
            batch_per_thread * rdd.getNumPartitions.
            When distributed is False the total batch size is
            batch_per_thread * numOfCores.
        :param distributed: Boolean. Whether to do evaluation in distributed
            mode or local mode. In local mode, x must be a Numpy array.
        """
        if isinstance(x, TFDataset):
            if not x.has_batch:
                raise ValueError("The batch_per_thread of TFDataset must be " +
                                 "specified when used in KerasModel evaluate.")
            if isinstance(x, TFNdarrayDataset):
                x = _standarize_feature_label_dataset(x, self.model)
            # todo check arguments
            return self._evaluate_distributed(x)
        else:
            if distributed:
                dataset = TFDataset.from_ndarrays((x, y),
                                                  batch_per_thread=-1 if batch_per_thread is None
                                                  else batch_per_thread
                                                  )
                return self._evaluate_distributed(dataset)
            else:
                results = self.model.evaluate(x=x,
                                              y=y,
                                              batch_size=batch_per_thread)
                # Local path returns a {metric_name: value} dict.
                results = dict(zip(self.metrics_names, results))
                return results

    def _evaluate_distributed(self, dataset):
        """Evaluate the named Keras metrics over a TFDataset on Spark."""
        import tensorflow.keras.backend as K
        # Keras renamed the internal targets attribute across versions.
        if hasattr(self.model, "targets"):
            model_targets = self.model.targets
        else:
            model_targets = self.model._targets

        return evaluate_string_metrics(sess=K.get_session(),
                                       string_metrics=self.metrics_names,
                                       dataset=dataset,
                                       inputs=self.model.inputs + model_targets,
                                       targets=model_targets,
                                       outputs=self.model.outputs,
                                       loss=self.model.total_loss)

    def predict(self,
                x,
                batch_per_thread=None,
                distributed=False):
        """
        Use a model to do prediction.

        :param x: Input data. It could be:
            - a TFDataset object
            - A Numpy array (or array-like), or a list of arrays
              (in case the model has multiple inputs).
            - A dict mapping input names to the corresponding array/tensors,
              if the model has named inputs.
        :param batch_per_thread:
            The default value is 1.
            When distributed is True, the total batch size is
            batch_per_thread * rdd.getNumPartitions.
            When distributed is False the total batch size is
            batch_per_thread * numOfCores.
        :param distributed: Boolean. Whether to do prediction in distributed
            mode or local mode. In local mode, x must be a Numpy array.
        """
        if isinstance(x, TFDataset):
            # todo check arguments
            if not x.has_batch:
                raise ValueError("The batch_per_thread of TFDataset" +
                                 " must be specified when used in KerasModel predict.")
            if isinstance(x, TFNdarrayDataset):
                x = _standarize_feature_dataset(x, self.model)
            return self._predict_distributed(x)
        else:
            if distributed:
                sc = getOrCreateSparkContext()
                rdd, types, shapes = _create_rdd_x(x, self.model._feed_input_names, sc)

                dataset = TFDataset.from_rdd(rdd,
                                             names=self.model._feed_input_names,
                                             types=types,
                                             shapes=shapes,
                                             batch_per_thread=-1 if batch_per_thread is None
                                             else batch_per_thread)
                results = self._predict_distributed(dataset).collect()
                output_num = len(self.model.outputs)
                if output_num == 1:
                    # Single-output model: stack per-sample results into one array.
                    return np.stack(results)
                else:
                    # Multi-output model: one stacked array per output.
                    predictions = []
                    for i in range(0, output_num):
                        predictions.append(np.stack([res[i] for res in results]))
                    return predictions
            else:
                return self.model.predict(x=x,
                                          batch_size=batch_per_thread)

    def _predict_distributed(self, x):
        """Run distributed inference via TFPredictor; returns an RDD."""
        predictor = TFPredictor.from_keras(self.model, x)
        return predictor.predict()

    def train_on_batch(self,
                       x,
                       y=None,
                       sample_weight=None,
                       class_weight=None,
                       reset_metrics=True):
        """Thin pass-through to ``keras.Model.train_on_batch`` (local only)."""
        return self.model.train_on_batch(x=x,
                                         y=y,
                                         sample_weight=sample_weight,
                                         class_weight=class_weight,
                                         reset_metrics=reset_metrics)

    def test_on_batch(self, x, y=None, sample_weight=None, reset_metrics=True):
        """Thin pass-through to ``keras.Model.test_on_batch`` (local only)."""
        return self.model.test_on_batch(x=x,
                                        y=y,
                                        sample_weight=sample_weight,
                                        reset_metrics=reset_metrics)

    def predict_on_batch(self, x):
        """Thin pass-through to ``keras.Model.predict_on_batch`` (local only)."""
        return self.model.predict_on_batch(x)
def _standarize_feature_label_dataset(dataset, model):
    """Normalize a (features, labels) TFNdarrayDataset so that both halves
    are plain lists ordered like ``model.input_names`` /
    ``model.output_names``, with 0-d labels promoted to shape (1,)."""
    input_names = model.input_names
    output_names = model.output_names

    def _process_labels(ys):
        # Promote scalar (0-d) labels to rank-1 arrays so downstream
        # batching always sees at least one label dimension.
        if isinstance(ys, dict):
            return {k: np.expand_dims(y, axis=-1) if y.ndim == 0 else y for k, y in ys.items()}
        elif isinstance(ys, list):
            return [np.expand_dims(y, axis=-1) if y.ndim == 0 else y for y in ys]
        else:
            return np.expand_dims(ys, axis=-1) if ys.ndim == 0 else ys

    def _training_reorder(x, input_names, output_names):
        # x is a (features, labels) pair; order each half by model names.
        assert isinstance(x, tuple)
        return (_reorder(x[0], input_names), _reorder(x[1], output_names))

    def _reorder(x, names):
        # dict -> list ordered by names; list kept as-is; single array wrapped.
        if isinstance(x, dict):
            return [x[name] for name in names]
        elif isinstance(x, list):
            return x
        else:
            return [x]

    rdd = dataset.rdd.map(lambda x: (x[0], _process_labels(x[1])))\
        .map(lambda sample: _training_reorder(sample, input_names, output_names))
    if dataset.val_rdd is not None:
        val_rdd = dataset.val_rdd.map(lambda x: (x[0], _process_labels(x[1])))\
            .map(lambda sample: _training_reorder(sample, input_names, output_names))
    else:
        val_rdd = None
    tensor_structure = _training_reorder(dataset.tensor_structure, input_names, output_names)
    new_dataset = TFNdarrayDataset(rdd, tensor_structure, dataset.batch_size,
                                   -1, dataset.hard_code_batch_size, val_rdd)
    # batch_per_thread is not a constructor argument; carry it over manually.
    new_dataset.batch_per_thread = dataset.batch_per_thread
    return new_dataset
def _standarize_feature_dataset(dataset, model):
    """Reorder a feature-only TFDataset so samples follow model.input_names."""
    input_names = model.input_names

    def _as_ordered_list(sample, names):
        # dict -> list ordered by names; list passed through unchanged;
        # tuple converted; a single array is wrapped in a list.
        if isinstance(sample, dict):
            return [sample[n] for n in names]
        if isinstance(sample, list):
            return sample
        if isinstance(sample, tuple):
            return list(sample)
        return [sample]

    rdd = dataset.rdd.map(lambda sample: _as_ordered_list(sample, input_names))
    feature_schema = _as_ordered_list(dataset.tensor_structure[0], input_names)
    return TFNdarrayDataset(rdd, feature_schema, dataset.batch_size,
                            -1, dataset.hard_code_batch_size)
def _create_rdd_x_y(x, y, input_names, output_names, sc):
    """Convert numpy features/labels into an RDD of (inputs, targets) pairs.

    :param x: feature array(s) accepted by Keras' standardize_input_data.
    :param y: label array(s), same conventions as x.
    :param input_names: model input names, fixes the ordering of inputs.
    :param output_names: model output names, fixes the ordering of targets.
    :param sc: SparkContext used to parallelize the samples.
    :return: (rdd, x_meta, y_meta) where the metas map name -> (dtype, shape)
        of one sample (taken from the first sample, so x/y must be non-empty).
    """
    from tensorflow.python.keras.engine import training_utils
    x = training_utils.standardize_input_data(x, input_names,
                                              check_batch_axis=False,
                                              exception_prefix='input')
    y = training_utils.standardize_input_data(y, output_names,
                                              shapes=None, check_batch_axis=False,
                                              exception_prefix='target')
    num_samples = x[0].shape[0]
    num_inputs = len(x)
    num_targets = len(y)

    input_data = []
    for i in range(num_samples):
        inputs = [x[j][i] for j in range(num_inputs)]
        targets = []
        for j in range(num_targets):
            if y[j][i].ndim == 0:
                # Promote 0-d labels to shape (1,). Bug fix: axis=1 is out
                # of range for a 0-d array and raises AxisError on modern
                # NumPy; axis=0 yields the same (1,) result.
                targets.append(np.expand_dims(y[j][i], axis=0))
            else:
                targets.append(y[j][i])
        input_data.append((inputs, targets))

    x_meta = {input_names[i]: (input_data[0][0][i].dtype, input_data[0][0][i].shape)
              for i in range(len(input_names))}
    # Bug fix: target metadata must be ranged over the *output* names; the
    # original iterated range(len(input_names)), which indexes out of bounds
    # (or silently truncates) whenever input and output counts differ.
    y_meta = {output_names[i]: (input_data[0][1][i].dtype, input_data[0][1][i].shape)
              for i in range(len(output_names))}

    rdd = sc.parallelize(input_data)
    return rdd, x_meta, y_meta
def _create_rdd_x(x, input_names, sc):
    """Turn numpy feature arrays into an RDD of per-sample input lists.

    Returns (rdd, types, shapes) where types/shapes describe one sample.
    """
    from tensorflow.python.keras.engine import training_utils
    x = training_utils.standardize_input_data(x, input_names,
                                              check_batch_axis=False,
                                              exception_prefix='input')
    sample_count = x[0].shape[0]
    # One list of per-input slices for every sample index.
    samples = [[feature[i] for feature in x] for i in range(sample_count)]
    first_sample = samples[0]
    types = [arr.dtype for arr in first_sample]
    shapes = [arr.shape for arr in first_sample]
    rdd = sc.parallelize(samples)
    return rdd, types, shapes
| 39.734118 | 99 | 0.561971 |
ace1c2ed3a3916b88d99787e1c28c05099980fd6 | 22,537 | py | Python | solidata_api/_core/queries_db/query_utils.py | co-demos/solidata-backend | 2c67aecbd457cdec78b0772d78dcf699e20dd3dc | [
"MIT"
] | 2 | 2019-12-17T22:27:53.000Z | 2020-06-22T12:47:37.000Z | solidata_api/_core/queries_db/query_utils.py | co-demos/solidata-backend | 2c67aecbd457cdec78b0772d78dcf699e20dd3dc | [
"MIT"
] | 13 | 2019-06-16T15:42:33.000Z | 2022-02-26T05:12:34.000Z | solidata_api/_core/queries_db/query_utils.py | co-demos/solidata-backend | 2c67aecbd457cdec78b0772d78dcf699e20dd3dc | [
"MIT"
] | 1 | 2019-12-17T22:27:58.000Z | 2019-12-17T22:27:58.000Z | # -*- encoding: utf-8 -*-
"""
_core/queries_db/query_utils.py
"""
import re
import random
import pandas as pd
import numpy as np
from pandas.io.json import json_normalize
from log_config import log, pformat
log.debug("... _core.queries_db.query_doc.py ..." )
from bson.objectid import ObjectId
from flask_restplus import marshal
from . import db_dict_by_type, Marshaller
from solidata_api._choices._choices_docs import doc_type_dict
from solidata_api._choices._choices_f_types import dmf_types_list, dmf_type_categ
from solidata_api._core.pandas_ops.pd_utils import *
import operator
def removeKey(d, key):
    """Return a shallow copy of *d* without *key*.

    Raises KeyError when *key* is absent (same as the original ``del``).
    """
    pruned = dict(d)
    pruned.pop(key)
    return pruned
def weighted(nb):
    """Stringify *nb* for None-safe sorting: None maps to ''."""
    return '' if nb is None else str(nb)
def append_search_for_to_query(query, search_for):
    """Add a MongoDB full-text ``$text`` clause to *query*.

    Each word is wrapped in escaped quotes so the text search applies AND
    semantics. A None/empty *search_for* leaves *query* unchanged.
    """
    log.debug( "query : \n%s", pformat(query) )
    log.debug( "search_for : \n%s", pformat(search_for) )

    if search_for not in (None, [], [''], ''):
        terms = ['"{}"'.format(word) for word in search_for if word != ""]
        query["$text"] = {
            "$search": u" ".join(terms)
        }

    log.debug( "query : \n%s", pformat(query) )
    return query
def append_filters_to_query(query, search_filters, splitter="__"):
    """Add case-insensitive regex filters to a MongoDB *query*.

    Each entry of *search_filters* is ``"<field><splitter><value>"``.
    Values for the same field are OR-ed together; distinct fields are
    AND-ed via a top-level ``$and`` clause.

    Fix: the field->values map is now built in a single pass with
    ``setdefault`` — the original first loop repeatedly re-initialized the
    per-field lists before a second loop filled them.
    """
    filters_dict = {}
    if search_filters not in (None, "", []):
        for raw_filter in search_filters:
            if raw_filter == "":
                continue
            # split (not partition) to keep the original semantics when a
            # value itself contains the splitter: only the second piece is
            # used. Entries without the splitter still raise IndexError,
            # exactly as before.
            parts = raw_filter.split(splitter)
            filters_dict.setdefault(parts[0], []).append(parts[1])

    if filters_dict != {}:
        query["$and"] = []
        for field, values in filters_dict.items():
            regex_strings = [u".*" + word + ".*" for word in values]
            # NOTE(review): MongoDB's $options usually takes "i" (not "-i")
            # for case-insensitive matching — confirm this option string
            # works against the target server.
            or_clause = {"$or": [{field: {"$regex": reg, "$options": "-i"}}
                                 for reg in regex_strings]}
            query["$and"].append(or_clause)

    log.debug('query : \n%s', pformat(query) )
    return query
def sort_list_of_dicts(list_to_sort, key_value, is_reverse=True):
    """Sort a list of dicts on *key_value*, tolerating None values.

    ``weighted`` stringifies each value ('' for None) so mixed None/number
    columns compare without raising.
    """
    # NOTE(review): ordering is lexicographic on the stringified values
    # ('10' sorts before '9') — confirm this is the intended behavior.
    def entry_key(entry):
        return weighted(entry[key_value])

    return sorted(list_to_sort, key=entry_key, reverse=is_reverse)
def build_first_term_query(ds_oid, query_args, field_to_query="oid_dso") :
    """
    build query understandable by mongodb
    inspired by work on openscraper

    :param ds_oid: document oid the f_data docs must belong to.
    :param query_args: parsed request arguments (search_for, item_id, ...).
    :param field_to_query: which oid field links docs to the parent
        ("oid_dso" for prj outputs, "oid_dsi" for inputs).
    :return: a MongoDB query dict.
    """
    print()
    print("-+- "*40)
    log.debug( "... build_first_term_query " )

    # base clause: restrict to the parent document
    query = { field_to_query : ds_oid }

    log.debug('query_args : \n%s', pformat(query_args) )

    search_for = query_args.get('search_for', None )
    # search_in = query_args.get('search_in', None )
    # search_int = query_args.get('search_int', None )
    # search_float = query_args.get('search_float', None )
    search_tags = query_args.get('search_tags', None )
    item_id = query_args.get('item_id', None )
    # NOTE(review): is_complete is read but never used below — leftover?
    is_complete = query_args.get('is_complete', None )
    search_filters = query_args.get('search_filters', [] )

    ### append filters
    if search_filters != None and search_filters != [] :
        query = append_filters_to_query( query, search_filters )

    # search by item_id
    if item_id != None :
        q_item = { "_id" : { "$in" : [ ObjectId(i_id) for i_id in item_id ] }}
        query.update(q_item)

    ### TO DO ...
    # search by tags (not implemented yet)
    if search_tags != None :
        pass
        # q_tag = { "_id" : ObjectId(item_id) }
        # query.update(q_tag)

    ### search by content --> collection need to be indexed
    # cf : https://stackoverflow.com/questions/6790819/searching-for-value-of-any-field-in-mongodb-without-explicitly-naming-it
    # if search_for != None and search_for != [] and search_for != [''] :
    #   search_words = [ "\""+word+"\"" for word in search_for ]
    #   q_search_for = { "$text" :
    #     { "$search" : u" ".join(search_words) } # doable because text fields are indexed at main.py
    #   }
    #   query.update(q_search_for)
    query = append_search_for_to_query( query, search_for )

    return query
def build_projected_fields(ignore_fields_list=None, keep_fields_list=None ) :
    """
    Build the mongodb projection document for a ``find()`` query.

    :param ignore_fields_list: field names to exclude (mapped to ``0``)
    :param keep_fields_list: field names to include (mapped to ``1``)
    :return: a projection dict, or ``None`` when no restriction is requested
             (mongodb then returns every field).
    """
    ### cf : https://docs.mongodb.com/manual/tutorial/project-fields-from-query-results/
    # use the None-sentinel idiom instead of mutable `[]` defaults
    # (avoids the shared-mutable-default pitfall and tolerates explicit None)
    if ignore_fields_list is None :
        ignore_fields_list = []
    if keep_fields_list is None :
        keep_fields_list = []
    print()
    print("-+- "*40)
    log.debug( "... build_projected_fields " )
    # add ignore_fields / keep_fields criterias to query if any
    projected_fields = None
    if ignore_fields_list != [] or keep_fields_list != [] :
        projected_fields = {}
        # add fields to ignore
        if ignore_fields_list != [] :
            ignore_fields = { f : 0 for f in ignore_fields_list } ### always keep _id field
            projected_fields.update( ignore_fields )
        # add fields to retrieve
        if keep_fields_list != [] :
            keep_fields = { f : 1 for f in keep_fields_list }
            projected_fields.update( keep_fields )
    return projected_fields
def get_ds_docs(doc_oid, query_args, db_coll="dso_doc", f_col_headers=[] ) :
    """
    Fetch the documents of one dataspace from the ``dso_doc`` / ``dsi_doc``
    collection, applying the search filters, field projection, unique-values
    extraction and shuffling options carried by ``query_args``.
    get_ds_docs + search filters to f_data
    """
    # NOTE(review): f_col_headers=[] is a mutable default — it is only read
    # here, never mutated, so the aliasing is currently harmless.
    print()
    print("-+- "*40)
    log.debug( "... get_ds_docs " )
    log.debug( "... f_col_headers : \n%s", pformat(f_col_headers) )
    # NOTE(review): filters_field is assigned but never used below
    filters_field = ""
    keep_fields_list = []
    ignore_fields_list = []
    map_list = query_args.get('map_list', False )
    get_filters = query_args.get('get_filters', False )
    get_uniques = query_args.get('get_uniques', None )
    shuffle_seed = query_args.get('shuffle_seed', None )
    fields_to_return = query_args.get('fields_to_return', None )
    # NOTE(review): field_to_query is only bound for these two collection
    # names — any other db_coll value raises NameError a few lines below
    if db_coll == "dso_doc" :
        field_to_query = "oid_dso"
    if db_coll == "dsi_doc" :
        field_to_query = "oid_dsi"
    ds_doc_collection = db_dict_by_type[db_coll]
    query = build_first_term_query(doc_oid, query_args, field_to_query=field_to_query)
    log.debug('query : \n%s', pformat(query) )
    ### minimal fields list for map request
    if map_list :
        keep_fields_list += ['_id', field_to_query, "lat", "lon"]
        if fields_to_return :
            for return_field in fields_to_return :
                keep_fields_list.append(return_field)
    ### unique-values mode : only project the columns whose f_type matches
    if get_filters and db_coll == "dso_doc" :
        if get_uniques == None :
            list_filters = dmf_type_categ
        else :
            list_filters = [ get_uniques ]
        keep_fields_list = [ h["f_title"] for h in f_col_headers if h["f_type"] in list_filters ]
        log.debug( 'keep_fields_list : \n%s', pformat(keep_fields_list) )
    projected_fields = build_projected_fields(ignore_fields_list, keep_fields_list)
    log.debug( 'projected_fields : \n%s', pformat(projected_fields) )
    # results = ds_doc_collection.find({'oid_dso' : doc_oid })
    cursor = ds_doc_collection.find(query, projected_fields)
    results = list(cursor)
    # log.debug('results[0] : \n%s', pformat(results[0]) )
    # unique-values mode : replace results by one {column: [unique values]}
    # dict per projected column (values are stringified before dedup)
    if get_filters :
        u_values = []
        for h in keep_fields_list :
            u_list = list(set( str(dic[h]) for dic in results if h in dic.keys() ))
            u_val_fields = { h : u_list }
            u_values.append(u_val_fields)
        results = u_values
    ### TEST SHUFFLE HERE / FIRST
    ### shuffle results deterministically from the client-provided seed
    if shuffle_seed != None and map_list == False and get_filters == False:
        random.seed(shuffle_seed)
        # random.shuffle(document_out["data_raw"]["f_data"])
        random.shuffle(results)
    # log.debug('results[0] : \n%s', pformat(results[0]) )
    return results
def strip_f_data( data_raw,
                  doc_open_level_show,
                  team_oids,
                  created_by_oid,
                  roles_for_complete,
                  user_role,
                  user_oid,
                  document_type="dso",
                  map_list=False,
                  fields_to_return=None
                  ):
    """
    TO DO
    strip f_data from fields not authorized for user

    For "dso" documents, columns are filtered on each header's
    ``open_level_show`` value according to the user's role / ownership ;
    for "dsi" documents every column is kept. The surviving columns are
    re-shaped (oids stringified, ``_id`` renamed to ``sd_id``) and the data
    is returned as a list of dicts.
    """
    print()
    print("-+- "*40)
    log.debug( "... strip_f_data " )
    log.debug( "... strip_f_data / map_list : %s", map_list )
    log.debug( "... strip_f_data / fields_to_return : \n%s", pformat(fields_to_return) )
    f_data = data_raw["f_data"]
    f_col_headers = data_raw["f_col_headers"]
    log.debug('f_col_headers : \n%s', pformat(f_col_headers) )
    ### load f_data as dataframe
    f_data_df = pd.DataFrame(f_data)
    f_data_df = f_data_df.replace({np.nan:None})
    log.debug('f_data_df.head(5) : \n%s', f_data_df.head(5) )
    ### select f_col_headers given user auth
    # owner and privileged roles see everything ; team members see
    # open_data + commons + collective ; anonymous only open_data
    if document_type == "dso" :
        field_oid_ref = "oid_dso"
        if user_role == 'anonymous' :
            f_col_headers_selected = [ h for h in f_col_headers if h["open_level_show"] in ["open_data"] ]
        elif user_oid in team_oids and user_oid != created_by_oid :
            f_col_headers_selected = [ h for h in f_col_headers if h["open_level_show"] in ["open_data", "commons", "collective"] ]
        elif user_oid == created_by_oid :
            f_col_headers_selected = f_col_headers
        elif user_role in roles_for_complete :
            f_col_headers_selected = f_col_headers
        else :
            f_col_headers_selected = [ h for h in f_col_headers if h["open_level_show"] in ["open_data", "commons"] ]
    elif document_type == "dsi" :
        field_oid_ref = "oid_dsi"
        f_col_headers_selected = f_col_headers
    log.debug('f_col_headers_selected : \n%s', pformat(f_col_headers_selected) )
    ### build f_col_headers_for_df : the list of columns to keep
    f_data_cols = list(f_data_df.columns.values)
    log.debug('f_data_cols : \n%s', pformat(f_data_cols) )
    if document_type == "dsi" :
        f_col_headers_for_df = [ h["f_coll_header_val"] for h in f_col_headers_selected ]
    elif document_type == "dso" :
        f_col_headers_for_df = [ h["f_title"] for h in f_col_headers_selected if h["f_title"] in f_data_cols ]
        f_col_headers_for_df.append("oid_dsi")
    # simplify returned fields if map_list
    if map_list :
        # f_col_headers_for_df = [ field_oid_ref, 'lat', 'lon' ]
        f_col_headers_for_df = [ 'lat', 'lon' ]
    # simplify returned fields if fields_to_return list
    # (added to the map columns, but replacing the columns otherwise)
    if fields_to_return :
        if map_list :
            f_col_headers_for_df += [ f for f in fields_to_return if f in f_data_cols ]
        else :
            f_col_headers_for_df = [ f for f in fields_to_return if f in f_data_cols ]
    ### stringify oid_dso | oid_dsi field (ObjectId values are not JSON-safe)
    if field_oid_ref in f_data_cols :
        f_data_df[field_oid_ref] = f_data_df[field_oid_ref].apply(lambda x: str(x))
    if "oid_dsi" in f_col_headers_for_df and field_oid_ref == "oid_dso" :
        f_data_df["oid_dsi"] = f_data_df["oid_dsi"].apply(lambda x: str(x))
        # f_data_df = f_data_df.rename( index=str, columns = {"oid_dsi" : "sd_id_dsi"})
    ### append "_id" column to authorized columns, exposed as "sd_id"
    f_data_df["_id"] = f_data_df["_id"].apply(lambda x: str(x))
    f_data_df = f_data_df.rename( index=str, columns = {"_id" : "sd_id"})
    f_col_headers_for_df.append("sd_id")
    f_col_headers_for_df = [ f for f in f_col_headers_for_df if f != '_id' ]
    log.debug('f_col_headers_for_df : \n%s', pformat(f_col_headers_for_df) )
    f_data_df_out = f_data_df[ f_col_headers_for_df ]
    ### clean f_data_df_out from NaNs
    # f_data_df_out = f_data_df_out.dropna(how="all")
    f_data_df_out = f_data_df_out.replace({np.nan:None})
    # df = df.dropna(how="all")
    # df = df.replace({np.nan:None})
    f_data_df_out = cleanDfFromNans(f_data_df_out)
    print ("\n", f_data_df_out.head(5))
    ### transform f_data to dict
    f_data = f_data_df_out.to_dict('records')
    del f_data_df_out, f_data_df
    return f_data
def search_for_str( search_str, row) :
    """Flag which cells of ``row`` (a pandas Series) match any search word.

    ``search_str`` is a list of raw search terms ; every whitespace-separated
    word is OR-ed into one case-insensitive regex which is applied to the
    row cast to strings. Returns a boolean Series aligned with ``row``.
    """
    ### TO DO : TREAT strings within "" and commas here
    words = []
    for term in search_str :
        words.extend(term.split())
    pattern = "|".join(words)
    # compare on stringified values so numeric cells are searchable too
    text_row = row.astype(str)
    return text_row.str.contains(pattern, case=False, regex=True)
def search_f_data (data_raw, query_args, not_filtered=True) :
    """
    Apply the free-text search filters from ``query_args`` to
    ``data_raw["f_data"]`` and return the surviving rows as a list of dicts.

    When ``not_filtered`` is False the data already comes from a filtered db
    query and is returned as-is.
    """
    print()
    print("-+- "*40)
    log.debug( "... search_f_data " )
    f_data = data_raw["f_data"]
    if not_filtered :
        ### f_data is not a filtered result from direct db query
        log.debug('query_args : \n%s', pformat(query_args) )
        search_for = query_args.get('search_for', None )
        # search_in = query_args.get('search_in', None )
        # search_int = query_args.get('search_int', None )
        # search_float = query_args.get('search_float', None )
        item_id = query_args.get('item_id', None )
        is_complete = query_args.get('is_complete', None )
        ### use pandas to retrieve search results from
        f_data_df = pd.DataFrame(f_data)
        f_data_df_cols = list(f_data_df.columns.values)
        log.debug( "... f_data_df_cols : \n%s", pformat(f_data_df_cols) )
        log.debug( "... f_data_df : \n%s", f_data_df.head(5) )
        # keep rows where at least one column matches the search words
        if search_for is not None and search_for != [''] :
            f_data_df = f_data_df[f_data_df.apply(lambda row: search_for_str(search_for, row) ).any(axis=1)]
        ### convert Nan to None
        f_data_df = f_data_df.dropna(how="all")
        f_data_df = f_data_df.replace({np.nan:None})
        f_data = f_data_df.to_dict('records')
        # BUGFIX: f_data can legitimately be empty here (empty input, or the
        # search filtered out every row) — indexing f_data[0] unconditionally
        # raised IndexError, so only log a sample when there is one
        if f_data :
            log.debug( "... f_data[0] : \n%s ", pformat(f_data[0]) )
        del f_data_df
    return f_data
def latLngTuple(f_data, query_args) :
    """Prepare the lat/lon fields of ``f_data`` entries for a map listing.

    When ``map_list`` is requested in ``query_args`` : rounds coordinates to
    ``geo_precision`` decimals, optionally folds them into a ``latlng`` tuple
    (``as_latlng``), and drops entries without usable coordinates unless
    ``only_geocoded`` is set to False. Otherwise ``f_data`` is returned
    untouched.
    """
    if not query_args.get('map_list', False) :
        ### map listing not required : passthrough
        return f_data
    as_latlng = query_args.get('as_latlng', False )
    geo_precision = query_args.get('geo_precision', 6 )
    only_geocoded = query_args.get('only_geocoded', True )
    kept_entries = []
    for entry in f_data :
        entry_keys = list(entry.keys())
        has_geo = False
        if "lat" in entry_keys and "lon" in entry_keys :
            lat_val = entry["lat"]
            lon_val = entry["lon"]
            # source values may hold None or the string 'None'
            if lat_val not in (None, 'None') and lon_val not in (None, 'None') :
                has_geo = True
                entry["lat"] = round(float(lat_val), geo_precision)
                entry["lon"] = round(float(lon_val), geo_precision)
                if as_latlng :
                    entry["latlng"] = ( entry["lat"], entry["lon"])
                    entry = removeKey(entry, "lat")
                    entry = removeKey(entry, "lon")
        else :
            entry = removeKey(entry, "lat")
            entry = removeKey(entry, "lon")
        if only_geocoded == False or has_geo :
            kept_entries.append(entry)
    return kept_entries
def GetFData( document_type,
              can_access_complete, not_filtered,
              document, document_out, doc_oid, doc_open_level_show,
              team_oids, created_by_oid, roles_for_complete, user_role, user_oid,
              page_args, query_args,
              ) :
    # start_index, end_index
    # shuffle_seed, sort_by, slice_f_data,
    """
    refactoring getting f_data

    Populate ``document_out["data_raw"]["f_data"]`` for a "dso"/"dsi"
    document : fetch the rows from the db, strip unauthorized columns,
    shape them for maps, apply free-text search, sort, count and paginate.
    ``document_out`` is mutated in place and also returned.
    """
    ### get pagination arguments
    log.debug('page_args : \n%s', pformat(page_args) )
    page = page_args.get('page', 1 )
    per_page = page_args.get('per_page', 5 )
    sort_by = page_args.get('sort_by', None )
    descending = page_args.get('descending', False )
    shuffle_seed = page_args.get('shuffle_seed', None )
    # translate (page, per_page) into slice bounds (page numbers are 1-based)
    if page != 1 :
        start_index = ( page - 1 ) * per_page
        end_index = start_index + per_page
    else :
        start_index = 0
        end_index = per_page
    log.debug('start_index : %s', start_index )
    log.debug('end_index : %s', end_index )
    # ### get query arguments
    # log.debug('query_args : \n%s', pformat(query_args) )
    # only_f_data = query_args.get('only_f_data', False )
    # only_stats = query_args.get('only_stats', False )
    # q_normalize = query_args.get('normalize', False )
    slice_f_data = query_args.get('slice_f_data', True )
    # sort_by = query_args.get('sort_by', None )
    # descending = query_args.get('descending', False )
    # shuffle_seed = query_args.get('shuffle_seed', None )
    map_list = query_args.get('map_list', False )
    get_filters = query_args.get('get_filters', False )
    fields_to_return = query_args.get('fields_to_return', None )
    # append "f_data" if doc is in ["dsi", "dsr", "dsr"]
    if document_type in ["dsi", "dso"] and can_access_complete :
        ### override slice_f_data in case doc is dsi or dso to avoid overloading the api or the client
        slice_f_data = True
        log.debug( '...document_type : %s', document_type )
        # log.debug( '...document["data_raw"]["f_data"][:1] : \n%s', pformat(document["data_raw"]["f_data"][:1]) )
        # log.debug( '...document["data_raw"] : \n%s', pformat(document["data_raw"]) )
        ### copy f_data
        # NOTE(review): this inner test is always True given the outer
        # condition, so the "else" branch below is unreachable — confirm
        if document_type in ["dso", "dsi"] :
            ### strip f_data from not allowed fields
            not_filtered = False
            # expose the mongo "_id" as an extra column header ("sd_id")
            if document_type == "dso" :
                db_coll="dso_doc"
                document_out["data_raw"]["f_col_headers"].append(
                    {
                        'f_title' : '_id',
                        'open_level_show' : 'sd_id',
                        'f_type' : 'id'
                    }
                )
            elif document_type == "dsi" :
                db_coll="dsi_doc"
                document_out["data_raw"] = {"f_col_headers" : document["data_raw"]["f_col_headers"]}
                document_out["data_raw"]["f_col_headers"].append(
                    {
                        'f_coll_header_text': 'sd_id',
                        'f_coll_header_val': 'sd_id'
                    }
                )
            # fetch the rows from the dso_doc / dsi_doc collection
            document_out["data_raw"]["f_data"] = get_ds_docs(
                doc_oid,
                query_args,
                db_coll=db_coll,
                f_col_headers=document["data_raw"]["f_col_headers"]
            )
            # unless only unique filter values were requested, strip the
            # columns the user may not see, then shape lat/lon for maps
            if get_filters == False :
                document_out["data_raw"]["f_data"] = strip_f_data(
                    document_out["data_raw"],
                    doc_open_level_show,
                    team_oids,
                    created_by_oid,
                    roles_for_complete,
                    user_role,
                    user_oid,
                    document_type=document_type,
                    map_list=map_list,
                    fields_to_return=fields_to_return
                )
                document_out["data_raw"]["f_data"] = latLngTuple(document_out["data_raw"]["f_data"], query_args)
        else :
            document_out["data_raw"]["f_data"] = document["data_raw"]["f_data"]
        ### MAIN SEARCH QUERIES
        document_out["data_raw"]["f_data"] = search_f_data(
            document_out["data_raw"],
            query_args,
            not_filtered=not_filtered
        )
        ### sort results
        if sort_by != None :
            log.debug( 'sort_by : %s', sort_by )
            # NOT WORKING : document_out["data_raw"]["f_data"] = document_out["data_raw"]["f_data"].sort(key=operator.itemgetter(sort_by))
            # NOT WORKING WITH MISSING FIELDS : document_out["data_raw"]["f_data"] = sorted(document_out["data_raw"]["f_data"], key = lambda i: i[sort_by])
            document_out["data_raw"]["f_data"] = sort_list_of_dicts(
                document_out["data_raw"]["f_data"],
                sort_by,
                is_reverse=descending
            )
            log.debug( '...document_out sorted' )
        # add total of items within f_data in response (before slicing)
        document_out["data_raw"]["f_data_count"] = len(document_out["data_raw"]["f_data"])
        # slice f_data : pagination is skipped for maps and filter listings
        if slice_f_data == True and map_list == False and get_filters == False :
            log.debug( 'slice_f_data : %s', slice_f_data )
            document_out["data_raw"]["f_data"] = document_out["data_raw"]["f_data"][ start_index : end_index ]
        # only f_data
        # if only_f_data :
        #     document_out = document_out["data_raw"]["f_data"]
    return document_out
def check_if_prj_is_buildable (doc_prj) :
    """
    check if prj has enough mapping to be buildable

    A project is buildable when it has at least one dmt and one dsi, at least
    one open-level mapping and one dsi-to-dmf mapping, and every dsi of the
    project that appears in the dsi mapping shares at least one dmf with the
    open-level mapping.
    """
    print("-+- "*40)
    log.debug( "... check_if_prj_is_buildable ... " )
    is_buildable = False
    prj_dmt = doc_prj["datasets"]["dmt_list"]
    prj_dsi = doc_prj["datasets"]["dsi_list"]
    prj_map_open = doc_prj["mapping"]["dmf_to_open_level"]
    prj_map_dsi = doc_prj["mapping"]["dsi_to_dmf"]
    ### check if prj contains dmt and dsi
    if len(prj_dmt)>0 and len(prj_dsi)>0 :
        log.debug( "... lengths prj_dmt & prj_map_dsi : OK ... " )
        ### check if prj contains both kinds of mappings
        if len(prj_map_open)>0 and len(prj_map_dsi)>0 :
            log.debug( "... lengths prj_map_open & prj_map_dsi : OK ... " )
            ### set of unique dsi oids declared in the project
            prj_dsi_set = { d['oid_dsi'] for d in prj_dsi }
            log.debug( "... prj_dsi_set : \n%s : ", pformat(prj_dsi_set) )
            ### set of unique values of dmf from prj_map_open
            prj_map_dmf_set = { d['oid_dmf'] for d in prj_map_open }
            log.debug( "... prj_map_dmf_set : \n%s : ", pformat(prj_map_dmf_set) )
            ### group the mapped dmf oids per dsi oid from prj_map_dsi
            prj_map_dsi_dict = { d['oid_dsi'] : { "dmf_list" : [] } for d in prj_map_dsi }
            for d in prj_map_dsi :
                prj_map_dsi_dict[ d['oid_dsi'] ]["dmf_list"].append( d["oid_dmf"] )
            log.debug( "... prj_map_dsi_dict : \n%s : ", pformat(prj_map_dsi_dict) )
            ### set of unique values of dmf for each dsi from prj_map_dsi_dict
            prj_map_dsi_sets = { k : set(v['dmf_list']) for k,v in prj_map_dsi_dict.items() }
            log.debug( "... prj_map_dsi_sets : \n%s : ", pformat(prj_map_dsi_sets) )
            ### check if dmf in prj_map_dsi are in prj_map_open
            dsi_mapped_not_in_prj = 0
            dsi_mapped_but_no_dmf_mapped_in_prj_map = 0
            for dsi_oid, dsi_dmf_mapped_set in prj_map_dsi_sets.items() :
                log.debug( "... dsi_oid : %s ", dsi_oid )
                if dsi_oid in prj_dsi_set :
                    ### check if dsi_dmf_mapped_set contains at least 1 dmf from prj_map_dmf_set
                    log.debug( "... dsi_dmf_mapped_set : \n%s ", pformat(dsi_dmf_mapped_set) )
                    log.debug( "... prj_map_dmf_set : \n%s ", pformat(prj_map_dmf_set) )
                    intersection = dsi_dmf_mapped_set & prj_map_dmf_set
                    log.debug( "... intersection : %s ", intersection )
                    len_intersection = len(intersection)
                    if len_intersection == 0 :
                        dsi_mapped_but_no_dmf_mapped_in_prj_map += 1
                else :
                    # dsi present in the mapping but absent from the
                    # project's dsi list : counted but not blocking
                    dsi_mapped_not_in_prj += 1
            log.debug( "... dsi_mapped_not_in_prj : %s ", dsi_mapped_not_in_prj )
            log.debug( "... dsi_mapped_but_no_dmf_mapped_in_prj_map : %s ", dsi_mapped_but_no_dmf_mapped_in_prj_map )
            # buildable only if every project dsi in the mapping shares
            # at least one dmf with the open-level mapping
            if dsi_mapped_but_no_dmf_mapped_in_prj_map == 0 :
                is_buildable = True
    return is_buildable
| 32.804949 | 150 | 0.641878 |
ace1c4dd8c75c568c367701ef5ad79f680e85863 | 2,100 | py | Python | app/__init__.py | rstefko/whoogle-search | 98bfc19616a8287ac121d3df0254797c72c3d52a | [
"MIT"
] | null | null | null | app/__init__.py | rstefko/whoogle-search | 98bfc19616a8287ac121d3df0254797c72c3d52a | [
"MIT"
] | null | null | null | app/__init__.py | rstefko/whoogle-search | 98bfc19616a8287ac121d3df0254797c72c3d52a | [
"MIT"
] | null | null | null | from app.request import send_tor_signal
from app.utils.session_utils import generate_user_keys
from app.utils.gen_ddg_bangs import gen_bangs_json
from flask import Flask
from flask_session import Session
import json
import os
from stem import Signal
# Flask app serving static assets from the package's ./static directory
app = Flask(__name__, static_folder=os.path.dirname(
    os.path.abspath(__file__)) + '/static')
app.user_elements = {}
app.default_key_set = generate_user_keys()
# NOTE(review): empty at startup — presumably filled by request handlers
app.no_cookie_ips = []
# fresh random secret on every start : server-side sessions on disk
app.config['SECRET_KEY'] = os.urandom(32)
app.config['SESSION_TYPE'] = 'filesystem'
app.config['VERSION_NUMBER'] = '0.3.1'
app.config['APP_ROOT'] = os.getenv(
    'APP_ROOT',
    os.path.dirname(os.path.abspath(__file__)))
# static language / country lists shipped under misc/
app.config['LANGUAGES'] = json.load(open(
    os.path.join(app.config['APP_ROOT'], 'misc/languages.json')))
app.config['COUNTRIES'] = json.load(open(
    os.path.join(app.config['APP_ROOT'], 'misc/countries.json')))
app.config['STATIC_FOLDER'] = os.getenv(
    'STATIC_FOLDER',
    os.path.join(app.config['APP_ROOT'], 'static'))
# config / session / bangs live under CONFIG_VOLUME when it is set
# (e.g. a docker volume), otherwise under the static folder
app.config['CONFIG_PATH'] = os.getenv(
    'CONFIG_VOLUME',
    os.path.join(app.config['STATIC_FOLDER'], 'config'))
app.config['DEFAULT_CONFIG'] = os.path.join(
    app.config['CONFIG_PATH'],
    'config.json')
app.config['SESSION_FILE_DIR'] = os.path.join(
    app.config['CONFIG_PATH'],
    'session')
app.config['BANG_PATH'] = os.getenv(
    'CONFIG_VOLUME',
    os.path.join(app.config['STATIC_FOLDER'], 'bangs'))
app.config['BANG_FILE'] = os.path.join(
    app.config['BANG_PATH'],
    'bangs.json')
# create the writable directories on first run
if not os.path.exists(app.config['CONFIG_PATH']):
    os.makedirs(app.config['CONFIG_PATH'])
if not os.path.exists(app.config['SESSION_FILE_DIR']):
    os.makedirs(app.config['SESSION_FILE_DIR'])
# Generate DDG bang filter, and create path if it doesn't exist yet
if not os.path.exists(app.config['BANG_PATH']):
    os.makedirs(app.config['BANG_PATH'])
if not os.path.exists(app.config['BANG_FILE']):
    gen_bangs_json(app.config['BANG_FILE'])
Session(app)
# Attempt to acquire tor identity, to determine if Tor config is available
send_tor_signal(Signal.HEARTBEAT)
# imported last so route handlers see the fully-configured app object
from app import routes # noqa
| 33.870968 | 74 | 0.721905 |
ace1c4e300505879c28e54a4a914fc58dfd0b51a | 19,293 | py | Python | bundle/vim-python-mode/pymode/libs/pylama/lint/pylama_pylint/astroid/as_string.py | ninegrid/dotfiles-vim | 4604f8a2e114cb2e98d5d79f2f41048c4f564b02 | [
"Unlicense"
] | null | null | null | bundle/vim-python-mode/pymode/libs/pylama/lint/pylama_pylint/astroid/as_string.py | ninegrid/dotfiles-vim | 4604f8a2e114cb2e98d5d79f2f41048c4f564b02 | [
"Unlicense"
] | null | null | null | bundle/vim-python-mode/pymode/libs/pylama/lint/pylama_pylint/astroid/as_string.py | ninegrid/dotfiles-vim | 4604f8a2e114cb2e98d5d79f2f41048c4f564b02 | [
"Unlicense"
] | 1 | 2020-10-01T18:51:49.000Z | 2020-10-01T18:51:49.000Z | # copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of astroid.
#
# astroid is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# astroid is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with astroid. If not, see <http://www.gnu.org/licenses/>.
"""This module renders Astroid nodes as string:
* :func:`to_code` function return equivalent (hopefuly valid) python string
* :func:`dump` function return an internal representation of nodes found
in the tree, useful for debugging or understanding the tree structure
"""
import sys
INDENT = ' ' # 4 spaces ; keep indentation variable
def dump(node, ids=False):
    """Return a readable, indented text rendering of an astroid tree.

    :param ids: if true, also include each node's id (useful for debugging)
    """
    lines = []
    _repr_tree(node, lines, ids=ids)
    return "\n".join(lines)
def _repr_tree(node, result, indent='', _done=None, ids=False):
    """Append a one-line-per-node text rendering of *node* to *result*.

    Recurses through ``_astroid_fields``; ``_done`` guards against cycles.
    """
    if _done is None:
        _done = set()
    # only astroid nodes expose _astroid_fields — ignore anything else
    if not hasattr(node, '_astroid_fields'):
        return
    if node in _done:
        result.append(indent + 'loop in tree: %s' % node)
        return
    _done.add(node)
    label = str(node)
    if ids:
        label += ' . \t%x' % id(node)
    result.append(indent + label)
    indent += INDENT
    for field in node._astroid_fields:
        value = getattr(node, field)
        if isinstance(value, (list, tuple)):
            result.append(indent + field + " = [")
            for child in value:
                if isinstance(child, (list, tuple)):
                    # special case for Dict items: (key, value) pairs
                    _repr_tree(child[0], result, indent, _done, ids)
                    _repr_tree(child[1], result, indent, _done, ids)
                    result.append(indent + ',')
                else:
                    _repr_tree(child, result, indent, _done, ids)
            result.append(indent + "]")
        else:
            result.append(indent + field + " = ")
            _repr_tree(value, result, indent, _done, ids)
class AsStringVisitor(object):
    """Visitor to render an Astroid node as a valid python code string"""
    # Each visit_<nodename> method renders one node type; dispatch happens
    # through node.accept(self) (see __call__), which recurses into children.
    def __call__(self, node):
        """Makes this visitor behave as a simple function"""
        return node.accept(self)
    def _stmt_list(self, stmts):
        """return a list of nodes to string"""
        stmts = '\n'.join([nstr for nstr in [n.accept(self) for n in stmts] if nstr])
        return INDENT + stmts.replace('\n', '\n'+INDENT)
    ## visit_<node> methods ###########################################
    def visit_arguments(self, node):
        """return an astroid.Arguments node as string"""
        return node.format_args()
    def visit_assattr(self, node):
        """return an astroid.AssAttr node as string"""
        return self.visit_getattr(node)
    def visit_assert(self, node):
        """return an astroid.Assert node as string"""
        if node.fail:
            return 'assert %s, %s' % (node.test.accept(self),
                                      node.fail.accept(self))
        return 'assert %s' % node.test.accept(self)
    def visit_assname(self, node):
        """return an astroid.AssName node as string"""
        return node.name
    def visit_assign(self, node):
        """return an astroid.Assign node as string"""
        lhs = ' = '.join([n.accept(self) for n in node.targets])
        return '%s = %s' % (lhs, node.value.accept(self))
    def visit_augassign(self, node):
        """return an astroid.AugAssign node as string"""
        return '%s %s %s' % (node.target.accept(self), node.op, node.value.accept(self))
    def visit_backquote(self, node):
        """return an astroid.Backquote node as string"""
        # python 2 only: `x` repr syntax
        return '`%s`' % node.value.accept(self)
    def visit_binop(self, node):
        """return an astroid.BinOp node as string"""
        # operands are parenthesized so no precedence info is needed
        return '(%s) %s (%s)' % (node.left.accept(self), node.op, node.right.accept(self))
    def visit_boolop(self, node):
        """return an astroid.BoolOp node as string"""
        return (' %s ' % node.op).join(['(%s)' % n.accept(self)
                                        for n in node.values])
    def visit_break(self, node):
        """return an astroid.Break node as string"""
        return 'break'
    def visit_callfunc(self, node):
        """return an astroid.CallFunc node as string"""
        expr_str = node.func.accept(self)
        args = [arg.accept(self) for arg in node.args]
        # *args / **kwargs go last, after the positional/keyword arguments
        if node.starargs:
            args.append( '*' + node.starargs.accept(self))
        if node.kwargs:
            args.append( '**' + node.kwargs.accept(self))
        return '%s(%s)' % (expr_str, ', '.join(args))
    def visit_class(self, node):
        """return an astroid.Class node as string"""
        decorate = node.decorators and node.decorators.accept(self) or ''
        bases = ', '.join([n.accept(self) for n in node.bases])
        # python 2 has no metaclass keyword in the class header; python 3
        # renders the metaclass as a keyword argument after the bases
        if sys.version_info[0] == 2:
            bases = bases and '(%s)' % bases or ''
        else:
            metaclass = node.metaclass()
            if metaclass:
                if bases:
                    bases = '(%s, metaclass=%s)' % (bases, metaclass.name)
                else:
                    bases = '(metaclass=%s)' % metaclass.name
            else:
                bases = bases and '(%s)' % bases or ''
        docs = node.doc and '\n%s"""%s"""' % (INDENT, node.doc) or ''
        return '\n\n%sclass %s%s:%s\n%s\n' % (decorate, node.name, bases, docs,
                                              self._stmt_list( node.body))
    def visit_compare(self, node):
        """return an astroid.Compare node as string"""
        rhs_str = ' '.join(['%s %s' % (op, expr.accept(self))
                            for op, expr in node.ops])
        return '%s %s' % (node.left.accept(self), rhs_str)
    def visit_comprehension(self, node):
        """return an astroid.Comprehension node as string"""
        ifs = ''.join([ ' if %s' % n.accept(self) for n in node.ifs])
        return 'for %s in %s%s' % (node.target.accept(self),
                                   node.iter.accept(self), ifs )
    def visit_const(self, node):
        """return an astroid.Const node as string"""
        return repr(node.value)
    def visit_continue(self, node):
        """return an astroid.Continue node as string"""
        return 'continue'
    def visit_delete(self, node): # XXX check if correct
        """return an astroid.Delete node as string"""
        return 'del %s' % ', '.join([child.accept(self)
                                     for child in node.targets])
    def visit_delattr(self, node):
        """return an astroid.DelAttr node as string"""
        return self.visit_getattr(node)
    def visit_delname(self, node):
        """return an astroid.DelName node as string"""
        return node.name
    def visit_decorators(self, node):
        """return an astroid.Decorators node as string"""
        return '@%s\n' % '\n@'.join([item.accept(self) for item in node.nodes])
    def visit_dict(self, node):
        """return an astroid.Dict node as string"""
        return '{%s}' % ', '.join(['%s: %s' % (key.accept(self),
            value.accept(self)) for key, value in node.items])
    def visit_dictcomp(self, node):
        """return an astroid.DictComp node as string"""
        return '{%s: %s %s}' % (node.key.accept(self), node.value.accept(self),
                                ' '.join([n.accept(self) for n in node.generators]))
    def visit_discard(self, node):
        """return an astroid.Discard node as string"""
        return node.value.accept(self)
    def visit_emptynode(self, node):
        """dummy method for visiting an Empty node"""
        return ''
    def visit_excepthandler(self, node):
        # python 2 syntax: "except Type, name" — overridden for python 3
        # ("except Type as name") by AsStringVisitor3k below
        if node.type:
            if node.name:
                excs = 'except %s, %s' % (node.type.accept(self),
                                          node.name.accept(self))
            else:
                excs = 'except %s' % node.type.accept(self)
        else:
            excs = 'except'
        return '%s:\n%s' % (excs, self._stmt_list(node.body))
    def visit_ellipsis(self, node):
        """return an astroid.Ellipsis node as string"""
        return '...'
    def visit_empty(self, node):
        """return an Empty node as string"""
        return ''
    def visit_exec(self, node):
        """return an astroid.Exec node as string"""
        # python 2 only: exec statement with optional globals/locals
        if node.locals:
            return 'exec %s in %s, %s' % (node.expr.accept(self),
                                          node.locals.accept(self),
                                          node.globals.accept(self))
        if node.globals:
            return 'exec %s in %s' % (node.expr.accept(self),
                                      node.globals.accept(self))
        return 'exec %s' % node.expr.accept(self)
    def visit_extslice(self, node):
        """return an astroid.ExtSlice node as string"""
        return ','.join( [dim.accept(self) for dim in node.dims] )
    def visit_for(self, node):
        """return an astroid.For node as string"""
        fors = 'for %s in %s:\n%s' % (node.target.accept(self),
                                      node.iter.accept(self),
                                      self._stmt_list( node.body))
        if node.orelse:
            fors = '%s\nelse:\n%s' % (fors, self._stmt_list(node.orelse))
        return fors
    def visit_from(self, node):
        """return an astroid.From node as string"""
        # node.level encodes relative-import dots ("from ..pkg import x")
        return 'from %s import %s' % ('.' * (node.level or 0) + node.modname,
                                      _import_string(node.names))
    def visit_function(self, node):
        """return an astroid.Function node as string"""
        decorate = node.decorators and node.decorators.accept(self) or ''
        docs = node.doc and '\n%s"""%s"""' % (INDENT, node.doc) or ''
        return '\n%sdef %s(%s):%s\n%s' % (decorate, node.name, node.args.accept(self),
                                          docs, self._stmt_list(node.body))
    def visit_genexpr(self, node):
        """return an astroid.GenExpr node as string"""
        return '(%s %s)' % (node.elt.accept(self), ' '.join([n.accept(self)
                                                             for n in node.generators]))
    def visit_getattr(self, node):
        """return an astroid.Getattr node as string"""
        return '%s.%s' % (node.expr.accept(self), node.attrname)
    def visit_global(self, node):
        """return an astroid.Global node as string"""
        return 'global %s' % ', '.join(node.names)
    def visit_if(self, node):
        """return an astroid.If node as string"""
        ifs = ['if %s:\n%s' % (node.test.accept(self), self._stmt_list(node.body))]
        if node.orelse:# XXX use elif ???
            ifs.append('else:\n%s' % self._stmt_list(node.orelse))
        return '\n'.join(ifs)
    def visit_ifexp(self, node):
        """return an astroid.IfExp node as string"""
        return '%s if %s else %s' % (node.body.accept(self),
                                     node.test.accept(self), node.orelse.accept(self))
    def visit_import(self, node):
        """return an astroid.Import node as string"""
        return 'import %s' % _import_string(node.names)
    def visit_keyword(self, node):
        """return an astroid.Keyword node as string"""
        return '%s=%s' % (node.arg, node.value.accept(self))
    def visit_lambda(self, node):
        """return an astroid.Lambda node as string"""
        return 'lambda %s: %s' % (node.args.accept(self), node.body.accept(self))
    def visit_list(self, node):
        """return an astroid.List node as string"""
        return '[%s]' % ', '.join([child.accept(self) for child in node.elts])
    def visit_listcomp(self, node):
        """return an astroid.ListComp node as string"""
        return '[%s %s]' % (node.elt.accept(self), ' '.join([n.accept(self)
                                                             for n in node.generators]))
    def visit_module(self, node):
        """return an astroid.Module node as string"""
        docs = node.doc and '"""%s"""\n\n' % node.doc or ''
        return docs + '\n'.join([n.accept(self) for n in node.body]) + '\n\n'
    def visit_name(self, node):
        """return an astroid.Name node as string"""
        return node.name
    def visit_pass(self, node):
        """return an astroid.Pass node as string"""
        return 'pass'
    def visit_print(self, node):
        """return an astroid.Print node as string"""
        # python 2 only: print statement (trailing comma suppresses newline)
        nodes = ', '.join([n.accept(self) for n in node.values])
        if not node.nl:
            nodes = '%s,' % nodes
        if node.dest:
            return 'print >> %s, %s' % (node.dest.accept(self), nodes)
        return 'print %s' % nodes
    def visit_raise(self, node):
        """return an astroid.Raise node as string"""
        # python 2 three-expression raise — overridden by AsStringVisitor3k
        if node.exc:
            if node.inst:
                if node.tback:
                    return 'raise %s, %s, %s' % (node.exc.accept(self),
                                                 node.inst.accept(self),
                                                 node.tback.accept(self))
                return 'raise %s, %s' % (node.exc.accept(self),
                                         node.inst.accept(self))
            return 'raise %s' % node.exc.accept(self)
        return 'raise'
    def visit_return(self, node):
        """return an astroid.Return node as string"""
        if node.value:
            return 'return %s' % node.value.accept(self)
        else:
            return 'return'
    def visit_index(self, node):
        """return a astroid.Index node as string"""
        return node.value.accept(self)
    def visit_set(self, node):
        """return an astroid.Set node as string"""
        return '{%s}' % ', '.join([child.accept(self) for child in node.elts])
    def visit_setcomp(self, node):
        """return an astroid.SetComp node as string"""
        return '{%s %s}' % (node.elt.accept(self), ' '.join([n.accept(self)
                                                             for n in node.generators]))
    def visit_slice(self, node):
        """return a astroid.Slice node as string"""
        lower = node.lower and node.lower.accept(self) or ''
        upper = node.upper and node.upper.accept(self) or ''
        step = node.step and node.step.accept(self) or ''
        if step:
            return '%s:%s:%s' % (lower, upper, step)
        return '%s:%s' % (lower, upper)
    def visit_subscript(self, node):
        """return an astroid.Subscript node as string"""
        return '%s[%s]' % (node.value.accept(self), node.slice.accept(self))
    def visit_tryexcept(self, node):
        """return an astroid.TryExcept node as string"""
        trys = ['try:\n%s' % self._stmt_list( node.body)]
        for handler in node.handlers:
            trys.append(handler.accept(self))
        if node.orelse:
            trys.append('else:\n%s' % self._stmt_list(node.orelse))
        return '\n'.join(trys)
    def visit_tryfinally(self, node):
        """return an astroid.TryFinally node as string"""
        return 'try:\n%s\nfinally:\n%s' % (self._stmt_list( node.body),
                                           self._stmt_list(node.finalbody))
    def visit_tuple(self, node):
        """return an astroid.Tuple node as string"""
        # one-element tuples need the trailing comma to stay tuples
        if len(node.elts) == 1:
            return '(%s, )' % node.elts[0].accept(self)
        return '(%s)' % ', '.join([child.accept(self) for child in node.elts])
    def visit_unaryop(self, node):
        """return an astroid.UnaryOp node as string"""
        # 'not' needs a separating space, unlike '-' / '+' / '~'
        if node.op == 'not':
            operator = 'not '
        else:
            operator = node.op
        return '%s%s' % (operator, node.operand.accept(self))
    def visit_while(self, node):
        """return an astroid.While node as string"""
        whiles = 'while %s:\n%s' % (node.test.accept(self),
                                    self._stmt_list(node.body))
        if node.orelse:
            whiles = '%s\nelse:\n%s' % (whiles, self._stmt_list(node.orelse))
        return whiles
    def visit_with(self, node): # 'with' without 'as' is possible
        """return an astroid.With node as string"""
        items = ', '.join(('(%s)' % expr.accept(self)) +
                          (vars and ' as (%s)' % (vars.accept(self)) or '')
                          for expr, vars in node.items)
        return 'with %s:\n%s' % (items, self._stmt_list( node.body))
    def visit_yield(self, node):
        """yield an ast.Yield node as string"""
        yi_val = node.value and (" " + node.value.accept(self)) or ""
        expr = 'yield' + yi_val
        # a yield used as an expression (not a statement) must be parenthesized
        if node.parent.is_statement:
            return expr
        else:
            return "(%s)" % (expr,)
class AsStringVisitor3k(AsStringVisitor):
    """Python-3-specific rendering: overrides a few AsStringVisitor methods."""

    def visit_excepthandler(self, node):
        """Render an ``except`` clause (py3 ``as`` spelling) with its body."""
        if not node.type:
            clause = 'except'
        elif node.name:
            clause = 'except %s as %s' % (node.type.accept(self),
                                          node.name.accept(self))
        else:
            clause = 'except %s' % node.type.accept(self)
        return '%s:\n%s' % (clause, self._stmt_list(node.body))

    def visit_nonlocal(self, node):
        """Render an astroid.Nonlocal node as source text."""
        return 'nonlocal %s' % ', '.join(node.names)

    def visit_raise(self, node):
        """Render an astroid.Raise node, including ``from`` cause chaining."""
        if not node.exc:
            return 'raise'
        if node.cause:
            return 'raise %s from %s' % (node.exc.accept(self),
                                         node.cause.accept(self))
        return 'raise %s' % node.exc.accept(self)

    def visit_starred(self, node):
        """Render a Starred node (``*expr``) as source text."""
        return "*" + node.value.accept(self)

    def visit_yieldfrom(self, node):
        """Render an astroid.YieldFrom node; parenthesized in expression position."""
        suffix = (" " + node.value.accept(self)) if node.value else ""
        expr = 'yield from' + suffix
        if node.parent.is_statement:
            return expr
        return "(%s)" % (expr,)
def _import_string(names):
"""return a list of (name, asname) formatted as a string"""
_names = []
for name, asname in names:
if asname is not None:
_names.append('%s as %s' % (name, asname))
else:
_names.append(name)
return ', '.join(_names)
if sys.version_info >= (3, 0):
    # On Python 3, the public visitor must handle py3-only syntax
    # (except..as, nonlocal, starred, yield from), so rebind the name.
    AsStringVisitor = AsStringVisitor3k
# this visitor is stateless, thus it can be reused
to_code = AsStringVisitor()
| 38.818913 | 90 | 0.554139 |
ace1c54612f0415c2594f16dffb6b5b756853081 | 1,060 | py | Python | striga-compiler.py | ateska/striga | 451b5d9421e2e5fdf49b94c8f3d76e576abc5923 | [
"MIT"
] | null | null | null | striga-compiler.py | ateska/striga | 451b5d9421e2e5fdf49b94c8f3d76e576abc5923 | [
"MIT"
] | null | null | null | striga-compiler.py | ateska/striga | 451b5d9421e2e5fdf49b94c8f3d76e576abc5923 | [
"MIT"
] | null | null | null | #!/usr/bin/env strigapython
import sys, logging as L
import striga.compiler.compilerapp
import striga.core.exception
# Command-line entry point for the Striga compiler (Python 2 syntax):
# run the compiler application and map failures onto exit codes
# (0 = success, 2 = error).
if __name__ == '__main__':
	try:
		#TODO: Uncomment this for production
		# try:
		# import psyco
		# psyco.full()
		# except ImportError:
		# pass
		app = striga.compiler.compilerapp.CompilerApplication()
		app.Run()
		sys.exit(0)
	except striga.core.exception.StrigaConfigurationError, e:
		# Configuration problems: print a highlighted banner and exit 2.
		sys.stderr.write('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n')
		sys.stderr.write(str(e)+'\n')
		sys.stderr.write('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n')
		sys.exit(2)
	except striga.core.exception.StrigaFatalError, e:
		# Fatal runtime errors: same banner treatment, exit 2.
		sys.stderr.write('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n')
		sys.stderr.write(str(e)+'\n')
		sys.stderr.write('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n')
		sys.exit(2)
	except Exception, e:
		# Unexpected exceptions: print the banner, then re-raise so the
		# traceback is visible (the sys.exit(2) below is unreachable).
		sys.stderr.write('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n')
		sys.stderr.write(str(e)+'\n')
		sys.stderr.write('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n')
		#TODO: Write stack ...
		raise
		sys.exit(2)
	#TODO: More exception handling
| 24.651163 | 61 | 0.542453 |
ace1c56c70566355714c439e057b29cfd746633b | 387 | py | Python | test_app/urls.py | yurtaev/django-httplog | 41ab3a4b390de18d91e50ad621550b36fef2c310 | [
"BSD-3-Clause"
] | null | null | null | test_app/urls.py | yurtaev/django-httplog | 41ab3a4b390de18d91e50ad621550b36fef2c310 | [
"BSD-3-Clause"
] | null | null | null | test_app/urls.py | yurtaev/django-httplog | 41ab3a4b390de18d91e50ad621550b36fef2c310 | [
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
try:
from django.conf.urls import patterns, url, include
except ImportError:
from django.conf.urls.defaults import patterns, url, include
from django.http import HttpResponse
def dummy(request):
    """Placeholder view: always answers with an empty HttpResponse."""
    response = HttpResponse()
    return response
# URL table: any "/api/.../" path resolves to the dummy view above; every
# other path is delegated to the stock django.contrib.auth URL conf.
urlpatterns = patterns('',
    url('^api/.+/$', dummy, name='dummy'),
    url('', include('django.contrib.auth.urls', app_name='auth'))
)
| 22.764706 | 65 | 0.69509 |
ace1c6b9f7b7750c9fcb8c378bfe915181904173 | 6,621 | py | Python | utils/external/faster_rcnn_tensorflow/utility/proposal_target_layer.py | HatsuneMiku4/PocketFlow | 285edbd5529cb00d6d51e728f9b468604b1b98b9 | [
"Apache-2.0"
] | 2,724 | 2018-11-02T03:00:00.000Z | 2022-03-27T15:20:18.000Z | utils/external/faster_rcnn_tensorflow/utility/proposal_target_layer.py | HatsuneMiku4/PocketFlow | 285edbd5529cb00d6d51e728f9b468604b1b98b9 | [
"Apache-2.0"
] | 292 | 2018-11-02T14:59:40.000Z | 2021-08-14T11:59:53.000Z | utils/external/faster_rcnn_tensorflow/utility/proposal_target_layer.py | HatsuneMiku4/PocketFlow | 285edbd5529cb00d6d51e728f9b468604b1b98b9 | [
"Apache-2.0"
] | 521 | 2018-11-02T02:31:05.000Z | 2022-03-27T05:13:04.000Z | # --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from utils.external.faster_rcnn_tensorflow.configs import cfgs
import numpy as np
import numpy.random as npr
from utils.external.faster_rcnn_tensorflow.utility import encode_and_decode
def bbox_overlaps(boxes, query_boxes):
    """
    Compute the IoU overlap of every (box, query_box) pair.

    Parameters
    ----------
    boxes: (N, 4) ndarray of float, boxes as (x1, y1, x2, y2)
    query_boxes: (K, 4) ndarray of float, boxes as (x1, y1, x2, y2)

    Returns
    -------
    overlaps: (N, K) ndarray of overlap between boxes and query_boxes

    Notes
    -----
    Areas use the integer-pixel convention (x2 - x1 + 1); pairs with no
    positive intersection get overlap 0. Fully vectorized replacement for
    the original O(N*K) pure-Python double loop — same results.
    """
    boxes = np.asarray(boxes, dtype=float)
    query_boxes = np.asarray(query_boxes, dtype=float)
    # Pairwise intersection extents via broadcasting, each of shape (N, K).
    iw = (np.minimum(boxes[:, None, 2], query_boxes[None, :, 2]) -
          np.maximum(boxes[:, None, 0], query_boxes[None, :, 0]) + 1)
    ih = (np.minimum(boxes[:, None, 3], query_boxes[None, :, 3]) -
          np.maximum(boxes[:, None, 1], query_boxes[None, :, 1]) + 1)
    overlaps = np.zeros((boxes.shape[0], query_boxes.shape[0]))
    valid = (iw > 0) & (ih > 0)
    if np.any(valid):
        box_areas = ((boxes[:, 2] - boxes[:, 0] + 1) *
                     (boxes[:, 3] - boxes[:, 1] + 1))
        query_areas = ((query_boxes[:, 2] - query_boxes[:, 0] + 1) *
                       (query_boxes[:, 3] - query_boxes[:, 1] + 1))
        inter = iw * ih
        union = box_areas[:, None] + query_areas[None, :] - inter
        # Divide only where the intersection is positive, like the original.
        overlaps[valid] = inter[valid] / union[valid]
    return overlaps
def proposal_target_layer(rpn_rois, gt_boxes):
    """
    Assign object detection proposals to ground-truth targets. Produces
    proposal classification labels and bounding-box regression targets.

    rpn_rois: (x1, y1, x2, y2) proposals coming from the RPN.
    gt_boxes: (x1, y1, x2, y2, label) ground-truth boxes.
    """
    # Optionally add the ground-truth boxes themselves to the proposal pool.
    if cfgs.ADD_GTBOXES_TO_TRAIN:
        candidate_rois = np.vstack((rpn_rois, gt_boxes[:, :-1]))
    else:
        candidate_rois = rpn_rois

    # A minibatch size of -1 means "use every proposal".
    if cfgs.FAST_RCNN_MINIBATCH_SIZE == -1:
        rois_per_image = np.inf
    else:
        rois_per_image = cfgs.FAST_RCNN_MINIBATCH_SIZE
    fg_rois_per_image = np.round(cfgs.FAST_RCNN_POSITIVE_RATE * rois_per_image)

    # Sample rois with classification labels and bounding-box regression targets.
    labels, rois, bbox_targets = _sample_rois(
        candidate_rois, gt_boxes, fg_rois_per_image, rois_per_image,
        cfgs.CLASS_NUM + 1)

    rois = rois.reshape(-1, 4)
    labels = labels.reshape(-1)
    bbox_targets = bbox_targets.reshape(-1, (cfgs.CLASS_NUM + 1) * 4)
    return rois, labels, bbox_targets
def _get_bbox_regression_labels(bbox_target_data, num_classes):
"""Bounding-box regression targets (bbox_target_data) are stored in a
compact form N x (class, tx, ty, tw, th)
This function expands those targets into the 4-of-4*K representation used
by the network (i.e. only one class has non-zero targets).
Returns:
bbox_target (ndarray): N x 4K blob of regression targets
"""
clss = bbox_target_data[:, 0]
bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32)
inds = np.where(clss > 0)[0]
for ind in inds:
cls = clss[ind]
start = int(4 * cls)
end = start + 4
bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
return bbox_targets
def _compute_targets(ex_rois, gt_rois, labels):
    """Compute per-roi regression targets for an image.

    Returns rows of [label, tx, ty, tw, th] as float32.
    """
    assert ex_rois.shape[0] == gt_rois.shape[0]
    assert ex_rois.shape[1] == 4
    assert gt_rois.shape[1] == 4

    deltas = encode_and_decode.encode_boxes(unencode_boxes=gt_rois,
                                            reference_boxes=ex_rois,
                                            scale_factors=cfgs.ROI_SCALE_FACTORS)
    stacked = np.hstack((labels[:, np.newaxis], deltas))
    return stacked.astype(np.float32, copy=False)
def _sample_rois(all_rois, gt_boxes, fg_rois_per_image,
                 rois_per_image, num_classes):
    """Generate a random sample of RoIs comprising foreground and background
    examples.
    all_rois shape is [-1, 4]
    gt_boxes shape is [-1, 5]. that is [x1, y1, x2, y2, label]
    """
    # overlaps: (rois x gt_boxes)
    overlaps = bbox_overlaps(all_rois,gt_boxes)
    # Each roi is matched to its best-overlapping gt box and inherits its label.
    gt_assignment = overlaps.argmax(axis=1)
    max_overlaps = overlaps.max(axis=1)
    labels = gt_boxes[gt_assignment, -1]
    # Select foreground RoIs as those with >= FG_THRESH overlap
    fg_inds = np.where(max_overlaps >= cfgs.FAST_RCNN_IOU_POSITIVE_THRESHOLD)[0]
    # Guard against the case when an image has fewer than fg_rois_per_image
    # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
    bg_inds = np.where((max_overlaps < cfgs.FAST_RCNN_IOU_POSITIVE_THRESHOLD) &
                       (max_overlaps >= cfgs.FAST_RCNN_IOU_NEGATIVE_THRESHOLD))[0]
    # print("first fileter, fg_size: {} || bg_size: {}".format(fg_inds.shape, bg_inds.shape))
    # Guard against the case when an image has fewer than fg_rois_per_image
    # foreground RoIs
    fg_rois_per_this_image = min(fg_rois_per_image, fg_inds.size)
    # Sample foreground regions without replacement
    if fg_inds.size > 0:
        fg_inds = npr.choice(fg_inds, size=int(fg_rois_per_this_image), replace=False)
    # Compute number of background RoIs to take from this image (guarding
    # against there being fewer than desired)
    bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
    bg_rois_per_this_image = min(bg_rois_per_this_image, bg_inds.size)
    # Sample background regions without replacement
    if bg_inds.size > 0:
        bg_inds = npr.choice(bg_inds, size=int(bg_rois_per_this_image), replace=False)
    # print("second fileter, fg_size: {} || bg_size: {}".format(fg_inds.shape, bg_inds.shape))
    # The indices that we're selecting (both fg and bg)
    # NOTE(review): if neither threshold selects anything, keep_inds is empty
    # and an empty sample is returned — presumably handled upstream; confirm.
    keep_inds = np.append(fg_inds, bg_inds)
    # Select sampled values from various arrays:
    labels = labels[keep_inds]
    # Clamp labels for the background RoIs to 0
    labels[int(fg_rois_per_this_image):] = 0
    rois = all_rois[keep_inds]
    # Regression targets are computed against each kept roi's assigned gt box.
    bbox_target_data = _compute_targets(
        rois, gt_boxes[gt_assignment[keep_inds], :-1], labels)

    bbox_targets = \
        _get_bbox_regression_labels(bbox_target_data, num_classes)

    return labels, rois, bbox_targets
| 37.196629 | 101 | 0.643558 |
ace1c7504e9e5ded7c5465093743e967717d65e1 | 2,580 | py | Python | venv/Lib/site-packages/werkzeug/middleware/dispatcher.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 4,200 | 2016-03-29T16:32:32.000Z | 2022-03-30T15:37:03.000Z | venv/Lib/site-packages/werkzeug/middleware/dispatcher.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 1,203 | 2016-03-29T15:46:57.000Z | 2022-03-31T21:15:00.000Z | venv/Lib/site-packages/werkzeug/middleware/dispatcher.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 1,403 | 2016-03-29T16:50:37.000Z | 2022-03-29T09:18:38.000Z | """
Application Dispatcher
======================
This middleware creates a single WSGI application that dispatches to
multiple other WSGI applications mounted at different URL paths.
A common example is writing a Single Page Application, where you have a
backend API and a frontend written in JavaScript that does the routing
in the browser rather than requesting different pages from the server.
The frontend is a single HTML and JS file that should be served for any
path besides "/api".
This example dispatches to an API app under "/api", an admin app
under "/admin", and an app that serves frontend files for all other
requests::
app = DispatcherMiddleware(serve_frontend, {
'/api': api_app,
'/admin': admin_app,
})
In production, you might instead handle this at the HTTP server level,
serving files or proxying to application servers based on location. The
API and admin apps would each be deployed with a separate WSGI server,
and the static files would be served directly by the HTTP server.
.. autoclass:: DispatcherMiddleware
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import typing as t
if t.TYPE_CHECKING:
from _typeshed.wsgi import StartResponse
from _typeshed.wsgi import WSGIApplication
from _typeshed.wsgi import WSGIEnvironment
class DispatcherMiddleware:
    """Combine multiple applications as a single WSGI application.

    Requests are dispatched to an application based on the path it is
    mounted under.

    :param app: The WSGI application to dispatch to if the request
        doesn't match a mounted path.
    :param mounts: Maps path prefixes to applications for dispatching.
    """

    def __init__(
        self,
        app: "WSGIApplication",
        mounts: t.Optional[t.Dict[str, "WSGIApplication"]] = None,
    ) -> None:
        self.app = app
        self.mounts = mounts or {}

    def __call__(
        self, environ: "WSGIEnvironment", start_response: "StartResponse"
    ) -> t.Iterable[bytes]:
        # Peel path segments off the right end of PATH_INFO until the
        # remaining prefix matches a mount point.
        prefix = environ.get("PATH_INFO", "")
        remainder = ""
        target = None

        while "/" in prefix:
            if prefix in self.mounts:
                target = self.mounts[prefix]
                break

            prefix, segment = prefix.rsplit("/", 1)
            remainder = f"/{segment}{remainder}"

        if target is None:
            # No mounted prefix matched; fall back to the default app
            # (or an exact mount on the leftover prefix, e.g. "").
            target = self.mounts.get(prefix, self.app)

        # Shift the matched prefix from PATH_INFO into SCRIPT_NAME, per WSGI.
        environ["SCRIPT_NAME"] = environ.get("SCRIPT_NAME", "") + prefix
        environ["PATH_INFO"] = remainder
        return target(environ, start_response)
| 32.658228 | 73 | 0.683333 |
ace1c7a80ca8d91bd04900de126fd094fa591974 | 870 | py | Python | src/scripts/finn-ubrukte-språktekster.py | navikt/familie-ba-soknad | b231916f4c9a5eaeea83dca7546bd42f85288aa4 | [
"MIT"
] | null | null | null | src/scripts/finn-ubrukte-språktekster.py | navikt/familie-ba-soknad | b231916f4c9a5eaeea83dca7546bd42f85288aa4 | [
"MIT"
] | 93 | 2020-07-07T07:54:09.000Z | 2022-03-29T10:16:27.000Z | src/scripts/finn-ubrukte-språktekster.py | navikt/familie-ba-soknad | b231916f4c9a5eaeea83dca7546bd42f85288aa4 | [
"MIT"
] | 2 | 2020-06-26T07:35:23.000Z | 2022-01-20T13:29:06.000Z | #!/usr/bin/env python3
from pathlib import Path
from json import load
from typing import Dict, List
from subprocess import run
def get_src_dir() -> Path:
this_file = Path(__file__)
return this_file.parent.parent
def read_lang_keys() -> List[str]:
nb_lang_file = get_src_dir() / "frontend/assets/lang/nb.json"
with open(nb_lang_file, "r") as nb_lang_open:
nb_lang: Dict = load(nb_lang_open)
return nb_lang.keys()
def find_key_usage(key: str) -> int:
if "sivilstatus.kode" in key:
return 1
proc = run(
["grep", "-rch", "--exclude-dir=assets", key, get_src_dir() / "frontend"],
capture_output=True,
text=True,
)
hits = list(filter(None, proc.stdout.split("\n")))
return sum([int(x) for x in hits])
for key in read_lang_keys():
if find_key_usage(key) == 0:
print(key)
| 23.513514 | 82 | 0.641379 |
ace1c9cbddedb3300fd484916e3aacf9383d9e2b | 4,262 | py | Python | pandapower/pypower/newtonpf.py | MDiesing/pandapower | 02df20351bcc39e074711fa9550acb448a7c522c | [
"BSD-3-Clause"
] | null | null | null | pandapower/pypower/newtonpf.py | MDiesing/pandapower | 02df20351bcc39e074711fa9550acb448a7c522c | [
"BSD-3-Clause"
] | null | null | null | pandapower/pypower/newtonpf.py | MDiesing/pandapower | 02df20351bcc39e074711fa9550acb448a7c522c | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# Copyright (c) 2016-2019 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
"""Solves the power flow using a full Newton's method.
"""
from numpy import angle, exp, linalg, conj, r_, Inf, arange, zeros, max, zeros_like, column_stack
from scipy.sparse.linalg import spsolve
from pandapower.pf.iwamoto_multiplier import _iwamoto_step
from pandapower.pypower.makeSbus import makeSbus
from pandapower.pf.create_jacobian import create_jacobian_matrix, get_fastest_jacobian_function
def newtonpf(Ybus, Sbus, V0, pv, pq, ppci, options):
    """Solves the power flow using a full Newton's method.
    Solves for bus voltages given the full system admittance matrix (for
    all buses), the complex bus power injection vector (for all buses),
    the initial vector of complex bus voltages, and column vectors with
    the lists of bus indices for the swing bus, PV buses, and PQ buses,
    respectively. The bus voltage vector contains the set point for
    generator (including ref bus) buses, and the reference angle of the
    swing bus, as well as an initial guess for remaining magnitudes and
    angles.
    @see: L{runpf}
    @author: Ray Zimmerman (PSERC Cornell)
    @author: Richard Lincoln

    Modified by University of Kassel (Florian Schaefer) to use numba

    Returns (V, converged, iterations, J, Vm_it, Va_it); Vm_it/Va_it are
    per-iteration magnitude/angle traces, or None unless options["v_debug"].
    """

    # options
    tol = options['tolerance_mva']
    max_it = options["max_iteration"]
    numba = options["numba"]
    iwamoto = options["algorithm"] == "iwamoto_nr"
    voltage_depend_loads = options["voltage_depend_loads"]
    v_debug = options["v_debug"]

    baseMVA = ppci['baseMVA']
    bus = ppci['bus']
    gen = ppci['gen']

    # initialize
    i = 0
    V = V0
    Va = angle(V)
    Vm = abs(V)
    dVa, dVm = None, None
    if iwamoto:
        dVm, dVa = zeros_like(Vm), zeros_like(Va)
    if v_debug:
        Vm_it = Vm.copy()
        Va_it = Va.copy()
    else:
        Vm_it = None
        Va_it = None

    # set up indexing for updating V
    pvpq = r_[pv, pq]
    # generate lookup pvpq -> index pvpq (used in createJ)
    pvpq_lookup = zeros(max(Ybus.indices) + 1, dtype=int)
    pvpq_lookup[pvpq] = arange(len(pvpq))

    # get jacobian function
    createJ = get_fastest_jacobian_function(pvpq, pq, numba)

    npv = len(pv)
    npq = len(pq)
    # slice boundaries of the Newton update vector dx
    j1 = 0
    j2 = npv  # j1:j2 - V angle of pv buses
    j3 = j2
    j4 = j2 + npq  # j3:j4 - V angle of pq buses
    j5 = j4
    j6 = j4 + npq  # j5:j6 - V mag of pq buses

    # evaluate F(x0)
    F = _evaluate_Fx(Ybus, V, Sbus, pv, pq)

    converged = _check_for_convergence(F, tol)

    Ybus = Ybus.tocsr()
    J = None

    # do Newton iterations
    while (not converged and i < max_it):
        # update iteration counter
        i = i + 1

        J = create_jacobian_matrix(Ybus, V, pvpq, pq, createJ, pvpq_lookup, npv, npq, numba)

        # Newton step: solve J * dx = -F
        dx = -1 * spsolve(J, F)

        # update voltage
        if npv and not iwamoto:
            Va[pv] = Va[pv] + dx[j1:j2]
        if npq and not iwamoto:
            Va[pq] = Va[pq] + dx[j3:j4]
            Vm[pq] = Vm[pq] + dx[j5:j6]

        # iwamoto multiplier to increase convergence
        if iwamoto:
            Vm, Va = _iwamoto_step(Ybus, J, F, dx, pq, npv, npq, dVa, dVm, Vm, Va, pv, j1, j2, j3, j4, j5, j6)

        V = Vm * exp(1j * Va)
        Vm = abs(V)  # update Vm and Va again in case
        Va = angle(V)  # we wrapped around with a negative Vm

        if v_debug:
            Vm_it = column_stack((Vm_it, Vm))
            Va_it = column_stack((Va_it, Va))

        if voltage_depend_loads:
            # loads depend on |V|, so the injection vector must be refreshed
            Sbus = makeSbus(baseMVA, bus, gen, vm=Vm)

        F = _evaluate_Fx(Ybus, V, Sbus, pv, pq)

        converged = _check_for_convergence(F, tol)

    return V, converged, i, J, Vm_it, Va_it
def _evaluate_Fx(Ybus, V, Sbus, pv, pq):
# evalute F(x)
mis = V * conj(Ybus * V) - Sbus
F = r_[mis[pv].real,
mis[pq].real,
mis[pq].imag]
return F
def _check_for_convergence(F, tol):
    # converged when the infinity norm (max absolute mismatch) drops below tol
    return linalg.norm(F, Inf) < tol
| 29.597222 | 110 | 0.629047 |
ace1ca801e9c30ea9d0ff560aaa335c65ba7d910 | 735 | py | Python | Find_Occurence_String_in_String.py | spacetracker-collab/Coding-Solutions | af0285d2e0a5db65bb9fd3ae76eeae4e823ae13b | [
"BSD-2-Clause"
] | null | null | null | Find_Occurence_String_in_String.py | spacetracker-collab/Coding-Solutions | af0285d2e0a5db65bb9fd3ae76eeae4e823ae13b | [
"BSD-2-Clause"
] | null | null | null | Find_Occurence_String_in_String.py | spacetracker-collab/Coding-Solutions | af0285d2e0a5db65bb9fd3ae76eeae4e823ae13b | [
"BSD-2-Clause"
] | null | null | null | #Author - Chetan Sharma
#Question - Write a program/function that checks for the presence of a given character in a string and prints out all the locations
def check_char(char):
    """Return True when *char* is NOT exactly one character long."""
    return len(char) != 1


def find_occurence_of_char(temp_string, temp_char):
    """Print every index at which *temp_char* occurs in *temp_string*.

    Indices are relative to ``temp_string.strip()`` (leading/trailing
    whitespace removed before scanning).

    Raises:
        ValueError: if either argument is not a string, if temp_char is
            not a single character, or if temp_char is longer than
            temp_string.
    """
    if not isinstance(temp_string, str) or not isinstance(temp_char, str):
        raise ValueError("temp_string and temp_char should be string")
    if check_char(temp_char):
        raise ValueError("temp_char can't be empty or it should a char")
    if len(temp_char) > len(temp_string):
        # fixed truncated message ("char shouldn't have") in the original
        raise ValueError("temp_char shouldn't be longer than temp_string")
    for i, v in enumerate(temp_string.strip()):
        if temp_char == v:
            print(f"{temp_char} is present at {i}")
| 33.409091 | 131 | 0.693878 |
ace1cbb3ad3679b292063b58dad3736fe6b53b61 | 2,344 | py | Python | Exercise8/exercise2/house.py | Ssteevooh/Object-Oriented-Programming | be39b2721f43605bb214d7acc89128455c48c80d | [
"MIT"
] | null | null | null | Exercise8/exercise2/house.py | Ssteevooh/Object-Oriented-Programming | be39b2721f43605bb214d7acc89128455c48c80d | [
"MIT"
] | null | null | null | Exercise8/exercise2/house.py | Ssteevooh/Object-Oriented-Programming | be39b2721f43605bb214d7acc89128455c48c80d | [
"MIT"
] | null | null | null | # File name: house.py
# Author: Steve Hommy
# Description: Create a House Class
class House:
    """A house whose rooms start out dirty and unstocked.

    Each chore is a set_*/get_* pair: the setter flips the relevant room
    attributes to their "done" state, the getter announces the chore on
    stdout and returns the current attribute values.
    """

    def __init__(self):
        # Every room begins in its neglected state.
        starting_state = {
            "bedroom_window": "Dirty",
            "kitchen_window": "Dirty",
            "bedroom_floor": "Dirty",
            "kitchen_floor": "Dirty",
            "bedroom_bed": "Unmade",
            "bedroom_surfaces": "Dusty",
            "kitchen_surfaces": "Dusty",
            "bathroom_surfaces": "Dusty",
            "kitchen_fridge": "Empty",
            "bathroom_toilet_paper": "Running out",
        }
        for attribute, state in starting_state.items():
            setattr(self, attribute, state)

    def set_wash_windows_and_bed_is_made(self):
        """Mark both windows as clean and the bed as made."""
        self.bedroom_window = "Clean"
        self.kitchen_window = "Clean"
        self.bedroom_bed = "Made"

    def get_wash_windows_and_bed_is_made(self):
        """Announce the chore; return (bedroom window, kitchen window, bed)."""
        print("Washing the windows and making the bed")
        return self.bedroom_window, self.kitchen_window, self.bedroom_bed

    def set_vacuum_floors_and_clean_surfaces(self):
        """Mark both floors and every surface as clean."""
        for attribute in ("bedroom_floor", "kitchen_floor", "bedroom_surfaces",
                          "kitchen_surfaces", "bathroom_surfaces"):
            setattr(self, attribute, "Clean")

    def get_vacuum_floors_and_clean_surfaces(self):
        """Announce the chore; return floor and surface states."""
        print("Vacuuming the floor and cleaning surfaces")
        return (self.bedroom_floor, self.kitchen_floor, self.bedroom_surfaces,
                self.kitchen_surfaces, self.bathroom_surfaces)

    def set_shopping(self):
        """Restock the fridge and the toilet paper."""
        self.kitchen_fridge = "Filled"
        self.bathroom_toilet_paper = "Enough"

    def get_shopping(self):
        """Announce the shopping trip; return (fridge, toilet paper) states."""
        print("Going shopping to refill fridge and buy more toilet paper")
        return self.kitchen_fridge, self.bathroom_toilet_paper

    def set_enough_toilet_paper(self):
        """Stockpile toilet paper."""
        self.bathroom_toilet_paper = "More than enough"

    def get_enough_toilet_paper(self):
        """Announce the stockpiling plan; return the toilet-paper state."""
        print("We should prepare for the lockdown and buy more toilet paper")
        return self.bathroom_toilet_paper

    def __str__(self):
        return f"""
        Houses status:
        Bedroom:
            Window: {self.bedroom_window}
            Floor: {self.bedroom_floor}
            Bed: {self.bedroom_bed}
            Surfaces: {self.bedroom_surfaces}
        Kitchen:
            Window: {self.kitchen_window}
            Floor: {self.kitchen_floor}
            Fridge: {self.kitchen_fridge}
            Surfaces: {self.kitchen_surfaces}
        Bathroom:
            Surfaces: {self.bathroom_surfaces}
            Toilet paper: {self.bathroom_toilet_paper}
        """
| 31.675676 | 123 | 0.6843 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.